author     clang-format-7.0.1 <adam.martin@10gen.com>    2019-07-26 18:42:24 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>    2019-07-26 18:42:24 -0400
commit     c1a45ebbb0530e3d0201321d725527f1eb83ffce (patch)
tree       f523079dc5ded3052eefbdcaae424b7502df5b25
parent     c9599d8610c3da0b7c3da65667aff821063cf5b9 (diff)
download   mongo-c1a45ebbb0530e3d0201321d725527f1eb83ffce.tar.gz
Apply formatting per `clang-format-7.0.1`
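The commit is a mechanical reformatting pass over the JavaScript test suite. As an illustration only, here is a minimal sketch of how such a pass can be reproduced, assuming a clang-format 7.0.1 binary on PATH and a .clang-format style file at the repository root; MongoDB's own formatting helper lives in its buildscripts and may differ:

#!/usr/bin/env python
# Minimal sketch (not MongoDB's actual tooling): rewrite every .js test
# file under a directory in place with clang-format, the way a bulk
# "apply formatting" commit is produced. Assumes a `clang-format` (7.0.1)
# binary on PATH and a .clang-format file at the repository root.
import os
import subprocess

def format_js_tree(root):
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if name.endswith(".js"):
                path = os.path.join(dirpath, name)
                # -i rewrites the file in place; -style=file picks up the
                # .clang-format configuration from the repository root.
                subprocess.check_call(["clang-format", "-i", "-style=file", path])

if __name__ == "__main__":
    format_js_tree("jstests")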
Diffstat (lines changed per file):
-rw-r--r--  jstests/aggregation/bugs/cond.js  137
-rw-r--r--  jstests/aggregation/bugs/cursor_timeout.js  135
-rw-r--r--  jstests/aggregation/bugs/explain_options_helper.js  30
-rw-r--r--  jstests/aggregation/bugs/firstlast.js  225
-rw-r--r--  jstests/aggregation/bugs/groupMissing.js  94
-rw-r--r--  jstests/aggregation/bugs/lookup_unwind_getmore.js  68
-rw-r--r--  jstests/aggregation/bugs/lookup_unwind_killcursor.js  62
-rw-r--r--  jstests/aggregation/bugs/match.js  321
-rw-r--r--  jstests/aggregation/bugs/match_swap_limit.js  22
-rw-r--r--  jstests/aggregation/bugs/reverseArray.js  38
-rw-r--r--  jstests/aggregation/bugs/server10176.js  104
-rw-r--r--  jstests/aggregation/bugs/server11118.js  299
-rw-r--r--  jstests/aggregation/bugs/server11675.js  441
-rw-r--r--  jstests/aggregation/bugs/server12015.js  124
-rw-r--r--  jstests/aggregation/bugs/server14421.js  74
-rw-r--r--  jstests/aggregation/bugs/server14670.js  24
-rw-r--r--  jstests/aggregation/bugs/server14691.js  74
-rw-r--r--  jstests/aggregation/bugs/server14872.js  48
-rw-r--r--  jstests/aggregation/bugs/server17224.js  32
-rw-r--r--  jstests/aggregation/bugs/server17943.js  134
-rw-r--r--  jstests/aggregation/bugs/server18198.js  114
-rw-r--r--  jstests/aggregation/bugs/server18222.js  72
-rw-r--r--  jstests/aggregation/bugs/server18427.js  299
-rw-r--r--  jstests/aggregation/bugs/server20163.js  334
-rw-r--r--  jstests/aggregation/bugs/server20168.js  59
-rw-r--r--  jstests/aggregation/bugs/server20169.js  118
-rw-r--r--  jstests/aggregation/bugs/server21632.js  145
-rw-r--r--  jstests/aggregation/bugs/server22093.js  52
-rw-r--r--  jstests/aggregation/bugs/server22580.js  79
-rw-r--r--  jstests/aggregation/bugs/server25590.js  24
-rw-r--r--  jstests/aggregation/bugs/server26462.js  40
-rw-r--r--  jstests/aggregation/bugs/server37750.js  120
-rw-r--r--  jstests/aggregation/bugs/server4588.js  99
-rw-r--r--  jstests/aggregation/bugs/server4589.js  126
-rw-r--r--  jstests/aggregation/bugs/server4638.js  2
-rw-r--r--  jstests/aggregation/bugs/server5012.js  14
-rw-r--r--  jstests/aggregation/bugs/server533.js  56
-rw-r--r--  jstests/aggregation/bugs/server6074.js  148
-rw-r--r--  jstests/aggregation/bugs/server6125.js  6
-rw-r--r--  jstests/aggregation/bugs/server6127.js  28
-rw-r--r--  jstests/aggregation/bugs/server6147.js  70
-rw-r--r--  jstests/aggregation/bugs/server6179.js  98
-rw-r--r--  jstests/aggregation/bugs/server6185.js  20
-rw-r--r--  jstests/aggregation/bugs/server6530.js  48
-rw-r--r--  jstests/aggregation/bugs/server6779.js  26
-rw-r--r--  jstests/aggregation/bugs/server7695_isodates.js  469
-rw-r--r--  jstests/aggregation/bugs/server7781.js  232
-rw-r--r--  jstests/aggregation/bugs/server8141.js  72
-rw-r--r--  jstests/aggregation/bugs/server8164.js  280
-rw-r--r--  jstests/aggregation/bugs/server8568.js  72
-rw-r--r--  jstests/aggregation/bugs/server8581.js  64
-rw-r--r--  jstests/aggregation/bugs/server9444.js  90
-rw-r--r--  jstests/aggregation/bugs/server9625.js  116
-rw-r--r--  jstests/aggregation/bugs/skip_limit_overflow.js  193
-rw-r--r--  jstests/aggregation/bugs/sort_arrays.js  20
-rw-r--r--  jstests/aggregation/bugs/substr.js  4
-rw-r--r--  jstests/aggregation/explain.js  33
-rw-r--r--  jstests/aggregation/explain_limit.js  123
-rw-r--r--  jstests/aggregation/explain_writing_aggs.js  152
-rw-r--r--  jstests/aggregation/expressions/arrayToObject.js  128
-rw-r--r--  jstests/aggregation/expressions/collation_expressions.js  378
-rw-r--r--  jstests/aggregation/expressions/convert.js  621
-rw-r--r--  jstests/aggregation/expressions/date_expressions_with_timezones.js  150
-rw-r--r--  jstests/aggregation/expressions/date_from_parts.js  1790
-rw-r--r--  jstests/aggregation/expressions/date_from_string.js  1473
-rw-r--r--  jstests/aggregation/expressions/date_from_string_on_error.js  274
-rw-r--r--  jstests/aggregation/expressions/date_from_string_on_null.js  100
-rw-r--r--  jstests/aggregation/expressions/date_to_parts.js  571
-rw-r--r--  jstests/aggregation/expressions/date_to_string.js  487
-rw-r--r--  jstests/aggregation/expressions/date_to_string_on_null.js  107
-rw-r--r--  jstests/aggregation/expressions/expression_mod.js  160
-rw-r--r--  jstests/aggregation/expressions/expression_trigonometric.js  499
-rw-r--r--  jstests/aggregation/expressions/floor_ceil.js  62
-rw-r--r--  jstests/aggregation/expressions/in.js  389
-rw-r--r--  jstests/aggregation/expressions/indexof_array.js  82
-rw-r--r--  jstests/aggregation/expressions/indexof_bytes.js  202
-rw-r--r--  jstests/aggregation/expressions/indexof_codepoints.js  174
-rw-r--r--  jstests/aggregation/expressions/let.js  209
-rw-r--r--  jstests/aggregation/expressions/merge_objects.js  291
-rw-r--r--  jstests/aggregation/expressions/objectToArray.js  179
-rw-r--r--  jstests/aggregation/expressions/object_ids_for_date_expressions.js  157
-rw-r--r--  jstests/aggregation/expressions/reduce.js  103
-rw-r--r--  jstests/aggregation/expressions/regex.js  917
-rw-r--r--  jstests/aggregation/expressions/regex_limits.js  215
-rw-r--r--  jstests/aggregation/expressions/round_trunc.js  193
-rw-r--r--  jstests/aggregation/expressions/size.js  27
-rw-r--r--  jstests/aggregation/expressions/split.js  118
-rw-r--r--  jstests/aggregation/expressions/switch.js  262
-rw-r--r--  jstests/aggregation/expressions/switch_errors.js  96
-rw-r--r--  jstests/aggregation/expressions/trim.js  157
-rw-r--r--  jstests/aggregation/extras/utils.js  2
-rw-r--r--  jstests/aggregation/group_conversion_to_distinct_scan.js  1288
-rw-r--r--  jstests/aggregation/illegal_reference_in_match.js  48
-rw-r--r--  jstests/aggregation/match_swapping_renamed_fields.js  320
-rw-r--r--  jstests/aggregation/mongos_merge.js  801
-rw-r--r--  jstests/aggregation/mongos_slaveok.js  52
-rw-r--r--  jstests/aggregation/optimize_away_pipeline.js  550
-rw-r--r--  jstests/aggregation/pipeline_pass_through_from_mongos.js  258
-rw-r--r--  jstests/aggregation/shard_targeting.js  707
-rw-r--r--  jstests/aggregation/sharded_agg_cleanup_on_error.js  263
-rw-r--r--  jstests/aggregation/sources/addFields/use_cases.js  93
-rw-r--r--  jstests/aggregation/sources/addFields/weather.js  157
-rw-r--r--  jstests/aggregation/sources/bucket/collation_bucket.js  162
-rw-r--r--  jstests/aggregation/sources/bucketauto/collation_bucketauto.js  103
-rw-r--r--  jstests/aggregation/sources/collStats/count.js  112
-rw-r--r--  jstests/aggregation/sources/collStats/shard_host_info.js  96
-rw-r--r--  jstests/aggregation/sources/facet/inner_graphlookup.js  60
-rw-r--r--  jstests/aggregation/sources/facet/inner_lookup.js  51
-rw-r--r--  jstests/aggregation/sources/facet/use_cases.js  310
-rw-r--r--  jstests/aggregation/sources/geonear/collation_geonear.js  138
-rw-r--r--  jstests/aggregation/sources/geonear/distancefield_and_includelocs.js  319
-rw-r--r--  jstests/aggregation/sources/geonear/mindistance_and_maxdistance.js  191
-rw-r--r--  jstests/aggregation/sources/geonear/requires_geo_index.js  38
-rw-r--r--  jstests/aggregation/sources/graphLookup/airports.js  78
-rw-r--r--  jstests/aggregation/sources/graphLookup/basic.js  133
-rw-r--r--  jstests/aggregation/sources/graphLookup/collation_graphlookup.js  108
-rw-r--r--  jstests/aggregation/sources/graphLookup/error.js  253
-rw-r--r--  jstests/aggregation/sources/graphLookup/filter.js  58
-rw-r--r--  jstests/aggregation/sources/graphLookup/nested_objects.js  70
-rw-r--r--  jstests/aggregation/sources/graphLookup/socialite.js  46
-rw-r--r--  jstests/aggregation/sources/graphLookup/variables.js  31
-rw-r--r--  jstests/aggregation/sources/group/collation_group.js  128
-rw-r--r--  jstests/aggregation/sources/group/group_by_variable.js  30
-rw-r--r--  jstests/aggregation/sources/group/numeric_grouping.js  28
-rw-r--r--  jstests/aggregation/sources/group/text_score_grouping.js  35
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_absorb_match.js  46
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_contains_text.js  91
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_non_correlated.js  92
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_sort_limit.js  35
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_subpipeline.js  615
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_subpipeline_geonear.js  34
-rw-r--r--  jstests/aggregation/sources/lookup/profile_lookup.js  50
-rw-r--r--  jstests/aggregation/sources/match/collation_match.js  85
-rw-r--r--  jstests/aggregation/sources/match/expr_match.js  87
-rw-r--r--  jstests/aggregation/sources/match/text_search_requires_index.js  34
-rw-r--r--  jstests/aggregation/sources/merge/all_modes.js  556
-rw-r--r--  jstests/aggregation/sources/merge/batch_writes.js  112
-rw-r--r--  jstests/aggregation/sources/merge/bypass_doc_validation.js  366
-rw-r--r--  jstests/aggregation/sources/merge/disallowed_in_lookup.js  76
-rw-r--r--  jstests/aggregation/sources/merge/exchange_explain.js  308
-rw-r--r--  jstests/aggregation/sources/merge/merge_to_referenced_collection.js  181
-rw-r--r--  jstests/aggregation/sources/merge/merge_to_same_collection.js  22
-rw-r--r--  jstests/aggregation/sources/merge/mode_fail_insert.js  284
-rw-r--r--  jstests/aggregation/sources/merge/mode_keep_existing_insert.js  723
-rw-r--r--  jstests/aggregation/sources/merge/mode_merge_discard.js  454
-rw-r--r--  jstests/aggregation/sources/merge/mode_merge_fail.js  191
-rw-r--r--  jstests/aggregation/sources/merge/mode_merge_insert.js  711
-rw-r--r--  jstests/aggregation/sources/merge/mode_pipeline_discard.js  482
-rw-r--r--  jstests/aggregation/sources/merge/mode_pipeline_fail.js  158
-rw-r--r--  jstests/aggregation/sources/merge/mode_pipeline_insert.js  1250
-rw-r--r--  jstests/aggregation/sources/merge/mode_replace_discard.js  387
-rw-r--r--  jstests/aggregation/sources/merge/mode_replace_fail.js  187
-rw-r--r--  jstests/aggregation/sources/merge/mode_replace_insert.js  393
-rw-r--r--  jstests/aggregation/sources/merge/on_fields_validation.js  258
-rw-r--r--  jstests/aggregation/sources/merge/requires_unique_index.js  692
-rw-r--r--  jstests/aggregation/sources/merge/use_cases.js  167
-rw-r--r--  jstests/aggregation/sources/out/out_in_lookup_not_allowed.js  66
-rw-r--r--  jstests/aggregation/sources/out/replace_collection.js  102
-rw-r--r--  jstests/aggregation/sources/out/required_last_position.js  19
-rw-r--r--  jstests/aggregation/sources/project/remove_redundant_projects.js  285
-rw-r--r--  jstests/aggregation/sources/redact/collation_redact.js  67
-rw-r--r--  jstests/aggregation/sources/replaceRoot/address.js  163
-rw-r--r--  jstests/aggregation/sources/replaceRoot/use_cases.js  33
-rw-r--r--  jstests/aggregation/sources/sort/collation_sort.js  158
-rw-r--r--  jstests/aggregation/sources/sort/collation_sort_japanese.js  271
-rw-r--r--  jstests/aggregation/sources/sort/explain_sort.js  94
-rw-r--r--  jstests/aggregation/sources/unset/unset.js  55
-rw-r--r--  jstests/aggregation/stages/skip_with_limit.js  64
-rw-r--r--  jstests/aggregation/testall.js  1773
-rw-r--r--  jstests/aggregation/testutils.js  268
-rw-r--r--  jstests/aggregation/use_query_project_and_sort.js  99
-rw-r--r--  jstests/aggregation/use_query_projection.js  198
-rw-r--r--  jstests/aggregation/use_query_sort.js  126
-rw-r--r--  jstests/aggregation/variables/layered_variables.js  19
-rw-r--r--  jstests/aggregation/variables/remove_system_variable.js  108
-rw-r--r--  jstests/auth/auth3.js  40
-rw-r--r--  jstests/auth/auth_helpers.js  32
-rw-r--r--  jstests/auth/auth_mechanism_discovery.js  91
-rw-r--r--  jstests/auth/auth_mechanisms_parsing.js  14
-rw-r--r--  jstests/auth/authentication_restrictions.js  428
-rw-r--r--  jstests/auth/authentication_restrictions_role.js  778
-rw-r--r--  jstests/auth/authz_modifications_access_control.js  1
-rw-r--r--  jstests/auth/autocomplete_auth.js  58
-rw-r--r--  jstests/auth/basic_role_auth.js  330
-rw-r--r--  jstests/auth/cluster_ip_whitelist.js  101
-rw-r--r--  jstests/auth/commands_builtin_roles.js  12
-rw-r--r--  jstests/auth/commands_user_defined_roles.js  13
-rw-r--r--  jstests/auth/curop_auth_info.js  132
-rw-r--r--  jstests/auth/currentop_cursors_auth.js  262
-rw-r--r--  jstests/auth/deleted_recreated_user.js  116
-rw-r--r--  jstests/auth/getMore.js  682
-rw-r--r--  jstests/auth/iteration_count_control.js  60
-rw-r--r--  jstests/auth/iteration_count_defaults.js  38
-rw-r--r--  jstests/auth/keyfile_rollover.js  128
-rw-r--r--  jstests/auth/kill_cursors.js  328
-rw-r--r--  jstests/auth/kill_sessions.js  22
-rw-r--r--  jstests/auth/killop_own_ops.js  277
-rw-r--r--  jstests/auth/list_all_local_sessions.js  108
-rw-r--r--  jstests/auth/list_all_sessions.js  98
-rw-r--r--  jstests/auth/list_collections_filter_views.js  103
-rw-r--r--  jstests/auth/list_collections_own_collections.js  345
-rw-r--r--  jstests/auth/list_databases.js  308
-rw-r--r--  jstests/auth/list_local_sessions.js  117
-rw-r--r--  jstests/auth/list_sessions.js  158
-rw-r--r--  jstests/auth/listcommands_preauth.js  54
-rw-r--r--  jstests/auth/logs_include_client_info.js  36
-rw-r--r--  jstests/auth/mongoURIAuth.js  102
-rw-r--r--  jstests/auth/mongos_cache_invalidation.js  2
-rw-r--r--  jstests/auth/pinned_users.js  347
-rw-r--r--  jstests/auth/pre_auth_commands_with_sessions.js  95
-rw-r--r--  jstests/auth/prepared_transaction.js  392
-rw-r--r--  jstests/auth/refresh_logical_session_cache_with_long_usernames.js  63
-rw-r--r--  jstests/auth/renameRestrictedCollections.js  218
-rw-r--r--  jstests/auth/resource_pattern_matching.js  62
-rw-r--r--  jstests/auth/role_management_commands_edge_cases.js  10
-rw-r--r--  jstests/auth/role_management_commands_lib.js  8
-rw-r--r--  jstests/auth/role_management_commands_sharded_wc_1.js  24
-rw-r--r--  jstests/auth/role_management_commands_sharded_wc_majority.js  24
-rw-r--r--  jstests/auth/role_management_commands_standalone.js  10
-rw-r--r--  jstests/auth/sasl_mechanism_discovery.js  122
-rw-r--r--  jstests/auth/scram-credentials-invalid.js  75
-rw-r--r--  jstests/auth/shell.js  22
-rw-r--r--  jstests/auth/system_auth_scram_mechs.js  26
-rw-r--r--  jstests/auth/system_roles_collMod.js  32
-rw-r--r--  jstests/auth/system_user_exception.js  24
-rw-r--r--  jstests/auth/system_user_privileges.js  174
-rw-r--r--  jstests/auth/transactions.js  276
-rw-r--r--  jstests/auth/upgrade_noauth_to_keyfile.js  64
-rw-r--r--  jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js  36
-rw-r--r--  jstests/auth/user_cache_doc_source.js  86
-rw-r--r--  jstests/auth/user_defined_roles.js  7
-rw-r--r--  jstests/auth/user_defined_roles_on_secondaries.js  380
-rw-r--r--  jstests/auth/user_management_commands_edge_cases.js  1
-rw-r--r--  jstests/auth/user_management_commands_lib.js  17
-rw-r--r--  jstests/auth/user_management_commands_mechanisms.js  394
-rw-r--r--  jstests/auth/user_management_commands_sharded_wc_1.js  14
-rw-r--r--  jstests/auth/user_management_commands_sharded_wc_majority.js  14
-rw-r--r--  jstests/auth/user_management_commands_standalone.js  10
-rw-r--r--  jstests/auth/user_special_chars.js  97
-rw-r--r--  jstests/auth/usersInfo.js  83
-rw-r--r--  jstests/auth/validate_auth_schema_on_startup.js  65
-rw-r--r--  jstests/auth/views_authz.js  261
-rw-r--r--  jstests/change_streams/apply_ops.js  300
-rw-r--r--  jstests/change_streams/apply_ops_resumability.js  356
-rw-r--r--  jstests/change_streams/ban_from_lookup.js  24
-rw-r--r--  jstests/change_streams/ban_from_views.js  48
-rw-r--r--  jstests/change_streams/change_stream.js  513
-rw-r--r--  jstests/change_streams/collation.js  662
-rw-r--r--  jstests/change_streams/does_not_implicitly_create_database.js  120
-rw-r--r--  jstests/change_streams/error_label.js  40
-rw-r--r--  jstests/change_streams/include_cluster_time.js  80
-rw-r--r--  jstests/change_streams/lookup_post_image.js  477
-rw-r--r--  jstests/change_streams/metadata_notifications.js  491
-rw-r--r--  jstests/change_streams/no_regex_leak.js  94
-rw-r--r--  jstests/change_streams/only_wake_getmore_for_relevant_changes.js  309
-rw-r--r--  jstests/change_streams/pipeline_cannot_modify_id_field.js  260
-rw-r--r--  jstests/change_streams/report_latest_observed_oplog_timestamp.js  155
-rw-r--r--  jstests/change_streams/report_post_batch_resume_token.js  364
-rw-r--r--  jstests/change_streams/required_as_first_stage.js  90
-rw-r--r--  jstests/change_streams/resume_from_high_water_mark_token.js  465
-rw-r--r--  jstests/change_streams/shell_helper.js  394
-rw-r--r--  jstests/change_streams/start_at_cluster_time.js  123
-rw-r--r--  jstests/change_streams/whitelist.js  64
-rw-r--r--  jstests/change_streams/whole_cluster.js  230
-rw-r--r--  jstests/change_streams/whole_cluster_metadata_notifications.js  466
-rw-r--r--  jstests/change_streams/whole_cluster_resumability.js  290
-rw-r--r--  jstests/change_streams/whole_db.js  138
-rw-r--r--  jstests/change_streams/whole_db_metadata_notifications.js  468
-rw-r--r--  jstests/change_streams/whole_db_resumability.js  362
-rw-r--r--  jstests/client_encrypt/fle_auto_decrypt.js  73
-rw-r--r--  jstests/client_encrypt/fle_aws_faults.js  210
-rw-r--r--  jstests/client_encrypt/fle_command_line_encryption.js  62
-rw-r--r--  jstests/client_encrypt/fle_encrypt_decrypt_shell.js  214
-rw-r--r--  jstests/client_encrypt/fle_key_faults.js  152
-rw-r--r--  jstests/client_encrypt/fle_keys.js  98
-rw-r--r--  jstests/client_encrypt/fle_valid_fle_options.js  90
-rw-r--r--  jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js  143
-rw-r--r--  jstests/client_encrypt/lib/mock_kms.js  10
-rw-r--r--  jstests/concurrency/fsm_example.js  2
-rw-r--r--  jstests/concurrency/fsm_libs/assert.js  2
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js  15
-rw-r--r--  jstests/concurrency/fsm_libs/composer.js  2
-rw-r--r--  jstests/concurrency/fsm_libs/extend_workload.js  6
-rw-r--r--  jstests/concurrency/fsm_libs/parse_config.js  4
-rw-r--r--  jstests/concurrency/fsm_libs/resmoke_runner.js  493
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js  14
-rw-r--r--  jstests/concurrency/fsm_libs/worker_thread.js  4
-rw-r--r--  jstests/concurrency/fsm_selftests.js  58
-rw-r--r--  jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js  7
-rw-r--r--  jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js  11
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_base.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_graph_lookup.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_group_external.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_match.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js  10
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js  14
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_out.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_sort.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_sort_external.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_create_role.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_create_user.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_drop_role.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_drop_user.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/collmod.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/convert_to_capped_collection.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/count.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/create_collection.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/create_database.js  6
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background_unique.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background_unique_collmod_capped.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/database_versioning.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct_noindex.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_collection.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_database.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_index_during_replan.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/explain.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_aggregate.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_count.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_find.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_remove.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_update.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_inc.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_grow.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_queue.js  3
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_upsert.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/globally_managed_cursors.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_1char.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_2d.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_base.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_compound.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_large.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_multikey.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_text.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ttl.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_upsert.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_where.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js  3
-rw-r--r--  jstests/concurrency/fsm_workloads/invalidated_cursors.js  11
-rw-r--r--  jstests/concurrency/fsm_workloads/kill_aggregation.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/kill_rooted_or.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/list_indexes.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_drop.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_inline.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js  11
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js  3
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js  19
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js  3
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js  15
-rw-r--r--  jstests/concurrency/fsm_workloads/plan_cache_drop_database.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_base.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js  5
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js  15
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_multiple_documents.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_single_document.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_droptarget.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/secondary_reads.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/server_status.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_base_partitioned.js  24
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js  5
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js  21
-rw-r--r--  jstests/concurrency/fsm_workloads/update_and_bulk_insert.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_array.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/update_check_index.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/update_inc.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_rename.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_replace.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_upsert_multi.js  2
-rw-r--r--  jstests/concurrency/fsm_workloads/update_where.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/upsert_unique_index.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/upsert_where.js  3
-rw-r--r--  jstests/concurrency/fsm_workloads/view_catalog.js  4
-rw-r--r--  jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js  37
-rw-r--r--  jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js  3
-rw-r--r--  jstests/concurrency/fsm_workloads/yield.js  17
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_and_hashed.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_and_sorted.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_fetch.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_id_hack.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_rooted_or.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_sort.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_sort_merge.js  1
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_text.js  1
-rw-r--r--  jstests/core/SERVER-23626.js  23
-rw-r--r--  jstests/core/add_skip_stage_before_fetch.js  111
-rw-r--r--  jstests/core/agg_hint.js  453
-rw-r--r--  jstests/core/aggregation_accepts_write_concern.js  40
-rw-r--r--  jstests/core/aggregation_getmore_batchsize.js  64
-rw-r--r--  jstests/core/all.js  2
-rw-r--r--  jstests/core/andor.js  4
-rw-r--r--  jstests/core/apitest_db_profile_level.js  66
-rw-r--r--  jstests/core/apitest_dbcollection.js  296
-rw-r--r--  jstests/core/apply_ops1.js  839
-rw-r--r--  jstests/core/apply_ops2.js  7
-rw-r--r--  jstests/core/apply_ops_dups.js  54
-rw-r--r--  jstests/core/apply_ops_index_collation.js  136
-rw-r--r--  jstests/core/apply_ops_invalid_index_spec.js  120
-rw-r--r--  jstests/core/apply_ops_without_ns.js  9
-rw-r--r--  jstests/core/arrayfind8.js  258
-rw-r--r--  jstests/core/autocomplete.js  74
-rw-r--r--  jstests/core/automation_setparameter.js  91
-rw-r--r--  jstests/core/awaitdata_getmore_cmd.js  339
-rw-r--r--  jstests/core/background_index_multikey.js  114
-rw-r--r--  jstests/core/background_unique_indexes.js  84
-rw-r--r--  jstests/core/batch_size.js  233
-rw-r--r--  jstests/core/batch_write_collation_estsize.js  346
-rw-r--r--  jstests/core/batch_write_command_delete.js  2
-rw-r--r--  jstests/core/batch_write_command_insert.js  2
-rw-r--r--  jstests/core/batch_write_command_update.js  2
-rw-r--r--  jstests/core/bench_test1.js  77
-rw-r--r--  jstests/core/benchrun_pipeline_updates.js  86
-rw-r--r--  jstests/core/bindata_indexonly.js  112
-rw-r--r--  jstests/core/bittest.js  297
-rw-r--r--  jstests/core/bson.js  259
-rw-r--r--  jstests/core/bson_compare_bug.js  72
-rw-r--r--  jstests/core/bulk_insert_capped.js  30
-rw-r--r--  jstests/core/bulk_legacy_enforce_gle.js  202
-rw-r--r--  jstests/core/bypass_doc_validation.js  329
-rw-r--r--  jstests/core/capped6.js  170
-rw-r--r--  jstests/core/capped_queries_and_id_index.js  30
-rw-r--r--  jstests/core/capped_update.js  38
-rw-r--r--  jstests/core/client_metadata_ismaster.js  13
-rw-r--r--  jstests/core/clone_as_capped_nonexistant.js  43
-rw-r--r--  jstests/core/collation.js  3568
-rw-r--r--  jstests/core/collation_convert_to_capped.js  39
-rw-r--r--  jstests/core/collation_find_and_modify.js  146
-rw-r--r--  jstests/core/collation_plan_cache.js  464
-rw-r--r--  jstests/core/collation_update.js  551
-rw-r--r--  jstests/core/collation_with_reverse_index.js  14
-rw-r--r--  jstests/core/collmod_bad_spec.js  24
-rw-r--r--  jstests/core/collmod_without_uuid.js  28
-rw-r--r--  jstests/core/commands_namespace_parsing.js  603
-rw-r--r--  jstests/core/commands_that_do_not_write_do_not_accept_wc.js  82
-rw-r--r--  jstests/core/commands_with_uuid.js  182
-rw-r--r--  jstests/core/compact_keeps_indexes.js  53
-rw-r--r--  jstests/core/compare_timestamps.js  12
-rw-r--r--  jstests/core/connection_status.js  141
-rw-r--r--  jstests/core/constructors.js  6
-rw-r--r--  jstests/core/contained_or_with_nested_or.js  70
-rw-r--r--  jstests/core/convert_to_capped.js  26
-rw-r--r--  jstests/core/count_hint.js  76
-rw-r--r--  jstests/core/counta.js  36
-rw-r--r--  jstests/core/coveredIndex1.js  134
-rw-r--r--  jstests/core/coveredIndex3.js  6
-rw-r--r--  jstests/core/covered_index_sort_no_fetch_optimization.js  454
-rw-r--r--  jstests/core/covered_multikey.js  174
-rw-r--r--  jstests/core/create_collection.js  315
-rw-r--r--  jstests/core/create_index_same_spec_different_name.js  15
-rw-r--r--  jstests/core/create_indexes.js  334
-rw-r--r--  jstests/core/create_indexes_with_unknown_field_names.js  60
-rw-r--r--  jstests/core/crud_api.js  1447
-rw-r--r--  jstests/core/currentop.js  78
-rw-r--r--  jstests/core/currentop_cursors.js  443
-rw-r--r--  jstests/core/currentop_predicate.js  16
-rw-r--r--  jstests/core/cursora.js  79
-rw-r--r--  jstests/core/datasize2.js  46
-rw-r--r--  jstests/core/dbadmin.js  50
-rw-r--r--  jstests/core/dbref4.js  26
-rw-r--r--  jstests/core/dbstats.js  126
-rw-r--r--  jstests/core/diagdata.js  8
-rw-r--r--  jstests/core/distinct1.js  99
-rw-r--r--  jstests/core/distinct4.js  77
-rw-r--r--  jstests/core/distinct_compound_index.js  53
-rw-r--r--  jstests/core/distinct_index1.js  124
-rw-r--r--  jstests/core/distinct_multikey.js  166
-rw-r--r--  jstests/core/distinct_multikey_dotted_path.js  374
-rw-r--r--  jstests/core/doc_validation.js  515
-rw-r--r--  jstests/core/doc_validation_invalid_validators.js  99
-rw-r--r--  jstests/core/doc_validation_options.js  79
-rw-r--r--  jstests/core/dotted_path_in_null.js  32
-rw-r--r--  jstests/core/drop_index.js  134
-rw-r--r--  jstests/core/dropdb.js  4
-rw-r--r--  jstests/core/elemMatchProjection.js  478
-rw-r--r--  jstests/core/elemmatch_or_pushdown.js  138
-rw-r--r--  jstests/core/ensure_sorted.js  34
-rw-r--r--  jstests/core/exhaust.js  35
-rw-r--r--  jstests/core/existsa.js  212
-rw-r--r--  jstests/core/explain_agg_write_concern.js  102
-rw-r--r--  jstests/core/explain_db_mismatch.js  5
-rw-r--r--  jstests/core/explain_delete.js  102
-rw-r--r--  jstests/core/explain_distinct.js  152
-rw-r--r--  jstests/core/explain_find_and_modify.js  527
-rw-r--r--  jstests/core/explain_missing_collection.js  68
-rw-r--r--  jstests/core/explain_missing_database.js  68
-rw-r--r--  jstests/core/explain_multi_plan.js  106
-rw-r--r--  jstests/core/explain_multikey.js  124
-rw-r--r--  jstests/core/explain_sample.js  60
-rw-r--r--  jstests/core/explain_uuid.js  99
-rw-r--r--  jstests/core/explain_writecmd_nonexistent_collection.js  54
-rw-r--r--  jstests/core/expr.js  599
-rw-r--r--  jstests/core/expr_index_use.js  459
-rw-r--r--  jstests/core/expr_or_pushdown.js  32
-rw-r--r--  jstests/core/expr_valid_positions.js  28
-rw-r--r--  jstests/core/failcommand_failpoint.js  513
-rw-r--r--  jstests/core/field_name_validation.js  335
-rw-r--r--  jstests/core/filemd5.js  24
-rw-r--r--  jstests/core/find4.js  66
-rw-r--r--  jstests/core/find5.js  76
-rw-r--r--  jstests/core/find_and_modify3.js  6
-rw-r--r--  jstests/core/find_and_modify4.js  3
-rw-r--r--  jstests/core/find_and_modify_concurrent_update.js  59
-rw-r--r--  jstests/core/find_and_modify_empty_coll.js  24
-rw-r--r--  jstests/core/find_and_modify_invalid_query_params.js  171
-rw-r--r--  jstests/core/find_and_modify_pipeline_update.js  85
-rw-r--r--  jstests/core/find_and_modify_server6226.js  10
-rw-r--r--  jstests/core/find_and_modify_server6865.js  555
-rw-r--r--  jstests/core/find_dedup.js  74
-rw-r--r--  jstests/core/find_getmore_bsonsize.js  136
-rw-r--r--  jstests/core/find_getmore_cmd.js  143
-rw-r--r--  jstests/core/find_projection_meta_errors.js  28
-rw-r--r--  jstests/core/fsync.js  206
-rw-r--r--  jstests/core/fts1.js  30
-rw-r--r--  jstests/core/fts_array.js  88
-rw-r--r--  jstests/core/fts_diacritic_and_caseinsensitive.js  53
-rw-r--r--  jstests/core/fts_diacritic_and_casesensitive.js  105
-rw-r--r--  jstests/core/fts_diacriticsensitive.js  68
-rw-r--r--  jstests/core/fts_dotted_prefix_fields.js  18
-rw-r--r--  jstests/core/fts_explain.js  62
-rw-r--r--  jstests/core/fts_index_version2.js  51
-rw-r--r--  jstests/core/fts_querylang.js  139
-rw-r--r--  jstests/core/fts_score_sort.js  116
-rw-r--r--  jstests/core/fts_spanish.js  51
-rw-r--r--  jstests/core/fts_trailing_fields.js  30
-rw-r--r--  jstests/core/function_string_representations.js  63
-rw-r--r--  jstests/core/geo3.js  134
-rw-r--r--  jstests/core/geo_2d_trailing_fields.js  90
-rw-r--r--  jstests/core/geo_2d_with_geojson_point.js  4
-rw-r--r--  jstests/core/geo_big_polygon2.js  810
-rw-r--r--  jstests/core/geo_big_polygon3.js  117
-rw-r--r--  jstests/core/geo_distinct.js  215
-rw-r--r--  jstests/core/geo_fiddly_box.js  23
-rw-r--r--  jstests/core/geo_mapreduce2.js  1
-rw-r--r--  jstests/core/geo_mindistance.js  478
-rw-r--r--  jstests/core/geo_mindistance_boundaries.js  26
-rw-r--r--  jstests/core/geo_nearwithin.js  58
-rw-r--r--  jstests/core/geo_operator_crs.js  70
-rw-r--r--  jstests/core/geo_or.js  28
-rw-r--r--  jstests/core/geo_polygon1.js  8
-rw-r--r--  jstests/core/geo_polygon1_noindex.js  8
-rw-r--r--  jstests/core/geo_polygon2.js  4
-rw-r--r--  jstests/core/geo_polygon3.js  88
-rw-r--r--  jstests/core/geo_s2disjoint_holes.js  23
-rw-r--r--  jstests/core/geo_s2dupe_points.js  31
-rw-r--r--  jstests/core/geo_s2index.js  12
-rw-r--r--  jstests/core/geo_s2indexversion1.js  38
-rw-r--r--  jstests/core/geo_s2meridian.js  3
-rw-r--r--  jstests/core/geo_s2multi.js  49
-rw-r--r--  jstests/core/geo_s2near.js  257
-rw-r--r--  jstests/core/geo_s2near_equator_opposite.js  82
-rw-r--r--  jstests/core/geo_s2nearwithin.js  106
-rw-r--r--  jstests/core/geo_s2ordering.js  100
-rw-r--r--  jstests/core/geo_s2polywithholes.js  6
-rw-r--r--  jstests/core/geo_s2sparse.js  251
-rw-r--r--  jstests/core/geo_s2twofields.js  136
-rw-r--r--  jstests/core/geo_s2within_line_polygon_sphere.js  471
-rw-r--r--  jstests/core/geo_update_btree.js  9
-rw-r--r--  jstests/core/geob.js  72
-rw-r--r--  jstests/core/geonear_cmd_input_validation.js  3
-rw-r--r--  jstests/core/geonear_key.js  186
-rw-r--r--  jstests/core/getlog2.js  116
-rw-r--r--  jstests/core/getmore_cmd_maxtimems.js  84
-rw-r--r--  jstests/core/getmore_invalidated_cursors.js  189
-rw-r--r--  jstests/core/getmore_invalidated_documents.js  435
-rw-r--r--  jstests/core/hash.js  82
-rw-r--r--  jstests/core/idhack.js  170
-rw-r--r--  jstests/core/index_bigkeys.js  8
-rw-r--r--  jstests/core/index_bigkeys_background.js  8
-rw-r--r--  jstests/core/index_bounds_code.js  83
-rw-r--r--  jstests/core/index_bounds_maxkey.js  55
-rw-r--r--  jstests/core/index_bounds_minkey.js  55
-rw-r--r--  jstests/core/index_bounds_object.js  112
-rw-r--r--  jstests/core/index_bounds_pipe.js  215
-rw-r--r--  jstests/core/index_bounds_timestamp.js  275
-rw-r--r--  jstests/core/index_check6.js  3
-rw-r--r--  jstests/core/index_create_with_nul_in_name.js  14
-rw-r--r--  jstests/core/index_decimal.js  97
-rw-r--r--  jstests/core/index_elemmatch1.js  42
-rw-r--r--  jstests/core/index_elemmatch2.js  115
-rw-r--r--  jstests/core/index_filter_catalog_independent.js  118
-rw-r--r--  jstests/core/index_filter_collation.js  111
-rw-r--r--  jstests/core/index_id_options.js  107
-rw-r--r--  jstests/core/index_multikey.js  53
-rw-r--r--  jstests/core/index_multiple_compatibility.js  433
-rw-r--r--  jstests/core/index_partial_2dsphere.js  99
-rw-r--r--  jstests/core/index_partial_create_drop.js  116
-rw-r--r--  jstests/core/index_partial_read_ops.js  142
-rw-r--r--  jstests/core/index_partial_validate.js  22
-rw-r--r--  jstests/core/index_partial_write_ops.js  115
-rw-r--r--  jstests/core/index_stats.js  430
-rw-r--r--  jstests/core/index_type_change.js  40
-rw-r--r--  jstests/core/indexes_multiple_commands.js  300
-rw-r--r--  jstests/core/insert_one.js  46
-rw-r--r--  jstests/core/invalid_collation_locale.js  35
-rw-r--r--  jstests/core/invalid_db_name.js  18
-rw-r--r--  jstests/core/js_jit.js  44
-rw-r--r--  jstests/core/json1.js  2
-rw-r--r--  jstests/core/json_schema/additional_items.js  139
-rw-r--r--  jstests/core/json_schema/additional_properties.js  453
-rw-r--r--  jstests/core/json_schema/bsontype.js  578
-rw-r--r--  jstests/core/json_schema/dependencies.js  203
-rw-r--r--  jstests/core/json_schema/encrypt.js  100
-rw-r--r--  jstests/core/json_schema/items.js  114
-rw-r--r--  jstests/core/json_schema/json_schema.js  653
-rw-r--r--  jstests/core/json_schema/logical_keywords.js  482
-rw-r--r--  jstests/core/json_schema/min_max_items.js  60
-rw-r--r--  jstests/core/json_schema/min_max_properties.js  84
-rw-r--r--  jstests/core/json_schema/misc_validation.js  612
-rw-r--r--  jstests/core/json_schema/pattern_properties.js  152
-rw-r--r--  jstests/core/json_schema/required.js  30
-rw-r--r--  jstests/core/json_schema/unique_items.js  106
-rw-r--r--  jstests/core/jssymbol.js  44
-rw-r--r--  jstests/core/kill_cursors.js  115
-rw-r--r--  jstests/core/killop_drop_collection.js  106
-rw-r--r--  jstests/core/list_all_local_sessions.js  50
-rw-r--r--  jstests/core/list_all_sessions.js  94
-rw-r--r--  jstests/core/list_collections1.js  571
-rw-r--r--  jstests/core/list_collections_filter.js  186
-rw-r--r--  jstests/core/list_collections_name_only.js  55
-rw-r--r--  jstests/core/list_collections_no_views.js  252
-rw-r--r--  jstests/core/list_commands.js  36
-rw-r--r--  jstests/core/list_databases.js  145
-rw-r--r--  jstests/core/list_indexes.js  344
-rw-r--r--  jstests/core/list_indexes_invalidation.js  57
-rw-r--r--  jstests/core/list_indexes_non_existent_ns.js  22
-rw-r--r--  jstests/core/list_local_sessions.js  118
-rw-r--r--  jstests/core/list_namespaces_invalidation.js  127
-rw-r--r--  jstests/core/list_sessions.js  110
-rw-r--r--  jstests/core/long_index_rename.js  36
-rw-r--r--  jstests/core/max_doc_size.js  106
-rw-r--r--  jstests/core/max_time_ms.js  56
-rw-r--r--  jstests/core/min_max_bounds.js  112
-rw-r--r--  jstests/core/min_max_hashed_index.js  22
-rw-r--r--  jstests/core/min_max_key.js  166
-rw-r--r--  jstests/core/minmax.js  304
-rw-r--r--  jstests/core/minmax_edge.js  444
-rw-r--r--  jstests/core/mr5.js  92
-rw-r--r--  jstests/core/mr_bigobject.js  2
-rw-r--r--  jstests/core/mr_bigobject_replace.js  88
-rw-r--r--  jstests/core/mr_killop.js  20
-rw-r--r--  jstests/core/mr_stored.js  124
-rw-r--r--  jstests/core/mr_tolerates_js_exception.js  110
-rw-r--r--  jstests/core/nan.js  110
-rw-r--r--  jstests/core/natural.js  36
-rw-r--r--  jstests/core/ne_array.js  111
-rw-r--r--  jstests/core/nestedarr1.js  59
-rw-r--r--  jstests/core/nestedobj1.js  57
-rw-r--r--  jstests/core/nin.js  5
-rw-r--r--  jstests/core/no_db_created.js  54
-rw-r--r--  jstests/core/not2.js  142
-rw-r--r--  jstests/core/ns_length.js  149
-rw-r--r--  jstests/core/null_query_semantics.js  1120
-rw-r--r--  jstests/core/numberlong.js  12
-rw-r--r--  jstests/core/numberlong3.js  2
-rw-r--r--  jstests/core/objid6.js  12
-rw-r--r--  jstests/core/opcounters_active.js  52
-rw-r--r--  jstests/core/operation_latency_histogram.js  331
-rw-r--r--  jstests/core/optime_cmp.js  19
-rw-r--r--  jstests/core/optimized_match_explain.js  28
-rw-r--r--  jstests/core/or1.js  12
-rw-r--r--  jstests/core/or4.js  114
-rw-r--r--  jstests/core/or5.js  4
-rw-r--r--  jstests/core/or_always_false.js  21
-rw-r--r--  jstests/core/or_inexact.js  104
-rw-r--r--  jstests/core/ord.js  88
-rw-r--r--  jstests/core/plan_cache_clear.js  157
-rw-r--r--  jstests/core/plan_cache_list_plans.js  219
-rw-r--r--  jstests/core/plan_cache_list_shapes.js  134
-rw-r--r--  jstests/core/profile1.js  196
-rw-r--r--  jstests/core/profile2.js  8
-rw-r--r--  jstests/core/profile_agg.js  182
-rw-r--r--  jstests/core/profile_count.js  174
-rw-r--r--  jstests/core/profile_delete.js  192
-rw-r--r--  jstests/core/profile_distinct.js  108
-rw-r--r--  jstests/core/profile_find.js  356
-rw-r--r--  jstests/core/profile_findandmodify.js  359
-rw-r--r--  jstests/core/profile_getmore.js  286
-rw-r--r--  jstests/core/profile_insert.js  144
-rw-r--r--  jstests/core/profile_list_collections.js  52
-rw-r--r--  jstests/core/profile_list_indexes.js  79
-rw-r--r--  jstests/core/profile_mapreduce.js  186
-rw-r--r--  jstests/core/profile_no_such_db.js  55
-rw-r--r--  jstests/core/profile_query_hash.js  207
-rw-r--r--  jstests/core/profile_repair_cursor.js  51
-rw-r--r--  jstests/core/profile_sampling.js  92
-rw-r--r--  jstests/core/profile_update.js  230
-rw-r--r--  jstests/core/projection_dotted_paths.js  145
-rw-r--r--  jstests/core/push2.js  28
-rw-r--r--  jstests/core/query_hash_stability.js  101
-rw-r--r--  jstests/core/queryoptimizer3.js  84
-rw-r--r--  jstests/core/read_after_optime.js  14
-rw-r--r--  jstests/core/record_store_count.js  123
-rw-r--r--  jstests/core/recursion.js  41
-rw-r--r--  jstests/core/regex.js  164
-rw-r--r--  jstests/core/regex5.js  1
-rw-r--r--  jstests/core/regex_error.js  17
-rw-r--r--  jstests/core/regex_limit.js  32
-rw-r--r--  jstests/core/regex_unicode.js  227
-rw-r--r--  jstests/core/regex_util.js  42
-rw-r--r--  jstests/core/regex_verbs.js  94
-rw-r--r--  jstests/core/remove2.js  82
-rw-r--r--  jstests/core/remove9.js  46
-rw-r--r--  jstests/core/remove_undefined.js  41
-rw-r--r--  jstests/core/removea.js  42
-rw-r--r--  jstests/core/removeb.js  78
-rw-r--r--  jstests/core/rename6.js  50
-rw-r--r--  jstests/core/rename_change_target_type.js  16
-rw-r--r--  jstests/core/restart_catalog.js  247
-rw-r--r--  jstests/core/return_key.js  120
-rw-r--r--  jstests/core/role_management_helpers.js  226
-rw-r--r--  jstests/core/rollback_index_drop.js  41
-rw-r--r--  jstests/core/server1470.js  2
-rw-r--r--  jstests/core/server14747.js  19
-rw-r--r--  jstests/core/server14753.js  23
-rw-r--r--  jstests/core/server22053.js  28
-rw-r--r--  jstests/core/server25192.js  12
-rw-r--r--  jstests/core/set_param1.js  106
-rw-r--r--  jstests/core/set_type_change.js  26
-rw-r--r--  jstests/core/shell_connection_strings.js  49
-rw-r--r--  jstests/core/single_batch.js  28
-rw-r--r--  jstests/core/sort1.js  115
-rw-r--r--  jstests/core/sort3.js  16
-rw-r--r--  jstests/core/sort4.js  70
-rw-r--r--  jstests/core/sort_array.js  398
-rw-r--r--  jstests/core/sorta.js  44
-rw-r--r--  jstests/core/sortc.js  44
-rw-r--r--  jstests/core/sorth.js  499
-rw-r--r--  jstests/core/sortl.js  54
-rw-r--r--  jstests/core/sparse_index_supports_ne_null.js  375
-rw-r--r--  jstests/core/startup_log.js  168
-rw-r--r--  jstests/core/tailable_cursor_invalidation.js  119
-rw-r--r--  jstests/core/tailable_getmore_batch_size.js  156
-rw-r--r--  jstests/core/tailable_skip_limit.js  150
-rw-r--r--  jstests/core/text_covered_matching.js  318
-rw-r--r--  jstests/core/text_index_limits.js  57
-rw-r--r--  jstests/core/throw_big.js  17
-rw-r--r--  jstests/core/top.js  212
-rw-r--r--  jstests/core/ts1.js  77
-rw-r--r--  jstests/core/ttl_index_options.js  74
-rw-r--r--  jstests/core/txns/abort_expired_transaction.js  140
-rw-r--r--  jstests/core/txns/abort_prepared_transaction.js  106
-rw-r--r--  jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js  170
-rw-r--r--  jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js  84
-rw-r--r--  jstests/core/txns/aggregation_in_transaction.js  199
-rw-r--r--  jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js  100
-rw-r--r--  jstests/core/txns/banned_txn_dbs.js  50
-rw-r--r--  jstests/core/txns/basic_causal_consistency.js  38
-rw-r--r--  jstests/core/txns/commands_banning_txnnumber_outside_transactions.js  88
-rw-r--r--  jstests/core/txns/commands_not_allowed_in_txn.js  329
-rw-r--r--  jstests/core/txns/commit_and_abort_large_prepared_transactions.js  87
-rw-r--r--  jstests/core/txns/commit_and_abort_large_unprepared_transactions.js  81
-rw-r--r--  jstests/core/txns/commit_prepared_transaction.js  118
-rw-r--r--  jstests/core/txns/commit_prepared_transaction_errors.js  111
-rw-r--r--  jstests/core/txns/concurrent_drops_and_creates.js  116
-rw-r--r--  jstests/core/txns/create_collection_not_blocked_by_txn.js  34
-rw-r--r--  jstests/core/txns/currentop_blocked_operations.js  127
-rw-r--r--  jstests/core/txns/dbstats_not_blocked_by_txn.js  42
-rw-r--r--  jstests/core/txns/default_read_concern.js  82
-rw-r--r--  jstests/core/txns/disallow_operations_on_prepared_transaction.js  242
-rw-r--r--  jstests/core/txns/do_txn_atomicity.js  142
-rw-r--r--  jstests/core/txns/do_txn_basic.js  668
-rw-r--r--  jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js  103
-rw-r--r--  jstests/core/txns/drop_collection_not_blocked_by_txn.js  32
-rw-r--r--  jstests/core/txns/empty_commit_abort.js  95
-rw-r--r--  jstests/core/txns/empty_prepare.js  94
-rw-r--r--  jstests/core/txns/ensure_active_txn_for_prepare_transaction.js  98
-rw-r--r--  jstests/core/txns/errors_on_committed_transaction.js  107
-rw-r--r--  jstests/core/txns/find_and_modify_in_transaction.js  216
-rw-r--r--  jstests/core/txns/finished_transaction_error_handling.js  247
-rw-r--r--  jstests/core/txns/indexing_not_blocked_by_txn.js  48
-rw-r--r--  jstests/core/txns/kill_cursors_in_transaction.js  118
-rw-r--r--  jstests/core/txns/kill_op_on_txn_expiry.js  174
-rw-r--r--  jstests/core/txns/kill_sessions_kills_transaction.js  122
-rw-r--r--  jstests/core/txns/kill_transaction_cursors_after_commit.js  52
-rw-r--r--  jstests/core/txns/kill_txn_cursor.js  119
-rw-r--r--  jstests/core/txns/large_transactions_require_fcv42.js  113
-rw-r--r--  jstests/core/txns/libs/prepare_helpers.js  3
-rw-r--r--  jstests/core/txns/libs/write_conflicts.js  5
-rw-r--r--  jstests/core/txns/list_collections_not_blocked_by_txn.js  62
-rw-r--r--  jstests/core/txns/listcollections_autocomplete.js  77
-rw-r--r--  jstests/core/txns/many_txns.js  175
-rw-r--r--  jstests/core/txns/multi_delete_in_transaction.js  83
-rw-r--r--  jstests/core/txns/multi_statement_transaction.js  221
-rw-r--r--  jstests/core/txns/multi_statement_transaction_abort.js  504
-rw-r--r--  jstests/core/txns/multi_statement_transaction_command_args.js  598
-rw-r--r--  jstests/core/txns/multi_statement_transaction_using_api.js  158
-rw-r--r--  jstests/core/txns/multi_statement_transaction_write_error.js  348
-rw-r--r--  jstests/core/txns/multi_update_in_transaction.js  175
-rw-r--r--  jstests/core/txns/no_implicit_collection_creation_in_txn.js  201
-rw-r--r--  jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js  95
-rw-r--r--  jstests/core/txns/no_read_concern_snapshot_outside_txn.js  108
-rw-r--r--  jstests/core/txns/no_read_or_write_concern_inside_txn.js  306
-rw-r--r--  jstests/core/txns/no_reads_from_system_dot_views_in_txn.js  57
-rw-r--r--  jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js  177
-rw-r--r--  jstests/core/txns/no_writes_to_system_collections_in_txn.js  101
-rw-r--r--  jstests/core/txns/non_transactional_operations_on_session_with_transaction.js  181
-rw-r--r--  jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js  101
-rw-r--r--  jstests/core/txns/prepare_conflict.js  184
-rw-r--r--  jstests/core/txns/prepare_conflict_aggregation_behavior.js  134
-rw-r--r--  jstests/core/txns/prepare_nonexistent_transaction.js  161
-rw-r--r--  jstests/core/txns/prepare_prepared_transaction.js  49
-rw-r--r--  jstests/core/txns/prepare_requires_fcv42.js  94
-rw-r--r--  jstests/core/txns/prepare_transaction_fails_on_temp_collections.js  41
-rw-r--r--  jstests/core/txns/prepare_transaction_unique_index_conflict.js  48
-rw-r--r--  jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js  113
-rw-r--r--  jstests/core/txns/read_concerns.js  104
-rw-r--r--  jstests/core/txns/read_own_multikey_writes.js  44
-rw-r--r--  jstests/core/txns/rename_collection_not_blocked_by_txn.js  37
-rw-r--r--  jstests/core/txns/repeatable_reads_in_transaction.js  98
-rw-r--r--  jstests/core/txns/shell_prompt_in_transaction.js  78
-rw-r--r--  jstests/core/txns/speculative_snapshot_includes_all_writes.js  214
-rw-r--r--  jstests/core/txns/start_transaction_with_read.js  69
-rw-r--r--  jstests/core/txns/statement_ids_accepted.js  423
-rw-r--r--  jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js  435
-rw-r--r--  jstests/core/txns/transaction_error_handling.js  228
-rw-r--r--  jstests/core/txns/transaction_ops_against_capped_collection.js  186
-rw-r--r--  jstests/core/txns/transactions_block_ddl.js  216
-rw-r--r--  jstests/core/txns/transactions_profiling.js  482
-rw-r--r--  jstests/core/txns/transactions_profiling_with_drops.js  200
-rw-r--r--  jstests/core/txns/transactions_write_conflicts.js  386
-rw-r--r--  jstests/core/txns/transactions_write_conflicts_unique_indexes.js  243
-rw-r--r--  jstests/core/txns/upconvert_read_concern.js  133
-rw-r--r--  jstests/core/txns/view_reads_in_transaction.js  110
-rw-r--r--  jstests/core/txns/write_conflicts_with_non_txns.js  253
-rw-r--r--  jstests/core/type4.js  60
-rw-r--r--  jstests/core/type5.js  35
-rw-r--r--  jstests/core/type6.js  24
-rw-r--r--  jstests/core/type7.js  66
-rw-r--r--  jstests/core/type8.js  28
-rw-r--r--  jstests/core/type_array.js  109
-rw-r--r--  jstests/core/uniqueness.js  118
-rw-r--r--  jstests/core/update_affects_indexes.js  148
-rw-r--r--  jstests/core/update_arrayFilters.js  1299
-rw-r--r--  jstests/core/update_array_offset_positional.js  100
-rw-r--r--  jstests/core/update_blank1.js  4
-rw-r--r--  jstests/core/update_hint.js  154
-rw-r--r--  jstests/core/update_min_max_examples.js  121
-rw-r--r--  jstests/core/update_modifier_pop.js  217
-rw-r--r--  jstests/core/update_multi5.js  21
-rw-r--r--  jstests/core/update_numeric_field_name.js  36
-rw-r--r--  jstests/core/update_pipeline_shell_helpers.js  142
-rw-r--r--  jstests/core/update_with_pipeline.js  417
-rw-r--r--  jstests/core/views/duplicate_ns.js  38
-rw-r--r--  jstests/core/views/invalid_system_views.js  222
-rw-r--r--  jstests/core/views/view_with_invalid_dbname.js  38
-rw-r--r--  jstests/core/views/views_aggregation.js  454
-rw-r--r--  jstests/core/views/views_all_commands.js  1071
-rw-r--r--  jstests/core/views/views_basic.js  99
-rw-r--r--  jstests/core/views/views_change.js  182
-rw-r--r--  jstests/core/views/views_coll_stats.js  134
-rw-r--r--  jstests/core/views/views_collation.js  945
-rw-r--r--  jstests/core/views/views_count.js  133
-rw-r--r--  jstests/core/views/views_creation.js  214
-rw-r--r--  jstests/core/views/views_distinct.js  276
-rw-r--r--  jstests/core/views/views_drop.js  43
-rw-r--r--  jstests/core/views/views_find.js  213
-rw-r--r--  jstests/core/views/views_rename.js  34
-rw-r--r--  jstests/core/views/views_stats.js  90
-rw-r--r--  jstests/core/views/views_validation.js  235
-rw-r--r--  jstests/core/where_tolerates_js_exception.js  38
-rw-r--r--  jstests/core/wildcard_and_text_indexes.js  127
-rw-r--r--  jstests/core/wildcard_index_basic_index_bounds.js  420
-rw-r--r--  jstests/core/wildcard_index_cached_plans.js  268
-rw-r--r--  jstests/core/wildcard_index_collation.js  194
-rw-r--r--  jstests/core/wildcard_index_count.js  148
-rw-r--r--  jstests/core/wildcard_index_covered_queries.js  144
-rw-r--r--  jstests/core/wildcard_index_dedup.js  28
-rw-r--r--  jstests/core/wildcard_index_distinct_scan.js  366
-rw-r--r--  jstests/core/wildcard_index_empty_arrays.js  54
-rw-r--r--  jstests/core/wildcard_index_equality_to_empty_obj.js  114
-rw-r--r--  jstests/core/wildcard_index_filter.js  145
-rw-r--r--  jstests/core/wildcard_index_hint.js  192
-rw-r--r--  jstests/core/wildcard_index_minmax.js  112
-rw-r--r--  jstests/core/wildcard_index_multikey.js  457
-rw-r--r--  jstests/core/wildcard_index_nonblocking_sort.js  138
-rw-r--r--  jstests/core/wildcard_index_partial_index.js  66
-rw-r--r--  jstests/core/wildcard_index_return_key.js  73
-rw-r--r--  jstests/core/wildcard_index_type.js  267
-rw-r--r--  jstests/core/wildcard_index_validindex.js  274
-rw-r--r--  jstests/core/write_commands_reject_unknown_fields.js  24
-rw-r--r--  jstests/core_standalone/read_concern.js  84
-rw-r--r--  jstests/core_standalone/write_concern.js  20
-rw-r--r--  jstests/decimal/decimal128_test1.js  333
-rw-r--r--  jstests/decimal/decimal128_test2.js  609
-rw-r--r--  jstests/decimal/decimal128_test3.js  1239
-rw-r--r--  jstests/decimal/decimal128_test4.js  262
-rw-r--r--  jstests/decimal/decimal128_test5.js  631
-rw-r--r--  jstests/decimal/decimal128_test6.js  82
-rw-r--r--  jstests/decimal/decimal128_test7.js  788
-rw-r--r--  jstests/decimal/decimal_constructors.js  64
-rw-r--r--  jstests/decimal/decimal_find_basic.js  101
-rw-r--r--  jstests/decimal/decimal_find_mixed.js  114
-rw-r--r--  jstests/decimal/decimal_find_query.js  78
-rw-r--r--  jstests/decimal/decimal_roundtrip_basic.js  88
-rw-r--r--  jstests/decimal/decimal_update.js  60
-rw-r--r--  jstests/disk/repair_does_not_invalidate_config_on_standalone.js  44
-rw-r--r--  jstests/disk/repair_failure_is_recoverable.js  74
-rw-r--r--  jstests/disk/repair_invalidates_replica_set_config.js  194
-rw-r--r--  jstests/disk/wt_corrupt_file_errors.js  116
-rw-r--r--  jstests/disk/wt_missing_file_errors.js  116
-rw-r--r--  jstests/disk/wt_repair_corrupt_files.js  152
-rw-r--r--  jstests/disk/wt_repair_corrupt_metadata.js  184
-rw-r--r--  jstests/disk/wt_repair_missing_files.js  186
-rw-r--r--  jstests/disk/wt_repair_orphaned_idents.js  140
-rw-r--r--  jstests/fail_point/fail_point.js  170
-rw-r--r--  jstests/fail_point/set_failpoint_through_set_parameter.js  266
-rw-r--r--  jstests/free_mon/free_mon_announce.js  48
-rw-r--r--  jstests/free_mon/free_mon_disable.js  36
-rw-r--r--  jstests/free_mon/free_mon_http_down.js  32
-rw-r--r--  jstests/free_mon/free_mon_http_validate.js  38
-rw-r--r--  jstests/free_mon/free_mon_metrics_halt.js  34
-rw-r--r--  jstests/free_mon/free_mon_metrics_perm_del.js  34
-rw-r--r--  jstests/free_mon/free_mon_register.js  64
-rw-r--r--  jstests/free_mon/free_mon_register_cmd.js  92
-rw-r--r--  jstests/free_mon/free_mon_register_off.js  44
-rw-r--r--  jstests/free_mon/free_mon_register_resend.js  28
-rw-r--r--  jstests/free_mon/free_mon_rs_corrupt.js  42
-rw-r--r--  jstests/free_mon/free_mon_rs_delete.js  74
-rw-r--r--  jstests/free_mon/free_mon_rs_halt.js  76
-rw-r--r--  jstests/free_mon/free_mon_rs_off.js  42
-rw-r--r--  jstests/free_mon/free_mon_rs_perm_del.js  66
-rw-r--r--  jstests/free_mon/free_mon_rs_register.js  124
-rw-r--r--  jstests/free_mon/free_mon_rs_resend.js  91
-rw-r--r--  jstests/free_mon/free_mon_server_status.js  84
-rw-r--r--  jstests/free_mon/libs/free_mon.js  10
-rw-r--r--  jstests/gle/create_index_gle.js  83
-rw-r--r--  jstests/gle/gle_sharded_wc.js  268
-rw-r--r--  jstests/gle/gle_sharded_write.js  334
-rw-r--r--  jstests/gle/updated_existing.js  6
-rw-r--r--  jstests/hooks/drop_sharded_collections.js  42
-rw-r--r--  jstests/hooks/run_check_repl_dbhash.js  155
-rw-r--r--  jstests/hooks/run_check_repl_dbhash_background.js  810
-rw-r--r--  jstests/hooks/run_check_repl_oplogs.js  64
-rw-r--r--  jstests/hooks/run_initial_sync_node_validation.js  73
-rw-r--r--  jstests/hooks/run_validate_collections.js  67
-rw-r--r--  jstests/hooks/validate_collections.js  4
-rw-r--r--  jstests/httpinterface/sharding_configdb_on_default_ports.js  26
-rw-r--r--  jstests/libs/change_stream_util.js  1
-rw-r--r--  jstests/libs/check_log.js  234
-rw-r--r--  jstests/libs/csrs_upgrade_util.js  11
-rw-r--r--  jstests/libs/dateutil.js  1
-rw-r--r--  jstests/libs/feature_compatibility_version.js  3
-rw-r--r--  jstests/libs/fsm_serial_client.js  6
-rw-r--r--  jstests/libs/geo_near_random.js  3
-rw-r--r--  jstests/libs/get_index_helpers.js  11
-rw-r--r--  jstests/libs/json_schema_test_runner.js  74
-rw-r--r--  jstests/libs/jstestfuzz/check_for_interrupt_hook.js  64
-rw-r--r--  jstests/libs/kill_sessions.js  257
-rw-r--r--  jstests/libs/mongoebench.js  1
-rw-r--r--  jstests/libs/mql_model_mongod_test_runner.js  80
-rw-r--r--  jstests/libs/override_methods/causally_consistent_index_builds.js  72
-rw-r--r--  jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js  50
-rw-r--r--  jstests/libs/override_methods/check_uuids_consistent_across_cluster.js  2
-rw-r--r--  jstests/libs/override_methods/continuous_stepdown.js  637
-rw-r--r--  jstests/libs/override_methods/detect_spawning_own_mongod.js  58
-rw-r--r--  jstests/libs/override_methods/enable_causal_consistency.js  14
-rw-r--r--  jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js  10
-rw-r--r--  jstests/libs/override_methods/enable_sessions.js  98
-rw-r--r--  jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js  75
-rw-r--r--  jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js  10
-rw-r--r--  jstests/libs/override_methods/find_batch_size.js  16
-rw-r--r--  jstests/libs/override_methods/implicit_whole_cluster_changestreams.js  8
-rw-r--r--  jstests/libs/override_methods/implicit_whole_db_changestreams.js  87
-rw-r--r--  jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js  233
-rw-r--r--  jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js  321
-rw-r--r--  jstests/libs/override_methods/implicitly_shard_accessed_collections.js  315
-rw-r--r--  jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js  107
-rw-r--r--  jstests/libs/override_methods/mongos_manual_intervention_actions.js  103
-rw-r--r--  jstests/libs/override_methods/network_error_and_txn_override.js  1904
-rw-r--r--  jstests/libs/override_methods/retry_writes_at_least_once.js  77
-rw-r--r--  jstests/libs/override_methods/set_read_and_write_concerns.js  242
-rw-r--r--  jstests/libs/override_methods/set_read_preference_secondary.js  287
-rw-r--r--  jstests/libs/override_methods/sharding_continuous_config_stepdown.js  57
-rw-r--r--  jstests/libs/override_methods/txn_passthrough_cmd_massage.js  106
-rw-r--r--  jstests/libs/override_methods/validate_collections_on_shutdown.js  190
-rw-r--r--  jstests/libs/test_background_ops.js  26
-rw-r--r--  jstests/libs/transactions_util.js  2
-rw-r--r--  jstests/libs/txns/txn_passthrough_runner.js  18
-rw-r--r--  jstests/libs/txns/txn_passthrough_runner_selftest.js  41
-rw-r--r--  jstests/multiVersion/2_test_launching_cluster.js  90
-rw-r--r--  jstests/multiVersion/add_invalid_shard.js  65
-rw-r--r--  jstests/multiVersion/change_streams_feature_compatibility_version.js  182
-rw-r--r--  jstests/multiVersion/change_streams_high_water_mark_cluster.js  465
-rw-r--r--  jstests/multiVersion/change_streams_high_water_mark_replset.js  200
-rw-r--r--  jstests/multiVersion/change_streams_resume_token_version.js  245
-rw-r--r--  jstests/multiVersion/clone_helper.js  101
-rw-r--r--  jstests/multiVersion/collection_autoIndexId_false.js  165
-rw-r--r--  jstests/multiVersion/collection_validator_feature_compatibility_version.js  382
-rw-r--r--  jstests/multiVersion/config_transactions_set_fcv.js  761
-rw-r--r--  jstests/multiVersion/copydb_helper.js  93
-rw-r--r--  jstests/multiVersion/downgrade_after_rollback_via_refetch.js  92
-rw-r--r--  jstests/multiVersion/drop_collection_downgrade_path.js  96
-rw-r--r--  jstests/multiVersion/drop_collection_upgrade_path.js  98
-rw-r--r--  jstests/multiVersion/failIndexKeyTooLong_FCV40.js  90
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js  87
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js  586
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js  73
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js  546
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js  67
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js  18
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js  20
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js  18
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js  20
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js139
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js8
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js8
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js66
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js747
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js113
-rw-r--r--jstests/multiVersion/hybrid_indexes.js176
-rw-r--r--jstests/multiVersion/index_bigkeys.js159
-rw-r--r--jstests/multiVersion/index_bigkeys_feature_tracker.js311
-rw-r--r--jstests/multiVersion/index_bigkeys_mixed_version_replset.js72
-rw-r--r--jstests/multiVersion/index_bigkeys_secondary_downgrade_during_index_build_background.js102
-rw-r--r--jstests/multiVersion/initialize_from_old_node.js36
-rw-r--r--jstests/multiVersion/json_schema_encrypt_fcv.js349
-rw-r--r--jstests/multiVersion/libs/data_generators.js2
-rw-r--r--jstests/multiVersion/libs/dumprestore_helpers.js7
-rw-r--r--jstests/multiVersion/libs/global_snapshot_reads_helpers.js18
-rw-r--r--jstests/multiVersion/libs/initial_sync.js1
-rw-r--r--jstests/multiVersion/libs/multi_cluster.js1
-rw-r--r--jstests/multiVersion/libs/multi_rs.js3
-rw-r--r--jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js14
-rw-r--r--jstests/multiVersion/libs/verify_collection_data.js2
-rw-r--r--jstests/multiVersion/libs/verify_versions.js59
-rw-r--r--jstests/multiVersion/long_index_mixed_version_replset.js64
-rw-r--r--jstests/multiVersion/migration_between_mixed_version_mongods.js192
-rw-r--r--jstests/multiVersion/minor_version_tags_new_old_new.js24
-rw-r--r--jstests/multiVersion/minor_version_tags_old_new_old.js24
-rw-r--r--jstests/multiVersion/mixed_version_transactions_during_rollback_via_refetch.js99
-rw-r--r--jstests/multiVersion/mixed_version_unprepared_transactions.js117
-rw-r--r--jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js69
-rw-r--r--jstests/multiVersion/now_variable_fcv.js54
-rw-r--r--jstests/multiVersion/remove_feature_compatibility_version.js21
-rw-r--r--jstests/multiVersion/shard_collection_between_mixed_version_mongods.js106
-rw-r--r--jstests/multiVersion/sharded_txn_downgrade_cluster.js126
-rw-r--r--jstests/multiVersion/sharded_txn_upgrade_cluster.js101
-rw-r--r--jstests/multiVersion/skip_level_upgrade.js121
-rw-r--r--jstests/multiVersion/text_index_limits.js76
-rw-r--r--jstests/multiVersion/unique_index_empty_collmod.js62
-rw-r--r--jstests/multiVersion/update_shard_key_disallowed_fcv40.js444
-rw-r--r--jstests/multiVersion/upgrade_downgrade_cluster.js306
-rw-r--r--jstests/multiVersion/verify_versions_test.js154
-rw-r--r--jstests/multiVersion/view_definition_feature_compatibility_version.js291
-rw-r--r--jstests/multiVersion/wildcard_index_feature_compatability_version.js284
-rw-r--r--jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js53
-rw-r--r--jstests/noPassthrough/absent_ns_field_in_index_specs.js93
-rw-r--r--jstests/noPassthrough/afterClusterTime_committed_reads.js113
-rw-r--r--jstests/noPassthrough/after_cluster_time.js110
-rw-r--r--jstests/noPassthrough/agg_explain_read_concern.js108
-rw-r--r--jstests/noPassthrough/aggregation_cursor_invalidations.js592
-rw-r--r--jstests/noPassthrough/aggregation_log_namespace.js97
-rw-r--r--jstests/noPassthrough/aggregation_zero_batchsize.js134
-rw-r--r--jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js131
-rw-r--r--jstests/noPassthrough/apply_ops_mode.js166
-rw-r--r--jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js70
-rw-r--r--jstests/noPassthrough/atomic_rename_collection.js82
-rw-r--r--jstests/noPassthrough/auth_reject_mismatching_logical_times.js109
-rw-r--r--jstests/noPassthrough/auto_retry_on_network_error.js213
-rw-r--r--jstests/noPassthrough/backup_restore_fsync_lock.js8
-rw-r--r--jstests/noPassthrough/backup_restore_rolling.js40
-rw-r--r--jstests/noPassthrough/backup_restore_stop_start.js4
-rw-r--r--jstests/noPassthrough/bind_all_ipv6.js10
-rw-r--r--jstests/noPassthrough/bind_ip_all.js32
-rw-r--r--jstests/noPassthrough/bind_localhost.js20
-rw-r--r--jstests/noPassthrough/block_compressor_options.js60
-rw-r--r--jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js76
-rw-r--r--jstests/noPassthrough/change_stream_failover.js161
-rw-r--r--jstests/noPassthrough/change_stream_resume_before_add_shard.js212
-rw-r--r--jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js56
-rw-r--r--jstests/noPassthrough/change_stream_transaction.js511
-rw-r--r--jstests/noPassthrough/change_streams_collation_chunk_migration.js95
-rw-r--r--jstests/noPassthrough/change_streams_require_majority_read_concern.js157
-rw-r--r--jstests/noPassthrough/change_streams_required_privileges.js602
-rw-r--r--jstests/noPassthrough/change_streams_resume_at_same_clustertime.js95
-rw-r--r--jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js148
-rw-r--r--jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js150
-rw-r--r--jstests/noPassthrough/change_streams_shell_helper_resume_token.js150
-rw-r--r--jstests/noPassthrough/change_streams_update_lookup_collation.js189
-rw-r--r--jstests/noPassthrough/characterize_index_builds_on_restart.js412
-rw-r--r--jstests/noPassthrough/child_op_numyields.js188
-rw-r--r--jstests/noPassthrough/client_metadata_log.js90
-rw-r--r--jstests/noPassthrough/client_metadata_slowlog.js46
-rw-r--r--jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js64
-rw-r--r--jstests/noPassthrough/coll_mod_apply_ops.js68
-rw-r--r--jstests/noPassthrough/collation_clone_collection.js120
-rw-r--r--jstests/noPassthrough/commands_handle_kill.js373
-rw-r--r--jstests/noPassthrough/commands_preserve_exec_error_code.js73
-rw-r--r--jstests/noPassthrough/commit_quorum.js176
-rw-r--r--jstests/noPassthrough/compression_options.js77
-rw-r--r--jstests/noPassthrough/configExpand_exec_digest.js109
-rw-r--r--jstests/noPassthrough/configExpand_exec_noexpand.js36
-rw-r--r--jstests/noPassthrough/configExpand_exec_permissions.js40
-rw-r--r--jstests/noPassthrough/configExpand_exec_timeeout.js46
-rw-r--r--jstests/noPassthrough/configExpand_exec_values.js43
-rw-r--r--jstests/noPassthrough/configExpand_exec_wholeconfig.js16
-rw-r--r--jstests/noPassthrough/configExpand_rest_noexpand.js48
-rw-r--r--jstests/noPassthrough/configExpand_rest_permissions.js44
-rw-r--r--jstests/noPassthrough/configExpand_rest_timeout.js52
-rw-r--r--jstests/noPassthrough/configExpand_rest_values.js59
-rw-r--r--jstests/noPassthrough/configExpand_rest_wholeconfig.js26
-rw-r--r--jstests/noPassthrough/count_helper_read_preference.js73
-rw-r--r--jstests/noPassthrough/create_view_does_not_take_database_X.js32
-rw-r--r--jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js106
-rw-r--r--jstests/noPassthrough/crud_timestamps.js207
-rw-r--r--jstests/noPassthrough/currentop_active_cursor.js200
-rw-r--r--jstests/noPassthrough/currentop_active_transaction.js355
-rw-r--r--jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js100
-rw-r--r--jstests/noPassthrough/currentop_includes_await_time.js92
-rw-r--r--jstests/noPassthrough/currentop_query.js1112
-rw-r--r--jstests/noPassthrough/currentop_transaction_metrics.js102
-rw-r--r--jstests/noPassthrough/cycle_detection_test.js170
-rw-r--r--jstests/noPassthrough/data_consistency_checks.js355
-rw-r--r--jstests/noPassthrough/dbhash_capped_collection.js96
-rw-r--r--jstests/noPassthrough/devnull.js14
-rw-r--r--jstests/noPassthrough/directoryperdb.js52
-rw-r--r--jstests/noPassthrough/disable_majority_reads_restart.js118
-rw-r--r--jstests/noPassthrough/disabled_test_parameters.js60
-rw-r--r--jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js36
-rw-r--r--jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js91
-rw-r--r--jstests/noPassthrough/document_count_functions.js78
-rw-r--r--jstests/noPassthrough/drop_connections_replSet.js94
-rw-r--r--jstests/noPassthrough/drop_connections_sharded.js90
-rw-r--r--jstests/noPassthrough/drop_view_does_not_take_database_X.js36
-rw-r--r--jstests/noPassthrough/dropcollection_duplicate_fields.js33
-rw-r--r--jstests/noPassthrough/dropdatabase_respect_maxtimems.js92
-rw-r--r--jstests/noPassthrough/durable_view_catalog.js145
-rw-r--r--jstests/noPassthrough/end_sessions_command.js173
-rw-r--r--jstests/noPassthrough/exchange_in_session.js126
-rw-r--r--jstests/noPassthrough/exhaust_option_disallowed_in_session.js38
-rw-r--r--jstests/noPassthrough/exit_logging.js185
-rw-r--r--jstests/noPassthrough/failcommand_failpoint_not_parallel.js37
-rw-r--r--jstests/noPassthrough/feature_compatibility_version.js78
-rw-r--r--jstests/noPassthrough/filemd5_kill_during_yield.js86
-rw-r--r--jstests/noPassthrough/find_by_uuid_and_rename.js107
-rw-r--r--jstests/noPassthrough/flow_control_logging.js78
-rw-r--r--jstests/noPassthrough/flow_control_replica_set.js86
-rw-r--r--jstests/noPassthrough/ftdc_connection_pool.js42
-rw-r--r--jstests/noPassthrough/ftdc_setdirectory.js218
-rw-r--r--jstests/noPassthrough/ftdc_setparam.js24
-rw-r--r--jstests/noPassthrough/geo_full.js966
-rw-r--r--jstests/noPassthrough/geo_mnypts_plus_fields.js156
-rw-r--r--jstests/noPassthrough/geo_near_random1.js26
-rw-r--r--jstests/noPassthrough/geo_near_random2.js43
-rw-r--r--jstests/noPassthrough/global_operation_latency_histogram.js325
-rw-r--r--jstests/noPassthrough/global_transaction_latency_histogram.js221
-rw-r--r--jstests/noPassthrough/hostname_bind_ips.js28
-rw-r--r--jstests/noPassthrough/http_client_keep_alive.js108
-rw-r--r--jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js98
-rw-r--r--jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js92
-rw-r--r--jstests/noPassthrough/hybrid_index_with_updates.js202
-rw-r--r--jstests/noPassthrough/hybrid_partial_geo_index.js100
-rw-r--r--jstests/noPassthrough/hybrid_partial_index_update.js78
-rw-r--r--jstests/noPassthrough/hybrid_sparse_compound_geo_index.js66
-rw-r--r--jstests/noPassthrough/hybrid_unique_index_with_updates.js303
-rw-r--r--jstests/noPassthrough/hyphenated_database_name.js28
-rw-r--r--jstests/noPassthrough/ignore_notablescan.js106
-rw-r--r--jstests/noPassthrough/implicit_sessions.js425
-rw-r--r--jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js197
-rw-r--r--jstests/noPassthrough/index_killop_standalone.js64
-rw-r--r--jstests/noPassthrough/index_partial_no_explain_cmds.js106
-rw-r--r--jstests/noPassthrough/index_version_autoupgrade.js251
-rw-r--r--jstests/noPassthrough/index_version_v2.js227
-rw-r--r--jstests/noPassthrough/indexbg1.js238
-rw-r--r--jstests/noPassthrough/indexbg2.js260
-rw-r--r--jstests/noPassthrough/indexbg_drop.js156
-rw-r--r--jstests/noPassthrough/indexbg_killop_apply_ops.js105
-rw-r--r--jstests/noPassthrough/indexbg_killop_primary.js83
-rw-r--r--jstests/noPassthrough/indexbg_killop_secondary.js84
-rw-r--r--jstests/noPassthrough/indexbg_shutdown.js182
-rw-r--r--jstests/noPassthrough/initial_sync_wt_cache_full.js107
-rw-r--r--jstests/noPassthrough/inmem_config_str.js22
-rw-r--r--jstests/noPassthrough/inmem_full.js140
-rw-r--r--jstests/noPassthrough/internal_validate_features_as_master.js44
-rw-r--r--jstests/noPassthrough/jsHeapLimit.js38
-rw-r--r--jstests/noPassthrough/js_exceptions.js208
-rw-r--r--jstests/noPassthrough/js_protection.js143
-rw-r--r--jstests/noPassthrough/js_protection_roundtrip.js78
-rw-r--r--jstests/noPassthrough/json_schema_ignore_unknown_keywords.js105
-rw-r--r--jstests/noPassthrough/kill_pinned_cursor.js179
-rw-r--r--jstests/noPassthrough/kill_sessions.js14
-rw-r--r--jstests/noPassthrough/killop.js135
-rw-r--r--jstests/noPassthrough/latency_includes_lock_acquisition_time.js259
-rw-r--r--jstests/noPassthrough/launcher_test.js43
-rw-r--r--jstests/noPassthrough/libs/backup_restore.js4
-rw-r--r--jstests/noPassthrough/libs/configExpand/lib.js4
-rw-r--r--jstests/noPassthrough/libs/index_build.js22
-rw-r--r--jstests/noPassthrough/list_databases_and_rename_collection.js100
-rw-r--r--jstests/noPassthrough/list_indexes_ready_and_in_progress.js50
-rw-r--r--jstests/noPassthrough/list_indexes_with_build_uuids.js120
-rw-r--r--jstests/noPassthrough/lock_file.js38
-rw-r--r--jstests/noPassthrough/lock_file_fail_to_open.js34
-rw-r--r--jstests/noPassthrough/lock_stats.js106
-rw-r--r--jstests/noPassthrough/lock_stats_suboperation_curop.js102
-rw-r--r--jstests/noPassthrough/lock_stats_suboperation_logs.js134
-rw-r--r--jstests/noPassthrough/log_and_profile_query_hash.js288
-rw-r--r--jstests/noPassthrough/log_find_getmore.js244
-rw-r--r--jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js873
-rw-r--r--jstests/noPassthrough/logical_session_cache_find_getmore.js40
-rw-r--r--jstests/noPassthrough/logical_session_cursor_checks.js170
-rw-r--r--jstests/noPassthrough/loglong.js70
-rw-r--r--jstests/noPassthrough/lookup_max_intermediate_size.js167
-rw-r--r--jstests/noPassthrough/low_js_heap_limit.js20
-rw-r--r--jstests/noPassthrough/match_expression_optimization_failpoint.js54
-rw-r--r--jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js24
-rw-r--r--jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js99
-rw-r--r--jstests/noPassthrough/max_bson_depth_parameter.js54
-rw-r--r--jstests/noPassthrough/max_conns_override.js76
-rw-r--r--jstests/noPassthrough/max_time_ms_repl_targeting.js98
-rw-r--r--jstests/noPassthrough/member_id_too_large.js48
-rw-r--r--jstests/noPassthrough/merge_max_time_ms.js483
-rw-r--r--jstests/noPassthrough/minvalid2.js4
-rw-r--r--jstests/noPassthrough/mongoebench_test.js92
-rw-r--r--jstests/noPassthrough/mongos_exhausts_stale_config_retries.js113
-rw-r--r--jstests/noPassthrough/nested_tojson.js49
-rw-r--r--jstests/noPassthrough/non_atomic_apply_ops_logging.js136
-rw-r--r--jstests/noPassthrough/noncapped_oplog_creation.js48
-rw-r--r--jstests/noPassthrough/ns1.js78
-rw-r--r--jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js230
-rw-r--r--jstests/noPassthrough/out_majority_read_replset.js76
-rw-r--r--jstests/noPassthrough/out_max_time_ms.js223
-rw-r--r--jstests/noPassthrough/out_merge_majority_read.js344
-rw-r--r--jstests/noPassthrough/parse_zone_info.js28
-rw-r--r--jstests/noPassthrough/partial_unique_indexes.js82
-rw-r--r--jstests/noPassthrough/pipeline_optimization_failpoint.js70
-rw-r--r--jstests/noPassthrough/plan_cache_index_create.js285
-rw-r--r--jstests/noPassthrough/plan_cache_list_plans_new_format.js90
-rw-r--r--jstests/noPassthrough/plan_cache_stats_agg_source.js334
-rw-r--r--jstests/noPassthrough/port_options.js96
-rw-r--r--jstests/noPassthrough/predictive_connpool.js254
-rw-r--r--jstests/noPassthrough/profile_agg_multiple_batches.js40
-rw-r--r--jstests/noPassthrough/profile_interrupted_op.js111
-rw-r--r--jstests/noPassthrough/query_knobs_validation.js313
-rw-r--r--jstests/noPassthrough/query_yield1.js148
-rw-r--r--jstests/noPassthrough/query_yield2.js260
-rw-r--r--jstests/noPassthrough/query_yield_reset_timer.js80
-rw-r--r--jstests/noPassthrough/queryable_backup_mode_incompatible_options.js76
-rw-r--r--jstests/noPassthrough/readConcern_atClusterTime.js265
-rw-r--r--jstests/noPassthrough/readConcern_atClusterTime_noop_write.js195
-rw-r--r--jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js146
-rw-r--r--jstests/noPassthrough/readConcern_snapshot.js248
-rw-r--r--jstests/noPassthrough/readConcern_snapshot_mongos.js253
-rw-r--r--jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js83
-rw-r--r--jstests/noPassthrough/read_concern_helper.js42
-rw-r--r--jstests/noPassthrough/read_concern_snapshot_aggregation.js385
-rw-r--r--jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js181
-rw-r--r--jstests/noPassthrough/read_concern_snapshot_yielding.js613
-rw-r--r--jstests/noPassthrough/read_majority.js392
-rw-r--r--jstests/noPassthrough/read_majority_reads.js443
-rw-r--r--jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js77
-rw-r--r--jstests/noPassthrough/recovery_wt_cache_full.js149
-rw-r--r--jstests/noPassthrough/refresh_logical_session_cache_now.js69
-rw-r--r--jstests/noPassthrough/refresh_sessions_command.js185
-rw-r--r--jstests/noPassthrough/reindex_crash_rebuilds_id_index.js83
-rw-r--r--jstests/noPassthrough/repair_flag_transport_layer.js24
-rw-r--r--jstests/noPassthrough/repl_set_resize_oplog.js58
-rw-r--r--jstests/noPassthrough/repl_write_threads_start_param.js55
-rw-r--r--jstests/noPassthrough/replica_set_connection_error_codes.js145
-rw-r--r--jstests/noPassthrough/replica_set_connection_getmore.js80
-rw-r--r--jstests/noPassthrough/replica_set_connection_stepdown.js101
-rw-r--r--jstests/noPassthrough/report_post_batch_resume_token_mongod.js212
-rw-r--r--jstests/noPassthrough/restart_catalog_preserves_min_visible.js51
-rw-r--r--jstests/noPassthrough/restart_catalog_sharded_cluster.js414
-rw-r--r--jstests/noPassthrough/restart_node_with_bridge.js85
-rw-r--r--jstests/noPassthrough/retry_network_error_test.js67
-rw-r--r--jstests/noPassthrough/retryable_writes_standalone_api.js30
-rw-r--r--jstests/noPassthrough/rollback_wt_cache_full.js148
-rw-r--r--jstests/noPassthrough/rollback_wt_drop.js286
-rw-r--r--jstests/noPassthrough/router_transactions_metrics.js1081
-rw-r--r--jstests/noPassthrough/server_read_concern_metrics.js668
-rw-r--r--jstests/noPassthrough/server_transaction_metrics.js406
-rw-r--r--jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js342
-rw-r--r--jstests/noPassthrough/server_transaction_metrics_kill_sessions.js157
-rw-r--r--jstests/noPassthrough/server_transaction_metrics_secondary.js121
-rw-r--r--jstests/noPassthrough/server_write_concern_metrics.js411
-rw-r--r--jstests/noPassthrough/session_w0.js23
-rw-r--r--jstests/noPassthrough/sessions_collection_auto_healing.js111
-rw-r--r--jstests/noPassthrough/set_step_params.js462
-rw-r--r--jstests/noPassthrough/setshellparameter.js26
-rw-r--r--jstests/noPassthrough/shard_fixture_selftest.js74
-rw-r--r--jstests/noPassthrough/shell_appname_uri.js124
-rw-r--r--jstests/noPassthrough/shell_can_retry_writes.js252
-rw-r--r--jstests/noPassthrough/shell_can_use_read_concern.js361
-rw-r--r--jstests/noPassthrough/shell_check_program_extension.js18
-rw-r--r--jstests/noPassthrough/shell_cmd_assertions.js655
-rw-r--r--jstests/noPassthrough/shell_disable_majority_reads.js48
-rw-r--r--jstests/noPassthrough/shell_gossip_cluster_time.js215
-rw-r--r--jstests/noPassthrough/shell_helper_use_database.js42
-rw-r--r--jstests/noPassthrough/shell_history.js171
-rw-r--r--jstests/noPassthrough/shell_interactive.js35
-rw-r--r--jstests/noPassthrough/shell_load_file.js60
-rw-r--r--jstests/noPassthrough/shell_mongobridge_port_allocation.js113
-rw-r--r--jstests/noPassthrough/shell_quit.js26
-rw-r--r--jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js216
-rw-r--r--jstests/noPassthrough/shell_retry_writes_uri.js252
-rw-r--r--jstests/noPassthrough/shell_session_option_defaults.js120
-rw-r--r--jstests/noPassthrough/shutdown_while_fsync_locked.js14
-rw-r--r--jstests/noPassthrough/skip_sharding_configuration_checks.js93
-rw-r--r--jstests/noPassthrough/skip_write_conflict_retries_failpoint.js82
-rw-r--r--jstests/noPassthrough/snapshotWindow_serverParameters.js144
-rw-r--r--jstests/noPassthrough/snapshot_cursor_integrity.js302
-rw-r--r--jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js159
-rw-r--r--jstests/noPassthrough/snapshot_reads.js228
-rw-r--r--jstests/noPassthrough/socket_disconnect_kills.js402
-rw-r--r--jstests/noPassthrough/standalone_replication_recovery.js295
-rw-r--r--jstests/noPassthrough/start_session_command.js150
-rw-r--r--jstests/noPassthrough/startup_logging.js55
-rw-r--r--jstests/noPassthrough/step_down_during_drop_database.js76
-rw-r--r--jstests/noPassthrough/stepdown_query.js122
-rw-r--r--jstests/noPassthrough/sync_write.js38
-rw-r--r--jstests/noPassthrough/system_indexes.js136
-rw-r--r--jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js35
-rw-r--r--jstests/noPassthrough/thread_args.js72
-rw-r--r--jstests/noPassthrough/timestamp_index_builds.js128
-rw-r--r--jstests/noPassthrough/traffic_reading.js134
-rw-r--r--jstests/noPassthrough/traffic_reading_legacy.js108
-rw-r--r--jstests/noPassthrough/traffic_recording.js211
-rw-r--r--jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js26
-rw-r--r--jstests/noPassthrough/transaction_reaper.js307
-rw-r--r--jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js88
-rw-r--r--jstests/noPassthrough/transactions_work_with_in_memory_engine.js48
-rw-r--r--jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js25
-rw-r--r--jstests/noPassthrough/ttl_capped.js128
-rw-r--r--jstests/noPassthrough/ttl_partial_index.js46
-rw-r--r--jstests/noPassthrough/two_phase_index_build.js106
-rw-r--r--jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js83
-rw-r--r--jstests/noPassthrough/txn_override_causal_consistency.js376
-rw-r--r--jstests/noPassthrough/umask.js122
-rw-r--r--jstests/noPassthrough/unix_socket.js200
-rw-r--r--jstests/noPassthrough/unknown-set-parameter.js55
-rw-r--r--jstests/noPassthrough/unsupported_change_stream_deployments.js101
-rw-r--r--jstests/noPassthrough/update_now_clustertime_replset.js488
-rw-r--r--jstests/noPassthrough/update_now_clustertime_sharding.js538
-rw-r--r--jstests/noPassthrough/update_post_image_validation.js40
-rw-r--r--jstests/noPassthrough/update_server-5552.js54
-rw-r--r--jstests/noPassthrough/upsert_duplicate_key_retry.js152
-rw-r--r--jstests/noPassthrough/use_disk.js280
-rw-r--r--jstests/noPassthrough/utf8_paths.js52
-rw-r--r--jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js360
-rw-r--r--jstests/noPassthrough/verify_session_cache_updates.js112
-rw-r--r--jstests/noPassthrough/verify_sessions_expiration.js232
-rw-r--r--jstests/noPassthrough/view_catalog_deadlock_with_rename.js38
-rw-r--r--jstests/noPassthrough/views_legacy.js155
-rw-r--r--jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js24
-rw-r--r--jstests/noPassthrough/write_conflict_wildcard.js50
-rw-r--r--jstests/noPassthrough/write_local.js71
-rw-r--r--jstests/noPassthrough/wt_cache_full.js97
-rw-r--r--jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js110
-rw-r--r--jstests/noPassthrough/wt_cache_full_restart.js108
-rw-r--r--jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js108
-rw-r--r--jstests/noPassthrough/wt_disable_majority_reads.js50
-rw-r--r--jstests/noPassthrough/wt_index_option_defaults.js265
-rw-r--r--jstests/noPassthrough/wt_malformed_creation_string.js102
-rw-r--r--jstests/noPassthrough/wt_nojournal_skip_recovery.js154
-rw-r--r--jstests/noPassthrough/wt_nojournal_toggle.js228
-rw-r--r--jstests/noPassthrough/wt_operation_stats.js148
-rw-r--r--jstests/noPassthrough/wt_prepare_conflict.js102
-rw-r--r--jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js85
-rw-r--r--jstests/noPassthrough/wt_unclean_shutdown.js224
-rw-r--r--jstests/noPassthrough/yield_during_writes.js78
-rw-r--r--jstests/noPassthroughWithMongod/apply_ops_errors.js82
-rw-r--r--jstests/noPassthroughWithMongod/bench_test_crud_commands.js147
-rw-r--r--jstests/noPassthroughWithMongod/capped_truncate.js92
-rw-r--r--jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js50
-rw-r--r--jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js42
-rw-r--r--jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js112
-rw-r--r--jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js316
-rw-r--r--jstests/noPassthroughWithMongod/connections_opened.js10
-rw-r--r--jstests/noPassthroughWithMongod/create_indexes_shell_helper.js134
-rw-r--r--jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js223
-rw-r--r--jstests/noPassthroughWithMongod/currentop_includes_connid.js14
-rw-r--r--jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js98
-rw-r--r--jstests/noPassthroughWithMongod/cursor_server_status_metrics.js136
-rw-r--r--jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js26
-rw-r--r--jstests/noPassthroughWithMongod/default_read_pref.js95
-rw-r--r--jstests/noPassthroughWithMongod/dup_bgindex.js32
-rw-r--r--jstests/noPassthroughWithMongod/exchangeProducer.js487
-rw-r--r--jstests/noPassthroughWithMongod/external_sort_text_agg.js6
-rw-r--r--jstests/noPassthroughWithMongod/ftdc_params.js6
-rw-r--r--jstests/noPassthroughWithMongod/geo_polygon.js15
-rw-r--r--jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js70
-rw-r--r--jstests/noPassthroughWithMongod/host_connection_string_validation.js207
-rw-r--r--jstests/noPassthroughWithMongod/index_boundary_values_validate.js40
-rw-r--r--jstests/noPassthroughWithMongod/index_limits_not_bypassed.js49
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_restart_secondary.js126
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js166
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_updates.js96
-rw-r--r--jstests/noPassthroughWithMongod/insertMulti.js82
-rw-r--r--jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js300
-rw-r--r--jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js87
-rw-r--r--jstests/noPassthroughWithMongod/log_component_helpers.js81
-rw-r--r--jstests/noPassthroughWithMongod/logpath.js3
-rw-r--r--jstests/noPassthroughWithMongod/moveprimary-replset.js104
-rw-r--r--jstests/noPassthroughWithMongod/mr_writeconflict.js103
-rw-r--r--jstests/noPassthroughWithMongod/ne_array_indexability.js68
-rw-r--r--jstests/noPassthroughWithMongod/now_variable.js229
-rw-r--r--jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js96
-rw-r--r--jstests/noPassthroughWithMongod/plan_cache_replanning.js227
-rw-r--r--jstests/noPassthroughWithMongod/query_oplogreplay.js434
-rw-r--r--jstests/noPassthroughWithMongod/renameWithWCE.js96
-rw-r--r--jstests/noPassthroughWithMongod/replset_host_connection_validation.js126
-rw-r--r--jstests/noPassthroughWithMongod/rpc_protocols.js105
-rw-r--r--jstests/noPassthroughWithMongod/shell_advance_cluster_time.js30
-rw-r--r--jstests/noPassthroughWithMongod/shelllimit.js26
-rw-r--r--jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js67
-rw-r--r--jstests/noPassthroughWithMongod/temp_namespace.js6
-rw-r--r--jstests/noPassthroughWithMongod/top_drop.js118
-rw-r--r--jstests/noPassthroughWithMongod/ttl_repl.js3
-rw-r--r--jstests/noPassthroughWithMongod/ttl_repl_maintenance.js3
-rw-r--r--jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js3
-rw-r--r--jstests/noPassthroughWithMongod/ttl_sharded.js3
-rw-r--r--jstests/noPassthroughWithMongod/validate_command.js52
-rw-r--r--jstests/noPassthroughWithMongod/validate_interrupt.js76
-rw-r--r--jstests/noPassthroughWithMongod/views_invalid.js126
-rw-r--r--jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js60
-rw-r--r--jstests/parallel/shellfork.js2
-rw-r--r--jstests/parallel/update_serializability2.js38
-rw-r--r--jstests/perf/mr_bench.js2
-rw-r--r--jstests/perf/v8_mapreduce.js1
-rw-r--r--jstests/readonly/aggregate.js87
-rw-r--r--jstests/readonly/catalog_ops.js2
-rw-r--r--jstests/readonly/geo.js37
-rw-r--r--jstests/readonly/lib/read_only_test.js286
-rw-r--r--jstests/readonly/temp_collection.js8
-rw-r--r--jstests/replsets/abort_in_progress_transactions_on_step_up.js275
-rw-r--r--jstests/replsets/agg_write_concern_zero_batch_size.js114
-rw-r--r--jstests/replsets/aggregation_write_concern.js86
-rw-r--r--jstests/replsets/already_checked_out_session.js132
-rw-r--r--jstests/replsets/apply_batch_only_goes_forward.js146
-rw-r--r--jstests/replsets/apply_batches_totalMillis.js89
-rw-r--r--jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js14
-rw-r--r--jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js14
-rw-r--r--jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js14
-rw-r--r--jstests/replsets/apply_ops_create_indexes.js201
-rw-r--r--jstests/replsets/apply_ops_create_view.js26
-rw-r--r--jstests/replsets/apply_ops_create_with_uuid.js97
-rw-r--r--jstests/replsets/apply_ops_idempotency.js374
-rw-r--r--jstests/replsets/apply_ops_insert_write_conflict_atomic.js12
-rw-r--r--jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js12
-rw-r--r--jstests/replsets/apply_ops_lastop.js95
-rw-r--r--jstests/replsets/apply_ops_wc.js217
-rw-r--r--jstests/replsets/apply_transaction_with_yield.js52
-rw-r--r--jstests/replsets/arbiters_not_included_in_w2_wc.js59
-rw-r--r--jstests/replsets/arbiters_not_included_in_w3_wc.js55
-rw-r--r--jstests/replsets/auth1.js395
-rw-r--r--jstests/replsets/auth2.js128
-rw-r--r--jstests/replsets/auth_no_pri.js45
-rw-r--r--jstests/replsets/await_replication_timeout.js122
-rw-r--r--jstests/replsets/awaitdata_getmore_new_last_committed_optime.js198
-rw-r--r--jstests/replsets/background_index.js91
-rw-r--r--jstests/replsets/batch_write_command_wc.js315
-rw-r--r--jstests/replsets/buildindexes.js90
-rw-r--r--jstests/replsets/buildindexes_false_with_system_indexes.js137
-rw-r--r--jstests/replsets/bulk_api_wc.js281
-rw-r--r--jstests/replsets/capped_insert_order.js88
-rw-r--r--jstests/replsets/catchup.js396
-rw-r--r--jstests/replsets/catchup_takeover_one_high_priority.js166
-rw-r--r--jstests/replsets/catchup_takeover_two_nodes_ahead.js96
-rw-r--r--jstests/replsets/chaining_removal.js120
-rw-r--r--jstests/replsets/change_stream_speculative_majority.js156
-rw-r--r--jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js72
-rw-r--r--jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js165
-rw-r--r--jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js150
-rw-r--r--jstests/replsets/change_stream_speculative_majority_optimized_wait.js122
-rw-r--r--jstests/replsets/change_stream_speculative_majority_rollback.js194
-rw-r--r--jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js109
-rw-r--r--jstests/replsets/change_stream_stepdown.js258
-rw-r--r--jstests/replsets/clean_shutdown_oplog_state.js160
-rw-r--r--jstests/replsets/collate_id.js96
-rw-r--r--jstests/replsets/collection_validator_initial_sync_with_feature_compatibility_version.js148
-rw-r--r--jstests/replsets/command_response_operation_time.js103
-rw-r--r--jstests/replsets/commands_that_write_accept_wc.js350
-rw-r--r--jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js76
-rw-r--r--jstests/replsets/commit_transaction_initial_sync_data_already_applied.js122
-rw-r--r--jstests/replsets/commit_transaction_recovery.js90
-rw-r--r--jstests/replsets/dbcheck.js663
-rw-r--r--jstests/replsets/dbhash_lock_acquisition.js169
-rw-r--r--jstests/replsets/dbhash_read_at_cluster_time.js200
-rw-r--r--jstests/replsets/dbhash_system_collections.js100
-rw-r--r--jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js172
-rw-r--r--jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js249
-rw-r--r--jstests/replsets/disallow_adding_initialized_node1.js127
-rw-r--r--jstests/replsets/disallow_adding_initialized_node2.js139
-rw-r--r--jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js86
-rw-r--r--jstests/replsets/disconnect_on_legacy_write_to_secondary.js201
-rw-r--r--jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js190
-rw-r--r--jstests/replsets/drain.js170
-rw-r--r--jstests/replsets/drop_collections_two_phase.js44
-rw-r--r--jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js167
-rw-r--r--jstests/replsets/drop_collections_two_phase_apply_ops_create.js113
-rw-r--r--jstests/replsets/drop_collections_two_phase_apply_ops_drop.js95
-rw-r--r--jstests/replsets/drop_collections_two_phase_apply_ops_rename.js129
-rw-r--r--jstests/replsets/drop_collections_two_phase_dbhash.js70
-rw-r--r--jstests/replsets/drop_collections_two_phase_rename_drop_target.js266
-rw-r--r--jstests/replsets/drop_collections_two_phase_step_down.js86
-rw-r--r--jstests/replsets/drop_collections_two_phase_write_concern.js156
-rw-r--r--jstests/replsets/drop_databases_two_phase.js312
-rw-r--r--jstests/replsets/drop_db.js91
-rw-r--r--jstests/replsets/drop_oplog.js48
-rw-r--r--jstests/replsets/election_handoff_basic.js30
-rw-r--r--jstests/replsets/election_handoff_flip.js34
-rw-r--r--jstests/replsets/election_handoff_higher_priority.js36
-rw-r--r--jstests/replsets/election_handoff_one_unelectable.js32
-rw-r--r--jstests/replsets/election_handoff_via_signal.js30
-rw-r--r--jstests/replsets/emptycapped.js186
-rw-r--r--jstests/replsets/failcommand_ignores_internal.js54
-rw-r--r--jstests/replsets/find_and_modify_wc.js124
-rw-r--r--jstests/replsets/force_sync_source_candidate.js63
-rw-r--r--jstests/replsets/fsync_lock_read_secondaries.js83
-rw-r--r--jstests/replsets/get_replication_info_helper.js89
-rw-r--r--jstests/replsets/get_status.js36
-rw-r--r--jstests/replsets/groupAndMapReduce.js2
-rw-r--r--jstests/replsets/hang_before_releasing_transaction_oplog_hole.js92
-rw-r--r--jstests/replsets/id_index_replication.js116
-rw-r--r--jstests/replsets/initial_sync2.js1
-rw-r--r--jstests/replsets/initial_sync_applier_error.js66
-rw-r--r--jstests/replsets/initial_sync_capped_index.js182
-rw-r--r--jstests/replsets/initial_sync_cloner_dups.js228
-rw-r--r--jstests/replsets/initial_sync_commit_prepared_transaction.js208
-rw-r--r--jstests/replsets/initial_sync_document_validation.js36
-rw-r--r--jstests/replsets/initial_sync_drop_collection.js311
-rw-r--r--jstests/replsets/initial_sync_during_stepdown.js328
-rw-r--r--jstests/replsets/initial_sync_fail_insert_once.js46
-rw-r--r--jstests/replsets/initial_sync_fcv.js162
-rw-r--r--jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js378
-rw-r--r--jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js190
-rw-r--r--jstests/replsets/initial_sync_invalid_index_spec.js74
-rw-r--r--jstests/replsets/initial_sync_invalid_views.js44
-rw-r--r--jstests/replsets/initial_sync_move_forward.js135
-rw-r--r--jstests/replsets/initial_sync_oplog_hole.js155
-rw-r--r--jstests/replsets/initial_sync_oplog_rollover.js97
-rw-r--r--jstests/replsets/initial_sync_preserves_active_txns.js133
-rw-r--r--jstests/replsets/initial_sync_read_concern_no_oplog.js42
-rw-r--r--jstests/replsets/initial_sync_rename_collection.js192
-rw-r--r--jstests/replsets/initial_sync_replSetGetStatus.js167
-rw-r--r--jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js134
-rw-r--r--jstests/replsets/initial_sync_test_fixture_test.js263
-rw-r--r--jstests/replsets/initial_sync_update_missing_doc1.js55
-rw-r--r--jstests/replsets/initial_sync_update_missing_doc2.js67
-rw-r--r--jstests/replsets/initial_sync_update_missing_doc3.js88
-rw-r--r--jstests/replsets/initial_sync_update_missing_doc_with_prepare.js157
-rw-r--r--jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js139
-rw-r--r--jstests/replsets/initial_sync_uuid_not_found.js103
-rw-r--r--jstests/replsets/initial_sync_views.js52
-rw-r--r--jstests/replsets/initiate.js32
-rw-r--r--jstests/replsets/inmemory_preserves_active_txns.js196
-rw-r--r--jstests/replsets/interrupted_batch_insert.js208
-rw-r--r--jstests/replsets/invalid_index_spec.js108
-rw-r--r--jstests/replsets/ismaster1.js6
-rw-r--r--jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js207
-rw-r--r--jstests/replsets/kill_ttl_on_stepdown.js92
-rw-r--r--jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js236
-rw-r--r--jstests/replsets/last_error_reported_after_stepdown.js191
-rw-r--r--jstests/replsets/last_op_visible.js83
-rw-r--r--jstests/replsets/last_vote.js397
-rw-r--r--jstests/replsets/lastop.js175
-rw-r--r--jstests/replsets/libs/election_handoff.js6
-rw-r--r--jstests/replsets/libs/initial_sync_test.js1
-rw-r--r--jstests/replsets/libs/initial_sync_update_missing_doc.js7
-rw-r--r--jstests/replsets/libs/rename_across_dbs.js17
-rw-r--r--jstests/replsets/libs/rollback_test.js1
-rw-r--r--jstests/replsets/libs/secondary_reads_test.js4
-rw-r--r--jstests/replsets/libs/tags.js93
-rw-r--r--jstests/replsets/libs/two_phase_drops.js4
-rw-r--r--jstests/replsets/linearizable_read_concern.js254
-rw-r--r--jstests/replsets/localhost1.js20
-rw-r--r--jstests/replsets/localhost2.js26
-rw-r--r--jstests/replsets/localhost3.js20
-rw-r--r--jstests/replsets/log_secondary_oplog_application.js109
-rw-r--r--jstests/replsets/maintenance2.js68
-rw-r--r--jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js106
-rw-r--r--jstests/replsets/maxSyncSourceLagSecs.js100
-rw-r--r--jstests/replsets/minimum_visible_with_cluster_time.js160
-rw-r--r--jstests/replsets/mr_nonrepl_coll_in_local_db.js126
-rw-r--r--jstests/replsets/multikey_write_avoids_prepare_conflict.js96
-rw-r--r--jstests/replsets/nested_apply_ops_create_indexes.js105
-rw-r--r--jstests/replsets/no_disconnect_on_stepdown.js168
-rw-r--r--jstests/replsets/no_flapping_during_network_partition.js60
-rw-r--r--jstests/replsets/noop_write_after_read_only_txn.js134
-rw-r--r--jstests/replsets/noop_writes_wait_for_write_concern.js446
-rw-r--r--jstests/replsets/noop_writes_wait_for_write_concern_fcv.js108
-rw-r--r--jstests/replsets/not_master_unacknowledged_write.js135
-rw-r--r--jstests/replsets/opcounters_repl.js179
-rw-r--r--jstests/replsets/operation_time_read_and_write_concern.js229
-rw-r--r--jstests/replsets/oplog_format_create_indexes.js120
-rw-r--r--jstests/replsets/oplog_replay_on_startup_with_bad_op.js94
-rw-r--r--jstests/replsets/oplog_rollover.js223
-rw-r--r--jstests/replsets/oplog_term.js51
-rw-r--r--jstests/replsets/oplog_visibility.js198
-rw-r--r--jstests/replsets/oplog_wallclock.js40
-rw-r--r--jstests/replsets/optime.js10
-rw-r--r--jstests/replsets/prepare_conflict_read_concern_behavior.js636
-rw-r--r--jstests/replsets/prepare_failover_rollback_commit.js80
-rw-r--r--jstests/replsets/prepare_prepared_transaction_wc_timeout.js125
-rw-r--r--jstests/replsets/prepare_survives_primary_reconfig_failover.js8
-rw-r--r--jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js8
-rw-r--r--jstests/replsets/prepare_transaction_fails_on_standalone.js18
-rw-r--r--jstests/replsets/prepare_transaction_fails_with_arbiters.js45
-rw-r--r--jstests/replsets/prepare_transaction_fails_without_majority_reads.js34
-rw-r--r--jstests/replsets/prepare_transaction_index_build.js140
-rw-r--r--jstests/replsets/prepare_transaction_read_at_cluster_time.js297
-rw-r--r--jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js131
-rw-r--r--jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js114
-rw-r--r--jstests/replsets/prepared_transaction_on_failover.js251
-rw-r--r--jstests/replsets/primary_casts_vote_on_stepdown.js41
-rw-r--r--jstests/replsets/priority_takeover_cascading_priorities.js48
-rw-r--r--jstests/replsets/priority_takeover_one_node_higher_priority.js54
-rw-r--r--jstests/replsets/priority_takeover_two_nodes_equal_priority.js87
-rw-r--r--jstests/replsets/read_after_optime.js152
-rw-r--r--jstests/replsets/read_at_cluster_time_outside_transactions.js283
-rw-r--r--jstests/replsets/read_committed.js318
-rw-r--r--jstests/replsets/read_committed_after_rollback.js280
-rw-r--r--jstests/replsets/read_committed_lookup.js60
-rw-r--r--jstests/replsets/read_committed_no_snapshots.js123
-rw-r--r--jstests/replsets/read_committed_on_secondary.js252
-rw-r--r--jstests/replsets/read_committed_stale_history.js276
-rw-r--r--jstests/replsets/read_committed_with_catalog_changes.js551
-rw-r--r--jstests/replsets/read_concern_majority_getmore_secondaries.js120
-rw-r--r--jstests/replsets/read_concern_uninitated_set.js99
-rw-r--r--jstests/replsets/read_majority_two_arbs.js107
-rw-r--r--jstests/replsets/read_operations_during_rollback.js186
-rw-r--r--jstests/replsets/read_operations_during_step_down.js208
-rw-r--r--jstests/replsets/reconfig.js83
-rw-r--r--jstests/replsets/reconfig_during_election.js67
-rw-r--r--jstests/replsets/reconstruct_prepared_transactions_initial_sync.js458
-rw-r--r--jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js234
-rw-r--r--jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js351
-rw-r--r--jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js154
-rw-r--r--jstests/replsets/recover_committed_aborted_prepared_transactions.js225
-rw-r--r--jstests/replsets/recover_multiple_prepared_transactions_startup.js284
-rw-r--r--jstests/replsets/recover_prepared_transaction_state.js331
-rw-r--r--jstests/replsets/recover_prepared_transactions_startup_secondary_application.js162
-rw-r--r--jstests/replsets/recover_prepared_txn_with_multikey_write.js54
-rw-r--r--jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js117
-rw-r--r--jstests/replsets/recovery_preserves_active_txns.js155
-rw-r--r--jstests/replsets/refresh_sessions_rs.js158
-rw-r--r--jstests/replsets/rename_across_dbs.js6
-rw-r--r--jstests/replsets/rename_across_dbs_drop_target.js10
-rw-r--r--jstests/replsets/rename_collection_between_unrepl_and_repl.js52
-rw-r--r--jstests/replsets/rename_collection_temp.js112
-rw-r--r--jstests/replsets/replset1.js1
-rw-r--r--jstests/replsets/replset2.js1
-rw-r--r--jstests/replsets/replset3.js2
-rw-r--r--jstests/replsets/replset4.js1
-rw-r--r--jstests/replsets/replset5.js133
-rw-r--r--jstests/replsets/replset8.js142
-rw-r--r--jstests/replsets/replsetarb2.js88
-rw-r--r--jstests/replsets/replsetprio1.js102
-rw-r--r--jstests/replsets/replsetrestart1.js92
-rw-r--r--jstests/replsets/replsets_killop.js8
-rw-r--r--jstests/replsets/request_primary_stepdown.js51
-rw-r--r--jstests/replsets/restore_term.js96
-rw-r--r--jstests/replsets/retryable_commit_transaction_after_failover.js183
-rw-r--r--jstests/replsets/retryable_commit_transaction_after_restart.js159
-rw-r--r--jstests/replsets/retryable_prepared_commit_transaction_after_failover.js142
-rw-r--r--jstests/replsets/retryable_write_concern.js463
-rw-r--r--jstests/replsets/retryable_writes_direct_write_to_config_transactions.js180
-rw-r--r--jstests/replsets/retryable_writes_failover.js233
-rw-r--r--jstests/replsets/rollback_aborted_prepared_transaction.js194
-rw-r--r--jstests/replsets/rollback_after_disabling_majority_reads.js58
-rw-r--r--jstests/replsets/rollback_after_enabling_majority_reads.js114
-rw-r--r--jstests/replsets/rollback_all_op_types.js608
-rw-r--r--jstests/replsets/rollback_auth.js403
-rw-r--r--jstests/replsets/rollback_capped_deletions.js66
-rw-r--r--jstests/replsets/rollback_collmods.js202
-rw-r--r--jstests/replsets/rollback_crud_op_sequences.js224
-rw-r--r--jstests/replsets/rollback_ddl_op_sequences.js274
-rw-r--r--jstests/replsets/rollback_drop_database.js109
-rw-r--r--jstests/replsets/rollback_drop_index_after_rename.js98
-rw-r--r--jstests/replsets/rollback_dup_ids.js57
-rw-r--r--jstests/replsets/rollback_files_no_prepare_conflict.js64
-rw-r--r--jstests/replsets/rollback_prepare_transaction.js162
-rw-r--r--jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js206
-rw-r--r--jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js179
-rw-r--r--jstests/replsets/rollback_remote_cursor_retry.js61
-rw-r--r--jstests/replsets/rollback_rename_collection_on_sync_source.js65
-rw-r--r--jstests/replsets/rollback_rename_count.js82
-rw-r--r--jstests/replsets/rollback_time_limit_param.js67
-rw-r--r--jstests/replsets/rollback_transaction_table.js426
-rw-r--r--jstests/replsets/rollback_transactions_count.js90
-rw-r--r--jstests/replsets/rollback_unprepared_transactions.js108
-rw-r--r--jstests/replsets/rollback_via_refetch_commit_transaction.js110
-rw-r--r--jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js92
-rw-r--r--jstests/replsets/rollback_views.js235
-rw-r--r--jstests/replsets/rollback_waits_for_bgindex_completion.js160
-rw-r--r--jstests/replsets/rollback_with_socket_error_then_steady_state.js257
-rw-r--r--jstests/replsets/rollover_preserves_active_txns.js166
-rw-r--r--jstests/replsets/rslib.js774
-rw-r--r--jstests/replsets/secondary_as_sync_source.js118
-rw-r--r--jstests/replsets/secondary_reads_timestamp_visibility.js180
-rw-r--r--jstests/replsets/secondary_reads_unique_indexes.js134
-rw-r--r--jstests/replsets/server8070.js274
-rw-r--r--jstests/replsets/sessions_collection_auto_healing.js164
-rw-r--r--jstests/replsets/shutdown.js42
-rw-r--r--jstests/replsets/shutdown_primary.js82
-rw-r--r--jstests/replsets/shutdown_with_prepared_transaction.js44
-rw-r--r--jstests/replsets/sized_zero_capped.js40
-rw-r--r--jstests/replsets/slave_delay_clean_shutdown.js88
-rw-r--r--jstests/replsets/slavedelay1.js1
-rw-r--r--jstests/replsets/slaveok_read_pref.js92
-rw-r--r--jstests/replsets/speculative_majority_find.js280
-rw-r--r--jstests/replsets/speculative_majority_supported_commands.js104
-rw-r--r--jstests/replsets/speculative_read_transaction.js202
-rw-r--r--jstests/replsets/speculative_transaction.js162
-rw-r--r--jstests/replsets/standalone_replication_recovery_prepare_only.js8
-rw-r--r--jstests/replsets/standalone_replication_recovery_prepare_with_commit.js8
-rw-r--r--jstests/replsets/startParallelShell.js52
-rw-r--r--jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js160
-rw-r--r--jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js212
-rw-r--r--jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js72
-rw-r--r--jstests/replsets/step_down_during_draining.js229
-rw-r--r--jstests/replsets/step_down_during_draining2.js311
-rw-r--r--jstests/replsets/step_down_during_draining3.js231
-rw-r--r--jstests/replsets/step_down_on_secondary.js228
-rw-r--r--jstests/replsets/stepdown3.js89
-rw-r--r--jstests/replsets/stepdown_catch_up_opt.js134
-rw-r--r--jstests/replsets/stepdown_kill_other_ops.js108
-rw-r--r--jstests/replsets/stepdown_killop.js118
-rw-r--r--jstests/replsets/stepdown_long_wait_time.js105
-rw-r--r--jstests/replsets/stepdown_needs_electable_secondary.js232
-rw-r--r--jstests/replsets/stepdown_needs_majority.js174
-rw-r--r--jstests/replsets/stepup.js80
-rw-r--r--jstests/replsets/storage_commit_out_of_order.js108
-rw-r--r--jstests/replsets/sync2.js92
-rw-r--r--jstests/replsets/system_profile.js75
-rw-r--r--jstests/replsets/system_profile_secondary.js34
-rw-r--r--jstests/replsets/tags.js8
-rw-r--r--jstests/replsets/tags2.js112
-rw-r--r--jstests/replsets/tags_with_reconfig.js102
-rw-r--r--jstests/replsets/temp_namespace_restart_as_standalone.js163
-rw-r--r--jstests/replsets/test_command.js255
-rw-r--r--jstests/replsets/too_stale_secondary.js168
-rw-r--r--jstests/replsets/transaction_table_multi_statement_txn.js69
-rw-r--r--jstests/replsets/transaction_table_oplog_replay.js373
-rw-r--r--jstests/replsets/transactions_after_rollback_via_refetch.js223
-rw-r--r--jstests/replsets/transactions_committed_with_tickets_exhausted.js147
-rw-r--r--jstests/replsets/transactions_during_step_down.js242
-rw-r--r--jstests/replsets/transactions_on_secondaries_not_allowed.js135
-rw-r--r--jstests/replsets/transactions_only_allowed_on_primaries.js242
-rw-r--r--jstests/replsets/transactions_reaped_with_tickets_exhausted.js137
-rw-r--r--jstests/replsets/transactions_wait_for_write_concern.js376
-rw-r--r--jstests/replsets/transient_txn_error_labels.js474
-rw-r--r--jstests/replsets/transient_txn_error_labels_with_write_concern.js236
-rw-r--r--jstests/replsets/two_nodes_priority_take_over.js2
-rw-r--r--jstests/replsets/txn_override_unittests.js3704
-rw-r--r--jstests/replsets/unconditional_step_down.js386
-rw-r--r--jstests/replsets/uninitialized_fcv_access.js41
-rw-r--r--jstests/replsets/update_commit_point_from_sync_source_ignores_term.js139
-rw-r--r--jstests/replsets/user_management_wc.js262
-rw-r--r--jstests/replsets/verify_sessions_expiration_rs.js222
-rw-r--r--jstests/replsets/view_catalog_oplog_entries.js62
-rw-r--r--jstests/replsets/view_definition_initial_sync_with_feature_compatibility_version.js165
-rw-r--r--jstests/replsets/write_concern_after_stepdown.js190
-rw-r--r--jstests/replsets/write_concern_after_stepdown_and_stepup.js212
-rw-r--r--jstests/serial_run/index_multi.js20
-rw-r--r--jstests/serial_run/srv-uri.js14
-rw-r--r--jstests/sharding/accurate_count_with_predicate.js52
-rw-r--r--jstests/sharding/add_and_remove_shard_from_zone.js52
-rw-r--r--jstests/sharding/addshard1.js115
-rw-r--r--jstests/sharding/addshard2.js371
-rw-r--r--jstests/sharding/addshard4.js83
-rw-r--r--jstests/sharding/addshard5.js60
-rw-r--r--jstests/sharding/addshard6.js86
-rw-r--r--jstests/sharding/addshard_idempotent.js105
-rw-r--r--jstests/sharding/advance_cluster_time_action_type.js117
-rw-r--r--jstests/sharding/advance_logical_time_with_valid_signature.js58
-rw-r--r--jstests/sharding/after_cluster_time.js180
-rw-r--r--jstests/sharding/agg_error_reports_shard_host_and_port.js42
-rw-r--r--jstests/sharding/agg_explain_fmt.js66
-rw-r--r--jstests/sharding/agg_project_limit_pipe_split.js140
-rw-r--r--jstests/sharding/agg_sort.js420
-rw-r--r--jstests/sharding/agg_write_stages_cannot_run_on_mongos.js71
-rw-r--r--jstests/sharding/aggregates_during_balancing.js479
-rw-r--r--jstests/sharding/aggregation_currentop.js1573
-rw-r--r--jstests/sharding/aggregation_internal_parameters.js224
-rw-r--r--jstests/sharding/aggregations_in_session.js60
-rw-r--r--jstests/sharding/all_config_servers_blackholed_from_mongos.js55
-rw-r--r--jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js110
-rw-r--r--jstests/sharding/allow_partial_results.js107
-rw-r--r--jstests/sharding/arbiters_do_not_use_cluster_time.js42
-rw-r--r--jstests/sharding/array_shard_key.js157
-rw-r--r--jstests/sharding/auth.js594
-rw-r--r--jstests/sharding/auth2.js40
-rw-r--r--jstests/sharding/authCommands.js561
-rw-r--r--jstests/sharding/authConnectionHook.js72
-rw-r--r--jstests/sharding/auth_add_shard.js137
-rw-r--r--jstests/sharding/auth_no_config_primary.js62
-rw-r--r--jstests/sharding/auth_sharding_cmd_metadata.js55
-rw-r--r--jstests/sharding/auth_slaveok_routing.js214
-rw-r--r--jstests/sharding/authmr.js217
-rw-r--r--jstests/sharding/authwhere.js141
-rw-r--r--jstests/sharding/auto_rebalance_parallel.js96
-rw-r--r--jstests/sharding/auto_rebalance_parallel_replica_sets.js102
-rw-r--r--jstests/sharding/autodiscover_config_rs_from_secondary.js102
-rw-r--r--jstests/sharding/autosplit.js126
-rw-r--r--jstests/sharding/autosplit_heuristics.js133
-rw-r--r--jstests/sharding/autosplit_with_balancer.js270
-rw-r--r--jstests/sharding/balance_repl.js106
-rw-r--r--jstests/sharding/balancer_shell_commands.js24
-rw-r--r--jstests/sharding/balancer_window.js128
-rw-r--r--jstests/sharding/basic_drop_coll.js83
-rw-r--r--jstests/sharding/basic_merge.js88
-rw-r--r--jstests/sharding/basic_sharding_params.js110
-rw-r--r--jstests/sharding/basic_split.js151
-rw-r--r--jstests/sharding/batch_write_command_sharded.js490
-rw-r--r--jstests/sharding/bouncing_count.js100
-rw-r--r--jstests/sharding/bulk_insert.js396
-rw-r--r--jstests/sharding/bulk_shard_insert.js128
-rw-r--r--jstests/sharding/causal_consistency_shell_support.js354
-rw-r--r--jstests/sharding/change_stream_chunk_migration.js322
-rw-r--r--jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js351
-rw-r--r--jstests/sharding/change_stream_lookup_single_shard_cluster.js92
-rw-r--r--jstests/sharding/change_stream_metadata_notifications.js292
-rw-r--r--jstests/sharding/change_stream_no_shards.js54
-rw-r--r--jstests/sharding/change_stream_read_preference.js263
-rw-r--r--jstests/sharding/change_stream_resume_from_different_mongos.js182
-rw-r--r--jstests/sharding/change_stream_shard_failover.js192
-rw-r--r--jstests/sharding/change_stream_show_migration_events.js518
-rw-r--r--jstests/sharding/change_stream_transaction_sharded.js481
-rw-r--r--jstests/sharding/change_stream_update_lookup_collation.js317
-rw-r--r--jstests/sharding/change_stream_update_lookup_read_concern.js373
-rw-r--r--jstests/sharding/change_streams.js485
-rw-r--r--jstests/sharding/change_streams_establishment_finds_new_shards.js104
-rw-r--r--jstests/sharding/change_streams_primary_shard_unaware.js355
-rw-r--r--jstests/sharding/change_streams_shards_start_in_sync.js191
-rw-r--r--jstests/sharding/change_streams_unsharded_becomes_sharded.js334
-rw-r--r--jstests/sharding/change_streams_whole_db.js366
-rw-r--r--jstests/sharding/cleanup_orphaned_auth.js82
-rw-r--r--jstests/sharding/cleanup_orphaned_basic.js228
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js291
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js208
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_hashed.js131
-rw-r--r--jstests/sharding/clone_catalog_data.js5
-rw-r--r--jstests/sharding/coll_epoch_test0.js2
-rw-r--r--jstests/sharding/coll_epoch_test1.js120
-rw-r--r--jstests/sharding/collation_lookup.js496
-rw-r--r--jstests/sharding/collation_targeting.js843
-rw-r--r--jstests/sharding/collation_targeting_inherited.js894
-rw-r--r--jstests/sharding/commands_that_write_accept_wc_configRS.js387
-rw-r--r--jstests/sharding/commands_that_write_accept_wc_shards.js686
-rw-r--r--jstests/sharding/config_rs_no_primary.js100
-rw-r--r--jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js374
-rw-r--r--jstests/sharding/conn_pool_stats.js54
-rw-r--r--jstests/sharding/convert_to_and_from_sharded.js174
-rw-r--r--jstests/sharding/count1.js355
-rw-r--r--jstests/sharding/count2.js77
-rw-r--r--jstests/sharding/count_config_servers.js115
-rw-r--r--jstests/sharding/count_slaveok.js92
-rw-r--r--jstests/sharding/covered_shard_key_indexes.js282
-rw-r--r--jstests/sharding/create_database.js95
-rw-r--r--jstests/sharding/create_idx_empty_primary.js39
-rw-r--r--jstests/sharding/current_op_no_shards.js15
-rw-r--r--jstests/sharding/current_op_with_drop_shard.js30
-rw-r--r--jstests/sharding/cursor1.js109
-rw-r--r--jstests/sharding/cursor_timeout.js213
-rw-r--r--jstests/sharding/cursor_valid_after_shard_stepdown.js57
-rw-r--r--jstests/sharding/database_and_shard_versioning_all_commands.js1133
-rw-r--r--jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js106
-rw-r--r--jstests/sharding/database_versioning_safe_secondary_reads.js426
-rw-r--r--jstests/sharding/delete_during_migrate.js46
-rw-r--r--jstests/sharding/diffservers1.js34
-rw-r--r--jstests/sharding/disable_autosplit.js45
-rw-r--r--jstests/sharding/drop_configdb.js46
-rw-r--r--jstests/sharding/drop_sharded_db.js100
-rw-r--r--jstests/sharding/drop_sharded_db_tags_cleanup.js40
-rw-r--r--jstests/sharding/dump_coll_metadata.js109
-rw-r--r--jstests/sharding/empty_doc_results.js106
-rw-r--r--jstests/sharding/enable_sharding_basic.js77
-rw-r--r--jstests/sharding/enforce_zone_policy.js150
-rw-r--r--jstests/sharding/error_during_agg_getmore.js95
-rw-r--r--jstests/sharding/error_propagation.js34
-rw-r--r--jstests/sharding/explainFind_stale_mongos.js38
-rw-r--r--jstests/sharding/explain_agg_read_pref.js289
-rw-r--r--jstests/sharding/explain_cmd.js309
-rw-r--r--jstests/sharding/explain_find_and_modify_sharded.js138
-rw-r--r--jstests/sharding/explain_read_pref.js1
-rw-r--r--jstests/sharding/failcommand_failpoint_not_parallel.js32
-rw-r--r--jstests/sharding/failcommand_ignores_internal.js91
-rw-r--r--jstests/sharding/features1.js259
-rw-r--r--jstests/sharding/features2.js286
-rw-r--r--jstests/sharding/features3.js271
-rw-r--r--jstests/sharding/find_and_modify_after_multi_write.js105
-rw-r--r--jstests/sharding/find_collname_uuid_test.js20
-rw-r--r--jstests/sharding/find_getmore_cmd.js318
-rw-r--r--jstests/sharding/findandmodify1.js154
-rw-r--r--jstests/sharding/findandmodify2.js198
-rw-r--r--jstests/sharding/geo_near_random1.js76
-rw-r--r--jstests/sharding/geo_near_random2.js96
-rw-r--r--jstests/sharding/geo_near_sharded.js104
-rw-r--r--jstests/sharding/geo_near_sort.js155
-rw-r--r--jstests/sharding/graph_lookup.js42
-rw-r--r--jstests/sharding/hash_basic.js81
-rw-r--r--jstests/sharding/hash_shard_num_chunks.js45
-rw-r--r--jstests/sharding/hash_shard_unique_compound.js54
-rw-r--r--jstests/sharding/implicit_db_creation.js53
-rw-r--r--jstests/sharding/in_memory_sort_limit.js89
-rw-r--r--jstests/sharding/index1.js651
-rw-r--r--jstests/sharding/index_and_collection_option_propagation.js398
-rw-r--r--jstests/sharding/initial_split_validate_shard_collections.js128
-rw-r--r--jstests/sharding/inserts_consistent.js92
-rw-r--r--jstests/sharding/invalid_system_views_sharded_collection.js214
-rw-r--r--jstests/sharding/json_schema.js117
-rw-r--r--jstests/sharding/jumbo1.js91
-rw-r--r--jstests/sharding/key_many.js448
-rw-r--r--jstests/sharding/key_rotation.js150
-rw-r--r--jstests/sharding/key_string.js109
-rw-r--r--jstests/sharding/keys_rotation_interval_sec.js44
-rw-r--r--jstests/sharding/kill_op_overflow.js10
-rw-r--r--jstests/sharding/kill_pinned_cursor.js422
-rw-r--r--jstests/sharding/kill_sessions.js96
-rw-r--r--jstests/sharding/killop.js93
-rw-r--r--jstests/sharding/lagged_config_secondary.js92
-rw-r--r--jstests/sharding/large_chunk.js94
-rw-r--r--jstests/sharding/libs/sharded_transactions_helpers.js48
-rw-r--r--jstests/sharding/limit_push.js111
-rw-r--r--jstests/sharding/linearizable_read_concern.js200
-rw-r--r--jstests/sharding/listDatabases.js174
-rw-r--r--jstests/sharding/listshards.js112
-rw-r--r--jstests/sharding/localhostAuthBypass.js447
-rw-r--r--jstests/sharding/logical_time_api.js173
-rw-r--r--jstests/sharding/logical_time_metadata.js81
-rw-r--r--jstests/sharding/lookup.js1026
-rw-r--r--jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js208
-rw-r--r--jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js117
-rw-r--r--jstests/sharding/lookup_change_stream_post_image_id_shard_key.js176
-rw-r--r--jstests/sharding/lookup_mongod_unaware.js341
-rw-r--r--jstests/sharding/lookup_on_shard.js190
-rw-r--r--jstests/sharding/lookup_stale_mongos.js257
-rw-r--r--jstests/sharding/major_version_check.js67
-rw-r--r--jstests/sharding/mapReduce_inSharded.js165
-rw-r--r--jstests/sharding/mapReduce_inSharded_outSharded.js121
-rw-r--r--jstests/sharding/mapReduce_nonSharded.js3
-rw-r--r--jstests/sharding/mapReduce_outSharded.js3
-rw-r--r--jstests/sharding/mapReduce_outSharded_checkUUID.js294
-rw-r--r--jstests/sharding/max_time_ms_sharded.js463
-rw-r--r--jstests/sharding/max_time_ms_sharded_new_commands.js71
-rw-r--r--jstests/sharding/merge_chunks_compound_shard_key.js173
-rw-r--r--jstests/sharding/merge_chunks_test.js270
-rw-r--r--jstests/sharding/merge_chunks_test_with_md_ops.js69
-rw-r--r--jstests/sharding/merge_command_options.js335
-rw-r--r--jstests/sharding/merge_does_not_force_pipeline_split.js172
-rw-r--r--jstests/sharding/merge_from_stale_mongos.js446
-rw-r--r--jstests/sharding/merge_hashed_shard_key.js139
-rw-r--r--jstests/sharding/merge_on_fields.js138
-rw-r--r--jstests/sharding/merge_requires_unique_index.js493
-rw-r--r--jstests/sharding/merge_stale_on_fields.js328
-rw-r--r--jstests/sharding/merge_to_existing.js248
-rw-r--r--jstests/sharding/merge_to_non_existing.js197
-rw-r--r--jstests/sharding/merge_with_chunk_migrations.js222
-rw-r--r--jstests/sharding/merge_with_drop_shard.js204
-rw-r--r--jstests/sharding/merge_with_move_primary.js288
-rw-r--r--jstests/sharding/merge_write_concern.js178
-rw-r--r--jstests/sharding/migrateBig.js95
-rw-r--r--jstests/sharding/migrateBig_balancer.js105
-rw-r--r--jstests/sharding/migration_critical_section_concurrency.js88
-rw-r--r--jstests/sharding/migration_failure.js116
-rw-r--r--jstests/sharding/migration_id_index.js70
-rw-r--r--jstests/sharding/migration_ignore_interrupts_1.js136
-rw-r--r--jstests/sharding/migration_ignore_interrupts_2.js76
-rw-r--r--jstests/sharding/migration_ignore_interrupts_3.js190
-rw-r--r--jstests/sharding/migration_ignore_interrupts_4.js193
-rw-r--r--jstests/sharding/migration_move_chunk_after_receive.js106
-rw-r--r--jstests/sharding/migration_server_status.js135
-rw-r--r--jstests/sharding/migration_sets_fromMigrate_flag.js345
-rw-r--r--jstests/sharding/migration_with_source_ops.js246
-rw-r--r--jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js52
-rw-r--r--jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js67
-rw-r--r--jstests/sharding/missing_key.js58
-rw-r--r--jstests/sharding/mongod_returns_no_cluster_time_without_keys.js162
-rw-r--r--jstests/sharding/mongos_dataSize_test.js20
-rw-r--r--jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js124
-rw-r--r--jstests/sharding/mongos_local_explain.js38
-rw-r--r--jstests/sharding/mongos_no_detect_sharding.js49
-rw-r--r--jstests/sharding/mongos_no_replica_set_refresh.js222
-rw-r--r--jstests/sharding/mongos_query_comment.js155
-rw-r--r--jstests/sharding/mongos_rs_shard_failure_tolerance.js746
-rw-r--r--jstests/sharding/mongos_shard_failure_tolerance.js168
-rw-r--r--jstests/sharding/mongos_validate_writes.js120
-rw-r--r--jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js65
-rw-r--r--jstests/sharding/movePrimary1.js83
-rw-r--r--jstests/sharding/move_chunk_basic.js106
-rw-r--r--jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js221
-rw-r--r--jstests/sharding/move_chunk_insert_with_write_retryability.js76
-rw-r--r--jstests/sharding/move_chunk_open_cursors.js80
-rw-r--r--jstests/sharding/move_chunk_remove_with_write_retryability.js90
-rw-r--r--jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js710
-rw-r--r--jstests/sharding/move_chunk_update_with_write_retryability.js96
-rw-r--r--jstests/sharding/move_chunk_wc.js171
-rw-r--r--jstests/sharding/move_primary_basic.js87
-rw-r--r--jstests/sharding/move_primary_clone_test.js367
-rw-r--r--jstests/sharding/move_primary_fails_without_database_version.js20
-rw-r--r--jstests/sharding/movechunk_commit_changelog_stats.js48
-rw-r--r--jstests/sharding/movechunk_interrupt_at_primary_stepdown.js98
-rw-r--r--jstests/sharding/movechunk_parallel.js148
-rw-r--r--jstests/sharding/mrShardedOutputAuth.js171
-rw-r--r--jstests/sharding/mr_and_agg_versioning.js95
-rw-r--r--jstests/sharding/mr_output_sharded_validation.js76
-rw-r--r--jstests/sharding/mr_shard_version.js119
-rw-r--r--jstests/sharding/multi_coll_drop.js53
-rw-r--r--jstests/sharding/multi_mongos2.js76
-rw-r--r--jstests/sharding/multi_mongos2a.js36
-rw-r--r--jstests/sharding/multi_shard_transaction_without_majority_reads.js42
-rw-r--r--jstests/sharding/multi_write_target.js99
-rw-r--r--jstests/sharding/names.js109
-rw-r--r--jstests/sharding/nonreplicated_uuids_on_shardservers.js30
-rw-r--r--jstests/sharding/not_allowed_on_sharded_collection_cmd.js31
-rw-r--r--jstests/sharding/now_variable_replset.js247
-rw-r--r--jstests/sharding/now_variable_sharding.js285
-rw-r--r--jstests/sharding/operation_time_api.js96
-rw-r--r--jstests/sharding/oplog_document_key.js196
-rw-r--r--jstests/sharding/out_fails_to_replace_sharded_collection.js84
-rw-r--r--jstests/sharding/parallel.js103
-rw-r--r--jstests/sharding/pending_chunk.js115
-rw-r--r--jstests/sharding/prefix_shard_key.js335
-rw-r--r--jstests/sharding/prepare_transaction_then_migrate.js96
-rw-r--r--jstests/sharding/presplit.js73
-rw-r--r--jstests/sharding/primary_config_server_blackholed_from_mongos.js112
-rw-r--r--jstests/sharding/printShardingStatus.js433
-rw-r--r--jstests/sharding/query_after_multi_write.js91
-rw-r--r--jstests/sharding/query_config.js685
-rw-r--r--jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js93
-rw-r--r--jstests/sharding/read_after_optime.js76
-rw-r--r--jstests/sharding/read_committed_lookup.js91
-rw-r--r--jstests/sharding/read_does_not_create_namespaces.js15
-rw-r--r--jstests/sharding/read_pref.js336
-rw-r--r--jstests/sharding/read_pref_cmd.js6
-rw-r--r--jstests/sharding/read_pref_multi_mongos_stale_config.js57
-rw-r--r--jstests/sharding/recovering_slaveok.js175
-rw-r--r--jstests/sharding/refresh_sessions.js168
-rw-r--r--jstests/sharding/regex_targeting.js565
-rw-r--r--jstests/sharding/remove1.js66
-rw-r--r--jstests/sharding/remove2.js362
-rw-r--r--jstests/sharding/remove3.js61
-rw-r--r--jstests/sharding/rename.js118
-rw-r--r--jstests/sharding/rename_across_mongos.js39
-rw-r--r--jstests/sharding/repl_monitor_refresh.js135
-rw-r--r--jstests/sharding/replication_with_undefined_shard_key.js40
-rw-r--r--jstests/sharding/replmonitor_bad_seed.js38
-rw-r--r--jstests/sharding/restart_transactions.js320
-rw-r--r--jstests/sharding/resume_change_stream.js356
-rw-r--r--jstests/sharding/resume_change_stream_from_stale_mongos.js140
-rw-r--r--jstests/sharding/resume_change_stream_on_subset_of_shards.js111
-rw-r--r--jstests/sharding/retryable_writes.js1041
-rw-r--r--jstests/sharding/rs_stepdown_and_pooling.js136
-rw-r--r--jstests/sharding/safe_secondary_reads_drop_recreate.js1140
-rw-r--r--jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js1007
-rw-r--r--jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js818
-rw-r--r--jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js186
-rw-r--r--jstests/sharding/secondary_shard_versioning.js160
-rw-r--r--jstests/sharding/server_status.js52
-rw-r--r--jstests/sharding/server_status_crud_metrics.js104
-rw-r--r--jstests/sharding/session_info_in_oplog.js655
-rw-r--r--jstests/sharding/sessions_collection_auto_healing.js237
-rw-r--r--jstests/sharding/shard1.js61
-rw-r--r--jstests/sharding/shard2.js413
-rw-r--r--jstests/sharding/shard3.js353
-rw-r--r--jstests/sharding/shard6.js188
-rw-r--r--jstests/sharding/shard_aware_init.js306
-rw-r--r--jstests/sharding/shard_aware_init_secondaries.js127
-rw-r--r--jstests/sharding/shard_aware_on_add_shard.js91
-rw-r--r--jstests/sharding/shard_aware_primary_failover.js106
-rw-r--r--jstests/sharding/shard_collection_basic.js579
-rw-r--r--jstests/sharding/shard_collection_existing_zones.js346
-rw-r--r--jstests/sharding/shard_collection_verify_initial_chunks.js94
-rw-r--r--jstests/sharding/shard_config_db_collections.js72
-rw-r--r--jstests/sharding/shard_existing.js54
-rw-r--r--jstests/sharding/shard_existing_coll_chunk_count.js323
-rw-r--r--jstests/sharding/shard_identity_config_update.js134
-rw-r--r--jstests/sharding/shard_identity_rollback.js248
-rw-r--r--jstests/sharding/shard_insert_getlasterror_w2.js157
-rw-r--r--jstests/sharding/shard_keycount.js52
-rw-r--r--jstests/sharding/shard_kill_and_pooling.js99
-rw-r--r--jstests/sharding/shard_targeting.js89
-rw-r--r--jstests/sharding/shard_with_special_db_names.js38
-rw-r--r--jstests/sharding/sharded_limit_batchsize.js293
-rw-r--r--jstests/sharding/sharded_profile.js37
-rw-r--r--jstests/sharding/sharding_balance1.js98
-rw-r--r--jstests/sharding/sharding_balance2.js104
-rw-r--r--jstests/sharding/sharding_balance3.js129
-rw-r--r--jstests/sharding/sharding_balance4.js246
-rw-r--r--jstests/sharding/sharding_migrate_cursor1.js119
-rw-r--r--jstests/sharding/sharding_multiple_ns_rs.js72
-rw-r--r--jstests/sharding/sharding_options.js3
-rw-r--r--jstests/sharding/sharding_rs1.js81
-rw-r--r--jstests/sharding/sharding_rs2.js368
-rw-r--r--jstests/sharding/sharding_statistics_server_status.js343
-rw-r--r--jstests/sharding/shards_and_config_return_last_committed_optime.js342
-rw-r--r--jstests/sharding/single_shard_transaction_with_arbiter.js78
-rw-r--r--jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js144
-rw-r--r--jstests/sharding/snapshot_cursor_commands_mongos.js505
-rw-r--r--jstests/sharding/sort1.js198
-rw-r--r--jstests/sharding/split_against_shard_with_invalid_split_points.js74
-rw-r--r--jstests/sharding/split_large_key.js105
-rw-r--r--jstests/sharding/split_with_force_small.js92
-rw-r--r--jstests/sharding/ssv_config_check.js64
-rw-r--r--jstests/sharding/stale_mongos_updates_and_removes.js425
-rw-r--r--jstests/sharding/stale_version_write.js42
-rw-r--r--jstests/sharding/startup_with_all_configs_down.js167
-rw-r--r--jstests/sharding/stats.js419
-rw-r--r--jstests/sharding/tag_auto_split.js44
-rw-r--r--jstests/sharding/tag_auto_split_partial_key.js62
-rw-r--r--jstests/sharding/tag_range.js126
-rw-r--r--jstests/sharding/test_stacked_migration_cleanup.js91
-rw-r--r--jstests/sharding/time_zone_info_mongos.js191
-rw-r--r--jstests/sharding/top_chunk_autosplit.js262
-rw-r--r--jstests/sharding/top_chunk_split.js273
-rw-r--r--jstests/sharding/trace_missing_docs_test.js56
-rw-r--r--jstests/sharding/transactions_causal_consistency.js131
-rw-r--r--jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js60
-rw-r--r--jstests/sharding/transactions_error_labels.js375
-rw-r--r--jstests/sharding/transactions_expiration.js135
-rw-r--r--jstests/sharding/transactions_implicit_abort.js83
-rw-r--r--jstests/sharding/transactions_multi_writes.js260
-rw-r--r--jstests/sharding/transactions_read_concerns.js121
-rw-r--r--jstests/sharding/transactions_reject_writes_for_moved_chunks.js262
-rw-r--r--jstests/sharding/transactions_snapshot_errors_first_statement.js272
-rw-r--r--jstests/sharding/transactions_snapshot_errors_subsequent_statements.js188
-rw-r--r--jstests/sharding/transactions_stale_database_version_errors.js171
-rw-r--r--jstests/sharding/transactions_stale_shard_version_errors.js365
-rw-r--r--jstests/sharding/transactions_target_at_point_in_time.js160
-rw-r--r--jstests/sharding/transactions_targeting_errors.js48
-rw-r--r--jstests/sharding/transactions_view_resolution.js565
-rw-r--r--jstests/sharding/transactions_writes_not_retryable.js210
-rw-r--r--jstests/sharding/txn_agg.js163
-rw-r--r--jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js197
-rw-r--r--jstests/sharding/txn_commit_optimizations_for_read_only_shards.js645
-rw-r--r--jstests/sharding/txn_recover_decision_using_recovery_router.js1025
-rw-r--r--jstests/sharding/txn_two_phase_commit_basic.js469
-rw-r--r--jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js99
-rw-r--r--jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js269
-rw-r--r--jstests/sharding/txn_two_phase_commit_failover.js367
-rw-r--r--jstests/sharding/txn_two_phase_commit_killop.js333
-rw-r--r--jstests/sharding/txn_two_phase_commit_server_status.js30
-rw-r--r--jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js226
-rw-r--r--jstests/sharding/txn_with_several_routers.js386
-rw-r--r--jstests/sharding/txn_writes_during_movechunk.js72
-rw-r--r--jstests/sharding/unique_index_on_shardservers.js44
-rw-r--r--jstests/sharding/unowned_doc_filtering.js68
-rw-r--r--jstests/sharding/unsharded_collection_targetting.js42
-rw-r--r--jstests/sharding/unsharded_lookup_in_txn.js130
-rw-r--r--jstests/sharding/update_compound_shard_key.js802
-rw-r--r--jstests/sharding/update_immutable_fields.js116
-rw-r--r--jstests/sharding/update_replace_id.js355
-rw-r--r--jstests/sharding/update_shard_key_conflicting_writes.js664
-rw-r--r--jstests/sharding/update_shard_key_doc_moves_shards.js825
-rw-r--r--jstests/sharding/update_shard_key_doc_on_same_shard.js1543
-rw-r--r--jstests/sharding/update_shard_key_pipeline_update.js437
-rw-r--r--jstests/sharding/update_sharded.js210
-rw-r--r--jstests/sharding/update_zone_key_range.js57
-rw-r--r--jstests/sharding/update_zone_key_range_not_sharded.js55
-rw-r--r--jstests/sharding/upsert_sharded.js210
-rw-r--r--jstests/sharding/use_rsm_data_for_cs.js54
-rw-r--r--jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js50
-rw-r--r--jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js55
-rw-r--r--jstests/sharding/validate_collection.js119
-rw-r--r--jstests/sharding/verify_sessions_expiration_sharded.js246
-rw-r--r--jstests/sharding/version1.js176
-rw-r--r--jstests/sharding/version2.js146
-rw-r--r--jstests/sharding/view_rewrite.js440
-rw-r--r--jstests/sharding/views.js293
-rw-r--r--jstests/sharding/wildcard_index_banned_for_shard_key.js52
-rw-r--r--jstests/sharding/write_cmd_auto_split.js222
-rw-r--r--jstests/sharding/write_commands_sharding_state.js118
-rw-r--r--jstests/sharding/write_transactions_during_migration.js299
-rw-r--r--jstests/sharding/zbigMapReduce.js374
-rw-r--r--jstests/sharding/zero_shard_version.js275
-rw-r--r--jstests/slow1/conc_update.js82
-rw-r--r--jstests/slow1/initial_sync_many_dbs.js92
-rw-r--r--jstests/slow1/mr_during_migrate.js152
-rw-r--r--jstests/slow1/replsets_priority1.js311
-rw-r--r--jstests/slow1/sharding_multiple_collections.js100
-rw-r--r--jstests/ssl/canonicalize_command_line_opts.js58
-rw-r--r--jstests/ssl/config-canonicalize-normal-ports.js16
-rw-r--r--jstests/ssl/libs/ssl_x509_role_auth.js26
-rw-r--r--jstests/ssl/libs/ssl_x509_role_auth_email.js16
-rw-r--r--jstests/ssl/libs/ssl_x509_role_auth_escape.js18
-rw-r--r--jstests/ssl/libs/ssl_x509_role_auth_utf8.js16
-rw-r--r--jstests/ssl/mixed_mode_sharded_transition.js32
-rw-r--r--jstests/ssl/mongo_uri_secondaries.js108
-rw-r--r--jstests/ssl/repl_ssl_noca.js105
-rw-r--r--jstests/ssl/repl_ssl_split_horizon.js363
-rw-r--r--jstests/ssl/sharding_with_x509.js126
-rw-r--r--jstests/ssl/shell_option_parsing.js391
-rw-r--r--jstests/ssl/shell_x509_system_user.js116
-rw-r--r--jstests/ssl/ssl_ECDHE_suites.js176
-rw-r--r--jstests/ssl/ssl_alert_reporting.js108
-rw-r--r--jstests/ssl/ssl_cert_password.js26
-rw-r--r--jstests/ssl/ssl_client_certificate_warning_suppression.js90
-rw-r--r--jstests/ssl/ssl_cluster_ca.js130
-rw-r--r--jstests/ssl/ssl_cluster_file.js58
-rw-r--r--jstests/ssl/ssl_cn_with_san.js68
-rw-r--r--jstests/ssl/ssl_count_protocols.js189
-rw-r--r--jstests/ssl/ssl_fragment.js109
-rw-r--r--jstests/ssl/ssl_get_more.js115
-rw-r--r--jstests/ssl/ssl_intermediate_ca.js52
-rw-r--r--jstests/ssl/ssl_private_key.js66
-rw-r--r--jstests/ssl/ssl_restricted_protocols.js66
-rw-r--r--jstests/ssl/ssl_uri.js100
-rw-r--r--jstests/ssl/ssl_with_system_ca.js76
-rw-r--r--jstests/ssl/ssl_withhold_client_cert.js94
-rw-r--r--jstests/ssl/ssl_x509_SAN.js117
-rw-r--r--jstests/ssl/ssl_x509_roles.js283
-rw-r--r--jstests/ssl/upgrade_noauth_to_x509_ssl.js64
-rw-r--r--jstests/ssl/x509_all_the_oids.js73
-rw-r--r--jstests/ssl/x509_custom.js98
-rw-r--r--jstests/ssl/x509_invalid.js111
-rw-r--r--jstests/ssl/x509_multivalue.js96
-rw-r--r--jstests/ssl/x509_startup_warning.js99
-rw-r--r--jstests/sslSpecial/SERVER-26369.js32
-rw-r--r--jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js22
-rw-r--r--jstests/sslSpecial/tls1_0.js179
-rw-r--r--jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js40
-rw-r--r--jstests/sslSpecial/x509_cluster_auth_rollover.js175
-rw-r--r--jstests/tool/csv1.js3
-rw-r--r--jstests/tool/dumprestore10.js129
-rw-r--r--jstests/tool/dumprestore3.js104
-rw-r--r--jstests/tool/dumprestore7.js190
-rw-r--r--jstests/tool/dumprestore9.js2
-rw-r--r--jstests/tool/dumprestore_auth2.js9
-rw-r--r--jstests/tool/dumprestore_auth3.js1
-rw-r--r--jstests/tool/dumprestore_excludecollections.js16
-rw-r--r--jstests/tool/dumpsecondary.js118
-rw-r--r--jstests/tool/gridfs.js4
-rw-r--r--jstests/tool/shell_mkdir.js64
-rw-r--r--jstests/tool/tool_replset.js176
-rw-r--r--jstests/watchdog/lib/charybdefs_lib.js2
-rw-r--r--jstests/watchdog/wd_auditpath_hang.js21
-rw-r--r--jstests/watchdog/wd_dbpath_hang.js9
-rw-r--r--jstests/watchdog/wd_journal_hang.js34
-rw-r--r--jstests/watchdog/wd_logpath_hang.js9
-rw-r--r--jstests/watchdog/wd_setparam.js85
-rw-r--r--src/mongo/base/clonable_ptr.h5
-rw-r--r--src/mongo/base/concept/assignable.h8
-rw-r--r--src/mongo/base/concept/clonable.h20
-rw-r--r--src/mongo/base/concept/clone_factory.h22
-rw-r--r--src/mongo/base/concept/constructible.h49
-rw-r--r--src/mongo/base/concept/convertible_to.h16
-rw-r--r--src/mongo/base/concept/copy_assignable.h20
-rw-r--r--src/mongo/base/concept/copy_constructible.h16
-rw-r--r--src/mongo/base/concept/unique_ptr.h50
-rw-r--r--src/mongo/base/data_type_validated_test.cpp2
-rw-r--r--src/mongo/base/encoded_value_storage_test.cpp2
-rw-r--r--src/mongo/base/global_initializer_registerer.h72
-rw-r--r--src/mongo/base/initializer.h16
-rw-r--r--src/mongo/base/initializer_function.h10
-rw-r--r--src/mongo/bson/bson_obj_test.cpp5
-rw-r--r--src/mongo/bson/bson_validate_test.cpp45
-rw-r--r--src/mongo/bson/bsonelement.cpp14
-rw-r--r--src/mongo/bson/bsonelement.h5
-rw-r--r--src/mongo/bson/bsonelement_test.cpp25
-rw-r--r--src/mongo/bson/bsonmisc.h2
-rw-r--r--src/mongo/bson/bsonobj.cpp4
-rw-r--r--src/mongo/bson/bsonobj.h12
-rw-r--r--src/mongo/bson/bsonobjbuilder.h2
-rw-r--r--src/mongo/bson/bsonobjbuilder_test.cpp5
-rw-r--r--src/mongo/bson/bsontypes.h2
-rw-r--r--src/mongo/bson/json.cpp6
-rw-r--r--src/mongo/bson/oid_test.cpp2
-rw-r--r--src/mongo/bson/ordering.h2
-rw-r--r--src/mongo/bson/timestamp.cpp2
-rw-r--r--src/mongo/bson/ugly_bson_integration_test.cpp5
-rw-r--r--src/mongo/bson/util/bson_check.h13
-rw-r--r--src/mongo/bson/util/bson_check_test.cpp13
-rw-r--r--src/mongo/bson/util/bson_extract.cpp25
-rw-r--r--src/mongo/bson/util/bson_extract_test.cpp6
-rw-r--r--src/mongo/bson/util/builder_test.cpp2
-rw-r--r--src/mongo/client/authenticate.cpp28
-rw-r--r--src/mongo/client/authenticate_test.cpp12
-rw-r--r--src/mongo/client/connection_string_connect.cpp2
-rw-r--r--src/mongo/client/constants.h2
-rw-r--r--src/mongo/client/cyrus_sasl_client_session.cpp5
-rw-r--r--src/mongo/client/dbclient_base.cpp25
-rw-r--r--src/mongo/client/dbclient_base.h8
-rw-r--r--src/mongo/client/dbclient_connection.cpp18
-rw-r--r--src/mongo/client/dbclient_cursor.cpp4
-rw-r--r--src/mongo/client/dbclient_cursor_test.cpp2
-rw-r--r--src/mongo/client/dbclient_rs.cpp49
-rw-r--r--src/mongo/client/dbclient_rs.h6
-rw-r--r--src/mongo/client/fetcher.cpp46
-rw-r--r--src/mongo/client/fetcher_test.cpp160
-rw-r--r--src/mongo/client/mongo_uri.cpp17
-rw-r--r--src/mongo/client/mongo_uri_test.cpp28
-rw-r--r--src/mongo/client/native_sasl_client_session.cpp2
-rw-r--r--src/mongo/client/query_spec.h6
-rw-r--r--src/mongo/client/read_preference.cpp27
-rw-r--r--src/mongo/client/read_preference_test.cpp24
-rw-r--r--src/mongo/client/remote_command_retry_scheduler_test.cpp3
-rw-r--r--src/mongo/client/replica_set_monitor.cpp35
-rw-r--r--src/mongo/client/replica_set_monitor.h1
-rw-r--r--src/mongo/client/replica_set_monitor_internal_test.cpp156
-rw-r--r--src/mongo/client/replica_set_monitor_manager.cpp6
-rw-r--r--src/mongo/client/replica_set_monitor_scan_test.cpp513
-rw-r--r--src/mongo/client/sasl_client_authenticate.h2
-rw-r--r--src/mongo/client/sasl_client_authenticate_impl.cpp2
-rw-r--r--src/mongo/client/sasl_scram_client_conversation.cpp9
-rw-r--r--src/mongo/client/sasl_sspi.cpp9
-rw-r--r--src/mongo/crypto/aead_encryption.cpp27
-rw-r--r--src/mongo/crypto/mechanism_scram.h9
-rw-r--r--src/mongo/crypto/sha_block.h10
-rw-r--r--src/mongo/crypto/symmetric_crypto_apple.cpp4
-rw-r--r--src/mongo/crypto/symmetric_crypto_openssl.cpp9
-rw-r--r--src/mongo/db/auth/authorization_manager_impl.cpp3
-rw-r--r--src/mongo/db/auth/authorization_manager_test.cpp30
-rw-r--r--src/mongo/db/auth/authorization_session_impl.cpp21
-rw-r--r--src/mongo/db/auth/authorization_session_test.cpp103
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp28
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp3
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_s.cpp55
-rw-r--r--src/mongo/db/auth/privilege_parser_test.cpp27
-rw-r--r--src/mongo/db/auth/role_graph.cpp42
-rw-r--r--src/mongo/db/auth/role_graph_test.cpp63
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp21
-rw-r--r--src/mongo/db/auth/sasl_authentication_session_test.cpp24
-rw-r--r--src/mongo/db/auth/sasl_mechanism_registry.cpp7
-rw-r--r--src/mongo/db/auth/sasl_mechanism_registry_test.cpp11
-rw-r--r--src/mongo/db/auth/sasl_options_init.cpp2
-rw-r--r--src/mongo/db/auth/sasl_plain_server_conversation.cpp5
-rw-r--r--src/mongo/db/auth/sasl_plain_server_conversation.h5
-rw-r--r--src/mongo/db/auth/sasl_scram_server_conversation.cpp15
-rw-r--r--src/mongo/db/auth/sasl_scram_test.cpp17
-rw-r--r--src/mongo/db/auth/security_file.cpp4
-rw-r--r--src/mongo/db/auth/user.cpp2
-rw-r--r--src/mongo/db/auth/user_document_parser.cpp8
-rw-r--r--src/mongo/db/auth/user_document_parser_test.cpp102
-rw-r--r--src/mongo/db/auth/user_management_commands_parser.cpp15
-rw-r--r--src/mongo/db/baton.cpp2
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp14
-rw-r--r--src/mongo/db/catalog/catalog_control.cpp7
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp17
-rw-r--r--src/mongo/db/catalog/collection_catalog.h5
-rw-r--r--src/mongo/db/catalog/collection_catalog_test.cpp2
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp4
-rw-r--r--src/mongo/db/catalog/collection_compact.h6
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp45
-rw-r--r--src/mongo/db/catalog/collection_options.cpp8
-rw-r--r--src/mongo/db/catalog/collection_options.h2
-rw-r--r--src/mongo/db/catalog/create_collection.cpp9
-rw-r--r--src/mongo/db/catalog/database_holder_impl.cpp8
-rw-r--r--src/mongo/db/catalog/database_impl.cpp21
-rw-r--r--src/mongo/db/catalog/database_test.cpp73
-rw-r--r--src/mongo/db/catalog/document_validation.h2
-rw-r--r--src/mongo/db/catalog/drop_database.cpp25
-rw-r--r--src/mongo/db/catalog/drop_database_test.cpp8
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp33
-rw-r--r--src/mongo/db/catalog/health_log.cpp4
-rw-r--r--src/mongo/db/catalog/health_log.h2
-rw-r--r--src/mongo/db/catalog/index_build_block.cpp4
-rw-r--r--src/mongo/db/catalog/index_builds_manager.cpp3
-rw-r--r--src/mongo/db/catalog/index_builds_manager_test.cpp3
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.cpp12
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp87
-rw-r--r--src/mongo/db/catalog/index_consistency.cpp3
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp74
-rw-r--r--src/mongo/db/catalog/index_key_validate_test.cpp6
-rw-r--r--src/mongo/db/catalog/index_spec_validate_test.cpp379
-rw-r--r--src/mongo/db/catalog/index_timestamp_helper.h4
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp15
-rw-r--r--src/mongo/db/catalog/private/record_store_validate_adaptor.cpp14
-rw-r--r--src/mongo/db/catalog/private/record_store_validate_adaptor.h2
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp29
-rw-r--r--src/mongo/db/catalog/rename_collection_test.cpp37
-rw-r--r--src/mongo/db/catalog/util/partitioned.h2
-rw-r--r--src/mongo/db/catalog/util/partitioned_test.cpp1
-rw-r--r--src/mongo/db/catalog_raii.cpp6
-rw-r--r--src/mongo/db/client.cpp4
-rw-r--r--src/mongo/db/clientcursor.cpp2
-rw-r--r--src/mongo/db/cloner.cpp43
-rw-r--r--src/mongo/db/commands/clone_collection.cpp4
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp3
-rw-r--r--src/mongo/db/commands/compact.cpp2
-rw-r--r--src/mongo/db/commands/connection_status.cpp2
-rw-r--r--src/mongo/db/commands/count_cmd.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp29
-rw-r--r--src/mongo/db/commands/dbcheck.cpp4
-rw-r--r--src/mongo/db/commands/dbcommands.cpp4
-rw-r--r--src/mongo/db/commands/dbcommands_d.cpp8
-rw-r--r--src/mongo/db/commands/dbhash.cpp9
-rw-r--r--src/mongo/db/commands/do_txn_cmd.cpp4
-rw-r--r--src/mongo/db/commands/driverHelpers.cpp2
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp2
-rw-r--r--src/mongo/db/commands/explain_cmd.cpp3
-rw-r--r--src/mongo/db/commands/fail_point_cmd.cpp2
-rw-r--r--src/mongo/db/commands/feature_compatibility_version.cpp9
-rw-r--r--src/mongo/db/commands/feature_compatibility_version_command_parser.cpp20
-rw-r--r--src/mongo/db/commands/feature_compatibility_version_documentation.h4
-rw-r--r--src/mongo/db/commands/feature_compatibility_version_parser.cpp75
-rw-r--r--src/mongo/db/commands/find_cmd.cpp10
-rw-r--r--src/mongo/db/commands/fsync.cpp4
-rw-r--r--src/mongo/db/commands/fsync_locked.h10
-rw-r--r--src/mongo/db/commands/generic_servers.cpp4
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp22
-rw-r--r--src/mongo/db/commands/hashcmd.cpp2
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp2
-rw-r--r--src/mongo/db/commands/list_databases.cpp2
-rw-r--r--src/mongo/db/commands/list_indexes.cpp4
-rw-r--r--src/mongo/db/commands/lock_info.cpp2
-rw-r--r--src/mongo/db/commands/mr.cpp22
-rw-r--r--src/mongo/db/commands/mr.h16
-rw-r--r--src/mongo/db/commands/mr_common.cpp4
-rw-r--r--src/mongo/db/commands/mr_test.cpp6
-rw-r--r--src/mongo/db/commands/parameters.cpp17
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp2
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp25
-rw-r--r--src/mongo/db/commands/repair_cursor.cpp2
-rw-r--r--src/mongo/db/commands/run_aggregate.cpp4
-rw-r--r--src/mongo/db/commands/server_status_internal.cpp2
-rw-r--r--src/mongo/db/commands/server_status_internal.h2
-rw-r--r--src/mongo/db/commands/server_status_metric.cpp2
-rw-r--r--src/mongo/db/commands/server_status_metric.h2
-rw-r--r--src/mongo/db/commands/sleep_command.cpp2
-rw-r--r--src/mongo/db/commands/snapshot_management.cpp2
-rw-r--r--src/mongo/db/commands/test_commands.cpp2
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp80
-rw-r--r--src/mongo/db/commands/user_management_commands_common.cpp35
-rw-r--r--src/mongo/db/commands/validate.cpp2
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp2
-rw-r--r--src/mongo/db/concurrency/d_concurrency_bm.cpp4
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp2
-rw-r--r--src/mongo/db/concurrency/lock_manager.cpp5
-rw-r--r--src/mongo/db/concurrency/lock_manager.h52
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp9
-rw-r--r--src/mongo/db/concurrency/lock_state_test.cpp11
-rw-r--r--src/mongo/db/concurrency/write_conflict_exception.cpp12
-rw-r--r--src/mongo/db/curop.cpp9
-rw-r--r--src/mongo/db/curop_failpoint_helpers.cpp2
-rw-r--r--src/mongo/db/curop_failpoint_helpers.h2
-rw-r--r--src/mongo/db/db.cpp4
-rw-r--r--src/mongo/db/db_raii.cpp3
-rw-r--r--src/mongo/db/dbdirectclient.cpp2
-rw-r--r--src/mongo/db/dbhelpers.cpp2
-rw-r--r--src/mongo/db/dbmessage.cpp4
-rw-r--r--src/mongo/db/dbmessage.h6
-rw-r--r--src/mongo/db/dbmessage_test.cpp2
-rw-r--r--src/mongo/db/exec/and_sorted.cpp2
-rw-r--r--src/mongo/db/exec/change_stream_proxy.cpp3
-rw-r--r--src/mongo/db/exec/collection_scan.cpp6
-rw-r--r--src/mongo/db/exec/count_scan.cpp2
-rw-r--r--src/mongo/db/exec/geo_near.cpp18
-rw-r--r--src/mongo/db/exec/queued_data_stage_test.cpp2
-rw-r--r--src/mongo/db/exec/record_store_fast_count.h2
-rw-r--r--src/mongo/db/exec/requires_collection_stage.cpp3
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp10
-rw-r--r--src/mongo/db/exec/text_or.cpp2
-rw-r--r--src/mongo/db/exec/update_stage.cpp3
-rw-r--r--src/mongo/db/exec/write_stage_common.h4
-rw-r--r--src/mongo/db/exhaust_cursor_currentop_integration_test.cpp14
-rw-r--r--src/mongo/db/field_parser_test.cpp76
-rw-r--r--src/mongo/db/field_ref_set.cpp4
-rw-r--r--src/mongo/db/free_mon/free_mon_controller.h32
-rw-r--r--src/mongo/db/free_mon/free_mon_controller_test.cpp201
-rw-r--r--src/mongo/db/free_mon/free_mon_message.h24
-rw-r--r--src/mongo/db/free_mon/free_mon_mongod.cpp31
-rw-r--r--src/mongo/db/free_mon/free_mon_op_observer.cpp5
-rw-r--r--src/mongo/db/free_mon/free_mon_options.h4
-rw-r--r--src/mongo/db/free_mon/free_mon_processor.cpp60
-rw-r--r--src/mongo/db/free_mon/free_mon_queue_test.cpp2
-rw-r--r--src/mongo/db/ftdc/compressor_test.cpp223
-rw-r--r--src/mongo/db/ftdc/controller.h16
-rw-r--r--src/mongo/db/ftdc/controller_test.cpp4
-rw-r--r--src/mongo/db/ftdc/file_manager.cpp10
-rw-r--r--src/mongo/db/ftdc/file_manager_test.cpp92
-rw-r--r--src/mongo/db/ftdc/file_reader.cpp3
-rw-r--r--src/mongo/db/ftdc/file_writer.cpp3
-rw-r--r--src/mongo/db/ftdc/file_writer_test.cpp86
-rw-r--r--src/mongo/db/ftdc/ftdc_system_stats.h1
-rw-r--r--src/mongo/db/ftdc/ftdc_system_stats_linux.cpp5
-rw-r--r--src/mongo/db/ftdc/util.cpp4
-rw-r--r--src/mongo/db/ftdc/util.h24
-rw-r--r--src/mongo/db/ftdc/varint.h4
-rw-r--r--src/mongo/db/fts/fts_element_iterator.cpp9
-rw-r--r--src/mongo/db/fts/fts_index_format.cpp16
-rw-r--r--src/mongo/db/fts/fts_index_format.h4
-rw-r--r--src/mongo/db/fts/fts_index_format_test.cpp18
-rw-r--r--src/mongo/db/fts/fts_language.cpp14
-rw-r--r--src/mongo/db/fts/fts_language.h4
-rw-r--r--src/mongo/db/fts/fts_language_test.cpp4
-rw-r--r--src/mongo/db/fts/fts_matcher.cpp4
-rw-r--r--src/mongo/db/fts/fts_matcher.h4
-rw-r--r--src/mongo/db/fts/fts_matcher_test.cpp4
-rw-r--r--src/mongo/db/fts/fts_query_impl.cpp4
-rw-r--r--src/mongo/db/fts/fts_query_impl.h4
-rw-r--r--src/mongo/db/fts/fts_query_impl_test.cpp4
-rw-r--r--src/mongo/db/fts/fts_query_parser.cpp4
-rw-r--r--src/mongo/db/fts/fts_query_parser.h4
-rw-r--r--src/mongo/db/fts/fts_spec.cpp22
-rw-r--r--src/mongo/db/fts/fts_spec_legacy.cpp10
-rw-r--r--src/mongo/db/fts/fts_spec_test.cpp22
-rw-r--r--src/mongo/db/fts/fts_util.cpp4
-rw-r--r--src/mongo/db/fts/fts_util.h4
-rw-r--r--src/mongo/db/fts/stemmer.cpp4
-rw-r--r--src/mongo/db/fts/stemmer.h4
-rw-r--r--src/mongo/db/fts/stemmer_test.cpp4
-rw-r--r--src/mongo/db/fts/stop_words.cpp6
-rw-r--r--src/mongo/db/fts/stop_words.h4
-rw-r--r--src/mongo/db/fts/stop_words_test.cpp4
-rw-r--r--src/mongo/db/fts/tokenizer.cpp4
-rw-r--r--src/mongo/db/fts/tokenizer.h4
-rw-r--r--src/mongo/db/fts/tokenizer_test.cpp4
-rw-r--r--src/mongo/db/fts/unicode/string.cpp2
-rw-r--r--src/mongo/db/fts/unicode/string_test.cpp2
-rw-r--r--src/mongo/db/geo/big_polygon.cpp2
-rw-r--r--src/mongo/db/geo/big_polygon.h2
-rw-r--r--src/mongo/db/geo/big_polygon_test.cpp169
-rw-r--r--src/mongo/db/geo/geometry_container.cpp5
-rw-r--r--src/mongo/db/geo/geoparser.cpp3
-rw-r--r--src/mongo/db/geo/geoparser_test.cpp2
-rw-r--r--src/mongo/db/geo/hash.cpp13
-rw-r--r--src/mongo/db/geo/hash_test.cpp2
-rw-r--r--src/mongo/db/geo/r2_region_coverer.cpp2
-rw-r--r--src/mongo/db/geo/shapes.h5
-rw-r--r--src/mongo/db/hasher.h2
-rw-r--r--src/mongo/db/hasher_test.cpp3
-rw-r--r--src/mongo/db/index/btree_key_generator.cpp4
-rw-r--r--src/mongo/db/index/btree_key_generator_test.cpp2
-rw-r--r--src/mongo/db/index/expression_params.cpp10
-rw-r--r--src/mongo/db/index/index_access_method.cpp4
-rw-r--r--src/mongo/db/index/index_build_interceptor.cpp10
-rw-r--r--src/mongo/db/index/index_build_interceptor.h6
-rw-r--r--src/mongo/db/index/index_descriptor.cpp5
-rw-r--r--src/mongo/db/index/s2_access_method.cpp36
-rw-r--r--src/mongo/db/index/s2_key_generator_test.cpp21
-rw-r--r--src/mongo/db/index/sort_key_generator_test.cpp3
-rw-r--r--src/mongo/db/index_builder.h2
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp42
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod.cpp10
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod_test.cpp3
-rw-r--r--src/mongo/db/initialize_server_global_state.cpp10
-rw-r--r--src/mongo/db/initialize_server_security_state.cpp4
-rw-r--r--src/mongo/db/introspect.cpp2
-rw-r--r--src/mongo/db/keypattern.cpp3
-rw-r--r--src/mongo/db/keypattern_test.cpp2
-rw-r--r--src/mongo/db/keys_collection_cache.cpp6
-rw-r--r--src/mongo/db/keys_collection_client.h4
-rw-r--r--src/mongo/db/keys_collection_client_direct.h4
-rw-r--r--src/mongo/db/keys_collection_client_sharded.h4
-rw-r--r--src/mongo/db/log_process_details.cpp2
-rw-r--r--src/mongo/db/logical_clock.cpp5
-rw-r--r--src/mongo/db/logical_session_cache_test.cpp5
-rw-r--r--src/mongo/db/logical_session_id_test.cpp48
-rw-r--r--src/mongo/db/logical_time_test.cpp10
-rw-r--r--src/mongo/db/matcher/expression.cpp2
-rw-r--r--src/mongo/db/matcher/expression.h2
-rw-r--r--src/mongo/db/matcher/expression_array.cpp2
-rw-r--r--src/mongo/db/matcher/expression_array.h6
-rw-r--r--src/mongo/db/matcher/expression_geo.cpp21
-rw-r--r--src/mongo/db/matcher/expression_geo_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_leaf.cpp2
-rw-r--r--src/mongo/db/matcher/expression_leaf.h2
-rw-r--r--src/mongo/db/matcher/expression_leaf_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_parser.cpp45
-rw-r--r--src/mongo/db/matcher/expression_parser_array_test.cpp154
-rw-r--r--src/mongo/db/matcher/expression_parser_leaf_test.cpp163
-rw-r--r--src/mongo/db/matcher/expression_parser_test.cpp9
-rw-r--r--src/mongo/db/matcher/expression_parser_tree_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_text.cpp6
-rw-r--r--src/mongo/db/matcher/expression_text_base.cpp6
-rw-r--r--src/mongo/db/matcher/expression_tree.cpp2
-rw-r--r--src/mongo/db/matcher/expression_tree.h2
-rw-r--r--src/mongo/db/matcher/expression_tree_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_type_test.cpp4
-rw-r--r--src/mongo/db/matcher/expression_where.cpp4
-rw-r--r--src/mongo/db/matcher/expression_where_noop.cpp2
-rw-r--r--src/mongo/db/matcher/expression_with_placeholder.cpp11
-rw-r--r--src/mongo/db/matcher/match_details.cpp2
-rw-r--r--src/mongo/db/matcher/match_details.h2
-rw-r--r--src/mongo/db/matcher/matchable.cpp2
-rw-r--r--src/mongo/db/matcher/matchable.h4
-rw-r--r--src/mongo/db/matcher/path.cpp2
-rw-r--r--src/mongo/db/matcher/path.h2
-rw-r--r--src/mongo/db/matcher/path_accepting_keyword_test.cpp45
-rw-r--r--src/mongo/db/matcher/path_test.cpp2
-rw-r--r--src/mongo/db/matcher/schema/expression_internal_schema_max_length.h4
-rw-r--r--src/mongo/db/matcher/schema/expression_internal_schema_min_length.h4
-rw-r--r--src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp3
-rw-r--r--src/mongo/db/matcher/schema/json_pointer_test.cpp10
-rw-r--r--src/mongo/db/matcher/schema/json_schema_parser.cpp159
-rw-r--r--src/mongo/db/mongod_options.cpp9
-rw-r--r--src/mongo/db/mongod_options.h2
-rw-r--r--src/mongo/db/multi_key_path_tracker.cpp4
-rw-r--r--src/mongo/db/multi_key_path_tracker_test.cpp3
-rw-r--r--src/mongo/db/namespace_string.cpp4
-rw-r--r--src/mongo/db/op_observer_impl.cpp8
-rw-r--r--src/mongo/db/op_observer_impl_test.cpp528
-rw-r--r--src/mongo/db/op_observer_util.h2
-rw-r--r--src/mongo/db/operation_time_tracker.cpp2
-rw-r--r--src/mongo/db/ops/delete.h2
-rw-r--r--src/mongo/db/ops/insert.cpp22
-rw-r--r--src/mongo/db/ops/insert.h2
-rw-r--r--src/mongo/db/ops/update.cpp3
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp8
-rw-r--r--src/mongo/db/ops/write_ops_parsers.cpp9
-rw-r--r--src/mongo/db/ops/write_ops_parsers_test.cpp80
-rw-r--r--src/mongo/db/ops/write_ops_retryability.cpp41
-rw-r--r--src/mongo/db/ops/write_ops_retryability_test.cpp45
-rw-r--r--src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp18
-rw-r--r--src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp20
-rw-r--r--src/mongo/db/pipeline/accumulator.h2
-rw-r--r--src/mongo/db/pipeline/accumulator_avg.cpp2
-rw-r--r--src/mongo/db/pipeline/accumulator_first.cpp2
-rw-r--r--src/mongo/db/pipeline/accumulator_last.cpp2
-rw-r--r--src/mongo/db/pipeline/accumulator_merge_objects.cpp3
-rw-r--r--src/mongo/db/pipeline/accumulator_min_max.cpp2
-rw-r--r--src/mongo/db/pipeline/accumulator_push.cpp2
-rw-r--r--src/mongo/db/pipeline/accumulator_std_dev.cpp2
-rw-r--r--src/mongo/db/pipeline/aggregation_request.cpp16
-rw-r--r--src/mongo/db/pipeline/dependencies.cpp2
-rw-r--r--src/mongo/db/pipeline/dependencies.h2
-rw-r--r--src/mongo/db/pipeline/dependencies_test.cpp3
-rw-r--r--src/mongo/db/pipeline/document.cpp5
-rw-r--r--src/mongo/db/pipeline/document.h2
-rw-r--r--src/mongo/db/pipeline/document_internal.h2
-rw-r--r--src/mongo/db/pipeline/document_source_add_fields.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_bucket.cpp39
-rw-r--r--src/mongo/db/pipeline/document_source_bucket_auto_test.cpp4
-rw-r--r--src/mongo/db/pipeline/document_source_change_stream.cpp18
-rw-r--r--src/mongo/db/pipeline/document_source_change_stream_test.cpp29
-rw-r--r--src/mongo/db/pipeline/document_source_coll_stats.cpp17
-rw-r--r--src/mongo/db/pipeline/document_source_current_op.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_current_op.h3
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_exchange.cpp12
-rw-r--r--src/mongo/db/pipeline/document_source_exchange_test.cpp84
-rw-r--r--src/mongo/db/pipeline/document_source_facet.cpp10
-rw-r--r--src/mongo/db/pipeline/document_source_graph_lookup.cpp25
-rw-r--r--src/mongo/db/pipeline/document_source_graph_lookup_test.cpp6
-rw-r--r--src/mongo/db/pipeline/document_source_group_test.cpp32
-rw-r--r--src/mongo/db/pipeline/document_source_index_stats.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h2
-rw-r--r--src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_internal_split_pipeline.h2
-rw-r--r--src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp3
-rw-r--r--src/mongo/db/pipeline/document_source_list_cached_and_active_users.h3
-rw-r--r--src/mongo/db/pipeline/document_source_list_local_sessions.h3
-rw-r--r--src/mongo/db/pipeline/document_source_lookup.cpp13
-rw-r--r--src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp17
-rw-r--r--src/mongo/db/pipeline/document_source_lookup_test.cpp137
-rw-r--r--src/mongo/db/pipeline/document_source_match.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_merge.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_merge.h2
-rw-r--r--src/mongo/db/pipeline/document_source_merge_cursors_test.cpp4
-rw-r--r--src/mongo/db/pipeline/document_source_merge_test.cpp199
-rw-r--r--src/mongo/db/pipeline/document_source_mock.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_out.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_plan_cache_stats.cpp12
-rw-r--r--src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp3
-rw-r--r--src/mongo/db/pipeline/document_source_queue.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_redact.cpp5
-rw-r--r--src/mongo/db/pipeline/document_source_replace_root.cpp10
-rw-r--r--src/mongo/db/pipeline/document_source_replace_root_test.cpp6
-rw-r--r--src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp6
-rw-r--r--src/mongo/db/pipeline/document_source_sequential_document_cache.cpp8
-rw-r--r--src/mongo/db/pipeline/document_source_sequential_document_cache.h2
-rw-r--r--src/mongo/db/pipeline/document_source_skip.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_unwind.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_unwind_test.cpp23
-rw-r--r--src/mongo/db/pipeline/document_source_writer.h2
-rw-r--r--src/mongo/db/pipeline/expression.cpp284
-rw-r--r--src/mongo/db/pipeline/expression.h22
-rw-r--r--src/mongo/db/pipeline/expression_convert_test.cpp40
-rw-r--r--src/mongo/db/pipeline/expression_date_test.cpp129
-rw-r--r--src/mongo/db/pipeline/expression_test.cpp194
-rw-r--r--src/mongo/db/pipeline/expression_trigonometric.h8
-rw-r--r--src/mongo/db/pipeline/expression_trigonometric_test.cpp2
-rw-r--r--src/mongo/db/pipeline/field_path.cpp2
-rw-r--r--src/mongo/db/pipeline/field_path.h2
-rw-r--r--src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp140
-rw-r--r--src/mongo/db/pipeline/lite_parsed_document_source.cpp2
-rw-r--r--src/mongo/db/pipeline/lite_parsed_pipeline.cpp3
-rw-r--r--src/mongo/db/pipeline/lookup_set_cache.h4
-rw-r--r--src/mongo/db/pipeline/mongos_process_interface.cpp9
-rw-r--r--src/mongo/db/pipeline/parsed_aggregation_projection.cpp41
-rw-r--r--src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp38
-rw-r--r--src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp21
-rw-r--r--src/mongo/db/pipeline/pipeline.cpp6
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp23
-rw-r--r--src/mongo/db/pipeline/pipeline_metadata_tree.h17
-rw-r--r--src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp15
-rw-r--r--src/mongo/db/pipeline/process_interface_standalone.cpp27
-rw-r--r--src/mongo/db/pipeline/process_interface_standalone_test.cpp2
-rw-r--r--src/mongo/db/pipeline/resume_token.cpp5
-rw-r--r--src/mongo/db/pipeline/resume_token_test.cpp4
-rw-r--r--src/mongo/db/pipeline/sharded_agg_helpers.cpp13
-rw-r--r--src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp8
-rw-r--r--src/mongo/db/pipeline/value.cpp14
-rw-r--r--src/mongo/db/pipeline/value.h4
-rw-r--r--src/mongo/db/pipeline/variables.cpp10
-rw-r--r--src/mongo/db/pipeline/variables.h2
-rw-r--r--src/mongo/db/query/canonical_query_encoder.cpp20
-rw-r--r--src/mongo/db/query/canonical_query_encoder.h4
-rw-r--r--src/mongo/db/query/collation/collation_index_key.cpp6
-rw-r--r--src/mongo/db/query/collation/collation_index_key_test.cpp3
-rw-r--r--src/mongo/db/query/collation/collation_spec_test.cpp144
-rw-r--r--src/mongo/db/query/collation/collator_factory_icu.cpp155
-rw-r--r--src/mongo/db/query/collation/collator_factory_icu_decoration.cpp4
-rw-r--r--src/mongo/db/query/collation/collator_factory_icu_test.cpp176
-rw-r--r--src/mongo/db/query/collation/collator_interface_mock_test.cpp10
-rw-r--r--src/mongo/db/query/count_command_test.cpp79
-rw-r--r--src/mongo/db/query/cursor_response.cpp18
-rw-r--r--src/mongo/db/query/cursor_response_test.cpp183
-rw-r--r--src/mongo/db/query/datetime/date_time_support.cpp10
-rw-r--r--src/mongo/db/query/datetime/date_time_support.h3
-rw-r--r--src/mongo/db/query/datetime/init_timezone_data.cpp3
-rw-r--r--src/mongo/db/query/explain.h2
-rw-r--r--src/mongo/db/query/explain_options.cpp11
-rw-r--r--src/mongo/db/query/find.cpp3
-rw-r--r--src/mongo/db/query/find_and_modify_request.cpp18
-rw-r--r--src/mongo/db/query/find_and_modify_request.h14
-rw-r--r--src/mongo/db/query/get_executor.cpp14
-rw-r--r--src/mongo/db/query/get_executor_test.cpp15
-rw-r--r--src/mongo/db/query/getmore_request.cpp11
-rw-r--r--src/mongo/db/query/getmore_request_test.cpp45
-rw-r--r--src/mongo/db/query/killcursors_request.cpp4
-rw-r--r--src/mongo/db/query/killcursors_request_test.cpp21
-rw-r--r--src/mongo/db/query/killcursors_response.cpp4
-rw-r--r--src/mongo/db/query/killcursors_response_test.cpp42
-rw-r--r--src/mongo/db/query/parsed_distinct.cpp18
-rw-r--r--src/mongo/db/query/parsed_distinct_test.cpp50
-rw-r--r--src/mongo/db/query/parsed_projection.cpp8
-rw-r--r--src/mongo/db/query/parsed_projection_test.cpp5
-rw-r--r--src/mongo/db/query/plan_cache_indexability.cpp2
-rw-r--r--src/mongo/db/query/plan_cache_indexability_test.cpp4
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp3
-rw-r--r--src/mongo/db/query/plan_enumerator.cpp14
-rw-r--r--src/mongo/db/query/planner_analysis.cpp2
-rw-r--r--src/mongo/db/query/planner_ixselect.cpp15
-rw-r--r--src/mongo/db/query/planner_ixselect_test.cpp10
-rw-r--r--src/mongo/db/query/query_planner.cpp15
-rw-r--r--src/mongo/db/query/query_planner_geo_test.cpp80
-rw-r--r--src/mongo/db/query/query_planner_test.cpp17
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp4
-rw-r--r--src/mongo/db/query/query_planner_text_test.cpp113
-rw-r--r--src/mongo/db/query/query_planner_wildcard_index_test.cpp3
-rw-r--r--src/mongo/db/query/query_request.cpp20
-rw-r--r--src/mongo/db/query/query_request_test.cpp2
-rw-r--r--src/mongo/db/query/query_settings_test.cpp4
-rw-r--r--src/mongo/db/query/query_solution.cpp2
-rw-r--r--src/mongo/db/query/query_solution_test.cpp3
-rw-r--r--src/mongo/db/query/stage_builder.cpp7
-rw-r--r--src/mongo/db/read_concern.h2
-rw-r--r--src/mongo/db/read_concern_mongod.cpp12
-rw-r--r--src/mongo/db/read_concern_test.cpp4
-rw-r--r--src/mongo/db/repair_database.cpp7
-rw-r--r--src/mongo/db/repair_database_and_check_version.cpp20
-rw-r--r--src/mongo/db/repl/abstract_async_component.cpp13
-rw-r--r--src/mongo/db/repl/abstract_async_component.h3
-rw-r--r--src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp2
-rw-r--r--src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h2
-rw-r--r--src/mongo/db/repl/applier_helpers.cpp3
-rw-r--r--src/mongo/db/repl/apply_ops.cpp11
-rw-r--r--src/mongo/db/repl/apply_ops.h2
-rw-r--r--src/mongo/db/repl/apply_ops_test.cpp83
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.cpp3
-rw-r--r--src/mongo/db/repl/bgsync.h22
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change.cpp5
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp241
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.cpp65
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp11
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp68
-rw-r--r--src/mongo/db/repl/database_cloner.cpp43
-rw-r--r--src/mongo/db/repl/database_cloner_test.cpp111
-rw-r--r--src/mongo/db/repl/databases_cloner_test.cpp68
-rw-r--r--src/mongo/db/repl/dbcheck.cpp32
-rw-r--r--src/mongo/db/repl/dbcheck.h4
-rw-r--r--src/mongo/db/repl/dbcheck_idl.h2
-rw-r--r--src/mongo/db/repl/do_txn.cpp8
-rw-r--r--src/mongo/db/repl/do_txn_test.cpp12
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper_test.cpp2
-rw-r--r--src/mongo/db/repl/idempotency_test_fixture.cpp7
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp28
-rw-r--r--src/mongo/db/repl/initial_syncer_test.cpp94
-rw-r--r--src/mongo/db/repl/is_master_response.cpp27
-rw-r--r--src/mongo/db/repl/isself.cpp3
-rw-r--r--src/mongo/db/repl/member_config.cpp12
-rw-r--r--src/mongo/db/repl/member_config_test.cpp180
-rw-r--r--src/mongo/db/repl/member_data.cpp5
-rw-r--r--src/mongo/db/repl/mock_repl_coord_server_fixture.h2
-rw-r--r--src/mongo/db/repl/oplog.cpp27
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.cpp9
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection_test.cpp41
-rw-r--r--src/mongo/db/repl/oplog_entry.cpp3
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp28
-rw-r--r--src/mongo/db/repl/oplog_interface_mock.cpp3
-rw-r--r--src/mongo/db/repl/oplog_test.cpp6
-rw-r--r--src/mongo/db/repl/optime_extract_test.cpp3
-rw-r--r--src/mongo/db/repl/read_concern_args.cpp38
-rw-r--r--src/mongo/db/repl/read_concern_args_test.cpp323
-rw-r--r--src/mongo/db/repl/repl_set_config.cpp112
-rw-r--r--src/mongo/db/repl/repl_set_config_checks.cpp47
-rw-r--r--src/mongo/db/repl/repl_set_config_checks_test.cpp432
-rw-r--r--src/mongo/db/repl/repl_set_config_test.cpp1272
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp7
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response.cpp15
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response_test.cpp128
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_impl.cpp21
-rw-r--r--src/mongo/db/repl/replication_coordinator.h12
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp22
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.h10
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp12
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp371
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp10
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp72
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp131
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp1515
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.cpp30
-rw-r--r--src/mongo/db/repl/replication_info.cpp5
-rw-r--r--src/mongo/db/repl/replication_recovery.cpp3
-rw-r--r--src/mongo/db/repl/replication_recovery_test.cpp8
-rw-r--r--src/mongo/db/repl/reporter_test.cpp9
-rw-r--r--src/mongo/db/repl/roll_back_local_operations.cpp20
-rw-r--r--src/mongo/db/repl/roll_back_local_operations_test.cpp31
-rw-r--r--src/mongo/db/repl/rollback_impl.cpp39
-rw-r--r--src/mongo/db/repl/rollback_impl.h2
-rw-r--r--src/mongo/db/repl/rollback_impl_test.cpp92
-rw-r--r--src/mongo/db/repl/rollback_source_impl.cpp4
-rw-r--r--src/mongo/db/repl/rollback_test_fixture.cpp7
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp48
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp633
-rw-r--r--src/mongo/db/repl/split_horizon_test.cpp3
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp42
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp42
-rw-r--r--src/mongo/db/repl/storage_interface_mock.h9
-rw-r--r--src/mongo/db/repl/sync_source_resolver.cpp22
-rw-r--r--src/mongo/db/repl/sync_source_selector.h2
-rw-r--r--src/mongo/db/repl/sync_tail.cpp34
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp170
-rw-r--r--src/mongo/db/repl/task_runner.cpp1
-rw-r--r--src/mongo/db/repl/topology_coordinator.cpp43
-rw-r--r--src/mongo/db/repl/topology_coordinator.h2
-rw-r--r--src/mongo/db/repl/topology_coordinator_v1_test.cpp1200
-rw-r--r--src/mongo/db/repl/vote_requester_test.cpp90
-rw-r--r--src/mongo/db/repl_index_build_state.h4
-rw-r--r--src/mongo/db/s/active_migrations_registry.cpp9
-rw-r--r--src/mongo/db/s/active_move_primaries_registry.cpp4
-rw-r--r--src/mongo/db/s/active_move_primaries_registry.h2
-rw-r--r--src/mongo/db/s/active_move_primaries_registry_test.cpp2
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.cpp6
-rw-r--r--src/mongo/db/s/add_shard_util.cpp2
-rw-r--r--src/mongo/db/s/add_shard_util.h2
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp12
-rw-r--r--src/mongo/db/s/balancer/balancer_policy.cpp6
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp5
-rw-r--r--src/mongo/db/s/balancer/migration_manager_test.cpp16
-rw-r--r--src/mongo/db/s/balancer/scoped_migration_request.cpp12
-rw-r--r--src/mongo/db/s/check_sharding_index_command.cpp9
-rw-r--r--src/mongo/db/s/chunk_splitter.cpp10
-rw-r--r--src/mongo/db/s/cleanup_orphaned_cmd.cpp6
-rw-r--r--src/mongo/db/s/collection_metadata.cpp3
-rw-r--r--src/mongo/db/s/collection_metadata_filtering_test.cpp9
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp3
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp18
-rw-r--r--src/mongo/db/s/collection_range_deleter.h16
-rw-r--r--src/mongo/db/s/collection_sharding_runtime.cpp3
-rw-r--r--src/mongo/db/s/collection_sharding_state_test.cpp23
-rw-r--r--src/mongo/db/s/config/configsvr_enable_sharding_command.cpp2
-rw-r--r--src/mongo/db/s/config/configsvr_move_primary_command.cpp7
-rw-r--r--src/mongo/db/s/config/configsvr_remove_shard_command.cpp4
-rw-r--r--src/mongo/db/s/config/configsvr_shard_collection_command.cpp32
-rw-r--r--src/mongo/db/s/config/initial_split_policy.cpp12
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.cpp3
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp122
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp38
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp11
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp8
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp16
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp50
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp8
-rw-r--r--src/mongo/db/s/config_server_op_observer_test.cpp2
-rw-r--r--src/mongo/db/s/flush_database_cache_updates_command.cpp3
-rw-r--r--src/mongo/db/s/merge_chunks_command.cpp45
-rw-r--r--src/mongo/db/s/metadata_manager.cpp12
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp23
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp8
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp34
-rw-r--r--src/mongo/db/s/migration_session_id.cpp4
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp44
-rw-r--r--src/mongo/db/s/migration_util.cpp2
-rw-r--r--src/mongo/db/s/migration_util.h2
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp4
-rw-r--r--src/mongo/db/s/move_primary_source_manager.cpp3
-rw-r--r--src/mongo/db/s/scoped_operation_completion_sharding_actions.h2
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination.cpp38
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.cpp8
-rw-r--r--src/mongo/db/s/set_shard_version_command.cpp16
-rw-r--r--src/mongo/db/s/shard_metadata_util_test.cpp7
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp71
-rw-r--r--src/mongo/db/s/shard_server_op_observer.cpp5
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp60
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod_test.cpp45
-rw-r--r--src/mongo/db/s/sharding_logging.cpp14
-rw-r--r--src/mongo/db/s/shardsvr_shard_collection.cpp57
-rw-r--r--src/mongo/db/s/split_chunk.cpp18
-rw-r--r--src/mongo/db/s/transaction_coordinator.cpp7
-rw-r--r--src/mongo/db/s/transaction_coordinator_catalog.cpp4
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.cpp10
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.h43
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util_test.cpp6
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.cpp2
-rw-r--r--src/mongo/db/s/transaction_coordinator_structures_test.cpp3
-rw-r--r--src/mongo/db/s/transaction_coordinator_test.cpp65
-rw-r--r--src/mongo/db/s/transaction_coordinator_util.cpp45
-rw-r--r--src/mongo/db/s/txn_two_phase_commit_cmds.cpp11
-rw-r--r--src/mongo/db/s/type_shard_identity_test.cpp21
-rw-r--r--src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp4
-rw-r--r--src/mongo/db/server_options.h18
-rw-r--r--src/mongo/db/server_options_helpers.h28
-rw-r--r--src/mongo/db/service_context_test_fixture.h6
-rw-r--r--src/mongo/db/service_entry_point_common.cpp37
-rw-r--r--src/mongo/db/session_catalog_mongod.cpp14
-rw-r--r--src/mongo/db/session_catalog_test.cpp42
-rw-r--r--src/mongo/db/sessions_collection_config_server.h18
-rw-r--r--src/mongo/db/sorter/sorter.cpp40
-rw-r--r--src/mongo/db/sorter/sorter.h2
-rw-r--r--src/mongo/db/startup_warnings_common.cpp6
-rw-r--r--src/mongo/db/startup_warnings_mongod.cpp14
-rw-r--r--src/mongo/db/stats/counters.cpp2
-rw-r--r--src/mongo/db/stats/counters.h2
-rw-r--r--src/mongo/db/stats/fine_clock.h2
-rw-r--r--src/mongo/db/stats/timer_stats.cpp2
-rw-r--r--src/mongo/db/stats/timer_stats.h2
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.cpp5
-rw-r--r--src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp8
-rw-r--r--src/mongo/db/storage/biggie/store.h8
-rw-r--r--src/mongo/db/storage/biggie/store_test.cpp4
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp2
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.h2
-rw-r--r--src/mongo/db/storage/capped_callback.h2
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.h2
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.cpp4
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp2
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h2
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp6
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp2
-rw-r--r--src/mongo/db/storage/journal_listener.h2
-rw-r--r--src/mongo/db/storage/key_string.cpp20
-rw-r--r--src/mongo/db/storage/key_string_test.cpp7
-rw-r--r--src/mongo/db/storage/kv/durable_catalog_test.cpp3
-rw-r--r--src/mongo/db/storage/kv/kv_engine.h2
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_harness.cpp9
-rw-r--r--src/mongo/db/storage/kv/kv_prefix.cpp2
-rw-r--r--src/mongo/db/storage/kv/kv_prefix.h2
-rw-r--r--src/mongo/db/storage/kv/temporary_kv_record_store.h3
-rw-r--r--src/mongo/db/storage/mobile/mobile_session_pool.h4
-rw-r--r--src/mongo/db/storage/record_store.h2
-rw-r--r--src/mongo/db/storage/record_store_test_harness.cpp4
-rw-r--r--src/mongo/db/storage/record_store_test_randomiter.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_recorditer.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_recordstore.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_repairiter.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_storagesize.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_touch.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_truncate.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_updaterecord.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_updatewithdamages.cpp2
-rw-r--r--src/mongo/db/storage/remove_saver.cpp2
-rw-r--r--src/mongo/db/storage/snapshot.h2
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp96
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp45
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp23
-rw-r--r--src/mongo/db/storage/storage_engine.h4
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp27
-rw-r--r--src/mongo/db/storage/storage_engine_init.cpp21
-rw-r--r--src/mongo/db/storage/storage_engine_interface.h2
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_posix.cpp34
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_test.cpp2
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_windows.cpp12
-rw-r--r--src/mongo/db/storage/storage_engine_metadata.cpp66
-rw-r--r--src/mongo/db/storage/storage_engine_metadata_test.cpp2
-rw-r--r--src/mongo/db/storage/storage_file_util.cpp12
-rw-r--r--src/mongo/db/storage/storage_init.cpp13
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp15
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp12
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp10
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp31
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp38
-rw-r--r--src/mongo/db/system_index.cpp16
-rw-r--r--src/mongo/db/traffic_reader.cpp4
-rw-r--r--src/mongo/db/traffic_recorder.cpp2
-rw-r--r--src/mongo/db/traffic_recorder_validators.cpp4
-rw-r--r--src/mongo/db/transaction_history_iterator.cpp3
-rw-r--r--src/mongo/db/transaction_participant.cpp104
-rw-r--r--src/mongo/db/transaction_participant_test.cpp187
-rw-r--r--src/mongo/db/update/addtoset_node.cpp3
-rw-r--r--src/mongo/db/update/addtoset_node_test.cpp2
-rw-r--r--src/mongo/db/update/arithmetic_node.cpp16
-rw-r--r--src/mongo/db/update/arithmetic_node_test.cpp2
-rw-r--r--src/mongo/db/update/bit_node.cpp15
-rw-r--r--src/mongo/db/update/bit_node.h2
-rw-r--r--src/mongo/db/update/bit_node_test.cpp4
-rw-r--r--src/mongo/db/update/compare_node_test.cpp2
-rw-r--r--src/mongo/db/update/current_date_node_test.cpp4
-rw-r--r--src/mongo/db/update/field_checker_test.cpp4
-rw-r--r--src/mongo/db/update/log_builder.cpp16
-rw-r--r--src/mongo/db/update/modifier_node.cpp18
-rw-r--r--src/mongo/db/update/object_replace_executor.cpp3
-rw-r--r--src/mongo/db/update/object_replace_executor_test.cpp2
-rw-r--r--src/mongo/db/update/path_support.cpp18
-rw-r--r--src/mongo/db/update/path_support_test.cpp20
-rw-r--r--src/mongo/db/update/pipeline_executor_test.cpp2
-rw-r--r--src/mongo/db/update/pop_node.cpp3
-rw-r--r--src/mongo/db/update/pull_node_test.cpp2
-rw-r--r--src/mongo/db/update/pullall_node.cpp2
-rw-r--r--src/mongo/db/update/pullall_node_test.cpp2
-rw-r--r--src/mongo/db/update/push_node.cpp6
-rw-r--r--src/mongo/db/update/push_node_test.cpp15
-rw-r--r--src/mongo/db/update/rename_node.cpp16
-rw-r--r--src/mongo/db/update/rename_node_test.cpp5
-rw-r--r--src/mongo/db/update/set_node_test.cpp2
-rw-r--r--src/mongo/db/update/storage_validation.cpp3
-rw-r--r--src/mongo/db/update/unset_node_test.cpp2
-rw-r--r--src/mongo/db/update/update_array_node.h2
-rw-r--r--src/mongo/db/update/update_driver.cpp16
-rw-r--r--src/mongo/db/update/update_leaf_node.cpp10
-rw-r--r--src/mongo/db/update/update_object_node.cpp33
-rw-r--r--src/mongo/db/update/update_object_node.h2
-rw-r--r--src/mongo/db/update/update_serialization_test.cpp2
-rw-r--r--src/mongo/db/update_index_data.cpp2
-rw-r--r--src/mongo/db/update_index_data.h2
-rw-r--r--src/mongo/db/update_index_data_test.cpp2
-rw-r--r--src/mongo/db/views/durable_view_catalog.cpp4
-rw-r--r--src/mongo/db/views/resolved_view_test.cpp27
-rw-r--r--src/mongo/db/views/view_catalog.cpp3
-rw-r--r--src/mongo/db/views/view_catalog_test.cpp3
-rw-r--r--src/mongo/db/views/view_graph.cpp6
-rw-r--r--src/mongo/db/write_concern.cpp2
-rw-r--r--src/mongo/dbtests/basictests.cpp4
-rw-r--r--src/mongo/dbtests/clienttests.cpp4
-rw-r--r--src/mongo/dbtests/commandtests.cpp10
-rw-r--r--src/mongo/dbtests/counttests.cpp9
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp2
-rw-r--r--src/mongo/dbtests/deferred_writer.cpp5
-rw-r--r--src/mongo/dbtests/directclienttests.cpp5
-rw-r--r--src/mongo/dbtests/framework.h2
-rw-r--r--src/mongo/dbtests/framework_options.cpp2
-rw-r--r--src/mongo/dbtests/framework_options.h2
-rw-r--r--src/mongo/dbtests/framework_options_init.cpp2
-rw-r--r--src/mongo/dbtests/indexcatalogtests.cpp6
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp168
-rw-r--r--src/mongo/dbtests/jsobjtests.cpp81
-rw-r--r--src/mongo/dbtests/jsontests.cpp9
-rw-r--r--src/mongo/dbtests/jstests.cpp20
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.h2
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_replica_set.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_replica_set.h2
-rw-r--r--src/mongo/dbtests/mock_dbclient_conn_test.cpp12
-rw-r--r--src/mongo/dbtests/mock_replica_set_test.cpp2
-rw-r--r--src/mongo/dbtests/multikey_paths_test.cpp45
-rw-r--r--src/mongo/dbtests/plan_executor_invalidation_test.cpp6
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp5
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp3
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_near.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp3
-rw-r--r--src/mongo/dbtests/query_stage_tests.cpp2
-rw-r--r--src/mongo/dbtests/querytests.cpp35
-rw-r--r--src/mongo/dbtests/replica_set_monitor_test.cpp24
-rw-r--r--src/mongo/dbtests/repltests.cpp2
-rw-r--r--src/mongo/dbtests/rollbacktests.cpp2
-rw-r--r--src/mongo/dbtests/storage_timestamp_tests.cpp481
-rw-r--r--src/mongo/dbtests/threadedtests.cpp2
-rw-r--r--src/mongo/dbtests/updatetests.cpp19
-rw-r--r--src/mongo/dbtests/validate_tests.cpp195
-rw-r--r--src/mongo/embedded/embedded.cpp4
-rw-r--r--src/mongo/embedded/embedded_ismaster.cpp2
-rw-r--r--src/mongo/embedded/embedded_options_helpers.cpp2
-rw-r--r--src/mongo/embedded/stitch_support/stitch_support_test.cpp4
-rw-r--r--src/mongo/executor/connection_pool.cpp21
-rw-r--r--src/mongo/executor/connection_pool_test.cpp4
-rw-r--r--src/mongo/executor/connection_pool_test_fixture.cpp8
-rw-r--r--src/mongo/executor/connection_pool_tl.cpp12
-rw-r--r--src/mongo/executor/connection_pool_tl.h2
-rw-r--r--src/mongo/executor/egress_tag_closer.h7
-rw-r--r--src/mongo/executor/network_interface_integration_test.cpp23
-rw-r--r--src/mongo/executor/network_interface_mock.cpp52
-rw-r--r--src/mongo/executor/network_interface_tl.cpp55
-rw-r--r--src/mongo/executor/scoped_task_executor.cpp4
-rw-r--r--src/mongo/executor/task_executor_cursor_integration_test.cpp3
-rw-r--r--src/mongo/executor/task_executor_cursor_test.cpp48
-rw-r--r--src/mongo/executor/task_executor_test_common.cpp7
-rw-r--r--src/mongo/executor/task_executor_test_common.h7
-rw-r--r--src/mongo/executor/thread_pool_task_executor.cpp2
-rw-r--r--src/mongo/idl/config_option_test.cpp22
-rw-r--r--src/mongo/idl/idl_parser.cpp20
-rw-r--r--src/mongo/idl/idl_parser.h9
-rw-r--r--src/mongo/idl/idl_test.cpp329
-rw-r--r--src/mongo/idl/server_parameter_specialized_test.cpp12
-rw-r--r--src/mongo/idl/server_parameter_with_storage.h11
-rw-r--r--src/mongo/logger/encoder.h2
-rw-r--r--src/mongo/logger/log_component.cpp4
-rw-r--r--src/mongo/logger/log_component_settings.cpp4
-rw-r--r--src/mongo/logger/log_manager.cpp4
-rw-r--r--src/mongo/logger/log_severity.cpp6
-rw-r--r--src/mongo/logger/log_test.cpp3
-rw-r--r--src/mongo/logger/parse_log_component_settings.cpp26
-rw-r--r--src/mongo/logger/parse_log_component_settings_test.cpp7
-rw-r--r--src/mongo/logger/ramlog.cpp2
-rw-r--r--src/mongo/logger/ramlog.h2
-rw-r--r--src/mongo/logger/rotatable_file_writer.cpp18
-rw-r--r--src/mongo/logger/rotatable_file_writer_test.cpp2
-rw-r--r--src/mongo/platform/atomic_proxy.h8
-rw-r--r--src/mongo/platform/bits.h2
-rw-r--r--src/mongo/platform/bits_test.cpp2
-rw-r--r--src/mongo/platform/decimal128_test.cpp6
-rw-r--r--src/mongo/platform/random_test.cpp2
-rw-r--r--src/mongo/platform/shared_library_posix.cpp3
-rw-r--r--src/mongo/platform/strcasestr.h2
-rw-r--r--src/mongo/rpc/get_status_from_command_result.cpp8
-rw-r--r--src/mongo/rpc/legacy_reply.cpp9
-rw-r--r--src/mongo/rpc/legacy_request.cpp4
-rw-r--r--src/mongo/rpc/metadata.cpp2
-rw-r--r--src/mongo/rpc/metadata/client_metadata.cpp39
-rw-r--r--src/mongo/rpc/metadata/client_metadata_test.cpp91
-rw-r--r--src/mongo/rpc/metadata/config_server_metadata.cpp4
-rw-r--r--src/mongo/rpc/metadata/logical_time_metadata_test.cpp2
-rw-r--r--src/mongo/rpc/metadata/oplog_query_metadata_test.cpp19
-rw-r--r--src/mongo/rpc/metadata/repl_set_metadata_test.cpp14
-rw-r--r--src/mongo/rpc/metadata/sharding_metadata_test.cpp10
-rw-r--r--src/mongo/rpc/metadata/tracking_metadata.cpp4
-rw-r--r--src/mongo/rpc/metadata/tracking_metadata_test.cpp9
-rw-r--r--src/mongo/rpc/metadata_test.cpp11
-rw-r--r--src/mongo/rpc/object_check_test.cpp2
-rw-r--r--src/mongo/rpc/op_msg_integration_test.cpp3
-rw-r--r--src/mongo/rpc/op_msg_test.cpp219
-rw-r--r--src/mongo/rpc/protocol.cpp13
-rw-r--r--src/mongo/rpc/protocol.h4
-rw-r--r--src/mongo/rpc/protocol_test.cpp9
-rw-r--r--src/mongo/rpc/write_concern_error_detail.cpp4
-rw-r--r--src/mongo/s/async_requests_sender.cpp7
-rw-r--r--src/mongo/s/balancer_configuration_test.cpp3
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_impl.cpp27
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.cpp37
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.h2
-rw-r--r--src/mongo/s/catalog/dist_lock_manager_mock.cpp8
-rw-r--r--src/mongo/s/catalog/dist_lock_ping_info.cpp2
-rw-r--r--src/mongo/s/catalog/dist_lock_ping_info.h2
-rw-r--r--src/mongo/s/catalog/mongo_version_range.cpp2
-rw-r--r--src/mongo/s/catalog/mongo_version_range.h2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp36
-rw-r--r--src/mongo/s/catalog/sharding_catalog_test.cpp74
-rw-r--r--src/mongo/s/catalog/type_changelog_test.cpp51
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp15
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp123
-rw-r--r--src/mongo/s/catalog/type_collection_test.cpp115
-rw-r--r--src/mongo/s/catalog/type_config_version_test.cpp8
-rw-r--r--src/mongo/s/catalog/type_database.cpp8
-rw-r--r--src/mongo/s/catalog/type_database_test.cpp3
-rw-r--r--src/mongo/s/catalog/type_locks_test.cpp94
-rw-r--r--src/mongo/s/catalog/type_mongos_test.cpp80
-rw-r--r--src/mongo/s/catalog/type_shard_database.cpp8
-rw-r--r--src/mongo/s/catalog/type_shard_test.cpp11
-rw-r--r--src/mongo/s/catalog/type_tags_test.cpp14
-rw-r--r--src/mongo/s/catalog_cache.cpp22
-rw-r--r--src/mongo/s/chunk.cpp3
-rw-r--r--src/mongo/s/chunk_manager.cpp6
-rw-r--r--src/mongo/s/chunk_manager_index_bounds_test.cpp3
-rw-r--r--src/mongo/s/client/parallel.cpp31
-rw-r--r--src/mongo/s/client/shard.h20
-rw-r--r--src/mongo/s/client/shard_registry.cpp4
-rw-r--r--src/mongo/s/client/shard_remote.cpp1
-rw-r--r--src/mongo/s/client/shard_remote.h8
-rw-r--r--src/mongo/s/client/sharding_connection_hook.cpp4
-rw-r--r--src/mongo/s/client/version_manager.cpp37
-rw-r--r--src/mongo/s/cluster_commands_helpers.cpp10
-rw-r--r--src/mongo/s/commands/cluster_count_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_data_size_cmd.cpp7
-rw-r--r--src/mongo/s/commands/cluster_explain.cpp19
-rw-r--r--src/mongo/s/commands/cluster_explain_cmd.cpp3
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_find_test.cpp3
-rw-r--r--src/mongo/s/commands/cluster_kill_op.cpp4
-rw-r--r--src/mongo/s/commands/cluster_map_reduce_cmd.cpp20
-rw-r--r--src/mongo/s/commands/cluster_merge_chunks_cmd.cpp8
-rw-r--r--src/mongo/s/commands/cluster_move_chunk_cmd.cpp15
-rw-r--r--src/mongo/s/commands/cluster_split_cmd.cpp20
-rw-r--r--src/mongo/s/commands/commands_public.cpp4
-rw-r--r--src/mongo/s/commands/strategy.cpp12
-rw-r--r--src/mongo/s/grid.cpp5
-rw-r--r--src/mongo/s/mongos_options.h2
-rw-r--r--src/mongo/s/query/async_results_merger.cpp6
-rw-r--r--src/mongo/s/query/async_results_merger_test.cpp85
-rw-r--r--src/mongo/s/query/blocking_results_merger_test.cpp1
-rw-r--r--src/mongo/s/query/cluster_aggregate.cpp7
-rw-r--r--src/mongo/s/query/cluster_aggregation_planner.cpp3
-rw-r--r--src/mongo/s/query/cluster_client_cursor_params.h2
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.cpp11
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.h2
-rw-r--r--src/mongo/s/query/cluster_find.cpp37
-rw-r--r--src/mongo/s/query/router_stage_pipeline.cpp3
-rw-r--r--src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp5
-rw-r--r--src/mongo/s/query/store_possible_cursor.h2
-rw-r--r--src/mongo/s/request_types/add_shard_request_test.cpp51
-rw-r--r--src/mongo/s/request_types/add_shard_to_zone_request_test.cpp15
-rw-r--r--src/mongo/s/request_types/balance_chunk_request_test.cpp42
-rw-r--r--src/mongo/s/request_types/merge_chunk_request_test.cpp116
-rw-r--r--src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp5
-rw-r--r--src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp15
-rw-r--r--src/mongo/s/request_types/set_shard_version_request_test.cpp256
-rw-r--r--src/mongo/s/request_types/split_chunk_request_test.cpp257
-rw-r--r--src/mongo/s/request_types/split_chunk_request_type.cpp4
-rw-r--r--src/mongo/s/request_types/update_zone_key_range_request_type.cpp5
-rw-r--r--src/mongo/s/server.cpp2
-rw-r--r--src/mongo/s/shard_key_pattern.cpp8
-rw-r--r--src/mongo/s/shard_key_pattern_test.cpp12
-rw-r--r--src/mongo/s/shard_util.cpp14
-rw-r--r--src/mongo/s/sharding_egress_metadata_hook.cpp4
-rw-r--r--src/mongo/s/sharding_initialization.h2
-rw-r--r--src/mongo/s/sharding_mongod_test_fixture.cpp5
-rw-r--r--src/mongo/s/sharding_router_test_fixture.cpp6
-rw-r--r--src/mongo/s/sharding_task_executor.cpp9
-rw-r--r--src/mongo/s/sharding_task_executor_pool_controller.cpp2
-rw-r--r--src/mongo/s/transaction_router.cpp45
-rw-r--r--src/mongo/s/transaction_router.h271
-rw-r--r--src/mongo/s/transaction_router_test.cpp179
-rw-r--r--src/mongo/s/write_ops/batch_downconvert.cpp11
-rw-r--r--src/mongo/s/write_ops/batch_downconvert_test.cpp13
-rw-r--r--src/mongo/s/write_ops/batch_write_exec.cpp13
-rw-r--r--src/mongo/s/write_ops/batch_write_op.cpp22
-rw-r--r--src/mongo/s/write_ops/batched_command_request_test.cpp20
-rw-r--r--src/mongo/s/write_ops/batched_command_response.cpp6
-rw-r--r--src/mongo/s/write_ops/batched_command_response_test.cpp14
-rw-r--r--src/mongo/s/write_ops/chunk_manager_targeter.cpp24
-rw-r--r--src/mongo/scripting/bson_template_evaluator.h2
-rw-r--r--src/mongo/scripting/bson_template_evaluator_test.cpp50
-rw-r--r--src/mongo/scripting/engine.cpp8
-rw-r--r--src/mongo/scripting/engine.h2
-rw-r--r--src/mongo/scripting/engine_none.cpp2
-rw-r--r--src/mongo/scripting/mozjs/bson.cpp4
-rw-r--r--src/mongo/scripting/mozjs/code.cpp9
-rw-r--r--src/mongo/scripting/mozjs/cursor_handle.cpp3
-rw-r--r--src/mongo/scripting/mozjs/implscope.cpp2
-rw-r--r--src/mongo/scripting/mozjs/mongo.cpp9
-rw-r--r--src/mongo/scripting/mozjs/mongohelpers.js3
-rw-r--r--src/mongo/scripting/mozjs/nativefunction.cpp3
-rw-r--r--src/mongo/scripting/mozjs/object.cpp3
-rw-r--r--src/mongo/scripting/mozjs/objectwrapper.cpp7
-rw-r--r--src/mongo/scripting/mozjs/regexp.cpp3
-rw-r--r--src/mongo/scripting/mozjs/session.cpp4
-rw-r--r--src/mongo/scripting/mozjs/timestamp.cpp7
-rw-r--r--src/mongo/scripting/mozjs/uri.cpp3
-rw-r--r--src/mongo/scripting/mozjs/valuewriter.cpp3
-rw-r--r--src/mongo/scripting/mozjs/wrapconstrainedmethod.h13
-rw-r--r--src/mongo/scripting/mozjs/wraptype.h30
-rw-r--r--src/mongo/shell/assert.js22
-rw-r--r--src/mongo/shell/bench.cpp27
-rw-r--r--src/mongo/shell/bench.h8
-rw-r--r--src/mongo/shell/bulk_api.js26
-rw-r--r--src/mongo/shell/collection.js178
-rw-r--r--src/mongo/shell/crud_api.js296
-rw-r--r--src/mongo/shell/db.js3272
-rw-r--r--src/mongo/shell/dbshell.cpp19
-rw-r--r--src/mongo/shell/encrypted_dbclient_base.cpp4
-rw-r--r--src/mongo/shell/encrypted_shell_options.h2
-rw-r--r--src/mongo/shell/explain_query.js5
-rw-r--r--src/mongo/shell/explainable.js7
-rw-r--r--src/mongo/shell/kms_aws.cpp2
-rw-r--r--src/mongo/shell/kms_local.cpp2
-rw-r--r--src/mongo/shell/linenoise.cpp12
-rw-r--r--src/mongo/shell/linenoise_utf8.h5
-rw-r--r--src/mongo/shell/mk_wcwidth.cpp18
-rw-r--r--src/mongo/shell/mongo.js17
-rw-r--r--src/mongo/shell/query.js96
-rw-r--r--src/mongo/shell/replsettest.js30
-rw-r--r--src/mongo/shell/servers.js2343
-rw-r--r--src/mongo/shell/servers_misc.js48
-rw-r--r--src/mongo/shell/session.js110
-rw-r--r--src/mongo/shell/shardingtest.js18
-rw-r--r--src/mongo/shell/shell_options.cpp11
-rw-r--r--src/mongo/shell/shell_options.h2
-rw-r--r--src/mongo/shell/shell_options_init.cpp2
-rw-r--r--src/mongo/shell/shell_utils.h4
-rw-r--r--src/mongo/shell/shell_utils_extended.cpp8
-rw-r--r--src/mongo/shell/shell_utils_extended.h2
-rw-r--r--src/mongo/shell/shell_utils_launcher.cpp10
-rw-r--r--src/mongo/shell/types.js35
-rw-r--r--src/mongo/shell/utils.js82
-rw-r--r--src/mongo/shell/utils_auth.js242
-rw-r--r--src/mongo/shell/utils_sh.js64
-rw-r--r--src/mongo/stdx/condition_variable.h2
-rw-r--r--src/mongo/stdx/mutex.h2
-rw-r--r--src/mongo/stdx/thread.h8
-rw-r--r--src/mongo/stdx/variant.h12
-rw-r--r--src/mongo/tools/mongobridge_options.h2
-rw-r--r--src/mongo/tools/mongobridge_options_init.cpp2
-rw-r--r--src/mongo/tools/mongoebench_options.cpp3
-rw-r--r--src/mongo/transport/baton_asio_linux.h10
-rw-r--r--src/mongo/transport/max_conns_override_test.cpp2
-rw-r--r--src/mongo/transport/message_compressor_manager_test.cpp5
-rw-r--r--src/mongo/transport/message_compressor_registry.h2
-rw-r--r--src/mongo/transport/service_entry_point.h8
-rw-r--r--src/mongo/transport/service_entry_point_impl.cpp6
-rw-r--r--src/mongo/transport/service_executor_adaptive.cpp47
-rw-r--r--src/mongo/transport/service_executor_adaptive_test.cpp15
-rw-r--r--src/mongo/transport/service_executor_synchronous.cpp2
-rw-r--r--src/mongo/transport/service_executor_test.cpp2
-rw-r--r--src/mongo/transport/service_state_machine.cpp2
-rw-r--r--src/mongo/transport/service_state_machine_test.cpp15
-rw-r--r--src/mongo/transport/session.h14
-rw-r--r--src/mongo/transport/session_asio.h5
-rw-r--r--src/mongo/transport/transport_layer_asio.cpp10
-rw-r--r--src/mongo/transport/transport_layer_asio_integration_test.cpp7
-rw-r--r--src/mongo/transport/transport_layer_asio_test.cpp4
-rw-r--r--src/mongo/transport/transport_layer_egress_init.cpp1
-rw-r--r--src/mongo/unittest/system_resource_canary_bm.cpp4
-rw-r--r--src/mongo/unittest/temp_dir.cpp2
-rw-r--r--src/mongo/unittest/unittest_helpers.cpp2
-rw-r--r--src/mongo/util/alarm.h2
-rw-r--r--src/mongo/util/alarm_test.cpp12
-rw-r--r--src/mongo/util/assert_util.cpp11
-rw-r--r--src/mongo/util/assert_util_test.cpp16
-rw-r--r--src/mongo/util/boost_assert_impl.cpp7
-rw-r--r--src/mongo/util/bson_util.h2
-rw-r--r--src/mongo/util/bufreader.h2
-rw-r--r--src/mongo/util/checksum.h2
-rw-r--r--src/mongo/util/clock_source_mock_test.cpp21
-rw-r--r--src/mongo/util/cmdline_utils/censor_cmdline.cpp2
-rw-r--r--src/mongo/util/cmdline_utils/censor_cmdline.h2
-rw-r--r--src/mongo/util/concurrency/idle_thread_block.cpp4
-rw-r--r--src/mongo/util/concurrency/mutex.h2
-rw-r--r--src/mongo/util/concurrency/thread_name.cpp4
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp4
-rw-r--r--src/mongo/util/concurrency/ticketholder.cpp5
-rw-r--r--src/mongo/util/concurrency/value.h2
-rw-r--r--src/mongo/util/debugger.cpp2
-rw-r--r--src/mongo/util/decimal_counter.h2
-rw-r--r--src/mongo/util/dns_name.h4
-rw-r--r--src/mongo/util/dns_query_test.cpp9
-rw-r--r--src/mongo/util/exception_filter_win32.cpp4
-rw-r--r--src/mongo/util/exit.cpp5
-rw-r--r--src/mongo/util/fail_point.cpp2
-rw-r--r--src/mongo/util/fail_point_test.cpp7
-rw-r--r--src/mongo/util/file.cpp21
-rw-r--r--src/mongo/util/file.h2
-rw-r--r--src/mongo/util/future.h2
-rw-r--r--src/mongo/util/future_impl.h11
-rw-r--r--src/mongo/util/future_test_edge_cases.cpp2
-rw-r--r--src/mongo/util/future_test_executor_future.cpp52
-rw-r--r--src/mongo/util/future_test_future_int.cpp22
-rw-r--r--src/mongo/util/future_test_future_move_only.cpp10
-rw-r--r--src/mongo/util/future_test_future_void.cpp22
-rw-r--r--src/mongo/util/future_test_shared_future.cpp5
-rw-r--r--src/mongo/util/future_test_utils.h5
-rw-r--r--src/mongo/util/hex.cpp2
-rw-r--r--src/mongo/util/hex.h2
-rw-r--r--src/mongo/util/if_constexpr.h4
-rw-r--r--src/mongo/util/intrusive_counter.cpp3
-rw-r--r--src/mongo/util/log.h58
-rw-r--r--src/mongo/util/log_and_backoff.cpp4
-rw-r--r--src/mongo/util/lru_cache_test.cpp5
-rw-r--r--src/mongo/util/map_util.h2
-rw-r--r--src/mongo/util/md5_test.cpp2
-rw-r--r--src/mongo/util/md5main.cpp4
-rw-r--r--src/mongo/util/net/cidr.cpp4
-rw-r--r--src/mongo/util/net/hostandport.cpp25
-rw-r--r--src/mongo/util/net/http_client_none.cpp2
-rw-r--r--src/mongo/util/net/http_client_winhttp.cpp6
-rw-r--r--src/mongo/util/net/private/socket_poll.cpp2
-rw-r--r--src/mongo/util/net/private/socket_poll.h2
-rw-r--r--src/mongo/util/net/sock.cpp6
-rw-r--r--src/mongo/util/net/ssl/context_schannel.hpp44
-rw-r--r--src/mongo/util/net/ssl/detail/impl/engine_apple.ipp8
-rw-r--r--src/mongo/util/net/ssl/detail/io.hpp2
-rw-r--r--src/mongo/util/net/ssl_manager.cpp23
-rw-r--r--src/mongo/util/net/ssl_manager.h6
-rw-r--r--src/mongo/util/net/ssl_manager_apple.cpp11
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp30
-rw-r--r--src/mongo/util/net/ssl_manager_test.cpp14
-rw-r--r--src/mongo/util/net/ssl_manager_windows.cpp74
-rw-r--r--src/mongo/util/net/ssl_options.cpp13
-rw-r--r--src/mongo/util/net/ssl_options.h24
-rw-r--r--src/mongo/util/net/ssl_parameters.cpp9
-rw-r--r--src/mongo/util/net/ssl_parameters_auth.cpp16
-rw-r--r--src/mongo/util/net/ssl_stream.cpp6
-rw-r--r--src/mongo/util/ntservice.cpp2
-rw-r--r--src/mongo/util/options_parser/constraints.h7
-rw-r--r--src/mongo/util/options_parser/environment_test.cpp9
-rw-r--r--src/mongo/util/options_parser/option_section.cpp6
-rw-r--r--src/mongo/util/options_parser/options_parser.cpp27
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp3
-rw-r--r--src/mongo/util/perfctr_collect.cpp4
-rw-r--r--src/mongo/util/perfctr_collect_test.cpp20
-rw-r--r--src/mongo/util/periodic_runner.h2
-rw-r--r--src/mongo/util/periodic_runner_factory.cpp2
-rw-r--r--src/mongo/util/periodic_runner_impl.cpp4
-rw-r--r--src/mongo/util/polymorphic_scoped.h4
-rw-r--r--src/mongo/util/processinfo.h2
-rw-r--r--src/mongo/util/processinfo_linux.cpp38
-rw-r--r--src/mongo/util/processinfo_openbsd.cpp2
-rw-r--r--src/mongo/util/processinfo_osx.cpp2
-rw-r--r--src/mongo/util/processinfo_solaris.cpp2
-rw-r--r--src/mongo/util/processinfo_test.cpp4
-rw-r--r--src/mongo/util/processinfo_unknown.cpp2
-rw-r--r--src/mongo/util/processinfo_windows.cpp2
-rw-r--r--src/mongo/util/procparser.cpp17
-rw-r--r--src/mongo/util/procparser.h12
-rw-r--r--src/mongo/util/procparser_test.cpp14
-rw-r--r--src/mongo/util/producer_consumer_queue.h9
-rw-r--r--src/mongo/util/producer_consumer_queue_test.cpp4
-rw-r--r--src/mongo/util/progress_meter.cpp2
-rw-r--r--src/mongo/util/progress_meter.h2
-rw-r--r--src/mongo/util/queue.h2
-rw-r--r--src/mongo/util/regex_util.cpp10
-rw-r--r--src/mongo/util/regex_util.h4
-rw-r--r--src/mongo/util/safe_num.h2
-rw-r--r--src/mongo/util/safe_num_test.cpp2
-rw-r--r--src/mongo/util/scopeguard.h2
-rw-r--r--src/mongo/util/shared_buffer.h2
-rw-r--r--src/mongo/util/signal_handlers.cpp2
-rw-r--r--src/mongo/util/signal_win32.cpp2
-rw-r--r--src/mongo/util/signal_win32.h2
-rw-r--r--src/mongo/util/stack_introspect.h2
-rw-r--r--src/mongo/util/stacktrace_posix.cpp4
-rw-r--r--src/mongo/util/stacktrace_windows.cpp2
-rw-r--r--src/mongo/util/string_map_test.cpp7
-rw-r--r--src/mongo/util/summation_test.cpp60
-rw-r--r--src/mongo/util/tcmalloc_set_parameter.cpp12
-rw-r--r--src/mongo/util/text.cpp8
-rw-r--r--src/mongo/util/tick_source_test.cpp2
-rw-r--r--src/mongo/util/unique_function_test.cpp2
-rw-r--r--src/mongo/util/unowned_ptr_test.cpp2
-rw-r--r--src/mongo/watchdog/watchdog_mongod.h4
3494 files changed, 188981 insertions, 199013 deletions
diff --git a/jstests/aggregation/bugs/cond.js b/jstests/aggregation/bugs/cond.js
index 313316f4418..84831ca11a7 100644
--- a/jstests/aggregation/bugs/cond.js
+++ b/jstests/aggregation/bugs/cond.js
@@ -1,88 +1,87 @@
// $cond returns the evaluated second argument if the first evaluates to true but the evaluated
// third argument if the first evaluates to false.
(function() {
- "use strict";
- load('jstests/aggregation/extras/utils.js');
+"use strict";
+load('jstests/aggregation/extras/utils.js');
- const coll = db.jstests_aggregation_cond;
- coll.drop();
+const coll = db.jstests_aggregation_cond;
+coll.drop();
- coll.save({});
+coll.save({});
- function assertError(expectedErrorCode, condSpec) {
- assertErrorCode(coll, {$project: {a: {$cond: condSpec}}}, expectedErrorCode);
- }
+function assertError(expectedErrorCode, condSpec) {
+ assertErrorCode(coll, {$project: {a: {$cond: condSpec}}}, expectedErrorCode);
+}
- function assertResult(expectedResult, arg) {
- assert.eq(expectedResult, coll.aggregate({$project: {a: {$cond: arg}}}).toArray()[0].a);
- }
+function assertResult(expectedResult, arg) {
+ assert.eq(expectedResult, coll.aggregate({$project: {a: {$cond: arg}}}).toArray()[0].a);
+}
- // Wrong number of args.
- assertError(16020, []);
- assertError(16020, [1]);
- assertError(16020, [false]);
- assertError(16020, [1, 1]);
- assertError(16020, [1, 1, null, 1]);
- assertError(16020, [1, 1, 1, undefined]);
+// Wrong number of args.
+assertError(16020, []);
+assertError(16020, [1]);
+assertError(16020, [false]);
+assertError(16020, [1, 1]);
+assertError(16020, [1, 1, null, 1]);
+assertError(16020, [1, 1, 1, undefined]);
- // Bad object cases.
- assertError(17080, {"else": 1, then: 1});
- assertError(17081, {"if": 1, "else": 1});
- assertError(17082, {"if": 1, then: 1});
- assertError(17083, {asdf: 1, then: 1});
+// Bad object cases.
+assertError(17080, {"else": 1, then: 1});
+assertError(17081, {"if": 1, "else": 1});
+assertError(17082, {"if": 1, then: 1});
+assertError(17083, {asdf: 1, then: 1});
- // Literal expressions.
- assertResult(1, [true, 1, 2]);
- assertResult(2, [false, 1, 2]);
+// Literal expressions.
+assertResult(1, [true, 1, 2]);
+assertResult(2, [false, 1, 2]);
- // Order independence for object case.
- assertResult(1, {"if": true, "then": 1, "else": 2});
- assertResult(1, {"if": true, "else": 2, "then": 1});
- assertResult(1, {"then": 1, "if": true, "else": 2});
- assertResult(1, {"then": 1, "else": 2, "if": true});
- assertResult(1, {"else": 2, "then": 1, "if": true});
- assertResult(1, {"else": 2, "if": true, "then": 1});
+// Order independence for object case.
+assertResult(1, {"if": true, "then": 1, "else": 2});
+assertResult(1, {"if": true, "else": 2, "then": 1});
+assertResult(1, {"then": 1, "if": true, "else": 2});
+assertResult(1, {"then": 1, "else": 2, "if": true});
+assertResult(1, {"else": 2, "then": 1, "if": true});
+assertResult(1, {"else": 2, "if": true, "then": 1});
- // Computed expressions.
- assertResult(1, [{$and: []}, {$add: [1]}, {$add: [1, 1]}]);
- assertResult(2, [{$or: []}, {$add: [1]}, {$add: [1, 1]}]);
+// Computed expressions.
+assertResult(1, [{$and: []}, {$add: [1]}, {$add: [1, 1]}]);
+assertResult(2, [{$or: []}, {$add: [1]}, {$add: [1, 1]}]);
- assert(coll.drop());
- assert.writeOK(coll.insert({t: true, f: false, x: 'foo', y: 'bar'}));
+assert(coll.drop());
+assert.writeOK(coll.insert({t: true, f: false, x: 'foo', y: 'bar'}));
- // Field path expressions.
- assertResult('foo', ['$t', '$x', '$y']);
- assertResult('bar', ['$f', '$x', '$y']);
+// Field path expressions.
+assertResult('foo', ['$t', '$x', '$y']);
+assertResult('bar', ['$f', '$x', '$y']);
- assert(coll.drop());
- assert.writeOK(coll.insert({}));
+assert(coll.drop());
+assert.writeOK(coll.insert({}));
- // Coerce to bool.
- assertResult('a', [1, 'a', 'b']);
- assertResult('a', ['', 'a', 'b']);
- assertResult('b', [0, 'a', 'b']);
+// Coerce to bool.
+assertResult('a', [1, 'a', 'b']);
+assertResult('a', ['', 'a', 'b']);
+assertResult('b', [0, 'a', 'b']);
- // Nested.
- assert(coll.drop());
- assert.writeOK(coll.insert({noonSense: 'am', mealCombined: 'no'}));
- assert.writeOK(coll.insert({noonSense: 'am', mealCombined: 'yes'}));
- assert.writeOK(coll.insert({noonSense: 'pm', mealCombined: 'yes'}));
- assert.writeOK(coll.insert({noonSense: 'pm', mealCombined: 'no'}));
- assert.eq(
- ['breakfast', 'brunch', 'dinner', 'linner'],
- coll.aggregate([
- {
- $project: {
- meal: {
- $cond: [
- {$eq: ['$noonSense', 'am']},
- {$cond: [{$eq: ['$mealCombined', 'yes']}, 'brunch', 'breakfast']},
- {$cond: [{$eq: ['$mealCombined', 'yes']}, 'linner', 'dinner']}
- ]
+// Nested.
+assert(coll.drop());
+assert.writeOK(coll.insert({noonSense: 'am', mealCombined: 'no'}));
+assert.writeOK(coll.insert({noonSense: 'am', mealCombined: 'yes'}));
+assert.writeOK(coll.insert({noonSense: 'pm', mealCombined: 'yes'}));
+assert.writeOK(coll.insert({noonSense: 'pm', mealCombined: 'no'}));
+assert.eq(['breakfast', 'brunch', 'dinner', 'linner'],
+ coll.aggregate([
+ {
+ $project: {
+ meal: {
+ $cond: [
+ {$eq: ['$noonSense', 'am']},
+ {$cond: [{$eq: ['$mealCombined', 'yes']}, 'brunch', 'breakfast']},
+ {$cond: [{$eq: ['$mealCombined', 'yes']}, 'linner', 'dinner']}
+ ]
+ }
}
- }
- },
- {$sort: {meal: 1}}
- ])
- .map(doc => doc.meal));
+ },
+ {$sort: {meal: 1}}
+ ])
+ .map(doc => doc.meal));
}());
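
An aside on the test above: it exercises both spellings of $cond. A minimal runnable
sketch of the two equivalent forms, assuming a throwaway collection named cond_example
(illustrative, not part of the suite):

const c = db.cond_example;
c.drop();
assert.writeOK(c.insert({score: 75}));
// Array form: [<if>, <then>, <else>].
printjson(c.aggregate([{$project: {grade: {$cond: [{$gte: ['$score', 70]}, 'pass', 'fail']}}}])
              .toArray());
// Object form: as the test verifies, the three named arguments may appear in any order.
printjson(
    c.aggregate([
         {$project: {grade: {$cond: {"if": {$gte: ['$score', 70]}, then: 'pass', "else": 'fail'}}}}
     ]).toArray());

Both pipelines report grade: 'pass', since 75 >= 70.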
diff --git a/jstests/aggregation/bugs/cursor_timeout.js b/jstests/aggregation/bugs/cursor_timeout.js
index f579fba407d..21260074d26 100644
--- a/jstests/aggregation/bugs/cursor_timeout.js
+++ b/jstests/aggregation/bugs/cursor_timeout.js
@@ -7,83 +7,82 @@
* ]
*/
(function() {
- 'use strict';
+'use strict';
- // Cursor timeout on mongod is handled by a single thread/timer that will sleep for
- // "clientCursorMonitorFrequencySecs" and add the sleep value to each operation's duration when
- // it wakes up, timing out those whose "now() - last accessed" time exceeds the timeout. A cursor
- // timeout of 2 seconds with a monitor frequency of 1 second means an effective timeout period
- // of 1 to 2 seconds.
- const cursorTimeoutMs = 2000;
- const cursorMonitorFrequencySecs = 1;
+// Cursor timeout on mongod is handled by a single thread/timer that will sleep for
+// "clientCursorMonitorFrequencySecs" and add the sleep value to each operation's duration when
+// it wakes up, timing out those whose "now() - last accessed" time exceeds the timeout. A cursor
+// timeout of 2 seconds with a monitor frequency of 1 second means an effective timeout period
+// of 1 to 2 seconds.
+const cursorTimeoutMs = 2000;
+const cursorMonitorFrequencySecs = 1;
- const options = {
- setParameter: {
- internalDocumentSourceCursorBatchSizeBytes: 1,
- // We use the "cursorTimeoutMillis" server parameter to decrease how long it takes for a
- // non-exhausted cursor to time out. We use the "clientCursorMonitorFrequencySecs"
- // server parameter to make the ClientCursorMonitor that cleans up the timed out cursors
- // run more often. The combination of these server parameters reduces the amount of time
- // we need to wait within this test.
- cursorTimeoutMillis: cursorTimeoutMs,
- clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs,
- }
- };
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+const options = {
+ setParameter: {
+ internalDocumentSourceCursorBatchSizeBytes: 1,
+ // We use the "cursorTimeoutMillis" server parameter to decrease how long it takes for a
+ // non-exhausted cursor to time out. We use the "clientCursorMonitorFrequencySecs"
+ // server parameter to make the ClientCursorMonitor that cleans up the timed out cursors
+ // run more often. The combination of these server parameters reduces the amount of time
+ // we need to wait within this test.
+ cursorTimeoutMillis: cursorTimeoutMs,
+ clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs,
+ }
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- const testDB = conn.getDB('test');
+const testDB = conn.getDB('test');
- // We use a batch size of 2 to ensure that the mongo shell does not exhaust the cursor on its
- // first batch.
- const batchSize = 2;
- const numMatches = 5;
+// We use a batch size of 2 to ensure that the mongo shell does not exhaust the cursor on its
+// first batch.
+const batchSize = 2;
+const numMatches = 5;
- function assertCursorTimesOut(collName, pipeline) {
- const res = assert.commandWorked(testDB.runCommand({
- aggregate: collName,
- pipeline: pipeline,
- cursor: {
- batchSize: batchSize,
- },
- }));
+function assertCursorTimesOut(collName, pipeline) {
+ const res = assert.commandWorked(testDB.runCommand({
+ aggregate: collName,
+ pipeline: pipeline,
+ cursor: {
+ batchSize: batchSize,
+ },
+ }));
- let serverStatus = assert.commandWorked(testDB.serverStatus());
- const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
+ let serverStatus = assert.commandWorked(testDB.serverStatus());
+ const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
- const cursor = new DBCommandCursor(testDB, res, batchSize);
+ const cursor = new DBCommandCursor(testDB, res, batchSize);
- // Wait until the idle cursor background job has killed the aggregation cursor.
- assert.soon(
- function() {
- serverStatus = assert.commandWorked(testDB.serverStatus());
- return +serverStatus.metrics.cursor.timedOut === expectedNumTimedOutCursors;
- },
- function() {
- return "aggregation cursor failed to time out: " +
- tojson(serverStatus.metrics.cursor);
- });
+ // Wait until the idle cursor background job has killed the aggregation cursor.
+ assert.soon(
+ function() {
+ serverStatus = assert.commandWorked(testDB.serverStatus());
+ return +serverStatus.metrics.cursor.timedOut === expectedNumTimedOutCursors;
+ },
+ function() {
+ return "aggregation cursor failed to time out: " + tojson(serverStatus.metrics.cursor);
+ });
- assert.eq(0, serverStatus.metrics.cursor.open.total, tojson(serverStatus));
+ assert.eq(0, serverStatus.metrics.cursor.open.total, tojson(serverStatus));
- // We attempt to exhaust the aggregation cursor to verify that sending a getMore returns an
- // error due to the cursor being killed.
- let err = assert.throws(function() {
- cursor.itcount();
- });
- assert.eq(ErrorCodes.CursorNotFound, err.code, tojson(err));
- }
+ // We attempt to exhaust the aggregation cursor to verify that sending a getMore returns an
+ // error due to the cursor being killed.
+ let err = assert.throws(function() {
+ cursor.itcount();
+ });
+ assert.eq(ErrorCodes.CursorNotFound, err.code, tojson(err));
+}
- assert.writeOK(testDB.source.insert({local: 1}));
- for (let i = 0; i < numMatches; ++i) {
- assert.writeOK(testDB.dest.insert({foreign: 1}));
- }
+assert.writeOK(testDB.source.insert({local: 1}));
+for (let i = 0; i < numMatches; ++i) {
+ assert.writeOK(testDB.dest.insert({foreign: 1}));
+}
- // Test that a regular aggregation cursor is killed when the timeout is reached.
- assertCursorTimesOut('dest', []);
+// Test that a regular aggregation cursor is killed when the timeout is reached.
+assertCursorTimesOut('dest', []);
- // Test that an aggregation cursor with a $lookup stage is killed when the timeout is reached.
- assertCursorTimesOut('source', [
+// Test that an aggregation cursor with a $lookup stage is killed when the timeout is reached.
+assertCursorTimesOut('source', [
{
$lookup: {
from: 'dest',
@@ -97,9 +96,9 @@
},
]);
- // Test that an aggregation cursor with nested $lookup stages is killed when the timeout is
- // reached.
- assertCursorTimesOut('source', [
+// Test that an aggregation cursor with nested $lookup stages is killed when the timeout is
+// reached.
+assertCursorTimesOut('source', [
{
$lookup: {
from: 'dest',
@@ -126,5 +125,5 @@
},
]);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
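
The timeout-window arithmetic described in the comment above, restated as a sketch
(parameter values mirror the test; the derived names are illustrative):

const cursorTimeoutMs = 2000;
const cursorMonitorFrequencySecs = 1;
// The monitor sleeps a full period between sweeps and credits that sleep to every open
// cursor, so a cursor created right after a sweep can be reaped after as little as one
// second of real idle time, while an idle cursor never survives past two seconds.
const minEffectiveTimeoutMs = cursorTimeoutMs - cursorMonitorFrequencySecs * 1000;  // 1000
const maxEffectiveTimeoutMs = cursorTimeoutMs;                                      // 2000
print('idle cursors time out after ' + minEffectiveTimeoutMs + ' to ' + maxEffectiveTimeoutMs +
      ' ms');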
diff --git a/jstests/aggregation/bugs/explain_options_helper.js b/jstests/aggregation/bugs/explain_options_helper.js
index 17360acab73..0834d56e469 100644
--- a/jstests/aggregation/bugs/explain_options_helper.js
+++ b/jstests/aggregation/bugs/explain_options_helper.js
@@ -2,23 +2,25 @@
// This test was designed to reproduce SERVER-32300.
(function() {
- "use strict";
+"use strict";
- const coll = db.explain_options;
- coll.drop();
+const coll = db.explain_options;
+coll.drop();
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({_id: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- const collation = {collation: {locale: "zh", backwards: false}};
+const collation = {
+ collation: {locale: "zh", backwards: false}
+};
- const firstResults = coll.aggregate([{$sort: {_id: 1}}], collation).toArray();
- // Issue an explain in order to verify that 'collation' is not modified to include the explain
- // flag.
- assert.commandWorked(coll.explain().aggregate([], collation));
+const firstResults = coll.aggregate([{$sort: {_id: 1}}], collation).toArray();
+// Issue an explain in order to verify that 'collation' is not modified to include the explain
+// flag.
+assert.commandWorked(coll.explain().aggregate([], collation));
- const secondResults = coll.aggregate([{$sort: {_id: 1}}], collation).toArray();
- // Assert that the result didn't change after an explain helper is issued.
- assert.eq(firstResults, secondResults);
+const secondResults = coll.aggregate([{$sort: {_id: 1}}], collation).toArray();
+// Assert that the result didn't change after an explain helper is issued.
+assert.eq(firstResults, secondResults);
}());
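
The invariant the test above guards, shown directly as a sketch (assumes the same
explain_options collection; not part of the suite):

const opts = {collation: {locale: 'zh', backwards: false}};
const before = tojson(opts);
db.explain_options.explain().aggregate([], opts);
// SERVER-32300: the explain helper used to modify the caller's options object to include
// the explain flag; after the fix, 'opts' must come back byte-for-byte unchanged.
assert.eq(before, tojson(opts));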
diff --git a/jstests/aggregation/bugs/firstlast.js b/jstests/aggregation/bugs/firstlast.js
index aa360a25b7e..8ab83fe30b7 100644
--- a/jstests/aggregation/bugs/firstlast.js
+++ b/jstests/aggregation/bugs/firstlast.js
@@ -2,120 +2,119 @@
* Tests the $first and $last accumulators in $group.
*/
(function() {
- 'use strict';
- const coll = db.jstests_aggregation_firstlast;
- coll.drop();
-
- /** Check expected $first and $last result values. */
- function assertFirstLast(expectedFirst, expectedLast, stages, expression) {
- let pipeline = [{$sort: {_id: 1}}];
- if (stages) {
- pipeline = pipeline.concat(stages);
- }
+'use strict';
+const coll = db.jstests_aggregation_firstlast;
+coll.drop();
+
+/** Check expected $first and $last result values. */
+function assertFirstLast(expectedFirst, expectedLast, stages, expression) {
+ let pipeline = [{$sort: {_id: 1}}];
+ if (stages) {
+ pipeline = pipeline.concat(stages);
+ }
- expression = expression || '$b';
- pipeline.push(
- {$group: {_id: '$a', first: {$first: expression}, last: {$last: expression}}});
-
- const result = coll.aggregate(pipeline).toArray();
- for (let i = 0; i < result.length; ++i) {
- if (result[i]._id === 1) {
- // Check results for group _id 1.
- assert.eq(expectedFirst, result[i].first);
- assert.eq(expectedLast, result[i].last);
- return;
- }
+ expression = expression || '$b';
+ pipeline.push({$group: {_id: '$a', first: {$first: expression}, last: {$last: expression}}});
+
+ const result = coll.aggregate(pipeline).toArray();
+ for (let i = 0; i < result.length; ++i) {
+ if (result[i]._id === 1) {
+ // Check results for group _id 1.
+ assert.eq(expectedFirst, result[i].first);
+ assert.eq(expectedLast, result[i].last);
+ return;
}
- throw new Error('Expected $group _id "1" is missing');
}
-
- // One document.
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assertFirstLast(1, 1);
-
- // Two documents.
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assertFirstLast(1, 2);
-
- // Three documents.
- assert.writeOK(coll.insert({a: 1, b: 3}));
- assertFirstLast(1, 3);
-
- // Another 'a' key value does not affect outcome.
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 3, b: 0}));
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 1, b: 3}));
- assert.writeOK(coll.insert({a: 2, b: 0}));
- assertFirstLast(1, 3);
-
- // Additional pipeline stages do not affect outcome if order is maintained.
- assertFirstLast(1, 3, [{$project: {x: '$a', y: '$b'}}, {$project: {a: '$x', b: '$y'}}]);
-
- // Additional pipeline stages affect outcome if order is modified.
- assertFirstLast(3, 1, [{$sort: {b: -1}}]);
-
- // Skip and limit affect the results seen.
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 1, b: 3}));
- assertFirstLast(1, 2, [{$limit: 2}]);
- assertFirstLast(2, 3, [{$skip: 1}, {$limit: 2}]);
- assertFirstLast(2, 2, [{$skip: 1}, {$limit: 1}]);
-
- // Mixed type values.
- assert.writeOK(coll.insert({a: 1, b: 'foo'}));
- assertFirstLast(1, 'foo');
-
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 'bar'}));
- assert.writeOK(coll.insert({a: 1, b: true}));
- assertFirstLast('bar', true);
-
- // Value null.
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: null}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assertFirstLast(null, 2);
-
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 1, b: null}));
- assertFirstLast(2, null);
-
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: null}));
- assert.writeOK(coll.insert({a: 1, b: null}));
- assertFirstLast(null, null);
-
- // Value missing.
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assertFirstLast(undefined, 2);
-
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 1}));
- assertFirstLast(2, undefined);
-
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 1}));
- assertFirstLast(undefined, undefined);
-
- // Dotted field.
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: [{c: 1}, {c: 2}]}));
- assert.writeOK(coll.insert({a: 1, b: [{c: 6}, {}]}));
- assertFirstLast([1, 2], [6], [], '$b.c');
-
- // Computed expressions.
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assertFirstLast(1, 0, [], {$mod: ['$b', 2]});
- assertFirstLast(0, 1, [], {$mod: [{$add: ['$b', 1]}, 2]});
+ throw new Error('Expected $group _id "1" is missing');
+}
+
+// One document.
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assertFirstLast(1, 1);
+
+// Two documents.
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assertFirstLast(1, 2);
+
+// Three documents.
+assert.writeOK(coll.insert({a: 1, b: 3}));
+assertFirstLast(1, 3);
+
+// Another 'a' key value does not affect outcome.
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 3, b: 0}));
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 1, b: 3}));
+assert.writeOK(coll.insert({a: 2, b: 0}));
+assertFirstLast(1, 3);
+
+// Additional pipeline stages do not affect outcome if order is maintained.
+assertFirstLast(1, 3, [{$project: {x: '$a', y: '$b'}}, {$project: {a: '$x', b: '$y'}}]);
+
+// Additional pipeline stages affect outcome if order is modified.
+assertFirstLast(3, 1, [{$sort: {b: -1}}]);
+
+// Skip and limit affect the results seen.
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 1, b: 3}));
+assertFirstLast(1, 2, [{$limit: 2}]);
+assertFirstLast(2, 3, [{$skip: 1}, {$limit: 2}]);
+assertFirstLast(2, 2, [{$skip: 1}, {$limit: 1}]);
+
+// Mixed type values.
+assert.writeOK(coll.insert({a: 1, b: 'foo'}));
+assertFirstLast(1, 'foo');
+
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: 'bar'}));
+assert.writeOK(coll.insert({a: 1, b: true}));
+assertFirstLast('bar', true);
+
+// Value null.
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: null}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assertFirstLast(null, 2);
+
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 1, b: null}));
+assertFirstLast(2, null);
+
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: null}));
+assert.writeOK(coll.insert({a: 1, b: null}));
+assertFirstLast(null, null);
+
+// Value missing.
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assertFirstLast(undefined, 2);
+
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 1}));
+assertFirstLast(2, undefined);
+
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
+assertFirstLast(undefined, undefined);
+
+// Dotted field.
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: [{c: 1}, {c: 2}]}));
+assert.writeOK(coll.insert({a: 1, b: [{c: 6}, {}]}));
+assertFirstLast([1, 2], [6], [], '$b.c');
+
+// Computed expressions.
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assertFirstLast(1, 0, [], {$mod: ['$b', 2]});
+assertFirstLast(0, 1, [], {$mod: [{$add: ['$b', 1]}, 2]});
}());
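
A short illustration of why every pipeline in the test above begins with {$sort: {_id: 1}}:
$first and $last are order-dependent accumulators, so the same $group gives different
answers under different sorts. A sketch with an illustrative collection:

const fl = db.firstlast_example;
fl.drop();
assert.writeOK(fl.insert({a: 1, b: 1}));
assert.writeOK(fl.insert({a: 1, b: 3}));
// Ascending by b: first = 1, last = 3.
printjson(fl.aggregate([{$sort: {b: 1}},
                        {$group: {_id: '$a', first: {$first: '$b'}, last: {$last: '$b'}}}])
              .toArray());
// Descending by b: the same group now reports first = 3, last = 1.
printjson(fl.aggregate([{$sort: {b: -1}},
                        {$group: {_id: '$a', first: {$first: '$b'}, last: {$last: '$b'}}}])
              .toArray());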
diff --git a/jstests/aggregation/bugs/groupMissing.js b/jstests/aggregation/bugs/groupMissing.js
index c08e70185b1..5f734abbee5 100644
--- a/jstests/aggregation/bugs/groupMissing.js
+++ b/jstests/aggregation/bugs/groupMissing.js
@@ -8,68 +8,68 @@
load('jstests/aggregation/extras/utils.js'); // For resultsEq.
(function() {
- "use strict";
+"use strict";
- var coll = db.groupMissing;
- coll.drop();
+var coll = db.groupMissing;
+coll.drop();
- coll.insert({a: null});
- coll.insert({});
+coll.insert({a: null});
+coll.insert({});
- var res = coll.aggregate({$group: {_id: "$a"}});
- var arr = res.toArray();
- assert.eq(arr.length, 1);
- assert.eq(arr[0]._id, null);
+var res = coll.aggregate({$group: {_id: "$a"}});
+var arr = res.toArray();
+assert.eq(arr.length, 1);
+assert.eq(arr[0]._id, null);
- coll.createIndex({a: 1});
- res = coll.aggregate({$sort: {a: 1}}, {$group: {_id: "$a"}});
- arr = res.toArray();
- assert.eq(arr.length, 1);
- assert.eq(arr[0]._id, null);
+coll.createIndex({a: 1});
+res = coll.aggregate({$sort: {a: 1}}, {$group: {_id: "$a"}});
+arr = res.toArray();
+assert.eq(arr.length, 1);
+assert.eq(arr[0]._id, null);
- coll.drop();
+coll.drop();
- coll.insert({a: null});
- coll.insert({});
+coll.insert({a: null});
+coll.insert({});
- // Bug, see SERVER-21992.
+// Bug, see SERVER-21992.
+res = coll.aggregate({$group: {_id: {a: "$a"}}});
+assert(resultsEq(res.toArray(), [{_id: {a: null}}]));
+
+// Correct behavior after SERVER-21992 is fixed.
+if (0) {
res = coll.aggregate({$group: {_id: {a: "$a"}}});
- assert(resultsEq(res.toArray(), [{_id: {a: null}}]));
+ assert(resultsEq(res.toArray(), [{_id: {a: null}}, {_id: {a: {}}}]));
+}
- // Correct behavior after SERVER-21992 is fixed.
- if (0) {
- res = coll.aggregate({$group: {_id: {a: "$a"}}});
- assert(resultsEq(res.toArray(), [{_id: {a: null}}, {_id: {a: {}}}]));
- }
+// Bug, see SERVER-21992.
+coll.createIndex({a: 1});
+res = coll.aggregate({$group: {_id: {a: "$a"}}});
+assert(resultsEq(res.toArray(), [{_id: {a: null}}]));
- // Bug, see SERVER-21992.
- coll.createIndex({a: 1});
+// Correct behavior after SERVER-21992 is fixed.
+if (0) {
res = coll.aggregate({$group: {_id: {a: "$a"}}});
- assert(resultsEq(res.toArray(), [{_id: {a: null}}]));
+ assert(resultsEq(res.toArray(), [{_id: {a: null}}, {_id: {a: {}}}]));
+}
- // Correct behavior after SERVER-21992 is fixed.
- if (0) {
- res = coll.aggregate({$group: {_id: {a: "$a"}}});
- assert(resultsEq(res.toArray(), [{_id: {a: null}}, {_id: {a: {}}}]));
- }
+coll.drop();
+coll.insert({a: null, b: 1});
+coll.insert({b: 1});
+coll.insert({a: null, b: 1});
- coll.drop();
- coll.insert({a: null, b: 1});
- coll.insert({b: 1});
- coll.insert({a: null, b: 1});
+res = coll.aggregate({$group: {_id: {a: "$a", b: "$b"}}});
+assert(resultsEq(res.toArray(), [{_id: {b: 1}}, {_id: {a: null, b: 1}}]));
- res = coll.aggregate({$group: {_id: {a: "$a", b: "$b"}}});
- assert(resultsEq(res.toArray(), [{_id: {b: 1}}, {_id: {a: null, b: 1}}]));
+// Bug, see SERVER-23229.
+coll.createIndex({a: 1, b: 1});
+res = coll.aggregate({$sort: {a: 1, b: 1}}, {$group: {_id: {a: "$a", b: "$b"}}});
+assert(resultsEq(res.toArray(), [{_id: {a: null, b: 1}}]));
- // Bug, see SERVER-23229.
+// Correct behavior after SERVER-23229 is fixed.
+if (0) {
coll.createIndex({a: 1, b: 1});
res = coll.aggregate({$sort: {a: 1, b: 1}}, {$group: {_id: {a: "$a", b: "$b"}}});
- assert(resultsEq(res.toArray(), [{_id: {a: null, b: 1}}]));
-
- // Correct behavior after SERVER-23229 is fixed.
- if (0) {
- coll.createIndex({a: 1, b: 1});
- res = coll.aggregate({$sort: {a: 1, b: 1}}, {$group: {_id: {a: "$a", b: "$b"}}});
- assert(resultsEq(res.toArray(), [{_id: {b: 1}}, {_id: {a: null, b: 1}}]));
- }
+ assert(resultsEq(res.toArray(), [{_id: {b: 1}}, {_id: {a: null, b: 1}}]));
+}
}());
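
The behavior SERVER-21992 tracks, restated as a sketch: when the group key is a document
such as {a: '$a'}, a document that lacks 'a' entirely and a document with an explicit
a: null currently collapse into the same group (and, per SERVER-23229 above, a $sort over
an index can change which groups come back at all):

const gm = db.groupMissing_example;  // illustrative name
gm.drop();
gm.insert({a: null});
gm.insert({});
// Today this prints a single group, {_id: {a: null}}; once SERVER-21992 is fixed the
// document with no 'a' field should form its own group instead of being conflated with
// the explicit null.
printjson(gm.aggregate([{$group: {_id: {a: '$a'}}}]).toArray());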
diff --git a/jstests/aggregation/bugs/lookup_unwind_getmore.js b/jstests/aggregation/bugs/lookup_unwind_getmore.js
index 3ba7dbf4007..67b970de820 100644
--- a/jstests/aggregation/bugs/lookup_unwind_getmore.js
+++ b/jstests/aggregation/bugs/lookup_unwind_getmore.js
@@ -8,45 +8,47 @@
* ]
*/
(function() {
- 'use strict';
+'use strict';
- const options = {setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+const options = {
+ setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- const testDB = conn.getDB('test');
+const testDB = conn.getDB('test');
- /**
- * Executes an aggregate with 'options.pipeline' and confirms that 'options.numResults'
- * results were returned.
- */
- function runTest(options) {
- // The batchSize must be smaller than the number of documents returned by the $lookup. This
- // ensures that the mongo shell will issue a getMore when unwinding the $lookup results for
- // the same document in the 'source' collection, under a different OperationContext.
- const batchSize = 2;
+/**
+ * Executes an aggregate with 'options.pipeline' and confirms that 'options.numResults'
+ * results were returned.
+ */
+function runTest(options) {
+ // The batchSize must be smaller than the number of documents returned by the $lookup. This
+ // ensures that the mongo shell will issue a getMore when unwinding the $lookup results for
+ // the same document in the 'source' collection, under a different OperationContext.
+ const batchSize = 2;
- testDB.source.drop();
- assert.writeOK(testDB.source.insert({x: 1}));
+ testDB.source.drop();
+ assert.writeOK(testDB.source.insert({x: 1}));
- testDB.dest.drop();
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(testDB.dest.insert({x: 1}));
- }
+ testDB.dest.drop();
+ for (let i = 0; i < 5; ++i) {
+ assert.writeOK(testDB.dest.insert({x: 1}));
+ }
- const res = assert.commandWorked(testDB.runCommand({
- aggregate: 'source',
- pipeline: options.pipeline,
- cursor: {
- batchSize: batchSize,
- },
- }));
+ const res = assert.commandWorked(testDB.runCommand({
+ aggregate: 'source',
+ pipeline: options.pipeline,
+ cursor: {
+ batchSize: batchSize,
+ },
+ }));
- const cursor = new DBCommandCursor(testDB, res, batchSize);
- assert.eq(options.numResults, cursor.itcount());
- }
+ const cursor = new DBCommandCursor(testDB, res, batchSize);
+ assert.eq(options.numResults, cursor.itcount());
+}
- runTest({
+runTest({
pipeline: [
{
$lookup: {
@@ -65,7 +67,7 @@
numResults: 5
});
- runTest({
+runTest({
pipeline: [
{
$lookup: {
@@ -99,5 +101,5 @@
numResults: 25
});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
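The batchSize comment inside runTest is the heart of this regression test. A stripped-down sketch of the same getMore mechanics, assuming a shell connected to a mongod as db (the collection name is illustrative):

    // A first batch of 2 forces the shell to issue getMore commands when
    // itcount() drains the remaining documents.
    const demoDB = db.getSiblingDB('test');
    demoDB.getmore_demo.drop();
    for (let i = 0; i < 10; ++i) {
        assert.writeOK(demoDB.getmore_demo.insert({i: i}));
    }
    const firstBatch = assert.commandWorked(demoDB.runCommand(
        {aggregate: 'getmore_demo', pipeline: [], cursor: {batchSize: 2}}));
    assert.eq(10, new DBCommandCursor(demoDB, firstBatch, 2).itcount());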
diff --git a/jstests/aggregation/bugs/lookup_unwind_killcursor.js b/jstests/aggregation/bugs/lookup_unwind_killcursor.js
index 45da6350c2f..eab9d05c591 100644
--- a/jstests/aggregation/bugs/lookup_unwind_killcursor.js
+++ b/jstests/aggregation/bugs/lookup_unwind_killcursor.js
@@ -8,43 +8,45 @@
* ]
*/
(function() {
- 'use strict';
+'use strict';
- const options = {setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+const options = {
+ setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- const testDB = conn.getDB('test');
+const testDB = conn.getDB('test');
- function runTest(pipeline) {
- // We use a batch size of 2 to ensure that the mongo shell does not exhaust the cursor on
- // its first batch.
- const batchSize = 2;
+function runTest(pipeline) {
+ // We use a batch size of 2 to ensure that the mongo shell does not exhaust the cursor on
+ // its first batch.
+ const batchSize = 2;
- testDB.source.drop();
- assert.writeOK(testDB.source.insert({x: 1}));
+ testDB.source.drop();
+ assert.writeOK(testDB.source.insert({x: 1}));
- testDB.dest.drop();
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(testDB.dest.insert({x: 1}));
- }
+ testDB.dest.drop();
+ for (let i = 0; i < 5; ++i) {
+ assert.writeOK(testDB.dest.insert({x: 1}));
+ }
- const res = assert.commandWorked(testDB.runCommand({
- aggregate: 'source',
- pipeline: pipeline,
- cursor: {
- batchSize: batchSize,
- },
- }));
+ const res = assert.commandWorked(testDB.runCommand({
+ aggregate: 'source',
+ pipeline: pipeline,
+ cursor: {
+ batchSize: batchSize,
+ },
+ }));
- const cursor = new DBCommandCursor(testDB, res, batchSize);
- cursor.close(); // Closing the cursor will issue the "killCursors" command.
+ const cursor = new DBCommandCursor(testDB, res, batchSize);
+ cursor.close(); // Closing the cursor will issue the "killCursors" command.
- const serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- assert.eq(0, serverStatus.metrics.cursor.open.total, tojson(serverStatus.metrics.cursor));
- }
+ const serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ assert.eq(0, serverStatus.metrics.cursor.open.total, tojson(serverStatus.metrics.cursor));
+}
- runTest([
+runTest([
{
$lookup: {
from: 'dest',
@@ -60,7 +62,7 @@
},
]);
- runTest([
+runTest([
{
$lookup: {
from: 'dest',
@@ -91,5 +93,5 @@
},
]);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
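For contrast with the getMore variant above, the kill path can be sketched on its own; the serverStatus metric below is the same one the test asserts on (names are illustrative, and on a shared server other open cursors may keep the count above zero, so this only prints it):

    const demoDB = db.getSiblingDB('test');
    demoDB.kill_demo.drop();
    for (let i = 0; i < 10; ++i) {
        assert.writeOK(demoDB.kill_demo.insert({i: i}));
    }
    const res = assert.commandWorked(demoDB.runCommand(
        {aggregate: 'kill_demo', pipeline: [], cursor: {batchSize: 2}}));
    new DBCommandCursor(demoDB, res, 2).close();  // issues "killCursors"
    const status = assert.commandWorked(demoDB.adminCommand({serverStatus: 1}));
    printjson(status.metrics.cursor.open);  // open.total drops once the cursor dies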
diff --git a/jstests/aggregation/bugs/match.js b/jstests/aggregation/bugs/match.js
index 8cb4519a861..6a545ed60c1 100644
--- a/jstests/aggregation/bugs/match.js
+++ b/jstests/aggregation/bugs/match.js
@@ -2,166 +2,169 @@
// - Filtering behavior equivalent to a mongo query.
// - $where and geo operators are not allowed
(function() {
- "use strict";
-
- load('jstests/aggregation/extras/utils.js');
-
- const coll = db.jstests_aggregation_match;
- coll.drop();
-
- const identityProjection = {_id: '$_id', a: '$a'};
-
- /** Assert that an aggregation generated the expected error. */
- function assertError(expectedCode, matchSpec) {
- const matchStage = {$match: matchSpec};
- // Check where matching is folded in to DocumentSourceCursor.
- assertErrorCode(coll, [matchStage], expectedCode);
- // Check where matching is not folded in to DocumentSourceCursor.
- assertErrorCode(coll, [{$project: identityProjection}, matchStage], expectedCode);
+"use strict";
+
+load('jstests/aggregation/extras/utils.js');
+
+const coll = db.jstests_aggregation_match;
+coll.drop();
+
+const identityProjection = {
+ _id: '$_id',
+ a: '$a'
+};
+
+/** Assert that an aggregation generated the expected error. */
+function assertError(expectedCode, matchSpec) {
+ const matchStage = {$match: matchSpec};
+ // Check where matching is folded into DocumentSourceCursor.
+ assertErrorCode(coll, [matchStage], expectedCode);
+ // Check where matching is not folded into DocumentSourceCursor.
+ assertErrorCode(coll, [{$project: identityProjection}, matchStage], expectedCode);
+}
+
+/** Assert that the contents of two arrays are equal, ignoring element ordering. */
+function assertEqualResultsUnordered(one, two) {
+ let oneStr = one.map(function(x) {
+ return tojson(x);
+ });
+ let twoStr = two.map(function(x) {
+ return tojson(x);
+ });
+ oneStr.sort();
+ twoStr.sort();
+ assert.eq(oneStr, twoStr);
+}
+
+/** Assert that an aggregation result is as expected. */
+function assertResults(expectedResults, matchSpec) {
+ const findResults = coll.find(matchSpec).toArray();
+ if (expectedResults) {
+ assertEqualResultsUnordered(expectedResults, findResults);
}
-
- /** Assert that the contents of two arrays are equal, ignoring element ordering. */
- function assertEqualResultsUnordered(one, two) {
- let oneStr = one.map(function(x) {
- return tojson(x);
- });
- let twoStr = two.map(function(x) {
- return tojson(x);
- });
- oneStr.sort();
- twoStr.sort();
- assert.eq(oneStr, twoStr);
- }
-
- /** Assert that an aggregation result is as expected. */
- function assertResults(expectedResults, matchSpec) {
- const findResults = coll.find(matchSpec).toArray();
- if (expectedResults) {
- assertEqualResultsUnordered(expectedResults, findResults);
- }
- const matchStage = {$match: matchSpec};
- // Check where matching is folded in to DocumentSourceCursor.
- assertEqualResultsUnordered(findResults, coll.aggregate(matchStage).toArray());
- // Check where matching is not folded in to DocumentSourceCursor.
- assertEqualResultsUnordered(
- findResults, coll.aggregate({$project: identityProjection}, matchStage).toArray());
- }
-
- // Invalid matcher syntax.
- assertError(2, {a: {$mod: [0 /* invalid */, 0]}});
-
- // $where not allowed.
- assertError(ErrorCodes.BadValue, {$where: 'true'});
-
- // Geo not allowed.
- assertError(ErrorCodes.BadValue, {$match: {a: {$near: [0, 0]}}});
-
- function checkMatchResults(indexed) {
- // No results.
- coll.remove({});
- assertResults([], {});
-
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: 2}));
- assert.writeOK(coll.insert({_id: 2, a: 3}));
-
- // Empty query.
- assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}], {});
-
- // Simple queries.
- assertResults([{_id: 0, a: 1}], {a: 1});
- assertResults([{_id: 1, a: 2}], {a: 2});
- assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}});
- assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}});
- assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}});
-
- // Regular expression.
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: 'x'}));
- assert.writeOK(coll.insert({_id: 1, a: 'yx'}));
- assertResults([{_id: 0, a: 'x'}], {a: /^x/});
- assertResults([{_id: 0, a: 'x'}, {_id: 1, a: 'yx'}], {a: /x/});
-
- // Dotted field.
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: {b: 4}}));
- assert.writeOK(coll.insert({_id: 1, a: 2}));
- assertResults([{_id: 0, a: {b: 4}}], {'a.b': 4});
-
- // Value within an array.
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3]}));
- assert.writeOK(coll.insert({_id: 1, a: [2, 2, 3]}));
- assert.writeOK(coll.insert({_id: 2, a: [2, 2, 2]}));
- assertResults([{_id: 0, a: [1, 2, 3]}, {_id: 1, a: [2, 2, 3]}], {a: 3});
-
- // Missing, null, $exists matching.
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 3, a: 0}));
- assertResults([{_id: 0}, {_id: 1, a: null}], {a: null});
- assertResults(null, {a: {$exists: true}});
- assertResults(null, {a: {$exists: false}});
-
- // $elemMatch
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: [1, 2]}));
- assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]}));
- assertResults([{_id: 1, a: [1, 2, 3]}], {a: {$elemMatch: {$gt: 1, $mod: [2, 1]}}});
-
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {c: 2}]}));
- assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 2}]}));
- assertResults([{_id: 1, a: [{b: 1, c: 2}]}], {a: {$elemMatch: {b: 1, c: 2}}});
-
- // $size
- coll.remove({});
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({a: null}));
- assert.writeOK(coll.insert({a: []}));
- assert.writeOK(coll.insert({a: [1]}));
- assert.writeOK(coll.insert({a: [1, 2]}));
- assertResults(null, {a: {$size: 0}});
- assertResults(null, {a: {$size: 1}});
- assertResults(null, {a: {$size: 2}});
-
- // $type
- coll.remove({});
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({a: null}));
- assert.writeOK(coll.insert({a: NumberInt(1)}));
- assert.writeOK(coll.insert({a: NumberLong(2)}));
- assert.writeOK(coll.insert({a: 66.6}));
- assert.writeOK(coll.insert({a: 'abc'}));
- assert.writeOK(coll.insert({a: /xyz/}));
- assert.writeOK(coll.insert({a: {q: 1}}));
- assert.writeOK(coll.insert({a: true}));
- assert.writeOK(coll.insert({a: new Date()}));
- assert.writeOK(coll.insert({a: new ObjectId()}));
- for (let type = 1; type <= 18; ++type) {
- assertResults(null, {a: {$type: type}});
- }
-
- coll.remove({});
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: 2}));
- assert.writeOK(coll.insert({_id: 2, a: 3}));
-
- // $and
- assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]});
- assertResults([], {$and: [{a: 1}, {_id: 1}]});
- assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}],
- {$and: [{$or: [{_id: 1}, {a: 3}]}, {$or: [{_id: 2}, {a: 2}]}]});
-
- // $or
- assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {$or: [{_id: 0}, {a: 3}]});
+ const matchStage = {$match: matchSpec};
+ // Check where matching is folded into DocumentSourceCursor.
+ assertEqualResultsUnordered(findResults, coll.aggregate(matchStage).toArray());
+ // Check where matching is not folded into DocumentSourceCursor.
+ assertEqualResultsUnordered(
+ findResults, coll.aggregate({$project: identityProjection}, matchStage).toArray());
+}
+
+// Invalid matcher syntax.
+assertError(2, {a: {$mod: [0 /* invalid */, 0]}});
+
+// $where not allowed.
+assertError(ErrorCodes.BadValue, {$where: 'true'});
+
+// Geo not allowed.
+assertError(ErrorCodes.BadValue, {$match: {a: {$near: [0, 0]}}});
+
+function checkMatchResults(indexed) {
+ // No results.
+ coll.remove({});
+ assertResults([], {});
+
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: 2}));
+ assert.writeOK(coll.insert({_id: 2, a: 3}));
+
+ // Empty query.
+ assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}], {});
+
+ // Simple queries.
+ assertResults([{_id: 0, a: 1}], {a: 1});
+ assertResults([{_id: 1, a: 2}], {a: 2});
+ assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}});
+ assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}});
+ assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}});
+
+ // Regular expression.
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0, a: 'x'}));
+ assert.writeOK(coll.insert({_id: 1, a: 'yx'}));
+ assertResults([{_id: 0, a: 'x'}], {a: /^x/});
+ assertResults([{_id: 0, a: 'x'}, {_id: 1, a: 'yx'}], {a: /x/});
+
+ // Dotted field.
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0, a: {b: 4}}));
+ assert.writeOK(coll.insert({_id: 1, a: 2}));
+ assertResults([{_id: 0, a: {b: 4}}], {'a.b': 4});
+
+ // Value within an array.
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3]}));
+ assert.writeOK(coll.insert({_id: 1, a: [2, 2, 3]}));
+ assert.writeOK(coll.insert({_id: 2, a: [2, 2, 2]}));
+ assertResults([{_id: 0, a: [1, 2, 3]}, {_id: 1, a: [2, 2, 3]}], {a: 3});
+
+ // Missing, null, $exists matching.
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0}));
+ assert.writeOK(coll.insert({_id: 1, a: null}));
+ assert.writeOK(coll.insert({_id: 3, a: 0}));
+ assertResults([{_id: 0}, {_id: 1, a: null}], {a: null});
+ assertResults(null, {a: {$exists: true}});
+ assertResults(null, {a: {$exists: false}});
+
+ // $elemMatch
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0, a: [1, 2]}));
+ assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]}));
+ assertResults([{_id: 1, a: [1, 2, 3]}], {a: {$elemMatch: {$gt: 1, $mod: [2, 1]}}});
+
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {c: 2}]}));
+ assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 2}]}));
+ assertResults([{_id: 1, a: [{b: 1, c: 2}]}], {a: {$elemMatch: {b: 1, c: 2}}});
+
+ // $size
+ coll.remove({});
+ assert.writeOK(coll.insert({}));
+ assert.writeOK(coll.insert({a: null}));
+ assert.writeOK(coll.insert({a: []}));
+ assert.writeOK(coll.insert({a: [1]}));
+ assert.writeOK(coll.insert({a: [1, 2]}));
+ assertResults(null, {a: {$size: 0}});
+ assertResults(null, {a: {$size: 1}});
+ assertResults(null, {a: {$size: 2}});
+
+ // $type
+ coll.remove({});
+ assert.writeOK(coll.insert({}));
+ assert.writeOK(coll.insert({a: null}));
+ assert.writeOK(coll.insert({a: NumberInt(1)}));
+ assert.writeOK(coll.insert({a: NumberLong(2)}));
+ assert.writeOK(coll.insert({a: 66.6}));
+ assert.writeOK(coll.insert({a: 'abc'}));
+ assert.writeOK(coll.insert({a: /xyz/}));
+ assert.writeOK(coll.insert({a: {q: 1}}));
+ assert.writeOK(coll.insert({a: true}));
+ assert.writeOK(coll.insert({a: new Date()}));
+ assert.writeOK(coll.insert({a: new ObjectId()}));
+ for (let type = 1; type <= 18; ++type) {
+ assertResults(null, {a: {$type: type}});
}
- checkMatchResults(false);
- coll.createIndex({a: 1});
- checkMatchResults(true);
- coll.createIndex({'a.b': 1});
- coll.createIndex({'a.c': 1});
- checkMatchResults(true);
+ coll.remove({});
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: 2}));
+ assert.writeOK(coll.insert({_id: 2, a: 3}));
+
+ // $and
+ assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]});
+ assertResults([], {$and: [{a: 1}, {_id: 1}]});
+ assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}],
+ {$and: [{$or: [{_id: 1}, {a: 3}]}, {$or: [{_id: 2}, {a: 2}]}]});
+
+ // $or
+ assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {$or: [{_id: 0}, {a: 3}]});
+}
+
+checkMatchResults(false);
+coll.createIndex({a: 1});
+checkMatchResults(true);
+coll.createIndex({'a.b': 1});
+coll.createIndex({'a.c': 1});
+checkMatchResults(true);
})();
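The identity $project above is what toggles whether the $match can be absorbed into the initial DocumentSourceCursor; the equivalence the helpers check reduces to this sketch (illustrative collection and predicate):

    var m = db.match_demo;
    m.drop();
    m.insert({_id: 0, a: 1});
    m.insert({_id: 1, a: 2});
    var q = {a: {$gt: 1}};  // any predicate legal in both find() and $match
    var viaFind = m.find(q).toArray();
    var viaMatch = m.aggregate({$match: q}).toArray();
    // The leading identity projection blocks the pushdown, so this exercises
    // the in-pipeline matcher instead of the cursor stage:
    var viaBlocked = m.aggregate({$project: {_id: '$_id', a: '$a'}}, {$match: q}).toArray();
    assert.eq(viaFind, viaMatch);
    assert.eq(viaFind, viaBlocked);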
diff --git a/jstests/aggregation/bugs/match_swap_limit.js b/jstests/aggregation/bugs/match_swap_limit.js
index 3de26d6f4b5..7dabc7130ca 100644
--- a/jstests/aggregation/bugs/match_swap_limit.js
+++ b/jstests/aggregation/bugs/match_swap_limit.js
@@ -2,19 +2,19 @@
* Ensure that $match is always applied after $limit.
*/
(function() {
- "use strict";
+"use strict";
- let coll = db.jstests_match_swap_limit;
- coll.drop();
+let coll = db.jstests_match_swap_limit;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0, x: 1, y: 3}));
- assert.writeOK(coll.insert({_id: 1, x: 2, y: 2}));
- assert.writeOK(coll.insert({_id: 2, x: 3, y: 1}));
+assert.writeOK(coll.insert({_id: 0, x: 1, y: 3}));
+assert.writeOK(coll.insert({_id: 1, x: 2, y: 2}));
+assert.writeOK(coll.insert({_id: 2, x: 3, y: 1}));
- assert.eq([{_id: 1, x: 2, y: 2}],
- coll.aggregate([{$sort: {x: -1}}, {$limit: 2}, {$match: {y: {$gte: 2}}}]).toArray());
+assert.eq([{_id: 1, x: 2, y: 2}],
+ coll.aggregate([{$sort: {x: -1}}, {$limit: 2}, {$match: {y: {$gte: 2}}}]).toArray());
- assert.writeOK(coll.createIndex({x: 1}));
- assert.eq([{_id: 1, x: 2, y: 2}],
- coll.aggregate([{$sort: {x: -1}}, {$limit: 2}, {$match: {y: {$gte: 2}}}]).toArray());
+assert.writeOK(coll.createIndex({x: 1}));
+assert.eq([{_id: 1, x: 2, y: 2}],
+ coll.aggregate([{$sort: {x: -1}}, {$limit: 2}, {$match: {y: {$gte: 2}}}]).toArray());
}());
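Why the swap would be wrong, with the three documents inserted above: $sort descending on x then $limit: 2 keeps {_id: 2} and {_id: 1}, and only {_id: 1} satisfies y >= 2. Filtering before the limit changes the answer, as this sketch against the same collection shows:

    // A $match-first pipeline keeps two documents ({_id: 0} and {_id: 1})
    // instead of the one the test expects, so the optimizer must not swap.
    var swapped = db.jstests_match_swap_limit
                      .aggregate([{$match: {y: {$gte: 2}}}, {$sort: {x: -1}}, {$limit: 2}])
                      .toArray();
    assert.eq(2, swapped.length);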
diff --git a/jstests/aggregation/bugs/reverseArray.js b/jstests/aggregation/bugs/reverseArray.js
index 0fa4010654b..cf80c040171 100644
--- a/jstests/aggregation/bugs/reverseArray.js
+++ b/jstests/aggregation/bugs/reverseArray.js
@@ -4,29 +4,29 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
+"use strict";
- var coll = db.reverseArray;
- coll.drop();
+var coll = db.reverseArray;
+coll.drop();
- // We need a document to flow through the pipeline, even though we don't care what fields it
- // has.
- coll.insert({});
+// We need a document to flow through the pipeline, even though we don't care what fields it
+// has.
+coll.insert({});
- assertErrorCode(coll, [{$project: {reversed: {$reverseArray: 1}}}], 34435);
+assertErrorCode(coll, [{$project: {reversed: {$reverseArray: 1}}}], 34435);
- var res = coll.aggregate([{$project: {reversed: {$reverseArray: {$literal: [1, 2]}}}}]);
- var output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].reversed, [2, 1]);
+var res = coll.aggregate([{$project: {reversed: {$reverseArray: {$literal: [1, 2]}}}}]);
+var output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].reversed, [2, 1]);
- var res = coll.aggregate([{$project: {reversed: {$reverseArray: {$literal: [[1, 2]]}}}}]);
- var output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].reversed, [[1, 2]]);
+var res = coll.aggregate([{$project: {reversed: {$reverseArray: {$literal: [[1, 2]]}}}}]);
+var output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].reversed, [[1, 2]]);
- var res = coll.aggregate([{$project: {reversed: {$reverseArray: "$notAField"}}}]);
- var output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].reversed, null);
+var res = coll.aggregate([{$project: {reversed: {$reverseArray: "$notAField"}}}]);
+var output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].reversed, null);
}());
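The cases above cover $reverseArray over literals and a missing field; for completeness, a sketch over a stored array field (collection name illustrative):

    var ra = db.reverse_demo;
    ra.drop();
    ra.insert({arr: [1, 2, 3]});
    // Yields [{reversed: [3, 2, 1]}]; a null or missing input yields null,
    // as the '$notAField' case above demonstrates.
    printjson(ra.aggregate([{$project: {_id: 0, reversed: {$reverseArray: "$arr"}}}]).toArray());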
diff --git a/jstests/aggregation/bugs/server10176.js b/jstests/aggregation/bugs/server10176.js
index 988beb24f13..9283c819342 100644
--- a/jstests/aggregation/bugs/server10176.js
+++ b/jstests/aggregation/bugs/server10176.js
@@ -4,61 +4,61 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- var coll = db.abs_expr;
- coll.drop();
+var coll = db.abs_expr;
+coll.drop();
- // valid types (numeric and null)
- assert.writeOK(coll.insert({_id: 0, a: 5}));
- assert.writeOK(coll.insert({_id: 1, a: -5}));
- assert.writeOK(coll.insert({_id: 2, a: 5.5}));
- assert.writeOK(coll.insert({_id: 3, a: -5.5}));
- assert.writeOK(coll.insert({_id: 4, a: NumberInt("5")}));
- assert.writeOK(coll.insert({_id: 5, a: NumberInt("-5")}));
- assert.writeOK(coll.insert({_id: 6, a: NumberLong("5")}));
- assert.writeOK(coll.insert({_id: 7, a: NumberLong("-5")}));
- assert.writeOK(coll.insert({_id: 8, a: 0.0}));
- assert.writeOK(coll.insert({_id: 9, a: -0.0}));
- assert.writeOK(coll.insert({_id: 10, a: NumberInt("0")}));
- // INT_MIN is -(2 ^ 31)
- assert.writeOK(coll.insert({_id: 11, a: NumberInt(-Math.pow(2, 31))}));
- assert.writeOK(coll.insert({_id: 12, a: -Math.pow(2, 31)}));
- // 1152921504606846977 is 2^60 + 1, an integer that can't be represented precisely as a double
- assert.writeOK(coll.insert({_id: 13, a: NumberLong("1152921504606846977")}));
- assert.writeOK(coll.insert({_id: 14, a: NumberLong("-1152921504606846977")}));
- assert.writeOK(coll.insert({_id: 15, a: null}));
- assert.writeOK(coll.insert({_id: 16, a: undefined}));
- assert.writeOK(coll.insert({_id: 17, a: NaN}));
- assert.writeOK(coll.insert({_id: 18}));
+// valid types (numeric and null)
+assert.writeOK(coll.insert({_id: 0, a: 5}));
+assert.writeOK(coll.insert({_id: 1, a: -5}));
+assert.writeOK(coll.insert({_id: 2, a: 5.5}));
+assert.writeOK(coll.insert({_id: 3, a: -5.5}));
+assert.writeOK(coll.insert({_id: 4, a: NumberInt("5")}));
+assert.writeOK(coll.insert({_id: 5, a: NumberInt("-5")}));
+assert.writeOK(coll.insert({_id: 6, a: NumberLong("5")}));
+assert.writeOK(coll.insert({_id: 7, a: NumberLong("-5")}));
+assert.writeOK(coll.insert({_id: 8, a: 0.0}));
+assert.writeOK(coll.insert({_id: 9, a: -0.0}));
+assert.writeOK(coll.insert({_id: 10, a: NumberInt("0")}));
+// INT_MIN is -(2 ^ 31)
+assert.writeOK(coll.insert({_id: 11, a: NumberInt(-Math.pow(2, 31))}));
+assert.writeOK(coll.insert({_id: 12, a: -Math.pow(2, 31)}));
+// 1152921504606846977 is 2^60 + 1, an integer that can't be represented precisely as a double
+assert.writeOK(coll.insert({_id: 13, a: NumberLong("1152921504606846977")}));
+assert.writeOK(coll.insert({_id: 14, a: NumberLong("-1152921504606846977")}));
+assert.writeOK(coll.insert({_id: 15, a: null}));
+assert.writeOK(coll.insert({_id: 16, a: undefined}));
+assert.writeOK(coll.insert({_id: 17, a: NaN}));
+assert.writeOK(coll.insert({_id: 18}));
- // valid use of $abs: numbers become positive, null/undefined/nonexistent become null
+// valid use of $abs: numbers become positive, null/undefined/nonexistent become null
- var results = coll.aggregate([{$project: {a: {$abs: "$a"}}}, {$sort: {_id: 1}}]).toArray();
- assert.eq(results, [
- {_id: 0, a: 5},
- {_id: 1, a: 5},
- {_id: 2, a: 5.5},
- {_id: 3, a: 5.5},
- {_id: 4, a: 5},
- {_id: 5, a: 5},
- {_id: 6, a: NumberLong("5")},
- {_id: 7, a: NumberLong("5")},
- {_id: 8, a: 0},
- {_id: 9, a: 0},
- {_id: 10, a: 0},
- {_id: 11, a: NumberLong(Math.pow(2, 31))},
- {_id: 12, a: Math.pow(2, 31)},
- {_id: 13, a: NumberLong("1152921504606846977")},
- {_id: 14, a: NumberLong("1152921504606846977")},
- {_id: 15, a: null},
- {_id: 16, a: null},
- {_id: 17, a: NaN},
- {_id: 18, a: null},
- ]);
- // Invalid
+var results = coll.aggregate([{$project: {a: {$abs: "$a"}}}, {$sort: {_id: 1}}]).toArray();
+assert.eq(results, [
+ {_id: 0, a: 5},
+ {_id: 1, a: 5},
+ {_id: 2, a: 5.5},
+ {_id: 3, a: 5.5},
+ {_id: 4, a: 5},
+ {_id: 5, a: 5},
+ {_id: 6, a: NumberLong("5")},
+ {_id: 7, a: NumberLong("5")},
+ {_id: 8, a: 0},
+ {_id: 9, a: 0},
+ {_id: 10, a: 0},
+ {_id: 11, a: NumberLong(Math.pow(2, 31))},
+ {_id: 12, a: Math.pow(2, 31)},
+ {_id: 13, a: NumberLong("1152921504606846977")},
+ {_id: 14, a: NumberLong("1152921504606846977")},
+ {_id: 15, a: null},
+ {_id: 16, a: null},
+ {_id: 17, a: NaN},
+ {_id: 18, a: null},
+]);
+// Invalid
- // using $abs on string
- assertErrorCode(coll, [{$project: {a: {$abs: "string"}}}], 28765);
+// using $abs on string
+assertErrorCode(coll, [{$project: {a: {$abs: "string"}}}], 28765);
- // using $abs on LLONG_MIN (-2 ^ 63)
- assertErrorCode(coll, [{$project: {a: {$abs: NumberLong("-9223372036854775808")}}}], 28680);
+// using $abs on LLONG_MIN (-2 ^ 63)
+assertErrorCode(coll, [{$project: {a: {$abs: NumberLong("-9223372036854775808")}}}], 28680);
}());
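The two overflow edges exercised above (_id 11 and the final assertion) are worth seeing in isolation. This sketch assumes utils.js is loaded, as at the top of the file, and uses an illustrative collection name:

    var absDemo = db.abs_demo;
    absDemo.drop();
    absDemo.insert({v: NumberInt(-Math.pow(2, 31))});  // INT_MIN
    // |INT_MIN| does not fit back into a 32-bit int, so $abs widens the
    // result to NumberLong(2147483648):
    printjson(absDemo.aggregate([{$project: {v: {$abs: "$v"}}}]).toArray());
    // |LLONG_MIN| overflows 64 bits entirely, so $abs raises error 28680:
    assertErrorCode(absDemo, [{$project: {v: {$abs: NumberLong("-9223372036854775808")}}}], 28680);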
diff --git a/jstests/aggregation/bugs/server11118.js b/jstests/aggregation/bugs/server11118.js
index 27b3fa7597e..46e79c3a7cc 100644
--- a/jstests/aggregation/bugs/server11118.js
+++ b/jstests/aggregation/bugs/server11118.js
@@ -1,154 +1,153 @@
// SERVER-11118 Tests for $dateToString
(function() {
- "use strict";
-
- load('jstests/aggregation/extras/utils.js');
-
- const coll = db.server11118;
-
- // Used to verify expected output format
- function testFormat(date, formatStr, expectedStr) {
- coll.drop();
- assert.writeOK(coll.insert({date: date}));
-
- const res =
- coll.aggregate([{
- $project:
- {_id: 0, formatted: {$dateToString: {format: formatStr, date: "$date"}}}
- }])
- .toArray();
-
- assert.eq(res[0].formatted, expectedStr);
- }
-
- // Used to verify that server recognizes bad formats
- function testFormatError(formatObj, errCode) {
- coll.drop();
- assert.writeOK(coll.insert({date: ISODate()}));
-
- assertErrorCode(coll, {$project: {_id: 0, formatted: {$dateToString: formatObj}}}, errCode);
- }
-
- // Used to verify that only date values are accepted for date parameter
- function testDateValueError(dateVal, errCode) {
- coll.drop();
- assert.writeOK(coll.insert({date: dateVal}));
-
- assertErrorCode(
- coll, {$project: {formatted: {$dateToString: {format: "%Y", date: "$date"}}}}, errCode);
- }
-
- const now = ISODate();
-
- // Use all modifiers we can test with js provided function
- testFormat(now, "%%-%Y-%m-%d-%H-%M-%S-%L", [
- "%",
- now.getUTCFullYear().zeroPad(4),
- (now.getUTCMonth() + 1).zeroPad(2),
- now.getUTCDate().zeroPad(2),
- now.getUTCHours().zeroPad(2),
- now.getUTCMinutes().zeroPad(2),
- now.getUTCSeconds().zeroPad(2),
- now.getUTCMilliseconds().zeroPad(3)
- ].join("-"));
-
- // Padding tests
- const padme = ISODate("2001-02-03T04:05:06.007Z");
-
- testFormat(padme, "%%", "%");
- testFormat(padme, "%Y", padme.getUTCFullYear().zeroPad(4));
- testFormat(padme, "%m", (padme.getUTCMonth() + 1).zeroPad(2));
- testFormat(padme, "%d", padme.getUTCDate().zeroPad(2));
- testFormat(padme, "%H", padme.getUTCHours().zeroPad(2));
- testFormat(padme, "%M", padme.getUTCMinutes().zeroPad(2));
- testFormat(padme, "%S", padme.getUTCSeconds().zeroPad(2));
- testFormat(padme, "%L", padme.getUTCMilliseconds().zeroPad(3));
-
- // no space and multiple characters between modifiers
- testFormat(now, "%d%d***%d***%d**%d*%d", [
- now.getUTCDate().zeroPad(2),
- now.getUTCDate().zeroPad(2),
- "***",
- now.getUTCDate().zeroPad(2),
- "***",
- now.getUTCDate().zeroPad(2),
- "**",
- now.getUTCDate().zeroPad(2),
- "*",
- now.getUTCDate().zeroPad(2)
- ].join(""));
-
- // JS doesn't have equivalents of these format specifiers
- testFormat(ISODate('1999-01-02 03:04:05.006Z'), "%U-%w-%j", "00-7-002");
-
- // Missing date
- testFormatError({format: "%Y"}, 18628);
-
- // Extra field
- testFormatError({format: "%Y", date: "$date", extra: "whyamIhere"}, 18534);
-
- // Not an object
- testFormatError(["%Y", "$date"], 18629);
-
- // Use invalid modifier at middle of string
- testFormatError({format: "%Y-%q", date: "$date"}, 18536);
-
- // Odd number of percent signs at end
- testFormatError({format: "%U-%w-%j-%%%", date: "$date"}, 18535);
-
- // Odd number of percent signs at middle
- // will get interpreted as an invalid modifier since it will try to use '%A'
- testFormatError({format: "AAAAA%%%AAAAAA", date: "$date"}, 18536);
-
- // Format parameter not a string
- testFormatError({format: {iamalion: "roar"}, date: "$date"}, 18533);
-
- ///
- /// Additional Tests
- ///
-
- // Test document
- const date = ISODate("1999-08-29");
-
- testFormat(date, "%%d", "%d");
-
- // A very long string of "%"s
- const longstr = Array(1000).join("%%");
- const halfstr = Array(1000).join("%");
- testFormat(date, longstr, halfstr);
-
- // Dates as null (should return a null)
- testFormat(null, "%Y", null);
-
- ///
- /// Using non-date fields as date parameter *should fail*
- ///
-
- // Array
- testDateValueError([], 16006);
- testDateValueError([1, 2, 3], 16006);
-
- // Sub-object
- testDateValueError({}, 16006);
- testDateValueError({a: 1}, 16006);
-
- // String
- testDateValueError("blahblahblah", 16006);
-
- // Integer
- testDateValueError(1234, 16006);
-
- ///
- /// Using non-string fields as format strings
- ///
-
- // Array
- testFormatError({format: [], date: "$date"}, 18533);
- testFormatError({format: [1, 2, 3], date: "$date"}, 18533);
-
- // Integer
- testFormatError({format: 1, date: "$date"}, 18533);
+"use strict";
+
+load('jstests/aggregation/extras/utils.js');
+
+const coll = db.server11118;
+
+// Used to verify expected output format
+function testFormat(date, formatStr, expectedStr) {
+ coll.drop();
+ assert.writeOK(coll.insert({date: date}));
+
+ const res =
+ coll.aggregate([
+ {$project: {_id: 0, formatted: {$dateToString: {format: formatStr, date: "$date"}}}}
+ ])
+ .toArray();
+
+ assert.eq(res[0].formatted, expectedStr);
+}
+
+// Used to verify that server recognizes bad formats
+function testFormatError(formatObj, errCode) {
+ coll.drop();
+ assert.writeOK(coll.insert({date: ISODate()}));
+
+ assertErrorCode(coll, {$project: {_id: 0, formatted: {$dateToString: formatObj}}}, errCode);
+}
+
+// Used to verify that only date values are accepted for date parameter
+function testDateValueError(dateVal, errCode) {
+ coll.drop();
+ assert.writeOK(coll.insert({date: dateVal}));
+
+ assertErrorCode(
+ coll, {$project: {formatted: {$dateToString: {format: "%Y", date: "$date"}}}}, errCode);
+}
+
+const now = ISODate();
+
+// Use all the modifiers we can test with JS-provided functions
+testFormat(now, "%%-%Y-%m-%d-%H-%M-%S-%L", [
+ "%",
+ now.getUTCFullYear().zeroPad(4),
+ (now.getUTCMonth() + 1).zeroPad(2),
+ now.getUTCDate().zeroPad(2),
+ now.getUTCHours().zeroPad(2),
+ now.getUTCMinutes().zeroPad(2),
+ now.getUTCSeconds().zeroPad(2),
+ now.getUTCMilliseconds().zeroPad(3)
+].join("-"));
+
+// Padding tests
+const padme = ISODate("2001-02-03T04:05:06.007Z");
+
+testFormat(padme, "%%", "%");
+testFormat(padme, "%Y", padme.getUTCFullYear().zeroPad(4));
+testFormat(padme, "%m", (padme.getUTCMonth() + 1).zeroPad(2));
+testFormat(padme, "%d", padme.getUTCDate().zeroPad(2));
+testFormat(padme, "%H", padme.getUTCHours().zeroPad(2));
+testFormat(padme, "%M", padme.getUTCMinutes().zeroPad(2));
+testFormat(padme, "%S", padme.getUTCSeconds().zeroPad(2));
+testFormat(padme, "%L", padme.getUTCMilliseconds().zeroPad(3));
+
+// no space and multiple characters between modifiers
+testFormat(now, "%d%d***%d***%d**%d*%d", [
+ now.getUTCDate().zeroPad(2),
+ now.getUTCDate().zeroPad(2),
+ "***",
+ now.getUTCDate().zeroPad(2),
+ "***",
+ now.getUTCDate().zeroPad(2),
+ "**",
+ now.getUTCDate().zeroPad(2),
+ "*",
+ now.getUTCDate().zeroPad(2)
+].join(""));
+
+// JS doesn't have equivalents of these format specifiers
+testFormat(ISODate('1999-01-02 03:04:05.006Z'), "%U-%w-%j", "00-7-002");
+
+// Missing date
+testFormatError({format: "%Y"}, 18628);
+
+// Extra field
+testFormatError({format: "%Y", date: "$date", extra: "whyamIhere"}, 18534);
+
+// Not an object
+testFormatError(["%Y", "$date"], 18629);
+
+// Use invalid modifier at middle of string
+testFormatError({format: "%Y-%q", date: "$date"}, 18536);
+
+// Odd number of percent signs at end
+testFormatError({format: "%U-%w-%j-%%%", date: "$date"}, 18535);
+
+// Odd number of percent signs at middle
+// will get interpreted as an invalid modifier since it will try to use '%A'
+testFormatError({format: "AAAAA%%%AAAAAA", date: "$date"}, 18536);
+
+// Format parameter not a string
+testFormatError({format: {iamalion: "roar"}, date: "$date"}, 18533);
+
+///
+/// Additional Tests
+///
+
+// Test document
+const date = ISODate("1999-08-29");
+
+testFormat(date, "%%d", "%d");
+
+// A very long string of "%"s
+const longstr = Array(1000).join("%%");
+const halfstr = Array(1000).join("%");
+testFormat(date, longstr, halfstr);
+
+// Dates as null (should return a null)
+testFormat(null, "%Y", null);
+
+///
+/// Using non-date fields as date parameter *should fail*
+///
+
+// Array
+testDateValueError([], 16006);
+testDateValueError([1, 2, 3], 16006);
+
+// Sub-object
+testDateValueError({}, 16006);
+testDateValueError({a: 1}, 16006);
+
+// String
+testDateValueError("blahblahblah", 16006);
+
+// Integer
+testDateValueError(1234, 16006);
+
+///
+/// Using non-string fields as format strings
+///
+
+// Array
+testFormatError({format: [], date: "$date"}, 18533);
+testFormatError({format: [1, 2, 3], date: "$date"}, 18533);
+
+// Integer
+testFormatError({format: 1, date: "$date"}, 18533);
- // Date
- testFormatError({format: ISODate(), date: "$date"}, 18533);
+// Date
+testFormatError({format: ISODate(), date: "$date"}, 18533);
})();
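The helpers above wrap one core expression; its happy path, reduced to a single pipeline (collection name illustrative):

    var dts = db.date_demo;
    dts.drop();
    dts.insert({when: ISODate("2001-02-03T04:05:06.007Z")});
    // Yields [{formatted: "2001-02-03 04:05:06.007"}].
    printjson(dts.aggregate([{
                   $project: {
                       _id: 0,
                       formatted:
                           {$dateToString: {format: "%Y-%m-%d %H:%M:%S.%L", date: "$when"}}
                   }
               }]).toArray());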
diff --git a/jstests/aggregation/bugs/server11675.js b/jstests/aggregation/bugs/server11675.js
index 759b4393b30..2d02a1ff53e 100644
--- a/jstests/aggregation/bugs/server11675.js
+++ b/jstests/aggregation/bugs/server11675.js
@@ -1,227 +1,224 @@
// SERVER-11675 Text search integration with aggregation
(function() {
- load('jstests/aggregation/extras/utils.js'); // For 'assertErrorCode'.
- load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
-
- const coll = db.server11675;
- coll.drop();
-
- assert.writeOK(coll.insert({_id: 1, text: "apple", words: 1}));
- assert.writeOK(coll.insert({_id: 2, text: "banana", words: 1}));
- assert.writeOK(coll.insert({_id: 3, text: "apple banana", words: 2}));
- assert.writeOK(coll.insert({_id: 4, text: "cantaloupe", words: 1}));
-
- assert.commandWorked(coll.createIndex({text: "text"}));
-
- // query should have subfields query, project, sort, skip and limit. All but query are optional.
- const assertSameAsFind = function(query) {
- let cursor = coll.find(query.query);
- const pipeline = [{$match: query.query}];
-
- if ('project' in query) {
- cursor = coll.find(query.query, query.project); // no way to add to constructed cursor
- pipeline.push({$project: query.project});
- }
-
- if ('sort' in query) {
- cursor = cursor.sort(query.sort);
- pipeline.push({$sort: query.sort});
- }
-
- if ('skip' in query) {
- cursor = cursor.skip(query.skip);
- pipeline.push({$skip: query.skip});
- }
-
- if ('limit' in query) {
- cursor = cursor.limit(query.limit);
- pipeline.push({$limit: query.limit});
- }
-
- const findRes = cursor.toArray();
- const aggRes = coll.aggregate(pipeline).toArray();
-
- // If the query doesn't specify its own sort, there is a possibility that find() and
- // aggregate() will return the same results in different orders. We sort by _id on the
- // client side, so that the results still count as equal.
- if (!query.hasOwnProperty("sort")) {
- findRes.sort(function(a, b) {
- return a._id - b._id;
- });
- aggRes.sort(function(a, b) {
- return a._id - b._id;
- });
- }
-
- assert.docEq(aggRes, findRes);
- };
-
- assertSameAsFind({query: {}}); // sanity check
- assertSameAsFind({query: {$text: {$search: "apple"}}});
- assertSameAsFind({query: {_id: 1, $text: {$search: "apple"}}});
- assertSameAsFind(
- {query: {$text: {$search: "apple"}}, project: {_id: 1, score: {$meta: "textScore"}}});
- assertSameAsFind({
- query: {$text: {$search: "apple banana"}},
- project: {_id: 1, score: {$meta: "textScore"}}
- });
- assertSameAsFind({
- query: {$text: {$search: "apple banana"}},
- project: {_id: 1, score: {$meta: "textScore"}},
- sort: {score: {$meta: "textScore"}}
- });
- assertSameAsFind({
- query: {$text: {$search: "apple banana"}},
- project: {_id: 1, score: {$meta: "textScore"}},
- sort: {score: {$meta: "textScore"}},
- limit: 1
- });
- assertSameAsFind({
- query: {$text: {$search: "apple banana"}},
- project: {_id: 1, score: {$meta: "textScore"}},
- sort: {score: {$meta: "textScore"}},
- skip: 1
- });
- assertSameAsFind({
- query: {$text: {$search: "apple banana"}},
- project: {_id: 1, score: {$meta: "textScore"}},
- sort: {score: {$meta: "textScore"}},
- skip: 1,
- limit: 1
- });
-
- // $meta sort specification should be rejected if it has additional keys.
- assert.throws(function() {
- coll.aggregate([
- {$match: {$text: {$search: 'apple banana'}}},
- {$sort: {textScore: {$meta: 'textScore', extra: 1}}}
- ])
- .itcount();
- });
-
- // $meta sort specification should be rejected if the type of meta sort is not known.
- assert.throws(function() {
- coll.aggregate([
- {$match: {$text: {$search: 'apple banana'}}},
- {$sort: {textScore: {$meta: 'unknown'}}}
- ])
- .itcount();
- });
-
- // Sort specification should be rejected if a $-keyword other than $meta is used.
- assert.throws(function() {
- coll.aggregate([
- {$match: {$text: {$search: 'apple banana'}}},
- {$sort: {textScore: {$notMeta: 'textScore'}}}
- ])
- .itcount();
- });
-
- // Sort specification should be rejected if it is a string, not an object with $meta.
- assert.throws(function() {
- coll.aggregate(
- [{$match: {$text: {$search: 'apple banana'}}}, {$sort: {textScore: 'textScore'}}])
- .itcount();
- });
-
- // sharded find requires projecting the score to sort, but sharded agg does not.
- var findRes = coll.find({$text: {$search: "apple banana"}}, {textScore: {$meta: 'textScore'}})
- .sort({textScore: {$meta: 'textScore'}})
- .map(function(obj) {
- delete obj.textScore; // remove it to match agg output
- return obj;
- });
- let res = coll.aggregate([
- {$match: {$text: {$search: 'apple banana'}}},
- {$sort: {textScore: {$meta: 'textScore'}}}
- ])
- .toArray();
- assert.eq(res, findRes);
-
- // Make sure {$meta: 'textScore'} can be used as a sub-expression
- res = coll.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {
- $project: {
- words: 1,
- score: {$meta: 'textScore'},
- wordsTimesScore: {$multiply: ['$words', {$meta: 'textScore'}]}
- }
- }
- ])
- .toArray();
- assert.eq(res[0].wordsTimesScore, res[0].words * res[0].score, tojson(res));
-
- // And can be used in $group
- res = coll.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple banana'}}},
- {$group: {_id: {$meta: 'textScore'}, score: {$first: {$meta: 'textScore'}}}}
- ])
- .toArray();
- assert.eq(res[0]._id, res[0].score, tojson(res));
-
- // Make sure metadata crosses shard -> merger boundary
- res = coll.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {$project: {scoreOnShard: {$meta: 'textScore'}}},
- {$limit: 1}, // force a split. later stages run on merger
- {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
- ])
- .toArray();
- assert.eq(res[0].scoreOnMerger, res[0].scoreOnShard);
- let score = res[0].scoreOnMerger; // save for later tests
-
- // Make sure metadata crosses shard -> merger boundary even if not used on shard
- res = coll.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {$limit: 1}, // force a split. later stages run on merger
- {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+load('jstests/aggregation/extras/utils.js'); // For 'assertErrorCode'.
+load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
+
+const coll = db.server11675;
+coll.drop();
+
+assert.writeOK(coll.insert({_id: 1, text: "apple", words: 1}));
+assert.writeOK(coll.insert({_id: 2, text: "banana", words: 1}));
+assert.writeOK(coll.insert({_id: 3, text: "apple banana", words: 2}));
+assert.writeOK(coll.insert({_id: 4, text: "cantaloupe", words: 1}));
+
+assert.commandWorked(coll.createIndex({text: "text"}));
+
+// 'query' should have subfields query, project, sort, skip, and limit. All but query are optional.
+const assertSameAsFind = function(query) {
+ let cursor = coll.find(query.query);
+ const pipeline = [{$match: query.query}];
+
+ if ('project' in query) {
+ cursor = coll.find(query.query, query.project); // no way to add to constructed cursor
+ pipeline.push({$project: query.project});
+ }
+
+ if ('sort' in query) {
+ cursor = cursor.sort(query.sort);
+ pipeline.push({$sort: query.sort});
+ }
+
+ if ('skip' in query) {
+ cursor = cursor.skip(query.skip);
+ pipeline.push({$skip: query.skip});
+ }
+
+ if ('limit' in query) {
+ cursor = cursor.limit(query.limit);
+ pipeline.push({$limit: query.limit});
+ }
+
+ const findRes = cursor.toArray();
+ const aggRes = coll.aggregate(pipeline).toArray();
+
+ // If the query doesn't specify its own sort, there is a possibility that find() and
+ // aggregate() will return the same results in different orders. We sort by _id on the
+ // client side, so that the results still count as equal.
+ if (!query.hasOwnProperty("sort")) {
+ findRes.sort(function(a, b) {
+ return a._id - b._id;
+ });
+ aggRes.sort(function(a, b) {
+ return a._id - b._id;
+ });
+ }
+
+ assert.docEq(aggRes, findRes);
+};
+
+assertSameAsFind({query: {}}); // sanity check
+assertSameAsFind({query: {$text: {$search: "apple"}}});
+assertSameAsFind({query: {_id: 1, $text: {$search: "apple"}}});
+assertSameAsFind(
+ {query: {$text: {$search: "apple"}}, project: {_id: 1, score: {$meta: "textScore"}}});
+assertSameAsFind(
+ {query: {$text: {$search: "apple banana"}}, project: {_id: 1, score: {$meta: "textScore"}}});
+assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}}
+});
+assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}},
+ limit: 1
+});
+assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}},
+ skip: 1
+});
+assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}},
+ skip: 1,
+ limit: 1
+});
+
+// $meta sort specification should be rejected if it has additional keys.
+assert.throws(function() {
+ coll.aggregate([
+ {$match: {$text: {$search: 'apple banana'}}},
+ {$sort: {textScore: {$meta: 'textScore', extra: 1}}}
+ ])
+ .itcount();
+});
+
+// $meta sort specification should be rejected if the type of meta sort is not known.
+assert.throws(function() {
+ coll.aggregate([
+ {$match: {$text: {$search: 'apple banana'}}},
+ {$sort: {textScore: {$meta: 'unknown'}}}
+ ])
+ .itcount();
+});
+
+// Sort specification should be rejected if a $-keyword other than $meta is used.
+assert.throws(function() {
+ coll.aggregate([
+ {$match: {$text: {$search: 'apple banana'}}},
+ {$sort: {textScore: {$notMeta: 'textScore'}}}
+ ])
+ .itcount();
+});
+
+// Sort specification should be rejected if it is a string, not an object with $meta.
+assert.throws(function() {
+ coll.aggregate(
+ [{$match: {$text: {$search: 'apple banana'}}}, {$sort: {textScore: 'textScore'}}])
+ .itcount();
+});
+
+// sharded find requires projecting the score to sort, but sharded agg does not.
+var findRes = coll.find({$text: {$search: "apple banana"}}, {textScore: {$meta: 'textScore'}})
+ .sort({textScore: {$meta: 'textScore'}})
+ .map(function(obj) {
+ delete obj.textScore; // remove it to match agg output
+ return obj;
+ });
+let res = coll.aggregate([
+ {$match: {$text: {$search: 'apple banana'}}},
+ {$sort: {textScore: {$meta: 'textScore'}}}
])
.toArray();
- assert.eq(res[0].scoreOnMerger, score);
-
- // Make sure metadata works if first $project doesn't use it.
- res = coll.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {$project: {_id: 1}},
- {$project: {_id: 1, score: {$meta: 'textScore'}}}
- ])
- .toArray();
- assert.eq(res[0].score, score);
-
- // Make sure the pipeline fails if it tries to reference the text score and it doesn't exist.
- res = coll.runCommand(
- {aggregate: coll.getName(), pipeline: [{$project: {_id: 1, score: {$meta: 'textScore'}}}]});
- assert.commandFailed(res);
-
- // Make sure the metadata is 'missing()' when it doesn't exist because the document changed
- res = coll.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple banana'}}},
- {$group: {_id: 1, score: {$first: {$meta: 'textScore'}}}},
- {$project: {_id: 1, scoreAgain: {$meta: 'textScore'}}},
- ])
- .toArray();
- assert(!("scoreAgain" in res[0]));
-
- // Make sure metadata works after a $unwind
- assert.writeOK(coll.insert({_id: 5, text: 'mango', words: [1, 2, 3]}));
- res = coll.aggregate([
- {$match: {$text: {$search: 'mango'}}},
- {$project: {score: {$meta: "textScore"}, _id: 1, words: 1}},
- {$unwind: '$words'},
- {$project: {scoreAgain: {$meta: "textScore"}, score: 1}}
- ])
- .toArray();
- assert.eq(res[0].scoreAgain, res[0].score);
-
- // Error checking
- // $match, but wrong position
- assertErrorCode(
- coll, [{$sort: {text: 1}}, {$match: {$text: {$search: 'apple banana'}}}], 17313);
-
- // wrong $stage, but correct position
- assertErrorCode(coll,
- [{$project: {searchValue: {$text: {$search: 'apple banana'}}}}],
- ErrorCodes.InvalidPipelineOperator);
- assertErrorCode(coll, [{$sort: {$text: {$search: 'apple banana'}}}], 17312);
+assert.eq(res, findRes);
+
+// Make sure {$meta: 'textScore'} can be used as a sub-expression
+res = coll.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {
+ $project: {
+ words: 1,
+ score: {$meta: 'textScore'},
+ wordsTimesScore: {$multiply: ['$words', {$meta: 'textScore'}]}
+ }
+ }
+ ])
+ .toArray();
+assert.eq(res[0].wordsTimesScore, res[0].words * res[0].score, tojson(res));
+
+// And can be used in $group
+res = coll.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple banana'}}},
+ {$group: {_id: {$meta: 'textScore'}, score: {$first: {$meta: 'textScore'}}}}
+ ])
+ .toArray();
+assert.eq(res[0]._id, res[0].score, tojson(res));
+
+// Make sure metadata crosses shard -> merger boundary
+res = coll.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$project: {scoreOnShard: {$meta: 'textScore'}}},
+ {$limit: 1}, // force a split. later stages run on merger
+ {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+ ])
+ .toArray();
+assert.eq(res[0].scoreOnMerger, res[0].scoreOnShard);
+let score = res[0].scoreOnMerger; // save for later tests
+
+// Make sure metadata crosses shard -> merger boundary even if not used on shard
+res = coll.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$limit: 1}, // force a split. later stages run on merger
+ {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+ ])
+ .toArray();
+assert.eq(res[0].scoreOnMerger, score);
+
+// Make sure metadata works if first $project doesn't use it.
+res = coll.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$project: {_id: 1}},
+ {$project: {_id: 1, score: {$meta: 'textScore'}}}
+ ])
+ .toArray();
+assert.eq(res[0].score, score);
+
+// Make sure the pipeline fails if it tries to reference the text score and it doesn't exist.
+res = coll.runCommand(
+ {aggregate: coll.getName(), pipeline: [{$project: {_id: 1, score: {$meta: 'textScore'}}}]});
+assert.commandFailed(res);
+
+// Make sure the metadata is 'missing()' when it doesn't exist because the document changed
+res = coll.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple banana'}}},
+ {$group: {_id: 1, score: {$first: {$meta: 'textScore'}}}},
+ {$project: {_id: 1, scoreAgain: {$meta: 'textScore'}}},
+ ])
+ .toArray();
+assert(!("scoreAgain" in res[0]));
+
+// Make sure metadata works after a $unwind
+assert.writeOK(coll.insert({_id: 5, text: 'mango', words: [1, 2, 3]}));
+res = coll.aggregate([
+ {$match: {$text: {$search: 'mango'}}},
+ {$project: {score: {$meta: "textScore"}, _id: 1, words: 1}},
+ {$unwind: '$words'},
+ {$project: {scoreAgain: {$meta: "textScore"}, score: 1}}
+ ])
+ .toArray();
+assert.eq(res[0].scoreAgain, res[0].score);
+
+// Error checking
+// $match, but wrong position
+assertErrorCode(coll, [{$sort: {text: 1}}, {$match: {$text: {$search: 'apple banana'}}}], 17313);
+
+// wrong $stage, but correct position
+assertErrorCode(coll,
+ [{$project: {searchValue: {$text: {$search: 'apple banana'}}}}],
+ ErrorCodes.InvalidPipelineOperator);
+assertErrorCode(coll, [{$sort: {$text: {$search: 'apple banana'}}}], 17312);
})();
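The many cases above build on one interaction: a $text $match contributes textScore metadata that later stages read back with {$meta: 'textScore'}. The smallest demonstration (collection name illustrative):

    var ts = db.text_demo;
    ts.drop();
    assert.writeOK(ts.insert({_id: 1, text: "apple banana"}));
    assert.commandWorked(ts.createIndex({text: "text"}));
    // Without the $text predicate earlier in the pipeline, projecting the
    // score would fail, as the runCommand case above shows.
    printjson(ts.aggregate([
                    {$match: {$text: {$search: "apple"}}},
                    {$project: {score: {$meta: "textScore"}}}
                ]).toArray());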
diff --git a/jstests/aggregation/bugs/server12015.js b/jstests/aggregation/bugs/server12015.js
index 1b59a59545c..2c2b34d126c 100644
--- a/jstests/aggregation/bugs/server12015.js
+++ b/jstests/aggregation/bugs/server12015.js
@@ -9,77 +9,79 @@
load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
(function() {
- "use strict";
- const coll = db.server12015;
- coll.drop();
- const indexSpec = {a: 1, b: 1};
+"use strict";
+const coll = db.server12015;
+coll.drop();
+const indexSpec = {
+ a: 1,
+ b: 1
+};
- assert.writeOK(coll.insert({_id: 0, a: 0, b: 0}));
- assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
- assert.writeOK(coll.insert({_id: 2, a: 1, b: 0}));
- assert.writeOK(coll.insert({_id: 3, a: 1, b: 1}));
+assert.writeOK(coll.insert({_id: 0, a: 0, b: 0}));
+assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
+assert.writeOK(coll.insert({_id: 2, a: 1, b: 0}));
+assert.writeOK(coll.insert({_id: 3, a: 1, b: 1}));
- /**
- * Helper to test that for a given pipeline, the same results are returned whether or not an
- * index is present. If 'ignoreSortOrder' is present, test for result parity without assuming
- * the order of results.
- */
- function assertResultsMatch(pipeline, ignoreSortOrder) {
- // Add a match stage to ensure index scans are considered for planning (workaround for
- // SERVER-20066).
- pipeline = [{$match: {a: {$gte: 0}}}].concat(pipeline);
+/**
+ * Helper to test that for a given pipeline, the same results are returned whether or not an
+ * index is present. If 'ignoreSortOrder' is present, test for result parity without assuming
+ * the order of results.
+ */
+function assertResultsMatch(pipeline, ignoreSortOrder) {
+ // Add a match stage to ensure index scans are considered for planning (workaround for
+ // SERVER-20066).
+ pipeline = [{$match: {a: {$gte: 0}}}].concat(pipeline);
- // Once with an index.
- assert.commandWorked(coll.ensureIndex(indexSpec));
- var resultsWithIndex = coll.aggregate(pipeline).toArray();
+ // Once with an index.
+ assert.commandWorked(coll.ensureIndex(indexSpec));
+ var resultsWithIndex = coll.aggregate(pipeline).toArray();
- // Again without an index.
- assert.commandWorked(coll.dropIndex(indexSpec));
- var resultsWithoutIndex = coll.aggregate(pipeline).toArray();
+ // Again without an index.
+ assert.commandWorked(coll.dropIndex(indexSpec));
+ var resultsWithoutIndex = coll.aggregate(pipeline).toArray();
- if (ignoreSortOrder) {
- assert(arrayEq(resultsWithIndex, resultsWithoutIndex), tojson({
- resultsWithIndex: resultsWithIndex,
- resultsWithoutIndex: resultsWithoutIndex
- }));
- } else {
- assert.eq(resultsWithIndex, resultsWithoutIndex);
- }
+ if (ignoreSortOrder) {
+ assert(
+ arrayEq(resultsWithIndex, resultsWithoutIndex),
+ tojson({resultsWithIndex: resultsWithIndex, resultsWithoutIndex: resultsWithoutIndex}));
+ } else {
+ assert.eq(resultsWithIndex, resultsWithoutIndex);
}
+}
- // Uncovered $project, no $sort.
- const ignoreSortOrder = true;
- assertResultsMatch([{$project: {_id: 1, a: 1, b: 1}}], ignoreSortOrder);
+// Uncovered $project, no $sort.
+const ignoreSortOrder = true;
+assertResultsMatch([{$project: {_id: 1, a: 1, b: 1}}], ignoreSortOrder);
- // Covered $project, no $sort.
- assertResultsMatch([{$project: {_id: 0, a: 1}}], ignoreSortOrder);
- assertResultsMatch([{$project: {_id: 0, a: 1, b: 1}}], ignoreSortOrder);
- assertResultsMatch([{$project: {_id: 0, a: 1, b: 1, c: {$literal: 1}}}], ignoreSortOrder);
- assertResultsMatch([{$project: {_id: 0, a: 1, b: 1}}, {$project: {a: 1}}], ignoreSortOrder);
- assertResultsMatch([{$project: {_id: 0, a: 1, b: 1}}, {$group: {_id: null, a: {$sum: "$a"}}}],
- ignoreSortOrder);
+// Covered $project, no $sort.
+assertResultsMatch([{$project: {_id: 0, a: 1}}], ignoreSortOrder);
+assertResultsMatch([{$project: {_id: 0, a: 1, b: 1}}], ignoreSortOrder);
+assertResultsMatch([{$project: {_id: 0, a: 1, b: 1, c: {$literal: 1}}}], ignoreSortOrder);
+assertResultsMatch([{$project: {_id: 0, a: 1, b: 1}}, {$project: {a: 1}}], ignoreSortOrder);
+assertResultsMatch([{$project: {_id: 0, a: 1, b: 1}}, {$group: {_id: null, a: {$sum: "$a"}}}],
+ ignoreSortOrder);
- // Non-blocking $sort, uncovered $project.
- assertResultsMatch([{$sort: {a: -1, b: -1}}, {$project: {_id: 1, a: 1, b: 1}}]);
- assertResultsMatch([{$sort: {a: 1, b: 1}}, {$project: {_id: 1, a: 1, b: 1}}]);
- assertResultsMatch(
- [{$sort: {a: 1, b: 1}}, {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}],
- ignoreSortOrder);
+// Non-blocking $sort, uncovered $project.
+assertResultsMatch([{$sort: {a: -1, b: -1}}, {$project: {_id: 1, a: 1, b: 1}}]);
+assertResultsMatch([{$sort: {a: 1, b: 1}}, {$project: {_id: 1, a: 1, b: 1}}]);
+assertResultsMatch(
+ [{$sort: {a: 1, b: 1}}, {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}],
+ ignoreSortOrder);
- // Non-blocking $sort, covered $project.
- assertResultsMatch([{$sort: {a: -1, b: -1}}, {$project: {_id: 0, a: 1, b: 1}}]);
- assertResultsMatch([{$sort: {a: 1, b: 1}}, {$project: {_id: 0, a: 1, b: 1}}]);
- assertResultsMatch([{$sort: {a: 1, b: 1}}, {$group: {_id: "$b", arr: {$push: "$a"}}}],
- ignoreSortOrder);
+// Non-blocking $sort, covered $project.
+assertResultsMatch([{$sort: {a: -1, b: -1}}, {$project: {_id: 0, a: 1, b: 1}}]);
+assertResultsMatch([{$sort: {a: 1, b: 1}}, {$project: {_id: 0, a: 1, b: 1}}]);
+assertResultsMatch([{$sort: {a: 1, b: 1}}, {$group: {_id: "$b", arr: {$push: "$a"}}}],
+ ignoreSortOrder);
- // Blocking $sort, uncovered $project.
- assertResultsMatch([{$sort: {b: 1, a: -1}}, {$project: {_id: 1, a: 1, b: 1}}]);
- assertResultsMatch(
- [{$sort: {b: 1, a: -1}}, {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}],
- ignoreSortOrder);
+// Blocking $sort, uncovered $project.
+assertResultsMatch([{$sort: {b: 1, a: -1}}, {$project: {_id: 1, a: 1, b: 1}}]);
+assertResultsMatch(
+ [{$sort: {b: 1, a: -1}}, {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}],
+ ignoreSortOrder);
- // Blocking $sort, covered $project.
- assertResultsMatch([{$sort: {b: 1, a: -1}}, {$project: {_id: 0, a: 1, b: 1}}]);
- assertResultsMatch([{$sort: {b: 1, a: -1}}, {$group: {_id: "$b", arr: {$push: "$a"}}}],
- ignoreSortOrder);
+// Blocking $sort, covered $project.
+assertResultsMatch([{$sort: {b: 1, a: -1}}, {$project: {_id: 0, a: 1, b: 1}}]);
+assertResultsMatch([{$sort: {b: 1, a: -1}}, {$group: {_id: "$b", arr: {$push: "$a"}}}],
+ ignoreSortOrder);
}());
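The test compares results with and without the index; to see which plan the index actually produced, explain() can be consulted. This is an inspection aid only, since the exact plan shape varies across server versions:

    assert.commandWorked(db.server12015.createIndex({a: 1, b: 1}));
    // Look for an IXSCAN (and, for covered projections, no FETCH) in the
    // winning plan of the explain output.
    printjson(db.server12015.explain().aggregate(
        [{$match: {a: {$gte: 0}}}, {$project: {_id: 0, a: 1, b: 1}}]));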
diff --git a/jstests/aggregation/bugs/server14421.js b/jstests/aggregation/bugs/server14421.js
index 3201e20a81a..b6701546e3d 100644
--- a/jstests/aggregation/bugs/server14421.js
+++ b/jstests/aggregation/bugs/server14421.js
@@ -1,40 +1,40 @@
// SERVER-14421 minDistance for $geoNear aggregation operator
(function() {
- 'use strict';
- var coll = db.mindistance;
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, loc: {type: "Point", coordinates: [0, 0]}},
- {_id: 1, loc: {type: "Point", coordinates: [0, 0.01]}}
- ]));
- var response = coll.createIndex({loc: "2dsphere"});
- assert.eq(response.ok, 1, "Could not create 2dsphere index");
- var results = coll.aggregate([{
- $geoNear: {
- minDistance: 10000,
- spherical: true,
- distanceField: "distance",
- near: {type: "Point", coordinates: [0, 0]}
- }
- }]);
- assert.eq(results.itcount(), 0);
- results = coll.aggregate([{
- $geoNear: {
- minDistance: 1,
- spherical: true,
- distanceField: "distance",
- near: {type: "Point", coordinates: [0, 0]}
- }
- }]);
- assert.eq(results.itcount(), 1);
- results = coll.aggregate([{
- $geoNear: {
- minDistance: 0,
- spherical: true,
- distanceField: "distance",
- near: {type: "Point", coordinates: [0, 0]}
- }
- }]);
- assert.eq(results.itcount(), 2);
- coll.drop();
+'use strict';
+var coll = db.mindistance;
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, loc: {type: "Point", coordinates: [0, 0]}},
+ {_id: 1, loc: {type: "Point", coordinates: [0, 0.01]}}
+]));
+var response = coll.createIndex({loc: "2dsphere"});
+assert.eq(response.ok, 1, "Could not create 2dsphere index");
+var results = coll.aggregate([{
+ $geoNear: {
+ minDistance: 10000,
+ spherical: true,
+ distanceField: "distance",
+ near: {type: "Point", coordinates: [0, 0]}
+ }
+}]);
+assert.eq(results.itcount(), 0);
+results = coll.aggregate([{
+ $geoNear: {
+ minDistance: 1,
+ spherical: true,
+ distanceField: "distance",
+ near: {type: "Point", coordinates: [0, 0]}
+ }
+}]);
+assert.eq(results.itcount(), 1);
+results = coll.aggregate([{
+ $geoNear: {
+ minDistance: 0,
+ spherical: true,
+ distanceField: "distance",
+ near: {type: "Point", coordinates: [0, 0]}
+ }
+}]);
+assert.eq(results.itcount(), 2);
+coll.drop();
 }());
\ No newline at end of file
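The distances above are meters, since the points are GeoJSON and spherical is true: the second document sits roughly 1,112m north of the origin, which is why minDistance 10000 matches nothing, 1 matches only that document, and 0 matches both. The complementary maxDistance bound behaves the same way; a sketch with illustrative names (the test drops its own collection, so the data is recreated here):

    var geo = db.geonear_demo;
    geo.drop();
    assert.writeOK(geo.insert({_id: 0, loc: {type: "Point", coordinates: [0, 0]}}));
    assert.writeOK(geo.insert({_id: 1, loc: {type: "Point", coordinates: [0, 0.01]}}));
    assert.commandWorked(geo.createIndex({loc: "2dsphere"}));
    // Only the document at the origin is within 500 meters.
    assert.eq(1, geo.aggregate([{
                      $geoNear: {
                          near: {type: "Point", coordinates: [0, 0]},
                          distanceField: "distance",
                          spherical: true,
                          maxDistance: 500
                      }
                  }]).itcount());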
diff --git a/jstests/aggregation/bugs/server14670.js b/jstests/aggregation/bugs/server14670.js
index dc8a750e9db..adadb154da0 100644
--- a/jstests/aggregation/bugs/server14670.js
+++ b/jstests/aggregation/bugs/server14670.js
@@ -3,21 +3,19 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
+"use strict";
- var coll = db.substr;
- coll.drop();
+var coll = db.substr;
+coll.drop();
- // Need an empty document for the pipeline.
- coll.insert({});
+// Need an empty document for the pipeline.
+coll.insert({});
- assertErrorCode(coll,
- [{$project: {strLen: {$strLenBytes: 1}}}],
- 34473,
- "$strLenBytes requires a string argument.");
+assertErrorCode(coll,
+ [{$project: {strLen: {$strLenBytes: 1}}}],
+ 34473,
+ "$strLenBytes requires a string argument.");
- assertErrorCode(coll,
- [{$project: {strLen: {$strLenCP: 1}}}],
- 34471,
- "$strLenCP requires a string argument.");
+assertErrorCode(
+ coll, [{$project: {strLen: {$strLenCP: 1}}}], 34471, "$strLenCP requires a string argument.");
}());
diff --git a/jstests/aggregation/bugs/server14691.js b/jstests/aggregation/bugs/server14691.js
index 0ba010ac41a..2703f2dead9 100644
--- a/jstests/aggregation/bugs/server14691.js
+++ b/jstests/aggregation/bugs/server14691.js
@@ -1,52 +1,52 @@
// SERVER-14691: $avg aggregator should return null when it receives no input.
(function() {
- 'use strict';
+'use strict';
- var coll = db.accumulate_avg_sum_null;
+var coll = db.accumulate_avg_sum_null;
- // Test the $avg aggregator.
- coll.drop();
+// Test the $avg aggregator.
+coll.drop();
- // Null cases.
- assert.writeOK(coll.insert({a: 1, b: 2, c: 'string', d: null}));
+// Null cases.
+assert.writeOK(coll.insert({a: 1, b: 2, c: 'string', d: null}));
- // Missing field.
- var pipeline = [{$group: {_id: '$a', avg: {$avg: '$missing'}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
+// Missing field.
+var pipeline = [{$group: {_id: '$a', avg: {$avg: '$missing'}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
- // Non-numeric field.
- pipeline = [{$group: {_id: '$a', avg: {$avg: '$c'}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
+// Non-numeric field.
+pipeline = [{$group: {_id: '$a', avg: {$avg: '$c'}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
- // Field with value of null.
- pipeline = [{$group: {_id: '$a', avg: {$avg: '$d'}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
+// Field with value of null.
+pipeline = [{$group: {_id: '$a', avg: {$avg: '$d'}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
- // All three.
- coll.insert({a: 1, d: 'string'});
- coll.insert({a: 1});
- pipeline = [{$group: {_id: '$a', avg: {$avg: '$d'}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
+// All three.
+coll.insert({a: 1, d: 'string'});
+coll.insert({a: 1});
+pipeline = [{$group: {_id: '$a', avg: {$avg: '$d'}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: null}]);
- // Non-null cases.
- coll.drop();
- assert.writeOK(coll.insert({a: 1, b: 2}));
- pipeline = [{$group: {_id: '$a', avg: {$avg: '$b'}}}];
+// Non-null cases.
+coll.drop();
+assert.writeOK(coll.insert({a: 1, b: 2}));
+pipeline = [{$group: {_id: '$a', avg: {$avg: '$b'}}}];
- // One field.
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 2}]);
+// One field.
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 2}]);
- // Two fields.
- assert.writeOK(coll.insert({a: 1, b: 4}));
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 3}]);
+// Two fields.
+assert.writeOK(coll.insert({a: 1, b: 4}));
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 3}]);
- // Average of zero should still work.
- assert.writeOK(coll.insert({a: 1, b: -6}));
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 0}]);
+// Average of zero should still work.
+assert.writeOK(coll.insert({a: 1, b: -6}));
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 0}]);
- // Missing, null, or non-numeric fields should not error or affect the average.
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 1, b: 'string'}));
- assert.writeOK(coll.insert({a: 1, b: null}));
- assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 0}]);
+// Missing, null, or non-numeric fields should not error or affect the average.
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1, b: 'string'}));
+assert.writeOK(coll.insert({a: 1, b: null}));
+assert.eq(coll.aggregate(pipeline).toArray(), [{_id: 1, avg: 0}]);
}());
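Stated plainly, the contract pinned down above is: $avg skips missing, null, and non-numeric values, and returns null only when no numeric input remains. A sketch against a hypothetical avg_demo collection:

    var demo = db.avg_demo;
    demo.drop();
    demo.insert({k: 1, v: 10});
    demo.insert({k: 1, v: "skipped"});
    demo.insert({k: 1, v: null});
    demo.insert({k: 1});
    demo.insert({k: 2, v: "only non-numeric input"});
    // Group 1 averages its lone numeric value; group 2 has none, so it gets null.
    demo.aggregate([{$group: {_id: "$k", avg: {$avg: "$v"}}}, {$sort: {_id: 1}}])
        .forEach(printjson);
    // => {_id: 1, avg: 10} and {_id: 2, avg: null}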
diff --git a/jstests/aggregation/bugs/server14872.js b/jstests/aggregation/bugs/server14872.js
index 3be4018ac21..4787df5259b 100644
--- a/jstests/aggregation/bugs/server14872.js
+++ b/jstests/aggregation/bugs/server14872.js
@@ -4,36 +4,36 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
+'use strict';
- var coll = db.agg_concat_arrays_expr;
- coll.drop();
+var coll = db.agg_concat_arrays_expr;
+coll.drop();
- assert.writeOK(coll.insert({a: [1, 2], b: ['three'], c: [], d: [[3], 4], e: null, str: 'x'}));
+assert.writeOK(coll.insert({a: [1, 2], b: ['three'], c: [], d: [[3], 4], e: null, str: 'x'}));
- // Basic concatenation.
- var pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$b', '$c']}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{all: [1, 2, 'three']}]);
+// Basic concatenation.
+var pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$b', '$c']}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{all: [1, 2, 'three']}]);
- // Concatenation with nested arrays.
- pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$d']}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{all: [1, 2, [3], 4]}]);
+// Concatenation with nested arrays.
+pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$d']}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{all: [1, 2, [3], 4]}]);
- // Concatenation with 1 argument.
- pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a']}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{all: [1, 2]}]);
+// Concatenation with 1 argument.
+pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a']}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{all: [1, 2]}]);
- // Concatenation with no arguments.
- pipeline = [{$project: {_id: 0, all: {$concatArrays: []}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{all: []}]);
+// Concatenation with no arguments.
+pipeline = [{$project: {_id: 0, all: {$concatArrays: []}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{all: []}]);
- // Any nullish inputs will result in null.
- pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$e']}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{all: null}]);
- pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$f']}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{all: null}]);
+// Any nullish inputs will result in null.
+pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$e']}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{all: null}]);
+pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$f']}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{all: null}]);
- // Error on any non-array, non-null inputs.
- pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$str']}}}];
- assertErrorCode(coll, pipeline, 28664);
+// Error on any non-array, non-null inputs.
+pipeline = [{$project: {_id: 0, all: {$concatArrays: ['$a', '$str']}}}];
+assertErrorCode(coll, pipeline, 28664);
}());
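To restate the contract exercised here: $concatArrays joins its array arguments in order, any nullish argument (null, or a missing field) makes the whole result null, and any other non-array argument raises error 28664. A sketch against a hypothetical concat_demo collection:

    var demo = db.concat_demo;
    demo.drop();
    demo.insert({xs: [1, 2], ys: [3]});
    demo.aggregate([{
        $project: {
            _id: 0,
            joined: {$concatArrays: ["$xs", "$ys", [4]]},       // [1, 2, 3, 4]
            nullish: {$concatArrays: ["$xs", "$doesNotExist"]}  // null: missing field
        }
    }]).forEach(printjson);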
diff --git a/jstests/aggregation/bugs/server17224.js b/jstests/aggregation/bugs/server17224.js
index 888c99b808c..31d0e889b8c 100644
--- a/jstests/aggregation/bugs/server17224.js
+++ b/jstests/aggregation/bugs/server17224.js
@@ -1,25 +1,25 @@
// SERVER-17224 An aggregation result with exactly the right size could crash the server rather than
// returning an error.
(function() {
- 'use strict';
+'use strict';
- var t = db.server17224;
- t.drop();
+var t = db.server17224;
+t.drop();
- // first 63MB
- for (var i = 0; i < 63; i++) {
- t.insert({a: new Array(1024 * 1024 + 1).join('a')});
- }
+// first 63MB
+for (var i = 0; i < 63; i++) {
+ t.insert({a: new Array(1024 * 1024 + 1).join('a')});
+}
- // the remaining ~1MB with room for field names and other overhead
- t.insert({a: new Array(1024 * 1024 - 1105).join('a')});
+// the remaining ~1MB with room for field names and other overhead
+t.insert({a: new Array(1024 * 1024 - 1105).join('a')});
- // do not use cursor form, since it has a different workaroud for this issue.
- assert.commandFailed(db.runCommand({
- aggregate: t.getName(),
- pipeline: [{$match: {}}, {$group: {_id: null, arr: {$push: {a: '$a'}}}}]
- }));
+// Do not use the cursor form, since it has a different workaround for this issue.
+assert.commandFailed(db.runCommand({
+ aggregate: t.getName(),
+ pipeline: [{$match: {}}, {$group: {_id: null, arr: {$push: {a: '$a'}}}}]
+}));
- // Make sure the server is still up.
- assert.commandWorked(db.runCommand('ping'));
+// Make sure the server is still up.
+assert.commandWorked(db.runCommand('ping'));
}());
diff --git a/jstests/aggregation/bugs/server17943.js b/jstests/aggregation/bugs/server17943.js
index 075623c705d..6b510e2ddbc 100644
--- a/jstests/aggregation/bugs/server17943.js
+++ b/jstests/aggregation/bugs/server17943.js
@@ -4,80 +4,88 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
+'use strict';
- var coll = db.agg_filter_expr;
- coll.drop();
+var coll = db.agg_filter_expr;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4, 5]}));
- assert.writeOK(coll.insert({_id: 1, a: [2, 4]}));
- assert.writeOK(coll.insert({_id: 2, a: []}));
- assert.writeOK(coll.insert({_id: 3, a: [1]}));
- assert.writeOK(coll.insert({_id: 4, a: null}));
- assert.writeOK(coll.insert({_id: 5, a: undefined}));
- assert.writeOK(coll.insert({_id: 6}));
+assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4, 5]}));
+assert.writeOK(coll.insert({_id: 1, a: [2, 4]}));
+assert.writeOK(coll.insert({_id: 2, a: []}));
+assert.writeOK(coll.insert({_id: 3, a: [1]}));
+assert.writeOK(coll.insert({_id: 4, a: null}));
+assert.writeOK(coll.insert({_id: 5, a: undefined}));
+assert.writeOK(coll.insert({_id: 6}));
- // Create filter to only accept odd numbers.
- filterDoc = {input: '$a', as: 'x', cond: {$eq: [1, {$mod: ['$$x', 2]}]}};
- var expectedResults = [
- {_id: 0, b: [1, 3, 5]},
- {_id: 1, b: []},
- {_id: 2, b: []},
- {_id: 3, b: [1]},
- {_id: 4, b: null},
- {_id: 5, b: null},
- {_id: 6, b: null},
- ];
- var results =
- coll.aggregate([{$project: {b: {$filter: filterDoc}}}, {$sort: {_id: 1}}]).toArray();
- assert.eq(results, expectedResults);
+// Create filter to only accept odd numbers.
+filterDoc = {input: '$a', as: 'x', cond: {$eq: [1, {$mod: ['$$x', 2]}]}};
+var expectedResults = [
+ {_id: 0, b: [1, 3, 5]},
+ {_id: 1, b: []},
+ {_id: 2, b: []},
+ {_id: 3, b: [1]},
+ {_id: 4, b: null},
+ {_id: 5, b: null},
+ {_id: 6, b: null},
+];
+var results = coll.aggregate([{$project: {b: {$filter: filterDoc}}}, {$sort: {_id: 1}}]).toArray();
+assert.eq(results, expectedResults);
- // create filter that uses the default variable name in 'cond'
- filterDoc = {input: '$a', cond: {$eq: [2, '$$this']}};
- expectedResults = [
- {_id: 0, b: [2]},
- {_id: 1, b: [2]},
- {_id: 2, b: []},
- {_id: 3, b: []},
- {_id: 4, b: null},
- {_id: 5, b: null},
- {_id: 6, b: null},
- ];
- results = coll.aggregate([{$project: {b: {$filter: filterDoc}}}, {$sort: {_id: 1}}]).toArray();
- assert.eq(results, expectedResults);
+// Create a filter that uses the default variable name in 'cond'.
+filterDoc = {
+ input: '$a',
+ cond: {$eq: [2, '$$this']}
+};
+expectedResults = [
+ {_id: 0, b: [2]},
+ {_id: 1, b: [2]},
+ {_id: 2, b: []},
+ {_id: 3, b: []},
+ {_id: 4, b: null},
+ {_id: 5, b: null},
+ {_id: 6, b: null},
+];
+results = coll.aggregate([{$project: {b: {$filter: filterDoc}}}, {$sort: {_id: 1}}]).toArray();
+assert.eq(results, expectedResults);
- // Invalid filter expressions.
+// Invalid filter expressions.
- // '$filter' is not a document.
- var filterDoc = 'string';
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28646);
+// '$filter' is not a document.
+var filterDoc = 'string';
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28646);
- // Extra field(s).
- filterDoc = {input: '$a', as: 'x', cond: true, extra: 1};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28647);
+// Extra field(s).
+filterDoc = {input: '$a', as: 'x', cond: true, extra: 1};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28647);
- // Missing 'input'.
- filterDoc = {as: 'x', cond: true};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28648);
+// Missing 'input'.
+filterDoc = {
+ as: 'x',
+ cond: true
+};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28648);
- // Missing 'cond'.
- filterDoc = {input: '$a', as: 'x'};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28650);
+// Missing 'cond'.
+filterDoc = {input: '$a', as: 'x'};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28650);
- // 'as' is not a valid variable name.
- filterDoc = {input: '$a', as: '$x', cond: true};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 16867);
+// 'as' is not a valid variable name.
+filterDoc = {input: '$a', as: '$x', cond: true};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 16867);
- // 'input' is not an array.
- filterDoc = {input: 'string', as: 'x', cond: true};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
+// 'input' is not an array.
+filterDoc = {input: 'string', as: 'x', cond: true};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
- // 'cond' uses undefined variable name.
- filterDoc = {input: '$a', cond: {$eq: [1, '$$var']}};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 17276);
+// 'cond' uses undefined variable name.
+filterDoc = {
+ input: '$a',
+ cond: {$eq: [1, '$$var']}
+};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 17276);
- assert(coll.drop());
- assert.writeOK(coll.insert({a: 'string'}));
- filterDoc = {input: '$a', as: 'x', cond: true};
- assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
+assert(coll.drop());
+assert.writeOK(coll.insert({a: 'string'}));
+filterDoc = {input: '$a', as: 'x', cond: true};
+assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
}());
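As a compact illustration of the $filter semantics above (a nullish 'input' projects null; otherwise 'cond' runs once per element, bound to the 'as' variable or to the default '$$this'), here is a sketch against a hypothetical filter_demo collection:

    var demo = db.filter_demo;
    demo.drop();
    demo.insert({_id: 0, scores: [70, 92, 88, 45]});
    demo.insert({_id: 1, scores: null});
    // Keep only passing scores; documents with a nullish 'scores' project null.
    demo.aggregate([{
        $project: {
            passing: {$filter: {input: "$scores", as: "s", cond: {$gte: ["$$s", 60]}}}
        }
    }]).forEach(printjson);
    // => {_id: 0, passing: [70, 92, 88]} and {_id: 1, passing: null}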
diff --git a/jstests/aggregation/bugs/server18198.js b/jstests/aggregation/bugs/server18198.js
index a182195a864..9aa26451161 100644
--- a/jstests/aggregation/bugs/server18198.js
+++ b/jstests/aggregation/bugs/server18198.js
@@ -1,67 +1,67 @@
// SERVER-18198 check read pref is only applied when there is no $out stage
// in aggregate shell helper
(function() {
- "use strict";
- var t = db.server18198;
- t.drop();
+"use strict";
+var t = db.server18198;
+t.drop();
- var mongo = db.getMongo();
+var mongo = db.getMongo();
- try {
- var commandsRan = [];
- // hook in our patched mongo
- var mockMongo = {
- getSlaveOk: function() {
- return true;
- },
- runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1.0};
- },
- getReadPref: function() {
- return {mode: "secondaryPreferred"};
- },
- getReadPrefMode: function() {
- return "secondaryPreferred";
- },
- getMinWireVersion: function() {
- return mongo.getMinWireVersion();
- },
- getMaxWireVersion: function() {
- return mongo.getMaxWireVersion();
- },
- isReplicaSetMember: function() {
- return mongo.isReplicaSetMember();
- },
- isMongos: function() {
- return mongo.isMongos();
- },
- isCausalConsistency: function() {
- return false;
- },
- getClusterTime: function() {
- return mongo.getClusterTime();
- },
- };
+try {
+ var commandsRan = [];
+ // hook in our patched mongo
+ var mockMongo = {
+ getSlaveOk: function() {
+ return true;
+ },
+ runCommand: function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1.0};
+ },
+ getReadPref: function() {
+ return {mode: "secondaryPreferred"};
+ },
+ getReadPrefMode: function() {
+ return "secondaryPreferred";
+ },
+ getMinWireVersion: function() {
+ return mongo.getMinWireVersion();
+ },
+ getMaxWireVersion: function() {
+ return mongo.getMaxWireVersion();
+ },
+ isReplicaSetMember: function() {
+ return mongo.isReplicaSetMember();
+ },
+ isMongos: function() {
+ return mongo.isMongos();
+ },
+ isCausalConsistency: function() {
+ return false;
+ },
+ getClusterTime: function() {
+ return mongo.getClusterTime();
+ },
+ };
- db._mongo = mockMongo;
- db._session = new _DummyDriverSession(mockMongo);
+ db._mongo = mockMongo;
+ db._session = new _DummyDriverSession(mockMongo);
- // this query should not get a read pref
- t.aggregate([{$sort: {"x": 1}}, {$out: "foo"}]);
- assert.eq(commandsRan.length, 1);
- // check that it doesn't have a read preference
- assert(!commandsRan[0].cmd.hasOwnProperty("$readPreference"));
+ // this query should not get a read pref
+ t.aggregate([{$sort: {"x": 1}}, {$out: "foo"}]);
+ assert.eq(commandsRan.length, 1);
+ // check that it doesn't have a read preference
+ assert(!commandsRan[0].cmd.hasOwnProperty("$readPreference"));
- commandsRan = [];
+ commandsRan = [];
- t.aggregate([{$sort: {"x": 1}}]);
- // check another command was run
- assert.eq(commandsRan.length, 1);
- // check that it has a read preference
- assert(commandsRan[0].cmd.hasOwnProperty("$readPreference"));
- } finally {
- db._mongo = mongo;
- db._session = new _DummyDriverSession(mongo);
- }
+ t.aggregate([{$sort: {"x": 1}}]);
+ // check another command was run
+ assert.eq(commandsRan.length, 1);
+ // check that it has a read preference
+ assert(commandsRan[0].cmd.hasOwnProperty("$readPreference"));
+} finally {
+ db._mongo = mongo;
+ db._session = new _DummyDriverSession(mongo);
+}
})();
diff --git a/jstests/aggregation/bugs/server18222.js b/jstests/aggregation/bugs/server18222.js
index cea52b3970d..d27188bbb2b 100644
--- a/jstests/aggregation/bugs/server18222.js
+++ b/jstests/aggregation/bugs/server18222.js
@@ -1,43 +1,43 @@
// SERVER-18222: Add $isArray aggregation expression.
(function() {
- 'use strict';
- var coll = db.is_array_expr;
- coll.drop();
+'use strict';
+var coll = db.is_array_expr;
+coll.drop();
- // Non-array types.
- assert.writeOK(coll.insert({_id: 0, x: 0}));
- assert.writeOK(coll.insert({_id: 1, x: '0'}));
- assert.writeOK(coll.insert({_id: 2, x: new ObjectId()}));
- assert.writeOK(coll.insert({_id: 3, x: new NumberLong(0)}));
- assert.writeOK(coll.insert({_id: 4, x: {y: []}}));
- assert.writeOK(coll.insert({_id: 5, x: null}));
- assert.writeOK(coll.insert({_id: 6, x: NaN}));
- assert.writeOK(coll.insert({_id: 7, x: undefined}));
+// Non-array types.
+assert.writeOK(coll.insert({_id: 0, x: 0}));
+assert.writeOK(coll.insert({_id: 1, x: '0'}));
+assert.writeOK(coll.insert({_id: 2, x: new ObjectId()}));
+assert.writeOK(coll.insert({_id: 3, x: new NumberLong(0)}));
+assert.writeOK(coll.insert({_id: 4, x: {y: []}}));
+assert.writeOK(coll.insert({_id: 5, x: null}));
+assert.writeOK(coll.insert({_id: 6, x: NaN}));
+assert.writeOK(coll.insert({_id: 7, x: undefined}));
- // Array types.
- assert.writeOK(coll.insert({_id: 8, x: []}));
- assert.writeOK(coll.insert({_id: 9, x: [0]}));
- assert.writeOK(coll.insert({_id: 10, x: ['0']}));
+// Array types.
+assert.writeOK(coll.insert({_id: 8, x: []}));
+assert.writeOK(coll.insert({_id: 9, x: [0]}));
+assert.writeOK(coll.insert({_id: 10, x: ['0']}));
- // Project field is_array to represent whether the field x was an array.
- var results = coll.aggregate([
- {$sort: {_id: 1}},
- {$project: {isArray: {$isArray: '$x'}}},
- ])
- .toArray();
- var expectedResults = [
- {_id: 0, isArray: false},
- {_id: 1, isArray: false},
- {_id: 2, isArray: false},
- {_id: 3, isArray: false},
- {_id: 4, isArray: false},
- {_id: 5, isArray: false},
- {_id: 6, isArray: false},
- {_id: 7, isArray: false},
- {_id: 8, isArray: true},
- {_id: 9, isArray: true},
- {_id: 10, isArray: true},
- ];
+// Project field is_array to represent whether the field x was an array.
+var results = coll.aggregate([
+ {$sort: {_id: 1}},
+ {$project: {isArray: {$isArray: '$x'}}},
+ ])
+ .toArray();
+var expectedResults = [
+ {_id: 0, isArray: false},
+ {_id: 1, isArray: false},
+ {_id: 2, isArray: false},
+ {_id: 3, isArray: false},
+ {_id: 4, isArray: false},
+ {_id: 5, isArray: false},
+ {_id: 6, isArray: false},
+ {_id: 7, isArray: false},
+ {_id: 8, isArray: true},
+ {_id: 9, isArray: true},
+ {_id: 10, isArray: true},
+];
- assert.eq(results, expectedResults);
+assert.eq(results, expectedResults);
}());
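$isArray is a plain type predicate: true only for BSON arrays, false for every other type and for missing fields. A one-document sketch against a hypothetical is_array_demo collection:

    var demo = db.is_array_demo;
    demo.drop();
    demo.insert({a: [1, 2], b: "not an array"});
    demo.aggregate([{
        $project: {
            _id: 0,
            aIsArray: {$isArray: "$a"},        // true
            bIsArray: {$isArray: "$b"},        // false
            missingIsArray: {$isArray: "$nope"}  // false: missing is not an array
        }
    }]).forEach(printjson);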
diff --git a/jstests/aggregation/bugs/server18427.js b/jstests/aggregation/bugs/server18427.js
index f15c1f9e23e..fffbc51ef64 100644
--- a/jstests/aggregation/bugs/server18427.js
+++ b/jstests/aggregation/bugs/server18427.js
@@ -4,156 +4,151 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
- var coll = db.log_exponential_expressions;
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- var decimalE = NumberDecimal("2.718281828459045235360287471352662");
- var decimal1overE = NumberDecimal("0.3678794411714423215955237701614609");
-
- // Helper for testing that op returns expResult.
- function testOp(op, expResult) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
- }
-
- // $log, $log10, $ln.
-
- // Valid input: numeric/null/NaN, base positive and not equal to 1, arg positive.
- // - NumberDouble
- testOp({$log: [10, 10]}, 1);
- testOp({$log10: [10]}, 1);
- testOp({$ln: [Math.E]}, 1);
- // - NumberDecimal
- testOp({$log: [NumberDecimal("10"), NumberDecimal("10")]}, NumberDecimal("1"));
- testOp({$log10: [NumberDecimal("10")]}, NumberDecimal("1"));
- // The below answer is actually correct: the input is an approximation of E
- testOp({$ln: [decimalE]}, NumberDecimal("0.9999999999999999999999999999999998"));
- // All types converted to doubles.
- testOp({$log: [NumberLong("10"), NumberLong("10")]}, 1);
- testOp({$log10: [NumberLong("10")]}, 1);
- testOp({$ln: [NumberLong("1")]}, 0);
- // LLONG_MAX is converted to a double.
- testOp({$log: [NumberLong("9223372036854775807"), 10]}, 18.964889726830812);
- // Null inputs result in null.
- testOp({$log: [null, 10]}, null);
- testOp({$log: [10, null]}, null);
- testOp({$log: [null, NumberDecimal(10)]}, null);
- testOp({$log: [NumberDecimal(10), null]}, null);
- testOp({$log10: [null]}, null);
- testOp({$ln: [null]}, null);
- // NaN inputs result in NaN.
- testOp({$log: [NaN, 10]}, NaN);
- testOp({$log: [10, NaN]}, NaN);
- testOp({$log: [NaN, NumberDecimal(10)]}, NaN);
- testOp({$log: [NumberDecimal(10), NaN]}, NaN);
- testOp({$log10: [NaN]}, NaN);
- testOp({$ln: [NaN]}, NaN);
-
- // Invalid input: non-numeric/non-null, bases not positive or equal to 1, args not positive.
-
- // Args/bases must be numeric or null.
- assertErrorCode(coll, [{$project: {log: {$log: ["string", 5]}}}], 28756);
- assertErrorCode(coll, [{$project: {log: {$log: [5, "string"]}}}], 28757);
- assertErrorCode(coll, [{$project: {log10: {$log10: ["string"]}}}], 28765);
- assertErrorCode(coll, [{$project: {ln: {$ln: ["string"]}}}], 28765);
- // Args/bases cannot equal 0.
- assertErrorCode(coll, [{$project: {log: {$log: [0, 5]}}}], 28758);
- assertErrorCode(coll, [{$project: {log: {$log: [5, 0]}}}], 28759);
- assertErrorCode(coll, [{$project: {log10: {$log10: [0]}}}], 28761);
- assertErrorCode(coll, [{$project: {ln: {$ln: [0]}}}], 28766);
- assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(0), NumberDecimal(5)]}}}], 28758);
- assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(5), NumberDecimal(0)]}}}], 28759);
- assertErrorCode(coll, [{$project: {log10: {$log10: [NumberDecimal(0)]}}}], 28761);
- assertErrorCode(coll, [{$project: {ln: {$ln: [NumberDecimal(0)]}}}], 28766);
- // Args/bases cannot be negative.
- assertErrorCode(coll, [{$project: {log: {$log: [-1, 5]}}}], 28758);
- assertErrorCode(coll, [{$project: {log: {$log: [5, -1]}}}], 28759);
- assertErrorCode(coll, [{$project: {log10: {$log10: [-1]}}}], 28761);
- assertErrorCode(coll, [{$project: {ln: {$ln: [-1]}}}], 28766);
- assertErrorCode(
- coll, [{$project: {log: {$log: [NumberDecimal(-1), NumberDecimal(5)]}}}], 28758);
- assertErrorCode(
- coll, [{$project: {log: {$log: [NumberDecimal(5), NumberDecimal(-1)]}}}], 28759);
- assertErrorCode(coll, [{$project: {log10: {$log10: [NumberDecimal(-1)]}}}], 28761);
- assertErrorCode(coll, [{$project: {ln: {$ln: [NumberDecimal(-1)]}}}], 28766);
- // Base can't equal 1.
- assertErrorCode(coll, [{$project: {log: {$log: [5, 1]}}}], 28759);
- assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(5), NumberDecimal(1)]}}}], 28759);
-
- // $pow, $exp.
-
- // Valid input - numeric/null/NaN.
-
- // $pow -- if either input is a double return a double.
- testOp({$pow: [10, 2]}, 100);
- testOp({$pow: [1 / 2, -1]}, 2);
- testOp({$pow: [-2, 2]}, 4);
- testOp({$pow: [NumberInt("2"), 2]}, 4);
- testOp({$pow: [-2, NumberInt("2")]}, 4);
- // $pow -- if either input is a NumberDecimal, return a NumberDecimal
- testOp({$pow: [NumberDecimal("10.0"), -2]},
- NumberDecimal("0.01000000000000000000000000000000000"));
- testOp({$pow: [0.5, NumberDecimal("-1")]},
- NumberDecimal("2.000000000000000000000000000000000"));
- testOp({$pow: [-2, NumberDecimal("2")]}, NumberDecimal("4.000000000000000000000000000000000"));
- testOp({$pow: [NumberInt("2"), NumberDecimal("2")]},
- NumberDecimal("4.000000000000000000000000000000000"));
- testOp({$pow: [NumberDecimal("-2.0"), NumberInt("2")]},
- NumberDecimal("4.000000000000000000000000000000000"));
- testOp({$pow: [NumberDecimal("10.0"), 2]},
- NumberDecimal("100.0000000000000000000000000000000"));
-
- // If exponent is negative and base not -1, 0, or 1, return a double.
- testOp({$pow: [NumberLong("2"), NumberLong("-1")]}, 1 / 2);
- testOp({$pow: [NumberInt("4"), NumberInt("-1")]}, 1 / 4);
- testOp({$pow: [NumberInt("4"), NumberLong("-1")]}, 1 / 4);
- testOp({$pow: [NumberInt("1"), NumberLong("-2")]}, NumberLong("1"));
- testOp({$pow: [NumberInt("-1"), NumberLong("-2")]}, NumberLong("1"));
- testOp({$pow: [NumberLong("-1"), NumberLong("-3")]}, NumberLong("-1"));
- // If result would overflow a long, return a double.
- testOp({$pow: [NumberInt("2"), NumberLong("63")]}, 9223372036854776000);
- // Exact decimal result
- testOp({$pow: [NumberInt("5"), NumberDecimal("-112")]},
- NumberDecimal("5192296858534827628530496329220096E-112"));
-
- // Result would be incorrect if double were returned.
- testOp({$pow: [NumberInt("3"), NumberInt("35")]}, NumberLong("50031545098999707"));
-
- // Else if either input is a long, return a long.
- testOp({$pow: [NumberInt("-2"), NumberLong("63")]}, NumberLong("-9223372036854775808"));
- testOp({$pow: [NumberInt("4"), NumberLong("2")]}, NumberLong("16"));
- testOp({$pow: [NumberLong("4"), NumberInt("2")]}, NumberLong("16"));
- testOp({$pow: [NumberLong("4"), NumberLong("2")]}, NumberLong("16"));
-
- // Else return an int if it fits.
- testOp({$pow: [NumberInt("4"), NumberInt("2")]}, 16);
-
- // $exp always returns doubles for non-zero non-decimal inputs, since e is a double.
- testOp({$exp: [NumberInt("-1")]}, 1 / Math.E);
- testOp({$exp: [NumberLong("1")]}, Math.E);
- // $exp returns decimal results for decimal inputs
- testOp({$exp: [NumberDecimal("-1")]}, decimal1overE);
- testOp({$exp: [NumberDecimal("1")]}, decimalE);
- // Null input results in null.
- testOp({$pow: [null, 2]}, null);
- testOp({$pow: [1 / 2, null]}, null);
- testOp({$pow: [null, NumberDecimal(2)]}, null);
- testOp({$pow: [NumberDecimal("0.5"), null]}, null);
- testOp({$exp: [null]}, null);
- // NaN input results in NaN.
- testOp({$pow: [NaN, 2]}, NaN);
- testOp({$pow: [1 / 2, NaN]}, NaN);
- testOp({$pow: [NaN, NumberDecimal(2)]}, NumberDecimal("NaN"));
- testOp({$pow: [NumberDecimal("0.5"), NaN]}, NumberDecimal("NaN"));
- testOp({$exp: [NaN]}, NaN);
-
- // Invalid inputs - non-numeric/non-null types, or 0 to a negative exponent.
- assertErrorCode(coll, [{$project: {pow: {$pow: [0, NumberLong("-1")]}}}], 28764);
- assertErrorCode(coll, [{$project: {pow: {$pow: ["string", 5]}}}], 28762);
- assertErrorCode(coll, [{$project: {pow: {$pow: [5, "string"]}}}], 28763);
- assertErrorCode(coll, [{$project: {exp: {$exp: ["string"]}}}], 28765);
- assertErrorCode(coll, [{$project: {pow: {$pow: [NumberDecimal(0), NumberLong("-1")]}}}], 28764);
- assertErrorCode(coll, [{$project: {pow: {$pow: ["string", NumberDecimal(5)]}}}], 28762);
+'use strict';
+var coll = db.log_exponential_expressions;
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+var decimalE = NumberDecimal("2.718281828459045235360287471352662");
+var decimal1overE = NumberDecimal("0.3678794411714423215955237701614609");
+
+// Helper for testing that op returns expResult.
+function testOp(op, expResult) {
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
+}
+
+// $log, $log10, $ln.
+
+// Valid input: numeric/null/NaN, base positive and not equal to 1, arg positive.
+// - NumberDouble
+testOp({$log: [10, 10]}, 1);
+testOp({$log10: [10]}, 1);
+testOp({$ln: [Math.E]}, 1);
+// - NumberDecimal
+testOp({$log: [NumberDecimal("10"), NumberDecimal("10")]}, NumberDecimal("1"));
+testOp({$log10: [NumberDecimal("10")]}, NumberDecimal("1"));
+// The answer below is actually correct: the input is only an approximation of e.
+testOp({$ln: [decimalE]}, NumberDecimal("0.9999999999999999999999999999999998"));
+// All types converted to doubles.
+testOp({$log: [NumberLong("10"), NumberLong("10")]}, 1);
+testOp({$log10: [NumberLong("10")]}, 1);
+testOp({$ln: [NumberLong("1")]}, 0);
+// LLONG_MAX is converted to a double.
+testOp({$log: [NumberLong("9223372036854775807"), 10]}, 18.964889726830812);
+// Null inputs result in null.
+testOp({$log: [null, 10]}, null);
+testOp({$log: [10, null]}, null);
+testOp({$log: [null, NumberDecimal(10)]}, null);
+testOp({$log: [NumberDecimal(10), null]}, null);
+testOp({$log10: [null]}, null);
+testOp({$ln: [null]}, null);
+// NaN inputs result in NaN.
+testOp({$log: [NaN, 10]}, NaN);
+testOp({$log: [10, NaN]}, NaN);
+testOp({$log: [NaN, NumberDecimal(10)]}, NaN);
+testOp({$log: [NumberDecimal(10), NaN]}, NaN);
+testOp({$log10: [NaN]}, NaN);
+testOp({$ln: [NaN]}, NaN);
+
+// Invalid input: non-numeric/non-null types; bases that are zero, negative, or equal to 1; args
+// that are not positive.
+
+// Args/bases must be numeric or null.
+assertErrorCode(coll, [{$project: {log: {$log: ["string", 5]}}}], 28756);
+assertErrorCode(coll, [{$project: {log: {$log: [5, "string"]}}}], 28757);
+assertErrorCode(coll, [{$project: {log10: {$log10: ["string"]}}}], 28765);
+assertErrorCode(coll, [{$project: {ln: {$ln: ["string"]}}}], 28765);
+// Args/bases cannot equal 0.
+assertErrorCode(coll, [{$project: {log: {$log: [0, 5]}}}], 28758);
+assertErrorCode(coll, [{$project: {log: {$log: [5, 0]}}}], 28759);
+assertErrorCode(coll, [{$project: {log10: {$log10: [0]}}}], 28761);
+assertErrorCode(coll, [{$project: {ln: {$ln: [0]}}}], 28766);
+assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(0), NumberDecimal(5)]}}}], 28758);
+assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(5), NumberDecimal(0)]}}}], 28759);
+assertErrorCode(coll, [{$project: {log10: {$log10: [NumberDecimal(0)]}}}], 28761);
+assertErrorCode(coll, [{$project: {ln: {$ln: [NumberDecimal(0)]}}}], 28766);
+// Args/bases cannot be negative.
+assertErrorCode(coll, [{$project: {log: {$log: [-1, 5]}}}], 28758);
+assertErrorCode(coll, [{$project: {log: {$log: [5, -1]}}}], 28759);
+assertErrorCode(coll, [{$project: {log10: {$log10: [-1]}}}], 28761);
+assertErrorCode(coll, [{$project: {ln: {$ln: [-1]}}}], 28766);
+assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(-1), NumberDecimal(5)]}}}], 28758);
+assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(5), NumberDecimal(-1)]}}}], 28759);
+assertErrorCode(coll, [{$project: {log10: {$log10: [NumberDecimal(-1)]}}}], 28761);
+assertErrorCode(coll, [{$project: {ln: {$ln: [NumberDecimal(-1)]}}}], 28766);
+// Base can't equal 1.
+assertErrorCode(coll, [{$project: {log: {$log: [5, 1]}}}], 28759);
+assertErrorCode(coll, [{$project: {log: {$log: [NumberDecimal(5), NumberDecimal(1)]}}}], 28759);
+
+// $pow, $exp.
+
+// Valid input - numeric/null/NaN.
+
+// $pow -- if either input is a double return a double.
+testOp({$pow: [10, 2]}, 100);
+testOp({$pow: [1 / 2, -1]}, 2);
+testOp({$pow: [-2, 2]}, 4);
+testOp({$pow: [NumberInt("2"), 2]}, 4);
+testOp({$pow: [-2, NumberInt("2")]}, 4);
+// $pow -- if either input is a NumberDecimal, return a NumberDecimal
+testOp({$pow: [NumberDecimal("10.0"), -2]}, NumberDecimal("0.01000000000000000000000000000000000"));
+testOp({$pow: [0.5, NumberDecimal("-1")]}, NumberDecimal("2.000000000000000000000000000000000"));
+testOp({$pow: [-2, NumberDecimal("2")]}, NumberDecimal("4.000000000000000000000000000000000"));
+testOp({$pow: [NumberInt("2"), NumberDecimal("2")]},
+ NumberDecimal("4.000000000000000000000000000000000"));
+testOp({$pow: [NumberDecimal("-2.0"), NumberInt("2")]},
+ NumberDecimal("4.000000000000000000000000000000000"));
+testOp({$pow: [NumberDecimal("10.0"), 2]}, NumberDecimal("100.0000000000000000000000000000000"));
+
+// If exponent is negative and base not -1, 0, or 1, return a double.
+testOp({$pow: [NumberLong("2"), NumberLong("-1")]}, 1 / 2);
+testOp({$pow: [NumberInt("4"), NumberInt("-1")]}, 1 / 4);
+testOp({$pow: [NumberInt("4"), NumberLong("-1")]}, 1 / 4);
+testOp({$pow: [NumberInt("1"), NumberLong("-2")]}, NumberLong("1"));
+testOp({$pow: [NumberInt("-1"), NumberLong("-2")]}, NumberLong("1"));
+testOp({$pow: [NumberLong("-1"), NumberLong("-3")]}, NumberLong("-1"));
+// If result would overflow a long, return a double.
+testOp({$pow: [NumberInt("2"), NumberLong("63")]}, 9223372036854776000);
+// Exact decimal result
+testOp({$pow: [NumberInt("5"), NumberDecimal("-112")]},
+ NumberDecimal("5192296858534827628530496329220096E-112"));
+
+// Result would be incorrect if double were returned.
+testOp({$pow: [NumberInt("3"), NumberInt("35")]}, NumberLong("50031545098999707"));
+
+// Else if either input is a long, return a long.
+testOp({$pow: [NumberInt("-2"), NumberLong("63")]}, NumberLong("-9223372036854775808"));
+testOp({$pow: [NumberInt("4"), NumberLong("2")]}, NumberLong("16"));
+testOp({$pow: [NumberLong("4"), NumberInt("2")]}, NumberLong("16"));
+testOp({$pow: [NumberLong("4"), NumberLong("2")]}, NumberLong("16"));
+
+// Else return an int if it fits.
+testOp({$pow: [NumberInt("4"), NumberInt("2")]}, 16);
+
+// $exp always returns doubles for non-zero non-decimal inputs, since e is a double.
+testOp({$exp: [NumberInt("-1")]}, 1 / Math.E);
+testOp({$exp: [NumberLong("1")]}, Math.E);
+// $exp returns decimal results for decimal inputs
+testOp({$exp: [NumberDecimal("-1")]}, decimal1overE);
+testOp({$exp: [NumberDecimal("1")]}, decimalE);
+// Null input results in null.
+testOp({$pow: [null, 2]}, null);
+testOp({$pow: [1 / 2, null]}, null);
+testOp({$pow: [null, NumberDecimal(2)]}, null);
+testOp({$pow: [NumberDecimal("0.5"), null]}, null);
+testOp({$exp: [null]}, null);
+// NaN input results in NaN.
+testOp({$pow: [NaN, 2]}, NaN);
+testOp({$pow: [1 / 2, NaN]}, NaN);
+testOp({$pow: [NaN, NumberDecimal(2)]}, NumberDecimal("NaN"));
+testOp({$pow: [NumberDecimal("0.5"), NaN]}, NumberDecimal("NaN"));
+testOp({$exp: [NaN]}, NaN);
+
+// Invalid inputs - non-numeric/non-null types, or 0 to a negative exponent.
+assertErrorCode(coll, [{$project: {pow: {$pow: [0, NumberLong("-1")]}}}], 28764);
+assertErrorCode(coll, [{$project: {pow: {$pow: ["string", 5]}}}], 28762);
+assertErrorCode(coll, [{$project: {pow: {$pow: [5, "string"]}}}], 28763);
+assertErrorCode(coll, [{$project: {exp: {$exp: ["string"]}}}], 28765);
+assertErrorCode(coll, [{$project: {pow: {$pow: [NumberDecimal(0), NumberLong("-1")]}}}], 28764);
+assertErrorCode(coll, [{$project: {pow: {$pow: ["string", NumberDecimal(5)]}}}], 28762);
}());
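The $pow return-type rules the test walks through can be summarized: a NumberDecimal operand yields NumberDecimal; a double operand, or a negative exponent with a base other than -1, 0, or 1, yields a double; otherwise a long operand yields a long; otherwise an int is returned when the result fits. A sketch against a hypothetical pow_demo collection:

    var demo = db.pow_demo;
    demo.drop();
    demo.insert({});
    demo.aggregate([{
        $project: {
            _id: 0,
            asInt: {$pow: [NumberInt("3"), NumberInt("4")]},      // 81, fits an int
            asLong: {$pow: [NumberLong("4"), NumberInt("2")]},    // NumberLong(16)
            asDouble: {$pow: [NumberInt("2"), NumberInt("-3")]},  // 0.125, negative exponent
            asDecimal: {$pow: [NumberDecimal("2"), 10]}           // NumberDecimal("1024.00...")
        }
    }]).forEach(printjson);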
diff --git a/jstests/aggregation/bugs/server20163.js b/jstests/aggregation/bugs/server20163.js
index e61ba606c24..a03e3c70fbe 100644
--- a/jstests/aggregation/bugs/server20163.js
+++ b/jstests/aggregation/bugs/server20163.js
@@ -3,138 +3,204 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
-
- var coll = db.zip;
- coll.drop();
-
- coll.insert({'long': [1, 2, 3], 'short': ['x', 'y']});
-
- var zipObj = 3;
- assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
- 34460,
- "$zip requires an object" + " as an argument.");
-
- zipObj = {inputs: []};
- assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
- 34465,
- "$zip requires at least" + " one input array");
-
- zipObj = {inputs: {"a": "b"}};
- assertErrorCode(coll, [{$project: {zipped: {$zip: zipObj}}}], 34461, "inputs is not an array");
-
- zipObj = {inputs: ["$a"], defaults: ["A"]};
- assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
- 34466,
- "cannot specify defaults" + " unless useLongestLength is true.");
-
- zipObj = {inputs: ["$a"], defaults: ["A", "B"], useLongestLength: true};
- assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
- 34467,
- "inputs and defaults" + " must be the same length.");
-
- zipObj = {inputs: ["$a"], defaults: {"a": "b"}};
- assertErrorCode(
- coll, [{$project: {zipped: {$zip: zipObj}}}], 34462, "defaults is not an" + " array");
-
- zipObj = {inputs: ["$a"], defaults: ["A"], useLongestLength: 1};
- assertErrorCode(
- coll, [{$project: {zipped: {$zip: zipObj}}}], 34463, "useLongestLength is not" + " a bool");
-
- zipObj = {inputs: ["$a", "$b"], defaults: ["A"], notAField: 1};
- assertErrorCode(coll, [{$project: {zipped: {$zip: zipObj}}}], 34464, "unknown argument");
-
- zipObj = {inputs: ["A", "B"]};
- assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
- 34468,
- "an element of inputs" + " was not an array.");
-
- zipObj = {inputs: [[1, 2, 3], ["A", "B", "C"]]};
- var res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- var output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, "C"]]);
-
- zipObj = {inputs: [[1, 2, 3], null]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, null);
-
- zipObj = {inputs: [null, [1, 2, 3]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, null);
-
- zipObj = {inputs: ["$missing", [1, 2, 3]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, null);
-
- zipObj = {inputs: [undefined, [1, 2, 3]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, null);
-
- zipObj = {inputs: [[1, 2, 3], ["A", "B"]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[1, "A"], [2, "B"]]);
-
- zipObj = {inputs: [["A", "B"], [1, 2, 3]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [["A", 1], ["B", 2]]);
-
- zipObj = {inputs: [[], []]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, []);
-
- zipObj = {inputs: [["$short"], ["$long"]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[['x', 'y'], [1, 2, 3]]]);
-
- zipObj = {inputs: ["$short", "$long"]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [['x', 1], ['y', 2]]);
-
- zipObj = {inputs: [["$long"]]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[[1, 2, 3]]]);
-
- zipObj = {inputs: [[1, 2, 3], ['a', 'b', 'c'], ['c', 'b', 'a']]};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[1, 'a', 'c'], [2, 'b', 'b'], [3, 'c', 'a']]);
-
- zipObj = {inputs: [[1, 2, 3], ["A", "B"]], defaults: ["C", "D"], useLongestLength: true};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, "D"]]);
-
- zipObj = {inputs: [[1, 2, 3], ["A", "B"]], useLongestLength: true};
- res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
- output = res.toArray();
- assert.eq(1, output.length);
- assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, null]]);
+"use strict";
+
+var coll = db.zip;
+coll.drop();
+
+coll.insert({'long': [1, 2, 3], 'short': ['x', 'y']});
+
+var zipObj = 3;
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34460,
+ "$zip requires an object" +
+ " as an argument.");
+
+zipObj = {
+ inputs: []
+};
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34465,
+ "$zip requires at least" +
+ " one input array");
+
+zipObj = {
+ inputs: {"a": "b"}
+};
+assertErrorCode(coll, [{$project: {zipped: {$zip: zipObj}}}], 34461, "inputs is not an array");
+
+zipObj = {
+ inputs: ["$a"],
+ defaults: ["A"]
+};
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34466,
+ "cannot specify defaults" +
+ " unless useLongestLength is true.");
+
+zipObj = {
+ inputs: ["$a"],
+ defaults: ["A", "B"],
+ useLongestLength: true
+};
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34467,
+ "inputs and defaults" +
+ " must be the same length.");
+
+zipObj = {
+ inputs: ["$a"],
+ defaults: {"a": "b"}
+};
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34462,
+ "defaults is not an" +
+ " array");
+
+zipObj = {
+ inputs: ["$a"],
+ defaults: ["A"],
+ useLongestLength: 1
+};
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34463,
+ "useLongestLength is not" +
+ " a bool");
+
+zipObj = {
+ inputs: ["$a", "$b"],
+ defaults: ["A"],
+ notAField: 1
+};
+assertErrorCode(coll, [{$project: {zipped: {$zip: zipObj}}}], 34464, "unknown argument");
+
+zipObj = {
+ inputs: ["A", "B"]
+};
+assertErrorCode(coll,
+ [{$project: {zipped: {$zip: zipObj}}}],
+ 34468,
+ "an element of inputs" +
+ " was not an array.");
+
+zipObj = {
+ inputs: [[1, 2, 3], ["A", "B", "C"]]
+};
+var res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+var output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, "C"]]);
+
+zipObj = {
+ inputs: [[1, 2, 3], null]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, null);
+
+zipObj = {
+ inputs: [null, [1, 2, 3]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, null);
+
+zipObj = {
+ inputs: ["$missing", [1, 2, 3]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, null);
+
+zipObj = {
+ inputs: [undefined, [1, 2, 3]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, null);
+
+zipObj = {
+ inputs: [[1, 2, 3], ["A", "B"]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[1, "A"], [2, "B"]]);
+
+zipObj = {
+ inputs: [["A", "B"], [1, 2, 3]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [["A", 1], ["B", 2]]);
+
+zipObj = {
+ inputs: [[], []]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, []);
+
+zipObj = {
+ inputs: [["$short"], ["$long"]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[['x', 'y'], [1, 2, 3]]]);
+
+zipObj = {
+ inputs: ["$short", "$long"]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [['x', 1], ['y', 2]]);
+
+zipObj = {
+ inputs: [["$long"]]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[[1, 2, 3]]]);
+
+zipObj = {
+ inputs: [[1, 2, 3], ['a', 'b', 'c'], ['c', 'b', 'a']]
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[1, 'a', 'c'], [2, 'b', 'b'], [3, 'c', 'a']]);
+
+zipObj = {
+ inputs: [[1, 2, 3], ["A", "B"]],
+ defaults: ["C", "D"],
+ useLongestLength: true
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, "D"]]);
+
+zipObj = {
+ inputs: [[1, 2, 3], ["A", "B"]],
+ useLongestLength: true
+};
+res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
+output = res.toArray();
+assert.eq(1, output.length);
+assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, null]]);
}());
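In short, $zip pairs the i-th elements of its input arrays, truncating to the shortest input unless useLongestLength is true, in which case short inputs are padded from 'defaults' (or with null). A sketch against a hypothetical zip_demo collection:

    var demo = db.zip_demo;
    demo.drop();
    demo.insert({names: ["a", "b", "c"], ranks: [1, 2]});
    demo.aggregate([{
        $project: {
            _id: 0,
            truncated: {$zip: {inputs: ["$names", "$ranks"]}},
            padded: {
                $zip: {inputs: ["$names", "$ranks"], useLongestLength: true, defaults: [null, 0]}
            }
        }
    }]).forEach(printjson);
    // truncated: [["a", 1], ["b", 2]]
    // padded:    [["a", 1], ["b", 2], ["c", 0]]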
diff --git a/jstests/aggregation/bugs/server20168.js b/jstests/aggregation/bugs/server20168.js
index 2ff8c6e53cd..9a886bbc279 100644
--- a/jstests/aggregation/bugs/server20168.js
+++ b/jstests/aggregation/bugs/server20168.js
@@ -1,39 +1,38 @@
// SERVER-20168: Add option to $unwind to output a null result for empty arrays.
(function() {
- "use strict";
+"use strict";
- var coll = db.server20168;
- coll.drop();
+var coll = db.server20168;
+coll.drop();
- // Should return no results on a non-existent collection.
- var results = coll.aggregate([{$unwind: {path: "$x"}}]).toArray();
- assert.eq(0, results.length, "$unwind returned the wrong number of results");
+// Should return no results on a non-existent collection.
+var results = coll.aggregate([{$unwind: {path: "$x"}}]).toArray();
+assert.eq(0, results.length, "$unwind returned the wrong number of results");
- /**
- * Asserts that with the input 'inputDoc', an $unwind stage on 'unwindPath' should produce no
- * results if preserveNullAndEmptyArrays is not specified, and produces one result, equal to
- * 'outputDoc', if it is specified.
- */
- function testPreserveNullAndEmptyArraysParam(inputDoc, unwindPath, outputDoc) {
- coll.drop();
- assert.writeOK(coll.insert(inputDoc));
+/**
+ * Asserts that with the input 'inputDoc', an $unwind stage on 'unwindPath' should produce no
+ * results if preserveNullAndEmptyArrays is not specified, and produces one result, equal to
+ * 'outputDoc', if it is specified.
+ */
+function testPreserveNullAndEmptyArraysParam(inputDoc, unwindPath, outputDoc) {
+ coll.drop();
+ assert.writeOK(coll.insert(inputDoc));
- // If preserveNullAndEmptyArrays is passed, we should get an output document.
- var preservedResults =
- coll.aggregate([{$unwind: {path: unwindPath, preserveNullAndEmptyArrays: true}}])
- .toArray();
- assert.eq(1, preservedResults.length, "$unwind returned the wrong number of results");
- assert.eq(preservedResults[0],
- outputDoc,
- "Unexpected result for an $unwind with preserveNullAndEmptyArrays " +
- "(input was " + tojson(inputDoc) + ")");
+ // If preserveNullAndEmptyArrays is passed, we should get an output document.
+ var preservedResults =
+ coll.aggregate([{$unwind: {path: unwindPath, preserveNullAndEmptyArrays: true}}]).toArray();
+ assert.eq(1, preservedResults.length, "$unwind returned the wrong number of results");
+ assert.eq(preservedResults[0],
+ outputDoc,
+ "Unexpected result for an $unwind with preserveNullAndEmptyArrays " +
+ "(input was " + tojson(inputDoc) + ")");
- // If not, we should get no outputs.
- var defaultResults = coll.aggregate([{$unwind: {path: unwindPath}}]).toArray();
- assert.eq(0, defaultResults.length, "$unwind returned the wrong number of results");
- }
+ // If not, we should get no outputs.
+ var defaultResults = coll.aggregate([{$unwind: {path: unwindPath}}]).toArray();
+ assert.eq(0, defaultResults.length, "$unwind returned the wrong number of results");
+}
- testPreserveNullAndEmptyArraysParam({_id: 0}, "$x", {_id: 0});
- testPreserveNullAndEmptyArraysParam({_id: 0, x: null}, "$x", {_id: 0, x: null});
- testPreserveNullAndEmptyArraysParam({_id: 0, x: []}, "$x", {_id: 0});
+testPreserveNullAndEmptyArraysParam({_id: 0}, "$x", {_id: 0});
+testPreserveNullAndEmptyArraysParam({_id: 0, x: null}, "$x", {_id: 0, x: null});
+testPreserveNullAndEmptyArraysParam({_id: 0, x: []}, "$x", {_id: 0});
}());
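For orientation, preserveNullAndEmptyArrays changes $unwind's handling of missing, null, and empty-array paths from "drop the document" to "emit it once". A sketch against a hypothetical unwind_demo collection:

    var demo = db.unwind_demo;
    demo.drop();
    demo.insert({_id: 0, tags: ["x", "y"]});
    demo.insert({_id: 1, tags: []});
    demo.insert({_id: 2});
    // Default: only _id 0 survives, once per element (2 documents out).
    print(demo.aggregate([{$unwind: {path: "$tags"}}]).itcount());  // 2
    // Preserving: _id 1 and _id 2 each emit one document without 'tags' (4 out).
    print(demo.aggregate([{$unwind: {path: "$tags", preserveNullAndEmptyArrays: true}}])
              .itcount());  // 4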
diff --git a/jstests/aggregation/bugs/server20169.js b/jstests/aggregation/bugs/server20169.js
index 27995b8030c..2b5a969f803 100644
--- a/jstests/aggregation/bugs/server20169.js
+++ b/jstests/aggregation/bugs/server20169.js
@@ -3,59 +3,67 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
-
- var coll = db.range;
- coll.drop();
-
- // We need an input document to receive an output document.
- coll.insert({});
-
- var rangeObj = [1];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 28667,
- "range requires two" + " or three arguments");
-
- rangeObj = ["a", 1];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34443,
- "range requires a" + " numeric starting value");
-
- rangeObj = [1.1, 1];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34444,
- "range requires an" + " integral starting value");
-
- rangeObj = [1, "a"];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34445,
- "range requires a" + " numeric ending value");
-
- rangeObj = [1, 1.1];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34446,
- "range requires an" + " integral ending value");
-
- rangeObj = [1, 3, "a"];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34447,
- "range requires a" + " numeric step value");
-
- rangeObj = [1, 3, 1.1];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34448,
- "range requires an" + " integral step value");
-
- rangeObj = [1, 3, 0];
- assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
- 34449,
- "range requires a" + " non-zero step value");
+"use strict";
+
+var coll = db.range;
+coll.drop();
+
+// We need an input document to receive an output document.
+coll.insert({});
+
+var rangeObj = [1];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 28667,
+ "range requires two" +
+ " or three arguments");
+
+rangeObj = ["a", 1];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34443,
+ "range requires a" +
+ " numeric starting value");
+
+rangeObj = [1.1, 1];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34444,
+ "range requires an" +
+ " integral starting value");
+
+rangeObj = [1, "a"];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34445,
+ "range requires a" +
+ " numeric ending value");
+
+rangeObj = [1, 1.1];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34446,
+ "range requires an" +
+ " integral ending value");
+
+rangeObj = [1, 3, "a"];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34447,
+ "range requires a" +
+ " numeric step value");
+
+rangeObj = [1, 3, 1.1];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34448,
+ "range requires an" +
+ " integral step value");
+
+rangeObj = [1, 3, 0];
+assertErrorCode(coll,
+ [{$project: {range: {$range: rangeObj}}}],
+ 34449,
+ "range requires a" +
+ " non-zero step value");
}());
diff --git a/jstests/aggregation/bugs/server21632.js b/jstests/aggregation/bugs/server21632.js
index 944ca114ab6..c23d8836bea 100644
--- a/jstests/aggregation/bugs/server21632.js
+++ b/jstests/aggregation/bugs/server21632.js
@@ -11,77 +11,76 @@
// 2. We should not see any duplicate documents in any one $sample (this is only guaranteed if
// there are no ongoing write operations).
(function() {
- "use strict";
-
- var coll = db.server21632;
- coll.drop();
-
- // If there is no collection, or no documents in the collection, we should not get any results
- // from a sample.
- assert.eq([], coll.aggregate([{$sample: {size: 1}}]).toArray());
- assert.eq([], coll.aggregate([{$sample: {size: 10}}]).toArray());
-
- db.createCollection(coll.getName());
-
- // Test if we are running WT + LSM and if so, skip the test.
- // WiredTiger LSM random cursor implementation doesn't currently give random enough
- // distribution to pass this test case, so disable the test when checking an LSM
- // configuration for now. We will need revisit this before releasing WiredTiger LSM
- // as a supported file type. (See: WT-2403 for details on forthcoming changes)
-
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
-
- if (storageEngine == "wiredTiger" && coll.stats().wiredTiger.type == 'lsm') {
- return;
- }
-
- assert.eq([], coll.aggregate([{$sample: {size: 1}}]).toArray());
- assert.eq([], coll.aggregate([{$sample: {size: 10}}]).toArray());
-
- // If there is only one document, we should get that document.
- var paddingStr = "abcdefghijklmnopqrstuvwxyz";
- var firstDoc = {_id: 0, paddingStr: paddingStr};
- assert.writeOK(coll.insert(firstDoc));
- assert.eq([firstDoc], coll.aggregate([{$sample: {size: 1}}]).toArray());
- assert.eq([firstDoc], coll.aggregate([{$sample: {size: 10}}]).toArray());
-
- // Insert a bunch of documents.
- var bulk = coll.initializeUnorderedBulkOp();
- var nDocs = 1000;
- for (var id = 1; id < nDocs; id++) {
- bulk.insert({_id: id, paddingStr: paddingStr});
- }
- bulk.execute();
-
- // Will contain a document's _id as a key if we've ever seen that document.
- var cumulativeSeenIds = {};
- var sampleSize = 10;
-
- jsTestLog("About to do repeated samples, explain output: " +
- tojson(coll.explain().aggregate([{$sample: {size: sampleSize}}])));
-
- // Repeatedly ask for small samples of documents to get a cumulative sample of size 'nDocs'.
- for (var i = 0; i < nDocs / sampleSize; i++) {
- var results = coll.aggregate([{$sample: {size: sampleSize}}]).toArray();
-
- assert.eq(
- results.length, sampleSize, "$sample did not return the expected number of results");
-
- // Check that there are no duplicate documents in the result of any single sample.
- var idsThisSample = {};
- results.forEach(function recordId(result) {
- assert.lte(result._id, nDocs, "$sample returned an unknown document");
- assert(!idsThisSample[result._id],
- "A single $sample returned the same document twice: " + result._id);
-
- cumulativeSeenIds[result._id] = true;
- idsThisSample[result._id] = true;
- });
- }
-
- // An implementation would have to be very broken for this assertion to fail.
- assert.gte(Object.keys(cumulativeSeenIds).length, nDocs / 4);
-
- // Make sure we can return all documents in the collection.
- assert.eq(coll.aggregate([{$sample: {size: nDocs}}]).toArray().length, nDocs);
+"use strict";
+
+var coll = db.server21632;
+coll.drop();
+
+// If there is no collection, or no documents in the collection, we should not get any results
+// from a sample.
+assert.eq([], coll.aggregate([{$sample: {size: 1}}]).toArray());
+assert.eq([], coll.aggregate([{$sample: {size: 10}}]).toArray());
+
+db.createCollection(coll.getName());
+
+// Test if we are running WT + LSM and if so, skip the test.
+// The WiredTiger LSM random cursor implementation doesn't currently give a random enough
+// distribution to pass this test case, so disable the test when checking an LSM
+// configuration for now. We will need to revisit this before releasing WiredTiger LSM
+// as a supported file type. (See WT-2403 for details on forthcoming changes.)
+
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+
+if (storageEngine == "wiredTiger" && coll.stats().wiredTiger.type == 'lsm') {
+ return;
+}
+
+assert.eq([], coll.aggregate([{$sample: {size: 1}}]).toArray());
+assert.eq([], coll.aggregate([{$sample: {size: 10}}]).toArray());
+
+// If there is only one document, we should get that document.
+var paddingStr = "abcdefghijklmnopqrstuvwxyz";
+var firstDoc = {_id: 0, paddingStr: paddingStr};
+assert.writeOK(coll.insert(firstDoc));
+assert.eq([firstDoc], coll.aggregate([{$sample: {size: 1}}]).toArray());
+assert.eq([firstDoc], coll.aggregate([{$sample: {size: 10}}]).toArray());
+
+// Insert a bunch of documents.
+var bulk = coll.initializeUnorderedBulkOp();
+var nDocs = 1000;
+for (var id = 1; id < nDocs; id++) {
+ bulk.insert({_id: id, paddingStr: paddingStr});
+}
+bulk.execute();
+
+// Will contain a document's _id as a key if we've ever seen that document.
+var cumulativeSeenIds = {};
+var sampleSize = 10;
+
+jsTestLog("About to do repeated samples, explain output: " +
+ tojson(coll.explain().aggregate([{$sample: {size: sampleSize}}])));
+
+// Repeatedly ask for small samples of documents to get a cumulative sample of size 'nDocs'.
+for (var i = 0; i < nDocs / sampleSize; i++) {
+ var results = coll.aggregate([{$sample: {size: sampleSize}}]).toArray();
+
+ assert.eq(results.length, sampleSize, "$sample did not return the expected number of results");
+
+ // Check that there are no duplicate documents in the result of any single sample.
+ var idsThisSample = {};
+ results.forEach(function recordId(result) {
+ assert.lte(result._id, nDocs, "$sample returned an unknown document");
+ assert(!idsThisSample[result._id],
+ "A single $sample returned the same document twice: " + result._id);
+
+ cumulativeSeenIds[result._id] = true;
+ idsThisSample[result._id] = true;
+ });
+}
+
+// An implementation would have to be very broken for this assertion to fail.
+assert.gte(Object.keys(cumulativeSeenIds).length, nDocs / 4);
+
+// Make sure we can return all documents in the collection.
+assert.eq(coll.aggregate([{$sample: {size: nDocs}}]).toArray().length, nDocs);
})();
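For reference, a minimal shell sketch of the $sample semantics the test above pins down; the
'sample_demo' collection name is hypothetical, and result order is random by design:

    // $sample never returns the same document twice within one invocation, and a
    // requested size larger than the collection returns each document at most once.
    var demo = db.sample_demo;  // hypothetical scratch collection
    demo.drop();
    for (var i = 0; i < 5; i++) {
        demo.insert({_id: i});
    }
    assert.eq(demo.aggregate([{$sample: {size: 3}}]).toArray().length, 3);
    assert.eq(demo.aggregate([{$sample: {size: 50}}]).toArray().length, 5);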
diff --git a/jstests/aggregation/bugs/server22093.js b/jstests/aggregation/bugs/server22093.js
index 61068e38493..618c65f85b7 100644
--- a/jstests/aggregation/bugs/server22093.js
+++ b/jstests/aggregation/bugs/server22093.js
@@ -11,42 +11,42 @@
load('jstests/libs/analyze_plan.js');
(function() {
- "use strict";
+"use strict";
- var coll = db.countscan;
- coll.drop();
+var coll = db.countscan;
+coll.drop();
- for (var i = 0; i < 3; i++) {
- for (var j = 0; j < 10; j += 2) {
- coll.insert({foo: i, bar: j});
- }
+for (var i = 0; i < 3; i++) {
+ for (var j = 0; j < 10; j += 2) {
+ coll.insert({foo: i, bar: j});
}
+}
- coll.ensureIndex({foo: 1});
+coll.ensureIndex({foo: 1});
- var simpleGroup = coll.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).toArray();
+var simpleGroup = coll.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).toArray();
- assert.eq(simpleGroup.length, 1);
- assert.eq(simpleGroup[0]["count"], 15);
+assert.eq(simpleGroup.length, 1);
+assert.eq(simpleGroup[0]["count"], 15);
- var explained = coll.explain().aggregate(
- [{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
+var explained =
+ coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
- assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
- explained = coll.explain().aggregate([
- {$match: {foo: {$gt: 0}}},
- {$project: {_id: 0, a: {$literal: null}}},
- {$group: {_id: null, count: {$sum: 1}}}
- ]);
+explained = coll.explain().aggregate([
+ {$match: {foo: {$gt: 0}}},
+ {$project: {_id: 0, a: {$literal: null}}},
+ {$group: {_id: null, count: {$sum: 1}}}
+]);
- assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
- // Make sure a $count stage can use the COUNT_SCAN optimization.
- explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$count: "count"}]);
- assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+// Make sure a $count stage can use the COUNT_SCAN optimization.
+explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$count: "count"}]);
+assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
- // A $match that is not a single range cannot use the COUNT_SCAN optimization.
- explained = coll.explain().aggregate([{$match: {foo: {$in: [0, 1]}}}, {$count: "count"}]);
- assert(!planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+// A $match that is not a single range cannot use the COUNT_SCAN optimization.
+explained = coll.explain().aggregate([{$match: {foo: {$in: [0, 1]}}}, {$count: "count"}]);
+assert(!planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
}());
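As a sketch of what the assertions above check: a $match covering a single range on the indexed
field, followed by a counting $group or $count, lets the planner answer from the index alone via
COUNT_SCAN (explain output shaped as this test asserts):

    // Assumes the 'countscan' collection and the {foo: 1} index created above.
    var explainOut = db.countscan.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$count: "n"}]);
    // Expect a COUNT_SCAN stage in the winning plan of the $cursor stage.
    printjson(explainOut.stages[0].$cursor.queryPlanner.winningPlan);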
diff --git a/jstests/aggregation/bugs/server22580.js b/jstests/aggregation/bugs/server22580.js
index 3a448173875..3b9f81dbcfc 100644
--- a/jstests/aggregation/bugs/server22580.js
+++ b/jstests/aggregation/bugs/server22580.js
@@ -3,41 +3,46 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
-
- var coll = db.substrCP;
- coll.drop();
-
- // Need an empty document for pipeline.
- coll.insert({});
-
- assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 0, "a"]}}}],
- 34452,
- "$substrCP" + " does not accept non-numeric types as a length.");
-
- assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 0, NaN]}}}],
- 34453,
- "$substrCP" + " does not accept non-integers as a length.");
-
- assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", "abc", 3]}}}],
- 34450,
- "$substrCP does not accept non-numeric types as a starting index.");
-
- assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 2.2, 3]}}}],
- 34451,
- "$substrCP" + " does not accept non-integers as a starting index.");
-
- assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", -1, 3]}}}],
- 34455,
- "$substrCP " + "does not accept negative integers as inputs.");
-
- assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 1, -3]}}}],
- 34454,
- "$substrCP " + "does not accept negative integers as inputs.");
+"use strict";
+
+var coll = db.substrCP;
+coll.drop();
+
+// Need an empty document for pipeline.
+coll.insert({});
+
+assertErrorCode(coll,
+ [{$project: {substr: {$substrCP: ["abc", 0, "a"]}}}],
+ 34452,
+ "$substrCP" +
+ " does not accept non-numeric types as a length.");
+
+assertErrorCode(coll,
+ [{$project: {substr: {$substrCP: ["abc", 0, NaN]}}}],
+ 34453,
+ "$substrCP" +
+ " does not accept non-integers as a length.");
+
+assertErrorCode(coll,
+ [{$project: {substr: {$substrCP: ["abc", "abc", 3]}}}],
+ 34450,
+ "$substrCP does not accept non-numeric types as a starting index.");
+
+assertErrorCode(coll,
+ [{$project: {substr: {$substrCP: ["abc", 2.2, 3]}}}],
+ 34451,
+ "$substrCP" +
+ " does not accept non-integers as a starting index.");
+
+assertErrorCode(coll,
+ [{$project: {substr: {$substrCP: ["abc", -1, 3]}}}],
+ 34455,
+ "$substrCP " +
+ "does not accept negative integers as inputs.");
+
+assertErrorCode(coll,
+ [{$project: {substr: {$substrCP: ["abc", 1, -3]}}}],
+ 34454,
+ "$substrCP " +
+ "does not accept negative integers as inputs.");
}());
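For contrast with the error cases above, a well-formed $substrCP sketch: a string, a non-negative
integral starting index, and a non-negative integral length, both counted in code points rather
than bytes:

    // Uses the 'substrCP' collection seeded with one empty document above.
    var out =
        db.substrCP.aggregate([{$project: {_id: 0, s: {$substrCP: ["héllo", 1, 3]}}}]).toArray();
    assert.eq(out, [{s: "éll"}]);  // 'é' counts as one code point, though it is two UTF-8 bytes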
diff --git a/jstests/aggregation/bugs/server25590.js b/jstests/aggregation/bugs/server25590.js
index 329ae808a2c..b478f806029 100644
--- a/jstests/aggregation/bugs/server25590.js
+++ b/jstests/aggregation/bugs/server25590.js
@@ -1,19 +1,19 @@
// Test that an aggregate command where the "pipeline" field has the wrong type fails with a
// TypeMismatch error.
(function() {
- "use strict";
+"use strict";
- const coll = db.server25590;
- coll.drop();
+const coll = db.server25590;
+coll.drop();
- assert.writeOK(coll.insert({}));
+assert.writeOK(coll.insert({}));
- assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: 1}),
- ErrorCodes.TypeMismatch);
- assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: {}}),
- ErrorCodes.TypeMismatch);
- assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: [1, 2]}),
- ErrorCodes.TypeMismatch);
- assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: [1, null]}),
- ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: 1}),
+ ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: {}}),
+ ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: [1, 2]}),
+ ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(db.runCommand({aggregate: coll.getName(), pipeline: [1, null]}),
+ ErrorCodes.TypeMismatch);
})();
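For contrast, a well-typed sketch of the command form: "pipeline" must be an array of stage
documents, and the command form also requires a "cursor" field:

    assert.commandWorked(db.runCommand(
        {aggregate: db.server25590.getName(), pipeline: [{$match: {}}], cursor: {}}));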
diff --git a/jstests/aggregation/bugs/server26462.js b/jstests/aggregation/bugs/server26462.js
index b0ef33ae35b..08225e54ce3 100644
--- a/jstests/aggregation/bugs/server26462.js
+++ b/jstests/aggregation/bugs/server26462.js
@@ -1,29 +1,29 @@
// Tests that adding a field that only contains metadata does not cause a segmentation fault when
// grouping on the added field.
(function() {
- "use strict";
+"use strict";
- // Drop the old test collection, if any.
- db.server26462.drop();
+// Drop the old test collection, if any.
+db.server26462.drop();
- // Insert some test documents into the collection.
- assert.writeOK(db.server26462.insert({"_id": 1, "title": "cakes and ale"}));
- assert.writeOK(db.server26462.insert({"_id": 2, "title": "more cakes"}));
- assert.writeOK(db.server26462.insert({"_id": 3, "title": "bread"}));
- assert.writeOK(db.server26462.insert({"_id": 4, "title": "some cakes"}));
+// Insert some test documents into the collection.
+assert.writeOK(db.server26462.insert({"_id": 1, "title": "cakes and ale"}));
+assert.writeOK(db.server26462.insert({"_id": 2, "title": "more cakes"}));
+assert.writeOK(db.server26462.insert({"_id": 3, "title": "bread"}));
+assert.writeOK(db.server26462.insert({"_id": 4, "title": "some cakes"}));
- // Create a text index on the documents.
- assert.commandWorked(db.server26462.createIndex({title: "text"}));
+// Create a text index on the documents.
+assert.commandWorked(db.server26462.createIndex({title: "text"}));
- // Add a metadata only field in the aggregation pipeline and use that field in the $group _id.
- let res = db.server26462
- .aggregate([
- {$match: {$text: {$search: "cake"}}},
- {$addFields: {fooScore: {$meta: "textScore"}}},
- {$group: {_id: "$fooScore", count: {$sum: 1}}}
- ])
- .itcount();
+// Add a metadata-only field in the aggregation pipeline and use that field in the $group _id.
+let res = db.server26462
+ .aggregate([
+ {$match: {$text: {$search: "cake"}}},
+ {$addFields: {fooScore: {$meta: "textScore"}}},
+ {$group: {_id: "$fooScore", count: {$sum: 1}}}
+ ])
+ .itcount();
- // Assert that the command worked.
- assert.eq(2, res);
+// Assert that the command worked.
+assert.eq(2, res);
})();
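A sketch of the metadata-only-field pattern exercised above, assuming the text index on 'title'
created by this test; {$meta: "textScore"} is only available when the query includes a $text
predicate:

    db.server26462
        .aggregate([
            {$match: {$text: {$search: "cake"}}},
            {$addFields: {fooScore: {$meta: "textScore"}}}
        ])
        .forEach(printjson);  // each result carries its relevance score in 'fooScore'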
diff --git a/jstests/aggregation/bugs/server37750.js b/jstests/aggregation/bugs/server37750.js
index cdfd098d87d..902c427c292 100644
--- a/jstests/aggregation/bugs/server37750.js
+++ b/jstests/aggregation/bugs/server37750.js
@@ -6,75 +6,75 @@
* requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- // Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
- // that operations will yield on each PlanExecuter iteration.
- const st = new ShardingTest({
- name: jsTestName(),
- shards: 2,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
- });
+// Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
+// that operations will yield on each PlanExecutor iteration.
+const st = new ShardingTest({
+ name: jsTestName(),
+ shards: 2,
+ rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
+});
- const mongosDB = st.s.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- // Shard the test collection, split it at {_id: 0}, and move the upper chunk to shard1.
- st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 0});
+// Shard the test collection, split it at {_id: 0}, and move the upper chunk to shard1.
+st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 0});
- // Insert enough documents on each shard to induce the $sample random-cursor optimization.
- for (let i = (-150); i < 150; ++i) {
- assert.commandWorked(mongosColl.insert({_id: i}));
- }
+// Insert enough documents on each shard to induce the $sample random-cursor optimization.
+for (let i = (-150); i < 150; ++i) {
+ assert.commandWorked(mongosColl.insert({_id: i}));
+}
- // Run the initial aggregate for the $sample stage.
- const cmdRes = assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$sample: {size: 3}}],
- comment: "$sample random",
- cursor: {batchSize: 0}
- }));
- assert.eq(cmdRes.cursor.firstBatch.length, 0);
+// Run the initial aggregate for the $sample stage.
+const cmdRes = assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$sample: {size: 3}}],
+ comment: "$sample random",
+ cursor: {batchSize: 0}
+}));
+assert.eq(cmdRes.cursor.firstBatch.length, 0);
- // Force each shard to hang on yield to allow for currentOp capture.
- FixtureHelpers.runCommandOnEachPrimary({
- db: mongosDB.getSiblingDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: mongosColl.getFullName()}
- }
- });
+// Force each shard to hang on yield to allow for currentOp capture.
+FixtureHelpers.runCommandOnEachPrimary({
+ db: mongosDB.getSiblingDB("admin"),
+ cmdObj: {
+ configureFailPoint: "setYieldAllLocksHang",
+ mode: "alwaysOn",
+ data: {namespace: mongosColl.getFullName()}
+ }
+});
- // Run $currentOp to confirm that the $sample getMore yields on both shards.
- const awaitShell = startParallelShell(() => {
- load("jstests/libs/fixture_helpers.js");
- assert.soon(() => db.getSiblingDB("admin")
- .aggregate([
- {$currentOp: {}},
- {
- $match: {
- "cursor.originatingCommand.comment": "$sample random",
- planSummary: "QUEUED_DATA, MULTI_ITERATOR",
- numYields: {$gt: 0}
- }
+// Run $currentOp to confirm that the $sample getMore yields on both shards.
+const awaitShell = startParallelShell(() => {
+ load("jstests/libs/fixture_helpers.js");
+ assert.soon(() => db.getSiblingDB("admin")
+ .aggregate([
+ {$currentOp: {}},
+ {
+ $match: {
+ "cursor.originatingCommand.comment": "$sample random",
+ planSummary: "QUEUED_DATA, MULTI_ITERATOR",
+ numYields: {$gt: 0}
}
- ])
- .itcount() === 2);
- // Release the failpoint and allow the getMores to complete.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
- }, mongosDB.getMongo().port);
+ }
+ ])
+ .itcount() === 2);
+ // Release the failpoint and allow the getMores to complete.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: db.getSiblingDB("admin"),
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
+}, mongosDB.getMongo().port);
- // Retrieve the results for the $sample aggregation.
- const sampleCursor = new DBCommandCursor(mongosDB, cmdRes);
- assert.eq(sampleCursor.toArray().length, 3);
+// Retrieve the results for the $sample aggregation.
+const sampleCursor = new DBCommandCursor(mongosDB, cmdRes);
+assert.eq(sampleCursor.toArray().length, 3);
- // Confirm that the parallel shell completes successfully, and tear down the cluster.
- awaitShell();
- st.stop();
+// Confirm that the parallel shell completes successfully, and tear down the cluster.
+awaitShell();
+st.stop();
})();
\ No newline at end of file
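A sketch of the $currentOp filtering idea used in the parallel shell above: match in-progress
operations by the comment attached to the originating command, then inspect their yield counts
(field names as filtered on by this test):

    db.getSiblingDB("admin")
        .aggregate([
            {$currentOp: {}},
            {$match: {"cursor.originatingCommand.comment": "$sample random"}},
            {$project: {planSummary: 1, numYields: 1}}
        ])
        .forEach(printjson);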
diff --git a/jstests/aggregation/bugs/server4588.js b/jstests/aggregation/bugs/server4588.js
index 000cc8f0231..be04773c0ff 100644
--- a/jstests/aggregation/bugs/server4588.js
+++ b/jstests/aggregation/bugs/server4588.js
@@ -1,60 +1,55 @@
// SERVER-4588 Add option to $unwind to emit array index.
(function() {
- "use strict";
+"use strict";
- const coll = db.server4588;
- coll.drop();
+const coll = db.server4588;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1, x: null}));
- assert.writeOK(coll.insert({_id: 2, x: []}));
- assert.writeOK(coll.insert({_id: 3, x: [1, 2, 3]}));
- assert.writeOK(coll.insert({_id: 4, x: 5}));
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1, x: null}));
+assert.writeOK(coll.insert({_id: 2, x: []}));
+assert.writeOK(coll.insert({_id: 3, x: [1, 2, 3]}));
+assert.writeOK(coll.insert({_id: 4, x: 5}));
- // Without includeArrayIndex.
- let actualResults =
- coll.aggregate([{$unwind: {path: "$x"}}, {$sort: {_id: 1, x: 1}}]).toArray();
- let expectedResults = [
- {_id: 3, x: 1},
- {_id: 3, x: 2},
- {_id: 3, x: 3},
- {_id: 4, x: 5},
- ];
- assert.eq(expectedResults, actualResults, "Incorrect results for normal $unwind");
+// Without includeArrayIndex.
+let actualResults = coll.aggregate([{$unwind: {path: "$x"}}, {$sort: {_id: 1, x: 1}}]).toArray();
+let expectedResults = [
+ {_id: 3, x: 1},
+ {_id: 3, x: 2},
+ {_id: 3, x: 3},
+ {_id: 4, x: 5},
+];
+assert.eq(expectedResults, actualResults, "Incorrect results for normal $unwind");
- // With includeArrayIndex, index inserted into a new field.
- actualResults =
- coll.aggregate(
- [{$unwind: {path: "$x", includeArrayIndex: "index"}}, {$sort: {_id: 1, x: 1}}])
- .toArray();
- expectedResults = [
- {_id: 3, x: 1, index: NumberLong(0)},
- {_id: 3, x: 2, index: NumberLong(1)},
- {_id: 3, x: 3, index: NumberLong(2)},
- {_id: 4, x: 5, index: null},
- ];
- assert.eq(expectedResults, actualResults, "Incorrect results $unwind with includeArrayIndex");
+// With includeArrayIndex, index inserted into a new field.
+actualResults =
+ coll.aggregate([{$unwind: {path: "$x", includeArrayIndex: "index"}}, {$sort: {_id: 1, x: 1}}])
+ .toArray();
+expectedResults = [
+ {_id: 3, x: 1, index: NumberLong(0)},
+ {_id: 3, x: 2, index: NumberLong(1)},
+ {_id: 3, x: 3, index: NumberLong(2)},
+ {_id: 4, x: 5, index: null},
+];
+assert.eq(expectedResults, actualResults, "Incorrect results for $unwind with includeArrayIndex");
- // With both includeArrayIndex and preserveNullAndEmptyArrays.
- actualResults =
- coll.aggregate([
- {
- $unwind:
- {path: "$x", includeArrayIndex: "index", preserveNullAndEmptyArrays: true}
- },
- {$sort: {_id: 1, x: 1}}
- ])
- .toArray();
- expectedResults = [
- {_id: 0, index: null},
- {_id: 1, x: null, index: null},
- {_id: 2, index: null},
- {_id: 3, x: 1, index: NumberLong(0)},
- {_id: 3, x: 2, index: NumberLong(1)},
- {_id: 3, x: 3, index: NumberLong(2)},
- {_id: 4, x: 5, index: null},
- ];
- assert.eq(expectedResults,
- actualResults,
- "Incorrect results $unwind with includeArrayIndex and preserveNullAndEmptyArrays");
+// With both includeArrayIndex and preserveNullAndEmptyArrays.
+actualResults =
+ coll.aggregate([
+ {$unwind: {path: "$x", includeArrayIndex: "index", preserveNullAndEmptyArrays: true}},
+ {$sort: {_id: 1, x: 1}}
+ ])
+ .toArray();
+expectedResults = [
+ {_id: 0, index: null},
+ {_id: 1, x: null, index: null},
+ {_id: 2, index: null},
+ {_id: 3, x: 1, index: NumberLong(0)},
+ {_id: 3, x: 2, index: NumberLong(1)},
+ {_id: 3, x: 3, index: NumberLong(2)},
+ {_id: 4, x: 5, index: null},
+];
+assert.eq(expectedResults,
+ actualResults,
+ "Incorrect results $unwind with includeArrayIndex and preserveNullAndEmptyArrays");
}());
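A one-document sketch of includeArrayIndex, matching the expectations above: the array index lands
in the named field as a NumberLong, and is null for non-array values ('unwind_demo' is a
hypothetical scratch collection):

    var u = db.unwind_demo;
    u.drop();
    u.insert({_id: 0, x: ["a", "b"]});
    printjson(u.aggregate([{$unwind: {path: "$x", includeArrayIndex: "idx"}}]).toArray());
    // [{_id: 0, x: "a", idx: NumberLong(0)}, {_id: 0, x: "b", idx: NumberLong(1)}]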
diff --git a/jstests/aggregation/bugs/server4589.js b/jstests/aggregation/bugs/server4589.js
index e7f2e1b9746..efa7254e4d9 100644
--- a/jstests/aggregation/bugs/server4589.js
+++ b/jstests/aggregation/bugs/server4589.js
@@ -4,67 +4,67 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
-
- var coll = db.agg_array_elem_at_expr;
- coll.drop();
-
- assert.writeOK(coll.insert({a: [1, 2, 3, 4, 5]}));
-
- // Normal indexing.
- var pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 2]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: 3}]);
-
- // Indexing with a float.
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 1.0]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: 2}]);
-
- // Indexing with a decimal
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', NumberDecimal('2.0')]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: 3}]);
-
- // Negative indexing.
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -1]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: 5}]);
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -5]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: 1}]);
-
- // Out of bounds positive.
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 5]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', Math.pow(2, 31) - 1]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', NumberLong(Math.pow(2, 31) - 1)]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
-
- // Out of bounds negative.
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -6]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -Math.pow(2, 31)]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', NumberLong(-Math.pow(2, 31))]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
-
- // Null inputs.
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', null]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: null}]);
- pipeline = [{$project: {_id: 0, x: {$arrayElemAt: [null, 4]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{x: null}]);
-
- // Error cases.
-
- // Wrong number of arguments.
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [['one', 'arg']]}}}], 16020);
-
- // First argument is not an array.
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: ['one', 2]}}}], 28689);
-
- // Second argument is not numeric.
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], '2']}}}], 28690);
-
- // Second argument is not integral.
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], 1.5]}}}], 28691);
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], NumberDecimal('1.5')]}}}], 28691);
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], Math.pow(2, 32)]}}}], 28691);
- assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], -Math.pow(2, 31) - 1]}}}], 28691);
+'use strict';
+
+var coll = db.agg_array_elem_at_expr;
+coll.drop();
+
+assert.writeOK(coll.insert({a: [1, 2, 3, 4, 5]}));
+
+// Normal indexing.
+var pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 2]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: 3}]);
+
+// Indexing with a float.
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 1.0]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: 2}]);
+
+// Indexing with a decimal
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', NumberDecimal('2.0')]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: 3}]);
+
+// Negative indexing.
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -1]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: 5}]);
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -5]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: 1}]);
+
+// Out of bounds positive.
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 5]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', Math.pow(2, 31) - 1]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', NumberLong(Math.pow(2, 31) - 1)]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
+
+// Out of bounds negative.
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -6]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', -Math.pow(2, 31)]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', NumberLong(-Math.pow(2, 31))]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{}]);
+
+// Null inputs.
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', null]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: null}]);
+pipeline = [{$project: {_id: 0, x: {$arrayElemAt: [null, 4]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{x: null}]);
+
+// Error cases.
+
+// Wrong number of arguments.
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [['one', 'arg']]}}}], 16020);
+
+// First argument is not an array.
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: ['one', 2]}}}], 28689);
+
+// Second argument is not numeric.
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], '2']}}}], 28690);
+
+// Second argument is not integral.
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], 1.5]}}}], 28691);
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], NumberDecimal('1.5')]}}}], 28691);
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], Math.pow(2, 32)]}}}], 28691);
+assertErrorCode(coll, [{$project: {x: {$arrayElemAt: [[1, 2], -Math.pow(2, 31) - 1]}}}], 28691);
}());
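A compact recap of the $arrayElemAt semantics asserted above: integral indexes (including negative
ones, counted from the end) address the array, and out-of-bounds indexes produce a missing field
rather than an error:

    // Reuses the {a: [1, 2, 3, 4, 5]} document inserted above.
    printjson(db.agg_array_elem_at_expr
                  .aggregate([{
                      $project: {
                          _id: 0,
                          second: {$arrayElemAt: ["$a", 1]},
                          last: {$arrayElemAt: ["$a", -1]},
                          oob: {$arrayElemAt: ["$a", 99]}
                      }
                  }])
                  .toArray());  // [{second: 2, last: 5}] -- 'oob' is simply absent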
diff --git a/jstests/aggregation/bugs/server4638.js b/jstests/aggregation/bugs/server4638.js
index 4934da94a34..ee6f7cfd6df 100644
--- a/jstests/aggregation/bugs/server4638.js
+++ b/jstests/aggregation/bugs/server4638.js
@@ -13,4 +13,4 @@ assert.eq(res[0].x, 0);
// Make sure having an undefined doesn't break pipelines that do use the field
res = t.aggregate({$project: {undef: 1}}).toArray();
assert.eq(res[0].undef, undefined);
-assert.eq(typeof(res[0].undef), "undefined");
+assert.eq(typeof (res[0].undef), "undefined");
diff --git a/jstests/aggregation/bugs/server5012.js b/jstests/aggregation/bugs/server5012.js
index a9955349490..14dfe914b52 100644
--- a/jstests/aggregation/bugs/server5012.js
+++ b/jstests/aggregation/bugs/server5012.js
@@ -1,11 +1,11 @@
(function() {
- "use strict";
- load('jstests/aggregation/data/articles.js');
+"use strict";
+load('jstests/aggregation/data/articles.js');
- const article = db.getSiblingDB("aggdb").getCollection("article");
- const cursor = article.aggregate(
- [{$sort: {_id: 1}}, {$project: {author: 1, _id: 0}}, {$project: {Writer: "$author"}}]);
- const expected = [{Writer: "bob"}, {Writer: "dave"}, {Writer: "jane"}];
+const article = db.getSiblingDB("aggdb").getCollection("article");
+const cursor = article.aggregate(
+ [{$sort: {_id: 1}}, {$project: {author: 1, _id: 0}}, {$project: {Writer: "$author"}}]);
+const expected = [{Writer: "bob"}, {Writer: "dave"}, {Writer: "jane"}];
- assert.eq(cursor.toArray(), expected);
+assert.eq(cursor.toArray(), expected);
}());
diff --git a/jstests/aggregation/bugs/server533.js b/jstests/aggregation/bugs/server533.js
index b64ddc9669f..d66c5d27ad8 100644
--- a/jstests/aggregation/bugs/server533.js
+++ b/jstests/aggregation/bugs/server533.js
@@ -4,32 +4,32 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
-
- var coll = db.agg_sample;
- coll.drop();
-
- // Should return no results on a collection that doesn't exist. Should not crash.
- assert.eq(coll.aggregate([{$sample: {size: 10}}]).toArray(), []);
-
- var nItems = 3;
- for (var i = 0; i < nItems; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
-
- [0, 1, nItems, nItems + 1].forEach(function(size) {
- var results = coll.aggregate([{$sample: {size: size}}]).toArray();
- assert.eq(results.length, Math.min(size, nItems));
- });
-
- // Multiple $sample stages are allowed.
- var results = coll.aggregate([{$sample: {size: nItems}}, {$sample: {size: 1}}]).toArray();
- assert.eq(results.length, 1);
-
- // Invalid options.
- assertErrorCode(coll, [{$sample: 'string'}], 28745);
- assertErrorCode(coll, [{$sample: {size: 'string'}}], 28746);
- assertErrorCode(coll, [{$sample: {size: -1}}], 28747);
- assertErrorCode(coll, [{$sample: {unknownOpt: true}}], 28748);
- assertErrorCode(coll, [{$sample: {/* no size */}}], 28749);
+'use strict';
+
+var coll = db.agg_sample;
+coll.drop();
+
+// Should return no results on a collection that doesn't exist. Should not crash.
+assert.eq(coll.aggregate([{$sample: {size: 10}}]).toArray(), []);
+
+var nItems = 3;
+for (var i = 0; i < nItems; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
+
+[0, 1, nItems, nItems + 1].forEach(function(size) {
+ var results = coll.aggregate([{$sample: {size: size}}]).toArray();
+ assert.eq(results.length, Math.min(size, nItems));
+});
+
+// Multiple $sample stages are allowed.
+var results = coll.aggregate([{$sample: {size: nItems}}, {$sample: {size: 1}}]).toArray();
+assert.eq(results.length, 1);
+
+// Invalid options.
+assertErrorCode(coll, [{$sample: 'string'}], 28745);
+assertErrorCode(coll, [{$sample: {size: 'string'}}], 28746);
+assertErrorCode(coll, [{$sample: {size: -1}}], 28747);
+assertErrorCode(coll, [{$sample: {unknownOpt: true}}], 28748);
+assertErrorCode(coll, [{$sample: {/* no size */}}], 28749);
}());
diff --git a/jstests/aggregation/bugs/server6074.js b/jstests/aggregation/bugs/server6074.js
index 8adf6b7eca8..8e53459ba9e 100644
--- a/jstests/aggregation/bugs/server6074.js
+++ b/jstests/aggregation/bugs/server6074.js
@@ -4,78 +4,78 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
-
- var coll = db.agg_slice_expr;
- coll.drop();
-
- // Need to have at least one document to ensure the pipeline executes.
- assert.writeOK(coll.insert({}));
-
- function testSlice(sliceArgs, expArray) {
- var pipeline = [{$project: {_id: 0, slice: {$slice: sliceArgs}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{slice: expArray}]);
- }
-
- // Two argument form.
-
- testSlice([[0, 1, 2, 3, 4], 2], [0, 1]);
- testSlice([[0, 1, 2, 3, 4], 2.0], [0, 1]);
- // Negative count
- testSlice([[0, 1, 2, 3, 4], -2], [3, 4]);
- testSlice([[0, 1, 2, 3, 4], -2.0], [3, 4]);
- // Zero count.
- testSlice([[0, 1, 2, 3, 4], 0], []);
- // Out of bounds positive.
- testSlice([[0, 1, 2, 3, 4], 10], [0, 1, 2, 3, 4]);
- // Out of bounds negative.
- testSlice([[0, 1, 2, 3, 4], -10], [0, 1, 2, 3, 4]);
- // Null arguments
- testSlice([null, -10], null);
- testSlice([[0, 1, 2, 3, 4], null], null);
-
- // Three argument form.
-
- testSlice([[0, 1, 2, 3, 4], 1, 2], [1, 2]);
- testSlice([[0, 1, 2, 3, 4], 1.0, 2.0], [1, 2]);
- // Negative start index.
- testSlice([[0, 1, 2, 3, 4], -3, 2], [2, 3]);
- testSlice([[0, 1, 2, 3, 4], -5, 2], [0, 1]);
- // Slice starts out of bounds.
- testSlice([[0, 1, 2, 3, 4], -10, 2], [0, 1]);
- testSlice([[0, 1, 2, 3, 4], 10, 2], []);
- // Slice ends out of bounds.
- testSlice([[0, 1, 2, 3, 4], 4, 3], [4]);
- testSlice([[0, 1, 2, 3, 4], -1, 3], [4]);
- // Null arguments
- testSlice([[0, 1, 2, 3, 4], -1, null], null);
-
- // Error cases.
-
- // Wrong number of arguments.
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2, 3]]}}}], 28667);
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2, 3], 4, 5, 6]}}}], 28667);
-
- // First argument is not an array.
- assertErrorCode(coll, [{$project: {x: {$slice: ['one', 2]}}}], 28724);
-
- // Second argument is not numeric.
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], '2']}}}], 28725);
-
- // Second argument is not integral.
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 1.5]}}}], 28726);
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], Math.pow(2, 32)]}}}], 28726);
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], -Math.pow(2, 31) - 1]}}}], 28726);
-
- // Third argument is not numeric.
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, '2']}}}], 28727);
-
- // Third argument is not integral.
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, 1.5]}}}], 28728);
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, Math.pow(2, 32)]}}}], 28728);
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, -Math.pow(2, 31) - 1]}}}], 28728);
-
- // Third argument is not positive.
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, 0]}}}], 28729);
- assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, -1]}}}], 28729);
+'use strict';
+
+var coll = db.agg_slice_expr;
+coll.drop();
+
+// Need to have at least one document to ensure the pipeline executes.
+assert.writeOK(coll.insert({}));
+
+function testSlice(sliceArgs, expArray) {
+ var pipeline = [{$project: {_id: 0, slice: {$slice: sliceArgs}}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{slice: expArray}]);
+}
+
+// Two argument form.
+
+testSlice([[0, 1, 2, 3, 4], 2], [0, 1]);
+testSlice([[0, 1, 2, 3, 4], 2.0], [0, 1]);
+// Negative count
+testSlice([[0, 1, 2, 3, 4], -2], [3, 4]);
+testSlice([[0, 1, 2, 3, 4], -2.0], [3, 4]);
+// Zero count.
+testSlice([[0, 1, 2, 3, 4], 0], []);
+// Out of bounds positive.
+testSlice([[0, 1, 2, 3, 4], 10], [0, 1, 2, 3, 4]);
+// Out of bounds negative.
+testSlice([[0, 1, 2, 3, 4], -10], [0, 1, 2, 3, 4]);
+// Null arguments
+testSlice([null, -10], null);
+testSlice([[0, 1, 2, 3, 4], null], null);
+
+// Three argument form.
+
+testSlice([[0, 1, 2, 3, 4], 1, 2], [1, 2]);
+testSlice([[0, 1, 2, 3, 4], 1.0, 2.0], [1, 2]);
+// Negative start index.
+testSlice([[0, 1, 2, 3, 4], -3, 2], [2, 3]);
+testSlice([[0, 1, 2, 3, 4], -5, 2], [0, 1]);
+// Slice starts out of bounds.
+testSlice([[0, 1, 2, 3, 4], -10, 2], [0, 1]);
+testSlice([[0, 1, 2, 3, 4], 10, 2], []);
+// Slice ends out of bounds.
+testSlice([[0, 1, 2, 3, 4], 4, 3], [4]);
+testSlice([[0, 1, 2, 3, 4], -1, 3], [4]);
+// Null arguments
+testSlice([[0, 1, 2, 3, 4], -1, null], null);
+
+// Error cases.
+
+// Wrong number of arguments.
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2, 3]]}}}], 28667);
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2, 3], 4, 5, 6]}}}], 28667);
+
+// First argument is not an array.
+assertErrorCode(coll, [{$project: {x: {$slice: ['one', 2]}}}], 28724);
+
+// Second argument is not numeric.
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], '2']}}}], 28725);
+
+// Second argument is not integral.
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 1.5]}}}], 28726);
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], Math.pow(2, 32)]}}}], 28726);
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], -Math.pow(2, 31) - 1]}}}], 28726);
+
+// Third argument is not numeric.
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, '2']}}}], 28727);
+
+// Third argument is not integral.
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, 1.5]}}}], 28728);
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, Math.pow(2, 32)]}}}], 28728);
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, -Math.pow(2, 31) - 1]}}}], 28728);
+
+// Third argument is not positive.
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, 0]}}}], 28729);
+assertErrorCode(coll, [{$project: {x: {$slice: [[1, 2], 0, -1]}}}], 28729);
}());
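A compact recap of the $slice forms tested above: [array, count] takes from the front, or from the
back for a negative count; [array, start, count] takes 'count' items beginning at 'start', which
may itself be negative:

    // Reuses the 'agg_slice_expr' collection seeded with one empty document above.
    printjson(db.agg_slice_expr
                  .aggregate([{
                      $project: {
                          _id: 0,
                          firstTwo: {$slice: [[0, 1, 2, 3, 4], 2]},
                          lastTwo: {$slice: [[0, 1, 2, 3, 4], -2]},
                          middle: {$slice: [[0, 1, 2, 3, 4], 1, 3]}
                      }
                  }])
                  .toArray());  // [{firstTwo: [0, 1], lastTwo: [3, 4], middle: [1, 2, 3]}]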
diff --git a/jstests/aggregation/bugs/server6125.js b/jstests/aggregation/bugs/server6125.js
index 592a560312a..bd4ab4ce220 100644
--- a/jstests/aggregation/bugs/server6125.js
+++ b/jstests/aggregation/bugs/server6125.js
@@ -65,9 +65,9 @@ function setupArray() {
{_id: 13, a: new Timestamp(1 / 1000, 1), ty: "Timestamp"},
{_id: 14, a: /regex/, ty: "RegExp"},
{
- _id: 15,
- a: new DBPointer("test.s6125", new ObjectId("0102030405060708090A0B0C")),
- ty: "DBPointer"
+ _id: 15,
+ a: new DBPointer("test.s6125", new ObjectId("0102030405060708090A0B0C")),
+ ty: "DBPointer"
},
{_id: 16, a: function() {}, ty: "Code"},
// Code with Scope not implemented in JS
diff --git a/jstests/aggregation/bugs/server6127.js b/jstests/aggregation/bugs/server6127.js
index 26585c87d21..1f11d858c83 100644
--- a/jstests/aggregation/bugs/server6127.js
+++ b/jstests/aggregation/bugs/server6127.js
@@ -7,21 +7,21 @@
 * is no path). Previously it would uassert, causing the aggregation to end.
*/
(function() {
- "use strict";
- db.s6127.drop();
+"use strict";
+db.s6127.drop();
- assert.writeOK(db.s6127.insert({_id: 0, a: 1}));
- assert.writeOK(db.s6127.insert({_id: 1, foo: 2}));
- assert.writeOK(db.s6127.insert({_id: 2, foo: {bar: 3}}));
+assert.writeOK(db.s6127.insert({_id: 0, a: 1}));
+assert.writeOK(db.s6127.insert({_id: 1, foo: 2}));
+assert.writeOK(db.s6127.insert({_id: 2, foo: {bar: 3}}));
- // Aggregate checking the field foo and the path foo.bar.
- const cursor = db.s6127.aggregate(
- [{$sort: {_id: 1}}, {$project: {_id: 0, "foo.bar": 1, field: "$foo", path: "$foo.bar"}}]);
+// Aggregate checking the field foo and the path foo.bar.
+const cursor = db.s6127.aggregate(
+ [{$sort: {_id: 1}}, {$project: {_id: 0, "foo.bar": 1, field: "$foo", path: "$foo.bar"}}]);
- // The first document should contain nothing as neither field exists, the second document should
- // contain only field as it has a value in foo, but foo does not have a field bar so it cannot
- // walk that path, the third document should have both the field and path as foo is an object
- // which has a field bar.
- const expected = [{}, {field: 2}, {foo: {bar: 3}, field: {bar: 3}, path: 3}];
- assert.eq(cursor.toArray(), expected);
+// The first document should contain nothing, since neither the field nor the path exists. The
+// second document should contain only 'field', since foo has a value but no subfield 'bar', so
+// the path cannot be walked. The third document should have both 'field' and 'path', since foo
+// is an object with a field 'bar'.
+const expected = [{}, {field: 2}, {foo: {bar: 3}, field: {bar: 3}, path: 3}];
+assert.eq(cursor.toArray(), expected);
}());
diff --git a/jstests/aggregation/bugs/server6147.js b/jstests/aggregation/bugs/server6147.js
index 0969b366636..c74e1848512 100644
--- a/jstests/aggregation/bugs/server6147.js
+++ b/jstests/aggregation/bugs/server6147.js
@@ -6,44 +6,44 @@
* constant and a field regardless of whether they were equal or not.
*/
(function() {
- "use strict";
- db.s6147.drop();
+"use strict";
+db.s6147.drop();
- assert.writeOK(db.s6147.insert({a: 1}));
- assert.writeOK(db.s6147.insert({a: 2}));
+assert.writeOK(db.s6147.insert({a: 1}));
+assert.writeOK(db.s6147.insert({a: 2}));
- // Aggregate checking various combinations of the constant and the field.
- const cursor = db.s6147.aggregate([
- {$sort: {a: 1}},
- {
- $project: {
- _id: 0,
- constantAndField: {$ne: [1, "$a"]},
- fieldAndConstant: {$ne: ["$a", 1]},
- constantAndConstant: {$ne: [1, 1]},
- fieldAndField: {$ne: ["$a", "$a"]}
- }
+// Aggregate checking various combinations of the constant and the field.
+const cursor = db.s6147.aggregate([
+ {$sort: {a: 1}},
+ {
+ $project: {
+ _id: 0,
+ constantAndField: {$ne: [1, "$a"]},
+ fieldAndConstant: {$ne: ["$a", 1]},
+ constantAndConstant: {$ne: [1, 1]},
+ fieldAndField: {$ne: ["$a", "$a"]}
}
- ]);
+ }
+]);
- // In both documents, the constantAndConstant and fieldAndField should be false since they
- // compare something with itself. However, the constantAndField and fieldAndConstant should be
- // different as document one contains 1 which should return false and document 2 contains
- // something different so should return true.
- const expected = [
- {
- constantAndField: false,
- fieldAndConstant: false,
- constantAndConstant: false,
- fieldAndField: false
- },
- {
- constantAndField: true,
- fieldAndConstant: true,
- constantAndConstant: false,
- fieldAndField: false
- }
- ];
+// In both documents, constantAndConstant and fieldAndField should be false, since each compares
+// a value with itself. constantAndField and fieldAndConstant, however, should differ between the
+// documents: document one contains 1, so $ne returns false, while document two contains a
+// different value, so $ne returns true.
+const expected = [
+ {
+ constantAndField: false,
+ fieldAndConstant: false,
+ constantAndConstant: false,
+ fieldAndField: false
+ },
+ {
+ constantAndField: true,
+ fieldAndConstant: true,
+ constantAndConstant: false,
+ fieldAndField: false
+ }
+];
- assert.eq(cursor.toArray(), expected);
+assert.eq(cursor.toArray(), expected);
}());
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index a5e934a9e89..065f5b261ee 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -4,53 +4,53 @@
// requires_spawning_own_processes,
// ]
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2});
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));
-
- var d = s.getDB("test");
-
- // Insert _id values 0 - 99
- var N = 100;
-
- var bulkOp = d.data.initializeOrderedBulkOp();
- for (var i = 0; i < N; ++i) {
- bulkOp.insert({_id: i, i: i % 10});
- }
- bulkOp.execute();
-
- // Split the data into 3 chunks
- assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}}));
-
- // Migrate the middle chunk to another shard
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
-
- // Check that we get results rather than an error
- var result = d.data
- .aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}})
- .toArray();
- var expected = [
- {"_id": 0, "avg_id": 45},
- {"_id": 1, "avg_id": 46},
- {"_id": 2, "avg_id": 47},
- {"_id": 3, "avg_id": 48},
- {"_id": 4, "avg_id": 49},
- {"_id": 5, "avg_id": 50},
- {"_id": 6, "avg_id": 51},
- {"_id": 7, "avg_id": 52},
- {"_id": 8, "avg_id": 53},
- {"_id": 9, "avg_id": 54}
- ];
-
- assert.eq(result, expected);
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({shards: 2});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));
+
+var d = s.getDB("test");
+
+// Insert _id values 0 - 99
+var N = 100;
+
+var bulkOp = d.data.initializeOrderedBulkOp();
+for (var i = 0; i < N; ++i) {
+ bulkOp.insert({_id: i, i: i % 10});
+}
+bulkOp.execute();
+
+// Split the data into 3 chunks
+assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}}));
+
+// Migrate the middle chunk to another shard
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
+
+// Check that we get results rather than an error
+var result = d.data
+ .aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
+ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
+ {$sort: {_id: 1}})
+ .toArray();
+var expected = [
+ {"_id": 0, "avg_id": 45},
+ {"_id": 1, "avg_id": 46},
+ {"_id": 2, "avg_id": 47},
+ {"_id": 3, "avg_id": 48},
+ {"_id": 4, "avg_id": 49},
+ {"_id": 5, "avg_id": 50},
+ {"_id": 6, "avg_id": 51},
+ {"_id": 7, "avg_id": 52},
+ {"_id": 8, "avg_id": 53},
+ {"_id": 9, "avg_id": 54}
+];
+
+assert.eq(result, expected);
+
+s.stop();
})();
diff --git a/jstests/aggregation/bugs/server6185.js b/jstests/aggregation/bugs/server6185.js
index cf084d4b371..06eacdf791d 100644
--- a/jstests/aggregation/bugs/server6185.js
+++ b/jstests/aggregation/bugs/server6185.js
@@ -2,16 +2,16 @@
* Tests that projecting a non-existent subfield behaves identically in both query and aggregation.
*/
(function() {
- "use strict";
- const coll = db.c;
- coll.drop();
+"use strict";
+const coll = db.c;
+coll.drop();
- assert.writeOK(coll.insert({a: [1]}));
- assert.writeOK(coll.insert({a: {c: 1}}));
- assert.writeOK(coll.insert({a: [{c: 1}, {b: 1, c: 1}, {c: 1}]}));
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({b: 1}));
+assert.writeOK(coll.insert({a: [1]}));
+assert.writeOK(coll.insert({a: {c: 1}}));
+assert.writeOK(coll.insert({a: [{c: 1}, {b: 1, c: 1}, {c: 1}]}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({b: 1}));
- assert.eq(coll.aggregate([{$project: {'a.b': 1}}, {$sort: {_id: 1}}]).toArray(),
- coll.find({}, {'a.b': 1}).sort({_id: 1}).toArray());
+assert.eq(coll.aggregate([{$project: {'a.b': 1}}, {$sort: {_id: 1}}]).toArray(),
+ coll.find({}, {'a.b': 1}).sort({_id: 1}).toArray());
}());
diff --git a/jstests/aggregation/bugs/server6530.js b/jstests/aggregation/bugs/server6530.js
index 36a5d3deb3f..77dfcd703fb 100644
--- a/jstests/aggregation/bugs/server6530.js
+++ b/jstests/aggregation/bugs/server6530.js
@@ -2,31 +2,37 @@
* Test that $near queries are disallowed in $match stages.
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js");
+"use strict";
+load("jstests/aggregation/extras/utils.js");
- const coll = db.getCollection("no_near_in_match");
- coll.drop();
+const coll = db.getCollection("no_near_in_match");
+coll.drop();
- // Create indexes that could satisfy various $near queries.
- assert.commandWorked(coll.createIndex({point2d: "2d"}));
- assert.commandWorked(coll.createIndex({point2dsphere: "2dsphere"}));
+// Create indexes that could satisfy various $near queries.
+assert.commandWorked(coll.createIndex({point2d: "2d"}));
+assert.commandWorked(coll.createIndex({point2dsphere: "2dsphere"}));
- // Populate the collection so that successful queries can return at least one result.
- assert.writeOK(coll.insert({point2d: [0.25, 0.35]}));
- assert.writeOK(coll.insert({point2dsphere: [0.25, 0.35]}));
+// Populate the collection so that successful queries can return at least one result.
+assert.writeOK(coll.insert({point2d: [0.25, 0.35]}));
+assert.writeOK(coll.insert({point2dsphere: [0.25, 0.35]}));
- const nearQuery = {point2d: {$near: [0, 0]}};
- const nearSphereQuery = {point2dsphere: {$nearSphere: [0, 0]}};
- const geoNearQuery = {point2d: {$geoNear: [0, 0]}};
+const nearQuery = {
+ point2d: {$near: [0, 0]}
+};
+const nearSphereQuery = {
+ point2dsphere: {$nearSphere: [0, 0]}
+};
+const geoNearQuery = {
+ point2d: {$geoNear: [0, 0]}
+};
- // Test that normal finds return a result.
- assert.eq(1, coll.find(nearQuery).count());
- assert.eq(1, coll.find(nearSphereQuery).count());
- assert.eq(1, coll.find(geoNearQuery).count());
+// Test that normal finds return a result.
+assert.eq(1, coll.find(nearQuery).count());
+assert.eq(1, coll.find(nearSphereQuery).count());
+assert.eq(1, coll.find(geoNearQuery).count());
- // Test that we refuse to run $match with a near query.
- assertErrorCode(coll, {$match: nearQuery}, ErrorCodes.BadValue);
- assertErrorCode(coll, {$match: nearSphereQuery}, ErrorCodes.BadValue);
- assertErrorCode(coll, {$match: geoNearQuery}, ErrorCodes.BadValue);
+// Test that we refuse to run $match with a near query.
+assertErrorCode(coll, {$match: nearQuery}, ErrorCodes.BadValue);
+assertErrorCode(coll, {$match: nearSphereQuery}, ErrorCodes.BadValue);
+assertErrorCode(coll, {$match: geoNearQuery}, ErrorCodes.BadValue);
}());
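The supported alternative to a $near predicate inside $match is the dedicated $geoNear stage, run
first in the pipeline. A sketch, assuming the indexes created above and a server recent enough to
support the 'key' option for disambiguating between the two geo indexes:

    printjson(db.no_near_in_match
                  .aggregate([{$geoNear: {near: [0, 0], distanceField: "dist", key: "point2d"}}])
                  .toArray());  // 'dist' is an arbitrary output field name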
diff --git a/jstests/aggregation/bugs/server6779.js b/jstests/aggregation/bugs/server6779.js
index 44f641ea15d..d9d48898068 100644
--- a/jstests/aggregation/bugs/server6779.js
+++ b/jstests/aggregation/bugs/server6779.js
@@ -1,20 +1,20 @@
// server 6779: serializing ExpressionCoerceToBool
// This test only fails in debug mode with the bug since that tests round-tripping
(function() {
- "use strict";
+"use strict";
- function test(op, val) {
- const coll = db.server6779;
- coll.drop();
- assert.writeOK(coll.insert({a: true}));
- assert.writeOK(coll.insert({a: false}));
+function test(op, val) {
+ const coll = db.server6779;
+ coll.drop();
+ assert.writeOK(coll.insert({a: true}));
+ assert.writeOK(coll.insert({a: false}));
- const obj = {};
- obj[op] = ['$a', val];
- const result = coll.aggregate([{$project: {_id: 0, bool: obj}}, {$sort: {bool: -1}}]);
+ const obj = {};
+ obj[op] = ['$a', val];
+ const result = coll.aggregate([{$project: {_id: 0, bool: obj}}, {$sort: {bool: -1}}]);
- assert.eq(result.toArray(), [{bool: true}, {bool: false}]);
- }
- test('$and', true);
- test('$or', false);
+ assert.eq(result.toArray(), [{bool: true}, {bool: false}]);
+}
+test('$and', true);
+test('$or', false);
}());
diff --git a/jstests/aggregation/bugs/server7695_isodates.js b/jstests/aggregation/bugs/server7695_isodates.js
index 4d969bf80f8..ca90c47f0fe 100644
--- a/jstests/aggregation/bugs/server7695_isodates.js
+++ b/jstests/aggregation/bugs/server7695_isodates.js
@@ -1,254 +1,251 @@
// SERVER-7695: Add $isoWeek, $isoWeekYear, and $isoDayOfWeek aggregation expressions.
(function() {
- "use strict";
- const coll = db.server7695;
- let testOpCount = 0;
-
- load('jstests/libs/dateutil.js');
-
- coll.drop();
-
- // Seed collection so that the pipeline will execute.
- assert.writeOK(coll.insert({}));
-
- /**
- * Helper for testing that 'op' returns 'expResult'.
- */
- function testOp(op, value, expResult) {
- testOpCount++;
- let pipeline = [{$project: {_id: 0, result: {}}}];
- pipeline[0].$project.result[op] = value;
- let msg = "Exptected {" + op + ": " + value + "} to equal: " + expResult;
- let res = coll.runCommand('aggregate', {pipeline: pipeline, cursor: {}});
-
- // in the case of $dateToString the date is on property date
- let date = value.date || value;
- if (date.valueOf() < 0 && _isWindows() && res.code === 16422) {
- // some versions of windows (but not all) fail with dates before 1970
- print("skipping test of " + date.tojson() +
- " because system doesn't support old dates");
- return;
- }
-
- if (date.valueOf() / 1000 < -2 * 1024 * 1024 * 1024 && res.code == 16421) {
- // we correctly detected that we are outside of the range of a 32-bit time_t
- print("skipping test of " + date.tojson() + " because it is outside of time_t range");
- return;
- }
+"use strict";
+const coll = db.server7695;
+let testOpCount = 0;
+
+load('jstests/libs/dateutil.js');
+
+coll.drop();
+
+// Seed collection so that the pipeline will execute.
+assert.writeOK(coll.insert({}));
+
+/**
+ * Helper for testing that 'op' returns 'expResult'.
+ */
+function testOp(op, value, expResult) {
+ testOpCount++;
+ let pipeline = [{$project: {_id: 0, result: {}}}];
+ pipeline[0].$project.result[op] = value;
+ let msg = "Exptected {" + op + ": " + value + "} to equal: " + expResult;
+ let res = coll.runCommand('aggregate', {pipeline: pipeline, cursor: {}});
+
+    // In the case of $dateToString, the date is on the 'date' property.
+ let date = value.date || value;
+ if (date.valueOf() < 0 && _isWindows() && res.code === 16422) {
+        // Some versions of Windows (but not all) fail with dates before 1970.
+ print("skipping test of " + date.tojson() + " because system doesn't support old dates");
+ return;
+ }
- assert.eq(res.cursor.firstBatch[0].result, expResult, tojson(pipeline));
+ if (date.valueOf() / 1000 < -2 * 1024 * 1024 * 1024 && res.code == 16421) {
+ // we correctly detected that we are outside of the range of a 32-bit time_t
+ print("skipping test of " + date.tojson() + " because it is outside of time_t range");
+ return;
}
- // While development, there was a bug which caused an error with $dateToString if the order of
- // %V and %G changed, so I added this test to prevent regression.
- testOp('$dateToString', {date: new Date("1900-12-31T23:59:59Z"), format: "%V-%G"}, "01-1901");
- // This was failing, but it shouldn't as it is the same as above, only rotated.
- testOp('$dateToString', {date: new Date("1900-12-31T23:59:59Z"), format: "%G-%V"}, "1901-01");
-
- // 1900 is special because it's devisible by 4 and by 100 but not 400 so it's not a leap year.
- // 2000 is special, because it's devisible by 4, 100, 400 and so it is a leap year.
- const years = {
- common: [
- 1900, // Starting and ending on Monday (special).
- 2002, // Starting and ending on Tuesday.
- 2014, // Starting and ending on Wednesday.
- 2015, // Starting and ending on Thursday.
- 2010, // Starting and ending on Friday.
- 2011, // Starting and ending on Saturday.
- 2006, // Starting and ending on Sunday.
- ],
- leap: [
- 1996, // Starting on Monday, ending on Tuesday.
- 2008, // Starting on Tuesday, ending on Wednesday.
- 1992, // Starting on Wednesday, ending on Thursday.
- 2004, // Starting on Thursday, ending on Friday.
- 2016, // Starting on Friday, ending on Saturday.
- 2000, // Starting on Saturday, ending on Sunday (special).
- 2012, // Starting on Sunday, ending on Monday.
- ],
- commonAfterLeap: [
- 2001, // Starting and ending on Monday.
- 2013, // Starting and ending on Tuesday.
- 1997, // Starting and ending on Wednesday.
- 2009, // Starting and ending on Thursday.
- 1993, // Starting and ending on Friday.
- 2005, // Starting and ending on Saturday.
- 2017, // Starting and ending on Sunday.
- ],
- };
-
- const MONDAY = 1;
- const TUESDAY = 2;
- const WEDNESDAY = 3;
- const THURSDAY = 4;
- const FRIDAY = 5;
- const SATURDAY = 6;
- const SUNDAY = 7;
-
- ['common', 'leap', 'commonAfterLeap'].forEach(function(type) {
- years[type].forEach(function(year, day) {
- // forEach starts indexing at zero but weekdays start with Monday on 1 so we add +1.
- day = day + 1;
- let newYear = DateUtil.getNewYear(year);
- let endOfFirstWeekInYear = DateUtil.getEndOfFirstWeekInYear(year, day);
- let startOfSecondWeekInYear = DateUtil.getStartOfSecondWeekInYear(year, day);
- let birthday = DateUtil.getBirthday(year);
- let endOfSecondToLastWeekInYear =
- DateUtil.getEndOfSecondToLastWeekInYear(year, day, type);
- let startOfLastWeekInYear = DateUtil.getStartOfLastWeekInYear(year, day, type);
- let newYearsEve = DateUtil.getNewYearsEve(year);
-
- testOp('$isoDayOfWeek', newYear, day);
- testOp('$isoDayOfWeek', endOfFirstWeekInYear, SUNDAY);
- testOp('$isoDayOfWeek', startOfSecondWeekInYear, MONDAY);
- testOp('$isoDayOfWeek', endOfSecondToLastWeekInYear, SUNDAY);
- testOp('$isoDayOfWeek', startOfLastWeekInYear, MONDAY);
- if (type === 'leap') {
- testOp('$isoDayOfWeek', newYearsEve, DateUtil.shiftWeekday(day, 1));
- } else {
- testOp('$isoDayOfWeek', newYearsEve, day);
- }
+ assert.eq(res.cursor.firstBatch[0].result, expResult, tojson(pipeline));
+}
+
+// During development, there was a bug which caused an error with $dateToString if the order of
+// %V and %G changed, so this test was added to prevent regression.
+testOp('$dateToString', {date: new Date("1900-12-31T23:59:59Z"), format: "%V-%G"}, "01-1901");
+// This was failing, but it shouldn't as it is the same as above, only rotated.
+testOp('$dateToString', {date: new Date("1900-12-31T23:59:59Z"), format: "%G-%V"}, "1901-01");
+
+// 1900 is special because it's divisible by 4 and by 100 but not by 400, so it's not a leap year.
+// 2000 is special because it's divisible by 4, 100, and 400, so it is a leap year.
+const years = {
+ common: [
+ 1900, // Starting and ending on Monday (special).
+ 2002, // Starting and ending on Tuesday.
+ 2014, // Starting and ending on Wednesday.
+ 2015, // Starting and ending on Thursday.
+ 2010, // Starting and ending on Friday.
+ 2011, // Starting and ending on Saturday.
+ 2006, // Starting and ending on Sunday.
+ ],
+ leap: [
+ 1996, // Starting on Monday, ending on Tuesday.
+ 2008, // Starting on Tuesday, ending on Wednesday.
+ 1992, // Starting on Wednesday, ending on Thursday.
+ 2004, // Starting on Thursday, ending on Friday.
+ 2016, // Starting on Friday, ending on Saturday.
+ 2000, // Starting on Saturday, ending on Sunday (special).
+ 2012, // Starting on Sunday, ending on Monday.
+ ],
+ commonAfterLeap: [
+ 2001, // Starting and ending on Monday.
+ 2013, // Starting and ending on Tuesday.
+ 1997, // Starting and ending on Wednesday.
+ 2009, // Starting and ending on Thursday.
+ 1993, // Starting and ending on Friday.
+ 2005, // Starting and ending on Saturday.
+ 2017, // Starting and ending on Sunday.
+ ],
+};
+
+const MONDAY = 1;
+const TUESDAY = 2;
+const WEDNESDAY = 3;
+const THURSDAY = 4;
+const FRIDAY = 5;
+const SATURDAY = 6;
+const SUNDAY = 7;
+
+['common', 'leap', 'commonAfterLeap'].forEach(function(type) {
+ years[type].forEach(function(year, day) {
+        // forEach starts indexing at zero, but weekdays start with Monday at 1, so we add 1.
+ day = day + 1;
+ let newYear = DateUtil.getNewYear(year);
+ let endOfFirstWeekInYear = DateUtil.getEndOfFirstWeekInYear(year, day);
+ let startOfSecondWeekInYear = DateUtil.getStartOfSecondWeekInYear(year, day);
+ let birthday = DateUtil.getBirthday(year);
+ let endOfSecondToLastWeekInYear = DateUtil.getEndOfSecondToLastWeekInYear(year, day, type);
+ let startOfLastWeekInYear = DateUtil.getStartOfLastWeekInYear(year, day, type);
+ let newYearsEve = DateUtil.getNewYearsEve(year);
+
+ testOp('$isoDayOfWeek', newYear, day);
+ testOp('$isoDayOfWeek', endOfFirstWeekInYear, SUNDAY);
+ testOp('$isoDayOfWeek', startOfSecondWeekInYear, MONDAY);
+ testOp('$isoDayOfWeek', endOfSecondToLastWeekInYear, SUNDAY);
+ testOp('$isoDayOfWeek', startOfLastWeekInYear, MONDAY);
+ if (type === 'leap') {
+ testOp('$isoDayOfWeek', newYearsEve, DateUtil.shiftWeekday(day, 1));
+ } else {
+ testOp('$isoDayOfWeek', newYearsEve, day);
+ }
- if (type === 'leap') {
- testOp('$isoDayOfWeek', birthday, DateUtil.shiftWeekday(day, 4));
- } else {
- testOp('$isoDayOfWeek', birthday, DateUtil.shiftWeekday(day, 3));
- }
+ if (type === 'leap') {
+ testOp('$isoDayOfWeek', birthday, DateUtil.shiftWeekday(day, 4));
+ } else {
+ testOp('$isoDayOfWeek', birthday, DateUtil.shiftWeekday(day, 3));
+ }
- testOp('$isoWeekYear', birthday, year);
- // In leap years staring on Thursday, the birthday is in week 28, every year else it is
- // in week 27.
- if (type === 'leap' && day === THURSDAY) {
- testOp('$isoWeek', birthday, 28);
- } else {
- testOp('$isoWeek', birthday, 27);
- }
+ testOp('$isoWeekYear', birthday, year);
+ // In leap years starting on Thursday, the birthday is in week 28; in every other year it
+ // is in week 27.
+ if (type === 'leap' && day === THURSDAY) {
+ testOp('$isoWeek', birthday, 28);
+ } else {
+ testOp('$isoWeek', birthday, 27);
+ }
- if (day <= THURSDAY) {
- // A year starting between Monday and Thursday will always start in week 1.
- testOp('$isoWeek', newYear, 1);
- testOp('$isoWeekYear', newYear, year);
- testOp('$isoWeek', endOfFirstWeekInYear, 1);
- testOp('$isoWeekYear', endOfFirstWeekInYear, year);
- testOp('$isoWeek', startOfSecondWeekInYear, 2);
- testOp('$isoWeekYear', startOfSecondWeekInYear, year);
+ if (day <= THURSDAY) {
+ // A year starting between Monday and Thursday will always start in week 1.
+ testOp('$isoWeek', newYear, 1);
+ testOp('$isoWeekYear', newYear, year);
+ testOp('$isoWeek', endOfFirstWeekInYear, 1);
+ testOp('$isoWeekYear', endOfFirstWeekInYear, year);
+ testOp('$isoWeek', startOfSecondWeekInYear, 2);
+ testOp('$isoWeekYear', startOfSecondWeekInYear, year);
+ testOp(
+ '$dateToString', {format: '%G-W%V-%u', date: newYear}, "" + year + "-W01-" + day);
+ } else if (day == FRIDAY || (day == SATURDAY && type === 'commonAfterLeap')) {
+ // A year starting on Friday will always start with week 53 of the previous year.
+ // A common year starting on a Saturday and after a leap year will also start with
+ // week 53 of the previous year.
+ testOp('$isoWeek', newYear, 53);
+ testOp('$isoWeekYear', newYear, year - 1);
+ testOp('$isoWeek', endOfFirstWeekInYear, 53);
+ testOp('$isoWeekYear', endOfFirstWeekInYear, year - 1);
+ testOp('$isoWeek', startOfSecondWeekInYear, 1);
+ testOp('$isoWeekYear', startOfSecondWeekInYear, year);
+ testOp('$dateToString',
+ {format: '%G-W%V-%u', date: newYear},
+ "" + (year - 1) + "-W53-" + day);
+ } else {
+ // A year starting on Saturday (except after a leap year) or Sunday will always
+ // start with week 52 of the previous year.
+ testOp('$isoWeek', newYear, 52);
+ testOp('$isoWeekYear', newYear, year - 1);
+ testOp('$isoWeek', endOfFirstWeekInYear, 52);
+ testOp('$isoWeekYear', endOfFirstWeekInYear, year - 1);
+ testOp('$isoWeek', startOfSecondWeekInYear, 1);
+ testOp('$isoWeekYear', startOfSecondWeekInYear, year);
+ testOp('$dateToString',
+ {format: '%G-W%V-%u', date: newYear},
+ "" + (year - 1) + "-W52-" + day);
+ }
+
+ if (type === 'leap') {
+ if (day <= TUESDAY) {
+ // A leap year starting between Monday and Tuesday will always end in week 1 of
+ // the next year.
+ testOp('$isoWeek', newYearsEve, 1);
+ testOp('$isoWeekYear', newYearsEve, year + 1);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 1);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year + 1);
+ testOp('$dateToString',
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year + 1) + "-W01-" + DateUtil.shiftWeekday(day, 1));
+ } else if (day <= THURSDAY) {
+ // A leap year starting on Wednesday or Thursday will always end with week 53.
+ testOp('$isoWeek', newYearsEve, 53);
+ testOp('$isoWeekYear', newYearsEve, year);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 53);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year);
testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYear},
- "" + year + "-W01-" + day);
- } else if (day == FRIDAY || (day == SATURDAY && type === 'commonAfterLeap')) {
- // A year starting on Friday will always start with week 53 of the previous year.
- // A common year starting on a Saturday and after a leap year will also start with
- // week 53 of the previous year.
- testOp('$isoWeek', newYear, 53);
- testOp('$isoWeekYear', newYear, year - 1);
- testOp('$isoWeek', endOfFirstWeekInYear, 53);
- testOp('$isoWeekYear', endOfFirstWeekInYear, year - 1);
- testOp('$isoWeek', startOfSecondWeekInYear, 1);
- testOp('$isoWeekYear', startOfSecondWeekInYear, year);
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year) + "-W53-" + DateUtil.shiftWeekday(day, 1));
+ } else if (day <= SATURDAY) {
+ // A leap year starting on Friday or Saturday will always end with week 52.
+ testOp('$isoWeek', newYearsEve, 52);
+ testOp('$isoWeekYear', newYearsEve, year);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 51);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 52);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year);
testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYear},
- "" + (year - 1) + "-W53-" + day);
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year) + "-W52-" + DateUtil.shiftWeekday(day, 1));
} else {
- // A year starting on Saturday (except after a leap year) or Sunday will always
- // start with week 52 of the previous year.
- testOp('$isoWeek', newYear, 52);
- testOp('$isoWeekYear', newYear, year - 1);
- testOp('$isoWeek', endOfFirstWeekInYear, 52);
- testOp('$isoWeekYear', endOfFirstWeekInYear, year - 1);
- testOp('$isoWeek', startOfSecondWeekInYear, 1);
- testOp('$isoWeekYear', startOfSecondWeekInYear, year);
+ // A leap year starting on Sunday will always end in week 1 of the next year.
+ testOp('$isoWeek', newYearsEve, 1);
+ testOp('$isoWeekYear', newYearsEve, year + 1);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 51);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 52);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year);
testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYear},
- "" + (year - 1) + "-W52-" + day);
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year + 1) + "-W01-" + DateUtil.shiftWeekday(day, 1));
}
-
- if (type === 'leap') {
- if (day <= TUESDAY) {
- // A leap year starting between Monday and Tuesday will always end in week 1 of
- // the next year.
- testOp('$isoWeek', newYearsEve, 1);
- testOp('$isoWeekYear', newYearsEve, year + 1);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 1);
- testOp('$isoWeekYear', startOfLastWeekInYear, year + 1);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year + 1) + "-W01-" + DateUtil.shiftWeekday(day, 1));
- } else if (day <= THURSDAY) {
- // A leap year starting on Wednesday or Thursday will always end with week 53.
- testOp('$isoWeek', newYearsEve, 53);
- testOp('$isoWeekYear', newYearsEve, year);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 53);
- testOp('$isoWeekYear', startOfLastWeekInYear, year);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year) + "-W53-" + DateUtil.shiftWeekday(day, 1));
- } else if (day <= SATURDAY) {
- // A leap year starting on Friday or Sarturday will always and with week 52
- testOp('$isoWeek', newYearsEve, 52);
- testOp('$isoWeekYear', newYearsEve, year);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 51);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 52);
- testOp('$isoWeekYear', startOfLastWeekInYear, year);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year) + "-W52-" + DateUtil.shiftWeekday(day, 1));
- } else {
- // A leap year starting on Sunday will always end with week 1
- testOp('$isoWeek', newYearsEve, 1);
- testOp('$isoWeekYear', newYearsEve, year + 1);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 51);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 52);
- testOp('$isoWeekYear', startOfLastWeekInYear, year);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year + 1) + "-W01-" + DateUtil.shiftWeekday(day, 1));
- }
+ } else {
+ if (day <= WEDNESDAY) {
+ // A common year starting between Monday and Wednesday will always end in week 1
+ // of the next year.
+ testOp('$isoWeek', newYearsEve, 1);
+ testOp('$isoWeekYear', newYearsEve, year + 1);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 1);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year + 1);
+ testOp('$dateToString',
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year + 1) + "-W01-" + day);
+ } else if (day === THURSDAY) {
+ // A common year starting on Thursday will always end with week 53.
+ testOp('$isoWeek', newYearsEve, 53);
+ testOp('$isoWeekYear', newYearsEve, year);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 53);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year);
+ testOp('$dateToString',
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year) + "-W53-" + day);
} else {
- if (day <= WEDNESDAY) {
- // A common year starting between Monday and Wednesday will always end in week 1
- // of the next year.
- testOp('$isoWeek', newYearsEve, 1);
- testOp('$isoWeekYear', newYearsEve, year + 1);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 1);
- testOp('$isoWeekYear', startOfLastWeekInYear, year + 1);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year + 1) + "-W01-" + day);
- } else if (day === THURSDAY) {
- // A common year starting on Thursday will always end with week 53.
- testOp('$isoWeek', newYearsEve, 53);
- testOp('$isoWeekYear', newYearsEve, year);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 52);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 53);
- testOp('$isoWeekYear', startOfLastWeekInYear, year);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year) + "-W53-" + day);
- } else {
- // A common year starting on between Friday and Sunday will always end with week
- // 52.
- testOp('$isoWeek', newYearsEve, 52);
- testOp('$isoWeekYear', newYearsEve, year);
- testOp('$isoWeek', endOfSecondToLastWeekInYear, 51);
- testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
- testOp('$isoWeek', startOfLastWeekInYear, 52);
- testOp('$isoWeekYear', startOfLastWeekInYear, year);
- testOp('$dateToString',
- {format: '%G-W%V-%u', date: newYearsEve},
- "" + (year) + "-W52-" + day);
- }
+ // A common year starting between Friday and Sunday will always end with
+ // week 52.
+ testOp('$isoWeek', newYearsEve, 52);
+ testOp('$isoWeekYear', newYearsEve, year);
+ testOp('$isoWeek', endOfSecondToLastWeekInYear, 51);
+ testOp('$isoWeekYear', endOfSecondToLastWeekInYear, year);
+ testOp('$isoWeek', startOfLastWeekInYear, 52);
+ testOp('$isoWeekYear', startOfLastWeekInYear, year);
+ testOp('$dateToString',
+ {format: '%G-W%V-%u', date: newYearsEve},
+ "" + (year) + "-W52-" + day);
}
- });
+ }
});
- assert.eq(testOpCount, 485, 'Expected 485 tests to run');
+});
+assert.eq(testOpCount, 485, 'Expected 485 tests to run');
})();
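
As a worked example of the ISO week-date rules exercised above (a minimal
sketch, assuming a collection `c` containing at least one document):

    db.c.aggregate([{
        $project: {
            _id: 0,
            w: {$isoWeek: new Date("2005-01-01")},
            y: {$isoWeekYear: new Date("2005-01-01")}
        }
    }]);
    // 2005 is a common year after a leap year, starting on a Saturday, so
    // 2005-01-01 belongs to week 53 of ISO year 2004: {w: 53, y: 2004}.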
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index f755a6af0ad..19700cc2202 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -4,128 +4,128 @@
// requires_spawning_own_processes,
// ]
(function() {
- 'use strict';
-
- load('jstests/libs/geo_near_random.js');
- load('jstests/aggregation/extras/utils.js');
-
- var coll = 'server7781';
-
+'use strict';
+
+load('jstests/libs/geo_near_random.js');
+load('jstests/aggregation/extras/utils.js');
+
+var coll = 'server7781';
+
+db[coll].drop();
+db[coll].insert({loc: [0, 0]});
+
+// $geoNear is only allowed as the first stage in a pipeline, nowhere else.
+assert.throws(
+ () => db[coll].aggregate(
+ [{$match: {x: 1}}, {$geoNear: {near: [1, 1], spherical: true, distanceField: 'dis'}}]));
+
+const kDistanceField = "dis";
+const kIncludeLocsField = "loc";
+
+/**
+ * Tests the output of the $geoNear aggregation stage. This function expects a document with
+ * the following fields:
+ * - 'geoNearSpec' is the specification for a $geoNear aggregation stage.
+ * - 'limit' is an integer limiting the number of pipeline results.
+ * - 'batchSize', if specified, is the batchSize to use for the aggregation.
+ */
+function testGeoNearStageOutput({geoNearSpec, limit, batchSize}) {
+ const aggOptions = batchSize ? {batchSize: batchSize} : {};
+ const result =
+ db[coll].aggregate([{$geoNear: geoNearSpec}, {$limit: limit}], aggOptions).toArray();
+ const errmsg = () => tojson(result);
+
+ // Verify that we got the expected number of results.
+ assert.eq(result.length, limit, errmsg);
+
+ // Run through the array, checking for proper sort order and sane computed distances.
+ result.reduce((lastDist, curDoc) => {
+ const curDist = curDoc[kDistanceField];
+
+ // Verify that distances are in increasing order.
+ assert.lte(lastDist, curDist, errmsg);
+
+ // Verify that the computed distance is correct.
+ const computed = Geo.sphereDistance(geoNearSpec["near"], curDoc[kIncludeLocsField]);
+ assert.close(computed, curDist, errmsg);
+ return curDist;
+ }, 0);
+}
+
+// We use this to generate points. Using a single global to avoid resetting the RNG in each pass.
+var pointMaker = new GeoNearRandomTest(coll);
+
+function test(db, sharded, indexType) {
db[coll].drop();
- db[coll].insert({loc: [0, 0]});
-
- // $geoNear is only allowed as the first stage in a pipeline, nowhere else.
- assert.throws(
- () => db[coll].aggregate(
- [{$match: {x: 1}}, {$geoNear: {near: [1, 1], spherical: true, distanceField: 'dis'}}]));
-
- const kDistanceField = "dis";
- const kIncludeLocsField = "loc";
-
- /**
- * Tests the output of the $geoNear command. This function expects a document with the following
- * fields:
- * - 'geoNearSpec' is the specification for a $geoNear aggregation stage.
- * - 'limit' is an integer limiting the number of pipeline results.
- * - 'batchSize', if specified, is the batchSize to use for the aggregation.
- */
- function testGeoNearStageOutput({geoNearSpec, limit, batchSize}) {
- const aggOptions = batchSize ? {batchSize: batchSize} : {};
- const result =
- db[coll].aggregate([{$geoNear: geoNearSpec}, {$limit: limit}], aggOptions).toArray();
- const errmsg = () => tojson(result);
-
- // Verify that we got the expected number of results.
- assert.eq(result.length, limit, errmsg);
-
- // Run though the array, checking for proper sort order and sane computed distances.
- result.reduce((lastDist, curDoc) => {
- const curDist = curDoc[kDistanceField];
-
- // Verify that distances are in increasing order.
- assert.lte(lastDist, curDist, errmsg);
-
- // Verify that the computed distance is correct.
- const computed = Geo.sphereDistance(geoNearSpec["near"], curDoc[kIncludeLocsField]);
- assert.close(computed, curDist, errmsg);
- return curDist;
- }, 0);
- }
-
- // We use this to generate points. Using a single global to avoid reseting RNG in each pass.
- var pointMaker = new GeoNearRandomTest(coll);
- function test(db, sharded, indexType) {
- db[coll].drop();
-
- if (sharded) { // sharded setup
- var shards = [];
- var config = db.getSiblingDB("config");
- config.shards.find().forEach(function(shard) {
- shards.push(shard._id);
- });
+ if (sharded) { // sharded setup
+ var shards = [];
+ var config = db.getSiblingDB("config");
+ config.shards.find().forEach(function(shard) {
+ shards.push(shard._id);
+ });
+ assert.commandWorked(
+ db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
+ for (var i = 1; i < 10; i++) {
+ // split at 0.1, 0.2, ... 0.9
assert.commandWorked(
- db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
- for (var i = 1; i < 10; i++) {
- // split at 0.1, 0.2, ... 0.9
- assert.commandWorked(
- db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
- db.adminCommand({
- moveChunk: db[coll].getFullName(),
- find: {rand: i / 10},
- to: shards[i % shards.length]
- });
- }
-
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
- }
-
- // insert points
- var numPts = 10 * 1000;
- var bulk = db[coll].initializeUnorderedBulkOp();
- for (var i = 0; i < numPts; i++) {
- bulk.insert({rand: Math.random(), loc: pointMaker.mkPt()});
+ db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
+ db.adminCommand({
+ moveChunk: db[coll].getFullName(),
+ find: {rand: i / 10},
+ to: shards[i % shards.length]
+ });
}
- assert.writeOK(bulk.execute());
-
- assert.eq(db[coll].count(), numPts);
-
- db[coll].ensureIndex({loc: indexType});
-
- // Test $geoNear with spherical coordinates.
- testGeoNearStageOutput({
- geoNearSpec: {
- near: pointMaker.mkPt(0.25),
- distanceField: kDistanceField,
- includeLocs: kIncludeLocsField,
- spherical: true,
- },
- limit: 100
- });
- // Test $geoNear with an initial batchSize of 1.
- testGeoNearStageOutput({
- geoNearSpec: {
- near: pointMaker.mkPt(0.25),
- distanceField: kDistanceField,
- includeLocs: kIncludeLocsField,
- spherical: true,
- },
- limit: 70,
- batchSize: 1
- });
+ assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
}
- test(db, false, '2d');
- test(db, false, '2dsphere');
-
- var sharded = new ShardingTest({shards: 3, mongos: 1});
- assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"}));
- sharded.ensurePrimaryShard('test', sharded.shard1.shardName);
-
- test(sharded.getDB('test'), true, '2d');
- test(sharded.getDB('test'), true, '2dsphere');
-
- sharded.stop();
+ // insert points
+ var numPts = 10 * 1000;
+ var bulk = db[coll].initializeUnorderedBulkOp();
+ for (var i = 0; i < numPts; i++) {
+ bulk.insert({rand: Math.random(), loc: pointMaker.mkPt()});
+ }
+ assert.writeOK(bulk.execute());
+
+ assert.eq(db[coll].count(), numPts);
+
+ db[coll].ensureIndex({loc: indexType});
+
+ // Test $geoNear with spherical coordinates.
+ testGeoNearStageOutput({
+ geoNearSpec: {
+ near: pointMaker.mkPt(0.25),
+ distanceField: kDistanceField,
+ includeLocs: kIncludeLocsField,
+ spherical: true,
+ },
+ limit: 100
+ });
+
+ // Test $geoNear with an initial batchSize of 1.
+ testGeoNearStageOutput({
+ geoNearSpec: {
+ near: pointMaker.mkPt(0.25),
+ distanceField: kDistanceField,
+ includeLocs: kIncludeLocsField,
+ spherical: true,
+ },
+ limit: 70,
+ batchSize: 1
+ });
+}
+
+test(db, false, '2d');
+test(db, false, '2dsphere');
+
+var sharded = new ShardingTest({shards: 3, mongos: 1});
+assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"}));
+sharded.ensurePrimaryShard('test', sharded.shard1.shardName);
+
+test(sharded.getDB('test'), true, '2d');
+test(sharded.getDB('test'), true, '2dsphere');
+
+sharded.stop();
})();
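
For reference, a minimal sketch of the $geoNear shape validated above,
assuming a hypothetical collection `places` with a 2dsphere index on `loc`:

    db.places.aggregate([
        {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
        {$limit: 5}
    ]);
    // Documents come back sorted by increasing "dis", computed as the
    // spherical distance from [0, 0], as the assertions above verify.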
diff --git a/jstests/aggregation/bugs/server8141.js b/jstests/aggregation/bugs/server8141.js
index 9777737517b..908fd952059 100644
--- a/jstests/aggregation/bugs/server8141.js
+++ b/jstests/aggregation/bugs/server8141.js
@@ -1,52 +1,50 @@
// SERVER-8141 Avoid treating arrays as literals in aggregation pipeline.
(function() {
- 'use strict';
- var coll = db.exprs_in_arrays;
- coll.drop();
+'use strict';
+var coll = db.exprs_in_arrays;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: ['foo', 'bar', 'baz'], b: 'bar', c: 'Baz'}));
+assert.writeOK(coll.insert({_id: 0, a: ['foo', 'bar', 'baz'], b: 'bar', c: 'Baz'}));
- // An array of constants should still evaluate to an array of constants.
- var pipeline = [{$project: {_id: 0, d: ['constant', 1]}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['constant', 1]}]);
+// An array of constants should still evaluate to an array of constants.
+var pipeline = [{$project: {_id: 0, d: ['constant', 1]}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['constant', 1]}]);
- // A field name inside an array should take on the value of that field.
- pipeline = [{$project: {_id: 0, d: ['$b']}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar']}]);
+// A field name inside an array should take on the value of that field.
+pipeline = [{$project: {_id: 0, d: ['$b']}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar']}]);
- // An expression inside an array should be evaluated.
- pipeline = [{$project: {_id: 0, d: [{$toLower: 'FoO'}]}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['foo']}]);
+// An expression inside an array should be evaluated.
+pipeline = [{$project: {_id: 0, d: [{$toLower: 'FoO'}]}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['foo']}]);
- // Both an expression and a field name inside an array should be evaluated.
- pipeline = [{$project: {_id: 0, d: ['$b', {$toLower: 'FoO'}]}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar', 'foo']}]);
+// Both an expression and a field name inside an array should be evaluated.
+pipeline = [{$project: {_id: 0, d: ['$b', {$toLower: 'FoO'}]}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar', 'foo']}]);
- // A nested array should still be evaluated.
- pipeline = [{$project: {_id: 0, d: ['$b', 'constant', [1, {$toLower: 'FoO'}]]}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar', 'constant', [1, 'foo']]}]);
+// A nested array should still be evaluated.
+pipeline = [{$project: {_id: 0, d: ['$b', 'constant', [1, {$toLower: 'FoO'}]]}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar', 'constant', [1, 'foo']]}]);
- // Should still evaluate array elements inside arguments to an expression.
- pipeline = [{$project: {_id: 0, d: {$setIntersection: ['$a', ['$b']]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar']}]);
+// Should still evaluate array elements inside arguments to an expression.
+pipeline = [{$project: {_id: 0, d: {$setIntersection: ['$a', ['$b']]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['bar']}]);
- pipeline = [{$project: {_id: 0, d: {$setIntersection: ['$a', [{$toLower: 'FoO'}]]}}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['foo']}]);
+pipeline = [{$project: {_id: 0, d: {$setIntersection: ['$a', [{$toLower: 'FoO'}]]}}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['foo']}]);
- // Nested arrays.
- pipeline = [{
- $project: {
- _id: 0,
- d: {$setIntersection: [[[1, 'foo', 'bar']], [[1, {$toLower: 'FoO'}, '$b']]]}
- }
- }];
- assert.eq(coll.aggregate(pipeline).toArray(), [{d: [[1, 'foo', 'bar']]}]);
+// Nested arrays.
+pipeline = [{
+ $project:
+ {_id: 0, d: {$setIntersection: [[[1, 'foo', 'bar']], [[1, {$toLower: 'FoO'}, '$b']]]}}
+}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{d: [[1, 'foo', 'bar']]}]);
- coll.drop();
+coll.drop();
- // Should replace missing values with NULL to preserve indices.
- assert.writeOK(coll.insert({_id: 1, x: 1, z: 2}));
+// Should replace missing values with NULL to preserve indices.
+assert.writeOK(coll.insert({_id: 1, x: 1, z: 2}));
- pipeline = [{$project: {_id: 0, coordinate: ['$x', '$y', '$z']}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{coordinate: [1, null, 2]}]);
+pipeline = [{$project: {_id: 0, coordinate: ['$x', '$y', '$z']}}];
+assert.eq(coll.aggregate(pipeline).toArray(), [{coordinate: [1, null, 2]}]);
}());
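
Conversely, when an uninterpreted array is wanted, $literal suppresses the
evaluation behavior tested above; a sketch against the same collection:

    db.exprs_in_arrays.aggregate([{$project: {_id: 0, d: {$literal: ['$b', 1]}}}]);
    // Yields {d: ['$b', 1]} verbatim; the field path '$b' is not evaluated.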
diff --git a/jstests/aggregation/bugs/server8164.js b/jstests/aggregation/bugs/server8164.js
index 89cb360d91f..5b137b18d87 100644
--- a/jstests/aggregation/bugs/server8164.js
+++ b/jstests/aggregation/bugs/server8164.js
@@ -1,144 +1,144 @@
// SERVER-8164: ISODate doesn't handle years less than 100 properly.
(function() {
- assert.eq(tojson(ISODate("0000-01-01")), 'ISODate("0000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00")), 'ISODate("0000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00Z")), 'ISODate("0000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.123")), 'ISODate("0000-01-01T00:00:00.123Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.123Z")), 'ISODate("0000-01-01T00:00:00.123Z")');
-
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.1Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.10Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.100Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.1000Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
-
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.1234Z")), 'ISODate("0000-01-01T00:00:00.123Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:00.1235Z")), 'ISODate("0000-01-01T00:00:00.124Z")');
-
- /* Testing different years */
- assert.eq(tojson(ISODate("0000-01-01T00:00:00Z")), 'ISODate("0000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00Z")), 'ISODate("0001-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0069-01-01T00:00:00Z")), 'ISODate("0069-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0070-01-01T00:00:00Z")), 'ISODate("0070-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0099-01-01T00:00:00Z")), 'ISODate("0099-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0100-01-01T00:00:00Z")), 'ISODate("0100-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1800-01-01T00:00:00Z")), 'ISODate("1800-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1801-01-01T00:00:00Z")), 'ISODate("1801-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1869-01-01T00:00:00Z")), 'ISODate("1869-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1870-01-01T00:00:00Z")), 'ISODate("1870-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1899-01-01T00:00:00Z")), 'ISODate("1899-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1900-01-01T00:00:00Z")), 'ISODate("1900-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1901-01-01T00:00:00Z")), 'ISODate("1901-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1969-01-01T00:00:00Z")), 'ISODate("1969-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1970-01-01T00:00:00Z")), 'ISODate("1970-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1999-01-01T00:00:00Z")), 'ISODate("1999-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("2000-01-01T00:00:00Z")), 'ISODate("2000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("2001-01-01T00:00:00Z")), 'ISODate("2001-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("2069-01-01T00:00:00Z")), 'ISODate("2069-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("2070-01-01T00:00:00Z")), 'ISODate("2070-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("2099-01-01T00:00:00Z")), 'ISODate("2099-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("9999-01-01T00:00:00Z")), 'ISODate("9999-01-01T00:00:00Z")');
-
- /* Testing without - in date and : in time */
- assert.eq(tojson(ISODate("19980101T00:00:00Z")), 'ISODate("1998-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1999-0101T00:00:00Z")), 'ISODate("1999-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("200001-01T00:00:00Z")), 'ISODate("2000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1998-01-01T000000Z")), 'ISODate("1998-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("1999-01-01T00:0000Z")), 'ISODate("1999-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("2000-01-01T0000:00Z")), 'ISODate("2000-01-01T00:00:00Z")');
-
- /* Testing field overflows */
- assert.eq(tojson(ISODate("0000-01-01T00:00:60Z")), 'ISODate("0000-01-01T00:01:00Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:00:99Z")), 'ISODate("0000-01-01T00:01:39Z")');
-
- assert.eq(tojson(ISODate("0000-01-01T00:60:00Z")), 'ISODate("0000-01-01T01:00:00Z")');
- assert.eq(tojson(ISODate("0000-01-01T00:99:00Z")), 'ISODate("0000-01-01T01:39:00Z")');
-
- assert.eq(tojson(ISODate("0000-01-01T24:00:00Z")), 'ISODate("0000-01-02T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-01-01T99:00:00Z")), 'ISODate("0000-01-05T03:00:00Z")');
-
- assert.eq(tojson(ISODate("0000-01-32T00:00:00Z")), 'ISODate("0000-02-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-01-99T00:00:00Z")), 'ISODate("0000-04-08T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-02-29T00:00:00Z")), 'ISODate("0000-02-29T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-02-30T00:00:00Z")), 'ISODate("0000-03-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-02-31T00:00:00Z")), 'ISODate("0000-03-02T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-02-99T00:00:00Z")), 'ISODate("0000-05-09T00:00:00Z")');
-
- assert.eq(tojson(ISODate("0001-02-29T00:00:00Z")), 'ISODate("0001-03-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0001-02-30T00:00:00Z")), 'ISODate("0001-03-02T00:00:00Z")');
- assert.eq(tojson(ISODate("0001-02-31T00:00:00Z")), 'ISODate("0001-03-03T00:00:00Z")');
- assert.eq(tojson(ISODate("0001-02-99T00:00:00Z")), 'ISODate("0001-05-10T00:00:00Z")');
-
- assert.eq(tojson(ISODate("0000-13-01T00:00:00Z")), 'ISODate("0001-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("0000-99-01T00:00:00Z")), 'ISODate("0008-03-01T00:00:00Z")');
-
- /* Testing GMT offset instead of Z */
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+01")), 'ISODate("0000-12-31T23:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+99")), 'ISODate("0000-12-27T21:00:00Z")');
-
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-01")), 'ISODate("0001-01-01T01:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-99")), 'ISODate("0001-01-05T03:00:00Z")');
-
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+0100")), 'ISODate("0000-12-31T23:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+0160")), 'ISODate("0000-12-31T22:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+0199")), 'ISODate("0000-12-31T21:21:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+9999")), 'ISODate("0000-12-27T19:21:00Z")');
-
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-0100")), 'ISODate("0001-01-01T01:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-0160")), 'ISODate("0001-01-01T02:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-0199")), 'ISODate("0001-01-01T02:39:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-9999")), 'ISODate("0001-01-05T04:39:00Z")');
-
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+01:00")), 'ISODate("0000-12-31T23:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+01:60")), 'ISODate("0000-12-31T22:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+01:99")), 'ISODate("0000-12-31T21:21:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00+99:99")), 'ISODate("0000-12-27T19:21:00Z")');
-
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-01:00")), 'ISODate("0001-01-01T01:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-01:60")), 'ISODate("0001-01-01T02:00:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-01:99")), 'ISODate("0001-01-01T02:39:00Z")');
- assert.eq(tojson(ISODate("0001-01-01T00:00:00-99:99")), 'ISODate("0001-01-05T04:39:00Z")');
-
- /* Testing field underflows */
- assert.eq(tojson(ISODate("0001-01-00T00:00:00Z")), 'ISODate("0000-12-31T00:00:00Z")');
- assert.eq(tojson(ISODate("0001-00-00T00:00:00Z")), 'ISODate("0000-11-30T00:00:00Z")');
- assert.eq(tojson(ISODate("0001-00-01T00:00:00Z")), 'ISODate("0000-12-01T00:00:00Z")');
-
- /* Testing lowest and highest */
- assert.eq(tojson(ISODate("0000-01-01T00:00:00Z")), 'ISODate("0000-01-01T00:00:00Z")');
- assert.eq(tojson(ISODate("9999-12-31T23:59:59.999Z")), 'ISODate("9999-12-31T23:59:59.999Z")');
-
- /* Testing out of range */
+assert.eq(tojson(ISODate("0000-01-01")), 'ISODate("0000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00")), 'ISODate("0000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00Z")), 'ISODate("0000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.123")), 'ISODate("0000-01-01T00:00:00.123Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.123Z")), 'ISODate("0000-01-01T00:00:00.123Z")');
+
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.1Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.10Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.100Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.1000Z")), 'ISODate("0000-01-01T00:00:00.100Z")');
+
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.1234Z")), 'ISODate("0000-01-01T00:00:00.123Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:00.1235Z")), 'ISODate("0000-01-01T00:00:00.124Z")');
+
+/* Testing different years */
+assert.eq(tojson(ISODate("0000-01-01T00:00:00Z")), 'ISODate("0000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00Z")), 'ISODate("0001-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0069-01-01T00:00:00Z")), 'ISODate("0069-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0070-01-01T00:00:00Z")), 'ISODate("0070-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0099-01-01T00:00:00Z")), 'ISODate("0099-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0100-01-01T00:00:00Z")), 'ISODate("0100-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1800-01-01T00:00:00Z")), 'ISODate("1800-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1801-01-01T00:00:00Z")), 'ISODate("1801-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1869-01-01T00:00:00Z")), 'ISODate("1869-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1870-01-01T00:00:00Z")), 'ISODate("1870-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1899-01-01T00:00:00Z")), 'ISODate("1899-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1900-01-01T00:00:00Z")), 'ISODate("1900-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1901-01-01T00:00:00Z")), 'ISODate("1901-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1969-01-01T00:00:00Z")), 'ISODate("1969-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1970-01-01T00:00:00Z")), 'ISODate("1970-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1999-01-01T00:00:00Z")), 'ISODate("1999-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("2000-01-01T00:00:00Z")), 'ISODate("2000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("2001-01-01T00:00:00Z")), 'ISODate("2001-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("2069-01-01T00:00:00Z")), 'ISODate("2069-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("2070-01-01T00:00:00Z")), 'ISODate("2070-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("2099-01-01T00:00:00Z")), 'ISODate("2099-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("9999-01-01T00:00:00Z")), 'ISODate("9999-01-01T00:00:00Z")');
+
+/* Testing without - in date and : in time */
+assert.eq(tojson(ISODate("19980101T00:00:00Z")), 'ISODate("1998-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1999-0101T00:00:00Z")), 'ISODate("1999-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("200001-01T00:00:00Z")), 'ISODate("2000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1998-01-01T000000Z")), 'ISODate("1998-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("1999-01-01T00:0000Z")), 'ISODate("1999-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("2000-01-01T0000:00Z")), 'ISODate("2000-01-01T00:00:00Z")');
+
+/* Testing field overflows */
+assert.eq(tojson(ISODate("0000-01-01T00:00:60Z")), 'ISODate("0000-01-01T00:01:00Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:00:99Z")), 'ISODate("0000-01-01T00:01:39Z")');
+
+assert.eq(tojson(ISODate("0000-01-01T00:60:00Z")), 'ISODate("0000-01-01T01:00:00Z")');
+assert.eq(tojson(ISODate("0000-01-01T00:99:00Z")), 'ISODate("0000-01-01T01:39:00Z")');
+
+assert.eq(tojson(ISODate("0000-01-01T24:00:00Z")), 'ISODate("0000-01-02T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-01-01T99:00:00Z")), 'ISODate("0000-01-05T03:00:00Z")');
+
+assert.eq(tojson(ISODate("0000-01-32T00:00:00Z")), 'ISODate("0000-02-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-01-99T00:00:00Z")), 'ISODate("0000-04-08T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-02-29T00:00:00Z")), 'ISODate("0000-02-29T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-02-30T00:00:00Z")), 'ISODate("0000-03-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-02-31T00:00:00Z")), 'ISODate("0000-03-02T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-02-99T00:00:00Z")), 'ISODate("0000-05-09T00:00:00Z")');
+
+assert.eq(tojson(ISODate("0001-02-29T00:00:00Z")), 'ISODate("0001-03-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0001-02-30T00:00:00Z")), 'ISODate("0001-03-02T00:00:00Z")');
+assert.eq(tojson(ISODate("0001-02-31T00:00:00Z")), 'ISODate("0001-03-03T00:00:00Z")');
+assert.eq(tojson(ISODate("0001-02-99T00:00:00Z")), 'ISODate("0001-05-10T00:00:00Z")');
+
+assert.eq(tojson(ISODate("0000-13-01T00:00:00Z")), 'ISODate("0001-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("0000-99-01T00:00:00Z")), 'ISODate("0008-03-01T00:00:00Z")');
+
+/* Testing GMT offset instead of Z */
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+01")), 'ISODate("0000-12-31T23:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+99")), 'ISODate("0000-12-27T21:00:00Z")');
+
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-01")), 'ISODate("0001-01-01T01:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-99")), 'ISODate("0001-01-05T03:00:00Z")');
+
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+0100")), 'ISODate("0000-12-31T23:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+0160")), 'ISODate("0000-12-31T22:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+0199")), 'ISODate("0000-12-31T21:21:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+9999")), 'ISODate("0000-12-27T19:21:00Z")');
+
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-0100")), 'ISODate("0001-01-01T01:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-0160")), 'ISODate("0001-01-01T02:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-0199")), 'ISODate("0001-01-01T02:39:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-9999")), 'ISODate("0001-01-05T04:39:00Z")');
+
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+01:00")), 'ISODate("0000-12-31T23:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+01:60")), 'ISODate("0000-12-31T22:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+01:99")), 'ISODate("0000-12-31T21:21:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00+99:99")), 'ISODate("0000-12-27T19:21:00Z")');
+
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-01:00")), 'ISODate("0001-01-01T01:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-01:60")), 'ISODate("0001-01-01T02:00:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-01:99")), 'ISODate("0001-01-01T02:39:00Z")');
+assert.eq(tojson(ISODate("0001-01-01T00:00:00-99:99")), 'ISODate("0001-01-05T04:39:00Z")');
+
+/* Testing field underflows */
+assert.eq(tojson(ISODate("0001-01-00T00:00:00Z")), 'ISODate("0000-12-31T00:00:00Z")');
+assert.eq(tojson(ISODate("0001-00-00T00:00:00Z")), 'ISODate("0000-11-30T00:00:00Z")');
+assert.eq(tojson(ISODate("0001-00-01T00:00:00Z")), 'ISODate("0000-12-01T00:00:00Z")');
+
+/* Testing lowest and highest */
+assert.eq(tojson(ISODate("0000-01-01T00:00:00Z")), 'ISODate("0000-01-01T00:00:00Z")');
+assert.eq(tojson(ISODate("9999-12-31T23:59:59.999Z")), 'ISODate("9999-12-31T23:59:59.999Z")');
+
+/* Testing out of range */
+assert.throws(function() {
+ tojson(ISODate("0000-01-00T23:59:59.999Z"));
+});
+assert.throws(function() {
+ tojson(ISODate("9999-12-31T23:59:60Z"));
+});
+
+/* Testing broken format */
+var brokenFormatTests = [
+ "2017",
+ "2017-09",
+ "2017-09-16T18:37 25Z",
+ "2017-09-16T18 37:25Z",
+ "2017-09-16X18:37:25Z",
+ "2017-09 16T18:37:25Z",
+ "2017 09-16T18:37:25Z",
+ "2017-09-16T18:37:25 123Z",
+ "2017-09-16T18:37:25 0600",
+];
+
+brokenFormatTests.forEach(function(test) {
assert.throws(function() {
- tojson(ISODate("0000-01-00T23:59:59.999Z"));
- });
- assert.throws(function() {
- tojson(ISODate("9999-12-31T23:59:60Z"));
- });
-
- /* Testing broken format */
- var brokenFormatTests = [
- "2017",
- "2017-09",
- "2017-09-16T18:37 25Z",
- "2017-09-16T18 37:25Z",
- "2017-09-16X18:37:25Z",
- "2017-09 16T18:37:25Z",
- "2017 09-16T18:37:25Z",
- "2017-09-16T18:37:25 123Z",
- "2017-09-16T18:37:25 0600",
- ];
-
- brokenFormatTests.forEach(function(test) {
- assert.throws(function() {
- print(tojson(ISODate(test)));
- }, [tojson(test)]);
- });
-
- /* Testing conversion to milliseconds */
- assert.eq(ISODate("1969-12-31T23:59:59.999Z"), new Date(-1));
- assert.eq(ISODate("1969-12-31T23:59:59.000Z"), new Date(-1000));
- assert.eq(ISODate("1900-01-01T00:00:00.000Z"), new Date(-2208988800000));
- assert.eq(ISODate("1899-12-31T23:59:59.999Z"), new Date(-2208988800001));
- assert.eq(ISODate("0000-01-01T00:00:00.000Z"), new Date(-62167219200000));
- assert.eq(ISODate("9999-12-31T23:59:59.999Z"), new Date(253402300799999));
+ print(tojson(ISODate(test)));
+ }, [tojson(test)]);
+});
+
+/* Testing conversion to milliseconds */
+assert.eq(ISODate("1969-12-31T23:59:59.999Z"), new Date(-1));
+assert.eq(ISODate("1969-12-31T23:59:59.000Z"), new Date(-1000));
+assert.eq(ISODate("1900-01-01T00:00:00.000Z"), new Date(-2208988800000));
+assert.eq(ISODate("1899-12-31T23:59:59.999Z"), new Date(-2208988800001));
+assert.eq(ISODate("0000-01-01T00:00:00.000Z"), new Date(-62167219200000));
+assert.eq(ISODate("9999-12-31T23:59:59.999Z"), new Date(253402300799999));
}());
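
The millisecond expectations in the final block can be reproduced with
Date.UTC, which counts milliseconds from the Unix epoch:

    new Date(Date.UTC(1900, 0, 1)).getTime();       // -2208988800000
    new Date(Date.UTC(10000, 0, 1)).getTime() - 1;  // 253402300799999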
diff --git a/jstests/aggregation/bugs/server8568.js b/jstests/aggregation/bugs/server8568.js
index ae9a9ad8202..71793f5696b 100644
--- a/jstests/aggregation/bugs/server8568.js
+++ b/jstests/aggregation/bugs/server8568.js
@@ -4,40 +4,40 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
- var coll = db.sqrt;
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- // Helper for testing that op returns expResult.
- function testOp(op, expResult) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
- }
-
- // Helper for testing that op results in error with code errorCode.
- function testError(op, errorCode) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assertErrorCode(coll, pipeline, errorCode);
- }
-
- // Valid input: Numeric arg >= 0, null, or NaN.
-
- testOp({$sqrt: [100]}, 10);
- testOp({$sqrt: [0]}, 0);
- // All types converted to doubles.
- testOp({$sqrt: [NumberLong("100")]}, 10);
- // LLONG_MAX is converted to a double.
- testOp({$sqrt: [NumberLong("9223372036854775807")]}, 3037000499.97605);
- // Null inputs result in null.
- testOp({$sqrt: [null]}, null);
- // NaN inputs result in NaN.
- testOp({$sqrt: [NaN]}, NaN);
-
- // Invalid input: non-numeric/non-null, arg is negative.
-
- // Arg must be numeric or null.
- testError({$sqrt: ["string"]}, 28765);
- // Args cannot be negative.
- testError({$sqrt: [-1]}, 28714);
+'use strict';
+var coll = db.sqrt;
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+// Helper for testing that op returns expResult.
+function testOp(op, expResult) {
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
+}
+
+// Helper for testing that op results in error with code errorCode.
+function testError(op, errorCode) {
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assertErrorCode(coll, pipeline, errorCode);
+}
+
+// Valid input: Numeric arg >= 0, null, or NaN.
+
+testOp({$sqrt: [100]}, 10);
+testOp({$sqrt: [0]}, 0);
+// All types converted to doubles.
+testOp({$sqrt: [NumberLong("100")]}, 10);
+// LLONG_MAX is converted to a double.
+testOp({$sqrt: [NumberLong("9223372036854775807")]}, 3037000499.97605);
+// Null inputs result in null.
+testOp({$sqrt: [null]}, null);
+// NaN inputs result in NaN.
+testOp({$sqrt: [NaN]}, NaN);
+
+// Invalid input: non-numeric/non-null, arg is negative.
+
+// Arg must be numeric or null.
+testError({$sqrt: ["string"]}, 28765);
+// Args cannot be negative.
+testError({$sqrt: [-1]}, 28714);
}());
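
The LLONG_MAX expectation above matches plain double-precision math in the
shell, since NumberLong("9223372036854775807") converts to 2^63 as a double:

    Math.sqrt(9223372036854775807);  // ~3037000499.97605, the expected value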
diff --git a/jstests/aggregation/bugs/server8581.js b/jstests/aggregation/bugs/server8581.js
index 54b97be3d08..fa81578ccbd 100644
--- a/jstests/aggregation/bugs/server8581.js
+++ b/jstests/aggregation/bugs/server8581.js
@@ -79,47 +79,47 @@ a3result = [{
a4result = [
{
- _id: 1,
- level: 1,
- b: {
- level: 3,
- c: 5,
- d: [{level: 1, e: 4}, {f: 6}, "NOT AN OBJECT!!11!", [2, 3, 4, {level: 1, r: 11}]]
- },
- h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
- l: {m: {level: 3, n: 12}},
- o: [],
- q: 14
+ _id: 1,
+ level: 1,
+ b: {
+ level: 3,
+ c: 5,
+ d: [{level: 1, e: 4}, {f: 6}, "NOT AN OBJECT!!11!", [2, 3, 4, {level: 1, r: 11}]]
+ },
+ h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
+ l: {m: {level: 3, n: 12}},
+ o: [],
+ q: 14
},
{
- _id: 2,
- level: 4,
+ _id: 2,
+ level: 4,
}
];
a5result = [
{
- _id: 1,
- level: 1,
- b: {
- level: 3,
- c: 5,
- d: [
- {level: 1, e: 4},
- {f: 6},
- {level: 5, g: 9},
- "NOT AN OBJECT!!11!",
- [2, 3, 4, {level: 1, r: 11}, {level: 5, s: 99}]
- ]
- },
- h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
- l: {m: {level: 3, n: 12}},
- o: [{level: 5, p: 19}],
- q: 14
+ _id: 1,
+ level: 1,
+ b: {
+ level: 3,
+ c: 5,
+ d: [
+ {level: 1, e: 4},
+ {f: 6},
+ {level: 5, g: 9},
+ "NOT AN OBJECT!!11!",
+ [2, 3, 4, {level: 1, r: 11}, {level: 5, s: 99}]
+ ]
+ },
+ h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
+ l: {m: {level: 3, n: 12}},
+ o: [{level: 5, p: 19}],
+ q: 14
},
{
- _id: 2,
- level: 4,
+ _id: 2,
+ level: 4,
}
];
diff --git a/jstests/aggregation/bugs/server9444.js b/jstests/aggregation/bugs/server9444.js
index f3c6a449fad..6bb554c8e62 100644
--- a/jstests/aggregation/bugs/server9444.js
+++ b/jstests/aggregation/bugs/server9444.js
@@ -1,64 +1,64 @@
// server-9444 support disk storage of intermediate results in aggregation
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
+load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
- const t = db.server9444;
- t.drop();
+const t = db.server9444;
+t.drop();
- const sharded = FixtureHelpers.isSharded(t);
+const sharded = FixtureHelpers.isSharded(t);
- var memoryLimitMB = sharded ? 200 : 100;
+var memoryLimitMB = sharded ? 200 : 100;
- function loadData() {
- var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ','
- for (var i = 0; i < memoryLimitMB + 1; i++)
- t.insert({_id: i, bigStr: i + bigStr, random: Math.random()});
+function loadData() {
+ var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ','
+ for (var i = 0; i < memoryLimitMB + 1; i++)
+ t.insert({_id: i, bigStr: i + bigStr, random: Math.random()});
- assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024);
- }
- loadData();
+ assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024);
+}
+loadData();
- function test(pipeline, outOfMemoryCode) {
- // ensure by default we error out if exceeding memory limit
- var res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}});
- assert.commandFailed(res);
- assert.eq(res.code, outOfMemoryCode);
+function test(pipeline, outOfMemoryCode) {
+ // ensure by default we error out if exceeding memory limit
+ var res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}});
+ assert.commandFailed(res);
+ assert.eq(res.code, outOfMemoryCode);
- // ensure allowDiskUse: false does what it says
- res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}, allowDiskUse: false});
- assert.commandFailed(res);
- assert.eq(res.code, outOfMemoryCode);
+ // ensure allowDiskUse: false does what it says
+ res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}, allowDiskUse: false});
+ assert.commandFailed(res);
+ assert.eq(res.code, outOfMemoryCode);
- // allowDiskUse only supports bool. In particular, numbers aren't allowed.
- res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}, allowDiskUse: 1});
- assert.commandFailed(res);
+ // allowDiskUse only supports bool. In particular, numbers aren't allowed.
+ res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}, allowDiskUse: 1});
+ assert.commandFailed(res);
- // ensure we work when allowDiskUse === true
- res = t.aggregate(pipeline, {allowDiskUse: true});
- assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc
- }
+ // ensure we work when allowDiskUse === true
+ res = t.aggregate(pipeline, {allowDiskUse: true});
+ assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc
+}
- var groupCode = 16945;
- var sortCode = 16819;
- var sortLimitCode = 16820;
+var groupCode = 16945;
+var sortCode = 16819;
+var sortLimitCode = 16820;
- test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}], groupCode);
+test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}], groupCode);
- // sorting with _id would use index which doesn't require extsort
- test([{$sort: {random: 1}}], sortCode);
- test([{$sort: {bigStr: 1}}], sortCode); // big key and value
+// sorting with _id would use an index, which doesn't require extsort
+test([{$sort: {random: 1}}], sortCode);
+test([{$sort: {bigStr: 1}}], sortCode); // big key and value
- // make sure sort + large limit won't crash the server (SERVER-10136)
- test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode);
+// make sure sort + large limit won't crash the server (SERVER-10136)
+test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode);
- // test combining two extSorts in both same and different orders
- test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode);
- test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode);
- test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}, {$sort: {random: 1}}], groupCode);
- test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode);
+// test combining two extSorts in both same and different orders
+test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode);
+test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode);
+test([{$group: {_id: '$_id', bigStr: {$min: '$bigStr'}}}, {$sort: {random: 1}}], groupCode);
+test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode);
- // don't leave large collection laying around
- t.drop();
+// don't leave the large collection lying around
+t.drop();
})();
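
A minimal sketch of the opt-in path exercised above, assuming a collection
`c` whose sort keys exceed the in-memory limit:

    db.c.aggregate([{$sort: {bigStr: 1}}], {allowDiskUse: true});
    // The same pipeline without allowDiskUse (or with allowDiskUse: false)
    // fails with code 16819 once the memory limit is exceeded.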
diff --git a/jstests/aggregation/bugs/server9625.js b/jstests/aggregation/bugs/server9625.js
index 4a525aba518..4cbf487b5e0 100644
--- a/jstests/aggregation/bugs/server9625.js
+++ b/jstests/aggregation/bugs/server9625.js
@@ -5,70 +5,70 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
- var coll = db.server9625;
- coll.drop();
- assert.writeOK(coll.insert({}));
+'use strict';
+var coll = db.server9625;
+coll.drop();
+assert.writeOK(coll.insert({}));
- // Helper for testing that op returns expResult.
- function testOp(op, expResult) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
- }
+// Helper for testing that op returns expResult.
+function testOp(op, expResult) {
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
+}
- // ExpressionFromAccumulators take either a list of arguments or a single array argument.
- testOp({$avg: [1, 2, 3, 4, 5]}, 3);
- testOp({$avg: [[1, 2, 3, 4, 5]]}, 3);
- testOp({$min: [1, 2, 3, 4, 5]}, 1);
- testOp({$min: [[1, 2, 3, 4, 5]]}, 1);
- testOp({$max: [1, 2, 3, 4, 5]}, 5);
- testOp({$max: [[1, 2, 3, 4, 5]]}, 5);
- testOp({$sum: [1, 2, 3, 4, 5]}, 15);
- testOp({$sum: [[1, 2, 3, 4, 5]]}, 15);
- testOp({$stdDevPop: [1, 3]}, 1);
- testOp({$stdDevPop: [[1, 3]]}, 1);
- testOp({$stdDevSamp: [1, 2, 3]}, 1);
- testOp({$stdDevSamp: [[1, 2, 3]]}, 1);
+// ExpressionFromAccumulators take either a list of arguments or a single array argument.
+testOp({$avg: [1, 2, 3, 4, 5]}, 3);
+testOp({$avg: [[1, 2, 3, 4, 5]]}, 3);
+testOp({$min: [1, 2, 3, 4, 5]}, 1);
+testOp({$min: [[1, 2, 3, 4, 5]]}, 1);
+testOp({$max: [1, 2, 3, 4, 5]}, 5);
+testOp({$max: [[1, 2, 3, 4, 5]]}, 5);
+testOp({$sum: [1, 2, 3, 4, 5]}, 15);
+testOp({$sum: [[1, 2, 3, 4, 5]]}, 15);
+testOp({$stdDevPop: [1, 3]}, 1);
+testOp({$stdDevPop: [[1, 3]]}, 1);
+testOp({$stdDevSamp: [1, 2, 3]}, 1);
+testOp({$stdDevSamp: [[1, 2, 3]]}, 1);
- // Null arguments are ignored.
- testOp({$avg: [1, 2, 3, 4, 5, null]}, 3);
- testOp({$min: [1, 2, 3, 4, 5, null]}, 1);
- testOp({$max: [1, 2, 3, 4, 5, null]}, 5);
- testOp({$sum: [1, 2, 3, 4, 5, null]}, 15);
- testOp({$stdDevPop: [1, 3, null]}, 1);
- testOp({$stdDevSamp: [1, 2, 3, null]}, 1);
+// Null arguments are ignored.
+testOp({$avg: [1, 2, 3, 4, 5, null]}, 3);
+testOp({$min: [1, 2, 3, 4, 5, null]}, 1);
+testOp({$max: [1, 2, 3, 4, 5, null]}, 5);
+testOp({$sum: [1, 2, 3, 4, 5, null]}, 15);
+testOp({$stdDevPop: [1, 3, null]}, 1);
+testOp({$stdDevSamp: [1, 2, 3, null]}, 1);
- // NaN arguments are processed by all expressions.
- testOp({$avg: [1, 2, 3, 4, 5, NaN]}, NaN);
- testOp({$min: [1, 2, 3, 4, 5, NaN]}, NaN);
- testOp({$max: [1, 2, 3, 4, 5, NaN]}, 5);
- testOp({$sum: [1, 2, 3, 4, 5, NaN]}, NaN);
- testOp({$stdDevPop: [1, 3, NaN]}, NaN);
- testOp({$stdDevSamp: [1, 2, 3, NaN]}, NaN);
+// NaN arguments are processed by all expressions.
+testOp({$avg: [1, 2, 3, 4, 5, NaN]}, NaN);
+testOp({$min: [1, 2, 3, 4, 5, NaN]}, NaN);
+testOp({$max: [1, 2, 3, 4, 5, NaN]}, 5);
+testOp({$sum: [1, 2, 3, 4, 5, NaN]}, NaN);
+testOp({$stdDevPop: [1, 3, NaN]}, NaN);
+testOp({$stdDevSamp: [1, 2, 3, NaN]}, NaN);
- // Use at least one non-constant value in the following tests, to ensure
- // isAssociative() and isCommutative() are called. If all arguments are constant, the
- // optimization will evaluate them all into one, without calling isAssociative() nor
- // isCommutative().
- coll.drop();
- assert.writeOK(coll.insert({"a": 1, "b": 6}));
+// Use at least one non-constant value in the following tests, to ensure
+// isAssociative() and isCommutative() are called. If all arguments are constant, the
+// optimization will evaluate them all into one, without calling isAssociative() or
+// isCommutative().
+coll.drop();
+assert.writeOK(coll.insert({"a": 1, "b": 6}));
- // These expressions are associative and commutative so inner expression can be combined with
- // outer.
- testOp({$sum: ["$a", 2, 3, {$sum: [4, 5]}]}, 15);
- testOp({$min: ["$a", 2, 3, {$min: [4, 5]}]}, 1);
- testOp({$max: ["$a", 2, 3, {$max: [4, 5]}]}, 5);
+// These expressions are associative and commutative so inner expression can be combined with
+// outer.
+testOp({$sum: ["$a", 2, 3, {$sum: [4, 5]}]}, 15);
+testOp({$min: ["$a", 2, 3, {$min: [4, 5]}]}, 1);
+testOp({$max: ["$a", 2, 3, {$max: [4, 5]}]}, 5);
- // These expressions are not associative and commutative so inner expression cannot be combined
- // with outer.
- testOp({$avg: ["$a", 3, {$avg: [4, 6]}]}, 3);
- testOp({$stdDevPop: ["$a", {$stdDevPop: [1, 3]}]}, 0);
- testOp({$stdDevSamp: ["$a", {$stdDevSamp: [1, 2, 3]}]}, 0);
+// These expressions are not associative and commutative so inner expression cannot be combined
+// with outer.
+testOp({$avg: ["$a", 3, {$avg: [4, 6]}]}, 3);
+testOp({$stdDevPop: ["$a", {$stdDevPop: [1, 3]}]}, 0);
+testOp({$stdDevSamp: ["$a", {$stdDevSamp: [1, 2, 3]}]}, 0);
- // If isAssociative() and isCommutative() did not return false when provided a single argument,
- // the single array argument provided to the inner expression would be ignored instead of
- // treated as a list of arguments, and these tests would fail.
- testOp({$sum: ["$a", 2, 3, {$sum: [["$a", 4, 5]]}]}, 16);
- testOp({$min: ["$b", 2, 3, {$min: [["$a", 4, 5]]}]}, 1);
- testOp({$max: ["$a", 2, 3, {$max: [["$b", 4, 5]]}]}, 6);
+// If isAssociative() and isCommutative() did not return false when provided a single argument,
+// the single array argument provided to the inner expression would be ignored instead of
+// treated as a list of arguments, and these tests would fail.
+testOp({$sum: ["$a", 2, 3, {$sum: [["$a", 4, 5]]}]}, 16);
+testOp({$min: ["$b", 2, 3, {$min: [["$a", 4, 5]]}]}, 1);
+testOp({$max: ["$a", 2, 3, {$max: [["$b", 4, 5]]}]}, 6);
}());
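
The single-array-argument convention tested above means these two spellings
are equivalent (using the file's testOp helper):

    testOp({$sum: [1, 2, 3]}, 6);    // variadic list of arguments
    testOp({$sum: [[1, 2, 3]]}, 6);  // one array argument, same result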
diff --git a/jstests/aggregation/bugs/skip_limit_overflow.js b/jstests/aggregation/bugs/skip_limit_overflow.js
index f0d7e0b27c7..50e665b178f 100644
--- a/jstests/aggregation/bugs/skip_limit_overflow.js
+++ b/jstests/aggregation/bugs/skip_limit_overflow.js
@@ -8,116 +8,115 @@
* @tags: [do_not_wrap_aggregations_in_facets, assumes_unsharded_collection]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStages' and other explain helpers.
+load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStages' and other explain helpers.
- const coll = db.server39788;
- coll.drop();
+const coll = db.server39788;
+coll.drop();
- function testPipeline(pipeline, expectedResult, optimizedAwayStages) {
- const explainOutput = coll.explain().aggregate(pipeline);
+function testPipeline(pipeline, expectedResult, optimizedAwayStages) {
+ const explainOutput = coll.explain().aggregate(pipeline);
- assert(explainOutput.hasOwnProperty("stages"),
- "Expected pipeline " + tojsononeline(pipeline) +
-                  " to use the aggregation framework in the explain output: " +
- tojson(explainOutput));
+ assert(explainOutput.hasOwnProperty("stages"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+               " to use the aggregation framework in the explain output: " + tojson(explainOutput));
- if (optimizedAwayStages) {
- optimizedAwayStages.forEach(
- (stage) =>
- assert(!aggPlanHasStage(explainOutput, stage),
- "Expected pipeline " + tojsononeline(pipeline) + " to *not* include a " +
- stage + " stage in the explain output: " + tojson(explainOutput)));
- }
-
- for (let path in expectedResult) {
- const subPaths = path.split(".");
- const stageName = subPaths[0];
- const stages = getAggPlanStages(explainOutput, stageName);
- assert(stages !== null,
- "Expected pipeline " + tojsononeline(pipeline) + " to include a " + stageName +
- " stage in the explain output: " + tojson(explainOutput));
- assert(stages.length == expectedResult[path].length,
- "Expected pipeline " + tojsononeline(pipeline) + " to include " +
-                 expectedResult[path].length + " " + stageName + " stages in the explain output: " +
- tojson(explainOutput));
- assert.eq(
- stages.reduce(
- (res, stage) => {
- res.push(subPaths.reduce((res, cur) => res[cur], stage));
- return res;
- },
- []),
- expectedResult[path],
- "Stage: " + stageName + ", path: " + path + ", explain: " + tojson(explainOutput));
- }
+ if (optimizedAwayStages) {
+ optimizedAwayStages.forEach(
+ (stage) =>
+ assert(!aggPlanHasStage(explainOutput, stage),
+ "Expected pipeline " + tojsononeline(pipeline) + " to *not* include a " +
+ stage + " stage in the explain output: " + tojson(explainOutput)));
+ }
- // Ensure the aggregate command doesn't fail.
- assert.eq(coll.aggregate(pipeline).toArray(), []);
+ for (let path in expectedResult) {
+ const subPaths = path.split(".");
+ const stageName = subPaths[0];
+ const stages = getAggPlanStages(explainOutput, stageName);
+ assert(stages !== null,
+ "Expected pipeline " + tojsononeline(pipeline) + " to include a " + stageName +
+ " stage in the explain output: " + tojson(explainOutput));
+ assert(stages.length == expectedResult[path].length,
+ "Expected pipeline " + tojsononeline(pipeline) + " to include " +
+                   expectedResult[path].length + " " + stageName +
+ " stages in the explain output: " + tojson(explainOutput));
+ assert.eq(
+ stages.reduce(
+ (res, stage) => {
+ res.push(subPaths.reduce((res, cur) => res[cur], stage));
+ return res;
+ },
+ []),
+ expectedResult[path],
+ "Stage: " + stageName + ", path: " + path + ", explain: " + tojson(explainOutput));
}
- // Case where overflow of limit + skip prevents limit stage from being absorbed. Values
- // are specified as integers > MAX_LONG. Note that we cannot specify this huge value as
- // a NumberLong, as we get a number conversion error (even if it's passed as a string).
- testPipeline([{$sort: {x: -1}}, {$skip: 18446744073709552000}, {$limit: 6}],
- {"$limit": [NumberLong(6)], "$skip": [NumberLong("9223372036854775807")]});
- testPipeline([{$sort: {x: -1}}, {$skip: 6}, {$limit: 18446744073709552000}],
- {"$limit": [NumberLong("9223372036854775807")], "$skip": [NumberLong(6)]});
+ // Ensure the aggregate command doesn't fail.
+ assert.eq(coll.aggregate(pipeline).toArray(), []);
+}
+
+// Case where overflow of limit + skip prevents limit stage from being absorbed. Values
+// are specified as integers > MAX_LONG. Note that we cannot specify this huge value as
+// a NumberLong, as we get a number conversion error (even if it's passed as a string).
+testPipeline([{$sort: {x: -1}}, {$skip: 18446744073709552000}, {$limit: 6}],
+ {"$limit": [NumberLong(6)], "$skip": [NumberLong("9223372036854775807")]});
+testPipeline([{$sort: {x: -1}}, {$skip: 6}, {$limit: 18446744073709552000}],
+ {"$limit": [NumberLong("9223372036854775807")], "$skip": [NumberLong(6)]});
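+// In both cases above, the out-of-range value is clamped to the largest NumberLong
+// (2^63 - 1 = 9223372036854775807), as the expected $skip and $limit values show.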
- // Case where overflow of limit + skip prevents limit stage from being absorbed. One of the
- // values == MAX_LONG, another one is 1.
- testPipeline([{$sort: {x: -1}}, {$skip: NumberLong("9223372036854775807")}, {$limit: 1}],
- {"$limit": [NumberLong(1)], "$skip": [NumberLong("9223372036854775807")]});
- testPipeline([{$sort: {x: -1}}, {$skip: 1}, {$limit: NumberLong("9223372036854775807")}],
- {"$limit": [NumberLong("9223372036854775807")], "$skip": [NumberLong(1)]});
+// Case where overflow of limit + skip prevents limit stage from being absorbed. One of the
+// values == MAX_LONG, another one is 1.
+testPipeline([{$sort: {x: -1}}, {$skip: NumberLong("9223372036854775807")}, {$limit: 1}],
+ {"$limit": [NumberLong(1)], "$skip": [NumberLong("9223372036854775807")]});
+testPipeline([{$sort: {x: -1}}, {$skip: 1}, {$limit: NumberLong("9223372036854775807")}],
+ {"$limit": [NumberLong("9223372036854775807")], "$skip": [NumberLong(1)]});
- // Case where limit + skip do not overflow. Limit == MAX_LONG and skip is 0. Should be able to
- // absorb the limit and skip stages.
- // Note that we cannot specify limit == 0, as the command rejects it with an error, so
- // that case is not tested here.
- testPipeline([{$sort: {x: -1}}, {$skip: 0}, {$limit: NumberLong("9223372036854775807")}],
- {"$cursor.limit": [NumberLong("9223372036854775807")]},
- ["$skip", "$limit"]);
+// Case where limit + skip do not overflow. Limit == MAX_LONG and skip is 0. Should be able to
+// absorb the limit and skip stages.
+// Note that we cannot specify limit == 0, as the command rejects it with an error, so
+// that case is not tested here.
+testPipeline([{$sort: {x: -1}}, {$skip: 0}, {$limit: NumberLong("9223372036854775807")}],
+ {"$cursor.limit": [NumberLong("9223372036854775807")]},
+ ["$skip", "$limit"]);
- // Case where limit + skip do not overflow. One value is MAX_LONG - 1 and another one is 1.
- // Should be able to absorb the limit stage.
- testPipeline([{$sort: {x: -1}}, {$skip: NumberLong("9223372036854775806")}, {$limit: 1}],
- {
- "$cursor.limit": [NumberLong("9223372036854775807")],
- "$skip": [NumberLong("9223372036854775806")]
- },
- ["$limit"]);
- testPipeline([{$sort: {x: -1}}, {$skip: 1}, {$limit: NumberLong("9223372036854775806")}],
- {"$cursor.limit": [NumberLong("9223372036854775807")], "$skip": [NumberLong(1)]},
- ["$limit"]);
+// Case where limit + skip do not overflow. One value is MAX_LONG - 1 and another one is 1.
+// Should be able to absorb the limit stage.
+testPipeline([{$sort: {x: -1}}, {$skip: NumberLong("9223372036854775806")}, {$limit: 1}],
+ {
+ "$cursor.limit": [NumberLong("9223372036854775807")],
+ "$skip": [NumberLong("9223372036854775806")]
+ },
+ ["$limit"]);
+testPipeline([{$sort: {x: -1}}, {$skip: 1}, {$limit: NumberLong("9223372036854775806")}],
+ {"$cursor.limit": [NumberLong("9223372036854775807")], "$skip": [NumberLong(1)]},
+ ["$limit"]);
- // Case where limit + skip do not overflow. Both values are < MAX_LONG.
- testPipeline([{$sort: {x: -1}}, {$skip: 674761616283}, {$limit: 35361718}],
- {"$cursor.limit": [NumberLong(674796978001)], "$skip": [NumberLong(674761616283)]},
- ["$limit"]);
- testPipeline([{$sort: {x: -1}}, {$skip: 35361718}, {$limit: 674761616283}],
- {"$cursor.limit": [NumberLong(674796978001)], "$skip": [NumberLong(35361718)]},
- ["$limit"]);
+// Case where limit + skip do not overflow. Both values are < MAX_LONG.
+testPipeline([{$sort: {x: -1}}, {$skip: 674761616283}, {$limit: 35361718}],
+ {"$cursor.limit": [NumberLong(674796978001)], "$skip": [NumberLong(674761616283)]},
+ ["$limit"]);
+testPipeline([{$sort: {x: -1}}, {$skip: 35361718}, {$limit: 674761616283}],
+ {"$cursor.limit": [NumberLong(674796978001)], "$skip": [NumberLong(35361718)]},
+ ["$limit"]);
- // Case where overflow of limit + skip + skip prevents limit stage from being absorbed.
- // One skip == MAX_LONG - 1, another one is 1. Should merge two skip stages into one.
- testPipeline(
- [{$sort: {x: -1}}, {$skip: 1}, {$skip: NumberLong("9223372036854775806")}, {$limit: 1}],
- {"$limit": [NumberLong(1)], "$skip": [NumberLong("9223372036854775807")]});
+// Case where overflow of limit + skip + skip prevents limit stage from being absorbed.
+// One skip == MAX_LONG - 1, another one is 1. Should merge two skip stages into one.
+testPipeline(
+ [{$sort: {x: -1}}, {$skip: 1}, {$skip: NumberLong("9223372036854775806")}, {$limit: 1}],
+ {"$limit": [NumberLong(1)], "$skip": [NumberLong("9223372036854775807")]});
- // Case where overflow of limit + skip + skip prevents limit stage from being absorbed.
- // One skip == MAX_LONG, another one is 1. Should not absorb or merge any stages.
- testPipeline(
- [{$sort: {x: -1}}, {$skip: 1}, {$skip: NumberLong("9223372036854775807")}, {$limit: 1}],
- {"$limit": [NumberLong(1)], "$skip": [NumberLong(1), NumberLong("9223372036854775807")]});
+// Case where overflow of limit + skip + skip prevents limit stage from being absorbed.
+// One skip == MAX_LONG, another one is 1. Should not absorb or merge any stages.
+testPipeline(
+ [{$sort: {x: -1}}, {$skip: 1}, {$skip: NumberLong("9223372036854775807")}, {$limit: 1}],
+ {"$limit": [NumberLong(1)], "$skip": [NumberLong(1), NumberLong("9223372036854775807")]});
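+// (Contrast with the previous case: 1 + (MAX_LONG - 1) sums to exactly MAX_LONG, which
+// still fits in a NumberLong, whereas 1 + MAX_LONG would overflow, so these two $skip
+// stages are left separate.)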
- // Case where sample size is > MAX_LONG.
- testPipeline([{$sample: {size: 18446744073709552000}}],
- {"$sample.size": [NumberLong("9223372036854775807")]});
- // Case where sample size is == MAX_LONG.
- testPipeline([{$sample: {size: NumberLong("9223372036854775807")}}],
- {"$sample.size": [NumberLong("9223372036854775807")]});
- // Case where sample size is == MAX_LONG - 1.
- testPipeline([{$sample: {size: NumberLong("9223372036854775806")}}],
- {"$sample.size": [NumberLong("9223372036854775806")]});
+// Case where sample size is > MAX_LONG.
+testPipeline([{$sample: {size: 18446744073709552000}}],
+ {"$sample.size": [NumberLong("9223372036854775807")]});
+// Case where sample size is == MAX_LONG.
+testPipeline([{$sample: {size: NumberLong("9223372036854775807")}}],
+ {"$sample.size": [NumberLong("9223372036854775807")]});
+// Case where sample size is == MAX_LONG - 1.
+testPipeline([{$sample: {size: NumberLong("9223372036854775806")}}],
+ {"$sample.size": [NumberLong("9223372036854775806")]});
})();
diff --git a/jstests/aggregation/bugs/sort_arrays.js b/jstests/aggregation/bugs/sort_arrays.js
index 9fbb707decb..e83b4466cc6 100644
--- a/jstests/aggregation/bugs/sort_arrays.js
+++ b/jstests/aggregation/bugs/sort_arrays.js
@@ -1,17 +1,17 @@
// Tests that sorting by a field that contains an array will sort by the minimum element in that
// array.
(function() {
- "use strict";
+"use strict";
- const coll = db.foo;
- coll.drop();
- assert.writeOK(coll.insert([{_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}, {_id: 4, a: [2, 1]}]));
- const expectedOrder = [{_id: 4, a: [2, 1]}, {_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}];
+const coll = db.foo;
+coll.drop();
+assert.writeOK(coll.insert([{_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}, {_id: 4, a: [2, 1]}]));
+const expectedOrder = [{_id: 4, a: [2, 1]}, {_id: 2, a: [2, 3]}, {_id: 3, a: [2, 4]}];
- assert.eq(coll.aggregate([{$sort: {a: 1, _id: 1}}]).toArray(), expectedOrder);
- assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), expectedOrder);
+assert.eq(coll.aggregate([{$sort: {a: 1, _id: 1}}]).toArray(), expectedOrder);
+assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), expectedOrder);
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.eq(coll.aggregate([{$sort: {a: 1, _id: 1}}]).toArray(), expectedOrder);
- assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), expectedOrder);
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(coll.aggregate([{$sort: {a: 1, _id: 1}}]).toArray(), expectedOrder);
+assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), expectedOrder);
}());
diff --git a/jstests/aggregation/bugs/substr.js b/jstests/aggregation/bugs/substr.js
index 1090b09dffb..c4eaff7e137 100644
--- a/jstests/aggregation/bugs/substr.js
+++ b/jstests/aggregation/bugs/substr.js
@@ -122,8 +122,8 @@ assert.eq(
a: {
$substrBytes: [
{
- $substrBytes:
- [{$substrBytes: [{$substrBytes: ['abcdefghij', 1, 6]}, 2, 5]}, 0, 3]
+ $substrBytes:
+ [{$substrBytes: [{$substrBytes: ['abcdefghij', 1, 6]}, 2, 5]}, 0, 3]
},
1,
1
diff --git a/jstests/aggregation/explain.js b/jstests/aggregation/explain.js
index 3e446afe43a..9203ce83d46 100644
--- a/jstests/aggregation/explain.js
+++ b/jstests/aggregation/explain.js
@@ -1,28 +1,27 @@
// Tests the behavior of explain() when used with the aggregation
// pipeline. Explain() should not read or modify the plan cache.
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For getAggPlanStage().
+load('jstests/libs/analyze_plan.js'); // For getAggPlanStage().
- let coll = db.explain;
- coll.drop();
+let coll = db.explain;
+coll.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- assert.commandWorked(coll.createIndex({y: 1}));
+assert.commandWorked(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({y: 1}));
- let result = coll.explain().aggregate([{$match: {x: 1, y: 1}}]);
- assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
+let result = coll.explain().aggregate([{$match: {x: 1, y: 1}}]);
+assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
- // At this point, there should be no entries in the plan cache.
- result = coll.explain().aggregate([{$match: {x: 1, y: 1}}]);
- assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
+// At this point, there should be no entries in the plan cache.
+result = coll.explain().aggregate([{$match: {x: 1, y: 1}}]);
+assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
- // Now add an entry in the cache without explain().
- result = coll.aggregate([{$match: {x: 1, y: 1}}]);
-
- // Now there's an entry in the cache, make sure explain() doesn't use it.
- result = coll.explain().aggregate([{$match: {x: 1, y: 1}}]);
- assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
+// Now add an entry in the cache without explain().
+result = coll.aggregate([{$match: {x: 1, y: 1}}]);
+// Now there's an entry in the cache, make sure explain() doesn't use it.
+result = coll.explain().aggregate([{$match: {x: 1, y: 1}}]);
+assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
})();
diff --git a/jstests/aggregation/explain_limit.js b/jstests/aggregation/explain_limit.js
index e3451dc0c7c..a0dabdc1b02 100644
--- a/jstests/aggregation/explain_limit.js
+++ b/jstests/aggregation/explain_limit.js
@@ -1,80 +1,79 @@
// Tests the behavior of explain() when used with the aggregation pipeline and limits.
// @tags: [do_not_wrap_aggregations_in_facets]
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
-
- let coll = db.explain_limit;
-
- const kMultipleSolutionLimit = 101;
- const kCollSize = kMultipleSolutionLimit + 5;
- const kLimit = 10;
-
- // Return whether or not explain() was successful and contained the appropriate fields given the
- // requested verbosity. Checks that the number of documents examined is correct based on
- // 'multipleSolutions', which indicates there was more than one plan available.
- function checkResults({results, verbosity, multipleSolutions}) {
- let cursorSubdocs = getAggPlanStages(results, "$cursor");
- assert.gt(cursorSubdocs.length, 0);
- for (let stageResult of cursorSubdocs) {
- assert(stageResult.hasOwnProperty("$cursor"));
- let result = stageResult.$cursor;
-
- assert.eq(result.limit, NumberLong(kLimit), tojson(results));
-
- if (verbosity === "queryPlanner") {
- assert(!result.hasOwnProperty("executionStats"), tojson(results));
+"use strict";
+
+load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+
+let coll = db.explain_limit;
+
+const kMultipleSolutionLimit = 101;
+const kCollSize = kMultipleSolutionLimit + 5;
+const kLimit = 10;
+
+// Return whether or not explain() was successful and contained the appropriate fields given the
+// requested verbosity. Checks that the number of documents examined is correct based on
+// 'multipleSolutions', which indicates there was more than one plan available.
+function checkResults({results, verbosity, multipleSolutions}) {
+ let cursorSubdocs = getAggPlanStages(results, "$cursor");
+ assert.gt(cursorSubdocs.length, 0);
+ for (let stageResult of cursorSubdocs) {
+ assert(stageResult.hasOwnProperty("$cursor"));
+ let result = stageResult.$cursor;
+
+ assert.eq(result.limit, NumberLong(kLimit), tojson(results));
+
+ if (verbosity === "queryPlanner") {
+ assert(!result.hasOwnProperty("executionStats"), tojson(results));
+ } else {
+ // If it's "executionStats" or "allPlansExecution".
+ if (multipleSolutions) {
+ // If there's more than one plan available, we may run several of them against
+ // each other to see which is fastest. During this, our limit may be ignored
+ // and so explain may return that it examined more documents than we asked it
+ // to.
+ assert.lte(
+ result.executionStats.nReturned, kMultipleSolutionLimit, tojson(results));
+ assert.lte(result.executionStats.totalDocsExamined,
+ kMultipleSolutionLimit,
+ tojson(results));
} else {
- // If it's "executionStats" or "allPlansExecution".
- if (multipleSolutions) {
- // If there's more than one plan available, we may run several of them against
- // each other to see which is fastest. During this, our limit may be ignored
- // and so explain may return that it examined more documents than we asked it
- // to.
- assert.lte(
- result.executionStats.nReturned, kMultipleSolutionLimit, tojson(results));
- assert.lte(result.executionStats.totalDocsExamined,
- kMultipleSolutionLimit,
- tojson(results));
- } else {
- assert.eq(result.executionStats.nReturned, kLimit, tojson(results));
- assert.eq(result.executionStats.totalDocsExamined, kLimit, tojson(results));
- }
+ assert.eq(result.executionStats.nReturned, kLimit, tojson(results));
+ assert.eq(result.executionStats.totalDocsExamined, kLimit, tojson(results));
}
}
}
+}
- // explain() should respect limit.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
+// explain() should respect limit.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
- for (let i = 0; i < kCollSize; i++) {
- assert.writeOK(coll.insert({a: 1}));
- }
+for (let i = 0; i < kCollSize; i++) {
+ assert.writeOK(coll.insert({a: 1}));
+}
- const pipeline = [{$match: {a: 1}}, {$limit: kLimit}];
+const pipeline = [{$match: {a: 1}}, {$limit: kLimit}];
- let plannerLevel = coll.explain("queryPlanner").aggregate(pipeline);
- checkResults({results: plannerLevel, verbosity: "queryPlanner"});
+let plannerLevel = coll.explain("queryPlanner").aggregate(pipeline);
+checkResults({results: plannerLevel, verbosity: "queryPlanner"});
- let execLevel = coll.explain("executionStats").aggregate(pipeline);
- checkResults({results: execLevel, verbosity: "executionStats", multipleSolutions: false});
+let execLevel = coll.explain("executionStats").aggregate(pipeline);
+checkResults({results: execLevel, verbosity: "executionStats", multipleSolutions: false});
- let allPlansExecLevel = coll.explain("allPlansExecution").aggregate(pipeline);
- checkResults(
- {results: allPlansExecLevel, verbosity: "allPlansExecution", multipleSolutions: false});
+let allPlansExecLevel = coll.explain("allPlansExecution").aggregate(pipeline);
+checkResults(
+ {results: allPlansExecLevel, verbosity: "allPlansExecution", multipleSolutions: false});
- // Create a second index so that more than one plan is available.
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+// Create a second index so that more than one plan is available.
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
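+// Both {a: 1} and {a: 1, b: 1} can answer the {a: 1} predicate, so the multi-planner
+// now has competing candidate plans and 'multipleSolutions' is expected below.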
- plannerLevel = coll.explain("queryPlanner").aggregate(pipeline);
- checkResults({results: plannerLevel, verbosity: "queryPlanner"});
+plannerLevel = coll.explain("queryPlanner").aggregate(pipeline);
+checkResults({results: plannerLevel, verbosity: "queryPlanner"});
- execLevel = coll.explain("executionStats").aggregate(pipeline);
- checkResults({results: execLevel, verbosity: "executionStats", multipleSolutions: true});
+execLevel = coll.explain("executionStats").aggregate(pipeline);
+checkResults({results: execLevel, verbosity: "executionStats", multipleSolutions: true});
- allPlansExecLevel = coll.explain("allPlansExecution").aggregate(pipeline);
- checkResults(
- {results: allPlansExecLevel, verbosity: "allPlansExecution", multipleSolutions: true});
+allPlansExecLevel = coll.explain("allPlansExecution").aggregate(pipeline);
+checkResults({results: allPlansExecLevel, verbosity: "allPlansExecution", multipleSolutions: true});
})();
diff --git a/jstests/aggregation/explain_writing_aggs.js b/jstests/aggregation/explain_writing_aggs.js
index 8cf58ba0040..412060bcfa6 100644
--- a/jstests/aggregation/explain_writing_aggs.js
+++ b/jstests/aggregation/explain_writing_aggs.js
@@ -6,90 +6,90 @@
* @tags: [assumes_unsharded_collection, assumes_write_concern_unchanged]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos().
- load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos().
+load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
- let sourceColl = db.explain_writing_aggs_source;
- let targetColl = db.explain_writing_aggs_target;
- sourceColl.drop();
- targetColl.drop();
+let sourceColl = db.explain_writing_aggs_source;
+let targetColl = db.explain_writing_aggs_target;
+sourceColl.drop();
+targetColl.drop();
- assert.writeOK(sourceColl.insert({_id: 1}));
+assert.writeOK(sourceColl.insert({_id: 1}));
- // Test that $out can be explained with 'queryPlanner' explain verbosity and does not perform
- // any writes.
- let explain = sourceColl.explain("queryPlanner").aggregate([{$out: targetColl.getName()}]);
- let outExplain = getAggPlanStage(explain, "$out");
- assert.neq(outExplain, null, explain);
- assert.eq(outExplain.$out, targetColl.getName(), explain);
- assert.eq(targetColl.find().itcount(), 0, explain);
+// Test that $out can be explained with 'queryPlanner' explain verbosity and does not perform
+// any writes.
+let explain = sourceColl.explain("queryPlanner").aggregate([{$out: targetColl.getName()}]);
+let outExplain = getAggPlanStage(explain, "$out");
+assert.neq(outExplain, null, explain);
+assert.eq(outExplain.$out, targetColl.getName(), explain);
+assert.eq(targetColl.find().itcount(), 0, explain);
- // Test each $merge mode with 'queryPlanner' explain verbosity.
- withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
- const mergeStage = {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- };
- const explain = sourceColl.explain("queryPlanner").aggregate([mergeStage]);
- const mergeExplain = getAggPlanStage(explain, "$merge");
- assert.neq(mergeExplain, null, explain);
- assert(mergeExplain.hasOwnProperty("$merge"), explain);
- assert.eq(mergeExplain.$merge.whenMatched, whenMatchedMode, mergeExplain);
- assert.eq(mergeExplain.$merge.whenNotMatched, whenNotMatchedMode, mergeExplain);
- assert.eq(mergeExplain.$merge.on, "_id", mergeExplain);
- assert.eq(targetColl.find().itcount(), 0, explain);
- });
+// Test each $merge mode with 'queryPlanner' explain verbosity.
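+// withEachMergeMode() (from merge_helpers.js) invokes the callback once per
+// {whenMatched, whenNotMatched} combination, so every $merge mode is explained here.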
+withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
+ const mergeStage = {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ };
+ const explain = sourceColl.explain("queryPlanner").aggregate([mergeStage]);
+ const mergeExplain = getAggPlanStage(explain, "$merge");
+ assert.neq(mergeExplain, null, explain);
+ assert(mergeExplain.hasOwnProperty("$merge"), explain);
+ assert.eq(mergeExplain.$merge.whenMatched, whenMatchedMode, mergeExplain);
+ assert.eq(mergeExplain.$merge.whenNotMatched, whenNotMatchedMode, mergeExplain);
+ assert.eq(mergeExplain.$merge.on, "_id", mergeExplain);
+ assert.eq(targetColl.find().itcount(), 0, explain);
+});
- function assertExecutionExplainFails(writingStage, verbosity) {
- assert.commandFailedWithCode(db.runCommand({
- explain: {aggregate: sourceColl.getName(), pipeline: [writingStage], cursor: {}},
- verbosity: verbosity
- }),
- [51029, 51184]);
- assert.eq(targetColl.find().itcount(), 0);
- }
+function assertExecutionExplainFails(writingStage, verbosity) {
+ assert.commandFailedWithCode(db.runCommand({
+ explain: {aggregate: sourceColl.getName(), pipeline: [writingStage], cursor: {}},
+ verbosity: verbosity
+ }),
+ [51029, 51184]);
+ assert.eq(targetColl.find().itcount(), 0);
+}
- // Test that 'executionStats' and 'allPlansExecution' level explain fail with each $merge mode.
- // These explain modes must fail, since they would attempt to do writes. Explain must always be
- // read-only (including explain of update and delete, which describe what writes they _would_ do
- // if executed for real).
- withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
- const mergeStage = {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- };
- assertExecutionExplainFails(mergeStage, "executionStats");
- assertExecutionExplainFails(mergeStage, "allPlansExecution");
- });
+// Test that 'executionStats' and 'allPlansExecution' level explain fail with each $merge mode.
+// These explain modes must fail, since they would attempt to do writes. Explain must always be
+// read-only (including explain of update and delete, which describe what writes they _would_ do
+// if executed for real).
+withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
+ const mergeStage = {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ };
+ assertExecutionExplainFails(mergeStage, "executionStats");
+ assertExecutionExplainFails(mergeStage, "allPlansExecution");
+});
- // Also test the $out stage since it also performs writes.
- assertExecutionExplainFails({$out: targetColl.getName()}, "executionStats");
- assertExecutionExplainFails({$out: targetColl.getName()}, "allPlansExecution");
+// Also test the $out stage since it also performs writes.
+assertExecutionExplainFails({$out: targetColl.getName()}, "executionStats");
+assertExecutionExplainFails({$out: targetColl.getName()}, "allPlansExecution");
- // Execution explain should fail even if the source collection does not exist.
- sourceColl.drop();
- withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
- const mergeStage = {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- };
- assertExecutionExplainFails(mergeStage, "executionStats");
- assertExecutionExplainFails(mergeStage, "allPlansExecution");
- });
+// Execution explain should fail even if the source collection does not exist.
+sourceColl.drop();
+withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
+ const mergeStage = {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ };
+ assertExecutionExplainFails(mergeStage, "executionStats");
+ assertExecutionExplainFails(mergeStage, "allPlansExecution");
+});
- // Also test the $out stage since it also performs writes.
- assertExecutionExplainFails({$out: targetColl.getName()}, "executionStats");
- assertExecutionExplainFails({$out: targetColl.getName()}, "allPlansExecution");
+// Also test the $out stage since it also performs writes.
+assertExecutionExplainFails({$out: targetColl.getName()}, "executionStats");
+assertExecutionExplainFails({$out: targetColl.getName()}, "allPlansExecution");
}());
diff --git a/jstests/aggregation/expressions/arrayToObject.js b/jstests/aggregation/expressions/arrayToObject.js
index 114d69b7aaa..df78b9f1aaf 100644
--- a/jstests/aggregation/expressions/arrayToObject.js
+++ b/jstests/aggregation/expressions/arrayToObject.js
@@ -1,77 +1,75 @@
// Tests for $arrayToObject aggregation expression.
(function() {
- "use strict";
+"use strict";
- // For assertErrorCode().
- load("jstests/aggregation/extras/utils.js");
+// For assertErrorCode().
+load("jstests/aggregation/extras/utils.js");
- let coll = db.array_to_object_expr;
- coll.drop();
+let coll = db.array_to_object_expr;
+coll.drop();
- // Write one document so that the aggregations which use $const produce a result.
- assert.writeOK(coll.insert({_id: "sentinel", a: 1}));
+// Write one document so that the aggregations which use $const produce a result.
+assert.writeOK(coll.insert({_id: "sentinel", a: 1}));
- /*
- * Check that the collapsed, object form of 'expanded' (which is computed using $arrayToObject)
- * matches our expectation.
- */
- function assertCollapsed(expanded, expectedCollapsed) {
- const result =
- coll.aggregate(
- [{$project: {collapsed: {$arrayToObject: {$const: expanded}}}}, {$limit: 1}])
- .toArray();
- assert.eq(result, [{_id: "sentinel", collapsed: expectedCollapsed}]);
- }
+/*
+ * Check that the collapsed, object form of 'expanded' (which is computed using $arrayToObject)
+ * matches our expectation.
+ */
+function assertCollapsed(expanded, expectedCollapsed) {
+ const result =
+ coll.aggregate([{$project: {collapsed: {$arrayToObject: {$const: expanded}}}}, {$limit: 1}])
+ .toArray();
+ assert.eq(result, [{_id: "sentinel", collapsed: expectedCollapsed}]);
+}
- /*
- * Check that $arrayToObject on the given value produces the expected error.
- */
- function assertPipelineErrors(expanded, errorCode) {
- assertErrorCode(
- coll,
- [{$project: {collapsed: {$arrayToObject: {$const: expanded}}}}, {$limit: 1}],
- errorCode);
- }
+/*
+ * Check that $arrayToObject on the given value produces the expected error.
+ */
+function assertPipelineErrors(expanded, errorCode) {
+ assertErrorCode(coll,
+ [{$project: {collapsed: {$arrayToObject: {$const: expanded}}}}, {$limit: 1}],
+ errorCode);
+}
- // $arrayToObject correctly converts an array of key-value pairs to an object.
- assertCollapsed([["price", 24], ["item", "apple"]], {"price": 24, "item": "apple"});
- assertCollapsed([{"k": "price", "v": 24}, {"k": "item", "v": "apple"}],
- {"price": 24, "item": "apple"});
- // If duplicate field names are in the array, $arrayToObject should use the value from the last one.
- assertCollapsed([{"k": "price", "v": 24}, {"k": "price", "v": 100}], {"price": 100});
- assertCollapsed([["price", 24], ["price", 100]], {"price": 100});
+// $arrayToObject correctly converts an array of key-value pairs to an object.
+assertCollapsed([["price", 24], ["item", "apple"]], {"price": 24, "item": "apple"});
+assertCollapsed([{"k": "price", "v": 24}, {"k": "item", "v": "apple"}],
+ {"price": 24, "item": "apple"});
+// If duplicate field names are in the array, $arrayToObject should use the value from the last one.
+assertCollapsed([{"k": "price", "v": 24}, {"k": "price", "v": 100}], {"price": 100});
+assertCollapsed([["price", 24], ["price", 100]], {"price": 100});
- assertCollapsed([["price", 24], ["item", "apple"]], {"price": 24, "item": "apple"});
- assertCollapsed([], {});
+assertCollapsed([["price", 24], ["item", "apple"]], {"price": 24, "item": "apple"});
+assertCollapsed([], {});
- assertCollapsed(null, null);
- assertCollapsed(undefined, null);
- assertCollapsed([{"k": "price", "v": null}], {"price": null});
- assertCollapsed([{"k": "price", "v": undefined}], {"price": undefined});
- // Need to manually check the case where 'expanded' is not in the document.
- assert.commandWorked(coll.insert({_id: "missing-expanded-field"}));
- const result = coll.aggregate([
- {$match: {_id: "missing-expanded-field"}},
- {$project: {collapsed: {$arrayToObject: "$expanded"}}}
- ])
- .toArray();
- assert.eq(result, [{_id: "missing-expanded-field", collapsed: null}]);
+assertCollapsed(null, null);
+assertCollapsed(undefined, null);
+assertCollapsed([{"k": "price", "v": null}], {"price": null});
+assertCollapsed([{"k": "price", "v": undefined}], {"price": undefined});
+// Need to manually check the case where 'expanded' is not in the document.
+assert.commandWorked(coll.insert({_id: "missing-expanded-field"}));
+const result = coll.aggregate([
+ {$match: {_id: "missing-expanded-field"}},
+ {$project: {collapsed: {$arrayToObject: "$expanded"}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: "missing-expanded-field", collapsed: null}]);
- assertPipelineErrors([{"k": "price", "v": 24}, ["item", "apple"]], 40391);
- assertPipelineErrors([["item", "apple"], {"k": "price", "v": 24}], 40396);
- assertPipelineErrors("string", 40386);
- assertPipelineErrors(ObjectId(), 40386);
- assertPipelineErrors(NumberLong(0), 40386);
- assertPipelineErrors([0], 40398);
- assertPipelineErrors([["missing_value"]], 40397);
- assertPipelineErrors([[321, 12]], 40395);
- assertPipelineErrors([["key", "value", "offset"]], 40397);
- assertPipelineErrors({y: []}, 40386);
- assertPipelineErrors([{y: "x", x: "y"}], 40393);
- assertPipelineErrors([{k: "missing"}], 40392);
- assertPipelineErrors([{k: 24, v: "string"}], 40394);
- assertPipelineErrors([{k: null, v: "nullKey"}], 40394);
- assertPipelineErrors([{k: undefined, v: "undefinedKey"}], 40394);
- assertPipelineErrors([{y: "ignored", k: "item", v: "pear"}], 40392);
- assertPipelineErrors(NaN, 40386);
+assertPipelineErrors([{"k": "price", "v": 24}, ["item", "apple"]], 40391);
+assertPipelineErrors([["item", "apple"], {"k": "price", "v": 24}], 40396);
+assertPipelineErrors("string", 40386);
+assertPipelineErrors(ObjectId(), 40386);
+assertPipelineErrors(NumberLong(0), 40386);
+assertPipelineErrors([0], 40398);
+assertPipelineErrors([["missing_value"]], 40397);
+assertPipelineErrors([[321, 12]], 40395);
+assertPipelineErrors([["key", "value", "offset"]], 40397);
+assertPipelineErrors({y: []}, 40386);
+assertPipelineErrors([{y: "x", x: "y"}], 40393);
+assertPipelineErrors([{k: "missing"}], 40392);
+assertPipelineErrors([{k: 24, v: "string"}], 40394);
+assertPipelineErrors([{k: null, v: "nullKey"}], 40394);
+assertPipelineErrors([{k: undefined, v: "undefinedKey"}], 40394);
+assertPipelineErrors([{y: "ignored", k: "item", v: "pear"}], 40392);
+assertPipelineErrors(NaN, 40386);
}());
diff --git a/jstests/aggregation/expressions/collation_expressions.js b/jstests/aggregation/expressions/collation_expressions.js
index 93f2ada0197..dc959791f2f 100644
--- a/jstests/aggregation/expressions/collation_expressions.js
+++ b/jstests/aggregation/expressions/collation_expressions.js
@@ -3,133 +3,135 @@
// Test that expressions which can make string comparisons respect the collation.
(function() {
- "use strict";
+"use strict";
- // For testExpression() and testExpressionWithCollation().
- load("jstests/aggregation/extras/utils.js");
-
- var coll = db.collation_expressions;
- coll.drop();
-
- var results;
- const caseInsensitive = {locale: "en_US", strength: 2};
- const numericOrdering = {locale: "en_US", numericOrdering: true};
-
- // Test that $cmp respects the collection-default collation.
- assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
- testExpression(coll, {$cmp: ["a", "A"]}, 0);
-
- coll.drop();
-
- // Test that $cmp respects the collation.
- testExpressionWithCollation(coll, {$cmp: ["a", "A"]}, 0, caseInsensitive);
-
- // Test that $eq respects the collation.
- testExpressionWithCollation(coll, {$eq: ["a", "A"]}, true, caseInsensitive);
-
- // Test that $ne respects the collation.
- testExpressionWithCollation(coll, {$ne: ["a", "A"]}, false, caseInsensitive);
-
- // Test that $lt respects the collation.
- testExpressionWithCollation(coll, {$lt: ["2", "10"]}, true, numericOrdering);
-
- // Test that $lte respects the collation.
- testExpressionWithCollation(coll, {$lte: ["2", "10"]}, true, numericOrdering);
- testExpressionWithCollation(coll, {$lte: ["b", "B"]}, true, caseInsensitive);
-
- // Test that $gt respects the collation.
- testExpressionWithCollation(coll, {$gt: ["2", "10"]}, false, numericOrdering);
-
- // Test that $gte respects the collation.
- testExpressionWithCollation(coll, {$gte: ["2", "10"]}, false, numericOrdering);
- testExpressionWithCollation(coll, {$gte: ["b", "B"]}, true, caseInsensitive);
-
- // Test that $in respects the collation.
- testExpressionWithCollation(coll, {$in: ["A", [1, 2, "a", 3, 4]]}, true, caseInsensitive);
-
- // Test that $indexOfArray respects the collation.
- testExpressionWithCollation(
- coll, {$indexOfArray: [[1, 2, "a", "b", "c", "B"], "B"]}, 3, caseInsensitive);
-
- // Test that $indexOfBytes doesn't respect the collation.
- testExpressionWithCollation(coll, {$indexOfBytes: ["12abcB", "B"]}, 5, caseInsensitive);
-
- // Test that $indexOfCP doesn't respect the collation.
- testExpressionWithCollation(coll, {$indexOfCP: ["12abcB", "B"]}, 5, caseInsensitive);
-
- // Test that $strcasecmp doesn't respect the collation.
- testExpressionWithCollation(coll, {$strcasecmp: ["100", "2"]}, -1, numericOrdering);
-
- // Test that $setEquals respects the collation.
- testExpressionWithCollation(
- coll, {$setEquals: [["a", "B"], ["b", "A"]]}, true, caseInsensitive);
-
- // Test that $setIntersection respects the collation.
- results =
- coll.aggregate([{$project: {out: {$setIntersection: [["a", "B", "c"], ["d", "b", "A"]]}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(2, results[0].out.length);
-
- // Test that $setUnion respects the collation.
- results = coll.aggregate([{$project: {out: {$setUnion: [["a", "B", "c"], ["d", "b", "A"]]}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(4, results[0].out.length);
-
- // Test that $setDifference respects the collation.
- testExpressionWithCollation(
- coll, {$setDifference: [["a", "B"], ["b", "A"]]}, [], caseInsensitive);
-
- // Test that $setIsSubset respects the collation.
- testExpressionWithCollation(
- coll, {$setIsSubset: [["a", "B"], ["b", "A", "c"]]}, true, caseInsensitive);
-
- // Test that $split doesn't respect the collation.
- testExpressionWithCollation(coll, {$split: ["abc", "B"]}, ["abc"], caseInsensitive);
-
- // Test that an $and which can be optimized out respects the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "A"}));
- results = coll.aggregate([{$project: {out: {$and: [{$eq: ["$str", "a"]}, {$eq: ["b", "B"]}]}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(true, results[0].out);
-
- // Test that an $and which cannot be optimized out respects the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "A", str2: "B"}));
- results =
- coll.aggregate([{$project: {out: {$and: [{$eq: ["$str", "a"]}, {$eq: ["$str2", "b"]}]}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(true, results[0].out);
-
- // Test that an $or which can be optimized out respects the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "A"}));
- results = coll.aggregate([{$project: {out: {$or: [{$eq: ["$str", "a"]}, {$eq: ["b", "c"]}]}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(true, results[0].out);
-
- // Test that an $or which cannot be optimized out respects the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "A", str2: "B"}));
- results =
- coll.aggregate([{$project: {out: {$or: [{$eq: ["$str", "c"]}, {$eq: ["$str2", "b"]}]}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(true, results[0].out);
-
- // Test that $filter's subexpressions respect the collation.
- testExpressionWithCollation(coll,
+// For testExpression() and testExpressionWithCollation().
+load("jstests/aggregation/extras/utils.js");
+
+var coll = db.collation_expressions;
+coll.drop();
+
+var results;
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+const numericOrdering = {
+ locale: "en_US",
+ numericOrdering: true
+};
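+// strength: 2 makes string comparisons case-insensitive ("a" equals "A"), while
+// numericOrdering: true compares digit substrings by numeric value ("2" sorts before "10").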
+
+// Test that $cmp respects the collection-default collation.
+assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
+testExpression(coll, {$cmp: ["a", "A"]}, 0);
+
+coll.drop();
+
+// Test that $cmp respects the collation.
+testExpressionWithCollation(coll, {$cmp: ["a", "A"]}, 0, caseInsensitive);
+
+// Test that $eq respects the collation.
+testExpressionWithCollation(coll, {$eq: ["a", "A"]}, true, caseInsensitive);
+
+// Test that $ne respects the collation.
+testExpressionWithCollation(coll, {$ne: ["a", "A"]}, false, caseInsensitive);
+
+// Test that $lt respects the collation.
+testExpressionWithCollation(coll, {$lt: ["2", "10"]}, true, numericOrdering);
+
+// Test that $lte respects the collation.
+testExpressionWithCollation(coll, {$lte: ["2", "10"]}, true, numericOrdering);
+testExpressionWithCollation(coll, {$lte: ["b", "B"]}, true, caseInsensitive);
+
+// Test that $gt respects the collation.
+testExpressionWithCollation(coll, {$gt: ["2", "10"]}, false, numericOrdering);
+
+// Test that $gte respects the collation.
+testExpressionWithCollation(coll, {$gte: ["2", "10"]}, false, numericOrdering);
+testExpressionWithCollation(coll, {$gte: ["b", "B"]}, true, caseInsensitive);
+
+// Test that $in respects the collation.
+testExpressionWithCollation(coll, {$in: ["A", [1, 2, "a", 3, 4]]}, true, caseInsensitive);
+
+// Test that $indexOfArray respects the collation.
+testExpressionWithCollation(
+ coll, {$indexOfArray: [[1, 2, "a", "b", "c", "B"], "B"]}, 3, caseInsensitive);
+
+// Test that $indexOfBytes doesn't respect the collation.
+testExpressionWithCollation(coll, {$indexOfBytes: ["12abcB", "B"]}, 5, caseInsensitive);
+
+// Test that $indexOfCP doesn't respect the collation.
+testExpressionWithCollation(coll, {$indexOfCP: ["12abcB", "B"]}, 5, caseInsensitive);
+
+// Test that $strcasecmp doesn't respect the collation.
+testExpressionWithCollation(coll, {$strcasecmp: ["100", "2"]}, -1, numericOrdering);
+
+// Test that $setEquals respects the collation.
+testExpressionWithCollation(coll, {$setEquals: [["a", "B"], ["b", "A"]]}, true, caseInsensitive);
+
+// Test that $setIntersection respects the collation.
+results =
+ coll.aggregate([{$project: {out: {$setIntersection: [["a", "B", "c"], ["d", "b", "A"]]}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(2, results[0].out.length);
+
+// Test that $setUnion respects the collation.
+results = coll.aggregate([{$project: {out: {$setUnion: [["a", "B", "c"], ["d", "b", "A"]]}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(4, results[0].out.length);
+
+// Test that $setDifference respects the collation.
+testExpressionWithCollation(coll, {$setDifference: [["a", "B"], ["b", "A"]]}, [], caseInsensitive);
+
+// Test that $setIsSubset respects the collation.
+testExpressionWithCollation(
+ coll, {$setIsSubset: [["a", "B"], ["b", "A", "c"]]}, true, caseInsensitive);
+
+// Test that $split doesn't respect the collation.
+testExpressionWithCollation(coll, {$split: ["abc", "B"]}, ["abc"], caseInsensitive);
+
+// Test that an $and which can be optimized out respects the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "A"}));
+results = coll.aggregate([{$project: {out: {$and: [{$eq: ["$str", "a"]}, {$eq: ["b", "B"]}]}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(true, results[0].out);
+
+// Test that an $and which cannot be optimized out respects the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "A", str2: "B"}));
+results = coll.aggregate([{$project: {out: {$and: [{$eq: ["$str", "a"]}, {$eq: ["$str2", "b"]}]}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(true, results[0].out);
+
+// Test that an $or which can be optimized out respects the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "A"}));
+results = coll.aggregate([{$project: {out: {$or: [{$eq: ["$str", "a"]}, {$eq: ["b", "c"]}]}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(true, results[0].out);
+
+// Test that an $or which cannot be optimized out respects the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "A", str2: "B"}));
+results = coll.aggregate([{$project: {out: {$or: [{$eq: ["$str", "c"]}, {$eq: ["$str2", "b"]}]}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(true, results[0].out);
+
+// Test that $filter's subexpressions respect the collation.
+testExpressionWithCollation(coll,
{
$filter: {
input: {
@@ -146,8 +148,8 @@
["a", "A", "c", "C"],
caseInsensitive);
- // Test that $let's subexpressions respect the collation.
- testExpressionWithCollation(coll,
+// Test that $let's subexpressions respect the collation.
+testExpressionWithCollation(coll,
{
$let: {
vars: {str: {$cond: [{$eq: ["A", "a"]}, "b", "c"]}},
@@ -157,8 +159,8 @@
"d",
caseInsensitive);
- // Test that $map's subexpressions respect the collation.
- testExpressionWithCollation(
+// Test that $map's subexpressions respect the collation.
+testExpressionWithCollation(
coll,
{
$map: {
@@ -170,18 +172,18 @@
[true, false, true, false],
caseInsensitive);
- // Test that $group stage's _id expressions respect the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1}));
- results = coll.aggregate([{$group: {_id: {a: {$eq: ["a", "A"]}, b: {$eq: ["b", "B"]}}}}],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq(true, results[0]._id.a);
- assert.eq(true, results[0]._id.b);
-
- // Test that $reduce's subexpressions respect the collation.
- testExpressionWithCollation(
+// Test that $group stage's _id expressions respect the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1}));
+results = coll.aggregate([{$group: {_id: {a: {$eq: ["a", "A"]}, b: {$eq: ["b", "B"]}}}}],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(true, results[0]._id.a);
+assert.eq(true, results[0]._id.b);
+
+// Test that $reduce's subexpressions respect the collation.
+testExpressionWithCollation(
coll,
{
$reduce: {
@@ -195,50 +197,50 @@
{sum: 7},
caseInsensitive);
- // Test that $switch's subexpressions respect the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, a: "A"}));
- assert.writeOK(coll.insert({_id: 2, b: "B"}));
- assert.writeOK(coll.insert({_id: 3, c: "C"}));
- results = coll.aggregate([{
- $project: {
- out: {
- $switch: {
- branches: [
- {case: {$eq: ["$a", "a"]}, then: "foo"},
- {case: {$eq: ["$b", "b"]}, then: "bar"}
- ],
- default: "baz"
- }
- }
- }
- }],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(3, results.length);
- assert.eq("foo", results[0].out);
- assert.eq("bar", results[1].out);
- assert.eq("baz", results[2].out);
-
- // Test that a $zip's subexpressions respect the collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, evens: [0, 2, 4], odds: [1, 3]}));
- results = coll.aggregate([{
- $project: {
- out: {
- $zip: {
- inputs: [
- {$cond: [{$eq: ["A", "a"]}, "$evens", "$odds"]},
- {$cond: [{$eq: ["B", "b"]}, "$odds", "$evens"]}
- ],
- defaults: [0, {$cond: [{$eq: ["C", "c"]}, 5, 7]}],
- useLongestLength: true
- }
- }
- }
- }],
- {collation: caseInsensitive})
- .toArray();
- assert.eq(1, results.length);
- assert.eq([[0, 1], [2, 3], [4, 5]], results[0].out);
+// Test that $switch's subexpressions respect the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, a: "A"}));
+assert.writeOK(coll.insert({_id: 2, b: "B"}));
+assert.writeOK(coll.insert({_id: 3, c: "C"}));
+results = coll.aggregate([{
+ $project: {
+ out: {
+ $switch: {
+ branches: [
+ {case: {$eq: ["$a", "a"]}, then: "foo"},
+ {case: {$eq: ["$b", "b"]}, then: "bar"}
+ ],
+ default: "baz"
+ }
+ }
+ }
+ }],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(3, results.length);
+assert.eq("foo", results[0].out);
+assert.eq("bar", results[1].out);
+assert.eq("baz", results[2].out);
+
+// Test that a $zip's subexpressions respect the collation.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, evens: [0, 2, 4], odds: [1, 3]}));
+results = coll.aggregate([{
+ $project: {
+ out: {
+ $zip: {
+ inputs: [
+ {$cond: [{$eq: ["A", "a"]}, "$evens", "$odds"]},
+ {$cond: [{$eq: ["B", "b"]}, "$odds", "$evens"]}
+ ],
+ defaults: [0, {$cond: [{$eq: ["C", "c"]}, 5, 7]}],
+ useLongestLength: true
+ }
+ }
+ }
+ }],
+ {collation: caseInsensitive})
+ .toArray();
+assert.eq(1, results.length);
+assert.eq([[0, 1], [2, 3], [4, 5]], results[0].out);
})();
diff --git a/jstests/aggregation/expressions/convert.js b/jstests/aggregation/expressions/convert.js
index b32c6639751..4e56bf16265 100644
--- a/jstests/aggregation/expressions/convert.js
+++ b/jstests/aggregation/expressions/convert.js
@@ -2,325 +2,312 @@
 * Tests the behavior of the $convert aggregation operator.
*/
(function() {
- "use strict";
-
- const coll = db.expression_convert;
- function populateCollection(documentList) {
- coll.drop();
- var bulk = coll.initializeOrderedBulkOp();
- documentList.forEach(doc => bulk.insert(doc));
- assert.writeOK(bulk.execute());
- }
-
- //
- // One test document for each possible conversion. Edge cases for these conversions are tested
- // in expression_convert_test.cpp.
- //
- var conversionTestDocs = [
- {_id: 0, input: 1.9, target: "double", expected: 1.9},
- {_id: 1, input: 1.9, target: "string", expected: "1.9"},
- {_id: 2, input: 1.9, target: "bool", expected: true},
- {_id: 3, input: 1.9, target: "date", expected: ISODate("1970-01-01T00:00:00.001Z")},
- {_id: 4, input: 1.9, target: "int", expected: NumberInt(1)},
- {_id: 5, input: 1.9, target: "long", expected: NumberLong(1)},
- {_id: 6, input: 1.9, target: "decimal", expected: NumberDecimal(1.9)},
-
- {_id: 7, input: "1.9", target: "double", expected: 1.9},
- {_id: 8, input: "str", target: "string", expected: "str"},
- {
- _id: 9,
- input: "0123456789abcdef01234567",
- target: "objectId",
- expected: ObjectId("0123456789abcdef01234567")
- },
- {_id: 10, input: "", target: "bool", expected: true},
- {
- _id: 11,
- input: "1970-01-01T00:00:00.001Z",
- target: "date",
- expected: ISODate("1970-01-01T00:00:00.001Z")
- },
- {_id: 12, input: "1", target: "int", expected: NumberInt(1)},
- {_id: 13, input: "1", target: "long", expected: NumberLong(1)},
- {_id: 14, input: "1.9", target: "decimal", expected: NumberDecimal("1.9")},
-
- {
- _id: 15,
- input: ObjectId("0123456789abcdef01234567"),
- target: "string",
- expected: "0123456789abcdef01234567"
- },
- {_id: 16, input: ObjectId("0123456789abcdef01234567"), target: "bool", expected: true},
- {
- _id: 17,
- input: ObjectId("0123456789abcdef01234567"),
- target: "objectId",
- expected: ObjectId("0123456789abcdef01234567")
- },
- {
- _id: 18,
- input: ObjectId("0123456789abcdef01234567"),
- target: "date",
- expected: ISODate("1970-08-09T22:25:43Z")
- },
-
- {_id: 19, input: false, target: "double", expected: 0.0},
- {_id: 20, input: false, target: "string", expected: "false"},
- {_id: 21, input: false, target: "bool", expected: false},
- {_id: 22, input: false, target: "int", expected: NumberInt(0)},
- {_id: 23, input: false, target: "long", expected: NumberLong(0)},
- {_id: 24, input: false, target: "decimal", expected: NumberDecimal(0)},
-
- {_id: 25, input: ISODate("1970-01-01T00:00:00.123Z"), target: "double", expected: 123.0},
- {
- _id: 26,
- input: ISODate("1970-01-01T00:00:00.123Z"),
- target: "string",
- expected: "1970-01-01T00:00:00.123Z"
- },
- {_id: 27, input: ISODate("1970-01-01T00:00:00.123Z"), target: "bool", expected: true},
- {
- _id: 28,
- input: ISODate("1970-01-01T00:00:00.123Z"),
- target: "date",
- expected: ISODate("1970-01-01T00:00:00.123Z")
- },
- {
- _id: 29,
- input: ISODate("1970-01-01T00:00:00.123Z"),
- target: "long",
- expected: NumberLong(123)
- },
- {
- _id: 30,
- input: ISODate("1970-01-01T00:00:00.123Z"),
- target: "decimal",
- expected: NumberDecimal("123")
- },
-
- {_id: 31, input: NumberInt(1), target: "double", expected: 1.0},
- {_id: 32, input: NumberInt(1), target: "string", expected: "1"},
- {_id: 33, input: NumberInt(1), target: "bool", expected: true},
- {_id: 34, input: NumberInt(1), target: "int", expected: NumberInt(1)},
- {_id: 35, input: NumberInt(1), target: "long", expected: NumberLong(1)},
- {_id: 36, input: NumberInt(1), target: "decimal", expected: NumberDecimal("1")},
-
- {_id: 37, input: NumberLong(1), target: "double", expected: 1.0},
- {_id: 38, input: NumberLong(1), target: "string", expected: "1"},
- {_id: 39, input: NumberLong(1), target: "bool", expected: true},
- {
- _id: 40,
- input: NumberLong(1),
- target: "date",
- expected: ISODate("1970-01-01T00:00:00.001Z")
- },
- {_id: 41, input: NumberLong(1), target: "int", expected: NumberInt(1)},
- {_id: 42, input: NumberLong(1), target: "long", expected: NumberLong(1)},
- {_id: 43, input: NumberLong(1), target: "decimal", expected: NumberDecimal("1")},
-
- {_id: 44, input: NumberDecimal("1.9"), target: "double", expected: 1.9},
- {_id: 45, input: NumberDecimal("1.9"), target: "string", expected: "1.9"},
- {_id: 46, input: NumberDecimal("1.9"), target: "bool", expected: true},
- {
- _id: 47,
- input: NumberDecimal("1.9"),
- target: "date",
- expected: ISODate("1970-01-01T00:00:00.001Z")
- },
- {_id: 48, input: NumberDecimal("1.9"), target: "int", expected: NumberInt(1)},
- {_id: 49, input: NumberDecimal("1.9"), target: "long", expected: NumberLong(1)},
- {_id: 50, input: NumberDecimal("1.9"), target: "decimal", expected: NumberDecimal("1.9")},
-
- {_id: 51, input: MinKey, target: "bool", expected: true},
- {_id: 52, input: {foo: 1, bar: 2}, target: "bool", expected: true},
- {_id: 53, input: [1, 2], target: "bool", expected: true},
- {
- _id: 54,
- input: BinData(0, "BBBBBBBBBBBBBBBBBBBBBBBBBBBB"),
- target: "bool",
- expected: true
- },
- {_id: 55, input: /B*/, target: "bool", expected: true},
- {_id: 56, input: new DBRef("db.test", "oid"), target: "bool", expected: true},
- {_id: 57, input: function() {}, target: "bool", expected: true},
- // Symbol and CodeWScope are not supported from JavaScript, so we can't test them here.
- {_id: 58, input: new Timestamp(1 / 1000, 1), target: "bool", expected: true},
- {_id: 59, input: MinKey, target: "bool", expected: true}
- ];
- populateCollection(conversionTestDocs);
-
- // Test $convert on each document.
- var pipeline = [
- {
- $project: {
- output: {$convert: {to: "$target", input: "$input"}},
- target: "$target",
- expected: "$expected"
- }
- },
- {$addFields: {outputType: {$type: "$output"}}},
- {$sort: {_id: 1}}
- ];
- var aggResult = coll.aggregate(pipeline).toArray();
- assert.eq(aggResult.length, conversionTestDocs.length);
-
- aggResult.forEach(doc => {
- assert.eq(doc.output, doc.expected, "Unexpected conversion: _id = " + doc._id);
- assert.eq(doc.outputType, doc.target, "Conversion to incorrect type: _id = " + doc._id);
- });
-
- // Test each conversion using the shorthand $toBool, $toString, etc. syntax.
+"use strict";
+
+const coll = db.expression_convert;
+function populateCollection(documentList) {
+ coll.drop();
+ var bulk = coll.initializeOrderedBulkOp();
+ documentList.forEach(doc => bulk.insert(doc));
+ assert.writeOK(bulk.execute());
+}
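+// Each populateCollection() call below rebuilds the collection with a single ordered
+// bulk write, so every conversion test set starts from a clean, known set of documents.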
+
+//
+// One test document for each possible conversion. Edge cases for these conversions are tested
+// in expression_convert_test.cpp.
+//
+var conversionTestDocs = [
+ {_id: 0, input: 1.9, target: "double", expected: 1.9},
+ {_id: 1, input: 1.9, target: "string", expected: "1.9"},
+ {_id: 2, input: 1.9, target: "bool", expected: true},
+ {_id: 3, input: 1.9, target: "date", expected: ISODate("1970-01-01T00:00:00.001Z")},
+ {_id: 4, input: 1.9, target: "int", expected: NumberInt(1)},
+ {_id: 5, input: 1.9, target: "long", expected: NumberLong(1)},
+ {_id: 6, input: 1.9, target: "decimal", expected: NumberDecimal(1.9)},
+
+ {_id: 7, input: "1.9", target: "double", expected: 1.9},
+ {_id: 8, input: "str", target: "string", expected: "str"},
+ {
+ _id: 9,
+ input: "0123456789abcdef01234567",
+ target: "objectId",
+ expected: ObjectId("0123456789abcdef01234567")
+ },
+ {_id: 10, input: "", target: "bool", expected: true},
+ {
+ _id: 11,
+ input: "1970-01-01T00:00:00.001Z",
+ target: "date",
+ expected: ISODate("1970-01-01T00:00:00.001Z")
+ },
+ {_id: 12, input: "1", target: "int", expected: NumberInt(1)},
+ {_id: 13, input: "1", target: "long", expected: NumberLong(1)},
+ {_id: 14, input: "1.9", target: "decimal", expected: NumberDecimal("1.9")},
+
+ {
+ _id: 15,
+ input: ObjectId("0123456789abcdef01234567"),
+ target: "string",
+ expected: "0123456789abcdef01234567"
+ },
+ {_id: 16, input: ObjectId("0123456789abcdef01234567"), target: "bool", expected: true},
+ {
+ _id: 17,
+ input: ObjectId("0123456789abcdef01234567"),
+ target: "objectId",
+ expected: ObjectId("0123456789abcdef01234567")
+ },
+ {
+ _id: 18,
+ input: ObjectId("0123456789abcdef01234567"),
+ target: "date",
+ expected: ISODate("1970-08-09T22:25:43Z")
+ },
+
+ {_id: 19, input: false, target: "double", expected: 0.0},
+ {_id: 20, input: false, target: "string", expected: "false"},
+ {_id: 21, input: false, target: "bool", expected: false},
+ {_id: 22, input: false, target: "int", expected: NumberInt(0)},
+ {_id: 23, input: false, target: "long", expected: NumberLong(0)},
+ {_id: 24, input: false, target: "decimal", expected: NumberDecimal(0)},
+
+ {_id: 25, input: ISODate("1970-01-01T00:00:00.123Z"), target: "double", expected: 123.0},
+ {
+ _id: 26,
+ input: ISODate("1970-01-01T00:00:00.123Z"),
+ target: "string",
+ expected: "1970-01-01T00:00:00.123Z"
+ },
+ {_id: 27, input: ISODate("1970-01-01T00:00:00.123Z"), target: "bool", expected: true},
+ {
+ _id: 28,
+ input: ISODate("1970-01-01T00:00:00.123Z"),
+ target: "date",
+ expected: ISODate("1970-01-01T00:00:00.123Z")
+ },
+ {
+ _id: 29,
+ input: ISODate("1970-01-01T00:00:00.123Z"),
+ target: "long",
+ expected: NumberLong(123)
+ },
+ {
+ _id: 30,
+ input: ISODate("1970-01-01T00:00:00.123Z"),
+ target: "decimal",
+ expected: NumberDecimal("123")
+ },
+
+ {_id: 31, input: NumberInt(1), target: "double", expected: 1.0},
+ {_id: 32, input: NumberInt(1), target: "string", expected: "1"},
+ {_id: 33, input: NumberInt(1), target: "bool", expected: true},
+ {_id: 34, input: NumberInt(1), target: "int", expected: NumberInt(1)},
+ {_id: 35, input: NumberInt(1), target: "long", expected: NumberLong(1)},
+ {_id: 36, input: NumberInt(1), target: "decimal", expected: NumberDecimal("1")},
+
+ {_id: 37, input: NumberLong(1), target: "double", expected: 1.0},
+ {_id: 38, input: NumberLong(1), target: "string", expected: "1"},
+ {_id: 39, input: NumberLong(1), target: "bool", expected: true},
+ {_id: 40, input: NumberLong(1), target: "date", expected: ISODate("1970-01-01T00:00:00.001Z")},
+ {_id: 41, input: NumberLong(1), target: "int", expected: NumberInt(1)},
+ {_id: 42, input: NumberLong(1), target: "long", expected: NumberLong(1)},
+ {_id: 43, input: NumberLong(1), target: "decimal", expected: NumberDecimal("1")},
+
+ {_id: 44, input: NumberDecimal("1.9"), target: "double", expected: 1.9},
+ {_id: 45, input: NumberDecimal("1.9"), target: "string", expected: "1.9"},
+ {_id: 46, input: NumberDecimal("1.9"), target: "bool", expected: true},
+ {
+ _id: 47,
+ input: NumberDecimal("1.9"),
+ target: "date",
+ expected: ISODate("1970-01-01T00:00:00.001Z")
+ },
+ {_id: 48, input: NumberDecimal("1.9"), target: "int", expected: NumberInt(1)},
+ {_id: 49, input: NumberDecimal("1.9"), target: "long", expected: NumberLong(1)},
+ {_id: 50, input: NumberDecimal("1.9"), target: "decimal", expected: NumberDecimal("1.9")},
+
+ {_id: 51, input: MinKey, target: "bool", expected: true},
+ {_id: 52, input: {foo: 1, bar: 2}, target: "bool", expected: true},
+ {_id: 53, input: [1, 2], target: "bool", expected: true},
+ {_id: 54, input: BinData(0, "BBBBBBBBBBBBBBBBBBBBBBBBBBBB"), target: "bool", expected: true},
+ {_id: 55, input: /B*/, target: "bool", expected: true},
+ {_id: 56, input: new DBRef("db.test", "oid"), target: "bool", expected: true},
+ {_id: 57, input: function() {}, target: "bool", expected: true},
+ // Symbol and CodeWScope are not supported from JavaScript, so we can't test them here.
+ {_id: 58, input: new Timestamp(1 / 1000, 1), target: "bool", expected: true},
+    {_id: 59, input: MaxKey, target: "bool", expected: true}
+];
+populateCollection(conversionTestDocs);
+
+// Test $convert on each document.
+var pipeline = [
+ {
+ $project: {
+ output: {$convert: {to: "$target", input: "$input"}},
+ target: "$target",
+ expected: "$expected"
+ }
+ },
+ {$addFields: {outputType: {$type: "$output"}}},
+ {$sort: {_id: 1}}
+];
+var aggResult = coll.aggregate(pipeline).toArray();
+assert.eq(aggResult.length, conversionTestDocs.length);
+
+aggResult.forEach(doc => {
+ assert.eq(doc.output, doc.expected, "Unexpected conversion: _id = " + doc._id);
+ assert.eq(doc.outputType, doc.target, "Conversion to incorrect type: _id = " + doc._id);
+});
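+
+// Editor's illustrative sketch (not part of the original change): $convert also accepts
+// literal arguments, and numeric conversions truncate toward zero, which is why the 1.9
+// inputs above map to NumberInt(1)/NumberLong(1) rather than rounding up.
+assert.eq(1,
+          coll.aggregate([
+                  {$match: {_id: 0}},
+                  {$project: {truncated: {$convert: {input: 1.9, to: "int"}}}}
+              ])
+              .toArray()[0]
+              .truncated);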
+
+// Test each conversion using the shorthand $toBool, $toString, etc. syntax.
+pipeline = [
+ {
+ $project: {
+ output: {
+ $switch: {
+ branches: [
+ {case: {$eq: ["$target", "double"]}, then: {$toDouble: "$input"}},
+ {case: {$eq: ["$target", "string"]}, then: {$toString: "$input"}},
+ {case: {$eq: ["$target", "objectId"]}, then: {$toObjectId: "$input"}},
+ {case: {$eq: ["$target", "bool"]}, then: {$toBool: "$input"}},
+ {case: {$eq: ["$target", "date"]}, then: {$toDate: "$input"}},
+ {case: {$eq: ["$target", "int"]}, then: {$toInt: "$input"}},
+ {case: {$eq: ["$target", "long"]}, then: {$toLong: "$input"}},
+ {case: {$eq: ["$target", "decimal"]}, then: {$toDecimal: "$input"}}
+ ]
+ }
+ },
+ target: "$target",
+ expected: "$expected"
+ }
+ },
+ {$addFields: {outputType: {$type: "$output"}}},
+ {$sort: {_id: 1}}
+];
+aggResult = coll.aggregate(pipeline).toArray();
+assert.eq(aggResult.length, conversionTestDocs.length);
+
+aggResult.forEach(doc => {
+ assert.eq(doc.output, doc.expected, "Unexpected conversion: _id = " + doc._id);
+ assert.eq(doc.outputType, doc.target, "Conversion to incorrect type: _id = " + doc._id);
+});
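+
+// Editor's illustrative sketch (not part of the original change): each shorthand is
+// syntactic sugar for $convert without the "onError"/"onNull" options, so the two
+// spellings below produce identical results.
+assert.eq(coll.aggregate([{$match: {_id: 13}}, {$project: {v: {$toLong: "$input"}}}]).toArray(),
+          coll.aggregate([
+                  {$match: {_id: 13}},
+                  {$project: {v: {$convert: {input: "$input", to: "long"}}}}
+              ])
+              .toArray());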
+
+// Test a $convert expression with "onError" to make sure that error handling still allows an
+// error in the "input" expression to propagate.
+assert.throws(function() {
+ coll.aggregate([
+ {$project: {output: {$convert: {to: "string", input: {$divide: [1, 0]}, onError: "ERROR"}}}}
+ ]);
+}, [], "Pipeline should have failed");
+
+//
+// Unsupported conversions.
+//
+var illegalConversionTestDocs = [
+ {_id: 0, input: 1.9, target: "objectId"},
+
+ {_id: 1, input: ObjectId("0123456789abcdef01234567"), target: "double"},
+ {_id: 2, input: ObjectId("0123456789abcdef01234567"), target: "int"},
+ {_id: 3, input: ObjectId("0123456789abcdef01234567"), target: "long"},
+ {_id: 4, input: ObjectId("0123456789abcdef01234567"), target: "decimal"},
+
+ {_id: 5, input: false, target: "objectId"},
+ {_id: 6, input: false, target: "date"},
+
+ {_id: 7, input: ISODate("1970-01-01T00:00:00.123Z"), target: "objectId"},
+ {_id: 8, input: ISODate("1970-01-01T00:00:00.123Z"), target: "int"},
+
+ {_id: 9, input: NumberInt(1), target: "objectId"},
+ {_id: 10, input: NumberInt(1), target: "date"},
+
+ {_id: 11, input: NumberLong(1), target: "objectId"},
+
+ {_id: 12, input: NumberDecimal("1.9"), target: "objectId"},
+
+ {_id: 13, input: 1.9, target: "minKey"},
+ {_id: 14, input: 1.9, target: "missing"},
+ {_id: 15, input: 1.9, target: "object"},
+ {_id: 16, input: 1.9, target: "array"},
+ {_id: 17, input: 1.9, target: "binData"},
+ {_id: 18, input: 1.9, target: "undefined"},
+ {_id: 19, input: 1.9, target: "null"},
+ {_id: 20, input: 1.9, target: "regex"},
+ {_id: 21, input: 1.9, target: "dbPointer"},
+ {_id: 22, input: 1.9, target: "javascript"},
+ {_id: 23, input: 1.9, target: "symbol"},
+ {_id: 24, input: 1.9, target: "javascriptWithScope"},
+ {_id: 25, input: 1.9, target: "timestamp"},
+ {_id: 26, input: 1.9, target: "maxKey"},
+];
+populateCollection(illegalConversionTestDocs);
+
+// Test each document to ensure that the conversion throws an error.
+illegalConversionTestDocs.forEach(doc => {
pipeline = [
- {
- $project: {
- output: {
- $switch: {
- branches: [
- {case: {$eq: ["$target", "double"]}, then: {$toDouble: "$input"}},
- {case: {$eq: ["$target", "string"]}, then: {$toString: "$input"}},
- {case: {$eq: ["$target", "objectId"]}, then: {$toObjectId: "$input"}},
- {case: {$eq: ["$target", "bool"]}, then: {$toBool: "$input"}},
- {case: {$eq: ["$target", "date"]}, then: {$toDate: "$input"}},
- {case: {$eq: ["$target", "int"]}, then: {$toInt: "$input"}},
- {case: {$eq: ["$target", "long"]}, then: {$toLong: "$input"}},
- {case: {$eq: ["$target", "decimal"]}, then: {$toDecimal: "$input"}}
- ]
- }
- },
- target: "$target",
- expected: "$expected"
- }
- },
- {$addFields: {outputType: {$type: "$output"}}},
- {$sort: {_id: 1}}
+ {$match: {_id: doc._id}},
+ {$project: {output: {$convert: {to: "$target", input: "$input"}}}}
];
- aggResult = coll.aggregate(pipeline).toArray();
- assert.eq(aggResult.length, conversionTestDocs.length);
- aggResult.forEach(doc => {
- assert.eq(doc.output, doc.expected, "Unexpected conversion: _id = " + doc._id);
- assert.eq(doc.outputType, doc.target, "Conversion to incorrect type: _id = " + doc._id);
- });
-
- // Test a $convert expression with "onError" to make sure that error handling still allows an
- // error in the "input" expression to propagate.
assert.throws(function() {
- coll.aggregate([{
- $project:
- {output: {$convert: {to: "string", input: {$divide: [1, 0]}, onError: "ERROR"}}}
- }]);
- }, [], "Pipeline should have failed");
-
- //
- // Unsupported conversions.
- //
- var illegalConversionTestDocs = [
- {_id: 0, input: 1.9, target: "objectId"},
-
- {_id: 1, input: ObjectId("0123456789abcdef01234567"), target: "double"},
- {_id: 2, input: ObjectId("0123456789abcdef01234567"), target: "int"},
- {_id: 3, input: ObjectId("0123456789abcdef01234567"), target: "long"},
- {_id: 4, input: ObjectId("0123456789abcdef01234567"), target: "decimal"},
-
- {_id: 5, input: false, target: "objectId"},
- {_id: 6, input: false, target: "date"},
-
- {_id: 7, input: ISODate("1970-01-01T00:00:00.123Z"), target: "objectId"},
- {_id: 8, input: ISODate("1970-01-01T00:00:00.123Z"), target: "int"},
-
- {_id: 9, input: NumberInt(1), target: "objectId"},
- {_id: 10, input: NumberInt(1), target: "date"},
-
- {_id: 11, input: NumberLong(1), target: "objectId"},
-
- {_id: 12, input: NumberDecimal("1.9"), target: "objectId"},
-
- {_id: 13, input: 1.9, target: "minKey"},
- {_id: 14, input: 1.9, target: "missing"},
- {_id: 15, input: 1.9, target: "object"},
- {_id: 16, input: 1.9, target: "array"},
- {_id: 17, input: 1.9, target: "binData"},
- {_id: 18, input: 1.9, target: "undefined"},
- {_id: 19, input: 1.9, target: "null"},
- {_id: 20, input: 1.9, target: "regex"},
- {_id: 21, input: 1.9, target: "dbPointer"},
- {_id: 22, input: 1.9, target: "javascript"},
- {_id: 23, input: 1.9, target: "symbol"},
- {_id: 24, input: 1.9, target: "javascriptWithScope"},
- {_id: 25, input: 1.9, target: "timestamp"},
- {_id: 26, input: 1.9, target: "maxKey"},
- ];
- populateCollection(illegalConversionTestDocs);
-
- // Test each document to ensure that the conversion throws an error.
- illegalConversionTestDocs.forEach(doc => {
- pipeline = [
- {$match: {_id: doc._id}},
- {$project: {output: {$convert: {to: "$target", input: "$input"}}}}
- ];
-
- assert.throws(function() {
- coll.aggregate(pipeline);
- }, [], "Conversion should have failed: _id = " + doc._id);
- });
-
- // Test that each illegal conversion uses the 'onError' value.
- pipeline = [
- {$project: {output: {$convert: {to: "$target", input: "$input", onError: "ERROR"}}}},
- {$sort: {_id: 1}}
- ];
- var aggResult = coll.aggregate(pipeline).toArray();
- assert.eq(aggResult.length, illegalConversionTestDocs.length);
-
- aggResult.forEach(doc => {
- assert.eq(doc.output, "ERROR", "Unexpected result: _id = " + doc._id);
- });
-
- // Test that, when onError is missing, the missing value propagates to the result.
- pipeline = [
- {
- $project: {
- _id: false,
- output: {$convert: {to: "$target", input: "$input", onError: "$$REMOVE"}}
- }
- },
- {$sort: {_id: 1}}
- ];
- var aggResult = coll.aggregate(pipeline).toArray();
- assert.eq(aggResult.length, illegalConversionTestDocs.length);
-
- aggResult.forEach(doc => {
- assert.eq(doc, {});
- });
-
- //
- // One test document for each "nullish" value.
- //
- var nullTestDocs =
- [{_id: 0, input: null}, {_id: 1, input: undefined}, {_id: 2, /* input is missing */}];
- populateCollection(nullTestDocs);
-
- // Test that all nullish inputs result in the 'onNull' output.
- pipeline = [
- {$project: {output: {$convert: {to: "int", input: "$input", onNull: "NULL"}}}},
- {$sort: {_id: 1}}
- ];
- var aggResult = coll.aggregate(pipeline).toArray();
- assert.eq(aggResult.length, nullTestDocs.length);
-
- aggResult.forEach(doc => {
- assert.eq(doc.output, "NULL", "Unexpected result: _id = " + doc._id);
- });
-
- // Test that all nullish inputs result in the 'onNull' output _even_ if 'to' is nullish.
- pipeline = [
- {$project: {output: {$convert: {to: null, input: "$input", onNull: "NULL"}}}},
- {$sort: {_id: 1}}
- ];
- var aggResult = coll.aggregate(pipeline).toArray();
- assert.eq(aggResult.length, nullTestDocs.length);
-
- aggResult.forEach(doc => {
- assert.eq(doc.output, "NULL", "Unexpected result: _id = " + doc._id);
- });
+ coll.aggregate(pipeline);
+ }, [], "Conversion should have failed: _id = " + doc._id);
+});
+
+// Test that each illegal conversion uses the 'onError' value.
+pipeline = [
+ {$project: {output: {$convert: {to: "$target", input: "$input", onError: "ERROR"}}}},
+ {$sort: {_id: 1}}
+];
+var aggResult = coll.aggregate(pipeline).toArray();
+assert.eq(aggResult.length, illegalConversionTestDocs.length);
+
+aggResult.forEach(doc => {
+ assert.eq(doc.output, "ERROR", "Unexpected result: _id = " + doc._id);
+});
+
+// Test that, when onError is missing, the missing value propagates to the result.
+pipeline = [
+ {
+ $project:
+ {_id: false, output: {$convert: {to: "$target", input: "$input", onError: "$$REMOVE"}}}
+ },
+ {$sort: {_id: 1}}
+];
+var aggResult = coll.aggregate(pipeline).toArray();
+assert.eq(aggResult.length, illegalConversionTestDocs.length);
+
+aggResult.forEach(doc => {
+ assert.eq(doc, {});
+});
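+
+// Editor's note (illustrative, not part of the original change): $$REMOVE evaluates to
+// the "missing" value, so an "onError" of $$REMOVE drops the output field entirely
+// instead of setting it to null, which is why each projected document above is {}.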
+
+//
+// One test document for each "nullish" value.
+//
+var nullTestDocs =
+ [{_id: 0, input: null}, {_id: 1, input: undefined}, {_id: 2, /* input is missing */}];
+populateCollection(nullTestDocs);
+
+// Test that all nullish inputs result in the 'onNull' output.
+pipeline = [
+ {$project: {output: {$convert: {to: "int", input: "$input", onNull: "NULL"}}}},
+ {$sort: {_id: 1}}
+];
+var aggResult = coll.aggregate(pipeline).toArray();
+assert.eq(aggResult.length, nullTestDocs.length);
+
+aggResult.forEach(doc => {
+ assert.eq(doc.output, "NULL", "Unexpected result: _id = " + doc._id);
+});
+
+// Test that all nullish inputs result in the 'onNull' output _even_ if 'to' is nullish.
+pipeline = [
+ {$project: {output: {$convert: {to: null, input: "$input", onNull: "NULL"}}}},
+ {$sort: {_id: 1}}
+];
+var aggResult = coll.aggregate(pipeline).toArray();
+assert.eq(aggResult.length, nullTestDocs.length);
+
+aggResult.forEach(doc => {
+ assert.eq(doc.output, "NULL", "Unexpected result: _id = " + doc._id);
+});
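+
+// Editor's illustrative sketch (not part of the original change): without "onNull", a
+// nullish input simply converts to null, making "onNull" the $convert analogue of
+// $ifNull.
+assert.eq(null,
+          coll.aggregate([
+                  {$match: {_id: 0}},
+                  {$project: {v: {$convert: {input: "$input", to: "int"}}}}
+              ])
+              .toArray()[0]
+              .v);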
}());
diff --git a/jstests/aggregation/expressions/date_expressions_with_timezones.js b/jstests/aggregation/expressions/date_expressions_with_timezones.js
index 83f7488f26c..076800a5384 100644
--- a/jstests/aggregation/expressions/date_expressions_with_timezones.js
+++ b/jstests/aggregation/expressions/date_expressions_with_timezones.js
@@ -1,83 +1,83 @@
// Basic tests for using date expressions with time zone arguments.
(function() {
- "use strict";
+"use strict";
- const coll = db.date_expressions_with_time_zones;
- coll.drop();
+const coll = db.date_expressions_with_time_zones;
+coll.drop();
- assert.writeOK(coll.insert([
- // Three sales on 2017-06-16 in UTC.
- {_id: 0, date: new ISODate("2017-06-16T00:00:00.000Z"), sales: 1},
- {_id: 1, date: new ISODate("2017-06-16T12:02:21.013Z"), sales: 2},
- // Six sales on 2017-06-17 in UTC.
- {_id: 2, date: new ISODate("2017-06-17T00:00:00.000Z"), sales: 2},
- {_id: 3, date: new ISODate("2017-06-17T12:02:21.013Z"), sales: 2},
- {_id: 4, date: new ISODate("2017-06-17T15:00:33.101Z"), sales: 2},
- ]));
+assert.writeOK(coll.insert([
+ // Three sales on 2017-06-16 in UTC.
+ {_id: 0, date: new ISODate("2017-06-16T00:00:00.000Z"), sales: 1},
+ {_id: 1, date: new ISODate("2017-06-16T12:02:21.013Z"), sales: 2},
+ // Six sales on 2017-06-17 in UTC.
+ {_id: 2, date: new ISODate("2017-06-17T00:00:00.000Z"), sales: 2},
+ {_id: 3, date: new ISODate("2017-06-17T12:02:21.013Z"), sales: 2},
+ {_id: 4, date: new ISODate("2017-06-17T15:00:33.101Z"), sales: 2},
+]));
- // Compute how many sales happened on each day, in UTC.
- assert.eq(
- [
- {_id: {year: 2017, month: 6, day: 16}, totalSales: 3},
- {_id: {year: 2017, month: 6, day: 17}, totalSales: 6}
- ],
- coll.aggregate([
- {
- $group: {
- _id: {
- year: {$year: "$date"},
- month: {$month: "$date"},
- day: {$dayOfMonth: "$date"}
- },
- totalSales: {$sum: "$sales"}
- }
- },
- {$sort: {"_id.year": 1, "_id.month": 1, "_id.day": 1}}
- ])
- .toArray());
+// Compute how many sales happened on each day, in UTC.
+assert.eq(
+ [
+ {_id: {year: 2017, month: 6, day: 16}, totalSales: 3},
+ {_id: {year: 2017, month: 6, day: 17}, totalSales: 6}
+ ],
+ coll.aggregate([
+ {
+ $group: {
+ _id: {
+ year: {$year: "$date"},
+ month: {$month: "$date"},
+ day: {$dayOfMonth: "$date"}
+ },
+ totalSales: {$sum: "$sales"}
+ }
+ },
+ {$sort: {"_id.year": 1, "_id.month": 1, "_id.day": 1}}
+ ])
+ .toArray());
- // Compute how many sales happened on each day, in New York. The sales made at midnight should
- // move to the previous days.
- assert.eq(
- [
- {_id: {year: 2017, month: 6, day: 15}, totalSales: 1},
- {_id: {year: 2017, month: 6, day: 16}, totalSales: 4},
- {_id: {year: 2017, month: 6, day: 17}, totalSales: 4}
- ],
- coll.aggregate([
- {
- $group: {
- _id: {
- year: {$year: {date: "$date", timezone: "America/New_York"}},
- month: {$month: {date: "$date", timezone: "America/New_York"}},
- day: {$dayOfMonth: {date: "$date", timezone: "America/New_York"}}
- },
- totalSales: {$sum: "$sales"}
- }
- },
- {$sort: {"_id.year": 1, "_id.month": 1, "_id.day": 1}}
- ])
- .toArray());
+// Compute how many sales happened on each day, in New York. The sales made at midnight should
+// move to the previous days.
+assert.eq(
+ [
+ {_id: {year: 2017, month: 6, day: 15}, totalSales: 1},
+ {_id: {year: 2017, month: 6, day: 16}, totalSales: 4},
+ {_id: {year: 2017, month: 6, day: 17}, totalSales: 4}
+ ],
+ coll.aggregate([
+ {
+ $group: {
+ _id: {
+ year: {$year: {date: "$date", timezone: "America/New_York"}},
+ month: {$month: {date: "$date", timezone: "America/New_York"}},
+ day: {$dayOfMonth: {date: "$date", timezone: "America/New_York"}}
+ },
+ totalSales: {$sum: "$sales"}
+ }
+ },
+ {$sort: {"_id.year": 1, "_id.month": 1, "_id.day": 1}}
+ ])
+ .toArray());
- // Compute how many sales happened on each day, in Sydney (+10 hours).
- assert.eq(
- [
- {_id: {year: 2017, month: 6, day: 16}, totalSales: 3},
- {_id: {year: 2017, month: 6, day: 17}, totalSales: 4},
- {_id: {year: 2017, month: 6, day: 18}, totalSales: 2}
- ],
- coll.aggregate([
- {
- $group: {
- _id: {
- year: {$year: {date: "$date", timezone: "Australia/Sydney"}},
- month: {$month: {date: "$date", timezone: "Australia/Sydney"}},
- day: {$dayOfMonth: {date: "$date", timezone: "Australia/Sydney"}}
- },
- totalSales: {$sum: "$sales"}
- }
- },
- {$sort: {"_id.year": 1, "_id.month": 1, "_id.day": 1}}
- ])
- .toArray());
+// Compute how many sales happened on each day, in Sydney (+10 hours).
+assert.eq(
+ [
+ {_id: {year: 2017, month: 6, day: 16}, totalSales: 3},
+ {_id: {year: 2017, month: 6, day: 17}, totalSales: 4},
+ {_id: {year: 2017, month: 6, day: 18}, totalSales: 2}
+ ],
+ coll.aggregate([
+ {
+ $group: {
+ _id: {
+ year: {$year: {date: "$date", timezone: "Australia/Sydney"}},
+ month: {$month: {date: "$date", timezone: "Australia/Sydney"}},
+ day: {$dayOfMonth: {date: "$date", timezone: "Australia/Sydney"}}
+ },
+ totalSales: {$sum: "$sales"}
+ }
+ },
+ {$sort: {"_id.year": 1, "_id.month": 1, "_id.day": 1}}
+ ])
+ .toArray());
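+
+// Editor's illustrative sketch (not part of the original change): the timezone argument
+// also accepts fixed UTC offsets; June is winter in Sydney (no DST), so the literal
+// offset "+10:00" buckets these documents onto the same calendar days as
+// "Australia/Sydney".
+assert.eq(
+    [{_id: 16, n: 2}, {_id: 17, n: 2}, {_id: 18, n: 1}],
+    coll.aggregate([
+            {$group: {_id: {$dayOfMonth: {date: "$date", timezone: "+10:00"}}, n: {$sum: 1}}},
+            {$sort: {_id: 1}}
+        ])
+        .toArray());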
})();
diff --git a/jstests/aggregation/expressions/date_from_parts.js b/jstests/aggregation/expressions/date_from_parts.js
index cd16fe6af49..204b66b4169 100644
--- a/jstests/aggregation/expressions/date_from_parts.js
+++ b/jstests/aggregation/expressions/date_from_parts.js
@@ -1,935 +1,903 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and assertErrMsgContains.
(function() {
- "use strict";
-
- const coll = db.dateFromParts;
-
- /* --------------------------------------------------------------------------------------- */
- /* Basic Sanity Checks */
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0, year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713},
- {
- _id: 1,
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713,
- timezone: "Europe/Amsterdam"
- },
- {
- _id: 2,
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713,
- timezone: "Asia/Tokyo"
- },
- {
- _id: 3,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713,
- timezone: "America/Chicago"
- }
- },
- ]));
-
- assert.eq(
- [
- {_id: 1, date: ISODate("2016-12-31T23:00:00Z")},
- {_id: 2, date: ISODate("2016-12-31T15:00:00Z")},
- ],
- coll.aggregate([
- {
- $match: {'year': {$exists: true}, 'timezone': {$exists: true}},
- },
- {$project: {date: {'$dateFromParts': {year: "$year", "timezone": "$timezone"}}}}
- ])
- .toArray());
-
- assert.eq(
- [
- {_id: 3, date: ISODate("2017-06-19T05:00:00Z")},
- ],
- coll.aggregate([
- {
- $match: {
- 'date.year': {$exists: true},
- },
+"use strict";
+
+const coll = db.dateFromParts;
+
+/* --------------------------------------------------------------------------------------- */
+/* Basic Sanity Checks */
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0, year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713},
+ {
+ _id: 1,
+ year: 2017,
+ month: 6,
+ day: 19,
+ hour: 15,
+ minute: 13,
+ second: 25,
+ millisecond: 713,
+ timezone: "Europe/Amsterdam"
+ },
+ {
+ _id: 2,
+ year: 2017,
+ month: 6,
+ day: 19,
+ hour: 15,
+ minute: 13,
+ second: 25,
+ millisecond: 713,
+ timezone: "Asia/Tokyo"
+ },
+ {
+ _id: 3,
+ date: {
+ year: 2017,
+ month: 6,
+ day: 19,
+ hour: 15,
+ minute: 13,
+ second: 25,
+ millisecond: 713,
+ timezone: "America/Chicago"
+ }
+ },
+]));
+
+assert.eq(
+ [
+ {_id: 1, date: ISODate("2016-12-31T23:00:00Z")},
+ {_id: 2, date: ISODate("2016-12-31T15:00:00Z")},
+ ],
+ coll.aggregate([
+ {
+ $match: {'year': {$exists: true}, 'timezone': {$exists: true}},
+ },
+ {$project: {date: {'$dateFromParts': {year: "$year", "timezone": "$timezone"}}}}
+ ])
+ .toArray());
+
+assert.eq(
+ [
+ {_id: 3, date: ISODate("2017-06-19T05:00:00Z")},
+ ],
+ coll.aggregate([
+ {
+ $match: {
+ 'date.year': {$exists: true},
},
- {
- $project: {
- date: {
- '$dateFromParts': {
- year: "$date.year",
- month: '$date.month',
- day: '$date.day',
- timezone: '$date.timezone'
- }
- }
- }
+ },
+ {
+ $project: {
+ date: {
+ '$dateFromParts': {
+ year: "$date.year",
+ month: '$date.month',
+ day: '$date.day',
+ timezone: '$date.timezone'
+ }
+ }
}
- ])
- .toArray());
-
- let pipeline = {$project: {date: {'$dateFromParts': "$date"}}};
- assertErrorCode(coll, pipeline, 40519);
-
- pipeline = {$project: {date: {'$dateFromParts': {"timezone": "$timezone"}}}};
- assertErrorCode(coll, pipeline, 40516);
-
- pipeline = {$project: {date: {'$dateFromParts': {year: false}}}};
- assertErrorCode(coll, pipeline, 40515);
-
- pipeline = {$project: {date: {'$dateFromParts': {year: 2012, "timezone": "DoesNot/Exist"}}}};
- assertErrorCode(coll, pipeline, 40485);
-
- pipeline = {$project: {date: {'$dateFromParts': {year: 2012, "timezone": 5}}}};
- assertErrorCode(coll, pipeline, 40517);
-
- /* --------------------------------------------------------------------------------------- */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {
- _id: 0,
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742,
- timezone: "Europe/Berlin"
- },
- ]));
-
- let pipelines = [
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: NumberInt("2017"),
- month: NumberInt("6"),
- day: NumberInt("23"),
- hour: NumberInt("14"),
- minute: NumberInt("27"),
- second: NumberInt("37"),
- millisecond: NumberInt("742")
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: NumberLong("2017"),
- month: NumberLong("6"),
- day: NumberLong("23"),
- hour: NumberLong("14"),
- minute: NumberLong("27"),
- second: NumberLong("37"),
- millisecond: NumberLong("742")
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: NumberDecimal("2017"),
- month: NumberDecimal("6"),
- day: NumberDecimal("23"),
- hour: NumberDecimal("14"),
- minute: NumberDecimal("27"),
- second: NumberDecimal("37"),
- millisecond: NumberDecimal("742")
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "+02:00",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "-02",
- year: 2017,
- month: 6,
- day: 23,
- hour: 10,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "+02:00",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "+04:15",
- year: 2017,
- month: 6,
- day: 23,
- hour: 16,
- minute: 42,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "$timezone",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: "$year",
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: "$month",
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: 6,
- day: "$day",
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: 6,
- day: 23,
- hour: "$hour",
- minute: 27,
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: "$minute",
- second: 37,
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: "$second",
- millisecond: 742
- }
- }
- }
- }],
- [{
- '$project': {
- date: {
- '$dateFromParts': {
- timezone: "Europe/Berlin",
- year: 2017,
- month: 6,
- day: 23,
- hour: 14,
- minute: 27,
- second: 37,
- millisecond: "$millisecond"
- }
- }
- }
- }],
- ];
-
- pipelines.forEach(function(pipeline) {
- assert.eq([{_id: 0, date: ISODate("2017-06-23T12:27:37.742Z")}],
- coll.aggregate(pipeline).toArray(),
- tojson(pipeline));
- });
-
- /* --------------------------------------------------------------------------------------- */
-    /* Testing that nullish values for date parts and timezone yield a null result */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0},
- ]));
-
- pipelines = [
- [{'$project': {date: {'$dateFromParts': {year: "$year"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, month: "$month"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, day: "$day"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, hour: "$hour"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, minute: "$minute"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, second: "$second"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, millisecond: "$millisecond"}}}}],
- [{'$project': {date: {'$dateFromParts': {isoWeekYear: "$isoWeekYear"}}}}],
- [{'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, isoWeek: "$isoWeek"}}}}],
- [{
- '$project':
- {date: {'$dateFromParts': {isoWeekYear: 2017, isoDayOfWeek: "$isoDayOfWeek"}}}
- }],
- ];
-
- pipelines.forEach(function(pipeline) {
- assert.eq([{_id: 0, date: null}], coll.aggregate(pipeline).toArray(), tojson(pipeline));
- });
-
- pipeline = [{'$project': {date: {'$dateFromParts': {year: 2017, timezone: "$timezone"}}}}];
- assert.eq([{_id: 0, date: null}], coll.aggregate(pipeline).toArray());
-
- /* --------------------------------------------------------------------------------------- */
-    /* Testing whether it throws the right assert for uncoercible values */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0, falseValue: false},
- ]));
-
- pipelines = [
- [{'$project': {date: {'$dateFromParts': {year: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, month: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, day: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, hour: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, minute: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, second: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 2017, millisecond: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {isoWeekYear: "$falseValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, isoWeek: "$falseValue"}}}}],
- [{
- '$project':
- {date: {'$dateFromParts': {isoWeekYear: 2017, isoDayOfWeek: "$falseValue"}}}
- }],
- ];
-
- pipelines.forEach(function(pipeline) {
- assertErrorCode(coll, pipeline, 40515, tojson(pipeline));
- });
-
- pipeline = [{'$project': {date: {'$dateFromParts': {year: 2017, timezone: "$falseValue"}}}}];
- assertErrorCode(coll, pipeline, 40517);
-
- /* --------------------------------------------------------------------------------------- */
-    /* Testing whether it throws the right assert for out-of-range values */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0, outOfRangeValue: 10002},
- ]));
-
- pipelines = [
- [{'$project': {date: {'$dateFromParts': {year: "$outOfRangeValue"}}}}],
- [{'$project': {date: {'$dateFromParts': {year: -1}}}}],
- [{'$project': {date: {'$dateFromParts': {year: 10000}}}}],
- ];
-
- pipelines.forEach(function(pipeline) {
- assertErrorCode(coll, pipeline, 40523, tojson(pipeline));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Testing "out of range" under and overflows */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([{
- _id: 0,
- minusOne: -1,
- zero: 0,
- thirteen: 13,
- twentyFive: 25,
- sixtyOne: 61,
- thousandAndOne: 1001,
- tenThousandMinusOne: 9999,
- tenThousandAndOne: 10001,
- seventyMillionAndSomething: 71841012,
- secondsSinceEpoch: 1502095918,
- millisSinceEpoch: NumberLong("1502095918551"),
- }]));
-
-    let tests = [
- {expected: "0000-01-01T00:00:00.000Z", parts: {year: "$zero"}},
- {expected: "9999-01-01T00:00:00.000Z", parts: {year: "$tenThousandMinusOne"}},
- {expected: "2016-11-01T00:00:00.000Z", parts: {year: 2017, month: "$minusOne"}},
- {expected: "2016-12-01T00:00:00.000Z", parts: {year: 2017, month: "$zero"}},
- {expected: "2018-01-01T00:00:00.000Z", parts: {year: 2017, month: "$thirteen"}},
- {expected: "2016-12-30T00:00:00.000Z", parts: {year: 2017, day: "$minusOne"}},
- {expected: "2016-12-31T00:00:00.000Z", parts: {year: 2017, day: "$zero"}},
- {expected: "2017-03-02T00:00:00.000Z", parts: {year: 2017, day: "$sixtyOne"}},
- {expected: "2016-12-31T23:00:00.000Z", parts: {year: 2017, hour: "$minusOne"}},
- {expected: "2017-01-02T01:00:00.000Z", parts: {year: 2017, hour: "$twentyFive"}},
- {expected: "2016-12-31T23:59:00.000Z", parts: {year: 2017, minute: "$minusOne"}},
- {expected: "2017-01-01T00:00:00.000Z", parts: {year: 2017, minute: "$zero"}},
- {expected: "2017-01-01T01:01:00.000Z", parts: {year: 2017, minute: "$sixtyOne"}},
- {expected: "2016-12-31T23:59:59.000Z", parts: {year: 2017, second: "$minusOne"}},
- {expected: "2017-01-01T00:01:01.000Z", parts: {year: 2017, second: "$sixtyOne"}},
- {
- expected: "2019-04-12T11:50:12.000Z",
- parts: {year: 2017, second: "$seventyMillionAndSomething"}
- },
- {
- expected: "1972-04-11T11:50:12.000Z",
- parts: {year: 1970, second: "$seventyMillionAndSomething"}
- },
- {expected: "2017-08-07T08:51:58.000Z", parts: {year: 1970, second: "$secondsSinceEpoch"}},
- {expected: "2016-12-31T23:59:59.999Z", parts: {year: 2017, millisecond: "$minusOne"}},
- {expected: "2017-01-01T00:00:01.001Z", parts: {year: 2017, millisecond: "$thousandAndOne"}},
- {
- expected: "2017-01-01T19:57:21.012Z",
- parts: {year: 2017, millisecond: "$seventyMillionAndSomething"}
- },
- {
- expected: "2017-01-18T09:14:55.918Z",
- parts: {year: 2017, millisecond: "$secondsSinceEpoch"}
- },
- {
- expected: "1970-01-01T19:57:21.012Z",
- parts: {year: 1970, millisecond: "$seventyMillionAndSomething"}
- },
- {
- expected: "2017-08-07T08:51:58.551Z",
- parts: {year: 1970, millisecond: "$millisSinceEpoch"}
- },
- ];
-
- tests.forEach(function(test) {
- assert.eq(
- [
- {_id: 0, date: ISODate(test.expected)},
- ],
- coll.aggregate([{$project: {date: {"$dateFromParts": test.parts}}}]).toArray(),
- tojson(test));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /*
- * Testing double and Decimal128 millisecond values that aren't representable as a 64-bit
- * integer or overflow when converting to a 64-bit microsecond value.
- */
- coll.drop();
-
- assert.commandWorked(coll.insert([{
- _id: 0,
- veryBigDoubleA: 18014398509481984.0,
- veryBigDecimal128A: NumberDecimal("9223372036854775807"), // 2^63-1
- veryBigDoubleB: 18014398509481984000.0,
- veryBigDecimal128B: NumberDecimal("9223372036854775807000"), // (2^63-1) * 1000
- }]));
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDoubleA"}}}}];
- assertErrMsgContains(
- coll,
- pipeline,
- ErrorCodes.DurationOverflow,
- "Overflow casting from a lower-precision duration to a higher-precision duration");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDecimal128A"}}}}];
- assertErrMsgContains(
- coll,
- pipeline,
- ErrorCodes.DurationOverflow,
- "Overflow casting from a lower-precision duration to a higher-precision duration");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDoubleB"}}}}];
- assertErrMsgContains(coll, pipeline, 40515, "'millisecond' must evaluate to an integer");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDecimal128B"}}}}];
- assertErrMsgContains(coll, pipeline, 40515, "'millisecond' must evaluate to an integer");
-
- /* --------------------------------------------------------------------------------------- */
- /* Testing that year values are only allowed in the range [0, 9999] and that month, day, hour,
- * and minute values are only allowed in the range [-32,768, 32,767]. */
- coll.drop();
-
- assert.commandWorked(coll.insert([{
+ }
+ ])
+ .toArray());
+
+let pipeline = {$project: {date: {'$dateFromParts': "$date"}}};
+assertErrorCode(coll, pipeline, 40519);
+
+pipeline = {
+ $project: {date: {'$dateFromParts': {"timezone": "$timezone"}}}
+};
+assertErrorCode(coll, pipeline, 40516);
+
+pipeline = {
+ $project: {date: {'$dateFromParts': {year: false}}}
+};
+assertErrorCode(coll, pipeline, 40515);
+
+pipeline = {
+ $project: {date: {'$dateFromParts': {year: 2012, "timezone": "DoesNot/Exist"}}}
+};
+assertErrorCode(coll, pipeline, 40485);
+
+pipeline = {
+ $project: {date: {'$dateFromParts': {year: 2012, "timezone": 5}}}
+};
+assertErrorCode(coll, pipeline, 40517);
+
+/* --------------------------------------------------------------------------------------- */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {
_id: 0,
- bigYear: 10000,
- smallYear: -1,
- prettyBigInt: 32768,
- prettyBigNegativeInt: -32769
- }]));
-
- pipeline = [{$project: {date: {"$dateFromParts": {year: "$bigYear"}}}}];
- assertErrMsgContains(
- coll, pipeline, 40523, "'year' must evaluate to an integer in the range 0 to 9999");
-
- pipeline = [{$project: {date: {"$dateFromParts": {year: "$smallYear"}}}}];
- assertErrMsgContains(
- coll, pipeline, 40523, "'year' must evaluate to an integer in the range 0 to 9999");
-
- pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, month: "$prettyBigInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'month' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, month: "$prettyBigNegativeInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'month' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, month: 1, day: "$prettyBigInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'day' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [{
- $project:
- {date: {"$dateFromParts": {year: 1970, month: 1, day: "$prettyBigNegativeInt"}}}
- }];
- assertErrMsgContains(
- coll, pipeline, 31034, "'day' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, hour: "$prettyBigInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'hour' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, hour: "$prettyBigNegativeInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'hour' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {year: 1970, hour: 0, minute: "$prettyBigInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'minute' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [{
- $project:
- {date: {"$dateFromParts": {year: 1970, hour: 0, minute: "$prettyBigNegativeInt"}}}
- }];
- assertErrMsgContains(
- coll, pipeline, 31034, "'minute' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [{$project: {date: {"$dateFromParts": {isoWeekYear: "$bigYear"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31095, "'isoWeekYear' must evaluate to an integer in the range 0 to 9999");
-
- pipeline = [{$project: {date: {"$dateFromParts": {isoWeekYear: "$smallYear"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31095, "'isoWeekYear' must evaluate to an integer in the range 0 to 9999");
-
- pipeline =
- [{$project: {date: {"$dateFromParts": {isoWeekYear: 1970, isoWeek: "$prettyBigInt"}}}}];
- assertErrMsgContains(
- coll, pipeline, 31034, "'isoWeek' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [{
- $project:
- {date: {"$dateFromParts": {isoWeekYear: 1970, isoWeek: "$prettyBigNegativeInt"}}}
- }];
- assertErrMsgContains(
- coll, pipeline, 31034, "'isoWeek' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [
- {$project: {date: {"$dateFromParts": {isoWeekYear: 1970, isoDayOfWeek: "$prettyBigInt"}}}}
- ];
- assertErrMsgContains(coll,
- pipeline,
- 31034,
- "'isoDayOfWeek' must evaluate to a value in the range [-32768, 32767]");
-
- pipeline = [{
- $project: {
- date: {"$dateFromParts": {isoWeekYear: 1970, isoDayOfWeek: "$prettyBigNegativeInt"}}
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742,
+ timezone: "Europe/Berlin"
+ },
+]));
+
+let pipelines = [
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
}
- }];
- assertErrMsgContains(coll,
- pipeline,
- 31034,
- "'isoDayOfWeek' must evaluate to a value in the range [-32768, 32767]");
-
- /* --------------------------------------------------------------------------------------- */
- /* Testing wrong arguments */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0},
- ]));
-
- pipelines = [
- {code: 40519, pipeline: {'$project': {date: {'$dateFromParts': true}}}},
- {code: 40519, pipeline: {'$project': {date: {'$dateFromParts': []}}}},
-
- {code: 40518, pipeline: {'$project': {date: {'$dateFromParts': {unknown: true}}}}},
-
- {code: 40516, pipeline: {'$project': {date: {'$dateFromParts': {}}}}},
-
- {
- code: 40489,
- pipeline: {'$project': {date: {'$dateFromParts': {year: 2017, isoWeekYear: 2017}}}}
- },
- {code: 40489, pipeline: {'$project': {date: {'$dateFromParts': {year: 2017, isoWeek: 3}}}}},
- {
- code: 40489,
- pipeline: {'$project': {date: {'$dateFromParts': {year: 2017, isoDayOfWeek: 5}}}}
- },
- {
- code: 40489,
- pipeline: {'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, year: 2017}}}}
- },
-
- {
- code: 40525,
- pipeline: {'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, month: 12}}}}
- },
- {
- code: 40525,
- pipeline: {'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, day: 17}}}}
- },
- ];
-
- pipelines.forEach(function(item) {
-        assertErrorCode(coll, item.pipeline, item.code, tojson(item.pipeline));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Testing wrong value (types) */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0, floatField: 2017.5, decimalField: NumberDecimal("2017.5")},
- ]));
-
- pipelines = [
- {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: "2017"}}}}},
- {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: 2017.3}}}}},
- {
- code: 40515,
- pipeline: {'$project': {date: {'$dateFromParts': {year: NumberDecimal("2017.3")}}}}
- },
- {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: "$floatField"}}}}},
- {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: "$decimalField"}}}}},
- ];
-
- pipelines.forEach(function(item) {
-        assertErrorCode(coll, item.pipeline, item.code, tojson(item.pipeline));
- });
-
- /* --------------------------------------------------------------------------------------- */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {_id: 0, year: NumberDecimal("2017"), month: 6.0, day: NumberInt(19), hour: NumberLong(15)},
- {
- _id: 1,
- year: NumberDecimal("2017"),
- minute: 6.0,
- second: NumberInt(19),
- millisecond: NumberLong(15)
- },
- {_id: 2, isoWeekYear: NumberDecimal("2017"), isoWeek: 6.0, isoDayOfWeek: NumberInt(4)},
- ]));
-
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: NumberInt("2017"),
+ month: NumberInt("6"),
+ day: NumberInt("23"),
+ hour: NumberInt("14"),
+ minute: NumberInt("27"),
+ second: NumberInt("37"),
+ millisecond: NumberInt("742")
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: NumberLong("2017"),
+ month: NumberLong("6"),
+ day: NumberLong("23"),
+ hour: NumberLong("14"),
+ minute: NumberLong("27"),
+ second: NumberLong("37"),
+ millisecond: NumberLong("742")
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: NumberDecimal("2017"),
+ month: NumberDecimal("6"),
+ day: NumberDecimal("23"),
+ hour: NumberDecimal("14"),
+ minute: NumberDecimal("27"),
+ second: NumberDecimal("37"),
+ millisecond: NumberDecimal("742")
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "+02:00",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "-02",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 10,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "+02:00",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "+04:15",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 16,
+ minute: 42,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "$timezone",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: "$year",
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: "$month",
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: 6,
+ day: "$day",
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: "$hour",
+ minute: 27,
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: "$minute",
+ second: 37,
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: "$second",
+ millisecond: 742
+ }
+ }
+ }
+ }],
+ [{
+ '$project': {
+ date: {
+ '$dateFromParts': {
+ timezone: "Europe/Berlin",
+ year: 2017,
+ month: 6,
+ day: 23,
+ hour: 14,
+ minute: 27,
+ second: 37,
+ millisecond: "$millisecond"
+ }
+ }
+ }
+ }],
+];
+
+pipelines.forEach(function(pipeline) {
+ assert.eq([{_id: 0, date: ISODate("2017-06-23T12:27:37.742Z")}],
+ coll.aggregate(pipeline).toArray(),
+ tojson(pipeline));
+});
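+
+// Editor's note (illustrative, not part of the original change): as the pipelines above
+// show, any numeric BSON type (double, int, long, decimal) is accepted for a date part
+// as long as its value is integral; non-integral values are rejected with code 40515,
+// which is exercised further down.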
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing that nullish values for date parts and timezone yield a null result */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0},
+]));
+
+pipelines = [
+ [{'$project': {date: {'$dateFromParts': {year: "$year"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, month: "$month"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, day: "$day"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, hour: "$hour"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, minute: "$minute"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, second: "$second"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, millisecond: "$millisecond"}}}}],
+ [{'$project': {date: {'$dateFromParts': {isoWeekYear: "$isoWeekYear"}}}}],
+ [{'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, isoWeek: "$isoWeek"}}}}],
+ [{'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, isoDayOfWeek: "$isoDayOfWeek"}}}}],
+];
+
+pipelines.forEach(function(pipeline) {
+ assert.eq([{_id: 0, date: null}], coll.aggregate(pipeline).toArray(), tojson(pipeline));
+});
+
+pipeline = [{'$project': {date: {'$dateFromParts': {year: 2017, timezone: "$timezone"}}}}];
+assert.eq([{_id: 0, date: null}], coll.aggregate(pipeline).toArray());
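+
+// Editor's illustrative sketch (not part of the original change): a literal null
+// timezone behaves the same way as the missing "$timezone" path above, short-circuiting
+// the whole expression to null.
+assert.eq([{_id: 0, date: null}],
+          coll.aggregate([{$project: {date: {'$dateFromParts': {year: 2017, timezone: null}}}}])
+              .toArray());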
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing whether it throws the right assert for uncoercible values */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0, falseValue: false},
+]));
+
+pipelines = [
+ [{'$project': {date: {'$dateFromParts': {year: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, month: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, day: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, hour: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, minute: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, second: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 2017, millisecond: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {isoWeekYear: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, isoWeek: "$falseValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, isoDayOfWeek: "$falseValue"}}}}],
+];
+
+pipelines.forEach(function(pipeline) {
+ assertErrorCode(coll, pipeline, 40515, tojson(pipeline));
+});
+
+pipeline = [{'$project': {date: {'$dateFromParts': {year: 2017, timezone: "$falseValue"}}}}];
+assertErrorCode(coll, pipeline, 40517);
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing whether it throws the right assert for out-of-range values */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0, outOfRangeValue: 10002},
+]));
+
+pipelines = [
+ [{'$project': {date: {'$dateFromParts': {year: "$outOfRangeValue"}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: -1}}}}],
+ [{'$project': {date: {'$dateFromParts': {year: 10000}}}}],
+];
+
+pipelines.forEach(function(pipeline) {
+ assertErrorCode(coll, pipeline, 40523, tojson(pipeline));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing "out of range" under and overflows */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([{
+ _id: 0,
+ minusOne: -1,
+ zero: 0,
+ thirteen: 13,
+ twentyFive: 25,
+ sixtyOne: 61,
+ thousandAndOne: 1001,
+ tenThousandMinusOne: 9999,
+ tenThousandAndOne: 10001,
+ seventyMillionAndSomething: 71841012,
+ secondsSinceEpoch: 1502095918,
+ millisSinceEpoch: NumberLong("1502095918551"),
+}]));
+
+let tests = [
+ {expected: "0000-01-01T00:00:00.000Z", parts: {year: "$zero"}},
+ {expected: "9999-01-01T00:00:00.000Z", parts: {year: "$tenThousandMinusOne"}},
+ {expected: "2016-11-01T00:00:00.000Z", parts: {year: 2017, month: "$minusOne"}},
+ {expected: "2016-12-01T00:00:00.000Z", parts: {year: 2017, month: "$zero"}},
+ {expected: "2018-01-01T00:00:00.000Z", parts: {year: 2017, month: "$thirteen"}},
+ {expected: "2016-12-30T00:00:00.000Z", parts: {year: 2017, day: "$minusOne"}},
+ {expected: "2016-12-31T00:00:00.000Z", parts: {year: 2017, day: "$zero"}},
+ {expected: "2017-03-02T00:00:00.000Z", parts: {year: 2017, day: "$sixtyOne"}},
+ {expected: "2016-12-31T23:00:00.000Z", parts: {year: 2017, hour: "$minusOne"}},
+ {expected: "2017-01-02T01:00:00.000Z", parts: {year: 2017, hour: "$twentyFive"}},
+ {expected: "2016-12-31T23:59:00.000Z", parts: {year: 2017, minute: "$minusOne"}},
+ {expected: "2017-01-01T00:00:00.000Z", parts: {year: 2017, minute: "$zero"}},
+ {expected: "2017-01-01T01:01:00.000Z", parts: {year: 2017, minute: "$sixtyOne"}},
+ {expected: "2016-12-31T23:59:59.000Z", parts: {year: 2017, second: "$minusOne"}},
+ {expected: "2017-01-01T00:01:01.000Z", parts: {year: 2017, second: "$sixtyOne"}},
+ {
+ expected: "2019-04-12T11:50:12.000Z",
+ parts: {year: 2017, second: "$seventyMillionAndSomething"}
+ },
+ {
+ expected: "1972-04-11T11:50:12.000Z",
+ parts: {year: 1970, second: "$seventyMillionAndSomething"}
+ },
+ {expected: "2017-08-07T08:51:58.000Z", parts: {year: 1970, second: "$secondsSinceEpoch"}},
+ {expected: "2016-12-31T23:59:59.999Z", parts: {year: 2017, millisecond: "$minusOne"}},
+ {expected: "2017-01-01T00:00:01.001Z", parts: {year: 2017, millisecond: "$thousandAndOne"}},
+ {
+ expected: "2017-01-01T19:57:21.012Z",
+ parts: {year: 2017, millisecond: "$seventyMillionAndSomething"}
+ },
+ {expected: "2017-01-18T09:14:55.918Z", parts: {year: 2017, millisecond: "$secondsSinceEpoch"}},
+ {
+ expected: "1970-01-01T19:57:21.012Z",
+ parts: {year: 1970, millisecond: "$seventyMillionAndSomething"}
+ },
+ {expected: "2017-08-07T08:51:58.551Z", parts: {year: 1970, millisecond: "$millisSinceEpoch"}},
+];
+
+tests.forEach(function(test) {
assert.eq(
[
- {_id: 0, date: ISODate("2017-06-19T15:00:00Z")},
+ {_id: 0, date: ISODate(test.expected)},
],
- coll.aggregate([
- {
- $match: {_id: 0},
- },
- {
- $project: {
- date: {
- '$dateFromParts':
- {year: "$year", month: "$month", day: "$day", hour: "$hour"}
- }
- }
+ coll.aggregate([{$project: {date: {"$dateFromParts": test.parts}}}]).toArray(),
+ tojson(test));
+});
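+
+// Editor's illustrative sketch (not part of the original change): these cases show that
+// out-of-range parts carry into the next larger unit instead of erroring, e.g. month 13
+// of 2017 pivots into January 2018.
+assert.eq(ISODate("2018-01-01T00:00:00.000Z"),
+          coll.aggregate([{$project: {d: {'$dateFromParts': {year: 2017, month: 13}}}}])
+              .toArray()[0]
+              .d);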
+
+/* --------------------------------------------------------------------------------------- */
+/*
+ * Testing double and Decimal128 millisecond values that aren't representable as a 64-bit
+ * integer or overflow when converting to a 64-bit microsecond value.
+ */
+coll.drop();
+
+assert.commandWorked(coll.insert([{
+ _id: 0,
+ veryBigDoubleA: 18014398509481984.0,
+ veryBigDecimal128A: NumberDecimal("9223372036854775807"), // 2^63-1
+ veryBigDoubleB: 18014398509481984000.0,
+ veryBigDecimal128B: NumberDecimal("9223372036854775807000"), // (2^63-1) * 1000
+}]));
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDoubleA"}}}}];
+assertErrMsgContains(
+ coll,
+ pipeline,
+ ErrorCodes.DurationOverflow,
+ "Overflow casting from a lower-precision duration to a higher-precision duration");
+
+pipeline =
+ [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDecimal128A"}}}}];
+assertErrMsgContains(
+ coll,
+ pipeline,
+ ErrorCodes.DurationOverflow,
+ "Overflow casting from a lower-precision duration to a higher-precision duration");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDoubleB"}}}}];
+assertErrMsgContains(coll, pipeline, 40515, "'millisecond' must evaluate to an integer");
+
+pipeline =
+ [{$project: {date: {"$dateFromParts": {year: 1970, millisecond: "$veryBigDecimal128B"}}}}];
+assertErrMsgContains(coll, pipeline, 40515, "'millisecond' must evaluate to an integer");
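+
+// Editor's note (illustrative, not part of the original change): the "A" values above
+// fit in a signed 64-bit integer but overflow once scaled to microseconds internally (a
+// factor of 1000 pushes them past 2^63 - 1), while the "B" values already exceed the
+// 64-bit range and are therefore rejected as non-integral milliseconds.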
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing that year and isoWeekYear values are only allowed in the range [0, 9999] and that
+ * month, day, hour, minute, isoWeek, and isoDayOfWeek values are only allowed in the range
+ * [-32,768, 32,767]. */
+coll.drop();
+
+assert.commandWorked(coll.insert(
+ [{_id: 0, bigYear: 10000, smallYear: -1, prettyBigInt: 32768, prettyBigNegativeInt: -32769}]));
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: "$bigYear"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 40523, "'year' must evaluate to an integer in the range 0 to 9999");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: "$smallYear"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 40523, "'year' must evaluate to an integer in the range 0 to 9999");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, month: "$prettyBigInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'month' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, month: "$prettyBigNegativeInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'month' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, month: 1, day: "$prettyBigInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'day' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline =
+ [{$project: {date: {"$dateFromParts": {year: 1970, month: 1, day: "$prettyBigNegativeInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'day' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, hour: "$prettyBigInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'hour' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, hour: "$prettyBigNegativeInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'hour' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{$project: {date: {"$dateFromParts": {year: 1970, hour: 0, minute: "$prettyBigInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'minute' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [
+ {$project: {date: {"$dateFromParts": {year: 1970, hour: 0, minute: "$prettyBigNegativeInt"}}}}
+];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'minute' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{$project: {date: {"$dateFromParts": {isoWeekYear: "$bigYear"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31095, "'isoWeekYear' must evaluate to an integer in the range 0 to 9999");
+
+pipeline = [{$project: {date: {"$dateFromParts": {isoWeekYear: "$smallYear"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31095, "'isoWeekYear' must evaluate to an integer in the range 0 to 9999");
+
+pipeline = [{$project: {date: {"$dateFromParts": {isoWeekYear: 1970, isoWeek: "$prettyBigInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'isoWeek' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline =
+ [{$project: {date: {"$dateFromParts": {isoWeekYear: 1970, isoWeek: "$prettyBigNegativeInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'isoWeek' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline =
+ [{$project: {date: {"$dateFromParts": {isoWeekYear: 1970, isoDayOfWeek: "$prettyBigInt"}}}}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'isoDayOfWeek' must evaluate to a value in the range [-32768, 32767]");
+
+pipeline = [{
+ $project: {date: {"$dateFromParts": {isoWeekYear: 1970, isoDayOfWeek: "$prettyBigNegativeInt"}}}
+}];
+assertErrMsgContains(
+ coll, pipeline, 31034, "'isoDayOfWeek' must evaluate to a value in the range [-32768, 32767]");
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing wrong arguments */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0},
+]));
+
+pipelines = [
+ {code: 40519, pipeline: {'$project': {date: {'$dateFromParts': true}}}},
+ {code: 40519, pipeline: {'$project': {date: {'$dateFromParts': []}}}},
+
+ {code: 40518, pipeline: {'$project': {date: {'$dateFromParts': {unknown: true}}}}},
+
+ {code: 40516, pipeline: {'$project': {date: {'$dateFromParts': {}}}}},
+
+ {
+ code: 40489,
+ pipeline: {'$project': {date: {'$dateFromParts': {year: 2017, isoWeekYear: 2017}}}}
+ },
+ {code: 40489, pipeline: {'$project': {date: {'$dateFromParts': {year: 2017, isoWeek: 3}}}}},
+ {
+ code: 40489,
+ pipeline: {'$project': {date: {'$dateFromParts': {year: 2017, isoDayOfWeek: 5}}}}
+ },
+ {
+ code: 40489,
+ pipeline: {'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, year: 2017}}}}
+ },
+
+ {
+ code: 40525,
+ pipeline: {'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, month: 12}}}}
+ },
+ {code: 40525, pipeline: {'$project': {date: {'$dateFromParts': {isoWeekYear: 2017, day: 17}}}}},
+];
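+
+// Codes 40519/40518/40516 above cover malformed arguments (a non-object argument, an
+// unknown field, and an empty document), 40489 covers mixing natural and ISO date parts,
+// and 40525 covers natural month/day fields combined with isoWeekYear.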
+
+pipelines.forEach(function(item) {
+    assertErrorCode(coll, item.pipeline, item.code, tojson(item.pipeline));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Testing wrong value (types) */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0, floatField: 2017.5, decimalField: NumberDecimal("2017.5")},
+]));
+
+pipelines = [
+ {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: "2017"}}}}},
+ {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: 2017.3}}}}},
+ {
+ code: 40515,
+ pipeline: {'$project': {date: {'$dateFromParts': {year: NumberDecimal("2017.3")}}}}
+ },
+ {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: "$floatField"}}}}},
+ {code: 40515, pipeline: {'$project': {date: {'$dateFromParts': {year: "$decimalField"}}}}},
+];
+
+pipelines.forEach(function(item) {
+    assertErrorCode(coll, item.pipeline, item.code, tojson(item.pipeline));
+});
+
+/* --------------------------------------------------------------------------------------- */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {_id: 0, year: NumberDecimal("2017"), month: 6.0, day: NumberInt(19), hour: NumberLong(15)},
+ {
+ _id: 1,
+ year: NumberDecimal("2017"),
+ minute: 6.0,
+ second: NumberInt(19),
+ millisecond: NumberLong(15)
+ },
+ {_id: 2, isoWeekYear: NumberDecimal("2017"), isoWeek: 6.0, isoDayOfWeek: NumberInt(4)},
+]));
+
+assert.eq(
+ [
+ {_id: 0, date: ISODate("2017-06-19T15:00:00Z")},
+ ],
+ coll.aggregate([
+ {
+ $match: {_id: 0},
+ },
+ {
+ $project: {
+ date: {
+ '$dateFromParts':
+ {year: "$year", month: "$month", day: "$day", hour: "$hour"}
+ }
+ }
+ }
+ ])
+ .toArray());
+
+assert.eq(
+ [
+ {_id: 1, date: ISODate("2017-01-01T00:06:19.015Z")},
+ ],
+ coll.aggregate([
+ {
+ $match: {_id: 1},
+ },
+ {
+ $project: {
+ date: {
+ '$dateFromParts': {
+ year: "$year",
+ minute: "$minute",
+ second: "$second",
+ millisecond: "$millisecond"
+ }
+ }
}
- ])
- .toArray());
+ }
+ ])
+ .toArray());
+
+assert.eq(
+ [
+ {_id: 2, date: ISODate("2017-02-09T00:00:00Z")},
+ ],
+ coll.aggregate([
+ {
+ $match: {_id: 2},
+ },
+ {
+ $project: {
+ date: {
+ '$dateFromParts': {
+ isoWeekYear: "$isoWeekYear",
+ isoWeek: "$isoWeek",
+ isoDayOfWeek: "$isoDayOfWeek"
+ }
+ }
+ }
+ }
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {
+ _id: 0,
+ year: NumberDecimal("2017"),
+ month: 6.0,
+ day: NumberInt(19),
+ hour: NumberLong(15),
+ minute: NumberDecimal(1),
+ second: 51,
+ millisecond: 551
+ },
+]));
+
+var tests = [
+ {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "-04:00"},
+ {expected: ISODate("2017-06-19T12:01:51.551Z"), tz: "+03"},
+ {expected: ISODate("2017-06-19T18:21:51.551Z"), tz: "-0320"},
+ {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "America/New_York"},
+ {expected: ISODate("2017-06-19T13:01:51.551Z"), tz: "Europe/Amsterdam"},
+];
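+
+// The date parts above are interpreted in test.tz and the result is stored in UTC, so
+// 15:01:51.551 in America/New_York (EDT, UTC-4) becomes 19:01:51.551Z.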
+
+tests.forEach(function(test) {
assert.eq(
[
- {_id: 1, date: ISODate("2017-01-01T00:06:19.015Z")},
+ {_id: 0, date: test.expected},
],
- coll.aggregate([
- {
- $match: {_id: 1},
- },
- {
- $project: {
- date: {
- '$dateFromParts': {
- year: "$year",
- minute: "$minute",
- second: "$second",
- millisecond: "$millisecond"
- }
- }
- }
+ coll.aggregate([{
+ $project: {
+ date: {
+ "$dateFromParts": {
+ year: "$year",
+ month: "$month",
+ day: "$day",
+ hour: "$hour",
+ minute: "$minute",
+ second: "$second",
+ millisecond: "$millisecond",
+ timezone: test.tz
+ }
+ }
}
- ])
- .toArray());
+ }])
+ .toArray(),
+ tojson(test));
+});
+/* --------------------------------------------------------------------------------------- */
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {
+ _id: 0,
+ isoWeekYear: NumberDecimal("2017"),
+ isoWeek: 25.0,
+ isoDayOfWeek: NumberInt(1),
+ hour: NumberLong(15),
+ minute: NumberDecimal(1),
+ second: 51,
+ millisecond: 551
+ },
+]));
+
+var tests = [
+ {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "-04:00"},
+ {expected: ISODate("2017-06-19T12:01:51.551Z"), tz: "+03"},
+ {expected: ISODate("2017-06-19T18:21:51.551Z"), tz: "-0320"},
+ {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "America/New_York"},
+ {expected: ISODate("2017-06-19T13:01:51.551Z"), tz: "Europe/Amsterdam"},
+];
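+
+// isoWeekYear 2017, isoWeek 25, isoDayOfWeek 1 is Monday 2017-06-19, so these expected
+// instants match those of the calendar-date variant above.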
+
+tests.forEach(function(test) {
assert.eq(
[
- {_id: 2, date: ISODate("2017-02-09T00:00:00Z")},
+ {_id: 0, date: test.expected},
],
- coll.aggregate([
- {
- $match: {_id: 2},
- },
- {
- $project: {
- date: {
- '$dateFromParts': {
- isoWeekYear: "$isoWeekYear",
- isoWeek: "$isoWeek",
- isoDayOfWeek: "$isoDayOfWeek"
- }
- }
- }
- }
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {
- _id: 0,
- year: NumberDecimal("2017"),
- month: 6.0,
- day: NumberInt(19),
- hour: NumberLong(15),
- minute: NumberDecimal(1),
- second: 51,
- millisecond: 551
- },
- ]));
-
- var tests = [
- {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "-04:00"},
- {expected: ISODate("2017-06-19T12:01:51.551Z"), tz: "+03"},
- {expected: ISODate("2017-06-19T18:21:51.551Z"), tz: "-0320"},
- {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "America/New_York"},
- {expected: ISODate("2017-06-19T13:01:51.551Z"), tz: "Europe/Amsterdam"},
- ];
-
- tests.forEach(function(test) {
- assert.eq(
- [
- {_id: 0, date: test.expected},
- ],
- coll.aggregate([{
- $project: {
- date: {
- "$dateFromParts": {
- year: "$year",
- month: "$month",
- day: "$day",
- hour: "$hour",
- minute: "$minute",
- second: "$second",
- millisecond: "$millisecond",
- timezone: test.tz
- }
+ coll.aggregate([{
+ $project: {
+ date: {
+ "$dateFromParts": {
+ isoWeekYear: "$isoWeekYear",
+ isoWeek: "$isoWeek",
+ isoDayOfWeek: "$isoDayOfWeek",
+ hour: "$hour",
+ minute: "$minute",
+ second: "$second",
+ millisecond: "$millisecond",
+ timezone: test.tz
}
}
- }])
- .toArray(),
- tojson(test));
- });
-
- /* --------------------------------------------------------------------------------------- */
-
- coll.drop();
-
- assert.commandWorked(coll.insert([
- {
- _id: 0,
- isoWeekYear: NumberDecimal("2017"),
- isoWeek: 25.0,
- isoDayOfWeek: NumberInt(1),
- hour: NumberLong(15),
- minute: NumberDecimal(1),
- second: 51,
- millisecond: 551
- },
- ]));
-
- var tests = [
- {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "-04:00"},
- {expected: ISODate("2017-06-19T12:01:51.551Z"), tz: "+03"},
- {expected: ISODate("2017-06-19T18:21:51.551Z"), tz: "-0320"},
- {expected: ISODate("2017-06-19T19:01:51.551Z"), tz: "America/New_York"},
- {expected: ISODate("2017-06-19T13:01:51.551Z"), tz: "Europe/Amsterdam"},
- ];
-
- tests.forEach(function(test) {
- assert.eq(
- [
- {_id: 0, date: test.expected},
- ],
- coll.aggregate([{
- $project: {
- date: {
- "$dateFromParts": {
- isoWeekYear: "$isoWeekYear",
- isoWeek: "$isoWeek",
- isoDayOfWeek: "$isoDayOfWeek",
- hour: "$hour",
- minute: "$minute",
- second: "$second",
- millisecond: "$millisecond",
- timezone: test.tz
- }
- }
- }
- }])
- .toArray(),
- tojson(test));
- });
-
- /* --------------------------------------------------------------------------------------- */
+ }
+ }])
+ .toArray(),
+ tojson(test));
+});
+/* --------------------------------------------------------------------------------------- */
})();
diff --git a/jstests/aggregation/expressions/date_from_string.js b/jstests/aggregation/expressions/date_from_string.js
index e62b4f9f392..f6e358fb1be 100644
--- a/jstests/aggregation/expressions/date_from_string.js
+++ b/jstests/aggregation/expressions/date_from_string.js
@@ -1,795 +1,768 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and assertErrMsgContains.
(function() {
- "use strict";
-
- const coll = db.date_from_string;
-
- /* --------------------------------------------------------------------------------------- */
- /* Normal format tests. */
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- let testCases = [
- {
- expect: "2017-07-04T11:56:02Z",
- inputString: "2017-07-04T11:56:02Z",
- format: "%Y-%m-%dT%H:%M:%SZ"
- },
- {
- expect: "2017-07-04T11:56:02.813Z",
- inputString: "2017-07-04T11:56:02.813Z",
- format: "%Y-%m-%dT%H:%M:%S.%LZ"
- },
- {
- expect: "2017-07-04T11:56:02.810Z",
- inputString: "2017-07-04T11:56:02.81Z",
- format: "%Y-%m-%dT%H:%M:%S.%LZ"
- },
- {
- expect: "2017-07-04T11:56:02.800Z",
- inputString: "2017-07-04T11:56:02.8Z",
- format: "%Y-%m-%dT%H:%M:%S.%LZ"
- },
- {
- expect: "2017-07-04T11:56:02Z",
- inputString: "2017-07-04T11:56.02",
- format: "%Y-%m-%dT%H:%M.%S"
- },
- {
- expect: "2017-07-04T11:56:02.813Z",
- inputString: "2017-07-04T11:56.02.813",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- {
- expect: "2017-07-04T11:56:02.810Z",
- inputString: "2017-07-04T11:56.02.81",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- {
- expect: "2017-07-04T11:56:02.800Z",
- inputString: "2017-07-04T11:56.02.8",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- ];
- testCases.forEach(function(testCase) {
- assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate(
- {$project: {date: {$dateFromString: {dateString: testCase.inputString}}}})
- .toArray(),
- tojson(testCase));
- assert.eq(
- [{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString:
- {dateString: testCase.inputString, format: testCase.format}
- }
- }
- })
- .toArray(),
- tojson(testCase));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Normal format tests with timezone. */
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- testCases = [
- {
- expect: "2017-07-04T10:56:02Z",
- inputString: "2017-07-04T11:56.02",
- format: "%Y-%m-%dT%H:%M.%S"
- },
- {
- expect: "2017-07-04T10:56:02.813Z",
- inputString: "2017-07-04T11:56.02.813",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- {
- expect: "2017-07-04T10:56:02.810Z",
- inputString: "2017-07-04T11:56.02.81",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- {
- expect: "2017-07-04T10:56:02.800Z",
- inputString: "2017-07-04T11:56.02.8",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- ];
- testCases.forEach(function(testCase) {
- assert.eq(
- [{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString:
- {dateString: testCase.inputString, timezone: "Europe/London"}
- }
+"use strict";
+
+const coll = db.date_from_string;
+
+/* --------------------------------------------------------------------------------------- */
+/* Normal format tests. */
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+let testCases = [
+ {
+ expect: "2017-07-04T11:56:02Z",
+ inputString: "2017-07-04T11:56:02Z",
+ format: "%Y-%m-%dT%H:%M:%SZ"
+ },
+ {
+ expect: "2017-07-04T11:56:02.813Z",
+ inputString: "2017-07-04T11:56:02.813Z",
+ format: "%Y-%m-%dT%H:%M:%S.%LZ"
+ },
+ {
+ expect: "2017-07-04T11:56:02.810Z",
+ inputString: "2017-07-04T11:56:02.81Z",
+ format: "%Y-%m-%dT%H:%M:%S.%LZ"
+ },
+ {
+ expect: "2017-07-04T11:56:02.800Z",
+ inputString: "2017-07-04T11:56:02.8Z",
+ format: "%Y-%m-%dT%H:%M:%S.%LZ"
+ },
+ {
+ expect: "2017-07-04T11:56:02Z",
+ inputString: "2017-07-04T11:56.02",
+ format: "%Y-%m-%dT%H:%M.%S"
+ },
+ {
+ expect: "2017-07-04T11:56:02.813Z",
+ inputString: "2017-07-04T11:56.02.813",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+ {
+ expect: "2017-07-04T11:56:02.810Z",
+ inputString: "2017-07-04T11:56.02.81",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+ {
+ expect: "2017-07-04T11:56:02.800Z",
+ inputString: "2017-07-04T11:56.02.8",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+];
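+
+// Note that fractional seconds of one to three digits are interpreted as milliseconds,
+// so ".8" and ".81" parse as 800ms and 810ms respectively.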
+testCases.forEach(function(testCase) {
+ assert.eq(
+ [{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({$project: {date: {$dateFromString: {dateString: testCase.inputString}}}})
+ .toArray(),
+ tojson(testCase));
+ assert.eq(
+ [{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {dateString: testCase.inputString, format: testCase.format}
}
- })
- .toArray(),
- tojson(testCase));
- assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: testCase.inputString,
- timezone: "Europe/London",
- format: testCase.format
- }
- }
+ }
+ })
+ .toArray(),
+ tojson(testCase));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Normal format tests with timezone. */
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+testCases = [
+ {
+ expect: "2017-07-04T10:56:02Z",
+ inputString: "2017-07-04T11:56.02",
+ format: "%Y-%m-%dT%H:%M.%S"
+ },
+ {
+ expect: "2017-07-04T10:56:02.813Z",
+ inputString: "2017-07-04T11:56.02.813",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+ {
+ expect: "2017-07-04T10:56:02.810Z",
+ inputString: "2017-07-04T11:56.02.81",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+ {
+ expect: "2017-07-04T10:56:02.800Z",
+ inputString: "2017-07-04T11:56.02.8",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+];
+testCases.forEach(function(testCase) {
+ assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: testCase.inputString, timezone: "Europe/London"}
}
- })
- .toArray(),
- tojson(testCase));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Normal format tests with UTC offset. */
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- testCases = [
- {
- expect: "2017-07-04T10:56:02Z",
- inputString: "2017-07-04T11:56.02",
- format: "%Y-%m-%dT%H:%M.%S"
- },
- {
- expect: "2017-07-04T10:56:02.813Z",
- inputString: "2017-07-04T11:56.02.813",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- {
- expect: "2017-07-04T10:56:02.810Z",
- inputString: "2017-07-04T11:56.02.81",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- {
- expect: "2017-07-04T10:56:02.800Z",
- inputString: "2017-07-04T11:56.02.8",
- format: "%Y-%m-%dT%H:%M.%S.%L"
- },
- ];
- testCases.forEach(function(testCase) {
- assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString:
- {dateString: testCase.inputString, timezone: "+01:00"}
+ }
+ })
+ .toArray(),
+ tojson(testCase));
+ assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {
+ dateString: testCase.inputString,
+ timezone: "Europe/London",
+ format: testCase.format
}
}
- })
- .toArray(),
- tojson(testCase));
- assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: testCase.inputString,
- timezone: "+01:00",
- format: testCase.format
- }
+ }
+ })
+ .toArray(),
+ tojson(testCase));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Normal format tests with UTC offset. */
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+testCases = [
+ {
+ expect: "2017-07-04T10:56:02Z",
+ inputString: "2017-07-04T11:56.02",
+ format: "%Y-%m-%dT%H:%M.%S"
+ },
+ {
+ expect: "2017-07-04T10:56:02.813Z",
+ inputString: "2017-07-04T11:56.02.813",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+ {
+ expect: "2017-07-04T10:56:02.810Z",
+ inputString: "2017-07-04T11:56.02.81",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+ {
+ expect: "2017-07-04T10:56:02.800Z",
+ inputString: "2017-07-04T11:56.02.8",
+ format: "%Y-%m-%dT%H:%M.%S.%L"
+ },
+];
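+
+// These expectations mirror the Europe/London cases above because London observes BST
+// (UTC+01:00) on 2017-07-04.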
+testCases.forEach(function(testCase) {
+ assert.eq(
+ [{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {$dateFromString: {dateString: testCase.inputString, timezone: "+01:00"}}
+ }
+ })
+ .toArray(),
+ tojson(testCase));
+ assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {
+ dateString: testCase.inputString,
+ timezone: "+01:00",
+ format: testCase.format
}
}
- })
- .toArray(),
- tojson(testCase));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Normal format tests from data. */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, dateString: "2017-07-06T12:35:37Z", format: "%Y-%m-%dT%H:%M:%SZ"},
- {_id: 1, dateString: "2017-07-06T12:35:37.513Z", format: "%Y-%m-%dT%H:%M:%S.%LZ"},
- {_id: 2, dateString: "2017-07-06T12:35:37", format: "%Y-%m-%dT%H:%M:%S"},
- {_id: 3, dateString: "2017-07-06T12:35:37.513", format: "%Y-%m-%dT%H:%M:%S.%L"},
- {_id: 4, dateString: "1960-07-10T12:10:37.448", format: "%Y-%m-%dT%H:%M:%S.%L"},
- ]));
-
- let expectedResults = [
- {"_id": 0, "date": ISODate("2017-07-06T12:35:37Z")},
- {"_id": 1, "date": ISODate("2017-07-06T12:35:37.513Z")},
- {"_id": 2, "date": ISODate("2017-07-06T12:35:37Z")},
- {"_id": 3, "date": ISODate("2017-07-06T12:35:37.513Z")},
- {"_id": 4, "date": ISODate("1960-07-10T12:10:37.448Z")},
- ];
- assert.eq(expectedResults,
- coll.aggregate([
- {
- $project: {date: {$dateFromString: {dateString: "$dateString"}}},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- // Repeat the test with an explicit format specifier string.
- assert.eq(
- expectedResults,
- coll.aggregate([
- {
- $project:
- {date: {$dateFromString: {dateString: "$dateString", format: "$format"}}},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- expectedResults = [
- {"_id": 0, "date": new Date(1499344537000)},
- {"_id": 1, "date": new Date(1499344537513)},
- {"_id": 2, "date": new Date(1499344537000)},
- {"_id": 3, "date": new Date(1499344537513)},
- {"_id": 4, "date": new Date(-299072962552)},
- ];
- assert.eq(expectedResults,
- coll.aggregate([
- {
- $project: {date: {$dateFromString: {dateString: "$dateString"}}},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- // Repeat the test with an explicit format specifier string.
- assert.eq(
- expectedResults,
- coll.aggregate([
- {
- $project:
- {date: {$dateFromString: {dateString: "$dateString", format: "$format"}}},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- /* Normal format tests from data, with time zone. */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, dateString: "2017-07-06T12:35:37.513", timezone: "GMT"},
- {_id: 1, dateString: "2017-07-06T12:35:37.513", timezone: "UTC"},
- {_id: 2, dateString: "1960-07-10T12:35:37.513", timezone: "America/New_York"},
- {_id: 3, dateString: "1960-07-10T12:35:37.513", timezone: "Europe/London"},
- {_id: 4, dateString: "2017-07-06T12:35:37.513", timezone: "America/Los_Angeles"},
- {_id: 5, dateString: "2017-07-06T12:35:37.513", timezone: "Europe/Paris"},
- {_id: 6, dateString: "2017-07-06T12:35:37.513", timezone: "+04:00"},
- ]));
-
- expectedResults = [
- {"_id": 0, "date": ISODate("2017-07-06T12:35:37.513Z")},
- {"_id": 1, "date": ISODate("2017-07-06T12:35:37.513Z")},
- {"_id": 2, "date": ISODate("1960-07-10T16:35:37.513Z")},
- {"_id": 3, "date": ISODate("1960-07-10T11:35:37.513Z")},
- {"_id": 4, "date": ISODate("2017-07-06T19:35:37.513Z")},
- {"_id": 5, "date": ISODate("2017-07-06T10:35:37.513Z")},
- {"_id": 6, "date": ISODate("2017-07-06T08:35:37.513Z")},
- ];
-
- assert.eq(
- expectedResults,
- coll.aggregate([
- {
- $project:
- {date: {$dateFromString: {dateString: "$dateString", timezone: "$timezone"}}},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- // Repeat the test with an explicit format specifier string.
- assert.eq(expectedResults,
- coll.aggregate([
- {
- $project: {
- date: {
- $dateFromString: {
- dateString: "$dateString",
- timezone: "$timezone",
- format: "%Y-%m-%dT%H:%M:%S.%L"
- }
- }
- },
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- /* dateString from data with timezone as constant */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, dateString: "2017-07-06T12:35:37"},
- ]));
-
- assert.eq(
- [
- {"_id": 0, "date": ISODate("2017-07-06T03:35:37Z")},
- ],
- coll.aggregate([
- {
- $project: {
- date: {$dateFromString: {dateString: "$dateString", timezone: "Asia/Tokyo"}}
- },
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- /* dateString from constant with timezone from data */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, timezone: "Europe/London"},
- {_id: 1, timezone: "America/New_York"},
- {_id: 2, timezone: "-05:00"},
- ]));
-
- assert.eq(
- [
- {"_id": 0, "date": ISODate("2017-07-19T17:52:35.199Z")},
- {"_id": 1, "date": ISODate("2017-07-19T22:52:35.199Z")},
- {"_id": 2, "date": ISODate("2017-07-19T23:52:35.199Z")},
- ],
- coll.aggregate([
- {
- $project: {
- date: {
- $dateFromString:
- {dateString: "2017-07-19T18:52:35.199", timezone: "$timezone"}
}
+ })
+ .toArray(),
+ tojson(testCase));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Normal format tests from data. */
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, dateString: "2017-07-06T12:35:37Z", format: "%Y-%m-%dT%H:%M:%SZ"},
+ {_id: 1, dateString: "2017-07-06T12:35:37.513Z", format: "%Y-%m-%dT%H:%M:%S.%LZ"},
+ {_id: 2, dateString: "2017-07-06T12:35:37", format: "%Y-%m-%dT%H:%M:%S"},
+ {_id: 3, dateString: "2017-07-06T12:35:37.513", format: "%Y-%m-%dT%H:%M:%S.%L"},
+ {_id: 4, dateString: "1960-07-10T12:10:37.448", format: "%Y-%m-%dT%H:%M:%S.%L"},
+]));
+
+let expectedResults = [
+ {"_id": 0, "date": ISODate("2017-07-06T12:35:37Z")},
+ {"_id": 1, "date": ISODate("2017-07-06T12:35:37.513Z")},
+ {"_id": 2, "date": ISODate("2017-07-06T12:35:37Z")},
+ {"_id": 3, "date": ISODate("2017-07-06T12:35:37.513Z")},
+ {"_id": 4, "date": ISODate("1960-07-10T12:10:37.448Z")},
+];
+assert.eq(expectedResults,
+ coll.aggregate([
+ {
+ $project: {date: {$dateFromString: {dateString: "$dateString"}}},
},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- /* BI format tests. */
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- let pipelines = [
- {
- expect: "2017-01-01T00:00:00Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-01-01 00:00:00"}}}}
- },
- {
- expect: "2017-07-01T00:00:00Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-01 00:00:00"}}}}
- },
- {
- expect: "2017-07-06T00:00:00Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06"}}}}
- },
- {
- expect: "2017-07-06T00:00:00Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 00:00:00"}}}}
- },
- {
- expect: "2017-07-06T11:00:00Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 11:00:00"}}}}
- },
- {
- expect: "2017-07-06T11:36:00Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 11:36:00"}}}}
- },
- {
- expect: "2017-07-06T11:36:54Z",
- pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 11:36:54"}}}}
- },
- ];
- pipelines.forEach(function(pipeline) {
- assert.eq([{_id: 0, date: ISODate(pipeline.expect)}],
- coll.aggregate(pipeline.pipeline).toArray(),
- tojson(pipeline));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Tests with additional timezone information . */
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- testCases = [
- // GMT based variants
- {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT"},
- {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+00"},
- {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+00:00"},
- {expect: "2017-07-14T10:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+02"},
- {expect: "2017-07-14T10:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+02:00"},
- {expect: "2017-07-14T09:02:44.771Z", inputString: "2017-07-14T12:02:44.771+03"},
- {expect: "2017-07-14T08:32:44.771Z", inputString: "2017-07-14T12:02:44.771+0330"},
- {expect: "2017-07-14T08:32:44.771Z", inputString: "2017-07-14T12:02:44.771+03:30"},
- // With timezone abbreviations
- {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 UTC"},
- {expect: "2017-07-14T10:02:44.771Z", inputString: "2017-07-14T12:02:44.771 CEST"},
- {expect: "2017-07-14T17:02:44.771Z", inputString: "2017-07-14T12:02:44.771 EST"},
- {expect: "2017-07-14T19:02:44.771Z", inputString: "2017-07-14T12:02:44.771 PDT"},
- // A-I,K-Z are military time zones:
- // https://en.wikipedia.org/wiki/List_of_military_time_zones
- {expect: "2017-07-14T11:02:44.771Z", inputString: "2017-07-14T12:02:44.771 A"},
- {expect: "2017-07-14T01:02:44.771Z", inputString: "2017-07-14T12:02:44.771 L"},
- {expect: "2017-07-14T15:02:44.771Z", inputString: "2017-07-14T12:02:44.771 P"},
- {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 Z"},
- ];
- testCases.forEach(function(testCase) {
- assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate(
- {$project: {date: {$dateFromString: {dateString: testCase.inputString}}}})
- .toArray(),
- tojson(testCase));
- assert.eq([{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: testCase.inputString,
- format: "%Y-%m-%dT%H:%M:%S.%L%z"
- }
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+// Repeat the test with an explicit format specifier string.
+assert.eq(
+ expectedResults,
+ coll.aggregate([
+ {
+ $project: {date: {$dateFromString: {dateString: "$dateString", format: "$format"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+expectedResults = [
+ {"_id": 0, "date": new Date(1499344537000)},
+ {"_id": 1, "date": new Date(1499344537513)},
+ {"_id": 2, "date": new Date(1499344537000)},
+ {"_id": 3, "date": new Date(1499344537513)},
+ {"_id": 4, "date": new Date(-299072962552)},
+];
+assert.eq(expectedResults,
+ coll.aggregate([
+ {
+ $project: {date: {$dateFromString: {dateString: "$dateString"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+// Repeat the test with an explicit format specifier string.
+assert.eq(
+ expectedResults,
+ coll.aggregate([
+ {
+ $project: {date: {$dateFromString: {dateString: "$dateString", format: "$format"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* Normal format tests from data, with time zone. */
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, dateString: "2017-07-06T12:35:37.513", timezone: "GMT"},
+ {_id: 1, dateString: "2017-07-06T12:35:37.513", timezone: "UTC"},
+ {_id: 2, dateString: "1960-07-10T12:35:37.513", timezone: "America/New_York"},
+ {_id: 3, dateString: "1960-07-10T12:35:37.513", timezone: "Europe/London"},
+ {_id: 4, dateString: "2017-07-06T12:35:37.513", timezone: "America/Los_Angeles"},
+ {_id: 5, dateString: "2017-07-06T12:35:37.513", timezone: "Europe/Paris"},
+ {_id: 6, dateString: "2017-07-06T12:35:37.513", timezone: "+04:00"},
+]));
+
+expectedResults = [
+ {"_id": 0, "date": ISODate("2017-07-06T12:35:37.513Z")},
+ {"_id": 1, "date": ISODate("2017-07-06T12:35:37.513Z")},
+ {"_id": 2, "date": ISODate("1960-07-10T16:35:37.513Z")},
+ {"_id": 3, "date": ISODate("1960-07-10T11:35:37.513Z")},
+ {"_id": 4, "date": ISODate("2017-07-06T19:35:37.513Z")},
+ {"_id": 5, "date": ISODate("2017-07-06T10:35:37.513Z")},
+ {"_id": 6, "date": ISODate("2017-07-06T08:35:37.513Z")},
+];
+
+assert.eq(
+ expectedResults,
+ coll.aggregate([
+ {
+ $project:
+ {date: {$dateFromString: {dateString: "$dateString", timezone: "$timezone"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+// Repeat the test with an explicit format specifier string.
+assert.eq(expectedResults,
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateFromString: {
+ dateString: "$dateString",
+ timezone: "$timezone",
+ format: "%Y-%m-%dT%H:%M:%S.%L"
}
}
- })
- .toArray(),
- tojson(testCase));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* BI format tests from data. */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, dateString: "2017-01-01 00:00:00"},
- {_id: 1, dateString: "2017-07-01 00:00:00"},
- {_id: 2, dateString: "2017-07-06"},
- {_id: 3, dateString: "2017-07-06 00:00:00"},
- {_id: 4, dateString: "2017-07-06 11:00:00"},
- {_id: 5, dateString: "2017-07-06 11:36:00"},
- {_id: 6, dateString: "2017-07-06 11:36:54"},
- ]));
-
- assert.eq(
- [
- {"_id": 0, "date": ISODate("2017-01-01T00:00:00Z")},
- {"_id": 1, "date": ISODate("2017-07-01T00:00:00Z")},
- {"_id": 2, "date": ISODate("2017-07-06T00:00:00Z")},
- {"_id": 3, "date": ISODate("2017-07-06T00:00:00Z")},
- {"_id": 4, "date": ISODate("2017-07-06T11:00:00Z")},
- {"_id": 5, "date": ISODate("2017-07-06T11:36:00Z")},
- {"_id": 6, "date": ISODate("2017-07-06T11:36:54Z")}
- ],
- coll.aggregate([
- {
- $project: {date: {$dateFromString: {dateString: "$dateString"}}},
+ },
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* dateString from data with timezone as constant */
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, dateString: "2017-07-06T12:35:37"},
+]));
+
+assert.eq(
+ [
+ {"_id": 0, "date": ISODate("2017-07-06T03:35:37Z")},
+ ],
+ coll.aggregate([
+ {
+ $project:
+ {date: {$dateFromString: {dateString: "$dateString", timezone: "Asia/Tokyo"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* dateString from constant with timezone from data */
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, timezone: "Europe/London"},
+ {_id: 1, timezone: "America/New_York"},
+ {_id: 2, timezone: "-05:00"},
+]));
+
+assert.eq(
+ [
+ {"_id": 0, "date": ISODate("2017-07-19T17:52:35.199Z")},
+ {"_id": 1, "date": ISODate("2017-07-19T22:52:35.199Z")},
+ {"_id": 2, "date": ISODate("2017-07-19T23:52:35.199Z")},
+ ],
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: "2017-07-19T18:52:35.199", timezone: "$timezone"}
+ }
},
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- /* Wacky format tests from data. */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, dateString: "July 4th, 2017"},
- {_id: 1, dateString: "July 4th, 2017 12:39:30 BST"},
- {_id: 2, dateString: "July 4th, 2017 11am"},
- {_id: 3, dateString: "July 4th, 2017 12pm"},
- {_id: 4, dateString: "7/4/17"},
- {_id: 5, dateString: "04-07-2017"},
- {_id: 6, dateString: "2017-Jul-04 noon"},
- {_id: 7, dateString: "2017-07-04 12:48:07 GMT+0545"},
- {_id: 8, dateString: "2017-07-04 12:48:07 GMT-0200"},
- ]));
-
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* BI format tests. */
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+let pipelines = [
+ {
+ expect: "2017-01-01T00:00:00Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-01-01 00:00:00"}}}}
+ },
+ {
+ expect: "2017-07-01T00:00:00Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-01 00:00:00"}}}}
+ },
+ {
+ expect: "2017-07-06T00:00:00Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06"}}}}
+ },
+ {
+ expect: "2017-07-06T00:00:00Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 00:00:00"}}}}
+ },
+ {
+ expect: "2017-07-06T11:00:00Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 11:00:00"}}}}
+ },
+ {
+ expect: "2017-07-06T11:36:00Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 11:36:00"}}}}
+ },
+ {
+ expect: "2017-07-06T11:36:54Z",
+ pipeline: {$project: {date: {$dateFromString: {dateString: "2017-07-06 11:36:54"}}}}
+ },
+];
+pipelines.forEach(function(pipeline) {
+ assert.eq([{_id: 0, date: ISODate(pipeline.expect)}],
+ coll.aggregate(pipeline.pipeline).toArray(),
+ tojson(pipeline));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Tests with additional timezone information. */
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+testCases = [
+ // GMT based variants
+ {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT"},
+ {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+00"},
+ {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+00:00"},
+ {expect: "2017-07-14T10:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+02"},
+ {expect: "2017-07-14T10:02:44.771Z", inputString: "2017-07-14T12:02:44.771 GMT+02:00"},
+ {expect: "2017-07-14T09:02:44.771Z", inputString: "2017-07-14T12:02:44.771+03"},
+ {expect: "2017-07-14T08:32:44.771Z", inputString: "2017-07-14T12:02:44.771+0330"},
+ {expect: "2017-07-14T08:32:44.771Z", inputString: "2017-07-14T12:02:44.771+03:30"},
+ // With timezone abbreviations
+ {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 UTC"},
+ {expect: "2017-07-14T10:02:44.771Z", inputString: "2017-07-14T12:02:44.771 CEST"},
+ {expect: "2017-07-14T17:02:44.771Z", inputString: "2017-07-14T12:02:44.771 EST"},
+ {expect: "2017-07-14T19:02:44.771Z", inputString: "2017-07-14T12:02:44.771 PDT"},
+ // A-I,K-Z are military time zones:
+ // https://en.wikipedia.org/wiki/List_of_military_time_zones
+ {expect: "2017-07-14T11:02:44.771Z", inputString: "2017-07-14T12:02:44.771 A"},
+ {expect: "2017-07-14T01:02:44.771Z", inputString: "2017-07-14T12:02:44.771 L"},
+ {expect: "2017-07-14T15:02:44.771Z", inputString: "2017-07-14T12:02:44.771 P"},
+ {expect: "2017-07-14T12:02:44.771Z", inputString: "2017-07-14T12:02:44.771 Z"},
+];
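+
+// Military zones run A (UTC+1) through M (UTC+12) and N (UTC-1) through Y (UTC-12), with
+// Z for UTC, which is why "L" above subtracts eleven hours and "P" adds three.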
+testCases.forEach(function(testCase) {
assert.eq(
- [
- {"_id": 0, "date": ISODate("2017-07-04T00:00:00Z")},
- {"_id": 1, "date": ISODate("2017-07-04T11:39:30Z")},
- {"_id": 2, "date": ISODate("2017-07-04T11:00:00Z")},
- {"_id": 3, "date": ISODate("2017-07-04T12:00:00Z")},
- {"_id": 4, "date": ISODate("2017-07-04T00:00:00Z")},
- {"_id": 5, "date": ISODate("2017-07-04T00:00:00Z")},
- {"_id": 6, "date": ISODate("2017-07-04T12:00:00Z")},
- {"_id": 7, "date": ISODate("2017-07-04T07:03:07Z")},
- {"_id": 8, "date": ISODate("2017-07-04T14:48:07Z")},
- ],
- coll.aggregate([
- {
- $project: {date: {$dateFromString: {dateString: "$dateString"}}},
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- /* Tests formats that aren't supported with the normal $dateFromString parser. */
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
-
- testCases = [
- {inputString: "05 12 1988", format: "%d %m %Y", expect: "1988-12-05T00:00:00Z"},
- {inputString: "1992 04 26", format: "%Y %m %d", expect: "1992-04-26T00:00:00Z"},
- {inputString: "05*12*1988", format: "%d*%m*%Y", expect: "1988-12-05T00:00:00Z"},
- {inputString: "1992/04/26", format: "%Y/%m/%d", expect: "1992-04-26T00:00:00Z"},
- {inputString: "1992 % 04 % 26", format: "%Y %% %m %% %d", expect: "1992-04-26T00:00:00Z"},
- {
- inputString: "Day: 05 Month: 12 Year: 1988",
- format: "Day: %d Month: %m Year: %Y",
- expect: "1988-12-05T00:00:00Z"
- },
- {inputString: "Date: 1992/04/26", format: "Date: %Y/%m/%d", expect: "1992-04-26T00:00:00Z"},
- {inputString: "4/26/1992:+0445", format: "%m/%d/%Y:%z", expect: "1992-04-25T19:15:00Z"},
- {inputString: "4/26/1992:+285", format: "%m/%d/%Y:%Z", expect: "1992-04-25T19:15:00Z"},
- ];
- testCases.forEach(function(testCase) {
- assert.eq(
- [{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString:
- {dateString: testCase.inputString, format: testCase.format}
- }
+ [{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({$project: {date: {$dateFromString: {dateString: testCase.inputString}}}})
+ .toArray(),
+ tojson(testCase));
+ assert.eq(
+ [{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: testCase.inputString, format: "%Y-%m-%dT%H:%M:%S.%L%z"}
}
- })
- .toArray(),
- tojson(testCase));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Tests for ISO year, week of year, and day of the week. */
-
- testCases = [
- {inputString: "2017", format: "%G", expect: "2017-01-02T00:00:00Z"},
- {inputString: "2017, Week 53", format: "%G, Week %V", expect: "2018-01-01T00:00:00Z"},
- {inputString: "2017, Day 5", format: "%G, Day %u", expect: "2017-01-06T00:00:00Z"},
- {inputString: "53.7.2017", format: "%V.%u.%G", expect: "2018-01-07T00:00:00Z"},
- {inputString: "1.1.1", format: "%V.%u.%G", expect: "0001-01-01T00:00:00Z"},
- ];
- testCases.forEach(function(testCase) {
- assert.eq(
- [{_id: 0, date: ISODate(testCase.expect)}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString:
- {dateString: testCase.inputString, format: testCase.format}
- }
+ }
+ })
+ .toArray(),
+ tojson(testCase));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* BI format tests from data. */
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, dateString: "2017-01-01 00:00:00"},
+ {_id: 1, dateString: "2017-07-01 00:00:00"},
+ {_id: 2, dateString: "2017-07-06"},
+ {_id: 3, dateString: "2017-07-06 00:00:00"},
+ {_id: 4, dateString: "2017-07-06 11:00:00"},
+ {_id: 5, dateString: "2017-07-06 11:36:00"},
+ {_id: 6, dateString: "2017-07-06 11:36:54"},
+]));
+
+assert.eq(
+ [
+ {"_id": 0, "date": ISODate("2017-01-01T00:00:00Z")},
+ {"_id": 1, "date": ISODate("2017-07-01T00:00:00Z")},
+ {"_id": 2, "date": ISODate("2017-07-06T00:00:00Z")},
+ {"_id": 3, "date": ISODate("2017-07-06T00:00:00Z")},
+ {"_id": 4, "date": ISODate("2017-07-06T11:00:00Z")},
+ {"_id": 5, "date": ISODate("2017-07-06T11:36:00Z")},
+ {"_id": 6, "date": ISODate("2017-07-06T11:36:54Z")}
+ ],
+ coll.aggregate([
+ {
+ $project: {date: {$dateFromString: {dateString: "$dateString"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* Wacky format tests from data. */
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, dateString: "July 4th, 2017"},
+ {_id: 1, dateString: "July 4th, 2017 12:39:30 BST"},
+ {_id: 2, dateString: "July 4th, 2017 11am"},
+ {_id: 3, dateString: "July 4th, 2017 12pm"},
+ {_id: 4, dateString: "7/4/17"},
+ {_id: 5, dateString: "04-07-2017"},
+ {_id: 6, dateString: "2017-Jul-04 noon"},
+ {_id: 7, dateString: "2017-07-04 12:48:07 GMT+0545"},
+ {_id: 8, dateString: "2017-07-04 12:48:07 GMT-0200"},
+]));
+
+assert.eq(
+ [
+ {"_id": 0, "date": ISODate("2017-07-04T00:00:00Z")},
+ {"_id": 1, "date": ISODate("2017-07-04T11:39:30Z")},
+ {"_id": 2, "date": ISODate("2017-07-04T11:00:00Z")},
+ {"_id": 3, "date": ISODate("2017-07-04T12:00:00Z")},
+ {"_id": 4, "date": ISODate("2017-07-04T00:00:00Z")},
+ {"_id": 5, "date": ISODate("2017-07-04T00:00:00Z")},
+ {"_id": 6, "date": ISODate("2017-07-04T12:00:00Z")},
+ {"_id": 7, "date": ISODate("2017-07-04T07:03:07Z")},
+ {"_id": 8, "date": ISODate("2017-07-04T14:48:07Z")},
+ ],
+ coll.aggregate([
+ {
+ $project: {date: {$dateFromString: {dateString: "$dateString"}}},
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* Tests formats that aren't supported with the normal $dateFromString parser. */
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+
+testCases = [
+ {inputString: "05 12 1988", format: "%d %m %Y", expect: "1988-12-05T00:00:00Z"},
+ {inputString: "1992 04 26", format: "%Y %m %d", expect: "1992-04-26T00:00:00Z"},
+ {inputString: "05*12*1988", format: "%d*%m*%Y", expect: "1988-12-05T00:00:00Z"},
+ {inputString: "1992/04/26", format: "%Y/%m/%d", expect: "1992-04-26T00:00:00Z"},
+ {inputString: "1992 % 04 % 26", format: "%Y %% %m %% %d", expect: "1992-04-26T00:00:00Z"},
+ {
+ inputString: "Day: 05 Month: 12 Year: 1988",
+ format: "Day: %d Month: %m Year: %Y",
+ expect: "1988-12-05T00:00:00Z"
+ },
+ {inputString: "Date: 1992/04/26", format: "Date: %Y/%m/%d", expect: "1992-04-26T00:00:00Z"},
+ {inputString: "4/26/1992:+0445", format: "%m/%d/%Y:%z", expect: "1992-04-25T19:15:00Z"},
+ {inputString: "4/26/1992:+285", format: "%m/%d/%Y:%Z", expect: "1992-04-25T19:15:00Z"},
+];
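+
+// In the last two cases above, %z consumes a UTC offset in +/-hhmm form (+0445), while %Z
+// consumes an offset in minutes from UTC (+285 minutes == +04:45), so both parse to the
+// same instant.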
+testCases.forEach(function(testCase) {
+ assert.eq(
+ [{_id: 0, date: ISODate(testCase.expect)}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {dateString: testCase.inputString, format: testCase.format}
}
- })
- .toArray(),
- tojson(testCase));
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Testing whether it throws the right assert for missing elements of a date/time string. */
-
- coll.drop();
-
- assert.writeOK(coll.insert([
- {_id: 0},
- ]));
-
- pipelines = [
- [{'$project': {date: {$dateFromString: {dateString: "July 4th"}}}}],
- [{'$project': {date: {$dateFromString: {dateString: "12:50:53"}}}}],
- ];
-
- pipelines.forEach(function(pipeline) {
- assertErrMsgContains(coll,
- pipeline,
- ErrorCodes.ConversionFailure,
- "an incomplete date/time string has been found");
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* Testing whether it throws the right assert for broken date/time strings. */
-
- coll.drop();
-
- assert.writeOK(coll.insert([
- {_id: 0},
- ]));
-
- pipelines = [
- [{'$project': {date: {$dateFromString: {dateString: "2017, 12:50:53"}}}}],
- [{'$project': {date: {$dateFromString: {dateString: "60.Monday1770/06:59"}}}}],
- ];
-
- pipelines.forEach(function(pipeline) {
- assertErrMsgContains(
- coll, pipeline, ErrorCodes.ConversionFailure, "Error parsing date string");
- });
-
- /* --------------------------------------------------------------------------------------- */
- /* NULL returns. */
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-06-19T15:13:25.713Z")},
- {_id: 1, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: null},
- {_id: 2, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: undefined},
- ]));
-
- pipelines = [
- [{$project: {date: {$dateFromString: {dateString: "$tz"}}}}, {$sort: {_id: 1}}],
- [
- {
- $project:
- {date: {$dateFromString: {dateString: "2017-07-11T17:05:19Z", timezone: "$tz"}}}
- },
- {$sort: {_id: 1}}
- ],
- ];
- pipelines.forEach(function(pipeline) {
- assert.eq([{_id: 0, date: null}, {_id: 1, date: null}, {_id: 2, date: null}],
- coll.aggregate(pipeline).toArray(),
- tojson(pipeline));
- });
-
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0},
- {_id: 1, format: null},
- {_id: 2, format: undefined},
- ]));
-
+ }
+ })
+ .toArray(),
+ tojson(testCase));
+});
+
+/* --------------------------------------------------------------------------------------- */
+/* Tests for ISO year, week of year, and day of the week. */
+
+testCases = [
+ {inputString: "2017", format: "%G", expect: "2017-01-02T00:00:00Z"},
+ {inputString: "2017, Week 53", format: "%G, Week %V", expect: "2018-01-01T00:00:00Z"},
+ {inputString: "2017, Day 5", format: "%G, Day %u", expect: "2017-01-06T00:00:00Z"},
+ {inputString: "53.7.2017", format: "%V.%u.%G", expect: "2018-01-07T00:00:00Z"},
+ {inputString: "1.1.1", format: "%V.%u.%G", expect: "0001-01-01T00:00:00Z"},
+];
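+
+// %G is the ISO week-numbering year, %V the ISO week, and %u the ISO day of the week
+// (1 = Monday); unspecified parts default to week 1, day 1, so "2017" resolves to Monday
+// 2017-01-02, and an out-of-range week carries forward into the next ISO year.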
+testCases.forEach(function(testCase) {
assert.eq(
- [{_id: 0, date: null}, {_id: 1, date: null}, {_id: 2, date: null}],
+ [{_id: 0, date: ISODate(testCase.expect)}],
coll.aggregate({
$project: {
date: {
- $dateFromString: {dateString: "2017-07-11T17:05:19Z", format: "$format"}
+ $dateFromString: {dateString: testCase.inputString, format: testCase.format}
}
}
})
- .toArray());
+ .toArray(),
+ tojson(testCase));
+});
- /* --------------------------------------------------------------------------------------- */
- /* Parse errors. */
+/* --------------------------------------------------------------------------------------- */
+/* Testing whether it throws the right assert for missing elements of a date/time string. */
- let pipeline = [{$project: {date: {$dateFromString: "no-object"}}}];
- assertErrMsgContains(
- coll, pipeline, 40540, "$dateFromString only supports an object as an argument");
+coll.drop();
- pipeline = [{$project: {date: {$dateFromString: {"unknown": "$tz"}}}}];
- assertErrMsgContains(coll, pipeline, 40541, "Unrecognized argument");
+assert.writeOK(coll.insert([
+ {_id: 0},
+]));
- pipeline = [{$project: {date: {$dateFromString: {dateString: 5}}}}];
+pipelines = [
+ [{'$project': {date: {$dateFromString: {dateString: "July 4th"}}}}],
+ [{'$project': {date: {$dateFromString: {dateString: "12:50:53"}}}}],
+];
+
+pipelines.forEach(function(pipeline) {
assertErrMsgContains(coll,
pipeline,
ErrorCodes.ConversionFailure,
- "$dateFromString requires that 'dateString' be a string");
-
- /* --------------------------------------------------------------------------------------- */
- /* Passing in time zone with date/time string. */
+ "an incomplete date/time string has been found");
+});
- pipeline = {
- $project: {
- date: {
- $dateFromString:
- {dateString: "2017-07-12T22:23:55 GMT+02:00", timezone: "Europe/Amsterdam"}
- }
- }
- };
- assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
-
- pipeline = {
- $project: {
- date: {
- $dateFromString:
- {dateString: "2017-07-12T22:23:55Z", timezone: "Europe/Amsterdam"}
- }
- }
- };
- assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
-
- pipeline = {
- $project: {
- date: {
- $dateFromString: {
- dateString: "2017-07-12T22:23:55 America/New_York",
- timezone: "Europe/Amsterdam"
- }
- }
- }
- };
- assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
+/* --------------------------------------------------------------------------------------- */
+/* Testing whether it throws the right assert for broken date/time strings. */
- pipeline = {
- $project: {date: {$dateFromString: {dateString: "2017-07-12T22:23:55 Europe/Amsterdam"}}}
- };
- assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
+coll.drop();
- /* --------------------------------------------------------------------------------------- */
- /* Error cases for $dateFromString with format specifier string. */
+assert.writeOK(coll.insert([
+ {_id: 0},
+]));
- // Test umatched format specifier string.
- pipeline = [{$project: {date: {$dateFromString: {dateString: "2018-01", format: "%Y-%m-%d"}}}}];
- assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Data missing");
+pipelines = [
+ [{'$project': {date: {$dateFromString: {dateString: "2017, 12:50:53"}}}}],
+ [{'$project': {date: {$dateFromString: {dateString: "60.Monday1770/06:59"}}}}],
+];
- pipeline = [{$project: {date: {$dateFromString: {dateString: "2018-01", format: "%Y"}}}}];
- assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Trailing data");
+pipelines.forEach(function(pipeline) {
+ assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Error parsing date string");
+});
- // Test missing specifier prefix '%'.
- pipeline = [{$project: {date: {$dateFromString: {dateString: "1992-26-04", format: "Y-d-m"}}}}];
- assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Format literal not found");
+/* --------------------------------------------------------------------------------------- */
+/* NULL returns. */
- pipeline = [{$project: {date: {$dateFromString: {dateString: "1992", format: "%n"}}}}];
- assertErrMsgContains(coll, pipeline, 18536, "Invalid format character");
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-06-19T15:13:25.713Z")},
+ {_id: 1, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: null},
+ {_id: 2, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: undefined},
+]));
- pipeline = [{
- $project: {
- date: {
- $dateFromString:
- {dateString: "4/26/1992:+0445", format: "%m/%d/%Y:%z", timezone: "+0500"}
- }
+pipelines = [
+ [{$project: {date: {$dateFromString: {dateString: "$tz"}}}}, {$sort: {_id: 1}}],
+ [
+ {
+ $project:
+ {date: {$dateFromString: {dateString: "2017-07-11T17:05:19Z", timezone: "$tz"}}}
+ },
+ {$sort: {_id: 1}}
+ ],
+];
+pipelines.forEach(function(pipeline) {
+ assert.eq([{_id: 0, date: null}, {_id: 1, date: null}, {_id: 2, date: null}],
+ coll.aggregate(pipeline).toArray(),
+ tojson(pipeline));
+});
+
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0},
+ {_id: 1, format: null},
+ {_id: 2, format: undefined},
+]));
+
+assert.eq(
+ [{_id: 0, date: null}, {_id: 1, date: null}, {_id: 2, date: null}],
+ coll.aggregate({
+ $project:
+ {date: {$dateFromString: {dateString: "2017-07-11T17:05:19Z", format: "$format"}}}
+ })
+ .toArray());
+
+/* --------------------------------------------------------------------------------------- */
+/* Parse errors. */
+
+let pipeline = [{$project: {date: {$dateFromString: "no-object"}}}];
+assertErrMsgContains(
+ coll, pipeline, 40540, "$dateFromString only supports an object as an argument");
+
+pipeline = [{$project: {date: {$dateFromString: {"unknown": "$tz"}}}}];
+assertErrMsgContains(coll, pipeline, 40541, "Unrecognized argument");
+
+pipeline = [{$project: {date: {$dateFromString: {dateString: 5}}}}];
+assertErrMsgContains(coll,
+ pipeline,
+ ErrorCodes.ConversionFailure,
+ "$dateFromString requires that 'dateString' be a string");
+
+/* --------------------------------------------------------------------------------------- */
+/* Passing in time zone with date/time string. */
+
+pipeline = {
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: "2017-07-12T22:23:55 GMT+02:00", timezone: "Europe/Amsterdam"}
}
- }];
- assertErrMsgContains(
- coll,
- pipeline,
- ErrorCodes.ConversionFailure,
- "you cannot pass in a date/time string with GMT offset together with a timezone argument");
-
- pipeline = [{$project: {date: {$dateFromString: {dateString: "4/26/1992", format: 5}}}}];
- assertErrMsgContains(
- coll, pipeline, 40684, "$dateFromString requires that 'format' be a string");
-
- pipeline = [{$project: {date: {$dateFromString: {dateString: "4/26/1992", format: {}}}}}];
- assertErrMsgContains(
- coll, pipeline, 40684, "$dateFromString requires that 'format' be a string");
-
- pipeline =
- [{$project: {date: {$dateFromString: {dateString: "ISO Day 6", format: "ISO Day %u"}}}}];
- assertErrMsgContains(
- coll, pipeline, ErrorCodes.ConversionFailure, "The parsed date was invalid");
-
- pipeline =
- [{$project: {date: {$dateFromString: {dateString: "ISO Week 52", format: "ISO Week %V"}}}}];
- assertErrMsgContains(
- coll, pipeline, ErrorCodes.ConversionFailure, "The parsed date was invalid");
-
- pipeline = [{
- $project: {
- date: {$dateFromString: {dateString: "ISO Week 1, 2018", format: "ISO Week %V, %Y"}}
+ }
+};
+assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
+
+pipeline = {
+ $project: {
+ date: {$dateFromString: {dateString: "2017-07-12T22:23:55Z", timezone: "Europe/Amsterdam"}}
+ }
+};
+assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
+
+pipeline = {
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: "2017-07-12T22:23:55 America/New_York", timezone: "Europe/Amsterdam"}
}
- }];
- assertErrMsgContains(coll,
- pipeline,
- ErrorCodes.ConversionFailure,
- "Mixing of ISO dates with natural dates is not allowed");
+ }
+};
+assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
- pipeline =
- [{$project: {date: {$dateFromString: {dateString: "12/31/2018", format: "%m/%d/%G"}}}}];
- assertErrMsgContains(coll,
- pipeline,
- ErrorCodes.ConversionFailure,
- "Mixing of ISO dates with natural dates is not allowed");
+pipeline = {
+ $project: {date: {$dateFromString: {dateString: "2017-07-12T22:23:55 Europe/Amsterdam"}}}
+};
+assertErrorCode(coll, pipeline, ErrorCodes.ConversionFailure);
+
+/* --------------------------------------------------------------------------------------- */
+/* Error cases for $dateFromString with format specifier string. */
- // Test embedded null bytes in the 'dateString' and 'format' fields.
- pipeline =
- [{$project: {date: {$dateFromString: {dateString: "12/31\0/2018", format: "%m/%d/%Y"}}}}];
- assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Data missing");
+// Test unmatched format specifier string.
+pipeline = [{$project: {date: {$dateFromString: {dateString: "2018-01", format: "%Y-%m-%d"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Data missing");
- pipeline =
- [{$project: {date: {$dateFromString: {dateString: "12/31/2018", format: "%m/%d\0/%Y"}}}}];
- assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Trailing data");
+pipeline = [{$project: {date: {$dateFromString: {dateString: "2018-01", format: "%Y"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Trailing data");
+
+// Test missing specifier prefix '%'.
+pipeline = [{$project: {date: {$dateFromString: {dateString: "1992-26-04", format: "Y-d-m"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Format literal not found");
+
+pipeline = [{$project: {date: {$dateFromString: {dateString: "1992", format: "%n"}}}}];
+assertErrMsgContains(coll, pipeline, 18536, "Invalid format character");
+
+pipeline = [{
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: "4/26/1992:+0445", format: "%m/%d/%Y:%z", timezone: "+0500"}
+ }
+ }
+}];
+assertErrMsgContains(
+ coll,
+ pipeline,
+ ErrorCodes.ConversionFailure,
+ "you cannot pass in a date/time string with GMT offset together with a timezone argument");
+
+pipeline = [{$project: {date: {$dateFromString: {dateString: "4/26/1992", format: 5}}}}];
+assertErrMsgContains(coll, pipeline, 40684, "$dateFromString requires that 'format' be a string");
+
+pipeline = [{$project: {date: {$dateFromString: {dateString: "4/26/1992", format: {}}}}}];
+assertErrMsgContains(coll, pipeline, 40684, "$dateFromString requires that 'format' be a string");
+
+pipeline = [{$project: {date: {$dateFromString: {dateString: "ISO Day 6", format: "ISO Day %u"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "The parsed date was invalid");
+
+pipeline =
+ [{$project: {date: {$dateFromString: {dateString: "ISO Week 52", format: "ISO Week %V"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "The parsed date was invalid");
+
+pipeline = [{
+ $project: {date: {$dateFromString: {dateString: "ISO Week 1, 2018", format: "ISO Week %V, %Y"}}}
+}];
+assertErrMsgContains(coll,
+ pipeline,
+ ErrorCodes.ConversionFailure,
+ "Mixing of ISO dates with natural dates is not allowed");
+
+pipeline = [{$project: {date: {$dateFromString: {dateString: "12/31/2018", format: "%m/%d/%G"}}}}];
+assertErrMsgContains(coll,
+ pipeline,
+ ErrorCodes.ConversionFailure,
+ "Mixing of ISO dates with natural dates is not allowed");
+
+// Test embedded null bytes in the 'dateString' and 'format' fields.
+pipeline =
+ [{$project: {date: {$dateFromString: {dateString: "12/31\0/2018", format: "%m/%d/%Y"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Data missing");
+
+pipeline =
+ [{$project: {date: {$dateFromString: {dateString: "12/31/2018", format: "%m/%d\0/%Y"}}}}];
+assertErrMsgContains(coll, pipeline, ErrorCodes.ConversionFailure, "Trailing data");
})();
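
For orientation, a minimal shell sketch of the parsing contract the error cases above pin down; the single-document collection name "scratch" is hypothetical and not part of the suite.

// Hedged sketch: a fully matched format string parses to a UTC date,
// while leftover input or unconsumed specifiers fail with
// ErrorCodes.ConversionFailure ("Trailing data" / "Data missing").
db.scratch.drop();
db.scratch.insert({_id: 0});
db.scratch
    .aggregate([
        {$project: {d: {$dateFromString: {dateString: "12/31/2018", format: "%m/%d/%Y"}}}}
    ])
    .toArray();
// => [{_id: 0, d: ISODate("2018-12-31T00:00:00Z")}]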
diff --git a/jstests/aggregation/expressions/date_from_string_on_error.js b/jstests/aggregation/expressions/date_from_string_on_error.js
index ba0ce0fa573..6611fe4a79f 100644
--- a/jstests/aggregation/expressions/date_from_string_on_error.js
+++ b/jstests/aggregation/expressions/date_from_string_on_error.js
@@ -2,132 +2,76 @@
* Tests for the $dateFromString expression with the optional 'onError' parameter.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrMsgContains.
+load("jstests/aggregation/extras/utils.js"); // For assertErrMsgContains.
- const onErrorValue = ISODate("2017-07-04T11:56:02Z");
- const coll = db.date_from_string_on_error;
- coll.drop();
+const onErrorValue = ISODate("2017-07-04T11:56:02Z");
+const coll = db.date_from_string_on_error;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 0}));
- // Test that the 'onError' value is returned when 'dateString' is not a valid date/time.
- for (let inputDate of["July 4th",
- "12:50:53",
- "2017",
- "60.Monday1770/06:59",
- "Not even close",
- "July 4th, 10000"]) {
- assert.eq(
- [{_id: 0, date: onErrorValue}],
- coll.aggregate({
- $project:
- {date: {$dateFromString: {dateString: inputDate, onError: onErrorValue}}}
- })
- .toArray());
- }
-
- // Test that the 'onError' value is returned when 'dateString' is not a string.
- for (let inputDate of[5, {year: 2018, month: 2, day: 5}, ["2018-02-05"]]) {
- assert.eq(
- [{_id: 0, date: onErrorValue}],
- coll.aggregate({
- $project:
- {date: {$dateFromString: {dateString: inputDate, onError: onErrorValue}}}
- })
- .toArray());
- }
-
- // Test that the 'onError' value is ignored when 'dateString' is nullish.
- for (let inputDate of[null, undefined, "$missing"]) {
- assert.eq(
- [{_id: 0, date: null}],
- coll.aggregate({
- $project:
- {date: {$dateFromString: {dateString: inputDate, onError: onErrorValue}}}
- })
- .toArray());
- }
+// Test that the 'onError' value is returned when 'dateString' is not a valid date/time.
+for (let inputDate of ["July 4th",
+ "12:50:53",
+ "2017",
+ "60.Monday1770/06:59",
+ "Not even close",
+ "July 4th, 10000"]) {
+ assert.eq(
+ [{_id: 0, date: onErrorValue}],
+ coll.aggregate({
+ $project: {date: {$dateFromString: {dateString: inputDate, onError: onErrorValue}}}
+ })
+ .toArray());
+}
- // Test that the 'onError' value is returned for unmatched format strings.
- for (let inputFormat of["%Y", "%Y-%m-%dT%H", "Y-m-d"]) {
- assert.eq([{_id: 0, date: onErrorValue}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: "2018-02-06",
- format: inputFormat,
- onError: onErrorValue
- }
- }
- }
- })
- .toArray());
- }
+// Test that the 'onError' value is returned when 'dateString' is not a string.
+for (let inputDate of [5, {year: 2018, month: 2, day: 5}, ["2018-02-05"]]) {
+ assert.eq(
+ [{_id: 0, date: onErrorValue}],
+ coll.aggregate({
+ $project: {date: {$dateFromString: {dateString: inputDate, onError: onErrorValue}}}
+ })
+ .toArray());
+}
- // Test that null is returned when the 'timezone' or 'format' is nullish, regardless of the
- // 'onError' value.
- for (let nullishValue of[null, undefined, "$missing"]) {
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: "2018-02-06T11:56:02Z",
- format: nullishValue,
- onError: onErrorValue
- }
- }
- }
- })
- .toArray());
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: "2018-02-06T11:56:02Z",
- timezone: nullishValue,
- onError: onErrorValue
- }
- }
- }
- })
- .toArray());
- }
+// Test that the 'onError' value is ignored when 'dateString' is nullish.
+for (let inputDate of [null, undefined, "$missing"]) {
+ assert.eq(
+ [{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {date: {$dateFromString: {dateString: inputDate, onError: onErrorValue}}}
+ })
+ .toArray());
+}
- // Test that onError is returned when the input is not a string and other parameters are
- // nullish.
+// Test that the 'onError' value is returned for unmatched format strings.
+for (let inputFormat of ["%Y", "%Y-%m-%dT%H", "Y-m-d"]) {
assert.eq(
[{_id: 0, date: onErrorValue}],
coll.aggregate({
$project: {
- date: {$dateFromString: {dateString: 5, format: null, onError: onErrorValue}}
+ date: {
+ $dateFromString:
+ {dateString: "2018-02-06", format: inputFormat, onError: onErrorValue}
+ }
}
})
.toArray());
- assert.eq([{_id: 0, date: onErrorValue}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString:
- {dateString: 5, timezone: "$missing", onError: onErrorValue}
- }
- }
- })
- .toArray());
+}
- // Test that onError is ignored when the input is an invalid string and other parameters are
- // nullish.
+// Test that null is returned when the 'timezone' or 'format' is nullish, regardless of the
+// 'onError' value.
+for (let nullishValue of [null, undefined, "$missing"]) {
assert.eq([{_id: 0, date: null}],
coll.aggregate({
$project: {
date: {
$dateFromString: {
- dateString: "Invalid date string",
- format: null,
+ dateString: "2018-02-06T11:56:02Z",
+ format: nullishValue,
onError: onErrorValue
}
}
@@ -139,55 +83,95 @@
$project: {
date: {
$dateFromString: {
- dateString: "Invalid date string",
- timezone: "$missing",
+ dateString: "2018-02-06T11:56:02Z",
+ timezone: nullishValue,
onError: onErrorValue
}
}
}
})
.toArray());
+}
+
+// Test that onError is returned when the input is not a string and other parameters are
+// nullish.
+assert.eq(
+ [{_id: 0, date: onErrorValue}],
+ coll.aggregate({
+ $project:
+ {date: {$dateFromString: {dateString: 5, format: null, onError: onErrorValue}}}
+ })
+ .toArray());
+assert.eq(
+ [{_id: 0, date: onErrorValue}],
+ coll.aggregate({
+ $project: {
+ date:
+ {$dateFromString: {dateString: 5, timezone: "$missing", onError: onErrorValue}}
+ }
+ })
+ .toArray());
+
+// Test that onError is ignored when the input is an invalid string and other parameters are
+// nullish.
+assert.eq(
+ [{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString:
+ {dateString: "Invalid date string", format: null, onError: onErrorValue}
+ }
+ }
+ })
+ .toArray());
+assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {
+ dateString: "Invalid date string",
+ timezone: "$missing",
+ onError: onErrorValue
+ }
+ }
+ }
+ })
+ .toArray());
- // Test that 'onError' can be any type, not just an ISODate.
- for (let onError of[{}, 5, "Not a date", null, undefined]) {
- assert.eq(
- [{_id: 0, date: onError}],
- coll.aggregate({
- $project: {date: {$dateFromString: {dateString: "invalid", onError: onError}}}
- })
- .toArray());
- }
- // Test that a missing 'onError' value results in no output field when used within a $project
- // stage.
+// Test that 'onError' can be any type, not just an ISODate.
+for (let onError of [{}, 5, "Not a date", null, undefined]) {
assert.eq(
- [{_id: 0}],
+ [{_id: 0, date: onError}],
coll.aggregate(
- {$project: {date: {$dateFromString: {dateString: "invalid", onError: "$missing"}}}})
+ {$project: {date: {$dateFromString: {dateString: "invalid", onError: onError}}}})
.toArray());
+}
+// Test that a missing 'onError' value results in no output field when used within a $project
+// stage.
+assert.eq(
+ [{_id: 0}],
+ coll.aggregate(
+ {$project: {date: {$dateFromString: {dateString: "invalid", onError: "$missing"}}}})
+ .toArray());
- // Test that 'onError' is ignored when the 'format' is invalid.
- assertErrMsgContains(
- coll,
- [{
- $project: {
- date: {
- $dateFromString: {dateString: "4/26/1992", format: 5, onError: onErrorValue}
- }
- }
- }],
- 40684,
- "$dateFromString requires that 'format' be a string");
+// Test that 'onError' is ignored when the 'format' is invalid.
+assertErrMsgContains(
+ coll,
+ [{
+ $project:
+ {date: {$dateFromString: {dateString: "4/26/1992", format: 5, onError: onErrorValue}}}
+ }],
+ 40684,
+ "$dateFromString requires that 'format' be a string");
- assertErrMsgContains(
- coll,
- [{
- $project: {
- date: {
- $dateFromString:
- {dateString: "4/26/1992", format: "%n", onError: onErrorValue}
- }
- }
- }],
- 18536,
- "Invalid format character '%n' in format string");
+assertErrMsgContains(
+ coll,
+ [{
+ $project: {
+ date: {$dateFromString: {dateString: "4/26/1992", format: "%n", onError: onErrorValue}}
+ }
+ }],
+ 18536,
+ "Invalid format character '%n' in format string");
})();
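
As a companion to the assertions above, a hedged sketch of the 'onError' contract at the shell, reusing the hypothetical single-document "scratch" collection:

// Sketch only: a parse failure yields the 'onError' value instead of
// raising, while an invalid 'format' type still throws (code 40684).
db.scratch.aggregate([{
    $project: {
        d: {$dateFromString: {dateString: "not a date", onError: ISODate("1970-01-01T00:00:00Z")}}
    }
}]).toArray();
// => [{_id: 0, d: ISODate("1970-01-01T00:00:00Z")}]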
diff --git a/jstests/aggregation/expressions/date_from_string_on_null.js b/jstests/aggregation/expressions/date_from_string_on_null.js
index 12b7d673984..caf7cf1216d 100644
--- a/jstests/aggregation/expressions/date_from_string_on_null.js
+++ b/jstests/aggregation/expressions/date_from_string_on_null.js
@@ -2,67 +2,65 @@
* Tests for the $dateFromString expression with the optional 'onNull' parameter.
*/
(function() {
- "use strict";
+"use strict";
- const onNullValue = ISODate("2017-07-04T11:56:02Z");
- const coll = db.date_from_string_on_null;
- coll.drop();
+const onNullValue = ISODate("2017-07-04T11:56:02Z");
+const coll = db.date_from_string_on_null;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 0}));
- // Test that the 'onNull' value is returned when the 'dateString' is nullish.
- for (let inputDate of[null, undefined, "$missing"]) {
- assert.eq(
- [{_id: 0, date: onNullValue}],
- coll.aggregate({
- $project:
- {date: {$dateFromString: {dateString: inputDate, onNull: onNullValue}}}
- })
- .toArray());
- }
+// Test that the 'onNull' value is returned when the 'dateString' is nullish.
+for (let inputDate of [null, undefined, "$missing"]) {
+ assert.eq(
+ [{_id: 0, date: onNullValue}],
+ coll.aggregate(
+ {$project: {date: {$dateFromString: {dateString: inputDate, onNull: onNullValue}}}})
+ .toArray());
+}
- // Test that null is returned when the 'timezone' or 'format' is nullish, regardless of the
- // 'onNull' value.
- for (let nullishValue of[null, undefined, "$missing"]) {
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: "2018-02-06T11:56:02Z",
- format: nullishValue,
- onNull: onNullValue
- }
+// Test that null is returned when the 'timezone' or 'format' is nullish, regardless of the
+// 'onNull' value.
+for (let nullishValue of [null, undefined, "$missing"]) {
+ assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {
+ dateString: "2018-02-06T11:56:02Z",
+ format: nullishValue,
+ onNull: onNullValue
}
}
- })
- .toArray());
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateFromString: {
- dateString: "2018-02-06T11:56:02Z",
- timezone: nullishValue,
- onNull: onNullValue
- }
+ }
+ })
+ .toArray());
+ assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateFromString: {
+ dateString: "2018-02-06T11:56:02Z",
+ timezone: nullishValue,
+ onNull: onNullValue
}
}
- })
- .toArray());
- }
+ }
+ })
+ .toArray());
+}
- // Test that 'onNull' can be any type, not just an ISODate.
- for (let onNull of[{}, 5, "Not a date", null, undefined]) {
- assert.eq(
- [{_id: 0, date: onNull}],
- coll.aggregate(
- {$project: {date: {$dateFromString: {dateString: "$missing", onNull: onNull}}}})
- .toArray());
- }
+// Test that 'onNull' can be any type, not just an ISODate.
+for (let onNull of [{}, 5, "Not a date", null, undefined]) {
assert.eq(
- [{_id: 0}],
+ [{_id: 0, date: onNull}],
coll.aggregate(
- {$project: {date: {$dateFromString: {dateString: "$missing", onNull: "$missing"}}}})
+ {$project: {date: {$dateFromString: {dateString: "$missing", onNull: onNull}}}})
.toArray());
+}
+assert.eq(
+ [{_id: 0}],
+ coll.aggregate(
+ {$project: {date: {$dateFromString: {dateString: "$missing", onNull: "$missing"}}}})
+ .toArray());
})();
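
A hedged sketch of the 'onNull' contract tested above (same hypothetical "scratch" collection):

// Sketch only: 'onNull' fires when 'dateString' is nullish (here a
// missing field), and the fallback may be any BSON value.
db.scratch.aggregate([{
    $project: {d: {$dateFromString: {dateString: "$noSuchField", onNull: "fallback"}}}
}]).toArray();
// => [{_id: 0, d: "fallback"}]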
diff --git a/jstests/aggregation/expressions/date_to_parts.js b/jstests/aggregation/expressions/date_to_parts.js
index c1a41abf0b1..47344f5deda 100644
--- a/jstests/aggregation/expressions/date_to_parts.js
+++ b/jstests/aggregation/expressions/date_to_parts.js
@@ -1,195 +1,109 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
(function() {
- "use strict";
+"use strict";
- const coll = db.dateToParts;
- coll.drop();
+const coll = db.dateToParts;
+coll.drop();
- /* --------------------------------------------------------------------------------------- */
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "UTC"},
- {_id: 1, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "Europe/London"},
- {_id: 2, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "America/New_York", iso: true},
- {_id: 3, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "America/New_York", iso: false},
- ]));
+/* --------------------------------------------------------------------------------------- */
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "UTC"},
+ {_id: 1, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "Europe/London"},
+ {_id: 2, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "America/New_York", iso: true},
+ {_id: 3, date: new ISODate("2017-06-19T15:13:25.713Z"), tz: "America/New_York", iso: false},
+]));
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: 0,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 1,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 2,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 3,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date"}}}}, {$sort: {_id: 1}}])
- .toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date"}}}}, {$sort: {_id: 1}}])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: 0,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 1,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 16,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 16, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 2,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 11,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 11, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 3,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 11,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([
- {$project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz"}}}},
- {$sort: {_id: 1}}
- ])
- .toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 11, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([
+ {$project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz"}}}},
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: 0,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 1,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 16,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 16, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 2,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 11,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- {
+ date:
+ {year: 2017, month: 6, day: 19, hour: 11, minute: 13, second: 25, millisecond: 713}
+ },
+ {
_id: 3,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 11,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([
- {
- $project: {
- date:
- {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": false}}
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 11, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([
+ {
+ $project:
+ {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": false}}}
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: 0,
date: {
isoWeekYear: 2017,
@@ -200,8 +114,8 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
second: 25,
millisecond: 713
}
- },
- {
+ },
+ {
_id: 1,
date: {
isoWeekYear: 2017,
@@ -212,8 +126,8 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
second: 25,
millisecond: 713
}
- },
- {
+ },
+ {
_id: 2,
date: {
isoWeekYear: 2017,
@@ -224,8 +138,8 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
second: 25,
millisecond: 713
}
- },
- {
+ },
+ {
_id: 3,
date: {
isoWeekYear: 2017,
@@ -236,21 +150,20 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
second: 25,
millisecond: 713
}
- },
- ],
- coll.aggregate([
- {
- $project: {
- date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": true}}
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+ },
+ ],
+ coll.aggregate([
+ {
+ $project:
+ {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": true}}}
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: 2,
date: {
isoWeekYear: 2017,
@@ -261,104 +174,72 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
second: 25,
millisecond: 713
}
- },
- {
+ },
+ {
_id: 3,
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 11,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([
- {$match: {iso: {$exists: true}}},
- {
- $project: {
- date:
- {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": "$iso"}}
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 11, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([
+ {$match: {iso: {$exists: true}}},
+ {
+ $project:
+ {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": "$iso"}}}
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- /* Tests with timestamp */
- assert(coll.drop());
+/* --------------------------------------------------------------------------------------- */
+/* Tests with timestamp */
+assert(coll.drop());
- assert.writeOK(coll.insert([
- {
- _id: ObjectId("58c7cba47bbadf523cf2c313"),
- date: new ISODate("2017-06-19T15:13:25.713Z"),
- tz: "Europe/London"
- },
- ]));
+assert.writeOK(coll.insert([
+ {
+ _id: ObjectId("58c7cba47bbadf523cf2c313"),
+ date: new ISODate("2017-06-19T15:13:25.713Z"),
+ tz: "Europe/London"
+ },
+]));
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: ObjectId("58c7cba47bbadf523cf2c313"),
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 15,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date"}}}}]).toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 15, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date"}}}}]).toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: ObjectId("58c7cba47bbadf523cf2c313"),
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 16,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz"}}}}])
- .toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 16, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz"}}}}])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: ObjectId("58c7cba47bbadf523cf2c313"),
- date: {
- year: 2017,
- month: 6,
- day: 19,
- hour: 16,
- minute: 13,
- second: 25,
- millisecond: 713
- }
- },
- ],
- coll.aggregate([{
- $project: {
- date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": false}}
- }
- }])
- .toArray());
+ date:
+ {year: 2017, month: 6, day: 19, hour: 16, minute: 13, second: 25, millisecond: 713}
+ },
+ ],
+ coll.aggregate([{
+ $project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": false}}}
+ }])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: ObjectId("58c7cba47bbadf523cf2c313"),
date: {
isoWeekYear: 2017,
@@ -369,86 +250,88 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
second: 25,
millisecond: 713
}
- },
- ],
- coll.aggregate([{
- $project:
- {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": true}}}
- }])
- .toArray());
+ },
+ ],
+ coll.aggregate([{
+ $project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": true}}}
+ }])
+ .toArray());
- assert.eq(
- [
- {
+assert.eq(
+ [
+ {
_id: ObjectId("58c7cba47bbadf523cf2c313"),
- date:
- {year: 2017, month: 3, day: 14, hour: 10, minute: 53, second: 24, millisecond: 0}
- },
- ],
- coll.aggregate([{
- $project:
- {date: {'$dateToParts': {date: "$_id", "timezone": "$tz", "iso8601": false}}}
- }])
- .toArray());
-
- /* --------------------------------------------------------------------------------------- */
- assert(coll.drop());
+ date: {year: 2017, month: 3, day: 14, hour: 10, minute: 53, second: 24, millisecond: 0}
+ },
+ ],
+ coll.aggregate([{
+ $project: {date: {'$dateToParts': {date: "$_id", "timezone": "$tz", "iso8601": false}}}
+ }])
+ .toArray());
- assert.writeOK(coll.insert([
- {_id: 0, date: ISODate("2017-06-27T12:00:20Z")},
- ]));
+/* --------------------------------------------------------------------------------------- */
+assert(coll.drop());
- assert.eq(
- [
- {_id: 0, date: null},
- ],
- coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date", timezone: "$tz"}}}}])
- .toArray());
+assert.writeOK(coll.insert([
+ {_id: 0, date: ISODate("2017-06-27T12:00:20Z")},
+]));
- /* --------------------------------------------------------------------------------------- */
- assert(coll.drop());
+assert.eq(
+ [
+ {_id: 0, date: null},
+ ],
+ coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date", timezone: "$tz"}}}}])
+ .toArray());
- assert.writeOK(coll.insert([
- {_id: 0, date: ISODate("2017-06-27T12:00:20Z")},
- ]));
+/* --------------------------------------------------------------------------------------- */
+assert(coll.drop());
- assert.eq(
- [
- {_id: 0, date: null},
- ],
- coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date", iso8601: "$iso8601"}}}}])
- .toArray());
+assert.writeOK(coll.insert([
+ {_id: 0, date: ISODate("2017-06-27T12:00:20Z")},
+]));
- /* --------------------------------------------------------------------------------------- */
- assert(coll.drop());
+assert.eq(
+ [
+ {_id: 0, date: null},
+ ],
+ coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date", iso8601: "$iso8601"}}}}])
+ .toArray());
- assert.writeOK(coll.insert([
- {_id: 0, tz: "Europe/London"},
- ]));
+/* --------------------------------------------------------------------------------------- */
+assert(coll.drop());
- assert.eq(
- [
- {_id: 0, date: null},
- ],
- coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date"}}}}]).toArray());
+assert.writeOK(coll.insert([
+ {_id: 0, tz: "Europe/London"},
+]));
- /* --------------------------------------------------------------------------------------- */
+assert.eq(
+ [
+ {_id: 0, date: null},
+ ],
+ coll.aggregate([{$project: {date: {'$dateToParts': {date: "$date"}}}}]).toArray());
- let pipeline = {$project: {date: {'$dateToParts': {"timezone": "$tz"}}}};
- assertErrorCode(coll, pipeline, 40522);
+/* --------------------------------------------------------------------------------------- */
- pipeline = {
- $project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": 5}}}
- };
- assertErrorCode(coll, pipeline, 40521);
+let pipeline = {$project: {date: {'$dateToParts': {"timezone": "$tz"}}}};
+assertErrorCode(coll, pipeline, 40522);
- pipeline = {$project: {date: {'$dateToParts': {date: 42}}}};
- assertErrorCode(coll, pipeline, 16006);
+pipeline = {
+ $project: {date: {'$dateToParts': {date: "$date", "timezone": "$tz", "iso8601": 5}}}
+};
+assertErrorCode(coll, pipeline, 40521);
- pipeline = {$project: {date: {'$dateToParts': {date: "$date", "timezone": 5}}}};
- assertErrorCode(coll, pipeline, 40517);
+pipeline = {
+ $project: {date: {'$dateToParts': {date: 42}}}
+};
+assertErrorCode(coll, pipeline, 16006);
- pipeline = {$project: {date: {'$dateToParts': {date: "$date", "timezone": "DoesNot/Exist"}}}};
- assertErrorCode(coll, pipeline, 40485);
+pipeline = {
+ $project: {date: {'$dateToParts': {date: "$date", "timezone": 5}}}
+};
+assertErrorCode(coll, pipeline, 40517);
+pipeline = {
+ $project: {date: {'$dateToParts': {date: "$date", "timezone": "DoesNot/Exist"}}}
+};
+assertErrorCode(coll, pipeline, 40485);
})();
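
For reference, a hedged sketch contrasting the two output shapes of $dateToParts exercised above (same hypothetical "scratch" collection):

// Sketch only: iso8601: true swaps year/month/day for
// isoWeekYear/isoWeek/isoDayOfWeek on the same instant.
db.scratch.aggregate([{
    $project: {
        natural: {$dateToParts: {date: ISODate("2017-06-19T15:13:25.713Z")}},
        iso: {$dateToParts: {date: ISODate("2017-06-19T15:13:25.713Z"), iso8601: true}}
    }
}]).toArray();
// natural => {year: 2017, month: 6, day: 19, hour: 15, minute: 13, ...}
// iso     => {isoWeekYear: 2017, isoWeek: 25, isoDayOfWeek: 1, hour: 15, ...}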
diff --git a/jstests/aggregation/expressions/date_to_string.js b/jstests/aggregation/expressions/date_to_string.js
index dad8775a3e8..9452fd54cc0 100644
--- a/jstests/aggregation/expressions/date_to_string.js
+++ b/jstests/aggregation/expressions/date_to_string.js
@@ -1,290 +1,283 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
(function() {
- "use strict";
+"use strict";
- const coll = db.date_to_string;
- coll.drop();
+const coll = db.date_to_string;
+coll.drop();
- /* --------------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------------- */
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"},
- {_id: 1, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Europe/London"},
- {_id: 2, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"},
- {_id: 3, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Australia/Eucla"},
- {_id: 4, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Asia/Kathmandu"},
- {_id: 5, date: new ISODate("1935-07-10T11:36:37.133Z"), tz: "Europe/Amsterdam"},
- {_id: 6, date: new ISODate("1900-07-10T11:41:22.418Z"), tz: "America/Caracas"},
- ]));
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"},
+ {_id: 1, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Europe/London"},
+ {_id: 2, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"},
+ {_id: 3, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Australia/Eucla"},
+ {_id: 4, date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "Asia/Kathmandu"},
+ {_id: 5, date: new ISODate("1935-07-10T11:36:37.133Z"), tz: "Europe/Amsterdam"},
+ {_id: 6, date: new ISODate("1900-07-10T11:41:22.418Z"), tz: "America/Caracas"},
+]));
- assert.eq(
- [
- {_id: 0, date: "2017-07-04 14:56:42 +0000 (0 minutes)"},
- {_id: 1, date: "2017-07-04 15:56:42 +0100 (60 minutes)"},
- {_id: 2, date: "2017-07-04 10:56:42 -0400 (-240 minutes)"},
- {_id: 3, date: "2017-07-04 23:41:42 +0845 (525 minutes)"},
- {_id: 4, date: "2017-07-04 20:41:42 +0545 (345 minutes)"},
- {_id: 5, date: "1935-07-10 12:56:09 +0119 (79 minutes)"},
- {_id: 6, date: "1900-07-10 07:13:42 -0427 (-267 minutes)"},
- ],
- coll.aggregate([
- {
- $project: {
- date: {
- $dateToString: {
- format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
- date: "$date",
- timezone: "$tz"
- }
- }
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+assert.eq(
+ [
+ {_id: 0, date: "2017-07-04 14:56:42 +0000 (0 minutes)"},
+ {_id: 1, date: "2017-07-04 15:56:42 +0100 (60 minutes)"},
+ {_id: 2, date: "2017-07-04 10:56:42 -0400 (-240 minutes)"},
+ {_id: 3, date: "2017-07-04 23:41:42 +0845 (525 minutes)"},
+ {_id: 4, date: "2017-07-04 20:41:42 +0545 (345 minutes)"},
+ {_id: 5, date: "1935-07-10 12:56:09 +0119 (79 minutes)"},
+ {_id: 6, date: "1900-07-10 07:13:42 -0427 (-267 minutes)"},
+ ],
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateToString: {
+ format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
+ date: "$date",
+ timezone: "$tz"
+ }
+ }
+ }
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- coll.drop();
+/* --------------------------------------------------------------------------------------- */
+coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
- {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
- {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
- ]));
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
+ {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
+ {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
+]));
- assert.eq(
- [
- {_id: 0, date: "2017-01-04 10:08:51 -0500 (-300 minutes)"},
- {_id: 1, date: "2017-07-04 11:09:12 -0400 (-240 minutes)"},
- {_id: 2, date: "2017-12-04 10:09:14 -0500 (-300 minutes)"},
- ],
- coll.aggregate([
- {
- $project: {
- date: {
- $dateToString: {
- format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
- date: "$date",
- timezone: "America/New_York"
- }
- }
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+assert.eq(
+ [
+ {_id: 0, date: "2017-01-04 10:08:51 -0500 (-300 minutes)"},
+ {_id: 1, date: "2017-07-04 11:09:12 -0400 (-240 minutes)"},
+ {_id: 2, date: "2017-12-04 10:09:14 -0500 (-300 minutes)"},
+ ],
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateToString: {
+ format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
+ date: "$date",
+ timezone: "America/New_York"
+ }
+ }
+ }
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- coll.drop();
+/* --------------------------------------------------------------------------------------- */
+coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
- {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
- {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
- ]));
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
+ {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
+ {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
+]));
- assert.eq(
- [
- {_id: 0, date: "2017-01-04 15:08:51 +0000 (0 minutes)"},
- {_id: 1, date: "2017-07-04 15:09:12 +0000 (0 minutes)"},
- {_id: 2, date: "2017-12-04 15:09:14 +0000 (0 minutes)"},
- ],
- coll.aggregate([
- {
- $project: {
- date: {
- $dateToString:
- {format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)", date: "$date"}
- }
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+assert.eq(
+ [
+ {_id: 0, date: "2017-01-04 15:08:51 +0000 (0 minutes)"},
+ {_id: 1, date: "2017-07-04 15:09:12 +0000 (0 minutes)"},
+ {_id: 2, date: "2017-12-04 15:09:14 +0000 (0 minutes)"},
+ ],
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateToString: {format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)", date: "$date"}
+ }
+ }
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- coll.drop();
+/* --------------------------------------------------------------------------------------- */
+coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-01-01T15:08:51.911Z")},
- {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
- {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
- ]));
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-01-01T15:08:51.911Z")},
+ {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
+ {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
+]));
- assert.eq(
- [
- {_id: 0, date: "Natural: 2017-W1-01, ISO: 2016-W7-52"},
- {_id: 1, date: "Natural: 2017-W3-27, ISO: 2017-W2-27"},
- {_id: 2, date: "Natural: 2017-W2-49, ISO: 2017-W1-49"},
- ],
- coll.aggregate([
- {
- $project: {
- date: {
- $dateToString:
- {format: "Natural: %Y-W%w-%U, ISO: %G-W%u-%V", date: "$date"}
- }
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+assert.eq(
+ [
+ {_id: 0, date: "Natural: 2017-W1-01, ISO: 2016-W7-52"},
+ {_id: 1, date: "Natural: 2017-W3-27, ISO: 2017-W2-27"},
+ {_id: 2, date: "Natural: 2017-W2-49, ISO: 2017-W1-49"},
+ ],
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateToString: {format: "Natural: %Y-W%w-%U, ISO: %G-W%u-%V", date: "$date"}
+ }
+ }
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- /* Test that missing expressions, turn into BSON null values */
- coll.drop();
+/* --------------------------------------------------------------------------------------- */
+/* Test that missing expressions turn into BSON null values */
+coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
- {_id: 1, date: new ISODate("2017-01-04T15:08:51.911Z"), timezone: null},
- {_id: 2, date: new ISODate("2017-01-04T15:08:51.911Z"), timezone: undefined},
- {_id: 3, timezone: "Europe/Oslo"},
- {_id: 4, date: null, timezone: "Europe/Oslo"},
- {_id: 5, date: undefined, timezone: "Europe/Oslo"},
- ]));
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
+ {_id: 1, date: new ISODate("2017-01-04T15:08:51.911Z"), timezone: null},
+ {_id: 2, date: new ISODate("2017-01-04T15:08:51.911Z"), timezone: undefined},
+ {_id: 3, timezone: "Europe/Oslo"},
+ {_id: 4, date: null, timezone: "Europe/Oslo"},
+ {_id: 5, date: undefined, timezone: "Europe/Oslo"},
+]));
- assert.eq(
- [
- {_id: 0, date: null},
- {_id: 1, date: null},
- {_id: 2, date: null},
- {_id: 3, date: null},
- {_id: 4, date: null},
- {_id: 5, date: null},
- ],
- coll.aggregate([
- {
- $project: {
- date: {
- $dateToString: {
- format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
- date: "$date",
- timezone: "$timezone"
- }
- }
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray());
+assert.eq(
+ [
+ {_id: 0, date: null},
+ {_id: 1, date: null},
+ {_id: 2, date: null},
+ {_id: 3, date: null},
+ {_id: 4, date: null},
+ {_id: 5, date: null},
+ ],
+ coll.aggregate([
+ {
+ $project: {
+ date: {
+ $dateToString: {
+ format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
+ date: "$date",
+ timezone: "$timezone"
+ }
+ }
+ }
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- /* Test that the default format is "%Y-%m-%dT%H:%M:%S.%LZ" if none specified. */
- coll.drop();
+/* --------------------------------------------------------------------------------------- */
+/* Test that the default format is "%Y-%m-%dT%H:%M:%S.%LZ" if none specified. */
+coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
- {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
- {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
- ]));
+assert.writeOK(coll.insert([
+ {_id: 0, date: new ISODate("2017-01-04T15:08:51.911Z")},
+ {_id: 1, date: new ISODate("2017-07-04T15:09:12.911Z")},
+ {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
+]));
- assert.eq(
- [
- {_id: 0, date: "2017-01-04T10:08:51.911Z"},
- {_id: 1, date: "2017-07-04T11:09:12.911Z"},
- {_id: 2, date: "2017-12-04T10:09:14.911Z"},
- ],
- coll.aggregate([
- {$project: {date: {$dateToString: {date: "$date", timezone: "America/New_York"}}}},
- {$sort: {_id: 1}}
- ])
- .toArray());
+assert.eq(
+ [
+ {_id: 0, date: "2017-01-04T10:08:51.911Z"},
+ {_id: 1, date: "2017-07-04T11:09:12.911Z"},
+ {_id: 2, date: "2017-12-04T10:09:14.911Z"},
+ ],
+ coll.aggregate([
+ {$project: {date: {$dateToString: {date: "$date", timezone: "America/New_York"}}}},
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
- /* Test that null is returned when 'format' evaluates to nullish. */
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
+/* --------------------------------------------------------------------------------------- */
+/* Test that null is returned when 'format' evaluates to nullish. */
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateToString: {
- date: new ISODate("2017-01-04T15:08:51.911Z"),
- format: null,
- }
+assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateToString: {
+ date: new ISODate("2017-01-04T15:08:51.911Z"),
+ format: null,
}
}
- })
- .toArray());
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateToString: {
- date: new ISODate("2017-01-04T15:08:51.911Z"),
- format: undefined,
- }
+ }
+ })
+ .toArray());
+assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateToString: {
+ date: new ISODate("2017-01-04T15:08:51.911Z"),
+ format: undefined,
}
}
- })
- .toArray());
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateToString: {
- date: new ISODate("2017-01-04T15:08:51.911Z"),
- format: "$missing",
- }
+ }
+ })
+ .toArray());
+assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateToString: {
+ date: new ISODate("2017-01-04T15:08:51.911Z"),
+ format: "$missing",
}
}
- })
- .toArray());
+ }
+ })
+ .toArray());
- /* --------------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------------- */
- let pipeline = [{
- $project:
- {date: {$dateToString: {date: new ISODate("2017-01-04T15:08:51.911Z"), format: 5}}}
- }];
- assertErrMsgContains(coll, pipeline, 18533, "$dateToString requires that 'format' be a string");
+let pipeline = [
+ {$project: {date: {$dateToString: {date: new ISODate("2017-01-04T15:08:51.911Z"), format: 5}}}}
+];
+assertErrMsgContains(coll, pipeline, 18533, "$dateToString requires that 'format' be a string");
- pipeline =
- [{$project: {date: {$dateToString: {format: "%Y-%m-%d %H:%M:%S", timezone: "$tz"}}}}];
- assertErrMsgContains(coll, pipeline, 18628, "Missing 'date' parameter to $dateToString");
+pipeline = [{$project: {date: {$dateToString: {format: "%Y-%m-%d %H:%M:%S", timezone: "$tz"}}}}];
+assertErrMsgContains(coll, pipeline, 18628, "Missing 'date' parameter to $dateToString");
- pipeline = [{
- $project: {
- date: {
- $dateToString: {
- date: new ISODate("2017-01-04T15:08:51.911Z"),
- format: "%Y-%m-%d %H:%M:%S",
- timezone: 5
- }
+pipeline = [{
+ $project: {
+ date: {
+ $dateToString: {
+ date: new ISODate("2017-01-04T15:08:51.911Z"),
+ format: "%Y-%m-%d %H:%M:%S",
+ timezone: 5
}
}
- }];
- assertErrMsgContains(coll, pipeline, 40517, "timezone must evaluate to a string");
+ }
+}];
+assertErrMsgContains(coll, pipeline, 40517, "timezone must evaluate to a string");
- pipeline = [{$project: {date: {$dateToString: {format: "%Y-%m-%d %H:%M:%S", date: 42}}}}];
- assertErrMsgContains(coll, pipeline, 16006, "can't convert from BSON type double to Date");
+pipeline = [{$project: {date: {$dateToString: {format: "%Y-%m-%d %H:%M:%S", date: 42}}}}];
+assertErrMsgContains(coll, pipeline, 16006, "can't convert from BSON type double to Date");
- pipeline = [{
- $project: {
- date: {
- $dateToString: {
- date: new ISODate("2017-01-04T15:08:51.911Z"),
- format: "%Y-%m-%d %H:%M:%S",
- timezone: "DoesNotExist"
- }
+pipeline = [{
+ $project: {
+ date: {
+ $dateToString: {
+ date: new ISODate("2017-01-04T15:08:51.911Z"),
+ format: "%Y-%m-%d %H:%M:%S",
+ timezone: "DoesNotExist"
}
}
- }];
- assertErrMsgContains(coll, pipeline, 40485, "unrecognized time zone identifier");
+ }
+}];
+assertErrMsgContains(coll, pipeline, 40485, "unrecognized time zone identifier");
- pipeline = [{
- $project:
- {date: {$dateToString: {date: new ISODate("2017-01-04T15:08:51.911Z"), format: "%"}}}
- }];
- assertErrMsgContains(coll, pipeline, 18535, "Unmatched '%' at end of format string");
+pipeline = [{
+ $project: {date: {$dateToString: {date: new ISODate("2017-01-04T15:08:51.911Z"), format: "%"}}}
+}];
+assertErrMsgContains(coll, pipeline, 18535, "Unmatched '%' at end of format string");
- // Fails for unknown format specifier.
- pipeline = [{
- $project: {
- date: {$dateToString: {date: new ISODate("2017-01-04T15:08:51.911Z"), format: "%n"}}
- }
- }];
- assertErrMsgContains(coll, pipeline, 18536, "Invalid format character '%n' in format string");
+// Fails for unknown format specifier.
+pipeline = [{
+ $project: {date: {$dateToString: {date: new ISODate("2017-01-04T15:08:51.911Z"), format: "%n"}}}
+}];
+assertErrMsgContains(coll, pipeline, 18536, "Invalid format character '%n' in format string");
})();
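
A hedged sketch of the offset specifiers these cases exercise, mirroring the expected strings above: %z renders the UTC offset and %Z the offset in minutes (same hypothetical "scratch" collection).

// Sketch only: Europe/London is UTC+1 (BST) on this date.
db.scratch.aggregate([{
    $project: {
        s: {
            $dateToString: {
                format: "%H:%M %z (%Z minutes)",
                date: ISODate("2017-07-04T14:56:42Z"),
                timezone: "Europe/London"
            }
        }
    }
}]).toArray();
// => [{_id: 0, s: "15:56 +0100 (60 minutes)"}]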
diff --git a/jstests/aggregation/expressions/date_to_string_on_null.js b/jstests/aggregation/expressions/date_to_string_on_null.js
index e5b3ec50f1b..7b3bdc07538 100644
--- a/jstests/aggregation/expressions/date_to_string_on_null.js
+++ b/jstests/aggregation/expressions/date_to_string_on_null.js
@@ -2,76 +2,71 @@
* Tests for the $dateToString expression with the optional 'onNull' parameter.
*/
(function() {
- "use strict";
+"use strict";
- const onNullValue = ISODate("2017-07-04T11:56:02Z");
- const coll = db.date_to_string_on_null;
- coll.drop();
+const onNullValue = ISODate("2017-07-04T11:56:02Z");
+const coll = db.date_to_string_on_null;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 0}));
- for (let nullishValue of[null, undefined, "$missing"]) {
- // Test that the 'onNull' value is returned when the 'date' is nullish.
- assert.eq([{_id: 0, date: onNullValue}],
- coll.aggregate({
- $project: {
- date: {
- $dateToString: {
- date: nullishValue,
- format: "%Y-%m-%d %H:%M:%S",
- onNull: onNullValue
- }
- }
- }
- })
- .toArray());
-
- // Test that null is returned when the 'timezone' is nullish, regardless of the 'onNull'
- // value.
- assert.eq([{_id: 0, date: null}],
- coll.aggregate({
- $project: {
- date: {
- $dateToString: {
- date: "2018-02-06T11:56:02Z",
- format: "%Y-%m-%d %H:%M:%S",
- timezone: nullishValue,
- onNull: onNullValue
- }
- }
- }
- })
- .toArray());
- }
+for (let nullishValue of [null, undefined, "$missing"]) {
+ // Test that the 'onNull' value is returned when the 'date' is nullish.
+ assert.eq(
+ [{_id: 0, date: onNullValue}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateToString:
+ {date: nullishValue, format: "%Y-%m-%d %H:%M:%S", onNull: onNullValue}
+ }
+ }
+ })
+ .toArray());
- // Test that 'onNull' can be any type, not just an ISODate.
- for (let onNullValue of[{}, 5, "Not a date", null, undefined]) {
- assert.eq([{_id: 0, date: onNullValue}],
- coll.aggregate({
- $project: {
- date: {
- $dateToString: {
- date: "$missing",
- format: "%Y-%m-%d %H:%M:%S",
- onNull: onNullValue
- }
+ // Test that null is returned when the 'timezone' is nullish, regardless of the 'onNull'
+ // value.
+ assert.eq([{_id: 0, date: null}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateToString: {
+ date: "2018-02-06T11:56:02Z",
+ format: "%Y-%m-%d %H:%M:%S",
+ timezone: nullishValue,
+ onNull: onNullValue
}
}
- })
- .toArray());
- }
+ }
+ })
+ .toArray());
+}
- // Test that 'onNull' can be missing, resulting in no output field when used within a $project
- // stage.
+// Test that 'onNull' can be any type, not just an ISODate.
+for (let onNullValue of [{}, 5, "Not a date", null, undefined]) {
assert.eq(
- [{_id: 0}],
+ [{_id: 0, date: onNullValue}],
coll.aggregate({
$project: {
date: {
$dateToString:
- {date: "$missing", format: "%Y-%m-%d %H:%M:%S", onNull: "$missing"}
+ {date: "$missing", format: "%Y-%m-%d %H:%M:%S", onNull: onNullValue}
}
}
})
.toArray());
+}
+
+// Test that 'onNull' can be missing, resulting in no output field when used within a $project
+// stage.
+assert.eq([{_id: 0}],
+ coll.aggregate({
+ $project: {
+ date: {
+ $dateToString:
+ {date: "$missing", format: "%Y-%m-%d %H:%M:%S", onNull: "$missing"}
+ }
+ }
+ })
+ .toArray());
})();
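
The same 'onNull' shape applies to $dateToString; a hedged sketch (same hypothetical "scratch" collection):

// Sketch only: a nullish 'date' returns the 'onNull' value verbatim,
// whereas a nullish 'timezone' yields null regardless of 'onNull'.
db.scratch.aggregate([{
    $project: {s: {$dateToString: {date: "$absent", format: "%Y", onNull: "no date"}}}
}]).toArray();
// => [{_id: 0, s: "no date"}]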
diff --git a/jstests/aggregation/expressions/expression_mod.js b/jstests/aggregation/expressions/expression_mod.js
index 63469ca8177..677f634bc7e 100644
--- a/jstests/aggregation/expressions/expression_mod.js
+++ b/jstests/aggregation/expressions/expression_mod.js
@@ -3,92 +3,92 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExpression.
(function() {
- "use strict";
+"use strict";
- var testDB = db.getSiblingDB("expression_mod");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
+var testDB = db.getSiblingDB("expression_mod");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
- //
- // Confirm different input numeric types are evaluated correctly.
- //
+//
+// Confirm different input numeric types are evaluated correctly.
+//
- // Aggregate checking various combinations of number types.
- // The $match portion ensures they are of the correct type as the shell turns the ints back to
- // doubles at the end so we can not check types with assert.
- coll.save({});
- var result = coll.aggregate({
- $project: {
- _id: 0,
- dub_dub: {$mod: [138.5, 3.0]},
- dub_int: {$mod: [138.5, NumberLong(3)]},
- dub_long: {$mod: [138.5, NumberInt(3)]},
- int_dub: {$mod: [NumberInt(8), 3.25]},
- int_dubint: {$mod: [NumberInt(8), 3.0]},
- int_int: {$mod: [NumberInt(8), NumberInt(3)]},
- int_long: {$mod: [NumberInt(8), NumberLong(3)]},
- long_dub: {$mod: [NumberLong(8), 3.25]},
- long_dubint: {$mod: [NumberLong(8), 3.0]},
- long_dublong: {$mod: [NumberLong(500000000000), 450000000000.0]},
- long_int: {$mod: [NumberLong(8), NumberInt(3)]},
- long_long: {$mod: [NumberLong(8), NumberLong(3)]},
- verylong_verylong: {$mod: [NumberLong(800000000000), NumberLong(300000000000)]}
- }
- },
- {
- $match: {
- // 1 is NumberDouble
- dub_dub: {$type: 1},
- dub_int: {$type: 1},
- dub_long: {$type: 1},
- int_dub: {$type: 1},
- // 16 is NumberInt
- int_dubint: {$type: 16},
- int_int: {$type: 16},
- // 18 is NumberLong
- int_long: {$type: 18},
- long_dub: {$type: 1},
- long_dubint: {$type: 18},
- long_dublong: {$type: 1},
- long_int: {$type: 18},
- long_long: {$type: 18},
- verylong_verylong: {$type: 18}
- }
- });
+// Aggregate checking various combinations of number types.
+// The $match portion ensures they are of the correct type as the shell turns the ints back to
+// doubles at the end, so we cannot check types with assert.
+coll.save({});
+var result = coll.aggregate({
+ $project: {
+ _id: 0,
+ dub_dub: {$mod: [138.5, 3.0]},
+ dub_int: {$mod: [138.5, NumberLong(3)]},
+ dub_long: {$mod: [138.5, NumberInt(3)]},
+ int_dub: {$mod: [NumberInt(8), 3.25]},
+ int_dubint: {$mod: [NumberInt(8), 3.0]},
+ int_int: {$mod: [NumberInt(8), NumberInt(3)]},
+ int_long: {$mod: [NumberInt(8), NumberLong(3)]},
+ long_dub: {$mod: [NumberLong(8), 3.25]},
+ long_dubint: {$mod: [NumberLong(8), 3.0]},
+ long_dublong: {$mod: [NumberLong(500000000000), 450000000000.0]},
+ long_int: {$mod: [NumberLong(8), NumberInt(3)]},
+ long_long: {$mod: [NumberLong(8), NumberLong(3)]},
+ verylong_verylong: {$mod: [NumberLong(800000000000), NumberLong(300000000000)]}
+ }
+},
+ {
+ $match: {
+ // 1 is NumberDouble
+ dub_dub: {$type: 1},
+ dub_int: {$type: 1},
+ dub_long: {$type: 1},
+ int_dub: {$type: 1},
+ // 16 is NumberInt
+ int_dubint: {$type: 16},
+ int_int: {$type: 16},
+ // 18 is NumberLong
+ int_long: {$type: 18},
+ long_dub: {$type: 1},
+ long_dubint: {$type: 18},
+ long_dublong: {$type: 1},
+ long_int: {$type: 18},
+ long_long: {$type: 18},
+ verylong_verylong: {$type: 18}
+ }
+ });
- // Correct answers (it is mainly the types that are important here).
- var expectedResult = [{
- dub_dub: 0.5,
- dub_int: 0.5,
- dub_long: 0.5,
- int_dub: 1.5,
- int_dubint: 2,
- int_int: 2,
- int_long: NumberLong(2),
- long_dub: 1.5,
- long_dubint: NumberLong(2),
- long_dublong: 50000000000,
- long_int: NumberLong(2),
- long_long: NumberLong(2),
- verylong_verylong: NumberLong(200000000000)
- }];
+// Correct answers (it is mainly the types that are important here).
+var expectedResult = [{
+ dub_dub: 0.5,
+ dub_int: 0.5,
+ dub_long: 0.5,
+ int_dub: 1.5,
+ int_dubint: 2,
+ int_int: 2,
+ int_long: NumberLong(2),
+ long_dub: 1.5,
+ long_dubint: NumberLong(2),
+ long_dublong: 50000000000,
+ long_int: NumberLong(2),
+ long_long: NumberLong(2),
+ verylong_verylong: NumberLong(200000000000)
+}];
- assert.eq(result.toArray(), expectedResult, tojson(result));
+assert.eq(result.toArray(), expectedResult, tojson(result));
- //
- // Confirm error cases.
- //
+//
+// Confirm error cases.
+//
- // Confirm mod by 0 fails in an expected manner.
- assertErrorCode(coll, {$project: {a: {$mod: [10, 0 /*double*/]}}}, 16610);
- assertErrorCode(coll, {$project: {a: {$mod: [NumberInt(10), NumberInt(0)]}}}, 16610);
- assertErrorCode(coll, {$project: {a: {$mod: [NumberLong(10), NumberLong(0)]}}}, 16610);
+// Confirm mod by 0 fails in an expected manner.
+assertErrorCode(coll, {$project: {a: {$mod: [10, 0 /*double*/]}}}, 16610);
+assertErrorCode(coll, {$project: {a: {$mod: [NumberInt(10), NumberInt(0)]}}}, 16610);
+assertErrorCode(coll, {$project: {a: {$mod: [NumberLong(10), NumberLong(0)]}}}, 16610);
- // Confirm expected behavior for NaN and Infinity values.
- testExpression(coll, {$mod: [10, NaN]}, NaN);
- testExpression(coll, {$mod: [10, Infinity]}, 10);
- testExpression(coll, {$mod: [10, -Infinity]}, 10);
- testExpression(coll, {$mod: [Infinity, 10]}, NaN);
- testExpression(coll, {$mod: [-Infinity, 10]}, NaN);
- testExpression(coll, {$mod: [NaN, 10]}, NaN);
+// Confirm expected behavior for NaN and Infinity values.
+testExpression(coll, {$mod: [10, NaN]}, NaN);
+testExpression(coll, {$mod: [10, Infinity]}, 10);
+testExpression(coll, {$mod: [10, -Infinity]}, 10);
+testExpression(coll, {$mod: [Infinity, 10]}, NaN);
+testExpression(coll, {$mod: [-Infinity, 10]}, NaN);
+testExpression(coll, {$mod: [NaN, 10]}, NaN);
})();
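
To make concrete the promotion rules that the $match stage above encodes: $mod returns the wider of its two operand types. A hedged sketch (same hypothetical "scratch" collection):

// Sketch only: int % int stays int, mixing in a long widens to long,
// and any double operand widens the result to double.
db.scratch.aggregate([{
    $project: {
        _id: 0,
        int_int: {$mod: [NumberInt(8), NumberInt(3)]},    // NumberInt 2
        int_long: {$mod: [NumberInt(8), NumberLong(3)]},  // NumberLong(2)
        long_dub: {$mod: [NumberLong(8), 3.25]}           // 1.5 (double)
    }
}]).toArray();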
diff --git a/jstests/aggregation/expressions/expression_trigonometric.js b/jstests/aggregation/expressions/expression_trigonometric.js
index 192e9743b62..468c6bccef3 100644
--- a/jstests/aggregation/expressions/expression_trigonometric.js
+++ b/jstests/aggregation/expressions/expression_trigonometric.js
@@ -1,254 +1,253 @@
// SERVER-32930: Basic integration tests for trigonometric aggregation expressions.
(function() {
- "use strict";
- // For assertErrorCode.
- load("jstests/aggregation/extras/utils.js");
-
- const coll = db.expression_trigonometric;
- coll.drop();
- // We need at least one document in the collection in order to test expressions, add it here.
- assert.commandWorked(coll.insert({}));
-
- // Helper for testing that op returns expResult.
- function testOp(op, expResult) {
- const pipeline = [{$project: {_id: 0, result: op}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
- }
-
- // Helper for testing that the aggregation expression 'op' returns expResult, approximately,
- // since NumberDecimal has so many representations for a given number (0 versus 0e-40 for
- // instance).
- function testOpApprox(op, expResult) {
- const pipeline = [{$project: {_id: 0, result: {$abs: {$subtract: [op, expResult]}}}}];
- assert.lt(coll.aggregate(pipeline).toArray(), [{result: NumberDecimal("0.00000005")}]);
- }
-
- // Simple successful int input.
- testOp({$acos: NumberInt(1)}, 0);
- testOp({$acosh: NumberInt(1)}, 0);
- testOp({$asin: NumberInt(0)}, 0);
- testOp({$asinh: NumberInt(0)}, 0);
- testOp({$atan: NumberInt(0)}, 0);
- testOp({$atan2: [NumberInt(0), NumberInt(1)]}, 0);
- testOp({$atan2: [NumberInt(0), NumberInt(0)]}, 0);
- testOp({$atanh: NumberInt(0)}, 0);
- testOp({$cos: NumberInt(0)}, 1);
- testOp({$cosh: NumberInt(0)}, 1);
- testOp({$sin: NumberInt(0)}, 0);
- testOp({$sinh: NumberInt(0)}, 0);
- testOp({$tan: NumberInt(0)}, 0);
- testOp({$tanh: NumberInt(0)}, 0);
- testOp({$degreesToRadians: NumberInt(0)}, 0);
- testOp({$radiansToDegrees: NumberInt(0)}, 0);
-
- // Simple successful long input.
- testOp({$acos: NumberLong(1)}, 0);
- testOp({$acosh: NumberLong(1)}, 0);
- testOp({$asin: NumberLong(0)}, 0);
- testOp({$asinh: NumberLong(0)}, 0);
- testOp({$atan: NumberLong(0)}, 0);
- testOp({$atan2: [NumberLong(0), NumberLong(1)]}, 0);
- testOp({$atan2: [NumberLong(0), NumberLong(0)]}, 0);
- testOp({$atanh: NumberLong(0)}, 0);
- testOp({$cos: NumberLong(0)}, 1);
- testOp({$cosh: NumberLong(0)}, 1);
- testOp({$sin: NumberLong(0)}, 0);
- testOp({$sinh: NumberLong(0)}, 0);
- testOp({$tan: NumberLong(0)}, 0);
- testOp({$tanh: NumberLong(0)}, 0);
- testOp({$degreesToRadians: NumberLong(0)}, 0);
- testOp({$radiansToDegrees: NumberLong(0)}, 0);
-
- // Simple successful double input.
- testOp({$acos: 1}, 0);
- testOp({$acosh: 1}, 0);
- testOp({$asin: 0}, 0);
- testOp({$asinh: 0}, 0);
- testOp({$atan: 0}, 0);
- testOp({$atan2: [0, 1]}, 0);
- testOp({$atan2: [0, 0]}, 0);
- testOp({$atanh: 0}, 0);
- testOp({$cos: 0}, 1);
- testOp({$cosh: 0}, 1);
- testOp({$sin: 0}, 0);
- testOp({$sinh: 0}, 0);
- testOp({$tan: 0}, 0);
- testOp({$tanh: 0}, 0);
- testOp({$degreesToRadians: 0}, 0);
- testOp({$radiansToDegrees: 0}, 0);
-
- // Simple successful decimal input.
- testOpApprox({$acos: NumberDecimal(1)}, NumberDecimal(0));
- testOpApprox({$acosh: NumberDecimal(1)}, NumberDecimal(0));
- testOpApprox({$asin: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$asinh: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$atan: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$atan2: [NumberDecimal(0), 1]}, NumberDecimal(0));
- testOpApprox({$atan2: [NumberDecimal(0), 0]}, NumberDecimal(0));
- testOpApprox({$atanh: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$cos: NumberDecimal(0)}, NumberDecimal(1));
- testOpApprox({$cosh: NumberDecimal(0)}, NumberDecimal(1));
- testOpApprox({$sin: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$sinh: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$tan: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$tanh: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$degreesToRadians: NumberDecimal(0)}, NumberDecimal(0));
- testOpApprox({$radiansToDegrees: NumberDecimal(0)}, NumberDecimal(0));
-
- // Infinity input produces out of bounds error.
- assertErrorCode(coll, [{$project: {a: {$acos: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal('-Infinity')}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal('Infinity')}}}], 50989);
-
- assertErrorCode(coll, [{$project: {a: {$acosh: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acosh: NumberDecimal('-Infinity')}}}], 50989);
-
- assertErrorCode(coll, [{$project: {a: {$asin: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal('-Infinity')}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal('Infinity')}}}], 50989);
-
- assertErrorCode(coll, [{$project: {a: {$atanh: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal('-Infinity')}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal('Infinity')}}}], 50989);
-
- assertErrorCode(coll, [{$project: {a: {$cos: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$cos: NumberDecimal('-Infinity')}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$cos: Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$cos: NumberDecimal('Infinity')}}}], 50989);
-
- assertErrorCode(coll, [{$project: {a: {$sin: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$sin: NumberDecimal('-Infinity')}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$sin: Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$sin: NumberDecimal('Infinity')}}}], 50989);
-
- assertErrorCode(coll, [{$project: {a: {$tan: -Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$tan: NumberDecimal('-Infinity')}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$tan: Infinity}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$tan: NumberDecimal('Infinity')}}}], 50989);
-
- // Infinity input produces Infinity as output.
- testOp({$acosh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
- testOp({$acosh: Infinity}, Infinity);
-
- testOp({$asinh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
- testOp({$asinh: NumberDecimal('-Infinity')}, NumberDecimal('-Infinity'));
- testOp({$asinh: Infinity}, Infinity);
- testOp({$asinh: -Infinity}, -Infinity);
- testOp({$cosh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
- testOp({$cosh: NumberDecimal('-Infinity')}, NumberDecimal('Infinity'));
- testOp({$cosh: Infinity}, Infinity);
- testOp({$cosh: -Infinity}, Infinity);
- testOp({$sinh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
- testOp({$sinh: NumberDecimal('-Infinity')}, NumberDecimal('-Infinity'));
- testOp({$sinh: Infinity}, Infinity);
- testOp({$sinh: -Infinity}, -Infinity);
-
- // Infinity produces finite output (due to asymptotic bounds).
- testOpApprox({$atan: NumberDecimal('Infinity')}, NumberDecimal(Math.PI / 2));
- testOpApprox({$atan: NumberDecimal('-Infinity')}, NumberDecimal(-Math.PI / 2));
- testOpApprox({$atan: Infinity}, Math.PI / 2);
- testOpApprox({$atan: -Infinity}, -Math.PI / 2);
-
- testOpApprox({$atan2: [NumberDecimal('Infinity'), 0]}, NumberDecimal(Math.PI / 2));
- testOpApprox({$atan2: [NumberDecimal('-Infinity'), 0]}, NumberDecimal(-Math.PI / 2));
- testOpApprox({$atan2: [NumberDecimal('-Infinity'), NumberDecimal("Infinity")]},
- NumberDecimal(-Math.PI / 4));
- testOpApprox({$atan2: [NumberDecimal('-Infinity'), NumberDecimal("-Infinity")]},
- NumberDecimal(-3 * Math.PI / 4));
- testOpApprox({$atan2: [NumberDecimal('0'), NumberDecimal("-Infinity")]},
- NumberDecimal(Math.PI));
- testOpApprox({$atan2: [NumberDecimal('0'), NumberDecimal("Infinity")]}, NumberDecimal(0));
-
- testOp({$tanh: NumberDecimal('Infinity')}, NumberDecimal('1'));
- testOp({$tanh: NumberDecimal('-Infinity')}, NumberDecimal('-1'));
-
- // Finite input produces infinite outputs.
- testOp({$atanh: NumberDecimal(1)}, NumberDecimal('Infinity'));
- testOp({$atanh: NumberDecimal(-1)}, NumberDecimal('-Infinity'));
- testOp({$atanh: 1}, Infinity);
- testOp({$atanh: -1}, -Infinity);
-
- testOp({$tanh: Infinity}, 1);
- testOp({$tanh: -Infinity}, -1);
-
- // Int argument out of bounds.
- assertErrorCode(coll, [{$project: {a: {$acos: NumberInt(-2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: NumberInt(2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberInt(-2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberInt(2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acosh: NumberInt(0)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberInt(2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberInt(-2)}}}], 50989);
-
- // Long argument out of bounds.
- assertErrorCode(coll, [{$project: {a: {$acos: NumberLong(-2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: NumberLong(2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberLong(-2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberLong(2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acosh: NumberLong(0)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberLong(2)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberLong(-2)}}}], 50989);
-
- // Double argument out of bounds.
- assertErrorCode(coll, [{$project: {a: {$acos: -1.1}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: 1.1}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: -1.1}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: 1.1}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acosh: 0.9}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: -1.00001}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: 1.00001}}}], 50989);
-
- // Decimal argument out of bounds.
- assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal(-1.1)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal(1.1)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal(-1.1)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal(1.1)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$acosh: NumberDecimal(0.9)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal(-1.00001)}}}], 50989);
- assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal(1.000001)}}}], 50989);
-
- // Check NaN is preserved.
- ["$acos", "$asin", "$atan", "$cos", "$sin", "$tan"].forEach(op => {
- testOp({[op]: NaN}, NaN);
- testOp({[op]: NumberDecimal(NaN)}, NumberDecimal(NaN));
- // Check the hyperbolic version of each function.
- testOp({[op + 'h']: NaN}, NaN);
- testOp({[op + 'h']: NumberDecimal(NaN)}, NumberDecimal(NaN));
- });
-
- ["$radiansToDegrees", "$degreesToRadians"].forEach(op => {
- testOp({[op]: NaN}, NaN);
- testOp({[op]: NumberDecimal(NaN)}, NumberDecimal(NaN));
- testOp({[op]: -Infinity}, -Infinity);
- testOp({[op]: NumberDecimal(-Infinity)}, NumberDecimal(-Infinity));
- testOp({[op]: Infinity}, Infinity);
- testOp({[op]: NumberDecimal(Infinity)}, NumberDecimal(Infinity));
- });
-
- testOp({$atan2: [NumberDecimal('NaN'), NumberDecimal('NaN')]}, NumberDecimal('NaN'));
- testOp({$atan2: [NumberDecimal('NaN'), NumberDecimal('0')]}, NumberDecimal('NaN'));
- testOp({$atan2: [NumberDecimal('0'), NumberDecimal('NaN')]}, NumberDecimal('NaN'));
-
- // Non-numeric input.
- assertErrorCode(coll, [{$project: {a: {$acos: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$acosh: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$asin: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$asinh: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$atan: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$atan2: ["string", "string"]}}}], 51044);
- assertErrorCode(coll, [{$project: {a: {$atan2: ["string", 0.0]}}}], 51044);
- assertErrorCode(coll, [{$project: {a: {$atan2: [0.0, "string"]}}}], 51045);
- assertErrorCode(coll, [{$project: {a: {$atanh: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$cos: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$cosh: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$sin: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$sinh: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$tan: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$tanh: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$degreesToRadians: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$radiansToDegrees: "string"}}}], 28765);
+"use strict";
+// For assertErrorCode.
+load("jstests/aggregation/extras/utils.js");
+
+const coll = db.expression_trigonometric;
+coll.drop();
+// We need at least one document in the collection in order to test expressions; add it here.
+assert.commandWorked(coll.insert({}));
+
+// Helper for testing that op returns expResult.
+function testOp(op, expResult) {
+ const pipeline = [{$project: {_id: 0, result: op}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
+}
+
+// Helper for testing that the aggregation expression 'op' returns expResult, approximately,
+// since NumberDecimal has so many representations for a given number (0 versus 0e-40 for
+// instance).
+function testOpApprox(op, expResult) {
+ const pipeline = [{$project: {_id: 0, result: {$abs: {$subtract: [op, expResult]}}}}];
+ assert.lt(coll.aggregate(pipeline).toArray(), [{result: NumberDecimal("0.00000005")}]);
+}
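+
+// Illustrative addition (not in the original suite): NumberDecimal("0") and
+// NumberDecimal("0E-40") denote the same number under different representations,
+// which is why an approximate comparison is used instead of exact equality.
+testOpApprox({$asin: NumberDecimal("0E-40")}, NumberDecimal(0));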
+
+// Simple successful int input.
+testOp({$acos: NumberInt(1)}, 0);
+testOp({$acosh: NumberInt(1)}, 0);
+testOp({$asin: NumberInt(0)}, 0);
+testOp({$asinh: NumberInt(0)}, 0);
+testOp({$atan: NumberInt(0)}, 0);
+testOp({$atan2: [NumberInt(0), NumberInt(1)]}, 0);
+testOp({$atan2: [NumberInt(0), NumberInt(0)]}, 0);
+testOp({$atanh: NumberInt(0)}, 0);
+testOp({$cos: NumberInt(0)}, 1);
+testOp({$cosh: NumberInt(0)}, 1);
+testOp({$sin: NumberInt(0)}, 0);
+testOp({$sinh: NumberInt(0)}, 0);
+testOp({$tan: NumberInt(0)}, 0);
+testOp({$tanh: NumberInt(0)}, 0);
+testOp({$degreesToRadians: NumberInt(0)}, 0);
+testOp({$radiansToDegrees: NumberInt(0)}, 0);
+
+// Simple successful long input.
+testOp({$acos: NumberLong(1)}, 0);
+testOp({$acosh: NumberLong(1)}, 0);
+testOp({$asin: NumberLong(0)}, 0);
+testOp({$asinh: NumberLong(0)}, 0);
+testOp({$atan: NumberLong(0)}, 0);
+testOp({$atan2: [NumberLong(0), NumberLong(1)]}, 0);
+testOp({$atan2: [NumberLong(0), NumberLong(0)]}, 0);
+testOp({$atanh: NumberLong(0)}, 0);
+testOp({$cos: NumberLong(0)}, 1);
+testOp({$cosh: NumberLong(0)}, 1);
+testOp({$sin: NumberLong(0)}, 0);
+testOp({$sinh: NumberLong(0)}, 0);
+testOp({$tan: NumberLong(0)}, 0);
+testOp({$tanh: NumberLong(0)}, 0);
+testOp({$degreesToRadians: NumberLong(0)}, 0);
+testOp({$radiansToDegrees: NumberLong(0)}, 0);
+
+// Simple successful double input.
+testOp({$acos: 1}, 0);
+testOp({$acosh: 1}, 0);
+testOp({$asin: 0}, 0);
+testOp({$asinh: 0}, 0);
+testOp({$atan: 0}, 0);
+testOp({$atan2: [0, 1]}, 0);
+testOp({$atan2: [0, 0]}, 0);
+testOp({$atanh: 0}, 0);
+testOp({$cos: 0}, 1);
+testOp({$cosh: 0}, 1);
+testOp({$sin: 0}, 0);
+testOp({$sinh: 0}, 0);
+testOp({$tan: 0}, 0);
+testOp({$tanh: 0}, 0);
+testOp({$degreesToRadians: 0}, 0);
+testOp({$radiansToDegrees: 0}, 0);
+
+// Simple successful decimal input.
+testOpApprox({$acos: NumberDecimal(1)}, NumberDecimal(0));
+testOpApprox({$acosh: NumberDecimal(1)}, NumberDecimal(0));
+testOpApprox({$asin: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$asinh: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$atan: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$atan2: [NumberDecimal(0), 1]}, NumberDecimal(0));
+testOpApprox({$atan2: [NumberDecimal(0), 0]}, NumberDecimal(0));
+testOpApprox({$atanh: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$cos: NumberDecimal(0)}, NumberDecimal(1));
+testOpApprox({$cosh: NumberDecimal(0)}, NumberDecimal(1));
+testOpApprox({$sin: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$sinh: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$tan: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$tanh: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$degreesToRadians: NumberDecimal(0)}, NumberDecimal(0));
+testOpApprox({$radiansToDegrees: NumberDecimal(0)}, NumberDecimal(0));
+
+// Infinity input produces an out-of-bounds error.
+assertErrorCode(coll, [{$project: {a: {$acos: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal('-Infinity')}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal('Infinity')}}}], 50989);
+
+assertErrorCode(coll, [{$project: {a: {$acosh: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acosh: NumberDecimal('-Infinity')}}}], 50989);
+
+assertErrorCode(coll, [{$project: {a: {$asin: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal('-Infinity')}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal('Infinity')}}}], 50989);
+
+assertErrorCode(coll, [{$project: {a: {$atanh: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal('-Infinity')}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal('Infinity')}}}], 50989);
+
+assertErrorCode(coll, [{$project: {a: {$cos: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$cos: NumberDecimal('-Infinity')}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$cos: Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$cos: NumberDecimal('Infinity')}}}], 50989);
+
+assertErrorCode(coll, [{$project: {a: {$sin: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$sin: NumberDecimal('-Infinity')}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$sin: Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$sin: NumberDecimal('Infinity')}}}], 50989);
+
+assertErrorCode(coll, [{$project: {a: {$tan: -Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$tan: NumberDecimal('-Infinity')}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$tan: Infinity}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$tan: NumberDecimal('Infinity')}}}], 50989);
+
+// Infinity input produces Infinity as output.
+testOp({$acosh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
+testOp({$acosh: Infinity}, Infinity);
+
+testOp({$asinh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
+testOp({$asinh: NumberDecimal('-Infinity')}, NumberDecimal('-Infinity'));
+testOp({$asinh: Infinity}, Infinity);
+testOp({$asinh: -Infinity}, -Infinity);
+testOp({$cosh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
+testOp({$cosh: NumberDecimal('-Infinity')}, NumberDecimal('Infinity'));
+testOp({$cosh: Infinity}, Infinity);
+testOp({$cosh: -Infinity}, Infinity);
+testOp({$sinh: NumberDecimal('Infinity')}, NumberDecimal('Infinity'));
+testOp({$sinh: NumberDecimal('-Infinity')}, NumberDecimal('-Infinity'));
+testOp({$sinh: Infinity}, Infinity);
+testOp({$sinh: -Infinity}, -Infinity);
+
+// Infinity produces finite output (due to asymptotic bounds).
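+// (atan approaches +/-pi/2 and tanh approaches +/-1 as the argument tends to +/-Infinity.)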
+testOpApprox({$atan: NumberDecimal('Infinity')}, NumberDecimal(Math.PI / 2));
+testOpApprox({$atan: NumberDecimal('-Infinity')}, NumberDecimal(-Math.PI / 2));
+testOpApprox({$atan: Infinity}, Math.PI / 2);
+testOpApprox({$atan: -Infinity}, -Math.PI / 2);
+
+testOpApprox({$atan2: [NumberDecimal('Infinity'), 0]}, NumberDecimal(Math.PI / 2));
+testOpApprox({$atan2: [NumberDecimal('-Infinity'), 0]}, NumberDecimal(-Math.PI / 2));
+testOpApprox({$atan2: [NumberDecimal('-Infinity'), NumberDecimal("Infinity")]},
+ NumberDecimal(-Math.PI / 4));
+testOpApprox({$atan2: [NumberDecimal('-Infinity'), NumberDecimal("-Infinity")]},
+ NumberDecimal(-3 * Math.PI / 4));
+testOpApprox({$atan2: [NumberDecimal('0'), NumberDecimal("-Infinity")]}, NumberDecimal(Math.PI));
+testOpApprox({$atan2: [NumberDecimal('0'), NumberDecimal("Infinity")]}, NumberDecimal(0));
+
+testOp({$tanh: NumberDecimal('Infinity')}, NumberDecimal('1'));
+testOp({$tanh: NumberDecimal('-Infinity')}, NumberDecimal('-1'));
+
+// Finite input produces infinite outputs.
+testOp({$atanh: NumberDecimal(1)}, NumberDecimal('Infinity'));
+testOp({$atanh: NumberDecimal(-1)}, NumberDecimal('-Infinity'));
+testOp({$atanh: 1}, Infinity);
+testOp({$atanh: -1}, -Infinity);
+
+testOp({$tanh: Infinity}, 1);
+testOp({$tanh: -Infinity}, -1);
+
+// Int argument out of bounds.
+assertErrorCode(coll, [{$project: {a: {$acos: NumberInt(-2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: NumberInt(2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberInt(-2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberInt(2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acosh: NumberInt(0)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberInt(2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberInt(-2)}}}], 50989);
+
+// Long argument out of bounds.
+assertErrorCode(coll, [{$project: {a: {$acos: NumberLong(-2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: NumberLong(2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberLong(-2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberLong(2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acosh: NumberLong(0)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberLong(2)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberLong(-2)}}}], 50989);
+
+// Double argument out of bounds.
+assertErrorCode(coll, [{$project: {a: {$acos: -1.1}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: 1.1}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: -1.1}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: 1.1}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acosh: 0.9}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: -1.00001}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: 1.00001}}}], 50989);
+
+// Decimal argument out of bounds.
+assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal(-1.1)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acos: NumberDecimal(1.1)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal(-1.1)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$asin: NumberDecimal(1.1)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$acosh: NumberDecimal(0.9)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal(-1.00001)}}}], 50989);
+assertErrorCode(coll, [{$project: {a: {$atanh: NumberDecimal(1.000001)}}}], 50989);
+
+// Check NaN is preserved.
+["$acos", "$asin", "$atan", "$cos", "$sin", "$tan"].forEach(op => {
+ testOp({[op]: NaN}, NaN);
+ testOp({[op]: NumberDecimal(NaN)}, NumberDecimal(NaN));
+ // Check the hyperbolic version of each function.
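+ // For example, "$sin" becomes "$sinh".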
+ testOp({[op + 'h']: NaN}, NaN);
+ testOp({[op + 'h']: NumberDecimal(NaN)}, NumberDecimal(NaN));
+});
+
+["$radiansToDegrees", "$degreesToRadians"].forEach(op => {
+ testOp({[op]: NaN}, NaN);
+ testOp({[op]: NumberDecimal(NaN)}, NumberDecimal(NaN));
+ testOp({[op]: -Infinity}, -Infinity);
+ testOp({[op]: NumberDecimal(-Infinity)}, NumberDecimal(-Infinity));
+ testOp({[op]: Infinity}, Infinity);
+ testOp({[op]: NumberDecimal(Infinity)}, NumberDecimal(Infinity));
+});
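+
+// A small sanity check added for illustration (not part of the original file):
+// 180 degrees is pi radians, within the tolerance used by testOpApprox above.
+testOpApprox({$degreesToRadians: 180}, NumberDecimal(Math.PI));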
+
+testOp({$atan2: [NumberDecimal('NaN'), NumberDecimal('NaN')]}, NumberDecimal('NaN'));
+testOp({$atan2: [NumberDecimal('NaN'), NumberDecimal('0')]}, NumberDecimal('NaN'));
+testOp({$atan2: [NumberDecimal('0'), NumberDecimal('NaN')]}, NumberDecimal('NaN'));
+
+// Non-numeric input.
+assertErrorCode(coll, [{$project: {a: {$acos: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$acosh: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$asin: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$asinh: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$atan: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$atan2: ["string", "string"]}}}], 51044);
+assertErrorCode(coll, [{$project: {a: {$atan2: ["string", 0.0]}}}], 51044);
+assertErrorCode(coll, [{$project: {a: {$atan2: [0.0, "string"]}}}], 51045);
+assertErrorCode(coll, [{$project: {a: {$atanh: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$cos: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$cosh: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$sin: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$sinh: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$tan: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$tanh: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$degreesToRadians: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$radiansToDegrees: "string"}}}], 28765);
}());
diff --git a/jstests/aggregation/expressions/floor_ceil.js b/jstests/aggregation/expressions/floor_ceil.js
index def7c4de59e..1b4830d0d0d 100644
--- a/jstests/aggregation/expressions/floor_ceil.js
+++ b/jstests/aggregation/expressions/floor_ceil.js
@@ -1,41 +1,41 @@
// The following are integration tests for $floor and $ceil.
(function() {
- "use strict";
+"use strict";
- // For assertErrorCode.
- load("jstests/aggregation/extras/utils.js");
+// For assertErrorCode.
+load("jstests/aggregation/extras/utils.js");
- var coll = db.server19548;
- coll.drop();
- // We need at least one document in the collection in order to test expressions; add it here.
- assert.commandWorked(coll.insert({}));
+var coll = db.server19548;
+coll.drop();
+// We need at least one document in the collection in order to test expressions; add it here.
+assert.commandWorked(coll.insert({}));
- // Helper for testing that op returns expResult.
- function testOp(op, expResult) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
- }
+// Helper for testing that op returns expResult.
+function testOp(op, expResult) {
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
+}
- testOp({$ceil: NumberLong(4)}, NumberLong(4));
- testOp({$ceil: NaN}, NaN);
- testOp({$ceil: Infinity}, Infinity);
- testOp({$ceil: -Infinity}, -Infinity);
- testOp({$ceil: null}, null);
- testOp({$ceil: -2.0}, -2.0);
- testOp({$ceil: 0.9}, 1.0);
- testOp({$ceil: -1.2}, -1.0);
+testOp({$ceil: NumberLong(4)}, NumberLong(4));
+testOp({$ceil: NaN}, NaN);
+testOp({$ceil: Infinity}, Infinity);
+testOp({$ceil: -Infinity}, -Infinity);
+testOp({$ceil: null}, null);
+testOp({$ceil: -2.0}, -2.0);
+testOp({$ceil: 0.9}, 1.0);
+testOp({$ceil: -1.2}, -1.0);
- testOp({$floor: NumberLong(4)}, NumberLong(4));
- testOp({$floor: NaN}, NaN);
- testOp({$floor: Infinity}, Infinity);
- testOp({$floor: -Infinity}, -Infinity);
- testOp({$floor: null}, null);
- testOp({$floor: -2.0}, -2.0);
- testOp({$floor: 0.9}, 0.0);
- testOp({$floor: -1.2}, -2.0);
+testOp({$floor: NumberLong(4)}, NumberLong(4));
+testOp({$floor: NaN}, NaN);
+testOp({$floor: Infinity}, Infinity);
+testOp({$floor: -Infinity}, -Infinity);
+testOp({$floor: null}, null);
+testOp({$floor: -2.0}, -2.0);
+testOp({$floor: 0.9}, 0.0);
+testOp({$floor: -1.2}, -2.0);
- // Non-numeric input.
- assertErrorCode(coll, [{$project: {a: {$ceil: "string"}}}], 28765);
- assertErrorCode(coll, [{$project: {a: {$floor: "string"}}}], 28765);
+// Non-numeric input.
+assertErrorCode(coll, [{$project: {a: {$ceil: "string"}}}], 28765);
+assertErrorCode(coll, [{$project: {a: {$floor: "string"}}}], 28765);
}());
diff --git a/jstests/aggregation/expressions/in.js b/jstests/aggregation/expressions/in.js
index 7c91313e081..63ba02f1b4e 100644
--- a/jstests/aggregation/expressions/in.js
+++ b/jstests/aggregation/expressions/in.js
@@ -4,205 +4,204 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
+"use strict";
- const caseInsensitive = {locale: "en_US", strength: 2};
- var coll = db.in ;
- coll.drop();
-
- function testExpression(options) {
- coll.drop();
- testExpressionInternal(options);
- }
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+var coll = db.in;
+coll.drop();
- function testExpressionHashIndex(options) {
- coll.drop();
- assert.commandWorked(coll.createIndex({elementField: "hashed"}));
- testExpressionInternal(options);
- }
+function testExpression(options) {
+ coll.drop();
+ testExpressionInternal(options);
+}
- function testExpressionCollectionCollation(options, collationSpec) {
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: collationSpec}));
- testExpressionInternal(options);
- }
+function testExpressionHashIndex(options) {
+ coll.drop();
+ assert.commandWorked(coll.createIndex({elementField: "hashed"}));
+ testExpressionInternal(options);
+}
- function testExpressionInternal(options) {
- var pipeline = {$project: {included: {$in: ["$elementField", {$literal: options.array}]}}};
- assert.writeOK(coll.insert({elementField: options.element}));
- var res = coll.aggregate(pipeline).toArray();
- assert.eq(res.length, 1);
- assert.eq(res[0].included, options.elementIsIncluded);
-
- if (options.queryFormShouldBeEquivalent) {
- var query = {elementField: {$in: options.array}};
- res = coll.find(query).toArray();
-
- if (options.elementIsIncluded) {
- assert.eq(res.length, 1);
- } else {
- assert.eq(res.length, 0);
- }
+function testExpressionCollectionCollation(options, collationSpec) {
+ coll.drop();
+ assert.commandWorked(db.createCollection(coll.getName(), {collation: collationSpec}));
+ testExpressionInternal(options);
+}
+
+function testExpressionInternal(options) {
+ var pipeline = {$project: {included: {$in: ["$elementField", {$literal: options.array}]}}};
+ assert.writeOK(coll.insert({elementField: options.element}));
+ var res = coll.aggregate(pipeline).toArray();
+ assert.eq(res.length, 1);
+ assert.eq(res[0].included, options.elementIsIncluded);
+
+ if (options.queryFormShouldBeEquivalent) {
+ var query = {elementField: {$in: options.array}};
+ res = coll.find(query).toArray();
+
+ if (options.elementIsIncluded) {
+ assert.eq(res.length, 1);
+ } else {
+ assert.eq(res.length, 0);
}
}
-
- testExpression(
- {element: 1, array: [1, 2, 3], elementIsIncluded: true, queryFormShouldBeEquivalent: true});
-
- testExpression({
- element: "A",
- array: ["a", "A", "a"],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- testExpression({
- element: {a: 1},
- array: [{b: 1}, 2],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: true
- });
-
- testExpression({
- element: {a: 1},
- array: [{a: 1}],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- testExpression({
- element: [1, 2],
- array: [[2, 1]],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: true
- });
-
- testExpression({
- element: [1, 2],
- array: [[1, 2]],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- // Test $in with duplicated target element.
- testExpression({
- element: 7,
- array: [3, 5, 7, 7, 9],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- // Test $in with other element within array duplicated.
- testExpression({
- element: 7,
- array: [3, 5, 7, 9, 9],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- // Test $in on unsorted array.
- testExpression({
- element: 7,
- array: [3, 10, 5, 7, 8, 9],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- // Test matching $in on unsorted array with duplicates.
- testExpression({
- element: 7,
- array: [7, 10, 7, 10, 2, 5, 3, 7],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- // Test non-matching $in on unsorted array with duplicates.
- testExpression({
- element: 8,
- array: [10, 7, 2, 5, 3],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: true
- });
-
- // Test $in with success due to collation on source collection.
- testExpressionCollectionCollation({
- element: "abcd",
- array: ["aBcD", "ABCD"],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- },
- caseInsensitive);
-
- // Test $in with a source collection that has a hash index on the relevant field.
- testExpressionHashIndex({
- element: 5,
- array: [10, 7, 2, 5, 3],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
-
- testExpression(
- {element: 1, array: [], elementIsIncluded: false, queryFormShouldBeEquivalent: true});
-
- // Aggregation's $in has parity with query's $in, except for regexes matching string values
- // and for equality semantics with array values.
-
- testExpression({
- element: "abc",
- array: [/a/, /b/, /c/],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: false
- });
-
- testExpression({
- element: /a/,
- array: ["a", "b", "c"],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: false
- });
-
- testExpression({
- element: [],
- array: [1, 2, 3],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: false
- });
-
- testExpression({
- element: [1],
- array: [1, 2, 3],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: false
- });
-
- testExpression({
- element: [1, 2],
- array: [1, 2, 3],
- elementIsIncluded: false,
- queryFormShouldBeEquivalent: false
- });
-
- coll.drop();
- coll.insert({});
-
- var pipeline = {$project: {included: {$in: [[1, 2], 1]}}};
- assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
-
- pipeline = {$project: {included: {$in: [1, null]}}};
- assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
-
- pipeline = {$project: {included: {$in: [1, "$notAField"]}}};
- assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
-
- pipeline = {$project: {included: {$in: null}}};
- assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
-
- pipeline = {$project: {included: {$in: [1]}}};
- assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
-
- pipeline = {$project: {included: {$in: []}}};
- assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
-
- pipeline = {$project: {included: {$in: [1, 2, 3]}}};
- assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
+}
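+
+// For orientation (comment added for exposition): with options such as
+// {element: 1, array: [1, 2, 3]}, the helper above evaluates the aggregation form
+//   {$project: {included: {$in: ["$elementField", {$literal: [1, 2, 3]}]}}}
+// and, when queryFormShouldBeEquivalent is true, checks that the query form
+//   {elementField: {$in: [1, 2, 3]}}
+// matches the same document.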
+
+testExpression(
+ {element: 1, array: [1, 2, 3], elementIsIncluded: true, queryFormShouldBeEquivalent: true});
+
+testExpression({
+ element: "A",
+ array: ["a", "A", "a"],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+});
+
+testExpression({
+ element: {a: 1},
+ array: [{b: 1}, 2],
+ elementIsIncluded: false,
+ queryFormShouldBeEquivalent: true
+});
+
+testExpression(
+ {element: {a: 1}, array: [{a: 1}], elementIsIncluded: true, queryFormShouldBeEquivalent: true});
+
+testExpression({
+ element: [1, 2],
+ array: [[2, 1]],
+ elementIsIncluded: false,
+ queryFormShouldBeEquivalent: true
+});
+
+testExpression(
+ {element: [1, 2], array: [[1, 2]], elementIsIncluded: true, queryFormShouldBeEquivalent: true});
+
+// Test $in with duplicated target element.
+testExpression({
+ element: 7,
+ array: [3, 5, 7, 7, 9],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+});
+
+// Test $in with other element within array duplicated.
+testExpression({
+ element: 7,
+ array: [3, 5, 7, 9, 9],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+});
+
+// Test $in on unsorted array.
+testExpression({
+ element: 7,
+ array: [3, 10, 5, 7, 8, 9],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+});
+
+// Test matching $in on unsorted array with duplicates.
+testExpression({
+ element: 7,
+ array: [7, 10, 7, 10, 2, 5, 3, 7],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+});
+
+// Test non-matching $in on unsorted array with duplicates.
+testExpression({
+ element: 8,
+ array: [10, 7, 2, 5, 3],
+ elementIsIncluded: false,
+ queryFormShouldBeEquivalent: true
+});
+
+// Test $in with success due to collation on source collection.
+testExpressionCollectionCollation({
+ element: "abcd",
+ array: ["aBcD", "ABCD"],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+},
+ caseInsensitive);
+
+// Test $in with a source collection that has a hash index on the relevant field.
+testExpressionHashIndex({
+ element: 5,
+ array: [10, 7, 2, 5, 3],
+ elementIsIncluded: true,
+ queryFormShouldBeEquivalent: true
+});
+
+testExpression(
+ {element: 1, array: [], elementIsIncluded: false, queryFormShouldBeEquivalent: true});
+
+// Aggregation's $in has parity with query's $in, except for regexes matching string values
+// and for equality semantics with array values.
+
+testExpression({
+ element: "abc",
+ array: [/a/, /b/, /c/],
+ elementIsIncluded: false,
+ queryFormShouldBeEquivalent: false
+});
+
+testExpression({
+ element: /a/,
+ array: ["a", "b", "c"],
+ elementIsIncluded: false,
+ queryFormShouldBeEquivalent: false
+});
+
+testExpression(
+ {element: [], array: [1, 2, 3], elementIsIncluded: false, queryFormShouldBeEquivalent: false});
+
+testExpression(
+ {element: [1], array: [1, 2, 3], elementIsIncluded: false, queryFormShouldBeEquivalent: false});
+
+testExpression({
+ element: [1, 2],
+ array: [1, 2, 3],
+ elementIsIncluded: false,
+ queryFormShouldBeEquivalent: false
+});
+
+coll.drop();
+coll.insert({});
+
+var pipeline = {$project: {included: {$in: [[1, 2], 1]}}};
+assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
+
+pipeline = {
+ $project: {included: {$in: [1, null]}}
+};
+assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
+
+pipeline = {
+ $project: {included: {$in: [1, "$notAField"]}}
+};
+assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
+
+pipeline = {
+ $project: {included: {$in: null}}
+};
+assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
+
+pipeline = {
+ $project: {included: {$in: [1]}}
+};
+assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
+
+pipeline = {
+ $project: {included: {$in: []}}
+};
+assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
+
+pipeline = {
+ $project: {included: {$in: [1, 2, 3]}}
+};
+assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
}());
diff --git a/jstests/aggregation/expressions/indexof_array.js b/jstests/aggregation/expressions/indexof_array.js
index 3fb445e5066..a32376b1f9d 100644
--- a/jstests/aggregation/expressions/indexof_array.js
+++ b/jstests/aggregation/expressions/indexof_array.js
@@ -3,58 +3,66 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExpression.
(function() {
- "use strict";
+"use strict";
- var coll = db.indexofarray;
- coll.drop();
+var coll = db.indexofarray;
+coll.drop();
- // Insert a dummy document to ensure something flows through the pipeline.
- assert.writeOK(coll.insert({}));
+// Insert a dummy document to ensure something flows through the pipeline.
+assert.writeOK(coll.insert({}));
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2]}, 1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2]}, 1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 4]}, -1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 4]}, -1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3, 2, 1], 2, 2]}, 3);
+testExpression(coll, {$indexOfArray: [[1, 2, 3, 2, 1], 2, 2]}, 3);
- testExpression(coll, {$indexOfArray: [[1, 2, 3, 4, 5], 4, 0, 3]}, -1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3, 4, 5], 4, 0, 3]}, -1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 1]}, 1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 1]}, 1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 0, 10]}, 1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 0, 10]}, 1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3, 2, 1, 2, 3], 2, 2, 4]}, 3);
+testExpression(coll, {$indexOfArray: [[1, 2, 3, 2, 1, 2, 3], 2, 2, 4]}, 3);
- testExpression(coll, {$indexOfArray: [null, 2]}, null);
+testExpression(coll, {$indexOfArray: [null, 2]}, null);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3]}, -1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3]}, -1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3, 1]}, -1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3, 1]}, -1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3, 3]}, -1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3, 3]}, -1);
- testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3, 5]}, -1);
+testExpression(coll, {$indexOfArray: [[1, 2, 3], 2, 3, 5]}, -1);
- testExpression(coll, {$indexOfArray: [[], 1]}, -1);
+testExpression(coll, {$indexOfArray: [[], 1]}, -1);
- var pipeline = {
- $project: {
- output: {
- $indexOfArray: ["string", "s"],
- }
+var pipeline = {
+ $project: {
+ output: {
+ $indexOfArray: ["string", "s"],
}
- };
- assertErrorCode(coll, pipeline, 40090);
-
- pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, "bad"]}}};
- assertErrorCode(coll, pipeline, 40096);
-
- pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, 0, "bad"]}}};
- assertErrorCode(coll, pipeline, 40096);
-
- pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, -1]}}};
- assertErrorCode(coll, pipeline, 40097);
-
- pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, 1, -1]}}};
- assertErrorCode(coll, pipeline, 40097);
+ }
+};
+assertErrorCode(coll, pipeline, 40090);
+
+pipeline = {
+ $project: {output: {$indexOfArray: [[1, 2, 3], 2, "bad"]}}
+};
+assertErrorCode(coll, pipeline, 40096);
+
+pipeline = {
+ $project: {output: {$indexOfArray: [[1, 2, 3], 2, 0, "bad"]}}
+};
+assertErrorCode(coll, pipeline, 40096);
+
+pipeline = {
+ $project: {output: {$indexOfArray: [[1, 2, 3], 2, -1]}}
+};
+assertErrorCode(coll, pipeline, 40097);
+
+pipeline = {
+ $project: {output: {$indexOfArray: [[1, 2, 3], 2, 1, -1]}}
+};
+assertErrorCode(coll, pipeline, 40097);
}());
diff --git a/jstests/aggregation/expressions/indexof_bytes.js b/jstests/aggregation/expressions/indexof_bytes.js
index d484ad50948..14bcead5293 100644
--- a/jstests/aggregation/expressions/indexof_bytes.js
+++ b/jstests/aggregation/expressions/indexof_bytes.js
@@ -3,137 +3,145 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExpression.
(function() {
- "use strict";
-
- function testExpressionBytes(coll, expression, result, shouldTestEquivalence = true) {
- testExpression(coll, expression, result);
-
- if (shouldTestEquivalence) {
- // If we are specifying a starting or ending index for the search, we should be able to
- // achieve equivalent behavior using $substrBytes.
- var indexOfSpec = expression["$indexOfBytes"];
- var input = indexOfSpec[0];
- var token = indexOfSpec[1];
- var start = indexOfSpec.length > 2 ? indexOfSpec[2] : 0;
- // Use $strLenBytes because JavaScript's length property is based on UTF-16, not the
- // actual number of bytes.
- var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {$strLenBytes: input};
-
- var substrExpr = {
- $indexOfBytes: [{$substrBytes: [input, start, {$subtract: [end, start]}]}, token]
- };
-
- // Since the new expression takes the index with respect to a shortened string, the
- // output index will differ from the index with respect to the full length string,
- // unless the output is -1.
- var substrResult = (result === -1) ? -1 : result - start;
-
- testExpression(coll, substrExpr, substrResult);
- }
+"use strict";
+
+function testExpressionBytes(coll, expression, result, shouldTestEquivalence = true) {
+ testExpression(coll, expression, result);
+
+ if (shouldTestEquivalence) {
+ // If we are specifying a starting or ending index for the search, we should be able to
+ // achieve equivalent behavior using $substrBytes.
+ var indexOfSpec = expression["$indexOfBytes"];
+ var input = indexOfSpec[0];
+ var token = indexOfSpec[1];
+ var start = indexOfSpec.length > 2 ? indexOfSpec[2] : 0;
+ // Use $strLenBytes because JavaScript's length property is based on UTF-16, not the
+ // actual number of bytes.
+ var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {$strLenBytes: input};
+
+ var substrExpr = {
+ $indexOfBytes: [{$substrBytes: [input, start, {$subtract: [end, start]}]}, token]
+ };
+
+ // Since the new expression takes the index with respect to a shortened string, the
+ // output index will differ from the index with respect to the full length string,
+ // unless the output is -1.
+ var substrResult = (result === -1) ? -1 : result - start;
+
+ testExpression(coll, substrExpr, substrResult);
}
+}
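+
+// Worked example of the equivalence above (comment added for exposition): for
+// {$indexOfBytes: ["abcba", "b", 2]}, the $substrBytes form searches "cba", where
+// "b" sits at index 1; adding back the start offset 2 recovers the full-string
+// answer 3.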
- var coll = db.indexofbytes;
- coll.drop();
+var coll = db.indexofbytes;
+coll.drop();
- // Insert a dummy document so something flows through the pipeline.
- assert.writeOK(coll.insert({}));
+// Insert a dummy document so something flows through the pipeline.
+assert.writeOK(coll.insert({}));
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "b"]}, 1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "b"]}, 1);
- testExpressionBytes(coll, {$indexOfBytes: ["abcba", "b"]}, 1);
+testExpressionBytes(coll, {$indexOfBytes: ["abcba", "b"]}, 1);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "d"]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "d"]}, -1);
- testExpressionBytes(coll, {$indexOfBytes: ["abcba", "b", 2]}, 3);
+testExpressionBytes(coll, {$indexOfBytes: ["abcba", "b", 2]}, 3);
- testExpressionBytes(coll, {$indexOfBytes: ["abcde", "d", 0, 2]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abcde", "d", 0, 2]}, -1);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 1]}, 1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 1]}, 1);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 0, 10]}, 1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 0, 10]}, 1);
- testExpressionBytes(coll, {$indexOfBytes: ["abcbabc", "b", 2, 4]}, 3);
+testExpressionBytes(coll, {$indexOfBytes: ["abcbabc", "b", 2, 4]}, 3);
- // $strLenBytes does not accept null as an input.
- testExpressionBytes(coll, {$indexOfBytes: [null, "b"]}, null, false);
+// $strLenBytes does not accept null as an input.
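+// Hence shouldTestEquivalence is passed as false below to skip the $substrBytes check.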
+testExpressionBytes(coll, {$indexOfBytes: [null, "b"]}, null, false);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 3]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 3]}, -1);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 3, 1]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 3, 1]}, -1);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 3, 5]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "b", 3, 5]}, -1);
- testExpressionBytes(coll, {$indexOfBytes: ["", " "]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["", " "]}, -1);
- testExpressionBytes(coll, {$indexOfBytes: [" ", ""]}, 0);
+testExpressionBytes(coll, {$indexOfBytes: [" ", ""]}, 0);
- testExpressionBytes(coll, {$indexOfBytes: ["", ""]}, 0);
+testExpressionBytes(coll, {$indexOfBytes: ["", ""]}, 0);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "", 3]}, 3);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "", 3]}, 3);
- testExpressionBytes(coll, {$indexOfBytes: ["abc", "", 1]}, 1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc", "", 1]}, 1);
- // Test with multi-byte tokens.
+// Test with multi-byte tokens.
- testExpressionBytes(coll, {$indexOfBytes: ["abcde", "de"]}, 3);
+testExpressionBytes(coll, {$indexOfBytes: ["abcde", "de"]}, 3);
- testExpressionBytes(coll, {$indexOfBytes: ["abcde", "def"]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abcde", "def"]}, -1);
- // Test with non-ASCII characters. Some tests do not test equivalence using $substrBytes because
- // $substrBytes disallows taking a substring that begins or ends in the middle of a UTF-8
- // encoding of a character.
- testExpressionBytes(coll, {$indexOfBytes: ["a∫∫b", "b"]}, 7);
+// Test with non-ASCII characters. Some tests do not test equivalence using $substrBytes because
+// $substrBytes disallows taking a substring that begins or ends in the middle of a UTF-8
+// encoding of a character.
+testExpressionBytes(coll, {$indexOfBytes: ["a∫∫b", "b"]}, 7);
- // $substrBytes would attempt to take the substring from the middle of a UTF-8
- // encoding of a character.
- testExpressionBytes(coll, {$indexOfBytes: ["a∫∫b", "b", 6]}, 7, false);
+// $substrBytes would attempt to take the substring from the middle of a UTF-8
+// encoding of a character.
+testExpressionBytes(coll, {$indexOfBytes: ["a∫∫b", "b", 6]}, 7, false);
- testExpressionBytes(coll, {$indexOfBytes: ["abc∫ba", "∫"]}, 3);
+testExpressionBytes(coll, {$indexOfBytes: ["abc∫ba", "∫"]}, 3);
- testExpressionBytes(coll, {$indexOfBytes: ["∫∫∫", "a"]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["∫∫∫", "a"]}, -1);
- // $substrBytes would attempt to take the substring from the middle of a UTF-8
- // encoding of a character.
- testExpressionBytes(coll, {$indexOfBytes: ["ab∫c", "c", 0, 3]}, -1, false);
+// $substrBytes would attempt to take the substring from the middle of a UTF-8
+// encoding of a character.
+testExpressionBytes(coll, {$indexOfBytes: ["ab∫c", "c", 0, 3]}, -1, false);
- testExpressionBytes(coll, {$indexOfBytes: ["abc∫b∫", "b∫"]}, 6);
+testExpressionBytes(coll, {$indexOfBytes: ["abc∫b∫", "b∫"]}, 6);
- // Test with embedded null bytes.
- testExpressionBytes(coll, {$indexOfBytes: ["abc\0d", "d"]}, 4);
+// Test with embedded null bytes.
+testExpressionBytes(coll, {$indexOfBytes: ["abc\0d", "d"]}, 4);
- testExpressionBytes(coll, {$indexOfBytes: ["abc\0", "\0"]}, 3);
+testExpressionBytes(coll, {$indexOfBytes: ["abc\0", "\0"]}, 3);
- testExpressionBytes(coll, {$indexOfBytes: ["abc\0d\0", "d", 5, 6]}, -1);
+testExpressionBytes(coll, {$indexOfBytes: ["abc\0d\0", "d", 5, 6]}, -1);
- // Error cases.
+// Error cases.
- var pipeline = {
- $project: {
- output: {
- $indexOfBytes: [3, "s"],
- }
- }
- };
- assertErrorCode(coll, pipeline, 40091);
-
- pipeline = {
- $project: {
- output: {
- $indexOfBytes: ["s", 3],
- }
+var pipeline = {
+ $project: {
+ output: {
+ $indexOfBytes: [3, "s"],
}
- };
- assertErrorCode(coll, pipeline, 40092);
-
- pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", "bad"]}}};
- assertErrorCode(coll, pipeline, 40096);
-
- pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", 0, "bad"]}}};
- assertErrorCode(coll, pipeline, 40096);
-
- pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", -1]}}};
- assertErrorCode(coll, pipeline, 40097);
+ }
+};
+assertErrorCode(coll, pipeline, 40091);
- pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", 1, -1]}}};
- assertErrorCode(coll, pipeline, 40097);
+pipeline = {
+ $project: {
+ output: {
+ $indexOfBytes: ["s", 3],
+ }
+ }
+};
+assertErrorCode(coll, pipeline, 40092);
+
+pipeline = {
+ $project: {output: {$indexOfBytes: ["abc", "b", "bad"]}}
+};
+assertErrorCode(coll, pipeline, 40096);
+
+pipeline = {
+ $project: {output: {$indexOfBytes: ["abc", "b", 0, "bad"]}}
+};
+assertErrorCode(coll, pipeline, 40096);
+
+pipeline = {
+ $project: {output: {$indexOfBytes: ["abc", "b", -1]}}
+};
+assertErrorCode(coll, pipeline, 40097);
+
+pipeline = {
+ $project: {output: {$indexOfBytes: ["abc", "b", 1, -1]}}
+};
+assertErrorCode(coll, pipeline, 40097);
}());
diff --git a/jstests/aggregation/expressions/indexof_codepoints.js b/jstests/aggregation/expressions/indexof_codepoints.js
index 506b1a13cfa..acc4a3b072d 100644
--- a/jstests/aggregation/expressions/indexof_codepoints.js
+++ b/jstests/aggregation/expressions/indexof_codepoints.js
@@ -3,117 +3,125 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExpression.
(function() {
- "use strict";
-
- function testExpressionCodePoints(coll, expression, result, shouldTestEquivalence = true) {
- testExpression(coll, expression, result);
-
- var indexOfSpec = expression["$indexOfCP"];
- if (shouldTestEquivalence) {
- // If we are specifying a starting or ending index for the search, we should be able to
- // achieve equivalent behavior using $substrCP.
- var input = indexOfSpec[0];
- var token = indexOfSpec[1];
- var start = indexOfSpec.length > 2 ? indexOfSpec[2] : 0;
- var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {$strLenCP: input};
-
- var substrExpr = {
- $indexOfCP: [{$substrCP: [input, start, {$subtract: [end, start]}]}, token]
- };
-
- // Since the new expression takes the index with respect to a shortened string, the
- // output index will differ from the index with respect to the full length string,
- // unless the output is -1.
- var substrResult = (result === -1) ? -1 : result - start;
-
- testExpression(coll, substrExpr, substrResult);
- }
+"use strict";
+
+function testExpressionCodePoints(coll, expression, result, shouldTestEquivalence = true) {
+ testExpression(coll, expression, result);
+
+ var indexOfSpec = expression["$indexOfCP"];
+ if (shouldTestEquivalence) {
+ // If we are specifying a starting or ending index for the search, we should be able to
+ // achieve equivalent behavior using $substrCP.
+ var input = indexOfSpec[0];
+ var token = indexOfSpec[1];
+ var start = indexOfSpec.length > 2 ? indexOfSpec[2] : 0;
+ var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {$strLenCP: input};
+
+ var substrExpr = {
+ $indexOfCP: [{$substrCP: [input, start, {$subtract: [end, start]}]}, token]
+ };
+
+ // Since the new expression takes the index with respect to a shortened string, the
+ // output index will differ from the index with respect to the full length string,
+ // unless the output is -1.
+ var substrResult = (result === -1) ? -1 : result - start;
+
+ testExpression(coll, substrExpr, substrResult);
}
+}
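+
+// Note added for exposition: code-point and byte indices diverge on multi-byte
+// characters. In "a∫b" the token "b" sits at code-point index 2 but at byte
+// index 4, since "∫" occupies three bytes in UTF-8.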
- var coll = db.indexofcp;
- coll.drop();
+var coll = db.indexofcp;
+coll.drop();
- // Insert a dummy document so something flows through the pipeline.
- assert.writeOK(coll.insert({}));
+// Insert a dummy document so something flows through the pipeline.
+assert.writeOK(coll.insert({}));
- testExpressionCodePoints(coll, {$indexOfCP: ["∫aƒ", "ƒ"]}, 2);
+testExpressionCodePoints(coll, {$indexOfCP: ["∫aƒ", "ƒ"]}, 2);
- testExpressionCodePoints(coll, {$indexOfCP: ["a∫c", "d"]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["a∫c", "d"]}, -1);
- testExpressionCodePoints(coll, {$indexOfCP: ["∫b∫ba", "b", 2]}, 3);
+testExpressionCodePoints(coll, {$indexOfCP: ["∫b∫ba", "b", 2]}, 3);
- testExpressionCodePoints(coll, {$indexOfCP: ["ab∫de", "d", 0, 3]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["ab∫de", "d", 0, 3]}, -1);
- testExpressionCodePoints(coll, {$indexOfCP: ["ab∫de", "d", 0, 4]}, 3);
+testExpressionCodePoints(coll, {$indexOfCP: ["ab∫de", "d", 0, 4]}, 3);
- testExpressionCodePoints(coll, {$indexOfCP: ["øøc", "ø", 1]}, 1);
+testExpressionCodePoints(coll, {$indexOfCP: ["øøc", "ø", 1]}, 1);
- testExpressionCodePoints(coll, {$indexOfCP: ["øƒc", "ƒ", 0, 10]}, 1);
+testExpressionCodePoints(coll, {$indexOfCP: ["øƒc", "ƒ", 0, 10]}, 1);
- testExpressionCodePoints(coll, {$indexOfCP: ["abcbabc", "b", 2, 4]}, 3);
+testExpressionCodePoints(coll, {$indexOfCP: ["abcbabc", "b", 2, 4]}, 3);
- // $strLenCP does not accept null as an input.
- testExpressionCodePoints(coll, {$indexOfCP: [null, "√"]}, null, false);
+// $strLenCP does not accept null as an input.
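+// Hence shouldTestEquivalence is passed as false below to skip the $substrCP check.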
+testExpressionCodePoints(coll, {$indexOfCP: [null, "√"]}, null, false);
- testExpressionCodePoints(coll, {$indexOfCP: ["abc", "b", 3]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["abc", "b", 3]}, -1);
- // We intentionally test an end index that precedes the start index, which is why we
- // cannot use $substrCP to check for equivalence.
- testExpressionCodePoints(coll, {$indexOfCP: ["a√cb", "b", 3, 1]}, -1, false);
+// We intentionally test an end index that precedes the start index, which is why we
+// cannot use $substrCP to check for equivalence.
+testExpressionCodePoints(coll, {$indexOfCP: ["a√cb", "b", 3, 1]}, -1, false);
- testExpressionCodePoints(coll, {$indexOfCP: ["a∫b", "b", 3, 5]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["a∫b", "b", 3, 5]}, -1);
- testExpressionCodePoints(coll, {$indexOfCP: ["", "∫"]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["", "∫"]}, -1);
- testExpressionCodePoints(coll, {$indexOfCP: [" ", ""]}, 0);
+testExpressionCodePoints(coll, {$indexOfCP: [" ", ""]}, 0);
- testExpressionCodePoints(coll, {$indexOfCP: ["", ""]}, 0);
+testExpressionCodePoints(coll, {$indexOfCP: ["", ""]}, 0);
- testExpressionCodePoints(coll, {$indexOfCP: ["abc", "", 1]}, 1);
+testExpressionCodePoints(coll, {$indexOfCP: ["abc", "", 1]}, 1);
- // Test with multi-byte tokens.
+// Test with multi-byte tokens.
- testExpressionCodePoints(coll, {$indexOfCP: ["abcƒe", "ƒe"]}, 3);
+testExpressionCodePoints(coll, {$indexOfCP: ["abcƒe", "ƒe"]}, 3);
- testExpressionCodePoints(coll, {$indexOfCP: ["∫aeøø", "øøø"]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["∫aeøø", "øøø"]}, -1);
- // Test with embedded null bytes.
+// Test with embedded null bytes.
- testExpressionCodePoints(coll, {$indexOfCP: ["ab∫\0d", "d"]}, 4);
+testExpressionCodePoints(coll, {$indexOfCP: ["ab∫\0d", "d"]}, 4);
- testExpressionCodePoints(coll, {$indexOfCP: ["øbc\0", "\0"]}, 3);
+testExpressionCodePoints(coll, {$indexOfCP: ["øbc\0", "\0"]}, 3);
- testExpressionCodePoints(coll, {$indexOfCP: ["πbƒ\0d\0", "d", 5, 6]}, -1);
+testExpressionCodePoints(coll, {$indexOfCP: ["πbƒ\0d\0", "d", 5, 6]}, -1);
- // Error cases.
+// Error cases.
- var pipeline = {
- $project: {
- output: {
- $indexOfCP: [3, "s"],
- }
- }
- };
- assertErrorCode(coll, pipeline, 40093);
-
- pipeline = {
- $project: {
- output: {
- $indexOfCP: ["s", 3],
- }
+var pipeline = {
+ $project: {
+ output: {
+ $indexOfCP: [3, "s"],
}
- };
- assertErrorCode(coll, pipeline, 40094);
-
- pipeline = {$project: {output: {$indexOfCP: ["abc", "b", "bad"]}}};
- assertErrorCode(coll, pipeline, 40096);
-
- pipeline = {$project: {output: {$indexOfCP: ["abc", "b", 0, "bad"]}}};
- assertErrorCode(coll, pipeline, 40096);
-
- pipeline = {$project: {output: {$indexOfCP: ["abc", "b", -1]}}};
- assertErrorCode(coll, pipeline, 40097);
+ }
+};
+assertErrorCode(coll, pipeline, 40093);
- pipeline = {$project: {output: {$indexOfCP: ["abc", "b", 1, -1]}}};
- assertErrorCode(coll, pipeline, 40097);
+pipeline = {
+ $project: {
+ output: {
+ $indexOfCP: ["s", 3],
+ }
+ }
+};
+assertErrorCode(coll, pipeline, 40094);
+
+pipeline = {
+ $project: {output: {$indexOfCP: ["abc", "b", "bad"]}}
+};
+assertErrorCode(coll, pipeline, 40096);
+
+pipeline = {
+ $project: {output: {$indexOfCP: ["abc", "b", 0, "bad"]}}
+};
+assertErrorCode(coll, pipeline, 40096);
+
+pipeline = {
+ $project: {output: {$indexOfCP: ["abc", "b", -1]}}
+};
+assertErrorCode(coll, pipeline, 40097);
+
+pipeline = {
+ $project: {output: {$indexOfCP: ["abc", "b", 1, -1]}}
+};
+assertErrorCode(coll, pipeline, 40097);
}());
diff --git a/jstests/aggregation/expressions/let.js b/jstests/aggregation/expressions/let.js
index 5de6db8eebf..0a9959e0d7e 100644
--- a/jstests/aggregation/expressions/let.js
+++ b/jstests/aggregation/expressions/let.js
@@ -2,50 +2,48 @@
* Basic integration tests for the $let expression.
*/
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- let coll = db.agg_expr_let;
- coll.drop();
- assert.commandWorked(coll.insert({zero: 0, one: 1, two: 2, three: 3, nested: {four: 4}}));
-
- function testExpr(expression, output) {
- const res = coll.aggregate([{$project: {output: expression}}]).toArray();
- assert.eq(res.length, 1, tojson(res));
- assert.eq(res[0].output, output, tojson(res));
-
- // Test in group:
- const result = coll.aggregate({$group: {_id: 0, res: {$sum: expression}}}).toArray();
- assert.eq(result, [{_id: 0, res: output}]);
- }
-
- // Basic tests.
- testExpr('$two', 2);
- testExpr('$$CURRENT.two', 2);
- testExpr('$$ROOT.two', 2);
-
- // Using sub expressions.
- testExpr({$add: ['$two', '$$CURRENT.three']}, 5);
- testExpr({$add: ['$$CURRENT.two', '$$ROOT.nested.four']}, 6);
-
- // Verify that the variables defined in $let work.
- testExpr({$let: {vars: {a: 10}, in : '$$a'}}, 10);
- testExpr({$let: {vars: {a: '$zero'}, in : '$$a'}}, 0);
- testExpr({$let: {vars: {a: {$add: ['$one', '$two']}, b: 10}, in : {$multiply: ['$$a', '$$b']}}},
- 30);
-
- // Verify that the outer level variable works in inner level $let.
- testExpr({
- $let: {
- vars: {var1: 1},
- in : {$let: {vars: {var2: "$$var1"}, in : {$sum: ["$$var1", "$$var2"]}}}
- }
- },
- 2);
-
- // Verify that the outer level variables get overwritten by inner level variables.
- testExpr({
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+let coll = db.agg_expr_let;
+coll.drop();
+assert.commandWorked(coll.insert({zero: 0, one: 1, two: 2, three: 3, nested: {four: 4}}));
+
+function testExpr(expression, output) {
+ const res = coll.aggregate([{$project: {output: expression}}]).toArray();
+ assert.eq(res.length, 1, tojson(res));
+ assert.eq(res[0].output, output, tojson(res));
+
+ // Test in group:
+ const result = coll.aggregate({$group: {_id: 0, res: {$sum: expression}}}).toArray();
+ assert.eq(result, [{_id: 0, res: output}]);
+}
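+
+// For reference (comment added for clarity), the expression shape under test is:
+// {$let: {vars: {<name>: <expression>, ...}, in: <expression using "$$<name>">}}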
+
+// Basic tests.
+testExpr('$two', 2);
+testExpr('$$CURRENT.two', 2);
+testExpr('$$ROOT.two', 2);
+
+// Using sub expressions.
+testExpr({$add: ['$two', '$$CURRENT.three']}, 5);
+testExpr({$add: ['$$CURRENT.two', '$$ROOT.nested.four']}, 6);
+
+// Verify that the variables defined in $let work.
+testExpr({$let: {vars: {a: 10}, in : '$$a'}}, 10);
+testExpr({$let: {vars: {a: '$zero'}, in : '$$a'}}, 0);
+testExpr({$let: {vars: {a: {$add: ['$one', '$two']}, b: 10}, in : {$multiply: ['$$a', '$$b']}}},
+ 30);
+
+// Verify that the outer level variable works in inner level $let.
+testExpr({
+ $let:
+ {vars: {var1: 1}, in : {$let: {vars: {var2: "$$var1"}, in : {$sum: ["$$var1", "$$var2"]}}}}
+},
+ 2);
+
+// Verify that the outer level variables get overwritten by inner level variables.
+testExpr({
$let: {
vars: {var1: "$one"},
in : {$let: {vars: {var2: "$$var1", var1: 3}, in : {$sum: ["$$var2", "$$var1"]}}}
@@ -53,40 +51,39 @@
},
4);
- // $let changing CURRENT
- testExpr({$let: {vars: {CURRENT: '$$ROOT.nested'}, in : {$multiply: ['$four', '$$ROOT.two']}}},
- 8);
- testExpr({
- $let: {
- vars: {CURRENT: '$$CURRENT.nested'}, // using original value of CURRENT
- in : {$multiply: ['$four', '$$ROOT.two']}
- }
- },
- 8);
- testExpr({
- $let: {
- vars: {CURRENT: '$nested'}, // same as last
- in : {$multiply: ['$four', '$$ROOT.two']}
- }
- },
- 8);
- testExpr({
- $let: {
- vars: {CURRENT: {$const: {ten: 10}}}, // "artificial" object
- in : {$multiply: ['$ten', '$$ROOT.two']}
- }
- },
- 20);
- testExpr({
- $let: {
- vars: {CURRENT: '$three'}, // sets current to the number 3 (not an object)
- in : {$multiply: ['$$CURRENT', '$$ROOT.two']}
- }
- },
- 6);
+// $let changing CURRENT
+testExpr({$let: {vars: {CURRENT: '$$ROOT.nested'}, in : {$multiply: ['$four', '$$ROOT.two']}}}, 8);
+testExpr({
+ $let: {
+ vars: {CURRENT: '$$CURRENT.nested'}, // using original value of CURRENT
+ in : {$multiply: ['$four', '$$ROOT.two']}
+ }
+},
+ 8);
+testExpr({
+ $let: {
+ vars: {CURRENT: '$nested'}, // same as last
+ in : {$multiply: ['$four', '$$ROOT.two']}
+ }
+},
+ 8);
+testExpr({
+ $let: {
+ vars: {CURRENT: {$const: {ten: 10}}}, // "artificial" object
+ in : {$multiply: ['$ten', '$$ROOT.two']}
+ }
+},
+ 20);
+testExpr({
+ $let: {
+        vars: {CURRENT: '$three'},  // sets CURRENT to the number 3 (not an object)
+ in : {$multiply: ['$$CURRENT', '$$ROOT.two']}
+ }
+},
+ 6);
- // Swapping with $let (ensures there is no ordering dependency in vars).
- testExpr({
+// Swapping with $let (ensures there is no ordering dependency in vars).
+testExpr({
$let: {
vars: {x: 6, y: 10},
in : {
@@ -99,34 +96,32 @@
}, // Not commutative!
4); // 10-6 not 6-10 or 6-6
- // Unicode is allowed.
- testExpr({$let: {vars: {'日本語': 10}, in : '$$日本語'}},
- 10); // Japanese for "Japanese language".
-
- // Can use ROOT and CURRENT directly with no subfield (SERVER-5916).
- coll.drop();
- coll.insert({_id: 'obj'});
- assert.eq(coll.aggregate({$project: {_id: 0, obj: '$$ROOT'}}).toArray(), [{obj: {_id: 'obj'}}]);
- assert.eq(coll.aggregate({$project: {_id: 0, obj: '$$CURRENT'}}).toArray(),
- [{obj: {_id: 'obj'}}]);
- assert.eq(coll.aggregate({$group: {_id: 0, objs: {$push: '$$ROOT'}}}).toArray(),
- [{_id: 0, objs: [{_id: 'obj'}]}]);
- assert.eq(coll.aggregate({$group: {_id: 0, objs: {$push: '$$CURRENT'}}}).toArray(),
- [{_id: 0, objs: [{_id: 'obj'}]}]);
-
- // Check name validity checks.
- assertErrorCode(coll, {$project: {a: {$let: {vars: {ROOT: 1}, in : '$$ROOT'}}}}, 16867);
- assertErrorCode(coll, {$project: {a: {$let: {vars: {FOO: 1}, in : '$$FOO'}}}}, 16867);
- assertErrorCode(coll, {$project: {a: {$let: {vars: {_underbar: 1}, in : '$$FOO'}}}}, 16867);
- assertErrorCode(coll, {$project: {a: {$let: {vars: {'a.b': 1}, in : '$$FOO'}}}}, 16868);
- assertErrorCode(coll, {$project: {a: {$let: {vars: {'a b': 1}, in : '$$FOO'}}}}, 16868);
- assertErrorCode(coll, {$project: {a: '$$_underbar'}}, 16870);
- assertErrorCode(coll, {$project: {a: '$$with spaces'}}, 16871);
-
- // Verify that variables defined in '$let' cannot be used to initialize other variables.
- assertErrorCode(
- coll,
- [{$project: {output: {$let: {vars: {var1: "$one", var2: "$$var1"}, in : "$$var1"}}}}],
- 17276);
-
+// Unicode is allowed.
+testExpr({$let: {vars: {'日本語': 10}, in : '$$日本語'}},
+ 10); // Japanese for "Japanese language".
+
+// Can use ROOT and CURRENT directly with no subfield (SERVER-5916).
+coll.drop();
+coll.insert({_id: 'obj'});
+assert.eq(coll.aggregate({$project: {_id: 0, obj: '$$ROOT'}}).toArray(), [{obj: {_id: 'obj'}}]);
+assert.eq(coll.aggregate({$project: {_id: 0, obj: '$$CURRENT'}}).toArray(), [{obj: {_id: 'obj'}}]);
+assert.eq(coll.aggregate({$group: {_id: 0, objs: {$push: '$$ROOT'}}}).toArray(),
+ [{_id: 0, objs: [{_id: 'obj'}]}]);
+assert.eq(coll.aggregate({$group: {_id: 0, objs: {$push: '$$CURRENT'}}}).toArray(),
+ [{_id: 0, objs: [{_id: 'obj'}]}]);
+
+// Check name validity checks.
+assertErrorCode(coll, {$project: {a: {$let: {vars: {ROOT: 1}, in : '$$ROOT'}}}}, 16867);
+assertErrorCode(coll, {$project: {a: {$let: {vars: {FOO: 1}, in : '$$FOO'}}}}, 16867);
+assertErrorCode(coll, {$project: {a: {$let: {vars: {_underbar: 1}, in : '$$FOO'}}}}, 16867);
+assertErrorCode(coll, {$project: {a: {$let: {vars: {'a.b': 1}, in : '$$FOO'}}}}, 16868);
+assertErrorCode(coll, {$project: {a: {$let: {vars: {'a b': 1}, in : '$$FOO'}}}}, 16868);
+assertErrorCode(coll, {$project: {a: '$$_underbar'}}, 16870);
+assertErrorCode(coll, {$project: {a: '$$with spaces'}}, 16871);
+
+// Verify that variables defined in '$let' cannot be used to initialize other variables.
+assertErrorCode(
+ coll,
+ [{$project: {output: {$let: {vars: {var1: "$one", var2: "$$var1"}, in : "$$var1"}}}}],
+ 17276);
}());
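
As a standalone illustration of the pattern the tests above exercise, here is a minimal $let
sketch that binds an intermediate computation once and reuses it; the collection name is
hypothetical and not part of this patch:

(function() {
"use strict";
const coll = db.agg_let_sketch;  // hypothetical collection, for illustration only
coll.drop();
assert.commandWorked(coll.insert({_id: 0, price: 100, discount: 0.2}));
const res = coll.aggregate([{
    $project: {
        _id: 0,
        // Bind the discount amount once, then reuse it in the 'in' expression.
        total: {
            $let: {
                vars: {off: {$multiply: ["$price", "$discount"]}},
                in : {$subtract: ["$price", "$$off"]}
            }
        }
    }
}]).toArray();
assert.eq(res, [{total: 80}]);  // 100 - (100 * 0.2)
}());
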
diff --git a/jstests/aggregation/expressions/merge_objects.js b/jstests/aggregation/expressions/merge_objects.js
index 599e182ff5a..e6d38ccc6a4 100644
--- a/jstests/aggregation/expressions/merge_objects.js
+++ b/jstests/aggregation/expressions/merge_objects.js
@@ -1,160 +1,147 @@
// Tests for the $mergeObjects aggregation expression.
(function() {
- "use strict";
-
- // For assertErrorCode().
- load("jstests/aggregation/extras/utils.js");
-
- let coll = db.merge_object_expr;
- coll.drop();
-
- // Test merging two objects together.
- assert.writeOK(coll.insert({_id: 0, subObject: {b: 1, c: 1}}));
- let result = coll.aggregate([
- {$match: {_id: 0}},
- {$project: {mergedDocument: {$mergeObjects: ["$subObject", {d: 1}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 0, mergedDocument: {b: 1, c: 1, d: 1}}]);
-
- // Test merging the root document with a new field.
- assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
- result =
- coll.aggregate([
- {$match: {_id: 1}},
- {$project: {mergedDocument: {$mergeObjects: ["$$ROOT", {newField: "newValue"}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 1, mergedDocument: {_id: 1, a: 0, b: 1, newField: "newValue"}}]);
-
- // Test replacing a field in the root.
- assert.writeOK(coll.insert({_id: 2, a: 0, b: 1}));
- result = coll.aggregate([
- {$match: {_id: 2}},
- {$project: {mergedDocument: {$mergeObjects: ["$$ROOT", {a: "newValue"}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 2, mergedDocument: {_id: 2, a: "newValue", b: 1}}]);
-
- // Test overriding a document with root.
- assert.writeOK(coll.insert({_id: 3, a: 0, b: 1}));
- result =
- coll.aggregate([
- {$match: {_id: 3}},
- {$project: {mergedDocument: {$mergeObjects: [{a: "defaultValue"}, "$$ROOT"]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 3, mergedDocument: {a: 0, _id: 3, b: 1}}]);
-
- // Test replacing root with merged document.
- assert.writeOK(coll.insert({_id: 4, a: 0, subObject: {b: 1, c: 2}}));
- result = coll.aggregate([
- {$match: {_id: 4}},
- {$replaceRoot: {newRoot: {$mergeObjects: ["$$ROOT", "$subObject"]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 4, a: 0, subObject: {b: 1, c: 2}, b: 1, c: 2}]);
-
- // Test merging with an embedded object.
- assert.writeOK(coll.insert({_id: 5, subObject: {b: 1, c: 1}}));
- result = coll.aggregate([
- {$match: {_id: 5}},
- {
- $project: {
- mergedDocument:
- {$mergeObjects: ["$subObject", {subObject1: {d: 1}}, {e: 1}]}
- }
- }
- ])
- .toArray();
- assert.eq(result, [{_id: 5, mergedDocument: {b: 1, c: 1, subObject1: {d: 1}, e: 1}}]);
-
- // Test for errors on non-document types.
- assert.writeOK(coll.insert({_id: 6, a: "string"}));
- assertErrorCode(coll,
- [
- {$match: {_id: 6}},
- {$project: {mergedDocument: {$mergeObjects: ["$a", {a: "newString"}]}}}
- ],
- 40400);
-
- assert.writeOK(coll.insert({_id: 7, a: {b: 1}, c: 1}));
- assertErrorCode(
- coll,
- [{$match: {_id: 7}}, {$project: {mergedDocument: {$mergeObjects: ["$a", "$c"]}}}],
- 40400);
-
- // Test outputs with null values.
- assert.writeOK(coll.insert({_id: 8, a: {b: 1}}));
- result = coll.aggregate([
- {$match: {_id: 8}},
- {$project: {mergedDocument: {$mergeObjects: ["$a", {b: null}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 8, mergedDocument: {b: null}}]);
+"use strict";
- // Test output with undefined values.
- assert.writeOK(coll.insert({_id: 9, a: {b: 1}}));
- result = coll.aggregate([
- {$match: {_id: 9}},
- {$project: {mergedDocument: {$mergeObjects: ["$a", {b: undefined}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 9, mergedDocument: {b: undefined}}]);
-
- // Test output with missing values.
- assert.writeOK(coll.insert({_id: 10, a: {b: 1}}));
- result =
- coll.aggregate([
- {$match: {_id: 10}},
- {$project: {mergedDocument: {$mergeObjects: ["$a", {b: "$nonExistentField"}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 10, mergedDocument: {b: 1}}]);
-
- assert.writeOK(coll.insert({_id: 11, a: {b: 1}}));
- result = coll.aggregate([
- {$match: {_id: 11}},
- {$project: {mergedDocument: {$mergeObjects: ["$a", {b: ""}]}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 11, mergedDocument: {b: ""}}]);
+// For assertErrorCode().
+load("jstests/aggregation/extras/utils.js");
- // Test outputs with empty values.
- assert.writeOK(coll.insert({_id: 12, b: 1, c: 1}));
- result =
- coll.aggregate([{$match: {_id: 12}}, {$project: {mergedDocument: {$mergeObjects: [{}]}}}])
- .toArray();
- assert.eq(result, [{_id: 12, mergedDocument: {}}]);
+let coll = db.merge_object_expr;
+coll.drop();
- result = coll.aggregate(
- [{$match: {_id: 12}}, {$project: {mergedDocument: {$mergeObjects: [{}, {}]}}}])
- .toArray();
- assert.eq(result, [{_id: 12, mergedDocument: {}}]);
-
- // Test merge within a $group stage.
- assert.writeOK(coll.insert({_id: 13, group: 1, obj: {}}));
- assert.writeOK(coll.insert({_id: 14, group: 1, obj: {a: 2, b: 2}}));
- assert.writeOK(coll.insert({_id: 15, group: 1, obj: {a: 1, c: 3}}));
- assert.writeOK(coll.insert({_id: 16, group: 2, obj: {a: 1, b: 1}}));
- result = coll.aggregate([
- {$match: {_id: {$in: [13, 14, 15, 16]}}},
- {$sort: {_id: 1}},
- {$group: {_id: "$group", mergedDocument: {$mergeObjects: "$obj"}}},
- {$sort: {_id: 1}},
- ])
- .toArray();
- assert.eq(
- result,
- [{_id: 1, mergedDocument: {a: 1, b: 2, c: 3}}, {_id: 2, mergedDocument: {a: 1, b: 1}}]);
-
- // Test merge with $$REMOVE operator.
- assert.writeOK(coll.insert({_id: 17, a: {b: 2}}));
- result = coll.aggregate([
- {$match: {_id: 17}},
- {$project: {mergedDocument: {$mergeObjects: ["$a", {b: "$$REMOVE"}]}}}
+// Test merging two objects together.
+assert.writeOK(coll.insert({_id: 0, subObject: {b: 1, c: 1}}));
+let result = coll.aggregate([
+ {$match: {_id: 0}},
+ {$project: {mergedDocument: {$mergeObjects: ["$subObject", {d: 1}]}}}
])
.toArray();
- assert.eq(result, [{_id: 17, mergedDocument: {b: 2}}]);
-
+assert.eq(result, [{_id: 0, mergedDocument: {b: 1, c: 1, d: 1}}]);
+
+// Test merging the root document with a new field.
+assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
+result = coll.aggregate([
+ {$match: {_id: 1}},
+ {$project: {mergedDocument: {$mergeObjects: ["$$ROOT", {newField: "newValue"}]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 1, mergedDocument: {_id: 1, a: 0, b: 1, newField: "newValue"}}]);
+
+// Test replacing a field in the root.
+assert.writeOK(coll.insert({_id: 2, a: 0, b: 1}));
+result = coll.aggregate([
+ {$match: {_id: 2}},
+ {$project: {mergedDocument: {$mergeObjects: ["$$ROOT", {a: "newValue"}]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 2, mergedDocument: {_id: 2, a: "newValue", b: 1}}]);
+
+// Test overriding a document with root.
+assert.writeOK(coll.insert({_id: 3, a: 0, b: 1}));
+result = coll.aggregate([
+ {$match: {_id: 3}},
+ {$project: {mergedDocument: {$mergeObjects: [{a: "defaultValue"}, "$$ROOT"]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 3, mergedDocument: {a: 0, _id: 3, b: 1}}]);
+
+// Test replacing root with merged document.
+assert.writeOK(coll.insert({_id: 4, a: 0, subObject: {b: 1, c: 2}}));
+result = coll.aggregate([
+ {$match: {_id: 4}},
+ {$replaceRoot: {newRoot: {$mergeObjects: ["$$ROOT", "$subObject"]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 4, a: 0, subObject: {b: 1, c: 2}, b: 1, c: 2}]);
+
+// Test merging with an embedded object.
+assert.writeOK(coll.insert({_id: 5, subObject: {b: 1, c: 1}}));
+result =
+ coll.aggregate([
+ {$match: {_id: 5}},
+ {
+ $project:
+ {mergedDocument: {$mergeObjects: ["$subObject", {subObject1: {d: 1}}, {e: 1}]}}
+ }
+ ])
+ .toArray();
+assert.eq(result, [{_id: 5, mergedDocument: {b: 1, c: 1, subObject1: {d: 1}, e: 1}}]);
+
+// Test for errors on non-document types.
+assert.writeOK(coll.insert({_id: 6, a: "string"}));
+assertErrorCode(
+ coll,
+ [{$match: {_id: 6}}, {$project: {mergedDocument: {$mergeObjects: ["$a", {a: "newString"}]}}}],
+ 40400);
+
+assert.writeOK(coll.insert({_id: 7, a: {b: 1}, c: 1}));
+assertErrorCode(
+ coll, [{$match: {_id: 7}}, {$project: {mergedDocument: {$mergeObjects: ["$a", "$c"]}}}], 40400);
+
+// Test outputs with null values.
+assert.writeOK(coll.insert({_id: 8, a: {b: 1}}));
+result =
+ coll.aggregate(
+ [{$match: {_id: 8}}, {$project: {mergedDocument: {$mergeObjects: ["$a", {b: null}]}}}])
+ .toArray();
+assert.eq(result, [{_id: 8, mergedDocument: {b: null}}]);
+
+// Test output with undefined values.
+assert.writeOK(coll.insert({_id: 9, a: {b: 1}}));
+result = coll.aggregate([
+ {$match: {_id: 9}},
+ {$project: {mergedDocument: {$mergeObjects: ["$a", {b: undefined}]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 9, mergedDocument: {b: undefined}}]);
+
+// Test output with missing values.
+assert.writeOK(coll.insert({_id: 10, a: {b: 1}}));
+result = coll.aggregate([
+ {$match: {_id: 10}},
+ {$project: {mergedDocument: {$mergeObjects: ["$a", {b: "$nonExistentField"}]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 10, mergedDocument: {b: 1}}]);
+
+assert.writeOK(coll.insert({_id: 11, a: {b: 1}}));
+result =
+ coll.aggregate(
+ [{$match: {_id: 11}}, {$project: {mergedDocument: {$mergeObjects: ["$a", {b: ""}]}}}])
+ .toArray();
+assert.eq(result, [{_id: 11, mergedDocument: {b: ""}}]);
+
+// Test outputs with empty values.
+assert.writeOK(coll.insert({_id: 12, b: 1, c: 1}));
+result = coll.aggregate([{$match: {_id: 12}}, {$project: {mergedDocument: {$mergeObjects: [{}]}}}])
+ .toArray();
+assert.eq(result, [{_id: 12, mergedDocument: {}}]);
+
+result =
+ coll.aggregate([{$match: {_id: 12}}, {$project: {mergedDocument: {$mergeObjects: [{}, {}]}}}])
+ .toArray();
+assert.eq(result, [{_id: 12, mergedDocument: {}}]);
+
+// Test merge within a $group stage.
+assert.writeOK(coll.insert({_id: 13, group: 1, obj: {}}));
+assert.writeOK(coll.insert({_id: 14, group: 1, obj: {a: 2, b: 2}}));
+assert.writeOK(coll.insert({_id: 15, group: 1, obj: {a: 1, c: 3}}));
+assert.writeOK(coll.insert({_id: 16, group: 2, obj: {a: 1, b: 1}}));
+result = coll.aggregate([
+ {$match: {_id: {$in: [13, 14, 15, 16]}}},
+ {$sort: {_id: 1}},
+ {$group: {_id: "$group", mergedDocument: {$mergeObjects: "$obj"}}},
+ {$sort: {_id: 1}},
+ ])
+ .toArray();
+assert.eq(result,
+ [{_id: 1, mergedDocument: {a: 1, b: 2, c: 3}}, {_id: 2, mergedDocument: {a: 1, b: 1}}]);
+
+// Test merge with $$REMOVE operator.
+assert.writeOK(coll.insert({_id: 17, a: {b: 2}}));
+result = coll.aggregate([
+ {$match: {_id: 17}},
+ {$project: {mergedDocument: {$mergeObjects: ["$a", {b: "$$REMOVE"}]}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 17, mergedDocument: {b: 2}}]);
}());
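
The tests above rely on $mergeObjects resolving key collisions in favor of the right-most
argument. A minimal standalone sketch, with a hypothetical collection name:

(function() {
"use strict";
const coll = db.merge_objects_sketch;  // hypothetical collection, for illustration only
coll.drop();
assert.commandWorked(coll.insert({_id: 0, defaults: {a: 1, b: 1}, overrides: {b: 2}}));
const res = coll.aggregate([{$project: {merged: {$mergeObjects: ["$defaults", "$overrides"]}}}])
                .toArray();
// On a key collision the right-most argument wins, so 'b' comes from 'overrides'.
assert.eq(res, [{_id: 0, merged: {a: 1, b: 2}}]);
}());
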
diff --git a/jstests/aggregation/expressions/objectToArray.js b/jstests/aggregation/expressions/objectToArray.js
index 5f92b0ae1a9..0ec4a40c2c0 100644
--- a/jstests/aggregation/expressions/objectToArray.js
+++ b/jstests/aggregation/expressions/objectToArray.js
@@ -1,95 +1,90 @@
// Tests for the $objectToArray aggregation expression.
(function() {
- "use strict";
-
- // For assertErrorCode().
- load("jstests/aggregation/extras/utils.js");
-
- let coll = db.object_to_array_expr;
- coll.drop();
-
- let object_to_array_expr = {$project: {expanded: {$objectToArray: "$subDoc"}}};
-
- // $objectToArray correctly converts a document to an array of key-value pairs.
- assert.writeOK(coll.insert({_id: 0, subDoc: {"a": 1, "b": 2, "c": "foo"}}));
- let result = coll.aggregate([{$match: {_id: 0}}, object_to_array_expr]).toArray();
- assert.eq(
- result,
- [{_id: 0, expanded: [{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": "foo"}]}]);
-
- assert.writeOK(coll.insert({_id: 1, subDoc: {"y": []}}));
- result = coll.aggregate([{$match: {_id: 1}}, object_to_array_expr]).toArray();
- assert.eq(result, [{_id: 1, expanded: [{"k": "y", "v": []}]}]);
-
- assert.writeOK(coll.insert({_id: 2, subDoc: {"a": 1, "b": {"d": "string"}, "c": [1, 2]}}));
- result = coll.aggregate([{$match: {_id: 2}}, object_to_array_expr]).toArray();
- assert.eq(
- result, [{
- _id: 2,
- expanded:
- [{"k": "a", "v": 1}, {"k": "b", "v": {"d": "string"}}, {"k": "c", "v": [1, 2]}]
- }]);
-
- assert.writeOK(coll.insert({_id: 3, subDoc: {}}));
- result = coll.aggregate([{$match: {_id: 3}}, object_to_array_expr]).toArray();
- assert.eq(result, [{_id: 3, expanded: []}]);
-
- // Turns to array from the root of the document.
- assert.writeOK(coll.insert({_id: 4, "a": 1, "b": 2, "c": 3}));
- result =
- coll.aggregate([{$match: {_id: 4}}, {$project: {document: {$objectToArray: "$$ROOT"}}}])
- .toArray();
- assert.eq(result, [
- {
- _id: 4,
- document:
- [{"k": "_id", "v": 4}, {"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": 3}]
- }
- ]);
-
- assert.writeOK(coll.insert({_id: 5, "date": ISODate("2017-01-24T00:00:00")}));
- result = coll.aggregate([
- {$match: {_id: 5}},
- {$project: {document: {$objectToArray: {dayOfWeek: {$dayOfWeek: "$date"}}}}}
- ])
- .toArray();
- assert.eq(result, [{_id: 5, document: [{"k": "dayOfWeek", "v": 3}]}]);
-
- // $objectToArray errors on non-document types.
- assert.writeOK(coll.insert({_id: 6, subDoc: "string"}));
- assertErrorCode(coll, [{$match: {_id: 6}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 7, subDoc: ObjectId()}));
- assertErrorCode(coll, [{$match: {_id: 7}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 8, subDoc: NumberLong(0)}));
- assertErrorCode(coll, [{$match: {_id: 8}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 9, subDoc: []}));
- assertErrorCode(coll, [{$match: {_id: 9}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 10, subDoc: [0]}));
- assertErrorCode(coll, [{$match: {_id: 10}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 11, subDoc: ["string"]}));
- assertErrorCode(coll, [{$match: {_id: 11}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 12, subDoc: [{"a": "b"}]}));
- assertErrorCode(coll, [{$match: {_id: 12}}, object_to_array_expr], 40390);
-
- assert.writeOK(coll.insert({_id: 13, subDoc: NaN}));
- assertErrorCode(coll, [{$match: {_id: 13}}, object_to_array_expr], 40390);
-
- // $objectToArray outputs null on null-ish types.
- assert.writeOK(coll.insert({_id: 14, subDoc: null}));
- result = coll.aggregate([{$match: {_id: 14}}, object_to_array_expr]).toArray();
- assert.eq(result, [{_id: 14, expanded: null}]);
-
- assert.writeOK(coll.insert({_id: 15, subDoc: undefined}));
- result = coll.aggregate([{$match: {_id: 15}}, object_to_array_expr]).toArray();
- assert.eq(result, [{_id: 15, expanded: null}]);
-
- assert.writeOK(coll.insert({_id: 16}));
- result = coll.aggregate([{$match: {_id: 16}}, object_to_array_expr]).toArray();
- assert.eq(result, [{_id: 16, expanded: null}]);
+"use strict";
+
+// For assertErrorCode().
+load("jstests/aggregation/extras/utils.js");
+
+let coll = db.object_to_array_expr;
+coll.drop();
+
+let object_to_array_expr = {$project: {expanded: {$objectToArray: "$subDoc"}}};
+
+// $objectToArray correctly converts a document to an array of key-value pairs.
+assert.writeOK(coll.insert({_id: 0, subDoc: {"a": 1, "b": 2, "c": "foo"}}));
+let result = coll.aggregate([{$match: {_id: 0}}, object_to_array_expr]).toArray();
+assert.eq(result,
+ [{_id: 0, expanded: [{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": "foo"}]}]);
+
+assert.writeOK(coll.insert({_id: 1, subDoc: {"y": []}}));
+result = coll.aggregate([{$match: {_id: 1}}, object_to_array_expr]).toArray();
+assert.eq(result, [{_id: 1, expanded: [{"k": "y", "v": []}]}]);
+
+assert.writeOK(coll.insert({_id: 2, subDoc: {"a": 1, "b": {"d": "string"}, "c": [1, 2]}}));
+result = coll.aggregate([{$match: {_id: 2}}, object_to_array_expr]).toArray();
+assert.eq(
+ result, [{
+ _id: 2,
+ expanded: [{"k": "a", "v": 1}, {"k": "b", "v": {"d": "string"}}, {"k": "c", "v": [1, 2]}]
+ }]);
+
+assert.writeOK(coll.insert({_id: 3, subDoc: {}}));
+result = coll.aggregate([{$match: {_id: 3}}, object_to_array_expr]).toArray();
+assert.eq(result, [{_id: 3, expanded: []}]);
+
+// Converts the root of the document to an array.
+assert.writeOK(coll.insert({_id: 4, "a": 1, "b": 2, "c": 3}));
+result = coll.aggregate([{$match: {_id: 4}}, {$project: {document: {$objectToArray: "$$ROOT"}}}])
+ .toArray();
+assert.eq(
+ result, [{
+ _id: 4,
+ document: [{"k": "_id", "v": 4}, {"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "c", "v": 3}]
+ }]);
+
+assert.writeOK(coll.insert({_id: 5, "date": ISODate("2017-01-24T00:00:00")}));
+result = coll.aggregate([
+ {$match: {_id: 5}},
+ {$project: {document: {$objectToArray: {dayOfWeek: {$dayOfWeek: "$date"}}}}}
+ ])
+ .toArray();
+assert.eq(result, [{_id: 5, document: [{"k": "dayOfWeek", "v": 3}]}]);
+
+// $objectToArray errors on non-document types.
+assert.writeOK(coll.insert({_id: 6, subDoc: "string"}));
+assertErrorCode(coll, [{$match: {_id: 6}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 7, subDoc: ObjectId()}));
+assertErrorCode(coll, [{$match: {_id: 7}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 8, subDoc: NumberLong(0)}));
+assertErrorCode(coll, [{$match: {_id: 8}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 9, subDoc: []}));
+assertErrorCode(coll, [{$match: {_id: 9}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 10, subDoc: [0]}));
+assertErrorCode(coll, [{$match: {_id: 10}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 11, subDoc: ["string"]}));
+assertErrorCode(coll, [{$match: {_id: 11}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 12, subDoc: [{"a": "b"}]}));
+assertErrorCode(coll, [{$match: {_id: 12}}, object_to_array_expr], 40390);
+
+assert.writeOK(coll.insert({_id: 13, subDoc: NaN}));
+assertErrorCode(coll, [{$match: {_id: 13}}, object_to_array_expr], 40390);
+
+// $objectToArray outputs null on null-ish types.
+assert.writeOK(coll.insert({_id: 14, subDoc: null}));
+result = coll.aggregate([{$match: {_id: 14}}, object_to_array_expr]).toArray();
+assert.eq(result, [{_id: 14, expanded: null}]);
+
+assert.writeOK(coll.insert({_id: 15, subDoc: undefined}));
+result = coll.aggregate([{$match: {_id: 15}}, object_to_array_expr]).toArray();
+assert.eq(result, [{_id: 15, expanded: null}]);
+
+assert.writeOK(coll.insert({_id: 16}));
+result = coll.aggregate([{$match: {_id: 16}}, object_to_array_expr]).toArray();
+assert.eq(result, [{_id: 16, expanded: null}]);
}());
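
Because $objectToArray emits {k, v} pairs, it round-trips through its inverse expression,
$arrayToObject. A minimal sketch, with a hypothetical collection name:

(function() {
"use strict";
const coll = db.object_to_array_sketch;  // hypothetical collection, for illustration only
coll.drop();
assert.commandWorked(coll.insert({_id: 0, subDoc: {a: 1, b: 2}}));
const res = coll.aggregate([{
    $project: {
        pairs: {$objectToArray: "$subDoc"},
        roundTrip: {$arrayToObject: {$objectToArray: "$subDoc"}}
    }
}]).toArray();
assert.eq(res[0].pairs, [{k: "a", v: 1}, {k: "b", v: 2}]);
assert.eq(res[0].roundTrip, {a: 1, b: 2});
}());
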
diff --git a/jstests/aggregation/expressions/object_ids_for_date_expressions.js b/jstests/aggregation/expressions/object_ids_for_date_expressions.js
index dae3ce0d280..bff8ab587b3 100644
--- a/jstests/aggregation/expressions/object_ids_for_date_expressions.js
+++ b/jstests/aggregation/expressions/object_ids_for_date_expressions.js
@@ -1,98 +1,97 @@
// SERVER-9406: Allow ObjectId type to be treated as a date in date related expressions
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/dateutil.js');
+load('jstests/libs/dateutil.js');
- const coll = db.server9406;
- let testOpCount = 0;
+const coll = db.server9406;
+let testOpCount = 0;
- coll.drop();
+coll.drop();
- // Seed collection so that the pipeline will execute.
- assert.writeOK(coll.insert({}));
+// Seed collection so that the pipeline will execute.
+assert.writeOK(coll.insert({}));
- function makeObjectIdFromDate(dt) {
- try {
- return new ObjectId((dt.getTime() / 1000).toString(16) + "f000000000000000");
- } catch (e) {
- assert("Invalid date for conversion to Object Id: " + dt);
- }
+function makeObjectIdFromDate(dt) {
+ try {
+ return new ObjectId((dt.getTime() / 1000).toString(16) + "f000000000000000");
+ } catch (e) {
+ assert("Invalid date for conversion to Object Id: " + dt);
}
+}
- /**
- * Helper for testing that 'op' on 'value' is the same for dates as equivalent ObjectIds
- * 'value' is either a date value, or an object containing field 'date'.
- */
- function testOp(op, value) {
- testOpCount++;
+/**
+ * Helper for testing that 'op' on 'value' gives the same result for dates as for equivalent
+ * ObjectIds. 'value' is either a date value or an object containing the field 'date'.
+ */
+function testOp(op, value) {
+ testOpCount++;
- let pipeline = [{$project: {_id: 0, result: {}}}];
- pipeline[0].$project.result[op] = value;
- let res1 = coll.aggregate(pipeline).toArray()[0];
- if (value.date) {
- value.date = makeObjectIdFromDate(value.date);
- } else {
- value = makeObjectIdFromDate(value);
- }
- pipeline[0].$project.result[op] = value;
- let res2 = coll.aggregate(pipeline).toArray()[0];
-
- assert.eq(res2.result, res1.result, tojson(pipeline));
+ let pipeline = [{$project: {_id: 0, result: {}}}];
+ pipeline[0].$project.result[op] = value;
+ let res1 = coll.aggregate(pipeline).toArray()[0];
+ if (value.date) {
+ value.date = makeObjectIdFromDate(value.date);
+ } else {
+ value = makeObjectIdFromDate(value);
}
+ pipeline[0].$project.result[op] = value;
+ let res2 = coll.aggregate(pipeline).toArray()[0];
- testOp('$dateToString', {date: new Date("1980-12-31T23:59:59Z"), format: "%V-%G"});
- testOp('$dateToString', {date: new Date("1980-12-31T23:59:59Z"), format: "%G-%V"});
+ assert.eq(res2.result, res1.result, tojson(pipeline));
+}
- const years = [
- 2002, // Starting and ending on Tuesday.
- 2014, // Starting and ending on Wednesday.
- 2015, // Starting and ending on Thursday.
- 2010, // Starting and ending on Friday.
- 2011, // Starting and ending on Saturday.
- 2006, // Starting and ending on Sunday.
- 1996, // Starting on Monday, ending on Tuesday.
- 2008, // Starting on Tuesday, ending on Wednesday.
- 1992, // Starting on Wednesday, ending on Thursday.
- 2004, // Starting on Thursday, ending on Friday.
- 2016, // Starting on Friday, ending on Saturday.
- 2000, // Starting on Saturday, ending on Sunday (special).
- 2012 // Starting on Sunday, ending on Monday.
- ];
+testOp('$dateToString', {date: new Date("1980-12-31T23:59:59Z"), format: "%V-%G"});
+testOp('$dateToString', {date: new Date("1980-12-31T23:59:59Z"), format: "%G-%V"});
- const day = 1;
- years.forEach(function(year) {
- // forEach starts indexing at zero but weekdays start with Monday on 1 so we add +1.
- let newYear = DateUtil.getNewYear(year);
- let endOfFirstWeekInYear = DateUtil.getEndOfFirstWeekInYear(year, day);
- let startOfSecondWeekInYear = DateUtil.getStartOfSecondWeekInYear(year, day);
- let birthday = DateUtil.getBirthday(year);
- let newYearsEve = DateUtil.getNewYearsEve(year);
- let now = new Date();
- now.setYear(year);
- now.setMilliseconds(0);
+const years = [
+ 2002, // Starting and ending on Tuesday.
+ 2014, // Starting and ending on Wednesday.
+ 2015, // Starting and ending on Thursday.
+ 2010, // Starting and ending on Friday.
+ 2011, // Starting and ending on Saturday.
+ 2006, // Starting and ending on Sunday.
+ 1996, // Starting on Monday, ending on Tuesday.
+ 2008, // Starting on Tuesday, ending on Wednesday.
+ 1992, // Starting on Wednesday, ending on Thursday.
+ 2004, // Starting on Thursday, ending on Friday.
+ 2016, // Starting on Friday, ending on Saturday.
+ 2000, // Starting on Saturday, ending on Sunday (special).
+ 2012 // Starting on Sunday, ending on Monday.
+];
- testOp('$isoDayOfWeek', newYear);
- testOp('$isoDayOfWeek', endOfFirstWeekInYear);
- testOp('$isoDayOfWeek', startOfSecondWeekInYear);
- testOp('$isoWeekYear', birthday);
+const day = 1;
+years.forEach(function(year) {
+    // forEach indexes from zero, but ISO weekdays start with Monday at 1, so we add 1.
+ let newYear = DateUtil.getNewYear(year);
+ let endOfFirstWeekInYear = DateUtil.getEndOfFirstWeekInYear(year, day);
+ let startOfSecondWeekInYear = DateUtil.getStartOfSecondWeekInYear(year, day);
+ let birthday = DateUtil.getBirthday(year);
+ let newYearsEve = DateUtil.getNewYearsEve(year);
+ let now = new Date();
+ now.setYear(year);
+ now.setMilliseconds(0);
- testOp('$isoWeek', newYear);
- testOp('$isoWeek', now);
- testOp('$isoWeekYear', newYear);
- testOp('$isoWeek', endOfFirstWeekInYear);
- testOp('$dateToString', {format: '%G-W%V-%u', date: newYear});
- testOp('$isoWeek', endOfFirstWeekInYear);
- testOp('$year', endOfFirstWeekInYear);
- testOp('$month', endOfFirstWeekInYear);
- testOp('$dayOfMonth', endOfFirstWeekInYear);
- testOp('$dayOfWeek', birthday);
- testOp('$dayOfWeek', newYearsEve);
- testOp('$minute', newYearsEve);
- testOp('$second', now);
- testOp('$millisecond', newYear);
+ testOp('$isoDayOfWeek', newYear);
+ testOp('$isoDayOfWeek', endOfFirstWeekInYear);
+ testOp('$isoDayOfWeek', startOfSecondWeekInYear);
+ testOp('$isoWeekYear', birthday);
- });
- assert.eq(testOpCount, 236, 'Expected 236 tests to run');
+ testOp('$isoWeek', newYear);
+ testOp('$isoWeek', now);
+ testOp('$isoWeekYear', newYear);
+ testOp('$isoWeek', endOfFirstWeekInYear);
+ testOp('$dateToString', {format: '%G-W%V-%u', date: newYear});
+ testOp('$isoWeek', endOfFirstWeekInYear);
+ testOp('$year', endOfFirstWeekInYear);
+ testOp('$month', endOfFirstWeekInYear);
+ testOp('$dayOfMonth', endOfFirstWeekInYear);
+ testOp('$dayOfWeek', birthday);
+ testOp('$dayOfWeek', newYearsEve);
+ testOp('$minute', newYearsEve);
+ testOp('$second', now);
+ testOp('$millisecond', newYear);
+});
+assert.eq(testOpCount, 236, 'Expected 236 tests to run');
})();
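
The equivalence tested above holds because an ObjectId embeds its creation time, in seconds
since the Unix epoch, in its leading four bytes, and the date expressions read that timestamp
directly. A minimal sketch with a hand-built ObjectId (hypothetical collection name; the hex
value is chosen purely for illustration):

(function() {
"use strict";
const coll = db.oid_date_sketch;  // hypothetical collection, for illustration only
coll.drop();
assert.commandWorked(coll.insert({}));  // seed so the pipeline emits one document
// 0x5c96e9a0 seconds after the epoch is 2019-03-24T02:21:20Z.
const oid = new ObjectId("5c96e9a0f000000000000000");
const res = coll.aggregate([{$project: {_id: 0, year: {$year: oid}}}]).toArray();
assert.eq(res, [{year: 2019}]);
}());
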
diff --git a/jstests/aggregation/expressions/reduce.js b/jstests/aggregation/expressions/reduce.js
index 54a66fc8b56..2565b88ed60 100644
--- a/jstests/aggregation/expressions/reduce.js
+++ b/jstests/aggregation/expressions/reduce.js
@@ -3,26 +3,20 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExpression.
(function() {
- "use strict";
+"use strict";
- var coll = db.reduce;
+var coll = db.reduce;
- testExpression(
- coll,
- {
- $reduce:
- {input: [1, 2, 3], initialValue: {$literal: 0}, in : {$sum: ["$$this", "$$value"]}}
- },
- 6);
- testExpression(coll, {$reduce: {input: [], initialValue: {$literal: 0}, in : 10}}, 0);
- testExpression(
- coll,
- {
- $reduce:
- {input: [1, 2, 3], initialValue: [], in : {$concatArrays: ["$$value", ["$$this"]]}}
- },
- [1, 2, 3]);
- testExpression(coll,
+testExpression(
+ coll,
+ {$reduce: {input: [1, 2, 3], initialValue: {$literal: 0}, in : {$sum: ["$$this", "$$value"]}}},
+ 6);
+testExpression(coll, {$reduce: {input: [], initialValue: {$literal: 0}, in : 10}}, 0);
+testExpression(
+ coll,
+ {$reduce: {input: [1, 2, 3], initialValue: [], in : {$concatArrays: ["$$value", ["$$this"]]}}},
+ [1, 2, 3]);
+testExpression(coll,
{
$reduce: {
input: [1, 2],
@@ -32,8 +26,8 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
},
[[], []]);
- // A nested $reduce which sums each subarray, then multiplies the results.
- testExpression(coll,
+// A nested $reduce which sums each subarray, then multiplies the results.
+testExpression(coll,
{
$reduce: {
input: [[1, 2, 3], [4, 5]],
@@ -54,9 +48,9 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
},
54);
- // A nested $reduce using a $let to allow the inner $reduce to access the variables of the
- // outer.
- testExpression(coll,
+// A nested $reduce using a $let to allow the inner $reduce to access the variables of the
+// outer.
+testExpression(coll,
{
$reduce: {
input: [[0, 1], [2, 3]],
@@ -84,19 +78,18 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
},
{allElements: [0, 1, 2, 3], sumOfInner: 6});
- // Nullish input produces null as an output.
- testExpression(coll, {$reduce: {input: null, initialValue: {$literal: 0}, in : 5}}, null);
- testExpression(
- coll, {$reduce: {input: "$nonexistent", initialValue: {$literal: 0}, in : 5}}, null);
+// Nullish input produces null as an output.
+testExpression(coll, {$reduce: {input: null, initialValue: {$literal: 0}, in : 5}}, null);
+testExpression(coll, {$reduce: {input: "$nonexistent", initialValue: {$literal: 0}, in : 5}}, null);
- // Error cases for $reduce.
+// Error cases for $reduce.
- // $reduce requires an object.
- var pipeline = {$project: {reduced: {$reduce: 0}}};
- assertErrorCode(coll, pipeline, 40075);
+// $reduce requires an object.
+var pipeline = {$project: {reduced: {$reduce: 0}}};
+assertErrorCode(coll, pipeline, 40075);
- // Unknown field specified.
- pipeline = {
+// Unknown field specified.
+pipeline = {
$project: {
reduced: {
$reduce: {
@@ -108,27 +101,35 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
}
}
};
- assertErrorCode(coll, pipeline, 40076);
+assertErrorCode(coll, pipeline, 40076);
- // $reduce requires input to be specified.
- pipeline = {$project: {reduced: {$reduce: {initialValue: {$literal: 0}, in : {$literal: 0}}}}};
- assertErrorCode(coll, pipeline, 40077);
+// $reduce requires input to be specified.
+pipeline = {
+ $project: {reduced: {$reduce: {initialValue: {$literal: 0}, in : {$literal: 0}}}}
+};
+assertErrorCode(coll, pipeline, 40077);
- // $reduce requires initialValue to be specified.
- pipeline = {$project: {reduced: {$reduce: {input: {$literal: 0}, in : {$literal: 0}}}}};
- assertErrorCode(coll, pipeline, 40078);
+// $reduce requires initialValue to be specified.
+pipeline = {
+ $project: {reduced: {$reduce: {input: {$literal: 0}, in : {$literal: 0}}}}
+};
+assertErrorCode(coll, pipeline, 40078);
- // $reduce requires in to be specified.
- pipeline = {
- $project: {reduced: {$reduce: {input: {$literal: 0}, initialValue: {$literal: 0}}}}
- };
- assertErrorCode(coll, pipeline, 40079);
+// $reduce requires in to be specified.
+pipeline = {
+ $project: {reduced: {$reduce: {input: {$literal: 0}, initialValue: {$literal: 0}}}}
+};
+assertErrorCode(coll, pipeline, 40079);
- // $$value is undefined in the non-'in' arguments of $reduce.
- pipeline = {$project: {reduced: {$reduce: {input: "$$value", initialValue: [], in : []}}}};
- assertErrorCode(coll, pipeline, 17276);
+// $$value is undefined in the non-'in' arguments of $reduce.
+pipeline = {
+ $project: {reduced: {$reduce: {input: "$$value", initialValue: [], in : []}}}
+};
+assertErrorCode(coll, pipeline, 17276);
- // $$this is undefined in the non-'in' arguments of $reduce.
- pipeline = {$project: {reduced: {$reduce: {input: "$$this", initialValue: [], in : []}}}};
- assertErrorCode(coll, pipeline, 17276);
+// $$this is undefined in the non-'in' arguments of $reduce.
+pipeline = {
+ $project: {reduced: {$reduce: {input: "$$this", initialValue: [], in : []}}}
+};
+assertErrorCode(coll, pipeline, 17276);
}());
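
To make the accumulator flow above concrete, here is a minimal $reduce sketch that reverses an
array by prepending $$this to $$value on each step; the collection name is hypothetical:

(function() {
"use strict";
const coll = db.reduce_sketch;  // hypothetical collection, for illustration only
coll.drop();
assert.commandWorked(coll.insert({_id: 0, arr: [1, 2, 3]}));
const res = coll.aggregate([{
    $project: {
        reversed: {
            $reduce: {
                input: "$arr",
                initialValue: [],
                // Prepend the current element, so the final value comes out reversed.
                in : {$concatArrays: [["$$this"], "$$value"]}
            }
        }
    }
}]).toArray();
assert.eq(res, [{_id: 0, reversed: [3, 2, 1]}]);
}());
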
diff --git a/jstests/aggregation/expressions/regex.js b/jstests/aggregation/expressions/regex.js
index 2713828e5c6..bf128ec04cc 100644
--- a/jstests/aggregation/expressions/regex.js
+++ b/jstests/aggregation/expressions/regex.js
@@ -2,509 +2,490 @@
* Tests for $regexFind, $regexFindAll and $regexMatch aggregation expressions.
*/
(function() {
- 'use strict';
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode().
- const coll = db.regex_find_expr;
- coll.drop();
+'use strict';
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode().
+const coll = db.regex_find_expr;
+coll.drop();
- function testRegex(expression, inputObj, expectedOutput) {
- const result =
- coll.aggregate([
- {"$project": {_id: 0, "matches": {[expression]: inputObj}}},
- {"$sort": {"matches": 1}} // Sort to ensure the documents are returned in a
- // deterministic order for sharded clusters.
- ])
- .toArray();
- assert.eq(result, expectedOutput);
- }
- function testRegexForKey(expression, key, inputObj, expectedMatchObj) {
- const result =
- coll.aggregate(
- [{"$match": {"_id": key}}, {"$project": {"matches": {[expression]: inputObj}}}])
- .toArray();
- const expectedOutput = [{"_id": key, "matches": expectedMatchObj}];
- assert.eq(result, expectedOutput);
- }
+function testRegex(expression, inputObj, expectedOutput) {
+ const result =
+ coll.aggregate([
+ {"$project": {_id: 0, "matches": {[expression]: inputObj}}},
+ {"$sort": {"matches": 1}} // Sort to ensure the documents are returned in a
+ // deterministic order for sharded clusters.
+ ])
+ .toArray();
+ assert.eq(result, expectedOutput);
+}
+function testRegexForKey(expression, key, inputObj, expectedMatchObj) {
+ const result =
+ coll.aggregate(
+ [{"$match": {"_id": key}}, {"$project": {"matches": {[expression]: inputObj}}}])
+ .toArray();
+ const expectedOutput = [{"_id": key, "matches": expectedMatchObj}];
+ assert.eq(result, expectedOutput);
+}
- /**
- * This function validates the output against $regexFind, $regexFindAll and $regexMatch
- * expressions.
- */
- function testRegexFindAgg(inputObj, expectedOutputForFindAll) {
- testRegex("$regexFindAll", inputObj, expectedOutputForFindAll);
- // For each of the output document, get first element from "matches" array. This will
- // convert 'regexFindAll' output to 'regexFind' output.
- const expectedOutputForFind = expectedOutputForFindAll.map(
- (element) => ({matches: element.matches.length == 0 ? null : element.matches[0]}));
- testRegex("$regexFind", inputObj, expectedOutputForFind);
+/**
+ * This function validates the output against $regexFind, $regexFindAll and $regexMatch
+ * expressions.
+ */
+function testRegexFindAgg(inputObj, expectedOutputForFindAll) {
+ testRegex("$regexFindAll", inputObj, expectedOutputForFindAll);
+    // For each output document, take the first element of the "matches" array. This
+    // converts 'regexFindAll' output into 'regexFind' output.
+ const expectedOutputForFind = expectedOutputForFindAll.map(
+ (element) => ({matches: element.matches.length == 0 ? null : element.matches[0]}));
+ testRegex("$regexFind", inputObj, expectedOutputForFind);
- // For each of the output document, if there is at least one element in the array, then
- // there is a match.
- const expectedOutputForMatch =
- expectedOutputForFindAll.map((element) => ({matches: element.matches.length != 0}));
- testRegex("$regexMatch", inputObj, expectedOutputForMatch);
- }
+    // For each output document, there is a match if the "matches" array contains at
+    // least one element.
+ const expectedOutputForMatch =
+ expectedOutputForFindAll.map((element) => ({matches: element.matches.length != 0}));
+ testRegex("$regexMatch", inputObj, expectedOutputForMatch);
+}
- /**
- * This function validates the output against $regexFind, $regexFindAll and $regexMatch
- * expressions.
- */
- function testRegexFindAggForKey(key, inputObj, expectedOutputForFindAll) {
- testRegexForKey("$regexFindAll", key, inputObj, expectedOutputForFindAll);
+/**
+ * This function validates the output against $regexFind, $regexFindAll and $regexMatch
+ * expressions.
+ */
+function testRegexFindAggForKey(key, inputObj, expectedOutputForFindAll) {
+ testRegexForKey("$regexFindAll", key, inputObj, expectedOutputForFindAll);
- const expectedOutputForFind =
- expectedOutputForFindAll.length == 0 ? null : expectedOutputForFindAll[0];
- testRegexForKey("$regexFind", key, inputObj, expectedOutputForFind);
+ const expectedOutputForFind =
+ expectedOutputForFindAll.length == 0 ? null : expectedOutputForFindAll[0];
+ testRegexForKey("$regexFind", key, inputObj, expectedOutputForFind);
- const expectedOutputForMatch = expectedOutputForFindAll.length != 0;
- testRegexForKey("$regexMatch", key, inputObj, expectedOutputForMatch);
- }
+ const expectedOutputForMatch = expectedOutputForFindAll.length != 0;
+ testRegexForKey("$regexMatch", key, inputObj, expectedOutputForMatch);
+}
- /**
- * This function validates the output against $regexFind, $regexFindAll and $regexMatch
- * expressions.
- */
- function testRegexAggException(inputObj, exceptionCode) {
- assertErrorCode(
- coll, [{"$project": {"matches": {"$regexFindAll": inputObj}}}], exceptionCode);
- assertErrorCode(coll, [{"$project": {"matches": {"$regexFind": inputObj}}}], exceptionCode);
- assertErrorCode(
- coll, [{"$project": {"matches": {"$regexMatch": inputObj}}}], exceptionCode);
- }
+/**
+ * This function asserts that $regexFind, $regexFindAll and $regexMatch all fail with the
+ * expected error code.
+ */
+function testRegexAggException(inputObj, exceptionCode) {
+ assertErrorCode(coll, [{"$project": {"matches": {"$regexFindAll": inputObj}}}], exceptionCode);
+ assertErrorCode(coll, [{"$project": {"matches": {"$regexFind": inputObj}}}], exceptionCode);
+ assertErrorCode(coll, [{"$project": {"matches": {"$regexMatch": inputObj}}}], exceptionCode);
+}
- (function testWithSingleMatch() {
- // Regex in string notation, find with multiple captures and matches.
- assert.commandWorked(coll.insert({_id: 0, text: "Simple Example "}));
- testRegexFindAggForKey(0, {input: "$text", regex: "(m(p))"}, [
- {"match": "mp", "idx": 2, "captures": ["mp", "p"]},
- {"match": "mp", "idx": 10, "captures": ["mp", "p"]}
- ]);
- // Regex in json syntax, with multiple captures and matches.
- testRegexFindAggForKey(0, {input: "$text", regex: /(m(p))/}, [
- {"match": "mp", "idx": 2, "captures": ["mp", "p"]},
- {"match": "mp", "idx": 10, "captures": ["mp", "p"]}
- ]);
- // Verify no overlapping match sub-strings.
- assert.commandWorked(coll.insert({_id: 112, text: "aaaaa aaaa"}));
- testRegexFindAggForKey(112, {input: "$text", regex: /(aa)/}, [
- {"match": "aa", "idx": 0, "captures": ["aa"]},
- {"match": "aa", "idx": 2, "captures": ["aa"]},
- {"match": "aa", "idx": 6, "captures": ["aa"]},
- {"match": "aa", "idx": 8, "captures": ["aa"]}
- ]);
- testRegexFindAggForKey(112, {input: "$text", regex: /(aa)+/}, [
- {"match": "aaaa", "idx": 0, "captures": ["aa"]},
- {"match": "aaaa", "idx": 6, "captures": ["aa"]}
- ]);
- // Verify greedy match.
- testRegexFindAggForKey(112, {input: "$text", regex: /(a+)/}, [
- {"match": "aaaaa", "idx": 0, "captures": ["aaaaa"]},
- {"match": "aaaa", "idx": 6, "captures": ["aaaa"]},
- ]);
- testRegexFindAggForKey(112, {input: "$text", regex: /(a)+/}, [
- {"match": "aaaaa", "idx": 0, "captures": ["a"]},
- {"match": "aaaa", "idx": 6, "captures": ["a"]},
- ]);
- // Verify lazy match.
- assert.commandWorked(coll.insert({_id: 113, text: "aaa aa"}));
- testRegexFindAggForKey(113, {input: "$text", regex: /(a+?)/}, [
- {"match": "a", "idx": 0, "captures": ["a"]},
- {"match": "a", "idx": 1, "captures": ["a"]},
- {"match": "a", "idx": 2, "captures": ["a"]},
- {"match": "a", "idx": 4, "captures": ["a"]},
- {"match": "a", "idx": 5, "captures": ["a"]}
- ]);
- testRegexFindAggForKey(113, {input: "$text", regex: /(a*?)/}, [
- {"match": "", "idx": 0, "captures": [""]},
- {"match": "", "idx": 1, "captures": [""]},
- {"match": "", "idx": 2, "captures": [""]},
- {"match": "", "idx": 3, "captures": [""]},
- {"match": "", "idx": 4, "captures": [""]},
- {"match": "", "idx": 5, "captures": [""]}
- ]);
+(function testWithSingleMatch() {
+ // Regex in string notation, find with multiple captures and matches.
+ assert.commandWorked(coll.insert({_id: 0, text: "Simple Example "}));
+ testRegexFindAggForKey(0, {input: "$text", regex: "(m(p))"}, [
+ {"match": "mp", "idx": 2, "captures": ["mp", "p"]},
+ {"match": "mp", "idx": 10, "captures": ["mp", "p"]}
+ ]);
+ // Regex in json syntax, with multiple captures and matches.
+ testRegexFindAggForKey(0, {input: "$text", regex: /(m(p))/}, [
+ {"match": "mp", "idx": 2, "captures": ["mp", "p"]},
+ {"match": "mp", "idx": 10, "captures": ["mp", "p"]}
+ ]);
+    // Verify that matched sub-strings do not overlap.
+ assert.commandWorked(coll.insert({_id: 112, text: "aaaaa aaaa"}));
+ testRegexFindAggForKey(112, {input: "$text", regex: /(aa)/}, [
+ {"match": "aa", "idx": 0, "captures": ["aa"]},
+ {"match": "aa", "idx": 2, "captures": ["aa"]},
+ {"match": "aa", "idx": 6, "captures": ["aa"]},
+ {"match": "aa", "idx": 8, "captures": ["aa"]}
+ ]);
+ testRegexFindAggForKey(112, {input: "$text", regex: /(aa)+/}, [
+ {"match": "aaaa", "idx": 0, "captures": ["aa"]},
+ {"match": "aaaa", "idx": 6, "captures": ["aa"]}
+ ]);
+ // Verify greedy match.
+ testRegexFindAggForKey(112, {input: "$text", regex: /(a+)/}, [
+ {"match": "aaaaa", "idx": 0, "captures": ["aaaaa"]},
+ {"match": "aaaa", "idx": 6, "captures": ["aaaa"]},
+ ]);
+ testRegexFindAggForKey(112, {input: "$text", regex: /(a)+/}, [
+ {"match": "aaaaa", "idx": 0, "captures": ["a"]},
+ {"match": "aaaa", "idx": 6, "captures": ["a"]},
+ ]);
+ // Verify lazy match.
+ assert.commandWorked(coll.insert({_id: 113, text: "aaa aa"}));
+ testRegexFindAggForKey(113, {input: "$text", regex: /(a+?)/}, [
+ {"match": "a", "idx": 0, "captures": ["a"]},
+ {"match": "a", "idx": 1, "captures": ["a"]},
+ {"match": "a", "idx": 2, "captures": ["a"]},
+ {"match": "a", "idx": 4, "captures": ["a"]},
+ {"match": "a", "idx": 5, "captures": ["a"]}
+ ]);
+ testRegexFindAggForKey(113, {input: "$text", regex: /(a*?)/}, [
+ {"match": "", "idx": 0, "captures": [""]},
+ {"match": "", "idx": 1, "captures": [""]},
+ {"match": "", "idx": 2, "captures": [""]},
+ {"match": "", "idx": 3, "captures": [""]},
+ {"match": "", "idx": 4, "captures": [""]},
+ {"match": "", "idx": 5, "captures": [""]}
+ ]);
- // Regex string groups within group.
- testRegexFindAggForKey(
- 0,
- {input: "$text", regex: "((S)(i)(m)(p)(l)(e))"},
- [{"match": "Simple", "idx": 0, "captures": ["Simple", "S", "i", "m", "p", "l", "e"]}]);
- testRegexFindAggForKey(
- 0,
- {input: "$text", regex: "(S)(i)(m)((p)(l)(e))"},
- [{"match": "Simple", "idx": 0, "captures": ["S", "i", "m", "ple", "p", "l", "e"]}]);
+    // Regex string with capture groups nested within a group.
+ testRegexFindAggForKey(
+ 0,
+ {input: "$text", regex: "((S)(i)(m)(p)(l)(e))"},
+ [{"match": "Simple", "idx": 0, "captures": ["Simple", "S", "i", "m", "p", "l", "e"]}]);
+ testRegexFindAggForKey(
+ 0,
+ {input: "$text", regex: "(S)(i)(m)((p)(l)(e))"},
+ [{"match": "Simple", "idx": 0, "captures": ["S", "i", "m", "ple", "p", "l", "e"]}]);
- // Regex email pattern.
- assert.commandWorked(
- coll.insert({_id: 1, text: "Some field text with email mongo@mongodb.com"}));
- testRegexFindAggForKey(
- 1,
- {input: "$text", regex: "([a-zA-Z0-9._-]+)@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+"},
- [{"match": "mongo@mongodb.com", "idx": 27, "captures": ["mongo"]}]);
+ // Regex email pattern.
+ assert.commandWorked(
+ coll.insert({_id: 1, text: "Some field text with email mongo@mongodb.com"}));
+ testRegexFindAggForKey(
+ 1,
+ {input: "$text", regex: "([a-zA-Z0-9._-]+)@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+"},
+ [{"match": "mongo@mongodb.com", "idx": 27, "captures": ["mongo"]}]);
- // Regex digits.
- assert.commandWorked(coll.insert({_id: 5, text: "Text with 02 digits"}));
- testRegexFindAggForKey(
- 5, {input: "$text", regex: /[0-9]+/}, [{"match": "02", "idx": 10, "captures": []}]);
- testRegexFindAggForKey(
- 5, {input: "$text", regex: /(\d+)/}, [{"match": "02", "idx": 10, "captures": ["02"]}]);
+ // Regex digits.
+ assert.commandWorked(coll.insert({_id: 5, text: "Text with 02 digits"}));
+ testRegexFindAggForKey(
+ 5, {input: "$text", regex: /[0-9]+/}, [{"match": "02", "idx": 10, "captures": []}]);
+ testRegexFindAggForKey(
+ 5, {input: "$text", regex: /(\d+)/}, [{"match": "02", "idx": 10, "captures": ["02"]}]);
- // Regex a non-capture group.
- assert.commandWorked(coll.insert({_id: 6, text: "1,2,3,4,5,6,7,8,9,10"}));
- testRegexFindAggForKey(6,
- {input: "$text", regex: /^(?:1|a)\,([0-9]+)/},
- [{"match": "1,2", "idx": 0, "captures": ["2"]}]);
+ // Regex a non-capture group.
+ assert.commandWorked(coll.insert({_id: 6, text: "1,2,3,4,5,6,7,8,9,10"}));
+ testRegexFindAggForKey(6,
+ {input: "$text", regex: /^(?:1|a)\,([0-9]+)/},
+ [{"match": "1,2", "idx": 0, "captures": ["2"]}]);
- // Regex quantifier.
- assert.commandWorked(coll.insert({_id: 7, text: "abc12defgh345jklm"}));
- testRegexFindAggForKey(
- 7, {input: "$text", regex: /[0-9]{3}/}, [{"match": "345", "idx": 10, "captures": []}]);
+ // Regex quantifier.
+ assert.commandWorked(coll.insert({_id: 7, text: "abc12defgh345jklm"}));
+ testRegexFindAggForKey(
+ 7, {input: "$text", regex: /[0-9]{3}/}, [{"match": "345", "idx": 10, "captures": []}]);
- // Regex case insensitive option.
- assert.commandWorked(coll.insert({_id: 8, text: "This Is Camel Case"}));
- testRegexFindAggForKey(8, {input: "$text", regex: /camel/}, []);
- testRegexFindAggForKey(
- 8, {input: "$text", regex: /camel/i}, [{"match": "Camel", "idx": 8, "captures": []}]);
- testRegexFindAggForKey(8,
- {input: "$text", regex: /camel/, options: "i"},
- [{"match": "Camel", "idx": 8, "captures": []}]);
- testRegexFindAggForKey(8,
- {input: "$text", regex: "camel", options: "i"},
- [{"match": "Camel", "idx": 8, "captures": []}]);
+    // Regex case-insensitive option.
+ assert.commandWorked(coll.insert({_id: 8, text: "This Is Camel Case"}));
+ testRegexFindAggForKey(8, {input: "$text", regex: /camel/}, []);
+ testRegexFindAggForKey(
+ 8, {input: "$text", regex: /camel/i}, [{"match": "Camel", "idx": 8, "captures": []}]);
+ testRegexFindAggForKey(8,
+ {input: "$text", regex: /camel/, options: "i"},
+ [{"match": "Camel", "idx": 8, "captures": []}]);
+ testRegexFindAggForKey(8,
+ {input: "$text", regex: "camel", options: "i"},
+ [{"match": "Camel", "idx": 8, "captures": []}]);
- // Regex multi line option.
- assert.commandWorked(coll.insert({_id: 9, text: "Foo line1\nFoo line2\nFoo line3"}));
- // Verify no match with options flag off.
- testRegexFindAggForKey(9, {input: "$text", regex: /^Foo line\d$/}, []);
- // Verify match when flag is on.
- testRegexFindAggForKey(9, {input: "$text", regex: /(^Foo line\d$)/m}, [
- {"match": "Foo line1", "idx": 0, "captures": ["Foo line1"]},
- {"match": "Foo line2", "idx": 10, "captures": ["Foo line2"]},
- {"match": "Foo line3", "idx": 20, "captures": ["Foo line3"]}
- ]);
+    // Regex multi-line option.
+ assert.commandWorked(coll.insert({_id: 9, text: "Foo line1\nFoo line2\nFoo line3"}));
+ // Verify no match with options flag off.
+ testRegexFindAggForKey(9, {input: "$text", regex: /^Foo line\d$/}, []);
+ // Verify match when flag is on.
+ testRegexFindAggForKey(9, {input: "$text", regex: /(^Foo line\d$)/m}, [
+ {"match": "Foo line1", "idx": 0, "captures": ["Foo line1"]},
+ {"match": "Foo line2", "idx": 10, "captures": ["Foo line2"]},
+ {"match": "Foo line3", "idx": 20, "captures": ["Foo line3"]}
+ ]);
- // Regex single line option.
- testRegexFindAggForKey(9, {input: "$text", regex: "Foo.*line"}, [
- {"match": "Foo line", "idx": 0, "captures": []},
- {"match": "Foo line", "idx": 10, "captures": []},
- {"match": "Foo line", "idx": 20, "captures": []}
- ]);
- testRegexFindAggForKey(
- 9,
- {input: "$text", regex: "Foo.*line", options: "s"},
- [{"match": "Foo line1\nFoo line2\nFoo line", "idx": 0, "captures": []}]);
+    // Regex single-line option.
+ testRegexFindAggForKey(9, {input: "$text", regex: "Foo.*line"}, [
+ {"match": "Foo line", "idx": 0, "captures": []},
+ {"match": "Foo line", "idx": 10, "captures": []},
+ {"match": "Foo line", "idx": 20, "captures": []}
+ ]);
+ testRegexFindAggForKey(9,
+ {input: "$text", regex: "Foo.*line", options: "s"},
+ [{"match": "Foo line1\nFoo line2\nFoo line", "idx": 0, "captures": []}]);
- // Regex extended option.
- testRegexFindAggForKey(9, {input: "$text", regex: "F o o # a comment"}, []);
- testRegexFindAggForKey(9, {input: "$text", regex: "F o o # a comment", options: "x"}, [
+ // Regex extended option.
+ testRegexFindAggForKey(9, {input: "$text", regex: "F o o # a comment"}, []);
+ testRegexFindAggForKey(9, {input: "$text", regex: "F o o # a comment", options: "x"}, [
+ {"match": "Foo", "idx": 0, "captures": []},
+ {"match": "Foo", "idx": 10, "captures": []},
+ {"match": "Foo", "idx": 20, "captures": []}
+ ]);
+ testRegexFindAggForKey(
+ 9, {input: "$text", regex: "F o o # a comment \n\n# ignored", options: "x"}, [
{"match": "Foo", "idx": 0, "captures": []},
{"match": "Foo", "idx": 10, "captures": []},
{"match": "Foo", "idx": 20, "captures": []}
]);
- testRegexFindAggForKey(
- 9, {input: "$text", regex: "F o o # a comment \n\n# ignored", options: "x"}, [
- {"match": "Foo", "idx": 0, "captures": []},
- {"match": "Foo", "idx": 10, "captures": []},
- {"match": "Foo", "idx": 20, "captures": []}
- ]);
- testRegexFindAggForKey(9, {input: "$text", regex: "(F o o) # a comment", options: "x"}, [
- {"match": "Foo", "idx": 0, "captures": ["Foo"]},
- {"match": "Foo", "idx": 10, "captures": ["Foo"]},
- {"match": "Foo", "idx": 20, "captures": ["Foo"]}
- ]);
+ testRegexFindAggForKey(9, {input: "$text", regex: "(F o o) # a comment", options: "x"}, [
+ {"match": "Foo", "idx": 0, "captures": ["Foo"]},
+ {"match": "Foo", "idx": 10, "captures": ["Foo"]},
+ {"match": "Foo", "idx": 20, "captures": ["Foo"]}
+ ]);
- // Regex pattern from a document field value.
- assert.commandWorked(
- coll.insert({_id: 10, text: "Simple Value Example", pattern: "(m(p))"}));
- testRegexFindAggForKey(10, {input: "$text", regex: "$pattern"}, [
- {"match": "mp", "idx": 2, "captures": ["mp", "p"]},
- {"match": "mp", "idx": 16, "captures": ["mp", "p"]}
- ]);
- assert.commandWorked(coll.insert({_id: 11, text: "OtherText", pattern: /(T(e))xt$/}));
- testRegexFindAggForKey(11,
- {input: "$text", regex: "$pattern"},
- [{"match": "Text", "idx": 5, "captures": ["Te", "e"]}]);
+ // Regex pattern from a document field value.
+ assert.commandWorked(coll.insert({_id: 10, text: "Simple Value Example", pattern: "(m(p))"}));
+ testRegexFindAggForKey(10, {input: "$text", regex: "$pattern"}, [
+ {"match": "mp", "idx": 2, "captures": ["mp", "p"]},
+ {"match": "mp", "idx": 16, "captures": ["mp", "p"]}
+ ]);
+ assert.commandWorked(coll.insert({_id: 11, text: "OtherText", pattern: /(T(e))xt$/}));
+ testRegexFindAggForKey(11,
+ {input: "$text", regex: "$pattern"},
+ [{"match": "Text", "idx": 5, "captures": ["Te", "e"]}]);
- // Empty input matches empty regex.
- testRegexFindAggForKey(
- 0, {input: "", regex: ""}, [{"match": "", "idx": 0, "captures": []}]);
- // Empty captures groups.
- testRegexFindAggForKey(0, {input: "bbbb", regex: "()"}, [
- {"match": "", "idx": 0, "captures": [""]},
- {"match": "", "idx": 1, "captures": [""]},
- {"match": "", "idx": 2, "captures": [""]},
- {"match": "", "idx": 3, "captures": [""]}
- ]);
- // No matches.
- testRegexFindAggForKey(0, {input: "$text", regex: /foo/}, []);
- // Regex null.
- testRegexFindAggForKey(0, {input: "$text", regex: null}, []);
- // Input null.
- testRegexFindAggForKey(0, {input: null, regex: /valid/}, []);
- // Both null.
- testRegexFindAggForKey(0, {input: null, regex: null}, []);
- testRegexFindAggForKey(
- 0, {input: "$missingField", regex: "$missingField", options: "i"}, []);
- testRegexFindAggForKey(0, {input: "$missingField", regex: "$$REMOVE", options: "i"}, []);
- })();
+ // Empty input matches empty regex.
+ testRegexFindAggForKey(0, {input: "", regex: ""}, [{"match": "", "idx": 0, "captures": []}]);
+    // Empty capture groups.
+ testRegexFindAggForKey(0, {input: "bbbb", regex: "()"}, [
+ {"match": "", "idx": 0, "captures": [""]},
+ {"match": "", "idx": 1, "captures": [""]},
+ {"match": "", "idx": 2, "captures": [""]},
+ {"match": "", "idx": 3, "captures": [""]}
+ ]);
+ // No matches.
+ testRegexFindAggForKey(0, {input: "$text", regex: /foo/}, []);
+ // Regex null.
+ testRegexFindAggForKey(0, {input: "$text", regex: null}, []);
+ // Input null.
+ testRegexFindAggForKey(0, {input: null, regex: /valid/}, []);
+ // Both null.
+ testRegexFindAggForKey(0, {input: null, regex: null}, []);
+ testRegexFindAggForKey(0, {input: "$missingField", regex: "$missingField", options: "i"}, []);
+ testRegexFindAggForKey(0, {input: "$missingField", regex: "$$REMOVE", options: "i"}, []);
+})();
- (function testWithStartOptions() {
- coll.drop();
- assert.commandWorked(coll.insert({_id: 2, text: "cafétéria"}));
- assert.commandWorked(coll.insert({_id: 3, text: "ab\ncd"}));
+(function testWithStartOptions() {
+ coll.drop();
+ assert.commandWorked(coll.insert({_id: 2, text: "cafétéria"}));
+ assert.commandWorked(coll.insert({_id: 3, text: "ab\ncd"}));
- // LIMIT_MATCH option to limit the number of comparisons PCRE does internally.
- testRegexAggException({input: "$text", regex: "(*LIMIT_MATCH=1)fé"}, 51156);
- testRegexFindAggForKey(2,
- {input: "$text", regex: "(*LIMIT_MATCH=3)(fé)"},
- [{"match": "fé", "idx": 2, "captures": ["fé"]}]);
+ // The LIMIT_MATCH option limits the number of internal match steps PCRE performs.
+ testRegexAggException({input: "$text", regex: "(*LIMIT_MATCH=1)fé"}, 51156);
+ testRegexFindAggForKey(2,
+ {input: "$text", regex: "(*LIMIT_MATCH=3)(fé)"},
+ [{"match": "fé", "idx": 2, "captures": ["fé"]}]);
- // (*LF) sets the newline convention to Unix-style and (*CR) to Windows-style, so '\n' can
- // be matched by '.' under (*CR) but not under (*LF).
- testRegexFindAggForKey(3, {input: "$text", regex: "(*LF)ab.cd"}, []);
- testRegexFindAggForKey(3,
- {input: "$text", regex: "(*CR)ab.cd"},
- [{"match": "ab\ncd", "idx": 0, "captures": []}]);
+ // (*LF) sets the newline convention to Unix-style and (*CR) to Windows-style, so '\n' can
+ // be matched by '.' under (*CR) but not under (*LF).
+ testRegexFindAggForKey(3, {input: "$text", regex: "(*LF)ab.cd"}, []);
+ testRegexFindAggForKey(
+ 3, {input: "$text", regex: "(*CR)ab.cd"}, [{"match": "ab\ncd", "idx": 0, "captures": []}]);
- // Multiple start options.
- testRegexFindAggForKey(2,
- {input: "$text", regex: String.raw `(*LIMIT_MATCH=5)(*UCP)^(\w+)`},
- [{"match": "cafétéria", "idx": 0, "captures": ["cafétéria"]}]);
- testRegexAggException({input: "$text", regex: String.raw `(*LIMIT_MATCH=1)(*UCP)^(\w+)`},
- 51156);
- })();
+ // Multiple start options.
+ testRegexFindAggForKey(2,
+ {input: "$text", regex: String.raw`(*LIMIT_MATCH=5)(*UCP)^(\w+)`},
+ [{"match": "cafétéria", "idx": 0, "captures": ["cafétéria"]}]);
+ testRegexAggException({input: "$text", regex: String.raw`(*LIMIT_MATCH=1)(*UCP)^(\w+)`}, 51156);
+})();
- (function testWithUnicodeData() {
- coll.drop();
- // Unicode index counting.
- assert.commandWorked(coll.insert({_id: 2, text: "cafétéria"}));
- assert.commandWorked(coll.insert({_id: 3, text: "मा०गो डीबि"}));
- testRegexFindAggForKey(
- 2, {input: "$text", regex: "té"}, [{"match": "té", "idx": 4, "captures": []}]);
- testRegexFindAggForKey(
- 3, {input: "$text", regex: /म/}, [{"match": "म", "idx": 0, "captures": []}]);
- // Unicode with capture group.
- testRegexFindAggForKey(3,
- {input: "$text", regex: /(गो )/},
- [{"match": "गो ", "idx": 3, "captures": ["गो "]}]);
- // Test that regexes support Unicode character properties.
- testRegexFindAggForKey(2, {input: "$text", regex: String.raw `\p{Hangul}`}, []);
- testRegexFindAggForKey(2,
- {input: "$text", regex: String.raw `\p{Latin}+$`},
- [{"match": "cafétéria", "idx": 0, "captures": []}]);
- // Test that the (*UTF) and (*UTF8) options are accepted for Unicode characters.
- assert.commandWorked(coll.insert({_id: 12, text: "༢༣༤༤༤༥12༥A"}));
- testRegexFindAggForKey(12, {input: "$text", regex: "(*UTF8)༤"}, [
- {"match": "༤", "idx": 2, "captures": []},
- {"match": "༤", "idx": 3, "captures": []},
- {"match": "༤", "idx": 4, "captures": []}
- ]);
- testRegexFindAggForKey(12, {input: "$text", regex: "(*UTF)༤"}, [
- {"match": "༤", "idx": 2, "captures": []},
- {"match": "༤", "idx": 3, "captures": []},
- {"match": "༤", "idx": 4, "captures": []}
- ]);
- // For ASCII characters.
- assert.commandWorked(coll.insert({_id: 4, text: "123444"}));
- testRegexFindAggForKey(4,
- {input: "$text", regex: "(*UTF8)(44)"},
- [{"match": "44", "idx": 3, "captures": ["44"]}]);
- testRegexFindAggForKey(4,
- {input: "$text", regex: "(*UTF)(44)"},
- [{"match": "44", "idx": 3, "captures": ["44"]}]);
+(function testWithUnicodeData() {
+ coll.drop();
+ // Unicode index counting.
+ assert.commandWorked(coll.insert({_id: 2, text: "cafétéria"}));
+ assert.commandWorked(coll.insert({_id: 3, text: "मा०गो डीबि"}));
+ testRegexFindAggForKey(
+ 2, {input: "$text", regex: "té"}, [{"match": "té", "idx": 4, "captures": []}]);
+ testRegexFindAggForKey(
+ 3, {input: "$text", regex: /म/}, [{"match": "म", "idx": 0, "captures": []}]);
+ // Unicode with capture group.
+ testRegexFindAggForKey(
+ 3, {input: "$text", regex: /(गो )/}, [{"match": "गो ", "idx": 3, "captures": ["गो "]}]);
+ // Test that regexes support Unicode character properties.
+ testRegexFindAggForKey(2, {input: "$text", regex: String.raw`\p{Hangul}`}, []);
+ testRegexFindAggForKey(2,
+ {input: "$text", regex: String.raw`\p{Latin}+$`},
+ [{"match": "cafétéria", "idx": 0, "captures": []}]);
+ // Test that the (*UTF) and (*UTF8) options are accepted for Unicode characters.
+ assert.commandWorked(coll.insert({_id: 12, text: "༢༣༤༤༤༥12༥A"}));
+ testRegexFindAggForKey(12, {input: "$text", regex: "(*UTF8)༤"}, [
+ {"match": "༤", "idx": 2, "captures": []},
+ {"match": "༤", "idx": 3, "captures": []},
+ {"match": "༤", "idx": 4, "captures": []}
+ ]);
+ testRegexFindAggForKey(12, {input: "$text", regex: "(*UTF)༤"}, [
+ {"match": "༤", "idx": 2, "captures": []},
+ {"match": "༤", "idx": 3, "captures": []},
+ {"match": "༤", "idx": 4, "captures": []}
+ ]);
+ // For ASCII characters.
+ assert.commandWorked(coll.insert({_id: 4, text: "123444"}));
+ testRegexFindAggForKey(
+ 4, {input: "$text", regex: "(*UTF8)(44)"}, [{"match": "44", "idx": 3, "captures": ["44"]}]);
+ testRegexFindAggForKey(
+ 4, {input: "$text", regex: "(*UTF)(44)"}, [{"match": "44", "idx": 3, "captures": ["44"]}]);
- // When the (*UCP) option is specified, Unicode "word" characters are included in the '\w'
- // character type.
- testRegexFindAggForKey(12,
- {input: "$text", regex: String.raw `(*UCP)^(\w+)`},
- [{"match": "༢༣༤༤༤༥12༥A", "idx": 0, "captures": ["༢༣༤༤༤༥12༥A"]}]);
- // When the (*UCP) option is specified, [:digit:] becomes \p{N} and matches all Unicode
- // decimal digit characters.
- testRegexFindAggForKey(12,
- {input: "$text", regex: "(*UCP)^[[:digit:]]+"},
- [{"match": "༢༣༤༤༤༥12༥", "idx": 0, "captures": []}]);
- testRegexFindAggForKey(12, {input: "$text", regex: "(*UCP)[[:digit:]]+$"}, []);
- // When the (*UCP) option is specified, [:alpha:] becomes \p{L} and matches all Unicode
- // alphabetic characters.
- assert.commandWorked(coll.insert({_id: 13, text: "박정수AB"}));
- testRegexFindAggForKey(13,
- {input: "$text", regex: String.raw `(*UCP)^[[:alpha:]]+`},
- [{"match": "박정수AB", "idx": 0, "captures": []}]);
+ // When the (*UCP) option is specified, Unicode "word" characters are included in the '\w'
+ // character type.
+ testRegexFindAggForKey(12,
+ {input: "$text", regex: String.raw`(*UCP)^(\w+)`},
+ [{"match": "༢༣༤༤༤༥12༥A", "idx": 0, "captures": ["༢༣༤༤༤༥12༥A"]}]);
+ // When the (*UCP) option is specified, [:digit:] becomes \p{N} and matches all Unicode
+ // decimal digit characters.
+ testRegexFindAggForKey(12,
+ {input: "$text", regex: "(*UCP)^[[:digit:]]+"},
+ [{"match": "༢༣༤༤༤༥12༥", "idx": 0, "captures": []}]);
+ testRegexFindAggForKey(12, {input: "$text", regex: "(*UCP)[[:digit:]]+$"}, []);
+ // When the (*UCP) option is specified, [:alpha:] becomes \p{L} and matches all Unicode
+ // alphabetic characters.
+ assert.commandWorked(coll.insert({_id: 13, text: "박정수AB"}));
+ testRegexFindAggForKey(13,
+ {input: "$text", regex: String.raw`(*UCP)^[[:alpha:]]+`},
+ [{"match": "박정수AB", "idx": 0, "captures": []}]);
- // No match when options are not set.
- testRegexFindAggForKey(12, {input: "$text", regex: String.raw `^(\w+)`}, []);
- testRegexFindAggForKey(12, {input: "$text", regex: "^[[:digit:]]"}, []);
- testRegexFindAggForKey(2, {input: "$text", regex: "^[[:alpha:]]+$"}, []);
- })();
+ // No match when options are not set.
+ testRegexFindAggForKey(12, {input: "$text", regex: String.raw`^(\w+)`}, []);
+ testRegexFindAggForKey(12, {input: "$text", regex: "^[[:digit:]]"}, []);
+ testRegexFindAggForKey(2, {input: "$text", regex: "^[[:alpha:]]+$"}, []);
+})();
- (function testErrors() {
- coll.drop();
- assert.commandWorked(coll.insert({text: "string"}));
- // Null object.
- testRegexAggException(null, 51103);
- // Incorrect object parameter.
- testRegexAggException("incorrect type", 51103);
- // Test malformed regex.
- testRegexAggException({input: "$text", regex: "[0-9"}, 51111);
- testRegexAggException({regex: "[a-c", input: null}, 51111);
- // Malformed regex because the start options are not at the beginning.
- testRegexAggException({input: "$text", regex: "^(*UCP)[[:alpha:]]+$"}, 51111);
- testRegexAggException({input: "$text", regex: "((*UCP)[[:alpha:]]+$)"}, 51111);
- // At least one of the 'input' values is not a string.
- assert.commandWorked(coll.insert({a: "string"}));
- assert.commandWorked(coll.insert({a: {b: "object"}}));
- testRegexAggException({input: "$a", regex: "valid"}, 51104);
- testRegexAggException({input: "$a", regex: null}, 51104);
- // 'regex' field is not string or regex.
- testRegexAggException({input: "$text", regex: ["incorrect"]}, 51105);
- // 'options' field is not string.
- testRegexAggException({input: "$text", regex: "valid", options: 123}, 51106);
- // Incorrect 'options' flag.
- testRegexAggException({input: "$text", regex: "valid", options: 'a'}, 51108);
- // 'options' are case-sensitive.
- testRegexAggException({input: "$text", regex: "valid", options: "I"}, 51108);
- testRegexAggException({options: "I", regex: null, input: null}, 51108);
- // Options specified in both 'regex' and 'options'.
- testRegexAggException({input: "$text", regex: /(m(p))/i, options: "i"}, 51107);
- testRegexAggException({input: "$text", regex: /(m(p))/i, options: "x"}, 51107);
- testRegexAggException({input: "$text", regex: /(m(p))/m, options: ""}, 51107);
- // 'regex' as string with null characters.
- testRegexAggException({input: "$text", regex: "sasd\0", options: "i"}, 51109);
- testRegexAggException({regex: "sa\x00sd", options: "i", input: null}, 51109);
- // 'options' as string with null characters.
- testRegexAggException({input: "$text", regex: /(m(p))/, options: "i\0"}, 51110);
- testRegexAggException({input: "$text", options: "i\x00", regex: null}, 51110);
- // Invalid parameter.
- testRegexAggException({input: "$text", invalid: "i"}, 31024);
- testRegexAggException({input: "$text", regex: "sa", invalid: "$missingField"}, 31024);
- testRegexAggException({input: "$text", regex: "sa", invalid: null}, 31024);
- testRegexAggException({input: "$text", regex: "sa", invalid: []}, 31024);
- // Regex not present.
- testRegexAggException({input: "$text"}, 31023);
- testRegexAggException({input: "$missingField"}, 31023);
- testRegexAggException({input: "$text", options: "invalid"}, 31023);
- // Input not present.
- testRegexAggException({regex: /valid/}, 31022);
- testRegexAggException({regex: "$missingField"}, 31022);
- testRegexAggException({regex: "[0-9"}, 31022);
- // Empty object.
- testRegexAggException({}, 31022);
- })();
+(function testErrors() {
+ coll.drop();
+ assert.commandWorked(coll.insert({text: "string"}));
+ // Null object.
+ testRegexAggException(null, 51103);
+ // Incorrect object parameter.
+ testRegexAggException("incorrect type", 51103);
+ // Test malformed regex.
+ testRegexAggException({input: "$text", regex: "[0-9"}, 51111);
+ testRegexAggException({regex: "[a-c", input: null}, 51111);
+ // Malformed regex because the start options are not at the beginning.
+ testRegexAggException({input: "$text", regex: "^(*UCP)[[:alpha:]]+$"}, 51111);
+ testRegexAggException({input: "$text", regex: "((*UCP)[[:alpha:]]+$)"}, 51111);
+ // At least one of the 'input' values is not a string.
+ assert.commandWorked(coll.insert({a: "string"}));
+ assert.commandWorked(coll.insert({a: {b: "object"}}));
+ testRegexAggException({input: "$a", regex: "valid"}, 51104);
+ testRegexAggException({input: "$a", regex: null}, 51104);
+ // 'regex' field is not string or regex.
+ testRegexAggException({input: "$text", regex: ["incorrect"]}, 51105);
+ // 'options' field is not string.
+ testRegexAggException({input: "$text", regex: "valid", options: 123}, 51106);
+ // Incorrect 'options' flag.
+ testRegexAggException({input: "$text", regex: "valid", options: 'a'}, 51108);
+ // 'options' are case-sensitive.
+ testRegexAggException({input: "$text", regex: "valid", options: "I"}, 51108);
+ testRegexAggException({options: "I", regex: null, input: null}, 51108);
+ // Options specified in both 'regex' and 'options'.
+ testRegexAggException({input: "$text", regex: /(m(p))/i, options: "i"}, 51107);
+ testRegexAggException({input: "$text", regex: /(m(p))/i, options: "x"}, 51107);
+ testRegexAggException({input: "$text", regex: /(m(p))/m, options: ""}, 51107);
+ // 'regex' as string with null characters.
+ testRegexAggException({input: "$text", regex: "sasd\0", options: "i"}, 51109);
+ testRegexAggException({regex: "sa\x00sd", options: "i", input: null}, 51109);
+ // 'options' as string with null characters.
+ testRegexAggException({input: "$text", regex: /(m(p))/, options: "i\0"}, 51110);
+ testRegexAggException({input: "$text", options: "i\x00", regex: null}, 51110);
+ // Invalid parameter.
+ testRegexAggException({input: "$text", invalid: "i"}, 31024);
+ testRegexAggException({input: "$text", regex: "sa", invalid: "$missingField"}, 31024);
+ testRegexAggException({input: "$text", regex: "sa", invalid: null}, 31024);
+ testRegexAggException({input: "$text", regex: "sa", invalid: []}, 31024);
+ // Regex not present.
+ testRegexAggException({input: "$text"}, 31023);
+ testRegexAggException({input: "$missingField"}, 31023);
+ testRegexAggException({input: "$text", options: "invalid"}, 31023);
+ // Input not present.
+ testRegexAggException({regex: /valid/}, 31022);
+ testRegexAggException({regex: "$missingField"}, 31022);
+ testRegexAggException({regex: "[0-9"}, 31022);
+ // Empty object.
+ testRegexAggException({}, 31022);
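+ // Editor's sketch (hedged): testRegexAggException presumably delegates to assertErrorCode
+ // from jstests/aggregation/extras/utils.js, so the same validation can be hit directly:
+ assertErrorCode(coll, [{$project: {m: {$regexFind: {input: "$text", regex: "[0-9"}}}}], 51111);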
+})();
- (function testMultipleMatches() {
- coll.drop();
- assert.commandWorked(coll.insert({a: "string1string2", regex: "(string[1-2])"}));
- assert.commandWorked(coll.insert({a: "string3 string4", regex: "(string[3-4])"}));
- assert.commandWorked(coll.insert({a: "string5 string6", regex: "(string[3-4])"}));
- // All documents match.
- testRegexFindAgg({input: "$a", regex: "(str.*?[0-9])"}, [
- {
- "matches": [
- {"match": "string1", "idx": 0, "captures": ["string1"]},
- {"match": "string2", "idx": 7, "captures": ["string2"]}
- ]
- },
- {
- "matches": [
- {"match": "string3", "idx": 0, "captures": ["string3"]},
- {"match": "string4", "idx": 8, "captures": ["string4"]}
- ]
- },
- {
- "matches": [
- {"match": "string5", "idx": 0, "captures": ["string5"]},
- {"match": "string6", "idx": 8, "captures": ["string6"]}
- ]
- }
- ]);
- // Only one match.
- testRegexFindAgg({input: "$a", regex: "(^.*[0-2]$)"}, [
- {"matches": []},
- {"matches": []},
- {"matches": [{"match": "string1string2", "idx": 0, "captures": ["string1string2"]}]}
+(function testMultipleMatches() {
+ coll.drop();
+ assert.commandWorked(coll.insert({a: "string1string2", regex: "(string[1-2])"}));
+ assert.commandWorked(coll.insert({a: "string3 string4", regex: "(string[3-4])"}));
+ assert.commandWorked(coll.insert({a: "string5 string6", regex: "(string[3-4])"}));
+ // All documents match.
+ testRegexFindAgg({input: "$a", regex: "(str.*?[0-9])"}, [
+ {
+ "matches": [
+ {"match": "string1", "idx": 0, "captures": ["string1"]},
+ {"match": "string2", "idx": 7, "captures": ["string2"]}
+ ]
+ },
+ {
+ "matches": [
+ {"match": "string3", "idx": 0, "captures": ["string3"]},
+ {"match": "string4", "idx": 8, "captures": ["string4"]}
+ ]
+ },
+ {
+ "matches": [
+ {"match": "string5", "idx": 0, "captures": ["string5"]},
+ {"match": "string6", "idx": 8, "captures": ["string6"]}
+ ]
+ }
+ ]);
+ // Only one match.
+ testRegexFindAgg({input: "$a", regex: "(^.*[0-2]$)"}, [
+ {"matches": []},
+ {"matches": []},
+ {"matches": [{"match": "string1string2", "idx": 0, "captures": ["string1string2"]}]}
- ]);
- // None match.
- testRegexFindAgg({input: "$a", regex: "(^.*[7-9]$)"},
- [{"matches": []}, {"matches": []}, {"matches": []}]);
+ ]);
+ // None match.
+ testRegexFindAgg({input: "$a", regex: "(^.*[7-9]$)"},
+ [{"matches": []}, {"matches": []}, {"matches": []}]);
- // All documents match when using variable regex.
- testRegexFindAgg({input: "$a", regex: "$regex"}, [
- {"matches": []},
- {
- "matches": [
- {"match": "string1", "idx": 0, "captures": ["string1"]},
- {"match": "string2", "idx": 7, "captures": ["string2"]}
- ]
- },
- {
- "matches": [
- {"match": "string3", "idx": 0, "captures": ["string3"]},
- {"match": "string4", "idx": 8, "captures": ["string4"]}
- ]
- }
- ]);
- })();
+ // All documents match when using variable regex.
+ testRegexFindAgg({input: "$a", regex: "$regex"}, [
+ {"matches": []},
+ {
+ "matches": [
+ {"match": "string1", "idx": 0, "captures": ["string1"]},
+ {"match": "string2", "idx": 7, "captures": ["string2"]}
+ ]
+ },
+ {
+ "matches": [
+ {"match": "string3", "idx": 0, "captures": ["string3"]},
+ {"match": "string4", "idx": 8, "captures": ["string4"]}
+ ]
+ }
+ ]);
+})();
- (function testInsideCondOperator() {
- coll.drop();
- assert.commandWorked(
- coll.insert({_id: 0, level: "Public Knowledge", info: "Company Name"}));
- assert.commandWorked(
- coll.insert({_id: 1, level: "Private Information", info: "Company Secret"}));
- const expectedResults =
- [{"_id": 0, "information": "Company Name"}, {"_id": 1, "information": "REDACTED"}];
- // For $regexFindAll.
- const resultFindAll =
- coll.aggregate([{
- "$project": {
- "information": {
- "$cond": [
- {
- "$eq":
- [{"$regexFindAll": {input: "$level", regex: /public/i}}, []]
- },
- "REDACTED",
- "$info"
- ]
- }
- }
- }])
- .toArray();
- assert.eq(resultFindAll, expectedResults);
- // For $regexMatch.
- const resultMatch =
- coll.aggregate([{
- "$project": {
- "information": {
- "$cond": [
- {"$regexMatch": {input: "$level", regex: /public/i}},
- "$info",
- "REDACTED"
- ]
- }
+(function testInsideCondOperator() {
+ coll.drop();
+ assert.commandWorked(coll.insert({_id: 0, level: "Public Knowledge", info: "Company Name"}));
+ assert.commandWorked(
+ coll.insert({_id: 1, level: "Private Information", info: "Company Secret"}));
+ const expectedResults =
+ [{"_id": 0, "information": "Company Name"}, {"_id": 1, "information": "REDACTED"}];
+ // For $regexFindAll.
+ const resultFindAll =
+ coll.aggregate([{
+ "$project": {
+ "information": {
+ "$cond": [
+ {"$eq": [{"$regexFindAll": {input: "$level", regex: /public/i}}, []]},
+ "REDACTED",
+ "$info"
+ ]
}
- }])
- .toArray();
- // For $regexFind.
- const resultFind =
- coll.aggregate([{
- "$project": {
- "information": {
- "$cond": [
- {
- "$ne":
- [{"$regexFind": {input: "$level", regex: /public/i}}, null]
- },
- "$info",
- "REDACTED"
- ]
- }
+ }
+ }])
+ .toArray();
+ assert.eq(resultFindAll, expectedResults);
+ // For $regexMatch.
+ const resultMatch = coll.aggregate([{
+ "$project": {
+ "information": {
+ "$cond": [
+ {"$regexMatch": {input: "$level", regex: /public/i}},
+ "$info",
+ "REDACTED"
+ ]
+ }
+ }
+ }])
+ .toArray();
+ // For $regexFind.
+ const resultFind =
+ coll.aggregate([{
+ "$project": {
+ "information": {
+ "$cond": [
+ {"$ne": [{"$regexFind": {input: "$level", regex: /public/i}}, null]},
+ "$info",
+ "REDACTED"
+ ]
}
- }])
- .toArray();
- // Validate that {$ne : [{$regexFind: ...}, null]} produces the same result as
- // {$regexMatch: ...}.
- assert.eq(resultFind, resultMatch);
- assert.eq(resultFind, expectedResults);
- })();
+ }
+ }])
+ .toArray();
+ // Validate that {$ne : [{$regexFind: ...}, null]} produces the same result as
+ // {$regexMatch: ...}.
+ assert.eq(resultFind, resultMatch);
+ assert.eq(resultFind, expectedResults);
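+ // Editor's note (hedged): the equivalence holds because $regexFind evaluates to null when
+ // nothing matches, which is exactly what the {$ne: [..., null]} predicate tests for.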
+})();
}());
diff --git a/jstests/aggregation/expressions/regex_limits.js b/jstests/aggregation/expressions/regex_limits.js
index 8ae924f65eb..eceaede1b8b 100644
--- a/jstests/aggregation/expressions/regex_limits.js
+++ b/jstests/aggregation/expressions/regex_limits.js
@@ -2,119 +2,112 @@
* Tests to validate limits for $regexFind, $regexFindAll and $regexMatch aggregation expressions.
*/
(function() {
- 'use strict';
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode().
-
- const coll = db.regex_expr_limit;
- coll.drop();
- assert.commandWorked(coll.insert({z: "c".repeat(50000) + "d".repeat(50000) + "e"}));
-
- function testRegexAgg(inputObj, expectedOutputForFindAll) {
- const resultFindAll =
- coll.aggregate([{"$project": {_id: 0, "matches": {"$regexFindAll": inputObj}}}])
- .toArray();
- assert.eq(resultFindAll, [{"matches": expectedOutputForFindAll}]);
-
- const resultFind =
- coll.aggregate([{"$project": {_id: 0, "matches": {"$regexFind": inputObj}}}]).toArray();
- assert.eq(
- resultFind, [{
- "matches": expectedOutputForFindAll.length == 0 ? null : expectedOutputForFindAll[0]
- }]);
-
- const resultMatch =
- coll.aggregate([{"$project": {_id: 0, "matches": {"$regexMatch": inputObj}}}])
- .toArray();
- assert.eq(resultMatch, [{"matches": expectedOutputForFindAll.length != 0}]);
+'use strict';
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode().
+
+const coll = db.regex_expr_limit;
+coll.drop();
+assert.commandWorked(coll.insert({z: "c".repeat(50000) + "d".repeat(50000) + "e"}));
+
+function testRegexAgg(inputObj, expectedOutputForFindAll) {
+ const resultFindAll =
+ coll.aggregate([{"$project": {_id: 0, "matches": {"$regexFindAll": inputObj}}}]).toArray();
+ assert.eq(resultFindAll, [{"matches": expectedOutputForFindAll}]);
+
+ const resultFind =
+ coll.aggregate([{"$project": {_id: 0, "matches": {"$regexFind": inputObj}}}]).toArray();
+ assert.eq(
+ resultFind,
+ [{"matches": expectedOutputForFindAll.length == 0 ? null : expectedOutputForFindAll[0]}]);
+
+ const resultMatch =
+ coll.aggregate([{"$project": {_id: 0, "matches": {"$regexMatch": inputObj}}}]).toArray();
+ assert.eq(resultMatch, [{"matches": expectedOutputForFindAll.length != 0}]);
+}
+
+function testRegexAggException(inputObj, exceptionCode, expression) {
+ // If expression is defined, run tests only against that expression.
+ if (expression != undefined) {
+ assertErrorCode(coll, [{"$project": {"matches": {[expression]: inputObj}}}], exceptionCode);
+ return;
}
+ assertErrorCode(coll, [{"$project": {"matches": {"$regexFindAll": inputObj}}}], exceptionCode);
+ assertErrorCode(coll, [{"$project": {"matches": {"$regexFind": inputObj}}}], exceptionCode);
+ assertErrorCode(coll, [{"$project": {"matches": {"$regexMatch": inputObj}}}], exceptionCode);
+}
+
+(function testLongRegex() {
+ // PCRE has no direct limit on the length of the regex string. Instead, compilation fails
+ // once the internal memory PCRE uses while compiling reaches 64KB. When there are no
+ // capture groups, this corresponds to a maximum pattern length of 32764.
+ // Reference: https://www.pcre.org/original/doc/html/pcrelimits.html
+ const kMaxRegexPatternLen = 32764;
+ const patternMaxLen = "c".repeat(kMaxRegexPatternLen);
+
+ // Test that a regex with maximum allowable pattern length can find a document.
+ testRegexAgg({input: "$z", regex: patternMaxLen},
+ [{match: patternMaxLen, "idx": 0, "captures": []}]);
+
+ // Test that a regex pattern exceeding the limit fails.
+ const patternTooLong = patternMaxLen + "c";
+ testRegexAggException({input: "$z", regex: patternTooLong}, 51111);
+})();
- function testRegexAggException(inputObj, exceptionCode, expression) {
- // If expression is defined, run tests only against that expression.
- if (expression != undefined) {
- assertErrorCode(
- coll, [{"$project": {"matches": {[expression]: inputObj}}}], exceptionCode);
- return;
- }
- assertErrorCode(
- coll, [{"$project": {"matches": {"$regexFindAll": inputObj}}}], exceptionCode);
- assertErrorCode(coll, [{"$project": {"matches": {"$regexFind": inputObj}}}], exceptionCode);
- assertErrorCode(
- coll, [{"$project": {"matches": {"$regexMatch": inputObj}}}], exceptionCode);
- }
+(function testBufferOverflow() {
+ // When the pattern is empty, $regexFindAll matches each character individually. An input
+ // of 'n' characters therefore produces 'n' individual matches, and if the pattern also has
+ // 'k' capture groups, the output document will contain 'n * k' sub-strings representing
+ // the captures.
+ const pattern = "(".repeat(100) + ")".repeat(100);
+ // If the intermediate document size exceeds 64MB at any point, we will stop further
+ // evaluation and throw an error.
+ testRegexAggException({input: "$z", regex: pattern}, 51151, "$regexFindAll");
+
+ const pattern2 = "()".repeat(100);
+ testRegexAggException({input: "$z", regex: pattern2}, 51151, "$regexFindAll");
+})();
- (function testLongRegex() {
- // PCRE has no direct limit on the length of the regex string. Instead, compilation fails
- // once the internal memory PCRE uses while compiling reaches 64KB. When there are no
- // capture groups, this corresponds to a maximum pattern length of 32764.
- // Reference: https://www.pcre.org/original/doc/html/pcrelimits.html
- const kMaxRegexPatternLen = 32764;
- const patternMaxLen = "c".repeat(kMaxRegexPatternLen);
-
- // Test that a regex with maximum allowable pattern length can find a document.
- testRegexAgg({input: "$z", regex: patternMaxLen},
- [{match: patternMaxLen, "idx": 0, "captures": []}]);
-
- // Test that a regex pattern exceeding the limit fails.
- const patternTooLong = patternMaxLen + "c";
- testRegexAggException({input: "$z", regex: patternTooLong}, 51111);
- })();
-
- (function testBufferOverflow() {
- // When the pattern is empty, $regexFindAll matches each character individually. An input
- // of 'n' characters therefore produces 'n' individual matches, and if the pattern also has
- // 'k' capture groups, the output document will contain 'n * k' sub-strings representing
- // the captures.
- const pattern = "(".repeat(100) + ")".repeat(100);
- // If the intermediate document size exceeds 64MB at any point, we will stop further
- // evaluation and throw an error.
- testRegexAggException({input: "$z", regex: pattern}, 51151, "$regexFindAll");
-
- const pattern2 = "()".repeat(100);
- testRegexAggException({input: "$z", regex: pattern2}, 51151, "$regexFindAll");
- })();
-
- (function testNumberOfCaptureGroupLimit() {
- // Even though PCRE allows a much higher number of captures (65535), the binding constraint
- // is again the 64KB cap on the internal memory PCRE uses while compiling. PCRE uses far
- // more memory when capture groups are present, so as the number of capture groups grows,
- // the maximum regex length shrinks by a factor of roughly 4.
- const approxAllowedCaptureGroups = 3999;
- let pattern = "(d)".repeat(approxAllowedCaptureGroups) + "e";
- const expectedOutputCaptures = new Array(approxAllowedCaptureGroups).fill('d');
-
- testRegexAgg({input: "$z", regex: pattern}, [{
- match: "d".repeat(approxAllowedCaptureGroups) + "e",
- "idx": 96001,
- "captures": expectedOutputCaptures
- }]);
-
- // In this case, during execution, PCRE will hit PCRE_ERROR_RECURSIONLIMIT because of the
- // high number of captures and return an error.
- const bufferExecutionFailure = 2553;
- pattern = "(d)".repeat(bufferExecutionFailure) + pattern;
- testRegexAggException({input: "$z", regex: pattern}, 51156);
-
- // Add one more capture group to the pattern so that it tips over the maximum regex length
- // limit, and verify that PCRE throws an error while attempting to compile.
- pattern = "(d)" + pattern;
- testRegexAggException({input: "$z", regex: pattern}, 51111);
- })();
-
- (function testMaxCaptureDepth() {
- const kMaxCaptureDepthLen = 250;
- // Create a pattern with capture groups nested 250 deep, of the form '((((...e...))))'.
- const patternMaxDepth =
- "(".repeat(kMaxCaptureDepthLen) + "e" + ")".repeat(kMaxCaptureDepthLen);
- const expectedOutputCaptures = new Array(kMaxCaptureDepthLen).fill('e');
-
- // Test that there is a match.
- testRegexAgg({input: "$z", regex: patternMaxDepth},
- [{match: "e", "idx": 100000, "captures": expectedOutputCaptures}]);
-
- // Add one more and verify that regex expression throws an error.
- const patternTooLong = '(' + patternMaxDepth + ')';
- testRegexAggException({input: "$z", regex: patternTooLong}, 51111);
- })();
+(function testNumberOfCaptureGroupLimit() {
+ // Even though PCRE allows a much higher number of captures (65535), the binding constraint
+ // is again the 64KB cap on the internal memory PCRE uses while compiling. PCRE uses far
+ // more memory when capture groups are present, so as the number of capture groups grows,
+ // the maximum regex length shrinks by a factor of roughly 4.
+ const approxAllowedCaptureGroups = 3999;
+ let pattern = "(d)".repeat(approxAllowedCaptureGroups) + "e";
+ const expectedOutputCaptures = new Array(approxAllowedCaptureGroups).fill('d');
+
+ testRegexAgg({input: "$z", regex: pattern}, [{
+ match: "d".repeat(approxAllowedCaptureGroups) + "e",
+ "idx": 96001,
+ "captures": expectedOutputCaptures
+ }]);
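+ // Editor's note (hedged arithmetic): idx 96001 is where the final 3999 'd's begin in the
+ // 100001-character input (50000 + 50000 - 3999 = 96001), so the match runs up to the 'e'.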
+
+ // In this case, during execution, PCRE will hit PCRE_ERROR_RECURSIONLIMIT because of the
+ // high number of captures and return an error.
+ const bufferExecutionFailure = 2553;
+ pattern = "(d)".repeat(bufferExecutionFailure) + pattern;
+ testRegexAggException({input: "$z", regex: pattern}, 51156);
+
+ // Add one more capture group to the pattern so that it tips over the maximum regex length
+ // limit, and verify that PCRE throws an error while attempting to compile.
+ pattern = "(d)" + pattern;
+ testRegexAggException({input: "$z", regex: pattern}, 51111);
+})();
+(function testMaxCaptureDepth() {
+ const kMaxCaptureDepthLen = 250;
+ // Create a pattern with capture groups nested 250 deep, of the form '((((...e...))))'.
+ const patternMaxDepth = "(".repeat(kMaxCaptureDepthLen) + "e" +
+ ")".repeat(kMaxCaptureDepthLen);
+ const expectedOutputCaptures = new Array(kMaxCaptureDepthLen).fill('e');
+
+ // Test that there is a match.
+ testRegexAgg({input: "$z", regex: patternMaxDepth},
+ [{match: "e", "idx": 100000, "captures": expectedOutputCaptures}]);
+
+ // Add one more and verify that regex expression throws an error.
+ const patternTooLong = '(' + patternMaxDepth + ')';
+ testRegexAggException({input: "$z", regex: patternTooLong}, 51111);
+})();
})();
diff --git a/jstests/aggregation/expressions/round_trunc.js b/jstests/aggregation/expressions/round_trunc.js
index 0941d1e4275..735c2b54477 100644
--- a/jstests/aggregation/expressions/round_trunc.js
+++ b/jstests/aggregation/expressions/round_trunc.js
@@ -1,115 +1,114 @@
// Basic integration tests for the $round and $trunc aggregation expressions.
(function() {
- "use strict";
+"use strict";
- // For assertErrorCode.
- load("jstests/aggregation/extras/utils.js");
+// For assertErrorCode.
+load("jstests/aggregation/extras/utils.js");
- var coll = db.server19548;
- coll.drop();
- // Seed collection so that the pipeline will execute.
- assert.writeOK(coll.insert({}));
+var coll = db.server19548;
+coll.drop();
+// Seed collection so that the pipeline will execute.
+assert.writeOK(coll.insert({}));
- // Helper for testing that op returns expResult.
- function testOp(op, expResult) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
- }
+// Helper for testing that op returns expResult.
+function testOp(op, expResult) {
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assert.eq(coll.aggregate(pipeline).toArray(), [{result: expResult}]);
+}
- // Test $trunc and $round with one argument.
- testOp({$trunc: NumberLong(4)}, NumberLong(4));
- testOp({$trunc: NaN}, NaN);
- testOp({$trunc: Infinity}, Infinity);
- testOp({$trunc: -Infinity}, -Infinity);
- testOp({$trunc: null}, null);
- testOp({$trunc: -2.0}, -2.0);
- testOp({$trunc: 0.9}, 0.0);
- testOp({$trunc: -1.2}, -1.0);
- testOp({$trunc: NumberDecimal("-1.6")}, NumberDecimal("-1"));
+// Test $trunc and $round with one argument.
+testOp({$trunc: NumberLong(4)}, NumberLong(4));
+testOp({$trunc: NaN}, NaN);
+testOp({$trunc: Infinity}, Infinity);
+testOp({$trunc: -Infinity}, -Infinity);
+testOp({$trunc: null}, null);
+testOp({$trunc: -2.0}, -2.0);
+testOp({$trunc: 0.9}, 0.0);
+testOp({$trunc: -1.2}, -1.0);
+testOp({$trunc: NumberDecimal("-1.6")}, NumberDecimal("-1"));
- testOp({$round: NumberLong(4)}, NumberLong(4));
- testOp({$round: NaN}, NaN);
- testOp({$round: Infinity}, Infinity);
- testOp({$round: -Infinity}, -Infinity);
- testOp({$round: null}, null);
- testOp({$round: -2.0}, -2.0);
- testOp({$round: 0.9}, 1.0);
- testOp({$round: -1.2}, -1.0);
- testOp({$round: NumberDecimal("-1.6")}, NumberDecimal("-2"));
+testOp({$round: NumberLong(4)}, NumberLong(4));
+testOp({$round: NaN}, NaN);
+testOp({$round: Infinity}, Infinity);
+testOp({$round: -Infinity}, -Infinity);
+testOp({$round: null}, null);
+testOp({$round: -2.0}, -2.0);
+testOp({$round: 0.9}, 1.0);
+testOp({$round: -1.2}, -1.0);
+testOp({$round: NumberDecimal("-1.6")}, NumberDecimal("-2"));
- // Test $trunc and $round with two arguments.
- testOp({$trunc: [1.298, 0]}, 1);
- testOp({$trunc: [1.298, 1]}, 1.2);
- testOp({$trunc: [23.298, -1]}, 20);
- testOp({$trunc: [NumberDecimal("1.298"), 0]}, NumberDecimal("1"));
- testOp({$trunc: [NumberDecimal("1.298"), 1]}, NumberDecimal("1.2"));
- testOp({$trunc: [NumberDecimal("23.298"), -1]}, NumberDecimal("2E+1"));
- testOp({$trunc: [1.298, 100]}, 1.298);
- testOp({$trunc: [NumberDecimal("1.298912343250054252245154325"), NumberLong("20")]},
- NumberDecimal("1.29891234325005425224"));
- testOp({$trunc: [NumberDecimal("1.298"), NumberDecimal("100")]},
- NumberDecimal("1.298000000000000000000000000000000"));
+// Test $trunc and $round with two arguments.
+testOp({$trunc: [1.298, 0]}, 1);
+testOp({$trunc: [1.298, 1]}, 1.2);
+testOp({$trunc: [23.298, -1]}, 20);
+testOp({$trunc: [NumberDecimal("1.298"), 0]}, NumberDecimal("1"));
+testOp({$trunc: [NumberDecimal("1.298"), 1]}, NumberDecimal("1.2"));
+testOp({$trunc: [NumberDecimal("23.298"), -1]}, NumberDecimal("2E+1"));
+testOp({$trunc: [1.298, 100]}, 1.298);
+testOp({$trunc: [NumberDecimal("1.298912343250054252245154325"), NumberLong("20")]},
+ NumberDecimal("1.29891234325005425224"));
+testOp({$trunc: [NumberDecimal("1.298"), NumberDecimal("100")]},
+ NumberDecimal("1.298000000000000000000000000000000"));
- testOp({$round: [1.298, 0]}, 1);
- testOp({$round: [1.298, 1]}, 1.3);
- testOp({$round: [23.298, -1]}, 20);
- testOp({$round: [NumberDecimal("1.298"), 0]}, NumberDecimal("1"));
- testOp({$round: [NumberDecimal("1.298"), 1]}, NumberDecimal("1.3"));
- testOp({$round: [NumberDecimal("23.298"), -1]}, NumberDecimal("2E+1"));
- testOp({$round: [1.298, 100]}, 1.298);
- testOp({$round: [NumberDecimal("1.298912343250054252245154325"), NumberLong("20")]},
- NumberDecimal("1.29891234325005425225"));
- testOp({$round: [NumberDecimal("1.298"), NumberDecimal("100")]},
- NumberDecimal("1.298000000000000000000000000000000"));
+testOp({$round: [1.298, 0]}, 1);
+testOp({$round: [1.298, 1]}, 1.3);
+testOp({$round: [23.298, -1]}, 20);
+testOp({$round: [NumberDecimal("1.298"), 0]}, NumberDecimal("1"));
+testOp({$round: [NumberDecimal("1.298"), 1]}, NumberDecimal("1.3"));
+testOp({$round: [NumberDecimal("23.298"), -1]}, NumberDecimal("2E+1"));
+testOp({$round: [1.298, 100]}, 1.298);
+testOp({$round: [NumberDecimal("1.298912343250054252245154325"), NumberLong("20")]},
+ NumberDecimal("1.29891234325005425225"));
+testOp({$round: [NumberDecimal("1.298"), NumberDecimal("100")]},
+ NumberDecimal("1.298000000000000000000000000000000"));
- // Test $round overflow.
- testOp({$round: [NumberInt("2147483647"), -1]}, NumberLong("2147483650"));
- assertErrorCode(
- coll, [{$project: {a: {$round: [NumberLong("9223372036854775806"), -1]}}}], 51080);
+// Test $round overflow.
+testOp({$round: [NumberInt("2147483647"), -1]}, NumberLong("2147483650"));
+assertErrorCode(coll, [{$project: {a: {$round: [NumberLong("9223372036854775806"), -1]}}}], 51080);
- // Test $trunc and $round with more than 2 arguments.
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, 2, 3]}}}], 28667);
- assertErrorCode(coll, [{$project: {a: {$round: [1, 2, 3]}}}], 28667);
+// Test $trunc and $round with more than 2 arguments.
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, 2, 3]}}}], 28667);
+assertErrorCode(coll, [{$project: {a: {$round: [1, 2, 3]}}}], 28667);
- // Test non-numeric input to $trunc and $round.
- assertErrorCode(coll, [{$project: {a: {$round: "string"}}}], 51081);
- assertErrorCode(coll, [{$project: {a: {$trunc: "string"}}}], 51081);
+// Test non-numeric input to $trunc and $round.
+assertErrorCode(coll, [{$project: {a: {$round: "string"}}}], 51081);
+assertErrorCode(coll, [{$project: {a: {$trunc: "string"}}}], 51081);
- // Test NaN and Infinity numeric args.
- testOp({$round: [Infinity, 0]}, Infinity);
- testOp({$round: [-Infinity, 0]}, -Infinity);
- testOp({$round: [NaN, 0]}, NaN);
- testOp({$round: [NumberDecimal("Infinity"), 0]}, NumberDecimal("Infinity"));
- testOp({$round: [NumberDecimal("-Infinity"), 0]}, NumberDecimal("-Infinity"));
- testOp({$round: [NumberDecimal("NaN"), 0]}, NumberDecimal("NaN"));
+// Test NaN and Infinity numeric args.
+testOp({$round: [Infinity, 0]}, Infinity);
+testOp({$round: [-Infinity, 0]}, -Infinity);
+testOp({$round: [NaN, 0]}, NaN);
+testOp({$round: [NumberDecimal("Infinity"), 0]}, NumberDecimal("Infinity"));
+testOp({$round: [NumberDecimal("-Infinity"), 0]}, NumberDecimal("-Infinity"));
+testOp({$round: [NumberDecimal("NaN"), 0]}, NumberDecimal("NaN"));
- testOp({$trunc: [Infinity, 0]}, Infinity);
- testOp({$trunc: [-Infinity, 0]}, -Infinity);
- testOp({$trunc: [NaN, 0]}, NaN);
- testOp({$trunc: [NumberDecimal("Infinity"), 0]}, NumberDecimal("Infinity"));
- testOp({$trunc: [NumberDecimal("-Infinity"), 0]}, NumberDecimal("-Infinity"));
- testOp({$trunc: [NumberDecimal("NaN"), 0]}, NumberDecimal("NaN"));
+testOp({$trunc: [Infinity, 0]}, Infinity);
+testOp({$trunc: [-Infinity, 0]}, -Infinity);
+testOp({$trunc: [NaN, 0]}, NaN);
+testOp({$trunc: [NumberDecimal("Infinity"), 0]}, NumberDecimal("Infinity"));
+testOp({$trunc: [NumberDecimal("-Infinity"), 0]}, NumberDecimal("-Infinity"));
+testOp({$trunc: [NumberDecimal("NaN"), 0]}, NumberDecimal("NaN"));
- // Test precision arguments that are out of bounds.
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberLong("101")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberLong("-21")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberDecimal("101")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberDecimal("-21")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberInt("101")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberInt("-21")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, 101]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$round: [1, -21]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberLong("101")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberLong("-21")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberDecimal("101")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberDecimal("-21")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberInt("101")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberInt("-21")]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, 101]}}}], 51083);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, -21]}}}], 51083);
+// Test precision arguments that are out of bounds.
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberLong("101")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberLong("-21")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberDecimal("101")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberDecimal("-21")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberInt("101")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberInt("-21")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, 101]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$round: [1, -21]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberLong("101")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberLong("-21")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberDecimal("101")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberDecimal("-21")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberInt("101")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, NumberInt("-21")]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, 101]}}}], 51083);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, -21]}}}], 51083);
- // Test non-integral precision arguments.
- assertErrorCode(coll, [{$project: {a: {$round: [1, NumberDecimal("1.4")]}}}], 51082);
- assertErrorCode(coll, [{$project: {a: {$trunc: [1, 10.5]}}}], 51082);
+// Test non-integral precision arguments.
+assertErrorCode(coll, [{$project: {a: {$round: [1, NumberDecimal("1.4")]}}}], 51082);
+assertErrorCode(coll, [{$project: {a: {$trunc: [1, 10.5]}}}], 51082);
}());
diff --git a/jstests/aggregation/expressions/size.js b/jstests/aggregation/expressions/size.js
index c3ccec34fb3..4e21c71bf4e 100644
--- a/jstests/aggregation/expressions/size.js
+++ b/jstests/aggregation/expressions/size.js
@@ -2,22 +2,21 @@
* Test the $size expression.
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js");
+"use strict";
+load("jstests/aggregation/extras/utils.js");
- const coll = db.expression_size;
- coll.drop();
+const coll = db.expression_size;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0, arr: []}));
- assert.writeOK(coll.insert({_id: 1, arr: [1]}));
- assert.writeOK(coll.insert({_id: 2, arr: ["asdf", "asdfasdf"]}));
- assert.writeOK(coll.insert({_id: 3, arr: [1, "asdf", 1234, 4.3, {key: 23}]}));
- assert.writeOK(coll.insert({_id: 4, arr: [3, [31, 31, 13, 13]]}));
+assert.writeOK(coll.insert({_id: 0, arr: []}));
+assert.writeOK(coll.insert({_id: 1, arr: [1]}));
+assert.writeOK(coll.insert({_id: 2, arr: ["asdf", "asdfasdf"]}));
+assert.writeOK(coll.insert({_id: 3, arr: [1, "asdf", 1234, 4.3, {key: 23}]}));
+assert.writeOK(coll.insert({_id: 4, arr: [3, [31, 31, 13, 13]]}));
- const result =
- coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 0, length: {$size: "$arr"}}}]);
- assert.eq(result.toArray(), [{length: 0}, {length: 1}, {length: 2}, {length: 5}, {length: 2}]);
+const result = coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 0, length: {$size: "$arr"}}}]);
+assert.eq(result.toArray(), [{length: 0}, {length: 1}, {length: 2}, {length: 5}, {length: 2}]);
- assert.writeOK(coll.insert({arr: 231}));
- assertErrorCode(coll, {$project: {_id: 0, length: {$size: "$arr"}}}, 17124);
+assert.writeOK(coll.insert({arr: 231}));
+assertErrorCode(coll, {$project: {_id: 0, length: {$size: "$arr"}}}, 17124);
}());
diff --git a/jstests/aggregation/expressions/split.js b/jstests/aggregation/expressions/split.js
index 7d3402bde4e..86200334395 100644
--- a/jstests/aggregation/expressions/split.js
+++ b/jstests/aggregation/expressions/split.js
@@ -3,58 +3,68 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExpression.
(function() {
- "use strict";
-
- var coll = db.split;
- coll.drop();
- assert.writeOK(coll.insert({}));
-
- testExpression(coll, {$split: ["abc", "b"]}, ["a", "c"]);
- testExpression(coll, {$split: ["aaa", "b"]}, ["aaa"]);
- testExpression(coll, {$split: ["a b a", "b"]}, ["a ", " a"]);
- testExpression(coll, {$split: ["a", "a"]}, ["", ""]);
- testExpression(coll, {$split: ["aa", "a"]}, ["", "", ""]);
- testExpression(coll, {$split: ["aaa", "a"]}, ["", "", "", ""]);
- testExpression(coll, {$split: ["", "a"]}, [""]);
- testExpression(coll, {$split: ["abc abc cba abc", "abc"]}, ["", " ", " cba ", ""]);
-
- // Ensure that $split operates correctly when the string has embedded null bytes.
- testExpression(coll, {$split: ["a\0b\0c", "\0"]}, ["a", "b", "c"]);
- testExpression(coll, {$split: ["\0a\0", "a"]}, ["\0", "\0"]);
- testExpression(coll, {$split: ["\0\0\0", "a"]}, ["\0\0\0"]);
-
- // Ensure that $split operates correctly when the string has multi-byte tokens or input
- // strings. Note that this expression is not Unicode-aware; splitting is based entirely on
- // the byte sequences of the input and token.
- testExpression(coll, {$split: ["∫a∫", "a"]}, ["∫", "∫"]);
- testExpression(coll, {$split: ["a∫∫a", "∫"]}, ["a", "", "a"]);
-
- // Ensure that $split produces null when given null as input.
- testExpression(coll, {$split: ["abc", null]}, null);
- testExpression(coll, {$split: [null, "abc"]}, null);
-
- // Ensure that $split produces null when given missing fields as input.
- testExpression(coll, {$split: ["$a", "a"]}, null);
- testExpression(coll, {$split: ["a", "$a"]}, null);
-
- // Ensure that $split errors when given more or less than two arguments.
- var pipeline = {$project: {split: {$split: []}}};
- assertErrorCode(coll, pipeline, 16020);
-
- pipeline = {$project: {split: {$split: ["a"]}}};
- assertErrorCode(coll, pipeline, 16020);
-
- pipeline = {$project: {split: {$split: ["a", "b", "c"]}}};
- assertErrorCode(coll, pipeline, 16020);
-
- // Ensure that $split errors when given non-string input.
- pipeline = {$project: {split: {$split: [1, "abc"]}}};
- assertErrorCode(coll, pipeline, 40085);
-
- pipeline = {$project: {split: {$split: ["abc", 1]}}};
- assertErrorCode(coll, pipeline, 40086);
-
- // Ensure that $split errors when given an empty separator.
- pipeline = {$project: {split: {$split: ["abc", ""]}}};
- assertErrorCode(coll, pipeline, 40087);
+"use strict";
+
+var coll = db.split;
+coll.drop();
+assert.writeOK(coll.insert({}));
+
+testExpression(coll, {$split: ["abc", "b"]}, ["a", "c"]);
+testExpression(coll, {$split: ["aaa", "b"]}, ["aaa"]);
+testExpression(coll, {$split: ["a b a", "b"]}, ["a ", " a"]);
+testExpression(coll, {$split: ["a", "a"]}, ["", ""]);
+testExpression(coll, {$split: ["aa", "a"]}, ["", "", ""]);
+testExpression(coll, {$split: ["aaa", "a"]}, ["", "", "", ""]);
+testExpression(coll, {$split: ["", "a"]}, [""]);
+testExpression(coll, {$split: ["abc abc cba abc", "abc"]}, ["", " ", " cba ", ""]);
+
+// Ensure that $split operates correctly when the string has embedded null bytes.
+testExpression(coll, {$split: ["a\0b\0c", "\0"]}, ["a", "b", "c"]);
+testExpression(coll, {$split: ["\0a\0", "a"]}, ["\0", "\0"]);
+testExpression(coll, {$split: ["\0\0\0", "a"]}, ["\0\0\0"]);
+
+// Ensure that $split operates correctly when the string has multi-byte tokens or input
+// strings. Note that this expression is not Unicode-aware; splitting is based entirely on
+// the byte sequences of the input and token.
+testExpression(coll, {$split: ["∫a∫", "a"]}, ["∫", "∫"]);
+testExpression(coll, {$split: ["a∫∫a", "∫"]}, ["a", "", "a"]);
+
+// Ensure that $split produces null when given null as input.
+testExpression(coll, {$split: ["abc", null]}, null);
+testExpression(coll, {$split: [null, "abc"]}, null);
+
+// Ensure that $split produces null when given missing fields as input.
+testExpression(coll, {$split: ["$a", "a"]}, null);
+testExpression(coll, {$split: ["a", "$a"]}, null);
+
+// Ensure that $split errors when given more or less than two arguments.
+var pipeline = {$project: {split: {$split: []}}};
+assertErrorCode(coll, pipeline, 16020);
+
+pipeline = {
+ $project: {split: {$split: ["a"]}}
+};
+assertErrorCode(coll, pipeline, 16020);
+
+pipeline = {
+ $project: {split: {$split: ["a", "b", "c"]}}
+};
+assertErrorCode(coll, pipeline, 16020);
+
+// Ensure that $split errors when given non-string input.
+pipeline = {
+ $project: {split: {$split: [1, "abc"]}}
+};
+assertErrorCode(coll, pipeline, 40085);
+
+pipeline = {
+ $project: {split: {$split: ["abc", 1]}}
+};
+assertErrorCode(coll, pipeline, 40086);
+
+// Ensure that $split errors when given an empty separator.
+pipeline = {
+ $project: {split: {$split: ["abc", ""]}}
+};
+assertErrorCode(coll, pipeline, 40087);
}());
diff --git a/jstests/aggregation/expressions/switch.js b/jstests/aggregation/expressions/switch.js
index 64cd9e1db2f..4521d629905 100644
--- a/jstests/aggregation/expressions/switch.js
+++ b/jstests/aggregation/expressions/switch.js
@@ -2,147 +2,143 @@
// of the expression.
(function() {
- "use strict";
-
- var coll = db.switch;
- coll.drop();
-
- // Insert an empty document so that something can flow through the pipeline.
- coll.insert({});
-
- // Ensure that a branch is correctly evaluated.
- var pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch": {
- "branches": [{"case": {"$eq": [1, 1]}, "then": "one is equal to one!"}],
- }
- }
- }
- };
- var res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": "one is equal to one!"});
-
- // Ensure that the first branch which matches is chosen.
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch": {
- "branches": [
- {"case": {"$eq": [1, 1]}, "then": "one is equal to one!"},
- {"case": {"$eq": [2, 2]}, "then": "two is equal to two!"}
- ],
- }
- }
- }
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": "one is equal to one!"});
-
- // Ensure that the default is chosen if no case matches.
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch": {
- "branches": [{"case": {"$eq": [1, 2]}, "then": "one is equal to two!"}],
- "default": "no case matched."
- }
+"use strict";
+
+var coll = db.switch;
+coll.drop();
+
+// Insert an empty document so that something can flow through the pipeline.
+coll.insert({});
+
+// Ensure that a branch is correctly evaluated.
+var pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {
+ "$switch": {
+ "branches": [{"case": {"$eq": [1, 1]}, "then": "one is equal to one!"}],
}
}
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": "no case matched."});
-
- // Ensure that nullish values are treated as false when used as a "case", and are
- // propagated as null (or a missing field) otherwise.
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch": {
- "branches": [{"case": null, "then": "Null was true!"}],
- "default": "No case matched."
- }
+ }
+};
+var res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": "one is equal to one!"});
+
+// Ensure that the first branch which matches is chosen.
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {
+ "$switch": {
+ "branches": [
+ {"case": {"$eq": [1, 1]}, "then": "one is equal to one!"},
+ {"case": {"$eq": [2, 2]}, "then": "two is equal to two!"}
+ ],
}
}
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": "No case matched."});
-
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch": {
- "branches": [{"case": "$missingField", "then": "Null was true!"}],
- "default": "No case matched."
- }
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": "one is equal to one!"});
+
+// Ensure that the default is chosen if no case matches.
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {
+ "$switch": {
+ "branches": [{"case": {"$eq": [1, 2]}, "then": "one is equal to two!"}],
+ "default": "no case matched."
}
}
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": "No case matched."});
-
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {"$switch": {"branches": [{"case": true, "then": null}], "default": false}}
- }
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": null});
-
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch":
- {"branches": [{"case": true, "then": "$missingField"}], "default": false}
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": "no case matched."});
+
+// Ensure that nullish values are treated as false when used as a "case", and are propagated
+// as null (or a missing field) otherwise.
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {
+ "$switch": {
+ "branches": [{"case": null, "then": "Null was true!"}],
+ "default": "No case matched."
}
}
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {});
-
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {"$switch": {"branches": [{"case": null, "then": false}], "default": null}}
- }
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {"output": null});
-
- pipeline = {
- "$project": {
- "_id": 0,
- "output": {
- "$switch":
- {"branches": [{"case": null, "then": false}], "default": "$missingField"}
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": "No case matched."});
+
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {
+ "$switch": {
+ "branches": [{"case": "$missingField", "then": "Null was true!"}],
+ "default": "No case matched."
}
}
- };
- res = coll.aggregate(pipeline).toArray();
-
- assert.eq(res.length, 1);
- assert.eq(res[0], {});
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": "No case matched."});
+
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {"$switch": {"branches": [{"case": true, "then": null}], "default": false}}
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": null});
+
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output":
+ {"$switch": {"branches": [{"case": true, "then": "$missingField"}], "default": false}}
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {});
+
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output": {"$switch": {"branches": [{"case": null, "then": false}], "default": null}}
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {"output": null});
+
+pipeline = {
+ "$project": {
+ "_id": 0,
+ "output":
+ {"$switch": {"branches": [{"case": null, "then": false}], "default": "$missingField"}}
+ }
+};
+res = coll.aggregate(pipeline).toArray();
+
+assert.eq(res.length, 1);
+assert.eq(res[0], {});
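+// Editor's note (hedged): in summary, a nullish "then" or "default" surfaces as null (or as a
+// missing field for "$missingField"), while a nullish "case" simply fails to match.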
}());
diff --git a/jstests/aggregation/expressions/switch_errors.js b/jstests/aggregation/expressions/switch_errors.js
index 0d9023fb250..1cead260526 100644
--- a/jstests/aggregation/expressions/switch_errors.js
+++ b/jstests/aggregation/expressions/switch_errors.js
@@ -3,55 +3,65 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
+"use strict";
- var coll = db.switch;
- coll.drop();
+var coll = db.switch;
+coll.drop();
- var pipeline = {"$project": {"output": {"$switch": "not an object"}}};
- assertErrorCode(coll, pipeline, 40060, "$switch requires an object as an argument.");
+var pipeline = {"$project": {"output": {"$switch": "not an object"}}};
+assertErrorCode(coll, pipeline, 40060, "$switch requires an object as an argument.");
- pipeline = {"$project": {"output": {"$switch": {"branches": "not an array"}}}};
- assertErrorCode(coll, pipeline, 40061, "$switch requires 'branches' to be an array.");
+pipeline = {
+ "$project": {"output": {"$switch": {"branches": "not an array"}}}
+};
+assertErrorCode(coll, pipeline, 40061, "$switch requires 'branches' to be an array.");
- pipeline = {"$project": {"output": {"$switch": {"branches": ["not an object"]}}}};
- assertErrorCode(coll, pipeline, 40062, "$switch requires each branch to be an object.");
+pipeline = {
+ "$project": {"output": {"$switch": {"branches": ["not an object"]}}}
+};
+assertErrorCode(coll, pipeline, 40062, "$switch requires each branch to be an object.");
- pipeline = {"$project": {"output": {"$switch": {"branches": [{}]}}}};
- assertErrorCode(coll, pipeline, 40064, "$switch requires each branch have a 'case'.");
+pipeline = {
+ "$project": {"output": {"$switch": {"branches": [{}]}}}
+};
+assertErrorCode(coll, pipeline, 40064, "$switch requires each branch have a 'case'.");
- pipeline = {
- "$project": {
- "output": {
- "$switch": {
- "branches": [{
- "case": 1,
- }]
- }
+pipeline = {
+ "$project": {
+ "output": {
+ "$switch": {
+ "branches": [{
+ "case": 1,
+ }]
}
}
- };
- assertErrorCode(coll, pipeline, 40065, "$switch requires each branch have a 'then'.");
-
- pipeline = {
- "$project":
- {"output": {"$switch": {"branches": [{"case": true, "then": false, "badKey": 1}]}}}
- };
- assertErrorCode(coll, pipeline, 40063, "$switch found a branch with an unknown argument");
-
- pipeline = {"$project": {"output": {"$switch": {"notAnArgument": 1}}}};
- assertErrorCode(coll, pipeline, 40067, "$switch found an unknown argument");
-
- pipeline = {"$project": {"output": {"$switch": {"branches": []}}}};
- assertErrorCode(coll, pipeline, 40068, "$switch requires at least one branch");
-
- pipeline = {"$project": {"output": {"$switch": {}}}};
- assertErrorCode(coll, pipeline, 40068, "$switch requires at least one branch");
-
- coll.insert({x: 1});
- pipeline = {
- "$project":
- {"output": {"$switch": {"branches": [{"case": {"$eq": ["$x", 0]}, "then": 1}]}}}
- };
- assertErrorCode(coll, pipeline, 40066, "$switch has no default and an input matched no case");
+ }
+};
+assertErrorCode(coll, pipeline, 40065, "$switch requires each branch have a 'then'.");
+
+pipeline = {
+ "$project": {"output": {"$switch": {"branches": [{"case": true, "then": false, "badKey": 1}]}}}
+};
+assertErrorCode(coll, pipeline, 40063, "$switch found a branch with an unknown argument");
+
+pipeline = {
+ "$project": {"output": {"$switch": {"notAnArgument": 1}}}
+};
+assertErrorCode(coll, pipeline, 40067, "$switch found an unknown argument");
+
+pipeline = {
+ "$project": {"output": {"$switch": {"branches": []}}}
+};
+assertErrorCode(coll, pipeline, 40068, "$switch requires at least one branch");
+
+pipeline = {
+ "$project": {"output": {"$switch": {}}}
+};
+assertErrorCode(coll, pipeline, 40068, "$switch requires at least one branch");
+
+coll.insert({x: 1});
+pipeline = {
+ "$project": {"output": {"$switch": {"branches": [{"case": {"$eq": ["$x", 0]}, "then": 1}]}}}
+};
+assertErrorCode(coll, pipeline, 40066, "$switch has no default and an input matched no case");
}());
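
A side point the error tests above rely on, sketched below with a hypothetical scratch collection: the structural errors (missing 'case', unknown argument, empty 'branches', and so on) fail while the expression is parsed, even on an empty collection, whereas 40066 needs a document to flow through the expression — hence the coll.insert({x: 1}) just before the final assertion.

var demo = db.switch_errors_demo;  // hypothetical scratch collection
demo.drop();
var stage = {
    $project: {output: {$switch: {branches: [{case: {$eq: ["$x", 0]}, then: 1}]}}}
};

// Structurally valid pipeline on an empty collection: no documents, no error.
assert.eq(0, demo.aggregate([stage]).itcount());

// With a document that matches no case and no default, execution fails (40066).
assert.writeOK(demo.insert({x: 1}));
assert.throws(() => demo.aggregate([stage]).itcount());
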
diff --git a/jstests/aggregation/expressions/trim.js b/jstests/aggregation/expressions/trim.js
index 34d8573f259..af197adca5a 100644
--- a/jstests/aggregation/expressions/trim.js
+++ b/jstests/aggregation/expressions/trim.js
@@ -2,98 +2,89 @@
* Basic tests for the $trim, $ltrim, and $rtrim expressions.
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode, testExpression and
- // testExpressionWithCollation.
+"use strict";
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode, testExpression and
+ // testExpressionWithCollation.
- const coll = db.trim_expressions;
+const coll = db.trim_expressions;
- testExpression(coll, {$trim: {input: " abc "}}, "abc");
- testExpression(coll, {$trim: {input: " a b\nc "}}, "a b\nc");
- testExpression(coll, {$ltrim: {input: "\t abc "}}, "abc ");
- testExpression(coll, {$rtrim: {input: "\t abc "}}, "\t abc");
- testExpression(
- coll,
- {$map: {input: {$split: ["4, 5, 6, 7,8,9, 10", ","]}, in : {$trim: {input: "$$this"}}}},
- ["4", "5", "6", "7", "8", "9", "10"]);
+testExpression(coll, {$trim: {input: " abc "}}, "abc");
+testExpression(coll, {$trim: {input: " a b\nc "}}, "a b\nc");
+testExpression(coll, {$ltrim: {input: "\t abc "}}, "abc ");
+testExpression(coll, {$rtrim: {input: "\t abc "}}, "\t abc");
+testExpression(
+ coll,
+ {$map: {input: {$split: ["4, 5, 6, 7,8,9, 10", ","]}, in : {$trim: {input: "$$this"}}}},
+ ["4", "5", "6", "7", "8", "9", "10"]);
- // Test that the trim expressions do not respect the collation.
- const caseInsensitive = {locale: "en_US", strength: 2};
- testExpressionWithCollation(coll, {$trim: {input: "xXx", chars: "x"}}, "X", caseInsensitive);
- testExpressionWithCollation(coll, {$rtrim: {input: "xXx", chars: "x"}}, "xX", caseInsensitive);
- testExpressionWithCollation(coll, {$ltrim: {input: "xXx", chars: "x"}}, "Xx", caseInsensitive);
+// Test that the trim expressions do not respect the collation.
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+testExpressionWithCollation(coll, {$trim: {input: "xXx", chars: "x"}}, "X", caseInsensitive);
+testExpressionWithCollation(coll, {$rtrim: {input: "xXx", chars: "x"}}, "xX", caseInsensitive);
+testExpressionWithCollation(coll, {$ltrim: {input: "xXx", chars: "x"}}, "Xx", caseInsensitive);
- // Test using inputs from documents.
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, name: ", Charlie"},
- {_id: 1, name: "Obama\t, Barack"},
- {_id: 2, name: " Ride, Sally "}
- ]));
+// Test using inputs from documents.
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, name: ", Charlie"},
+ {_id: 1, name: "Obama\t, Barack"},
+ {_id: 2, name: " Ride, Sally "}
+]));
- assert.eq(
- coll.aggregate([
- {$sort: {_id: 1}},
- {
- $project: {
- firstName: {$trim: {input: {$arrayElemAt: [{$split: ["$name", ","]}, 1]}}}
- }
- }
- ])
- .toArray(),
- [
- {_id: 0, firstName: "Charlie"},
- {_id: 1, firstName: "Barack"},
- {_id: 2, firstName: "Sally"}
- ]);
+assert.eq(
+ coll.aggregate([
+ {$sort: {_id: 1}},
+ {$project: {firstName: {$trim: {input: {$arrayElemAt: [{$split: ["$name", ","]}, 1]}}}}}
+ ])
+ .toArray(),
+ [{_id: 0, firstName: "Charlie"}, {_id: 1, firstName: "Barack"}, {_id: 2, firstName: "Sally"}]);
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, poorlyParsedWebTitle: "The title of my document"},
- {_id: 1, poorlyParsedWebTitle: "\u2001\u2002 Odd unicode indentation"},
- {_id: 2, poorlyParsedWebTitle: "\u2001\u2002 Odd unicode indentation\u200A"},
- ]));
- assert.eq(coll.aggregate([
- {$sort: {_id: 1}},
- {$project: {title: {$ltrim: {input: "$poorlyParsedWebTitle"}}}}
- ])
- .toArray(),
- [
- {_id: 0, title: "The title of my document"},
- {_id: 1, title: "Odd unicode indentation"},
- {_id: 2, title: "Odd unicode indentation\u200A"}
- ]);
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, poorlyParsedWebTitle: "The title of my document"},
+ {_id: 1, poorlyParsedWebTitle: "\u2001\u2002 Odd unicode indentation"},
+ {_id: 2, poorlyParsedWebTitle: "\u2001\u2002 Odd unicode indentation\u200A"},
+]));
+assert.eq(
+ coll.aggregate(
+ [{$sort: {_id: 1}}, {$project: {title: {$ltrim: {input: "$poorlyParsedWebTitle"}}}}])
+ .toArray(),
+ [
+ {_id: 0, title: "The title of my document"},
+ {_id: 1, title: "Odd unicode indentation"},
+ {_id: 2, title: "Odd unicode indentation\u200A"}
+ ]);
- coll.drop();
- assert.writeOK(coll.insert([
- {_id: 0, proof: "Left as an exercise for the reader∎"},
- {_id: 1, proof: "∎∃ proof∎"},
+coll.drop();
+assert.writeOK(coll.insert([
+ {_id: 0, proof: "Left as an exercise for the reader∎"},
+ {_id: 1, proof: "∎∃ proof∎"},
+ {_id: 2, proof: "Just view the problem as a continuous DAG whose elements are taylor series∎"},
+ {_id: 3, proof: null},
+ {_id: 4},
+]));
+assert.eq(
+ coll.aggregate(
+ [{$sort: {_id: 1}}, {$project: {proof: {$rtrim: {input: "$proof", chars: "∎"}}}}])
+ .toArray(),
+ [
+ {_id: 0, proof: "Left as an exercise for the reader"},
+ {_id: 1, proof: "∎∃ proof"},
{
- _id: 2,
- proof: "Just view the problem as a continuous DAG whose elements are taylor series∎"
- },
- {_id: 3, proof: null},
- {_id: 4},
- ]));
- assert.eq(
- coll.aggregate(
- [{$sort: {_id: 1}}, {$project: {proof: {$rtrim: {input: "$proof", chars: "∎"}}}}])
- .toArray(),
- [
- {_id: 0, proof: "Left as an exercise for the reader"},
- {_id: 1, proof: "∎∃ proof"},
- {
_id: 2,
proof: "Just view the problem as a continuous DAG whose elements are taylor series"
- },
- {_id: 3, proof: null},
- {_id: 4, proof: null},
- ]);
+ },
+ {_id: 3, proof: null},
+ {_id: 4, proof: null},
+ ]);
- // Test that errors are reported correctly.
- assertErrorCode(coll, [{$project: {x: {$trim: " x "}}}], 50696);
- assertErrorCode(coll, [{$project: {x: {$trim: {input: 4}}}}], 50699);
- assertErrorCode(coll, [{$project: {x: {$trim: {input: {$add: [4, 2]}}}}}], 50699);
- assertErrorCode(coll, [{$project: {x: {$trim: {input: "$_id"}}}}], 50699);
- assertErrorCode(coll, [{$project: {x: {$trim: {input: " x ", chars: "$_id"}}}}], 50700);
+// Test that errors are reported correctly.
+assertErrorCode(coll, [{$project: {x: {$trim: " x "}}}], 50696);
+assertErrorCode(coll, [{$project: {x: {$trim: {input: 4}}}}], 50699);
+assertErrorCode(coll, [{$project: {x: {$trim: {input: {$add: [4, 2]}}}}}], 50699);
+assertErrorCode(coll, [{$project: {x: {$trim: {input: "$_id"}}}}], 50699);
+assertErrorCode(coll, [{$project: {x: {$trim: {input: " x ", chars: "$_id"}}}}], 50700);
}());
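
A compact shell sketch (illustrative only; the trim_demo collection is hypothetical) of the behaviors the tests above pin down: the default character set strips Unicode whitespace such as \u2001 and \u2002, the 'chars' argument replaces that set outright without consulting the collation, and a null or missing 'input' yields null.

var demo = db.trim_demo;  // hypothetical scratch collection
demo.drop();
assert.writeOK(demo.insert({_id: 0, s: "\u2001\u2002 padded "}));

printjson(demo.aggregate([{
    $project: {
        _id: 0,
        plain: {$trim: {input: "$s"}},                 // "padded": default set strips
                                                       // Unicode whitespace too
        custom: {$rtrim: {input: "xXx", chars: "x"}},  // "xX": only 'x' is stripped
        absent: {$rtrim: {input: "$noSuchField"}}      // null: missing input yields null
    }
}]).toArray());
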
diff --git a/jstests/aggregation/extras/utils.js b/jstests/aggregation/extras/utils.js
index 3a8bbf5d071..60233208e0b 100644
--- a/jstests/aggregation/extras/utils.js
+++ b/jstests/aggregation/extras/utils.js
@@ -179,7 +179,7 @@ function arrayShallowCopy(a) {
* the same documents, although the order need not match and the _id values need not match.
*
* Are non-scalar values references?
-*/
+ */
function resultsEq(rl, rr, verbose = false) {
const debug = msg => verbose ? print(msg) : null; // Helper to log 'msg' iff 'verbose' is true.
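
As a usage note for the helper whose docstring is touched above (a sketch, assuming utils.js is loaded): resultsEq compares result sets as unordered bags and does not require _id values to agree, so both assertions below hold.

assert(resultsEq([{a: 1}, {a: 2}], [{a: 2}, {a: 1}]));         // order-insensitive
assert(resultsEq([{_id: 1, a: 1}], [{_id: 42, a: 1}], true));  // _id values ignored
                                                               // (verbose logging on)
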
diff --git a/jstests/aggregation/group_conversion_to_distinct_scan.js b/jstests/aggregation/group_conversion_to_distinct_scan.js
index 1982ac581ad..90b25268a7c 100644
--- a/jstests/aggregation/group_conversion_to_distinct_scan.js
+++ b/jstests/aggregation/group_conversion_to_distinct_scan.js
@@ -12,651 +12,651 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- let coll = db.group_conversion_to_distinct_scan;
- coll.drop();
-
- // Add test data and indexes. Fields prefixed with "mk" are multikey.
- let indexList = [
- {pattern: {a: 1, b: 1, c: 1}, option: {}},
- {pattern: {mkA: 1, b: 1, c: 1}, option: {}},
- {pattern: {aa: 1, mkB: 1, c: 1}, option: {}},
- {pattern: {aa: 1, bb: 1, c: 1}, option: {}},
- {pattern: {"foo.a": 1, "foo.b": 1}, option: {}},
- {pattern: {"mkFoo.a": 1, "mkFoo.b": 1}, option: {}},
- {pattern: {"foo.a": 1, "mkFoo.b": 1}, option: {}}
- ];
-
- function createIndexes() {
- for (let indexSpec of indexList) {
- assert.commandWorked(coll.createIndex(indexSpec.pattern, indexSpec.option));
- }
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+let coll = db.group_conversion_to_distinct_scan;
+coll.drop();
+
+// Add test data and indexes. Fields prefixed with "mk" are multikey.
+let indexList = [
+ {pattern: {a: 1, b: 1, c: 1}, option: {}},
+ {pattern: {mkA: 1, b: 1, c: 1}, option: {}},
+ {pattern: {aa: 1, mkB: 1, c: 1}, option: {}},
+ {pattern: {aa: 1, bb: 1, c: 1}, option: {}},
+ {pattern: {"foo.a": 1, "foo.b": 1}, option: {}},
+ {pattern: {"mkFoo.a": 1, "mkFoo.b": 1}, option: {}},
+ {pattern: {"foo.a": 1, "mkFoo.b": 1}, option: {}}
+];
+
+function createIndexes() {
+ for (let indexSpec of indexList) {
+ assert.commandWorked(coll.createIndex(indexSpec.pattern, indexSpec.option));
}
- createIndexes();
-
- assert.commandWorked(coll.insert([
- {_id: 0, a: 1, b: 1, c: 1},
- {_id: 1, a: 1, b: 2, c: 2},
- {_id: 2, a: 1, b: 2, c: 3},
- {_id: 3, a: 1, b: 3, c: 2},
- {_id: 4, a: 2, b: 2, c: 2},
- {_id: 5, b: 1, c: 1},
- {_id: 6, a: null, b: 1, c: 1},
-
- {_id: 7, aa: 1, mkB: 2, bb: 2},
- {_id: 8, aa: 1, mkB: [1, 3], bb: 1},
- {_id: 9, aa: 2, mkB: [], bb: 3},
-
- {_id: 10, mkA: 1, c: 3},
- {_id: 11, mkA: [2, 3, 4], c: 3},
- {_id: 12, mkA: 2, c: 2},
- {_id: 13, mkA: 3, c: 4},
-
- {_id: 14, foo: {a: 1, b: 1}, mkFoo: {a: 1, b: 1}},
- {_id: 15, foo: {a: 1, b: 2}, mkFoo: {a: 1, b: 2}},
- {_id: 16, foo: {a: 2, b: 2}, mkFoo: {a: 2, b: 2}},
- {_id: 17, foo: {b: 1}, mkFoo: {b: 1}},
- {_id: 18, foo: {a: null, b: 1}, mkFoo: {a: null, b: 1}},
- {_id: 19, foo: {a: 3}, mkFoo: [{a: 3, b: 4}, {a: 4, b: 3}]},
-
- {_id: 20, str: "foo", d: 1},
- {_id: 21, str: "FoO", d: 2},
- {_id: 22, str: "bar", d: 4},
- {_id: 23, str: "bAr", d: 3}
- ]));
-
- // Helper for dropping an index and removing it from the list of indexes.
- function removeIndex(pattern) {
- assert.commandWorked(coll.dropIndex(pattern));
- indexList = indexList.filter((ix) => bsonWoCompare(ix.pattern, pattern) != 0);
- }
-
- function addIndex(pattern, option) {
- indexList.push({pattern: pattern, option: option});
- assert.commandWorked(coll.createIndex(pattern, option));
- }
-
- // Check that 'pipeline' returns the correct results with and without a hint added to the query.
- // We also test with and without indices to check all the possibilities. 'options' is the
- // options to pass to aggregate() and may be omitted. Similarly, the hint object can be omitted
- // and will default to a $natural hint.
- function assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, expectedResults, hintObj = {$natural: 1}, options = {}) {
- assert.commandWorked(coll.dropIndexes());
- const resultsNoIndex = coll.aggregate(pipeline, options).toArray();
+}
+createIndexes();
+
+assert.commandWorked(coll.insert([
+ {_id: 0, a: 1, b: 1, c: 1},
+ {_id: 1, a: 1, b: 2, c: 2},
+ {_id: 2, a: 1, b: 2, c: 3},
+ {_id: 3, a: 1, b: 3, c: 2},
+ {_id: 4, a: 2, b: 2, c: 2},
+ {_id: 5, b: 1, c: 1},
+ {_id: 6, a: null, b: 1, c: 1},
+
+ {_id: 7, aa: 1, mkB: 2, bb: 2},
+ {_id: 8, aa: 1, mkB: [1, 3], bb: 1},
+ {_id: 9, aa: 2, mkB: [], bb: 3},
+
+ {_id: 10, mkA: 1, c: 3},
+ {_id: 11, mkA: [2, 3, 4], c: 3},
+ {_id: 12, mkA: 2, c: 2},
+ {_id: 13, mkA: 3, c: 4},
+
+ {_id: 14, foo: {a: 1, b: 1}, mkFoo: {a: 1, b: 1}},
+ {_id: 15, foo: {a: 1, b: 2}, mkFoo: {a: 1, b: 2}},
+ {_id: 16, foo: {a: 2, b: 2}, mkFoo: {a: 2, b: 2}},
+ {_id: 17, foo: {b: 1}, mkFoo: {b: 1}},
+ {_id: 18, foo: {a: null, b: 1}, mkFoo: {a: null, b: 1}},
+ {_id: 19, foo: {a: 3}, mkFoo: [{a: 3, b: 4}, {a: 4, b: 3}]},
+
+ {_id: 20, str: "foo", d: 1},
+ {_id: 21, str: "FoO", d: 2},
+ {_id: 22, str: "bar", d: 4},
+ {_id: 23, str: "bAr", d: 3}
+]));
+
+// Helper for dropping an index and removing it from the list of indexes.
+function removeIndex(pattern) {
+ assert.commandWorked(coll.dropIndex(pattern));
+ indexList = indexList.filter((ix) => bsonWoCompare(ix.pattern, pattern) != 0);
+}
+
+function addIndex(pattern, option) {
+ indexList.push({pattern: pattern, option: option});
+ assert.commandWorked(coll.createIndex(pattern, option));
+}
+
+// Check that 'pipeline' returns the correct results with and without a hint added to the query.
+// We also test with and without indices to check all the possibilities. 'options' is the
+// options to pass to aggregate() and may be omitted. Similarly, the hint object can be omitted
+// and will default to a $natural hint.
+function assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
+ expectedResults,
+ hintObj = {
+ $natural: 1
+ },
+ options = {}) {
+ assert.commandWorked(coll.dropIndexes());
+ const resultsNoIndex = coll.aggregate(pipeline, options).toArray();
- createIndexes();
- const resultsWithIndex = coll.aggregate(pipeline, options).toArray();
-
- const passedOptions = Object.assign({}, {hint: hintObj}, options);
- const resultsWithHint = coll.aggregate(pipeline, passedOptions).toArray();
-
- assert.sameMembers(resultsNoIndex, resultsWithIndex);
- assert.sameMembers(resultsWithIndex, resultsWithHint);
- assert.sameMembers(resultsWithHint, expectedResults);
- }
-
- //
- // Verify that a $sort-$group pipeline can use DISTINCT_SCAN when the sort is available from an
- // index.
- //
- let pipeline = [{$sort: {a: 1}}, {$group: {_id: "$a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: null}, {_id: 1}, {_id: 2}]);
- let explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
-
- // Pipelines that use the DISTINCT_SCAN optimization should not also have a blocking sort.
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $group pipeline can use DISTINCT_SCAN even when the user does not specify a
- // sort.
- //
- pipeline = [{$group: {_id: "$a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: null}, {_id: 1}, {_id: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $group pipeline with a $natural hint does not use DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$a"}}];
- explain = coll.explain().aggregate(pipeline, {hint: {$natural: 1}});
- assert.neq(null, getAggPlanStage(explain, "COLLSCAN"), explain);
-
- //
- // Verify that a $group pipeline with a pertinent hint as string does use DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$a"}}];
- explain = coll.explain().aggregate(pipeline, {hint: "a_1_b_1_c_1"});
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
-
- //
- // Verify that a $group pipeline with a pertinent hint as an object does use DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$a"}}];
- explain = coll.explain().aggregate(pipeline, {hint: {a: 1, b: 1, c: 1}});
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
-
- //
- // Verify that a $group pipeline with a non-pertinent hint does not use DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: 1}, {_id: 2}], {_id: 1});
- explain = coll.explain().aggregate(pipeline, {hint: {_id: 1}});
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), explain);
- assert.eq({_id: 1}, getAggPlanStage(explain, "IXSCAN").keyPattern);
-
- //
- // Verify that a $group pipeline with an index filter still uses DISTINCT_SCAN.
- //
- assert.commandWorked(db.runCommand({
- planCacheSetFilter: coll.getName(),
- query: {},
- projection: {a: 1, _id: 0},
- indexes: ["a_1_b_1_c_1"]
- }));
-
- pipeline = [{$group: {_id: "$a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: null}, {_id: 1}, {_id: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(true, explain.stages[0].$cursor.queryPlanner.indexFilterSet);
-
- //
- // Verify that a $group pipeline with an index filter and $natural hint uses DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$a"}}];
- explain = coll.explain().aggregate(pipeline, {hint: {$natural: 1}});
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(true, explain.stages[0].$cursor.queryPlanner.indexFilterSet);
-
- //
- // Verify that a $group pipeline with an index filter and non-pertinent hint uses DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: 1}, {_id: 2}], {_id: 1});
- explain = coll.explain().aggregate(pipeline, {hint: {_id: 1}});
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(true, explain.stages[0].$cursor.queryPlanner.indexFilterSet);
-
- assert.commandWorked(db.runCommand({planCacheClearFilters: coll.getName()}));
-
- //
- // Verify that a $sort-$group pipeline _does not_ use a DISTINCT_SCAN on a multikey field.
- //
- pipeline = [{$sort: {mkA: 1}}, {$group: {_id: "$mkA"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: [2, 3, 4]}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that a $sort-$group pipeline can use DISTINCT_SCAN when the sort is available from an
- // index and there are $first accumulators.
- //
- pipeline = [{$sort: {a: 1, b: 1}}, {$group: {_id: "$a", accum: {$first: "$b"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $sort-$group pipeline can use DISTINCT_SCAN when a $first accumulator needs the
- // entire document.
- //
- pipeline = [{$sort: {a: -1, b: -1}}, {$group: {_id: "$a", accum: {$first: "$$ROOT"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null, accum: {_id: 6, a: null, b: 1, c: 1}},
- {_id: 1, accum: {_id: 3, a: 1, b: 3, c: 2}},
- {_id: 2, accum: {_id: 4, a: 2, b: 2, c: 2}}
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern, explain);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $sort-$group pipeline can use DISTINCT_SCAN when sorting and grouping by fields
- // with dotted paths.
- //
- pipeline =
- [{$sort: {"foo.a": 1, "foo.b": 1}}, {$group: {_id: "$foo.a", accum: {$first: "$foo.b"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline,
- [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}, {_id: 3, accum: null}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({"foo.a": 1, "foo.b": 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $group pipeline can use DISTINCT_SCAN to group on a dotted path field, even
- // when the user does not specify a sort.
- //
- pipeline = [{$group: {_id: "$foo.a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
- [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that we _do not_ attempt to use a DISTINCT_SCAN on a multikey field.
- //
- pipeline = [{$group: {_id: "$mkA"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: [2, 3, 4]}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that we _do not_ use a DISTINCT_SCAN on a dotted field when the last component
- // is not multikey, but an intermediate component is.
- //
- pipeline = [{$group: {_id: "$mkFoo.a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null},
- {_id: 1},
- {_id: 2},
- {_id: [3, 4]},
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that we _do not_ attempt to use a DISTINCT_SCAN on a multikey dotted-path field when
- // a sort is present.
- //
- pipeline = [
- {$sort: {"mkFoo.a": 1, "mkFoo.b": 1}},
- {$group: {_id: "$mkFoo.a", accum: {$first: "$mkFoo.b"}}}
- ];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null, accum: null},
- {_id: 1, accum: 1},
- {_id: 2, accum: 2},
- {_id: [3, 4], accum: [4, 3]}
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that we _do not_ attempt a DISTINCT_SCAN to satisfy a sort on a multikey field, even
- // when the field we are grouping by is not multikey.
- //
- pipeline = [{$sort: {aa: 1, mkB: 1}}, {$group: {_id: "$aa", accum: {$first: "$mkB"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: null}, {_id: 1, accum: [1, 3]}, {_id: 2, accum: []}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), tojson(explain));
-
- //
- // Verify that with dotted paths we _do not_ attempt a DISTINCT_SCAN to satisfy a sort on a
- // multikey field, even when the field we are grouping by is not multikey.
- //
- pipeline = [
- {$sort: {"foo.a": 1, "mkFoo.b": 1}},
- {$group: {_id: "$foo.a", accum: {$first: "$mkFoo.b"}}}
- ];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null, accum: null},
- {_id: 1, accum: 1},
- {_id: 2, accum: 2},
- {_id: 3, accum: [4, 3]}
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that we can use a DISTINCT_SCAN on a multikey index to sort and group on a dotted-path
- // field, so long as the field we are sorting over is not multikey and comes before any multikey
- // fields in the index key pattern.
- //
- // We drop the {"foo.a": 1, "foo.b": 1} to force this test to use the multikey
- // {"foo.a": 1, "mkFoo.b"} index. The rest of the test doesn't use either of those indexes.
- //
- removeIndex({"foo.a": 1, "foo.b": 1});
- pipeline = [{$sort: {"foo.a": 1}}, {$group: {_id: "$foo.a"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
- [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({"foo.a": 1, "mkFoo.b": 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $sort-$group pipeline can use DISTINCT_SCAN even when there is a $first
- // accumulator that accesses a multikey field.
- //
- pipeline = [{$sort: {aa: 1, bb: 1}}, {$group: {_id: "$aa", accum: {$first: "$mkB"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: null}, {_id: 1, accum: [1, 3]}, {_id: 2, accum: []}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({aa: 1, bb: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $sort-$group pipeline can use DISTINCT_SCAN even when there is a $first
- // accumulator that includes an expression.
- //
- pipeline =
- [{$sort: {a: 1, b: 1}}, {$group: {_id: "$a", accum: {$first: {$add: ["$b", "$c"]}}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: null}, {_id: 1, accum: 2}, {_id: 2, accum: 4}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $match-$sort-$group pipeline can use a DISTINCT_SCAN to sort and group by a
- // field that is not the first field in a compound index, so long as the previous fields are
- // scanned with equality bounds (i.e., are point queries).
- //
- pipeline = [{$match: {a: 1}}, {$sort: {b: 1}}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Same as the previous case but with the sort order matching the index key pattern, so the
- // query planner does not need to infer the availability of a sort on {b: 1} based on the
- // equality bounds for the 'a' field.
- //
- pipeline = [{$match: {a: 1}}, {$sort: {a: 1, b: 1}}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Same as the previous case but with no user-specified sort.
- //
- pipeline = [{$match: {a: 1}}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $match-$sort-$group pipeline _does not_ use a DISTINCT_SCAN to sort and group
- // on the second field of an index when there is no equality match on the first field.
- //
- pipeline = [{$sort: {a: 1, b: 1}}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
- [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that a $match-$sort-$limit-$group pipeline _does not_ coalesce the $sort-$limit and
- // then consider the result eligible for the DISTINCT_SCAN optimization.
- //
- // In this example, the {$limit: 3} filters out the document {a: 1, b: 3, c: 2}, which means we
- // don't see a {_id: 3} group. If we instead applied the {$limit: 3} after the $group stage, we
- // would incorrectly list three groups. DISTINCT_SCAN won't work here, because we have to
- // examine each document in order to determine which groups get filtered out by the $limit.
- //
- pipeline = [{$match: {a: 1}}, {$sort: {a: 1, b: 1}}, {$limit: 3}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that an additional $project stage does not lead to incorrect results (although it will
- // preclude the use of the DISTINCT_SCAN optimization).
- //
- pipeline =
- [{$match: {a: 1}}, {$project: {a: 1, b: 1}}, {$sort: {a: 1, b: 1}}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
-
- //
- // Verify that a $sort-$group can use a DISTINCT_SCAN even when the requested sort is the
- // reverse of the index's sort.
- //
- pipeline = [{$sort: {a: -1, b: -1}}, {$group: {_id: "$a", accum: {$first: "$b"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: 1}, {_id: 1, accum: 3}, {_id: 2, accum: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $sort-$group pipeline _does not_ use DISTINCT_SCAN when there are non-$first
- // accumulators.
- //
- pipeline = [{$sort: {a: 1}}, {$group: {_id: "$a", accum: {$sum: "$b"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: 2}, {_id: 1, accum: 8}, {_id: 2, accum: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- // An index scan is still possible, though.
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), explain);
- assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "IXSCAN").keyPattern);
- assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
-
- //
- // Verify that a $sort-$group pipeline _does not_ use DISTINCT_SCAN when documents are not
- // sorted by the field used for grouping.
- //
- pipeline = [{$sort: {b: 1}}, {$group: {_id: "$a", accum: {$first: "$b"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- //
- // Verify that a $match-$sort-$group pipeline _does not_ use a DISTINCT_SCAN when the match does
- // not provide equality (point query) bounds for each field before the grouped-by field in the
- // index.
- //
- pipeline = [{$match: {a: {$gt: 0}}}, {$sort: {b: 1}}, {$group: {_id: "$b"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////
- // We execute all the collation-related tests three times with three different configurations
- // (no index, index without collation, index with collation).
- //
- // Collation tests 1: no index on string field.
- ////////////////////////////////////////////////////////////////////////////////////////////////
-
- const collationOption = {collation: {locale: "en_US", strength: 2}};
-
- //
- // Verify that a $group on an unindexed field uses a collection scan.
- //
- pipeline = [{$group: {_id: "$str"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: "FoO"}, {_id: "bAr"}, {_id: "bar"}, {_id: "foo"}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- //
- // Verify that a collated $sort-$group on an unindexed field uses a collection scan.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: "bAr"}, {_id: "foo"}], {$natural: 1}, collationOption);
- explain = coll.explain().aggregate(pipeline, collationOption);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- //
- // Verify that a $sort-$group pipeline uses a collection scan.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null, accum: null},
- {_id: "FoO", accum: 2},
- {_id: "bAr", accum: 3},
- {_id: "bar", accum: 4},
- {_id: "foo", accum: 1}
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- //
- // Verify that a collated $sort-$group pipeline with a $first accumulator uses a collection
- // scan.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline,
- [{_id: null, accum: null}, {_id: "bAr", accum: 3}, {_id: "foo", accum: 1}],
- {$natural: 1},
- collationOption);
- explain = coll.explain().aggregate(pipeline, collationOption);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////
- // Collation tests 2: index on string field with no collation.
- ////////////////////////////////////////////////////////////////////////////////////////////////
-
- addIndex({str: 1, d: 1});
-
- //
- // Verify that a $group uses a DISTINCT_SCAN.
- //
- pipeline = [{$group: {_id: "$str"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: "FoO"}, {_id: "bAr"}, {_id: "bar"}, {_id: "foo"}]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
-
- //
- // Verify that a $sort-$group pipeline with a collation _does not_ scan the index, which is not
- // aware of the collation.
- //
- // Note that, when using a case-insensitive collation, "bAr" and "bar" will get grouped
- // together, and the decision as to which one will represent the group is arbitrary. The
- // tie-breaking {d: 1} component of the sort forces a specific decision for this aggregation,
- // making this test more reliable.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: "bAr"}, {_id: "foo"}], {$natural: 1}, collationOption);
- explain = coll.explain().aggregate(pipeline, collationOption);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- //
- // Verify that a $sort-$group pipeline uses a DISTINCT_SCAN.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null, accum: null},
- {_id: "FoO", accum: 2},
- {_id: "bAr", accum: 3},
- {_id: "bar", accum: 4},
- {_id: "foo", accum: 1}
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
-
- //
- // Verify that a $sort-$group pipeline that uses a collation and includes a $first accumulator
- // _does not_ scan the index, which is not aware of the collation.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline,
- [{_id: null, accum: null}, {_id: "bAr", accum: 3}, {_id: "foo", accum: 1}],
- {$natural: 1},
- collationOption);
- explain = coll.explain().aggregate(pipeline, collationOption);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////
- // Collation tests 3: index on string field with case-insensitive collation.
- ////////////////////////////////////////////////////////////////////////////////////////////////
-
- removeIndex({str: 1, d: 1});
- addIndex({str: 1, d: 1}, collationOption);
-
- //
- // Verify that a $group with no collation _does not_ scan the index, which does have a
- // collation.
- //
- pipeline = [{$group: {_id: "$str"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: "FoO"}, {_id: "bAr"}, {_id: "bar"}, {_id: "foo"}]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- //
- // Verify that a $sort-$group with a collation uses a DISTINCT_SCAN on the index, which uses a
- // matching collation.
- //
- // Note that, when using a case-insensitive collation, "bAr" and "bar" will get grouped
- // together, and the decision as to which one will represent the group is arbitrary. The
- // tie-breaking {d: 1} component of the sort forces a specific decision for this aggregation,
- // making this test more reliable.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str"}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline, [{_id: null}, {_id: "bAr"}, {_id: "foo"}], {$natural: 1}, collationOption);
- explain = coll.explain().aggregate(pipeline, collationOption);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
-
- //
- // Verify that a $sort-$group pipeline with no collation _does not_ scan the index, which does
- // have a collation.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
- {_id: null, accum: null},
- {_id: "FoO", accum: 2},
- {_id: "bAr", accum: 3},
- {_id: "bar", accum: 4},
- {_id: "foo", accum: 1}
- ]);
- explain = coll.explain().aggregate(pipeline);
- assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
-
- //
- // Verify that a $sort-$group pipeline that uses a collation and includes a $first accumulator
- // uses a DISTINCT_SCAN, which uses a matching collation.
- //
- pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
- assertResultsMatchWithAndWithoutHintandIndexes(
- pipeline,
- [{_id: null, accum: null}, {_id: "bAr", accum: 3}, {_id: "foo", accum: 1}],
- {$natural: 1},
- collationOption);
- explain = coll.explain().aggregate(pipeline, collationOption);
- assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
- assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+ createIndexes();
+ const resultsWithIndex = coll.aggregate(pipeline, options).toArray();
+
+ const passedOptions = Object.assign({}, {hint: hintObj}, options);
+ const resultsWithHint = coll.aggregate(pipeline, passedOptions).toArray();
+
+ assert.sameMembers(resultsNoIndex, resultsWithIndex);
+ assert.sameMembers(resultsWithIndex, resultsWithHint);
+ assert.sameMembers(resultsWithHint, expectedResults);
+}
+
+//
+// Verify that a $sort-$group pipeline can use DISTINCT_SCAN when the sort is available from an
+// index.
+//
+let pipeline = [{$sort: {a: 1}}, {$group: {_id: "$a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: null}, {_id: 1}, {_id: 2}]);
+let explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+
+// Pipelines that use the DISTINCT_SCAN optimization should not also have a blocking sort.
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $group pipeline can use DISTINCT_SCAN even when the user does not specify a
+// sort.
+//
+pipeline = [{$group: {_id: "$a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: null}, {_id: 1}, {_id: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $group pipeline with a $natural hint does not use DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$a"}}];
+explain = coll.explain().aggregate(pipeline, {hint: {$natural: 1}});
+assert.neq(null, getAggPlanStage(explain, "COLLSCAN"), explain);
+
+//
+// Verify that a $group pipeline with a pertinent hint as string does use DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$a"}}];
+explain = coll.explain().aggregate(pipeline, {hint: "a_1_b_1_c_1"});
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+
+//
+// Verify that a $group pipeline with a pertinent hint as an object does use DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$a"}}];
+explain = coll.explain().aggregate(pipeline, {hint: {a: 1, b: 1, c: 1}});
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+
+//
+// Verify that a $group pipeline with a non-pertinent hint does not use DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: 1}, {_id: 2}], {_id: 1});
+explain = coll.explain().aggregate(pipeline, {hint: {_id: 1}});
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+assert.eq({_id: 1}, getAggPlanStage(explain, "IXSCAN").keyPattern);
+
+//
+// Verify that a $group pipeline with an index filter still uses DISTINCT_SCAN.
+//
+assert.commandWorked(db.runCommand({
+ planCacheSetFilter: coll.getName(),
+ query: {},
+ projection: {a: 1, _id: 0},
+ indexes: ["a_1_b_1_c_1"]
+}));
+
+pipeline = [{$group: {_id: "$a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: null}, {_id: 1}, {_id: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(true, explain.stages[0].$cursor.queryPlanner.indexFilterSet);
+
+//
+// Verify that a $group pipeline with an index filter and $natural hint uses DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$a"}}];
+explain = coll.explain().aggregate(pipeline, {hint: {$natural: 1}});
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(true, explain.stages[0].$cursor.queryPlanner.indexFilterSet);
+
+//
+// Verify that a $group pipeline with an index filter and non-pertinent hint uses DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: 1}, {_id: 2}], {_id: 1});
+explain = coll.explain().aggregate(pipeline, {hint: {_id: 1}});
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(true, explain.stages[0].$cursor.queryPlanner.indexFilterSet);
+
+assert.commandWorked(db.runCommand({planCacheClearFilters: coll.getName()}));
+
+//
+// Verify that a $sort-$group pipeline _does not_ use a DISTINCT_SCAN on a multikey field.
+//
+pipeline = [{$sort: {mkA: 1}}, {$group: {_id: "$mkA"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: [2, 3, 4]}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that a $sort-$group pipeline can use DISTINCT_SCAN when the sort is available from an
+// index and there are $first accumulators.
+//
+pipeline = [{$sort: {a: 1, b: 1}}, {$group: {_id: "$a", accum: {$first: "$b"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $sort-$group pipeline can use DISTINCT_SCAN when a $first accumulator needs the
+// entire document.
+//
+pipeline = [{$sort: {a: -1, b: -1}}, {$group: {_id: "$a", accum: {$first: "$$ROOT"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
+ {_id: null, accum: {_id: 6, a: null, b: 1, c: 1}},
+ {_id: 1, accum: {_id: 3, a: 1, b: 3, c: 2}},
+ {_id: 2, accum: {_id: 4, a: 2, b: 2, c: 2}}
+]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern, explain);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $sort-$group pipeline can use DISTINCT_SCAN when sorting and grouping by fields
+// with dotted paths.
+//
+pipeline =
+ [{$sort: {"foo.a": 1, "foo.b": 1}}, {$group: {_id: "$foo.a", accum: {$first: "$foo.b"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline,
+ [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}, {_id: 3, accum: null}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({"foo.a": 1, "foo.b": 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $group pipeline can use DISTINCT_SCAN to group on a dotted path field, even
+// when the user does not specify a sort.
+//
+pipeline = [{$group: {_id: "$foo.a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
+ [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that we _do not_ attempt to use a DISTINCT_SCAN on a multikey field.
+//
+pipeline = [{$group: {_id: "$mkA"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: [2, 3, 4]}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that we _do not_ use a DISTINCT_SCAN on a dotted field when the last component
+// is not multikey, but an intermediate component is.
+//
+pipeline = [{$group: {_id: "$mkFoo.a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
+ {_id: null},
+ {_id: 1},
+ {_id: 2},
+ {_id: [3, 4]},
+]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that we _do not_ attempt to use a DISTINCT_SCAN on a multikey dotted-path field when
+// a sort is present.
+//
+pipeline = [
+ {$sort: {"mkFoo.a": 1, "mkFoo.b": 1}},
+ {$group: {_id: "$mkFoo.a", accum: {$first: "$mkFoo.b"}}}
+];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
+ {_id: null, accum: null},
+ {_id: 1, accum: 1},
+ {_id: 2, accum: 2},
+ {_id: [3, 4], accum: [4, 3]}
+]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that we _do not_ attempt a DISTINCT_SCAN to satisfy a sort on a multikey field, even
+// when the field we are grouping by is not multikey.
+//
+pipeline = [{$sort: {aa: 1, mkB: 1}}, {$group: {_id: "$aa", accum: {$first: "$mkB"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: null}, {_id: 1, accum: [1, 3]}, {_id: 2, accum: []}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), tojson(explain));
+
+//
+// Verify that with dotted paths we _do not_ attempt a DISTINCT_SCAN to satisfy a sort on a
+// multikey field, even when the field we are grouping by is not multikey.
+//
+pipeline =
+ [{$sort: {"foo.a": 1, "mkFoo.b": 1}}, {$group: {_id: "$foo.a", accum: {$first: "$mkFoo.b"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline,
+ [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}, {_id: 3, accum: [4, 3]}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that we can use a DISTINCT_SCAN on a multikey index to sort and group on a dotted-path
+// field, so long as the field we are sorting over is not multikey and comes before any multikey
+// fields in the index key pattern.
+//
+// We drop the {"foo.a": 1, "foo.b": 1} to force this test to use the multikey
+// {"foo.a": 1, "mkFoo.b"} index. The rest of the test doesn't use either of those indexes.
+//
+removeIndex({"foo.a": 1, "foo.b": 1});
+pipeline = [{$sort: {"foo.a": 1}}, {$group: {_id: "$foo.a"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
+ [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({"foo.a": 1, "mkFoo.b": 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $sort-$group pipeline can use DISTINCT_SCAN even when there is a $first
+// accumulator that accesses a multikey field.
+//
+pipeline = [{$sort: {aa: 1, bb: 1}}, {$group: {_id: "$aa", accum: {$first: "$mkB"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: null}, {_id: 1, accum: [1, 3]}, {_id: 2, accum: []}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({aa: 1, bb: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $sort-$group pipeline can use DISTINCT_SCAN even when there is a $first
+// accumulator that includes an expression.
+//
+pipeline = [{$sort: {a: 1, b: 1}}, {$group: {_id: "$a", accum: {$first: {$add: ["$b", "$c"]}}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: null}, {_id: 1, accum: 2}, {_id: 2, accum: 4}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $match-$sort-$group pipeline can use a DISTINCT_SCAN to sort and group by a
+// field that is not the first field in a compound index, so long as the previous fields are
+// scanned with equality bounds (i.e., are point queries).
+//
+pipeline = [{$match: {a: 1}}, {$sort: {b: 1}}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Same as the previous case but with the sort order matching the index key pattern, so the
+// query planner does not need to infer the availability of a sort on {b: 1} based on the
+// equality bounds for the 'a' field.
+//
+pipeline = [{$match: {a: 1}}, {$sort: {a: 1, b: 1}}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Same as the previous case but with no user-specified sort.
+//
+pipeline = [{$match: {a: 1}}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $match-$sort-$group pipeline _does not_ use a DISTINCT_SCAN to sort and group
+// on the second field of an index when there is no equality match on the first field.
+//
+pipeline = [{$sort: {a: 1, b: 1}}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline,
+ [{_id: null}, {_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that a $match-$sort-$limit-$group pipeline _does not_ coalesce the $sort-$limit and
+// then consider the result eligible for the DISTINCT_SCAN optimization.
+//
+// In this example, the {$limit: 3} filters out the document {a: 1, b: 3, c: 2}, which means we
+// don't see a {_id: 3} group. If we instead applied the {$limit: 3} after the $group stage, we
+// would incorrectly list three groups. DISTINCT_SCAN won't work here, because we have to
+// examine each document in order to determine which groups get filtered out by the $limit.
+//
+pipeline = [{$match: {a: 1}}, {$sort: {a: 1, b: 1}}, {$limit: 3}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that an additional $project stage does not lead to incorrect results (although it will
+// preclude the use of the DISTINCT_SCAN optimization).
+//
+pipeline =
+ [{$match: {a: 1}}, {$project: {a: 1, b: 1}}, {$sort: {a: 1, b: 1}}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
+
+//
+// Verify that a $sort-$group can use a DISTINCT_SCAN even when the requested sort is the
+// reverse of the index's sort.
+//
+pipeline = [{$sort: {a: -1, b: -1}}, {$group: {_id: "$a", accum: {$first: "$b"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: 1}, {_id: 1, accum: 3}, {_id: 2, accum: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $sort-$group pipeline _does not_ use DISTINCT_SCAN when there are non-$first
+// accumulators.
+//
+pipeline = [{$sort: {a: 1}}, {$group: {_id: "$a", accum: {$sum: "$b"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: 2}, {_id: 1, accum: 8}, {_id: 2, accum: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+// An index scan is still possible, though.
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+assert.eq({a: 1, b: 1, c: 1}, getAggPlanStage(explain, "IXSCAN").keyPattern);
+assert.eq(null, getAggPlanStage(explain, "SORT"), explain);
+
+//
+// Verify that a $sort-$group pipeline _does not_ use DISTINCT_SCAN when documents are not
+// sorted by the field used for grouping.
+//
+pipeline = [{$sort: {b: 1}}, {$group: {_id: "$a", accum: {$first: "$b"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null, accum: null}, {_id: 1, accum: 1}, {_id: 2, accum: 2}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+//
+// Verify that a $match-$sort-$group pipeline _does not_ use a DISTINCT_SCAN when the match does
+// not provide equality (point query) bounds for each field before the grouped-by field in the
+// index.
+//
+pipeline = [{$match: {a: {$gt: 0}}}, {$sort: {b: 1}}, {$group: {_id: "$b"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [{_id: 1}, {_id: 2}, {_id: 3}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+// We execute all the collation-related tests three times with three different configurations
+// (no index, index without collation, index with collation).
+//
+// Collation tests 1: no index on string field.
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+const collationOption = {
+ collation: {locale: "en_US", strength: 2}
+};
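+
+// Strength-2 collation is diacritic-sensitive but case-insensitive, so "foo" and "FoO"
+// collate as equal. As a quick sanity check of that behavior (a sketch relying on the
+// documents inserted earlier in this test), a collated find for "foo" should match at least
+// the "foo" and "FoO" documents.
+assert.gte(coll.find({str: "foo"}).collation(collationOption.collation).itcount(), 2);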
+
+//
+// Verify that a $group on an unindexed field uses a collection scan.
+//
+pipeline = [{$group: {_id: "$str"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: "FoO"}, {_id: "bAr"}, {_id: "bar"}, {_id: "foo"}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+//
+// Verify that a collated $sort-$group pipeline on an unindexed field uses a collection scan.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: "bAr"}, {_id: "foo"}], {$natural: 1}, collationOption);
+explain = coll.explain().aggregate(pipeline, collationOption);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+//
+// Verify that a $sort-$group pipeline with a $first accumulator uses a collection scan.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
+ {_id: null, accum: null},
+ {_id: "FoO", accum: 2},
+ {_id: "bAr", accum: 3},
+ {_id: "bar", accum: 4},
+ {_id: "foo", accum: 1}
+]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+//
+// Verify that a collated $sort-$group pipeline with a $first accumulator uses a collection
+// scan.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline,
+ [{_id: null, accum: null}, {_id: "bAr", accum: 3}, {_id: "foo", accum: 1}],
+ {$natural: 1},
+ collationOption);
+explain = coll.explain().aggregate(pipeline, collationOption);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+// Collation tests 2: index on string field with no collation.
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+addIndex({str: 1, d: 1});
+
+//
+// Verify that a $group uses a DISTINCT_SCAN.
+//
+pipeline = [{$group: {_id: "$str"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: "FoO"}, {_id: "bAr"}, {_id: "bar"}, {_id: "foo"}]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+
+//
+// Verify that a $sort-$group pipeline with a collation _does not_ scan the index, which is not
+// aware of the collation.
+//
+// Note that, when using a case-insensitive collation, "bAr" and "bar" will get grouped
+// together, and the decision as to which one will represent the group is arbitrary. The
+// tie-breaking {d: 1} component of the sort forces a specific decision for this aggregation,
+// making this test more reliable.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: "bAr"}, {_id: "foo"}], {$natural: 1}, collationOption);
+explain = coll.explain().aggregate(pipeline, collationOption);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+//
+// Verify that a $sort-$group pipeline with a $first accumulator uses a DISTINCT_SCAN.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
+ {_id: null, accum: null},
+ {_id: "FoO", accum: 2},
+ {_id: "bAr", accum: 3},
+ {_id: "bar", accum: 4},
+ {_id: "foo", accum: 1}
+]);
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+
+//
+// Verify that a $sort-$group pipeline that uses a collation and includes a $first accumulator
+// _does not_ scan the index, which is not aware of the collation.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline,
+ [{_id: null, accum: null}, {_id: "bAr", accum: 3}, {_id: "foo", accum: 1}],
+ {$natural: 1},
+ collationOption);
+explain = coll.explain().aggregate(pipeline, collationOption);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+// Collation tests 3: index on string field with case-insensitive collation.
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+removeIndex({str: 1, d: 1});
+addIndex({str: 1, d: 1}, collationOption);
+
+//
+// Verify that a $group with no collation _does not_ scan the index, which does have a
+// collation.
+//
+pipeline = [{$group: {_id: "$str"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: "FoO"}, {_id: "bAr"}, {_id: "bar"}, {_id: "foo"}]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+//
+// Verify that a $sort-$group with a collation uses a DISTINCT_SCAN on the index, which uses a
+// matching collation.
+//
+// Note that, when using a case-insensitive collation, "bAr" and "bar" will get grouped
+// together, and the decision as to which one will represent the group is arbitrary. The
+// tie-breaking {d: 1} component of the sort forces a specific decision for this aggregation,
+// making this test more reliable.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str"}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline, [{_id: null}, {_id: "bAr"}, {_id: "foo"}], {$natural: 1}, collationOption);
+explain = coll.explain().aggregate(pipeline, collationOption);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
+
+//
+// Verify that a $sort-$group pipeline with no collation _does not_ scan the index, which does
+// have a collation.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(pipeline, [
+ {_id: null, accum: null},
+ {_id: "FoO", accum: 2},
+ {_id: "bAr", accum: 3},
+ {_id: "bar", accum: 4},
+ {_id: "foo", accum: 1}
+]);
+explain = coll.explain().aggregate(pipeline);
+assert.eq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq(null, getAggPlanStage(explain, "IXSCAN"), explain);
+
+//
+// Verify that a $sort-$group pipeline that uses a collation and includes a $first accumulator
+// uses a DISTINCT_SCAN, which uses a matching collation.
+//
+pipeline = [{$sort: {str: 1, d: 1}}, {$group: {_id: "$str", accum: {$first: "$d"}}}];
+assertResultsMatchWithAndWithoutHintandIndexes(
+ pipeline,
+ [{_id: null, accum: null}, {_id: "bAr", accum: 3}, {_id: "foo", accum: 1}],
+ {$natural: 1},
+ collationOption);
+explain = coll.explain().aggregate(pipeline, collationOption);
+assert.neq(null, getAggPlanStage(explain, "DISTINCT_SCAN"), explain);
+assert.eq({str: 1, d: 1}, getAggPlanStage(explain, "DISTINCT_SCAN").keyPattern);
}());
diff --git a/jstests/aggregation/illegal_reference_in_match.js b/jstests/aggregation/illegal_reference_in_match.js
index 7ef6c904406..eca2df0009c 100644
--- a/jstests/aggregation/illegal_reference_in_match.js
+++ b/jstests/aggregation/illegal_reference_in_match.js
@@ -2,35 +2,35 @@
// illegal inside the aggregation system is used in a $match that is not pushed down to the query
// system, the correct error is raised.
(function() {
- "use strict";
+"use strict";
- const coll = db.illegal_reference_in_match;
- assert.commandWorked(coll.insert({a: 1}));
+const coll = db.illegal_reference_in_match;
+assert.commandWorked(coll.insert({a: 1}));
- const pipeline = [
- // The limit stage prevents the planner from pushing the match into the query layer.
- {$limit: 10},
+const pipeline = [
+ // The limit stage prevents the planner from pushing the match into the query layer.
+ {$limit: 10},
- // 'a.$c' is an illegal path in the aggregation system (though it is legal in the query
- // system). The $limit above forces this $match to run as an aggregation stage, so the path
- // will be interpreted as illegal.
- {$match: {"a.$c": 4}},
+ // 'a.$c' is an illegal path in the aggregation system (though it is legal in the query
+ // system). The $limit above forces this $match to run as an aggregation stage, so the path
+ // will be interpreted as illegal.
+ {$match: {"a.$c": 4}},
- // This inclusion-projection allows the planner to determine that the only necessary fields
- // we need to fetch from the document are "_id" (by default), "a.$c" (since we do a match
- // on it) and "dummy" since we include/rename it as part of this $project.
+ // This inclusion-projection allows the planner to determine that the only necessary fields
+ // we need to fetch from the document are "_id" (by default), "a.$c" (since we do a match
+ // on it) and "dummy" since we include/rename it as part of this $project.
- // The reason we need to explicitly include a "dummy" field, rather than just including
- // "a.$c" is that, as mentioned before, a.$c is an illegal path in the aggregation system,
- // so if we use it as part of the project, the $project will fail to parse (and the
- // relevant code will not be exercised).
- {
- $project: {
- "newAndUnrelatedField": "$dummy",
- }
+ // The reason we need to explicitly include a "dummy" field, rather than just including
+ // "a.$c" is that, as mentioned before, a.$c is an illegal path in the aggregation system,
+ // so if we use it as part of the project, the $project will fail to parse (and the
+ // relevant code will not be exercised).
+ {
+ $project: {
+ "newAndUnrelatedField": "$dummy",
}
- ];
+ }
+];
- const err = assert.throws(() => coll.aggregate(pipeline));
- assert.eq(err.code, 16410);
+const err = assert.throws(() => coll.aggregate(pipeline));
+assert.eq(err.code, 16410);
})();
diff --git a/jstests/aggregation/match_swapping_renamed_fields.js b/jstests/aggregation/match_swapping_renamed_fields.js
index 92340a868cb..e537f249454 100644
--- a/jstests/aggregation/match_swapping_renamed_fields.js
+++ b/jstests/aggregation/match_swapping_renamed_fields.js
@@ -4,112 +4,105 @@
* @tags: [do_not_wrap_aggregations_in_facets]
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- let coll = db.match_swapping_renamed_fields;
- coll.drop();
-
- assert.writeOK(coll.insert([{a: 1, b: 1, c: 1}, {a: 2, b: 2, c: 2}, {a: 3, b: 3, c: 3}]));
- assert.commandWorked(coll.createIndex({a: 1}));
-
- // Test that a $match can result in index usage after moving past a field renamed by $project.
- let pipeline = [{$project: {_id: 0, z: "$a", c: 1}}, {$match: {z: {$gt: 1}}}];
- assert.eq(2, coll.aggregate(pipeline).itcount());
- let explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
-
- // Test that a $match can result in index usage after moving past a field renamed by $addFields.
- pipeline = [{$addFields: {z: "$a"}}, {$match: {z: {$gt: 1}}}];
- assert.eq(2, coll.aggregate(pipeline).itcount());
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
-
- // Test that a $match with $type can result in index usage after moving past a field renamed by
- // $project.
- pipeline = [{$project: {_id: 0, z: "$a", c: 1}}, {$match: {z: {$type: "number"}}}];
- assert.eq(3, coll.aggregate(pipeline).itcount());
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
-
- // Test that a partially dependent match can split, with a rename applied, resulting in index
- // usage.
- pipeline =
- [{$project: {z: "$a", zz: {$sum: ["$a", "$b"]}}}, {$match: {z: {$gt: 1}, zz: {$lt: 5}}}];
- assert.eq(1, coll.aggregate(pipeline).itcount());
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
-
- // Test that a match can swap past several renames, resulting in index usage.
- pipeline = [
- {$project: {d: "$a"}},
- {$addFields: {e: "$$CURRENT.d"}},
- {$project: {f: "$$ROOT.e"}},
- {$match: {f: {$gt: 1}}}
- ];
- assert.eq(2, coll.aggregate(pipeline).itcount());
- explain = coll.explain().aggregate(pipeline);
- assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}]}));
- assert.writeOK(coll.insert({_id: 1, a: [{b: 3, c: 3}, {b: 4, c: 4}]}));
- assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1}));
-
- // Test that a $match can result in index usage after moving past a dotted array path renamed by
- // a $map inside a $project.
- pipeline = [
- {$project: {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b", f: "$$iter.c"}}}}},
- {$match: {"d.e": 1, "d.f": 2}}
- ];
- assert.eq([{_id: 0, d: [{e: 1, f: 1}, {e: 2, f: 2}]}], coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- let ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- // Test that a $match can result in index usage after moving past a dotted array path renamed by
- // a $map inside an $addFields. This time the match expression is partially dependent and should
- // get split.
- pipeline = [
- {
- $addFields:
- {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b", f: "$$iter.c"}}}, g: 2}
- },
- {$match: {"d.e": 1, g: 2}}
- ];
- assert.eq([{_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}], d: [{e: 1, f: 1}, {e: 2, f: 2}], g: 2}],
- coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- // Test that match swapping behaves correctly when a $map contains a rename but also computes a
- // new field.
- pipeline = [
- {
- $addFields:
- {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b", f: {$literal: 99}}}}}
- },
- {$match: {"d.e": 1, "d.f": 99}}
- ];
- assert.eq([{_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}], d: [{e: 1, f: 99}, {e: 2, f: 99}]}],
- coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: [{c: 1}, {c: 2}]}, {b: [{c: 3}, {c: 4}]}]}));
- assert.writeOK(coll.insert({_id: 1, a: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}]}));
- assert.commandWorked(coll.createIndex({"a.b.c": 1}));
-
- // Test that a $match can result in index usage by moving past a rename of a field inside
- // two-levels of arrays. The rename is expressed using nested $map inside a $project.
- pipeline = [
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+let coll = db.match_swapping_renamed_fields;
+coll.drop();
+
+assert.writeOK(coll.insert([{a: 1, b: 1, c: 1}, {a: 2, b: 2, c: 2}, {a: 3, b: 3, c: 3}]));
+assert.commandWorked(coll.createIndex({a: 1}));
+
+// Test that a $match can result in index usage after moving past a field renamed by $project.
+let pipeline = [{$project: {_id: 0, z: "$a", c: 1}}, {$match: {z: {$gt: 1}}}];
+assert.eq(2, coll.aggregate(pipeline).itcount());
+let explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
+
+// Test that a $match can result in index usage after moving past a field renamed by $addFields.
+pipeline = [{$addFields: {z: "$a"}}, {$match: {z: {$gt: 1}}}];
+assert.eq(2, coll.aggregate(pipeline).itcount());
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
+
+// Test that a $match with $type can result in index usage after moving past a field renamed by
+// $project.
+pipeline = [{$project: {_id: 0, z: "$a", c: 1}}, {$match: {z: {$type: "number"}}}];
+assert.eq(3, coll.aggregate(pipeline).itcount());
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
+
+// Test that a partially dependent match can split, with a rename applied, resulting in index
+// usage.
+pipeline = [{$project: {z: "$a", zz: {$sum: ["$a", "$b"]}}}, {$match: {z: {$gt: 1}, zz: {$lt: 5}}}];
+assert.eq(1, coll.aggregate(pipeline).itcount());
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
+
+// Test that a match can swap past several renames, resulting in index usage.
+pipeline = [
+ {$project: {d: "$a"}},
+ {$addFields: {e: "$$CURRENT.d"}},
+ {$project: {f: "$$ROOT.e"}},
+ {$match: {f: {$gt: 1}}}
+];
+assert.eq(2, coll.aggregate(pipeline).itcount());
+explain = coll.explain().aggregate(pipeline);
+assert.neq(null, getAggPlanStage(explain, "IXSCAN"), tojson(explain));
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}]}));
+assert.writeOK(coll.insert({_id: 1, a: [{b: 3, c: 3}, {b: 4, c: 4}]}));
+assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1}));
+
+// Test that a $match can result in index usage after moving past a dotted array path renamed by
+// a $map inside a $project.
+pipeline = [
+ {$project: {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b", f: "$$iter.c"}}}}},
+ {$match: {"d.e": 1, "d.f": 2}}
+];
+assert.eq([{_id: 0, d: [{e: 1, f: 1}, {e: 2, f: 2}]}], coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+let ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+// Test that a $match can result in index usage after moving past a dotted array path renamed by
+// a $map inside an $addFields. This time the match expression is partially dependent and should
+// get split.
+pipeline = [
+ {$addFields: {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b", f: "$$iter.c"}}}, g: 2}},
+ {$match: {"d.e": 1, g: 2}}
+];
+assert.eq([{_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}], d: [{e: 1, f: 1}, {e: 2, f: 2}], g: 2}],
+ coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+// Test that match swapping behaves correctly when a $map contains a rename but also computes a
+// new field.
+pipeline = [
+ {$addFields: {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b", f: {$literal: 99}}}}}},
+ {$match: {"d.e": 1, "d.f": 99}}
+];
+assert.eq([{_id: 0, a: [{b: 1, c: 1}, {b: 2, c: 2}], d: [{e: 1, f: 99}, {e: 2, f: 99}]}],
+ coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b": 1, "a.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: [{c: 1}, {c: 2}]}, {b: [{c: 3}, {c: 4}]}]}));
+assert.writeOK(coll.insert({_id: 1, a: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}]}));
+assert.commandWorked(coll.createIndex({"a.b.c": 1}));
+
+// Test that a $match can result in index usage by moving past a rename of a field inside
+// two-levels of arrays. The rename is expressed using nested $map inside a $project.
+pipeline = [
{
$project: {
d: {
@@ -131,16 +124,16 @@
},
{$match: {"d.e.f": 7}}
];
- assert.eq([{_id: 1, d: [{e: [{f: 5}, {f: 6}]}, {e: [{f: 7}, {f: 8}]}]}],
- coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- // Test that a $match can result in index usage by moving past a rename of a field inside
- // two-levels of arrays. The rename is expressed using nested $map inside an $addFields.
- pipeline = [
+assert.eq([{_id: 1, d: [{e: [{f: 5}, {f: 6}]}, {e: [{f: 7}, {f: 8}]}]}],
+ coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+// Test that a $match can result in index usage by moving past a rename of a field inside
+// two-levels of arrays. The rename is expressed using nested $map inside an $addFields.
+pipeline = [
{
$addFields: {
d: {
@@ -162,53 +155,50 @@
},
{$match: {"d.b.c": 7}}
];
- assert.eq([{
- _id: 1,
- a: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}],
- d: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}]
- }],
- coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- // Test that we correctly match on the subfield of a renamed field. Here, a match on "x.b.c"
- // follows an "a" to "x" rename. When we move the match stage in front of the rename, the match
- // should also get rewritten to use "a.b.c" as its filter.
- pipeline = [{$project: {x: "$a"}}, {$match: {"x.b.c": 1}}];
- assert.eq([{_id: 0, x: [{b: [{c: 1}, {c: 2}]}, {b: [{c: 3}, {c: 4}]}]}],
- coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- // Test that we correctly match on the subfield of a renamed field when the rename results from
- // a $map operation. Here, a match on "d.e.c" follows an "a.b" to "d.e" rename. When we move the
- // match stage in front of the renaming $map operation, the match should also get rewritten to
- // use "a.b.c" as its filter.
- pipeline = [
- {$project: {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b"}}}}},
- {$match: {"d.e.c": 7}}
- ];
- assert.eq([{_id: 1, d: [{e: [{c: 5}, {c: 6}]}, {e: [{c: 7}, {c: 8}]}]}],
- coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
-
- // Test multiple renames. Designed to reproduce SERVER-32690.
- pipeline = [
- {$_internalInhibitOptimization: {}},
- {$project: {x: "$x", y: "$x"}},
- {$match: {y: 1, w: 1}}
- ];
- assert.eq([], coll.aggregate(pipeline).toArray());
- explain = coll.explain().aggregate(pipeline);
- // We expect that the $match stage has been split into two, since one predicate has an
- // applicable rename that allows swapping, while the other does not.
- let matchStages = getAggPlanStages(explain, "$match");
- assert.eq(2, matchStages.length);
+assert.eq([{
+ _id: 1,
+ a: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}],
+ d: [{b: [{c: 5}, {c: 6}]}, {b: [{c: 7}, {c: 8}]}]
+ }],
+ coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+// Test that we correctly match on the subfield of a renamed field. Here, a match on "x.b.c"
+// follows an "a" to "x" rename. When we move the match stage in front of the rename, the match
+// should also get rewritten to use "a.b.c" as its filter.
+pipeline = [{$project: {x: "$a"}}, {$match: {"x.b.c": 1}}];
+assert.eq([{_id: 0, x: [{b: [{c: 1}, {c: 2}]}, {b: [{c: 3}, {c: 4}]}]}],
+ coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+// Test that we correctly match on the subfield of a renamed field when the rename results from
+// a $map operation. Here, a match on "d.e.c" follows an "a.b" to "d.e" rename. When we move the
+// match stage in front of the renaming $map operation, the match should also get rewritten to
+// use "a.b.c" as its filter.
+pipeline = [
+ {$project: {d: {$map: {input: "$a", as: "iter", in : {e: "$$iter.b"}}}}},
+ {$match: {"d.e.c": 7}}
+];
+assert.eq([{_id: 1, d: [{e: [{c: 5}, {c: 6}]}, {e: [{c: 7}, {c: 8}]}]}],
+ coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, tojson(explain));
+assert.eq({"a.b.c": 1}, ixscan.keyPattern, tojson(ixscan));
+
+// Test multiple renames. Designed to reproduce SERVER-32690.
+pipeline =
+ [{$_internalInhibitOptimization: {}}, {$project: {x: "$x", y: "$x"}}, {$match: {y: 1, w: 1}}];
+assert.eq([], coll.aggregate(pipeline).toArray());
+explain = coll.explain().aggregate(pipeline);
+// We expect that the $match stage has been split into two, since one predicate has an
+// applicable rename that allows swapping, while the other does not.
+let matchStages = getAggPlanStages(explain, "$match");
+assert.eq(2, matchStages.length);
}());
diff --git a/jstests/aggregation/mongos_merge.js b/jstests/aggregation/mongos_merge.js
index 67a6d433312..f6bbeea0122 100644
--- a/jstests/aggregation/mongos_merge.js
+++ b/jstests/aggregation/mongos_merge.js
@@ -18,226 +18,221 @@
*/
(function() {
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
- load('jstests/libs/geo_near_random.js'); // For GeoNearRandomTest.
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
- const unshardedColl = mongosDB[jsTestName() + "_unsharded"];
-
- const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Always merge pipelines which cannot merge on mongoS on the primary shard instead, so we know
- // where to check for $mergeCursors.
- assert.commandWorked(
- mongosDB.adminCommand({setParameter: 1, internalQueryAlwaysMergeOnPrimaryShard: true}));
-
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // We will need to test $geoNear on this collection, so create a 2dsphere index.
- assert.commandWorked(mongosColl.createIndex({geo: "2dsphere"}));
-
- // We will test that $textScore metadata is not propagated to the user, so create a text index.
- assert.commandWorked(mongosColl.createIndex({text: "text"}));
-
- // Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: -100}}));
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
-
- // Move the [0, 100) and [100, MaxKey) chunks to shard1.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
-
- // Create a random geo co-ord generator for testing.
- var georng = new GeoNearRandomTest(mongosColl);
-
- // Write 400 documents across the 4 chunks.
- for (let i = -200; i < 200; i++) {
- assert.writeOK(mongosColl.insert(
- {_id: i, a: [i], b: {redactThisDoc: true}, c: true, geo: georng.mkPt(), text: "txt"}));
- assert.writeOK(unshardedColl.insert({_id: i, x: i}));
- }
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+load('jstests/libs/geo_near_random.js'); // For GeoNearRandomTest.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- let testNameHistory = new Set();
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- // Clears system.profile and restarts the profiler on the primary shard. We enable profiling to
- // verify that no $mergeCursors occur during tests where we expect the merge to run on mongoS.
- function startProfiling() {
- assert.commandWorked(primaryShardDB.setProfilingLevel(0));
- primaryShardDB.system.profile.drop();
- assert.commandWorked(primaryShardDB.setProfilingLevel(2));
- }
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+const unshardedColl = mongosDB[jsTestName() + "_unsharded"];
- /**
- * Runs the aggregation specified by 'pipeline', verifying that:
- * - The number of documents returned by the aggregation matches 'expectedCount'.
- * - The merge was performed on a mongoS if 'mergeType' is 'mongos', and on a shard otherwise.
- */
- function assertMergeBehaviour(
- {testName, pipeline, mergeType, batchSize, allowDiskUse, expectedCount}) {
- // Ensure that this test has a unique name.
- assert(!testNameHistory.has(testName));
- testNameHistory.add(testName);
-
- // Create the aggregation options from the given arguments.
- const opts = {
- comment: testName,
- cursor: (batchSize ? {batchSize: batchSize} : {}),
- };
-
- if (allowDiskUse !== undefined) {
- opts.allowDiskUse = allowDiskUse;
- }
+const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
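+// (Note that 'primaryShardDB' is created as a global in the assignment above; the profiler
+// helpers below rely on it.)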
- // Verify that the explain() output's 'mergeType' field matches our expectation.
- assert.eq(
- assert.commandWorked(mongosColl.explain().aggregate(pipeline, Object.extend({}, opts)))
- .mergeType,
- mergeType);
-
- // Verify that the aggregation returns the expected number of results.
- assert.eq(mongosColl.aggregate(pipeline, opts).itcount(), expectedCount);
-
- // Verify that a $mergeCursors aggregation ran on the primary shard if 'mergeType' is not
- // 'mongos', and that no such aggregation ran otherwise.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- numExpectedMatches: (mergeType === "mongos" ? 0 : 1),
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: 1}
- }
- });
- }
+assert.commandWorked(mongosDB.dropDatabase());
- /**
- * Throws an assertion if the aggregation specified by 'pipeline' does not produce
- * 'expectedCount' results, or if the merge phase is not performed on the mongoS.
- */
- function assertMergeOnMongoS({testName, pipeline, batchSize, allowDiskUse, expectedCount}) {
- assertMergeBehaviour({
- testName: testName,
- pipeline: pipeline,
- mergeType: "mongos",
- batchSize: (batchSize || 10),
- allowDiskUse: allowDiskUse,
- expectedCount: expectedCount
- });
- }
+// Always merge pipelines which cannot merge on mongoS on the primary shard instead, so we know
+// where to check for $mergeCursors.
+assert.commandWorked(
+ mongosDB.adminCommand({setParameter: 1, internalQueryAlwaysMergeOnPrimaryShard: true}));
- /**
- * Throws an assertion if the aggregation specified by 'pipeline' does not produce
- * 'expectedCount' results, or if the merge phase was not performed on a shard.
- */
- function assertMergeOnMongoD(
- {testName, pipeline, mergeType, batchSize, allowDiskUse, expectedCount}) {
- assertMergeBehaviour({
- testName: testName,
- pipeline: pipeline,
- mergeType: (mergeType || "anyShard"),
- batchSize: (batchSize || 10),
- allowDiskUse: allowDiskUse,
- expectedCount: expectedCount
- });
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// We will need to test $geoNear on this collection, so create a 2dsphere index.
+assert.commandWorked(mongosColl.createIndex({geo: "2dsphere"}));
+
+// We will test that $textScore metadata is not propagated to the user, so create a text index.
+assert.commandWorked(mongosColl.createIndex({text: "text"}));
+
+// Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: -100}}));
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
+
+// Move the [0, 100) and [100, MaxKey) chunks to shard1.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
+
+// Create a random geo co-ord generator for testing.
+var georng = new GeoNearRandomTest(mongosColl);
+
+// Write 400 documents across the 4 chunks.
+for (let i = -200; i < 200; i++) {
+ assert.writeOK(mongosColl.insert(
+ {_id: i, a: [i], b: {redactThisDoc: true}, c: true, geo: georng.mkPt(), text: "txt"}));
+ assert.writeOK(unshardedColl.insert({_id: i, x: i}));
+}
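+// Each of the four chunks now holds 100 documents, giving 200 documents per shard.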
+
+let testNameHistory = new Set();
+
+// Clears system.profile and restarts the profiler on the primary shard. We enable profiling
+// to verify that no $mergeCursors aggregations occur during tests where we expect the merge
+// to run on mongoS.
+function startProfiling() {
+ assert.commandWorked(primaryShardDB.setProfilingLevel(0));
+ primaryShardDB.system.profile.drop();
+ assert.commandWorked(primaryShardDB.setProfilingLevel(2));
+}
+
+/**
+ * Runs the aggregation specified by 'pipeline', verifying that:
+ * - The number of documents returned by the aggregation matches 'expectedCount'.
+ * - The merge was performed on a mongoS if 'mergeType' is 'mongos', and on a shard otherwise.
+ */
+function assertMergeBehaviour(
+ {testName, pipeline, mergeType, batchSize, allowDiskUse, expectedCount}) {
+ // Ensure that this test has a unique name.
+ assert(!testNameHistory.has(testName));
+ testNameHistory.add(testName);
+
+ // Create the aggregation options from the given arguments.
+ const opts = {
+ comment: testName,
+ cursor: (batchSize ? {batchSize: batchSize} : {}),
+ };
+
+ if (allowDiskUse !== undefined) {
+ opts.allowDiskUse = allowDiskUse;
}
- /**
- * Runs a series of test cases which will consistently merge on mongoS or mongoD regardless of
- * whether 'allowDiskUse' is true, false or omitted.
- */
- function runTestCasesWhoseMergeLocationIsConsistentRegardlessOfAllowDiskUse(allowDiskUse) {
- // Test that a $match pipeline with an empty merge stage is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_match_only",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
+ // Verify that the explain() output's 'mergeType' field matches our expectation.
+ assert.eq(
+ assert.commandWorked(mongosColl.explain().aggregate(pipeline, Object.extend({}, opts)))
+ .mergeType,
+ mergeType);
+
+ // Verify that the aggregation returns the expected number of results.
+ assert.eq(mongosColl.aggregate(pipeline, opts).itcount(), expectedCount);
+
+ // Verify that a $mergeCursors aggregation ran on the primary shard if 'mergeType' is not
+ // 'mongos', and that no such aggregation ran otherwise.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryShardDB,
+ numExpectedMatches: (mergeType === "mongos" ? 0 : 1),
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: 1}
+ }
+ });
+}
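+// (The tests below invoke this helper through the assertMergeOnMongoS and assertMergeOnMongoD
+// wrappers rather than calling it directly.)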
- // Test that a $sort stage which merges pre-sorted streams is run on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_sort_presorted",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
+/**
+ * Throws an assertion if the aggregation specified by 'pipeline' does not produce
+ * 'expectedCount' results, or if the merge phase is not performed on the mongoS.
+ */
+function assertMergeOnMongoS({testName, pipeline, batchSize, allowDiskUse, expectedCount}) {
+ assertMergeBehaviour({
+ testName: testName,
+ pipeline: pipeline,
+ mergeType: "mongos",
+ batchSize: (batchSize || 10),
+ allowDiskUse: allowDiskUse,
+ expectedCount: expectedCount
+ });
+}
- // Test that $skip is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_skip",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}, {$skip: 300}],
- allowDiskUse: allowDiskUse,
- expectedCount: 100
- });
+/**
+ * Throws an assertion if the aggregation specified by 'pipeline' does not produce
+ * 'expectedCount' results, or if the merge phase was not performed on a shard.
+ */
+function assertMergeOnMongoD(
+ {testName, pipeline, mergeType, batchSize, allowDiskUse, expectedCount}) {
+ assertMergeBehaviour({
+ testName: testName,
+ pipeline: pipeline,
+ mergeType: (mergeType || "anyShard"),
+ batchSize: (batchSize || 10),
+ allowDiskUse: allowDiskUse,
+ expectedCount: expectedCount
+ });
+}
- // Test that $limit is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_limit",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$limit: 300}],
- allowDiskUse: allowDiskUse,
- expectedCount: 300
- });
+/**
+ * Runs a series of test cases which will consistently merge on mongoS or mongoD regardless of
+ * whether 'allowDiskUse' is true, false or omitted.
+ */
+function runTestCasesWhoseMergeLocationIsConsistentRegardlessOfAllowDiskUse(allowDiskUse) {
+ // Test that a $match pipeline with an empty merge stage is merged on mongoS.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_match_only",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 400
+ });
- // Test that $sample is merged on mongoS if it is the splitpoint, since this will result in
- // a merging $sort of presorted streams in the merge pipeline.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_sample_splitpoint",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sample: {size: 300}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 300
- });
+ // Test that a $sort stage which merges pre-sorted streams is run on mongoS.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_sort_presorted",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 400
+ });
- // Test that $geoNear is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_geo_near",
- pipeline: [
- {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
- {$limit: 300}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 300
- });
+ // Test that $skip is merged on mongoS.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_skip",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}, {$skip: 300}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 100
+ });
- // Test that $facet is merged on mongoS if all pipelines are mongoS-mergeable regardless of
- // 'allowDiskUse'.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_facet_all_pipes_eligible_for_mongos",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $facet: {
- pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
- pipe2: [{$match: {_id: {$lt: 0}}}, {$project: {_id: 0, a: 1}}]
- }
+ // Test that $limit is merged on mongoS.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_limit",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$limit: 300}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 300
+ });
+
+ // Test that $sample is merged on mongoS if it is the splitpoint, since this will result in
+ // a merging $sort of presorted streams in the merge pipeline.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_sample_splitpoint",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sample: {size: 300}}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 300
+ });
+
+ // Test that $geoNear is merged on mongoS.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_geo_near",
+ pipeline:
+ [{$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}}, {$limit: 300}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 300
+ });
+
+ // Test that $facet is merged on mongoS if all pipelines are mongoS-mergeable regardless of
+ // 'allowDiskUse'.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_facet_all_pipes_eligible_for_mongos",
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {
+ $facet: {
+ pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
+ pipe2: [{$match: {_id: {$lt: 0}}}, {$project: {_id: 0, a: 1}}]
}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
+ }
+ ],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 1
+ });
- // Test that $facet is merged on mongoD if any pipeline requires a primary shard merge,
- // regardless of 'allowDiskUse'.
- assertMergeOnMongoD({
+ // Test that $facet is merged on mongoD if any pipeline requires a primary shard merge,
+ // regardless of 'allowDiskUse'.
+ assertMergeOnMongoD({
testName: "agg_mongos_merge_facet_pipe_needs_primary_shard_disk_use_" + allowDiskUse,
pipeline: [
{$match: {_id: {$gte: -200, $lte: 200}}},
@@ -263,43 +258,43 @@
expectedCount: 1
});
- // Test that a pipeline whose merging half can be run on mongos using only the mongos
- // execution machinery returns the correct results.
- // TODO SERVER-30882 Find a way to assert that all stages get absorbed by mongos.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_all_mongos_runnable_skip_and_limit_stages",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$sort: {_id: -1}},
- {$skip: 150},
- {$limit: 150},
- {$skip: 5},
- {$limit: 1},
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
+ // Test that a pipeline whose merging half can be run on mongos using only the mongos
+ // execution machinery returns the correct results.
+ // TODO SERVER-30882 Find a way to assert that all stages get absorbed by mongos.
+ assertMergeOnMongoS({
+ testName: "agg_mongos_merge_all_mongos_runnable_skip_and_limit_stages",
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {$sort: {_id: -1}},
+ {$skip: 150},
+ {$limit: 150},
+ {$skip: 5},
+ {$limit: 1},
+ ],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 1
+ });
- // Test that a merge pipeline which needs to run on a shard is NOT merged on mongoS
- // regardless of 'allowDiskUse'.
- assertMergeOnMongoD({
- testName: "agg_mongos_merge_primary_shard_disk_use_" + allowDiskUse,
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$_internalSplitPipeline: {mergeType: "anyShard"}}
- ],
- mergeType: "anyShard",
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
+ // Test that a merge pipeline which needs to run on a shard is NOT merged on mongoS
+ // regardless of 'allowDiskUse'.
+ assertMergeOnMongoD({
+ testName: "agg_mongos_merge_primary_shard_disk_use_" + allowDiskUse,
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {$_internalSplitPipeline: {mergeType: "anyShard"}}
+ ],
+ mergeType: "anyShard",
+ allowDiskUse: allowDiskUse,
+ expectedCount: 400
+ });
- // Allow sharded $lookup.
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
+ // Allow sharded $lookup.
+ setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
- // Test that $lookup is merged on the primary shard when the foreign collection is
- // unsharded.
- assertMergeOnMongoD({
+ // Test that $lookup is merged on the primary shard when the foreign collection is
+ // unsharded.
+ assertMergeOnMongoD({
testName: "agg_mongos_merge_lookup_unsharded_disk_use_" + allowDiskUse,
pipeline: [
{$match: {_id: {$gte: -200, $lte: 200}}},
@@ -317,8 +312,8 @@
expectedCount: 400
});
- // Test that $lookup is merged on mongoS when the foreign collection is sharded.
- assertMergeOnMongoS({
+ // Test that $lookup is merged on mongoS when the foreign collection is sharded.
+ assertMergeOnMongoS({
testName: "agg_mongos_merge_lookup_sharded_disk_use_" + allowDiskUse,
pipeline: [
{$match: {_id: {$gte: -200, $lte: 200}}},
@@ -336,192 +331,180 @@
expectedCount: 400
});
- // Disable sharded $lookup.
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
- }
+ // Disable sharded $lookup.
+ setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
+}
- /**
- * Runs a series of test cases which will always merge on mongoD when 'allowDiskUse' is true,
- * and on mongoS when 'allowDiskUse' is false or omitted.
- */
- function runTestCasesWhoseMergeLocationDependsOnAllowDiskUse(allowDiskUse) {
- // All test cases should merge on mongoD if allowDiskUse is true, mongoS otherwise.
- const assertMergeOnMongoX = (allowDiskUse ? assertMergeOnMongoD : assertMergeOnMongoS);
-
- // Test that a blocking $sort is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_blocking_sort_no_disk_use",
- pipeline:
- [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}, {$sort: {a: 1}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
+/**
+ * Runs a series of test cases which will always merge on mongoD when 'allowDiskUse' is true,
+ * and on mongoS when 'allowDiskUse' is false or omitted.
+ */
+function runTestCasesWhoseMergeLocationDependsOnAllowDiskUse(allowDiskUse) {
+ // All test cases should merge on mongoD if allowDiskUse is true, mongoS otherwise.
+ const assertMergeOnMongoX = (allowDiskUse ? assertMergeOnMongoD : assertMergeOnMongoS);
+
+ // Test that a blocking $sort is only merged on mongoS if 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_blocking_sort_no_disk_use",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}, {$sort: {a: 1}}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 400
+ });
- // Test that $group is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_group_allow_disk_use",
- pipeline:
- [{$match: {_id: {$gte: -200, $lte: 200}}}, {$group: {_id: {$mod: ["$_id", 150]}}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 299
- });
+ // Test that $group is only merged on mongoS if 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_group_allow_disk_use",
+ pipeline:
+ [{$match: {_id: {$gte: -200, $lte: 200}}}, {$group: {_id: {$mod: ["$_id", 150]}}}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 299
+ });
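+    // (299 distinct groups: $mod keeps the dividend's sign, so the remainders span -149
+    // through -1 plus 0 through 149.)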
- // Test that a blocking $sample is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_blocking_sample_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$sample: {size: 300}},
- {$sample: {size: 200}}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 200
- });
+ // Test that a blocking $sample is only merged on mongoS if 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_blocking_sample_allow_disk_use",
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {$sample: {size: 300}},
+ {$sample: {size: 200}}
+ ],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 200
+ });
- // Test that $facet is only merged on mongoS if all pipelines are mongoS-mergeable when
- // 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_facet_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $facet: {
- pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
- pipe2: [{$match: {_id: {$lt: 0}}}, {$sort: {a: -1}}]
- }
+ // Test that $facet is only merged on mongoS if all pipelines are mongoS-mergeable when
+ // 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_facet_allow_disk_use",
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {
+ $facet: {
+ pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
+ pipe2: [{$match: {_id: {$lt: 0}}}, {$sort: {a: -1}}]
}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
-
- // Test that $bucketAuto is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_bucket_auto_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$bucketAuto: {groupBy: "$_id", buckets: 10}}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 10
- });
-
- //
- // Test composite stages.
- //
+ }
+ ],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 1
+ });
- // Test that $bucket ($group->$sort) is merged on mongoS iff 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_bucket_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $bucket: {
- groupBy: "$_id",
- boundaries: [-200, -150, -100, -50, 0, 50, 100, 150, 200]
- }
- }
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 8
- });
+ // Test that $bucketAuto is only merged on mongoS if 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_bucket_auto_allow_disk_use",
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {$bucketAuto: {groupBy: "$_id", buckets: 10}}
+ ],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 10
+ });
- // Test that $sortByCount ($group->$sort) is merged on mongoS iff 'allowDiskUse' isn't set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_sort_by_count_allow_disk_use",
- pipeline:
- [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sortByCount: {$mod: ["$_id", 150]}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 299
- });
+ //
+ // Test composite stages.
+ //
- // Test that $count ($group->$project) is merged on mongoS iff 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_count_allow_disk_use",
- pipeline: [{$match: {_id: {$gte: -150, $lte: 1500}}}, {$count: "doc_count"}],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
- }
+ // Test that $bucket ($group->$sort) is merged on mongoS iff 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_bucket_allow_disk_use",
+ pipeline: [
+ {$match: {_id: {$gte: -200, $lte: 200}}},
+ {$bucket: {groupBy: "$_id", boundaries: [-200, -150, -100, -50, 0, 50, 100, 150, 200]}}
+ ],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 8
+ });
- // Run all test cases for each potential value of 'allowDiskUse'.
- for (let allowDiskUse of[false, undefined, true]) {
- // Reset the profiler and clear the list of tests that ran on the previous iteration.
- testNameHistory.clear();
- startProfiling();
+ // Test that $sortByCount ($group->$sort) is merged on mongoS iff 'allowDiskUse' isn't set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_sort_by_count_allow_disk_use",
+ pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sortByCount: {$mod: ["$_id", 150]}}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 299
+ });
- // Run all test cases.
- runTestCasesWhoseMergeLocationIsConsistentRegardlessOfAllowDiskUse(allowDiskUse);
- runTestCasesWhoseMergeLocationDependsOnAllowDiskUse(allowDiskUse);
- }
+ // Test that $count ($group->$project) is merged on mongoS iff 'allowDiskUse' is not set.
+ assertMergeOnMongoX({
+ testName: "agg_mongos_merge_count_allow_disk_use",
+ pipeline: [{$match: {_id: {$gte: -150, $lte: 1500}}}, {$count: "doc_count"}],
+ allowDiskUse: allowDiskUse,
+ expectedCount: 1
+ });
+}
- // Start a new profiling session before running the final few tests.
+// Run all test cases for each potential value of 'allowDiskUse'.
+for (let allowDiskUse of [false, undefined, true]) {
+ // Reset the profiler and clear the list of tests that ran on the previous iteration.
+ testNameHistory.clear();
startProfiling();
- // Test that merge pipelines containing all mongos-runnable stages produce the expected output.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_all_mongos_runnable_stages",
- pipeline: [
- {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
- {$sort: {a: 1}},
- {$skip: 150},
- {$limit: 150},
- {$addFields: {d: true}},
- {$unwind: "$a"},
- {$sample: {size: 100}},
- {$project: {c: 0, geo: 0, distance: 0}},
- {$group: {_id: "$_id", doc: {$push: "$$CURRENT"}}},
- {$unwind: "$doc"},
- {$replaceRoot: {newRoot: "$doc"}},
- {$facet: {facetPipe: [{$match: {_id: {$gte: -200, $lte: 200}}}]}},
- {$unwind: "$facetPipe"},
- {$replaceRoot: {newRoot: "$facetPipe"}},
- {
- $redact: {
- $cond:
- {if: {$eq: ["$redactThisDoc", true]}, then: "$$PRUNE", else: "$$DESCEND"}
- }
- },
- {
- $match: {
- _id: {$gte: -50, $lte: 100},
- a: {$type: "number", $gte: -50, $lte: 100},
- b: {$exists: false},
- c: {$exists: false},
- d: true,
- geo: {$exists: false},
- distance: {$exists: false},
- text: "txt"
- }
- }
- ],
- expectedCount: 100
- });
-
- // Test that metadata is not propagated to the user when a pipeline which produces metadata
- // fields merges on mongoS.
- const metaDataTests = [
- {pipeline: [{$sort: {_id: -1}}], verifyNoMetaData: (doc) => assert.isnull(doc.$sortKey)},
+ // Run all test cases.
+ runTestCasesWhoseMergeLocationIsConsistentRegardlessOfAllowDiskUse(allowDiskUse);
+ runTestCasesWhoseMergeLocationDependsOnAllowDiskUse(allowDiskUse);
+}
+
+// Start a new profiling session before running the final few tests.
+startProfiling();
+
+// Test that merge pipelines containing all mongos-runnable stages produce the expected output.
+assertMergeOnMongoS({
+ testName: "agg_mongos_merge_all_mongos_runnable_stages",
+ pipeline: [
+ {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
+ {$sort: {a: 1}},
+ {$skip: 150},
+ {$limit: 150},
+ {$addFields: {d: true}},
+ {$unwind: "$a"},
+ {$sample: {size: 100}},
+ {$project: {c: 0, geo: 0, distance: 0}},
+ {$group: {_id: "$_id", doc: {$push: "$$CURRENT"}}},
+ {$unwind: "$doc"},
+ {$replaceRoot: {newRoot: "$doc"}},
+ {$facet: {facetPipe: [{$match: {_id: {$gte: -200, $lte: 200}}}]}},
+ {$unwind: "$facetPipe"},
+ {$replaceRoot: {newRoot: "$facetPipe"}},
{
- pipeline: [{$match: {$text: {$search: "txt"}}}],
- verifyNoMetaData: (doc) => assert.isnull(doc.$textScore)
+ $redact:
+ {$cond: {if: {$eq: ["$redactThisDoc", true]}, then: "$$PRUNE", else: "$$DESCEND"}}
},
{
- pipeline: [{$sample: {size: 300}}],
- verifyNoMetaData: (doc) => assert.isnull(doc.$randVal)
- },
- {
- pipeline: [{$match: {$text: {$search: "txt"}}}, {$sort: {text: 1}}],
- verifyNoMetaData:
- (doc) => assert.docEq([doc.$textScore, doc.$sortKey], [undefined, undefined])
+ $match: {
+ _id: {$gte: -50, $lte: 100},
+ a: {$type: "number", $gte: -50, $lte: 100},
+ b: {$exists: false},
+ c: {$exists: false},
+ d: true,
+ geo: {$exists: false},
+ distance: {$exists: false},
+ text: "txt"
+ }
}
- ];
-
- for (let metaDataTest of metaDataTests) {
- assert.gte(mongosColl.aggregate(metaDataTest.pipeline).itcount(), 300);
- mongosColl.aggregate(metaDataTest.pipeline).forEach(metaDataTest.verifyNoMetaData);
+ ],
+ expectedCount: 100
+});
+
+// Test that metadata is not propagated to the user when a pipeline which produces metadata
+// fields merges on mongoS.
+const metaDataTests = [
+ {pipeline: [{$sort: {_id: -1}}], verifyNoMetaData: (doc) => assert.isnull(doc.$sortKey)},
+ {
+ pipeline: [{$match: {$text: {$search: "txt"}}}],
+ verifyNoMetaData: (doc) => assert.isnull(doc.$textScore)
+ },
+ {pipeline: [{$sample: {size: 300}}], verifyNoMetaData: (doc) => assert.isnull(doc.$randVal)},
+ {
+ pipeline: [{$match: {$text: {$search: "txt"}}}, {$sort: {text: 1}}],
+ verifyNoMetaData: (doc) =>
+ assert.docEq([doc.$textScore, doc.$sortKey], [undefined, undefined])
}
+];
+
+for (let metaDataTest of metaDataTests) {
+ assert.gte(mongosColl.aggregate(metaDataTest.pipeline).itcount(), 300);
+ mongosColl.aggregate(metaDataTest.pipeline).forEach(metaDataTest.verifyNoMetaData);
+}
- st.stop();
+st.stop();
})();
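
The merge-location assertions in the test above are driven by profiler entries, but the same
information is visible directly in explain output. A minimal sketch (not from the patch itself),
assuming a sharded collection handle like 'mongosColl' and the standard sharded-aggregation
explain format, which reports a top-level "mergeType" field:

    // Sketch only: report where the merge half of a split pipeline would run.
    // "mergeType" is "mongos" for a mongoS merge, otherwise a shard designator
    // such as "anyShard" or "primaryShard".
    const explainOut = mongosColl.explain().aggregate(
        [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}]);
    if (explainOut.hasOwnProperty("mergeType")) {
        print("merge runs on: " + explainOut.mergeType);
    }
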
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/aggregation/mongos_slaveok.js
index 24346e407f0..a0ccf2d1100 100644
--- a/jstests/aggregation/mongos_slaveok.js
+++ b/jstests/aggregation/mongos_slaveok.js
@@ -8,43 +8,41 @@
* ]
*/
(function() {
- load('jstests/replsets/rslib.js');
+load('jstests/replsets/rslib.js');
- var NODES = 2;
+var NODES = 2;
- var doTest = function(st, doSharded) {
- var testDB = st.s.getDB('test');
+var doTest = function(st, doSharded) {
+ var testDB = st.s.getDB('test');
- if (doSharded) {
- testDB.adminCommand({enableSharding: 'test'});
- testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- }
+ if (doSharded) {
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ }
- testDB.user.insert({x: 10}, {writeConcern: {w: NODES}});
- testDB.setSlaveOk(true);
+ testDB.user.insert({x: 10}, {writeConcern: {w: NODES}});
+ testDB.setSlaveOk(true);
- var secNode = st.rs0.getSecondary();
- secNode.getDB('test').setProfilingLevel(2);
+ var secNode = st.rs0.getSecondary();
+ secNode.getDB('test').setProfilingLevel(2);
- // wait for mongos to recognize that the slave is up
- awaitRSClientHosts(st.s, secNode, {ok: true});
+ // wait for mongos to recognize that the slave is up
+ awaitRSClientHosts(st.s, secNode, {ok: true});
- var res =
- testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}});
- assert(res.ok, 'aggregate command failed: ' + tojson(res));
+ var res = testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}});
+ assert(res.ok, 'aggregate command failed: ' + tojson(res));
- var profileQuery = {op: 'command', ns: 'test.user', 'command.aggregate': 'user'};
- var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
+ var profileQuery = {op: 'command', ns: 'test.user', 'command.aggregate': 'user'};
+ var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
- assert(profileDoc != null);
- testDB.dropDatabase();
- };
+ assert(profileDoc != null);
+ testDB.dropDatabase();
+};
- var st = new ShardingTest({shards: {rs0: {oplogSize: 10, nodes: NODES}}});
+var st = new ShardingTest({shards: {rs0: {oplogSize: 10, nodes: NODES}}});
- doTest(st, false);
- doTest(st, true);
-
- st.stop();
+doTest(st, false);
+doTest(st, true);
+st.stop();
})();
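
The setSlaveOk(true) call in the test above is what lets the aggregate target a secondary. A
minimal equivalent sketch (not from the patch itself) using an explicit read preference on the
shell connection, reusing the same fixture names as the test:

    // Sketch only: explicit read preference instead of setSlaveOk(true).
    var testDB = st.s.getDB('test');
    testDB.getMongo().setReadPref('secondary');
    var res = testDB.runCommand(
        {aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}});
    assert(res.ok, 'aggregate on secondary failed: ' + tojson(res));
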
diff --git a/jstests/aggregation/optimize_away_pipeline.js b/jstests/aggregation/optimize_away_pipeline.js
index 191a98023d4..8ca82dd3ed6 100644
--- a/jstests/aggregation/optimize_away_pipeline.js
+++ b/jstests/aggregation/optimize_away_pipeline.js
@@ -9,330 +9,322 @@
// sharded collections.
// @tags: [do_not_wrap_aggregations_in_facets, assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For 'orderedArrayEq' and 'arrayEq'.
- load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // For isWiredTiger.
- load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
- load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'.
+load("jstests/aggregation/extras/utils.js"); // For 'orderedArrayEq' and 'arrayEq'.
+load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // For isWiredTiger.
+load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'.
- const coll = db.optimize_away_pipeline;
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, x: 10}));
- assert.writeOK(coll.insert({_id: 2, x: 20}));
- assert.writeOK(coll.insert({_id: 3, x: 30}));
+const coll = db.optimize_away_pipeline;
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, x: 10}));
+assert.writeOK(coll.insert({_id: 2, x: 20}));
+assert.writeOK(coll.insert({_id: 3, x: 30}));
-    // Asserts that the given pipeline has *not* been optimized away and the request is answered
-    // using the aggregation module. There should be pipeline stages present in the explain output.
-    // The function also asserts that the query stage passed in the 'expectedStage' argument is
-    // present in the explain output. If 'expectedResult' is provided, the pipeline is executed and
-    // the returned result is validated against the expected result without respecting the order of
-    // the documents. If 'preserveResultOrder' is 'true', the order is respected.
- function assertPipelineUsesAggregation({
- pipeline = [],
- pipelineOptions = {},
- expectedStage = null,
- expectedResult = null,
- preserveResultOrder = false
- } = {}) {
- const explainOutput = coll.explain().aggregate(pipeline, pipelineOptions);
+// Asserts that the given pipeline has *not* been optimized away and the request is answered
+// using the aggregation module. There should be pipeline stages present in the explain output.
+// The function also asserts that the query stage passed in the 'expectedStage' argument is
+// present in the explain output. If 'expectedResult' is provided, the pipeline is executed and
+// the returned result is validated against the expected result without respecting the order of
+// the documents. If 'preserveResultOrder' is 'true', the order is respected.
+function assertPipelineUsesAggregation({
+ pipeline = [],
+ pipelineOptions = {},
+ expectedStage = null,
+ expectedResult = null,
+ preserveResultOrder = false
+} = {}) {
+ const explainOutput = coll.explain().aggregate(pipeline, pipelineOptions);
- assert(isAggregationPlan(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to use an aggregation framework in the explain output: " +
- tojson(explainOutput));
- assert(!isQueryPlan(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to use a query layer at the root level in the explain output: " +
- tojson(explainOutput));
+ assert(isAggregationPlan(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to use an aggregation framework in the explain output: " + tojson(explainOutput));
+ assert(!isQueryPlan(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to use a query layer at the root level in the explain output: " +
+ tojson(explainOutput));
- let cursor = getAggPlanStage(explainOutput, "$cursor");
- if (cursor) {
- cursor = cursor.$cursor;
- } else {
- cursor = getAggPlanStage(explainOutput, "$geoNearCursor").$geoNearCursor;
- }
-
- assert(cursor,
- "Expected pipeline " + tojsononeline(pipeline) + " to include a $cursor " +
- " stage in the explain output: " + tojson(explainOutput));
- assert(cursor.queryPlanner.optimizedPipeline === undefined,
- "Expected pipeline " + tojsononeline(pipeline) + " to *not* include an " +
- "'optimizedPipeline' field in the explain output: " + tojson(explainOutput));
- assert(aggPlanHasStage(explainOutput, expectedStage),
- "Expected pipeline " + tojsononeline(pipeline) + " to include a " + expectedStage +
- " stage in the explain output: " + tojson(explainOutput));
+ let cursor = getAggPlanStage(explainOutput, "$cursor");
+ if (cursor) {
+ cursor = cursor.$cursor;
+ } else {
+ cursor = getAggPlanStage(explainOutput, "$geoNearCursor").$geoNearCursor;
+ }
- if (expectedResult) {
- const actualResult = coll.aggregate(pipeline, pipelineOptions).toArray();
- assert(preserveResultOrder ? orderedArrayEq(actualResult, expectedResult)
- : arrayEq(actualResult, expectedResult));
- }
+ assert(cursor,
+ "Expected pipeline " + tojsononeline(pipeline) + " to include a $cursor " +
+ " stage in the explain output: " + tojson(explainOutput));
+ assert(cursor.queryPlanner.optimizedPipeline === undefined,
+ "Expected pipeline " + tojsononeline(pipeline) + " to *not* include an " +
+ "'optimizedPipeline' field in the explain output: " + tojson(explainOutput));
+ assert(aggPlanHasStage(explainOutput, expectedStage),
+ "Expected pipeline " + tojsononeline(pipeline) + " to include a " + expectedStage +
+ " stage in the explain output: " + tojson(explainOutput));
- return explainOutput;
+ if (expectedResult) {
+ const actualResult = coll.aggregate(pipeline, pipelineOptions).toArray();
+ assert(preserveResultOrder ? orderedArrayEq(actualResult, expectedResult)
+ : arrayEq(actualResult, expectedResult));
}
-    // Asserts that the given pipeline has been optimized away and the request is answered using
-    // just the query module. There should be no pipeline stages present in the explain output.
-    // The function also asserts that the query stage passed in the 'expectedStage' argument is
-    // present in the explain output. If 'expectedResult' is provided, the pipeline is executed and
-    // the returned result is validated against the expected result without respecting the order of
-    // the documents. If 'preserveResultOrder' is 'true', the order is respected.
- function assertPipelineDoesNotUseAggregation({
- pipeline = [],
- pipelineOptions = {},
- expectedStage = null,
- expectedResult = null,
- preserveResultOrder = false
- } = {}) {
- const explainOutput = coll.explain().aggregate(pipeline, pipelineOptions);
-
- assert(!isAggregationPlan(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to use an aggregation framework in the explain output: " +
- tojson(explainOutput));
- assert(isQueryPlan(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to use a query layer at the root level in the explain output: " +
- tojson(explainOutput));
- if (explainOutput.hasOwnProperty("shards")) {
- Object.keys(explainOutput.shards)
- .forEach((shard) => assert(
- explainOutput.shards[shard].queryPlanner.optimizedPipeline === true,
- "Expected pipeline " + tojsononeline(pipeline) + " to include an " +
- "'optimizedPipeline' field in the explain output: " +
- tojson(explainOutput)));
- } else {
- assert(explainOutput.queryPlanner.optimizedPipeline === true,
- "Expected pipeline " + tojsononeline(pipeline) + " to include an " +
- "'optimizedPipeline' field in the explain output: " + tojson(explainOutput));
- }
- assert(planHasStage(db, explainOutput, expectedStage),
- "Expected pipeline " + tojsononeline(pipeline) + " to include a " + expectedStage +
- " stage in the explain output: " + tojson(explainOutput));
+ return explainOutput;
+}
- if (expectedResult) {
- const actualResult = coll.aggregate(pipeline, pipelineOptions).toArray();
- assert(preserveResultOrder ? orderedArrayEq(actualResult, expectedResult)
- : arrayEq(actualResult, expectedResult));
- }
+// Asserts that the given pipeline has been optimized away and the request is answered using
+// just the query module. There should be no pipeline stages present in the explain output.
+// The function also asserts that the query stage passed in the 'expectedStage' argument is
+// present in the explain output. If 'expectedResult' is provided, the pipeline is executed and
+// the returned result is validated against the expected result without respecting the order of
+// the documents. If 'preserveResultOrder' is 'true', the order is respected.
+function assertPipelineDoesNotUseAggregation({
+ pipeline = [],
+ pipelineOptions = {},
+ expectedStage = null,
+ expectedResult = null,
+ preserveResultOrder = false
+} = {}) {
+ const explainOutput = coll.explain().aggregate(pipeline, pipelineOptions);
- return explainOutput;
+ assert(!isAggregationPlan(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to use an aggregation framework in the explain output: " +
+ tojson(explainOutput));
+ assert(isQueryPlan(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to use a query layer at the root level in the explain output: " +
+ tojson(explainOutput));
+ if (explainOutput.hasOwnProperty("shards")) {
+ Object.keys(explainOutput.shards)
+ .forEach((shard) =>
+ assert(explainOutput.shards[shard].queryPlanner.optimizedPipeline === true,
+ "Expected pipeline " + tojsononeline(pipeline) + " to include an " +
+ "'optimizedPipeline' field in the explain output: " +
+ tojson(explainOutput)));
+ } else {
+ assert(explainOutput.queryPlanner.optimizedPipeline === true,
+ "Expected pipeline " + tojsononeline(pipeline) + " to include an " +
+ "'optimizedPipeline' field in the explain output: " + tojson(explainOutput));
}
+ assert(planHasStage(db, explainOutput, expectedStage),
+ "Expected pipeline " + tojsononeline(pipeline) + " to include a " + expectedStage +
+ " stage in the explain output: " + tojson(explainOutput));
- // Test that getMore works with the optimized query.
- function testGetMore({command = null, expectedResult = null} = {}) {
- const documents =
- new DBCommandCursor(db, assert.commandWorked(db.runCommand(command)), 1 /* batchsize */)
- .toArray();
- assert(arrayEq(documents, expectedResult));
+ if (expectedResult) {
+ const actualResult = coll.aggregate(pipeline, pipelineOptions).toArray();
+ assert(preserveResultOrder ? orderedArrayEq(actualResult, expectedResult)
+ : arrayEq(actualResult, expectedResult));
}
- let explainOutput;
+ return explainOutput;
+}
- // Basic pipelines.
+// Test that getMore works with the optimized query.
+function testGetMore({command = null, expectedResult = null} = {}) {
+ const documents =
+ new DBCommandCursor(db, assert.commandWorked(db.runCommand(command)), 1 /* batchsize */)
+ .toArray();
+ assert(arrayEq(documents, expectedResult));
+}
- // Test basic scenarios when a pipeline has a single $cursor stage or can be collapsed into a
- // single cursor stage.
- assertPipelineDoesNotUseAggregation({
- pipeline: [],
- expectedStage: "COLLSCAN",
- expectedResult: [{_id: 1, x: 10}, {_id: 2, x: 20}, {_id: 3, x: 30}]
- });
- assertPipelineDoesNotUseAggregation({
- pipeline: [{$match: {x: 20}}],
- expectedStage: "COLLSCAN",
- expectedResult: [{_id: 2, x: 20}]
- });
+let explainOutput;
- // Pipelines with a collation.
+// Basic pipelines.
- // Test a simple pipeline with a case-insensitive collation.
- assert.writeOK(coll.insert({_id: 4, x: 40, b: "abc"}));
- assertPipelineDoesNotUseAggregation({
- pipeline: [{$match: {b: "ABC"}}],
- pipelineOptions: {collation: {locale: "en_US", strength: 2}},
- expectedStage: "COLLSCAN",
- expectedResult: [{_id: 4, x: 40, b: "abc"}]
- });
- assert.commandWorked(coll.deleteOne({_id: 4}));
+// Test basic scenarios when a pipeline has a single $cursor stage or can be collapsed into a
+// single cursor stage.
+assertPipelineDoesNotUseAggregation({
+ pipeline: [],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{_id: 1, x: 10}, {_id: 2, x: 20}, {_id: 3, x: 30}]
+});
+assertPipelineDoesNotUseAggregation(
+ {pipeline: [{$match: {x: 20}}], expectedStage: "COLLSCAN", expectedResult: [{_id: 2, x: 20}]});
- // Pipelines with covered queries.
+// Pipelines with a collation.
- // We can collapse a covered query into a single $cursor when $project and $sort are present and
- // the latter is near the front of the pipeline. Skip this test in sharded modes as we cannot
- // correctly handle explain output in plan analyzer helper functions.
- assert.commandWorked(coll.createIndex({x: 1}));
- assertPipelineDoesNotUseAggregation({
- pipeline: [{$sort: {x: 1}}, {$project: {x: 1, _id: 0}}],
- expectedStage: "IXSCAN",
- expectedResult: [{x: 10}, {x: 20}, {x: 30}],
- preserveResultOrder: true
- });
- assertPipelineDoesNotUseAggregation({
- pipeline: [{$match: {x: {$gte: 20}}}, {$sort: {x: 1}}, {$project: {x: 1, _id: 0}}],
- expectedStage: "IXSCAN",
- expectedResult: [{x: 20}, {x: 30}],
- preserveResultOrder: true
- });
- // TODO: SERVER-36723 We cannot collapse if there is a $limit stage though.
- assertPipelineUsesAggregation({
- pipeline:
- [{$match: {x: {$gte: 20}}}, {$sort: {x: 1}}, {$limit: 1}, {$project: {x: 1, _id: 0}}],
- expectedStage: "IXSCAN",
- expectedResult: [{x: 20}]
- });
- assert.commandWorked(coll.dropIndexes());
+// Test a simple pipeline with a case-insensitive collation.
+assert.writeOK(coll.insert({_id: 4, x: 40, b: "abc"}));
+assertPipelineDoesNotUseAggregation({
+ pipeline: [{$match: {b: "ABC"}}],
+ pipelineOptions: {collation: {locale: "en_US", strength: 2}},
+ expectedStage: "COLLSCAN",
+ expectedResult: [{_id: 4, x: 40, b: "abc"}]
+});
+assert.commandWorked(coll.deleteOne({_id: 4}));
- // Pipelines which cannot be optimized away.
+// Pipelines with covered queries.
- // TODO SERVER-40254: Uncovered queries.
- assert.writeOK(coll.insert({_id: 4, x: 40, a: {b: "ab1"}}));
- assertPipelineUsesAggregation({
- pipeline: [{$project: {x: 1, _id: 0}}],
- expectedStage: "COLLSCAN",
- expectedResult: [{x: 10}, {x: 20}, {x: 30}, {x: 40}]
- });
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: 20}}, {$project: {x: 1, _id: 0}}],
- expectedStage: "COLLSCAN",
- expectedResult: [{x: 20}]
- });
- assertPipelineUsesAggregation({
- pipeline: [{$project: {x: 1, "a.b": 1, _id: 0}}],
- expectedStage: "COLLSCAN",
- expectedResult: [{x: 10}, {x: 20}, {x: 30}, {x: 40, a: {b: "ab1"}}]
- });
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: 40}}, {$project: {"a.b": 1, _id: 0}}],
- expectedStage: "COLLSCAN",
- expectedResult: [{a: {b: "ab1"}}]
- });
- assert.commandWorked(coll.deleteOne({_id: 4}));
+// We can collapse a covered query into a single $cursor when $project and $sort are present and
+// the latter is near the front of the pipeline. Skip this test in sharded modes as we cannot
+// correctly handle explain output in plan analyzer helper functions.
+assert.commandWorked(coll.createIndex({x: 1}));
+assertPipelineDoesNotUseAggregation({
+ pipeline: [{$sort: {x: 1}}, {$project: {x: 1, _id: 0}}],
+ expectedStage: "IXSCAN",
+ expectedResult: [{x: 10}, {x: 20}, {x: 30}],
+ preserveResultOrder: true
+});
+assertPipelineDoesNotUseAggregation({
+ pipeline: [{$match: {x: {$gte: 20}}}, {$sort: {x: 1}}, {$project: {x: 1, _id: 0}}],
+ expectedStage: "IXSCAN",
+ expectedResult: [{x: 20}, {x: 30}],
+ preserveResultOrder: true
+});
+// TODO: SERVER-36723 We cannot collapse if there is a $limit stage though.
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: {$gte: 20}}}, {$sort: {x: 1}}, {$limit: 1}, {$project: {x: 1, _id: 0}}],
+ expectedStage: "IXSCAN",
+ expectedResult: [{x: 20}]
+});
+assert.commandWorked(coll.dropIndexes());
- // TODO SERVER-36723: $limit stage is not supported yet.
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: 20}}, {$limit: 1}],
- expectedStage: "COLLSCAN",
- expectedResult: [{_id: 2, x: 20}]
- });
- // TODO SERVER-36723: $skip stage is not supported yet.
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: {$gte: 20}}}, {$skip: 1}],
- expectedStage: "COLLSCAN",
- expectedResult: [{_id: 3, x: 30}]
- });
- // We cannot collapse a $project stage if it has a complex pipeline expression.
- assertPipelineUsesAggregation(
- {pipeline: [{$project: {x: {$substr: ["$y", 0, 1]}, _id: 0}}], expectedStage: "COLLSCAN"});
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: 20}}, {$project: {x: {$substr: ["$y", 0, 1]}, _id: 0}}],
- expectedStage: "COLLSCAN"
- });
- // We cannot optimize away a pipeline if there are stages which have no equivalent in the
- // find command.
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: {$gte: 20}}}, {$count: "count"}],
- expectedStage: "COLLSCAN",
- expectedResult: [{count: 2}]
- });
- assertPipelineUsesAggregation({
- pipeline: [{$match: {x: {$gte: 20}}}, {$group: {_id: "null", s: {$sum: "$x"}}}],
- expectedStage: "COLLSCAN",
- expectedResult: [{_id: "null", s: 50}]
- });
- // TODO SERVER-40253: We cannot optimize away text search queries.
- assert.commandWorked(coll.createIndex({y: "text"}));
- assertPipelineUsesAggregation(
- {pipeline: [{$match: {$text: {$search: "abc"}}}], expectedStage: "IXSCAN"});
- assert.commandWorked(coll.dropIndexes());
- // We cannot optimize away geo near queries.
- assert.commandWorked(coll.createIndex({"y": "2d"}));
- assertPipelineUsesAggregation({
- pipeline: [{$geoNear: {near: [0, 0], distanceField: "y", spherical: true}}],
- expectedStage: "GEO_NEAR_2D"
- });
- assert.commandWorked(coll.dropIndexes());
+// Pipelines which cannot be optimized away.
+
+// TODO SERVER-40254: Uncovered queries.
+assert.writeOK(coll.insert({_id: 4, x: 40, a: {b: "ab1"}}));
+assertPipelineUsesAggregation({
+ pipeline: [{$project: {x: 1, _id: 0}}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{x: 10}, {x: 20}, {x: 30}, {x: 40}]
+});
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: 20}}, {$project: {x: 1, _id: 0}}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{x: 20}]
+});
+assertPipelineUsesAggregation({
+ pipeline: [{$project: {x: 1, "a.b": 1, _id: 0}}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{x: 10}, {x: 20}, {x: 30}, {x: 40, a: {b: "ab1"}}]
+});
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: 40}}, {$project: {"a.b": 1, _id: 0}}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{a: {b: "ab1"}}]
+});
+assert.commandWorked(coll.deleteOne({_id: 4}));
+
+// TODO SERVER-36723: $limit stage is not supported yet.
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: 20}}, {$limit: 1}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{_id: 2, x: 20}]
+});
+// TODO SERVER-36723: $skip stage is not supported yet.
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: {$gte: 20}}}, {$skip: 1}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{_id: 3, x: 30}]
+});
+// We cannot collapse a $project stage if it has a complex pipeline expression.
+assertPipelineUsesAggregation(
+ {pipeline: [{$project: {x: {$substr: ["$y", 0, 1]}, _id: 0}}], expectedStage: "COLLSCAN"});
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: 20}}, {$project: {x: {$substr: ["$y", 0, 1]}, _id: 0}}],
+ expectedStage: "COLLSCAN"
+});
+// We cannot optimize away a pipeline if there are stages which have no equivalent in the
+// find command.
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: {$gte: 20}}}, {$count: "count"}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{count: 2}]
+});
+assertPipelineUsesAggregation({
+ pipeline: [{$match: {x: {$gte: 20}}}, {$group: {_id: "null", s: {$sum: "$x"}}}],
+ expectedStage: "COLLSCAN",
+ expectedResult: [{_id: "null", s: 50}]
+});
+// TODO SERVER-40253: We cannot optimize away text search queries.
+assert.commandWorked(coll.createIndex({y: "text"}));
+assertPipelineUsesAggregation(
+ {pipeline: [{$match: {$text: {$search: "abc"}}}], expectedStage: "IXSCAN"});
+assert.commandWorked(coll.dropIndexes());
+// We cannot optimize away geo near queries.
+assert.commandWorked(coll.createIndex({"y": "2d"}));
+assertPipelineUsesAggregation({
+ pipeline: [{$geoNear: {near: [0, 0], distanceField: "y", spherical: true}}],
+ expectedStage: "GEO_NEAR_2D"
+});
+assert.commandWorked(coll.dropIndexes());
- // getMore cases.
+// getMore cases.
- // Test getMore on a collection with an optimized away pipeline.
+// Test getMore on a collection with an optimized away pipeline.
+testGetMore({
+ command: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 1}},
+ expectedResult: [{_id: 1, x: 10}, {_id: 2, x: 20}, {_id: 3, x: 30}]
+});
+testGetMore({
+ command:
+ {aggregate: coll.getName(), pipeline: [{$match: {x: {$gte: 20}}}], cursor: {batchSize: 1}},
+ expectedResult: [{_id: 2, x: 20}, {_id: 3, x: 30}]
+});
+testGetMore({
+ command: {
+ aggregate: coll.getName(),
+ pipeline: [{$match: {x: {$gte: 20}}}, {$project: {x: 1, _id: 0}}],
+ cursor: {batchSize: 1}
+ },
+ expectedResult: [{x: 20}, {x: 30}]
+});
+// Test getMore on a view with an optimized-away pipeline. Since views cannot be created when
+// implicit sharded collection mode is on, this test will be run only on a non-sharded
+// collection.
+let view;
+if (!FixtureHelpers.isSharded(coll)) {
+ view = db.optimize_away_pipeline_view;
+ view.drop();
+ assert.commandWorked(db.createView(view.getName(), coll.getName(), []));
testGetMore({
- command: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 1}},
+ command: {find: view.getName(), filter: {}, batchSize: 1},
expectedResult: [{_id: 1, x: 10}, {_id: 2, x: 20}, {_id: 3, x: 30}]
});
+}
+// Test that getMore puts the correct namespace into profile data for a collection with an
+// optimized-away pipeline. Cannot be run on mongos, as profiling can be enabled only on
+// mongod. Also, profiling is supported on WiredTiger only.
+if (!FixtureHelpers.isMongos(db) && isWiredTiger(db)) {
+ db.system.profile.drop();
+ db.setProfilingLevel(2);
testGetMore({
command: {
aggregate: coll.getName(),
- pipeline: [{$match: {x: {$gte: 20}}}],
- cursor: {batchSize: 1}
+ pipeline: [{$match: {x: 10}}],
+ cursor: {batchSize: 1},
+ comment: 'optimize_away_pipeline'
},
- expectedResult: [{_id: 2, x: 20}, {_id: 3, x: 30}]
+ expectedResult: [{_id: 1, x: 10}]
});
- testGetMore({
- command: {
- aggregate: coll.getName(),
- pipeline: [{$match: {x: {$gte: 20}}}, {$project: {x: 1, _id: 0}}],
- cursor: {batchSize: 1}
- },
- expectedResult: [{x: 20}, {x: 30}]
- });
-    // Test getMore on a view with an optimized-away pipeline. Since views cannot be created when
-    // implicit sharded collection mode is on, this test will be run only on a non-sharded
-    // collection.
- let view;
+ db.setProfilingLevel(0);
+ let profile = db.system.profile.find({}, {op: 1, ns: 1, comment: 'optimize_away_pipeline'})
+ .sort({ts: 1})
+ .toArray();
+ assert(arrayEq(
+ profile,
+ [{op: "command", ns: coll.getFullName()}, {op: "getmore", ns: coll.getFullName()}]));
+ // Test getMore puts a correct namespace into profile data for a view with an optimized away
+ // pipeline.
if (!FixtureHelpers.isSharded(coll)) {
- view = db.optimize_away_pipeline_view;
- view.drop();
- assert.commandWorked(db.createView(view.getName(), coll.getName(), []));
- testGetMore({
- command: {find: view.getName(), filter: {}, batchSize: 1},
- expectedResult: [{_id: 1, x: 10}, {_id: 2, x: 20}, {_id: 3, x: 30}]
- });
- }
-    // Test that getMore puts the correct namespace into profile data for a collection with an
-    // optimized-away pipeline. Cannot be run on mongos, as profiling can be enabled only on
-    // mongod. Also, profiling is supported on WiredTiger only.
- if (!FixtureHelpers.isMongos(db) && isWiredTiger(db)) {
db.system.profile.drop();
db.setProfilingLevel(2);
testGetMore({
command: {
- aggregate: coll.getName(),
- pipeline: [{$match: {x: 10}}],
- cursor: {batchSize: 1},
+ find: view.getName(),
+ filter: {x: 10},
+ batchSize: 1,
comment: 'optimize_away_pipeline'
},
expectedResult: [{_id: 1, x: 10}]
});
db.setProfilingLevel(0);
- let profile = db.system.profile.find({}, {op: 1, ns: 1, comment: 'optimize_away_pipeline'})
- .sort({ts: 1})
- .toArray();
+ profile = db.system.profile.find({}, {op: 1, ns: 1, comment: 'optimize_away_pipeline'})
+ .sort({ts: 1})
+ .toArray();
assert(arrayEq(
profile,
- [{op: "command", ns: coll.getFullName()}, {op: "getmore", ns: coll.getFullName()}]));
- // Test getMore puts a correct namespace into profile data for a view with an optimized away
- // pipeline.
- if (!FixtureHelpers.isSharded(coll)) {
- db.system.profile.drop();
- db.setProfilingLevel(2);
- testGetMore({
- command: {
- find: view.getName(),
- filter: {x: 10},
- batchSize: 1,
- comment: 'optimize_away_pipeline'
- },
- expectedResult: [{_id: 1, x: 10}]
- });
- db.setProfilingLevel(0);
- profile = db.system.profile.find({}, {op: 1, ns: 1, comment: 'optimize_away_pipeline'})
- .sort({ts: 1})
- .toArray();
- assert(arrayEq(
- profile,
- [{op: "query", ns: view.getFullName()}, {op: "getmore", ns: view.getFullName()}]));
- }
+ [{op: "query", ns: view.getFullName()}, {op: "getmore", ns: view.getFullName()}]));
}
+}
}());
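
The 'optimizedPipeline' marker asserted throughout the test above sits under queryPlanner in the
explain output whenever the whole pipeline collapses into a plain find. A minimal sketch (not
from the patch itself) of that check against the same unsharded collection:

    // Sketch only: detect an optimized-away pipeline from explain output. When
    // the pipeline is *not* optimized away, explain reports "stages" instead
    // and there is no top-level queryPlanner.
    const out = db.optimize_away_pipeline.explain().aggregate([{$match: {x: 20}}]);
    const optimizedAway =
        out.queryPlanner !== undefined && out.queryPlanner.optimizedPipeline === true;
    print("pipeline optimized away: " + optimizedAway);
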
diff --git a/jstests/aggregation/pipeline_pass_through_from_mongos.js b/jstests/aggregation/pipeline_pass_through_from_mongos.js
index e98bbc8d854..3c3694e3931 100644
--- a/jstests/aggregation/pipeline_pass_through_from_mongos.js
+++ b/jstests/aggregation/pipeline_pass_through_from_mongos.js
@@ -5,49 +5,48 @@
* @tags: [requires_sharding]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
- const st = new ShardingTest({shards: 2});
- const mongosDB = st.s0.getDB(jsTestName());
- assert.commandWorked(st.s0.adminCommand({enableSharding: jsTestName()}));
- st.ensurePrimaryShard(jsTestName(), st.shard0.shardName);
- const mongosColl = mongosDB.test;
- const primaryShard = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
+const st = new ShardingTest({shards: 2});
+const mongosDB = st.s0.getDB(jsTestName());
+assert.commandWorked(st.s0.adminCommand({enableSharding: jsTestName()}));
+st.ensurePrimaryShard(jsTestName(), st.shard0.shardName);
+const mongosColl = mongosDB.test;
+const primaryShard = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
- assert.commandWorked(primaryShard.setProfilingLevel(2));
- assert.commandWorked(shard1DB.setProfilingLevel(2));
+assert.commandWorked(primaryShard.setProfilingLevel(2));
+assert.commandWorked(shard1DB.setProfilingLevel(2));
- // Verify that the $lookup is passed through to the primary shard when all its sub-pipeline
- // stages can be passed through.
- let testName = "sub_pipeline_can_be_passed_through";
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{
- $lookup:
- {pipeline: [{$match: {a: "val"}}], from: mongosDB.otherColl.getName(), as: "c"}
- }],
- cursor: {},
- comment: testName
- }));
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
+// Verify that the $lookup is passed through to the primary shard when all its sub-pipeline
+// stages can be passed through.
+let testName = "sub_pipeline_can_be_passed_through";
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [
+ {$lookup: {pipeline: [{$match: {a: "val"}}], from: mongosDB.otherColl.getName(), as: "c"}}
+ ],
+ cursor: {},
+ comment: testName
+}));
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShard,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
- // Test to verify that the mongoS doesn't pass the pipeline through to the primary shard when
- // $lookup's sub-pipeline has one or more stages which don't allow passthrough. In this
- // sub-pipeline, the $merge stage is not allowed to pass through, which forces the pipeline to
- // be parsed on mongoS. Since $merge is not allowed within a $lookup, the command thus fails on
- // mongoS without ever reaching a shard. This test-case exercises the bug described in
- // SERVER-41290.
- const pipelineForLookup = [
+// Test to verify that the mongoS doesn't pass the pipeline through to the primary shard when
+// $lookup's sub-pipeline has one or more stages which don't allow passthrough. In this
+// sub-pipeline, the $merge stage is not allowed to pass through, which forces the pipeline to
+// be parsed on mongoS. Since $merge is not allowed within a $lookup, the command thus fails on
+// mongoS without ever reaching a shard. This test-case exercises the bug described in
+// SERVER-41290.
+const pipelineForLookup = [
{
$lookup: {
pipeline: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}],
@@ -56,25 +55,25 @@
}
},
];
- testName = "lookup_with_merge_cannot_be_passed_through";
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: pipelineForLookup,
- cursor: {},
- comment: testName
- }),
- 51047);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
+testName = "lookup_with_merge_cannot_be_passed_through";
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: pipelineForLookup,
+ cursor: {},
+ comment: testName
+}),
+ 51047);
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShard,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
- // Same test as the above with another level of nested $lookup.
- const pipelineForNestedLookup = [{
+// Same test as the above with another level of nested $lookup.
+const pipelineForNestedLookup = [{
$lookup: {
from: mongosDB.otherColl.getName(),
as: "field",
@@ -87,81 +86,80 @@
}]
}
}];
- testName = "nested_lookup_with_merge_cannot_be_passed_through";
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: pipelineForNestedLookup,
- cursor: {},
- comment: testName
- }),
- 51047);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
+testName = "nested_lookup_with_merge_cannot_be_passed_through";
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: pipelineForNestedLookup,
+ cursor: {},
+ comment: testName
+}),
+ 51047);
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShard,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
- // Test to verify that the mongoS doesn't pass the pipeline through to the primary shard when
- // one or more of $facet's sub-pipelines have one or more stages which don't allow passthrough.
- // In this sub-pipeline, the $merge stage is not allowed to pass through, which forces the
- // pipeline to be parsed on mongoS. Since $merge is not allowed within a $facet, the command
- // thus fails on mongoS without ever reaching a shard. This test-case exercises the bug
- // described in SERVER-41290.
- const pipelineForFacet = [
- {
- $facet: {
- field0: [{$match: {a: "val"}}],
- field1: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}],
- }
- },
- ];
- testName = "facet_with_merge_cannot_be_passed_through";
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: pipelineForFacet,
- cursor: {},
- comment: testName
- }),
- 40600);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
+// Test to verify that the mongoS doesn't pass the pipeline through to the primary shard when
+// one or more of $facet's sub-pipelines have one or more stages which don't allow passthrough.
+// In this sub-pipeline, the $merge stage is not allowed to pass through, which forces the
+// pipeline to be parsed on mongoS. Since $merge is not allowed within a $facet, the command
+// thus fails on mongoS without ever reaching a shard. This test-case exercises the bug
+// described in SERVER-41290.
+const pipelineForFacet = [
+ {
+ $facet: {
+ field0: [{$match: {a: "val"}}],
+ field1: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}],
+ }
+ },
+];
+testName = "facet_with_merge_cannot_be_passed_through";
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: pipelineForFacet,
+ cursor: {},
+ comment: testName
+}),
+ 40600);
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShard,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
- // Same test as the above with another level of nested $facet.
- const pipelineForNestedFacet = [
- {
- $facet: {
- field0: [{$match: {a: "val"}}],
- field1: [
- {$facet: {field2: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}]}}
- ],
- }
- },
- ];
-    testName = "nested_facet_with_merge_cannot_be_passed_through";
-    assert.commandFailedWithCode(mongosDB.runCommand({
-        aggregate: mongosColl.getName(),
-        pipeline: pipelineForNestedFacet,
- cursor: {},
- comment: testName
- }),
- 40600);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
+// Same test as the above with another level of nested $facet.
+const pipelineForNestedFacet = [
+ {
+ $facet: {
+ field0: [{$match: {a: "val"}}],
+ field1:
+ [{$facet: {field2: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}]}}],
+ }
+ },
+];
+testName = "facet_with_merge_cannot_be_passed_through";
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: pipelineForFacet,
+ cursor: {},
+ comment: testName
+}),
+ 40600);
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShard,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
+profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+});
- st.stop();
+st.stop();
})();
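
The profilerHas*OrThrow helpers used in the test above ultimately count matching system.profile
documents on each shard. A hand-rolled equivalent, sketched (not from the patch itself) with the
same fixture names; the comment string matches the first test case:

    // Sketch only: count profiler entries for one aggregate on a given shard.
    function countAggProfileEntries(shardDB, comment) {
        return shardDB.system.profile
            .find({"command.aggregate": mongosColl.getName(), "command.comment": comment})
            .itcount();
    }
    assert.eq(1, countAggProfileEntries(primaryShard, "sub_pipeline_can_be_passed_through"));
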
diff --git a/jstests/aggregation/shard_targeting.js b/jstests/aggregation/shard_targeting.js
index 800357ab324..1654c17760d 100644
--- a/jstests/aggregation/shard_targeting.js
+++ b/jstests/aggregation/shard_targeting.js
@@ -22,361 +22,358 @@
* ]
*/
(function() {
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
-
- const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
-
- // mongosForAgg will be used to perform all aggregations.
- // mongosForMove does all chunk migrations, leaving mongosForAgg with stale config metadata.
- const mongosForAgg = st.s0;
- const mongosForMove = st.s1;
-
- const mongosDB = mongosForAgg.getDB(jsTestName());
- const mongosColl = mongosDB.test;
-
- const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
-
- // Turn off best-effort recipient metadata refresh post-migration commit on both shards because
- // it creates non-determinism for the profiler.
- assert.commandWorked(st.shard0.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
- assert.commandWorked(st.shard1.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
-
- // Turn off automatic shard refresh in mongos when a stale config error is thrown.
- assert.commandWorked(mongosForAgg.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshShardsOnRetargettingError', mode: 'alwaysOn'}));
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: -100}}));
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
-
- // Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
-
- // Write one document into each of the chunks.
- assert.writeOK(mongosColl.insert({_id: -150}));
- assert.writeOK(mongosColl.insert({_id: -50}));
- assert.writeOK(mongosColl.insert({_id: 50}));
- assert.writeOK(mongosColl.insert({_id: 150}));
-
- const shardExceptions =
- [ErrorCodes.StaleConfig, ErrorCodes.StaleShardVersion, ErrorCodes.StaleEpoch];
-
- // Create an $_internalSplitPipeline stage that forces the merge to occur on the Primary shard.
- const forcePrimaryMerge = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}];
-
- function runAggShardTargetTest({splitPoint}) {
- // Ensure that both mongoS have up-to-date caches, and enable the profiler on both shards.
- assert.commandWorked(mongosForAgg.getDB("admin").runCommand({flushRouterConfig: 1}));
- assert.commandWorked(mongosForMove.getDB("admin").runCommand({flushRouterConfig: 1}));
-
- assert.commandWorked(shard0DB.setProfilingLevel(2));
- assert.commandWorked(shard1DB.setProfilingLevel(2));
-
- //
- // Test cases.
- //
-
- let testName, outColl;
-
- // Test that a range query is passed through if the chunks encompassed by the query all lie
- // on a single shard, in this case st.shard0.shardName.
- testName = "agg_shard_targeting_range_single_shard_all_chunks_on_same_shard";
- assert.eq(mongosColl
- .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint),
- {comment: testName})
- .itcount(),
- 2);
-
- // We expect one aggregation on shard0, none on shard1, and no $mergeCursors on shard0 (the
- // primary shard).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: 1}
- }
- });
-
- // Test that a range query with a stage that requires a primary shard merge ($out in this
- // case) is passed through if the chunk ranges encompassed by the query all lie on the
- // primary shard.
- testName = "agg_shard_targeting_range_all_chunks_on_primary_shard_out_no_merge";
- outColl = mongosDB[testName];
-
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint).concat([
- {$out: testName}
- ]),
- comment: testName,
- cursor: {}
- }));
-
- // We expect one aggregation on shard0, none on shard1, and no $mergeCursors on shard0 (the
- // primary shard).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: 1}
- }
- });
-
- // Verify that the contents of the $out collection are as expected.
- assert.eq(outColl.find().sort({_id: 1}).toArray(), [{_id: -150}, {_id: -50}]);
-
- // Test that a passthrough will back out and split the pipeline if we try to target a single
- // shard, get a stale config exception, and find that more than one shard is now involved.
- // Move the _id: [-100, 0) chunk from st.shard0.shardName to st.shard1.shardName via
- // mongosForMove.
- assert.commandWorked(mongosForMove.getDB("admin").runCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -50},
- to: st.shard1.shardName,
- }));
-
- // Run the same aggregation that targeted a single shard via the now-stale mongoS. It should
- // attempt to send the aggregation to st.shard0.shardName, hit a stale config exception,
- // split the pipeline and redispatch. We append an $_internalSplitPipeline stage in order to
- // force a shard merge rather than a mongoS merge.
- testName = "agg_shard_targeting_backout_passthrough_and_split_if_cache_is_stale";
- assert.eq(mongosColl
- .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}]
- .concat(splitPoint)
- .concat(forcePrimaryMerge),
- {comment: testName})
- .itcount(),
- 2);
-
- // Before the first dispatch:
- // - mongosForMove and st.shard0.shardName (the donor shard) are up to date.
- // - mongosForAgg and st.shard1.shardName are stale. mongosForAgg incorrectly believes that
- // the necessary data is all on st.shard0.shardName.
- //
- // We therefore expect that:
- // - mongosForAgg will throw a stale config error when it attempts to establish a
- // single-shard cursor on st.shard0.shardName (attempt 1).
- // - mongosForAgg will back out, refresh itself, and redispatch to both shards.
- // - st.shard1.shardName will throw a stale config and refresh itself when the split
- // pipeline is sent to it (attempt 2).
- // - mongosForAgg will back out and redispatch (attempt 3).
- // - The aggregation will succeed on the third dispatch.
-
- // We confirm this behaviour via the following profiler results:
-
- // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
- // the mongoS was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
- // the shard was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
- // first, if present, is an aborted cursor created if the command reaches
- // st.shard0.shardName before st.shard1.shardName throws its stale config exception during
- // attempt 2. The second profiler entry is from the aggregation which succeeded.
- profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$exists: false}
- },
- maxExpectedMatches: 2
- });
-
- // - One aggregation on st.shard1.shardName with no stale config exception.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$exists: false}
- }
- });
-
- // - One $mergeCursors aggregation on primary st.shard0.shardName, since we eventually
- // target both shards after backing out the passthrough and splitting the pipeline.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: true}
- }
- });
-
- // Move the _id: [-100, 0) chunk back from st.shard1.shardName to st.shard0.shardName via
- // mongosForMove. Shard0 and mongosForAgg are now stale.
- assert.commandWorked(mongosForMove.getDB("admin").runCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -50},
- to: st.shard0.shardName,
- _waitForDelete: true
- }));
-
- // Run the same aggregation via the now-stale mongoS. It should split the pipeline, hit a
- // stale config exception, and reset to the original single-shard pipeline upon refresh. We
- // append an $_internalSplitPipeline stage in order to force a shard merge rather than a
- // mongoS merge.
- testName = "agg_shard_targeting_backout_split_pipeline_and_reassemble_if_cache_is_stale";
- assert.eq(mongosColl
- .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}]
- .concat(splitPoint)
- .concat(forcePrimaryMerge),
- {comment: testName})
- .itcount(),
- 2);
-
- // Before the first dispatch:
- // - mongosForMove and st.shard1.shardName (the donor shard) are up to date.
- // - mongosForAgg and st.shard0.shardName are stale. mongosForAgg incorrectly believes that
- // the necessary data is spread across both shards.
- //
- // We therefore expect that:
- // - mongosForAgg will throw a stale config error when it attempts to establish a cursor on
- // st.shard1.shardName (attempt 1).
- // - mongosForAgg will back out, refresh itself, and redispatch to st.shard0.shardName.
- // - st.shard0.shardName will throw a stale config and refresh itself when the pipeline is
- // sent to it (attempt 2).
- // - mongosForAgg will back out, and redispatch (attempt 3).
- // - The aggregation will succeed on the third dispatch.
-
- // We confirm this behaviour via the following profiler results:
-
- // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
- // the mongoS was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
- // the shard was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
- // first, if present, is an aborted cursor created if the command reaches
- // st.shard0.shardName before st.shard1.shardName throws its stale config exception during
- // attempt 1. The second profiler entry is the aggregation which succeeded.
- profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$exists: false}
- },
- maxExpectedMatches: 2
- });
-
- // No $mergeCursors aggregation on primary st.shard0.shardName, since after backing out the
- // split pipeline we eventually target only st.shard0.shardName.
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: true}
- }
- });
-
- // Clean up the test run by dropping the $out collection and resetting the profiler.
- assert(outColl.drop());
-
- assert.commandWorked(shard0DB.setProfilingLevel(0));
- assert.commandWorked(shard1DB.setProfilingLevel(0));
-
- assert(shard0DB.system.profile.drop());
- assert(shard1DB.system.profile.drop());
- }
-
- // Run tests with a variety of splitpoints, testing the pipeline split and re-assembly logic in
- // cases where the merge pipeline is empty, where the split stage is moved from shard to merge
- // pipe ($facet, $lookup), and where there are both shard and merge versions of the split source
- // ($sort, $group, $limit). Each test case will ultimately produce the same output.
- runAggShardTargetTest({splitPoint: []});
- runAggShardTargetTest({splitPoint: [{$sort: {_id: 1}}]});
- runAggShardTargetTest({splitPoint: [{$group: {_id: "$_id"}}]});
- runAggShardTargetTest({splitPoint: [{$limit: 4}]});
- runAggShardTargetTest({
- splitPoint: [
- {$facet: {facetPipe: [{$match: {_id: {$gt: MinKey}}}]}},
- {$unwind: "$facetPipe"},
- {$replaceRoot: {newRoot: "$facetPipe"}}
- ]
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+
+const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
+
+// mongosForAgg will be used to perform all aggregations.
+// mongosForMove does all chunk migrations, leaving mongosForAgg with stale config metadata.
+const mongosForAgg = st.s0;
+const mongosForMove = st.s1;
+
+const mongosDB = mongosForAgg.getDB(jsTestName());
+const mongosColl = mongosDB.test;
+
+const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
+
+// Turn off best-effort recipient metadata refresh post-migration commit on both shards because
+// it creates non-determinism for the profiler.
+assert.commandWorked(st.shard0.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
+assert.commandWorked(st.shard1.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
+
+// Turn off automatic shard refresh in mongos when a stale config error is thrown.
+assert.commandWorked(mongosForAgg.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshShardsOnRetargettingError', mode: 'alwaysOn'}));
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: -100}}));
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
+
+// Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
+
+// Write one document into each of the chunks.
+assert.writeOK(mongosColl.insert({_id: -150}));
+assert.writeOK(mongosColl.insert({_id: -50}));
+assert.writeOK(mongosColl.insert({_id: 50}));
+assert.writeOK(mongosColl.insert({_id: 150}));
+
+const shardExceptions =
+ [ErrorCodes.StaleConfig, ErrorCodes.StaleShardVersion, ErrorCodes.StaleEpoch];
+
+// Create an $_internalSplitPipeline stage that forces the merge to occur on the primary shard.
+const forcePrimaryMerge = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}];
+
+function runAggShardTargetTest({splitPoint}) {
+ // Ensure that both mongoS have up-to-date caches, and enable the profiler on both shards.
+ assert.commandWorked(mongosForAgg.getDB("admin").runCommand({flushRouterConfig: 1}));
+ assert.commandWorked(mongosForMove.getDB("admin").runCommand({flushRouterConfig: 1}));
+
+ assert.commandWorked(shard0DB.setProfilingLevel(2));
+ assert.commandWorked(shard1DB.setProfilingLevel(2));
+
+ //
+ // Test cases.
+ //
+
+ let testName, outColl;
+
+ // Test that a range query is passed through if the chunks encompassed by the query all lie
+ // on a single shard, in this case st.shard0.shardName.
+ testName = "agg_shard_targeting_range_single_shard_all_chunks_on_same_shard";
+ assert.eq(mongosColl
+ .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint),
+ {comment: testName})
+ .itcount(),
+ 2);
+
+ // We expect one aggregation on shard0, none on shard1, and no $mergeCursors on shard0 (the
+ // primary shard).
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShardDB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: 1}
+ }
+ });
+
+ // Test that a range query with a stage that requires a primary shard merge ($out in this
+ // case) is passed through if the chunk ranges encompassed by the query all lie on the
+ // primary shard.
+ testName = "agg_shard_targeting_range_all_chunks_on_primary_shard_out_no_merge";
+ outColl = mongosDB[testName];
+
+ assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint).concat([
+ {$out: testName}
+ ]),
+ comment: testName,
+ cursor: {}
+ }));
+
+ // We expect one aggregation on shard0, none on shard1, and no $mergeCursors on shard0 (the
+ // primary shard).
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
});
- runAggShardTargetTest({
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShardDB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: 1}
+ }
+ });
+
+ // Verify that the contents of the $out collection are as expected.
+ assert.eq(outColl.find().sort({_id: 1}).toArray(), [{_id: -150}, {_id: -50}]);
+
+ // Test that a passthrough will back out and split the pipeline if we try to target a single
+ // shard, get a stale config exception, and find that more than one shard is now involved.
+ // Move the _id: [-100, 0) chunk from st.shard0.shardName to st.shard1.shardName via
+ // mongosForMove.
+ assert.commandWorked(mongosForMove.getDB("admin").runCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -50},
+ to: st.shard1.shardName,
+ }));
+
+ // Run the same aggregation that targeted a single shard via the now-stale mongoS. It should
+ // attempt to send the aggregation to st.shard0.shardName, hit a stale config exception,
+ // split the pipeline and redispatch. We append an $_internalSplitPipeline stage in order to
+ // force a shard merge rather than a mongoS merge.
+ testName = "agg_shard_targeting_backout_passthrough_and_split_if_cache_is_stale";
+ assert.eq(mongosColl
+ .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}]
+ .concat(splitPoint)
+ .concat(forcePrimaryMerge),
+ {comment: testName})
+ .itcount(),
+ 2);
+
+ // Before the first dispatch:
+ // - mongosForMove and st.shard0.shardName (the donor shard) are up to date.
+ // - mongosForAgg and st.shard1.shardName are stale. mongosForAgg incorrectly believes that
+ // the necessary data is all on st.shard0.shardName.
+ //
+ // We therefore expect that:
+ // - mongosForAgg will throw a stale config error when it attempts to establish a
+ // single-shard cursor on st.shard0.shardName (attempt 1).
+ // - mongosForAgg will back out, refresh itself, and redispatch to both shards.
+ // - st.shard1.shardName will throw a stale config and refresh itself when the split
+ // pipeline is sent to it (attempt 2).
+ // - mongosForAgg will back out and redispatch (attempt 3).
+ // - The aggregation will succeed on the third dispatch.
+
+ // We confirm this behaviour via the following profiler results:
+
+ // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
+ // the mongoS was stale).
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$in: shardExceptions}
+ }
+ });
+
+ // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
+ // the shard was stale).
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard1DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$in: shardExceptions}
+ }
+ });
+
+ // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
+ // first, if present, is an aborted cursor created if the command reaches
+ // st.shard0.shardName before st.shard1.shardName throws its stale config exception during
+ // attempt 2. The second profiler entry is from the aggregation which succeeded.
+ profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
+ profileDB: shard0DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$exists: false}
+ },
+ maxExpectedMatches: 2
+ });
+
+ // - One aggregation on st.shard1.shardName with no stale config exception.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard1DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$exists: false}
+ }
+ });
+
+ // - One $mergeCursors aggregation on primary st.shard0.shardName, since we eventually
+ // target both shards after backing out the passthrough and splitting the pipeline.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardDB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: true}
+ }
+ });
+
+ // Move the _id: [-100, 0) chunk back from st.shard1.shardName to st.shard0.shardName via
+ // mongosForMove. Shard0 and mongosForAgg are now stale.
+ assert.commandWorked(mongosForMove.getDB("admin").runCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -50},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+ }));
+
+ // Run the same aggregation via the now-stale mongoS. It should split the pipeline, hit a
+ // stale config exception, and reset to the original single-shard pipeline upon refresh. We
+ // append an $_internalSplitPipeline stage in order to force a shard merge rather than a
+ // mongoS merge.
+ testName = "agg_shard_targeting_backout_split_pipeline_and_reassemble_if_cache_is_stale";
+ assert.eq(mongosColl
+ .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}]
+ .concat(splitPoint)
+ .concat(forcePrimaryMerge),
+ {comment: testName})
+ .itcount(),
+ 2);
+
+ // Before the first dispatch:
+ // - mongosForMove and st.shard1.shardName (the donor shard) are up to date.
+ // - mongosForAgg and st.shard0.shardName are stale. mongosForAgg incorrectly believes that
+ // the necessary data is spread across both shards.
+ //
+ // We therefore expect that:
+ // - mongosForAgg will throw a stale config error when it attempts to establish a cursor on
+ // st.shard1.shardName (attempt 1).
+ // - mongosForAgg will back out, refresh itself, and redispatch to st.shard0.shardName.
+ // - st.shard0.shardName will throw a stale config and refresh itself when the pipeline is
+ // sent to it (attempt 2).
+ // - mongosForAgg will back out, and redispatch (attempt 3).
+ // - The aggregation will succeed on the third dispatch.
+
+ // We confirm this behaviour via the following profiler results:
+
+ // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
+ // the mongoS was stale).
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard1DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$in: shardExceptions}
+ }
+ });
+
+ // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
+ // the shard was stale).
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$in: shardExceptions}
+ }
+ });
+
+ // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
+ // first, if present, is an aborted cursor created if the command reaches
+ // st.shard0.shardName before st.shard1.shardName throws its stale config exception during
+ // attempt 1. The second profiler entry is the aggregation which succeeded.
+ profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
+ profileDB: shard0DB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: false},
+ errCode: {$exists: false}
+ },
+ maxExpectedMatches: 2
+ });
+
+ // No $mergeCursors aggregation on primary st.shard0.shardName, since after backing out the
+ // split pipeline we eventually target only st.shard0.shardName.
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: primaryShardDB,
+ filter: {
+ "command.aggregate": mongosColl.getName(),
+ "command.comment": testName,
+ "command.pipeline.$mergeCursors": {$exists: true}
+ }
+ });
+
+ // Clean up the test run by dropping the $out collection and resetting the profiler.
+ assert(outColl.drop());
+
+ assert.commandWorked(shard0DB.setProfilingLevel(0));
+ assert.commandWorked(shard1DB.setProfilingLevel(0));
+
+ assert(shard0DB.system.profile.drop());
+ assert(shard1DB.system.profile.drop());
+}
+
+// Run tests with a variety of split points, testing the pipeline split and re-assembly logic in
+// cases where the merge pipeline is empty, where the split stage is moved from the shard to the
+// merge pipeline ($facet, $lookup), and where there are both shard and merge versions of the
+// split source ($sort, $group, $limit). Each test case will ultimately produce the same output.
+runAggShardTargetTest({splitPoint: []});
+runAggShardTargetTest({splitPoint: [{$sort: {_id: 1}}]});
+runAggShardTargetTest({splitPoint: [{$group: {_id: "$_id"}}]});
+runAggShardTargetTest({splitPoint: [{$limit: 4}]});
+runAggShardTargetTest({
+ splitPoint: [
+ {$facet: {facetPipe: [{$match: {_id: {$gt: MinKey}}}]}},
+ {$unwind: "$facetPipe"},
+ {$replaceRoot: {newRoot: "$facetPipe"}}
+ ]
+});
+runAggShardTargetTest({
splitPoint: [
{
$lookup: {
@@ -390,5 +387,5 @@
]
});
- st.stop();
+st.stop();
})();
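The profilerHas*OrThrow assertions used throughout the test above boil down to counting entries in each shard's system.profile collection. A minimal sketch of that pattern, assuming a profileDB connection and a filter document shaped like the call sites above (the real helpers in jstests/libs/profiler.js may differ in details such as retries and error messages):

// Count profiler entries matching 'filter'. Profiling level 2 must be enabled
// on 'profileDB' first (the test does this via setProfilingLevel(2)).
function numMatchingProfilerEntries(profileDB, filter) {
    return profileDB.system.profile.find(filter).itcount();
}
// e.g. expect exactly one targeted aggregation on shard0:
// assert.eq(1, numMatchingProfilerEntries(shard0DB, {"command.comment": testName}));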
diff --git a/jstests/aggregation/sharded_agg_cleanup_on_error.js b/jstests/aggregation/sharded_agg_cleanup_on_error.js
index 56d2ae73f32..cbcb1f02e53 100644
--- a/jstests/aggregation/sharded_agg_cleanup_on_error.js
+++ b/jstests/aggregation/sharded_agg_cleanup_on_error.js
@@ -8,136 +8,135 @@
* @tags: [requires_sharding,do_not_wrap_aggregations_in_facets]
*/
(function() {
- "use strict";
-
- // For assertMergeFailsForAllModesWithCode.
- load("jstests/aggregation/extras/merge_helpers.js");
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- const kFailpointOptions = {shouldCheckForInterrupt: true};
-
- const st = new ShardingTest({shards: 2});
- const kDBName = "test";
- const kDivideByZeroErrCode = 16608;
- const mongosDB = st.s.getDB(kDBName);
- const shard0DB = st.shard0.getDB(kDBName);
- const shard1DB = st.shard1.getDB(kDBName);
-
- let coll = mongosDB.sharded_agg_cleanup_on_error;
-
- for (let i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
-
- st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
- st.ensurePrimaryShard(kDBName, st.shard0.name);
-
- function assertFailsAndCleansUpCursors({pipeline, errCode}) {
- let cmdRes = mongosDB.runCommand(
- {aggregate: coll.getName(), pipeline: pipeline, cursor: {batchSize: 0}});
- assert.commandWorked(cmdRes);
- assert.neq(0, cmdRes.cursor.id);
- assert.eq(coll.getFullName(), cmdRes.cursor.ns);
- assert.eq(0, cmdRes.cursor.firstBatch.length);
-
- cmdRes = mongosDB.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
- assert.commandFailedWithCode(cmdRes, errCode);
-
- // Neither mongos nor the shards should leave cursors open. By the time we get here, the
- // cursor which was hanging on shard 1 will have been marked interrupted, but isn't
- // guaranteed to be deleted yet. Thus, we use an assert.soon().
- assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
- assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
- }
-
- try {
- // Set up a fail point which causes getMore to hang on shard 1.
- assert.commandWorked(shard1DB.adminCommand(
- {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
-
- // Issue an aggregation that will fail during a getMore on shard 0, and make sure that
- // this correctly kills the hanging cursor on shard 1. Use $_internalSplitPipeline to ensure
- // that this pipeline merges on mongos.
- assertFailsAndCleansUpCursors({
- pipeline: [
- {$project: {out: {$divide: ["$_id", 0]}}},
- {$_internalSplitPipeline: {mergeType: "mongos"}}
- ],
- errCode: kDivideByZeroErrCode
- });
-
- // Repeat the test above, but this time use $_internalSplitPipeline to force the merge to
- // take place on shard 0.
- assertFailsAndCleansUpCursors({
- pipeline: [
- {$project: {out: {$divide: ["$_id", 0]}}},
- {$_internalSplitPipeline: {mergeType: "primaryShard"}}
- ],
- errCode: kDivideByZeroErrCode
- });
- } finally {
- assert.commandWorked(
- shard0DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- }
-
- // Test that aggregations which fail to establish a merging shard cursor also clean up the open
- // shard cursors.
- try {
- // Enable the failpoint to fail on establishing a merging shard cursor.
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "clusterAggregateFailToEstablishMergingShardCursor",
- mode: "alwaysOn"
- }));
-
- // Run an aggregation which requires a merging shard pipeline. This should fail because of
- // the failpoint.
- assertErrorCode(coll, [{$out: "target"}], ErrorCodes.FailPointEnabled);
-
- // Neither mongos nor the shards should leave cursors open.
- assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.total == 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.total == 0);
-
- } finally {
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "clusterAggregateFailToEstablishMergingShardCursor",
- mode: "off"
- }));
- }
-
- // Test that aggregations involving $exchange correctly clean up the producer cursors.
- try {
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "clusterAggregateFailToDispatchExchangeConsumerPipeline",
- mode: "alwaysOn"
- }));
-
- // Run an aggregation which is eligible for $exchange. This should assert because of
- // the failpoint. Add a $group stage to force an exchange-eligible split of the pipeline
- // before the $merge. Without the $group we won't use the exchange optimization and instead
- // will send the $merge to each shard.
- st.shardColl(mongosDB.target, {_id: 1}, {_id: 0}, {_id: 1}, kDBName, false);
-
- assertMergeFailsForAllModesWithCode({
- source: coll,
- target: mongosDB.target,
- prevStages: [{$group: {_id: "$fakeShardKey"}}],
- errorCodes: ErrorCodes.FailPointEnabled
- });
-
- // Neither mongos nor the shards should leave cursors open.
- assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.total == 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.total == 0);
-
- } finally {
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "clusterAggregateFailToDispatchExchangeConsumerPipeline",
- mode: "off"
- }));
- }
-
- st.stop();
+"use strict";
+
+// For assertMergeFailsForAllModesWithCode.
+load("jstests/aggregation/extras/merge_helpers.js");
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+const kFailpointOptions = {
+ shouldCheckForInterrupt: true
+};
+
+const st = new ShardingTest({shards: 2});
+const kDBName = "test";
+const kDivideByZeroErrCode = 16608;
+const mongosDB = st.s.getDB(kDBName);
+const shard0DB = st.shard0.getDB(kDBName);
+const shard1DB = st.shard1.getDB(kDBName);
+
+let coll = mongosDB.sharded_agg_cleanup_on_error;
+
+for (let i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
+
+st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
+st.ensurePrimaryShard(kDBName, st.shard0.name);
+
+function assertFailsAndCleansUpCursors({pipeline, errCode}) {
+ let cmdRes = mongosDB.runCommand(
+ {aggregate: coll.getName(), pipeline: pipeline, cursor: {batchSize: 0}});
+ assert.commandWorked(cmdRes);
+ assert.neq(0, cmdRes.cursor.id);
+ assert.eq(coll.getFullName(), cmdRes.cursor.ns);
+ assert.eq(0, cmdRes.cursor.firstBatch.length);
+
+ cmdRes = mongosDB.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
+ assert.commandFailedWithCode(cmdRes, errCode);
+
+ // Neither mongos nor the shards should leave cursors open. By the time we get here, the
+ // cursor which was hanging on shard 1 will have been marked interrupted, but isn't
+ // guaranteed to be deleted yet. Thus, we use an assert.soon().
+ assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
+ assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
+}
+
+try {
+ // Set up a fail point which causes getMore to hang on shard 1.
+ assert.commandWorked(shard1DB.adminCommand(
+ {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
+
+ // Issue an aggregation that will fail during a getMore on shard 0, and make sure that
+ // this correctly kills the hanging cursor on shard 1. Use $_internalSplitPipeline to ensure
+ // that this pipeline merges on mongos.
+ assertFailsAndCleansUpCursors({
+ pipeline: [
+ {$project: {out: {$divide: ["$_id", 0]}}},
+ {$_internalSplitPipeline: {mergeType: "mongos"}}
+ ],
+ errCode: kDivideByZeroErrCode
+ });
+
+ // Repeat the test above, but this time use $_internalSplitPipeline to force the merge to
+ // take place on shard 0.
+ assertFailsAndCleansUpCursors({
+ pipeline: [
+ {$project: {out: {$divide: ["$_id", 0]}}},
+ {$_internalSplitPipeline: {mergeType: "primaryShard"}}
+ ],
+ errCode: kDivideByZeroErrCode
+ });
+} finally {
+ assert.commandWorked(shard0DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+}
+
+// Test that aggregations which fail to establish a merging shard cursor also clean up the open
+// shard cursors.
+try {
+ // Enable the failpoint to fail on establishing a merging shard cursor.
+ assert.commandWorked(mongosDB.adminCommand({
+ configureFailPoint: "clusterAggregateFailToEstablishMergingShardCursor",
+ mode: "alwaysOn"
+ }));
+
+ // Run an aggregation which requires a merging shard pipeline. This should fail because of
+ // the failpoint.
+ assertErrorCode(coll, [{$out: "target"}], ErrorCodes.FailPointEnabled);
+
+ // Neither mongos nor the shards should leave cursors open.
+ assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
+ assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.total == 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.total == 0);
+
+} finally {
+ assert.commandWorked(mongosDB.adminCommand(
+ {configureFailPoint: "clusterAggregateFailToEstablishMergingShardCursor", mode: "off"}));
+}
+
+// Test that aggregations involving $exchange correctly clean up the producer cursors.
+try {
+ assert.commandWorked(mongosDB.adminCommand({
+ configureFailPoint: "clusterAggregateFailToDispatchExchangeConsumerPipeline",
+ mode: "alwaysOn"
+ }));
+
+ // Run an aggregation which is eligible for $exchange. This should assert because of
+ // the failpoint. Add a $group stage to force an exchange-eligible split of the pipeline
+ // before the $merge. Without the $group we won't use the exchange optimization and instead
+ // will send the $merge to each shard.
+ st.shardColl(mongosDB.target, {_id: 1}, {_id: 0}, {_id: 1}, kDBName, false);
+
+ assertMergeFailsForAllModesWithCode({
+ source: coll,
+ target: mongosDB.target,
+ prevStages: [{$group: {_id: "$fakeShardKey"}}],
+ errorCodes: ErrorCodes.FailPointEnabled
+ });
+
+ // Neither mongos nor the shards should leave cursors open.
+ assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
+ assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.total == 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.total == 0);
+
+} finally {
+ assert.commandWorked(mongosDB.adminCommand({
+ configureFailPoint: "clusterAggregateFailToDispatchExchangeConsumerPipeline",
+ mode: "off"
+ }));
+}
+
+st.stop();
})();
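A pattern worth noting in the file above: every failpoint is enabled and then unconditionally disabled in a finally block, so a test failure cannot leave the cluster in a hung state. A minimal sketch of that discipline as a reusable helper (the name withFailPoint is illustrative and not part of the test suite):

// Enable a failpoint, run the body, and always turn the failpoint off again,
// even if the body throws.
function withFailPoint(adminDB, failPointName, data, fn) {
    assert.commandWorked(adminDB.runCommand(
        {configureFailPoint: failPointName, mode: "alwaysOn", data: data || {}}));
    try {
        fn();
    } finally {
        assert.commandWorked(
            adminDB.runCommand({configureFailPoint: failPointName, mode: "off"}));
    }
}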
diff --git a/jstests/aggregation/sources/addFields/use_cases.js b/jstests/aggregation/sources/addFields/use_cases.js
index b6f92fdb7b7..2f6f454ba5a 100644
--- a/jstests/aggregation/sources/addFields/use_cases.js
+++ b/jstests/aggregation/sources/addFields/use_cases.js
@@ -9,60 +9,59 @@
*/
(function() {
- "use strict";
+"use strict";
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
- const dbName = "test";
- const collName = jsTest.name();
+const dbName = "test";
+const collName = jsTest.name();
- function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
+function doExecutionTest(conn) {
+ const coll = conn.getDB(dbName).getCollection(collName);
+ coll.drop();
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.writeOK(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
- }
-
- // Compute a new field from the existing fields, and make sure that doing the same
- // with $addFields yields the correct answer.
- // First compute with $project, since we know all the fields in this document.
- let projectPipe = [{
- $project: {
- "2i": 1,
- "3i": 1,
- "6i^2": {"$multiply": ["$2i", "$3i"]},
- // _id is implicitly included.
- }
- }];
- let correct = coll.aggregate(projectPipe).toArray();
+ // Insert a bunch of documents of the form above.
+ const nDocs = 10;
+ for (let i = 0; i < nDocs; i++) {
+ assert.writeOK(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
+ }
- // Then compute the same results using $addFields.
- let addFieldsPipe = [{
- $addFields: {
- "6i^2": {"$multiply": ["$2i", "$3i"]},
- // All other fields are implicitly included.
- }
- }];
- let addFieldsResult = coll.aggregate(addFieldsPipe).toArray();
+ // Compute a new field from the existing fields, and make sure that doing the same
+ // with $addFields yields the correct answer.
+ // First compute with $project, since we know all the fields in this document.
+ let projectPipe = [{
+ $project: {
+ "2i": 1,
+ "3i": 1,
+ "6i^2": {"$multiply": ["$2i", "$3i"]},
+ // _id is implicitly included.
+ }
+ }];
+ let correct = coll.aggregate(projectPipe).toArray();
- // Then assert they are the same.
- assert(arrayEq(addFieldsResult, correct),
- "$addFields does not work the same as a $project with computed and included fields");
- }
+ // Then compute the same results using $addFields.
+ let addFieldsPipe = [{
+ $addFields: {
+ "6i^2": {"$multiply": ["$2i", "$3i"]},
+ // All other fields are implicitly included.
+ }
+ }];
+ let addFieldsResult = coll.aggregate(addFieldsPipe).toArray();
- // Test against the standalone started by resmoke.py.
- let conn = db.getMongo();
- doExecutionTest(conn);
- print("Success! Standalone execution use case test for $addFields passed.");
+ // Then assert they are the same.
+ assert(arrayEq(addFieldsResult, correct),
+ "$addFields does not work the same as a $project with computed and included fields");
+}
- // Test against a sharded cluster.
- let st = new ShardingTest({shards: 2});
- doExecutionTest(st.s0);
- st.stop();
- print("Success! Sharding use case test for $addFields passed.");
+// Test against the standalone started by resmoke.py.
+let conn = db.getMongo();
+doExecutionTest(conn);
+print("Success! Standalone execution use case test for $addFields passed.");
+// Test against a sharded cluster.
+let st = new ShardingTest({shards: 2});
+doExecutionTest(st.s0);
+st.stop();
+print("Success! Sharding use case test for $addFields passed.");
}());
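As a concrete instance of the equivalence this test asserts, both pipelines below produce {_id: 1, "2i": 2, "3i": 3, "6i^2": 6} from a single input document; a minimal sketch against a scratch collection (the collection name example is illustrative):

// $addFields appends the computed field; $project lists every field explicitly.
const ex = db.example;
ex.drop();
assert.writeOK(ex.insert({_id: 1, "2i": 2, "3i": 3}));
const viaAddFields =
    ex.aggregate([{$addFields: {"6i^2": {$multiply: ["$2i", "$3i"]}}}]).toArray();
const viaProject =
    ex.aggregate([{$project: {"2i": 1, "3i": 1, "6i^2": {$multiply: ["$2i", "$3i"]}}}]).toArray();
assert.eq(viaAddFields, viaProject);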
diff --git a/jstests/aggregation/sources/addFields/weather.js b/jstests/aggregation/sources/addFields/weather.js
index 79916ee13d6..16e570b843c 100644
--- a/jstests/aggregation/sources/addFields/weather.js
+++ b/jstests/aggregation/sources/addFields/weather.js
@@ -9,97 +9,96 @@
*/
(function() {
- "use strict";
+"use strict";
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
- const dbName = "test";
- const collName = jsTest.name();
+const dbName = "test";
+const collName = jsTest.name();
- Random.setRandomSeed();
+Random.setRandomSeed();
- /**
- * Helper to generate an array of specified length of numbers in the specified range.
- */
- function randomArray(length, minValue, maxValue) {
- let array = [];
- for (let i = 0; i < length; i++) {
- array.push((Random.rand() * (maxValue - minValue)) + minValue);
- }
- return array;
+/**
+ * Helper to generate an array of specified length of numbers in the specified range.
+ */
+function randomArray(length, minValue, maxValue) {
+ let array = [];
+ for (let i = 0; i < length; i++) {
+ array.push((Random.rand() * (maxValue - minValue)) + minValue);
}
+ return array;
+}
- /**
- * Helper to generate a randomized document with the following schema:
- * {
- * month: <integer month of year>,
- * day: <integer day of month>,
- * temperatures: <array of 24 decimal temperatures>
- * }
- */
- function generateRandomDocument() {
- const minTemp = -40;
- const maxTemp = 120;
-
- return {
- month: Random.randInt(12) + 1, // 1-12
- day: Random.randInt(31) + 1, // 1-31
- temperatures: randomArray(24, minTemp, maxTemp),
- };
- }
+/**
+ * Helper to generate a randomized document with the following schema:
+ * {
+ * month: <integer month of year>,
+ * day: <integer day of month>,
+ * temperatures: <array of 24 decimal temperatures>
+ * }
+ */
+function generateRandomDocument() {
+ const minTemp = -40;
+ const maxTemp = 120;
- function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
+ return {
+ month: Random.randInt(12) + 1, // 1-12
+ day: Random.randInt(31) + 1, // 1-31
+ temperatures: randomArray(24, minTemp, maxTemp),
+ };
+}
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.writeOK(coll.insert(generateRandomDocument()));
- }
+function doExecutionTest(conn) {
+ const coll = conn.getDB(dbName).getCollection(collName);
+ coll.drop();
- // Add the minimum, maximum, and average temperatures, and make sure that doing the same
- // with addFields yields the correct answer.
- // First compute with $project, since we know all the fields in this document.
- let projectWeatherPipe = [{
- $project: {
- "month": 1,
- "day": 1,
- "temperatures": 1,
- "minTemp": {"$min": "$temperatures"},
- "maxTemp": {"$max": "$temperatures"},
- "average": {"$avg": "$temperatures"},
- // _id is implicitly included.
- }
- }];
- let correctWeather = coll.aggregate(projectWeatherPipe).toArray();
+ // Insert a bunch of documents of the form above.
+ const nDocs = 10;
+ for (let i = 0; i < nDocs; i++) {
+ assert.writeOK(coll.insert(generateRandomDocument()));
+ }
- // Then compute the same results using $addFields.
- let addFieldsWeatherPipe = [{
- $addFields: {
- "minTemp": {"$min": "$temperatures"},
- "maxTemp": {"$max": "$temperatures"},
- "average": {"$avg": "$temperatures"},
- // All other fields are implicitly included.
- }
- }];
- let addFieldsResult = coll.aggregate(addFieldsWeatherPipe).toArray();
+ // Add the minimum, maximum, and average temperatures, and make sure that doing the same
+ // with addFields yields the correct answer.
+ // First compute with $project, since we know all the fields in this document.
+ let projectWeatherPipe = [{
+ $project: {
+ "month": 1,
+ "day": 1,
+ "temperatures": 1,
+ "minTemp": {"$min": "$temperatures"},
+ "maxTemp": {"$max": "$temperatures"},
+ "average": {"$avg": "$temperatures"},
+ // _id is implicitly included.
+ }
+ }];
+ let correctWeather = coll.aggregate(projectWeatherPipe).toArray();
- // Then assert they are the same.
- assert(arrayEq(addFieldsResult, correctWeather),
- "$addFields does not work the same as a $project with computed and included fields");
- }
+ // Then compute the same results using $addFields.
+ let addFieldsWeatherPipe = [{
+ $addFields: {
+ "minTemp": {"$min": "$temperatures"},
+ "maxTemp": {"$max": "$temperatures"},
+ "average": {"$avg": "$temperatures"},
+ // All other fields are implicitly included.
+ }
+ }];
+ let addFieldsResult = coll.aggregate(addFieldsWeatherPipe).toArray();
- // Test against the standalone started by resmoke.py.
- let conn = db.getMongo();
- doExecutionTest(conn);
- print("Success! Standalone execution weather test for $addFields passed.");
+ // Then assert they are the same.
+ assert(arrayEq(addFieldsResult, correctWeather),
+ "$addFields does not work the same as a $project with computed and included fields");
+}
- // Test against a sharded cluster.
- let st = new ShardingTest({shards: 2});
- doExecutionTest(st.s0);
- st.stop();
- print("Success! Sharding weather test for $addFields passed.");
+// Test against the standalone started by resmoke.py.
+let conn = db.getMongo();
+doExecutionTest(conn);
+print("Success! Standalone execution weather test for $addFields passed.");
+// Test against a sharded cluster.
+let st = new ShardingTest({shards: 2});
+doExecutionTest(st.s0);
+st.stop();
+print("Success! Sharding weather test for $addFields passed.");
}());
diff --git a/jstests/aggregation/sources/bucket/collation_bucket.js b/jstests/aggregation/sources/bucket/collation_bucket.js
index 45f15402499..617bf8085f2 100644
--- a/jstests/aggregation/sources/bucket/collation_bucket.js
+++ b/jstests/aggregation/sources/bucket/collation_bucket.js
@@ -3,102 +3,100 @@
// Test that the $bucket stage defines and sorts buckets according to the collation.
(function() {
- "use strict";
+"use strict";
- var results;
- const numericOrdering = {collation: {locale: "en_US", numericOrdering: true}};
+var results;
+const numericOrdering = {
+ collation: {locale: "en_US", numericOrdering: true}
+};
- var coll = db.collation_bucket;
- coll.drop();
+var coll = db.collation_bucket;
+coll.drop();
- function insertData() {
- assert.writeOK(coll.insert({num: "1"}));
- assert.writeOK(coll.insert({num: "2"}));
- assert.writeOK(coll.insert({num: "5"}));
- assert.writeOK(coll.insert({num: "10"}));
- assert.writeOK(coll.insert({num: "20"}));
- assert.writeOK(coll.insert({num: "50"}));
- assert.writeOK(coll.insert({num: "100"}));
- assert.writeOK(coll.insert({num: "200"}));
- assert.writeOK(coll.insert({num: "500"}));
- }
+function insertData() {
+ assert.writeOK(coll.insert({num: "1"}));
+ assert.writeOK(coll.insert({num: "2"}));
+ assert.writeOK(coll.insert({num: "5"}));
+ assert.writeOK(coll.insert({num: "10"}));
+ assert.writeOK(coll.insert({num: "20"}));
+ assert.writeOK(coll.insert({num: "50"}));
+ assert.writeOK(coll.insert({num: "100"}));
+ assert.writeOK(coll.insert({num: "200"}));
+ assert.writeOK(coll.insert({num: "500"}));
+}
- insertData();
+insertData();
- // Test that $bucket respects an explicit collation.
- results = coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100", "1000"]}}],
- numericOrdering)
- .toArray();
- assert.eq(3, results.length);
- assert.eq({_id: "1", count: 3}, results[0]);
- assert.eq({_id: "10", count: 3}, results[1]);
- assert.eq({_id: "100", count: 3}, results[2]);
+// Test that $bucket respects an explicit collation.
+results = coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100", "1000"]}}],
+ numericOrdering)
+ .toArray();
+assert.eq(3, results.length);
+assert.eq({_id: "1", count: 3}, results[0]);
+assert.eq({_id: "10", count: 3}, results[1]);
+assert.eq({_id: "100", count: 3}, results[2]);
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
- insertData();
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+insertData();
- // Test that $bucket respects the inherited collation.
- results = coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100", "1000"]}}])
- .toArray();
- assert.eq(3, results.length);
- assert.eq({_id: "1", count: 3}, results[0]);
- assert.eq({_id: "10", count: 3}, results[1]);
- assert.eq({_id: "100", count: 3}, results[2]);
+// Test that $bucket respects the inherited collation.
+results = coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100", "1000"]}}])
+ .toArray();
+assert.eq(3, results.length);
+assert.eq({_id: "1", count: 3}, results[0]);
+assert.eq({_id: "10", count: 3}, results[1]);
+assert.eq({_id: "100", count: 3}, results[2]);
- // Test that the collection default can be overridden with the simple collation. In this case,
- // the $bucket should fail, because under a lexicographical comparison strings like "2" or "5"
- // won't fall into any of the buckets.
- assert.throws(
- () => coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100", "1000"]}}],
- {collation: {locale: "simple"}}));
+// Test that the collection default can be overridden with the simple collation. In this case,
+// the $bucket should fail, because under a lexicographical comparison strings like "2" or "5"
+// won't fall into any of the buckets.
+assert.throws(
+ () => coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100", "1000"]}}],
+ {collation: {locale: "simple"}}));
- // Test that $bucket rejects boundaries that are not sorted according to the collation.
- assert.throws(
- () => coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["100", "20", "4"]}}]));
+// Test that $bucket rejects boundaries that are not sorted according to the collation.
+assert.throws(() => coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["100", "20", "4"]}}]));
- assert.throws(() =>
- coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["4", "20", "100"]}}],
- {collation: {locale: "simple"}}));
+assert.throws(() => coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["4", "20", "100"]}}],
+ {collation: {locale: "simple"}}));
- // Test that $bucket rejects a default value that falls within the boundaries.
- assert.throws(
- () => coll.aggregate(
- [{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100"], default: "40"}}]));
+// Test that $bucket rejects a default value that falls within the boundaries.
+assert.throws(() => coll.aggregate(
+ [{$bucket: {groupBy: "$num", boundaries: ["1", "10", "100"], default: "40"}}]));
- assert.throws(() => coll.aggregate(
- [{$bucket: {groupBy: "$num", boundaries: ["100", "999"], default: "2"}}],
- {collation: {locale: "simple"}}));
+assert.throws(
+ () => coll.aggregate([{$bucket: {groupBy: "$num", boundaries: ["100", "999"], default: "2"}}],
+ {collation: {locale: "simple"}}));
- // Test that $bucket accepts a default value that falls outside the boundaries according to the
- // collation.
- results =
- coll.aggregate([{
+// Test that $bucket accepts a default value that falls outside the boundaries according to the
+// collation.
+results = coll.aggregate([{
+ $bucket: {
+ groupBy: "$num",
+ boundaries: ["100", "999"],
+ default: "2" // Would fall between 100 and 999 if using the simple collation.
+ }
+ }])
+ .toArray();
+assert.eq(2, results.length);
+assert.eq({_id: "2", count: 6}, results[0]);
+assert.eq({_id: "100", count: 3}, results[1]); // "100", "200", and "500".
+
+results =
+ coll.aggregate(
+ [{
$bucket: {
groupBy: "$num",
- boundaries: ["100", "999"],
- default: "2" // Would fall between 100 and 999 if using the simple collation.
+ boundaries: ["1", "19999"], // Will include all numbers that start with "1"
+ default: "2" // Would fall between boundaries if using the
+ // collection-default collation with numeric
+ // ordering.
}
- }])
- .toArray();
- assert.eq(2, results.length);
- assert.eq({_id: "2", count: 6}, results[0]);
- assert.eq({_id: "100", count: 3}, results[1]); // "100", "200", and "500".
-
- results =
- coll.aggregate(
- [{
- $bucket: {
- groupBy: "$num",
- boundaries: ["1", "19999"], // Will include all numbers that start with "1"
- default: "2" // Would fall between boundaries if using the
- // collection-default collation with numeric
- // ordering.
- }
- }],
- {collation: {locale: "simple"}})
- .toArray();
- assert.eq(2, results.length);
- assert.eq({_id: "1", count: 3}, results[0]); // "1", "10", and "100".
- assert.eq({_id: "2", count: 6}, results[1]);
+ }],
+ {collation: {locale: "simple"}})
+ .toArray();
+assert.eq(2, results.length);
+assert.eq({_id: "1", count: 3}, results[0]); // "1", "10", and "100".
+assert.eq({_id: "2", count: 6}, results[1]);
})();
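The bucket behaviour above hinges entirely on how strings compare under each collation; a small illustration using the standard cursor.collation() shell API on the same collection:

// Numeric ordering: "1" < "2" < "5" < "10" < ... < "500".
db.collation_bucket.find({}, {_id: 0, num: 1})
    .sort({num: 1})
    .collation({locale: "en_US", numericOrdering: true});
// Simple (lexicographic) ordering: "1" < "10" < "100" < "2" < "20" < ...
db.collation_bucket.find({}, {_id: 0, num: 1}).sort({num: 1}).collation({locale: "simple"});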
diff --git a/jstests/aggregation/sources/bucketauto/collation_bucketauto.js b/jstests/aggregation/sources/bucketauto/collation_bucketauto.js
index 4aafa6ea511..26b48951ab7 100644
--- a/jstests/aggregation/sources/bucketauto/collation_bucketauto.js
+++ b/jstests/aggregation/sources/bucketauto/collation_bucketauto.js
@@ -3,55 +3,56 @@
// Test that the $bucketAuto stage defines and sorts buckets according to the collation.
(function() {
- "use strict";
-
- var results;
- const numericOrdering = {collation: {locale: "en_US", numericOrdering: true}};
-
- var coll = db.collation_bucket;
- coll.drop();
-
- function insertData() {
- assert.writeOK(coll.insert({num: "1"}));
- assert.writeOK(coll.insert({num: "2"}));
- assert.writeOK(coll.insert({num: "5"}));
- assert.writeOK(coll.insert({num: "10"}));
- assert.writeOK(coll.insert({num: "20"}));
- assert.writeOK(coll.insert({num: "50"}));
- assert.writeOK(coll.insert({num: "100"}));
- assert.writeOK(coll.insert({num: "200"}));
- assert.writeOK(coll.insert({num: "500"}));
- }
-
- insertData();
-
- // Test that $bucketAuto respects an explicit collation.
- results =
- coll.aggregate([{$bucketAuto: {groupBy: "$num", buckets: 3}}], numericOrdering).toArray();
- assert.eq(3, results.length);
- assert.eq({_id: {min: "1", max: "10"}, count: 3}, results[0]);
- assert.eq({_id: {min: "10", max: "100"}, count: 3}, results[1]);
- assert.eq({_id: {min: "100", max: "500"}, count: 3}, results[2]);
-
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
- insertData();
-
- // Test that $bucketAuto respects the inherited collation.
- results = coll.aggregate([{$bucketAuto: {groupBy: "$num", buckets: 3}}]).toArray();
- assert.eq(3, results.length);
- assert.eq({_id: {min: "1", max: "10"}, count: 3}, results[0]);
- assert.eq({_id: {min: "10", max: "100"}, count: 3}, results[1]);
- assert.eq({_id: {min: "100", max: "500"}, count: 3}, results[2]);
-
- // Test that the collection default can be overridden with the simple collation. In this case,
- // the numbers will be sorted in lexicographical order, so the 3 buckets will be:
- // ["1", "10","100"], ["2", "20", "200"], and ["5", "50", "500"]
- results = coll.aggregate([{$bucketAuto: {groupBy: "$num", buckets: 3}}],
- {collation: {locale: "simple"}})
- .toArray();
- assert.eq(3, results.length);
- assert.eq({_id: {min: "1", max: "2"}, count: 3}, results[0]);
- assert.eq({_id: {min: "2", max: "5"}, count: 3}, results[1]);
- assert.eq({_id: {min: "5", max: "500"}, count: 3}, results[2]);
+"use strict";
+
+var results;
+const numericOrdering = {
+ collation: {locale: "en_US", numericOrdering: true}
+};
+
+var coll = db.collation_bucket;
+coll.drop();
+
+function insertData() {
+ assert.writeOK(coll.insert({num: "1"}));
+ assert.writeOK(coll.insert({num: "2"}));
+ assert.writeOK(coll.insert({num: "5"}));
+ assert.writeOK(coll.insert({num: "10"}));
+ assert.writeOK(coll.insert({num: "20"}));
+ assert.writeOK(coll.insert({num: "50"}));
+ assert.writeOK(coll.insert({num: "100"}));
+ assert.writeOK(coll.insert({num: "200"}));
+ assert.writeOK(coll.insert({num: "500"}));
+}
+
+insertData();
+
+// Test that $bucketAuto respects an explicit collation.
+results = coll.aggregate([{$bucketAuto: {groupBy: "$num", buckets: 3}}], numericOrdering).toArray();
+assert.eq(3, results.length);
+assert.eq({_id: {min: "1", max: "10"}, count: 3}, results[0]);
+assert.eq({_id: {min: "10", max: "100"}, count: 3}, results[1]);
+assert.eq({_id: {min: "100", max: "500"}, count: 3}, results[2]);
+
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+insertData();
+
+// Test that $bucketAuto respects the inherited collation.
+results = coll.aggregate([{$bucketAuto: {groupBy: "$num", buckets: 3}}]).toArray();
+assert.eq(3, results.length);
+assert.eq({_id: {min: "1", max: "10"}, count: 3}, results[0]);
+assert.eq({_id: {min: "10", max: "100"}, count: 3}, results[1]);
+assert.eq({_id: {min: "100", max: "500"}, count: 3}, results[2]);
+
+// Test that the collection default can be overridden with the simple collation. In this case,
+// the numbers will be sorted in lexicographical order, so the 3 buckets will be:
+// ["1", "10","100"], ["2", "20", "200"], and ["5", "50", "500"]
+results =
+ coll.aggregate([{$bucketAuto: {groupBy: "$num", buckets: 3}}], {collation: {locale: "simple"}})
+ .toArray();
+assert.eq(3, results.length);
+assert.eq({_id: {min: "1", max: "2"}, count: 3}, results[0]);
+assert.eq({_id: {min: "2", max: "5"}, count: 3}, results[1]);
+assert.eq({_id: {min: "5", max: "500"}, count: 3}, results[2]);
})();
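The final assertions follow directly from lexicographic order: under the simple collation the nine strings sort as "1", "10", "100", "2", "20", "200", "5", "50", "500", so $bucketAuto with buckets: 3 yields three groups of three. A one-line sanity check (JavaScript's default Array.sort() is also lexicographic for these ASCII strings):

// Prints ["1", "10", "100", "2", "20", "200", "5", "50", "500"].
printjson(["1", "2", "5", "10", "20", "50", "100", "200", "500"].sort());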
diff --git a/jstests/aggregation/sources/collStats/count.js b/jstests/aggregation/sources/collStats/count.js
index 265805408a3..5eb96cd7146 100644
--- a/jstests/aggregation/sources/collStats/count.js
+++ b/jstests/aggregation/sources/collStats/count.js
@@ -1,71 +1,71 @@
// Test that count within a $collStats stage returns the correct number of documents.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
- load("jstests/libs/fixture_helpers.js"); // For "FixtureHelpers".
+load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
+load("jstests/libs/fixture_helpers.js"); // For "FixtureHelpers".
- let testDB = db.getSiblingDB("aggregation_count_db");
- let coll = testDB.aggregation_count;
- coll.drop();
+let testDB = db.getSiblingDB("aggregation_count_db");
+let coll = testDB.aggregation_count;
+coll.drop();
- let nDocs = 1000;
- for (var i = 0; i < nDocs; i++) {
- assert.writeOK(coll.insert({a: i}));
- }
+let nDocs = 1000;
+for (var i = 0; i < nDocs; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
- // Test that $collStats must be first stage.
- let pipeline = [{$match: {}}, {$collStats: {}}];
- assertErrorCode(coll, pipeline, 40602);
+// Test that $collStats must be first stage.
+let pipeline = [{$match: {}}, {$collStats: {}}];
+assertErrorCode(coll, pipeline, 40602);
- // Test that an error is returned if count is not an object.
- pipeline = [{$collStats: {count: 1}}];
- assertErrorCode(coll, pipeline, 40480, "count spec must be an object");
- pipeline = [{$collStats: {count: "1"}}];
- assertErrorCode(coll, pipeline, 40480, "count spec must be an object");
+// Test that an error is returned if count is not an object.
+pipeline = [{$collStats: {count: 1}}];
+assertErrorCode(coll, pipeline, 40480, "count spec must be an object");
+pipeline = [{$collStats: {count: "1"}}];
+assertErrorCode(coll, pipeline, 40480, "count spec must be an object");
- // Test the accuracy of the record count as a standalone option.
- pipeline = [{$collStats: {count: {}}}];
- let result = coll.aggregate(pipeline).next();
- assert.eq(nDocs, result.count);
+// Test the accuracy of the record count as a standalone option.
+pipeline = [{$collStats: {count: {}}}];
+let result = coll.aggregate(pipeline).next();
+assert.eq(nDocs, result.count);
- // Test the record count alongside latencyStats and storageStats.
- pipeline = [{$collStats: {count: {}, latencyStats: {}}}];
- result = coll.aggregate(pipeline).next();
- assert.eq(nDocs, result.count);
- assert(result.hasOwnProperty("latencyStats"));
- assert(result.latencyStats.hasOwnProperty("reads"));
- assert(result.latencyStats.hasOwnProperty("writes"));
- assert(result.latencyStats.hasOwnProperty("commands"));
+// Test the record count alongside latencyStats and storageStats.
+pipeline = [{$collStats: {count: {}, latencyStats: {}}}];
+result = coll.aggregate(pipeline).next();
+assert.eq(nDocs, result.count);
+assert(result.hasOwnProperty("latencyStats"));
+assert(result.latencyStats.hasOwnProperty("reads"));
+assert(result.latencyStats.hasOwnProperty("writes"));
+assert(result.latencyStats.hasOwnProperty("commands"));
- pipeline = [{$collStats: {count: {}, latencyStats: {}, storageStats: {}}}];
- result = coll.aggregate(pipeline).next();
- assert.eq(nDocs, result.count);
- assert(result.hasOwnProperty("latencyStats"));
- assert(result.latencyStats.hasOwnProperty("reads"));
- assert(result.latencyStats.hasOwnProperty("writes"));
- assert(result.latencyStats.hasOwnProperty("commands"));
- assert(result.hasOwnProperty("storageStats"));
- assert.eq(nDocs, result.storageStats.count);
+pipeline = [{$collStats: {count: {}, latencyStats: {}, storageStats: {}}}];
+result = coll.aggregate(pipeline).next();
+assert.eq(nDocs, result.count);
+assert(result.hasOwnProperty("latencyStats"));
+assert(result.latencyStats.hasOwnProperty("reads"));
+assert(result.latencyStats.hasOwnProperty("writes"));
+assert(result.latencyStats.hasOwnProperty("commands"));
+assert(result.hasOwnProperty("storageStats"));
+assert.eq(nDocs, result.storageStats.count);
- // Test the record count against an empty collection.
- assert.writeOK(coll.remove({}));
- pipeline = [{$collStats: {count: {}}}];
- result = coll.aggregate(pipeline).next();
- assert.eq(0, result.count);
+// Test the record count against an empty collection.
+assert.writeOK(coll.remove({}));
+pipeline = [{$collStats: {count: {}}}];
+result = coll.aggregate(pipeline).next();
+assert.eq(0, result.count);
- // Test that we error when the collection does not exist.
- coll.drop();
- assertErrorCode(coll, pipeline, 40481);
+// Test that we error when the collection does not exist.
+coll.drop();
+assertErrorCode(coll, pipeline, 40481);
- // Test that we error when the database does not exist.
- // TODO SERVER-33039 When running against a mongos, a non-existent database will cause all
- // aggregations to return an empty result set.
- assert.commandWorked(testDB.dropDatabase());
- if (FixtureHelpers.isMongos(testDB)) {
- assert.eq([], coll.aggregate(pipeline).toArray());
- } else {
- assertErrorCode(coll, pipeline, 40481);
- }
+// Test that we error when the database does not exist.
+// TODO SERVER-33039 When running against a mongos, a non-existent database will cause all
+// aggregations to return an empty result set.
+assert.commandWorked(testDB.dropDatabase());
+if (FixtureHelpers.isMongos(testDB)) {
+ assert.eq([], coll.aggregate(pipeline).toArray());
+} else {
+ assertErrorCode(coll, pipeline, 40481);
+}
}());
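For reference, a sketch of the shape of the document the assertions above inspect (field values are illustrative, and the exact fields present depend on which $collStats options were requested):

// db.aggregation_count.aggregate([{$collStats: {count: {}, latencyStats: {}}}]).next()
// yields one document per collection (per shard on a sharded cluster), roughly:
// {
//     ns: "aggregation_count_db.aggregation_count",
//     host: "<host:port>",
//     localTime: ISODate("..."),
//     count: 1000,
//     latencyStats: {reads: {...}, writes: {...}, commands: {...}}
// }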
diff --git a/jstests/aggregation/sources/collStats/shard_host_info.js b/jstests/aggregation/sources/collStats/shard_host_info.js
index 34e1d8a195e..ced3f9bb47f 100644
--- a/jstests/aggregation/sources/collStats/shard_host_info.js
+++ b/jstests/aggregation/sources/collStats/shard_host_info.js
@@ -7,52 +7,52 @@
* ]
*/
(function() {
- "use strict";
-
- // Test mongoD behaviour using the standalone started by resmoke.py.
- let testDB = db.getSiblingDB(jsTestName());
- let testColl = testDB.test;
-
- // getHostName() doesn't include port, db.getMongo().host is 127.0.0.1:<port>
- const hostName = (getHostName() + ":" + db.getMongo().host.split(":")[1]);
-
- // Test that the shard field is absent and the host field is present when run on mongoD.
- assert.eq(testColl
- .aggregate([
- {$collStats: {latencyStats: {histograms: true}}},
- {$group: {_id: {shard: "$shard", host: "$host"}}}
- ])
- .toArray(),
- [{_id: {host: hostName}}]);
-
- // Test that both shard and hostname are present for $collStats results on a sharded cluster.
- const st = new ShardingTest({name: jsTestName(), shards: 2});
-
- testDB = st.s.getDB(jsTestName());
- testColl = testDB.test;
-
- assert.commandWorked(testDB.dropDatabase());
-
- // Enable sharding on the test database.
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
-
- // Shard 'testColl' on {_id: 'hashed'}. This will automatically presplit the collection and
- // place chunks on each shard.
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testColl.getFullName(), key: {_id: "hashed"}}));
-
- // Group $collStats result by $shard and $host to confirm that both fields are present.
- assert.eq(testColl
- .aggregate([
- {$collStats: {latencyStats: {histograms: true}}},
- {$group: {_id: {shard: "$shard", host: "$host"}}},
- {$sort: {_id: 1}}
- ])
- .toArray(),
- [
- {_id: {shard: st.shard0.shardName, host: st.rs0.getPrimary().host}},
- {_id: {shard: st.shard1.shardName, host: st.rs1.getPrimary().host}},
- ]);
-
- st.stop();
+"use strict";
+
+// Test mongoD behaviour using the standalone started by resmoke.py.
+let testDB = db.getSiblingDB(jsTestName());
+let testColl = testDB.test;
+
+// getHostName() doesn't include the port; db.getMongo().host is 127.0.0.1:<port>.
+const hostName = (getHostName() + ":" + db.getMongo().host.split(":")[1]);
+
+// Test that the shard field is absent and the host field is present when run on mongoD.
+assert.eq(testColl
+ .aggregate([
+ {$collStats: {latencyStats: {histograms: true}}},
+ {$group: {_id: {shard: "$shard", host: "$host"}}}
+ ])
+ .toArray(),
+ [{_id: {host: hostName}}]);
+
+// Test that both shard and hostname are present for $collStats results on a sharded cluster.
+const st = new ShardingTest({name: jsTestName(), shards: 2});
+
+testDB = st.s.getDB(jsTestName());
+testColl = testDB.test;
+
+assert.commandWorked(testDB.dropDatabase());
+
+// Enable sharding on the test database.
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+
+// Shard 'testColl' on {_id: 'hashed'}. This will automatically presplit the collection and
+// place chunks on each shard.
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testColl.getFullName(), key: {_id: "hashed"}}));
+
+// Group $collStats result by $shard and $host to confirm that both fields are present.
+assert.eq(testColl
+ .aggregate([
+ {$collStats: {latencyStats: {histograms: true}}},
+ {$group: {_id: {shard: "$shard", host: "$host"}}},
+ {$sort: {_id: 1}}
+ ])
+ .toArray(),
+ [
+ {_id: {shard: st.shard0.shardName, host: st.rs0.getPrimary().host}},
+ {_id: {shard: st.shard1.shardName, host: st.rs1.getPrimary().host}},
+ ]);
+
+st.stop();
})();
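The hostName construction in the file above works because db.getMongo().host includes the port while getHostName() does not; a minimal sketch of the recovery (assuming the host string has the host:port form noted in the comment):

// e.g. "127.0.0.1:20000".split(":")[1] === "20000"
const port = db.getMongo().host.split(":")[1];
const fullHostName = getHostName() + ":" + port;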
diff --git a/jstests/aggregation/sources/facet/inner_graphlookup.js b/jstests/aggregation/sources/facet/inner_graphlookup.js
index 340853f7721..9631b8878ef 100644
--- a/jstests/aggregation/sources/facet/inner_graphlookup.js
+++ b/jstests/aggregation/sources/facet/inner_graphlookup.js
@@ -7,21 +7,21 @@
* using the $graphLookup stage outside of the $facet stage.
*/
(function() {
- "use strict";
+"use strict";
- // We will only use one collection, the $graphLookup will look up from the same collection.
- var graphColl = db.facetGraphLookup;
+// We will only use one collection, the $graphLookup will look up from the same collection.
+var graphColl = db.facetGraphLookup;
- // The graph in ASCII form: 0 --- 1 --- 2 3
- graphColl.drop();
- assert.writeOK(graphColl.insert({_id: 0, edges: [1]}));
- assert.writeOK(graphColl.insert({_id: 1, edges: [0, 2]}));
- assert.writeOK(graphColl.insert({_id: 2, edges: [1]}));
- assert.writeOK(graphColl.insert({_id: 3}));
+// The graph in ASCII form: 0 --- 1 --- 2 3
+graphColl.drop();
+assert.writeOK(graphColl.insert({_id: 0, edges: [1]}));
+assert.writeOK(graphColl.insert({_id: 1, edges: [0, 2]}));
+assert.writeOK(graphColl.insert({_id: 2, edges: [1]}));
+assert.writeOK(graphColl.insert({_id: 3}));
- // For each document in the collection, this will compute all the other documents that are
- // reachable from this one.
- const graphLookupStage = {
+// For each document in the collection, this will compute all the other documents that are
+// reachable from this one.
+const graphLookupStage = {
$graphLookup: {
from: graphColl.getName(),
startWith: "$_id",
@@ -31,20 +31,24 @@
}
};
- const projectStage = {$project: {_id: 1, edges: 1, connected_length: {$size: "$connected"}}};
-
- const normalResults = graphColl.aggregate([graphLookupStage, projectStage]).toArray();
- const facetedResults =
- graphColl.aggregate([{$facet: {nested: [graphLookupStage, projectStage]}}]).toArray();
- assert.eq(facetedResults, [{nested: normalResults}]);
-
- const sortStage = {$sort: {_id: 1, "connected._id": 1}};
-
- const normalResultsUnwound =
- graphColl.aggregate([graphLookupStage, {$unwind: "$connected"}, sortStage]).toArray();
- const facetedResultsUnwound =
- graphColl
- .aggregate([{$facet: {nested: [graphLookupStage, {$unwind: "$connected"}, sortStage]}}])
- .toArray();
- assert.eq(facetedResultsUnwound, [{nested: normalResultsUnwound}]);
+const projectStage = {
+ $project: {_id: 1, edges: 1, connected_length: {$size: "$connected"}}
+};
+
+const normalResults = graphColl.aggregate([graphLookupStage, projectStage]).toArray();
+const facetedResults =
+ graphColl.aggregate([{$facet: {nested: [graphLookupStage, projectStage]}}]).toArray();
+assert.eq(facetedResults, [{nested: normalResults}]);
+
+const sortStage = {
+ $sort: {_id: 1, "connected._id": 1}
+};
+
+const normalResultsUnwound =
+ graphColl.aggregate([graphLookupStage, {$unwind: "$connected"}, sortStage]).toArray();
+const facetedResultsUnwound =
+ graphColl
+ .aggregate([{$facet: {nested: [graphLookupStage, {$unwind: "$connected"}, sortStage]}}])
+ .toArray();
+assert.eq(facetedResultsUnwound, [{nested: normalResultsUnwound}]);
}());
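A worked reading of the graph above, assuming the usual $graphLookup wiring of connectFromField: "edges" and connectToField: "_id" (the stage body is partially elided in this hunk): starting from _id 0, the traversal reaches documents 0, 1 and 2 via the edge chain 0 -> 1 -> 2, while the isolated document 3 matches only itself.

// Expected connected_length per document for the graph 0 --- 1 --- 2    3:
//   {_id: 0} -> 3, {_id: 1} -> 3, {_id: 2} -> 3, {_id: 3} -> 1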
diff --git a/jstests/aggregation/sources/facet/inner_lookup.js b/jstests/aggregation/sources/facet/inner_lookup.js
index 39f1b53f88f..0852f820869 100644
--- a/jstests/aggregation/sources/facet/inner_lookup.js
+++ b/jstests/aggregation/sources/facet/inner_lookup.js
@@ -7,41 +7,36 @@
* the $lookup stage outside of the $facet stage.
*/
(function() {
- "use strict";
+"use strict";
- var local = db.facetLookupLocal;
- var foreign = db.facetLookupForeign;
+var local = db.facetLookupLocal;
+var foreign = db.facetLookupForeign;
- local.drop();
- assert.writeOK(local.insert({_id: 0}));
- assert.writeOK(local.insert({_id: 1}));
+local.drop();
+assert.writeOK(local.insert({_id: 0}));
+assert.writeOK(local.insert({_id: 1}));
- foreign.drop();
- assert.writeOK(foreign.insert({_id: 0, foreignKey: 0}));
- assert.writeOK(foreign.insert({_id: 1, foreignKey: 1}));
- assert.writeOK(foreign.insert({_id: 2, foreignKey: 2}));
+foreign.drop();
+assert.writeOK(foreign.insert({_id: 0, foreignKey: 0}));
+assert.writeOK(foreign.insert({_id: 1, foreignKey: 1}));
+assert.writeOK(foreign.insert({_id: 2, foreignKey: 2}));
- function runTest(lookupStage) {
- const lookupResults = local.aggregate([lookupStage]).toArray();
- const facetedLookupResults = local.aggregate([{$facet: {nested: [lookupStage]}}]).toArray();
- assert.eq(facetedLookupResults, [{nested: lookupResults}]);
+function runTest(lookupStage) {
+ const lookupResults = local.aggregate([lookupStage]).toArray();
+ const facetedLookupResults = local.aggregate([{$facet: {nested: [lookupStage]}}]).toArray();
+ assert.eq(facetedLookupResults, [{nested: lookupResults}]);
- const lookupResultsUnwound = local.aggregate([lookupStage, {$unwind: "$joined"}]).toArray();
- const facetedLookupResultsUnwound =
- local.aggregate([{$facet: {nested: [lookupStage, {$unwind: "$joined"}]}}]).toArray();
- assert.eq(facetedLookupResultsUnwound, [{nested: lookupResultsUnwound}]);
- }
+ const lookupResultsUnwound = local.aggregate([lookupStage, {$unwind: "$joined"}]).toArray();
+ const facetedLookupResultsUnwound =
+ local.aggregate([{$facet: {nested: [lookupStage, {$unwind: "$joined"}]}}]).toArray();
+ assert.eq(facetedLookupResultsUnwound, [{nested: lookupResultsUnwound}]);
+}
- runTest({
- $lookup: {
- from: foreign.getName(),
- localField: "_id",
- foreignField: "foreignKey",
- as: "joined"
- }
- });
+runTest({
+ $lookup: {from: foreign.getName(), localField: "_id", foreignField: "foreignKey", as: "joined"}
+});
- runTest({
+runTest({
$lookup: {
from: foreign.getName(),
let : {id1: "$_id"},
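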
diff --git a/jstests/aggregation/sources/facet/use_cases.js b/jstests/aggregation/sources/facet/use_cases.js
index a6f1def408e..83f5d58d4d8 100644
--- a/jstests/aggregation/sources/facet/use_cases.js
+++ b/jstests/aggregation/sources/facet/use_cases.js
@@ -6,168 +6,164 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- const dbName = "test";
- const collName = jsTest.name();
- const testNs = dbName + "." + collName;
+const dbName = "test";
+const collName = jsTest.name();
+const testNs = dbName + "." + collName;
- Random.setRandomSeed();
+Random.setRandomSeed();
- /**
- * Helper to get a random entry out of an array.
- */
- function randomChoice(array) {
- return array[Random.randInt(array.length)];
- }
-
- /**
- * Helper to generate a randomized document with the following schema:
- * {
- * manufacturer: <string>,
- * price: <double>,
- * screenSize: <double>
- * }
- */
- function generateRandomDocument(docId) {
- const manufacturers =
- ["Sony", "Samsung", "LG", "Panasonic", "Mitsubishi", "Vizio", "Toshiba", "Sharp"];
- const minPrice = 100;
- const maxPrice = 4000;
- const minScreenSize = 18;
- const maxScreenSize = 40;
-
- return {
- _id: docId,
- manufacturer: randomChoice(manufacturers),
- price: Random.randInt(maxPrice - minPrice + 1) + minPrice,
- screenSize: Random.randInt(maxScreenSize - minScreenSize + 1) + minScreenSize,
- };
- }
+/**
+ * Helper to get a random entry out of an array.
+ */
+function randomChoice(array) {
+ return array[Random.randInt(array.length)];
+}
- /**
- * Inserts 'nDocs' documents into collection given by 'dbName' and 'collName'. Documents will
- * have _ids in the range [0, nDocs).
- */
- function populateData(conn, nDocs) {
- var coll = conn.getDB(dbName).getCollection(collName);
- coll.remove({}); // Don't drop the collection, since it might be sharded.
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < nDocs; i++) {
- const doc = generateRandomDocument(i);
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
- }
+/**
+ * Helper to generate a randomized document with the following schema:
+ * {
+ * manufacturer: <string>,
+ * price: <double>,
+ * screenSize: <double>
+ * }
+ */
+function generateRandomDocument(docId) {
+ const manufacturers =
+ ["Sony", "Samsung", "LG", "Panasonic", "Mitsubishi", "Vizio", "Toshiba", "Sharp"];
+ const minPrice = 100;
+ const maxPrice = 4000;
+ const minScreenSize = 18;
+ const maxScreenSize = 40;
+
+ return {
+ _id: docId,
+ manufacturer: randomChoice(manufacturers),
+ price: Random.randInt(maxPrice - minPrice + 1) + minPrice,
+ screenSize: Random.randInt(maxScreenSize - minScreenSize + 1) + minScreenSize,
+ };
+}
- function doExecutionTest(conn) {
- var coll = conn.getDB(dbName).getCollection(collName);
- //
- // Compute the most common manufacturers, and the number of TVs in each price range.
- //
-
- // First compute each separately, to make sure we have the correct results.
- const manufacturerPipe = [
- {$sortByCount: "$manufacturer"},
- // Sort by count and then by _id in case there are two manufacturers with an equal
- // count.
- {$sort: {count: -1, _id: 1}},
- ];
- const bucketedPricePipe = [
- {
- $bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000},
- },
- {$sort: {count: -1}}
- ];
- const automaticallyBucketedPricePipe = [{$bucketAuto: {groupBy: "$price", buckets: 5}}];
-
- const mostCommonManufacturers = coll.aggregate(manufacturerPipe).toArray();
- const numTVsBucketedByPriceRange = coll.aggregate(bucketedPricePipe).toArray();
- const numTVsAutomaticallyBucketedByPriceRange =
- coll.aggregate(automaticallyBucketedPricePipe).toArray();
-
- const facetPipe = [{
- $facet: {
- manufacturers: manufacturerPipe,
- bucketedPrices: bucketedPricePipe,
- autoBucketedPrices: automaticallyBucketedPricePipe
- }
- }];
-
- // Then compute the results using $facet.
- const facetResult = coll.aggregate(facetPipe).toArray();
- assert.eq(facetResult.length, 1);
- const facetManufacturers = facetResult[0].manufacturers;
- const facetBucketedPrices = facetResult[0].bucketedPrices;
- const facetAutoBucketedPrices = facetResult[0].autoBucketedPrices;
-
- // Then assert they are the same.
- assert.eq(facetManufacturers, mostCommonManufacturers);
- assert.eq(facetBucketedPrices, numTVsBucketedByPriceRange);
- assert.eq(facetAutoBucketedPrices, numTVsAutomaticallyBucketedByPriceRange);
+/**
+ * Inserts 'nDocs' documents into the collection given by 'dbName' and 'collName'. Documents will
+ * have _ids in the range [0, nDocs).
+ */
+function populateData(conn, nDocs) {
+ var coll = conn.getDB(dbName).getCollection(collName);
+ coll.remove({}); // Don't drop the collection, since it might be sharded.
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < nDocs; i++) {
+ const doc = generateRandomDocument(i);
+ bulk.insert(doc);
}
-
- // Test against the standalone started by resmoke.py.
- const nDocs = 1000 * 10;
- const conn = db.getMongo();
- populateData(conn, nDocs);
- doExecutionTest(conn);
-
- // Test against a sharded cluster.
- const st = new ShardingTest({shards: 2});
- populateData(st.s0, nDocs);
- doExecutionTest(st.s0);
-
- const shardedDBName = "sharded";
- const shardedCollName = "collection";
- const shardedColl = st.getDB(shardedDBName).getCollection(shardedCollName);
- const unshardedColl = st.getDB(shardedDBName).getCollection(collName);
-
- assert.commandWorked(st.admin.runCommand({enableSharding: shardedDBName}));
- assert.commandWorked(
- st.admin.runCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
-
- // Test $lookup inside a $facet stage on a sharded collection.
- // Enable sharded $lookup.
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
- assert.commandWorked(unshardedColl.runCommand({
- aggregate: unshardedColl.getName(),
- pipeline: [{
- $facet: {
- a: [{
- $lookup: {
- from: shardedCollName,
- localField: "_id",
- foreignField: "_id",
- as: "results"
- }
- }]
- }
- }],
- cursor: {}
- }));
- // Disable sharded $lookup.
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
-
- // Then run the assertions against a sharded collection.
- assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
- assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
-
- // Make sure there is a chunk on each shard, so that our aggregations are targeted to multiple
- // shards.
- assert.commandWorked(st.admin.runCommand({split: testNs, middle: {_id: nDocs / 2}}));
- assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard0.shardName}));
- assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
-
- doExecutionTest(st.s0);
-
- st.stop();
+ assert.writeOK(bulk.execute());
+}
+
+function doExecutionTest(conn) {
+ var coll = conn.getDB(dbName).getCollection(collName);
+ //
+ // Compute the most common manufacturers, and the number of TVs in each price range.
+ //
+
+ // First compute each separately, to make sure we have the correct results.
+ const manufacturerPipe = [
+ {$sortByCount: "$manufacturer"},
+ // Sort by count and then by _id in case there are two manufacturers with an equal
+ // count.
+ {$sort: {count: -1, _id: 1}},
+ ];
+ const bucketedPricePipe = [
+ {
+ $bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000},
+ },
+ {$sort: {count: -1}}
+ ];
+ const automaticallyBucketedPricePipe = [{$bucketAuto: {groupBy: "$price", buckets: 5}}];
+
+ const mostCommonManufacturers = coll.aggregate(manufacturerPipe).toArray();
+ const numTVsBucketedByPriceRange = coll.aggregate(bucketedPricePipe).toArray();
+ const numTVsAutomaticallyBucketedByPriceRange =
+ coll.aggregate(automaticallyBucketedPricePipe).toArray();
+
+ const facetPipe = [{
+ $facet: {
+ manufacturers: manufacturerPipe,
+ bucketedPrices: bucketedPricePipe,
+ autoBucketedPrices: automaticallyBucketedPricePipe
+ }
+ }];
+
+ // Then compute the results using $facet.
+ const facetResult = coll.aggregate(facetPipe).toArray();
+ assert.eq(facetResult.length, 1);
+ const facetManufacturers = facetResult[0].manufacturers;
+ const facetBucketedPrices = facetResult[0].bucketedPrices;
+ const facetAutoBucketedPrices = facetResult[0].autoBucketedPrices;
+
+ // Then assert they are the same.
+ assert.eq(facetManufacturers, mostCommonManufacturers);
+ assert.eq(facetBucketedPrices, numTVsBucketedByPriceRange);
+ assert.eq(facetAutoBucketedPrices, numTVsAutomaticallyBucketedByPriceRange);
+}
+
+// Test against the standalone started by resmoke.py.
+const nDocs = 1000 * 10;
+const conn = db.getMongo();
+populateData(conn, nDocs);
+doExecutionTest(conn);
+
+// Test against a sharded cluster.
+const st = new ShardingTest({shards: 2});
+populateData(st.s0, nDocs);
+doExecutionTest(st.s0);
+
+const shardedDBName = "sharded";
+const shardedCollName = "collection";
+const shardedColl = st.getDB(shardedDBName).getCollection(shardedCollName);
+const unshardedColl = st.getDB(shardedDBName).getCollection(collName);
+
+assert.commandWorked(st.admin.runCommand({enableSharding: shardedDBName}));
+assert.commandWorked(
+ st.admin.runCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
+
+// Test $lookup inside a $facet stage on a sharded collection.
+// Enable sharded $lookup.
+setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
+assert.commandWorked(unshardedColl.runCommand({
+ aggregate: unshardedColl.getName(),
+ pipeline: [{
+ $facet: {
+ a: [{
+ $lookup:
+ {from: shardedCollName, localField: "_id", foreignField: "_id", as: "results"}
+ }]
+ }
+ }],
+ cursor: {}
+}));
+// Disable sharded $lookup.
+setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
+
+// Then run the assertions against a sharded collection.
+assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
+assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
+
+// Make sure there is a chunk on each shard, so that our aggregations are targeted to multiple
+// shards.
+assert.commandWorked(st.admin.runCommand({split: testNs, middle: {_id: nDocs / 2}}));
+assert.commandWorked(
+ st.admin.runCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard0.shardName}));
+assert.commandWorked(
+ st.admin.runCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
+
+doExecutionTest(st.s0);
+
+st.stop();
}());
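As background for the bucketing pipelines above: $bucket assigns each document to the half-open interval [lower, upper) containing its groupBy value, and any value outside the boundaries falls into the bucket named by "default". A small sketch (collection name and prices illustrative):

    // A price of 2500 exceeds the last boundary, so it lands in the bucket
    // whose _id is the literal default value 2000.
    db.tvs.aggregate([
        {$bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000}}
    ]);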
diff --git a/jstests/aggregation/sources/geonear/collation_geonear.js b/jstests/aggregation/sources/geonear/collation_geonear.js
index 076e0a8bea8..d4c47c1aec0 100644
--- a/jstests/aggregation/sources/geonear/collation_geonear.js
+++ b/jstests/aggregation/sources/geonear/collation_geonear.js
@@ -3,79 +3,81 @@
// Test that the $geoNear stage's query predicate respects the collation.
(function() {
- "use strict";
+"use strict";
- const caseInsensitive = {collation: {locale: "en_US", strength: 2}};
+const caseInsensitive = {
+ collation: {locale: "en_US", strength: 2}
+};
- var coll = db.collation_geonear;
- coll.drop();
- assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
- assert.writeOK(coll.insert({loc: [0, 0], str: "A"}));
+var coll = db.collation_geonear;
+coll.drop();
+assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
+assert.writeOK(coll.insert({loc: [0, 0], str: "A"}));
- // Test that the $geoNear agg stage respects an explicit collation.
- assert.eq(0,
- coll.aggregate([{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distanceField",
- spherical: true,
- query: {str: "a"},
- }
- }])
- .itcount());
- assert.eq(1,
- coll.aggregate([{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distanceField",
- spherical: true,
- query: {str: "a"},
- }
- }],
- caseInsensitive)
- .itcount());
+// Test that the $geoNear agg stage respects an explicit collation.
+assert.eq(0,
+ coll.aggregate([{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "distanceField",
+ spherical: true,
+ query: {str: "a"},
+ }
+ }])
+ .itcount());
+assert.eq(1,
+ coll.aggregate([{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "distanceField",
+ spherical: true,
+ query: {str: "a"},
+ }
+ }],
+ caseInsensitive)
+ .itcount());
- // Test that the collation parameter cannot be passed directly as a parameter of the $geoNear
- // stage.
- assert.throws(function() {
- coll.aggregate([{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distanceField",
- spherical: true,
- query: {str: "a"},
- collation: {locale: "en_US", strength: 2},
- }
- }]);
- });
+// Test that the collation parameter cannot be passed directly as a parameter of the $geoNear
+// stage.
+assert.throws(function() {
+ coll.aggregate([{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "distanceField",
+ spherical: true,
+ query: {str: "a"},
+ collation: {locale: "en_US", strength: 2},
+ }
+ }]);
+});
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
- assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
- assert.writeOK(coll.insert({loc: [0, 0], str: "A"}));
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
+assert.writeOK(coll.insert({loc: [0, 0], str: "A"}));
- // Test that the $geoNear agg stage respects an inherited collation.
- assert.eq(1,
- coll.aggregate([{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distanceField",
- spherical: true,
- query: {str: "a"},
- }
- }])
- .itcount());
+// Test that the $geoNear agg stage respects an inherited collation.
+assert.eq(1,
+ coll.aggregate([{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "distanceField",
+ spherical: true,
+ query: {str: "a"},
+ }
+ }])
+ .itcount());
- // Test that the the collection default can be overridden with the simple collation.
- assert.eq(0,
- coll.aggregate([{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distanceField",
- spherical: true,
- query: {str: "a"},
- }
- }],
- {collation: {locale: "simple"}})
- .itcount());
+// Test that the collection default can be overridden with the simple collation.
+assert.eq(0,
+ coll.aggregate([{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "distanceField",
+ spherical: true,
+ query: {str: "a"},
+ }
+ }],
+ {collation: {locale: "simple"}})
+ .itcount());
})();
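For reference, strength 2 in the collation spec makes string comparisons case-insensitive (base letters and diacritics still distinguish), while the "simple" locale restores plain binary comparison. A sketch of the two ways a collation reaches $geoNear in this test (collection name and pipeline contents illustrative):

    const pipeline = [{$match: {str: "a"}}];
    // Per-operation: passed as an option to aggregate().
    db.places.aggregate(pipeline, {collation: {locale: "en_US", strength: 2}});
    // Per-collection: a default set at creation time, inherited by later operations.
    db.createCollection("places2", {collation: {locale: "en_US", strength: 2}});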
diff --git a/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js b/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js
index 80e884c2c36..1ed2364ccb3 100644
--- a/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js
+++ b/jstests/aggregation/sources/geonear/distancefield_and_includelocs.js
@@ -3,167 +3,164 @@
* (specifically, by specifying nested fields, overriding existing fields, and so on).
*/
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For 'customDocumentEq'.
-
- const coll = db.getCollection("geonear_distancefield_and_includelocs");
- coll.drop();
-
- /**
- * Runs an aggregation with a $geoNear stage using 'geoSpec' and an optional $project stage
- * using 'projSpec'. Returns the first result; that is, the result closest to the "near" point.
- */
- function firstGeoNearResult(geoSpec, projSpec) {
- geoSpec.spherical = true;
- const pipeline = [{$geoNear: geoSpec}, {$limit: 1}];
- if (projSpec) {
- pipeline.push({$project: projSpec});
- }
-
- const res = coll.aggregate(pipeline).toArray();
- assert.eq(1, res.length, tojson(res));
- return res[0];
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For 'customDocumentEq'.
+
+const coll = db.getCollection("geonear_distancefield_and_includelocs");
+coll.drop();
+
+/**
+ * Runs an aggregation with a $geoNear stage using 'geoSpec' and an optional $project stage
+ * using 'projSpec'. Returns the first result; that is, the result closest to the "near" point.
+ */
+function firstGeoNearResult(geoSpec, projSpec) {
+ geoSpec.spherical = true;
+ const pipeline = [{$geoNear: geoSpec}, {$limit: 1}];
+ if (projSpec) {
+ pipeline.push({$project: projSpec});
}
- // Use documents with a variety of different fields: scalars, arrays, legacy points and GeoJSON
- // objects.
- const docWithLegacyPoint = {
- _id: "legacy",
- geo: [1, 1],
- ptForNearQuery: [1, 1],
- scalar: "foo",
- arr: [{a: 1, b: 1}, {a: 2, b: 2}],
- };
- const docWithGeoPoint = {
- _id: "point",
- geo: {type: "Point", coordinates: [1, 0]},
- ptForNearQuery: [1, 0],
- scalar: "bar",
- arr: [{a: 3, b: 3}, {a: 4, b: 4}],
- };
- const docWithGeoLine = {
- _id: "linestring",
- geo: {type: "LineString", coordinates: [[0, 0], [-1, -1]]},
- ptForNearQuery: [-1, -1],
- scalar: "baz",
- arr: [{a: 5, b: 5}, {a: 6, b: 6}],
- };
-
- // We test with a 2dsphere index, since 2d indexes can't support GeoJSON objects.
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
-
- // Populate the collection.
- assert.writeOK(coll.insert(docWithLegacyPoint));
- assert.writeOK(coll.insert(docWithGeoPoint));
- assert.writeOK(coll.insert(docWithGeoLine));
-
- // Define a custom way to compare documents since the results here might differ by insignificant
- // amounts.
- const assertCloseEnough = (left, right) =>
- assert(customDocumentEq({
- left: left,
- right: right,
- valueComparator: (a, b) => {
- if (typeof a !== "number") {
- return a === b;
- }
- // Allow some minor differences in the numbers.
- return Math.abs(a - b) < 1e-10;
- }
- }),
- () => `[${tojson(left)}] != [${tojson(right)}]`);
-
- [docWithLegacyPoint, docWithGeoPoint, docWithGeoLine].forEach(doc => {
- const docPlusNewFields = (newDoc) => Object.extend(Object.extend({}, doc), newDoc);
-
- //
- // Tests for "distanceField".
- //
- const expectedDistance = 0.0000000000000001;
-
- // Test that "distanceField" can be computed in a new field.
- assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "newField"}),
- docPlusNewFields({newField: expectedDistance}));
-
- // Test that "distanceField" can be computed in a new nested field.
- assertCloseEnough(
- firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "nested.field"}),
- docPlusNewFields({nested: {field: expectedDistance}}));
-
- // Test that "distanceField" can overwrite an existing scalar field.
- assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "scalar"}),
- docPlusNewFields({scalar: expectedDistance}));
-
- // Test that "distanceField" can completely overwrite an existing array field.
- assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "arr"}),
- docPlusNewFields({arr: expectedDistance}));
-
- // TODO (SERVER-35561): When "includeLocs" shares a path prefix with an existing field, the
- // fields are overwritten, even if they could be preserved.
- assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "arr.b"}),
- docPlusNewFields({arr: {b: expectedDistance}}));
-
- //
- // Tests for both "includeLocs" and "distanceField".
- //
-
- // Test that "distanceField" and "includeLocs" can both be specified.
- assertCloseEnough(
- firstGeoNearResult(
- {near: doc.ptForNearQuery, distanceField: "dist", includeLocs: "loc"}),
- docPlusNewFields({dist: expectedDistance, loc: doc.geo}));
-
- // Test that "distanceField" and "includeLocs" can be the same path. The result is arbitrary
- // ("includeLocs" wins).
- assertCloseEnough(
- firstGeoNearResult(
- {near: doc.ptForNearQuery, distanceField: "newField", includeLocs: "newField"}),
- docPlusNewFields({newField: doc.geo}));
-
- // Test that "distanceField" and "includeLocs" are both preserved when their paths share a
- // prefix but do not conflict.
- assertCloseEnough(
- firstGeoNearResult(
- {near: doc.ptForNearQuery, distanceField: "comp.dist", includeLocs: "comp.loc"}),
- docPlusNewFields({comp: {dist: expectedDistance, loc: doc.geo}}));
-
- //
- // Tests for "includeLocs" only. Project out the distance field.
- //
- const removeDistFieldProj = {d: 0};
-
- // Test that "includeLocs" can be computed in a new field.
- assertCloseEnough(
- firstGeoNearResult(
- {near: doc.ptForNearQuery, distanceField: "d", includeLocs: "newField"},
- removeDistFieldProj),
- docPlusNewFields({newField: doc.geo}));
-
- // Test that "includeLocs" can be computed in a new nested field.
- assertCloseEnough(
- firstGeoNearResult(
- {near: doc.ptForNearQuery, distanceField: "d", includeLocs: "nested.field"},
- removeDistFieldProj),
- docPlusNewFields({nested: {field: doc.geo}}));
-
- // Test that "includeLocs" can overwrite an existing scalar field.
- assertCloseEnough(firstGeoNearResult(
- {near: doc.ptForNearQuery, distanceField: "d", includeLocs: "scalar"},
- removeDistFieldProj),
- docPlusNewFields({scalar: doc.geo}));
-
- // Test that "includeLocs" can completely overwrite an existing array field.
- assertCloseEnough(
- firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "d", includeLocs: "arr"},
- removeDistFieldProj),
- docPlusNewFields({arr: doc.geo}));
-
- // TODO (SERVER-35561): When "includeLocs" shares a path prefix with an existing field, the
- // fields are overwritten, even if they could be preserved.
- assertCloseEnough(
- firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "d", includeLocs: "arr.a"},
- removeDistFieldProj),
- docPlusNewFields({arr: {a: doc.geo}}));
- });
+ const res = coll.aggregate(pipeline).toArray();
+ assert.eq(1, res.length, tojson(res));
+ return res[0];
+}
+
+// Use documents with a variety of fields: scalars, arrays, legacy points, and GeoJSON
+// objects.
+const docWithLegacyPoint = {
+ _id: "legacy",
+ geo: [1, 1],
+ ptForNearQuery: [1, 1],
+ scalar: "foo",
+ arr: [{a: 1, b: 1}, {a: 2, b: 2}],
+};
+const docWithGeoPoint = {
+ _id: "point",
+ geo: {type: "Point", coordinates: [1, 0]},
+ ptForNearQuery: [1, 0],
+ scalar: "bar",
+ arr: [{a: 3, b: 3}, {a: 4, b: 4}],
+};
+const docWithGeoLine = {
+ _id: "linestring",
+ geo: {type: "LineString", coordinates: [[0, 0], [-1, -1]]},
+ ptForNearQuery: [-1, -1],
+ scalar: "baz",
+ arr: [{a: 5, b: 5}, {a: 6, b: 6}],
+};
+
+// We test with a 2dsphere index, since 2d indexes can't support GeoJSON objects.
+assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
+
+// Populate the collection.
+assert.writeOK(coll.insert(docWithLegacyPoint));
+assert.writeOK(coll.insert(docWithGeoPoint));
+assert.writeOK(coll.insert(docWithGeoLine));
+
+// Define a custom way to compare documents since the results here might differ by insignificant
+// amounts.
+const assertCloseEnough = (left, right) => assert(customDocumentEq({
+ left: left,
+ right: right,
+ valueComparator: (a, b) => {
+ if (typeof a !== "number") {
+ return a === b;
+ }
+ // Allow some minor differences in the
+ // numbers.
+ return Math.abs(a - b) < 1e-10;
+ }
+ }),
+ () => `[${tojson(left)}] != [${tojson(right)}]`);
+
+[docWithLegacyPoint, docWithGeoPoint, docWithGeoLine].forEach(doc => {
+ const docPlusNewFields = (newDoc) => Object.extend(Object.extend({}, doc), newDoc);
+
+ //
+ // Tests for "distanceField".
+ //
+ const expectedDistance = 0.0000000000000001;
+
+ // Test that "distanceField" can be computed in a new field.
+ assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "newField"}),
+ docPlusNewFields({newField: expectedDistance}));
+
+ // Test that "distanceField" can be computed in a new nested field.
+ assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "nested.field"}),
+ docPlusNewFields({nested: {field: expectedDistance}}));
+
+ // Test that "distanceField" can overwrite an existing scalar field.
+ assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "scalar"}),
+ docPlusNewFields({scalar: expectedDistance}));
+
+ // Test that "distanceField" can completely overwrite an existing array field.
+ assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "arr"}),
+ docPlusNewFields({arr: expectedDistance}));
+
+    // TODO (SERVER-35561): When "distanceField" shares a path prefix with an existing field, the
+ // fields are overwritten, even if they could be preserved.
+ assertCloseEnough(firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "arr.b"}),
+ docPlusNewFields({arr: {b: expectedDistance}}));
+
+ //
+ // Tests for both "includeLocs" and "distanceField".
+ //
+
+ // Test that "distanceField" and "includeLocs" can both be specified.
+ assertCloseEnough(
+ firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "dist", includeLocs: "loc"}),
+ docPlusNewFields({dist: expectedDistance, loc: doc.geo}));
+
+ // Test that "distanceField" and "includeLocs" can be the same path. The result is arbitrary
+ // ("includeLocs" wins).
+ assertCloseEnough(
+ firstGeoNearResult(
+ {near: doc.ptForNearQuery, distanceField: "newField", includeLocs: "newField"}),
+ docPlusNewFields({newField: doc.geo}));
+
+ // Test that "distanceField" and "includeLocs" are both preserved when their paths share a
+ // prefix but do not conflict.
+ assertCloseEnough(
+ firstGeoNearResult(
+ {near: doc.ptForNearQuery, distanceField: "comp.dist", includeLocs: "comp.loc"}),
+ docPlusNewFields({comp: {dist: expectedDistance, loc: doc.geo}}));
+
+ //
+ // Tests for "includeLocs" only. Project out the distance field.
+ //
+ const removeDistFieldProj = {d: 0};
+
+ // Test that "includeLocs" can be computed in a new field.
+ assertCloseEnough(
+ firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "d", includeLocs: "newField"},
+ removeDistFieldProj),
+ docPlusNewFields({newField: doc.geo}));
+
+ // Test that "includeLocs" can be computed in a new nested field.
+ assertCloseEnough(
+ firstGeoNearResult(
+ {near: doc.ptForNearQuery, distanceField: "d", includeLocs: "nested.field"},
+ removeDistFieldProj),
+ docPlusNewFields({nested: {field: doc.geo}}));
+
+ // Test that "includeLocs" can overwrite an existing scalar field.
+ assertCloseEnough(
+ firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "d", includeLocs: "scalar"},
+ removeDistFieldProj),
+ docPlusNewFields({scalar: doc.geo}));
+
+ // Test that "includeLocs" can completely overwrite an existing array field.
+ assertCloseEnough(
+ firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "d", includeLocs: "arr"},
+ removeDistFieldProj),
+ docPlusNewFields({arr: doc.geo}));
+
+ // TODO (SERVER-35561): When "includeLocs" shares a path prefix with an existing field, the
+ // fields are overwritten, even if they could be preserved.
+ assertCloseEnough(
+ firstGeoNearResult({near: doc.ptForNearQuery, distanceField: "d", includeLocs: "arr.a"},
+ removeDistFieldProj),
+ docPlusNewFields({arr: {a: doc.geo}}));
+});
}());
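Summarizing the behavior under test: $geoNear writes the computed distance at the path named by "distanceField" and, when "includeLocs" is given, the matched location at that path; dotted paths create nested objects, and non-conflicting paths under a shared prefix are both preserved. A minimal sketch (collection name illustrative):

    db.points.aggregate([{
        $geoNear: {
            near: [0, 0],
            distanceField: "meta.dist",  // produces {meta: {dist: <number>, ...}}
            includeLocs: "meta.loc",     // produces {meta: {..., loc: <matched geometry>}}
            spherical: true
        }
    }]);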
diff --git a/jstests/aggregation/sources/geonear/mindistance_and_maxdistance.js b/jstests/aggregation/sources/geonear/mindistance_and_maxdistance.js
index 7c5e6c750f3..99262902d3e 100644
--- a/jstests/aggregation/sources/geonear/mindistance_and_maxdistance.js
+++ b/jstests/aggregation/sources/geonear/mindistance_and_maxdistance.js
@@ -2,98 +2,103 @@
* Tests the behavior of the $geoNear stage with varying values of 'minDistance' and 'maxDistance'.
*/
(function() {
- "use strict";
-
- const coll = db.getCollection("geonear_mindistance_maxdistance");
-
- const kMaxDistance = Math.PI * 2.0;
-
- // Test points that are exactly at the "near" point, close to the point, and far from the point.
- // Distances are purposely chosen to be small so that distances in meters and radians are close.
- const origin = {pt: [0, 0]};
- const near = {pt: [0.23, -0.32]};
- const far = {pt: [5.9, 0.0]};
-
- ["2d", "2dsphere"].forEach(geoType => {
- jsTestLog(`Testing $geoNear with index {pt: "${geoType}"}`);
- coll.drop();
-
- // Create the desired index type and populate the collection.
- assert.commandWorked(coll.createIndex({pt: geoType}));
- [origin, near, far].forEach(doc => {
- doc.distFromOrigin = (geoType === "2dsphere") ? Geo.sphereDistance(doc.pt, origin.pt)
- : Geo.distance(doc.pt, origin.pt);
- assert.commandWorked(coll.insert(doc));
- });
-
- /**
- * Helper function that runs a $geoNear aggregation near the origin, setting the minimum
- * and/or maximum search distance using the object 'minMaxOpts', and asserting that the
- * results match 'expected'.
- */
- function assertGeoNearResults(minMaxOpts, expected) {
- const geoNearStage = {
- $geoNear: Object.extend(
- {near: origin.pt, distanceField: "dist", spherical: (geoType === "2dsphere")},
- minMaxOpts)
- };
- const projStage = {$project: {_id: 0, dist: 0}};
- const res = coll.aggregate([geoNearStage, projStage]).toArray();
- assert.eq(
- res,
- expected,
- () => `Unexpected results from ${tojson(geoNearStage)} using a ${geoType} index`);
- }
-
- // If no minimum nor maximum distance is set, all points are returned.
- assertGeoNearResults({}, [origin, near, far]);
-
- //
- // Tests for minDistance.
- //
-
- // Negative values and non-numeric values are illegal.
- assert.throws(() => assertGeoNearResults({minDistance: -1.1}));
- assert.throws(() => assertGeoNearResults({minDistance: "3.2"}));
-
- // A minimum distance of 0 returns all points.
- assertGeoNearResults({minDistance: -0.0}, [origin, near, far]);
- assertGeoNearResults({minDistance: 0.0}, [origin, near, far]);
-
- // Larger minimum distances exclude closer points.
- assertGeoNearResults({minDistance: (near.distFromOrigin / 2)}, [near, far]);
- assertGeoNearResults({minDistance: (far.distFromOrigin / 2)}, [far]);
- assertGeoNearResults({minDistance: kMaxDistance}, []);
-
- //
- // Tests for maxDistance.
- //
-
- // Negative values and non-numeric values are illegal.
- assert.throws(() => assertGeoNearResults({maxDistance: -1.1}));
- assert.throws(() => assertGeoNearResults({maxDistance: "3.2"}));
-
- // A maximum distance of 0 returns only the origin.
- assertGeoNearResults({maxDistance: 0.0}, [origin]);
- assertGeoNearResults({maxDistance: -0.0}, [origin]);
-
- // Larger maximum distances include more points.
- assertGeoNearResults({maxDistance: (near.distFromOrigin + 0.01)}, [origin, near]);
- assertGeoNearResults({maxDistance: (far.distFromOrigin + 0.01)}, [origin, near, far]);
-
- //
- // Tests for minDistance and maxDistance together.
- //
-
- // Cast a wide net and all points should be returned.
- assertGeoNearResults({minDistance: 0.0, maxDistance: kMaxDistance}, [origin, near, far]);
-
- // A narrower range excludes the origin and the far point.
- assertGeoNearResults(
- {minDistance: (near.distFromOrigin / 2), maxDistance: (near.distFromOrigin + 0.01)},
- [near]);
-
- // An impossible range is legal but returns no results.
- assertGeoNearResults({minDistance: 3.0, maxDistance: 1.0}, []);
+"use strict";
+
+const coll = db.getCollection("geonear_mindistance_maxdistance");
+
+const kMaxDistance = Math.PI * 2.0;
+
+// Test points that are exactly at the "near" point, close to the point, and far from the point.
+// Distances are purposely chosen to be small so that distances in meters and radians are close.
+const origin = {
+ pt: [0, 0]
+};
+const near = {
+ pt: [0.23, -0.32]
+};
+const far = {
+ pt: [5.9, 0.0]
+};
+
+["2d", "2dsphere"].forEach(geoType => {
+ jsTestLog(`Testing $geoNear with index {pt: "${geoType}"}`);
+ coll.drop();
+
+ // Create the desired index type and populate the collection.
+ assert.commandWorked(coll.createIndex({pt: geoType}));
+ [origin, near, far].forEach(doc => {
+ doc.distFromOrigin = (geoType === "2dsphere") ? Geo.sphereDistance(doc.pt, origin.pt)
+ : Geo.distance(doc.pt, origin.pt);
+ assert.commandWorked(coll.insert(doc));
});
+
+ /**
+ * Helper function that runs a $geoNear aggregation near the origin, setting the minimum
+ * and/or maximum search distance using the object 'minMaxOpts', and asserting that the
+ * results match 'expected'.
+ */
+ function assertGeoNearResults(minMaxOpts, expected) {
+ const geoNearStage = {
+ $geoNear: Object.extend(
+ {near: origin.pt, distanceField: "dist", spherical: (geoType === "2dsphere")},
+ minMaxOpts)
+ };
+ const projStage = {$project: {_id: 0, dist: 0}};
+ const res = coll.aggregate([geoNearStage, projStage]).toArray();
+ assert.eq(res,
+ expected,
+ () => `Unexpected results from ${tojson(geoNearStage)} using a ${geoType} index`);
+ }
+
+    // If neither a minimum nor a maximum distance is set, all points are returned.
+ assertGeoNearResults({}, [origin, near, far]);
+
+ //
+ // Tests for minDistance.
+ //
+
+ // Negative values and non-numeric values are illegal.
+ assert.throws(() => assertGeoNearResults({minDistance: -1.1}));
+ assert.throws(() => assertGeoNearResults({minDistance: "3.2"}));
+
+ // A minimum distance of 0 returns all points.
+ assertGeoNearResults({minDistance: -0.0}, [origin, near, far]);
+ assertGeoNearResults({minDistance: 0.0}, [origin, near, far]);
+
+ // Larger minimum distances exclude closer points.
+ assertGeoNearResults({minDistance: (near.distFromOrigin / 2)}, [near, far]);
+ assertGeoNearResults({minDistance: (far.distFromOrigin / 2)}, [far]);
+ assertGeoNearResults({minDistance: kMaxDistance}, []);
+
+ //
+ // Tests for maxDistance.
+ //
+
+ // Negative values and non-numeric values are illegal.
+ assert.throws(() => assertGeoNearResults({maxDistance: -1.1}));
+ assert.throws(() => assertGeoNearResults({maxDistance: "3.2"}));
+
+ // A maximum distance of 0 returns only the origin.
+ assertGeoNearResults({maxDistance: 0.0}, [origin]);
+ assertGeoNearResults({maxDistance: -0.0}, [origin]);
+
+ // Larger maximum distances include more points.
+ assertGeoNearResults({maxDistance: (near.distFromOrigin + 0.01)}, [origin, near]);
+ assertGeoNearResults({maxDistance: (far.distFromOrigin + 0.01)}, [origin, near, far]);
+
+ //
+ // Tests for minDistance and maxDistance together.
+ //
+
+ // Cast a wide net and all points should be returned.
+ assertGeoNearResults({minDistance: 0.0, maxDistance: kMaxDistance}, [origin, near, far]);
+
+ // A narrower range excludes the origin and the far point.
+ assertGeoNearResults(
+ {minDistance: (near.distFromOrigin / 2), maxDistance: (near.distFromOrigin + 0.01)},
+ [near]);
+
+ // An impossible range is legal but returns no results.
+ assertGeoNearResults({minDistance: 3.0, maxDistance: 1.0}, []);
+});
}());
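The two options compose as a closed range: a point is returned only when minDistance <= distance <= maxDistance, so an inverted range is legal but matches nothing. A sketch of the resulting annulus query (values illustrative):

    // Only points between 0.1 and 1.0 from the origin survive
    // (units depend on the point format and index type).
    db.points.aggregate([{
        $geoNear: {
            near: [0, 0],
            distanceField: "dist",
            spherical: true,
            minDistance: 0.1,
            maxDistance: 1.0
        }
    }]);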
diff --git a/jstests/aggregation/sources/geonear/requires_geo_index.js b/jstests/aggregation/sources/geonear/requires_geo_index.js
index e2c3a1d9706..f8380eb27a4 100644
--- a/jstests/aggregation/sources/geonear/requires_geo_index.js
+++ b/jstests/aggregation/sources/geonear/requires_geo_index.js
@@ -2,21 +2,21 @@
// TODO: Reenable test on passthroughs with sharded collections as part of SERVER-38995.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
+load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
- const coll = db.coll;
- const from = db.from;
+const coll = db.coll;
+const from = db.from;
- coll.drop();
- from.drop();
+coll.drop();
+from.drop();
- const geonearPipeline = [
- {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
- ];
+const geonearPipeline = [
+ {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
+];
- const geonearWithinLookupPipeline = [
+const geonearWithinLookupPipeline = [
{
$lookup: {
pipeline: geonearPipeline,
@@ -26,16 +26,16 @@
},
];
- assert.commandWorked(coll.insert({_id: 5, x: 5}));
- assert.commandWorked(from.insert({_id: 1, geo: [0, 0]}));
+assert.commandWorked(coll.insert({_id: 5, x: 5}));
+assert.commandWorked(from.insert({_id: 1, geo: [0, 0]}));
- // Fail without index.
- assertErrorCode(from, geonearPipeline, ErrorCodes.IndexNotFound);
- assertErrorCode(coll, geonearWithinLookupPipeline, ErrorCodes.IndexNotFound);
+// Fail without index.
+assertErrorCode(from, geonearPipeline, ErrorCodes.IndexNotFound);
+assertErrorCode(coll, geonearWithinLookupPipeline, ErrorCodes.IndexNotFound);
- assert.commandWorked(from.createIndex({geo: "2dsphere"}));
+assert.commandWorked(from.createIndex({geo: "2dsphere"}));
- // Run successfully when you have the geospatial index.
- assert.eq(from.aggregate(geonearPipeline).itcount(), 1);
- assert.eq(coll.aggregate(geonearWithinLookupPipeline).itcount(), 1);
+// Run successfully when you have the geospatial index.
+assert.eq(from.aggregate(geonearPipeline).itcount(), 1);
+assert.eq(coll.aggregate(geonearWithinLookupPipeline).itcount(), 1);
}());
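The assertErrorCode helper used here wraps the expectation that an aggregation fails with a given code. A hedged sketch of roughly the same check written against runCommand directly (not the helper's actual implementation):

    const res = db.runCommand({
        aggregate: "from",
        pipeline: [{$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}}],
        cursor: {}
    });
    assert.commandFailedWithCode(res, ErrorCodes.IndexNotFound);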
diff --git a/jstests/aggregation/sources/graphLookup/airports.js b/jstests/aggregation/sources/graphLookup/airports.js
index 9254fd992fa..779678b07da 100644
--- a/jstests/aggregation/sources/graphLookup/airports.js
+++ b/jstests/aggregation/sources/graphLookup/airports.js
@@ -5,36 +5,36 @@
// In MongoDB 3.4, $graphLookup was introduced. In this file, we test some complex graphs.
(function() {
- "use strict";
+"use strict";
- var local = db.local;
- var foreign = db.foreign;
+var local = db.local;
+var foreign = db.foreign;
- local.drop();
- foreign.drop();
+local.drop();
+foreign.drop();
- var airports = [
- {_id: "JFK", connects: ["PWM", "BOS", "LGA", "SFO"]},
- {_id: "PWM", connects: ["BOS", "JFK"]},
- {_id: "BOS", connects: ["PWM", "JFK", "LGA"]},
- {_id: "SFO", connects: ["JFK", "MIA"]},
- {_id: "LGA", connects: ["BOS", "JFK", "ORD"]},
- {_id: "ORD", connects: ["LGA"]},
- {_id: "ATL", connects: ["MIA"]},
- {_id: "MIA", connects: ["ATL", "SFO"]}
- ];
+var airports = [
+ {_id: "JFK", connects: ["PWM", "BOS", "LGA", "SFO"]},
+ {_id: "PWM", connects: ["BOS", "JFK"]},
+ {_id: "BOS", connects: ["PWM", "JFK", "LGA"]},
+ {_id: "SFO", connects: ["JFK", "MIA"]},
+ {_id: "LGA", connects: ["BOS", "JFK", "ORD"]},
+ {_id: "ORD", connects: ["LGA"]},
+ {_id: "ATL", connects: ["MIA"]},
+ {_id: "MIA", connects: ["ATL", "SFO"]}
+];
- var bulk = foreign.initializeUnorderedBulkOp();
- airports.forEach(function(a) {
- bulk.insert(a);
- });
- assert.writeOK(bulk.execute());
+var bulk = foreign.initializeUnorderedBulkOp();
+airports.forEach(function(a) {
+ bulk.insert(a);
+});
+assert.writeOK(bulk.execute());
- // Insert a dummy document so that something will flow through the pipeline.
- local.insert({});
+// Insert a dummy document so that something will flow through the pipeline.
+local.insert({});
- // Perform a simple $graphLookup and ensure it retrieves every result.
- var res = local
+// Perform a simple $graphLookup and ensure it retrieves every result.
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -46,12 +46,12 @@
})
.toArray()[0];
- // "foreign" represents a connected graph.
- assert.eq(res.connections.length, airports.length);
+// "foreign" represents a connected graph.
+assert.eq(res.connections.length, airports.length);
- // Perform a $graphLookup and ensure it correctly computes the shortest path to a node when more
- // than one path exists.
- res = local
+// Perform a $graphLookup and ensure it correctly computes the shortest path to a node when more
+// than one path exists.
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -66,17 +66,17 @@
{$project: {_id: "$connections._id", hops: "$connections.hops"}})
.toArray();
- var expectedDistances = {BOS: 0, PWM: 1, JFK: 1, LGA: 1, ORD: 2, SFO: 2, MIA: 3, ATL: 4};
+var expectedDistances = {BOS: 0, PWM: 1, JFK: 1, LGA: 1, ORD: 2, SFO: 2, MIA: 3, ATL: 4};
- assert.eq(res.length, airports.length);
- res.forEach(function(c) {
- assert.eq(c.hops, expectedDistances[c._id]);
- });
+assert.eq(res.length, airports.length);
+res.forEach(function(c) {
+ assert.eq(c.hops, expectedDistances[c._id]);
+});
- // Disconnect the graph, and ensure we don't find the other side.
- foreign.remove({_id: "JFK"});
+// Disconnect the graph, and ensure we don't find the other side.
+foreign.remove({_id: "JFK"});
- res = db.local
+res = db.local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -88,6 +88,6 @@
})
.toArray()[0];
- // ATL should now connect to itself, MIA, and SFO.
- assert.eq(res.connections.length, 3);
+// ATL should now connect to itself, MIA, and SFO.
+assert.eq(res.connections.length, 3);
}());
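The hop-count assertions above rely on $graphLookup's breadth-first traversal: "depthField" records the depth at which a node is first reached, which for a BFS is the length of the shortest path from any start node. Flattening the result for inspection follows the same $unwind/$project pattern the test uses:

    // One output document per reachable airport, with its minimum hop count.
    db.local.aggregate([
        {$graphLookup: {from: "foreign", startWith: "BOS", connectFromField: "connects",
                        connectToField: "_id", depthField: "hops", as: "connections"}},
        {$unwind: "$connections"},
        {$project: {_id: "$connections._id", hops: "$connections.hops"}}
    ]);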
diff --git a/jstests/aggregation/sources/graphLookup/basic.js b/jstests/aggregation/sources/graphLookup/basic.js
index c0bcb1a8a53..ef44b9b60bb 100644
--- a/jstests/aggregation/sources/graphLookup/basic.js
+++ b/jstests/aggregation/sources/graphLookup/basic.js
@@ -6,16 +6,16 @@
// of the stage.
(function() {
- "use strict";
+"use strict";
- var local = db.local;
- var foreign = db.foreign;
+var local = db.local;
+var foreign = db.foreign;
- local.drop();
- foreign.drop();
+local.drop();
+foreign.drop();
- // Ensure a $graphLookup works even if one of the involved collections doesn't exist.
- const basicGraphLookup = {
+// Ensure a $graphLookup works even if one of the involved collections doesn't exist.
+const basicGraphLookup = {
$graphLookup: {
from: "foreign",
startWith: "$starting",
@@ -25,40 +25,39 @@
}
};
- assert.eq(
- local.aggregate([basicGraphLookup]).toArray().length,
- 0,
- "expected an empty result set for a $graphLookup with non-existent local and foreign " +
- "collections");
+assert.eq(local.aggregate([basicGraphLookup]).toArray().length,
+ 0,
+ "expected an empty result set for a $graphLookup with non-existent local and foreign " +
+ "collections");
- assert.writeOK(foreign.insert({}));
+assert.writeOK(foreign.insert({}));
- assert.eq(local.aggregate([basicGraphLookup]).toArray().length,
- 0,
- "expected an empty result set for a $graphLookup on a non-existent local collection");
+assert.eq(local.aggregate([basicGraphLookup]).toArray().length,
+ 0,
+ "expected an empty result set for a $graphLookup on a non-existent local collection");
- local.drop();
- foreign.drop();
+local.drop();
+foreign.drop();
- assert.writeOK(local.insert({_id: 0}));
+assert.writeOK(local.insert({_id: 0}));
- assert.eq(local.aggregate([basicGraphLookup]).toArray(),
- [{_id: 0, results: []}],
- "expected $graphLookup to succeed with a non-existent foreign collection");
+assert.eq(local.aggregate([basicGraphLookup]).toArray(),
+ [{_id: 0, results: []}],
+ "expected $graphLookup to succeed with a non-existent foreign collection");
- local.drop();
- foreign.drop();
+local.drop();
+foreign.drop();
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, neighbors: [i - 1, i + 1]});
- }
- assert.writeOK(bulk.execute());
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, neighbors: [i - 1, i + 1]});
+}
+assert.writeOK(bulk.execute());
- assert.writeOK(local.insert({starting: 50}));
+assert.writeOK(local.insert({starting: 50}));
- // Perform a simple $graphLookup and ensure it retrieves every result.
- var res = local
+// Perform a simple $graphLookup and ensure it retrieves every result.
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -70,10 +69,10 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 100);
+assert.eq(res.integers.length, 100);
- // Perform a $graphLookup and ensure it respects "maxDepth".
- res = local
+// Perform a $graphLookup and ensure it respects "maxDepth".
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -86,11 +85,11 @@
})
.toArray()[0];
- // At depth zero, we retrieve one integer, and two for every depth thereafter.
- assert.eq(res.integers.length, 11);
+// At depth zero, we retrieve one integer, and two for every depth thereafter.
+assert.eq(res.integers.length, 11);
- // Perform a $graphLookup and ensure it properly evaluates "startWith".
- res = local
+// Perform a $graphLookup and ensure it properly evaluates "startWith".
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -103,11 +102,11 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 1);
- assert.eq(res.integers[0]._id, 53);
+assert.eq(res.integers.length, 1);
+assert.eq(res.integers[0]._id, 53);
- // Perform a $graphLookup and ensure it properly expands "startWith".
- res = local
+// Perform a $graphLookup and ensure it properly expands "startWith".
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -120,17 +119,17 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 3);
+assert.eq(res.integers.length, 3);
- // $graphLookup should not recurse when the 'connectFromField' is missing. However, if it
- // mistakenly does, then it would look for a 'connectToField' value of null. In order to prevent
- // regressions, we insert a document with a 'connectToField' value of null, then perform a
- // $graphLookup, and ensure that we do not find the erroneous document.
- assert.writeOK(foreign.remove({_id: 51}));
- assert.writeOK(foreign.insert({_id: 51}));
- assert.writeOK(foreign.insert({_id: null, neighbors: [50, 52]}));
+// $graphLookup should not recurse when the 'connectFromField' is missing. However, if it
+// mistakenly does, then it would look for a 'connectToField' value of null. In order to prevent
+// regressions, we insert a document with a 'connectToField' value of null, then perform a
+// $graphLookup, and ensure that we do not find the erroneous document.
+assert.writeOK(foreign.remove({_id: 51}));
+assert.writeOK(foreign.insert({_id: 51}));
+assert.writeOK(foreign.insert({_id: null, neighbors: [50, 52]}));
- res = local
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -142,17 +141,17 @@
})
.toArray()[0];
- // Our result should be missing the values with _id from 52 to 99.
- assert.eq(res.integers.length, 52);
+// Our result should be missing the values with _id from 52 to 99.
+assert.eq(res.integers.length, 52);
- // Perform a $graphLookup and ensure we don't go into an infinite loop when our graph is cyclic.
- assert.writeOK(foreign.remove({_id: {$in: [null, 51]}}));
- assert.writeOK(foreign.insert({_id: 51, neighbors: [50, 52]}));
+// Perform a $graphLookup and ensure we don't go into an infinite loop when our graph is cyclic.
+assert.writeOK(foreign.remove({_id: {$in: [null, 51]}}));
+assert.writeOK(foreign.insert({_id: 51, neighbors: [50, 52]}));
- assert.writeOK(foreign.update({_id: 99}, {$set: {neighbors: [98, 0]}}));
- assert.writeOK(foreign.update({_id: 0}, {$set: {neighbors: [99, 1]}}));
+assert.writeOK(foreign.update({_id: 99}, {$set: {neighbors: [98, 0]}}));
+assert.writeOK(foreign.update({_id: 0}, {$set: {neighbors: [99, 1]}}));
- res = local
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -164,10 +163,10 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 100);
+assert.eq(res.integers.length, 100);
- // Perform a $graphLookup and ensure that "depthField" is properly populated.
- res = local
+// Perform a $graphLookup and ensure that "depthField" is properly populated.
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -180,9 +179,9 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 100);
+assert.eq(res.integers.length, 100);
- res.integers.forEach(function(n) {
- assert.eq(n.distance, Math.abs(50 - n._id));
- });
+res.integers.forEach(function(n) {
+ assert.eq(n.distance, Math.abs(50 - n._id));
+});
}());
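The expected count of 11 for maxDepth: 5 is worth spelling out: on the 0..99 line graph, depth 0 contributes the start node, and each further depth level adds one new neighbor on each side. A quick sketch of that arithmetic (helper name hypothetical):

    // 1 start node + 2 new nodes per depth level on an unbounded line graph.
    function expectedLineGraphCount(maxDepth) {
        return 1 + 2 * maxDepth;
    }
    assert.eq(expectedLineGraphCount(5), 11);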
diff --git a/jstests/aggregation/sources/graphLookup/collation_graphlookup.js b/jstests/aggregation/sources/graphLookup/collation_graphlookup.js
index 7b457289cc6..f3fbcf2ee34 100644
--- a/jstests/aggregation/sources/graphLookup/collation_graphlookup.js
+++ b/jstests/aggregation/sources/graphLookup/collation_graphlookup.js
@@ -8,22 +8,26 @@
* set on the aggregation, or the default collation of the collection.
*/
(function() {
- "use strict";
+"use strict";
- var res;
- const caseInsensitiveUS = {collation: {locale: "en_US", strength: 2}};
- const caseSensitiveUS = {collation: {locale: "en_US", strength: 3}};
+var res;
+const caseInsensitiveUS = {
+ collation: {locale: "en_US", strength: 2}
+};
+const caseSensitiveUS = {
+ collation: {locale: "en_US", strength: 3}
+};
- var coll = db.collation_graphlookup;
- var foreignColl = db.collation_graphlookup_foreign;
+var coll = db.collation_graphlookup;
+var foreignColl = db.collation_graphlookup_foreign;
- // Test that $graphLookup respects the collation set on the aggregation pipeline. Case
- // insensitivity should mean that we find both "jeremy" and "jimmy" as friends.
- coll.drop();
- assert.writeOK(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
- assert.writeOK(coll.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
+// Test that $graphLookup respects the collation set on the aggregation pipeline. Case
+// insensitivity should mean that we find both "jeremy" and "jimmy" as friends.
+coll.drop();
+assert.writeOK(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
+assert.writeOK(coll.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
- res = coll.aggregate(
+res = coll.aggregate(
[
{$match: {username: "erica"}},
{
@@ -38,12 +42,12 @@
],
caseInsensitiveUS)
.toArray();
- assert.eq(1, res.length);
- assert.eq("erica", res[0].username);
- assert.eq(2, res[0].friendUsers.length);
+assert.eq(1, res.length);
+assert.eq("erica", res[0].username);
+assert.eq(2, res[0].friendUsers.length);
- // Negative test: ensure that we don't find any friends when the collation is simple.
- res = coll.aggregate([
+// Negative test: ensure that we don't find any friends when the collation is simple.
+res = coll.aggregate([
{$match: {username: "erica"}},
{
$graphLookup: {
@@ -56,20 +60,20 @@
}
])
.toArray();
- assert.eq(1, res.length);
- assert.eq("erica", res[0].username);
- assert.eq(0, res[0].friendUsers.length);
+assert.eq(1, res.length);
+assert.eq("erica", res[0].username);
+assert.eq(0, res[0].friendUsers.length);
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitiveUS));
- assert.writeOK(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
- foreignColl.drop();
- assert.commandWorked(db.createCollection(foreignColl.getName(), caseSensitiveUS));
- assert.writeOK(foreignColl.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitiveUS));
+assert.writeOK(coll.insert({username: "erica", friends: ["jeremy", "jimmy"]}));
+foreignColl.drop();
+assert.commandWorked(db.createCollection(foreignColl.getName(), caseSensitiveUS));
+assert.writeOK(foreignColl.insert([{username: "JEREMY"}, {username: "JIMMY"}]));
- // Test that $graphLookup inherits the default collation of the collection on which it is run,
- // and that this collation is used instead of the default collation of the foreign collection.
- res = coll.aggregate([
+// Test that $graphLookup inherits the default collation of the collection on which it is run,
+// and that this collation is used instead of the default collation of the foreign collection.
+res = coll.aggregate([
{$match: {username: "erica"}},
{
$graphLookup: {
@@ -82,18 +86,18 @@
}
])
.toArray();
- assert.eq(1, res.length);
- assert.eq("erica", res[0].username);
- assert.eq(2, res[0].friendUsers.length);
+assert.eq(1, res.length);
+assert.eq("erica", res[0].username);
+assert.eq(2, res[0].friendUsers.length);
- // Test that we don't use the collation to dedup string _id values. This would cause us to miss
- // nodes in the graph that have distinct _id values which compare equal under the collation.
- coll.drop();
- assert.writeOK(coll.insert({username: "erica", friends: ["jeremy"]}));
- assert.writeOK(coll.insert({_id: "foo", username: "JEREMY", friends: ["jimmy"]}));
- assert.writeOK(coll.insert({_id: "FOO", username: "jimmy", friends: []}));
+// Test that we don't use the collation to dedup string _id values. This would cause us to miss
+// nodes in the graph that have distinct _id values which compare equal under the collation.
+coll.drop();
+assert.writeOK(coll.insert({username: "erica", friends: ["jeremy"]}));
+assert.writeOK(coll.insert({_id: "foo", username: "JEREMY", friends: ["jimmy"]}));
+assert.writeOK(coll.insert({_id: "FOO", username: "jimmy", friends: []}));
- res = coll.aggregate(
+res = coll.aggregate(
[
{$match: {username: "erica"}},
{
@@ -108,18 +112,18 @@
],
caseInsensitiveUS)
.toArray();
- assert.eq(1, res.length);
- assert.eq("erica", res[0].username);
- assert.eq(2, res[0].friendUsers.length);
+assert.eq(1, res.length);
+assert.eq("erica", res[0].username);
+assert.eq(2, res[0].friendUsers.length);
- // Test that the result set is not deduplicated under the collation. If two documents are
- // entirely equal under the collation, they should still both get returned in the "as" field.
- coll.drop();
- assert.writeOK(coll.insert({username: "erica", friends: ["jeremy"]}));
- assert.writeOK(coll.insert({_id: "foo", username: "jeremy"}));
- assert.writeOK(coll.insert({_id: "FOO", username: "JEREMY"}));
+// Test that the result set is not deduplicated under the collation. If two documents are
+// entirely equal under the collation, they should still both get returned in the "as" field.
+coll.drop();
+assert.writeOK(coll.insert({username: "erica", friends: ["jeremy"]}));
+assert.writeOK(coll.insert({_id: "foo", username: "jeremy"}));
+assert.writeOK(coll.insert({_id: "FOO", username: "JEREMY"}));
- res = coll.aggregate(
+res = coll.aggregate(
[
{$match: {username: "erica"}},
{
@@ -134,7 +138,7 @@
],
caseInsensitiveUS)
.toArray();
- assert.eq(1, res.length);
- assert.eq("erica", res[0].username);
- assert.eq(2, res[0].friendUsers.length);
+assert.eq(1, res.length);
+assert.eq("erica", res[0].username);
+assert.eq(2, res[0].friendUsers.length);
})();
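One subtlety above deserves a note: $graphLookup deduplicates visited nodes by _id using binary comparison, not the query collation, so "foo" and "FOO" remain distinct nodes even when a case-insensitive collation is in effect. A sketch of the collation precedence the test also checks (pipeline contents illustrative):

    const pipeline = [{$match: {username: "erica"}}];
    // An explicit collation on the aggregation wins; otherwise the local
    // collection's default applies. The foreign collection's default is ignored.
    coll.aggregate(pipeline, {collation: {locale: "en_US", strength: 2}});
    coll.aggregate(pipeline);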
diff --git a/jstests/aggregation/sources/graphLookup/error.js b/jstests/aggregation/sources/graphLookup/error.js
index 42d1203238c..b7360f3e9e8 100644
--- a/jstests/aggregation/sources/graphLookup/error.js
+++ b/jstests/aggregation/sources/graphLookup/error.js
@@ -6,18 +6,17 @@
load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
(function() {
- "use strict";
+"use strict";
- var local = db.local;
+var local = db.local;
- local.drop();
- assert.writeOK(local.insert({b: 0}));
+local.drop();
+assert.writeOK(local.insert({b: 0}));
- var pipeline = {$graphLookup: 4};
- assertErrorCode(
- local, pipeline, ErrorCodes.FailedToParse, "$graphLookup spec must be an object");
+var pipeline = {$graphLookup: 4};
+assertErrorCode(local, pipeline, ErrorCodes.FailedToParse, "$graphLookup spec must be an object");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -27,9 +26,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
maxDepth: "string"
}
};
- assertErrorCode(local, pipeline, 40100, "maxDepth must be numeric");
+assertErrorCode(local, pipeline, 40100, "maxDepth must be numeric");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -39,9 +38,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
maxDepth: -1
}
};
- assertErrorCode(local, pipeline, 40101, "maxDepth must be nonnegative");
+assertErrorCode(local, pipeline, 40101, "maxDepth must be nonnegative");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -51,9 +50,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
maxDepth: 2.3
}
};
- assertErrorCode(local, pipeline, 40102, "maxDepth must be representable as a long long");
+assertErrorCode(local, pipeline, 40102, "maxDepth must be representable as a long long");
- pipeline = {
+pipeline = {
$graphLookup: {
from: -1,
startWith: {$literal: 0},
@@ -62,9 +61,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "output"
}
};
- assertErrorCode(local, pipeline, ErrorCodes.FailedToParse, "from must be a string");
+assertErrorCode(local, pipeline, ErrorCodes.FailedToParse, "from must be a string");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "",
startWith: {$literal: 0},
@@ -73,9 +72,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "output"
}
};
- assertErrorCode(local, pipeline, ErrorCodes.InvalidNamespace, "from must be a valid namespace");
+assertErrorCode(local, pipeline, ErrorCodes.InvalidNamespace, "from must be a valid namespace");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -84,9 +83,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: 0
}
};
- assertErrorCode(local, pipeline, 40103, "as must be a string");
+assertErrorCode(local, pipeline, 40103, "as must be a string");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -95,9 +94,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "$output"
}
};
- assertErrorCode(local, pipeline, 16410, "as cannot be a fieldPath");
+assertErrorCode(local, pipeline, 16410, "as cannot be a fieldPath");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -106,9 +105,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "output"
}
};
- assertErrorCode(local, pipeline, 40103, "connectFromField must be a string");
+assertErrorCode(local, pipeline, 40103, "connectFromField must be a string");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -117,9 +116,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "output"
}
};
- assertErrorCode(local, pipeline, 16410, "connectFromField cannot be a fieldPath");
+assertErrorCode(local, pipeline, 16410, "connectFromField cannot be a fieldPath");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -128,9 +127,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "output"
}
};
- assertErrorCode(local, pipeline, 40103, "connectToField must be a string");
+assertErrorCode(local, pipeline, 40103, "connectToField must be a string");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -139,9 +138,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "output"
}
};
- assertErrorCode(local, pipeline, 16410, "connectToField cannot be a fieldPath");
+assertErrorCode(local, pipeline, 16410, "connectToField cannot be a fieldPath");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -151,9 +150,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
depthField: 0
}
};
- assertErrorCode(local, pipeline, 40103, "depthField must be a string");
+assertErrorCode(local, pipeline, 40103, "depthField must be a string");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -163,9 +162,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
depthField: "$depth"
}
};
- assertErrorCode(local, pipeline, 16410, "depthField cannot be a fieldPath");
+assertErrorCode(local, pipeline, 16410, "depthField cannot be a fieldPath");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -175,9 +174,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
restrictSearchWithMatch: "notamatch"
}
};
- assertErrorCode(local, pipeline, 40185, "restrictSearchWithMatch must be an object");
+assertErrorCode(local, pipeline, 40185, "restrictSearchWithMatch must be an object");
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -187,43 +186,37 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
notAField: "foo"
}
};
- assertErrorCode(local, pipeline, 40104, "unknown argument");
-
- pipeline = {
- $graphLookup:
- {from: "foreign", startWith: {$literal: 0}, connectFromField: "b", as: "output"}
- };
- assertErrorCode(local, pipeline, 40105, "connectToField was not specified");
-
- pipeline = {
- $graphLookup:
- {from: "foreign", startWith: {$literal: 0}, connectToField: "a", as: "output"}
- };
- assertErrorCode(local, pipeline, 40105, "connectFromField was not specified");
-
- pipeline = {
- $graphLookup: {from: "foreign", connectToField: "a", connectFromField: "b", as: "output"}
- };
- assertErrorCode(local, pipeline, 40105, "startWith was not specified");
-
- pipeline = {
- $graphLookup: {
- from: "foreign",
- startWith: {$literal: 0},
- connectToField: "a",
- connectFromField: "b"
- }
- };
- assertErrorCode(local, pipeline, 40105, "as was not specified");
-
- pipeline = {
- $graphLookup:
- {startWith: {$literal: 0}, connectToField: "a", connectFromField: "b", as: "output"}
- };
- assertErrorCode(local, pipeline, ErrorCodes.FailedToParse, "from was not specified");
-
- // restrictSearchWithMatch must be a valid match expression.
- pipeline = {
+assertErrorCode(local, pipeline, 40104, "unknown argument");
+
+pipeline = {
+ $graphLookup: {from: "foreign", startWith: {$literal: 0}, connectFromField: "b", as: "output"}
+};
+assertErrorCode(local, pipeline, 40105, "connectToField was not specified");
+
+pipeline = {
+ $graphLookup: {from: "foreign", startWith: {$literal: 0}, connectToField: "a", as: "output"}
+};
+assertErrorCode(local, pipeline, 40105, "connectFromField was not specified");
+
+pipeline = {
+ $graphLookup: {from: "foreign", connectToField: "a", connectFromField: "b", as: "output"}
+};
+assertErrorCode(local, pipeline, 40105, "startWith was not specified");
+
+pipeline = {
+ $graphLookup:
+ {from: "foreign", startWith: {$literal: 0}, connectToField: "a", connectFromField: "b"}
+};
+assertErrorCode(local, pipeline, 40105, "as was not specified");
+
+pipeline = {
+ $graphLookup:
+ {startWith: {$literal: 0}, connectToField: "a", connectFromField: "b", as: "output"}
+};
+assertErrorCode(local, pipeline, ErrorCodes.FailedToParse, "from was not specified");
+
+// restrictSearchWithMatch must be a valid match expression.
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -233,10 +226,10 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
restrictSearchWithMatch: {$not: {a: 1}}
}
};
- assert.throws(() => local.aggregate(pipeline), [], "unable to parse match expression");
+assert.throws(() => local.aggregate(pipeline), [], "unable to parse match expression");
- // $where and $text cannot be used inside $graphLookup.
- pipeline = {
+// $where and $text cannot be used inside $graphLookup.
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -246,9 +239,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
restrictSearchWithMatch: {$where: "3 > 2"}
}
};
- assert.throws(() => local.aggregate(pipeline), [], "cannot use $where inside $graphLookup");
+assert.throws(() => local.aggregate(pipeline), [], "cannot use $where inside $graphLookup");
- pipeline = {
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -258,9 +251,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
restrictSearchWithMatch: {$text: {$search: "some text"}}
}
};
- assert.throws(() => local.aggregate(pipeline), [], "cannot use $text inside $graphLookup");
+assert.throws(() => local.aggregate(pipeline), [], "cannot use $text inside $graphLookup");
- pipeline = {
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -272,9 +265,9 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
}
}
};
- assert.throws(() => local.aggregate(pipeline), [], "cannot use $near inside $graphLookup");
+assert.throws(() => local.aggregate(pipeline), [], "cannot use $near inside $graphLookup");
- pipeline = {
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -293,15 +286,15 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
}
}
};
- assert.throws(
- () => local.aggregate(pipeline), [], "cannot use $near inside $graphLookup at any depth");
+assert.throws(
+ () => local.aggregate(pipeline), [], "cannot use $near inside $graphLookup at any depth");
- let foreign = db.foreign;
- foreign.drop();
- assert.writeOK(foreign.insert({a: 0, x: 0}));
+let foreign = db.foreign;
+foreign.drop();
+assert.writeOK(foreign.insert({a: 0, x: 0}));
- // Test a restrictSearchWithMatch expression that fails to parse.
- pipeline = {
+// Test a restrictSearchWithMatch expression that fails to parse.
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -311,10 +304,10 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
restrictSearchWithMatch: {$expr: {$eq: ["$x", "$$unbound"]}}
}
};
- assert.throws(() => local.aggregate(pipeline), [], "cannot use $expr with unbound variable");
+assert.throws(() => local.aggregate(pipeline), [], "cannot use $expr with unbound variable");
- // Test a restrictSearchWithMatch expression that throws at runtime.
- pipeline = {
+// Test a restrictSearchWithMatch expression that throws at runtime.
+pipeline = {
$graphLookup: {
from: 'foreign',
startWith: {$literal: 0},
@@ -324,25 +317,25 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
restrictSearchWithMatch: {$expr: {$divide: [1, "$x"]}}
}
};
- assertErrorCode(local, pipeline, 16608, "division by zero in $expr");
+assertErrorCode(local, pipeline, 16608, "division by zero in $expr");
- // $graphLookup can only consume at most 100MB of memory.
- foreign.drop();
+// $graphLookup can only consume at most 100MB of memory.
+foreign.drop();
- // Here, the visited set exceeds 100MB.
- var bulk = foreign.initializeUnorderedBulkOp();
+// Here, the visited set exceeds 100MB.
+var bulk = foreign.initializeUnorderedBulkOp();
- var initial = [];
- for (var i = 0; i < 8; i++) {
- var obj = {_id: i};
+var initial = [];
+for (var i = 0; i < 8; i++) {
+ var obj = {_id: i};
- obj['longString'] = new Array(14 * 1024 * 1024).join('x');
- initial.push(i);
- bulk.insert(obj);
- }
- assert.writeOK(bulk.execute());
+ obj['longString'] = new Array(14 * 1024 * 1024).join('x');
+ initial.push(i);
+ bulk.insert(obj);
+}
+assert.writeOK(bulk.execute());
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: initial},
@@ -351,21 +344,21 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
as: "graph"
}
};
- assertErrorCode(local, pipeline, 40099, "maximum memory usage reached");
+assertErrorCode(local, pipeline, 40099, "maximum memory usage reached");
- // Here, the visited set should grow to approximately 90 MB, and the frontier should push memory
- // usage over 100MB.
- foreign.drop();
+// Here, the visited set should grow to approximately 90 MB, and the frontier should push memory
+// usage over 100MB.
+foreign.drop();
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 14; i++) {
- var obj = {from: 0, to: 1};
- obj['s'] = new Array(7 * 1024 * 1024).join(' ');
- bulk.insert(obj);
- }
- assert.writeOK(bulk.execute());
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 14; i++) {
+ var obj = {from: 0, to: 1};
+ obj['s'] = new Array(7 * 1024 * 1024).join(' ');
+ bulk.insert(obj);
+}
+assert.writeOK(bulk.execute());
- pipeline = {
+pipeline = {
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
@@ -375,20 +368,20 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
}
};
- assertErrorCode(local, pipeline, 40099, "maximum memory usage reached");
+assertErrorCode(local, pipeline, 40099, "maximum memory usage reached");
- // Here, we test that the cache keeps memory usage under 100MB, and does not cause an error.
- foreign.drop();
+// Here, we test that the cache keeps memory usage under 100MB, and does not cause an error.
+foreign.drop();
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 13; i++) {
- var obj = {from: 0, to: 1};
- obj['s'] = new Array(7 * 1024 * 1024).join(' ');
- bulk.insert(obj);
- }
- assert.writeOK(bulk.execute());
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 13; i++) {
+ var obj = {from: 0, to: 1};
+ obj['s'] = new Array(7 * 1024 * 1024).join(' ');
+ bulk.insert(obj);
+}
+assert.writeOK(bulk.execute());
- var res = local
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -401,5 +394,5 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
{$unwind: {path: "$out"}})
.toArray();
- assert.eq(res.length, 13);
+assert.eq(res.length, 13);
}());
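Taken together, the parse errors above pin down the shape of a valid spec. A sketch of a minimal $graphLookup that satisfies every constraint exercised by error.js (collection and field names hypothetical, reusing the ones from the tests):

    // 'from' is a non-empty string naming a valid namespace; 'as',
    // 'connectFromField', 'connectToField' and 'depthField' are plain strings
    // (not $-prefixed field paths); 'maxDepth' is a nonnegative number
    // representable as a long long; and 'restrictSearchWithMatch' is an object
    // holding an ordinary match expression (no $where, $text, or $near).
    pipeline = {
        $graphLookup: {
            from: "foreign",
            startWith: {$literal: 0},
            connectFromField: "b",
            connectToField: "a",
            as: "output",
            maxDepth: 5,
            depthField: "depth",
            restrictSearchWithMatch: {x: {$gte: 0}}
        }
    };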
diff --git a/jstests/aggregation/sources/graphLookup/filter.js b/jstests/aggregation/sources/graphLookup/filter.js
index 69027500aae..4b46c843d9a 100644
--- a/jstests/aggregation/sources/graphLookup/filter.js
+++ b/jstests/aggregation/sources/graphLookup/filter.js
@@ -6,23 +6,23 @@
// we test the functionality and correctness of the option.
(function() {
- "use strict";
+"use strict";
- var local = db.local;
- var foreign = db.foreign;
+var local = db.local;
+var foreign = db.foreign;
- local.drop();
- foreign.drop();
+local.drop();
+foreign.drop();
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, neighbors: [i - 1, i + 1]});
- }
- assert.writeOK(bulk.execute());
- assert.writeOK(local.insert({starting: 0}));
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, neighbors: [i - 1, i + 1]});
+}
+assert.writeOK(bulk.execute());
+assert.writeOK(local.insert({starting: 0}));
- // Assert that the graphLookup only retrieves ten documents, with _id from 0 to 9.
- var res = local
+// Assert that the graphLookup only retrieves ten documents, with _id from 0 to 9.
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -35,11 +35,11 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 10);
+assert.eq(res.integers.length, 10);
- // Assert that the graphLookup doesn't retrieve any documents, as to do so it would need to
- // traverse nodes in the graph that don't match the 'restrictSearchWithMatch' predicate.
- res = local
+// Assert that the graphLookup doesn't retrieve any documents, as to do so it would need to
+// traverse nodes in the graph that don't match the 'restrictSearchWithMatch' predicate.
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -52,16 +52,16 @@
})
.toArray()[0];
- assert.eq(res.integers.length, 0);
+assert.eq(res.integers.length, 0);
- foreign.drop();
- assert.writeOK(foreign.insert({from: 0, to: 1, shouldBeIncluded: true}));
- assert.writeOK(foreign.insert({from: 1, to: 2, shouldBeIncluded: false}));
- assert.writeOK(foreign.insert({from: 2, to: 3, shouldBeIncluded: true}));
+foreign.drop();
+assert.writeOK(foreign.insert({from: 0, to: 1, shouldBeIncluded: true}));
+assert.writeOK(foreign.insert({from: 1, to: 2, shouldBeIncluded: false}));
+assert.writeOK(foreign.insert({from: 2, to: 3, shouldBeIncluded: true}));
- // Assert that the $graphLookup stops exploring when it finds a document that doesn't match the
- // filter.
- res = local
+// Assert that the $graphLookup stops exploring when it finds a document that doesn't match the
+// filter.
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -74,10 +74,10 @@
})
.toArray()[0];
- assert.eq(res.results.length, 1);
+assert.eq(res.results.length, 1);
- // $expr is allowed inside the 'restrictSearchWithMatch' match expression.
- res = local
+// $expr is allowed inside the 'restrictSearchWithMatch' match expression.
+res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -90,5 +90,5 @@
})
.toArray()[0];
- assert.eq(res.results.length, 1);
+assert.eq(res.results.length, 1);
})();
diff --git a/jstests/aggregation/sources/graphLookup/nested_objects.js b/jstests/aggregation/sources/graphLookup/nested_objects.js
index d40cced2ac4..43c81302ae4 100644
--- a/jstests/aggregation/sources/graphLookup/nested_objects.js
+++ b/jstests/aggregation/sources/graphLookup/nested_objects.js
@@ -6,24 +6,24 @@
// when the 'connectToField' is a nested array, or when the 'connectFromField' is a nested array.
(function() {
- "use strict";
+"use strict";
- var local = db.local;
- var foreign = db.foreign;
+var local = db.local;
+var foreign = db.foreign;
- local.drop();
- foreign.drop();
+local.drop();
+foreign.drop();
- // 'connectFromField' is an array of objects.
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, neighbors: [{id: i + 1}, {id: i + 2}]});
- }
- assert.writeOK(bulk.execute());
+// 'connectFromField' is an array of objects.
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, neighbors: [{id: i + 1}, {id: i + 2}]});
+}
+assert.writeOK(bulk.execute());
- assert.writeOK(local.insert({starting: 0}));
+assert.writeOK(local.insert({starting: 0}));
- var res = local
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -34,18 +34,18 @@
}
})
.toArray()[0];
- assert.eq(res.integers.length, 100);
+assert.eq(res.integers.length, 100);
- foreign.drop();
+foreign.drop();
- // 'connectToField' is an array of objects.
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({previous: [{neighbor: i}, {neighbor: i - 1}], value: i + 1});
- }
- assert.writeOK(bulk.execute());
+// 'connectToField' is an array of objects.
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({previous: [{neighbor: i}, {neighbor: i - 1}], value: i + 1});
+}
+assert.writeOK(bulk.execute());
- var res = local
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -56,21 +56,21 @@
}
})
.toArray()[0];
- assert.eq(res.integers.length, 100);
+assert.eq(res.integers.length, 100);
- foreign.drop();
+foreign.drop();
- // Both 'connectToField' and 'connectFromField' are arrays of objects.
- var bulk = foreign.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({
- previous: [{neighbor: i}, {neighbor: i - 1}],
- values: [{neighbor: i + 1}, {neighbor: i + 2}]
- });
- }
- assert.writeOK(bulk.execute());
+// Both 'connectToField' and 'connectFromField' are arrays of objects.
+var bulk = foreign.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({
+ previous: [{neighbor: i}, {neighbor: i - 1}],
+ values: [{neighbor: i + 1}, {neighbor: i + 2}]
+ });
+}
+assert.writeOK(bulk.execute());
- var res = local
+var res = local
.aggregate({
$graphLookup: {
from: "foreign",
@@ -81,5 +81,5 @@
}
})
.toArray()[0];
- assert.eq(res.integers.length, 100);
+assert.eq(res.integers.length, 100);
}());
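The hunks above show the inserted documents but elide the stage arguments. A hypothetical sketch of the first traversal, assuming 'connectFromField' is given as the dotted path into the nested array (which is how $graphLookup reaches values inside arrays of subdocuments):

    // With foreign docs shaped {_id: i, neighbors: [{id: i + 1}, {id: i + 2}]},
    // "neighbors.id" fans each node out to both of its neighbor ids, so the
    // walk from {starting: 0} eventually visits all 100 documents.
    var stage = {
        $graphLookup: {
            from: "foreign",
            startWith: "$starting",
            connectFromField: "neighbors.id",
            connectToField: "_id",
            as: "integers"
        }
    };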
diff --git a/jstests/aggregation/sources/graphLookup/socialite.js b/jstests/aggregation/sources/graphLookup/socialite.js
index 228c0f56c0e..f38f6c2ffc0 100644
--- a/jstests/aggregation/sources/graphLookup/socialite.js
+++ b/jstests/aggregation/sources/graphLookup/socialite.js
@@ -6,35 +6,35 @@
// Socialite schema example available here: https://github.com/mongodb-labs/socialite
(function() {
- "use strict";
+"use strict";
- var follower = db.followers;
- var users = db.users;
+var follower = db.followers;
+var users = db.users;
- follower.drop();
- users.drop();
+follower.drop();
+users.drop();
- var userDocs = [
- {_id: "djw", fullname: "Darren", country: "Australia"},
- {_id: "bmw", fullname: "Bob", country: "Germany"},
- {_id: "jsr", fullname: "Jared", country: "USA"},
- {_id: "ftr", fullname: "Frank", country: "Canada"}
- ];
+var userDocs = [
+ {_id: "djw", fullname: "Darren", country: "Australia"},
+ {_id: "bmw", fullname: "Bob", country: "Germany"},
+ {_id: "jsr", fullname: "Jared", country: "USA"},
+ {_id: "ftr", fullname: "Frank", country: "Canada"}
+];
- userDocs.forEach(function(userDoc) {
- assert.writeOK(users.insert(userDoc));
- });
+userDocs.forEach(function(userDoc) {
+ assert.writeOK(users.insert(userDoc));
+});
- var followers = [{_f: "djw", _t: "jsr"}, {_f: "jsr", _t: "bmw"}, {_f: "ftr", _t: "bmw"}];
+var followers = [{_f: "djw", _t: "jsr"}, {_f: "jsr", _t: "bmw"}, {_f: "ftr", _t: "bmw"}];
- followers.forEach(function(f) {
- assert.writeOK(follower.insert(f));
- });
+followers.forEach(function(f) {
+ assert.writeOK(follower.insert(f));
+});
- // Find the social network of "Darren", that is, people Darren follows, and people who are
- // followed by someone Darren follows, etc.
+// Find the social network of "Darren", that is, people Darren follows, and people who are
+// followed by someone Darren follows, etc.
- var res = users
+var res = users
.aggregate({$match: {fullname: "Darren"}},
{
$graphLookup: {
@@ -49,6 +49,6 @@
{$project: {_id: "$network._t"}})
.toArray();
- // "djw" is followed, directly or indirectly, by "jsr" and "bmw".
- assert.eq(res.length, 2);
+// "djw" is followed, directly or indirectly, by "jsr" and "bmw".
+assert.eq(res.length, 2);
}());
diff --git a/jstests/aggregation/sources/graphLookup/variables.js b/jstests/aggregation/sources/graphLookup/variables.js
index 87e2c8b3975..63b1bbea244 100644
--- a/jstests/aggregation/sources/graphLookup/variables.js
+++ b/jstests/aggregation/sources/graphLookup/variables.js
@@ -2,17 +2,17 @@
* Tests to verify that $graphLookup can use the variables defined in an outer scope.
*/
(function() {
- "use strict";
+"use strict";
- let local = db.graph_lookup_var_local;
- let foreign = db.graph_lookup_var_foreign;
- local.drop();
- foreign.drop();
+let local = db.graph_lookup_var_local;
+let foreign = db.graph_lookup_var_foreign;
+local.drop();
+foreign.drop();
- foreign.insert({from: "b", to: "a", _id: 0});
- local.insert({});
+foreign.insert({from: "b", to: "a", _id: 0});
+local.insert({});
- const basicGraphLookup = {
+const basicGraphLookup = {
$graphLookup: {
from: "graph_lookup_var_foreign",
startWith: "$$var1",
@@ -22,7 +22,7 @@
}
};
- const lookup = {
+const lookup = {
$lookup: {
from: "graph_lookup_var_local",
let : {var1: "a"},
@@ -31,11 +31,10 @@
}
};
- // Verify that $graphLookup can use the variable 'var1' which is defined in parent $lookup.
- let res = local.aggregate([lookup]).toArray();
- assert.eq(res.length, 1);
- assert.eq(res[0].resultsFromLookup.length, 1);
- assert.eq(res[0].resultsFromLookup[0].resultsFromGraphLookup.length, 1);
- assert.eq(res[0].resultsFromLookup[0].resultsFromGraphLookup[0], {_id: 0, from: "b", to: "a"});
-
+// Verify that $graphLookup can use the variable 'var1' which is defined in parent $lookup.
+let res = local.aggregate([lookup]).toArray();
+assert.eq(res.length, 1);
+assert.eq(res[0].resultsFromLookup.length, 1);
+assert.eq(res[0].resultsFromLookup[0].resultsFromGraphLookup.length, 1);
+assert.eq(res[0].resultsFromLookup[0].resultsFromGraphLookup[0], {_id: 0, from: "b", to: "a"});
})();
diff --git a/jstests/aggregation/sources/group/collation_group.js b/jstests/aggregation/sources/group/collation_group.js
index c4977900078..94db6f15ed1 100644
--- a/jstests/aggregation/sources/group/collation_group.js
+++ b/jstests/aggregation/sources/group/collation_group.js
@@ -3,82 +3,80 @@
// Test that the $group stage and all accumulators respect the collation.
(function() {
- "use strict";
+"use strict";
- var coll = db.collation_group;
- coll.drop();
+var coll = db.collation_group;
+coll.drop();
- var results;
- var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
- var diacriticInsensitive = {collation: {locale: "en_US", strength: 1, caseLevel: true}};
- var numericOrdering = {collation: {locale: "en_US", numericOrdering: true}};
- var caseAndDiacriticInsensitive = {collation: {locale: "en_US", strength: 1}};
+var results;
+var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
+var diacriticInsensitive = {collation: {locale: "en_US", strength: 1, caseLevel: true}};
+var numericOrdering = {collation: {locale: "en_US", numericOrdering: true}};
+var caseAndDiacriticInsensitive = {collation: {locale: "en_US", strength: 1}};
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
- assert.writeOK(coll.insert({_id: 0, str: "A", str2: "á"}));
- assert.writeOK(coll.insert({_id: 1, str: "a", str2: "a"}));
- assert.writeOK(coll.insert({_id: 2, str: "B", str2: "é"}));
- assert.writeOK(coll.insert({_id: 3, str: "b", str2: "e"}));
+assert.writeOK(coll.insert({_id: 0, str: "A", str2: "á"}));
+assert.writeOK(coll.insert({_id: 1, str: "a", str2: "a"}));
+assert.writeOK(coll.insert({_id: 2, str: "B", str2: "é"}));
+assert.writeOK(coll.insert({_id: 3, str: "b", str2: "e"}));
- // Ensure that equality of groups respects the collation inherited from the collection default.
- assert.eq(2, coll.aggregate([{$group: {_id: "$str"}}]).itcount());
+// Ensure that equality of groups respects the collation inherited from the collection default.
+assert.eq(2, coll.aggregate([{$group: {_id: "$str"}}]).itcount());
- // Ensure that equality of groups respects an explicit collation.
- assert.eq(2, coll.aggregate([{$group: {_id: "$str2"}}], diacriticInsensitive).itcount());
+// Ensure that equality of groups respects an explicit collation.
+assert.eq(2, coll.aggregate([{$group: {_id: "$str2"}}], diacriticInsensitive).itcount());
- // Ensure that equality of groups created by $sortByCount respects the inherited collation.
- assert.eq(2, coll.aggregate([{$sortByCount: "$str"}]).itcount());
- assert.eq(4, coll.aggregate([{$sortByCount: "$str2"}]).itcount());
+// Ensure that equality of groups created by $sortByCount respects the inherited collation.
+assert.eq(2, coll.aggregate([{$sortByCount: "$str"}]).itcount());
+assert.eq(4, coll.aggregate([{$sortByCount: "$str2"}]).itcount());
- // Ensure that equality of groups created by $sortByCount respects an explicit collation.
- assert.eq(4, coll.aggregate([{$sortByCount: "$str"}], diacriticInsensitive).itcount());
- assert.eq(2, coll.aggregate([{$sortByCount: "$str2"}], diacriticInsensitive).itcount());
+// Ensure that equality of groups created by $sortByCount respects an explicit collation.
+assert.eq(4, coll.aggregate([{$sortByCount: "$str"}], diacriticInsensitive).itcount());
+assert.eq(2, coll.aggregate([{$sortByCount: "$str2"}], diacriticInsensitive).itcount());
- // Ensure that equality of groups inside $facet stage respects the inherited collation.
- results =
- coll.aggregate([{
- $facet:
- {facetStr: [{$group: {_id: "$str"}}], facetStr2: [{$group: {_id: "$str2"}}]}
- }])
- .toArray();
- assert.eq(1, results.length);
- assert.eq(2, results[0].facetStr.length);
- assert.eq(4, results[0].facetStr2.length);
+// Ensure that equality of groups inside $facet stage respects the inherited collation.
+results =
+ coll.aggregate([
+ {$facet: {facetStr: [{$group: {_id: "$str"}}], facetStr2: [{$group: {_id: "$str2"}}]}}
+ ])
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(2, results[0].facetStr.length);
+assert.eq(4, results[0].facetStr2.length);
- // Test that the $addToSet accumulator respects the inherited collation.
- results = coll.aggregate([{$group: {_id: null, set: {$addToSet: "$str"}}}]).toArray();
- assert.eq(1, results.length);
- assert.eq(2, results[0].set.length);
+// Test that the $addToSet accumulator respects the inherited collation.
+results = coll.aggregate([{$group: {_id: null, set: {$addToSet: "$str"}}}]).toArray();
+assert.eq(1, results.length);
+assert.eq(2, results[0].set.length);
- // Test that the $addToSet accumulator respects an explicit collation.
- results =
- coll.aggregate([{$group: {_id: null, set: {$addToSet: "$str2"}}}], diacriticInsensitive)
- .toArray();
- assert.eq(1, results.length);
- assert.eq(2, results[0].set.length);
+// Test that the $addToSet accumulator respects an explicit collation.
+results = coll.aggregate([{$group: {_id: null, set: {$addToSet: "$str2"}}}], diacriticInsensitive)
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(2, results[0].set.length);
- // Ensure that a subexpression inside $push respects the collation.
- results = coll.aggregate(
- [
- {$match: {_id: 0}},
- {$group: {_id: null, areEqual: {$push: {$eq: ["$str", "$str2"]}}}}
- ],
- caseAndDiacriticInsensitive)
- .toArray();
- assert.eq(1, results.length);
- assert.eq(1, results[0].areEqual.length);
- assert.eq(true, results[0].areEqual[0]);
+// Ensure that a subexpression inside $push respects the collation.
+results = coll.aggregate(
+ [
+ {$match: {_id: 0}},
+ {$group: {_id: null, areEqual: {$push: {$eq: ["$str", "$str2"]}}}}
+ ],
+ caseAndDiacriticInsensitive)
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(1, results[0].areEqual.length);
+assert.eq(true, results[0].areEqual[0]);
- // Test that the $min and $max accumulators respect the inherited collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
- assert.writeOK(coll.insert({num: "100"}));
- assert.writeOK(coll.insert({num: "2"}));
- results = coll.aggregate([{$group: {_id: null, min: {$min: "$num"}}}]).toArray();
- assert.eq(1, results.length);
- assert.eq("2", results[0].min);
- results = coll.aggregate([{$group: {_id: null, max: {$max: "$num"}}}]).toArray();
- assert.eq(1, results.length);
- assert.eq("100", results[0].max);
+// Test that the $min and $max accumulators respect the inherited collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+assert.writeOK(coll.insert({num: "100"}));
+assert.writeOK(coll.insert({num: "2"}));
+results = coll.aggregate([{$group: {_id: null, min: {$min: "$num"}}}]).toArray();
+assert.eq(1, results.length);
+assert.eq("2", results[0].min);
+results = coll.aggregate([{$group: {_id: null, max: {$max: "$num"}}}]).toArray();
+assert.eq(1, results.length);
+assert.eq("100", results[0].max);
})();
diff --git a/jstests/aggregation/sources/group/group_by_variable.js b/jstests/aggregation/sources/group/group_by_variable.js
index 1d5e203caea..322b260297a 100644
--- a/jstests/aggregation/sources/group/group_by_variable.js
+++ b/jstests/aggregation/sources/group/group_by_variable.js
@@ -3,24 +3,24 @@
* SERVER-37459.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.group_by_system_var;
- coll.drop();
+const coll = db.group_by_system_var;
+coll.drop();
- assert.commandWorked(coll.insert({_id: 1, x: 1}));
- assert.commandWorked(coll.insert({_id: 2, x: 2}));
+assert.commandWorked(coll.insert({_id: 1, x: 1}));
+assert.commandWorked(coll.insert({_id: 2, x: 2}));
- function checkPipeline(pipeline, expectedResults) {
- const res = coll.aggregate(pipeline).toArray();
- assert.eq(res, expectedResults, pipeline);
- }
+function checkPipeline(pipeline, expectedResults) {
+ const res = coll.aggregate(pipeline).toArray();
+ assert.eq(res, expectedResults, pipeline);
+}
- const wholeCollUnderId = [{_id: {_id: 1, x: 1}}, {_id: {_id: 2, x: 2}}];
- checkPipeline([{$group: {_id: "$$ROOT"}}, {$sort: {"_id": 1}}], wholeCollUnderId);
- checkPipeline([{$group: {_id: "$$CURRENT"}}, {$sort: {"_id": 1}}], wholeCollUnderId);
+const wholeCollUnderId = [{_id: {_id: 1, x: 1}}, {_id: {_id: 2, x: 2}}];
+checkPipeline([{$group: {_id: "$$ROOT"}}, {$sort: {"_id": 1}}], wholeCollUnderId);
+checkPipeline([{$group: {_id: "$$CURRENT"}}, {$sort: {"_id": 1}}], wholeCollUnderId);
- const collIds = [{_id: 1}, {_id: 2}];
- checkPipeline([{$group: {_id: "$$ROOT.x"}}, {$sort: {"_id": 1}}], collIds);
- checkPipeline([{$group: {_id: "$$CURRENT.x"}}, {$sort: {"_id": 1}}], collIds);
+const collIds = [{_id: 1}, {_id: 2}];
+checkPipeline([{$group: {_id: "$$ROOT.x"}}, {$sort: {"_id": 1}}], collIds);
+checkPipeline([{$group: {_id: "$$CURRENT.x"}}, {$sort: {"_id": 1}}], collIds);
})();
diff --git a/jstests/aggregation/sources/group/numeric_grouping.js b/jstests/aggregation/sources/group/numeric_grouping.js
index c55dee564de..a7b9f22d979 100644
--- a/jstests/aggregation/sources/group/numeric_grouping.js
+++ b/jstests/aggregation/sources/group/numeric_grouping.js
@@ -2,24 +2,24 @@
* Tests that numbers that are equivalent but have different types are grouped together.
*/
(function() {
- "use strict";
- const coll = db.numeric_grouping;
+"use strict";
+const coll = db.numeric_grouping;
- coll.drop();
+coll.drop();
- assert.writeOK(coll.insert({key: new NumberInt(24), value: 17}));
- assert.writeOK(coll.insert({key: new NumberLong(24), value: 8}));
- assert.writeOK(coll.insert({key: 24, value: 5}));
+assert.writeOK(coll.insert({key: new NumberInt(24), value: 17}));
+assert.writeOK(coll.insert({key: new NumberLong(24), value: 8}));
+assert.writeOK(coll.insert({key: 24, value: 5}));
- assert.writeOK(coll.insert({key: new NumberInt(42), value: 11}));
- assert.writeOK(coll.insert({key: new NumberLong(42), value: 13}));
- assert.writeOK(coll.insert({key: 42, value: 6}));
+assert.writeOK(coll.insert({key: new NumberInt(42), value: 11}));
+assert.writeOK(coll.insert({key: new NumberLong(42), value: 13}));
+assert.writeOK(coll.insert({key: 42, value: 6}));
- const results = coll.aggregate({$group: {_id: "$key", s: {$sum: "$value"}}}).toArray();
+const results = coll.aggregate({$group: {_id: "$key", s: {$sum: "$value"}}}).toArray();
- assert.eq(results.length, 2, tojson(results));
+assert.eq(results.length, 2, tojson(results));
- // Both groups should sum to 30.
- assert.eq(results[0].s, 30, tojson(results));
- assert.eq(results[1].s, 30, tojson(results));
+// Both groups should sum to 30.
+assert.eq(results[0].s, 30, tojson(results));
+assert.eq(results[1].s, 30, tojson(results));
}());
diff --git a/jstests/aggregation/sources/group/text_score_grouping.js b/jstests/aggregation/sources/group/text_score_grouping.js
index bb65d77fd00..2952602ee46 100644
--- a/jstests/aggregation/sources/group/text_score_grouping.js
+++ b/jstests/aggregation/sources/group/text_score_grouping.js
@@ -2,30 +2,29 @@
* Tests that a user can group on the text score.
*/
(function() {
- "use strict";
- const coll = db.text_score_grouping;
+"use strict";
+const coll = db.text_score_grouping;
- coll.drop();
+coll.drop();
- assert.writeOK(coll.insert({"_id": 1, "title": "cakes"}));
- assert.writeOK(coll.insert({"_id": 2, "title": "cookies and cakes"}));
+assert.writeOK(coll.insert({"_id": 1, "title": "cakes"}));
+assert.writeOK(coll.insert({"_id": 2, "title": "cookies and cakes"}));
- assert.commandWorked(coll.createIndex({title: "text"}));
+assert.commandWorked(coll.createIndex({title: "text"}));
- // Make sure there are two distinct groups for a text search with no other dependencies.
- var results = coll.aggregate([
- {$match: {$text: {$search: "cake cookies"}}},
- {$group: {_id: {$meta: "textScore"}, count: {$sum: 1}}}
- ])
- .toArray();
- assert.eq(results.length, 2);
-
- // Make sure there are two distinct groups if there are other fields required by the group.
- results = coll.aggregate([
+// Make sure there are two distinct groups for a text search with no other dependencies.
+var results = coll.aggregate([
{$match: {$text: {$search: "cake cookies"}}},
- {$group: {_id: {$meta: "textScore"}, firstId: {$first: "$_id"}}}
+ {$group: {_id: {$meta: "textScore"}, count: {$sum: 1}}}
])
.toArray();
- assert.eq(results.length, 2);
+assert.eq(results.length, 2);
+// Make sure there are two distinct groups if there are other fields required by the group.
+results = coll.aggregate([
+ {$match: {$text: {$search: "cake cookies"}}},
+ {$group: {_id: {$meta: "textScore"}, firstId: {$first: "$_id"}}}
+ ])
+ .toArray();
+assert.eq(results.length, 2);
}());
diff --git a/jstests/aggregation/sources/lookup/lookup_absorb_match.js b/jstests/aggregation/sources/lookup/lookup_absorb_match.js
index 1a6aea31b16..1d85817970f 100644
--- a/jstests/aggregation/sources/lookup/lookup_absorb_match.js
+++ b/jstests/aggregation/sources/lookup/lookup_absorb_match.js
@@ -7,22 +7,22 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
+"use strict";
- let testDB = db.getSiblingDB("lookup_absorb_match");
- testDB.dropDatabase();
+let testDB = db.getSiblingDB("lookup_absorb_match");
+testDB.dropDatabase();
- let locations = testDB.getCollection("locations");
- assert.writeOK(locations.insert({_id: "doghouse", coordinates: [25.0, 60.0]}));
- assert.writeOK(locations.insert({_id: "bullpen", coordinates: [-25.0, -60.0]}));
+let locations = testDB.getCollection("locations");
+assert.writeOK(locations.insert({_id: "doghouse", coordinates: [25.0, 60.0]}));
+assert.writeOK(locations.insert({_id: "bullpen", coordinates: [-25.0, -60.0]}));
- let animals = testDB.getCollection("animals");
- assert.writeOK(animals.insert({_id: "dog", locationId: "doghouse"}));
- assert.writeOK(animals.insert({_id: "bull", locationId: "bullpen"}));
+let animals = testDB.getCollection("animals");
+assert.writeOK(animals.insert({_id: "dog", locationId: "doghouse"}));
+assert.writeOK(animals.insert({_id: "bull", locationId: "bullpen"}));
- // Test that a $match with $geoWithin works properly when performed directly on an absorbed
- // lookup field.
- let result = testDB.animals
+// Test that a $match with $geoWithin works properly when performed directly on an absorbed
+// lookup field.
+let result = testDB.animals
.aggregate([
{
$lookup: {
@@ -53,15 +53,12 @@
}
])
.toArray();
- let expected = [{
- _id: "dog",
- locationId: "doghouse",
- location: {_id: "doghouse", coordinates: [25.0, 60.0]}
- }];
- assert.eq(result, expected);
+let expected =
+ [{_id: "dog", locationId: "doghouse", location: {_id: "doghouse", coordinates: [25.0, 60.0]}}];
+assert.eq(result, expected);
- // Test that a $match with $geoIntersects works as expected when absorbed by a $lookup.
- result = testDB.animals
+// Test that a $match with $geoIntersects works as expected when absorbed by a $lookup.
+result = testDB.animals
.aggregate([
{
$lookup: {
@@ -92,10 +89,7 @@
}
])
.toArray();
- expected = [{
- _id: "bull",
- locationId: "bullpen",
- location: {_id: "bullpen", coordinates: [-25.0, -60.0]}
- }];
- assert.eq(result, expected);
+expected =
+ [{_id: "bull", locationId: "bullpen", location: {_id: "bullpen", coordinates: [-25.0, -60.0]}}];
+assert.eq(result, expected);
}());
diff --git a/jstests/aggregation/sources/lookup/lookup_contains_text.js b/jstests/aggregation/sources/lookup/lookup_contains_text.js
index 2e96054735e..0ecbb60d683 100644
--- a/jstests/aggregation/sources/lookup/lookup_contains_text.js
+++ b/jstests/aggregation/sources/lookup/lookup_contains_text.js
@@ -3,49 +3,46 @@
// TODO: Reenable test on passthroughs with sharded collections as part of SERVER-38996.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
+load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
- const outer = db.outer;
- const inner = db.inner;
+const outer = db.outer;
+const inner = db.inner;
- outer.drop();
- inner.drop();
+outer.drop();
+inner.drop();
- const kNoTextScoreAvailableErrCode = 40218;
+const kNoTextScoreAvailableErrCode = 40218;
- // This pipeline is never legal, because the subpipeline projects out a textScore but does not
- // begin with a $text search.
- let pipeline = [
- {$match: {$text: {$search: "foo"}}},
- {
- $lookup:
- {from: "inner", as: "as", pipeline: [{$project: {score: {$meta: "textScore"}}}]}
- }
- ];
+// This pipeline is never legal, because the subpipeline projects out a textScore but does not
+// begin with a $text search.
+let pipeline = [
+ {$match: {$text: {$search: "foo"}}},
+ {$lookup: {from: "inner", as: "as", pipeline: [{$project: {score: {$meta: "textScore"}}}]}}
+];
- assert.commandWorked(outer.insert({_id: 100, a: "foo"}));
- assert.commandWorked(inner.insert({_id: 100, a: "bar apple banana"}));
+assert.commandWorked(outer.insert({_id: 100, a: "foo"}));
+assert.commandWorked(inner.insert({_id: 100, a: "bar apple banana"}));
- // Neither 'outer' nor 'inner' have text indexes.
- assertErrorCode(outer, pipeline, ErrorCodes.IndexNotFound);
+// Neither 'outer' nor 'inner' have text indexes.
+assertErrorCode(outer, pipeline, ErrorCodes.IndexNotFound);
- // Only 'outer' has a text index.
- assert.commandWorked(outer.createIndex({a: "text"}, {name: "outer_first_index"}));
- assertErrorCode(outer, pipeline, kNoTextScoreAvailableErrCode);
+// Only 'outer' has a text index.
+assert.commandWorked(outer.createIndex({a: "text"}, {name: "outer_first_index"}));
+assertErrorCode(outer, pipeline, kNoTextScoreAvailableErrCode);
- // Only 'inner' has a text index.
- assert.commandWorked(outer.dropIndex("outer_first_index"));
- assert.commandWorked(inner.createIndex({a: "text"}));
- assertErrorCode(outer, pipeline, ErrorCodes.IndexNotFound);
+// Only 'inner' has a text index.
+assert.commandWorked(outer.dropIndex("outer_first_index"));
+assert.commandWorked(inner.createIndex({a: "text"}));
+assertErrorCode(outer, pipeline, ErrorCodes.IndexNotFound);
- // Both 'outer' and 'inner' have a text index.
- assert.commandWorked(outer.createIndex({a: "text"}));
- assertErrorCode(outer, pipeline, kNoTextScoreAvailableErrCode);
+// Both 'outer' and 'inner' have a text index.
+assert.commandWorked(outer.createIndex({a: "text"}));
+assertErrorCode(outer, pipeline, kNoTextScoreAvailableErrCode);
- // A pipeline with two text searches, one within a $lookup, will work.
- pipeline = [
+// A pipeline with two text searches, one within a $lookup, will work.
+pipeline = [
{$match: {$text: {$search: "foo"}}},
{
$lookup: {
@@ -59,24 +56,24 @@
}
];
- let expected = [{"_id": 100, "a": "foo", "as": [{"_id": 100, "score": 2}]}];
- assert.eq(outer.aggregate(pipeline).toArray(), expected);
+let expected = [{"_id": 100, "a": "foo", "as": [{"_id": 100, "score": 2}]}];
+assert.eq(outer.aggregate(pipeline).toArray(), expected);
- // A lookup with a text search in the subpipeline will correctly perform that search on 'from'.
- pipeline = [{
+// A lookup with a text search in the subpipeline will correctly perform that search on 'from'.
+pipeline = [{
$lookup: {
from: "inner",
as: "as",
pipeline: [{$match: {$text: {$search: "bar apple banana hello"}}}]
}
}];
- expected = [{"_id": 100, "a": "foo", "as": [{"_id": 100, "a": "bar apple banana"}]}];
+expected = [{"_id": 100, "a": "foo", "as": [{"_id": 100, "a": "bar apple banana"}]}];
- assert.eq(outer.aggregate(pipeline).toArray(), expected);
+assert.eq(outer.aggregate(pipeline).toArray(), expected);
- // A lookup with two text searches and two text score $projects will have the text scores
- // reference the relevant text search.
- pipeline = [
+// A lookup with two text searches and two text score $projects will have the text scores
+// reference the relevant text search.
+pipeline = [
{$match: {$text: {$search: "foo"}}},
{
$lookup: {
@@ -91,13 +88,13 @@
{$project: {score: {$meta: "textScore"}, as: 1}},
];
- expected = [{"_id": 100, "as": [{"_id": 100, "score": 2}], "score": 1.1}];
+expected = [{"_id": 100, "as": [{"_id": 100, "score": 2}], "score": 1.1}];
- assert.eq(outer.aggregate(pipeline).toArray(), expected);
+assert.eq(outer.aggregate(pipeline).toArray(), expected);
- // Given a $text stage in the 'from' pipeline, the outer pipeline will not be able to access
- // this $text stage's text score.
- pipeline = [
+// Given a $text stage in the 'from' pipeline, the outer pipeline will not be able to access
+// this $text stage's text score.
+pipeline = [
{
$lookup: {
from: "inner",
@@ -108,5 +105,5 @@
{$project: {score: {$meta: "textScore"}, as: 1}},
];
- assertErrorCode(outer, pipeline, kNoTextScoreAvailableErrCode);
+assertErrorCode(outer, pipeline, kNoTextScoreAvailableErrCode);
}());
diff --git a/jstests/aggregation/sources/lookup/lookup_non_correlated.js b/jstests/aggregation/sources/lookup/lookup_non_correlated.js
index 523eb37f8d4..d7323d861c1 100644
--- a/jstests/aggregation/sources/lookup/lookup_non_correlated.js
+++ b/jstests/aggregation/sources/lookup/lookup_non_correlated.js
@@ -6,59 +6,59 @@
* Confirms that $lookup with a non-correlated foreign pipeline returns expected results.
*/
(function() {
- "use strict";
+"use strict";
- const testDB = db.getSiblingDB("lookup_non_correlated");
- const localName = "local";
- const localColl = testDB.getCollection(localName);
- localColl.drop();
- const foreignName = "foreign";
- const foreignColl = testDB.getCollection(foreignName);
- foreignColl.drop();
+const testDB = db.getSiblingDB("lookup_non_correlated");
+const localName = "local";
+const localColl = testDB.getCollection(localName);
+localColl.drop();
+const foreignName = "foreign";
+const foreignColl = testDB.getCollection(foreignName);
+foreignColl.drop();
- assert.writeOK(localColl.insert({_id: "A"}));
- assert.writeOK(localColl.insert({_id: "B"}));
- assert.writeOK(localColl.insert({_id: "C"}));
+assert.writeOK(localColl.insert({_id: "A"}));
+assert.writeOK(localColl.insert({_id: "B"}));
+assert.writeOK(localColl.insert({_id: "C"}));
- assert.writeOK(foreignColl.insert({_id: 1}));
- assert.writeOK(foreignColl.insert({_id: 2}));
- assert.writeOK(foreignColl.insert({_id: 3}));
+assert.writeOK(foreignColl.insert({_id: 1}));
+assert.writeOK(foreignColl.insert({_id: 2}));
+assert.writeOK(foreignColl.insert({_id: 3}));
- // Basic non-correlated lookup returns expected results.
- let cursor = localColl.aggregate([
- {$match: {_id: {$in: ["B", "C"]}}},
- {$sort: {_id: 1}},
- {$lookup: {from: foreignName, as: "foreignDocs", pipeline: [{$match: {_id: {"$gte": 2}}}]}},
- ]);
+// Basic non-correlated lookup returns expected results.
+let cursor = localColl.aggregate([
+ {$match: {_id: {$in: ["B", "C"]}}},
+ {$sort: {_id: 1}},
+ {$lookup: {from: foreignName, as: "foreignDocs", pipeline: [{$match: {_id: {"$gte": 2}}}]}},
+]);
- assert(cursor.hasNext());
- assert.docEq({_id: "B", foreignDocs: [{_id: 2}, {_id: 3}]}, cursor.next());
- assert(cursor.hasNext());
- assert.docEq({_id: "C", foreignDocs: [{_id: 2}, {_id: 3}]}, cursor.next());
- assert(!cursor.hasNext());
+assert(cursor.hasNext());
+assert.docEq({_id: "B", foreignDocs: [{_id: 2}, {_id: 3}]}, cursor.next());
+assert(cursor.hasNext());
+assert.docEq({_id: "C", foreignDocs: [{_id: 2}, {_id: 3}]}, cursor.next());
+assert(!cursor.hasNext());
- // Non-correlated lookup followed by unwind on 'as' returns expected results.
- cursor = localColl.aggregate([
- {$match: {_id: "A"}},
- {$lookup: {from: foreignName, as: "foreignDocs", pipeline: [{$match: {_id: {"$gte": 2}}}]}},
- {$unwind: "$foreignDocs"}
- ]);
+// Non-correlated lookup followed by unwind on 'as' returns expected results.
+cursor = localColl.aggregate([
+ {$match: {_id: "A"}},
+ {$lookup: {from: foreignName, as: "foreignDocs", pipeline: [{$match: {_id: {"$gte": 2}}}]}},
+ {$unwind: "$foreignDocs"}
+]);
- assert(cursor.hasNext());
- assert.docEq({_id: "A", foreignDocs: {_id: 2}}, cursor.next());
- assert(cursor.hasNext());
- assert.docEq({_id: "A", foreignDocs: {_id: 3}}, cursor.next());
- assert(!cursor.hasNext());
+assert(cursor.hasNext());
+assert.docEq({_id: "A", foreignDocs: {_id: 2}}, cursor.next());
+assert(cursor.hasNext());
+assert.docEq({_id: "A", foreignDocs: {_id: 3}}, cursor.next());
+assert(!cursor.hasNext());
- // Non-correlated lookup followed by unwind and filter on 'as' returns expected results.
- cursor = localColl.aggregate([
- {$match: {_id: "A"}},
- {$lookup: {from: foreignName, as: "foreignDocs", pipeline: [{$match: {_id: {"$gte": 2}}}]}},
- {$unwind: "$foreignDocs"},
- {$match: {"foreignDocs._id": 2}}
- ]);
+// Non-correlated lookup followed by unwind and filter on 'as' returns expected results.
+cursor = localColl.aggregate([
+ {$match: {_id: "A"}},
+ {$lookup: {from: foreignName, as: "foreignDocs", pipeline: [{$match: {_id: {"$gte": 2}}}]}},
+ {$unwind: "$foreignDocs"},
+ {$match: {"foreignDocs._id": 2}}
+]);
- assert(cursor.hasNext());
- assert.docEq({_id: "A", foreignDocs: {_id: 2}}, cursor.next());
- assert(!cursor.hasNext());
+assert(cursor.hasNext());
+assert.docEq({_id: "A", foreignDocs: {_id: 2}}, cursor.next());
+assert(!cursor.hasNext());
})();
diff --git a/jstests/aggregation/sources/lookup/lookup_sort_limit.js b/jstests/aggregation/sources/lookup/lookup_sort_limit.js
index 3633852615b..121a3c43f04 100644
--- a/jstests/aggregation/sources/lookup/lookup_sort_limit.js
+++ b/jstests/aggregation/sources/lookup/lookup_sort_limit.js
@@ -5,24 +5,24 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
- const testDB = db.getSiblingDB("lookup_sort_limit");
- testDB.dropDatabase();
+const testDB = db.getSiblingDB("lookup_sort_limit");
+testDB.dropDatabase();
- const localColl = testDB.getCollection("local");
- const fromColl = testDB.getCollection("from");
+const localColl = testDB.getCollection("local");
+const fromColl = testDB.getCollection("from");
- const bulk = fromColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; i++) {
- bulk.insert({_id: i, foreignField: i});
- }
- assert.commandWorked(bulk.execute());
- assert.commandWorked(localColl.insert({_id: 0}));
+const bulk = fromColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 10; i++) {
+ bulk.insert({_id: i, foreignField: i});
+}
+assert.commandWorked(bulk.execute());
+assert.commandWorked(localColl.insert({_id: 0}));
- let res = localColl
+let res = localColl
.aggregate([{
$lookup: {
from: fromColl.getName(),
@@ -33,10 +33,10 @@
}])
.toArray();
- assert.eq({_id: 0, result: [{_id: 0, foreignField: 0}]}, res[0]);
+assert.eq({_id: 0, result: [{_id: 0, foreignField: 0}]}, res[0]);
- // Run a similar test except with a sort that cannot be covered with an index scan.
- res = localColl
+// Run a similar test except with a sort that cannot be covered with an index scan.
+res = localColl
.aggregate([{
$lookup: {
from: fromColl.getName(),
@@ -47,6 +47,5 @@
}])
.toArray();
- assert.eq({_id: 0, result: [{_id: 9, foreignField: 9}]}, res[0]);
-
+assert.eq({_id: 0, result: [{_id: 9, foreignField: 9}]}, res[0]);
}());
diff --git a/jstests/aggregation/sources/lookup/lookup_subpipeline.js b/jstests/aggregation/sources/lookup/lookup_subpipeline.js
index 39d2ff0d850..d9933c869cf 100644
--- a/jstests/aggregation/sources/lookup/lookup_subpipeline.js
+++ b/jstests/aggregation/sources/lookup/lookup_subpipeline.js
@@ -1,58 +1,58 @@
// Tests for the $lookup stage with a sub-pipeline.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const testName = "lookup_subpipeline";
+const testName = "lookup_subpipeline";
- const coll = db.lookUp;
- const from = db.from;
- const thirdColl = db.thirdColl;
- const fourthColl = db.fourthColl;
+const coll = db.lookUp;
+const from = db.from;
+const thirdColl = db.thirdColl;
+const fourthColl = db.fourthColl;
- // Used by testPipeline to sort result documents. All _ids must be primitives.
- function compareId(a, b) {
- if (a._id < b._id) {
- return -1;
- }
- if (a._id > b._id) {
- return 1;
- }
- return 0;
+// Used by testPipeline to sort result documents. All _ids must be primitives.
+function compareId(a, b) {
+ if (a._id < b._id) {
+ return -1;
}
-
- function generateNestedPipeline(foreignCollName, numLevels) {
- let pipeline = [{"$lookup": {pipeline: [], from: foreignCollName, as: "same"}}];
-
- for (let level = 1; level < numLevels; level++) {
- pipeline = [{"$lookup": {pipeline: pipeline, from: foreignCollName, as: "same"}}];
- }
-
- return pipeline;
+ if (a._id > b._id) {
+ return 1;
}
+ return 0;
+}
- // Helper for testing that pipeline returns correct set of results.
- function testPipeline(pipeline, expectedResult, collection) {
- assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
- expectedResult.sort(compareId));
+function generateNestedPipeline(foreignCollName, numLevels) {
+ let pipeline = [{"$lookup": {pipeline: [], from: foreignCollName, as: "same"}}];
+
+ for (let level = 1; level < numLevels; level++) {
+ pipeline = [{"$lookup": {pipeline: pipeline, from: foreignCollName, as: "same"}}];
}
- //
- // Pipeline syntax using 'let' variables.
- //
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, x: 1}));
- assert.writeOK(coll.insert({_id: 2, x: 2}));
- assert.writeOK(coll.insert({_id: 3, x: 3}));
-
- from.drop();
- assert.writeOK(from.insert({_id: 1}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3}));
-
- // Basic non-equi theta join via $project.
- let pipeline = [
+ return pipeline;
+}
+
+// Helper for testing that pipeline returns correct set of results.
+function testPipeline(pipeline, expectedResult, collection) {
+ assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
+ expectedResult.sort(compareId));
+}
+
+//
+// Pipeline syntax using 'let' variables.
+//
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, x: 1}));
+assert.writeOK(coll.insert({_id: 2, x: 2}));
+assert.writeOK(coll.insert({_id: 3, x: 3}));
+
+from.drop();
+assert.writeOK(from.insert({_id: 1}));
+assert.writeOK(from.insert({_id: 2}));
+assert.writeOK(from.insert({_id: 3}));
+
+// Basic non-equi theta join via $project.
+let pipeline = [
{
$lookup: {
let : {var1: "$_id"},
@@ -67,23 +67,23 @@
},
];
- let expectedResults = [
- {"_id": 1, x: 1, "c": []},
- {"_id": 2, x: 2, "c": [{"_id": 1}]},
- {
- "_id": 3,
- x: 3,
- "c": [
- {"_id": 1},
- {
+let expectedResults = [
+ {"_id": 1, x: 1, "c": []},
+ {"_id": 2, x: 2, "c": [{"_id": 1}]},
+ {
+ "_id": 3,
+ x: 3,
+ "c": [
+ {"_id": 1},
+ {
"_id": 2,
- }
- ]
- }
- ];
- testPipeline(pipeline, expectedResults, coll);
- // Basic non-equi theta join via $match.
- pipeline = [
+ }
+ ]
+ }
+];
+testPipeline(pipeline, expectedResults, coll);
+// Basic non-equi theta join via $match.
+pipeline = [
{
$lookup: {
let : {var1: "$_id"},
@@ -96,24 +96,24 @@
},
];
- expectedResults = [
- {"_id": 1, x: 1, "c": []},
- {"_id": 2, x: 2, "c": [{"_id": 1}]},
- {
- "_id": 3,
- x: 3,
- "c": [
- {"_id": 1},
- {
+expectedResults = [
+ {"_id": 1, x: 1, "c": []},
+ {"_id": 2, x: 2, "c": [{"_id": 1}]},
+ {
+ "_id": 3,
+ x: 3,
+ "c": [
+ {"_id": 1},
+ {
"_id": 2,
- }
- ]
- }
- ];
- testPipeline(pipeline, expectedResults, coll);
+ }
+ ]
+ }
+];
+testPipeline(pipeline, expectedResults, coll);
- // Multi-level join using $match.
- pipeline = [
+// Multi-level join using $match.
+pipeline = [
{
$lookup: {
let : {var1: "$_id"},
@@ -134,15 +134,15 @@
},
];
- expectedResults = [
- {"_id": 1, "x": 1, "c": [{"_id": 1, "d": [{"_id": 2}, {"_id": 3}]}]},
- {"_id": 2, "x": 2, "c": [{"_id": 2, "d": [{"_id": 3}]}]},
- {"_id": 3, "x": 3, "c": [{"_id": 3, "d": []}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [
+ {"_id": 1, "x": 1, "c": [{"_id": 1, "d": [{"_id": 2}, {"_id": 3}]}]},
+ {"_id": 2, "x": 2, "c": [{"_id": 2, "d": [{"_id": 3}]}]},
+ {"_id": 3, "x": 3, "c": [{"_id": 3, "d": []}]}
+];
+testPipeline(pipeline, expectedResults, coll);
- // Equijoin with $match that can't be delegated to the query subsystem.
- pipeline = [
+// Equijoin with $match that can't be delegated to the query subsystem.
+pipeline = [
{
$lookup: {
let : {var1: "$x"},
@@ -158,15 +158,15 @@
},
];
- expectedResults = [
- {"_id": 1, "x": 1, "c": []},
- {"_id": 2, "x": 2, "c": [{"_id": 1}, {"_id": 2}, {"_id": 3}]},
- {"_id": 3, "x": 3, "c": []}
- ];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [
+ {"_id": 1, "x": 1, "c": []},
+ {"_id": 2, "x": 2, "c": [{"_id": 1}, {"_id": 2}, {"_id": 3}]},
+ {"_id": 3, "x": 3, "c": []}
+];
+testPipeline(pipeline, expectedResults, coll);
- // Multiple variables.
- pipeline = [
+// Multiple variables.
+pipeline = [
{
$lookup: {
let : {var1: "$_id", var2: "$x"},
@@ -187,15 +187,15 @@
{$project: {x: 1, c: 1}}
];
- expectedResults = [
- {"_id": 1, x: 1, "c": []},
- {"_id": 2, x: 2, "c": [{"_id": 1, var2Times2: 4}]},
- {"_id": 3, x: 3, "c": [{"_id": 1, var2Times2: 6}, {"_id": 2, var2Times2: 6}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [
+ {"_id": 1, x: 1, "c": []},
+ {"_id": 2, x: 2, "c": [{"_id": 1, var2Times2: 4}]},
+ {"_id": 3, x: 3, "c": [{"_id": 1, var2Times2: 6}, {"_id": 2, var2Times2: 6}]}
+];
+testPipeline(pipeline, expectedResults, coll);
- // Let var as complex expression object.
- pipeline = [
+// Let var as complex expression object.
+pipeline = [
{
$lookup: {
let : {var1: {$mod: ["$x", 3]}},
@@ -208,39 +208,39 @@
},
];
- expectedResults = [
- {
- "_id": 1,
- x: 1,
- "c": [
- {_id: 1, var1Mod3TimesForeignId: 1},
- {_id: 2, var1Mod3TimesForeignId: 2},
- {_id: 3, var1Mod3TimesForeignId: 3}
- ]
- },
- {
- "_id": 2,
- x: 2,
- "c": [
- {_id: 1, var1Mod3TimesForeignId: 2},
- {_id: 2, var1Mod3TimesForeignId: 4},
- {_id: 3, var1Mod3TimesForeignId: 6}
- ]
- },
- {
- "_id": 3,
- x: 3,
- "c": [
- {_id: 1, var1Mod3TimesForeignId: 0},
- {_id: 2, var1Mod3TimesForeignId: 0},
- {_id: 3, var1Mod3TimesForeignId: 0}
- ]
- }
- ];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [
+ {
+ "_id": 1,
+ x: 1,
+ "c": [
+ {_id: 1, var1Mod3TimesForeignId: 1},
+ {_id: 2, var1Mod3TimesForeignId: 2},
+ {_id: 3, var1Mod3TimesForeignId: 3}
+ ]
+ },
+ {
+ "_id": 2,
+ x: 2,
+ "c": [
+ {_id: 1, var1Mod3TimesForeignId: 2},
+ {_id: 2, var1Mod3TimesForeignId: 4},
+ {_id: 3, var1Mod3TimesForeignId: 6}
+ ]
+ },
+ {
+ "_id": 3,
+ x: 3,
+ "c": [
+ {_id: 1, var1Mod3TimesForeignId: 0},
+ {_id: 2, var1Mod3TimesForeignId: 0},
+ {_id: 3, var1Mod3TimesForeignId: 0}
+ ]
+ }
+];
+testPipeline(pipeline, expectedResults, coll);
- // 'let' defined variables are available to all nested sub-pipelines.
- pipeline = [
+// 'let' defined variables are available to all nested sub-pipelines.
+pipeline = [
{$match: {_id: 1}},
{
$lookup: {
@@ -277,26 +277,26 @@
}
];
- expectedResults = [{
+expectedResults = [{
+ "_id": 1,
+ "x": 1,
+ "join1": [{
"_id": 1,
- "x": 1,
- "join1": [{
- "_id": 1,
- "join2": [{
- "_id": 2,
- "letVar1": "ABC",
- "join3": [
- {"_id": 1, "mergedLetVars": "ABC123XYZ"},
- {"_id": 2, "mergedLetVars": "ABC123XYZ"},
- {"_id": 3, "mergedLetVars": "ABC123XYZ"}
- ]
- }]
+ "join2": [{
+ "_id": 2,
+ "letVar1": "ABC",
+ "join3": [
+ {"_id": 1, "mergedLetVars": "ABC123XYZ"},
+ {"_id": 2, "mergedLetVars": "ABC123XYZ"},
+ {"_id": 3, "mergedLetVars": "ABC123XYZ"}
+ ]
}]
- }];
- testPipeline(pipeline, expectedResults, coll);
+ }]
+}];
+testPipeline(pipeline, expectedResults, coll);
- // 'let' variable shadowed by foreign pipeline variable.
- pipeline = [
+// 'let' variable shadowed by foreign pipeline variable.
+pipeline = [
{$match: {_id: 2}},
{
$lookup: {
@@ -327,46 +327,46 @@
}
];
- expectedResults = [{
- "_id": 2,
- "x": 2,
- "c": [
- {
- "_id": 1,
- "shadowedVar": "abc",
- "originalVar": 2,
- "d": [
- {"_id": 1, "shadowedVar": "xyz", "originalVar": 2},
- {"_id": 2, "shadowedVar": "xyz", "originalVar": 2},
- {"_id": 3, "shadowedVar": "xyz", "originalVar": 2}
- ]
- },
- {
- "_id": 2,
- "shadowedVar": "abc",
- "originalVar": 2,
- "d": [
- {"_id": 1, "shadowedVar": "xyz", "originalVar": 2},
- {"_id": 2, "shadowedVar": "xyz", "originalVar": 2},
- {"_id": 3, "shadowedVar": "xyz", "originalVar": 2}
- ]
- },
- {
- "_id": 3,
- "shadowedVar": "abc",
- "originalVar": 2,
- "d": [
- {"_id": 1, "shadowedVar": "xyz", "originalVar": 2},
- {"_id": 2, "shadowedVar": "xyz", "originalVar": 2},
- {"_id": 3, "shadowedVar": "xyz", "originalVar": 2}
- ]
- }
- ]
- }];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [{
+ "_id": 2,
+ "x": 2,
+ "c": [
+ {
+ "_id": 1,
+ "shadowedVar": "abc",
+ "originalVar": 2,
+ "d": [
+ {"_id": 1, "shadowedVar": "xyz", "originalVar": 2},
+ {"_id": 2, "shadowedVar": "xyz", "originalVar": 2},
+ {"_id": 3, "shadowedVar": "xyz", "originalVar": 2}
+ ]
+ },
+ {
+ "_id": 2,
+ "shadowedVar": "abc",
+ "originalVar": 2,
+ "d": [
+ {"_id": 1, "shadowedVar": "xyz", "originalVar": 2},
+ {"_id": 2, "shadowedVar": "xyz", "originalVar": 2},
+ {"_id": 3, "shadowedVar": "xyz", "originalVar": 2}
+ ]
+ },
+ {
+ "_id": 3,
+ "shadowedVar": "abc",
+ "originalVar": 2,
+ "d": [
+ {"_id": 1, "shadowedVar": "xyz", "originalVar": 2},
+ {"_id": 2, "shadowedVar": "xyz", "originalVar": 2},
+ {"_id": 3, "shadowedVar": "xyz", "originalVar": 2}
+ ]
+ }
+ ]
+}];
+testPipeline(pipeline, expectedResults, coll);
- // Use of undefined variable fails.
- assertErrorCode(coll,
+// Use of undefined variable fails.
+assertErrorCode(coll,
[{
$lookup: {
from: "from",
@@ -376,11 +376,11 @@
}
}],
17276);
- assertErrorCode(
- coll,
- [{$lookup: {let : {var1: 1, var2: "$$var1"}, pipeline: [], from: "from", as: "as"}}],
- 17276);
- assertErrorCode(coll,
+assertErrorCode(
+ coll,
+ [{$lookup: {let : {var1: 1, var2: "$$var1"}, pipeline: [], from: "from", as: "as"}}],
+ 17276);
+assertErrorCode(coll,
[{
$lookup: {
let : {
@@ -394,9 +394,9 @@
}],
17276);
-    // The dotted path offset of a non-object variable is equivalent to referencing an undefined
- // field.
- pipeline = [
+// The dotted path offset of a non-object variable is equivalent to referencing an undefined
+// field.
+pipeline = [
{
$lookup: {
let : {var1: "$x"},
@@ -416,18 +416,18 @@
{$sort: {x: 1}}
];
- expectedResults = [
- {"x": 1, "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]},
- {"x": 2, "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]},
- {"x": 3, "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [
+ {"x": 1, "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]},
+ {"x": 2, "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]},
+ {"x": 3, "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]}
+];
+testPipeline(pipeline, expectedResults, coll);
- // Comparison where a 'let' variable references an array.
- coll.drop();
- assert.writeOK(coll.insert({x: [1, 2, 3]}));
+// Comparison where a 'let' variable references an array.
+coll.drop();
+assert.writeOK(coll.insert({x: [1, 2, 3]}));
- pipeline = [
+pipeline = [
{
$lookup: {
let : {var1: "$x"},
@@ -441,17 +441,17 @@
{$project: {_id: 0}}
];
- expectedResults = [{"x": [1, 2, 3], "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]}];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [{"x": [1, 2, 3], "as": [{"_id": 1}, {"_id": 2}, {"_id": 3}]}];
+testPipeline(pipeline, expectedResults, coll);
- //
- // Pipeline syntax with nested object.
- //
- coll.drop();
- assert.writeOK(coll.insert({x: {y: {z: 10}}}));
+//
+// Pipeline syntax with nested object.
+//
+coll.drop();
+assert.writeOK(coll.insert({x: {y: {z: 10}}}));
- // Subfields of 'let' variables can be referenced via dotted path.
- pipeline = [
+// Subfields of 'let' variables can be referenced via dotted path.
+pipeline = [
{
$lookup: {
let : {var1: "$x"},
@@ -465,14 +465,13 @@
{$project: {_id: 0}}
];
- expectedResults = [{
- "x": {"y": {"z": 10}},
- "as": [{"_id": 1, "z": 10}, {"_id": 2, "z": 10}, {"_id": 3, "z": 10}]
- }];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [
+ {"x": {"y": {"z": 10}}, "as": [{"_id": 1, "z": 10}, {"_id": 2, "z": 10}, {"_id": 3, "z": 10}]}
+];
+testPipeline(pipeline, expectedResults, coll);
- // 'let' variable with dotted field path off of $$ROOT.
- pipeline = [
+// 'let' variable with dotted field path off of $$ROOT.
+pipeline = [
{
$lookup: {
let : {var1: "$$ROOT.x.y.z"},
@@ -485,11 +484,11 @@
{$project: {_id: 0}}
];
- expectedResults = [{"x": {"y": {"z": 10}}, "as": [{"x": {"y": {"z": 10}}}]}];
- testPipeline(pipeline, expectedResults, coll);
+expectedResults = [{"x": {"y": {"z": 10}}, "as": [{"x": {"y": {"z": 10}}}]}];
+testPipeline(pipeline, expectedResults, coll);
- // 'let' variable with dotted field path off of $$CURRENT.
- pipeline = [
+// 'let' variable with dotted field path off of $$CURRENT.
+pipeline = [
{
$lookup: {
let : {var1: "$$CURRENT.x.y.z"},
@@ -502,34 +501,34 @@
{$project: {_id: 0}}
];
- expectedResults = [{"x": {"y": {"z": 10}}, "as": [{"x": {"y": {"z": 10}}}]}];
- testPipeline(pipeline, expectedResults, coll);
-
- //
- // Pipeline syntax with nested $lookup.
- //
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, w: 1}));
- assert.writeOK(coll.insert({_id: 2, w: 2}));
- assert.writeOK(coll.insert({_id: 3, w: 3}));
-
- from.drop();
- assert.writeOK(from.insert({_id: 1, x: 1}));
- assert.writeOK(from.insert({_id: 2, x: 2}));
- assert.writeOK(from.insert({_id: 3, x: 3}));
-
- thirdColl.drop();
- assert.writeOK(thirdColl.insert({_id: 1, y: 1}));
- assert.writeOK(thirdColl.insert({_id: 2, y: 2}));
- assert.writeOK(thirdColl.insert({_id: 3, y: 3}));
-
- fourthColl.drop();
- assert.writeOK(fourthColl.insert({_id: 1, z: 1}));
- assert.writeOK(fourthColl.insert({_id: 2, z: 2}));
- assert.writeOK(fourthColl.insert({_id: 3, z: 3}));
-
- // Nested $lookup pipeline.
- pipeline = [
+expectedResults = [{"x": {"y": {"z": 10}}, "as": [{"x": {"y": {"z": 10}}}]}];
+testPipeline(pipeline, expectedResults, coll);
+
+//
+// Pipeline syntax with nested $lookup.
+//
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, w: 1}));
+assert.writeOK(coll.insert({_id: 2, w: 2}));
+assert.writeOK(coll.insert({_id: 3, w: 3}));
+
+from.drop();
+assert.writeOK(from.insert({_id: 1, x: 1}));
+assert.writeOK(from.insert({_id: 2, x: 2}));
+assert.writeOK(from.insert({_id: 3, x: 3}));
+
+thirdColl.drop();
+assert.writeOK(thirdColl.insert({_id: 1, y: 1}));
+assert.writeOK(thirdColl.insert({_id: 2, y: 2}));
+assert.writeOK(thirdColl.insert({_id: 3, y: 3}));
+
+fourthColl.drop();
+assert.writeOK(fourthColl.insert({_id: 1, z: 1}));
+assert.writeOK(fourthColl.insert({_id: 2, z: 2}));
+assert.writeOK(fourthColl.insert({_id: 3, z: 3}));
+
+// Nested $lookup pipeline.
+pipeline = [
{$match: {_id: 1}},
{
$lookup: {
@@ -560,61 +559,55 @@
}
];
- expectedResults = [{
- "_id": 1,
- "w": 1,
- "firstLookup": [{
- "_id": 2,
- x: 2, "secondLookup": [{"_id": 3, y: 3, "thirdLookup": [{_id: 1, z: 1}]}]
- }]
- }];
- testPipeline(pipeline, expectedResults, coll);
-
- // Deeply nested $lookup pipeline. Confirm that we can execute an aggregation with nested
- // $lookup sub-pipelines up to the maximum depth, but not beyond.
- let nestedPipeline = generateNestedPipeline("lookup", 20);
- assert.commandWorked(
- coll.getDB().runCommand({aggregate: coll.getName(), pipeline: nestedPipeline, cursor: {}}));
-
- nestedPipeline = generateNestedPipeline("lookup", 21);
- assertErrorCode(coll, nestedPipeline, ErrorCodes.MaxSubPipelineDepthExceeded);
-
- // Confirm that maximum $lookup sub-pipeline depth is respected when aggregating views whose
- // combined nesting depth exceeds the limit.
- nestedPipeline = generateNestedPipeline("lookup", 10);
- coll.getDB().view1.drop();
- assert.commandWorked(
- coll.getDB().runCommand({create: "view1", viewOn: "lookup", pipeline: nestedPipeline}));
-
- nestedPipeline = generateNestedPipeline("view1", 10);
- coll.getDB().view2.drop();
- assert.commandWorked(
- coll.getDB().runCommand({create: "view2", viewOn: "view1", pipeline: nestedPipeline}));
-
- // Confirm that a composite sub-pipeline depth of 20 is allowed.
- assert.commandWorked(coll.getDB().runCommand({aggregate: "view2", pipeline: [], cursor: {}}));
-
- const pipelineWhichExceedsNestingLimit = generateNestedPipeline("view2", 1);
- coll.getDB().view3.drop();
- assert.commandWorked(coll.getDB().runCommand(
- {create: "view3", viewOn: "view2", pipeline: pipelineWhichExceedsNestingLimit}));
-
- //
- // Error cases.
- //
-
- // Confirm that a composite sub-pipeline depth greater than 20 fails.
- assertErrorCode(coll.getDB().view3, [], ErrorCodes.MaxSubPipelineDepthExceeded);
-
- // 'pipeline' and 'let' must be of expected type.
- assertErrorCode(
- coll, [{$lookup: {pipeline: 1, from: "from", as: "as"}}], ErrorCodes.TypeMismatch);
- assertErrorCode(
- coll, [{$lookup: {pipeline: {}, from: "from", as: "as"}}], ErrorCodes.TypeMismatch);
- assertErrorCode(coll,
- [{$lookup: {let : 1, pipeline: [], from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {let : [], pipeline: [], from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
+expectedResults = [{
+ "_id": 1,
+ "w": 1,
+ "firstLookup":
+ [{"_id": 2, x: 2, "secondLookup": [{"_id": 3, y: 3, "thirdLookup": [{_id: 1, z: 1}]}]}]
+}];
+testPipeline(pipeline, expectedResults, coll);
+
+// Deeply nested $lookup pipeline. Confirm that we can execute an aggregation with nested
+// $lookup sub-pipelines up to the maximum depth, but not beyond.
+let nestedPipeline = generateNestedPipeline("lookup", 20);
+assert.commandWorked(
+ coll.getDB().runCommand({aggregate: coll.getName(), pipeline: nestedPipeline, cursor: {}}));
+
+nestedPipeline = generateNestedPipeline("lookup", 21);
+assertErrorCode(coll, nestedPipeline, ErrorCodes.MaxSubPipelineDepthExceeded);
+
+// Confirm that maximum $lookup sub-pipeline depth is respected when aggregating views whose
+// combined nesting depth exceeds the limit.
+nestedPipeline = generateNestedPipeline("lookup", 10);
+coll.getDB().view1.drop();
+assert.commandWorked(
+ coll.getDB().runCommand({create: "view1", viewOn: "lookup", pipeline: nestedPipeline}));
+
+nestedPipeline = generateNestedPipeline("view1", 10);
+coll.getDB().view2.drop();
+assert.commandWorked(
+ coll.getDB().runCommand({create: "view2", viewOn: "view1", pipeline: nestedPipeline}));
+
+// Confirm that a composite sub-pipeline depth of 20 is allowed.
+assert.commandWorked(coll.getDB().runCommand({aggregate: "view2", pipeline: [], cursor: {}}));
+
+const pipelineWhichExceedsNestingLimit = generateNestedPipeline("view2", 1);
+coll.getDB().view3.drop();
+assert.commandWorked(coll.getDB().runCommand(
+ {create: "view3", viewOn: "view2", pipeline: pipelineWhichExceedsNestingLimit}));
+
+//
+// Error cases.
+//
+
+// Confirm that a composite sub-pipeline depth greater than 20 fails.
+assertErrorCode(coll.getDB().view3, [], ErrorCodes.MaxSubPipelineDepthExceeded);
+
+// 'pipeline' and 'let' must be of expected type.
+assertErrorCode(coll, [{$lookup: {pipeline: 1, from: "from", as: "as"}}], ErrorCodes.TypeMismatch);
+assertErrorCode(coll, [{$lookup: {pipeline: {}, from: "from", as: "as"}}], ErrorCodes.TypeMismatch);
+assertErrorCode(
+ coll, [{$lookup: {let : 1, pipeline: [], from: "from", as: "as"}}], ErrorCodes.FailedToParse);
+assertErrorCode(
+ coll, [{$lookup: {let : [], pipeline: [], from: "from", as: "as"}}], ErrorCodes.FailedToParse);
}());
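
The hunks above elide most of the $lookup stage bodies. For reference while reading the diff, a
minimal sketch of the 'let'/'pipeline' syntax these tests exercise; the collection names here are
illustrative and not part of the change:

    // Bind a local field to a variable, then reference it inside the sub-pipeline via "$$".
    db.orders.drop();
    db.items.drop();
    assert.commandWorked(db.orders.insert({_id: 1, item: "a"}));
    assert.commandWorked(db.items.insert({_id: "a", stock: 5}));
    db.orders.aggregate([{
        $lookup: {
            from: "items",
            let : {i: "$item"},
            pipeline: [{$match: {$expr: {$eq: ["$_id", "$$i"]}}}],
            as: "joined"
        }
    }]);
    // Returns {_id: 1, item: "a", joined: [{_id: "a", stock: 5}]}.
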
diff --git a/jstests/aggregation/sources/lookup/lookup_subpipeline_geonear.js b/jstests/aggregation/sources/lookup/lookup_subpipeline_geonear.js
index 185e46bfb10..2043a298779 100644
--- a/jstests/aggregation/sources/lookup/lookup_subpipeline_geonear.js
+++ b/jstests/aggregation/sources/lookup/lookup_subpipeline_geonear.js
@@ -2,29 +2,29 @@
// TODO: Reenable test on passthroughs with sharded collections as part of SERVER-38995.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- const coll = db.lookup_subpipeline_geonear;
- const from = db.from;
+const coll = db.lookup_subpipeline_geonear;
+const from = db.from;
- coll.drop();
- assert.commandWorked(coll.insert({_id: 4, x: 4}));
+coll.drop();
+assert.commandWorked(coll.insert({_id: 4, x: 4}));
- from.drop();
+from.drop();
- // Create geospatial index for field 'geo' on 'from'.
- assert.commandWorked(from.createIndex({geo: "2dsphere"}));
+// Create geospatial index for field 'geo' on 'from'.
+assert.commandWorked(from.createIndex({geo: "2dsphere"}));
- // Insert one matching document in 'from'.
- assert.commandWorked(from.insert({_id: 1, geo: [0, 0]}));
+// Insert one matching document in 'from'.
+assert.commandWorked(from.insert({_id: 1, geo: [0, 0]}));
- const geonearPipeline = [
- {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
- ];
+const geonearPipeline = [
+ {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
+];
- assert.eq(from.aggregate(geonearPipeline).itcount(), 1);
+assert.eq(from.aggregate(geonearPipeline).itcount(), 1);
- let pipeline = [
+let pipeline = [
{
$lookup: {
pipeline: geonearPipeline,
@@ -34,6 +34,6 @@
},
];
- assert.eq(coll.aggregate(pipeline).toArray(),
- [{"_id": 4, "x": 4, "c": [{"_id": 1, "geo": [0, 0], "distance": 0}]}]);
+assert.eq(coll.aggregate(pipeline).toArray(),
+ [{"_id": 4, "x": 4, "c": [{"_id": 1, "geo": [0, 0], "distance": 0}]}]);
}());
\ No newline at end of file
diff --git a/jstests/aggregation/sources/lookup/profile_lookup.js b/jstests/aggregation/sources/lookup/profile_lookup.js
index a6c07b910a0..f2c9df8331c 100644
--- a/jstests/aggregation/sources/lookup/profile_lookup.js
+++ b/jstests/aggregation/sources/lookup/profile_lookup.js
@@ -3,38 +3,38 @@
// Tests that profiled $lookups contain the correct namespace and that Top is updated accordingly.
(function() {
- "use strict";
+"use strict";
- const localColl = db.local;
- const foreignColl = db.foreign;
- localColl.drop();
- foreignColl.drop();
+const localColl = db.local;
+const foreignColl = db.foreign;
+localColl.drop();
+foreignColl.drop();
- assert.commandWorked(localColl.insert([{a: 1}, {b: 1}, {a: 2}]));
- assert.commandWorked(foreignColl.insert({a: 1}));
+assert.commandWorked(localColl.insert([{a: 1}, {b: 1}, {a: 2}]));
+assert.commandWorked(foreignColl.insert({a: 1}));
- db.system.profile.drop();
- db.setProfilingLevel(2);
+db.system.profile.drop();
+db.setProfilingLevel(2);
- let oldTop = db.adminCommand("top");
+let oldTop = db.adminCommand("top");
- localColl.aggregate(
- [{$lookup: {from: foreignColl.getName(), as: "res", localField: "a", foreignField: "a"}}]);
+localColl.aggregate(
+ [{$lookup: {from: foreignColl.getName(), as: "res", localField: "a", foreignField: "a"}}]);
- db.setProfilingLevel(0);
+db.setProfilingLevel(0);
- // Confirm that namespace is the local rather than foreign collection.
- let profileDoc = db.system.profile.findOne();
- assert.eq("test.local", profileDoc.ns);
+// Confirm that namespace is the local rather than foreign collection.
+let profileDoc = db.system.profile.findOne();
+assert.eq("test.local", profileDoc.ns);
- // Confirm that the local collection had one command added to Top.
- let newTop = db.adminCommand("top");
- assert.eq(1,
- newTop.totals[localColl.getFullName()].commands.count -
- oldTop.totals[localColl.getFullName()].commands.count);
+// Confirm that the local collection had one command added to Top.
+let newTop = db.adminCommand("top");
+assert.eq(1,
+ newTop.totals[localColl.getFullName()].commands.count -
+ oldTop.totals[localColl.getFullName()].commands.count);
- // Confirm that for each document in local, the foreign collection had one entry added to Top.
- assert.eq(3,
- newTop.totals[foreignColl.getFullName()].commands.count -
- oldTop.totals[foreignColl.getFullName()].commands.count);
+// Confirm that for each document in local, the foreign collection had one entry added to Top.
+assert.eq(3,
+ newTop.totals[foreignColl.getFullName()].commands.count -
+ oldTop.totals[foreignColl.getFullName()].commands.count);
}());
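
The Top assertions above rely on $lookup issuing one read against the foreign namespace per local
document; with three local documents, the foreign collection's command count grows by three. A
sketch for inspecting the same counters by hand (the "test.local" namespace string is an
assumption matching this test's database):

    const top = db.adminCommand("top");
    // Each namespace entry in top.totals carries cumulative {time, count} pairs per operation.
    printjson(top.totals["test.local"].commands);
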
diff --git a/jstests/aggregation/sources/match/collation_match.js b/jstests/aggregation/sources/match/collation_match.js
index bc9456898f9..8c8c225f66d 100644
--- a/jstests/aggregation/sources/match/collation_match.js
+++ b/jstests/aggregation/sources/match/collation_match.js
@@ -3,47 +3,46 @@
// Test that the $match stage respects the collation.
(function() {
- "use strict";
-
- var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
-
- var coll = db.collation_match;
- coll.drop();
- assert.writeOK(coll.insert({a: "a"}));
-
- // Test that the $match respects an explicit collation when it can be pushed down into the query
- // layer.
- assert.eq(1, coll.aggregate([{$match: {a: "A"}}], caseInsensitive).itcount());
-
- // Test that the $match respects an explicit collation when it cannot be pushed down into the
- // query layer.
- assert.eq(
- 1, coll.aggregate([{$project: {b: "B"}}, {$match: {b: "b"}}], caseInsensitive).itcount());
-
- // Test that $match inside a $facet respects the collation.
- assert.eq(1,
- coll.aggregate([{$facet: {fct: [{$match: {a: "A"}}]}}], caseInsensitive)
- .toArray()[0]
- .fct.length);
-
-    // Test that when a $match can be split into a part before the $unwind and a part after,
-    // both pieces of the split respect the collation.
- coll.drop();
- assert.writeOK(coll.insert({a: "foo", b: ["bar"]}));
- assert.eq(1,
- coll.aggregate([{$limit: 1}, {$unwind: "$b"}, {$match: {a: "FOO", b: "BAR"}}],
- caseInsensitive)
- .itcount());
-
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
- assert.writeOK(coll.insert({a: "a"}));
-
- // Test that the $match respects the inherited collation when it can be pushed down into the
- // query layer.
- assert.eq(1, coll.aggregate([{$match: {a: "A"}}]).itcount());
-
- // Test that the $match respects the inherited collation when it cannot be pushed down into the
- // query layer.
- assert.eq(1, coll.aggregate([{$project: {b: "B"}}, {$match: {b: "b"}}]).itcount());
+"use strict";
+
+var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
+
+var coll = db.collation_match;
+coll.drop();
+assert.writeOK(coll.insert({a: "a"}));
+
+// Test that the $match respects an explicit collation when it can be pushed down into the query
+// layer.
+assert.eq(1, coll.aggregate([{$match: {a: "A"}}], caseInsensitive).itcount());
+
+// Test that the $match respects an explicit collation when it cannot be pushed down into the
+// query layer.
+assert.eq(1, coll.aggregate([{$project: {b: "B"}}, {$match: {b: "b"}}], caseInsensitive).itcount());
+
+// Test that $match inside a $facet respects the collation.
+assert.eq(1,
+ coll.aggregate([{$facet: {fct: [{$match: {a: "A"}}]}}], caseInsensitive)
+ .toArray()[0]
+ .fct.length);
+
+// Test that when a $match can be split into a part before the $unwind and a part after, both
+// pieces of the split respect the collation.
+coll.drop();
+assert.writeOK(coll.insert({a: "foo", b: ["bar"]}));
+assert.eq(
+ 1,
+ coll.aggregate([{$limit: 1}, {$unwind: "$b"}, {$match: {a: "FOO", b: "BAR"}}], caseInsensitive)
+ .itcount());
+
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.writeOK(coll.insert({a: "a"}));
+
+// Test that the $match respects the inherited collation when it can be pushed down into the
+// query layer.
+assert.eq(1, coll.aggregate([{$match: {a: "A"}}]).itcount());
+
+// Test that the $match respects the inherited collation when it cannot be pushed down into the
+// query layer.
+assert.eq(1, coll.aggregate([{$project: {b: "B"}}, {$match: {b: "b"}}]).itcount());
})();
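
A note on the collation spec used throughout this test: strength: 2 requests secondary-strength
comparisons, which ignore case but still distinguish base letters and diacritics; strength: 1
would ignore diacritics as well, and the default strength of 3 is case-sensitive. A minimal
interactive check, using an illustrative scratch collection:

    db.scratch.drop();
    assert.writeOK(db.scratch.insert({a: "a"}));
    // The query matches despite the case difference under the case-insensitive collation.
    assert.eq(1, db.scratch.find({a: "A"}).collation({locale: "en_US", strength: 2}).itcount());
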
diff --git a/jstests/aggregation/sources/match/expr_match.js b/jstests/aggregation/sources/match/expr_match.js
index b2627c963cc..8e2541958db 100644
--- a/jstests/aggregation/sources/match/expr_match.js
+++ b/jstests/aggregation/sources/match/expr_match.js
@@ -1,49 +1,44 @@
// Basic testing to confirm that the $match stage handles $expr correctly.
(function() {
- "use strict";
-
- const coll = db.expr_match;
- coll.drop();
- assert.writeOK(coll.insert({x: 0}));
- assert.writeOK(coll.insert({x: 1, y: 1}));
- assert.writeOK(coll.insert({x: 2, y: 4}));
- assert.writeOK(coll.insert({x: 3, y: 9}));
-
- // $match with $expr representing local document field path reference.
- assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$x", 2]}}}]).itcount());
- assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$x", "$y"]}}}]).itcount());
- assert.eq(3, coll.aggregate([{$match: {$expr: {$eq: ["$x", {$sqrt: "$y"}]}}}]).itcount());
-
- // $match with $expr containing $or and $and.
- assert.eq(
- 2,
- coll.aggregate([{
- $match: {
- $expr:
- {$or: [{$eq: ["$x", 3]}, {$and: [{$eq: ["$x", 2]}, {$eq: ["$y", 4]}]}]}
- }
- }])
- .itcount());
-
- // $match $expr containing $in.
- assert.eq(3,
- coll.aggregate([{$match: {$expr: {$in: ["$x", [1, {$mod: [4, 2]}, 3]]}}}]).itcount());
-
- // $match with constant expression and field path.
- assert.eq(1,
- coll.aggregate([{$match: {$expr: {$gte: ["$y", {$multiply: [3, 3]}]}}}]).itcount());
-
- // $match with constant expression and no field path.
- assert.eq(4, coll.aggregate([{$match: {$expr: {$gte: [10, 5]}}}]).itcount());
- assert.eq(0, coll.aggregate([{$match: {$expr: {$gte: [5, 10]}}}]).itcount());
-
- // $match with $expr works inside a $or.
- assert.eq(4,
- coll.aggregate([{$match: {$or: [{$expr: {$eq: ["$foo", "$bar"]}}, {b: {$gt: 3}}]}}])
- .itcount());
-
- // $match with $expr works inside a $and.
- assert.eq(2,
- coll.aggregate([{$match: {$and: [{$expr: {$eq: ["$foo", "$bar"]}}, {x: {$lt: 2}}]}}])
- .itcount());
+"use strict";
+
+const coll = db.expr_match;
+coll.drop();
+assert.writeOK(coll.insert({x: 0}));
+assert.writeOK(coll.insert({x: 1, y: 1}));
+assert.writeOK(coll.insert({x: 2, y: 4}));
+assert.writeOK(coll.insert({x: 3, y: 9}));
+
+// $match with $expr representing local document field path reference.
+assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$x", 2]}}}]).itcount());
+assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$x", "$y"]}}}]).itcount());
+assert.eq(3, coll.aggregate([{$match: {$expr: {$eq: ["$x", {$sqrt: "$y"}]}}}]).itcount());
+
+// $match with $expr containing $or and $and.
+assert.eq(
+ 2,
+ coll.aggregate([{
+ $match: {$expr: {$or: [{$eq: ["$x", 3]}, {$and: [{$eq: ["$x", 2]}, {$eq: ["$y", 4]}]}]}}
+ }])
+ .itcount());
+
+// $match $expr containing $in.
+assert.eq(3, coll.aggregate([{$match: {$expr: {$in: ["$x", [1, {$mod: [4, 2]}, 3]]}}}]).itcount());
+
+// $match with constant expression and field path.
+assert.eq(1, coll.aggregate([{$match: {$expr: {$gte: ["$y", {$multiply: [3, 3]}]}}}]).itcount());
+
+// $match with constant expression and no field path.
+assert.eq(4, coll.aggregate([{$match: {$expr: {$gte: [10, 5]}}}]).itcount());
+assert.eq(0, coll.aggregate([{$match: {$expr: {$gte: [5, 10]}}}]).itcount());
+
+// $match with $expr works inside a $or.
+assert.eq(
+ 4,
+ coll.aggregate([{$match: {$or: [{$expr: {$eq: ["$foo", "$bar"]}}, {b: {$gt: 3}}]}}]).itcount());
+
+// $match with $expr works inside a $and.
+assert.eq(2,
+ coll.aggregate([{$match: {$and: [{$expr: {$eq: ["$foo", "$bar"]}}, {x: {$lt: 2}}]}}])
+ .itcount());
})();
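
Two of the expected counts above deserve a quick derivation. In the $in test, the array
expression evaluates to [1, 0, 3] because {$mod: [4, 2]} is 4 mod 2 = 0, so the stage matches the
documents with x = 0, x = 1, and x = 3, for a count of 3. In the $or test,
{$eq: ["$foo", "$bar"]} compares two fields that are missing from every document, and missing
compares equal to missing, so that branch alone matches all four documents; the $and test
intersects the same predicate with {x: {$lt: 2}}, leaving only the documents with x = 0 and
x = 1.
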
diff --git a/jstests/aggregation/sources/match/text_search_requires_index.js b/jstests/aggregation/sources/match/text_search_requires_index.js
index 431b5185d0e..ff62fa1bea2 100644
--- a/jstests/aggregation/sources/match/text_search_requires_index.js
+++ b/jstests/aggregation/sources/match/text_search_requires_index.js
@@ -2,19 +2,19 @@
// TODO: Reenable test on passthroughs with sharded collections as part of SERVER-38996.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
+load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
- const coll = db.coll;
- const from = db.from;
+const coll = db.coll;
+const from = db.from;
- coll.drop();
- from.drop();
+coll.drop();
+from.drop();
- const textPipeline = [{$match: {$text: {$search: "foo"}}}];
+const textPipeline = [{$match: {$text: {$search: "foo"}}}];
- const pipeline = [
+const pipeline = [
{
$lookup: {
pipeline: textPipeline,
@@ -24,16 +24,16 @@
},
];
- assert.commandWorked(coll.insert({_id: 1}));
- assert.commandWorked(from.insert({_id: 100, a: "foo"}));
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(from.insert({_id: 100, a: "foo"}));
- // Fail without index.
- assertErrorCode(from, textPipeline, ErrorCodes.IndexNotFound);
- assertErrorCode(coll, pipeline, ErrorCodes.IndexNotFound);
+// Fail without index.
+assertErrorCode(from, textPipeline, ErrorCodes.IndexNotFound);
+assertErrorCode(coll, pipeline, ErrorCodes.IndexNotFound);
- assert.commandWorked(from.createIndex({a: "text"}));
+assert.commandWorked(from.createIndex({a: "text"}));
- // Should run when you have the text index.
- assert.eq(from.aggregate(textPipeline).itcount(), 1);
- assert.eq(coll.aggregate(pipeline).itcount(), 1);
+// Should run when you have the text index.
+assert.eq(from.aggregate(textPipeline).itcount(), 1);
+assert.eq(coll.aggregate(pipeline).itcount(), 1);
}());
diff --git a/jstests/aggregation/sources/merge/all_modes.js b/jstests/aggregation/sources/merge/all_modes.js
index 83e0192530f..3854008072c 100644
--- a/jstests/aggregation/sources/merge/all_modes.js
+++ b/jstests/aggregation/sources/merge/all_modes.js
@@ -4,312 +4,290 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- const source = db.all_modes_source;
- const target = db.all_modes_target;
+const source = db.all_modes_source;
+const target = db.all_modes_target;
- (function setup() {
- source.drop();
- target.drop();
+(function setup() {
+ source.drop();
+ target.drop();
- // All tests use the same data in the source collection.
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 3, b: "c"}]));
+ // All tests use the same data in the source collection.
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 3, b: "c"}]));
+})();
- })();
+// Test 'whenMatched=replace whenNotMatched=insert' mode. This is an equivalent of a
+// replacement-style update with upsert=true.
+(function testWhenMatchedReplaceWhenNotMatchedInsert() {
+ assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
+ assert.doesNotThrow(() => source.aggregate([
+ {$merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 3, a: 3, b: "c"},
+ {_id: 4, a: 40}
+ ]
+ });
+})();
- // Test 'whenMatched=replace whenNotMatched=insert' mode. This is an equivalent of a
- // replacement-style update with upsert=true.
- (function testWhenMatchedReplaceWhenNotMatchedInsert() {
- assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
- assert.doesNotThrow(() => source.aggregate([
- {$merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- {_id: 2, a: 2, b: "b"},
- {_id: 3, a: 3, b: "c"},
- {_id: 4, a: 40}
- ]
- });
- })();
+// Test 'whenMatched=replace whenNotMatched=fail' mode. For matched documents the update
+// should be unordered and report an error at the end when all documents in a batch have been
+// processed; it will not fail as soon as we hit the first document without a match.
+(function testWhenMatchedReplaceWhenNotMatchedFail() {
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
+ const error = assert.throws(
+ () => source.aggregate(
+ [{$merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "fail"}}]));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 3, a: 3, b: "c"}, {_id: 4, a: 40}]
+ });
+})();
- // Test 'whenMatched=replace whenNotMatched=fail' mode. For matched documents the update
- // should be unordered and report an error at the end when all documents in a batch have been
-    // processed; it will not fail as soon as we hit the first document without a match.
- (function testWhenMatchedReplaceWhenNotMatchedFail() {
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
- const error = assert.throws(() => source.aggregate([
- {$merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "fail"}}
- ]));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 3, a: 3, b: "c"}, {_id: 4, a: 40}]
- });
- })();
+// Test 'whenMatched=replace whenNotMatched=discard' mode. Documents in the target
+// collection without a match in the source collection should not be modified as a result
+// of the merge operation.
+(function testWhenMatchedReplaceWhenNotMatchedDiscard() {
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
+ assert.doesNotThrow(() => source.aggregate([
+ {$merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "discard"}}
+ ]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 3, a: 3, b: "c"}, {_id: 4, a: 40}]
+ });
+})();
- // Test 'whenMatched=replace whenNotMatched=discard' mode. Documents in the target
- // collection without a match in the source collection should not be modified as a result
- // of the merge operation.
- (function testWhenMatchedReplaceWhenNotMatchedDiscard() {
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "discard"}
- }]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 3, a: 3, b: "c"}, {_id: 4, a: 40}]
- });
- })();
+// Test 'whenMatched=fail whenNotMatched=insert' mode. For matched documents the update should
+// be unordered and report an error at the end when all documents in a batch have been
+// processed; it will not fail as soon as we hit the first document with a match.
+(function testWhenMatchedFailWhenNotMatchedInsert() {
+ assert(target.drop());
+ assert.commandWorked(target.insert(
+ [{_id: 10, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
+ // Besides ensuring that a DuplicateKey error is raised when we find a matching document,
+ // this test also verifies that this $merge mode does perform an unordered insert and all
+ // documents in the batch without a matching document get inserted into the target
+ // collection. There is a special case when we can bail out early without processing all
+ // documents which fit into a single batch. Namely, if we have a sharded cluster with two
+ // shards, and shard documents by {_id: "hashed"}, we will end up with the document {_id: 3}
+ // landed on shard0, and {_id: 1} and {_id: 2} on shard1 in the source collection. Note
+ // that {_id: 3} has a duplicate key with the document in the target collection. For this
+    // particular case, the entire pipeline is sent to each shard. Let's assume that shard0 has
+ // processed its single document with {_id: 3} and raised a DuplicateKey error, whilst
+ // shard1 hasn't performed any writes yet (or even hasn't started reading from the cursor).
+ // The mongos, after receiving the DuplicateKey, will stop pulling data from the shards
+ // and will kill the cursors open on the remaining shards. Shard1, eventually, will throw
+ // a CursorKilled during an interrupt check, and so no writes will be done into the target
+    // collection. To work around this scenario and guarantee that the writes will always be
+ // performed, we will sort the documents by _id in ascending order. In this case, the
+ // pipeline will be split and we will pull everything to mongos before doing the $merge.
+    // This also ensures that documents with {_id: 1} and {_id: 2} will be inserted first
+ // before the DuplicateKey error is raised.
+ const error = assert.throws(() => source.aggregate([
+ {$sort: {_id: 1}},
+ {$merge: {into: target.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
+ ]));
+ assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 3, a: 30, c: "y"},
+ {_id: 4, a: 40, c: "z"},
+ {_id: 10, a: 10, c: "x"}
+ ]
+ });
+})();
- // Test 'whenMatched=fail whenNotMatched=insert' mode. For matched documents the update should
- // be unordered and report an error at the end when all documents in a batch have been
-    // processed; it will not fail as soon as we hit the first document with a match.
- (function testWhenMatchedFailWhenNotMatchedInsert() {
- assert(target.drop());
- assert.commandWorked(target.insert(
- [{_id: 10, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
- // Besides ensuring that a DuplicateKey error is raised when we find a matching document,
- // this test also verifies that this $merge mode does perform an unordered insert and all
- // documents in the batch without a matching document get inserted into the target
- // collection. There is a special case when we can bail out early without processing all
- // documents which fit into a single batch. Namely, if we have a sharded cluster with two
- // shards, and shard documents by {_id: "hashed"}, we will end up with the document {_id: 3}
- // landed on shard0, and {_id: 1} and {_id: 2} on shard1 in the source collection. Note
- // that {_id: 3} has a duplicate key with the document in the target collection. For this
-        // particular case, the entire pipeline is sent to each shard. Let's assume that shard0 has
- // processed its single document with {_id: 3} and raised a DuplicateKey error, whilst
- // shard1 hasn't performed any writes yet (or even hasn't started reading from the cursor).
- // The mongos, after receiving the DuplicateKey, will stop pulling data from the shards
- // and will kill the cursors open on the remaining shards. Shard1, eventually, will throw
- // a CursorKilled during an interrupt check, and so no writes will be done into the target
-        // collection. To work around this scenario and guarantee that the writes will always be
- // performed, we will sort the documents by _id in ascending order. In this case, the
- // pipeline will be split and we will pull everything to mongos before doing the $merge.
-        // This also ensures that documents with {_id: 1} and {_id: 2} will be inserted first
- // before the DuplicateKey error is raised.
- const error = assert.throws(() => source.aggregate([
- {$sort: {_id: 1}},
- {$merge: {into: target.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
- ]));
- assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- {_id: 2, a: 2, b: "b"},
- {_id: 3, a: 30, c: "y"},
- {_id: 4, a: 40, c: "z"},
- {_id: 10, a: 10, c: "x"}
- ]
- });
- })();
+// Test 'whenMatched=fail whenNotMatched=fail' mode. This mode is not supported and should fail.
+(function testWhenMatchedFailWhenNotMatchedFail() {
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, a: 10}));
+ const error = assert.throws(
+ () => source.aggregate(
+ [{$merge: {into: target.getName(), whenMatched: "fail", whenNotMatched: "fail"}}]));
+ assert.commandFailedWithCode(error, 51181);
+ // Ensure the target collection has not been modified.
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
+})();
- // Test 'whenMatched=fail whenNotMatched=fail' mode. This mode is not supported and should fail.
- (function testWhenMatchedFailWhenNotMatchedFail() {
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, a: 10}));
- const error = assert.throws(
- () => source.aggregate(
- [{$merge: {into: target.getName(), whenMatched: "fail", whenNotMatched: "fail"}}]));
- assert.commandFailedWithCode(error, 51181);
- // Ensure the target collection has not been modified.
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
- })();
+// Test 'whenMatched=fail whenNotMatched=discard' mode. This mode is not supported and should
+// fail.
+(function testWhenMatchedFailWhenNotMatchedDiscard() {
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, a: 10}));
+ const error = assert.throws(
+ () => source.aggregate(
+ [{$merge: {into: target.getName(), whenMatched: "fail", whenNotMatched: "discard"}}]));
+ assert.commandFailedWithCode(error, 51181);
+ // Ensure the target collection has not been modified.
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
+})();
- // Test 'whenMatched=fail whenNotMatched=discard' mode. This mode is not supported and should
- // fail.
- (function testWhenMatchedFailWhenNotMatchedDiscard() {
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, a: 10}));
- const error = assert.throws(() => source.aggregate([
- {$merge: {into: target.getName(), whenMatched: "fail", whenNotMatched: "discard"}}
- ]));
- assert.commandFailedWithCode(error, 51181);
- // Ensure the target collection has not been modified.
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
- })();
+// Test 'whenMatched=merge whenNotMatched=insert' mode. This is an equivalent of an update
+// with a $set operator and upsert=true.
+(function testWhenMatchedMergeWhenNotMatchedInsert() {
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 10, c: "z"}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "insert"}}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, c: "z", b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 3, a: 3, b: "c"},
+ {_id: 4, a: 40}
+ ]
+ });
+})();
- // Test 'whenMatched=merge whenNotMatched=insert' mode. This is an equivalent of an update
- // with a $set operator and upsert=true.
- (function testWhenMatchedMergeWhenNotMatchedInsert() {
- assert(target.drop());
- assert.commandWorked(
- target.insert([{_id: 1, a: 10, c: "z"}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
- assert.doesNotThrow(() => source.aggregate([
- {$merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "insert"}}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, c: "z", b: "a"},
- {_id: 2, a: 2, b: "b"},
- {_id: 3, a: 3, b: "c"},
- {_id: 4, a: 40}
- ]
- });
- })();
+// Test 'whenMatched=merge whenNotMatched=fail' mode. For matched documents the update
+// should be unordered and report an error at the end when all documents in a batch have been
+// processed; it will not fail as soon as we hit the first document without a match.
+(function testWhenMatchedMergeWhenNotMatchedFail() {
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
+ const error = assert.throws(
+ () => source.aggregate(
+ [{$merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "fail"}}]));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a", c: "x"},
+ {_id: 3, a: 3, b: "c", c: "y"},
+ {_id: 4, a: 40, c: "z"}
+ ]
+ });
+})();
- // Test 'whenMatched=merge whenNotMatched=fail' mode. For matched documents the update
- // should be unordered and report an error at the end when all documents in a batch have been
-    // processed; it will not fail as soon as we hit the first document without a match.
- (function testWhenMatchedMergeWhenNotMatchedFail() {
- assert(target.drop());
- assert.commandWorked(target.insert(
- [{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
- const error = assert.throws(() => source.aggregate([
- {$merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "fail"}}
- ]));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 3, a: 3, b: "c", c: "y"},
- {_id: 4, a: 40, c: "z"}
- ]
- });
- })();
+// Test 'whenMatched=merge whenNotMatched=discard' mode. Documents in the target collection
+// without a match in the source collection should not be modified as a result of the merge
+// operation.
+(function testWhenMatchedMergeWhenNotMatchedDiscard() {
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "discard"}}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a", c: "x"},
+ {_id: 3, a: 3, b: "c", c: "y"},
+ {_id: 4, a: 40, c: "z"}
+ ]
+ });
+})();
-    // Test 'whenMatched=merge whenNotMatched=discard' mode. Documents in the target collection
-    // without a match in the source collection should not be modified as a result of the merge
-    // operation.
- (function testWhenMatchedMergeWhenNotMatchedDiscard() {
- assert(target.drop());
- assert.commandWorked(target.insert(
- [{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate([
- {$merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "discard"}}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 3, a: 3, b: "c", c: "y"},
- {_id: 4, a: 40, c: "z"}
- ]
- });
- })();
+// Test 'whenMatched=[pipeline] whenNotMatched=insert' mode. This is an equivalent of a
+// pipeline-style update with upsert=true.
+(function testWhenMatchedPipelineUpdateWhenNotMatchedInsert() {
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: 1}));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge:
+ {into: target.getName(), whenMatched: [{$addFields: {x: 2}}], whenNotMatched: "insert"}
+ }]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, b: 1, x: 2}, {_id: 2, x: 2}, {_id: 3, x: 2}]
+ });
+})();
- // Test 'whenMatched=[pipeline] whenNotMatched=insert' mode. This is an equivalent of a
- // pipeline-style update with upsert=true.
- (function testWhenMatchedPipelineUpdateWhenNotMatchedInsert() {
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: 1}));
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: [{$addFields: {x: 2}}],
- whenNotMatched: "insert"
- }
- }]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, b: 1, x: 2}, {_id: 2, x: 2}, {_id: 3, x: 2}]
- });
- })();
+// Test 'whenMatched=[pipeline] whenNotMatched=fail' mode. For matched documents the update
+// should be unordered and report an error at the end when all documents in a batch have been
+// processed; it will not fail as soon as we hit the first document without a match.
+(function testWhenMatchedPipelineUpdateWhenNotMatchedFail() {
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
+ const error = assert.throws(() => source.aggregate([{
+ $merge:
+ {into: target.getName(), whenMatched: [{$addFields: {x: 2}}], whenNotMatched: "fail"}
+ }]));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected:
+ [{_id: 1, a: 10, c: "x", x: 2}, {_id: 3, a: 30, c: "y", x: 2}, {_id: 4, a: 40, c: "z"}]
+ });
+})();
- // Test 'whenMatched=[pipeline] whenNotMatched=fail' mode. For matched documents the update
- // should be unordered and report an error at the end when all documents in a batch have been
-    // processed; it will not fail as soon as we hit the first document without a match.
- (function testWhenMatchedPipelineUpdateWhenNotMatchedFail() {
- assert(target.drop());
- assert.commandWorked(target.insert(
- [{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
- const error = assert.throws(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: [{$addFields: {x: 2}}],
- whenNotMatched: "fail"
- }
- }]));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 10, c: "x", x: 2},
- {_id: 3, a: 30, c: "y", x: 2},
- {_id: 4, a: 40, c: "z"}
- ]
- });
- })();
+// Test 'whenMatched=[pipeline] whenNotMatched=discard' mode. Documents in the target collection
+// without a match in the source collection should not be modified as a result of the merge
+// operation.
+(function testWhenMatchedPipelineUpdateWhenNotMatchedDiscard() {
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge:
+ {into: target.getName(), whenMatched: [{$addFields: {x: 2}}], whenNotMatched: "discard"}
+ }]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected:
+ [{_id: 1, a: 10, c: "x", x: 2}, {_id: 3, a: 30, c: "y", x: 2}, {_id: 4, a: 40, c: "z"}]
+ });
+})();
- // Test 'whenMatched=[pipeline] whenNotMatched=discard' mode. Documents in the target collection
- // without a match in the source collection should not be modified as a result of the merge
- // operation.
- (function testWhenMatchedPipelineUpdateWhenNotMatchedDiscard() {
- assert(target.drop());
- assert.commandWorked(target.insert(
- [{_id: 1, a: 10, c: "x"}, {_id: 3, a: 30, c: "y"}, {_id: 4, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: [{$addFields: {x: 2}}],
- whenNotMatched: "discard"
- }
- }]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 10, c: "x", x: 2},
- {_id: 3, a: 30, c: "y", x: 2},
- {_id: 4, a: 40, c: "z"}
- ]
- });
- })();
+// Test 'whenMatched=keepExisting whenNotMatched=insert' mode. Existing documents in the target
+// collection which have a matching document in the source collection must not be updated, only
+// documents without a match must be inserted into the target collection.
+(function testWhenMatchedKeepExistingWhenNotMatchedInsert() {
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
+ assert.doesNotThrow(() => source.aggregate([
+ {$merge: {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "insert"}}
+ ]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 10}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30}, {_id: 4, a: 40}]
+ });
+})();
- // Test 'whenMatched=keepExisting whenNotMatched=insert' mode. Existing documents in the target
- // collection which have a matching document in the source collection must not be updated, only
- // documents without a match must be inserted into the target collection.
- (function testWhenMatchedKeepExistingWhenNotMatchedInsert() {
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, a: 10}, {_id: 3, a: 30}, {_id: 4, a: 40}]));
- assert.doesNotThrow(() => source.aggregate([{
- $merge:
- {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "insert"}
- }]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 10}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30}, {_id: 4, a: 40}]
- });
- })();
+// Test 'whenMatched=keepExisting whenNotMatched=fail' mode. This mode is not supported and
+// should fail.
+(function testWhenMatchedKeepExistingWhenNotMatchedFail() {
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, a: 10}));
+ const error = assert.throws(() => source.aggregate([
+ {$merge: {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "fail"}}
+ ]));
+ assert.commandFailedWithCode(error, 51181);
+ // Ensure the target collection has not been modified.
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
+})();
- // Test 'whenMatched=keepExisting whenNotMatched=fail' mode. This mode is not supported and
- // should fail.
- (function testWhenMatchedKeepExistingWhenNotMatchedFail() {
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, a: 10}));
- const error = assert.throws(() => source.aggregate([{
- $merge:
- {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "fail"}
- }]));
- assert.commandFailedWithCode(error, 51181);
- // Ensure the target collection has not been modified.
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
- })();
-
- // Test 'whenMatched=keepExisting whenNotMatched=discard' mode. This mode is not supported and
- // should fail.
- (function testWhenMatchedKeepExistingWhenNotMatchedDiscard() {
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, a: 10}));
- const error = assert.throws(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: "keepExisting",
- whenNotMatched: "discard"
- }
- }]));
- assert.commandFailedWithCode(error, 51181);
- // Ensure the target collection has not been modified.
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
- })();
+// Test 'whenMatched=keepExisting whenNotMatched=discard' mode. This mode is not supported and
+// should fail.
+(function testWhenMatchedKeepExistingWhenNotMatchedDiscard() {
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, a: 10}));
+ const error = assert.throws(() => source.aggregate([
+ {$merge: {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "discard"}}
+ ]));
+ assert.commandFailedWithCode(error, 51181);
+ // Ensure the target collection has not been modified.
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 10}]});
+})();
}());
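
As the mode comments above note, three of these configurations mirror classic upserts. A rough
per-document sketch of the correspondence (illustrative only: $merge batches its writes and,
unlike these commands, matches on its "on" fields, which default to _id):

    // For each document 'doc' produced by the source pipeline:
    // whenMatched: "replace", whenNotMatched: "insert"  ~  replacement-style upsert.
    db.all_modes_target.update({_id: doc._id}, doc, {upsert: true});
    // whenMatched: "merge", whenNotMatched: "insert"  ~  $set-style upsert.
    db.all_modes_target.update({_id: doc._id}, {$set: doc}, {upsert: true});
    // whenMatched: [{$addFields: {x: 2}}], whenNotMatched: "insert"  ~  pipeline-style upsert.
    db.all_modes_target.update({_id: doc._id}, [{$addFields: {x: 2}}], {upsert: true});
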
diff --git a/jstests/aggregation/sources/merge/batch_writes.js b/jstests/aggregation/sources/merge/batch_writes.js
index 3dc6455161e..1d0c5502391 100644
--- a/jstests/aggregation/sources/merge/batch_writes.js
+++ b/jstests/aggregation/sources/merge/batch_writes.js
@@ -3,69 +3,69 @@
// nothing horrendous happens and to characterize the current behavior.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const coll = db.batch_writes;
- const outColl = db.batch_writes_out;
- coll.drop();
- outColl.drop();
+const coll = db.batch_writes;
+const outColl = db.batch_writes_out;
+coll.drop();
+outColl.drop();
- // Test with 2 very large documents that do not fit into a single batch.
- const kSize15MB = 15 * 1024 * 1024;
- const largeArray = new Array(kSize15MB).join("a");
- assert.commandWorked(coll.insert({_id: 0, a: largeArray}));
- assert.commandWorked(coll.insert({_id: 1, a: largeArray}));
+// Test with 2 very large documents that do not fit into a single batch.
+const kSize15MB = 15 * 1024 * 1024;
+const largeArray = new Array(kSize15MB).join("a");
+assert.commandWorked(coll.insert({_id: 0, a: largeArray}));
+assert.commandWorked(coll.insert({_id: 1, a: largeArray}));
- // Make sure the $merge succeeds without any duplicate keys.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the aggregation to fail.
- if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
- return;
+// Make sure the $merge succeeds without any duplicate keys.
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the aggregation to fail.
+ if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
+ return;
- coll.aggregate([{
- $merge: {
- into: outColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assert.eq(whenNotMatchedMode == "discard" ? 0 : 2, outColl.find().itcount());
- outColl.drop();
- });
+ coll.aggregate([{
+ $merge: {
+ into: outColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assert.eq(whenNotMatchedMode == "discard" ? 0 : 2, outColl.find().itcount());
+ outColl.drop();
+});
- coll.drop();
- for (let i = 0; i < 10; i++) {
- assert.commandWorked(coll.insert({_id: i, a: i}));
- }
+coll.drop();
+for (let i = 0; i < 10; i++) {
+ assert.commandWorked(coll.insert({_id: i, a: i}));
+}
- // Create a unique index on 'a' in the output collection to create a unique key violation when
- // running the $merge. The second document to be written ({_id: 1, a: 1}) will conflict with the
- // existing document in the output collection. We use a unique index on a field other than _id
- // because whenMatched: "replace" will not change _id when one already exists.
- outColl.drop();
- assert.commandWorked(outColl.insert({_id: 2, a: 1}));
- assert.commandWorked(outColl.createIndex({a: 1}, {unique: true}));
+// Create a unique index on 'a' in the output collection to set up a unique key violation when
+// running the $merge. The second document to be written ({_id: 1, a: 1}) will conflict with the
+// existing document in the output collection. We use a unique index on a field other than _id
+// because whenMatched: "replace" will not change _id when one already exists.
+outColl.drop();
+assert.commandWorked(outColl.insert({_id: 2, a: 1}));
+assert.commandWorked(outColl.createIndex({a: 1}, {unique: true}));
- // Test that the writes for $merge are unordered, meaning the operation continues even if it
- // encounters a duplicate key error. We don't guarantee any particular behavior in this case,
- // but this test is meant to characterize the current behavior.
- assertErrorCode(
- coll,
- [{$merge: {into: outColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}],
- ErrorCodes.DuplicateKey);
- assert.soon(() => {
- return outColl.find().itcount() == 9;
- });
+// Test that the writes for $merge are unordered, meaning the operation continues even if it
+// encounters a duplicate key error. We don't guarantee any particular behavior in this case,
+// but this test is meant to characterize the current behavior.
+assertErrorCode(
+ coll,
+ [{$merge: {into: outColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}],
+ ErrorCodes.DuplicateKey);
+assert.soon(() => {
+ return outColl.find().itcount() == 9;
+});
- assertErrorCode(
- coll,
- [{$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}],
- ErrorCodes.DuplicateKey);
- assert.soon(() => {
- return outColl.find().itcount() == 9;
- });
+assertErrorCode(
+ coll,
+ [{$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}],
+ ErrorCodes.DuplicateKey);
+assert.soon(() => {
+ return outColl.find().itcount() == 9;
+});
}());
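
The file above leans on $merge issuing unordered writes: a duplicate-key error on one document
does not stop the remaining documents from being written, though the test itself notes this is
characterized behavior rather than a guarantee. A minimal sketch of that behavior (hypothetical
collection names, mongo shell assumed):

    const unorderedSrc = db.sketch_unordered_src;
    const unorderedOut = db.sketch_unordered_out;
    unorderedSrc.drop();
    unorderedOut.drop();
    for (let i = 0; i < 10; i++) {
        assert.commandWorked(unorderedSrc.insert({_id: i, a: i}));
    }
    // Seed a conflict: {a: 1} already exists under a unique index in the output collection.
    assert.commandWorked(unorderedOut.insert({_id: 100, a: 1}));
    assert.commandWorked(unorderedOut.createIndex({a: 1}, {unique: true}));
    const dupErr = assert.throws(() => unorderedSrc.aggregate([
        {$merge: {into: unorderedOut.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
    ]));
    assert.commandFailedWithCode(dupErr, ErrorCodes.DuplicateKey);
    // The nine non-conflicting source documents should still land, alongside the seeded one.
    assert.soon(() => unorderedOut.find().itcount() == 10);
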
diff --git a/jstests/aggregation/sources/merge/bypass_doc_validation.js b/jstests/aggregation/sources/merge/bypass_doc_validation.js
index d43b624ba91..957fcc9a2df 100644
--- a/jstests/aggregation/sources/merge/bypass_doc_validation.js
+++ b/jstests/aggregation/sources/merge/bypass_doc_validation.js
@@ -4,186 +4,150 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const testDB = db.getSiblingDB("out_bypass_doc_val");
- const sourceColl = testDB.getCollection("source");
- const targetColl = testDB.getCollection("target");
+const testDB = db.getSiblingDB("out_bypass_doc_val");
+const sourceColl = testDB.getCollection("source");
+const targetColl = testDB.getCollection("target");
+targetColl.drop();
+assert.commandWorked(testDB.createCollection(targetColl.getName(), {validator: {a: 2}}));
+
+sourceColl.drop();
+assert.commandWorked(sourceColl.insert({_id: 0, a: 1}));
+
+// Test that the bypassDocumentValidation flag is passed through to the writes on the output
+// collection.
+(function testBypassDocValidationTrue() {
+ sourceColl.aggregate([{$merge: targetColl.getName()}], {bypassDocumentValidation: true});
+ assert.eq([{_id: 0, a: 1}], targetColl.find().toArray());
+
+ sourceColl.aggregate(
+ [
+ {$addFields: {a: 3}},
+ {$merge: {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ],
+ {bypassDocumentValidation: true});
+ assert.eq([{_id: 0, a: 3}], targetColl.find().toArray());
+
+ sourceColl.aggregate(
+ [
+ {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
+ {$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
+ ],
+ {bypassDocumentValidation: true});
+ assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}], targetColl.find().sort({_id: 1}).toArray());
+}());
+
+// Test that mode "replaceDocuments" passes without the bypassDocumentValidation flag if the
+// updated doc is valid.
+(function testReplacementStyleUpdateWithoutBypass() {
+ sourceColl.aggregate([
+ {$addFields: {a: 2}},
+ {$merge: {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ]);
+ assert.eq([{_id: 0, a: 2}], targetColl.find({_id: 0}).toArray());
+ sourceColl.aggregate(
+ [
+ {$addFields: {a: 2}},
+ {$merge: {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ],
+ {bypassDocumentValidation: false});
+ assert.eq([{_id: 0, a: 2}], targetColl.find({_id: 0}).toArray());
+}());
+
+function assertDocValidationFailure(cmdOptions) {
+ assert.commandWorked(targetColl.remove({}));
+ assertErrorCode(sourceColl,
+ [{$merge: targetColl.getName()}],
+ ErrorCodes.DocumentValidationFailure,
+ "Expected failure without bypass set",
+ cmdOptions);
+
+ assertErrorCode(
+ sourceColl,
+ [
+ {$addFields: {a: 3}},
+ {$merge: {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ],
+ ErrorCodes.DocumentValidationFailure,
+ "Expected failure without bypass set",
+ cmdOptions);
+
+ assertErrorCode(
+ sourceColl,
+ [
+ {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
+ {$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
+ ],
+ ErrorCodes.DocumentValidationFailure,
+ "Expected failure without bypass set",
+ cmdOptions);
+ assert.eq(0, targetColl.find().itcount());
+}
+
+// Test that $merge fails if the output document is not valid, and the bypassDocumentValidation
+// flag is not set.
+assertDocValidationFailure({});
+
+// Test that $merge fails if the output document is not valid, and the bypassDocumentValidation
+// flag is explicitly set to false.
+assertDocValidationFailure({bypassDocumentValidation: false});
+
+// Test that bypassDocumentValidation is *not* needed if the source collection has a
+// validator but the output collection does not.
+(function testDocValidatorOnSourceCollection() {
targetColl.drop();
- assert.commandWorked(testDB.createCollection(targetColl.getName(), {validator: {a: 2}}));
+ assert.commandWorked(testDB.runCommand({collMod: sourceColl.getName(), validator: {a: 1}}));
+
+ sourceColl.aggregate([{$merge: targetColl.getName()}]);
+ assert.eq([{_id: 0, a: 1}], targetColl.find().toArray());
+
+ sourceColl.aggregate([
+ {$addFields: {a: 3}},
+ {$merge: {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ]);
+ assert.eq([{_id: 0, a: 3}], targetColl.find().toArray());
+
+ sourceColl.aggregate([
+ {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
+ {$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
+ ]);
+ assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}], targetColl.find().sort({_id: 1}).toArray());
+}());
+// Test that bypassDocumentValidation is coerced to true if the value is non-boolean.
+(function testNonBooleanBypassDocValidationFlag() {
+ assert.commandWorked(targetColl.remove({}));
+ assert.commandWorked(testDB.runCommand({collMod: targetColl.getName(), validator: {a: 1}}));
sourceColl.drop();
assert.commandWorked(sourceColl.insert({_id: 0, a: 1}));
- // Test that the bypassDocumentValidation flag is passed through to the writes on the output
- // collection.
- (function testBypassDocValidationTrue() {
- sourceColl.aggregate([{$merge: targetColl.getName()}], {bypassDocumentValidation: true});
- assert.eq([{_id: 0, a: 1}], targetColl.find().toArray());
+ sourceColl.aggregate([{$merge: targetColl.getName()}], {bypassDocumentValidation: 5});
+ assert.eq([{_id: 0, a: 1}], targetColl.find().toArray());
- sourceColl.aggregate(
- [
- {$addFields: {a: 3}},
- {
- $merge: {
- into: targetColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ],
- {bypassDocumentValidation: true});
- assert.eq([{_id: 0, a: 3}], targetColl.find().toArray());
-
- sourceColl.aggregate(
- [
- {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
- {
- $merge:
- {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}
- }
- ],
- {bypassDocumentValidation: true});
- assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}], targetColl.find().sort({_id: 1}).toArray());
- }());
-
- // Test that mode "replaceDocuments" passes without the bypassDocumentValidation flag if the
- // updated doc is valid.
- (function testReplacementStyleUpdateWithoutBypass() {
- sourceColl.aggregate([
- {$addFields: {a: 2}},
- {
- $merge:
- {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}
- }
- ]);
- assert.eq([{_id: 0, a: 2}], targetColl.find({_id: 0}).toArray());
- sourceColl.aggregate(
- [
- {$addFields: {a: 2}},
- {
- $merge: {
- into: targetColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ],
- {bypassDocumentValidation: false});
- assert.eq([{_id: 0, a: 2}], targetColl.find({_id: 0}).toArray());
- }());
-
- function assertDocValidationFailure(cmdOptions) {
- assert.commandWorked(targetColl.remove({}));
- assertErrorCode(sourceColl,
- [{$merge: targetColl.getName()}],
- ErrorCodes.DocumentValidationFailure,
- "Expected failure without bypass set",
- cmdOptions);
-
- assertErrorCode(sourceColl,
- [
- {$addFields: {a: 3}},
- {
- $merge: {
- into: targetColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ],
- ErrorCodes.DocumentValidationFailure,
- "Expected failure without bypass set",
- cmdOptions);
-
- assertErrorCode(
- sourceColl,
- [
- {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
- {
- $merge:
- {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}
- }
- ],
- ErrorCodes.DocumentValidationFailure,
- "Expected failure without bypass set",
- cmdOptions);
- assert.eq(0, targetColl.find().itcount());
- }
-
- // Test that $merge fails if the output document is not valid, and the bypassDocumentValidation
- // flag is not set.
- assertDocValidationFailure({});
-
- // Test that $merge fails if the output document is not valid, and the bypassDocumentValidation
- // flag is explicitly set to false.
- assertDocValidationFailure({bypassDocumentValidation: false});
-
- // Test that bypassDocumentValidation is *not* needed if the source collection has a
- // validator but the output collection does not.
- (function testDocValidatorOnSourceCollection() {
- targetColl.drop();
- assert.commandWorked(testDB.runCommand({collMod: sourceColl.getName(), validator: {a: 1}}));
-
- sourceColl.aggregate([{$merge: targetColl.getName()}]);
- assert.eq([{_id: 0, a: 1}], targetColl.find().toArray());
-
- sourceColl.aggregate([
+ sourceColl.aggregate(
+ [
{$addFields: {a: 3}},
- {
- $merge:
- {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}
- }
- ]);
- assert.eq([{_id: 0, a: 3}], targetColl.find().toArray());
+ {$merge: {into: targetColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ],
+ {bypassDocumentValidation: "false"});
+ assert.eq([{_id: 0, a: 3}], targetColl.find().toArray());
+}());
- sourceColl.aggregate([
- {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
- {$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
- ]);
- assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}], targetColl.find().sort({_id: 1}).toArray());
- }());
-
- // Test that the bypassDocumentValidation is casted to true if the value is non-boolean.
- (function testNonBooleanBypassDocValidationFlag() {
- assert.commandWorked(targetColl.remove({}));
- assert.commandWorked(testDB.runCommand({collMod: targetColl.getName(), validator: {a: 1}}));
- sourceColl.drop();
- assert.commandWorked(sourceColl.insert({_id: 0, a: 1}));
-
- sourceColl.aggregate([{$merge: targetColl.getName()}], {bypassDocumentValidation: 5});
- assert.eq([{_id: 0, a: 1}], targetColl.find().toArray());
-
- sourceColl.aggregate(
- [
- {$addFields: {a: 3}},
- {
- $merge: {
- into: targetColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ],
- {bypassDocumentValidation: "false"});
- assert.eq([{_id: 0, a: 3}], targetColl.find().toArray());
- }());
-
- // Test bypassDocumentValidation with $merge to a collection in a foreign database.
- (function testForeignDb() {
- const foreignDB = db.getSiblingDB("foreign_db");
- const foreignColl = foreignDB.foreign_coll;
- foreignColl.drop();
- assert.commandWorked(
- foreignDB.createCollection(foreignColl.getName(), {validator: {a: 2}}));
-
- sourceColl.aggregate(
- [
- {$addFields: {a: 3}},
- {
+// Test bypassDocumentValidation with $merge to a collection in a foreign database.
+(function testForeignDb() {
+ const foreignDB = db.getSiblingDB("foreign_db");
+ const foreignColl = foreignDB.foreign_coll;
+ foreignColl.drop();
+ assert.commandWorked(foreignDB.createCollection(foreignColl.getName(), {validator: {a: 2}}));
+
+ sourceColl.aggregate(
+ [
+ {$addFields: {a: 3}},
+ {
$merge: {
into: {
db: foreignDB.getName(),
@@ -192,15 +156,15 @@
whenMatched: "replace",
whenNotMatched: "insert"
}
- }
- ],
- {bypassDocumentValidation: true});
- assert.eq([{_id: 0, a: 3}], foreignColl.find().toArray());
-
- sourceColl.aggregate(
- [
- {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
- {
+ }
+ ],
+ {bypassDocumentValidation: true});
+ assert.eq([{_id: 0, a: 3}], foreignColl.find().toArray());
+
+ sourceColl.aggregate(
+ [
+ {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
+ {
$merge: {
into: {
db: foreignDB.getName(),
@@ -209,16 +173,16 @@
whenMatched: "fail",
whenNotMatched: "insert"
}
- }
- ],
- {bypassDocumentValidation: true});
- assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}], foreignColl.find().sort({_id: 1}).toArray());
-
- assert.commandWorked(foreignColl.remove({}));
- assertErrorCode(sourceColl,
- [
- {$addFields: {a: 3}},
- {
+ }
+ ],
+ {bypassDocumentValidation: true});
+ assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}], foreignColl.find().sort({_id: 1}).toArray());
+
+ assert.commandWorked(foreignColl.remove({}));
+ assertErrorCode(sourceColl,
+ [
+ {$addFields: {a: 3}},
+ {
$merge: {
into: {
db: foreignDB.getName(),
@@ -227,14 +191,14 @@
whenMatched: "replace",
whenNotMatched: "insert"
}
- }
- ],
- ErrorCodes.DocumentValidationFailure);
-
- assertErrorCode(sourceColl,
- [
- {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
- {
+ }
+ ],
+ ErrorCodes.DocumentValidationFailure);
+
+ assertErrorCode(sourceColl,
+ [
+ {$replaceRoot: {newRoot: {_id: 1, a: 4}}},
+ {
$merge: {
into: {
db: foreignDB.getName(),
@@ -243,9 +207,9 @@
whenMatched: "fail",
whenNotMatched: "insert"
}
- }
- ],
- ErrorCodes.DocumentValidationFailure);
- assert.eq(0, foreignColl.find().itcount());
- }());
+ }
+ ],
+ ErrorCodes.DocumentValidationFailure);
+ assert.eq(0, foreignColl.find().itcount());
+}());
}());
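
To summarize what bypass_doc_validation.js exercises above: the bypassDocumentValidation flag on
the aggregate command is forwarded to the writes that $merge performs, so an otherwise-invalid
document can be written past the target's validator. A minimal sketch (hypothetical database and
collection names):

    const sketchDB = db.getSiblingDB("sketch_bypass_val");
    const sketchOut = sketchDB.out;
    sketchOut.drop();
    assert.commandWorked(sketchDB.createCollection(sketchOut.getName(), {validator: {a: 2}}));
    const sketchSrc = sketchDB.src;
    sketchSrc.drop();
    assert.commandWorked(sketchSrc.insert({_id: 0, a: 1}));
    // Without the flag, the write is rejected by the target's validator...
    assert.throws(() => sketchSrc.aggregate([{$merge: sketchOut.getName()}]));
    // ...but with it, the invalid document is written anyway.
    sketchSrc.aggregate([{$merge: sketchOut.getName()}], {bypassDocumentValidation: true});
    assert.eq([{_id: 0, a: 1}], sketchOut.find().toArray());
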
diff --git a/jstests/aggregation/sources/merge/disallowed_in_lookup.js b/jstests/aggregation/sources/merge/disallowed_in_lookup.js
index 3731055f6b9..19f37305dbe 100644
--- a/jstests/aggregation/sources/merge/disallowed_in_lookup.js
+++ b/jstests/aggregation/sources/merge/disallowed_in_lookup.js
@@ -1,28 +1,28 @@
// Tests that $merge cannot be used within a $lookup pipeline.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findNonConfigNodes.
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findNonConfigNodes.
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
- const kErrorCodeMergeBannedInLookup = 51047;
- const kErrorCodeMergeLastStageOnly = 40601;
- const coll = db.merge_in_lookup_not_allowed;
- coll.drop();
+const kErrorCodeMergeBannedInLookup = 51047;
+const kErrorCodeMergeLastStageOnly = 40601;
+const coll = db.merge_in_lookup_not_allowed;
+coll.drop();
- const from = db.merge_in_lookup_not_allowed_from;
- from.drop();
+const from = db.merge_in_lookup_not_allowed_from;
+from.drop();
- if (FixtureHelpers.isSharded(from)) {
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalQueryAllowShardedLookup",
- true);
- }
+if (FixtureHelpers.isSharded(from)) {
+ setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
+ "internalQueryAllowShardedLookup",
+ true);
+}
- let pipeline = [
+let pipeline = [
{
$lookup: {
pipeline: [{$merge: {into: "out_collection", on: "_id"}}],
@@ -31,9 +31,9 @@
}
},
];
- assertErrorCode(coll, pipeline, kErrorCodeMergeBannedInLookup);
+assertErrorCode(coll, pipeline, kErrorCodeMergeBannedInLookup);
- pipeline = [
+pipeline = [
{
$lookup: {
pipeline: [{$project: {x: 0}}, {$merge: {into: "out_collection", on: "_id"}}],
@@ -42,9 +42,9 @@
}
},
];
- assertErrorCode(coll, pipeline, kErrorCodeMergeBannedInLookup);
+assertErrorCode(coll, pipeline, kErrorCodeMergeBannedInLookup);
- pipeline = [
+pipeline = [
{
$lookup: {
pipeline: [{$merge: {into: "out_collection", on: "_id"}}, {$match: {x: true}}],
@@ -53,14 +53,14 @@
}
},
];
- // Pipeline will fail because $merge is not last in the subpipeline.
- // Validation for $merge in a $lookup's subpipeline occurs at a later point.
- assertErrorCode(coll, pipeline, kErrorCodeMergeLastStageOnly);
+// Pipeline will fail because $merge is not last in the subpipeline.
+// Validation for $merge in a $lookup's subpipeline occurs at a later point.
+assertErrorCode(coll, pipeline, kErrorCodeMergeLastStageOnly);
- // Create view which contains $merge within $lookup.
- assertDropCollection(coll.getDB(), "view1");
+// Create view which contains $merge within $lookup.
+assertDropCollection(coll.getDB(), "view1");
- pipeline = [
+pipeline = [
{
$lookup: {
pipeline: [{$merge: {into: "out_collection", on: "_id"}}],
@@ -69,14 +69,14 @@
}
},
];
- // Pipeline will fail because $merge is not allowed to exist within a $lookup.
- // Validation for $merge in a view occurs at a later point.
- const cmdRes =
- coll.getDB().runCommand({create: "view1", viewOn: coll.getName(), pipeline: pipeline});
- assert.commandFailedWithCode(cmdRes, kErrorCodeMergeBannedInLookup);
+// Pipeline will fail because $merge is not allowed to exist within a $lookup.
+// Validation for $merge in a view occurs at a later point.
+const cmdRes =
+ coll.getDB().runCommand({create: "view1", viewOn: coll.getName(), pipeline: pipeline});
+assert.commandFailedWithCode(cmdRes, kErrorCodeMergeBannedInLookup);
- // Test that a $merge without an explicit "on" field still fails within a $lookup.
- pipeline = [
+// Test that a $merge without an explicit "on" field still fails within a $lookup.
+pipeline = [
{
$lookup: {
pipeline: [{$merge: {into: "out_collection"}}],
@@ -85,7 +85,7 @@
}
},
];
- assert.commandFailedWithCode(
- db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}}),
- kErrorCodeMergeBannedInLookup);
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}}),
+ kErrorCodeMergeBannedInLookup);
}());
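
The shape of the rejection tested above can be reproduced in isolation: any $merge nested in a
$lookup subpipeline is refused with code 51047. A minimal sketch (hypothetical collection names):

    const lookupColl = db.sketch_lookup_merge;
    lookupColl.drop();
    assert.commandWorked(lookupColl.insert({_id: 0}));
    const badRes = db.runCommand({
        aggregate: lookupColl.getName(),
        pipeline: [{
            $lookup: {
                from: lookupColl.getName(),
                as: "out",
                pipeline: [{$merge: {into: "sketch_lookup_target"}}]
            }
        }],
        cursor: {}
    });
    assert.commandFailedWithCode(badRes, 51047);  // $merge is banned inside $lookup.
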
diff --git a/jstests/aggregation/sources/merge/exchange_explain.js b/jstests/aggregation/sources/merge/exchange_explain.js
index 362af97ed46..23bed99973d 100644
--- a/jstests/aggregation/sources/merge/exchange_explain.js
+++ b/jstests/aggregation/sources/merge/exchange_explain.js
@@ -6,173 +6,169 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
-
- const mongosDB = st.s.getDB("test_db");
-
- const inColl = mongosDB["inColl"];
- const targetCollRange = mongosDB["targetCollRange"];
- const targetCollRangeOtherField = mongosDB["targetCollRangeOtherField"];
- const targetCollHash = mongosDB["targetCollHash"];
-
- const numDocs = 1000;
-
- function runExplainQuery(targetColl) {
- return inColl.explain("allPlansExecution").aggregate([
- {$group: {_id: "$a", a: {$avg: "$a"}}},
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ]);
- }
+"use strict";
- function runRealQuery(targetColl) {
- return inColl.aggregate([
- {$group: {_id: "$a", a: {$avg: "$a"}}},
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ]);
- }
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- function getExchangeSpec(explain) {
- assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
- assert(explain.splitPipeline.hasOwnProperty("exchange"), tojson(explain));
+const mongosDB = st.s.getDB("test_db");
- return explain.splitPipeline.exchange;
- }
+const inColl = mongosDB["inColl"];
+const targetCollRange = mongosDB["targetCollRange"];
+const targetCollRangeOtherField = mongosDB["targetCollRangeOtherField"];
+const targetCollHash = mongosDB["targetCollHash"];
- // Shard the input collection.
- st.shardColl(inColl, {a: 1}, {a: 500}, {a: 500}, mongosDB.getName());
+const numDocs = 1000;
- // Insert some data to the input collection.
- let bulk = inColl.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({a: i}, {b: [0, 1, 2, 3, i]});
- }
- assert.commandWorked(bulk.execute());
-
- // Shard the output collections.
- st.shardColl(targetCollRange, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
- st.shardColl(targetCollRangeOtherField, {b: 1}, {b: 500}, {b: 500}, mongosDB.getName());
- st.shardColl(targetCollHash, {_id: "hashed"}, false, false, mongosDB.getName());
-
- // Run the explain. We expect to see the range based exchange here.
- let explain = runExplainQuery(targetCollRange);
-
- // Make sure we see the exchange in the explain output.
- assert.eq(explain.mergeType, "exchange", tojson(explain));
- let exchangeSpec = getExchangeSpec(explain);
- assert.eq(exchangeSpec.policy, "keyRange");
- assert.eq(exchangeSpec.key, {_id: 1});
-
- // Run the real query.
- runRealQuery(targetCollRange);
- let results = targetCollRange.aggregate([{'$count': "count"}]).next().count;
- assert.eq(results, numDocs);
-
- // Rerun the same query with the hash based exchange.
- explain = runExplainQuery(targetCollHash);
-
- // Make sure we see the exchange in the explain output.
- assert.eq(explain.mergeType, "exchange", tojson(explain));
- exchangeSpec = getExchangeSpec(explain);
- assert.eq(exchangeSpec.policy, "keyRange");
- assert.eq(exchangeSpec.key, {_id: "hashed"});
-
- // Run the real query.
- runRealQuery(targetCollHash);
- results = targetCollHash.aggregate([{'$count': "count"}]).next().count;
- assert.eq(results, numDocs);
-
- // This should fail because the "on" field ('b' in this case, the shard key of the target
- // collection) cannot be an array.
- assertErrorCode(inColl,
- [{
- $merge: {
- into: {
- db: targetCollRangeOtherField.getDB().getName(),
- coll: targetCollRangeOtherField.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }],
- 51132);
-
- // Turn off the exchange and rerun the query.
- assert.commandWorked(mongosDB.adminCommand({setParameter: 1, internalQueryDisableExchange: 1}));
- explain = runExplainQuery(targetCollRange);
-
- // Make sure there is no exchange.
- assert.eq(explain.mergeType, "anyShard", tojson(explain));
- assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
- assert(!explain.splitPipeline.hasOwnProperty("exchange"), tojson(explain));
-
- // This should fail similar to before even if we are not running the exchange.
- assertErrorCode(inColl,
- [{
- $merge: {
- into: {
- db: targetCollRangeOtherField.getDB().getName(),
- coll: targetCollRangeOtherField.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }],
- 51132);
-
- // SERVER-38349 Make sure mongos rejects specifying exchange directly.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: inColl.getName(),
- pipeline: [],
- cursor: {},
- exchange: {
- policy: "keyRange",
- bufferSize: NumberInt(1024),
- boundaries: [{_id: 0}],
- consumers: NumberInt(2),
- consumerIds: [NumberInt(0), NumberInt(1)]
+function runExplainQuery(targetColl) {
+ return inColl.explain("allPlansExecution").aggregate([
+ {$group: {_id: "$a", a: {$avg: "$a"}}},
+ {
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert"
+ }
}
- }),
- 51028);
+ ]);
+}
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: inColl.getName(),
- pipeline: [{
+function runRealQuery(targetColl) {
+ return inColl.aggregate([
+ {$group: {_id: "$a", a: {$avg: "$a"}}},
+ {
$merge: {
- into: targetCollRange.getName(),
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
whenMatched: "replace",
whenNotMatched: "insert"
}
- }],
- cursor: {},
- exchange: {
- policy: "keyRange",
- bufferSize: NumberInt(1024),
- boundaries: [{_id: 0}],
- consumers: NumberInt(2),
- consumerIds: [NumberInt(0), NumberInt(1)]
}
- }),
- 51028);
+ ]);
+}
+
+function getExchangeSpec(explain) {
+ assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
+ assert(explain.splitPipeline.hasOwnProperty("exchange"), tojson(explain));
+
+ return explain.splitPipeline.exchange;
+}
+
+// Shard the input collection.
+st.shardColl(inColl, {a: 1}, {a: 500}, {a: 500}, mongosDB.getName());
+
+// Insert some data into the input collection.
+let bulk = inColl.initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; i++) {
+    bulk.insert({a: i, b: [0, 1, 2, 3, i]});
+}
+assert.commandWorked(bulk.execute());
+
+// Shard the output collections.
+st.shardColl(targetCollRange, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
+st.shardColl(targetCollRangeOtherField, {b: 1}, {b: 500}, {b: 500}, mongosDB.getName());
+st.shardColl(targetCollHash, {_id: "hashed"}, false, false, mongosDB.getName());
+
+// Run the explain. We expect to see the range-based exchange here.
+let explain = runExplainQuery(targetCollRange);
+
+// Make sure we see the exchange in the explain output.
+assert.eq(explain.mergeType, "exchange", tojson(explain));
+let exchangeSpec = getExchangeSpec(explain);
+assert.eq(exchangeSpec.policy, "keyRange");
+assert.eq(exchangeSpec.key, {_id: 1});
+
+// Run the real query.
+runRealQuery(targetCollRange);
+let results = targetCollRange.aggregate([{'$count': "count"}]).next().count;
+assert.eq(results, numDocs);
+
+// Rerun the same query with the hash-based exchange.
+explain = runExplainQuery(targetCollHash);
+
+// Make sure we see the exchange in the explain output.
+assert.eq(explain.mergeType, "exchange", tojson(explain));
+exchangeSpec = getExchangeSpec(explain);
+assert.eq(exchangeSpec.policy, "keyRange");
+assert.eq(exchangeSpec.key, {_id: "hashed"});
+
+// Run the real query.
+runRealQuery(targetCollHash);
+results = targetCollHash.aggregate([{'$count': "count"}]).next().count;
+assert.eq(results, numDocs);
+
+// This should fail because the "on" field ('b' in this case, the shard key of the target
+// collection) cannot be an array.
+assertErrorCode(inColl,
+ [{
+ $merge: {
+ into: {
+ db: targetCollRangeOtherField.getDB().getName(),
+ coll: targetCollRangeOtherField.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert"
+ }
+ }],
+ 51132);
+
+// Turn off the exchange and rerun the query.
+assert.commandWorked(mongosDB.adminCommand({setParameter: 1, internalQueryDisableExchange: 1}));
+explain = runExplainQuery(targetCollRange);
+
+// Make sure there is no exchange.
+assert.eq(explain.mergeType, "anyShard", tojson(explain));
+assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
+assert(!explain.splitPipeline.hasOwnProperty("exchange"), tojson(explain));
+
+// This should fail just as before, even though the exchange is not running.
+assertErrorCode(inColl,
+ [{
+ $merge: {
+ into: {
+ db: targetCollRangeOtherField.getDB().getName(),
+ coll: targetCollRangeOtherField.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert"
+ }
+ }],
+ 51132);
+
+// SERVER-38349 Make sure mongos rejects specifying exchange directly.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: inColl.getName(),
+ pipeline: [],
+ cursor: {},
+ exchange: {
+ policy: "keyRange",
+ bufferSize: NumberInt(1024),
+ boundaries: [{_id: 0}],
+ consumers: NumberInt(2),
+ consumerIds: [NumberInt(0), NumberInt(1)]
+ }
+}),
+ 51028);
+
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: inColl.getName(),
+ pipeline: [{
+ $merge: {into: targetCollRange.getName(), whenMatched: "replace", whenNotMatched: "insert"}
+ }],
+ cursor: {},
+ exchange: {
+ policy: "keyRange",
+ bufferSize: NumberInt(1024),
+ boundaries: [{_id: 0}],
+ consumers: NumberInt(2),
+ consumerIds: [NumberInt(0), NumberInt(1)]
+ }
+}),
+ 51028);
- st.stop();
+st.stop();
}());
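
Condensing the explain checks from exchange_explain.js above: when the exchange optimization
applies, mongos reports mergeType "exchange" and attaches the exchange specification under
splitPipeline. A sketch of that inspection, assuming the sharded fixture built in the test
(mongosDB, a sharded inColl, and a sharded targetCollRange):

    const sketchExplain = mongosDB.inColl.explain().aggregate([
        {$group: {_id: "$a", a: {$avg: "$a"}}},
        {$merge: {into: "targetCollRange", whenMatched: "replace", whenNotMatched: "insert"}}
    ]);
    if (sketchExplain.mergeType == "exchange") {
        // The spec names the partitioning policy and the key the consumers are split on.
        const spec = sketchExplain.splitPipeline.exchange;
        assert.eq(spec.policy, "keyRange");
        assert.eq(spec.key, {_id: 1});
    }
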
diff --git a/jstests/aggregation/sources/merge/merge_to_referenced_collection.js b/jstests/aggregation/sources/merge/merge_to_referenced_collection.js
index c6a82bab79d..a9060f58b0a 100644
--- a/jstests/aggregation/sources/merge/merge_to_referenced_collection.js
+++ b/jstests/aggregation/sources/merge/merge_to_referenced_collection.js
@@ -9,64 +9,55 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/aggregation/extras/merge_helpers.js'); // For 'withEachMergeMode'.
- load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'.
+load('jstests/aggregation/extras/merge_helpers.js'); // For 'withEachMergeMode'.
+load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'.
- const testDB = db.getSiblingDB("merge_to_referenced_coll");
- const coll = testDB.test;
+const testDB = db.getSiblingDB("merge_to_referenced_coll");
+const coll = testDB.test;
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- coll.drop();
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ coll.drop();
- // Seed the collection to ensure each pipeline will actually do something.
- assert.commandWorked(coll.insert({_id: 0}));
+ // Seed the collection to ensure each pipeline will actually do something.
+ assert.commandWorked(coll.insert({_id: 0}));
- // Each of the following assertions will somehow use $merge to write to a namespace that is
- // being read from elsewhere in the pipeline.
- const assertFailsWithCode = ((fn) => {
- const error = assert.throws(fn);
- assert.contains(error.code, [51188, 51079]);
- });
+ // Each of the following assertions will somehow use $merge to write to a namespace that is
+ // being read from elsewhere in the pipeline.
+ const assertFailsWithCode = ((fn) => {
+ const error = assert.throws(fn);
+ assert.contains(error.code, [51188, 51079]);
+ });
- // Test $merge to the aggregate command's source collection.
- assertFailsWithCode(() => coll.aggregate([{
+ // Test $merge to the aggregate command's source collection.
+ assertFailsWithCode(() => coll.aggregate([{
+ $merge:
+ {into: coll.getName(), whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode}
+ }]));
+
+ // Test $merge to the same namespace as a $lookup which is the same as the aggregate
+ // command's source collection.
+ assertFailsWithCode(() => coll.aggregate([
+ {$lookup: {from: coll.getName(), as: "x", localField: "f_id", foreignField: "_id"}},
+ {
$merge: {
into: coll.getName(),
whenMatched: whenMatchedMode,
whenNotMatched: whenNotMatchedMode
}
- }]));
+ }
+ ]));
- // Test $merge to the same namespace as a $lookup which is the same as the aggregate
- // command's source collection.
- assertFailsWithCode(() => coll.aggregate([
- {$lookup: {from: coll.getName(), as: "x", localField: "f_id", foreignField: "_id"}},
- {
- $merge: {
- into: coll.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }
- ]));
+ // Test $merge to the same namespace as a $lookup which is *not* the same as the aggregate
+ // command's source collection.
+ assertFailsWithCode(() => coll.aggregate([
+ {$lookup: {from: "bar", as: "x", localField: "f_id", foreignField: "_id"}},
+ {$merge: {into: "bar", whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode}}
+ ]));
- // Test $merge to the same namespace as a $lookup which is *not* the same as the aggregate
- // command's source collection.
- assertFailsWithCode(() => coll.aggregate([
- {$lookup: {from: "bar", as: "x", localField: "f_id", foreignField: "_id"}},
- {
- $merge: {
- into: "bar",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }
- ]));
-
- // Test $merge to the same namespace as a $graphLookup.
- assertFailsWithCode(() => coll.aggregate([
+ // Test $merge to the same namespace as a $graphLookup.
+ assertFailsWithCode(() => coll.aggregate([
{
$graphLookup: {
from: "bar",
@@ -85,8 +76,8 @@
}
]));
- // Test $merge to the same namespace as a $lookup which is nested within another $lookup.
- assertFailsWithCode(() => coll.aggregate([
+ // Test $merge to the same namespace as a $lookup which is nested within another $lookup.
+ assertFailsWithCode(() => coll.aggregate([
{
$lookup: {
from: "bar",
@@ -103,49 +94,33 @@
}
}
]));
- // Test $merge to the same namespace as a $lookup which is nested within a $facet.
- assertFailsWithCode(() => coll.aggregate([
- {
- $facet: {
- y: [{$lookup: {from: "TARGET", as: "y", pipeline: []}}],
- }
- },
- {
- $merge: {
- into: "TARGET",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }
- ]));
- assertFailsWithCode(() => coll.aggregate([
- {
- $facet: {
- x: [{$lookup: {from: "other", as: "y", pipeline: []}}],
- y: [{$lookup: {from: "TARGET", as: "y", pipeline: []}}],
- }
- },
- {
- $merge: {
- into: "TARGET",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
+ // Test $merge to the same namespace as a $lookup which is nested within a $facet.
+ assertFailsWithCode(() => coll.aggregate([
+ {
+ $facet: {
+ y: [{$lookup: {from: "TARGET", as: "y", pipeline: []}}],
}
- ]));
-
- // Test that we use the resolved namespace of a view to detect this sort of halloween
- // problem.
- assert.commandWorked(
- testDB.runCommand({create: "view_on_TARGET", viewOn: "TARGET", pipeline: []}));
- assertFailsWithCode(() => testDB.view_on_TARGET.aggregate([{
- $merge: {
- into: "TARGET",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
+ },
+ {$merge: {into: "TARGET", whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode}}
+ ]));
+ assertFailsWithCode(() => coll.aggregate([
+ {
+ $facet: {
+ x: [{$lookup: {from: "other", as: "y", pipeline: []}}],
+ y: [{$lookup: {from: "TARGET", as: "y", pipeline: []}}],
}
- }]));
- assertFailsWithCode(() => coll.aggregate([
+ },
+ {$merge: {into: "TARGET", whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode}}
+ ]));
+
+    // Test that we use the resolved namespace of a view to detect this sort of Halloween
+    // problem.
+ assert.commandWorked(
+ testDB.runCommand({create: "view_on_TARGET", viewOn: "TARGET", pipeline: []}));
+ assertFailsWithCode(() => testDB.view_on_TARGET.aggregate([
+ {$merge: {into: "TARGET", whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode}}
+ ]));
+ assertFailsWithCode(() => coll.aggregate([
{
$facet: {
x: [{$lookup: {from: "other", as: "y", pipeline: []}}],
@@ -167,25 +142,21 @@
}
]));
- function generateNestedPipeline(foreignCollName, numLevels) {
- let pipeline = [{"$lookup": {pipeline: [], from: foreignCollName, as: "same"}}];
-
- for (let level = 1; level < numLevels; level++) {
- pipeline = [{"$lookup": {pipeline: pipeline, from: foreignCollName, as: "same"}}];
- }
+ function generateNestedPipeline(foreignCollName, numLevels) {
+ let pipeline = [{"$lookup": {pipeline: [], from: foreignCollName, as: "same"}}];
- return pipeline;
+ for (let level = 1; level < numLevels; level++) {
+ pipeline = [{"$lookup": {pipeline: pipeline, from: foreignCollName, as: "same"}}];
}
- const nestedPipeline = generateNestedPipeline("lookup", 20).concat([{
- $merge: {
- into: "lookup",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assertFailsWithCode(() => coll.aggregate(nestedPipeline));
+ return pipeline;
+ }
- testDB.dropDatabase();
- });
+ const nestedPipeline = generateNestedPipeline("lookup", 20).concat([
+ {$merge: {into: "lookup", whenMatched: whenMatchedMode, whenNotMatched: whenNotMatchedMode}}
+ ]);
+ assertFailsWithCode(() => coll.aggregate(nestedPipeline));
+
+ testDB.dropDatabase();
+});
}());
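
The core "Halloween problem" guard tested above also works standalone: a pipeline may not $merge
into a namespace it reads from, whether via the source collection, a $lookup, a $graphLookup, a
$facet, or a view resolving to the target. A minimal sketch of the simplest case (hypothetical
collection name):

    const halloweenColl = db.sketch_halloween;
    halloweenColl.drop();
    assert.commandWorked(halloweenColl.insert({_id: 0}));
    const halloweenErr = assert.throws(() => halloweenColl.aggregate([
        {$merge: {into: halloweenColl.getName(), whenMatched: "keepExisting", whenNotMatched: "insert"}}
    ]));
    // The test above accepts either of these codes, depending on where the check fires.
    assert.contains(halloweenErr.code, [51188, 51079]);
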
diff --git a/jstests/aggregation/sources/merge/merge_to_same_collection.js b/jstests/aggregation/sources/merge/merge_to_same_collection.js
index 2e26a26965a..51435696fdd 100644
--- a/jstests/aggregation/sources/merge/merge_to_same_collection.js
+++ b/jstests/aggregation/sources/merge/merge_to_same_collection.js
@@ -2,19 +2,19 @@
* Tests that $merge fails when the target collection is the aggregation collection.
*
* @tags: [assumes_unsharded_collection]
-*/
+ */
(function() {
- "use strict";
+"use strict";
- // For assertMergeFailsForAllModesWithCode.
- load("jstests/aggregation/extras/merge_helpers.js");
+// For assertMergeFailsForAllModesWithCode.
+load("jstests/aggregation/extras/merge_helpers.js");
- const coll = db.name;
- coll.drop();
+const coll = db.name;
+coll.drop();
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i, a: i}));
- }
- assertMergeFailsForAllModesWithCode({source: coll, target: coll, errorCodes: 51188});
+const nDocs = 10;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i, a: i}));
+}
+assertMergeFailsForAllModesWithCode({source: coll, target: coll, errorCodes: 51188});
}());
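
The assertMergeFailsForAllModesWithCode helper used above hides a loop over every
whenMatched/whenNotMatched pairing; written out with the withEachMergeMode helper from the same
extras file, the equivalent check looks roughly like this (hypothetical collection name):

    load("jstests/aggregation/extras/merge_helpers.js");  // For withEachMergeMode.
    const sameNs = db.sketch_same_ns;
    sameNs.drop();
    assert.commandWorked(sameNs.insert({_id: 0}));
    withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
        const modeErr = assert.throws(() => sameNs.aggregate([{
            $merge: {
                into: sameNs.getName(),
                whenMatched: whenMatchedMode,
                whenNotMatched: whenNotMatchedMode
            }
        }]));
        assert.commandFailedWithCode(modeErr, 51188);
    });
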
diff --git a/jstests/aggregation/sources/merge/mode_fail_insert.js b/jstests/aggregation/sources/merge/mode_fail_insert.js
index 7cfd6aee02e..9363c42b12d 100644
--- a/jstests/aggregation/sources/merge/mode_fail_insert.js
+++ b/jstests/aggregation/sources/merge/mode_fail_insert.js
@@ -1,147 +1,149 @@
// Tests the behavior of $merge with whenMatched: "fail" and whenNotMatched: "insert".
// @tags: [assumes_unsharded_collection, assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
-
- const coll = db.merge_insert_only;
- coll.drop();
-
- const targetColl = db.merge_insert_only_out;
- targetColl.drop();
-
- const pipeline =
- [{$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}];
-
- //
- // Test $merge with a non-existent output collection.
- //
- assert.commandWorked(coll.insert({_id: 0}));
-
- coll.aggregate(pipeline);
- assert.eq(1, targetColl.find().itcount());
-
- //
- // Test $merge with an existing output collection.
- //
- assert.commandWorked(coll.remove({_id: 0}));
- assert.commandWorked(coll.insert({_id: 1}));
- coll.aggregate(pipeline);
- assert.eq(2, targetColl.find().itcount());
-
- //
- // Test that $merge fails if there's a duplicate key error.
- //
- assertErrorCode(coll, pipeline, ErrorCodes.DuplicateKey);
-
- //
- // Test that $merge will preserve the indexes and options of the output collection.
- //
- const validator = {a: {$gt: 0}};
- targetColl.drop();
- assert.commandWorked(db.createCollection(targetColl.getName(), {validator: validator}));
- assert.commandWorked(targetColl.createIndex({a: 1}));
-
- coll.drop();
- assert.commandWorked(coll.insert({a: 1}));
-
- coll.aggregate(pipeline);
- assert.eq(1, targetColl.find().itcount());
- assert.eq(2, targetColl.getIndexes().length);
-
- const listColl = db.runCommand({listCollections: 1, filter: {name: targetColl.getName()}});
- assert.commandWorked(listColl);
- assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
-
- //
- // Test that $merge fails if it violates a unique index constraint.
- //
- coll.drop();
- assert.commandWorked(coll.insert([{_id: 0, a: 0}, {_id: 1, a: 0}]));
- targetColl.drop();
- assert.commandWorked(targetColl.createIndex({a: 1}, {unique: true}));
-
- assertErrorCode(coll, pipeline, ErrorCodes.DuplicateKey);
-
- //
- // Test that a $merge aggregation succeeds even if the _id is stripped out and the "unique key"
- // is the document key, which will be _id for a new collection.
- //
- coll.drop();
- assert.commandWorked(coll.insert({a: 0}));
- targetColl.drop();
- assert.doesNotThrow(() => coll.aggregate([
- {$project: {_id: 0}},
- {$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}},
- ]));
- assert.eq(1, targetColl.find().itcount());
-
- //
- // Test that a $merge aggregation succeeds even if the _id is stripped out and _id is included
- // in the "on" fields.
- //
- coll.drop();
- assert.commandWorked(coll.insert([{_id: "should be projected away", name: "kyle"}]));
- targetColl.drop();
- assert.commandWorked(targetColl.createIndex({_id: 1, name: -1}, {unique: true}));
- assert.doesNotThrow(() => coll.aggregate([
- {$project: {_id: 0}},
- {
- $merge: {
- into: targetColl.getName(),
- whenMatched: "fail",
- whenNotMatched: "insert",
- on: ["_id", "name"]
- }
- },
- ]));
- assert.eq(1, targetColl.find().itcount());
-
- //
- // Tests for $merge to a database that differs from the aggregation database.
- //
- const foreignDb = db.getSiblingDB("merge_insert_only_foreign");
- const foreignTargetColl = foreignDb.merge_insert_only_out;
- const pipelineDifferentOutputDb = [
- {$project: {_id: 0}},
- {
- $merge: {
- into: {
- db: foreignDb.getName(),
- coll: foreignTargetColl.getName(),
- },
- whenMatched: "fail",
- whenNotMatched: "insert",
- }
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+
+const coll = db.merge_insert_only;
+coll.drop();
+
+const targetColl = db.merge_insert_only_out;
+targetColl.drop();
+
+const pipeline =
+ [{$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}];
+
+//
+// Test $merge with a non-existent output collection.
+//
+assert.commandWorked(coll.insert({_id: 0}));
+
+coll.aggregate(pipeline);
+assert.eq(1, targetColl.find().itcount());
+
+//
+// Test $merge with an existing output collection.
+//
+assert.commandWorked(coll.remove({_id: 0}));
+assert.commandWorked(coll.insert({_id: 1}));
+coll.aggregate(pipeline);
+assert.eq(2, targetColl.find().itcount());
+
+//
+// Test that $merge fails if there's a duplicate key error.
+//
+assertErrorCode(coll, pipeline, ErrorCodes.DuplicateKey);
+
+//
+// Test that $merge will preserve the indexes and options of the output collection.
+//
+const validator = {
+ a: {$gt: 0}
+};
+targetColl.drop();
+assert.commandWorked(db.createCollection(targetColl.getName(), {validator: validator}));
+assert.commandWorked(targetColl.createIndex({a: 1}));
+
+coll.drop();
+assert.commandWorked(coll.insert({a: 1}));
+
+coll.aggregate(pipeline);
+assert.eq(1, targetColl.find().itcount());
+assert.eq(2, targetColl.getIndexes().length);
+
+const listColl = db.runCommand({listCollections: 1, filter: {name: targetColl.getName()}});
+assert.commandWorked(listColl);
+assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
+
+//
+// Test that $merge fails if it violates a unique index constraint.
+//
+coll.drop();
+assert.commandWorked(coll.insert([{_id: 0, a: 0}, {_id: 1, a: 0}]));
+targetColl.drop();
+assert.commandWorked(targetColl.createIndex({a: 1}, {unique: true}));
+
+assertErrorCode(coll, pipeline, ErrorCodes.DuplicateKey);
+
+//
+// Test that a $merge aggregation succeeds even if the _id is stripped out and the "unique key"
+// is the document key, which will be _id for a new collection.
+//
+coll.drop();
+assert.commandWorked(coll.insert({a: 0}));
+targetColl.drop();
+assert.doesNotThrow(() => coll.aggregate([
+ {$project: {_id: 0}},
+ {$merge: {into: targetColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}},
+]));
+assert.eq(1, targetColl.find().itcount());
+
+//
+// Test that a $merge aggregation succeeds even if the _id is stripped out and _id is included
+// in the "on" fields.
+//
+coll.drop();
+assert.commandWorked(coll.insert([{_id: "should be projected away", name: "kyle"}]));
+targetColl.drop();
+assert.commandWorked(targetColl.createIndex({_id: 1, name: -1}, {unique: true}));
+assert.doesNotThrow(() => coll.aggregate([
+ {$project: {_id: 0}},
+ {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: "fail",
+ whenNotMatched: "insert",
+ on: ["_id", "name"]
+ }
+ },
+]));
+assert.eq(1, targetColl.find().itcount());
+
+//
+// Tests for $merge to a database that differs from the aggregation database.
+//
+const foreignDb = db.getSiblingDB("merge_insert_only_foreign");
+const foreignTargetColl = foreignDb.merge_insert_only_out;
+const pipelineDifferentOutputDb = [
+ {$project: {_id: 0}},
+ {
+ $merge: {
+ into: {
+ db: foreignDb.getName(),
+ coll: foreignTargetColl.getName(),
+ },
+ whenMatched: "fail",
+ whenNotMatched: "insert",
}
- ];
-
- foreignDb.dropDatabase();
- coll.drop();
- assert.commandWorked(coll.insert({a: 1}));
-
- if (!FixtureHelpers.isMongos(db)) {
- //
- // Test that $merge implicitly creates a new database when the output collection's database
- // doesn't exist.
- //
- coll.aggregate(pipelineDifferentOutputDb);
- assert.eq(foreignTargetColl.find().itcount(), 1);
- } else {
- // Implicit database creation is prohibited in a cluster.
- const error = assert.throws(() => coll.aggregate(pipelineDifferentOutputDb));
- assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
-
- // Explicitly create the collection and database, then fall through to the test below.
- assert.commandWorked(foreignTargetColl.insert({val: "forcing database creation"}));
}
-
- //
- // Re-run the $merge aggregation, which should merge with the existing contents of the
- // collection. We rely on implicit _id generation to give us unique _id values.
- //
- assert.doesNotThrow(() => coll.aggregate(pipelineDifferentOutputDb));
- assert.eq(foreignTargetColl.find().itcount(), 2);
+];
+
+foreignDb.dropDatabase();
+coll.drop();
+assert.commandWorked(coll.insert({a: 1}));
+
+if (!FixtureHelpers.isMongos(db)) {
+ //
+ // Test that $merge implicitly creates a new database when the output collection's database
+ // doesn't exist.
+ //
+ coll.aggregate(pipelineDifferentOutputDb);
+ assert.eq(foreignTargetColl.find().itcount(), 1);
+} else {
+ // Implicit database creation is prohibited in a cluster.
+ const error = assert.throws(() => coll.aggregate(pipelineDifferentOutputDb));
+ assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
+
+ // Explicitly create the collection and database, then fall through to the test below.
+ assert.commandWorked(foreignTargetColl.insert({val: "forcing database creation"}));
+}
+
+//
+// Re-run the $merge aggregation, which should merge with the existing contents of the
+// collection. We rely on implicit _id generation to give us unique _id values.
+//
+assert.doesNotThrow(() => coll.aggregate(pipelineDifferentOutputDb));
+assert.eq(foreignTargetColl.find().itcount(), 2);
}());
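
One detail worth pulling out of mode_fail_insert.js above: merging on a key other than _id
requires the "on" fields to be backed by a unique index on the target. A minimal sketch of that
pattern (hypothetical collection names and field):

    const onSrc = db.sketch_on_src;
    const onOut = db.sketch_on_out;
    onSrc.drop();
    onOut.drop();
    assert.commandWorked(onSrc.insert({sku: "abc", qty: 5}));
    // createIndex implicitly creates the target; the unique index backs the "on" field.
    assert.commandWorked(onOut.createIndex({sku: 1}, {unique: true}));
    assert.doesNotThrow(() => onSrc.aggregate([
        {$project: {_id: 0}},
        {$merge: {into: onOut.getName(), whenMatched: "fail", whenNotMatched: "insert", on: "sku"}}
    ]));
    assert.eq(1, onOut.find().itcount());
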
diff --git a/jstests/aggregation/sources/merge/mode_keep_existing_insert.js b/jstests/aggregation/sources/merge/mode_keep_existing_insert.js
index b76fb9d20e9..3f146adbcb5 100644
--- a/jstests/aggregation/sources/merge/mode_keep_existing_insert.js
+++ b/jstests/aggregation/sources/merge/mode_keep_existing_insert.js
@@ -4,372 +4,367 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
-
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge: {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "insert"}
- };
- const pipeline = [mergeStage];
-
- // Test $merge into a non-existent collection.
- (function testMergeIntoNonExistentCollection() {
- assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- ]
- });
- })();
-
- // Test $merge into an existing collection.
- (function testMergeIntoExistentCollection() {
- assert.commandWorked(source.insert({_id: 2, a: 2, b: "b"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- })();
-
- // Test $merge does not update documents in the target collection if they were not modified
- // in the source collection.
- (function testMergeDoesNotUpdateUnmodifiedDocuments() {
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- })();
-
- // Test $merge doesn't update documents in the target collection if they were modified in the
- // source collection.
- (function testMergeDoesNotUpdateModifiedDocuments() {
- // Update and merge a single document.
- assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
-
- // Update and merge multiple documents.
- assert.commandWorked(source.update({_id: 1}, {a: 11}));
- assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c", d: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- })();
-
- // Test $merge inserts a new document into the target collection if it was inserted into the
- // source collection.
- (function testMergeInsertsNewDocument() {
- // Insert and merge a single document.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 3, b: "c"}]
- });
- assert.commandWorked(source.deleteOne({_id: 3}));
- assert.commandWorked(target.deleteOne({_id: 3}));
-
- // Insert and merge multiple documents.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- {_id: 2, a: 2, b: "b"},
- {_id: 3, a: 3, b: "c"},
- {_id: 4, a: 4, c: "d"}
- ]
- });
- assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
- assert.commandWorked(target.deleteMany({_id: {$in: [3, 4]}}));
- })();
-
- // Test $merge doesn't modify the target collection if a document has been removed from the
- // source collection.
- (function testMergeDoesNotUpdateDeletedDocument() {
- assert.commandWorked(source.deleteOne({_id: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- {_id: 2, a: 2, b: "b"},
- ]
- });
- })();
-
- // Test $merge fails if a unique index constraint in the target collection is violated.
- (function testMergeFailsIfTargetUniqueKeyIsViolated() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: "keepExisting", whenNotMatched: "insert"}
+};
+const pipeline = [mergeStage];
+
+// Test $merge into a non-existent collection.
+(function testMergeIntoNonExistentCollection() {
+ assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ ]
+ });
+})();
+
+// Test $merge into an existing collection.
+(function testMergeIntoExistentCollection() {
+ assert.commandWorked(source.insert({_id: 2, a: 2, b: "b"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+})();
+
+// Test $merge does not update documents in the target collection if they were not modified
+// in the source collection.
+(function testMergeDoesNotUpdateUnmodifiedDocuments() {
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+})();
+
+// Test $merge doesn't update documents in the target collection if they were modified in the
+// source collection.
+(function testMergeDoesNotUpdateModifiedDocuments() {
+ // Update and merge a single document.
+ assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+
+ // Update and merge multiple documents.
+ assert.commandWorked(source.update({_id: 1}, {a: 11}));
+ assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c", d: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+})();
+
+// Test $merge inserts a new document into the target collection if it was inserted into the
+// source collection.
+(function testMergeInsertsNewDocument() {
+ // Insert and merge a single document.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 3, b: "c"}]
+ });
+ assert.commandWorked(source.deleteOne({_id: 3}));
+ assert.commandWorked(target.deleteOne({_id: 3}));
+
+ // Insert and merge multiple documents.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 3, a: 3, b: "c"},
+ {_id: 4, a: 4, c: "d"}
+ ]
+ });
+ assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
+ assert.commandWorked(target.deleteMany({_id: {$in: [3, 4]}}));
+})();
+
+// Test $merge doesn't modify the target collection if a document has been removed from the
+// source collection.
+(function testMergeDoesNotUpdateDeletedDocument() {
+ assert.commandWorked(source.deleteOne({_id: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ ]
+ });
+})();
+
+// Test $merge fails if a unique index constraint in the target collection is violated.
+(function testMergeFailsIfTargetUniqueKeyIsViolated() {
+ if (FixtureHelpers.isSharded(source)) {
+        // Skip this test if the collection is sharded, because the implicitly created shard
+        // key of {_id: 1} would not be covered by the unique index created in this test,
+        // which is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert.commandWorked(source.insert({_id: 4, a: 1}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ const error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ ]
+ });
+ assert.commandWorked(target.dropIndex({a: 1}));
+})();
+
+// Test $merge fails if it cannot find an index to verify that the 'on' fields will be unique.
+(function testMergeFailsIfOnFieldCannotBeVerifiedForUniquness() {
+ // The 'on' field contains a single document field.
+ let error = assert.throws(
+ () => source.aggregate([{$merge: Object.assign({on: "nonexistent"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, [51190, 51183]);
+
+ // The 'on' field contains multiple document fields.
+ error = assert.throws(
+ () => source.aggregate(
+ [{$merge: Object.assign({on: ["nonexistent1", "nonexistent2"]}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, [51190, 51183]);
+})();
+
+ // Test $merge with an explicit 'on' field over a single document field or multiple document
+ // fields that differ from the _id field.
+(function testMergeWithOnFields() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ // The 'on' field contains a single document field.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, c: "x"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 4, a: 30, c: "y"},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+
+ // The 'on' field contains multiple document fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(source.insert(
+ [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(target.insert(
+ [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 4, a: 30, b: "c", c: "y"},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+ assert.commandWorked(source.dropIndex({a: 1, b: 1}));
+ assert.commandWorked(target.dropIndex({a: 1, b: 1}));
+})();
+
+// Test $merge with a dotted path in the 'on' field.
+(function testMergeWithDottedOnField() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(source.insert([
+ {_id: 1, a: {b: "b"}, c: "x"},
+ {_id: 2, a: {b: "c"}, c: "y"},
+ {_id: 3, a: {b: 30}, b: "c"}
+ ]));
+ assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected:
+ [{_id: 1, a: {b: "b"}, c: "x"}, {_id: 2, a: {b: "c"}}, {_id: 3, a: {b: 30}, b: "c"}]
+ });
+})();
+
+// Test $merge fails if the value of the 'on' field in a document is invalid, e.g. missing,
+// null or an array.
+(function testMergeFailsIfOnFieldIsInvalid() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"z": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"z": 1}, {unique: true}));
+
+ // The 'on' field is missing.
+ assert.commandWorked(source.insert({_id: 1}));
+ let error = assert.throws(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, 51132);
+
+ // The 'on' field is null.
+ assert.commandWorked(source.update({_id: 1}, {z: null}));
+ error = assert.throws(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, 51132);
+
+ // The 'on' field is an array.
+ assert.commandWorked(source.update({_id: 1}, {z: [1, 2]}));
+ error = assert.throws(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, 51185);
+})();
+
+// Test $merge when the _id field is removed from the aggregate projection but is used in the
+// $merge's 'on' field.
+(function testMergeWhenDocIdIsRemovedFromProjection() {
+ // The _id is a single 'on' field (a default one).
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
+ assertArrayEq({
+ // Remove the _id field from the projection, as the arrayEq function cannot ignore
+ // mismatches in the ObjectId. The target collection should contain all elements from
+ // the source and the target even though they had the same _id's and would have been
+ // merged had we not removed the _id field from the aggregate projection.
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
+ });
+
+ // The _id is part of the compound 'on' field.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 0}},
+ {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
+ ]));
+ assertArrayEq({
+ // Remove the _id field from the projection, as the arrayEq function cannot ignore
+ // mismatches in the ObjectId. The target collection should contain all elements from
+ // the source and the target even though they had the same _id's and would have been
+ // merged had we not removed the _id field from the aggregate projection.
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
+ });
+ assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
+ assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
+})();
+
+// Test $merge preserves indexes and options of the existing target collection.
+(function testMergePresrvesIndexesAndOptions() {
+ const validator = {a: {$gt: 0}};
+ assert(target.drop());
+ assert.commandWorked(db.createCollection(target.getName(), {validator: validator}));
+ assert.commandWorked(target.createIndex({a: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+ assert.eq(2, target.getIndexes().length);
+
+ const listColl = db.runCommand({listCollections: 1, filter: {name: target.getName()}});
+ assert.commandWorked(listColl);
+ assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
+})();
+
+// Test $merge implicitly creates a new database when the target collection's database doesn't
+// exist.
+(function testMergeImplicitlyCreatesTargetDatabase() {
+ assert(source.drop());
+ assert.commandWorked(source.insert({_id: 1, a: 1}));
+
+ const foreignDb = db.getSiblingDB(`${jsTest.name()}_foreign_db`);
+ assert.commandWorked(foreignDb.dropDatabase());
+ const foreignTarget = foreignDb[`${jsTest.name()}_target`];
+ const foreignPipeline = [{
+ $merge: {
+ into: {db: foreignDb.getName(), coll: foreignTarget.getName()},
+ whenMatched: "keepExisting",
+ whenNotMatched: "insert"
}
+ }];
- assert(source.drop());
- assert.commandWorked(source.insert({_id: 4, a: 1}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- const error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- {_id: 2, a: 2, b: "b"},
- ]
- });
- assert.commandWorked(target.dropIndex({a: 1}));
- })();
-
- // Test $merge fails if it cannot find an index to verify that the 'on' fields will be unique.
- (function testMergeFailsIfOnFieldCannotBeVerifiedForUniquness() {
- // The 'on' fields contains a single document field.
- let error =
- assert.throws(() => source.aggregate(
- [{$merge: Object.assign({on: "nonexistent"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, [51190, 51183]);
-
- // The 'on' fields contains multiple document fields.
- error = assert.throws(() => source.aggregate([
- {$merge: Object.assign({on: ["nonexistent1", "nonexistent2"]}, mergeStage.$merge)}
- ]));
- assert.commandFailedWithCode(error, [51190, 51183]);
- })();
-
- // Test $merge with an explicit 'on' field over a single or multiple document fields which
- // differ from the _id field.
- (function testMergeWithOnFields() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- // The 'on' fields contains a single document field.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, c: "x"},
- {_id: 2, a: 2, b: "b"},
- {_id: 4, a: 30, c: "y"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
-
- // The 'on' fields contains multiple document fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- {_id: 2, a: 2, b: "b"},
- {_id: 4, a: 30, b: "c", c: "y"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
- assert.commandWorked(source.dropIndex({a: 1, b: 1}));
- assert.commandWorked(target.dropIndex({a: 1, b: 1}));
- })();
-
- // Test $merge with a dotted path in the 'on' field.
- (function testMergeWithDottedOnField() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(source.insert([
- {_id: 1, a: {b: "b"}, c: "x"},
- {_id: 2, a: {b: "c"}, c: "y"},
- {_id: 3, a: {b: 30}, b: "c"}
- ]));
- assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: {b: "b"}, c: "x"},
- {_id: 2, a: {b: "c"}},
- {_id: 3, a: {b: 30}, b: "c"}
- ]
- });
- })();
-
- // Test $merge fails if the value of the 'on' field in a document is invalid, e.g. missing,
- // null or an array.
- (function testMergeFailsIfOnFieldIsInvalid() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"z": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"z": 1}, {unique: true}));
-
- // The 'on' field is missing.
- assert.commandWorked(source.insert({_id: 1}));
- let error = assert.throws(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, 51132);
-
- // The 'on' field is null.
- assert.commandWorked(source.update({_id: 1}, {z: null}));
- error = assert.throws(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, 51132);
-
- // The 'on' field is an array.
- assert.commandWorked(source.update({_id: 1}, {z: [1, 2]}));
- error = assert.throws(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, 51185);
- })();
-
- // Test $merge when the _id field is removed from the aggregate projection but is used in the
- // $merge's 'on' field.
- (function testMergeWhenDocIdIsRemovedFromProjection() {
- // The _id is a single 'on' field (a default one).
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
- assertArrayEq({
- // Remove the _id field from the projection as the arrayEq function cannot ignore
- // mismatches in the ObjectId. The target collection should contain all elements from
- // the source and the target even though they had the same _id's and would have been
- // merged should we not remove the _id field from the aggregate projection.
- actual: target.find({}, {_id: 0}).toArray(),
- expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
- });
-
- // The _id is part of the compound 'on' field.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({
- // Remove the _id field from the projection as the arrayEq function cannot ignore
- // mismatches in the ObjectId. The target collection should contain all elements from
- // the source and the target even though they had the same _id's and would have been
- // merged should we not remove the _id field from the aggregate projection.
- actual: target.find({}, {_id: 0}).toArray(),
- expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
- });
- assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
- assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
- })();
-
- // Test $merge preserves indexes and options of the existing target collection.
- (function testMergePresrvesIndexesAndOptions() {
- const validator = {a: {$gt: 0}};
- assert(target.drop());
- assert.commandWorked(db.createCollection(target.getName(), {validator: validator}));
- assert.commandWorked(target.createIndex({a: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- assert.eq(2, target.getIndexes().length);
-
- const listColl = db.runCommand({listCollections: 1, filter: {name: target.getName()}});
- assert.commandWorked(listColl);
- assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
- })();
-
- // Test $merge implicitly creates a new database when the target collection's database doesn't
- // exist.
- (function testMergeImplicitlyCreatesTargetDatabase() {
- assert(source.drop());
- assert.commandWorked(source.insert({_id: 1, a: 1}));
-
- const foreignDb = db.getSiblingDB(`${jsTest.name()}_foreign_db`);
- assert.commandWorked(foreignDb.dropDatabase());
- const foreignTarget = foreignDb[`${jsTest.name()}_target`];
- const foreignPipeline = [{
- $merge: {
- into: {db: foreignDb.getName(), coll: foreignTarget.getName()},
- whenMatched: "keepExisting",
- whenNotMatched: "insert"
- }
- }];
-
- if (!FixtureHelpers.isMongos(db)) {
- assert.doesNotThrow(() => source.aggregate(foreignPipeline));
- assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, a: 1}]});
- } else {
- // Implicit database creation is prohibited in a cluster.
- const error = assert.throws(() => source.aggregate(foreignPipeline));
- assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
-
- // Force a creation of the database and collection, then fall through the test below.
- assert.commandWorked(foreignTarget.insert({_id: 1, a: 1}));
- }
-
- assert.commandWorked(source.update({_id: 1}, {a: 1, b: "a"}));
+ if (!FixtureHelpers.isMongos(db)) {
assert.doesNotThrow(() => source.aggregate(foreignPipeline));
assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, a: 1}]});
- assert.commandWorked(foreignDb.dropDatabase());
- })();
+ } else {
+ // Implicit database creation is prohibited in a cluster.
+ const error = assert.throws(() => source.aggregate(foreignPipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
+
+ // Force creation of the database and collection, then fall through to the test below.
+ assert.commandWorked(foreignTarget.insert({_id: 1, a: 1}));
+ }
+
+ assert.commandWorked(source.update({_id: 1}, {a: 1, b: "a"}));
+ assert.doesNotThrow(() => source.aggregate(foreignPipeline));
+ assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, a: 1}]});
+ assert.commandWorked(foreignDb.dropDatabase());
+})();
}());
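For reference, the {whenMatched: "keepExisting", whenNotMatched: "insert"} semantics exercised by the file above reduce to: a matched target document is left untouched, and an unmatched source document is inserted. A minimal shell sketch of just that behavior (the collection names are illustrative, not part of the test suite, and the usual shell assert helpers are assumed):

    // Minimal sketch of whenMatched: "keepExisting" / whenNotMatched: "insert".
    const src = db.keep_existing_sketch_source;  // hypothetical collection names
    const tgt = db.keep_existing_sketch_target;
    src.drop();
    tgt.drop();
    assert.commandWorked(src.insert([{_id: 1, a: "new"}, {_id: 2, a: 2}]));
    assert.commandWorked(tgt.insert({_id: 1, a: "old"}));
    src.aggregate(
        [{$merge: {into: tgt.getName(), whenMatched: "keepExisting", whenNotMatched: "insert"}}]);
    // The matched document keeps its target version; the unmatched one is inserted:
    // [{_id: 1, a: "old"}, {_id: 2, a: 2}]
    printjson(tgt.find().toArray());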
diff --git a/jstests/aggregation/sources/merge/mode_merge_discard.js b/jstests/aggregation/sources/merge/mode_merge_discard.js
index 401210c1d4d..cc9fff93691 100644
--- a/jstests/aggregation/sources/merge/mode_merge_discard.js
+++ b/jstests/aggregation/sources/merge/mode_merge_discard.js
@@ -4,237 +4,227 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isSharded.
-
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "discard"}
- };
- const pipeline = [mergeStage];
-
- // Test $merge when some documents in the source collection don't have a matching document in
- // the target collection. The merge operation should succeed and unmatched documents discarded.
- (function testMergeIfMatchingDocumentNotFound() {
- // Single document without a match.
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: 1}, {_id: 3, a: 3, b: 3}]
- });
-
- // Multiple documents without a match.
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1, b: 1}]});
- })();
-
- // Test $merge when all documents in the source collection have a matching document in the
- // target collection.
- (function testMergeWhenAllDocumentsHaveMatch() {
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 3, a: 3}));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3, b: 3}]});
-
- // Source has multiple documents with matches in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}, {_id: 3, b: 3}]
- });
- })();
-
- // Test $merge when a field is presented in the source and the target and contains a
- // sub-document value.
- (function testMergeSubdocuments() {
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 1, a: {b: 1}}));
- assert.commandWorked(target.insert([{_id: 1, a: {c: 2}}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, a: {b: 1}}, {_id: 3, b: 3}]});
-
- // Source has multiple documents with matches in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: {b: 1}}, {_id: 2, a: {b: 2}}]));
- assert.commandWorked(target.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: {b: 1}, b: 1}, {_id: 2, a: {b: 2}}, {_id: 3, b: 3}]
- });
- })();
-
- // Test $merge when the source collection is empty. The target collection should not be
- // modified.
- (function testMergeWhenSourceIsEmpty() {
- assert.commandWorked(source.deleteMany({}));
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
-
- // Test $merge does not insert a new document into the target collection if it was inserted
- // into the source collection.
- (function testMergeDoesNotInsertNewDocument() {
- // Insert and merge a single document.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- assert.commandWorked(source.deleteOne({_id: 3}));
-
- // Insert and merge multiple documents.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
- })();
-
- // Test $merge doesn't modify the target collection if a document has been removed from the
- // source collection.
- (function testMergeDoesNotUpdateDeletedDocument() {
- assert.commandWorked(source.deleteOne({_id: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
-
- // Test $merge with an explicit 'on' field over a single or multiple document fields which
- // differ from the _id field.
- (function testMergeWithOnFields() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- // The 'on' fields contains a single document field.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 4, a: 30, b: "c", c: "y"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
-
- // The 'on' fields contains multiple document fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(source.insert([
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isSharded.
+
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "discard"}
+};
+const pipeline = [mergeStage];
+
+// Test $merge when some documents in the source collection don't have a matching document in
+// the target collection. The merge operation should succeed, and unmatched documents should be
+// discarded.
+(function testMergeIfMatchingDocumentNotFound() {
+ // Single document without a match.
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, a: 1, b: 1}, {_id: 3, a: 3, b: 3}]});
+
+ // Multiple documents without a match.
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1, b: 1}]});
+})();
+
+// Test $merge when all documents in the source collection have a matching document in the
+// target collection.
+(function testMergeWhenAllDocumentsHaveMatch() {
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 3, a: 3}));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3, b: 3}]});
+
+ // Source has multiple documents with matches in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}, {_id: 3, b: 3}]
+ });
+})();
+
+// Test $merge when a field is present in both the source and the target and contains a
+// sub-document value.
+(function testMergeSubdocuments() {
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 1, a: {b: 1}}));
+ assert.commandWorked(target.insert([{_id: 1, a: {c: 2}}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, a: {b: 1}}, {_id: 3, b: 3}]});
+
+ // Source has multiple documents with matches in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: {b: 1}}, {_id: 2, a: {b: 2}}]));
+ assert.commandWorked(target.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: {b: 1}, b: 1}, {_id: 2, a: {b: 2}}, {_id: 3, b: 3}]
+ });
+})();
+
+// Test $merge when the source collection is empty. The target collection should not be
+// modified.
+(function testMergeWhenSourceIsEmpty() {
+ assert.commandWorked(source.deleteMany({}));
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
+
+// Test $merge does not insert a new document into the target collection even if it was
+// inserted into the source collection.
+(function testMergeDoesNotInsertNewDocument() {
+ // Insert and merge a single document.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+ assert.commandWorked(source.deleteOne({_id: 3}));
+
+ // Insert and merge multiple documents.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+ assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
+})();
+
+// Test $merge doesn't modify the target collection if a document has been removed from the
+// source collection.
+(function testMergeDoesNotUpdateDeletedDocument() {
+ assert.commandWorked(source.deleteOne({_id: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
+
+// Test $merge with an explicit 'on' field over a single document field or multiple document
+// fields that differ from the _id field.
+(function testMergeWithOnFields() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ // The 'on' field contains a single document field.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
{_id: 1, a: 1, b: "a", c: "x"},
- {_id: 2, a: 2, b: "b"},
- {_id: 3, a: 30, b: "c", c: "x"}
- ]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 4, a: 30, b: "c", c: "x"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
- assert.commandWorked(source.dropIndex({a: 1, b: 1}));
- assert.commandWorked(target.dropIndex({a: 1, b: 1}));
- })();
-
- // Test $merge with a dotted path in the 'on' field.
- (function testMergeWithDottedOnField() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(source.insert([
- {_id: 1, a: {b: "b"}, c: "x"},
- {_id: 2, a: {b: "c"}, c: "y"},
- {_id: 3, a: {b: 30}, b: "c"}
- ]));
- assert.commandWorked(target.insert({_id: 2, a: {b: "c"}, d: "z"}));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 2, a: {b: "c"}, c: "y", d: "z"},
- ]
- });
- })();
-
- // Test $merge when the _id field is removed from the aggregate projection but is used in the
- // $merge's 'on' field.
- (function testMergeWhenDocIdIsRemovedFromProjection() {
- // The _id is a single 'on' field (a default one).
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
- assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
-
- // The _id is part of the compound 'on' field.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
- assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
- assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
- })();
+ {_id: 4, a: 30, b: "c", c: "y"},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+
+ // The 'on' field contains multiple document fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(source.insert(
+ [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c", c: "x"}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a", c: "x"},
+ {_id: 4, a: 30, b: "c", c: "x"},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+ assert.commandWorked(source.dropIndex({a: 1, b: 1}));
+ assert.commandWorked(target.dropIndex({a: 1, b: 1}));
+})();
+
+// Test $merge with a dotted path in the 'on' field.
+(function testMergeWithDottedOnField() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(source.insert([
+ {_id: 1, a: {b: "b"}, c: "x"},
+ {_id: 2, a: {b: "c"}, c: "y"},
+ {_id: 3, a: {b: 30}, b: "c"}
+ ]));
+ assert.commandWorked(target.insert({_id: 2, a: {b: "c"}, d: "z"}));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 2, a: {b: "c"}, c: "y", d: "z"},
+ ]
+ });
+})();
+
+// Test $merge when the _id field is removed from the aggregate projection but is used in the
+// $merge's 'on' field.
+(function testMergeWhenDocIdIsRemovedFromProjection() {
+ // The _id is a single 'on' field (a default one).
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
+ assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
+
+ // The _id is part of the compound 'on' field.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 0}},
+ {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
+ ]));
+ assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
+ assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
+ assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
+})();
}());
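The {whenMatched: "merge", whenNotMatched: "discard"} mode tested above can be summarized the same way: for a matched pair, the source fields are merged over the target document, and unmatched source documents are silently dropped. A minimal shell sketch under the same assumptions (illustrative collection names, shell assert helpers available):

    // Minimal sketch of whenMatched: "merge" / whenNotMatched: "discard".
    const src = db.merge_discard_sketch_source;  // hypothetical collection names
    const tgt = db.merge_discard_sketch_target;
    src.drop();
    tgt.drop();
    assert.commandWorked(src.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
    assert.commandWorked(tgt.insert({_id: 1, b: 1}));
    src.aggregate(
        [{$merge: {into: tgt.getName(), whenMatched: "merge", whenNotMatched: "discard"}}]);
    // {_id: 1} becomes {_id: 1, a: 1, b: 1}; the unmatched {_id: 2} never reaches the target:
    printjson(tgt.find().toArray());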
diff --git a/jstests/aggregation/sources/merge/mode_merge_fail.js b/jstests/aggregation/sources/merge/mode_merge_fail.js
index 9bf0eadb148..de0842a02b9 100644
--- a/jstests/aggregation/sources/merge/mode_merge_fail.js
+++ b/jstests/aggregation/sources/merge/mode_merge_fail.js
@@ -4,116 +4,113 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "fail"}
- };
- const pipeline = [mergeStage];
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "fail"}
+};
+const pipeline = [mergeStage];
- // Test $merge when some documents in the source collection don't have a matching document in
- // the target collection.
- (function testMergeFailsIfMatchingDocumentNotFound() {
- // Single document without a match.
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- let error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: 1}, {_id: 3, a: 3, b: 3}]
- });
+// Test $merge when some documents in the source collection don't have a matching document in
+// the target collection.
+(function testMergeFailsIfMatchingDocumentNotFound() {
+ // Single document without a match.
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ let error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, a: 1, b: 1}, {_id: 3, a: 3, b: 3}]});
- // Multiple documents without a match.
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}]));
- error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1, b: 1}]});
- })();
+ // Multiple documents without a match.
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}]));
+ error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1, b: 1}]});
+})();
- // Test $merge when all documents in the source collection have a matching document in the
- // target collection.
- (function testMergeWhenAllDocumentsHaveMatch() {
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 3, a: 3}));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3, b: 3}]});
+// Test $merge when all documents in the source collection have a matching document in the
+// target collection.
+(function testMergeWhenAllDocumentsHaveMatch() {
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 3, a: 3}));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3, b: 3}]});
- // Source has multiple documents with matches in the target.
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}, {_id: 3, a: 3, b: 3}]
- });
- })();
+ // Source has multiple documents with matches in the target.
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}, {_id: 3, a: 3, b: 3}]
+ });
+})();
- // Test $merge when the source collection is empty. The target collection should not be
- // modified.
- (function testMergeWhenSourceIsEmpty() {
- assert.commandWorked(source.deleteMany({}));
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
+// Test $merge when the source collection is empty. The target collection should not be
+// modified.
+(function testMergeWhenSourceIsEmpty() {
+ assert.commandWorked(source.deleteMany({}));
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
- // Test $merge uses unorderded batch update. When a mismatch is detected in a batch, the error
- // should be returned once the batch is processed and no further documents should be processed
- // and updated.
- (function testMergeUnorderedBatchUpdate() {
- const maxBatchSize = 16 * 1024 * 1024; // 16MB
- const docSize = 1024 * 1024; // 1MB
- const numDocs = 20;
- const maxDocsInBatch = maxBatchSize / docSize;
+// Test $merge uses an unordered batch update. When a mismatch is detected in a batch, the
+// error should be returned once the batch is processed, and no further documents should be
+// processed or updated.
+(function testMergeUnorderedBatchUpdate() {
+ const maxBatchSize = 16 * 1024 * 1024; // 16MB
+ const docSize = 1024 * 1024; // 1MB
+ const numDocs = 20;
+ const maxDocsInBatch = maxBatchSize / docSize;
- assert(source.drop());
- assert(target.drop());
+ assert(source.drop());
+ assert(target.drop());
- // Insert 'numDocs' documents of size 'docSize' into the source collection.
- generateCollection({coll: source, numDocs: numDocs, docSize: docSize});
+ // Insert 'numDocs' documents of size 'docSize' into the source collection.
+ generateCollection({coll: source, numDocs: numDocs, docSize: docSize});
- // Copy over documents from the source collection into the target and remove the 'padding'
- // field from the projection, so we can distinguish which documents have been modified by
- // the $merge stage.
- assert.doesNotThrow(
- () => source.aggregate([{$project: {padding: 0}}, {$out: target.getName()}]));
+ // Copy over documents from the source collection into the target and remove the 'padding'
+ // field from the projection, so we can distinguish which documents have been modified by
+ // the $merge stage.
+ assert.doesNotThrow(() =>
+ source.aggregate([{$project: {padding: 0}}, {$out: target.getName()}]));
- // Remove one document from the target collection so that $merge fails. This document should
- // be in the first batch of the aggregation pipeline below, which sorts documents by the _id
- // field in ascending order. Since each document in the source collection is 1MB, and the
- // max batch size is 16MB, the first batch will contain documents with the _id in the range
- // of [0, 15].
- assert.commandWorked(target.deleteOne({_id: Math.floor(Math.random() * maxDocsInBatch)}));
+ // Remove one document from the target collection so that $merge fails. This document should
+ // be in the first batch of the aggregation pipeline below, which sorts documents by the _id
+ // field in ascending order. Since each document in the source collection is 1MB, and the
+ // max batch size is 16MB, the first batch will contain documents with the _id in the range
+ // of [0, 15].
+ assert.commandWorked(target.deleteOne({_id: Math.floor(Math.random() * maxDocsInBatch)}));
- // Ensure the target collection has 'numDocs' - 1 documents without the 'padding' field.
- assert.eq(numDocs - 1, target.find({padding: {$exists: false}}).itcount());
+ // Ensure the target collection has 'numDocs' - 1 documents without the 'padding' field.
+ assert.eq(numDocs - 1, target.find({padding: {$exists: false}}).itcount());
- // Run the $merge pipeline and ensure it fails, as there is one document in the source
- // collection without a match in the target.
- const error = assert.throws(() => source.aggregate([{$sort: {_id: 1}}, mergeStage]));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ // Run the $merge pipeline and ensure it fails, as there is one document in the source
+ // collection without a match in the target.
+ const error = assert.throws(() => source.aggregate([{$sort: {_id: 1}}, mergeStage]));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- // There will be maxDocsInBatch documents in the batch, one without a match.
- const numDocsModified = maxDocsInBatch - 1;
- // All remaining documents except those in the first batch must be left unmodified.
- const numDocsUnmodified = numDocs - maxDocsInBatch;
- assert.eq(numDocsModified, target.find({padding: {$exists: true}}).itcount());
- assert.eq(numDocsUnmodified, target.find({padding: {$exists: false}}).itcount());
- })();
+ // There will be maxDocsInBatch documents in the batch, one without a match.
+ const numDocsModified = maxDocsInBatch - 1;
+ // All remaining documents except those in the first batch must be left unmodified.
+ const numDocsUnmodified = numDocs - maxDocsInBatch;
+ assert.eq(numDocsModified, target.find({padding: {$exists: true}}).itcount());
+ assert.eq(numDocsUnmodified, target.find({padding: {$exists: false}}).itcount());
+})();
}());
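The batch arithmetic behind testMergeUnorderedBatchUpdate above is worth spelling out: with a 16MB write batch and 1MB source documents, the first batch holds the documents with _id 0 through 15, so deleting one target match from that range fails the batch while the other 15 documents in it are still merged (the batch is unordered), and the remaining 4 source documents are never attempted. The sketch below only restates the constants the test itself defines:

    // Worked numbers for the unordered-batch argument in testMergeUnorderedBatchUpdate.
    const maxBatchSize = 16 * 1024 * 1024;  // 16MB per write batch
    const docSize = 1024 * 1024;            // 1MB per source document
    const numDocs = 20;
    const maxDocsInBatch = maxBatchSize / docSize;       // 16 documents fit in the first batch
    const numDocsModified = maxDocsInBatch - 1;          // 15 updates succeed despite the failure
    const numDocsUnmodified = numDocs - maxDocsInBatch;  // 4 documents (_id 16..19) are untouched
    print(maxDocsInBatch, numDocsModified, numDocsUnmodified);  // 16 15 4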
diff --git a/jstests/aggregation/sources/merge/mode_merge_insert.js b/jstests/aggregation/sources/merge/mode_merge_insert.js
index 370963a24d2..577479f7a46 100644
--- a/jstests/aggregation/sources/merge/mode_merge_insert.js
+++ b/jstests/aggregation/sources/merge/mode_merge_insert.js
@@ -4,368 +4,365 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
-
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "insert"}
- };
- const pipeline = [mergeStage];
-
- // Test $merge into a non-existent collection.
- (function testMergeIntoNonExistentCollection() {
- assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a"},
- ]
- });
- })();
-
- // Test $merge into an existing collection.
- (function testMergeIntoExistentCollection() {
- assert.commandWorked(source.insert({_id: 2, a: 2, b: "b"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- })();
-
- // Test $merge does not update documents in the target collection if they were not modified
- // in the source collection.
- (function testMergeDoesNotUpdateUnmodifiedDocuments() {
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- })();
-
- // Test $merge updates documents in the target collection if they were modified in the source
- // collection.
- (function testMergeUpdatesModifiedDocuments() {
- // Update and merge a single document.
- assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 22, b: "b", c: "c"}]
- });
-
- // Update and merge multiple documents.
- assert.commandWorked(source.update({_id: 1}, {a: 11}));
- assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c", d: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 11, b: "a"}, {_id: 2, a: 22, b: "b", c: "c", d: "d"}]
- });
- })();
-
- // Test $merge inserts a new document into the target collection if it was inserted into the
- // source collection.
- (function testMergeInsertsNewDocument() {
- // Insert and merge a single document.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 11, b: "a"},
- {_id: 2, a: 22, b: "b", c: "c", d: "d"},
- {_id: 3, a: 3, b: "c"}
- ]
- });
- assert.commandWorked(source.deleteOne({_id: 3}));
- assert.commandWorked(target.deleteOne({_id: 3}));
-
- // Insert and merge multiple documents.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 11, b: "a"},
- {_id: 2, a: 22, b: "b", c: "c", d: "d"},
- {_id: 3, a: 3, b: "c"},
- {_id: 4, a: 4, c: "d"}
- ]
- });
- assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
- assert.commandWorked(target.deleteMany({_id: {$in: [3, 4]}}));
- })();
-
- // Test $merge doesn't modify the target collection if a document has been removed from the
- // source collection.
- (function testMergeDoesNotUpdateDeletedDocument() {
- assert.commandWorked(source.deleteOne({_id: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 11, b: "a"},
- {_id: 2, a: 22, b: "b", c: "c", d: "d"},
- ]
- });
- })();
-
- // Test $merge fails if a unique index constraint in the target collection is violated.
- (function testMergeFailsIfTargetUniqueKeyIsViolated() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert.commandWorked(source.insert({_id: 4, a: 11}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- const error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 11, b: "a"},
- {_id: 2, a: 22, b: "b", c: "c", d: "d"},
- ]
- });
- assert.commandWorked(target.dropIndex({a: 1}));
- })();
-
- // Test $merge fails if it cannot find an index to verify that the 'on' fields will be unique.
- (function testMergeFailsIfOnFieldCannotBeVerifiedForUniquness() {
- // The 'on' fields contains a single document field.
- let error =
- assert.throws(() => source.aggregate(
- [{$merge: Object.assign({on: "nonexistent"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, [51190, 51183]);
-
- // The 'on' fields contains multiple document fields.
- error = assert.throws(() => source.aggregate([
- {$merge: Object.assign({on: ["nonexistent1", "nonexistent2"]}, mergeStage.$merge)}
- ]));
- assert.commandFailedWithCode(error, [51190, 51183]);
- })();
-
- // Test $merge with an explicit 'on' field over a single or multiple document fields which
- // differ from the _id field.
- (function testMergeWithOnFields() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection sharded, because an implicitly created sharded
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- // The 'on' fields contains a single document field.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 2, a: 2, b: "b"},
- {_id: 4, a: 30, b: "c", c: "y"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
-
- // The 'on' field contains multiple document fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 2, a: 2, b: "b"},
- {_id: 4, a: 30, b: "c", c: "y"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
- assert.commandWorked(source.dropIndex({a: 1, b: 1}));
- assert.commandWorked(target.dropIndex({a: 1, b: 1}));
- })();
-
- // Test $merge with a dotted path in the 'on' field.
- (function testMergeWithDottedOnField() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because an implicitly created shard
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(source.insert([
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: "merge", whenNotMatched: "insert"}
+};
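+// With this configuration, a matched target document is updated by merging in the source
+// document's fields, and an unmatched source document is inserted into the target as-is.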
+const pipeline = [mergeStage];
+
+// Test $merge into a non-existent collection.
+(function testMergeIntoNonExistentCollection() {
+ assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a"},
+ ]
+ });
+})();
+
+// Test $merge into an existing collection.
+(function testMergeIntoExistentCollection() {
+ assert.commandWorked(source.insert({_id: 2, a: 2, b: "b"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+})();
+
+// Test $merge does not update documents in the target collection if they were not modified
+// in the source collection.
+(function testMergeDoesNotUpdateUnmodifiedDocuments() {
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+})();
+
+// Test $merge updates documents in the target collection if they were modified in the source
+// collection.
+(function testMergeUpdatesModifiedDocuments() {
+ // Update and merge a single document.
+ assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 22, b: "b", c: "c"}]
+ });
+
+ // Update and merge multiple documents.
+ assert.commandWorked(source.update({_id: 1}, {a: 11}));
+ assert.commandWorked(source.update({_id: 2}, {a: 22, c: "c", d: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 11, b: "a"}, {_id: 2, a: 22, b: "b", c: "c", d: "d"}]
+ });
+})();
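+
+// Note: the updates above are replacement-style, so 'b' is removed from the source documents;
+// the expected results still contain 'b' because whenMatched: "merge" merges the source
+// fields into the matched target document instead of replacing it.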
+
+// Test $merge inserts a new document into the target collection if one was inserted into the
+// source collection.
+(function testMergeInsertsNewDocument() {
+ // Insert and merge a single document.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 11, b: "a"},
+ {_id: 2, a: 22, b: "b", c: "c", d: "d"},
+ {_id: 3, a: 3, b: "c"}
+ ]
+ });
+ assert.commandWorked(source.deleteOne({_id: 3}));
+ assert.commandWorked(target.deleteOne({_id: 3}));
+
+ // Insert and merge multiple documents.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 11, b: "a"},
+ {_id: 2, a: 22, b: "b", c: "c", d: "d"},
+ {_id: 3, a: 3, b: "c"},
+ {_id: 4, a: 4, c: "d"}
+ ]
+ });
+ assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
+ assert.commandWorked(target.deleteMany({_id: {$in: [3, 4]}}));
+})();
+
+// Test $merge doesn't modify the target collection if a document has been removed from the
+// source collection.
+(function testMergeDoesNotUpdateDeletedDocument() {
+ assert.commandWorked(source.deleteOne({_id: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 11, b: "a"},
+ {_id: 2, a: 22, b: "b", c: "c", d: "d"},
+ ]
+ });
+})();
+
+// Test $merge fails if a unique index constraint in the target collection is violated.
+(function testMergeFailsIfTargetUniqueKeyIsViolated() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by a unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert.commandWorked(source.insert({_id: 4, a: 11}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ const error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 11, b: "a"},
+ {_id: 2, a: 22, b: "b", c: "c", d: "d"},
+ ]
+ });
+ assert.commandWorked(target.dropIndex({a: 1}));
+})();
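+
+// Note: the DuplicateKey failure above arises because the merged document {_id: 4, a: 11}
+// collides with the existing target document {_id: 1, a: 11} under the unique index on
+// {a: 1}; the target is left unmodified.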
+
+// Test $merge fails if it cannot find an index to verify that the 'on' fields will be unique.
+(function testMergeFailsIfOnFieldCannotBeVerifiedForUniqueness() {
+ // The 'on' field contains a single document field.
+ let error = assert.throws(
+ () => source.aggregate([{$merge: Object.assign({on: "nonexistent"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, [51190, 51183]);
+
+ // The 'on' field contains multiple document fields.
+ error = assert.throws(
+ () => source.aggregate(
+ [{$merge: Object.assign({on: ["nonexistent1", "nonexistent2"]}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, [51190, 51183]);
+})();
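+
+// A minimal converse sketch (illustrative, not part of the original suite; the scratch
+// collection name is hypothetical): once a unique index backs the 'on' field on both the
+// source and the target, the same style of $merge is accepted.
+(function sketchOnFieldBackedByUniqueIndex() {
+ if (FixtureHelpers.isSharded(source)) {
+ return; // Mirror the guard used by the unique-index tests above.
+ }
+ const sketchTarget = db[`${jsTest.name()}_sketch_target`];
+ sketchTarget.drop();
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(sketchTarget.createIndex({a: 1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$merge: {into: sketchTarget.getName(), on: "a", whenMatched: "merge", whenNotMatched: "insert"}}
+ ]));
+ assert.commandWorked(source.dropIndex({a: 1}));
+ sketchTarget.drop();
+})();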
+
+// Test $merge with an explicit 'on' field over a single document field or multiple
+// document fields which differ from the _id field.
+(function testMergeWithOnFields() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by a unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ // The 'on' field contains a single document field.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a", c: "x"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 4, a: 30, b: "c", c: "y"},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+
+ // The 'on' field contains multiple document fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(source.insert(
+ [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(target.insert(
+ [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a", c: "x"},
+ {_id: 2, a: 2, b: "b"},
+ {_id: 4, a: 30, b: "c", c: "y"},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+ assert.commandWorked(source.dropIndex({a: 1, b: 1}));
+ assert.commandWorked(target.dropIndex({a: 1, b: 1}));
+})();
+
+// Test $merge with a dotted path in the 'on' field.
+(function testMergeWithDottedOnField() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by a unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(source.insert([
+ {_id: 1, a: {b: "b"}, c: "x"},
+ {_id: 2, a: {b: "c"}, c: "y"},
+ {_id: 3, a: {b: 30}, b: "c"}
+ ]));
+ assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
{_id: 1, a: {b: "b"}, c: "x"},
{_id: 2, a: {b: "c"}, c: "y"},
{_id: 3, a: {b: 30}, b: "c"}
- ]));
- assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: {b: "b"}, c: "x"},
- {_id: 2, a: {b: "c"}, c: "y"},
- {_id: 3, a: {b: 30}, b: "c"}
- ]
- });
- })();
-
- // Test $merge fails if the value of the 'on' field in a document is invalid, e.g. missing,
- // null or an array.
- (function testMergeFailsIfOnFieldIsInvalid() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because an implicitly created shard
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"z": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"z": 1}, {unique: true}));
-
- // The 'on' field is missing.
- assert.commandWorked(source.insert({_id: 1}));
- let error = assert.throws(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, 51132);
-
- // The 'on' field is null.
- assert.commandWorked(source.update({_id: 1}, {z: null}));
- error = assert.throws(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, 51132);
-
- // The 'on' field is an array.
- assert.commandWorked(source.update({_id: 1}, {z: [1, 2]}));
- error = assert.throws(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
- assert.commandFailedWithCode(error, 51185);
- })();
-
- // Test $merge when the _id field is removed from the aggregate projection but is used in the
- // $merge's 'on' field.
- (function testMergeWhenDocIdIsRemovedFromProjection() {
- // The _id is a single 'on' field (the default one).
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
- assertArrayEq({
- actual: target.find({}, {_id: 0}).toArray(),
- expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
- });
-
- // The _id is part of the compound 'on' field.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({
- actual: target.find({}, {_id: 0}).toArray(),
- expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
- });
- assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
- assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
- })();
-
- // Test $merge preserves indexes and options of the existing target collection.
- (function testMergePreservesIndexesAndOptions() {
- const validator = {a: {$gt: 0}};
- assert(target.drop());
- assert.commandWorked(db.createCollection(target.getName(), {validator: validator}));
- assert.commandWorked(target.createIndex({a: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
- });
- assert.eq(2, target.getIndexes().length);
-
- const listColl = db.runCommand({listCollections: 1, filter: {name: target.getName()}});
- assert.commandWorked(listColl);
- assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
- })();
-
- // Test $merge implicitly creates a new database when the target collection's database doesn't
- // exist.
- (function testMergeImplicitlyCreatesTargetDatabase() {
- assert(source.drop());
- assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
-
- const foreignDb = db.getSiblingDB(`${jsTest.name()}_foreign_db`);
- assert.commandWorked(foreignDb.dropDatabase());
- const foreignTarget = foreignDb[`${jsTest.name()}_target`];
- const foreignPipeline = [{
- $merge: {
- into: {db: foreignDb.getName(), coll: foreignTarget.getName()},
- whenMatched: "merge",
- whenNotMatched: "insert"
- }
- }];
-
- if (!FixtureHelpers.isMongos(db)) {
- assert.doesNotThrow(() => source.aggregate(foreignPipeline));
- assertArrayEq(
- {actual: foreignTarget.find().toArray(), expected: [{_id: 1, a: 1, b: "a"}]});
- } else {
- // Implicit database creation is prohibited in a cluster.
- const error = assert.throws(() => source.aggregate(foreignPipeline));
- assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
-
- // Force creation of the database and collection, then fall through to the test below.
- assert.commandWorked(foreignTarget.insert({_id: 1, a: 1}));
+ ]
+ });
+})();
+
+// Test $merge fails if the value of the 'on' field in a document is invalid, e.g. missing,
+// null or an array.
+(function testMergeFailsIfOnFieldIsInvalid() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by a unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"z": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"z": 1}, {unique: true}));
+
+ // The 'on' field is missing.
+ assert.commandWorked(source.insert({_id: 1}));
+ let error = assert.throws(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, 51132);
+
+ // The 'on' field is null.
+ assert.commandWorked(source.update({_id: 1}, {z: null}));
+ error = assert.throws(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, 51132);
+
+ // The 'on' field is an array.
+ assert.commandWorked(source.update({_id: 1}, {z: [1, 2]}));
+ error = assert.throws(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "z"}, mergeStage.$merge)}]));
+ assert.commandFailedWithCode(error, 51185);
+})();
+
+// Test $merge when the _id field is removed from the aggregate projection but is used in the
+// $merge's 'on' field.
+(function testMergeWhenDocIdIsRemovedFromProjection() {
+ // The _id is a single 'on' field (the default one).
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
+ assertArrayEq({
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
+ });
+
+ // The _id is part of the compound 'on' field.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 0}},
+ {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
+ ]));
+ assertArrayEq({
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{b: "c"}, {a: 1, b: "a"}, {a: 2, b: "b"}]
+ });
+ assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
+ assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
+})();
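+
+// Note: the assertions above project out _id because source documents merged without an _id
+// receive generated ObjectId values on insert, which is also why the pre-existing
+// {_id: 1, b: "c"} document is never matched and survives unchanged.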
+
+// Test $merge preserves indexes and options of the existing target collection.
+(function testMergePreservesIndexesAndOptions() {
+ const validator = {a: {$gt: 0}};
+ assert(target.drop());
+ assert.commandWorked(db.createCollection(target.getName(), {validator: validator}));
+ assert.commandWorked(target.createIndex({a: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]
+ });
+ assert.eq(2, target.getIndexes().length);
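+ // The count of 2 is the default _id index plus the {a: 1} index created above, confirming
+ // that $merge reused the existing collection rather than recreating it.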
+
+ const listColl = db.runCommand({listCollections: 1, filter: {name: target.getName()}});
+ assert.commandWorked(listColl);
+ assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
+})();
+
+// Test $merge implicitly creates a new database when the target collection's database doesn't
+// exist.
+(function testMergeImplicitlyCreatesTargetDatabase() {
+ assert(source.drop());
+ assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
+
+ const foreignDb = db.getSiblingDB(`${jsTest.name()}_foreign_db`);
+ assert.commandWorked(foreignDb.dropDatabase());
+ const foreignTarget = foreignDb[`${jsTest.name()}_target`];
+ const foreignPipeline = [{
+ $merge: {
+ into: {db: foreignDb.getName(), coll: foreignTarget.getName()},
+ whenMatched: "merge",
+ whenNotMatched: "insert"
}
+ }];
+ if (!FixtureHelpers.isMongos(db)) {
assert.doesNotThrow(() => source.aggregate(foreignPipeline));
assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, a: 1, b: "a"}]});
- assert.commandWorked(foreignDb.dropDatabase());
- })();
+ } else {
+ // Implicit database creation is prohibited in a cluster.
+ const error = assert.throws(() => source.aggregate(foreignPipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
+
+ // Force creation of the database and collection, then fall through to the test below.
+ assert.commandWorked(foreignTarget.insert({_id: 1, a: 1}));
+ }
+
+ assert.doesNotThrow(() => source.aggregate(foreignPipeline));
+ assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, a: 1, b: "a"}]});
+ assert.commandWorked(foreignDb.dropDatabase());
+})();
}());
diff --git a/jstests/aggregation/sources/merge/mode_pipeline_discard.js b/jstests/aggregation/sources/merge/mode_pipeline_discard.js
index 12b556b8384..0c9333ca2af 100644
--- a/jstests/aggregation/sources/merge/mode_pipeline_discard.js
+++ b/jstests/aggregation/sources/merge/mode_pipeline_discard.js
@@ -4,279 +4,271 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isSharded.
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isSharded.
- // A helper function to create a pipeline with a $merge stage using a custom 'updatePipeline'
- // for the whenMatched mode. If the 'initialStages' array is specified, the $merge stage will
- // be appended to this array and the result returned to the caller; otherwise, an array with
- // a single $merge stage is returned. The output collection for the $merge stage is specified
- // in the 'target' parameter, and the $merge stage's 'on' fields in the 'on' parameter.
- function makeMergePipeline(
- {target = "", initialStages = [], updatePipeline = [], on = "_id"} = {}) {
- return initialStages.concat([{
- $merge:
- {into: target, on: on, whenMatched: updatePipeline, whenNotMatched: "discard"}
- }]);
- }
+// A helper function to create a pipeline with a $merge stage using a custom 'updatePipeline'
+// for the whenMatched mode. If the 'initialStages' array is specified, the $merge stage will
+// be appended to this array and the result returned to the caller; otherwise, an array with
+// a single $merge stage is returned. The output collection for the $merge stage is specified
+// in the 'target' parameter, and the $merge stage's 'on' fields in the 'on' parameter.
+function makeMergePipeline(
+ {target = "", initialStages = [], updatePipeline = [], on = "_id"} = {}) {
+ return initialStages.concat(
+ [{$merge: {into: target, on: on, whenMatched: updatePipeline, whenNotMatched: "discard"}}]);
+}
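+
+// A minimal usage sketch (illustrative, not part of the original suite): with the defaults,
+// the helper returns a single-stage pipeline in whenNotMatched: "discard" mode.
+assert.eq(
+ makeMergePipeline({target: "out", updatePipeline: [{$set: {x: 1}}]}),
+ [{$merge: {into: "out", on: "_id", whenMatched: [{$set: {x: 1}}], whenNotMatched: "discard"}}]);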
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
- // Test $merge when some documents in the source collection don't have a matching document in
- // the target collection. The merge operation should succeed, discarding unmatched documents.
- (function testMergeIfMatchingDocumentNotFound() {
- const pipeline =
- makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
+// Test $merge when some documents in the source collection don't have a matching document in
+// the target collection. The merge operation should succeed, discarding unmatched documents.
+(function testMergeIfMatchingDocumentNotFound() {
+ const pipeline =
+ makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
- // Single document without a match.
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, b: 1, x: 1, y: 2}, {_id: 3, b: 3, x: 1, y: 2}]
- });
+ // Single document without a match.
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, b: 1, x: 1, y: 2}, {_id: 3, b: 3, x: 1, y: 2}]
+ });
- // Multiple documents without a match.
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 1, y: 2}]});
- })();
+ // Multiple documents without a match.
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 1, y: 2}]});
+})();
- // Test $merge when all documents in the source collection have a matching document in the
- // target collection.
- (function testMergeWhenAllDocumentsHaveMatch() {
- const pipeline =
- makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
+// Test $merge when all documents in the source collection have a matching document in the
+// target collection.
+(function testMergeWhenAllDocumentsHaveMatch() {
+ const pipeline =
+ makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 3, a: 3}));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, b: 1}, {_id: 3, b: 3, x: 1, y: 2}]
- });
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 3, a: 3}));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, b: 3, x: 1, y: 2}]});
- // Source has multiple documents with matches in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, b: 1, x: 1, y: 2}, {_id: 2, b: 2, x: 1, y: 2}, {_id: 3, b: 3}]
- });
- })();
+ // Source has multiple documents with matches in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, b: 1, x: 1, y: 2}, {_id: 2, b: 2, x: 1, y: 2}, {_id: 3, b: 3}]
+ });
+})();
- // Test $merge when the source collection is empty. The target collection should not be
- // modified.
- (function testMergeWhenSourceIsEmpty() {
- const pipeline =
- makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
+// Test $merge when the source collection is empty. The target collection should not be
+// modified.
+(function testMergeWhenSourceIsEmpty() {
+ const pipeline =
+ makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
- assert.commandWorked(source.deleteMany({}));
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
+ assert.commandWorked(source.deleteMany({}));
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
- // Test $merge does not insert into the target collection a document that was newly
- // inserted into the source collection.
- (function testMergeDoesNotInsertNewDocument() {
- const pipeline =
- makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
+// Test $merge does not insert into the target collection a document that was newly
+// inserted into the source collection.
+(function testMergeDoesNotInsertNewDocument() {
+ const pipeline =
+ makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
- // Insert and merge a single document.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- assert.commandWorked(source.deleteOne({_id: 3}));
+ // Insert and merge a single document.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+ assert.commandWorked(source.deleteOne({_id: 3}));
- // Insert and merge multiple documents.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
- })();
+ // Insert and merge multiple documents.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+ assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
+})();
- // Test $merge doesn't modify the target collection if a document has been removed from the
- // source collection.
- (function testMergeDoesNotUpdateDeletedDocument() {
- const pipeline =
- makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
+// Test $merge doesn't modify the target collection if a document has been removed from the
+// source collection.
+(function testMergeDoesNotUpdateDeletedDocument() {
+ const pipeline =
+ makeMergePipeline({target: target.getName(), updatePipeline: [{$set: {x: 1, y: 2}}]});
- assert.commandWorked(source.deleteOne({_id: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
+ assert.commandWorked(source.deleteOne({_id: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
- // Test $merge with an explicit 'on' field over a single document field or multiple
- // document fields which differ from the _id field.
- (function testMergeWithOnFields() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because an implicitly created shard
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
+// Test $merge with an explicit 'on' field over a single document field or multiple
+// document fields which differ from the _id field.
+(function testMergeWithOnFields() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by a unique index created in this test, which
+ // is not allowed.
+ return;
+ }
- let pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: "a",
- updatePipeline: [{$set: {x: 1, y: 2}}]
- });
+ let pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: "a",
+ updatePipeline: [{$set: {x: 1, y: 2}}]
+ });
- // The 'on' field contains a single document field.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, c: "x", x: 1, y: 2},
- {_id: 4, a: 30, c: "y", x: 1, y: 2},
- {_id: 5, a: 40, c: "z"}
- ]
- });
+ // The 'on' field contains a single document field.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, c: "x", x: 1, y: 2},
+ {_id: 4, a: 30, c: "y", x: 1, y: 2},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
- pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: ["a", "b"],
- updatePipeline: [{$set: {x: 1, y: 2}}]
- });
+ pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: ["a", "b"],
+ updatePipeline: [{$set: {x: 1, y: 2}}]
+ });
- // The 'on' field contains multiple document fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", x: 1, y: 2},
- {_id: 4, a: 30, b: "c", c: "y", x: 1, y: 2},
- {_id: 5, a: 40, c: "z"}
- ]
- });
- assert.commandWorked(source.dropIndex({a: 1, b: 1}));
- assert.commandWorked(target.dropIndex({a: 1, b: 1}));
- })();
+ // The 'on' field contains multiple document fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(source.insert(
+ [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(target.insert(
+ [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: 1, b: "a", x: 1, y: 2},
+ {_id: 4, a: 30, b: "c", c: "y", x: 1, y: 2},
+ {_id: 5, a: 40, c: "z"}
+ ]
+ });
+ assert.commandWorked(source.dropIndex({a: 1, b: 1}));
+ assert.commandWorked(target.dropIndex({a: 1, b: 1}));
+})();
- // Test $merge with a dotted path in the 'on' field.
- (function testMergeWithDottedOnField() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because an implicitly created shard
- // key of {_id: 1} will not be covered by a unique index created in this test, which
- // is not allowed.
- return;
- }
+// Test $merge with a dotted path in the 'on' field.
+(function testMergeWithDottedOnField() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because an implicitly created shard
+ // key of {_id: 1} will not be covered by a unique index created in this test, which
+ // is not allowed.
+ return;
+ }
- const pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: "a.b",
- updatePipeline: [{$set: {x: 1, y: 2}}]
- });
+ const pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: "a.b",
+ updatePipeline: [{$set: {x: 1, y: 2}}]
+ });
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(source.insert([
- {_id: 1, a: {b: "b"}, c: "x"},
- {_id: 2, a: {b: "c"}, c: "y"},
- {_id: 3, a: {b: 30}, b: "c"}
- ]));
- assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 2, a: {b: "c"}, x: 1, y: 2},
- ]
- });
- })();
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(source.insert([
+ {_id: 1, a: {b: "b"}, c: "x"},
+ {_id: 2, a: {b: "c"}, c: "y"},
+ {_id: 3, a: {b: 30}, b: "c"}
+ ]));
+ assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 2, a: {b: "c"}, x: 1, y: 2},
+ ]
+ });
+})();
- // Test $merge when the _id field is removed from the aggregate projection but is used in the
- // $merge's 'on' field.
- (function testMergeWhenDocIdIsRemovedFromProjection() {
- let pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- updatePipeline: [{$set: {x: 1, y: 2}}]
- });
+// Test $merge when the _id field is removed from the aggregate projection but is used in the
+// $merge's 'on' field.
+(function testMergeWhenDocIdIsRemovedFromProjection() {
+ let pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ updatePipeline: [{$set: {x: 1, y: 2}}]
+ });
- // The _id is a single 'on' field (the default one).
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
+ // The _id is a single 'on' field (the default one).
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
- pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- on: ["_id", "a"],
- target: target.getName(),
- updatePipeline: [{$set: {x: 1, y: 2}}]
- });
+ pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ on: ["_id", "a"],
+ target: target.getName(),
+ updatePipeline: [{$set: {x: 1, y: 2}}]
+ });
- // The _id is part of the compound 'on' field.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
- assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
- assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
- })();
+ // The _id is part of the compound 'on' field.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
+ assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
+ assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
+})();
- // Test that variables referencing the fields in the source document can be specified in the
- // 'let' argument and referenced in the update pipeline.
- (function testMergeWithLetVariables() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert([{_id: 1, c: 1}]));
+// Test that variables referencing the fields in the source document can be specified in the
+// 'let' argument and referenced in the update pipeline.
+(function testMergeWithLetVariables() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, c: 1}]));
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- let : {x: "$a", y: "$b"},
- whenMatched: [{$set: {z: {$add: ["$$x", "$$y"]}}}],
- whenNotMatched: "discard"
- }
- }]));
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}]});
- })();
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ let : {x: "$a", y: "$b"},
+ whenMatched: [{$set: {z: {$add: ["$$x", "$$y"]}}}],
+ whenNotMatched: "discard"
+ }
+ }]));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}]});
+})();
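+
+// Note: only {_id: 1} has a match in the target; z: 2 is $$x + $$y, i.e. the matched source
+// document's 'a' and 'b' values bound through the 'let' argument.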
}());
diff --git a/jstests/aggregation/sources/merge/mode_pipeline_fail.js b/jstests/aggregation/sources/merge/mode_pipeline_fail.js
index 7d8d2337949..60c46ce8708 100644
--- a/jstests/aggregation/sources/merge/mode_pipeline_fail.js
+++ b/jstests/aggregation/sources/merge/mode_pipeline_fail.js
@@ -4,95 +4,89 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge:
- {into: target.getName(), whenMatched: [{$addFields: {x: 2}}], whenNotMatched: "fail"}
- };
- const pipeline = [mergeStage];
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: [{$addFields: {x: 2}}], whenNotMatched: "fail"}
+};
+const pipeline = [mergeStage];
- // Test $merge when some documents in the source collection don't have a matching document in
- // the target collection.
- (function testMergeFailsIfMatchingDocumentNotFound() {
- // Single document without a match.
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- let error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, b: 1, x: 2}, {_id: 3, b: 3, x: 2}]
- });
+// Test $merge when some documents in the source collection don't have a matching document in
+// the target collection.
+(function testMergeFailsIfMatchingDocumentNotFound() {
+ // Single document without a match.
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ let error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 2}, {_id: 3, b: 3, x: 2}]});
- // Multiple documents without a match.
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}]));
- error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 2}]});
- })();
+ // Multiple documents without a match.
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}]));
+ error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 2}]});
+})();
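+
+// Note: even though the aggregate fails on the unmatched document, the target documents with
+// matches have already been updated (x: 2) by the time the error surfaces; $merge does not
+// roll back the writes it has performed.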
- // Test $merge when all documents in the source collection have a matching document in the
- // target collection.
- (function testMergeWhenAllDocumentsHaveMatch() {
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 3, a: 3}));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, b: 3, x: 2}]});
+// Test $merge when all documents in the source collection have a matching document in the
+// target collection.
+(function testMergeWhenAllDocumentsHaveMatch() {
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 3, a: 3}));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, b: 3, x: 2}]});
- // Source has multiple documents with matches in the target.
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, b: 1, x: 2}, {_id: 2, b: 2, x: 2}, {_id: 3, b: 3, x: 2}]
- });
- })();
+ // Source has multiple documents with matches in the target.
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, b: 1, x: 2}, {_id: 2, b: 2, x: 2}, {_id: 3, b: 3, x: 2}]
+ });
+})();
- // Test $merge when the source collection is empty. The target collection should not be
- // modified.
- (function testMergeWhenSourceIsEmpty() {
- assert.commandWorked(source.deleteMany({}));
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
+// Test $merge when the source collection is empty. The target collection should not be
+// modified.
+(function testMergeWhenSourceIsEmpty() {
+ assert.commandWorked(source.deleteMany({}));
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
- // Test that variables referencing the fields in the source document can be specified in the
- // 'let' argument and referenced in the update pipeline.
- (function testMergeWithLetVariables() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert([{_id: 1, c: 1}, {_id: 2, c: 2}]));
+// Test that variables referencing the fields in the source document can be specified in the
+// 'let' argument and referenced in the update pipeline.
+(function testMergeWithLetVariables() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, c: 1}, {_id: 2, c: 2}]));
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- let : {x: "$a", y: "$b"},
- whenMatched: [{$set: {z: {$add: ["$$x", "$$y"]}}}],
- whenNotMatched: "fail"
- }
- }]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, c: 1, z: 2}, {_id: 2, c: 2, z: 4}]
- });
- })();
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ let : {x: "$a", y: "$b"},
+ whenMatched: [{$set: {z: {$add: ["$$x", "$$y"]}}}],
+ whenNotMatched: "fail"
+ }
+ }]));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}, {_id: 2, c: 2, z: 4}]});
+})();
}());
diff --git a/jstests/aggregation/sources/merge/mode_pipeline_insert.js b/jstests/aggregation/sources/merge/mode_pipeline_insert.js
index b8f8374cfc9..df3414e0950 100644
--- a/jstests/aggregation/sources/merge/mode_pipeline_insert.js
+++ b/jstests/aggregation/sources/merge/mode_pipeline_insert.js
@@ -4,644 +4,624 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
-
- // A helper function to create a pipeline with a $merge stage using a custom 'updatePipeline'
- // for the whenMatched mode. If the 'initialStages' array is specified, the $merge stage will
- // be appended to this array and the result returned to the caller; otherwise, an array with
- // a single $merge stage is returned. The output collection for the $merge stage is specified
- // in the 'target' parameter, and the $merge stage's 'on' fields in the 'on' parameter. The
- // 'letVars' parameter describes the 'let' argument of the $merge stage and holds variables
- // that can be referenced in the pipeline.
- function makeMergePipeline({target = "",
- initialStages = [],
- updatePipeline = [],
- on = "_id",
- letVars = undefined} = {}) {
- const baseObj = letVars !== undefined ? {let : letVars} : {};
- return initialStages.concat([{
- $merge: Object.assign(
- baseObj,
- {into: target, on: on, whenMatched: updatePipeline, whenNotMatched: "insert"})
- }]);
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+
+// A helper function to create a pipeline with a $merge stage using a custom 'updatePipeline'
+// for the whenMatched mode. If the 'initialStages' array is specified, the $merge stage will
+// be appended to this array and the result returned to the caller; otherwise, an array with
+// a single $merge stage is returned. The output collection for the $merge stage is specified
+// in the 'target' parameter, and the $merge stage's 'on' fields in the 'on' parameter. The
+// 'letVars' parameter describes the 'let' argument of the $merge stage and holds variables
+// that can be referenced in the pipeline.
+function makeMergePipeline(
+ {target = "", initialStages = [], updatePipeline = [], on = "_id", letVars = undefined} = {}) {
+ const baseObj = letVars !== undefined ? {let : letVars} : {};
+ return initialStages.concat([{
+ $merge: Object.assign(
+ baseObj, {into: target, on: on, whenMatched: updatePipeline, whenNotMatched: "insert"})
+ }]);
+}
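+
+// A minimal usage sketch (illustrative, not part of the original suite): when 'letVars' is
+// given, it becomes the 'let' argument of the generated $merge stage.
+assert.eq(
+ makeMergePipeline({target: "out", letVars: {v: "$a"}, updatePipeline: [{$set: {x: "$$v"}}]}),
+ [{
+ $merge: {
+ let : {v: "$a"},
+ into: "out",
+ on: "_id",
+ whenMatched: [{$set: {x: "$$v"}}],
+ whenNotMatched: "insert"
+ }
+ }]);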
+
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+
+(function testMergeIntoNonExistentCollection() {
+ assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$addFields: {x: 1}}]})));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, x: 1},
+ ]
+ });
+})();
+
+// Test $merge inserts a document into an existing target collection if no matching document
+// is found.
+(function testMergeInsertsDocumentIfMatchNotFound() {
+ assert.commandWorked(target.deleteMany({}));
+ assert.doesNotThrow(
+ () => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$addFields: {x: 1, y: 2}}]})));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, x: 1, y: 2}]});
+})();
+
+// Test $merge updates an existing document in the target collection by applying a
+// pipeline-style update.
+(function testMergeUpdatesDocumentIfMatchFound() {
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ target: target.getName(),
+ updatePipeline: [{$project: {x: {$add: ["$x", 1]}, y: {$add: ["$y", 2]}}}]
+ })));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, x: 2, y: 4}]});
+})();
+
+// Test $merge with various pipeline stages which are currently supported by the pipeline-style
+// update.
+(function testMergeWithSupportedUpdatePipelineStages() {
+ assert(source.drop());
+ assert(target.drop());
+
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert({_id: 1, b: 1}));
+
+ // Test $addFields stage.
+ assert.doesNotThrow(
+ () => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$addFields: {x: {$add: ["$b", 1]}}}]})));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 2}, {_id: 2, x: null}]});
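+ // Note: {_id: 2} in the source has no match, so the update pipeline is applied to a
+ // document holding only the 'on' field value, which is why the inserted result is
+ // {_id: 2, x: null} ($b is missing, so $add yields null) rather than carrying over 'a'.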
+
+ // Test $project stage.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: 1}));
+ assert.doesNotThrow(
+ () => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$project: {x: {$add: ["$b", 1]}}}]})));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, x: 2}, {_id: 2, x: null}]});
+
+ // Test $replaceWith stage.
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, b: 1, c: {x: {y: 1}}}, {_id: 2, b: 2, c: {x: {y: 2}}}]));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$replaceWith: "$c"}]})));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, x: {y: 1}}, {_id: 2, x: {y: 2}}]});
+
+ // Test $replaceRoot stage.
+ assert(target.drop());
+ assert.commandWorked(
+ target.insert([{_id: 1, b: 1, c: {x: {y: 1}}}, {_id: 2, b: 2, c: {x: {y: 2}}}]));
+ assert.doesNotThrow(
+ () => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$replaceRoot: {newRoot: "$c"}}]})));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, x: {y: 1}}, {_id: 2, x: {y: 2}}]});
+})();
+
+// Test $merge inserts a new document into the target collection if no matching document is
+// found, applying a pipeline-style update with upsert=true semantics.
+(function testMergeInsertDocumentIfMatchNotFound() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 1, a: 1}));
+ assert.commandWorked(target.insert({_id: 2, a: 2}));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$addFields: {x: 1}}]})));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, x: 1}, {_id: 2, a: 2}]});
+})();
+
+// Test $merge doesn't modify the target collection if a document has been removed from the
+// source collection.
+(function testMergeDoesNotUpdateDeletedDocument() {
+ assert.commandWorked(source.deleteOne({_id: 1}));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ target: target.getName(),
+ updatePipeline: [{$project: {x: {$add: ["$x", 1]}, a: 1}}]
+ })));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, x: 1},
+ {_id: 2, a: 2},
+ ]
+ });
+})();
+
+// Test $merge fails if a unique index constraint in the target collection is violated.
+(function testMergeFailsIfTargetUniqueKeyIsViolated() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because the implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
}
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
-
- (function testMergeIntoNonExistentCollection() {
- assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$addFields: {x: 1}}]})));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, x: 1},
- ]
- });
- })();
-
- // Test $merge inserts a document into an existing target collection if no matching document
- // is found.
- (function testMergeInsertsDocumentIfMatchNotFound() {
- assert.commandWorked(target.deleteMany({}));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$addFields: {x: 1, y: 2}}]})));
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, x: 1, y: 2}]});
- })();
-
- // Test $merge updates an existing document in the target collection by applying a
- // pipeline-style update.
- (function testMergeUpdatesDocumentIfMatchFound() {
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- target: target.getName(),
- updatePipeline: [{$project: {x: {$add: ["$x", 1]}, y: {$add: ["$y", 2]}}}]
- })));
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, x: 2, y: 4}]});
- })();
-
- // Test $merge with the various pipeline stages that are currently supported by the
- // pipeline-style update.
- (function testMergeWithSupportedUpdatePipelineStages() {
- assert(source.drop());
- assert(target.drop());
-
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert({_id: 1, b: 1}));
-
- // Test $addFields stage.
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- target: target.getName(),
- updatePipeline: [{$addFields: {x: {$add: ["$b", 1]}}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1, x: 2}, {_id: 2, x: null}]});
-
- // Test $project stage.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: 1}));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$project: {x: {$add: ["$b", 1]}}}]})));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, x: 2}, {_id: 2, x: null}]});
-
- // Test $replaceWith stage.
- assert(target.drop());
- assert.commandWorked(
- target.insert([{_id: 1, b: 1, c: {x: {y: 1}}}, {_id: 2, b: 2, c: {x: {y: 2}}}]));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$replaceWith: "$c"}]})));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, x: {y: 1}}, {_id: 2, x: {y: 2}}]
- });
-
- // Test $replaceRoot stage.
- assert(target.drop());
- assert.commandWorked(
- target.insert([{_id: 1, b: 1, c: {x: {y: 1}}}, {_id: 2, b: 2, c: {x: {y: 2}}}]));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$replaceRoot: {newRoot: "$c"}}]})));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, x: {y: 1}}, {_id: 2, x: {y: 2}}]
- });
- })();
-
- // Test $merge inserts a new document into the target collection if no matching document is
- // found, by applying a pipeline-style update with upsert=true semantics.
- (function testMergeInsertDocumentIfMatchNotFound() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 1, a: 1}));
- assert.commandWorked(target.insert({_id: 2, a: 2}));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$addFields: {x: 1}}]})));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, x: 1}, {_id: 2, a: 2}]});
- })();
-
- // Test $merge doesn't modify the target collection if a document has been removed from the
- // source collection.
- (function testMergeDoesNotUpdateDeletedDocument() {
- assert.commandWorked(source.deleteOne({_id: 1}));
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- target: target.getName(),
- updatePipeline: [{$project: {x: {$add: ["$x", 1]}, a: 1}}]
- })));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, x: 1},
- {_id: 2, a: 2},
- ]
- });
- })();
-
- // Test $merge fails if a unique index constraint in the target collection is violated.
- (function testMergeFailsIfTargetUniqueKeyIsViolated() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because the implicitly created shard
- // key of {_id: 1} will not be covered by the unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 4, a: 2}));
- assert.commandWorked(target.insert([{_id: 1, x: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- const error = assert.throws(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$project: {x: 1, a: 1}}]})));
- assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, x: 1},
- {_id: 2, a: 2},
- ]
- });
- assert.commandWorked(target.dropIndex({a: 1}));
- })();
-
- // Test $merge fails if it cannot find an index to verify that the 'on' fields will be unique.
- (function testMergeFailsIfOnFieldCannotBeVerifiedForUniquness() {
- // The 'on' specification contains a single document field.
- let error = assert.throws(() => source.aggregate(makeMergePipeline({
- target: target.getName(),
- on: "nonexistent",
- updatePipeline: [{$project: {x: 1, a: 1}}]
- })));
- assert.commandFailedWithCode(error, [51190, 51183]);
-
- // The 'on' specification contains multiple document fields.
- error = assert.throws(() => source.aggregate(makeMergePipeline({
- target: target.getName(),
- on: ["nonexistent1", "nonexistent2"],
- updatePipeline: [{$project: {x: 1, a: 1}}]
- })));
- assert.commandFailedWithCode(error, [51190, 51183]);
- })();
-
- // Test $merge with an explicit 'on' field consisting of a single document field or
- // multiple document fields that differ from the _id field.
- (function testMergeWithOnFields() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because the implicitly created shard
- // key of {_id: 1} will not be covered by the unique index created in this test, which
- // is not allowed.
- return;
- }
-
- // The 'on' specification contains a single document field.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 30}]));
- assert.commandWorked(
- target.insert([{_id: 1, a: 1, b: 1}, {_id: 4, a: 30, b: 2}, {_id: 5, a: 40, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: "a",
- updatePipeline: [{$addFields: {z: 1}}]
- })));
- assertArrayEq({
- actual: target.find({}, {_id: 0}).toArray(),
- expected: [{a: 1, b: 1, z: 1}, {a: 2, z: 1}, {a: 30, b: 2, z: 1}, {a: 40, b: 3}]
- });
-
- // The 'on' specification contains multiple document fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(
- source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 4}, {_id: 3, a: 30, b: 2}]));
- assert.commandWorked(
- target.insert([{_id: 1, a: 1, b: 1}, {_id: 4, a: 30, b: 2}, {_id: 5, a: 40, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: ["a", "b"],
- updatePipeline: [{$addFields: {z: 1}}]
- })));
- assertArrayEq({
- actual: target.find({}, {_id: 0}).toArray(),
- expected:
- [{a: 1, b: 1, z: 1}, {a: 2, b: 4, z: 1}, {a: 30, b: 2, z: 1}, {a: 40, b: 3}]
- });
- assert.commandWorked(source.dropIndex({a: 1, b: 1}));
- assert.commandWorked(target.dropIndex({a: 1, b: 1}));
- })();
-
- // Test $merge with a dotted path in the 'on' field.
- (function testMergeWithDottedOnField() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because the implicitly created shard
- // key of {_id: 1} will not be covered by the unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(source.insert([
- {_id: 1, a: {b: "b"}, c: "x"},
- {_id: 2, a: {b: "c"}, c: "y"},
- {_id: 3, a: {b: 30}, b: "c"}
- ]));
- assert.commandWorked(target.insert({_id: 2, a: {b: "c"}, c: "y"}));
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: "a.b",
- updatePipeline: [{$addFields: {z: 1}}]
- })));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: {b: "b"}, z: 1},
- {_id: 2, a: {b: "c"}, c: "y", z: 1},
- {_id: 3, a: {b: 30}, z: 1}
- ]
- });
- })();
-
- // Test $merge fails if the value of the 'on' field in a document is invalid, e.g. missing,
- // null or an array.
- (function testMergeFailsIfOnFieldIsInvalid() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because the implicitly created shard
- // key of {_id: 1} will not be covered by the unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"z": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"z": 1}, {unique: true}));
-
- const pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- on: "z",
- updatePipeline: [{$addFields: {z: 1}}]
- });
-
- // The 'on' field is missing.
- assert.commandWorked(source.insert({_id: 1}));
- let error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, 51132);
-
- // The 'on' field is null.
- assert.commandWorked(source.update({_id: 1}, {z: null}));
- error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, 51132);
-
- // The 'on' field is an array.
- assert.commandWorked(source.update({_id: 1}, {z: [1, 2]}));
- error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, 51185);
- })();
-
- // Test $merge when the _id field is removed from the aggregate projection but is used in the
- // $merge's 'on' field. When the _id is missing, the $merge stage will create a new ObjectId in
- // its place before performing the insert or update.
- (function testMergeWhenDocIdIsRemovedFromProjection() {
- let pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- target: target.getName(),
- updatePipeline: [{$addFields: {z: 1}}]
- });
-
- // The _id is a single 'on' field (a default one).
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find({}, {_id: 0}).toArray(),
- // There is a matching document in the target with {_id: 1}, but since we cannot match
- // it (no _id in projection), we just insert two new documents from the source
- // collection by applying a pipeline-style update.
- expected: [{b: "c"}, {z: 1}, {z: 1}]
- });
-
- pipeline = makeMergePipeline({
- initialStages: [{$project: {_id: 0}}],
- on: ["_id", "a"],
- target: target.getName(),
- updatePipeline: [{$addFields: {z: 1}}]
- });
-
- // The _id is part of the compound 'on' field.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find({}, {_id: 0}).toArray(),
- expected: [{b: "c"}, {a: 1, z: 1}, {a: 2, z: 1}]
- });
- assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
- assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
- })();
-
- // Test $merge preserves indexes and options of the existing target collection.
- (function testMergePresrvesIndexesAndOptions() {
- const validator = {z: {$gt: 0}};
- assert(target.drop());
- assert.commandWorked(db.createCollection(target.getName(), {validator: validator}));
- assert.commandWorked(target.createIndex({a: 1}));
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {target: target.getName(), updatePipeline: [{$addFields: {z: 1}}]})));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, z: 1}, {_id: 2, z: 1}]});
- assert.eq(2, target.getIndexes().length);
-
- const listColl = db.runCommand({listCollections: 1, filter: {name: target.getName()}});
- assert.commandWorked(listColl);
- assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
- })();
-
- // Test $merge implicitly creates a new database when the target collection's database doesn't
- // exist.
- (function testMergeImplicitlyCreatesTargetDatabase() {
- assert(source.drop());
- assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
-
- const foreignDb = db.getSiblingDB(`${jsTest.name()}_foreign_db`);
- assert.commandWorked(foreignDb.dropDatabase());
- const foreignTarget = foreignDb[`${jsTest.name()}_target`];
- const foreignPipeline = makeMergePipeline({
- target: {db: foreignDb.getName(), coll: foreignTarget.getName()},
- updatePipeline: [{$addFields: {z: 1}}]
- });
-
- if (!FixtureHelpers.isMongos(db)) {
- assert.doesNotThrow(() => source.aggregate(foreignPipeline));
- assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, z: 1}]});
- } else {
- // Implicit database creation is prohibited in a cluster.
- const error = assert.throws(() => source.aggregate(foreignPipeline));
- assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
-
- // Force creation of the database and collection, then fall through to the test
- // below.
- assert.commandWorked(foreignTarget.insert({_id: 1}));
- }
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 4, a: 2}));
+ assert.commandWorked(target.insert([{_id: 1, x: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ const error =
+ assert.throws(() => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$project: {x: 1, a: 1}}]})));
+ assert.commandFailedWithCode(error, ErrorCodes.DuplicateKey);
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, x: 1},
+ {_id: 2, a: 2},
+ ]
+ });
+ assert.commandWorked(target.dropIndex({a: 1}));
+})();
+
+// Test $merge fails if it cannot find an index to verify that the 'on' fields will be unique.
+(function testMergeFailsIfOnFieldCannotBeVerifiedForUniquness() {
+ // The 'on' specification contains a single document field.
+ let error = assert.throws(() => source.aggregate(makeMergePipeline({
+ target: target.getName(),
+ on: "nonexistent",
+ updatePipeline: [{$project: {x: 1, a: 1}}]
+ })));
+ assert.commandFailedWithCode(error, [51190, 51183]);
+
+ // The 'on' specification contains multiple document fields.
+ error = assert.throws(() => source.aggregate(makeMergePipeline({
+ target: target.getName(),
+ on: ["nonexistent1", "nonexistent2"],
+ updatePipeline: [{$project: {x: 1, a: 1}}]
+ })));
+ assert.commandFailedWithCode(error, [51190, 51183]);
+})();
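+
+// The verification above is satisfied by backing every 'on' field with a unique index
+// in the target collection, as the tests below do; a sketch of such an index spec in
+// createIndexes form (the field name is chosen purely for illustration):
+const exampleOnIndexSpec = {key: {a: 1}, name: "a_1", unique: true};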
+
+// Test $merge with an explicit 'on' field consisting of a single document field or
+// multiple document fields that differ from the _id field.
+(function testMergeWithOnFields() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because the implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ // The 'on' specification contains a single document field.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 30}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, b: 1}, {_id: 4, a: 30, b: 2}, {_id: 5, a: 40, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: "a",
+ updatePipeline: [{$addFields: {z: 1}}]
+ })));
+ assertArrayEq({
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{a: 1, b: 1, z: 1}, {a: 2, z: 1}, {a: 30, b: 2, z: 1}, {a: 40, b: 3}]
+ });
+
+ // The 'on' specification contains multiple document fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 4}, {_id: 3, a: 30, b: 2}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, b: 1}, {_id: 4, a: 30, b: 2}, {_id: 5, a: 40, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: ["a", "b"],
+ updatePipeline: [{$addFields: {z: 1}}]
+ })));
+ assertArrayEq({
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{a: 1, b: 1, z: 1}, {a: 2, b: 4, z: 1}, {a: 30, b: 2, z: 1}, {a: 40, b: 3}]
+ });
+ assert.commandWorked(source.dropIndex({a: 1, b: 1}));
+ assert.commandWorked(target.dropIndex({a: 1, b: 1}));
+})();
+
+// Test $merge with a dotted path in the 'on' field.
+(function testMergeWithDottedOnField() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because the implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(source.insert([
+ {_id: 1, a: {b: "b"}, c: "x"},
+ {_id: 2, a: {b: "c"}, c: "y"},
+ {_id: 3, a: {b: 30}, b: "c"}
+ ]));
+ assert.commandWorked(target.insert({_id: 2, a: {b: "c"}, c: "y"}));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: "a.b",
+ updatePipeline: [{$addFields: {z: 1}}]
+ })));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
+ {_id: 1, a: {b: "b"}, z: 1},
+ {_id: 2, a: {b: "c"}, c: "y", z: 1},
+ {_id: 3, a: {b: 30}, z: 1}
+ ]
+ });
+})();
+
+// Test $merge fails if the value of the 'on' field in a document is invalid, e.g. missing,
+// null or an array.
+(function testMergeFailsIfOnFieldIsInvalid() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because the implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"z": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"z": 1}, {unique: true}));
+
+ const pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ on: "z",
+ updatePipeline: [{$addFields: {z: 1}}]
+ });
+
+ // The 'on' field is missing.
+ assert.commandWorked(source.insert({_id: 1}));
+ let error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, 51132);
+
+ // The 'on' field is null.
+ assert.commandWorked(source.update({_id: 1}, {z: null}));
+ error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, 51132);
+
+ // The 'on' field is an array.
+ assert.commandWorked(source.update({_id: 1}, {z: [1, 2]}));
+ error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, 51185);
+})();
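+
+// A summary of the error codes exercised above, keyed by the offending 'on' value
+// (the codes are copied from the assertions; the grouping itself is an illustration):
+const exampleOnFieldErrorCodes = {"missing": 51132, "null": 51132, "array": 51185};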
+
+// Test $merge when the _id field is removed from the aggregate projection but is used in the
+// $merge's 'on' field. When the _id is missing, the $merge stage will create a new ObjectId in
+// its place before performing the insert or update.
+(function testMergeWhenDocIdIsRemovedFromProjection() {
+ let pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ target: target.getName(),
+ updatePipeline: [{$addFields: {z: 1}}]
+ });
+
+ // The _id is a single 'on' field (a default one).
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find({}, {_id: 0}).toArray(),
+ // There is a matching document in the target with {_id: 1}, but since we cannot match
+ // it (no _id in projection), we just insert two new documents from the source
+ // collection by applying a pipeline-style update.
+ expected: [{b: "c"}, {z: 1}, {z: 1}]
+ });
+
+ pipeline = makeMergePipeline({
+ initialStages: [{$project: {_id: 0}}],
+ on: ["_id", "a"],
+ target: target.getName(),
+ updatePipeline: [{$addFields: {z: 1}}]
+ });
+
+ // The _id is part of the compound 'on' field.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find({}, {_id: 0}).toArray(),
+ expected: [{b: "c"}, {a: 1, z: 1}, {a: 2, z: 1}]
+ });
+ assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
+ assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
+})();
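+
+// As described at the top of this test, the documents merged without an _id received
+// generated ObjectId values; an illustrative read-only check (assumes the shell's
+// ObjectId type):
+assert(target.find({z: 1}).toArray().every(doc => doc._id instanceof ObjectId));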
+
+// Test $merge preserves indexes and options of the existing target collection.
+(function testMergePresrvesIndexesAndOptions() {
+ const validator = {z: {$gt: 0}};
+ assert(target.drop());
+ assert.commandWorked(db.createCollection(target.getName(), {validator: validator}));
+ assert.commandWorked(target.createIndex({a: 1}));
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline(
+ {target: target.getName(), updatePipeline: [{$addFields: {z: 1}}]})));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, z: 1}, {_id: 2, z: 1}]});
+ assert.eq(2, target.getIndexes().length);
+
+ const listColl = db.runCommand({listCollections: 1, filter: {name: target.getName()}});
+ assert.commandWorked(listColl);
+ assert.eq(validator, listColl.cursor.firstBatch[0].options["validator"]);
+})();
+
+// Test $merge implicitly creates a new database when the target collection's database doesn't
+// exist.
+(function testMergeImplicitlyCreatesTargetDatabase() {
+ assert(source.drop());
+ assert.commandWorked(source.insert({_id: 1, a: 1, b: "a"}));
+
+ const foreignDb = db.getSiblingDB(`${jsTest.name()}_foreign_db`);
+ assert.commandWorked(foreignDb.dropDatabase());
+ const foreignTarget = foreignDb[`${jsTest.name()}_target`];
+ const foreignPipeline = makeMergePipeline({
+ target: {db: foreignDb.getName(), coll: foreignTarget.getName()},
+ updatePipeline: [{$addFields: {z: 1}}]
+ });
+
+ if (!FixtureHelpers.isMongos(db)) {
assert.doesNotThrow(() => source.aggregate(foreignPipeline));
assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, z: 1}]});
- assert.commandWorked(foreignDb.dropDatabase());
- })();
-
- // Test that $merge can reference the default 'let' variable 'new' which holds the entire
- // document from the source collection.
- (function testMergeWithDefaultLetVariable() {
- assert(source.drop());
- assert(target.drop());
-
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- target: target.getName(),
- updatePipeline: [{$set: {x: {$add: ["$$new.a", "$$new.b"]}}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, x: 2}, {_id: 2, x: 4}]});
- })();
-
- // Test that the default 'let' variable 'new' is not available once the 'let' argument to the
- // $merge stage is specified explicitly.
- (function testMergeCannotUseDefaultLetVariableIfLetIsSpecified() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- const error = assert.throws(() => source.aggregate(makeMergePipeline({
- letVars: {foo: "bar"},
- target: target.getName(),
- updatePipeline: [{$project: {x: "$$new.a", y: "$$new.b"}}]
- })));
- assert.commandFailedWithCode(error, 17276);
- })();
-
- // Test that $merge can accept an empty object holding no variables, and that the
- // default 'new' variable is then not available.
- (function testMergeWithEmptyLetVariables() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- // Can use an empty object.
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {letVars: {}, target: target.getName(), updatePipeline: [{$set: {x: "foo"}}]})));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, c: 1, x: "foo"}, {_id: 2, x: "foo"}]
- });
-
- // No default variable 'new' is available.
- const error = assert.throws(() => source.aggregate(makeMergePipeline({
- letVars: {},
- target: target.getName(),
- updatePipeline: [{$project: {x: "$$new.a", y: "$$new.b"}}]
- })));
- assert.commandFailedWithCode(error, 17276);
- })();
-
- // Test that $merge can accept a null value as the 'let' argument and the default variable
- // 'new' can still be used.
- // Note that this is not desirable behaviour but rather a limitation in the IDL parser,
- // which cannot differentiate between an optional field specified explicitly as 'null' and
- // one not specified at all. In both cases it will treat the field as if it wasn't
- // specified. So, this test ensures that we're aware of this limitation. Once the
- // limitation is addressed in SERVER-41272, this test should be updated accordingly.
- (function testMergeWithNullLetVariables() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- // Can use a null 'let' argument.
- assert.doesNotThrow(
- () => source.aggregate(makeMergePipeline(
- {letVars: null, target: target.getName(), updatePipeline: [{$set: {x: "foo"}}]})));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, c: 1, x: "foo"}, {_id: 2, x: "foo"}]
- });
-
- // Can use the default 'new' variable.
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: null,
- target: target.getName(),
- updatePipeline: [{$project: {x: "$$new.a", y: "$$new.b"}}]
- })));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, x: 1, y: 1}, {_id: 2, x: 2, y: 2}]
- });
- })();
-
- // Test that constant values can be specified in the 'let' argument and referenced in the update
- // pipeline.
- (function testMergeWithConstantLetVariable() {
- // Non-array constants.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: {a: 1, b: "foo", c: true},
- target: target.getName(),
- updatePipeline: [{$set: {x: "$$a", y: "$$b", z: "$$c"}}]
- })));
- assertArrayEq({
- actual: target.find().toArray(),
- expected:
- [{_id: 1, c: 1, x: 1, y: "foo", z: true}, {_id: 2, x: 1, y: "foo", z: true}]
- });
-
- // Constant array.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: {a: [1, 2, 3]},
- target: target.getName(),
- updatePipeline: [{$set: {x: {$arrayElemAt: ["$$a", 1]}}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, x: 2}, {_id: 2, x: 2}]});
- })();
-
- // Test that variables referencing the fields in the source document can be specified in the
- // 'let' argument and referenced in the update pipeline.
- (function testMergeWithNonConstantLetVariables() {
- // Non-array fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: {x: "$a", y: "$b"},
- target: target.getName(),
- updatePipeline: [{$set: {z: {$add: ["$$x", "$$y"]}}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}, {_id: 2, z: 4}]});
-
- // Array field with expressions in the pipeline.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: [1, 2, 3]}, {_id: 2, a: [4, 5, 6]}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: {x: "$a"},
- target: target.getName(),
- updatePipeline: [{$set: {z: {$arrayElemAt: ["$$x", 1]}}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}, {_id: 2, z: 5}]});
-
- // Array field with expressions in the 'let' argument.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: {x: {$arrayElemAt: ["$a", 2]}},
- target: target.getName(),
- updatePipeline: [{$set: {z: "$$x"}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 3}, {_id: 2, z: 6}]});
- })();
-
- // Test that variables using a dotted path can be specified in the 'let' argument and
- // referenced in the update pipeline.
- (function testMergeWithDottedPathLetVariables() {
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: {b: {c: 2}}}, {_id: 2, a: {b: {c: 3}}}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- letVars: {x: "$a.b.c"},
- target: target.getName(),
- updatePipeline: [{$set: {z: {$pow: ["$$x", 2]}}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 4}, {_id: 2, z: 9}]});
- })();
-
- // Test that 'let' variables refer to the computed document in the aggregation pipeline,
- // not to the original document in the source collection.
- (function testMergeLetVariablesHoldsComputedValues() {
- // Test the default 'new' variable.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(
- source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 1, b: 2}, {_id: 3, a: 2, b: 3}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- // In the $group stage the computed total field 'a' intentionally uses the same name as
- // in the source collection, to make sure that even when a referenced field is present in
- // the source collection under the same name, the actual value for the variable will be
- // picked up from the computed document.
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- initialStages: [{$group: {_id: "$a", a: {$sum: "$b"}}}],
- target: target.getName(),
- updatePipeline: [{$set: {z: "$$new"}}]
- })));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, c: 1, z: {_id: 1, a: 3}}, {_id: 2, z: {_id: 2, a: 3}}]
- });
-
- // Test custom 'let' variables.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(
- source.insert([{_id: 1, a: 1, b: 5}, {_id: 2, a: 1, b: 2}, {_id: 3, a: 2, b: 3}]));
- assert.commandWorked(target.insert({_id: 1, c: 1}));
-
- assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
- initialStages: [{$group: {_id: "$a", a: {$sum: "$b"}}}],
- letVars: {x: {$pow: ["$a", 2]}},
- target: target.getName(),
- updatePipeline: [{$set: {z: "$$x"}}]
- })));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 49}, {_id: 2, z: 9}]});
- })();
+ } else {
+ // Implicit database creation is prohibited in a cluster.
+ const error = assert.throws(() => source.aggregate(foreignPipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
+
+ // Force creation of the database and collection, then fall through to the test
+ // below.
+ assert.commandWorked(foreignTarget.insert({_id: 1}));
+ }
+
+ assert.doesNotThrow(() => source.aggregate(foreignPipeline));
+ assertArrayEq({actual: foreignTarget.find().toArray(), expected: [{_id: 1, z: 1}]});
+ assert.commandWorked(foreignDb.dropDatabase());
+})();
+
+// Test that $merge can reference the default 'let' variable 'new' which holds the entire
+// document from the source collection.
+(function testMergeWithDefaultLetVariable() {
+ assert(source.drop());
+ assert(target.drop());
+
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ target: target.getName(),
+ updatePipeline: [{$set: {x: {$add: ["$$new.a", "$$new.b"]}}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, x: 2}, {_id: 2, x: 4}]});
+})();
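+
+// The default binding exercised above is documented as equivalent to explicitly
+// specifying let: {new: "$$ROOT"}; spelled out here for reference, though not asserted
+// by this test:
+const exampleDefaultLet = {"new": "$$ROOT"};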
+
+// Test that the default 'let' variable 'new' is not available once the 'let' argument to the
+// $merge stage is specified explicitly.
+(function testMergeCannotUseDefaultLetVariableIfLetIsSpecified() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ const error = assert.throws(() => source.aggregate(makeMergePipeline({
+ letVars: {foo: "bar"},
+ target: target.getName(),
+ updatePipeline: [{$project: {x: "$$new.a", y: "$$new.b"}}]
+ })));
+ assert.commandFailedWithCode(error, 17276);
+})();
+
+// Test that $merge can accept an empty object holding no variables, and that the
+// default 'new' variable is then not available.
+(function testMergeWithEmptyLetVariables() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ // Can use an empty object.
+ assert.doesNotThrow(
+ () => source.aggregate(makeMergePipeline(
+ {letVars: {}, target: target.getName(), updatePipeline: [{$set: {x: "foo"}}]})));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, c: 1, x: "foo"}, {_id: 2, x: "foo"}]
+ });
+
+ // No default variable 'new' is available.
+ const error = assert.throws(() => source.aggregate(makeMergePipeline({
+ letVars: {},
+ target: target.getName(),
+ updatePipeline: [{$project: {x: "$$new.a", y: "$$new.b"}}]
+ })));
+ assert.commandFailedWithCode(error, 17276);
+})();
+
+// Test that $merge can accept a null value as the 'let' argument and the default variable
+// 'new' can still be used.
+// Note that this is not desirable behaviour but rather a limitation in the IDL parser,
+// which cannot differentiate between an optional field specified explicitly as 'null' and
+// one not specified at all. In both cases it will treat the field as if it wasn't
+// specified. So, this test ensures that we're aware of this limitation. Once the
+// limitation is addressed in SERVER-41272, this test should be updated accordingly.
+(function testMergeWithNullLetVariables() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ // Can use a null 'let' argument.
+ assert.doesNotThrow(
+ () => source.aggregate(makeMergePipeline(
+ {letVars: null, target: target.getName(), updatePipeline: [{$set: {x: "foo"}}]})));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, c: 1, x: "foo"}, {_id: 2, x: "foo"}]
+ });
+
+ // Can use the default 'new' variable.
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: null,
+ target: target.getName(),
+ updatePipeline: [{$project: {x: "$$new.a", y: "$$new.b"}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, x: 1, y: 1}, {_id: 2, x: 2, y: 2}]});
+})();
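+
+// When custom variables are needed alongside the source document, the 'new' binding
+// can be re-established explicitly next to them; a sketch assumed to mirror the
+// default binding described above:
+const exampleLetWithNew = {"new": "$$ROOT", x: "$a"};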
+
+// Test that constant values can be specified in the 'let' argument and referenced in the update
+// pipeline.
+(function testMergeWithConstantLetVariable() {
+ // Non-array constants.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: {a: 1, b: "foo", c: true},
+ target: target.getName(),
+ updatePipeline: [{$set: {x: "$$a", y: "$$b", z: "$$c"}}]
+ })));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, c: 1, x: 1, y: "foo", z: true}, {_id: 2, x: 1, y: "foo", z: true}]
+ });
+
+ // Constant array.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: {a: [1, 2, 3]},
+ target: target.getName(),
+ updatePipeline: [{$set: {x: {$arrayElemAt: ["$$a", 1]}}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, x: 2}, {_id: 2, x: 2}]});
+})();
+
+// Test that variables referencing the fields in the source document can be specified in the
+// 'let' argument and referenced in the update pipeline.
+(function testMergeWithNonConstantLetVariables() {
+ // Non-array fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 2, b: 2}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: {x: "$a", y: "$b"},
+ target: target.getName(),
+ updatePipeline: [{$set: {z: {$add: ["$$x", "$$y"]}}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}, {_id: 2, z: 4}]});
+
+ // Array field with expressions in the pipeline.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: [1, 2, 3]}, {_id: 2, a: [4, 5, 6]}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: {x: "$a"},
+ target: target.getName(),
+ updatePipeline: [{$set: {z: {$arrayElemAt: ["$$x", 1]}}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 2}, {_id: 2, z: 5}]});
+
+ // Array field with expressions in the 'let' argument.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: {x: {$arrayElemAt: ["$a", 2]}},
+ target: target.getName(),
+ updatePipeline: [{$set: {z: "$$x"}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 3}, {_id: 2, z: 6}]});
+})();
+
+// Test that variables using a dotted path can be specified in the 'let' argument and
+// referenced in the update pipeline.
+(function testMergeWithDottedPathLetVariables() {
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: {b: {c: 2}}}, {_id: 2, a: {b: {c: 3}}}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ letVars: {x: "$a.b.c"},
+ target: target.getName(),
+ updatePipeline: [{$set: {z: {$pow: ["$$x", 2]}}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 4}, {_id: 2, z: 9}]});
+})();
+
+// Test that 'let' variables refer to the computed document in the aggregation pipeline,
+// not to the original document in the source collection.
+(function testMergeLetVariablesHoldsComputedValues() {
+ // Test the default 'new' variable.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: 1}, {_id: 2, a: 1, b: 2}, {_id: 3, a: 2, b: 3}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ // In the $group stage the computed total field 'a' intentionally uses the same name as
+ // in the source collection, to make sure that even when a referenced field is present in
+ // the source collection under the same name, the actual value for the variable will be
+ // picked up from the computed document.
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ initialStages: [{$group: {_id: "$a", a: {$sum: "$b"}}}],
+ target: target.getName(),
+ updatePipeline: [{$set: {z: "$$new"}}]
+ })));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, c: 1, z: {_id: 1, a: 3}}, {_id: 2, z: {_id: 2, a: 3}}]
+ });
+
+ // Test custom 'let' variables.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: 5}, {_id: 2, a: 1, b: 2}, {_id: 3, a: 2, b: 3}]));
+ assert.commandWorked(target.insert({_id: 1, c: 1}));
+
+ assert.doesNotThrow(() => source.aggregate(makeMergePipeline({
+ initialStages: [{$group: {_id: "$a", a: {$sum: "$b"}}}],
+ letVars: {x: {$pow: ["$a", 2]}},
+ target: target.getName(),
+ updatePipeline: [{$set: {z: "$$x"}}]
+ })));
+ assertArrayEq(
+ {actual: target.find().toArray(), expected: [{_id: 1, c: 1, z: 49}, {_id: 2, z: 9}]});
+})();
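+
+// Restating the key point of the last test: after the $group stage, $$new is bound to
+// the grouped document rather than to any original source document; e.g. for the first
+// group in the first half of the test (value taken from the assertion above):
+const exampleComputedDoc = {_id: 1, a: 3}; // what $$new held for group {_id: 1}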
}());
diff --git a/jstests/aggregation/sources/merge/mode_replace_discard.js b/jstests/aggregation/sources/merge/mode_replace_discard.js
index 5a0aa6eeb79..aba69a27d28 100644
--- a/jstests/aggregation/sources/merge/mode_replace_discard.js
+++ b/jstests/aggregation/sources/merge/mode_replace_discard.js
@@ -5,204 +5,193 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isSharded.
-
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "discard"}
- };
- const pipeline = [mergeStage];
-
- // Test $merge when some documents in the source collection don't have a matching document in
- // the target collection. The merge operation should succeed and unmatched documents should
- // be discarded.
- (function testMergeIfMatchingDocumentNotFound() {
- // Single document without a match.
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, a: 1}, {_id: 3, a: 3}]});
-
- // Multiple documents without a match.
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1}]});
- })();
-
- // Test $merge when all documents in the source collection have a matching document in the
- // target collection.
- (function testMergeWhenAllDocumentsHaveMatch() {
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 3, a: 3}));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3}]});
-
- // Source has multiple documents with matches in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, b: 3}]
- });
- })();
-
- // Test $merge when the source collection is empty. The target collection should not be
- // modified.
- (function testMergeWhenSourceIsEmpty() {
- assert.commandWorked(source.deleteMany({}));
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
-
- // Test $merge does not insert into the target collection a new document that was inserted
- // into the source collection.
- (function testMergeDoesNotInsertNewDocument() {
- // Insert and merge a single document.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- assert.commandWorked(source.deleteOne({_id: 3}));
-
- // Insert and merge multiple documents.
- assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
- assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
- })();
-
- // Test $merge doesn't modify the target collection if a document has been removed from the
- // source collection.
- (function testMergeDoesNotUpdateDeletedDocument() {
- assert.commandWorked(source.deleteOne({_id: 1}));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
-
- // Test $merge with an explicit 'on' field consisting of a single document field or
- // multiple document fields that differ from the _id field.
- (function testMergeWithOnFields() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because the implicitly created shard
- // key of {_id: 1} will not be covered by the unique index created in this test, which
- // is not allowed.
- return;
- }
-
- // The 'on' specification contains a single document field.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c"}, {_id: 5, a: 40, c: "z"}]
- });
-
- // The 'on' specification contains multiple document fields.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
- assert.commandWorked(source.insert(
- [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
- assert.commandWorked(target.insert(
- [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 1, a: 1, b: "a", c: "x"},
- {_id: 4, a: 30, b: "c"},
- {_id: 5, a: 40, c: "z"}
- ]
- });
- assert.commandWorked(source.dropIndex({a: 1, b: 1}));
- assert.commandWorked(target.dropIndex({a: 1, b: 1}));
- })();
-
- // Test $merge with a dotted path in the 'on' field.
- (function testMergeWithDottedOnField() {
- if (FixtureHelpers.isSharded(source)) {
- // Skip this test if the collection is sharded, because the implicitly created shard
- // key of {_id: 1} will not be covered by the unique index created in this test, which
- // is not allowed.
- return;
- }
-
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.commandWorked(source.insert([
- {_id: 1, a: {b: "b"}, c: "x"},
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isSharded.
+
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "discard"}
+};
+const pipeline = [mergeStage];
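+
+// For reference, the stage above with the default 'on' field spelled out (that _id is
+// the default follows from the $merge documentation rather than from anything asserted
+// here):
+const exampleReplaceDiscardStage = {
+ $merge: {
+ into: target.getName(),
+ on: "_id", // the default match key
+ whenMatched: "replace", // overwrite the matched target document
+ whenNotMatched: "discard" // silently skip unmatched source documents
+ }
+};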
+
+// Test $merge when some documents in the source collection don't have a matching document in
+// the target collection. The merge operation should succeed and unmatched documents should
+// be discarded.
+(function testMergeIfMatchingDocumentNotFound() {
+ // Single document without a match.
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1}, {_id: 3, a: 3}]});
+
+ // Multiple documents without a match.
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1}]});
+})();
+
+// Test $merge when all documents in the source collection have a matching document in the
+// target collection.
+(function testMergeWhenAllDocumentsHaveMatch() {
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 3, a: 3}));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3}]});
+
+ // Source has multiple documents with matches in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, b: 3}]
+ });
+})();
+
+// Test $merge when the source collection is empty. The target collection should not be
+// modified.
+(function testMergeWhenSourceIsEmpty() {
+ assert.commandWorked(source.deleteMany({}));
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
+
+// Test $merge does not insert into the target collection a new document that was inserted
+// into the source collection.
+(function testMergeDoesNotInsertNewDocument() {
+ // Insert and merge a single document.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+ assert.commandWorked(source.deleteOne({_id: 3}));
+
+ // Insert and merge multiple documents.
+ assert.commandWorked(source.insert({_id: 3, a: 3, b: "c"}));
+ assert.commandWorked(source.insert({_id: 4, a: 4, c: "d"}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+ assert.commandWorked(source.deleteMany({_id: {$in: [3, 4]}}));
+})();
+
+// Test $merge doesn't modify the target collection if a document has been removed from the
+// source collection.
+(function testMergeDoesNotUpdateDeletedDocument() {
+ assert.commandWorked(source.deleteOne({_id: 1}));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
+
+// Test $merge with an explicit 'on' field consisting of a single document field or
+// multiple document fields that differ from the _id field.
+(function testMergeWithOnFields() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because the implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ // The 'on' specification contains a single document field.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.commandWorked(
+ source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(
+ target.insert([{_id: 1, a: 1, c: "x"}, {_id: 4, a: 30, c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c"}, {_id: 5, a: 40, c: "z"}]
+ });
+
+ // The 'on' specification contains multiple document fields.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({a: 1, b: 1}, {unique: true}));
+ assert.commandWorked(source.insert(
+ [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 2, a: 2, b: "b"}, {_id: 3, a: 30, b: "c"}]));
+ assert.commandWorked(target.insert(
+ [{_id: 1, a: 1, b: "a"}, {_id: 4, a: 30, b: "c", c: "y"}, {_id: 5, a: 40, c: "z"}]));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: ["a", "b"]}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1, b: "a", c: "x"}, {_id: 4, a: 30, b: "c"}, {_id: 5, a: 40, c: "z"}]
+ });
+ assert.commandWorked(source.dropIndex({a: 1, b: 1}));
+ assert.commandWorked(target.dropIndex({a: 1, b: 1}));
+})();
+
+// Test $merge with a dotted path in the 'on' field.
+(function testMergeWithDottedOnField() {
+ if (FixtureHelpers.isSharded(source)) {
+ // Skip this test if the collection is sharded, because the implicitly created shard
+ // key of {_id: 1} will not be covered by the unique index created in this test, which
+ // is not allowed.
+ return;
+ }
+
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.commandWorked(source.insert([
+ {_id: 1, a: {b: "b"}, c: "x"},
+ {_id: 2, a: {b: "c"}, c: "y"},
+ {_id: 3, a: {b: 30}, b: "c"}
+ ]));
+ assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
+ assert.doesNotThrow(
+ () => source.aggregate(
+ [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [
{_id: 2, a: {b: "c"}, c: "y"},
- {_id: 3, a: {b: 30}, b: "c"}
- ]));
- assert.commandWorked(target.insert({_id: 2, a: {b: "c"}}));
- assert.doesNotThrow(
- () => source.aggregate(
- [{$project: {_id: 0}}, {$merge: Object.assign({on: "a.b"}, mergeStage.$merge)}]));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [
- {_id: 2, a: {b: "c"}, c: "y"},
- ]
- });
- })();
-
- // Test $merge when the _id field is removed from the aggregate projection but is used in the
- // $merge's 'on' field.
- (function testMergeWhenDocIdIsRemovedFromProjection() {
- // The _id is a single 'on' field (a default one).
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
- assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
-
- // The _id is part of the compound 'on' field.
- assert(target.drop());
- assert.commandWorked(target.insert({_id: 1, b: "c"}));
- assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0}},
- {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
- ]));
- assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
- assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
- assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
- })();
+ ]
+ });
+})();
+
+// Test $merge when the _id field is removed from the aggregate projection but is used in the
+// $merge's 'on' field.
+(function testMergeWhenDocIdIsRemovedFromProjection() {
+ // The _id is a single 'on' field (a default one).
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1, b: "a"}, {_id: 2, a: 2, b: "b"}]));
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.doesNotThrow(() => source.aggregate([{$project: {_id: 0}}, mergeStage]));
+ assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
+
+ // The _id is part of the compound 'on' field.
+ assert(target.drop());
+ assert.commandWorked(target.insert({_id: 1, b: "c"}));
+ assert.commandWorked(source.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.commandWorked(target.createIndex({_id: 1, a: -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 0}},
+ {$merge: Object.assign({on: ["_id", "a"]}, mergeStage.$merge)}
+ ]));
+ assertArrayEq({actual: target.find({}, {_id: 0}).toArray(), expected: [{b: "c"}]});
+ assert.commandWorked(source.dropIndex({_id: 1, a: -1}));
+ assert.commandWorked(target.dropIndex({_id: 1, a: -1}));
+})();
}());
diff --git a/jstests/aggregation/sources/merge/mode_replace_fail.js b/jstests/aggregation/sources/merge/mode_replace_fail.js
index 7afdb6579dc..19e74e58536 100644
--- a/jstests/aggregation/sources/merge/mode_replace_fail.js
+++ b/jstests/aggregation/sources/merge/mode_replace_fail.js
@@ -4,114 +4,111 @@
// exists when none is expected.
// @tags: [assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
- const source = db[`${jsTest.name()}_source`];
- source.drop();
- const target = db[`${jsTest.name()}_target`];
- target.drop();
- const mergeStage = {
- $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "fail"}
- };
- const pipeline = [mergeStage];
+const source = db[`${jsTest.name()}_source`];
+source.drop();
+const target = db[`${jsTest.name()}_target`];
+target.drop();
+const mergeStage = {
+ $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "fail"}
+};
+const pipeline = [mergeStage];
- // Test $merge when some documents in the source collection don't have a matching document in
- // the target collection.
- (function testMergeFailsIfMatchingDocumentNotFound() {
- // Single document without a match.
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- let error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, a: 1}, {_id: 3, a: 3}]});
+// Test $merge when some documents in the source collection don't have a matching document in
+// the target collection.
+(function testMergeFailsIfMatchingDocumentNotFound() {
+ // Single document without a match.
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ let error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1}, {_id: 3, a: 3}]});
- // Multiple documents without a match.
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}]));
- error = assert.throws(() => source.aggregate(pipeline));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1}]});
- })();
+ // Multiple documents without a match.
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}]));
+ error = assert.throws(() => source.aggregate(pipeline));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, a: 1}]});
+})();
- // Test $merge when all documents in the source collection have a matching document in the
- // target collection.
- (function testMergeWhenAllDocumentsHaveMatch() {
- // Source has a single element with a match in the target.
- assert(source.drop());
- assert(target.drop());
- assert.commandWorked(source.insert({_id: 3, a: 3}));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3}]});
+// Test $merge when all documents in the source collection have a matching document in the
+// target collection.
+(function testMergeWhenAllDocumentsHaveMatch() {
+ // Source has a single element with a match in the target.
+ assert(source.drop());
+ assert(target.drop());
+ assert.commandWorked(source.insert({_id: 3, a: 3}));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 3, a: 3}]});
- // Source has multiple documents with matches in the target.
- assert(target.drop());
- assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq({
- actual: target.find().toArray(),
- expected: [{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]
- });
- })();
+ // Source has multiple documents with matches in the target.
+ assert(target.drop());
+ assert.commandWorked(source.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({
+ actual: target.find().toArray(),
+ expected: [{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]
+ });
+})();
- // Test $merge when the source collection is empty. The target collection should not be
- // modified.
- (function testMergeWhenSourceIsEmpty() {
- assert.commandWorked(source.deleteMany({}));
- assert(target.drop());
- assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
- assert.doesNotThrow(() => source.aggregate(pipeline));
- assertArrayEq(
- {actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
- })();
+// Test $merge when the source collection is empty. The target collection should not be
+// modified.
+(function testMergeWhenSourceIsEmpty() {
+ assert.commandWorked(source.deleteMany({}));
+ assert(target.drop());
+ assert.commandWorked(target.insert([{_id: 1, b: 1}, {_id: 2, b: 2}]));
+ assert.doesNotThrow(() => source.aggregate(pipeline));
+ assertArrayEq({actual: target.find().toArray(), expected: [{_id: 1, b: 1}, {_id: 2, b: 2}]});
+})();
- // Test $merge uses unorderded batch update. When a mismatch is detected in a batch, the error
- // should be returned once the batch is processed and no further documents should be processed
- // and updated.
- (function testMergeUnorderedBatchUpdate() {
- const maxBatchSize = 16 * 1024 * 1024; // 16MB
- const docSize = 1024 * 1024; // 1MB
- const numDocs = 20;
- const maxDocsInBatch = maxBatchSize / docSize;
+// Test that $merge uses unordered batch updates. When a mismatch is detected in a batch, the
+// error should be returned once the batch is processed, and no further documents should be
+// processed or updated.
+(function testMergeUnorderedBatchUpdate() {
+ const maxBatchSize = 16 * 1024 * 1024; // 16MB
+ const docSize = 1024 * 1024; // 1MB
+ const numDocs = 20;
+ const maxDocsInBatch = maxBatchSize / docSize;
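+    // i.e. 16MB / 1MB = 16 documents fit in one batch, matching the [0, 15] _id range noted
+    // below.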
- assert(source.drop());
- assert(target.drop());
+ assert(source.drop());
+ assert(target.drop());
- // Insert 'numDocs' documents of size 'docSize' into the source collection.
- generateCollection({coll: source, numDocs: numDocs, docSize: docSize});
+ // Insert 'numDocs' documents of size 'docSize' into the source collection.
+ generateCollection({coll: source, numDocs: numDocs, docSize: docSize});
- // Copy over documents from the source collection into the target and remove the 'padding'
- // field from the projection, so we can distinguish which documents have been modified by
- // the $merge stage.
- assert.doesNotThrow(
- () => source.aggregate([{$project: {padding: 0}}, {$out: target.getName()}]));
+    // Copy documents from the source collection into the target, excluding the 'padding'
+    // field via the projection, so we can distinguish which documents have been modified by
+    // the $merge stage.
+ assert.doesNotThrow(() =>
+ source.aggregate([{$project: {padding: 0}}, {$out: target.getName()}]));
- // Remove one document from the target collection so that $merge fails. This document should
- // be in the first batch of the aggregation pipeline below, which sorts documents by the _id
- // field in ascending order. Since each document in the source collection is 1MB, and the
- // max batch size is 16MB, the first batch will contain documents with the _id in the range
- // of [0, 15].
- assert.commandWorked(target.deleteOne({_id: Math.floor(Math.random() * maxDocsInBatch)}));
+ // Remove one document from the target collection so that $merge fails. This document should
+ // be in the first batch of the aggregation pipeline below, which sorts documents by the _id
+ // field in ascending order. Since each document in the source collection is 1MB, and the
+ // max batch size is 16MB, the first batch will contain documents with the _id in the range
+ // of [0, 15].
+ assert.commandWorked(target.deleteOne({_id: Math.floor(Math.random() * maxDocsInBatch)}));
- // Ensure the target collection has 'numDocs' - 1 documents without the 'padding' field.
- assert.eq(numDocs - 1, target.find({padding: {$exists: false}}).itcount());
+ // Ensure the target collection has 'numDocs' - 1 documents without the 'padding' field.
+ assert.eq(numDocs - 1, target.find({padding: {$exists: false}}).itcount());
- // Run the $merge pipeline and ensure it fails, as there is one document in the source
- // collection without a match in the target.
- const error = assert.throws(() => source.aggregate([{$sort: {_id: 1}}, mergeStage]));
- assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
+ // Run the $merge pipeline and ensure it fails, as there is one document in the source
+ // collection without a match in the target.
+ const error = assert.throws(() => source.aggregate([{$sort: {_id: 1}}, mergeStage]));
+ assert.commandFailedWithCode(error, ErrorCodes.MergeStageNoMatchingDocument);
- // There will be maxDocsInBatch documents in the batch, one without a match.
- const numDocsModified = maxDocsInBatch - 1;
- // All remaining documents except those in the first batch must be left unmodified.
- const numDocsUnmodified = numDocs - maxDocsInBatch;
- assert.eq(numDocsModified, target.find({padding: {$exists: true}}).itcount());
- assert.eq(numDocsUnmodified, target.find({padding: {$exists: false}}).itcount());
- })();
+ // There will be maxDocsInBatch documents in the batch, one without a match.
+ const numDocsModified = maxDocsInBatch - 1;
+ // All remaining documents except those in the first batch must be left unmodified.
+ const numDocsUnmodified = numDocs - maxDocsInBatch;
+ assert.eq(numDocsModified, target.find({padding: {$exists: true}}).itcount());
+ assert.eq(numDocsUnmodified, target.find({padding: {$exists: false}}).itcount());
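+
+    // Concretely: the first batch holds 16 documents, 15 of which find a match and are
+    // replaced (gaining the 'padding' field); the remaining 20 - 16 = 4 documents beyond the
+    // first batch keep their projected form without 'padding'.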
+})();
}());
diff --git a/jstests/aggregation/sources/merge/mode_replace_insert.js b/jstests/aggregation/sources/merge/mode_replace_insert.js
index bb1e407ea7b..e81ac857dc4 100644
--- a/jstests/aggregation/sources/merge/mode_replace_insert.js
+++ b/jstests/aggregation/sources/merge/mode_replace_insert.js
@@ -1,225 +1,214 @@
// Tests for the $merge stage with whenMatched: "replace" and whenNotMatched: "insert".
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
-
- const coll = db.merge_replace_insert;
- const outColl = db.merge_replace_insert_out;
- coll.drop();
- outColl.drop();
-
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i, a: i}));
- }
-
- // Test that a $merge with whenMatched: "replace" and whenNotMatched: "insert" mode will
- // default the "on" fields to "_id".
- coll.aggregate(
- [{$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
- assert.eq(nDocs, outColl.find().itcount());
-
- // Test that $merge will update existing documents that match the "on" fields.
- const nDocsReplaced = 5;
- coll.aggregate([
- {$project: {_id: {$mod: ["$_id", nDocsReplaced]}}},
- {
- $merge: {
- into: outColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "_id"
- }
- }
- ]);
- assert.eq(nDocsReplaced, outColl.find({a: {$exists: false}}).itcount());
-
- // Test $merge with a dotted path "on" fields.
- coll.drop();
- outColl.drop();
- assert.commandWorked(coll.insert([{_id: 0, a: {b: 1}}, {_id: 1, a: {b: 1}, c: 1}]));
- assert.commandWorked(outColl.createIndex({"a.b": 1, _id: 1}, {unique: true}));
- coll.aggregate([
- {$addFields: {_id: 0}},
- {
- $merge: {
- into: outColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["_id", "a.b"]
- }
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+
+const coll = db.merge_replace_insert;
+const outColl = db.merge_replace_insert_out;
+coll.drop();
+outColl.drop();
+
+const nDocs = 10;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i, a: i}));
+}
+
+// Test that a $merge with whenMatched: "replace" and whenNotMatched: "insert" mode will
+// default the "on" fields to "_id".
+coll.aggregate(
+ [{$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
+assert.eq(nDocs, outColl.find().itcount());
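+
+// For illustration (an equivalent spelling, not part of the original assertion): the stage
+// above behaves as if "on" had been given explicitly, e.g.
+//   {$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "_id"}}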
+
+// Test that $merge will update existing documents that match the "on" fields.
+const nDocsReplaced = 5;
+coll.aggregate([
+ {$project: {_id: {$mod: ["$_id", nDocsReplaced]}}},
+ {$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "_id"}}
+]);
+assert.eq(nDocsReplaced, outColl.find({a: {$exists: false}}).itcount());
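+
+// Worked example: {$mod: ["$_id", nDocsReplaced]} maps _id 0..9 onto 0..4, so each projected
+// document {_id: k} (carrying no 'a' field) replaces the matching target document, leaving
+// exactly nDocsReplaced = 5 documents without 'a'.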
+
+// Test $merge with dotted-path "on" fields.
+coll.drop();
+outColl.drop();
+assert.commandWorked(coll.insert([{_id: 0, a: {b: 1}}, {_id: 1, a: {b: 1}, c: 1}]));
+assert.commandWorked(outColl.createIndex({"a.b": 1, _id: 1}, {unique: true}));
+coll.aggregate([
+ {$addFields: {_id: 0}},
+ {
+ $merge: {
+ into: outColl.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["_id", "a.b"]
}
- ]);
- assert.eq([{_id: 0, a: {b: 1}, c: 1}], outColl.find().toArray());
-
- // Test that $merge will automatically generate a missing "_id" for the "on" field.
- coll.drop();
- outColl.drop();
- assert.commandWorked(coll.insert({field: "will be removed"}));
- assert.doesNotThrow(() => coll.aggregate([
- {$replaceRoot: {newRoot: {}}},
- {
- $merge: {
- into: outColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- }
+ }
+]);
+assert.eq([{_id: 0, a: {b: 1}, c: 1}], outColl.find().toArray());
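+
+// For illustration: after {$addFields: {_id: 0}} both source documents share the compound key
+// {_id: 0, "a.b": 1}, so whichever document is merged last wins; here the second source
+// document replaces the first, leaving a single merged document in the target.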
+
+// Test that $merge will automatically generate a missing "_id" for the "on" field.
+coll.drop();
+outColl.drop();
+assert.commandWorked(coll.insert({field: "will be removed"}));
+assert.doesNotThrow(() => coll.aggregate([
+ {$replaceRoot: {newRoot: {}}},
+ {
+ $merge: {
+ into: outColl.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
}
- ]));
- assert.eq(1, outColl.find({field: {$exists: false}}).itcount());
-
- // Test that $merge will automatically generate a missing "_id", and the aggregation succeeds
- // with multiple "on" fields.
- outColl.drop();
- assert.commandWorked(outColl.createIndex({name: -1, _id: 1}, {unique: true, sparse: true}));
- assert.doesNotThrow(() => coll.aggregate([
- {$replaceRoot: {newRoot: {name: "jungsoo"}}},
- {
- $merge: {
- into: outColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["_id", "name"]
- }
+ }
+]));
+assert.eq(1, outColl.find({field: {$exists: false}}).itcount());
+
+// Test that $merge will automatically generate a missing "_id", and the aggregation succeeds
+// with multiple "on" fields.
+outColl.drop();
+assert.commandWorked(outColl.createIndex({name: -1, _id: 1}, {unique: true, sparse: true}));
+assert.doesNotThrow(() => coll.aggregate([
+ {$replaceRoot: {newRoot: {name: "jungsoo"}}},
+ {
+ $merge: {
+ into: outColl.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["_id", "name"]
}
- ]));
- assert.eq(1, outColl.find().itcount());
-
- // Test that we will not attempt to modify the _id of an existing document if the _id is
- // projected away but the "on" field does not involve _id.
- coll.drop();
- assert.commandWorked(coll.insert({name: "kyle"}));
- assert.commandWorked(coll.insert({name: "nick"}));
- outColl.drop();
- assert.commandWorked(outColl.createIndex({name: 1}, {unique: true}));
- assert.commandWorked(outColl.insert({_id: "must be unchanged", name: "kyle"}));
- assert.doesNotThrow(() => coll.aggregate([
- {$project: {_id: 0}},
- {$addFields: {newField: 1}},
- {
- $merge: {
- into: outColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "name"
- }
+ }
+]));
+assert.eq(1, outColl.find().itcount());
+
+// Test that we will not attempt to modify the _id of an existing document if the _id is
+// projected away but the "on" field does not involve _id.
+coll.drop();
+assert.commandWorked(coll.insert({name: "kyle"}));
+assert.commandWorked(coll.insert({name: "nick"}));
+outColl.drop();
+assert.commandWorked(outColl.createIndex({name: 1}, {unique: true}));
+assert.commandWorked(outColl.insert({_id: "must be unchanged", name: "kyle"}));
+assert.doesNotThrow(() => coll.aggregate([
+ {$project: {_id: 0}},
+ {$addFields: {newField: 1}},
+ {
+ $merge:
+ {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "name"}
+ }
+]));
+const outResult = outColl.find().sort({name: 1}).toArray();
+const errmsgFn = () => tojson(outResult);
+assert.eq(2, outResult.length, errmsgFn);
+assert.docEq({_id: "must be unchanged", name: "kyle", newField: 1}, outResult[0], errmsgFn);
+assert.eq("nick", outResult[1].name, errmsgFn);
+assert.eq(1, outResult[1].newField, errmsgFn);
+assert.neq(null, outResult[1]._id, errmsgFn);
+
+// Test that $merge with a missing non-id "on" field fails.
+outColl.drop();
+assert.commandWorked(outColl.createIndex({missing: 1}, {unique: true}));
+assertErrorCode(
+ coll,
+ [{
+ $merge: {
+ into: outColl.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: "missing"
}
- ]));
- const outResult = outColl.find().sort({name: 1}).toArray();
- const errmsgFn = () => tojson(outResult);
- assert.eq(2, outResult.length, errmsgFn);
- assert.docEq({_id: "must be unchanged", name: "kyle", newField: 1}, outResult[0], errmsgFn);
- assert.eq("nick", outResult[1].name, errmsgFn);
- assert.eq(1, outResult[1].newField, errmsgFn);
- assert.neq(null, outResult[1]._id, errmsgFn);
-
- // Test that $merge with a missing non-id "on" field fails.
- outColl.drop();
- assert.commandWorked(outColl.createIndex({missing: 1}, {unique: true}));
- assertErrorCode(
- coll,
- [{
- $merge: {
- into: outColl.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "missing"
- }
- }],
- 51132 // This attempt should fail because there's no field 'missing' in the document.
- );
-
- // Test that a replace fails to insert a document if it violates a unique index constraint. In
- // this example, $merge will attempt to insert multiple documents with {a: 0} which is not
- // allowed with the unique index on {a: 1}.
- coll.drop();
- assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}]));
-
- outColl.drop();
- assert.commandWorked(outColl.createIndex({a: 1}, {unique: true}));
- assertErrorCode(
- coll,
- [
- {$addFields: {a: 0}},
- {$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
- ],
- ErrorCodes.DuplicateKey);
-
- // Test that $merge fails if the "on" fields contains an array.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, a: [1, 2]}));
- assert.commandWorked(outColl.createIndex({"a.b": 1, _id: 1}, {unique: true}));
- assertErrorCode(coll,
- [
- {$addFields: {_id: 0}},
- {
+ }],
+ 51132 // This attempt should fail because there's no field 'missing' in the document.
+);
+
+// Test that a replace fails to insert a document if it violates a unique index constraint. In
+// this example, $merge will attempt to insert multiple documents with {a: 0} which is not
+// allowed with the unique index on {a: 1}.
+coll.drop();
+assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}]));
+
+outColl.drop();
+assert.commandWorked(outColl.createIndex({a: 1}, {unique: true}));
+assertErrorCode(
+ coll,
+ [
+ {$addFields: {a: 0}},
+ {$merge: {into: outColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}
+ ],
+ ErrorCodes.DuplicateKey);
+
+// Test that $merge fails if the value of an "on" field, or a prefix of its path, is an array.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: [1, 2]}));
+assert.commandWorked(outColl.createIndex({"a.b": 1, _id: 1}, {unique: true}));
+assertErrorCode(coll,
+ [
+ {$addFields: {_id: 0}},
+ {
$merge: {
into: outColl.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
on: ["_id", "a.b"]
}
- }
- ],
- 51132);
-
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, a: [{b: 1}]}));
- assertErrorCode(coll,
- [
- {$addFields: {_id: 0}},
- {
+ }
+ ],
+ 51132);
+
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: [{b: 1}]}));
+assertErrorCode(coll,
+ [
+ {$addFields: {_id: 0}},
+ {
$merge: {
into: outColl.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
on: ["_id", "a.b"]
}
- }
- ],
- 51132);
-
- // Tests for $merge to a database that differs from the aggregation database.
- const foreignDb = db.getSiblingDB("merge_replace_insert_foreign");
- const foreignTargetColl = foreignDb.out;
- const pipelineDifferentOutputDb = [{
- $merge: {
- into: {
- db: foreignDb.getName(),
- coll: foreignTargetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert",
- }
- }];
-
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0}));
- foreignDb.dropDatabase();
-
- if (!FixtureHelpers.isMongos(db)) {
- // Test that $merge implicitly creates a new database when the output collection's database
- // doesn't exist.
- coll.aggregate(pipelineDifferentOutputDb);
- assert.eq(foreignTargetColl.find().itcount(), 1);
- } else {
- // Implicit database creation is prohibited in a cluster.
- let error = assert.throws(() => coll.aggregate(pipelineDifferentOutputDb));
- assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
-
- // Force a creation of the database and collection, then fall through the test below.
- assert.commandWorked(foreignTargetColl.insert({_id: 0}));
+ }
+ ],
+ 51132);
+
+// Tests for $merge to a database that differs from the aggregation database.
+const foreignDb = db.getSiblingDB("merge_replace_insert_foreign");
+const foreignTargetColl = foreignDb.out;
+const pipelineDifferentOutputDb = [{
+ $merge: {
+ into: {
+ db: foreignDb.getName(),
+ coll: foreignTargetColl.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert",
}
+}];
+
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0}));
+foreignDb.dropDatabase();
- // Insert a new document into the source collection, then test that running the same
- // aggregation will replace existing documents in the foreign output collection when
- // applicable.
- coll.drop();
- const newDocuments = [{_id: 0, newField: 1}, {_id: 1}];
- assert.commandWorked(coll.insert(newDocuments));
+if (!FixtureHelpers.isMongos(db)) {
+ // Test that $merge implicitly creates a new database when the output collection's database
+ // doesn't exist.
coll.aggregate(pipelineDifferentOutputDb);
- assert.eq(foreignTargetColl.find().sort({_id: 1}).toArray(), newDocuments);
+ assert.eq(foreignTargetColl.find().itcount(), 1);
+} else {
+ // Implicit database creation is prohibited in a cluster.
+ let error = assert.throws(() => coll.aggregate(pipelineDifferentOutputDb));
+ assert.commandFailedWithCode(error, ErrorCodes.NamespaceNotFound);
+
+    // Force creation of the database and collection, then fall through to the test below.
+ assert.commandWorked(foreignTargetColl.insert({_id: 0}));
+}
+
+// Insert new documents into the source collection, then test that running the same
+// aggregation will replace existing documents in the foreign output collection when
+// applicable.
+coll.drop();
+const newDocuments = [{_id: 0, newField: 1}, {_id: 1}];
+assert.commandWorked(coll.insert(newDocuments));
+coll.aggregate(pipelineDifferentOutputDb);
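+// For illustration: the pre-existing {_id: 0} document in the foreign target is replaced by
+// {_id: 0, newField: 1}, while {_id: 1} has no match and is inserted, as the assertion below
+// checks.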
+assert.eq(foreignTargetColl.find().sort({_id: 1}).toArray(), newDocuments);
}());
diff --git a/jstests/aggregation/sources/merge/on_fields_validation.js b/jstests/aggregation/sources/merge/on_fields_validation.js
index 78c7dd4eb41..ae911689cdf 100644
--- a/jstests/aggregation/sources/merge/on_fields_validation.js
+++ b/jstests/aggregation/sources/merge/on_fields_validation.js
@@ -7,137 +7,133 @@
* @tags: [cannot_create_unique_index_when_using_hashed_shard_key]
*/
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- const source = db.unique_key_validation_source;
- const target = db.unique_key_validation_target;
-
- [source, target].forEach(coll => coll.drop());
- assert.commandWorked(source.insert({_id: 0}));
-
- //
- // Tests for invalid "on" fields specifications.
- //
- function assertOnFieldsIsInvalid(onFields, expectedErrorCode) {
- const stage = {
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: onFields
- }
- };
- assertErrorCode(source, stage, expectedErrorCode);
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+const source = db.unique_key_validation_source;
+const target = db.unique_key_validation_target;
+
+[source, target].forEach(coll => coll.drop());
+assert.commandWorked(source.insert({_id: 0}));
+
+//
+// Tests for invalid "on" fields specifications.
+//
+function assertOnFieldsIsInvalid(onFields, expectedErrorCode) {
+ const stage = {
+ $merge:
+ {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert", on: onFields}
+ };
+ assertErrorCode(source, stage, expectedErrorCode);
+}
+
+// An "on" fields value that is neither a string nor an array is prohibited.
+assertOnFieldsIsInvalid(3.14, 51186);
+assertOnFieldsIsInvalid({_id: 1}, 51186);
+
+// Explicitly specifying an empty-array "on" fields is invalid.
+assertOnFieldsIsInvalid([], 51187);
+
+// The "on" fields array won't be accepted if any element is not a string.
+assertOnFieldsIsInvalid(["hashed", 1], 51134);
+assertOnFieldsIsInvalid([["_id"]], 51134);
+assertOnFieldsIsInvalid([null], 51134);
+assertOnFieldsIsInvalid([true, "a"], 51134);
+
+//
+// An error is raised if $merge encounters a document that is missing one or more of the
+// "on" fields.
+//
+assert.commandWorked(target.remove({}));
+assert.commandWorked(target.createIndex({name: 1, team: -1}, {unique: true}));
+const pipelineNameTeam = [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["name", "team"]
}
+}];
+
+// Missing both "name" and "team".
+assertErrorCode(source, pipelineNameTeam, 51132);
+
+// Missing "name".
+assert.commandWorked(source.update({_id: 0}, {_id: 0, team: "query"}));
+assertErrorCode(source, pipelineNameTeam, 51132);
+
+// Missing "team".
+assert.commandWorked(source.update({_id: 0}, {_id: 0, name: "nicholas"}));
+assertErrorCode(source, pipelineNameTeam, 51132);
+
+// A document with both "name" and "team" will be accepted.
+assert.commandWorked(source.update({_id: 0}, {_id: 0, name: "nicholas", team: "query"}));
+assert.doesNotThrow(() => source.aggregate(pipelineNameTeam));
+assert.eq(target.find().toArray(), [{_id: 0, name: "nicholas", team: "query"}]);
+
+//
+// An error is raised if $merge encounters a document where one of the "on" fields is a nullish
+// value.
+//
+assert.commandWorked(target.remove({}));
+assert.commandWorked(target.createIndex({"song.artist": 1}, {unique: 1}));
+const pipelineSongDotArtist = [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["song.artist"]
+ }
+}];
+
+// Explicit null "song" (a prefix of an "on" field).
+assert.commandWorked(source.update({_id: 0}, {_id: 0, song: null}));
+assertErrorCode(source, pipelineSongDotArtist, 51132);
+
+// Explicit undefined "song" (a prefix of an "on" field).
+assert.commandWorked(source.update({_id: 0}, {_id: 0, song: undefined}));
+assertErrorCode(source, pipelineSongDotArtist, 51132);
+
+// Explicit null "song.artist".
+assert.commandWorked(source.update({_id: 0}, {_id: 0, song: {artist: null}}));
+assertErrorCode(source, pipelineSongDotArtist, 51132);
+
+// Explicit undefined "song.artist".
+assert.commandWorked(source.update({_id: 0}, {_id: 0, song: {artist: undefined}}));
+assertErrorCode(source, pipelineSongDotArtist, 51132);
+
+// A valid "artist" will be accepted.
+assert.commandWorked(source.update({_id: 0}, {_id: 0, song: {artist: "Illenium"}}));
+assert.doesNotThrow(() => source.aggregate(pipelineSongDotArtist));
+assert.eq(target.find().toArray(), [{_id: 0, song: {artist: "Illenium"}}]);
+
+//
+// An error is raised if $merge encounters a document where one of the "on" fields (or a prefix
+// of an "on" field) is an array.
+//
+assert.commandWorked(target.remove({}));
+assert.commandWorked(target.createIndex({"address.street": 1}, {unique: 1}));
+const pipelineAddressDotStreet = [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["address.street"]
+ }
+}];
+
+// "address.street" is an array.
+assert.commandWorked(
+ source.update({_id: 0}, {_id: 0, address: {street: ["West 43rd St", "1633 Broadway"]}}));
+assertErrorCode(source, pipelineAddressDotStreet, 51185);
+
+// "address" is an array (a prefix of an "on" field).
+assert.commandWorked(source.update({_id: 0}, {_id: 0, address: [{street: "1633 Broadway"}]}));
+assertErrorCode(source, pipelineAddressDotStreet, 51132);
- // A non-array or string "on" fields is prohibited.
- assertOnFieldsIsInvalid(3.14, 51186);
- assertOnFieldsIsInvalid({_id: 1}, 51186);
-
- // Explicitly specifying an empty-array "on" fields is invalid.
- assertOnFieldsIsInvalid([], 51187);
-
- // The "on" fields array won't be accepted if any element is not a string.
- assertOnFieldsIsInvalid(["hashed", 1], 51134);
- assertOnFieldsIsInvalid([["_id"]], 51134);
- assertOnFieldsIsInvalid([null], 51134);
- assertOnFieldsIsInvalid([true, "a"], 51134);
-
- //
- // An error is raised if $merge encounters a document that is missing one or more of the
- // "on" fields.
- //
- assert.commandWorked(target.remove({}));
- assert.commandWorked(target.createIndex({name: 1, team: -1}, {unique: true}));
- const pipelineNameTeam = [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["name", "team"]
- }
- }];
-
- // Missing both "name" and "team".
- assertErrorCode(source, pipelineNameTeam, 51132);
-
- // Missing "name".
- assert.commandWorked(source.update({_id: 0}, {_id: 0, team: "query"}));
- assertErrorCode(source, pipelineNameTeam, 51132);
-
- // Missing "team".
- assert.commandWorked(source.update({_id: 0}, {_id: 0, name: "nicholas"}));
- assertErrorCode(source, pipelineNameTeam, 51132);
-
- // A document with both "name" and "team" will be accepted.
- assert.commandWorked(source.update({_id: 0}, {_id: 0, name: "nicholas", team: "query"}));
- assert.doesNotThrow(() => source.aggregate(pipelineNameTeam));
- assert.eq(target.find().toArray(), [{_id: 0, name: "nicholas", team: "query"}]);
-
- //
- // An error is raised if $merge encounters a document where one of the "on" fields is a nullish
- // value.
- //
- assert.commandWorked(target.remove({}));
- assert.commandWorked(target.createIndex({"song.artist": 1}, {unique: 1}));
- const pipelineSongDotArtist = [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["song.artist"]
- }
- }];
-
- // Explicit null "song" (a prefix of an "on" field).
- assert.commandWorked(source.update({_id: 0}, {_id: 0, song: null}));
- assertErrorCode(source, pipelineSongDotArtist, 51132);
-
- // Explicit undefined "song" (a prefix of an "on" field).
- assert.commandWorked(source.update({_id: 0}, {_id: 0, song: undefined}));
- assertErrorCode(source, pipelineSongDotArtist, 51132);
-
- // Explicit null "song.artist".
- assert.commandWorked(source.update({_id: 0}, {_id: 0, song: {artist: null}}));
- assertErrorCode(source, pipelineSongDotArtist, 51132);
-
- // Explicit undefined "song.artist".
- assert.commandWorked(source.update({_id: 0}, {_id: 0, song: {artist: undefined}}));
- assertErrorCode(source, pipelineSongDotArtist, 51132);
-
- // A valid "artist" will be accepted.
- assert.commandWorked(source.update({_id: 0}, {_id: 0, song: {artist: "Illenium"}}));
- assert.doesNotThrow(() => source.aggregate(pipelineSongDotArtist));
- assert.eq(target.find().toArray(), [{_id: 0, song: {artist: "Illenium"}}]);
-
- //
- // An error is raised if $merge encounters a document where one of the "on" fields (or a prefix
- // of an "on" field) is an array.
- //
- assert.commandWorked(target.remove({}));
- assert.commandWorked(target.createIndex({"address.street": 1}, {unique: 1}));
- const pipelineAddressDotStreet = [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["address.street"]
- }
- }];
-
- // "address.street" is an array.
- assert.commandWorked(
- source.update({_id: 0}, {_id: 0, address: {street: ["West 43rd St", "1633 Broadway"]}}));
- assertErrorCode(source, pipelineAddressDotStreet, 51185);
-
- // "address" is an array (a prefix of an "on" field).
- assert.commandWorked(source.update({_id: 0}, {_id: 0, address: [{street: "1633 Broadway"}]}));
- assertErrorCode(source, pipelineAddressDotStreet, 51132);
-
- // A scalar "address.street" is accepted.
- assert.commandWorked(source.update({_id: 0}, {_id: 0, address: {street: "1633 Broadway"}}));
- assert.doesNotThrow(() => source.aggregate(pipelineAddressDotStreet));
- assert.eq(target.find().toArray(), [{_id: 0, address: {street: "1633 Broadway"}}]);
+// A scalar "address.street" is accepted.
+assert.commandWorked(source.update({_id: 0}, {_id: 0, address: {street: "1633 Broadway"}}));
+assert.doesNotThrow(() => source.aggregate(pipelineAddressDotStreet));
+assert.eq(target.find().toArray(), [{_id: 0, address: {street: "1633 Broadway"}}]);
}());
diff --git a/jstests/aggregation/sources/merge/requires_unique_index.js b/jstests/aggregation/sources/merge/requires_unique_index.js
index a316d239321..38f8aa27f64 100644
--- a/jstests/aggregation/sources/merge/requires_unique_index.js
+++ b/jstests/aggregation/sources/merge/requires_unique_index.js
@@ -6,407 +6,365 @@
// manually. This is to avoid implicit creation and sharding of the $merge target collections in the
// passthrough suites.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
- // assertMergeFailsWithoutUniqueIndex.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+ // assertMergeFailsWithoutUniqueIndex.
- const testDB = db.getSiblingDB("merge_requires_unique_index");
- assert.commandWorked(testDB.dropDatabase());
+const testDB = db.getSiblingDB("merge_requires_unique_index");
+assert.commandWorked(testDB.dropDatabase());
- const source = testDB.source;
- assert.commandWorked(source.insert([{_id: 0, a: 0}, {_id: 1, a: 1}]));
+const source = testDB.source;
+assert.commandWorked(source.insert([{_id: 0, a: 0}, {_id: 1, a: 1}]));
- // Helper to drop a collection without using the shell helper, and thus avoiding the implicit
- // recreation in the passthrough suites.
- function dropWithoutImplicitRecreate(coll) {
- testDB.runCommand({drop: coll.getName()});
+// Helper to drop a collection without using the shell helper, and thus avoiding the implicit
+// recreation in the passthrough suites.
+function dropWithoutImplicitRecreate(coll) {
+ testDB.runCommand({drop: coll.getName()});
+}
+
+// Test that using {_id: 1} or not providing a unique key does not require any special indexes.
+(function simpleIdOnFieldsOrDefaultShouldNotRequireIndexes() {
+ function assertDefaultOnFieldsSucceeds({setupCallback, collName}) {
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of
+ // the source and target collection, as this will cause the assertion below to trip.
+ if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
+ return;
+
+ setupCallback();
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: collName,
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
+ setupCallback();
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: collName,
+ on: "_id",
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
+ });
}
- // Test that using {_id: 1} or not providing a unique key does not require any special indexes.
- (function simpleIdOnFieldsOrDefaultShouldNotRequireIndexes() {
- function assertDefaultOnFieldsSucceeds({setupCallback, collName}) {
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of
- // the source and target collection, as this will cause the assertion below to trip.
- if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
- return;
-
- setupCallback();
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: collName,
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- setupCallback();
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: collName,
- on: "_id",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- });
+ // Test that using "_id" or not specifying "on" fields works for a collection which does
+ // not exist.
+ const non_existent = testDB.non_existent;
+ assertDefaultOnFieldsSucceeds({
+ setupCallback: () => dropWithoutImplicitRecreate(non_existent),
+ collName: non_existent.getName()
+ });
+
+ const unindexed = testDB.unindexed;
+ assertDefaultOnFieldsSucceeds({
+ setupCallback: () => {
+ dropWithoutImplicitRecreate(unindexed);
+ assert.commandWorked(testDB.runCommand({create: unindexed.getName()}));
+ },
+ collName: unindexed.getName()
+ });
+}());
+
+// Test that a unique index on the "on" fields can be used to satisfy the requirement.
+(function basicUniqueIndexWorks() {
+ const target = testDB.regular_unique;
+ dropWithoutImplicitRecreate(target);
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["_id", "a"], target: target});
+
+ assert.commandWorked(testDB.runCommand({create: target.getName()}));
+ assert.commandWorked(target.createIndex({a: 1, _id: 1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["_id", "a"]
}
+ }]));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: ["a", "_id"]
+ }
+ }]));
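+
+    // Note (from the two calls above): the "on" fields do not need to match the key order of
+    // the unique index {a: 1, _id: 1}; both ["_id", "a"] and ["a", "_id"] are accepted.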
+
+ assertMergeFailsWithoutUniqueIndex(
+ {source: source, onFields: ["_id", "a", "b"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "b"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["b"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a"], target: target});
+
+ assert.commandWorked(target.dropIndex({a: 1, _id: 1}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "a"}
+ }]));
+
+ // Create a non-unique index and make sure that doesn't work.
+ assert.commandWorked(target.dropIndex({a: 1}));
+ assert.commandWorked(target.createIndex({a: 1}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["_id", "a"], target: target});
+}());
- // Test that using "_id" or not specifying "on" fields works for a collection which does
- // not exist.
- const non_existent = testDB.non_existent;
- assertDefaultOnFieldsSucceeds({
- setupCallback: () => dropWithoutImplicitRecreate(non_existent),
- collName: non_existent.getName()
- });
+// Test that a unique index on the "on" fields cannot be used to satisfy the requirement if it
+// is a partial index.
+(function uniqueButPartialShouldNotWork() {
+ const target = testDB.unique_but_partial_indexes;
+ dropWithoutImplicitRecreate(target);
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+
+ assert.commandWorked(
+ target.createIndex({a: 1}, {unique: true, partialFilterExpression: {a: {$gte: 2}}}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["_id", "a"], target: target});
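+
+    // The partial unique index only enforces uniqueness for documents matching the filter
+    // (a >= 2), so it cannot guarantee a unique match for every merged document.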
+}());
- const unindexed = testDB.unindexed;
- assertDefaultOnFieldsSucceeds({
- setupCallback: () => {
- dropWithoutImplicitRecreate(unindexed);
- assert.commandWorked(testDB.runCommand({create: unindexed.getName()}));
- },
- collName: unindexed.getName()
- });
- }());
-
- // Test that a unique index on the "on" fields can be used to satisfy the requirement.
- (function basicUniqueIndexWorks() {
- const target = testDB.regular_unique;
- dropWithoutImplicitRecreate(target);
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["_id", "a"], target: target});
-
- assert.commandWorked(testDB.runCommand({create: target.getName()}));
- assert.commandWorked(target.createIndex({a: 1, _id: 1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([{
+// Test that a unique index on the "on" fields cannot be used to satisfy the requirement if it
+// has a different collation.
+(function indexMustMatchCollationOfOperation() {
+ const target = testDB.collation_indexes;
+ dropWithoutImplicitRecreate(target);
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true, collation: {locale: "en_US"}}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+ assertMergeFailsWithoutUniqueIndex(
+ {source: source, onFields: "a", target: target, options: {collation: {locale: "en"}}});
+ assertMergeFailsWithoutUniqueIndex(
+ {source: source, onFields: "a", target: target, options: {collation: {locale: "simple"}}});
+ assertMergeFailsWithoutUniqueIndex({
+ source: source,
+ onFields: "a",
+ target: target,
+ options: {collation: {locale: "en_US", strength: 1}}
+ });
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: "a"
+ }
+ }],
+ {collation: {locale: "en_US"}}));
+
+ // Test that a non-unique index with the same collation cannot be used.
+ assert.commandWorked(target.dropIndex({a: 1}));
+ assert.commandWorked(target.createIndex({a: 1}, {collation: {locale: "en_US"}}));
+ assertMergeFailsWithoutUniqueIndex(
+ {source: source, onFields: "a", target: target, options: {collation: {locale: "en_US"}}});
+
+ // Test that a collection-default collation will be applied to the index, but not the
+ // $merge's update or insert into that collection. The pipeline will inherit a
+ // collection-default collation, but from the source collection, not the $merge's target
+ // collection.
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(
+ testDB.runCommand({create: target.getName(), collation: {locale: "en_US"}}));
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assertMergeFailsWithoutUniqueIndex({
+ source: source,
+ onFields: "a",
+ target: target,
+ });
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: "a"
+ }
+ }],
+ {collation: {locale: "en_US"}}));
+
+ // Test that when the source collection and foreign collection have the same default
+ // collation, a unique index on the foreign collection can be used.
+ const newSourceColl = testDB.new_source;
+ dropWithoutImplicitRecreate(newSourceColl);
+ assert.commandWorked(
+ testDB.runCommand({create: newSourceColl.getName(), collation: {locale: "en_US"}}));
+ assert.commandWorked(newSourceColl.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
+ // This aggregate does not specify a collation, but it should inherit the default collation
+ // from 'newSourceColl', and therefore the index on 'target' should be eligible for use
+ // since it has the same collation.
+ assert.doesNotThrow(() => newSourceColl.aggregate([{
+ $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "a"}
+ }]));
+
+ // Test that an explicit "simple" collation can be used with an index without a collation.
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: "a"
+ }
+ }],
+ {collation: {locale: "simple"}}));
+ assertMergeFailsWithoutUniqueIndex(
+ {source: source, onFields: "a", target: target, options: {collation: {locale: "en_US"}}});
+}());
+
+// Test that a unique index which is not simply ascending/descending fields cannot be used for
+// the "on" fields.
+(function testSpecialIndexTypes() {
+ const target = testDB.special_index_types;
+ dropWithoutImplicitRecreate(target);
+
+ assert.commandWorked(target.createIndex({a: 1, text: "text"}, {unique: true}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "text"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "text", target: target});
+
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(target.createIndex({a: 1, geo: "2dsphere"}, {unique: true}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "geo"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["geo", "a"], target: target});
+
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(target.createIndex({geo: "2d"}, {unique: true}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "geo"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "geo", target: target});
+
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(
+ target.createIndex({geo: "geoHaystack", a: 1}, {unique: true, bucketSize: 5}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "geo"], target: target});
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["geo", "a"], target: target});
+
+ dropWithoutImplicitRecreate(target);
+ // MongoDB does not support unique hashed indexes.
+ assert.commandFailedWithCode(target.createIndex({a: "hashed"}, {unique: true}), 16764);
+ assert.commandWorked(target.createIndex({a: "hashed"}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+}());
+
+// Test that a unique index with dotted field names can be used.
+(function testDottedFieldNames() {
+ const target = testDB.dotted_field_paths;
+ dropWithoutImplicitRecreate(target);
+
+ assert.commandWorked(target.createIndex({a: 1, "b.c.d": -1}, {unique: true}));
+ assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 1, a: 1, b: {c: {d: "x"}}}},
+ {
$merge: {
into: target.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
- on: ["_id", "a"]
+ on: ["a", "b.c.d"]
}
- }]));
- assert.doesNotThrow(() => source.aggregate([{
+ }
+ ]));
+
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(target.createIndex({"id.x": 1, "id.y": -1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$group: {_id: {x: "$_id", y: "$a"}}},
+ {$project: {id: "$_id"}},
+ {
$merge: {
into: target.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
- on: ["a", "_id"]
+ on: ["id.x", "id.y"]
}
- }]));
-
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["_id", "a", "b"], target: target});
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "b"], target: target});
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["b"], target: target});
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a"], target: target});
-
- assert.commandWorked(target.dropIndex({a: 1, _id: 1}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([{
+ }
+ ]));
+ assert.doesNotThrow(() => source.aggregate([
+ {$group: {_id: {x: "$_id", y: "$a"}}},
+ {$project: {id: "$_id"}},
+ {
$merge: {
into: target.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
- on: "a"
+ on: ["id.y", "id.x"]
}
- }]));
-
- // Create a non-unique index and make sure that doesn't work.
- assert.commandWorked(target.dropIndex({a: 1}));
- assert.commandWorked(target.createIndex({a: 1}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["_id", "a"], target: target});
- }());
-
- // Test that a unique index on the "on" fields cannot be used to satisfy the requirement if it
- // is a partial index.
- (function uniqueButPartialShouldNotWork() {
- const target = testDB.unique_but_partial_indexes;
- dropWithoutImplicitRecreate(target);
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
-
- assert.commandWorked(
- target.createIndex({a: 1}, {unique: true, partialFilterExpression: {a: {$gte: 2}}}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["_id", "a"], target: target});
- }());
-
- // Test that a unique index on the "on" fields cannot be used to satisfy the requirement if it
- // has a different collation.
- (function indexMustMatchCollationOfOperation() {
- const target = testDB.collation_indexes;
- dropWithoutImplicitRecreate(target);
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
-
- assert.commandWorked(
- target.createIndex({a: 1}, {unique: true, collation: {locale: "en_US"}}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: "a", target: target, options: {collation: {locale: "en"}}});
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- onFields: "a",
- target: target,
- options: {collation: {locale: "simple"}}
- });
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- onFields: "a",
- target: target,
- options: {collation: {locale: "en_US", strength: 1}}
- });
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "a"
- }
- }],
- {collation: {locale: "en_US"}}));
-
- // Test that a non-unique index with the same collation cannot be used.
- assert.commandWorked(target.dropIndex({a: 1}));
- assert.commandWorked(target.createIndex({a: 1}, {collation: {locale: "en_US"}}));
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- onFields: "a",
- target: target,
- options: {collation: {locale: "en_US"}}
- });
+ }
+ ]));
+
+ // Test that we cannot use arrays with a dotted path within a $merge.
+ dropWithoutImplicitRecreate(target);
+ assert.commandWorked(target.createIndex({"b.c": 1}, {unique: true}));
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assert.commandFailedWithCode(testDB.runCommand({
+ aggregate: source.getName(),
+ pipeline: [
+ {$replaceRoot: {newRoot: {b: [{c: 1}, {c: 2}]}}},
+ {
+ $merge: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: "b.c"
+ }
+ }
+ ],
+ cursor: {}
+ }),
+ [50905, 51132]);
+ });
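+
+    // An array value anywhere along the "on" path makes the merge key ambiguous, which is why
+    // the command is rejected with one of the error codes asserted above.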
+}());
- // Test that a collection-default collation will be applied to the index, but not the
- // $merge's update or insert into that collection. The pipeline will inherit a
- // collection-default collation, but from the source collection, not the $merge's target
- // collection.
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(
- testDB.runCommand({create: target.getName(), collation: {locale: "en_US"}}));
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- onFields: "a",
- target: target,
- });
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "a"
- }
- }],
- {collation: {locale: "en_US"}}));
-
- // Test that when the source collection and foreign collection have the same default
- // collation, a unique index on the foreign collection can be used.
- const newSourceColl = testDB.new_source;
- dropWithoutImplicitRecreate(newSourceColl);
- assert.commandWorked(
- testDB.runCommand({create: newSourceColl.getName(), collation: {locale: "en_US"}}));
- assert.commandWorked(newSourceColl.insert([{_id: 1, a: 1}, {_id: 2, a: 2}]));
- // This aggregate does not specify a collation, but it should inherit the default collation
- // from 'newSourceColl', and therefore the index on 'target' should be eligible for use
- // since it has the same collation.
- assert.doesNotThrow(() => newSourceColl.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "a"
- }
- }]));
-
- // Test that an explicit "simple" collation can be used with an index without a collation.
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(target.createIndex({a: 1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "a"
- }
- }],
- {collation: {locale: "simple"}}));
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- onFields: "a",
- target: target,
- options: {collation: {locale: "en_US"}}
- });
- }());
-
- // Test that a unique index which is not simply ascending/descending fields cannot be used for
- // the "on" fields.
- (function testSpecialIndexTypes() {
- const target = testDB.special_index_types;
- dropWithoutImplicitRecreate(target);
-
- assert.commandWorked(target.createIndex({a: 1, text: "text"}, {unique: true}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["a", "text"], target: target});
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "text", target: target});
-
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(target.createIndex({a: 1, geo: "2dsphere"}, {unique: true}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["a", "geo"], target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["geo", "a"], target: target});
-
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(target.createIndex({geo: "2d"}, {unique: true}));
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["a", "geo"], target: target});
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "geo", target: target});
-
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(
- target.createIndex({geo: "geoHaystack", a: 1}, {unique: true, bucketSize: 5}));
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["a", "geo"], target: target});
- assertMergeFailsWithoutUniqueIndex(
- {source: source, onFields: ["geo", "a"], target: target});
-
- dropWithoutImplicitRecreate(target);
- // MongoDB does not support unique hashed indexes.
- assert.commandFailedWithCode(target.createIndex({a: "hashed"}, {unique: true}), 16764);
- assert.commandWorked(target.createIndex({a: "hashed"}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- }());
-
- // Test that a unique index with dotted field names can be used.
- (function testDottedFieldNames() {
- const target = testDB.dotted_field_paths;
- dropWithoutImplicitRecreate(target);
-
- assert.commandWorked(target.createIndex({a: 1, "b.c.d": -1}, {unique: true}));
- assertMergeFailsWithoutUniqueIndex({source: source, onFields: "a", target: target});
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 1, a: 1, b: {c: {d: "x"}}}},
- {
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["a", "b.c.d"]
- }
- }
- ]));
-
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(target.createIndex({"id.x": 1, "id.y": -1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([
- {$group: {_id: {x: "$_id", y: "$a"}}},
- {$project: {id: "$_id"}},
- {
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["id.x", "id.y"]
- }
- }
- ]));
- assert.doesNotThrow(() => source.aggregate([
- {$group: {_id: {x: "$_id", y: "$a"}}},
- {$project: {id: "$_id"}},
- {
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: ["id.y", "id.x"]
- }
- }
- ]));
+// Test that a unique index that is multikey can still be used.
+(function testMultikeyIndex() {
+ const target = testDB.multikey_index;
+ dropWithoutImplicitRecreate(target);
- // Test that we cannot use arrays with a dotted path within a $merge.
- dropWithoutImplicitRecreate(target);
- assert.commandWorked(target.createIndex({"b.c": 1}, {unique: true}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- assert.commandFailedWithCode(testDB.runCommand({
- aggregate: source.getName(),
- pipeline: [
- {$replaceRoot: {newRoot: {b: [{c: 1}, {c: 2}]}}},
- {
- $merge: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: "b.c"
- }
- }
- ],
- cursor: {}
- }),
- [50905, 51132]);
- });
- }());
-
- // Test that a unique index that is multikey can still be used.
- (function testMultikeyIndex() {
- const target = testDB.multikey_index;
- dropWithoutImplicitRecreate(target);
-
- assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 1, "a.b": "$a"}},
- {
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "a.b"
- }
- }
- ]));
- assert.commandWorked(target.insert({_id: "TARGET", a: [{b: "hi"}, {b: "hello"}]}));
- assert.commandWorked(source.insert({a: "hi", proofOfUpdate: "PROOF"}));
- assert.doesNotThrow(() => source.aggregate([
- {$project: {_id: 0, proofOfUpdate: "PROOF", "a.b": "$a"}},
- {
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: "a.b"
- }
- }
- ]));
- assert.docEq(target.findOne({"a.b": "hi", proofOfUpdate: "PROOF"}),
- {_id: "TARGET", a: {b: "hi"}, proofOfUpdate: "PROOF"});
- }());
-
- // Test that a unique index that is sparse can still be used.
- (function testSparseIndex() {
- const target = testDB.multikey_index;
- dropWithoutImplicitRecreate(target);
-
- assert.commandWorked(target.createIndex({a: 1}, {unique: true, sparse: true}));
- assert.doesNotThrow(() => source.aggregate([{
+ assert.commandWorked(target.createIndex({"a.b": 1}, {unique: true}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 1, "a.b": "$a"}},
+ {
$merge: {
into: target.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
- on: "a"
+ on: "a.b"
}
- }]));
- assert.commandWorked(target.insert([{b: 1, c: 1}, {a: null}, {d: 4}]));
- assert.doesNotThrow(() => source.aggregate([{
+ }
+ ]));
+ assert.commandWorked(target.insert({_id: "TARGET", a: [{b: "hi"}, {b: "hello"}]}));
+ assert.commandWorked(source.insert({a: "hi", proofOfUpdate: "PROOF"}));
+ assert.doesNotThrow(() => source.aggregate([
+ {$project: {_id: 0, proofOfUpdate: "PROOF", "a.b": "$a"}},
+ {
$merge: {
into: target.getName(),
whenMatched: "replace",
whenNotMatched: "insert",
- on: "a"
+ on: "a.b"
}
- }]));
- }());
+ }
+ ]));
+ assert.docEq(target.findOne({"a.b": "hi", proofOfUpdate: "PROOF"}),
+ {_id: "TARGET", a: {b: "hi"}, proofOfUpdate: "PROOF"});
+}());
+
+// Test that a unique index that is sparse can still be used.
+(function testSparseIndex() {
+ const target = testDB.multikey_index;
+ dropWithoutImplicitRecreate(target);
+
+ assert.commandWorked(target.createIndex({a: 1}, {unique: true, sparse: true}));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "a"}
+ }]));
+ assert.commandWorked(target.insert([{b: 1, c: 1}, {a: null}, {d: 4}]));
+ assert.doesNotThrow(() => source.aggregate([{
+ $merge: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "a"}
+ }]));
+}());
}());
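// A minimal sketch of the invariant the tests above exercise (collection names
// are illustrative, not from the suite): $merge requires its "on" fields to be
// covered by a unique index on the target collection, and that index may be
// sparse or multikey.
const sketchSource = db.merge_sketch_source;
const sketchTarget = db.merge_sketch_target;
sketchSource.drop();
sketchTarget.drop();
assert.commandWorked(sketchSource.insert({_id: 1, a: 1}));
assert.commandWorked(sketchTarget.createIndex({a: 1}, {unique: true, sparse: true}));
assert.doesNotThrow(() => sketchSource.aggregate([
    {$merge: {into: sketchTarget.getName(), whenMatched: "replace", whenNotMatched: "insert", on: "a"}}
]));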
diff --git a/jstests/aggregation/sources/merge/use_cases.js b/jstests/aggregation/sources/merge/use_cases.js
index 5bce8006656..6c1c71b9419 100644
--- a/jstests/aggregation/sources/merge/use_cases.js
+++ b/jstests/aggregation/sources/merge/use_cases.js
@@ -5,112 +5,109 @@
* @tags: [requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- Random.setRandomSeed();
+Random.setRandomSeed();
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB("use_cases");
+const mongosDB = st.s.getDB("use_cases");
- const metricsColl = mongosDB["metrics"];
- const rollupColl = mongosDB["rollup"];
+const metricsColl = mongosDB["metrics"];
+const rollupColl = mongosDB["rollup"];
- function incDateByMinutes(date, mins) {
- return new Date(date.getTime() + (60 * 1000 * mins));
- }
-
- // Inserts 'nSamples' worth of random data starting at 'date'.
- function insertRandomData(coll, date, nSamples) {
- let ticksSum = 0, tempSum = 0;
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nSamples; i++) {
- const randTick = Random.randInt(100);
- const randTemp = Random.randInt(100);
- ticksSum += randTick;
- tempSum += randTemp;
- bulk.insert({
- _id: incDateByMinutes(date, i * (60 / nSamples)),
- ticks: randTick,
- temp: randTemp
- });
- }
- assert.commandWorked(bulk.execute());
+function incDateByMinutes(date, mins) {
+ return new Date(date.getTime() + (60 * 1000 * mins));
+}
- return [ticksSum, tempSum];
+// Inserts 'nSamples' worth of random data starting at 'date'.
+function insertRandomData(coll, date, nSamples) {
+ let ticksSum = 0, tempSum = 0;
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nSamples; i++) {
+ const randTick = Random.randInt(100);
+ const randTemp = Random.randInt(100);
+ ticksSum += randTick;
+ tempSum += randTemp;
+ bulk.insert(
+ {_id: incDateByMinutes(date, i * (60 / nSamples)), ticks: randTick, temp: randTemp});
}
-
- // Runs a $merge aggregate on the metrics collection to the rollup collection, grouping by hour,
- // summing the ticks, and averaging the temps.
- function runAggregate({startDate, whenMatchedMode, whenNotMatchedMode}) {
- metricsColl.aggregate([
- {$match: {_id: {$gte: startDate}}},
- {
- $group: {
- _id: {$dateToString: {format: "%Y-%m-%dT%H", date: "$_id"}},
- ticks: {$sum: "$ticks"},
- avgTemp: {$avg: "$temp"},
- }
- },
- {
- $merge: {
- into: {db: rollupColl.getDB().getName(), coll: rollupColl.getName()},
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
+ assert.commandWorked(bulk.execute());
+
+ return [ticksSum, tempSum];
+}
+
+// Runs a $merge aggregate on the metrics collection to the rollup collection, grouping by hour,
+// summing the ticks, and averaging the temps.
+function runAggregate({startDate, whenMatchedMode, whenNotMatchedMode}) {
+ metricsColl.aggregate([
+ {$match: {_id: {$gte: startDate}}},
+ {
+ $group: {
+ _id: {$dateToString: {format: "%Y-%m-%dT%H", date: "$_id"}},
+ ticks: {$sum: "$ticks"},
+ avgTemp: {$avg: "$temp"},
}
- ]);
- }
+ },
+ {
+ $merge: {
+ into: {db: rollupColl.getDB().getName(), coll: rollupColl.getName()},
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }
+ ]);
+}
- // Shard the metrics (source) collection on _id, which is the date of the sample.
- const hourZero = new ISODate("2018-08-15T00:00:00.000Z");
- const hourOne = incDateByMinutes(hourZero, 60);
- st.shardColl(metricsColl, {_id: 1}, {_id: hourOne}, {_id: hourOne}, mongosDB.getName());
+// Shard the metrics (source) collection on _id, which is the date of the sample.
+const hourZero = new ISODate("2018-08-15T00:00:00.000Z");
+const hourOne = incDateByMinutes(hourZero, 60);
+st.shardColl(metricsColl, {_id: 1}, {_id: hourOne}, {_id: hourOne}, mongosDB.getName());
- // Insert sample documents into the metrics collection.
- const samplesPerHour = 10;
- let [ticksSum, tempSum] = insertRandomData(metricsColl, hourZero, samplesPerHour);
+// Insert sample documents into the metrics collection.
+const samplesPerHour = 10;
+let [ticksSum, tempSum] = insertRandomData(metricsColl, hourZero, samplesPerHour);
- runAggregate({startDate: hourZero, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
+runAggregate({startDate: hourZero, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
- // Verify the results of the $merge in the rollup collection.
- let res = rollupColl.find().sort({_id: 1});
- assert.eq([{_id: "2018-08-15T00", ticks: ticksSum, avgTemp: tempSum / samplesPerHour}],
- res.toArray());
+// Verify the results of the $merge in the rollup collection.
+let res = rollupColl.find().sort({_id: 1});
+assert.eq([{_id: "2018-08-15T00", ticks: ticksSum, avgTemp: tempSum / samplesPerHour}],
+ res.toArray());
- // Insert another hour's worth of data, and verify that the $merge will append the result to the
- // output collection.
- [ticksSum, tempSum] = insertRandomData(metricsColl, hourOne, samplesPerHour);
+// Insert another hour's worth of data, and verify that the $merge will append the result to the
+// output collection.
+[ticksSum, tempSum] = insertRandomData(metricsColl, hourOne, samplesPerHour);
- runAggregate({startDate: hourOne, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
+runAggregate({startDate: hourOne, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
- res = rollupColl.find().sort({_id: 1}).toArray();
- assert.eq(2, res.length);
- assert.eq(res[1], {_id: "2018-08-15T01", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
+res = rollupColl.find().sort({_id: 1}).toArray();
+assert.eq(2, res.length);
+assert.eq(res[1], {_id: "2018-08-15T01", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
- // Whoops, there was a mistake in the last hour of data. Let's re-run the aggregation and update
- // the rollup collection using "replace" mode.
- assert.commandWorked(metricsColl.update({_id: hourOne}, {$inc: {ticks: 10}}));
- ticksSum += 10;
+// Whoops, there was a mistake in the last hour of data. Let's re-run the aggregation and update
+// the rollup collection using "replace" mode.
+assert.commandWorked(metricsColl.update({_id: hourOne}, {$inc: {ticks: 10}}));
+ticksSum += 10;
- runAggregate({startDate: hourOne, whenMatchedMode: "replace", whenNotMatchedMode: "insert"});
+runAggregate({startDate: hourOne, whenMatchedMode: "replace", whenNotMatchedMode: "insert"});
- res = rollupColl.find().sort({_id: 1}).toArray();
- assert.eq(2, res.length);
- assert.eq(res[1], {_id: "2018-08-15T01", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
+res = rollupColl.find().sort({_id: 1}).toArray();
+assert.eq(2, res.length);
+assert.eq(res[1], {_id: "2018-08-15T01", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
- // Shard the output collection into 2 chunks, and make the split hour 6.
- const hourSix = incDateByMinutes(hourZero, 60 * 6);
- st.shardColl(rollupColl, {_id: 1}, {_id: hourSix}, {_id: hourSix}, mongosDB.getName());
+// Shard the output collection into 2 chunks, and make the split hour 6.
+const hourSix = incDateByMinutes(hourZero, 60 * 6);
+st.shardColl(rollupColl, {_id: 1}, {_id: hourSix}, {_id: hourSix}, mongosDB.getName());
- // Insert hour-six data into the metrics collection and re-run the aggregation.
- [ticksSum, tempSum] = insertRandomData(metricsColl, hourSix, samplesPerHour);
+// Insert hour-six data into the metrics collection and re-run the aggregation.
+[ticksSum, tempSum] = insertRandomData(metricsColl, hourSix, samplesPerHour);
- runAggregate({startDate: hourSix, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
+runAggregate({startDate: hourSix, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
- res = rollupColl.find().sort({_id: 1}).toArray();
- assert.eq(3, res.length, tojson(res));
- assert.eq(res[2], {_id: "2018-08-15T06", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
+res = rollupColl.find().sort({_id: 1}).toArray();
+assert.eq(3, res.length, tojson(res));
+assert.eq(res[2], {_id: "2018-08-15T06", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
- st.stop();
+st.stop();
}());
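// A minimal sketch of the rollup pattern tested above, with illustrative
// collection names: bucket raw samples by hour with $group, then upsert each
// bucket into a summary collection via $merge. As in the test, _id on the
// source is assumed to be the sample's date.
db.raw_metrics.aggregate([
    {$group: {
        _id: {$dateToString: {format: "%Y-%m-%dT%H", date: "$_id"}},
        ticks: {$sum: "$ticks"},
        avgTemp: {$avg: "$temp"}
    }},
    {$merge: {into: "hourly_rollup", whenMatched: "replace", whenNotMatched: "insert"}}
]);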
diff --git a/jstests/aggregation/sources/out/out_in_lookup_not_allowed.js b/jstests/aggregation/sources/out/out_in_lookup_not_allowed.js
index d81eaaaab83..9e97363233c 100644
--- a/jstests/aggregation/sources/out/out_in_lookup_not_allowed.js
+++ b/jstests/aggregation/sources/out/out_in_lookup_not_allowed.js
@@ -1,28 +1,28 @@
// Tests that $out cannot be used within a $lookup pipeline.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findNonConfigNodes.
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findNonConfigNodes.
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
- const ERROR_CODE_OUT_BANNED_IN_LOOKUP = 51047;
- const ERROR_CODE_OUT_LAST_STAGE_ONLY = 40601;
- const coll = db.out_in_lookup_not_allowed;
- coll.drop();
+const ERROR_CODE_OUT_BANNED_IN_LOOKUP = 51047;
+const ERROR_CODE_OUT_LAST_STAGE_ONLY = 40601;
+const coll = db.out_in_lookup_not_allowed;
+coll.drop();
- const from = db.out_in_lookup_not_allowed_from;
- from.drop();
+const from = db.out_in_lookup_not_allowed_from;
+from.drop();
- if (FixtureHelpers.isSharded(from)) {
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalQueryAllowShardedLookup",
- true);
- }
+if (FixtureHelpers.isSharded(from)) {
+ setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
+ "internalQueryAllowShardedLookup",
+ true);
+}
- let pipeline = [
+let pipeline = [
{
$lookup: {
pipeline: [{$out: "out_collection"}],
@@ -31,9 +31,9 @@
}
},
];
- assertErrorCode(coll, pipeline, ERROR_CODE_OUT_BANNED_IN_LOOKUP);
+assertErrorCode(coll, pipeline, ERROR_CODE_OUT_BANNED_IN_LOOKUP);
- pipeline = [
+pipeline = [
{
$lookup: {
pipeline: [{$project: {x: 0}}, {$out: "out_collection"}],
@@ -43,9 +43,9 @@
},
];
- assertErrorCode(coll, pipeline, ERROR_CODE_OUT_BANNED_IN_LOOKUP);
+assertErrorCode(coll, pipeline, ERROR_CODE_OUT_BANNED_IN_LOOKUP);
- pipeline = [
+pipeline = [
{
$lookup: {
pipeline: [{$out: "out_collection"}, {$match: {x: true}}],
@@ -55,14 +55,14 @@
},
];
- // Pipeline will fail because $out is not last in the subpipeline.
- // Validation for $out in a $lookup's subpipeline occurs at a later point.
- assertErrorCode(coll, pipeline, ERROR_CODE_OUT_LAST_STAGE_ONLY);
+// Pipeline will fail because $out is not last in the subpipeline.
+// Validation for $out in a $lookup's subpipeline occurs at a later point.
+assertErrorCode(coll, pipeline, ERROR_CODE_OUT_LAST_STAGE_ONLY);
- // Create a view that contains $out within $lookup.
- assertDropCollection(coll.getDB(), "view1");
+// Create a view that contains $out within $lookup.
+assertDropCollection(coll.getDB(), "view1");
- pipeline = [
+pipeline = [
{
$lookup: {
pipeline: [{$out: "out_collection"}],
@@ -72,9 +72,9 @@
},
];
- // Pipeline will fail because $out is not allowed to exist within a $lookup.
- // Validation for $out in a view occurs at a later point.
- const cmdRes =
- coll.getDB().runCommand({create: "view1", viewOn: coll.getName(), pipeline: pipeline});
- assert.commandFailedWithCode(cmdRes, ERROR_CODE_OUT_BANNED_IN_LOOKUP);
+// Pipeline will fail because $out is not allowed to exist within a $lookup.
+// Validation for $out in a view occurs at a later point.
+const cmdRes =
+ coll.getDB().runCommand({create: "view1", viewOn: coll.getName(), pipeline: pipeline});
+assert.commandFailedWithCode(cmdRes, ERROR_CODE_OUT_BANNED_IN_LOOKUP);
}());
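// A minimal sketch of the restriction tested above, with hypothetical
// collection names: $out inside a $lookup subpipeline is rejected up front
// with error code 51047.
assert.commandFailedWithCode(db.runCommand({
    aggregate: "orders",
    pipeline: [
        {$lookup: {from: "inventory", pipeline: [{$out: "illegal_target"}], as: "joined"}}
    ],
    cursor: {}
}),
                             51047);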
diff --git a/jstests/aggregation/sources/out/replace_collection.js b/jstests/aggregation/sources/out/replace_collection.js
index 63204485a7c..b614e5bc486 100644
--- a/jstests/aggregation/sources/out/replace_collection.js
+++ b/jstests/aggregation/sources/out/replace_collection.js
@@ -6,69 +6,69 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
- const coll = db.source;
- coll.drop();
+const coll = db.source;
+coll.drop();
- const targetColl = db.target;
- targetColl.drop();
+const targetColl = db.target;
+targetColl.drop();
- const pipeline = [{$out: targetColl.getName()}];
+const pipeline = [{$out: targetColl.getName()}];
- //
- // Test $out with a non-existent output collection.
- //
- assert.commandWorked(coll.insert({_id: 0}));
- coll.aggregate(pipeline);
- assert.eq(1, targetColl.find().itcount());
+//
+// Test $out with a non-existent output collection.
+//
+assert.commandWorked(coll.insert({_id: 0}));
+coll.aggregate(pipeline);
+assert.eq(1, targetColl.find().itcount());
- //
- // Test $out with an existing output collection.
- //
- coll.aggregate(pipeline);
- assert.eq(1, targetColl.find().itcount());
+//
+// Test $out with an existing output collection.
+//
+coll.aggregate(pipeline);
+assert.eq(1, targetColl.find().itcount());
- //
- // Test that $out will preserve the indexes and options of the output collection.
- //
- targetColl.drop();
- assert.commandWorked(db.createCollection(targetColl.getName(), {validator: {a: {$gt: 0}}}));
- assert.commandWorked(targetColl.createIndex({a: 1}));
+//
+// Test that $out will preserve the indexes and options of the output collection.
+//
+targetColl.drop();
+assert.commandWorked(db.createCollection(targetColl.getName(), {validator: {a: {$gt: 0}}}));
+assert.commandWorked(targetColl.createIndex({a: 1}));
- coll.drop();
- assert.commandWorked(coll.insert({a: 1}));
+coll.drop();
+assert.commandWorked(coll.insert({a: 1}));
- coll.aggregate(pipeline);
- assert.eq(1, targetColl.find().itcount());
- assert.eq(2, targetColl.getIndexes().length);
+coll.aggregate(pipeline);
+assert.eq(1, targetColl.find().itcount());
+assert.eq(2, targetColl.getIndexes().length);
- const listColl = db.runCommand({listCollections: 1, filter: {name: targetColl.getName()}});
- assert.commandWorked(listColl);
- assert.eq({a: {$gt: 0}}, listColl.cursor.firstBatch[0].options["validator"]);
+const listColl = db.runCommand({listCollections: 1, filter: {name: targetColl.getName()}});
+assert.commandWorked(listColl);
+assert.eq({a: {$gt: 0}}, listColl.cursor.firstBatch[0].options["validator"]);
- //
- // Test that $out fails if it violates a unique index constraint.
- //
- coll.drop();
- assert.commandWorked(coll.insert([{_id: 0, a: 0}, {_id: 1, a: 0}]));
- targetColl.drop();
- assert.commandWorked(targetColl.createIndex({a: 1}, {unique: true}));
+//
+// Test that $out fails if it violates a unique index constraint.
+//
+coll.drop();
+assert.commandWorked(coll.insert([{_id: 0, a: 0}, {_id: 1, a: 0}]));
+targetColl.drop();
+assert.commandWorked(targetColl.createIndex({a: 1}, {unique: true}));
- assertErrorCode(coll, pipeline, ErrorCodes.DuplicateKey);
+assertErrorCode(coll, pipeline, ErrorCodes.DuplicateKey);
- // Rerun a similar test, except populate the target collection with a document that conflicts
- // with one produced by the pipeline. In this case, there is no unique key violation since the target
- // collection will be dropped before renaming the source collection.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, a: 0}));
- targetColl.remove({});
- assert.commandWorked(targetColl.insert({_id: 1, a: 0}));
+// Rerun a similar test, except populate the target collection with a document that conflicts
+// with one produced by the pipeline. In this case, there is no unique key violation since the target
+// collection will be dropped before renaming the source collection.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: 0}));
+targetColl.remove({});
+assert.commandWorked(targetColl.insert({_id: 1, a: 0}));
- coll.aggregate(pipeline);
- assert.eq(1, targetColl.find().itcount());
- assert.eq(2, targetColl.getIndexes().length);
+coll.aggregate(pipeline);
+assert.eq(1, targetColl.find().itcount());
+assert.eq(2, targetColl.getIndexes().length);
}());
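// A minimal sketch of the index-preservation behavior verified above
// (collection names are illustrative; assumes assertErrorCode from
// jstests/aggregation/extras/utils.js is loaded): $out keeps the target's
// indexes, so results violating a unique index fail with DuplicateKey.
const outSrc = db.out_sketch_source;
const outTgt = db.out_sketch_target;
outSrc.drop();
outTgt.drop();
assert.commandWorked(outTgt.createIndex({a: 1}, {unique: true}));
assert.commandWorked(outSrc.insert([{_id: 0, a: 0}, {_id: 1, a: 0}]));
assertErrorCode(outSrc, [{$out: outTgt.getName()}], ErrorCodes.DuplicateKey);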
diff --git a/jstests/aggregation/sources/out/required_last_position.js b/jstests/aggregation/sources/out/required_last_position.js
index e3a861aaf20..97ba01e7b04 100644
--- a/jstests/aggregation/sources/out/required_last_position.js
+++ b/jstests/aggregation/sources/out/required_last_position.js
@@ -1,17 +1,16 @@
// Tests that $out can only be used as the last stage.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const coll = db.require_out_last;
- coll.drop();
+const coll = db.require_out_last;
+coll.drop();
- // Test that $out is allowed as the last (and only) stage.
- assert.doesNotThrow(() => coll.aggregate([{$out: "out_collection"}]));
+// Test that $out is allowed as the last (and only) stage.
+assert.doesNotThrow(() => coll.aggregate([{$out: "out_collection"}]));
- // Test that $out is not allowed to have a stage after it.
- assertErrorCode(coll, [{$out: "out_collection"}, {$match: {x: true}}], 40601);
- assertErrorCode(
- coll, [{$project: {x: 0}}, {$out: "out_collection"}, {$match: {x: true}}], 40601);
+// Test that $out is not allowed to have a stage after it.
+assertErrorCode(coll, [{$out: "out_collection"}, {$match: {x: true}}], 40601);
+assertErrorCode(coll, [{$project: {x: 0}}, {$out: "out_collection"}, {$match: {x: true}}], 40601);
}());
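// A minimal sketch of the positioning rule tested above (collection name is
// illustrative; assumes assertErrorCode from
// jstests/aggregation/extras/utils.js): $out must be the final stage, and any
// stage after it fails with code 40601.
const posColl = db.out_position_sketch;
posColl.drop();
assert.doesNotThrow(() => posColl.aggregate([{$match: {}}, {$out: "out_collection"}]));
assertErrorCode(posColl, [{$out: "out_collection"}, {$limit: 1}], 40601);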
diff --git a/jstests/aggregation/sources/project/remove_redundant_projects.js b/jstests/aggregation/sources/project/remove_redundant_projects.js
index e3c7af08573..f1a21264c7e 100644
--- a/jstests/aggregation/sources/project/remove_redundant_projects.js
+++ b/jstests/aggregation/sources/project/remove_redundant_projects.js
@@ -2,150 +2,151 @@
// pipeline that can be covered by a normal query.
// @tags: [do_not_wrap_aggregations_in_facets]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
- load('jstests/libs/analyze_plan.js'); // For planHasStage().
-
- let coll = db.remove_redundant_projects;
- coll.drop();
-
- assert.writeOK(coll.insert({_id: {a: 1, b: 1}, a: 1, c: {d: 1}, e: ['elem1']}));
-
- let indexSpec = {a: 1, 'c.d': 1, 'e.0': 1};
-
- /**
- * Helper to test that for a given pipeline, the same results are returned whether or not an
- * index is present. Also tests whether a projection is absorbed by the pipeline
- * ('expectProjectToCoalesce') and the corresponding project stage ('removedProjectStage') does
- * not exist in the explain output.
- */
- function assertResultsMatch({pipeline = [],
- expectProjectToCoalesce = false,
- removedProjectStage = null,
- index = indexSpec,
- pipelineOptimizedAway = false} = {}) {
- // Add a match stage to ensure index scans are considered for planning (workaround for
- // SERVER-20066).
- pipeline = [{$match: {a: {$gte: 0}}}].concat(pipeline);
-
- // Once with an index.
- assert.commandWorked(coll.createIndex(index));
- let explain = coll.explain().aggregate(pipeline);
- let resultsWithIndex = coll.aggregate(pipeline).toArray();
-
- // Projection does not get pushed down when sharding filter is used.
- if (!explain.hasOwnProperty("shards")) {
- let result;
-
- if (pipelineOptimizedAway) {
- assert(isQueryPlan(explain));
- result = explain.queryPlanner.winningPlan;
- } else {
- assert(isAggregationPlan(explain));
- result = explain.stages[0].$cursor.queryPlanner.winningPlan;
- }
-
- // Check that $project uses the query system.
- assert.eq(expectProjectToCoalesce,
- planHasStage(db, result, "PROJECTION_DEFAULT") ||
- planHasStage(db, result, "PROJECTION_COVERED") ||
- planHasStage(db, result, "PROJECTION_SIMPLE"));
-
- if (!pipelineOptimizedAway) {
- // Check that $project was removed from pipeline and pushed to the query system.
- explain.stages.forEach(function(stage) {
- if (stage.hasOwnProperty("$project"))
- assert.neq(removedProjectStage, stage["$project"]);
- });
- }
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
+load('jstests/libs/analyze_plan.js'); // For planHasStage().
+
+let coll = db.remove_redundant_projects;
+coll.drop();
+
+assert.writeOK(coll.insert({_id: {a: 1, b: 1}, a: 1, c: {d: 1}, e: ['elem1']}));
+
+let indexSpec = {a: 1, 'c.d': 1, 'e.0': 1};
+
+/**
+ * Helper to test that for a given pipeline, the same results are returned whether or not an
+ * index is present. Also tests whether a projection is absorbed by the pipeline
+ * ('expectProjectToCoalesce') and the corresponding project stage ('removedProjectStage') does
+ * not exist in the explain output.
+ */
+function assertResultsMatch({
+ pipeline = [],
+ expectProjectToCoalesce = false,
+ removedProjectStage = null,
+ index = indexSpec,
+ pipelineOptimizedAway = false
+} = {}) {
+ // Add a match stage to ensure index scans are considered for planning (workaround for
+ // SERVER-20066).
+ pipeline = [{$match: {a: {$gte: 0}}}].concat(pipeline);
+
+ // Once with an index.
+ assert.commandWorked(coll.createIndex(index));
+ let explain = coll.explain().aggregate(pipeline);
+ let resultsWithIndex = coll.aggregate(pipeline).toArray();
+
+ // Projection does not get pushed down when sharding filter is used.
+ if (!explain.hasOwnProperty("shards")) {
+ let result;
+
+ if (pipelineOptimizedAway) {
+ assert(isQueryPlan(explain));
+ result = explain.queryPlanner.winningPlan;
+ } else {
+ assert(isAggregationPlan(explain));
+ result = explain.stages[0].$cursor.queryPlanner.winningPlan;
}
- // Again without an index.
- assert.commandWorked(coll.dropIndex(index));
- let resultsWithoutIndex = coll.aggregate(pipeline).toArray();
-
- assert(orderedArrayEq(resultsWithIndex, resultsWithoutIndex));
+ // Check that $project uses the query system.
+ assert.eq(expectProjectToCoalesce,
+ planHasStage(db, result, "PROJECTION_DEFAULT") ||
+ planHasStage(db, result, "PROJECTION_COVERED") ||
+ planHasStage(db, result, "PROJECTION_SIMPLE"));
+
+ if (!pipelineOptimizedAway) {
+ // Check that $project was removed from pipeline and pushed to the query system.
+ explain.stages.forEach(function(stage) {
+ if (stage.hasOwnProperty("$project"))
+ assert.neq(removedProjectStage, stage["$project"]);
+ });
+ }
}
- // Test that covered projections correctly use the query system for projection and the $project
- // stage is removed from the pipeline.
- assertResultsMatch({
- pipeline: [{$project: {_id: 0, a: 1}}],
- expectProjectToCoalesce: true,
- removedProjectStage: {_id: 0, a: 1},
- pipelineOptimizedAway: true
- });
- assertResultsMatch({
- pipeline: [{$project: {_id: 0, a: 1}}, {$group: {_id: null, a: {$sum: "$a"}}}],
- expectProjectToCoalesce: true,
- removedProjectStage: {_id: 0, a: 1}
- });
- assertResultsMatch({
- pipeline: [{$sort: {a: -1}}, {$project: {_id: 0, a: 1}}],
- expectProjectToCoalesce: true,
- removedProjectStage: {_id: 0, a: 1},
- pipelineOptimizedAway: true
- });
- assertResultsMatch({
- pipeline: [
- {$sort: {a: 1, 'c.d': 1}},
- {$project: {_id: 0, a: 1}},
- {$group: {_id: "$a", arr: {$push: "$a"}}}
- ],
- expectProjectToCoalesce: true,
- removedProjectStage: {_id: 0, a: 1}
- });
- assertResultsMatch({
- pipeline: [{$project: {_id: 0, c: {d: 1}}}],
- expectProjectToCoalesce: true,
- removedProjectStage: {_id: 0, c: {d: 1}},
- pipelineOptimizedAway: true
- });
-
- // Test that projections with renamed fields are not removed from the pipeline; however, an
- // inclusion projection is still pushed to the query system.
- assertResultsMatch({pipeline: [{$project: {_id: 0, f: "$a"}}], expectProjectToCoalesce: true});
- assertResultsMatch(
- {pipeline: [{$project: {_id: 0, a: 1, f: "$a"}}], expectProjectToCoalesce: true});
-
- // Test that uncovered projections include the $project stage in the pipeline.
- assertResultsMatch(
- {pipeline: [{$sort: {a: 1}}, {$project: {_id: 1, b: 1}}], expectProjectToCoalesce: false});
- assertResultsMatch({
- pipeline:
- [{$sort: {a: 1}}, {$group: {_id: "$_id", arr: {$push: "$a"}}}, {$project: {arr: 1}}],
- expectProjectToCoalesce: false
- });
-
- // Test that projections with computed fields are kept in the pipeline.
- assertResultsMatch(
- {pipeline: [{$project: {computedField: {$sum: "$a"}}}], expectProjectToCoalesce: false});
- assertResultsMatch({pipeline: [{$project: {a: ["$a", "$b"]}}], expectProjectToCoalesce: false});
- assertResultsMatch({
- pipeline: [{
- $project:
- {e: {$filter: {input: "$e", as: "item", cond: {"$eq": ["$$item", "elem0"]}}}}
- }],
- expectProjectToCoalesce: false
- });
-
- // Test that only the first projection is removed from the pipeline.
- assertResultsMatch({
- pipeline: [
- {$project: {_id: 0, a: 1}},
- {$group: {_id: "$a", arr: {$push: "$a"}, a: {$sum: "$a"}}},
- {$project: {_id: 0}}
- ],
- expectProjectToCoalesce: true,
- removedProjectStage: {_id: 0, a: 1}
- });
-
- // Test that projections on _id with nested fields are not removed from the pipeline. Due to
- // SERVER-7502, the dependency analysis does not generate a covered projection for nested
- // fields in _id and thus we cannot remove the stage.
- indexSpec = {'_id.a': 1, a: 1};
- assertResultsMatch(
- {pipeline: [{$project: {'_id.a': 1}}], expectProjectToCoalesce: false, index: indexSpec});
-
+ // Again without an index.
+ assert.commandWorked(coll.dropIndex(index));
+ let resultsWithoutIndex = coll.aggregate(pipeline).toArray();
+
+ assert(orderedArrayEq(resultsWithIndex, resultsWithoutIndex));
+}
+
+// Test that covered projections correctly use the query system for projection and the $project
+// stage is removed from the pipeline.
+assertResultsMatch({
+ pipeline: [{$project: {_id: 0, a: 1}}],
+ expectProjectToCoalesce: true,
+ removedProjectStage: {_id: 0, a: 1},
+ pipelineOptimizedAway: true
+});
+assertResultsMatch({
+ pipeline: [{$project: {_id: 0, a: 1}}, {$group: {_id: null, a: {$sum: "$a"}}}],
+ expectProjectToCoalesce: true,
+ removedProjectStage: {_id: 0, a: 1}
+});
+assertResultsMatch({
+ pipeline: [{$sort: {a: -1}}, {$project: {_id: 0, a: 1}}],
+ expectProjectToCoalesce: true,
+ removedProjectStage: {_id: 0, a: 1},
+ pipelineOptimizedAway: true
+});
+assertResultsMatch({
+ pipeline: [
+ {$sort: {a: 1, 'c.d': 1}},
+ {$project: {_id: 0, a: 1}},
+ {$group: {_id: "$a", arr: {$push: "$a"}}}
+ ],
+ expectProjectToCoalesce: true,
+ removedProjectStage: {_id: 0, a: 1}
+});
+assertResultsMatch({
+ pipeline: [{$project: {_id: 0, c: {d: 1}}}],
+ expectProjectToCoalesce: true,
+ removedProjectStage: {_id: 0, c: {d: 1}},
+ pipelineOptimizedAway: true
+});
+
+// Test that projections with renamed fields are not removed from the pipeline; however, an
+// inclusion projection is still pushed to the query system.
+assertResultsMatch({pipeline: [{$project: {_id: 0, f: "$a"}}], expectProjectToCoalesce: true});
+assertResultsMatch(
+ {pipeline: [{$project: {_id: 0, a: 1, f: "$a"}}], expectProjectToCoalesce: true});
+
+// Test that uncovered projections include the $project stage in the pipeline.
+assertResultsMatch(
+ {pipeline: [{$sort: {a: 1}}, {$project: {_id: 1, b: 1}}], expectProjectToCoalesce: false});
+assertResultsMatch({
+ pipeline: [{$sort: {a: 1}}, {$group: {_id: "$_id", arr: {$push: "$a"}}}, {$project: {arr: 1}}],
+ expectProjectToCoalesce: false
+});
+
+// Test that projections with computed fields are kept in the pipeline.
+assertResultsMatch(
+ {pipeline: [{$project: {computedField: {$sum: "$a"}}}], expectProjectToCoalesce: false});
+assertResultsMatch({pipeline: [{$project: {a: ["$a", "$b"]}}], expectProjectToCoalesce: false});
+assertResultsMatch({
+ pipeline:
+ [{$project: {e: {$filter: {input: "$e", as: "item", cond: {"$eq": ["$$item", "elem0"]}}}}}],
+ expectProjectToCoalesce: false
+});
+
+// Test that only the first projection is removed from the pipeline.
+assertResultsMatch({
+ pipeline: [
+ {$project: {_id: 0, a: 1}},
+ {$group: {_id: "$a", arr: {$push: "$a"}, a: {$sum: "$a"}}},
+ {$project: {_id: 0}}
+ ],
+ expectProjectToCoalesce: true,
+ removedProjectStage: {_id: 0, a: 1}
+});
+
+// Test that projections on _id with nested fields are not removed from the pipeline. Due to
+// SERVER-7502, the dependency analysis does not generate a covered projection for nested
+// fields in _id and thus we cannot remove the stage.
+indexSpec = {
+ '_id.a': 1,
+ a: 1
+};
+assertResultsMatch(
+ {pipeline: [{$project: {'_id.a': 1}}], expectProjectToCoalesce: false, index: indexSpec});
}());
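// A minimal sketch of the explain check used above (collection name is
// illustrative): a covered inclusion projection should surface as a
// PROJECTION_* stage in the winning plan rather than remain as a $project
// stage in the pipeline.
const pushdownColl = db.project_pushdown_sketch;
pushdownColl.drop();
assert.commandWorked(pushdownColl.insert({a: 1}));
assert.commandWorked(pushdownColl.createIndex({a: 1}));
const sketchExplain = pushdownColl.explain().aggregate(
    [{$match: {a: {$gte: 0}}}, {$project: {_id: 0, a: 1}}]);
printjson(sketchExplain);  // Inspect for PROJECTION_COVERED in the winning plan.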
diff --git a/jstests/aggregation/sources/redact/collation_redact.js b/jstests/aggregation/sources/redact/collation_redact.js
index 36304e9a7f2..7ff1e1ad4f1 100644
--- a/jstests/aggregation/sources/redact/collation_redact.js
+++ b/jstests/aggregation/sources/redact/collation_redact.js
@@ -3,38 +3,37 @@
// Test that the $redact stage respects the collation.
(function() {
- "use strict";
-
- var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
-
- var coll = db.collation_redact;
- coll.drop();
- assert.writeOK(coll.insert({a: "a"}));
-
- // Test that $redact respects an explicit collation. Since the top-level of the document gets
- // pruned, we end up redacting the entire document and returning no results.
- assert.eq(0,
- coll.aggregate([{$redact: {$cond: [{$eq: ["A", "a"]}, "$$PRUNE", "$$KEEP"]}}],
- caseInsensitive)
- .itcount());
-
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
- assert.writeOK(coll.insert({a: "a"}));
-
- // Test that $redact respects the inherited collation. Since the top-level of the document gets
- // pruned, we end up redacting the entire document and returning no results.
- assert.eq(
- 0,
- coll.aggregate([{$redact: {$cond: [{$eq: ["A", "a"]}, "$$PRUNE", "$$KEEP"]}}]).itcount());
-
- // Test that a $match which can be optimized to be pushed before the $redact respects the
- // collation.
- assert.eq(1, coll.aggregate([{$redact: "$$KEEP"}, {$match: {a: "A"}}]).itcount());
-
- // Comparison to the internal constants bound to the $$KEEP, $$PRUNE, and $$DESCEND variables
- // should not respect the collation.
- assert.throws(() => coll.aggregate([{$redact: "KEEP"}], caseInsensitive));
- assert.throws(() => coll.aggregate([{$redact: "PRUNE"}], caseInsensitive));
- assert.throws(() => coll.aggregate([{$redact: "REDACT"}], caseInsensitive));
+"use strict";
+
+var caseInsensitive = {collation: {locale: "en_US", strength: 2}};
+
+var coll = db.collation_redact;
+coll.drop();
+assert.writeOK(coll.insert({a: "a"}));
+
+// Test that $redact respects an explicit collation. Since the top-level of the document gets
+// pruned, we end up redacting the entire document and returning no results.
+assert.eq(
+ 0,
+ coll.aggregate([{$redact: {$cond: [{$eq: ["A", "a"]}, "$$PRUNE", "$$KEEP"]}}], caseInsensitive)
+ .itcount());
+
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.writeOK(coll.insert({a: "a"}));
+
+// Test that $redact respects the inherited collation. Since the top-level of the document gets
+// pruned, we end up redacting the entire document and returning no results.
+assert.eq(0,
+ coll.aggregate([{$redact: {$cond: [{$eq: ["A", "a"]}, "$$PRUNE", "$$KEEP"]}}]).itcount());
+
+// Test that a $match which can be optimized to be pushed before the $redact respects the
+// collation.
+assert.eq(1, coll.aggregate([{$redact: "$$KEEP"}, {$match: {a: "A"}}]).itcount());
+
+// Comparison to the internal constants bound to the $$KEEP, $$PRUNE, and $$DESCEND variables
+// should not respect the collation.
+assert.throws(() => coll.aggregate([{$redact: "KEEP"}], caseInsensitive));
+assert.throws(() => coll.aggregate([{$redact: "PRUNE"}], caseInsensitive));
+assert.throws(() => coll.aggregate([{$redact: "REDACT"}], caseInsensitive));
})();
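// A minimal sketch of the collation interaction tested above (collection name
// is illustrative): with a strength-2 collation, {$eq: ["A", "a"]} evaluates
// true, so $redact prunes the root document and the pipeline returns nothing.
const redactSketch = db.redact_collation_sketch;
redactSketch.drop();
assert.writeOK(redactSketch.insert({a: "a"}));
assert.eq(0,
          redactSketch
              .aggregate([{$redact: {$cond: [{$eq: ["A", "a"]}, "$$PRUNE", "$$KEEP"]}}],
                         {collation: {locale: "en_US", strength: 2}})
              .itcount());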
diff --git a/jstests/aggregation/sources/replaceRoot/address.js b/jstests/aggregation/sources/replaceRoot/address.js
index 32ac3df2626..537ec7d50ac 100644
--- a/jstests/aggregation/sources/replaceRoot/address.js
+++ b/jstests/aggregation/sources/replaceRoot/address.js
@@ -7,99 +7,98 @@
*/
(function() {
- "use strict";
+"use strict";
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
- const dbName = "test";
- const collName = jsTest.name();
+const dbName = "test";
+const collName = jsTest.name();
- Random.setRandomSeed();
+Random.setRandomSeed();
- /**
- * Helper to get a random entry out of an array.
- */
- function randomChoice(array) {
- return array[Random.randInt(array.length)];
- }
-
- /**
- * Helper to generate a randomized document with the following schema:
- * {
- * name: <string>,
- * address: {number: <3-digit int>, street: <string>, city: <string>, zip: <5-digit int>}
- * }
- */
- function generateRandomDocument() {
- let names = ["Asya", "Charlie", "Dan", "Geert", "Kyle"];
- const minNumber = 1;
- const maxNumber = 999;
- let streets = ["3rd", "4th", "5th", "6th", "7th", "8th", "9th"];
- let cities = ["New York", "Palo Alto", "Sydney", "Dublin"];
- const minZip = 10000;
- const maxZip = 99999;
+/**
+ * Helper to get a random entry out of an array.
+ */
+function randomChoice(array) {
+ return array[Random.randInt(array.length)];
+}
- return {
- names: randomChoice(names),
- address: {
- number: Random.randInt(maxNumber - minNumber + 1) + minNumber,
- street: randomChoice(streets),
- city: randomChoice(cities),
- zip: Random.randInt(maxZip - minZip + 1) + minZip,
- },
- };
- }
+/**
+ * Helper to generate a randomized document with the following schema:
+ * {
+ * name: <string>,
+ * address: {number: <3-digit int>, street: <string>, city: <string>, zip: <5-digit int>}
+ * }
+ */
+function generateRandomDocument() {
+ let names = ["Asya", "Charlie", "Dan", "Geert", "Kyle"];
+ const minNumber = 1;
+ const maxNumber = 999;
+ let streets = ["3rd", "4th", "5th", "6th", "7th", "8th", "9th"];
+ let cities = ["New York", "Palo Alto", "Sydney", "Dublin"];
+ const minZip = 10000;
+ const maxZip = 99999;
- function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
+ return {
+ names: randomChoice(names),
+ address: {
+ number: Random.randInt(maxNumber - minNumber + 1) + minNumber,
+ street: randomChoice(streets),
+ city: randomChoice(cities),
+ zip: Random.randInt(maxZip - minZip + 1) + minZip,
+ },
+ };
+}
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert(generateRandomDocument());
- }
- assert.writeOK(bulk.execute());
+function doExecutionTest(conn) {
+ const coll = conn.getDB(dbName).getCollection(collName);
+ coll.drop();
- // Extract the contents of the address field, and make sure that doing the same
- // with replaceRoot yields the correct answer.
- // First compute each separately, since we know all of the fields in the address,
- // to make sure we have the correct results.
- let addressPipe = [{
- $project: {
- "_id": 0,
- "number": "$address.number",
- "street": "$address.street",
- "city": "$address.city",
- "zip": "$address.zip"
- }
- }];
- let correctAddresses = coll.aggregate(addressPipe).toArray();
+ // Insert a bunch of documents of the form above.
+ const nDocs = 10;
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; i++) {
+ bulk.insert(generateRandomDocument());
+ }
+ assert.writeOK(bulk.execute());
- // Then compute the same results using $replaceRoot.
- let replaceWithResult = coll.aggregate([
- {$replaceRoot: {newRoot: "$address"}},
- {$sort: {city: 1, zip: 1, street: 1, number: 1}}
- ])
- .toArray();
+ // Extract the contents of the address field, and make sure that doing the same
+ // with replaceRoot yields the correct answer.
+ // First compute each separately, since we know all of the fields in the address,
+ // to make sure we have the correct results.
+ let addressPipe = [{
+ $project: {
+ "_id": 0,
+ "number": "$address.number",
+ "street": "$address.street",
+ "city": "$address.city",
+ "zip": "$address.zip"
+ }
+ }];
+ let correctAddresses = coll.aggregate(addressPipe).toArray();
- // Then assert they are the same.
- assert(
- arrayEq(replaceWithResult, correctAddresses),
- "$replaceRoot does not work the same as $project-ing the relevant fields to the top level");
- }
+ // Then compute the same results using $replaceRoot.
+ let replaceWithResult = coll.aggregate([
+ {$replaceRoot: {newRoot: "$address"}},
+ {$sort: {city: 1, zip: 1, street: 1, number: 1}}
+ ])
+ .toArray();
- // Test against the standalone started by resmoke.py.
- let conn = db.getMongo();
- doExecutionTest(conn);
- print("Success! Standalone execution test for $replaceRoot passed.");
+ // Then assert they are the same.
+ assert(
+ arrayEq(replaceWithResult, correctAddresses),
+ "$replaceRoot does not work the same as $project-ing the relevant fields to the top level");
+}
- // Test against a sharded cluster.
- let st = new ShardingTest({shards: 2});
- doExecutionTest(st.s0);
- st.stop();
- print("Success! Sharding test for $replaceRoot passed.");
+// Test against the standalone started by resmoke.py.
+let conn = db.getMongo();
+doExecutionTest(conn);
+print("Success! Standalone execution test for $replaceRoot passed.");
+// Test against a sharded cluster.
+let st = new ShardingTest({shards: 2});
+doExecutionTest(st.s0);
+st.stop();
+print("Success! Sharding test for $replaceRoot passed.");
}());
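// A minimal sketch of the equivalence checked above (data is illustrative):
// $replaceRoot with newRoot: "$address" promotes the embedded document to the
// top level, just like projecting each address field individually.
const addrSketch = db.replace_root_sketch;
addrSketch.drop();
assert.writeOK(addrSketch.insert({name: "Asya", address: {street: "3rd", zip: 10001}}));
assert.eq([{street: "3rd", zip: 10001}],
          addrSketch.aggregate([{$replaceRoot: {newRoot: "$address"}}]).toArray());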
diff --git a/jstests/aggregation/sources/replaceRoot/use_cases.js b/jstests/aggregation/sources/replaceRoot/use_cases.js
index d66129df2e9..cb58ddac5c8 100644
--- a/jstests/aggregation/sources/replaceRoot/use_cases.js
+++ b/jstests/aggregation/sources/replaceRoot/use_cases.js
@@ -1,25 +1,22 @@
// Basic integration tests for $replaceRoot and its alias $replaceWith.
(function() {
- "use strict";
+"use strict";
- const coll = db.replaceWith_use_cases;
- coll.drop();
+const coll = db.replaceWith_use_cases;
+coll.drop();
- assert.commandWorked(coll.insert([
- {_id: 0, comments: [{user_id: "x", comment: "foo"}, {user_id: "y", comment: "bar"}]},
- {_id: 1, comments: [{user_id: "y", comment: "bar again"}]}
- ]));
+assert.commandWorked(coll.insert([
+ {_id: 0, comments: [{user_id: "x", comment: "foo"}, {user_id: "y", comment: "bar"}]},
+ {_id: 1, comments: [{user_id: "y", comment: "bar again"}]}
+]));
- // Test computing the most frequent commenters using $replaceRoot.
- let pipeline = [
- {$unwind: "$comments"},
- {$replaceRoot: {newRoot: "$comments"}},
- {$sortByCount: "$user_id"}
- ];
- const expectedResults = [{_id: "y", count: 2}, {_id: "x", count: 1}];
- assert.eq(coll.aggregate(pipeline).toArray(), expectedResults);
+// Test computing the most frequent commenters using $replaceRoot.
+let pipeline =
+ [{$unwind: "$comments"}, {$replaceRoot: {newRoot: "$comments"}}, {$sortByCount: "$user_id"}];
+const expectedResults = [{_id: "y", count: 2}, {_id: "x", count: 1}];
+assert.eq(coll.aggregate(pipeline).toArray(), expectedResults);
- // Test the same thing but using the $replaceWith alias.
- pipeline = [{$unwind: "$comments"}, {$replaceWith: "$comments"}, {$sortByCount: "$user_id"}];
- assert.eq(coll.aggregate(pipeline).toArray(), expectedResults);
+// Test the same thing but using the $replaceWith alias.
+pipeline = [{$unwind: "$comments"}, {$replaceWith: "$comments"}, {$sortByCount: "$user_id"}];
+assert.eq(coll.aggregate(pipeline).toArray(), expectedResults);
}());
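// A minimal sketch of the alias relationship tested above (collection name is
// illustrative): {$replaceWith: <expr>} is shorthand for
// {$replaceRoot: {newRoot: <expr>}}, so both pipelines below return the same
// result.
const aliasSketch = db.replace_with_sketch;
aliasSketch.drop();
assert.commandWorked(aliasSketch.insert({_id: 0, comments: [{user_id: "x"}]}));
assert.eq(aliasSketch.aggregate([{$unwind: "$comments"}, {$replaceRoot: {newRoot: "$comments"}}])
              .toArray(),
          aliasSketch.aggregate([{$unwind: "$comments"}, {$replaceWith: "$comments"}]).toArray());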
diff --git a/jstests/aggregation/sources/sort/collation_sort.js b/jstests/aggregation/sources/sort/collation_sort.js
index 8febbafb857..6d8b20f9ab2 100644
--- a/jstests/aggregation/sources/sort/collation_sort.js
+++ b/jstests/aggregation/sources/sort/collation_sort.js
@@ -1,95 +1,95 @@
// Test that the $sort stage respects the collation.
(function() {
- "use strict";
+"use strict";
- // In French, words are sometimes ordered on the secondary level (a.k.a. at the level of
- // diacritical marks) by the *last* accent difference rather than the first. This is specified
- // by the {backwards: true} option.
- //
- // For example, côte < coté, since the last accent difference is "e" < "é". Without the reverse
- // accent weighting turned on, these two words would sort in the opposite order, since "ô" >
- // "o".
- var frenchAccentOrdering = {collation: {locale: "fr", backwards: true}};
+// In French, words are sometimes ordered on the secondary level (a.k.a. at the level of
+// diacritical marks) by the *last* accent difference rather than the first. This is specified
+// by the {backwards: true} option.
+//
+// For example, côte < coté, since the last accent difference is "e" < "é". Without the reverse
+// accent weighting turned on, these two words would sort in the opposite order, since "ô" >
+// "o".
+var frenchAccentOrdering = {collation: {locale: "fr", backwards: true}};
- var coll = db.collation_sort;
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, word1: "pêche", word2: "côté"}));
- assert.writeOK(coll.insert({_id: 2, word1: "pêche", word2: "coté"}));
- assert.writeOK(coll.insert({_id: 3, word1: "pêche", word2: "côte"}));
- assert.writeOK(coll.insert({_id: 4, word1: "pèché", word2: "côté"}));
- assert.writeOK(coll.insert({_id: 5, word1: "pèché", word2: "coté"}));
- assert.writeOK(coll.insert({_id: 6, word1: "pèché", word2: "côte"}));
- assert.writeOK(coll.insert({_id: 7, word1: "pêché", word2: "côté"}));
- assert.writeOK(coll.insert({_id: 8, word1: "pêché", word2: "coté"}));
- assert.writeOK(coll.insert({_id: 9, word1: "pêché", word2: "côte"}));
+var coll = db.collation_sort;
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, word1: "pêche", word2: "côté"}));
+assert.writeOK(coll.insert({_id: 2, word1: "pêche", word2: "coté"}));
+assert.writeOK(coll.insert({_id: 3, word1: "pêche", word2: "côte"}));
+assert.writeOK(coll.insert({_id: 4, word1: "pèché", word2: "côté"}));
+assert.writeOK(coll.insert({_id: 5, word1: "pèché", word2: "coté"}));
+assert.writeOK(coll.insert({_id: 6, word1: "pèché", word2: "côte"}));
+assert.writeOK(coll.insert({_id: 7, word1: "pêché", word2: "côté"}));
+assert.writeOK(coll.insert({_id: 8, word1: "pêché", word2: "coté"}));
+assert.writeOK(coll.insert({_id: 9, word1: "pêché", word2: "côte"}));
- // Test that ascending sort respects the collation.
- assert.eq([{_id: "pèché"}, {_id: "pêche"}, {_id: "pêché"}],
- coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: 1}}]).toArray());
- assert.eq([{_id: "pêche"}, {_id: "pèché"}, {_id: "pêché"}],
- coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: 1}}], frenchAccentOrdering)
- .toArray());
+// Test that ascending sort respects the collation.
+assert.eq([{_id: "pèché"}, {_id: "pêche"}, {_id: "pêché"}],
+ coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: 1}}]).toArray());
+assert.eq(
+ [{_id: "pêche"}, {_id: "pèché"}, {_id: "pêché"}],
+ coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: 1}}], frenchAccentOrdering).toArray());
- // Test that descending sort respects the collation.
- assert.eq([{_id: "pêché"}, {_id: "pêche"}, {_id: "pèché"}],
- coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: -1}}]).toArray());
- assert.eq([{_id: "pêché"}, {_id: "pèché"}, {_id: "pêche"}],
- coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: -1}}], frenchAccentOrdering)
- .toArray());
+// Test that descending sort respects the collation.
+assert.eq([{_id: "pêché"}, {_id: "pêche"}, {_id: "pèché"}],
+ coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: -1}}]).toArray());
+assert.eq([{_id: "pêché"}, {_id: "pèché"}, {_id: "pêche"}],
+ coll.aggregate([{$group: {_id: "$word1"}}, {$sort: {_id: -1}}], frenchAccentOrdering)
+ .toArray());
- // Test that compound, mixed ascending/descending sort respects the collation.
- assert.eq([4, 6, 5, 1, 3, 2, 7, 9, 8],
- coll.aggregate([
+// Test that compound, mixed ascending/descending sort respects the collation.
+assert.eq([4, 6, 5, 1, 3, 2, 7, 9, 8],
+ coll.aggregate([
+ {$sort: {word1: 1, word2: -1}},
+ {$project: {_id: 1}},
+ {$group: {_id: null, out: {$push: "$_id"}}}
+ ])
+ .toArray()[0]
+ .out);
+assert.eq([1, 2, 3, 4, 5, 6, 7, 8, 9],
+ coll.aggregate(
+ [
{$sort: {word1: 1, word2: -1}},
{$project: {_id: 1}},
{$group: {_id: null, out: {$push: "$_id"}}}
- ])
- .toArray()[0]
- .out);
- assert.eq([1, 2, 3, 4, 5, 6, 7, 8, 9],
- coll.aggregate(
- [
- {$sort: {word1: 1, word2: -1}},
- {$project: {_id: 1}},
- {$group: {_id: null, out: {$push: "$_id"}}}
- ],
- frenchAccentOrdering)
- .toArray()[0]
- .out);
+ ],
+ frenchAccentOrdering)
+ .toArray()[0]
+ .out);
- // Test that compound, mixed descending/ascending sort respects the collation.
- assert.eq([8, 9, 7, 2, 3, 1, 5, 6, 4],
- coll.aggregate([
+// Test that compound, mixed descending/ascending sort respects the collation.
+assert.eq([8, 9, 7, 2, 3, 1, 5, 6, 4],
+ coll.aggregate([
+ {$sort: {word1: -1, word2: 1}},
+ {$project: {_id: 1}},
+ {$group: {_id: null, out: {$push: "$_id"}}}
+ ])
+ .toArray()[0]
+ .out);
+assert.eq([9, 8, 7, 6, 5, 4, 3, 2, 1],
+ coll.aggregate(
+ [
{$sort: {word1: -1, word2: 1}},
{$project: {_id: 1}},
{$group: {_id: null, out: {$push: "$_id"}}}
- ])
- .toArray()[0]
- .out);
- assert.eq([9, 8, 7, 6, 5, 4, 3, 2, 1],
- coll.aggregate(
- [
- {$sort: {word1: -1, word2: 1}},
- {$project: {_id: 1}},
- {$group: {_id: null, out: {$push: "$_id"}}}
- ],
- frenchAccentOrdering)
- .toArray()[0]
- .out);
+ ],
+ frenchAccentOrdering)
+ .toArray()[0]
+ .out);
- // Test that sort inside a $facet respects the collation.
- const results = coll.aggregate([{
- $facet: {
- fct: [
- {$sort: {word1: -1, word2: 1}},
- {$project: {_id: 1}},
- {$group: {_id: null, out: {$push: "$_id"}}}
- ]
- }
- }],
- frenchAccentOrdering)
- .toArray();
- assert.eq(1, results.length);
- assert.eq(1, results[0].fct.length);
- assert.eq([9, 8, 7, 6, 5, 4, 3, 2, 1], results[0].fct[0].out);
+// Test that sort inside a $facet respects the collation.
+const results = coll.aggregate([{
+ $facet: {
+ fct: [
+ {$sort: {word1: -1, word2: 1}},
+ {$project: {_id: 1}},
+ {$group: {_id: null, out: {$push: "$_id"}}}
+ ]
+ }
+ }],
+ frenchAccentOrdering)
+ .toArray();
+assert.eq(1, results.length);
+assert.eq(1, results[0].fct.length);
+assert.eq([9, 8, 7, 6, 5, 4, 3, 2, 1], results[0].fct[0].out);
})();
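// A minimal sketch of the reverse-accent ordering tested above (collection
// name is illustrative): under {locale: "fr", backwards: true}, côte sorts
// before coté because the *last* accent difference decides the secondary
// comparison.
const frSketch = db.french_sort_sketch;
frSketch.drop();
assert.writeOK(frSketch.insert([{w: "coté"}, {w: "côte"}]));
assert.eq(["côte", "coté"],
          frSketch
              .aggregate([{$sort: {w: 1}}, {$project: {_id: 0, w: 1}}],
                         {collation: {locale: "fr", backwards: true}})
              .toArray()
              .map(doc => doc.w));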
diff --git a/jstests/aggregation/sources/sort/collation_sort_japanese.js b/jstests/aggregation/sources/sort/collation_sort_japanese.js
index 5bfad05af31..9051ed45aa7 100644
--- a/jstests/aggregation/sources/sort/collation_sort_japanese.js
+++ b/jstests/aggregation/sources/sort/collation_sort_japanese.js
@@ -4,143 +4,148 @@
* aggregation_sharded_collections_passthrough.)
*/
(function() {
- "use strict";
-
- Random.setRandomSeed();
- const coll = db.getCollection("collation_sort_japanese");
-
- // In Japanese, the order of vowels is a, i, u, e, o. The sorting of mixed katakana and hiragana
- // vowels differs depending on the collation:
- //
- // - With the simple collation, hiragana vowels come first (in order), followed by katakana.
- // - In the Japanese locale, vowels with the same sound sort together. Whether hiragana or
- // katakana comes first depends on the strength level of the collation.
- const data = [
- {kana: "ア", val: 0, name: "katakana a"},
- {kana: "イ", val: 1, name: "katakana i"},
- {kana: "ウ", val: 2, name: "katakana u"},
- {kana: "エ", val: 3, name: "katakana e"},
- {kana: "オ", val: 4, name: "katakana o"},
- {kana: "あ", val: 5, name: "hiragana a"},
- {kana: "い", val: 6, name: "hiragana i"},
- {kana: "う", val: 7, name: "hiragana u"},
- {kana: "え", val: 8, name: "hiragana e"},
- {kana: "お", val: 9, name: "hiragana o"},
- ];
-
- const simpleCollation = {locale: "simple"};
- const jaCollationStr3 = {locale: "ja"};
- const jaCollationStr4 = {locale: "ja", strength: 4};
-
- /**
- * Inserts each doc of 'docs' into the collection in no specified order before running tests.
- */
- function runTests(docs) {
- let bulk = coll.initializeUnorderedBulkOp();
- for (let doc of docs) {
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- let sortOrder;
-
- function assertAggregationSortOrder(collation, expectedVals) {
- let expectedDocs = expectedVals.map(val => ({val: val}));
- let result = coll.aggregate([{$sort: sortOrder}, {$project: {_id: 0, val: 1}}],
- {collation: collation})
- .toArray();
- assert.eq(result,
- expectedDocs,
- "sort returned wrong order with sort pattern " + tojson(sortOrder) +
- " and collation " + tojson(collation));
-
- // Run the same aggregation, but in a sharded cluster, force the merging to be performed
- // on a shard instead of on mongos.
- result = coll.aggregate(
- [
- {$_internalSplitPipeline: {mergeType: "anyShard"}},
- {$sort: sortOrder},
- {$project: {_id: 0, val: 1}}
- ],
- {collation: collation})
- .toArray();
- assert.eq(result,
- expectedDocs,
- "sort returned wrong order with sort pattern " + tojson(sortOrder) +
- " and collation " + tojson(collation) + " when merging on a shard");
- }
-
- // Start with a sort on a single key.
- sortOrder = {kana: 1};
-
- // With the binary collation, hiragana codepoints sort before katakana codepoints.
- assertAggregationSortOrder(simpleCollation, [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]);
-
- // With the Japanese collation at strength 4, a hiragana codepoint always sorts before its
- // equivalent katakana.
- assertAggregationSortOrder(jaCollationStr4, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4]);
+"use strict";
+
+Random.setRandomSeed();
+const coll = db.getCollection("collation_sort_japanese");
+
+// In Japanese, the order of vowels is a, i, u, e, o. The sorting of mixed katakana and hiragana
+// vowels differs depending on the collation:
+//
+// - With the simple collation, hiragana vowels come first (in order), followed by katakana.
+// - In the Japanese locale, vowels with the same sound sort together. Whether hiragana or
+// katakana comes first depends on the strength level of the collation.
+const data = [
+ {kana: "ア", val: 0, name: "katakana a"},
+ {kana: "イ", val: 1, name: "katakana i"},
+ {kana: "ウ", val: 2, name: "katakana u"},
+ {kana: "エ", val: 3, name: "katakana e"},
+ {kana: "オ", val: 4, name: "katakana o"},
+ {kana: "あ", val: 5, name: "hiragana a"},
+ {kana: "い", val: 6, name: "hiragana i"},
+ {kana: "う", val: 7, name: "hiragana u"},
+ {kana: "え", val: 8, name: "hiragana e"},
+ {kana: "お", val: 9, name: "hiragana o"},
+];
+
+const simpleCollation = {
+ locale: "simple"
+};
+const jaCollationStr3 = {
+ locale: "ja"
+};
+const jaCollationStr4 = {
+ locale: "ja",
+ strength: 4
+};
- // Test a sort on a compound key.
- sortOrder = {kana: 1, val: 1};
-
- // With the binary collation, hiragana codepoints sort before katakana codepoints.
- assertAggregationSortOrder(simpleCollation, [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]);
+/**
+ * Inserts each doc of 'docs' into the collection in an unspecified order before running tests.
+ */
+function runTests(docs) {
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let doc of docs) {
+ bulk.insert(doc);
+ }
+ assert.writeOK(bulk.execute());
- // With the default Japanese collation, hiragana and katakana with the same pronunciation
- // sort together but with no specified order. The compound sort on "val" breaks the tie and
- // puts the katakana first.
- assertAggregationSortOrder(jaCollationStr3, [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]);
+ let sortOrder;
- // With the Japanese collation at strength 4, a hiragana codepoint always sorts before its
- // equivalent katakana.
- assertAggregationSortOrder(jaCollationStr4, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4]);
+ function assertAggregationSortOrder(collation, expectedVals) {
+ let expectedDocs = expectedVals.map(val => ({val: val}));
+ let result = coll.aggregate([{$sort: sortOrder}, {$project: {_id: 0, val: 1}}],
+ {collation: collation})
+ .toArray();
+ assert.eq(result,
+ expectedDocs,
+ "sort returned wrong order with sort pattern " + tojson(sortOrder) +
+ " and collation " + tojson(collation));
+
+        // Run the same aggregation but, in a sharded cluster, force the merging to be
+        // performed on a shard instead of on mongos.
+ result = coll.aggregate(
+ [
+ {$_internalSplitPipeline: {mergeType: "anyShard"}},
+ {$sort: sortOrder},
+ {$project: {_id: 0, val: 1}}
+ ],
+ {collation: collation})
+ .toArray();
+ assert.eq(result,
+ expectedDocs,
+ "sort returned wrong order with sort pattern " + tojson(sortOrder) +
+ " and collation " + tojson(collation) + " when merging on a shard");
}
- // Test sorting documents with only scalar values.
- coll.drop();
- runTests(data);
-
- // Test sorting documents containing singleton arrays.
- assert(coll.drop());
- runTests(data.map(doc => {
- let copy = Object.extend({}, doc);
+ // Start with a sort on a single key.
+ sortOrder = {kana: 1};
+
+ // With the binary collation, hiragana codepoints sort before katakana codepoints.
+ assertAggregationSortOrder(simpleCollation, [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]);
+
+ // With the Japanese collation at strength 4, a hiragana codepoint always sorts before its
+ // equivalent katakana.
+ assertAggregationSortOrder(jaCollationStr4, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4]);
+
+ // Test a sort on a compound key.
+ sortOrder = {kana: 1, val: 1};
+
+ // With the binary collation, hiragana codepoints sort before katakana codepoints.
+ assertAggregationSortOrder(simpleCollation, [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]);
+
+ // With the default Japanese collation, hiragana and katakana with the same pronunciation
+ // sort together but with no specified order. The compound sort on "val" breaks the tie and
+ // puts the katakana first.
+ assertAggregationSortOrder(jaCollationStr3, [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]);
+
+ // With the Japanese collation at strength 4, a hiragana codepoint always sorts before its
+ // equivalent katakana.
+ assertAggregationSortOrder(jaCollationStr4, [5, 0, 6, 1, 7, 2, 8, 3, 9, 4]);
+}
+
+// Test sorting documents with only scalar values.
+coll.drop();
+runTests(data);
+
+// Test sorting documents containing singleton arrays.
+assert(coll.drop());
+runTests(data.map(doc => {
+ let copy = Object.extend({}, doc);
+ copy.kana = [copy.kana];
+ return copy;
+}));
+
+// Test sorting documents containing arrays with multiple elements.
+assert(coll.drop());
+runTests(data.map(doc => {
+ let copy = Object.extend({}, doc);
+ copy.kana = [copy.kana, copy.kana, copy.kana];
+ return copy;
+}));
+
+// Test sorting documents where some values are scalars and others are arrays.
+assert(coll.drop());
+runTests(data.map(doc => {
+ let copy = Object.extend({}, doc);
+ if (Math.random() < 0.5) {
copy.kana = [copy.kana];
- return copy;
- }));
-
- // Test sorting documents containing arrays with multiple elements.
- assert(coll.drop());
- runTests(data.map(doc => {
- let copy = Object.extend({}, doc);
- copy.kana = [copy.kana, copy.kana, copy.kana];
- return copy;
- }));
-
- // Test sorting documents where some values are scalars and others are arrays.
- assert(coll.drop());
- runTests(data.map(doc => {
- let copy = Object.extend({}, doc);
- if (Math.random() < 0.5) {
- copy.kana = [copy.kana];
- }
- return copy;
- }));
-
- // Create indexes that provide sorts and assert that the results are equivalent.
- assert(coll.drop());
- assert.commandWorked(
- coll.createIndex({kana: 1}, {name: "k1_jaStr3", collation: jaCollationStr3}));
- assert.commandWorked(
- coll.createIndex({kana: 1}, {name: "k1_jaStr4", collation: jaCollationStr4}));
- assert.commandWorked(
- coll.createIndex({kana: 1, val: 1}, {name: "k1v1_jaStr3", collation: jaCollationStr3}));
- assert.commandWorked(
- coll.createIndex({kana: 1, val: 1}, {name: "k1v1_jaStr4", collation: jaCollationStr4}));
- runTests(data.map(doc => {
- let copy = Object.extend({}, doc);
- if (Math.random() < 0.5) {
- copy.kana = [copy.kana];
- }
- return copy;
- }));
+ }
+ return copy;
+}));
+
+// Create indexes that provide sorts and assert that the results are equivalent.
+assert(coll.drop());
+assert.commandWorked(coll.createIndex({kana: 1}, {name: "k1_jaStr3", collation: jaCollationStr3}));
+assert.commandWorked(coll.createIndex({kana: 1}, {name: "k1_jaStr4", collation: jaCollationStr4}));
+assert.commandWorked(
+ coll.createIndex({kana: 1, val: 1}, {name: "k1v1_jaStr3", collation: jaCollationStr3}));
+assert.commandWorked(
+ coll.createIndex({kana: 1, val: 1}, {name: "k1v1_jaStr4", collation: jaCollationStr4}));
+runTests(data.map(doc => {
+ let copy = Object.extend({}, doc);
+ if (Math.random() < 0.5) {
+ copy.kana = [copy.kana];
+ }
+ return copy;
+}));
}());
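
The strength values in this file map onto ICU comparison levels: at the default strength (3) the Japanese collation treats a kana character and its counterpart in the other syllabary as equal, and only strength 4 makes the hiragana/katakana difference significant. A minimal sketch of that distinction through equality matching, which follows from the orderings the test expects (collection name illustrative):

db.kana_demo.drop();
db.kana_demo.insert({k: "あ"});  // hiragana a
db.kana_demo.insert({k: "ア"});  // katakana a
// Strength 3 compares the two kana as equal, so both documents match...
assert.eq(2, db.kana_demo.find({k: "あ"}).collation({locale: "ja"}).itcount());
// ...while strength 4 tells them apart.
assert.eq(1, db.kana_demo.find({k: "あ"}).collation({locale: "ja", strength: 4}).itcount());
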
diff --git a/jstests/aggregation/sources/sort/explain_sort.js b/jstests/aggregation/sources/sort/explain_sort.js
index 19d4fb0c7ba..d519ea323c7 100644
--- a/jstests/aggregation/sources/sort/explain_sort.js
+++ b/jstests/aggregation/sources/sort/explain_sort.js
@@ -2,60 +2,60 @@
// designed to reproduce SERVER-33084.
// @tags: [do_not_wrap_aggregations_in_facets]
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
-
- const coll = db.explain_sort;
- coll.drop();
-
- const kNumDocs = 10;
-
- // Return whether or not explain() was successful and contained the appropriate fields given the
- // requested verbosity.
- function checkResults(results, verbosity) {
- let cursorSubdocs = getAggPlanStages(results, "$cursor");
- let nReturned = 0;
- let nExamined = 0;
- assert.gt(cursorSubdocs.length, 0);
- for (let stageResult of cursorSubdocs) {
- const result = stageResult.$cursor;
- if (verbosity === "queryPlanner") {
- assert(!result.hasOwnProperty("executionStats"), tojson(results));
- } else {
- nReturned += result.executionStats.nReturned;
- nExamined += result.executionStats.totalDocsExamined;
- }
- }
- if (verbosity != "queryPlanner") {
- assert.eq(nReturned, kNumDocs, tojson(results));
- assert.eq(nExamined, kNumDocs, tojson(results));
+"use strict";
+
+load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+
+const coll = db.explain_sort;
+coll.drop();
+
+const kNumDocs = 10;
+
+// Assert that the explain() output was successful and contains the appropriate fields given
+// the requested verbosity.
+function checkResults(results, verbosity) {
+ let cursorSubdocs = getAggPlanStages(results, "$cursor");
+ let nReturned = 0;
+ let nExamined = 0;
+ assert.gt(cursorSubdocs.length, 0);
+ for (let stageResult of cursorSubdocs) {
+ const result = stageResult.$cursor;
+ if (verbosity === "queryPlanner") {
+ assert(!result.hasOwnProperty("executionStats"), tojson(results));
+ } else {
+ nReturned += result.executionStats.nReturned;
+ nExamined += result.executionStats.totalDocsExamined;
}
}
-
- for (let i = 0; i < kNumDocs; i++) {
- assert.writeOK(coll.insert({a: i}));
+ if (verbosity != "queryPlanner") {
+ assert.eq(nReturned, kNumDocs, tojson(results));
+ assert.eq(nExamined, kNumDocs, tojson(results));
}
+}
- // Execute several aggregations with a sort stage combined with various single document
- // transformation stages.
- for (let verbosity of["queryPlanner", "executionStats", "allPlansExecution"]) {
- let pipeline = [{$project: {a: 1}}, {$sort: {a: 1}}];
- checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+for (let i = 0; i < kNumDocs; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
- pipeline = [{$project: {a: 0}}, {$sort: {a: 1}}];
- checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+// Execute several aggregations with a sort stage combined with various single-document
+// transformation stages.
+for (let verbosity of ["queryPlanner", "executionStats", "allPlansExecution"]) {
+ let pipeline = [{$project: {a: 1}}, {$sort: {a: 1}}];
+ checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
- pipeline = [{$addFields: {b: 1}}, {$sort: {a: 1}}];
- checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+ pipeline = [{$project: {a: 0}}, {$sort: {a: 1}}];
+ checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
- pipeline = [{$sort: {a: 1}}, {$project: {_id: 1}}];
- checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+ pipeline = [{$addFields: {b: 1}}, {$sort: {a: 1}}];
+ checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
- pipeline = [{$project: {a: 1}}, {$limit: 5}, {$sort: {a: 1}}];
- checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+ pipeline = [{$sort: {a: 1}}, {$project: {_id: 1}}];
+ checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
- pipeline = [{$project: {_id: 1}}, {$limit: 5}];
- checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
- }
+ pipeline = [{$project: {a: 1}}, {$limit: 5}, {$sort: {a: 1}}];
+ checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+
+ pipeline = [{$project: {_id: 1}}, {$limit: 5}];
+ checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity);
+}
})();
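
checkResults() above encodes the contract of the three verbosities: "queryPlanner" output carries no executionStats section at all, while "executionStats" and "allPlansExecution" attach counters such as nReturned and totalDocsExamined to each $cursor stage. A minimal sketch of reading one of those counters directly, reusing the getAggPlanStages() helper the test already loads (pipeline illustrative):

const expl = db.explain_sort.explain("executionStats")
                 .aggregate([{$project: {a: 1}}, {$sort: {a: 1}}]);
for (let stage of getAggPlanStages(expl, "$cursor")) {
    // Present only at the two execution verbosities, never at "queryPlanner".
    printjson(stage.$cursor.executionStats.nReturned);
}
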
diff --git a/jstests/aggregation/sources/unset/unset.js b/jstests/aggregation/sources/unset/unset.js
index be20a69b362..c11f97598f5 100644
--- a/jstests/aggregation/sources/unset/unset.js
+++ b/jstests/aggregation/sources/unset/unset.js
@@ -1,39 +1,38 @@
// Basic testing for the $unset aggregation stage.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
+load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
- const coll = db.agg_stage_unset;
- coll.drop();
+const coll = db.agg_stage_unset;
+coll.drop();
- assert.commandWorked(coll.insert(
- [{_id: 0, a: 10}, {_id: 1, a: {b: 20, c: 30, 0: 40}}, {_id: 2, a: [{b: 50, c: 60}]}]));
+assert.commandWorked(coll.insert(
+ [{_id: 0, a: 10}, {_id: 1, a: {b: 20, c: 30, 0: 40}}, {_id: 2, a: [{b: 50, c: 60}]}]));
- // unset single field.
- let result = coll.aggregate([{$unset: ["a"]}]).toArray();
- assertArrayEq({actual: result, expected: [{_id: 0}, {_id: 1}, {_id: 2}]});
+// unset single field.
+let result = coll.aggregate([{$unset: ["a"]}]).toArray();
+assertArrayEq({actual: result, expected: [{_id: 0}, {_id: 1}, {_id: 2}]});
- // unset should work with string directive.
- result = coll.aggregate([{$unset: "a"}]).toArray();
- assertArrayEq({actual: result, expected: [{_id: 0}, {_id: 1}, {_id: 2}]});
+// unset should work with string directive.
+result = coll.aggregate([{$unset: "a"}]).toArray();
+assertArrayEq({actual: result, expected: [{_id: 0}, {_id: 1}, {_id: 2}]});
- // unset multiple fields.
- result = coll.aggregate([{$unset: ["_id", "a"]}]).toArray();
- assertArrayEq({actual: result, expected: [{}, {}, {}]});
+// unset multiple fields.
+result = coll.aggregate([{$unset: ["_id", "a"]}]).toArray();
+assertArrayEq({actual: result, expected: [{}, {}, {}]});
- // unset with dotted field path.
- result = coll.aggregate([{$unset: ["a.b"]}]).toArray();
- assertArrayEq({
- actual: result,
- expected: [{_id: 0, a: 10}, {_id: 1, a: {0: 40, c: 30}}, {_id: 2, a: [{c: 60}]}]
- });
-
- // Numeric field paths in aggregation represent field name only and not array offset.
- result = coll.aggregate([{$unset: ["a.0"]}]).toArray();
- assertArrayEq({
- actual: result,
- expected: [{_id: 0, a: 10}, {_id: 1, a: {b: 20, c: 30}}, {_id: 2, a: [{b: 50, c: 60}]}]
- });
+// unset with dotted field path.
+result = coll.aggregate([{$unset: ["a.b"]}]).toArray();
+assertArrayEq({
+ actual: result,
+ expected: [{_id: 0, a: 10}, {_id: 1, a: {0: 40, c: 30}}, {_id: 2, a: [{c: 60}]}]
+});
+// Numeric field paths in aggregation represent field name only and not array offset.
+result = coll.aggregate([{$unset: ["a.0"]}]).toArray();
+assertArrayEq({
+ actual: result,
+ expected: [{_id: 0, a: 10}, {_id: 1, a: {b: 20, c: 30}}, {_id: 2, a: [{b: 50, c: 60}]}]
+});
})();
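
$unset is shorthand for an exclusion $project over the same paths, so each pipeline in this file has a $project equivalent. A minimal sketch of the equivalence on the same collection, reusing the assertArrayEq() helper loaded above:

const viaUnset = db.agg_stage_unset.aggregate([{$unset: "a"}]).toArray();
const viaProject = db.agg_stage_unset.aggregate([{$project: {a: 0}}]).toArray();
// Both pipelines drop 'a' and leave every other field untouched.
assertArrayEq({actual: viaUnset, expected: viaProject});
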
diff --git a/jstests/aggregation/stages/skip_with_limit.js b/jstests/aggregation/stages/skip_with_limit.js
index 161ac931e88..d0bad0ed03f 100644
--- a/jstests/aggregation/stages/skip_with_limit.js
+++ b/jstests/aggregation/stages/skip_with_limit.js
@@ -4,47 +4,47 @@
* especially in a sharded cluster - which we intend to stress with this test.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.skip_with_limit;
- coll.drop();
+const coll = db.skip_with_limit;
+coll.drop();
- // Insert twenty documents: {x: 4, y: 0}, {x: 4, y: 1}, ..., {x: 4, y: 19}.
- const bulk = coll.initializeOrderedBulkOp();
- Array.from({length: 20}, (_, i) => ({x: 4, y: i})).forEach(doc => bulk.insert(doc));
- assert.commandWorked(bulk.execute());
+// Insert twenty documents: {x: 4, y: 0}, {x: 4, y: 1}, ..., {x: 4, y: 19}.
+const bulk = coll.initializeOrderedBulkOp();
+Array.from({length: 20}, (_, i) => ({x: 4, y: i})).forEach(doc => bulk.insert(doc));
+assert.commandWorked(bulk.execute());
- var count = coll.aggregate([{$match: {x: 4}}, {$skip: 10}, {$limit: 5}]).itcount();
- assert.eq(count, 5);
+var count = coll.aggregate([{$match: {x: 4}}, {$skip: 10}, {$limit: 5}]).itcount();
+assert.eq(count, 5);
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 7}, {$skip: 3}, {$limit: 5}]).itcount();
- assert.eq(count, 5);
+count = coll.aggregate([{$match: {x: 4}}, {$skip: 7}, {$skip: 3}, {$limit: 5}]).itcount();
+assert.eq(count, 5);
- count = coll.aggregate([{$match: {x: 4}}, {$limit: 10}, {$skip: 5}]).itcount();
- assert.eq(count, 5);
+count = coll.aggregate([{$match: {x: 4}}, {$limit: 10}, {$skip: 5}]).itcount();
+assert.eq(count, 5);
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 10}, {$addFields: {y: 1}}, {$limit: 5}])
- .itcount();
- assert.eq(count, 5);
+count =
+ coll.aggregate([{$match: {x: 4}}, {$skip: 10}, {$addFields: {y: 1}}, {$limit: 5}]).itcount();
+assert.eq(count, 5);
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 10}, {$group: {_id: '$y'}}, {$limit: 5}])
- .itcount();
- assert.eq(count, 5);
+count =
+ coll.aggregate([{$match: {x: 4}}, {$skip: 10}, {$group: {_id: '$y'}}, {$limit: 5}]).itcount();
+assert.eq(count, 5);
- // For the pipelines with a $skip before the $limit, repeat the tests with larger skip values to
- // ensure that the skip is actually working. The large skips exhaust our 20 documents, so we get
- // fewer results.
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 18}, {$limit: 5}]).itcount();
- assert.eq(count, 2);
+// For the pipelines with a $skip before the $limit, repeat the tests with larger skip values to
+// ensure that the skip is actually working. The large skips exhaust our 20 documents, so we get
+// fewer results.
+count = coll.aggregate([{$match: {x: 4}}, {$skip: 18}, {$limit: 5}]).itcount();
+assert.eq(count, 2);
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 11}, {$skip: 7}, {$limit: 5}]).itcount();
- assert.eq(count, 2);
+count = coll.aggregate([{$match: {x: 4}}, {$skip: 11}, {$skip: 7}, {$limit: 5}]).itcount();
+assert.eq(count, 2);
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 18}, {$addFields: {y: 1}}, {$limit: 5}])
- .itcount();
- assert.eq(count, 2);
+count =
+ coll.aggregate([{$match: {x: 4}}, {$skip: 18}, {$addFields: {y: 1}}, {$limit: 5}]).itcount();
+assert.eq(count, 2);
- count = coll.aggregate([{$match: {x: 4}}, {$skip: 18}, {$group: {_id: '$y'}}, {$limit: 5}])
- .itcount();
- assert.eq(count, 2);
+count =
+ coll.aggregate([{$match: {x: 4}}, {$skip: 18}, {$group: {_id: '$y'}}, {$limit: 5}]).itcount();
+assert.eq(count, 2);
}());
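
The first set of counts above follows from skip coalescing: two adjacent $skip stages combine by adding their arguments, so {$skip: 7} followed by {$skip: 3} behaves exactly like a single {$skip: 10}. A minimal sketch of that identity on the same twenty-document collection:

const split = db.skip_with_limit.aggregate([{$match: {x: 4}}, {$skip: 7}, {$skip: 3}]).itcount();
const single = db.skip_with_limit.aggregate([{$match: {x: 4}}, {$skip: 10}]).itcount();
assert.eq(split, single);  // both skip ten of the twenty matching documents
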
diff --git a/jstests/aggregation/testall.js b/jstests/aggregation/testall.js
index 33dd09a0463..a58a1bb00f2 100644
--- a/jstests/aggregation/testall.js
+++ b/jstests/aggregation/testall.js
@@ -1,938 +1,917 @@
(function() {
- "use strict";
-
- // Loads data into the namespace 'aggdb.articles'.
- load('jstests/aggregation/data/articles.js');
- load('jstests/aggregation/extras/utils.js');
-
- const testDB = db.getSiblingDB("aggdb");
-
- // just passing through fields
- let p1 = testDB.runCommand({
- aggregate: "article",
- pipeline: [{$project: {tags: 1, pageViews: 1}}, {$sort: {_id: 1}}],
- cursor: {}
- });
-
- let p1result = [
- {"_id": 1, "pageViews": 5, "tags": ["fun", "good", "fun"]},
- {"_id": 2, "pageViews": 7, "tags": ["fun", "nasty"]},
- {"_id": 3, "pageViews": 6, "tags": ["nasty", "filthy"]}
- ];
-
- assert.docEq(p1.cursor.firstBatch, p1result, 'p1 failed');
-
- // a simple array unwinding
- let u1 = testDB.runCommand({aggregate: "article", pipeline: [{$unwind: "$tags"}], cursor: {}});
-
- let u1result = [
- {
- "_id": 1,
- "title": "this is my title",
- "author": "bob",
- "posted": ISODate("2004-03-21T18:59:54Z"),
- "pageViews": 5,
- "tags": "fun",
- "comments":
- [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
- "other": {"foo": 5}
- },
- {
- "_id": 1,
- "title": "this is my title",
- "author": "bob",
- "posted": ISODate("2004-03-21T18:59:54Z"),
- "pageViews": 5,
- "tags": "good",
- "comments":
- [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
- "other": {"foo": 5}
- },
- {
- "_id": 1,
- "title": "this is my title",
- "author": "bob",
- "posted": ISODate("2004-03-21T18:59:54Z"),
- "pageViews": 5,
- "tags": "fun",
- "comments":
- [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
- "other": {"foo": 5}
- },
- {
- "_id": 2,
- "title": "this is your title",
- "author": "dave",
- "posted": ISODate("2030-08-08T04:11:10Z"),
- "pageViews": 7,
- "tags": "fun",
- "comments": [
- {"author": "barbara", "text": "this is interesting"},
- {"author": "jenny", "text": "i like to play pinball", "votes": 10}
- ],
- "other": {"bar": 14}
- },
- {
- "_id": 2,
- "title": "this is your title",
- "author": "dave",
- "posted": ISODate("2030-08-08T04:11:10Z"),
- "pageViews": 7,
- "tags": "nasty",
- "comments": [
- {"author": "barbara", "text": "this is interesting"},
- {"author": "jenny", "text": "i like to play pinball", "votes": 10}
- ],
- "other": {"bar": 14}
- },
- {
- "_id": 3,
- "title": "this is some other title",
- "author": "jane",
- "posted": ISODate("2000-12-31T05:17:14Z"),
- "pageViews": 6,
- "tags": "nasty",
- "comments": [
- {"author": "will", "text": "i don't like the color"},
- {"author": "jenny", "text": "can i get that in green?"}
- ],
- "other": {"bar": 14}
- },
- {
- "_id": 3,
- "title": "this is some other title",
- "author": "jane",
- "posted": ISODate("2000-12-31T05:17:14Z"),
- "pageViews": 6,
- "tags": "filthy",
- "comments": [
- {"author": "will", "text": "i don't like the color"},
- {"author": "jenny", "text": "can i get that in green?"}
- ],
- "other": {"bar": 14}
- }
- ];
-
- let firstBatch = u1.cursor.firstBatch;
- assert(arrayEq(firstBatch, u1result), tojson({got: firstBatch, expected: u1result}));
-
- // unwind an array at the end of a dotted path
- testDB.ut.drop();
- assert.writeOK(testDB.ut.insert({_id: 4, a: 1, b: {e: 7, f: [4, 3, 2, 1]}, c: 12, d: 17}));
- let u2 = testDB.runCommand(
- {aggregate: "ut", pipeline: [{$unwind: "$b.f"}, {$sort: {"b.f": -1}}], cursor: {}});
-
- let u2result = [
- {"_id": 4, "a": 1, "b": {"e": 7, "f": 4}, "c": 12, "d": 17},
- {"_id": 4, "a": 1, "b": {"e": 7, "f": 3}, "c": 12, "d": 17},
- {"_id": 4, "a": 1, "b": {"e": 7, "f": 2}, "c": 12, "d": 17},
- {"_id": 4, "a": 1, "b": {"e": 7, "f": 1}, "c": 12, "d": 17}
- ];
-
- assert.docEq(u2.cursor.firstBatch, u2result, 'u2 failed');
-
- // combining a projection with unwinding an array
- let p2 = testDB.runCommand({
- aggregate: "article",
- pipeline: [{$project: {author: 1, tags: 1, pageViews: 1}}, {$unwind: "$tags"}],
- cursor: {}
- });
-
- let p2result = [
- {"_id": 1, "author": "bob", "pageViews": 5, "tags": "fun"},
- {"_id": 1, "author": "bob", "pageViews": 5, "tags": "good"},
- {"_id": 1, "author": "bob", "pageViews": 5, "tags": "fun"},
- {"_id": 2, "author": "dave", "pageViews": 7, "tags": "fun"},
- {"_id": 2, "author": "dave", "pageViews": 7, "tags": "nasty"},
- {"_id": 3, "author": "jane", "pageViews": 6, "tags": "nasty"},
- {"_id": 3, "author": "jane", "pageViews": 6, "tags": "filthy"}
- ];
-
- firstBatch = p2.cursor.firstBatch;
- assert(arrayEq(firstBatch, p2result), tojson({got: firstBatch, expected: p2result}));
-
- // pulling values out of subdocuments
- let p3 = testDB.runCommand({
- aggregate: "article",
- pipeline: [{$project: {otherfoo: "$other.foo", otherbar: "$other.bar"}}, {$sort: {_id: 1}}],
- cursor: {}
- });
-
- let p3result =
- [{"_id": 1, "otherfoo": 5}, {"_id": 2, "otherbar": 14}, {"_id": 3, "otherbar": 14}];
-
- assert.docEq(p3.cursor.firstBatch, p3result, 'p3 failed');
-
- // projection includes a computed value
- let p4 = testDB.runCommand({
- aggregate: "article",
- pipeline:
- [{$project: {author: 1, daveWroteIt: {$eq: ["$author", "dave"]}}}, {$sort: {_id: 1}}],
- cursor: {}
- });
-
- let p4result = [
- {"_id": 1, "author": "bob", "daveWroteIt": false},
- {"_id": 2, "author": "dave", "daveWroteIt": true},
- {"_id": 3, "author": "jane", "daveWroteIt": false}
- ];
-
- assert.docEq(p4.cursor.firstBatch, p4result, 'p4 failed');
-
- // projection includes a virtual (fabricated) document
- let p5 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {author: 1, pageViews: 1, tags: 1}},
- {$unwind: "$tags"},
- {$project: {author: 1, subDocument: {foo: "$pageViews", bar: "$tags"}}}
+"use strict";
+
+// Loads data into the namespace 'aggdb.article'.
+load('jstests/aggregation/data/articles.js');
+load('jstests/aggregation/extras/utils.js');
+
+const testDB = db.getSiblingDB("aggdb");
+
+// just passing through fields
+let p1 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {tags: 1, pageViews: 1}}, {$sort: {_id: 1}}],
+ cursor: {}
+});
+
+let p1result = [
+ {"_id": 1, "pageViews": 5, "tags": ["fun", "good", "fun"]},
+ {"_id": 2, "pageViews": 7, "tags": ["fun", "nasty"]},
+ {"_id": 3, "pageViews": 6, "tags": ["nasty", "filthy"]}
+];
+
+assert.docEq(p1.cursor.firstBatch, p1result, 'p1 failed');
+
+// a simple array unwinding
+let u1 = testDB.runCommand({aggregate: "article", pipeline: [{$unwind: "$tags"}], cursor: {}});
+
+let u1result = [
+ {
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": "fun",
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": "good",
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": "fun",
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": "fun",
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
],
- cursor: {}
- });
-
- let p5result = [
- {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "fun"}},
- {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "good"}},
- {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "fun"}},
- {"_id": 2, "author": "dave", "subDocument": {"foo": 7, "bar": "fun"}},
- {"_id": 2, "author": "dave", "subDocument": {"foo": 7, "bar": "nasty"}},
- {"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "nasty"}},
- {"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "filthy"}}
- ];
-
- firstBatch = p5.cursor.firstBatch;
- assert(arrayEq(firstBatch, p5result), tojson({got: firstBatch, expected: p5result}));
-
- // multi-step aggregate
- // nested expressions in computed fields
- let p6 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {author: 1, tags: 1, pageViews: 1}},
- {$unwind: "$tags"},
- {
- $project: {
- author: 1,
- tag: "$tags",
- pageViews: 1,
- daveWroteIt: {$eq: ["$author", "dave"]},
- weLikeIt: {$or: [{$eq: ["$author", "dave"]}, {$eq: ["$tags", "good"]}]}
- }
- }
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": "nasty",
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
],
- cursor: {}
- });
-
- let p6result = [
- {
- "_id": 1,
- "author": "bob",
- "pageViews": 5,
- "tag": "fun",
- "daveWroteIt": false,
- "weLikeIt": false
- },
- {
- "_id": 1,
- "author": "bob",
- "pageViews": 5,
- "tag": "good",
- "daveWroteIt": false,
- "weLikeIt": true
- },
- {
- "_id": 1,
- "author": "bob",
- "pageViews": 5,
- "tag": "fun",
- "daveWroteIt": false,
- "weLikeIt": false
- },
- {
- "_id": 2,
- "author": "dave",
- "pageViews": 7,
- "tag": "fun",
- "daveWroteIt": true,
- "weLikeIt": true
- },
- {
- "_id": 2,
- "author": "dave",
- "pageViews": 7,
- "tag": "nasty",
- "daveWroteIt": true,
- "weLikeIt": true
- },
- {
- "_id": 3,
- "author": "jane",
- "pageViews": 6,
- "tag": "nasty",
- "daveWroteIt": false,
- "weLikeIt": false
- },
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "pageViews": 6,
+ "tags": "nasty",
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ],
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "pageViews": 6,
+ "tags": "filthy",
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ],
+ "other": {"bar": 14}
+ }
+];
+
+let firstBatch = u1.cursor.firstBatch;
+assert(arrayEq(firstBatch, u1result), tojson({got: firstBatch, expected: u1result}));
+
+// unwind an array at the end of a dotted path
+testDB.ut.drop();
+assert.writeOK(testDB.ut.insert({_id: 4, a: 1, b: {e: 7, f: [4, 3, 2, 1]}, c: 12, d: 17}));
+let u2 = testDB.runCommand(
+ {aggregate: "ut", pipeline: [{$unwind: "$b.f"}, {$sort: {"b.f": -1}}], cursor: {}});
+
+let u2result = [
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 4}, "c": 12, "d": 17},
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 3}, "c": 12, "d": 17},
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 2}, "c": 12, "d": 17},
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 1}, "c": 12, "d": 17}
+];
+
+assert.docEq(u2.cursor.firstBatch, u2result, 'u2 failed');
+
+// combining a projection with unwinding an array
+let p2 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {author: 1, tags: 1, pageViews: 1}}, {$unwind: "$tags"}],
+ cursor: {}
+});
+
+let p2result = [
+ {"_id": 1, "author": "bob", "pageViews": 5, "tags": "fun"},
+ {"_id": 1, "author": "bob", "pageViews": 5, "tags": "good"},
+ {"_id": 1, "author": "bob", "pageViews": 5, "tags": "fun"},
+ {"_id": 2, "author": "dave", "pageViews": 7, "tags": "fun"},
+ {"_id": 2, "author": "dave", "pageViews": 7, "tags": "nasty"},
+ {"_id": 3, "author": "jane", "pageViews": 6, "tags": "nasty"},
+ {"_id": 3, "author": "jane", "pageViews": 6, "tags": "filthy"}
+];
+
+firstBatch = p2.cursor.firstBatch;
+assert(arrayEq(firstBatch, p2result), tojson({got: firstBatch, expected: p2result}));
+
+// pulling values out of subdocuments
+let p3 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {otherfoo: "$other.foo", otherbar: "$other.bar"}}, {$sort: {_id: 1}}],
+ cursor: {}
+});
+
+let p3result = [{"_id": 1, "otherfoo": 5}, {"_id": 2, "otherbar": 14}, {"_id": 3, "otherbar": 14}];
+
+assert.docEq(p3.cursor.firstBatch, p3result, 'p3 failed');
+
+// projection includes a computed value
+let p4 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {author: 1, daveWroteIt: {$eq: ["$author", "dave"]}}}, {$sort: {_id: 1}}],
+ cursor: {}
+});
+
+let p4result = [
+ {"_id": 1, "author": "bob", "daveWroteIt": false},
+ {"_id": 2, "author": "dave", "daveWroteIt": true},
+ {"_id": 3, "author": "jane", "daveWroteIt": false}
+];
+
+assert.docEq(p4.cursor.firstBatch, p4result, 'p4 failed');
+
+// projection includes a virtual (fabricated) document
+let p5 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, pageViews: 1, tags: 1}},
+ {$unwind: "$tags"},
+ {$project: {author: 1, subDocument: {foo: "$pageViews", bar: "$tags"}}}
+ ],
+ cursor: {}
+});
+
+let p5result = [
+ {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "fun"}},
+ {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "good"}},
+ {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "fun"}},
+ {"_id": 2, "author": "dave", "subDocument": {"foo": 7, "bar": "fun"}},
+ {"_id": 2, "author": "dave", "subDocument": {"foo": 7, "bar": "nasty"}},
+ {"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "nasty"}},
+ {"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "filthy"}}
+];
+
+firstBatch = p5.cursor.firstBatch;
+assert(arrayEq(firstBatch, p5result), tojson({got: firstBatch, expected: p5result}));
+
+// multi-step aggregate
+// nested expressions in computed fields
+let p6 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
{
- "_id": 3,
- "author": "jane",
- "pageViews": 6,
- "tag": "filthy",
- "daveWroteIt": false,
- "weLikeIt": false
+ $project: {
+ author: 1,
+ tag: "$tags",
+ pageViews: 1,
+ daveWroteIt: {$eq: ["$author", "dave"]},
+ weLikeIt: {$or: [{$eq: ["$author", "dave"]}, {$eq: ["$tags", "good"]}]}
+ }
}
- ];
-
- firstBatch = p6.cursor.firstBatch;
- assert(arrayEq(firstBatch, p6result), tojson({got: firstBatch, expected: p6result}));
-
- // slightly more complex computed expression; $ifNull
- let p7 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {theSum: {$add: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}},
- {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p6result = [
+ {
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ "tag": "fun",
+ "daveWroteIt": false,
+ "weLikeIt": false
+ },
+ {
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ "tag": "good",
+ "daveWroteIt": false,
+ "weLikeIt": true
+ },
+ {
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ "tag": "fun",
+ "daveWroteIt": false,
+ "weLikeIt": false
+ },
+ {
+ "_id": 2,
+ "author": "dave",
+ "pageViews": 7,
+ "tag": "fun",
+ "daveWroteIt": true,
+ "weLikeIt": true
+ },
+ {
+ "_id": 2,
+ "author": "dave",
+ "pageViews": 7,
+ "tag": "nasty",
+ "daveWroteIt": true,
+ "weLikeIt": true
+ },
+ {
+ "_id": 3,
+ "author": "jane",
+ "pageViews": 6,
+ "tag": "nasty",
+ "daveWroteIt": false,
+ "weLikeIt": false
+ },
+ {
+ "_id": 3,
+ "author": "jane",
+ "pageViews": 6,
+ "tag": "filthy",
+ "daveWroteIt": false,
+ "weLikeIt": false
+ }
+];
+
+firstBatch = p6.cursor.firstBatch;
+assert(arrayEq(firstBatch, p6result), tojson({got: firstBatch, expected: p6result}));
+
+// slightly more complex computed expression; $ifNull
+let p7 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {theSum: {$add: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p7result = [{"_id": 1, "theSum": 10}, {"_id": 2, "theSum": 21}, {"_id": 3, "theSum": 20}];
+
+assert.docEq(p7.cursor.firstBatch, p7result, 'p7 failed');
+
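
$ifNull returns its first operand unless that operand evaluates to null or is missing, in which case it returns the second; that is why _id 1 (which has other.foo) sums to 10 while _id 2 and 3 fall back to other.bar. A minimal sketch isolating the fallback on the same data:

let fallbacks = testDB.article.aggregate([
    {$project: {fb: {$ifNull: ["$other.foo", "$other.bar"]}}},
    {$sort: {_id: 1}}
]).toArray();
assert.docEq(fallbacks, [{_id: 1, fb: 5}, {_id: 2, fb: 14}, {_id: 3, fb: 14}], 'ifNull sketch');
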
+// dotted path inclusion; _id exclusion
+let p8 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {_id: 0, author: 1, tags: 1, "comments.author": 1}}, {$unwind: "$tags"}],
+ cursor: {}
+});
+
+let p8result = [
+ {"author": "bob", "tags": "fun", "comments": [{"author": "joe"}, {"author": "sam"}]},
+ {"author": "bob", "tags": "good", "comments": [{"author": "joe"}, {"author": "sam"}]},
+ {"author": "bob", "tags": "fun", "comments": [{"author": "joe"}, {"author": "sam"}]},
+ {"author": "dave", "tags": "fun", "comments": [{"author": "barbara"}, {"author": "jenny"}]},
+ {"author": "dave", "tags": "nasty", "comments": [{"author": "barbara"}, {"author": "jenny"}]},
+ {"author": "jane", "tags": "nasty", "comments": [{"author": "will"}, {"author": "jenny"}]},
+ {"author": "jane", "tags": "filthy", "comments": [{"author": "will"}, {"author": "jenny"}]}
+];
+
+firstBatch = p8.cursor.firstBatch;
+assert(arrayEq(firstBatch, p8result), tojson({got: firstBatch, expected: p8result}));
+
+// collapse a dotted path with an intervening array
+let p9 = testDB.runCommand({
+ aggregate: "article",
+ pipeline:
+ [{$project: {_id: 0, author: 1, commentsAuthor: "$comments.author"}}, {$sort: {author: 1}}],
+ cursor: {}
+});
+
+let p9result = [
+ {"author": "bob", "commentsAuthor": ["joe", "sam"]},
+ {"author": "dave", "commentsAuthor": ["barbara", "jenny"]},
+ {"author": "jane", "commentsAuthor": ["will", "jenny"]}
+];
+
+assert.docEq(p9.cursor.firstBatch, p9result, 'p9 failed');
+
+// simple sort
+let p10 = testDB.runCommand({aggregate: "article", pipeline: [{$sort: {title: 1}}], cursor: {}});
+
+let p10result = [
+ {
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": ["fun", "good", "fun"],
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "pageViews": 6,
+ "tags": ["nasty", "filthy"],
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
],
- cursor: {}
- });
-
- let p7result = [{"_id": 1, "theSum": 10}, {"_id": 2, "theSum": 21}, {"_id": 3, "theSum": 20}];
-
- assert.docEq(p7.cursor.firstBatch, p7result, 'p7 failed');
-
- // dotted path inclusion; _id exclusion
- let p8 = testDB.runCommand({
- aggregate: "article",
- pipeline:
- [{$project: {_id: 0, author: 1, tags: 1, "comments.author": 1}}, {$unwind: "$tags"}],
- cursor: {}
- });
-
- let p8result = [
- {"author": "bob", "tags": "fun", "comments": [{"author": "joe"}, {"author": "sam"}]},
- {"author": "bob", "tags": "good", "comments": [{"author": "joe"}, {"author": "sam"}]},
- {"author": "bob", "tags": "fun", "comments": [{"author": "joe"}, {"author": "sam"}]},
- {"author": "dave", "tags": "fun", "comments": [{"author": "barbara"}, {"author": "jenny"}]},
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": ["fun", "nasty"],
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ],
+ "other": {"bar": 14}
+ }
+];
+
+assert.docEq(p10.cursor.firstBatch, p10result, 'p10 failed');
+
+// unwind on nested array
+testDB.p11.drop();
+testDB.p11.save({
+ _id: 5,
+ name: 'MongoDB',
+ items: {authors: ['jay', 'vivek', 'bjornar'], dbg: [17, 42]},
+ favorites: ['pickles', 'ice cream', 'kettle chips']
+});
+
+let p11 = testDB.runCommand({
+ aggregate: "p11",
+ pipeline: [
+ {$unwind: "$items.authors"},
+ {$project: {name: 1, author: "$items.authors"}},
+ {$sort: {author: 1}}
+
+ ],
+ cursor: {}
+});
+
+let p11result = [
+ {"_id": 5, "name": "MongoDB", "author": "bjornar"},
+ {"_id": 5, "name": "MongoDB", "author": "jay"},
+ {"_id": 5, "name": "MongoDB", "author": "vivek"},
+];
+
+assert.docEq(p11.cursor.firstBatch, p11result, 'p11 failed');
+
+// multiply test
+let p12 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "author": "dave",
- "tags": "nasty",
- "comments": [{"author": "barbara"}, {"author": "jenny"}]
+ $project:
+ {theProduct: {$multiply: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}},
},
- {"author": "jane", "tags": "nasty", "comments": [{"author": "will"}, {"author": "jenny"}]},
- {
- "author": "jane",
- "tags": "filthy",
- "comments": [{"author": "will"}, {"author": "jenny"}]
- }
- ];
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
- firstBatch = p8.cursor.firstBatch;
- assert(arrayEq(firstBatch, p8result), tojson({got: firstBatch, expected: p8result}));
+let p12result =
+ [{"_id": 1, "theProduct": 25}, {"_id": 2, "theProduct": 98}, {"_id": 3, "theProduct": 84}];
- // collapse a dotted path with an intervening array
- let p9 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {_id: 0, author: 1, commentsAuthor: "$comments.author"}},
- {$sort: {author: 1}}
- ],
- cursor: {}
- });
-
- let p9result = [
- {"author": "bob", "commentsAuthor": ["joe", "sam"]},
- {"author": "dave", "commentsAuthor": ["barbara", "jenny"]},
- {"author": "jane", "commentsAuthor": ["will", "jenny"]}
- ];
-
- assert.docEq(p9.cursor.firstBatch, p9result, 'p9 failed');
+assert.docEq(p12.cursor.firstBatch, p12result, 'p12 failed');
- // simple sort
- let p10 =
- testDB.runCommand({aggregate: "article", pipeline: [{$sort: {title: 1}}], cursor: {}});
-
- let p10result = [
+// subtraction test
+let p13 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": 1,
- "title": "this is my title",
- "author": "bob",
- "posted": ISODate("2004-03-21T18:59:54Z"),
- "pageViews": 5,
- "tags": ["fun", "good", "fun"],
- "comments":
- [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
- "other": {"foo": 5}
+ $project: {
+ theDifference:
+ {$subtract: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}
+ }
},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p13result = [
+ {"_id": 1, "theDifference": 0},
+ {"_id": 2, "theDifference": -7},
+ {"_id": 3, "theDifference": -8}
+];
+
+assert.docEq(p13.cursor.firstBatch, p13result, 'p13 failed');
+
+// mod test
+let p14 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": 3,
- "title": "this is some other title",
- "author": "jane",
- "posted": ISODate("2000-12-31T05:17:14Z"),
- "pageViews": 6,
- "tags": ["nasty", "filthy"],
- "comments": [
- {"author": "will", "text": "i don't like the color"},
- {"author": "jenny", "text": "can i get that in green?"}
- ],
- "other": {"bar": 14}
+ $project: {
+ theRemainder: {
+ $mod: [
+ {$ifNull: ["$other.foo", "$other.bar"]},
+ "$pageViews",
+ ]
+ }
+ }
},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p14result =
+ [{"_id": 1, "theRemainder": 0}, {"_id": 2, "theRemainder": 0}, {"_id": 3, "theRemainder": 2}];
+
+assert.docEq(p14.cursor.firstBatch, p14result, 'p14 failed');
+
+// toUpper test
+let p15 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {author: {$toUpper: "$author"}, pageViews: 1}}, {$sort: {_id: 1}}],
+ cursor: {}
+});
+
+let p15result = [
+ {"_id": 1, "author": "BOB", "pageViews": 5},
+ {"_id": 2, "author": "DAVE", "pageViews": 7},
+ {"_id": 3, "author": "JANE", "pageViews": 6}
+];
+
+assert.docEq(p15.cursor.firstBatch, p15result, 'p15 failed');
+
+// toLower test
+let p16 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: {$toUpper: "$author"}, pageViews: 1}},
+ {$project: {author: {$toLower: "$author"}, pageViews: 1}},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p16result = [
+ {
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ },
+ {
+ "_id": 2,
+ "author": "dave",
+ "pageViews": 7,
+ },
+ {
+ "_id": 3,
+ "author": "jane",
+ "pageViews": 6,
+ }
+];
+
+assert.docEq(p16.cursor.firstBatch, p16result, 'p16 failed');
+
+// substr test
+let p17 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": 2,
- "title": "this is your title",
- "author": "dave",
- "posted": ISODate("2030-08-08T04:11:10Z"),
- "pageViews": 7,
- "tags": ["fun", "nasty"],
- "comments": [
- {"author": "barbara", "text": "this is interesting"},
- {"author": "jenny", "text": "i like to play pinball", "votes": 10}
- ],
- "other": {"bar": 14}
- }
- ];
-
- assert.docEq(p10.cursor.firstBatch, p10result, 'p10 failed');
-
- // unwind on nested array
- testDB.p11.drop();
- testDB.p11.save({
- _id: 5,
- name: 'MongoDB',
- items: {authors: ['jay', 'vivek', 'bjornar'], dbg: [17, 42]},
- favorites: ['pickles', 'ice cream', 'kettle chips']
- });
-
- let p11 = testDB.runCommand({
- aggregate: "p11",
- pipeline: [
- {$unwind: "$items.authors"},
- {$project: {name: 1, author: "$items.authors"}},
- {$sort: {author: 1}}
+ $project: {
+ author: {$substrBytes: ["$author", 1, 2]},
+ }
+ },
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
- ],
- cursor: {}
- });
-
- let p11result = [
- {"_id": 5, "name": "MongoDB", "author": "bjornar"},
- {"_id": 5, "name": "MongoDB", "author": "jay"},
- {"_id": 5, "name": "MongoDB", "author": "vivek"},
- ];
-
- assert.docEq(p11.cursor.firstBatch, p11result, 'p11 failed');
-
- // multiply test
- let p12 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- theProduct: {$multiply: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}
- },
- },
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
-
- let p12result =
- [{"_id": 1, "theProduct": 25}, {"_id": 2, "theProduct": 98}, {"_id": 3, "theProduct": 84}];
-
- assert.docEq(p12.cursor.firstBatch, p12result, 'p12 failed');
-
- // subtraction test
- let p13 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- theDifference:
- {$subtract: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}
- }
- },
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
-
- let p13result = [
- {"_id": 1, "theDifference": 0},
- {"_id": 2, "theDifference": -7},
- {"_id": 3, "theDifference": -8}
- ];
-
- assert.docEq(p13.cursor.firstBatch, p13result, 'p13 failed');
-
- // mod test
- let p14 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- theRemainder: {
- $mod: [
- {$ifNull: ["$other.foo", "$other.bar"]},
- "$pageViews",
- ]
- }
- }
- },
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
-
- let p14result = [
- {"_id": 1, "theRemainder": 0},
- {"_id": 2, "theRemainder": 0},
- {"_id": 3, "theRemainder": 2}
- ];
-
- assert.docEq(p14.cursor.firstBatch, p14result, 'p14 failed');
-
- // toUpper test
- let p15 = testDB.runCommand({
- aggregate: "article",
- pipeline: [{$project: {author: {$toUpper: "$author"}, pageViews: 1}}, {$sort: {_id: 1}}],
- cursor: {}
- });
-
- let p15result = [
- {"_id": 1, "author": "BOB", "pageViews": 5},
- {"_id": 2, "author": "DAVE", "pageViews": 7},
- {"_id": 3, "author": "JANE", "pageViews": 6}
- ];
-
- assert.docEq(p15.cursor.firstBatch, p15result, 'p15 failed');
-
- // toLower test
- let p16 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {author: {$toUpper: "$author"}, pageViews: 1}},
- {$project: {author: {$toLower: "$author"}, pageViews: 1}},
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
+let p17result =
+ [{"_id": 1, "author": "ob"}, {"_id": 2, "author": "av"}, {"_id": 3, "author": "an"}];
- let p16result = [
- {
- "_id": 1,
- "author": "bob",
- "pageViews": 5,
- },
- {
- "_id": 2,
- "author": "dave",
- "pageViews": 7,
- },
- {
- "_id": 3,
- "author": "jane",
- "pageViews": 6,
- }
- ];
-
- assert.docEq(p16.cursor.firstBatch, p16result, 'p16 failed');
-
- // substr test
- let p17 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- author: {$substrBytes: ["$author", 1, 2]},
- }
- },
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
-
- let p17result =
- [{"_id": 1, "author": "ob"}, {"_id": 2, "author": "av"}, {"_id": 3, "author": "an"}];
-
- assert.docEq(p17.cursor.firstBatch, p17result, 'p17 failed');
-
- // strcasecmp test
- let p18 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- tags: 1,
- thisisalametest: {$strcasecmp: ["foo", "bar"]},
- thisisalamepass: {$strcasecmp: ["foo", "foo"]}
- }
- },
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
-
- let p18result = [
- {"_id": 1, "tags": ["fun", "good", "fun"], "thisisalametest": 1, "thisisalamepass": 0},
- {"_id": 2, "tags": ["fun", "nasty"], "thisisalametest": 1, "thisisalamepass": 0},
- {"_id": 3, "tags": ["nasty", "filthy"], "thisisalametest": 1, "thisisalamepass": 0}
- ];
-
- assert.docEq(p18.cursor.firstBatch, p18result, 'p18 failed');
-
- // date tests
- let p19 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- authors: 1,
- posted: 1,
- seconds: {$second: "$posted"},
- minutes: {$minute: "$posted"},
- hour: {$hour: "$posted"},
- dayOfYear: {$dayOfYear: "$posted"},
- dayOfMonth: {$dayOfMonth: "$posted"},
- dayOfWeek: {$dayOfWeek: "$posted"},
- month: {$month: "$posted"},
- week: {$week: "$posted"},
- year: {$year: "$posted"}
- }
- },
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
+assert.docEq(p17.cursor.firstBatch, p17result, 'p17 failed');
- let p19result = [
+// strcasecmp test
+let p18 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": 1,
- "posted": ISODate("2004-03-21T18:59:54Z"),
- "seconds": 54,
- "minutes": 59,
- "hour": 18,
- "dayOfYear": 81,
- "dayOfMonth": 21,
- "dayOfWeek": 1,
- "month": 3,
- "week": 12,
- "year": 2004,
+ $project: {
+ tags: 1,
+ thisisalametest: {$strcasecmp: ["foo", "bar"]},
+ thisisalamepass: {$strcasecmp: ["foo", "foo"]}
+ }
},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p18result = [
+ {"_id": 1, "tags": ["fun", "good", "fun"], "thisisalametest": 1, "thisisalamepass": 0},
+ {"_id": 2, "tags": ["fun", "nasty"], "thisisalametest": 1, "thisisalamepass": 0},
+ {"_id": 3, "tags": ["nasty", "filthy"], "thisisalametest": 1, "thisisalamepass": 0}
+];
+
+assert.docEq(p18.cursor.firstBatch, p18result, 'p18 failed');
+
+// date tests
+let p19 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": 2,
- "posted": ISODate("2030-08-08T04:11:10Z"),
- "seconds": 10,
- "minutes": 11,
- "hour": 4,
- "dayOfYear": 220,
- "dayOfMonth": 8,
- "dayOfWeek": 5,
- "month": 8,
- "week": 31,
- "year": 2030,
+ $project: {
+ authors: 1,
+ posted: 1,
+ seconds: {$second: "$posted"},
+ minutes: {$minute: "$posted"},
+ hour: {$hour: "$posted"},
+ dayOfYear: {$dayOfYear: "$posted"},
+ dayOfMonth: {$dayOfMonth: "$posted"},
+ dayOfWeek: {$dayOfWeek: "$posted"},
+ month: {$month: "$posted"},
+ week: {$week: "$posted"},
+ year: {$year: "$posted"}
+ }
},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let p19result = [
+ {
+ "_id": 1,
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "seconds": 54,
+ "minutes": 59,
+ "hour": 18,
+ "dayOfYear": 81,
+ "dayOfMonth": 21,
+ "dayOfWeek": 1,
+ "month": 3,
+ "week": 12,
+ "year": 2004,
+ },
+ {
+ "_id": 2,
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "seconds": 10,
+ "minutes": 11,
+ "hour": 4,
+ "dayOfYear": 220,
+ "dayOfMonth": 8,
+ "dayOfWeek": 5,
+ "month": 8,
+ "week": 31,
+ "year": 2030,
+ },
+ {
+ "_id": 3,
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "seconds": 14,
+ "minutes": 17,
+ "hour": 5,
+ "dayOfYear": 366,
+ "dayOfMonth": 31,
+ "dayOfWeek": 1,
+ "month": 12,
+ "week": 53,
+ "year": 2000,
+ }
+];
+
+assert.docEq(p19.cursor.firstBatch, p19result, 'p19 failed');
+
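
The expected values above encode the date operators' conventions: $dayOfYear and $dayOfMonth are 1-based, $dayOfWeek runs from 1 (Sunday) to 7 (Saturday), and $week counts from 0 with each week beginning on a Sunday. Hence 2000-12-31, a Sunday in a leap year, yields dayOfYear 366, dayOfWeek 1, and week 53. A minimal sketch of just those last two conventions on the same data:

let parts = testDB.article.aggregate([
    {$match: {_id: 3}},
    {$project: {_id: 0, dow: {$dayOfWeek: "$posted"}, wk: {$week: "$posted"}}}
]).toArray()[0];
assert.eq(1, parts.dow);   // Sunday
assert.eq(53, parts.wk);   // the year's final Sunday opens week 53
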
+testDB.lettype.drop();
+testDB.lettype.save({x: 17, y: "foo"});
+
+// ternary conditional operator
+let p21 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": 3,
- "posted": ISODate("2000-12-31T05:17:14Z"),
- "seconds": 14,
- "minutes": 17,
- "hour": 5,
- "dayOfYear": 366,
- "dayOfMonth": 31,
- "dayOfWeek": 1,
- "month": 12,
- "week": 53,
- "year": 2000,
- }
- ];
-
- assert.docEq(p19.cursor.firstBatch, p19result, 'p19 failed');
-
- testDB.lettype.drop();
- testDB.lettype.save({x: 17, y: "foo"});
-
- // ternary conditional operator
- let p21 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- _id: 0,
- author: 1,
- pageViews: {
- $cond: [
- {$eq: ["$author", "dave"]},
- {$add: ["$pageViews", 1000]},
- "$pageViews"
- ]
- }
- }
- },
- {$sort: {author: 1}}
- ],
- cursor: {}
- });
-
- let p21result = [
- {"author": "bob", "pageViews": 5},
- {"author": "dave", "pageViews": 1007},
- {"author": "jane", "pageViews": 6}
- ];
-
- assert.docEq(p21.cursor.firstBatch, p21result, 'p21 failed');
-
- // simple matching
- let m1 = testDB.runCommand(
- {aggregate: "article", pipeline: [{$match: {author: "dave"}}], cursor: {}});
-
- let m1result = [{
+ $project: {
+ _id: 0,
+ author: 1,
+ pageViews: {
+ $cond:
+ [{$eq: ["$author", "dave"]}, {$add: ["$pageViews", 1000]}, "$pageViews"]
+ }
+ }
+ },
+ {$sort: {author: 1}}
+ ],
+ cursor: {}
+});
+
+let p21result = [
+ {"author": "bob", "pageViews": 5},
+ {"author": "dave", "pageViews": 1007},
+ {"author": "jane", "pageViews": 6}
+];
+
+assert.docEq(p21.cursor.firstBatch, p21result, 'p21 failed');
+
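
The $cond above uses the positional array form [if-expr, then-expr, else-expr]; the operator equally accepts a named-argument form that reads better for longer branches. A minimal sketch of the equivalent spelling, checked against the same expected result:

let p21alt = testDB.article.aggregate([
    {
        $project: {
            _id: 0,
            author: 1,
            pageViews: {
                $cond: {
                    if: {$eq: ["$author", "dave"]},
                    then: {$add: ["$pageViews", 1000]},
                    else: "$pageViews"
                }
            }
        }
    },
    {$sort: {author: 1}}
]).toArray();
assert.docEq(p21alt, p21result, 'named-form $cond should agree with the array form');
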
+// simple matching
+let m1 =
+ testDB.runCommand({aggregate: "article", pipeline: [{$match: {author: "dave"}}], cursor: {}});
+
+let m1result = [{
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": ["fun", "nasty"],
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ],
+ "other": {"bar": 14}
+}];
+
+assert.docEq(m1.cursor.firstBatch, m1result, 'm1 failed');
+
+// combining matching with a projection
+let m2 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {title: 1, author: 1, pageViews: 1, tags: 1, comments: 1}},
+ {$unwind: "$tags"},
+ {$match: {tags: "nasty"}},
+ {$sort: {_id: 1}}
+ ],
+ cursor: {}
+});
+
+let m2result = [
+ {
"_id": 2,
"title": "this is your title",
"author": "dave",
- "posted": ISODate("2030-08-08T04:11:10Z"),
"pageViews": 7,
- "tags": ["fun", "nasty"],
+ "tags": "nasty",
"comments": [
{"author": "barbara", "text": "this is interesting"},
{"author": "jenny", "text": "i like to play pinball", "votes": 10}
- ],
- "other": {"bar": 14}
- }];
-
- assert.docEq(m1.cursor.firstBatch, m1result, 'm1 failed');
-
- // combining matching with a projection
- let m2 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {title: 1, author: 1, pageViews: 1, tags: 1, comments: 1}},
- {$unwind: "$tags"},
- {$match: {tags: "nasty"}},
- {$sort: {_id: 1}}
- ],
- cursor: {}
- });
-
- let m2result = [
+ ]
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "pageViews": 6,
+ "tags": "nasty",
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ]
+ }
+];
+
+assert.docEq(m2.cursor.firstBatch, m2result, 'm2 failed');
+
+// group by tag, _id is a field reference
+let g1 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
+ {$group: {_id: "$tags", docsByTag: {$sum: 1}, viewsByTag: {$sum: "$pageViews"}}},
+ {$sort: {'_id': 1}}
+ ],
+ cursor: {}
+});
+
+let g1result = [
+ {"_id": "filthy", "docsByTag": 1, "viewsByTag": 6},
+ {"_id": "fun", "docsByTag": 3, "viewsByTag": 17},
+ {"_id": "good", "docsByTag": 1, "viewsByTag": 5},
+ {"_id": "nasty", "docsByTag": 2, "viewsByTag": 13},
+];
+
+assert.docEq(g1.cursor.firstBatch, g1result, 'g1 failed');
+
+// $max, and averaging in a final projection; _id is structured
+let g2 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
{
- "_id": 2,
- "title": "this is your title",
- "author": "dave",
- "pageViews": 7,
- "tags": "nasty",
- "comments": [
- {"author": "barbara", "text": "this is interesting"},
- {"author": "jenny", "text": "i like to play pinball", "votes": 10}
- ]
+ $group: {
+ _id: {tags: "$tags"},
+ docsByTag: {$sum: 1},
+ viewsByTag: {$sum: "$pageViews"},
+ mostViewsByTag: {$max: "$pageViews"},
+ }
},
{
- "_id": 3,
- "title": "this is some other title",
- "author": "jane",
- "pageViews": 6,
- "tags": "nasty",
- "comments": [
- {"author": "will", "text": "i don't like the color"},
- {"author": "jenny", "text": "can i get that in green?"}
- ]
- }
- ];
-
- assert.docEq(m2.cursor.firstBatch, m2result, 'm2 failed');
-
- // group by tag, _id is a field reference
- let g1 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {author: 1, tags: 1, pageViews: 1}},
- {$unwind: "$tags"},
- {$group: {_id: "$tags", docsByTag: {$sum: 1}, viewsByTag: {$sum: "$pageViews"}}},
- {$sort: {'_id': 1}}
- ],
- cursor: {}
- });
-
- let g1result = [
- {"_id": "filthy", "docsByTag": 1, "viewsByTag": 6},
- {"_id": "fun", "docsByTag": 3, "viewsByTag": 17},
- {"_id": "good", "docsByTag": 1, "viewsByTag": 5},
- {"_id": "nasty", "docsByTag": 2, "viewsByTag": 13},
- ];
-
- assert.docEq(g1.cursor.firstBatch, g1result, 'g1 failed');
-
- // $max, and averaging in a final projection; _id is structured
- let g2 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {author: 1, tags: 1, pageViews: 1}},
- {$unwind: "$tags"},
- {
- $group: {
- _id: {tags: "$tags"},
- docsByTag: {$sum: 1},
- viewsByTag: {$sum: "$pageViews"},
- mostViewsByTag: {$max: "$pageViews"},
- }
- },
- {
- $project: {
- _id: false,
- tag: "$_id.tags",
- mostViewsByTag: 1,
- docsByTag: 1,
- viewsByTag: 1,
- avgByTag: {$divide: ["$viewsByTag", "$docsByTag"]}
- }
- },
- {$sort: {'docsByTag': 1, 'viewsByTag': 1}}
- ],
- cursor: {}
- });
-
- let g2result = [
- {"docsByTag": 1, "viewsByTag": 5, "mostViewsByTag": 5, "tag": "good", "avgByTag": 5},
- {"docsByTag": 1, "viewsByTag": 6, "mostViewsByTag": 6, "tag": "filthy", "avgByTag": 6},
- {"docsByTag": 2, "viewsByTag": 13, "mostViewsByTag": 7, "tag": "nasty", "avgByTag": 6.5},
+ $project: {
+ _id: false,
+ tag: "$_id.tags",
+ mostViewsByTag: 1,
+ docsByTag: 1,
+ viewsByTag: 1,
+ avgByTag: {$divide: ["$viewsByTag", "$docsByTag"]}
+ }
+ },
+ {$sort: {'docsByTag': 1, 'viewsByTag': 1}}
+ ],
+ cursor: {}
+});
+
+let g2result = [
+ {"docsByTag": 1, "viewsByTag": 5, "mostViewsByTag": 5, "tag": "good", "avgByTag": 5},
+ {"docsByTag": 1, "viewsByTag": 6, "mostViewsByTag": 6, "tag": "filthy", "avgByTag": 6},
+ {"docsByTag": 2, "viewsByTag": 13, "mostViewsByTag": 7, "tag": "nasty", "avgByTag": 6.5},
+ {
+ "docsByTag": 3,
+ "viewsByTag": 17,
+ "mostViewsByTag": 7,
+ "tag": "fun",
+ "avgByTag": 5.666666666666667
+ }
+];
+
+assert.docEq(g2.cursor.firstBatch, g2result, 'g2 failed');
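+// (g4 below computes the same per-tag averages with the $avg accumulator instead.)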
+
+// $push as an accumulator; can pivot data
+let g3 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "docsByTag": 3,
- "viewsByTag": 17,
- "mostViewsByTag": 7,
- "tag": "fun",
- "avgByTag": 5.666666666666667
- }
- ];
-
- assert.docEq(g2.cursor.firstBatch, g2result, 'g2 failed');
-
- // $push as an accumulator; can pivot data
- let g3 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- author: 1,
- tags: 1,
- }
- },
- {$unwind: "$tags"},
- {$sort: {author: 1}},
- {$group: {_id: {tags: "$tags"}, authors: {$push: "$author"}}},
- {$sort: {'_id': 1}}
- ],
- cursor: {}
- });
-
- let g3result = [
- {"_id": {"tags": "filthy"}, "authors": ["jane"]},
- {"_id": {"tags": "fun"}, "authors": ["bob", "bob", "dave"]},
- {"_id": {"tags": "good"}, "authors": ["bob"]},
- {"_id": {"tags": "nasty"}, "authors": ["dave", "jane"]}
- ];
-
- assert.docEq(g3.cursor.firstBatch, g3result, 'g3 failed');
-
- // $avg, and averaging in a final projection
- let g4 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$project: {author: 1, tags: 1, pageViews: 1}},
- {$unwind: "$tags"},
- {
- $group: {
- _id: {tags: "$tags"},
- docsByTag: {$sum: 1},
- viewsByTag: {$sum: "$pageViews"},
- avgByTag: {$avg: "$pageViews"},
- }
- },
- {$sort: {'_id': 1}}
- ],
- cursor: {}
- });
-
- let g4result = [
- {"_id": {"tags": "filthy"}, "docsByTag": 1, "viewsByTag": 6, "avgByTag": 6},
- {"_id": {"tags": "fun"}, "docsByTag": 3, "viewsByTag": 17, "avgByTag": 5.666666666666667},
- {"_id": {"tags": "good"}, "docsByTag": 1, "viewsByTag": 5, "avgByTag": 5},
- {"_id": {"tags": "nasty"}, "docsByTag": 2, "viewsByTag": 13, "avgByTag": 6.5}
- ];
-
- assert.docEq(g4.cursor.firstBatch, g4result, 'g4 failed');
-
- // $addToSet as an accumulator; can pivot data
- let g5 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {
- $project: {
- author: 1,
- tags: 1,
- }
- },
- {$unwind: "$tags"},
- {$group: {_id: {tags: "$tags"}, authors: {$addToSet: "$author"}}},
- {$sort: {'_id': 1}}
- ],
- cursor: {}
- });
-
- // $addToSet doesn't guarantee order so we shouldn't test for it.
- g5.cursor.firstBatch.forEach(function(obj) {
- obj.authors.sort();
- });
-
- let g5result = [
- {"_id": {"tags": "filthy"}, "authors": ["jane"]},
+ $project: {
+ author: 1,
+ tags: 1,
+ }
+ },
+ {$unwind: "$tags"},
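+ // Sort by author so the $push order below is deterministic.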
+ {$sort: {author: 1}},
+ {$group: {_id: {tags: "$tags"}, authors: {$push: "$author"}}},
+ {$sort: {'_id': 1}}
+ ],
+ cursor: {}
+});
+
+let g3result = [
+ {"_id": {"tags": "filthy"}, "authors": ["jane"]},
+ {"_id": {"tags": "fun"}, "authors": ["bob", "bob", "dave"]},
+ {"_id": {"tags": "good"}, "authors": ["bob"]},
+ {"_id": {"tags": "nasty"}, "authors": ["dave", "jane"]}
+];
+
+assert.docEq(g3.cursor.firstBatch, g3result, 'g3 failed');
+
+// $avg as an accumulator in $group
+let g4 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
{
- "_id": {"tags": "fun"},
- "authors": [
- "bob",
- "dave",
- ]
+ $group: {
+ _id: {tags: "$tags"},
+ docsByTag: {$sum: 1},
+ viewsByTag: {$sum: "$pageViews"},
+ avgByTag: {$avg: "$pageViews"},
+ }
},
- {"_id": {"tags": "good"}, "authors": ["bob"]},
+ {$sort: {'_id': 1}}
+ ],
+ cursor: {}
+});
+
+let g4result = [
+ {"_id": {"tags": "filthy"}, "docsByTag": 1, "viewsByTag": 6, "avgByTag": 6},
+ {"_id": {"tags": "fun"}, "docsByTag": 3, "viewsByTag": 17, "avgByTag": 5.666666666666667},
+ {"_id": {"tags": "good"}, "docsByTag": 1, "viewsByTag": 5, "avgByTag": 5},
+ {"_id": {"tags": "nasty"}, "docsByTag": 2, "viewsByTag": 13, "avgByTag": 6.5}
+];
+
+assert.docEq(g4.cursor.firstBatch, g4result, 'g4 failed');
+
+// $addToSet as an accumulator; can pivot data
+let g5 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
{
- "_id": {"tags": "nasty"},
- "authors": [
- "dave",
- "jane",
- ]
- }
- ];
-
- assert.docEq(g5.cursor.firstBatch, g5result, 'g5 failed');
-
- // $first and $last accumulators, constant _id
- let g6 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$sort: {author: -1}},
- {
- $group: {
- _id: "authors", /* constant string, *not* a field reference */
- firstAuthor: {$last: "$author"}, /* note reverse sort above */
- lastAuthor: {$first: "$author"}, /* note reverse sort above */
- count: {$sum: 1}
- }
+ $project: {
+ author: 1,
+ tags: 1,
}
- ],
- cursor: {}
- });
-
- let g6result = [{"_id": "authors", firstAuthor: "bob", lastAuthor: "jane", count: 3}];
-
- // Test unwind on an unused field
- let g7 = testDB.runCommand({
- aggregate: "article",
- pipeline: [
- {$unwind: '$tags'},
- {
- $group: {
- _id: "tag_count", /* constant string, *not* a field reference */
- count: {$sum: 1}
- }
+ },
+ {$unwind: "$tags"},
+ {$group: {_id: {tags: "$tags"}, authors: {$addToSet: "$author"}}},
+ {$sort: {'_id': 1}}
+ ],
+ cursor: {}
+});
+
+// $addToSet doesn't guarantee order, so we shouldn't test for it.
+g5.cursor.firstBatch.forEach(function(obj) {
+ obj.authors.sort();
+});
+
+let g5result = [
+ {"_id": {"tags": "filthy"}, "authors": ["jane"]},
+ {
+ "_id": {"tags": "fun"},
+ "authors": [
+ "bob",
+ "dave",
+ ]
+ },
+ {"_id": {"tags": "good"}, "authors": ["bob"]},
+ {
+ "_id": {"tags": "nasty"},
+ "authors": [
+ "dave",
+ "jane",
+ ]
+ }
+];
+
+assert.docEq(g5.cursor.firstBatch, g5result, 'g5 failed');
+
+// $first and $last accumulators, constant _id
+let g6 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$sort: {author: -1}},
+ {
+ $group: {
+ _id: "authors", /* constant string, *not* a field reference */
+ firstAuthor: {$last: "$author"}, /* note reverse sort above */
+ lastAuthor: {$first: "$author"}, /* note reverse sort above */
+ count: {$sum: 1}
}
- ],
- cursor: {}
- });
- assert.eq(g7.cursor.firstBatch[0].count, 7);
+ }
+ ],
+ cursor: {}
+});
+
+let g6result = [{"_id": "authors", firstAuthor: "bob", lastAuthor: "jane", count: 3}];
+
+// Test $unwind on a field that no later stage uses
+let g7 = testDB.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$unwind: '$tags'},
+ {
+ $group: {
+ _id: "tag_count", /* constant string, *not* a field reference */
+ count: {$sum: 1}
+ }
+ }
+ ],
+ cursor: {}
+});
+assert.eq(g7.cursor.firstBatch[0].count, 7);
}());
diff --git a/jstests/aggregation/testutils.js b/jstests/aggregation/testutils.js
index f4c5c1e296a..33c681d5dd9 100644
--- a/jstests/aggregation/testutils.js
+++ b/jstests/aggregation/testutils.js
@@ -1,143 +1,143 @@
// Tests the test utilities themselves.
(function() {
- load("jstests/aggregation/extras/utils.js");
-
- const verbose = false;
-
- const example = [
- {_id: ObjectId("4dc07fedd8420ab8d0d4066d"), pageViews: 5, tags: ["fun", "good"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["nasty", "filthy"]}
- ];
-
- assert(arrayEq(example, example, verbose));
- assert(resultsEq(example, example, verbose));
-
- const exampleDifferentOrder = [
- {_id: ObjectId("4dc07fedd8420ab8d0d4066d"), pageViews: 5, tags: ["fun", "good"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["nasty", "filthy"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
- ];
-
- assert(resultsEq(exampleDifferentOrder, example, verbose));
- assert(resultsEq(example, exampleDifferentOrder, verbose));
- assert(!orderedArrayEq(example, exampleDifferentOrder, verbose));
-
- const exampleFewerEntries = [
- {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["nasty", "filthy"]}
- ];
-
- assert(!resultsEq(example, exampleFewerEntries, verbose));
- assert(!resultsEq(exampleFewerEntries, example, verbose));
-
- const exampleNoIds = [
- {pageViews: 5, tags: ["fun", "good"]},
- {pageViews: 7, tags: ["fun", "nasty"]},
- {pageViews: 6, tags: ["nasty", "filthy"]}
- ];
-
- assert(!resultsEq(example, exampleNoIds, verbose));
- assert(!resultsEq(exampleNoIds, example, verbose));
-
- const exampleMissingTags = [
- {_id: ObjectId("4dc07fedd8420ab8d0d4066d"), pageViews: 5, tags: ["fun"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
- {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["filthy"]}
- ];
-
- assert(!resultsEq(example, exampleMissingTags, verbose));
- assert(!resultsEq(exampleMissingTags, example, verbose));
-
- const exampleDifferentIds = [
- {_id: 0, pageViews: 5, tags: ["fun", "good"]},
- {_id: 1, pageViews: 7, tags: ["fun", "nasty"]},
- {_id: 2, pageViews: 6, tags: ["nasty", "filthy"]}
- ];
- assert(resultsEq(example, exampleDifferentIds));
- assert(resultsEq(exampleDifferentIds, example));
-
- // Test using a custom comparator.
- assert(customDocumentEq({
- left: {a: 1, b: 3},
- right: {a: "ignore", b: 3},
- verbose: verbose,
- valueComparator: (l, r) => {
- if (l == "ignore" || r == "ignore") {
- return true;
- }
- return l == r;
+load("jstests/aggregation/extras/utils.js");
+
+const verbose = false;
+
+const example = [
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066d"), pageViews: 5, tags: ["fun", "good"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["nasty", "filthy"]}
+];
+
+assert(arrayEq(example, example, verbose));
+assert(resultsEq(example, example, verbose));
+
+const exampleDifferentOrder = [
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066d"), pageViews: 5, tags: ["fun", "good"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["nasty", "filthy"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
+];
+
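+// resultsEq is order-insensitive; orderedArrayEq additionally requires matching element order.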
+assert(resultsEq(exampleDifferentOrder, example, verbose));
+assert(resultsEq(example, exampleDifferentOrder, verbose));
+assert(!orderedArrayEq(example, exampleDifferentOrder, verbose));
+
+const exampleFewerEntries = [
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["nasty", "filthy"]}
+];
+
+assert(!resultsEq(example, exampleFewerEntries, verbose));
+assert(!resultsEq(exampleFewerEntries, example, verbose));
+
+const exampleNoIds = [
+ {pageViews: 5, tags: ["fun", "good"]},
+ {pageViews: 7, tags: ["fun", "nasty"]},
+ {pageViews: 6, tags: ["nasty", "filthy"]}
+];
+
+assert(!resultsEq(example, exampleNoIds, verbose));
+assert(!resultsEq(exampleNoIds, example, verbose));
+
+const exampleMissingTags = [
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066d"), pageViews: 5, tags: ["fun"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066e"), pageViews: 7, tags: ["fun", "nasty"]},
+ {_id: ObjectId("4dc07fedd8420ab8d0d4066f"), pageViews: 6, tags: ["filthy"]}
+];
+
+assert(!resultsEq(example, exampleMissingTags, verbose));
+assert(!resultsEq(exampleMissingTags, example, verbose));
+
+const exampleDifferentIds = [
+ {_id: 0, pageViews: 5, tags: ["fun", "good"]},
+ {_id: 1, pageViews: 7, tags: ["fun", "nasty"]},
+ {_id: 2, pageViews: 6, tags: ["nasty", "filthy"]}
+];
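+// resultsEq does not require _id values to match, so these still compare equal to 'example'.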
+assert(resultsEq(example, exampleDifferentIds));
+assert(resultsEq(exampleDifferentIds, example));
+
+// Test using a custom comparator.
+assert(customDocumentEq({
+ left: {a: 1, b: 3},
+ right: {a: "ignore", b: 3},
+ verbose: verbose,
+ valueComparator: (l, r) => {
+ if (l == "ignore" || r == "ignore") {
+ return true;
}
- }));
- assert(!customDocumentEq({
- left: {a: 1, b: 3},
- right: {a: 3, b: 3},
- valueComparator: (l, r) => {
- if (l == "ignore" || r == "ignore") {
- return true;
- }
- return l == r;
+ return l == r;
+ }
+}));
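+// Here neither value is "ignore", so the comparator falls through to == and the documents differ.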
+assert(!customDocumentEq({
+ left: {a: 1, b: 3},
+ right: {a: 3, b: 3},
+ valueComparator: (l, r) => {
+ if (l == "ignore" || r == "ignore") {
+ return true;
}
- }));
-
- // Test using a custom comparator with arrays.
- assert(customDocumentEq({
- left: {a: [1, 2], b: 3},
- right: {a: [2, "ignore"], b: 3},
- verbose: verbose,
- valueComparator: (l, r) => {
- if (l == "ignore" || r == "ignore") {
- return true;
- }
- return l == r;
+ return l == r;
+ }
+}));
+
+// Test using a custom comparator with arrays.
+assert(customDocumentEq({
+ left: {a: [1, 2], b: 3},
+ right: {a: [2, "ignore"], b: 3},
+ verbose: verbose,
+ valueComparator: (l, r) => {
+ if (l == "ignore" || r == "ignore") {
+ return true;
}
- }));
- assert(!customDocumentEq({
- left: {a: [1, 2], b: 3},
- right: {a: [3, "ignore"], b: 3},
- verbose: verbose,
- valueComparator: (l, r) => {
- if (l == "ignore" || r == "ignore") {
- return true;
- }
- return l == r;
+ return l == r;
+ }
+}));
+assert(!customDocumentEq({
+ left: {a: [1, 2], b: 3},
+ right: {a: [3, "ignore"], b: 3},
+ verbose: verbose,
+ valueComparator: (l, r) => {
+ if (l == "ignore" || r == "ignore") {
+ return true;
}
- }));
-
- // Test using a custom comparator with arrays of objects.
- assert(customDocumentEq({
- left: {a: [{b: 1}, {b: 2}, {b: 3}]},
- right: {a: [{b: "ignore"}, {b: 2}, {b: 3}]},
- verbose: verbose,
- valueComparator: (l, r) => {
- if (l == "ignore" || r == "ignore") {
- return true;
- }
- return l == r;
+ return l == r;
+ }
+}));
+
+// Test using a custom comparator with arrays of objects.
+assert(customDocumentEq({
+ left: {a: [{b: 1}, {b: 2}, {b: 3}]},
+ right: {a: [{b: "ignore"}, {b: 2}, {b: 3}]},
+ verbose: verbose,
+ valueComparator: (l, r) => {
+ if (l == "ignore" || r == "ignore") {
+ return true;
}
- }));
- assert(!customDocumentEq({
- left: {a: [{b: 1}, {b: 2}, {b: 1}]},
- right: {a: [{b: "ignore"}, {b: 2}, {b: 3}]},
- verbose: verbose,
- valueComparator: (l, r) => {
- if (l == "ignore" || r == "ignore") {
- return true;
- }
- return l == r;
+ return l == r;
+ }
+}));
+assert(!customDocumentEq({
+ left: {a: [{b: 1}, {b: 2}, {b: 1}]},
+ right: {a: [{b: "ignore"}, {b: 2}, {b: 3}]},
+ verbose: verbose,
+ valueComparator: (l, r) => {
+ if (l == "ignore" || r == "ignore") {
+ return true;
}
- }));
-
- assert(!anyEq(5, [5], verbose));
- assert(!anyEq([5], 5, verbose));
- assert(!anyEq("5", 5, verbose));
- assert(!anyEq(5, "5", verbose));
-
- assert(arrayEq([{c: 6}, [5], [4, 5], 2, undefined, 3, null, 4, 5],
- [undefined, null, 2, 3, 4, 5, {c: 6}, [4, 5], [5]],
- verbose));
-
- assert(arrayEq([undefined, null, 2, 3, 4, 5, {c: 6}, [4, 5], [5]],
- [{c: 6}, [5], [4, 5], 2, undefined, 3, null, 4, 5],
- verbose));
+ return l == r;
+ }
+}));
+
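+// anyEq does not coerce between types or between scalars and arrays.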
+assert(!anyEq(5, [5], verbose));
+assert(!anyEq([5], 5, verbose));
+assert(!anyEq("5", 5, verbose));
+assert(!anyEq(5, "5", verbose));
+
+assert(arrayEq([{c: 6}, [5], [4, 5], 2, undefined, 3, null, 4, 5],
+ [undefined, null, 2, 3, 4, 5, {c: 6}, [4, 5], [5]],
+ verbose));
+
+assert(arrayEq([undefined, null, 2, 3, 4, 5, {c: 6}, [4, 5], [5]],
+ [{c: 6}, [5], [4, 5], 2, undefined, 3, null, 4, 5],
+ verbose));
}());
diff --git a/jstests/aggregation/use_query_project_and_sort.js b/jstests/aggregation/use_query_project_and_sort.js
index 4d3c4a7a45a..191b4d78d3f 100644
--- a/jstests/aggregation/use_query_project_and_sort.js
+++ b/jstests/aggregation/use_query_project_and_sort.js
@@ -6,64 +6,55 @@
// in $facet stages:
// @tags: [do_not_wrap_aggregations_in_facets]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
- const coll = db.use_query_project_and_sort;
- coll.drop();
+const coll = db.use_query_project_and_sort;
+coll.drop();
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 100; ++i) {
- bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
- }
- assert.writeOK(bulk.execute());
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 100; ++i) {
+ bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
+}
+assert.writeOK(bulk.execute());
- function assertQueryCoversProjectionAndSort(pipeline) {
- const explainOutput = coll.explain().aggregate(pipeline);
- assert(isQueryPlan(explainOutput));
- assert(!planHasStage(db, explainOutput, "FETCH"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to include a FETCH stage in the explain output: " +
- tojson(explainOutput));
- assert(!planHasStage(db, explainOutput, "SORT"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to include a SORT stage in the explain output: " +
- tojson(explainOutput));
- assert(planHasStage(db, explainOutput, "IXSCAN"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include an index scan in the explain output: " + tojson(explainOutput));
- assert(!hasRejectedPlans(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " not to have any rejected plans in the explain output: " +
- tojson(explainOutput));
- return explainOutput;
- }
+function assertQueryCoversProjectionAndSort(pipeline) {
+ const explainOutput = coll.explain().aggregate(pipeline);
+ assert(isQueryPlan(explainOutput));
+ assert(!planHasStage(db, explainOutput, "FETCH"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to include a FETCH stage in the explain output: " + tojson(explainOutput));
+ assert(!planHasStage(db, explainOutput, "SORT"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to include a SORT stage in the explain output: " + tojson(explainOutput));
+ assert(planHasStage(db, explainOutput, "IXSCAN"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to include an index scan in the explain output: " + tojson(explainOutput));
+ assert(!hasRejectedPlans(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " not to have any rejected plans in the explain output: " + tojson(explainOutput));
+ return explainOutput;
+}
- assert.commandWorked(coll.createIndex({x: 1, a: -1, _id: 1}));
+assert.commandWorked(coll.createIndex({x: 1, a: -1, _id: 1}));
- // Test that a pipeline requiring a subset of the fields in a compound index can use that index
- // to cover the query.
- assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: 1}}, {$project: {_id: 0, x: 1}}]);
- assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: 1}}, {$project: {_id: 1, x: 1}}]);
- assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: -1, a: 1}}, {$project: {_id: 1, x: 1}}]);
- assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1}}]);
- assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1}}]);
- assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, a: 1}}]);
- assertQueryCoversProjectionAndSort([
- {$match: {x: "string"}},
- {$sort: {x: 1, a: -1, _id: 1}},
- {$project: {_id: 0, a: 1, x: 1}}
- ]);
- assertQueryCoversProjectionAndSort([
- {$match: {x: "string"}},
- {$sort: {x: 1, a: -1, _id: 1}},
- {$project: {_id: 1, x: 1, a: 1}}
- ]);
+// Test that a pipeline requiring a subset of the fields in a compound index can use that index
+// to cover the query.
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1}}, {$project: {_id: 0, x: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1}}, {$project: {_id: 1, x: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: -1, a: 1}}, {$project: {_id: 1, x: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, a: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 0, a: 1, x: 1}}]);
+assertQueryCoversProjectionAndSort(
+ [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1, a: 1}}]);
}());
diff --git a/jstests/aggregation/use_query_projection.js b/jstests/aggregation/use_query_projection.js
index e86a357b782..dccc24f58b9 100644
--- a/jstests/aggregation/use_query_projection.js
+++ b/jstests/aggregation/use_query_projection.js
@@ -6,122 +6,114 @@
// consider an index scan, so the pipelines cannot be wrapped in facet stages.
// @tags: [do_not_wrap_aggregations_in_facets]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
- const coll = db.use_query_projection;
- coll.drop();
+const coll = db.use_query_projection;
+coll.drop();
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 100; ++i) {
- bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
- }
- assert.writeOK(bulk.execute());
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 100; ++i) {
+ bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
+}
+assert.writeOK(bulk.execute());
- function assertQueryCoversProjection({pipeline = [], pipelineOptimizedAway = true} = {}) {
- const explainOutput = coll.explain().aggregate(pipeline);
+function assertQueryCoversProjection({pipeline = [], pipelineOptimizedAway = true} = {}) {
+ const explainOutput = coll.explain().aggregate(pipeline);
- if (pipelineOptimizedAway) {
- assert(isQueryPlan(explainOutput));
- assert(!planHasStage(db, explainOutput, "FETCH"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to include a FETCH stage in the explain output: " +
- tojson(explainOutput));
- assert(planHasStage(db, explainOutput, "IXSCAN"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include an index scan in the explain output: " + tojson(explainOutput));
- } else {
- assert(isAggregationPlan(explainOutput));
- assert(!aggPlanHasStage(explainOutput, "FETCH"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to include a FETCH stage in the explain output: " +
- tojson(explainOutput));
- assert(aggPlanHasStage(explainOutput, "IXSCAN"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include an index scan in the explain output: " + tojson(explainOutput));
- }
- assert(!hasRejectedPlans(explainOutput),
+ if (pipelineOptimizedAway) {
+ assert(isQueryPlan(explainOutput));
+ assert(
+ !planHasStage(db, explainOutput, "FETCH"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to include a FETCH stage in the explain output: " + tojson(explainOutput));
+ assert(planHasStage(db, explainOutput, "IXSCAN"),
"Expected pipeline " + tojsononeline(pipeline) +
- " not to have any rejected plans in the explain output: " +
- tojson(explainOutput));
- return explainOutput;
+ " to include an index scan in the explain output: " + tojson(explainOutput));
+ } else {
+ assert(isAggregationPlan(explainOutput));
+ assert(
+ !aggPlanHasStage(explainOutput, "FETCH"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to include a FETCH stage in the explain output: " + tojson(explainOutput));
+ assert(aggPlanHasStage(explainOutput, "IXSCAN"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to include an index scan in the explain output: " + tojson(explainOutput));
}
+ assert(!hasRejectedPlans(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " not to have any rejected plans in the explain output: " + tojson(explainOutput));
+ return explainOutput;
+}
- function assertQueryDoesNotCoverProjection({pipeline = [], pipelineOptimizedAway = true} = {}) {
- const explainOutput = coll.explain().aggregate(pipeline);
+function assertQueryDoesNotCoverProjection({pipeline = [], pipelineOptimizedAway = true} = {}) {
+ const explainOutput = coll.explain().aggregate(pipeline);
- if (pipelineOptimizedAway) {
- assert(isQueryPlan(explainOutput));
- assert(planHasStage(db, explainOutput, "FETCH") || aggPlanHasStage("COLLSCAN"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include a FETCH or COLLSCAN stage in the explain output: " +
- tojson(explainOutput));
- assert(!hasRejectedPlans(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " not to have any rejected plans in the explain output: " +
- tojson(explainOutput));
- } else {
- assert(isAggregationPlan(explainOutput));
- assert(aggPlanHasStage(explainOutput, "FETCH") || aggPlanHasStage("COLLSCAN"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include a FETCH or COLLSCAN stage in the explain output: " +
- tojson(explainOutput));
- assert(!hasRejectedPlans(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " not to have any rejected plans in the explain output: " +
- tojson(explainOutput));
- }
-
- return explainOutput;
+ if (pipelineOptimizedAway) {
+ assert(isQueryPlan(explainOutput));
+ assert(planHasStage(db, explainOutput, "FETCH") || aggPlanHasStage("COLLSCAN"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to include a FETCH or COLLSCAN stage in the explain output: " +
+ tojson(explainOutput));
+ assert(
+ !hasRejectedPlans(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " not to have any rejected plans in the explain output: " + tojson(explainOutput));
+ } else {
+ assert(isAggregationPlan(explainOutput));
+ assert(aggPlanHasStage(explainOutput, "FETCH") || aggPlanHasStage("COLLSCAN"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to include a FETCH or COLLSCAN stage in the explain output: " +
+ tojson(explainOutput));
+ assert(
+ !hasRejectedPlans(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " not to have any rejected plans in the explain output: " + tojson(explainOutput));
}
- assert.commandWorked(coll.createIndex({x: 1, a: -1, _id: 1}));
+ return explainOutput;
+}
+
+assert.commandWorked(coll.createIndex({x: 1, a: -1, _id: 1}));
- // Test that a pipeline requiring a subset of the fields in a compound index can use that index
- // to cover the query.
- assertQueryCoversProjection({pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}]});
- assertQueryCoversProjection({pipeline: [{$match: {x: "string"}}, {$project: {_id: 0, x: 1}}]});
- assertQueryCoversProjection(
- {pipeline: [{$match: {x: "string"}}, {$project: {_id: 0, x: 1, a: 1}}]});
- assertQueryCoversProjection(
- {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]});
- assertQueryCoversProjection(
- {pipeline: [{$match: {_id: 0, x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]});
+// Test that a pipeline requiring a subset of the fields in a compound index can use that index
+// to cover the query.
+assertQueryCoversProjection({pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}]});
+assertQueryCoversProjection({pipeline: [{$match: {x: "string"}}, {$project: {_id: 0, x: 1}}]});
+assertQueryCoversProjection(
+ {pipeline: [{$match: {x: "string"}}, {$project: {_id: 0, x: 1, a: 1}}]});
+assertQueryCoversProjection(
+ {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]});
+assertQueryCoversProjection(
+ {pipeline: [{$match: {_id: 0, x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]});
- // Test that a pipeline requiring a field that is not in the index cannot use a covered plan.
- assertQueryDoesNotCoverProjection({
- pipeline: [{$match: {x: "string"}}, {$project: {notThere: 1}}],
- pipelineOptimizedAway: false
- });
+// Test that a pipeline requiring a field that is not in the index cannot use a covered plan.
+assertQueryDoesNotCoverProjection(
+ {pipeline: [{$match: {x: "string"}}, {$project: {notThere: 1}}], pipelineOptimizedAway: false});
- // Test that a covered plan is the only plan considered, even if another plan would be equally
- // selective. Add an equally selective index, then rely on assertQueryCoversProjection() to
- // assert that there is only one considered plan, and it is a covered plan.
- assert.commandWorked(coll.createIndex({x: 1}));
- assertQueryCoversProjection({
- pipeline: [
- {$match: {_id: 0, x: "string"}},
- {
- $sort: {
- x: 1,
- a: 1
- }
- }, // Note: not indexable, but doesn't add any additional dependencies.
- {$project: {_id: 1, x: 1, a: 1}},
- ],
- pipelineOptimizedAway: false
- });
+// Test that a covered plan is the only plan considered, even if another plan would be equally
+// selective. Add an equally selective index, then rely on assertQueryCoversProjection() to
+// assert that there is only one considered plan, and it is a covered plan.
+assert.commandWorked(coll.createIndex({x: 1}));
+assertQueryCoversProjection({
+ pipeline: [
+ {$match: {_id: 0, x: "string"}},
+ {$sort: {x: 1, a: 1}}, // Note: not indexable, but doesn't add any additional dependencies.
+ {$project: {_id: 1, x: 1, a: 1}},
+ ],
+ pipelineOptimizedAway: false
+});
- // Test that a multikey index will prevent a covered plan.
- assert.commandWorked(coll.dropIndex({x: 1})); // Make sure there is only one plan considered.
- assert.writeOK(coll.insert({x: ["an", "array!"]}));
- assertQueryDoesNotCoverProjection({
- pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}],
- pipelineOptimizedAway: false
- });
- assertQueryDoesNotCoverProjection({
- pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}],
- pipelineOptimizedAway: false
- });
+// Test that a multikey index will prevent a covered plan.
+assert.commandWorked(coll.dropIndex({x: 1})); // Make sure there is only one plan considered.
+assert.writeOK(coll.insert({x: ["an", "array!"]}));
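+// The array value above makes the index multikey, which disqualifies covered plans.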
+assertQueryDoesNotCoverProjection({
+ pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}],
+ pipelineOptimizedAway: false
+});
+assertQueryDoesNotCoverProjection({
+ pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}],
+ pipelineOptimizedAway: false
+});
}());
diff --git a/jstests/aggregation/use_query_sort.js b/jstests/aggregation/use_query_sort.js
index 26542252ff4..af9338be79e 100644
--- a/jstests/aggregation/use_query_sort.js
+++ b/jstests/aggregation/use_query_sort.js
@@ -5,80 +5,76 @@
// in $facet stages:
// @tags: [do_not_wrap_aggregations_in_facets]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
- const coll = db.use_query_sort;
- coll.drop();
+const coll = db.use_query_sort;
+coll.drop();
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 100; ++i) {
- bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
- }
- assert.writeOK(bulk.execute());
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 100; ++i) {
+ bulk.insert({_id: i, x: "string", a: -i, y: i % 2});
+}
+assert.writeOK(bulk.execute());
- function assertHasNonBlockingQuerySort(pipeline) {
- const explainOutput = coll.explain().aggregate(pipeline);
- assert(isQueryPlan(explainOutput));
- assert(!planHasStage(db, explainOutput, "SORT"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to include a SORT stage in the explain output: " +
- tojson(explainOutput));
- assert(planHasStage(db, explainOutput, "IXSCAN"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include an index scan in the explain output: " + tojson(explainOutput));
- assert(!hasRejectedPlans(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " not to have any rejected plans in the explain output: " +
- tojson(explainOutput));
- return explainOutput;
- }
+function assertHasNonBlockingQuerySort(pipeline) {
+ const explainOutput = coll.explain().aggregate(pipeline);
+ assert(isQueryPlan(explainOutput));
+ assert(!planHasStage(db, explainOutput, "SORT"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to include a SORT stage in the explain output: " + tojson(explainOutput));
+ assert(planHasStage(db, explainOutput, "IXSCAN"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to include an index scan in the explain output: " + tojson(explainOutput));
+ assert(!hasRejectedPlans(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " not to have any rejected plans in the explain output: " + tojson(explainOutput));
+ return explainOutput;
+}
- function assertDoesNotHaveQuerySort(pipeline) {
- const explainOutput = coll.explain().aggregate(pipeline);
- assert(isAggregationPlan(explainOutput));
- assert(aggPlanHasStage(explainOutput, "$sort"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " to include a $sort stage in the explain output: " + tojson(explainOutput));
- assert(!aggPlanHasStage(explainOutput, "SORT"),
- "Expected pipeline " + tojsononeline(pipeline) +
- " *not* to include a SORT stage in the explain output: " +
- tojson(explainOutput));
- assert(!hasRejectedPlans(explainOutput),
- "Expected pipeline " + tojsononeline(pipeline) +
- " not to have any rejected plans in the explain output: " +
- tojson(explainOutput));
- return explainOutput;
- }
+function assertDoesNotHaveQuerySort(pipeline) {
+ const explainOutput = coll.explain().aggregate(pipeline);
+ assert(isAggregationPlan(explainOutput));
+ assert(aggPlanHasStage(explainOutput, "$sort"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " to include a $sort stage in the explain output: " + tojson(explainOutput));
+ assert(!aggPlanHasStage(explainOutput, "SORT"),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " *not* to include a SORT stage in the explain output: " + tojson(explainOutput));
+ assert(!hasRejectedPlans(explainOutput),
+ "Expected pipeline " + tojsononeline(pipeline) +
+ " not to have any rejected plans in the explain output: " + tojson(explainOutput));
+ return explainOutput;
+}
- // Test that a sort on the _id can use the query system to provide the sort.
- assertHasNonBlockingQuerySort([{$sort: {_id: -1}}]);
- assertHasNonBlockingQuerySort([{$sort: {_id: 1}}]);
- assertHasNonBlockingQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {_id: 1}}]);
- assertHasNonBlockingQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {_id: -1}}]);
+// Test that a sort on the _id field can use the query system to provide the sort.
+assertHasNonBlockingQuerySort([{$sort: {_id: -1}}]);
+assertHasNonBlockingQuerySort([{$sort: {_id: 1}}]);
+assertHasNonBlockingQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {_id: 1}}]);
+assertHasNonBlockingQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {_id: -1}}]);
- // Test that a sort on a field not in any index cannot use a query system sort, and thus still
- // has a $sort stage.
- assertDoesNotHaveQuerySort([{$sort: {x: -1}}]);
- assertDoesNotHaveQuerySort([{$sort: {x: 1}}]);
- assertDoesNotHaveQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {x: 1}}]);
+// Test that a sort on a field not in any index cannot use a query system sort, and thus still
+// has a $sort stage.
+assertDoesNotHaveQuerySort([{$sort: {x: -1}}]);
+assertDoesNotHaveQuerySort([{$sort: {x: 1}}]);
+assertDoesNotHaveQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {x: 1}}]);
- assert.commandWorked(coll.createIndex({x: 1, y: -1}));
+assert.commandWorked(coll.createIndex({x: 1, y: -1}));
- assertHasNonBlockingQuerySort([{$sort: {x: 1, y: -1}}]);
- assertHasNonBlockingQuerySort([{$sort: {x: 1}}]);
- assertDoesNotHaveQuerySort([{$sort: {y: 1}}]);
- assertDoesNotHaveQuerySort([{$sort: {x: 1, y: 1}}]);
+assertHasNonBlockingQuerySort([{$sort: {x: 1, y: -1}}]);
+assertHasNonBlockingQuerySort([{$sort: {x: 1}}]);
+assertDoesNotHaveQuerySort([{$sort: {y: 1}}]);
+assertDoesNotHaveQuerySort([{$sort: {x: 1, y: 1}}]);
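+// {y: 1} is not a prefix of the index, and {x: 1, y: 1} matches neither the index order nor its
+// reverse.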
- // Test that a $match on a field not present in the same index eligible to provide a sort can
- // still result in a index scan on the sort field (SERVER-7568).
- assertHasNonBlockingQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {x: 1}}]);
+// Test that a $match on a field not present in the same index eligible to provide a sort can
+// still result in an index scan on the sort field (SERVER-7568).
+assertHasNonBlockingQuerySort([{$match: {_id: {$gte: 50}}}, {$sort: {x: 1}}]);
- // Test that a sort on the text score does not use the query system to provide the sort, since
- // it would need to be a blocking sort, and we prefer the $sort stage to the query system's sort
- // implementation.
- assert.commandWorked(coll.createIndex({x: "text"}));
- assertDoesNotHaveQuerySort(
- [{$match: {$text: {$search: "test"}}}, {$sort: {key: {$meta: "textScore"}}}]);
+// Test that a sort on the text score does not use the query system to provide the sort, since
+// it would need to be a blocking sort, and we prefer the $sort stage to the query system's sort
+// implementation.
+assert.commandWorked(coll.createIndex({x: "text"}));
+assertDoesNotHaveQuerySort(
+ [{$match: {$text: {$search: "test"}}}, {$sort: {key: {$meta: "textScore"}}}]);
}());
diff --git a/jstests/aggregation/variables/layered_variables.js b/jstests/aggregation/variables/layered_variables.js
index e0e10494b29..95e2d535402 100644
--- a/jstests/aggregation/variables/layered_variables.js
+++ b/jstests/aggregation/variables/layered_variables.js
@@ -1,14 +1,14 @@
// Tests that a pipeline with a blend of variable-using expressions reports correct results.
(function() {
- "use strict";
- const testDB = db.getSiblingDB("layered_variables");
- assert.commandWorked(testDB.dropDatabase());
- const coll = testDB.getCollection("test");
+"use strict";
+const testDB = db.getSiblingDB("layered_variables");
+assert.commandWorked(testDB.dropDatabase());
+const coll = testDB.getCollection("test");
- assert.writeOK(coll.insert({_id: 1, has_permissions: 1, my_array: [2, 3]}));
+assert.writeOK(coll.insert({_id: 1, has_permissions: 1, my_array: [2, 3]}));
- const res = assert.commandWorked(testDB.runCommand({
+const res = assert.commandWorked(testDB.runCommand({
aggregate: "test",
pipeline: [
{
@@ -51,8 +51,7 @@
cursor: {}
}));
- assert.eq(
- {_id: 1, has_permissions: 1, my_array: [2, 3], a: 1, b: 6, c: [2, 3], d: 3000, e: [3, 4]},
- res.cursor.firstBatch[0],
- tojson(res));
+assert.eq({_id: 1, has_permissions: 1, my_array: [2, 3], a: 1, b: 6, c: [2, 3], d: 3000, e: [3, 4]},
+ res.cursor.firstBatch[0],
+ tojson(res));
})();
diff --git a/jstests/aggregation/variables/remove_system_variable.js b/jstests/aggregation/variables/remove_system_variable.js
index 803c826af4f..5dd0cda9525 100644
--- a/jstests/aggregation/variables/remove_system_variable.js
+++ b/jstests/aggregation/variables/remove_system_variable.js
@@ -2,72 +2,68 @@
* Tests for the $$REMOVE system variable.
*/
(function() {
- "use strict";
+"use strict";
- let coll = db[jsTest.name()];
- coll.drop();
+let coll = db[jsTest.name()];
+coll.drop();
- assert.writeOK(coll.insert({_id: 1, a: 2, b: 3}));
- assert.writeOK(coll.insert({_id: 2, a: 3, b: 4}));
- assert.writeOK(coll.insert({_id: 3, a: {b: 98, c: 99}}));
+assert.writeOK(coll.insert({_id: 1, a: 2, b: 3}));
+assert.writeOK(coll.insert({_id: 2, a: 3, b: 4}));
+assert.writeOK(coll.insert({_id: 3, a: {b: 98, c: 99}}));
- let projectStage = {
- $project: {_id: 0, a: 1, b: {$cond: {if: {$eq: ["$b", 4]}, then: "$$REMOVE", else: "$b"}}}
- };
+let projectStage = {
+ $project: {_id: 0, a: 1, b: {$cond: {if: {$eq: ["$b", 4]}, then: "$$REMOVE", else: "$b"}}}
+};
- // Test that we can conditionally remove a field in $project.
- assert.eq([{a: 2, b: 3}], coll.aggregate([{$match: {_id: 1}}, projectStage]).toArray());
- assert.eq([{a: 3}], coll.aggregate([{$match: {_id: 2}}, projectStage]).toArray());
+// Test that we can conditionally remove a field in $project.
+assert.eq([{a: 2, b: 3}], coll.aggregate([{$match: {_id: 1}}, projectStage]).toArray());
+assert.eq([{a: 3}], coll.aggregate([{$match: {_id: 2}}, projectStage]).toArray());
- // Test removal of a nested field, using $project.
- assert.eq([{a: {b: 98}}],
- coll.aggregate([{$match: {_id: 3}}, {$project: {_id: 0, "a.b": 1}}]).toArray());
- assert.eq(
- [{a: {}}],
- coll.aggregate([{$match: {_id: 3}}, {$project: {_id: 0, "a.b": "$$REMOVE"}}]).toArray());
- assert.eq(
- [{a: {}}],
- coll.aggregate([{$match: {_id: 3}}, {$project: {_id: 0, a: {b: "$$REMOVE"}}}]).toArray());
+// Test removal of a nested field, using $project.
+assert.eq([{a: {b: 98}}],
+ coll.aggregate([{$match: {_id: 3}}, {$project: {_id: 0, "a.b": 1}}]).toArray());
+assert.eq([{a: {}}],
+ coll.aggregate([{$match: {_id: 3}}, {$project: {_id: 0, "a.b": "$$REMOVE"}}]).toArray());
+assert.eq([{a: {}}],
+ coll.aggregate([{$match: {_id: 3}}, {$project: {_id: 0, a: {b: "$$REMOVE"}}}]).toArray());
- // Test removal of a nested field, using $addFields.
- assert.eq([{_id: 3, a: {c: 99}}],
- coll.aggregate([{$match: {_id: 3}}, {$addFields: {"a.b": "$$REMOVE"}}]).toArray());
+// Test removal of a nested field, using $addFields.
+assert.eq([{_id: 3, a: {c: 99}}],
+ coll.aggregate([{$match: {_id: 3}}, {$addFields: {"a.b": "$$REMOVE"}}]).toArray());
- // Test that any field path following "$$REMOVE" also evaluates to missing.
- assert.eq([{_id: 3}],
- coll.aggregate([{$match: {_id: 3}}, {$addFields: {"a": "$$REMOVE.a.c"}}]).toArray());
+// Test that any field path following "$$REMOVE" also evaluates to missing.
+assert.eq([{_id: 3}],
+ coll.aggregate([{$match: {_id: 3}}, {$addFields: {"a": "$$REMOVE.a.c"}}]).toArray());
- // Test that $$REMOVE can be used together with user-defined variables in a $let.
- assert.eq([{a: {b: 3, d: 4}}],
- coll.aggregate([
- {$match: {_id: 3}},
- {
- $project: {
- _id: 0,
- a: {
- $let: {
- vars: {bar: 3, foo: 4},
- in : {b: "$$bar", c: "$$REMOVE", d: "$$foo"}
- }
- }
- }
- }
- ])
- .toArray());
-
- // Test that $$REMOVE cannot be assigned in a $let.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- cursor: {},
- pipeline: [
+// Test that $$REMOVE can be used together with user-defined variables in a $let.
+assert.eq(
+ [{a: {b: 3, d: 4}}],
+ coll.aggregate([
{$match: {_id: 3}},
- {$project: {_id: 0, a: {$let: {vars: {"REMOVE": 3}, in : {b: "$$REMOVE", c: 2}}}}}
- ]
- }),
- 16867);
+ {
+ $project: {
+ _id: 0,
+ a: {
+ $let: {vars: {bar: 3, foo: 4}, in : {b: "$$bar", c: "$$REMOVE", d: "$$foo"}}
+ }
+ }
+ }
+ ])
+ .toArray());
+
+// Test that $$REMOVE cannot be assigned in a $let.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ cursor: {},
+ pipeline: [
+ {$match: {_id: 3}},
+ {$project: {_id: 0, a: {$let: {vars: {"REMOVE": 3}, in : {b: "$$REMOVE", c: 2}}}}}
+ ]
+}),
+ 16867);
- // Test that $$REMOVE, $$CURRENT, $$ROOT, and user-defined variables can all be used together.
- assert.eq(
+// Test that $$REMOVE, $$CURRENT, $$ROOT, and user-defined variables can all be used together.
+assert.eq(
[{a: {b: 3, d: {_id: 1, a: 2, b: 3}, e: {_id: 1, a: 2, b: 3}}}],
coll.aggregate([
{$match: {_id: 1}},
diff --git a/jstests/auth/auth3.js b/jstests/auth/auth3.js
index 6ded435c508..29f61f1c7ee 100644
--- a/jstests/auth/auth3.js
+++ b/jstests/auth/auth3.js
@@ -1,33 +1,33 @@
(function() {
- 'use strict';
+'use strict';
- var conn = MongoRunner.runMongod({auth: ""});
+var conn = MongoRunner.runMongod({auth: ""});
- var admin = conn.getDB("admin");
- var errorCodeUnauthorized = 13;
+var admin = conn.getDB("admin");
+var errorCodeUnauthorized = 13;
- admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
- print("make sure curop, killop, and unlock fail");
+print("make sure curop, killop, and unlock fail");
- var x = admin.currentOp();
- assert(!("inprog" in x), tojson(x));
- assert.eq(x.code, errorCodeUnauthorized, tojson(x));
+var x = admin.currentOp();
+assert(!("inprog" in x), tojson(x));
+assert.eq(x.code, errorCodeUnauthorized, tojson(x));
- x = admin.killOp(123);
- assert(!("info" in x), tojson(x));
- assert.eq(x.code, errorCodeUnauthorized, tojson(x));
+x = admin.killOp(123);
+assert(!("info" in x), tojson(x));
+assert.eq(x.code, errorCodeUnauthorized, tojson(x));
- x = admin.fsyncUnlock();
- assert(x.errmsg != "fsyncUnlock called when not locked", tojson(x));
- assert.eq(x.code, errorCodeUnauthorized, tojson(x));
+x = admin.fsyncUnlock();
+assert(x.errmsg != "fsyncUnlock called when not locked", tojson(x));
+assert.eq(x.code, errorCodeUnauthorized, tojson(x));
- conn.getDB("admin").auth("foo", "bar");
+conn.getDB("admin").auth("foo", "bar");
- assert("inprog" in admin.currentOp());
- assert("info" in admin.killOp(123));
- assert.eq(admin.fsyncUnlock().errmsg, "fsyncUnlock called when not locked");
+assert("inprog" in admin.currentOp());
+assert("info" in admin.killOp(123));
+assert.eq(admin.fsyncUnlock().errmsg, "fsyncUnlock called when not locked");
- MongoRunner.stopMongod(conn, null, {user: "foo", pwd: "bar"});
+MongoRunner.stopMongod(conn, null, {user: "foo", pwd: "bar"});
})();
diff --git a/jstests/auth/auth_helpers.js b/jstests/auth/auth_helpers.js
index 0e944560e0b..f2d9f458eac 100644
--- a/jstests/auth/auth_helpers.js
+++ b/jstests/auth/auth_helpers.js
@@ -1,24 +1,24 @@
// Test the db.auth() shell helper.
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const admin = conn.getDB('admin');
+const conn = MongoRunner.runMongod();
+const admin = conn.getDB('admin');
- admin.createUser({user: 'andy', pwd: 'a', roles: jsTest.adminUserRoles});
- assert(admin.auth({user: 'andy', pwd: 'a'}));
- assert(admin.logout());
+admin.createUser({user: 'andy', pwd: 'a', roles: jsTest.adminUserRoles});
+assert(admin.auth({user: 'andy', pwd: 'a'}));
+assert(admin.logout());
- // Try all the ways to call db.auth that uses SCRAM-SHA-1 or MONGODB-CR.
- assert(admin.auth('andy', 'a'));
- assert(admin.logout());
- assert(admin.auth({user: 'andy', pwd: 'a'}));
- assert(admin.logout());
- assert(admin.auth({mechanism: 'SCRAM-SHA-1', user: 'andy', pwd: 'a'}));
- assert(admin.logout());
+// Try all the ways to call db.auth that use SCRAM-SHA-1 or MONGODB-CR.
+assert(admin.auth('andy', 'a'));
+assert(admin.logout());
+assert(admin.auth({user: 'andy', pwd: 'a'}));
+assert(admin.logout());
+assert(admin.auth({mechanism: 'SCRAM-SHA-1', user: 'andy', pwd: 'a'}));
+assert(admin.logout());
- // Invalid mechanisms shouldn't lead to authentication, but also shouldn't crash.
- assert(!admin.auth({mechanism: 'this-mechanism-is-fake', user: 'andy', pwd: 'a'}));
- MongoRunner.stopMongod(conn);
+// Invalid mechanisms shouldn't lead to authentication, but also shouldn't crash.
+assert(!admin.auth({mechanism: 'this-mechanism-is-fake', user: 'andy', pwd: 'a'}));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/auth/auth_mechanism_discovery.js b/jstests/auth/auth_mechanism_discovery.js
index e3f7c5d0551..78b150ec1aa 100644
--- a/jstests/auth/auth_mechanism_discovery.js
+++ b/jstests/auth/auth_mechanism_discovery.js
@@ -1,58 +1,55 @@
// Tests that a client will auto-discover a user's supported SASL mechanisms during auth().
// @tags: [requires_sharding]
(function() {
- "use strict";
+"use strict";
- function runTest(conn) {
- const admin = conn.getDB("admin");
- const test = conn.getDB("test");
+function runTest(conn) {
+ const admin = conn.getDB("admin");
+ const test = conn.getDB("test");
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
- // Verify user mechanism discovery.
- function checkUser(username, mechanism) {
- var createUser = {createUser: username, pwd: 'pwd', roles: []};
- if (mechanism !== undefined) {
- createUser.mechanisms = [mechanism];
- } else {
- // Create both variants, expect to prefer 256.
- mechanism = 'SCRAM-SHA-256';
- }
- assert.commandWorked(test.runCommand(createUser));
- assert.eq(test._getDefaultAuthenticationMechanism(username, test.getName()), mechanism);
- assert(test.auth(username, 'pwd'));
- test.logout();
+ // Verify user mechanism discovery.
+ function checkUser(username, mechanism) {
+ var createUser = {createUser: username, pwd: 'pwd', roles: []};
+ if (mechanism !== undefined) {
+ createUser.mechanisms = [mechanism];
+ } else {
+ // Create both variants, expect to prefer 256.
+ mechanism = 'SCRAM-SHA-256';
}
- checkUser('userSha1', 'SCRAM-SHA-1');
- checkUser('userSha256', 'SCRAM-SHA-256');
- checkUser('userAll');
-
- // Verify override of mechanism discovery.
- // Depends on 'userAll' user created above.
- assert.eq(test._getDefaultAuthenticationMechanism('userAll', test.getName()),
- 'SCRAM-SHA-256');
- test._defaultAuthenticationMechanism = 'SCRAM-SHA-1';
- assert.eq(test._getDefaultAuthenticationMechanism('userAll', test.getName()),
- 'SCRAM-SHA-1');
- test._defaultAuthenticationMechanism = 'NO-SUCH-MECHANISM';
- assert.eq(test._getDefaultAuthenticationMechanism('userAll', test.getName()),
- 'SCRAM-SHA-256');
+ assert.commandWorked(test.runCommand(createUser));
+ assert.eq(test._getDefaultAuthenticationMechanism(username, test.getName()), mechanism);
+ assert(test.auth(username, 'pwd'));
+ test.logout();
}
+ checkUser('userSha1', 'SCRAM-SHA-1');
+ checkUser('userSha256', 'SCRAM-SHA-256');
+ checkUser('userAll');
+
+ // Verify override of mechanism discovery.
+ // Depends on 'userAll' user created above.
+ assert.eq(test._getDefaultAuthenticationMechanism('userAll', test.getName()), 'SCRAM-SHA-256');
+ test._defaultAuthenticationMechanism = 'SCRAM-SHA-1';
+ assert.eq(test._getDefaultAuthenticationMechanism('userAll', test.getName()), 'SCRAM-SHA-1');
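+    // An unrecognized override falls back to mechanism discovery.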
+ test._defaultAuthenticationMechanism = 'NO-SUCH-MECHANISM';
+ assert.eq(test._getDefaultAuthenticationMechanism('userAll', test.getName()), 'SCRAM-SHA-256');
+}
- // Test standalone.
- const m = MongoRunner.runMongod({auth: ""});
- runTest(m);
- MongoRunner.stopMongod(m);
+// Test standalone.
+const m = MongoRunner.runMongod({auth: ""});
+runTest(m);
+MongoRunner.stopMongod(m);
- // Test sharded.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runTest(st.s0);
- st.stop();
+// Test sharded.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runTest(st.s0);
+st.stop();
})();
diff --git a/jstests/auth/auth_mechanisms_parsing.js b/jstests/auth/auth_mechanisms_parsing.js
index 72f906b3c68..3954963b885 100644
--- a/jstests/auth/auth_mechanisms_parsing.js
+++ b/jstests/auth/auth_mechanisms_parsing.js
@@ -1,13 +1,13 @@
// Test for stripping whitespace for authenticationMechanisms
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod(
- {setParameter: "authenticationMechanisms=SCRAM-SHA-1,SCRAM-SHA-256, PLAIN"});
+const conn = MongoRunner.runMongod(
+ {setParameter: "authenticationMechanisms=SCRAM-SHA-1,SCRAM-SHA-256, PLAIN"});
- const cmdOut = conn.getDB('admin').runCommand({getParameter: 1, authenticationMechanisms: 1});
+const cmdOut = conn.getDB('admin').runCommand({getParameter: 1, authenticationMechanisms: 1});
- // Check to see if whitespace in front of PLAIN is stripped
- assert.sameMembers(cmdOut.authenticationMechanisms, ["SCRAM-SHA-1", "SCRAM-SHA-256", "PLAIN"]);
- MongoRunner.stopMongod(conn);
+// Check to see if whitespace in front of PLAIN is stripped
+assert.sameMembers(cmdOut.authenticationMechanisms, ["SCRAM-SHA-1", "SCRAM-SHA-256", "PLAIN"]);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/auth/authentication_restrictions.js b/jstests/auth/authentication_restrictions.js
index 1f08b3e6d6d..172db043770 100644
--- a/jstests/auth/authentication_restrictions.js
+++ b/jstests/auth/authentication_restrictions.js
@@ -4,224 +4,214 @@
*/
(function() {
- 'use strict';
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- function testConnection(
- conn, eventuallyConsistentConn, sleepUntilUserDataPropagated, sleepUntilUserDataRefreshed) {
- load("jstests/libs/host_ipaddr.js");
-
- // Create a session which observes an eventually consistent view of user data
- var eventualDb = eventuallyConsistentConn.getDB("admin");
-
- // Create a session for modifying user data during the life of the test
- var adminSession = new Mongo("localhost:" + conn.port);
- var admin = adminSession.getDB("admin");
- assert.commandWorked(admin.runCommand(
- {createUser: "admin", pwd: "admin", roles: [{role: "root", db: "admin"}]}));
- assert(admin.auth("admin", "admin"));
-
- // Create a strongly consistent session for consuming user data
- var db = conn.getDB("admin");
-
- // Create a strongly consistent session for consuming user data, with a non-localhost
- // source IP.
- var externalMongo = new Mongo(get_ipaddr() + ":" + conn.port);
- var externalDb = externalMongo.getDB("admin");
-
- assert.commandWorked(admin.runCommand({
- createUser: "user2",
- pwd: "user",
- roles: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
- assert.commandWorked(admin.runCommand({createUser: "user3", pwd: "user", roles: []}));
- assert.commandWorked(admin.runCommand(
- {updateUser: "user3", authenticationRestrictions: [{serverAddress: ["127.0.0.1"]}]}));
-
- print("=== User creation tests");
- print(
- "When a client creates users with empty authenticationRestrictions, the operation succeeds, though it has no effect");
- assert.commandWorked(admin.runCommand(
- {createUser: "user4", pwd: "user", roles: [], authenticationRestrictions: []}));
- assert(!Object.keys(admin.system.users.findOne({user: "user4"}))
- .includes("authenticationRestrictions"));
-
- print(
- "When a client updates a user's authenticationRestrictions to be empty, the operation succeeds, and removes the authenticationRestrictions field");
- assert.commandWorked(admin.runCommand({createUser: "user5", pwd: "user", roles: []}));
- assert.commandWorked(
- admin.runCommand({updateUser: "user5", authenticationRestrictions: []}));
- assert(!Object.keys(admin.system.users.findOne({user: "user5"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(admin.runCommand(
- {updateUser: "user5", authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]}));
- assert(Object.keys(admin.system.users.findOne({user: "user5"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(
- admin.runCommand({updateUser: "user5", authenticationRestrictions: []}));
- assert(!Object.keys(admin.system.users.findOne({user: "user5"}))
- .includes("authenticationRestrictions"));
-
- print(
- "When a client updates a user's authenticationRestrictions to be null or undefined, the operation fails");
- assert.commandWorked(admin.runCommand(
- {updateUser: "user5", authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]}));
- assert(Object.keys(admin.system.users.findOne({user: "user5"}))
- .includes("authenticationRestrictions"));
- assert.commandFailed(
- admin.runCommand({updateUser: "user5", authenticationRestrictions: null}));
- assert(Object.keys(admin.system.users.findOne({user: "user5"}))
- .includes("authenticationRestrictions"));
- assert.commandFailed(
- admin.runCommand({updateUser: "user5", authenticationRestrictions: undefined}));
- assert(Object.keys(admin.system.users.findOne({user: "user5"}))
- .includes("authenticationRestrictions"));
-
- print(
- "When a client creates users, it may use clientSource and serverAddress authenticationRestrictions");
- assert.commandWorked(admin.runCommand({
- createUser: "user6",
- pwd: "user",
- roles: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
- assert.commandWorked(admin.runCommand({
- createUser: "user7",
- pwd: "user",
- roles: [],
- authenticationRestrictions: [{serverAddress: ["127.0.0.1"]}]
- }));
- assert.commandWorked(admin.runCommand({
- createUser: "user8",
- pwd: "user",
- roles: [],
- authenticationRestrictions:
- [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
- }));
- assert.commandWorked(admin.runCommand({
- createUser: "user9",
- pwd: "user",
- roles: [],
- authenticationRestrictions:
- [{clientSource: ["127.0.0.1"]}, {serverAddress: ["127.0.0.1"]}]
- }));
- assert.commandFailed(admin.runCommand({
- createUser: "user10",
- pwd: "user",
- roles: [],
- authenticationRestrictions: [{invalidRestriction: ["127.0.0.1"]}]
- }));
-
- print("=== Localhost access tests");
-
- print(
- "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\"}, it will succeed");
- assert(db.auth("user6", "user"));
-
- print(
- "When a client on the loopback authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will succeed");
- assert(db.auth("user7", "user"));
-
- print(
- "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will succeed");
- assert(db.auth("user8", "user"));
-
- print("=== Remote access tests");
- print(
- "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\"}, it will fail");
- assert(!externalDb.auth("user6", "user"));
-
- print(
- "When a client on the external interface authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will fail");
- assert(!externalDb.auth("user7", "user"));
-
- print(
- "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will fail");
- assert(!externalDb.auth("user8", "user"));
-
- print("=== Invalidation tests");
- print(
- "When a client removes all authenticationRestrictions from a user, authentication will succeed");
- assert.commandWorked(admin.runCommand({
- createUser: "user11",
- pwd: "user",
- roles: [],
- authenticationRestrictions:
- [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
- }));
- assert(!externalDb.auth("user11", "user"));
- assert.commandWorked(
- admin.runCommand({updateUser: "user11", authenticationRestrictions: []}));
- assert(externalDb.auth("user11", "user"));
-
- print(
- "When a client sets authenticationRestrictions on a user, authorization privileges are revoked");
- assert.commandWorked(admin.runCommand(
- {createUser: "user12", pwd: "user", roles: [{role: "readWrite", db: "test"}]}));
-
- assert(db.auth("user12", "user"));
- assert.commandWorked(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
-
- sleepUntilUserDataPropagated();
- assert(eventualDb.auth("user12", "user"));
- assert.commandWorked(
- eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
-
- assert.commandWorked(admin.runCommand(
- {updateUser: "user12", authenticationRestrictions: [{clientSource: ["192.0.2.0"]}]}));
-
- assert.commandFailed(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
-
- sleepUntilUserDataRefreshed();
- assert.commandFailed(
- eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
- }
-
- print("Testing standalone");
- var conn = MongoRunner.runMongod({bind_ip_all: "", auth: ""});
- testConnection(conn, conn, function() {}, function() {});
- MongoRunner.stopMongod(conn);
-
- var keyfile = "jstests/libs/key1";
-
- print("Testing replicaset");
- var rst = new ReplSetTest(
- {name: 'testset', nodes: 2, nodeOptions: {bind_ip_all: "", auth: ""}, keyFile: keyfile});
- var nodes = rst.startSet();
- rst.initiate();
- rst.awaitSecondaryNodes();
- var awaitReplication = function() {
- authutil.asCluster(nodes, "jstests/libs/key1", function() {
- rst.awaitReplication();
- });
- };
-
- testConnection(rst.getPrimary(), rst.getSecondary(), awaitReplication, awaitReplication);
- rst.stopSet();
-
- print("Testing sharded cluster");
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- mongos: 2,
- config: 3,
- shard: 1,
- keyFile: keyfile,
- other: {
- mongosOptions: {bind_ip_all: "", auth: null},
- configOptions: {auth: null},
- shardOptions: {auth: null},
- shardAsReplicaSet: false
- }
+'use strict';
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+function testConnection(
+ conn, eventuallyConsistentConn, sleepUntilUserDataPropagated, sleepUntilUserDataRefreshed) {
+ load("jstests/libs/host_ipaddr.js");
+
+ // Create a session which observes an eventually consistent view of user data
+ var eventualDb = eventuallyConsistentConn.getDB("admin");
+
+ // Create a session for modifying user data during the life of the test
+ var adminSession = new Mongo("localhost:" + conn.port);
+ var admin = adminSession.getDB("admin");
+ assert.commandWorked(admin.runCommand(
+ {createUser: "admin", pwd: "admin", roles: [{role: "root", db: "admin"}]}));
+ assert(admin.auth("admin", "admin"));
+
+ // Create a strongly consistent session for consuming user data
+ var db = conn.getDB("admin");
+
+ // Create a strongly consistent session for consuming user data, with a non-localhost
+ // source IP.
+ var externalMongo = new Mongo(get_ipaddr() + ":" + conn.port);
+ var externalDb = externalMongo.getDB("admin");
+
+ assert.commandWorked(admin.runCommand({
+ createUser: "user2",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({createUser: "user3", pwd: "user", roles: []}));
+ assert.commandWorked(admin.runCommand(
+ {updateUser: "user3", authenticationRestrictions: [{serverAddress: ["127.0.0.1"]}]}));
+
+ print("=== User creation tests");
+ print(
+ "When a client creates users with empty authenticationRestrictions, the operation succeeds, though it has no effect");
+ assert.commandWorked(admin.runCommand(
+ {createUser: "user4", pwd: "user", roles: [], authenticationRestrictions: []}));
+ assert(!Object.keys(admin.system.users.findOne({user: "user4"}))
+ .includes("authenticationRestrictions"));
+
+ print(
+ "When a client updates a user's authenticationRestrictions to be empty, the operation succeeds, and removes the authenticationRestrictions field");
+ assert.commandWorked(admin.runCommand({createUser: "user5", pwd: "user", roles: []}));
+ assert.commandWorked(admin.runCommand({updateUser: "user5", authenticationRestrictions: []}));
+ assert(!Object.keys(admin.system.users.findOne({user: "user5"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand(
+ {updateUser: "user5", authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]}));
+ assert(Object.keys(admin.system.users.findOne({user: "user5"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand({updateUser: "user5", authenticationRestrictions: []}));
+ assert(!Object.keys(admin.system.users.findOne({user: "user5"}))
+ .includes("authenticationRestrictions"));
+
+ print(
+ "When a client updates a user's authenticationRestrictions to be null or undefined, the operation fails");
+ assert.commandWorked(admin.runCommand(
+ {updateUser: "user5", authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]}));
+ assert(Object.keys(admin.system.users.findOne({user: "user5"}))
+ .includes("authenticationRestrictions"));
+ assert.commandFailed(admin.runCommand({updateUser: "user5", authenticationRestrictions: null}));
+ assert(Object.keys(admin.system.users.findOne({user: "user5"}))
+ .includes("authenticationRestrictions"));
+ assert.commandFailed(
+ admin.runCommand({updateUser: "user5", authenticationRestrictions: undefined}));
+ assert(Object.keys(admin.system.users.findOne({user: "user5"}))
+ .includes("authenticationRestrictions"));
+
+ print(
+ "When a client creates users, it may use clientSource and serverAddress authenticationRestrictions");
+ assert.commandWorked(admin.runCommand({
+ createUser: "user6",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({
+ createUser: "user7",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{serverAddress: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({
+ createUser: "user8",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({
+ createUser: "user9",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}, {serverAddress: ["127.0.0.1"]}]
+ }));
+ assert.commandFailed(admin.runCommand({
+ createUser: "user10",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{invalidRestriction: ["127.0.0.1"]}]
+ }));
+
+ print("=== Localhost access tests");
+
+ print(
+ "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\"}, it will succeed");
+ assert(db.auth("user6", "user"));
+
+ print(
+ "When a client on the loopback authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will succeed");
+ assert(db.auth("user7", "user"));
+
+ print(
+ "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will succeed");
+ assert(db.auth("user8", "user"));
+
+ print("=== Remote access tests");
+ print(
+ "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\"}, it will fail");
+ assert(!externalDb.auth("user6", "user"));
+
+ print(
+ "When a client on the external interface authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will fail");
+ assert(!externalDb.auth("user7", "user"));
+
+ print(
+ "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will fail");
+ assert(!externalDb.auth("user8", "user"));
+
+ print("=== Invalidation tests");
+ print(
+ "When a client removes all authenticationRestrictions from a user, authentication will succeed");
+ assert.commandWorked(admin.runCommand({
+ createUser: "user11",
+ pwd: "user",
+ roles: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
+ }));
+ assert(!externalDb.auth("user11", "user"));
+ assert.commandWorked(admin.runCommand({updateUser: "user11", authenticationRestrictions: []}));
+ assert(externalDb.auth("user11", "user"));
+
+ print(
+ "When a client sets authenticationRestrictions on a user, authorization privileges are revoked");
+ assert.commandWorked(admin.runCommand(
+ {createUser: "user12", pwd: "user", roles: [{role: "readWrite", db: "test"}]}));
+
+ assert(db.auth("user12", "user"));
+ assert.commandWorked(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+
+ sleepUntilUserDataPropagated();
+ assert(eventualDb.auth("user12", "user"));
+ assert.commandWorked(eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+
+ assert.commandWorked(admin.runCommand(
+ {updateUser: "user12", authenticationRestrictions: [{clientSource: ["192.0.2.0"]}]}));
+
+ assert.commandFailed(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+
+ sleepUntilUserDataRefreshed();
+ assert.commandFailed(eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+}
+
+print("Testing standalone");
+var conn = MongoRunner.runMongod({bind_ip_all: "", auth: ""});
+testConnection(conn, conn, function() {}, function() {});
+MongoRunner.stopMongod(conn);
+
+var keyfile = "jstests/libs/key1";
+
+print("Testing replicaset");
+var rst = new ReplSetTest(
+ {name: 'testset', nodes: 2, nodeOptions: {bind_ip_all: "", auth: ""}, keyFile: keyfile});
+var nodes = rst.startSet();
+rst.initiate();
+rst.awaitSecondaryNodes();
+var awaitReplication = function() {
+ authutil.asCluster(nodes, "jstests/libs/key1", function() {
+ rst.awaitReplication();
});
- testConnection(st.s0,
- st.s1,
- function() {},
- function() {
- sleep(40 * 1000); // Wait for mongos user cache invalidation
- });
- st.stop();
-
+};
+
+testConnection(rst.getPrimary(), rst.getSecondary(), awaitReplication, awaitReplication);
+rst.stopSet();
+
+print("Testing sharded cluster");
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ mongos: 2,
+ config: 3,
+ shard: 1,
+ keyFile: keyfile,
+ other: {
+ mongosOptions: {bind_ip_all: "", auth: null},
+ configOptions: {auth: null},
+ shardOptions: {auth: null},
+ shardAsReplicaSet: false
+ }
+});
+testConnection(st.s0,
+ st.s1,
+ function() {},
+ function() {
+ sleep(40 * 1000); // Wait for mongos user cache invalidation
+ });
+st.stop();
}());
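The tests above exercise the user-level authenticationRestrictions API. A minimal sketch of the command shapes involved, assuming a shell session already authenticated as a user administrator on "admin" (user name here is illustrative, not from the test):

    var admin = db.getSiblingDB("admin");
    // Restrict authentication to clients connecting from the loopback address.
    assert.commandWorked(admin.runCommand({
        createUser: "loopbackOnlyUser",  // hypothetical user name
        pwd: "pwd",
        roles: [],
        authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
    }));
    // Per the tests above, an empty array passed through updateUser removes the
    // restriction document entirely, while null or undefined is rejected.
    assert.commandWorked(admin.runCommand(
        {updateUser: "loopbackOnlyUser", authenticationRestrictions: []}));
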
diff --git a/jstests/auth/authentication_restrictions_role.js b/jstests/auth/authentication_restrictions_role.js
index 3f23cfdcb92..70256fba7f5 100644
--- a/jstests/auth/authentication_restrictions_role.js
+++ b/jstests/auth/authentication_restrictions_role.js
@@ -4,424 +4,402 @@
*/
(function() {
- 'use strict';
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- function testRestrictionCreationAndEnforcement(
- conn, eventuallyConsistentConn, sleepUntilUserDataPropagated, sleepUntilUserDataRefreshed) {
- load("jstests/libs/host_ipaddr.js");
-
- // Create a session which observes an eventually consistent view of user data
- var eventualDb = eventuallyConsistentConn.getDB("admin");
-
- // Create a session for modifying user data during the life of the test
- var adminSession = new Mongo("127.0.0.1:" + conn.port);
- var admin = adminSession.getDB("admin");
- assert.commandWorked(admin.runCommand(
- {createUser: "admin", pwd: "admin", roles: [{role: "root", db: "admin"}]}));
- assert(admin.auth("admin", "admin"));
-
- // Create a strongly consistent session for consuming user data
- var db = conn.getDB("admin");
-
- // Create a strongly consistent session for consuming user data, with a non-localhost
- // source IP.
- var externalMongo = new Mongo(get_ipaddr() + ":" + conn.port);
- var externalDb = externalMongo.getDB("admin");
-
- assert.commandWorked(admin.runCommand({
- createRole: "role2",
- roles: [],
- privileges: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
- assert(Object.keys(admin.system.roles.findOne({role: "role2"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(admin.runCommand({createRole: "role3", roles: [], privileges: []}));
-
- print("=== Role creation tests");
- print(
- "When a client creates roles with empty authenticationRestrictions, the operation succeeds, though it has no effect");
- assert.commandWorked(admin.runCommand(
- {createRole: "role4", roles: [], privileges: [], authenticationRestrictions: []}));
- assert(!Object.keys(admin.system.roles.findOne({role: "role4"}))
- .includes("authenticationRestrictions"));
-
- print(
- "When a client updates a role's authenticationRestrictions to be empty, the operation succeeds, and removes the authenticationRestrictions field");
- assert.commandWorked(admin.runCommand({createRole: "role5", roles: [], privileges: []}));
- assert.commandWorked(
- admin.runCommand({updateRole: "role5", authenticationRestrictions: []}));
- assert(!Object.keys(admin.system.roles.findOne({role: "role5"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(admin.runCommand(
- {updateRole: "role5", authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]}));
- assert(Object.keys(admin.system.roles.findOne({role: "role5"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(
- admin.runCommand({updateRole: "role5", authenticationRestrictions: []}));
- assert(!Object.keys(admin.system.roles.findOne({role: "role5"}))
- .includes("authenticationRestrictions"));
-
- print(
- "When a client creates roles, it may use clientSource and serverAddress authenticationRestrictions");
- assert.commandWorked(admin.runCommand({
- createRole: "role6",
- roles: [],
- privileges: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
- assert(Object.keys(admin.system.roles.findOne({role: "role6"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(admin.runCommand({
- createRole: "role7",
- roles: [],
- privileges: [],
- authenticationRestrictions: [{serverAddress: ["127.0.0.1"]}]
- }));
- assert(Object.keys(admin.system.roles.findOne({role: "role7"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(admin.runCommand({
- createRole: "role8",
- roles: [],
- privileges: [],
- authenticationRestrictions:
- [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
- }));
- assert(Object.keys(admin.system.roles.findOne({role: "role8"}))
- .includes("authenticationRestrictions"));
- assert.commandWorked(admin.runCommand({
- createRole: "role9",
- roles: [],
- privileges: [],
- authenticationRestrictions:
- [{clientSource: ["127.0.0.1"]}, {serverAddress: ["127.0.0.1"]}]
- }));
- assert(Object.keys(admin.system.roles.findOne({role: "role9"}))
- .includes("authenticationRestrictions"));
- assert.commandFailed(admin.runCommand({
- createRole: "role10",
- roles: [],
- privileges: [],
- authenticationRestrictions: [{invalidRestriction: ["127.0.0.1"]}]
- }));
-
- print("=== Localhost access tests");
- print(
- "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\"}, it will succeed");
- assert.commandWorked(
- admin.runCommand({createUser: "user6", pwd: "user", roles: ["role6"]}));
- assert(db.auth("user6", "user"));
-
- print(
- "When a client on the loopback authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will succeed");
- assert.commandWorked(
- admin.runCommand({createUser: "user7", pwd: "user", roles: ["role7"]}));
- assert(db.auth("user7", "user"));
-
- print(
- "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will succeed");
- assert.commandWorked(
- admin.runCommand({createUser: "user8", pwd: "user", roles: ["role8"]}));
- assert(db.auth("user8", "user"));
-
- print("=== Remote access tests");
- print(
- "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\"}, it will fail");
- assert(!externalDb.auth("user6", "user"));
-
- print(
- "When a client on the external interface authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will fail");
- assert(!externalDb.auth("user7", "user"));
-
- print(
- "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will fail");
- assert(!externalDb.auth("user8", "user"));
-
- print("=== Invalidation tests");
- print(
- "When a client removes all authenticationRestrictions from a role, authentication will succeed");
- assert.commandWorked(admin.runCommand({
- createRole: "role11",
- roles: [],
- privileges: [],
- authenticationRestrictions:
- [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
- }));
- assert.commandWorked(
- admin.runCommand({createUser: "user11", pwd: "user", roles: ["role11"]}));
- assert(!externalDb.auth("user11", "user"));
- assert.commandWorked(
- admin.runCommand({updateRole: "role11", authenticationRestrictions: []}));
- assert(externalDb.auth("user11", "user"));
-
- print(
- "When a client sets authenticationRestrictions on a role, authorization privileges are revoked");
- assert.commandWorked(admin.runCommand({
- createRole: "role12",
- roles: [],
- privileges: [{resource: {db: "test", collection: "foo"}, actions: ["find"]}],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
- assert.commandWorked(
- admin.runCommand({createUser: "user12", pwd: "user", roles: ["role12"]}));
- assert(db.auth("user12", "user"));
- assert.commandWorked(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
- sleepUntilUserDataPropagated();
- assert(eventualDb.auth("user12", "user"));
- assert.commandWorked(
- eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
- assert.commandWorked(admin.runCommand(
- {updateRole: "role12", authenticationRestrictions: [{clientSource: ["192.168.2.0"]}]}));
- assert.commandFailed(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
- sleepUntilUserDataRefreshed();
- assert.commandFailed(
- eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+'use strict';
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+function testRestrictionCreationAndEnforcement(
+ conn, eventuallyConsistentConn, sleepUntilUserDataPropagated, sleepUntilUserDataRefreshed) {
+ load("jstests/libs/host_ipaddr.js");
+
+ // Create a session which observes an eventually consistent view of user data
+ var eventualDb = eventuallyConsistentConn.getDB("admin");
+
+ // Create a session for modifying user data during the life of the test
+ var adminSession = new Mongo("127.0.0.1:" + conn.port);
+ var admin = adminSession.getDB("admin");
+ assert.commandWorked(admin.runCommand(
+ {createUser: "admin", pwd: "admin", roles: [{role: "root", db: "admin"}]}));
+ assert(admin.auth("admin", "admin"));
+
+ // Create a strongly consistent session for consuming user data
+ var db = conn.getDB("admin");
+
+ // Create a strongly consistent session for consuming user data, with a non-localhost
+ // source IP.
+ var externalMongo = new Mongo(get_ipaddr() + ":" + conn.port);
+ var externalDb = externalMongo.getDB("admin");
+
+ assert.commandWorked(admin.runCommand({
+ createRole: "role2",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+ assert(Object.keys(admin.system.roles.findOne({role: "role2"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand({createRole: "role3", roles: [], privileges: []}));
+
+ print("=== Role creation tests");
+ print(
+ "When a client creates roles with empty authenticationRestrictions, the operation succeeds, though it has no effect");
+ assert.commandWorked(admin.runCommand(
+ {createRole: "role4", roles: [], privileges: [], authenticationRestrictions: []}));
+ assert(!Object.keys(admin.system.roles.findOne({role: "role4"}))
+ .includes("authenticationRestrictions"));
+
+ print(
+ "When a client updates a role's authenticationRestrictions to be empty, the operation succeeds, and removes the authenticationRestrictions field");
+ assert.commandWorked(admin.runCommand({createRole: "role5", roles: [], privileges: []}));
+ assert.commandWorked(admin.runCommand({updateRole: "role5", authenticationRestrictions: []}));
+ assert(!Object.keys(admin.system.roles.findOne({role: "role5"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand(
+ {updateRole: "role5", authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]}));
+ assert(Object.keys(admin.system.roles.findOne({role: "role5"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand({updateRole: "role5", authenticationRestrictions: []}));
+ assert(!Object.keys(admin.system.roles.findOne({role: "role5"}))
+ .includes("authenticationRestrictions"));
+
+ print(
+ "When a client creates roles, it may use clientSource and serverAddress authenticationRestrictions");
+ assert.commandWorked(admin.runCommand({
+ createRole: "role6",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+ assert(Object.keys(admin.system.roles.findOne({role: "role6"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand({
+ createRole: "role7",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{serverAddress: ["127.0.0.1"]}]
+ }));
+ assert(Object.keys(admin.system.roles.findOne({role: "role7"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand({
+ createRole: "role8",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
+ }));
+ assert(Object.keys(admin.system.roles.findOne({role: "role8"}))
+ .includes("authenticationRestrictions"));
+ assert.commandWorked(admin.runCommand({
+ createRole: "role9",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}, {serverAddress: ["127.0.0.1"]}]
+ }));
+ assert(Object.keys(admin.system.roles.findOne({role: "role9"}))
+ .includes("authenticationRestrictions"));
+ assert.commandFailed(admin.runCommand({
+ createRole: "role10",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{invalidRestriction: ["127.0.0.1"]}]
+ }));
+
+ print("=== Localhost access tests");
+ print(
+ "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\"}, it will succeed");
+ assert.commandWorked(admin.runCommand({createUser: "user6", pwd: "user", roles: ["role6"]}));
+ assert(db.auth("user6", "user"));
+
+ print(
+ "When a client on the loopback authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will succeed");
+ assert.commandWorked(admin.runCommand({createUser: "user7", pwd: "user", roles: ["role7"]}));
+ assert(db.auth("user7", "user"));
+
+ print(
+ "When a client on the loopback authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will succeed");
+ assert.commandWorked(admin.runCommand({createUser: "user8", pwd: "user", roles: ["role8"]}));
+ assert(db.auth("user8", "user"));
+
+ print("=== Remote access tests");
+ print(
+ "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\"}, it will fail");
+ assert(!externalDb.auth("user6", "user"));
+
+ print(
+ "When a client on the external interface authenticates to a user with {serverAddress: \"127.0.0.1\"}, it will fail");
+ assert(!externalDb.auth("user7", "user"));
+
+ print(
+ "When a client on the external interface authenticates to a user with {clientSource: \"127.0.0.1\", serverAddress: \"127.0.0.1\"}, it will fail");
+ assert(!externalDb.auth("user8", "user"));
+
+ print("=== Invalidation tests");
+ print(
+ "When a client removes all authenticationRestrictions from a role, authentication will succeed");
+ assert.commandWorked(admin.runCommand({
+ createRole: "role11",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"], serverAddress: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({createUser: "user11", pwd: "user", roles: ["role11"]}));
+ assert(!externalDb.auth("user11", "user"));
+ assert.commandWorked(admin.runCommand({updateRole: "role11", authenticationRestrictions: []}));
+ assert(externalDb.auth("user11", "user"));
+
+ print(
+ "When a client sets authenticationRestrictions on a role, authorization privileges are revoked");
+ assert.commandWorked(admin.runCommand({
+ createRole: "role12",
+ roles: [],
+ privileges: [{resource: {db: "test", collection: "foo"}, actions: ["find"]}],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({createUser: "user12", pwd: "user", roles: ["role12"]}));
+ assert(db.auth("user12", "user"));
+ assert.commandWorked(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+ sleepUntilUserDataPropagated();
+ assert(eventualDb.auth("user12", "user"));
+ assert.commandWorked(eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+ assert.commandWorked(admin.runCommand(
+ {updateRole: "role12", authenticationRestrictions: [{clientSource: ["192.168.2.0"]}]}));
+ assert.commandFailed(db.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+ sleepUntilUserDataRefreshed();
+ assert.commandFailed(eventualDb.getSiblingDB("test").runCommand({find: "foo", batchSize: 0}));
+}
+
+function testUsersInfoCommand(conn) {
+ function forEachUser(res, assertFun) {
+ assert(res.hasOwnProperty("users"));
+ print("Users: " + tojson(res.users));
+ assert.gt(res.users.length, 0);
+ res.users.forEach(assertFun);
}
- function testUsersInfoCommand(conn) {
- function forEachUser(res, assertFun) {
- assert(res.hasOwnProperty("users"));
- print("Users: " + tojson(res.users));
- assert.gt(res.users.length, 0);
- res.users.forEach(assertFun);
- }
-
- var admin = conn.getDB("admin");
- assert(admin.auth("admin", "admin"));
-
- assert.commandWorked(admin.runCommand({createUser: "user", pwd: "pwd", roles: []}));
- assert.commandWorked(admin.runCommand({
- createUser: "restrictedUser",
- pwd: "pwd",
- roles: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
- assert.commandWorked(admin.runCommand({
- createRole: "restrictedRole",
- roles: [],
- privileges: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.2"]}]
- }));
- assert.commandWorked(admin.runCommand(
- {createUser: "userWithRestrictedRole", pwd: "pwd", roles: ["restrictedRole"]}));
- assert.commandWorked(admin.runCommand({
- createUser: "restrictedUserWithRestrictedRole",
- pwd: "pwd",
- roles: ["restrictedRole"],
- authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
- }));
-
- print(
- "Calling usersInfo for all users on a database with showAuthenticationRestrictions is an error");
- assert.commandFailed(
- admin.runCommand({usersInfo: 1, showAuthenticationRestrictions: true}));
-
- print(
- "Calling usersInfo for all users on a database with showAuthenticationRestrictions false or unset will succeed, and not produce authenticationRestriction fields");
- [{}, {showAuthenticationRestrictions: false}].forEach(function(fragment) {
- forEachUser(
- assert.commandWorked(admin.runCommand(Object.merge({usersInfo: 1}, fragment))),
- function(userDoc) {
- assert(!userDoc.hasOwnProperty("authenticationRestrictions"));
- assert(!userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- });
- });
-
- print(
- "If usersInfo is called with showAuthenticationRestrictions true, on a user without authenticationRestrictions, a document with empty authenticationRestrictions and inheritedAuthenticationRestrictions arrays is returned");
- forEachUser(assert.commandWorked(admin.runCommand(
- {usersInfo: "user", showAuthenticationRestrictions: true})),
+ var admin = conn.getDB("admin");
+ assert(admin.auth("admin", "admin"));
+
+ assert.commandWorked(admin.runCommand({createUser: "user", pwd: "pwd", roles: []}));
+ assert.commandWorked(admin.runCommand({
+ createUser: "restrictedUser",
+ pwd: "pwd",
+ roles: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+ assert.commandWorked(admin.runCommand({
+ createRole: "restrictedRole",
+ roles: [],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.2"]}]
+ }));
+ assert.commandWorked(admin.runCommand(
+ {createUser: "userWithRestrictedRole", pwd: "pwd", roles: ["restrictedRole"]}));
+ assert.commandWorked(admin.runCommand({
+ createUser: "restrictedUserWithRestrictedRole",
+ pwd: "pwd",
+ roles: ["restrictedRole"],
+ authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
+ }));
+
+ print(
+ "Calling usersInfo for all users on a database with showAuthenticationRestrictions is an error");
+ assert.commandFailed(admin.runCommand({usersInfo: 1, showAuthenticationRestrictions: true}));
+
+    print(
+        "Calling usersInfo for all users on a database with showAuthenticationRestrictions false or unset will succeed, and not produce authenticationRestrictions fields");
+ [{}, {showAuthenticationRestrictions: false}].forEach(function(fragment) {
+ forEachUser(assert.commandWorked(admin.runCommand(Object.merge({usersInfo: 1}, fragment))),
function(userDoc) {
- assert(userDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(0, userDoc["authenticationRestrictions"].length);
-
- assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(0, userDoc["inheritedAuthenticationRestrictions"].length);
- });
-
- print(
- "If usersInfo is called and showAuthenticationRestrictions is false or unset, return a document without an authenticationRestrictions or inheritedAuthenticationRestrictions field");
- ["user", "restrictedUser", "userWithRestrictedRole", "restrictedUserWithRestrictedRole"]
- .forEach(function(user) {
- forEachUser(
- assert.commandWorked(admin.runCommand(
- {usersInfo: "user", showAuthenticationRestrictions: false})),
- function(userDoc) {
- assert(!userDoc.hasOwnProperty("authenticationRestrictions"));
- assert(!userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- });
- forEachUser(
- assert.commandWorked(admin.runCommand({usersInfo: "user"})), function(userDoc) {
assert(!userDoc.hasOwnProperty("authenticationRestrictions"));
assert(!userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
});
+ });
- });
+ print(
+ "If usersInfo is called with showAuthenticationRestrictions true, on a user without authenticationRestrictions, a document with empty authenticationRestrictions and inheritedAuthenticationRestrictions arrays is returned");
+ forEachUser(assert.commandWorked(
+ admin.runCommand({usersInfo: "user", showAuthenticationRestrictions: true})),
+ function(userDoc) {
+ assert(userDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(0, userDoc["authenticationRestrictions"].length);
- print(
- "Authentication restrictions can be obtained through usersInfo for a single user with restrictions");
- forEachUser(assert.commandWorked(admin.runCommand(
- {usersInfo: "restrictedUser", showAuthenticationRestrictions: true})),
- function(userDoc) {
- assert(userDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(1, userDoc["authenticationRestrictions"].length);
+ assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(0, userDoc["inheritedAuthenticationRestrictions"].length);
+ });
- assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(0, userDoc["inheritedAuthenticationRestrictions"].length);
- });
+ print(
+ "If usersInfo is called and showAuthenticationRestrictions is false or unset, return a document without an authenticationRestrictions or inheritedAuthenticationRestrictions field");
+ ["user", "restrictedUser", "userWithRestrictedRole", "restrictedUserWithRestrictedRole"]
+ .forEach(function(user) {
+ forEachUser(assert.commandWorked(admin.runCommand(
+ {usersInfo: "user", showAuthenticationRestrictions: false})),
+ function(userDoc) {
+ assert(!userDoc.hasOwnProperty("authenticationRestrictions"));
+ assert(!userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ });
+ forEachUser(assert.commandWorked(admin.runCommand({usersInfo: "user"})),
+ function(userDoc) {
+ assert(!userDoc.hasOwnProperty("authenticationRestrictions"));
+ assert(!userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ });
+ });
- print(
- "Authentication restrictions can be obtained through usersInfo for a single user with restrictioned roles");
- forEachUser(
- assert.commandWorked(admin.runCommand(
- {usersInfo: "userWithRestrictedRole", showAuthenticationRestrictions: true})),
- function(userDoc) {
- assert(userDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(0, userDoc["authenticationRestrictions"].length);
-
- assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(1, userDoc["inheritedAuthenticationRestrictions"].length);
- });
-
- print(
- "Authentication restrictions can be obtained through usersInfo for a single restricted user with restrictioned roles");
- forEachUser(assert.commandWorked(admin.runCommand({
- usersInfo: "restrictedUserWithRestrictedRole",
- showAuthenticationRestrictions: true
- })),
- function(userDoc) {
- print("This doc: " + tojson(userDoc));
- assert(userDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(1, userDoc["authenticationRestrictions"].length);
+ print(
+ "Authentication restrictions can be obtained through usersInfo for a single user with restrictions");
+ forEachUser(assert.commandWorked(admin.runCommand(
+ {usersInfo: "restrictedUser", showAuthenticationRestrictions: true})),
+ function(userDoc) {
+ assert(userDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(1, userDoc["authenticationRestrictions"].length);
- assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(1, userDoc["inheritedAuthenticationRestrictions"].length);
- });
- }
+ assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(0, userDoc["inheritedAuthenticationRestrictions"].length);
+ });
- function testRolesInfoCommand(conn) {
- function forEachRole(res, assertFun) {
- assert(res.hasOwnProperty("roles"));
- print("Users: " + tojson(res.roles));
- assert.gt(res.roles.length, 0);
- res.roles.forEach(assertFun);
- }
+    print(
+        "Authentication restrictions can be obtained through usersInfo for a single user with restricted roles");
+ forEachUser(assert.commandWorked(admin.runCommand(
+ {usersInfo: "userWithRestrictedRole", showAuthenticationRestrictions: true})),
+ function(userDoc) {
+ assert(userDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(0, userDoc["authenticationRestrictions"].length);
- var admin = conn.getDB("admin");
- assert(admin.auth("admin", "admin"));
+ assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(1, userDoc["inheritedAuthenticationRestrictions"].length);
+ });
- assert.commandWorked(admin.runCommand({createRole: "role", roles: [], privileges: []}));
- // restrictedRole already created
+    print(
+        "Authentication restrictions can be obtained through usersInfo for a single restricted user with restricted roles");
+ forEachUser(
assert.commandWorked(admin.runCommand(
- {createRole: "roleWithRestrictedRole", roles: ["restrictedRole"], privileges: []}));
- assert.commandWorked(admin.runCommand({
- createRole: "restrictedRoleWithRestrictedRole",
- roles: ["restrictedRole"],
- privileges: [],
- authenticationRestrictions: [{clientSource: ["127.0.0.3"]}]
- }));
-
- ["role", "restrictedRole", "roleWithRestrictedRole", "restrictedRoleWithRestrictedRole"]
- .forEach(function(role) {
- forEachRole(
- assert.commandWorked(admin.runCommand({rolesInfo: role})), function(roleDoc) {
- assert(!roleDoc.hasOwnProperty("authenticationRestrictions"));
- assert(!roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- });
- });
-
- forEachRole(assert.commandWorked(admin.runCommand(
- {rolesInfo: "role", showAuthenticationRestrictions: true})),
- function(roleDoc) {
- assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(0, roleDoc.authenticationRestrictions.length);
- assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(0, roleDoc.inheritedAuthenticationRestrictions.length);
- });
+ {usersInfo: "restrictedUserWithRestrictedRole", showAuthenticationRestrictions: true})),
+ function(userDoc) {
+ print("This doc: " + tojson(userDoc));
+ assert(userDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(1, userDoc["authenticationRestrictions"].length);
+
+ assert(userDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(1, userDoc["inheritedAuthenticationRestrictions"].length);
+ });
+}
+
+function testRolesInfoCommand(conn) {
+ function forEachRole(res, assertFun) {
+ assert(res.hasOwnProperty("roles"));
+ print("Users: " + tojson(res.roles));
+ assert.gt(res.roles.length, 0);
+ res.roles.forEach(assertFun);
+ }
- forEachRole(assert.commandWorked(admin.runCommand(
- {rolesInfo: "restrictedRole", showAuthenticationRestrictions: true})),
- function(roleDoc) {
- assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(1, roleDoc.authenticationRestrictions.length);
- assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(1, roleDoc.inheritedAuthenticationRestrictions.length);
- });
+ var admin = conn.getDB("admin");
+ assert(admin.auth("admin", "admin"));
+
+ assert.commandWorked(admin.runCommand({createRole: "role", roles: [], privileges: []}));
+ // restrictedRole already created
+ assert.commandWorked(admin.runCommand(
+ {createRole: "roleWithRestrictedRole", roles: ["restrictedRole"], privileges: []}));
+ assert.commandWorked(admin.runCommand({
+ createRole: "restrictedRoleWithRestrictedRole",
+ roles: ["restrictedRole"],
+ privileges: [],
+ authenticationRestrictions: [{clientSource: ["127.0.0.3"]}]
+ }));
+
+ ["role", "restrictedRole", "roleWithRestrictedRole", "restrictedRoleWithRestrictedRole"]
+ .forEach(function(role) {
+ forEachRole(assert.commandWorked(admin.runCommand({rolesInfo: role})),
+ function(roleDoc) {
+ assert(!roleDoc.hasOwnProperty("authenticationRestrictions"));
+ assert(!roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ });
+ });
- forEachRole(
- assert.commandWorked(admin.runCommand(
- {rolesInfo: "roleWithRestrictedRole", showAuthenticationRestrictions: true})),
- function(roleDoc) {
- assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(0, roleDoc.authenticationRestrictions.length);
- assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(1, roleDoc.inheritedAuthenticationRestrictions.length);
- });
-
- forEachRole(assert.commandWorked(admin.runCommand({
- rolesInfo: "restrictedRoleWithRestrictedRole",
- showAuthenticationRestrictions: true
- })),
- function(roleDoc) {
- assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
- assert.eq(1, roleDoc.authenticationRestrictions.length);
- assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
- assert.eq(2, roleDoc.inheritedAuthenticationRestrictions.length);
- });
- }
+ forEachRole(assert.commandWorked(
+ admin.runCommand({rolesInfo: "role", showAuthenticationRestrictions: true})),
+ function(roleDoc) {
+ assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(0, roleDoc.authenticationRestrictions.length);
+ assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(0, roleDoc.inheritedAuthenticationRestrictions.length);
+ });
+
+ forEachRole(assert.commandWorked(admin.runCommand(
+ {rolesInfo: "restrictedRole", showAuthenticationRestrictions: true})),
+ function(roleDoc) {
+ assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(1, roleDoc.authenticationRestrictions.length);
+ assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(1, roleDoc.inheritedAuthenticationRestrictions.length);
+ });
+
+ forEachRole(assert.commandWorked(admin.runCommand(
+ {rolesInfo: "roleWithRestrictedRole", showAuthenticationRestrictions: true})),
+ function(roleDoc) {
+ assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(0, roleDoc.authenticationRestrictions.length);
+ assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(1, roleDoc.inheritedAuthenticationRestrictions.length);
+ });
- var keyfile = "jstests/libs/key1";
-
- print("Testing standalone");
- var conn = MongoRunner.runMongod({bind_ip_all: "", auth: ""});
- testRestrictionCreationAndEnforcement(conn, conn, function() {}, function() {});
- testUsersInfoCommand(conn);
- testRolesInfoCommand(conn);
- MongoRunner.stopMongod(conn);
-
- print("Testing replicaset");
- var rst = new ReplSetTest(
- {name: 'testset', nodes: 2, nodeOptions: {bind_ip_all: "", auth: ""}, keyFile: keyfile});
- var nodes = rst.startSet();
- rst.initiate();
- rst.awaitSecondaryNodes();
- var awaitReplication = function() {
- authutil.asCluster(nodes, "jstests/libs/key1", function() {
- rst.awaitReplication();
+ forEachRole(
+ assert.commandWorked(admin.runCommand(
+ {rolesInfo: "restrictedRoleWithRestrictedRole", showAuthenticationRestrictions: true})),
+ function(roleDoc) {
+ assert(roleDoc.hasOwnProperty("authenticationRestrictions"));
+ assert.eq(1, roleDoc.authenticationRestrictions.length);
+ assert(roleDoc.hasOwnProperty("inheritedAuthenticationRestrictions"));
+ assert.eq(2, roleDoc.inheritedAuthenticationRestrictions.length);
});
- };
-
- testRestrictionCreationAndEnforcement(
- rst.getPrimary(), rst.getSecondary(), awaitReplication, awaitReplication);
- testUsersInfoCommand(rst.getPrimary());
- testRolesInfoCommand(rst.getPrimary());
- rst.stopSet();
-
- print("Testing sharded cluster");
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- mongos: 2,
- config: 3,
- shard: 1,
- keyFile: keyfile,
- other: {
- mongosOptions: {bind_ip_all: "", auth: null},
- configOptions: {auth: null},
- shardOptions: {auth: null},
- shardAsReplicaSet: false
- }
+}
+
+var keyfile = "jstests/libs/key1";
+
+print("Testing standalone");
+var conn = MongoRunner.runMongod({bind_ip_all: "", auth: ""});
+testRestrictionCreationAndEnforcement(conn, conn, function() {}, function() {});
+testUsersInfoCommand(conn);
+testRolesInfoCommand(conn);
+MongoRunner.stopMongod(conn);
+
+print("Testing replicaset");
+var rst = new ReplSetTest(
+ {name: 'testset', nodes: 2, nodeOptions: {bind_ip_all: "", auth: ""}, keyFile: keyfile});
+var nodes = rst.startSet();
+rst.initiate();
+rst.awaitSecondaryNodes();
+var awaitReplication = function() {
+ authutil.asCluster(nodes, "jstests/libs/key1", function() {
+ rst.awaitReplication();
});
- testRestrictionCreationAndEnforcement(
- st.s0,
- st.s1,
- function() {},
- function() {
- sleep(40 * 1000); // Wait for mongos user cache invalidation
- });
- testUsersInfoCommand(st.s0);
- st.stop();
-
+};
+
+testRestrictionCreationAndEnforcement(
+ rst.getPrimary(), rst.getSecondary(), awaitReplication, awaitReplication);
+testUsersInfoCommand(rst.getPrimary());
+testRolesInfoCommand(rst.getPrimary());
+rst.stopSet();
+
+print("Testing sharded cluster");
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ mongos: 2,
+ config: 3,
+ shard: 1,
+ keyFile: keyfile,
+ other: {
+ mongosOptions: {bind_ip_all: "", auth: null},
+ configOptions: {auth: null},
+ shardOptions: {auth: null},
+ shardAsReplicaSet: false
+ }
+});
+testRestrictionCreationAndEnforcement(
+ st.s0,
+ st.s1,
+ function() {},
+ function() {
+ sleep(40 * 1000); // Wait for mongos user cache invalidation
+ });
+testUsersInfoCommand(st.s0);
+st.stop();
}());
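Role-level restrictions follow the same shape as user-level ones, and the usersInfo/rolesInfo commands can surface them on request. A minimal sketch of the introspection path tested above, again assuming an authenticated user administrator and an illustrative role name:

    var admin = db.getSiblingDB("admin");
    assert.commandWorked(admin.runCommand({
        createRole: "loopbackRole",  // hypothetical role name
        roles: [],
        privileges: [],
        authenticationRestrictions: [{clientSource: ["127.0.0.1"]}]
    }));
    // showAuthenticationRestrictions adds both the direct and the inherited
    // restriction arrays to each returned document. (Per the tests above,
    // usersInfo rejects it when combined with the all-users form {usersInfo: 1}.)
    var res = admin.runCommand(
        {rolesInfo: "loopbackRole", showAuthenticationRestrictions: true});
    assert.commandWorked(res);
    printjson(res.roles[0].authenticationRestrictions);           // direct
    printjson(res.roles[0].inheritedAuthenticationRestrictions);  // inherited
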
diff --git a/jstests/auth/authz_modifications_access_control.js b/jstests/auth/authz_modifications_access_control.js
index 11b9a59e593..f660e861908 100644
--- a/jstests/auth/authz_modifications_access_control.js
+++ b/jstests/auth/authz_modifications_access_control.js
@@ -70,7 +70,6 @@ function runTest(conn) {
var roleObj = adminUserAdmin.system.roles.findOne({role: "readWrite", db: "admin"});
// double check that no role object named "readWrite" has been created
assert(!roleObj, "user-defined \"readWrite\" role was created: " + tojson(roleObj));
-
})();
(function testViewUser() {
diff --git a/jstests/auth/autocomplete_auth.js b/jstests/auth/autocomplete_auth.js
index c0057bf1e52..35450ecbca6 100644
--- a/jstests/auth/autocomplete_auth.js
+++ b/jstests/auth/autocomplete_auth.js
@@ -15,38 +15,38 @@
const self = this;
(function() {
- 'use strict';
+'use strict';
- const testName = jsTest.name();
- const conn = MongoRunner.runMongod({auth: ''});
- const admin = conn.getDB('admin');
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
+const testName = jsTest.name();
+const conn = MongoRunner.runMongod({auth: ''});
+const admin = conn.getDB('admin');
+admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+assert(admin.auth('admin', 'pass'));
- admin.getSiblingDB(testName).createRole({
- role: 'coachTicket',
- privileges: [{resource: {db: testName, collection: 'coachClass'}, actions: ['find']}],
- roles: []
- });
+admin.getSiblingDB(testName).createRole({
+ role: 'coachTicket',
+ privileges: [{resource: {db: testName, collection: 'coachClass'}, actions: ['find']}],
+ roles: []
+});
- admin.getSiblingDB(testName).createUser(
- {user: 'coachPassenger', pwd: 'password', roles: ['coachTicket']});
+admin.getSiblingDB(testName).createUser(
+ {user: 'coachPassenger', pwd: 'password', roles: ['coachTicket']});
- const testDB = conn.getDB(testName);
- testDB.coachClass.insertOne({});
- testDB.businessClass.insertOne({});
+const testDB = conn.getDB(testName);
+testDB.coachClass.insertOne({});
+testDB.businessClass.insertOne({});
- // Must use 'db' to test autocompletion.
- self.db = new Mongo(conn.host).getDB(testName);
- assert(db.auth('coachPassenger', 'password'));
- const authzErrorCode = 13;
- assert.commandFailedWithCode(db.runCommand({listCollections: 1}), authzErrorCode);
- assert.commandWorked(db.runCommand({find: 'coachClass'}));
- assert.commandFailedWithCode(db.runCommand({find: 'businessClass'}), authzErrorCode);
- shellAutocomplete('db.');
- assert(__autocomplete__.includes('db.coachClass'),
- `Completions should include 'coachClass': ${__autocomplete__}`);
- assert(!__autocomplete__.includes('db.businessClass'),
- `Completions should NOT include 'businessClass': ${__autocomplete__}`);
- MongoRunner.stopMongod(conn);
+// Must use 'db' to test autocompletion.
+self.db = new Mongo(conn.host).getDB(testName);
+assert(db.auth('coachPassenger', 'password'));
+const authzErrorCode = 13;
+assert.commandFailedWithCode(db.runCommand({listCollections: 1}), authzErrorCode);
+assert.commandWorked(db.runCommand({find: 'coachClass'}));
+assert.commandFailedWithCode(db.runCommand({find: 'businessClass'}), authzErrorCode);
+shellAutocomplete('db.');
+assert(__autocomplete__.includes('db.coachClass'),
+ `Completions should include 'coachClass': ${__autocomplete__}`);
+assert(!__autocomplete__.includes('db.businessClass'),
+ `Completions should NOT include 'businessClass': ${__autocomplete__}`);
+MongoRunner.stopMongod(conn);
})();
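The autocompletion assertions above rely on the shell's completion hook: shellAutocomplete(prefix) computes completions visible to the currently authenticated user and stores them in the global __autocomplete__ array. A minimal usage sketch:

    shellAutocomplete('db.');            // populate completions for "db."
    print(__autocomplete__.join(', '));  // lists only collections the user may see
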
diff --git a/jstests/auth/basic_role_auth.js b/jstests/auth/basic_role_auth.js
index e610d1ed493..6f481afc2e6 100644
--- a/jstests/auth/basic_role_auth.js
+++ b/jstests/auth/basic_role_auth.js
@@ -231,215 +231,215 @@ var testOps = function(db, allowedActions) {
// }
var TESTS = [
{
- name: 'Test multiple user login separate connection',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
-
- var conn2 = new Mongo(conn.host);
- var testDB2 = conn2.getDB('test');
- assert.eq(1, testDB2.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
-
- testOps(testDB, READ_PERM);
- testOps(testDB2, UADMIN_PERM);
- }
+ name: 'Test multiple user login separate connection',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
+
+ var conn2 = new Mongo(conn.host);
+ var testDB2 = conn2.getDB('test');
+ assert.eq(1, testDB2.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
+
+ testOps(testDB, READ_PERM);
+ testOps(testDB2, UADMIN_PERM);
+ }
},
{
- name: 'Test user with no role',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('none', AUTH_INFO.test.none.pwd));
+ name: 'Test user with no role',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('none', AUTH_INFO.test.none.pwd));
- testOps(testDB, {});
- }
+ testOps(testDB, {});
+ }
},
{
- name: 'Test read only user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
+ name: 'Test read only user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
- testOps(testDB, READ_PERM);
- }
+ testOps(testDB, READ_PERM);
+ }
},
{
- name: 'Test read/write user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+ name: 'Test read/write user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
- testOps(testDB, READ_WRITE_PERM);
- }
+ testOps(testDB, READ_WRITE_PERM);
+ }
},
{
- name: 'Test read + dbAdmin user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('roadmin', AUTH_INFO.test.roadmin.pwd));
-
- var combinedPerm = Object.extend({}, READ_PERM);
- combinedPerm = Object.extend(combinedPerm, ADMIN_PERM);
- testOps(testDB, combinedPerm);
- }
+ name: 'Test read + dbAdmin user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('roadmin', AUTH_INFO.test.roadmin.pwd));
+
+ var combinedPerm = Object.extend({}, READ_PERM);
+ combinedPerm = Object.extend(combinedPerm, ADMIN_PERM);
+ testOps(testDB, combinedPerm);
+ }
},
{
- name: 'Test dbAdmin user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('admin', AUTH_INFO.test.admin.pwd));
+ name: 'Test dbAdmin user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('admin', AUTH_INFO.test.admin.pwd));
- testOps(testDB, ADMIN_PERM);
- }
+ testOps(testDB, ADMIN_PERM);
+ }
},
{
- name: 'Test userAdmin user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
+ name: 'Test userAdmin user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
- testOps(testDB, UADMIN_PERM);
- }
+ testOps(testDB, UADMIN_PERM);
+ }
},
{
- name: 'Test cluster user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('cluster', AUTH_INFO.admin.cluster.pwd));
+ name: 'Test cluster user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('cluster', AUTH_INFO.admin.cluster.pwd));
- testOps(conn.getDB('test'), CLUSTER_PERM);
- }
+ testOps(conn.getDB('test'), CLUSTER_PERM);
+ }
},
{
- name: 'Test admin user with no role',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('anone', AUTH_INFO.admin.anone.pwd));
-
- testOps(adminDB, {});
- testOps(conn.getDB('test'), {});
- }
+ name: 'Test admin user with no role',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('anone', AUTH_INFO.admin.anone.pwd));
+
+ testOps(adminDB, {});
+ testOps(conn.getDB('test'), {});
+ }
},
{
- name: 'Test read only admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('aro', AUTH_INFO.admin.aro.pwd));
-
- testOps(adminDB, READ_PERM);
- testOps(conn.getDB('test'), {});
- }
+ name: 'Test read only admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('aro', AUTH_INFO.admin.aro.pwd));
+
+ testOps(adminDB, READ_PERM);
+ testOps(conn.getDB('test'), {});
+ }
},
{
- name: 'Test read/write admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('arw', AUTH_INFO.admin.arw.pwd));
-
- testOps(adminDB, READ_WRITE_PERM);
- testOps(conn.getDB('test'), {});
- }
+ name: 'Test read/write admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('arw', AUTH_INFO.admin.arw.pwd));
+
+ testOps(adminDB, READ_WRITE_PERM);
+ testOps(conn.getDB('test'), {});
+ }
},
{
- name: 'Test dbAdmin admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('aadmin', AUTH_INFO.admin.aadmin.pwd));
-
- testOps(adminDB, ADMIN_PERM);
- testOps(conn.getDB('test'), {});
- }
+ name: 'Test dbAdmin admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('aadmin', AUTH_INFO.admin.aadmin.pwd));
+
+ testOps(adminDB, ADMIN_PERM);
+ testOps(conn.getDB('test'), {});
+ }
},
{
- name: 'Test userAdmin admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('auadmin', AUTH_INFO.admin.auadmin.pwd));
-
- testOps(adminDB, UADMIN_PERM);
- testOps(conn.getDB('test'), {});
- }
+ name: 'Test userAdmin admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('auadmin', AUTH_INFO.admin.auadmin.pwd));
+
+ testOps(adminDB, UADMIN_PERM);
+ testOps(conn.getDB('test'), {});
+ }
},
{
- name: 'Test read only any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_ro', AUTH_INFO.admin.any_ro.pwd));
-
- testOps(adminDB, READ_PERM);
- testOps(conn.getDB('test'), READ_PERM);
- }
+ name: 'Test read only any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_ro', AUTH_INFO.admin.any_ro.pwd));
+
+ testOps(adminDB, READ_PERM);
+ testOps(conn.getDB('test'), READ_PERM);
+ }
},
{
- name: 'Test read/write any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_rw', AUTH_INFO.admin.any_rw.pwd));
-
- testOps(adminDB, READ_WRITE_PERM);
- testOps(conn.getDB('test'), READ_WRITE_PERM);
- }
+ name: 'Test read/write any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_rw', AUTH_INFO.admin.any_rw.pwd));
+
+ testOps(adminDB, READ_WRITE_PERM);
+ testOps(conn.getDB('test'), READ_WRITE_PERM);
+ }
},
{
- name: 'Test dbAdmin any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_admin', AUTH_INFO.admin.any_admin.pwd));
-
- testOps(adminDB, ADMIN_PERM);
- testOps(conn.getDB('test'), ADMIN_PERM);
- }
+ name: 'Test dbAdmin any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_admin', AUTH_INFO.admin.any_admin.pwd));
+
+ testOps(adminDB, ADMIN_PERM);
+ testOps(conn.getDB('test'), ADMIN_PERM);
+ }
},
{
- name: 'Test userAdmin any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
-
- testOps(adminDB, UADMIN_PERM);
- testOps(conn.getDB('test'), UADMIN_PERM);
- }
+ name: 'Test userAdmin any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
+
+ testOps(adminDB, UADMIN_PERM);
+ testOps(conn.getDB('test'), UADMIN_PERM);
+ }
},
{
- name: 'Test change role',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
-
- var newConn = new Mongo(conn.host);
- assert.eq(1, newConn.getDB('admin').auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
-            var origSpec = newConn.getDB("test").getUser("rw");
-            newConn.getDB('test').updateUser('rw', {roles: ['read']});
-
- // role change should affect users already authenticated.
- testOps(testDB, READ_PERM);
-
- // role change should affect active connections.
- testDB.runCommand({logout: 1});
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
- testOps(testDB, READ_PERM);
-
- // role change should also affect new connections.
- var newConn3 = new Mongo(conn.host);
- var testDB3 = newConn3.getDB('test');
- assert.eq(1, testDB3.auth('rw', AUTH_INFO.test.rw.pwd));
- testOps(testDB3, READ_PERM);
-
- newConn.getDB('test').updateUser('rw', {roles: origSpec.roles});
- }
+ name: 'Test change role',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+
+ var newConn = new Mongo(conn.host);
+ assert.eq(1, newConn.getDB('admin').auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
+        var origSpec = newConn.getDB("test").getUser("rw");
+        newConn.getDB('test').updateUser('rw', {roles: ['read']});
+
+ // role change should affect users already authenticated.
+ testOps(testDB, READ_PERM);
+
+ // role change should affect active connections.
+ testDB.runCommand({logout: 1});
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+ testOps(testDB, READ_PERM);
+
+ // role change should also affect new connections.
+ var newConn3 = new Mongo(conn.host);
+ var testDB3 = newConn3.getDB('test');
+ assert.eq(1, testDB3.auth('rw', AUTH_INFO.test.rw.pwd));
+ testOps(testDB3, READ_PERM);
+
+ newConn.getDB('test').updateUser('rw', {roles: origSpec.roles});
+ }
},
{
- name: 'Test override user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
- assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
- testOps(testDB, READ_PERM);
-
- testDB.runCommand({logout: 1});
- testOps(testDB, {});
- }
+ name: 'Test override user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+ assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
+ testOps(testDB, READ_PERM);
+
+ testDB.runCommand({logout: 1});
+ testOps(testDB, {});
+ }
}
];
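
Each entry in the table above follows one contract: authenticate as a single fixture user, then call testOps with the permission set that user should hold on the given database (READ_PERM, READ_WRITE_PERM, ADMIN_PERM, UADMIN_PERM, or {} for no access). A new entry would take the same shape; a sketch reusing the existing 'ro' fixture user, not part of the patch:

{
    name: 'Test read only test db user (sketch)',
    test: function(conn) {
        var testDB = conn.getDB('test');
        assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
        // 'ro' can read the test db but should have no access elsewhere.
        testOps(testDB, READ_PERM);
        testOps(conn.getDB('admin'), {});
    }
},
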
diff --git a/jstests/auth/cluster_ip_whitelist.js b/jstests/auth/cluster_ip_whitelist.js
index b82262a7551..401133dcf71 100644
--- a/jstests/auth/cluster_ip_whitelist.js
+++ b/jstests/auth/cluster_ip_whitelist.js
@@ -3,55 +3,54 @@
*/
(function() {
- 'use strict';
-
- print("When whitelist is empty, the server does not start.");
- assert.eq(null,
- MongoRunner.runMongod(
- {auth: null, keyFile: "jstests/libs/key1", clusterIpSourceWhitelist: ""}));
-
- function testIpWhitelist(description, whitelistString, authResult) {
- print(description);
-
- var conn = MongoRunner.runMongod(
- {auth: null, keyFile: "jstests/libs/key1", clusterIpSourceWhitelist: whitelistString});
- assert.eq(authResult, conn.getDB("local").auth("__system", "foopdedoop"));
- MongoRunner.stopMongod(conn);
- }
-
- testIpWhitelist(
- "When 127.0.0.1 is whitelisted, a client connected via localhost may auth as __system.",
- "127.0.0.1",
- true);
-
- testIpWhitelist(
- "When 127.0.0.0 is whitelisted as a 24-bit CIDR block, a client connected via localhost may auth as __system.",
- "127.0.0.0/24",
- true);
-
- testIpWhitelist(
- "When 127.0.0.5 is whitelisted as a 24-bit CIDR block, a client connected via localhost may auth as __system.",
- "127.0.0.5/24",
- true);
-
- testIpWhitelist(
- "When 127.0.0.0 is whitelisted as a 8-bit CIDR block, a client connected via localhost may auth as __system.",
- "127.0.0.0/8",
- true);
-
- testIpWhitelist(
- "When the IP block reserved for documentation and the 127.0.0.0/8 block are both whitelisted, a client connected via localhost may auth as __system.",
- "192.0.2.0/24,127.0.0.0/8",
- true);
-
- testIpWhitelist(
- "When 127.0.0.0/8 and the IP block reserved for documentation are both whitelisted, a client connected via localhost may auth as __system.",
- "127.0.0.0/8,192.0.2.0/24",
- true);
-
- testIpWhitelist(
- "When the IP block reserved for documentation and examples is whitelisted, a client connected via localhost may not auth as __system.",
- "192.0.2.0/24",
- false);
-
+'use strict';
+
+print("When whitelist is empty, the server does not start.");
+assert.eq(null,
+ MongoRunner.runMongod(
+ {auth: null, keyFile: "jstests/libs/key1", clusterIpSourceWhitelist: ""}));
+
+function testIpWhitelist(description, whitelistString, authResult) {
+ print(description);
+
+ var conn = MongoRunner.runMongod(
+ {auth: null, keyFile: "jstests/libs/key1", clusterIpSourceWhitelist: whitelistString});
+ assert.eq(authResult, conn.getDB("local").auth("__system", "foopdedoop"));
+ MongoRunner.stopMongod(conn);
+}
+
+testIpWhitelist(
+ "When 127.0.0.1 is whitelisted, a client connected via localhost may auth as __system.",
+ "127.0.0.1",
+ true);
+
+testIpWhitelist(
+ "When 127.0.0.0 is whitelisted as a 24-bit CIDR block, a client connected via localhost may auth as __system.",
+ "127.0.0.0/24",
+ true);
+
+testIpWhitelist(
+ "When 127.0.0.5 is whitelisted as a 24-bit CIDR block, a client connected via localhost may auth as __system.",
+ "127.0.0.5/24",
+ true);
+
+testIpWhitelist(
+ "When 127.0.0.0 is whitelisted as a 8-bit CIDR block, a client connected via localhost may auth as __system.",
+ "127.0.0.0/8",
+ true);
+
+testIpWhitelist(
+ "When the IP block reserved for documentation and the 127.0.0.0/8 block are both whitelisted, a client connected via localhost may auth as __system.",
+ "192.0.2.0/24,127.0.0.0/8",
+ true);
+
+testIpWhitelist(
+ "When 127.0.0.0/8 and the IP block reserved for documentation are both whitelisted, a client connected via localhost may auth as __system.",
+ "127.0.0.0/8,192.0.2.0/24",
+ true);
+
+testIpWhitelist(
+ "When the IP block reserved for documentation and examples is whitelisted, a client connected via localhost may not auth as __system.",
+ "192.0.2.0/24",
+ false);
}());
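
The whitelist cases above rely on ordinary CIDR containment: the prefix length masks off the host bits before comparison, which is why "127.0.0.5/24" still admits a client on 127.0.0.1 while "192.0.2.0/24" does not. A standalone sketch of that check, not part of the patch (ipToInt and ipInCidr are illustrative helpers):

function ipToInt(ip) {
    // Dotted-quad IPv4 string to an unsigned 32-bit integer.
    return ip.split('.').reduce((acc, octet) => (acc * 256) + Number(octet), 0);
}

function ipInCidr(ip, cidr) {
    // 'cidr' is "a.b.c.d/len"; zero the host bits and compare network prefixes.
    const parts = cidr.split('/');
    const len = (parts.length === 1) ? 32 : Number(parts[1]);
    const mask = (len === 0) ? 0 : ((~0 << (32 - len)) >>> 0);
    return ((ipToInt(ip) & mask) >>> 0) === ((ipToInt(parts[0]) & mask) >>> 0);
}

assert(ipInCidr("127.0.0.1", "127.0.0.5/24"));   // host octet is masked away
assert(!ipInCidr("127.0.0.1", "192.0.2.0/24"));  // documentation block never matches localhost
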
diff --git a/jstests/auth/commands_builtin_roles.js b/jstests/auth/commands_builtin_roles.js
index 32674cd8a41..0c87ea82763 100644
--- a/jstests/auth/commands_builtin_roles.js
+++ b/jstests/auth/commands_builtin_roles.js
@@ -58,15 +58,16 @@ function testProperAuthorization(conn, t, testcase, r) {
assert(r.db.auth("user|" + r.key, "password"));
authCommandsLib.authenticatedSetup(t, runOnDb);
var command = t.command;
- if (typeof(command) === "function") {
+ if (typeof (command) === "function") {
command = t.command(state, testcase.commandArgs);
}
var res = runOnDb.runCommand(command);
if (testcase.roles[r.key]) {
if (res.ok == 0 && res.code == authErrCode) {
- out = "expected authorization success" + " but received " + tojson(res) + " on db " +
- testcase.runOnDb + " with role " + r.key;
+ out = "expected authorization success" +
+ " but received " + tojson(res) + " on db " + testcase.runOnDb + " with role " +
+ r.key;
} else if (res.ok == 0 && !testcase.expectFail && res.code != commandNotSupportedCode) {
// don't error if the test failed with code commandNotSupported since
            // some storage engines (e.g. wiredTiger) don't support some commands (e.g. touch)
@@ -75,8 +76,9 @@ function testProperAuthorization(conn, t, testcase, r) {
}
} else {
if (res.ok == 1 || (res.ok == 0 && res.code != authErrCode)) {
- out = "expected authorization failure" + " but received result " + tojson(res) +
- " on db " + testcase.runOnDb + " with role " + r.key;
+ out = "expected authorization failure" +
+ " but received result " + tojson(res) + " on db " + testcase.runOnDb +
+ " with role " + r.key;
}
}
diff --git a/jstests/auth/commands_user_defined_roles.js b/jstests/auth/commands_user_defined_roles.js
index 049034f4c86..003957abe64 100644
--- a/jstests/auth/commands_user_defined_roles.js
+++ b/jstests/auth/commands_user_defined_roles.js
@@ -40,7 +40,7 @@ function testProperAuthorization(conn, t, testcase, privileges) {
authCommandsLib.authenticatedSetup(t, runOnDb);
var command = t.command;
- if (typeof(command) === "function") {
+ if (typeof (command) === "function") {
command = t.command(state, testcase.commandArgs);
}
var res = runOnDb.runCommand(command);
@@ -51,8 +51,9 @@ function testProperAuthorization(conn, t, testcase, privileges) {
out = "command failed with " + tojson(res) + " on db " + testcase.runOnDb +
" with privileges " + tojson(privileges);
} else if (testcase.expectFail && res.code == authErrCode) {
- out = "expected authorization success" + " but received " + tojson(res) + " on db " +
- testcase.runOnDb + " with privileges " + tojson(privileges);
+ out = "expected authorization success" +
+ " but received " + tojson(res) + " on db " + testcase.runOnDb + " with privileges " +
+ tojson(privileges);
}
firstDb.logout();
@@ -78,14 +79,14 @@ function testInsufficientPrivileges(conn, t, testcase, privileges) {
authCommandsLib.authenticatedSetup(t, runOnDb);
var command = t.command;
- if (typeof(command) === "function") {
+ if (typeof (command) === "function") {
command = t.command(state, testcase.commandArgs);
}
var res = runOnDb.runCommand(command);
if (res.ok == 1 || res.code != authErrCode) {
- out = "expected authorization failure " + " but received " + tojson(res) +
- " with privileges " + tojson(privileges);
+ out = "expected authorization failure " +
+ " but received " + tojson(res) + " with privileges " + tojson(privileges);
}
firstDb.logout();
diff --git a/jstests/auth/curop_auth_info.js b/jstests/auth/curop_auth_info.js
index 94f7426e4f1..2bb329b1eee 100644
--- a/jstests/auth/curop_auth_info.js
+++ b/jstests/auth/curop_auth_info.js
@@ -1,77 +1,77 @@
(function() {
- 'use strict';
+'use strict';
- const runTest = function(conn, failPointConn) {
- jsTestLog("Setting up users");
- const db = conn.getDB("admin");
- assert.commandWorked(
- db.runCommand({createUser: "admin", pwd: "pwd", roles: jsTest.adminUserRoles}));
- assert.eq(db.auth("admin", "pwd"), 1);
- assert.commandWorked(db.runCommand({createUser: "testuser", pwd: "pwd", roles: []}));
- db.grantRolesToUser("testuser", [{role: "readWrite", db: "test"}]);
+const runTest = function(conn, failPointConn) {
+ jsTestLog("Setting up users");
+ const db = conn.getDB("admin");
+ assert.commandWorked(
+ db.runCommand({createUser: "admin", pwd: "pwd", roles: jsTest.adminUserRoles}));
+ assert.eq(db.auth("admin", "pwd"), 1);
+ assert.commandWorked(db.runCommand({createUser: "testuser", pwd: "pwd", roles: []}));
+ db.grantRolesToUser("testuser", [{role: "readWrite", db: "test"}]);
- const queryFn = function() {
- assert.eq(db.getSiblingDB("admin").auth("testuser", "pwd"), 1);
- let testDB = db.getSiblingDB("test");
- testDB.test.insert({});
- assert.eq(testDB.test.find({}).comment("curop_auth_info.js query").itcount(), 1);
- };
-
- jsTestLog("blocking finds and starting parallel shell to create op");
- assert.commandWorked(failPointConn.getDB("admin").runCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
- let finderWait = startParallelShell(queryFn, conn.port);
- let myOp;
+ const queryFn = function() {
+ assert.eq(db.getSiblingDB("admin").auth("testuser", "pwd"), 1);
+ let testDB = db.getSiblingDB("test");
+ testDB.test.insert({});
+ assert.eq(testDB.test.find({}).comment("curop_auth_info.js query").itcount(), 1);
+ };
- assert.soon(function() {
- const curOpResults = db.runCommand({currentOp: 1});
- assert.commandWorked(curOpResults);
- print(tojson(curOpResults));
- const myOps = curOpResults["inprog"].filter((op) => {
- return (op["command"]["comment"] == "curop_auth_info.js query");
- });
+ jsTestLog("blocking finds and starting parallel shell to create op");
+ assert.commandWorked(failPointConn.getDB("admin").runCommand(
+ {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
+ let finderWait = startParallelShell(queryFn, conn.port);
+ let myOp;
- if (myOps.length == 0) {
- return false;
- }
- myOp = myOps[0];
- return true;
+ assert.soon(function() {
+ const curOpResults = db.runCommand({currentOp: 1});
+ assert.commandWorked(curOpResults);
+ print(tojson(curOpResults));
+ const myOps = curOpResults["inprog"].filter((op) => {
+ return (op["command"]["comment"] == "curop_auth_info.js query");
});
- jsTestLog("found op");
- assert.commandWorked(failPointConn.getDB("admin").runCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- finderWait();
-
- const authedUsers = myOp["effectiveUsers"];
- const impersonators = myOp["runBy"];
- print(tojson(authedUsers), tojson(impersonators));
- if (impersonators) {
- assert.eq(authedUsers.length, 1);
- assert.docEq(authedUsers[0], {user: "testuser", db: "admin"});
- assert(impersonators);
- assert.eq(impersonators.length, 1);
- assert.docEq(impersonators[0], {user: "__system", db: "local"});
- } else {
- assert(authedUsers);
- assert.eq(authedUsers.length, 1);
- assert.docEq(authedUsers[0], {user: "testuser", db: "admin"});
+ if (myOps.length == 0) {
+ return false;
}
- };
+ myOp = myOps[0];
+ return true;
+ });
- const m = MongoRunner.runMongod();
- runTest(m, m);
- MongoRunner.stopMongod(m);
+ jsTestLog("found op");
+ assert.commandWorked(failPointConn.getDB("admin").runCommand(
+ {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+ finderWait();
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- keyFile: 'jstests/libs/key1',
- other: {
- shardAsReplicaSet: false,
- }
- });
- runTest(st.s0, st.d0);
- st.stop();
+ const authedUsers = myOp["effectiveUsers"];
+ const impersonators = myOp["runBy"];
+ print(tojson(authedUsers), tojson(impersonators));
+ if (impersonators) {
+ assert.eq(authedUsers.length, 1);
+ assert.docEq(authedUsers[0], {user: "testuser", db: "admin"});
+ assert(impersonators);
+ assert.eq(impersonators.length, 1);
+ assert.docEq(impersonators[0], {user: "__system", db: "local"});
+ } else {
+ assert(authedUsers);
+ assert.eq(authedUsers.length, 1);
+ assert.docEq(authedUsers[0], {user: "testuser", db: "admin"});
+ }
+};
+
+const m = MongoRunner.runMongod();
+runTest(m, m);
+MongoRunner.stopMongod(m);
+
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ keyFile: 'jstests/libs/key1',
+ other: {
+ shardAsReplicaSet: false,
+ }
+});
+runTest(st.s0, st.d0);
+st.stop();
})();
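
The assertions above distinguish two arrays on a currentOp entry: effectiveUsers, the users the operation actually runs as, and runBy, populated when another principal (here __system on the keyFile-authenticated sharded cluster) impersonates them. A short sketch of reading both fields, using the same field names the test checks:

// On a standalone there is no impersonation, so 'runBy' is absent.
const ops = db.getSiblingDB("admin").runCommand({currentOp: 1}).inprog;
ops.forEach(function(op) {
    printjson({effectiveUsers: op.effectiveUsers, runBy: op.runBy});
});
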
diff --git a/jstests/auth/currentop_cursors_auth.js b/jstests/auth/currentop_cursors_auth.js
index ca196be176f..70ee354273a 100644
--- a/jstests/auth/currentop_cursors_auth.js
+++ b/jstests/auth/currentop_cursors_auth.js
@@ -4,164 +4,156 @@
* @tags: [assumes_read_concern_unchanged, requires_auth, requires_journaling, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- // TODO SERVER-32672: remove the 'skipGossipingClusterTime' flag.
- TestData.skipGossipingClusterTime = true;
+// TODO SERVER-32672: remove the 'skipGossipingClusterTime' flag.
+TestData.skipGossipingClusterTime = true;
- // Create a new sharded cluster for testing and enable auth.
- const key = "jstests/libs/key1";
- const st = new ShardingTest({name: jsTestName(), keyFile: key, shards: 1});
+// Create a new sharded cluster for testing and enable auth.
+const key = "jstests/libs/key1";
+const st = new ShardingTest({name: jsTestName(), keyFile: key, shards: 1});
- const shardConn = st.rs0.getPrimary();
- const mongosConn = st.s;
+const shardConn = st.rs0.getPrimary();
+const mongosConn = st.s;
- shardConn.waitForClusterTime(60);
+shardConn.waitForClusterTime(60);
- Random.setRandomSeed();
- const pass = "a" + Random.rand();
+Random.setRandomSeed();
+const pass = "a" + Random.rand();
- // Create one root user and one regular user on the given connection.
- function createUsers(conn) {
- const adminDB = conn.getDB("admin");
- adminDB.createUser({user: "ted", pwd: pass, roles: ["root"]});
- assert(adminDB.auth("ted", pass), "Authentication 1 Failed");
- adminDB.createUser({user: "yuta", pwd: pass, roles: ["readWriteAnyDatabase"]});
- }
+// Create one root user and one regular user on the given connection.
+function createUsers(conn) {
+ const adminDB = conn.getDB("admin");
+ adminDB.createUser({user: "ted", pwd: pass, roles: ["root"]});
+ assert(adminDB.auth("ted", pass), "Authentication 1 Failed");
+ adminDB.createUser({user: "yuta", pwd: pass, roles: ["readWriteAnyDatabase"]});
+}
- // Create the necessary users at both cluster and shard-local level.
- createUsers(shardConn);
- createUsers(mongosConn);
-
- // Run the various auth tests on the given shard or mongoS connection.
- function runCursorTests(conn) {
- const db = conn.getDB("test");
- const adminDB = db.getSiblingDB("admin");
-
- // Log in as the root user.
- assert.commandWorked(adminDB.logout());
- assert(adminDB.auth("ted", pass), "Authentication 2 Failed");
-
- const coll = db.jstests_currentop_cursors_auth;
- coll.drop();
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(coll.insert({val: i}));
- }
-
- // Verify that we can see our own cursor with {allUsers: false}.
- const cursorId = assert
- .commandWorked(db.runCommand(
- {find: "jstests_currentop_cursors_auth", batchSize: 2}))
- .cursor.id;
-
- let result =
- adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
- ])
- .toArray();
- assert.eq(result.length, 1, result);
-
- // Log in as the non-root user.
- assert.commandWorked(adminDB.logout());
- assert(adminDB.auth("yuta", pass), "Authentication 3 Failed");
-
- // Verify that we cannot see the root user's cursor.
- result = adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
- ])
- .toArray();
- assert.eq(result.length, 0, result);
+// Create the necessary users at both cluster and shard-local level.
+createUsers(shardConn);
+createUsers(mongosConn);
- // Make sure that the behavior is the same when 'allUsers' is not explicitly specified.
- result = adminDB
- .aggregate([
- {$currentOp: {localOps: true, idleCursors: true}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
- ])
- .toArray();
- assert.eq(result.length, 0, result);
-
- // Verify that the user without the 'inprog' privilege cannot view shard cursors via mongoS.
- if (FixtureHelpers.isMongos(db)) {
- assert.commandFailedWithCode(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: {localOps: false, idleCursors: true}}],
- cursor: {}
- }),
- ErrorCodes.Unauthorized);
- }
-
- // Create a cursor with the second (non-root) user and confirm that we can see it.
- const secondCursorId = assert
- .commandWorked(db.runCommand(
- {find: "jstests_currentop_cursors_auth", batchSize: 2}))
- .cursor.id;
-
- result =
- adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": secondCursorId}]}}
- ])
- .toArray();
- assert.eq(result.length, 1, result);
-
- // Log back in with the root user and confirm that the first cursor is still present.
- assert.commandWorked(adminDB.logout());
- assert(adminDB.auth("ted", pass), "Authentication 4 Failed");
+// Run the various auth tests on the given shard or mongoS connection.
+function runCursorTests(conn) {
+ const db = conn.getDB("test");
+ const adminDB = db.getSiblingDB("admin");
- result = adminDB
+ // Log in as the root user.
+ assert.commandWorked(adminDB.logout());
+ assert(adminDB.auth("ted", pass), "Authentication 2 Failed");
+
+ const coll = db.jstests_currentop_cursors_auth;
+ coll.drop();
+ for (let i = 0; i < 5; ++i) {
+ assert.commandWorked(coll.insert({val: i}));
+ }
+
+ // Verify that we can see our own cursor with {allUsers: false}.
+ const cursorId =
+ assert.commandWorked(db.runCommand({find: "jstests_currentop_cursors_auth", batchSize: 2}))
+ .cursor.id;
+
+ let result = adminDB
.aggregate([
{$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
{$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
])
.toArray();
- assert.eq(result.length, 1, result);
+ assert.eq(result.length, 1, result);
+
+ // Log in as the non-root user.
+ assert.commandWorked(adminDB.logout());
+ assert(adminDB.auth("yuta", pass), "Authentication 3 Failed");
+
+ // Verify that we cannot see the root user's cursor.
+ result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
+ ])
+ .toArray();
+ assert.eq(result.length, 0, result);
+
+ // Make sure that the behavior is the same when 'allUsers' is not explicitly specified.
+ result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
+ ])
+ .toArray();
+ assert.eq(result.length, 0, result);
+
+ // Verify that the user without the 'inprog' privilege cannot view shard cursors via mongoS.
+ if (FixtureHelpers.isMongos(db)) {
+ assert.commandFailedWithCode(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$currentOp: {localOps: false, idleCursors: true}}],
+ cursor: {}
+ }),
+ ErrorCodes.Unauthorized);
+ }
- // Confirm that the root user can see both users' cursors with {allUsers: true}.
+ // Create a cursor with the second (non-root) user and confirm that we can see it.
+ const secondCursorId =
+ assert.commandWorked(db.runCommand({find: "jstests_currentop_cursors_auth", batchSize: 2}))
+ .cursor.id;
+
+ result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": secondCursorId}]}}
+ ])
+ .toArray();
+ assert.eq(result.length, 1, result);
+
+ // Log back in with the root user and confirm that the first cursor is still present.
+ assert.commandWorked(adminDB.logout());
+ assert(adminDB.auth("ted", pass), "Authentication 4 Failed");
+
+ result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
+ ])
+ .toArray();
+ assert.eq(result.length, 1, result);
+
+ // Confirm that the root user can see both users' cursors with {allUsers: true}.
+ result =
+ adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: true, idleCursors: true}},
+ {$match: {type: "idleCursor", "cursor.cursorId": {$in: [cursorId, secondCursorId]}}}
+ ])
+ .toArray();
+ assert.eq(result.length, 2, result);
+
+ // The root user can also see both cursors on the shard via mongoS with {localOps: false}.
+ if (FixtureHelpers.isMongos(db)) {
result = adminDB
.aggregate([
- {$currentOp: {localOps: true, allUsers: true, idleCursors: true}},
- {
- $match: {
- type: "idleCursor",
- "cursor.cursorId": {$in: [cursorId, secondCursorId]}
- }
- }
+ {$currentOp: {localOps: false, allUsers: true, idleCursors: true}},
+ {$match: {type: "idleCursor", shard: st.rs0.name}}
])
.toArray();
assert.eq(result.length, 2, result);
-
- // The root user can also see both cursors on the shard via mongoS with {localOps: false}.
- if (FixtureHelpers.isMongos(db)) {
- result = adminDB
- .aggregate([
- {$currentOp: {localOps: false, allUsers: true, idleCursors: true}},
- {$match: {type: "idleCursor", shard: st.rs0.name}}
- ])
- .toArray();
- assert.eq(result.length, 2, result);
- }
-
- // Clean up the cursors so that they don't affect subsequent tests.
- assert.commandWorked(
- db.runCommand({killCursors: coll.getName(), cursors: [cursorId, secondCursorId]}));
-
-        // Make sure to log out to allow the __system user to use the implicit session.
- assert.commandWorked(adminDB.logout());
}
- jsTestLog("Running cursor tests on mongoD");
- runCursorTests(shardConn);
+ // Clean up the cursors so that they don't affect subsequent tests.
+ assert.commandWorked(
+ db.runCommand({killCursors: coll.getName(), cursors: [cursorId, secondCursorId]}));
+
+    // Make sure to log out to allow the __system user to use the implicit session.
+ assert.commandWorked(adminDB.logout());
+}
+
+jsTestLog("Running cursor tests on mongoD");
+runCursorTests(shardConn);
- jsTestLog("Running cursor tests on mongoS");
- runCursorTests(mongosConn);
+jsTestLog("Running cursor tests on mongoS");
+runCursorTests(mongosConn);
- st.stop();
+st.stop();
})();
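
Every visibility check above repeats the same $currentOp-then-$match pipeline, varying only the allUsers flag and the cursor id. The pattern condenses into a small helper; a sketch (findIdleCursors is an illustrative name, not part of the test):

function findIdleCursors(adminDB, cursorId, allUsers) {
    // List idle-cursor entries for one cursor id, scoped by 'allUsers'.
    return adminDB
        .aggregate([
            {$currentOp: {localOps: true, allUsers: allUsers, idleCursors: true}},
            {$match: {type: "idleCursor", "cursor.cursorId": cursorId}}
        ])
        .toArray();
}

// e.g. the owning user sees exactly one entry for its own cursor:
// assert.eq(1, findIdleCursors(adminDB, cursorId, false).length);
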
diff --git a/jstests/auth/deleted_recreated_user.js b/jstests/auth/deleted_recreated_user.js
index 87517f48297..704710107c0 100644
--- a/jstests/auth/deleted_recreated_user.js
+++ b/jstests/auth/deleted_recreated_user.js
@@ -1,74 +1,74 @@
// Test that sessions cannot be resumed by a deleted and recreated user.
(function() {
- 'use strict';
+'use strict';
- const kInvalidationIntervalSecs = 5;
+const kInvalidationIntervalSecs = 5;
- function runTest(s0, s1) {
- assert(s0);
- assert(s1);
- const admin = s0.getDB('admin');
+function runTest(s0, s1) {
+ assert(s0);
+ assert(s1);
+ const admin = s0.getDB('admin');
- function checkIdType(username) {
- const user = admin.system.users.find({user: username, db: 'admin'}).toArray()[0];
- const id = user._id;
- const userId = user.userId;
- assert.eq(typeof(id), 'string');
- assert.eq(id, 'admin.' + username);
- assert.eq(typeof(userId), 'object');
- assert.eq(tojson(userId).substring(0, 5), 'UUID(');
- }
-
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
- checkIdType('admin');
+ function checkIdType(username) {
+ const user = admin.system.users.find({user: username, db: 'admin'}).toArray()[0];
+ const id = user._id;
+ const userId = user.userId;
+ assert.eq(typeof (id), 'string');
+ assert.eq(id, 'admin.' + username);
+ assert.eq(typeof (userId), 'object');
+ assert.eq(tojson(userId).substring(0, 5), 'UUID(');
+ }
- admin.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
- checkIdType('user');
- admin.logout();
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+ checkIdType('admin');
- // Connect as basic user and create a session.
- assert(admin.auth('user', 'pass'));
- assert.writeOK(admin.mycoll.insert({_id: "foo", data: "bar"}));
+ admin.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+ checkIdType('user');
+ admin.logout();
- // Perform administrative commands via separate shell.
- function evalCmd(cmd) {
- const uri = 'mongodb://admin:pass@localhost:' + s1.port + '/admin';
- const result = runMongoProgram('./mongo', uri, '--eval', cmd);
- assert.eq(result, 0, "Command failed");
- }
- evalCmd('db.dropUser("user"); ');
- evalCmd('db.createUser({user: "user", pwd: "secret", roles: ["root"]});');
+ // Connect as basic user and create a session.
+ assert(admin.auth('user', 'pass'));
+ assert.writeOK(admin.mycoll.insert({_id: "foo", data: "bar"}));
- if (s0 !== s1) {
- // Wait for twice the invalidation interval when sharding.
- sleep(2 * kInvalidationIntervalSecs * 1000);
- }
+ // Perform administrative commands via separate shell.
+ function evalCmd(cmd) {
+ const uri = 'mongodb://admin:pass@localhost:' + s1.port + '/admin';
+ const result = runMongoProgram('./mongo', uri, '--eval', cmd);
+ assert.eq(result, 0, "Command failed");
+ }
+ evalCmd('db.dropUser("user"); ');
+ evalCmd('db.createUser({user: "user", pwd: "secret", roles: ["root"]});');
-        // This should fail due to an invalid user session.
- const thrown =
- assert.throws(() => admin.mycoll.find({}).toArray(), [], "Able to find after recreate");
- assert.eq(thrown.code, ErrorCodes.Unauthorized, "Threw something other than unauthorized");
+ if (s0 !== s1) {
+ // Wait for twice the invalidation interval when sharding.
+ sleep(2 * kInvalidationIntervalSecs * 1000);
}
- const mongod = MongoRunner.runMongod({auth: ''});
- runTest(mongod, mongod);
- MongoRunner.stopMongod(mongod);
+    // This should fail due to an invalid user session.
+ const thrown =
+ assert.throws(() => admin.mycoll.find({}).toArray(), [], "Able to find after recreate");
+ assert.eq(thrown.code, ErrorCodes.Unauthorized, "Threw something other than unauthorized");
+}
+
+const mongod = MongoRunner.runMongod({auth: ''});
+runTest(mongod, mongod);
+MongoRunner.stopMongod(mongod);
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 2,
- config: 1,
- other: {
- keyFile: 'jstests/libs/key1',
- shardAsReplicaSet: false,
- mongosOptions: {
- setParameter: 'userCacheInvalidationIntervalSecs=' + kInvalidationIntervalSecs,
- },
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 2,
+ config: 1,
+ other: {
+ keyFile: 'jstests/libs/key1',
+ shardAsReplicaSet: false,
+ mongosOptions: {
+ setParameter: 'userCacheInvalidationIntervalSecs=' + kInvalidationIntervalSecs,
},
- });
- runTest(st.s0, st.s1);
- st.stop();
+ },
+});
+runTest(st.s0, st.s1);
+st.stop();
})();
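
The invalidation above hinges on userId: the recreated "user" keeps the same string _id ("admin.user") but is minted a fresh UUID, so the old session no longer resolves to a live user and the find fails with Unauthorized. A sketch of inspecting both identifiers, following the checkIdType pattern in the test:

const doc = admin.system.users.findOne({user: 'user', db: 'admin'});
printjson({_id: doc._id, userId: doc.userId});  // _id is stable; userId changes on drop/recreate
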
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js
index d58c52a205c..4495d61200b 100644
--- a/jstests/auth/getMore.js
+++ b/jstests/auth/getMore.js
@@ -1,351 +1,341 @@
// Tests that a user can only run a getMore on a cursor that they created.
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- function runTest(conn) {
- let adminDB = conn.getDB("admin");
- let isMaster = adminDB.runCommand("ismaster");
- assert.commandWorked(isMaster);
- const isMongos = (isMaster.msg === "isdbgrid");
-
- // Create the admin user.
- assert.commandWorked(
- adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- // Set up the test database.
- const testDBName = "auth_getMore";
- let testDB = adminDB.getSiblingDB(testDBName);
- testDB.dropDatabase();
- assert.writeOK(testDB.foo.insert({_id: 0}));
- assert.writeOK(testDB.foo.insert({_id: 1}));
- assert.writeOK(testDB.foo.insert({_id: 2}));
-
- //
- // Test that a user can only run a getMore on a cursor that they created.
- //
-
- // Create two users, "Alice" and "Mallory".
- assert.commandWorked(
- testDB.runCommand({createUser: "Alice", pwd: "pwd", roles: ["readWrite"]}));
- assert.commandWorked(
- testDB.runCommand({createUser: "Mallory", pwd: "pwd", roles: ["readWrite"]}));
- adminDB.logout();
-
- // Test that "Mallory" cannot use a find cursor created by "Alice".
- assert.eq(1, testDB.auth("Alice", "pwd"));
- let res = assert.commandWorked(testDB.runCommand({find: "foo", batchSize: 0}));
- let cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "read from another user's find cursor");
- testDB.logout();
-
- // Test that "Mallory" cannot use a legacy find cursor created by "Alice".
- testDB.getMongo().forceReadMode("legacy");
- assert.eq(1, testDB.auth("Alice", "pwd"));
- let cursor = testDB.foo.find().batchSize(2);
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+function runTest(conn) {
+ let adminDB = conn.getDB("admin");
+ let isMaster = adminDB.runCommand("ismaster");
+ assert.commandWorked(isMaster);
+ const isMongos = (isMaster.msg === "isdbgrid");
+
+ // Create the admin user.
+ assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+ assert.eq(1, adminDB.auth("admin", "admin"));
+
+ // Set up the test database.
+ const testDBName = "auth_getMore";
+ let testDB = adminDB.getSiblingDB(testDBName);
+ testDB.dropDatabase();
+ assert.writeOK(testDB.foo.insert({_id: 0}));
+ assert.writeOK(testDB.foo.insert({_id: 1}));
+ assert.writeOK(testDB.foo.insert({_id: 2}));
+
+ //
+ // Test that a user can only run a getMore on a cursor that they created.
+ //
+
+ // Create two users, "Alice" and "Mallory".
+ assert.commandWorked(
+ testDB.runCommand({createUser: "Alice", pwd: "pwd", roles: ["readWrite"]}));
+ assert.commandWorked(
+ testDB.runCommand({createUser: "Mallory", pwd: "pwd", roles: ["readWrite"]}));
+ adminDB.logout();
+
+ // Test that "Mallory" cannot use a find cursor created by "Alice".
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ let res = assert.commandWorked(testDB.runCommand({find: "foo", batchSize: 0}));
+ let cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+ assert.eq(1, testDB.auth("Mallory", "pwd"));
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from another user's find cursor");
+ testDB.logout();
+
+ // Test that "Mallory" cannot use a legacy find cursor created by "Alice".
+ testDB.getMongo().forceReadMode("legacy");
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ let cursor = testDB.foo.find().batchSize(2);
+ cursor.next();
+ cursor.next();
+ testDB.logout();
+ assert.eq(1, testDB.auth("Mallory", "pwd"));
+ assert.throws(function() {
cursor.next();
- cursor.next();
- testDB.logout();
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.throws(function() {
- cursor.next();
- }, [], "read from another user's legacy find cursor");
- testDB.logout();
- testDB.getMongo().forceReadMode("commands");
-
- // Test that "Mallory" cannot use an aggregation cursor created by "Alice".
- assert.eq(1, testDB.auth("Alice", "pwd"));
- res = assert.commandWorked(
- testDB.runCommand({aggregate: "foo", pipeline: [], cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "read from another user's aggregate cursor");
- testDB.logout();
-
- // Test that "Mallory" cannot use a listCollections cursor created by "Alice".
- assert.eq(1, testDB.auth("Alice", "pwd"));
- res = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: cursorId, collection: "$cmd.listCollections"}),
- ErrorCodes.Unauthorized,
- "read from another user's listCollections cursor");
- testDB.logout();
-
- // Test that "Mallory" cannot use a listIndexes cursor created by "Alice".
- assert.eq(1, testDB.auth("Alice", "pwd"));
- res = assert.commandWorked(testDB.runCommand({listIndexes: "foo", cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "read from another user's listIndexes cursor");
- testDB.logout();
-
- //
- // Test that a user can call getMore on an indexStats cursor they created, unless the
- // indexStats privilege has been revoked in the meantime.
- //
-
- assert.eq(1, adminDB.auth("admin", "admin"));
- assert.commandWorked(testDB.runCommand({
- createRole: "indexStatsOnly",
- privileges: [{resource: {db: testDBName, collection: "foo"}, actions: ["indexStats"]}],
- roles: []
- }));
- assert.commandWorked(
- testDB.runCommand({createUser: "Bob", pwd: "pwd", roles: ["indexStatsOnly"]}));
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Bob", "pwd"));
- res = assert.commandWorked(testDB.runCommand(
- {aggregate: "foo", pipeline: [{$indexStats: {}}], cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
-
- res = assert.commandWorked(testDB.runCommand(
- {aggregate: "foo", pipeline: [{$indexStats: {}}], cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
-
- assert.eq(1, adminDB.auth("admin", "admin"));
- assert.commandWorked(
- testDB.runCommand({revokeRolesFromUser: "Bob", roles: ["indexStatsOnly"]}));
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Bob", "pwd"));
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "read from a cursor without required privileges");
- testDB.logout();
-
- //
- // Test that a user can call getMore on a listCollections cursor they created, unless the
- // readWrite privilege has been revoked in the meantime.
- //
-
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- assert.commandWorked(
- testDB.runCommand({createUser: "Tom", pwd: "pwd", roles: ["readWrite"]}));
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Tom", "pwd"));
- res = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- assert.commandWorked(
- testDB.runCommand({getMore: cursorId, collection: "$cmd.listCollections"}));
-
- res = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
-
- assert.eq(1, adminDB.auth("admin", "admin"));
- assert.commandWorked(testDB.runCommand({revokeRolesFromUser: "Tom", roles: ["readWrite"]}));
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Tom", "pwd"));
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: cursorId, collection: "$cmd.listCollections"}),
- ErrorCodes.Unauthorized,
- "read from a cursor without required privileges");
- testDB.logout();
- //
- // Test that a user can call getMore on a listIndexes cursor they created, unless the
- // readWrite privilege has been revoked in the meantime.
- //
-
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- assert.commandWorked(
- testDB.runCommand({createUser: "Bill", pwd: "pwd", roles: ["readWrite"]}));
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Bill", "pwd"));
- res = assert.commandWorked(testDB.runCommand({listIndexes: "foo", cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
-
- res = assert.commandWorked(testDB.runCommand({listIndexes: "foo", cursor: {batchSize: 0}}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
-
- assert.eq(1, adminDB.auth("admin", "admin"));
- assert.commandWorked(
- testDB.runCommand({revokeRolesFromUser: "Bill", roles: ["readWrite"]}));
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Bill", "pwd"));
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "read from a cursor without required privileges");
- testDB.logout();
-
- //
- // Test that a user can run a getMore on an aggregate cursor they created, unless some
- // privileges required for the pipeline have been revoked in the meantime.
- //
-
- assert.eq(1, testDB.auth("Alice", "pwd"));
- res = assert.commandWorked(testDB.runCommand({
- aggregate: "foo",
- pipeline: [{$match: {_id: 0}}, {$out: "out"}],
- cursor: {batchSize: 0}
- }));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
-
- res = assert.commandWorked(testDB.runCommand({
- aggregate: "foo",
- pipeline: [{$match: {_id: 0}}, {$out: "out"}],
- cursor: {batchSize: 0}
- }));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- testDB.logout();
-
- assert.eq(1, adminDB.auth("admin", "admin"));
- testDB.revokeRolesFromUser("Alice", ["readWrite"]);
- testDB.grantRolesToUser("Alice", ["read"]);
- adminDB.logout();
-
- assert.eq(1, testDB.auth("Alice", "pwd"));
- assert.commandFailedWithCode(
- testDB.runCommand(
- {aggregate: "foo", pipeline: [{$match: {_id: 0}}, {$out: "out"}], cursor: {}}),
- ErrorCodes.Unauthorized,
- "user should no longer have write privileges");
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "wrote from a cursor without required privileges");
- testDB.logout();
-
- //
- // Test that if there were multiple users authenticated when the cursor was created, then at
- // least one of them must be authenticated in order to run getMore on the cursor.
- //
-
- assert.eq(1, adminDB.auth("admin", "admin"));
- assert.writeOK(testDB.bar.insert({_id: 0}));
-
- // Create a user "fooUser" on the test database that can read the "foo" collection.
- assert.commandWorked(testDB.runCommand({
- createRole: "readFoo",
- privileges: [{resource: {db: testDBName, collection: "foo"}, actions: ["find"]}],
- roles: []
- }));
- assert.commandWorked(
- testDB.runCommand({createUser: "fooUser", pwd: "pwd", roles: ["readFoo"]}));
-
- // Create a user "fooBarUser" on the admin database that can read the "foo" and "bar"
- // collections.
- assert.commandWorked(adminDB.runCommand({
- createRole: "readFooBar",
- privileges: [
- {resource: {db: testDBName, collection: "foo"}, actions: ["find"]},
- {resource: {db: testDBName, collection: "bar"}, actions: ["find"]}
- ],
- roles: []
- }));
- assert.commandWorked(
- adminDB.runCommand({createUser: "fooBarUser", pwd: "pwd", roles: ["readFooBar"]}));
-
- adminDB.logout();
-
- // Test that a cursor created by "fooUser" and "fooBarUser" can be used by "fooUser".
- assert.eq(1, testDB.auth("fooUser", "pwd"));
- assert.eq(1, adminDB.auth("fooBarUser", "pwd"));
- res = assert.commandWorked(testDB.runCommand({find: "foo", batchSize: 0}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- adminDB.logout();
- assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
- testDB.logout();
-
- // Test that a cursor created by "fooUser" and "fooBarUser" cannot be used by "fooUser" if
- // "fooUser" does not have the privilege to read the collection.
- assert.eq(1, testDB.auth("fooUser", "pwd"));
- assert.eq(1, adminDB.auth("fooBarUser", "pwd"));
- res = assert.commandWorked(testDB.runCommand({find: "bar", batchSize: 0}));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- adminDB.logout();
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "bar"}),
- ErrorCodes.Unauthorized,
- "read from a cursor without required privileges");
- testDB.logout();
-
- // Test that an aggregate cursor created by "fooUser" and "fooBarUser" cannot be used by
- // "fooUser" if "fooUser" does not have all privileges required by the pipeline.
- assert.eq(1, testDB.auth("fooUser", "pwd"));
- assert.eq(1, adminDB.auth("fooBarUser", "pwd"));
- res = assert.commandWorked(testDB.runCommand({
- aggregate: "foo",
- pipeline: [
- {$match: {_id: 0}},
- {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar"}}
- ],
- cursor: {batchSize: 0}
- }));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
-
- res = assert.commandWorked(testDB.runCommand({
- aggregate: "foo",
- pipeline: [
- {$match: {_id: 0}},
- {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar"}}
- ],
- cursor: {batchSize: 0}
- }));
- cursorId = res.cursor.id;
- assert.neq(0, cursorId);
- adminDB.logout();
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
- ErrorCodes.Unauthorized,
- "read from a cursor without required privileges");
- testDB.logout();
- }
-
- // Run the test on a standalone.
- let conn = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
- runTest(conn);
- MongoRunner.stopMongod(conn);
-
- // Run the test on a sharded cluster.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- let cluster = new ShardingTest({
- shards: 1,
- mongos: 1,
- keyFile: "jstests/libs/key1",
- other: {shardOptions: {auth: ""}, shardAsReplicaSet: false}
- });
- runTest(cluster);
- cluster.stop();
+ }, [], "read from another user's legacy find cursor");
+ testDB.logout();
+ testDB.getMongo().forceReadMode("commands");
+
+ // Test that "Mallory" cannot use an aggregation cursor created by "Alice".
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ res = assert.commandWorked(
+ testDB.runCommand({aggregate: "foo", pipeline: [], cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+ assert.eq(1, testDB.auth("Mallory", "pwd"));
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from another user's aggregate cursor");
+ testDB.logout();
+
+ // Test that "Mallory" cannot use a listCollections cursor created by "Alice".
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+ assert.eq(1, testDB.auth("Mallory", "pwd"));
+ assert.commandFailedWithCode(
+ testDB.runCommand({getMore: cursorId, collection: "$cmd.listCollections"}),
+ ErrorCodes.Unauthorized,
+ "read from another user's listCollections cursor");
+ testDB.logout();
+
+ // Test that "Mallory" cannot use a listIndexes cursor created by "Alice".
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({listIndexes: "foo", cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+ assert.eq(1, testDB.auth("Mallory", "pwd"));
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from another user's listIndexes cursor");
+ testDB.logout();
+
+ //
+ // Test that a user can call getMore on an indexStats cursor they created, unless the
+ // indexStats privilege has been revoked in the meantime.
+ //
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ assert.commandWorked(testDB.runCommand({
+ createRole: "indexStatsOnly",
+ privileges: [{resource: {db: testDBName, collection: "foo"}, actions: ["indexStats"]}],
+ roles: []
+ }));
+ assert.commandWorked(
+ testDB.runCommand({createUser: "Bob", pwd: "pwd", roles: ["indexStatsOnly"]}));
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Bob", "pwd"));
+ res = assert.commandWorked(testDB.runCommand(
+ {aggregate: "foo", pipeline: [{$indexStats: {}}], cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
+
+ res = assert.commandWorked(testDB.runCommand(
+ {aggregate: "foo", pipeline: [{$indexStats: {}}], cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ assert.commandWorked(
+ testDB.runCommand({revokeRolesFromUser: "Bob", roles: ["indexStatsOnly"]}));
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Bob", "pwd"));
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from a cursor without required privileges");
+ testDB.logout();
+
+ //
+ // Test that a user can call getMore on a listCollections cursor they created, unless the
+ // readWrite privilege has been revoked in the meantime.
+ //
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+
+ assert.commandWorked(testDB.runCommand({createUser: "Tom", pwd: "pwd", roles: ["readWrite"]}));
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Tom", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ assert.commandWorked(
+ testDB.runCommand({getMore: cursorId, collection: "$cmd.listCollections"}));
+
+ res = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ assert.commandWorked(testDB.runCommand({revokeRolesFromUser: "Tom", roles: ["readWrite"]}));
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Tom", "pwd"));
+ assert.commandFailedWithCode(
+ testDB.runCommand({getMore: cursorId, collection: "$cmd.listCollections"}),
+ ErrorCodes.Unauthorized,
+ "read from a cursor without required privileges");
+ testDB.logout();
+ //
+ // Test that a user can call getMore on a listIndexes cursor they created, unless the
+ // readWrite privilege has been revoked in the meantime.
+ //
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+
+ assert.commandWorked(testDB.runCommand({createUser: "Bill", pwd: "pwd", roles: ["readWrite"]}));
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Bill", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({listIndexes: "foo", cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
+
+ res = assert.commandWorked(testDB.runCommand({listIndexes: "foo", cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ assert.commandWorked(testDB.runCommand({revokeRolesFromUser: "Bill", roles: ["readWrite"]}));
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Bill", "pwd"));
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from a cursor without required privileges");
+ testDB.logout();
+
+ //
+ // Test that a user can run a getMore on an aggregate cursor they created, unless some
+ // privileges required for the pipeline have been revoked in the meantime.
+ //
+
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ res = assert.commandWorked(testDB.runCommand(
+ {aggregate: "foo", pipeline: [{$match: {_id: 0}}, {$out: "out"}], cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
+
+ res = assert.commandWorked(testDB.runCommand(
+ {aggregate: "foo", pipeline: [{$match: {_id: 0}}, {$out: "out"}], cursor: {batchSize: 0}}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ testDB.logout();
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ testDB.revokeRolesFromUser("Alice", ["readWrite"]);
+ testDB.grantRolesToUser("Alice", ["read"]);
+ adminDB.logout();
+
+ assert.eq(1, testDB.auth("Alice", "pwd"));
+ assert.commandFailedWithCode(
+ testDB.runCommand(
+ {aggregate: "foo", pipeline: [{$match: {_id: 0}}, {$out: "out"}], cursor: {}}),
+ ErrorCodes.Unauthorized,
+ "user should no longer have write privileges");
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "wrote from a cursor without required privileges");
+ testDB.logout();
+
+ //
+ // Test that if there were multiple users authenticated when the cursor was created, then at
+ // least one of them must be authenticated in order to run getMore on the cursor.
+ //
+
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ assert.writeOK(testDB.bar.insert({_id: 0}));
+
+ // Create a user "fooUser" on the test database that can read the "foo" collection.
+ assert.commandWorked(testDB.runCommand({
+ createRole: "readFoo",
+ privileges: [{resource: {db: testDBName, collection: "foo"}, actions: ["find"]}],
+ roles: []
+ }));
+ assert.commandWorked(
+ testDB.runCommand({createUser: "fooUser", pwd: "pwd", roles: ["readFoo"]}));
+
+ // Create a user "fooBarUser" on the admin database that can read the "foo" and "bar"
+ // collections.
+ assert.commandWorked(adminDB.runCommand({
+ createRole: "readFooBar",
+ privileges: [
+ {resource: {db: testDBName, collection: "foo"}, actions: ["find"]},
+ {resource: {db: testDBName, collection: "bar"}, actions: ["find"]}
+ ],
+ roles: []
+ }));
+ assert.commandWorked(
+ adminDB.runCommand({createUser: "fooBarUser", pwd: "pwd", roles: ["readFooBar"]}));
+
+ adminDB.logout();
+
+ // Test that a cursor created by "fooUser" and "fooBarUser" can be used by "fooUser".
+ assert.eq(1, testDB.auth("fooUser", "pwd"));
+ assert.eq(1, adminDB.auth("fooBarUser", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({find: "foo", batchSize: 0}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ adminDB.logout();
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
+ testDB.logout();
+
+ // Test that a cursor created by "fooUser" and "fooBarUser" cannot be used by "fooUser" if
+ // "fooUser" does not have the privilege to read the collection.
+ assert.eq(1, testDB.auth("fooUser", "pwd"));
+ assert.eq(1, adminDB.auth("fooBarUser", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({find: "bar", batchSize: 0}));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ adminDB.logout();
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "bar"}),
+ ErrorCodes.Unauthorized,
+ "read from a cursor without required privileges");
+ testDB.logout();
+
+ // Test that an aggregate cursor created by "fooUser" and "fooBarUser" cannot be used by
+ // "fooUser" if "fooUser" does not have all privileges required by the pipeline.
+ assert.eq(1, testDB.auth("fooUser", "pwd"));
+ assert.eq(1, adminDB.auth("fooBarUser", "pwd"));
+ res = assert.commandWorked(testDB.runCommand({
+ aggregate: "foo",
+ pipeline: [
+ {$match: {_id: 0}},
+ {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar"}}
+ ],
+ cursor: {batchSize: 0}
+ }));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: "foo"}));
+
+ res = assert.commandWorked(testDB.runCommand({
+ aggregate: "foo",
+ pipeline: [
+ {$match: {_id: 0}},
+ {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar"}}
+ ],
+ cursor: {batchSize: 0}
+ }));
+ cursorId = res.cursor.id;
+ assert.neq(0, cursorId);
+ adminDB.logout();
+ assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}),
+ ErrorCodes.Unauthorized,
+ "read from a cursor without required privileges");
+ testDB.logout();
+}
+
+// Run the test on a standalone.
+let conn = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+// Run the test on a sharded cluster.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+let cluster = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: {auth: ""}, shardAsReplicaSet: false}
+});
+runTest(cluster);
+cluster.stop();
}());
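
The rule getMore.js exercises throughout: a cursor is bound to the set of users authenticated when it was created, and every getMore both checks that binding and re-verifies the underlying privileges. Condensed to its core, the handoff looks like this sketch (same fixture names as the test):

assert.eq(1, testDB.auth("Alice", "pwd"));
let res = assert.commandWorked(testDB.runCommand({find: "foo", batchSize: 0}));
testDB.logout();

// A different user may not continue Alice's cursor.
assert.eq(1, testDB.auth("Mallory", "pwd"));
assert.commandFailedWithCode(testDB.runCommand({getMore: res.cursor.id, collection: "foo"}),
                             ErrorCodes.Unauthorized);
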
diff --git a/jstests/auth/iteration_count_control.js b/jstests/auth/iteration_count_control.js
index 6ae57fdd6f7..d003347bdbc 100644
--- a/jstests/auth/iteration_count_control.js
+++ b/jstests/auth/iteration_count_control.js
@@ -1,43 +1,43 @@
// Test SCRAM iterationCount control.
(function() {
- 'use strict';
+'use strict';
- load('./jstests/multiVersion/libs/auth_helpers.js');
+load('./jstests/multiVersion/libs/auth_helpers.js');
- const conn = MongoRunner.runMongod({auth: ''});
- const adminDB = conn.getDB('admin');
+const conn = MongoRunner.runMongod({auth: ''});
+const adminDB = conn.getDB('admin');
- adminDB.createUser({user: 'user1', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(adminDB.auth({user: 'user1', pwd: 'pass'}));
+adminDB.createUser({user: 'user1', pwd: 'pass', roles: jsTest.adminUserRoles});
+assert(adminDB.auth({user: 'user1', pwd: 'pass'}));
- var userDoc = getUserDoc(adminDB, 'user1');
- assert.eq(10000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
+var userDoc = getUserDoc(adminDB, 'user1');
+assert.eq(10000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
- // Changing iterationCount should not affect existing users.
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 5000}));
- userDoc = getUserDoc(adminDB, 'user1');
- assert.eq(10000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
+// Changing iterationCount should not affect existing users.
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 5000}));
+userDoc = getUserDoc(adminDB, 'user1');
+assert.eq(10000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
- // But it should take effect when the user's password is changed.
- adminDB.updateUser('user1', {pwd: 'pass', roles: jsTest.adminUserRoles});
- userDoc = getUserDoc(adminDB, 'user1');
- assert.eq(5000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
+// But it should take effect when the user's password is changed.
+adminDB.updateUser('user1', {pwd: 'pass', roles: jsTest.adminUserRoles});
+userDoc = getUserDoc(adminDB, 'user1');
+assert.eq(5000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
- // Test (in)valid values for scramIterationCount. 5000 is the minimum value.
- assert.commandFailed(adminDB.runCommand({setParameter: 1, scramIterationCount: 4999}));
- assert.commandFailed(adminDB.runCommand({setParameter: 1, scramIterationCount: -5000}));
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 5000}));
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 10000}));
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 1000000}));
+// Test (in)valid values for scramIterationCount. 5000 is the minimum value.
+assert.commandFailed(adminDB.runCommand({setParameter: 1, scramIterationCount: 4999}));
+assert.commandFailed(adminDB.runCommand({setParameter: 1, scramIterationCount: -5000}));
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 5000}));
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 10000}));
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 1000000}));
- assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: -5000}));
- assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 4095}));
- assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 4096}));
- assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 4999}));
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 5000}));
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 10000}));
- assert.commandWorked(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 1000000}));
+assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: -5000}));
+assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 4095}));
+assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 4096}));
+assert.commandFailed(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 4999}));
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 5000}));
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 10000}));
+assert.commandWorked(adminDB.runCommand({setParameter: 1, scramSHA256IterationCount: 1000000}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
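
The behavior under test, condensed (a minimal sketch, assuming an authenticated adminDB handle and the getUserDoc helper loaded from jstests/multiVersion/libs/auth_helpers.js):

    // Lowering the parameter leaves existing credentials untouched; it only
    // takes effect when a password is set or changed. 5000 is the minimum.
    assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 5000}));
    assert.eq(10000, getUserDoc(adminDB, 'user1').credentials['SCRAM-SHA-1'].iterationCount);
    adminDB.updateUser('user1', {pwd: 'pass', roles: jsTest.adminUserRoles});
    assert.eq(5000, getUserDoc(adminDB, 'user1').credentials['SCRAM-SHA-1'].iterationCount);
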
diff --git a/jstests/auth/iteration_count_defaults.js b/jstests/auth/iteration_count_defaults.js
index 6ebe74abec1..560704e274c 100644
--- a/jstests/auth/iteration_count_defaults.js
+++ b/jstests/auth/iteration_count_defaults.js
@@ -1,28 +1,28 @@
// Test SCRAM iterationCount defaults.
(function() {
- 'use strict';
+'use strict';
- function runOpt(params, sha1Value, sha256Value) {
- const conn = MongoRunner.runMongod({auth: '', setParameter: params});
- const adminDB = conn.getDB('admin');
+function runOpt(params, sha1Value, sha256Value) {
+ const conn = MongoRunner.runMongod({auth: '', setParameter: params});
+ const adminDB = conn.getDB('admin');
- adminDB.createUser({user: 'user1', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(adminDB.auth({user: 'user1', pwd: 'pass'}));
+ adminDB.createUser({user: 'user1', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(adminDB.auth({user: 'user1', pwd: 'pass'}));
- const response = assert.commandWorked(adminDB.runCommand(
- {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
- assert.eq(response.scramIterationCount, sha1Value);
- assert.eq(response.scramSHA256IterationCount, sha256Value);
+ const response = assert.commandWorked(adminDB.runCommand(
+ {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
+ assert.eq(response.scramIterationCount, sha1Value);
+ assert.eq(response.scramSHA256IterationCount, sha256Value);
- MongoRunner.stopMongod(conn);
- }
+ MongoRunner.stopMongod(conn);
+}
- runOpt({}, 10000, 15000);
- runOpt({scramIterationCount: 12500}, 12500, 15000);
- runOpt({scramIterationCount: 20000}, 20000, 20000);
- runOpt({scramSHA256IterationCount: 9999}, 10000, 9999);
- runOpt({scramSHA256IterationCount: 10001}, 10000, 10001);
- runOpt({scramIterationCount: 7000, scramSHA256IterationCount: 8000}, 7000, 8000);
- runOpt({scramIterationCount: 8000, scramSHA256IterationCount: 7000}, 8000, 7000);
+runOpt({}, 10000, 15000);
+runOpt({scramIterationCount: 12500}, 12500, 15000);
+runOpt({scramIterationCount: 20000}, 20000, 20000);
+runOpt({scramSHA256IterationCount: 9999}, 10000, 9999);
+runOpt({scramSHA256IterationCount: 10001}, 10000, 10001);
+runOpt({scramIterationCount: 7000, scramSHA256IterationCount: 8000}, 7000, 8000);
+runOpt({scramIterationCount: 8000, scramSHA256IterationCount: 7000}, 8000, 7000);
})();
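
Read together, the runOpt cases suggest the SHA-256 default floats with the SHA-1 setting (a sketch of the observable rule, inferred from the assertions above; adminDB is assumed to be an authenticated handle):

    // With no parameters set: SCRAM-SHA-1 defaults to 10000, SCRAM-SHA-256 to 15000.
    // Raising scramIterationCount above 15000 lifts the SHA-256 default too,
    // unless scramSHA256IterationCount is set explicitly.
    const response = assert.commandWorked(adminDB.runCommand(
        {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
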
diff --git a/jstests/auth/keyfile_rollover.js b/jstests/auth/keyfile_rollover.js
index bbc704797ad..ea66397d8c8 100644
--- a/jstests/auth/keyfile_rollover.js
+++ b/jstests/auth/keyfile_rollover.js
@@ -11,82 +11,82 @@
TestData.skipGossipingClusterTime = true;
(function() {
- 'use strict';
+'use strict';
- let rst = new ReplSetTest({nodes: 3, keyFile: "jstests/libs/key1"});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 3, keyFile: "jstests/libs/key1"});
+rst.startSet();
+rst.initiate();
- const runPrimaryTest = function(fn) {
- const curPrimary = rst.getPrimary();
- assert(curPrimary.getDB("admin").auth("root", "root"));
- try {
- fn(curPrimary);
- rst.awaitSecondaryNodes();
- } finally {
- curPrimary.getDB("admin").logout();
- }
- };
+const runPrimaryTest = function(fn) {
+ const curPrimary = rst.getPrimary();
+ assert(curPrimary.getDB("admin").auth("root", "root"));
+ try {
+ fn(curPrimary);
+ rst.awaitSecondaryNodes();
+ } finally {
+ curPrimary.getDB("admin").logout();
+ }
+};
- // Create a user to login as when auth is enabled later
- rst.getPrimary().getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
-
- runPrimaryTest((curPrimary) => {
- assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(1, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
- });
+// Create a user to log in as when auth is enabled later
+rst.getPrimary().getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
- jsTestLog("Using keyForRollover to transition auth to both keys");
+runPrimaryTest((curPrimary) => {
+ assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.eq(1, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
+});
- /*
- * This rolls over the cluster from one keyfile to another. The first argument is the keyfile
- * servers should use, and the second is the keyfile the shell should use to authenticate
- * with the servers.
- */
- const rolloverKey = function(keyFileForServers, keyFileForAuth) {
- // Update the keyFile parameter for the ReplSetTest as a whole
- rst.keyFile = keyFileForServers;
- // Function to restart a node with a new keyfile parameter and wait for secondaries
- // to come back online
- const restart = function(node) {
- const nodeId = rst.getNodeId(node);
- rst.stop(nodeId);
- rst.start(nodeId, {keyFile: keyFileForServers});
- authutil.asCluster(rst.nodes, keyFileForAuth, () => {
- rst.awaitSecondaryNodes();
- });
- };
+jsTestLog("Using keyForRollover to transition auth to both keys");
- // First we restart the secondaries.
- rst.getSecondaries().forEach(function(secondary) {
- restart(secondary);
- });
-
- // Then we restart the primary and wait for it to come back up with an ismaster call.
- const primary = rst.getPrimary();
- restart(primary);
- assert.soonNoExcept(() => {
- authutil.asCluster(rst.nodes, keyFileForAuth, () => {
- assert.commandWorked(primary.getDB("admin").runCommand({isMaster: 1}));
- });
- return true;
+/*
+ * This rolls over the cluster from one keyfile to another. The first argument is the keyfile
+ * servers should use, and the second is the keyfile the shell should use to authenticate
+ * with the servers.
+ */
+const rolloverKey = function(keyFileForServers, keyFileForAuth) {
+ // Update the keyFile parameter for the ReplSetTest as a whole
+ rst.keyFile = keyFileForServers;
+ // Function to restart a node with a new keyfile parameter and wait for secondaries
+ // to come back online
+ const restart = function(node) {
+ const nodeId = rst.getNodeId(node);
+ rst.stop(nodeId);
+ rst.start(nodeId, {keyFile: keyFileForServers});
+ authutil.asCluster(rst.nodes, keyFileForAuth, () => {
+ rst.awaitSecondaryNodes();
});
};
- rolloverKey("jstests/libs/keyForRollover", "jstests/libs/key1");
+ // First we restart the secondaries.
+ rst.getSecondaries().forEach(function(secondary) {
+ restart(secondary);
+ });
- runPrimaryTest((curPrimary) => {
- assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(2, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
+ // Then we restart the primary and wait for it to come back up with an ismaster call.
+ const primary = rst.getPrimary();
+ restart(primary);
+ assert.soonNoExcept(() => {
+ authutil.asCluster(rst.nodes, keyFileForAuth, () => {
+ assert.commandWorked(primary.getDB("admin").runCommand({isMaster: 1}));
+ });
+ return true;
});
+};
- jsTestLog("Upgrading set to use key2");
- rolloverKey("jstests/libs/key2", "jstests/libs/key2");
+rolloverKey("jstests/libs/keyForRollover", "jstests/libs/key1");
- runPrimaryTest((curPrimary) => {
- assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(3, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
- });
+runPrimaryTest((curPrimary) => {
+ assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.eq(2, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
+});
+
+jsTestLog("Upgrading set to use key2");
+rolloverKey("jstests/libs/key2", "jstests/libs/key2");
+
+runPrimaryTest((curPrimary) => {
+ assert.writeOK(curPrimary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.eq(3, curPrimary.getDB('test').a.count(), 'Error interacting with replSet');
+});
- rst.stopSet();
+rst.stopSet();
})();
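
The rollover itself is just two calls, staged so the set always shares a mutually accepted key (a usage sketch, relying on the rolloverKey helper defined above):

    // Step 1: servers accept both old and new keys; the shell still auths with key1.
    rolloverKey("jstests/libs/keyForRollover", "jstests/libs/key1");
    // Step 2: once every node is on the transitional key, cut over to key2 alone.
    rolloverKey("jstests/libs/key2", "jstests/libs/key2");
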
diff --git a/jstests/auth/kill_cursors.js b/jstests/auth/kill_cursors.js
index dc49c7dba59..3d9a535311b 100644
--- a/jstests/auth/kill_cursors.js
+++ b/jstests/auth/kill_cursors.js
@@ -1,190 +1,174 @@
// Test the killCursors command.
// @tags: [requires_sharding]
(function() {
- 'use strict';
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- function runTest(mongod) {
- /**
- * Open a cursor on `db` while authenticated as `authUsers`.
- * Then logout, and log back in as `killUsers` and try to kill that cursor.
- *
- * @param db - The db to create a cursor on and ultimately kill agains.
- * @param authUsers - Array of ['username', db] pairs to create the cursor under.
- * @param killUsers - Array of ['username', dn] pairs to use when killing.
- * @param shouldWork - Whether we expect success
- */
- function tryKill(db, authUsers, killUsers, shouldWork) {
- function loginAll(users) {
- users.forEach(function(u) {
- assert(u[1].auth(u[0], 'pass'));
- });
- }
+'use strict';
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+function runTest(mongod) {
+ /**
+ * Open a cursor on `db` while authenticated as `authUsers`.
+ * Then logout, and log back in as `killUsers` and try to kill that cursor.
+ *
+     * @param db - The db to create a cursor on and ultimately kill against.
+     * @param authUsers - Array of ['username', db] pairs to create the cursor under.
+     * @param killUsers - Array of ['username', db] pairs to use when killing.
+     * @param shouldWork - Whether we expect success.
+ */
+ function tryKill(db, authUsers, killUsers, shouldWork) {
+ function loginAll(users) {
+ users.forEach(function(u) {
+ assert(u[1].auth(u[0], 'pass'));
+ });
+ }
- function logoutAll() {
- [testA, testB].forEach(function(d) {
- const users = assert.commandWorked(d.runCommand({connectionStatus: 1}))
- .authInfo.authenticatedUsers;
- users.forEach(function(u) {
- mongod.getDB(u.db).logout();
- });
+ function logoutAll() {
+ [testA, testB].forEach(function(d) {
+ const users = assert.commandWorked(d.runCommand({connectionStatus: 1}))
+ .authInfo.authenticatedUsers;
+ users.forEach(function(u) {
+ mongod.getDB(u.db).logout();
});
- }
+ });
+ }
- function doKill(extra) {
- // Create a cursor to be killed later.
- loginAll(authUsers);
- let cmd = {find: db.coll.getName(), batchSize: 2};
- Object.assign(cmd, extra);
- const id = assert.commandWorked(db.runCommand(cmd)).cursor.id;
- assert.neq(id, 0, "Invalid cursor ID");
- logoutAll();
-
- loginAll(killUsers);
- const killCmd = db.runCommand({killCursors: db.coll.getName(), cursors: [id]});
- logoutAll();
- if (shouldWork) {
- assert.commandWorked(killCmd, "Unable to kill cursor");
- } else {
- assert.commandFailed(killCmd, "Should not have been able to kill cursor");
- }
+ function doKill(extra) {
+ // Create a cursor to be killed later.
+ loginAll(authUsers);
+ let cmd = {find: db.coll.getName(), batchSize: 2};
+ Object.assign(cmd, extra);
+ const id = assert.commandWorked(db.runCommand(cmd)).cursor.id;
+ assert.neq(id, 0, "Invalid cursor ID");
+ logoutAll();
+
+ loginAll(killUsers);
+ const killCmd = db.runCommand({killCursors: db.coll.getName(), cursors: [id]});
+ logoutAll();
+ if (shouldWork) {
+ assert.commandWorked(killCmd, "Unable to kill cursor");
+ } else {
+ assert.commandFailed(killCmd, "Should not have been able to kill cursor");
}
+ }
- doKill({});
+ doKill({});
- if ((authUsers.length === 1) && (killUsers.length === 1)) {
- // Session variant only makes sense with single auth'd users.
- doKill({lsid: {id: BinData(4, "QlLfPHTySm6tqfuV+EOsVA==")}});
- }
+ if ((authUsers.length === 1) && (killUsers.length === 1)) {
+ // Session variant only makes sense with single auth'd users.
+ doKill({lsid: {id: BinData(4, "QlLfPHTySm6tqfuV+EOsVA==")}});
}
+ }
- function trySelfKill(user) {
- const db = user[1];
- assert(db.auth(user[0], 'pass'));
-
- assert.commandWorked(db.runCommand({startSession: 1}));
+ function trySelfKill(user) {
+ const db = user[1];
+ assert(db.auth(user[0], 'pass'));
- const cmd = {
- aggregate: 1,
- pipeline: [{$listLocalSessions: {}}],
- cursor: {batchSize: 0}
- };
- const res = assert.commandWorked(db.runCommand(cmd));
- print(tojson(res));
- const id = res.cursor.id;
- assert.neq(id, 0, "Invalid cursor ID");
+ assert.commandWorked(db.runCommand({startSession: 1}));
- const killCmdRes = db.runCommand({killCursors: db.getName() + ".$cmd", cursors: [id]});
- db.logout();
+ const cmd = {aggregate: 1, pipeline: [{$listLocalSessions: {}}], cursor: {batchSize: 0}};
+ const res = assert.commandWorked(db.runCommand(cmd));
+ print(tojson(res));
+ const id = res.cursor.id;
+ assert.neq(id, 0, "Invalid cursor ID");
- assert.commandWorked(killCmdRes, "Unable to kill cursor");
- }
+ const killCmdRes = db.runCommand({killCursors: db.getName() + ".$cmd", cursors: [id]});
+ db.logout();
- /**
- * Create user1/user2 in testA, and user3/user4 in testB.
- * Create two 101 element collections in testA and testB.
- * Use various combinations of those users to open cursors,
- * then (potentially) different combinations of users to kill them.
- *
- * A cursor should only be killable if at least one of the users
- * who created it is trying to kill it.
- */
-
- const testA = mongod.getDB('testA');
- const testB = mongod.getDB('testB');
- const admin = mongod.getDB('admin');
-
- // Setup users
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
-
- testA.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
- testA.createUser({user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles});
- testB.createUser({user: 'user3', pwd: 'pass', roles: jsTest.basicUserRoles});
- testB.createUser({user: 'user4', pwd: 'pass', roles: jsTest.basicUserRoles});
- testB.createUser({user: 'user5', pwd: 'pass', roles: []});
- admin.logout();
-
- // Create a collection with batchable data
- assert(testA.auth('user1', 'pass'));
- assert(testB.auth('user3', 'pass'));
- for (var i = 0; i < 101; ++i) {
- assert.writeOK(testA.coll.insert({_id: i}));
- assert.writeOK(testB.coll.insert({_id: i}));
- }
- testA.logout();
- testB.logout();
-
- // A user can kill their own cursor.
- tryKill(testA, [['user1', testA]], [['user1', testA]], true);
- tryKill(testA, [['user2', testA]], [['user2', testA]], true);
- tryKill(testB, [['user3', testB]], [['user3', testB]], true);
- tryKill(testB, [['user4', testB]], [['user4', testB]], true);
- trySelfKill(['user1', testA]);
- trySelfKill(['user5', testB]);
- trySelfKill(['admin', admin]);
-
- // A user cannot kill someone else's cursor.
- tryKill(testA, [['user1', testA]], [['user2', testA]], false);
- tryKill(testA, [['user1', testA]], [['user2', testA], ['user3', testB]], false);
- tryKill(testA, [['user2', testA]], [['user1', testA]], false);
- tryKill(testA, [['user2', testA]], [['user1', testA], ['user3', testB]], false);
- tryKill(testB, [['user3', testB]], [['user1', testA], ['user4', testB]], false);
- tryKill(testB, [['user3', testB]], [['user2', testA], ['user4', testB]], false);
-
- // A multi-owned cursor can be killed by any/all owner.
- tryKill(testA, [['user1', testA], ['user3', testB]], [['user1', testA]], true);
- tryKill(testB, [['user1', testA], ['user3', testB]], [['user3', testB]], true);
- tryKill(testA,
- [['user1', testA], ['user3', testB]],
- [['user1', testA], ['user3', testB]],
- true);
- tryKill(testA,
- [['user1', testA], ['user3', testB]],
- [['user2', testA], ['user3', testB]],
- true);
- tryKill(testB,
- [['user1', testA], ['user3', testB]],
- [['user1', testA], ['user3', testB]],
- true);
- tryKill(testB,
- [['user1', testA], ['user3', testB]],
- [['user1', testA], ['user4', testB]],
- true);
-
- // An owned cursor can not be killed by other user(s).
- tryKill(testA,
- [['user1', testA], ['user3', testB]],
- [['user2', testA], ['user4', testB]],
- false);
- tryKill(testA, [['user1', testA]], [['user2', testA], ['user3', testB]], false);
- tryKill(testA,
- [['user1', testA], ['user3', testB]],
- [['user2', testA], ['user4', testB]],
- false);
-
- // Admin can kill anything.
- tryKill(testA, [['user1', testA]], [['admin', admin]], true);
- tryKill(testA, [['user2', testA]], [['admin', admin]], true);
- tryKill(testB, [['user3', testB]], [['admin', admin]], true);
- tryKill(testB, [['user4', testB]], [['admin', admin]], true);
- tryKill(testA, [['user1', testA], ['user3', testB]], [['admin', admin]], true);
- tryKill(testB, [['user2', testA], ['user4', testB]], [['admin', admin]], true);
+ assert.commandWorked(killCmdRes, "Unable to kill cursor");
}
- const mongod = MongoRunner.runMongod({auth: ""});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runTest(st.s0);
- st.stop();
+ /**
+ * Create user1/user2 in testA, and user3/user4 in testB.
+ * Create two 101 element collections in testA and testB.
+ * Use various combinations of those users to open cursors,
+ * then (potentially) different combinations of users to kill them.
+ *
+ * A cursor should only be killable if at least one of the users
+ * who created it is trying to kill it.
+ */
+
+ const testA = mongod.getDB('testA');
+ const testB = mongod.getDB('testB');
+ const admin = mongod.getDB('admin');
+
+    // Set up users
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+
+ testA.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
+ testA.createUser({user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles});
+ testB.createUser({user: 'user3', pwd: 'pass', roles: jsTest.basicUserRoles});
+ testB.createUser({user: 'user4', pwd: 'pass', roles: jsTest.basicUserRoles});
+ testB.createUser({user: 'user5', pwd: 'pass', roles: []});
+ admin.logout();
+
+ // Create a collection with batchable data
+ assert(testA.auth('user1', 'pass'));
+ assert(testB.auth('user3', 'pass'));
+ for (var i = 0; i < 101; ++i) {
+ assert.writeOK(testA.coll.insert({_id: i}));
+ assert.writeOK(testB.coll.insert({_id: i}));
+ }
+ testA.logout();
+ testB.logout();
+
+ // A user can kill their own cursor.
+ tryKill(testA, [['user1', testA]], [['user1', testA]], true);
+ tryKill(testA, [['user2', testA]], [['user2', testA]], true);
+ tryKill(testB, [['user3', testB]], [['user3', testB]], true);
+ tryKill(testB, [['user4', testB]], [['user4', testB]], true);
+ trySelfKill(['user1', testA]);
+ trySelfKill(['user5', testB]);
+ trySelfKill(['admin', admin]);
+
+ // A user cannot kill someone else's cursor.
+ tryKill(testA, [['user1', testA]], [['user2', testA]], false);
+ tryKill(testA, [['user1', testA]], [['user2', testA], ['user3', testB]], false);
+ tryKill(testA, [['user2', testA]], [['user1', testA]], false);
+ tryKill(testA, [['user2', testA]], [['user1', testA], ['user3', testB]], false);
+ tryKill(testB, [['user3', testB]], [['user1', testA], ['user4', testB]], false);
+ tryKill(testB, [['user3', testB]], [['user2', testA], ['user4', testB]], false);
+
+    // A multi-owned cursor can be killed by any/all of its owners.
+ tryKill(testA, [['user1', testA], ['user3', testB]], [['user1', testA]], true);
+ tryKill(testB, [['user1', testA], ['user3', testB]], [['user3', testB]], true);
+ tryKill(
+ testA, [['user1', testA], ['user3', testB]], [['user1', testA], ['user3', testB]], true);
+ tryKill(
+ testA, [['user1', testA], ['user3', testB]], [['user2', testA], ['user3', testB]], true);
+ tryKill(
+ testB, [['user1', testA], ['user3', testB]], [['user1', testA], ['user3', testB]], true);
+ tryKill(
+ testB, [['user1', testA], ['user3', testB]], [['user1', testA], ['user4', testB]], true);
+
+    // An owned cursor cannot be killed by other users.
+ tryKill(
+ testA, [['user1', testA], ['user3', testB]], [['user2', testA], ['user4', testB]], false);
+ tryKill(testA, [['user1', testA]], [['user2', testA], ['user3', testB]], false);
+ tryKill(
+ testA, [['user1', testA], ['user3', testB]], [['user2', testA], ['user4', testB]], false);
+
+ // Admin can kill anything.
+ tryKill(testA, [['user1', testA]], [['admin', admin]], true);
+ tryKill(testA, [['user2', testA]], [['admin', admin]], true);
+ tryKill(testB, [['user3', testB]], [['admin', admin]], true);
+ tryKill(testB, [['user4', testB]], [['admin', admin]], true);
+ tryKill(testA, [['user1', testA], ['user3', testB]], [['admin', admin]], true);
+ tryKill(testB, [['user2', testA], ['user4', testB]], [['admin', admin]], true);
+}
+
+const mongod = MongoRunner.runMongod({auth: ""});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runTest(st.s0);
+st.stop();
})();
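
The matrix above reduces to one rule: killCursors succeeds only when at least one currently authenticated user was among the cursor's creators, or the caller is an admin. A minimal sketch, assuming a db handle authenticated as the cursor's creator:

    // A user killing their own cursor: open it, then kill it by id.
    const id =
        assert.commandWorked(db.runCommand({find: db.coll.getName(), batchSize: 2})).cursor.id;
    assert.commandWorked(db.runCommand({killCursors: db.coll.getName(), cursors: [id]}));
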
diff --git a/jstests/auth/kill_sessions.js b/jstests/auth/kill_sessions.js
index f32a69ae2e8..99ddc51296f 100644
--- a/jstests/auth/kill_sessions.js
+++ b/jstests/auth/kill_sessions.js
@@ -1,17 +1,17 @@
load("jstests/libs/kill_sessions.js");
(function() {
- 'use strict';
+'use strict';
- // TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
- // if the kill command is sent with an implicit session.
- TestData.disableImplicitSessions = true;
+// TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
+// if the kill command is sent with an implicit session.
+TestData.disableImplicitSessions = true;
- var forExec = MongoRunner.runMongod({auth: ""});
- var forKill = new Mongo(forExec.host);
- var forVerify = new Mongo(forExec.host);
- KillSessionsTestHelper.initializeAuth(forExec);
- forVerify.getDB("admin").auth("super", "password");
- KillSessionsTestHelper.runAuth(forExec, forKill, [forVerify]);
- MongoRunner.stopMongod(forExec);
+var forExec = MongoRunner.runMongod({auth: ""});
+var forKill = new Mongo(forExec.host);
+var forVerify = new Mongo(forExec.host);
+KillSessionsTestHelper.initializeAuth(forExec);
+forVerify.getDB("admin").auth("super", "password");
+KillSessionsTestHelper.runAuth(forExec, forKill, [forVerify]);
+MongoRunner.stopMongod(forExec);
})();
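
The helper exercises the killSessions command family; independent of the helper, the basic shape looks roughly like this (a hypothetical minimal example, not drawn from kill_sessions.js itself):

    // A user may always kill their own logical session by id; killing other
    // users' sessions requires additional privileges (which is what the
    // "super" user above is for).
    const sid = assert.commandWorked(db.runCommand({startSession: 1})).id;
    assert.commandWorked(db.runCommand({killSessions: [sid]}));
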
diff --git a/jstests/auth/killop_own_ops.js b/jstests/auth/killop_own_ops.js
index 37498b53586..dbb1689708c 100644
--- a/jstests/auth/killop_own_ops.js
+++ b/jstests/auth/killop_own_ops.js
@@ -8,153 +8,148 @@
*/
(function() {
- 'use strict';
-
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
-
- function runTest(m, failPointName) {
- var db = m.getDB("foo");
- var admin = m.getDB("admin");
-
- admin.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
- admin.auth('admin', 'password');
- db.createUser({user: 'reader', pwd: 'reader', roles: [{db: 'foo', role: 'read'}]});
- db.createUser(
- {user: 'otherReader', pwd: 'otherReader', roles: [{db: 'foo', role: 'read'}]});
- admin.createRole({
- role: 'opAdmin',
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ['inprog', 'killop']}]
- });
- db.createUser({user: 'opAdmin', pwd: 'opAdmin', roles: [{role: 'opAdmin', db: 'admin'}]});
-
- var t = db.jstests_killop;
- t.save({x: 1});
-
- if (!FixtureHelpers.isMongos(db)) {
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- }
+'use strict';
- admin.logout();
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- // Only used for nice error messages.
- function getAllLocalOps() {
- admin.aggregate([{$currentOp: {allUsers: true, localOps: true}}]).toArray();
- }
+function runTest(m, failPointName) {
+ var db = m.getDB("foo");
+ var admin = m.getDB("admin");
- /**
- * This function filters for the operations that we're looking for, based on their state and
- * the contents of their query object.
- */
- function ops(ownOps = true) {
- const ops =
- admin.aggregate([{$currentOp: {allUsers: !ownOps, localOps: true}}]).toArray();
-
- var ids = [];
- for (let o of ops) {
- if ((o.active || o.waitingForLock) && o.command &&
- o.command.find === "jstests_killop" && o.command.comment === "kill_own_ops") {
- ids.push(o.opid);
- }
- }
- return ids;
- }
+ admin.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+ admin.auth('admin', 'password');
+ db.createUser({user: 'reader', pwd: 'reader', roles: [{db: 'foo', role: 'read'}]});
+ db.createUser({user: 'otherReader', pwd: 'otherReader', roles: [{db: 'foo', role: 'read'}]});
+ admin.createRole({
+ role: 'opAdmin',
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ['inprog', 'killop']}]
+ });
+ db.createUser({user: 'opAdmin', pwd: 'opAdmin', roles: [{role: 'opAdmin', db: 'admin'}]});
- var queryAsReader =
- 'db = db.getSiblingDB("foo"); db.auth("reader", "reader"); db.jstests_killop.find().comment("kill_own_ops").toArray()';
+ var t = db.jstests_killop;
+ t.save({x: 1});
- jsTestLog("Starting long-running operation");
- db.auth('reader', 'reader');
- assert.commandWorked(
- db.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
- var s1 = startParallelShell(queryAsReader, m.port);
- jsTestLog("Finding ops in $currentOp output");
- var o = [];
- assert.soon(
- function() {
- o = ops();
- return o.length == 1;
- },
- () => {
- return tojson(getAllLocalOps());
- },
- 60000);
- jsTestLog("Checking that another user cannot see or kill the op");
- db.logout();
- db.auth('otherReader', 'otherReader');
- assert.eq([], ops());
- assert.commandFailed(db.killOp(o[0]));
- db.logout();
- db.auth('reader', 'reader');
- assert.eq(1, ops().length);
- db.logout();
- jsTestLog("Checking that originating user can kill operation");
- var start = new Date();
- db.auth('reader', 'reader');
- assert.commandWorked(db.killOp(o[0]));
- assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
-
- jsTestLog("Waiting for ops to terminate");
- var exitCode = s1({checkExitSuccess: false});
- assert.neq(0,
- exitCode,
- "expected shell to exit abnormally due to operation execution being terminated");
-
- // don't want to pass if timeout killed the js function.
- var end = new Date();
- var diff = end - start;
- assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
-
- jsTestLog("Starting a second long-running operation");
+ if (!FixtureHelpers.isMongos(db)) {
assert.commandWorked(
- db.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
- var s2 = startParallelShell(queryAsReader, m.port);
- jsTestLog("Finding ops in $currentOp output");
- var o2 = [];
- assert.soon(
- function() {
- o2 = ops();
- return o2.length == 1;
- },
- () => {
- return tojson(getAllLocalOps());
- },
- 60000);
-
- db.logout();
- db.auth('opAdmin', 'opAdmin');
-
- jsTestLog("Checking that an administrative user can find others' operations");
- assert.eq(o2, ops(false));
-
- jsTestLog(
- "Checking that an administrative user cannot find others' operations with ownOps");
- assert.eq([], ops());
-
- jsTestLog("Checking that an administrative user can kill others' operations");
- var start = new Date();
- assert.commandWorked(db.killOp(o2[0]));
- assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- jsTestLog("Waiting for ops to terminate");
- var exitCode = s2({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
-
- var end = new Date();
- var diff = end - start;
- assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+ }
+
+ admin.logout();
+
+ // Only used for nice error messages.
+ function getAllLocalOps() {
+        return admin.aggregate([{$currentOp: {allUsers: true, localOps: true}}]).toArray();
+ }
+
+ /**
+ * This function filters for the operations that we're looking for, based on their state and
+ * the contents of their query object.
+ */
+ function ops(ownOps = true) {
+ const ops = admin.aggregate([{$currentOp: {allUsers: !ownOps, localOps: true}}]).toArray();
+
+ var ids = [];
+ for (let o of ops) {
+ if ((o.active || o.waitingForLock) && o.command &&
+ o.command.find === "jstests_killop" && o.command.comment === "kill_own_ops") {
+ ids.push(o.opid);
+ }
+ }
+ return ids;
}
- var conn = MongoRunner.runMongod({auth: ""});
- runTest(conn, "setYieldAllLocksHang");
- MongoRunner.stopMongod(conn);
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
- // Use a different failpoint in the sharded version, since the mongos does not have a
- // setYieldAlllocksHang failpoint.
- runTest(st.s, "waitInFindBeforeMakingBatch");
- st.stop();
+ var queryAsReader =
+ 'db = db.getSiblingDB("foo"); db.auth("reader", "reader"); db.jstests_killop.find().comment("kill_own_ops").toArray()';
+
+ jsTestLog("Starting long-running operation");
+ db.auth('reader', 'reader');
+ assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
+ var s1 = startParallelShell(queryAsReader, m.port);
+ jsTestLog("Finding ops in $currentOp output");
+ var o = [];
+ assert.soon(
+ function() {
+ o = ops();
+ return o.length == 1;
+ },
+ () => {
+ return tojson(getAllLocalOps());
+ },
+ 60000);
+ jsTestLog("Checking that another user cannot see or kill the op");
+ db.logout();
+ db.auth('otherReader', 'otherReader');
+ assert.eq([], ops());
+ assert.commandFailed(db.killOp(o[0]));
+ db.logout();
+ db.auth('reader', 'reader');
+ assert.eq(1, ops().length);
+ db.logout();
+ jsTestLog("Checking that originating user can kill operation");
+ var start = new Date();
+ db.auth('reader', 'reader');
+ assert.commandWorked(db.killOp(o[0]));
+ assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ jsTestLog("Waiting for ops to terminate");
+ var exitCode = s1({checkExitSuccess: false});
+ assert.neq(0,
+ exitCode,
+ "expected shell to exit abnormally due to operation execution being terminated");
+
+    // Don't want to pass if a timeout killed the JS function.
+ var end = new Date();
+ var diff = end - start;
+ assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
+
+ jsTestLog("Starting a second long-running operation");
+ assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
+ var s2 = startParallelShell(queryAsReader, m.port);
+ jsTestLog("Finding ops in $currentOp output");
+ var o2 = [];
+ assert.soon(
+ function() {
+ o2 = ops();
+ return o2.length == 1;
+ },
+ () => {
+ return tojson(getAllLocalOps());
+ },
+ 60000);
+
+ db.logout();
+ db.auth('opAdmin', 'opAdmin');
+
+ jsTestLog("Checking that an administrative user can find others' operations");
+ assert.eq(o2, ops(false));
+
+ jsTestLog("Checking that an administrative user cannot find others' operations with ownOps");
+ assert.eq([], ops());
+
+ jsTestLog("Checking that an administrative user can kill others' operations");
+ var start = new Date();
+ assert.commandWorked(db.killOp(o2[0]));
+ assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ jsTestLog("Waiting for ops to terminate");
+ var exitCode = s2({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
+
+ var end = new Date();
+ var diff = end - start;
+ assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff);
+}
+
+var conn = MongoRunner.runMongod({auth: ""});
+runTest(conn, "setYieldAllLocksHang");
+MongoRunner.stopMongod(conn);
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st =
+ new ShardingTest({shards: 1, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
+// Use a different failpoint in the sharded version, since the mongos does not have a
+// setYieldAlllocksHang failpoint.
+runTest(st.s, "waitInFindBeforeMakingBatch");
+st.stop();
})();
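
The privilege model under test, in brief (a minimal sketch, assuming an authenticated connection and at least one of the caller's operations currently running):

    // Without the 'inprog' action a user only sees their own operations; with
    // localOps:true the aggregation reports ops local to this node or mongos.
    const mine = admin.aggregate([{$currentOp: {allUsers: false, localOps: true}}]).toArray();
    // Without the 'killop' action, killOp succeeds only on the caller's own ops.
    assert.commandWorked(db.killOp(mine[0].opid));
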
diff --git a/jstests/auth/list_all_local_sessions.js b/jstests/auth/list_all_local_sessions.js
index 3b7c19d1cbe..92363515a00 100644
--- a/jstests/auth/list_all_local_sessions.js
+++ b/jstests/auth/list_all_local_sessions.js
@@ -2,60 +2,60 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
- load('jstests/aggregation/extras/utils.js');
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- function runListAllLocalSessionsTest(mongod) {
- assert(mongod);
- const admin = mongod.getDB("admin");
- const db = mongod.getDB("test");
-
- const pipeline = [{'$listLocalSessions': {allUsers: true}}];
- function listAllLocalSessions() {
- return admin.aggregate(pipeline);
- }
-
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
- db.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
- admin.logout();
-
- // Shouldn't be able to listLocalSessions when not logged in.
- assertErrorCode(admin, pipeline, ErrorCodes.Unauthorized);
-
- // Start a new session and capture its sessionId.
- assert(db.auth('user1', 'pass'));
- const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
-
- // Ensure that a normal user can NOT listAllLocalSessions to view their session.
- assertErrorCode(admin, pipeline, ErrorCodes.Unauthorized);
- db.logout();
-
- // Ensure that the cache now contains the session and is visible by admin.
- assert(admin.auth('admin', 'pass'));
- const resultArray = assert.doesNotThrow(listAllLocalSessions).toArray();
- assert.eq(resultArray.length, 1);
- const cacheid = resultArray[0]._id.id;
- assert(cacheid !== undefined);
- assert.eq(0, bsonWoCompare({x: cacheid}, {x: myid}));
+'use strict';
+load('jstests/aggregation/extras/utils.js');
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+function runListAllLocalSessionsTest(mongod) {
+ assert(mongod);
+ const admin = mongod.getDB("admin");
+ const db = mongod.getDB("test");
+
+ const pipeline = [{'$listLocalSessions': {allUsers: true}}];
+ function listAllLocalSessions() {
+ return admin.aggregate(pipeline);
}
- const mongod = MongoRunner.runMongod({auth: ""});
- runListAllLocalSessionsTest(mongod);
- MongoRunner.stopMongod(mongod);
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runListAllLocalSessionsTest(st.s0);
- st.stop();
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+ db.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.logout();
+
+ // Shouldn't be able to listLocalSessions when not logged in.
+ assertErrorCode(admin, pipeline, ErrorCodes.Unauthorized);
+
+ // Start a new session and capture its sessionId.
+ assert(db.auth('user1', 'pass'));
+ const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
+
+ // Ensure that a normal user can NOT listAllLocalSessions to view their session.
+ assertErrorCode(admin, pipeline, ErrorCodes.Unauthorized);
+ db.logout();
+
+    // Ensure that the cache now contains the session and is visible to admin.
+ assert(admin.auth('admin', 'pass'));
+ const resultArray = assert.doesNotThrow(listAllLocalSessions).toArray();
+ assert.eq(resultArray.length, 1);
+ const cacheid = resultArray[0]._id.id;
+ assert(cacheid !== undefined);
+ assert.eq(0, bsonWoCompare({x: cacheid}, {x: myid}));
+}
+
+const mongod = MongoRunner.runMongod({auth: ""});
+runListAllLocalSessionsTest(mongod);
+MongoRunner.stopMongod(mongod);
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runListAllLocalSessionsTest(st.s0);
+st.stop();
})();
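
The gist (a minimal sketch, assuming an admin handle authenticated with jsTest.adminUserRoles):

    // {allUsers: true} reads every user's entries from the in-memory session
    // cache, so the stage is rejected with Unauthorized for ordinary users.
    const sessions = admin.aggregate([{'$listLocalSessions': {allUsers: true}}]).toArray();
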
diff --git a/jstests/auth/list_all_sessions.js b/jstests/auth/list_all_sessions.js
index 7f077bee537..9ba823564a7 100644
--- a/jstests/auth/list_all_sessions.js
+++ b/jstests/auth/list_all_sessions.js
@@ -2,67 +2,67 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
- load('jstests/aggregation/extras/utils.js');
+'use strict';
+load('jstests/aggregation/extras/utils.js');
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- function runListAllSessionsTest(mongod) {
- assert(mongod);
- const admin = mongod.getDB("admin");
- const config = mongod.getDB("config");
+function runListAllSessionsTest(mongod) {
+ assert(mongod);
+ const admin = mongod.getDB("admin");
+ const config = mongod.getDB("config");
- const pipeline = [{'$listSessions': {allUsers: true}}];
- function listSessions() {
- return config.system.sessions.aggregate(pipeline);
- }
+ const pipeline = [{'$listSessions': {allUsers: true}}];
+ function listSessions() {
+ return config.system.sessions.aggregate(pipeline);
+ }
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
- admin.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
- admin.logout();
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+ admin.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.logout();
- // Fail if we're not logged in.
- assertErrorCode(config.system.sessions, pipeline, ErrorCodes.Unauthorized);
+ // Fail if we're not logged in.
+ assertErrorCode(config.system.sessions, pipeline, ErrorCodes.Unauthorized);
- // Start a new session and capture its sessionId.
- assert(admin.auth('user1', 'pass'));
- const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ // Start a new session and capture its sessionId.
+ assert(admin.auth('user1', 'pass'));
+ const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- // Ensure that a normal user can NOT listSessions{allUsers:true} to view their session.
- assertErrorCode(config.system.sessions, pipeline, ErrorCodes.Unauthorized);
+ // Ensure that a normal user can NOT listSessions{allUsers:true} to view their session.
+ assertErrorCode(config.system.sessions, pipeline, ErrorCodes.Unauthorized);
- // Ensure that a normal user can NOT listSessions to view others' sessions.
- const viewAdminPipeline = [{'$listSessions': {users: [{user: 'admin', db: 'admin'}]}}];
- assertErrorCode(config.system.sessions, viewAdminPipeline, ErrorCodes.Unauthorized);
+ // Ensure that a normal user can NOT listSessions to view others' sessions.
+ const viewAdminPipeline = [{'$listSessions': {users: [{user: 'admin', db: 'admin'}]}}];
+ assertErrorCode(config.system.sessions, viewAdminPipeline, ErrorCodes.Unauthorized);
- // Ensure that the cache now contains the session and is visible by admin
- assert(admin.auth('admin', 'pass'));
- const resultArray = listSessions().toArray();
- assert.eq(resultArray.length, 1);
- const cacheid = resultArray[0]._id.id;
- assert(cacheid !== undefined);
- assert.eq(0, bsonWoCompare({x: cacheid}, {x: myid}));
+    // Ensure that the cache now contains the session and is visible to admin.
+ assert(admin.auth('admin', 'pass'));
+ const resultArray = listSessions().toArray();
+ assert.eq(resultArray.length, 1);
+ const cacheid = resultArray[0]._id.id;
+ assert(cacheid !== undefined);
+ assert.eq(0, bsonWoCompare({x: cacheid}, {x: myid}));
- // Make sure pipelining other collections fail.
- assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
- }
+    // Make sure pipelining other collections fails.
+ assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
+}
- const mongod = MongoRunner.runMongod({auth: ""});
- runListAllSessionsTest(mongod);
- MongoRunner.stopMongod(mongod);
+const mongod = MongoRunner.runMongod({auth: ""});
+runListAllSessionsTest(mongod);
+MongoRunner.stopMongod(mongod);
- const st =
- new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
+const st =
+ new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
- // Ensure that the sessions collection exists.
- st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+// Ensure that the sessions collection exists.
+st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- runListAllSessionsTest(st.s0);
- st.stop();
+runListAllSessionsTest(st.s0);
+st.stop();
})();
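
Unlike $listLocalSessions, $listSessions reads the persisted sessions collection, so the cache must be flushed first (a minimal sketch, assuming authenticated admin and config handles as above):

    // Sessions only appear in config.system.sessions after a cache refresh.
    assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
    const all = config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
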
diff --git a/jstests/auth/list_collections_filter_views.js b/jstests/auth/list_collections_filter_views.js
index d17c9d82101..eeb12c74051 100644
--- a/jstests/auth/list_collections_filter_views.js
+++ b/jstests/auth/list_collections_filter_views.js
@@ -1,58 +1,57 @@
// Test listCollections with unauthorized views.
(function() {
- "use strict";
-
- const dbName = "list_collections_filter_views";
-
- function runTestOnConnection(conn) {
- const admin = conn.getDB("admin");
- const db = conn.getDB("test");
-
- assert.commandWorked(admin.runCommand({createUser: "root", pwd: "root", roles: ["root"]}));
- assert(admin.auth("root", "root"));
-
- assert.commandWorked(db.foo.insert({x: 123}));
- assert.commandWorked(db.createView("bar", "foo", []));
- assert.commandWorked(db.createView("baz", "foo", []));
-
- assert.commandWorked(db.runCommand({
- createRole: "role",
- roles: [],
- privileges: [
- {resource: {db: "test", collection: "foo"}, actions: ["find"]},
- {resource: {db: "test", collection: "bar"}, actions: ["find"]}
- ]
- }));
-
- assert.commandWorked(
- db.runCommand({createUser: "user", pwd: "pwd", roles: [{role: "role", db: "test"}]}));
- admin.logout();
-
- assert(db.auth("user", "pwd"));
-
- const res = assert.commandWorked(
- db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true}));
- assert.eq(2, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch));
-
- function nameSort(a, b) {
- return a.name > b.name;
- }
- assert.eq(
- [{"name": "bar", "type": "view"}, {"name": "foo", "type": "collection"}].sort(nameSort),
- res.cursor.firstBatch.sort(nameSort));
- }
+"use strict";
+
+const dbName = "list_collections_filter_views";
+
+function runTestOnConnection(conn) {
+ const admin = conn.getDB("admin");
+ const db = conn.getDB("test");
+
+ assert.commandWorked(admin.runCommand({createUser: "root", pwd: "root", roles: ["root"]}));
+ assert(admin.auth("root", "root"));
- const mongod = MongoRunner.runMongod({auth: ''});
- runTestOnConnection(mongod);
- MongoRunner.stopMongod(mongod);
+ assert.commandWorked(db.foo.insert({x: 123}));
+ assert.commandWorked(db.createView("bar", "foo", []));
+ assert.commandWorked(db.createView("baz", "foo", []));
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false},
- });
- runTestOnConnection(st.s0);
- st.stop();
+ assert.commandWorked(db.runCommand({
+ createRole: "role",
+ roles: [],
+ privileges: [
+ {resource: {db: "test", collection: "foo"}, actions: ["find"]},
+ {resource: {db: "test", collection: "bar"}, actions: ["find"]}
+ ]
+ }));
+ assert.commandWorked(
+ db.runCommand({createUser: "user", pwd: "pwd", roles: [{role: "role", db: "test"}]}));
+ admin.logout();
+
+ assert(db.auth("user", "pwd"));
+
+ const res = assert.commandWorked(
+ db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true}));
+ assert.eq(2, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch));
+
+ function nameSort(a, b) {
+ return a.name > b.name;
+ }
+ assert.eq(
+ [{"name": "bar", "type": "view"}, {"name": "foo", "type": "collection"}].sort(nameSort),
+ res.cursor.firstBatch.sort(nameSort));
+}
+
+const mongod = MongoRunner.runMongod({auth: ''});
+runTestOnConnection(mongod);
+MongoRunner.stopMongod(mongod);
+
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false},
+});
+runTestOnConnection(st.s0);
+st.stop();
}());
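
The option combination under test, in short (a minimal sketch, assuming a db handle authenticated as the restricted "user" created above):

    // authorizedCollections:true together with nameOnly:true lets a user
    // without full listCollections privileges enumerate only the namespaces
    // they can access: here the "foo" collection and the "bar" view, but not "baz".
    const res = assert.commandWorked(
        db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true}));
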
diff --git a/jstests/auth/list_collections_own_collections.js b/jstests/auth/list_collections_own_collections.js
index 82d17afc787..08acf45ab02 100644
--- a/jstests/auth/list_collections_own_collections.js
+++ b/jstests/auth/list_collections_own_collections.js
@@ -1,183 +1,186 @@
// Test nameOnly option of listCollections
(function() {
- "use strict";
-
- const dbName = "list_collections_own_collections";
-
- const nameSort = (a, b) => a.name > b.name;
- const resFoo = {"name": "foo", "type": "collection"};
- const resBar = {"name": "bar", "type": "view"};
- const resOther =
- [{"name": "otherCollection", "type": "collection"}, {"name": "otherView", "type": "view"}];
- const resSystemViews = {"name": "system.views", "type": "collection"};
-
- function runTestOnConnection(conn) {
- const admin = conn.getDB("admin");
- const db = conn.getDB(dbName);
-
- assert.commandWorked(admin.runCommand({createUser: "root", pwd: "root", roles: ["root"]}));
- assert(admin.auth("root", "root"));
-
- function createTestRoleAndUser(roleName, privs) {
- assert.commandWorked(
- admin.runCommand({createRole: roleName, roles: [], privileges: privs}));
-
- const userName = "user|" + roleName;
- assert.commandWorked(db.runCommand(
- {createUser: userName, pwd: "pwd", roles: [{role: roleName, db: "admin"}]}));
- }
-
- createTestRoleAndUser("roleWithExactNamespacePrivileges", [
- {resource: {db: dbName, collection: "foo"}, actions: ["find"]},
- {resource: {db: dbName, collection: "bar"}, actions: ["find"]}
- ]);
-
- createTestRoleAndUser("roleWithExactNamespaceAndSystemPrivileges", [
- {resource: {db: dbName, collection: "foo"}, actions: ["find"]},
- {resource: {db: dbName, collection: "bar"}, actions: ["find"]},
- {resource: {db: dbName, collection: "system.views"}, actions: ["find"]}
- ]);
-
- createTestRoleAndUser("roleWithCollectionPrivileges", [
- {resource: {db: "", collection: "foo"}, actions: ["find"]},
- {resource: {db: "", collection: "bar"}, actions: ["find"]}
- ]);
-
- createTestRoleAndUser("roleWithCollectionAndSystemPrivileges", [
- {resource: {db: "", collection: "foo"}, actions: ["find"]},
- {resource: {db: "", collection: "bar"}, actions: ["find"]},
- {resource: {db: "", collection: "system.views"}, actions: ["find"]}
- ]);
-
- createTestRoleAndUser("roleWithDatabasePrivileges", [
- {resource: {db: dbName, collection: ""}, actions: ["find"]},
- ]);
-
- createTestRoleAndUser("roleWithDatabaseAndSystemPrivileges", [
- {resource: {db: dbName, collection: ""}, actions: ["find"]},
- {resource: {db: dbName, collection: "system.views"}, actions: ["find"]}
- ]);
-
- createTestRoleAndUser("roleWithAnyNormalResourcePrivileges", [
- {resource: {db: "", collection: ""}, actions: ["find"]},
- ]);
-
- createTestRoleAndUser("roleWithAnyNormalResourceAndSystemPrivileges", [
- {resource: {db: "", collection: ""}, actions: ["find"]},
- {resource: {db: "", collection: "system.views"}, actions: ["find"]}
- ]);
-
- // Create the collection and view used by the tests.
- assert.commandWorked(db.dropDatabase());
- assert.commandWorked(db.createCollection("foo"));
- assert.commandWorked(db.createView("bar", "foo", []));
-
- // Create a collection and view that are never granted specific permissions, to ensure
- // they're only returned by listCollections when the role has access to the whole db/server.
- assert.commandWorked(db.createCollection("otherCollection"));
- assert.commandWorked(db.createView("otherView", "otherCollection", []));
-
- admin.logout();
-
- function runTestOnRole(roleName, expectedColls) {
- jsTestLog(roleName);
- const userName = "user|" + roleName;
- assert(db.auth(userName, "pwd"));
-
- let res;
-
- res = db.runCommand({listCollections: 1});
- assert.commandFailed(res);
- res = db.runCommand({listCollections: 1, nameOnly: true});
- assert.commandFailed(res);
- res = db.runCommand({listCollections: 1, authorizedCollections: true});
- assert.commandFailed(res);
-
- res = db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true});
- assert.commandWorked(res);
- assert.eq(expectedColls.sort(nameSort), res.cursor.firstBatch.sort(nameSort));
-
- res = db.runCommand({
- listCollections: 1,
- nameOnly: true,
- authorizedCollections: true,
- filter: {"name": "foo"}
- });
- assert.commandWorked(res);
- assert.eq([resFoo], res.cursor.firstBatch);
-
- db.logout();
- }
-
- runTestOnRole("roleWithExactNamespacePrivileges", [resFoo, resBar]);
- runTestOnRole("roleWithExactNamespaceAndSystemPrivileges",
- [resFoo, resBar, resSystemViews]);
-
- runTestOnRole("roleWithCollectionPrivileges", [resFoo, resBar]);
- runTestOnRole("roleWithCollectionAndSystemPrivileges", [resFoo, resBar, resSystemViews]);
-
- runTestOnRole("roleWithDatabasePrivileges", [resFoo, resBar, ...resOther]);
- runTestOnRole("roleWithDatabaseAndSystemPrivileges",
- [resFoo, resBar, ...resOther, resSystemViews]);
-
- runTestOnRole("roleWithAnyNormalResourcePrivileges", [resFoo, resBar, ...resOther]);
- runTestOnRole("roleWithAnyNormalResourceAndSystemPrivileges",
- [resFoo, resBar, ...resOther, resSystemViews]);
+"use strict";
+
+const dbName = "list_collections_own_collections";
+
+const nameSort = (a, b) => a.name > b.name;
+const resFoo = {
+ "name": "foo",
+ "type": "collection"
+};
+const resBar = {
+ "name": "bar",
+ "type": "view"
+};
+const resOther =
+ [{"name": "otherCollection", "type": "collection"}, {"name": "otherView", "type": "view"}];
+const resSystemViews = {
+ "name": "system.views",
+ "type": "collection"
+};
+
+function runTestOnConnection(conn) {
+ const admin = conn.getDB("admin");
+ const db = conn.getDB(dbName);
+
+ assert.commandWorked(admin.runCommand({createUser: "root", pwd: "root", roles: ["root"]}));
+ assert(admin.auth("root", "root"));
+
+ function createTestRoleAndUser(roleName, privs) {
+ assert.commandWorked(
+ admin.runCommand({createRole: roleName, roles: [], privileges: privs}));
+
+ const userName = "user|" + roleName;
+ assert.commandWorked(db.runCommand(
+ {createUser: userName, pwd: "pwd", roles: [{role: roleName, db: "admin"}]}));
}
- function runNoAuthTestOnConnection(conn) {
- const admin = conn.getDB("admin");
- const db = conn.getDB(dbName);
-
- assert.commandWorked(db.dropDatabase());
- assert.commandWorked(db.createCollection("foo"));
- assert.commandWorked(db.createView("bar", "foo", []));
-
- var resFull = db.runCommand({listCollections: 1});
- assert.commandWorked(resFull);
- var resAuthColls = db.runCommand({listCollections: 1, authorizedCollections: true});
- assert.commandWorked(resAuthColls);
- assert.eq(resFull.cursor.firstBatch.sort(nameSort),
- resAuthColls.cursor.firstBatch.sort(nameSort));
-
- var resNameOnly = db.runCommand({listCollections: 1, nameOnly: true});
- assert.commandWorked(resNameOnly);
- var resNameOnlyAuthColls =
- db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true});
- assert.commandWorked(resNameOnlyAuthColls);
- assert.eq(resNameOnly.cursor.firstBatch.sort(nameSort),
- resNameOnlyAuthColls.cursor.firstBatch.sort(nameSort));
-
- var resWithFilter = db.runCommand({
+ createTestRoleAndUser("roleWithExactNamespacePrivileges", [
+ {resource: {db: dbName, collection: "foo"}, actions: ["find"]},
+ {resource: {db: dbName, collection: "bar"}, actions: ["find"]}
+ ]);
+
+ createTestRoleAndUser("roleWithExactNamespaceAndSystemPrivileges", [
+ {resource: {db: dbName, collection: "foo"}, actions: ["find"]},
+ {resource: {db: dbName, collection: "bar"}, actions: ["find"]},
+ {resource: {db: dbName, collection: "system.views"}, actions: ["find"]}
+ ]);
+
+ createTestRoleAndUser("roleWithCollectionPrivileges", [
+ {resource: {db: "", collection: "foo"}, actions: ["find"]},
+ {resource: {db: "", collection: "bar"}, actions: ["find"]}
+ ]);
+
+ createTestRoleAndUser("roleWithCollectionAndSystemPrivileges", [
+ {resource: {db: "", collection: "foo"}, actions: ["find"]},
+ {resource: {db: "", collection: "bar"}, actions: ["find"]},
+ {resource: {db: "", collection: "system.views"}, actions: ["find"]}
+ ]);
+
+ createTestRoleAndUser("roleWithDatabasePrivileges", [
+ {resource: {db: dbName, collection: ""}, actions: ["find"]},
+ ]);
+
+ createTestRoleAndUser("roleWithDatabaseAndSystemPrivileges", [
+ {resource: {db: dbName, collection: ""}, actions: ["find"]},
+ {resource: {db: dbName, collection: "system.views"}, actions: ["find"]}
+ ]);
+
+ createTestRoleAndUser("roleWithAnyNormalResourcePrivileges", [
+ {resource: {db: "", collection: ""}, actions: ["find"]},
+ ]);
+
+ createTestRoleAndUser("roleWithAnyNormalResourceAndSystemPrivileges", [
+ {resource: {db: "", collection: ""}, actions: ["find"]},
+ {resource: {db: "", collection: "system.views"}, actions: ["find"]}
+ ]);
+
+ // Create the collection and view used by the tests.
+ assert.commandWorked(db.dropDatabase());
+ assert.commandWorked(db.createCollection("foo"));
+ assert.commandWorked(db.createView("bar", "foo", []));
+
+ // Create a collection and view that are never granted specific permissions, to ensure
+ // they're only returned by listCollections when the role has access to the whole db/server.
+ assert.commandWorked(db.createCollection("otherCollection"));
+ assert.commandWorked(db.createView("otherView", "otherCollection", []));
+
+ admin.logout();
+
+ function runTestOnRole(roleName, expectedColls) {
+ jsTestLog(roleName);
+ const userName = "user|" + roleName;
+ assert(db.auth(userName, "pwd"));
+
+ let res;
+
+ res = db.runCommand({listCollections: 1});
+ assert.commandFailed(res);
+ res = db.runCommand({listCollections: 1, nameOnly: true});
+ assert.commandFailed(res);
+ res = db.runCommand({listCollections: 1, authorizedCollections: true});
+ assert.commandFailed(res);
+
+ res = db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true});
+ assert.commandWorked(res);
+ assert.eq(expectedColls.sort(nameSort), res.cursor.firstBatch.sort(nameSort));
+
+ res = db.runCommand({
listCollections: 1,
nameOnly: true,
authorizedCollections: true,
filter: {"name": "foo"}
});
- assert.commandWorked(resWithFilter);
- assert.eq([{"name": "foo", "type": "collection"}], resWithFilter.cursor.firstBatch);
- }
-
- const mongod = MongoRunner.runMongod({auth: ''});
- runTestOnConnection(mongod);
- MongoRunner.stopMongod(mongod);
-
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runTestOnConnection(st.s0);
- st.stop();
+ assert.commandWorked(res);
+ assert.eq([resFoo], res.cursor.firstBatch);
- const mongodNoAuth = MongoRunner.runMongod();
- runNoAuthTestOnConnection(mongodNoAuth);
- MongoRunner.stopMongod(mongodNoAuth);
-
- const stNoAuth =
- new ShardingTest({shards: 1, mongos: 1, config: 1, other: {shardAsReplicaSet: false}});
- runNoAuthTestOnConnection(stNoAuth.s0);
- stNoAuth.stop();
+ db.logout();
+ }
+ runTestOnRole("roleWithExactNamespacePrivileges", [resFoo, resBar]);
+ runTestOnRole("roleWithExactNamespaceAndSystemPrivileges", [resFoo, resBar, resSystemViews]);
+
+ runTestOnRole("roleWithCollectionPrivileges", [resFoo, resBar]);
+ runTestOnRole("roleWithCollectionAndSystemPrivileges", [resFoo, resBar, resSystemViews]);
+
+ runTestOnRole("roleWithDatabasePrivileges", [resFoo, resBar, ...resOther]);
+ runTestOnRole("roleWithDatabaseAndSystemPrivileges",
+ [resFoo, resBar, ...resOther, resSystemViews]);
+
+ runTestOnRole("roleWithAnyNormalResourcePrivileges", [resFoo, resBar, ...resOther]);
+ runTestOnRole("roleWithAnyNormalResourceAndSystemPrivileges",
+ [resFoo, resBar, ...resOther, resSystemViews]);
+}
+
+function runNoAuthTestOnConnection(conn) {
+ const admin = conn.getDB("admin");
+ const db = conn.getDB(dbName);
+
+ assert.commandWorked(db.dropDatabase());
+ assert.commandWorked(db.createCollection("foo"));
+ assert.commandWorked(db.createView("bar", "foo", []));
+
+ var resFull = db.runCommand({listCollections: 1});
+ assert.commandWorked(resFull);
+ var resAuthColls = db.runCommand({listCollections: 1, authorizedCollections: true});
+ assert.commandWorked(resAuthColls);
+ assert.eq(resFull.cursor.firstBatch.sort(nameSort),
+ resAuthColls.cursor.firstBatch.sort(nameSort));
+
+ var resNameOnly = db.runCommand({listCollections: 1, nameOnly: true});
+ assert.commandWorked(resNameOnly);
+ var resNameOnlyAuthColls =
+ db.runCommand({listCollections: 1, nameOnly: true, authorizedCollections: true});
+ assert.commandWorked(resNameOnlyAuthColls);
+ assert.eq(resNameOnly.cursor.firstBatch.sort(nameSort),
+ resNameOnlyAuthColls.cursor.firstBatch.sort(nameSort));
+
+ var resWithFilter = db.runCommand(
+ {listCollections: 1, nameOnly: true, authorizedCollections: true, filter: {"name": "foo"}});
+ assert.commandWorked(resWithFilter);
+ assert.eq([{"name": "foo", "type": "collection"}], resWithFilter.cursor.firstBatch);
+}
+
+const mongod = MongoRunner.runMongod({auth: ''});
+runTestOnConnection(mongod);
+MongoRunner.stopMongod(mongod);
+
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runTestOnConnection(st.s0);
+st.stop();
+
+const mongodNoAuth = MongoRunner.runMongod();
+runNoAuthTestOnConnection(mongodNoAuth);
+MongoRunner.stopMongod(mongodNoAuth);
+
+const stNoAuth =
+ new ShardingTest({shards: 1, mongos: 1, config: 1, other: {shardAsReplicaSet: false}});
+runNoAuthTestOnConnection(stNoAuth.s0);
+stNoAuth.stop();
}());
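
Aside: the invariant this test pins down is that `authorizedCollections: true` only relaxes the privilege check when paired with `nameOnly: true`; either flag alone still requires the full `listCollections` action on the database. A minimal sketch of that contract, assuming a user whose only grant is `find` on one collection is already authenticated against `db` (hypothetical setup):

    // Hypothetical: the current user holds only find on <db>.foo.
    assert.commandFailed(db.runCommand({listCollections: 1}));
    assert.commandFailed(db.runCommand({listCollections: 1, authorizedCollections: true}));
    const res = db.runCommand(
        {listCollections: 1, nameOnly: true, authorizedCollections: true});
    assert.commandWorked(res);
    assert.eq(["foo"], res.cursor.firstBatch.map((c) => c.name));
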
diff --git a/jstests/auth/list_databases.js b/jstests/auth/list_databases.js
index a69472fb77f..2e374e54fd6 100644
--- a/jstests/auth/list_databases.js
+++ b/jstests/auth/list_databases.js
@@ -1,167 +1,167 @@
// Auth tests for the listDatabases command.
(function() {
- 'use strict';
+'use strict';
- function runTest(mongod) {
- const admin = mongod.getDB('admin');
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
+function runTest(mongod) {
+ const admin = mongod.getDB('admin');
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
- // Establish db0..db7
- for (let i = 0; i < 8; ++i) {
- mongod.getDB('db' + i).foo.insert({bar: "baz"});
- }
- mongod.getDB("db0").baz.insert({x: "y"});
- mongod.getDB("db2").baz.insert({x: "y"});
-
- admin.createRole({
- role: 'dbLister',
- privileges: [{resource: {cluster: true}, actions: ['listDatabases']}],
- roles: []
- });
-
- admin.createRole({
- role: 'specificCollection',
- privileges: [{resource: {db: "db0", collection: "baz"}, actions: ['find']}],
- roles: []
- });
-
- admin.createRole({
- role: 'sharedNameCollections',
- privileges: [{resource: {db: "", collection: "baz"}, actions: ['find']}],
- roles: []
- });
-
- // Make db0, db2, db4, db6 readable to user1 and user3.
- // Make db0, db1, db2, db3 read/writable to user2 and user3.
- function makeRole(perm, dbNum) {
- return {role: perm, db: ("db" + dbNum)};
+ // Establish db0..db7
+ for (let i = 0; i < 8; ++i) {
+ mongod.getDB('db' + i).foo.insert({bar: "baz"});
+ }
+ mongod.getDB("db0").baz.insert({x: "y"});
+ mongod.getDB("db2").baz.insert({x: "y"});
+
+ admin.createRole({
+ role: 'dbLister',
+ privileges: [{resource: {cluster: true}, actions: ['listDatabases']}],
+ roles: []
+ });
+
+ admin.createRole({
+ role: 'specificCollection',
+ privileges: [{resource: {db: "db0", collection: "baz"}, actions: ['find']}],
+ roles: []
+ });
+
+ admin.createRole({
+ role: 'sharedNameCollections',
+ privileges: [{resource: {db: "", collection: "baz"}, actions: ['find']}],
+ roles: []
+ });
+
+ // Make db0, db2, db4, db6 readable to user1 and user3.
+ // Make db0, db1, db2, db3 read/writable to user2 and user3.
+ function makeRole(perm, dbNum) {
+ return {role: perm, db: ("db" + dbNum)};
+ }
+ const readEven = [0, 2, 4, 6].map(function(i) {
+ return makeRole("read", i);
+ });
+ const readWriteLow = [0, 1, 2, 3].map(function(i) {
+ return makeRole("readWrite", i);
+ });
+ admin.createUser({user: 'user1', pwd: 'pass', roles: readEven});
+ admin.createUser({user: 'user2', pwd: 'pass', roles: readWriteLow});
+ admin.createUser({user: 'user3', pwd: 'pass', roles: readEven.concat(readWriteLow)});
+
+ // Make db4 readable by user4, and let them list all dbs.
+ // Make db5 readable by user5, and let them list all dbs.
+ // Make collection baz in db0 findable by user6, and let them list db0.
+ // Make all baz collections findable by user7, and let them list all dbs.
+ admin.createUser({user: 'user4', pwd: 'pass', roles: [makeRole('read', 4), 'dbLister']});
+ admin.createUser({user: 'user5', pwd: 'pass', roles: [makeRole('read', 5), 'dbLister']});
+ admin.createUser({user: 'user6', pwd: 'pass', roles: ['specificCollection']});
+ admin.createUser({user: 'user7', pwd: 'pass', roles: ['sharedNameCollections']});
+ admin.logout();
+
+ const admin_dbs = ["admin", "db0", "db1", "db2", "db3", "db4", "db5", "db6", "db7"];
+
+ [{user: "user1", dbs: ["db0", "db2", "db4", "db6"]},
+ {user: "user2", dbs: ["db0", "db1", "db2", "db3"]},
+ {user: "user3", dbs: ["db0", "db1", "db2", "db3", "db4", "db6"]},
+ {user: "user4", dbs: admin_dbs, authDbs: ["db4"]},
+ {user: "user5", dbs: admin_dbs, authDbs: ["db5"]},
+ {user: "user6", dbs: ["db0"]},
+ {user: "user7", dbs: admin_dbs},
+ {user: "admin", dbs: admin_dbs, authDbs: admin_dbs},
+ ].forEach(function(test) {
+ function filterSpecial(db) {
+ // Whether local/config are returned varies with sharding/mobile/etc.
+ // Ignore these for simplicity.
+ return (db !== 'local') && (db !== 'config');
}
- const readEven = [0, 2, 4, 6].map(function(i) {
- return makeRole("read", i);
- });
- const readWriteLow = [0, 1, 2, 3].map(function(i) {
- return makeRole("readWrite", i);
- });
- admin.createUser({user: 'user1', pwd: 'pass', roles: readEven});
- admin.createUser({user: 'user2', pwd: 'pass', roles: readWriteLow});
- admin.createUser({user: 'user3', pwd: 'pass', roles: readEven.concat(readWriteLow)});
-
- // Make db4 readable by user4, and let them list all dbs.
- // Make db5 readable by user5, and let them list all dbs.
- // Make collection baz in db0 findable by user6, and let them list db0.
- // Make all baz collections findable by user7, and let them list all dbs.
- admin.createUser({user: 'user4', pwd: 'pass', roles: [makeRole('read', 4), 'dbLister']});
- admin.createUser({user: 'user5', pwd: 'pass', roles: [makeRole('read', 5), 'dbLister']});
- admin.createUser({user: 'user6', pwd: 'pass', roles: ['specificCollection']});
- admin.createUser({user: 'user7', pwd: 'pass', roles: ['sharedNameCollections']});
- admin.logout();
- const admin_dbs = ["admin", "db0", "db1", "db2", "db3", "db4", "db5", "db6", "db7"];
-
- [{user: "user1", dbs: ["db0", "db2", "db4", "db6"]},
- {user: "user2", dbs: ["db0", "db1", "db2", "db3"]},
- {user: "user3", dbs: ["db0", "db1", "db2", "db3", "db4", "db6"]},
- {user: "user4", dbs: admin_dbs, authDbs: ["db4"]},
- {user: "user5", dbs: admin_dbs, authDbs: ["db5"]},
- {user: "user6", dbs: ["db0"]},
- {user: "user7", dbs: admin_dbs},
- {user: "admin", dbs: admin_dbs, authDbs: admin_dbs},
- ].forEach(function(test) {
- function filterSpecial(db) {
- // Whether local/config are returned varies with sharding/mobile/etc.
- // Ignore these for simplicity.
- return (db !== 'local') && (db !== 'config');
- }
-
- // Invoking {listDatabases: 1} directly.
- function tryList(cmd, expect_dbs) {
- const dbs = assert.commandWorked(admin.runCommand(cmd));
- assert.eq(dbs.databases
- .map(function(db) {
- return db.name;
- })
- .filter(filterSpecial)
- .sort(),
- expect_dbs,
- test.user + " permissions");
- }
+ // Invoking {listDatabases: 1} directly.
+ function tryList(cmd, expect_dbs) {
+ const dbs = assert.commandWorked(admin.runCommand(cmd));
+ assert.eq(dbs.databases
+ .map(function(db) {
+ return db.name;
+ })
+ .filter(filterSpecial)
+ .sort(),
+ expect_dbs,
+ test.user + " permissions");
+ }
- admin.auth(test.user, 'pass');
- tryList({listDatabases: 1}, test.dbs);
- tryList({listDatabases: 1, authorizedDatabases: true}, test.authDbs || test.dbs);
+ admin.auth(test.user, 'pass');
+ tryList({listDatabases: 1}, test.dbs);
+ tryList({listDatabases: 1, authorizedDatabases: true}, test.authDbs || test.dbs);
- if (test.authDbs) {
- tryList({listDatabases: 1, authorizedDatabases: false}, test.dbs);
- } else {
- // Users without listDatabases cluster perm may not
- // request authorizedDatabases: false.
- assert.throws(tryList, [{listDatabases: 1, authorizedDatabases: false}, test.dbs]);
- }
+ if (test.authDbs) {
+ tryList({listDatabases: 1, authorizedDatabases: false}, test.dbs);
+ } else {
+ // Users without listDatabases cluster perm may not
+ // request authorizedDatabases: false.
+ assert.throws(tryList, [{listDatabases: 1, authorizedDatabases: false}, test.dbs]);
+ }
- // Test using shell helper Mongo.getDBs().
- assert.eq(mongod.getDBs(undefined, {}, true).filter(filterSpecial),
- test.dbs,
- "Shell helper speaking to same version");
- if (test.user !== 'admin' && test.user !== "user7") {
- // Admin and user7 don't have an explicit list of DBs to parse.
- assert.eq(mongod._getDatabaseNamesFromPrivileges(), test.authDbs || test.dbs);
-
- // Test (non-admin) call to Mongo.getDBs() on a < 4.0 MongoD
- // by injecting a command failure into Mongo.adminCommand().
- // This lets us simulate a < 4.0 server.
- const adminCommandFunction = mongod.adminCommand;
- const adminCommandMethod = adminCommandFunction.bind(mongod);
-
- try {
- mongod.adminCommand = function(cmd) {
- if (cmd.hasOwnProperty('listDatabases')) {
- return {
- ok: 0,
- errmsg: 'Stubbed command failure: ' + tojson(cmd),
- code: ErrorCodes.Unauthorized,
- codeName: 'Unauthorized'
- };
- }
- return adminCommandMethod(cmd);
- };
- // Command fails, but we dispatch via _getDatabaseNamesFromPrivileges().
- assert.eq(mongod.getDBs().databases.map(function(x) {
- return x.name;
- }),
- test.authDbs || test.dbs);
-
- // Still dispatches with explicit nameOnly===true, returns only names.
- assert.eq(mongod.getDBs(undefined, undefined, true), test.authDbs || test.dbs);
-
- // Command fails and unable to dispatch because nameOnly !== true.
- assert.throws(() => mongod.getDBs(undefined, undefined, false));
-
- // Command fails and unable to dispatch because filter is not empty.
- assert.throws(() => mongod.getDBs(undefined, {name: 'foo'}));
- } finally {
- mongod.adminCommand = adminCommandFunction;
- }
+ // Test using shell helper Mongo.getDBs().
+ assert.eq(mongod.getDBs(undefined, {}, true).filter(filterSpecial),
+ test.dbs,
+ "Shell helper speaking to same version");
+ if (test.user !== 'admin' && test.user !== "user7") {
+ // Admin and user7 don't have an explicit list of DBs to parse.
+ assert.eq(mongod._getDatabaseNamesFromPrivileges(), test.authDbs || test.dbs);
+
+ // Test (non-admin) call to Mongo.getDBs() on a < 4.0 MongoD
+ // by injecting a command failure into Mongo.adminCommand().
+ // This lets us simulate a < 4.0 server.
+ const adminCommandFunction = mongod.adminCommand;
+ const adminCommandMethod = adminCommandFunction.bind(mongod);
+
+ try {
+ mongod.adminCommand = function(cmd) {
+ if (cmd.hasOwnProperty('listDatabases')) {
+ return {
+ ok: 0,
+ errmsg: 'Stubbed command failure: ' + tojson(cmd),
+ code: ErrorCodes.Unauthorized,
+ codeName: 'Unauthorized'
+ };
+ }
+ return adminCommandMethod(cmd);
+ };
+ // Command fails, but we dispatch via _getDatabaseNamesFromPrivileges().
+ assert.eq(mongod.getDBs().databases.map(function(x) {
+ return x.name;
+ }),
+ test.authDbs || test.dbs);
+
+ // Still dispatches with explicit nameOnly===true, returns only names.
+ assert.eq(mongod.getDBs(undefined, undefined, true), test.authDbs || test.dbs);
+
+ // Command fails and unable to dispatch because nameOnly !== true.
+ assert.throws(() => mongod.getDBs(undefined, undefined, false));
+
+ // Command fails and unable to dispatch because filter is not empty.
+ assert.throws(() => mongod.getDBs(undefined, {name: 'foo'}));
+ } finally {
+ mongod.adminCommand = adminCommandFunction;
}
+ }
- admin.logout();
- });
- }
-
- const mongod = MongoRunner.runMongod({auth: ""});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
-
- if (jsTest.options().storageEngine !== "mobile") {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runTest(st.s0);
- st.stop();
- }
+ admin.logout();
+ });
+}
+
+const mongod = MongoRunner.runMongod({auth: ""});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
+
+if (jsTest.options().storageEngine !== "mobile") {
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
+ runTest(st.s0);
+ st.stop();
+}
})();
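
Aside: for a user without the `listDatabases` cluster action, the command silently filters down to authorized databases, and explicitly requesting the unfiltered view is rejected. A sketch of that behavior, assuming such a user (say, with read on db0 and db2 only) is already authenticated on `admin` (hypothetical setup):

    // Hypothetical: the current user holds read on db0 and db2, no cluster action.
    const res = assert.commandWorked(admin.runCommand({listDatabases: 1}));
    const names = res.databases.map((d) => d.name)
                      .filter((n) => n !== "local" && n !== "config");
    assert.eq(["db0", "db2"], names.sort());
    // The unfiltered view requires the listDatabases cluster action, so this fails.
    assert.commandFailed(admin.runCommand({listDatabases: 1, authorizedDatabases: false}));
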
diff --git a/jstests/auth/list_local_sessions.js b/jstests/auth/list_local_sessions.js
index b943ba45955..3c16df48b8e 100644
--- a/jstests/auth/list_local_sessions.js
+++ b/jstests/auth/list_local_sessions.js
@@ -1,76 +1,75 @@
// All tests for the $listLocalSessions aggregation stage.
(function() {
- 'use strict';
- load('jstests/aggregation/extras/utils.js');
+'use strict';
+load('jstests/aggregation/extras/utils.js');
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- function runListLocalSessionsTest(mongod) {
- assert(mongod);
- const admin = mongod.getDB('admin');
- const db = mongod.getDB("test");
+function runListLocalSessionsTest(mongod) {
+ assert(mongod);
+ const admin = mongod.getDB('admin');
+ const db = mongod.getDB("test");
- const pipeline = [{'$listLocalSessions': {}}];
- function listLocalSessions() {
- return admin.aggregate(pipeline);
- }
+ const pipeline = [{'$listLocalSessions': {}}];
+ function listLocalSessions() {
+ return admin.aggregate(pipeline);
+ }
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
- db.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
- db.createUser({user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles});
- admin.logout();
+ db.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
+ db.createUser({user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.logout();
- // Shouldn't be able to listLocalSessions when not logged in.
- assertErrorCode(admin, pipeline, ErrorCodes.Unauthorized);
+ // Shouldn't be able to listLocalSessions when not logged in.
+ assertErrorCode(admin, pipeline, ErrorCodes.Unauthorized);
- // Start a new session and capture its sessionId.
- assert(db.auth('user1', 'pass'));
- const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
+ // Start a new session and capture its sessionId.
+ assert(db.auth('user1', 'pass'));
+ const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
- // Ensure that the cache now contains the session.
- const resultArray = assert.doesNotThrow(listLocalSessions).toArray();
- assert.eq(resultArray.length, 1);
- const cacheid = resultArray[0]._id.id;
- const myuid = resultArray[0]._id.uid;
- assert(cacheid !== undefined);
- assert.eq(0, bsonWoCompare({x: cacheid}, {x: myid}));
+ // Ensure that the cache now contains the session.
+ const resultArray = assert.doesNotThrow(listLocalSessions).toArray();
+ assert.eq(resultArray.length, 1);
+ const cacheid = resultArray[0]._id.id;
+ const myuid = resultArray[0]._id.uid;
+ assert(cacheid !== undefined);
+ assert.eq(0, bsonWoCompare({x: cacheid}, {x: myid}));
- // Try asking for the session by username.
- function listMyLocalSessions() {
- return admin.aggregate(
- [{'$listLocalSessions': {users: [{user: "user1", db: "test"}]}}]);
- }
- const resultArrayMine = assert.doesNotThrow(listMyLocalSessions).toArray();
- assert.eq(bsonWoCompare(resultArray, resultArrayMine), 0);
+ // Try asking for the session by username.
+ function listMyLocalSessions() {
+ return admin.aggregate([{'$listLocalSessions': {users: [{user: "user1", db: "test"}]}}]);
+ }
+ const resultArrayMine = assert.doesNotThrow(listMyLocalSessions).toArray();
+ assert.eq(bsonWoCompare(resultArray, resultArrayMine), 0);
- // Ensure that changing users hides the session.
- assert(db.auth('user2', 'pass'));
- const otherArray = assert.doesNotThrow(listLocalSessions).toArray();
- assert.eq(otherArray.length, 0);
+ // Ensure that changing users hides the session.
+ assert(db.auth('user2', 'pass'));
+ const otherArray = assert.doesNotThrow(listLocalSessions).toArray();
+ assert.eq(otherArray.length, 0);
- // Ensure that one user cannot explicitly ask for another's sessions.
- assertErrorCode(admin,
- [{'$listLocalSessions': {users: [{user: "user1", db: "test"}]}}],
- ErrorCodes.Unauthorized);
- }
+ // Ensure that one user cannot explicitly ask for another's sessions.
+ assertErrorCode(admin,
+ [{'$listLocalSessions': {users: [{user: "user1", db: "test"}]}}],
+ ErrorCodes.Unauthorized);
+}
- const mongod = MongoRunner.runMongod({auth: ""});
- runListLocalSessionsTest(mongod);
- MongoRunner.stopMongod(mongod);
+const mongod = MongoRunner.runMongod({auth: ""});
+runListLocalSessionsTest(mongod);
+MongoRunner.stopMongod(mongod);
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runListLocalSessionsTest(st.s0);
- st.stop();
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runListLocalSessionsTest(st.s0);
+st.stop();
})();
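
Aside: the visibility rule exercised above, in isolation: a bare `$listLocalSessions` reports only the sessions of the currently authenticated user, and naming any other user is Unauthorized. A sketch, assuming `user1` is already authenticated on a `test` database handle `db` (hypothetical setup):

    const admin = db.getSiblingDB("admin");
    assert.commandWorked(db.runCommand({startSession: 1}));
    // Only user1's sessions are visible by default.
    const mine = admin.aggregate([{$listLocalSessions: {}}]).toArray();
    assert.gte(mine.length, 1);
    // Naming oneself is equivalent; naming anyone else would be rejected.
    const sameUser =
        admin.aggregate([{$listLocalSessions: {users: [{user: "user1", db: "test"}]}}]).toArray();
    assert.eq(mine.length, sameUser.length);
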
diff --git a/jstests/auth/list_sessions.js b/jstests/auth/list_sessions.js
index 2b6c8a0f8f1..f91218d22e4 100644
--- a/jstests/auth/list_sessions.js
+++ b/jstests/auth/list_sessions.js
@@ -4,85 +4,85 @@
*/
(function() {
- 'use strict';
- load('jstests/aggregation/extras/utils.js');
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- function runListSessionsTest(mongod) {
- assert(mongod);
- const admin = mongod.getDB('admin');
- const config = mongod.getDB('config');
-
- const pipeline = [{'$listSessions': {}}];
- function listSessions() {
- return config.system.sessions.aggregate(pipeline);
- }
-
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
-
- admin.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
- admin.createUser({user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles});
- admin.logout();
-
- // Fail when not logged in.
- assertErrorCode(config.system.sessions, pipeline, ErrorCodes.Unauthorized);
-
- // Start a new session and capture its sessionId.
- assert(admin.auth('user1', 'pass'));
- const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
-
- // Sync cache to collection and ensure it arrived.
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- const resultArray = listSessions().toArray();
- assert.eq(resultArray.length, 1);
- const cacheid = resultArray[0]._id.id;
- assert(cacheid !== undefined);
- assert.eq(bsonWoCompare(cacheid, myid), 0);
-
- // Ask again using explicit UID.
- const user1Pipeline = [{'$listSessions': {users: [{user: "user1", db: "admin"}]}}];
- function listUser1Sessions() {
- return config.system.sessions.aggregate(user1Pipeline);
- }
- const resultArrayMine = listUser1Sessions().toArray();
- assert.eq(bsonWoCompare(resultArray, resultArrayMine), 0);
-
- // Make sure pipelining other collections fails
- assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
-
- // Ensure that changing users hides the session everywhere.
- assert(admin.auth('user2', 'pass'));
- assert.eq(listSessions().toArray().length, 0);
-
- // Ensure users can't view each other's sessions.
- assertErrorCode(config.system.sessions, user1Pipeline, ErrorCodes.Unauthorized);
-
- if (true) {
- // TODO SERVER-29141: Support forcing pipelines to run on mongos
- return;
- }
- function listLocalSessions() {
- return config.aggregate([{'$listLocalSessions': {}}]);
- }
- assert.eq(listLocalSessions().toArray().length, 0);
+'use strict';
+load('jstests/aggregation/extras/utils.js');
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+function runListSessionsTest(mongod) {
+ assert(mongod);
+ const admin = mongod.getDB('admin');
+ const config = mongod.getDB('config');
+
+ const pipeline = [{'$listSessions': {}}];
+ function listSessions() {
+ return config.system.sessions.aggregate(pipeline);
+ }
+
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+
+ admin.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.createUser({user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.logout();
+
+ // Fail when not logged in.
+ assertErrorCode(config.system.sessions, pipeline, ErrorCodes.Unauthorized);
+
+ // Start a new session and capture its sessionId.
+ assert(admin.auth('user1', 'pass'));
+ const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
+
+ // Sync cache to collection and ensure it arrived.
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ const resultArray = listSessions().toArray();
+ assert.eq(resultArray.length, 1);
+ const cacheid = resultArray[0]._id.id;
+ assert(cacheid !== undefined);
+ assert.eq(bsonWoCompare(cacheid, myid), 0);
+
+ // Ask again using explicit UID.
+ const user1Pipeline = [{'$listSessions': {users: [{user: "user1", db: "admin"}]}}];
+ function listUser1Sessions() {
+ return config.system.sessions.aggregate(user1Pipeline);
}
+ const resultArrayMine = listUser1Sessions().toArray();
+ assert.eq(bsonWoCompare(resultArray, resultArrayMine), 0);
- const mongod = MongoRunner.runMongod({auth: ""});
- runListSessionsTest(mongod);
- MongoRunner.stopMongod(mongod);
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runListSessionsTest(st.s0);
- st.stop();
+ // Make sure pipelining other collections fails
+ assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
+
+ // Ensure that changing users hides the session everywhere.
+ assert(admin.auth('user2', 'pass'));
+ assert.eq(listSessions().toArray().length, 0);
+
+ // Ensure users can't view either other's sessions.
+ assertErrorCode(config.system.sessions, user1Pipeline, ErrorCodes.Unauthorized);
+
+ if (true) {
+ // TODO SERVER-29141: Support forcing pipelines to run on mongos
+ return;
+ }
+ function listLocalSessions() {
+ return config.aggregate([{'$listLocalSessions': {}}]);
+ }
+ assert.eq(listLocalSessions().toArray().length, 0);
+}
+
+const mongod = MongoRunner.runMongod({auth: ""});
+runListSessionsTest(mongod);
+MongoRunner.stopMongod(mongod);
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runListSessionsTest(st.s0);
+st.stop();
})();
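
Aside: the cache-to-collection handoff the test depends on, shown in isolation: `$listSessions` reads `config.system.sessions`, which only reflects a new session once the logical session cache has been flushed. A sketch, assuming an authenticated `admin` handle (hypothetical setup):

    const config = admin.getSiblingDB("config");
    const sid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
    // The session lives only in the in-memory cache until the flush runs.
    assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
    const docs = config.system.sessions.aggregate([{$listSessions: {}}]).toArray();
    assert(docs.some((d) => bsonWoCompare(d._id.id, sid) === 0));
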
diff --git a/jstests/auth/listcommands_preauth.js b/jstests/auth/listcommands_preauth.js
index 4967628bc22..fae33cf65db 100644
--- a/jstests/auth/listcommands_preauth.js
+++ b/jstests/auth/listcommands_preauth.js
@@ -1,38 +1,36 @@
// Make sure that listCommands doesn't require authentication.
(function() {
- 'use strict';
+'use strict';
- function runTest(conn) {
- const admin = conn.getDB('admin');
+function runTest(conn) {
+ const admin = conn.getDB('admin');
- // Commands should succeed in auth-bypass mode regardless of requiresAuth().
- assert.commandWorked(admin.runCommand({listDatabases: 1}),
- "listDatabases shouldn't work pre-auth");
- assert.commandWorked(admin.runCommand({listCommands: 1}),
- "listCommands should work pre-auth");
+ // Commands should succeed in auth-bypass mode regardless of requiresAuth().
+ assert.commandWorked(admin.runCommand({listDatabases: 1}),
+ "listDatabases shouldn't work pre-auth");
+ assert.commandWorked(admin.runCommand({listCommands: 1}), "listCommands should work pre-auth");
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- // listDatabases should now fail, because auth bypass is no longer valid.
- assert.commandFailed(admin.runCommand({listDatabases: 1}),
- "listDatabases shouldn't work pre-auth");
- // listCommands should STILL work, because it does not require auth.
- assert.commandWorked(admin.runCommand({listCommands: 1}),
- "listCommands should work pre-auth");
- }
+ // listDatabases should now fail, because auth bypass is no longer valid.
+ assert.commandFailed(admin.runCommand({listDatabases: 1}),
+ "listDatabases shouldn't work pre-auth");
+ // listCommands should STILL work, because it does not require auth.
+ assert.commandWorked(admin.runCommand({listCommands: 1}), "listCommands should work pre-auth");
+}
- const mongod = MongoRunner.runMongod({auth: ""});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+const mongod = MongoRunner.runMongod({auth: ""});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
- runTest(st.s0);
- st.stop();
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+runTest(st.s0);
+st.stop();
})();
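
Aside: the split being tested is between the auth-bypass window (no users exist yet, so every command works on localhost) and commands whose requiresAuth() is false (they keep working even after a user is created). A sketch against a hypothetical auth-enabled server that already has one user:

    const conn = new Mongo("localhost:27017");  // hypothetical host:port, no credentials
    const admin = conn.getDB("admin");
    assert.commandWorked(admin.runCommand({listCommands: 1}));   // never requires auth
    assert.commandFailed(admin.runCommand({listDatabases: 1}));  // bypass no longer applies
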
diff --git a/jstests/auth/logs_include_client_info.js b/jstests/auth/logs_include_client_info.js
index 5e793100686..31594a60de5 100644
--- a/jstests/auth/logs_include_client_info.js
+++ b/jstests/auth/logs_include_client_info.js
@@ -2,28 +2,28 @@
// address of the client attempting to authenticate.
(function() {
- const conn = MongoRunner.runMongod({auth: ""});
- const admin = conn.getDB("admin");
+const conn = MongoRunner.runMongod({auth: ""});
+const admin = conn.getDB("admin");
- admin.createUser({
- user: "root",
- pwd: "root",
- roles: ["root"],
- });
+admin.createUser({
+ user: "root",
+ pwd: "root",
+ roles: ["root"],
+});
- assert(admin.auth("root", "root"));
+assert(admin.auth("root", "root"));
- const failConn = new Mongo(conn.host);
- failConn.getDB("admin").auth("root", "toot");
+const failConn = new Mongo(conn.host);
+failConn.getDB("admin").auth("root", "toot");
- const log = assert.commandWorked(admin.runCommand({getLog: "global"})).log;
+const log = assert.commandWorked(admin.runCommand({getLog: "global"})).log;
- const successRegex =
- /Successfully authenticated as principal root on admin from client (?:\d{1,3}\.){3}\d{1,3}:\d+/;
- const failRegex =
- /SASL SCRAM-SHA-\d+ authentication failed for root on admin from client (?:\d{1,3}\.){3}\d{1,3}:\d+/;
+const successRegex =
+ /Successfully authenticated as principal root on admin from client (?:\d{1,3}\.){3}\d{1,3}:\d+/;
+const failRegex =
+ /SASL SCRAM-SHA-\d+ authentication failed for root on admin from client (?:\d{1,3}\.){3}\d{1,3}:\d+/;
- assert(log.some((line) => successRegex.test(line)));
- assert(log.some((line) => failRegex.test(line)));
- MongoRunner.stopMongod(conn);
+assert(log.some((line) => successRegex.test(line)));
+assert(log.some((line) => failRegex.test(line)));
+MongoRunner.stopMongod(conn);
})();
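
Aside: the getLog-plus-regex pattern above is how these tests assert on server log output; a compact sketch, assuming an authenticated `admin` handle (the regex is illustrative, not the exact one from the test):

    const log = assert.commandWorked(admin.runCommand({getLog: "global"})).log;
    const successRegex = /Successfully authenticated as principal \w+ on admin/;
    assert(log.some((line) => successRegex.test(line)),
           "expected at least one successful-auth log line");
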
diff --git a/jstests/auth/mongoURIAuth.js b/jstests/auth/mongoURIAuth.js
index 7f60b2e750e..0ceff7b4b31 100644
--- a/jstests/auth/mongoURIAuth.js
+++ b/jstests/auth/mongoURIAuth.js
@@ -2,71 +2,69 @@
// the specified auth mechanism.
(function() {
- 'use strict';
+'use strict';
- const runURIAuthTest = function(userMech, uriMech, authMechanism, regexMechanism) {
- const conn = MongoRunner.runMongod({auth: ""});
- const adminDB = conn.getDB("admin");
+const runURIAuthTest = function(userMech, uriMech, authMechanism, regexMechanism) {
+ const conn = MongoRunner.runMongod({auth: ""});
+ const adminDB = conn.getDB("admin");
+ adminDB.createUser({
+ user: "u",
+ pwd: "p",
+ roles: ["root"],
+
+ });
+ adminDB.auth("u", "p");
+ adminDB.setLogLevel(2, "command");
+
+ if (userMech) {
adminDB.createUser({
- user: "u",
- pwd: "p",
+ user: "user",
+ pwd: "password",
roles: ["root"],
-
+ mechanisms: [authMechanism],
});
- adminDB.auth("u", "p");
- adminDB.setLogLevel(2, "command");
-
- if (userMech) {
- adminDB.createUser({
- user: "user",
- pwd: "password",
- roles: ["root"],
- mechanisms: [authMechanism],
- });
- } else {
- adminDB.createUser({
- user: "user",
- pwd: "password",
- roles: ["root"],
- });
- }
-
- var uri;
+ } else {
+ adminDB.createUser({
+ user: "user",
+ pwd: "password",
+ roles: ["root"],
+ });
+ }
- if (uriMech) {
- uri = "mongodb://user:password@localhost:" + conn.port + "/admin?authMechanism=" +
- authMechanism;
- } else {
- uri = "mongodb://user:password@localhost:" + conn.port;
- }
+ var uri;
- var shell = runMongoProgram('./mongo', uri, "--eval", "db.getName()");
- assert.eq(shell, 0, "Should be able to connect with specified params.");
+ if (uriMech) {
+ uri = "mongodb://user:password@localhost:" + conn.port +
+ "/admin?authMechanism=" + authMechanism;
+ } else {
+ uri = "mongodb://user:password@localhost:" + conn.port;
+ }
- const log = adminDB.runCommand({getLog: "global"});
- adminDB.logout();
- const matches = tojson(log.log).match(regexMechanism);
- assert(matches);
- assert.eq(2, matches.length);
+ var shell = runMongoProgram('./mongo', uri, "--eval", "db.getName()");
+ assert.eq(shell, 0, "Should be able to connect with specified params.");
- MongoRunner.stopMongod(conn);
- };
+ const log = adminDB.runCommand({getLog: "global"});
+ adminDB.logout();
+ const matches = tojson(log.log).match(regexMechanism);
+ assert(matches);
+ assert.eq(2, matches.length);
- const SCRAM_SHA_256 = "SCRAM-SHA-256";
- const SCRAM_SHA_1 = "SCRAM-SHA-1";
+ MongoRunner.stopMongod(conn);
+};
- const SCRAM_SHA_256_regex = /saslStart.*mechanism:.*SCRAM-SHA-256/g;
- const SCRAM_SHA_1_regex = /saslStart.*mechanism:.*SCRAM-SHA-1/g;
+const SCRAM_SHA_256 = "SCRAM-SHA-256";
+const SCRAM_SHA_1 = "SCRAM-SHA-1";
- jsTestLog("Test that a mechanism specified in the URI is the chosen authentication method.");
- runURIAuthTest(false, true, SCRAM_SHA_256, SCRAM_SHA_256_regex);
+const SCRAM_SHA_256_regex = /saslStart.*mechanism:.*SCRAM-SHA-256/g;
+const SCRAM_SHA_1_regex = /saslStart.*mechanism:.*SCRAM-SHA-1/g;
- jsTestLog(
- "Test that a mechanism specified in CreateUser() is the chosen authentication method.");
- runURIAuthTest(true, false, SCRAM_SHA_1, SCRAM_SHA_1_regex);
+jsTestLog("Test that a mechanism specified in the URI is the chosen authentication method.");
+runURIAuthTest(false, true, SCRAM_SHA_256, SCRAM_SHA_256_regex);
- jsTestLog("Test that SCRAM-SHA-1 is the default authentication method.");
- runURIAuthTest(false, false, SCRAM_SHA_256, SCRAM_SHA_256_regex);
+jsTestLog("Test that a mechanism specified in CreateUser() is the chosen authentication method.");
+runURIAuthTest(true, false, SCRAM_SHA_1, SCRAM_SHA_1_regex);
+jsTestLog("Test that SCRAM-SHA-1 is the default authentication method.");
+runURIAuthTest(false, false, SCRAM_SHA_256, SCRAM_SHA_256_regex);
})(); \ No newline at end of file
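
Aside: the three cases reduce to where the mechanism comes from: pinned in the URI, pinned on the user document, or negotiated (in which case the shell settles on SCRAM-SHA-256 for users created with default credentials). A sketch of the two URI shapes, with a hypothetical port:

    // Hypothetical port 20000; the same user sits behind both URIs.
    const pinned = "mongodb://user:password@localhost:20000/admin?authMechanism=SCRAM-SHA-1";
    const negotiated = "mongodb://user:password@localhost:20000";  // negotiates SCRAM-SHA-256
    assert.eq(0, runMongoProgram('./mongo', pinned, "--eval", "db.getName()"));
    assert.eq(0, runMongoProgram('./mongo', negotiated, "--eval", "db.getName()"));
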
diff --git a/jstests/auth/mongos_cache_invalidation.js b/jstests/auth/mongos_cache_invalidation.js
index 1f7064f558d..0917cb68f36 100644
--- a/jstests/auth/mongos_cache_invalidation.js
+++ b/jstests/auth/mongos_cache_invalidation.js
@@ -120,7 +120,6 @@ db3.auth('spencer', 'pwd');
db3.adminCommand("invalidateUserCache");
assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
assert.eq(4, db3.foo.findOne().a);
-
})();
(function testRevokingPrivileges() {
@@ -212,7 +211,6 @@ db3.auth('spencer', 'pwd');
// We manually invalidate the cache on s2/db3.
db3.adminCommand("invalidateUserCache");
assert.commandFailedWithCode(db3.foo.runCommand("collStats"), authzErrorCode);
-
})();
st.stop();
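
Aside: `invalidateUserCache` is the manual lever these hunks rely on: it drops every cached user so the next authorization check re-reads the user documents. A one-line sketch, assuming an authenticated handle `db3` on a second mongos (hypothetical setup):

    // Force this mongos to refetch user documents on the next authorization check.
    assert.commandWorked(db3.adminCommand("invalidateUserCache"));
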
diff --git a/jstests/auth/pinned_users.js b/jstests/auth/pinned_users.js
index f57bfa85f74..72e6b21fb8b 100644
--- a/jstests/auth/pinned_users.js
+++ b/jstests/auth/pinned_users.js
@@ -8,191 +8,188 @@
*
*/
(function() {
- 'use strict';
- jsTest.setOption("enableTestCommands", true);
- // Start a mongod with the user cache size set to zero, so we know that users who have
- // logged out always get fetched cleanly from disk.
- const rs = new ReplSetTest({
- nodes: 3,
- nodeOptions: {auth: "", setParameter: "authorizationManagerCacheSize=0"},
- keyFile: "jstests/libs/key1"
- });
-
- rs.startSet();
- rs.initiate();
- const mongod = rs.getPrimary();
- const admin = mongod.getDB("admin");
-
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- admin.auth("admin", "admin");
-
- // Mark the "admin2" user as pinned in memory, we'll use this later on to recover from
- // the deadlock
- assert.commandWorked(admin.runCommand({
- setParameter: 1,
- logLevel: 2,
- authorizationManagerPinnedUsers: [
- {user: "admin2", db: "admin"},
- ],
- }));
-
- admin.createUser({user: "admin2", pwd: "admin", roles: ["root"]});
-
- let secondConn = new Mongo(mongod.host);
- let secondAdmin = secondConn.getDB("admin");
- secondAdmin.auth("admin2", "admin");
-
- // Invalidate the user cache so we know only "admin" is in there
- assert.commandWorked(admin.runCommand({invalidateUserCache: 1}));
+'use strict';
+jsTest.setOption("enableTestCommands", true);
+// Start a mongod with the user cache size set to zero, so we know that users who have
+// logged out always get fetched cleanly from disk.
+const rs = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {auth: "", setParameter: "authorizationManagerCacheSize=0"},
+ keyFile: "jstests/libs/key1"
+});
+
+rs.startSet();
+rs.initiate();
+const mongod = rs.getPrimary();
+const admin = mongod.getDB("admin");
+
+admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+admin.auth("admin", "admin");
+
+// Mark the "admin2" user as pinned in memory, we'll use this later on to recover from
+// the deadlock
+assert.commandWorked(admin.runCommand({
+ setParameter: 1,
+ logLevel: 2,
+ authorizationManagerPinnedUsers: [
+ {user: "admin2", db: "admin"},
+ ],
+}));
+
+admin.createUser({user: "admin2", pwd: "admin", roles: ["root"]});
+
+let secondConn = new Mongo(mongod.host);
+let secondAdmin = secondConn.getDB("admin");
+secondAdmin.auth("admin2", "admin");
+
+// Invalidate the user cache so we know only "admin" is in there
+assert.commandWorked(admin.runCommand({invalidateUserCache: 1}));
+assert.soon(function() {
+ let cacheContents = admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
+ print("User cache after initialization: ", tojson(cacheContents));
+
+ const admin2Doc = sortDoc({"username": "admin2", "db": "admin", "active": true});
+ return cacheContents.some((doc) => friendlyEqual(admin2Doc, sortDoc(doc)));
+});
+
+const waitForCommand = function(waitingFor, opFilter) {
+ let opId = -1;
assert.soon(function() {
- let cacheContents = admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
- print("User cache after initialization: ", tojson(cacheContents));
-
- const admin2Doc = sortDoc({"username": "admin2", "db": "admin", "active": true});
- return cacheContents.some((doc) => friendlyEqual(admin2Doc, sortDoc(doc)));
+ print(`Checking for ${waitingFor}`);
+ const curopRes = admin.currentOp();
+ assert.commandWorked(curopRes);
+ const foundOp = curopRes["inprog"].filter(opFilter);
+
+ if (foundOp.length == 1) {
+ opId = foundOp[0]["opid"];
+ }
+ return (foundOp.length == 1);
});
-
- const waitForCommand = function(waitingFor, opFilter) {
- let opId = -1;
- assert.soon(function() {
- print(`Checking for ${waitingFor}`);
- const curopRes = admin.currentOp();
- assert.commandWorked(curopRes);
- const foundOp = curopRes["inprog"].filter(opFilter);
-
- if (foundOp.length == 1) {
- opId = foundOp[0]["opid"];
- }
- return (foundOp.length == 1);
- });
- return opId;
- };
-
- // The deadlock happens in two phases. First we run a command that acquires a read lock and
- // holds it forever.
- let readLockShell = startParallelShell(function() {
- assert.eq(db.getSiblingDB("admin").auth("admin", "admin"), 1);
- assert.commandFailed(db.adminCommand(
- {sleep: 1, secs: 500, lock: "r", lockTarget: "admin", $comment: "Read lock sleep"}));
- }, mongod.port);
-
- // Wait for that command to appear in currentOp
- const readID = waitForCommand(
- "readlock",
- op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "Read lock sleep"));
-
- // Then we run a command that tries to acquire a write lock, which will wait forever
- // because we're already holding a read lock, but will also prevent any new read locks from
- // being taken.
- let writeLockShell = startParallelShell(function() {
- assert.eq(db.getSiblingDB("admin").auth("admin", "admin"), 1);
- assert.commandFailed(db.adminCommand(
- {sleep: 1, secs: 500, lock: "w", lockTarget: "admin", $comment: "Write lock sleep"}));
- }, mongod.port);
-
- // Wait for that to appear in currentOp
- const writeID = waitForCommand(
- "writeLock",
- op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "Write lock sleep"));
-
- print("killing ops and moving on!");
-
- // If "admin2" wasn't pinned in memory, then these would hang.
- assert.commandWorked(secondAdmin.currentOp());
- assert.commandWorked(secondAdmin.killOp(readID));
- assert.commandWorked(secondAdmin.killOp(writeID));
-
- readLockShell();
- writeLockShell();
-
- admin.logout();
- secondAdmin.logout();
- rs.stopSet();
+ return opId;
+};
+
+// The deadlock happens in two phases. First we run a command that acquires a read lock and
+// holds it forever.
+let readLockShell = startParallelShell(function() {
+ assert.eq(db.getSiblingDB("admin").auth("admin", "admin"), 1);
+ assert.commandFailed(db.adminCommand(
+ {sleep: 1, secs: 500, lock: "r", lockTarget: "admin", $comment: "Read lock sleep"}));
+}, mongod.port);
+
+// Wait for that command to appear in currentOp
+const readID = waitForCommand(
+ "readlock", op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "Read lock sleep"));
+
+// Then we run a command that tries to acquire a write lock, which will wait forever
+// because we're already holding a read lock, but will also prevent any new read locks from
+// being taken.
+let writeLockShell = startParallelShell(function() {
+ assert.eq(db.getSiblingDB("admin").auth("admin", "admin"), 1);
+ assert.commandFailed(db.adminCommand(
+ {sleep: 1, secs: 500, lock: "w", lockTarget: "admin", $comment: "Write lock sleep"}));
+}, mongod.port);
+
+// Wait for that to appear in currentOp
+const writeID = waitForCommand(
+ "writeLock",
+ op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "Write lock sleep"));
+
+print("killing ops and moving on!");
+
+// If "admin2" wasn't pinned in memory, then these would hang.
+assert.commandWorked(secondAdmin.currentOp());
+assert.commandWorked(secondAdmin.killOp(readID));
+assert.commandWorked(secondAdmin.killOp(writeID));
+
+readLockShell();
+writeLockShell();
+
+admin.logout();
+secondAdmin.logout();
+rs.stopSet();
})();
// This checks that removing a user document actually unpins a user. This is a roundabout way
// of making sure that updates to the authz manager by the opObserver correctly invalidate the
// cache and that pinned users don't stick around after they're removed.
(function() {
- 'use strict';
- jsTest.setOption("enableTestCommands", true);
- // Start a mongod with the user cache size set to zero, so we know that users who have
- // logged out always get fetched cleanly from disk.
- const mongod =
- MongoRunner.runMongod({auth: "", setParameter: "authorizationManagerCacheSize=0"});
- let admin = mongod.getDB("admin");
-
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- admin.auth("admin", "admin");
-
- // Mark the "admin2" user as pinned in memory
- assert.commandWorked(admin.runCommand({
- setParameter: 1,
- logLevel: 2,
- authorizationManagerPinnedUsers: [
- {user: "admin2", db: "admin"},
- ],
- }));
-
- admin.createUser({user: "admin2", pwd: "admin", roles: ["root"]});
-
- // Invalidate the user cache so we know only "admin" is in there
- assert.commandWorked(admin.runCommand({invalidateUserCache: 1}));
- print("User cache after initialization: ",
- tojson(admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray()));
-
- assert.commandWorked(admin.getCollection("system.users").remove({user: "admin2"}));
-
- print("User cache after removing user doc: ",
- tojson(admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray()));
-
- assert.eq(admin.auth("admin2", "admin"), 0);
- MongoRunner.stopMongod(mongod);
+'use strict';
+jsTest.setOption("enableTestCommands", true);
+// Start a mongod with the user cache size set to zero, so we know that users who have
+// logged out always get fetched cleanly from disk.
+const mongod = MongoRunner.runMongod({auth: "", setParameter: "authorizationManagerCacheSize=0"});
+let admin = mongod.getDB("admin");
+
+admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+admin.auth("admin", "admin");
+
+// Mark the "admin2" user as pinned in memory
+assert.commandWorked(admin.runCommand({
+ setParameter: 1,
+ logLevel: 2,
+ authorizationManagerPinnedUsers: [
+ {user: "admin2", db: "admin"},
+ ],
+}));
+
+admin.createUser({user: "admin2", pwd: "admin", roles: ["root"]});
+
+// Invalidate the user cache so we know only "admin" is in there
+assert.commandWorked(admin.runCommand({invalidateUserCache: 1}));
+print("User cache after initialization: ",
+ tojson(admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray()));
+
+assert.commandWorked(admin.getCollection("system.users").remove({user: "admin2"}));
+
+print("User cache after removing user doc: ",
+ tojson(admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray()));
+
+assert.eq(admin.auth("admin2", "admin"), 0);
+MongoRunner.stopMongod(mongod);
})();
// This checks that clearing the pinned user list actually unpins a user.
(function() {
- 'use strict';
- jsTest.setOption("enableTestCommands", true);
- // Start a mongod with the user cache size set to zero, so we know that users who have
- // logged out always get fetched cleanly from disk.
- const mongod =
- MongoRunner.runMongod({auth: "", setParameter: "authorizationManagerCacheSize=0"});
- let admin = mongod.getDB("admin");
-
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- admin.auth("admin", "admin");
-
- // Mark the "admin2" user as pinned in memory
- assert.commandWorked(admin.runCommand({
- setParameter: 1,
- logLevel: 2,
- authorizationManagerPinnedUsers: [
- {user: "admin2", db: "admin"},
- ],
- }));
-
- admin.createUser({user: "admin2", pwd: "admin", roles: ["root"]});
- assert.soon(function() {
- let cacheContents = admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
- print("User cache after initialization: ", tojson(cacheContents));
-
- const admin2Doc = sortDoc({"username": "admin2", "db": "admin", "active": true});
- return cacheContents.some((doc) => friendlyEqual(admin2Doc, sortDoc(doc)));
- });
-
- // Clear the pinned users list
- assert.commandWorked(admin.runCommand({setParameter: 1, authorizationManagerPinnedUsers: []}));
-
- // Check that admin2 gets removed from the cache
- assert.commandWorked(admin.runCommand({invalidateUserCache: 1}));
- assert.soon(function() {
- let cacheContents = admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
- print("User cache after initialization: ", tojson(cacheContents));
-
- const admin2Doc = sortDoc({"username": "admin2", "db": "admin", "active": true});
- return !cacheContents.some((doc) => friendlyEqual(admin2Doc, sortDoc(doc)));
- });
-
- MongoRunner.stopMongod(mongod);
+'use strict';
+jsTest.setOption("enableTestCommands", true);
+// Start a mongod with the user cache size set to zero, so we know that users who have
+// logged out always get fetched cleanly from disk.
+const mongod = MongoRunner.runMongod({auth: "", setParameter: "authorizationManagerCacheSize=0"});
+let admin = mongod.getDB("admin");
+
+admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+admin.auth("admin", "admin");
+
+// Mark the "admin2" user as pinned in memory
+assert.commandWorked(admin.runCommand({
+ setParameter: 1,
+ logLevel: 2,
+ authorizationManagerPinnedUsers: [
+ {user: "admin2", db: "admin"},
+ ],
+}));
+
+admin.createUser({user: "admin2", pwd: "admin", roles: ["root"]});
+assert.soon(function() {
+ let cacheContents = admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
+ print("User cache after initialization: ", tojson(cacheContents));
+
+ const admin2Doc = sortDoc({"username": "admin2", "db": "admin", "active": true});
+ return cacheContents.some((doc) => friendlyEqual(admin2Doc, sortDoc(doc)));
+});
+
+// Clear the pinned users list
+assert.commandWorked(admin.runCommand({setParameter: 1, authorizationManagerPinnedUsers: []}));
+
+// Check that admin2 gets removed from the cache
+assert.commandWorked(admin.runCommand({invalidateUserCache: 1}));
+assert.soon(function() {
+ let cacheContents = admin.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
+ print("User cache after initialization: ", tojson(cacheContents));
+
+ const admin2Doc = sortDoc({"username": "admin2", "db": "admin", "active": true});
+ return !cacheContents.some((doc) => friendlyEqual(admin2Doc, sortDoc(doc)));
+});
+
+MongoRunner.stopMongod(mongod);
})();
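
Aside: the `waitForCommand` helper above is a reusable pattern: poll currentOp until exactly one operation matches a predicate, then hand back its opid for killOp. A condensed sketch keyed on $comment, assuming an authenticated `admin` handle (names are illustrative):

    function waitForOpByComment(admin, comment) {
        let opId = -1;
        assert.soon(function() {
            const ops = assert.commandWorked(admin.currentOp())
                            .inprog.filter((op) => op.command &&
                                                   op.command.$comment === comment);
            if (ops.length === 1) {
                opId = ops[0].opid;
            }
            return ops.length === 1;
        });
        return opId;
    }
    // Usage: assert.commandWorked(admin.killOp(waitForOpByComment(admin, "Read lock sleep")));
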
diff --git a/jstests/auth/pre_auth_commands_with_sessions.js b/jstests/auth/pre_auth_commands_with_sessions.js
index 0e440a01c13..19bee66efb6 100644
--- a/jstests/auth/pre_auth_commands_with_sessions.js
+++ b/jstests/auth/pre_auth_commands_with_sessions.js
@@ -1,52 +1,51 @@
(function() {
- 'use strict';
-
- var conn = MongoRunner.runMongod({auth: ""});
- var admin = conn.getDB("admin");
- var db = conn.getDB("otherdb");
-
- admin.createUser({user: "admin", pwd: "pwd", roles: jsTest.adminUserRoles});
- admin.auth("admin", "pwd");
- db.createUser({user: "lily", pwd: "pwd", roles: jsTest.basicUserRoles});
+'use strict';
+
+var conn = MongoRunner.runMongod({auth: ""});
+var admin = conn.getDB("admin");
+var db = conn.getDB("otherdb");
+
+admin.createUser({user: "admin", pwd: "pwd", roles: jsTest.adminUserRoles});
+admin.auth("admin", "pwd");
+db.createUser({user: "lily", pwd: "pwd", roles: jsTest.basicUserRoles});
+admin.logout();
+
+var testCommand = function(cmd) {
+ // Test that we can run a pre-auth command without authenticating.
+ var command = {[cmd]: 1};
+
+ assert.commandWorked(admin.runCommand(command));
+
+ // Test that we can authenticate and start a session
+ db.auth("lily", "pwd");
+ var res = admin.runCommand({startSession: 1});
+ assert.commandWorked(res);
+ var id = res.id;
+
+ var commandWithSession = {[cmd]: 1, lsid: res.id};
+
+ // Test that we can run a pre-auth command with a session while
+ // the session owner is logged in (and the session gets ignored)
+ assert.commandWorked(db.runCommand(command),
+ "failed to run command " + cmd + " while logged in");
+ assert.commandWorked(db.runCommand(commandWithSession),
+ "failed to run command " + cmd + " with session while logged in");
+
+ // Test that we can run a pre-auth command with a session while
+ // nobody is logged in (and the session gets ignored)
+ db.logout();
+ assert.commandWorked(db.runCommand(command),
+ "failed to run command " + cmd + " without being logged in");
+ assert.commandWorked(db.runCommand(commandWithSession),
+ "failed to run command " + cmd + " with session without being logged in");
+
+ db.logout();
admin.logout();
+};
- var testCommand = function(cmd) {
- // Test that we can run a pre-auth command without authenticating.
- var command = {[cmd]: 1};
-
- assert.commandWorked(admin.runCommand(command));
-
- // Test that we can authenticate and start a session
- db.auth("lily", "pwd");
- var res = admin.runCommand({startSession: 1});
- assert.commandWorked(res);
- var id = res.id;
-
- var commandWithSession = {[cmd]: 1, lsid: res.id};
-
- // Test that we can run a pre-auth command with a session while
- // the session owner is logged in (and the session gets ignored)
- assert.commandWorked(db.runCommand(command),
- "failed to run command " + cmd + " while logged in");
- assert.commandWorked(db.runCommand(commandWithSession),
- "failed to run command " + cmd + " with session while logged in");
-
- // Test that we can run a pre-auth command with a session while
- // nobody is logged in (and the session gets ignored)
- db.logout();
- assert.commandWorked(db.runCommand(command),
- "failed to run command " + cmd + " without being logged in");
- assert.commandWorked(
- db.runCommand(commandWithSession),
- "failed to run command " + cmd + " with session without being logged in");
-
- db.logout();
- admin.logout();
- };
-
- var commands = ["ping", "ismaster"];
- for (var i = 0; i < commands.length; i++) {
- testCommand(commands[i]);
- }
- MongoRunner.stopMongod(conn, null, {user: "admin", pwd: "pwd"});
+var commands = ["ping", "ismaster"];
+for (var i = 0; i < commands.length; i++) {
+ testCommand(commands[i]);
+}
+MongoRunner.stopMongod(conn, null, {user: "admin", pwd: "pwd"});
})();
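
Aside: the lsid plumbing used above, in isolation: startSession hands back a session document whose id field can be attached verbatim to any later command. A sketch, assuming an authenticated handle `db` (hypothetical setup):

    const res = assert.commandWorked(db.adminCommand({startSession: 1}));
    // Attach the logical session id to an ordinary command.
    assert.commandWorked(db.runCommand({ping: 1, lsid: res.id}));
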
diff --git a/jstests/auth/prepared_transaction.js b/jstests/auth/prepared_transaction.js
index 2864dea5fd5..9605fe32973 100644
--- a/jstests/auth/prepared_transaction.js
+++ b/jstests/auth/prepared_transaction.js
@@ -6,200 +6,200 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 2, keyFile: "jstests/libs/key1"});
- rst.startSet();
- rst.initiate();
-
- const adminDB = rst.getPrimary().getDB("admin");
-
- // Create the admin user.
- assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- // Set up the test database.
- const dbName = "test";
- const collName = "transactions";
- const testDB = adminDB.getSiblingDB(dbName);
- testDB.dropDatabase();
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Create two users. Alice will be given the 'internal' privilege.
- assert.commandWorked(
- adminDB.runCommand({createUser: "Alice", pwd: "pwd", roles: ["root", "__system"]}));
- assert.commandWorked(adminDB.runCommand({createUser: "Mallory", pwd: "pwd", roles: ["root"]}));
- adminDB.logout();
-
- /**
- * Test the prepareTransaction command with Alice who has the 'internal' privilege.
- */
- assert.eq(1, adminDB.auth("Alice", "pwd"));
- let lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
-
- // Start the transaction and insert a document.
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{_id: "alice"}],
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
-
- // Try to run prepareTransaction against the secondary.
- assert.commandFailedWithCode(rst.getSecondary().getDB(dbName).adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // Run prepareTransaction against the primary.
- const prepareTimestamp = assert
- .commandWorked(testDB.adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }))
- .prepareTimestamp;
- const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
-
- // Commit the prepared transaction.
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(2),
- autocommit: false
- }));
-
- assert.eq(1, testDB[collName].find({_id: "alice"}).itcount());
- adminDB.logout();
-
- /**
- * Test the prepareTransaction command with Mallory who does not have the 'internal' privilege.
- */
- assert.eq(1, adminDB.auth("Mallory", "pwd"));
-
- // Start the transaction and insert a document.
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{_id: "mallory"}],
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
-
- // Try to run prepareTransaction against the secondary.
- assert.commandFailedWithCode(rst.getSecondary().getDB(dbName).adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // Run prepareTransaction against the primary.
- assert.commandFailedWithCode(testDB.adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // Cannot commit the transaction with 'commitTimestamp'.
- assert.commandFailedWithCode(testDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: Timestamp(0, 0),
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.InvalidOptions);
-
- // The transaction should be aborted.
- assert.commandFailedWithCode(testDB.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
-
- assert.eq(0, testDB[collName].find({_id: "mallory"}).itcount());
- adminDB.logout();
-
- /**
- * Test the prepareTransaction command with an unauthenticated user.
- */
-
- // Start the transaction and insert a document.
- assert.commandFailedWithCode(testDB.runCommand({
- insert: collName,
- documents: [{_id: "unauthenticated"}],
- lsid: lsid,
- txnNumber: NumberLong(2),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }),
- ErrorCodes.Unauthorized);
-
- // Try to run prepareTransaction against the secondary.
- assert.commandFailedWithCode(rst.getSecondary().getDB(dbName).adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(2),
- stmtId: NumberInt(0),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // Run prepareTransaction against the primary.
- assert.commandFailedWithCode(testDB.adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(2),
- stmtId: NumberInt(0),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // Cannot commit the transaction.
- assert.commandFailedWithCode(testDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: Timestamp(0, 0),
- lsid: lsid,
- txnNumber: NumberLong(2),
- stmtId: NumberInt(0),
- autocommit: false
- }),
- ErrorCodes.Unauthorized);
-
- assert.eq(1, adminDB.auth("Alice", "pwd"));
- assert.eq(0, testDB[collName].find({_id: "unauthenticated"}).itcount());
- assert.commandWorked(testDB.runCommand({endSessions: [lsid]}));
- adminDB.logout();
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 2, keyFile: "jstests/libs/key1"});
+rst.startSet();
+rst.initiate();
+
+const adminDB = rst.getPrimary().getDB("admin");
+
+// Create the admin user.
+assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+assert.eq(1, adminDB.auth("admin", "admin"));
+
+// Set up the test database.
+const dbName = "test";
+const collName = "transactions";
+const testDB = adminDB.getSiblingDB(dbName);
+testDB.dropDatabase();
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Create two users. Alice will be given the 'internal' privilege.
+assert.commandWorked(
+ adminDB.runCommand({createUser: "Alice", pwd: "pwd", roles: ["root", "__system"]}));
+assert.commandWorked(adminDB.runCommand({createUser: "Mallory", pwd: "pwd", roles: ["root"]}));
+adminDB.logout();
+
+/**
+ * Test the prepareTransaction command with Alice who has the 'internal' privilege.
+ */
+assert.eq(1, adminDB.auth("Alice", "pwd"));
+let lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
+
+// Start the transaction and insert a document.
+assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "alice"}],
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Try to run prepareTransaction against the secondary.
+assert.commandFailedWithCode(rst.getSecondary().getDB(dbName).adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// Run prepareTransaction against the primary.
+const prepareTimestamp = assert
+ .commandWorked(testDB.adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+ }))
+ .prepareTimestamp;
+const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
+
+// Commit the prepared transaction.
+assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(2),
+ autocommit: false
+}));
+
+assert.eq(1, testDB[collName].find({_id: "alice"}).itcount());
+adminDB.logout();
+
+/**
+ * Test the prepareTransaction command with Mallory who does not have the 'internal' privilege.
+ */
+assert.eq(1, adminDB.auth("Mallory", "pwd"));
+
+// Start the transaction and insert a document.
+assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "mallory"}],
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Try to run prepareTransaction against the secondary.
+assert.commandFailedWithCode(rst.getSecondary().getDB(dbName).adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// Run prepareTransaction against the primary.
+assert.commandFailedWithCode(testDB.adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// Cannot commit the transaction with 'commitTimestamp'.
+assert.commandFailedWithCode(testDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: Timestamp(0, 0),
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(1),
+ autocommit: false
+}),
+ ErrorCodes.InvalidOptions);
+
+// The transaction should be aborted.
+assert.commandFailedWithCode(testDB.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(1),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+
+assert.eq(0, testDB[collName].find({_id: "mallory"}).itcount());
+adminDB.logout();
+
+/**
+ * Test the prepareTransaction command with an unauthenticated user.
+ */
+
+// Start the transaction and insert a document.
+assert.commandFailedWithCode(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "unauthenticated"}],
+ lsid: lsid,
+ txnNumber: NumberLong(2),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}),
+ ErrorCodes.Unauthorized);
+
+// Try to run prepareTransaction against the secondary.
+assert.commandFailedWithCode(rst.getSecondary().getDB(dbName).adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(2),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// Run prepareTransaction against the primary.
+assert.commandFailedWithCode(testDB.adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(2),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// Cannot commit the transaction.
+assert.commandFailedWithCode(testDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: Timestamp(0, 0),
+ lsid: lsid,
+ txnNumber: NumberLong(2),
+ stmtId: NumberInt(0),
+ autocommit: false
+}),
+ ErrorCodes.Unauthorized);
+
+assert.eq(1, adminDB.auth("Alice", "pwd"));
+assert.eq(0, testDB[collName].find({_id: "unauthenticated"}).itcount());
+assert.commandWorked(testDB.runCommand({endSessions: [lsid]}));
+adminDB.logout();
+
+rst.stopSet();
}());
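For readers skimming the hunk above: prepareTransaction returns the timestamp at which the transaction was prepared, and the later commitTransaction must supply a commit timestamp at or after that point, which is why the test bumps the increment field. A minimal sketch with hypothetical timestamp values:

// Hypothetical values; in the test, prepareTimestamp comes from the
// prepareTransaction reply.
const prepareTimestamp = Timestamp(1564160000, 3);
const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
// commitTimestamp is (1564160000, 4): the same second, a strictly later increment.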
diff --git a/jstests/auth/refresh_logical_session_cache_with_long_usernames.js b/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
index e584a1b8345..fb3cdb294d8 100644
--- a/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
+++ b/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
@@ -2,46 +2,49 @@
// usernames)
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- const mongod = MongoRunner.runMongod({auth: ""});
+const mongod = MongoRunner.runMongod({auth: ""});
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
- const admin = mongod.getDB('admin');
- const db = mongod.getDB("test");
- const config = mongod.getDB("config");
+const admin = mongod.getDB('admin');
+const db = mongod.getDB("test");
+const config = mongod.getDB("config");
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
+admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+assert(admin.auth('admin', 'pass'));
- const longUserName = "x".repeat(1000);
+const longUserName = "x".repeat(1000);
- // Create a user with a long name, so that the refresh records have a chance to blow out the
- // 16MB limit, if all the sessions are flushed in one batch
- db.createUser({user: longUserName, pwd: 'pass', roles: jsTest.basicUserRoles});
- admin.logout();
+// Create a user with a long name, so that the refresh records have a chance to blow out the
+// 16MB limit if all the sessions are flushed in one batch.
+db.createUser({user: longUserName, pwd: 'pass', roles: jsTest.basicUserRoles});
+admin.logout();
- assert(db.auth(longUserName, 'pass'));
+assert(db.auth(longUserName, 'pass'));
- // 20k * 1k = 20mb which is greater than 16mb
- const numSessions = 20000;
- for (var i = 0; i < numSessions; i++) {
- assert.commandWorked(admin.runCommand(startSession), "unable to start session");
- }
+// 20k * 1k = 20MB, which is greater than the 16MB limit
+const numSessions = 20000;
+for (var i = 0; i < numSessions; i++) {
+ assert.commandWorked(admin.runCommand(startSession), "unable to start session");
+}
- assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
+assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
- // Make sure we actually flushed the sessions
- assert.eq(numSessions,
- config.system.sessions.aggregate([{'$listSessions': {}}, {'$count': "count"}])
- .next()
- .count);
+// Make sure we actually flushed the sessions
+assert.eq(
+ numSessions,
+ config.system.sessions.aggregate([{'$listSessions': {}}, {'$count': "count"}]).next().count);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
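The size arithmetic the test depends on, as a quick sketch: every refresh record carries the ~1000-byte username, so flushing all sessions in a single batch would exceed the 16MB limit, forcing the cache to split the flush.

// Back-of-envelope check of the sizes assumed above.
const numSessions = 20000;
const approxRecordBytes = 1000;                      // dominated by the long username
const totalBytes = numSessions * approxRecordBytes;  // 20,000,000 bytes, about 20MB
assert.gt(totalBytes, 16 * 1024 * 1024);             // larger than the 16MB limit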
diff --git a/jstests/auth/renameRestrictedCollections.js b/jstests/auth/renameRestrictedCollections.js
index 1105da4eb3d..40169bef2d6 100644
--- a/jstests/auth/renameRestrictedCollections.js
+++ b/jstests/auth/renameRestrictedCollections.js
@@ -1,109 +1,113 @@
(function() {
- 'use strict';
-
- // SERVER-8623: Test that renameCollection can't be used to bypass auth checks on system
- // namespaces
- const conn = MongoRunner.runMongod({auth: ""});
-
- const adminDB = conn.getDB("admin");
- const configDB = conn.getDB("config");
- const localDB = conn.getDB("local");
- const CodeUnauthorized = 13;
-
- const backdoorUserDoc = {user: 'backdoor', db: 'admin', pwd: 'hashed', roles: ['root']};
-
- adminDB.createUser({user: 'userAdmin', pwd: 'password', roles: ['userAdminAnyDatabase']});
-
- adminDB.auth('userAdmin', 'password');
- adminDB.createUser({user: 'readWriteAdmin', pwd: 'password', roles: ['readWriteAnyDatabase']});
- adminDB.createUser({
- user: 'readWriteAndUserAdmin',
- pwd: 'password',
- roles: ['readWriteAnyDatabase', 'userAdminAnyDatabase']
- });
- adminDB.createUser({user: 'root', pwd: 'password', roles: ['root']});
- adminDB.createUser({user: 'rootier', pwd: 'password', roles: ['__system']});
- adminDB.logout();
-
- jsTestLog("Test that a readWrite user can't rename system.profile to something they can read");
- adminDB.auth('readWriteAdmin', 'password');
- var res = adminDB.system.profile.renameCollection("profile");
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
-
- jsTestLog("Test that a readWrite user can't rename system.users to something they can read");
- res = adminDB.system.users.renameCollection("users");
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
- assert.eq(0, adminDB.users.count());
-
- jsTestLog("Test that a readWrite user can't use renameCollection to override system.users");
- adminDB.users.insert(backdoorUserDoc);
- res = adminDB.users.renameCollection("system.users", true);
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
- adminDB.users.drop();
-
- jsTestLog("Test that a userAdmin can't rename system.users without readWrite");
- adminDB.logout();
- adminDB.auth('userAdmin', 'password');
- res = adminDB.system.users.renameCollection("users");
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
- assert.eq(5, adminDB.system.users.count());
-
- adminDB.auth('readWriteAndUserAdmin', 'password');
- assert.eq(0, adminDB.users.count());
-
- jsTestLog("Test that even with userAdmin AND dbAdmin you CANNOT rename to/from system.users");
- res = adminDB.system.users.renameCollection("users");
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
- assert.eq(5, adminDB.system.users.count());
-
- adminDB.users.drop();
- adminDB.users.insert(backdoorUserDoc);
- res = adminDB.users.renameCollection("system.users");
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
-
- assert.eq(null, adminDB.system.users.findOne({user: backdoorUserDoc.user}));
- assert.neq(null, adminDB.system.users.findOne({user: 'userAdmin'}));
-
- adminDB.auth('rootier', 'password');
-
- jsTestLog("Test that with __system you CAN rename to/from system.users");
- res = adminDB.system.users.renameCollection("users", true);
- assert.eq(1, res.ok, tojson(res));
-
- // Test permissions against the configDB and localDB
-
- // Start with test against inserting to and renaming collections in config and local
- // as userAdminAnyDatabase.
- assert.writeOK(configDB.test.insert({'a': 1}));
- assert.commandWorked(configDB.test.renameCollection('test2'));
-
- assert.writeOK(localDB.test.insert({'a': 1}));
- assert.commandWorked(localDB.test.renameCollection('test2'));
- adminDB.logout();
-
- // Test renaming collection in config with readWriteAnyDatabase
- assert(adminDB.auth('readWriteAdmin', 'password'));
- res = configDB.test2.insert({'b': 2});
- assert.writeError(res, 13, "not authorized on config to execute command");
- res = configDB.test2.renameCollection('test');
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
-
- // Test renaming collection in local with readWriteAnyDatabase
- res = localDB.test2.insert({'b': 2});
- assert.writeError(res, 13, "not authorized on config to execute command");
- res = localDB.test2.renameCollection('test');
- assert.eq(0, res.ok);
- assert.eq(CodeUnauthorized, res.code);
-
- // At this point, all the user documents are gone, so further activity may be unauthorized,
- // depending on cluster configuration. So, this is the end of the test.
- MongoRunner.stopMongod(conn, {user: 'userAdmin', pwd: 'password'});
-
+'use strict';
+
+// SERVER-8623: Test that renameCollection can't be used to bypass auth checks on system
+// namespaces
+const conn = MongoRunner.runMongod({auth: ""});
+
+const adminDB = conn.getDB("admin");
+const configDB = conn.getDB("config");
+const localDB = conn.getDB("local");
+const CodeUnauthorized = 13;
+
+const backdoorUserDoc = {
+ user: 'backdoor',
+ db: 'admin',
+ pwd: 'hashed',
+ roles: ['root']
+};
+
+adminDB.createUser({user: 'userAdmin', pwd: 'password', roles: ['userAdminAnyDatabase']});
+
+adminDB.auth('userAdmin', 'password');
+adminDB.createUser({user: 'readWriteAdmin', pwd: 'password', roles: ['readWriteAnyDatabase']});
+adminDB.createUser({
+ user: 'readWriteAndUserAdmin',
+ pwd: 'password',
+ roles: ['readWriteAnyDatabase', 'userAdminAnyDatabase']
+});
+adminDB.createUser({user: 'root', pwd: 'password', roles: ['root']});
+adminDB.createUser({user: 'rootier', pwd: 'password', roles: ['__system']});
+adminDB.logout();
+
+jsTestLog("Test that a readWrite user can't rename system.profile to something they can read");
+adminDB.auth('readWriteAdmin', 'password');
+var res = adminDB.system.profile.renameCollection("profile");
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+
+jsTestLog("Test that a readWrite user can't rename system.users to something they can read");
+res = adminDB.system.users.renameCollection("users");
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+assert.eq(0, adminDB.users.count());
+
+jsTestLog("Test that a readWrite user can't use renameCollection to override system.users");
+adminDB.users.insert(backdoorUserDoc);
+res = adminDB.users.renameCollection("system.users", true);
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+adminDB.users.drop();
+
+jsTestLog("Test that a userAdmin can't rename system.users without readWrite");
+adminDB.logout();
+adminDB.auth('userAdmin', 'password');
+res = adminDB.system.users.renameCollection("users");
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+assert.eq(5, adminDB.system.users.count());
+
+adminDB.auth('readWriteAndUserAdmin', 'password');
+assert.eq(0, adminDB.users.count());
+
+jsTestLog("Test that even with userAdmin AND dbAdmin you CANNOT rename to/from system.users");
+res = adminDB.system.users.renameCollection("users");
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+assert.eq(5, adminDB.system.users.count());
+
+adminDB.users.drop();
+adminDB.users.insert(backdoorUserDoc);
+res = adminDB.users.renameCollection("system.users");
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+
+assert.eq(null, adminDB.system.users.findOne({user: backdoorUserDoc.user}));
+assert.neq(null, adminDB.system.users.findOne({user: 'userAdmin'}));
+
+adminDB.auth('rootier', 'password');
+
+jsTestLog("Test that with __system you CAN rename to/from system.users");
+res = adminDB.system.users.renameCollection("users", true);
+assert.eq(1, res.ok, tojson(res));
+
+// Test permissions against the configDB and localDB
+
+// Start by testing inserts into and renames of collections in config and local
+// as userAdminAnyDatabase.
+assert.writeOK(configDB.test.insert({'a': 1}));
+assert.commandWorked(configDB.test.renameCollection('test2'));
+
+assert.writeOK(localDB.test.insert({'a': 1}));
+assert.commandWorked(localDB.test.renameCollection('test2'));
+adminDB.logout();
+
+// Test renaming collection in config with readWriteAnyDatabase
+assert(adminDB.auth('readWriteAdmin', 'password'));
+res = configDB.test2.insert({'b': 2});
+assert.writeError(res, 13, "not authorized on config to execute command");
+res = configDB.test2.renameCollection('test');
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+
+// Test renaming collection in local with readWriteAnyDatabase
+res = localDB.test2.insert({'b': 2});
+assert.writeError(res, 13, "not authorized on local to execute command");
+res = localDB.test2.renameCollection('test');
+assert.eq(0, res.ok);
+assert.eq(CodeUnauthorized, res.code);
+
+// At this point, all the user documents are gone, so further activity may be unauthorized,
+// depending on cluster configuration. So, this is the end of the test.
+MongoRunner.stopMongod(conn, {user: 'userAdmin', pwd: 'password'});
})();
\ No newline at end of file
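A note for reviewers unfamiliar with the shell helper: the boolean second argument to renameCollection() above is dropTarget, so the overwrite attempts are equivalent to the following admin command (shown as a sketch):

// Equivalent of adminDB.users.renameCollection("system.users", true):
db.adminCommand({
    renameCollection: "admin.users",  // source namespace
    to: "admin.system.users",         // target namespace
    dropTarget: true                  // drop an existing target first
});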
diff --git a/jstests/auth/resource_pattern_matching.js b/jstests/auth/resource_pattern_matching.js
index ca3c9b50195..ccefb8f51f5 100644
--- a/jstests/auth/resource_pattern_matching.js
+++ b/jstests/auth/resource_pattern_matching.js
@@ -13,12 +13,8 @@ function setup_users(granter) {
admindb.runCommand({
createUser: "admin",
pwd: "admin",
- roles: [
- "userAdminAnyDatabase",
- "dbAdminAnyDatabase",
- "clusterAdmin",
- "readWriteAnyDatabase"
- ]
+ roles:
+ ["userAdminAnyDatabase", "dbAdminAnyDatabase", "clusterAdmin", "readWriteAnyDatabase"]
});
admindb.auth("admin", "admin");
@@ -143,10 +139,10 @@ function run_tests(granter, verifier) {
verifier,
[{resource: {db: "a", collection: "a"}, actions: ["find"]}],
{
- "a.a": should_find,
- "a.b": should_fail_find,
- "b.a": should_fail_find,
- "b.b": should_fail_find
+ "a.a": should_find,
+ "a.b": should_fail_find,
+ "b.a": should_fail_find,
+ "b.b": should_fail_find
});
run_test(
@@ -183,12 +179,12 @@ function run_tests(granter, verifier) {
verifier,
[{resource: {db: "$", collection: "cmd"}, actions: ["find"]}],
{
- "a.a": function(testdb, testcol) {
- var r = testdb.stats();
+ "a.a": function(testdb, testcol) {
+ var r = testdb.stats();
- if (r["ok"])
- throw("db.$.cmd shouldn't give a.stats()");
- }
+ if (r["ok"])
+ throw ("db.$.cmd shouldn't give a.stats()");
+ }
});
run_test_bad_resource("empty_resource", granter, {});
@@ -202,26 +198,26 @@ function run_tests(granter, verifier) {
granter,
verifier,
[
- {resource: {db: "a", collection: "a"}, actions: ["find"]},
- {resource: {db: "", collection: ""}, actions: ["insert"]}
+ {resource: {db: "a", collection: "a"}, actions: ["find"]},
+ {resource: {db: "", collection: ""}, actions: ["insert"]}
],
{
- "a.a": function(testdb, testcol) {
- should_insert(testdb, testcol);
- should_find(testdb, testcol);
- },
- "a.b": function(testdb, testcol) {
- should_insert(testdb, testcol);
- should_fail_find(testdb, testcol);
- },
- "b.a": function(testdb, testcol) {
- should_insert(testdb, testcol);
- should_fail_find(testdb, testcol);
- },
- "b.b": function(testdb, testcol) {
- should_insert(testdb, testcol);
- should_fail_find(testdb, testcol);
- },
+ "a.a": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_find(testdb, testcol);
+ },
+ "a.b": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_fail_find(testdb, testcol);
+ },
+ "b.a": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_fail_find(testdb, testcol);
+ },
+ "b.b": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_fail_find(testdb, testcol);
+ },
});
}
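To summarize the resource patterns run_tests exercises above: {db: "a", collection: "a"} matches exactly one namespace, {db: "", collection: ""} matches any normal (non-system) namespace, and the test verifies that the legacy {db: "$", collection: "cmd"} pseudo-namespace does not grant command execution. A sketch of a role combining the first two (role name hypothetical):

// Hypothetical role mirroring the combined-privilege case above:
// find only on a.a, but insert on any normal namespace.
db.getSiblingDB("admin").createRole({
    role: "findAaInsertAnywhere",
    roles: [],
    privileges: [
        {resource: {db: "a", collection: "a"}, actions: ["find"]},
        {resource: {db: "", collection: ""}, actions: ["insert"]}
    ]
});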
diff --git a/jstests/auth/role_management_commands_edge_cases.js b/jstests/auth/role_management_commands_edge_cases.js
index 9dd774d5518..023f01df95e 100644
--- a/jstests/auth/role_management_commands_edge_cases.js
+++ b/jstests/auth/role_management_commands_edge_cases.js
@@ -92,10 +92,8 @@ function runTest(conn) {
db.createRole({
role: 'role13',
roles: [],
- privileges: [{
- resource: {db: "test", collection: "foo", cluster: true},
- actions: ['find']
- }]
+ privileges:
+ [{resource: {db: "test", collection: "foo", cluster: true}, actions: ['find']}]
});
});
assert.throws(function() {
@@ -116,8 +114,7 @@ function runTest(conn) {
db.createRole({
role: 'role16',
roles: [],
- privileges:
- [{resource: {db: "test", collection: "foo"}, actions: ['fakeAction']}]
+ privileges: [{resource: {db: "test", collection: "foo"}, actions: ['fakeAction']}]
});
});
@@ -233,7 +230,6 @@ function runTest(conn) {
assert.throws(function() {
db.revokeRolesFromRole("readWrite", ['read']);
});
-
})();
(function testGrantPrivilegesToRole() {
diff --git a/jstests/auth/role_management_commands_lib.js b/jstests/auth/role_management_commands_lib.js
index a4156a3be7e..a706899c6e7 100644
--- a/jstests/auth/role_management_commands_lib.js
+++ b/jstests/auth/role_management_commands_lib.js
@@ -194,8 +194,8 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
adminUserAdmin.grantPrivilegesToRole(
'adminRole',
[
- {resource: {cluster: true}, actions: ['serverStatus']},
- {resource: {db: "", collection: ""}, actions: ['find']}
+ {resource: {cluster: true}, actions: ['serverStatus']},
+ {resource: {db: "", collection: ""}, actions: ['find']}
],
writeConcern);
assert.doesNotThrow(function() {
@@ -212,8 +212,8 @@ function runAllRoleManagementCommandsTests(conn, writeConcern) {
testUserAdmin.grantPrivilegesToRole(
'testRole2',
[
- {resource: {db: 'test', collection: ''}, actions: ['insert', 'update']},
- {resource: {db: 'test', collection: 'foo'}, actions: ['find']}
+ {resource: {db: 'test', collection: ''}, actions: ['insert', 'update']},
+ {resource: {db: 'test', collection: 'foo'}, actions: ['find']}
],
writeConcern);
assert.doesNotThrow(function() {
diff --git a/jstests/auth/role_management_commands_sharded_wc_1.js b/jstests/auth/role_management_commands_sharded_wc_1.js
index 400e85b0029..9e7e3482d76 100644
--- a/jstests/auth/role_management_commands_sharded_wc_1.js
+++ b/jstests/auth/role_management_commands_sharded_wc_1.js
@@ -1,18 +1,18 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- load('jstests/auth/role_management_commands_lib.js');
+load('jstests/auth/role_management_commands_lib.js');
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- config: 3,
- keyFile: 'jstests/libs/key1',
- useHostname: false,
- other: {shardAsReplicaSet: false}
- });
- runAllRoleManagementCommandsTests(st.s, {w: 1});
- st.stop();
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ config: 3,
+ keyFile: 'jstests/libs/key1',
+ useHostname: false,
+ other: {shardAsReplicaSet: false}
+});
+runAllRoleManagementCommandsTests(st.s, {w: 1});
+st.stop();
})();
diff --git a/jstests/auth/role_management_commands_sharded_wc_majority.js b/jstests/auth/role_management_commands_sharded_wc_majority.js
index 6bda9c5288a..155aa931feb 100644
--- a/jstests/auth/role_management_commands_sharded_wc_majority.js
+++ b/jstests/auth/role_management_commands_sharded_wc_majority.js
@@ -3,18 +3,18 @@
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/auth/role_management_commands_lib.js');
+load('jstests/auth/role_management_commands_lib.js');
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- config: 3,
- keyFile: 'jstests/libs/key1',
- useHostname: false,
- other: {shardAsReplicaSet: false}
- });
- runAllRoleManagementCommandsTests(st.s, {w: 'majority', wtimeout: 60 * 1000});
- st.stop();
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ config: 3,
+ keyFile: 'jstests/libs/key1',
+ useHostname: false,
+ other: {shardAsReplicaSet: false}
+});
+runAllRoleManagementCommandsTests(st.s, {w: 'majority', wtimeout: 60 * 1000});
+st.stop();
})();
diff --git a/jstests/auth/role_management_commands_standalone.js b/jstests/auth/role_management_commands_standalone.js
index d35f2c30a3f..13d428c8afd 100644
--- a/jstests/auth/role_management_commands_standalone.js
+++ b/jstests/auth/role_management_commands_standalone.js
@@ -1,9 +1,9 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/auth/role_management_commands_lib.js');
+load('jstests/auth/role_management_commands_lib.js');
- var conn = MongoRunner.runMongod({auth: '', useHostname: false});
- runAllRoleManagementCommandsTests(conn);
- MongoRunner.stopMongod(conn);
+var conn = MongoRunner.runMongod({auth: '', useHostname: false});
+runAllRoleManagementCommandsTests(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/auth/sasl_mechanism_discovery.js b/jstests/auth/sasl_mechanism_discovery.js
index 0a2a05c2771..e64c8e3c545 100644
--- a/jstests/auth/sasl_mechanism_discovery.js
+++ b/jstests/auth/sasl_mechanism_discovery.js
@@ -1,80 +1,76 @@
// Tests that a client may discover a user's supported SASL mechanisms via isMaster.
// @tags: [requires_sharding]
(function() {
- "use strict";
+"use strict";
- function runTest(conn) {
- function checkMechs(userid, mechs) {
- const res =
- assert.commandWorked(db.runCommand({isMaster: 1, saslSupportedMechs: userid}));
- assert.eq(mechs.sort(), res.saslSupportedMechs.sort(), tojson(res));
- }
+function runTest(conn) {
+ function checkMechs(userid, mechs) {
+ const res = assert.commandWorked(db.runCommand({isMaster: 1, saslSupportedMechs: userid}));
+ assert.eq(mechs.sort(), res.saslSupportedMechs.sort(), tojson(res));
+ }
- var db = conn.getDB("admin");
- var externalDB = conn.getDB("$external");
+ var db = conn.getDB("admin");
+ var externalDB = conn.getDB("$external");
- assert.commandWorked(db.runCommand(
- {createUser: "userAdmin", pwd: "userAdmin", roles: ["userAdminAnyDatabase"]}));
- db.auth("userAdmin", "userAdmin");
+ assert.commandWorked(db.runCommand(
+ {createUser: "userAdmin", pwd: "userAdmin", roles: ["userAdminAnyDatabase"]}));
+ db.auth("userAdmin", "userAdmin");
- // Check that unknown users do not interrupt isMaster
- let res =
- assert.commandWorked(db.runCommand({isMaster: 1, saslSupportedMechs: "test.bogus"}));
- assert.eq(undefined, res.saslSupportedMechs);
+ // Check that unknown users do not interrupt isMaster
+ let res = assert.commandWorked(db.runCommand({isMaster: 1, saslSupportedMechs: "test.bogus"}));
+ assert.eq(undefined, res.saslSupportedMechs);
- // Check that invalid usernames produce the correct error code
- assert.commandFailedWithCode(db.runCommand({isMaster: 1, saslSupportedMechs: "bogus"}),
- ErrorCodes.BadValue);
+ // Check that invalid usernames produce the correct error code
+ assert.commandFailedWithCode(db.runCommand({isMaster: 1, saslSupportedMechs: "bogus"}),
+ ErrorCodes.BadValue);
- assert.commandWorked(db.runCommand({createUser: "user", pwd: "pwd", roles: []}));
- assert.commandWorked(externalDB.runCommand({createUser: "user", roles: []}));
+ assert.commandWorked(db.runCommand({createUser: "user", pwd: "pwd", roles: []}));
+ assert.commandWorked(externalDB.runCommand({createUser: "user", roles: []}));
- // Internal users should support scram methods.
- checkMechs("admin.user", ["SCRAM-SHA-256", "SCRAM-SHA-1"]);
+    // Internal users should support SCRAM mechanisms.
+ checkMechs("admin.user", ["SCRAM-SHA-256", "SCRAM-SHA-1"]);
- // External users on enterprise should support PLAIN, but not scram methods.
- if (assert.commandWorked(db.runCommand({buildInfo: 1})).modules.includes("enterprise")) {
- checkMechs("$external.user", ["PLAIN"]);
- } else {
- checkMechs("$external.user", []);
- }
+    // External users on enterprise should support PLAIN, but not SCRAM mechanisms.
+ if (assert.commandWorked(db.runCommand({buildInfo: 1})).modules.includes("enterprise")) {
+ checkMechs("$external.user", ["PLAIN"]);
+ } else {
+ checkMechs("$external.user", []);
+ }
- // Users with explicit mechs should only support those mechanisms
- assert.commandWorked(db.runCommand(
- {createUser: "256Only", pwd: "pwd", roles: [], mechanisms: ["SCRAM-SHA-256"]}));
- checkMechs("admin.256Only", ["SCRAM-SHA-256"]);
- assert.commandWorked(db.runCommand(
- {createUser: "1Only", pwd: "pwd", roles: [], mechanisms: ["SCRAM-SHA-1"]}));
- checkMechs("admin.1Only", ["SCRAM-SHA-1"]);
+ // Users with explicit mechs should only support those mechanisms
+ assert.commandWorked(db.runCommand(
+ {createUser: "256Only", pwd: "pwd", roles: [], mechanisms: ["SCRAM-SHA-256"]}));
+ checkMechs("admin.256Only", ["SCRAM-SHA-256"]);
+ assert.commandWorked(
+ db.runCommand({createUser: "1Only", pwd: "pwd", roles: [], mechanisms: ["SCRAM-SHA-1"]}));
+ checkMechs("admin.1Only", ["SCRAM-SHA-1"]);
- // Users with normalized and unnormalized names do not conflict
- assert.commandWorked(db.runCommand({createUser: "IX", pwd: "pwd", roles: []}));
- checkMechs("admin.IX", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
- assert.commandWorked(db.runCommand({createUser: "\u2168", pwd: "pwd", roles: []}));
- checkMechs("admin.\u2168", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
+ // Users with normalized and unnormalized names do not conflict
+ assert.commandWorked(db.runCommand({createUser: "IX", pwd: "pwd", roles: []}));
+ checkMechs("admin.IX", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
+ assert.commandWorked(db.runCommand({createUser: "\u2168", pwd: "pwd", roles: []}));
+ checkMechs("admin.\u2168", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
- // __system's mechanisms can be queried on local and admin if the server is in test mode
- checkMechs("local.__system", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
- checkMechs("admin.__system", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
- }
+ // __system's mechanisms can be queried on local and admin if the server is in test mode
+ checkMechs("local.__system", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
+ checkMechs("admin.__system", ["SCRAM-SHA-1", "SCRAM-SHA-256"]);
+}
- // Test standalone.
- var m = MongoRunner.runMongod({
- keyFile: 'jstests/libs/key1',
- setParameter: "authenticationMechanisms=SCRAM-SHA-1,SCRAM-SHA-256,PLAIN"
- });
- runTest(m);
- MongoRunner.stopMongod(m);
+// Test standalone.
+var m = MongoRunner.runMongod({
+ keyFile: 'jstests/libs/key1',
+ setParameter: "authenticationMechanisms=SCRAM-SHA-1,SCRAM-SHA-256,PLAIN"
+});
+runTest(m);
+MongoRunner.stopMongod(m);
- // Test mongos.
- var st = new ShardingTest({
- keyFile: 'jstests/libs/key1',
- shards: 0,
- other: {
- mongosOptions:
- {setParameter: "authenticationMechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-1"}
- }
- });
- runTest(st.s0);
- st.stop();
+// Test mongos.
+var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 0,
+ other:
+ {mongosOptions: {setParameter: "authenticationMechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-1"}}
+});
+runTest(st.s0);
+st.stop();
})();
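Usage note for the discovery feature tested above: a client can ask for a user's SASL mechanisms in the same isMaster round-trip it already performs before authenticating, e.g.:

// Sketch: discover the mechanisms available to admin.user during isMaster.
const reply = assert.commandWorked(
    db.getSiblingDB("admin").runCommand({isMaster: 1, saslSupportedMechs: "admin.user"}));
printjson(reply.saslSupportedMechs);  // e.g. [ "SCRAM-SHA-1", "SCRAM-SHA-256" ]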
diff --git a/jstests/auth/scram-credentials-invalid.js b/jstests/auth/scram-credentials-invalid.js
index c5553c31f26..282c5c06cc9 100644
--- a/jstests/auth/scram-credentials-invalid.js
+++ b/jstests/auth/scram-credentials-invalid.js
@@ -2,43 +2,40 @@
// user with invalid SCRAM-SHA-1 credentials fails gracefully.
(function() {
- 'use strict';
-
- function runTest(mongod) {
- assert(mongod);
- const admin = mongod.getDB('admin');
- const test = mongod.getDB('test');
-
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
-
- test.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
-
- // Give the test user an invalid set of SCRAM-SHA-1 credentials.
- assert.eq(admin.system.users
- .update({_id: "test.user"}, {
- $set: {
- "credentials.SCRAM-SHA-1": {
- salt: "AAAA",
- storedKey: "AAAA",
- serverKey: "AAAA",
- iterationCount: 10000
- }
- }
- })
- .nModified,
- 1,
- "Should have updated one document for user@test");
- admin.logout();
-
- const error = assert.throws(function() {
- test._authOrThrow({user: 'user', pwd: 'pass'});
- });
-
- assert.eq(error, "Error: credential document SCRAM-SHA-1 failed validation");
- }
-
- const mongod = MongoRunner.runMongod({auth: "", useLogFiles: true});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+'use strict';
+
+function runTest(mongod) {
+ assert(mongod);
+ const admin = mongod.getDB('admin');
+ const test = mongod.getDB('test');
+
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+
+ test.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+
+ // Give the test user an invalid set of SCRAM-SHA-1 credentials.
+ assert.eq(
+ admin.system.users
+ .update({_id: "test.user"}, {
+ $set: {
+ "credentials.SCRAM-SHA-1":
+ {salt: "AAAA", storedKey: "AAAA", serverKey: "AAAA", iterationCount: 10000}
+ }
+ })
+ .nModified,
+ 1,
+ "Should have updated one document for user@test");
+ admin.logout();
+
+ const error = assert.throws(function() {
+ test._authOrThrow({user: 'user', pwd: 'pass'});
+ });
+
+ assert.eq(error, "Error: credential document SCRAM-SHA-1 failed validation");
+}
+
+const mongod = MongoRunner.runMongod({auth: "", useLogFiles: true});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/auth/shell.js b/jstests/auth/shell.js
index b3d391fd70e..0685798952c 100644
--- a/jstests/auth/shell.js
+++ b/jstests/auth/shell.js
@@ -1,19 +1,19 @@
// Authenticate to a mongod from the shell via command line.
(function() {
- 'use strict';
+'use strict';
- const port = allocatePort();
- const mongod = MongoRunner.runMongod({auth: '', port: port});
- const admin = mongod.getDB('admin');
+const port = allocatePort();
+const mongod = MongoRunner.runMongod({auth: '', port: port});
+const admin = mongod.getDB('admin');
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- // Connect via shell round-trip in order to verify handling of mongodb:// uri with password.
- const uri = 'mongodb://admin:pass@localhost:' + port + '/admin';
- // Be sure to actually do something requiring authentication.
- const mongo = runMongoProgram('mongo', uri, '--eval', 'db.system.users.find({});');
- assert.eq(mongo, 0, "Failed connecting to mongod via shell+mongodb uri");
+// Connect via shell round-trip in order to verify handling of mongodb:// uri with password.
+const uri = 'mongodb://admin:pass@localhost:' + port + '/admin';
+// Be sure to actually do something requiring authentication.
+const mongo = runMongoProgram('mongo', uri, '--eval', 'db.system.users.find({});');
+assert.eq(mongo, 0, "Failed connecting to mongod via shell+mongodb uri");
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/auth/system_auth_scram_mechs.js b/jstests/auth/system_auth_scram_mechs.js
index 7b6605a41c9..08934385d05 100644
--- a/jstests/auth/system_auth_scram_mechs.js
+++ b/jstests/auth/system_auth_scram_mechs.js
@@ -4,21 +4,21 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- const keyfile = 'jstests/libs/key1';
- const keyfileContents = cat(keyfile).replace(/[\011-\015\040]/g, '');
- const rs = new ReplSetTest({nodes: 3, keyFile: keyfile});
- rs.startSet();
- rs.initiate();
- const db = rs.getPrimary().getDB("admin");
+const keyfile = 'jstests/libs/key1';
+const keyfileContents = cat(keyfile).replace(/[\011-\015\040]/g, '');
+const rs = new ReplSetTest({nodes: 3, keyFile: keyfile});
+rs.startSet();
+rs.initiate();
+const db = rs.getPrimary().getDB("admin");
- jsTestLog("Testing scram-sha-256");
- assert.eq(db.auth({mechanism: 'SCRAM-SHA-256', user: '__system', pwd: keyfileContents}), 1);
- db.logout();
+jsTestLog("Testing scram-sha-256");
+assert.eq(db.auth({mechanism: 'SCRAM-SHA-256', user: '__system', pwd: keyfileContents}), 1);
+db.logout();
- jsTestLog("Testing scram-sha-1");
- assert.eq(db.auth({mechanism: 'SCRAM-SHA-1', user: '__system', pwd: keyfileContents}), 1);
+jsTestLog("Testing scram-sha-1");
+assert.eq(db.auth({mechanism: 'SCRAM-SHA-1', user: '__system', pwd: keyfileContents}), 1);
- rs.stopSet();
+rs.stopSet();
})();
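A quick illustration of the keyfile normalization above: the octal class \011-\015 covers the ASCII whitespace controls (tab through carriage return) and \040 is the space, so the replace() reduces the keyfile to the bare key string that serves as __system's password.

// Demo of the stripping, with hypothetical key text.
const raw = "  abc\tdef\nghi\r\n";
assert.eq("abcdefghi", raw.replace(/[\011-\015\040]/g, ''));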
diff --git a/jstests/auth/system_roles_collMod.js b/jstests/auth/system_roles_collMod.js
index c82a8d8b8b1..7b5f57567e0 100644
--- a/jstests/auth/system_roles_collMod.js
+++ b/jstests/auth/system_roles_collMod.js
@@ -1,24 +1,24 @@
// Verify custom roles still exist after noop collMod calls
(function() {
- 'use strict';
- print("START auth-system-roles-collMod.js");
- TestData.roleGraphInvalidationIsFatal = false;
- var conn = MongoRunner.runMongod({});
- var db = conn.getDB("test");
+'use strict';
+print("START auth-system-roles-collMod.js");
+TestData.roleGraphInvalidationIsFatal = false;
+var conn = MongoRunner.runMongod({});
+var db = conn.getDB("test");
- assert.commandWorked(db.runCommand(
- {createRole: "role1", roles: [{role: "readWrite", db: "test"}], privileges: []}));
- assert(db.runCommand({rolesInfo: "role1"}).roles[0].role === "role1");
+assert.commandWorked(
+ db.runCommand({createRole: "role1", roles: [{role: "readWrite", db: "test"}], privileges: []}));
+assert(db.runCommand({rolesInfo: "role1"}).roles[0].role === "role1");
- // RoleGraph not invalidated after empty collMod
- assert.commandWorked(db.adminCommand({collMod: "system.roles"}));
- assert(db.runCommand({rolesInfo: "role1"}).roles[0].role === "role1");
+// RoleGraph not invalidated after empty collMod
+assert.commandWorked(db.adminCommand({collMod: "system.roles"}));
+assert(db.runCommand({rolesInfo: "role1"}).roles[0].role === "role1");
- // RoleGraph invalidated after non-empty collMod
- assert.commandWorked(db.adminCommand({collMod: "system.roles", validationLevel: "off"}));
- assert(db.runCommand({rolesInfo: "role1"}).roles.length === 0);
+// RoleGraph invalidated after non-empty collMod
+assert.commandWorked(db.adminCommand({collMod: "system.roles", validationLevel: "off"}));
+assert(db.runCommand({rolesInfo: "role1"}).roles.length === 0);
- print("SUCCESS auth-system-roles-collMod.js");
- MongoRunner.stopMongod(conn);
+print("SUCCESS auth-system-roles-collMod.js");
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/auth/system_user_exception.js b/jstests/auth/system_user_exception.js
index 5955d629135..67814119541 100644
--- a/jstests/auth/system_user_exception.js
+++ b/jstests/auth/system_user_exception.js
@@ -1,21 +1,19 @@
// Test the special handling of the __system user
// works when the SCRAM-SHA-1 pw auth mechanisms are disabled.
(function() {
- "use strict";
+"use strict";
- // Start mongod with no authentication mechanisms enabled
- var m = MongoRunner.runMongod(
- {keyFile: "jstests/libs/key1", setParameter: "authenticationMechanisms=PLAIN"});
+// Start mongod with only the PLAIN authentication mechanism enabled (SCRAM disabled)
+var m = MongoRunner.runMongod(
+ {keyFile: "jstests/libs/key1", setParameter: "authenticationMechanisms=PLAIN"});
- // Verify that it's possible to use SCRAM-SHA-1 to authenticate as the __system@local user
- assert.eq(
- 1, m.getDB("local").auth({user: "__system", pwd: "foopdedoop", mechanism: "SCRAM-SHA-1"}));
+// Verify that it's possible to use SCRAM-SHA-1 to authenticate as the __system@local user
+assert.eq(1,
+ m.getDB("local").auth({user: "__system", pwd: "foopdedoop", mechanism: "SCRAM-SHA-1"}));
- // Verify that it is not possible to authenticate other users
- m.getDB("test").runCommand(
- {createUser: "guest", pwd: "guest", roles: jsTest.readOnlyUserRoles});
- assert.eq(0, m.getDB("test").auth({user: "guest", pwd: "guest", mechanism: "SCRAM-SHA-1"}));
-
- MongoRunner.stopMongod(m);
+// Verify that it is not possible to authenticate other users
+m.getDB("test").runCommand({createUser: "guest", pwd: "guest", roles: jsTest.readOnlyUserRoles});
+assert.eq(0, m.getDB("test").auth({user: "guest", pwd: "guest", mechanism: "SCRAM-SHA-1"}));
+MongoRunner.stopMongod(m);
})();
diff --git a/jstests/auth/system_user_privileges.js b/jstests/auth/system_user_privileges.js
index 164ba9bd2e4..40619ba307c 100644
--- a/jstests/auth/system_user_privileges.js
+++ b/jstests/auth/system_user_privileges.js
@@ -12,91 +12,91 @@
(function() {
- "use strict";
-
- // Runs the "count" command on a database in a way that returns the result document, for easier
- // inspection of the errmsg.
- function runCountCommand(conn, dbName, collectionName) {
- return conn.getDB(dbName).runCommand({count: collectionName});
- }
-
- // Asserts that on the given "conn", "dbName"."collectionName".count() fails as unauthorized.
- function assertCountUnauthorized(conn, dbName, collectionName) {
- assert.eq(runCountCommand(conn, dbName, collectionName).code,
- 13,
- "On " + dbName + "." + collectionName);
- }
-
- var conn = MongoRunner.runMongod({auth: ""});
-
- var admin = conn.getDB('admin');
- var test = conn.getDB('test');
- var local = conn.getDB('local');
-
- //
- // Preliminary set up.
- //
- admin.createUser({user: 'admin', pwd: 'a', roles: jsTest.adminUserRoles});
- admin.auth('admin', 'a');
-
- //
- // Add users named "__system" with no privileges on "test" and "admin", and make sure you can't
- // add one on "local"
- //
-
- test.createUser({user: '__system', pwd: 'a', roles: []});
- admin.createUser({user: '__system', pwd: 'a', roles: []});
- assert.throws(function() {
- local.createUser({user: '__system', pwd: 'a', roles: []});
- });
-
- //
- // Add some data to count.
- //
-
- admin.foo.insert({_id: 1});
- test.foo.insert({_id: 2});
- local.foo.insert({_id: 3});
-
- admin.logout();
- assertCountUnauthorized(conn, "admin", "foo");
- assertCountUnauthorized(conn, "local", "foo");
- assertCountUnauthorized(conn, "test", "foo");
-
- //
- // Validate that you cannot even log in as __system@local with the supplied password; you _must_
- // use the password from the keyfile.
- //
- assert(!local.auth('__system', 'a'));
- assertCountUnauthorized(conn, "admin", "foo");
- assertCountUnauthorized(conn, "local", "foo");
- assertCountUnauthorized(conn, "test", "foo");
-
- //
- // Validate that __system@test is not shadowed by the keyfile __system user.
- //
- test.auth('__system', 'a');
- assertCountUnauthorized(conn, "admin", "foo");
- assertCountUnauthorized(conn, "local", "foo");
- assertCountUnauthorized(conn, "test", "foo");
-
- test.logout();
- assertCountUnauthorized(conn, "admin", "foo");
- assertCountUnauthorized(conn, "local", "foo");
- assertCountUnauthorized(conn, "test", "foo");
-
- //
- // Validate that __system@admin is not shadowed by the keyfile __system user.
- //
- admin.auth('__system', 'a');
- assertCountUnauthorized(conn, "admin", "foo");
- assertCountUnauthorized(conn, "local", "foo");
- assertCountUnauthorized(conn, "test", "foo");
-
- admin.logout();
- assertCountUnauthorized(conn, "admin", "foo");
- assertCountUnauthorized(conn, "local", "foo");
- assertCountUnauthorized(conn, "test", "foo");
-
- MongoRunner.stopMongod(conn, null, {user: 'admin', pwd: 'a'});
+"use strict";
+
+// Runs the "count" command on a database in a way that returns the result document, for easier
+// inspection of the errmsg.
+function runCountCommand(conn, dbName, collectionName) {
+ return conn.getDB(dbName).runCommand({count: collectionName});
+}
+
+// Asserts that on the given "conn", "dbName"."collectionName".count() fails as unauthorized.
+function assertCountUnauthorized(conn, dbName, collectionName) {
+ assert.eq(runCountCommand(conn, dbName, collectionName).code,
+ 13,
+ "On " + dbName + "." + collectionName);
+}
+
+var conn = MongoRunner.runMongod({auth: ""});
+
+var admin = conn.getDB('admin');
+var test = conn.getDB('test');
+var local = conn.getDB('local');
+
+//
+// Preliminary set up.
+//
+admin.createUser({user: 'admin', pwd: 'a', roles: jsTest.adminUserRoles});
+admin.auth('admin', 'a');
+
+//
+// Add users named "__system" with no privileges on "test" and "admin", and make sure you can't
+// add one on "local"
+//
+
+test.createUser({user: '__system', pwd: 'a', roles: []});
+admin.createUser({user: '__system', pwd: 'a', roles: []});
+assert.throws(function() {
+ local.createUser({user: '__system', pwd: 'a', roles: []});
+});
+
+//
+// Add some data to count.
+//
+
+admin.foo.insert({_id: 1});
+test.foo.insert({_id: 2});
+local.foo.insert({_id: 3});
+
+admin.logout();
+assertCountUnauthorized(conn, "admin", "foo");
+assertCountUnauthorized(conn, "local", "foo");
+assertCountUnauthorized(conn, "test", "foo");
+
+//
+// Validate that you cannot even log in as __system@local with the supplied password; you _must_
+// use the password from the keyfile.
+//
+assert(!local.auth('__system', 'a'));
+assertCountUnauthorized(conn, "admin", "foo");
+assertCountUnauthorized(conn, "local", "foo");
+assertCountUnauthorized(conn, "test", "foo");
+
+//
+// Validate that __system@test is not shadowed by the keyfile __system user.
+//
+test.auth('__system', 'a');
+assertCountUnauthorized(conn, "admin", "foo");
+assertCountUnauthorized(conn, "local", "foo");
+assertCountUnauthorized(conn, "test", "foo");
+
+test.logout();
+assertCountUnauthorized(conn, "admin", "foo");
+assertCountUnauthorized(conn, "local", "foo");
+assertCountUnauthorized(conn, "test", "foo");
+
+//
+// Validate that __system@admin is not shadowed by the keyfile __system user.
+//
+admin.auth('__system', 'a');
+assertCountUnauthorized(conn, "admin", "foo");
+assertCountUnauthorized(conn, "local", "foo");
+assertCountUnauthorized(conn, "test", "foo");
+
+admin.logout();
+assertCountUnauthorized(conn, "admin", "foo");
+assertCountUnauthorized(conn, "local", "foo");
+assertCountUnauthorized(conn, "test", "foo");
+
+MongoRunner.stopMongod(conn, null, {user: 'admin', pwd: 'a'});
})();
diff --git a/jstests/auth/transactions.js b/jstests/auth/transactions.js
index 19e6526ab64..7037a78cd98 100644
--- a/jstests/auth/transactions.js
+++ b/jstests/auth/transactions.js
@@ -1,143 +1,141 @@
// Tests that users can only use transactions that they created.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1, keyFile: "jstests/libs/key1"});
- rst.startSet();
- rst.initiate();
-
- const adminDB = rst.getPrimary().getDB("admin");
-
- // Create the admin user.
- assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- // Set up the test database.
- const dbName = "test";
- const collName = "transactions";
- const testDB = adminDB.getSiblingDB(dbName);
- testDB.dropDatabase();
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Create two users, "Alice" and "Mallory".
- assert.commandWorked(
- testDB.runCommand({createUser: "Alice", pwd: "pwd", roles: ["readWrite"]}));
- assert.commandWorked(
- testDB.runCommand({createUser: "Mallory", pwd: "pwd", roles: ["readWrite"]}));
- adminDB.logout();
-
- // Alice starts a transaction.
- assert.eq(1, testDB.auth("Alice", "pwd"));
- const lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{_id: "alice-1"}],
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- testDB.logout();
-
- // Mallory cannot continue the transaction. Using the same lsid for two different users creates
- // two distinct sessions on the server. Mallory's session does not have an open transaction.
- assert.eq(1, testDB.auth("Mallory", "pwd"));
- assert.commandFailedWithCode(testDB.runCommand({
- insert: collName,
- documents: [{_id: "mallory"}],
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Mallory cannot commit the transaction.
- assert.commandFailedWithCode(adminDB.runCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Mallory cannot abort the transaction.
- assert.commandFailedWithCode(adminDB.runCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.NoSuchTransaction);
- testDB.logout();
-
- // An unauthenticated user cannot continue the transaction.
- assert.commandFailedWithCode(testDB.runCommand({
- insert: collName,
- documents: [{_id: "unauthenticated"}],
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.Unauthorized);
-
- // An unauthenticated user cannot commit the transaction.
- assert.commandFailedWithCode(adminDB.runCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // An unauthenticated user cannot abort the transaction.
- assert.commandFailedWithCode(adminDB.runCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.Unauthorized);
-
- // Alice can continue the transaction.
- assert.eq(1, testDB.auth("Alice", "pwd"));
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{_id: "alice-2"}],
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false
- }));
-
- // Alice can commit the transaction.
- assert.commandWorked(adminDB.runCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(2),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
-
- // We do not see the writes from Mallory or the unauthenticated user.
- assert.eq(1, testDB[collName].find({_id: "alice-1"}).itcount());
- assert.eq(1, testDB[collName].find({_id: "alice-2"}).itcount());
- assert.eq(0, testDB[collName].find({_id: "mallory"}).itcount());
- assert.eq(0, testDB[collName].find({_id: "unauthenticated"}).itcount());
-
- assert.commandWorked(testDB.runCommand({endSessions: [lsid]}));
- testDB.logout();
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1, keyFile: "jstests/libs/key1"});
+rst.startSet();
+rst.initiate();
+
+const adminDB = rst.getPrimary().getDB("admin");
+
+// Create the admin user.
+assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+assert.eq(1, adminDB.auth("admin", "admin"));
+
+// Set up the test database.
+const dbName = "test";
+const collName = "transactions";
+const testDB = adminDB.getSiblingDB(dbName);
+testDB.dropDatabase();
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Create two users, "Alice" and "Mallory".
+assert.commandWorked(testDB.runCommand({createUser: "Alice", pwd: "pwd", roles: ["readWrite"]}));
+assert.commandWorked(testDB.runCommand({createUser: "Mallory", pwd: "pwd", roles: ["readWrite"]}));
+adminDB.logout();
+
+// Alice starts a transaction.
+assert.eq(1, testDB.auth("Alice", "pwd"));
+const lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
+assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "alice-1"}],
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+testDB.logout();
+
+// Mallory cannot continue the transaction. Using the same lsid for two different users creates
+// two distinct sessions on the server. Mallory's session does not have an open transaction.
+assert.eq(1, testDB.auth("Mallory", "pwd"));
+assert.commandFailedWithCode(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "mallory"}],
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// Mallory cannot commit the transaction.
+assert.commandFailedWithCode(adminDB.runCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// Mallory cannot abort the transaction.
+assert.commandFailedWithCode(adminDB.runCommand({
+ abortTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.NoSuchTransaction);
+testDB.logout();
+
+// An unauthenticated user cannot continue the transaction.
+assert.commandFailedWithCode(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "unauthenticated"}],
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false
+}),
+ ErrorCodes.Unauthorized);
+
+// An unauthenticated user cannot commit the transaction.
+assert.commandFailedWithCode(adminDB.runCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// An unauthenticated user cannot abort the transaction.
+assert.commandFailedWithCode(adminDB.runCommand({
+ abortTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.Unauthorized);
+
+// Alice can continue the transaction.
+assert.eq(1, testDB.auth("Alice", "pwd"));
+assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{_id: "alice-2"}],
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(1),
+ autocommit: false
+}));
+
+// Alice can commit the transaction.
+assert.commandWorked(adminDB.runCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(2),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+
+// We do not see the writes from Mallory or the unauthenticated user.
+assert.eq(1, testDB[collName].find({_id: "alice-1"}).itcount());
+assert.eq(1, testDB[collName].find({_id: "alice-2"}).itcount());
+assert.eq(0, testDB[collName].find({_id: "mallory"}).itcount());
+assert.eq(0, testDB[collName].find({_id: "unauthenticated"}).itcount());
+
+assert.commandWorked(testDB.runCommand({endSessions: [lsid]}));
+testDB.logout();
+rst.stopSet();
}());
diff --git a/jstests/auth/upgrade_noauth_to_keyfile.js b/jstests/auth/upgrade_noauth_to_keyfile.js
index 9bf2ec115e6..41eef5612c8 100644
--- a/jstests/auth/upgrade_noauth_to_keyfile.js
+++ b/jstests/auth/upgrade_noauth_to_keyfile.js
@@ -13,46 +13,46 @@ load('jstests/multiVersion/libs/multi_rs.js');
TestData.skipGossipingClusterTime = true;
(function() {
- 'use strict';
- var keyFilePath = 'jstests/libs/key1';
+'use strict';
+var keyFilePath = 'jstests/libs/key1';
- // Disable auth explicitly
- var noAuthOptions = {noauth: ''};
+// Disable auth explicitly
+var noAuthOptions = {noauth: ''};
- // Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
- var transitionToAuthOptions =
- {noauth: undefined, clusterAuthMode: 'keyFile', keyFile: keyFilePath, transitionToAuth: ''};
- var keyFileOptions = {
- clusterAuthMode: 'keyFile',
- keyFile: keyFilePath,
- transitionToAuth: undefined
- };
+// Undefine the flags we're replacing; otherwise upgradeSet will keep the old values.
+var transitionToAuthOptions =
+ {noauth: undefined, clusterAuthMode: 'keyFile', keyFile: keyFilePath, transitionToAuth: ''};
+var keyFileOptions = {
+ clusterAuthMode: 'keyFile',
+ keyFile: keyFilePath,
+ transitionToAuth: undefined
+};
- var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: noAuthOptions});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: noAuthOptions});
+rst.startSet();
+rst.initiate();
- var rstConn1 = rst.getPrimary();
+var rstConn1 = rst.getPrimary();
- // Create a user to login as when auth is enabled later
- rstConn1.getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
+// Create a user to login as when auth is enabled later
+rstConn1.getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
- rstConn1.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
- assert.eq(1, rstConn1.getDB('test').a.count(), 'Error interacting with replSet');
+rstConn1.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+assert.eq(1, rstConn1.getDB('test').a.count(), 'Error interacting with replSet');
- print('=== UPGRADE noauth -> transitionToAuth/keyFile ===');
- rst.upgradeSet(transitionToAuthOptions);
- var rstConn2 = rst.getPrimary();
- rstConn2.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
- assert.eq(2, rstConn2.getDB('test').a.count(), 'Error interacting with replSet');
+print('=== UPGRADE noauth -> transitionToAuth/keyFile ===');
+rst.upgradeSet(transitionToAuthOptions);
+var rstConn2 = rst.getPrimary();
+rstConn2.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+assert.eq(2, rstConn2.getDB('test').a.count(), 'Error interacting with replSet');
- print('=== UPGRADE transitionToAuth/keyFile -> keyFile ===');
- rst.upgradeSet(keyFileOptions, 'root', 'root');
+print('=== UPGRADE transitionToAuth/keyFile -> keyFile ===');
+rst.upgradeSet(keyFileOptions, 'root', 'root');
- // upgradeSet leaves its connections logged in as root
- var rstConn3 = rst.getPrimary();
- rstConn3.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
- assert.eq(3, rstConn3.getDB('test').a.count(), 'Error interacting with replSet');
+// upgradeSet leaves its connections logged in as root
+var rstConn3 = rst.getPrimary();
+rstConn3.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+assert.eq(3, rstConn3.getDB('test').a.count(), 'Error interacting with replSet');
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
index 49d9e40c87c..148e9d1dbfc 100644
--- a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
+++ b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
@@ -5,29 +5,25 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
+'use strict';
- // Disable auth explicitly
- var noAuthOptions = {noauth: ''};
- var transitionToAuthOptions = {
- clusterAuthMode: 'keyFile',
- keyFile: KEYFILE,
- transitionToAuth: ''
- };
- var keyFileOptions = {clusterAuthMode: 'keyFile', keyFile: KEYFILE};
+// Disable auth explicitly
+var noAuthOptions = {noauth: ''};
+var transitionToAuthOptions = {clusterAuthMode: 'keyFile', keyFile: KEYFILE, transitionToAuth: ''};
+var keyFileOptions = {clusterAuthMode: 'keyFile', keyFile: KEYFILE};
- print('=== Testing no-auth/transitionToAuth cluster ===');
- mixedShardTest(noAuthOptions, transitionToAuthOptions, true);
- mixedShardTest(transitionToAuthOptions, noAuthOptions, true);
+print('=== Testing no-auth/transitionToAuth cluster ===');
+mixedShardTest(noAuthOptions, transitionToAuthOptions, true);
+mixedShardTest(transitionToAuthOptions, noAuthOptions, true);
- print('=== Testing transitionToAuth/transitionToAuth cluster ===');
- mixedShardTest(transitionToAuthOptions, transitionToAuthOptions, true);
+print('=== Testing transitionToAuth/transitionToAuth cluster ===');
+mixedShardTest(transitionToAuthOptions, transitionToAuthOptions, true);
- print('=== Testing transitionToAuth/keyFile cluster ===');
- mixedShardTest(keyFileOptions, transitionToAuthOptions, true);
- mixedShardTest(transitionToAuthOptions, keyFileOptions, true);
+print('=== Testing transitionToAuth/keyFile cluster ===');
+mixedShardTest(keyFileOptions, transitionToAuthOptions, true);
+mixedShardTest(transitionToAuthOptions, keyFileOptions, true);
- print('=== Testing no-auth/keyFile cluster fails ===');
- mixedShardTest(noAuthOptions, keyFileOptions, false);
- mixedShardTest(keyFileOptions, noAuthOptions, false);
+print('=== Testing no-auth/keyFile cluster fails ===');
+mixedShardTest(noAuthOptions, keyFileOptions, false);
+mixedShardTest(keyFileOptions, noAuthOptions, false);
}());
diff --git a/jstests/auth/user_cache_doc_source.js b/jstests/auth/user_cache_doc_source.js
index f56531fe580..3ffaee9b70e 100644
--- a/jstests/auth/user_cache_doc_source.js
+++ b/jstests/auth/user_cache_doc_source.js
@@ -1,47 +1,47 @@
// Tests the user cache document source
(function() {
- 'use strict';
-
- var mongod = MongoRunner.runMongod({auth: ""});
- var db = mongod.getDB("admin");
- db.createUser({user: "root", pwd: "root", roles: ["userAdminAnyDatabase"]});
- db.auth("root", "root");
- db.createUser({user: "readOnlyUser", pwd: "foobar", roles: ["readAnyDatabase"]});
- var readUserCache = function() {
- var ret = db.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
- print(tojson(ret));
- return ret;
- };
-
- const expectedOnlyRoot = [{username: "root", db: "admin", active: true}];
- assert.eq(expectedOnlyRoot, readUserCache());
-
- /* This is broken because of SERVER-36384
- var newConn = new Mongo(mongod.name);
- assert.eq(newConn.getDB("admin").auth("readOnlyUser", "foobar"), 1);
-
- const expectedBothActive = [
- { username: "root", db: "admin", active: true },
- { username: "readOnlyUser", db: "admin", active: true }
- ];
- assert.eq(expectedBothActive, readUserCache());
-
- newConn.close();
- */
-
- var awaitShell = startParallelShell(function() {
- assert.eq(db.getSisterDB("admin").auth("readOnlyUser", "foobar"), 1);
- }, mongod.port);
-
- const expectedReadOnlyInactive = [
- {username: "readOnlyUser", db: "admin", active: false},
- {username: "root", db: "admin", active: true}
- ];
- assert.soon(function() {
- return friendlyEqual(expectedReadOnlyInactive, readUserCache());
- });
-
- MongoRunner.stopMongod(mongod);
- awaitShell({checkExitSuccess: false});
+'use strict';
+
+var mongod = MongoRunner.runMongod({auth: ""});
+var db = mongod.getDB("admin");
+db.createUser({user: "root", pwd: "root", roles: ["userAdminAnyDatabase"]});
+db.auth("root", "root");
+db.createUser({user: "readOnlyUser", pwd: "foobar", roles: ["readAnyDatabase"]});
+var readUserCache = function() {
+ var ret = db.aggregate([{$listCachedAndActiveUsers: {}}]).toArray();
+ print(tojson(ret));
+ return ret;
+};
+
+const expectedOnlyRoot = [{username: "root", db: "admin", active: true}];
+assert.eq(expectedOnlyRoot, readUserCache());
+
+/* This is broken because of SERVER-36384
+var newConn = new Mongo(mongod.name);
+assert.eq(newConn.getDB("admin").auth("readOnlyUser", "foobar"), 1);
+
+const expectedBothActive = [
+ { username: "root", db: "admin", active: true },
+ { username: "readOnlyUser", db: "admin", active: true }
+];
+assert.eq(expectedBothActive, readUserCache());
+
+newConn.close();
+*/
+
+var awaitShell = startParallelShell(function() {
+ assert.eq(db.getSisterDB("admin").auth("readOnlyUser", "foobar"), 1);
+}, mongod.port);
+
+const expectedReadOnlyInactive = [
+ {username: "readOnlyUser", db: "admin", active: false},
+ {username: "root", db: "admin", active: true}
+];
+assert.soon(function() {
+ return friendlyEqual(expectedReadOnlyInactive, readUserCache());
+});
+
+MongoRunner.stopMongod(mongod);
+awaitShell({checkExitSuccess: false});
})();
diff --git a/jstests/auth/user_defined_roles.js b/jstests/auth/user_defined_roles.js
index 221c8c06e5a..a58d4ea52b6 100644
--- a/jstests/auth/user_defined_roles.js
+++ b/jstests/auth/user_defined_roles.js
@@ -100,10 +100,9 @@ function runTest(conn) {
testDB.updateUser('testUser', {customData: {zipCode: 10036}});
});
assert.eq(null, testDB.getUser('testUser').customData);
- testUserAdmin.grantPrivilegesToRole('testRole1',
- [{
- resource: {db: 'test', collection: ''},
- actions: ['changeOwnPassword', 'changeOwnCustomData']
+ testUserAdmin.grantPrivilegesToRole('testRole1', [{
+ resource: {db: 'test', collection: ''},
+ actions: ['changeOwnPassword', 'changeOwnCustomData']
}]);
testDB.changeUserPassword('testUser', 'password');
assert(!testDB.auth('testUser', 'pwd'));
diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js
index 69f768c3c15..47746e9cd56 100644
--- a/jstests/auth/user_defined_roles_on_secondaries.js
+++ b/jstests/auth/user_defined_roles_on_secondaries.js
@@ -37,197 +37,195 @@
(function() {
- var name = 'user_defined_roles_on_secondaries';
- var m0, m1;
-
- function assertListContainsRole(list, role, msg) {
- var i;
- for (i = 0; i < list.length; ++i) {
- if (list[i].role == role.role && list[i].db == role.db)
- return;
- }
- doassert("Could not find value " + tojson(val) + " in " + tojson(list) +
- (msg ? ": " + msg : ""));
+var name = 'user_defined_roles_on_secondaries';
+var m0, m1;
+
+function assertListContainsRole(list, role, msg) {
+ var i;
+ for (i = 0; i < list.length; ++i) {
+ if (list[i].role == role.role && list[i].db == role.db)
+ return;
}
-
- //
- // Create a 1-node replicaset and add two roles, inheriting the built-in read role on db1.
- //
- // read
- // / \
- // r1 r2
- //
- var rstest = new ReplSetTest({name: name, nodes: 1, nodeOptions: {}});
-
- rstest.startSet();
- rstest.initiate();
-
- m0 = rstest.nodes[0];
-
- m0.getDB("db1").createRole({
- role: "r1",
- roles: ["read"],
- privileges: [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
- });
-
- m0.getDB("db1").createRole({
- role: "r2",
- roles: ["read"],
- privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
- });
-
- //
- // Add a second node to the set, and add a third role, dependent on the first two.
- //
- // read
- // / \
- // r1 r2
- // \ /
- // r3
- //
- rstest.add();
- rstest.reInitiate();
-
- // This write will have to wait on the initial sync to complete before progressing.
- assert.soonNoExcept(() => {
- assert.writeOK(rstest.getPrimary().getDB("db1")["aCollection"].insert(
- {a: "afterSecondNodeAdded"}, {writeConcern: {w: 2, wtimeout: 60 * 1000}}));
- return true;
- });
-
- rstest.getPrimary().getDB("db1").createRole({
- role: "r3",
- roles: ["r1", "r2"],
- privileges: [{resource: {db: "db1", collection: "log"}, actions: ["update"]}]
- },
- {w: 2});
-
- // Verify that both members of the set see the same role graph.
- rstest.nodes.forEach(function(node) {
- var role = node.getDB("db1").getRole("r3");
- assert.eq(2, role.roles.length, tojson(node));
- assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
- assertListContainsRole(role.roles, {role: "r2", db: "db1"}, node);
- assert.eq(3, role.inheritedRoles.length, tojson(node));
- assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
- assertListContainsRole(role.inheritedRoles, {role: "r2", db: "db1"}, node);
- assertListContainsRole(role.inheritedRoles, {role: "read", db: "db1"}, node);
- });
-
- // Verify that updating roles propagates.
- rstest.getPrimary().getDB("db1").revokeRolesFromRole("r1", ["read"], {w: 2});
- rstest.getPrimary().getDB("db1").grantRolesToRole("r1", ["dbAdmin"], {w: 2});
- rstest.nodes.forEach(function(node) {
- var role = node.getDB("db1").getRole("r1");
- assert.eq(1, role.roles.length, tojson(node));
- assertListContainsRole(role.roles, {role: "dbAdmin", db: "db1"});
- });
-
- // Verify that dropping roles propagates.
- rstest.getPrimary().getDB("db1").dropRole("r2", {w: 2});
- rstest.nodes.forEach(function(node) {
- assert.eq(null, node.getDB("db1").getRole("r2"));
- var role = node.getDB("db1").getRole("r3");
- assert.eq(1, role.roles.length, tojson(node));
- assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
- assert.eq(2, role.inheritedRoles.length, tojson(node));
- assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
- assertListContainsRole(role.inheritedRoles, {role: "dbAdmin", db: "db1"}, node);
- });
-
- // Verify that applyOps commands propagate.
- // NOTE: This section of the test depends on the oplog and roles schemas.
- assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand({
- applyOps: [
- {op: "c", ns: "admin.$cmd", o: {create: "system.roles"}},
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.s1",
- role: "s1",
- db: "db1",
- roles: [{role: "read", db: "db1"}],
- privileges:
- [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
- }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.s2",
- role: "s2",
- db: "db1",
- roles: [{role: "read", db: "db1"}],
- privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
- }
- },
- {op: "c", ns: "admin.$cmd", o: {drop: "system.roles"}},
- {op: "c", ns: "admin.$cmd", o: {create: "system.roles"}},
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.t1",
- role: "t1",
- db: "db1",
- roles: [{role: "read", db: "db1"}],
- privileges:
- [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
- }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.t2",
- role: "t2",
- db: "db1",
- roles: [],
- privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
- }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.t3",
- role: "t3",
- db: "db1",
- roles: [{role: "t1", db: "db1"}, {role: "t2", db: "db1"}],
- privileges: []
- }
- },
- {
- op: "u",
- ns: "admin.system.roles",
- o: {$set: {roles: [{role: "readWrite", db: "db1"}]}},
- o2: {_id: "db1.t2"}
+ doassert("Could not find value " + tojson(val) + " in " + tojson(list) +
+ (msg ? ": " + msg : ""));
+}
+
+//
+// Create a 1-node replica set and add two roles, inheriting the built-in read role on db1.
+//
+// read
+// / \
+// r1 r2
+//
+var rstest = new ReplSetTest({name: name, nodes: 1, nodeOptions: {}});
+
+rstest.startSet();
+rstest.initiate();
+
+m0 = rstest.nodes[0];
+
+m0.getDB("db1").createRole({
+ role: "r1",
+ roles: ["read"],
+ privileges: [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
+});
+
+m0.getDB("db1").createRole({
+ role: "r2",
+ roles: ["read"],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
+});
+
+//
+// Add a second node to the set, and add a third role, dependent on the first two.
+//
+// read
+// / \
+// r1 r2
+// \ /
+// r3
+//
+rstest.add();
+rstest.reInitiate();
+
+// This write will have to wait on the initial sync to complete before progressing.
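+// The {w: 2} write concern cannot be satisfied until the new member finishes
+// initial sync, so the write is retried until it goes through.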
+assert.soonNoExcept(() => {
+ assert.writeOK(rstest.getPrimary().getDB("db1")["aCollection"].insert(
+ {a: "afterSecondNodeAdded"}, {writeConcern: {w: 2, wtimeout: 60 * 1000}}));
+ return true;
+});
+
+rstest.getPrimary().getDB("db1").createRole({
+ role: "r3",
+ roles: ["r1", "r2"],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["update"]}]
+},
+ {w: 2});
+
+// Verify that both members of the set see the same role graph.
+rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("r3");
+ assert.eq(2, role.roles.length, tojson(node));
+ assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
+ assertListContainsRole(role.roles, {role: "r2", db: "db1"}, node);
+ assert.eq(3, role.inheritedRoles.length, tojson(node));
+ assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
+ assertListContainsRole(role.inheritedRoles, {role: "r2", db: "db1"}, node);
+ assertListContainsRole(role.inheritedRoles, {role: "read", db: "db1"}, node);
+});
+
+// Verify that updating roles propagates.
+rstest.getPrimary().getDB("db1").revokeRolesFromRole("r1", ["read"], {w: 2});
+rstest.getPrimary().getDB("db1").grantRolesToRole("r1", ["dbAdmin"], {w: 2});
+rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("r1");
+ assert.eq(1, role.roles.length, tojson(node));
+ assertListContainsRole(role.roles, {role: "dbAdmin", db: "db1"});
+});
+
+// Verify that dropping roles propagates.
+rstest.getPrimary().getDB("db1").dropRole("r2", {w: 2});
+rstest.nodes.forEach(function(node) {
+ assert.eq(null, node.getDB("db1").getRole("r2"));
+ var role = node.getDB("db1").getRole("r3");
+ assert.eq(1, role.roles.length, tojson(node));
+ assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
+ assert.eq(2, role.inheritedRoles.length, tojson(node));
+ assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
+ assertListContainsRole(role.inheritedRoles, {role: "dbAdmin", db: "db1"}, node);
+});
+
+// Verify that applyOps commands propagate.
+// NOTE: This section of the test depends on the oplog and roles schemas.
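+// Writing admin.system.roles through applyOps bypasses the role management
+// commands, so this checks that each node rebuilds its in-memory role graph
+// directly from the replicated document writes.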
+assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand({
+ applyOps: [
+ {op: "c", ns: "admin.$cmd", o: {create: "system.roles"}},
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.s1",
+ role: "s1",
+ db: "db1",
+ roles: [{role: "read", db: "db1"}],
+ privileges: [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
+ }
+ },
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.s2",
+ role: "s2",
+ db: "db1",
+ roles: [{role: "read", db: "db1"}],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
+ }
+ },
+ {op: "c", ns: "admin.$cmd", o: {drop: "system.roles"}},
+ {op: "c", ns: "admin.$cmd", o: {create: "system.roles"}},
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.t1",
+ role: "t1",
+ db: "db1",
+ roles: [{role: "read", db: "db1"}],
+ privileges: [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
}
- ]
- }));
-
- assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2));
- rstest.nodes.forEach(function(node) {
- var role = node.getDB("db1").getRole("t1");
- assert.eq(1, role.roles.length, tojson(node));
- assertListContainsRole(role.roles, {role: "read", db: "db1"}, node);
-
- var role = node.getDB("db1").getRole("t2");
- assert.eq(1, role.roles.length, tojson(node));
- assertListContainsRole(role.roles, {role: "readWrite", db: "db1"}, node);
- });
-
- // Verify that irrelevant index creation doesn't impair graph resolution
- assert.commandWorked(rstest.getPrimary().getDB("admin").col.save({data: 5}));
- assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand(
- {createIndexes: "col", indexes: [{key: {data: 1}, name: "testIndex"}]}));
- rstest.awaitReplication();
- rstest.nodes.forEach(function(node) {
- var role = node.getDB("db1").getRole("t3");
- assert.eq(4, role.inheritedRoles.length, tojson(node));
- });
-
- rstest.stopSet();
+ },
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.t2",
+ role: "t2",
+ db: "db1",
+ roles: [],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
+ }
+ },
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.t3",
+ role: "t3",
+ db: "db1",
+ roles: [{role: "t1", db: "db1"}, {role: "t2", db: "db1"}],
+ privileges: []
+ }
+ },
+ {
+ op: "u",
+ ns: "admin.system.roles",
+ o: {$set: {roles: [{role: "readWrite", db: "db1"}]}},
+ o2: {_id: "db1.t2"}
+ }
+ ]
+}));
+
+assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2));
+rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("t1");
+ assert.eq(1, role.roles.length, tojson(node));
+ assertListContainsRole(role.roles, {role: "read", db: "db1"}, node);
+
+ var role = node.getDB("db1").getRole("t2");
+ assert.eq(1, role.roles.length, tojson(node));
+ assertListContainsRole(role.roles, {role: "readWrite", db: "db1"}, node);
+});
+
+// Verify that irrelevant index creation doesn't impair graph resolution
+assert.commandWorked(rstest.getPrimary().getDB("admin").col.save({data: 5}));
+assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand(
+ {createIndexes: "col", indexes: [{key: {data: 1}, name: "testIndex"}]}));
+rstest.awaitReplication();
+rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("t3");
+ assert.eq(4, role.inheritedRoles.length, tojson(node));
+});
+
+rstest.stopSet();
}());
diff --git a/jstests/auth/user_management_commands_edge_cases.js b/jstests/auth/user_management_commands_edge_cases.js
index f8447ddd7c8..d197d0105b2 100644
--- a/jstests/auth/user_management_commands_edge_cases.js
+++ b/jstests/auth/user_management_commands_edge_cases.js
@@ -257,7 +257,6 @@ function runTest(conn) {
assert.throws(function() {
db.getUser(['user1']);
});
-
})();
(function testDropUser() {
diff --git a/jstests/auth/user_management_commands_lib.js b/jstests/auth/user_management_commands_lib.js
index f05987c2b1d..3bea79ab955 100644
--- a/jstests/auth/user_management_commands_lib.js
+++ b/jstests/auth/user_management_commands_lib.js
@@ -118,11 +118,11 @@ function runAllUserManagementCommandsTests(conn, writeConcern) {
testUserAdmin.grantRolesToUser('spencer',
[
- 'readWrite',
- 'dbAdmin',
- {role: 'readWrite', db: 'test'},
- {role: 'testRole', db: 'test'},
- 'readWrite'
+ 'readWrite',
+ 'dbAdmin',
+ {role: 'readWrite', db: 'test'},
+ {role: 'testRole', db: 'test'},
+ 'readWrite'
],
writeConcern);
@@ -142,9 +142,9 @@ function runAllUserManagementCommandsTests(conn, writeConcern) {
testUserAdmin.revokeRolesFromUser(
'spencer',
[
- 'readWrite',
- {role: 'dbAdmin', db: 'test2'}, // role user doesnt have
- "testRole"
+ 'readWrite',
+        {role: 'dbAdmin', db: 'test2'},  // role the user doesn't have
+ "testRole"
],
writeConcern);
@@ -169,7 +169,6 @@ function runAllUserManagementCommandsTests(conn, writeConcern) {
db.getRole('testRole');
});
assert.commandFailedWithCode(db.adminCommand('connPoolSync'), ErrorCodes.Unauthorized);
-
})();
(function testUsersInfo() {
diff --git a/jstests/auth/user_management_commands_mechanisms.js b/jstests/auth/user_management_commands_mechanisms.js
index f0d8a2dbd12..98c806f194b 100644
--- a/jstests/auth/user_management_commands_mechanisms.js
+++ b/jstests/auth/user_management_commands_mechanisms.js
@@ -2,227 +2,215 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
-
- let mongod = MongoRunner.runMongod(
- {auth: "", setParameter: "authenticationMechanisms=SCRAM-SHA-1,SCRAM-SHA-256,PLAIN"});
- assert(mongod);
- const admin = mongod.getDB('admin');
- const test = mongod.getDB('test');
-
- function checkUser(userid, passwd, haveSCRAMSHA1, haveSCRAMSHA256) {
- function checkCredentialRecord(creds, hashLen, saltLen, itCount) {
- assert.eq(creds.iterationCount, itCount);
- assert.eq(creds.salt.length, saltLen);
- assert.eq(creds.storedKey.length, hashLen);
- assert.eq(creds.serverKey.length, hashLen);
- }
- function checkLogin(mech, digestOK, nodigestOK) {
- assert(test.auth({user: userid, pwd: passwd, mechanism: mech}));
+'use strict';
+
+let mongod = MongoRunner.runMongod(
+ {auth: "", setParameter: "authenticationMechanisms=SCRAM-SHA-1,SCRAM-SHA-256,PLAIN"});
+assert(mongod);
+const admin = mongod.getDB('admin');
+const test = mongod.getDB('test');
+
+function checkUser(userid, passwd, haveSCRAMSHA1, haveSCRAMSHA256) {
+ function checkCredentialRecord(creds, hashLen, saltLen, itCount) {
+ assert.eq(creds.iterationCount, itCount);
+ assert.eq(creds.salt.length, saltLen);
+ assert.eq(creds.storedKey.length, hashLen);
+ assert.eq(creds.serverKey.length, hashLen);
+ }
+ function checkLogin(mech, digestOK, nodigestOK) {
+ assert(test.auth({user: userid, pwd: passwd, mechanism: mech}));
+ test.logout();
+ assert.eq(digestOK,
+ test.auth({user: userid, pwd: passwd, mechanism: mech, digestPassword: true}));
+ if (digestOK) {
test.logout();
- assert.eq(
- digestOK,
- test.auth({user: userid, pwd: passwd, mechanism: mech, digestPassword: true}));
- if (digestOK) {
- test.logout();
- }
- assert.eq(
- nodigestOK,
- test.auth({user: userid, pwd: passwd, mechanism: mech, digestPassword: false}));
- if (nodigestOK) {
- test.logout();
- }
}
-
- const user = admin.system.users.findOne({_id: ('test.' + userid)});
- assert.eq(user.credentials.hasOwnProperty('SCRAM-SHA-1'), haveSCRAMSHA1);
- assert.eq(user.credentials.hasOwnProperty('SCRAM-SHA-256'), haveSCRAMSHA256);
-
- // usersInfo contains correct mechanisms for the user
- const userInfo = assert.commandWorked(test.runCommand({usersInfo: userid}));
- assert(Array.isArray(userInfo.users[0].mechanisms));
- assert.eq(userInfo.users[0].mechanisms.includes('SCRAM-SHA-1'), haveSCRAMSHA1);
- assert.eq(userInfo.users[0].mechanisms.includes('SCRAM-SHA-256'), haveSCRAMSHA256);
-
- // usersInfo with showCredentials shows correct mechanisms and credentials
- const userInfoWithCredentials =
- assert.commandWorked(test.runCommand({usersInfo: userid, showCredentials: true}));
- print(tojson(userInfoWithCredentials));
- assert.eq(userInfoWithCredentials.users[0].credentials.hasOwnProperty('SCRAM-SHA-1'),
- haveSCRAMSHA1);
- assert.eq(userInfoWithCredentials.users[0].credentials.hasOwnProperty('SCRAM-SHA-256'),
- haveSCRAMSHA256);
- assert(Array.isArray(userInfoWithCredentials.users[0].mechanisms));
- assert.eq(userInfoWithCredentials.users[0].mechanisms.includes('SCRAM-SHA-1'),
- haveSCRAMSHA1);
- assert.eq(userInfoWithCredentials.users[0].mechanisms.includes('SCRAM-SHA-256'),
- haveSCRAMSHA256);
-
- if (haveSCRAMSHA1) {
- checkCredentialRecord(user.credentials['SCRAM-SHA-1'], 28, 24, 10000);
- checkLogin('SCRAM-SHA-1', true, false);
- checkLogin('PLAIN', false, true);
- }
- if (haveSCRAMSHA256) {
- checkCredentialRecord(user.credentials['SCRAM-SHA-256'], 44, 40, 15000);
- checkLogin('SCRAM-SHA-256', false, true);
- checkLogin('PLAIN', false, true);
+ assert.eq(nodigestOK,
+ test.auth({user: userid, pwd: passwd, mechanism: mech, digestPassword: false}));
+ if (nodigestOK) {
+ test.logout();
}
}
- admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(admin.auth('admin', 'pass'));
-
- // Unknown mechanism.
- assert.throws(function() {
- test.createUser({
- user: 'shalala',
- pwd: 'pass',
- roles: jsTest.basicUserRoles,
- mechanisms: ['SCRAM-SHA-1', 'SCRAM-SHA-LA-LA'],
- });
- });
-
- // By default, users are created with both SCRAM variants.
- test.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
- checkUser('user', 'pass', true, true);
+ const user = admin.system.users.findOne({_id: ('test.' + userid)});
+ assert.eq(user.credentials.hasOwnProperty('SCRAM-SHA-1'), haveSCRAMSHA1);
+ assert.eq(user.credentials.hasOwnProperty('SCRAM-SHA-256'), haveSCRAMSHA256);
+
+ // usersInfo contains correct mechanisms for the user
+ const userInfo = assert.commandWorked(test.runCommand({usersInfo: userid}));
+ assert(Array.isArray(userInfo.users[0].mechanisms));
+ assert.eq(userInfo.users[0].mechanisms.includes('SCRAM-SHA-1'), haveSCRAMSHA1);
+ assert.eq(userInfo.users[0].mechanisms.includes('SCRAM-SHA-256'), haveSCRAMSHA256);
+
+ // usersInfo with showCredentials shows correct mechanisms and credentials
+ const userInfoWithCredentials =
+ assert.commandWorked(test.runCommand({usersInfo: userid, showCredentials: true}));
+ print(tojson(userInfoWithCredentials));
+ assert.eq(userInfoWithCredentials.users[0].credentials.hasOwnProperty('SCRAM-SHA-1'),
+ haveSCRAMSHA1);
+ assert.eq(userInfoWithCredentials.users[0].credentials.hasOwnProperty('SCRAM-SHA-256'),
+ haveSCRAMSHA256);
+ assert(Array.isArray(userInfoWithCredentials.users[0].mechanisms));
+ assert.eq(userInfoWithCredentials.users[0].mechanisms.includes('SCRAM-SHA-1'), haveSCRAMSHA1);
+ assert.eq(userInfoWithCredentials.users[0].mechanisms.includes('SCRAM-SHA-256'),
+ haveSCRAMSHA256);
+
+ if (haveSCRAMSHA1) {
+ checkCredentialRecord(user.credentials['SCRAM-SHA-1'], 28, 24, 10000);
+ checkLogin('SCRAM-SHA-1', true, false);
+ checkLogin('PLAIN', false, true);
+ }
+ if (haveSCRAMSHA256) {
+ checkCredentialRecord(user.credentials['SCRAM-SHA-256'], 44, 40, 15000);
+ checkLogin('SCRAM-SHA-256', false, true);
+ checkLogin('PLAIN', false, true);
+ }
+}
- // Request SHA1 only.
- test.createUser(
- {user: 'sha1user', pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: ['SCRAM-SHA-1']});
- checkUser('sha1user', 'pass', true, false);
+admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+assert(admin.auth('admin', 'pass'));
- // Request SHA256 only.
+// Unknown mechanism.
+assert.throws(function() {
test.createUser({
- user: 'sha256user',
+ user: 'shalala',
pwd: 'pass',
roles: jsTest.basicUserRoles,
- mechanisms: ['SCRAM-SHA-256']
+ mechanisms: ['SCRAM-SHA-1', 'SCRAM-SHA-LA-LA'],
});
- checkUser('sha256user', 'pass', false, true);
+});
- // Fail passing an empty mechanisms field.
- assert.throws(function() {
- test.createUser(
- {user: 'userNoMech', pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: []});
- });
+// By default, users are created with both SCRAM variants.
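+// (The variants issued appear to track the SCRAM mechanisms enabled via the
+// authenticationMechanisms server parameter; the PLAIN logins exercised below
+// verify against these stored SCRAM credentials rather than separate ones.)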
+test.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+checkUser('user', 'pass', true, true);
- // Repeat above, but request client-side digesting.
- // Only the SCRAM-SHA-1 exclusive version should succeed.
+// Request SHA1 only.
+test.createUser(
+ {user: 'sha1user', pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: ['SCRAM-SHA-1']});
+checkUser('sha1user', 'pass', true, false);
- assert.throws(function() {
- test.createUser({
- user: 'user2',
- pwd: 'pass',
- roles: jsTest.basicUserRoles,
- passwordDisgestor: 'client'
- });
- });
+// Request SHA256 only.
+test.createUser(
+ {user: 'sha256user', pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: ['SCRAM-SHA-256']});
+checkUser('sha256user', 'pass', false, true);
+
+// Fail passing an empty mechanisms field.
+assert.throws(function() {
+ test.createUser(
+ {user: 'userNoMech', pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: []});
+});
+
+// Repeat above, but request client-side digesting.
+// Only the SCRAM-SHA-1 exclusive version should succeed.
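+// (SCRAM-SHA-1 secrets derive from an MD5 digest of "user:mongo:pwd" that a
+// client can compute itself; SCRAM-SHA-256 applies SASLprep to the raw
+// password server-side, so client-side digesting is refused for it.)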
+assert.throws(function() {
+ test.createUser(
+        {user: 'user2', pwd: 'pass', roles: jsTest.basicUserRoles, passwordDigestor: 'client'});
+});
+
+test.createUser({
+ user: 'sha1user2',
+ pwd: 'pass',
+ roles: jsTest.basicUserRoles,
+ mechanisms: ['SCRAM-SHA-1'],
+ passwordDigestor: 'client'
+});
+checkUser('sha1user2', 'pass', true, false);
+
+assert.throws(function() {
test.createUser({
- user: 'sha1user2',
+ user: 'sha256user2',
pwd: 'pass',
roles: jsTest.basicUserRoles,
- mechanisms: ['SCRAM-SHA-1'],
+ mechanisms: ['SCRAM-SHA-256'],
passwordDigestor: 'client'
});
- checkUser('sha1user2', 'pass', true, false);
-
- assert.throws(function() {
- test.createUser({
- user: 'sha256user2',
- pwd: 'pass',
- roles: jsTest.basicUserRoles,
- mechanisms: ['SCRAM-SHA-256'],
- passwordDigestor: 'client'
- });
- });
-
- // Update original 1/256 user to just sha-1.
- test.updateUser('user', {pwd: 'pass1', mechanisms: ['SCRAM-SHA-1']});
- checkUser('user', 'pass1', true, false);
-
- // Then flip to 256-only
- test.updateUser('user', {pwd: 'pass256', mechanisms: ['SCRAM-SHA-256']});
- checkUser('user', 'pass256', false, true);
-
- // And back to (default) all.
- test.updateUser('user', {pwd: 'passAll'});
- checkUser('user', 'passAll', true, true);
-
- // Trim out mechanisms without changing password.
- test.updateUser('user', {mechanisms: ['SCRAM-SHA-256']});
- checkUser('user', 'passAll', false, true);
-
- // Fail when mechanisms is not a subset of the current user.
- assert.throws(function() {
- test.updateUser('user', {mechanisms: ['SCRAM-SHA-1']});
- });
-
- // Fail when passing an empty mechanisms field.
- assert.throws(function() {
- test.updateUser('user', {pwd: 'passEmpty', mechanisms: []});
- });
-
- // Succeed if we're using SHA-1 only.
- test.createUser(
- {user: "\u2168", pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: ['SCRAM-SHA-1']});
- checkUser("\u2168", 'pass', true, false);
-
- // Demonstrate that usersInfo returns all users with mechanisms lists
- const allUsersInfo = assert.commandWorked(test.runCommand({usersInfo: 1}));
- allUsersInfo.users.forEach(function(userObj) {
- assert(Array.isArray(userObj.mechanisms));
- });
-
- // Demonstrate that usersInfo can return all users with credentials
- const allUsersInfoWithCredentials =
- assert.commandWorked(test.runCommand({usersInfo: 1, showCredentials: true}));
- allUsersInfoWithCredentials.users.forEach(function(userObj) {
- assert(userObj.credentials !== undefined);
- assert(!Array.isArray(userObj.credentials));
- assert(userObj.mechanisms !== undefined);
- assert(Array.isArray(userObj.mechanisms));
- });
-
- // Demonstrate that usersInfo can find SCRAM-SHA-1 users
- const allSCRAMSHA1UsersInfo =
- assert.commandWorked(test.runCommand({usersInfo: 1, filter: {mechanisms: "SCRAM-SHA-1"}}));
- let foundUsers = [];
- allSCRAMSHA1UsersInfo.users.forEach(function(userObj) {
- foundUsers.push(userObj.user);
- });
- assert.eq(["sha1user", "sha1user2", "\u2168"], foundUsers);
-
- // Demonstrate that usersInfo can find SCRAM-SHA-256 users
- const allSCRAMSHA256UsersInfo = assert.commandWorked(
- test.runCommand({usersInfo: 1, filter: {mechanisms: "SCRAM-SHA-256"}}));
- foundUsers = [];
- allSCRAMSHA256UsersInfo.users.forEach(function(userObj) {
- foundUsers.push(userObj.user);
- });
- assert.eq(["sha256user", "user"], foundUsers);
-
- MongoRunner.stopMongod(mongod);
-
- // Ensure mechanisms can be enabled and disabled.
- mongod = MongoRunner.runMongod({
- auth: "",
- setParameter: "authenticationMechanisms=SCRAM-SHA-1",
- restart: mongod,
- noCleanData: true
- });
- assert(mongod.getDB("test").auth("sha1user", "pass"));
- assert(!mongod.getDB("test").auth("sha256user", "pass"));
- MongoRunner.stopMongod(mongod);
- mongod = MongoRunner.runMongod({
- auth: "",
- setParameter: "authenticationMechanisms=SCRAM-SHA-256",
- restart: mongod,
- noCleanData: true
- });
- assert(!mongod.getDB("test").auth("sha1user", "pass"));
- assert(mongod.getDB("test").auth("sha256user", "pass"));
- MongoRunner.stopMongod(mongod);
-
+});
+
+// Update original 1/256 user to just sha-1.
+test.updateUser('user', {pwd: 'pass1', mechanisms: ['SCRAM-SHA-1']});
+checkUser('user', 'pass1', true, false);
+
+// Then flip to 256-only
+test.updateUser('user', {pwd: 'pass256', mechanisms: ['SCRAM-SHA-256']});
+checkUser('user', 'pass256', false, true);
+
+// And back to (default) all.
+test.updateUser('user', {pwd: 'passAll'});
+checkUser('user', 'passAll', true, true);
+
+// Trim out mechanisms without changing password.
+test.updateUser('user', {mechanisms: ['SCRAM-SHA-256']});
+checkUser('user', 'passAll', false, true);
+
+// Fail when mechanisms is not a subset of the current user.
+assert.throws(function() {
+ test.updateUser('user', {mechanisms: ['SCRAM-SHA-1']});
+});
+
+// Fail when passing an empty mechanisms field.
+assert.throws(function() {
+ test.updateUser('user', {pwd: 'passEmpty', mechanisms: []});
+});
+
+// Succeed if we're using SHA-1 only.
+test.createUser(
+ {user: "\u2168", pwd: 'pass', roles: jsTest.basicUserRoles, mechanisms: ['SCRAM-SHA-1']});
+checkUser("\u2168", 'pass', true, false);
+
+// Demonstrate that usersInfo returns all users with mechanisms lists
+const allUsersInfo = assert.commandWorked(test.runCommand({usersInfo: 1}));
+allUsersInfo.users.forEach(function(userObj) {
+ assert(Array.isArray(userObj.mechanisms));
+});
+
+// Demonstrate that usersInfo can return all users with credentials
+const allUsersInfoWithCredentials =
+ assert.commandWorked(test.runCommand({usersInfo: 1, showCredentials: true}));
+allUsersInfoWithCredentials.users.forEach(function(userObj) {
+ assert(userObj.credentials !== undefined);
+ assert(!Array.isArray(userObj.credentials));
+ assert(userObj.mechanisms !== undefined);
+ assert(Array.isArray(userObj.mechanisms));
+});
+
+// Demonstrate that usersInfo can find SCRAM-SHA-1 users
+const allSCRAMSHA1UsersInfo =
+ assert.commandWorked(test.runCommand({usersInfo: 1, filter: {mechanisms: "SCRAM-SHA-1"}}));
+let foundUsers = [];
+allSCRAMSHA1UsersInfo.users.forEach(function(userObj) {
+ foundUsers.push(userObj.user);
+});
+assert.eq(["sha1user", "sha1user2", "\u2168"], foundUsers);
+
+// Demonstrate that usersInfo can find SCRAM-SHA-256 users
+const allSCRAMSHA256UsersInfo =
+ assert.commandWorked(test.runCommand({usersInfo: 1, filter: {mechanisms: "SCRAM-SHA-256"}}));
+foundUsers = [];
+allSCRAMSHA256UsersInfo.users.forEach(function(userObj) {
+ foundUsers.push(userObj.user);
+});
+assert.eq(["sha256user", "user"], foundUsers);
+
+MongoRunner.stopMongod(mongod);
+
+// Ensure mechanisms can be enabled and disabled.
+mongod = MongoRunner.runMongod({
+ auth: "",
+ setParameter: "authenticationMechanisms=SCRAM-SHA-1",
+ restart: mongod,
+ noCleanData: true
+});
+assert(mongod.getDB("test").auth("sha1user", "pass"));
+assert(!mongod.getDB("test").auth("sha256user", "pass"));
+MongoRunner.stopMongod(mongod);
+mongod = MongoRunner.runMongod({
+ auth: "",
+ setParameter: "authenticationMechanisms=SCRAM-SHA-256",
+ restart: mongod,
+ noCleanData: true
+});
+assert(!mongod.getDB("test").auth("sha1user", "pass"));
+assert(mongod.getDB("test").auth("sha256user", "pass"));
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/auth/user_management_commands_sharded_wc_1.js b/jstests/auth/user_management_commands_sharded_wc_1.js
index f5dd2222636..675efba731a 100644
--- a/jstests/auth/user_management_commands_sharded_wc_1.js
+++ b/jstests/auth/user_management_commands_sharded_wc_1.js
@@ -3,13 +3,13 @@
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/auth/user_management_commands_lib.js');
+load('jstests/auth/user_management_commands_lib.js');
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
- runAllUserManagementCommandsTests(st.s, {w: 1});
- st.stop();
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
+runAllUserManagementCommandsTests(st.s, {w: 1});
+st.stop();
})();
diff --git a/jstests/auth/user_management_commands_sharded_wc_majority.js b/jstests/auth/user_management_commands_sharded_wc_majority.js
index e06f4b578c0..d9e0a75d543 100644
--- a/jstests/auth/user_management_commands_sharded_wc_majority.js
+++ b/jstests/auth/user_management_commands_sharded_wc_majority.js
@@ -1,13 +1,13 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- load('jstests/auth/user_management_commands_lib.js');
+load('jstests/auth/user_management_commands_lib.js');
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
- runAllUserManagementCommandsTests(st.s, {w: 'majority', wtimeout: 60 * 1000});
- st.stop();
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
+runAllUserManagementCommandsTests(st.s, {w: 'majority', wtimeout: 60 * 1000});
+st.stop();
})();
diff --git a/jstests/auth/user_management_commands_standalone.js b/jstests/auth/user_management_commands_standalone.js
index 192f5968aa2..4f55c3dda81 100644
--- a/jstests/auth/user_management_commands_standalone.js
+++ b/jstests/auth/user_management_commands_standalone.js
@@ -1,9 +1,9 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/auth/user_management_commands_lib.js');
+load('jstests/auth/user_management_commands_lib.js');
- var conn = MongoRunner.runMongod({auth: '', useHostname: false});
- runAllUserManagementCommandsTests(conn);
- MongoRunner.stopMongod(conn);
+var conn = MongoRunner.runMongod({auth: '', useHostname: false});
+runAllUserManagementCommandsTests(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/auth/user_special_chars.js b/jstests/auth/user_special_chars.js
index fc968bc30b7..85ef75b48af 100644
--- a/jstests/auth/user_special_chars.js
+++ b/jstests/auth/user_special_chars.js
@@ -2,66 +2,65 @@
// Test creating and authenticating users with special characters.
(function() {
- var conn = MongoRunner.runMongod({auth: ''});
+var conn = MongoRunner.runMongod({auth: ''});
- var adminDB = conn.getDB('admin');
- adminDB.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+var adminDB = conn.getDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
- var testUserSpecialCharacters = function() {
+var testUserSpecialCharacters = function() {
+    // Create a user with special characters and make sure it can auth.
+ assert(adminDB.auth('admin', 'pass'));
+ adminDB.createUser(
+ {user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(adminDB.logout());
- // Create a user with special characters, make sure it can auth.
- assert(adminDB.auth('admin', 'pass'));
- adminDB.createUser(
- {user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass', roles: jsTest.adminUserRoles});
- assert(adminDB.logout());
+ assert(adminDB.auth({user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass'}));
+ assert(adminDB.logout());
+};
+testUserSpecialCharacters();
- assert(adminDB.auth({user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass'}));
- assert(adminDB.logout());
- };
- testUserSpecialCharacters();
+var testUserAndDatabaseAtSymbolConflation = function() {
+ // Create a pair of users and databases such that their string representations are
+ // identical.
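+    // (For example, user "a@b" on database "c" and user "a" on database "b@c"
+    // both stringify to "a@b@c".)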
+ assert(adminDB.auth('admin', 'pass'));
- var testUserAndDatabaseAtSymbolConflation = function() {
- // Create a pair of users and databases such that their string representations are
- // identical.
- assert(adminDB.auth('admin', 'pass'));
+ var bcDB = conn.getDB('b@c');
+ bcDB.createUser({user: 'a', pwd: 'pass2', roles: [{role: 'readWrite', db: 'b@c'}]});
- var bcDB = conn.getDB('b@c');
- bcDB.createUser({user: 'a', pwd: 'pass2', roles: [{role: 'readWrite', db: 'b@c'}]});
+ var cDB = conn.getDB('c');
+ cDB.createUser({user: 'a@b', pwd: 'pass1', roles: [{role: 'readWrite', db: 'c'}]});
- var cDB = conn.getDB('c');
- cDB.createUser({user: 'a@b', pwd: 'pass1', roles: [{role: 'readWrite', db: 'c'}]});
+ assert(adminDB.logout());
- assert(adminDB.logout());
+ // Ensure they cannot authenticate to the wrong database.
+ assert(!bcDB.auth('a@b', 'pass1'));
+ assert(!bcDB.auth('a@b', 'pass2'));
+ assert(!cDB.auth('a', 'pass1'));
+ assert(!cDB.auth('a', 'pass2'));
- // Ensure they cannot authenticate to the wrong database.
- assert(!bcDB.auth('a@b', 'pass1'));
- assert(!bcDB.auth('a@b', 'pass2'));
- assert(!cDB.auth('a', 'pass1'));
- assert(!cDB.auth('a', 'pass2'));
+ // Ensure that they can both successfully authenticate to their correct database.
+ assert(cDB.auth('a@b', 'pass1'));
+ assert.writeOK(cDB.col.insert({data: 1}));
+ assert.writeError(bcDB.col.insert({data: 2}));
+ assert(cDB.logout());
- // Ensure that they can both successfully authenticate to their correct database.
- assert(cDB.auth('a@b', 'pass1'));
- assert.writeOK(cDB.col.insert({data: 1}));
- assert.writeError(bcDB.col.insert({data: 2}));
- assert(cDB.logout());
+ assert(bcDB.auth('a', 'pass2'));
+ assert.writeOK(bcDB.col.insert({data: 3}));
+ assert.writeError(cDB.col.insert({data: 4}));
+ assert(bcDB.logout());
- assert(bcDB.auth('a', 'pass2'));
- assert.writeOK(bcDB.col.insert({data: 3}));
- assert.writeError(cDB.col.insert({data: 4}));
- assert(bcDB.logout());
+ // Ensure that the user cache permits both users to log in at the same time
+ assert(cDB.auth('a@b', 'pass1'));
+ assert(bcDB.auth('a', 'pass2'));
+ assert(cDB.logout());
+ assert(bcDB.logout());
- // Ensure that the user cache permits both users to log in at the same time
- assert(cDB.auth('a@b', 'pass1'));
- assert(bcDB.auth('a', 'pass2'));
- assert(cDB.logout());
- assert(bcDB.logout());
+ assert(bcDB.auth('a', 'pass2'));
+ assert(cDB.auth('a@b', 'pass1'));
+ assert(cDB.logout());
+ assert(bcDB.logout());
+};
+testUserAndDatabaseAtSymbolConflation();
- assert(bcDB.auth('a', 'pass2'));
- assert(cDB.auth('a@b', 'pass1'));
- assert(cDB.logout());
- assert(bcDB.logout());
- };
- testUserAndDatabaseAtSymbolConflation();
-
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/auth/usersInfo.js b/jstests/auth/usersInfo.js
index fdd4a1b0a5f..81b9b4ee870 100644
--- a/jstests/auth/usersInfo.js
+++ b/jstests/auth/usersInfo.js
@@ -1,47 +1,46 @@
// Test behavior and edge cases in usersInfo
(function() {
- 'use strict';
-
- function runTest(conn) {
- let db = conn.getDB("test");
- let emptyDB = conn.getDB("test2");
- let otherDB = conn.getDB("other");
-
- const userCount = 200;
- for (let i = 0; i < userCount; ++i) {
- assert.commandWorked(db.runCommand({createUser: "user" + i, pwd: "pwd", roles: []}));
- }
- assert.commandWorked(otherDB.runCommand({createUser: "otherUser", pwd: "pwd", roles: []}));
-
- // Check info for all users on the "test" database.
- const allTestInfo = assert.commandWorked(db.runCommand({usersInfo: 1}));
- assert.eq(userCount, allTestInfo.users.length);
-
- // Check we can find a particular user on the "test" database.
- assert.eq(1, assert.commandWorked(db.runCommand({usersInfo: "user12"})).users.length);
- assert.eq(1,
- assert.commandWorked(db.runCommand({usersInfo: {user: "user12", db: "test"}}))
- .users.length);
- assert.eq(0,
- assert.commandWorked(db.runCommand({usersInfo: {user: "user12", db: "test2"}}))
- .users.length);
- assert.eq(0, assert.commandWorked(emptyDB.runCommand({usersInfo: "user12"})).users.length);
-
- // No users are found on a database without users.
- assert.eq(0, assert.commandWorked(emptyDB.runCommand({usersInfo: 1})).users.length);
-
- // Check that we can find records for all users on all databases.
- const allInfo = assert.commandWorked(db.runCommand({usersInfo: {forAllDBs: true}}));
- assert.eq(userCount + 1, allInfo.users.length);
- }
+'use strict';
- const m = MongoRunner.runMongod();
- runTest(m);
- MongoRunner.stopMongod(m);
+function runTest(conn) {
+ let db = conn.getDB("test");
+ let emptyDB = conn.getDB("test2");
+ let otherDB = conn.getDB("other");
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st =
- new ShardingTest({shards: 1, mongos: 1, config: 1, other: {shardAsReplicaSet: false}});
- runTest(st.s0);
- st.stop();
+ const userCount = 200;
+ for (let i = 0; i < userCount; ++i) {
+ assert.commandWorked(db.runCommand({createUser: "user" + i, pwd: "pwd", roles: []}));
+ }
+ assert.commandWorked(otherDB.runCommand({createUser: "otherUser", pwd: "pwd", roles: []}));
+
+ // Check info for all users on the "test" database.
+ const allTestInfo = assert.commandWorked(db.runCommand({usersInfo: 1}));
+ assert.eq(userCount, allTestInfo.users.length);
+
+ // Check we can find a particular user on the "test" database.
+ assert.eq(1, assert.commandWorked(db.runCommand({usersInfo: "user12"})).users.length);
+ assert.eq(1,
+ assert.commandWorked(db.runCommand({usersInfo: {user: "user12", db: "test"}}))
+ .users.length);
+ assert.eq(0,
+ assert.commandWorked(db.runCommand({usersInfo: {user: "user12", db: "test2"}}))
+ .users.length);
+ assert.eq(0, assert.commandWorked(emptyDB.runCommand({usersInfo: "user12"})).users.length);
+
+ // No users are found on a database without users.
+ assert.eq(0, assert.commandWorked(emptyDB.runCommand({usersInfo: 1})).users.length);
+
+ // Check that we can find records for all users on all databases.
+ const allInfo = assert.commandWorked(db.runCommand({usersInfo: {forAllDBs: true}}));
+ assert.eq(userCount + 1, allInfo.users.length);
+}
+
+const m = MongoRunner.runMongod();
+runTest(m);
+MongoRunner.stopMongod(m);
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({shards: 1, mongos: 1, config: 1, other: {shardAsReplicaSet: false}});
+runTest(st.s0);
+st.stop();
}());
diff --git a/jstests/auth/validate_auth_schema_on_startup.js b/jstests/auth/validate_auth_schema_on_startup.js
index 764308a3644..e4c6c50fdcc 100644
--- a/jstests/auth/validate_auth_schema_on_startup.js
+++ b/jstests/auth/validate_auth_schema_on_startup.js
@@ -6,47 +6,46 @@
*/
(function() {
- const dbpath = MongoRunner.dataPath + "validateAuthSchemaOnStartup/";
- resetDbpath(dbpath);
- const dbName = "validateAuthSchemaOnStartup";
- const authSchemaColl = "system.version";
+const dbpath = MongoRunner.dataPath + "validateAuthSchemaOnStartup/";
+resetDbpath(dbpath);
+const dbName = "validateAuthSchemaOnStartup";
+const authSchemaColl = "system.version";
- let mongod = MongoRunner.runMongod({dbpath: dbpath, auth: ""});
- let adminDB = mongod.getDB('admin');
+let mongod = MongoRunner.runMongod({dbpath: dbpath, auth: ""});
+let adminDB = mongod.getDB('admin');
- // Create a user.
- adminDB.createUser(
- {user: "root", pwd: "root", roles: [{role: 'userAdminAnyDatabase', db: 'admin'}]});
- assert(adminDB.auth("root", "root"));
+// Create a user.
+adminDB.createUser(
+ {user: "root", pwd: "root", roles: [{role: 'userAdminAnyDatabase', db: 'admin'}]});
+assert(adminDB.auth("root", "root"));
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- // Start without auth to corrupt the authSchema document.
- mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true});
- adminDB = mongod.getDB('admin');
+// Start without auth to corrupt the authSchema document.
+mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true});
+adminDB = mongod.getDB('admin');
- let currentVersion = adminDB[authSchemaColl].findOne({_id: 'authSchema'}).currentVersion;
+let currentVersion = adminDB[authSchemaColl].findOne({_id: 'authSchema'}).currentVersion;
- // Invalidate the authSchema document.
- assert.commandWorked(
- adminDB[authSchemaColl].update({_id: 'authSchema'}, {currentVersion: 'asdf'}));
- MongoRunner.stopMongod(mongod);
+// Invalidate the authSchema document.
+assert.commandWorked(adminDB[authSchemaColl].update({_id: 'authSchema'}, {currentVersion: 'asdf'}));
+MongoRunner.stopMongod(mongod);
- // Confirm start up fails, even without --auth.
- assert.eq(null, MongoRunner.runMongod({dbpath: dbpath, noCleanData: true}));
+// Confirm start up fails, even without --auth.
+assert.eq(null, MongoRunner.runMongod({dbpath: dbpath, noCleanData: true}));
- // Confirm startup works with the flag to disable validation so the document can be repaired.
- mongod = MongoRunner.runMongod(
- {dbpath: dbpath, noCleanData: true, setParameter: "startupAuthSchemaValidation=false"});
- adminDB = mongod.getDB('admin');
- assert.commandWorked(
- adminDB[authSchemaColl].update({_id: 'authSchema'}, {currentVersion: currentVersion}));
- MongoRunner.stopMongod(mongod);
+// Confirm startup works with the flag to disable validation so the document can be repaired.
+mongod = MongoRunner.runMongod(
+ {dbpath: dbpath, noCleanData: true, setParameter: "startupAuthSchemaValidation=false"});
+adminDB = mongod.getDB('admin');
+assert.commandWorked(
+ adminDB[authSchemaColl].update({_id: 'authSchema'}, {currentVersion: currentVersion}));
+MongoRunner.stopMongod(mongod);
- // Confirm everything is normal again.
- mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true, auth: ""});
- adminDB = mongod.getDB('admin');
- assert(adminDB.auth("root", "root"));
+// Confirm everything is normal again.
+mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true, auth: ""});
+adminDB = mongod.getDB('admin');
+assert(adminDB.auth("root", "root"));
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/auth/views_authz.js b/jstests/auth/views_authz.js
index 080a4b2bfcd..6223312249c 100644
--- a/jstests/auth/views_authz.js
+++ b/jstests/auth/views_authz.js
@@ -4,155 +4,142 @@
* @tags: [requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- function runTest(conn) {
- // Create the admin user.
- let adminDB = conn.getDB("admin");
- assert.commandWorked(
- adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
- assert.eq(1, adminDB.auth("admin", "admin"));
+function runTest(conn) {
+ // Create the admin user.
+ let adminDB = conn.getDB("admin");
+ assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+ assert.eq(1, adminDB.auth("admin", "admin"));
- const viewsDBName = "views_authz";
- let viewsDB = adminDB.getSiblingDB(viewsDBName);
- viewsDB.dropAllUsers();
- viewsDB.logout();
+ const viewsDBName = "views_authz";
+ let viewsDB = adminDB.getSiblingDB(viewsDBName);
+ viewsDB.dropAllUsers();
+ viewsDB.logout();
- // Create a user who can read, create and modify a view 'view' and a read a namespace
- // 'permitted' but does not have access to 'forbidden'.
- assert.commandWorked(viewsDB.runCommand({
- createRole: "readWriteView",
- privileges: [
- {
- resource: {db: viewsDBName, collection: "view"},
- actions: ["find", "createCollection", "collMod"]
- },
- {resource: {db: viewsDBName, collection: "view2"}, actions: ["find"]},
- {resource: {db: viewsDBName, collection: "permitted"}, actions: ["find"]}
- ],
- roles: []
- }));
- assert.commandWorked(
- viewsDB.runCommand({createUser: "viewUser", pwd: "pwd", roles: ["readWriteView"]}));
+    // Create a user who can read, create and modify a view 'view' and read a namespace
+ // 'permitted' but does not have access to 'forbidden'.
+ assert.commandWorked(viewsDB.runCommand({
+ createRole: "readWriteView",
+ privileges: [
+ {
+ resource: {db: viewsDBName, collection: "view"},
+ actions: ["find", "createCollection", "collMod"]
+ },
+ {resource: {db: viewsDBName, collection: "view2"}, actions: ["find"]},
+ {resource: {db: viewsDBName, collection: "permitted"}, actions: ["find"]}
+ ],
+ roles: []
+ }));
+ assert.commandWorked(
+ viewsDB.runCommand({createUser: "viewUser", pwd: "pwd", roles: ["readWriteView"]}));
- adminDB.logout();
- assert.eq(1, viewsDB.auth("viewUser", "pwd"));
+ adminDB.logout();
+ assert.eq(1, viewsDB.auth("viewUser", "pwd"));
- const lookupStage = {
- $lookup: {from: "forbidden", localField: "x", foreignField: "x", as: "y"}
- };
- const graphLookupStage = {
- $graphLookup: {
- from: "forbidden",
- startWith: [],
- connectFromField: "x",
- connectToField: "x",
- as: "y"
- }
- };
+ const lookupStage = {$lookup: {from: "forbidden", localField: "x", foreignField: "x", as: "y"}};
+ const graphLookupStage = {
+ $graphLookup:
+ {from: "forbidden", startWith: [], connectFromField: "x", connectToField: "x", as: "y"}
+ };
- // You cannot create a view if you have both the 'createCollection' and 'find' actions on
- // that view but not the 'find' action on all of the dependent namespaces.
- assert.commandFailedWithCode(viewsDB.createView("view", "forbidden", []),
- ErrorCodes.Unauthorized,
- "created a readable view on an unreadable collection");
- assert.commandFailedWithCode(
- viewsDB.createView("view", "permitted", [lookupStage]),
- ErrorCodes.Unauthorized,
- "created a readable view on an unreadable collection via $lookup");
- assert.commandFailedWithCode(
- viewsDB.createView("view", "permitted", [graphLookupStage]),
- ErrorCodes.Unauthorized,
- "created a readable view on an unreadable collection via $graphLookup");
- assert.commandFailedWithCode(
- viewsDB.createView("view", "permitted", [{$facet: {a: [lookupStage]}}]),
- ErrorCodes.Unauthorized,
- "created a readable view on an unreadable collection via $lookup in a $facet");
- assert.commandFailedWithCode(
- viewsDB.createView("view", "permitted", [{$facet: {b: [graphLookupStage]}}]),
- ErrorCodes.Unauthorized,
- "created a readable view on an unreadable collection via $graphLookup in a $facet");
+ // You cannot create a view if you have both the 'createCollection' and 'find' actions on
+ // that view but not the 'find' action on all of the dependent namespaces.
+ assert.commandFailedWithCode(viewsDB.createView("view", "forbidden", []),
+ ErrorCodes.Unauthorized,
+ "created a readable view on an unreadable collection");
+ assert.commandFailedWithCode(viewsDB.createView("view", "permitted", [lookupStage]),
+ ErrorCodes.Unauthorized,
+ "created a readable view on an unreadable collection via $lookup");
+ assert.commandFailedWithCode(
+ viewsDB.createView("view", "permitted", [graphLookupStage]),
+ ErrorCodes.Unauthorized,
+ "created a readable view on an unreadable collection via $graphLookup");
+ assert.commandFailedWithCode(
+ viewsDB.createView("view", "permitted", [{$facet: {a: [lookupStage]}}]),
+ ErrorCodes.Unauthorized,
+ "created a readable view on an unreadable collection via $lookup in a $facet");
+ assert.commandFailedWithCode(
+ viewsDB.createView("view", "permitted", [{$facet: {b: [graphLookupStage]}}]),
+ ErrorCodes.Unauthorized,
+ "created a readable view on an unreadable collection via $graphLookup in a $facet");
- assert.commandWorked(viewsDB.createView("view", "permitted", [{$match: {x: 1}}]));
+ assert.commandWorked(viewsDB.createView("view", "permitted", [{$match: {x: 1}}]));
- // You cannot modify a view if you have both the 'collMod' and 'find' actions on that view
- // but not the 'find' action on all of the dependent namespaces.
- assert.commandFailedWithCode(
- viewsDB.runCommand({collMod: "view", viewOn: "forbidden", pipeline: [{$match: {}}]}),
- ErrorCodes.Unauthorized,
- "modified a view to read an unreadable collection");
- assert.commandFailedWithCode(
- viewsDB.runCommand({collMod: "view", viewOn: "permitted", pipeline: [lookupStage]}),
- ErrorCodes.Unauthorized,
- "modified a view to read an unreadable collection via $lookup");
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {collMod: "view", viewOn: "permitted", pipeline: [graphLookupStage]}),
- ErrorCodes.Unauthorized,
- "modified a view to read an unreadable collection via $graphLookup");
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {collMod: "view", viewOn: "permitted", pipeline: [{$facet: {a: [lookupStage]}}]}),
- ErrorCodes.Unauthorized,
- "modified a view to read an unreadable collection via $lookup in a $facet");
- assert.commandFailedWithCode(
- viewsDB.runCommand({
- collMod: "view",
- viewOn: "permitted",
- pipeline: [{$facet: {b: [graphLookupStage]}}]
- }),
- ErrorCodes.Unauthorized,
- "modified a view to read an unreadable collection via $graphLookup in a $facet");
+ // You cannot modify a view if you have both the 'collMod' and 'find' actions on that view
+ // but not the 'find' action on all of the dependent namespaces.
+ assert.commandFailedWithCode(
+ viewsDB.runCommand({collMod: "view", viewOn: "forbidden", pipeline: [{$match: {}}]}),
+ ErrorCodes.Unauthorized,
+ "modified a view to read an unreadable collection");
+ assert.commandFailedWithCode(
+ viewsDB.runCommand({collMod: "view", viewOn: "permitted", pipeline: [lookupStage]}),
+ ErrorCodes.Unauthorized,
+ "modified a view to read an unreadable collection via $lookup");
+ assert.commandFailedWithCode(
+ viewsDB.runCommand({collMod: "view", viewOn: "permitted", pipeline: [graphLookupStage]}),
+ ErrorCodes.Unauthorized,
+ "modified a view to read an unreadable collection via $graphLookup");
+ assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {collMod: "view", viewOn: "permitted", pipeline: [{$facet: {a: [lookupStage]}}]}),
+ ErrorCodes.Unauthorized,
+ "modified a view to read an unreadable collection via $lookup in a $facet");
+ assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {collMod: "view", viewOn: "permitted", pipeline: [{$facet: {b: [graphLookupStage]}}]}),
+ ErrorCodes.Unauthorized,
+ "modified a view to read an unreadable collection via $graphLookup in a $facet");
- // When auth is enabled, users must specify both "viewOn" and "pipeline" when running
- // collMod on a view; specifying only one or the other is not allowed. Without both the
- // "viewOn" and "pipeline" specified, authorization checks cannot determine if the users
- // have the necessary privileges.
- assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view", pipeline: []}),
- ErrorCodes.InvalidOptions,
- "modified a view without having to specify 'viewOn'");
- assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view", viewOn: "other"}),
- ErrorCodes.InvalidOptions,
- "modified a view without having to specify 'pipeline'");
+ // When auth is enabled, users must specify both "viewOn" and "pipeline" when running
+ // collMod on a view; specifying only one or the other is not allowed. Without both the
+ // "viewOn" and "pipeline" specified, authorization checks cannot determine if the users
+ // have the necessary privileges.
+ assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view", pipeline: []}),
+ ErrorCodes.InvalidOptions,
+ "modified a view without having to specify 'viewOn'");
+ assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view", viewOn: "other"}),
+ ErrorCodes.InvalidOptions,
+ "modified a view without having to specify 'pipeline'");
- // Create a view on a forbidden collection and populate it.
- assert.eq(1, adminDB.auth("admin", "admin"));
- assert.commandWorked(viewsDB.createView("view2", "forbidden", []));
- for (let i = 0; i < 10; i++) {
- assert.writeOK(viewsDB.forbidden.insert({x: 1}));
- }
- adminDB.logout();
-
- // Performing a find on a readable view returns a cursor that allows us to perform a getMore
- // even if the underlying collection is unreadable.
- assert.commandFailedWithCode(viewsDB.runCommand({find: "forbidden"}),
- ErrorCodes.Unauthorized,
- "successfully performed a find on an unreadable namespace");
- let res = viewsDB.runCommand({find: "view2", batchSize: 1});
- assert.commandWorked(res, "could not perform a find on a readable view");
- assert.eq(res.cursor.ns,
- "views_authz.view2",
- "performing find on a view does not return a cursor on the view namespace");
- assert.commandWorked(viewsDB.runCommand({getMore: res.cursor.id, collection: "view2"}),
- "could not perform getMore on a readable view");
+ // Create a view on a forbidden collection and populate it.
+ assert.eq(1, adminDB.auth("admin", "admin"));
+ assert.commandWorked(viewsDB.createView("view2", "forbidden", []));
+ for (let i = 0; i < 10; i++) {
+ assert.writeOK(viewsDB.forbidden.insert({x: 1}));
}
+ adminDB.logout();
+
+ // Performing a find on a readable view returns a cursor that allows us to perform a getMore
+ // even if the underlying collection is unreadable.
+ assert.commandFailedWithCode(viewsDB.runCommand({find: "forbidden"}),
+ ErrorCodes.Unauthorized,
+ "successfully performed a find on an unreadable namespace");
+ let res = viewsDB.runCommand({find: "view2", batchSize: 1});
+ assert.commandWorked(res, "could not perform a find on a readable view");
+ assert.eq(res.cursor.ns,
+ "views_authz.view2",
+ "performing find on a view does not return a cursor on the view namespace");
+ assert.commandWorked(viewsDB.runCommand({getMore: res.cursor.id, collection: "view2"}),
+ "could not perform getMore on a readable view");
+}
- // Run the test on a standalone.
- let mongod = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+// Run the test on a standalone.
+let mongod = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
- // Run the test on a sharded cluster.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- let cluster = new ShardingTest({
- shards: 1,
- mongos: 1,
- keyFile: "jstests/libs/key1",
- other: {shardOptions: {auth: ""}, shardAsReplicaSet: false}
- });
- runTest(cluster);
- cluster.stop();
+// Run the test on a sharded cluster.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+let cluster = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: {auth: ""}, shardAsReplicaSet: false}
+});
+runTest(cluster);
+cluster.stop();
}());
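A minimal sketch of the authorization model views_authz.js exercises, separate from the patch itself: reads against a view are authorized on the view namespace alone, while the 'find' check on the underlying namespaces happens only at createView/collMod time. Assumes a mongo shell connected to a deployment started with --auth and an already-authenticated admin; the names 'demo_views', 'reports', and 'rawEvents' are illustrative, not taken from the test.

const demoDB = db.getSiblingDB("demo_views");
// As the admin, create a view and a role that may read only the view.
assert.commandWorked(demoDB.createView("reports", "rawEvents", [{$match: {visible: true}}]));
demoDB.createRole({
    role: "viewOnlyReader",
    privileges: [{resource: {db: "demo_views", collection: "reports"}, actions: ["find"]}],
    roles: []
});
demoDB.createUser({user: "reader", pwd: "pwd", roles: ["viewOnlyReader"]});
db.getSiblingDB("admin").logout();  // assumes the admin credentials live on the 'admin' DB

// As the restricted user, the view is readable but its source collection is not.
assert.eq(1, demoDB.auth("reader", "pwd"));
assert.commandWorked(demoDB.runCommand({find: "reports"}));
assert.commandFailedWithCode(demoDB.runCommand({find: "rawEvents"}), ErrorCodes.Unauthorized);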
diff --git a/jstests/change_streams/apply_ops.js b/jstests/change_streams/apply_ops.js
index 80065805c78..fa232f77f1b 100644
--- a/jstests/change_streams/apply_ops.js
+++ b/jstests/change_streams/apply_ops.js
@@ -2,160 +2,162 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- const otherCollName = "change_stream_apply_ops_2";
- const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
- assertDropAndRecreateCollection(db, otherCollName);
-
- const otherDbName = "change_stream_apply_ops_db";
- const otherDbCollName = "someColl";
- assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
-
- // Insert a document that gets deleted as part of the transaction.
- const kDeletedDocumentId = 0;
- coll.insert({_id: kDeletedDocumentId, a: "I was here before the transaction"},
- {writeConcern: {w: "majority"}});
-
- let cst = new ChangeStreamTest(db);
- let changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}],
- collection: coll,
- doNotModifyInPassthroughs:
- true // A collection drop only invalidates single-collection change streams.
- });
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(db.getName());
- const sessionColl = sessionDb[coll.getName()];
-
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
- assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
-
- // One insert on a collection that we're not watching. This should be skipped by the
- // single-collection changestream.
- assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
-
- // One insert on a collection in a different database. This should be skipped by the single
- // collection and single-db changestreams.
- assert.commandWorked(
- session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
-
- assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
-
- assert.commandWorked(sessionColl.deleteOne({_id: kDeletedDocumentId}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Do applyOps on the collection that we care about. This is an "external" applyOps, though
- // (not run as part of a transaction) so its entries should be skipped in the change
- // stream. This checks that applyOps that don't have an 'lsid' and 'txnNumber' field do not
- // get unwound.
- assert.commandWorked(db.runCommand({
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 3, a: "SHOULD NOT READ THIS"}},
- ]
- }));
-
- // Drop the collection. This will trigger an "invalidate" event at the end of the stream.
- assert.commandWorked(db.runCommand({drop: coll.getName()}));
-
- // Define the set of changes expected for the single-collection case per the operations above.
- const expectedChanges = [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 1},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {a: 1}},
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: kDeletedDocumentId},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "delete",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- operationType: "drop",
- ns: {db: db.getName(), coll: coll.getName()},
- },
- ];
-
- // Verify that the stream returns the expected sequence of changes.
- const changes =
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
- // Single collection change stream should also be invalidated by the drop.
- cst.assertNextChangesEqual({
- cursor: changeStream,
- expectedChanges: [{operationType: "invalidate"}],
- expectInvalidate: true
- });
-
- // Obtain the clusterTime from the first change.
- const startTime = changes[0].clusterTime;
-
- // Add an entry for the insert on db.otherColl into expectedChanges.
- expectedChanges.splice(2, 0, {
- documentKey: {_id: 111},
- fullDocument: {_id: 111, a: "Doc on other collection"},
- ns: {db: db.getName(), coll: otherCollName},
+"use strict";
+
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+const otherCollName = "change_stream_apply_ops_2";
+const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
+assertDropAndRecreateCollection(db, otherCollName);
+
+const otherDbName = "change_stream_apply_ops_db";
+const otherDbCollName = "someColl";
+assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
+
+// Insert a document that gets deleted as part of the transaction.
+const kDeletedDocumentId = 0;
+coll.insert({_id: kDeletedDocumentId, a: "I was here before the transaction"},
+ {writeConcern: {w: "majority"}});
+
+let cst = new ChangeStreamTest(db);
+let changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}],
+ collection: coll,
+ doNotModifyInPassthroughs:
+ true // A collection drop only invalidates single-collection change streams.
+});
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(db.getName());
+const sessionColl = sessionDb[coll.getName()];
+
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
+assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
+
+// One insert on a collection that we're not watching. This should be skipped by the
+// single-collection change stream.
+assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
+
+// One insert on a collection in a different database. This should be skipped by the
+// single-collection and single-db change streams.
+assert.commandWorked(
+ session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
+
+assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
+
+assert.commandWorked(sessionColl.deleteOne({_id: kDeletedDocumentId}));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Do applyOps on the collection that we care about. This is an "external" applyOps (one not
+// run as part of a transaction), so its entries should be skipped in the change stream. This
+// checks that applyOps entries that lack 'lsid' and 'txnNumber' fields do not get unwound.
+assert.commandWorked(db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, a: "SHOULD NOT READ THIS"}},
+ ]
+}));
+
+// Drop the collection. This will trigger an "invalidate" event at the end of the stream.
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
+
+// Define the set of changes expected for the single-collection case per the operations above.
+const expectedChanges = [
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-db stream returns the expected sequence of changes, including the insert
- // on the other collection but NOT the changes on the other DB or the manual applyOps.
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {startAtOperationTime: startTime}}, {$project: {"lsid.uid": 0}}],
- collection: 1
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
-
- // Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
- expectedChanges.splice(3, 0, {
- documentKey: {_id: 222},
- fullDocument: {_id: 222, a: "Doc on other DB"},
- ns: {db: otherDbName, coll: otherDbCollName},
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-cluster stream returns the expected sequence of changes, including the
- // inserts on the other collection and the other database, but NOT the manual applyOps.
- cst = new ChangeStreamTest(db.getSiblingDB("admin"));
- changeStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {startAtOperationTime: startTime, allChangesForCluster: true}},
- {$project: {"lsid.uid": 0}}
- ],
- collection: 1
- });
+ },
+ {
+ documentKey: {_id: 1},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {a: 1}},
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ },
+ {
+ documentKey: {_id: kDeletedDocumentId},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "delete",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ },
+ {
+ operationType: "drop",
+ ns: {db: db.getName(), coll: coll.getName()},
+ },
+];
+
+// Verify that the stream returns the expected sequence of changes.
+const changes =
cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
-
- cst.cleanUp();
+// Single collection change stream should also be invalidated by the drop.
+cst.assertNextChangesEqual({
+ cursor: changeStream,
+ expectedChanges: [{operationType: "invalidate"}],
+ expectInvalidate: true
+});
+
+// Obtain the clusterTime from the first change.
+const startTime = changes[0].clusterTime;
+
+// Add an entry for the insert on db.otherColl into expectedChanges.
+expectedChanges.splice(2, 0, {
+ documentKey: {_id: 111},
+ fullDocument: {_id: 111, a: "Doc on other collection"},
+ ns: {db: db.getName(), coll: otherCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-db stream returns the expected sequence of changes, including the insert
+// on the other collection but NOT the changes on the other DB or the manual applyOps.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {startAtOperationTime: startTime}}, {$project: {"lsid.uid": 0}}],
+ collection: 1
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
+
+// Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
+expectedChanges.splice(3, 0, {
+ documentKey: {_id: 222},
+ fullDocument: {_id: 222, a: "Doc on other DB"},
+ ns: {db: otherDbName, coll: otherDbCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-cluster stream returns the expected sequence of changes, including the
+// inserts on the other collection and the other database, but NOT the manual applyOps.
+cst = new ChangeStreamTest(db.getSiblingDB("admin"));
+changeStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {startAtOperationTime: startTime, allChangesForCluster: true}},
+ {$project: {"lsid.uid": 0}}
+ ],
+ collection: 1
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
+
+cst.cleanUp();
}());
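For orientation, a condensed sketch of the behavior apply_ops.js pins down, using the plain shell watch() helper instead of the ChangeStreamTest fixture: writes committed inside a transaction are unwound from the applyOps oplog entry into individual change events, each tagged with the session's lsid and txnNumber. Assumes a replica set; the collection name 'txn_demo' is illustrative.

const txnColl = db.txn_demo;
const txnStream = txnColl.watch();
const demoSession = db.getMongo().startSession({causalConsistency: false});
const sessColl = demoSession.getDatabase(db.getName())[txnColl.getName()];

demoSession.startTransaction({writeConcern: {w: "majority"}});
assert.commandWorked(sessColl.insert({_id: "a"}));
assert.commandWorked(sessColl.insert({_id: "b"}));
demoSession.commitTransaction();

// Both inserts surface as separate events carrying the transaction identifiers.
for (const expectedId of ["a", "b"]) {
    assert.soon(() => txnStream.hasNext());
    const event = txnStream.next();
    assert.eq(event.operationType, "insert");
    assert.eq(event.documentKey._id, expectedId);
    assert(event.hasOwnProperty("txnNumber"), tojson(event));
}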
diff --git a/jstests/change_streams/apply_ops_resumability.js b/jstests/change_streams/apply_ops_resumability.js
index 7c61ed15792..bf581d40ee1 100644
--- a/jstests/change_streams/apply_ops_resumability.js
+++ b/jstests/change_streams/apply_ops_resumability.js
@@ -2,188 +2,188 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
- const otherCollName = "change_stream_apply_ops_2";
- assertDropAndRecreateCollection(db, otherCollName);
-
- const otherDbName = "change_stream_apply_ops_db";
- const otherDbCollName = "someColl";
- assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
-
- let cst = new ChangeStreamTest(db);
- let changeStream = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}], collection: coll});
-
- // Do an insert outside of a transaction.
- assert.commandWorked(coll.insert({_id: 0, a: 123}));
-
- // Open a session, and perform two writes within a transaction.
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(db.getName());
- const sessionColl = sessionDb[coll.getName()];
-
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
- assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
-
- // One insert on a collection that we're not watching. This should be skipped by the
- // single-collection change stream.
- assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
-
- // One insert on a collection in a different database. This should be skipped by the single
- // collection and single-db changestreams.
- assert.commandWorked(
- session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
-
- assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Now insert another document, not part of a transaction.
- assert.commandWorked(coll.insert({_id: 3, a: 123}));
-
- // Define the set of changes expected for the single-collection case per the operations above.
- const expectedChanges = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 123},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 1},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {a: 1}},
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 3},
- fullDocument: {_id: 3, a: 123},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- },
- ];
-
- //
- // Test behavior of single-collection change streams with apply ops.
- //
-
- // Verify that the stream returns the expected sequence of changes.
- const changes =
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
-
- // Record the first (non-transaction) change and the first in-transaction change.
- const nonTxnChange = changes[0], firstTxnChange = changes[1], secondTxnChange = changes[2];
-
- // Resume after the first non-transaction change. Be sure we see the documents from the
- // transaction again.
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: nonTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: coll
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(1)});
-
- // Resume after the first transaction change. Be sure we see the second change again.
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: coll
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(2)});
-
- // Try starting another change stream from the _last_ change caused by the transaction. Verify
- // that we can see the insert performed after the transaction was committed.
- let otherCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: coll,
- doNotModifyInPassthroughs: true // A collection drop only invalidates single-collection
- // change streams.
- });
- cst.assertNextChangesEqual({cursor: otherCursor, expectedChanges: expectedChanges.slice(3)});
-
- // Drop the collection. This will trigger a "drop" followed by an "invalidate" for the single
- // collection change stream.
- assert.commandWorked(db.runCommand({drop: coll.getName()}));
- let change = cst.getOneChange(otherCursor);
- assert.eq(change.operationType, "drop");
- assert.eq(change.ns, {db: db.getName(), coll: coll.getName()});
- change = cst.getOneChange(otherCursor, true);
- assert.eq(change.operationType, "invalidate");
-
- //
- // Test behavior of whole-db change streams with apply ops.
- //
-
- // For a whole-db or whole-cluster change stream, the collection drop should return a single
- // "drop" entry and not invalidate the stream.
- expectedChanges.push({operationType: "drop", ns: {db: db.getName(), coll: coll.getName()}});
-
- // Add an entry for the insert on db.otherColl into expectedChanges.
- expectedChanges.splice(3, 0, {
- documentKey: {_id: 111},
- fullDocument: {_id: 111, a: "Doc on other collection"},
- ns: {db: db.getName(), coll: otherCollName},
+"use strict";
+
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
+const otherCollName = "change_stream_apply_ops_2";
+assertDropAndRecreateCollection(db, otherCollName);
+
+const otherDbName = "change_stream_apply_ops_db";
+const otherDbCollName = "someColl";
+assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
+
+let cst = new ChangeStreamTest(db);
+let changeStream = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}], collection: coll});
+
+// Do an insert outside of a transaction.
+assert.commandWorked(coll.insert({_id: 0, a: 123}));
+
+// Open a session, and perform two writes within a transaction.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(db.getName());
+const sessionColl = sessionDb[coll.getName()];
+
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
+assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
+
+// One insert on a collection that we're not watching. This should be skipped by the
+// single-collection change stream.
+assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
+
+// One insert on a collection in a different database. This should be skipped by the
+// single-collection and single-db change streams.
+assert.commandWorked(
+ session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
+
+assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Now insert another document, not part of a transaction.
+assert.commandWorked(coll.insert({_id: 3, a: 123}));
+
+// Define the set of changes expected for the single-collection case per the operations above.
+const expectedChanges = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 123},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-db stream can be resumed from the middle of the transaction, and that it
- // will see all subsequent changes including the insert on the other collection but NOT the
- // changes on the other DB.
- changeStream = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: 1,
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
-
- // Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
- expectedChanges.splice(4, 0, {
- documentKey: {_id: 222},
- fullDocument: {_id: 222, a: "Doc on other DB"},
- ns: {db: otherDbName, coll: otherDbCollName},
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-cluster stream can be resumed from the middle of the transaction, and
- // that it will see all subsequent changes including the insert on the other collection and the
- // changes on the other DB.
- cst = new ChangeStreamTest(db.getSiblingDB("admin"));
- changeStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {resumeAfter: secondTxnChange._id, allChangesForCluster: true}},
- {$project: {"lsid.uid": 0}}
- ],
- collection: 1
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
-
- cst.cleanUp();
+ },
+ {
+ documentKey: {_id: 1},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {a: 1}},
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ },
+ {
+ documentKey: {_id: 3},
+ fullDocument: {_id: 3, a: 123},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ },
+];
+
+//
+// Test behavior of single-collection change streams with apply ops.
+//
+
+// Verify that the stream returns the expected sequence of changes.
+const changes =
+ cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
+
+// Record the first (non-transaction) change and the first in-transaction change.
+const nonTxnChange = changes[0], firstTxnChange = changes[1], secondTxnChange = changes[2];
+
+// Resume after the first non-transaction change. Be sure we see the documents from the
+// transaction again.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: nonTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: coll
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(1)});
+
+// Resume after the first transaction change. Be sure we see the second change again.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: coll
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(2)});
+
+// Try starting another change stream from the _last_ change caused by the transaction. Verify
+// that we can see the insert performed after the transaction was committed.
+let otherCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: coll,
+ doNotModifyInPassthroughs: true // A collection drop only invalidates single-collection
+ // change streams.
+});
+cst.assertNextChangesEqual({cursor: otherCursor, expectedChanges: expectedChanges.slice(3)});
+
+// Drop the collection. This will trigger a "drop" followed by an "invalidate" for the single
+// collection change stream.
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
+let change = cst.getOneChange(otherCursor);
+assert.eq(change.operationType, "drop");
+assert.eq(change.ns, {db: db.getName(), coll: coll.getName()});
+change = cst.getOneChange(otherCursor, true);
+assert.eq(change.operationType, "invalidate");
+
+//
+// Test behavior of whole-db change streams with apply ops.
+//
+
+// For a whole-db or whole-cluster change stream, the collection drop should return a single
+// "drop" entry and not invalidate the stream.
+expectedChanges.push({operationType: "drop", ns: {db: db.getName(), coll: coll.getName()}});
+
+// Add an entry for the insert on db.otherColl into expectedChanges.
+expectedChanges.splice(3, 0, {
+ documentKey: {_id: 111},
+ fullDocument: {_id: 111, a: "Doc on other collection"},
+ ns: {db: db.getName(), coll: otherCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-db stream can be resumed from the middle of the transaction, and that it
+// will see all subsequent changes including the insert on the other collection but NOT the
+// changes on the other DB.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: 1,
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
+
+// Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
+expectedChanges.splice(4, 0, {
+ documentKey: {_id: 222},
+ fullDocument: {_id: 222, a: "Doc on other DB"},
+ ns: {db: otherDbName, coll: otherDbCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-cluster stream can be resumed from the middle of the transaction, and
+// that it will see all subsequent changes including the insert on the other collection and the
+// changes on the other DB.
+cst = new ChangeStreamTest(db.getSiblingDB("admin"));
+changeStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {resumeAfter: secondTxnChange._id, allChangesForCluster: true}},
+ {$project: {"lsid.uid": 0}}
+ ],
+ collection: 1
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
+
+cst.cleanUp();
}());
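A compact sketch of the resume contract apply_ops_resumability.js relies on: the _id of any returned event is a resume token, and a stream opened with {resumeAfter: token} replays strictly later events, including the remainder of a transaction's unwound operations when the token points into its middle. Written with the shell watch() helper; 'resume_demo' is an illustrative name.

const resumeColl = db.resume_demo;
let resumeStream = resumeColl.watch();
assert.commandWorked(resumeColl.insert({_id: 1}));
assert.soon(() => resumeStream.hasNext());
const resumeToken = resumeStream.next()._id;  // the event's _id doubles as a resume token

assert.commandWorked(resumeColl.insert({_id: 2}));
resumeStream = resumeColl.watch([], {resumeAfter: resumeToken});
assert.soon(() => resumeStream.hasNext());
assert.eq(resumeStream.next().documentKey._id, 2);  // only events after the token replay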
diff --git a/jstests/change_streams/ban_from_lookup.js b/jstests/change_streams/ban_from_lookup.js
index b799c3ce169..45d3c692eea 100644
--- a/jstests/change_streams/ban_from_lookup.js
+++ b/jstests/change_streams/ban_from_lookup.js
@@ -2,23 +2,23 @@
* Test that the $changeStream stage cannot be used in a $lookup pipeline or sub-pipeline.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_lookup");
- const foreignColl = "unsharded";
+const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_lookup");
+const foreignColl = "unsharded";
- assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 1}));
- // Verify that we cannot create a $lookup using a pipeline which begins with $changeStream.
- assertErrorCode(
- coll, [{$lookup: {from: foreignColl, as: 'as', pipeline: [{$changeStream: {}}]}}], 51047);
+// Verify that we cannot create a $lookup using a pipeline which begins with $changeStream.
+assertErrorCode(
+ coll, [{$lookup: {from: foreignColl, as: 'as', pipeline: [{$changeStream: {}}]}}], 51047);
- // Verify that we cannot create a $lookup if its pipeline contains a sub-$lookup whose pipeline
- // begins with $changeStream.
- assertErrorCode(
+// Verify that we cannot create a $lookup if its pipeline contains a sub-$lookup whose pipeline
+// begins with $changeStream.
+assertErrorCode(
coll,
[{
$lookup: {
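The same restriction, spelled out as roughly the raw aggregate command the assertErrorCode helper issues: $changeStream is only legal as the first stage of a top-level pipeline, so a $lookup sub-pipeline beginning with it is rejected at parse time with code 51047, the code the test pins down. A sketch reusing the collection names from the test above.

assert.commandFailedWithCode(db.runCommand({
    aggregate: "change_stream_ban_from_lookup",
    pipeline: [{$lookup: {from: "unsharded", as: "as", pipeline: [{$changeStream: {}}]}}],
    cursor: {}
}),
                             51047);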
diff --git a/jstests/change_streams/ban_from_views.js b/jstests/change_streams/ban_from_views.js
index c06932e55b3..29f78710544 100644
--- a/jstests/change_streams/ban_from_views.js
+++ b/jstests/change_streams/ban_from_views.js
@@ -2,37 +2,37 @@
* Test that the $changeStream stage cannot be used in a view definition pipeline.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_views");
- assert.writeOK(coll.insert({_id: 1}));
+const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_views");
+assert.writeOK(coll.insert({_id: 1}));
- const normalViewName = "nonChangeStreamView";
- const csViewName = "changeStreamView";
+const normalViewName = "nonChangeStreamView";
+const csViewName = "changeStreamView";
- assertDropCollection(db, normalViewName);
- assertDropCollection(db, csViewName);
+assertDropCollection(db, normalViewName);
+assertDropCollection(db, csViewName);
- const csPipe = [{$changeStream: {}}];
+const csPipe = [{$changeStream: {}}];
- // Create one valid view for testing purposes.
- assert.commandWorked(db.runCommand(
- {create: normalViewName, viewOn: coll.getName(), pipeline: [{$match: {_id: 1}}]}));
+// Create one valid view for testing purposes.
+assert.commandWorked(db.runCommand(
+ {create: normalViewName, viewOn: coll.getName(), pipeline: [{$match: {_id: 1}}]}));
- // Verify that we cannot create a view using a pipeline which begins with $changeStream.
- assert.commandFailedWithCode(
- db.runCommand({create: csViewName, viewOn: coll.getName(), pipeline: csPipe}),
- ErrorCodes.OptionNotSupportedOnView);
+// Verify that we cannot create a view using a pipeline which begins with $changeStream.
+assert.commandFailedWithCode(
+ db.runCommand({create: csViewName, viewOn: coll.getName(), pipeline: csPipe}),
+ ErrorCodes.OptionNotSupportedOnView);
- // We also cannot update an existing view to use a $changeStream pipeline.
- assert.commandFailedWithCode(
- db.runCommand({collMod: normalViewName, viewOn: coll.getName(), pipeline: csPipe}),
- ErrorCodes.OptionNotSupportedOnView);
+// We also cannot update an existing view to use a $changeStream pipeline.
+assert.commandFailedWithCode(
+ db.runCommand({collMod: normalViewName, viewOn: coll.getName(), pipeline: csPipe}),
+ ErrorCodes.OptionNotSupportedOnView);
- // Verify change streams cannot be created on views.
- assert.commandFailedWithCode(
- db.runCommand({aggregate: normalViewName, pipeline: [{$changeStream: {}}], cursor: {}}),
- ErrorCodes.CommandNotSupportedOnView);
+// Verify change streams cannot be created on views.
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: normalViewName, pipeline: [{$changeStream: {}}], cursor: {}}),
+ ErrorCodes.CommandNotSupportedOnView);
})();
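For contrast, a sketch of the same checks through the shell's createView helper rather than raw runCommand; createView wraps the 'create' command, so it surfaces the identical OptionNotSupportedOnView code. The database name 'ban_from_views_demo' is illustrative.

const banDemoDB = db.getSiblingDB("ban_from_views_demo");
assert.writeOK(banDemoDB.src.insert({_id: 1}));
// A stored view pipeline may never begin with $changeStream.
assert.commandFailedWithCode(banDemoDB.createView("csView", "src", [{$changeStream: {}}]),
                             ErrorCodes.OptionNotSupportedOnView);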
diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js
index 396504f2439..6c03864cedd 100644
--- a/jstests/change_streams/change_stream.js
+++ b/jstests/change_streams/change_stream.js
@@ -3,268 +3,265 @@
// collection results in a failure in the secondary reads suite.
// @tags: [assumes_read_preference_unchanged]
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
- // assert[Valid|Invalid]ChangeStreamNss.
-
- const isMongos = FixtureHelpers.isMongos(db);
-
- // Drop and recreate the collections to be used in this set of tests.
- assertDropAndRecreateCollection(db, "t1");
- assertDropAndRecreateCollection(db, "t2");
-
- // Test that $changeStream only accepts an object as its argument.
- function checkArgFails(arg) {
- assert.commandFailedWithCode(
- db.runCommand({aggregate: "t1", pipeline: [{$changeStream: arg}], cursor: {}}), 50808);
- }
-
- checkArgFails(1);
- checkArgFails("invalid");
- checkArgFails(false);
- checkArgFails([1, 2, "invalid", {x: 1}]);
-
- // Test that a change stream cannot be opened on collections in the "admin", "config", or
- // "local" databases.
- assertInvalidChangeStreamNss("admin", "testColl");
- assertInvalidChangeStreamNss("config", "testColl");
- // Not allowed to access 'local' database through mongos.
- if (!isMongos) {
- assertInvalidChangeStreamNss("local", "testColl");
- }
-
- // Test that a change stream cannot be opened on 'system.' collections.
- assertInvalidChangeStreamNss(db.getName(), "system.users");
- assertInvalidChangeStreamNss(db.getName(), "system.profile");
- assertInvalidChangeStreamNss(db.getName(), "system.version");
-
- // Test that a change stream can be opened on namespaces with 'system' in the name, but not
- // considered an internal 'system dot' namespace.
- assertValidChangeStreamNss(db.getName(), "systemindexes");
- assertValidChangeStreamNss(db.getName(), "system_users");
-
- // Similar test but for DB names that are not considered internal.
- assert.writeOK(db.getSiblingDB("admincustomDB")["test"].insert({}));
- assertValidChangeStreamNss("admincustomDB");
-
- assert.writeOK(db.getSiblingDB("local_")["test"].insert({}));
- assertValidChangeStreamNss("local_");
-
- assert.writeOK(db.getSiblingDB("_config_")["test"].insert({}));
- assertValidChangeStreamNss("_config_");
-
- let cst = new ChangeStreamTest(db);
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-
- jsTestLog("Testing single insert");
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- assert.writeOK(db.t1.insert({_id: 0, a: 1}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: "test", coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- // Test that if there are no changes during a subsequent 'getMore', we return an empty batch.
- cursor = cst.getNextBatch(cursor);
- assert.eq(0, cursor.nextBatch.length, "Cursor had changes: " + tojson(cursor));
-
- jsTestLog("Testing second insert");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.insert({_id: 1, a: 2}));
- expected = {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, a: 2},
- ns: {db: "test", coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing update");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 0}, {_id: 0, a: 3}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 3},
- ns: {db: "test", coll: "t1"},
- operationType: "replace",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing update of another field");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 0}, {_id: 0, b: 3}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, b: 3},
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+
+const isMongos = FixtureHelpers.isMongos(db);
+
+// Drop and recreate the collections to be used in this set of tests.
+assertDropAndRecreateCollection(db, "t1");
+assertDropAndRecreateCollection(db, "t2");
+
+// Test that $changeStream only accepts an object as its argument.
+function checkArgFails(arg) {
+ assert.commandFailedWithCode(
+ db.runCommand({aggregate: "t1", pipeline: [{$changeStream: arg}], cursor: {}}), 50808);
+}
+
+checkArgFails(1);
+checkArgFails("invalid");
+checkArgFails(false);
+checkArgFails([1, 2, "invalid", {x: 1}]);
+
+// Test that a change stream cannot be opened on collections in the "admin", "config", or
+// "local" databases.
+assertInvalidChangeStreamNss("admin", "testColl");
+assertInvalidChangeStreamNss("config", "testColl");
+// Not allowed to access 'local' database through mongos.
+if (!isMongos) {
+ assertInvalidChangeStreamNss("local", "testColl");
+}
+
+// Test that a change stream cannot be opened on 'system.' collections.
+assertInvalidChangeStreamNss(db.getName(), "system.users");
+assertInvalidChangeStreamNss(db.getName(), "system.profile");
+assertInvalidChangeStreamNss(db.getName(), "system.version");
+
+// Test that a change stream can be opened on namespaces with 'system' in the name that are
+// not considered internal 'system dot' namespaces.
+assertValidChangeStreamNss(db.getName(), "systemindexes");
+assertValidChangeStreamNss(db.getName(), "system_users");
+
+// Similar test but for DB names that are not considered internal.
+assert.writeOK(db.getSiblingDB("admincustomDB")["test"].insert({}));
+assertValidChangeStreamNss("admincustomDB");
+
+assert.writeOK(db.getSiblingDB("local_")["test"].insert({}));
+assertValidChangeStreamNss("local_");
+
+assert.writeOK(db.getSiblingDB("_config_")["test"].insert({}));
+assertValidChangeStreamNss("_config_");
+
+let cst = new ChangeStreamTest(db);
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+
+jsTestLog("Testing single insert");
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: "test", coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+// Test that if there are no changes during a subsequent 'getMore', we return an empty batch.
+cursor = cst.getNextBatch(cursor);
+assert.eq(0, cursor.nextBatch.length, "Cursor had changes: " + tojson(cursor));
+
+jsTestLog("Testing second insert");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.insert({_id: 1, a: 2}));
+expected = {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, a: 2},
+ ns: {db: "test", coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing update");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 0}, {_id: 0, a: 3}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 3},
+ ns: {db: "test", coll: "t1"},
+ operationType: "replace",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing update of another field");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 0}, {_id: 0, b: 3}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, b: 3},
+ ns: {db: "test", coll: "t1"},
+ operationType: "replace",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing upsert");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 2}, {_id: 2, a: 4}, {upsert: true}));
+expected = {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, a: 4},
+ ns: {db: "test", coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing partial update with $inc");
+assert.writeOK(db.t1.insert({_id: 3, a: 5, b: 1}));
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 3}, {$inc: {b: 2}}));
+expected = {
+ documentKey: {_id: 3},
+ ns: {db: "test", coll: "t1"},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {b: 3}},
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing multi:true update");
+assert.writeOK(db.t1.insert({_id: 4, a: 0, b: 1}));
+assert.writeOK(db.t1.insert({_id: 5, a: 0, b: 1}));
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+expected = [
+ {
+ documentKey: {_id: 4},
ns: {db: "test", coll: "t1"},
- operationType: "replace",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing upsert");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 2}, {_id: 2, a: 4}, {upsert: true}));
- expected = {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, a: 4},
- ns: {db: "test", coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing partial update with $inc");
- assert.writeOK(db.t1.insert({_id: 3, a: 5, b: 1}));
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 3}, {$inc: {b: 2}}));
- expected = {
- documentKey: {_id: 3},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {b: 2}}
+ },
+ {
+ documentKey: {_id: 5},
ns: {db: "test", coll: "t1"},
operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 3}},
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing multi:true update");
- assert.writeOK(db.t1.insert({_id: 4, a: 0, b: 1}));
- assert.writeOK(db.t1.insert({_id: 5, a: 0, b: 1}));
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({a: 0}, {$set: {b: 2}}, {multi: true}));
- expected = [
- {
- documentKey: {_id: 4},
- ns: {db: "test", coll: "t1"},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 2}}
- },
- {
- documentKey: {_id: 5},
- ns: {db: "test", coll: "t1"},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 2}}
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- jsTestLog("Testing delete");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.remove({_id: 1}));
- expected = {
- documentKey: {_id: 1},
+ updateDescription: {removedFields: [], updatedFields: {b: 2}}
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+jsTestLog("Testing delete");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.remove({_id: 1}));
+expected = {
+ documentKey: {_id: 1},
+ ns: {db: "test", coll: "t1"},
+ operationType: "delete",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing justOne:false delete");
+assert.writeOK(db.t1.insert({_id: 6, a: 1, b: 1}));
+assert.writeOK(db.t1.insert({_id: 7, a: 1, b: 1}));
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.remove({a: 1}, {justOne: false}));
+expected = [
+ {
+ documentKey: {_id: 6},
+ ns: {db: "test", coll: "t1"},
+ operationType: "delete",
+ },
+ {
+ documentKey: {_id: 7},
ns: {db: "test", coll: "t1"},
operationType: "delete",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing justOne:false delete");
- assert.writeOK(db.t1.insert({_id: 6, a: 1, b: 1}));
- assert.writeOK(db.t1.insert({_id: 7, a: 1, b: 1}));
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.remove({a: 1}, {justOne: false}));
- expected = [
- {
- documentKey: {_id: 6},
- ns: {db: "test", coll: "t1"},
- operationType: "delete",
- },
- {
- documentKey: {_id: 7},
- ns: {db: "test", coll: "t1"},
- operationType: "delete",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- jsTestLog("Testing intervening write on another collection");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- let t2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t2});
- assert.writeOK(db.t2.insert({_id: 100, c: 1}));
- cst.assertNoChange(cursor);
- expected = {
- documentKey: {_id: 100},
- fullDocument: {_id: 100, c: 1},
- ns: {db: "test", coll: "t2"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: t2cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing drop of unrelated collection");
- assert.writeOK(db.dropping.insert({}));
- assertDropCollection(db, db.dropping.getName());
- // Should still see the previous change from t2, shouldn't see anything about 'dropping'.
-
- jsTestLog("Testing insert that looks like rename");
- assertDropCollection(db, "dne1");
- assertDropCollection(db, "dne2");
- const dne1cursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne1});
- const dne2cursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne2});
- assert.writeOK(db.t2.insert({_id: 101, renameCollection: "test.dne1", to: "test.dne2"}));
- cst.assertNoChange(dne1cursor);
- cst.assertNoChange(dne2cursor);
-
- if (!isMongos) {
- jsTestLog("Ensuring attempt to read with legacy operations fails.");
- db.getMongo().forceReadMode('legacy');
- const legacyCursor =
- db.tailable2.aggregate([{$changeStream: {}}], {cursor: {batchSize: 0}});
- assert.throws(function() {
- legacyCursor.next();
- }, [], "Legacy getMore expected to fail on changeStream cursor.");
- db.getMongo().forceReadMode('commands');
}
-
- jsTestLog("Testing resumability");
- assertDropAndRecreateCollection(db, "resume1");
-
- // Note we do not project away 'id.ts' as it is part of the resume token.
- let resumeCursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.resume1});
-
- // Insert a document and save the resulting change stream.
- assert.writeOK(db.resume1.insert({_id: 1}));
- const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
-
- jsTestLog("Testing resume after one document.");
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: db.resume1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
-
- jsTestLog("Inserting additional documents.");
- assert.writeOK(db.resume1.insert({_id: 2}));
- const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
- assert.writeOK(db.resume1.insert({_id: 3}));
- const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
-
- jsTestLog("Testing resume after first document of three.");
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: db.resume1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- jsTestLog("Testing resume after second document of three.");
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
- collection: db.resume1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- cst.cleanUp();
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+jsTestLog("Testing intervening write on another collection");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+let t2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t2});
+assert.writeOK(db.t2.insert({_id: 100, c: 1}));
+cst.assertNoChange(cursor);
+expected = {
+ documentKey: {_id: 100},
+ fullDocument: {_id: 100, c: 1},
+ ns: {db: "test", coll: "t2"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: t2cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing drop of unrelated collection");
+assert.writeOK(db.dropping.insert({}));
+assertDropCollection(db, db.dropping.getName());
+// Should still see the previous change from t2, shouldn't see anything about 'dropping'.
+
+jsTestLog("Testing insert that looks like rename");
+assertDropCollection(db, "dne1");
+assertDropCollection(db, "dne2");
+const dne1cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne1});
+const dne2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne2});
+assert.writeOK(db.t2.insert({_id: 101, renameCollection: "test.dne1", to: "test.dne2"}));
+cst.assertNoChange(dne1cursor);
+cst.assertNoChange(dne2cursor);
+
+if (!isMongos) {
+ jsTestLog("Ensuring attempt to read with legacy operations fails.");
+ db.getMongo().forceReadMode('legacy');
+ const legacyCursor = db.tailable2.aggregate([{$changeStream: {}}], {cursor: {batchSize: 0}});
+ assert.throws(function() {
+ legacyCursor.next();
+ }, [], "Legacy getMore expected to fail on changeStream cursor.");
+ db.getMongo().forceReadMode('commands');
+}
+
+jsTestLog("Testing resumability");
+assertDropAndRecreateCollection(db, "resume1");
+
+// Note we do not project away 'id.ts' as it is part of the resume token.
+let resumeCursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.resume1});
+
+// Insert a document and save the resulting change stream.
+assert.writeOK(db.resume1.insert({_id: 1}));
+const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+
+jsTestLog("Testing resume after one document.");
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: db.resume1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+
+jsTestLog("Inserting additional documents.");
+assert.writeOK(db.resume1.insert({_id: 2}));
+const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.writeOK(db.resume1.insert({_id: 3}));
+const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+
+jsTestLog("Testing resume after first document of three.");
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: db.resume1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+jsTestLog("Testing resume after second document of three.");
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
+ collection: db.resume1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+cst.cleanUp();
}());
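The resume sequence above is the canonical change-stream pattern: save the `_id` of a returned event (the resume token) and reopen the stream with `resumeAfter`. A minimal shell sketch of that pattern, without the ChangeStreamTest harness and assuming a collection `db.resume1` on a replica set:

// Open a stream and capture the resume token of the first event.
let stream = db.resume1.watch();
assert.writeOK(db.resume1.insert({_id: 1}));
assert.soon(() => stream.hasNext());
const resumeToken = stream.next()._id;

// Reopen the stream after that event; subsequent writes are replayed.
stream = db.resume1.watch([], {resumeAfter: resumeToken});
assert.writeOK(db.resume1.insert({_id: 2}));
assert.soon(() => stream.hasNext());
assert.eq(stream.next().fullDocument._id, 2);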
diff --git a/jstests/change_streams/collation.js b/jstests/change_streams/collation.js
index e99f6064b60..3d50b564711 100644
--- a/jstests/change_streams/collation.js
+++ b/jstests/change_streams/collation.js
@@ -3,344 +3,332 @@
* default collation, and uses the simple collation if none is provided.
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest' and
- // 'runCommandChangeStreamPassthroughAware'.
-
- let cst = new ChangeStreamTest(db);
-
- const caseInsensitive = {locale: "en_US", strength: 2};
-
- let caseInsensitiveCollection = "change_stream_case_insensitive";
- assertDropCollection(db, caseInsensitiveCollection);
-
- // Test that you can open a change stream before the collection exists, and it will use the
- // simple collation. Tag this stream as 'doNotModifyInPassthroughs', since only individual
- // collections have the concept of a default collation.
- const simpleCollationStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {}},
- {
- $match:
- {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
- },
- {$project: {docId: "$fullDocument._id"}}
- ],
- collection: caseInsensitiveCollection,
- doNotModifyInPassthroughs: true
- });
-
- // Create the collection with a non-default collation. The stream should continue to use the
- // simple collation.
- caseInsensitiveCollection =
- assertCreateCollection(db, caseInsensitiveCollection, {collation: caseInsensitive});
- assert.commandWorked(
- caseInsensitiveCollection.insert([{_id: "insert_one"}, {_id: "INSERT_TWO"}]));
- cst.assertNextChangesEqual(
- {cursor: simpleCollationStream, expectedChanges: [{docId: "INSERT_TWO"}]});
-
- const caseInsensitivePipeline = [
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest' and
+ // 'runCommandChangeStreamPassthroughAware'.
+
+let cst = new ChangeStreamTest(db);
+
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+
+let caseInsensitiveCollection = "change_stream_case_insensitive";
+assertDropCollection(db, caseInsensitiveCollection);
+
+// Test that you can open a change stream before the collection exists, and it will use the
+// simple collation. Tag this stream as 'doNotModifyInPassthroughs', since only individual
+// collections have the concept of a default collation.
+const simpleCollationStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}},
+ {$project: {docId: "$fullDocument._id"}}
+ ],
+ collection: caseInsensitiveCollection,
+ doNotModifyInPassthroughs: true
+});
+
+// Create the collection with a non-default collation. The stream should continue to use the
+// simple collation.
+caseInsensitiveCollection =
+ assertCreateCollection(db, caseInsensitiveCollection, {collation: caseInsensitive});
+assert.commandWorked(caseInsensitiveCollection.insert([{_id: "insert_one"}, {_id: "INSERT_TWO"}]));
+cst.assertNextChangesEqual(
+ {cursor: simpleCollationStream, expectedChanges: [{docId: "INSERT_TWO"}]});
+
+const caseInsensitivePipeline = [
+ {$changeStream: {}},
+ {$match: {"fullDocument.text": "abc"}},
+ {$project: {docId: "$documentKey._id"}}
+];
+
+// Test that $changeStream will not implicitly adopt the default collation of the collection on
+// which it is run. Tag this stream as 'doNotModifyInPassthroughs'; whole-db and cluster-wide
+// streams do not have default collations.
+const didNotInheritCollationStream = cst.startWatchingChanges({
+ pipeline: caseInsensitivePipeline,
+ collection: caseInsensitiveCollection,
+ doNotModifyInPassthroughs: true
+});
+// Test that a collation can be explicitly specified for the $changeStream. This does not need
+// to be tagged 'doNotModifyInPassthroughs', since whole-db and cluster-wide changeStreams will
+// use an explicit collation if present.
+let explicitCaseInsensitiveStream = cst.startWatchingChanges({
+ pipeline: caseInsensitivePipeline,
+ collection: caseInsensitiveCollection,
+ aggregateOptions: {collation: caseInsensitive}
+});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
+
+// 'didNotInheritCollationStream' should not have inherited the collection's case-insensitive
+// default collation, and should only see the second insert. 'explicitCaseInsensitiveStream'
+// should see both inserts.
+cst.assertNextChangesEqual({cursor: didNotInheritCollationStream, expectedChanges: [{docId: 1}]});
+cst.assertNextChangesEqual(
+ {cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 0}, {docId: 1}]});
+
+// Test that the collation does not apply to the scan over the oplog.
+const similarNameCollection = assertDropAndRecreateCollection(
+ db, "cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe", {collation: {locale: "en_US"}});
+
+// We must recreate the explicitCaseInsensitiveStream and set 'doNotModifyInPassthroughs'. Whole
+// db and cluster-wide streams use the simple collation while scanning the oplog, but they don't
+// filter the oplog by collection name. The subsequent $match stage which we inject into the
+// pipeline to filter for a specific collection will obey the pipeline's case-insensitive
+// collation, meaning that 'cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe' will match
+// 'change_stream_case_insensitive'.
+explicitCaseInsensitiveStream = cst.startWatchingChanges({
+ pipeline: caseInsensitivePipeline,
+ collection: caseInsensitiveCollection,
+ aggregateOptions: {collation: caseInsensitive},
+ doNotModifyInPassthroughs: true
+});
+
+assert.writeOK(similarNameCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(caseInsensitiveCollection.insert({_id: 2, text: "ABC"}));
+
+// The case-insensitive stream should not see the first insert (to the other collection), only
+// the second. We do not expect to see the insert in 'didNotInheritCollationStream'.
+cst.assertNextChangesEqual({cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 2}]});
+
+// Test that creating a collection without a collation does not invalidate any change streams
+// that were opened before the collection existed.
+(function() {
+let noCollationCollection = "change_stream_no_collation";
+assertDropCollection(db, noCollationCollection);
+
+const streamCreatedBeforeNoCollationCollection = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
+ collection: noCollationCollection
+});
+
+noCollationCollection = assertCreateCollection(db, noCollationCollection);
+assert.writeOK(noCollationCollection.insert({_id: 0}));
+
+cst.assertNextChangesEqual(
+ {cursor: streamCreatedBeforeNoCollationCollection, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a collection and explicitly specifying the simple collation does not
+// invalidate any change streams that were opened before the collection existed.
+(function() {
+let simpleCollationCollection = "change_stream_simple_collation";
+assertDropCollection(db, simpleCollationCollection);
+
+const streamCreatedBeforeSimpleCollationCollection = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
+ collection: simpleCollationCollection
+});
+
+simpleCollationCollection =
+ assertCreateCollection(db, simpleCollationCollection, {collation: {locale: "simple"}});
+assert.writeOK(simpleCollationCollection.insert({_id: 0}));
+
+cst.assertNextChangesEqual(
+ {cursor: streamCreatedBeforeSimpleCollationCollection, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a change stream with a non-default collation, then creating a collection
+// with the same collation will not invalidate the change stream.
+(function() {
+let frenchCollection = "change_stream_french_collation";
+assertDropCollection(db, frenchCollection);
+
+const frenchChangeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
+ aggregateOptions: {collation: {locale: "fr"}},
+ collection: frenchCollection
+});
+
+frenchCollection = assertCreateCollection(db, frenchCollection, {collation: {locale: "fr"}});
+assert.writeOK(frenchCollection.insert({_id: 0}));
+
+cst.assertNextChangesEqual({cursor: frenchChangeStream, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a change stream with a non-default collation, then creating a collection
+// with *a different* collation will not invalidate the change stream.
+(function() {
+let germanCollection = "change_stream_german_collation";
+assertDropCollection(db, germanCollection);
+
+const englishCaseInsensitiveStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {}},
+ {$match: {"fullDocument.text": "abc"}},
+ {$project: {docId: "$documentKey._id"}}
+ ],
+ aggregateOptions: {collation: caseInsensitive},
+ collection: germanCollection
+});
+
+germanCollection = assertCreateCollection(db, germanCollection, {collation: {locale: "de"}});
+assert.writeOK(germanCollection.insert({_id: 0, text: "aBc"}));
+
+cst.assertNextChangesEqual({cursor: englishCaseInsensitiveStream, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a change stream with a non-default collation against a collection that has
+// a non-simple default collation will use the collation specified on the operation.
+(function() {
+const caseInsensitiveCollection = assertDropAndRecreateCollection(
+ db, "change_stream_case_insensitive", {collation: caseInsensitive});
+
+const englishCaseSensitiveStream = cst.startWatchingChanges({
+ pipeline: [
{$changeStream: {}},
{$match: {"fullDocument.text": "abc"}},
{$project: {docId: "$documentKey._id"}}
- ];
-
- // Test that $changeStream will not implicitly adopt the default collation of the collection on
- // which it is run. Tag this stream as 'doNotModifyInPassthroughs'; whole-db and cluster-wide
- // streams do not have default collations.
- const didNotInheritCollationStream = cst.startWatchingChanges({
- pipeline: caseInsensitivePipeline,
- collection: caseInsensitiveCollection,
- doNotModifyInPassthroughs: true
- });
- // Test that a collation can be explicitly specified for the $changeStream. This does not need
- // to be tagged 'doNotModifyInPassthroughs', since whole-db and cluster-wide changeStreams will
- // use an explicit collation if present.
- let explicitCaseInsensitiveStream = cst.startWatchingChanges({
- pipeline: caseInsensitivePipeline,
- collection: caseInsensitiveCollection,
- aggregateOptions: {collation: caseInsensitive}
- });
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
-
- // 'didNotInheritCollationStream' should not have inherited the collection's case-insensitive
- // default collation, and should only see the second insert. 'explicitCaseInsensitiveStream'
- // should see both inserts.
- cst.assertNextChangesEqual(
- {cursor: didNotInheritCollationStream, expectedChanges: [{docId: 1}]});
- cst.assertNextChangesEqual(
- {cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 0}, {docId: 1}]});
-
- // Test that the collation does not apply to the scan over the oplog.
- const similarNameCollection = assertDropAndRecreateCollection(
- db, "cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe", {collation: {locale: "en_US"}});
-
- // We must recreate the explicitCaseInsensitiveStream and set 'doNotModifyInPassthroughs'. Whole
- // db and cluster-wide streams use the simple collation while scanning the oplog, but they don't
- // filter the oplog by collection name. The subsequent $match stage which we inject into the
- // pipeline to filter for a specific collection will obey the pipeline's case-insensitive
- // collation, meaning that 'cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe' will match
- // 'change_stream_case_insensitive'.
- explicitCaseInsensitiveStream = cst.startWatchingChanges({
- pipeline: caseInsensitivePipeline,
- collection: caseInsensitiveCollection,
- aggregateOptions: {collation: caseInsensitive},
- doNotModifyInPassthroughs: true
- });
-
- assert.writeOK(similarNameCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(caseInsensitiveCollection.insert({_id: 2, text: "ABC"}));
-
- // The case-insensitive stream should not see the first insert (to the other collection), only
- // the second. We do not expect to see the insert in 'didNotInheritCollationStream'.
- cst.assertNextChangesEqual(
- {cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 2}]});
-
- // Test that creating a collection without a collation does not invalidate any change streams
- // that were opened before the collection existed.
- (function() {
- let noCollationCollection = "change_stream_no_collation";
- assertDropCollection(db, noCollationCollection);
-
- const streamCreatedBeforeNoCollationCollection = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
- collection: noCollationCollection
- });
-
- noCollationCollection = assertCreateCollection(db, noCollationCollection);
- assert.writeOK(noCollationCollection.insert({_id: 0}));
-
- cst.assertNextChangesEqual(
- {cursor: streamCreatedBeforeNoCollationCollection, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a collection and explicitly specifying the simple collation does not
- // invalidate any change streams that were opened before the collection existed.
- (function() {
- let simpleCollationCollection = "change_stream_simple_collation";
- assertDropCollection(db, simpleCollationCollection);
-
- const streamCreatedBeforeSimpleCollationCollection = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
- collection: simpleCollationCollection
- });
-
- simpleCollationCollection =
- assertCreateCollection(db, simpleCollationCollection, {collation: {locale: "simple"}});
- assert.writeOK(simpleCollationCollection.insert({_id: 0}));
-
- cst.assertNextChangesEqual(
- {cursor: streamCreatedBeforeSimpleCollationCollection, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a change stream with a non-default collation, then creating a collection
- // with the same collation will not invalidate the change stream.
- (function() {
- let frenchCollection = "change_stream_french_collation";
- assertDropCollection(db, frenchCollection);
-
- const frenchChangeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
- aggregateOptions: {collation: {locale: "fr"}},
- collection: frenchCollection
- });
-
- frenchCollection =
- assertCreateCollection(db, frenchCollection, {collation: {locale: "fr"}});
- assert.writeOK(frenchCollection.insert({_id: 0}));
-
- cst.assertNextChangesEqual({cursor: frenchChangeStream, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a change stream with a non-default collation, then creating a collection
- // with *a different* collation will not invalidate the change stream.
- (function() {
- let germanCollection = "change_stream_german_collation";
- assertDropCollection(db, germanCollection);
-
- const englishCaseInsensitiveStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {}},
- {$match: {"fullDocument.text": "abc"}},
- {$project: {docId: "$documentKey._id"}}
- ],
- aggregateOptions: {collation: caseInsensitive},
- collection: germanCollection
- });
-
- germanCollection =
- assertCreateCollection(db, germanCollection, {collation: {locale: "de"}});
- assert.writeOK(germanCollection.insert({_id: 0, text: "aBc"}));
-
- cst.assertNextChangesEqual(
- {cursor: englishCaseInsensitiveStream, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a change stream with a non-default collation against a collection that has
- // a non-simple default collation will use the collation specified on the operation.
- (function() {
- const caseInsensitiveCollection = assertDropAndRecreateCollection(
- db, "change_stream_case_insensitive", {collation: caseInsensitive});
-
- const englishCaseSensitiveStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {}},
- {$match: {"fullDocument.text": "abc"}},
- {$project: {docId: "$documentKey._id"}}
- ],
- aggregateOptions: {collation: {locale: "en_US"}},
- collection: caseInsensitiveCollection
- });
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
-
- cst.assertNextChangesEqual(
- {cursor: englishCaseSensitiveStream, expectedChanges: [{docId: 1}]});
- }());
-
- // Test that collation is supported by the shell helper. Test that creating a change stream with
- // a non-default collation against a collection that has a simple default collation will use the
- // collation specified on the operation.
- (function() {
- const noCollationCollection =
- assertDropAndRecreateCollection(db, "change_stream_no_collation");
-
- const cursor = noCollationCollection.watch(
- [{$match: {"fullDocument.text": "abc"}}, {$project: {docId: "$documentKey._id"}}],
- {collation: caseInsensitive});
- assert(!cursor.hasNext());
- assert.writeOK(noCollationCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(noCollationCollection.insert({_id: 1, text: "abc"}));
- assert.soon(() => cursor.hasNext());
- assertChangeStreamEventEq(cursor.next(), {docId: 0});
- assert.soon(() => cursor.hasNext());
- assertChangeStreamEventEq(cursor.next(), {docId: 1});
- assert(!cursor.hasNext());
- }());
-
- // Test that we can resume a change stream on a collection that has been dropped without
- // requiring the user to explicitly specify the collation.
- (function() {
- const collName = "change_stream_case_insensitive";
- let caseInsensitiveCollection =
- assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
-
- let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
- {collation: caseInsensitive});
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
-
- assert.soon(() => changeStream.hasNext());
- const next = changeStream.next();
- assert.docEq(next.documentKey, {_id: 0});
- const resumeToken = next._id;
-
- // Insert a second document to see after resuming.
- assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
-
- // Drop the collection to invalidate the stream.
- assertDropCollection(db, collName);
-
- // Test that a $changeStream is allowed to resume on the dropped collection with an explicit
- // collation, even if it doesn't match the original collection's default collation.
- changeStream = caseInsensitiveCollection.watch(
- [{$match: {"fullDocument.text": "ABC"}}],
- {resumeAfter: resumeToken, collation: {locale: "simple"}});
-
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
-
- // Test that a pipeline without an explicit collation is allowed to resume the change stream
- // after the collection has been dropped, and it will use the simple collation. Do not
- // modify this in the passthrough suite(s) since only individual collections have the
- // concept of a default collation.
- const doNotModifyInPassthroughs = true;
- const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
- db,
- {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {},
- },
- doNotModifyInPassthroughs));
-
- changeStream = new DBCommandCursor(db, cmdRes);
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
- }());
-
- // Test that the default collation of a new version of the collection is not applied when
- // resuming a change stream from before a collection drop.
- (function() {
- const collName = "change_stream_case_insensitive";
- let caseInsensitiveCollection =
- assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
-
- let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
- {collation: caseInsensitive});
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
-
- assert.soon(() => changeStream.hasNext());
- const next = changeStream.next();
- assert.docEq(next.documentKey, {_id: 0});
- const resumeToken = next._id;
-
- // Insert a second document to see after resuming.
- assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
-
- // Recreate the collection with a different collation.
- caseInsensitiveCollection = assertDropAndRecreateCollection(
- db, caseInsensitiveCollection.getName(), {collation: {locale: "simple"}});
- assert.writeOK(caseInsensitiveCollection.insert({_id: "new collection", text: "abc"}));
-
- // Verify that the stream sees the insert before the drop and then is exhausted. We won't
- // see the invalidate because the pipeline has a $match stage after the $changeStream.
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().fullDocument, {_id: "dropped_coll", text: "ABC"});
- // Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
- // 'isExhausted()' to force a getMore since the previous getMore may not include the
- // collection drop, which is more likely with sharded collections on slow machines.
- if (!isChangeStreamPassthrough()) {
- assert.throws(() => changeStream.next());
- }
-
- // Test that a pipeline with an explicit collation is allowed to resume from before the
- // collection is dropped and recreated.
- changeStream =
- caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "ABC"}}],
- {resumeAfter: resumeToken, collation: {locale: "fr"}});
-
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
- // Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
- // 'isExhausted()' to force a getMore since the previous getMore may not include the
- // collection drop, which is more likely with sharded collections on slow machines.
- if (!isChangeStreamPassthrough()) {
- assert.throws(() => changeStream.next());
- }
-
- // Test that a pipeline without an explicit collation is allowed to resume, even though the
- // collection has been recreated with the same default collation as it had previously. Do
- // not modify this command in the passthrough suite(s) since only individual collections
- // have the concept of a default collation.
- const doNotModifyInPassthroughs = true;
- const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
- db,
- {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {}
- },
- doNotModifyInPassthroughs));
-
- changeStream = new DBCommandCursor(db, cmdRes);
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
- }());
+ ],
+ aggregateOptions: {collation: {locale: "en_US"}},
+ collection: caseInsensitiveCollection
+});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
+
+cst.assertNextChangesEqual({cursor: englishCaseSensitiveStream, expectedChanges: [{docId: 1}]});
+}());
+
+// Test that collation is supported by the shell helper. Test that creating a change stream with
+// a non-default collation against a collection that has a simple default collation will use the
+// collation specified on the operation.
+(function() {
+const noCollationCollection = assertDropAndRecreateCollection(db, "change_stream_no_collation");
+
+const cursor = noCollationCollection.watch(
+ [{$match: {"fullDocument.text": "abc"}}, {$project: {docId: "$documentKey._id"}}],
+ {collation: caseInsensitive});
+assert(!cursor.hasNext());
+assert.writeOK(noCollationCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(noCollationCollection.insert({_id: 1, text: "abc"}));
+assert.soon(() => cursor.hasNext());
+assertChangeStreamEventEq(cursor.next(), {docId: 0});
+assert.soon(() => cursor.hasNext());
+assertChangeStreamEventEq(cursor.next(), {docId: 1});
+assert(!cursor.hasNext());
+}());
+
+// Test that we can resume a change stream on a collection that has been dropped without
+// requiring the user to explicitly specify the collation.
+(function() {
+const collName = "change_stream_case_insensitive";
+let caseInsensitiveCollection =
+ assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
+
+let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
+ {collation: caseInsensitive});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
+
+assert.soon(() => changeStream.hasNext());
+const next = changeStream.next();
+assert.docEq(next.documentKey, {_id: 0});
+const resumeToken = next._id;
+
+// Insert a second document to see after resuming.
+assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
+
+// Drop the collection to invalidate the stream.
+assertDropCollection(db, collName);
+
+// Test that a $changeStream is allowed to resume on the dropped collection with an explicit
+// collation, even if it doesn't match the original collection's default collation.
+changeStream =
+ caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "ABC"}}],
+ {resumeAfter: resumeToken, collation: {locale: "simple"}});
+
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+
+// Test that a pipeline without an explicit collation is allowed to resume the change stream
+// after the collection has been dropped, and it will use the simple collation. Do not
+// modify this in the passthrough suite(s) since only individual collections have the
+// concept of a default collation.
+const doNotModifyInPassthroughs = true;
+const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
+ db,
+ {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {},
+ },
+ doNotModifyInPassthroughs));
+
+changeStream = new DBCommandCursor(db, cmdRes);
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+}());
+
+// Test that the default collation of a new version of the collection is not applied when
+// resuming a change stream from before a collection drop.
+(function() {
+const collName = "change_stream_case_insensitive";
+let caseInsensitiveCollection =
+ assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
+
+let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
+ {collation: caseInsensitive});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
+
+assert.soon(() => changeStream.hasNext());
+const next = changeStream.next();
+assert.docEq(next.documentKey, {_id: 0});
+const resumeToken = next._id;
+
+// Insert a second document to see after resuming.
+assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
+
+// Recreate the collection with a different collation.
+caseInsensitiveCollection = assertDropAndRecreateCollection(
+ db, caseInsensitiveCollection.getName(), {collation: {locale: "simple"}});
+assert.writeOK(caseInsensitiveCollection.insert({_id: "new collection", text: "abc"}));
+
+// Verify that the stream sees the insert before the drop and then is exhausted. We won't
+// see the invalidate because the pipeline has a $match stage after the $changeStream.
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().fullDocument, {_id: "dropped_coll", text: "ABC"});
+// Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
+// 'isExhausted()' to force a getMore since the previous getMore may not include the
+// collection drop, which is more likely with sharded collections on slow machines.
+if (!isChangeStreamPassthrough()) {
+ assert.throws(() => changeStream.next());
+}
+
+// Test that a pipeline with an explicit collation is allowed to resume from before the
+// collection is dropped and recreated.
+changeStream =
+ caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "ABC"}}],
+ {resumeAfter: resumeToken, collation: {locale: "fr"}});
+
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+// Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
+// 'isExhausted()' to force a getMore since the previous getMore may not include the
+// collection drop, which is more likely with sharded collections on slow machines.
+if (!isChangeStreamPassthrough()) {
+ assert.throws(() => changeStream.next());
+}
+
+// Test that a pipeline without an explicit collation is allowed to resume, even though the
+// collection has been recreated with the same default collation as it had previously. Do
+// not modify this command in the passthrough suite(s) since only individual collections
+// have the concept of a default collation.
+const doNotModifyInPassthroughs = true;
+const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
+ db,
+ {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}},
+ doNotModifyInPassthroughs));
+
+changeStream = new DBCommandCursor(db, cmdRes);
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+}());
})();
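The collation cases above all reduce to one rule: a $changeStream runs with the simple collation unless one is passed explicitly in the aggregation options, and it never inherits the collection's default. A minimal sketch of the explicit form, assuming a collection `db.coll` on a replica set:

const caseInsensitive = {locale: "en_US", strength: 2};

// With no explicit collation the $match is case-sensitive, even if the
// collection's default collation is case-insensitive.
const sensitiveStream = db.coll.watch([{$match: {"fullDocument.text": "abc"}}]);

// Passing the collation in the options makes the same $match case-insensitive.
const insensitiveStream = db.coll.watch([{$match: {"fullDocument.text": "abc"}}],
                                        {collation: caseInsensitive});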
diff --git a/jstests/change_streams/does_not_implicitly_create_database.js b/jstests/change_streams/does_not_implicitly_create_database.js
index 052a53585bd..b6ffe0c83a4 100644
--- a/jstests/change_streams/does_not_implicitly_create_database.js
+++ b/jstests/change_streams/does_not_implicitly_create_database.js
@@ -4,78 +4,74 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
+load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
- // Ensure that the test DB does not exist.
- const testDB = db.getSiblingDB(jsTestName());
- assert.commandWorked(testDB.dropDatabase());
+// Ensure that the test DB does not exist.
+const testDB = db.getSiblingDB(jsTestName());
+assert.commandWorked(testDB.dropDatabase());
- let dbList = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
- assert.docEq(dbList.databases, []);
+let dbList = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
+assert.docEq(dbList.databases, []);
- const collName = "test";
+const collName = "test";
- // Start a new $changeStream on the non-existent db.
- const cst = new ChangeStreamTest(testDB);
- const changeStreamCursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: collName});
+// Start a new $changeStream on the non-existent db.
+const cst = new ChangeStreamTest(testDB);
+const changeStreamCursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: collName});
- // Confirm that a $changeStream cursor has been opened on the namespace.
- assert.gt(changeStreamCursor.id, 0);
+// Confirm that a $changeStream cursor has been opened on the namespace.
+assert.gt(changeStreamCursor.id, 0);
- // Confirm that the database has not been implicitly created.
- dbList = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
- assert.docEq(dbList.databases, []);
+// Confirm that the database has not been implicitly created.
+dbList = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
+assert.docEq(dbList.databases, []);
- // Confirm that a non-$changeStream aggregation on the non-existent database returns an empty
- // cursor.
- const nonCsCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}}));
- assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
- assert.eq(nonCsCmdRes.cursor.id, 0);
+// Confirm that a non-$changeStream aggregation on the non-existent database returns an empty
+// cursor.
+const nonCsCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}}));
+assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
+assert.eq(nonCsCmdRes.cursor.id, 0);
- // Now perform some writes into the collection...
- assert.commandWorked(testDB[collName].insert({_id: 1}));
- assert.commandWorked(testDB[collName].insert({_id: 2}));
- assert.commandWorked(testDB[collName].update({_id: 1}, {$set: {updated: true}}));
- assert.commandWorked(testDB[collName].remove({_id: 2}));
+// Now perform some writes into the collection...
+assert.commandWorked(testDB[collName].insert({_id: 1}));
+assert.commandWorked(testDB[collName].insert({_id: 2}));
+assert.commandWorked(testDB[collName].update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(testDB[collName].remove({_id: 2}));
- // ... confirm that the database has been created...
- dbList = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
- assert.docEq(dbList.databases, [{name: testDB.getName()}]);
+// ... confirm that the database has been created...
+dbList = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
+assert.docEq(dbList.databases, [{name: testDB.getName()}]);
- // ... and verify that the changes are observed by the stream.
- const expectedChanges = [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1},
- ns: {db: testDB.getName(), coll: collName},
- operationType: "insert"
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2},
- ns: {db: testDB.getName(), coll: collName},
- operationType: "insert"
- },
- {
- documentKey: {_id: 1},
- ns: {db: testDB.getName(), coll: collName},
- updateDescription: {removedFields: [], updatedFields: {updated: true}},
- operationType: "update"
- },
- {
- documentKey: {_id: 2},
- ns: {db: testDB.getName(), coll: collName},
- operationType: "delete"
- },
- ];
+// ... and verify that the changes are observed by the stream.
+const expectedChanges = [
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1},
+ ns: {db: testDB.getName(), coll: collName},
+ operationType: "insert"
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2},
+ ns: {db: testDB.getName(), coll: collName},
+ operationType: "insert"
+ },
+ {
+ documentKey: {_id: 1},
+ ns: {db: testDB.getName(), coll: collName},
+ updateDescription: {removedFields: [], updatedFields: {updated: true}},
+ operationType: "update"
+ },
+ {documentKey: {_id: 2}, ns: {db: testDB.getName(), coll: collName}, operationType: "delete"},
+];
- cst.assertNextChangesEqual({cursor: changeStreamCursor, expectedChanges: expectedChanges});
- cst.cleanUp();
+cst.assertNextChangesEqual({cursor: changeStreamCursor, expectedChanges: expectedChanges});
+cst.cleanUp();
})();
\ No newline at end of file
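The invariant exercised above is that opening a $changeStream cursor is a pure read: the watched database should appear in listDatabases only once a write occurs. A minimal sketch of that check, assuming shell access to the admin database and a throwaway database name:

const testDB = db.getSiblingDB("cs_implicit_create_check");  // Hypothetical name.
assert.commandWorked(testDB.dropDatabase());

const stream = testDB.test.watch();  // Opens a cursor; must not create the DB.
let dbList = assert.commandWorked(db.adminCommand(
    {listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
assert.docEq(dbList.databases, []);  // Database still absent.

assert.commandWorked(testDB.test.insert({_id: 1}));  // The write creates it.
dbList = assert.commandWorked(db.adminCommand(
    {listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
assert.docEq(dbList.databases, [{name: testDB.getName()}]);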
diff --git a/jstests/change_streams/error_label.js b/jstests/change_streams/error_label.js
index 1c9a00db356..93a8e569d25 100644
--- a/jstests/change_streams/error_label.js
+++ b/jstests/change_streams/error_label.js
@@ -4,30 +4,30 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- // Drop and recreate the collections to be used in this set of tests.
- const coll = assertDropAndRecreateCollection(db, "change_stream_error_label");
+// Drop and recreate the collections to be used in this set of tests.
+const coll = assertDropAndRecreateCollection(db, "change_stream_error_label");
- // Attaching a projection to the Change Stream that filters out the resume token (stored in the
- // _id field) guarantees a ChangeStreamFatalError as soon as we get the first change.
- const changeStream = coll.watch([{$project: {_id: 0}}], {batchSize: 1});
- assert.commandWorked(coll.insert({a: 1}));
+// Attaching a projection to the Change Stream that filters out the resume token (stored in the
+// _id field) guarantees a ChangeStreamFatalError as soon as we get the first change.
+const changeStream = coll.watch([{$project: {_id: 0}}], {batchSize: 1});
+assert.commandWorked(coll.insert({a: 1}));
- const err = assert.throws(function() {
- // Call hasNext() until it throws an error or unexpectedly returns true. We need the
- // assert.soon() to keep trying here, because the above insert command isn't immediately
- // observable to the change stream in sharded configurations.
- assert.soon(function() {
- return changeStream.hasNext();
- });
+const err = assert.throws(function() {
+ // Call hasNext() until it throws an error or unexpectedly returns true. We need the
+ // assert.soon() to keep trying here, because the above insert command isn't immediately
+ // observable to the change stream in sharded configurations.
+ assert.soon(function() {
+ return changeStream.hasNext();
});
+});
- // The hasNext() sends a getMore command, which should generate a ChangeStreamFatalError reply
- // that includes the NonResumableChangeStreamError errorLabel.
- assert.commandFailedWithCode(err, ErrorCodes.ChangeStreamFatalError);
- assert("errorLabels" in err, err);
- assert.contains("NonResumableChangeStreamError", err.errorLabels, err);
+// The hasNext() sends a getMore command, which should generate a ChangeStreamFatalError reply
+// that includes the NonResumableChangeStreamError errorLabel.
+assert.commandFailedWithCode(err, ErrorCodes.ChangeStreamFatalError);
+assert("errorLabels" in err, err);
+assert.contains("NonResumableChangeStreamError", err.errorLabels, err);
}());
\ No newline at end of file
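Outside the test harness, the same label is what lets a consumer distinguish a permanently broken stream from a transient error that can be retried with resumeAfter. A sketch of how a caller might branch on it, assuming `stream` is a change stream cursor like the one above:

try {
    while (stream.hasNext()) {
        printjson(stream.next());
    }
} catch (err) {
    const labels = err.errorLabels || [];
    if (labels.includes("NonResumableChangeStreamError")) {
        // The cursor cannot be resumed; reopen without a resume token.
        print("change stream is not resumable");
    } else {
        throw err;  // Transient errors propagate for retry logic elsewhere.
    }
}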
diff --git a/jstests/change_streams/include_cluster_time.js b/jstests/change_streams/include_cluster_time.js
index d035a92f517..dcefe40d062 100644
--- a/jstests/change_streams/include_cluster_time.js
+++ b/jstests/change_streams/include_cluster_time.js
@@ -5,58 +5,56 @@
// based on the commit oplog entry, which would cause this test to fail.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- // Drop and recreate the collections to be used in this set of tests.
- const coll = assertDropAndRecreateCollection(db, "include_cluster_time");
+// Drop and recreate the collections to be used in this set of tests.
+const coll = assertDropAndRecreateCollection(db, "include_cluster_time");
- const changeStream = coll.watch();
+const changeStream = coll.watch();
- const insertClusterTime =
- assert.commandWorked(coll.runCommand("insert", {documents: [{_id: 0}]})).operationTime;
+const insertClusterTime =
+ assert.commandWorked(coll.runCommand("insert", {documents: [{_id: 0}]})).operationTime;
- const updateClusterTime =
- assert
- .commandWorked(
- coll.runCommand("update", {updates: [{q: {_id: 0}, u: {$set: {updated: true}}}]}))
- .operationTime;
+const updateClusterTime = assert
+ .commandWorked(coll.runCommand(
+ "update", {updates: [{q: {_id: 0}, u: {$set: {updated: true}}}]}))
+ .operationTime;
- const deleteClusterTime =
- assert.commandWorked(coll.runCommand("delete", {deletes: [{q: {_id: 0}, limit: 1}]}))
- .operationTime;
+const deleteClusterTime =
+ assert.commandWorked(coll.runCommand("delete", {deletes: [{q: {_id: 0}, limit: 1}]}))
+ .operationTime;
- const dropClusterTime =
- assert.commandWorked(db.runCommand({drop: coll.getName()})).operationTime;
+const dropClusterTime = assert.commandWorked(db.runCommand({drop: coll.getName()})).operationTime;
- // Make sure each operation has a reasonable cluster time. Note that we should not assert
- // that the cluster times are equal, because the cluster time returned from the command is
- // generated by a second, independent read of the logical clock than the one used to
- // generate the oplog entry. It's possible that the system did something to advance the time
- // between the two reads of the clock.
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.lte(next.clusterTime, insertClusterTime);
+// Make sure each operation has a reasonable cluster time. Note that we should not assert
+// that the cluster times are equal, because the cluster time returned from the command is
+// generated by a second, independent read of the logical clock than the one used to
+// generate the oplog entry. It's possible that the system did something to advance the time
+// between the two reads of the clock.
+assert.soon(() => changeStream.hasNext());
+let next = changeStream.next();
+assert.eq(next.operationType, "insert");
+assert.lte(next.clusterTime, insertClusterTime);
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.lte(next.clusterTime, updateClusterTime);
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.lte(next.clusterTime, updateClusterTime);
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "delete");
- assert.lte(next.clusterTime, deleteClusterTime);
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "delete");
+assert.lte(next.clusterTime, deleteClusterTime);
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.lte(next.clusterTime, dropClusterTime);
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.lte(next.clusterTime, dropClusterTime);
- assertInvalidateOp({cursor: changeStream, opType: "drop"});
+assertInvalidateOp({cursor: changeStream, opType: "drop"});
- changeStream.close();
+changeStream.close();
}());
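The assertions above rely on one comparison idiom: each event's clusterTime is a Timestamp that can be compared with assert.lte() against the operationTime the triggering command reported, using <= rather than equality because the two values come from independent reads of the logical clock. A minimal sketch, assuming a collection `db.coll` on a replica set:

const stream = db.coll.watch();
const insertOpTime =
    assert.commandWorked(db.coll.runCommand("insert", {documents: [{_id: 0}]}))
        .operationTime;

assert.soon(() => stream.hasNext());
const event = stream.next();
assert.eq(event.operationType, "insert");
// The event's time may trail the command's reported time, never exceed it.
assert.lte(event.clusterTime, insertOpTime);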
diff --git a/jstests/change_streams/lookup_post_image.js b/jstests/change_streams/lookup_post_image.js
index be267f5feea..fa2658ed6f8 100644
--- a/jstests/change_streams/lookup_post_image.js
+++ b/jstests/change_streams/lookup_post_image.js
@@ -6,244 +6,241 @@
// uses_multiple_connections,
// ]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js");
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
-
- const coll = assertDropAndRecreateCollection(db, "change_post_image");
- const cst = new ChangeStreamTest(db);
-
- jsTestLog("Testing change streams without 'fullDocument' specified");
- // Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for
- // an insert.
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: coll});
- assert.writeOK(coll.insert({_id: "fullDocument not specified"}));
- let latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified"});
-
- // Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for a
- // replacement-style update.
- assert.writeOK(coll.update({_id: "fullDocument not specified"},
- {_id: "fullDocument not specified", replaced: true}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "replace");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified", replaced: true});
-
- // Test that not specifying 'fullDocument' does not include a 'fullDocument' in the result
- // for a non-replacement update.
- assert.writeOK(coll.update({_id: "fullDocument not specified"}, {$set: {updated: true}}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(!latestChange.hasOwnProperty("fullDocument"));
-
- jsTestLog("Testing change streams with 'fullDocument' specified as 'default'");
-
- // Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
- // result for an insert.
- cursor = cst.startWatchingChanges(
- {collection: coll, pipeline: [{$changeStream: {fullDocument: "default"}}]});
- assert.writeOK(coll.insert({_id: "fullDocument is default"}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is default"});
-
- // Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
- // result for a replacement-style update.
- assert.writeOK(coll.update({_id: "fullDocument is default"},
- {_id: "fullDocument is default", replaced: true}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "replace");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is default", replaced: true});
-
- // Test that specifying 'fullDocument' as 'default' does not include a 'fullDocument' in the
- // result for a non-replacement update.
- assert.writeOK(coll.update({_id: "fullDocument is default"}, {$set: {updated: true}}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(!latestChange.hasOwnProperty("fullDocument"));
-
- jsTestLog("Testing change streams with 'fullDocument' specified as 'updateLookup'");
-
- // Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
- // the result for an insert.
- cursor = cst.startWatchingChanges(
- {collection: coll, pipeline: [{$changeStream: {fullDocument: "updateLookup"}}]});
- assert.writeOK(coll.insert({_id: "fullDocument is lookup"}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup"});
-
- // Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
- // the result for a replacement-style update.
- assert.writeOK(coll.update({_id: "fullDocument is lookup"},
- {_id: "fullDocument is lookup", replaced: true}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "replace");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup", replaced: true});
-
- // Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
- // the result for a non-replacement update.
- assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updated: true}}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert.eq(latestChange.fullDocument,
- {_id: "fullDocument is lookup", replaced: true, updated: true});
-
- // Test that looking up the post image of an update after deleting the document will result
- // in a 'fullDocument' with a value of null.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {fullDocument: "updateLookup"}},
- {$match: {operationType: "update"}}
- ]
- });
- assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updatedAgain: true}}));
- assert.writeOK(coll.remove({_id: "fullDocument is lookup"}));
- // If this test is running with secondary read preference, it's necessary for the remove
- // to propagate to all secondary nodes and be available for majority reads before we can
- // assume looking up the document will fail.
- FixtureHelpers.awaitLastOpCommitted(db);
-
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
- const deleteDocResumePoint = latestChange._id;
-
- // Test that looking up the post image of an update after the collection has been dropped
- // will result in 'fullDocument' with a value of null. This must be done using getMore
- // because new cursors cannot be established after a collection drop.
- assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
- assert.writeOK(coll.update({_id: "fullDocument is lookup 2"}, {$set: {updated: true}}));
-
- // Open a $changeStream cursor with batchSize 0, so that no oplog entries are retrieved yet.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
- {$match: {operationType: {$ne: "delete"}}}
- ],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
-
- // Save another stream to test post-image lookup after the collection is recreated.
- const cursorBeforeDrop = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
- {$match: {operationType: {$ne: "delete"}}}
- ],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
-
- // Retrieve the 'insert' operation from the latter stream. This is necessary on a sharded
- // collection so that the documentKey is retrieved before the collection is recreated;
- // otherwise, per SERVER-31691, a uassert will occur.
- latestChange = cst.getOneChange(cursorBeforeDrop);
- assert.eq(latestChange.operationType, "insert");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
-
- // Drop the collection and wait until two-phase drop finishes.
- assertDropCollection(db, coll.getName());
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
- });
- // If this test is running with secondary read preference, it's necessary for the drop
- // to propagate to all secondary nodes and be available for majority reads before we can
- // assume looking up the document will fail.
- FixtureHelpers.awaitLastOpCommitted(db);
-
- // Check the next $changeStream entry; this is the test document inserted above.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
-
- // The next entry is the 'update' operation. Because the collection has been dropped, our
- // attempt to look up the post-image results in a null document.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
-
- // Test that we can resume a change stream with 'fullDocument: updateLookup' after the
- // collection has been dropped. This is only allowed if an explicit collation is provided.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {resumeAfter: deleteDocResumePoint, fullDocument: "updateLookup"}},
- {$match: {operationType: {$ne: "delete"}}}
- ],
- aggregateOptions: {cursor: {batchSize: 0}, collation: {locale: "simple"}}
- });
-
- // Check the next $changeStream entry; this is the test document inserted above.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
-
- // The next entry is the 'update' operation. Because the collection has been dropped, our
- // attempt to look up the post-image results in a null document.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
-
- // Test that looking up the post image of an update after the collection has been dropped
- // and created again will result in 'fullDocument' with a value of null. This must be done
- // using getMore because new cursors cannot be established after a collection drop.
-
- // Insert a document with the same _id, verify the change stream won't return it due to
- // different UUID.
- assertCreateCollection(db, coll.getName());
- assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
-
- // Confirm that the next entry's post-image is null since new collection has a different
- // UUID.
- latestChange = cst.getOneChange(cursorBeforeDrop);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
-
- jsTestLog("Testing full document lookup with a real getMore");
- assert.writeOK(coll.insert({_id: "getMoreEnabled"}));
-
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
- });
- assert.writeOK(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
-
- const doc = cst.getOneChange(cursor);
- assert.docEq(doc["fullDocument"], {_id: "getMoreEnabled", updated: true});
-
- // Test that invalidate entries don't have 'fullDocument' even if 'updateLookup' is
- // specified.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- assert.writeOK(coll.insert({_id: "testing invalidate"}));
- assertDropCollection(db, coll.getName());
- // Wait until two-phase drop finishes.
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
- });
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "drop");
- // Only single-collection change streams will be invalidated by the drop.
- if (!isChangeStreamPassthrough()) {
- latestChange = cst.getOneChange(cursor, true);
- assert.eq(latestChange.operationType, "invalidate");
- }
+"use strict";
+
+load("jstests/libs/change_stream_util.js");
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
+
+const coll = assertDropAndRecreateCollection(db, "change_post_image");
+const cst = new ChangeStreamTest(db);
+
+jsTestLog("Testing change streams without 'fullDocument' specified");
+// Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for
+// an insert.
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: coll});
+assert.writeOK(coll.insert({_id: "fullDocument not specified"}));
+let latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified"});
+
+// Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for a
+// replacement-style update.
+assert.writeOK(coll.update({_id: "fullDocument not specified"},
+ {_id: "fullDocument not specified", replaced: true}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "replace");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified", replaced: true});
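+// Replacement-style updates surface as 'replace' events, which carry the new document even
+// when 'fullDocument' is unspecified.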
+
+// Test that not specifying 'fullDocument' does not include a 'fullDocument' in the result
+// for a non-replacement update.
+assert.writeOK(coll.update({_id: "fullDocument not specified"}, {$set: {updated: true}}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(!latestChange.hasOwnProperty("fullDocument"));
+
+jsTestLog("Testing change streams with 'fullDocument' specified as 'default'");
+
+// Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
+// result for an insert.
+cursor = cst.startWatchingChanges(
+ {collection: coll, pipeline: [{$changeStream: {fullDocument: "default"}}]});
+assert.writeOK(coll.insert({_id: "fullDocument is default"}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is default"});
+
+// Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
+// result for a replacement-style update.
+assert.writeOK(coll.update({_id: "fullDocument is default"},
+ {_id: "fullDocument is default", replaced: true}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "replace");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is default", replaced: true});
+
+// Test that specifying 'fullDocument' as 'default' does not include a 'fullDocument' in the
+// result for a non-replacement update.
+assert.writeOK(coll.update({_id: "fullDocument is default"}, {$set: {updated: true}}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(!latestChange.hasOwnProperty("fullDocument"));
+
+jsTestLog("Testing change streams with 'fullDocument' specified as 'updateLookup'");
+
+// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
+// the result for an insert.
+cursor = cst.startWatchingChanges(
+ {collection: coll, pipeline: [{$changeStream: {fullDocument: "updateLookup"}}]});
+assert.writeOK(coll.insert({_id: "fullDocument is lookup"}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup"});
+
+// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
+// the result for a replacement-style update.
+assert.writeOK(
+ coll.update({_id: "fullDocument is lookup"}, {_id: "fullDocument is lookup", replaced: true}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "replace");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup", replaced: true});
+
+// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
+// the result for a non-replacement update.
+assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updated: true}}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert.eq(latestChange.fullDocument,
+ {_id: "fullDocument is lookup", replaced: true, updated: true});
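+// The looked-up post-image is the document's current state, so it reflects both the earlier
+// replacement and this update.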
+
+// Test that looking up the post image of an update after deleting the document will result
+// in a 'fullDocument' with a value of null.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {operationType: "update"}}]
+});
+assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updatedAgain: true}}));
+assert.writeOK(coll.remove({_id: "fullDocument is lookup"}));
+// If this test is running with secondary read preference, it's necessary for the remove
+// to propagate to all secondary nodes and be available for majority reads before we can
+// assume looking up the document will fail.
+FixtureHelpers.awaitLastOpCommitted(db);
+
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
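+// A change event's '_id' is its resume token; capture it so that later streams can resume
+// from this point.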
+const deleteDocResumePoint = latestChange._id;
+
+// Test that looking up the post image of an update after the collection has been dropped
+// will result in 'fullDocument' with a value of null. This must be done using getMore
+// because new cursors cannot be established after a collection drop.
+assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
+assert.writeOK(coll.update({_id: "fullDocument is lookup 2"}, {$set: {updated: true}}));
+
+// Open a $changeStream cursor with batchSize 0, so that no oplog entries are retrieved yet.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [
+ {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
+ {$match: {operationType: {$ne: "delete"}}}
+ ],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+
+// Save another stream to test post-image lookup after the collection is recreated.
+const cursorBeforeDrop = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [
+ {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
+ {$match: {operationType: {$ne: "delete"}}}
+ ],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+
+// Retrieve the 'insert' operation from the latter stream. This is necessary on a sharded
+// collection so that the documentKey is retrieved before the collection is recreated;
+// otherwise, per SERVER-31691, a uassert will occur.
+latestChange = cst.getOneChange(cursorBeforeDrop);
+assert.eq(latestChange.operationType, "insert");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
+
+// Drop the collection and wait until two-phase drop finishes.
+assertDropCollection(db, coll.getName());
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
+});
+// If this test is running with secondary read preference, it's necessary for the drop
+// to propagate to all secondary nodes and be available for majority reads before we can
+// assume looking up the document will fail.
+FixtureHelpers.awaitLastOpCommitted(db);
+
+// Check the next $changeStream entry; this is the test document inserted above.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
+
+// The next entry is the 'update' operation. Because the collection has been dropped, our
+// attempt to look up the post-image results in a null document.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+
+// Test that we can resume a change stream with 'fullDocument: updateLookup' after the
+// collection has been dropped. This is only allowed if an explicit collation is provided.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [
+ {$changeStream: {resumeAfter: deleteDocResumePoint, fullDocument: "updateLookup"}},
+ {$match: {operationType: {$ne: "delete"}}}
+ ],
+ aggregateOptions: {cursor: {batchSize: 0}, collation: {locale: "simple"}}
+});
+
+// Check the next $changeStream entry; this is the test document inserted above.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
+
+// The next entry is the 'update' operation. Because the collection has been dropped, our
+// attempt to look up the post-image results in a null document.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+
+// Test that looking up the post image of an update after the collection has been dropped
+// and created again will result in 'fullDocument' with a value of null. This must be done
+// using getMore because new cursors cannot be established after a collection drop.
+
+// Recreate the collection and insert a document with the same _id. The change stream will
+// not return this insert, because the recreated collection has a different UUID.
+assertCreateCollection(db, coll.getName());
+assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
+
+// Confirm that the next entry's post-image is null, since the new collection has a
+// different UUID.
+latestChange = cst.getOneChange(cursorBeforeDrop);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+
+jsTestLog("Testing full document lookup with a real getMore");
+assert.writeOK(coll.insert({_id: "getMoreEnabled"}));
+
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
+});
+assert.writeOK(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
+
+const doc = cst.getOneChange(cursor);
+assert.docEq(doc["fullDocument"], {_id: "getMoreEnabled", updated: true});
+
+// Test that invalidate entries don't have 'fullDocument' even if 'updateLookup' is
+// specified.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+assert.writeOK(coll.insert({_id: "testing invalidate"}));
+assertDropCollection(db, coll.getName());
+// Wait until two-phase drop finishes.
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
+});
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "drop");
+// Only single-collection change streams will be invalidated by the drop.
+if (!isChangeStreamPassthrough()) {
+ latestChange = cst.getOneChange(cursor, true);
+ assert.eq(latestChange.operationType, "invalidate");
+}
}());
diff --git a/jstests/change_streams/metadata_notifications.js b/jstests/change_streams/metadata_notifications.js
index 4d1f29abf2a..8b3aae094fe 100644
--- a/jstests/change_streams/metadata_notifications.js
+++ b/jstests/change_streams/metadata_notifications.js
@@ -3,121 +3,198 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js");
- load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
-
- db = db.getSiblingDB(jsTestName());
- let cst = new ChangeStreamTest(db);
-
- db.getMongo().forceReadMode('commands');
-
- // Test that it is possible to open a new change stream cursor on a collection that does not
- // exist.
- const collName = "test";
- assertDropCollection(db, collName);
-
- // Asserts that resuming a change stream with 'spec' and an explicit simple collation returns
- // the results specified by 'expected'.
- function assertResumeExpected({coll, spec, expected}) {
- const cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: spec}],
- aggregateOptions: {collation: {locale: "simple"}}
- });
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- }
-
- // Cursor creation succeeds, but there are no results. We do not expect to see a notification
- // for collection creation.
- let cursor = cst.startWatchingChanges(
- {collection: collName, pipeline: [{$changeStream: {}}, {$project: {operationType: 1}}]});
-
- // We explicitly test getMore, to ensure that the getMore command for a non-existent collection
- // does not return an error.
- let change = cst.getNextBatch(cursor);
- assert.neq(change.id, 0);
- assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
-
- // Dropping the empty database should not generate any notification for the change stream, since
- // the collection does not exist yet.
- assert.commandWorked(db.dropDatabase());
- change = cst.getNextBatch(cursor);
- assert.neq(change.id, 0);
- assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
-
- // After collection creation, we expect to see oplog entries for each subsequent operation.
- let coll = assertCreateCollection(db, collName);
- assert.writeOK(coll.insert({_id: 0}));
-
- // Determine the number of shards that the collection is distributed across.
- const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
-
- change = cst.getOneChange(cursor);
- assert.eq(change.operationType, "insert", tojson(change));
-
- // Create oplog entries of type insert, update, delete, and drop.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
- assertDropCollection(db, coll.getName());
-
- // We should get oplog entries of type insert, update, delete, drop, and invalidate. The cursor
- // should be closed.
- let expectedChanges = [
- {operationType: "insert"},
- {operationType: "update"},
- {operationType: "delete"},
- {operationType: "drop"},
- {operationType: "invalidate"},
- ];
- let changes = cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
- const resumeToken = changes[0]._id;
- const resumeTokenDrop = changes[3]._id;
- const resumeTokenInvalidate = changes[4]._id;
-
- // Verify we can startAfter the invalidate. We should see one drop event for every other shard
- // that the collection was present on, or nothing if the collection was not sharded. This test
- // exercises the bug described in SERVER-41196.
- const restartedStream = coll.watch([], {startAfter: resumeTokenInvalidate});
- for (let i = 0; i < numShards - 1; ++i) {
- assert.soon(() => restartedStream.hasNext());
- const nextEvent = restartedStream.next();
- assert.eq(nextEvent.operationType, "drop", () => tojson(nextEvent));
- }
- assert(!restartedStream.hasNext(), () => tojson(restartedStream.next()));
-
- // Verify that we can resume a stream after a collection drop without an explicit collation.
- assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {}
- }));
-
- // Recreate the collection.
- coll = assertCreateCollection(db, collName);
- assert.writeOK(coll.insert({_id: "after recreate"}));
-
- // Test resuming the change stream from the collection drop using 'resumeAfter'. If running in a
- // sharded passthrough suite, resuming from the drop will first return the drop from the other
- // shard before returning an invalidate.
- cursor = cst.startWatchingChanges({
+"use strict";
+
+load("jstests/libs/change_stream_util.js");
+load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
+
+db = db.getSiblingDB(jsTestName());
+let cst = new ChangeStreamTest(db);
+
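+// Force the shell to issue the find/getMore read commands rather than legacy OP_QUERY reads.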
+db.getMongo().forceReadMode('commands');
+
+// Test that it is possible to open a new change stream cursor on a collection that does not
+// exist.
+const collName = "test";
+assertDropCollection(db, collName);
+
+// Asserts that resuming a change stream with 'spec' and an explicit simple collation returns
+// the results specified by 'expected'.
+function assertResumeExpected({coll, spec, expected}) {
+ const cursor = cst.startWatchingChanges({
collection: coll,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenDrop}}],
- aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
- });
- cst.consumeDropUpTo({
- cursor: cursor,
- dropType: "drop",
- expectedNext: {operationType: "invalidate"},
- expectInvalidate: true
+ pipeline: [{$changeStream: spec}],
+ aggregateOptions: {collation: {locale: "simple"}}
});
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+}
+
+// Cursor creation succeeds, but there are no results. We do not expect to see a notification
+// for collection creation.
+let cursor = cst.startWatchingChanges(
+ {collection: collName, pipeline: [{$changeStream: {}}, {$project: {operationType: 1}}]});
+
+// We explicitly test getMore, to ensure that the getMore command for a non-existent collection
+// does not return an error.
+let change = cst.getNextBatch(cursor);
+assert.neq(change.id, 0);
+assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
+
+// Dropping the empty database should not generate any notification for the change stream, since
+// the collection does not exist yet.
+assert.commandWorked(db.dropDatabase());
+change = cst.getNextBatch(cursor);
+assert.neq(change.id, 0);
+assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
+
+// After collection creation, we expect to see oplog entries for each subsequent operation.
+let coll = assertCreateCollection(db, collName);
+assert.writeOK(coll.insert({_id: 0}));
+
+// Determine the number of shards that the collection is distributed across.
+const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
+
+change = cst.getOneChange(cursor);
+assert.eq(change.operationType, "insert", tojson(change));
+
+// Create oplog entries of type insert, update, delete, and drop.
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.writeOK(coll.remove({_id: 1}));
+assertDropCollection(db, coll.getName());
+
+// We should get oplog entries of type insert, update, delete, drop, and invalidate. The cursor
+// should be closed.
+let expectedChanges = [
+ {operationType: "insert"},
+ {operationType: "update"},
+ {operationType: "delete"},
+ {operationType: "drop"},
+ {operationType: "invalidate"},
+];
+let changes = cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
+const resumeToken = changes[0]._id;
+const resumeTokenDrop = changes[3]._id;
+const resumeTokenInvalidate = changes[4]._id;
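+// These tokens are used below to test resuming from a regular event, from the drop, and from
+// the invalidate.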
+
+// Verify we can startAfter the invalidate. We should see one drop event for every other shard
+// that the collection was present on, or nothing if the collection was not sharded. This test
+// exercises the bug described in SERVER-41196.
+const restartedStream = coll.watch([], {startAfter: resumeTokenInvalidate});
+for (let i = 0; i < numShards - 1; ++i) {
+ assert.soon(() => restartedStream.hasNext());
+ const nextEvent = restartedStream.next();
+ assert.eq(nextEvent.operationType, "drop", () => tojson(nextEvent));
+}
+assert(!restartedStream.hasNext(), () => tojson(restartedStream.next()));
+
+// Verify that we can resume a stream after a collection drop without an explicit collation.
+assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {}
+}));
+
+// Recreate the collection.
+coll = assertCreateCollection(db, collName);
+assert.writeOK(coll.insert({_id: "after recreate"}));
+
+// Test resuming the change stream from the collection drop using 'resumeAfter'. If running in a
+// sharded passthrough suite, resuming from the drop will first return the drop from the other
+// shard before returning an invalidate.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenDrop}}],
+ aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: cursor,
+ dropType: "drop",
+ expectedNext: {operationType: "invalidate"},
+ expectInvalidate: true
+});
+
+// Test resuming the change stream from the invalidate after the drop using 'resumeAfter'.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
+ cursor: {},
+ collation: {locale: "simple"},
+}),
+ ErrorCodes.InvalidResumeToken);
+
+// Test resuming the change stream from the collection drop using 'startAfter'.
+assertResumeExpected({
+ coll: coll.getName(),
+ spec: {startAfter: resumeTokenDrop},
+ expected: [{operationType: "invalidate"}]
+});
+
+// Test resuming the change stream from the 'invalidate' notification using 'startAfter'.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
+ aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: cursor,
+ dropType: "drop",
+ expectedNext: {
+ operationType: "insert",
+ ns: {db: db.getName(), coll: coll.getName()},
+ fullDocument: {_id: "after recreate"},
+ documentKey: {_id: "after recreate"}
+ },
+});
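+// Unlike 'resumeAfter', 'startAfter' may begin a new stream from an invalidate token, which
+// is why this succeeds while the equivalent 'resumeAfter' attempt above fails with
+// InvalidResumeToken.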
+
+// Test that renaming a collection being watched generates a "rename" entry followed by an
+// "invalidate". This holds whether the change stream is watching the source or the target
+// collection of the rename. Sharded collections cannot be renamed.
+if (!FixtureHelpers.isSharded(coll)) {
+ cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
+ assertDropCollection(db, "renamed_coll");
+ assert.writeOK(coll.renameCollection("renamed_coll"));
+ expectedChanges = [
+ {
+ operationType: "rename",
+ ns: {db: db.getName(), coll: collName},
+ to: {db: db.getName(), coll: "renamed_coll"},
+ },
+ {operationType: "invalidate"}
+ ];
+ cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
+
+ coll = db["renamed_coll"];
- // Test resuming the change stream from the invalidate after the drop using 'resumeAfter'.
+ // Repeat the test, this time with a change stream open on the target.
+ cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
+ assert.writeOK(coll.renameCollection(collName));
+ expectedChanges = [
+ {
+ operationType: "rename",
+ ns: {db: db.getName(), coll: "renamed_coll"},
+ to: {db: db.getName(), coll: collName},
+ },
+ {operationType: "invalidate"}
+ ];
+ const changes = cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expectedChanges});
+ const resumeTokenRename = changes[0]._id;
+ const resumeTokenInvalidate = changes[1]._id;
+
+ coll = db[collName];
+ assert.writeOK(coll.insert({_id: "after rename"}));
+
+ // Test resuming the change stream from the collection rename using 'resumeAfter'.
+ assertResumeExpected({
+ coll: coll.getName(),
+ spec: {resumeAfter: resumeTokenRename},
+ expected: [{operationType: "invalidate"}]
+ });
+ // Test resuming the change stream from the invalidate after the rename using 'resumeAfter'.
assert.commandFailedWithCode(db.runCommand({
aggregate: coll.getName(),
pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
@@ -126,154 +203,76 @@
}),
ErrorCodes.InvalidResumeToken);
- // Test resuming the change stream from the collection drop using 'startAfter'.
+ // Test resuming the change stream from the rename using 'startAfter'.
assertResumeExpected({
coll: coll.getName(),
- spec: {startAfter: resumeTokenDrop},
+ spec: {startAfter: resumeTokenRename},
expected: [{operationType: "invalidate"}]
});
- // Test resuming the change stream from the 'invalidate' notification using 'startAfter'.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
- aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
- });
- cst.consumeDropUpTo({
- cursor: cursor,
- dropType: "drop",
- expectedNext: {
- operationType: "insert",
- ns: {db: db.getName(), coll: coll.getName()},
- fullDocument: {_id: "after recreate"},
- documentKey: {_id: "after recreate"}
- },
+ // Test resuming the change stream from the invalidate after the rename using 'startAfter'.
+ expectedChanges = [{
+ operationType: "insert",
+ ns: {db: db.getName(), coll: coll.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ }];
+ assertResumeExpected({
+ coll: coll.getName(),
+ spec: {startAfter: resumeTokenInvalidate},
+ expected: expectedChanges
});
- // Test that renaming a collection being watched generates a "rename" entry followed by an
- // "invalidate". This is true if the change stream is on the source or target collection of the
- // rename. Sharded collections cannot be renamed.
- if (!FixtureHelpers.isSharded(coll)) {
- cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- assertDropCollection(db, "renamed_coll");
- assert.writeOK(coll.renameCollection("renamed_coll"));
- expectedChanges = [
- {
- operationType: "rename",
- ns: {db: db.getName(), coll: collName},
- to: {db: db.getName(), coll: "renamed_coll"},
- },
- {operationType: "invalidate"}
- ];
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
-
- coll = db["renamed_coll"];
-
- // Repeat the test, this time with a change stream open on the target.
- cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection(collName));
- expectedChanges = [
- {
- operationType: "rename",
- ns: {db: db.getName(), coll: "renamed_coll"},
- to: {db: db.getName(), coll: collName},
- },
- {operationType: "invalidate"}
- ];
- const changes =
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expectedChanges});
- const resumeTokenRename = changes[0]._id;
- const resumeTokenInvalidate = changes[1]._id;
-
- coll = db[collName];
- assert.writeOK(coll.insert({_id: "after rename"}));
-
- // Test resuming the change stream from the collection rename using 'resumeAfter'.
- assertResumeExpected({
- coll: coll.getName(),
- spec: {resumeAfter: resumeTokenRename},
- expected: [{operationType: "invalidate"}]
- });
- // Test resuming the change stream from the invalidate after the rename using 'resumeAfter'.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
- cursor: {},
- collation: {locale: "simple"},
- }),
- ErrorCodes.InvalidResumeToken);
-
- // Test resuming the change stream from the rename using 'startAfter'.
- assertResumeExpected({
- coll: coll.getName(),
- spec: {startAfter: resumeTokenRename},
- expected: [{operationType: "invalidate"}]
- });
-
- // Test resuming the change stream from the invalidate after the rename using 'startAfter'.
- expectedChanges = [{
- operationType: "insert",
- ns: {db: db.getName(), coll: coll.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- }];
- assertResumeExpected({
- coll: coll.getName(),
- spec: {startAfter: resumeTokenInvalidate},
- expected: expectedChanges
- });
-
- assertDropAndRecreateCollection(db, "renamed_coll");
- assert.writeOK(db.renamed_coll.insert({_id: 0}));
-
- // Repeat the test again, this time using the 'dropTarget' option with an existing target
- // collection.
- cursor =
- cst.startWatchingChanges({collection: "renamed_coll", pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection("renamed_coll", true /* dropTarget */));
- expectedChanges = [
- {
- operationType: "rename",
- ns: {db: db.getName(), coll: collName},
- to: {db: db.getName(), coll: "renamed_coll"},
- },
- {operationType: "invalidate"}
- ];
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
-
- coll = db["renamed_coll"];
-
- // Test the behavior of a change stream watching the target collection of a $out aggregation
- // stage.
- cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- coll.aggregate([{$out: collName}]);
- // Note that $out will first create a temp collection, and then rename the temp collection
- // to the target. Do not explicitly check the 'ns' field.
- const rename = cst.getOneChange(cursor);
- assert.eq(rename.operationType, "rename", tojson(rename));
- assert.eq(rename.to, {db: db.getName(), coll: collName}, tojson(rename));
- assert.eq(cst.getOneChange(cursor, true).operationType, "invalidate");
- }
-
-    // Test that dropping a database will first drop all of its collections, invalidating any
- // change streams on those collections.
- cursor = cst.startWatchingChanges({
- collection: coll.getName(),
- pipeline: [{$changeStream: {}}],
- });
- assert.commandWorked(db.dropDatabase());
+ assertDropAndRecreateCollection(db, "renamed_coll");
+ assert.writeOK(db.renamed_coll.insert({_id: 0}));
+ // Repeat the test again, this time using the 'dropTarget' option with an existing target
+ // collection.
+ cursor =
+ cst.startWatchingChanges({collection: "renamed_coll", pipeline: [{$changeStream: {}}]});
+ assert.writeOK(coll.renameCollection("renamed_coll", true /* dropTarget */));
expectedChanges = [
{
- operationType: "drop",
- ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "rename",
+ ns: {db: db.getName(), coll: collName},
+ to: {db: db.getName(), coll: "renamed_coll"},
},
{operationType: "invalidate"}
];
cst.assertNextChangesEqual(
{cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
- cst.cleanUp();
+ coll = db["renamed_coll"];
+
+ // Test the behavior of a change stream watching the target collection of a $out aggregation
+ // stage.
+ cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
+ coll.aggregate([{$out: collName}]);
+ // Note that $out will first create a temp collection, and then rename the temp collection
+ // to the target. Do not explicitly check the 'ns' field.
+ const rename = cst.getOneChange(cursor);
+ assert.eq(rename.operationType, "rename", tojson(rename));
+ assert.eq(rename.to, {db: db.getName(), coll: collName}, tojson(rename));
+ assert.eq(cst.getOneChange(cursor, true).operationType, "invalidate");
+}
+
+// Test that dropping a database will first drop all of its collections, invalidating any
+// change streams on those collections.
+cursor = cst.startWatchingChanges({
+ collection: coll.getName(),
+ pipeline: [{$changeStream: {}}],
+});
+assert.commandWorked(db.dropDatabase());
+
+expectedChanges = [
+ {
+ operationType: "drop",
+ ns: {db: db.getName(), coll: coll.getName()},
+ },
+ {operationType: "invalidate"}
+];
+cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
+
+cst.cleanUp();
}());
diff --git a/jstests/change_streams/no_regex_leak.js b/jstests/change_streams/no_regex_leak.js
index e1e5f5484e1..a05207a22a4 100644
--- a/jstests/change_streams/no_regex_leak.js
+++ b/jstests/change_streams/no_regex_leak.js
@@ -3,59 +3,57 @@
* affect what documents appear in a changestream, in response to SERVER-41164.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js");
- load("jstests/libs/collection_drop_recreate.js");
- function test_no_leak(
- dbNameUnrelated, collNameUnrelated, dbNameProblematic, collNameProblematic) {
- const dbUnrelated = db.getSiblingDB(dbNameUnrelated);
- const cstUnrelated = new ChangeStreamTest(dbUnrelated);
- assertDropAndRecreateCollection(dbUnrelated, collNameUnrelated);
+load("jstests/libs/change_stream_util.js");
+load("jstests/libs/collection_drop_recreate.js");
+function test_no_leak(dbNameUnrelated, collNameUnrelated, dbNameProblematic, collNameProblematic) {
+ const dbUnrelated = db.getSiblingDB(dbNameUnrelated);
+ const cstUnrelated = new ChangeStreamTest(dbUnrelated);
+ assertDropAndRecreateCollection(dbUnrelated, collNameUnrelated);
- const watchUnrelated = cstUnrelated.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collNameUnrelated});
+ const watchUnrelated = cstUnrelated.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collNameUnrelated});
- const dbProblematic = db.getSiblingDB(dbNameProblematic);
- const cstProblematic = new ChangeStreamTest(dbProblematic);
- assertDropAndRecreateCollection(dbProblematic, collNameProblematic);
+ const dbProblematic = db.getSiblingDB(dbNameProblematic);
+ const cstProblematic = new ChangeStreamTest(dbProblematic);
+ assertDropAndRecreateCollection(dbProblematic, collNameProblematic);
- const watchProblematic = cstProblematic.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collNameProblematic});
+ const watchProblematic = cstProblematic.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collNameProblematic});
- assert.commandWorked(dbUnrelated.getCollection(collNameUnrelated).insert({_id: 2}));
- let expected = {
- documentKey: {_id: 2},
- fullDocument: {_id: 2},
- ns: {db: dbNameUnrelated, coll: collNameUnrelated},
- operationType: "insert",
- };
- // Make sure that only the database which was inserted into reflects a change on its
- // changestream.
- cstUnrelated.assertNextChangesEqual({cursor: watchUnrelated, expectedChanges: [expected]});
- // The other DB shouldn't have any changes.
- cstProblematic.assertNoChange(watchProblematic);
+ assert.commandWorked(dbUnrelated.getCollection(collNameUnrelated).insert({_id: 2}));
+ let expected = {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2},
+ ns: {db: dbNameUnrelated, coll: collNameUnrelated},
+ operationType: "insert",
+ };
+    // Make sure that only the database that received the insert reflects a change on its
+    // change stream.
+ cstUnrelated.assertNextChangesEqual({cursor: watchUnrelated, expectedChanges: [expected]});
+ // The other DB shouldn't have any changes.
+ cstProblematic.assertNoChange(watchProblematic);
- assert.commandWorked(dbProblematic.getCollection(collNameProblematic).insert({_id: 3}));
- expected = {
- documentKey: {_id: 3},
- fullDocument: {_id: 3},
- ns: {db: dbNameProblematic, coll: collNameProblematic},
- operationType: "insert",
- };
- cstProblematic.assertNextChangesEqual(
- {cursor: watchProblematic, expectedChanges: [expected]});
- cstUnrelated.assertNoChange(watchUnrelated);
+ assert.commandWorked(dbProblematic.getCollection(collNameProblematic).insert({_id: 3}));
+ expected = {
+ documentKey: {_id: 3},
+ fullDocument: {_id: 3},
+ ns: {db: dbNameProblematic, coll: collNameProblematic},
+ operationType: "insert",
+ };
+ cstProblematic.assertNextChangesEqual({cursor: watchProblematic, expectedChanges: [expected]});
+ cstUnrelated.assertNoChange(watchUnrelated);
- cstUnrelated.cleanUp();
- cstProblematic.cleanUp();
- }
- if (!_isWindows()) {
- test_no_leak("has_no_pipe", "coll", "has_a_|pipe", "coll");
- test_no_leak("starssss", "coll", "stars*", "coll");
- }
- test_no_leak("has_[two]_brackets", "coll", "has_t_brackets", "coll");
- test_no_leak("test", "dotted.collection", "testadotted", "collection");
- test_no_leak("carat", "coll", "hasa^carat", "coll");
- test_no_leak("db1", "coll", "db1", "col*");
+ cstUnrelated.cleanUp();
+ cstProblematic.cleanUp();
+}
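+// Each case nests a regex metacharacter ('|', '*', '[', ']', '.', '^') in one namespace and
+// checks that the literal, lookalike namespace sees no spurious events. The '|' and '*' cases
+// are skipped on Windows, where those characters cannot appear in database names.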
+if (!_isWindows()) {
+ test_no_leak("has_no_pipe", "coll", "has_a_|pipe", "coll");
+ test_no_leak("starssss", "coll", "stars*", "coll");
+}
+test_no_leak("has_[two]_brackets", "coll", "has_t_brackets", "coll");
+test_no_leak("test", "dotted.collection", "testadotted", "collection");
+test_no_leak("carat", "coll", "hasa^carat", "coll");
+test_no_leak("db1", "coll", "db1", "col*");
}());
diff --git a/jstests/change_streams/only_wake_getmore_for_relevant_changes.js b/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
index 19f5433c8e9..16400360d55 100644
--- a/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
+++ b/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
@@ -4,33 +4,33 @@
// ]
// return early.
(function() {
- "use strict";
-
- load('jstests/libs/uuid_util.js');
- load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- /**
- * Uses a parallel shell to execute the javascript function 'event' at the same time as an
- * awaitData getMore on the cursor with id 'awaitDataCursorId'. Returns the result of the
- * getMore, and the time it took to complete.
- *
- * Note that 'event' will not have access to any local variables, since it will be executed in a
- * different scope.
- */
- function runGetMoreInParallelWithEvent(
- {collection, awaitDataCursorId, identifyingComment, maxTimeMS, event}) {
- // In some extreme cases, the parallel shell can take longer to start up than it takes for
- // the getMore to run. To prevent this from happening, the main thread waits for an insert
- // into "sentinel", to signal that the parallel shell has started and is waiting for the
- // getMore to appear in currentOp.
- const port =
- (collection.stats().sharded ? collection.getMongo().port
- : FixtureHelpers.getPrimaryForNodeHostingDatabase(db).port);
-
- const sentinelCountBefore = shellSentinelCollection.find().itcount();
-
- const awaitShellDoingEventDuringGetMore = startParallelShell(`
+"use strict";
+
+load('jstests/libs/uuid_util.js');
+load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+/**
+ * Uses a parallel shell to execute the javascript function 'event' at the same time as an
+ * awaitData getMore on the cursor with id 'awaitDataCursorId'. Returns the result of the
+ * getMore, and the time it took to complete.
+ *
+ * Note that 'event' will not have access to any local variables, since it will be executed in a
+ * different scope.
+ */
+function runGetMoreInParallelWithEvent(
+ {collection, awaitDataCursorId, identifyingComment, maxTimeMS, event}) {
+ // In some extreme cases, the parallel shell can take longer to start up than it takes for
+ // the getMore to run. To prevent this from happening, the main thread waits for an insert
+ // into "sentinel", to signal that the parallel shell has started and is waiting for the
+ // getMore to appear in currentOp.
+ const port =
+ (collection.stats().sharded ? collection.getMongo().port
+ : FixtureHelpers.getPrimaryForNodeHostingDatabase(db).port);
+
+ const sentinelCountBefore = shellSentinelCollection.find().itcount();
+
+ const awaitShellDoingEventDuringGetMore = startParallelShell(`
// Signal that the parallel shell has started.
assert.writeOK(db.getCollection("${ shellSentinelCollection.getName() }").insert({}));
@@ -46,134 +46,133 @@ const eventFn = ${ event.toString() };
eventFn();`,
port);
- // Wait for the shell to start.
- assert.soon(() => shellSentinelCollection.find().itcount() > sentinelCountBefore);
-
- // Run and time the getMore.
- const startTime = (new Date()).getTime();
- const result = assert.commandWorked(db.runCommand(
- {getMore: awaitDataCursorId, collection: collection.getName(), maxTimeMS: maxTimeMS}));
- awaitShellDoingEventDuringGetMore();
- return {result: result, elapsedMs: (new Date()).getTime() - startTime};
- }
-
- /**
- * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will not return after
- * 'event' is called, and will instead keep waiting until its maxTimeMS is expired.
- *
- * @param [Collection] collection - the collection to use in the getMore command.
- * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
- * @param [Function] event - the event that should be run during the getMore.
- */
- function assertEventDoesNotWakeCursor(
- {collection, awaitDataCursorId, identifyingComment, event}) {
- const {result, elapsedMs} = runGetMoreInParallelWithEvent({
- collection: collection,
- awaitDataCursorId: awaitDataCursorId,
- identifyingComment: identifyingComment,
- maxTimeMS: 1000,
- event: event,
- });
- // Should have waited for at least 'maxTimeMS'.
- assert.gt(elapsedMs, 900, "getMore returned before waiting for maxTimeMS");
- const cursorResponse = result.cursor;
- // Cursor should be valid with no data.
- assert.neq(cursorResponse.id, 0);
- assert.eq(cursorResponse.nextBatch.length, 0);
- }
-
- /**
- * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will return soon after
- * 'event' is called, and returns the response from the getMore command.
- *
- * @param [Collection] collection - the collection to use in the getMore command.
- * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
- * @param [Function] event - the event that should be run during the getMore.
- */
- function assertEventWakesCursor({collection, awaitDataCursorId, identifyingComment, event}) {
- // Run the original event, then (while still in the parallel shell) assert that the getMore
- // finishes soon after. This will be run in a parallel shell, which will not have a variable
- // 'event' in scope, so we'll have to stringify it here.
- const thirtyMinutes = 30 * 60 * 1000;
- const fiveMinutes = 5 * 60 * 1000;
- const {result, elapsedMs} = runGetMoreInParallelWithEvent({
- collection: collection,
- awaitDataCursorId: awaitDataCursorId,
- identifyingComment: identifyingComment,
- maxTimeMS: thirtyMinutes,
- event: event,
- });
-
- assert.lt(elapsedMs, fiveMinutes);
-
- return result;
- }
-
- // Refresh all collections which will be required in the course of this test.
- const shellSentinelCollection = assertDropAndRecreateCollection(db, "shell_sentinel");
- const changesCollection = assertDropAndRecreateCollection(db, "changes");
- const unrelatedCollection = assertDropCollection(db, "unrelated_collection");
-
- // Start a change stream cursor.
- const wholeCollectionStreamComment = "change stream on entire collection";
- let res = assert.commandWorked(db.runCommand({
- aggregate: changesCollection.getName(),
- // Project out the resume token, since that's subject to change unpredictably.
- pipeline: [{$changeStream: {}}],
- cursor: {},
- comment: wholeCollectionStreamComment
- }));
- const changeCursorId = res.cursor.id;
- assert.neq(changeCursorId, 0);
- assert.eq(res.cursor.firstBatch.length, 0);
-
- // Test that an insert during a getMore will wake up the cursor and immediately return with the
- // new result.
- const getMoreResponse = assertEventWakesCursor({
- collection: changesCollection,
- awaitDataCursorId: changeCursorId,
- identifyingComment: wholeCollectionStreamComment,
- event: () => assert.writeOK(db.changes.insert({_id: "wake up"}))
+ // Wait for the shell to start.
+ assert.soon(() => shellSentinelCollection.find().itcount() > sentinelCountBefore);
+
+ // Run and time the getMore.
+ const startTime = (new Date()).getTime();
+ const result = assert.commandWorked(db.runCommand(
+ {getMore: awaitDataCursorId, collection: collection.getName(), maxTimeMS: maxTimeMS}));
+ awaitShellDoingEventDuringGetMore();
+ return {result: result, elapsedMs: (new Date()).getTime() - startTime};
+}
+
+/**
+ * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will not return after
+ * 'event' is called, and will instead keep waiting until its maxTimeMS expires.
+ *
+ * @param [Collection] collection - the collection to use in the getMore command.
+ * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
+ * @param [Function] event - the event that should be run during the getMore.
+ */
+function assertEventDoesNotWakeCursor({collection, awaitDataCursorId, identifyingComment, event}) {
+ const {result, elapsedMs} = runGetMoreInParallelWithEvent({
+ collection: collection,
+ awaitDataCursorId: awaitDataCursorId,
+ identifyingComment: identifyingComment,
+ maxTimeMS: 1000,
+ event: event,
});
- assert.eq(getMoreResponse.cursor.nextBatch.length, 1);
- assert.eq(getMoreResponse.cursor.nextBatch[0].operationType,
- "insert",
- tojson(getMoreResponse.cursor.nextBatch[0]));
- assert.eq(getMoreResponse.cursor.nextBatch[0].fullDocument,
- {_id: "wake up"},
- tojson(getMoreResponse.cursor.nextBatch[0]));
-
- // Test that an insert to an unrelated collection will not cause the change stream to wake up
- // and return an empty batch before reaching the maxTimeMS.
- assertEventDoesNotWakeCursor({
- collection: changesCollection,
- awaitDataCursorId: changeCursorId,
- identifyingComment: wholeCollectionStreamComment,
- event: () => assert.writeOK(db.unrelated_collection.insert({_id: "unrelated change"}))
+ // Should have waited for at least 'maxTimeMS'.
+ assert.gt(elapsedMs, 900, "getMore returned before waiting for maxTimeMS");
+ const cursorResponse = result.cursor;
+ // Cursor should be valid with no data.
+ assert.neq(cursorResponse.id, 0);
+ assert.eq(cursorResponse.nextBatch.length, 0);
+}
+
+/**
+ * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will return soon after
+ * 'event' is called, and returns the response from the getMore command.
+ *
+ * @param [Collection] collection - the collection to use in the getMore command.
+ * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
+ * @param [Function] event - the event that should be run during the getMore.
+ */
+function assertEventWakesCursor({collection, awaitDataCursorId, identifyingComment, event}) {
+ // Run the original event, then (while still in the parallel shell) assert that the getMore
+ // finishes soon after. This will be run in a parallel shell, which will not have a variable
+ // 'event' in scope, so we'll have to stringify it here.
+ const thirtyMinutes = 30 * 60 * 1000;
+ const fiveMinutes = 5 * 60 * 1000;
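+    // These generous bounds make a wake-up unambiguous: a getMore that returns within five
+    // minutes must have been woken by 'event' rather than by its thirty-minute timeout.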
+ const {result, elapsedMs} = runGetMoreInParallelWithEvent({
+ collection: collection,
+ awaitDataCursorId: awaitDataCursorId,
+ identifyingComment: identifyingComment,
+ maxTimeMS: thirtyMinutes,
+ event: event,
});
- assert.commandWorked(
- db.runCommand({killCursors: changesCollection.getName(), cursors: [changeCursorId]}));
-
- // Test that changes ignored by filtering in later stages of the pipeline will not cause the
- // cursor to return before the getMore has exceeded maxTimeMS.
- const noInvalidatesComment = "change stream filtering invalidate entries";
- res = assert.commandWorked(db.runCommand({
- aggregate: changesCollection.getName(),
- // This pipeline filters changes to only invalidates, so regular inserts should not cause
- // the awaitData to end early.
- pipeline: [{$changeStream: {}}, {$match: {operationType: "invalidate"}}],
- cursor: {},
- comment: noInvalidatesComment
- }));
- assert.eq(
- res.cursor.firstBatch.length, 0, "did not expect any invalidations on changes collection");
- assert.neq(res.cursor.id, 0);
- assertEventDoesNotWakeCursor({
- collection: changesCollection,
- awaitDataCursorId: res.cursor.id,
- identifyingComment: noInvalidatesComment,
- event: () => assert.writeOK(db.changes.insert({_id: "should not appear"}))
- });
- assert.commandWorked(
- db.runCommand({killCursors: changesCollection.getName(), cursors: [res.cursor.id]}));
+
+ assert.lt(elapsedMs, fiveMinutes);
+
+ return result;
+}
+
+// Refresh all collections which will be required in the course of this test.
+const shellSentinelCollection = assertDropAndRecreateCollection(db, "shell_sentinel");
+const changesCollection = assertDropAndRecreateCollection(db, "changes");
+const unrelatedCollection = assertDropCollection(db, "unrelated_collection");
+
+// Start a change stream cursor.
+const wholeCollectionStreamComment = "change stream on entire collection";
+let res = assert.commandWorked(db.runCommand({
+ aggregate: changesCollection.getName(),
+ // Project out the resume token, since that's subject to change unpredictably.
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ comment: wholeCollectionStreamComment
+}));
+const changeCursorId = res.cursor.id;
+assert.neq(changeCursorId, 0);
+assert.eq(res.cursor.firstBatch.length, 0);
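+// A nonzero cursor id and an empty first batch confirm that the stream is open and waiting
+// for events.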
+
+// Test that an insert during a getMore will wake up the cursor and immediately return with the
+// new result.
+const getMoreResponse = assertEventWakesCursor({
+ collection: changesCollection,
+ awaitDataCursorId: changeCursorId,
+ identifyingComment: wholeCollectionStreamComment,
+ event: () => assert.writeOK(db.changes.insert({_id: "wake up"}))
+});
+assert.eq(getMoreResponse.cursor.nextBatch.length, 1);
+assert.eq(getMoreResponse.cursor.nextBatch[0].operationType,
+ "insert",
+ tojson(getMoreResponse.cursor.nextBatch[0]));
+assert.eq(getMoreResponse.cursor.nextBatch[0].fullDocument,
+ {_id: "wake up"},
+ tojson(getMoreResponse.cursor.nextBatch[0]));
+
+// Test that an insert to an unrelated collection will not cause the change stream to wake up
+// and return an empty batch before reaching the maxTimeMS.
+assertEventDoesNotWakeCursor({
+ collection: changesCollection,
+ awaitDataCursorId: changeCursorId,
+ identifyingComment: wholeCollectionStreamComment,
+ event: () => assert.writeOK(db.unrelated_collection.insert({_id: "unrelated change"}))
+});
+assert.commandWorked(
+ db.runCommand({killCursors: changesCollection.getName(), cursors: [changeCursorId]}));
+
+// Test that changes ignored by filtering in later stages of the pipeline will not cause the
+// cursor to return before the getMore has exceeded maxTimeMS.
+const noInvalidatesComment = "change stream filtering invalidate entries";
+res = assert.commandWorked(db.runCommand({
+ aggregate: changesCollection.getName(),
+ // This pipeline filters changes to only invalidates, so regular inserts should not cause
+ // the awaitData to end early.
+ pipeline: [{$changeStream: {}}, {$match: {operationType: "invalidate"}}],
+ cursor: {},
+ comment: noInvalidatesComment
+}));
+assert.eq(
+ res.cursor.firstBatch.length, 0, "did not expect any invalidations on changes collection");
+assert.neq(res.cursor.id, 0);
+assertEventDoesNotWakeCursor({
+ collection: changesCollection,
+ awaitDataCursorId: res.cursor.id,
+ identifyingComment: noInvalidatesComment,
+ event: () => assert.writeOK(db.changes.insert({_id: "should not appear"}))
+});
+assert.commandWorked(
+ db.runCommand({killCursors: changesCollection.getName(), cursors: [res.cursor.id]}));
}());
diff --git a/jstests/change_streams/pipeline_cannot_modify_id_field.js b/jstests/change_streams/pipeline_cannot_modify_id_field.js
index d43e1ff28a4..20909ab4f9a 100644
--- a/jstests/change_streams/pipeline_cannot_modify_id_field.js
+++ b/jstests/change_streams/pipeline_cannot_modify_id_field.js
@@ -3,144 +3,142 @@
* $changeStream pipeline.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, jsTestName());
+const coll = assertDropAndRecreateCollection(db, jsTestName());
- // Bare-bones $changeStream pipeline which will be augmented during tests.
- const changeStream = [{$changeStream: {}}];
+// Bare-bones $changeStream pipeline which will be augmented during tests.
+const changeStream = [{$changeStream: {}}];
- // Test-cases of transformations that modify or remove _id, and are thus disallowed.
- const idModifyingTransformations = [
- {$project: {_id: 0}},
- {$project: {_id: "newValue"}},
- {$project: {_id: "$otherField"}},
- {$project: {_id: 0, otherField: 0}},
- {$project: {_id: 0, otherField: 1}},
- {$project: {"_id._data": 0}},
- {$project: {"_id._data": 1}},
- {$project: {"_id._data": "newValue"}},
- {$project: {"_id._data": "$_id._data"}}, // Disallowed because it discards _typeBits.
- {$project: {"_id._data": "$otherField"}},
- {$project: {"_id.otherField": 1}},
- {$project: {"_id._typeBits": 0}},
- [
- {$project: {otherField: "$_id"}},
- {$project: {otherField: 0}},
- {$project: {_id: "$otherField"}}
- ],
- {$project: {_id: {data: "$_id._data", typeBits: "$_id._typeBits"}}}, // Fields renamed.
- {$project: {_id: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}}, // Fields reordered.
- {$project: {_id: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}}, // Fields swapped.
- {$set: {_id: "newValue"}},
- {$set: {_id: "$otherField"}},
- {$set: {"_id._data": "newValue"}},
- {$set: {"_id._data": "$otherField"}},
- {$set: {"_id.otherField": "newValue"}}, // New subfield added to _id.
- [
- {$addFields: {otherField: "$_id"}},
- {$set: {otherField: "newValue"}},
- {$set: {_id: "$otherField"}}
- ],
- [
- // Fields renamed.
- {$addFields: {newId: {data: "$_id._data", typeBits: "$_id._typeBits"}}},
- {$set: {_id: "$newId"}}
- ],
- [
- // Fields reordered.
- {$addFields: {newId: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}},
- {$set: {_id: "$newId"}}
- ],
- [
- // Fields swapped.
- {$addFields: {newId: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}},
- {$set: {_id: "$newId"}}
- ],
- {$replaceRoot: {newRoot: {otherField: "$_id"}}},
- {$replaceWith: {otherField: "$_id"}},
- {$redact: {$cond: {if: {$gt: ["$_id", {}]}, then: "$$DESCEND", else: "$$PRUNE"}}} // _id:0
- ];
-
- // Test-cases of projections which are allowed: explicit inclusion of _id, implicit inclusion of
- // _id, renames which retain the full _id field, exclusion of unrelated fields, addition of and
- // modifications to unrelated fields, sequential renames which ultimately preserve _id, etc.
- const idPreservingTransformations = [
- {$project: {_id: 1}},
- {$project: {_id: 1, otherField: 0}},
- {$project: {_id: 1, otherField: 1}},
- {$project: {_id: "$_id", otherField: 1}},
- {$project: {"_id.otherField": 0}},
- {$project: {otherField: 1}},
+// Test cases of transformations that modify or remove _id, and are thus disallowed.
+const idModifyingTransformations = [
+ {$project: {_id: 0}},
+ {$project: {_id: "newValue"}},
+ {$project: {_id: "$otherField"}},
+ {$project: {_id: 0, otherField: 0}},
+ {$project: {_id: 0, otherField: 1}},
+ {$project: {"_id._data": 0}},
+ {$project: {"_id._data": 1}},
+ {$project: {"_id._data": "newValue"}},
+ {$project: {"_id._data": "$_id._data"}}, // Disallowed because it discards _typeBits.
+ {$project: {"_id._data": "$otherField"}},
+ {$project: {"_id.otherField": 1}},
+ {$project: {"_id._typeBits": 0}},
+ [
+ {$project: {otherField: "$_id"}},
{$project: {otherField: 0}},
+ {$project: {_id: "$otherField"}}
+ ],
+ {$project: {_id: {data: "$_id._data", typeBits: "$_id._typeBits"}}}, // Fields renamed.
+ {$project: {_id: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}}, // Fields reordered.
+ {$project: {_id: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}}, // Fields swapped.
+ {$set: {_id: "newValue"}},
+ {$set: {_id: "$otherField"}},
+ {$set: {"_id._data": "newValue"}},
+ {$set: {"_id._data": "$otherField"}},
+ {$set: {"_id.otherField": "newValue"}}, // New subfield added to _id.
+ [
+ {$addFields: {otherField: "$_id"}},
+ {$set: {otherField: "newValue"}},
+ {$set: {_id: "$otherField"}}
+ ],
+ [
+ // Fields renamed.
+ {$addFields: {newId: {data: "$_id._data", typeBits: "$_id._typeBits"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ [
+ // Fields reordered.
+ {$addFields: {newId: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ [
+ // Fields swapped.
+ {$addFields: {newId: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ {$replaceRoot: {newRoot: {otherField: "$_id"}}},
+ {$replaceWith: {otherField: "$_id"}},
+ {$redact: {$cond: {if: {$gt: ["$_id", {}]}, then: "$$DESCEND", else: "$$PRUNE"}}} // _id:0
+];
+
+// Test cases of projections which are allowed: explicit inclusion of _id, implicit inclusion of
+// _id, renames which retain the full _id field, exclusion of unrelated fields, addition of and
+// modifications to unrelated fields, sequential renames which ultimately preserve _id, etc.
+const idPreservingTransformations = [
+ {$project: {_id: 1}},
+ {$project: {_id: 1, otherField: 0}},
+ {$project: {_id: 1, otherField: 1}},
+ {$project: {_id: "$_id", otherField: 1}},
+ {$project: {"_id.otherField": 0}},
+ {$project: {otherField: 1}},
+ {$project: {otherField: 0}},
+ {$project: {otherField: "$_id"}},
+ [
{$project: {otherField: "$_id"}},
- [
- {$project: {otherField: "$_id"}},
- {$project: {otherField: 1}},
- {$project: {_id: "$otherField"}}
- ],
- {$project: {"_id._data": 1, "_id._typeBits": 1}},
- {$project: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
- {$set: {_id: "$_id"}},
- {$addFields: {otherField: "newValue"}},
- {$set: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
- [{$addFields: {otherField: "$_id"}}, {$set: {_id: "$otherField"}}],
- [
- {$addFields: {newId: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
- {$set: {_id: "$newId"}}
- ],
- {$replaceRoot: {newRoot: {_id: "$_id"}}},
- {$replaceWith: {_id: "$_id"}},
- {
- $redact: {
- $cond: {
- if: {
- $or: [
- // Keeps _id, descends into fullDocument.
- {$not: {$isArray: "$tags"}},
- {$gt: [{$size: {$setIntersection: ["$tags", ["USA"]]}}, 0]}
- ]
- },
- then: "$$DESCEND",
- else: "$$PRUNE"
- }
- }
- },
- {$redact: "$$DESCEND"}, // Descends through entire document, retaining all of it.
- {$redact: "$$KEEP"} // Keeps entire document.
- ];
+ {$project: {otherField: 1}},
+ {$project: {_id: "$otherField"}}
+ ],
+ {$project: {"_id._data": 1, "_id._typeBits": 1}},
+ {$project: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
+ {$set: {_id: "$_id"}},
+ {$addFields: {otherField: "newValue"}},
+ {$set: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
+ [{$addFields: {otherField: "$_id"}}, {$set: {_id: "$otherField"}}],
+ [
+ {$addFields: {newId: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ {$replaceRoot: {newRoot: {_id: "$_id"}}},
+ {$replaceWith: {_id: "$_id"}},
+ {
+ $redact: {
+ $cond: {
+ if: {
+ $or: [
+ // Keeps _id, descends into fullDocument.
+ {$not: {$isArray: "$tags"}},
+ {$gt: [{$size: {$setIntersection: ["$tags", ["USA"]]}}, 0]}
+ ]
+ },
+ then: "$$DESCEND",
+ else: "$$PRUNE"
+ }
+ }
+ },
+ {$redact: "$$DESCEND"}, // Descends through entire document, retaining all of it.
+ {$redact: "$$KEEP"} // Keeps entire document.
+];
- let docId = 0;
+let docId = 0;
- // Verify that each of the whitelisted transformations above succeeds.
- for (let transform of idPreservingTransformations) {
- const cmdRes = assert.commandWorked(
- db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
- transform);
- assert.commandWorked(coll.insert({_id: docId++}));
- assert.soon(() => {
- const getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()}), transform);
- return getMoreRes.cursor.nextBatch.length > 0;
- }, transform);
- }
+// Verify that each of the whitelisted transformations above succeeds.
+for (let transform of idPreservingTransformations) {
+ const cmdRes = assert.commandWorked(
+ db.runCommand(
+ {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
+ transform);
+ assert.commandWorked(coll.insert({_id: docId++}));
+ assert.soon(() => {
+ const getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()}), transform);
+ return getMoreRes.cursor.nextBatch.length > 0;
+ }, transform);
+}
- // Verify that each of the blacklisted transformations above are rejected.
- for (let transform of idModifyingTransformations) {
- const cmdRes = assert.commandWorked(
- db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
- transform);
- assert.commandWorked(coll.insert({_id: docId++}));
- assert.soon(() => {
- const getMoreRes =
- db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
- return !getMoreRes.ok &&
- assert.commandFailedWithCode(
- getMoreRes, ErrorCodes.ChangeStreamFatalError, transform);
- }, transform);
- }
+// Verify that each of the blacklisted transformations above are rejected.
+for (let transform of idModifyingTransformations) {
+ const cmdRes = assert.commandWorked(
+ db.runCommand(
+ {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
+ transform);
+ assert.commandWorked(coll.insert({_id: docId++}));
+ assert.soon(() => {
+ const getMoreRes = db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
+ return !getMoreRes.ok &&
+ assert.commandFailedWithCode(getMoreRes, ErrorCodes.ChangeStreamFatalError, transform);
+ }, transform);
+}
}());
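
The invariant this test enforces: the _id of every change stream event is its
resume token, so any stage appended after $changeStream must leave _id (and
its _data and _typeBits subfields) intact. Notably, the aggregate command
itself succeeds either way; an illegal transformation is only detected once an
event flows through the pipeline, at which point the getMore fails with
ChangeStreamFatalError. A condensed sketch of both outcomes, assuming an
ambient db and an illustrative collection name "test":

    // Allowed: an inclusion projection implicitly preserves _id.
    assert.commandWorked(db.runCommand({
        aggregate: "test",
        pipeline: [{$changeStream: {}}, {$project: {otherField: 1}}],
        cursor: {}
    }));

    // Disallowed: excluding _id discards the resume token. The aggregate
    // still succeeds; the failure surfaces on the first getMore that
    // would return an event, with ErrorCodes.ChangeStreamFatalError.
    const res = assert.commandWorked(db.runCommand({
        aggregate: "test",
        pipeline: [{$changeStream: {}}, {$project: {_id: 0}}],
        cursor: {}
    }));
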
diff --git a/jstests/change_streams/report_latest_observed_oplog_timestamp.js b/jstests/change_streams/report_latest_observed_oplog_timestamp.js
index 56754613b47..be5c020ebff 100644
--- a/jstests/change_streams/report_latest_observed_oplog_timestamp.js
+++ b/jstests/change_streams/report_latest_observed_oplog_timestamp.js
@@ -6,93 +6,92 @@
// operation, which does not happen when the operations get grouped into a transaction.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- // Drop and recreate collections to assure a clean run.
- const testName = "report_latest_observed_oplog_timestamp";
- const cursorCollection = assertDropAndRecreateCollection(db, testName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + testName);
+// Drop and recreate collections to ensure a clean run.
+const testName = "report_latest_observed_oplog_timestamp";
+const cursorCollection = assertDropAndRecreateCollection(db, testName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + testName);
- // Get a resume point.
- jsTestLog("Getting a resume point.");
- const batchSize = 2;
- const firstResponse = assert.commandWorked(cursorCollection.runCommand(
- {aggregate: testName, pipeline: [{$changeStream: {}}], cursor: {batchSize: batchSize}}));
- assert.eq(0, firstResponse.cursor.firstBatch.length);
- assert.writeOK(cursorCollection.insert({_id: 0}));
+// Get a resume point.
+jsTestLog("Getting a resume point.");
+const batchSize = 2;
+const firstResponse = assert.commandWorked(cursorCollection.runCommand(
+ {aggregate: testName, pipeline: [{$changeStream: {}}], cursor: {batchSize: batchSize}}));
+assert.eq(0, firstResponse.cursor.firstBatch.length);
+assert.writeOK(cursorCollection.insert({_id: 0}));
- function iterateCursor(initialCursorResponse) {
- const getMoreCollName = initialCursorResponse.cursor.ns.substr(
- initialCursorResponse.cursor.ns.indexOf('.') + 1);
- return assert.commandWorked(cursorCollection.runCommand({
- getMore: initialCursorResponse.cursor.id,
- collection: getMoreCollName,
- batchSize: batchSize
- }));
- }
- const resumeResponse = iterateCursor(firstResponse);
- assert.eq(1, resumeResponse.cursor.nextBatch.length);
- // Because needsMerge was not set, the latest oplog timestamp should not be returned.
- assert.eq(undefined, resumeResponse.$_internalLatestOplogTimestamp);
- const resumeToken = resumeResponse.cursor.nextBatch[0]["_id"];
+function iterateCursor(initialCursorResponse) {
+ const getMoreCollName =
+ initialCursorResponse.cursor.ns.substr(initialCursorResponse.cursor.ns.indexOf('.') + 1);
+ return assert.commandWorked(cursorCollection.runCommand({
+ getMore: initialCursorResponse.cursor.id,
+ collection: getMoreCollName,
+ batchSize: batchSize
+ }));
+}
+const resumeResponse = iterateCursor(firstResponse);
+assert.eq(1, resumeResponse.cursor.nextBatch.length);
+// Because needsMerge was not set, the latest oplog timestamp should not be returned.
+assert.eq(undefined, resumeResponse.$_internalLatestOplogTimestamp);
+const resumeToken = resumeResponse.cursor.nextBatch[0]["_id"];
- // Seed the collection with enough documents to fit in one batch.
- // Note the resume document is included; when needsMerge is true, we see the resume token
- // in the stream.
- jsTestLog("Adding documents to collection.");
- for (let i = 1; i < batchSize * 2; i++) {
- assert.writeOK(cursorCollection.insert({_id: i}));
- }
+// Seed the collection with enough documents to fit in one batch.
+// Note the resume document is included; when needsMerge is true, we see the resume token
+// in the stream.
+jsTestLog("Adding documents to collection.");
+for (let i = 1; i < batchSize * 2; i++) {
+ assert.writeOK(cursorCollection.insert({_id: i}));
+}
- // Look at one batch's worth.
- jsTestLog("Testing that operation time is present on initial aggregate command.");
- const cursorResponse = assert.commandWorked(cursorCollection.runCommand({
- aggregate: testName,
- // The latest observed optime is only reported when needsMerge is set, and needsMerge
- // requires fromMongos be set.
- needsMerge: true,
- fromMongos: true,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {batchSize: batchSize}
- }));
- const firstBatchOplogTimestamp = cursorResponse.$_internalLatestOplogTimestamp;
- assert.neq(undefined, firstBatchOplogTimestamp, tojson(cursorResponse));
+// Look at one batch's worth.
+jsTestLog("Testing that operation time is present on initial aggregate command.");
+const cursorResponse = assert.commandWorked(cursorCollection.runCommand({
+ aggregate: testName,
+ // The latest observed optime is only reported when needsMerge is set, and needsMerge
+ // requires fromMongos be set.
+ needsMerge: true,
+ fromMongos: true,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {batchSize: batchSize}
+}));
+const firstBatchOplogTimestamp = cursorResponse.$_internalLatestOplogTimestamp;
+assert.neq(undefined, firstBatchOplogTimestamp, tojson(cursorResponse));
- // Iterate the cursor and assert that the observed operation time advanced.
- jsTestLog("Testing that operation time advances with getMore.");
- let getMoreResponse = iterateCursor(cursorResponse);
- const getMoreOplogTimestamp = getMoreResponse.$_internalLatestOplogTimestamp;
- assert.neq(undefined, getMoreOplogTimestamp, tojson(getMoreResponse));
- // SERVER-21861 Use bsonWoCompare to avoid the shell's flawed comparison of timestamps.
- assert.eq(
- bsonWoCompare(getMoreOplogTimestamp, firstBatchOplogTimestamp),
- 1,
- `Expected oplog timestamp from getMore (${getMoreOplogTimestamp}) to be larger than the` +
- ` oplog timestamp from the first batch (${firstBatchOplogTimestamp})`);
+// Iterate the cursor and assert that the observed operation time advanced.
+jsTestLog("Testing that operation time advances with getMore.");
+let getMoreResponse = iterateCursor(cursorResponse);
+const getMoreOplogTimestamp = getMoreResponse.$_internalLatestOplogTimestamp;
+assert.neq(undefined, getMoreOplogTimestamp, tojson(getMoreResponse));
+// SERVER-21861 Use bsonWoCompare to avoid the shell's flawed comparison of timestamps.
+assert.eq(bsonWoCompare(getMoreOplogTimestamp, firstBatchOplogTimestamp),
+ 1,
+ `Expected oplog timestamp from getMore (${getMoreOplogTimestamp}) to be larger than the` +
+ ` oplog timestamp from the first batch (${firstBatchOplogTimestamp})`);
- // Now make sure that the reported operation time advances if there are writes to an unrelated
- // collection.
- jsTestLog("Testing that operation time advances with writes to an unrelated collection.");
+// Now make sure that the reported operation time advances if there are writes to an unrelated
+// collection.
+jsTestLog("Testing that operation time advances with writes to an unrelated collection.");
- // First make sure there is nothing left in our cursor.
- getMoreResponse = iterateCursor(cursorResponse);
- assert.eq(getMoreResponse.cursor.nextBatch, []);
+// First make sure there is nothing left in our cursor.
+getMoreResponse = iterateCursor(cursorResponse);
+assert.eq(getMoreResponse.cursor.nextBatch, []);
- // Record that operation time, then test that the reported time advances on an insert to an
- // unrelated collection.
- const oplogTimeAtExhaust = getMoreResponse.$_internalLatestOplogTimestamp;
- assert.neq(undefined, oplogTimeAtExhaust, tojson(getMoreResponse));
- assert.writeOK(otherCollection.insert({}));
+// Record that operation time, then test that the reported time advances on an insert to an
+// unrelated collection.
+const oplogTimeAtExhaust = getMoreResponse.$_internalLatestOplogTimestamp;
+assert.neq(undefined, oplogTimeAtExhaust, tojson(getMoreResponse));
+assert.writeOK(otherCollection.insert({}));
- getMoreResponse = iterateCursor(cursorResponse);
- const oplogTimeAfterUnrelatedInsert = getMoreResponse.$_internalLatestOplogTimestamp;
- assert.neq(undefined, oplogTimeAtExhaust, tojson(getMoreResponse));
- // SERVER-21861 Use bsonWoCompare to avoid the shell's flawed comparison of timestamps.
- assert.eq(
- bsonWoCompare(oplogTimeAfterUnrelatedInsert, oplogTimeAtExhaust),
- 1,
- `Expected oplog timestamp from after unrelated insert (${oplogTimeAfterUnrelatedInsert})` +
- ` to be larger than the oplog timestamp at time of exhaust (${oplogTimeAtExhaust})`);
+getMoreResponse = iterateCursor(cursorResponse);
+const oplogTimeAfterUnrelatedInsert = getMoreResponse.$_internalLatestOplogTimestamp;
+assert.neq(undefined, oplogTimeAfterUnrelatedInsert, tojson(getMoreResponse));
+// SERVER-21861 Use bsonWoCompare to avoid the shell's flawed comparison of timestamps.
+assert.eq(
+ bsonWoCompare(oplogTimeAfterUnrelatedInsert, oplogTimeAtExhaust),
+ 1,
+ `Expected oplog timestamp from after unrelated insert (${oplogTimeAfterUnrelatedInsert})` +
+ ` to be larger than the oplog timestamp at time of exhaust (${oplogTimeAtExhaust})`);
})();
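
The request shape at the heart of this test bears spelling out:
$_internalLatestOplogTimestamp is an internal field, returned only when the
server is told it is producing one half of a merged cursor, which is what the
needsMerge and fromMongos flags assert. These are internal testing options,
not a stable API. A condensed sketch, with an illustrative collection name
"coll":

    // Ask for internal latest-oplog-timestamp reporting. needsMerge is
    // only accepted when fromMongos is also set.
    const res = assert.commandWorked(db.runCommand({
        aggregate: "coll",
        needsMerge: true,
        fromMongos: true,
        pipeline: [{$changeStream: {}}],
        cursor: {batchSize: 2}
    }));
    assert.neq(undefined, res.$_internalLatestOplogTimestamp);
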
diff --git a/jstests/change_streams/report_post_batch_resume_token.js b/jstests/change_streams/report_post_batch_resume_token.js
index 1055288a9f5..1e9a110c99f 100644
--- a/jstests/change_streams/report_post_batch_resume_token.js
+++ b/jstests/change_streams/report_post_batch_resume_token.js
@@ -4,188 +4,188 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- // Drop and recreate collections to assure a clean run.
- const collName = "report_post_batch_resume_token";
- const testCollection = assertDropAndRecreateCollection(db, collName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
- const adminDB = db.getSiblingDB("admin");
-
- let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
- const batchSize = 2;
-
- // Test that postBatchResumeToken is present on an initial aggregate of batchSize: 0.
- let csCursor = testCollection.watch([], {cursor: {batchSize: 0}});
- assert.eq(csCursor.objsLeftInBatch(), 0);
- let initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Test that the PBRT does not advance beyond its initial value for a change stream whose
- // startAtOperationTime is in the future, even as writes are made to the test collection.
- const timestampIn2100 = Timestamp(4102444800, 1);
- csCursor = testCollection.watch([], {startAtOperationTime: timestampIn2100});
- assert.eq(csCursor.objsLeftInBatch(), 0);
- initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Write some documents to the test collection.
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
-
- // Verify that no events are returned and the PBRT does not advance or go backwards.
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- let getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(initialAggPBRT, getMorePBRT), 0);
-
- // Test that postBatchResumeToken is present on empty initial aggregate batch.
- csCursor = testCollection.watch();
- assert.eq(csCursor.objsLeftInBatch(), 0);
- initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Test that postBatchResumeToken is present on empty getMore batch.
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- getMorePBRT = csCursor.getResumeToken();
- assert.neq(undefined, getMorePBRT);
- assert.gte(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
-
- // Test that postBatchResumeToken advances with returned events. Insert one document into the
- // collection and consume the resulting change stream event.
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+// Drop and recreate collections to ensure a clean run.
+const collName = "report_post_batch_resume_token";
+const testCollection = assertDropAndRecreateCollection(db, collName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
+const adminDB = db.getSiblingDB("admin");
+
+let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
+const batchSize = 2;
+
+// Test that postBatchResumeToken is present on an initial aggregate of batchSize: 0.
+let csCursor = testCollection.watch([], {cursor: {batchSize: 0}});
+assert.eq(csCursor.objsLeftInBatch(), 0);
+let initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Test that the PBRT does not advance beyond its initial value for a change stream whose
+// startAtOperationTime is in the future, even as writes are made to the test collection.
+const timestampIn2100 = Timestamp(4102444800, 1);
+csCursor = testCollection.watch([], {startAtOperationTime: timestampIn2100});
+assert.eq(csCursor.objsLeftInBatch(), 0);
+initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Write some documents to the test collection.
+for (let i = 0; i < 5; ++i) {
assert.commandWorked(testCollection.insert({_id: docId++}));
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert(csCursor.objsLeftInBatch() == 1);
-
- // Because the retrieved event is the most recent entry in the oplog, the PBRT should be equal
- // to the resume token of the last item in the batch and greater than the initial PBRT.
- let resumeTokenFromDoc = csCursor.next()._id;
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, resumeTokenFromDoc), 0);
- assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
-
- // Now seed the collection with enough documents to fit in two batches.
- for (let i = 0; i < batchSize * 2; i++) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
-
- // Test that the PBRT for a resumed stream is the given resume token if no result are returned.
- csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc, cursor: {batchSize: 0}});
- assert.eq(csCursor.objsLeftInBatch(), 0);
- initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
- assert.eq(bsonWoCompare(initialAggPBRT, resumeTokenFromDoc), 0);
-
- // Test that postBatchResumeToken advances with getMore. Iterate the cursor and assert that the
- // observed postBatchResumeToken advanced.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
-
- // The postBatchResumeToken is again equal to the final token in the batch, and greater than the
- // PBRT from the initial response.
- let eventFromCursor = null;
- while (csCursor.objsLeftInBatch()) {
- eventFromCursor = csCursor.next();
- resumeTokenFromDoc = eventFromCursor._id;
- }
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(resumeTokenFromDoc, getMorePBRT), 0);
- assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
-
- // Test that postBatchResumeToken advances with writes to an unrelated collection. First make
- // sure there is nothing left in our cursor, and obtain the latest PBRT...
- while (eventFromCursor.fullDocument._id < (docId - 1)) {
- assert.soon(() => csCursor.hasNext());
- eventFromCursor = csCursor.next();
- }
- assert(!csCursor.hasNext());
- let previousGetMorePBRT = csCursor.getResumeToken();
- assert.neq(undefined, previousGetMorePBRT);
-
- // ... then test that it advances on an insert to an unrelated collection.
- assert.commandWorked(otherCollection.insert({}));
- assert.soon(() => {
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- getMorePBRT = csCursor.getResumeToken();
- return bsonWoCompare(getMorePBRT, previousGetMorePBRT) > 0;
- });
-
- // Insert two documents into the collection which are of the maximum BSON object size.
- const bsonUserSizeLimit = assert.commandWorked(adminDB.isMaster()).maxBsonObjectSize;
- assert.gt(bsonUserSizeLimit, 0);
- for (let i = 0; i < 2; ++i) {
- const docToInsert = {_id: docId++, padding: ""};
- docToInsert.padding = "a".repeat(bsonUserSizeLimit - Object.bsonsize(docToInsert));
- assert.commandWorked(testCollection.insert(docToInsert));
- }
-
- // Test that we return the correct postBatchResumeToken in the event that the batch hits the
- // byte size limit. Despite the fact that the batchSize is 2, we should only see 1 result,
- // because the second result cannot fit in the batch.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // Obtain the resume token and the PBRT from the first document.
- resumeTokenFromDoc = csCursor.next()._id;
- getMorePBRT = csCursor.getResumeToken();
-
- // Now retrieve the second event and confirm that the PBRTs and resume tokens are in-order.
- previousGetMorePBRT = getMorePBRT;
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
- const resumeTokenFromSecondDoc = csCursor.next()._id;
- getMorePBRT = csCursor.getResumeToken();
- assert.gte(bsonWoCompare(previousGetMorePBRT, resumeTokenFromDoc), 0);
- assert.gt(bsonWoCompare(resumeTokenFromSecondDoc, previousGetMorePBRT), 0);
- assert.gte(bsonWoCompare(getMorePBRT, resumeTokenFromSecondDoc), 0);
-
- // Test that the PBRT is correctly updated when reading events from within a transaction.
- csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(db.getName());
-
- const sessionColl = sessionDB[testCollection.getName()];
- const sessionOtherColl = sessionDB[otherCollection.getName()];
- session.startTransaction();
-
- // Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
- for (let i = 0; i < 3; ++i) {
- assert.commandWorked(sessionColl.insert({_id: docId++}));
- }
- assert.commandWorked(sessionOtherColl.insert({}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
-
- // Grab the next 2 events, which should be the first 2 events in the transaction.
- previousGetMorePBRT = getMorePBRT;
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 2);
-
- // The clusterTime should be the same on each, but the resume token keeps advancing.
- const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
- const txnClusterTime = txnEvent1.clusterTime;
- assert.eq(txnEvent2.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent1._id, previousGetMorePBRT), 0);
- assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
-
- // The PBRT of the first transaction batch is equal to the last document's resumeToken.
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
-
- // Now get the next batch. This contains the third of the four transaction operations.
- previousGetMorePBRT = getMorePBRT;
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // The clusterTime of this event is the same as the two events from the previous batch, but its
- // resume token is greater than the previous PBRT.
- const txnEvent3 = csCursor.next();
- assert.eq(txnEvent3.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
-
- // Because we wrote to the unrelated collection, the final event in the transaction does not
- // appear in the batch. Confirm that the postBatchResumeToken has been set correctly.
+}
+
+// Verify that no events are returned and the PBRT does not advance or go backwards.
+assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
+let getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(initialAggPBRT, getMorePBRT), 0);
+
+// Test that postBatchResumeToken is present on empty initial aggregate batch.
+csCursor = testCollection.watch();
+assert.eq(csCursor.objsLeftInBatch(), 0);
+initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Test that postBatchResumeToken is present on empty getMore batch.
+assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
+getMorePBRT = csCursor.getResumeToken();
+assert.neq(undefined, getMorePBRT);
+assert.gte(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
+
+// Test that postBatchResumeToken advances with returned events. Insert one document into the
+// collection and consume the resulting change stream event.
+assert.commandWorked(testCollection.insert({_id: docId++}));
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// Because the retrieved event is the most recent entry in the oplog, the PBRT should be equal
+// to the resume token of the last item in the batch and greater than the initial PBRT.
+let resumeTokenFromDoc = csCursor.next()._id;
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, resumeTokenFromDoc), 0);
+assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
+
+// Now seed the collection with enough documents to fit in two batches.
+for (let i = 0; i < batchSize * 2; i++) {
+ assert.commandWorked(testCollection.insert({_id: docId++}));
+}
+
+// Test that the PBRT for a resumed stream is the given resume token if no results are returned.
+csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc, cursor: {batchSize: 0}});
+assert.eq(csCursor.objsLeftInBatch(), 0);
+initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+assert.eq(bsonWoCompare(initialAggPBRT, resumeTokenFromDoc), 0);
+
+// Test that postBatchResumeToken advances with getMore. Iterate the cursor and assert that the
+// observed postBatchResumeToken advanced.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+
+// The postBatchResumeToken is again equal to the final token in the batch, and greater than the
+// PBRT from the initial response.
+let eventFromCursor = null;
+while (csCursor.objsLeftInBatch()) {
+ eventFromCursor = csCursor.next();
+ resumeTokenFromDoc = eventFromCursor._id;
+}
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(resumeTokenFromDoc, getMorePBRT), 0);
+assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
+
+// Test that postBatchResumeToken advances with writes to an unrelated collection. First make
+// sure there is nothing left in our cursor, and obtain the latest PBRT...
+while (eventFromCursor.fullDocument._id < (docId - 1)) {
+ assert.soon(() => csCursor.hasNext());
+ eventFromCursor = csCursor.next();
+}
+assert(!csCursor.hasNext());
+let previousGetMorePBRT = csCursor.getResumeToken();
+assert.neq(undefined, previousGetMorePBRT);
+
+// ... then test that it advances on an insert to an unrelated collection.
+assert.commandWorked(otherCollection.insert({}));
+assert.soon(() => {
+ assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
getMorePBRT = csCursor.getResumeToken();
- assert.gte(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
+ return bsonWoCompare(getMorePBRT, previousGetMorePBRT) > 0;
+});
+
+// Insert two documents into the collection which are of the maximum BSON object size.
+const bsonUserSizeLimit = assert.commandWorked(adminDB.isMaster()).maxBsonObjectSize;
+assert.gt(bsonUserSizeLimit, 0);
+for (let i = 0; i < 2; ++i) {
+ const docToInsert = {_id: docId++, padding: ""};
+ docToInsert.padding = "a".repeat(bsonUserSizeLimit - Object.bsonsize(docToInsert));
+ assert.commandWorked(testCollection.insert(docToInsert));
+}
+
+// Test that we return the correct postBatchResumeToken in the event that the batch hits the
+// byte size limit. Despite the fact that the batchSize is 2, we should only see 1 result,
+// because the second result cannot fit in the batch.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// Obtain the resume token and the PBRT from the first document.
+resumeTokenFromDoc = csCursor.next()._id;
+getMorePBRT = csCursor.getResumeToken();
+
+// Now retrieve the second event and confirm that the PBRTs and resume tokens are in-order.
+previousGetMorePBRT = getMorePBRT;
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+const resumeTokenFromSecondDoc = csCursor.next()._id;
+getMorePBRT = csCursor.getResumeToken();
+assert.gte(bsonWoCompare(previousGetMorePBRT, resumeTokenFromDoc), 0);
+assert.gt(bsonWoCompare(resumeTokenFromSecondDoc, previousGetMorePBRT), 0);
+assert.gte(bsonWoCompare(getMorePBRT, resumeTokenFromSecondDoc), 0);
+
+// Test that the PBRT is correctly updated when reading events from within a transaction.
+csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(db.getName());
+
+const sessionColl = sessionDB[testCollection.getName()];
+const sessionOtherColl = sessionDB[otherCollection.getName()];
+session.startTransaction();
+
+// Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
+for (let i = 0; i < 3; ++i) {
+ assert.commandWorked(sessionColl.insert({_id: docId++}));
+}
+assert.commandWorked(sessionOtherColl.insert({}));
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
+
+// Grab the next 2 events, which should be the first 2 events in the transaction.
+previousGetMorePBRT = getMorePBRT;
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 2);
+
+// The clusterTime should be the same on each, but the resume token keeps advancing.
+const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
+const txnClusterTime = txnEvent1.clusterTime;
+assert.eq(txnEvent2.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent1._id, previousGetMorePBRT), 0);
+assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
+
+// The PBRT of the first transaction batch is equal to the last document's resumeToken.
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
+
+// Now get the next batch. This contains the third of the four transaction operations.
+previousGetMorePBRT = getMorePBRT;
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// The clusterTime of this event is the same as the two events from the previous batch, but its
+// resume token is greater than the previous PBRT.
+const txnEvent3 = csCursor.next();
+assert.eq(txnEvent3.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
+
+// Because we wrote to the unrelated collection, the final event in the transaction does not
+// appear in the batch. Confirm that the postBatchResumeToken has been set correctly.
+getMorePBRT = csCursor.getResumeToken();
+assert.gte(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
})();
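
The shell helpers used throughout this test wrap the postBatchResumeToken:
getResumeToken() on a change stream cursor returns the PBRT from the most
recent batch, and that token is a valid resume point even when the batch
contained no events. A minimal resume sketch, assuming an ambient db and an
illustrative collection name "test":

    // Open a stream and capture the PBRT from an empty initial batch.
    let csCursor = db.test.watch([], {cursor: {batchSize: 0}});
    const pbrt = csCursor.getResumeToken();  // defined even with no events
    csCursor.close();

    // Resume from the captured token; the new stream reports only
    // events that occurred after that point in the oplog.
    csCursor = db.test.watch([], {resumeAfter: pbrt});
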
diff --git a/jstests/change_streams/required_as_first_stage.js b/jstests/change_streams/required_as_first_stage.js
index bdc0b43ba0c..2c5128f4865 100644
--- a/jstests/change_streams/required_as_first_stage.js
+++ b/jstests/change_streams/required_as_first_stage.js
@@ -5,50 +5,48 @@
// recognize the intentionally malformed aggregations that we test here.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- const coll = assertDropAndRecreateCollection(db, "change_stream_required_as_first_stage");
-
- assertErrorCode(coll, [{$match: {z: 34}}, {$changeStream: {}}], 40602);
- assertErrorCode(coll, [{$indexStats: {}}, {$changeStream: {}}], 40602);
- assertErrorCode(
- coll,
- [{$indexStats: {}}, {$changeStream: {}}, {$match: {test: "this is an extra stage"}}],
- 40602);
-
- let error = assert.throws(() => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}]));
- assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
-
- error = assert.throws(
- () => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}], {allowDiskUse: true}));
- assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
-
- error = assert.throws(() => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}]));
- assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
-
- // This one has a different error code because of conflicting host type requirements: the $group
- // needs to merge on a shard, but the $changeStream needs to merge on mongos. This doesn't
- // happen for the $sort because the half of the $sort running on mongos is pre-sorted, and so
- // won't need disk space.
- error = assert.throws(
- () => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}], {allowDiskUse: true}));
- assert.contains(
- error.code, [40602, ErrorCodes.IllegalOperation], "Unexpected error: " + tojson(error));
-
- // Test that a $changeStream stage is not allowed within a $facet stage.
- assertErrorCode(coll, [{$facet: {testPipe: [{$changeStream: {}}]}}], 40600);
- assertErrorCode(coll,
- [{
- $facet: {
- testPipe: [
- {$indexStats: {}},
- {$changeStream: {}},
- {$match: {test: "this is an extra stage"}}
- ]
- }
- }],
- 40600);
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+const coll = assertDropAndRecreateCollection(db, "change_stream_required_as_first_stage");
+
+assertErrorCode(coll, [{$match: {z: 34}}, {$changeStream: {}}], 40602);
+assertErrorCode(coll, [{$indexStats: {}}, {$changeStream: {}}], 40602);
+assertErrorCode(
+ coll,
+ [{$indexStats: {}}, {$changeStream: {}}, {$match: {test: "this is an extra stage"}}],
+ 40602);
+
+let error = assert.throws(() => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}]));
+assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
+
+error = assert.throws(
+ () => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}], {allowDiskUse: true}));
+assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
+
+error = assert.throws(() => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}]));
+assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
+
+// This one has a different error code because of conflicting host type requirements: the $group
+// needs to merge on a shard, but the $changeStream needs to merge on mongos. This doesn't
+// happen for the $sort because the half of the $sort running on mongos is pre-sorted, and so
+// won't need disk space.
+error = assert.throws(
+ () => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}], {allowDiskUse: true}));
+assert.contains(
+ error.code, [40602, ErrorCodes.IllegalOperation], "Unexpected error: " + tojson(error));
+
+// Test that a $changeStream stage is not allowed within a $facet stage.
+assertErrorCode(coll, [{$facet: {testPipe: [{$changeStream: {}}]}}], 40600);
+assertErrorCode(
+ coll,
+ [{
+ $facet: {
+ testPipe:
+ [{$indexStats: {}}, {$changeStream: {}}, {$match: {test: "this is an extra stage"}}]
+ }
+ }],
+ 40600);
}());
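
The rule under test: $changeStream must be the first stage of the outermost
pipeline and may never appear inside $facet. A compact sketch of one passing
and one failing shape, assuming an ambient db and an illustrative collection
name "test":

    // Valid: $changeStream leads the pipeline; ordinary stages may follow.
    assert.commandWorked(db.runCommand({
        aggregate: "test",
        pipeline: [{$changeStream: {}}, {$match: {operationType: "insert"}}],
        cursor: {}
    }));

    // Invalid: any stage ahead of $changeStream is rejected with 40602.
    assert.commandFailedWithCode(
        db.runCommand(
            {aggregate: "test", pipeline: [{$match: {}}, {$changeStream: {}}], cursor: {}}),
        40602);
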
diff --git a/jstests/change_streams/resume_from_high_water_mark_token.js b/jstests/change_streams/resume_from_high_water_mark_token.js
index 973fc32d9c9..edd34db162b 100644
--- a/jstests/change_streams/resume_from_high_water_mark_token.js
+++ b/jstests/change_streams/resume_from_high_water_mark_token.js
@@ -2,282 +2,265 @@
* Tests that a synthetic high-water-mark (HWM) token obeys the same semantics as a regular token.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For runCommandChangeStreamPassthroughAware.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For runCommandChangeStreamPassthroughAware.
- // Drop the test collections to assure a clean run.
- const collName = jsTestName();
- const otherCollName = "unrelated_" + collName;
- assertDropCollection(db, collName);
- assertDropCollection(db, otherCollName);
+// Drop the test collections to ensure a clean run.
+const collName = jsTestName();
+const otherCollName = "unrelated_" + collName;
+assertDropCollection(db, collName);
+assertDropCollection(db, otherCollName);
- // Helper function to ensure that the specified command is not modified by the passthroughs.
- function runExactCommand(db, cmdObj) {
- const doNotModifyInPassthroughs = true;
- return runCommandChangeStreamPassthroughAware(db, cmdObj, doNotModifyInPassthroughs);
- }
+// Helper function to ensure that the specified command is not modified by the passthroughs.
+function runExactCommand(db, cmdObj) {
+ const doNotModifyInPassthroughs = true;
+ return runCommandChangeStreamPassthroughAware(db, cmdObj, doNotModifyInPassthroughs);
+}
- let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
+let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
- // Open a stream on the test collection, before the collection has actually been created. Make
- // sure that this command is not modified in the passthroughs, since this behaviour is only
- // relevant for single-collection streams.
- let cmdResBeforeCollExists = assert.commandWorked(
- runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+// Open a stream on the test collection, before the collection has actually been created. Make
+// sure that this command is not modified in the passthroughs, since this behaviour is only
+// relevant for single-collection streams.
+let cmdResBeforeCollExists = assert.commandWorked(
+ runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- // We should be able to retrieve a postBatchResumeToken (PBRT) even with no collection present.
- let csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
- let pbrtBeforeCollExists = csCursor.getResumeToken();
- assert.neq(undefined, pbrtBeforeCollExists);
- csCursor.close();
+// We should be able to retrieve a postBatchResumeToken (PBRT) even with no collection present.
+let csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
+let pbrtBeforeCollExists = csCursor.getResumeToken();
+assert.neq(undefined, pbrtBeforeCollExists);
+csCursor.close();
- // We can resumeAfter and startAfter the token while the collection still does not exist.
- for (let resumeType of["startAfter", "resumeAfter"]) {
- cmdResBeforeCollExists = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {[resumeType]: pbrtBeforeCollExists}},
- {
- $match: {
- $or: [
- {"fullDocument._id": "INSERT_ONE"},
- {"fullDocument._id": "INSERT_TWO"}
- ]
- }
- }
- ],
- cursor: {}
- }));
- }
- csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
-
- // If the collection is then created with a case-insensitive collation, the resumed stream
- // continues to use the simple collation. We see 'INSERT_TWO' but not 'insert_one'.
- const testCollationCollection =
- assertCreateCollection(db, collName, {collation: {locale: "en_US", strength: 2}});
- assert.commandWorked(testCollationCollection.insert({_id: "insert_one"}));
- assert.commandWorked(testCollationCollection.insert({_id: "INSERT_TWO"}));
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
- csCursor.close();
-
- // We can resume from the pre-creation high water mark if we do not specify a collation...
- let cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+// We can resumeAfter and startAfter the token while the collection still does not exist.
+for (let resumeType of ["startAfter", "resumeAfter"]) {
+ cmdResBeforeCollExists = assert.commandWorked(runExactCommand(db, {
aggregate: collName,
pipeline: [
- {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
+ {$changeStream: {[resumeType]: pbrtBeforeCollExists}},
{
- $match:
- {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
+ $match:
+ {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
}
],
cursor: {}
}));
+}
+csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
- // ... but we will not inherit the collection's case-insensitive collation, instead defaulting
- // to the simple collation. We will therefore match 'INSERT_TWO' but not 'insert_one'.
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
- csCursor.close();
+// If the collection is then created with a case-insensitive collation, the resumed stream
+// continues to use the simple collation. We see 'INSERT_TWO' but not 'insert_one'.
+const testCollationCollection =
+ assertCreateCollection(db, collName, {collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(testCollationCollection.insert({_id: "insert_one"}));
+assert.commandWorked(testCollationCollection.insert({_id: "INSERT_TWO"}));
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+csCursor.close();
- // If we do specify a non-simple collation, it will be adopted by the pipeline.
- cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
- {
- $match:
- {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
- }
- ],
- collation: {locale: "en_US", strength: 2},
- cursor: {}
- }));
+// We can resume from the pre-creation high water mark if we do not specify a collation...
+let cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}}
+ ],
+ cursor: {}
+}));
- // Now we match both 'insert_one' and 'INSERT_TWO'.
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "insert_one"});
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
- csCursor.close();
+// ... but we will not inherit the collection's case-insensitive collation, instead defaulting
+// to the simple collation. We will therefore match 'INSERT_TWO' but not 'insert_one'.
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+csCursor.close();
- // Now open a change stream with batchSize:0 in order to produce a new high water mark.
- const cmdResCollWithCollation = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {}},
- ],
- cursor: {batchSize: 0}
- }));
- csCursor = new DBCommandCursor(db, cmdResCollWithCollation);
- const hwmFromCollWithCollation = csCursor.getResumeToken();
- assert.neq(undefined, hwmFromCollWithCollation);
- csCursor.close();
+// If we do specify a non-simple collation, it will be adopted by the pipeline.
+cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}}
+ ],
+ collation: {locale: "en_US", strength: 2},
+ cursor: {}
+}));
- // Insert two more documents into the collection for testing purposes.
- assert.commandWorked(testCollationCollection.insert({_id: "insert_three"}));
- assert.commandWorked(testCollationCollection.insert({_id: "INSERT_FOUR"}));
+// Now we match both 'insert_one' and 'INSERT_TWO'.
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "insert_one"});
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+csCursor.close();
- // We can resume the stream on the collection using the HWM...
- const cmdResResumeWithCollation = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {resumeAfter: hwmFromCollWithCollation}},
- {
- $match: {
- $or: [
- {"fullDocument._id": "INSERT_THREE"},
- {"fullDocument._id": "INSERT_FOUR"}
- ]
- }
- }
- ],
- cursor: {}
- }));
- csCursor = new DBCommandCursor(db, cmdResResumeWithCollation);
+// Now open a change stream with batchSize:0 in order to produce a new high water mark.
+const cmdResCollWithCollation = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {}},
+ ],
+ cursor: {batchSize: 0}
+}));
+csCursor = new DBCommandCursor(db, cmdResCollWithCollation);
+const hwmFromCollWithCollation = csCursor.getResumeToken();
+assert.neq(undefined, hwmFromCollWithCollation);
+csCursor.close();
- // ... but we do not inherit the collection's case-insensitive collation, matching 'INSERT_FOUR'
- // but not the preceding 'insert_three'.
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_FOUR"});
- csCursor.close();
+// Insert two more documents into the collection for testing purposes.
+assert.commandWorked(testCollationCollection.insert({_id: "insert_three"}));
+assert.commandWorked(testCollationCollection.insert({_id: "INSERT_FOUR"}));
- // Drop the collection and obtain a new pre-creation high water mark. We will use this later.
- assertDropCollection(db, collName);
- cmdResBeforeCollExists = assert.commandWorked(
- runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
- pbrtBeforeCollExists = csCursor.getResumeToken();
- assert.neq(undefined, pbrtBeforeCollExists);
- csCursor.close();
+// We can resume the stream on the collection using the HWM...
+const cmdResResumeWithCollation = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {resumeAfter: hwmFromCollWithCollation}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_THREE"}, {"fullDocument._id": "INSERT_FOUR"}]}}
+ ],
+ cursor: {}
+}));
+csCursor = new DBCommandCursor(db, cmdResResumeWithCollation);
- // Now create each of the test collections with the default simple collation.
- const testCollection = assertCreateCollection(db, collName);
- const otherCollection = assertCreateCollection(db, otherCollName);
- const adminDB = db.getSiblingDB("admin");
+// ... but we do not inherit the collection's case-insensitive collation, matching 'INSERT_FOUR'
+// but not the preceding 'insert_three'.
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_FOUR"});
+csCursor.close();
- // Open a stream on the test collection, and write a document to it.
- csCursor = testCollection.watch();
- assert.commandWorked(testCollection.insert({_id: docId++}));
+// Drop the collection and obtain a new pre-creation high water mark. We will use this later.
+assertDropCollection(db, collName);
+cmdResBeforeCollExists = assert.commandWorked(
+ runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
+pbrtBeforeCollExists = csCursor.getResumeToken();
+assert.neq(undefined, pbrtBeforeCollExists);
+csCursor.close();
- // Write an event to the unrelated collection in order to advance the PBRT, and then consume all
- // events. When we see a PBRT that is greater than the timestamp of the last event (stored in
- // 'relatedEvent'), we know it must be a synthetic high-water-mark token.
- //
- // Note that the first insert into the unrelated collection may not be enough to advance the
- // PBRT; some passthroughs will group the unrelated write into a transaction with the related
- // write, giving them the same timestamp. We put the unrelated insert into the assert.soon loop,
- // so that it will eventually get its own transaction with a new timestamp.
- let relatedEvent = null;
- let hwmToken = null;
- assert.soon(() => {
- assert.commandWorked(otherCollection.insert({}));
- if (csCursor.hasNext()) {
- relatedEvent = csCursor.next();
- }
- assert.eq(csCursor.objsLeftInBatch(), 0);
- hwmToken = csCursor.getResumeToken();
- assert.neq(undefined, hwmToken);
- return relatedEvent && bsonWoCompare(hwmToken, relatedEvent._id) > 0;
- });
- csCursor.close();
+// Now create each of the test collections with the default simple collation.
+const testCollection = assertCreateCollection(db, collName);
+const otherCollection = assertCreateCollection(db, otherCollName);
+const adminDB = db.getSiblingDB("admin");
- // Now write some further documents to the collection before attempting to resume.
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
+// Open a stream on the test collection, and write a document to it.
+csCursor = testCollection.watch();
+assert.commandWorked(testCollection.insert({_id: docId++}));
- // We can resumeAfter and startAfter the high water mark. We only see the latest 5 documents.
- for (let resumeType of["startAfter", "resumeAfter"]) {
- csCursor = testCollection.watch([], {[resumeType]: hwmToken});
- assert.soon(() => {
- if (csCursor.hasNext()) {
- relatedEvent = csCursor.next();
- assert.gt(bsonWoCompare(relatedEvent._id, hwmToken), 0);
- // We never see the first document, whose _id was 0.
- assert.gt(relatedEvent.fullDocument._id, 0);
- }
- // The _id of the last document inserted is (docId-1).
- return relatedEvent.fullDocument._id === (docId - 1);
- });
- csCursor.close();
+// Write an event to the unrelated collection in order to advance the PBRT, and then consume all
+// events. When we see a PBRT that is greater than the timestamp of the last event (stored in
+// 'relatedEvent'), we know it must be a synthetic high-water-mark token.
+//
+// Note that the first insert into the unrelated collection may not be enough to advance the
+// PBRT; some passthroughs will group the unrelated write into a transaction with the related
+// write, giving them the same timestamp. We put the unrelated insert into the assert.soon loop,
+// so that it will eventually get its own transaction with a new timestamp.
+let relatedEvent = null;
+let hwmToken = null;
+assert.soon(() => {
+ assert.commandWorked(otherCollection.insert({}));
+ if (csCursor.hasNext()) {
+ relatedEvent = csCursor.next();
}
+ assert.eq(csCursor.objsLeftInBatch(), 0);
+ hwmToken = csCursor.getResumeToken();
+ assert.neq(undefined, hwmToken);
+ return relatedEvent && bsonWoCompare(hwmToken, relatedEvent._id) > 0;
+});
+csCursor.close();
- // Now resumeAfter the token that was generated before the collection was created...
- cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
- cursor: {}
- }));
- // ... and confirm that we see all the events that have occurred since then.
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- let docCount = 0;
+// Now write some further documents to the collection before attempting to resume.
+for (let i = 0; i < 5; ++i) {
+ assert.commandWorked(testCollection.insert({_id: docId++}));
+}
+
+// We can resumeAfter and startAfter the high water mark. We only see the latest 5 documents.
+for (let resumeType of ["startAfter", "resumeAfter"]) {
+ csCursor = testCollection.watch([], {[resumeType]: hwmToken});
assert.soon(() => {
if (csCursor.hasNext()) {
relatedEvent = csCursor.next();
- assert.eq(relatedEvent.fullDocument._id, docCount++);
+ assert.gt(bsonWoCompare(relatedEvent._id, hwmToken), 0);
+ // We never see the first document, whose _id was 0.
+ assert.gt(relatedEvent.fullDocument._id, 0);
}
- return docCount === docId;
- });
-
- // Despite the fact that we just resumed from a token which was generated before the collection
- // existed and had no UUID, all subsequent HWMs should now have UUIDs. To test this, we first
- // get the current resume token, then write a document to the unrelated collection. We then wait
- // until the PBRT advances, which means that we now have a new HWM token.
- let hwmPostCreation = csCursor.getResumeToken();
- assert.commandWorked(otherCollection.insert({}));
- assert.soon(() => {
- assert(!csCursor.hasNext());
- return bsonWoCompare(csCursor.getResumeToken(), hwmPostCreation) > 0;
+ // The _id of the last document inserted is (docId-1).
+ return relatedEvent.fullDocument._id === (docId - 1);
});
- hwmPostCreation = csCursor.getResumeToken();
csCursor.close();
+}
- // We can resume from the token if the collection is dropped...
- assertDropCollection(db, collName);
- assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
- cursor: {}
- }));
- // ... or if the collection is recreated with a different UUID...
- assertCreateCollection(db, collName);
- assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
- cursor: {}
- }));
- // ... or if we specify an explicit collation.
- assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
- collation: {locale: "simple"},
- cursor: {}
- }));
+// Now resumeAfter the token that was generated before the collection was created...
+cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
+ cursor: {}
+}));
+// ... and confirm that we see all the events that have occurred since then.
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+let docCount = 0;
+assert.soon(() => {
+ if (csCursor.hasNext()) {
+ relatedEvent = csCursor.next();
+ assert.eq(relatedEvent.fullDocument._id, docCount++);
+ }
+ return docCount === docId;
+});
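
For reference, a minimal sketch of wrapping a raw aggregate response in a shell cursor, as this test does; the collection name is hypothetical:

    // Sketch: open a change stream with the raw command and iterate the
    // response through DBCommandCursor.
    const res = assert.commandWorked(db.runCommand(
        {aggregate: "demo", pipeline: [{$changeStream: {}}], cursor: {}}));
    const cur = new DBCommandCursor(db, res);
    if (cur.hasNext()) {
        printjson(cur.next());  // one change event, if any has occurred
    }
    cur.close();
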
- // Even after the collection is recreated, we can still resume from the pre-creation HWM...
- cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
- cursor: {}
- }));
- // ...and we can still see all the events from the collection's original incarnation...
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- docCount = 0;
- assert.soon(() => {
- if (csCursor.hasNext()) {
- relatedEvent = csCursor.next();
- assert.eq(relatedEvent.fullDocument._id, docCount++);
- }
- return docCount === docId;
- });
- // ... this time followed by an invalidate, as the collection is dropped.
- assert.soon(() => {
- return csCursor.hasNext() && csCursor.next().operationType === "invalidate";
- });
- csCursor.close();
+// Although we just resumed from a token which was generated before the collection
+// existed and had no UUID, all subsequent HWMs should now have UUIDs. To test this, we first
+// get the current resume token, then write a document to the unrelated collection. We then wait
+// until the PBRT advances, which means that we now have a new HWM token.
+let hwmPostCreation = csCursor.getResumeToken();
+assert.commandWorked(otherCollection.insert({}));
+assert.soon(() => {
+ assert(!csCursor.hasNext());
+ return bsonWoCompare(csCursor.getResumeToken(), hwmPostCreation) > 0;
+});
+hwmPostCreation = csCursor.getResumeToken();
+csCursor.close();
+
+// We can resume from the token if the collection is dropped...
+assertDropCollection(db, collName);
+assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
+ cursor: {}
+}));
+// ... or if the collection is recreated with a different UUID...
+assertCreateCollection(db, collName);
+assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
+ cursor: {}
+}));
+// ... or if we specify an explicit collation.
+assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
+ collation: {locale: "simple"},
+ cursor: {}
+}));
+
+// Even after the collection is recreated, we can still resume from the pre-creation HWM...
+cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
+ cursor: {}
+}));
+// ...and we can still see all the events from the collection's original incarnation...
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+docCount = 0;
+assert.soon(() => {
+ if (csCursor.hasNext()) {
+ relatedEvent = csCursor.next();
+ assert.eq(relatedEvent.fullDocument._id, docCount++);
+ }
+ return docCount === docId;
+});
+// ... this time followed by an invalidate, as the collection is dropped.
+assert.soon(() => {
+ return csCursor.hasNext() && csCursor.next().operationType === "invalidate";
+});
+csCursor.close();
})();
\ No newline at end of file
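
A condensed sketch of the property this file verifies: a high-water-mark token stays usable even when the target collection has been dropped or recreated. 'hwmToken' and the collection name are placeholders:

    // Sketch: resume from a previously saved high-water-mark token
    // against a dropped or recreated collection, supplying an explicit
    // simple collation.
    assert.commandWorked(db.runCommand({
        aggregate: "demo",
        pipeline: [{$changeStream: {resumeAfter: hwmToken}}],
        collation: {locale: "simple"},
        cursor: {}
    }));
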
diff --git a/jstests/change_streams/shell_helper.js b/jstests/change_streams/shell_helper.js
index a044ba76e50..29330a433e9 100644
--- a/jstests/change_streams/shell_helper.js
+++ b/jstests/change_streams/shell_helper.js
@@ -7,215 +7,215 @@
// based on the commit oplog entry, which would cause this test to fail.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
- const coll = assertDropAndRecreateCollection(db, "change_stream_shell_helper");
+const coll = assertDropAndRecreateCollection(db, "change_stream_shell_helper");
- function checkNextChange(cursor, expected) {
- assert.soon(() => cursor.hasNext());
- const nextObj = cursor.next();
- assertChangeStreamEventEq(nextObj, expected);
- return nextObj;
- }
-
- function testCommandIsCalled(testFunc, checkFunc) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function checkNextChange(cursor, expected) {
+ assert.soon(() => cursor.hasNext());
+ const nextObj = cursor.next();
+ assertChangeStreamEventEq(nextObj, expected);
+ return nextObj;
+}
- const sentinel = {};
- let cmdObjSeen = sentinel;
+function testCommandIsCalled(testFunc, checkFunc) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- try {
- assert.doesNotThrow(testFunc);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
-
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " +
- testFunc.toString());
- }
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- checkFunc(cmdObjSeen);
+ try {
+ assert.doesNotThrow(testFunc);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- jsTestLog("Testing watch() without options");
- let changeStreamCursor = coll.watch();
-
- assert(!changeStreamCursor.hasNext());
-
- // Write the first document into the collection. We will save the resume token from this change.
- assert.writeOK(coll.insert({_id: 0, x: 1}));
- let resumeToken;
-
- // Test that the change stream cursor picks up the change.
- assert.soon(() => changeStreamCursor.hasNext());
- let change = changeStreamCursor.next();
- assert(!changeStreamCursor.hasNext());
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, x: 1},
- ns: {db: "test", coll: coll.getName()},
- operationType: "insert",
- };
- assert("_id" in change, "Got unexpected change: " + tojson(change));
- // Remember the _id of the first op to resume the stream.
- resumeToken = change._id;
- // Remove the fields we cannot predict, then test that the change is as expected.
- delete change._id;
- delete change.clusterTime;
- assert.docEq(change, expected);
-
- jsTestLog("Testing watch() with pipeline");
- changeStreamCursor = coll.watch([{$project: {clusterTime: 1, docId: "$documentKey._id"}}]);
-
- // Store the cluster time of the insert as the timestamp to start from.
- const resumeTime =
- assert.commandWorked(db.runCommand({insert: coll.getName(), documents: [{_id: 1, x: 1}]}))
- .operationTime;
- jsTestLog("Insert of document with _id 1 got operationTime " + tojson(resumeTime));
-
- const changeForInsert = checkNextChange(changeStreamCursor, {docId: 1});
- jsTestLog("Change stream event for document with _id 1 reports clusterTime " +
- tojson(changeForInsert.clusterTime));
-
- // We expect the clusterTime returned by the change stream event and the operationTime returned
- // by the insert to be the same.
- assert.eq(changeForInsert.clusterTime, resumeTime);
-
- jsTestLog("Testing watch() with pipeline and resumeAfter");
- changeStreamCursor =
- coll.watch([{$project: {docId: "$documentKey._id"}}], {resumeAfter: resumeToken});
- checkNextChange(changeStreamCursor, {docId: 1});
-
- jsTestLog("Testing watch() with pipeline and startAfter");
- changeStreamCursor =
- coll.watch([{$project: {docId: "$documentKey._id"}}], {startAfter: resumeToken});
- checkNextChange(changeStreamCursor, {docId: 1});
-
- jsTestLog("Testing watch() with pipeline and startAtOperationTime");
- changeStreamCursor =
- coll.watch([{$project: {docId: "$documentKey._id"}}], {startAtOperationTime: resumeTime});
- checkNextChange(changeStreamCursor, {docId: 1});
-
- jsTestLog("Testing watch() with updateLookup");
- changeStreamCursor = coll.watch([], {fullDocument: "updateLookup"});
-
- assert.writeOK(coll.update({_id: 0}, {$set: {x: 10}}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, x: 10},
- ns: {db: "test", coll: coll.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {x: 10}},
- };
- checkNextChange(changeStreamCursor, expected);
-
- jsTestLog("Testing watch() with batchSize");
- // Only test mongod because mongos uses batch size 0 for aggregate commands internally to
- // establish cursors quickly. GetMore on mongos doesn't respect batch size due to SERVER-31992.
- const isMongos = FixtureHelpers.isMongos(db);
- if (!isMongos) {
- // Increment a field 5 times and verify that the batch size is respected.
- for (let i = 0; i < 5; i++) {
- assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + testFunc.toString());
+ }
- // Watch only the "update" changes on the specific doc, starting from the beginning.
- changeStreamCursor =
- coll.watch([{$match: {documentKey: {_id: 1}, operationType: "update"}}],
- {resumeAfter: resumeToken, batchSize: 2});
-
- // Check the first batch.
- assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
- // Consume the first batch.
- assert(changeStreamCursor.hasNext());
- changeStreamCursor.next();
- assert(changeStreamCursor.hasNext());
- changeStreamCursor.next();
- // Confirm that the batch is empty.
- assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
-
- // Check the batch returned by getMore.
- assert(changeStreamCursor.hasNext());
- assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
- changeStreamCursor.next();
- assert(changeStreamCursor.hasNext());
- changeStreamCursor.next();
- assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
- // There are more changes coming, just not in the batch.
- assert(changeStreamCursor.hasNext());
+ checkFunc(cmdObjSeen);
+}
+
+jsTestLog("Testing watch() without options");
+let changeStreamCursor = coll.watch();
+
+assert(!changeStreamCursor.hasNext());
+
+// Write the first document into the collection. We will save the resume token from this change.
+assert.writeOK(coll.insert({_id: 0, x: 1}));
+let resumeToken;
+
+// Test that the change stream cursor picks up the change.
+assert.soon(() => changeStreamCursor.hasNext());
+let change = changeStreamCursor.next();
+assert(!changeStreamCursor.hasNext());
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, x: 1},
+ ns: {db: "test", coll: coll.getName()},
+ operationType: "insert",
+};
+assert("_id" in change, "Got unexpected change: " + tojson(change));
+// Remember the _id of the first op to resume the stream.
+resumeToken = change._id;
+// Remove the fields we cannot predict, then test that the change is as expected.
+delete change._id;
+delete change.clusterTime;
+assert.docEq(change, expected);
+
+jsTestLog("Testing watch() with pipeline");
+changeStreamCursor = coll.watch([{$project: {clusterTime: 1, docId: "$documentKey._id"}}]);
+
+// Store the cluster time of the insert as the timestamp to start from.
+const resumeTime =
+ assert.commandWorked(db.runCommand({insert: coll.getName(), documents: [{_id: 1, x: 1}]}))
+ .operationTime;
+jsTestLog("Insert of document with _id 1 got operationTime " + tojson(resumeTime));
+
+const changeForInsert = checkNextChange(changeStreamCursor, {docId: 1});
+jsTestLog("Change stream event for document with _id 1 reports clusterTime " +
+ tojson(changeForInsert.clusterTime));
+
+// We expect the clusterTime returned by the change stream event and the operationTime returned
+// by the insert to be the same.
+assert.eq(changeForInsert.clusterTime, resumeTime);
+
+jsTestLog("Testing watch() with pipeline and resumeAfter");
+changeStreamCursor =
+ coll.watch([{$project: {docId: "$documentKey._id"}}], {resumeAfter: resumeToken});
+checkNextChange(changeStreamCursor, {docId: 1});
+
+jsTestLog("Testing watch() with pipeline and startAfter");
+changeStreamCursor =
+ coll.watch([{$project: {docId: "$documentKey._id"}}], {startAfter: resumeToken});
+checkNextChange(changeStreamCursor, {docId: 1});
+
+jsTestLog("Testing watch() with pipeline and startAtOperationTime");
+changeStreamCursor =
+ coll.watch([{$project: {docId: "$documentKey._id"}}], {startAtOperationTime: resumeTime});
+checkNextChange(changeStreamCursor, {docId: 1});
+
+jsTestLog("Testing watch() with updateLookup");
+changeStreamCursor = coll.watch([], {fullDocument: "updateLookup"});
+
+assert.writeOK(coll.update({_id: 0}, {$set: {x: 10}}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, x: 10},
+ ns: {db: "test", coll: coll.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {x: 10}},
+};
+checkNextChange(changeStreamCursor, expected);
+
+jsTestLog("Testing watch() with batchSize");
+// Only test mongod because mongos uses batch size 0 for aggregate commands internally to
+// establish cursors quickly. GetMore on mongos doesn't respect batch size due to SERVER-31992.
+const isMongos = FixtureHelpers.isMongos(db);
+if (!isMongos) {
+ // Increment a field 5 times and verify that the batch size is respected.
+ for (let i = 0; i < 5; i++) {
+ assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
}
- jsTestLog("Testing watch() with maxAwaitTimeMS");
- changeStreamCursor = coll.watch([], {maxAwaitTimeMS: 500});
- testCommandIsCalled(() => assert(!changeStreamCursor.hasNext()), (cmdObj) => {
- assert.eq("getMore",
- Object.keys(cmdObj)[0],
- "expected getMore command, but was: " + tojson(cmdObj));
- assert(cmdObj.hasOwnProperty("maxTimeMS"), "unexpected getMore command: " + tojson(cmdObj));
- assert.eq(500, cmdObj.maxTimeMS, "unexpected getMore command: " + tojson(cmdObj));
+ // Watch only the "update" changes on the specific doc, starting from the beginning.
+ changeStreamCursor = coll.watch([{$match: {documentKey: {_id: 1}, operationType: "update"}}],
+ {resumeAfter: resumeToken, batchSize: 2});
+
+ // Check the first batch.
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
+ // Consume the first batch.
+ assert(changeStreamCursor.hasNext());
+ changeStreamCursor.next();
+ assert(changeStreamCursor.hasNext());
+ changeStreamCursor.next();
+ // Confirm that the batch is empty.
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
+
+ // Check the batch returned by getMore.
+ assert(changeStreamCursor.hasNext());
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
+ changeStreamCursor.next();
+ assert(changeStreamCursor.hasNext());
+ changeStreamCursor.next();
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
+ // There are more changes coming, just not in the batch.
+ assert(changeStreamCursor.hasNext());
+}
+
+jsTestLog("Testing watch() with maxAwaitTimeMS");
+changeStreamCursor = coll.watch([], {maxAwaitTimeMS: 500});
+testCommandIsCalled(() => assert(!changeStreamCursor.hasNext()), (cmdObj) => {
+ assert.eq(
+ "getMore", Object.keys(cmdObj)[0], "expected getMore command, but was: " + tojson(cmdObj));
+ assert(cmdObj.hasOwnProperty("maxTimeMS"), "unexpected getMore command: " + tojson(cmdObj));
+ assert.eq(500, cmdObj.maxTimeMS, "unexpected getMore command: " + tojson(cmdObj));
+});
+
+jsTestLog("Testing the cursor gets closed when the collection gets dropped");
+changeStreamCursor = coll.watch([{$project: {clusterTime: 0}}]);
+assert.writeOK(coll.insert({_id: 2, x: 1}));
+expected = {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, x: 1},
+ ns: {db: "test", coll: coll.getName()},
+ operationType: "insert",
+};
+checkNextChange(changeStreamCursor, expected);
+assert(!changeStreamCursor.hasNext());
+assert(!changeStreamCursor.isClosed());
+assert(!changeStreamCursor.isExhausted());
+
+// Dropping the collection should trigger a drop notification.
+assertDropCollection(db, coll.getName());
+assert.soon(() => changeStreamCursor.hasNext());
+assert(!changeStreamCursor.isExhausted());
+expected = {
+ operationType: "drop",
+ ns: {db: db.getName(), coll: coll.getName()}
+};
+checkNextChange(changeStreamCursor, expected);
+// For single collection change streams, the drop should invalidate the stream.
+const invalidateDoc = assertInvalidateOp({cursor: changeStreamCursor, opType: "drop"});
+
+if (invalidateDoc) {
+ jsTestLog("Testing using the 'startAfter' option from the invalidate entry");
+ assert.commandWorked(coll.insert({_id: "After drop"}));
+ let resumedFromInvalidate =
+ coll.watch([], {startAfter: invalidateDoc._id, collation: {locale: "simple"}});
+
+ // We should see the new insert after starting over. However, in sharded cluster
+ // passthroughs we may see more drop and invalidate notifications before we see the insert.
+ let firstChangeAfterDrop;
+ assert.soon(() => {
+ if (!resumedFromInvalidate.hasNext()) {
+ return false;
+ }
+ const next = resumedFromInvalidate.next();
+ if (next.operationType == "invalidate") {
+ // Start again!
+ resumedFromInvalidate =
+ coll.watch([], {startAfter: next._id, collation: {locale: "simple"}});
+ return false;
+ }
+ if (next.operationType == "drop") {
+ return false;
+ }
+ // THIS is the change we wanted.
+ firstChangeAfterDrop = next;
+ return true;
});
- jsTestLog("Testing the cursor gets closed when the collection gets dropped");
- changeStreamCursor = coll.watch([{$project: {clusterTime: 0}}]);
- assert.writeOK(coll.insert({_id: 2, x: 1}));
- expected = {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, x: 1},
- ns: {db: "test", coll: coll.getName()},
- operationType: "insert",
- };
- checkNextChange(changeStreamCursor, expected);
- assert(!changeStreamCursor.hasNext());
- assert(!changeStreamCursor.isClosed());
- assert(!changeStreamCursor.isExhausted());
-
- // Dropping the collection should trigger a drop notification.
- assertDropCollection(db, coll.getName());
- assert.soon(() => changeStreamCursor.hasNext());
- assert(!changeStreamCursor.isExhausted());
- expected = {operationType: "drop", ns: {db: db.getName(), coll: coll.getName()}};
- checkNextChange(changeStreamCursor, expected);
- // For single collection change streams, the drop should invalidate the stream.
- const invalidateDoc = assertInvalidateOp({cursor: changeStreamCursor, opType: "drop"});
-
- if (invalidateDoc) {
- jsTestLog("Testing using the 'startAfter' option from the invalidate entry");
- assert.commandWorked(coll.insert({_id: "After drop"}));
- let resumedFromInvalidate =
- coll.watch([], {startAfter: invalidateDoc._id, collation: {locale: "simple"}});
-
- // We should see the new insert after starting over. However, in sharded cluster
- // passthroughs we may see more drop and invalidate notifications before we see the insert.
- let firstChangeAfterDrop;
- assert.soon(() => {
- if (!resumedFromInvalidate.hasNext()) {
- return false;
- }
- const next = resumedFromInvalidate.next();
- if (next.operationType == "invalidate") {
- // Start again!
- resumedFromInvalidate =
- coll.watch([], {startAfter: next._id, collation: {locale: "simple"}});
- return false;
- }
- if (next.operationType == "drop") {
- return false;
- }
- // THIS is the change we wanted.
- firstChangeAfterDrop = next;
- return true;
- });
-
- assert.eq(firstChangeAfterDrop.documentKey._id, "After drop", tojson(change));
- }
+ assert.eq(firstChangeAfterDrop.documentKey._id, "After drop", tojson(change));
+}
}());
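
The command-spy helper in this file is a reusable pattern; a standalone sketch, using db.demo.findOne() as an arbitrary stand-in operation:

    // Sketch: temporarily wrap Mongo.prototype.runCommand to capture the
    // command object that a shell helper sends, restoring the original
    // in a finally block so later operations are unaffected.
    const originalRunCommand = Mongo.prototype.runCommand;
    let cmdSeen = null;
    Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
        cmdSeen = cmdObj;
        return originalRunCommand.apply(this, arguments);
    };
    try {
        db.demo.findOne();
    } finally {
        Mongo.prototype.runCommand = originalRunCommand;
    }
    printjson(cmdSeen);  // the 'find' command issued by findOne()
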
diff --git a/jstests/change_streams/start_at_cluster_time.js b/jstests/change_streams/start_at_cluster_time.js
index 484de4c43c2..2edcb530e20 100644
--- a/jstests/change_streams/start_at_cluster_time.js
+++ b/jstests/change_streams/start_at_cluster_time.js
@@ -1,80 +1,79 @@
// Tests resuming change streams based on cluster time.
(function() {
- "use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+"use strict";
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, jsTestName());
+const coll = assertDropAndRecreateCollection(db, jsTestName());
- const testStartTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
+const testStartTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- // Write a document to each chunk, and wait for replication.
- assert.writeOK(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+// Write a document to each chunk, and wait for replication.
+assert.writeOK(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- // Perform two updates, then use a change stream to capture the cluster time of the first update
- // to be resumed from.
- const streamToFindClusterTime = coll.watch();
- assert.writeOK(coll.update({_id: -1}, {$set: {updated: true}}));
- assert.writeOK(coll.update({_id: 1}, {$set: {updated: true}}));
- assert.soon(() => streamToFindClusterTime.hasNext());
- let next = streamToFindClusterTime.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {_id: -1});
- const timeOfFirstUpdate = next.clusterTime;
+// Perform two updates, then use a change stream to capture the cluster time of the first update
+// to be resumed from.
+const streamToFindClusterTime = coll.watch();
+assert.writeOK(coll.update({_id: -1}, {$set: {updated: true}}));
+assert.writeOK(coll.update({_id: 1}, {$set: {updated: true}}));
+assert.soon(() => streamToFindClusterTime.hasNext());
+let next = streamToFindClusterTime.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey, {_id: -1});
+const timeOfFirstUpdate = next.clusterTime;
- let changeStream = coll.watch([], {startAtOperationTime: timeOfFirstUpdate});
+let changeStream = coll.watch([], {startAtOperationTime: timeOfFirstUpdate});
- // Test that starting at the cluster time is inclusive of the first update, so we should see
- // both updates in the new stream.
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, -1, tojson(next));
+// Test that starting at the cluster time is inclusive of the first update, so we should see
+// both updates in the new stream.
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, -1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, 1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, 1, tojson(next));
- // Test that startAtOperationTime is not allowed alongside resumeAfter.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- pipeline:
- [{$changeStream: {startAtOperationTime: timeOfFirstUpdate, resumeAfter: next._id}}],
- cursor: {}
- }),
- 40674);
+// Test that startAtOperationTime is not allowed alongside resumeAfter.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$changeStream: {startAtOperationTime: timeOfFirstUpdate, resumeAfter: next._id}}],
+ cursor: {}
+}),
+ 40674);
- // Test that resuming from a time in the future will wait for that time to come.
- let resumeTimeFarFuture = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- resumeTimeFarFuture =
- new Timestamp(resumeTimeFarFuture.getTime() + 60 * 60 * 6, 1); // 6 hours in the future
+// Test that resuming from a time in the future will wait for that time to come.
+let resumeTimeFarFuture = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
+resumeTimeFarFuture =
+ new Timestamp(resumeTimeFarFuture.getTime() + 60 * 60 * 6, 1); // 6 hours in the future
- let changeStreamFuture = coll.watch([], {startAtOperationTime: resumeTimeFarFuture});
+let changeStreamFuture = coll.watch([], {startAtOperationTime: resumeTimeFarFuture});
- // Resume the change stream from the start of the test and verify it picks up the changes to the
- // collection. Namely, it should see two inserts followed by two updates.
- changeStream = coll.watch([], {startAtOperationTime: testStartTime});
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert", tojson(next));
- assert.eq(next.documentKey._id, -1, tojson(next));
+// Resume the change stream from the start of the test and verify it picks up the changes to the
+// collection. Namely, it should see two inserts followed by two updates.
+changeStream = coll.watch([], {startAtOperationTime: testStartTime});
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert", tojson(next));
+assert.eq(next.documentKey._id, -1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert", tojson(next));
- assert.eq(next.documentKey._id, 1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert", tojson(next));
+assert.eq(next.documentKey._id, 1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, -1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, -1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, 1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, 1, tojson(next));
- // Verify that the change stream resumed from far into the future does not see any changes.
- assert(!changeStreamFuture.hasNext());
+// Verify that the change stream resumed from far into the future does not see any changes.
+assert(!changeStreamFuture.hasNext());
})();
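
A minimal sketch of the start-time option this file tests; 'demo' is a placeholder collection:

    // Sketch: start a stream at a cluster time. The start point is
    // inclusive, so an event whose clusterTime equals 'ts' is returned.
    const ts = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
    const cs = db.demo.watch([], {startAtOperationTime: ts});
    // A future start point can be built from the timestamp's seconds,
    // e.g. new Timestamp(ts.getTime() + 60, 1) for one minute ahead.
    cs.close();
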
diff --git a/jstests/change_streams/whitelist.js b/jstests/change_streams/whitelist.js
index 6b86604f8a8..cd5716b9073 100644
--- a/jstests/change_streams/whitelist.js
+++ b/jstests/change_streams/whitelist.js
@@ -3,31 +3,31 @@
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/aggregation/extras/utils.js'); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load('jstests/aggregation/extras/utils.js'); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "change_stream_whitelist");
+const coll = assertDropAndRecreateCollection(db, "change_stream_whitelist");
- // Bare-bones $changeStream pipeline which will be augmented during tests.
- const changeStream = [{$changeStream: {}}];
+// Bare-bones $changeStream pipeline which will be augmented during tests.
+const changeStream = [{$changeStream: {}}];
- // List of non-$changeStream stages which are explicitly whitelisted.
- const whitelist = [
- {$match: {_id: {$exists: true}}},
- {$project: {_id: 1}},
- {$addFields: {newField: 1}},
- {$set: {newField: 1}},
- {$replaceRoot: {newRoot: {_id: "$_id"}}},
- {$replaceWith: {_id: "$_id"}},
- {$redact: "$$DESCEND"}
- ];
+// List of non-$changeStream stages which are explicitly whitelisted.
+const whitelist = [
+ {$match: {_id: {$exists: true}}},
+ {$project: {_id: 1}},
+ {$addFields: {newField: 1}},
+ {$set: {newField: 1}},
+ {$replaceRoot: {newRoot: {_id: "$_id"}}},
+ {$replaceWith: {_id: "$_id"}},
+ {$redact: "$$DESCEND"}
+];
- // List of stages which the whitelist mechanism will prevent from running in a $changeStream.
- // Does not include stages which are blacklisted but already implicitly prohibited, e.g. both
- // $currentOp and $changeStream must be the first stage in a pipeline.
- const blacklist = [
+// List of stages which the whitelist mechanism will prevent from running in a $changeStream.
+// Does not include stages which are blacklisted but already implicitly prohibited, e.g. both
+// $currentOp and $changeStream must be the first stage in a pipeline.
+const blacklist = [
{$group: {_id: "$_id"}},
{$sort: {_id: 1}},
{$skip: 100},
@@ -48,18 +48,18 @@
{$facet: {facetPipe: [{$match: {_id: {$exists: true}}}]}}
];
- // Verify that each of the whitelisted stages are permitted to run in a $changeStream.
- for (let allowedStage of whitelist) {
- assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(allowedStage), cursor: {}}));
- }
-
- // Verify that all of the whitelisted stages are able to run in a $changeStream together.
+// Verify that each of the whitelisted stages are permitted to run in a $changeStream.
+for (let allowedStage of whitelist) {
assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(whitelist), cursor: {}}));
+ {aggregate: coll.getName(), pipeline: changeStream.concat(allowedStage), cursor: {}}));
+}
+
+// Verify that all of the whitelisted stages are able to run in a $changeStream together.
+assert.commandWorked(db.runCommand(
+ {aggregate: coll.getName(), pipeline: changeStream.concat(whitelist), cursor: {}}));
- // Verify that a $changeStream pipeline fails to validate if a blacklisted stage is present.
- for (let bannedStage of blacklist) {
- assertErrorCode(coll, changeStream.concat(bannedStage), ErrorCodes.IllegalOperation);
- }
+// Verify that a $changeStream pipeline fails to validate if a blacklisted stage is present.
+for (let bannedStage of blacklist) {
+ assertErrorCode(coll, changeStream.concat(bannedStage), ErrorCodes.IllegalOperation);
+}
}());
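
A one-command sketch of the blacklist check this file performs; the collection name is hypothetical:

    // Sketch: a blacklisted stage such as $sort fails validation when
    // appended to a $changeStream pipeline.
    assert.commandFailedWithCode(db.runCommand({
        aggregate: "demo",
        pipeline: [{$changeStream: {}}, {$sort: {_id: 1}}],
        cursor: {}
    }),
                                 ErrorCodes.IllegalOperation);
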
diff --git a/jstests/change_streams/whole_cluster.js b/jstests/change_streams/whole_cluster.js
index b95453d12bc..7d2d3f22dbb 100644
--- a/jstests/change_streams/whole_cluster.js
+++ b/jstests/change_streams/whole_cluster.js
@@ -1,133 +1,133 @@
// Basic tests for $changeStream against all databases in the cluster.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
- // assert[Valid|Invalid]ChangeStreamNss.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- db = db.getSiblingDB(jsTestName());
- const adminDB = db.getSiblingDB("admin");
- const otherDB = db.getSiblingDB(jsTestName() + "_other");
+db = db.getSiblingDB(jsTestName());
+const adminDB = db.getSiblingDB("admin");
+const otherDB = db.getSiblingDB(jsTestName() + "_other");
- // Drop and recreate the collections to be used in this set of tests.
- assertDropAndRecreateCollection(db, "t1");
- assertDropAndRecreateCollection(otherDB, "t2");
+// Drop and recreate the collections to be used in this set of tests.
+assertDropAndRecreateCollection(db, "t1");
+assertDropAndRecreateCollection(otherDB, "t2");
- // Test that a change stream can be opened on the admin database if {allChangesForCluster:true}
- // is specified.
- assertValidChangeStreamNss("admin", 1, {allChangesForCluster: true});
- // Test that a change stream cannot be opened on the admin database if a collection is
- // specified, even with {allChangesForCluster:true}.
- assertInvalidChangeStreamNss("admin", "testcoll", {allChangesForCluster: true});
- // Test that a change stream cannot be opened on a database other than admin if
- // {allChangesForCluster:true} is specified.
- assertInvalidChangeStreamNss(db.getName(), 1, {allChangesForCluster: true});
+// Test that a change stream can be opened on the admin database if {allChangesForCluster:true}
+// is specified.
+assertValidChangeStreamNss("admin", 1, {allChangesForCluster: true});
+// Test that a change stream cannot be opened on the admin database if a collection is
+// specified, even with {allChangesForCluster:true}.
+assertInvalidChangeStreamNss("admin", "testcoll", {allChangesForCluster: true});
+// Test that a change stream cannot be opened on a database other than admin if
+// {allChangesForCluster:true} is specified.
+assertInvalidChangeStreamNss(db.getName(), 1, {allChangesForCluster: true});
- let cst = new ChangeStreamTest(adminDB);
- let cursor = cst.startWatchingAllChangesForCluster();
+let cst = new ChangeStreamTest(adminDB);
+let cursor = cst.startWatchingAllChangesForCluster();
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
- // Test that the change stream returns an inserted doc.
- assert.writeOK(db.t1.insert({_id: 0, a: 1}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns an inserted doc.
+assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns another inserted doc in a different database.
- assert.writeOK(otherDB.t2.insert({_id: 0, a: 2}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 2},
- ns: {db: otherDB.getName(), coll: "t2"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns another inserted doc in a different database.
+assert.writeOK(otherDB.t2.insert({_id: 0, a: 2}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 2},
+ ns: {db: otherDB.getName(), coll: "t2"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns an inserted doc on a user-created database whose name
- // includes 'admin', 'local', or 'config'.
- const validUserDBs = [
- "admin1",
- "1admin",
- "_admin_",
- "local_",
- "_local",
- "_local_",
- "config_",
- "_config",
- "_config_"
+// Test that the change stream returns an inserted doc on a user-created database whose name
+// includes 'admin', 'local', or 'config'.
+const validUserDBs = [
+ "admin1",
+ "1admin",
+ "_admin_",
+ "local_",
+ "_local",
+ "_local_",
+ "config_",
+ "_config",
+ "_config_"
+];
+validUserDBs.forEach(dbName => {
+ assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
+ expected = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: dbName, coll: "test"},
+ operationType: "insert",
+ },
];
- validUserDBs.forEach(dbName => {
- assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
- expected = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: dbName, coll: "test"},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- });
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+});
- // Test that the change stream returns an inserted doc on a user-created collection whose name
- // includes "system" but is not considered an internal collection.
- const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
- validSystemColls.forEach(collName => {
- assert.writeOK(db.getCollection(collName).insert({_id: 0, a: 1}));
- expected = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: collName},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- });
+// Test that the change stream returns an inserted doc on a user-created collection whose name
+// includes "system" but is not considered an internal collection.
+const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
+validSystemColls.forEach(collName => {
+ assert.writeOK(db.getCollection(collName).insert({_id: 0, a: 1}));
+ expected = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: collName},
+ operationType: "insert",
+ },
+ ];
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+});
- // Test that the change stream filters out operations on any collection in the 'admin', 'local',
- // or 'config' databases.
- const filteredDBs = ["admin", "local", "config"];
- filteredDBs.forEach(dbName => {
- // Not allowed to use 'local' db through mongos.
- if (FixtureHelpers.isMongos(db) && dbName == "local")
- return;
+// Test that the change stream filters out operations on any collection in the 'admin', 'local',
+// or 'config' databases.
+const filteredDBs = ["admin", "local", "config"];
+filteredDBs.forEach(dbName => {
+ // Not allowed to use 'local' db through mongos.
+ if (FixtureHelpers.isMongos(db) && dbName == "local")
+ return;
- assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
- // Insert into the test collection to ensure that the change stream has something to
- // return.
- assert.writeOK(db.t1.insert({_id: dbName}));
- expected = [
- {
- documentKey: {_id: dbName},
- fullDocument: {_id: dbName},
- ns: {db: db.getName(), coll: "t1"},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- // Drop the test collection to avoid duplicate key errors if this test is run multiple
- // times.
- assertDropCollection(db.getSiblingDB(dbName), "test");
- });
+ assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
+ // Insert into the test collection to ensure that the change stream has something to
+ // return.
+ assert.writeOK(db.t1.insert({_id: dbName}));
+ expected = [
+ {
+ documentKey: {_id: dbName},
+ fullDocument: {_id: dbName},
+ ns: {db: db.getName(), coll: "t1"},
+ operationType: "insert",
+ },
+ ];
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+ // Drop the test collection to avoid duplicate key errors if this test is run multiple
+ // times.
+ assertDropCollection(db.getSiblingDB(dbName), "test");
+});
- // Dropping a database should generate drop entries for each collection followed by a database
- // drop.
- assert.commandWorked(otherDB.dropDatabase());
- cst.assertDatabaseDrop({cursor: cursor, db: otherDB});
+// Dropping a database should generate drop entries for each collection followed by a database
+// drop.
+assert.commandWorked(otherDB.dropDatabase());
+cst.assertDatabaseDrop({cursor: cursor, db: otherDB});
- // Drop the remaining databases and clean up the test.
- assert.commandWorked(db.dropDatabase());
- validUserDBs.forEach(dbName => {
- db.getSiblingDB(dbName).dropDatabase();
- });
- cst.cleanUp();
+// Drop the remaining databases and clean up the test.
+assert.commandWorked(db.dropDatabase());
+validUserDBs.forEach(dbName => {
+ db.getSiblingDB(dbName).dropDatabase();
+});
+cst.cleanUp();
}());
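
For reference, the minimal command shape for a cluster-wide stream, which must be opened on the admin database:

    // Sketch: open a whole-cluster change stream. 'aggregate: 1' targets
    // the database rather than a collection, and the option is only
    // valid on admin.
    const adminDB = db.getSiblingDB("admin");
    assert.commandWorked(adminDB.runCommand({
        aggregate: 1,
        pipeline: [{$changeStream: {allChangesForCluster: true}}],
        cursor: {}
    }));
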
diff --git a/jstests/change_streams/whole_cluster_metadata_notifications.js b/jstests/change_streams/whole_cluster_metadata_notifications.js
index ec7da470842..9a9d8c6efd5 100644
--- a/jstests/change_streams/whole_cluster_metadata_notifications.js
+++ b/jstests/change_streams/whole_cluster_metadata_notifications.js
@@ -1,280 +1,276 @@
// Tests of metadata notifications for a $changeStream on a whole cluster.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- // Define two databases. We will conduct our tests by creating one collection in each.
- const testDB1 = db.getSiblingDB(jsTestName()),
- testDB2 = db.getSiblingDB(jsTestName() + "_other");
- const adminDB = db.getSiblingDB("admin");
+// Define two databases. We will conduct our tests by creating one collection in each.
+const testDB1 = db.getSiblingDB(jsTestName()), testDB2 = db.getSiblingDB(jsTestName() + "_other");
+const adminDB = db.getSiblingDB("admin");
- assert.commandWorked(testDB1.dropDatabase());
- assert.commandWorked(testDB2.dropDatabase());
+assert.commandWorked(testDB1.dropDatabase());
+assert.commandWorked(testDB2.dropDatabase());
- // Create one collection on each database.
- let [db1Coll, db2Coll] =
- [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
+// Create one collection on each database.
+let [db1Coll, db2Coll] =
+ [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
- // Create a ChangeStreamTest on the 'admin' db. Cluster-wide change streams can only be opened
- // on admin.
- let cst = new ChangeStreamTest(adminDB);
- let aggCursor = cst.startWatchingAllChangesForCluster();
+// Create a ChangeStreamTest on the 'admin' db. Cluster-wide change streams can only be opened
+// on admin.
+let cst = new ChangeStreamTest(adminDB);
+let aggCursor = cst.startWatchingAllChangesForCluster();
- // Generate oplog entries of type insert, update, and delete across both databases.
- for (let coll of[db1Coll, db2Coll]) {
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
- }
+// Generate oplog entries of type insert, update, and delete across both databases.
+for (let coll of [db1Coll, db2Coll]) {
+ assert.writeOK(coll.insert({_id: 1}));
+ assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+ assert.writeOK(coll.remove({_id: 1}));
+}
- // Drop the second database, which should generate a 'drop' entry for the collection followed
- // by a 'dropDatabase' entry.
- assert.commandWorked(testDB2.dropDatabase());
+// Drop the second database, which should generate a 'drop' entry for the collection followed
+// by a 'dropDatabase' entry.
+assert.commandWorked(testDB2.dropDatabase());
- // We should get 6 oplog entries; three ops of type insert, update, delete from each database.
- for (let expectedDB of[testDB1, testDB2]) {
- let change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "update", tojson(change));
- assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "delete", tojson(change));
- assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
- }
- cst.assertDatabaseDrop({cursor: aggCursor, db: testDB2});
+// We should get 6 oplog entries; three ops of type insert, update, delete from each database.
+for (let expectedDB of [testDB1, testDB2]) {
+ let change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "update", tojson(change));
+ assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "delete", tojson(change));
+ assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
+}
+cst.assertDatabaseDrop({cursor: aggCursor, db: testDB2});
- // Test that a cluster-wide change stream can be resumed using a token from a collection which
- // has been dropped.
- db1Coll = assertDropAndRecreateCollection(testDB1, db1Coll.getName());
+// Test that a cluster-wide change stream can be resumed using a token from a collection which
+// has been dropped.
+db1Coll = assertDropAndRecreateCollection(testDB1, db1Coll.getName());
- // Get a valid resume token that the next change stream can use.
- aggCursor = cst.startWatchingAllChangesForCluster();
+// Get a valid resume token that the next change stream can use.
+aggCursor = cst.startWatchingAllChangesForCluster();
- assert.writeOK(db1Coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(db1Coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- let change = cst.getOneChange(aggCursor, false);
- const resumeToken = change._id;
-
- // For cluster-wide streams, it is possible to resume at a point before a collection is dropped,
- // even if the "drop" notification has not been received on the original stream yet.
- assertDropCollection(db1Coll, db1Coll.getName());
- // Wait for two-phase drop to complete, so that the UUID no longer exists.
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB1,
- db1Coll.getName());
- });
- assert.commandWorked(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$changeStream: {resumeAfter: resumeToken, allChangesForCluster: true}}],
- cursor: {}
- }));
-
- // Test that collection drops from any database result in "drop" notifications for the stream.
- [db1Coll, db2Coll] =
- [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
- let _idForTest = 0;
- for (let collToInvalidate of[db1Coll, db2Coll]) {
- // Start watching all changes in the cluster.
- aggCursor = cst.startWatchingAllChangesForCluster();
-
- let testDB = collToInvalidate.getDB();
-
- // Insert into the collections on both databases, and verify the change stream is able to
- // pick them up.
- for (let collToWrite of[db1Coll, db2Coll]) {
- assert.writeOK(collToWrite.insert({_id: _idForTest}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, _idForTest);
- assert.eq(change.ns.db, collToWrite.getDB().getName());
- _idForTest++;
- }
-
- // Renaming the collection should generate a 'rename' notification. Skip this test when
- // running on a sharded collection, since these cannot be renamed.
- if (!FixtureHelpers.isSharded(collToInvalidate)) {
- assertDropAndRecreateCollection(testDB, collToInvalidate.getName());
- const collName = collToInvalidate.getName();
-
- // Start watching all changes in the cluster.
- aggCursor = cst.startWatchingAllChangesForCluster();
- assert.writeOK(collToInvalidate.renameCollection("renamed_coll"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [
- {
- operationType: "rename",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- to: {db: testDB.getName(), coll: "renamed_coll"}
- },
- ]
- });
+let change = cst.getOneChange(aggCursor, false);
+const resumeToken = change._id;
- // Repeat the test, this time using the 'dropTarget' option with an existing target
- // collection.
- collToInvalidate = testDB.getCollection("renamed_coll");
- assertDropAndRecreateCollection(testDB, collName);
- assert.writeOK(testDB[collName].insert({_id: 0}));
- assert.writeOK(collToInvalidate.renameCollection(collName, true /* dropTarget */));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [
- {
- operationType: "insert",
- ns: {db: testDB.getName(), coll: collName},
- documentKey: {_id: 0},
- fullDocument: {_id: 0}
- },
- {
- operationType: "rename",
- ns: {db: testDB.getName(), coll: "renamed_coll"},
- to: {db: testDB.getName(), coll: collName}
- }
- ]
- });
+// For cluster-wide streams, it is possible to resume at a point before a collection is dropped,
+// even if the "drop" notification has not been received on the original stream yet.
+assertDropCollection(db1Coll, db1Coll.getName());
+// Wait for two-phase drop to complete, so that the UUID no longer exists.
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB1,
+ db1Coll.getName());
+});
+assert.commandWorked(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken, allChangesForCluster: true}}],
+ cursor: {}
+}));
- collToInvalidate = testDB[collName];
+// Test that collection drops from any database result in "drop" notifications for the stream.
+[db1Coll, db2Coll] =
+ [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
+let _idForTest = 0;
+for (let collToInvalidate of [db1Coll, db2Coll]) {
+ // Start watching all changes in the cluster.
+ aggCursor = cst.startWatchingAllChangesForCluster();
- // Test renaming a collection to a different database. Do not run this in the mongos
- // passthrough suites since we cannot guarantee the primary shard of the target database
- // and renameCollection requires the source and destination to be on the same shard.
- if (!FixtureHelpers.isMongos(testDB)) {
- const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
- // Ensure the target database exists.
- const collOtherDB = assertDropAndRecreateCollection(otherDB, "test");
- assertDropCollection(otherDB, collOtherDB.getName());
- aggCursor = cst.startWatchingAllChangesForCluster();
- assert.commandWorked(testDB.adminCommand({
- renameCollection: collToInvalidate.getFullName(),
- to: collOtherDB.getFullName()
- }));
- // Do not check the 'ns' field since it will contain the namespace of the temp
- // collection created when renaming a collection across databases.
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "rename", tojson(change));
- assert.eq(change.to,
- {db: otherDB.getName(), coll: collOtherDB.getName()},
- tojson(change));
- // Rename across databases also drops the source collection after the collection is
- // copied over.
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
- operationType: "drop",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()}
- }]
- });
- }
+ let testDB = collToInvalidate.getDB();
- // Test the behavior of a change stream watching the target collection of a $out
- // aggregation stage.
- collToInvalidate.aggregate([{$out: "renamed_coll"}]);
- // Do not check the 'ns' field since it will contain the namespace of the temp
- // collection created by the $out stage, before renaming to 'renamed_coll'.
- const rename = cst.getOneChange(aggCursor);
- assert.eq(rename.operationType, "rename", tojson(rename));
- assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
+ // Insert into the collections on both databases, and verify the change stream is able to
+ // pick them up.
+ for (let collToWrite of [db1Coll, db2Coll]) {
+ assert.writeOK(collToWrite.insert({_id: _idForTest}));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.documentKey._id, _idForTest);
+ assert.eq(change.ns.db, collToWrite.getDB().getName());
+ _idForTest++;
+ }
- // The change stream should not be invalidated by the rename(s).
- assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
- assert.writeOK(collToInvalidate.insert({_id: 2}));
- assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
+ // Renaming the collection should generate a 'rename' notification. Skip this test when
+ // running on a sharded collection, since these cannot be renamed.
+ if (!FixtureHelpers.isSharded(collToInvalidate)) {
+ assertDropAndRecreateCollection(testDB, collToInvalidate.getName());
+ const collName = collToInvalidate.getName();
- // Test that renaming a "system" collection to a user collection *does* return a rename
- // notification.
- assert.commandWorked(testDB.runCommand(
- {create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
+ // Start watching all changes in the cluster.
+ aggCursor = cst.startWatchingAllChangesForCluster();
+ assert.writeOK(collToInvalidate.renameCollection("renamed_coll"));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [
+ {
operationType: "rename",
- ns: {db: testDB.getName(), coll: "system.views"},
- to: {db: testDB.getName(), coll: "non_system_collection"}
- }],
- });
-
- // Test that renaming a "system" collection to a different "system" collection does not
- // result in a notification in the change stream.
- aggCursor = cst.startWatchingAllChangesForCluster();
- assert.commandWorked(testDB.runCommand(
- {create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- // Note that the target of the rename must be a valid "system" collection.
- assert.writeOK(testDB.system.views.renameCollection("system.users"));
- // Verify that the change stream filters out the rename above, instead returning the
- // next insert to the test collection.
- assert.writeOK(collToInvalidate.insert({_id: 1}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ to: {db: testDB.getName(), coll: "renamed_coll"}
+ },
+ ]
+ });
- // Test that renaming a user collection to a "system" collection *does* return a rename
- // notification.
- assert.writeOK(collToInvalidate.renameCollection("system.views"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
+ // Repeat the test, this time using the 'dropTarget' option with an existing target
+ // collection.
+ collToInvalidate = testDB.getCollection("renamed_coll");
+ assertDropAndRecreateCollection(testDB, collName);
+ assert.writeOK(testDB[collName].insert({_id: 0}));
+ assert.writeOK(collToInvalidate.renameCollection(collName, true /* dropTarget */));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [
+ {
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: collName},
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0}
+ },
+ {
operationType: "rename",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- to: {db: testDB.getName(), coll: "system.views"}
- }],
- });
+ ns: {db: testDB.getName(), coll: "renamed_coll"},
+ to: {db: testDB.getName(), coll: collName}
+ }
+ ]
+ });
- // Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
- assertDropCollection(testDB, "system.views");
+ collToInvalidate = testDB[collName];
- // Recreate the test collection for the remainder of the test.
- assert.writeOK(collToInvalidate.insert({_id: 0}));
+ // Test renaming a collection to a different database. Do not run this in the mongos
+ // passthrough suites since we cannot guarantee the primary shard of the target database
+ // and renameCollection requires the source and destination to be on the same shard.
+ if (!FixtureHelpers.isMongos(testDB)) {
+ const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
+ // Ensure the target database exists.
+ const collOtherDB = assertDropAndRecreateCollection(otherDB, "test");
+ assertDropCollection(otherDB, collOtherDB.getName());
+ aggCursor = cst.startWatchingAllChangesForCluster();
+ assert.commandWorked(testDB.adminCommand(
+ {renameCollection: collToInvalidate.getFullName(), to: collOtherDB.getFullName()}));
+ // Do not check the 'ns' field since it will contain the namespace of the temp
+ // collection created when renaming a collection across databases.
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "rename", tojson(change));
+ assert.eq(
+ change.to, {db: otherDB.getName(), coll: collOtherDB.getName()}, tojson(change));
+ // Rename across databases also drops the source collection after the collection is
+ // copied over.
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
- operationType: "insert",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- documentKey: {_id: 0},
- fullDocument: {_id: 0}
+ operationType: "drop",
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()}
}]
});
}
- // Dropping a collection should generate a 'drop' entry.
- assertDropCollection(testDB, collToInvalidate.getName());
- // Insert to the test collection to queue up another change after the drop. This is needed
- // since the number of 'drop' notifications is not deterministic in the sharded passthrough
- // suites.
- assert.writeOK(collToInvalidate.insert({_id: 0}));
- cst.consumeDropUpTo({
+ // Test the behavior of a change stream watching the target collection of a $out
+ // aggregation stage.
+ collToInvalidate.aggregate([{$out: "renamed_coll"}]);
+ // Do not check the 'ns' field since it will contain the namespace of the temp
+ // collection created by the $out stage, before renaming to 'renamed_coll'.
+ const rename = cst.getOneChange(aggCursor);
+ assert.eq(rename.operationType, "rename", tojson(rename));
+ assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
+
+ // The change stream should not be invalidated by the rename(s).
+ assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
+ assert.writeOK(collToInvalidate.insert({_id: 2}));
+ assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
+
+ // Test that renaming a "system" collection to a user collection *does* return a rename
+ // notification.
+ assert.commandWorked(
+ testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
+ assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
+ cst.assertNextChangesEqual({
cursor: aggCursor,
- dropType: "drop",
- expectedNext: {
- documentKey: {_id: 0},
- fullDocument: {_id: 0},
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- operationType: "insert",
- },
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: "system.views"},
+ to: {db: testDB.getName(), coll: "non_system_collection"}
+ }],
});
- // Operations on internal "system" collections should be filtered out and not included in
- // the change stream.
+ // Test that renaming a "system" collection to a different "system" collection does not
+ // result in a notification in the change stream.
aggCursor = cst.startWatchingAllChangesForCluster();
- // Creating a view will generate an insert entry on the "system.views" collection.
assert.commandWorked(
testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- // Drop the "system.views" collection.
- assertDropCollection(testDB, "system.views");
- // Verify that the change stream does not report the insertion into "system.views", and is
- // not invalidated by dropping the system collection. Instead, it correctly reports the next
- // write to the test collection.
+ // Note that the target of the rename must be a valid "system" collection.
+ assert.writeOK(testDB.system.views.renameCollection("system.users"));
+ // Verify that the change stream filters out the rename above, instead returning the
+ // next insert to the test collection.
assert.writeOK(collToInvalidate.insert({_id: 1}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
+
+ // Test that renaming a user collection to a "system" collection *does* return a rename
+ // notification.
+ assert.writeOK(collToInvalidate.renameCollection("system.views"));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ to: {db: testDB.getName(), coll: "system.views"}
+ }],
+ });
+
+ // Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
+ assertDropCollection(testDB, "system.views");
+
+ // Recreate the test collection for the remainder of the test.
+ assert.writeOK(collToInvalidate.insert({_id: 0}));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0}
+ }]
+ });
}
- cst.cleanUp();
+ // Dropping a collection should generate a 'drop' entry.
+ assertDropCollection(testDB, collToInvalidate.getName());
+ // Insert to the test collection to queue up another change after the drop. This is needed
+ // since the number of 'drop' notifications is not deterministic in the sharded passthrough
+ // suites.
+ assert.writeOK(collToInvalidate.insert({_id: 0}));
+ cst.consumeDropUpTo({
+ cursor: aggCursor,
+ dropType: "drop",
+ expectedNext: {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0},
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ operationType: "insert",
+ },
+ });
+
+ // Operations on internal "system" collections should be filtered out and not included in
+ // the change stream.
+ aggCursor = cst.startWatchingAllChangesForCluster();
+ // Creating a view will generate an insert entry on the "system.views" collection.
+ assert.commandWorked(
+ testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
+ // Drop the "system.views" collection.
+ assertDropCollection(testDB, "system.views");
+ // Verify that the change stream does not report the insertion into "system.views", and is
+ // not invalidated by dropping the system collection. Instead, it correctly reports the next
+ // write to the test collection.
+ assert.writeOK(collToInvalidate.insert({_id: 1}));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
+}
+
+cst.cleanUp();
}());
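
For context, the behavior pinned down by the hunk above reduces to a small shell pattern: open one cursor against the admin database with 'allChangesForCluster', then read 'rename' events whose 'ns' and 'to' fields name the source and destination namespaces. A minimal sketch, not taken from this patch, with illustrative collection names and assuming a replica-set mongo shell:

// Illustrative sketch; names below are not from the patch under review.
const adminDB = db.getSiblingDB("admin");
const stream = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);

// A user-collection rename surfaces as a 'rename' event; per the tests above,
// system-to-system renames are filtered, while renames with a non-system
// namespace on either side are reported.
assert.writeOK(db.source.insert({_id: 1}));
assert.writeOK(db.source.renameCollection("target"));

assert.soon(() => stream.hasNext());
assert.eq(stream.next().operationType, "insert");
assert.soon(() => stream.hasNext());
const renameEvent = stream.next();
assert.eq(renameEvent.operationType, "rename");
assert.eq(renameEvent.to, {db: db.getName(), coll: "target"});
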
diff --git a/jstests/change_streams/whole_cluster_resumability.js b/jstests/change_streams/whole_cluster_resumability.js
index 4a907315fd5..270f6c465db 100644
--- a/jstests/change_streams/whole_cluster_resumability.js
+++ b/jstests/change_streams/whole_cluster_resumability.js
@@ -1,169 +1,167 @@
// Basic tests for resuming a $changeStream that is open against all databases in a cluster.
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Create two databases, with one collection in each.
+const testDBs = [db.getSiblingDB(jsTestName()), db.getSiblingDB(jsTestName() + "_other")];
+let [db1Coll, db2Coll] = testDBs.map((db) => assertDropAndRecreateCollection(db, "test"));
+const adminDB = db.getSiblingDB("admin");
+
+let cst = new ChangeStreamTest(adminDB);
+let resumeCursor = cst.startWatchingAllChangesForCluster();
+
+// Insert a document in the first database and save the resulting change document.
+assert.writeOK(db1Coll.insert({_id: 1}));
+const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+
+// Test resume after the first insert.
+resumeCursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+
+// Write the next document into the second database.
+assert.writeOK(db2Coll.insert({_id: 2}));
+const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+
+// Write the third document into the first database again.
+assert.writeOK(db1Coll.insert({_id: 3}));
+const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+
+// Test resuming after the first insert again.
+resumeCursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Test resume after second insert.
+resumeCursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {resumeAfter: secondInsertChangeDoc._id, allChangesForCluster: true}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Rename the collection and obtain a resume token from the 'rename' notification. Skip this
+// test when running on a sharded collection, since these cannot be renamed.
+if (!FixtureHelpers.isSharded(db1Coll)) {
+ assertDropAndRecreateCollection(db1Coll.getDB(), db1Coll.getName());
+ const renameColl = db1Coll.getDB().getCollection("rename_coll");
+ assertDropCollection(renameColl.getDB(), renameColl.getName());
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Create two databases, with one collection in each.
- const testDBs = [db.getSiblingDB(jsTestName()), db.getSiblingDB(jsTestName() + "_other")];
- let [db1Coll, db2Coll] = testDBs.map((db) => assertDropAndRecreateCollection(db, "test"));
- const adminDB = db.getSiblingDB("admin");
-
- let cst = new ChangeStreamTest(adminDB);
- let resumeCursor = cst.startWatchingAllChangesForCluster();
-
-    // Insert a document in the first database and save the resulting change document.
- assert.writeOK(db1Coll.insert({_id: 1}));
- const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
-
- // Test resume after the first insert.
resumeCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
+ pipeline: [{$changeStream: {allChangesForCluster: true}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
});
+ assert.writeOK(db1Coll.renameCollection(renameColl.getName()));
- // Write the next document into the second database.
- assert.writeOK(db2Coll.insert({_id: 2}));
- const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
-
- // Write the third document into the first database again.
- assert.writeOK(db1Coll.insert({_id: 3}));
- const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
-
- // Test resuming after the first insert again.
- resumeCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
+ const renameChanges = cst.assertNextChangesEqual({
+ cursor: resumeCursor,
+ expectedChanges: [
+ {
+ operationType: "rename",
+ ns: {db: db1Coll.getDB().getName(), coll: db1Coll.getName()},
+ to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
+ },
+ ]
});
- assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+ const resumeTokenRename = renameChanges[0]._id;
- // Test resume after second insert.
- resumeCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: secondInsertChangeDoc._id, allChangesForCluster: true}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- // Rename the collection and obtain a resume token from the 'rename' notification. Skip this
- // test when running on a sharded collection, since these cannot be renamed.
- if (!FixtureHelpers.isSharded(db1Coll)) {
- assertDropAndRecreateCollection(db1Coll.getDB(), db1Coll.getName());
- const renameColl = db1Coll.getDB().getCollection("rename_coll");
- assertDropCollection(renameColl.getDB(), renameColl.getName());
-
- resumeCursor = cst.startWatchingChanges({
- collection: 1,
- pipeline: [{$changeStream: {allChangesForCluster: true}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- assert.writeOK(db1Coll.renameCollection(renameColl.getName()));
-
- const renameChanges = cst.assertNextChangesEqual({
- cursor: resumeCursor,
- expectedChanges: [
- {
- operationType: "rename",
- ns: {db: db1Coll.getDB().getName(), coll: db1Coll.getName()},
- to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
- },
- ]
- });
- const resumeTokenRename = renameChanges[0]._id;
-
- // Insert into the renamed collection.
- assert.writeOK(renameColl.insert({_id: "after rename"}));
-
- // Resume from the rename notification using 'resumeAfter' and verify that the change stream
- // returns the next insert.
- let expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges({
- collection: 1,
- pipeline:
- [{$changeStream: {resumeAfter: resumeTokenRename, allChangesForCluster: true}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Resume from the rename notification using 'startAfter' and verify that the change stream
- // returns the next insert.
- expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges({
- collection: 1,
- pipeline:
- [{$changeStream: {startAfter: resumeTokenRename, allChangesForCluster: true}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Rename back to the original collection for reliability of the collection drops when
- // dropping the database.
- assert.writeOK(renameColl.renameCollection(db1Coll.getName()));
- }
-
- // Dropping a database should generate a 'drop' notification for the collection followed by a
- // 'dropDatabase' notification.
- resumeCursor = cst.startWatchingAllChangesForCluster();
- assert.commandWorked(testDBs[0].dropDatabase());
- const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDBs[0]});
- const resumeTokenDbDrop = dropDbChanges[dropDbChanges.length - 1]._id;
-
- // Recreate the collection and insert a document.
- assert.writeOK(db1Coll.insert({_id: "after recreate"}));
+ // Insert into the renamed collection.
+ assert.writeOK(renameColl.insert({_id: "after rename"}));
+ // Resume from the rename notification using 'resumeAfter' and verify that the change stream
+ // returns the next insert.
let expectedInsert = {
operationType: "insert",
- ns: {db: testDBs[0].getName(), coll: db1Coll.getName()},
- fullDocument: {_id: "after recreate"},
- documentKey: {_id: "after recreate"}
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
};
-
- // Resume from the database drop using 'resumeAfter', and verify the change stream picks up
- // the insert.
resumeCursor = cst.startWatchingChanges({
collection: 1,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenRename, allChangesForCluster: true}}],
aggregateOptions: {cursor: {batchSize: 0}}
});
- cst.consumeDropUpTo({
- cursor: resumeCursor,
- dropType: "dropDatabase",
- expectedNext: expectedInsert,
- });
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
- // Resume from the database drop using 'startAfter', and verify the change stream picks up the
- // insert.
+ // Resume from the rename notification using 'startAfter' and verify that the change stream
+ // returns the next insert.
+ expectedInsert = {
+ operationType: "insert",
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ };
resumeCursor = cst.startWatchingChanges({
collection: 1,
- pipeline: [{$changeStream: {startAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ pipeline: [{$changeStream: {startAfter: resumeTokenRename, allChangesForCluster: true}}],
aggregateOptions: {cursor: {batchSize: 0}}
});
- cst.consumeDropUpTo({
- cursor: resumeCursor,
- dropType: "dropDatabase",
- expectedNext: expectedInsert,
- });
-
- cst.cleanUp();
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
+
+ // Rename back to the original collection for reliability of the collection drops when
+ // dropping the database.
+ assert.writeOK(renameColl.renameCollection(db1Coll.getName()));
+}
+
+// Dropping a database should generate a 'drop' notification for the collection followed by a
+// 'dropDatabase' notification.
+resumeCursor = cst.startWatchingAllChangesForCluster();
+assert.commandWorked(testDBs[0].dropDatabase());
+const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDBs[0]});
+const resumeTokenDbDrop = dropDbChanges[dropDbChanges.length - 1]._id;
+
+// Recreate the collection and insert a document.
+assert.writeOK(db1Coll.insert({_id: "after recreate"}));
+
+let expectedInsert = {
+ operationType: "insert",
+ ns: {db: testDBs[0].getName(), coll: db1Coll.getName()},
+ fullDocument: {_id: "after recreate"},
+ documentKey: {_id: "after recreate"}
+};
+
+// Resume from the database drop using 'resumeAfter', and verify the change stream picks up
+// the insert.
+resumeCursor = cst.startWatchingChanges({
+ collection: 1,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: resumeCursor,
+ dropType: "dropDatabase",
+ expectedNext: expectedInsert,
+});
+
+// Resume from the database drop using 'startAfter', and verify the change stream picks up the
+// insert.
+resumeCursor = cst.startWatchingChanges({
+ collection: 1,
+ pipeline: [{$changeStream: {startAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: resumeCursor,
+ dropType: "dropDatabase",
+ expectedNext: expectedInsert,
+});
+
+cst.cleanUp();
})();
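
The resume logic exercised above follows one pattern: every change document carries a resume token in its '_id', and a new stream opened with 'resumeAfter' (or 'startAfter') replays whatever followed that event. A minimal sketch under the same assumptions as the previous one, again with illustrative names:

// Illustrative sketch; names below are not from the patch under review.
const adminDB = db.getSiblingDB("admin");
let stream = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);
assert.writeOK(db.test.insert({_id: "a"}));
assert.writeOK(db.test.insert({_id: "b"}));
assert.soon(() => stream.hasNext());
const resumeToken = stream.next()._id;  // Token for the {_id: "a"} insert.

// A stream resumed after the first event sees only the second insert.
stream = adminDB.aggregate(
    [{$changeStream: {resumeAfter: resumeToken, allChangesForCluster: true}}]);
assert.soon(() => stream.hasNext());
assert.eq(stream.next().documentKey, {_id: "b"});
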
diff --git a/jstests/change_streams/whole_db.js b/jstests/change_streams/whole_db.js
index e05fe809636..aaa6fd0a29f 100644
--- a/jstests/change_streams/whole_db.js
+++ b/jstests/change_streams/whole_db.js
@@ -3,84 +3,84 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
- // assert[Valid|Invalid]ChangeStreamNss.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- db = db.getSiblingDB(jsTestName());
- assert.commandWorked(db.dropDatabase());
+db = db.getSiblingDB(jsTestName());
+assert.commandWorked(db.dropDatabase());
- // Test that a single-database change stream cannot be opened on "admin", "config", or "local".
- assertInvalidChangeStreamNss("admin", 1);
- assertInvalidChangeStreamNss("config", 1);
- if (!FixtureHelpers.isMongos(db)) {
- assertInvalidChangeStreamNss("local", 1);
- }
+// Test that a single-database change stream cannot be opened on "admin", "config", or "local".
+assertInvalidChangeStreamNss("admin", 1);
+assertInvalidChangeStreamNss("config", 1);
+if (!FixtureHelpers.isMongos(db)) {
+ assertInvalidChangeStreamNss("local", 1);
+}
- let cst = new ChangeStreamTest(db);
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+let cst = new ChangeStreamTest(db);
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
- // Test that the change stream returns an inserted doc.
- assert.writeOK(db.t1.insert({_id: 0, a: 1}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns an inserted doc.
+assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns another inserted doc in a different collection but still
- // in the target db.
- assert.writeOK(db.t2.insert({_id: 0, a: 2}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 2},
- ns: {db: db.getName(), coll: "t2"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns another inserted doc in a different collection but still
+// in the target db.
+assert.writeOK(db.t2.insert({_id: 0, a: 2}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 2},
+ ns: {db: db.getName(), coll: "t2"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns an inserted doc on a user-created collection whose name
- // includes "system" but is not considered an internal collection.
- const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
- validSystemColls.forEach(collName => {
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- const coll = db.getCollection(collName);
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- expected = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: collName},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+// Test that the change stream returns an inserted doc on a user-created collection whose name
+// includes "system" but is not considered an internal collection.
+const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
+validSystemColls.forEach(collName => {
+ cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+ const coll = db.getCollection(collName);
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ expected = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: collName},
+ operationType: "insert",
+ },
+ ];
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- // Drop the collection and verify that the change stream picks up the "drop" notification.
- assertDropCollection(db, collName);
- // Insert to the test collection to queue up another change after the drop. This is needed
- // since the number of 'drop' notifications is not deterministic in the sharded passthrough
- // suites.
- assert.writeOK(coll.insert({_id: 0}));
- cst.consumeDropUpTo({
- cursor: cursor,
- dropType: "drop",
- expectedNext: {
- documentKey: {_id: 0},
- fullDocument: {_id: 0},
- ns: {db: db.getName(), coll: collName},
- operationType: "insert",
- },
- });
+ // Drop the collection and verify that the change stream picks up the "drop" notification.
+ assertDropCollection(db, collName);
+ // Insert to the test collection to queue up another change after the drop. This is needed
+ // since the number of 'drop' notifications is not deterministic in the sharded passthrough
+ // suites.
+ assert.writeOK(coll.insert({_id: 0}));
+ cst.consumeDropUpTo({
+ cursor: cursor,
+ dropType: "drop",
+ expectedNext: {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0},
+ ns: {db: db.getName(), coll: collName},
+ operationType: "insert",
+ },
});
+});
- cst.cleanUp();
+cst.cleanUp();
}());
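
The whole-db variant tested above differs from the cluster-wide form only in where the aggregate is issued: against the target database rather than 'admin', and without 'allChangesForCluster'. A sketch with an illustrative database name (recall from the test that "admin", "config", and "local" reject such streams):

// Illustrative sketch; names below are not from the patch under review.
const demoDB = db.getSiblingDB("demo");
const stream = demoDB.aggregate([{$changeStream: {}}]);
assert.writeOK(demoDB.t1.insert({_id: 0}));
assert.writeOK(demoDB.t2.insert({_id: 0}));

// Writes to any collection in 'demo' arrive on the same cursor; the event's
// 'ns.coll' field identifies the source collection.
assert.soon(() => stream.hasNext());
assert.eq(stream.next().ns.coll, "t1");
assert.soon(() => stream.hasNext());
assert.eq(stream.next().ns.coll, "t2");
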
diff --git a/jstests/change_streams/whole_db_metadata_notifications.js b/jstests/change_streams/whole_db_metadata_notifications.js
index 54d4b8cc6e2..7b659ff4e12 100644
--- a/jstests/change_streams/whole_db_metadata_notifications.js
+++ b/jstests/change_streams/whole_db_metadata_notifications.js
@@ -3,256 +3,250 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- const testDB = db.getSiblingDB(jsTestName());
- testDB.dropDatabase();
- let cst = new ChangeStreamTest(testDB);
-
- // Write a document to the collection and test that the change stream returns it
-    // and that the getMore command closes the cursor afterwards.
- const collName = "test";
- let coll = assertDropAndRecreateCollection(testDB, collName);
-
- let aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
-
- // Create oplog entries of type insert, update, and delete.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
-
- // Drop and recreate the collection.
- const collAgg = assertDropAndRecreateCollection(testDB, collName);
-
- // We should get 4 oplog entries of type insert, update, delete, and drop.
- let change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "update", tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "delete", tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "drop", tojson(change));
-
- // Get a valid resume token that the next change stream can use.
- assert.writeOK(collAgg.insert({_id: 1}));
-
- change = cst.getOneChange(aggCursor, false);
- const resumeToken = change._id;
-
- // For whole-db streams, it is possible to resume at a point before a collection is dropped.
- assertDropCollection(testDB, collAgg.getName());
- // Wait for two-phase drop to complete, so that the UUID no longer exists.
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB,
- collAgg.getName());
- });
- assert.commandWorked(testDB.runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}}));
-
- // Test that invalidation entries for other databases are filtered out.
- const otherDB = testDB.getSiblingDB(jsTestName() + "other");
- const otherDBColl = otherDB[collName + "_other"];
- assert.writeOK(otherDBColl.insert({_id: 0}));
-
-    // Create a collection in the database being watched.
- coll = assertDropAndRecreateCollection(testDB, collName);
-
- // Create the $changeStream. We set 'doNotModifyInPassthroughs' so that this test will not be
- // upconverted to a cluster-wide stream, which would return an entry for the dropped collection
- // in the other database.
- aggCursor = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: 1, doNotModifyInPassthroughs: true});
-
-    // Drop the collection on the other database; this should *not* invalidate the change stream.
- assertDropCollection(otherDB, otherDBColl.getName());
-
- // Insert into the collection in the watched database, and verify the change stream is able to
- // pick it up.
- assert.writeOK(coll.insert({_id: 1}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, 1);
-
- // Test that renaming a collection generates a 'rename' entry for the 'from' collection. MongoDB
- // does not allow renaming of sharded collections, so only perform this test if the collection
- // is not sharded.
- if (!FixtureHelpers.isSharded(coll)) {
- assertDropAndRecreateCollection(testDB, coll.getName());
- assertDropCollection(testDB, "renamed_coll");
- aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assert.writeOK(coll.renameCollection("renamed_coll"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
- operationType: "rename",
- ns: {db: testDB.getName(), coll: coll.getName()},
- to: {db: testDB.getName(), coll: "renamed_coll"}
- }]
- });
-
- // Repeat the test, this time using the 'dropTarget' option with an existing target
- // collection.
- coll = testDB["renamed_coll"];
- assertCreateCollection(testDB, collName);
- assert.writeOK(testDB[collName].insert({_id: 0}));
- assert.writeOK(coll.renameCollection(collName, true /* dropTarget */));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [
- {
- operationType: "insert",
- ns: {db: testDB.getName(), coll: collName},
- documentKey: {_id: 0},
- fullDocument: {_id: 0}
- },
- {
- operationType: "rename",
- ns: {db: testDB.getName(), coll: "renamed_coll"},
- to: {db: testDB.getName(), coll: collName}
- }
- ]
- });
-
- coll = testDB[collName];
- // Test renaming a collection from the database being watched to a different database. Do
- // not run this in the mongos passthrough suites since we cannot guarantee the primary shard
- // of the target database, and renameCollection requires the source and destination to be on
- // the same shard.
- if (!FixtureHelpers.isMongos(testDB)) {
- const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
- // Create target collection to ensure the database exists.
- const collOtherDB = assertCreateCollection(otherDB, "test");
- assertDropCollection(otherDB, "test");
- assert.commandWorked(testDB.adminCommand(
- {renameCollection: coll.getFullName(), to: collOtherDB.getFullName()}));
- // Rename across databases drops the source collection after the collection is copied
- // over.
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges:
- [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}]
- });
-
- // Test renaming a collection from a different database to the database being watched.
- assert.commandWorked(testDB.adminCommand(
- {renameCollection: collOtherDB.getFullName(), to: coll.getFullName()}));
- // Do not check the 'ns' field since it will contain the namespace of the temp
- // collection created when renaming a collection across databases.
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "rename");
- assert.eq(change.to, {db: testDB.getName(), coll: coll.getName()});
- }
-
- // Test the behavior of a change stream watching the target collection of a $out aggregation
- // stage.
- coll.aggregate([{$out: "renamed_coll"}]);
- // Note that $out will first create a temp collection, and then rename the temp collection
- // to the target. Do not explicitly check the 'ns' field.
- const rename = cst.getOneChange(aggCursor);
- assert.eq(rename.operationType, "rename", tojson(rename));
- assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
-
- // The change stream should not be invalidated by the rename(s).
- assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
- assert.writeOK(coll.insert({_id: 2}));
- assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
-
- // Drop the new collection to avoid an additional 'drop' notification when the database is
- // dropped.
- assertDropCollection(testDB, "renamed_coll");
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges:
- [{operationType: "drop", ns: {db: testDB.getName(), coll: "renamed_coll"}}],
- });
- }
-
- // Dropping a collection should return a 'drop' entry.
- assertDropCollection(testDB, coll.getName());
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges:
- [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}],
- });
-
- // Operations on internal "system" collections should be filtered out and not included in the
- // change stream.
- aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- // Creating a view will generate an insert entry on the "system.views" collection.
- assert.commandWorked(
- testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
- // Drop the "system.views" collection.
- assertDropCollection(testDB, "system.views");
- // Verify that the change stream does not report the insertion into "system.views", and is
- // not invalidated by dropping the system collection. Instead, it correctly reports the next
- // write to the test collection.
- assert.writeOK(coll.insert({_id: 0}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
-
- // Test that renaming a "system" collection *does* return a notification if the target of
- // the rename is a non-system collection.
- assert.commandWorked(
- testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
- assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
- operationType: "rename",
- ns: {db: testDB.getName(), coll: "system.views"},
- to: {db: testDB.getName(), coll: "non_system_collection"}
- }],
- });
-
- // Test that renaming a "system" collection to a different "system" collection does not
- // result in a notification in the change stream.
+"use strict";
+
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+const testDB = db.getSiblingDB(jsTestName());
+testDB.dropDatabase();
+let cst = new ChangeStreamTest(testDB);
+
+// Write a document to the collection and test that the change stream returns it
+// and that the getMore command closes the cursor afterwards.
+const collName = "test";
+let coll = assertDropAndRecreateCollection(testDB, collName);
+
+let aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Create oplog entries of type insert, update, and delete.
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.writeOK(coll.remove({_id: 1}));
+
+// Drop and recreate the collection.
+const collAgg = assertDropAndRecreateCollection(testDB, collName);
+
+// We should get 4 oplog entries of type insert, update, delete, and drop.
+let change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "update", tojson(change));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "delete", tojson(change));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "drop", tojson(change));
+
+// Get a valid resume token that the next change stream can use.
+assert.writeOK(collAgg.insert({_id: 1}));
+
+change = cst.getOneChange(aggCursor, false);
+const resumeToken = change._id;
+
+// For whole-db streams, it is possible to resume at a point before a collection is dropped.
+assertDropCollection(testDB, collAgg.getName());
+// Wait for two-phase drop to complete, so that the UUID no longer exists.
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB, collAgg.getName());
+});
+assert.commandWorked(testDB.runCommand(
+ {aggregate: 1, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}}));
+
+// Test that invalidation entries for other databases are filtered out.
+const otherDB = testDB.getSiblingDB(jsTestName() + "other");
+const otherDBColl = otherDB[collName + "_other"];
+assert.writeOK(otherDBColl.insert({_id: 0}));
+
+// Create a collection in the database being watched.
+coll = assertDropAndRecreateCollection(testDB, collName);
+
+// Create the $changeStream. We set 'doNotModifyInPassthroughs' so that this test will not be
+// upconverted to a cluster-wide stream, which would return an entry for the dropped collection
+// in the other database.
+aggCursor = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: 1, doNotModifyInPassthroughs: true});
+
+// Drop the collection on the other database; this should *not* invalidate the change stream.
+assertDropCollection(otherDB, otherDBColl.getName());
+
+// Insert into the collection in the watched database, and verify the change stream is able to
+// pick it up.
+assert.writeOK(coll.insert({_id: 1}));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, 1);
+
+// Test that renaming a collection generates a 'rename' entry for the 'from' collection. MongoDB
+// does not allow renaming of sharded collections, so only perform this test if the collection
+// is not sharded.
+if (!FixtureHelpers.isSharded(coll)) {
+ assertDropAndRecreateCollection(testDB, coll.getName());
+ assertDropCollection(testDB, "renamed_coll");
aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assert.commandWorked(
- testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
- // Note that the target of the rename must be a valid "system" collection.
- assert.writeOK(testDB.system.views.renameCollection("system.users"));
- // Verify that the change stream filters out the rename above, instead returning the next insert
- // to the test collection.
- assert.writeOK(coll.insert({_id: 1}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
-
- // Test that renaming a user collection to a "system" collection *is* returned in the change
- // stream.
- assert.writeOK(coll.renameCollection("system.views"));
+ assert.writeOK(coll.renameCollection("renamed_coll"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
operationType: "rename",
ns: {db: testDB.getName(), coll: coll.getName()},
- to: {db: testDB.getName(), coll: "system.views"}
- }],
+ to: {db: testDB.getName(), coll: "renamed_coll"}
+ }]
});
- // Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
- assertDropCollection(testDB, "system.views");
- assertDropCollection(testDB, "non_system_collection");
+ // Repeat the test, this time using the 'dropTarget' option with an existing target
+ // collection.
+ coll = testDB["renamed_coll"];
+ assertCreateCollection(testDB, collName);
+ assert.writeOK(testDB[collName].insert({_id: 0}));
+ assert.writeOK(coll.renameCollection(collName, true /* dropTarget */));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [
- {operationType: "drop", ns: {db: testDB.getName(), coll: "non_system_collection"}},
+ {
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: collName},
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0}
+ },
+ {
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: "renamed_coll"},
+ to: {db: testDB.getName(), coll: collName}
+ }
]
});
- // Dropping the database should generate a 'dropDatabase' notification followed by an
- // 'invalidate'.
- assert.commandWorked(testDB.dropDatabase());
- cst.assertDatabaseDrop({cursor: aggCursor, db: testDB});
- cst.assertNextChangesEqual(
- {cursor: aggCursor, expectedChanges: [{operationType: "invalidate"}]});
+ coll = testDB[collName];
+ // Test renaming a collection from the database being watched to a different database. Do
+ // not run this in the mongos passthrough suites since we cannot guarantee the primary shard
+ // of the target database, and renameCollection requires the source and destination to be on
+ // the same shard.
+ if (!FixtureHelpers.isMongos(testDB)) {
+ const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
+ // Create target collection to ensure the database exists.
+ const collOtherDB = assertCreateCollection(otherDB, "test");
+ assertDropCollection(otherDB, "test");
+ assert.commandWorked(testDB.adminCommand(
+ {renameCollection: coll.getFullName(), to: collOtherDB.getFullName()}));
+ // Rename across databases drops the source collection after the collection is copied
+ // over.
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges:
+ [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}]
+ });
+
+ // Test renaming a collection from a different database to the database being watched.
+ assert.commandWorked(testDB.adminCommand(
+ {renameCollection: collOtherDB.getFullName(), to: coll.getFullName()}));
+ // Do not check the 'ns' field since it will contain the namespace of the temp
+ // collection created when renaming a collection across databases.
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "rename");
+ assert.eq(change.to, {db: testDB.getName(), coll: coll.getName()});
+ }
- cst.cleanUp();
+ // Test the behavior of a change stream watching the target collection of a $out aggregation
+ // stage.
+ coll.aggregate([{$out: "renamed_coll"}]);
+ // Note that $out will first create a temp collection, and then rename the temp collection
+ // to the target. Do not explicitly check the 'ns' field.
+ const rename = cst.getOneChange(aggCursor);
+ assert.eq(rename.operationType, "rename", tojson(rename));
+ assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
+
+ // The change stream should not be invalidated by the rename(s).
+ assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
+ assert.writeOK(coll.insert({_id: 2}));
+ assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
+
+ // Drop the new collection to avoid an additional 'drop' notification when the database is
+ // dropped.
+ assertDropCollection(testDB, "renamed_coll");
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges:
+ [{operationType: "drop", ns: {db: testDB.getName(), coll: "renamed_coll"}}],
+ });
+}
+
+// Dropping a collection should return a 'drop' entry.
+assertDropCollection(testDB, coll.getName());
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}],
+});
+
+// Operations on internal "system" collections should be filtered out and not included in the
+// change stream.
+aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+// Creating a view will generate an insert entry on the "system.views" collection.
+assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
+// Drop the "system.views" collection.
+assertDropCollection(testDB, "system.views");
+// Verify that the change stream does not report the insertion into "system.views", and is
+// not invalidated by dropping the system collection. Instead, it correctly reports the next
+// write to the test collection.
+assert.writeOK(coll.insert({_id: 0}));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test that renaming a "system" collection *does* return a notification if the target of
+// the rename is a non-system collection.
+assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
+assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: "system.views"},
+ to: {db: testDB.getName(), coll: "non_system_collection"}
+ }],
+});
+
+// Test that renaming a "system" collection to a different "system" collection does not
+// result in a notification in the change stream.
+aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
+// Note that the target of the rename must be a valid "system" collection.
+assert.writeOK(testDB.system.views.renameCollection("system.users"));
+// Verify that the change stream filters out the rename above, instead returning the next insert
+// to the test collection.
+assert.writeOK(coll.insert({_id: 1}));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test that renaming a user collection to a "system" collection *is* returned in the change
+// stream.
+assert.writeOK(coll.renameCollection("system.views"));
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: coll.getName()},
+ to: {db: testDB.getName(), coll: "system.views"}
+ }],
+});
+
+// Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
+assertDropCollection(testDB, "system.views");
+assertDropCollection(testDB, "non_system_collection");
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [
+ {operationType: "drop", ns: {db: testDB.getName(), coll: "non_system_collection"}},
+ ]
+});
+
+// Dropping the database should generate a 'dropDatabase' notification followed by an
+// 'invalidate'.
+assert.commandWorked(testDB.dropDatabase());
+cst.assertDatabaseDrop({cursor: aggCursor, db: testDB});
+cst.assertNextChangesEqual({cursor: aggCursor, expectedChanges: [{operationType: "invalidate"}]});
+
+cst.cleanUp();
}());
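
The metadata lifecycle asserted above is: dropping a watched collection yields a 'drop' event; dropping the whole database yields one 'drop' per collection, then 'dropDatabase', then 'invalidate', after which the cursor is closed. A sketch of a consumer draining a whole-db stream to its invalidate, under the same assumptions and with illustrative names:

// Illustrative sketch; names below are not from the patch under review.
const metaDB = db.getSiblingDB("demo_meta");
assert.writeOK(metaDB.c.insert({_id: 0}));
const stream = metaDB.aggregate([{$changeStream: {}}]);
assert.commandWorked(metaDB.dropDatabase());

let event;
do {
    assert.soon(() => stream.hasNext());
    event = stream.next();
    // Expect 'drop' (one per collection), then 'dropDatabase', then 'invalidate'.
} while (event.operationType !== "invalidate");
// The cursor returns no further events once invalidated.
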
diff --git a/jstests/change_streams/whole_db_resumability.js b/jstests/change_streams/whole_db_resumability.js
index b71495355fa..697f72ddcf9 100644
--- a/jstests/change_streams/whole_db_resumability.js
+++ b/jstests/change_streams/whole_db_resumability.js
@@ -3,199 +3,199 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Drop and recreate the collections to be used in this set of tests.
- const testDB = db.getSiblingDB(jsTestName());
- let coll = assertDropAndRecreateCollection(testDB, "resume_coll");
- const otherColl = assertDropAndRecreateCollection(testDB, "resume_coll_other");
-
- let cst = new ChangeStreamTest(testDB);
- let resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
-
- // Insert a single document to each collection and save the resume token from the first insert.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(otherColl.insert({_id: 2}));
- const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
- assert.eq(firstInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
-
-    // Test that resuming the change stream after the first insert picks up the insert on the
- // second collection.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Drop and recreate the collections to be used in this set of tests.
+const testDB = db.getSiblingDB(jsTestName());
+let coll = assertDropAndRecreateCollection(testDB, "resume_coll");
+const otherColl = assertDropAndRecreateCollection(testDB, "resume_coll_other");
+
+let cst = new ChangeStreamTest(testDB);
+let resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Insert a single document to each collection and save the resume token from the first insert.
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(otherColl.insert({_id: 2}));
+const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+assert.eq(firstInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test that resuming the change stream after the first insert picks up the insert on the
+// second collection.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+
+const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.eq(secondInsertChangeDoc.ns, {db: testDB.getName(), coll: otherColl.getName()});
+
+// Insert a third document to the first collection and test that the change stream picks it up.
+assert.writeOK(coll.insert({_id: 3}));
+const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+assert.eq(thirdInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test resuming after the first insert again.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Test resume after second insert.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Rename the collection and attempt to resume from the 'rename' notification. Skip this
+// test when running on a sharded collection, since these cannot be renamed.
+if (!FixtureHelpers.isSharded(coll)) {
+ assertDropAndRecreateCollection(coll.getDB(), coll.getName());
+ const renameColl = coll.getDB().getCollection("rename_coll");
+ assertDropCollection(renameColl.getDB(), renameColl.getName());
+
+ resumeCursor = cst.startWatchingChanges({collection: 1, pipeline: [{$changeStream: {}}]});
+ assert.writeOK(coll.renameCollection(renameColl.getName()));
+
+ const renameChanges = cst.assertNextChangesEqual({
+ cursor: resumeCursor,
+ expectedChanges: [
+ {
+ operationType: "rename",
+ ns: {db: coll.getDB().getName(), coll: coll.getName()},
+ to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
+ },
+ ]
});
+ const resumeTokenRename = renameChanges[0]._id;
- const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
- assert.eq(secondInsertChangeDoc.ns, {db: testDB.getName(), coll: otherColl.getName()});
+ // Insert into the renamed collection.
+ assert.writeOK(renameColl.insert({_id: "after rename"}));
- // Insert a third document to the first collection and test that the change stream picks it up.
- assert.writeOK(coll.insert({_id: 3}));
- const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
- assert.eq(thirdInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
+ // Resume from the rename notification using 'resumeAfter' and verify that the change stream
+ // returns the next insert.
+ let expectedInsert = {
+ operationType: "insert",
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ };
+ resumeCursor = cst.startWatchingChanges(
+ {collection: 1, pipeline: [{$changeStream: {resumeAfter: resumeTokenRename}}]});
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
- // Test resuming after the first insert again.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+ // Resume from the rename notification using 'startAfter' and verify that the change stream
+ // returns the next insert.
+ expectedInsert = {
+ operationType: "insert",
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ };
+ resumeCursor = cst.startWatchingChanges(
+ {collection: 1, pipeline: [{$changeStream: {startAfter: resumeTokenRename}}]});
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
- // Test resume after second insert.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- // Rename the collection and attempt to resume from the 'rename' notification. Skip this
- // test when running on a sharded collection, since these cannot be renamed.
- if (!FixtureHelpers.isSharded(coll)) {
- assertDropAndRecreateCollection(coll.getDB(), coll.getName());
- const renameColl = coll.getDB().getCollection("rename_coll");
- assertDropCollection(renameColl.getDB(), renameColl.getName());
-
- resumeCursor = cst.startWatchingChanges({collection: 1, pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection(renameColl.getName()));
-
- const renameChanges = cst.assertNextChangesEqual({
- cursor: resumeCursor,
- expectedChanges: [
- {
- operationType: "rename",
- ns: {db: coll.getDB().getName(), coll: coll.getName()},
- to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
- },
- ]
- });
- const resumeTokenRename = renameChanges[0]._id;
-
- // Insert into the renamed collection.
- assert.writeOK(renameColl.insert({_id: "after rename"}));
-
- // Resume from the rename notification using 'resumeAfter' and verify that the change stream
- // returns the next insert.
- let expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges(
- {collection: 1, pipeline: [{$changeStream: {resumeAfter: resumeTokenRename}}]});
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Resume from the rename notification using 'startAfter' and verify that the change stream
- // returns the next insert.
- expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges(
- {collection: 1, pipeline: [{$changeStream: {startAfter: resumeTokenRename}}]});
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Rename back to the original collection for reliability of the collection drops when
- // dropping the database.
- assert.writeOK(renameColl.renameCollection(coll.getName()));
- }
-
- // Explicitly drop one collection to ensure reliability of the order of notifications from the
- // dropDatabase command.
- resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assertDropCollection(testDB, otherColl.getName());
- const firstCollDrop = cst.getOneChange(resumeCursor);
- assert.eq(firstCollDrop.operationType, "drop", tojson(firstCollDrop));
- assert.eq(firstCollDrop.ns, {db: testDB.getName(), coll: otherColl.getName()});
-
- // Dropping a database should generate a 'drop' notification for each collection, a
- // 'dropDatabase' notification, and finally an 'invalidate'.
- assert.commandWorked(testDB.dropDatabase());
- const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
- const secondCollDrop = dropDbChanges[0];
- // For sharded passthrough suites, we know that the last entry will be a 'dropDatabase' however
- // there may be multiple collection drops in 'dropDbChanges' depending on the number of involved
- // shards.
- const resumeTokenDropDb = dropDbChanges[dropDbChanges.length - 1]._id;
- const resumeTokenInvalidate =
- cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]})[0]
- ._id;
-
- // Test resuming from the first collection drop and the second collection drop as a result of
+ // Rename back to the original collection for reliability of the collection drops when
// dropping the database.
- [firstCollDrop, secondCollDrop].forEach(token => {
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: token._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
- cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
- });
-
- // Recreate the test collection.
- assert.writeOK(coll.insert({_id: "after recreate"}));
-
- // Test resuming from the 'dropDatabase' entry using 'resumeAfter'.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: resumeTokenDropDb}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
+ assert.writeOK(renameColl.renameCollection(coll.getName()));
+}
+
+// Explicitly drop one collection first so that the order of notifications produced by the
+// dropDatabase command is deterministic.
+resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+assertDropCollection(testDB, otherColl.getName());
+const firstCollDrop = cst.getOneChange(resumeCursor);
+assert.eq(firstCollDrop.operationType, "drop", tojson(firstCollDrop));
+assert.eq(firstCollDrop.ns, {db: testDB.getName(), coll: otherColl.getName()});
+
+// Dropping a database should generate a 'drop' notification for each collection, a
+// 'dropDatabase' notification, and finally an 'invalidate'.
+assert.commandWorked(testDB.dropDatabase());
+const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
+const secondCollDrop = dropDbChanges[0];
+// For sharded passthrough suites, we know that the last entry will be a 'dropDatabase';
+// however, there may be multiple collection drops in 'dropDbChanges', depending on the
+// number of involved shards.
+const resumeTokenDropDb = dropDbChanges[dropDbChanges.length - 1]._id;
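+// Also capture the resume token of the terminal 'invalidate' notification for the resume
+// tests below.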
+const resumeTokenInvalidate =
cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
-
- // Test resuming from the 'invalidate' entry using 'resumeAfter'.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: 1,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
- cursor: {},
- collation: {locale: "simple"},
- }),
- ErrorCodes.InvalidResumeToken);
+ {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]})[0]
+ ._id;
- // Test resuming from the 'dropDatabase' entry using 'startAfter'.
+// Test resuming from the first collection drop and the second collection drop as a result of
+// dropping the database.
+[firstCollDrop, secondCollDrop].forEach(token => {
resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {startAfter: resumeTokenDropDb}}],
+ pipeline: [{$changeStream: {resumeAfter: token._id}}],
collection: 1,
aggregateOptions: {cursor: {batchSize: 0}},
});
+ cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
cst.assertNextChangesEqual(
{cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
-
- // Test resuming from the 'invalidate' entry using 'startAfter' and verifies it picks up the
- // insert after recreating the db/collection.
- const expectedInsert = {
- operationType: "insert",
- ns: {db: testDB.getName(), coll: coll.getName()},
- fullDocument: {_id: "after recreate"},
- documentKey: {_id: "after recreate"}
- };
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- cst.consumeDropUpTo({
- cursor: resumeCursor,
- dropType: "dropDatabase",
- expectedNext: expectedInsert,
- });
-
- cst.cleanUp();
+});
+
+// Recreate the test collection (the insert below implicitly creates it after the drop).
+assert.writeOK(coll.insert({_id: "after recreate"}));
+
+// Test resuming from the 'dropDatabase' entry using 'resumeAfter'.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenDropDb}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+cst.assertNextChangesEqual(
+ {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
+
+// Test resuming from the 'invalidate' entry using 'resumeAfter'.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: 1,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
+ cursor: {},
+ collation: {locale: "simple"},
+}),
+ ErrorCodes.InvalidResumeToken);
+
+// Test resuming from the 'dropDatabase' entry using 'startAfter'.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {startAfter: resumeTokenDropDb}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+cst.assertNextChangesEqual(
+ {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
+
+// Test resuming from the 'invalidate' entry using 'startAfter' and verify that it picks up
+// the insert after recreating the db/collection.
+const expectedInsert = {
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: coll.getName()},
+ fullDocument: {_id: "after recreate"},
+ documentKey: {_id: "after recreate"}
+};
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+cst.consumeDropUpTo({
+ cursor: resumeCursor,
+ dropType: "dropDatabase",
+ expectedNext: expectedInsert,
+});
+
+cst.cleanUp();
})();
diff --git a/jstests/client_encrypt/fle_auto_decrypt.js b/jstests/client_encrypt/fle_auto_decrypt.js
index 8122538c9f3..182c8af3ac4 100644
--- a/jstests/client_encrypt/fle_auto_decrypt.js
+++ b/jstests/client_encrypt/fle_auto_decrypt.js
@@ -5,52 +5,55 @@ load("jstests/client_encrypt/lib/mock_kms.js");
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
+"use strict";
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
- const conn = MongoRunner.runMongod(x509_options);
+const conn = MongoRunner.runMongod(x509_options);
- let localKMS = {
- key: BinData(
- 0,
- "tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6drp"),
- };
+let localKMS = {
+ key: BinData(
+ 0,
+ "tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6drp"),
+};
- const clientSideFLEOptions = {
- kmsProviders: {
- local: localKMS,
- },
- keyVaultNamespace: "test.coll",
- schemaMap: {}
- };
+const clientSideFLEOptions = {
+ kmsProviders: {
+ local: localKMS,
+ },
+ keyVaultNamespace: "test.coll",
+ schemaMap: {}
+};
- const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
+const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
- const shell = Mongo(conn.host, clientSideFLEOptions);
- const keyVault = shell.getKeyVault();
+const shell = Mongo(conn.host, clientSideFLEOptions);
+const keyVault = shell.getKeyVault();
- assert.writeOK(
- keyVault.createKey("local", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
+assert.writeOK(keyVault.createKey("local", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
- const clientEncrypt = shell.getClientEncryption();
- const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
+const clientEncrypt = shell.getClientEncryption();
+const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
- const encryptedStr = clientEncrypt.encrypt(keyId, "mongodb", deterministicAlgorithm);
+const encryptedStr = clientEncrypt.encrypt(keyId, "mongodb", deterministicAlgorithm);
- // Insert encrypted string into database
- const collection = conn.getDB("test").getCollection("collection");
+// Insert encrypted string into database
+const collection = conn.getDB("test").getCollection("collection");
- for (var i = 0; i < 150; i++) {
- assert.writeOK(collection.insert({string: encryptedStr, id: 1}));
- }
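+// Insert the encrypted string 150 times, enough for reads to span more than one cursor
+// batch.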
+for (var i = 0; i < 150; i++) {
+ assert.writeOK(collection.insert({string: encryptedStr, id: 1}));
+}
- // Ensure string is auto decrypted
- const encryptedCollection = shell.getDB("test").getCollection("collection");
- const result = encryptedCollection.find({id: 1}).toArray();
- result.forEach(function(entry) {
- assert.eq(entry.string, "mongodb");
- });
+// Ensure the string is automatically decrypted when read back
+const encryptedCollection = shell.getDB("test").getCollection("collection");
+const result = encryptedCollection.find({id: 1}).toArray();
+result.forEach(function(entry) {
+ assert.eq(entry.string, "mongodb");
+});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/fle_aws_faults.js b/jstests/client_encrypt/fle_aws_faults.js
index c7afb02513c..1d9a621b42a 100644
--- a/jstests/client_encrypt/fle_aws_faults.js
+++ b/jstests/client_encrypt/fle_aws_faults.js
@@ -6,137 +6,137 @@ load("jstests/client_encrypt/lib/mock_kms.js");
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
+"use strict";
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
- const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
+const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
- const conn = MongoRunner.runMongod(x509_options);
- const test = conn.getDB("test");
- const collection = test.coll;
+const conn = MongoRunner.runMongod(x509_options);
+const test = conn.getDB("test");
+const collection = test.coll;
- function runKMS(mock_kms, func) {
- mock_kms.start();
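+// Helper that starts 'mock_kms', connects two FLE-enabled shells (the second with an empty
+// key cache), drops the test collection, runs 'func' with both shells, and stops the mock
+// server.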
+function runKMS(mock_kms, func) {
+ mock_kms.start();
- const awsKMS = {
- accessKeyId: "access",
- secretAccessKey: "secret",
- url: mock_kms.getURL(),
- };
+ const awsKMS = {
+ accessKeyId: "access",
+ secretAccessKey: "secret",
+ url: mock_kms.getURL(),
+ };
- const clientSideFLEOptions = {
- kmsProviders: {
- aws: awsKMS,
- },
- keyVaultNamespace: "test.coll",
- schemaMap: {}
- };
+ const clientSideFLEOptions = {
+ kmsProviders: {
+ aws: awsKMS,
+ },
+ keyVaultNamespace: "test.coll",
+ schemaMap: {}
+ };
- const shell = Mongo(conn.host, clientSideFLEOptions);
- const cleanCacheShell = Mongo(conn.host, clientSideFLEOptions);
+ const shell = Mongo(conn.host, clientSideFLEOptions);
+ const cleanCacheShell = Mongo(conn.host, clientSideFLEOptions);
- collection.drop();
+ collection.drop();
- func(shell, cleanCacheShell);
+ func(shell, cleanCacheShell);
- mock_kms.stop();
- }
+ mock_kms.stop();
+}
- function testBadEncryptResult(fault) {
- const mock_kms = new MockKMSServer(fault, false);
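+// A malformed encrypt response from the KMS should cause createKey() to throw and leave no
+// key in the key vault.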
+function testBadEncryptResult(fault) {
+ const mock_kms = new MockKMSServer(fault, false);
- runKMS(mock_kms, (shell) => {
- const keyVault = shell.getKeyVault();
+ runKMS(mock_kms, (shell) => {
+ const keyVault = shell.getKeyVault();
- assert.throws(() => keyVault.createKey(
- "aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
- assert.eq(keyVault.getKeys("mongoKey").toArray().length, 0);
- });
- }
-
- testBadEncryptResult(FAULT_ENCRYPT);
- testBadEncryptResult(FAULT_ENCRYPT_WRONG_FIELDS);
- testBadEncryptResult(FAULT_ENCRYPT_BAD_BASE64);
+ assert.throws(
+ () => keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
+ assert.eq(keyVault.getKeys("mongoKey").toArray().length, 0);
+ });
+}
- function testBadEncryptError() {
- const mock_kms = new MockKMSServer(FAULT_ENCRYPT_CORRECT_FORMAT, false);
+testBadEncryptResult(FAULT_ENCRYPT);
+testBadEncryptResult(FAULT_ENCRYPT_WRONG_FIELDS);
+testBadEncryptResult(FAULT_ENCRYPT_BAD_BASE64);
- runKMS(mock_kms, (shell) => {
- const keyVault = shell.getKeyVault();
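+// A correctly formatted KMS error response should surface as error code 51224 along with
+// the AWS error message.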
+function testBadEncryptError() {
+ const mock_kms = new MockKMSServer(FAULT_ENCRYPT_CORRECT_FORMAT, false);
- let error =
- assert.throws(() => keyVault.createKey(
- "aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
- assert.commandFailedWithCode(error, [51224]);
- assert.eq(
- error,
- "Error: AWS KMS failed to encrypt: NotFoundException : Error encrypting message");
- });
- }
-
- testBadEncryptError();
-
- function testBadDecryptResult(fault) {
- const mock_kms = new MockKMSServer(fault, false);
-
- runKMS(mock_kms, (shell) => {
- const keyVault = shell.getKeyVault();
- assert.writeOK(
- keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
- const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
- const str = "mongo";
- assert.throws(() => {
- const encStr = shell.getClientEncryption().encrypt(keyId, str, randomAlgorithm);
- });
- });
- }
+ runKMS(mock_kms, (shell) => {
+ const keyVault = shell.getKeyVault();
- testBadDecryptResult(FAULT_DECRYPT);
+ let error = assert.throws(
+ () => keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
+ assert.commandFailedWithCode(error, [51224]);
+ assert.eq(error,
+ "Error: AWS KMS failed to encrypt: NotFoundException : Error encrypting message");
+ });
+}
- function testBadDecryptKeyResult(fault) {
- const mock_kms = new MockKMSServer(fault, true);
+testBadEncryptError();
- runKMS(mock_kms, (shell, cleanCacheShell) => {
- const keyVault = shell.getKeyVault();
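+// A malformed decrypt response from the KMS should cause encryption with the newly created
+// key to throw.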
+function testBadDecryptResult(fault) {
+ const mock_kms = new MockKMSServer(fault, false);
- assert.writeOK(
- keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
- const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
- const str = "mongo";
+ runKMS(mock_kms, (shell) => {
+ const keyVault = shell.getKeyVault();
+ assert.writeOK(
+ keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
+ const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
+ const str = "mongo";
+ assert.throws(() => {
const encStr = shell.getClientEncryption().encrypt(keyId, str, randomAlgorithm);
+ });
+ });
+}
+
+testBadDecryptResult(FAULT_DECRYPT);
- mock_kms.enableFaults();
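+// Create and use a key while KMS faults are disabled, then verify that a shell with an
+// empty key cache fails to decrypt once faults are enabled.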
+function testBadDecryptKeyResult(fault) {
+ const mock_kms = new MockKMSServer(fault, true);
- assert.throws(() => {
- var str = cleanCacheShell.decrypt(encStr);
- });
+ runKMS(mock_kms, (shell, cleanCacheShell) => {
+ const keyVault = shell.getKeyVault();
+ assert.writeOK(
+ keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
+ const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
+ const str = "mongo";
+ const encStr = shell.getClientEncryption().encrypt(keyId, str, randomAlgorithm);
+
+ mock_kms.enableFaults();
+
+ assert.throws(() => {
+ var str = cleanCacheShell.decrypt(encStr);
});
- }
-
- testBadDecryptKeyResult(FAULT_DECRYPT_WRONG_KEY);
-
- function testBadDecryptError() {
- const mock_kms = new MockKMSServer(FAULT_DECRYPT_CORRECT_FORMAT, false);
-
- runKMS(mock_kms, (shell) => {
- const keyVault = shell.getKeyVault();
- assert.writeOK(
- keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
- const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
- const str = "mongo";
- let error = assert.throws(() => {
- const encStr = shell.getClientEncryption().encrypt(keyId, str, randomAlgorithm);
- });
- assert.commandFailedWithCode(error, [51225]);
- assert.eq(
- error,
- "Error: AWS KMS failed to decrypt: NotFoundException : Error decrypting message");
+ });
+}
+
+testBadDecryptKeyResult(FAULT_DECRYPT_WRONG_KEY);
+
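+// A correctly formatted KMS decrypt error should surface as error code 51225 along with
+// the AWS error message.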
+function testBadDecryptError() {
+ const mock_kms = new MockKMSServer(FAULT_DECRYPT_CORRECT_FORMAT, false);
+
+ runKMS(mock_kms, (shell) => {
+ const keyVault = shell.getKeyVault();
+ assert.writeOK(
+ keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ["mongoKey"]));
+ const keyId = keyVault.getKeys("mongoKey").toArray()[0]._id;
+ const str = "mongo";
+ let error = assert.throws(() => {
+ const encStr = shell.getClientEncryption().encrypt(keyId, str, randomAlgorithm);
});
- }
+ assert.commandFailedWithCode(error, [51225]);
+ assert.eq(error,
+ "Error: AWS KMS failed to decrypt: NotFoundException : Error decrypting message");
+ });
+}
- testBadDecryptError();
+testBadDecryptError();
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/fle_command_line_encryption.js b/jstests/client_encrypt/fle_command_line_encryption.js
index 9113f9f2d74..1ad044b4d95 100644
--- a/jstests/client_encrypt/fle_command_line_encryption.js
+++ b/jstests/client_encrypt/fle_command_line_encryption.js
@@ -6,36 +6,40 @@ load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
- const conn = MongoRunner.runMongod(x509_options);
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+const conn = MongoRunner.runMongod(x509_options);
- const shellOpts = [
- "mongo",
- "--host",
- conn.host,
- "--port",
- conn.port,
- "--tls",
- "--sslPEMKeyFile",
- CLIENT_CERT,
- "--sslCAFile",
- CA_CERT,
- "--tlsAllowInvalidHostnames",
- "--awsAccessKeyId",
- "access",
- "--awsSecretAccessKey",
- "secret",
- "--keyVaultNamespace",
- "test.coll",
- "--kmsURL",
- "https://localhost:8000",
- ];
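+// Shell invocation that passes the KMS credentials, key vault namespace, and KMS URL on the
+// command line rather than through Mongo() connection options.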
+const shellOpts = [
+ "mongo",
+ "--host",
+ conn.host,
+ "--port",
+ conn.port,
+ "--tls",
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--sslCAFile",
+ CA_CERT,
+ "--tlsAllowInvalidHostnames",
+ "--awsAccessKeyId",
+ "access",
+ "--awsSecretAccessKey",
+ "secret",
+ "--keyVaultNamespace",
+ "test.coll",
+ "--kmsURL",
+ "https://localhost:8000",
+];
- const testFiles = [
- "jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js",
- ];
+const testFiles = [
+ "jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js",
+];
- for (const file of testFiles) {
- runMongoProgram(...shellOpts, file);
- }
+for (const file of testFiles) {
+ runMongoProgram(...shellOpts, file);
+}
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/fle_encrypt_decrypt_shell.js b/jstests/client_encrypt/fle_encrypt_decrypt_shell.js
index fd7847c04dc..f67bc72dccc 100644
--- a/jstests/client_encrypt/fle_encrypt_decrypt_shell.js
+++ b/jstests/client_encrypt/fle_encrypt_decrypt_shell.js
@@ -5,117 +5,119 @@ load("jstests/client_encrypt/lib/mock_kms.js");
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
-
- const mock_kms = new MockKMSServer();
- mock_kms.start();
-
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
-
- const conn = MongoRunner.runMongod(x509_options);
- const test = conn.getDB("test");
- const collection = test.coll;
-
- const awsKMS = {
- accessKeyId: "access",
- secretAccessKey: "secret",
- url: mock_kms.getURL(),
- };
-
- let localKMS = {
- key: BinData(
- 0,
- "tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6drp"),
- };
-
- const clientSideFLEOptions = {
- kmsProviders: {
- aws: awsKMS,
- local: localKMS,
- },
- keyVaultNamespace: "test.coll",
- schemaMap: {}
- };
-
- const kmsTypes = ["aws", "local"];
-
- const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
- const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
- const encryptionAlgorithms = [randomAlgorithm, deterministicAlgorithm];
-
- const passTestCases = [
- "mongo",
- NumberLong(13),
- NumberInt(23),
- UUID(),
- ISODate(),
- new Date('December 17, 1995 03:24:00'),
- BinData(0, '1234'),
- BinData(1, '1234'),
- BinData(3, '1234'),
- BinData(4, '1234'),
- BinData(5, '1234'),
- BinData(6, '1234'),
- new Timestamp(1, 2),
- new ObjectId(),
- new DBPointer("mongo", new ObjectId()),
- /test/
- ];
-
- const failDeterministic = [
- true,
- false,
- 12,
- NumberDecimal(0.1234),
- ["this is an array"],
- {"value": "mongo"},
- Code("function() { return true; }")
- ];
-
- const failTestCases =
- [null, undefined, MinKey(), MaxKey(), DBRef("test", "test", "test"), BinData(2, '1234')];
-
- const shell = Mongo(conn.host, clientSideFLEOptions);
- const keyVault = shell.getKeyVault();
-
- // Testing for every combination of (kmsType, algorithm, javascriptVariable)
- for (const kmsType of kmsTypes) {
- for (const encryptionAlgorithm of encryptionAlgorithms) {
- collection.drop();
-
- assert.writeOK(
- keyVault.createKey(kmsType, "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
- const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
-
- let pass;
- let fail;
- if (encryptionAlgorithm === randomAlgorithm) {
- pass = [...passTestCases, ...failDeterministic];
- fail = failTestCases;
- } else if (encryptionAlgorithm === deterministicAlgorithm) {
- pass = passTestCases;
- fail = [...failTestCases, ...failDeterministic];
- }
+"use strict";
+
+const mock_kms = new MockKMSServer();
+mock_kms.start();
+
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+
+const conn = MongoRunner.runMongod(x509_options);
+const test = conn.getDB("test");
+const collection = test.coll;
+
+const awsKMS = {
+ accessKeyId: "access",
+ secretAccessKey: "secret",
+ url: mock_kms.getURL(),
+};
+
+let localKMS = {
+ key: BinData(
+ 0,
+ "tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6drp"),
+};
+
+const clientSideFLEOptions = {
+ kmsProviders: {
+ aws: awsKMS,
+ local: localKMS,
+ },
+ keyVaultNamespace: "test.coll",
+ schemaMap: {}
+};
+
+const kmsTypes = ["aws", "local"];
+
+const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
+const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
+const encryptionAlgorithms = [randomAlgorithm, deterministicAlgorithm];
+
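+// BSON values that every encryption algorithm should round-trip through encrypt()/decrypt().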
+const passTestCases = [
+ "mongo",
+ NumberLong(13),
+ NumberInt(23),
+ UUID(),
+ ISODate(),
+ new Date('December 17, 1995 03:24:00'),
+ BinData(0, '1234'),
+ BinData(1, '1234'),
+ BinData(3, '1234'),
+ BinData(4, '1234'),
+ BinData(5, '1234'),
+ BinData(6, '1234'),
+ new Timestamp(1, 2),
+ new ObjectId(),
+ new DBPointer("mongo", new ObjectId()),
+ /test/
+];
+
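+// Values that only the random algorithm accepts; deterministic encryption rejects them.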
+const failDeterministic = [
+ true,
+ false,
+ 12,
+ NumberDecimal(0.1234),
+ ["this is an array"],
+ {"value": "mongo"},
+ Code("function() { return true; }")
+];
+
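+// Values that no algorithm may encrypt.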
+const failTestCases =
+ [null, undefined, MinKey(), MaxKey(), DBRef("test", "test", "test"), BinData(2, '1234')];
+
+const shell = Mongo(conn.host, clientSideFLEOptions);
+const keyVault = shell.getKeyVault();
+
+// Testing for every combination of (kmsType, algorithm, javascriptVariable)
+for (const kmsType of kmsTypes) {
+ for (const encryptionAlgorithm of encryptionAlgorithms) {
+ collection.drop();
+
+ assert.writeOK(
+ keyVault.createKey(kmsType, "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
+ const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
+
+ let pass;
+ let fail;
+ if (encryptionAlgorithm === randomAlgorithm) {
+ pass = [...passTestCases, ...failDeterministic];
+ fail = failTestCases;
+ } else if (encryptionAlgorithm === deterministicAlgorithm) {
+ pass = passTestCases;
+ fail = [...failTestCases, ...failDeterministic];
+ }
- const clientEncrypt = shell.getClientEncryption();
- for (const passTestCase of pass) {
- const encPassTestCase =
- clientEncrypt.encrypt(keyId, passTestCase, encryptionAlgorithm);
- assert.eq(passTestCase, clientEncrypt.decrypt(encPassTestCase));
+ const clientEncrypt = shell.getClientEncryption();
+ for (const passTestCase of pass) {
+ const encPassTestCase = clientEncrypt.encrypt(keyId, passTestCase, encryptionAlgorithm);
+ assert.eq(passTestCase, clientEncrypt.decrypt(encPassTestCase));
- if (encryptionAlgorithm === deterministicAlgorithm) {
- assert.eq(encPassTestCase,
- clientEncrypt.encrypt(keyId, passTestCase, encryptionAlgorithm));
- }
+ if (encryptionAlgorithm === deterministicAlgorithm) {
+ assert.eq(encPassTestCase,
+ clientEncrypt.encrypt(keyId, passTestCase, encryptionAlgorithm));
}
+ }
- for (const failTestCase of fail) {
- assert.throws(() =>
- clientEncrypt.encrypt(keyId, failTestCase, encryptionAlgorithm));
- }
+ for (const failTestCase of fail) {
+ assert.throws(() => clientEncrypt.encrypt(keyId, failTestCase, encryptionAlgorithm));
}
}
+}
- MongoRunner.stopMongod(conn);
- mock_kms.stop();
+MongoRunner.stopMongod(conn);
+mock_kms.stop();
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/fle_key_faults.js b/jstests/client_encrypt/fle_key_faults.js
index b81c1bcfa81..41b2505c8ba 100644
--- a/jstests/client_encrypt/fle_key_faults.js
+++ b/jstests/client_encrypt/fle_key_faults.js
@@ -6,89 +6,93 @@ load("jstests/client_encrypt/lib/mock_kms.js");
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
-
- const mock_kms = new MockKMSServer();
- mock_kms.start();
-
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
-
- const conn = MongoRunner.runMongod(x509_options);
- const test = conn.getDB("test");
- const collection = test.coll;
-
- const awsKMS = {
- accessKeyId: "access",
- secretAccessKey: "secret",
- url: mock_kms.getURL(),
- };
-
- var localKMS = {
- key: BinData(
- 0,
- "tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6drp"),
- };
-
- const clientSideFLEOptions = {
- kmsProviders: {
- aws: awsKMS,
- local: localKMS,
- },
- keyVaultNamespace: "test.coll",
- schemaMap: {}
- };
-
- function testFault(kmsType, func) {
- collection.drop();
-
- const shell = Mongo(conn.host, clientSideFLEOptions);
- const keyVault = shell.getKeyVault();
-
- assert.writeOK(
- keyVault.createKey(kmsType, "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
- const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
-
- func(keyId, shell);
- }
-
- function testFaults(func) {
- const kmsTypes = ["aws", "local"];
-
- for (const kmsType of kmsTypes) {
- testFault(kmsType, func);
- }
+"use strict";
+
+const mock_kms = new MockKMSServer();
+mock_kms.start();
+
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+
+const conn = MongoRunner.runMongod(x509_options);
+const test = conn.getDB("test");
+const collection = test.coll;
+
+const awsKMS = {
+ accessKeyId: "access",
+ secretAccessKey: "secret",
+ url: mock_kms.getURL(),
+};
+
+var localKMS = {
+ key: BinData(
+ 0,
+ "tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6drp"),
+};
+
+const clientSideFLEOptions = {
+ kmsProviders: {
+ aws: awsKMS,
+ local: localKMS,
+ },
+ keyVaultNamespace: "test.coll",
+ schemaMap: {}
+};
+
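+// Drops the key vault collection, creates a fresh key of the given KMS type, and hands the
+// key id and shell to 'func', which corrupts some state and asserts that encryption fails.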
+function testFault(kmsType, func) {
+ collection.drop();
+
+ const shell = Mongo(conn.host, clientSideFLEOptions);
+ const keyVault = shell.getKeyVault();
+
+ assert.writeOK(
+ keyVault.createKey(kmsType, "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
+ const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
+
+ func(keyId, shell);
+}
+
+function testFaults(func) {
+ const kmsTypes = ["aws", "local"];
+
+ for (const kmsType of kmsTypes) {
+ testFault(kmsType, func);
}
+}
- // Negative - drop the key vault collection
- testFaults((keyId, shell) => {
- collection.drop();
+// Negative - drop the key vault collection
+testFaults((keyId, shell) => {
+ collection.drop();
- const str = "mongo";
- assert.throws(() => {
- const encStr = shell.getClientEncryption().encrypt(keyId, str);
- });
+ const str = "mongo";
+ assert.throws(() => {
+ const encStr = shell.getClientEncryption().encrypt(keyId, str);
});
+});
- // Negative - delete the keys
- testFaults((keyId, shell) => {
- collection.deleteMany({});
+// Negative - delete the keys
+testFaults((keyId, shell) => {
+ collection.deleteMany({});
- const str = "mongo";
- assert.throws(() => {
- const encStr = shell.getClientEncryption().encrypt(keyId, str);
- });
+ const str = "mongo";
+ assert.throws(() => {
+ const encStr = shell.getClientEncryption().encrypt(keyId, str);
});
+});
- // Negative - corrupt the master key with an unkown provider
- testFaults((keyId, shell) => {
- collection.updateMany({}, {$set: {"masterKey.provider": "fake"}});
+// Negative - corrupt the master key with an unknown provider
+testFaults((keyId, shell) => {
+ collection.updateMany({}, {$set: {"masterKey.provider": "fake"}});
- const str = "mongo";
- assert.throws(() => {
- const encStr = shell.getClientEncryption().encrypt(keyId, str);
- });
+ const str = "mongo";
+ assert.throws(() => {
+ const encStr = shell.getClientEncryption().encrypt(keyId, str);
});
+});
- MongoRunner.stopMongod(conn);
- mock_kms.stop();
+MongoRunner.stopMongod(conn);
+mock_kms.stop();
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/fle_keys.js b/jstests/client_encrypt/fle_keys.js
index 875615ac9a8..646b95141ad 100644
--- a/jstests/client_encrypt/fle_keys.js
+++ b/jstests/client_encrypt/fle_keys.js
@@ -6,70 +6,74 @@ load("jstests/client_encrypt/lib/mock_kms.js");
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
+"use strict";
- const mock_kms = new MockKMSServer();
- mock_kms.start();
+const mock_kms = new MockKMSServer();
+mock_kms.start();
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
- const conn = MongoRunner.runMongod(x509_options);
- const test = conn.getDB("test");
- const collection = test.coll;
+const conn = MongoRunner.runMongod(x509_options);
+const test = conn.getDB("test");
+const collection = test.coll;
- const awsKMS = {
- accessKeyId: "access",
- secretAccessKey: "secret",
- url: mock_kms.getURL(),
- };
+const awsKMS = {
+ accessKeyId: "access",
+ secretAccessKey: "secret",
+ url: mock_kms.getURL(),
+};
- const clientSideFLEOptions = {
- kmsProviders: {
- aws: awsKMS,
- },
- keyVaultNamespace: "test.coll",
- schemaMap: {}
- };
+const clientSideFLEOptions = {
+ kmsProviders: {
+ aws: awsKMS,
+ },
+ keyVaultNamespace: "test.coll",
+ schemaMap: {}
+};
- const conn_str = "mongodb://" + conn.host + "/?ssl=true";
- const shell = Mongo(conn_str, clientSideFLEOptions);
- const keyVault = shell.getKeyVault();
+const conn_str = "mongodb://" + conn.host + "/?ssl=true";
+const shell = Mongo(conn_str, clientSideFLEOptions);
+const keyVault = shell.getKeyVault();
- var key = keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']);
- assert.eq(1, keyVault.getKeys().itcount());
+var key = keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']);
+assert.eq(1, keyVault.getKeys().itcount());
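+
+// Negative tests: key alternate names must be an array of strings.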
- var result = keyVault.createKey("aws", "arn:aws:kms:us-east-4:fake:fake:fake", {});
- assert.eq("TypeError: key alternate names must be of Array type.", result);
+var result = keyVault.createKey("aws", "arn:aws:kms:us-east-4:fake:fake:fake", {});
+assert.eq("TypeError: key alternate names must be of Array type.", result);
- result = keyVault.createKey("aws", "arn:aws:kms:us-east-5:fake:fake:fake", [1]);
- assert.eq("TypeError: items in key alternate names must be of String type.", result);
+result = keyVault.createKey("aws", "arn:aws:kms:us-east-5:fake:fake:fake", [1]);
+assert.eq("TypeError: items in key alternate names must be of String type.", result);
- assert.eq(1, keyVault.getKeyByAltName("mongoKey").itcount());
+assert.eq(1, keyVault.getKeyByAltName("mongoKey").itcount());
- var keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
+var keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
- keyVault.addKeyAlternateName(keyId, "mongoKey2");
+keyVault.addKeyAlternateName(keyId, "mongoKey2");
- assert.eq(1, keyVault.getKeyByAltName("mongoKey2").itcount());
- assert.eq(2, keyVault.getKey(keyId).toArray()[0].keyAltNames.length);
- assert.eq(1, keyVault.getKeys().itcount());
+assert.eq(1, keyVault.getKeyByAltName("mongoKey2").itcount());
+assert.eq(2, keyVault.getKey(keyId).toArray()[0].keyAltNames.length);
+assert.eq(1, keyVault.getKeys().itcount());
- result = keyVault.addKeyAlternateName(keyId, [2]);
- assert.eq("TypeError: key alternate name cannot be object or array type.", result);
+result = keyVault.addKeyAlternateName(keyId, [2]);
+assert.eq("TypeError: key alternate name cannot be object or array type.", result);
- keyVault.removeKeyAlternateName(keyId, "mongoKey2");
- assert.eq(1, keyVault.getKey(keyId).toArray()[0].keyAltNames.length);
+keyVault.removeKeyAlternateName(keyId, "mongoKey2");
+assert.eq(1, keyVault.getKey(keyId).toArray()[0].keyAltNames.length);
- result = keyVault.deleteKey(keyId);
- assert.eq(0, keyVault.getKey(keyId).itcount());
- assert.eq(0, keyVault.getKeys().itcount());
+result = keyVault.deleteKey(keyId);
+assert.eq(0, keyVault.getKey(keyId).itcount());
+assert.eq(0, keyVault.getKeys().itcount());
- assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake1"));
- assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-2:fake:fake:fake2"));
- assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-3:fake:fake:fake3"));
+assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake1"));
+assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-2:fake:fake:fake2"));
+assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-3:fake:fake:fake3"));
- assert.eq(3, keyVault.getKeys().itcount());
+assert.eq(3, keyVault.getKeys().itcount());
- MongoRunner.stopMongod(conn);
- mock_kms.stop();
+MongoRunner.stopMongod(conn);
+mock_kms.stop();
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/fle_valid_fle_options.js b/jstests/client_encrypt/fle_valid_fle_options.js
index 9bdabaf4875..08a44f18725 100644
--- a/jstests/client_encrypt/fle_valid_fle_options.js
+++ b/jstests/client_encrypt/fle_valid_fle_options.js
@@ -3,60 +3,64 @@ load("jstests/client_encrypt/lib/mock_kms.js");
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
+"use strict";
- const mock_kms = new MockKMSServer();
- mock_kms.start();
+const mock_kms = new MockKMSServer();
+mock_kms.start();
- const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
- const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
+const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
+const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
- const x509_options =
- {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT, vvvvv: ""};
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ vvvvv: ""
+};
- const conn = MongoRunner.runMongod(x509_options);
- const unencryptedDatabase = conn.getDB("test");
- const collection = unencryptedDatabase.keystore;
+const conn = MongoRunner.runMongod(x509_options);
+const unencryptedDatabase = conn.getDB("test");
+const collection = unencryptedDatabase.keystore;
- const awsKMS = {
- accessKeyId: "access",
- secretAccessKey: "secret",
- url: mock_kms.getURL(),
- };
+const awsKMS = {
+ accessKeyId: "access",
+ secretAccessKey: "secret",
+ url: mock_kms.getURL(),
+};
- const clientSideFLEOptionsFail = [
- {
- kmsProviders: {
- aws: awsKMS,
- },
- schemaMap: {},
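+// Each of these option documents omits a required field (keyVaultNamespace or kmsProviders)
+// and should be rejected by the Mongo() constructor.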
+const clientSideFLEOptionsFail = [
+ {
+ kmsProviders: {
+ aws: awsKMS,
},
- {
- keyVaultNamespace: "test.keystore",
- schemaMap: {},
- },
- ];
+ schemaMap: {},
+ },
+ {
+ keyVaultNamespace: "test.keystore",
+ schemaMap: {},
+ },
+];
- clientSideFLEOptionsFail.forEach(element => {
- assert.throws(Mongo, [conn.host, element]);
- });
+clientSideFLEOptionsFail.forEach(element => {
+ assert.throws(Mongo, [conn.host, element]);
+});
- const clientSideFLEOptionsPass = [
- {
- kmsProviders: {
- aws: awsKMS,
- },
- keyVaultNamespace: "test.keystore",
- schemaMap: {},
+const clientSideFLEOptionsPass = [
+ {
+ kmsProviders: {
+ aws: awsKMS,
},
- ];
+ keyVaultNamespace: "test.keystore",
+ schemaMap: {},
+ },
+];
- clientSideFLEOptionsPass.forEach(element => {
- assert.doesNotThrow(() => {
- Mongo(conn.host, element);
- });
+clientSideFLEOptionsPass.forEach(element => {
+ assert.doesNotThrow(() => {
+ Mongo(conn.host, element);
});
+});
- MongoRunner.stopMongod(conn);
- mock_kms.stop();
+MongoRunner.stopMongod(conn);
+mock_kms.stop();
}());
diff --git a/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js b/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js
index 0ca10b2057c..da83d69c87b 100644
--- a/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js
+++ b/jstests/client_encrypt/lib/fle_command_line_explicit_encryption.js
@@ -1,84 +1,83 @@
/**
-* Check the functionality of encrypt and decrypt functions in KeyVault.js. This test is run by
-* jstests/fle/fle_command_line_encryption.js.
-*/
+ * Check the functionality of encrypt and decrypt functions in KeyVault.js. This test is run by
+ * jstests/fle/fle_command_line_encryption.js.
+ */
load("jstests/client_encrypt/lib/mock_kms.js");
(function() {
- "use strict";
-
- const mock_kms = new MockKMSServer();
- mock_kms.start();
-
- const shell = Mongo();
- const keyVault = shell.getKeyVault();
-
- const test = shell.getDB("test");
- const collection = test.coll;
-
- const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
- const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
- const encryptionAlgorithms = [randomAlgorithm, deterministicAlgorithm];
-
- const passTestCases = [
- "mongo",
- NumberLong(13),
- NumberInt(23),
- UUID(),
- ISODate(),
- new Date('December 17, 1995 03:24:00'),
- BinData(2, '1234'),
- new Timestamp(1, 2),
- new ObjectId(),
- new DBPointer("mongo", new ObjectId()),
- /test/
- ];
-
- const failDeterministic = [
- true,
- false,
- 12,
- NumberDecimal(0.1234),
- ["this is an array"],
- {"value": "mongo"},
- Code("function() { return true; }")
- ];
-
- const failTestCases = [null, undefined, MinKey(), MaxKey(), DBRef("test", "test", "test")];
-
- // Testing for every combination of (algorithm, javascriptVariable)
- for (const encryptionAlgorithm of encryptionAlgorithms) {
- collection.drop();
-
- assert.writeOK(
- keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
- const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
-
- let pass;
- let fail;
- if (encryptionAlgorithm === randomAlgorithm) {
- pass = [...passTestCases, ...failDeterministic];
- fail = failTestCases;
- } else if (encryptionAlgorithm === deterministicAlgorithm) {
- pass = passTestCases;
- fail = [...failTestCases, ...failDeterministic];
- }
+"use strict";
+
+const mock_kms = new MockKMSServer();
+mock_kms.start();
+
+const shell = Mongo();
+const keyVault = shell.getKeyVault();
+
+const test = shell.getDB("test");
+const collection = test.coll;
+
+const randomAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Random";
+const deterministicAlgorithm = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic";
+const encryptionAlgorithms = [randomAlgorithm, deterministicAlgorithm];
+
+const passTestCases = [
+ "mongo",
+ NumberLong(13),
+ NumberInt(23),
+ UUID(),
+ ISODate(),
+ new Date('December 17, 1995 03:24:00'),
+ BinData(2, '1234'),
+ new Timestamp(1, 2),
+ new ObjectId(),
+ new DBPointer("mongo", new ObjectId()),
+ /test/
+];
+
+const failDeterministic = [
+ true,
+ false,
+ 12,
+ NumberDecimal(0.1234),
+ ["this is an array"],
+ {"value": "mongo"},
+ Code("function() { return true; }")
+];
+
+const failTestCases = [null, undefined, MinKey(), MaxKey(), DBRef("test", "test", "test")];
+
+// Testing for every combination of (algorithm, javascriptVariable)
+for (const encryptionAlgorithm of encryptionAlgorithms) {
+ collection.drop();
+
+ assert.writeOK(keyVault.createKey("aws", "arn:aws:kms:us-east-1:fake:fake:fake", ['mongoKey']));
+ const keyId = keyVault.getKeyByAltName("mongoKey").toArray()[0]._id;
+
+ let pass;
+ let fail;
+ if (encryptionAlgorithm === randomAlgorithm) {
+ pass = [...passTestCases, ...failDeterministic];
+ fail = failTestCases;
+ } else if (encryptionAlgorithm === deterministicAlgorithm) {
+ pass = passTestCases;
+ fail = [...failTestCases, ...failDeterministic];
+ }
- for (const passTestCase of pass) {
- const encPassTestCase = shell.encrypt(keyId, passTestCase, encryptionAlgorithm);
- assert.eq(passTestCase, shell.decrypt(encPassTestCase));
+ for (const passTestCase of pass) {
+ const encPassTestCase = shell.encrypt(keyId, passTestCase, encryptionAlgorithm);
+ assert.eq(passTestCase, shell.decrypt(encPassTestCase));
- if (encryptionAlgorithm == deterministicAlgorithm) {
- assert.eq(encPassTestCase, shell.encrypt(keyId, passTestCase, encryptionAlgorithm));
- }
+ if (encryptionAlgorithm == deterministicAlgorithm) {
+ assert.eq(encPassTestCase, shell.encrypt(keyId, passTestCase, encryptionAlgorithm));
}
+ }
- for (const failTestCase of fail) {
- assert.throws(shell.encrypt, [keyId, failTestCase, encryptionAlgorithm]);
- }
+ for (const failTestCase of fail) {
+ assert.throws(shell.encrypt, [keyId, failTestCase, encryptionAlgorithm]);
}
+}
- mock_kms.stop();
- print("Test completed with no errors.");
+mock_kms.stop();
+print("Test completed with no errors.");
}()); \ No newline at end of file
diff --git a/jstests/client_encrypt/lib/mock_kms.js b/jstests/client_encrypt/lib/mock_kms.js
index a7f34c37312..25556379c4a 100644
--- a/jstests/client_encrypt/lib/mock_kms.js
+++ b/jstests/client_encrypt/lib/mock_kms.js
@@ -18,11 +18,11 @@ const ENABLE_FAULTS = "enable_faults";
class MockKMSServer {
/**
- * Create a new webserver.
- *
- * @param {string} fault_type
- * @param {bool} disableFaultsOnStartup optionally disable fault on startup
- */
+ * Create a new webserver.
+ *
+     * @param {string} fault_type the type of fault the mock KMS server should simulate, if any
+ * @param {bool} disableFaultsOnStartup optionally disable fault on startup
+ */
constructor(fault_type, disableFaultsOnStartup) {
this.python = "python3";
this.disableFaultsOnStartup = disableFaultsOnStartup || false;
diff --git a/jstests/concurrency/fsm_example.js b/jstests/concurrency/fsm_example.js
index 45b8ac5b47a..fb012462a15 100644
--- a/jstests/concurrency/fsm_example.js
+++ b/jstests/concurrency/fsm_example.js
@@ -7,7 +7,6 @@
* Serves as a template for new workloads.
*/
var $config = (function() {
-
// 'data' is passed (copied) to each of the worker threads.
var data = {};
@@ -81,5 +80,4 @@ var $config = (function() {
teardown: teardown, // optional, default empty function
data: data // optional, default empty object
};
-
})();
diff --git a/jstests/concurrency/fsm_libs/assert.js b/jstests/concurrency/fsm_libs/assert.js
index 437742ac396..f4b47acc0fe 100644
--- a/jstests/concurrency/fsm_libs/assert.js
+++ b/jstests/concurrency/fsm_libs/assert.js
@@ -12,7 +12,6 @@
*/
var AssertLevel = (function() {
-
function AssertLevel(level) {
this.level = level;
@@ -34,7 +33,6 @@ var AssertLevel = (function() {
OWN_DB: new AssertLevel(2),
isAssertLevel: isAssertLevel
};
-
})();
if (typeof globalAssertLevel === 'undefined') {
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index b848fa355ed..1b24c9bbdbb 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -53,8 +53,8 @@ var Cluster = function(options) {
getObjectKeys(options).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
options.replication = options.replication || {};
@@ -271,7 +271,7 @@ var Cluster = function(options) {
this.executeOnMongodNodes = function executeOnMongodNodes(fn) {
assert(initialized, 'cluster must be initialized first');
- if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ if (!fn || typeof (fn) !== 'function' || fn.length !== 1) {
throw new Error('mongod function must be a function that takes a db as an argument');
}
_conns.mongod.forEach(function(mongodConn) {
@@ -282,7 +282,7 @@ var Cluster = function(options) {
this.executeOnMongosNodes = function executeOnMongosNodes(fn) {
assert(initialized, 'cluster must be initialized first');
- if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ if (!fn || typeof (fn) !== 'function' || fn.length !== 1) {
throw new Error('mongos function must be a function that takes a db as an argument');
}
_conns.mongos.forEach(function(mongosConn) {
@@ -293,7 +293,7 @@ var Cluster = function(options) {
this.executeOnConfigNodes = function executeOnConfigNodes(fn) {
assert(initialized, 'cluster must be initialized first');
- if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ if (!fn || typeof (fn) !== 'function' || fn.length !== 1) {
throw new Error('config function must be a function that takes a db as an argument');
}
st._configServers.forEach(function(conn) {
@@ -553,8 +553,8 @@ var Cluster = function(options) {
var primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
assert(primaryInfo !== undefined,
- phase + ', failed to find self in replication status: ' +
- tojson(replSetStatus));
+ phase +
+ ', failed to find self in replication status: ' + tojson(replSetStatus));
// Wait for all previous workload operations to complete, with "getLastError".
res = primary.getDB('test').runCommand({
@@ -565,7 +565,6 @@ var Cluster = function(options) {
});
assert.commandWorked(res, phase + ', error awaiting replication');
}
-
});
};
diff --git a/jstests/concurrency/fsm_libs/composer.js b/jstests/concurrency/fsm_libs/composer.js
index 99cfb64f34d..22d78e77871 100644
--- a/jstests/concurrency/fsm_libs/composer.js
+++ b/jstests/concurrency/fsm_libs/composer.js
@@ -1,7 +1,6 @@
load('jstests/concurrency/fsm_libs/fsm.js');
var composer = (function() {
-
function runCombinedFSM(workloads, configs, mixProb) {
// TODO: what if a workload depends on iterations?
var iterations = 100;
@@ -70,5 +69,4 @@ var composer = (function() {
}
return {run: runCombinedFSM};
-
})();
diff --git a/jstests/concurrency/fsm_libs/extend_workload.js b/jstests/concurrency/fsm_libs/extend_workload.js
index c00f8c80e99..eb9e681eb4e 100644
--- a/jstests/concurrency/fsm_libs/extend_workload.js
+++ b/jstests/concurrency/fsm_libs/extend_workload.js
@@ -2,7 +2,8 @@
load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
-/** extendWorkload usage:
+/**
+ * extendWorkload usage:
*
* $config = extendWorkload($config, function($config, $super) {
* // ... modify $config ...
@@ -25,7 +26,8 @@ function extendWorkload($config, callback) {
return callback(childConfig, parsedSuperConfig);
}
-/** assignEqualProbsToTransitions example usage:
+/**
+ * assignEqualProbsToTransitions example usage:
* $config.transitions = Object.extend({<state>:
* assignEqualProbsToTransitions(Object.keys($config.states))}, $super.transitions);
*/
diff --git a/jstests/concurrency/fsm_libs/parse_config.js b/jstests/concurrency/fsm_libs/parse_config.js
index 3c365dc5f4c..3d673e6c062 100644
--- a/jstests/concurrency/fsm_libs/parse_config.js
+++ b/jstests/concurrency/fsm_libs/parse_config.js
@@ -61,8 +61,8 @@ function parseConfig(config) {
assert.gt(Object.keys(config.transitions[fromState]).length, 0);
Object.keys(config.transitions[fromState]).forEach(function(toState) {
assert(config.states.hasOwnProperty(toState),
- 'config.transitions.' + fromState + ' contains a state not in config.states: ' +
- toState);
+ 'config.transitions.' + fromState +
+ ' contains a state not in config.states: ' + toState);
assert.eq('number',
typeof config.transitions[fromState][toState],
'transitions.' + fromState + '.' + toState + ' should be a number');
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index 908eb126cb0..75569dde140 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -1,285 +1,284 @@
(function() {
- 'use strict';
+'use strict';
+
+load('jstests/concurrency/fsm_libs/runner.js'); // for runner.internals
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+
+const validateExecutionOptions = runner.internals.validateExecutionOptions;
+const prepareCollections = runner.internals.prepareCollections;
+const WorkloadFailure = runner.internals.WorkloadFailure;
+const throwError = runner.internals.throwError;
+const setupWorkload = runner.internals.setupWorkload;
+const teardownWorkload = runner.internals.teardownWorkload;
+const setIterations = runner.internals.setIterations;
+const setThreadCount = runner.internals.setThreadCount;
+const loadWorkloadContext = runner.internals.loadWorkloadContext;
+
+// Returns true if the workload's teardown succeeds and false if the workload's teardown fails.
+function cleanupWorkload(workload, context, cluster, errors, header) {
+ const phase = 'before workload ' + workload + ' teardown';
+
+ try {
+ teardownWorkload(workload, context, cluster);
+ } catch (e) {
+ errors.push(new WorkloadFailure(e.toString(), e.stack, 'main', header + ' Teardown'));
+ return false;
+ }
- load('jstests/concurrency/fsm_libs/runner.js'); // for runner.internals
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+ return true;
+}
- const validateExecutionOptions = runner.internals.validateExecutionOptions;
- const prepareCollections = runner.internals.prepareCollections;
- const WorkloadFailure = runner.internals.WorkloadFailure;
- const throwError = runner.internals.throwError;
- const setupWorkload = runner.internals.setupWorkload;
- const teardownWorkload = runner.internals.teardownWorkload;
- const setIterations = runner.internals.setIterations;
- const setThreadCount = runner.internals.setThreadCount;
- const loadWorkloadContext = runner.internals.loadWorkloadContext;
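+// Runs the given workloads serially: validates the execution options, chooses the global
+// assertion level based on how much the workloads share, prepares the cluster and each
+// workload, and records any setup or teardown failures.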
+function runWorkloads(workloads,
+ {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) {
+ assert.gt(workloads.length, 0, 'need at least one workload to run');
- // Returns true if the workload's teardown succeeds and false if the workload's teardown fails.
- function cleanupWorkload(workload, context, cluster, errors, header) {
- const phase = 'before workload ' + workload + ' teardown';
+ const executionMode = {serial: true};
+ validateExecutionOptions(executionMode, executionOptions);
+ Object.freeze(executionOptions); // immutable after validation (and normalization)
- try {
- teardownWorkload(workload, context, cluster);
- } catch (e) {
- errors.push(new WorkloadFailure(e.toString(), e.stack, 'main', header + ' Teardown'));
- return false;
- }
-
- return true;
+ // Determine how strong to make assertions while simultaneously executing different
+ // workloads.
+ let assertLevel = AssertLevel.OWN_DB;
+ if (clusterOptions.sameDB) {
+ // The database is shared by multiple workloads, so only make the asserts that apply
+ // when the collection is owned by an individual workload.
+ assertLevel = AssertLevel.OWN_COLL;
}
+ if (clusterOptions.sameCollection) {
+ // The collection is shared by multiple workloads, so only make the asserts that always
+ // apply.
+ assertLevel = AssertLevel.ALWAYS;
+ }
+ globalAssertLevel = assertLevel;
- function runWorkloads(workloads,
- {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) {
- assert.gt(workloads.length, 0, 'need at least one workload to run');
-
- const executionMode = {serial: true};
- validateExecutionOptions(executionMode, executionOptions);
- Object.freeze(executionOptions); // immutable after validation (and normalization)
-
- // Determine how strong to make assertions while simultaneously executing different
- // workloads.
- let assertLevel = AssertLevel.OWN_DB;
- if (clusterOptions.sameDB) {
- // The database is shared by multiple workloads, so only make the asserts that apply
- // when the collection is owned by an individual workload.
- assertLevel = AssertLevel.OWN_COLL;
- }
- if (clusterOptions.sameCollection) {
- // The collection is shared by multiple workloads, so only make the asserts that always
- // apply.
- assertLevel = AssertLevel.ALWAYS;
- }
- globalAssertLevel = assertLevel;
-
- const context = {};
- const applyMultipliers = true;
- loadWorkloadContext(workloads, context, executionOptions, applyMultipliers);
+ const context = {};
+ const applyMultipliers = true;
+ loadWorkloadContext(workloads, context, executionOptions, applyMultipliers);
- // Constructing a Cluster instance calls its internal validateClusterOptions() function,
- // which fills in any properties that aren't explicitly present in 'clusterOptions'. We do
- // this before constructing a ThreadManager instance to make its dependency on the
- // 'clusterOptions' being filled in explicit.
- const cluster = new Cluster(clusterOptions);
- const threadMgr = new ThreadManager(clusterOptions, executionMode);
+ // Constructing a Cluster instance calls its internal validateClusterOptions() function,
+ // which fills in any properties that aren't explicitly present in 'clusterOptions'. We do
+ // this before constructing a ThreadManager instance to make its dependency on the
+ // 'clusterOptions' being filled in explicit.
+ const cluster = new Cluster(clusterOptions);
+ const threadMgr = new ThreadManager(clusterOptions, executionMode);
- Random.setRandomSeed(clusterOptions.seed);
+ Random.setRandomSeed(clusterOptions.seed);
- const errors = [];
- const cleanup = [];
- let teardownFailed = false;
- let startTime = Date.now(); // Initialize in case setupWorkload fails below.
- let totalTime;
+ const errors = [];
+ const cleanup = [];
+ let teardownFailed = false;
+ let startTime = Date.now(); // Initialize in case setupWorkload fails below.
+ let totalTime;
- cluster.setup();
+ cluster.setup();
- jsTest.log('Workload(s) started: ' + workloads.join(' '));
+ jsTest.log('Workload(s) started: ' + workloads.join(' '));
- prepareCollections(workloads, context, cluster, clusterOptions, executionOptions);
+ prepareCollections(workloads, context, cluster, clusterOptions, executionOptions);
- try {
- // Set up the thread manager for this set of workloads.
- startTime = Date.now();
+ try {
+ // Set up the thread manager for this set of workloads.
+ startTime = Date.now();
- {
- const maxAllowedThreads = 100 * executionOptions.threadMultiplier;
- threadMgr.init(workloads, context, maxAllowedThreads);
- }
+ {
+ const maxAllowedThreads = 100 * executionOptions.threadMultiplier;
+ threadMgr.init(workloads, context, maxAllowedThreads);
+ }
- // Call each workload's setup function.
- workloads.forEach(function(workload) {
- // Define "iterations" and "threadCount" properties on the workload's $config.data
- // object so that they can be used within its setup(), teardown(), and state
- // functions. This must happen after calling threadMgr.init() in case the thread
- // counts needed to be scaled down.
- setIterations(context[workload].config);
- setThreadCount(context[workload].config);
-
- setupWorkload(workload, context, cluster);
- cleanup.push(workload);
- });
-
- // Await replication after running the $config.setup() function when stepdowns are
- // permitted to ensure its effects aren't rolled back.
- if (cluster.isReplication() && executionOptions.stepdownFiles !== undefined) {
- cluster.awaitReplication();
- }
+ // Call each workload's setup function.
+ workloads.forEach(function(workload) {
+ // Define "iterations" and "threadCount" properties on the workload's $config.data
+ // object so that they can be used within its setup(), teardown(), and state
+ // functions. This must happen after calling threadMgr.init() in case the thread
+ // counts needed to be scaled down.
+ setIterations(context[workload].config);
+ setThreadCount(context[workload].config);
+
+ setupWorkload(workload, context, cluster);
+ cleanup.push(workload);
+ });
+
+ // Await replication after running the $config.setup() function when stepdowns are
+ // permitted to ensure its effects aren't rolled back.
+ if (cluster.isReplication() && executionOptions.stepdownFiles !== undefined) {
+ cluster.awaitReplication();
+ }
- // Transactions run at snapshot read concern unless defaultTransactionReadConcernLevel
- // is set to another level.
- const transactionsWouldUseSnapshotReadConcern =
- !TestData.hasOwnProperty("defaultTransactionReadConcernLevel") ||
- TestData.defaultTransactionReadConcernLevel === "snapshot";
-
- // Synchronize the cluster times across all routers if the tests will be overridden to
- // use transactions, so the earliest global snapshots chosen by each router will include
- // the effects of each setup function. This is only required for snapshot read concern.
- if (cluster.isSharded() && TestData.runInsideTransaction &&
- transactionsWouldUseSnapshotReadConcern) {
- cluster.synchronizeMongosClusterTimes();
- }
+ // Transactions run at snapshot read concern unless defaultTransactionReadConcernLevel
+ // is set to another level.
+ const transactionsWouldUseSnapshotReadConcern =
+ !TestData.hasOwnProperty("defaultTransactionReadConcernLevel") ||
+ TestData.defaultTransactionReadConcernLevel === "snapshot";
+
+ // Synchronize the cluster times across all routers if the tests will be overridden to
+ // use transactions, so the earliest global snapshots chosen by each router will include
+ // the effects of each setup function. This is only required for snapshot read concern.
+ if (cluster.isSharded() && TestData.runInsideTransaction &&
+ transactionsWouldUseSnapshotReadConcern) {
+ cluster.synchronizeMongosClusterTimes();
+ }
- // After the $config.setup() function has been called, it is safe for the stepdown
- // thread to start running. The main thread won't attempt to interact with the cluster
- // until all of the spawned worker threads have finished.
- //
+ // After the $config.setup() function has been called, it is safe for the stepdown
+ // thread to start running. The main thread won't attempt to interact with the cluster
+ // until all of the spawned worker threads have finished.
+ //
- // Indicate that the stepdown thread can run. It is unnecessary for the stepdown thread
- // to indicate that it is going to start running because it will eventually do so after
- // the worker threads have started.
- if (executionOptions.stepdownFiles !== undefined) {
- writeFile(executionOptions.stepdownFiles.permitted, '');
- }
+ // Indicate that the stepdown thread can run. It is unnecessary for the stepdown thread
+ // to indicate that it is going to start running because it will eventually do so after
+ // the worker threads have started.
+ if (executionOptions.stepdownFiles !== undefined) {
+ writeFile(executionOptions.stepdownFiles.permitted, '');
+ }
- // Since the worker threads may be running with causal consistency enabled, we set the
- // initial clusterTime and initial operationTime for the sessions they'll create so that
- // they are guaranteed to observe the effects of the workload's $config.setup() function
- // being called.
- if (typeof executionOptions.sessionOptions === 'object' &&
- executionOptions.sessionOptions !== null) {
- // We only start a session for the worker threads and never start one for the main
- // thread. We can therefore get the clusterTime and operationTime tracked by the
- // underlying DummyDriverSession through any DB instance (i.e. the "test" database
- // here was chosen arbitrarily).
- const session = cluster.getDB('test').getSession();
-
- // JavaScript objects backed by C++ objects (e.g. BSON values from a command
- // response) do not serialize correctly when passed through the ScopedThread
- // constructor. To work around this behavior, we instead pass a stringified form of
- // the JavaScript object through the ScopedThread constructor and use eval() to
- // rehydrate it.
- executionOptions.sessionOptions.initialClusterTime =
- tojson(session.getClusterTime());
- executionOptions.sessionOptions.initialOperationTime =
- tojson(session.getOperationTime());
- }
+ // Since the worker threads may be running with causal consistency enabled, we set the
+ // initial clusterTime and initial operationTime for the sessions they'll create so that
+ // they are guaranteed to observe the effects of the workload's $config.setup() function
+ // being called.
+ if (typeof executionOptions.sessionOptions === 'object' &&
+ executionOptions.sessionOptions !== null) {
+ // We only start a session for the worker threads and never start one for the main
+ // thread. We can therefore get the clusterTime and operationTime tracked by the
+ // underlying DummyDriverSession through any DB instance (i.e. the "test" database
+ // here was chosen arbitrarily).
+ const session = cluster.getDB('test').getSession();
+
+ // JavaScript objects backed by C++ objects (e.g. BSON values from a command
+ // response) do not serialize correctly when passed through the ScopedThread
+ // constructor. To work around this behavior, we instead pass a stringified form of
+ // the JavaScript object through the ScopedThread constructor and use eval() to
+ // rehydrate it.
+ executionOptions.sessionOptions.initialClusterTime = tojson(session.getClusterTime());
+ executionOptions.sessionOptions.initialOperationTime =
+ tojson(session.getOperationTime());
+ }
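A minimal sketch of the tojson()/eval() round-trip described in the comment above, using the shell's tojson() and eval() helpers; workerMain() and its argument plumbing are hypothetical stand-ins for the real ScopedThread wiring, and 'cluster' is the object from the surrounding code:

    // Main thread: stringify the BSON-backed value before handing it to a thread.
    const session = cluster.getDB('test').getSession();
    const initialClusterTime = tojson(session.getClusterTime());

    // Worker thread (hypothetical entry point): rehydrate the stringified value.
    function workerMain(initialClusterTime) {
        const clusterTime = eval('(' + initialClusterTime + ')');
        // 'clusterTime' is now a plain shell object usable in session options.
    }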
+ try {
try {
- try {
- // Start this set of worker threads.
- threadMgr.spawnAll(cluster, executionOptions);
- // Allow 20% of the threads to fail. This allows the workloads to run on
- // underpowered test hosts.
- threadMgr.checkFailed(0.2);
- } finally {
- // Threads must be joined before destruction, so do this even in the presence of
- // exceptions.
- errors.push(...threadMgr.joinAll().map(
- e => new WorkloadFailure(
- e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
- }
+ // Start this set of worker threads.
+ threadMgr.spawnAll(cluster, executionOptions);
+ // Allow 20% of the threads to fail. This allows the workloads to run on
+ // underpowered test hosts.
+ threadMgr.checkFailed(0.2);
} finally {
- // Until we are guaranteed that the stepdown thread isn't running, it isn't safe for
- // the $config.teardown() function to be called. We should signal to resmoke.py that
- // the stepdown thread should stop running and wait for the stepdown thread to
- // signal that it has stopped.
- //
- // Signal to the stepdown thread to stop stepping down the cluster.
- if (executionOptions.stepdownFiles !== undefined) {
- writeFile(executionOptions.stepdownFiles.idleRequest, '');
-
- // Wait for the acknowledgement file to be created by the stepdown thread.
- assert.soonNoExcept(function() {
- // The cat() function will throw an exception if the file isn't found.
- cat(executionOptions.stepdownFiles.idleAck);
- return true;
- }, "stepdown still in progress");
- }
+ // Threads must be joined before destruction, so do this even in the presence of
+ // exceptions.
+ errors.push(...threadMgr.joinAll().map(
+ e => new WorkloadFailure(
+ e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
}
} finally {
- if (cluster.shouldPerformContinuousStepdowns()) {
- cluster.reestablishConnectionsAfterFailover();
+ // Until we are guaranteed that the stepdown thread isn't running, it isn't safe for
+ // the $config.teardown() function to be called. We should signal to resmoke.py that
+ // the stepdown thread should stop running and wait for the stepdown thread to
+ // signal that it has stopped.
+ //
+ // Signal to the stepdown thread to stop stepping down the cluster.
+ if (executionOptions.stepdownFiles !== undefined) {
+ writeFile(executionOptions.stepdownFiles.idleRequest, '');
+
+ // Wait for the acknowledgement file to be created by the stepdown thread.
+ assert.soonNoExcept(function() {
+ // The cat() function will throw an exception if the file isn't found.
+ cat(executionOptions.stepdownFiles.idleAck);
+ return true;
+ }, "stepdown still in progress");
}
- // Call each workload's teardown function. After all teardowns have completed, check if
- // any of them failed.
- const cleanupResults = cleanup.map(
- workload => cleanupWorkload(workload, context, cluster, errors, 'Foreground'));
- teardownFailed = cleanupResults.some(success => (success === false));
-
- totalTime = Date.now() - startTime;
- jsTest.log('Workload(s) completed in ' + totalTime + ' ms: ' + workloads.join(' '));
}
-
- // Throw any existing errors so that resmoke.py can abort its execution of the test suite.
- throwError(errors);
-
- cluster.teardown();
- }
-
- if (typeof db === 'undefined') {
- throw new Error(
- 'resmoke_runner.js must be run with the mongo shell already connected to the database');
- }
-
- const clusterOptions = {
- replication: {enabled: false},
- sharded: {enabled: false},
- };
-
- // The TestData.discoverTopology is false when we only care about connecting to either a
- // standalone or primary node in a replica set.
- if (TestData.discoverTopology !== false) {
- const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
-
- if (topology.type === Topology.kReplicaSet) {
- clusterOptions.replication.enabled = true;
- clusterOptions.replication.numNodes = topology.nodes.length;
- } else if (topology.type === Topology.kShardedCluster) {
- clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
- clusterOptions.sharded.enabled = true;
- clusterOptions.sharded.enableAutoSplit = TestData.hasOwnProperty('runningWithAutoSplit')
- ? TestData.runningWithAutoSplit
- : true;
- clusterOptions.sharded.enableBalancer = TestData.hasOwnProperty('runningWithBalancer')
- ? TestData.runningWithBalancer
- : true;
- clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
- clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
- clusterOptions.sharded.stepdownOptions = {};
- clusterOptions.sharded.stepdownOptions.configStepdown =
- TestData.runningWithConfigStepdowns || false;
- clusterOptions.sharded.stepdownOptions.shardStepdown =
- TestData.runningWithShardStepdowns || false;
- } else if (topology.type !== Topology.kStandalone) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
+ } finally {
+ if (cluster.shouldPerformContinuousStepdowns()) {
+ cluster.reestablishConnectionsAfterFailover();
}
+ // Call each workload's teardown function. After all teardowns have completed, check if
+ // any of them failed.
+ const cleanupResults = cleanup.map(
+ workload => cleanupWorkload(workload, context, cluster, errors, 'Foreground'));
+ teardownFailed = cleanupResults.some(success => (success === false));
+
+ totalTime = Date.now() - startTime;
+ jsTest.log('Workload(s) completed in ' + totalTime + ' ms: ' + workloads.join(' '));
}
- clusterOptions.sameDB = TestData.sameDB;
- clusterOptions.sameCollection = TestData.sameCollection;
+ // Throw any existing errors so that resmoke.py can abort its execution of the test suite.
+ throwError(errors);
+
+ cluster.teardown();
+}
+
+if (typeof db === 'undefined') {
+ throw new Error(
+ 'resmoke_runner.js must be run with the mongo shell already connected to the database');
+}
+
+const clusterOptions = {
+ replication: {enabled: false},
+ sharded: {enabled: false},
+};
+
+// The TestData.discoverTopology is false when we only care about connecting to either a
+// standalone or primary node in a replica set.
+if (TestData.discoverTopology !== false) {
+ const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
+
+ if (topology.type === Topology.kReplicaSet) {
+ clusterOptions.replication.enabled = true;
+ clusterOptions.replication.numNodes = topology.nodes.length;
+ } else if (topology.type === Topology.kShardedCluster) {
+ clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
+ clusterOptions.sharded.enabled = true;
+ clusterOptions.sharded.enableAutoSplit =
+ TestData.hasOwnProperty('runningWithAutoSplit') ? TestData.runningWithAutoSplit : true;
+ clusterOptions.sharded.enableBalancer =
+ TestData.hasOwnProperty('runningWithBalancer') ? TestData.runningWithBalancer : true;
+ clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
+ clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
+ clusterOptions.sharded.stepdownOptions = {};
+ clusterOptions.sharded.stepdownOptions.configStepdown =
+ TestData.runningWithConfigStepdowns || false;
+ clusterOptions.sharded.stepdownOptions.shardStepdown =
+ TestData.runningWithShardStepdowns || false;
+ } else if (topology.type !== Topology.kStandalone) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
+}
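As a standalone illustration of the topology-discovery branch above: DiscoverTopology and Topology come from jstests/libs/discover_topology.js, and the printed summaries are purely illustrative:

    load('jstests/libs/discover_topology.js');  // for DiscoverTopology and Topology

    const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
    if (topology.type === Topology.kReplicaSet) {
        print('replica set with ' + topology.nodes.length + ' node(s)');
    } else if (topology.type === Topology.kShardedCluster) {
        print('sharded cluster with ' + Object.keys(topology.shards).length + ' shard(s)');
    } else if (topology.type === Topology.kStandalone) {
        print('standalone node');
    } else {
        throw new Error('Unrecognized topology format: ' + tojson(topology));
    }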
- let workloads = TestData.fsmWorkloads;
+clusterOptions.sameDB = TestData.sameDB;
+clusterOptions.sameCollection = TestData.sameCollection;
- let sessionOptions = {};
- if (TestData.runningWithCausalConsistency !== undefined) {
- // Explicit sessions are causally consistent by default, so causal consistency has to be
- // explicitly disabled.
- sessionOptions.causalConsistency = TestData.runningWithCausalConsistency;
+let workloads = TestData.fsmWorkloads;
- if (TestData.runningWithCausalConsistency) {
- sessionOptions.readPreference = {mode: 'secondary'};
- }
- }
+let sessionOptions = {};
+if (TestData.runningWithCausalConsistency !== undefined) {
+ // Explicit sessions are causally consistent by default, so causal consistency has to be
+ // explicitly disabled.
+ sessionOptions.causalConsistency = TestData.runningWithCausalConsistency;
- if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
- sessionOptions.retryWrites = true;
- }
-
- const executionOptions = {dbNamePrefix: TestData.dbNamePrefix || ""};
- const resmokeDbPathPrefix = TestData.resmokeDbPathPrefix || ".";
-
- // The stepdown file names need to match the same construction as found in
- // buildscripts/resmokelib/testing/hooks/stepdown.py.
- if (TestData.useStepdownPermittedFile) {
- executionOptions.stepdownFiles = {
- permitted: resmokeDbPathPrefix + '/permitted',
- idleRequest: resmokeDbPathPrefix + '/idle_request',
- idleAck: resmokeDbPathPrefix + '/idle_ack',
- };
+ if (TestData.runningWithCausalConsistency) {
+ sessionOptions.readPreference = {mode: 'secondary'};
}
+}
+
+if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
+ sessionOptions.retryWrites = true;
+}
+
+const executionOptions = {
+ dbNamePrefix: TestData.dbNamePrefix || ""
+};
+const resmokeDbPathPrefix = TestData.resmokeDbPathPrefix || ".";
+
+// The stepdown file names need to match the same construction as found in
+// buildscripts/resmokelib/testing/hooks/stepdown.py.
+if (TestData.useStepdownPermittedFile) {
+ executionOptions.stepdownFiles = {
+ permitted: resmokeDbPathPrefix + '/permitted',
+ idleRequest: resmokeDbPathPrefix + '/idle_request',
+ idleAck: resmokeDbPathPrefix + '/idle_ack',
+ };
+}
- if (Object.keys(sessionOptions).length > 0 || TestData.runningWithSessions) {
- executionOptions.sessionOptions = sessionOptions;
- }
+if (Object.keys(sessionOptions).length > 0 || TestData.runningWithSessions) {
+ executionOptions.sessionOptions = sessionOptions;
+}
- runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
+runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
})();
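The stepdown-file handshake threaded through runWorkloads() above reduces to two signals and one acknowledgement. A condensed sketch, assuming the shell's writeFile()/cat() helpers and illustrative file paths:

    const stepdownFiles = {
        permitted: '/tmp/permitted',  // illustrative paths; resmoke.py supplies the real ones
        idleRequest: '/tmp/idle_request',
        idleAck: '/tmp/idle_ack',
    };

    // After setup: tell the stepdown thread it may start failing over nodes.
    writeFile(stepdownFiles.permitted, '');

    // Before teardown: ask it to stop, then wait for its acknowledgement file.
    writeFile(stepdownFiles.idleRequest, '');
    assert.soonNoExcept(function() {
        cat(stepdownFiles.idleAck);  // cat() throws until the file exists
        return true;
    }, 'stepdown still in progress');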
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index e02d0fb4086..7350786910d 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -8,15 +8,14 @@ load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and
load('jstests/concurrency/fsm_utils/setup_teardown_functions.js');
var runner = (function() {
-
function validateExecutionMode(mode) {
var allowedKeys = ['composed', 'parallel', 'serial'];
Object.keys(mode).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
mode.composed = mode.composed || false;
@@ -61,8 +60,8 @@ var runner = (function() {
Object.keys(options).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
if (typeof options.subsetSize !== 'undefined') {
@@ -130,8 +129,8 @@ var runner = (function() {
Object.keys(options).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
if (typeof options.dropDatabaseBlacklist !== 'undefined') {
@@ -752,7 +751,6 @@ var runner = (function() {
loadWorkloadContext,
}
};
-
})();
var runWorkloadsSerially = runner.serial;
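The allowed-keys validation reformatted in the hunks above is a reusable pattern; a small sketch, with the function name and key list chosen for illustration:

    function validateMode(mode) {
        var allowedKeys = ['composed', 'parallel', 'serial'];
        Object.keys(mode).forEach(function(option) {
            assert.contains(option,
                            allowedKeys,
                            'invalid option: ' + tojson(option) +
                                '; valid options are: ' + tojson(allowedKeys));
        });
    }

    validateMode({serial: true});   // passes
    // validateMode({bogus: true}); // throws with the message above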
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index f5a827794c6..cb172148fe0 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -6,7 +6,6 @@ load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
load('jstests/libs/specific_secondary_reader_mongo.js');
var workerThread = (function() {
-
// workloads = list of workload filenames
// args.tid = the thread identifier
// args.data = map of workload -> 'this' parameter passed to the FSM state functions
@@ -193,8 +192,7 @@ var workerThread = (function() {
// them here as non-configurable and non-writable.
Object.defineProperties(data, {
'iterations': {configurable: false, writable: false, value: data.iterations},
- 'threadCount':
- {configurable: false, writable: false, value: data.threadCount}
+ 'threadCount': {configurable: false, writable: false, value: data.threadCount}
});
data.tid = args.tid;
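A short sketch of the Object.defineProperties() locking shown in the worker_thread.js hunk above, applied to a plain object:

    var data = {iterations: 20, threadCount: 10};
    Object.defineProperties(data, {
        'iterations': {configurable: false, writable: false, value: data.iterations},
        'threadCount': {configurable: false, writable: false, value: data.threadCount}
    });

    data.iterations = 999;          // silently ignored (throws in strict mode)
    assert.eq(20, data.iterations); // the original value is preserved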
diff --git a/jstests/concurrency/fsm_selftests.js b/jstests/concurrency/fsm_selftests.js
index 30c614e9148..95b9b5a0d96 100644
--- a/jstests/concurrency/fsm_selftests.js
+++ b/jstests/concurrency/fsm_selftests.js
@@ -7,37 +7,37 @@
load('jstests/concurrency/fsm_libs/fsm.js');
(function() {
- var getWeightedRandomChoice = fsm._getWeightedRandomChoice;
+var getWeightedRandomChoice = fsm._getWeightedRandomChoice;
- var doc = {a: 0.25, b: 0.5, c: 0.25};
+var doc = {a: 0.25, b: 0.5, c: 0.25};
- // NOTE: getWeightedRandomChoice calls assert internally, so it will print stack traces
- // when assert.throws executes
- assert.throws(function() {
- getWeightedRandomChoice(doc, -1);
- }, [], 'should reject negative values');
- assert.throws(function() {
- getWeightedRandomChoice(doc, 1);
- }, [], 'should reject values == 1');
- assert.throws(function() {
- getWeightedRandomChoice(doc, 2);
- }, [], 'should reject values > 1');
+// NOTE: getWeightedRandomChoice calls assert internally, so it will print stack traces
+// when assert.throws executes
+assert.throws(function() {
+ getWeightedRandomChoice(doc, -1);
+}, [], 'should reject negative values');
+assert.throws(function() {
+ getWeightedRandomChoice(doc, 1);
+}, [], 'should reject values == 1');
+assert.throws(function() {
+ getWeightedRandomChoice(doc, 2);
+}, [], 'should reject values > 1');
- assert.throws(function() {
- getWeightedRandomChoice({}, 0.0);
- }, [], 'cannot choose from zero states');
- assert.throws(function() {
- getWeightedRandomChoice({}, 0.5);
- }, [], 'cannot choose from zero states');
- assert.throws(function() {
- getWeightedRandomChoice({}, 0.99);
- }, [], 'cannot choose from zero states');
+assert.throws(function() {
+ getWeightedRandomChoice({}, 0.0);
+}, [], 'cannot choose from zero states');
+assert.throws(function() {
+ getWeightedRandomChoice({}, 0.5);
+}, [], 'cannot choose from zero states');
+assert.throws(function() {
+ getWeightedRandomChoice({}, 0.99);
+}, [], 'cannot choose from zero states');
- assert.eq('a', getWeightedRandomChoice(doc, 0.00), '0');
- assert.eq('a', getWeightedRandomChoice(doc, 0.24), '1');
- assert.eq('b', getWeightedRandomChoice(doc, 0.25), '2');
- assert.eq('b', getWeightedRandomChoice(doc, 0.50), '3');
- assert.eq('b', getWeightedRandomChoice(doc, 0.74), '4');
- assert.eq('c', getWeightedRandomChoice(doc, 0.75), '5');
- assert.eq('c', getWeightedRandomChoice(doc, 0.99), '6');
+assert.eq('a', getWeightedRandomChoice(doc, 0.00), '0');
+assert.eq('a', getWeightedRandomChoice(doc, 0.24), '1');
+assert.eq('b', getWeightedRandomChoice(doc, 0.25), '2');
+assert.eq('b', getWeightedRandomChoice(doc, 0.50), '3');
+assert.eq('b', getWeightedRandomChoice(doc, 0.74), '4');
+assert.eq('c', getWeightedRandomChoice(doc, 0.75), '5');
+assert.eq('c', getWeightedRandomChoice(doc, 0.99), '6');
})();
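The assertions above pin down cumulative-weight semantics: with {a: 0.25, b: 0.5, c: 0.25}, a value in [0, 0.25) selects 'a', [0.25, 0.75) selects 'b', and [0.75, 1) selects 'c'. A minimal implementation consistent with those tests (not necessarily the one in fsm.js) might look like:

    function getWeightedRandomChoice(doc, randVal) {
        assert(randVal >= 0 && randVal < 1, 'value must be in [0, 1)');
        var keys = Object.keys(doc);
        assert.gt(keys.length, 0, 'cannot choose from zero states');

        var cumulative = 0;
        for (var i = 0; i < keys.length; ++i) {
            cumulative += doc[keys[i]];
            if (randVal < cumulative) {
                return keys[i];
            }
        }
        assert(false, 'weights should sum to 1');
    }

    assert.eq('b', getWeightedRandomChoice({a: 0.25, b: 0.5, c: 0.25}, 0.5));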
diff --git a/jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js b/jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js
index 43bdc7ea8a8..9f8ed997895 100644
--- a/jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js
+++ b/jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js
@@ -1,7 +1,6 @@
'use strict';
var {withTxnAndAutoRetry, isKilledSessionCode} = (function() {
-
/**
* Calls 'func' with the print() function overridden to be a no-op.
*
@@ -71,9 +70,9 @@ var {withTxnAndAutoRetry, isKilledSessionCode} = (function() {
// is a retryable write.
if (!commitRes.ok && retryOnKilledSession && isKilledSessionCode(commitRes.code)) {
print("-=-=-=- Retrying commit after killed session code, sessionId: " +
- tojsononeline(session.getSessionId()) + ", txnNumber: " +
- tojsononeline(session.getTxnNumber_forTesting()) + ", res: " +
- tojsononeline(commitRes));
+ tojsononeline(session.getSessionId()) +
+ ", txnNumber: " + tojsononeline(session.getTxnNumber_forTesting()) +
+ ", res: " + tojsononeline(commitRes));
continue;
}
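A condensed sketch of the killed-session commit retry shown above; attemptCommit() is a hypothetical stand-in for the real commit call, while isKilledSessionCode() and tojsononeline() are the helpers visible in the hunk:

    function commitWithRetry(session, retryOnKilledSession) {
        while (true) {
            const commitRes = attemptCommit(session);  // hypothetical commit helper
            if (!commitRes.ok && retryOnKilledSession && isKilledSessionCode(commitRes.code)) {
                print('-=-=-=- Retrying commit after killed session code, res: ' +
                      tojsononeline(commitRes));
                continue;
            }
            return commitRes;
        }
    }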
diff --git a/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js b/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
index 1ad738f437b..7a825f6c009 100644
--- a/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
+++ b/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
@@ -15,9 +15,7 @@
*/
var $config = (function() {
-
var states = (function() {
-
function init(db, collName) {
this.session = db.getMongo().startSession();
}
@@ -41,10 +39,10 @@ var $config = (function() {
} catch (e) {
assertWhenOwnColl.contains(e.code,
[
- ErrorCodes.LockTimeout,
- ErrorCodes.WriteConflict,
- ErrorCodes.SnapshotUnavailable,
- ErrorCodes.OperationNotSupportedInTransaction
+ ErrorCodes.LockTimeout,
+ ErrorCodes.WriteConflict,
+ ErrorCodes.SnapshotUnavailable,
+ ErrorCodes.OperationNotSupportedInTransaction
],
() => tojson(e));
}
@@ -338,5 +336,4 @@ var $config = (function() {
setup: setup,
teardown: teardown
};
-
})();
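The catch block reindented above follows a common workload pattern: run the transactional work and, if it throws, assert that the error code is one of the expected conflict codes. A stripped-down sketch with the transactional body elided:

    try {
        // ... transactional reads/writes against the collection ...
    } catch (e) {
        assertWhenOwnColl.contains(e.code,
                                   [
                                       ErrorCodes.LockTimeout,
                                       ErrorCodes.WriteConflict,
                                       ErrorCodes.SnapshotUnavailable,
                                       ErrorCodes.OperationNotSupportedInTransaction
                                   ],
                                   () => tojson(e));
    }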
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index ec598ef00c8..b38c8771d74 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -7,7 +7,6 @@
* then each thread does an aggregation with an empty $match.
*/
var $config = (function() {
-
var data = {
numDocs: 1000,
// Use 12KB documents by default. This number is useful because 12,000 documents each of
diff --git a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
index b10fc88a99d..5dbdd784ae0 100644
--- a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+++ b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
@@ -6,7 +6,6 @@
* Runs a $graphLookup aggregation simultaneously with updates.
*/
var $config = (function() {
-
var data = {numDocs: 1000};
var states = {
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index ae60b810424..adb7a787e20 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// use enough docs to exceed 100MB, the in-memory limit for $sort and $group
$config.data.numDocs = 24 * 1000;
var MB = 1024 * 1024; // bytes
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
index 91b4eef3755..58971c4d30a 100644
--- a/jstests/concurrency/fsm_workloads/agg_match.js
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -9,7 +9,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.getOutCollName = function getOutCollName(collName) {
return collName + '_out_agg_match';
};
diff --git a/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js b/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js
index 5b26801afbf..169de096aa2 100644
--- a/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js
+++ b/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js
@@ -23,11 +23,11 @@ var $config = extendWorkload($config, function($config, $super) {
const res = db[collName].aggregate([
{$addFields: {_id: this.tid, count: this.threadRunCount}},
{
- $merge: {
- into: this.collWithMigrations,
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
+ $merge: {
+ into: this.collWithMigrations,
+ whenMatched: "replace",
+ whenNotMatched: "insert"
+ }
},
]);
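The $merge stage above upserts each aggregated document into the target collection, replacing documents whose _id already matches. Run standalone it looks like the following; collection names are illustrative:

    db.source.aggregate([
        {$addFields: {count: 1}},
        {
            $merge: {
                into: 'target',           // illustrative target collection
                whenMatched: 'replace',   // replace the matching document
                whenNotMatched: 'insert'  // insert when no _id matches
            }
        },
    ]);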
diff --git a/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js b/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js
index ef8f5ab6c04..084b8c9b0d0 100644
--- a/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js
+++ b/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js
@@ -20,15 +20,15 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.aggregate = function aggregate(db, collName, connCache) {
const res = db[collName].aggregate([
{
- $project: {
- "_id.tid": {$literal: this.tid},
- "_id.count": {$literal: this.threadRunCount},
- "_id.doc": "$_id"
- }
+ $project: {
+ "_id.tid": {$literal: this.tid},
+ "_id.count": {$literal: this.threadRunCount},
+ "_id.doc": "$_id"
+ }
},
{
- $merge:
- {into: this.collWithMigrations, whenMatched: "fail", whenNotMatched: "insert"}
+ $merge:
+ {into: this.collWithMigrations, whenMatched: "fail", whenNotMatched: "insert"}
},
]);
diff --git a/jstests/concurrency/fsm_workloads/agg_out.js b/jstests/concurrency/fsm_workloads/agg_out.js
index 33ced7f4ff6..d5c0a7ec7cb 100644
--- a/jstests/concurrency/fsm_workloads/agg_out.js
+++ b/jstests/concurrency/fsm_workloads/agg_out.js
@@ -19,7 +19,6 @@ load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $confi
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = extendWorkload($config, function($config, $super) {
-
// Use a smaller document size, but more iterations. The smaller documents will ensure each
// operation is faster, giving us time to do more operations and thus increasing the likelihood
// that any two operations will be happening concurrently.
diff --git a/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
index d36769776fd..60c31b8a429 100644
--- a/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+++ b/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
@@ -9,7 +9,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.aggregate = function aggregate(db, collName) {
// $out to the same collection so that concurrent aggregate commands would cause congestion.
db[collName].runCommand(
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
index 8a04195fe15..757ecf76097 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
return collName + '_out_agg_sort_';
};
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index f843d623ff3..b8cbad826bb 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// use enough docs to exceed 100MB, the in-memory limit for $sort and $group
$config.data.numDocs = 24 * 1000;
var MB = 1024 * 1024; // bytes
diff --git a/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js
index 093ff9b4870..721b653eec1 100644
--- a/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js
+++ b/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js
@@ -17,7 +17,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); //
load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// The base setup will insert 'partitionSize' number of documents per thread, evenly
// distributing across the chunks. Documents will only have the "_id" field.
$config.data.partitionSize = 50;
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
index 6ad5de17d9a..1cc041b876a 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -8,7 +8,6 @@
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the role name,
// since the workload name is assumed to be unique.
@@ -16,7 +15,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueRoleName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -45,7 +43,6 @@ var $config = (function() {
}
return {init: init, createRole: createRole};
-
})();
var transitions = {init: {createRole: 1}, createRole: {createRole: 1}};
@@ -63,5 +60,4 @@ var $config = (function() {
transitions: transitions,
teardown: teardown
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
index 07fd1135032..2a703c78833 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -8,7 +8,6 @@
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUsers
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the username,
// since the workload name is assumed to be unique.
@@ -16,7 +15,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueUsername(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -40,7 +38,6 @@ var $config = (function() {
}
return {init: init, createUser: createUser};
-
})();
var transitions = {init: {createUser: 1}, createUser: {createUser: 1}};
@@ -58,5 +55,4 @@ var $config = (function() {
transitions: transitions,
teardown: teardown
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js
index eba694ccd49..82ae01e58e7 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js
@@ -9,7 +9,6 @@
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the role name,
// since the workload name is assumed to be unique.
@@ -17,7 +16,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueRoleName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -45,11 +43,9 @@ var $config = (function() {
}
return {init: init, createAndDropRole: createAndDropRole};
-
})();
var transitions = {init: {createAndDropRole: 1}, createAndDropRole: {createAndDropRole: 1}};
return {threadCount: 10, iterations: 20, data: data, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js
index a47d5566874..e8b9c1db4de 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js
@@ -7,7 +7,6 @@
* drops the user from the database.
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the username,
// since the workload name is assumed to be unique.
@@ -15,7 +14,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueUsername(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -38,11 +36,9 @@ var $config = (function() {
}
return {init: init, createAndDropUser: createAndDropUser};
-
})();
var transitions = {init: {createAndDropUser: 1}, createAndDropUser: {createAndDropUser: 1}};
return {threadCount: 10, iterations: 20, data: data, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index 82ddcaba932..15ea365c08e 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -11,14 +11,12 @@
* All threads update the same TTL index on the same collection.
*/
var $config = (function() {
-
var data = {
numDocs: 1000,
maxTTL: 5000 // max time to live
};
var states = (function() {
-
function collMod(db, collName) {
var newTTL = Random.randInt(this.maxTTL);
var res = db.runCommand({
@@ -33,7 +31,6 @@ var $config = (function() {
}
return {collMod: collMod};
-
})();
var transitions = {collMod: {collMod: 1}};
@@ -64,5 +61,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
index 3b906236c5b..6dddc40b49e 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -17,7 +17,6 @@ var $config = (function() {
var data = {prefix: 'convert_to_capped_collection'};
var states = (function() {
-
function uniqueCollectionName(prefix, tid) {
return prefix + '_' + tid;
}
@@ -79,5 +78,4 @@ var $config = (function() {
transitions: transitions,
setup: setup,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
index 05315afeb55..1b10482a383 100644
--- a/jstests/concurrency/fsm_workloads/count.js
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -14,7 +14,6 @@
load("jstests/libs/fixture_helpers.js"); // For isMongos.
var $config = (function() {
-
var data = {
randRange: function randRange(low, high) {
// return random number in range [low, high]
@@ -31,7 +30,6 @@ var $config = (function() {
};
var states = (function() {
-
function init(db, collName) {
this.modulus = this.randRange(5, 10);
this.countPerNum = this.randRange(50, 100);
@@ -59,11 +57,9 @@ var $config = (function() {
}
return {init: init, count: count};
-
})();
var transitions = {init: {count: 1}, count: {count: 1}};
return {data: data, threadCount: 10, iterations: 20, states: states, transitions: transitions};
-
})();
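The randRange helper reformatted above maps the shell's Random.randInt() onto an inclusive [low, high] range; a sketch of the usual construction, assuming the generator has been seeded:

    Random.setRandomSeed();

    function randRange(low, high) {
        assert.gt(high, low);
        // Random.randInt(n) returns an integer in [0, n), so shift by 'low'.
        return low + Random.randInt(high - low + 1);
    }

    var modulus = randRange(5, 10);  // integer in [5, 10]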
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 672c369769c..5a74c060919 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -10,7 +10,6 @@
*/
var $config = (function() {
-
// Returns a document of the form { _id: ObjectId(...), field: '...' }
// with specified BSON size.
function makeDocWithSize(targetSize) {
@@ -84,7 +83,6 @@ var $config = (function() {
};
var states = (function() {
-
var options = {
capped: true,
size: 8192 // multiple of 256; larger than 4096 default
@@ -107,7 +105,6 @@ var $config = (function() {
}
return {init: init, create: create};
-
})();
var transitions = {init: {create: 1}, create: {create: 1}};
@@ -119,5 +116,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
index 8e89a5c63a5..660827f312a 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for e
load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
$config.data.prefix = 'create_capped_collection_maxdocs';
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
index 7b851e5b7fb..61db8d10824 100644
--- a/jstests/concurrency/fsm_workloads/create_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -7,7 +7,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -15,7 +14,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueCollectionName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -32,7 +30,6 @@ var $config = (function() {
}
return {init: init, create: create};
-
})();
var transitions = {init: {create: 1}, create: {create: 1}};
@@ -44,5 +41,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/create_database.js b/jstests/concurrency/fsm_workloads/create_database.js
index 884c5d442df..573764a98ef 100644
--- a/jstests/concurrency/fsm_workloads/create_database.js
+++ b/jstests/concurrency/fsm_workloads/create_database.js
@@ -15,7 +15,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeralForTest
var $config = (function() {
-
let data = {
checkCommandResult: function checkCommandResult(mayFailWithDatabaseDifferCase, res) {
if (mayFailWithDatabaseDifferCase && !res.ok)
@@ -47,7 +46,6 @@ var $config = (function() {
this.myDB = db.getSiblingDB(this.uniqueDBName);
this.created = false;
this.unique = true;
-
},
useSemiUniqueDBName: function useSemiUniqueDBName(db, collName) {
@@ -138,6 +136,8 @@ var $config = (function() {
// We only run a few iterations to reduce the amount of data cumulatively
// written to disk.
threadCount: 10,
- iterations: 120, states, transitions,
+ iterations: 120,
+ states,
+ transitions,
};
})();
diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js
index bf5e431531b..9fec0d40a95 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background.js
@@ -13,7 +13,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = (function() {
-
var data = {
nDocumentsToSeed: 1000,
nDocumentsToCreate: 200,
@@ -46,7 +45,6 @@ var $config = (function() {
};
var states = (function() {
-
function init(db, collName) {
// Add thread-specific documents
var bulk = db[collName].initializeUnorderedBulkOp();
@@ -187,7 +185,6 @@ var $config = (function() {
updateDocs: updateDocs,
deleteDocs: deleteDocs
};
-
})();
var transitions = {
@@ -249,5 +246,4 @@ var $config = (function() {
teardown: teardown,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js b/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
index 08261178b7f..114c726eeee 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
@@ -27,7 +27,6 @@ var $config = extendWorkload($config, function($config, $super) {
};
$config.data.extendDocument = function extendDocument(originalDoc) {
-
// Be sure we're not overwriting an existing field.
assertAlways.eq(originalDoc.hasOwnProperty(fieldName), false);
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique.js b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
index e12c8e59a09..0e147d42321 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
@@ -9,7 +9,6 @@
*/
var $config = (function() {
-
var data = {
prefix: "create_index_background_unique_",
numDocsToLoad: 5000,
@@ -81,7 +80,6 @@ var $config = (function() {
buildIndex: buildIndex,
dropIndex: dropIndex,
};
-
})();
var transitions = {
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
index c6f8feb30fa..c1f0da117f2 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); //
load('jstests/concurrency/fsm_workloads/create_index_background_unique.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.prefix = "create_index_background_unique_capped_";
$config.data.getCollectionOptions = function() {
// We create an 8MB capped collection, as it will comfortably fit the collection data
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique_collmod_capped.js b/jstests/concurrency/fsm_workloads/create_index_background_unique_collmod_capped.js
index fed8c0cf851..e178ef52cc9 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique_collmod_capped.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique_collmod_capped.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/create_index_background_unique_collmod.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.prefix = "create_index_background_unique_collmod_capped_";
$config.data.getCollectionOptions = function() {
// We create an 8MB capped collection, as it will comfortably fit the collection data
diff --git a/jstests/concurrency/fsm_workloads/database_versioning.js b/jstests/concurrency/fsm_workloads/database_versioning.js
index 8759a0f9be5..31b97f5292a 100644
--- a/jstests/concurrency/fsm_workloads/database_versioning.js
+++ b/jstests/concurrency/fsm_workloads/database_versioning.js
@@ -7,9 +7,7 @@
* @tags: [requires_sharding]
*/
var $config = (function() {
-
var states = (function() {
-
function init(db, collName) {
// Dynamically load the shard names for the movePrimary thread to avoid hard-coding
// them.
@@ -92,7 +90,6 @@ var $config = (function() {
}
return {init: init, state: state};
-
})();
var transitions = {init: {state: 1}, state: {state: 1}};
@@ -144,5 +141,4 @@ var $config = (function() {
setup: setup,
teardown: teardown,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
index 9e58499b9b9..a400d18b29d 100644
--- a/jstests/concurrency/fsm_workloads/distinct.js
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -9,11 +9,9 @@
*/
var $config = (function() {
-
var data = {numDocs: 1000, prefix: 'distinct_fsm', shardKey: {i: 1}};
var states = (function() {
-
function init(db, collName) {
this.threadCollName = this.prefix + '_' + this.tid;
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
@@ -31,7 +29,6 @@ var $config = (function() {
}
return {init: init, distinct: distinct};
-
})();
var transitions = {init: {distinct: 1}, distinct: {distinct: 1}};
@@ -43,5 +40,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
index 3727c968a14..1848fe78901 100644
--- a/jstests/concurrency/fsm_workloads/distinct_noindex.js
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -8,7 +8,6 @@
* Each thread operates on the same collection.
*/
var $config = (function() {
-
var data = {
randRange: function randRange(low, high) {
assertAlways.gt(high, low);
@@ -18,7 +17,6 @@ var $config = (function() {
};
var states = (function() {
-
function init(db, collName) {
this.modulus = this.randRange(5, 15);
@@ -36,11 +34,9 @@ var $config = (function() {
}
return {init: init, distinct: distinct};
-
})();
var transitions = {init: {distinct: 1}, distinct: {distinct: 1}};
return {data: data, threadCount: 10, iterations: 20, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js
index 66fa7539de2..950c9f3d5ed 100644
--- a/jstests/concurrency/fsm_workloads/drop_collection.js
+++ b/jstests/concurrency/fsm_workloads/drop_collection.js
@@ -6,7 +6,6 @@
* Repeatedly creates and drops a collection.
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -14,7 +13,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueCollectionName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -31,7 +29,6 @@ var $config = (function() {
}
return {init: init, createAndDrop: createAndDrop};
-
})();
var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}};
@@ -45,5 +42,4 @@ var $config = (function() {
// The threadCount and iterations can be increased once PM-697 ("Remove all usages of
// distributed lock") is complete.
return {threadCount: 5, iterations: 5, data: data, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js
index 3a87701b086..8a13461bb09 100644
--- a/jstests/concurrency/fsm_workloads/drop_database.js
+++ b/jstests/concurrency/fsm_workloads/drop_database.js
@@ -6,7 +6,6 @@
* Repeatedly creates and drops a database.
*/
var $config = (function() {
-
var states = {
init: function init(db, collName) {
this.uniqueDBName = db.getName() + 'drop_database' + this.tid;
@@ -27,5 +26,4 @@ var $config = (function() {
var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}};
return {threadCount: 10, iterations: 20, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
index 637fbbd4954..3444b8eac33 100644
--- a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
+++ b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
@@ -9,7 +9,6 @@
* index drops.
*/
var $config = (function() {
-
let data = {
collName: 'drop_index_during_replan',
indexSpecs: [
@@ -75,5 +74,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js
index 7a65411798c..7eb5483d9f5 100644
--- a/jstests/concurrency/fsm_workloads/explain.js
+++ b/jstests/concurrency/fsm_workloads/explain.js
@@ -9,7 +9,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
var $config = (function() {
-
var data = {
collNotExist: 'donotexist__',
nInserted: 0,
@@ -52,7 +51,6 @@ var $config = (function() {
}
return {insert: insert, explain: explain, explainNonExistentNS: explainNonExistentNS};
-
})();
var transitions = {
diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js
index 5aa776ab84b..38370a4e346 100644
--- a/jstests/concurrency/fsm_workloads/explain_aggregate.js
+++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
function assertCursorStages(num, obj) {
assertAlways(obj.stages, tojson(obj));
assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js
index 920b12ab9b6..6e30cd442da 100644
--- a/jstests/concurrency/fsm_workloads/explain_count.js
+++ b/jstests/concurrency/fsm_workloads/explain_count.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMong
load('jstests/libs/analyze_plan.js'); // for planHasStage
var $config = extendWorkload($config, function($config, $super) {
-
function assertNCounted(num, obj, db) {
var stage = obj.executionStats.executionStages;
// get sharded stage(s) if counting on mongos
diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js
index 434fb3aa041..ac39881e4d8 100644
--- a/jstests/concurrency/fsm_workloads/explain_find.js
+++ b/jstests/concurrency/fsm_workloads/explain_find.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
var $config = extendWorkload($config, function($config, $super) {
-
$config.states = Object.extend({
explainLimit: function explainLimit(db, collName) {
var res = db[collName].find().limit(3).explain();
diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js
index 971a39030c6..d74693d7f93 100644
--- a/jstests/concurrency/fsm_workloads/explain_remove.js
+++ b/jstests/concurrency/fsm_workloads/explain_remove.js
@@ -9,7 +9,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states = Object.extend({
explainSingleRemove: function explainSingleRemove(db, collName) {
var res = db[collName]
diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js
index 92e39f13adc..63d89428942 100644
--- a/jstests/concurrency/fsm_workloads/explain_update.js
+++ b/jstests/concurrency/fsm_workloads/explain_update.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_workloads/explain.js'); // for $confi
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = extendWorkload($config, function($config, $super) {
-
$config.states = Object.extend({
explainBasicUpdate: function explainBasicUpdate(db, collName) {
var res =
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
index 7612d91280f..501aed05be6 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
@@ -85,5 +85,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
index 19cd191b1aa..9be47a02525 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
@@ -22,7 +22,6 @@ load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
$config.data.uniqueDBName = 'findAndModify_mixed_queue_unindexed';
@@ -81,7 +80,6 @@ var $config = extendWorkload($config, function($config, $super) {
remove: remove,
update: update,
};
-
})();
$config.transitions = {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
index b4a32a3cc74..bcca3834c52 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -7,11 +7,9 @@
* the findAndModify command to remove it.
*/
var $config = (function() {
-
var data = {shardKey: {tid: 1}};
var states = (function() {
-
function init(db, collName) {
this.iter = 0;
}
@@ -41,11 +39,9 @@ var $config = (function() {
}
return {init: init, insertAndRemove: insertAndRemove};
-
})();
var transitions = {init: {insertAndRemove: 1}, insertAndRemove: {insertAndRemove: 1}};
return {threadCount: 20, iterations: 20, data: data, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
index 9b945468cf5..c97ac6eb10a 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -15,7 +15,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
var data = {
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
@@ -57,7 +56,6 @@ var $config = (function() {
};
var states = (function() {
-
function remove(db, collName) {
var res = db.runCommand(
{findAndModify: db[collName].getName(), query: {}, sort: {rand: -1}, remove: true});
@@ -78,7 +76,6 @@ var $config = (function() {
}
return {remove: remove};
-
})();
var transitions = {remove: {remove: 1}};
@@ -194,5 +191,4 @@ var $config = (function() {
setup: setup,
teardown: teardown
};
-
})();
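The remove state in the findAndModify_remove_queue.js hunk above treats the collection as a priority queue: findAndModify atomically selects the document with the largest 'rand' value and removes it. A minimal sketch with an illustrative collection name:

    const res = db.runCommand({
        findAndModify: 'work_queue',  // illustrative collection name
        query: {},
        sort: {rand: -1},             // highest 'rand' value first
        remove: true
    });
    assert.commandWorked(res);
    const doc = res.value;            // null once the queue is empty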
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
index 387c5467f04..981568904ad 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
@@ -17,7 +17,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for
load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the database name, since the workload
// name is assumed to be unique.
$config.data.uniqueDBName = 'findAndModify_remove_queue_unindexed';
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index 16aa80b8a33..03e391409ae 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -9,14 +9,12 @@
* using either the $min or $max operator.
*/
var $config = (function() {
-
var data = {
numDocsPerThread: 3, // >1 for 'sort' to be meaningful
shardKey: {tid: 1}
};
var states = (function() {
-
function makeDoc(tid) {
return {_id: new ObjectId(), tid: tid, value: 0};
}
@@ -76,7 +74,6 @@ var $config = (function() {
findAndModifyAscending: findAndModifyAscending,
findAndModifyDescending: findAndModifyDescending
};
-
})();
var transitions = {
@@ -98,5 +95,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
index d1c8134bd39..aba96be9648 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -14,7 +14,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for exten
load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Do not create the { tid: 1, value: 1 } index so that a collection
// scan is performed for the query and sort operations.
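extendWorkload, used throughout these files, layers a derived workload on a base one: roughly, it copies the base $config, hands the copy and the original (as $super) to the callback, and adopts whatever the callback returns. A minimal sketch, assuming the loaded base file defines $config:

load('jstests/concurrency/fsm_libs/extend_workload.js');            // for extendWorkload
load('jstests/concurrency/fsm_workloads/findAndModify_update.js');  // for $config

var $config = extendWorkload($config, function($config, $super) {
    $config.data.indexedField = 'derived_workload_name';  // hypothetical override
    $config.states.init = function init(db, collName) {
        $super.states.init.apply(this, arguments);  // keep the base behavior
    };
    return $config;
});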
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index fd49788830c..68de0be1cc1 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -13,13 +13,11 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
var $config = (function() {
-
var data = {
shardKey: {tid: 1},
};
var states = (function() {
-
// Use the workload name as the field name (since it is assumed
// to be unique) to avoid any potential issues with large keys
// and indexes on the collection.
@@ -109,7 +107,6 @@ var $config = (function() {
insert: insert,
findAndModify: findAndModify,
};
-
})();
var transitions = {insert: {findAndModify: 1}, findAndModify: {findAndModify: 1}};
@@ -122,5 +119,4 @@ var $config = (function() {
startState: 'insert',
transitions: transitions
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
index 1d82f4b7eb2..550ad25c809 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
@@ -18,7 +18,6 @@ load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
$config.data.uniqueDBName = 'findAndModify_update_queue';
@@ -34,7 +33,6 @@ var $config = extendWorkload($config, function($config, $super) {
$config.data.opName = 'updated';
var states = (function() {
-
function update(db, collName) {
// Update the counter field to avoid matching the same document again.
var res = db.runCommand({
@@ -61,7 +59,6 @@ var $config = extendWorkload($config, function($config, $super) {
}
return {update: update};
-
})();
var transitions = {update: {update: 1}};
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
index cda9a494a61..c70b80058f0 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
@@ -17,7 +17,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for
load('jstests/concurrency/fsm_workloads/findAndModify_update_queue.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the database name, since the workload
// name is assumed to be unique.
$config.data.uniqueDBName = 'findAndModify_update_queue_unindexed';
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
index e79b5322bc4..e9b06e7afb7 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
@@ -9,11 +9,9 @@
* $push operator.
*/
var $config = (function() {
-
var data = {sort: false, shardKey: {tid: 1}};
var states = (function() {
-
// Returns true if the specified array is sorted in ascending order,
// and false otherwise.
function isSorted(arr) {
@@ -101,7 +99,6 @@ var $config = (function() {
}
return {init: init, upsert: upsert, update: update};
-
})();
var transitions = {
@@ -111,5 +108,4 @@ var $config = (function() {
};
return {threadCount: 20, iterations: 20, data: data, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
index 8751e99fd21..0cbfbd3ab21 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
@@ -14,7 +14,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for exten
load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.sort = {$natural: 1};
return $config;
diff --git a/jstests/concurrency/fsm_workloads/globally_managed_cursors.js b/jstests/concurrency/fsm_workloads/globally_managed_cursors.js
index 67ee596e782..42e4abfa3e2 100644
--- a/jstests/concurrency/fsm_workloads/globally_managed_cursors.js
+++ b/jstests/concurrency/fsm_workloads/globally_managed_cursors.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.listCollections = function listCollections(unusedDB, _) {
const db = unusedDB.getSiblingDB(this.uniqueDBName);
const cmdRes =
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
index 54fe0662cb4..b264b6561a5 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_1char';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
index a461f9cb310..d4ec75e992d 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_2d';
// Remove the shard key for 2d indexes, as they are not supported
delete $config.data.shardKey;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
index 40134e97840..20ac7b4b588 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_2dsphere';
$config.data.getIndexSpec = function getIndexSpec() {
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index b48ac0fbf1d..6b44042e59f 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -8,7 +8,6 @@
* value is the thread's id.
*/
var $config = (function() {
-
function makeSortSpecFromIndexSpec(ixSpec) {
var sort = {};
@@ -102,5 +101,4 @@ var $config = (function() {
},
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
index c704b6dd0bc..b7a9b761b89 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.init = function init(db, collName) {
$super.states.init.apply(this, arguments);
};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
index b486120185d..5306facb834 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_heterogeneous';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large.js b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
index cb2dbf58b21..55dd1daf4dc 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_large';
// Remove the shard key, since it cannot be greater than 512 bytes
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
index c5cc3af152d..06e92f5907f 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// The indexedField must be limited such that the namespace and indexedField do not
// exceed 128 characters. The namespace defaults to "test<i>_fsmdb<j>.fsmcoll<k>",
// where i, j & k are increasing integers for each test, workload and thread.
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
index 34c28db22eb..4343de2d2be 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_multikey';
// Remove the shard key, since it cannot be a multikey index
delete $config.data.shardKey;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
index 38998cd9f59..4fc72a6cd0c 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_ordered_bulk';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index 82e0feb09a8..7967bd30811 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -6,7 +6,6 @@
* Inserts some documents into a collection with a text index.
*/
var $config = (function() {
-
var states = {
init: function init(db, collName) {
// noop
@@ -126,5 +125,4 @@ var $config = (function() {
},
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
index b527ef016f8..a665b7e61ac 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
@@ -9,7 +9,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.init = function init(db, collName) {
$super.states.init.apply(this, arguments);
};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index 2b0afd43d96..4cb203311ca 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -9,7 +9,6 @@
* doc inserted by each thread is no longer in the collection.
*/
var $config = (function() {
-
var states = {
init: function init(db, collName) {
var res = db[collName].insert({indexed_insert_ttl: new ISODate(), first: true});
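indexed_insert_ttl.js depends on a TTL index: a date-valued field plus the expireAfterSeconds index option make old documents eligible for removal by the TTL monitor, which by default wakes about once a minute. A hedged sketch with illustrative values:

// Documents whose `indexed_insert_ttl` date is more than 5 seconds old become
// eligible for deletion the next time the TTL monitor runs.
assertAlways.commandWorked(
    db[collName].createIndex({indexed_insert_ttl: 1}, {expireAfterSeconds: 5}));
assertAlways.writeOK(db[collName].insert({indexed_insert_ttl: new ISODate(), first: true}));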
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
index 3c1ea8f0ea0..233f630a8b4 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_unordered_bulk';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
index bc1b65e9597..e4b59b5c3af 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
@@ -14,7 +14,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.indexedField = 'indexed_insert_upsert';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
index e5d2a98b8c5..b44967d3550 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -10,7 +10,6 @@
*/
var $config = (function() {
-
var data = {
documentsToInsert: 100,
insertedDocuments: 0,
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js b/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js
index c1f08b7a876..3c22b636faf 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js
@@ -23,8 +23,7 @@ var $config = extendWorkload($config, function($config, $super) {
threadIdInArray: [this.tid],
nestedThreadId: {threadId: this.tid},
arrayField: [this.tid, "a string", [1, 2, 3]],
- fieldWithNestedObject:
- {nestedDoc: {subNestedDoc: {leaf: "a string"}}, leaf: "a string"}
+ fieldWithNestedObject: {nestedDoc: {subNestedDoc: {leaf: "a string"}}, leaf: "a string"}
};
};
diff --git a/jstests/concurrency/fsm_workloads/invalidated_cursors.js b/jstests/concurrency/fsm_workloads/invalidated_cursors.js
index 777ee30eab1..58a3f007aaa 100644
--- a/jstests/concurrency/fsm_workloads/invalidated_cursors.js
+++ b/jstests/concurrency/fsm_workloads/invalidated_cursors.js
@@ -13,7 +13,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = (function() {
-
let data = {
chooseRandomlyFrom: function chooseRandomlyFrom(arr) {
if (!Array.isArray(arr)) {
@@ -162,11 +161,11 @@ var $config = (function() {
// killOp.
assertAlways.contains(e.code,
[
- ErrorCodes.OperationFailed,
- ErrorCodes.QueryPlanKilled,
- ErrorCodes.CursorNotFound,
- ErrorCodes.CursorKilled,
- ErrorCodes.Interrupted,
+ ErrorCodes.OperationFailed,
+ ErrorCodes.QueryPlanKilled,
+ ErrorCodes.CursorNotFound,
+ ErrorCodes.CursorKilled,
+ ErrorCodes.Interrupted,
],
'unexpected error code: ' + e.code + ': ' + e.message);
}
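The invalidated_cursors hunk only re-indents the error-code whitelist, but the pattern matters: a getMore racing with a drop, killCursors, or killOp may legitimately fail with any one of several codes, so the workload asserts membership in the set rather than a single code. The guard looks roughly like:

try {
    cursor.itcount();  // issue getMores until the cursor is exhausted
} catch (e) {
    // Any of these is an acceptable way for a killed cursor to surface.
    assertAlways.contains(e.code,
                          [
                              ErrorCodes.OperationFailed,
                              ErrorCodes.QueryPlanKilled,
                              ErrorCodes.CursorNotFound,
                              ErrorCodes.CursorKilled,
                              ErrorCodes.Interrupted,
                          ],
                          'unexpected error code: ' + e.code + ': ' + e.message);
}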
diff --git a/jstests/concurrency/fsm_workloads/kill_aggregation.js b/jstests/concurrency/fsm_workloads/kill_aggregation.js
index 3b4e46b4b12..e3dcc312600 100644
--- a/jstests/concurrency/fsm_workloads/kill_aggregation.js
+++ b/jstests/concurrency/fsm_workloads/kill_aggregation.js
@@ -14,7 +14,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkl
load('jstests/concurrency/fsm_workloads/kill_rooted_or.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the collection name, since the workload name is assumed to be
// unique. Note that we choose our own collection name instead of using the collection provided
// by the concurrency framework, because this workload drops its collection.
diff --git a/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js b/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
index 6db7689ad3b..0cbc7193f17 100644
--- a/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
+++ b/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
@@ -16,7 +16,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/invalidated_cursors.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/**
* Runs the specified aggregation pipeline and stores the resulting cursor (if the command
* is successful) in 'this.cursor'.
diff --git a/jstests/concurrency/fsm_workloads/kill_rooted_or.js b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
index 6f6e72bb67f..7cd4abdd735 100644
--- a/jstests/concurrency/fsm_workloads/kill_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
@@ -10,7 +10,6 @@
* This workload was designed to reproduce SERVER-24761.
*/
var $config = (function() {
-
// Use the workload name as the collection name, since the workload name is assumed to be
// unique. Note that we choose our own collection name instead of using the collection provided
// by the concurrency framework, because this workload drops its collection.
@@ -110,5 +109,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/list_indexes.js b/jstests/concurrency/fsm_workloads/list_indexes.js
index 17726286473..ba5549e1e68 100644
--- a/jstests/concurrency/fsm_workloads/list_indexes.js
+++ b/jstests/concurrency/fsm_workloads/list_indexes.js
@@ -7,7 +7,6 @@
* index catalog.
*/
var $config = (function() {
-
var states = (function() {
// Picks a random index to drop and recreate.
function modifyIndices(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
index 17900fb74a1..9066bca8375 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
@@ -17,7 +17,6 @@
* ]
*/
var $config = (function() {
-
var data = {
mapper: function mapper() {
emit(this.key, 1);
@@ -31,7 +30,6 @@ var $config = (function() {
};
var states = (function() {
-
function dropColl(db, collName) {
var mapReduceDb = db.getSiblingDB(this.mapReduceDBName);
@@ -78,7 +76,6 @@ var $config = (function() {
}
return {dropColl: dropColl, dropDB: dropDB, mapReduce: mapReduce};
-
})();
var transitions = {
@@ -100,5 +97,4 @@ var $config = (function() {
startState: 'mapReduce',
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 22589f1afad..203fecbe6a8 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -14,7 +14,6 @@
* ]
*/
var $config = (function() {
-
function mapper() {
if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
var obj = {};
@@ -45,7 +44,6 @@ var $config = (function() {
var data = {numDocs: 2000, mapper: mapper, reducer: reducer, finalizer: finalizer};
var states = (function() {
-
function init(db, collName) {
// no-op
// other workloads that extend this workload use this method
@@ -59,7 +57,6 @@ var $config = (function() {
}
return {init: init, mapReduce: mapReduce};
-
})();
var transitions = {init: {mapReduce: 1}, mapReduce: {mapReduce: 1}};
@@ -93,5 +90,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
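map_reduce_inline.js exercises mapReduce with inline output, so results come back in the command reply rather than being written to an output collection. A condensed sketch of the call the states above build up (mapper and reducer simplified):

var res = db[collName].mapReduce(
    function map() {
        emit(this.key, {count: 1});
    },
    function reduce(key, values) {
        var total = 0;
        values.forEach(function(v) {
            total += v.count;
        });
        return {count: total};
    },
    {out: {inline: 1}});  // results are returned in res.results
assertAlways.commandWorked(res);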
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 125a2b35261..2af72dca1a3 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -21,7 +21,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the database name,
// since the workload name is assumed to be unique.
var uniqueDBName = 'map_reduce_merge';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
index 32b324b08bf..ffe5a4fa8fd 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
@@ -21,7 +21,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as a prefix for the database name,
// since the workload name is assumed to be unique.
var prefix = 'map_reduce_merge_nonatomic';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index 4cb9d8241ca..86d3733df9c 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -19,7 +19,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
var prefix = 'map_reduce_reduce';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
index 286e2023c66..772eb6cd13f 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
@@ -22,7 +22,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as the collection name,
// since the workload name is assumed to be unique.
var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index 0eda604b7ec..ec707fbbe53 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -19,7 +19,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
var prefix = 'map_reduce_replace';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index 623a7b2a936..51b962f7561 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -18,7 +18,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
$config.data.prefix = 'map_reduce_replace_nonexistent';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
index bf9768d9639..59c6870b7b5 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
@@ -19,7 +19,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendW
load('jstests/concurrency/fsm_workloads/map_reduce_replace.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.remove = function remove(db, collName) {
for (var i = 0; i < 20; ++i) {
var res = db[collName].remove({value: {$gte: Random.randInt(this.numDocs / 10)}},
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js
index 0a6d74b51d9..81cc596e228 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js
@@ -7,7 +7,6 @@
*/
load('jstests/concurrency/fsm_workload_helpers/cleanup_txns.js');
var $config = (function() {
-
function quietly(func) {
const printOriginal = print;
try {
@@ -216,5 +215,4 @@ var $config = (function() {
setup: setup,
teardown: teardown
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
index 11aaac9cdac..2ecd324f04d 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands
// $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
this.lsid = tojson({id: UUID()});
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js
index 56a8b5d3a13..79e9f3efd11 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js
@@ -53,7 +53,6 @@ load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
load("jstests/aggregation/extras/utils.js");
var $config = (function() {
-
function checkTransactionCommitOrder(documents) {
const graph = new Graph();
@@ -95,9 +94,7 @@ var $config = (function() {
}
}
- for (let {
- op, actual
- } of updateCounts.values()) {
+ for (let {op, actual} of updateCounts.values()) {
assert.eq(op.numUpdated, actual, () => {
return 'transaction ' + tojson(op) + ' should have updated ' + op.numUpdated +
' documents, but ' + actual + ' were updated: ' + tojson(updateCounts.values());
@@ -174,8 +171,8 @@ var $config = (function() {
assertWhenOwnColl.eq(allDocuments.length, numDocs * this.collections.length, () => {
if (this.session) {
return "txnNumber: " + tojson(this.session.getTxnNumber_forTesting()) +
- ", session id: " + tojson(this.session.getSessionId()) + ", all documents: " +
- tojson(allDocuments);
+ ", session id: " + tojson(this.session.getSessionId()) +
+ ", all documents: " + tojson(allDocuments);
}
return "all documents: " + tojson(allDocuments);
});
@@ -192,7 +189,6 @@ var $config = (function() {
}
const states = (function() {
-
return {
init: function init(db, collName) {
this.iteration = 0;
@@ -367,5 +363,4 @@ var $config = (function() {
setup: setup,
teardown: teardown,
};
-
})();
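The destructuring loop clang-format collapses above iterates a Map keyed by transaction, where each value pairs the recorded operation with the observed update count. The shape, reduced to its essentials (entry contents hypothetical):

var updateCounts = new Map();
updateCounts.set('txn-1', {op: {numUpdated: 2}, actual: 2});  // hypothetical entry
for (let {op, actual} of updateCounts.values()) {
    assert.eq(op.numUpdated, actual);  // each txn updated as many docs as it claimed
}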
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
index 96f25774664..b4e22f46dc2 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -13,7 +13,6 @@ load('jstests/core/txns/libs/prepare_helpers.js');
// for $config
var $config = extendWorkload($config, function($config, $super) {
-
/**
* Returns all elements in the given array that evaluate to false for the given predicate
* function 'predFn'.
@@ -163,7 +162,6 @@ var $config = extendWorkload($config, function($config, $super) {
};
$config.states.checkInvariants = function checkInvariants(db, collName) {
-
// Check server-wide invariants using 100 samples. This sample size is deemed big enough to
// account for transient inconsistencies, which we assume are rare.
let nSamples = 100;
@@ -179,7 +177,6 @@ var $config = extendWorkload($config, function($config, $super) {
let timeOpen = Number(txnStats["timeOpenMicros"]);
assertAlways.eq(timeActive + timeInactive, timeOpen, () => tojson(txnStats));
});
-
};
$config.transitions = {
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js
index 417cf05aa41..7946b83b2a2 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js');
var $config = extendWorkload($config, ($config, $super) => {
-
// Number of unique collections and number of unique databases. The square root is used
// here to ensure the total number of namespaces (coll * db) is roughly equal to the
// number of threads.
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js
index 877016bea2b..72661493d5d 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_is
// for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.numReads = 5;
$config.states.repeatedRead = function repeatedRead(db, collName) {
@@ -26,17 +25,17 @@ var $config = extendWorkload($config, function($config, $super) {
const collectionDocs = collection.find().batchSize(batchSize).toArray();
assertWhenOwnColl.eq(this.numDocs, collectionDocs.length, () => {
return "txnNumber: " + tojson(this.session.getTxnNumber_forTesting()) +
- ", session id: " + tojson(this.session.getSessionId()) + ", read number: " +
- i + ", collection docs: " + tojson(collectionDocs);
+ ", session id: " + tojson(this.session.getSessionId()) +
+ ", read number: " + i + ", collection docs: " + tojson(collectionDocs);
});
if (prevDocuments) {
- assertAlways.sameMembers(prevDocuments,
- collectionDocs,
- () => "Document mismatch - previous documents: " +
- tojsononeline(prevDocuments) +
- ", current documents: " +
- tojsononeline(collectionDocs),
- bsonBinaryEqual); // Exact document matches.
+ assertAlways.sameMembers(
+ prevDocuments,
+ collectionDocs,
+ () => "Document mismatch - previous documents: " +
+ tojsononeline(prevDocuments) +
+ ", current documents: " + tojsononeline(collectionDocs),
+ bsonBinaryEqual); // Exact document matches.
}
prevDocuments = collectionDocs;
}
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js
index 27c3cef5181..adf4feba8b0 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js
@@ -12,13 +12,11 @@
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
var $config = (function() {
-
function computeTotalOfAllBalances(documents) {
return documents.reduce((total, account) => total + account.balance, 0);
}
var states = (function() {
-
function getAllDocuments(session, collection, numDocs, txnHelperOptions) {
let documents;
withTxnAndAutoRetry(session, () => {
@@ -119,5 +117,4 @@ var $config = (function() {
setup: setup,
teardown: teardown
};
-
})();
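multi_statement_transaction_simple.js funnels every read and write through withTxnAndAutoRetry from auto_retry_transaction.js, which runs the callback inside a transaction on the session and re-runs it when the server labels the failure transient. A minimal sketch with a hypothetical two-account transfer:

load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');

var session = db.getMongo().startSession({causalConsistency: true});
withTxnAndAutoRetry(session, () => {
    // Both updates commit or abort together, so the callback must be
    // safe to re-run from the top if the transaction is retried.
    var coll = session.getDatabase(db.getName()).getCollection('accounts');
    assertAlways.writeOK(coll.update({_id: 0}, {$inc: {balance: -10}}));
    assertAlways.writeOK(coll.update({_id: 1}, {$inc: {balance: 10}}));
});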
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
index 2e52765014c..0c097015c46 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js');
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.majorityWriteCollName = 'majority_writes';
$config.data.counter = 0;
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js
index a6a821e207a..93d89bbce0f 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.numReads = 5;
$config.states.repeatedRead = function repeatedRead(db, collName) {
@@ -22,13 +21,13 @@ var $config = extendWorkload($config, function($config, $super) {
assertWhenOwnColl.eq(
this.numAccounts, collectionDocs.length, () => tojson(collectionDocs));
if (prevDocuments) {
- assertAlways.sameMembers(prevDocuments,
- collectionDocs,
- () => "Document mismatch - previous documents: " +
- tojsononeline(prevDocuments) +
- ", current documents: " +
- tojsononeline(collectionDocs),
- bsonBinaryEqual); // Exact document matches.
+ assertAlways.sameMembers(
+ prevDocuments,
+ collectionDocs,
+ () => "Document mismatch - previous documents: " +
+ tojsononeline(prevDocuments) +
+ ", current documents: " + tojsononeline(collectionDocs),
+ bsonBinaryEqual); // Exact document matches.
}
prevDocuments = collectionDocs;
}
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
index a744c6b888a..1ab8a48212c 100644
--- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -9,7 +9,6 @@
* the updating of said object's PlanCache (SERVER-17117).
*/
var $config = (function() {
-
function populateData(db, collName) {
var coll = db[collName];
@@ -28,7 +27,6 @@ var $config = (function() {
}
var states = (function() {
-
function count(db, collName) {
var coll = db.getSiblingDB(this.planCacheDBName)[collName];
@@ -52,7 +50,6 @@ var $config = (function() {
}
return {count: count, dropDB: dropDB};
-
})();
var transitions = {count: {count: 0.95, dropDB: 0.05}, dropDB: {count: 0.95, dropDB: 0.05}};
@@ -72,5 +69,4 @@ var $config = (function() {
transitions: transitions,
setup: setup,
};
-
})();
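plan_cache_drop_database.js is also a compact illustration of weighted transitions: the numbers on each edge are the probabilities the runner uses when choosing the next state, so each thread mostly counts and occasionally drops the database out from under the plan cache:

// From either state: 95% chance to run `count` next, 5% chance to drop the DB.
var transitions = {
    count: {count: 0.95, dropDB: 0.05},
    dropDB: {count: 0.95, dropDB: 0.05},
};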
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
index bbafcd93a94..866d21a6b72 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js');
load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js');
var $config = extendWorkload($config, function($config, $super) {
-
$config.threadCount = 1;
$config.iterations = 1;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
index e4a80f533a6..6f62354216f 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
var $config = extendWorkload($config, function($config, $super) {
-
$config.threadCount = 5;
$config.iterations = 50;
@@ -122,8 +121,8 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.verifyDocuments = function verifyDocuments(db, collName, connCache) {
const docs = db[collName].find({tid: this.tid}).toArray();
assertWhenOwnColl.eq(this.expectedDocuments.length, docs.length, () => {
- return 'unexpected number of documents, docs: ' + tojson(docs) + ', expected docs: ' +
- tojson(this.expectedDocuments);
+ return 'unexpected number of documents, docs: ' + tojson(docs) +
+ ', expected docs: ' + tojson(this.expectedDocuments);
});
// Verify only the documents we haven't tried to delete were found.
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
index c3e6fedba5c..706657b9631 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
var $config = extendWorkload($config, function($config, $super) {
-
$config.threadCount = 5;
$config.iterations = 50;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
index 0fcafa31ca6..0b22a7909a9 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
@@ -12,7 +12,6 @@ load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
var $config = extendWorkload($config, function($config, $super) {
-
$config.threadCount = 5;
$config.iterations = 50;
@@ -149,10 +148,10 @@ var $config = extendWorkload($config, function($config, $super) {
: " as a retryable write. ";
logString += "The document will ";
logString += moveAcrossChunks ? "move across chunks. " : "stay within the same chunk. \n";
- logString += "Original document values -- id: " + idToUpdate + ", shardKey: " +
- currentShardKey + ", counter: " + counterForId + "\n";
- logString += "Intended new document values -- shardKey: " + newShardKey + ", counter: " +
- (counterForId + 1);
+ logString += "Original document values -- id: " + idToUpdate +
+ ", shardKey: " + currentShardKey + ", counter: " + counterForId + "\n";
+ logString += "Intended new document values -- shardKey: " + newShardKey +
+ ", counter: " + (counterForId + 1);
jsTestLog(logString);
};
@@ -163,15 +162,15 @@ var $config = extendWorkload($config, function($config, $super) {
logString += "Find by old shard key (should be empty): " +
tojson(collection.find({skey: currentShardKey}).toArray()) + "\n";
logString += "Find by _id: " + tojson(collection.find({_id: idToUpdate}).toArray()) + "\n";
- logString += "Find by new shard key: " +
- tojson(collection.find({skey: newShardKey}).toArray()) + "\n";
+ logString +=
+ "Find by new shard key: " + tojson(collection.find({skey: newShardKey}).toArray()) +
+ "\n";
jsTestLog(logString);
};
$config.data.findAndModifyShardKey = function findAndModifyShardKey(
db, collName, {wrapInTransaction, moveAcrossChunks} = {}) {
-
const collection = this.session.getDatabase(db.getName()).getCollection(collName);
const shardKeyField = $config.data.shardKeyField;
diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
index cc2fa5e6562..0401126e907 100644
--- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
@@ -10,7 +10,6 @@
* accessed after a WriteConflictException occurred in Collection::deleteDocument().
*/
var $config = (function() {
-
var states = {
insert: function insert(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
@@ -35,5 +34,4 @@ var $config = (function() {
states: states,
transitions: transitions
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
index 5370f999975..c349dc20874 100644
--- a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
+++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -13,7 +13,6 @@
* @tags: [assumes_balancer_off]
*/
var $config = (function() {
-
var states = {
init: function init(db, collName) {
this.numDocs = 200;
@@ -42,5 +41,4 @@ var $config = (function() {
var transitions = {init: {count: 1}, count: {remove: 1}, remove: {remove: 0.825, count: 0.125}};
return {threadCount: 10, iterations: 20, states: states, transitions: transitions};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index 1bfdb2b6897..c2d3831f223 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -6,7 +6,6 @@
* Repeatedly remove a document from the collection.
*/
var $config = (function() {
-
var states = {
remove: function remove(db, collName) {
// try removing a random document
@@ -53,5 +52,4 @@ var $config = (function() {
},
startState: 'remove'
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index 0cd43e0e737..860aa5ea67f 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -11,7 +11,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -19,7 +18,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueCollectionName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -43,7 +41,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -55,5 +52,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index 257b603d1fb..76bdd80f5a3 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -11,7 +11,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -19,7 +18,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueDBName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -56,7 +54,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -68,5 +65,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index 10c34653855..72d966bb7a3 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -11,7 +11,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -19,7 +18,6 @@ var $config = (function() {
};
var states = (function() {
-
var options = {capped: true, size: 4096};
function uniqueDBName(prefix, tid, num) {
@@ -79,7 +77,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -91,5 +88,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index 1291f130aae..9d2f0b2ac45 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -11,7 +11,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -19,7 +18,6 @@ var $config = (function() {
};
var states = (function() {
-
var options = {capped: true, size: 4096};
function uniqueCollectionName(prefix, tid, num) {
@@ -71,7 +69,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -83,5 +80,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
index e73f7a20959..b933d24e4d0 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -9,7 +9,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -17,7 +16,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueDBName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -49,7 +47,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -61,5 +58,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index fdc2bcf8766..e9063730aea 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -9,7 +9,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -17,7 +16,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueDBName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -72,7 +70,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -84,5 +81,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index 550b3e0ae22..91a810015ae 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -9,7 +9,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -17,7 +16,6 @@ var $config = (function() {
};
var states = (function() {
-
function uniqueCollectionName(prefix, tid, num) {
return prefix + tid + '_' + num;
}
@@ -64,7 +62,6 @@ var $config = (function() {
}
return {init: init, rename: rename};
-
})();
var transitions = {init: {rename: 1}, rename: {rename: 1}};
@@ -76,5 +73,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads.js b/jstests/concurrency/fsm_workloads/secondary_reads.js
index 4970013b56d..f04ec6e75dd 100644
--- a/jstests/concurrency/fsm_workloads/secondary_reads.js
+++ b/jstests/concurrency/fsm_workloads/secondary_reads.js
@@ -20,7 +20,6 @@
*/
var $config = (function() {
-
// Use the workload name as the collection name.
var uniqueCollectionName = 'secondary_reads';
@@ -86,7 +85,6 @@ var $config = (function() {
}
var states = (function() {
-
// One thread is dedicated to writing and other threads perform reads on
// secondaries with a randomly chosen readConcern level.
function readFromSecondaries(db, collName) {
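secondary_reads.js dedicates one thread to writes while the rest read from secondaries under a randomly chosen readConcern level. A hedged sketch of such a read, with an assumed set of levels (the workload's actual choices may differ):

Random.setRandomSeed();
var mongo = db.getMongo();
mongo.setReadPref('secondary');  // route the find to a secondary

var levels = ['local', 'available', 'majority'];  // assumed candidate levels
var level = levels[Random.randInt(levels.length)];
var docs = db.secondary_reads.find().readConcern(level).toArray();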
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
index 18218e00978..f98039909f9 100644
--- a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
+++ b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
@@ -25,7 +25,6 @@ load('jstests/concurrency/fsm_workloads/secondary_reads.js'); // for $config
* @tags: [creates_background_indexes, requires_replication, uses_write_concern]
*/
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.buildIndex = function buildIndex(db, collName) {
if (this.isWriterThread(this.tid)) {
this.insertDocuments(db, this.collName);
diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js
index fa3c8cbbeef..0c95bfe0ff3 100644
--- a/jstests/concurrency/fsm_workloads/server_status.js
+++ b/jstests/concurrency/fsm_workloads/server_status.js
@@ -6,7 +6,6 @@
* Simply checks that the serverStatus command works
*/
var $config = (function() {
-
var states = {
status: function status(db, collName) {
var opts =
diff --git a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
index 018951ea053..7d16f61dd8d 100644
--- a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
@@ -24,7 +24,6 @@
load('jstests/concurrency/fsm_workload_helpers/chunks.js'); // for chunk helpers
var $config = (function() {
-
var data = {
partitionSize: 1,
// We use a non-hashed shard key of { _id: 1 } so that documents reside on their expected
@@ -82,8 +81,8 @@ var $config = (function() {
return coll
.aggregate([
{
- $match:
- {ns: this.partition.ns, [maxField]: {$lte: this.partition.chunkUpper}}
+ $match:
+ {ns: this.partition.ns, [maxField]: {$lte: this.partition.chunkUpper}}
},
{$sample: {size: 1}}
])
@@ -92,8 +91,8 @@ var $config = (function() {
return coll
.aggregate([
{
- $match:
- {ns: this.partition.ns, [minField]: {$gte: this.partition.chunkLower}}
+ $match:
+ {ns: this.partition.ns, [minField]: {$gte: this.partition.chunkLower}}
},
{$sample: {size: 1}}
])
@@ -102,11 +101,11 @@ var $config = (function() {
return coll
.aggregate([
{
- $match: {
- ns: this.partition.ns,
- [minField]: {$gte: this.partition.chunkLower},
- [maxField]: {$lte: this.partition.chunkUpper}
- }
+ $match: {
+ ns: this.partition.ns,
+ [minField]: {$gte: this.partition.chunkLower},
+ [maxField]: {$lte: this.partition.chunkUpper}
+ }
},
{$sample: {size: 1}}
])
@@ -115,8 +114,8 @@ var $config = (function() {
};
// This is used by the extended workloads to perform additional setup for more splitPoints.
- data.setupAdditionalSplitPoints = function setupAdditionalSplitPoints(db, collName, partition) {
- };
+ data.setupAdditionalSplitPoints = function setupAdditionalSplitPoints(
+ db, collName, partition) {};
var states = (function() {
// Inform this thread about its partition,
@@ -185,7 +184,6 @@ var $config = (function() {
this.setupAdditionalSplitPoints(db, collName, partition);
}
-
};
return {
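The re-indented $match stages above rely on ES6 computed property names so one helper can filter config.chunks by the lower bound, the upper bound, or both, before $sample draws one chunk uniformly at random from the thread's partition. Condensed, with the surrounding workload state assumed:

// `coll` is config.chunks; `partition` carries this thread's bounds.
var chunk = coll.aggregate([
                    {
                        $match: {
                            ns: partition.ns,
                            [minField]: {$gte: partition.chunkLower},  // computed keys
                            [maxField]: {$lte: partition.chunkUpper}
                        }
                    },
                    {$sample: {size: 1}}  // one matching chunk, uniformly at random
                ])
                .toArray()[0];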
diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
index 9ac083818b7..6e05411f345 100644
--- a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for e
load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.iterations = 8;
$config.threadCount = 5;
@@ -250,7 +249,6 @@ var $config = extendWorkload($config, function($config, $super) {
assertWhenOwnColl.eq(numChunksAfter, numChunksBefore, msg);
}
}
-
};
$config.transitions = {init: {mergeChunks: 1}, mergeChunks: {mergeChunks: 1}};
diff --git a/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js b/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
index fd03bf37e48..70515b7eaa9 100644
--- a/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
+++ b/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
@@ -11,7 +11,6 @@
*/
var $config = (function() {
-
var data = {numSplitPoints: 100, shardKey: {key: 1}};
var states = {
@@ -79,5 +78,4 @@ var $config = (function() {
transitions: transitions,
setup: setup,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
index cf2eb18ead9..f3c26e8d057 100644
--- a/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for e
load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.iterations = 5;
$config.threadCount = 5;
@@ -112,8 +111,8 @@ var $config = extendWorkload($config, function($config, $super) {
// shard with the toShard. If the operation failed, verify that the config kept
// the chunk's shard as the fromShard.
var chunkAfter = conn.getDB('config').chunks.findOne({_id: chunk._id});
- var msg = msgBase + '\nchunkBefore: ' + tojson(chunk) + '\nchunkAfter: ' +
- tojson(chunkAfter);
+ var msg = msgBase + '\nchunkBefore: ' + tojson(chunk) +
+ '\nchunkAfter: ' + tojson(chunkAfter);
if (moveChunkRes.ok) {
msg = "moveChunk succeeded but chunk's shard was not new shard.\n" + msg;
assertWhenOwnColl.eq(chunkAfter.shard, toShard, msg);
diff --git a/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
index 8a32ea24f97..97d6f6df6e8 100644
--- a/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for e
load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.iterations = 5;
$config.threadCount = 5;
@@ -23,7 +22,6 @@ var $config = extendWorkload($config, function($config, $super) {
// in the cluster affected by the splitChunk operation sees the appropriate
// after-state regardless of whether the operation succeeded or failed.
$config.states.splitChunk = function splitChunk(db, collName, connCache) {
-
var dbName = db.getName();
var ns = db[collName].getFullName();
var config = ChunkHelper.getPrimary(connCache.config);
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
index e6042ab0e1d..008b0fe05f0 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
@@ -160,5 +160,4 @@ var $config = (function() {
setup: setup,
data: data,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
index 7211970d2a5..f5c3a2670f8 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); //
load('jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.transitions = {
init: {snapshotFind: 1.0},
snapshotFind: {incrementTxnNumber: 0.33, killOp: 0.34, snapshotGetMore: 0.33},
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
index 47f1bf836dd..70db1a7c44b 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
@@ -38,18 +38,18 @@ var $config = (function() {
doSnapshotGetMore(collName,
this,
[
- ErrorCodes.CursorKilled,
- ErrorCodes.CursorNotFound,
- ErrorCodes.Interrupted,
- ErrorCodes.LockTimeout,
- ErrorCodes.NoSuchTransaction,
+ ErrorCodes.CursorKilled,
+ ErrorCodes.CursorNotFound,
+ ErrorCodes.Interrupted,
+ ErrorCodes.LockTimeout,
+ ErrorCodes.NoSuchTransaction,
],
[
- ErrorCodes.NoSuchTransaction,
- ErrorCodes.Interrupted,
- // Anonymous code for when user tries to send commit as the first
- // operation in a transaction without sending a recovery token
- 50940
+ ErrorCodes.NoSuchTransaction,
+ ErrorCodes.Interrupted,
+ // Anonymous code for when user tries to send commit as the first
+ // operation in a transaction without sending a recovery token
+ 50940
]);
},
@@ -151,5 +151,4 @@ var $config = (function() {
teardown: teardown,
data: data,
};
-
})();
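The two arrays passed to doSnapshotGetMore above are sets of acceptable error codes: with concurrent kills, the getMore and the commit may legitimately fail for any of several interleaving-dependent reasons. The shell's assert.commandFailedWithCode accepts such an array directly; a small sketch using a hypothetical stale cursor id:

// Sketch: accept any of several error codes when interleavings differ.
// The cursor id and collection name below are hypothetical; the cursor is
// assumed to be already dead.
var res = db.runCommand({getMore: NumberLong(123456), collection: 'coll'});
assert.commandFailedWithCode(res, [ErrorCodes.CursorNotFound, ErrorCodes.CursorKilled]);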
diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
index 16b20e91a23..ed27a6bbc63 100644
--- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
@@ -11,7 +11,6 @@
* Collection::updateDocument().
*/
var $config = (function() {
-
var states = {
insert: function insert(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
@@ -40,5 +39,4 @@ var $config = (function() {
states: states,
transitions: transitions
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index 2020ee3c60a..d4923b485bb 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -14,9 +14,7 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
var states = (function() {
-
// db: explicitly passed to avoid accidentally using the global `db`
// res: WriteResult
// nModifiedPossibilities: array of allowed values for res.nModified
@@ -107,7 +105,6 @@ var $config = (function() {
doPull(db, collName, docIndex, value);
}
};
-
})();
var transitions = {push: {push: 0.8, pull: 0.2}, pull: {push: 0.8, pull: 0.2}};
@@ -131,5 +128,4 @@ var $config = (function() {
data: {numDocs: 10},
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js
index bd82e39c471..8a94c568d6f 100644
--- a/jstests/concurrency/fsm_workloads/update_check_index.js
+++ b/jstests/concurrency/fsm_workloads/update_check_index.js
@@ -7,7 +7,6 @@
* for SERVER-17132.
*/
var $config = (function() {
-
var states = (function() {
function multiUpdate(db, collName) {
// Set 'c' to some random value.
diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js
index 15675bc3f02..5eedadeedbf 100644
--- a/jstests/concurrency/fsm_workloads/update_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_inc.js
@@ -13,7 +13,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
var data = {
// uses the workload name as _id on the document.
// assumes this name will be unique.
@@ -96,5 +95,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index 1e3087be413..da980099b70 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -11,7 +11,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
function makeQuery(options) {
var query = {};
if (!options.multi) {
@@ -103,5 +102,4 @@ var $config = (function() {
},
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
index 7654b8bac19..346a883c484 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -13,7 +13,6 @@ load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = extendWorkload($config, function($config, $super) {
-
$config.data.multi = true;
$config.data.assertResult = function(res, db, collName, query) {
diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
index f339a8e004e..06c2c2907ba 100644
--- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
@@ -15,7 +15,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
var states = {
init: function init(db, collName) {
this.fieldName = 't' + this.tid;
@@ -82,5 +81,4 @@ var $config = (function() {
setup: setup,
data: {docCount: 15}
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index ca74ac5287d..675c469848e 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -6,7 +6,6 @@
* Each thread does a $rename to cause documents to jump between indexes.
*/
var $config = (function() {
-
var fieldNames = ['update_rename_x', 'update_rename_y', 'update_rename_z'];
function choose(array) {
@@ -69,5 +68,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index 2d04f38294c..12b6c8026f4 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -11,7 +11,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
// explicitly pass db to avoid accidentally using the global `db`
function assertResult(db, res) {
assertAlways.eq(0, res.nUpserted, tojson(res));
@@ -88,5 +87,4 @@ var $config = (function() {
transitions: transitions,
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index cb4adf926c9..65178eafafb 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -13,7 +13,6 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
var $config = (function() {
-
var states = {
set: function set(db, collName) {
this.setOrUnset(db, collName, true, this.numDocs);
@@ -93,5 +92,4 @@ var $config = (function() {
},
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
index 3b32f166d3f..bffcbc232fa 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -12,7 +12,6 @@
* @tags: [requires_non_retryable_writes]
*/
var $config = (function() {
-
var states = {
insert: function insert(db, collName) {
var query, update, options;
@@ -80,5 +79,4 @@ var $config = (function() {
data: {counter: 0, shardKey: {tid: 1}},
setup: setup
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
index 614cbc86093..b9723c86c07 100644
--- a/jstests/concurrency/fsm_workloads/update_where.js
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -20,8 +20,8 @@ var $config = extendWorkload($config, function($config, $super) {
var res = db[collName].update(
// Server-side JS does not support Random.randInt, so use Math.floor/random instead
{
- $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
- '&& this.tid === ' + this.tid
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid
},
{$set: {x: Random.randInt(this.randomBound)}},
{multi: true});
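As the comment in the hunk notes, the $where string runs as server-side JavaScript, so it cannot call shell helpers such as Random.randInt; the workload instead interpolates this.randomBound and this.tid into the predicate text. A stripped-down sketch of the same update, with hypothetical values:

// Sketch: multi-update gated by a server-side $where predicate.
var tid = 0;           // hypothetical thread id
var randomBound = 10;  // hypothetical exclusive upper bound
db.update_where.update(
    {$where: 'this.x === Math.floor(Math.random() * ' + randomBound + ') && this.tid === ' + tid},
    {$set: {x: Random.randInt(randomBound)}},
    {multi: true});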
diff --git a/jstests/concurrency/fsm_workloads/upsert_unique_index.js b/jstests/concurrency/fsm_workloads/upsert_unique_index.js
index 62d91794ccb..15a7d1b14fc 100644
--- a/jstests/concurrency/fsm_workloads/upsert_unique_index.js
+++ b/jstests/concurrency/fsm_workloads/upsert_unique_index.js
@@ -6,7 +6,6 @@
* upsert generates an insert, which then fails due to another operation inserting first.
*/
var $config = (function() {
-
const data = {
numDocs: 4,
getDocValue: function() {
diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js
index 7fa00727725..522fef6b32f 100644
--- a/jstests/concurrency/fsm_workloads/upsert_where.js
+++ b/jstests/concurrency/fsm_workloads/upsert_where.js
@@ -4,7 +4,8 @@
* upsert_where.js
*
* Bulk inserts documents in batches of 100, randomly selects a document that doesn't exist and
- * updates it, and queries by the thread that created the documents to verify counts. */
+ * updates it, and queries by the thread that created the documents to verify counts.
+ */
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
diff --git a/jstests/concurrency/fsm_workloads/view_catalog.js b/jstests/concurrency/fsm_workloads/view_catalog.js
index ebcd37a9c05..9557a9f9387 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog.js
@@ -8,7 +8,6 @@
*/
var $config = (function() {
-
var data = {
// Use the workload name as a prefix for the view name, since the workload name is assumed
// to be unique.
@@ -17,7 +16,6 @@ var $config = (function() {
};
var states = (function() {
-
function init(db, collName) {
this.threadCollName = db[collName].getName();
this.threadViewName = this.prefix + '_' + this.tid;
@@ -65,7 +63,6 @@ var $config = (function() {
}
return {init: init, create: create, modify: modify, drop: drop};
-
})();
var transitions = {
@@ -100,5 +97,4 @@ var $config = (function() {
states: states,
transitions: transitions,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
index 4745ef3d427..98ccaa7e3d0 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
@@ -9,7 +9,6 @@
*/
var $config = (function() {
-
// Use the workload name as a prefix for the view names, since the workload name is assumed
// to be unique.
const prefix = 'view_catalog_cycle_lookup_';
@@ -19,13 +18,14 @@ var $config = (function() {
getRandomView: function(viewList) {
return viewList[Random.randInt(viewList.length)];
},
- getRandomViewPipeline: function() {
- const lookupViewNs1 = this.getRandomView(this.viewList);
- const lookupViewNs2 = this.getRandomView(this.viewList);
- const index = Random.randInt(4);
- switch (index) {
- case 0:
- return [{
+ getRandomViewPipeline:
+ function() {
+ const lookupViewNs1 = this.getRandomView(this.viewList);
+ const lookupViewNs2 = this.getRandomView(this.viewList);
+ const index = Random.randInt(4);
+ switch (index) {
+ case 0:
+ return [{
$lookup: {
from: lookupViewNs1,
localField: 'a',
@@ -33,8 +33,8 @@ var $config = (function() {
as: 'result1'
}
}];
- case 1:
- return [{
+ case 1:
+ return [{
$lookup: {
from: lookupViewNs1,
let : {a1: '$a'},
@@ -52,8 +52,8 @@ var $config = (function() {
as: 'result2'
}
}];
- case 2:
- return [{
+ case 2:
+ return [{
$graphLookup: {
from: lookupViewNs1,
startWith: '$a',
@@ -62,12 +62,12 @@ var $config = (function() {
as: 'result3'
}
}];
- case 3:
- return [];
- default:
- assertAlways(false, "Invalid index: " + index);
- }
- },
+ case 3:
+ return [];
+ default:
+ assertAlways(false, "Invalid index: " + index);
+ }
+ },
};
var states = (function() {
@@ -114,7 +114,6 @@ var $config = (function() {
remapViewToCollection: remapViewToCollection,
readFromView: readFromView,
};
-
})();
var transitions = {
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
index d7eb28009cf..51eee139928 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
@@ -8,7 +8,6 @@
*/
var $config = (function() {
-
// Use the workload name as a prefix for the view names, since the workload name is assumed
// to be unique.
const prefix = 'view_catalog_cycle_with_drop_';
@@ -76,7 +75,6 @@ var $config = (function() {
recreateViewOnCollection: recreateViewOnCollection,
readFromView: readFromView
};
-
})();
var transitions = {
@@ -114,5 +112,4 @@ var $config = (function() {
transitions: transitions,
setup: setup,
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js
index 5aaf9f086e7..748c912352c 100644
--- a/jstests/concurrency/fsm_workloads/yield.js
+++ b/jstests/concurrency/fsm_workloads/yield.js
@@ -7,7 +7,6 @@
* removing documents that they operate on.
*/
var $config = (function() {
-
// The explain used to build the assertion message in advanceCursor() is the only command not
// allowed in a transaction used in the query state function. With shard stepdowns, getMores
// aren't allowed outside a transaction, so if the explain runs when the suite is configured to
@@ -36,13 +35,14 @@ var $config = (function() {
while (cursor.hasNext()) {
prevDoc = doc;
doc = cursor.next();
- assertAlways(verifier(doc, prevDoc),
- 'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' +
- (skipExplainInErrorMessage ? '' : 'Query plan: ' +
- tojson(cursor.explain())) +
- '\n' +
- 'Previous doc: ' + tojson(prevDoc) + '\n' +
- 'This doc: ' + tojson(doc));
+ assertAlways(
+ verifier(doc, prevDoc),
+ 'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' +
+ (skipExplainInErrorMessage ? ''
+ : 'Query plan: ' + tojson(cursor.explain())) +
+ '\n' +
+ 'Previous doc: ' + tojson(prevDoc) + '\n' +
+ 'This doc: ' + tojson(doc));
}
assertAlways.eq(cursor.itcount(), 0);
},
@@ -169,5 +169,4 @@ var $config = (function() {
teardown: teardown,
data: data
};
-
})();
diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
index 60d73670a1f..21d43a6d536 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_hashed.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWork
load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Issue a query that will use the AND_HASH stage. This is a little tricky, so use
* stagedebug to force it to happen. Unfortunately this means it can't be batched.
diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
index ea077aeed12..480b9258d78 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_sorted.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWork
load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
* stagedebug to force it to happen. Unfortunately this means it can't be batched.
diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js
index e802635af73..7b5a0007042 100644
--- a/jstests/concurrency/fsm_workloads/yield_fetch.js
+++ b/jstests/concurrency/fsm_workloads/yield_fetch.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWork
load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Issue a query that will use the FETCH stage.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
index 7fb5c860a7a..1e6da602641 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
@@ -8,7 +8,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkl
load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
$config.states.remove = function remove(db, collName) {
var id = Random.randInt(this.nDocs);
var doc = db[collName].findOne({_id: id});
diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js
index eddb653c1d8..a0ba2ffcf16 100644
--- a/jstests/concurrency/fsm_workloads/yield_id_hack.js
+++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Issue a query that will use the ID_HACK stage. This cannot be
* batched, so issue a
diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
index 2d21427b42b..b21e918e5bc 100644
--- a/jstests/concurrency/fsm_workloads/yield_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Issue a query with an or stage as the root.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js
index 1c535ae6415..af8fef20510 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWor
load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Execute a query that will use the SORT stage.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
index d715a813701..b66f185854d 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort_merge.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
@@ -11,7 +11,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Execute a query that will use the SORT_MERGE stage.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js
index 0ccf5b8a7d4..2b14a051b73 100644
--- a/jstests/concurrency/fsm_workloads/yield_text.js
+++ b/jstests/concurrency/fsm_workloads/yield_text.js
@@ -10,7 +10,6 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
var $config = extendWorkload($config, function($config, $super) {
-
/*
* Pick a random word and search for it using full text search.
*/
diff --git a/jstests/core/SERVER-23626.js b/jstests/core/SERVER-23626.js
index f4f01495269..9a25bda2291 100644
--- a/jstests/core/SERVER-23626.js
+++ b/jstests/core/SERVER-23626.js
@@ -1,18 +1,17 @@
(function() {
- "use strict";
- var t = db.jstests_server23626;
+"use strict";
+var t = db.jstests_server23626;
- t.mycoll.drop();
- assert.writeOK(t.mycoll.insert({_id: 0, a: Date.prototype}));
- assert.eq(1, t.mycoll.find({a: {$type: 'date'}}).itcount());
+t.mycoll.drop();
+assert.writeOK(t.mycoll.insert({_id: 0, a: Date.prototype}));
+assert.eq(1, t.mycoll.find({a: {$type: 'date'}}).itcount());
- t.mycoll.drop();
- assert.writeOK(t.mycoll.insert({_id: 0, a: Function.prototype}));
- assert.eq(1, t.mycoll.find({a: {$type: 'javascript'}}).itcount());
-
- t.mycoll.drop();
- assert.writeOK(t.mycoll.insert({_id: 0, a: RegExp.prototype}));
- assert.eq(1, t.mycoll.find({a: {$type: 'regex'}}).itcount());
+t.mycoll.drop();
+assert.writeOK(t.mycoll.insert({_id: 0, a: Function.prototype}));
+assert.eq(1, t.mycoll.find({a: {$type: 'javascript'}}).itcount());
+t.mycoll.drop();
+assert.writeOK(t.mycoll.insert({_id: 0, a: RegExp.prototype}));
+assert.eq(1, t.mycoll.find({a: {$type: 'regex'}}).itcount());
}());
\ No newline at end of file
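The test stores JavaScript built-in prototypes and relies on the shell serializing them as concrete BSON types: Date.prototype as a date, Function.prototype as code, RegExp.prototype as a regex. A one-type sketch of the round trip, with a hypothetical collection name:

// Sketch: Date.prototype is inserted as a BSON date and matches $type 'date'.
var c = db.jstests_server23626_sketch;
c.drop();
assert.writeOK(c.insert({_id: 0, a: Date.prototype}));
assert.eq(1, c.find({a: {$type: 'date'}}).itcount());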
diff --git a/jstests/core/add_skip_stage_before_fetch.js b/jstests/core/add_skip_stage_before_fetch.js
index 3f907b5e49c..aaad7bb5db3 100644
--- a/jstests/core/add_skip_stage_before_fetch.js
+++ b/jstests/core/add_skip_stage_before_fetch.js
@@ -6,58 +6,61 @@
// @tags: [assumes_unsharded_collection, operations_longer_than_stepdown_interval_in_txns]
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const coll = db.add_skip_stage_before_fetch;
-
- coll.drop();
- const testIndex = {a: 1, b: 1, c: 1};
- assert.commandWorked(coll.createIndex(testIndex));
-
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10000; i++) {
- bulk.insert({
- a: i % 2,
- b: i % 4,
- c: Math.floor(Math.random() * 1000),
- d: Math.floor(Math.random() * 1000)
- });
- }
- assert.writeOK(bulk.execute());
-
- // The {a: 0, b: 2} query will match exactly one quarter of the documents in the collection:
- // 2500 in total. In the test queries below, we skip the first 2400, returning exactly 100
- // documents.
-
- // This find can be computed using the index, so we should only need to fetch the 100 documents
- // that get returned to the client after skipping the first 2400.
- let explainResult =
- coll.find({a: 0, b: 2}).hint(testIndex).skip(2400).explain("executionStats");
- assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
- assert.eq(explainResult.executionStats.totalDocsExamined, 100);
-
- // This sort can also be computed using the index.
- explainResult =
- coll.find({a: 0, b: 2}).hint(testIndex).sort({c: 1}).skip(2400).explain("executionStats");
- assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
- assert.eq(explainResult.executionStats.totalDocsExamined, 100);
-
- // This query is covered by the index, so there should be no fetch at all.
- explainResult = coll.find({a: 0, b: 2}, {_id: 0, a: 1})
- .hint(testIndex)
- .sort({c: 1})
- .skip(2400)
- .explain("executionStats");
- assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
- assert.eq(explainResult.executionStats.totalDocsExamined, 0);
- assert(isIndexOnly(db, explainResult.queryPlanner.winningPlan));
-
- // This sort requires a field that is not in the index, so we should be fetching all 2500
- // documents that match the find predicate.
- explainResult =
- coll.find({a: 0, b: 2}).hint(testIndex).sort({d: 1}).skip(2400).explain("executionStats");
- assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
- assert.eq(explainResult.executionStats.totalDocsExamined, 2500);
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const coll = db.add_skip_stage_before_fetch;
+
+coll.drop();
+const testIndex = {
+ a: 1,
+ b: 1,
+ c: 1
+};
+assert.commandWorked(coll.createIndex(testIndex));
+
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 10000; i++) {
+ bulk.insert({
+ a: i % 2,
+ b: i % 4,
+ c: Math.floor(Math.random() * 1000),
+ d: Math.floor(Math.random() * 1000)
+ });
+}
+assert.writeOK(bulk.execute());
+
+// The {a: 0, b: 2} query will match exactly one quarter of the documents in the collection:
+// 2500 in total. In the test queries below, we skip the first 2400, returning exactly 100
+// documents.
+
+// This find can be computed using the index, so we should only need to fetch the 100 documents
+// that get returned to the client after skipping the first 2400.
+let explainResult = coll.find({a: 0, b: 2}).hint(testIndex).skip(2400).explain("executionStats");
+assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
+assert.eq(explainResult.executionStats.totalDocsExamined, 100);
+
+// This sort can also be computed using the index.
+explainResult =
+ coll.find({a: 0, b: 2}).hint(testIndex).sort({c: 1}).skip(2400).explain("executionStats");
+assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
+assert.eq(explainResult.executionStats.totalDocsExamined, 100);
+
+// This query is covered by the index, so there should be no fetch at all.
+explainResult = coll.find({a: 0, b: 2}, {_id: 0, a: 1})
+ .hint(testIndex)
+ .sort({c: 1})
+ .skip(2400)
+ .explain("executionStats");
+assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
+assert.eq(explainResult.executionStats.totalDocsExamined, 0);
+assert(isIndexOnly(db, explainResult.queryPlanner.winningPlan));
+
+// This sort requires a field that is not in the index, so we should be fetching all 2500
+// documents that match the find predicate.
+explainResult =
+ coll.find({a: 0, b: 2}).hint(testIndex).sort({d: 1}).skip(2400).explain("executionStats");
+assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
+assert.eq(explainResult.executionStats.totalDocsExamined, 2500);
})();
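The assertions above separate three plan shapes by executionStats: an index-assisted skip fetches only the documents actually returned, a covered projection fetches none, and a sort on an unindexed field fetches every match. A quick hedged check of the covered case, reusing the test's index:

// Sketch: a projection limited to indexed fields should examine zero documents.
var explainRes = db.add_skip_stage_before_fetch.find({a: 0, b: 2}, {_id: 0, a: 1})
                     .hint({a: 1, b: 1, c: 1})
                     .skip(2400)
                     .explain('executionStats');
// Expect totalDocsExamined of 0 when the plan is index-only.
print('docs examined: ' + explainRes.executionStats.totalDocsExamined);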
diff --git a/jstests/core/agg_hint.js b/jstests/core/agg_hint.js
index 2d088daaf7b..899bbd2217a 100644
--- a/jstests/core/agg_hint.js
+++ b/jstests/core/agg_hint.js
@@ -7,255 +7,254 @@
// command against views, which is converted to a hinted aggregation on execution.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For getAggPlanStage.
+load("jstests/libs/analyze_plan.js"); // For getAggPlanStage.
- const testDB = db.getSiblingDB("agg_hint");
- assert.commandWorked(testDB.dropDatabase());
- const coll = testDB.getCollection("test");
- const view = testDB.getCollection("view");
+const testDB = db.getSiblingDB("agg_hint");
+assert.commandWorked(testDB.dropDatabase());
+const coll = testDB.getCollection("test");
+const view = testDB.getCollection("view");
- function confirmWinningPlanUsesExpectedIndex(
- explainResult, expectedKeyPattern, stageName, pipelineOptimizedAway) {
- const planStage = pipelineOptimizedAway ? getPlanStage(explainResult, stageName)
- : getAggPlanStage(explainResult, stageName);
- assert.neq(null, planStage);
+function confirmWinningPlanUsesExpectedIndex(
+ explainResult, expectedKeyPattern, stageName, pipelineOptimizedAway) {
+ const planStage = pipelineOptimizedAway ? getPlanStage(explainResult, stageName)
+ : getAggPlanStage(explainResult, stageName);
+ assert.neq(null, planStage);
- assert.eq(planStage.keyPattern, expectedKeyPattern, tojson(planStage));
- }
+ assert.eq(planStage.keyPattern, expectedKeyPattern, tojson(planStage));
+}
- // Runs explain on 'command', with the hint specified by 'hintKeyPattern' when not null.
- // Confirms that the winning query plan uses the index specified by 'expectedKeyPattern'.
- // If 'pipelineOptimizedAway' is set to true, then we expect the pipeline to be entirely
- // optimized away from the plan and replaced with a query tier.
- function confirmCommandUsesIndex({command = null,
- hintKeyPattern = null,
- expectedKeyPattern = null,
- stageName = "IXSCAN",
- pipelineOptimizedAway = false} = {}) {
- if (hintKeyPattern) {
- command["hint"] = hintKeyPattern;
- }
- const res =
- assert.commandWorked(testDB.runCommand({explain: command, verbosity: "queryPlanner"}));
- confirmWinningPlanUsesExpectedIndex(
- res, expectedKeyPattern, stageName, pipelineOptimizedAway);
+// Runs explain on 'command', with the hint specified by 'hintKeyPattern' when not null.
+// Confirms that the winning query plan uses the index specified by 'expectedKeyPattern'.
+// If 'pipelineOptimizedAway' is set to true, then we expect the pipeline to be entirely
+// optimized away from the plan and replaced with a query tier.
+function confirmCommandUsesIndex({
+ command = null,
+ hintKeyPattern = null,
+ expectedKeyPattern = null,
+ stageName = "IXSCAN",
+ pipelineOptimizedAway = false
+} = {}) {
+ if (hintKeyPattern) {
+ command["hint"] = hintKeyPattern;
}
+ const res =
+ assert.commandWorked(testDB.runCommand({explain: command, verbosity: "queryPlanner"}));
+ confirmWinningPlanUsesExpectedIndex(res, expectedKeyPattern, stageName, pipelineOptimizedAway);
+}
- // Runs explain on an aggregation with a pipeline specified by 'aggPipeline' and a hint
- // specified by 'hintKeyPattern' if not null. Confirms that the winning query plan uses the
- // index specified by 'expectedKeyPattern'. If 'pipelineOptimizedAway' is set to true, then
- // we expect the pipeline to be entirely optimized away from the plan and replaced with a
- // query tier.
- //
- // This method exists because the explain command does not support the aggregation command.
- function confirmAggUsesIndex({collName = null,
- aggPipeline = [],
- hintKeyPattern = null,
- expectedKeyPattern = null,
- stageName = "IXSCAN",
- pipelineOptimizedAway = false} = {}) {
- let options = {};
+// Runs explain on an aggregation with a pipeline specified by 'aggPipeline' and a hint
+// specified by 'hintKeyPattern' if not null. Confirms that the winning query plan uses the
+// index specified by 'expectedKeyPattern'. If 'pipelineOptimizedAway' is set to true, then
+// we expect the pipeline to be entirely optimized away from the plan and replaced with a
+// query tier.
+//
+// This method exists because the explain command does not support the aggregation command.
+function confirmAggUsesIndex({
+ collName = null,
+ aggPipeline = [],
+ hintKeyPattern = null,
+ expectedKeyPattern = null,
+ stageName = "IXSCAN",
+ pipelineOptimizedAway = false
+} = {}) {
+ let options = {};
- if (hintKeyPattern) {
- options = {hint: hintKeyPattern};
- }
- const res = assert.commandWorked(
- testDB.getCollection(collName).explain().aggregate(aggPipeline, options));
- confirmWinningPlanUsesExpectedIndex(
- res, expectedKeyPattern, stageName, pipelineOptimizedAway);
+ if (hintKeyPattern) {
+ options = {hint: hintKeyPattern};
}
+ const res = assert.commandWorked(
+ testDB.getCollection(collName).explain().aggregate(aggPipeline, options));
+ confirmWinningPlanUsesExpectedIndex(res, expectedKeyPattern, stageName, pipelineOptimizedAway);
+}
- // Specify hint as a string, representing index name.
- assert.commandWorked(coll.createIndex({x: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
+// Specify hint as a string, representing index name.
+assert.commandWorked(coll.createIndex({x: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: 3}}],
- hintKeyPattern: "x_1",
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: 3}}],
+ hintKeyPattern: "x_1",
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
- //
- // For each of the following tests we confirm:
- // * That the expected index is chosen by the query planner when no hint is provided.
- // * That the expected index is chosen when hinted.
- // * That an index other than the one expected is chosen when hinted.
- //
+//
+// For each of the following tests we confirm:
+// * That the expected index is chosen by the query planner when no hint is provided.
+// * That the expected index is chosen when hinted.
+// * That an index other than the one expected is chosen when hinted.
+//
- // Hint on poor index choice should force use of the hinted index over one more optimal.
- coll.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
+// Hint on poor index choice should force use of the hinted index over one more optimal.
+coll.drop();
+assert.commandWorked(coll.createIndex({x: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: 3}}],
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: 3}}],
- hintKeyPattern: {x: 1},
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: 3}}],
- hintKeyPattern: {_id: 1},
- expectedKeyPattern: {_id: 1},
- pipelineOptimizedAway: true
- });
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: 3}}],
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: 3}}],
+ hintKeyPattern: {x: 1},
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: 3}}],
+ hintKeyPattern: {_id: 1},
+ expectedKeyPattern: {_id: 1},
+ pipelineOptimizedAway: true
+});
- // With no hint specified, aggregation will always prefer an index that provides sort order over
- // one that requires a blocking sort. A hinted aggregation should allow for choice of an index
- // that provides blocking sort.
- coll.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- assert.commandWorked(coll.createIndex({y: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i, y: i}));
- }
+// With no hint specified, aggregation will always prefer an index that provides sort order over
+// one that requires a blocking sort. A hinted aggregation should allow for choice of an index
+// that provides blocking sort.
+coll.drop();
+assert.commandWorked(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({y: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i, y: i}));
+}
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}],
- expectedKeyPattern: {y: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}],
- hintKeyPattern: {y: 1},
- expectedKeyPattern: {y: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}],
- hintKeyPattern: {x: 1},
- expectedKeyPattern: {x: 1}
- });
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}],
+ expectedKeyPattern: {y: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}],
+ hintKeyPattern: {y: 1},
+ expectedKeyPattern: {y: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}],
+ hintKeyPattern: {x: 1},
+ expectedKeyPattern: {x: 1}
+});
- // With no hint specified, aggregation will always prefer an index that provides a covered
- // projection over one that does not. A hinted aggregation should allow for choice of an index
- // that does not cover.
- coll.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- assert.commandWorked(coll.createIndex({x: 1, y: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i, y: i}));
- }
+// With no hint specified, aggregation will always prefer an index that provides a covered
+// projection over one that does not. A hinted aggregation should allow for choice of an index
+// that does not cover.
+coll.drop();
+assert.commandWorked(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({x: 1, y: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i, y: i}));
+}
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}],
- expectedKeyPattern: {x: 1, y: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}],
- hintKeyPattern: {x: 1, y: 1},
- expectedKeyPattern: {x: 1, y: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "test",
- aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}],
- hintKeyPattern: {x: 1},
- expectedKeyPattern: {x: 1}
- });
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}],
+ expectedKeyPattern: {x: 1, y: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}],
+ hintKeyPattern: {x: 1, y: 1},
+ expectedKeyPattern: {x: 1, y: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "test",
+ aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}],
+ hintKeyPattern: {x: 1},
+ expectedKeyPattern: {x: 1}
+});
- // Confirm that a hinted agg can be executed against a view.
- coll.drop();
- view.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- assert.commandWorked(testDB.createView("view", "test", [{$match: {x: {$gte: 0}}}]));
+// Confirm that a hinted agg can be executed against a view.
+coll.drop();
+view.drop();
+assert.commandWorked(coll.createIndex({x: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+assert.commandWorked(testDB.createView("view", "test", [{$match: {x: {$gte: 0}}}]));
- confirmAggUsesIndex({
- collName: "view",
- aggPipeline: [{$match: {x: 3}}],
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "view",
- aggPipeline: [{$match: {x: 3}}],
- hintKeyPattern: {x: 1},
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
- confirmAggUsesIndex({
- collName: "view",
- aggPipeline: [{$match: {x: 3}}],
- hintKeyPattern: {_id: 1},
- expectedKeyPattern: {_id: 1},
- pipelineOptimizedAway: true
- });
+confirmAggUsesIndex({
+ collName: "view",
+ aggPipeline: [{$match: {x: 3}}],
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "view",
+ aggPipeline: [{$match: {x: 3}}],
+ hintKeyPattern: {x: 1},
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
+confirmAggUsesIndex({
+ collName: "view",
+ aggPipeline: [{$match: {x: 3}}],
+ hintKeyPattern: {_id: 1},
+ expectedKeyPattern: {_id: 1},
+ pipelineOptimizedAway: true
+});
- // Confirm that a hinted find can be executed against a view.
- coll.drop();
- view.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- assert.commandWorked(testDB.createView("view", "test", []));
+// Confirm that a hinted find can be executed against a view.
+coll.drop();
+view.drop();
+assert.commandWorked(coll.createIndex({x: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+assert.commandWorked(testDB.createView("view", "test", []));
- confirmCommandUsesIndex({
- command: {find: "view", filter: {x: 3}},
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
- confirmCommandUsesIndex({
- command: {find: "view", filter: {x: 3}},
- hintKeyPattern: {x: 1},
- expectedKeyPattern: {x: 1},
- pipelineOptimizedAway: true
- });
- confirmCommandUsesIndex({
- command: {find: "view", filter: {x: 3}},
- hintKeyPattern: {_id: 1},
- expectedKeyPattern: {_id: 1},
- pipelineOptimizedAway: true
- });
+confirmCommandUsesIndex({
+ command: {find: "view", filter: {x: 3}},
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
+confirmCommandUsesIndex({
+ command: {find: "view", filter: {x: 3}},
+ hintKeyPattern: {x: 1},
+ expectedKeyPattern: {x: 1},
+ pipelineOptimizedAway: true
+});
+confirmCommandUsesIndex({
+ command: {find: "view", filter: {x: 3}},
+ hintKeyPattern: {_id: 1},
+ expectedKeyPattern: {_id: 1},
+ pipelineOptimizedAway: true
+});
- // Confirm that a hinted count can be executed against a view.
- coll.drop();
- view.drop();
- assert.commandWorked(coll.createIndex({x: 1}));
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- assert.commandWorked(testDB.createView("view", "test", []));
+// Confirm that a hinted count can be executed against a view.
+coll.drop();
+view.drop();
+assert.commandWorked(coll.createIndex({x: 1}));
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+assert.commandWorked(testDB.createView("view", "test", []));
- confirmCommandUsesIndex({
- command: {count: "view", query: {x: 3}},
- expectedKeyPattern: {x: 1},
- stageName: "COUNT_SCAN"
- });
- confirmCommandUsesIndex({
- command: {count: "view", query: {x: 3}},
- hintKeyPattern: {x: 1},
- expectedKeyPattern: {x: 1},
- stageName: "COUNT_SCAN"
- });
- confirmCommandUsesIndex({
- command: {count: "view", query: {x: 3}},
- hintKeyPattern: {_id: 1},
- expectedKeyPattern: {_id: 1}
- });
+confirmCommandUsesIndex(
+ {command: {count: "view", query: {x: 3}}, expectedKeyPattern: {x: 1}, stageName: "COUNT_SCAN"});
+confirmCommandUsesIndex({
+ command: {count: "view", query: {x: 3}},
+ hintKeyPattern: {x: 1},
+ expectedKeyPattern: {x: 1},
+ stageName: "COUNT_SCAN"
+});
+confirmCommandUsesIndex({
+ command: {count: "view", query: {x: 3}},
+ hintKeyPattern: {_id: 1},
+ expectedKeyPattern: {_id: 1}
+});
})();
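Every case in this file reduces to passing hint in the aggregate options, either as a key pattern or as an index name (the test exercises both spellings). A minimal sketch against the test's collection:

// Sketch: hint an aggregation by key pattern or by index name.
var t = db.getSiblingDB('agg_hint').test;
t.explain().aggregate([{$match: {x: 3}}], {hint: {x: 1}});  // key pattern
t.explain().aggregate([{$match: {x: 3}}], {hint: 'x_1'});   // index name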
diff --git a/jstests/core/aggregation_accepts_write_concern.js b/jstests/core/aggregation_accepts_write_concern.js
index 6db86a31411..2c764414a1d 100644
--- a/jstests/core/aggregation_accepts_write_concern.js
+++ b/jstests/core/aggregation_accepts_write_concern.js
@@ -4,28 +4,28 @@
* @tags: [assumes_write_concern_unchanged, does_not_support_stepdowns]
*/
(function() {
- "use strict";
+"use strict";
- const testDB = db.getSiblingDB("aggregation_accepts_write_concern");
- assert.commandWorked(testDB.dropDatabase());
- const collName = "test";
+const testDB = db.getSiblingDB("aggregation_accepts_write_concern");
+assert.commandWorked(testDB.dropDatabase());
+const collName = "test";
- assert.commandWorked(testDB.runCommand(
- {insert: collName, documents: [{_id: 1}], writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ testDB.runCommand({insert: collName, documents: [{_id: 1}], writeConcern: {w: "majority"}}));
- // A read-only aggregation accepts writeConcern.
- assert.commandWorked(testDB.runCommand({
- aggregate: collName,
- pipeline: [{$match: {_id: 1}}],
- cursor: {},
- writeConcern: {w: "majority"}
- }));
+// A read-only aggregation accepts writeConcern.
+assert.commandWorked(testDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$match: {_id: 1}}],
+ cursor: {},
+ writeConcern: {w: "majority"}
+}));
- // An aggregation pipeline that writes accepts writeConcern.
- assert.commandWorked(testDB.runCommand({
- aggregate: collName,
- pipeline: [{$match: {_id: 1}}, {$out: collName + "_out"}],
- cursor: {},
- writeConcern: {w: "majority"}
- }));
+// An aggregation pipeline that writes accepts writeConcern.
+assert.commandWorked(testDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$match: {_id: 1}}, {$out: collName + "_out"}],
+ cursor: {},
+ writeConcern: {w: "majority"}
+}));
})();
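Both runCommand invocations above attach writeConcern at the top level of the aggregate command; the shell's aggregate helper forwards the same option. A hedged sketch using the test's database:

// Sketch: the aggregate shell helper passes writeConcern through to the server.
var wcDB = db.getSiblingDB('aggregation_accepts_write_concern');
wcDB.test.aggregate([{$match: {_id: 1}}, {$out: 'test_out'}],
                    {writeConcern: {w: 'majority'}});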
diff --git a/jstests/core/aggregation_getmore_batchsize.js b/jstests/core/aggregation_getmore_batchsize.js
index c0e12cfced3..c723d2ca45d 100644
--- a/jstests/core/aggregation_getmore_batchsize.js
+++ b/jstests/core/aggregation_getmore_batchsize.js
@@ -4,37 +4,37 @@
// from the aggregate shell helper
(function() {
- 'use strict';
-
- db.getMongo().forceReadMode("commands");
- var coll = db["aggregation_getmore_batchsize"];
-
- // Insert some data to query for
- assert.writeOK(coll.insert([{a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}]));
-
- // Create a cursor with a batch size of 2 (should require three full batches to return all
- // documents).
- var cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 2}});
- var curCount = 2;
-
- // Check that each batch has only two documents in it.
- for (var i = 0; i < 6; i++) {
- print(tojson(cursor.next()));
- jsTestLog("Expecting " + (curCount - 1));
- assert.eq(cursor.objsLeftInBatch(), --curCount);
- if (curCount == 0)
- curCount = 2;
- }
-
- // Create a cursor with a batch size of 0 (should only return one full batch of documents).
- // {batchSize: 0} is a special case where the server will return a cursor ID immediately, or
- // an error, but the first document result will be fetched by a getMore.
- cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 0}});
- assert.eq(cursor.objsLeftInBatch(), 0);
- print(tojson(cursor.next()));
- assert.eq(cursor.objsLeftInBatch(), 5);
+'use strict';
+
+db.getMongo().forceReadMode("commands");
+var coll = db["aggregation_getmore_batchsize"];
+
+// Insert some data to query for
+assert.writeOK(coll.insert([{a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}]));
- // Check that the default cursor behavior works if you specify a cursor but no batch size.
- cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {}});
- assert.eq(cursor.objsLeftInBatch(), 6);
+// Create a cursor with a batch size of 2 (should require three full batches to return all
+// documents).
+var cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 2}});
+var curCount = 2;
+
+// Check that each batch has only two documents in it.
+for (var i = 0; i < 6; i++) {
+ print(tojson(cursor.next()));
+ jsTestLog("Expecting " + (curCount - 1));
+ assert.eq(cursor.objsLeftInBatch(), --curCount);
+ if (curCount == 0)
+ curCount = 2;
+}
+
+// Create a cursor with a batch size of 0 (should only return one full batch of documents).
+// {batchSize: 0} is a special case where the server will return a cursor ID immediately, or
+// an error, but the first document result will be fetched by a getMore.
+cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 0}});
+assert.eq(cursor.objsLeftInBatch(), 0);
+print(tojson(cursor.next()));
+assert.eq(cursor.objsLeftInBatch(), 5);
+
+// Check that the default cursor behavior works if you specify a cursor but no batch size.
+cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {}});
+assert.eq(cursor.objsLeftInBatch(), 6);
})();
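The batching rules the test depends on: batchSize bounds each reply, objsLeftInBatch() reports what remains of the current batch, and {batchSize: 0} returns only a cursor id so the first next() is served by a getMore. A compact sketch with a hypothetical collection:

// Sketch: an initial batch of size 0 defers all documents to getMore.
var c = db.aggregation_getmore_batchsize_sketch;
c.drop();
assert.writeOK(c.insert([{a: 1}, {a: 1}, {a: 1}]));
var cur = c.aggregate([{$match: {a: 1}}], {cursor: {batchSize: 0}});
assert.eq(0, cur.objsLeftInBatch());  // empty first batch
cur.next();                           // first document arrives via getMore
assert.eq(2, cur.objsLeftInBatch());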
diff --git a/jstests/core/all.js b/jstests/core/all.js
index e77c0279215..9d142e6e6c4 100644
--- a/jstests/core/all.js
+++ b/jstests/core/all.js
@@ -2,7 +2,6 @@ t = db.jstests_all;
t.drop();
doTest = function() {
-
assert.commandWorked(t.save({a: [1, 2, 3]}));
assert.commandWorked(t.save({a: [1, 2, 4]}));
assert.commandWorked(t.save({a: [1, 8, 5]}));
@@ -36,7 +35,6 @@ doTest = function() {
assert.eq(5, t.find({a: {$all: [1]}}).count(), "E1");
assert.eq(0, t.find({a: {$all: [19]}}).count(), "E2");
assert.eq(0, t.find({a: {$all: []}}).count(), "E3");
-
};
doTest();
diff --git a/jstests/core/andor.js b/jstests/core/andor.js
index c574ab261a4..fb1ee98a448 100644
--- a/jstests/core/andor.js
+++ b/jstests/core/andor.js
@@ -11,7 +11,6 @@ function ok(q) {
t.save({a: 1});
test = function() {
-
ok({a: 1});
ok({$and: [{a: 1}]});
@@ -45,7 +44,6 @@ test = function() {
ok({$nor: [{$and: [{$and: [{a: 2}]}]}]});
ok({$nor: [{$and: [{$nor: [{a: 1}]}]}]});
-
};
test();
@@ -55,7 +53,6 @@ test();
// Test an inequality base match.
test = function() {
-
ok({a: {$ne: 2}});
ok({$and: [{a: {$ne: 2}}]});
@@ -89,7 +86,6 @@ test = function() {
ok({$nor: [{$and: [{$and: [{a: {$ne: 1}}]}]}]});
ok({$nor: [{$and: [{$nor: [{a: {$ne: 2}}]}]}]});
-
};
t.drop();
diff --git a/jstests/core/apitest_db_profile_level.js b/jstests/core/apitest_db_profile_level.js
index 2172b4ed1cb..adfc2b0ee43 100644
--- a/jstests/core/apitest_db_profile_level.js
+++ b/jstests/core/apitest_db_profile_level.js
@@ -4,37 +4,37 @@
*/
(function() {
- 'use strict';
-
- /*
- * be sure the public collection API is complete
- */
- assert(db.getProfilingLevel, "getProfilingLevel");
- assert(db.setProfilingLevel, "setProfilingLevel");
-
- // A test-specific database is used for profiler testing so as not to interfere with
- // other tests that modify profiler level, when run in parallel.
- var profileLevelDB = db.getSiblingDB("apitest_db_profile_level");
-
- profileLevelDB.setProfilingLevel(0);
- assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
-
- profileLevelDB.setProfilingLevel(1);
- assert(profileLevelDB.getProfilingLevel() == 1, "p1");
-
- profileLevelDB.setProfilingLevel(2);
- assert(profileLevelDB.getProfilingLevel() == 2, "p2");
-
- profileLevelDB.setProfilingLevel(0);
- assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
-
- var asserted = false;
- try {
- profileLevelDB.setProfilingLevel(10);
- assert(false);
- } catch (e) {
- asserted = true;
- assert(e.dbSetProfilingException);
- }
- assert(asserted, "should have asserted");
+'use strict';
+
+/*
+ * be sure the public collection API is complete
+ */
+assert(db.getProfilingLevel, "getProfilingLevel");
+assert(db.setProfilingLevel, "setProfilingLevel");
+
+// A test-specific database is used for profiler testing so as not to interfere with
+// other tests that modify profiler level, when run in parallel.
+var profileLevelDB = db.getSiblingDB("apitest_db_profile_level");
+
+profileLevelDB.setProfilingLevel(0);
+assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
+
+profileLevelDB.setProfilingLevel(1);
+assert(profileLevelDB.getProfilingLevel() == 1, "p1");
+
+profileLevelDB.setProfilingLevel(2);
+assert(profileLevelDB.getProfilingLevel() == 2, "p2");
+
+profileLevelDB.setProfilingLevel(0);
+assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0");
+
+var asserted = false;
+try {
+ profileLevelDB.setProfilingLevel(10);
+ assert(false);
+} catch (e) {
+ asserted = true;
+ assert(e.dbSetProfilingException);
+}
+assert(asserted, "should have asserted");
})();
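The helper pair being exercised: setProfilingLevel(level) accepts 0 (off), 1 (slow operations only), and 2 (all operations), and the shell helper throws for anything else, which the test observes via dbSetProfilingException. A minimal sketch in a scratch database:

// Sketch: valid profiling levels round-trip; an invalid level throws.
var p = db.getSiblingDB('apitest_db_profile_level_sketch');
p.setProfilingLevel(1);
assert.eq(1, p.getProfilingLevel());
assert.throws(function() {
    p.setProfilingLevel(10);  // out of range: the shell helper throws
});
p.setProfilingLevel(0);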
diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js
index f54e3e158c0..d36dcda4d89 100644
--- a/jstests/core/apitest_dbcollection.js
+++ b/jstests/core/apitest_dbcollection.js
@@ -47,44 +47,44 @@ for (i = 0; i < 100; i++) {
}
(function() {
- var validateResult = assert.commandWorked(db.getCollection("test_db").validate());
- // Extract validation results from mongos output if running in a sharded context.
- var isShardedNS = validateResult.hasOwnProperty('raw');
-
- if (isShardedNS) {
- // Sample mongos format:
- // {
- // raw: {
- // "localhost:30000": {
- // "ns" : "test.test_db",
- // ...
- // "valid": true,
- // ...
- // "ok": 1
- // }
- // },
- // "valid": true,
- // ...
- // "ok": 1
- // }
-
- var numFields = 0;
- var result = null;
- for (var field in validateResult.raw) {
- result = validateResult.raw[field];
- numFields++;
- }
-
- assert.eq(1, numFields);
- assert.neq(null, result);
- validateResult = result;
+var validateResult = assert.commandWorked(db.getCollection("test_db").validate());
+// Extract validation results from mongos output if running in a sharded context.
+var isShardedNS = validateResult.hasOwnProperty('raw');
+
+if (isShardedNS) {
+ // Sample mongos format:
+ // {
+ // raw: {
+ // "localhost:30000": {
+ // "ns" : "test.test_db",
+ // ...
+ // "valid": true,
+ // ...
+ // "ok": 1
+ // }
+ // },
+ // "valid": true,
+ // ...
+ // "ok": 1
+ // }
+
+ var numFields = 0;
+ var result = null;
+ for (var field in validateResult.raw) {
+ result = validateResult.raw[field];
+ numFields++;
}
- assert.eq('test.test_db',
- validateResult.ns,
- 'incorrect namespace in db.collection.validate() result: ' + tojson(validateResult));
- assert(validateResult.valid, 'collection validation failed');
- assert.eq(100, validateResult.nrecords, "11");
+ assert.eq(1, numFields);
+ assert.neq(null, result);
+ validateResult = result;
+}
+
+assert.eq('test.test_db',
+ validateResult.ns,
+ 'incorrect namespace in db.collection.validate() result: ' + tojson(validateResult));
+assert(validateResult.valid, 'collection validation failed');
+assert.eq(100, validateResult.nrecords, "11");
}());
/*
@@ -149,132 +149,126 @@ assert.eq(0, db.getCollection("test_db").getIndexes().length, "24");
*/
(function() {
- var t = db.apttest_dbcollection;
-
- // Non-existent collection.
- t.drop();
- var noCollStats = assert.commandWorked(
- t.stats(), 'db.collection.stats() should work on non-existent collection');
- assert.eq(0, noCollStats.size, "All properties should be 0 on nonexistent collections");
- assert.eq(0, noCollStats.count, "All properties should be 0 on nonexistent collections");
- assert.eq(0, noCollStats.storageSize, "All properties should be 0 on nonexistent collections");
- assert.eq(0, noCollStats.nindexes, "All properties should be 0 on nonexistent collections");
- assert.eq(
- 0, noCollStats.totalIndexSize, "All properties should be 0 on nonexistent collections");
-
- // scale - passed to stats() as the sole numerical argument or as part of an options object.
- t.drop();
- assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 10 * 1024 * 1024}));
- var collectionStats = assert.commandWorked(t.stats(1024 * 1024));
- assert.eq(10,
- collectionStats.maxSize,
- 'db.collection.stats(scale) - capped collection size scaled incorrectly: ' +
- tojson(collectionStats));
- var collectionStats = assert.commandWorked(t.stats({scale: 1024 * 1024}));
- assert.eq(10,
- collectionStats.maxSize,
- 'db.collection.stats({scale: N}) - capped collection size scaled incorrectly: ' +
- tojson(collectionStats));
-
- // indexDetails - If true, includes 'indexDetails' field in results. Default: false.
- t.drop();
- t.save({a: 1});
- t.ensureIndex({a: 1});
- collectionStats = assert.commandWorked(t.stats());
- assert(!collectionStats.hasOwnProperty('indexDetails'),
- 'unexpected indexDetails found in db.collection.stats() result: ' +
- tojson(collectionStats));
- collectionStats = assert.commandWorked(t.stats({indexDetails: false}));
- assert(!collectionStats.hasOwnProperty('indexDetails'),
- 'unexpected indexDetails found in db.collection.stats({indexDetails: false}) result: ' +
- tojson(collectionStats));
- collectionStats = assert.commandWorked(t.stats({indexDetails: true}));
+var t = db.apttest_dbcollection;
+
+// Non-existent collection.
+t.drop();
+var noCollStats =
+ assert.commandWorked(t.stats(), 'db.collection.stats() should work on non-existent collection');
+assert.eq(0, noCollStats.size, "All properties should be 0 on nonexistent collections");
+assert.eq(0, noCollStats.count, "All properties should be 0 on nonexistent collections");
+assert.eq(0, noCollStats.storageSize, "All properties should be 0 on nonexistent collections");
+assert.eq(0, noCollStats.nindexes, "All properties should be 0 on nonexistent collections");
+assert.eq(0, noCollStats.totalIndexSize, "All properties should be 0 on nonexistent collections");
+
+// scale - passed to stats() as the sole numerical argument or as part of an options object.
+t.drop();
+assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 10 * 1024 * 1024}));
+var collectionStats = assert.commandWorked(t.stats(1024 * 1024));
+assert.eq(10,
+ collectionStats.maxSize,
+ 'db.collection.stats(scale) - capped collection size scaled incorrectly: ' +
+ tojson(collectionStats));
+var collectionStats = assert.commandWorked(t.stats({scale: 1024 * 1024}));
+assert.eq(10,
+ collectionStats.maxSize,
+ 'db.collection.stats({scale: N}) - capped collection size scaled incorrectly: ' +
+ tojson(collectionStats));
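+
+// (The collection above was created with a 10 * 1024 * 1024 byte cap, so with a
+// scale of 1024 * 1024 the reported maxSize is expected to be 10.)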
+
+// indexDetails - If true, includes 'indexDetails' field in results. Default: false.
+t.drop();
+t.save({a: 1});
+t.ensureIndex({a: 1});
+collectionStats = assert.commandWorked(t.stats());
+assert(!collectionStats.hasOwnProperty('indexDetails'),
+ 'unexpected indexDetails found in db.collection.stats() result: ' + tojson(collectionStats));
+collectionStats = assert.commandWorked(t.stats({indexDetails: false}));
+assert(!collectionStats.hasOwnProperty('indexDetails'),
+ 'unexpected indexDetails found in db.collection.stats({indexDetails: false}) result: ' +
+ tojson(collectionStats));
+collectionStats = assert.commandWorked(t.stats({indexDetails: true}));
+assert(collectionStats.hasOwnProperty('indexDetails'),
+ 'indexDetails missing from db.collection.stats({indexDetails: true}) result: ' +
+ tojson(collectionStats));
+
+// Returns index name.
+function getIndexName(indexKey) {
+ var indexes = t.getIndexes().filter(function(doc) {
+ return friendlyEqual(doc.key, indexKey);
+ });
+ assert.eq(1,
+ indexes.length,
+ tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
+ return indexes[0].name;
+}
+
+function checkIndexDetails(options, indexName) {
+ var collectionStats = assert.commandWorked(t.stats(options));
assert(collectionStats.hasOwnProperty('indexDetails'),
- 'indexDetails missing from db.collection.stats({indexDetails: true}) result: ' +
- tojson(collectionStats));
-
- // Returns index name.
- function getIndexName(indexKey) {
- var indexes = t.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.key, indexKey);
- });
- assert.eq(
- 1,
- indexes.length,
- tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
- return indexes[0].name;
+ 'indexDetails missing from ' +
+ 'db.collection.stats(' + tojson(options) + ') result: ' + tojson(collectionStats));
+ // Currently, indexDetails is only supported with WiredTiger.
+ var storageEngine = jsTest.options().storageEngine;
+ if (storageEngine && storageEngine !== 'wiredTiger') {
+ return;
}
-
- function checkIndexDetails(options, indexName) {
- var collectionStats = assert.commandWorked(t.stats(options));
- assert(collectionStats.hasOwnProperty('indexDetails'),
- 'indexDetails missing from ' +
- 'db.collection.stats(' + tojson(options) + ') result: ' +
+ assert.eq(1,
+ Object.keys(collectionStats.indexDetails).length,
+ 'indexDetails must have exactly one entry');
+ assert(collectionStats.indexDetails[indexName],
+ indexName + ' missing from indexDetails: ' + tojson(collectionStats.indexDetails));
+ assert.neq(0,
+ Object.keys(collectionStats.indexDetails[indexName]).length,
+ indexName + ' exists in indexDetails but contains no information: ' +
tojson(collectionStats));
- // Currently, indexDetails is only supported with WiredTiger.
- var storageEngine = jsTest.options().storageEngine;
- if (storageEngine && storageEngine !== 'wiredTiger') {
- return;
- }
- assert.eq(1,
- Object.keys(collectionStats.indexDetails).length,
- 'indexDetails must have exactly one entry');
- assert(collectionStats.indexDetails[indexName],
- indexName + ' missing from indexDetails: ' + tojson(collectionStats.indexDetails));
- assert.neq(0,
- Object.keys(collectionStats.indexDetails[indexName]).length,
- indexName + ' exists in indexDetails but contains no information: ' +
- tojson(collectionStats));
- }
+}
- // indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {a: 1};
- var indexName = getIndexName(indexKey);
- checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
+// indexDetailsKey - show indexDetails results for this index key only.
+var indexKey = {a: 1};
+var indexName = getIndexName(indexKey);
+checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
- // indexDetailsName - show indexDetails results for this index name only.
- checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
+// indexDetailsName - show indexDetails results for this index name only.
+checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
- // Cannot specify both indexDetailsKey and indexDetailsName.
- var error = assert.throws(function() {
- t.stats({indexDetails: true, indexDetailsKey: indexKey, indexDetailsName: indexName});
- }, [], 'indexDetailsKey and indexDetailsName cannot be used at the same time');
- assert.eq(Error,
- error.constructor,
- 'db.collection.stats() failed when both indexDetailsKey and indexDetailsName ' +
- 'are used but with incorrect error type');
+// Cannot specify both indexDetailsKey and indexDetailsName.
+var error = assert.throws(function() {
+ t.stats({indexDetails: true, indexDetailsKey: indexKey, indexDetailsName: indexName});
+}, [], 'indexDetailsKey and indexDetailsName cannot be used at the same time');
+assert.eq(Error,
+ error.constructor,
+ 'db.collection.stats() failed when both indexDetailsKey and indexDetailsName ' +
+ 'are used but with incorrect error type');
- t.drop();
+t.drop();
}());
/*
* test db.collection.totalSize()
*/
(function() {
- 'use strict';
-
- var t = db.apitest_dbcollection;
-
- t.drop();
- var emptyStats = assert.commandWorked(t.stats());
- assert.eq(emptyStats.storageSize, 0);
- assert.eq(emptyStats.totalIndexSize, 0);
-
- assert.eq(
- 0, t.storageSize(), 'db.collection.storageSize() on empty collection should return 0');
- assert.eq(0,
- t.totalIndexSize(),
- 'db.collection.totalIndexSize() on empty collection should return 0');
- assert.eq(0, t.totalSize(), 'db.collection.totalSize() on empty collection should return 0');
-
- t.save({a: 1});
- var stats = assert.commandWorked(t.stats());
- assert.neq(undefined,
- t.storageSize(),
- 'db.collection.storageSize() cannot be undefined on a non-empty collection');
- assert.neq(undefined,
- t.totalIndexSize(),
- 'db.collection.totalIndexSize() cannot be undefined on a non-empty collection');
-
- t.drop();
+'use strict';
+
+var t = db.apitest_dbcollection;
+
+t.drop();
+var emptyStats = assert.commandWorked(t.stats());
+assert.eq(emptyStats.storageSize, 0);
+assert.eq(emptyStats.totalIndexSize, 0);
+
+assert.eq(0, t.storageSize(), 'db.collection.storageSize() on empty collection should return 0');
+assert.eq(
+ 0, t.totalIndexSize(), 'db.collection.totalIndexSize() on empty collection should return 0');
+assert.eq(0, t.totalSize(), 'db.collection.totalSize() on empty collection should return 0');
+
+t.save({a: 1});
+var stats = assert.commandWorked(t.stats());
+assert.neq(undefined,
+ t.storageSize(),
+ 'db.collection.storageSize() cannot be undefined on a non-empty collection');
+assert.neq(undefined,
+ t.totalIndexSize(),
+ 'db.collection.totalIndexSize() cannot be undefined on a non-empty collection');
+
+t.drop();
}());
diff --git a/jstests/core/apply_ops1.js b/jstests/core/apply_ops1.js
index dd2e4ceb79b..29961fab45e 100644
--- a/jstests/core/apply_ops1.js
+++ b/jstests/core/apply_ops1.js
@@ -7,438 +7,421 @@
// ]
(function() {
- "use strict";
-
- load("jstests/libs/get_index_helpers.js");
-
- var t = db.apply_ops1;
- t.drop();
-
- //
- // Input validation tests
- //
-
- // Empty array of operations.
- assert.commandWorked(db.adminCommand({applyOps: []}),
- 'applyOps should not fail on empty array of operations');
-
- // Non-array type for operations.
- assert.commandFailed(db.adminCommand({applyOps: "not an array"}),
- 'applyOps should fail on non-array type for operations');
-
- // Missing 'op' field in an operation.
- assert.commandFailed(db.adminCommand({applyOps: [{ns: t.getFullName()}]}),
- 'applyOps should fail on operation without "op" field');
-
- // Non-string 'op' field in an operation.
- assert.commandFailed(db.adminCommand({applyOps: [{op: 12345, ns: t.getFullName()}]}),
- 'applyOps should fail on operation with non-string "op" field');
-
- // Empty 'op' field value in an operation.
- assert.commandFailed(db.adminCommand({applyOps: [{op: '', ns: t.getFullName()}]}),
- 'applyOps should fail on operation with empty "op" field value');
-
- // Missing 'ns' field in an operation.
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c'}]}),
- 'applyOps should fail on operation without "ns" field');
-
- // Non-string 'ns' field in an operation.
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: 12345}]}),
- 'applyOps should fail on operation with non-string "ns" field');
-
- // Empty 'ns' field value in an operation of type 'n' (noop).
- assert.commandWorked(db.adminCommand({applyOps: [{op: 'n', ns: ''}]}),
- 'applyOps should work on no-op operation with empty "ns" field value');
-
- // Missing dbname in 'ns' field.
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'd', ns: t.getName(), o: {_id: 1}}]}));
-
- // Missing 'o' field value in an operation of type 'c' (command).
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName()}]}),
- 'applyOps should fail on command operation without "o" field');
-
- // Non-object 'o' field value in an operation of type 'c' (command).
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: 'bar'}]}),
- 'applyOps should fail on command operation with non-object "o" field');
-
- // Empty object 'o' field value in an operation of type 'c' (command).
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {}}]}),
- 'applyOps should fail on command operation with empty object "o" field');
-
- // Unknown key in 'o' field value in an operation of type 'c' (command).
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {a: 1}}]}),
- 'applyOps should fail on command operation with unknown key in "o" field');
-
- // Empty 'ns' field value in operation type other than 'n'.
- assert.commandFailed(
- db.adminCommand({applyOps: [{op: 'c', ns: ''}]}),
- 'applyOps should fail on non-"n" operation type with empty "ns" field value');
-
- // Excessively nested applyOps commands gracefully fail.
- assert.commandFailed(db.adminCommand({
- "applyOps": [{
- "ts": {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts":
- {"$timestamp": {"t": 1, "i": 100}},
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {
- "$timestamp":
- {"t": 1, "i": 100}
- },
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {
- "$timestamp": {
- "t": 1,
- "i": 100
- }
- },
- "h": 0,
- "v": 2,
- "op": "c",
- "ns": "test.$cmd",
- "o": {
- "applyOps": [{
- "ts": {
- "$timestamp":
- {
- "t":
- 1,
- "i":
- 100
- }
- },
- "h": 0,
- "v": 2,
- "op": "c",
- "ns":
- "test.$cmd",
- "o": {
- "applyOps": [
- {
- "ts": {
- "$timestamp": {
- "t":
- 1,
- "i":
- 100
- }
- },
- "h":
- 0,
- "v":
- 2,
- "op":
- "c",
- "ns":
- "test.$cmd",
- "o": {
- "applyOps":
- [
- ]
- }
- }
- ]
+"use strict";
+
+load("jstests/libs/get_index_helpers.js");
+
+var t = db.apply_ops1;
+t.drop();
+
+//
+// Input validation tests
+//
+
+// Empty array of operations.
+assert.commandWorked(db.adminCommand({applyOps: []}),
+ 'applyOps should not fail on empty array of operations');
+
+// Non-array type for operations.
+assert.commandFailed(db.adminCommand({applyOps: "not an array"}),
+ 'applyOps should fail on non-array type for operations');
+
+// Missing 'op' field in an operation.
+assert.commandFailed(db.adminCommand({applyOps: [{ns: t.getFullName()}]}),
+ 'applyOps should fail on operation without "op" field');
+
+// Non-string 'op' field in an operation.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 12345, ns: t.getFullName()}]}),
+ 'applyOps should fail on operation with non-string "op" field');
+
+// Empty 'op' field value in an operation.
+assert.commandFailed(db.adminCommand({applyOps: [{op: '', ns: t.getFullName()}]}),
+ 'applyOps should fail on operation with empty "op" field value');
+
+// Missing 'ns' field in an operation.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c'}]}),
+ 'applyOps should fail on operation without "ns" field');
+
+// Non-string 'ns' field in an operation.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: 12345}]}),
+ 'applyOps should fail on operation with non-string "ns" field');
+
+// Empty 'ns' field value in an operation of type 'n' (noop).
+assert.commandWorked(db.adminCommand({applyOps: [{op: 'n', ns: ''}]}),
+ 'applyOps should work on no-op operation with empty "ns" field value');
+
+// Missing dbname in 'ns' field.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'd', ns: t.getName(), o: {_id: 1}}]}));
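+// (t.getName() yields only the bare collection name, e.g. 'apply_ops1', whereas a
+// fully qualified namespace such as t.getFullName()'s 'test.apply_ops1' is required.)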
+
+// Missing 'o' field value in an operation of type 'c' (command).
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName()}]}),
+ 'applyOps should fail on command operation without "o" field');
+
+// Non-object 'o' field value in an operation of type 'c' (command).
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: 'bar'}]}),
+ 'applyOps should fail on command operation with non-object "o" field');
+
+// Empty object 'o' field value in an operation of type 'c' (command).
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {}}]}),
+ 'applyOps should fail on command operation with empty object "o" field');
+
+// Unknown key in 'o' field value in an operation of type 'c' (command).
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {a: 1}}]}),
+ 'applyOps should fail on command operation with unknown key in "o" field');
+
+// Empty 'ns' field value in operation type other than 'n'.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: ''}]}),
+ 'applyOps should fail on non-"n" operation type with empty "ns" field value');
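+
+// Recap of the oplog entry types exercised in this test: 'i' = insert,
+// 'u' = update, 'd' = delete, 'c' = command, and 'n' = no-op.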
+
+// Excessively nested applyOps commands gracefully fail.
+assert.commandFailed(db.adminCommand({
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {
+ "$timestamp": {"t": 1, "i": 100}
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {
+ "$timestamp":
+ {"t": 1, "i": 100}
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {
+ "$timestamp": {
+ "t": 1,
+ "i": 100
}
- }]
- }
- }]
- }
- }]
- }
- }]
- }
- }]
- }
- }]
- }
- }]
- }
- }]
- }
- }]
- }
- }]
- }),
- "Excessively nested applyOps should be rejected");
-
- // Valid 'ns' field value in unknown operation type 'x'.
- assert.commandFailed(
- db.adminCommand({applyOps: [{op: 'x', ns: t.getFullName()}]}),
- 'applyOps should fail on unknown operation type "x" with valid "ns" value');
-
- assert.eq(0, t.find().count(), "Non-zero number of documents in collection at start");
-
- /**
- * Test function for running CRUD operations on non-existent namespaces using various
- * combinations of invalid namespaces (collection/database), allowAtomic and alwaysUpsert,
- * and nesting.
- *
- * Leave 'expectedErrorCode' undefined if this command is expected to run successfully.
- */
- function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) {
- expectedErrorCode = expectedErrorCode || ErrorCodes.OK;
- const t2 = db.getSiblingDB('apply_ops1_no_such_db').getCollection('t');
- [t, t2].forEach(coll => {
- const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2};
- [false, true].forEach(nested => {
- const opToRun =
- nested ? {op: 'c', ns: 'test.$cmd', o: {applyOps: [op]}, o2: {}} : op;
- [false, true].forEach(allowAtomic => {
- [false, true].forEach(alwaysUpsert => {
- const cmd = {
- applyOps: [opToRun],
- allowAtomic: allowAtomic,
- alwaysUpsert: alwaysUpsert
- };
- jsTestLog('Testing applyOps on non-existent namespace: ' + tojson(cmd));
- if (expectedErrorCode === ErrorCodes.OK) {
- assert.commandWorked(db.adminCommand(cmd));
- } else {
- assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode);
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "applyOps": [{
+ "ts": {
+ "$timestamp": {
+ "t":
+ 1,
+ "i":
+ 100
+ }
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns":
+ "test.$cmd",
+ "o": {
+ "applyOps":
+ []
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
}
- });
+ }]
+ }
+ }]
+ }
+ }]
+}),
+ "Excessively nested applyOps should be rejected");
+
+// Valid 'ns' field value in unknown operation type 'x'.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'x', ns: t.getFullName()}]}),
+ 'applyOps should fail on unknown operation type "x" with valid "ns" value');
+
+assert.eq(0, t.find().count(), "Non-zero number of documents in collection at start");
+
+/**
+ * Test function for running CRUD operations on non-existent namespaces using various
+ * combinations of invalid namespaces (collection/database), allowAtomic and alwaysUpsert,
+ * and nesting.
+ *
+ * Leave 'expectedErrorCode' undefined if this command is expected to run successfully.
+ */
+function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) {
+ expectedErrorCode = expectedErrorCode || ErrorCodes.OK;
+ const t2 = db.getSiblingDB('apply_ops1_no_such_db').getCollection('t');
+ [t, t2].forEach(coll => {
+ const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2};
+ [false, true].forEach(nested => {
+ const opToRun = nested ? {op: 'c', ns: 'test.$cmd', o: {applyOps: [op]}, o2: {}} : op;
+ [false, true].forEach(allowAtomic => {
+ [false, true].forEach(alwaysUpsert => {
+ const cmd = {
+ applyOps: [opToRun],
+ allowAtomic: allowAtomic,
+ alwaysUpsert: alwaysUpsert
+ };
+ jsTestLog('Testing applyOps on non-existent namespace: ' + tojson(cmd));
+ if (expectedErrorCode === ErrorCodes.OK) {
+ assert.commandWorked(db.adminCommand(cmd));
+ } else {
+ assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode);
+ }
});
});
});
- }
-
- // Insert and update operations on non-existent collections/databases should return
- // NamespaceNotFound.
- testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
- testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound);
-
- // Delete operations on non-existent collections/databases should return OK for idempotency
- // reasons.
- testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {});
-
- assert.commandWorked(db.createCollection(t.getName()));
- var a = assert.commandWorked(
- db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
- assert.eq(1, t.find().count(), "Valid insert failed");
- assert.eq(true, a.results[0], "Bad result value for valid insert");
-
- a = assert.commandWorked(
- db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
- assert.eq(1, t.find().count(), "Duplicate insert failed");
- assert.eq(true, a.results[0], "Bad result value for duplicate insert");
-
- var o = {_id: 5, x: 17};
- assert.eq(o, t.findOne(), "Mismatching document inserted.");
-
- // 'o' field is an empty array.
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'i', ns: t.getFullName(), o: []}]}),
- 'applyOps should fail on insert of object with empty array element');
-
- var res = assert.commandWorked(db.runCommand({
- applyOps: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}}
- ]
- }));
-
- o.x++;
- o.x++;
-
- assert.eq(1, t.find().count(), "Updates increased number of documents");
- assert.eq(o, t.findOne(), "Document doesn't match expected");
- assert.eq(true, res.results[0], "Bad result value for valid update");
- assert.eq(true, res.results[1], "Bad result value for valid update");
-
- // preCondition fully matches
- res = db.runCommand({
- applyOps: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}}
- ],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
});
-
- // The use of preCondition requires applyOps to run atomically. Therefore, it is incompatible
- // with {allowAtomic: false}.
- assert.commandFailedWithCode(
- db.runCommand({
- applyOps: [{op: 'u', ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}],
- allowAtomic: false,
- }),
- ErrorCodes.InvalidOptions,
- 'applyOps should fail when preCondition is present and allowAtomic is false.');
-
- // The use of preCondition is also incompatible with operations that include commands.
- assert.commandFailedWithCode(
- db.runCommand({
- applyOps: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {applyOps: []}}],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}],
- }),
- ErrorCodes.InvalidOptions,
- 'applyOps should fail when preCondition is present and operations include commands.');
-
- o.x++;
- o.x++;
-
- assert.eq(1, t.find().count(), "Updates increased number of documents");
- assert.eq(o, t.findOne(), "Document doesn't match expected");
- assert.eq(true, res.results[0], "Bad result value for valid update");
- assert.eq(true, res.results[1], "Bad result value for valid update");
-
- // preCondition doesn't match ns
- res = db.runCommand({
- applyOps: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
- ],
- preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}]
- });
-
- assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
-
- // preCondition doesn't match query
- res = db.runCommand({
- applyOps: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
- ],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
- });
-
- assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
-
- res = db.runCommand({
- applyOps: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}}
- ]
- });
-
- assert.eq(true, res.results[0], "Valid update failed");
- assert.eq(true, res.results[1], "Valid update failed");
-
- // Ops with transaction numbers are valid.
- const lsid = {
- "id": UUID("3eea4a58-6018-40b6-8743-6a55783bf902"),
- "uid": BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=")
- };
- res = db.runCommand({
- applyOps: [
- {
- op: "i",
- ns: t.getFullName(),
- o: {_id: 7, x: 24},
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(0)
- },
- {
- op: "u",
- ns: t.getFullName(),
- o2: {_id: 8},
- o: {$set: {x: 25}},
- lsid: lsid,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(1)
- },
- {
- op: "d",
- ns: t.getFullName(),
- o: {_id: 7},
- lsid: lsid,
- txnNumber: NumberLong(2),
- stmtId: NumberInt(0)
- },
- ]
- });
-
- assert.eq(true, res.results[0], "Valid insert with transaction number failed");
- assert.eq(true, res.results[1], "Valid update with transaction number failed");
- assert.eq(true, res.results[2], "Valid delete with transaction number failed");
-
- // When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set
- // operations add new fields in lexicographic order.
- res = assert.commandWorked(db.adminCommand({
- applyOps: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 6}},
- {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}}
- ]
- }));
- assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
-
- // 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail.
- res = assert.commandFailed(db.adminCommand({
- applyOps: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 7}},
- {
- "op": "u",
- "ns": t.getFullName(),
- "o2": {_id: 7},
- "o": {$v: NumberLong(0), $set: {z: 1, a: 2}}
- }
- ]
- }));
- assert.eq(res.code, 40682);
-
- // When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set
- // operations get performed in lexicographic order.
- res = assert.commandWorked(db.adminCommand({
- applyOps: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 8}},
- {
- "op": "u",
- "ns": t.getFullName(),
- "o2": {_id: 8},
- "o": {$v: NumberLong(1), $set: {z: 1, a: 2}}
- }
- ]
- }));
- assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
+}
+
+// Insert and update operations on non-existent collections/databases should return
+// NamespaceNotFound.
+testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
+testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound);
+
+// Delete operations on non-existent collections/databases should return OK for idempotency
+// reasons.
+testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {});
+
+assert.commandWorked(db.createCollection(t.getName()));
+var a = assert.commandWorked(
+ db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
+assert.eq(1, t.find().count(), "Valid insert failed");
+assert.eq(true, a.results[0], "Bad result value for valid insert");
+
+a = assert.commandWorked(
+ db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
+assert.eq(1, t.find().count(), "Duplicate insert failed");
+assert.eq(true, a.results[0], "Bad result value for duplicate insert");
+
+var o = {_id: 5, x: 17};
+assert.eq(o, t.findOne(), "Mismatching document inserted.");
+
+// 'o' field is an empty array.
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'i', ns: t.getFullName(), o: []}]}),
+ 'applyOps should fail on insert of object with empty array element');
+
+var res = assert.commandWorked(db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}}
+ ]
+}));
+
+o.x++;
+o.x++;
+
+assert.eq(1, t.find().count(), "Updates increased number of documents");
+assert.eq(o, t.findOne(), "Document doesn't match expected");
+assert.eq(true, res.results[0], "Bad result value for valid update");
+assert.eq(true, res.results[1], "Bad result value for valid update");
+
+// preCondition fully matches
+res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
+});
+
+// The use of preCondition requires applyOps to run atomically. Therefore, it is incompatible
+// with {allowAtomic: false}.
+assert.commandFailedWithCode(
+ db.runCommand({
+ applyOps: [{op: 'u', ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}],
+ allowAtomic: false,
+ }),
+ ErrorCodes.InvalidOptions,
+ 'applyOps should fail when preCondition is present and allowAtomic is false.');
+
+// The use of preCondition is also incompatible with operations that include commands.
+assert.commandFailedWithCode(
+ db.runCommand({
+ applyOps: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {applyOps: []}}],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}],
+ }),
+ ErrorCodes.InvalidOptions,
+ 'applyOps should fail when preCondition is present and operations include commands.');
+
+o.x++;
+o.x++;
+
+assert.eq(1, t.find().count(), "Updates increased number of documents");
+assert.eq(o, t.findOne(), "Document doesn't match expected");
+assert.eq(true, res.results[0], "Bad result value for valid update");
+assert.eq(true, res.results[1], "Bad result value for valid update");
+
+// preCondition doesn't match ns
+res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
+ ],
+ preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}]
+});
+
+assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
+
+// preCondition doesn't match query
+res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
+});
+
+assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
+
+res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}}
+ ]
+});
+
+assert.eq(true, res.results[0], "Valid update failed");
+assert.eq(true, res.results[1], "Valid update failed");
+
+// Ops with transaction numbers are valid.
+const lsid = {
+ "id": UUID("3eea4a58-6018-40b6-8743-6a55783bf902"),
+ "uid": BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=")
+};
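+// ('lsid' identifies a logical session; the 'uid' digest above is assumed to be
+// the SHA-256 hash of an empty user string, i.e. an unauthenticated session.)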
+res = db.runCommand({
+ applyOps: [
+ {
+ op: "i",
+ ns: t.getFullName(),
+ o: {_id: 7, x: 24},
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(0)
+ },
+ {
+ op: "u",
+ ns: t.getFullName(),
+ o2: {_id: 8},
+ o: {$set: {x: 25}},
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(1)
+ },
+ {
+ op: "d",
+ ns: t.getFullName(),
+ o: {_id: 7},
+ lsid: lsid,
+ txnNumber: NumberLong(2),
+ stmtId: NumberInt(0)
+ },
+ ]
+});
+
+assert.eq(true, res.results[0], "Valid insert with transaction number failed");
+assert.eq(true, res.results[1], "Valid update with transaction number failed");
+assert.eq(true, res.results[2], "Valid delete with transaction number failed");
+
+// When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set
+// operations add new fields in lexicographic order.
+res = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 6}},
+ {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}}
+ ]
+}));
+assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
+
+// 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail.
+res = assert.commandFailed(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 7}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 7},
+ "o": {$v: NumberLong(0), $set: {z: 1, a: 2}}
+ }
+ ]
+}));
+assert.eq(res.code, 40682);
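+// (40682 is assumed to be the error code for an unrecognized $v update
+// semantics version.)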
+
+// When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set
+// operations get performed in lexicographic order.
+res = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 8}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 8},
+ "o": {$v: NumberLong(1), $set: {z: 1, a: 2}}
+ }
+ ]
+}));
+assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
})();
diff --git a/jstests/core/apply_ops2.js b/jstests/core/apply_ops2.js
index caf30364c48..690b8545e4a 100644
--- a/jstests/core/apply_ops2.js
+++ b/jstests/core/apply_ops2.js
@@ -52,12 +52,7 @@ print("Testing applyOps with default alwaysUpsert");
res = db.runCommand({
applyOps: [
{op: "u", ns: t.getFullName(), o2: {_id: 1}, o: {$set: {x: "upsert=default existing"}}},
- {
- op: "u",
- ns: t.getFullName(),
- o2: {_id: 4},
- o: {$set: {x: "upsert=defaults non-existing"}}
- }
+ {op: "u", ns: t.getFullName(), o2: {_id: 4}, o: {$set: {x: "upsert=defaults non-existing"}}}
]
});
diff --git a/jstests/core/apply_ops_dups.js b/jstests/core/apply_ops_dups.js
index e18cd01d3f6..85bc04437a6 100644
--- a/jstests/core/apply_ops_dups.js
+++ b/jstests/core/apply_ops_dups.js
@@ -7,33 +7,33 @@
// ]
(function() {
- "use strict";
- var t = db.apply_ops_dups;
- t.drop();
+"use strict";
+var t = db.apply_ops_dups;
+t.drop();
- // Check that duplicate _id fields don't cause an error
- assert.writeOK(t.insert({_id: 0, x: 1}));
- assert.commandWorked(t.createIndex({x: 1}, {unique: true}));
- var a = assert.commandWorked(db.adminCommand({
- applyOps: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: -1}},
- {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 0}}
- ]
- }));
- printjson(a);
- printjson(t.find().toArray());
- assert.eq(2, t.find().count(), "Invalid insert worked");
- assert.eq(true, a.results[0], "Valid insert was rejected");
- assert.eq(true, a.results[1], "Insert should have not failed (but should be ignored");
- printjson(t.find().toArray());
+// Check that duplicate _id fields don't cause an error
+assert.writeOK(t.insert({_id: 0, x: 1}));
+assert.commandWorked(t.createIndex({x: 1}, {unique: true}));
+var a = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: -1}},
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 0}}
+ ]
+}));
+printjson(a);
+printjson(t.find().toArray());
+assert.eq(2, t.find().count(), "Invalid insert worked");
+assert.eq(true, a.results[0], "Valid insert was rejected");
+assert.eq(true, a.results[1], "Insert should not have failed (but should be ignored)");
+printjson(t.find().toArray());
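+
+// (Both results are true because applyOps inserts are assumed to be applied with
+// idempotent, upsert-like semantics on _id: re-applying an insert for an existing
+// _id is a no-op rather than a DuplicateKey error.)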
- // Check that duplicates on non-_id unique indexes cause errors
- var a = assert.commandFailedWithCode(db.adminCommand({
- applyOps: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 1, x: 0}},
- {"op": "i", "ns": t.getFullName(), "o": {_id: 2, x: 1}}
- ]
- }),
- 11000 /*DuplicateKey*/);
- assert.eq(2, t.find().count(), "Invalid insert worked");
+// Check that duplicates on non-_id unique indexes cause errors
+var a = assert.commandFailedWithCode(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 1, x: 0}},
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 2, x: 1}}
+ ]
+}),
+ 11000 /*DuplicateKey*/);
+assert.eq(2, t.find().count(), "Invalid insert worked");
})();
diff --git a/jstests/core/apply_ops_index_collation.js b/jstests/core/apply_ops_index_collation.js
index d58d3659223..2447a32e101 100644
--- a/jstests/core/apply_ops_index_collation.js
+++ b/jstests/core/apply_ops_index_collation.js
@@ -13,78 +13,78 @@
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
- load('jstests/libs/uuid_util.js');
+load("jstests/libs/get_index_helpers.js");
+load('jstests/libs/uuid_util.js');
- const coll = db.apply_ops_index_collation;
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
- const uuid = getUUIDFromListCollections(db, coll.getName());
+const coll = db.apply_ops_index_collation;
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
+const uuid = getUUIDFromListCollections(db, coll.getName());
- // An index created using a createIndexes-style oplog entry with a non-simple collation does not
- // inherit the collection default collation.
- let res = assert.commandWorked(db.adminCommand({
- applyOps: [{
- op: "c",
- ns: coll.getFullName(),
- ui: uuid,
- o: {
- createIndexes: coll.getFullName(),
- v: 2,
- key: {a: 1},
- name: "a_1_en",
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false,
- version: "57.1"
- }
+// An index created using a createIndexes-style oplog entry with a non-simple collation does not
+// inherit the collection default collation.
+let res = assert.commandWorked(db.adminCommand({
+ applyOps: [{
+ op: "c",
+ ns: coll.getFullName(),
+ ui: uuid,
+ o: {
+ createIndexes: coll.getFullName(),
+ v: 2,
+ key: {a: 1},
+ name: "a_1_en",
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false,
+ version: "57.1"
}
- }]
- }));
- let allIndexes = coll.getIndexes();
- let spec = GetIndexHelpers.findByName(allIndexes, "a_1_en");
- assert.neq(null, spec, "Index 'a_1_en' not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, tojson(spec));
- assert.eq("en_US", spec.collation.locale, tojson(spec));
+ }
+ }]
+}));
+let allIndexes = coll.getIndexes();
+let spec = GetIndexHelpers.findByName(allIndexes, "a_1_en");
+assert.neq(null, spec, "Index 'a_1_en' not found: " + tojson(allIndexes));
+assert.eq(2, spec.v, tojson(spec));
+assert.eq("en_US", spec.collation.locale, tojson(spec));
- // An index created using a createIndexes-style oplog entry with a simple collation does not
- // inherit the collection default collation.
- res = assert.commandWorked(db.adminCommand({
- applyOps: [{
- op: "c",
- ns: coll.getFullName(),
- ui: uuid,
- o: {createIndexes: coll.getFullName(), v: 2, key: {a: 1}, name: "a_1"}
- }]
- }));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByName(allIndexes, "a_1");
- assert.neq(null, spec, "Index 'a_1' not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, tojson(spec));
- assert(!spec.hasOwnProperty("collation"), tojson(spec));
+// An index created using a createIndexes-style oplog entry with a simple collation does not
+// inherit the collection default collation.
+res = assert.commandWorked(db.adminCommand({
+ applyOps: [{
+ op: "c",
+ ns: coll.getFullName(),
+ ui: uuid,
+ o: {createIndexes: coll.getFullName(), v: 2, key: {a: 1}, name: "a_1"}
+ }]
+}));
+allIndexes = coll.getIndexes();
+spec = GetIndexHelpers.findByName(allIndexes, "a_1");
+assert.neq(null, spec, "Index 'a_1' not found: " + tojson(allIndexes));
+assert.eq(2, spec.v, tojson(spec));
+assert(!spec.hasOwnProperty("collation"), tojson(spec));
- // A v=1 index created using a createIndexes-style oplog entry does not inherit the collection
- // default collation.
- res = assert.commandWorked(db.adminCommand({
- applyOps: [{
- op: "c",
- ns: coll.getFullName(),
- ui: uuid,
- o: {createIndexes: coll.getFullName(), v: 1, key: {b: 1}, name: "b_1"}
- }]
- }));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByName(allIndexes, "b_1");
- assert.neq(null, spec, "Index 'b_1' not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, tojson(spec));
- assert(!spec.hasOwnProperty("collation"), tojson(spec));
+// A v=1 index created using a createIndexes-style oplog entry does not inherit the collection
+// default collation.
+res = assert.commandWorked(db.adminCommand({
+ applyOps: [{
+ op: "c",
+ ns: coll.getFullName(),
+ ui: uuid,
+ o: {createIndexes: coll.getFullName(), v: 1, key: {b: 1}, name: "b_1"}
+ }]
+}));
+allIndexes = coll.getIndexes();
+spec = GetIndexHelpers.findByName(allIndexes, "b_1");
+assert.neq(null, spec, "Index 'b_1' not found: " + tojson(allIndexes));
+assert.eq(1, spec.v, tojson(spec));
+assert(!spec.hasOwnProperty("collation"), tojson(spec));
})();
diff --git a/jstests/core/apply_ops_invalid_index_spec.js b/jstests/core/apply_ops_invalid_index_spec.js
index 5ed9e6d8ee6..d602cae29c1 100644
--- a/jstests/core/apply_ops_invalid_index_spec.js
+++ b/jstests/core/apply_ops_invalid_index_spec.js
@@ -15,71 +15,71 @@
*/
(function() {
- 'use strict';
+'use strict';
- const t = db.apply_ops_invalid_index_spec;
- t.drop();
+const t = db.apply_ops_invalid_index_spec;
+t.drop();
- const collNs = t.getFullName();
- const cmdNs = db.getName() + '.$cmd';
- const systemIndexesNs = db.getCollection('system.indexes').getFullName();
+const collNs = t.getFullName();
+const cmdNs = db.getName() + '.$cmd';
+const systemIndexesNs = db.getCollection('system.indexes').getFullName();
- assert.commandWorked(db.createCollection(t.getName()));
- assert.writeOK(t.save({_id: 100, a: 100}));
+assert.commandWorked(db.createCollection(t.getName()));
+assert.writeOK(t.save({_id: 100, a: 100}));
- // Tests that db.collection.createIndex() fails when given an index spec containing an unknown
- // field.
- assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 2, name: 'a_1_base_v2', unknown: 1}),
- ErrorCodes.InvalidIndexSpecificationOption);
- assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 1, name: 'a_1_base_v1', unknown: 1}),
- ErrorCodes.InvalidIndexSpecificationOption);
+// Tests that db.collection.createIndex() fails when given an index spec containing an unknown
+// field.
+assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 2, name: 'a_1_base_v2', unknown: 1}),
+ ErrorCodes.InvalidIndexSpecificationOption);
+assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 1, name: 'a_1_base_v1', unknown: 1}),
+ ErrorCodes.InvalidIndexSpecificationOption);
- // A createIndexes command for a v:2 index with an unknown field in the index spec should fail.
- assert.commandFailedWithCode(db.adminCommand({
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- o: {
- createIndexes: t.getName(),
- v: 2,
- key: {a: 1},
- name: 'a_1_create_v2',
- unknown: 1,
- },
- }],
- }),
- ErrorCodes.InvalidIndexSpecificationOption);
+// A createIndexes command for a v:2 index with an unknown field in the index spec should fail.
+assert.commandFailedWithCode(db.adminCommand({
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ o: {
+ createIndexes: t.getName(),
+ v: 2,
+ key: {a: 1},
+ name: 'a_1_create_v2',
+ unknown: 1,
+ },
+ }],
+}),
+ ErrorCodes.InvalidIndexSpecificationOption);
- // A createIndexes command for a background index with an unknown field in the index spec should
- // fail.
- assert.commandFailedWithCode(db.adminCommand({
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- o: {
- createIndexes: t.getName(),
- v: 2,
- key: {a: 1},
- background: true,
- name: 'a_1_background',
- unknown: 1,
- },
- }],
- }),
- ErrorCodes.InvalidIndexSpecificationOption);
+// A createIndexes command for a background index with an unknown field in the index spec should
+// fail.
+assert.commandFailedWithCode(db.adminCommand({
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ o: {
+ createIndexes: t.getName(),
+ v: 2,
+ key: {a: 1},
+ background: true,
+ name: 'a_1_background',
+ unknown: 1,
+ },
+ }],
+}),
+ ErrorCodes.InvalidIndexSpecificationOption);
- // A createIndexes command for a v:1 index with an unknown field in the index spec should work.
- const res1 = assert.commandWorked(db.adminCommand({
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- o: {
- createIndexes: t.getName(),
- v: 1,
- key: {a: 1},
- name: 'a_1_create_v1',
- unknown: 1,
- },
- }],
- }));
+// A createIndexes command for a v:1 index with an unknown field in the index spec should work.
+const res1 = assert.commandWorked(db.adminCommand({
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ o: {
+ createIndexes: t.getName(),
+ v: 1,
+ key: {a: 1},
+ name: 'a_1_create_v1',
+ unknown: 1,
+ },
+ }],
+}));
})();
diff --git a/jstests/core/apply_ops_without_ns.js b/jstests/core/apply_ops_without_ns.js
index 3b488078a31..f23587f4a08 100644
--- a/jstests/core/apply_ops_without_ns.js
+++ b/jstests/core/apply_ops_without_ns.js
@@ -6,10 +6,9 @@
// ]
(function() {
- 'use strict';
+'use strict';
- // SERVER-33854: This should fail and not cause any invalid memory access.
- assert.commandFailed(db.adminCommand({
- applyOps: [{'op': 'c', 'ns': 'admin.$cmd', 'o': {applyOps: [{'op': 'i', 'o': {x: 1}}]}}]
- }));
+// SERVER-33854: This should fail and not cause any invalid memory access.
+assert.commandFailed(db.adminCommand(
+ {applyOps: [{'op': 'c', 'ns': 'admin.$cmd', 'o': {applyOps: [{'op': 'i', 'o': {x: 1}}]}}]}));
})();
diff --git a/jstests/core/arrayfind8.js b/jstests/core/arrayfind8.js
index f9693182a7a..87a3a8d701a 100644
--- a/jstests/core/arrayfind8.js
+++ b/jstests/core/arrayfind8.js
@@ -3,146 +3,142 @@
* Includes tests for bugs described in SERVER-1264 and SERVER-4180.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.jstests_arrayfind8;
- coll.drop();
-
- // May be changed during the test.
- let currentIndexSpec = {a: 1};
-
- /**
- * Check that the query results match the documents in the 'expected' array.
- */
- function assertResults(expected, query, context) {
- assert.eq(expected.length, coll.count(query), 'unexpected count in ' + context);
- const results = coll.find(query).toArray();
- const resultsAOnly = results.map((r) => r.a);
- assert.sameMembers(resultsAOnly, expected);
- }
-
- /**
- * Check matching for different query types.
- * @param bothMatch - document matched by both standardQuery and elemMatchQuery
- * @param elemMatch - document matched by elemMatchQuery but not standardQuery
- * @param nonElemMatch - document matched by standardQuery but not elemMatchQuery
- */
- function checkMatch(
- bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, context) {
- function mayPush(arr, elt) {
- if (elt) {
- arr.push(elt);
- }
- }
+const coll = db.jstests_arrayfind8;
+coll.drop();
- let expectedStandardQueryResults = [];
- mayPush(expectedStandardQueryResults, bothMatch);
- mayPush(expectedStandardQueryResults, nonElemMatch);
- assertResults(expectedStandardQueryResults, standardQuery, context + ' standard query');
+// May be changed during the test.
+let currentIndexSpec = {a: 1};
- let expectedElemMatchQueryResults = [];
- mayPush(expectedElemMatchQueryResults, bothMatch);
- mayPush(expectedElemMatchQueryResults, elemMatch);
- assertResults(expectedElemMatchQueryResults, elemMatchQuery, context + ' elemMatch query');
- }
-
- /**
- * Check matching and index bounds for different query types.
- * @param subQuery - part of a query, to be provided as is for a standard query and within a
- * $elemMatch clause for a $elemMatch query
- * @param bothMatch - document matched by both standardQuery and elemMatchQuery
- * @param elemMatch - document matched by elemMatchQuery but not standardQuery
- * @param nonElemMatch - document matched by standardQuery but not elemMatchQuery
- * @param additionalConstraints - additional query parameters not generated from @param subQuery
- */
- function checkQuery(subQuery, bothMatch, elemMatch, nonElemMatch, additionalConstraints) {
- coll.drop();
- additionalConstraints = additionalConstraints || {};
-
- // Construct standard and elemMatch queries from subQuery.
- const firstSubQueryKey = Object.keySet(subQuery)[0];
- let standardQuery = null;
- if (firstSubQueryKey[0] == '$') {
- standardQuery = {$and: [{a: subQuery}, additionalConstraints]};
- } else {
- // If the subQuery contains a field rather than operators, append to the 'a' field.
- let modifiedSubQuery = {};
- modifiedSubQuery['a.' + firstSubQueryKey] = subQuery[firstSubQueryKey];
- standardQuery = {$and: [modifiedSubQuery, additionalConstraints]};
- }
- const elemMatchQuery = {$and: [{a: {$elemMatch: subQuery}}, additionalConstraints]};
+/**
+ * Check that the query results match the documents in the 'expected' array.
+ */
+function assertResults(expected, query, context) {
+ assert.eq(expected.length, coll.count(query), 'unexpected count in ' + context);
+ const results = coll.find(query).toArray();
+ const resultsAOnly = results.map((r) => r.a);
+ assert.sameMembers(resultsAOnly, expected);
+}
- function insertValueIfNotNull(val) {
- if (val) {
- assert.commandWorked(coll.insert({a: val}));
- }
+/**
+ * Check matching for different query types.
+ * @param bothMatch - document matched by both standardQuery and elemMatchQuery
+ * @param elemMatch - document matched by elemMatchQuery but not standardQuery
+ * @param nonElemMatch - document matched by standardQuery but not elemMatchQuery
+ */
+function checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, context) {
+ function mayPush(arr, elt) {
+ if (elt) {
+ arr.push(elt);
}
+ }
- // Save all documents and check matching without indexes.
- insertValueIfNotNull(bothMatch);
- insertValueIfNotNull(elemMatch);
- insertValueIfNotNull(nonElemMatch);
-
- checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'unindexed');
-
- // Check matching and index bounds for a single key index.
+ let expectedStandardQueryResults = [];
+ mayPush(expectedStandardQueryResults, bothMatch);
+ mayPush(expectedStandardQueryResults, nonElemMatch);
+ assertResults(expectedStandardQueryResults, standardQuery, context + ' standard query');
- assert.eq(coll.drop(), true);
- insertValueIfNotNull(bothMatch);
- insertValueIfNotNull(elemMatch);
- // The nonElemMatch document is not tested here, as it will often make the index multikey.
- assert.commandWorked(coll.createIndex(currentIndexSpec));
- checkMatch(bothMatch, elemMatch, null, standardQuery, elemMatchQuery, 'single key index');
+ let expectedElemMatchQueryResults = [];
+ mayPush(expectedElemMatchQueryResults, bothMatch);
+ mayPush(expectedElemMatchQueryResults, elemMatch);
+ assertResults(expectedElemMatchQueryResults, elemMatchQuery, context + ' elemMatch query');
+}
- // Check matching and index bounds for a multikey index.
+/**
+ * Check matching and index bounds for different query types.
+ * @param subQuery - part of a query, to be provided as is for a standard query and within a
+ * $elemMatch clause for a $elemMatch query
+ * @param bothMatch - document matched by both standardQuery and elemMatchQuery
+ * @param elemMatch - document matched by elemMatchQuery but not standardQuery
+ * @param nonElemMatch - document matched by standardQuery but not elemMatchQuery
+ * @param additionalConstraints - additional query parameters not generated from @param subQuery
+ */
+function checkQuery(subQuery, bothMatch, elemMatch, nonElemMatch, additionalConstraints) {
+ coll.drop();
+ additionalConstraints = additionalConstraints || {};
+
+ // Construct standard and elemMatch queries from subQuery.
+ const firstSubQueryKey = Object.keySet(subQuery)[0];
+ let standardQuery = null;
+ if (firstSubQueryKey[0] == '$') {
+ standardQuery = {$and: [{a: subQuery}, additionalConstraints]};
+ } else {
+ // If the subQuery contains a field rather than operators, append to the 'a' field.
+ let modifiedSubQuery = {};
+ modifiedSubQuery['a.' + firstSubQueryKey] = subQuery[firstSubQueryKey];
+ standardQuery = {$and: [modifiedSubQuery, additionalConstraints]};
+ }
+ const elemMatchQuery = {$and: [{a: {$elemMatch: subQuery}}, additionalConstraints]};
- // Now the nonElemMatch document is tested.
- insertValueIfNotNull(nonElemMatch);
- // Force the index to be multikey.
- assert.commandWorked(coll.insert({a: [-1, -2]}));
- assert.commandWorked(coll.insert({a: {b: [-1, -2]}}));
- checkMatch(
- bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'multikey index');
+ function insertValueIfNotNull(val) {
+ if (val) {
+ assert.commandWorked(coll.insert({a: val}));
+ }
}
- // Basic test.
- checkQuery({$gt: 4}, [5]);
-
- // Multiple constraints within a $elemMatch clause.
- checkQuery({$gt: 4, $lt: 6}, [5], null, [3, 7]);
- checkQuery({$gt: 4, $not: {$gte: 6}}, [5]);
- checkQuery({$gt: 4, $not: {$ne: 6}}, [6]);
- checkQuery({$gte: 5, $lte: 5}, [5], null, [4, 6]);
- checkQuery({$in: [4, 6], $gt: 5}, [6], null, [4, 7]);
- checkQuery({$regex: '^a'}, ['a']);
-
- // Some constraints within a $elemMatch clause and other constraints outside of it.
- checkQuery({$gt: 4}, [5], null, null, {a: {$lt: 6}});
- checkQuery({$gte: 5}, [5], null, null, {a: {$lte: 5}});
- checkQuery({$in: [4, 6]}, [6], null, null, {a: {$gt: 5}});
-
- // Constraints in different $elemMatch clauses.
- checkQuery({$gt: 4}, [5], null, null, {a: {$elemMatch: {$lt: 6}}});
- checkQuery({$gt: 4}, [3, 7], null, null, {a: {$elemMatch: {$lt: 6}}});
- checkQuery({$gte: 5}, [5], null, null, {a: {$elemMatch: {$lte: 5}}});
- checkQuery({$in: [4, 6]}, [6], null, null, {a: {$elemMatch: {$gt: 5}}});
-
- checkQuery({$elemMatch: {$in: [5]}}, null, [[5]], [5], null);
-
- currentIndexSpec = {"a.b": 1};
- checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: 1}]], [{b: 1}], null);
- checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: [0, 2]}]], [{b: [0, 2]}], null);
-
- // Constraints for a top level (SERVER-1264 style) $elemMatch nested within a non top level
- // $elemMatch.
- checkQuery({b: {$elemMatch: {$gte: 1, $lte: 1}}}, [{b: [1]}]);
- checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [1]}]);
-
- checkQuery(
- {b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [2]}], null, null, {'a.b': {$in: [2, 5]}});
- checkQuery({b: {$elemMatch: {$in: [1, 2]}, $in: [2, 3]}},
- [{b: [2]}],
- null,
- [{b: [1]}, {b: [3]}],
- null);
+ // Save all documents and check matching without indexes.
+ insertValueIfNotNull(bothMatch);
+ insertValueIfNotNull(elemMatch);
+ insertValueIfNotNull(nonElemMatch);
+
+ checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'unindexed');
+
+ // Check matching and index bounds for a single key index.
+
+ assert.eq(coll.drop(), true);
+ insertValueIfNotNull(bothMatch);
+ insertValueIfNotNull(elemMatch);
+ // The nonElemMatch document is not tested here, as it will often make the index multikey.
+ assert.commandWorked(coll.createIndex(currentIndexSpec));
+ checkMatch(bothMatch, elemMatch, null, standardQuery, elemMatchQuery, 'single key index');
+
+ // Check matching and index bounds for a multikey index.
+
+ // Now the nonElemMatch document is tested.
+ insertValueIfNotNull(nonElemMatch);
+ // Force the index to be multikey.
+ assert.commandWorked(coll.insert({a: [-1, -2]}));
+ assert.commandWorked(coll.insert({a: {b: [-1, -2]}}));
+ checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'multikey index');
+}
+
+// Basic test.
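+// A document {a: [5]} satisfies both {a: {$gt: 4}} and {a: {$elemMatch: {$gt: 4}}}.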
+checkQuery({$gt: 4}, [5]);
+
+// Multiple constraints within a $elemMatch clause.
+checkQuery({$gt: 4, $lt: 6}, [5], null, [3, 7]);
+checkQuery({$gt: 4, $not: {$gte: 6}}, [5]);
+checkQuery({$gt: 4, $not: {$ne: 6}}, [6]);
+checkQuery({$gte: 5, $lte: 5}, [5], null, [4, 6]);
+checkQuery({$in: [4, 6], $gt: 5}, [6], null, [4, 7]);
+checkQuery({$regex: '^a'}, ['a']);
+
+// Some constraints within a $elemMatch clause and other constraints outside of it.
+checkQuery({$gt: 4}, [5], null, null, {a: {$lt: 6}});
+checkQuery({$gte: 5}, [5], null, null, {a: {$lte: 5}});
+checkQuery({$in: [4, 6]}, [6], null, null, {a: {$gt: 5}});
+
+// Constraints in different $elemMatch clauses.
+checkQuery({$gt: 4}, [5], null, null, {a: {$elemMatch: {$lt: 6}}});
+checkQuery({$gt: 4}, [3, 7], null, null, {a: {$elemMatch: {$lt: 6}}});
+checkQuery({$gte: 5}, [5], null, null, {a: {$elemMatch: {$lte: 5}}});
+checkQuery({$in: [4, 6]}, [6], null, null, {a: {$elemMatch: {$gt: 5}}});
+
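+// Here the subQuery is itself a $elemMatch, so the standard query becomes
+// {a: {$elemMatch: {$in: [5]}}}, which a flat array like [5] satisfies, while the generated
+// $elemMatch query nests a second $elemMatch and only matches a doubly-nested array like [[5]].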
+checkQuery({$elemMatch: {$in: [5]}}, null, [[5]], [5], null);
+
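+// The remaining cases are checked against an index on the dotted path "a.b" rather than "a".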
+currentIndexSpec = {
+ "a.b": 1
+};
+checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: 1}]], [{b: 1}], null);
+checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: [0, 2]}]], [{b: [0, 2]}], null);
+
+// Constraints for a top level (SERVER-1264 style) $elemMatch nested within a non top level
+// $elemMatch.
+checkQuery({b: {$elemMatch: {$gte: 1, $lte: 1}}}, [{b: [1]}]);
+checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [1]}]);
+
+checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [2]}], null, null, {'a.b': {$in: [2, 5]}});
+checkQuery(
+ {b: {$elemMatch: {$in: [1, 2]}, $in: [2, 3]}}, [{b: [2]}], null, [{b: [1]}, {b: [3]}], null);
})();
diff --git a/jstests/core/autocomplete.js b/jstests/core/autocomplete.js
index 29509b951b9..5e9856cc722 100644
--- a/jstests/core/autocomplete.js
+++ b/jstests/core/autocomplete.js
@@ -2,54 +2,54 @@
* Validate that autocomplete works for various JavaScript types implemented in C++.
*/
(function() {
- 'use strict';
+'use strict';
- function testAutoComplete(prefix) {
- // This method updates a global object with an array of strings on success.
- shellAutocomplete(prefix);
- return __autocomplete__;
- }
+function testAutoComplete(prefix) {
+ // This method updates a global object with an array of strings on success.
+ shellAutocomplete(prefix);
+ return __autocomplete__;
+}
- // Create a collection.
- db.auto_complete_coll.insert({});
+// Create a collection.
+db.auto_complete_coll.insert({});
- // Validate DB auto completion.
- const db_stuff = testAutoComplete('db.');
+// Validate DB auto completion.
+const db_stuff = testAutoComplete('db.');
- // Verify we enumerate built-in methods.
- assert.contains('db.prototype', db_stuff);
- assert.contains('db.hasOwnProperty', db_stuff);
- assert.contains('db.toString(', db_stuff);
+// Verify we enumerate built-in methods.
+assert.contains('db.prototype', db_stuff);
+assert.contains('db.hasOwnProperty', db_stuff);
+assert.contains('db.toString(', db_stuff);
- // Verify we have some methods we added.
- assert.contains('db.adminCommand(', db_stuff);
- assert.contains('db.runCommand(', db_stuff);
- assert.contains('db.watch(', db_stuff);
+// Verify we have some methods we added.
+assert.contains('db.adminCommand(', db_stuff);
+assert.contains('db.runCommand(', db_stuff);
+assert.contains('db.watch(', db_stuff);
- // Verify we enumerate collections.
- assert.contains('db.auto_complete_coll', db_stuff);
+// Verify we enumerate collections.
+assert.contains('db.auto_complete_coll', db_stuff);
- // Validate Collection autocompletion.
- const coll_stuff = testAutoComplete('db.auto_complete_coll.');
+// Validate Collection autocompletion.
+const coll_stuff = testAutoComplete('db.auto_complete_coll.');
- // Verify we enumerate built-in methods.
- assert.contains('db.auto_complete_coll.prototype', coll_stuff);
- assert.contains('db.auto_complete_coll.hasOwnProperty', coll_stuff);
- assert.contains('db.auto_complete_coll.toString(', coll_stuff);
+// Verify we enumerate built-in methods.
+assert.contains('db.auto_complete_coll.prototype', coll_stuff);
+assert.contains('db.auto_complete_coll.hasOwnProperty', coll_stuff);
+assert.contains('db.auto_complete_coll.toString(', coll_stuff);
- // Verify we have some methods we added.
- assert.contains('db.auto_complete_coll.aggregate(', coll_stuff);
- assert.contains('db.auto_complete_coll.runCommand(', coll_stuff);
+// Verify we have some methods we added.
+assert.contains('db.auto_complete_coll.aggregate(', coll_stuff);
+assert.contains('db.auto_complete_coll.runCommand(', coll_stuff);
- // Validate autocompletion when prefix is specified.
- const empty_stuff = testAutoComplete('');
+// Validate autocompletion when an empty prefix is specified.
+const empty_stuff = testAutoComplete('');
- assert.contains('Array(', empty_stuff);
- assert.contains('print(', empty_stuff);
- assert.contains('ErrorCodes', empty_stuff);
+assert.contains('Array(', empty_stuff);
+assert.contains('print(', empty_stuff);
+assert.contains('ErrorCodes', empty_stuff);
- // Validate autocompletion returns ErrorCodes when ErrorCodes is specified.
- const error_codes_autocomplete = testAutoComplete('ErrorCodes.');
+// Validate autocompletion returns ErrorCodes when ErrorCodes is specified.
+const error_codes_autocomplete = testAutoComplete('ErrorCodes.');
- assert.contains('ErrorCodes.BadValue', error_codes_autocomplete);
+assert.contains('ErrorCodes.BadValue', error_codes_autocomplete);
})();
diff --git a/jstests/core/automation_setparameter.js b/jstests/core/automation_setparameter.js
index 5e8ea62f338..6482fdebbfb 100644
--- a/jstests/core/automation_setparameter.js
+++ b/jstests/core/automation_setparameter.js
@@ -8,50 +8,49 @@
(function() {
- // Run isMaster, and if it contains an automation service descriptor, save it, so we can restore
- // it later. If it wasn't set, original will just be undefined.
- var res = assert.commandWorked(db.runCommand({isMaster: 1}));
- var original = res.automationServiceDescriptor;
-
- // Try to set the descriptor to an invalid value: only strings are supported.
- assert.commandFailedWithCode(db.adminCommand({setParameter: 1, automationServiceDescriptor: 0}),
- ErrorCodes.TypeMismatch);
-
- // Try to set the descriptor to an invalid value: Only 64 characters are allowed.
- assert.commandFailedWithCode(db.adminCommand({
- setParameter: 1,
- automationServiceDescriptor:
- "1234567812345678123456781234567812345678123456781234567812345678X"
- }),
- ErrorCodes.Overflow);
-
- // Short strings are okay.
- res = assert.commandWorked(
- db.adminCommand({setParameter: 1, automationServiceDescriptor: "some_service"}));
-
- // Verify that the setParameter 'was' field contains what we expected.
- if (original)
- assert.eq(original, res.was);
-
- // Verify that the 'some_service' string is now echoed back to us in isMaster
- res = assert.commandWorked(db.runCommand({isMaster: 1}));
- assert.eq(res.automationServiceDescriptor, "some_service");
-
- // Verify that setting the descriptor to the empty string is ok, and prevents it from being
- // echoed back
- assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: ""}));
- res = assert.commandWorked(db.runCommand({isMaster: 1}));
- assert(!res.hasOwnProperty('automationServiceDescriptor'));
-
- // Verify that the shell has the correct prompt.
- var originalPrompt = db.getMongo().promptPrefix;
- assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: "set"}));
- db.getMongo().promptPrefix = undefined;
- assert(/\[automated\]/.test(defaultPrompt()));
-
- // Restore whatever was there originally.
- if (!original)
- original = "";
- assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: original}));
- db.getMongo().promptPrefix = originalPrompt;
+// Run isMaster, and if it contains an automation service descriptor, save it, so we can restore
+// it later. If it wasn't set, original will just be undefined.
+var res = assert.commandWorked(db.runCommand({isMaster: 1}));
+var original = res.automationServiceDescriptor;
+
+// Try to set the descriptor to an invalid value: only strings are supported.
+assert.commandFailedWithCode(db.adminCommand({setParameter: 1, automationServiceDescriptor: 0}),
+ ErrorCodes.TypeMismatch);
+
+// Try to set the descriptor to an invalid value: only 64 characters are allowed.
+assert.commandFailedWithCode(db.adminCommand({
+ setParameter: 1,
+ automationServiceDescriptor: "1234567812345678123456781234567812345678123456781234567812345678X"
+}),
+ ErrorCodes.Overflow);
+
+// Short strings are okay.
+res = assert.commandWorked(
+ db.adminCommand({setParameter: 1, automationServiceDescriptor: "some_service"}));
+
+// Verify that the setParameter 'was' field contains what we expected.
+if (original)
+ assert.eq(original, res.was);
+
+// Verify that the 'some_service' string is now echoed back to us in isMaster
+res = assert.commandWorked(db.runCommand({isMaster: 1}));
+assert.eq(res.automationServiceDescriptor, "some_service");
+
+// Verify that setting the descriptor to the empty string is OK and prevents it from being
+// echoed back in isMaster.
+assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: ""}));
+res = assert.commandWorked(db.runCommand({isMaster: 1}));
+assert(!res.hasOwnProperty('automationServiceDescriptor'));
+
+// Verify that the shell has the correct prompt.
+var originalPrompt = db.getMongo().promptPrefix;
+assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: "set"}));
+db.getMongo().promptPrefix = undefined;
+assert(/\[automated\]/.test(defaultPrompt()));
+
+// Restore whatever was there originally.
+if (!original)
+ original = "";
+assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: original}));
+db.getMongo().promptPrefix = originalPrompt;
}());
diff --git a/jstests/core/awaitdata_getmore_cmd.js b/jstests/core/awaitdata_getmore_cmd.js
index 11e178f0136..eb1fb194a32 100644
--- a/jstests/core/awaitdata_getmore_cmd.js
+++ b/jstests/core/awaitdata_getmore_cmd.js
@@ -11,82 +11,95 @@
// ]
(function() {
- 'use strict';
-
- load("jstests/libs/fixture_helpers.js");
-
- var cmdRes;
- var cursorId;
- var defaultBatchSize = 101;
- var collName = 'await_data';
- var coll = db[collName];
-
- // Create a non-capped collection with 10 documents.
- coll.drop();
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- // Find with tailable flag set should fail for a non-capped collection.
- cmdRes = db.runCommand({find: collName, tailable: true});
- assert.commandFailed(cmdRes);
-
- // Should also fail in the non-capped case if both the tailable and awaitData flags are set.
- cmdRes = db.runCommand({find: collName, tailable: true, awaitData: true});
- assert.commandFailed(cmdRes);
-
- // With a non-existent collection, should succeed but return no data and a closed cursor.
- coll.drop();
- cmdRes = assert.commandWorked(db.runCommand({find: collName, tailable: true}));
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.firstBatch.length, 0);
-
- // Create a capped collection with 10 documents.
- assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048}));
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- // GetMore should succeed if query has awaitData but no maxTimeMS is supplied.
- cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 2);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
-
- // Should also succeed if maxTimeMS is supplied on the original find.
- const sixtyMinutes = 60 * 60 * 1000;
- cmdRes = db.runCommand(
- {find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: sixtyMinutes});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 2);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
-
- // Check that we can set up a tailable cursor over the capped collection.
- cmdRes = db.runCommand({find: collName, batchSize: 5, awaitData: true, tailable: true});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 5);
-
- // Check that tailing the capped collection with awaitData eventually ends up returning an empty
- // batch after hitting the timeout.
- cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 2);
-
- // Issue getMore until we get an empty batch of results.
+'use strict';
+
+load("jstests/libs/fixture_helpers.js");
+
+var cmdRes;
+var cursorId;
+var defaultBatchSize = 101;
+var collName = 'await_data';
+var coll = db[collName];
+
+// Create a non-capped collection with 10 documents.
+coll.drop();
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+// Find with tailable flag set should fail for a non-capped collection.
+cmdRes = db.runCommand({find: collName, tailable: true});
+assert.commandFailed(cmdRes);
+
+// Should also fail in the non-capped case if both the tailable and awaitData flags are set.
+cmdRes = db.runCommand({find: collName, tailable: true, awaitData: true});
+assert.commandFailed(cmdRes);
+
+// With a non-existent collection, should succeed but return no data and a closed cursor.
+coll.drop();
+cmdRes = assert.commandWorked(db.runCommand({find: collName, tailable: true}));
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.firstBatch.length, 0);
+
+// Create a capped collection with 10 documents.
+assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048}));
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+// GetMore should succeed if query has awaitData but no maxTimeMS is supplied.
+cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 2);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+
+// Should also succeed if maxTimeMS is supplied on the original find.
+const sixtyMinutes = 60 * 60 * 1000;
+cmdRes = db.runCommand(
+ {find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: sixtyMinutes});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 2);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+
+// Check that we can set up a tailable cursor over the capped collection.
+cmdRes = db.runCommand({find: collName, batchSize: 5, awaitData: true, tailable: true});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 5);
+
+// Check that tailing the capped collection with awaitData eventually ends up returning an empty
+// batch after hitting the timeout.
+cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 2);
+
+// Issue getMore until we get an empty batch of results.
+cmdRes = db.runCommand({
+ getMore: cmdRes.cursor.id,
+ collection: coll.getName(),
+ batchSize: NumberInt(2),
+ maxTimeMS: 4000
+});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+
+// Keep issuing getMore until we get an empty batch after the timeout expires.
+while (cmdRes.cursor.nextBatch.length > 0) {
+ var now = new Date();
cmdRes = db.runCommand({
getMore: cmdRes.cursor.id,
collection: coll.getName(),
@@ -96,111 +109,95 @@
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
+}
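+// The final getMore returned an empty batch only after blocking on awaitData, so at least
+// ~2 seconds (half of the 4000ms maxTimeMS) should have elapsed.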
+assert.gte((new Date()) - now, 2000);
- // Keep issuing getMore until we get an empty batch after the timeout expires.
- while (cmdRes.cursor.nextBatch.length > 0) {
- var now = new Date();
- cmdRes = db.runCommand({
- getMore: cmdRes.cursor.id,
- collection: coll.getName(),
- batchSize: NumberInt(2),
- maxTimeMS: 4000
- });
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- }
- assert.gte((new Date()) - now, 2000);
+// Repeat the test, this time tailing the oplog rather than a user-created capped collection.
+// Oplog tailing is not possible on mongos.
+if (FixtureHelpers.isReplSet(db)) {
+ var localDB = db.getSiblingDB("local");
+ var oplogColl = localDB.oplog.rs;
- // Repeat the test, this time tailing the oplog rather than a user-created capped collection.
- // The oplog tailing in not possible on mongos.
- if (FixtureHelpers.isReplSet(db)) {
- var localDB = db.getSiblingDB("local");
- var oplogColl = localDB.oplog.rs;
+ cmdRes = localDB.runCommand(
+ {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true});
+ assert.commandWorked(cmdRes);
+ if (cmdRes.cursor.id > NumberLong(0)) {
+ assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 2);
cmdRes = localDB.runCommand(
- {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true});
+ {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000});
assert.commandWorked(cmdRes);
- if (cmdRes.cursor.id > NumberLong(0)) {
- assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 2);
+ assert.gt(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
+ while (cmdRes.cursor.nextBatch.length > 0) {
+ now = new Date();
cmdRes = localDB.runCommand(
- {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000});
+ {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
-
- while (cmdRes.cursor.nextBatch.length > 0) {
- now = new Date();
- cmdRes = localDB.runCommand(
- {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
- }
- assert.gte((new Date()) - now, 2000);
}
+ assert.gte((new Date()) - now, 2000);
}
-
- // Test filtered inserts while writing to a capped collection.
- // Find with a filter which doesn't match any documents in the collection.
- cmdRes = assert.commandWorked(db.runCommand({
- find: collName,
- batchSize: 2,
- filter: {x: 1},
- awaitData: true,
- tailable: true,
- comment: "uniquifier_comment"
- }));
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 0);
-
- // Test that a getMore command on a tailable, awaitData cursor does not return a new batch to
- // the user if a document was inserted, but it did not match the filter.
- let insertshell = startParallelShell(() => {
- // Signal to the original shell that the parallel shell has successfully started.
- assert.writeOK(db.await_data.insert({_id: "signal parent shell"}));
-
- // Wait for the parent shell to start watching for the next document.
- assert.soon(() => db.currentOp({
- op: "getmore",
- "cursor.originatingCommand.comment": "uniquifier_comment"
- }).inprog.length == 1,
- () => tojson(db.currentOp().inprog));
-
- // Now write a non-matching document to the collection.
- assert.writeOK(db.await_data.insert({_id: "no match", x: 0}));
-
- // Make sure the getMore has not ended after a while.
- sleep(2000);
- assert.eq(
- db.currentOp({op: "getmore", "cursor.originatingCommand.comment": "uniquifier_comment"})
- .inprog.length,
- 1,
- tojson(db.currentOp().inprog));
-
- // Now write a matching document to wake it up.
- assert.writeOK(db.await_data.insert({_id: "match", x: 1}));
- });
-
- // Wait until we receive confirmation that the parallel shell has started.
- assert.soon(() => db.await_data.findOne({_id: "signal parent shell"}) !== null);
-
- // Now issue a getMore which will match the parallel shell's currentOp filter, signalling it to
- // write a non-matching document into the collection. Confirm that we do not receive this
- // document and that we subsequently time out.
- now = new Date();
- cmdRes = db.runCommand({
- getMore: cmdRes.cursor.id,
- collection: collName,
- maxTimeMS: ReplSetTest.kDefaultTimeoutMS
- });
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
- assert.docEq(cmdRes.cursor.nextBatch[0], {_id: "match", x: 1});
- insertshell();
+}
+
+// Test filtered inserts while writing to a capped collection.
+// Find with a filter which doesn't match any documents in the collection.
+cmdRes = assert.commandWorked(db.runCommand({
+ find: collName,
+ batchSize: 2,
+ filter: {x: 1},
+ awaitData: true,
+ tailable: true,
+ comment: "uniquifier_comment"
+}));
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 0);
+
+// Test that a getMore command on a tailable, awaitData cursor does not return a new batch to
+// the user if a document was inserted that does not match the filter.
+let insertshell = startParallelShell(() => {
+ // Signal to the original shell that the parallel shell has successfully started.
+ assert.writeOK(db.await_data.insert({_id: "signal parent shell"}));
+
+ // Wait for the parent shell to start watching for the next document.
+ assert.soon(() => db.currentOp({
+ op: "getmore",
+ "cursor.originatingCommand.comment": "uniquifier_comment"
+ }).inprog.length == 1,
+ () => tojson(db.currentOp().inprog));
+
+ // Now write a non-matching document to the collection.
+ assert.writeOK(db.await_data.insert({_id: "no match", x: 0}));
+
+ // Make sure the getMore has not ended after a while.
+ sleep(2000);
+ assert.eq(
+ db.currentOp({op: "getmore", "cursor.originatingCommand.comment": "uniquifier_comment"})
+ .inprog.length,
+ 1,
+ tojson(db.currentOp().inprog));
+
+ // Now write a matching document to wake it up.
+ assert.writeOK(db.await_data.insert({_id: "match", x: 1}));
+});
+
+// Wait until we receive confirmation that the parallel shell has started.
+assert.soon(() => db.await_data.findOne({_id: "signal parent shell"}) !== null);
+
+// Now issue a getMore which will match the parallel shell's currentOp filter, signalling it to
+// write a non-matching document into the collection. Confirm that we do not receive this
+// document and that we subsequently time out.
+now = new Date();
+cmdRes = db.runCommand(
+ {getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: ReplSetTest.kDefaultTimeoutMS});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 1);
+assert.docEq(cmdRes.cursor.nextBatch[0], {_id: "match", x: 1});
+insertshell();
})();
diff --git a/jstests/core/background_index_multikey.js b/jstests/core/background_index_multikey.js
index 0449beb513b..3db0c2d81f1 100644
--- a/jstests/core/background_index_multikey.js
+++ b/jstests/core/background_index_multikey.js
@@ -7,68 +7,68 @@
*/
(function() {
- "use strict";
- function testIndexBuilds(isBackground) {
- jsTestLog("Testing " + (isBackground ? "background" : "foreground") + " index builds");
- let coll = db["background_index_multikey_" + isBackground];
- coll.drop();
+"use strict";
+function testIndexBuilds(isBackground) {
+ jsTestLog("Testing " + (isBackground ? "background" : "foreground") + " index builds");
+ let coll = db["background_index_multikey_" + isBackground];
+ coll.drop();
- // Build index after multikey document is in the collection.
- let doc = {_id: 0, a: [1, 2]};
- assert.writeOK(coll.insert(doc));
- assert.commandWorked(coll.createIndex({a: 1}, {background: isBackground}));
- assert.eq(1, coll.count({a: 1}));
- assert.eq(doc, coll.findOne({a: 1}));
- assert.eq(1, coll.count({a: 2}));
- assert.eq(doc, coll.findOne({a: 2}));
+ // Build index after multikey document is in the collection.
+ let doc = {_id: 0, a: [1, 2]};
+ assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.createIndex({a: 1}, {background: isBackground}));
+ assert.eq(1, coll.count({a: 1}));
+ assert.eq(doc, coll.findOne({a: 1}));
+ assert.eq(1, coll.count({a: 2}));
+ assert.eq(doc, coll.findOne({a: 2}));
- // Build index where multikey is in an embedded document.
- doc = {_id: 1, b: {c: [1, 2]}};
- assert.writeOK(coll.insert(doc));
- assert.commandWorked(coll.createIndex({'b.c': 1}, {background: isBackground}));
- assert.eq(1, coll.count({'b.c': 1}));
- assert.eq(doc, coll.findOne({'b.c': 1}));
- assert.eq(1, coll.count({'b.c': 2}));
- assert.eq(doc, coll.findOne({'b.c': 2}));
+ // Build index where multikey is in an embedded document.
+ doc = {_id: 1, b: {c: [1, 2]}};
+ assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.createIndex({'b.c': 1}, {background: isBackground}));
+ assert.eq(1, coll.count({'b.c': 1}));
+ assert.eq(doc, coll.findOne({'b.c': 1}));
+ assert.eq(1, coll.count({'b.c': 2}));
+ assert.eq(doc, coll.findOne({'b.c': 2}));
- // Add new multikey path to embedded path.
- doc = {_id: 2, b: [1, 2]};
- assert.writeOK(coll.insert(doc));
- assert.eq(1, coll.count({b: 1}));
- assert.eq(doc, coll.findOne({b: 1}));
- assert.eq(1, coll.count({b: 2}));
- assert.eq(doc, coll.findOne({b: 2}));
+ // Add new multikey path to embedded path.
+ doc = {_id: 2, b: [1, 2]};
+ assert.writeOK(coll.insert(doc));
+ assert.eq(1, coll.count({b: 1}));
+ assert.eq(doc, coll.findOne({b: 1}));
+ assert.eq(1, coll.count({b: 2}));
+ assert.eq(doc, coll.findOne({b: 2}));
- // Build index on a large collection that is not multikey, and then make it multikey.
- for (let i = 100; i < 1100; i++) {
- assert.writeOK(coll.insert({_id: i, d: i}));
- }
- assert.commandWorked(coll.createIndex({d: 1}, {background: isBackground}));
- doc = {_id: 3, d: [1, 2]};
- assert.writeOK(coll.insert(doc));
- assert.eq(1, coll.count({d: 1}));
- assert.eq(doc, coll.findOne({d: 1}));
- assert.eq(1, coll.count({d: 2}));
- assert.eq(doc, coll.findOne({d: 2}));
+ // Build index on a large collection that is not multikey, and then make it multikey.
+ for (let i = 100; i < 1100; i++) {
+ assert.writeOK(coll.insert({_id: i, d: i}));
+ }
+ assert.commandWorked(coll.createIndex({d: 1}, {background: isBackground}));
+ doc = {_id: 3, d: [1, 2]};
+ assert.writeOK(coll.insert(doc));
+ assert.eq(1, coll.count({d: 1}));
+ assert.eq(doc, coll.findOne({d: 1}));
+ assert.eq(1, coll.count({d: 2}));
+ assert.eq(doc, coll.findOne({d: 2}));
- // Build compound multikey index.
- doc = {_id: 4, e: [1, 2]};
- assert.writeOK(coll.insert(doc));
- assert.commandWorked(coll.createIndex({'e': 1, 'f': 1}, {background: isBackground}));
- assert.eq(1, coll.count({e: 1}));
- assert.eq(doc, coll.findOne({e: 1}));
- assert.eq(1, coll.count({e: 2}));
- assert.eq(doc, coll.findOne({e: 2}));
+ // Build compound multikey index.
+ doc = {_id: 4, e: [1, 2]};
+ assert.writeOK(coll.insert(doc));
+ assert.commandWorked(coll.createIndex({'e': 1, 'f': 1}, {background: isBackground}));
+ assert.eq(1, coll.count({e: 1}));
+ assert.eq(doc, coll.findOne({e: 1}));
+ assert.eq(1, coll.count({e: 2}));
+ assert.eq(doc, coll.findOne({e: 2}));
- // Add new multikey path to compound index.
- doc = {_id: 5, f: [1, 2]};
- assert.writeOK(coll.insert(doc));
- assert.eq(1, coll.count({f: 1}));
- assert.eq(doc, coll.findOne({f: 1}));
- assert.eq(1, coll.count({f: 2}));
- assert.eq(doc, coll.findOne({f: 2}));
- }
+ // Add new multikey path to compound index.
+ doc = {_id: 5, f: [1, 2]};
+ assert.writeOK(coll.insert(doc));
+ assert.eq(1, coll.count({f: 1}));
+ assert.eq(doc, coll.findOne({f: 1}));
+ assert.eq(1, coll.count({f: 2}));
+ assert.eq(doc, coll.findOne({f: 2}));
+}
- testIndexBuilds(false);
- testIndexBuilds(true);
+testIndexBuilds(false);
+testIndexBuilds(true);
})();
diff --git a/jstests/core/background_unique_indexes.js b/jstests/core/background_unique_indexes.js
index 6d8b02d5199..692aaa0f58f 100644
--- a/jstests/core/background_unique_indexes.js
+++ b/jstests/core/background_unique_indexes.js
@@ -14,57 +14,57 @@
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "background_unique_indexes";
- const collName = "test";
+const dbName = "background_unique_indexes";
+const collName = "test";
- let testDB = db.getSiblingDB(dbName);
+let testDB = db.getSiblingDB(dbName);
- // Setup collection.
- testDB[collName].drop();
- assert.commandWorked(testDB.runCommand({create: collName}));
+// Setup collection.
+testDB[collName].drop();
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Each iteration increments and decrements a uniquely indexed value, 'x' while creating and
- // dropping an index. The goal is that an index build on a secondary might find a case where the
- // unique index constraint is temporarily violated, and an index on x maps to two different
- // records.
- const nOps = 1000;
- const nIterations = 15;
+// Each iteration increments and decrements a uniquely indexed value, 'x', while creating and
+// dropping an index. The goal is that an index build on a secondary might find a case where the
+// unique index constraint is temporarily violated, and an index on x maps to two different
+// records.
+const nOps = 1000;
+const nIterations = 15;
- // Write the initial documents.
- let bulk = testDB[collName].initializeUnorderedBulkOp();
+// Write the initial documents.
+let bulk = testDB[collName].initializeUnorderedBulkOp();
+for (let i = 0; i < nOps; i++) {
+ bulk.insert({_id: i, x: i, iter: 0});
+}
+assert.commandWorked(bulk.execute());
+
+// Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration.
+for (let iteration = 0; iteration < nIterations; iteration++) {
+ // Reset each document.
+ let updates = [];
for (let i = 0; i < nOps; i++) {
- bulk.insert({_id: i, x: i, iter: 0});
+ updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}};
}
- assert.commandWorked(bulk.execute());
-
- // Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration.
- for (let iteration = 0; iteration < nIterations; iteration++) {
- // Reset each document.
- let updates = [];
- for (let i = 0; i < nOps; i++) {
- updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}};
- }
-
- assert.commandWorked(testDB.runCommand({update: collName, updates: updates}));
- // Create a background unique index on the collection.
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: "x_1", background: true, unique: true}]
- }));
+ assert.commandWorked(testDB.runCommand({update: collName, updates: updates}));
- // Generate updates that increment x on each document backwards by _id to avoid conficts
- // when applied in-order.
- updates = [];
- for (let i = 0; i < nOps; i++) {
- // Apply each operation in reverse order.
- updates[nOps - i - 1] = {q: {_id: i}, u: {$inc: {x: 1}}};
- }
- assert.commandWorked(testDB.runCommand({update: collName, updates: updates}));
+ // Create a background unique index on the collection.
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: "x_1", background: true, unique: true}]
+ }));
- assert.commandWorked(testDB.runCommand({dropIndexes: collName, index: "x_1"}));
- print("iteration " + iteration);
+ // Generate updates that increment x on each document backwards by _id to avoid conflicts
+ // when applied in-order.
+ updates = [];
+ for (let i = 0; i < nOps; i++) {
+ // Apply each operation in reverse order.
+ updates[nOps - i - 1] = {q: {_id: i}, u: {$inc: {x: 1}}};
}
+ assert.commandWorked(testDB.runCommand({update: collName, updates: updates}));
+
+ assert.commandWorked(testDB.runCommand({dropIndexes: collName, index: "x_1"}));
+ print("iteration " + iteration);
+}
})();
diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js
index b280b8ceb77..ac7e1177c41 100644
--- a/jstests/core/batch_size.js
+++ b/jstests/core/batch_size.js
@@ -3,127 +3,124 @@
// Test subtleties of batchSize and limit.
(function() {
- "use strict";
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- var t = db.jstests_batch_size;
- t.drop();
-
- for (var i = 0; i < 4; i++) {
- t.save({_id: i, a: i});
- }
-
- function runIndexedTests() {
- // With limit, indexed.
- assert.eq(2, t.find().limit(2).itcount());
- assert.eq(2, t.find().sort({a: 1}).limit(2).itcount());
-
- // With batchSize, indexed.
- // SERVER-12438: If there is an index that provides the sort, then a plan with an unindexed
- // sort should never be used. Consequently, batchSize will NOT be a hard limit in this
- // case. WARNING: the behavior described above may change in the future.
- assert.eq(4, t.find().batchSize(2).itcount());
- assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount());
- }
-
- // Without batch size or limit, unindexed.
- assert.eq(4, t.find().itcount());
- assert.eq(4, t.find().sort({a: 1}).itcount());
-
- // With limit, unindexed.
+"use strict";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+var t = db.jstests_batch_size;
+t.drop();
+
+for (var i = 0; i < 4; i++) {
+ t.save({_id: i, a: i});
+}
+
+function runIndexedTests() {
+ // With limit, indexed.
assert.eq(2, t.find().limit(2).itcount());
assert.eq(2, t.find().sort({a: 1}).limit(2).itcount());
+ // With batchSize, indexed.
+ // SERVER-12438: If there is an index that provides the sort, then a plan with an unindexed
+ // sort should never be used. Consequently, batchSize will NOT be a hard limit in this
+ // case. WARNING: the behavior described above may change in the future.
assert.eq(4, t.find().batchSize(2).itcount());
assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount());
-
- // With negative batchSize. A negative batchSize value instructs the server
- // to return just a single batch of results.
- assert.eq(1, t.find().batchSize(-1).itcount());
- assert.eq(2, t.find().batchSize(-2).itcount());
-
- // Run the tests with the index twice in order to double check plan caching.
- t.ensureIndex({a: 1});
- for (var i = 0; i < 2; i++) {
- runIndexedTests();
- }
-
- // The next tests make sure that we obey limit and batchSize properly when the sort could be
- // either indexed or unindexed.
- t.drop();
- t.ensureIndex({a: 1});
- t.ensureIndex({b: 1});
-
- for (var i = 0; i < 100; i++) {
- t.save({_id: i, a: i, b: 1});
- }
-
- // Without a hint. Do it twice to make sure caching is ok.
- for (var i = 0; i < 2; i++) {
- assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).itcount());
- assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).itcount());
- }
-
- // Hinting 'a'.
- assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).batchSize(2).itcount());
- assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).limit(6).itcount());
-
- // Hinting 'b'.
- assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itcount());
- assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount());
-
- // With explain.
- var explain = t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain("executionStats");
- assert.eq(15, explain.executionStats.nReturned);
- explain = t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain("executionStats");
- if (FixtureHelpers.isMongos(db)) {
- // If we're talking to a mongos, we expect at most one batch from each shard.
- assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6,
- explain.executionStats.nReturned);
- } else {
- assert.eq(6, explain.executionStats.nReturned);
- }
-
- // Double check that we're not scanning more stuff than we have to. In order to get the sort
- // using index 'a', we should need to scan about 50 keys and 50 documents.
- var explain =
- t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain("executionStats");
- assert.lte(explain.executionStats.totalKeysExamined, 60);
- assert.lte(explain.executionStats.totalDocsExamined, 60);
- if (FixtureHelpers.isMongos(db)) {
- // If we're talking to a mongos, we expect at most one batch from each shard.
- assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6,
- explain.executionStats.nReturned);
- } else {
- assert.eq(6, explain.executionStats.nReturned);
- }
-
- // -------
-
- // During plan ranking, we treat ntoreturn as a limit. This prevents us from buffering too much
- // data in a blocking sort stage during plan ranking.
- t.drop();
-
- // Generate big string to use in the object - 1MB+ String
- var bigStr = "ABCDEFGHIJKLMNBOPQRSTUVWXYZ012345687890";
- while (bigStr.length < 1000000) {
- bigStr = bigStr + "::" + bigStr;
- }
-
- // Insert enough documents to exceed the 32 MB in-memory sort limit.
- const nDocs = 40 * FixtureHelpers.numberOfShardsForCollection(t);
- for (var i = 0; i < nDocs; i++) {
- var doc = {x: 1, y: 1, z: i, big: bigStr};
- t.insert(doc);
- }
-
- // Two indices needed in order to trigger plan ranking. Neither index provides the sort order.
- t.ensureIndex({x: 1});
- t.ensureIndex({y: 1});
-
- // We should only buffer 3 docs in memory.
- var cursor = t.find({x: 1, y: 1}).sort({z: -1}).limit(3);
- assert.eq(nDocs - 1, cursor.next().z);
- assert.eq(nDocs - 2, cursor.next().z);
- assert.eq(nDocs - 3, cursor.next().z);
- assert(!cursor.hasNext());
+}
+
+// Without batch size or limit, unindexed.
+assert.eq(4, t.find().itcount());
+assert.eq(4, t.find().sort({a: 1}).itcount());
+
+// With limit, unindexed.
+assert.eq(2, t.find().limit(2).itcount());
+assert.eq(2, t.find().sort({a: 1}).limit(2).itcount());
+
+assert.eq(4, t.find().batchSize(2).itcount());
+assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount());
+
+// With negative batchSize. A negative batchSize value instructs the server
+// to return just a single batch of results.
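+// With batchSize(-N), the server returns at most N documents and then closes the cursor.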
+assert.eq(1, t.find().batchSize(-1).itcount());
+assert.eq(2, t.find().batchSize(-2).itcount());
+
+// Run the tests with the index twice in order to double check plan caching.
+t.ensureIndex({a: 1});
+for (var i = 0; i < 2; i++) {
+ runIndexedTests();
+}
+
+// The next tests make sure that we obey limit and batchSize properly when the sort could be
+// either indexed or unindexed.
+t.drop();
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+
+for (var i = 0; i < 100; i++) {
+ t.save({_id: i, a: i, b: 1});
+}
+
+// Without a hint. Do it twice to make sure caching is ok.
+for (var i = 0; i < 2; i++) {
+ assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).itcount());
+ assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).itcount());
+}
+
+// Hinting 'a'.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).batchSize(2).itcount());
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).limit(6).itcount());
+
+// Hinting 'b'.
+assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itcount());
+assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount());
+
+// With explain.
+var explain = t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain("executionStats");
+assert.eq(15, explain.executionStats.nReturned);
+explain = t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain("executionStats");
+if (FixtureHelpers.isMongos(db)) {
+ // If we're talking to a mongos, we expect at most one batch from each shard.
+ assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6, explain.executionStats.nReturned);
+} else {
+ assert.eq(6, explain.executionStats.nReturned);
+}
+
+// Double check that we're not scanning more stuff than we have to. In order to get the sort
+// using index 'a', we should need to scan about 50 keys and 50 documents.
+var explain = t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain("executionStats");
+assert.lte(explain.executionStats.totalKeysExamined, 60);
+assert.lte(explain.executionStats.totalDocsExamined, 60);
+if (FixtureHelpers.isMongos(db)) {
+ // If we're talking to a mongos, we expect at most one batch from each shard.
+ assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6, explain.executionStats.nReturned);
+} else {
+ assert.eq(6, explain.executionStats.nReturned);
+}
+
+// -------
+
+// During plan ranking, we treat ntoreturn as a limit. This prevents us from buffering too much
+// data in a blocking sort stage during plan ranking.
+t.drop();
+
+// Generate a big string to embed in each document - a 1MB+ string.
+var bigStr = "ABCDEFGHIJKLMNBOPQRSTUVWXYZ012345687890";
+while (bigStr.length < 1000000) {
+ bigStr = bigStr + "::" + bigStr;
+}
+
+// Insert enough documents to exceed the 32 MB in-memory sort limit.
+const nDocs = 40 * FixtureHelpers.numberOfShardsForCollection(t);
+for (var i = 0; i < nDocs; i++) {
+ var doc = {x: 1, y: 1, z: i, big: bigStr};
+ t.insert(doc);
+}
+
+// Two indices needed in order to trigger plan ranking. Neither index provides the sort order.
+t.ensureIndex({x: 1});
+t.ensureIndex({y: 1});
+
+// We should only buffer 3 docs in memory.
+var cursor = t.find({x: 1, y: 1}).sort({z: -1}).limit(3);
+assert.eq(nDocs - 1, cursor.next().z);
+assert.eq(nDocs - 2, cursor.next().z);
+assert.eq(nDocs - 3, cursor.next().z);
+assert(!cursor.hasNext());
}());
diff --git a/jstests/core/batch_write_collation_estsize.js b/jstests/core/batch_write_collation_estsize.js
index 819060ec37e..d0e4254d6b2 100644
--- a/jstests/core/batch_write_collation_estsize.js
+++ b/jstests/core/batch_write_collation_estsize.js
@@ -7,186 +7,178 @@
// specification in the write operation document.
(function() {
- "use strict";
-
- // Setup the test collection.
- db.batch_write_collation_estsize.drop();
- assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"}));
-
- if (db.getMongo().writeMode() !== "commands") {
- // Cannot use the bulk API to set a collation when using legacy write ops.
- let bulk;
-
- // Test updateOne unordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({
- str: "BAR"
- });
- });
-
- // Test update unordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"});
- });
-
- // Test replaceOne unordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({
- str: "BAR"
- });
- });
-
- // Test removeOne unordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
- });
-
- // Test remove unordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
- });
-
- // Test updateOne ordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({
- str: "BAR"
- });
- });
-
- // Test update ordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"});
- });
-
- // Test replaceOne ordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({
- str: "BAR"
- });
- });
-
- // Test removeOne ordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
- });
-
- // Test remove ordered bulk write operation with collation specification.
- bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
- assert.throws(() => {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
- });
- } else {
- // Setup the bulk write response variable.
- let res;
-
- // Test updateOne bulk write operation with collation specification.
- res = db.batch_write_collation_estsize.bulkWrite([{
- updateOne: {
- filter: {str: "FOO"},
- update: {$set: {str: "BAR"}},
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- }
+"use strict";
+
+// Setup the test collection.
+db.batch_write_collation_estsize.drop();
+assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"}));
+
+if (db.getMongo().writeMode() !== "commands") {
+ // Cannot use the bulk API to set a collation when using legacy write ops.
+ let bulk;
+
+ // Test updateOne unordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({str: "BAR"});
+ });
+
+ // Test update unordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"});
+ });
+
+ // Test replaceOne unordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "BAR"});
+ });
+
+ // Test removeOne unordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
+ });
+
+ // Test remove unordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
+ });
+
+ // Test updateOne ordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({str: "BAR"});
+ });
+
+ // Test update ordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"});
+ });
+
+ // Test replaceOne ordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "BAR"});
+ });
+
+ // Test removeOne ordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
+ });
+
+ // Test remove ordered bulk write operation with collation specification.
+ bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp();
+ assert.throws(() => {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
+ });
+} else {
+ // Setup the bulk write response variable.
+ let res;
+
+ // Test updateOne bulk write operation with collation specification.
+ res = db.batch_write_collation_estsize.bulkWrite([{
+ updateOne: {
+ filter: {str: "FOO"},
+ update: {$set: {str: "BAR"}},
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
}
- }]);
- assert.eq(1, res.matchedCount);
-
- // Test updateMany bulk write operation with collation specification.
- res = db.batch_write_collation_estsize.bulkWrite([{
- updateMany: {
- filter: {str: "BAR"},
- update: {$set: {str: "FOO"}},
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- }
+ }
+ }]);
+ assert.eq(1, res.matchedCount);
+
+ // Test updateMany bulk write operation with collation specification.
+ res = db.batch_write_collation_estsize.bulkWrite([{
+ updateMany: {
+ filter: {str: "BAR"},
+ update: {$set: {str: "FOO"}},
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
}
- }]);
- assert.eq(1, res.matchedCount);
-
- // Test replaceOne bulk write operation with collation specification.
- res = db.batch_write_collation_estsize.bulkWrite([{
- replaceOne: {
- filter: {str: "FOO"},
- replacement: {str: "BAR"},
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- }
+ }
+ }]);
+ assert.eq(1, res.matchedCount);
+
+ // Test replaceOne bulk write operation with collation specification.
+ res = db.batch_write_collation_estsize.bulkWrite([{
+ replaceOne: {
+ filter: {str: "FOO"},
+ replacement: {str: "BAR"},
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
}
- }]);
- assert.eq(1, res.matchedCount);
-
- // Test deleteMany bulk write operation with collation specification.
- res = db.batch_write_collation_estsize.bulkWrite([{
- deleteOne: {
- filter: {str: "BAR"},
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- }
+ }
+ }]);
+ assert.eq(1, res.matchedCount);
+
+ // Test deleteMany bulk write operation with collation specification.
+ res = db.batch_write_collation_estsize.bulkWrite([{
+ deleteOne: {
+ filter: {str: "BAR"},
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
}
- }]);
- assert.eq(1, res.deletedCount);
-
- // Reinsert a document to test deleteMany bulk write operation.
- assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"}));
-
- // Test deleteMany bulk write operation with collation specification.
- res = db.batch_write_collation_estsize.bulkWrite([{
- deleteMany: {
- filter: {str: "FOO"},
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- }
+ }
+ }]);
+ assert.eq(1, res.deletedCount);
+
+ // Reinsert a document to test deleteMany bulk write operation.
+ assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"}));
+
+ // Test deleteMany bulk write operation with collation specification.
+ res = db.batch_write_collation_estsize.bulkWrite([{
+ deleteMany: {
+ filter: {str: "FOO"},
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
}
- }]);
- assert.eq(1, res.deletedCount);
- }
+ }
+ }]);
+ assert.eq(1, res.deletedCount);
+}
})();
diff --git a/jstests/core/batch_write_command_delete.js b/jstests/core/batch_write_command_delete.js
index 48234dbeef5..4004a519412 100644
--- a/jstests/core/batch_write_command_delete.js
+++ b/jstests/core/batch_write_command_delete.js
@@ -30,7 +30,7 @@ function resultOK(result) {
}
function resultNOK(result) {
- return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string';
+ return !result.ok && typeof (result.code) == 'number' && typeof (result.errmsg) == 'string';
}
function countEventually(collection, n) {
diff --git a/jstests/core/batch_write_command_insert.js b/jstests/core/batch_write_command_insert.js
index dcbe065bf19..0480e63f04a 100644
--- a/jstests/core/batch_write_command_insert.js
+++ b/jstests/core/batch_write_command_insert.js
@@ -33,7 +33,7 @@ function resultOK(result) {
}
function resultNOK(result) {
- return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string';
+ return !result.ok && typeof (result.code) == 'number' && typeof (result.errmsg) == 'string';
}
function countEventually(collection, n) {
diff --git a/jstests/core/batch_write_command_update.js b/jstests/core/batch_write_command_update.js
index df1a0ade62a..3d879e0064d 100644
--- a/jstests/core/batch_write_command_update.js
+++ b/jstests/core/batch_write_command_update.js
@@ -29,7 +29,7 @@ function resultOK(result) {
}
function resultNOK(result) {
- return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string';
+ return !result.ok && typeof (result.code) == 'number' && typeof (result.errmsg) == 'string';
}
function countEventually(collection, n) {
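The three resultNOK hunks above are the same one-character change: clang-format 7 appears to treat JavaScript's typeof as an operator rather than a function call, so it inserts a space before the parenthesized operand. Both spellings are equivalent, since the parentheses are ordinary grouping:

    // typeof is an operator; all four expressions evaluate identically.
    var result = {ok: 0, code: 11000, errmsg: "duplicate key"};
    assert.eq("number", typeof result.code);
    assert.eq("number", typeof (result.code));
    assert.eq("string", typeof result.errmsg);
    assert.eq("string", typeof (result.errmsg));
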
diff --git a/jstests/core/bench_test1.js b/jstests/core/bench_test1.js
index 8e316c8b25e..2dd6e36c82a 100644
--- a/jstests/core/bench_test1.js
+++ b/jstests/core/bench_test1.js
@@ -5,40 +5,45 @@
// uses_multiple_connections,
// ]
(function() {
- "use strict";
-
- const t = db.bench_test1;
- t.drop();
-
- t.insert({_id: 1, x: 1});
- t.insert({_id: 2, x: 1});
-
- const ops = [
- {op: "findOne", ns: t.getFullName(), query: {_id: 1}},
- {op: "update", ns: t.getFullName(), query: {_id: 1}, update: {$inc: {x: 1}}}
- ];
-
- const seconds = 10;
-
- const benchArgs = {ops: ops, parallel: 2, seconds: seconds, host: db.getMongo().host};
-
- if (jsTest.options().auth) {
- benchArgs['db'] = 'admin';
- benchArgs['username'] = jsTest.options().authUser;
- benchArgs['password'] = jsTest.options().authPassword;
- }
- const res = benchRun(benchArgs);
-
- assert.lte(seconds * res.update, t.findOne({_id: 1}).x * 1.5, "A1");
-
- assert.eq(1, t.getIndexes().length, "B1");
- benchArgs['ops'] = [{op: "createIndex", ns: t.getFullName(), key: {x: 1}}];
- benchArgs['parallel'] = 1;
- benchRun(benchArgs);
- assert.eq(2, t.getIndexes().length, "B2");
- benchArgs['ops'] = [{op: "dropIndex", ns: t.getFullName(), key: {x: 1}}];
- benchRun(benchArgs);
- assert.soon(function() {
- return t.getIndexes().length == 1;
- });
+"use strict";
+
+const t = db.bench_test1;
+t.drop();
+
+t.insert({_id: 1, x: 1});
+t.insert({_id: 2, x: 1});
+
+const ops = [
+ {op: "findOne", ns: t.getFullName(), query: {_id: 1}},
+ {op: "update", ns: t.getFullName(), query: {_id: 1}, update: {$inc: {x: 1}}}
+];
+
+const seconds = 10;
+
+const benchArgs = {
+ ops: ops,
+ parallel: 2,
+ seconds: seconds,
+ host: db.getMongo().host
+};
+
+if (jsTest.options().auth) {
+ benchArgs['db'] = 'admin';
+ benchArgs['username'] = jsTest.options().authUser;
+ benchArgs['password'] = jsTest.options().authPassword;
+}
+const res = benchRun(benchArgs);
+
+assert.lte(seconds * res.update, t.findOne({_id: 1}).x * 1.5, "A1");
+
+assert.eq(1, t.getIndexes().length, "B1");
+benchArgs['ops'] = [{op: "createIndex", ns: t.getFullName(), key: {x: 1}}];
+benchArgs['parallel'] = 1;
+benchRun(benchArgs);
+assert.eq(2, t.getIndexes().length, "B2");
+benchArgs['ops'] = [{op: "dropIndex", ns: t.getFullName(), key: {x: 1}}];
+benchRun(benchArgs);
+assert.soon(function() {
+ return t.getIndexes().length == 1;
+});
}());
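benchRun reports per-op throughput, so seconds * res.update in the test above reconstructs an approximate total number of updates, and the 1.5x slack on the observed counter absorbs timing noise (each successful update increments x by exactly one). A hedged sketch of that bookkeeping against a hypothetical collection:

    // Sketch: relate benchRun's reported update rate back to the counter.
    var c = db.bench_example;  // hypothetical collection
    c.drop();
    assert.writeOK(c.insert({_id: 1, x: 1}));
    var secs = 5;
    var stats = benchRun({
        ops: [{op: "update", ns: c.getFullName(), query: {_id: 1}, update: {$inc: {x: 1}}}],
        parallel: 1,
        seconds: secs,
        host: db.getMongo().host
    });
    // stats.update is a rate (updates/sec); multiply by duration for a total.
    print("approx updates applied: " + secs * stats.update);
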
diff --git a/jstests/core/benchrun_pipeline_updates.js b/jstests/core/benchrun_pipeline_updates.js
index 06647c83f06..bf14e51e5ad 100644
--- a/jstests/core/benchrun_pipeline_updates.js
+++ b/jstests/core/benchrun_pipeline_updates.js
@@ -4,51 +4,51 @@
* @tags: [uses_multiple_connections]
*/
(function() {
- "use strict";
- const coll = db.benchrun_pipeline_updates;
- coll.drop();
+"use strict";
+const coll = db.benchrun_pipeline_updates;
+coll.drop();
- assert.commandWorked(coll.insert({_id: 0, x: 0}));
+assert.commandWorked(coll.insert({_id: 0, x: 0}));
- // Test that a basic pipeline can be used by an update op.
- let benchArgs = {
- ops: [
- {
- op: "update",
- ns: coll.getFullName(),
- query: {_id: 0},
- writeCmd: true,
- update: [{$set: {x: {$add: ["$x", 1]}}}]
- },
- ],
- parallel: 2,
- seconds: 1,
- host: db.getMongo().host,
- };
- if (jsTest.options().auth) {
- benchArgs['db'] = 'admin';
- benchArgs['username'] = jsTest.options().authUser;
- benchArgs['password'] = jsTest.options().authPassword;
- }
- let res = benchRun(benchArgs);
- assert.eq(res.errCount, 0);
- assert.gte(
- coll.findOne({_id: 0}).x, 2, "Expected at least one update to succeed and increment 'x'");
+// Test that a basic pipeline can be used by an update op.
+let benchArgs = {
+ ops: [
+ {
+ op: "update",
+ ns: coll.getFullName(),
+ query: {_id: 0},
+ writeCmd: true,
+ update: [{$set: {x: {$add: ["$x", 1]}}}]
+ },
+ ],
+ parallel: 2,
+ seconds: 1,
+ host: db.getMongo().host,
+};
+if (jsTest.options().auth) {
+ benchArgs['db'] = 'admin';
+ benchArgs['username'] = jsTest.options().authUser;
+ benchArgs['password'] = jsTest.options().authPassword;
+}
+let res = benchRun(benchArgs);
+assert.eq(res.errCount, 0);
+assert.gte(
+ coll.findOne({_id: 0}).x, 2, "Expected at least one update to succeed and increment 'x'");
- // Now test that the pipeline is still subject to benchRun's keyword replacement.
+// Now test that the pipeline is still subject to benchRun's keyword replacement.
- // Initialize x to something outside the range we'll expect it to be in below if the updates
- // succeed.
- assert.commandWorked(coll.updateOne({_id: 0}, {$set: {x: 100}}));
- benchArgs.ops = [{
- op: "update",
- ns: coll.getFullName(),
- query: {_id: 0},
- writeCmd: true,
- update: [{$project: {x: {$literal: {"#RAND_INT_PLUS_THREAD": [0, 2]}}}}]
- }];
- res = benchRun(benchArgs);
- assert.eq(res.errCount, 0);
- assert.lte(
- coll.findOne({_id: 0}).x, 3, "Expected 'x' to be no more than 3 after randInt replacement");
+// Initialize x to something outside the range we'll expect it to be in below if the updates
+// succeed.
+assert.commandWorked(coll.updateOne({_id: 0}, {$set: {x: 100}}));
+benchArgs.ops = [{
+ op: "update",
+ ns: coll.getFullName(),
+ query: {_id: 0},
+ writeCmd: true,
+ update: [{$project: {x: {$literal: {"#RAND_INT_PLUS_THREAD": [0, 2]}}}}]
+}];
+res = benchRun(benchArgs);
+assert.eq(res.errCount, 0);
+assert.lte(
+ coll.findOne({_id: 0}).x, 3, "Expected 'x' to be no more than 3 after randInt replacement");
}());
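The #RAND_INT_PLUS_THREAD placeholder in the hunk above is one of benchRun's template keywords: before each op is issued, the placeholder is replaced by a random integer drawn from the given range plus the issuing worker's thread id, which is why two threads and a [0, 2] range let the test bound x by 3. A minimal sketch of the same substitution in a non-pipeline update, against a hypothetical collection:

    var c = db.benchrun_keyword_example;  // hypothetical collection
    c.drop();
    assert.commandWorked(c.insert({_id: 0, x: -1}));
    var out = benchRun({
        ops: [{
            op: "update",
            ns: c.getFullName(),
            query: {_id: 0},
            writeCmd: true,
            // benchRun swaps this placeholder for a concrete integer per op.
            update: {$set: {x: {"#RAND_INT_PLUS_THREAD": [0, 2]}}}
        }],
        parallel: 2,
        seconds: 1,
        host: db.getMongo().host
    });
    assert.eq(out.errCount, 0);
    print("x after benchRun: " + c.findOne({_id: 0}).x);  // small nonnegative int
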
diff --git a/jstests/core/bindata_indexonly.js b/jstests/core/bindata_indexonly.js
index 4eb24476010..f215a17e882 100644
--- a/jstests/core/bindata_indexonly.js
+++ b/jstests/core/bindata_indexonly.js
@@ -4,75 +4,71 @@
* 2) Can perform index-only data access.
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- var coll = db.jstests_bindata_indexonly;
+var coll = db.jstests_bindata_indexonly;
- coll.drop();
- assert.writeOK(coll.insert({_id: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"), a: 1}));
- assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv"), a: 2}));
- assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz"), a: 3}));
- assert.writeOK(coll.insert({_id: BinData(0, "////////////////////////////"), a: 4}));
- assert.commandWorked(coll.createIndex({_id: 1, a: 1}));
+coll.drop();
+assert.writeOK(coll.insert({_id: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"), a: 1}));
+assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv"), a: 2}));
+assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz"), a: 3}));
+assert.writeOK(coll.insert({_id: BinData(0, "////////////////////////////"), a: 4}));
+assert.commandWorked(coll.createIndex({_id: 1, a: 1}));
- assert.throws(function() {
- db.mycoll.insert({_id: 0, a: BinData.prototype});
- }, [], "bindata getter did not fail");
+assert.throws(function() {
+ db.mycoll.insert({_id: 0, a: BinData.prototype});
+}, [], "bindata getter did not fail");
- function testIndexOnlyBinData(blob) {
- var explain =
- coll.find({$and: [{_id: {$lte: BinData(0, blob)}}, {_id: {$gte: BinData(0, blob)}}]},
- {_id: 1, a: 1})
- .hint({_id: 1, a: 1})
- .explain("executionStats");
+function testIndexOnlyBinData(blob) {
+ var explain =
+ coll.find({$and: [{_id: {$lte: BinData(0, blob)}}, {_id: {$gte: BinData(0, blob)}}]},
+ {_id: 1, a: 1})
+ .hint({_id: 1, a: 1})
+ .explain("executionStats");
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan),
- "indexonly.BinData(0, " + blob + ") - must be index-only");
- assert.eq(1,
- explain.executionStats.nReturned,
- "EXACTone.BinData(0, " + blob + ") - should only return one in unique set");
- }
+ assert(isIndexOnly(db, explain.queryPlanner.winningPlan),
+ "indexonly.BinData(0, " + blob + ") - must be index-only");
+ assert.eq(1,
+ explain.executionStats.nReturned,
+ "EXACTone.BinData(0, " + blob + ") - should only return one in unique set");
+}
- testIndexOnlyBinData("AAAAAAAAAAAAAAAAAAAAAAAAAAAA");
- testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhv");
- testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhz");
- testIndexOnlyBinData("////////////////////////////");
+testIndexOnlyBinData("AAAAAAAAAAAAAAAAAAAAAAAAAAAA");
+testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhv");
+testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhz");
+testIndexOnlyBinData("////////////////////////////");
- var explain;
+var explain;
- explain = coll.find({_id: {$lt: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, {_id: 1, a: 1})
- .hint({_id: 1, a: 1})
- .explain("executionStats");
- assert(isIndexOnly(db, explain), "indexonly.$lt.1 - must be index-only");
- assert.eq(0,
- explain.executionStats.nReturned,
- "correctcount.$lt.1 - not returning correct documents");
+explain = coll.find({_id: {$lt: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, {_id: 1, a: 1})
+ .hint({_id: 1, a: 1})
+ .explain("executionStats");
+assert(isIndexOnly(db, explain), "indexonly.$lt.1 - must be index-only");
+assert.eq(
+ 0, explain.executionStats.nReturned, "correctcount.$lt.1 - not returning correct documents");
- explain = coll.find({_id: {$gt: BinData(0, "////////////////////////////")}}, {_id: 1, a: 1})
- .hint({_id: 1, a: 1})
- .explain("executionStats");
- assert(isIndexOnly(db, explain), "indexonly.$gt.2 - must be index-only");
- assert.eq(0,
- explain.executionStats.nReturned,
- "correctcount.$gt.2 - not returning correct documents");
+explain = coll.find({_id: {$gt: BinData(0, "////////////////////////////")}}, {_id: 1, a: 1})
+ .hint({_id: 1, a: 1})
+ .explain("executionStats");
+assert(isIndexOnly(db, explain), "indexonly.$gt.2 - must be index-only");
+assert.eq(
+ 0, explain.executionStats.nReturned, "correctcount.$gt.2 - not returning correct documents");
- explain = coll.find({_id: {$lte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv")}}, {_id: 1, a: 1})
- .hint({_id: 1, a: 1})
- .explain("executionStats");
- assert(isIndexOnly(db, explain), "indexonly.$lte.3 - must be index-only");
- assert.eq(2,
- explain.executionStats.nReturned,
- "correctcount.$lte.3 - not returning correct documents");
+explain = coll.find({_id: {$lte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv")}}, {_id: 1, a: 1})
+ .hint({_id: 1, a: 1})
+ .explain("executionStats");
+assert(isIndexOnly(db, explain), "indexonly.$lte.3 - must be index-only");
+assert.eq(
+ 2, explain.executionStats.nReturned, "correctcount.$lte.3 - not returning correct documents");
- explain = coll.find({_id: {$gte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz")}}, {_id: 1, a: 1})
- .hint({_id: 1, a: 1})
- .explain("executionStats");
- assert(isIndexOnly(db, explain), "indexonly.$gte.3 - must be index-only");
- assert.eq(2,
- explain.executionStats.nReturned,
- "correctcount.$gte.3 - not returning correct documents");
+explain = coll.find({_id: {$gte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz")}}, {_id: 1, a: 1})
+ .hint({_id: 1, a: 1})
+ .explain("executionStats");
+assert(isIndexOnly(db, explain), "indexonly.$gte.3 - must be index-only");
+assert.eq(
+ 2, explain.executionStats.nReturned, "correctcount.$gte.3 - not returning correct documents");
- coll.drop();
+coll.drop();
})();
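Every explain in the file above checks for a covered plan: the projection {_id: 1, a: 1} touches only fields stored in the {_id: 1, a: 1} index, so the winning plan needs no FETCH stage, which is what the isIndexOnly() helper verifies. A generic sketch of the same check, assuming the analyze_plan.js helper and a hypothetical collection:

    load("jstests/libs/analyze_plan.js");
    var c = db.covered_example;  // hypothetical collection
    c.drop();
    assert.writeOK(c.insert({_id: 1, a: 10}));
    assert.commandWorked(c.createIndex({_id: 1, a: 1}));
    var explainRes = c.find({_id: 1}, {_id: 1, a: 1})
                         .hint({_id: 1, a: 1})
                         .explain("executionStats");
    // Served entirely from the index: no FETCH stage in the winning plan.
    assert(isIndexOnly(db, explainRes.queryPlanner.winningPlan));
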
diff --git a/jstests/core/bittest.js b/jstests/core/bittest.js
index 194bf3df048..00785a5efcf 100644
--- a/jstests/core/bittest.js
+++ b/jstests/core/bittest.js
@@ -2,155 +2,154 @@
* This test ensures that bit test query operators work.
*/
(function() {
- 'use strict';
-
- load("jstests/libs/analyze_plan.js");
-
- var coll = db.jstests_bitwise;
-
- function assertQueryCorrect(query, count) {
- var explain = coll.find(query).explain("executionStats");
- assert(isCollscan(db, explain.queryPlanner.winningPlan),
- "expected bit test query plan to be COLLSCAN");
- assert.eq(count,
- explain.executionStats.nReturned,
- "bit test query not returning correct documents");
+'use strict';
+
+load("jstests/libs/analyze_plan.js");
+
+var coll = db.jstests_bitwise;
+
+function assertQueryCorrect(query, count) {
+ var explain = coll.find(query).explain("executionStats");
+ assert(isCollscan(db, explain.queryPlanner.winningPlan),
+ "expected bit test query plan to be COLLSCAN");
+ assert.eq(
+ count, explain.executionStats.nReturned, "bit test query not returning correct documents");
+}
+
+// Tests on numbers.
+
+coll.drop();
+assert.writeOK(coll.insert({a: 0}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 54}));
+assert.writeOK(coll.insert({a: 88}));
+assert.writeOK(coll.insert({a: 255}));
+assert.commandWorked(coll.createIndex({a: 1}));
+
+// Tests with bitmask.
+assertQueryCorrect({a: {$bitsAllSet: 0}}, 5);
+assertQueryCorrect({a: {$bitsAllSet: 1}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: 16}}, 3);
+assertQueryCorrect({a: {$bitsAllSet: 54}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: 55}}, 1);
+assertQueryCorrect({a: {$bitsAllSet: 88}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: 255}}, 1);
+assertQueryCorrect({a: {$bitsAllClear: 0}}, 5);
+assertQueryCorrect({a: {$bitsAllClear: 1}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: 16}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: 129}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: 255}}, 1);
+assertQueryCorrect({a: {$bitsAnySet: 0}}, 0);
+assertQueryCorrect({a: {$bitsAnySet: 9}}, 3);
+assertQueryCorrect({a: {$bitsAnySet: 255}}, 4);
+assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0);
+assertQueryCorrect({a: {$bitsAnyClear: 18}}, 3);
+assertQueryCorrect({a: {$bitsAnyClear: 24}}, 3);
+assertQueryCorrect({a: {$bitsAnyClear: 255}}, 4);
+
+// Tests with array of bit positions.
+assertQueryCorrect({a: {$bitsAllSet: []}}, 5);
+assertQueryCorrect({a: {$bitsAllSet: [0]}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: [4]}}, 3);
+assertQueryCorrect({a: {$bitsAllSet: [1, 2, 4, 5]}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 4, 5]}}, 1);
+assertQueryCorrect({a: {$bitsAllSet: [3, 4, 6]}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1);
+assertQueryCorrect({a: {$bitsAllClear: []}}, 5);
+assertQueryCorrect({a: {$bitsAllClear: [0]}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: [4]}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: [1, 7]}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1);
+assertQueryCorrect({a: {$bitsAnySet: []}}, 0);
+assertQueryCorrect({a: {$bitsAnySet: [1, 3]}}, 3);
+assertQueryCorrect({a: {$bitsAnySet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4);
+assertQueryCorrect({a: {$bitsAnyClear: []}}, 0);
+assertQueryCorrect({a: {$bitsAnyClear: [1, 4]}}, 3);
+assertQueryCorrect({a: {$bitsAnyClear: [3, 4]}}, 3);
+assertQueryCorrect({a: {$bitsAnyClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4);
+
+// Tests with multiple predicates.
+assertQueryCorrect({a: {$bitsAllSet: 54, $bitsAllClear: 201}}, 1);
+
+// Tests on negative numbers.
+
+coll.drop();
+assert.writeOK(coll.insert({a: -0}));
+assert.writeOK(coll.insert({a: -1}));
+assert.writeOK(coll.insert({a: -54}));
+
+// Tests with bitmask.
+assertQueryCorrect({a: {$bitsAllSet: 0}}, 3);
+assertQueryCorrect({a: {$bitsAllSet: 2}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: 127}}, 1);
+assertQueryCorrect({a: {$bitsAllSet: 74}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: 0}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: 53}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: 127}}, 1);
+assertQueryCorrect({a: {$bitsAnySet: 0}}, 0);
+assertQueryCorrect({a: {$bitsAnySet: 2}}, 2);
+assertQueryCorrect({a: {$bitsAnySet: 127}}, 2);
+assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0);
+assertQueryCorrect({a: {$bitsAnyClear: 53}}, 2);
+assertQueryCorrect({a: {$bitsAnyClear: 127}}, 2);
+
+// Tests with array of bit positions.
+var allPositions = [];
+for (var i = 0; i < 64; i++) {
+ allPositions.push(i);
+}
+assertQueryCorrect({a: {$bitsAllSet: []}}, 3);
+assertQueryCorrect({a: {$bitsAllSet: [1]}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: allPositions}}, 1);
+assertQueryCorrect({a: {$bitsAllSet: [1, 7, 6, 3, 100]}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: []}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: [5, 4, 2, 0]}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: allPositions}}, 1);
+assertQueryCorrect({a: {$bitsAnySet: []}}, 0);
+assertQueryCorrect({a: {$bitsAnySet: [1]}}, 2);
+assertQueryCorrect({a: {$bitsAnySet: allPositions}}, 2);
+assertQueryCorrect({a: {$bitsAnyClear: []}}, 0);
+assertQueryCorrect({a: {$bitsAnyClear: [0, 2, 4, 5, 100]}}, 2);
+assertQueryCorrect({a: {$bitsAnyClear: allPositions}}, 2);
+
+// Tests with multiple predicates.
+assertQueryCorrect({a: {$bitsAllSet: 74, $bitsAllClear: 53}}, 1);
+
+// Tests on BinData.
+
+coll.drop();
+assert.writeOK(coll.insert({a: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}));
+assert.writeOK(coll.insert({a: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}));
+assert.writeOK(coll.insert({a: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}));
+assert.writeOK(coll.insert({a: BinData(0, "////////////////////////////")}));
+assert.commandWorked(coll.createIndex({a: 1}));
+
+// Tests with binary string bitmask.
+assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4);
+assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3);
+assertQueryCorrect({a: {$bitsAllSet: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2);
+assertQueryCorrect({a: {$bitsAllSet: BinData(0, "////////////////////////////")}}, 1);
+assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4);
+assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3);
+assertQueryCorrect({a: {$bitsAllClear: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2);
+assertQueryCorrect({a: {$bitsAllClear: BinData(0, "////////////////////////////")}}, 1);
+assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0);
+assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1);
+assertQueryCorrect({a: {$bitsAnySet: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2);
+assertQueryCorrect({a: {$bitsAnySet: BinData(0, "////////////////////////////")}}, 3);
+assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0);
+assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1);
+assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2);
+assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "////////////////////////////")}}, 3);
+
+// Tests with multiple predicates.
+assertQueryCorrect({
+ a: {
+ $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"),
+ $bitsAllClear: BinData(0, "//yf////////////////////////")
}
+},
+ 1);
- // Tests on numbers.
-
- coll.drop();
- assert.writeOK(coll.insert({a: 0}));
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 54}));
- assert.writeOK(coll.insert({a: 88}));
- assert.writeOK(coll.insert({a: 255}));
- assert.commandWorked(coll.createIndex({a: 1}));
-
- // Tests with bitmask.
- assertQueryCorrect({a: {$bitsAllSet: 0}}, 5);
- assertQueryCorrect({a: {$bitsAllSet: 1}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: 16}}, 3);
- assertQueryCorrect({a: {$bitsAllSet: 54}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: 55}}, 1);
- assertQueryCorrect({a: {$bitsAllSet: 88}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: 255}}, 1);
- assertQueryCorrect({a: {$bitsAllClear: 0}}, 5);
- assertQueryCorrect({a: {$bitsAllClear: 1}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: 16}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: 129}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: 255}}, 1);
- assertQueryCorrect({a: {$bitsAnySet: 0}}, 0);
- assertQueryCorrect({a: {$bitsAnySet: 9}}, 3);
- assertQueryCorrect({a: {$bitsAnySet: 255}}, 4);
- assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0);
- assertQueryCorrect({a: {$bitsAnyClear: 18}}, 3);
- assertQueryCorrect({a: {$bitsAnyClear: 24}}, 3);
- assertQueryCorrect({a: {$bitsAnyClear: 255}}, 4);
-
- // Tests with array of bit positions.
- assertQueryCorrect({a: {$bitsAllSet: []}}, 5);
- assertQueryCorrect({a: {$bitsAllSet: [0]}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: [4]}}, 3);
- assertQueryCorrect({a: {$bitsAllSet: [1, 2, 4, 5]}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 4, 5]}}, 1);
- assertQueryCorrect({a: {$bitsAllSet: [3, 4, 6]}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1);
- assertQueryCorrect({a: {$bitsAllClear: []}}, 5);
- assertQueryCorrect({a: {$bitsAllClear: [0]}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: [4]}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: [1, 7]}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1);
- assertQueryCorrect({a: {$bitsAnySet: []}}, 0);
- assertQueryCorrect({a: {$bitsAnySet: [1, 3]}}, 3);
- assertQueryCorrect({a: {$bitsAnySet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4);
- assertQueryCorrect({a: {$bitsAnyClear: []}}, 0);
- assertQueryCorrect({a: {$bitsAnyClear: [1, 4]}}, 3);
- assertQueryCorrect({a: {$bitsAnyClear: [3, 4]}}, 3);
- assertQueryCorrect({a: {$bitsAnyClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4);
-
- // Tests with multiple predicates.
- assertQueryCorrect({a: {$bitsAllSet: 54, $bitsAllClear: 201}}, 1);
-
- // Tests on negative numbers.
-
- coll.drop();
- assert.writeOK(coll.insert({a: -0}));
- assert.writeOK(coll.insert({a: -1}));
- assert.writeOK(coll.insert({a: -54}));
-
- // Tests with bitmask.
- assertQueryCorrect({a: {$bitsAllSet: 0}}, 3);
- assertQueryCorrect({a: {$bitsAllSet: 2}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: 127}}, 1);
- assertQueryCorrect({a: {$bitsAllSet: 74}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: 0}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: 53}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: 127}}, 1);
- assertQueryCorrect({a: {$bitsAnySet: 0}}, 0);
- assertQueryCorrect({a: {$bitsAnySet: 2}}, 2);
- assertQueryCorrect({a: {$bitsAnySet: 127}}, 2);
- assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0);
- assertQueryCorrect({a: {$bitsAnyClear: 53}}, 2);
- assertQueryCorrect({a: {$bitsAnyClear: 127}}, 2);
-
- // Tests with array of bit positions.
- var allPositions = [];
- for (var i = 0; i < 64; i++) {
- allPositions.push(i);
- }
- assertQueryCorrect({a: {$bitsAllSet: []}}, 3);
- assertQueryCorrect({a: {$bitsAllSet: [1]}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: allPositions}}, 1);
- assertQueryCorrect({a: {$bitsAllSet: [1, 7, 6, 3, 100]}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: []}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: [5, 4, 2, 0]}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: allPositions}}, 1);
- assertQueryCorrect({a: {$bitsAnySet: []}}, 0);
- assertQueryCorrect({a: {$bitsAnySet: [1]}}, 2);
- assertQueryCorrect({a: {$bitsAnySet: allPositions}}, 2);
- assertQueryCorrect({a: {$bitsAnyClear: []}}, 0);
- assertQueryCorrect({a: {$bitsAnyClear: [0, 2, 4, 5, 100]}}, 2);
- assertQueryCorrect({a: {$bitsAnyClear: allPositions}}, 2);
-
- // Tests with multiple predicates.
- assertQueryCorrect({a: {$bitsAllSet: 74, $bitsAllClear: 53}}, 1);
-
- // Tests on BinData.
-
- coll.drop();
- assert.writeOK(coll.insert({a: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}));
- assert.writeOK(coll.insert({a: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}));
- assert.writeOK(coll.insert({a: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}));
- assert.writeOK(coll.insert({a: BinData(0, "////////////////////////////")}));
- assert.commandWorked(coll.createIndex({a: 1}));
-
- // Tests with binary string bitmask.
- assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4);
- assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3);
- assertQueryCorrect({a: {$bitsAllSet: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2);
- assertQueryCorrect({a: {$bitsAllSet: BinData(0, "////////////////////////////")}}, 1);
- assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4);
- assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3);
- assertQueryCorrect({a: {$bitsAllClear: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2);
- assertQueryCorrect({a: {$bitsAllClear: BinData(0, "////////////////////////////")}}, 1);
- assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0);
- assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1);
- assertQueryCorrect({a: {$bitsAnySet: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2);
- assertQueryCorrect({a: {$bitsAnySet: BinData(0, "////////////////////////////")}}, 3);
- assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0);
- assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1);
- assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2);
- assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "////////////////////////////")}}, 3);
-
- // Tests with multiple predicates.
- assertQueryCorrect({
- a: {
- $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"),
- $bitsAllClear: BinData(0, "//yf////////////////////////")
- }
- },
- 1);
-
- coll.drop();
+coll.drop();
})();
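The counts asserted throughout bittest.js follow from the operators' semantics: $bitsAllSet and $bitsAllClear require every listed bit to be 1 or 0 respectively, $bitsAnySet and $bitsAnyClear require at least one such bit, and an empty mask (0 or []) is vacuously satisfied by the "all" forms (every document matches) and never by the "any" forms (no document matches). A worked sketch for a single document in a hypothetical collection:

    // 54 is 0b110110, so bits 1, 2, 4 and 5 are set; bits 0 and 3 are clear.
    var c = db.bittest_example;  // hypothetical collection
    c.drop();
    assert.writeOK(c.insert({a: 54}));
    assert.eq(1, c.find({a: {$bitsAllSet: [1, 2, 4, 5]}}).itcount());
    assert.eq(1, c.find({a: {$bitsAllClear: [0, 3]}}).itcount());
    assert.eq(1, c.find({a: {$bitsAnySet: [0, 1]}}).itcount());    // bit 1 is set
    assert.eq(0, c.find({a: {$bitsAnyClear: [1, 2]}}).itcount());  // both are set
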
diff --git a/jstests/core/bson.js b/jstests/core/bson.js
index 6c6d5268bbb..7139c6d3e1e 100644
--- a/jstests/core/bson.js
+++ b/jstests/core/bson.js
@@ -3,136 +3,131 @@
*/
(function() {
- 'use strict';
-
- var t = db.getCollection("bson");
- t.drop();
- function testObjectsAreEqual(obj1, obj2, equalityFunc, func_name) {
- var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2);
- assert(equalityFunc(obj1, obj2), assert_msg);
- }
-
- function testObjectsAreNotEqual(obj1, obj2, equalityFunc, func_name) {
- var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2);
- assert(!equalityFunc(obj1, obj2), assert_msg);
- }
-
- function runTests(func, testFunc) {
- // Tests on numbers.
- testObjectsAreEqual(0, 0, func, testFunc);
- testObjectsAreEqual(-5, -5, func, testFunc);
- testObjectsAreEqual(1.1, 1.1, func, testFunc);
- testObjectsAreEqual(1, 1, func, testFunc);
- testObjectsAreEqual(1.1, 1.10, func, testFunc);
- var nl0 = new NumberLong("18014398509481984");
- var nl1 = new NumberLong("18014398509481985");
- testObjectsAreEqual(nl0, nl0, func, testFunc);
- testObjectsAreNotEqual(nl0, nl1, func, testFunc);
-
- // Test on key name.
- t.insertMany([{a: 0}, {A: 0}]);
- testObjectsAreNotEqual(t.findOne({a: 0}), t.findOne({A: 0}), func, testFunc);
-
- // Tests on strings.
- testObjectsAreEqual("abc", "abc", func, testFunc);
- testObjectsAreNotEqual("abc", "aBc", func, testFunc);
-
- // Tests on boolean.
- testObjectsAreEqual(true, true, func, testFunc);
- testObjectsAreNotEqual(true, false, func, testFunc);
- testObjectsAreEqual(false, false, func, testFunc);
-
- // Tests on date & timestamp.
- var d0 = new Date(0);
- var d1 = new Date(1);
- var ts0 = new Timestamp(0, 1);
- var ts1 = new Timestamp(1, 1);
- testObjectsAreEqual(d0, d0, func, testFunc);
- testObjectsAreNotEqual(d0, d1, func, testFunc);
- testObjectsAreNotEqual(d1, ts1, func, testFunc);
- testObjectsAreEqual(ts0, ts0, func, testFunc);
- testObjectsAreNotEqual(ts0, ts1, func, testFunc);
-
- // Tests on regex.
- testObjectsAreEqual(/3/, /3/, func, testFunc);
- testObjectsAreNotEqual(/3/, /3/i, func, testFunc);
-
- // Tests on DBPointer.
- var dbp0 = new DBPointer("test", new ObjectId());
- var dbp1 = new DBPointer("test", new ObjectId());
- testObjectsAreEqual(dbp0, dbp0, func, testFunc);
- testObjectsAreNotEqual(dbp0, dbp1, func, testFunc);
-
- // Tests on JavaScript.
- var js0 = Function.prototype;
- var js1 = function() {};
- testObjectsAreEqual(js0, Function.prototype, func, testFunc);
- testObjectsAreNotEqual(js0, js1, func, testFunc);
-
- // Tests on arrays.
- testObjectsAreEqual([0, 1], [0, 1], func, testFunc);
- testObjectsAreNotEqual([0, 1], [0], func, testFunc);
- testObjectsAreNotEqual([1, 0], [0, 1], func, testFunc);
-
- // Tests on BinData & HexData.
- testObjectsAreEqual(new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"),
- new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"),
- func,
- testFunc);
- testObjectsAreEqual(new BinData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc);
- testObjectsAreNotEqual(new BinData(0, "AAaa"), new BinData(0, "aaAA"), func, testFunc);
-
- testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "AAaa"), func, testFunc);
- testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "aaAA"), func, testFunc);
- testObjectsAreNotEqual(new HexData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc);
-
- // Tests on ObjectId
- testObjectsAreEqual(new ObjectId("57d1b31cd311a43091fe592f"),
- new ObjectId("57d1b31cd311a43091fe592f"),
- func,
- testFunc);
- testObjectsAreNotEqual(new ObjectId("57d1b31cd311a43091fe592f"),
- new ObjectId("57d1b31ed311a43091fe5930"),
- func,
- testFunc);
-
- // Tests on miscellaneous types.
- testObjectsAreEqual(NaN, NaN, func, testFunc);
- testObjectsAreEqual(null, null, func, testFunc);
- testObjectsAreNotEqual(null, -null, func, testFunc);
- testObjectsAreEqual(MinKey, MinKey, func, testFunc);
- testObjectsAreEqual(MaxKey, MaxKey, func, testFunc);
- testObjectsAreNotEqual(MinKey, MaxKey, func, testFunc);
-
- // Test on object ordering.
- testObjectsAreNotEqual({a: 1, b: 2}, {b: 2, a: 1}, func, testFunc);
- }
-
- // Create wrapper function for bsonWoCompare, such that it returns boolean result.
- var bsonWoCompareWrapper = function(obj1, obj2) {
- return bsonWoCompare(obj1, obj2) === 0;
- };
-
- // Run the tests which work the same for both comparators.
- runTests(bsonWoCompareWrapper, "bsonWoCompare");
- runTests(bsonBinaryEqual, "bsonBinaryEqual");
-
- // Run the tests which differ between comparators.
- testObjectsAreEqual(NaN, -NaN, bsonWoCompareWrapper, "bsonWoCompare");
- testObjectsAreNotEqual(NaN, -NaN, bsonBinaryEqual, "bsonBinaryEqual");
- testObjectsAreEqual(1, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare");
- testObjectsAreNotEqual(1, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual");
- testObjectsAreEqual(1.0, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare");
- testObjectsAreNotEqual(1.0, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual");
- testObjectsAreEqual(NumberInt("1"), NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare");
- testObjectsAreNotEqual(NumberInt("1"), NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual");
- testObjectsAreEqual(
- NumberInt("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare");
- testObjectsAreNotEqual(
- NumberInt("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual");
- testObjectsAreEqual(
- NumberLong("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare");
- testObjectsAreNotEqual(
- NumberLong("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual");
-
+'use strict';
+
+var t = db.getCollection("bson");
+t.drop();
+function testObjectsAreEqual(obj1, obj2, equalityFunc, func_name) {
+ var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2);
+ assert(equalityFunc(obj1, obj2), assert_msg);
+}
+
+function testObjectsAreNotEqual(obj1, obj2, equalityFunc, func_name) {
+ var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2);
+ assert(!equalityFunc(obj1, obj2), assert_msg);
+}
+
+function runTests(func, testFunc) {
+ // Tests on numbers.
+ testObjectsAreEqual(0, 0, func, testFunc);
+ testObjectsAreEqual(-5, -5, func, testFunc);
+ testObjectsAreEqual(1.1, 1.1, func, testFunc);
+ testObjectsAreEqual(1, 1, func, testFunc);
+ testObjectsAreEqual(1.1, 1.10, func, testFunc);
+ var nl0 = new NumberLong("18014398509481984");
+ var nl1 = new NumberLong("18014398509481985");
+ testObjectsAreEqual(nl0, nl0, func, testFunc);
+ testObjectsAreNotEqual(nl0, nl1, func, testFunc);
+
+ // Test on key name.
+ t.insertMany([{a: 0}, {A: 0}]);
+ testObjectsAreNotEqual(t.findOne({a: 0}), t.findOne({A: 0}), func, testFunc);
+
+ // Tests on strings.
+ testObjectsAreEqual("abc", "abc", func, testFunc);
+ testObjectsAreNotEqual("abc", "aBc", func, testFunc);
+
+ // Tests on boolean.
+ testObjectsAreEqual(true, true, func, testFunc);
+ testObjectsAreNotEqual(true, false, func, testFunc);
+ testObjectsAreEqual(false, false, func, testFunc);
+
+ // Tests on date & timestamp.
+ var d0 = new Date(0);
+ var d1 = new Date(1);
+ var ts0 = new Timestamp(0, 1);
+ var ts1 = new Timestamp(1, 1);
+ testObjectsAreEqual(d0, d0, func, testFunc);
+ testObjectsAreNotEqual(d0, d1, func, testFunc);
+ testObjectsAreNotEqual(d1, ts1, func, testFunc);
+ testObjectsAreEqual(ts0, ts0, func, testFunc);
+ testObjectsAreNotEqual(ts0, ts1, func, testFunc);
+
+ // Tests on regex.
+ testObjectsAreEqual(/3/, /3/, func, testFunc);
+ testObjectsAreNotEqual(/3/, /3/i, func, testFunc);
+
+ // Tests on DBPointer.
+ var dbp0 = new DBPointer("test", new ObjectId());
+ var dbp1 = new DBPointer("test", new ObjectId());
+ testObjectsAreEqual(dbp0, dbp0, func, testFunc);
+ testObjectsAreNotEqual(dbp0, dbp1, func, testFunc);
+
+ // Tests on JavaScript.
+ var js0 = Function.prototype;
+ var js1 = function() {};
+ testObjectsAreEqual(js0, Function.prototype, func, testFunc);
+ testObjectsAreNotEqual(js0, js1, func, testFunc);
+
+ // Tests on arrays.
+ testObjectsAreEqual([0, 1], [0, 1], func, testFunc);
+ testObjectsAreNotEqual([0, 1], [0], func, testFunc);
+ testObjectsAreNotEqual([1, 0], [0, 1], func, testFunc);
+
+ // Tests on BinData & HexData.
+ testObjectsAreEqual(new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"),
+ new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"),
+ func,
+ testFunc);
+ testObjectsAreEqual(new BinData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc);
+ testObjectsAreNotEqual(new BinData(0, "AAaa"), new BinData(0, "aaAA"), func, testFunc);
+
+ testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "AAaa"), func, testFunc);
+ testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "aaAA"), func, testFunc);
+ testObjectsAreNotEqual(new HexData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc);
+
+ // Tests on ObjectId
+ testObjectsAreEqual(new ObjectId("57d1b31cd311a43091fe592f"),
+ new ObjectId("57d1b31cd311a43091fe592f"),
+ func,
+ testFunc);
+ testObjectsAreNotEqual(new ObjectId("57d1b31cd311a43091fe592f"),
+ new ObjectId("57d1b31ed311a43091fe5930"),
+ func,
+ testFunc);
+
+ // Tests on miscellaneous types.
+ testObjectsAreEqual(NaN, NaN, func, testFunc);
+ testObjectsAreEqual(null, null, func, testFunc);
+ testObjectsAreNotEqual(null, -null, func, testFunc);
+ testObjectsAreEqual(MinKey, MinKey, func, testFunc);
+ testObjectsAreEqual(MaxKey, MaxKey, func, testFunc);
+ testObjectsAreNotEqual(MinKey, MaxKey, func, testFunc);
+
+ // Test on object ordering.
+ testObjectsAreNotEqual({a: 1, b: 2}, {b: 2, a: 1}, func, testFunc);
+}
+
+// Create wrapper function for bsonWoCompare, such that it returns boolean result.
+var bsonWoCompareWrapper = function(obj1, obj2) {
+ return bsonWoCompare(obj1, obj2) === 0;
+};
+
+// Run the tests which work the same for both comparators.
+runTests(bsonWoCompareWrapper, "bsonWoCompare");
+runTests(bsonBinaryEqual, "bsonBinaryEqual");
+
+// Run the tests which differ between comparators.
+testObjectsAreEqual(NaN, -NaN, bsonWoCompareWrapper, "bsonWoCompare");
+testObjectsAreNotEqual(NaN, -NaN, bsonBinaryEqual, "bsonBinaryEqual");
+testObjectsAreEqual(1, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare");
+testObjectsAreNotEqual(1, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual");
+testObjectsAreEqual(1.0, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare");
+testObjectsAreNotEqual(1.0, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual");
+testObjectsAreEqual(NumberInt("1"), NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare");
+testObjectsAreNotEqual(NumberInt("1"), NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual");
+testObjectsAreEqual(NumberInt("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare");
+testObjectsAreNotEqual(NumberInt("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual");
+testObjectsAreEqual(NumberLong("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare");
+testObjectsAreNotEqual(NumberLong("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual");
})();
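The divergence at the end of bson.js is the point of the test: bsonWoCompare applies BSON comparison semantics, under which numeric types are compared by value, while bsonBinaryEqual requires byte-for-byte identical BSON, so a double 1, NumberLong("1") and NumberDecimal("1.0") all tie under the former yet differ under the latter. Condensed into a standalone sketch:

    // Value equality vs. binary equality for numeric BSON types.
    assert.eq(0, bsonWoCompare({x: 1}, {x: NumberLong("1")}));  // same value
    assert(!bsonBinaryEqual({x: 1}, {x: NumberLong("1")}));     // different bytes
    assert(bsonBinaryEqual({x: NumberLong("1")}, {x: NumberLong("1")}));
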
diff --git a/jstests/core/bson_compare_bug.js b/jstests/core/bson_compare_bug.js
index 2a39efd8db7..798af7a6992 100644
--- a/jstests/core/bson_compare_bug.js
+++ b/jstests/core/bson_compare_bug.js
@@ -1,47 +1,43 @@
(function() {
- "use strict";
+"use strict";
- db.bson_compare_bug.drop();
+db.bson_compare_bug.drop();
- // We want some BSON objects for this test. One convenient way to get that is to insert them
- // into the database and then get them back through a query.
- const coll = db.bson_compare_bug;
- assert.commandWorked(coll.insert(
- [
- {_id: 1, obj: {val: [], _id: 1}},
- {_id: 2, obj: {val: []}},
- {_id: 3, obj: {_id: 1, val: []}}
- ],
- {writeConcern: {w: "majority"}}));
+// We want some BSON objects for this test. One convenient way to get that is to insert them
+// into the database and then get them back through a query.
+const coll = db.bson_compare_bug;
+assert.commandWorked(coll.insert(
+ [{_id: 1, obj: {val: [], _id: 1}}, {_id: 2, obj: {val: []}}, {_id: 3, obj: {_id: 1, val: []}}],
+ {writeConcern: {w: "majority"}}));
- // The $replaceRoot is so we can get back two results that have an "_id" field and one that
- // doesn't. The first two results from this query are the same, except for that.
- // res[0]: {val: [], _id: 1}
- // res[1]: {val: []}
- const res = coll.aggregate([{$sort: {_id: 1}}, {$replaceRoot: {newRoot: "$obj"}}]).toArray();
- assert.eq(3, res.length);
+// The $replaceRoot is so we can get back two results that have an "_id" field and one that
+// doesn't. The first two results from this query are the same, except for that.
+// res[0]: {val: [], _id: 1}
+// res[1]: {val: []}
+const res = coll.aggregate([{$sort: {_id: 1}}, {$replaceRoot: {newRoot: "$obj"}}]).toArray();
+assert.eq(3, res.length);
- // bsonBinaryEqual() should see that the BSON results from the query are not equal.
- assert(!bsonBinaryEqual(res[0], res[1]));
+// bsonBinaryEqual() should see that the BSON results from the query are not equal.
+assert(!bsonBinaryEqual(res[0], res[1]));
- // A magic trick: the shell represents the objects in res[0] and res[1] as JavaScript objects
- // that internally store raw BSON data but also maintain JavaScript properties for each of their
- // BSON fields. The BSON and JavaScript properties are kept in sync both ways. Reading the "val"
- // property for the first time results in a call to BSONInfo::resolve(), which materializes the
- // "val" BSON field as a JavaScript property. In this case, the resolve function also
- // conservatively marks the object as "altered," because "val" is an array, and there's no way
- // to observe modifications to it.
- assert.eq(res[0].val, res[1].val);
+// A magic trick: the shell represents the objects in res[0] and res[1] as JavaScript objects
+// that internally store raw BSON data but also maintain JavaScript properties for each of their
+// BSON fields. The BSON and JavaScript properties are kept in sync both ways. Reading the "val"
+// property for the first time results in a call to BSONInfo::resolve(), which materializes the
+// "val" BSON field as a JavaScript property. In this case, the resolve function also
+// conservatively marks the object as "altered," because "val" is an array, and there's no way
+// to observe modifications to it.
+assert.eq(res[0].val, res[1].val);
- // We repeat the BSON comparison, but this time, the objects are "altered," and bsonBinaryEqual
- // needs to sync the JavaScript properties back into BSON. Before SERVER-39521, a bug in the
- // conversion would ignore the "_id" field unless it was previously resolved, which would cause
- // res[0] and res[1] to appear equal.
- assert(!bsonBinaryEqual(res[0], res[1]));
+// We repeat the BSON comparison, but this time, the objects are "altered," and bsonBinaryEqual
+// needs to sync the JavaScript properties back into BSON. Before SERVER-39521, a bug in the
+// conversion would ignore the "_id" field unless it was previously resolved, which would cause
+// res[0] and res[1] to appear equal.
+assert(!bsonBinaryEqual(res[0], res[1]));
- // The bug that caused the "_id" field to get dropped in conversion involves code that is
- // supposed to move the "_id" field to the front when converting a JavaScript object to BSON.
- // This check ensures that "_id" is still getting moved to the front. The value of res[0] should
- // now have changed so that both it and res[2] have their _id field first.
- assert(bsonBinaryEqual(res[0], res[2]));
+// The bug that caused the "_id" field to get dropped in conversion involves code that is
+// supposed to move the "_id" field to the front when converting a JavaScript object to BSON.
+// This check ensures that "_id" is still getting moved to the front. The value of res[0] should
+// now have changed so that both it and res[2] have their _id field first.
+assert(bsonBinaryEqual(res[0], res[2]));
}());
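The regression test above ultimately hinges on field order: BSON comparison is order-sensitive, and the shell's object-to-BSON conversion is expected to move _id to the front, so res[0] only becomes binary-equal to res[2] once both store _id first. The order sensitivity itself is easy to see in isolation:

    // bsonBinaryEqual distinguishes documents that differ only in field order.
    assert(!bsonBinaryEqual({a: 1, b: 2}, {b: 2, a: 1}));
    assert(bsonBinaryEqual({a: 1, b: 2}, {a: 1, b: 2}));
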
diff --git a/jstests/core/bulk_insert_capped.js b/jstests/core/bulk_insert_capped.js
index 50cc8f460dd..4e1f6b26dec 100644
--- a/jstests/core/bulk_insert_capped.js
+++ b/jstests/core/bulk_insert_capped.js
@@ -8,24 +8,24 @@
// SERVER-21488 Test that multi inserts into capped collections don't cause corruption.
// Note: this file must have a name that starts with "bulk" so it gets run by bulk_gle_passthrough.
(function() {
- "use strict";
- var t = db.capped_multi_insert;
- t.drop();
+"use strict";
+var t = db.capped_multi_insert;
+t.drop();
- db.createCollection(t.getName(), {capped: true, size: 16 * 1024, max: 1});
+db.createCollection(t.getName(), {capped: true, size: 16 * 1024, max: 1});
- t.insert([{_id: 1}, {_id: 2}]);
- assert.gleSuccess(db);
+t.insert([{_id: 1}, {_id: 2}]);
+assert.gleSuccess(db);
- // Ensure the collection is valid.
- var res = t.validate(true);
- assert(res.valid, tojson(res));
+// Ensure the collection is valid.
+var res = t.validate(true);
+assert(res.valid, tojson(res));
- // Ensure that various ways of iterating the collection only return one document.
- assert.eq(t.find().itcount(), 1); // Table scan.
- assert.eq(t.find({}, {_id: 1}).hint({_id: 1}).itcount(), 1); // Index only (covered).
- assert.eq(t.find().hint({_id: 1}).itcount(), 1); // Index scan with fetch.
+// Ensure that various ways of iterating the collection only return one document.
+assert.eq(t.find().itcount(), 1); // Table scan.
+assert.eq(t.find({}, {_id: 1}).hint({_id: 1}).itcount(), 1); // Index only (covered).
+assert.eq(t.find().hint({_id: 1}).itcount(), 1); // Index scan with fetch.
- // Ensure that the second document is the one that is kept.
- assert.eq(t.findOne(), {_id: 2});
+// Ensure that the second document is the one that is kept.
+assert.eq(t.findOne(), {_id: 2});
}());
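The capped test above works because max: 1 caps the document count, not just the byte size: the multi-insert writes {_id: 1} and then {_id: 2}, and inserting the second ages out the first, so every access path must agree that exactly one document, the newer one, survives. Minimal reproduction with a hypothetical collection name:

    var name = "capped_example";  // hypothetical
    db[name].drop();
    assert.commandWorked(db.createCollection(name, {capped: true, size: 4096, max: 1}));
    assert.writeOK(db[name].insert([{_id: 1}, {_id: 2}]));
    assert.eq(1, db[name].find().itcount());
    assert.eq(db[name].findOne(), {_id: 2});  // only the newest doc remains
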
diff --git a/jstests/core/bulk_legacy_enforce_gle.js b/jstests/core/bulk_legacy_enforce_gle.js
index 88b7c51e758..6359b277c0b 100644
--- a/jstests/core/bulk_legacy_enforce_gle.js
+++ b/jstests/core/bulk_legacy_enforce_gle.js
@@ -6,120 +6,120 @@
*/
(function() {
- "use strict";
- const coll = db.bulk_legacy_enforce_gle;
+"use strict";
+const coll = db.bulk_legacy_enforce_gle;
- /**
- * Inserts 'doc' into the collection, asserting that the write succeeds. This runs a
- * getLastError if the insert does not return a response.
- */
- function insertDocument(doc) {
- let res = coll.insert(doc);
- if (res) {
- assert.writeOK(res);
- } else {
- assert.gleOK(db.runCommand({getLastError: 1}));
- }
+/**
+ * Inserts 'doc' into the collection, asserting that the write succeeds. This runs a
+ * getLastError if the insert does not return a response.
+ */
+function insertDocument(doc) {
+ let res = coll.insert(doc);
+ if (res) {
+ assert.writeOK(res);
+ } else {
+ assert.gleOK(db.runCommand({getLastError: 1}));
}
+}
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 1}).upsert().updateOne({_id: 1});
- assert.writeOK(bulk.execute());
- let gle = assert.gleOK(db.runCommand({getLastError: 1}));
- assert.eq(1, gle.n, tojson(gle));
+coll.drop();
+let bulk = coll.initializeUnorderedBulkOp();
+bulk.find({_id: 1}).upsert().updateOne({_id: 1});
+assert.writeOK(bulk.execute());
+let gle = assert.gleOK(db.runCommand({getLastError: 1}));
+assert.eq(1, gle.n, tojson(gle));
- // Batch of size 1 should not call resetError even when it errors out.
- assert(coll.drop());
- insertDocument({_id: 1});
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({none: 1}).upsert().updateOne({_id: 1});
- assert.throws(function() {
- bulk.execute();
- });
+// Batch of size 1 should not call resetError even when it errors out.
+assert(coll.drop());
+insertDocument({_id: 1});
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+assert.throws(function() {
+ bulk.execute();
+});
- gle = db.runCommand({getLastError: 1});
- assert(gle.ok, tojson(gle));
- assert.neq(null, gle.err, tojson(gle));
+gle = db.runCommand({getLastError: 1});
+assert(gle.ok, tojson(gle));
+assert.neq(null, gle.err, tojson(gle));
- // Batch with all error except last should not call resetError.
- assert(coll.drop());
- insertDocument({_id: 1});
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({none: 1}).upsert().updateOne({_id: 1});
- bulk.find({none: 1}).upsert().updateOne({_id: 1});
- bulk.find({_id: 0}).upsert().updateOne({_id: 0});
- let res = assert.throws(function() {
- bulk.execute();
- });
- assert.eq(2, res.getWriteErrors().length);
+// Batch with all error except last should not call resetError.
+assert(coll.drop());
+insertDocument({_id: 1});
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({_id: 0}).upsert().updateOne({_id: 0});
+let res = assert.throws(function() {
+ bulk.execute();
+});
+assert.eq(2, res.getWriteErrors().length);
- gle = db.runCommand({getLastError: 1});
- assert(gle.ok, tojson(gle));
- assert.eq(1, gle.n, tojson(gle));
+gle = db.runCommand({getLastError: 1});
+assert(gle.ok, tojson(gle));
+assert.eq(1, gle.n, tojson(gle));
- // Batch with error at middle should not call resetError.
- assert(coll.drop());
- insertDocument({_id: 1});
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).upsert().updateOne({_id: 0});
- bulk.find({none: 1}).upsert().updateOne({_id: 1});
- bulk.find({_id: 2}).upsert().updateOne({_id: 2});
- res = assert.throws(function() {
- bulk.execute();
- });
- assert.eq(1, res.getWriteErrors().length);
+// Batch with error at middle should not call resetError.
+assert(coll.drop());
+insertDocument({_id: 1});
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({_id: 0}).upsert().updateOne({_id: 0});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({_id: 2}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
+assert.eq(1, res.getWriteErrors().length);
- gle = db.runCommand({getLastError: 1});
- assert(gle.ok, tojson(gle));
- // For legacy writes, mongos sends the bulk as one while the shell sends the write individually.
- assert.gte(gle.n, 1, tojson(gle));
+gle = db.runCommand({getLastError: 1});
+assert(gle.ok, tojson(gle));
+// For legacy writes, mongos sends the bulk as one while the shell sends the write individually.
+assert.gte(gle.n, 1, tojson(gle));
- // Batch with error at last should call resetError.
- assert(coll.drop());
- insertDocument({_id: 2});
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).upsert().updateOne({_id: 0});
- bulk.find({_id: 1}).upsert().updateOne({_id: 1});
- bulk.find({none: 1}).upsert().updateOne({_id: 2});
- res = assert.throws(function() {
- bulk.execute();
- });
- assert.eq(1, res.getWriteErrors().length);
+// Batch with error at last should call resetError.
+assert(coll.drop());
+insertDocument({_id: 2});
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({_id: 0}).upsert().updateOne({_id: 0});
+bulk.find({_id: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
+assert.eq(1, res.getWriteErrors().length);
- gle = db.runCommand({getLastError: 1});
- assert(gle.ok, tojson(gle));
- assert.eq(0, gle.n, tojson(gle));
+gle = db.runCommand({getLastError: 1});
+assert(gle.ok, tojson(gle));
+assert.eq(0, gle.n, tojson(gle));
- // Batch with error at last should not call resetError if { w: 1 }.
- assert(coll.drop());
- insertDocument({_id: 2});
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).upsert().updateOne({_id: 0});
- bulk.find({_id: 1}).upsert().updateOne({_id: 1});
- bulk.find({none: 1}).upsert().updateOne({_id: 2});
- res = assert.throws(function() {
- bulk.execute();
- });
- assert.eq(1, res.getWriteErrors().length);
+// Batch with error at last should not call resetError if { w: 1 }.
+assert(coll.drop());
+insertDocument({_id: 2});
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({_id: 0}).upsert().updateOne({_id: 0});
+bulk.find({_id: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
+assert.eq(1, res.getWriteErrors().length);
- gle = db.runCommand({getLastError: 1, w: 1});
- assert(gle.ok, tojson(gle));
- assert.neq(null, gle.err, tojson(gle));
+gle = db.runCommand({getLastError: 1, w: 1});
+assert(gle.ok, tojson(gle));
+assert.neq(null, gle.err, tojson(gle));
- // Batch with error at last should not call resetError if { w: 0 }.
- assert(coll.drop());
- insertDocument({_id: 2});
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).upsert().updateOne({_id: 0});
- bulk.find({_id: 1}).upsert().updateOne({_id: 1});
- bulk.find({none: 1}).upsert().updateOne({_id: 2});
- res = assert.throws(function() {
- bulk.execute();
- });
- assert.eq(1, res.getWriteErrors().length, () => tojson(res));
+// Batch with error at last should not call resetError if { w: 0 }.
+assert(coll.drop());
+insertDocument({_id: 2});
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({_id: 0}).upsert().updateOne({_id: 0});
+bulk.find({_id: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
+assert.eq(1, res.getWriteErrors().length, () => tojson(res));
- gle = db.runCommand({getLastError: 1, w: 0});
- assert(gle.ok, tojson(gle));
- assert.neq(null, gle.err, tojson(gle));
+gle = db.runCommand({getLastError: 1, w: 0});
+assert(gle.ok, tojson(gle));
+assert.neq(null, gle.err, tojson(gle));
}());
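Each branch of the test above probes the same legacy contract: getLastError reports on the outcome of the most recent relevant write on the connection, so after an unordered bulk it must reflect whether that write succeeded (err null, n counting affected documents) or failed (err set). A sketch of reading that response directly:

    // Inspect the legacy getLastError response after a write on this connection.
    var gleDoc = db.runCommand({getLastError: 1});
    assert(gleDoc.ok, tojson(gleDoc));
    // gleDoc.n counts documents affected by the last write; gleDoc.err is null
    // on success and an error string on failure.
    print("n=" + gleDoc.n + " err=" + tojson(gleDoc.err));
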
diff --git a/jstests/core/bypass_doc_validation.js b/jstests/core/bypass_doc_validation.js
index 7a5f2389b3e..854b96031ba 100644
--- a/jstests/core/bypass_doc_validation.js
+++ b/jstests/core/bypass_doc_validation.js
@@ -18,174 +18,173 @@
* - update
*/
(function() {
- 'use strict';
-
- // For isWiredTiger.
- load("jstests/concurrency/fsm_workload_helpers/server_types.js");
- // For isReplSet
- load("jstests/libs/fixture_helpers.js");
-
- function assertFailsValidation(res) {
- if (res instanceof WriteResult || res instanceof BulkWriteResult) {
- assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
- } else {
- assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
- }
+'use strict';
+
+// For isWiredTiger.
+load("jstests/concurrency/fsm_workload_helpers/server_types.js");
+// For isReplSet
+load("jstests/libs/fixture_helpers.js");
+
+function assertFailsValidation(res) {
+ if (res instanceof WriteResult || res instanceof BulkWriteResult) {
+ assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
+ } else {
+ assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
}
+}
- const dbName = 'bypass_document_validation';
- const collName = 'bypass_document_validation';
- const myDb = db.getSiblingDB(dbName);
- const coll = myDb[collName];
-
- /**
- * Tests that we can bypass document validation when appropriate when a collection has validator
- * 'validator', which should enforce the existence of a field "a".
- */
- function runBypassDocumentValidationTest(validator) {
- // Use majority write concern to clear the drop-pending that can cause lock conflicts with
- // transactions.
- coll.drop({writeConcern: {w: "majority"}});
-
- // Insert documents into the collection that would not be valid before setting 'validator'.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.commandWorked(myDb.runCommand({collMod: collName, validator: validator}));
-
- const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
- // Test applyOps with a simple insert if not on mongos.
- if (!isMongos) {
- const op = [{op: 'i', ns: coll.getFullName(), o: {_id: 9}}];
- assertFailsValidation(myDb.runCommand({applyOps: op, bypassDocumentValidation: false}));
- assert.eq(0, coll.count({_id: 9}));
- assert.commandWorked(myDb.runCommand({applyOps: op, bypassDocumentValidation: true}));
- assert.eq(1, coll.count({_id: 9}));
- }
-
- // Test doTxn with a simple insert if a replica set, not on mongos and the storage engine
- // is WiredTiger.
- if (FixtureHelpers.isReplSet(db) && !isMongos && isWiredTiger(db)) {
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase(myDb.getName());
- const op = [{op: 'i', ns: coll.getFullName(), o: {_id: 10}}];
- assertFailsValidation(sessionDb.runCommand(
- {doTxn: op, bypassDocumentValidation: false, txnNumber: NumberLong("0")}));
- assert.eq(0, coll.count({_id: 10}));
- assert.commandWorked(sessionDb.runCommand(
- {doTxn: op, bypassDocumentValidation: true, txnNumber: NumberLong("1")}));
- assert.eq(1, coll.count({_id: 10}));
- }
-
- // Test the aggregation command with a $out stage.
- const outputCollName = 'bypass_output_coll';
- const outputColl = myDb[outputCollName];
- outputColl.drop();
- assert.commandWorked(myDb.createCollection(outputCollName, {validator: validator}));
- const pipeline =
- [{$match: {_id: 1}}, {$project: {aggregation: {$add: [1]}}}, {$out: outputCollName}];
- assert.throws(function() {
- coll.aggregate(pipeline, {bypassDocumentValidation: false});
- });
- assert.eq(0, outputColl.count({aggregation: 1}));
- coll.aggregate(pipeline, {bypassDocumentValidation: true});
- assert.eq(1, outputColl.count({aggregation: 1}));
-
- // Test the findAndModify command.
- assert.throws(function() {
- coll.findAndModify(
- {update: {$set: {findAndModify: 1}}, bypassDocumentValidation: false});
- });
- assert.eq(0, coll.count({findAndModify: 1}));
- coll.findAndModify({update: {$set: {findAndModify: 1}}, bypassDocumentValidation: true});
- assert.eq(1, coll.count({findAndModify: 1}));
-
- // Test the mapReduce command.
- const map = function() {
- emit(1, 1);
- };
- const reduce = function() {
- return 'mapReduce';
- };
- let res = myDb.runCommand({
- mapReduce: collName,
- map: map,
- reduce: reduce,
- out: {replace: outputCollName},
- bypassDocumentValidation: false
- });
- assertFailsValidation(res);
- assert.eq(0, outputColl.count({value: 'mapReduce'}));
- res = myDb.runCommand({
- mapReduce: collName,
- map: map,
- reduce: reduce,
- out: {replace: outputCollName},
- bypassDocumentValidation: true
- });
- assert.commandWorked(res);
- assert.eq(1, outputColl.count({value: 'mapReduce'}));
-
- // Test the insert command. Includes a test for a document with no _id (SERVER-20859).
- res = myDb.runCommand({insert: collName, documents: [{}], bypassDocumentValidation: false});
- assertFailsValidation(BulkWriteResult(res));
- res = myDb.runCommand(
- {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: false});
- assertFailsValidation(BulkWriteResult(res));
- res = myDb.runCommand(
- {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: true});
- assert.writeOK(res);
-
- // Test the update command.
- res = myDb.runCommand({
- update: collName,
- updates: [{q: {}, u: {$set: {update: 1}}}],
- bypassDocumentValidation: false
- });
- assertFailsValidation(BulkWriteResult(res));
- assert.eq(0, coll.count({update: 1}));
- res = myDb.runCommand({
- update: collName,
- updates: [{q: {}, u: {$set: {update: 1}}}],
- bypassDocumentValidation: true
- });
- assert.writeOK(res);
- assert.eq(1, coll.count({update: 1}));
-
- // Pipeline-style update is only supported for commands and not for OP_UPDATE which cannot
- // differentiate between an update object and an array.
- res = myDb.runCommand({
- update: collName,
- updates: [{q: {}, u: [{$set: {pipeline: 1}}]}],
- bypassDocumentValidation: false
- });
- assertFailsValidation(BulkWriteResult(res));
- assert.eq(0, coll.count({pipeline: 1}));
-
- assert.commandWorked(myDb.runCommand({
- update: collName,
- updates: [{q: {}, u: [{$set: {pipeline: 1}}]}],
- bypassDocumentValidation: true
- }));
- assert.eq(1, coll.count({pipeline: 1}));
-
- assert.commandFailed(myDb.runCommand({
- findAndModify: collName,
- update: [{$set: {findAndModifyPipeline: 1}}],
- bypassDocumentValidation: false
- }));
- assert.eq(0, coll.count({findAndModifyPipeline: 1}));
-
- assert.commandWorked(myDb.runCommand({
- findAndModify: collName,
- update: [{$set: {findAndModifyPipeline: 1}}],
- bypassDocumentValidation: true
- }));
- assert.eq(1, coll.count({findAndModifyPipeline: 1}));
+const dbName = 'bypass_document_validation';
+const collName = 'bypass_document_validation';
+const myDb = db.getSiblingDB(dbName);
+const coll = myDb[collName];
+
+/**
+ * Tests that document validation can be bypassed when appropriate on a collection whose
+ * validator is 'validator', which should enforce the existence of a field "a".
+ */
+function runBypassDocumentValidationTest(validator) {
+ // Use majority write concern to clear the drop-pending state that can cause lock conflicts
+ // with transactions.
+ coll.drop({writeConcern: {w: "majority"}});
+
+ // Insert documents into the collection that would not be valid before setting 'validator'.
+ assert.writeOK(coll.insert({_id: 1}));
+ assert.writeOK(coll.insert({_id: 2}));
+ assert.commandWorked(myDb.runCommand({collMod: collName, validator: validator}));
+
+ const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
+ // Test applyOps with a simple insert if not on mongos.
+ if (!isMongos) {
+ const op = [{op: 'i', ns: coll.getFullName(), o: {_id: 9}}];
+ assertFailsValidation(myDb.runCommand({applyOps: op, bypassDocumentValidation: false}));
+ assert.eq(0, coll.count({_id: 9}));
+ assert.commandWorked(myDb.runCommand({applyOps: op, bypassDocumentValidation: true}));
+ assert.eq(1, coll.count({_id: 9}));
}
- // Run the test using a normal validator.
- runBypassDocumentValidationTest({a: {$exists: true}});
+ // Test doTxn with a simple insert if this is a replica set, we are not on mongos, and the
+ // storage engine is WiredTiger.
+ if (FixtureHelpers.isReplSet(db) && !isMongos && isWiredTiger(db)) {
+ const session = db.getMongo().startSession();
+ const sessionDb = session.getDatabase(myDb.getName());
+ const op = [{op: 'i', ns: coll.getFullName(), o: {_id: 10}}];
+ assertFailsValidation(sessionDb.runCommand(
+ {doTxn: op, bypassDocumentValidation: false, txnNumber: NumberLong("0")}));
+ assert.eq(0, coll.count({_id: 10}));
+ assert.commandWorked(sessionDb.runCommand(
+ {doTxn: op, bypassDocumentValidation: true, txnNumber: NumberLong("1")}));
+ assert.eq(1, coll.count({_id: 10}));
+ }
- // Run the test again with an equivalent JSON Schema validator.
- runBypassDocumentValidationTest({$jsonSchema: {required: ['a']}});
+ // Test the aggregation command with a $out stage.
+ const outputCollName = 'bypass_output_coll';
+ const outputColl = myDb[outputCollName];
+ outputColl.drop();
+ assert.commandWorked(myDb.createCollection(outputCollName, {validator: validator}));
+ const pipeline =
+ [{$match: {_id: 1}}, {$project: {aggregation: {$add: [1]}}}, {$out: outputCollName}];
+ assert.throws(function() {
+ coll.aggregate(pipeline, {bypassDocumentValidation: false});
+ });
+ assert.eq(0, outputColl.count({aggregation: 1}));
+ coll.aggregate(pipeline, {bypassDocumentValidation: true});
+ assert.eq(1, outputColl.count({aggregation: 1}));
+
+ // Test the findAndModify command.
+ assert.throws(function() {
+ coll.findAndModify({update: {$set: {findAndModify: 1}}, bypassDocumentValidation: false});
+ });
+ assert.eq(0, coll.count({findAndModify: 1}));
+ coll.findAndModify({update: {$set: {findAndModify: 1}}, bypassDocumentValidation: true});
+ assert.eq(1, coll.count({findAndModify: 1}));
+
+ // Test the mapReduce command.
+ const map = function() {
+ emit(1, 1);
+ };
+ const reduce = function() {
+ return 'mapReduce';
+ };
+ let res = myDb.runCommand({
+ mapReduce: collName,
+ map: map,
+ reduce: reduce,
+ out: {replace: outputCollName},
+ bypassDocumentValidation: false
+ });
+ assertFailsValidation(res);
+ assert.eq(0, outputColl.count({value: 'mapReduce'}));
+ res = myDb.runCommand({
+ mapReduce: collName,
+ map: map,
+ reduce: reduce,
+ out: {replace: outputCollName},
+ bypassDocumentValidation: true
+ });
+ assert.commandWorked(res);
+ assert.eq(1, outputColl.count({value: 'mapReduce'}));
+
+ // Test the insert command. Includes a test for a document with no _id (SERVER-20859).
+ res = myDb.runCommand({insert: collName, documents: [{}], bypassDocumentValidation: false});
+ assertFailsValidation(BulkWriteResult(res));
+ res = myDb.runCommand(
+ {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: false});
+ assertFailsValidation(BulkWriteResult(res));
+ res = myDb.runCommand(
+ {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: true});
+ assert.writeOK(res);
+
+ // Test the update command.
+ res = myDb.runCommand({
+ update: collName,
+ updates: [{q: {}, u: {$set: {update: 1}}}],
+ bypassDocumentValidation: false
+ });
+ assertFailsValidation(BulkWriteResult(res));
+ assert.eq(0, coll.count({update: 1}));
+ res = myDb.runCommand({
+ update: collName,
+ updates: [{q: {}, u: {$set: {update: 1}}}],
+ bypassDocumentValidation: true
+ });
+ assert.writeOK(res);
+ assert.eq(1, coll.count({update: 1}));
+
+ // Pipeline-style update is only supported for commands and not for OP_UPDATE, which cannot
+ // differentiate between an update object and an array.
+ res = myDb.runCommand({
+ update: collName,
+ updates: [{q: {}, u: [{$set: {pipeline: 1}}]}],
+ bypassDocumentValidation: false
+ });
+ assertFailsValidation(BulkWriteResult(res));
+ assert.eq(0, coll.count({pipeline: 1}));
+
+ assert.commandWorked(myDb.runCommand({
+ update: collName,
+ updates: [{q: {}, u: [{$set: {pipeline: 1}}]}],
+ bypassDocumentValidation: true
+ }));
+ assert.eq(1, coll.count({pipeline: 1}));
+
+ assert.commandFailed(myDb.runCommand({
+ findAndModify: collName,
+ update: [{$set: {findAndModifyPipeline: 1}}],
+ bypassDocumentValidation: false
+ }));
+ assert.eq(0, coll.count({findAndModifyPipeline: 1}));
+
+ assert.commandWorked(myDb.runCommand({
+ findAndModify: collName,
+ update: [{$set: {findAndModifyPipeline: 1}}],
+ bypassDocumentValidation: true
+ }));
+ assert.eq(1, coll.count({findAndModifyPipeline: 1}));
+}
+
+// Run the test using a normal validator.
+runBypassDocumentValidationTest({a: {$exists: true}});
+
+// Run the test again with an equivalent JSON Schema validator.
+runBypassDocumentValidationTest({$jsonSchema: {required: ['a']}});
})();
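
For context: the bypassDocumentValidation option exercised above lets a sufficiently
privileged write skip a collection's validator. A minimal shell sketch of the pattern,
assuming a validator that requires field "a" (the 'bypass_demo' and 'vcoll' names are
illustrative, not from this patch):

    const demoDb = db.getSiblingDB('bypass_demo');
    demoDb.vcoll.drop();
    assert.commandWorked(demoDb.createCollection('vcoll', {validator: {a: {$exists: true}}}));
    // Rejected: the document has no field "a", so the validator fails it.
    assert.writeError(demoDb.vcoll.insert({b: 1}));
    // Accepted: the command form asks the server to skip validation.
    assert.commandWorked(demoDb.runCommand(
        {insert: 'vcoll', documents: [{b: 1}], bypassDocumentValidation: true}));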
diff --git a/jstests/core/capped6.js b/jstests/core/capped6.js
index 9fffa2db7b3..393d8589a60 100644
--- a/jstests/core/capped6.js
+++ b/jstests/core/capped6.js
@@ -11,103 +11,103 @@
// uses_testing_only_commands,
// ]
(function() {
- var coll = db.capped6;
+var coll = db.capped6;
- Random.setRandomSeed();
- var maxDocuments = Random.randInt(400) + 100;
+Random.setRandomSeed();
+var maxDocuments = Random.randInt(400) + 100;
- /**
- * Check that documents in the collection are in order according to the value
- * of a, which corresponds to the insert order. This is a check that the oldest
- * document(s) is/are deleted when space is needed for the newest document. The
- * check is performed in both forward and reverse directions.
- */
- function checkOrder(i, valueArray) {
- res = coll.find().sort({$natural: -1});
- assert(res.hasNext(), "A");
- var j = i;
- while (res.hasNext()) {
- assert.eq(valueArray[j--].a, res.next().a, "B");
- }
+/**
+ * Check that documents in the collection are in order according to the value
+ * of a, which corresponds to the insert order. This is a check that the oldest
+ * document(s) is/are deleted when space is needed for the newest document. The
+ * check is performed in both forward and reverse directions.
+ */
+function checkOrder(i, valueArray) {
+ res = coll.find().sort({$natural: -1});
+ assert(res.hasNext(), "A");
+ var j = i;
+ while (res.hasNext()) {
+ assert.eq(valueArray[j--].a, res.next().a, "B");
+ }
- res = coll.find().sort({$natural: 1});
- assert(res.hasNext(), "C");
- while (res.hasNext()) {
- assert.eq(valueArray[++j].a, res.next().a, "D");
- }
- assert.eq(j, i, "E");
+ res = coll.find().sort({$natural: 1});
+ assert(res.hasNext(), "C");
+ while (res.hasNext()) {
+ assert.eq(valueArray[++j].a, res.next().a, "D");
}
+ assert.eq(j, i, "E");
+}
- /*
- * Prepare the values to insert and create the capped collection.
- */
- function prepareCollection(shouldReverse) {
- coll.drop();
- assert.commandWorked(db.createCollection("capped6", {capped: true, size: 1000}));
- var valueArray = new Array(maxDocuments);
- var c = "";
- for (i = 0; i < maxDocuments; ++i, c += "-") {
- // The a values are strings of increasing length.
- valueArray[i] = {a: c};
- }
- if (shouldReverse) {
- valueArray.reverse();
- }
- return valueArray;
+/*
+ * Prepare the values to insert and create the capped collection.
+ */
+function prepareCollection(shouldReverse) {
+ coll.drop();
+ assert.commandWorked(db.createCollection("capped6", {capped: true, size: 1000}));
+ var valueArray = new Array(maxDocuments);
+ var c = "";
+ for (i = 0; i < maxDocuments; ++i, c += "-") {
+ // The a values are strings of increasing length.
+ valueArray[i] = {a: c};
+ }
+ if (shouldReverse) {
+ valueArray.reverse();
}
+ return valueArray;
+}
- /**
- * 1. When this function is called the first time, insert new documents until 'maxDocuments'
- * number of documents have been inserted. Note that the collection may not have
- * 'maxDocuments' number of documents since it is a capped collection.
- * 2. Remove all but one document via one or more "captrunc" requests.
- * 3. For each subsequent call to this function, keep track of the removed documents using
- * 'valueArrayCurIndex' and re-insert the removed documents each time this function is
- * called.
- */
- function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) {
- // If n <= 0, no documents are removed by captrunc.
- assert.gt(n, 0);
- assert.gte(valueArray.length, maxDocuments);
- for (var i = valueArrayCurIndex; i < maxDocuments; ++i) {
- assert.writeOK(coll.insert(valueArray[i]));
- }
- count = coll.count();
+/**
+ * 1. When this function is called the first time, insert new documents until 'maxDocuments'
+ * number of documents have been inserted. Note that the collection may not have
+ * 'maxDocuments' number of documents since it is a capped collection.
+ * 2. Remove all but one document via one or more "captrunc" requests.
+ * 3. For each subsequent call to this function, keep track of the removed documents using
+ * 'valueArrayCurIndex' and re-insert the removed documents each time this function is
+ * called.
+ */
+function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) {
+ // If n <= 0, no documents are removed by captrunc.
+ assert.gt(n, 0);
+ assert.gte(valueArray.length, maxDocuments);
+ for (var i = valueArrayCurIndex; i < maxDocuments; ++i) {
+ assert.writeOK(coll.insert(valueArray[i]));
+ }
+ count = coll.count();
- // The index corresponding to the last document in the collection.
- valueArrayCurIndex = maxDocuments - 1;
+ // The index corresponding to the last document in the collection.
+ valueArrayCurIndex = maxDocuments - 1;
- // Number of times to call "captrunc" so that (count - 1) documents are removed
- // and at least 1 document is left in the array.
- var iterations = Math.floor((count - 1) / (n + inc));
+ // Number of times to call "captrunc" so that (count - 1) documents are removed
+ // and at least 1 document is left in the array.
+ var iterations = Math.floor((count - 1) / (n + inc));
- for (i = 0; i < iterations; ++i) {
- assert.commandWorked(db.runCommand({captrunc: "capped6", n: n, inc: inc}));
- count -= (n + inc);
- valueArrayCurIndex -= (n + inc);
- checkOrder(valueArrayCurIndex, valueArray);
- }
- // We return the index of the next document that should be inserted into the capped
- // collection, which would be the document after valueArrayCurIndex.
- return valueArrayCurIndex + 1;
+ for (i = 0; i < iterations; ++i) {
+ assert.commandWorked(db.runCommand({captrunc: "capped6", n: n, inc: inc}));
+ count -= (n + inc);
+ valueArrayCurIndex -= (n + inc);
+ checkOrder(valueArrayCurIndex, valueArray);
}
+ // We return the index of the next document that should be inserted into the capped
+ // collection, which would be the document after valueArrayCurIndex.
+ return valueArrayCurIndex + 1;
+}
- function doTest(shouldReverse) {
- var valueArray = prepareCollection(shouldReverse);
- var valueArrayIndex = 0;
- valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, false);
- valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, true);
- valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, true);
- valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, false);
- valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, true);
- valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, false);
- }
+function doTest(shouldReverse) {
+ var valueArray = prepareCollection(shouldReverse);
+ var valueArrayIndex = 0;
+ valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, false);
+ valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, true);
+ valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, true);
+ valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, false);
+ valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, true);
+ valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, false);
+}
- // Repeatedly add up to 'maxDocuments' documents and then truncate the newest
- // documents. Newer documents take up more space than older documents.
- doTest(false);
+// Repeatedly add up to 'maxDocuments' documents and then truncate the newest
+// documents. Newer documents take up more space than older documents.
+doTest(false);
- // Same test as above, but now the newer documents take less space than the
- // older documents instead of more.
- doTest(true);
+// Same test as above, but now the newer documents take less space than the
+// older documents instead of more.
+doTest(true);
})();
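
For context on capped6.js: a capped collection evicts its oldest documents, in insertion
order, once its size bound is reached, and the testing-only captrunc command removes the n
newest documents (inc: true removes one extra). A sketch under those assumptions, with an
illustrative collection name and a server started with test commands enabled:

    const capped = db.capped_evict_demo;
    capped.drop();
    assert.commandWorked(db.createCollection('capped_evict_demo', {capped: true, size: 4096}));
    for (let i = 0; i < 200; ++i) {
        assert.writeOK(capped.insert({i: i}));
    }
    // $natural order is insertion order; older documents were evicted to make
    // room, so only a suffix of the inserted sequence survives.
    assert.lt(capped.count(), 200);
    assert.eq(199, capped.find().sort({$natural: -1}).next().i);
    // Remove the 10 newest documents.
    assert.commandWorked(db.runCommand({captrunc: 'capped_evict_demo', n: 10, inc: false}));
    assert.eq(189, capped.find().sort({$natural: -1}).next().i);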
diff --git a/jstests/core/capped_queries_and_id_index.js b/jstests/core/capped_queries_and_id_index.js
index a10a4f60daf..1bf463f05ed 100644
--- a/jstests/core/capped_queries_and_id_index.js
+++ b/jstests/core/capped_queries_and_id_index.js
@@ -1,24 +1,24 @@
// Tests the behavior of querying or updating a capped collection with and without an _id index.
// @tags: [requires_capped]
(function() {
- "use strict";
- const coll = db.capped9;
- coll.drop();
+"use strict";
+const coll = db.capped9;
+coll.drop();
- assert.commandWorked(db.createCollection("capped9", {capped: true, size: 1024 * 50}));
+assert.commandWorked(db.createCollection("capped9", {capped: true, size: 1024 * 50}));
- assert.writeOK(coll.insert({_id: 1, x: 2, y: 3}));
+assert.writeOK(coll.insert({_id: 1, x: 2, y: 3}));
- assert.eq(1, coll.find({x: 2}).itcount());
- assert.eq(1, coll.find({y: 3}).itcount());
+assert.eq(1, coll.find({x: 2}).itcount());
+assert.eq(1, coll.find({y: 3}).itcount());
- // SERVER-3064 proposes making the following queries/updates by _id result in an error.
- assert.eq(1, coll.find({_id: 1}).itcount());
- assert.writeOK(coll.update({_id: 1}, {$set: {y: 4}}));
- assert.eq(4, coll.findOne().y);
+// SERVER-3064 proposes making the following queries/updates by _id result in an error.
+assert.eq(1, coll.find({_id: 1}).itcount());
+assert.writeOK(coll.update({_id: 1}, {$set: {y: 4}}));
+assert.eq(4, coll.findOne().y);
- assert.commandWorked(coll.createIndex({_id: 1}));
- assert.eq(1, coll.find({_id: 1}).itcount());
- assert.writeOK(coll.update({_id: 1}, {$set: {y: 5}}));
- assert.eq(5, coll.findOne().y);
+assert.commandWorked(coll.createIndex({_id: 1}));
+assert.eq(1, coll.find({_id: 1}).itcount());
+assert.writeOK(coll.update({_id: 1}, {$set: {y: 5}}));
+assert.eq(5, coll.findOne().y);
}());
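
For context on capped_queries_and_id_index.js: SERVER-3064 proposes making _id queries and
updates error on a capped collection that lacks an _id index; until then they must return
correct answers via collection scan, and the same answers once the index exists. A short
sketch ('capped_id_demo' is an illustrative name):

    const c = db.capped_id_demo;
    c.drop();
    assert.commandWorked(db.createCollection('capped_id_demo', {capped: true, size: 4096}));
    assert.writeOK(c.insert({_id: 1, y: 3}));
    assert.eq(1, c.find({_id: 1}).itcount());  // collection scan if _id is unindexed
    assert.commandWorked(c.createIndex({_id: 1}));
    assert.eq(1, c.find({_id: 1}).itcount());  // same answer through the index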
diff --git a/jstests/core/capped_update.js b/jstests/core/capped_update.js
index b75ecb8243e..f11502b45fe 100644
--- a/jstests/core/capped_update.js
+++ b/jstests/core/capped_update.js
@@ -8,26 +8,26 @@
* ]
*/
(function() {
- 'use strict';
- var t = db.getSiblingDB("local").cannot_change_capped_size;
- t.drop();
- assert.commandWorked(
- t.getDB().createCollection(t.getName(), {capped: true, size: 1024, autoIndexId: false}));
- assert.eq(0, t.getIndexes().length, "the capped collection has indexes");
+'use strict';
+var t = db.getSiblingDB("local").cannot_change_capped_size;
+t.drop();
+assert.commandWorked(
+ t.getDB().createCollection(t.getName(), {capped: true, size: 1024, autoIndexId: false}));
+assert.eq(0, t.getIndexes().length, "the capped collection has indexes");
- for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({_id: j, s: "Hello, World!"}));
- }
+for (var j = 1; j <= 10; j++) {
+ assert.writeOK(t.insert({_id: j, s: "Hello, World!"}));
+}
- assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is the same length as World
- assert.writeError(t.update({_id: 3}, {$set: {s: "Hello!"}}));
- assert.writeError(t.update({_id: 10}, {}));
- assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"}));
+assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"}));  // Mongo is the same length as World
+assert.writeError(t.update({_id: 3}, {$set: {s: "Hello!"}}));
+assert.writeError(t.update({_id: 10}, {}));
+assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"}));
- assert.commandWorked(t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}}));
- var doc = t.findOne({a: 2});
- assert.eq(undefined, doc["_id"], "now has _id after godinsert");
- assert.writeOK(t.update({a: 2}, {$inc: {a: 1}}));
- doc = t.findOne({a: 3});
- assert.eq(undefined, doc["_id"], "now has _id after update");
+assert.commandWorked(t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}}));
+var doc = t.findOne({a: 2});
+assert.eq(undefined, doc["_id"], "now has _id after godinsert");
+assert.writeOK(t.update({a: 2}, {$inc: {a: 1}}));
+doc = t.findOne({a: 3});
+assert.eq(undefined, doc["_id"], "now has _id after update");
})();
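
For context on capped_update.js: on this server version, an update to a document in a capped
collection must not change the document's size; growing or shrinking it fails with a write
error. A sketch ('capped_upd_demo' is an illustrative name):

    const cu = db.capped_upd_demo;
    cu.drop();
    assert.commandWorked(db.createCollection('capped_upd_demo', {capped: true, size: 1024}));
    assert.writeOK(cu.insert({_id: 1, s: 'aaaa'}));
    // Same-size replacement is allowed...
    assert.writeOK(cu.update({_id: 1}, {s: 'bbbb'}));
    // ...but a size-changing update is rejected.
    assert.writeError(cu.update({_id: 1}, {$set: {s: 'a much longer string'}}));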
diff --git a/jstests/core/client_metadata_ismaster.js b/jstests/core/client_metadata_ismaster.js
index e5aa7d2547a..92a6b9cee2d 100644
--- a/jstests/core/client_metadata_ismaster.js
+++ b/jstests/core/client_metadata_ismaster.js
@@ -1,12 +1,11 @@
// Test that verifies client metadata behavior for isMaster
(function() {
- "use strict";
-
- // Verify that an isMaster request fails if it contains client metadata and it is not first.
- // The shell sends isMaster on the first connection.
- var result = db.runCommand({"isMaster": 1, "client": {"application": "foobar"}});
- assert.commandFailed(result);
- assert.eq(result.code, ErrorCodes.ClientMetadataCannotBeMutated, tojson(result));
+"use strict";
+// Verify that an isMaster request fails if it contains client metadata and it is not first.
+// The shell sends isMaster on the first connection.
+var result = db.runCommand({"isMaster": 1, "client": {"application": "foobar"}});
+assert.commandFailed(result);
+assert.eq(result.code, ErrorCodes.ClientMetadataCannotBeMutated, tojson(result));
})();
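
For context on client_metadata_ismaster.js: a connection may attach the 'client' metadata
document only to its very first isMaster; later attempts are rejected with
ClientMetadataCannotBeMutated. Since the shell sends its own metadata at connect time, any
isMaster issued from a script is never first, so a sketch of the check is simply:

    const res = db.runCommand({isMaster: 1, client: {application: {name: 'demo'}}});
    assert.commandFailed(res);
    assert.eq(res.code, ErrorCodes.ClientMetadataCannotBeMutated, tojson(res));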
diff --git a/jstests/core/clone_as_capped_nonexistant.js b/jstests/core/clone_as_capped_nonexistant.js
index 1a87749002d..a4f8cf9787d 100644
--- a/jstests/core/clone_as_capped_nonexistant.js
+++ b/jstests/core/clone_as_capped_nonexistant.js
@@ -8,31 +8,30 @@
*/
(function() {
- "use strict";
- // This test ensures that running CloneCollectionAsCapped() on a nonexistent collection
- // will not cause the server to abort (SERVER-13750).
+"use strict";
+// This test ensures that running CloneCollectionAsCapped() on a nonexistent collection will not
+// cause the server to abort (SERVER-13750).
- var dbname = "clone_collection_as_capped_nonexistent";
- var testDb = db.getSiblingDB(dbname);
- testDb.dropDatabase();
+var dbname = "clone_collection_as_capped_nonexistent";
+var testDb = db.getSiblingDB(dbname);
+testDb.dropDatabase();
- // Database does not exist here
- var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024});
- assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked");
- var isSharded = (db.isMaster().msg == "isdbgrid");
+// Database does not exist here
+var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024});
+assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked");
+var isSharded = (db.isMaster().msg == "isdbgrid");
- assert.eq(
- res.errmsg,
- isSharded ? "no such cmd: cloneCollectionAsCapped" : "database " + dbname + " not found",
- "converting a nonexistent to capped failed but for the wrong reason");
+assert.eq(res.errmsg,
+ isSharded ? "no such cmd: cloneCollectionAsCapped" : "database " + dbname + " not found",
+ "converting a nonexistent to capped failed but for the wrong reason");
- // Database exists, but collection doesn't
- testDb.coll.insert({});
+// Database exists, but collection doesn't
+testDb.coll.insert({});
- var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024});
- assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked");
- assert.eq(res.errmsg,
- isSharded ? "no such cmd: cloneCollectionAsCapped"
- : "source collection " + dbname + ".foo does not exist",
- "converting a nonexistent to capped failed but for the wrong reason");
+var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024});
+assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked");
+assert.eq(res.errmsg,
+ isSharded ? "no such cmd: cloneCollectionAsCapped"
+ : "source collection " + dbname + ".foo does not exist",
+ "converting a nonexistent to capped failed but for the wrong reason");
}());
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index 22e8adf4c06..99623d18b7e 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -9,95 +9,135 @@
// Integration tests for the collation feature.
(function() {
- 'use strict';
-
- load("jstests/libs/analyze_plan.js");
- load("jstests/libs/get_index_helpers.js");
- // For isWiredTiger.
- load("jstests/concurrency/fsm_workload_helpers/server_types.js");
- // For isReplSet
- load("jstests/libs/fixture_helpers.js");
-
- var coll = db.collation;
- coll.drop();
-
- var explainRes;
- var writeRes;
- var planStage;
-
- var isMaster = db.runCommand("ismaster");
- assert.commandWorked(isMaster);
- var isMongos = (isMaster.msg === "isdbgrid");
-
- var assertIndexHasCollation = function(keyPattern, collation) {
- var indexSpecs = coll.getIndexes();
- var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation);
- assert.neq(null,
- found,
- "Index with key pattern " + tojson(keyPattern) + " and collation " +
- tojson(collation) + " not found: " + tojson(indexSpecs));
- };
-
- var getQueryCollation = function(explainRes) {
- if (explainRes.queryPlanner.hasOwnProperty("collation")) {
- return explainRes.queryPlanner.collation;
- }
-
- if (explainRes.queryPlanner.winningPlan.hasOwnProperty("shards") &&
- explainRes.queryPlanner.winningPlan.shards.length > 0 &&
- explainRes.queryPlanner.winningPlan.shards[0].hasOwnProperty("collation")) {
- return explainRes.queryPlanner.winningPlan.shards[0].collation;
- }
-
- return null;
- };
-
- //
- // Test using db.createCollection() to make a collection with a default collation.
- //
-
- // Attempting to create a collection with an invalid collation should fail.
- assert.commandFailed(db.createCollection("collation", {collation: "not an object"}));
- assert.commandFailed(db.createCollection("collation", {collation: {}}));
- assert.commandFailed(db.createCollection("collation", {collation: {blah: 1}}));
- assert.commandFailed(db.createCollection("collation", {collation: {locale: "en", blah: 1}}));
- assert.commandFailed(db.createCollection("collation", {collation: {locale: "xx"}}));
- assert.commandFailed(
- db.createCollection("collation", {collation: {locale: "en", strength: 99}}));
-
- // Attempting to create a collection whose collation version does not match the collator version
- // produced by ICU should result in failure with a special error code.
- assert.commandFailedWithCode(
- db.createCollection("collation", {collation: {locale: "en", version: "unknownVersion"}}),
- ErrorCodes.IncompatibleCollationVersion);
-
- // Ensure we can create a collection with the "simple" collation as the collection default.
- assert.commandWorked(db.createCollection("collation", {collation: {locale: "simple"}}));
- var collectionInfos = db.getCollectionInfos({name: "collation"});
- assert.eq(collectionInfos.length, 1);
- assert(!collectionInfos[0].options.hasOwnProperty("collation"));
- coll.drop();
+'use strict';
+
+load("jstests/libs/analyze_plan.js");
+load("jstests/libs/get_index_helpers.js");
+// For isWiredTiger.
+load("jstests/concurrency/fsm_workload_helpers/server_types.js");
+// For isReplSet
+load("jstests/libs/fixture_helpers.js");
+
+var coll = db.collation;
+coll.drop();
+
+var explainRes;
+var writeRes;
+var planStage;
+
+var isMaster = db.runCommand("ismaster");
+assert.commandWorked(isMaster);
+var isMongos = (isMaster.msg === "isdbgrid");
+
+var assertIndexHasCollation = function(keyPattern, collation) {
+ var indexSpecs = coll.getIndexes();
+ var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation);
+ assert.neq(null,
+ found,
+ "Index with key pattern " + tojson(keyPattern) + " and collation " +
+ tojson(collation) + " not found: " + tojson(indexSpecs));
+};
+
+var getQueryCollation = function(explainRes) {
+ if (explainRes.queryPlanner.hasOwnProperty("collation")) {
+ return explainRes.queryPlanner.collation;
+ }
- // Ensure that we populate all collation-related fields when we create a collection with a valid
- // collation.
- assert.commandWorked(db.createCollection("collation", {collation: {locale: "fr_CA"}}));
- var collectionInfos = db.getCollectionInfos({name: "collation"});
- assert.eq(collectionInfos.length, 1);
- assert.eq(collectionInfos[0].options.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
+ if (explainRes.queryPlanner.winningPlan.hasOwnProperty("shards") &&
+ explainRes.queryPlanner.winningPlan.shards.length > 0 &&
+ explainRes.queryPlanner.winningPlan.shards[0].hasOwnProperty("collation")) {
+ return explainRes.queryPlanner.winningPlan.shards[0].collation;
+ }
- // Ensure that an index with no collation inherits the collection-default collation.
- assert.commandWorked(coll.ensureIndex({a: 1}));
+ return null;
+};
+
+//
+// Test using db.createCollection() to make a collection with a default collation.
+//
+
+// Attempting to create a collection with an invalid collation should fail.
+assert.commandFailed(db.createCollection("collation", {collation: "not an object"}));
+assert.commandFailed(db.createCollection("collation", {collation: {}}));
+assert.commandFailed(db.createCollection("collation", {collation: {blah: 1}}));
+assert.commandFailed(db.createCollection("collation", {collation: {locale: "en", blah: 1}}));
+assert.commandFailed(db.createCollection("collation", {collation: {locale: "xx"}}));
+assert.commandFailed(db.createCollection("collation", {collation: {locale: "en", strength: 99}}));
+
+// Attempting to create a collection whose collation version does not match the collator version
+// produced by ICU should result in failure with a special error code.
+assert.commandFailedWithCode(
+ db.createCollection("collation", {collation: {locale: "en", version: "unknownVersion"}}),
+ ErrorCodes.IncompatibleCollationVersion);
+
+// Ensure we can create a collection with the "simple" collation as the collection default.
+assert.commandWorked(db.createCollection("collation", {collation: {locale: "simple"}}));
+var collectionInfos = db.getCollectionInfos({name: "collation"});
+assert.eq(collectionInfos.length, 1);
+assert(!collectionInfos[0].options.hasOwnProperty("collation"));
+coll.drop();
+
+// Ensure that we populate all collation-related fields when we create a collection with a valid
+// collation.
+assert.commandWorked(db.createCollection("collation", {collation: {locale: "fr_CA"}}));
+var collectionInfos = db.getCollectionInfos({name: "collation"});
+assert.eq(collectionInfos.length, 1);
+assert.eq(collectionInfos[0].options.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Ensure that an index with no collation inherits the collection-default collation.
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assertIndexHasCollation({a: 1}, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Ensure that an index which specifies an overriding collation does not use the collection
+// default.
+assert.commandWorked(coll.ensureIndex({b: 1}, {collation: {locale: "en_US"}}));
+assertIndexHasCollation({b: 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false,
+ version: "57.1",
+});
+
+// Ensure that an index which specifies the "simple" collation as an overriding collation still
+// does not use the collection default.
+assert.commandWorked(coll.ensureIndex({d: 1}, {collation: {locale: "simple"}}));
+assertIndexHasCollation({d: 1}, {locale: "simple"});
+
+// Ensure that a v=1 index doesn't inherit the collection-default collation.
+assert.commandWorked(coll.ensureIndex({c: 1}, {v: 1}));
+assertIndexHasCollation({c: 1}, {locale: "simple"});
+
+// Test that all indexes retain their current collation when the collection is re-indexed.
+if (!isMongos) {
+ assert.commandWorked(coll.reIndex());
assertIndexHasCollation({a: 1}, {
locale: "fr_CA",
caseLevel: false,
@@ -110,10 +150,6 @@
backwards: true,
version: "57.1",
});
-
- // Ensure that an index which specifies an overriding collation does not use the collection
- // default.
- assert.commandWorked(coll.ensureIndex({b: 1}, {collation: {locale: "en_US"}}));
assertIndexHasCollation({b: 1}, {
locale: "en_US",
caseLevel: false,
@@ -126,1911 +162,1839 @@
backwards: false,
version: "57.1",
});
-
- // Ensure that an index which specifies the "simple" collation as an overriding collation still
- // does not use the collection default.
- assert.commandWorked(coll.ensureIndex({d: 1}, {collation: {locale: "simple"}}));
assertIndexHasCollation({d: 1}, {locale: "simple"});
-
- // Ensure that a v=1 index doesn't inherit the collection-default collation.
- assert.commandWorked(coll.ensureIndex({c: 1}, {v: 1}));
assertIndexHasCollation({c: 1}, {locale: "simple"});
-
- // Test that all indexes retain their current collation when the collection is re-indexed.
- if (!isMongos) {
- assert.commandWorked(coll.reIndex());
- assertIndexHasCollation({a: 1}, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
- assertIndexHasCollation({b: 1}, {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false,
- version: "57.1",
- });
- assertIndexHasCollation({d: 1}, {locale: "simple"});
- assertIndexHasCollation({c: 1}, {locale: "simple"});
- }
-
- coll.drop();
-
- //
- // Creating an index with a collation.
- //
-
- // Attempting to build an index with an invalid collation should fail.
- assert.commandFailed(coll.ensureIndex({a: 1}, {collation: "not an object"}));
- assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {}}));
- assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {blah: 1}}));
- assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", blah: 1}}));
- assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "xx"}}));
- assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", strength: 99}}));
-
- // Attempting to create an index whose collation version does not match the collator version
- // produced by ICU should result in failure with a special error code.
- assert.commandFailedWithCode(
- coll.ensureIndex({a: 1}, {collation: {locale: "en", version: "unknownVersion"}}),
- ErrorCodes.IncompatibleCollationVersion);
-
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
- assertIndexHasCollation({a: 1}, {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false,
- version: "57.1",
- });
-
- assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "en_US"}}));
- assertIndexHasCollation({b: 1}, {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false,
- version: "57.1",
- });
-
- assert.commandWorked(coll.createIndexes([{c: 1}, {d: 1}], {collation: {locale: "fr_CA"}}));
- assertIndexHasCollation({c: 1}, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
- assertIndexHasCollation({d: 1}, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- assert.commandWorked(coll.createIndexes([{e: 1}], {collation: {locale: "simple"}}));
- assertIndexHasCollation({e: 1}, {locale: "simple"});
-
- // Test that an index with a non-simple collation contains collator-generated comparison keys
- // rather than the verbatim indexed strings.
- if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
- assert.commandWorked(coll.createIndex({b: 1}));
- assert.writeOK(coll.insert({a: "foo", b: "foo"}));
- assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount());
- assert.neq("foo",
- coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a);
- assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount());
- assert.eq("foo",
- coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().next().b);
- }
-
- // Test that a query with a string comparison can use an index with a non-simple collation if it
- // has a matching collation.
- if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
-
- // Query has simple collation, but index has fr_CA collation.
- explainRes = coll.find({a: "foo"}).explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN"));
-
- // Query has en_US collation, but index has fr_CA collation.
- explainRes = coll.find({a: "foo"}).collation({locale: "en_US"}).explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN"));
-
- // Matching collations.
- explainRes = coll.find({a: "foo"}).collation({locale: "fr_CA"}).explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
- }
-
- // Should not be possible to create a text index with an explicit non-simple collation.
+}
+
+coll.drop();
+
+//
+// Creating an index with a collation.
+//
+
+// Attempting to build an index with an invalid collation should fail.
+assert.commandFailed(coll.ensureIndex({a: 1}, {collation: "not an object"}));
+assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {}}));
+assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {blah: 1}}));
+assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", blah: 1}}));
+assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "xx"}}));
+assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", strength: 99}}));
+
+// Attempting to create an index whose collation version does not match the collator version
+// produced by ICU should result in failure with a special error code.
+assert.commandFailedWithCode(
+ coll.ensureIndex({a: 1}, {collation: {locale: "en", version: "unknownVersion"}}),
+ ErrorCodes.IncompatibleCollationVersion);
+
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
+assertIndexHasCollation({a: 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false,
+ version: "57.1",
+});
+
+assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "en_US"}}));
+assertIndexHasCollation({b: 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false,
+ version: "57.1",
+});
+
+assert.commandWorked(coll.createIndexes([{c: 1}, {d: 1}], {collation: {locale: "fr_CA"}}));
+assertIndexHasCollation({c: 1}, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+assertIndexHasCollation({d: 1}, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+assert.commandWorked(coll.createIndexes([{e: 1}], {collation: {locale: "simple"}}));
+assertIndexHasCollation({e: 1}, {locale: "simple"});
+
+// Test that an index with a non-simple collation contains collator-generated comparison keys
+// rather than the verbatim indexed strings.
+if (db.getMongo().useReadCommands()) {
coll.drop();
- assert.commandFailed(coll.createIndex({a: "text"}, {collation: {locale: "en"}}));
-
- // Text index builds which inherit a non-simple default collation should fail.
+ assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
+ assert.commandWorked(coll.createIndex({b: 1}));
+ assert.writeOK(coll.insert({a: "foo", b: "foo"}));
+ assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount());
+ assert.neq("foo", coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a);
+ assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount());
+ assert.eq("foo", coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().next().b);
+}
+
+// Test that a query with a string comparison can use an index with a non-simple collation if it
+// has a matching collation.
+if (db.getMongo().useReadCommands()) {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}}));
- assert.commandFailed(coll.createIndex({a: "text"}));
+ assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
- // Text index build should succeed on a collection with a non-simple default collation if it
- // explicitly overrides the default with {locale: "simple"}.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}}));
- assert.commandWorked(coll.createIndex({a: "text"}, {collation: {locale: "simple"}}));
+ // Query has simple collation, but index has fr_CA collation.
+ explainRes = coll.find({a: "foo"}).explain();
+ assert.commandWorked(explainRes);
+ assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN"));
- //
- // Collation tests for aggregation.
- //
+ // Query has en_US collation, but index has fr_CA collation.
+ explainRes = coll.find({a: "foo"}).collation({locale: "en_US"}).explain();
+ assert.commandWorked(explainRes);
+ assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN"));
- // Aggregation should return correct results when collation specified and collection does not
+ // Matching collations.
+ explainRes = coll.find({a: "foo"}).collation({locale: "fr_CA"}).explain();
+ assert.commandWorked(explainRes);
+ assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
+}
+
+// Should not be possible to create a text index with an explicit non-simple collation.
+coll.drop();
+assert.commandFailed(coll.createIndex({a: "text"}, {collation: {locale: "en"}}));
+
+// Text index builds which inherit a non-simple default collation should fail.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}}));
+assert.commandFailed(coll.createIndex({a: "text"}));
+
+// Text index build should succeed on a collection with a non-simple default collation if it
+// explicitly overrides the default with {locale: "simple"}.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}}));
+assert.commandWorked(coll.createIndex({a: "text"}, {collation: {locale: "simple"}}));
+
+//
+// Collation tests for aggregation.
+//
+
+// Aggregation should return correct results when collation specified and collection does not
+// exist.
+coll.drop();
+assert.eq(0, coll.aggregate([], {collation: {locale: "fr"}}).itcount());
+
+// Aggregation should return correct results when collation specified and collection does exist.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
+assert.eq(1,
+ coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "en_US", strength: 2}})
+ .itcount());
+
+// Aggregation should return correct results when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.eq(1, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
+
+// Aggregation should return correct results when "simple" collation specified and collection
+// has a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "simple"}}).itcount());
+
+// Aggregation should select compatible index when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
+var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]);
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// Aggregation should not use index when no collation specified and collection default
+// collation is incompatible with index collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
+var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+// Explain of aggregation with collation should succeed.
+assert.commandWorked(coll.explain().aggregate([], {collation: {locale: "fr"}}));
+
+//
+// Collation tests for count.
+//
+
+// Count should return correct results when collation specified and collection does not exist.
+coll.drop();
+assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count());
+
+// Count should return correct results when collation specified and collection does exist.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.eq(0, coll.find({str: "FOO"}).count());
+assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count());
+assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).count());
+assert.eq(0, coll.count({str: "FOO"}));
+assert.eq(0, coll.count({str: "FOO"}, {collation: {locale: "en_US"}}));
+assert.eq(1, coll.count({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}));
+
+// Count should return correct results when no collation specified and collection has a default
+// collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.eq(1, coll.find({str: "FOO"}).count());
+
+// Count should return correct results when "simple" collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.eq(0, coll.find({str: "FOO"}).collation({locale: "simple"}).count());
+
+// Count should return correct results when collation specified and when run with explain.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+explainRes = coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).count();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, planStage);
+assert.eq(0, planStage.advanced);
+explainRes = coll.explain("executionStats")
+ .find({str: "FOO"})
+ .collation({locale: "en_US", strength: 2})
+ .count();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN");
+assert.neq(null, planStage);
+assert.eq(1, planStage.advanced);
+
+// Explain of COUNT_SCAN stage should include index collation.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
+explainRes = coll.explain("executionStats").find({a: 5}).count();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN");
+assert.neq(null, planStage);
+assert.eq(planStage.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Explain of COUNT_SCAN stage should include index collation when index collation is
+// inherited from collection default.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
+assert.commandWorked(coll.createIndex({a: 1}));
+explainRes = coll.explain("executionStats").find({a: 5}).count();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN");
+assert.neq(null, planStage);
+assert.eq(planStage.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Should be able to use COUNT_SCAN for queries over strings.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
+assert.commandWorked(coll.createIndex({a: 1}));
+explainRes = coll.explain("executionStats").find({a: "foo"}).count();
+assert.commandWorked(explainRes);
+assert(planHasStage(db, explainRes.executionStats.executionStages, "COUNT_SCAN"));
+assert(!planHasStage(db, explainRes.executionStats.executionStages, "FETCH"));
+
+//
+// Collation tests for distinct.
+//
+
+// Distinct should return correct results when collation specified and collection does not
+// exist.
+coll.drop();
+assert.eq(0, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}).length);
+
+// Distinct should return correct results when collation specified and no indexes exist.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "FOO"}));
+var res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(1, res.length);
+assert.eq("foo", res[0].toLowerCase());
+assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length);
+assert.eq(2,
+ coll.distinct("_id", {str: "foo"}, {collation: {locale: "en_US", strength: 2}}).length);
+
+// Distinct should return correct results when collation specified and compatible index exists.
+coll.createIndex({str: 1}, {collation: {locale: "en_US", strength: 2}});
+res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
+assert.eq(1, res.length);
+assert.eq("foo", res[0].toLowerCase());
+assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length);
+
+// Distinct should return correct results when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.writeOK(coll.insert({str: "FOO"}));
+assert.eq(1, coll.distinct("str").length);
+assert.eq(2, coll.distinct("_id", {str: "foo"}).length);
+
+// Distinct should return correct results when "simple" collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.writeOK(coll.insert({str: "FOO"}));
+assert.eq(2, coll.distinct("str", {}, {collation: {locale: "simple"}}).length);
+assert.eq(1, coll.distinct("_id", {str: "foo"}, {collation: {locale: "simple"}}).length);
+
+// Distinct should select compatible index when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
+var explain = coll.explain("queryPlanner").distinct("a");
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+
+// Distinct scan on strings can be used over an index with a collation when the predicate has
+// exact bounds.
+explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: "foo"}});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+
+// Distinct scan cannot be used over an index with a collation when the predicate has inexact
+// bounds.
+explain = coll.explain("queryPlanner").distinct("a", {a: {$exists: true}});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+
+// Distinct scan can be used without a fetch when predicate has exact non-string bounds.
+explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: 3}});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+
+// Distinct should not use index when no collation specified and collection default collation is
+// incompatible with index collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
+var explain = coll.explain("queryPlanner").distinct("a");
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+// Explain of DISTINCT_SCAN stage should include index collation.
+coll.drop();
+assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}}));
+explainRes = coll.explain("executionStats").distinct("str", {}, {collation: {locale: "fr_CA"}});
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN");
+assert.neq(null, planStage);
+assert.eq(planStage.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Explain of DISTINCT_SCAN stage should include index collation when index collation is
+// inherited from collection default.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
+assert.commandWorked(coll.createIndex({str: 1}));
+explainRes = coll.explain("executionStats").distinct("str");
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN");
+assert.neq(null, planStage);
+assert.eq(planStage.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+//
+// Collation tests for find.
+//
+
+if (db.getMongo().useReadCommands()) {
+ // Find should return correct results when collation specified and collection does not
// exist.
coll.drop();
- assert.eq(0, coll.aggregate([], {collation: {locale: "fr"}}).itcount());
+ assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
- // Aggregation should return correct results when collation specified and collection does exist.
+ // Find should return correct results when collation specified and filter is a match on _id.
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
+ assert.writeOK(coll.insert({_id: "foo"}));
+ assert.eq(0, coll.find({_id: "FOO"}).itcount());
+ assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
+ assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
+ assert.writeOK(coll.remove({_id: "foo"}));
+
+ // Find should return correct results when collation specified and no indexes exist.
+ assert.eq(0, coll.find({str: "FOO"}).itcount());
+ assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).itcount());
+ assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
assert.eq(1,
- coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "en_US", strength: 2}})
- .itcount());
-
- // Aggregation should return correct results when no collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.eq(1, coll.aggregate([{$match: {str: "FOO"}}]).itcount());
-
- // Aggregation should return correct results when "simple" collation specified and collection
- // has a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.eq(0,
- coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "simple"}}).itcount());
-
- // Aggregation should select compatible index when no collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
- var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // Aggregation should not use index when no collation specified and collection default
- // collation is incompatible with index collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
- var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- // Explain of aggregation with collation should succeed.
- assert.commandWorked(coll.explain().aggregate([], {collation: {locale: "fr"}}));
-
- //
- // Collation tests for count.
- //
-
- // Count should return correct results when collation specified and collection does not exist.
- coll.drop();
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count());
-
- // Count should return correct results when collation specified and collection does exist.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.eq(0, coll.find({str: "FOO"}).count());
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count());
- assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).count());
- assert.eq(0, coll.count({str: "FOO"}));
- assert.eq(0, coll.count({str: "FOO"}, {collation: {locale: "en_US"}}));
- assert.eq(1, coll.count({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}));
-
- // Count should return correct results when no collation specified and collection has a default
- // collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.eq(1, coll.find({str: "FOO"}).count());
-
- // Count should return correct results when "simple" collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "simple"}).count());
-
- // Count should return correct results when collation specified and when run with explain.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- explainRes =
- coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).count();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, planStage);
- assert.eq(0, planStage.advanced);
- explainRes = coll.explain("executionStats")
- .find({str: "FOO"})
- .collation({locale: "en_US", strength: 2})
- .count();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN");
- assert.neq(null, planStage);
- assert.eq(1, planStage.advanced);
+ coll.find({str: {$ne: "FOO"}}).collation({locale: "en_US", strength: 2}).itcount());
- // Explain of COUNT_SCAN stage should include index collation.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
- explainRes = coll.explain("executionStats").find({a: 5}).count();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN");
- assert.neq(null, planStage);
- assert.eq(planStage.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
+ // Find should return correct results when collation specified and compatible index exists.
+ assert.commandWorked(coll.ensureIndex({str: 1}, {collation: {locale: "en_US", strength: 2}}));
+ assert.eq(0, coll.find({str: "FOO"}).hint({str: 1}).itcount());
+ assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).hint({str: 1}).itcount());
+ assert.eq(
+ 1,
+ coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
+ assert.eq(1,
+ coll.find({str: {$ne: "FOO"}})
+ .collation({locale: "en_US", strength: 2})
+ .hint({str: 1})
+ .itcount());
+ assert.commandWorked(coll.dropIndexes());
- // Explain of COUNT_SCAN stage should include index collation when index collation is
- // inherited from collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
- assert.commandWorked(coll.createIndex({a: 1}));
- explainRes = coll.explain("executionStats").find({a: 5}).count();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN");
- assert.neq(null, planStage);
- assert.eq(planStage.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
+ // Find should return correct results when collation specified and compatible partial index
+ // exists.
+ assert.commandWorked(coll.ensureIndex({str: 1}, {
+ partialFilterExpression: {str: {$lte: "FOO"}},
+ collation: {locale: "en_US", strength: 2}
+ }));
+ assert.eq(
+ 1,
+ coll.find({str: "foo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
+ assert.writeOK(coll.insert({_id: 3, str: "goo"}));
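+ // ("goo" sorts above "FOO" under the index's strength-2 collation, so it falls
+ // outside the partial filter and the hinted query will not find it.)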
+ assert.eq(
+ 0,
+ coll.find({str: "goo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount());
+ assert.writeOK(coll.remove({_id: 3}));
+ assert.commandWorked(coll.dropIndexes());
- // Should be able to use COUNT_SCAN for queries over strings.
+ // Queries that use an index with a non-matching collation should add a sort
+ // stage if needed.
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
- assert.commandWorked(coll.createIndex({a: 1}));
- explainRes = coll.explain("executionStats").find({a: "foo"}).count();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, explainRes.executionStats.executionStages, "COUNT_SCAN"));
- assert(!planHasStage(db, explainRes.executionStats.executionStages, "FETCH"));
+ assert.writeOK(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}]));
- //
- // Collation tests for distinct.
- //
-
- // Distinct should return correct results when collation specified and collection does not
- // exist.
- coll.drop();
- assert.eq(0, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}).length);
+ // Ensure results from an index that doesn't match the query collation are sorted to match
+ // the requested collation.
+ assert.commandWorked(coll.ensureIndex({a: 1}));
+ var res =
+ coll.find({a: {'$exists': true}}, {_id: 0}).collation({locale: "en_US", strength: 3}).sort({
+ a: 1
+ });
+ assert.eq(res.toArray(), [{a: "a"}, {a: "A"}, {a: "b"}, {a: "B"}]);
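+ // (Case is a tertiary difference under en_US, with lowercase ordered before
+ // uppercase, so the expected order is a < A < b < B; the simple-collation index
+ // cannot supply that order, forcing an explicit sort.)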
- // Distinct should return correct results when collation specified and no indexes exist.
+ // Find should return correct results when collation specified and query contains $expr.
coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "FOO"}));
- var res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.length);
- assert.eq("foo", res[0].toLowerCase());
- assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length);
+ assert.writeOK(coll.insert([{a: "A"}, {a: "B"}]));
assert.eq(
- 2, coll.distinct("_id", {str: "foo"}, {collation: {locale: "en_US", strength: 2}}).length);
-
- // Distinct should return correct results when collation specified and compatible index exists.
- coll.createIndex({str: 1}, {collation: {locale: "en_US", strength: 2}});
- res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.length);
- assert.eq("foo", res[0].toLowerCase());
- assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length);
-
- // Distinct should return correct results when no collation specified and collection has a
+ 1,
+ coll.find({$expr: {$eq: ["$a", "a"]}}).collation({locale: "en_US", strength: 2}).itcount());
+}
+
+// Find should return correct results when no collation specified and collection has a default
+// collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({str: "foo"}));
+assert.writeOK(coll.insert({str: "FOO"}));
+assert.writeOK(coll.insert({str: "bar"}));
+assert.eq(3, coll.find({str: {$in: ["foo", "bar"]}}).itcount());
+assert.eq(2, coll.find({str: "foo"}).itcount());
+assert.eq(1, coll.find({str: {$ne: "foo"}}).itcount());
+assert.eq([{str: "bar"}, {str: "foo"}, {str: "FOO"}],
+ coll.find({}, {_id: 0, str: 1}).sort({str: 1}).toArray());
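+// ("foo" and "FOO" compare equal under the strength-2 default, so both sort after
+// "bar".)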
+
+// Find with idhack should return correct results when no collation specified and collection has
+// a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: "foo"}));
+assert.eq(1, coll.find({_id: "FOO"}).itcount());
+
+// Find on _id should use idhack stage when query inherits collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+explainRes = coll.explain("executionStats").find({_id: "foo"}).finish();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
+assert.neq(null, planStage);
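+// (IDHACK is eligible only when the query collation matches the collection
+// default; later tests confirm it is bypassed when an explicit collation differs.)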
+
+// Find with oplog replay should return correct results when no collation specified and
+// collection has a default collation. Skip this test for the mobile SE because it doesn't
+// support capped collections which are needed for oplog replay.
+if (jsTest.options().storageEngine !== "mobile") {
+ coll.drop();
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
+ assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
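+ // (Two documents have ts >= Timestamp(1000, 1), and the strength-2 default
+ // collation lets {str: "foo"} match their uppercase "FOO" values.)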
+ assert.eq(2,
+ coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
+ .addOption(DBQuery.Option.oplogReplay)
+ .itcount());
+}
+
+// Find should return correct results for query containing $expr when no collation specified and
+// collection has a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert([{a: "A"}, {a: "B"}]));
+assert.eq(1, coll.find({$expr: {$eq: ["$a", "a"]}}).itcount());
+
+if (db.getMongo().useReadCommands()) {
+ // Find should return correct results when "simple" collation specified and collection has a
// default collation.
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({str: "foo"}));
assert.writeOK(coll.insert({str: "FOO"}));
- assert.eq(1, coll.distinct("str").length);
- assert.eq(2, coll.distinct("_id", {str: "foo"}).length);
+ assert.writeOK(coll.insert({str: "bar"}));
+ assert.eq(2, coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount());
+ assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount());
+ assert.eq(
+ [{str: "FOO"}, {str: "bar"}, {str: "foo"}],
+ coll.find({}, {_id: 0, str: 1}).sort({str: 1}).collation({locale: "simple"}).toArray());
- // Distinct should return correct results when "simple" collation specified and collection has a
+ // Find on _id should return correct results when query collation differs from collection
// default collation.
coll.drop();
assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.writeOK(coll.insert({str: "FOO"}));
- assert.eq(2, coll.distinct("str", {}, {collation: {locale: "simple"}}).length);
- assert.eq(1, coll.distinct("_id", {str: "foo"}, {collation: {locale: "simple"}}).length);
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}}));
+ assert.writeOK(coll.insert({_id: "foo"}));
+ assert.writeOK(coll.insert({_id: "FOO"}));
+ assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount());
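+ // (The explicit strength-2 collation matches both _id values even though the
+ // collection default is the case-sensitive strength 3.)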
- // Distinct should select compatible index when no collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
- var explain = coll.explain("queryPlanner").distinct("a");
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
-
- // Distinct scan on strings can be used over an index with a collation when the predicate has
- // exact bounds.
- explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: "foo"}});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
-
- // Distinct scan cannot be used over an index with a collation when the predicate has inexact
- // bounds.
- explain = coll.explain("queryPlanner").distinct("a", {a: {$exists: true}});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
-
- // Distinct scan can be used without a fetch when predicate has exact non-string bounds.
- explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: 3}});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
-
- // Distinct should not use index when no collation specified and collection default collation is
- // incompatible with index collation.
+ // Find on _id should use idhack stage when explicitly given query collation matches
+ // collection default.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
- var explain = coll.explain("queryPlanner").distinct("a");
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- // Explain of DISTINCT_SCAN stage should include index collation.
- coll.drop();
- assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}}));
- explainRes = coll.explain("executionStats").distinct("str", {}, {collation: {locale: "fr_CA"}});
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN");
- assert.neq(null, planStage);
- assert.eq(planStage.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- // Explain of DISTINCT_SCAN stage should include index collation when index collation is
- // inherited from collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
- assert.commandWorked(coll.createIndex({str: 1}));
- explainRes = coll.explain("executionStats").distinct("str");
+ explainRes =
+ coll.explain("executionStats").find({_id: "foo"}).collation({locale: "en_US"}).finish();
assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN");
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
assert.neq(null, planStage);
- assert.eq(planStage.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- //
- // Collation tests for find.
- //
-
- if (db.getMongo().useReadCommands()) {
- // Find should return correct results when collation specified and collection does not
- // exist.
- coll.drop();
- assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
-
- // Find should return correct results when collation specified and filter is a match on _id.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.writeOK(coll.insert({_id: "foo"}));
- assert.eq(0, coll.find({_id: "FOO"}).itcount());
- assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount());
- assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
- assert.writeOK(coll.remove({_id: "foo"}));
-
- // Find should return correct results when collation specified and no indexes exist.
- assert.eq(0, coll.find({str: "FOO"}).itcount());
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).itcount());
- assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());
- assert.eq(
- 1, coll.find({str: {$ne: "FOO"}}).collation({locale: "en_US", strength: 2}).itcount());
-
- // Find should return correct results when collation specified and compatible index exists.
- assert.commandWorked(
- coll.ensureIndex({str: 1}, {collation: {locale: "en_US", strength: 2}}));
- assert.eq(0, coll.find({str: "FOO"}).hint({str: 1}).itcount());
- assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).hint({str: 1}).itcount());
- assert.eq(1,
- coll.find({str: "FOO"})
- .collation({locale: "en_US", strength: 2})
- .hint({str: 1})
- .itcount());
- assert.eq(1,
- coll.find({str: {$ne: "FOO"}})
- .collation({locale: "en_US", strength: 2})
- .hint({str: 1})
- .itcount());
- assert.commandWorked(coll.dropIndexes());
-
- // Find should return correct results when collation specified and compatible partial index
- // exists.
- assert.commandWorked(coll.ensureIndex({str: 1}, {
- partialFilterExpression: {str: {$lte: "FOO"}},
- collation: {locale: "en_US", strength: 2}
- }));
- assert.eq(1,
- coll.find({str: "foo"})
- .collation({locale: "en_US", strength: 2})
- .hint({str: 1})
- .itcount());
- assert.writeOK(coll.insert({_id: 3, str: "goo"}));
- assert.eq(0,
- coll.find({str: "goo"})
- .collation({locale: "en_US", strength: 2})
- .hint({str: 1})
- .itcount());
- assert.writeOK(coll.remove({_id: 3}));
- assert.commandWorked(coll.dropIndexes());
-
- // Queries that use an index with a non-matching collation should add a sort
- // stage if needed.
- coll.drop();
- assert.writeOK(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}]));
-
- // Ensure results from an index that doesn't match the query collation are sorted to match
- // the requested collation.
- assert.commandWorked(coll.ensureIndex({a: 1}));
- var res = coll.find({a: {'$exists': true}}, {_id: 0})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1});
- assert.eq(res.toArray(), [{a: "a"}, {a: "A"}, {a: "b"}, {a: "B"}]);
-
- // Find should return correct results when collation specified and query contains $expr.
- coll.drop();
- assert.writeOK(coll.insert([{a: "A"}, {a: "B"}]));
- assert.eq(1,
- coll.find({$expr: {$eq: ["$a", "a"]}})
- .collation({locale: "en_US", strength: 2})
- .itcount());
- }
-
- // Find should return correct results when no collation specified and collection has a default
- // collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.writeOK(coll.insert({str: "FOO"}));
- assert.writeOK(coll.insert({str: "bar"}));
- assert.eq(3, coll.find({str: {$in: ["foo", "bar"]}}).itcount());
- assert.eq(2, coll.find({str: "foo"}).itcount());
- assert.eq(1, coll.find({str: {$ne: "foo"}}).itcount());
- assert.eq([{str: "bar"}, {str: "foo"}, {str: "FOO"}],
- coll.find({}, {_id: 0, str: 1}).sort({str: 1}).toArray());
- // Find with idhack should return correct results when no collation specified and collection has
- // a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo"}));
- assert.eq(1, coll.find({_id: "FOO"}).itcount());
-
- // Find on _id should use idhack stage when query inherits collection default collation.
+ // Find on _id should not use idhack stage when query collation does not match collection
+ // default.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes = coll.explain("executionStats").find({_id: "foo"}).finish();
+ explainRes =
+ coll.explain("executionStats").find({_id: "foo"}).collation({locale: "fr_CA"}).finish();
assert.commandWorked(explainRes);
planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.neq(null, planStage);
+ assert.eq(null, planStage);
- // Find with oplog replay should return correct results when no collation specified and
- // collection has a default collation. Skip this test for the mobile SE because it doesn't
- // support capped collections which are needed for oplog replay.
+ // Find with oplog replay should return correct results when "simple" collation specified
+ // and collection has a default collation. Skip this test for the mobile SE because it
+ // doesn't support capped collections which are needed for oplog replay.
if (jsTest.options().storageEngine !== "mobile") {
coll.drop();
assert.commandWorked(db.createCollection(
coll.getName(),
{collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
- assert.eq(2,
+ assert.eq(0,
coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
.addOption(DBQuery.Option.oplogReplay)
+ .collation({locale: "simple"})
.itcount());
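+ // (The simple collation compares code points directly, so {str: "foo"} cannot
+ // match the "FOO" documents.)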
}
-
- // Find should return correct results for query containing $expr when no collation specified and
- // collection has a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert([{a: "A"}, {a: "B"}]));
- assert.eq(1, coll.find({$expr: {$eq: ["$a", "a"]}}).itcount());
-
- if (db.getMongo().useReadCommands()) {
- // Find should return correct results when "simple" collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({str: "foo"}));
- assert.writeOK(coll.insert({str: "FOO"}));
- assert.writeOK(coll.insert({str: "bar"}));
- assert.eq(2,
- coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount());
- assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount());
- assert.eq(
- [{str: "FOO"}, {str: "bar"}, {str: "foo"}],
- coll.find({}, {_id: 0, str: 1}).sort({str: 1}).collation({locale: "simple"}).toArray());
-
- // Find on _id should return correct results when query collation differs from collection
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}}));
- assert.writeOK(coll.insert({_id: "foo"}));
- assert.writeOK(coll.insert({_id: "FOO"}));
- assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount());
-
- // Find on _id should use idhack stage when explicitly given query collation matches
- // collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes =
- coll.explain("executionStats").find({_id: "foo"}).collation({locale: "en_US"}).finish();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.neq(null, planStage);
-
- // Find on _id should not use idhack stage when query collation does not match collection
- // default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes =
- coll.explain("executionStats").find({_id: "foo"}).collation({locale: "fr_CA"}).finish();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.eq(null, planStage);
-
- // Find with oplog replay should return correct results when "simple" collation specified
- // and collection has a default collation. Skip this test for the mobile SE because it
- // doesn't support capped collections which are needed for oplog replay.
- if (jsTest.options().storageEngine !== "mobile") {
- coll.drop();
- assert.commandWorked(db.createCollection(
- coll.getName(),
- {collation: {locale: "en_US", strength: 2}, capped: true, size: 16 * 1024}));
- const t0 = Timestamp(1000, 0);
- const t1 = Timestamp(1000, 1);
- const t2 = Timestamp(1000, 2);
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 0)}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 1)}));
- assert.writeOK(coll.insert({str: "FOO", ts: Timestamp(1000, 2)}));
- assert.eq(0,
- coll.find({str: "foo", ts: {$gte: Timestamp(1000, 1)}})
- .addOption(DBQuery.Option.oplogReplay)
- .collation({locale: "simple"})
- .itcount());
- }
- }
-
- // Find should select compatible index when no collation specified and collection has a default
- // collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
- var explain = coll.find({a: "foo"}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // Find should select compatible index when no collation specified and collection default
- // collation is "simple".
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "simple"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
- var explain = coll.find({a: "foo"}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // Find should not use index when no collation specified, index collation is "simple", and
- // collection has a non-"simple" default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
- var explain = coll.find({a: "foo"}).explain("queryPlanner");
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- // Find should select compatible index when "simple" collation specified and collection has a
- // non-"simple" default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
- var explain = coll.find({a: "foo"}).collation({locale: "simple"}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // Find should return correct results when collation specified and run with explain.
- coll.drop();
- assert.writeOK(coll.insert({str: "foo"}));
- explainRes =
- coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).finish();
- assert.commandWorked(explainRes);
- assert.eq(0, explainRes.executionStats.nReturned);
- explainRes = coll.explain("executionStats")
- .find({str: "FOO"})
- .collation({locale: "en_US", strength: 2})
- .finish();
- assert.commandWorked(explainRes);
- assert.eq(1, explainRes.executionStats.nReturned);
-
- // Explain of find should include query collation.
- coll.drop();
- explainRes =
- coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish();
- assert.commandWorked(explainRes);
- assert.eq(getQueryCollation(explainRes), {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- // Explain of find should include query collation when inherited from collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
- explainRes = coll.explain("executionStats").find({str: "foo"}).finish();
- assert.commandWorked(explainRes);
- assert.eq(getQueryCollation(explainRes), {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- // Explain of IXSCAN stage should include index collation.
- coll.drop();
- assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}}));
- explainRes =
- coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN");
- assert.neq(null, planStage);
- assert.eq(planStage.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- // Explain of IXSCAN stage should include index collation when index collation is inherited from
- // collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
- assert.commandWorked(coll.createIndex({str: 1}));
- explainRes = coll.explain("executionStats").find({str: "foo"}).finish();
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN");
- assert.neq(null, planStage);
- assert.eq(planStage.collation, {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true,
- version: "57.1",
- });
-
- if (!db.getMongo().useReadCommands()) {
- // find() shell helper should error if a collation is specified and the shell is not using
- // read commands.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.throws(function() {
- coll.find().collation({locale: "fr"}).itcount();
- });
- }
-
- //
- // Collation tests for findAndModify.
- //
-
- // findAndModify should return correct results when collation specified and collection does not
- // exist.
- coll.drop();
- assert.eq(null, coll.findAndModify({
- query: {str: "bar"},
- update: {$set: {str: "baz"}},
- new: true,
- collation: {locale: "fr"}
- }));
-
- // Update-findAndModify should return correct results when collation specified.
+}
+
+// Find should select compatible index when no collation specified and collection has a default
+// collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
+var explain = coll.find({a: "foo"}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// Find should select compatible index when no collation specified and collection default
+// collation is "simple".
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "simple"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
+var explain = coll.find({a: "foo"}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// Find should not use index when no collation specified, index collation is "simple", and
+// collection has a non-"simple" default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
+var explain = coll.find({a: "foo"}).explain("queryPlanner");
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+// Find should select compatible index when "simple" collation specified and collection has a
+// non-"simple" default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}}));
+var explain = coll.find({a: "foo"}).collation({locale: "simple"}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// Find should return correct results when collation specified and run with explain.
+coll.drop();
+assert.writeOK(coll.insert({str: "foo"}));
+explainRes =
+ coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).finish();
+assert.commandWorked(explainRes);
+assert.eq(0, explainRes.executionStats.nReturned);
+explainRes = coll.explain("executionStats")
+ .find({str: "FOO"})
+ .collation({locale: "en_US", strength: 2})
+ .finish();
+assert.commandWorked(explainRes);
+assert.eq(1, explainRes.executionStats.nReturned);
+
+// Explain of find should include query collation.
+coll.drop();
+explainRes =
+ coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish();
+assert.commandWorked(explainRes);
+assert.eq(getQueryCollation(explainRes), {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Explain of find should include query collation when inherited from collection default.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
+explainRes = coll.explain("executionStats").find({str: "foo"}).finish();
+assert.commandWorked(explainRes);
+assert.eq(getQueryCollation(explainRes), {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Explain of IXSCAN stage should include index collation.
+coll.drop();
+assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}}));
+explainRes =
+ coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN");
+assert.neq(null, planStage);
+assert.eq(planStage.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+// Explain of IXSCAN stage should include index collation when index collation is inherited from
+// collection default.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}}));
+assert.commandWorked(coll.createIndex({str: 1}));
+explainRes = coll.explain("executionStats").find({str: "foo"}).finish();
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN");
+assert.neq(null, planStage);
+assert.eq(planStage.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true,
+ version: "57.1",
+});
+
+if (!db.getMongo().useReadCommands()) {
+ // find() shell helper should error if a collation is specified and the shell is not using
+ // read commands.
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.eq({_id: 1, str: "baz"}, coll.findAndModify({
- query: {str: "FOO"},
- update: {$set: {str: "baz"}},
- new: true,
- collation: {locale: "en_US", strength: 2}
- }));
-
- // Explain of update-findAndModify should return correct results when collation specified.
- explainRes = coll.explain("executionStats").findAndModify({
- query: {str: "BAR"},
- update: {$set: {str: "baz"}},
- new: true,
- collation: {locale: "en_US", strength: 2}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE");
- assert.neq(null, planStage);
- assert.eq(1, planStage.nWouldModify);
-
- // Delete-findAndModify should return correct results when collation specified.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- assert.eq({_id: 1, str: "foo"},
- coll.findAndModify(
- {query: {str: "FOO"}, remove: true, collation: {locale: "en_US", strength: 2}}));
-
- // Explain of delete-findAndModify should return correct results when collation specified.
- explainRes = coll.explain("executionStats").findAndModify({
- query: {str: "BAR"},
- remove: true,
- collation: {locale: "en_US", strength: 2}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE");
- assert.neq(null, planStage);
- assert.eq(1, planStage.nWouldDelete);
-
- // findAndModify should return correct results when no collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.eq({_id: 1, str: "foo"},
- coll.findAndModify({query: {str: "FOO"}, update: {$set: {x: 1}}}));
- assert.eq({_id: 1, str: "foo", x: 1}, coll.findAndModify({query: {str: "FOO"}, remove: true}));
-
- // findAndModify should return correct results when "simple" collation specified and collection
- // has a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.eq(null,
- coll.findAndModify(
- {query: {str: "FOO"}, update: {$set: {x: 1}}, collation: {locale: "simple"}}));
- assert.eq(
- null,
- coll.findAndModify({query: {str: "FOO"}, remove: true, collation: {locale: "simple"}}));
-
- //
- // Collation tests for mapReduce.
- //
-
- // mapReduce should return "collection doesn't exist" error when collation specified and
- // collection does not exist.
- coll.drop();
assert.throws(function() {
- coll.mapReduce(
- function() {
- emit(this.str, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, collation: {locale: "fr"}});
+ coll.find().collation({locale: "fr"}).itcount();
});
-
- // mapReduce should return correct results when collation specified and no indexes exist.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- var mapReduceOut = coll.mapReduce(
+}
+
+//
+// Collation tests for findAndModify.
+//
+
+// findAndModify should return correct results when collation specified and collection does not
+// exist.
+coll.drop();
+assert.eq(
+ null,
+ coll.findAndModify(
+ {query: {str: "bar"}, update: {$set: {str: "baz"}}, new: true, collation: {locale: "fr"}}));
+
+// Update-findAndModify should return correct results when collation specified.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.eq({_id: 1, str: "baz"}, coll.findAndModify({
+ query: {str: "FOO"},
+ update: {$set: {str: "baz"}},
+ new: true,
+ collation: {locale: "en_US", strength: 2}
+}));
+
+// Explain of update-findAndModify should return correct results when collation specified.
+explainRes = coll.explain("executionStats").findAndModify({
+ query: {str: "BAR"},
+ update: {$set: {str: "baz"}},
+ new: true,
+ collation: {locale: "en_US", strength: 2}
+});
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE");
+assert.neq(null, planStage);
+assert.eq(1, planStage.nWouldModify);
+
+// Delete-findAndModify should return correct results when collation specified.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+assert.eq({_id: 1, str: "foo"},
+ coll.findAndModify(
+ {query: {str: "FOO"}, remove: true, collation: {locale: "en_US", strength: 2}}));
+
+// Explain of delete-findAndModify should return correct results when collation specified.
+explainRes = coll.explain("executionStats").findAndModify({
+ query: {str: "BAR"},
+ remove: true,
+ collation: {locale: "en_US", strength: 2}
+});
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE");
+assert.neq(null, planStage);
+assert.eq(1, planStage.nWouldDelete);
+
+// findAndModify should return correct results when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.eq({_id: 1, str: "foo"}, coll.findAndModify({query: {str: "FOO"}, update: {$set: {x: 1}}}));
+assert.eq({_id: 1, str: "foo", x: 1}, coll.findAndModify({query: {str: "FOO"}, remove: true}));
+
+// findAndModify should return correct results when "simple" collation specified and collection
+// has a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.eq(null,
+ coll.findAndModify(
+ {query: {str: "FOO"}, update: {$set: {x: 1}}, collation: {locale: "simple"}}));
+assert.eq(null,
+ coll.findAndModify({query: {str: "FOO"}, remove: true, collation: {locale: "simple"}}));
+
+//
+// Collation tests for mapReduce.
+//
+
+// mapReduce should return "collection doesn't exist" error when collation specified and
+// collection does not exist.
+coll.drop();
+assert.throws(function() {
+ coll.mapReduce(
function() {
emit(this.str, 1);
},
function(key, values) {
return Array.sum(values);
},
- {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "en_US", strength: 2}});
- assert.commandWorked(mapReduceOut);
- assert.eq(mapReduceOut.results.length, 1);
-
- // mapReduce should return correct results when no collation specified and collection has a
- // default collation.
+ {out: {inline: 1}, collation: {locale: "fr"}});
+});
+
+// mapReduce should return correct results when collation specified and no indexes exist.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+var mapReduceOut = coll.mapReduce(
+ function() {
+ emit(this.str, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(mapReduceOut);
+assert.eq(mapReduceOut.results.length, 1);
+
+// mapReduce should return correct results when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+var mapReduceOut = coll.mapReduce(
+ function() {
+ emit(this.str, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {str: "FOO"}});
+assert.commandWorked(mapReduceOut);
+assert.eq(mapReduceOut.results.length, 1);
+
+// mapReduce should return correct results when "simple" collation specified and collection has
+// a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+var mapReduceOut = coll.mapReduce(
+ function() {
+ emit(this.str, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "simple"}});
+assert.commandWorked(mapReduceOut);
+assert.eq(mapReduceOut.results.length, 0);
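+// (The simple collation overrides the collection's strength-2 default, so the
+// query {str: "FOO"} does not match the lowercase "foo" document.)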
+
+//
+// Collation tests for remove.
+//
+
+if (db.getMongo().writeMode() === "commands") {
+ // Remove should succeed when collation specified and collection does not exist.
+ coll.drop();
+ assert.writeOK(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}}));
+
+ // Remove should return correct results when collation specified.
coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- var mapReduceOut = coll.mapReduce(
- function() {
- emit(this.str, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {str: "FOO"}});
- assert.commandWorked(mapReduceOut);
- assert.eq(mapReduceOut.results.length, 1);
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ writeRes =
+ coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nRemoved);
- // mapReduce should return correct results when "simple" collation specified and collection has
- // a default collation.
+ // Explain of remove should return correct results when collation specified.
coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- var mapReduceOut = coll.mapReduce(
- function() {
- emit(this.str, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "simple"}});
- assert.commandWorked(mapReduceOut);
- assert.eq(mapReduceOut.results.length, 0);
-
- //
- // Collation tests for remove.
- //
-
- if (db.getMongo().writeMode() === "commands") {
- // Remove should succeed when collation specified and collection does not exist.
- coll.drop();
- assert.writeOK(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}}));
-
- // Remove should return correct results when collation specified.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- writeRes =
- coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
-
- // Explain of remove should return correct results when collation specified.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- explainRes = coll.explain("executionStats").remove({str: "FOO"}, {
- justOne: true,
- collation: {locale: "en_US", strength: 2}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE");
- assert.neq(null, planStage);
- assert.eq(1, planStage.nWouldDelete);
- }
-
- // Remove should return correct results when no collation specified and collection has a default
- // collation.
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ explainRes = coll.explain("executionStats").remove({str: "FOO"}, {
+ justOne: true,
+ collation: {locale: "en_US", strength: 2}
+ });
+ assert.commandWorked(explainRes);
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE");
+ assert.neq(null, planStage);
+ assert.eq(1, planStage.nWouldDelete);
+}
+
+// Remove should return correct results when no collation specified and collection has a default
+// collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+writeRes = coll.remove({str: "FOO"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+
+// Remove with idhack should return correct results when no collation specified and collection
+// has a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: "foo"}));
+writeRes = coll.remove({_id: "FOO"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+
+// Remove on _id should use idhack stage when query inherits collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+explainRes = coll.explain("executionStats").remove({_id: "foo"});
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
+assert.neq(null, planStage);
+
+if (db.getMongo().writeMode() === "commands") {
+ // Remove should return correct results when "simple" collation specified and collection has
+ // a default collation.
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- writeRes = coll.remove({str: "FOO"}, {justOne: true});
+ writeRes = coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
+ assert.eq(0, writeRes.nRemoved);
- // Remove with idhack should return correct results when no collation specified and collection
- // has a default collation.
+ // Remove on _id should return correct results when "simple" collation specified and
+ // collection has a default collation.
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({_id: "foo"}));
- writeRes = coll.remove({_id: "FOO"}, {justOne: true});
+ writeRes = coll.remove({_id: "FOO"}, {justOne: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
+ assert.eq(0, writeRes.nRemoved);
- // Remove on _id should use idhack stage when query inherits collection default collation.
+ // Remove on _id should use idhack stage when explicit query collation matches collection
+ // default.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes = coll.explain("executionStats").remove({_id: "foo"});
+ explainRes =
+ coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "en_US"}});
assert.commandWorked(explainRes);
planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
assert.neq(null, planStage);
- if (db.getMongo().writeMode() === "commands") {
- // Remove should return correct results when "simple" collation specified and collection has
- // a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- writeRes = coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(0, writeRes.nRemoved);
-
- // Remove on _id should return correct results when "simple" collation specified and
- // collection has a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo"}));
- writeRes = coll.remove({_id: "FOO"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(0, writeRes.nRemoved);
-
- // Remove on _id should use idhack stage when explicit query collation matches collection
- // default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes =
- coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "en_US"}});
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.neq(null, planStage);
-
- // Remove on _id should not use idhack stage when query collation does not match collection
- // default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes =
- coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "fr_CA"}});
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.eq(null, planStage);
- }
+ // Remove on _id should not use idhack stage when query collation does not match collection
+ // default.
+ coll.drop();
+ assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+ explainRes =
+ coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "fr_CA"}});
+ assert.commandWorked(explainRes);
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
+ assert.eq(null, planStage);
+}
- if (db.getMongo().writeMode() !== "commands") {
- // remove() shell helper should error if a collation is specified and the shell is not using
- // write commands.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- assert.throws(function() {
- coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
- });
- assert.throws(function() {
- coll.explain().remove({str: "FOO"},
- {justOne: true, collation: {locale: "en_US", strength: 2}});
- });
- }
+if (db.getMongo().writeMode() !== "commands") {
+ // remove() shell helper should error if a collation is specified and the shell is not using
+ // write commands.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.throws(function() {
+ coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
+ });
+ assert.throws(function() {
+ coll.explain().remove({str: "FOO"},
+ {justOne: true, collation: {locale: "en_US", strength: 2}});
+ });
+}
- //
- // Collation tests for update.
- //
+//
+// Collation tests for update.
+//
- if (db.getMongo().writeMode() === "commands") {
- // Update should succeed when collation specified and collection does not exist.
- coll.drop();
- assert.writeOK(coll.update(
- {str: "foo"}, {$set: {other: 99}}, {multi: true, collation: {locale: "fr"}}));
+if (db.getMongo().writeMode() === "commands") {
+ // Update should succeed when collation specified and collection does not exist.
+ coll.drop();
+ assert.writeOK(
+ coll.update({str: "foo"}, {$set: {other: 99}}, {multi: true, collation: {locale: "fr"}}));
- // Update should return correct results when collation specified.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- writeRes = coll.update({str: "FOO"},
- {$set: {other: 99}},
- {multi: true, collation: {locale: "en_US", strength: 2}});
- assert.eq(2, writeRes.nModified);
-
- // Explain of update should return correct results when collation specified.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- explainRes = coll.explain("executionStats").update({str: "FOO"}, {$set: {other: 99}}, {
- multi: true,
- collation: {locale: "en_US", strength: 2}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE");
- assert.neq(null, planStage);
- assert.eq(2, planStage.nWouldModify);
- }
+ // Update should return correct results when collation specified.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ writeRes = coll.update({str: "FOO"},
+ {$set: {other: 99}},
+ {multi: true, collation: {locale: "en_US", strength: 2}});
+ assert.eq(2, writeRes.nModified);
- // Update should return correct results when no collation specified and collection has a default
- // collation.
+ // Explain of update should return correct results when collation specified.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ explainRes = coll.explain("executionStats").update({str: "FOO"}, {$set: {other: 99}}, {
+ multi: true,
+ collation: {locale: "en_US", strength: 2}
+ });
+ assert.commandWorked(explainRes);
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE");
+ assert.neq(null, planStage);
+ assert.eq(2, planStage.nWouldModify);
+}
+
+// Update should return correct results when no collation specified and collection has a default
+// collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+writeRes = coll.update({str: "FOO"}, {$set: {other: 99}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+
+// Update with idhack should return correct results when no collation specified and collection
+// has a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.writeOK(coll.insert({_id: "foo"}));
+writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+
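+// The IDHACK stage is the planner's fast path for exact _id equality queries. For a
+// string _id, it is only eligible when the query's collation matches the collection
+// default, as the explain() tests below verify.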
+// Update on _id should use idhack stage when query inherits collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}});
+assert.commandWorked(explainRes);
+planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
+assert.neq(null, planStage);
+
+if (db.getMongo().writeMode() === "commands") {
+ // Update should return correct results when "simple" collation specified and collection has
+ // a default collation.
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- writeRes = coll.update({str: "FOO"}, {$set: {other: 99}});
+ writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
+ assert.eq(0, writeRes.nModified);
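+    // Nothing is modified: the "simple" collation overrides the collection's
+    // case-insensitive default, so "FOO" does not match "foo".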
- // Update with idhack should return correct results when no collation specified and collection
- // has a default collation.
+ // Update on _id should return correct results when "simple" collation specified and
+ // collection has a default collation.
coll.drop();
assert.commandWorked(
db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(coll.insert({_id: "foo"}));
- writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}});
+ writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
+ assert.eq(0, writeRes.nModified);
- // Update on _id should use idhack stage when query inherits collection default collation.
+ // Update on _id should use idhack stage when explicitly given query collation matches
+ // collection default.
coll.drop();
assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}});
+ explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, {
+ collation: {locale: "en_US"}
+ });
assert.commandWorked(explainRes);
planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
assert.neq(null, planStage);
- if (db.getMongo().writeMode() === "commands") {
- // Update should return correct results when "simple" collation specified and collection has
- // a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(0, writeRes.nModified);
-
- // Update on _id should return correct results when "simple" collation specified and
- // collection has a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo"}));
- writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(0, writeRes.nModified);
-
- // Update on _id should use idhack stage when explicitly given query collation matches
- // collection default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, {
- collation: {locale: "en_US"}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.neq(null, planStage);
-
- // Update on _id should not use idhack stage when query collation does not match collection
- // default.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
- explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, {
- collation: {locale: "fr_CA"}
- });
- assert.commandWorked(explainRes);
- planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
- assert.eq(null, planStage);
- }
-
- if (db.getMongo().writeMode() !== "commands") {
- // update() shell helper should error if a collation is specified and the shell is not using
- // write commands.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- assert.throws(function() {
- coll.update({str: "FOO"},
- {$set: {other: 99}},
- {multi: true, collation: {locale: "en_US", strength: 2}});
- });
- assert.throws(function() {
- coll.explain().update({str: "FOO"},
- {$set: {other: 99}},
- {multi: true, collation: {locale: "en_US", strength: 2}});
- });
- }
-
- //
- // Collation tests for the $geoNear aggregation stage.
- //
-
- // $geoNear should fail when collation is specified but the collection does not exist.
+ // Update on _id should not use idhack stage when query collation does not match collection
+ // default.
coll.drop();
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "dist",
- }
- }],
- collation: {locale: "en_US", strength: 2}
- }),
- ErrorCodes.NamespaceNotFound);
+ assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}}));
+ explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, {
+ collation: {locale: "fr_CA"}
+ });
+ assert.commandWorked(explainRes);
+ planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK");
+ assert.eq(null, planStage);
+}
- // $geoNear rejects the now-deprecated "collation" option.
+if (db.getMongo().writeMode() !== "commands") {
+ // update() shell helper should error if a collation is specified and the shell is not using
+ // write commands.
coll.drop();
- assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "dist",
- collation: {locale: "en_US"},
- }
- }],
- }),
- 40227);
-
- const geoNearStage = {
+ assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ assert.throws(function() {
+ coll.update({str: "FOO"},
+ {$set: {other: 99}},
+ {multi: true, collation: {locale: "en_US", strength: 2}});
+ });
+ assert.throws(function() {
+ coll.explain().update({str: "FOO"},
+ {$set: {other: 99}},
+ {multi: true, collation: {locale: "en_US", strength: 2}});
+ });
+}
+
+//
+// Collation tests for the $geoNear aggregation stage.
+//
+
+// $geoNear should fail when collation is specified but the collection does not exist.
+coll.drop();
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ cursor: {},
+ pipeline: [{
$geoNear: {
near: {type: "Point", coordinates: [0, 0]},
distanceField: "dist",
- spherical: true,
- query: {str: "ABC"}
}
- };
+ }],
+ collation: {locale: "en_US", strength: 2}
+}),
+ ErrorCodes.NamespaceNotFound);
+
+// $geoNear rejects the now-deprecated "collation" option.
+coll.drop();
+assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "dist",
+ collation: {locale: "en_US"},
+ }
+ }],
+}),
+ 40227);
+
+const geoNearStage = {
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "dist",
+ spherical: true,
+ query: {str: "ABC"}
+ }
+};
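+// geoNearStage is shared by each $geoNear test below: its {str: "ABC"} query predicate
+// matches the stored {str: "abc"} document only under a case-insensitive (strength 2)
+// collation.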
+
+// $geoNear should return correct results when collation specified and string predicate not
+// indexed.
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+assert.eq(0, coll.aggregate([geoNearStage]).itcount());
+assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
+
+// $geoNear should return correct results when no collation specified and string predicate
+// indexed.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}));
+assert.eq(0, coll.aggregate([geoNearStage]).itcount());
+assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
+
+// $geoNear should return correct results when collation specified and collation on index is
+// incompatible with string predicate.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(
+ coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}}));
+assert.eq(0, coll.aggregate([geoNearStage]).itcount());
+assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
+
+// $geoNear should return correct results when collation specified and collation on index is
+// compatible with string predicate.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(
+ coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}}));
+assert.eq(0, coll.aggregate([geoNearStage]).itcount());
+assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
+
+// $geoNear should return correct results when no collation specified and collection has a
+// default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.eq(1, coll.aggregate([geoNearStage]).itcount());
+
+// $geoNear should return correct results when "simple" collation specified and collection has
+// a default collation.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
+assert.eq(0, coll.aggregate([geoNearStage], {collation: {locale: "simple"}}).itcount());
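+// With the "simple" collation the collection default is overridden, so the comparison
+// of "ABC" to "abc" is case-sensitive and no longer matches.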
+
+//
+// Collation tests for find with $nearSphere.
+//
+
+if (db.getMongo().useReadCommands()) {
+ // Find with $nearSphere should return correct results when collation specified and
+ // collection does not exist.
+ coll.drop();
+ assert.eq(
+ 0,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
- // $geoNear should return correct results when collation specified and string predicate not
- // indexed.
+ // Find with $nearSphere should return correct results when collation specified and string
+ // predicate not indexed.
+ coll.drop();
+ assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- assert.eq(0, coll.aggregate([geoNearStage]).itcount());
assert.eq(
- 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
-
- // $geoNear should return correct results when no collation specified and string predicate
- // indexed.
+ 0,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+ assert.eq(
+ 1,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+ // Find with $nearSphere should return correct results when no collation specified and
+ // string predicate indexed.
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}));
- assert.eq(0, coll.aggregate([geoNearStage]).itcount());
assert.eq(
- 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
-
- // $geoNear should return correct results when collation specified and collation on index is
- // incompatible with string predicate.
+ 0,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+ assert.eq(
+ 1,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+ // Find with $nearSphere should return correct results when collation specified and
+ // collation on index is incompatible with string predicate.
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(
coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}}));
- assert.eq(0, coll.aggregate([geoNearStage]).itcount());
assert.eq(
- 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
-
- // $geoNear should return correct results when collation specified and collation on index is
- // compatible with string predicate.
+ 0,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+ assert.eq(
+ 1,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+
+ // Find with $nearSphere should return correct results when collation specified and
+ // collation on index is compatible with string predicate.
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(
coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}}));
- assert.eq(0, coll.aggregate([geoNearStage]).itcount());
assert.eq(
- 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount());
-
- // $geoNear should return correct results when no collation specified and collection has a
- // default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
- assert.eq(1, coll.aggregate([geoNearStage]).itcount());
-
- // $geoNear should return correct results when "simple" collation specified and collection has
- // a default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}}));
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
- assert.eq(0, coll.aggregate([geoNearStage], {collation: {locale: "simple"}}).itcount());
-
- //
- // Collation tests for find with $nearSphere.
- //
-
- if (db.getMongo().useReadCommands()) {
- // Find with $nearSphere should return correct results when collation specified and
- // collection does not exist.
- coll.drop();
- assert.eq(0,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when collation specified and string
- // predicate not indexed.
- coll.drop();
- assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"}));
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- assert.eq(0,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
- assert.eq(1,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when no collation specified and
- // string predicate indexed.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}));
- assert.eq(0,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
- assert.eq(1,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when collation specified and
- // collation on index is incompatible with string predicate.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1},
- {collation: {locale: "en_US", strength: 3}}));
- assert.eq(0,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
- assert.eq(1,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .collation({locale: "en_US", strength: 2})
- .itcount());
-
- // Find with $nearSphere should return correct results when collation specified and
- // collation on index is compatible with string predicate.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1},
- {collation: {locale: "en_US", strength: 2}}));
- assert.eq(0,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
- assert.eq(1,
- coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .collation({locale: "en_US", strength: 2})
- .itcount());
- }
-
- //
- // Tests for the bulk API.
- //
-
- var bulk;
-
- if (db.getMongo().writeMode() !== "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
-
- // Can't use the bulk API to set a collation when using legacy write ops.
- bulk = coll.initializeUnorderedBulkOp();
- assert.throws(function() {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2});
- });
-
- bulk = coll.initializeOrderedBulkOp();
- assert.throws(function() {
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2});
- });
- } else {
- // update().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({
- $set: {other: 99}
- });
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nModified);
-
- // updateOne().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({
- $set: {other: 99}
- });
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nModified);
-
- // replaceOne().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"});
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nModified);
-
- // replaceOne() with upsert().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"});
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nUpserted);
- assert.eq(0, writeRes.nModified);
-
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).upsert().replaceOne({
- str: "foo"
- });
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(0, writeRes.nUpserted);
- assert.eq(1, writeRes.nModified);
-
- // removeOne().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
-
- // remove().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
- writeRes = bulk.execute();
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nRemoved);
- }
+ 0,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+ assert.eq(
+ 1,
+ coll.find(
+ {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .collation({locale: "en_US", strength: 2})
+ .itcount());
+}
- //
- // Tests for the CRUD API.
- //
+//
+// Tests for the bulk API.
+//
- // deleteOne().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.deletedCount);
- } else {
- assert.throws(function() {
- coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- });
- }
+var bulk;
- // deleteMany().
+if (db.getMongo().writeMode() !== "commands") {
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(2, res.deletedCount);
- } else {
- assert.throws(function() {
- coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
- });
- }
- // findOneAndDelete().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.eq({_id: 1, str: "foo"},
- coll.findOneAndDelete({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}));
- assert.eq(null, coll.findOne({_id: 1}));
+ // Can't use the bulk API to set a collation when using legacy write ops.
+ bulk = coll.initializeUnorderedBulkOp();
+ assert.throws(function() {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2});
+ });
- // findOneAndReplace().
+ bulk = coll.initializeOrderedBulkOp();
+ assert.throws(function() {
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2});
+ });
+} else {
+ // update().
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.eq({_id: 1, str: "foo"},
- coll.findOneAndReplace(
- {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}));
- assert.neq(null, coll.findOne({str: "bar"}));
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}});
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(2, writeRes.nModified);
- // findOneAndUpdate().
+ // updateOne().
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.eq({_id: 1, str: "foo"},
- coll.findOneAndUpdate(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}));
- assert.neq(null, coll.findOne({other: 99}));
+ assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({
+ $set: {other: 99}
+ });
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nModified);
// replaceOne().
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.replaceOne(
- {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.modifiedCount);
- } else {
- assert.throws(function() {
- coll.replaceOne(
- {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
- });
- }
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"});
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nModified);
- // updateOne().
+ // replaceOne() with upsert().
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.updateOne(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(1, res.modifiedCount);
- } else {
- assert.throws(function() {
- coll.updateOne(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- });
- }
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"});
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nUpserted);
+ assert.eq(0, writeRes.nModified);
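+    // At its default strength the en_US collation is case-sensitive, so "FOO" did not
+    // match the existing documents and the op above upserted. The strength-2 retry
+    // below matches case-insensitively and modifies a document instead.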
+
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).upsert().replaceOne({
+ str: "foo"
+ });
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(0, writeRes.nUpserted);
+ assert.eq(1, writeRes.nModified);
- // updateMany().
+ // removeOne().
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.updateMany(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- assert.eq(2, res.modifiedCount);
- } else {
- assert.throws(function() {
- coll.updateMany(
- {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
- });
- }
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne();
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nRemoved);
- // updateOne with bulkWrite().
+ // remove().
coll.drop();
assert.writeOK(coll.insert({_id: 1, str: "foo"}));
assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([{
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove();
+ writeRes = bulk.execute();
+ assert.writeOK(writeRes);
+ assert.eq(2, writeRes.nRemoved);
+}
+
+//
+// Tests for the CRUD API.
+//
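+// The deleteOne/deleteMany/replaceOne/updateOne/updateMany helpers forward a collation
+// only over the command protocol and throw under legacy write mode, hence the else
+// branches below. The findOneAnd* helpers always run as the findAndModify command, so
+// they accept a collation in either mode.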
+
+// deleteOne().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
+ assert.eq(1, res.deletedCount);
+} else {
+ assert.throws(function() {
+ coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
+ });
+}
+
+// deleteMany().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
+ assert.eq(2, res.deletedCount);
+} else {
+ assert.throws(function() {
+ coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}});
+ });
+}
+
+// findOneAndDelete().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.eq({_id: 1, str: "foo"},
+ coll.findOneAndDelete({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}));
+assert.eq(null, coll.findOne({_id: 1}));
+
+// findOneAndReplace().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.eq({_id: 1, str: "foo"},
+ coll.findOneAndReplace(
+ {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}));
+assert.neq(null, coll.findOne({str: "bar"}));
+
+// findOneAndUpdate().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.eq({_id: 1, str: "foo"},
+ coll.findOneAndUpdate(
+ {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}));
+assert.neq(null, coll.findOne({other: 99}));
+
+// replaceOne().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res =
+ coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
+ assert.eq(1, res.modifiedCount);
+} else {
+ assert.throws(function() {
+ coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}});
+ });
+}
+
+// updateOne().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.updateOne(
+ {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
+ assert.eq(1, res.modifiedCount);
+} else {
+ assert.throws(function() {
+ coll.updateOne(
+ {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
+ });
+}
+
+// updateMany().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.updateMany(
+ {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
+ assert.eq(2, res.modifiedCount);
+} else {
+ assert.throws(function() {
+ coll.updateMany(
+ {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}});
+ });
+}
+
+// updateOne with bulkWrite().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.bulkWrite([{
+ updateOne: {
+ filter: {str: "FOO"},
+ update: {$set: {other: 99}},
+ collation: {locale: "en_US", strength: 2}
+ }
+ }]);
+ assert.eq(1, res.matchedCount);
+} else {
+ assert.throws(function() {
+ coll.bulkWrite([{
updateOne: {
filter: {str: "FOO"},
update: {$set: {other: 99}},
collation: {locale: "en_US", strength: 2}
}
}]);
- assert.eq(1, res.matchedCount);
- } else {
- assert.throws(function() {
- coll.bulkWrite([{
- updateOne: {
- filter: {str: "FOO"},
- update: {$set: {other: 99}},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- });
- }
-
- // updateMany with bulkWrite().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([{
+ });
+}
+
+// updateMany with bulkWrite().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.bulkWrite([{
+ updateMany: {
+ filter: {str: "FOO"},
+ update: {$set: {other: 99}},
+ collation: {locale: "en_US", strength: 2}
+ }
+ }]);
+ assert.eq(2, res.matchedCount);
+} else {
+ assert.throws(function() {
+ coll.bulkWrite([{
updateMany: {
filter: {str: "FOO"},
update: {$set: {other: 99}},
collation: {locale: "en_US", strength: 2}
}
}]);
- assert.eq(2, res.matchedCount);
- } else {
- assert.throws(function() {
- coll.bulkWrite([{
- updateMany: {
- filter: {str: "FOO"},
- update: {$set: {other: 99}},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- });
- }
-
- // replaceOne with bulkWrite().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([{
+ });
+}
+
+// replaceOne with bulkWrite().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.bulkWrite([{
+ replaceOne: {
+ filter: {str: "FOO"},
+ replacement: {str: "bar"},
+ collation: {locale: "en_US", strength: 2}
+ }
+ }]);
+ assert.eq(1, res.matchedCount);
+} else {
+ assert.throws(function() {
+ coll.bulkWrite([{
replaceOne: {
filter: {str: "FOO"},
replacement: {str: "bar"},
collation: {locale: "en_US", strength: 2}
}
}]);
- assert.eq(1, res.matchedCount);
- } else {
- assert.throws(function() {
- coll.bulkWrite([{
- replaceOne: {
- filter: {str: "FOO"},
- replacement: {str: "bar"},
- collation: {locale: "en_US", strength: 2}
- }
- }]);
- });
- }
-
- // deleteOne with bulkWrite().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite(
+ });
+}
+
+// deleteOne with bulkWrite().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.bulkWrite(
+ [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
+ assert.eq(1, res.deletedCount);
+} else {
+ assert.throws(function() {
+ coll.bulkWrite(
[{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- assert.eq(1, res.deletedCount);
- } else {
- assert.throws(function() {
- coll.bulkWrite(
- [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- });
- }
-
- // deleteMany with bulkWrite().
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "foo"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite(
+ });
+}
+
+// deleteMany with bulkWrite().
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "foo"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.bulkWrite(
+ [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
+ assert.eq(2, res.deletedCount);
+} else {
+ assert.throws(function() {
+ coll.bulkWrite(
[{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- assert.eq(2, res.deletedCount);
- } else {
- assert.throws(function() {
- coll.bulkWrite(
- [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]);
- });
- }
-
- // Two deleteOne ops with bulkWrite using different collations.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, str: "foo"}));
- assert.writeOK(coll.insert({_id: 2, str: "bar"}));
- if (db.getMongo().writeMode() === "commands") {
- var res = coll.bulkWrite([
+ });
+}
+
+// Two deleteOne ops with bulkWrite using different collations.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, str: "foo"}));
+assert.writeOK(coll.insert({_id: 2, str: "bar"}));
+if (db.getMongo().writeMode() === "commands") {
+ var res = coll.bulkWrite([
+ {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
+ {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}}
+ ]);
+ assert.eq(2, res.deletedCount);
+} else {
+ assert.throws(function() {
+ coll.bulkWrite([
{deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
{deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}}
]);
- assert.eq(2, res.deletedCount);
- } else {
- assert.throws(function() {
- coll.bulkWrite([
- {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}},
- {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}}
- ]);
- });
- }
+ });
+}
- // applyOps.
- if (!isMongos) {
- coll.drop();
- assert.commandWorked(
- db.createCollection("collation", {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"}));
-
- // preCondition.q respects collection default collation.
- assert.commandFailed(db.runCommand({
- applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}]
- }));
- assert.eq(5, coll.findOne({_id: "foo"}).x);
- assert.commandWorked(db.runCommand({
- applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}]
- }));
- assert.eq(6, coll.findOne({_id: "foo"}).x);
-
- // preCondition.res respects collection default collation.
- assert.commandFailed(db.runCommand({
- applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}]
- }));
- assert.eq(6, coll.findOne({_id: "foo"}).x);
- assert.commandWorked(db.runCommand({
- applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}]
- }));
- assert.eq(7, coll.findOne({_id: "foo"}).x);
-
- // <operation>.o2 respects collection default collation.
- assert.commandWorked(db.runCommand(
- {applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}]}));
- assert.eq(8, coll.findOne({_id: "foo"}).x);
- }
+// applyOps.
+if (!isMongos) {
+ coll.drop();
+ assert.commandWorked(
+ db.createCollection("collation", {collation: {locale: "en_US", strength: 2}}));
+ assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"}));
- // doTxn
- if (FixtureHelpers.isReplSet(db) && !isMongos && isWiredTiger(db)) {
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase(db.getName());
-
- // Use majority write concern to clear the drop-pending that can cause lock conflicts with
- // transactions.
- coll.drop({writeConcern: {w: "majority"}});
-
- assert.commandWorked(
- db.createCollection("collation", {collation: {locale: "en_US", strength: 2}}));
- assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"}));
-
- // preCondition.q respects collection default collation.
- assert.commandFailed(sessionDb.runCommand({
- doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}],
- txnNumber: NumberLong("0")
- }));
- assert.eq(5, coll.findOne({_id: "foo"}).x);
- assert.commandWorked(sessionDb.runCommand({
- doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}],
- txnNumber: NumberLong("1")
- }));
- assert.eq(6, coll.findOne({_id: "foo"}).x);
-
- // preCondition.res respects collection default collation.
- assert.commandFailed(sessionDb.runCommand({
- doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}],
- txnNumber: NumberLong("2")
- }));
- assert.eq(6, coll.findOne({_id: "foo"}).x);
- assert.commandWorked(sessionDb.runCommand({
- doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
- preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}],
- txnNumber: NumberLong("3")
- }));
- assert.eq(7, coll.findOne({_id: "foo"}).x);
-
- // <operation>.o2 respects collection default collation.
- assert.commandWorked(sessionDb.runCommand({
- doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}],
- txnNumber: NumberLong("4")
- }));
- assert.eq(8, coll.findOne({_id: "foo"}).x);
- }
+ // preCondition.q respects collection default collation.
+ assert.commandFailed(db.runCommand({
+ applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}]
+ }));
+ assert.eq(5, coll.findOne({_id: "foo"}).x);
+ assert.commandWorked(db.runCommand({
+ applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}]
+ }));
+ assert.eq(6, coll.findOne({_id: "foo"}).x);
- // Test that the collection created with the "cloneCollectionAsCapped" command inherits the
- // default collation of the corresponding collection. We skip running this command in a sharded
- // cluster because it isn't supported by mongos.
- if (!isMongos) {
- const clonedColl = db.collation_cloned;
+ // preCondition.res respects collection default collation.
+ assert.commandFailed(db.runCommand({
+ applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}]
+ }));
+ assert.eq(6, coll.findOne({_id: "foo"}).x);
+ assert.commandWorked(db.runCommand({
+ applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}]
+ }));
+ assert.eq(7, coll.findOne({_id: "foo"}).x);
- coll.drop();
- clonedColl.drop();
-
- // Create a collection with a non-simple default collation.
- assert.commandWorked(
- db.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}}));
- const originalCollectionInfos = db.getCollectionInfos({name: coll.getName()});
- assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos));
-
- assert.writeOK(coll.insert({_id: "FOO"}));
- assert.writeOK(coll.insert({_id: "bar"}));
- assert.eq([{_id: "FOO"}],
- coll.find({_id: "foo"}).toArray(),
- "query should have performed a case-insensitive match");
-
- var cloneCollOutput = db.runCommand({
- cloneCollectionAsCapped: coll.getName(),
- toCollection: clonedColl.getName(),
- size: 4096
- });
- if (jsTest.options().storageEngine === "mobile") {
- // Capped collections are not supported by the mobile storage engine
- assert.commandFailedWithCode(cloneCollOutput, ErrorCodes.InvalidOptions);
- } else {
- assert.commandWorked(cloneCollOutput);
- const clonedCollectionInfos = db.getCollectionInfos({name: clonedColl.getName()});
- assert.eq(clonedCollectionInfos.length, 1, tojson(clonedCollectionInfos));
- assert.eq(originalCollectionInfos[0].options.collation,
- clonedCollectionInfos[0].options.collation);
- assert.eq([{_id: "FOO"}], clonedColl.find({_id: "foo"}).toArray());
- }
- }
+ // <operation>.o2 respects collection default collation.
+ assert.commandWorked(db.runCommand(
+ {applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}]}));
+ assert.eq(8, coll.findOne({_id: "foo"}).x);
+}
- // Test that the find command's min/max options respect the collation.
- if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.writeOK(coll.insert({str: "a"}));
- assert.writeOK(coll.insert({str: "A"}));
- assert.writeOK(coll.insert({str: "b"}));
- assert.writeOK(coll.insert({str: "B"}));
- assert.writeOK(coll.insert({str: "c"}));
- assert.writeOK(coll.insert({str: "C"}));
- assert.writeOK(coll.insert({str: "d"}));
- assert.writeOK(coll.insert({str: "D"}));
-
- // This query should fail, since there is no index to support the min/max.
- let err = assert.throws(() => coll.find()
- .min({str: "b"})
- .max({str: "D"})
- .collation({locale: "en_US", strength: 2})
- .itcount());
- assert.commandFailedWithCode(err, 51173);
-
- // Even after building an index with the right key pattern, the query should fail since the
- // collations don't match.
- assert.commandWorked(coll.createIndex({str: 1}, {name: "noCollation"}));
- err = assert.throws(() => coll.find()
+// doTxn
+if (FixtureHelpers.isReplSet(db) && !isMongos && isWiredTiger(db)) {
+ const session = db.getMongo().startSession();
+ const sessionDb = session.getDatabase(db.getName());
+
+    // Use majority write concern to clear the drop-pending state, which can cause lock
+    // conflicts with transactions.
+ coll.drop({writeConcern: {w: "majority"}});
+
+ assert.commandWorked(
+ db.createCollection("collation", {collation: {locale: "en_US", strength: 2}}));
+ assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"}));
+
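+    // Each doTxn below supplies a distinct txnNumber, since a transaction number
+    // cannot be reused for a new transaction on the same session.
+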
+ // preCondition.q respects collection default collation.
+ assert.commandFailed(sessionDb.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}],
+ txnNumber: NumberLong("0")
+ }));
+ assert.eq(5, coll.findOne({_id: "foo"}).x);
+ assert.commandWorked(sessionDb.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}],
+ txnNumber: NumberLong("1")
+ }));
+ assert.eq(6, coll.findOne({_id: "foo"}).x);
+
+ // preCondition.res respects collection default collation.
+ assert.commandFailed(sessionDb.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}],
+ txnNumber: NumberLong("2")
+ }));
+ assert.eq(6, coll.findOne({_id: "foo"}).x);
+ assert.commandWorked(sessionDb.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}],
+ txnNumber: NumberLong("3")
+ }));
+ assert.eq(7, coll.findOne({_id: "foo"}).x);
+
+ // <operation>.o2 respects collection default collation.
+ assert.commandWorked(sessionDb.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}],
+ txnNumber: NumberLong("4")
+ }));
+ assert.eq(8, coll.findOne({_id: "foo"}).x);
+}
+
+// Test that the collection created with the "cloneCollectionAsCapped" command inherits the
+// default collation of the source collection. We skip running this command in a sharded
+// cluster because it isn't supported by mongos.
+if (!isMongos) {
+ const clonedColl = db.collation_cloned;
+
+ coll.drop();
+ clonedColl.drop();
+
+ // Create a collection with a non-simple default collation.
+ assert.commandWorked(
+ db.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}}));
+ const originalCollectionInfos = db.getCollectionInfos({name: coll.getName()});
+ assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos));
+
+ assert.writeOK(coll.insert({_id: "FOO"}));
+ assert.writeOK(coll.insert({_id: "bar"}));
+ assert.eq([{_id: "FOO"}],
+ coll.find({_id: "foo"}).toArray(),
+ "query should have performed a case-insensitive match");
+
+ var cloneCollOutput = db.runCommand(
+ {cloneCollectionAsCapped: coll.getName(), toCollection: clonedColl.getName(), size: 4096});
+ if (jsTest.options().storageEngine === "mobile") {
+        // Capped collections are not supported by the mobile storage engine.
+ assert.commandFailedWithCode(cloneCollOutput, ErrorCodes.InvalidOptions);
+ } else {
+ assert.commandWorked(cloneCollOutput);
+ const clonedCollectionInfos = db.getCollectionInfos({name: clonedColl.getName()});
+ assert.eq(clonedCollectionInfos.length, 1, tojson(clonedCollectionInfos));
+ assert.eq(originalCollectionInfos[0].options.collation,
+ clonedCollectionInfos[0].options.collation);
+ assert.eq([{_id: "FOO"}], clonedColl.find({_id: "foo"}).toArray());
+ }
+}
+
+// Test that the find command's min/max options respect the collation.
+if (db.getMongo().useReadCommands()) {
+ coll.drop();
+ assert.writeOK(coll.insert({str: "a"}));
+ assert.writeOK(coll.insert({str: "A"}));
+ assert.writeOK(coll.insert({str: "b"}));
+ assert.writeOK(coll.insert({str: "B"}));
+ assert.writeOK(coll.insert({str: "c"}));
+ assert.writeOK(coll.insert({str: "C"}));
+ assert.writeOK(coll.insert({str: "d"}));
+ assert.writeOK(coll.insert({str: "D"}));
+
+ // This query should fail, since there is no index to support the min/max.
+ let err = assert.throws(() => coll.find()
.min({str: "b"})
.max({str: "D"})
.collation({locale: "en_US", strength: 2})
- .hint({str: 1})
.itcount());
- assert.commandFailedWithCode(err, 51174);
-
- // After building an index with the case-insensitive US English collation, the query should
- // work. Furthermore, the bounds defined by the min and max should respect the
- // case-insensitive collation.
- assert.commandWorked(coll.createIndex(
- {str: 1}, {name: "withCollation", collation: {locale: "en_US", strength: 2}}));
- assert.eq(4,
- coll.find()
- .min({str: "b"})
- .max({str: "D"})
- .collation({locale: "en_US", strength: 2})
- .hint("withCollation")
- .itcount());
+ assert.commandFailedWithCode(err, 51173);
+
+ // Even after building an index with the right key pattern, the query should fail since the
+ // collations don't match.
+ assert.commandWorked(coll.createIndex({str: 1}, {name: "noCollation"}));
+ err = assert.throws(() => coll.find()
+ .min({str: "b"})
+ .max({str: "D"})
+ .collation({locale: "en_US", strength: 2})
+ .hint({str: 1})
+ .itcount());
+ assert.commandFailedWithCode(err, 51174);
+
+ // After building an index with the case-insensitive US English collation, the query should
+ // work. Furthermore, the bounds defined by the min and max should respect the
+ // case-insensitive collation.
+ assert.commandWorked(coll.createIndex(
+ {str: 1}, {name: "withCollation", collation: {locale: "en_US", strength: 2}}));
+ assert.eq(4,
+ coll.find()
+ .min({str: "b"})
+ .max({str: "D"})
+ .collation({locale: "en_US", strength: 2})
+ .hint("withCollation")
+ .itcount());
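+    // min bounds are inclusive and max bounds are exclusive, so under the
+    // case-insensitive collation [{str: "b"}, {str: "D"}) covers "b", "B", "c" and "C",
+    // giving the four results above.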
- // Ensure results from index with min/max query are sorted to match requested collation.
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
- assert.writeOK(coll.insert(
- [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
- var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
- res = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .min({a: 1, b: 1})
- .max({a: 2, b: 3})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1});
- assert.eq(res.toArray(), expected);
- res = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .min({a: 1, b: 1})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1});
- assert.eq(res.toArray(), expected);
- res = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .max({a: 2, b: 3})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1});
- assert.eq(res.toArray(), expected);
-
- // A min/max query that can use an index whose collation doesn't match should require a sort
- // stage if there are any in-bounds strings. Verify this using explain.
- explainRes = coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .max({a: 2, b: 3})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1})
- .explain();
- assert.commandWorked(explainRes);
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "SORT"));
-
- // This query should fail since min has a string as one of it's boundaries, and the
- // collation doesn't match that of the index.
- assert.throws(() => coll.find({}, {_id: 0})
- .hint({a: 1, b: 1})
- .min({a: 1, b: "A"})
- .max({a: 2, b: 1})
- .collation({locale: "en_US", strength: 3})
- .sort({a: 1, b: 1})
- .itcount());
- }
+    // Ensure that results from an index with a min/max query are sorted to match the
+    // requested collation.
+ coll.drop();
+ assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
+ assert.writeOK(
+ coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
+ var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
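+    // At strength 3 the en_US collation sorts lowercase before uppercase, which is why
+    // "a" precedes "A" in the expected ordering.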
+ res = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .min({a: 1, b: 1})
+ .max({a: 2, b: 3})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1});
+ assert.eq(res.toArray(), expected);
+ res = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .min({a: 1, b: 1})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1});
+ assert.eq(res.toArray(), expected);
+ res = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .max({a: 2, b: 3})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1});
+ assert.eq(res.toArray(), expected);
+
+ // A min/max query that can use an index whose collation doesn't match should require a sort
+ // stage if there are any in-bounds strings. Verify this using explain.
+ explainRes = coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .max({a: 2, b: 3})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1})
+ .explain();
+ assert.commandWorked(explainRes);
+ assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "SORT"));
+
+    // This query should fail since min has a string as one of its boundaries, and the
+    // collation doesn't match that of the index.
+ assert.throws(() => coll.find({}, {_id: 0})
+ .hint({a: 1, b: 1})
+ .min({a: 1, b: "A"})
+ .max({a: 2, b: 1})
+ .collation({locale: "en_US", strength: 3})
+ .sort({a: 1, b: 1})
+ .itcount());
+}
})();
diff --git a/jstests/core/collation_convert_to_capped.js b/jstests/core/collation_convert_to_capped.js
index e1f79bafb76..237156e86d7 100644
--- a/jstests/core/collation_convert_to_capped.js
+++ b/jstests/core/collation_convert_to_capped.js
@@ -9,28 +9,27 @@
*/
(function() {
- "use strict";
+"use strict";
- let testDb = db.getSiblingDB("collation_convert_to_capped");
- let coll = testDb.coll;
- testDb.dropDatabase();
+let testDb = db.getSiblingDB("collation_convert_to_capped");
+let coll = testDb.coll;
+testDb.dropDatabase();
- // Create a collection with a non-simple default collation.
- assert.commandWorked(
- testDb.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}}));
- const originalCollectionInfos = testDb.getCollectionInfos({name: coll.getName()});
- assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos));
+// Create a collection with a non-simple default collation.
+assert.commandWorked(
+ testDb.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}}));
+const originalCollectionInfos = testDb.getCollectionInfos({name: coll.getName()});
+assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos));
- assert.writeOK(coll.insert({_id: "FOO"}));
- assert.writeOK(coll.insert({_id: "bar"}));
- assert.eq([{_id: "FOO"}],
- coll.find({_id: "foo"}).toArray(),
- "query should have performed a case-insensitive match");
+assert.writeOK(coll.insert({_id: "FOO"}));
+assert.writeOK(coll.insert({_id: "bar"}));
+assert.eq([{_id: "FOO"}],
+ coll.find({_id: "foo"}).toArray(),
+ "query should have performed a case-insensitive match");
- assert.commandWorked(testDb.runCommand({convertToCapped: coll.getName(), size: 4096}));
- const cappedCollectionInfos = testDb.getCollectionInfos({name: coll.getName()});
- assert.eq(cappedCollectionInfos.length, 1, tojson(cappedCollectionInfos));
- assert.eq(originalCollectionInfos[0].options.collation,
- cappedCollectionInfos[0].options.collation);
- assert.eq([{_id: "FOO"}], coll.find({_id: "foo"}).toArray());
+assert.commandWorked(testDb.runCommand({convertToCapped: coll.getName(), size: 4096}));
+const cappedCollectionInfos = testDb.getCollectionInfos({name: coll.getName()});
+assert.eq(cappedCollectionInfos.length, 1, tojson(cappedCollectionInfos));
+assert.eq(originalCollectionInfos[0].options.collation, cappedCollectionInfos[0].options.collation);
+assert.eq([{_id: "FOO"}], coll.find({_id: "foo"}).toArray());
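+
+// A short sketch of a follow-up check (an assumption, not in the original test): the conversion
+// should also be visible through the shell's capped-collection helper.
+assert(coll.isCapped(), "expected collection to be capped after convertToCapped");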
})();
diff --git a/jstests/core/collation_find_and_modify.js b/jstests/core/collation_find_and_modify.js
index ea9b355516f..6c0fd704dcc 100644
--- a/jstests/core/collation_find_and_modify.js
+++ b/jstests/core/collation_find_and_modify.js
@@ -4,84 +4,90 @@
// Integration tests for collation-aware findAndModify.
(function() {
- 'use strict';
- var coll = db.getCollection("find_and_modify_update_test");
+'use strict';
+var coll = db.getCollection("find_and_modify_update_test");
- const caseInsensitive = {locale: "en_US", strength: 2};
- const caseSensitive = {locale: "en_US", strength: 3};
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+const caseSensitive = {
+ locale: "en_US",
+ strength: 3
+};
- // We restrict testing pipeline-style update to commands as they are not supported for OP_UPDATE
- // which cannot differentiate an update object from an array.
- if (db.getMongo().writeMode() === "commands") {
- //
- // Pipeline-style update respects collection default collation.
- //
+// We restrict testing of pipeline-style updates to commands because they are not supported by
+// OP_UPDATE, which cannot differentiate an update object from an array.
+if (db.getMongo().writeMode() === "commands") {
+ //
+ // Pipeline-style update respects collection default collation.
+ //
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- let doc = coll.findAndModify(
- {update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], new: true});
- assert.eq(doc.newField, 3, doc);
+ coll.drop();
+ assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ let doc =
+ coll.findAndModify({update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], new: true});
+ assert.eq(doc.newField, 3, doc);
- //
- // Pipeline-style findAndModify respects query collation.
- //
+ //
+ // Pipeline-style findAndModify respects query collation.
+ //
- // Case sensitive $indexOfArray on "B" matches "B".
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- doc = coll.findAndModify({
- update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseSensitive,
- new: true
- });
- assert.eq(doc.newField, 5, doc);
+ // Case sensitive $indexOfArray on "B" matches "B".
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ doc = coll.findAndModify({
+ update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseSensitive,
+ new: true
+ });
+ assert.eq(doc.newField, 5, doc);
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- doc = coll.findAndModify({
- update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseSensitive,
- new: true
- });
- assert.eq(doc.newField, 5, doc);
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ doc = coll.findAndModify({
+ update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseSensitive,
+ new: true
+ });
+ assert.eq(doc.newField, 5, doc);
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- doc = coll.findAndModify({
- update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseSensitive,
- new: true
- });
- assert.eq(doc.newField, 5, doc);
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ doc = coll.findAndModify({
+ update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseSensitive,
+ new: true
+ });
+ assert.eq(doc.newField, 5, doc);
- // Case insensitive $indexOfArray on "B" matches "b".
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- doc = coll.findAndModify({
- update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseInsensitive,
- new: true
- });
- assert.eq(doc.newField, 3, doc);
+ // Case insensitive $indexOfArray on "B" matches "b".
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ doc = coll.findAndModify({
+ update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseInsensitive,
+ new: true
+ });
+ assert.eq(doc.newField, 3, doc);
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- doc = coll.findAndModify({
- update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseInsensitive,
- new: true
- });
- assert.eq(doc.newField, 3, doc);
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ doc = coll.findAndModify({
+ update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseInsensitive,
+ new: true
+ });
+ assert.eq(doc.newField, 3, doc);
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- doc = coll.findAndModify({
- update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseInsensitive,
- new: true
- });
- assert.eq(doc.newField, 3, doc);
- }
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ doc = coll.findAndModify({
+ update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseInsensitive,
+ new: true
+ });
+ assert.eq(doc.newField, 3, doc);
+}
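+
+// A closing sketch, assuming findAndModify applies the collation to its query filter as it does
+// to its update pipeline: a case-insensitive filter should match despite the case difference.
+coll.drop();
+assert.commandWorked(coll.insert({str: "foo"}));
+const matchedDoc = coll.findAndModify(
+    {query: {str: "FOO"}, update: {$set: {matched: true}}, collation: caseInsensitive, new: true});
+assert.eq(matchedDoc.matched, true, matchedDoc);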
})();
diff --git a/jstests/core/collation_plan_cache.js b/jstests/core/collation_plan_cache.js
index 70b63acd33b..07507938cc3 100644
--- a/jstests/core/collation_plan_cache.js
+++ b/jstests/core/collation_plan_cache.js
@@ -8,244 +8,236 @@
// does_not_support_stepdowns,
// ]
(function() {
- 'use strict';
-
- var coll = db.collation_plan_cache;
- coll.drop();
-
- assert.writeOK(coll.insert({a: 'foo', b: 5}));
-
- // We need two indexes that each query can use so that a plan cache entry is created.
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: 'en_US'}}));
-
- // We need an index with a different collation, so that string comparisons affect the query
- // shape.
- assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: 'fr_CA'}}));
-
- // listQueryShapes().
-
- // Run a query so that an entry is inserted into the cache.
- assert.commandWorked(
- coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
- 'find command failed');
-
- // The query shape should have been added.
- var shapes = coll.getPlanCache().listQueryShapes();
- assert.eq(1, shapes.length, 'unexpected cache size after running query');
- let filteredShape0 = shapes[0];
- delete filteredShape0.queryHash;
- assert.eq(filteredShape0,
- {
- query: {a: 'foo', b: 5},
- sort: {},
- projection: {},
- collation: {
- locale: 'en_US',
- caseLevel: false,
- caseFirst: 'off',
- strength: 3,
- numericOrdering: false,
- alternate: 'non-ignorable',
- maxVariable: 'punct',
- normalization: false,
- backwards: false,
- version: '57.1'
- }
- },
- 'unexpected query shape returned from listQueryShapes()');
-
- coll.getPlanCache().clear();
-
- // getPlansByQuery().
-
- // Passing a query with an empty collation object should throw.
- assert.throws(function() {
- coll.getPlanCache().getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}});
- }, [], 'empty collation object should throw');
-
- // Passing a query with an invalid collation object should throw.
- assert.throws(function() {
- coll.getPlanCache().getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}});
- }, [], 'invalid collation object should throw');
-
- // Run a query so that an entry is inserted into the cache.
- assert.commandWorked(
- coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
- 'find command failed');
-
- // The query should have cached plans.
- assert.lt(
- 0,
- coll.getPlanCache()
- .getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}})
- .plans.length,
- 'unexpected number of cached plans for query');
-
- // Test passing the query, sort, projection, and collation to getPlansByQuery() as separate
- // arguments.
- assert.lt(0,
- coll.getPlanCache()
- .getPlansByQuery({a: 'foo', b: 5}, {}, {}, {locale: 'en_US'})
- .plans.length,
- 'unexpected number of cached plans for query');
-
- // Test passing the query, sort, projection, and collation to getPlansByQuery() as separate
- // arguments.
- assert.eq(0,
- coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}).plans.length,
- 'unexpected number of cached plans for query');
-
- // A query with a different collation should have no cached plans.
- assert.eq(
- 0,
- coll.getPlanCache()
- .getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}})
- .plans.length,
- 'unexpected number of cached plans for query');
-
- // A query with different string locations should have no cached plans.
- assert.eq(0,
- coll.getPlanCache()
- .getPlansByQuery({
- query: {a: 'foo', b: 'bar'},
- sort: {},
- projection: {},
- collation: {locale: 'en_US'}
- })
- .plans.length,
- 'unexpected number of cached plans for query');
-
- coll.getPlanCache().clear();
-
- // clearPlansByQuery().
-
- // Passing a query with an empty collation object should throw.
- assert.throws(function() {
- coll.getPlanCache().clearPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}});
- }, [], 'empty collation object should throw');
-
- // Passing a query with an invalid collation object should throw.
- assert.throws(function() {
- coll.getPlanCache().clearPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}});
- }, [], 'invalid collation object should throw');
-
- // Run a query so that an entry is inserted into the cache.
- assert.commandWorked(
- coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
- 'find command failed');
- assert.eq(1,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after running query');
-
- // Dropping a query shape with a different collation should have no effect.
- coll.getPlanCache().clearPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}});
- assert.eq(1,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after dropping uncached query shape');
-
- // Dropping a query shape with different string locations should have no effect.
+'use strict';
+
+var coll = db.collation_plan_cache;
+coll.drop();
+
+assert.writeOK(coll.insert({a: 'foo', b: 5}));
+
+// We need two indexes that each query can use so that a plan cache entry is created.
+assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: 'en_US'}}));
+
+// We need an index with a different collation, so that string comparisons affect the query
+// shape.
+assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: 'fr_CA'}}));
+
+// listQueryShapes().
+
+// Run a query so that an entry is inserted into the cache.
+assert.commandWorked(
+ coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
+ 'find command failed');
+
+// The query shape should have been added.
+var shapes = coll.getPlanCache().listQueryShapes();
+assert.eq(1, shapes.length, 'unexpected cache size after running query');
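+// The queryHash value itself is an implementation detail, so strip it before the deep comparison
+// against the expected shape below.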
+let filteredShape0 = shapes[0];
+delete filteredShape0.queryHash;
+assert.eq(filteredShape0,
+ {
+ query: {a: 'foo', b: 5},
+ sort: {},
+ projection: {},
+ collation: {
+ locale: 'en_US',
+ caseLevel: false,
+ caseFirst: 'off',
+ strength: 3,
+ numericOrdering: false,
+ alternate: 'non-ignorable',
+ maxVariable: 'punct',
+ normalization: false,
+ backwards: false,
+ version: '57.1'
+ }
+ },
+ 'unexpected query shape returned from listQueryShapes()');
+
+coll.getPlanCache().clear();
+
+// getPlansByQuery().
+
+// Passing a query with an empty collation object should throw.
+assert.throws(function() {
+ coll.getPlanCache().getPlansByQuery(
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}});
+}, [], 'empty collation object should throw');
+
+// Passing a query with an invalid collation object should throw.
+assert.throws(function() {
+ coll.getPlanCache().getPlansByQuery(
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}});
+}, [], 'invalid collation object should throw');
+
+// Run a query so that an entry is inserted into the cache.
+assert.commandWorked(
+ coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
+ 'find command failed');
+
+// The query should have cached plans.
+assert.lt(0,
+ coll.getPlanCache()
+ .getPlansByQuery(
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}})
+ .plans.length,
+ 'unexpected number of cached plans for query');
+
+// Test passing the query, sort, projection, and collation to getPlansByQuery() as separate
+// arguments.
+assert.lt(
+ 0,
+ coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}, {}, {}, {locale: 'en_US'}).plans.length,
+ 'unexpected number of cached plans for query');
+
+// Test passing the query, sort, projection, and collation to getPlansByQuery() as separate
+// arguments.
+assert.eq(0,
+ coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}).plans.length,
+ 'unexpected number of cached plans for query');
+
+// A query with a different collation should have no cached plans.
+assert.eq(0,
+ coll.getPlanCache()
+ .getPlansByQuery(
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}})
+ .plans.length,
+ 'unexpected number of cached plans for query');
+
+// A query with different string locations should have no cached plans.
+assert.eq(
+ 0,
+ coll.getPlanCache()
+ .getPlansByQuery(
+ {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}})
+ .plans.length,
+ 'unexpected number of cached plans for query');
+
+coll.getPlanCache().clear();
+
+// clearPlansByQuery().
+
+// Passing a query with an empty collation object should throw.
+assert.throws(function() {
coll.getPlanCache().clearPlansByQuery(
- {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}});
- assert.eq(1,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after dropping uncached query shape');
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}});
+}, [], 'empty collation object should throw');
- // Dropping query shape.
+// Passing a query with an invalid collation object should throw.
+assert.throws(function() {
coll.getPlanCache().clearPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}});
- assert.eq(0,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after dropping query shapes');
-
- // Index filter commands.
-
- // planCacheSetFilter should fail if 'collation' is an empty object.
- assert.commandFailed(
- coll.runCommand('planCacheSetFilter',
- {query: {a: 'foo', b: 5}, collation: {}, indexes: [{a: 1, b: 1}]}),
- 'planCacheSetFilter should fail on empty collation object');
-
- // planCacheSetFilter should fail if 'collation' is an invalid object.
- assert.commandFailed(
- coll.runCommand(
- 'planCacheSetFilter',
- {query: {a: 'foo', b: 5}, collation: {bad: "value"}, indexes: [{a: 1, b: 1}]}),
- 'planCacheSetFilter should fail on invalid collation object');
-
- // Set a plan cache filter.
- assert.commandWorked(
- coll.runCommand(
- 'planCacheSetFilter',
- {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}, indexes: [{a: 1, b: 1}]}),
- 'planCacheSetFilter failed');
-
- // Check the plan cache filter was added.
- var res = coll.runCommand('planCacheListFilters');
- assert.commandWorked(res, 'planCacheListFilters failed');
- assert.eq(1, res.filters.length, 'unexpected number of plan cache filters');
- assert.eq(res.filters[0],
- {
- query: {a: 'foo', b: 5},
- sort: {},
- projection: {},
- collation: {
- locale: 'en_US',
- caseLevel: false,
- caseFirst: 'off',
- strength: 3,
- numericOrdering: false,
- alternate: 'non-ignorable',
- maxVariable: 'punct',
- normalization: false,
- backwards: false,
- version: '57.1'
- },
- indexes: [{a: 1, b: 1}]
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}});
+}, [], 'invalid collation object should throw');
+
+// Run a query so that an entry is inserted into the cache.
+assert.commandWorked(
+ coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
+ 'find command failed');
+assert.eq(
+ 1, coll.getPlanCache().listQueryShapes().length, 'unexpected cache size after running query');
+
+// Dropping a query shape with a different collation should have no effect.
+coll.getPlanCache().clearPlansByQuery(
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}});
+assert.eq(1,
+ coll.getPlanCache().listQueryShapes().length,
+ 'unexpected cache size after dropping uncached query shape');
+
+// Dropping a query shape with different string locations should have no effect.
+coll.getPlanCache().clearPlansByQuery(
+ {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}});
+assert.eq(1,
+ coll.getPlanCache().listQueryShapes().length,
+ 'unexpected cache size after dropping uncached query shape');
+
+// Dropping query shape.
+coll.getPlanCache().clearPlansByQuery(
+ {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}});
+assert.eq(0,
+ coll.getPlanCache().listQueryShapes().length,
+ 'unexpected cache size after dropping query shapes');
+
+// Index filter commands.
+
+// planCacheSetFilter should fail if 'collation' is an empty object.
+assert.commandFailed(
+ coll.runCommand('planCacheSetFilter',
+ {query: {a: 'foo', b: 5}, collation: {}, indexes: [{a: 1, b: 1}]}),
+ 'planCacheSetFilter should fail on empty collation object');
+
+// planCacheSetFilter should fail if 'collation' is an invalid object.
+assert.commandFailed(
+ coll.runCommand('planCacheSetFilter',
+ {query: {a: 'foo', b: 5}, collation: {bad: "value"}, indexes: [{a: 1, b: 1}]}),
+ 'planCacheSetFilter should fail on invalid collation object');
+
+// Set a plan cache filter.
+assert.commandWorked(
+ coll.runCommand(
+ 'planCacheSetFilter',
+ {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}, indexes: [{a: 1, b: 1}]}),
+ 'planCacheSetFilter failed');
+
+// Check the plan cache filter was added.
+var res = coll.runCommand('planCacheListFilters');
+assert.commandWorked(res, 'planCacheListFilters failed');
+assert.eq(1, res.filters.length, 'unexpected number of plan cache filters');
+assert.eq(res.filters[0],
+ {
+ query: {a: 'foo', b: 5},
+ sort: {},
+ projection: {},
+ collation: {
+ locale: 'en_US',
+ caseLevel: false,
+ caseFirst: 'off',
+ strength: 3,
+ numericOrdering: false,
+ alternate: 'non-ignorable',
+ maxVariable: 'punct',
+ normalization: false,
+ backwards: false,
+ version: '57.1'
},
- 'unexpected plan cache filter');
-
- // planCacheClearFilters should fail if 'collation' is an empty object.
- assert.commandFailed(
- coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}, collation: {}}),
- 'planCacheClearFilters should fail on empty collation object');
-
- // planCacheSetFilter should fail if 'collation' is an invalid object.
- assert.commandFailed(coll.runCommand('planCacheClearFilters',
- {query: {a: 'foo', b: 5}, collation: {bad: 'value'}}),
- 'planCacheClearFilters should fail on invalid collation object');
-
- // Clearing a plan cache filter with no collation should have no effect.
- assert.commandWorked(coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}}));
- assert.eq(1,
- coll.runCommand('planCacheListFilters').filters.length,
- 'unexpected number of plan cache filters');
-
- // Clearing a plan cache filter with a different collation should have no effect.
- assert.commandWorked(coll.runCommand('planCacheClearFilters',
- {query: {a: 'foo', b: 5}, collation: {locale: 'fr_CA'}}));
- assert.eq(1,
- coll.runCommand('planCacheListFilters').filters.length,
- 'unexpected number of plan cache filters');
-
- // Clearing a plan cache filter with different string locations should have no effect.
- assert.commandWorked(coll.runCommand(
- 'planCacheClearFilters', {query: {a: 'foo', b: 'bar', collation: {locale: 'en_US'}}}));
- assert.eq(1,
- coll.runCommand('planCacheListFilters').filters.length,
- 'unexpected number of plan cache filters');
-
- // Clear plan cache filter.
- assert.commandWorked(coll.runCommand('planCacheClearFilters',
- {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}}));
- assert.eq(0,
- coll.runCommand('planCacheListFilters').filters.length,
- 'unexpected number of plan cache filters');
+ indexes: [{a: 1, b: 1}]
+ },
+ 'unexpected plan cache filter');
+
+// planCacheClearFilters should fail if 'collation' is an empty object.
+assert.commandFailed(
+ coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}, collation: {}}),
+ 'planCacheClearFilters should fail on empty collation object');
+
+// planCacheClearFilters should fail if 'collation' is an invalid object.
+assert.commandFailed(
+ coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}, collation: {bad: 'value'}}),
+ 'planCacheClearFilters should fail on invalid collation object');
+
+// Clearing a plan cache filter with no collation should have no effect.
+assert.commandWorked(coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}}));
+assert.eq(1,
+ coll.runCommand('planCacheListFilters').filters.length,
+ 'unexpected number of plan cache filters');
+
+// Clearing a plan cache filter with a different collation should have no effect.
+assert.commandWorked(coll.runCommand('planCacheClearFilters',
+ {query: {a: 'foo', b: 5}, collation: {locale: 'fr_CA'}}));
+assert.eq(1,
+ coll.runCommand('planCacheListFilters').filters.length,
+ 'unexpected number of plan cache filters');
+
+// Clearing a plan cache filter with different string locations should have no effect.
+assert.commandWorked(coll.runCommand('planCacheClearFilters',
+                                     {query: {a: 'foo', b: 'bar'}, collation: {locale: 'en_US'}}));
+assert.eq(1,
+ coll.runCommand('planCacheListFilters').filters.length,
+ 'unexpected number of plan cache filters');
+
+// Clear plan cache filter.
+assert.commandWorked(coll.runCommand('planCacheClearFilters',
+ {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}}));
+assert.eq(0,
+ coll.runCommand('planCacheListFilters').filters.length,
+ 'unexpected number of plan cache filters');
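+
+// A closing sketch (not asserted in the original test): the underlying planCacheClear command
+// accepts the same shape-with-collation form, and clearing an uncached shape is a no-op,
+// mirroring the clearPlansByQuery() behavior verified above.
+assert.commandWorked(coll.runCommand(
+    'planCacheClear',
+    {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}}));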
})();
diff --git a/jstests/core/collation_update.js b/jstests/core/collation_update.js
index 82098e51cd3..32538e15653 100644
--- a/jstests/core/collation_update.js
+++ b/jstests/core/collation_update.js
@@ -4,320 +4,325 @@
// Integration tests for collation-aware updates.
(function() {
- 'use strict';
- var coll = db.getCollection("collation_update_test");
-
- const caseInsensitive = {collation: {locale: "en_US", strength: 2}};
- const caseSensitive = {collation: {locale: "en_US", strength: 3}};
- const numericOrdering = {collation: {locale: "en_US", numericOrdering: true}};
-
- // Update modifiers respect collection default collation on simple _id query.
+'use strict';
+var coll = db.getCollection("collation_update_test");
+
+const caseInsensitive = {
+ collation: {locale: "en_US", strength: 2}
+};
+const caseSensitive = {
+ collation: {locale: "en_US", strength: 3}
+};
+const numericOrdering = {
+ collation: {locale: "en_US", numericOrdering: true}
+};
+
+// Update modifiers respect collection default collation on simple _id query.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+assert.writeOK(coll.insert({_id: 1, a: "124"}));
+assert.writeOK(coll.update({_id: 1}, {$min: {a: "1234"}}));
+assert.eq(coll.find({a: "124"}).count(), 1);
+
+// $min respects query collation.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
- assert.writeOK(coll.insert({_id: 1, a: "124"}));
- assert.writeOK(coll.update({_id: 1}, {$min: {a: "1234"}}));
- assert.eq(coll.find({a: "124"}).count(), 1);
-
- // $min respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- // 1234 > 124, so no change should occur.
- assert.writeOK(coll.insert({a: "124"}));
- assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, numericOrdering));
- assert.eq(coll.find({a: "124"}).count(), 1);
-
- // "1234" < "124" (non-numeric ordering), so an update should occur.
- assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, caseSensitive));
- assert.eq(coll.find({a: "1234"}).count(), 1);
- }
- // $min respects collection default collation.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+ // 1234 > 124, so no change should occur.
assert.writeOK(coll.insert({a: "124"}));
- assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}));
+ assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, numericOrdering));
assert.eq(coll.find({a: "124"}).count(), 1);
- // $max respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- // "1234" < "124", so an update should not occur.
- assert.writeOK(coll.insert({a: "124"}));
- assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, caseSensitive));
- assert.eq(coll.find({a: "124"}).count(), 1);
+ // "1234" < "124" (non-numeric ordering), so an update should occur.
+ assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, caseSensitive));
+ assert.eq(coll.find({a: "1234"}).count(), 1);
+}
- // 1234 > 124, so an update should occur.
- assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, numericOrdering));
- assert.eq(coll.find({a: "1234"}).count(), 1);
- }
+// $min respects collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+assert.writeOK(coll.insert({a: "124"}));
+assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}));
+assert.eq(coll.find({a: "124"}).count(), 1);
- // $max respects collection default collation.
+// $max respects query collation.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+
+ // "1234" < "124", so an update should not occur.
assert.writeOK(coll.insert({a: "124"}));
- assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}));
+ assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, caseSensitive));
+ assert.eq(coll.find({a: "124"}).count(), 1);
+
+ // 1234 > 124, so an update should occur.
+ assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, numericOrdering));
assert.eq(coll.find({a: "1234"}).count(), 1);
+}
- // $addToSet respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- // "foo" == "FOO" (case-insensitive), so set isn't extended.
- assert.writeOK(coll.insert({a: ["foo"]}));
- assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseInsensitive));
- var set = coll.findOne().a;
- assert.eq(set.length, 1);
-
- // "foo" != "FOO" (case-sensitive), so set is extended.
- assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseSensitive));
- set = coll.findOne().a;
- assert.eq(set.length, 2);
-
- coll.drop();
-
- // $each and $addToSet respect collation
- assert.writeOK(coll.insert({a: ["foo", "bar", "FOO"]}));
- assert.writeOK(
- coll.update({}, {$addToSet: {a: {$each: ["FOO", "BAR", "str"]}}}, caseInsensitive));
- set = coll.findOne().a;
- assert.eq(set.length, 4);
- assert(set.includes("foo"));
- assert(set.includes("FOO"));
- assert(set.includes("bar"));
- assert(set.includes("str"));
- }
+// $max respects collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+assert.writeOK(coll.insert({a: "124"}));
+assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}));
+assert.eq(coll.find({a: "1234"}).count(), 1);
+// $addToSet respects query collation.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+
// "foo" == "FOO" (case-insensitive), so set isn't extended.
assert.writeOK(coll.insert({a: ["foo"]}));
- assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}));
+ assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseInsensitive));
var set = coll.findOne().a;
assert.eq(set.length, 1);
- // $pull respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- // "foo" != "FOO" (case-sensitive), so it is not pulled.
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseSensitive));
- var arr = coll.findOne().a;
- assert.eq(arr.length, 1);
- assert(arr.includes("FOO"));
-
- // "foo" == "FOO" (case-insensitive), so "FOO" is pulled.
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
- arr = coll.findOne().a;
- assert.eq(arr.length, 0);
-
- // collation-aware $pull removes all instances that match.
- coll.drop();
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
- arr = coll.findOne().a;
- assert.eq(arr.length, 0);
-
- // collation-aware $pull with comparisons removes matching instances.
- coll.drop();
-
- // "124" > "1234" (case-sensitive), so it is not removed.
- assert.writeOK(coll.insert({a: ["124", "1234"]}));
- assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, caseSensitive));
- arr = coll.findOne().a;
- assert.eq(arr.length, 2);
-
- // 124 < 1234 (numeric ordering), so it is removed.
- assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, numericOrdering));
- arr = coll.findOne().a;
- assert.eq(arr.length, 1);
- assert(arr.includes("1234"));
- }
-
- // $pull respects collection default collation.
+ // "foo" != "FOO" (case-sensitive), so set is extended.
+ assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseSensitive));
+ set = coll.findOne().a;
+ assert.eq(set.length, 2);
+
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+
+ // $each and $addToSet respect collation
+ assert.writeOK(coll.insert({a: ["foo", "bar", "FOO"]}));
+ assert.writeOK(
+ coll.update({}, {$addToSet: {a: {$each: ["FOO", "BAR", "str"]}}}, caseInsensitive));
+ set = coll.findOne().a;
+ assert.eq(set.length, 4);
+ assert(set.includes("foo"));
+ assert(set.includes("FOO"));
+ assert(set.includes("bar"));
+ assert(set.includes("str"));
+}
+
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+// "foo" == "FOO" (case-insensitive), so set isn't extended.
+assert.writeOK(coll.insert({a: ["foo"]}));
+assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}));
+var set = coll.findOne().a;
+assert.eq(set.length, 1);
+
+// $pull respects query collation.
+if (db.getMongo().writeMode() === "commands") {
+ coll.drop();
+
+ // "foo" != "FOO" (case-sensitive), so it is not pulled.
assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({}, {$pull: {a: "foo"}}));
+ assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseSensitive));
var arr = coll.findOne().a;
+ assert.eq(arr.length, 1);
+ assert(arr.includes("FOO"));
+
+ // "foo" == "FOO" (case-insensitive), so "FOO" is pulled.
+ assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
+ arr = coll.findOne().a;
assert.eq(arr.length, 0);
- // $pullAll respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
+ // collation-aware $pull removes all instances that match.
+ coll.drop();
+ assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
+ assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive));
+ arr = coll.findOne().a;
+ assert.eq(arr.length, 0);
- // "foo" != "FOO" (case-sensitive), so no changes are made.
- assert.writeOK(coll.insert({a: ["foo", "bar"]}));
- assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseSensitive));
- var arr = coll.findOne().a;
- assert.eq(arr.length, 2);
+ // collation-aware $pull with comparisons removes matching instances.
+ coll.drop();
- // "foo" == "FOO", "bar" == "BAR" (case-insensitive), so both are removed.
- assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseInsensitive));
- arr = coll.findOne().a;
- assert.eq(arr.length, 0);
- }
+ // "124" > "1234" (case-sensitive), so it is not removed.
+ assert.writeOK(coll.insert({a: ["124", "1234"]}));
+ assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, caseSensitive));
+ arr = coll.findOne().a;
+ assert.eq(arr.length, 2);
- // $pullAll respects collection default collation.
+ // 124 < 1234 (numeric ordering), so it is removed.
+ assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, numericOrdering));
+ arr = coll.findOne().a;
+ assert.eq(arr.length, 1);
+ assert(arr.includes("1234"));
+}
+
+// $pull respects collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
+assert.writeOK(coll.update({}, {$pull: {a: "foo"}}));
+var arr = coll.findOne().a;
+assert.eq(arr.length, 0);
+
+// $pullAll respects query collation.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+
+ // "foo" != "FOO" (case-sensitive), so no changes are made.
assert.writeOK(coll.insert({a: ["foo", "bar"]}));
- assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}));
+ assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseSensitive));
var arr = coll.findOne().a;
- assert.eq(arr.length, 0);
+ assert.eq(arr.length, 2);
- // $push with $sort respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- // "1230" < "1234" < "124" (case-sensitive)
- assert.writeOK(coll.insert({a: ["1234", "124"]}));
- assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, caseSensitive));
- var arr = coll.findOne().a;
- assert.eq(arr.length, 3);
- assert.eq(arr[0], "1230");
- assert.eq(arr[1], "1234");
- assert.eq(arr[2], "124");
-
- // "124" < "1230" < "1234" (numeric ordering)
- coll.drop();
- assert.writeOK(coll.insert({a: ["1234", "124"]}));
- assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, numericOrdering));
- arr = coll.findOne().a;
- assert.eq(arr.length, 3);
- assert.eq(arr[0], "124");
- assert.eq(arr[1], "1230");
- assert.eq(arr[2], "1234");
- }
-
- // $push with $sort respects collection default collation.
+ // "foo" == "FOO", "bar" == "BAR" (case-insensitive), so both are removed.
+ assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseInsensitive));
+ arr = coll.findOne().a;
+ assert.eq(arr.length, 0);
+}
+
+// $pullAll respects collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.writeOK(coll.insert({a: ["foo", "bar"]}));
+assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}));
+var arr = coll.findOne().a;
+assert.eq(arr.length, 0);
+
+// $push with $sort respects query collation.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+
+ // "1230" < "1234" < "124" (case-sensitive)
assert.writeOK(coll.insert({a: ["1234", "124"]}));
- assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}));
+ assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, caseSensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 3);
+ assert.eq(arr[0], "1230");
+ assert.eq(arr[1], "1234");
+ assert.eq(arr[2], "124");
+
+ // "124" < "1230" < "1234" (numeric ordering)
+ coll.drop();
+ assert.writeOK(coll.insert({a: ["1234", "124"]}));
+ assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, numericOrdering));
+ arr = coll.findOne().a;
+ assert.eq(arr.length, 3);
assert.eq(arr[0], "124");
assert.eq(arr[1], "1230");
assert.eq(arr[2], "1234");
-
- // $ positional operator respects query collation on $set.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- // "foo" != "FOO" (case-sensitive) so no update occurs.
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseSensitive));
- var arr = coll.findOne().a;
- assert.eq(arr.length, 2);
- assert.eq(arr[0], "foo");
- assert.eq(arr[1], "FOO");
-
- // "foo" == "FOO" (case-insensitive) so no update occurs.
- assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseInsensitive));
- var arr = coll.findOne().a;
- assert.eq(arr.length, 2);
- assert.eq(arr[0], "FOO");
- assert.eq(arr[1], "FOO");
- }
-
- // $ positional operator respects collection default collation on $set.
+}
+
+// $push with $sort respects collection default collation.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), numericOrdering));
+assert.writeOK(coll.insert({a: ["1234", "124"]}));
+assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}));
+var arr = coll.findOne().a;
+assert.eq(arr.length, 3);
+assert.eq(arr[0], "124");
+assert.eq(arr[1], "1230");
+assert.eq(arr[2], "1234");
+
+// $ positional operator respects query collation on $set.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+
+    // "foo" != "FOO" (case-sensitive), so the positional operator matches the literal "FOO"
+    // and the array is left unchanged.
assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
- assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}));
+ assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseSensitive));
var arr = coll.findOne().a;
assert.eq(arr.length, 2);
- assert.eq(arr[0], "FOO");
+ assert.eq(arr[0], "foo");
assert.eq(arr[1], "FOO");
- // Pipeline-style update respects collection default collation.
- // We restrict testing pipeline-style update to commands as they are not supported for OP_UPDATE
- // which cannot differentiate an update object from an array.
- if (db.getMongo().writeMode() === "commands") {
- assert(coll.drop());
- assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(
- coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}]));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
- }
-
- // Pipeline-style update respects query collation.
- if (db.getMongo().writeMode() === "commands") {
- // Case sensitive $indexOfArray on "B" matches "B".
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.update(
- {}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive));
- assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`);
-
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(
- coll.update({}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive));
- assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`);
-
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.update(
- {}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive));
- assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`);
-
- // Case insensitive $indexOfArray on "B" matches "b".
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.update(
- {}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
-
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.update(
- {}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
-
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.update(
- {}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
-
- // Collation is respected for pipeline-style bulk update.
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.bulkWrite([{
- updateOne: {
- filter: {},
- update: [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseInsensitive.collation
- }
- }]));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
-
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.bulkWrite([{
- updateOne: {
- filter: {},
- update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseInsensitive.collation
- }
- }]));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
-
- assert(coll.drop());
- assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
- assert.commandWorked(coll.bulkWrite([{
- updateOne: {
- filter: {},
- update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}],
- collation: caseInsensitive.collation
- }
- }]));
- assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
- }
+    // "foo" == "FOO" (case-insensitive), so the positional operator matches "foo" and
+    // overwrites it with "FOO".
+ assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
+ assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseInsensitive));
+ var arr = coll.findOne().a;
+ assert.eq(arr.length, 2);
+ assert.eq(arr[0], "FOO");
+ assert.eq(arr[1], "FOO");
+}
+
+// $ positional operator respects collection default collation on $set.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.writeOK(coll.insert({a: ["foo", "FOO"]}));
+assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}));
+var arr = coll.findOne().a;
+assert.eq(arr.length, 2);
+assert.eq(arr[0], "FOO");
+assert.eq(arr[1], "FOO");
+
+// Pipeline-style update respects collection default collation.
+// We restrict testing of pipeline-style updates to commands because they are not supported by
+// OP_UPDATE, which cannot differentiate an update object from an array.
+if (db.getMongo().writeMode() === "commands") {
+ assert(coll.drop());
+ assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}]));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+}
+
+// Pipeline-style update respects query collation.
+if (db.getMongo().writeMode() === "commands") {
+ // Case sensitive $indexOfArray on "B" matches "B".
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(
+ coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive));
+ assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`);
+
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(
+ coll.update({}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive));
+ assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`);
+
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(
+ coll.update({}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive));
+ assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`);
+
+ // Case insensitive $indexOfArray on "B" matches "b".
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(
+ coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(
+ coll.update({}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(coll.update(
+ {}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+
+ // Collation is respected for pipeline-style bulk update.
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(coll.bulkWrite([{
+ updateOne: {
+ filter: {},
+ update: [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseInsensitive.collation
+ }
+ }]));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(coll.bulkWrite([{
+ updateOne: {
+ filter: {},
+ update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseInsensitive.collation
+ }
+ }]));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+
+ assert(coll.drop());
+ assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]}));
+ assert.commandWorked(coll.bulkWrite([{
+ updateOne: {
+ filter: {},
+ update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}],
+ collation: caseInsensitive.collation
+ }
+ }]));
+ assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`);
+}
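+
+// A closing sketch, assuming read commands are available: the same collations drive read-side
+// ordering, e.g. numericOrdering sorts "124" < "1230" < "1234".
+if (db.getMongo().useReadCommands()) {
+    coll.drop();
+    assert.writeOK(coll.insert([{a: "124"}, {a: "1234"}, {a: "1230"}]));
+    const sorted =
+        coll.find({}, {_id: 0, a: 1}).sort({a: 1}).collation(numericOrdering.collation).toArray();
+    assert.eq(sorted, [{a: "124"}, {a: "1230"}, {a: "1234"}]);
+}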
})();
diff --git a/jstests/core/collation_with_reverse_index.js b/jstests/core/collation_with_reverse_index.js
index af246187348..d586038b8b8 100644
--- a/jstests/core/collation_with_reverse_index.js
+++ b/jstests/core/collation_with_reverse_index.js
@@ -1,12 +1,12 @@
// Regression test for SERVER-34846.
(function() {
- const coll = db.collation_with_reverse_index;
- coll.drop();
+const coll = db.collation_with_reverse_index;
+coll.drop();
- coll.insertOne({int: 1, text: "hello world"});
- coll.createIndex({int: -1, text: -1}, {collation: {locale: "en", strength: 1}});
- const res = coll.find({int: 1}, {_id: 0, int: 1, text: 1}).toArray();
+coll.insertOne({int: 1, text: "hello world"});
+coll.createIndex({int: -1, text: -1}, {collation: {locale: "en", strength: 1}});
+const res = coll.find({int: 1}, {_id: 0, int: 1, text: 1}).toArray();
- assert.eq(res.length, 1);
- assert.eq(res[0].text, "hello world");
+assert.eq(res.length, 1);
+assert.eq(res[0].text, "hello world");
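+
+// One extra illustrative check (a sketch): the indexed int field round-trips as well, i.e. the
+// reverse index with a non-simple collation returns original values rather than collation keys.
+assert.eq(res[0].int, 1);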
})();
diff --git a/jstests/core/collmod_bad_spec.js b/jstests/core/collmod_bad_spec.js
index a41e3c4f46e..c0519f375f5 100644
--- a/jstests/core/collmod_bad_spec.js
+++ b/jstests/core/collmod_bad_spec.js
@@ -7,21 +7,21 @@
// Tests that a collMod with a bad specification does not cause any changes, and does not crash the
// server.
(function() {
- "use strict";
+"use strict";
- var collName = "collModBadSpec";
- var coll = db.getCollection(collName);
+var collName = "collModBadSpec";
+var coll = db.getCollection(collName);
- coll.drop();
- assert.commandWorked(db.createCollection(collName));
+coll.drop();
+assert.commandWorked(db.createCollection(collName));
- // Get the original collection options for the collection.
- var originalResult = db.getCollectionInfos({name: collName});
+// Get the original collection options for the collection.
+var originalResult = db.getCollectionInfos({name: collName});
- // Issue an invalid command.
- assert.commandFailed(coll.runCommand("collMod", {validationLevel: "off", unknownField: "x"}));
+// Issue an invalid command.
+assert.commandFailed(coll.runCommand("collMod", {validationLevel: "off", unknownField: "x"}));
- // Make sure the options are unchanged.
- var newResult = db.getCollectionInfos({name: collName});
- assert.eq(originalResult, newResult);
+// Make sure the options are unchanged.
+var newResult = db.getCollectionInfos({name: collName});
+assert.eq(originalResult, newResult);
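+
+// A contrasting sketch (not part of the original test): the same option is accepted when the
+// specification is well formed.
+assert.commandWorked(coll.runCommand("collMod", {validationLevel: "off"}));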
})();
diff --git a/jstests/core/collmod_without_uuid.js b/jstests/core/collmod_without_uuid.js
index 5beb1864ad0..d3b2ca5a287 100644
--- a/jstests/core/collmod_without_uuid.js
+++ b/jstests/core/collmod_without_uuid.js
@@ -10,20 +10,20 @@
*/
(function() {
- "use strict";
- const collName = "collmod_without_uuid";
+"use strict";
+const collName = "collmod_without_uuid";
- function checkUUIDs() {
- let infos = db.getCollectionInfos();
- assert(infos.every((coll) => coll.name != collName || coll.info.uuid != undefined),
- "Not all collections have UUIDs: " + tojson({infos}));
- }
+function checkUUIDs() {
+ let infos = db.getCollectionInfos();
+ assert(infos.every((coll) => coll.name != collName || coll.info.uuid != undefined),
+ "Not all collections have UUIDs: " + tojson({infos}));
+}
- db[collName].drop();
- assert.writeOK(db[collName].insert({}));
- checkUUIDs();
- let cmd = {applyOps: [{ns: "test.$cmd", op: "c", o: {collMod: collName}}]};
- let res = db.runCommand(cmd);
- assert.commandWorked(res, tojson(cmd));
- checkUUIDs();
+db[collName].drop();
+assert.writeOK(db[collName].insert({}));
+checkUUIDs();
+let cmd = {applyOps: [{ns: "test.$cmd", op: "c", o: {collMod: collName}}]};
+let res = db.runCommand(cmd);
+assert.commandWorked(res, tojson(cmd));
+checkUUIDs();
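+
+// A small sketch of a direct check (an illustrative addition): the assigned UUID can also be
+// read straight out of getCollectionInfos().
+const collInfo = db.getCollectionInfos({name: collName})[0];
+assert(collInfo.info.uuid, "expected a UUID: " + tojson(collInfo));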
})();
diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js
index ab9750bfbb5..9f5504f6549 100644
--- a/jstests/core/commands_namespace_parsing.js
+++ b/jstests/core/commands_namespace_parsing.js
@@ -14,351 +14,342 @@
// Note that for each command, a properly formatted command object must be passed to the helper
// function, regardless of the namespace used in the command object.
(function() {
- "use strict";
-
- const isFullyQualified = true;
- const isNotFullyQualified = false;
- const isAdminCommand = true;
- const isNotAdminCommand = false;
-
- // If the command expects the namespace to be fully qualified, set `isFullyQualified` to true.
- // If the command must be run against the admin database, set `isAdminCommand` to true.
- function assertFailsWithInvalidNamespacesForField(
- field, command, isFullyQualified, isAdminCommand) {
- const invalidNamespaces = [];
- invalidNamespaces.push(isFullyQualified ? "mydb." : "");
- invalidNamespaces.push(isFullyQualified ? "mydb.\0" : "\0");
- invalidNamespaces.push(isFullyQualified ? "mydb.a\0b" : "a\0b");
-
- const cmds = [];
- for (let ns of invalidNamespaces) {
- const cmd = Object.extend({}, command, /* deep copy */ true);
-
- const fieldNames = field.split(".");
- const lastFieldNameIndex = fieldNames.length - 1;
- let objToUpdate = cmd;
- for (let i = 0; i < lastFieldNameIndex; i++) {
- objToUpdate = objToUpdate[fieldNames[i]];
- }
- objToUpdate[fieldNames[lastFieldNameIndex]] = ns;
-
- cmds.push(cmd);
+"use strict";
+
+const isFullyQualified = true;
+const isNotFullyQualified = false;
+const isAdminCommand = true;
+const isNotAdminCommand = false;
+
+// If the command expects the namespace to be fully qualified, set `isFullyQualified` to true.
+// If the command must be run against the admin database, set `isAdminCommand` to true.
+function assertFailsWithInvalidNamespacesForField(
+ field, command, isFullyQualified, isAdminCommand) {
+ const invalidNamespaces = [];
+ invalidNamespaces.push(isFullyQualified ? "mydb." : "");
+ invalidNamespaces.push(isFullyQualified ? "mydb.\0" : "\0");
+ invalidNamespaces.push(isFullyQualified ? "mydb.a\0b" : "a\0b");
+
+ const cmds = [];
+ for (let ns of invalidNamespaces) {
+ const cmd = Object.extend({}, command, /* deep copy */ true);
+
+ const fieldNames = field.split(".");
+ const lastFieldNameIndex = fieldNames.length - 1;
+ let objToUpdate = cmd;
+ for (let i = 0; i < lastFieldNameIndex; i++) {
+ objToUpdate = objToUpdate[fieldNames[i]];
}
+ objToUpdate[fieldNames[lastFieldNameIndex]] = ns;
- const dbCmd = isAdminCommand ? db.adminCommand : db.runCommand;
- for (let cmd of cmds) {
- assert.commandFailedWithCode(dbCmd.apply(db, [cmd]), ErrorCodes.InvalidNamespace);
- }
- }
-
- const isMaster = db.runCommand("ismaster");
- assert.commandWorked(isMaster);
- const isMongos = (isMaster.msg === "isdbgrid");
-
- db.commands_namespace_parsing.drop();
- assert.writeOK(db.commands_namespace_parsing.insert({a: 1}));
-
- // Test aggregate fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "aggregate", {aggregate: "", pipeline: []}, isNotFullyQualified, isNotAdminCommand);
-
- // Test count fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "count", {count: ""}, isNotFullyQualified, isNotAdminCommand);
-
- // Test distinct fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "distinct", {distinct: "", key: "a"}, isNotFullyQualified, isNotAdminCommand);
-
- // Test mapReduce fails with an invalid input collection name.
- assertFailsWithInvalidNamespacesForField("mapreduce",
- {
- mapreduce: "",
- map: function() {
- emit(this.a, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: "commands_namespace_parsing_out"
- },
- isNotFullyQualified,
- isNotAdminCommand);
- // Test mapReduce fails with an invalid output collection name.
- assertFailsWithInvalidNamespacesForField("out",
- {
- mapreduce: "commands_namespace_parsing",
- map: function() {
- emit(this.a, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: ""
- },
- isNotFullyQualified,
- isNotAdminCommand);
-
- if (!isMongos) {
- // Test geoSearch fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "geoSearch",
- {geoSearch: "", search: {}, near: [0.0, 0.0], maxDistance: 10},
- isNotFullyQualified,
- isNotAdminCommand);
+ cmds.push(cmd);
}
- // Test find fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "find", {find: ""}, isNotFullyQualified, isNotAdminCommand);
-
- // Test insert fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("insert",
- {insert: "", documents: [{q: {a: 1}, u: {a: 2}}]},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test update fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("update",
- {update: "", updates: [{q: {a: 1}, u: {a: 2}}]},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test delete fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("delete",
- {delete: "", deletes: [{q: {a: 1}, limit: 1}]},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test findAndModify fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("findAndModify",
- {findAndModify: "", update: {a: 2}},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test getMore fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("collection",
- {getMore: NumberLong("123456"), collection: ""},
- isNotFullyQualified,
- isNotAdminCommand);
-
- if (!isMongos) {
- // Test godinsert fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "godinsert", {godinsert: "", obj: {_id: 1}}, isNotFullyQualified, isNotAdminCommand);
+ const dbCmd = isAdminCommand ? db.adminCommand : db.runCommand;
+ for (let cmd of cmds) {
+ assert.commandFailedWithCode(dbCmd.apply(db, [cmd]), ErrorCodes.InvalidNamespace);
}
+}
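+
+// A worked example of the dotted-field traversal above (a standalone sketch with a hypothetical
+// command shape, not one of the tested commands): with field "outer.inner", the loop walks
+// cmd.outer and then overwrites cmd.outer.inner with each invalid namespace.
+{
+    const demo = {someCmd: 1, outer: {inner: "valid.ns"}};
+    const parts = "outer.inner".split(".");
+    let objToUpdate = demo;
+    for (let i = 0; i < parts.length - 1; i++) {
+        objToUpdate = objToUpdate[parts[i]];
+    }
+    objToUpdate[parts[parts.length - 1]] = "";
+    assert.eq(demo.outer.inner, "");
+}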
- // Test planCacheListFilters fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "planCacheListFilters", {planCacheListFilters: ""}, isNotFullyQualified, isNotAdminCommand);
-
- // Test planCacheSetFilter fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("planCacheSetFilter",
- {planCacheSetFilter: "", query: {}, indexes: [{a: 1}]},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test planCacheClearFilters fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("planCacheClearFilters",
- {planCacheClearFilters: ""},
- isNotFullyQualified,
- isNotAdminCommand);
+const isMaster = db.runCommand("ismaster");
+assert.commandWorked(isMaster);
+const isMongos = (isMaster.msg === "isdbgrid");
- // Test planCacheListQueryShapes fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("planCacheListQueryShapes",
- {planCacheListQueryShapes: ""},
- isNotFullyQualified,
- isNotAdminCommand);
+db.commands_namespace_parsing.drop();
+assert.writeOK(db.commands_namespace_parsing.insert({a: 1}));
- // Test planCacheListPlans fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("planCacheListPlans",
- {planCacheListPlans: "", query: {}},
- isNotFullyQualified,
- isNotAdminCommand);
+// Test aggregate fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "aggregate", {aggregate: "", pipeline: []}, isNotFullyQualified, isNotAdminCommand);
- // Test planCacheClear fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand);
+// Test count fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "count", {count: ""}, isNotFullyQualified, isNotAdminCommand);
- if (!isMongos) {
- // Test cleanupOrphaned fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "cleanupOrphaned", {cleanupOrphaned: ""}, isFullyQualified, isAdminCommand);
- }
+// Test distinct fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "distinct", {distinct: "", key: "a"}, isNotFullyQualified, isNotAdminCommand);
- if (isMongos) {
- // Test enableSharding fails with an invalid database name.
- assertFailsWithInvalidNamespacesForField(
- "enableSharding", {enableSharding: ""}, isNotFullyQualified, isAdminCommand);
-
- // Test mergeChunks fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "mergeChunks",
- {mergeChunks: "", bounds: [{_id: MinKey()}, {_id: MaxKey()}]},
- isFullyQualified,
- isAdminCommand);
-
- // Test shardCollection fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("shardCollection",
- {shardCollection: "", key: {_id: 1}},
- isFullyQualified,
- isAdminCommand);
-
- // Test split fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "split", {split: "", find: {}}, isFullyQualified, isAdminCommand);
-
- // Test moveChunk fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "moveChunk",
- {moveChunk: "", find: {}, to: "commands_namespace_parsing_out"},
- isNotFullyQualified,
- isAdminCommand);
-
- // Test movePrimary fails with an invalid database name.
- assertFailsWithInvalidNamespacesForField(
- "movePrimary", {movePrimary: "", to: "dummy"}, isNotFullyQualified, isAdminCommand);
-
- // Test updateZoneKeyRange fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "updateZoneKeyRange",
- {updateZoneKeyRange: "", min: {_id: MinKey()}, max: {_id: MaxKey()}, zone: "3"},
- isNotFullyQualified,
- isAdminCommand);
- }
+// Test mapReduce fails with an invalid input collection name.
+assertFailsWithInvalidNamespacesForField("mapreduce",
+ {
+ mapreduce: "",
+ map: function() {
+ emit(this.a, 1);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: "commands_namespace_parsing_out"
+ },
+ isNotFullyQualified,
+ isNotAdminCommand);
+// Test mapReduce fails with an invalid output collection name.
+assertFailsWithInvalidNamespacesForField("out",
+ {
+ mapreduce: "commands_namespace_parsing",
+ map: function() {
+ emit(this.a, 1);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: ""
+ },
+ isNotFullyQualified,
+ isNotAdminCommand);
- // Test renameCollection fails with an invalid source collection name.
+if (!isMongos) {
+ // Test geoSearch fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "renameCollection", {renameCollection: "", to: "test.b"}, isFullyQualified, isAdminCommand);
- // Test renameCollection fails with an invalid target collection name.
+ "geoSearch",
+ {geoSearch: "", search: {}, near: [0.0, 0.0], maxDistance: 10},
+ isNotFullyQualified,
+ isNotAdminCommand);
+}
+
+// Test find fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "find", {find: ""}, isNotFullyQualified, isNotAdminCommand);
+
+// Test insert fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("insert",
+ {insert: "", documents: [{q: {a: 1}, u: {a: 2}}]},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test update fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("update",
+ {update: "", updates: [{q: {a: 1}, u: {a: 2}}]},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test delete fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("delete",
+ {delete: "", deletes: [{q: {a: 1}, limit: 1}]},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test findAndModify fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "findAndModify", {findAndModify: "", update: {a: 2}}, isNotFullyQualified, isNotAdminCommand);
+
+// Test getMore fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("collection",
+ {getMore: NumberLong("123456"), collection: ""},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+if (!isMongos) {
+ // Test godinsert fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "to", {renameCollection: "test.b", to: ""}, isFullyQualified, isAdminCommand);
-
- // Test drop fails with an invalid collection name.
+ "godinsert", {godinsert: "", obj: {_id: 1}}, isNotFullyQualified, isNotAdminCommand);
+}
+
+// Test planCacheListFilters fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "planCacheListFilters", {planCacheListFilters: ""}, isNotFullyQualified, isNotAdminCommand);
+
+// Test planCacheSetFilter fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("planCacheSetFilter",
+ {planCacheSetFilter: "", query: {}, indexes: [{a: 1}]},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test planCacheClearFilters fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "planCacheClearFilters", {planCacheClearFilters: ""}, isNotFullyQualified, isNotAdminCommand);
+
+// Test planCacheListQueryShapes fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("planCacheListQueryShapes",
+ {planCacheListQueryShapes: ""},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test planCacheListPlans fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("planCacheListPlans",
+ {planCacheListPlans: "", query: {}},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test planCacheClear fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand);
+
+if (!isMongos) {
+ // Test cleanupOrphaned fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "drop", {drop: ""}, isNotFullyQualified, isNotAdminCommand);
+ "cleanupOrphaned", {cleanupOrphaned: ""}, isFullyQualified, isAdminCommand);
+}
- // Test create fails with an invalid collection name.
+if (isMongos) {
+ // Test enableSharding fails with an invalid database name.
assertFailsWithInvalidNamespacesForField(
- "create", {create: ""}, isNotFullyQualified, isNotAdminCommand);
-
- if (!isMongos) {
- // Test cloneCollection fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("cloneCollection",
- {cloneCollection: "", from: "fakehost"},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test cloneCollectionAsCapped fails with an invalid source collection name.
- assertFailsWithInvalidNamespacesForField(
- "cloneCollectionAsCapped",
- {cloneCollectionAsCapped: "", toCollection: "b", size: 1024},
- isNotFullyQualified,
- isNotAdminCommand);
- // Test cloneCollectionAsCapped fails with an invalid target collection name.
- assertFailsWithInvalidNamespacesForField(
- "toCollection",
- {cloneCollectionAsCapped: "commands_namespace_parsing", toCollection: "", size: 1024},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test convertToCapped fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("convertToCapped",
- {convertToCapped: "", size: 1024},
- isNotFullyQualified,
- isNotAdminCommand);
- }
-
- // Test filemd5 fails with an invalid collection name.
- // Note: for this command, it is OK to pass 'root: ""', so do not use the helper function.
- assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "\0"}),
- ErrorCodes.InvalidNamespace);
- assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "a\0b"}),
- ErrorCodes.InvalidNamespace);
+ "enableSharding", {enableSharding: ""}, isNotFullyQualified, isAdminCommand);
- // Test createIndexes fails with an invalid collection name.
+ // Test mergeChunks fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "createIndexes",
- {createIndexes: "", indexes: [{key: {a: 1}, name: "a1"}]},
- isNotFullyQualified,
- isNotAdminCommand);
+ "mergeChunks",
+ {mergeChunks: "", bounds: [{_id: MinKey()}, {_id: MaxKey()}]},
+ isFullyQualified,
+ isAdminCommand);
- // Test listIndexes fails with an invalid collection name.
+ // Test shardCollection fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "listIndexes", {listIndexes: ""}, isNotFullyQualified, isNotAdminCommand);
+ "shardCollection", {shardCollection: "", key: {_id: 1}}, isFullyQualified, isAdminCommand);
- // Test dropIndexes fails with an invalid collection name.
+ // Test split fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "dropIndexes", {dropIndexes: "", index: "*"}, isNotFullyQualified, isNotAdminCommand);
-
- if (!isMongos) {
- // Test compact fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "compact", {compact: ""}, isNotFullyQualified, isNotAdminCommand);
- }
+ "split", {split: "", find: {}}, isFullyQualified, isAdminCommand);
- // Test collMod fails with an invalid collection name.
+ // Test moveChunk fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "collMod",
- {collMod: "", index: {keyPattern: {a: 1}, expireAfterSeconds: 60}},
+ "moveChunk",
+ {moveChunk: "", find: {}, to: "commands_namespace_parsing_out"},
isNotFullyQualified,
- isNotAdminCommand);
-
- // Test reIndex fails with an invalid collection name.
- if (!isMongos) {
- assertFailsWithInvalidNamespacesForField(
- "reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand);
- }
-
- // Test collStats fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "collStats", {collStats: ""}, isNotFullyQualified, isNotAdminCommand);
-
- // Test dataSize fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField(
- "dataSize", {dataSize: ""}, isFullyQualified, isNotAdminCommand);
-
- // Test explain of aggregate fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("aggregate",
- {aggregate: "", pipeline: [], explain: true},
- isNotFullyQualified,
- isNotAdminCommand);
+ isAdminCommand);
- // Test explain of count fails with an invalid collection name.
+ // Test movePrimary fails with an invalid database name.
assertFailsWithInvalidNamespacesForField(
- "explain.count", {explain: {count: ""}}, isNotFullyQualified, isNotAdminCommand);
+ "movePrimary", {movePrimary: "", to: "dummy"}, isNotFullyQualified, isAdminCommand);
- // Test explain of distinct fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("explain.distinct",
- {explain: {distinct: "", key: "a"}},
- isNotFullyQualified,
- isNotAdminCommand);
-
- // Test explain of find fails with an invalid collection name.
+ // Test updateZoneKeyRange fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
- "explain.find", {explain: {find: ""}}, isNotFullyQualified, isNotAdminCommand);
-
- // Test explain of findAndModify fails with an invalid collection name.
- assertFailsWithInvalidNamespacesForField("explain.findAndModify",
- {explain: {findAndModify: "", update: {a: 2}}},
+ "updateZoneKeyRange",
+ {updateZoneKeyRange: "", min: {_id: MinKey()}, max: {_id: MaxKey()}, zone: "3"},
+ isNotFullyQualified,
+ isAdminCommand);
+}
+
+// Test renameCollection fails with an invalid source collection name.
+assertFailsWithInvalidNamespacesForField(
+ "renameCollection", {renameCollection: "", to: "test.b"}, isFullyQualified, isAdminCommand);
+// Test renameCollection fails with an invalid target collection name.
+assertFailsWithInvalidNamespacesForField(
+ "to", {renameCollection: "test.b", to: ""}, isFullyQualified, isAdminCommand);
+
+// Test drop fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "drop", {drop: ""}, isNotFullyQualified, isNotAdminCommand);
+
+// Test create fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "create", {create: ""}, isNotFullyQualified, isNotAdminCommand);
+
+if (!isMongos) {
+ // Test cloneCollection fails with an invalid collection name.
+ assertFailsWithInvalidNamespacesForField("cloneCollection",
+ {cloneCollection: "", from: "fakehost"},
isNotFullyQualified,
isNotAdminCommand);
- // Test explain of delete fails with an invalid collection name.
+ // Test cloneCollectionAsCapped fails with an invalid source collection name.
assertFailsWithInvalidNamespacesForField(
- "explain.delete",
- {explain: {delete: "", deletes: [{q: {a: 1}, limit: 1}]}},
+ "cloneCollectionAsCapped",
+ {cloneCollectionAsCapped: "", toCollection: "b", size: 1024},
isNotFullyQualified,
isNotAdminCommand);
-
- // Test explain of update fails with an invalid collection name.
+ // Test cloneCollectionAsCapped fails with an invalid target collection name.
assertFailsWithInvalidNamespacesForField(
- "explain.update",
- {explain: {update: "", updates: [{q: {a: 1}, u: {a: 2}}]}},
+ "toCollection",
+ {cloneCollectionAsCapped: "commands_namespace_parsing", toCollection: "", size: 1024},
isNotFullyQualified,
isNotAdminCommand);
- // Test validate fails with an invalid collection name.
+ // Test convertToCapped fails with an invalid collection name.
+ assertFailsWithInvalidNamespacesForField("convertToCapped",
+ {convertToCapped: "", size: 1024},
+ isNotFullyQualified,
+ isNotAdminCommand);
+}
+
+// Test filemd5 fails with an invalid collection name.
+// Note: for this command, it is OK to pass 'root: ""', so do not use the helper function.
+assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "\0"}),
+ ErrorCodes.InvalidNamespace);
+assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "a\0b"}),
+ ErrorCodes.InvalidNamespace);
+
+// Test createIndexes fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("createIndexes",
+ {createIndexes: "", indexes: [{key: {a: 1}, name: "a1"}]},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test listIndexes fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "listIndexes", {listIndexes: ""}, isNotFullyQualified, isNotAdminCommand);
+
+// Test dropIndexes fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "dropIndexes", {dropIndexes: "", index: "*"}, isNotFullyQualified, isNotAdminCommand);
+
+if (!isMongos) {
+ // Test compact fails with an invalid collection name.
+ assertFailsWithInvalidNamespacesForField(
+ "compact", {compact: ""}, isNotFullyQualified, isNotAdminCommand);
+}
+
+// Test collMod fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "collMod",
+ {collMod: "", index: {keyPattern: {a: 1}, expireAfterSeconds: 60}},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test reIndex fails with an invalid collection name.
+if (!isMongos) {
assertFailsWithInvalidNamespacesForField(
- "validate", {validate: ""}, isNotFullyQualified, isNotAdminCommand);
+ "reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand);
+}
+
+// Test collStats fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "collStats", {collStats: ""}, isNotFullyQualified, isNotAdminCommand);
+
+// Test dataSize fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "dataSize", {dataSize: ""}, isFullyQualified, isNotAdminCommand);
+
+// Test explain of aggregate fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("aggregate",
+ {aggregate: "", pipeline: [], explain: true},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test explain of count fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "explain.count", {explain: {count: ""}}, isNotFullyQualified, isNotAdminCommand);
+
+// Test explain of distinct fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("explain.distinct",
+ {explain: {distinct: "", key: "a"}},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test explain of find fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "explain.find", {explain: {find: ""}}, isNotFullyQualified, isNotAdminCommand);
+
+// Test explain of findAndModify fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("explain.findAndModify",
+ {explain: {findAndModify: "", update: {a: 2}}},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test explain of delete fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("explain.delete",
+ {explain: {delete: "", deletes: [{q: {a: 1}, limit: 1}]}},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test explain of update fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField("explain.update",
+ {explain: {update: "", updates: [{q: {a: 1}, u: {a: 2}}]}},
+ isNotFullyQualified,
+ isNotAdminCommand);
+
+// Test validate fails with an invalid collection name.
+assertFailsWithInvalidNamespacesForField(
+ "validate", {validate: ""}, isNotFullyQualified, isNotAdminCommand);
})();
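
The assertions above all funnel through assertFailsWithInvalidNamespacesForField, whose full body appears earlier in the file; only its tail (the dbCmd loop) is visible in this hunk. A minimal sketch of the pattern, assuming hypothetical candidates such as "" and names with embedded NUL bytes (the real test builds its own candidate lists):

    function assertFailsWithInvalidNamespaces(field, cmd, isAdminCommand) {
        // Hypothetical candidate list; the real helper derives its candidates
        // from whether the field takes a fully qualified "db.coll" namespace.
        const invalidNamespaces = ["", "\0", "a\0b"];
        const cmds = [];
        for (let ns of invalidNamespaces) {
            const c = Object.extend({}, cmd);  // shallow copy (mongo shell helper)
            c[field] = ns;
            cmds.push(c);
        }
        // Mirrors the tail shown above: route through adminCommand when needed.
        const dbCmd = isAdminCommand ? db.adminCommand : db.runCommand;
        for (let c of cmds) {
            assert.commandFailedWithCode(dbCmd.apply(db, [c]), ErrorCodes.InvalidNamespace);
        }
    }
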
diff --git a/jstests/core/commands_that_do_not_write_do_not_accept_wc.js b/jstests/core/commands_that_do_not_write_do_not_accept_wc.js
index 17396961a74..bb471851bb5 100644
--- a/jstests/core/commands_that_do_not_write_do_not_accept_wc.js
+++ b/jstests/core/commands_that_do_not_write_do_not_accept_wc.js
@@ -10,52 +10,52 @@
*/
(function() {
- "use strict";
- var collName = 'leaves';
+"use strict";
+var collName = 'leaves';
- var commands = [];
+var commands = [];
- commands.push({find: collName, query: {_id: 1}});
+commands.push({find: collName, query: {_id: 1}});
- commands.push({distinct: collName, key: "_id"});
+commands.push({distinct: collName, key: "_id"});
- commands.push({count: collName, query: {type: 'oak'}});
+commands.push({count: collName, query: {type: 'oak'}});
- commands.push({
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- return {count: values.length};
- },
- out: {inline: 1}
- });
-
- function assertWriteConcernNotSupportedError(res) {
- assert.commandFailed(res);
- assert.eq(res.code, ErrorCodes.InvalidOptions);
- assert(!res.writeConcernError);
- }
-
- // Test a variety of valid and invalid writeConcerns to confirm that they still all get
- // the correct error.
- var writeConcerns = [{w: 'invalid'}, {w: 1}];
-
- function testUnsupportedWriteConcern(wc, cmd) {
- cmd.writeConcern = wc;
- jsTest.log("Testing " + tojson(cmd));
-
- var res = db.runCommand(cmd);
- assertWriteConcernNotSupportedError(res);
- }
-
- // Verify that each command gets a writeConcernNotSupported error.
- commands.forEach(function(cmd) {
- writeConcerns.forEach(function(wc) {
- testUnsupportedWriteConcern(wc, cmd);
+commands.push({
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
});
+ },
+ reduce: function(key, values) {
+ return {count: values.length};
+ },
+ out: {inline: 1}
+});
+
+function assertWriteConcernNotSupportedError(res) {
+ assert.commandFailed(res);
+ assert.eq(res.code, ErrorCodes.InvalidOptions);
+ assert(!res.writeConcernError);
+}
+
+// Test a variety of valid and invalid writeConcerns to confirm that they still all get
+// the correct error.
+var writeConcerns = [{w: 'invalid'}, {w: 1}];
+
+function testUnsupportedWriteConcern(wc, cmd) {
+ cmd.writeConcern = wc;
+ jsTest.log("Testing " + tojson(cmd));
+
+ var res = db.runCommand(cmd);
+ assertWriteConcernNotSupportedError(res);
+}
+
+// Verify that each command gets a writeConcernNotSupported error.
+commands.forEach(function(cmd) {
+ writeConcerns.forEach(function(wc) {
+ testUnsupportedWriteConcern(wc, cmd);
});
+});
})();
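
The rule under test is easy to reproduce by hand: attach any writeConcern, valid or not, to a read-only command and the server answers with InvalidOptions and no writeConcernError field. A one-off sketch, reusing the 'leaves' collection from above:

    var res = db.runCommand({find: "leaves", filter: {}, writeConcern: {w: 1}});
    assert.commandFailed(res);
    assert.eq(res.code, ErrorCodes.InvalidOptions);  // rejected up front
    assert(!res.writeConcernError);                  // not a write-concern failure
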
diff --git a/jstests/core/commands_with_uuid.js b/jstests/core/commands_with_uuid.js
index 69b889b394a..c64384675c9 100644
--- a/jstests/core/commands_with_uuid.js
+++ b/jstests/core/commands_with_uuid.js
@@ -1,102 +1,108 @@
/**
-* Tests that using a UUID as an argument to commands will retrieve results from the correct
-* collection.
-*
-* @tags: [
-* requires_fastcount,
-*
-* incompatible_with_embedded,
-* ]
-*/
+ * Tests that using a UUID as an argument to commands will retrieve results from the correct
+ * collection.
+ *
+ * @tags: [
+ * requires_fastcount,
+ *
+ * incompatible_with_embedded,
+ * ]
+ */
(function() {
- 'use strict';
- const mainCollName = 'main_coll';
- const subCollName = 'sub_coll';
- const kOtherDbName = 'commands_with_uuid_db';
- db.runCommand({drop: mainCollName});
- db.runCommand({drop: subCollName});
- assert.commandWorked(db.runCommand({create: mainCollName}));
- assert.commandWorked(db.runCommand({create: subCollName}));
+'use strict';
+const mainCollName = 'main_coll';
+const subCollName = 'sub_coll';
+const kOtherDbName = 'commands_with_uuid_db';
+db.runCommand({drop: mainCollName});
+db.runCommand({drop: subCollName});
+assert.commandWorked(db.runCommand({create: mainCollName}));
+assert.commandWorked(db.runCommand({create: subCollName}));
- // Check if UUIDs are enabled / supported.
- let collectionInfos = db.getCollectionInfos({name: mainCollName});
- let uuid = collectionInfos[0].info.uuid;
- if (uuid == null) {
- return;
- }
+// Check if UUIDs are enabled / supported.
+let collectionInfos = db.getCollectionInfos({name: mainCollName});
+let uuid = collectionInfos[0].info.uuid;
+if (uuid == null) {
+ return;
+}
- // No support for UUIDs on mongos.
- const isMaster = db.runCommand("ismaster");
- assert.commandWorked(isMaster);
- const isMongos = (isMaster.msg === "isdbgrid");
- if (isMongos) {
- return;
- }
+// No support for UUIDs on mongos.
+const isMaster = db.runCommand("ismaster");
+assert.commandWorked(isMaster);
+const isMongos = (isMaster.msg === "isdbgrid");
+if (isMongos) {
+ return;
+}
- assert.commandWorked(db.runCommand({insert: mainCollName, documents: [{fooField: 'FOO'}]}));
- assert.commandWorked(
- db.runCommand({insert: subCollName, documents: [{fooField: 'BAR'}, {fooField: 'FOOBAR'}]}));
+assert.commandWorked(db.runCommand({insert: mainCollName, documents: [{fooField: 'FOO'}]}));
+assert.commandWorked(
+ db.runCommand({insert: subCollName, documents: [{fooField: 'BAR'}, {fooField: 'FOOBAR'}]}));
- // Ensure passing a UUID to find retrieves results from the correct collection.
- let cmd = {find: uuid};
- let res = db.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- let cursor = new DBCommandCursor(db, res);
- let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res);
- assert(cursor.hasNext(), errMsg);
- let doc = cursor.next();
- assert.eq(doc.fooField, 'FOO');
- assert(!cursor.hasNext(), 'expected to have exhausted cursor for results ' + tojson(res));
+// Ensure passing a UUID to find retrieves results from the correct collection.
+let cmd = {find: uuid};
+let res = db.runCommand(cmd);
+assert.commandWorked(res, 'could not run ' + tojson(cmd));
+let cursor = new DBCommandCursor(db, res);
+let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res);
+assert(cursor.hasNext(), errMsg);
+let doc = cursor.next();
+assert.eq(doc.fooField, 'FOO');
+assert(!cursor.hasNext(), 'expected to have exhausted cursor for results ' + tojson(res));
- // Although we check for both string type and BinData type for the collection identifier
- // argument to a find command to accommodate searching both by name and by UUID, if an
- // invalid type is passed, the parsing error message should say the expected type is string and
- // not BinData to avoid confusing the user.
- cmd = {find: 1.0};
- res = db.runCommand(cmd);
- assert.commandFailed(res, 'expected ' + tojson(cmd) + ' to fail.');
- assert(res.errmsg.includes('field must be of BSON type string'),
- 'expected the error message of ' + tojson(res) + ' to include string type');
+// Although we check for both string type and BinData type for the collection identifier
+// argument to a find command to accommodate searching both by name and by UUID, if an
+// invalid type is passed, the parsing error message should say the expected type is string and
+// not BinData to avoid confusing the user.
+cmd = {
+ find: 1.0
+};
+res = db.runCommand(cmd);
+assert.commandFailed(res, 'expected ' + tojson(cmd) + ' to fail.');
+assert(res.errmsg.includes('field must be of BSON type string'),
+ 'expected the error message of ' + tojson(res) + ' to include string type');
- // Ensure passing a missing UUID to commands taking UUIDs uasserts that the UUID is not found.
- const missingUUID = UUID();
- for (cmd of[{count: missingUUID}, {find: missingUUID}, {listIndexes: missingUUID}]) {
- assert.commandFailedWithCode(
- db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd));
- }
+// Ensure passing a missing UUID to commands taking UUIDs uasserts that the UUID is not found.
+const missingUUID = UUID();
+for (cmd of [{count: missingUUID}, {find: missingUUID}, {listIndexes: missingUUID}]) {
+ assert.commandFailedWithCode(
+ db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd));
+}
- // Ensure passing a UUID to listIndexes retrieves results from the correct collection.
- cmd = {listIndexes: uuid};
- res = db.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- cursor = new DBCommandCursor(db, res);
- cursor.forEach(function(doc) {
- assert.eq(doc.ns, 'test.' + mainCollName);
- });
+// Ensure passing a UUID to listIndexes retrieves results from the correct collection.
+cmd = {
+ listIndexes: uuid
+};
+res = db.runCommand(cmd);
+assert.commandWorked(res, 'could not run ' + tojson(cmd));
+cursor = new DBCommandCursor(db, res);
+cursor.forEach(function(doc) {
+ assert.eq(doc.ns, 'test.' + mainCollName);
+});
- // Ensure passing a UUID to count retrieves results from the correct collection.
- cmd = {count: uuid};
- res = db.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- assert.eq(res.n, 1, "expected to count a single document with command: " + tojson(cmd));
+// Ensure passing a UUID to count retrieves results from the correct collection.
+cmd = {
+ count: uuid
+};
+res = db.runCommand(cmd);
+assert.commandWorked(res, 'could not run ' + tojson(cmd));
+assert.eq(res.n, 1, "expected to count a single document with command: " + tojson(cmd));
- // Test that UUID resolution fails when the UUID belongs to a different database. First, we
- // create a collection in another database.
- const dbWithUUID = db.getSiblingDB(kOtherDbName);
- dbWithUUID.getCollection(mainCollName).drop();
- assert.commandWorked(dbWithUUID.runCommand({create: mainCollName}));
- collectionInfos = dbWithUUID.getCollectionInfos({name: mainCollName});
- uuid = collectionInfos[0].info.uuid;
- assert.neq(null, uuid);
- assert.commandWorked(dbWithUUID.runCommand({find: uuid}));
+// Test that UUID resolution fails when the UUID belongs to a different database. First, we
+// create a collection in another database.
+const dbWithUUID = db.getSiblingDB(kOtherDbName);
+dbWithUUID.getCollection(mainCollName).drop();
+assert.commandWorked(dbWithUUID.runCommand({create: mainCollName}));
+collectionInfos = dbWithUUID.getCollectionInfos({name: mainCollName});
+uuid = collectionInfos[0].info.uuid;
+assert.neq(null, uuid);
+assert.commandWorked(dbWithUUID.runCommand({find: uuid}));
- // Run read commands supporting UUIDs against the original database, passing the UUID from a
- // different database, and verify that the UUID resolution fails with the correct error code. We
- // also test that the same command succeeds when there is no database mismatch.
- for (cmd of[{count: uuid}, {distinct: uuid, key: "a"}, {find: uuid}, {listIndexes: uuid}]) {
- assert.commandWorked(dbWithUUID.runCommand(cmd));
- assert.commandFailedWithCode(
- db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd));
- }
+// Run read commands supporting UUIDs against the original database, passing the UUID from a
+// different database, and verify that the UUID resolution fails with the correct error code. We
+// also test that the same command succeeds when there is no database mismatch.
+for (cmd of [{count: uuid}, {distinct: uuid, key: "a"}, {find: uuid}, {listIndexes: uuid}]) {
+ assert.commandWorked(dbWithUUID.runCommand(cmd));
+ assert.commandFailedWithCode(
+ db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd));
+}
}());
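
The round trip this test exercises is: read the UUID out of getCollectionInfos(), substitute it for the collection name, and confirm the results come from the right namespace. A condensed sketch, assuming the 'main_coll' collection created above still holds its single document:

    var infos = db.getCollectionInfos({name: "main_coll"});
    var uuid = infos[0].info.uuid;  // BinData (subtype 4) when UUIDs are enabled
    if (uuid != null) {
        // The UUID stands in for the collection name in find/count/listIndexes.
        var res = assert.commandWorked(db.runCommand({find: uuid}));
        assert.eq(new DBCommandCursor(db, res).itcount(), 1);
    }
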
diff --git a/jstests/core/compact_keeps_indexes.js b/jstests/core/compact_keeps_indexes.js
index 1a050d27469..25b3909df1c 100644
--- a/jstests/core/compact_keeps_indexes.js
+++ b/jstests/core/compact_keeps_indexes.js
@@ -8,39 +8,38 @@
// ]
(function() {
- 'use strict';
+'use strict';
- var coll = db.compact_keeps_indexes;
+var coll = db.compact_keeps_indexes;
- coll.drop();
- coll.insert({_id: 1, x: 1});
- coll.ensureIndex({x: 1});
+coll.drop();
+coll.insert({_id: 1, x: 1});
+coll.ensureIndex({x: 1});
- assert.eq(coll.getIndexes().length, 2);
+assert.eq(coll.getIndexes().length, 2);
- // force:true is for replset passthroughs
- var res = coll.runCommand('compact', {force: true});
- // Some storage engines (for example, inMemoryExperiment) do not support the compact command.
- if (res.code == 115) { // CommandNotSupported
- return;
- }
- assert.commandWorked(res);
+// force:true is for replset passthroughs
+var res = coll.runCommand('compact', {force: true});
+// Some storage engines (for example, inMemoryExperiment) do not support the compact command.
+if (res.code == 115) { // CommandNotSupported
+ return;
+}
+assert.commandWorked(res);
- assert.eq(coll.getIndexes().length, 2);
- assert.eq(coll.find({_id: 1}).itcount(), 1);
- assert.eq(coll.find({x: 1}).itcount(), 1);
+assert.eq(coll.getIndexes().length, 2);
+assert.eq(coll.find({_id: 1}).itcount(), 1);
+assert.eq(coll.find({x: 1}).itcount(), 1);
- var dropCollectionShell = startParallelShell(function() {
- var t = db.getSiblingDB('test_compact_keeps_indexes_drop').testcoll;
+var dropCollectionShell = startParallelShell(function() {
+ var t = db.getSiblingDB('test_compact_keeps_indexes_drop').testcoll;
+ t.drop();
+ for (var i = 0; i < 100; i++) {
+ t.save({a: 1});
t.drop();
- for (var i = 0; i < 100; i++) {
- t.save({a: 1});
- t.drop();
- }
- });
- for (var i = 0; i < 10; i++) {
- coll.runCommand('compact');
}
- dropCollectionShell();
-
+});
+for (var i = 0; i < 10; i++) {
+ coll.runCommand('compact');
+}
+dropCollectionShell();
}());
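
Two environment quirks are guarded against here: replica-set passthroughs only accept compact on a primary with force: true, and some storage engines reject the command outright with CommandNotSupported (code 115). The guard in isolation, as a sketch:

    var res = db.compact_keeps_indexes.runCommand("compact", {force: true});
    if (res.code == ErrorCodes.CommandNotSupported) {
        print("compact unsupported by this storage engine; nothing to verify");
    } else {
        assert.commandWorked(res);
        assert.eq(db.compact_keeps_indexes.getIndexes().length, 2);  // indexes survive
    }
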
diff --git a/jstests/core/compare_timestamps.js b/jstests/core/compare_timestamps.js
index 2440fac3fe1..b88bb003483 100644
--- a/jstests/core/compare_timestamps.js
+++ b/jstests/core/compare_timestamps.js
@@ -1,9 +1,9 @@
// SERVER-21160: Check that timestamp comparisons are unsigned
(function() {
- 'use strict';
- var t = db.compare_timestamps;
- t.drop();
- assert.writeOK(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"}));
- assert.writeOK(t.insert({a: new Timestamp(0, 0), b: "zero"}));
- assert.eq(t.find().sort({a: 1}).limit(1).next().b, "zero", "timestamp must compare unsigned");
+'use strict';
+var t = db.compare_timestamps;
+t.drop();
+assert.writeOK(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"}));
+assert.writeOK(t.insert({a: new Timestamp(0, 0), b: "zero"}));
+assert.eq(t.find().sort({a: 1}).limit(1).next().b, "zero", "timestamp must compare unsigned");
}());
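
The point of SERVER-21160 is that timestamp fields compare as unsigned 32-bit values: 0xffffffff read as signed would be -1 and would incorrectly sort before Timestamp(0, 0). The same ordering can be checked directly with the shell's BSON comparator (a sketch; bsonWoCompare returns a positive number when its first argument sorts later):

    assert.gt(bsonWoCompare({a: new Timestamp(0xffffffff, 3)}, {a: new Timestamp(0, 0)}), 0);
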
diff --git a/jstests/core/connection_status.js b/jstests/core/connection_status.js
index efb3eb3fd4f..e50122f7cee 100644
--- a/jstests/core/connection_status.js
+++ b/jstests/core/connection_status.js
@@ -8,90 +8,85 @@
// Tests the connectionStatus command
(function() {
- "use strict";
- var dbName = 'connection_status';
- var myDB = db.getSiblingDB(dbName);
- myDB.dropAllUsers();
+"use strict";
+var dbName = 'connection_status';
+var myDB = db.getSiblingDB(dbName);
+myDB.dropAllUsers();
- /**
- * Test that the output of connectionStatus makes sense.
- */
- function validateConnectionStatus(expectedUser, expectedRole, showPrivileges) {
- var connectionStatus =
- myDB.runCommand({"connectionStatus": 1, "showPrivileges": showPrivileges});
- assert.commandWorked(connectionStatus);
- var authInfo = connectionStatus.authInfo;
+/**
+ * Test that the output of connectionStatus makes sense.
+ */
+function validateConnectionStatus(expectedUser, expectedRole, showPrivileges) {
+ var connectionStatus =
+ myDB.runCommand({"connectionStatus": 1, "showPrivileges": showPrivileges});
+ assert.commandWorked(connectionStatus);
+ var authInfo = connectionStatus.authInfo;
- // Test that authenticated users are properly returned.
- var users = authInfo.authenticatedUsers;
- var matches = 0;
- var infoStr = tojson(authInfo);
- for (var i = 0; i < users.length; i++) {
- var user = users[i].user;
- var db = users[i].db;
- assert(isString(user),
- "each authenticatedUsers should have a 'user' string:" + infoStr);
- assert(isString(db), "each authenticatedUsers should have a 'db' string:" + infoStr);
- if (user === expectedUser.user && db === expectedUser.db) {
- matches++;
- }
+ // Test that authenticated users are properly returned.
+ var users = authInfo.authenticatedUsers;
+ var matches = 0;
+ var infoStr = tojson(authInfo);
+ for (var i = 0; i < users.length; i++) {
+ var user = users[i].user;
+ var db = users[i].db;
+ assert(isString(user), "each authenticatedUsers should have a 'user' string:" + infoStr);
+ assert(isString(db), "each authenticatedUsers should have a 'db' string:" + infoStr);
+ if (user === expectedUser.user && db === expectedUser.db) {
+ matches++;
}
- assert.eq(
- matches, 1, "expected user should be present once in authenticatedUsers:" + infoStr);
+ }
+ assert.eq(matches, 1, "expected user should be present once in authenticatedUsers:" + infoStr);
- // Test that authenticated roles are properly returned.
- var roles = authInfo.authenticatedUserRoles;
- matches = 0;
- for (var i = 0; i < roles.length; i++) {
- var role = roles[i].role;
- var db = roles[i].db;
- assert(isString(role),
- "each authenticatedUserRole should have a 'role' string:" + infoStr);
- assert(isString(db), "each authenticatedUserRole should have a 'db' string:" + infoStr);
- if (role === expectedRole.role && db === expectedRole.db) {
- matches++;
- }
+ // Test that authenticated roles are properly returned.
+ var roles = authInfo.authenticatedUserRoles;
+ matches = 0;
+ for (var i = 0; i < roles.length; i++) {
+ var role = roles[i].role;
+ var db = roles[i].db;
+ assert(isString(role), "each authenticatedUserRole should have a 'role' string:" + infoStr);
+ assert(isString(db), "each authenticatedUserRole should have a 'db' string:" + infoStr);
+ if (role === expectedRole.role && db === expectedRole.db) {
+ matches++;
}
- // Role will be duplicated when users with the same role are logged in at the same time.
- assert.gte(
- matches, 1, "expected role should be present in authenticatedUserRoles:" + infoStr);
+ }
+ // Role will be duplicated when users with the same role are logged in at the same time.
+ assert.gte(matches, 1, "expected role should be present in authenticatedUserRoles:" + infoStr);
- var privileges = authInfo.authenticatedUserPrivileges;
- if (showPrivileges) {
- for (var i = 0; i < privileges.length; i++) {
- assert(
- isObject(privileges[i].resource),
- "each authenticatedUserPrivilege should have a 'resource' object:" + infoStr);
- var actions = privileges[i].actions;
- for (var j = 0; j < actions.length; j++) {
- assert(isString(actions[j]),
- "each authenticatedUserPrivilege action should be a string:" + infoStr);
- }
+ var privileges = authInfo.authenticatedUserPrivileges;
+ if (showPrivileges) {
+ for (var i = 0; i < privileges.length; i++) {
+ assert(isObject(privileges[i].resource),
+ "each authenticatedUserPrivilege should have a 'resource' object:" + infoStr);
+ var actions = privileges[i].actions;
+ for (var j = 0; j < actions.length; j++) {
+ assert(isString(actions[j]),
+ "each authenticatedUserPrivilege action should be a string:" + infoStr);
}
-
- } else {
- // Test that privileges are not returned without asking
- assert.eq(privileges,
- undefined,
- "authenticatedUserPrivileges should not be returned by default:" + infoStr);
}
+
+ } else {
+ // Test that privileges are not returned without asking
+ assert.eq(privileges,
+ undefined,
+ "authenticatedUserPrivileges should not be returned by default:" + infoStr);
}
+}
- function test(userName) {
- var user = {user: userName, db: dbName};
- var role = {role: "root", db: "admin"};
- myDB.createUser({user: userName, pwd: "weak password", roles: [role]});
- myDB.auth(userName, "weak password");
+function test(userName) {
+ var user = {user: userName, db: dbName};
+ var role = {role: "root", db: "admin"};
+ myDB.createUser({user: userName, pwd: "weak password", roles: [role]});
+ myDB.auth(userName, "weak password");
- // Validate with and without showPrivileges
- validateConnectionStatus(user, role, true);
- validateConnectionStatus(user, role, false);
+ // Validate with and without showPrivileges
+ validateConnectionStatus(user, role, true);
+ validateConnectionStatus(user, role, false);
- // Clean up.
- myDB.dropAllUsers();
- myDB.logout();
- }
+ // Clean up.
+ myDB.dropAllUsers();
+ myDB.logout();
+}
- test("someone");
- test("someone else");
+test("someone");
+test("someone else");
})();
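
Concretely, the document being validated has the shape sketched below; the values are illustrative, matching a user created by test("someone"), and assume that user is still authenticated:

    var status = assert.commandWorked(
        myDB.runCommand({connectionStatus: 1, showPrivileges: true}));
    // status.authInfo then resembles:
    // {
    //     authenticatedUsers: [{user: "someone", db: "connection_status"}],
    //     authenticatedUserRoles: [{role: "root", db: "admin"}],
    //     authenticatedUserPrivileges: [{resource: {cluster: true}, actions: [...]}]
    // }
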
diff --git a/jstests/core/constructors.js b/jstests/core/constructors.js
index 27b0b7f7406..0e28150e701 100644
--- a/jstests/core/constructors.js
+++ b/jstests/core/constructors.js
@@ -26,7 +26,7 @@ function clientEvalConstructorTest(constructorList) {
try {
eval(constructor);
} catch (e) {
- throw("valid constructor: " + constructor + " failed in eval context: " + e);
+ throw ("valid constructor: " + constructor + " failed in eval context: " + e);
}
});
constructorList.invalid.forEach(function(constructor) {
@@ -56,7 +56,7 @@ function mapReduceConstructorTest(constructorList) {
res = t.mapReduce(m, r, {out: "mr_constructors_out", scope: {xx: 1}});
} catch (e) {
- throw("valid constructor: " + constructor + " failed in mapReduce context: " + e);
+ throw ("valid constructor: " + constructor + " failed in mapReduce context: " + e);
}
});
constructorList.invalid.forEach(function(constructor) {
@@ -83,7 +83,7 @@ function whereConstructorTest(constructorList) {
try {
t.findOne({$where: constructor});
} catch (e) {
- throw("valid constructor: " + constructor + " failed in $where query: " + e);
+ throw ("valid constructor: " + constructor + " failed in $where query: " + e);
}
});
constructorList.invalid.forEach(function(constructor) {
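
The only change clang-format makes in this file is the space after throw. Because throw is a keyword, not a function, throw(expr) and throw (expr) are the same statement: both throw the parenthesized expression. A quick demonstration:

    try {
        throw ("boom " + 42);  // identical to: throw "boom 42";
    } catch (e) {
        assert.eq(e, "boom 42");
    }
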
diff --git a/jstests/core/contained_or_with_nested_or.js b/jstests/core/contained_or_with_nested_or.js
index b0add6fdeb1..7407498e1ce 100644
--- a/jstests/core/contained_or_with_nested_or.js
+++ b/jstests/core/contained_or_with_nested_or.js
@@ -1,41 +1,41 @@
// This test was designed to reproduce a memory leak that was fixed by SERVER-35455.
(function() {
- "use strict";
+"use strict";
- const coll = db.contained_or_with_nested_or;
- coll.drop();
- assert.commandWorked(coll.insert([
- // Should not match the query:
- {_id: 0, active: false, loc: "USA", agency: "FBI", vip: false},
- {_id: 1, active: false, loc: "RUS", agency: "OTHER", vip: true},
- {_id: 2, active: true, loc: "RUS", agency: "OTHER", vip: false},
- {_id: 3, active: true, loc: "USA", agency: "OTHER", vip: false},
- {_id: 4, active: true, loc: "UK", agency: "OTHER", vip: false},
- {_id: 5, active: true, loc: "UK", agency: "OTHER", vip: true},
- {_id: 6, active: true},
- // Should match the query:
- {_id: 7, active: true, loc: "USA", agency: "FBI", vip: false},
- {_id: 8, active: true, loc: "USA", agency: "CIA", vip: true},
- {_id: 9, active: true, loc: "RUS", agency: "OTHER", vip: true},
- {_id: 10, active: true, loc: "RUS", agency: "KGB"},
- ]));
- assert.commandWorked(coll.createIndexes([{loc: 1}, {agency: 1}, {vip: 1}]));
+const coll = db.contained_or_with_nested_or;
+coll.drop();
+assert.commandWorked(coll.insert([
+ // Should not match the query:
+ {_id: 0, active: false, loc: "USA", agency: "FBI", vip: false},
+ {_id: 1, active: false, loc: "RUS", agency: "OTHER", vip: true},
+ {_id: 2, active: true, loc: "RUS", agency: "OTHER", vip: false},
+ {_id: 3, active: true, loc: "USA", agency: "OTHER", vip: false},
+ {_id: 4, active: true, loc: "UK", agency: "OTHER", vip: false},
+ {_id: 5, active: true, loc: "UK", agency: "OTHER", vip: true},
+ {_id: 6, active: true},
+ // Should match the query:
+ {_id: 7, active: true, loc: "USA", agency: "FBI", vip: false},
+ {_id: 8, active: true, loc: "USA", agency: "CIA", vip: true},
+ {_id: 9, active: true, loc: "RUS", agency: "OTHER", vip: true},
+ {_id: 10, active: true, loc: "RUS", agency: "KGB"},
+]));
+assert.commandWorked(coll.createIndexes([{loc: 1}, {agency: 1}, {vip: 1}]));
- // The following query reproduced the memory leak described in SERVER-38601. To catch a
- // regression, we would only expect this test to fail on ASAN variants. Before SERVER-35455 we
- // would construct a plan for one clause of the $or, then realize that the other clause could
- // not be indexed and discard the plan for the first clause in a way that leaks memory.
- const results = coll.find({
- active: true,
- $or: [
- {loc: "USA", $or: [{agency: "FBI"}, {vip: true}]},
- {loc: "RUS", $or: [{agency: "KGB"}, {vip: true}]}
- ]
- })
- .toArray();
+// The following query reproduced the memory leak described in SERVER-38601. To catch a
+// regression, we would only expect this test to fail on ASAN variants. Before SERVER-35455 we
+// would construct a plan for one clause of the $or, then realize that the other clause could
+// not be indexed and discard the plan for the first clause in a way that leaks memory.
+const results = coll.find({
+ active: true,
+ $or: [
+ {loc: "USA", $or: [{agency: "FBI"}, {vip: true}]},
+ {loc: "RUS", $or: [{agency: "KGB"}, {vip: true}]}
+ ]
+ })
+ .toArray();
- // Just assert on the matching _ids. We avoid adding a sort to the query above to avoid
- // restricting the plans the query planner can consider.
- const matchingIds = results.map(result => result._id);
- assert.setEq(new Set(matchingIds), new Set([7, 8, 9, 10]));
+// Just assert on the matching _ids. We avoid adding a sort to the query above to avoid
+// restricting the plans the query planner can consider.
+const matchingIds = results.map(result => result._id);
+assert.setEq(new Set(matchingIds), new Set([7, 8, 9, 10]));
}());
diff --git a/jstests/core/convert_to_capped.js b/jstests/core/convert_to_capped.js
index e9a05f09450..58731299dae 100644
--- a/jstests/core/convert_to_capped.js
+++ b/jstests/core/convert_to_capped.js
@@ -8,20 +8,20 @@
*/
(function() {
- "use strict";
+"use strict";
- let testDb = db.getSiblingDB("convert_to_capped");
- let coll = testDb.coll;
- testDb.dropDatabase();
+let testDb = db.getSiblingDB("convert_to_capped");
+let coll = testDb.coll;
+testDb.dropDatabase();
- // Create a collection with some data.
- let num = 10;
- for (let i = 0; i < num; ++i) {
- assert.writeOK(coll.insert({_id: i}));
- }
+// Create a collection with some data.
+let num = 10;
+for (let i = 0; i < num; ++i) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- // Ensure we do not allow overflowing the long long size argument on the server (SERVER-33078).
- assert.commandFailedWithCode(
- testDb.runCommand({convertToCapped: coll.getName(), size: 5308156746568725891247}),
- ErrorCodes.BadValue);
+// Ensure we do not allow overflowing the long long size argument on the server (SERVER-33078).
+assert.commandFailedWithCode(
+ testDb.runCommand({convertToCapped: coll.getName(), size: 5308156746568725891247}),
+ ErrorCodes.BadValue);
})();
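
For contrast with the overflow rejection above, a size inside the signed 64-bit range converts normally. A sketch reusing the test's testDb, coll, and num bindings:

    assert.commandWorked(testDb.runCommand({convertToCapped: coll.getName(), size: 8192}));
    assert(coll.isCapped());
    assert.lte(coll.count(), num);  // conversion keeps the newest documents that fit
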
diff --git a/jstests/core/count_hint.js b/jstests/core/count_hint.js
index d508a46fd1a..9bb485410ef 100644
--- a/jstests/core/count_hint.js
+++ b/jstests/core/count_hint.js
@@ -8,55 +8,55 @@
* @tags: [requires_fastcount]
*/
(function() {
- "use strict";
+"use strict";
- var coll = db.jstests_count_hint;
- coll.drop();
+var coll = db.jstests_count_hint;
+coll.drop();
- assert.writeOK(coll.insert({i: 1}));
- assert.writeOK(coll.insert({i: 2}));
+assert.writeOK(coll.insert({i: 1}));
+assert.writeOK(coll.insert({i: 2}));
- assert.eq(2, coll.find().count());
+assert.eq(2, coll.find().count());
- assert.commandWorked(coll.ensureIndex({i: 1}));
+assert.commandWorked(coll.ensureIndex({i: 1}));
- assert.eq(2, coll.find().hint("i_1").count());
- assert.eq(2, coll.find().hint({i: 1}).count());
+assert.eq(2, coll.find().hint("i_1").count());
+assert.eq(2, coll.find().hint({i: 1}).count());
- assert.eq(1, coll.find({i: 1}).hint("_id_").count());
- assert.eq(1, coll.find({i: 1}).hint({_id: 1}).count());
+assert.eq(1, coll.find({i: 1}).hint("_id_").count());
+assert.eq(1, coll.find({i: 1}).hint({_id: 1}).count());
- assert.eq(2, coll.find().hint("_id_").count());
- assert.eq(2, coll.find().hint({_id: 1}).count());
+assert.eq(2, coll.find().hint("_id_").count());
+assert.eq(2, coll.find().hint({_id: 1}).count());
- // Create a sparse index which should have no entries.
- assert.commandWorked(coll.ensureIndex({x: 1}, {sparse: true}));
+// Create a sparse index which should have no entries.
+assert.commandWorked(coll.ensureIndex({x: 1}, {sparse: true}));
- // A hint should be respected, even if it results in the wrong answer.
- assert.eq(0, coll.find().hint("x_1").count());
- assert.eq(0, coll.find().hint({x: 1}).count());
+// A hint should be respected, even if it results in the wrong answer.
+assert.eq(0, coll.find().hint("x_1").count());
+assert.eq(0, coll.find().hint({x: 1}).count());
- assert.eq(0, coll.find({i: 1}).hint("x_1").count());
- assert.eq(0, coll.find({i: 1}).hint({x: 1}).count());
+assert.eq(0, coll.find({i: 1}).hint("x_1").count());
+assert.eq(0, coll.find({i: 1}).hint({x: 1}).count());
- // SERVER-14792: bad hints should cause the count to fail, even if there is no query predicate.
- assert.throws(function() {
- coll.find().hint({bad: 1, hint: 1}).count();
- });
- assert.throws(function() {
- coll.find({i: 1}).hint({bad: 1, hint: 1}).count();
- });
+// SERVER-14792: bad hints should cause the count to fail, even if there is no query predicate.
+assert.throws(function() {
+ coll.find().hint({bad: 1, hint: 1}).count();
+});
+assert.throws(function() {
+ coll.find({i: 1}).hint({bad: 1, hint: 1}).count();
+});
- assert.throws(function() {
- coll.find().hint("BAD HINT").count();
- });
- assert.throws(function() {
- coll.find({i: 1}).hint("BAD HINT").count();
- });
+assert.throws(function() {
+ coll.find().hint("BAD HINT").count();
+});
+assert.throws(function() {
+ coll.find({i: 1}).hint("BAD HINT").count();
+});
- // Test that a bad hint fails with the correct error code.
- let cmdRes = db.runCommand({count: coll.getName(), hint: {bad: 1, hint: 1}});
- assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue, tojson(cmdRes));
- var regex = new RegExp("hint provided does not correspond to an existing index");
- assert(regex.test(cmdRes.errmsg));
+// Test that a bad hint fails with the correct error code.
+let cmdRes = db.runCommand({count: coll.getName(), hint: {bad: 1, hint: 1}});
+assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue, tojson(cmdRes));
+var regex = new RegExp("hint provided does not correspond to an existing index");
+assert(regex.test(cmdRes.errmsg));
})();
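
The zero counts follow from sparseness: no document carries an x field, so the {x: 1} index holds no entries, and a hinted count consults only that index. The emptiness can be made visible through executionStats (a sketch using the shell's explainable-collection chain):

    var exp = db.jstests_count_hint.explain("executionStats").find().hint({x: 1}).count();
    assert.eq(exp.executionStats.totalKeysExamined, 0);  // the sparse index is empty
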
diff --git a/jstests/core/counta.js b/jstests/core/counta.js
index 027402af3ee..8d7df953e3d 100644
--- a/jstests/core/counta.js
+++ b/jstests/core/counta.js
@@ -3,28 +3,28 @@
// @tags: [requires_fastcount]
(function() {
- 'use strict';
+'use strict';
- var t = db.jstests_counta;
- t.drop();
+var t = db.jstests_counta;
+t.drop();
- for (var i = 0; i < 10; ++i) {
- t.save({a: i});
- }
+for (var i = 0; i < 10; ++i) {
+ t.save({a: i});
+}
- // f() is undefined, causing an assertion
- assert.throws(function() {
- t.count({
- $where: function() {
- if (this.a < 5) {
- return true;
- } else {
- f();
- }
+// f() is undefined, causing an assertion
+assert.throws(function() {
+ t.count({
+ $where: function() {
+ if (this.a < 5) {
+ return true;
+ } else {
+ f();
}
- });
+ }
});
+});
- // count must return error if collection name is absent
- assert.commandFailedWithCode(db.runCommand("count"), ErrorCodes.InvalidNamespace);
+// count must return error if collection name is absent
+assert.commandFailedWithCode(db.runCommand("count"), ErrorCodes.InvalidNamespace);
})();
diff --git a/jstests/core/coveredIndex1.js b/jstests/core/coveredIndex1.js
index 7776a48c014..2be0cae4bf9 100644
--- a/jstests/core/coveredIndex1.js
+++ b/jstests/core/coveredIndex1.js
@@ -7,82 +7,82 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
+"use strict";
- const coll = db["jstests_coveredIndex1"];
- coll.drop();
+const coll = db["jstests_coveredIndex1"];
+coll.drop();
- // Include helpers for analyzing explain output.
- load("jstests/libs/analyze_plan.js");
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
- assert.writeOK(coll.insert({order: 0, fn: "john", ln: "doe"}));
- assert.writeOK(coll.insert({order: 1, fn: "jack", ln: "doe"}));
- assert.writeOK(coll.insert({order: 2, fn: "john", ln: "smith"}));
- assert.writeOK(coll.insert({order: 3, fn: "jack", ln: "black"}));
- assert.writeOK(coll.insert({order: 4, fn: "bob", ln: "murray"}));
- assert.writeOK(coll.insert({order: 5, fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}}));
+assert.writeOK(coll.insert({order: 0, fn: "john", ln: "doe"}));
+assert.writeOK(coll.insert({order: 1, fn: "jack", ln: "doe"}));
+assert.writeOK(coll.insert({order: 2, fn: "john", ln: "smith"}));
+assert.writeOK(coll.insert({order: 3, fn: "jack", ln: "black"}));
+assert.writeOK(coll.insert({order: 4, fn: "bob", ln: "murray"}));
+assert.writeOK(coll.insert({order: 5, fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}}));
- /**
- * Asserts that running the find command with query 'query' and projection 'projection' is
- * covered if 'isCovered' is true, or not covered otherwise.
- *
- * If 'hint' is specified, use 'hint' as the suggested index.
- */
- function assertIfQueryIsCovered(query, projection, isCovered, hint) {
- let cursor = coll.find(query, projection);
- if (hint) {
- cursor = cursor.hint(hint);
- }
- const explain = cursor.explain();
- assert.commandWorked(explain);
+/**
+ * Asserts that running the find command with query 'query' and projection 'projection' is
+ * covered if 'isCovered' is true, or not covered otherwise.
+ *
+ * If 'hint' is specified, use 'hint' as the suggested index.
+ */
+function assertIfQueryIsCovered(query, projection, isCovered, hint) {
+ let cursor = coll.find(query, projection);
+ if (hint) {
+ cursor = cursor.hint(hint);
+ }
+ const explain = cursor.explain();
+ assert.commandWorked(explain);
- assert(explain.hasOwnProperty("queryPlanner"), tojson(explain));
- assert(explain.queryPlanner.hasOwnProperty("winningPlan"), tojson(explain));
- const winningPlan = explain.queryPlanner.winningPlan;
- if (isCovered) {
- assert(isIndexOnly(db, winningPlan),
- "Query " + tojson(query) + " with projection " + tojson(projection) +
- " should have been covered, but got this plan: " + tojson(winningPlan));
- } else {
- assert(!isIndexOnly(db, winningPlan),
- "Query " + tojson(query) + " with projection " + tojson(projection) +
- " should not have been covered, but got this plan: " + tojson(winningPlan));
- }
+ assert(explain.hasOwnProperty("queryPlanner"), tojson(explain));
+ assert(explain.queryPlanner.hasOwnProperty("winningPlan"), tojson(explain));
+ const winningPlan = explain.queryPlanner.winningPlan;
+ if (isCovered) {
+ assert(isIndexOnly(db, winningPlan),
+ "Query " + tojson(query) + " with projection " + tojson(projection) +
+ " should have been covered, but got this plan: " + tojson(winningPlan));
+ } else {
+ assert(!isIndexOnly(db, winningPlan),
+ "Query " + tojson(query) + " with projection " + tojson(projection) +
+ " should not have been covered, but got this plan: " + tojson(winningPlan));
}
+}
- // Create an index on one field.
- assert.commandWorked(coll.createIndex({ln: 1}));
- assertIfQueryIsCovered({}, {}, false);
- assertIfQueryIsCovered({ln: "doe"}, {}, false);
- assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false);
- assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true, {ln: 1});
+// Create an index on one field.
+assert.commandWorked(coll.createIndex({ln: 1}));
+assertIfQueryIsCovered({}, {}, false);
+assertIfQueryIsCovered({ln: "doe"}, {}, false);
+assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false);
+assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true, {ln: 1});
- // Create a compound index.
- assert.commandWorked(coll.dropIndex({ln: 1}));
- assert.commandWorked(coll.createIndex({ln: 1, fn: 1}));
- assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true);
- assertIfQueryIsCovered({ln: "doe"}, {ln: 1, fn: 1, _id: 0}, true);
- assertIfQueryIsCovered({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}, true);
- assertIfQueryIsCovered({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}, true);
- assertIfQueryIsCovered({fn: "john"}, {fn: 1, _id: 0}, false);
+// Create a compound index.
+assert.commandWorked(coll.dropIndex({ln: 1}));
+assert.commandWorked(coll.createIndex({ln: 1, fn: 1}));
+assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true);
+assertIfQueryIsCovered({ln: "doe"}, {ln: 1, fn: 1, _id: 0}, true);
+assertIfQueryIsCovered({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}, true);
+assertIfQueryIsCovered({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}, true);
+assertIfQueryIsCovered({fn: "john"}, {fn: 1, _id: 0}, false);
- // Repeat the above test, but with a compound index involving _id.
- assert.commandWorked(coll.dropIndex({ln: 1, fn: 1}));
- assert.commandWorked(coll.createIndex({_id: 1, ln: 1}));
- assertIfQueryIsCovered({_id: 123, ln: "doe"}, {_id: 1}, true);
- assertIfQueryIsCovered({_id: 123, ln: "doe"}, {ln: 1}, true);
- assertIfQueryIsCovered({ln: "doe", _id: 123}, {ln: 1, _id: 1}, true);
- assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false);
+// Repeat the above test, but with a compound index involving _id.
+assert.commandWorked(coll.dropIndex({ln: 1, fn: 1}));
+assert.commandWorked(coll.createIndex({_id: 1, ln: 1}));
+assertIfQueryIsCovered({_id: 123, ln: "doe"}, {_id: 1}, true);
+assertIfQueryIsCovered({_id: 123, ln: "doe"}, {ln: 1}, true);
+assertIfQueryIsCovered({ln: "doe", _id: 123}, {ln: 1, _id: 1}, true);
+assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false);
- // Create an index on an embedded object.
- assert.commandWorked(coll.dropIndex({_id: 1, ln: 1}));
- assert.commandWorked(coll.createIndex({obj: 1}));
- assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false);
- assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, false);
- assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}, true);
+// Create an index on an embedded object.
+assert.commandWorked(coll.dropIndex({_id: 1, ln: 1}));
+assert.commandWorked(coll.createIndex({obj: 1}));
+assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false);
+assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, false);
+assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}, true);
- // Create indexes on fields inside an embedded object.
- assert.commandWorked(coll.dropIndex({obj: 1}));
- assert.commandWorked(coll.createIndex({"obj.a": 1, "obj.b": 1}));
- assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false);
+// Create indexes on fields inside an embedded object.
+assert.commandWorked(coll.dropIndex({obj: 1}));
+assert.commandWorked(coll.createIndex({"obj.a": 1, "obj.b": 1}));
+assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false);
}());
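
A covered query here is one the server answers entirely from index keys, which explain reports as a winning plan with no FETCH stage. A minimal standalone sketch of the same check, assuming a mongo shell session and the isIndexOnly helper from jstests/libs/analyze_plan.js (the collection name is illustrative):

    load("jstests/libs/analyze_plan.js");
    const c = db.covered_example;
    c.drop();
    assert.commandWorked(c.createIndex({ln: 1}));
    assert.commandWorked(c.insert({ln: "doe", fn: "john"}));
    // Excluding _id leaves only fields present in the {ln: 1} index keys.
    const plan = c.explain().find({ln: "doe"}, {ln: 1, _id: 0}).finish().queryPlanner.winningPlan;
    assert(isIndexOnly(db, plan));  // no FETCH stage: the query is covered
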
diff --git a/jstests/core/coveredIndex3.js b/jstests/core/coveredIndex3.js
index 4bfedda888b..3a6621a2a72 100644
--- a/jstests/core/coveredIndex3.js
+++ b/jstests/core/coveredIndex3.js
@@ -12,13 +12,15 @@ if (0) { // SERVER-4975
// Insert an array, which will make the { a:1 } index multikey and should disable covered
// index
// matching.
- p1 = startParallelShell('for( i = 0; i < 60; ++i ) { \
+ p1 = startParallelShell(
+ 'for( i = 0; i < 60; ++i ) { \
db.jstests_coveredIndex3.save( { a:[ 2000, 2001 ] } ); \
sleep( 300 ); \
}');
// Frequent writes cause the find operation to yield.
- p2 = startParallelShell('for( i = 0; i < 1800; ++i ) { \
+ p2 = startParallelShell(
+ 'for( i = 0; i < 1800; ++i ) { \
db.jstests_coveredIndex3_other.save( {} ); \
sleep( 10 ); \
}');
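
For context on the helper used in this disabled test: startParallelShell runs the given JavaScript source in a separate mongo shell process against the same server and returns a join function that waits for it to exit. A hedged sketch (the collection name is illustrative):

    // Launch a second shell, let it finish, then observe its write from this shell.
    const join = startParallelShell('db.parallel_example.insert({x: 1});');
    join();  // blocks until the parallel shell exits
    assert.eq(1, db.parallel_example.count({x: 1}));
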
diff --git a/jstests/core/covered_index_sort_no_fetch_optimization.js b/jstests/core/covered_index_sort_no_fetch_optimization.js
index d7545b49761..81853b0f02b 100644
--- a/jstests/core/covered_index_sort_no_fetch_optimization.js
+++ b/jstests/core/covered_index_sort_no_fetch_optimization.js
@@ -6,229 +6,233 @@
// must be fetched to support the SHARDING_FILTER stage.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const collName = "covered_index_sort_no_fetch_optimization";
- const coll = db.getCollection(collName);
- coll.drop();
-
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-
- assert.commandWorked(coll.insert([
- {a: 1, b: 1, c: 1},
- {a: 1, b: 2, c: 2},
- {a: 2, b: 1, c: 3},
- {a: 2, b: 2, c: 4},
- {a: -1, b: 1, c: 5}
- ]));
-
- const kIsCovered = true;
- const kNotCovered = false;
- const kBlockingSort = true;
- const kNonBlockingSort = false;
-
- function assertExpectedResult(findCmd, expectedResult, isCovered, isBlockingSort) {
- const result = assert.commandWorked(db.runCommand(findCmd));
- assert.eq(result.cursor.firstBatch, expectedResult, result);
-
- const explainResult =
- assert.commandWorked(db.runCommand({explain: findCmd, verbosity: "executionStats"}));
- assert.eq(
- isCovered, isIndexOnly(db, explainResult.queryPlanner.winningPlan), explainResult);
- assert.eq(isBlockingSort,
- planHasStage(db, explainResult.queryPlanner.winningPlan, "SORT"),
- explainResult);
- }
-
- // Test correctness of basic covered queries. Here, the sort predicate is not the same order
- // as the index order, but uses the same keys.
- let findCmd = {find: collName, filter: {a: {$lt: 2}}, projection: {b: 1, _id: 0}, sort: {b: 1}};
- let expected = [{"b": 1}, {"b": 1}, {"b": 2}];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-
- findCmd = {
- find: collName,
- filter: {a: {$gt: 0}},
- projection: {a: 1, b: 1, _id: 0},
- sort: {b: 1, a: 1}
- };
- expected = [{"a": 1, "b": 1}, {"a": 2, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 2}];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-
- findCmd = {
- find: collName,
- filter: {a: {$gt: 0}},
- projection: {a: 1, b: 1, _id: 0},
- sort: {b: 1, a: -1}
- };
- expected = [{"a": 2, "b": 1}, {"a": 1, "b": 1}, {"a": 2, "b": 2}, {"a": 1, "b": 2}];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-
- // Test correctness of queries where sort is not covered because not all sort keys are in the
- // index.
- findCmd = {
- find: collName,
- filter: {a: {$gt: 0}},
- projection: {b: 1, c: 1, _id: 0},
- sort: {c: 1, b: 1}
- };
- expected = [{"b": 1, "c": 1}, {"b": 2, "c": 2}, {"b": 1, "c": 3}, {"b": 2, "c": 4}];
- assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
-
- findCmd =
- {find: collName, filter: {a: {$gt: 0}}, projection: {b: 1, _id: 0}, sort: {c: 1, b: 1}};
- expected = [{"b": 1}, {"b": 2}, {"b": 1}, {"b": 2}];
- assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
-
- // When the sort key is multikey, we cannot cover the sort using the index.
- assert.commandWorked(coll.insert({a: 1, b: [4, 5, 6]}));
- assert.commandWorked(coll.insert({a: 1, b: [-1, 11, 12]}));
- findCmd = {find: collName, filter: {a: {$gt: 0}}, projection: {b: 1, _id: 0}, sort: {b: 1}};
- expected = [{"b": [-1, 11, 12]}, {"b": 1}, {"b": 1}, {"b": 2}, {"b": 2}, {"b": [4, 5, 6]}];
- assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
-
- // Collation Tests.
-
- // If you have an index with the same index key pattern and the same collation as the sort key,
- // then no blocking sort is required.
- assert(coll.drop());
- // Note that {locale: "en_US", strength: 3} differ from the simple collation with respect to
- // case ordering. "en_US" collation puts lowercase letters first, whereas the simple collation
- // puts uppercase first.
- assert.commandWorked(
- coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}}));
- assert.commandWorked(
- coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
-
- findCmd = {
- find: collName,
- filter: {},
- projection: {a: 1, b: 1, _id: 0},
- collation: {locale: "en_US", strength: 3},
- sort: {a: 1, b: 1},
- hint: {a: 1, b: 1}
- };
- expected = [
- {"a": 1, "b": 1},
- {"a": 1, "b": 2},
- {"a": 1, "b": "a"},
- {"a": 1, "b": "A"},
- {"a": 2, "b": 2}
- ];
- assertExpectedResult(findCmd, expected, kNotCovered, kNonBlockingSort);
-
- // This tests the case where there is a collation, and we need to do a blocking SORT, but that
- // SORT could be computed using the index keys. However, this query cannot be covered due the
- // index having a non-simple collation.
- findCmd = {
- find: collName,
- filter: {a: {$lt: 2}},
- projection: {b: 1, _id: 0},
- collation: {locale: "en_US", strength: 3},
- sort: {b: 1},
- hint: {a: 1, b: 1}
- };
- expected = [
- {"b": 1},
- {"b": 2},
- {"b": "a"},
- {"b": "A"},
- ];
- assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
-
- // The index has the same key pattern as the sort but a different collation.
- // We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order.
- assert.commandWorked(coll.dropIndex({a: 1, b: 1}));
- assert.commandWorked(
- coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 1}}));
-
- findCmd = {
- find: collName,
- filter: {},
- projection: {a: 1, b: 1, _id: 0},
- collation: {locale: "en_US", strength: 3},
- sort: {a: 1, b: 1},
- hint: {a: 1, b: 1}
- };
- expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
- assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
-
- // The index has a collation but the query sort does not.
- // We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order.
- assert.commandWorked(coll.dropIndex({a: 1, b: 1}));
- assert.commandWorked(
- coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}}));
- findCmd = {
- find: collName,
- filter: {},
- projection: {a: 1, b: 1, _id: 0},
- sort: {a: 1, b: 1},
- hint: {a: 1, b: 1}
- };
- expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}];
- assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
-
- // The index has a collation but the query does not. However, our index bounds do not contain
- // strings, so we can apply the no-fetch optimization.
- findCmd = {
- find: collName,
- filter: {a: {$gte: 1}, b: 2},
- projection: {a: 1, b: 1, _id: 0},
- sort: {b: 1, a: 1},
- hint: {a: 1, b: 1}
- };
- expected = [{a: 1, b: 2}, {a: 2, b: 2}];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-
- // The index does not have a special collation, but the query asks for one. The no-fetch
- // optimization will be applied in this case. The server must correctly respect the collation
- // when sorting the index keys, as the index keys do not already reflect the collation.
- assert.commandWorked(coll.dropIndex({a: 1, b: 1}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-
- findCmd = {
- find: collName,
- filter: {},
- projection: {a: 1, b: 1, _id: 0},
- collation: {locale: "en_US", strength: 3},
- sort: {a: 1, b: 1},
- hint: {a: 1, b: 1}
- };
-
- expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-
- // Test covered sort plan possible with non-multikey dotted field in sort key.
- assert(coll.drop());
- assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
- assert.commandWorked(coll.insert([
- {a: 0, b: {c: 1}},
- {a: 1, b: {c: 2}},
- {a: 2, b: {c: "A"}},
- {a: 3, b: {c: "a"}},
- {a: 4, b: {c: 3}}
- ]));
-
- findCmd = {
- find: collName,
- filter: {a: {$gt: 0}},
- projection: {a: 1, "b.c": 1, _id: 0},
- sort: {"b.c": 1}
- };
- expected = [
- {"a": 1, "b": {"c": 2}},
- {"a": 4, "b": {"c": 3}},
- {"a": 2, "b": {"c": "A"}},
- {"a": 3, "b": {"c": "a"}}
- ];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-
- assert.commandWorked(coll.insert({a: [1], b: {c: 1}}));
- findCmd =
- {find: collName, filter: {a: {$gt: 0}}, projection: {"b.c": 1, _id: 0}, sort: {"b.c": 1}};
- expected =
- [{"b": {"c": 1}}, {"b": {"c": 2}}, {"b": {"c": 3}}, {"b": {"c": "A"}}, {"b": {"c": "a"}}];
- assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const collName = "covered_index_sort_no_fetch_optimization";
+const coll = db.getCollection(collName);
+coll.drop();
+
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+
+assert.commandWorked(coll.insert([
+ {a: 1, b: 1, c: 1},
+ {a: 1, b: 2, c: 2},
+ {a: 2, b: 1, c: 3},
+ {a: 2, b: 2, c: 4},
+ {a: -1, b: 1, c: 5}
+]));
+
+const kIsCovered = true;
+const kNotCovered = false;
+const kBlockingSort = true;
+const kNonBlockingSort = false;
+
+function assertExpectedResult(findCmd, expectedResult, isCovered, isBlockingSort) {
+ const result = assert.commandWorked(db.runCommand(findCmd));
+ assert.eq(result.cursor.firstBatch, expectedResult, result);
+
+ const explainResult =
+ assert.commandWorked(db.runCommand({explain: findCmd, verbosity: "executionStats"}));
+ assert.eq(isCovered, isIndexOnly(db, explainResult.queryPlanner.winningPlan), explainResult);
+ assert.eq(isBlockingSort,
+ planHasStage(db, explainResult.queryPlanner.winningPlan, "SORT"),
+ explainResult);
+}
+
+// Test correctness of basic covered queries. Here, the sort pattern is not in the same order
+// as the index key pattern, but it uses the same keys.
+let findCmd = {find: collName, filter: {a: {$lt: 2}}, projection: {b: 1, _id: 0}, sort: {b: 1}};
+let expected = [{"b": 1}, {"b": 1}, {"b": 2}];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {a: 1, b: 1, _id: 0},
+ sort: {b: 1, a: 1}
+};
+expected = [{"a": 1, "b": 1}, {"a": 2, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 2}];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {a: 1, b: 1, _id: 0},
+ sort: {b: 1, a: -1}
+};
+expected = [{"a": 2, "b": 1}, {"a": 1, "b": 1}, {"a": 2, "b": 2}, {"a": 1, "b": 2}];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+
+// Test correctness of queries where sort is not covered because not all sort keys are in the
+// index.
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {b: 1, c: 1, _id: 0},
+ sort: {c: 1, b: 1}
+};
+expected = [{"b": 1, "c": 1}, {"b": 2, "c": 2}, {"b": 1, "c": 3}, {"b": 2, "c": 4}];
+assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
+
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {b: 1, _id: 0},
+ sort: {c: 1, b: 1}
+};
+expected = [{"b": 1}, {"b": 2}, {"b": 1}, {"b": 2}];
+assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
+
+// When the sort key is multikey, we cannot cover the sort using the index.
+assert.commandWorked(coll.insert({a: 1, b: [4, 5, 6]}));
+assert.commandWorked(coll.insert({a: 1, b: [-1, 11, 12]}));
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {b: 1, _id: 0},
+ sort: {b: 1}
+};
+expected = [{"b": [-1, 11, 12]}, {"b": 1}, {"b": 1}, {"b": 2}, {"b": 2}, {"b": [4, 5, 6]}];
+assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
+
+// Collation Tests.
+
+// If you have an index with the same index key pattern and the same collation as the sort key,
+// then no blocking sort is required.
+assert(coll.drop());
+// Note that {locale: "en_US", strength: 3} differs from the simple collation with respect to
+// case ordering. The "en_US" collation puts lowercase letters first, whereas the simple collation
+// puts uppercase first.
+assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}}));
+assert.commandWorked(
+ coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]));
+
+findCmd = {
+ find: collName,
+ filter: {},
+ projection: {a: 1, b: 1, _id: 0},
+ collation: {locale: "en_US", strength: 3},
+ sort: {a: 1, b: 1},
+ hint: {a: 1, b: 1}
+};
+expected =
+ [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 1, "b": "a"}, {"a": 1, "b": "A"}, {"a": 2, "b": 2}];
+assertExpectedResult(findCmd, expected, kNotCovered, kNonBlockingSort);
+
+// This tests the case where there is a collation, and we need to do a blocking SORT, but that
+// SORT could be computed using the index keys. However, this query cannot be covered due to the
+// index having a non-simple collation.
+findCmd = {
+ find: collName,
+ filter: {a: {$lt: 2}},
+ projection: {b: 1, _id: 0},
+ collation: {locale: "en_US", strength: 3},
+ sort: {b: 1},
+ hint: {a: 1, b: 1}
+};
+expected = [
+ {"b": 1},
+ {"b": 2},
+ {"b": "a"},
+ {"b": "A"},
+];
+assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
+
+// The index has the same key pattern as the sort but a different collation.
+// We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order.
+assert.commandWorked(coll.dropIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 1}}));
+
+findCmd = {
+ find: collName,
+ filter: {},
+ projection: {a: 1, b: 1, _id: 0},
+ collation: {locale: "en_US", strength: 3},
+ sort: {a: 1, b: 1},
+ hint: {a: 1, b: 1}
+};
+expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
+assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
+
+// The index has a collation but the query sort does not.
+// We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order.
+assert.commandWorked(coll.dropIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}}));
+findCmd = {
+ find: collName,
+ filter: {},
+ projection: {a: 1, b: 1, _id: 0},
+ sort: {a: 1, b: 1},
+ hint: {a: 1, b: 1}
+};
+expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}];
+assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort);
+
+// The index has a collation but the query does not. However, our index bounds do not contain
+// strings, so we can apply the no-fetch optimization.
+findCmd = {
+ find: collName,
+ filter: {a: {$gte: 1}, b: 2},
+ projection: {a: 1, b: 1, _id: 0},
+ sort: {b: 1, a: 1},
+ hint: {a: 1, b: 1}
+};
+expected = [{a: 1, b: 2}, {a: 2, b: 2}];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+
+// The index does not have a special collation, but the query asks for one. The no-fetch
+// optimization will be applied in this case. The server must correctly respect the collation
+// when sorting the index keys, as the index keys do not already reflect the collation.
+assert.commandWorked(coll.dropIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+
+findCmd = {
+ find: collName,
+ filter: {},
+ projection: {a: 1, b: 1, _id: 0},
+ collation: {locale: "en_US", strength: 3},
+ sort: {a: 1, b: 1},
+ hint: {a: 1, b: 1}
+};
+
+expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+
+// Test that a covered sort plan is possible with a non-multikey dotted field in the sort key.
+assert(coll.drop());
+assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
+assert.commandWorked(coll.insert([
+ {a: 0, b: {c: 1}},
+ {a: 1, b: {c: 2}},
+ {a: 2, b: {c: "A"}},
+ {a: 3, b: {c: "a"}},
+ {a: 4, b: {c: 3}}
+]));
+
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {a: 1, "b.c": 1, _id: 0},
+ sort: {"b.c": 1}
+};
+expected = [
+ {"a": 1, "b": {"c": 2}},
+ {"a": 4, "b": {"c": 3}},
+ {"a": 2, "b": {"c": "A"}},
+ {"a": 3, "b": {"c": "a"}}
+];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
+
+assert.commandWorked(coll.insert({a: [1], b: {c: 1}}));
+findCmd = {
+ find: collName,
+ filter: {a: {$gt: 0}},
+ projection: {"b.c": 1, _id: 0},
+ sort: {"b.c": 1}
+};
+expected =
+ [{"b": {"c": 1}}, {"b": {"c": 2}}, {"b": {"c": 3}}, {"b": {"c": "A"}}, {"b": {"c": "a"}}];
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
})();
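
The assertion pattern above, reduced to its essentials: a blocking sort shows up in explain output as a SORT stage, while a sort satisfied by index key order does not. A sketch assuming the planHasStage helper from jstests/libs/analyze_plan.js (the collection name is illustrative):

    load("jstests/libs/analyze_plan.js");
    const c = db.sort_stage_example;
    c.drop();
    assert.commandWorked(c.createIndex({a: 1, b: 1}));
    assert.commandWorked(c.insert([{a: 1, b: 2}, {a: 2, b: 1}]));
    const winningPlan = (cmd) =>
        assert.commandWorked(db.runCommand({explain: cmd})).queryPlanner.winningPlan;
    // Sorting by the index prefix needs no SORT stage...
    assert(!planHasStage(
        db, winningPlan({find: c.getName(), sort: {a: 1}, hint: {a: 1, b: 1}}), "SORT"));
    // ...while sorting by a non-prefix key forces a blocking SORT.
    assert(planHasStage(
        db, winningPlan({find: c.getName(), sort: {b: 1}, hint: {a: 1, b: 1}}), "SORT"));
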
diff --git a/jstests/core/covered_multikey.js b/jstests/core/covered_multikey.js
index ec4ed0d5c0b..cb5e97d8dbb 100644
--- a/jstests/core/covered_multikey.js
+++ b/jstests/core/covered_multikey.js
@@ -7,103 +7,103 @@
* Test covering behavior for queries over a multikey index.
*/
(function() {
- "use strict";
+"use strict";
- // For making assertions about explain output.
- load("jstests/libs/analyze_plan.js");
+// For making assertions about explain output.
+load("jstests/libs/analyze_plan.js");
- let coll = db.covered_multikey;
- coll.drop();
+let coll = db.covered_multikey;
+coll.drop();
- assert.writeOK(coll.insert({a: 1, b: [2, 3, 4]}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: [2, 3, 4]}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.eq(1, coll.find({a: 1, b: 2}, {_id: 0, a: 1}).itcount());
- assert.eq({a: 1}, coll.findOne({a: 1, b: 2}, {_id: 0, a: 1}));
- let explainRes = coll.explain("queryPlanner").find({a: 1, b: 2}, {_id: 0, a: 1}).finish();
- assert(isIxscan(db, explainRes.queryPlanner.winningPlan));
- assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
+assert.eq(1, coll.find({a: 1, b: 2}, {_id: 0, a: 1}).itcount());
+assert.eq({a: 1}, coll.findOne({a: 1, b: 2}, {_id: 0, a: 1}));
+let explainRes = coll.explain("queryPlanner").find({a: 1, b: 2}, {_id: 0, a: 1}).finish();
+assert(isIxscan(db, explainRes.queryPlanner.winningPlan));
+assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
- coll.drop();
- assert.writeOK(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5}));
- assert.writeOK(coll.insert({a: [1, 2, 3], b: 1, c: 4, d: 6}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1, c: -1, d: -1}));
+coll.drop();
+assert.writeOK(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5}));
+assert.writeOK(coll.insert({a: [1, 2, 3], b: 1, c: 4, d: 6}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1, c: -1, d: -1}));
- let cursor = coll.find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}).sort({c: -1, d: -1});
- assert.eq(cursor.next(), {c: 4, d: 6});
- assert.eq(cursor.next(), {c: 3, d: 5});
- assert(!cursor.hasNext());
- explainRes = coll.explain("queryPlanner")
- .find({a: 1, b: 1}, {_id: 0, c: 1, d: 1})
- .sort({c: -1, d: -1})
- .finish();
- assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
+let cursor = coll.find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}).sort({c: -1, d: -1});
+assert.eq(cursor.next(), {c: 4, d: 6});
+assert.eq(cursor.next(), {c: 3, d: 5});
+assert(!cursor.hasNext());
+explainRes = coll.explain("queryPlanner")
+ .find({a: 1, b: 1}, {_id: 0, c: 1, d: 1})
+ .sort({c: -1, d: -1})
+ .finish();
+assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
- // Verify that a query cannot be covered over a path which is multikey due to an empty array.
- coll.drop();
- assert.writeOK(coll.insert({a: []}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.eq({a: []}, coll.findOne({a: []}, {_id: 0, a: 1}));
- explainRes = coll.explain("queryPlanner").find({a: []}, {_id: 0, a: 1}).finish();
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
- let ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(true, ixscanStage.isMultiKey);
+// Verify that a query cannot be covered over a path which is multikey due to an empty array.
+coll.drop();
+assert.writeOK(coll.insert({a: []}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.eq({a: []}, coll.findOne({a: []}, {_id: 0, a: 1}));
+explainRes = coll.explain("queryPlanner").find({a: []}, {_id: 0, a: 1}).finish();
+assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
+assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
+let ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(true, ixscanStage.isMultiKey);
- // Verify that a query cannot be covered over a path which is multikey due to a single-element
- // array.
- coll.drop();
- assert.writeOK(coll.insert({a: [2]}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1}));
- explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
- ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(true, ixscanStage.isMultiKey);
+// Verify that a query cannot be covered over a path which is multikey due to a single-element
+// array.
+coll.drop();
+assert.writeOK(coll.insert({a: [2]}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1}));
+explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
+assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
+assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
+ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(true, ixscanStage.isMultiKey);
- // Verify that a query cannot be covered over a path which is multikey due to a single-element
- // array, where the path is made multikey by an update rather than an insert.
- coll.drop();
- assert.writeOK(coll.insert({a: 2}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.update({}, {$set: {a: [2]}}));
- assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1}));
- explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
- assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
- ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(true, ixscanStage.isMultiKey);
+// Verify that a query cannot be covered over a path which is multikey due to a single-element
+// array, where the path is made multikey by an update rather than an insert.
+coll.drop();
+assert.writeOK(coll.insert({a: 2}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.writeOK(coll.update({}, {$set: {a: [2]}}));
+assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1}));
+explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish();
+assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN"));
+assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH"));
+ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(true, ixscanStage.isMultiKey);
- // Verify that a trailing empty array makes a 2dsphere index multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
- assert.writeOK(coll.insert({a: {b: 1}, c: {type: "Point", coordinates: [0, 0]}}));
- explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
- ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.neq(null, ixscanStage);
- assert.eq(false, ixscanStage.isMultiKey);
- assert.writeOK(coll.insert({a: {b: []}, c: {type: "Point", coordinates: [0, 0]}}));
- explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
- ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.neq(null, ixscanStage);
- assert.eq(true, ixscanStage.isMultiKey);
+// Verify that a trailing empty array makes a 2dsphere index multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
+assert.writeOK(coll.insert({a: {b: 1}, c: {type: "Point", coordinates: [0, 0]}}));
+explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
+ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.neq(null, ixscanStage);
+assert.eq(false, ixscanStage.isMultiKey);
+assert.writeOK(coll.insert({a: {b: []}, c: {type: "Point", coordinates: [0, 0]}}));
+explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
+ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.neq(null, ixscanStage);
+assert.eq(true, ixscanStage.isMultiKey);
- // Verify that a mid-path empty array makes a 2dsphere index multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
- assert.writeOK(coll.insert({a: [], c: {type: "Point", coordinates: [0, 0]}}));
- explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
- ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.neq(null, ixscanStage);
- assert.eq(true, ixscanStage.isMultiKey);
+// Verify that a mid-path empty array makes a 2dsphere index multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
+assert.writeOK(coll.insert({a: [], c: {type: "Point", coordinates: [0, 0]}}));
+explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
+ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.neq(null, ixscanStage);
+assert.eq(true, ixscanStage.isMultiKey);
- // Verify that a single-element array makes a 2dsphere index multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
- assert.writeOK(coll.insert({a: {b: [3]}, c: {type: "Point", coordinates: [0, 0]}}));
- explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
- ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
- assert.neq(null, ixscanStage);
- assert.eq(true, ixscanStage.isMultiKey);
+// Verify that a single-element array makes a 2dsphere index multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"}));
+assert.writeOK(coll.insert({a: {b: [3]}, c: {type: "Point", coordinates: [0, 0]}}));
+explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
+ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN");
+assert.neq(null, ixscanStage);
+assert.eq(true, ixscanStage.isMultiKey);
}());
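
One way to watch the multikey transition these cases depend on, sketched under the assumption of a fresh collection (the name is illustrative): the index starts non-multikey, and the first array insert, even a single-element one, permanently flips the flag and disqualifies covered plans over that path.

    load("jstests/libs/analyze_plan.js");
    const c = db.multikey_example;
    c.drop();
    assert.commandWorked(c.createIndex({a: 1}));
    assert.writeOK(c.insert({a: 1}));
    let ixscan = getPlanStage(
        c.explain().find({a: 1}).hint({a: 1}).finish().queryPlanner.winningPlan, "IXSCAN");
    assert.eq(false, ixscan.isMultiKey);
    assert.writeOK(c.insert({a: [2]}));  // a single-element array is enough
    ixscan = getPlanStage(
        c.explain().find({a: 1}).hint({a: 1}).finish().queryPlanner.winningPlan, "IXSCAN");
    assert.eq(true, ixscan.isMultiKey);
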
diff --git a/jstests/core/create_collection.js b/jstests/core/create_collection.js
index 06c820f1462..2ac8692b16b 100644
--- a/jstests/core/create_collection.js
+++ b/jstests/core/create_collection.js
@@ -5,163 +5,160 @@
// Tests for the "create" command.
(function() {
- "use strict";
-
- load("jstests/libs/get_index_helpers.js");
-
- // "create" command rejects invalid options.
- db.create_collection.drop();
- assert.commandFailedWithCode(db.createCollection("create_collection", {unknown: 1}), 40415);
-
- // Cannot create a collection with null characters.
- assert.commandFailedWithCode(db.createCollection("\0ab"), ErrorCodes.InvalidNamespace);
- assert.commandFailedWithCode(db.createCollection("a\0b"), ErrorCodes.InvalidNamespace);
- assert.commandFailedWithCode(db.createCollection("ab\0"), ErrorCodes.InvalidNamespace);
-
- //
- // Tests for "idIndex" field.
- //
-
- // "idIndex" field not allowed with "viewOn".
- db.create_collection.drop();
- assert.commandWorked(db.createCollection("create_collection"));
- assert.commandFailedWithCode(db.runCommand({
- create: "create_view",
- viewOn: "create_collection",
- idIndex: {key: {_id: 1}, name: "_id_"}
- }),
- ErrorCodes.InvalidOptions);
-
- // "idIndex" field not allowed with "autoIndexId".
- db.create_collection.drop();
- assert.commandFailedWithCode(
- db.createCollection("create_collection",
- {autoIndexId: false, idIndex: {key: {_id: 1}, name: "_id_"}}),
- ErrorCodes.InvalidOptions);
-
- // "idIndex" field must be an object.
- db.create_collection.drop();
- assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: 1}),
- ErrorCodes.TypeMismatch);
-
- // "idIndex" field cannot be empty.
- db.create_collection.drop();
- assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: {}}),
- ErrorCodes.FailedToParse);
-
- // "idIndex" field must be a specification for an _id index.
- db.create_collection.drop();
- assert.commandFailedWithCode(
- db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "a_1"}}),
- ErrorCodes.BadValue);
-
- // "idIndex" field must have "key" equal to {_id: 1}.
- db.create_collection.drop();
- assert.commandFailedWithCode(
- db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "_id_"}}),
- ErrorCodes.BadValue);
-
- // The name of an _id index gets corrected to "_id_".
- db.create_collection.drop();
- assert.commandWorked(
- db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "a_1"}}));
- var indexSpec = GetIndexHelpers.findByKeyPattern(db.create_collection.getIndexes(), {_id: 1});
- assert.neq(indexSpec, null);
- assert.eq(indexSpec.name, "_id_", tojson(indexSpec));
-
- // "idIndex" field must only contain fields that are allowed for an _id index.
- db.create_collection.drop();
- assert.commandFailedWithCode(
- db.createCollection("create_collection",
- {idIndex: {key: {_id: 1}, name: "_id_", sparse: true}}),
- ErrorCodes.InvalidIndexSpecificationOption);
-
- // "create" creates v=2 _id index when "v" is not specified in "idIndex".
- db.create_collection.drop();
- assert.commandWorked(
- db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_"}}));
- indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
- assert.neq(indexSpec, null);
- assert.eq(indexSpec.v, 2, tojson(indexSpec));
-
- // "create" creates v=1 _id index when "idIndex" has "v" equal to 1.
- db.create_collection.drop();
- assert.commandWorked(
- db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
- assert.neq(indexSpec, null);
- assert.eq(indexSpec.v, 1, tojson(indexSpec));
-
- // "create" creates v=2 _id index when "idIndex" has "v" equal to 2.
- db.create_collection.drop();
- assert.commandWorked(
- db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
- indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
- assert.neq(indexSpec, null);
- assert.eq(indexSpec.v, 2, tojson(indexSpec));
-
- // "collation" field of "idIndex" must match collection default collation.
- db.create_collection.drop();
- assert.commandFailedWithCode(
- db.createCollection("create_collection",
- {idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}}),
- ErrorCodes.BadValue);
-
- db.create_collection.drop();
- assert.commandFailedWithCode(db.createCollection("create_collection", {
- collation: {locale: "fr_CA"},
- idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}
- }),
- ErrorCodes.BadValue);
-
- db.create_collection.drop();
- assert.commandFailedWithCode(db.createCollection("create_collection", {
- collation: {locale: "fr_CA"},
- idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "simple"}}
- }),
- ErrorCodes.BadValue);
-
- db.create_collection.drop();
- assert.commandWorked(db.createCollection("create_collection", {
- collation: {locale: "en_US", strength: 3},
- idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}
- }));
- indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
- assert.neq(indexSpec, null);
- assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec));
-
- // If "collation" field is not present in "idIndex", _id index inherits collection default
- // collation.
- db.create_collection.drop();
- assert.commandWorked(db.createCollection(
- "create_collection",
- {collation: {locale: "en_US"}, idIndex: {key: {_id: 1}, name: "_id_"}}));
- indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
- assert.neq(indexSpec, null);
- assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec));
-
- //
- // Tests the combination of the "capped", "size" and "max" fields in createCollection().
- //
-
- // When "capped" is true, the "size" field needs to be present.
- assert.commandFailedWithCode(db.createCollection('capped_no_size_no_max', {capped: true}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(db.createCollection('capped_no_size', {capped: true, max: 10}),
- ErrorCodes.InvalidOptions);
- db.no_capped.drop();
- assert.commandWorked(db.createCollection('no_capped'), {capped: false});
- db.capped_no_max.drop();
- assert.commandWorked(db.createCollection('capped_no_max', {capped: true, size: 256}));
- db.capped_with_max_and_size.drop();
- assert.commandWorked(
- db.createCollection('capped_with_max_and_size', {capped: true, max: 10, size: 256}));
-
- // When the "size" field is present, "capped" needs to be true.
- assert.commandFailedWithCode(db.createCollection('size_no_capped', {size: 256}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- db.createCollection('size_capped_false', {capped: false, size: 256}),
- ErrorCodes.InvalidOptions);
-
+"use strict";
+
+load("jstests/libs/get_index_helpers.js");
+
+// "create" command rejects invalid options.
+db.create_collection.drop();
+assert.commandFailedWithCode(db.createCollection("create_collection", {unknown: 1}), 40415);
+
+// Cannot create a collection with null characters.
+assert.commandFailedWithCode(db.createCollection("\0ab"), ErrorCodes.InvalidNamespace);
+assert.commandFailedWithCode(db.createCollection("a\0b"), ErrorCodes.InvalidNamespace);
+assert.commandFailedWithCode(db.createCollection("ab\0"), ErrorCodes.InvalidNamespace);
+
+//
+// Tests for "idIndex" field.
+//
+
+// "idIndex" field not allowed with "viewOn".
+db.create_collection.drop();
+assert.commandWorked(db.createCollection("create_collection"));
+assert.commandFailedWithCode(db.runCommand({
+ create: "create_view",
+ viewOn: "create_collection",
+ idIndex: {key: {_id: 1}, name: "_id_"}
+}),
+ ErrorCodes.InvalidOptions);
+
+// "idIndex" field not allowed with "autoIndexId".
+db.create_collection.drop();
+assert.commandFailedWithCode(
+ db.createCollection("create_collection",
+ {autoIndexId: false, idIndex: {key: {_id: 1}, name: "_id_"}}),
+ ErrorCodes.InvalidOptions);
+
+// "idIndex" field must be an object.
+db.create_collection.drop();
+assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: 1}),
+ ErrorCodes.TypeMismatch);
+
+// "idIndex" field cannot be empty.
+db.create_collection.drop();
+assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: {}}),
+ ErrorCodes.FailedToParse);
+
+// "idIndex" field must be a specification for an _id index.
+db.create_collection.drop();
+assert.commandFailedWithCode(
+ db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "a_1"}}),
+ ErrorCodes.BadValue);
+
+// "idIndex" field must have "key" equal to {_id: 1}.
+db.create_collection.drop();
+assert.commandFailedWithCode(
+ db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "_id_"}}),
+ ErrorCodes.BadValue);
+
+// The name of an _id index gets corrected to "_id_".
+db.create_collection.drop();
+assert.commandWorked(
+ db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "a_1"}}));
+var indexSpec = GetIndexHelpers.findByKeyPattern(db.create_collection.getIndexes(), {_id: 1});
+assert.neq(indexSpec, null);
+assert.eq(indexSpec.name, "_id_", tojson(indexSpec));
+
+// "idIndex" field must only contain fields that are allowed for an _id index.
+db.create_collection.drop();
+assert.commandFailedWithCode(
+ db.createCollection("create_collection",
+ {idIndex: {key: {_id: 1}, name: "_id_", sparse: true}}),
+ ErrorCodes.InvalidIndexSpecificationOption);
+
+// "create" creates v=2 _id index when "v" is not specified in "idIndex".
+db.create_collection.drop();
+assert.commandWorked(
+ db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_"}}));
+indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
+assert.neq(indexSpec, null);
+assert.eq(indexSpec.v, 2, tojson(indexSpec));
+
+// "create" creates v=1 _id index when "idIndex" has "v" equal to 1.
+db.create_collection.drop();
+assert.commandWorked(
+ db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
+assert.neq(indexSpec, null);
+assert.eq(indexSpec.v, 1, tojson(indexSpec));
+
+// "create" creates v=2 _id index when "idIndex" has "v" equal to 2.
+db.create_collection.drop();
+assert.commandWorked(
+ db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
+indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
+assert.neq(indexSpec, null);
+assert.eq(indexSpec.v, 2, tojson(indexSpec));
+
+// "collation" field of "idIndex" must match collection default collation.
+db.create_collection.drop();
+assert.commandFailedWithCode(
+ db.createCollection("create_collection",
+ {idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}}),
+ ErrorCodes.BadValue);
+
+db.create_collection.drop();
+assert.commandFailedWithCode(db.createCollection("create_collection", {
+ collation: {locale: "fr_CA"},
+ idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}
+}),
+ ErrorCodes.BadValue);
+
+db.create_collection.drop();
+assert.commandFailedWithCode(db.createCollection("create_collection", {
+ collation: {locale: "fr_CA"},
+ idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "simple"}}
+}),
+ ErrorCodes.BadValue);
+
+db.create_collection.drop();
+assert.commandWorked(db.createCollection("create_collection", {
+ collation: {locale: "en_US", strength: 3},
+ idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}
+}));
+indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
+assert.neq(indexSpec, null);
+assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec));
+
+// If "collation" field is not present in "idIndex", _id index inherits collection default
+// collation.
+db.create_collection.drop();
+assert.commandWorked(db.createCollection(
+ "create_collection", {collation: {locale: "en_US"}, idIndex: {key: {_id: 1}, name: "_id_"}}));
+indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_");
+assert.neq(indexSpec, null);
+assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec));
+
+//
+// Tests the combination of the "capped", "size" and "max" fields in createCollection().
+//
+
+// When "capped" is true, the "size" field needs to be present.
+assert.commandFailedWithCode(db.createCollection('capped_no_size_no_max', {capped: true}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(db.createCollection('capped_no_size', {capped: true, max: 10}),
+ ErrorCodes.InvalidOptions);
+db.no_capped.drop();
+assert.commandWorked(db.createCollection('no_capped', {capped: false}));
+db.capped_no_max.drop();
+assert.commandWorked(db.createCollection('capped_no_max', {capped: true, size: 256}));
+db.capped_with_max_and_size.drop();
+assert.commandWorked(
+ db.createCollection('capped_with_max_and_size', {capped: true, max: 10, size: 256}));
+
+// When the "size" field is present, "capped" needs to be true.
+assert.commandFailedWithCode(db.createCollection('size_no_capped', {size: 256}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(db.createCollection('size_capped_false', {capped: false, size: 256}),
+ ErrorCodes.InvalidOptions);
})();
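
To make the "idIndex" option concrete, a brief sketch (the collection name is illustrative): create can specify the _id index name and version up front, and getIndexes shows what the server actually built.

    db.id_index_example.drop();
    assert.commandWorked(db.createCollection(
        "id_index_example", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
    const spec = db.id_index_example.getIndexes().find(ix => ix.name === "_id_");
    assert.eq(1, spec.v);  // the requested v=1 _id index was built
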
diff --git a/jstests/core/create_index_same_spec_different_name.js b/jstests/core/create_index_same_spec_different_name.js
index 660a2a714fc..7b08f9f55ca 100644
--- a/jstests/core/create_index_same_spec_different_name.js
+++ b/jstests/core/create_index_same_spec_different_name.js
@@ -3,15 +3,14 @@
* 'IndexOptionsConflict' error.
*/
(function() {
- 'use strict';
+'use strict';
- const coll = "create_index_same_spec_different_name";
- db.coll.drop();
+const coll = "create_index_same_spec_different_name";
+db[coll].drop();
- assert.commandWorked(
- db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_1"}]}));
+assert.commandWorked(db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_1"}]}));
- assert.commandFailedWithCode(
- db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_2"}]}),
- ErrorCodes.IndexOptionsConflict);
+assert.commandFailedWithCode(
+ db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_2"}]}),
+ ErrorCodes.IndexOptionsConflict);
}());
diff --git a/jstests/core/create_indexes.js b/jstests/core/create_indexes.js
index 45db4c9ef76..3e4bd209e5b 100644
--- a/jstests/core/create_indexes.js
+++ b/jstests/core/create_indexes.js
@@ -4,172 +4,170 @@
* ]
*/
(function() {
- 'use strict';
-
- var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
-
- var extractResult = function(obj) {
- if (!isMongos)
- return obj;
-
- // Sample mongos format:
- // {
- // raw: {
- // "localhost:30000": {
- // createdCollectionAutomatically: false,
- // numIndexesBefore: 3,
- // numIndexesAfter: 5,
- // ok: 1
- // }
- // },
- // ok: 1
- // }
-
- var numFields = 0;
- var result = null;
- for (var field in obj.raw) {
- result = obj.raw[field];
- numFields++;
- }
-
- assert.neq(null, result);
- assert.eq(1, numFields);
- return result;
- };
-
- var checkImplicitCreate = function(createIndexResult, isMongos) {
- let allowImplicit = !isMongos;
- assert.eq(allowImplicit, createIndexResult.createdCollectionAutomatically);
- };
-
- var dbTest = db.getSisterDB('create_indexes_db');
- dbTest.dropDatabase();
-
- // Database does not exist
- var collDbNotExist = dbTest.create_indexes_no_db;
- var res = assert.commandWorked(
- collDbNotExist.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
- res = extractResult(res);
- checkImplicitCreate(res, isMongos);
- assert.eq(1, res.numIndexesBefore);
- assert.eq(2, res.numIndexesAfter);
- assert.isnull(res.note,
- 'createIndexes.note should not be present in results when adding a new index: ' +
- tojson(res));
-
- // Collection does not exist, but database does
- var t = dbTest.create_indexes;
- var res = assert.commandWorked(
- t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
- res = extractResult(res);
- checkImplicitCreate(res, isMongos);
- assert.eq(1, res.numIndexesBefore);
- assert.eq(2, res.numIndexesAfter);
- assert.isnull(res.note,
- 'createIndexes.note should not be present in results when adding a new index: ' +
- tojson(res));
-
- // Both database and collection exist
- res = assert.commandWorked(
- t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
- res = extractResult(res);
- assert(!res.createdCollectionAutomatically);
- assert.eq(2, res.numIndexesBefore);
- assert.eq(2,
- res.numIndexesAfter,
- 'numIndexesAfter missing from createIndexes result when adding a duplicate index: ' +
- tojson(res));
- assert(res.note,
- 'createIndexes.note should be present in results when adding a duplicate index: ' +
- tojson(res));
-
- res = t.runCommand("createIndexes",
- {indexes: [{key: {"x": 1}, name: "x_1"}, {key: {"y": 1}, name: "y_1"}]});
- res = extractResult(res);
- assert(!res.createdCollectionAutomatically);
- assert.eq(2, res.numIndexesBefore);
- assert.eq(3, res.numIndexesAfter);
-
- res = assert.commandWorked(t.runCommand(
- 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
- res = extractResult(res);
- assert(!res.createdCollectionAutomatically);
- assert.eq(3, res.numIndexesBefore);
- assert.eq(5, res.numIndexesAfter);
- assert.isnull(res.note,
- 'createIndexes.note should not be present in results when adding new indexes: ' +
- tojson(res));
-
- res = assert.commandWorked(t.runCommand(
- 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
-
- res = extractResult(res);
- assert.eq(5, res.numIndexesBefore);
- assert.eq(5,
- res.numIndexesAfter,
- 'numIndexesAfter missing from createIndexes result when adding duplicate indexes: ' +
- tojson(res));
- assert(res.note,
- 'createIndexes.note should be present in results when adding a duplicate index: ' +
- tojson(res));
-
- res = t.runCommand("createIndexes", {indexes: [{}]});
- assert(!res.ok);
-
- res = t.runCommand("createIndexes", {indexes: [{}, {key: {m: 1}, name: "asd"}]});
- assert(!res.ok);
-
- assert.eq(5, t.getIndexes().length);
-
- res = t.runCommand("createIndexes", {indexes: [{key: {"c": 1}, sparse: true, name: "c_1"}]});
- assert.eq(6, t.getIndexes().length);
- assert.eq(1,
- t.getIndexes()
- .filter(function(z) {
- return z.sparse;
- })
- .length);
-
- res = t.runCommand("createIndexes", {indexes: [{key: {"x": "foo"}, name: "x_1"}]});
- assert(!res.ok);
-
- assert.eq(6, t.getIndexes().length);
-
- res = t.runCommand("createIndexes", {indexes: [{key: {"x": 1}, name: ""}]});
- assert(!res.ok);
-
- assert.eq(6, t.getIndexes().length);
-
- // Test that v0 indexes cannot be created.
- res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 0}]});
- assert.commandFailed(res, 'v0 index creation should fail');
-
- // Test that v1 indexes can be created explicitly.
- res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 1}]});
- assert.commandWorked(res, 'v1 index creation should succeed');
-
- // Test that index creation fails with an invalid top-level field.
- res = t.runCommand('createIndexes', {indexes: [{key: {e: 1}, name: 'e_1'}], 'invalidField': 1});
- assert.commandFailedWithCode(res, ErrorCodes.BadValue);
-
- // Test that index creation fails with an invalid field in the index spec for index version V2.
- res = t.runCommand('createIndexes',
- {indexes: [{key: {e: 1}, name: 'e_1', 'v': 2, 'invalidField': 1}]});
- assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption);
-
- // Test that index creation fails with an invalid field in the index spec for index version V1.
- res = t.runCommand('createIndexes',
- {indexes: [{key: {e: 1}, name: 'e_1', 'v': 1, 'invalidField': 1}]});
- assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption);
-
- // Test that index creation fails with an index named '*'.
- res = t.runCommand('createIndexes', {indexes: [{key: {star: 1}, name: '*'}]});
- assert.commandFailedWithCode(res, ErrorCodes.BadValue);
-
- // Test that user is not allowed to create indexes in config.transactions.
- var configDB = db.getSiblingDB('config');
- res = configDB.runCommand(
- {createIndexes: 'transactions', indexes: [{key: {star: 1}, name: 'star'}]});
- assert.commandFailedWithCode(res, ErrorCodes.IllegalOperation);
-
+'use strict';
+
+var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
+
+var extractResult = function(obj) {
+ if (!isMongos)
+ return obj;
+
+ // Sample mongos format:
+ // {
+ // raw: {
+ // "localhost:30000": {
+ // createdCollectionAutomatically: false,
+ // numIndexesBefore: 3,
+ // numIndexesAfter: 5,
+ // ok: 1
+ // }
+ // },
+ // ok: 1
+ // }
+
+ var numFields = 0;
+ var result = null;
+ for (var field in obj.raw) {
+ result = obj.raw[field];
+ numFields++;
+ }
+
+ assert.neq(null, result);
+ assert.eq(1, numFields);
+ return result;
+};
+
+var checkImplicitCreate = function(createIndexResult, isMongos) {
+ let allowImplicit = !isMongos;
+ assert.eq(allowImplicit, createIndexResult.createdCollectionAutomatically);
+};
+
+var dbTest = db.getSisterDB('create_indexes_db');
+dbTest.dropDatabase();
+
+// Database does not exist
+var collDbNotExist = dbTest.create_indexes_no_db;
+var res = assert.commandWorked(
+ collDbNotExist.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
+res = extractResult(res);
+checkImplicitCreate(res, isMongos);
+assert.eq(1, res.numIndexesBefore);
+assert.eq(2, res.numIndexesAfter);
+assert.isnull(
+ res.note,
+ 'createIndexes.note should not be present in results when adding a new index: ' + tojson(res));
+
+// Collection does not exist, but database does
+var t = dbTest.create_indexes;
+var res =
+ assert.commandWorked(t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
+res = extractResult(res);
+checkImplicitCreate(res, isMongos);
+assert.eq(1, res.numIndexesBefore);
+assert.eq(2, res.numIndexesAfter);
+assert.isnull(
+ res.note,
+ 'createIndexes.note should not be present in results when adding a new index: ' + tojson(res));
+
+// Both database and collection exist
+res = assert.commandWorked(t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
+res = extractResult(res);
+assert(!res.createdCollectionAutomatically);
+assert.eq(2, res.numIndexesBefore);
+assert.eq(2,
+ res.numIndexesAfter,
+ 'numIndexesAfter missing from createIndexes result when adding a duplicate index: ' +
+ tojson(res));
+assert(res.note,
+ 'createIndexes.note should be present in results when adding a duplicate index: ' +
+ tojson(res));
+
+res = t.runCommand("createIndexes",
+ {indexes: [{key: {"x": 1}, name: "x_1"}, {key: {"y": 1}, name: "y_1"}]});
+res = extractResult(res);
+assert(!res.createdCollectionAutomatically);
+assert.eq(2, res.numIndexesBefore);
+assert.eq(3, res.numIndexesAfter);
+
+res = assert.commandWorked(t.runCommand(
+ 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
+res = extractResult(res);
+assert(!res.createdCollectionAutomatically);
+assert.eq(3, res.numIndexesBefore);
+assert.eq(5, res.numIndexesAfter);
+assert.isnull(
+ res.note,
+ 'createIndexes.note should not be present in results when adding new indexes: ' + tojson(res));
+
+res = assert.commandWorked(t.runCommand(
+ 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
+
+res = extractResult(res);
+assert.eq(5, res.numIndexesBefore);
+assert.eq(5,
+ res.numIndexesAfter,
+ 'numIndexesAfter missing from createIndexes result when adding duplicate indexes: ' +
+ tojson(res));
+assert(res.note,
+ 'createIndexes.note should be present in results when adding a duplicate index: ' +
+ tojson(res));
+
+res = t.runCommand("createIndexes", {indexes: [{}]});
+assert(!res.ok);
+
+res = t.runCommand("createIndexes", {indexes: [{}, {key: {m: 1}, name: "asd"}]});
+assert(!res.ok);
+
+assert.eq(5, t.getIndexes().length);
+
+res = t.runCommand("createIndexes", {indexes: [{key: {"c": 1}, sparse: true, name: "c_1"}]});
+assert.eq(6, t.getIndexes().length);
+assert.eq(1,
+ t.getIndexes()
+ .filter(function(z) {
+ return z.sparse;
+ })
+ .length);
+
+res = t.runCommand("createIndexes", {indexes: [{key: {"x": "foo"}, name: "x_1"}]});
+assert(!res.ok);
+
+assert.eq(6, t.getIndexes().length);
+
+res = t.runCommand("createIndexes", {indexes: [{key: {"x": 1}, name: ""}]});
+assert(!res.ok);
+
+assert.eq(6, t.getIndexes().length);
+
+// Test that v0 indexes cannot be created.
+res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 0}]});
+assert.commandFailed(res, 'v0 index creation should fail');
+
+// Test that v1 indexes can be created explicitly.
+res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 1}]});
+assert.commandWorked(res, 'v1 index creation should succeed');
+
+// Test that index creation fails with an invalid top-level field.
+res = t.runCommand('createIndexes', {indexes: [{key: {e: 1}, name: 'e_1'}], 'invalidField': 1});
+assert.commandFailedWithCode(res, ErrorCodes.BadValue);
+
+// Test that index creation fails with an invalid field in the index spec for index version V2.
+res = t.runCommand('createIndexes',
+ {indexes: [{key: {e: 1}, name: 'e_1', 'v': 2, 'invalidField': 1}]});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption);
+
+// Test that index creation fails with an invalid field in the index spec for index version V1.
+res = t.runCommand('createIndexes',
+ {indexes: [{key: {e: 1}, name: 'e_1', 'v': 1, 'invalidField': 1}]});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption);
+
+// Test that index creation fails with an index named '*'.
+res = t.runCommand('createIndexes', {indexes: [{key: {star: 1}, name: '*'}]});
+assert.commandFailedWithCode(res, ErrorCodes.BadValue);
+
+// Test that user is not allowed to create indexes in config.transactions.
+var configDB = db.getSiblingDB('config');
+res =
+ configDB.runCommand({createIndexes: 'transactions', indexes: [{key: {star: 1}, name: 'star'}]});
+assert.commandFailedWithCode(res, ErrorCodes.IllegalOperation);
}());
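
The raw-response unwrapping above exists because mongos nests each shard's reply under a "raw" field keyed by host. The same idea against a canned reply object (the host string is illustrative):

    const sample = {
        raw: {
            "localhost:30000":
                {createdCollectionAutomatically: false, numIndexesBefore: 3, numIndexesAfter: 5, ok: 1}
        },
        ok: 1
    };
    // Mirror extractResult: expect exactly one shard entry and unwrap it.
    const hosts = Object.keys(sample.raw);
    assert.eq(1, hosts.length);
    assert.eq(5, sample.raw[hosts[0]].numIndexesAfter);
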
diff --git a/jstests/core/create_indexes_with_unknown_field_names.js b/jstests/core/create_indexes_with_unknown_field_names.js
index c5cfefff3a5..2a3a0cbc9bc 100644
--- a/jstests/core/create_indexes_with_unknown_field_names.js
+++ b/jstests/core/create_indexes_with_unknown_field_names.js
@@ -3,40 +3,40 @@
* if 'ignoreUnknownIndexOptions: true' is set on the createIndexes command.
*/
(function() {
- "use strict";
+"use strict";
- db.unknown_field_names_create_indexes.drop();
- assert.commandFailedWithCode(db.runCommand({
- createIndexes: "unknown_field_names_create_indexes",
- indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}]
- }),
- ErrorCodes.InvalidIndexSpecificationOption);
+db.unknown_field_names_create_indexes.drop();
+assert.commandFailedWithCode(db.runCommand({
+ createIndexes: "unknown_field_names_create_indexes",
+ indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}]
+}),
+ ErrorCodes.InvalidIndexSpecificationOption);
- assert.commandFailedWithCode(db.runCommand({
- createIndexes: "unknown_field_names_create_indexes",
- indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}],
- ignoreUnknownIndexOptions: false
- }),
- ErrorCodes.InvalidIndexSpecificationOption);
+assert.commandFailedWithCode(db.runCommand({
+ createIndexes: "unknown_field_names_create_indexes",
+ indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}],
+ ignoreUnknownIndexOptions: false
+}),
+ ErrorCodes.InvalidIndexSpecificationOption);
- assert.commandFailedWithCode(db.runCommand({
- createIndexes: "unknown_field_names_create_indexes",
- indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}],
- ignoreUnknownIndexOptions: "badValue"
- }),
- ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(db.runCommand({
+ createIndexes: "unknown_field_names_create_indexes",
+ indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}],
+ ignoreUnknownIndexOptions: "badValue"
+}),
+ ErrorCodes.TypeMismatch);
- assert.commandWorked(db.runCommand({
- createIndexes: "unknown_field_names_create_indexes",
- indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}],
- ignoreUnknownIndexOptions: true
- }));
+assert.commandWorked(db.runCommand({
+ createIndexes: "unknown_field_names_create_indexes",
+ indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}],
+ ignoreUnknownIndexOptions: true
+}));
- // Make sure 'someField' is not in the index spec.
- let indexes = db.unknown_field_names_create_indexes.getIndexes();
- for (let index in indexes) {
- if (0 === bsonWoCompare(indexes[index].key, {x: 1})) {
- assert.eq(indexes[index].someField, undefined);
- }
+// Make sure 'someField' is not in the index spec.
+let indexes = db.unknown_field_names_create_indexes.getIndexes();
+for (let index in indexes) {
+ if (0 === bsonWoCompare(indexes[index].key, {x: 1})) {
+ assert.eq(indexes[index].someField, undefined);
}
+}
})();
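
The test above relies on the server stripping unknown index-spec fields rather than storing them. A minimal round-trip sketch of that behavior, assuming a scratch collection db.c on a test server:

    db.c.drop();
    assert.commandWorked(db.runCommand({
        createIndexes: 'c',
        indexes: [{key: {x: 1}, name: 'x_1', someField: 'someValue'}],
        ignoreUnknownIndexOptions: true
    }));
    // The unknown field never reaches the stored spec.
    db.c.getIndexes().forEach(function(spec) {
        assert.eq(undefined, spec.someField);
    });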
diff --git a/jstests/core/crud_api.js b/jstests/core/crud_api.js
index e0f099fb09a..6abd4872770 100644
--- a/jstests/core/crud_api.js
+++ b/jstests/core/crud_api.js
@@ -9,742 +9,733 @@
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-
- var crudAPISpecTests = function crudAPISpecTests() {
- "use strict";
-
-        // Get the collection
- var coll = db.crud_tests;
-
- // Setup
- function createTestExecutor(coll, method, verifyResult) {
- return function(args) {
- // Drop collection
- coll.drop();
- // Insert test data
- var r = coll.insertMany(args.insert);
- assert.eq(args.insert.length, r.insertedIds.length);
-
- // Execute the method with arguments
- r = coll[method].apply(coll, args.params);
- verifyResult(args.result, r);
-
- // Get all the results
- assert.soonNoExcept(
- function() {
- var results = coll.find({}).sort({_id: 1}).toArray();
- assert.docEq(args.expected, results);
- return true;
- },
- function() {
- return "collection never contained expected documents";
- });
- };
- }
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- function checkResultObject(first, second) {
- // Only assert on the "modifiedCount" property when write commands are enabled
- if (db.getMongo().writeMode() === 'commands') {
- assert.docEq(first, second);
- } else {
- var overrideModifiedCount = {modifiedCount: undefined};
- assert.docEq(Object.merge(first, overrideModifiedCount),
- Object.merge(second, overrideModifiedCount));
- }
- }
+var crudAPISpecTests = function crudAPISpecTests() {
+ "use strict";
- // Setup executors
- var deleteManyExecutor = createTestExecutor(coll, 'deleteMany', checkResultObject);
- var deleteOneExecutor = createTestExecutor(coll, 'deleteOne', checkResultObject);
- var bulkWriteExecutor = createTestExecutor(coll, 'bulkWrite', checkResultObject);
- var findOneAndDeleteExecutor =
- createTestExecutor(coll, 'findOneAndDelete', checkResultObject);
- var findOneAndReplaceExecutor =
- createTestExecutor(coll, 'findOneAndReplace', checkResultObject);
- var findOneAndUpdateExecutor =
- createTestExecutor(coll, 'findOneAndUpdate', checkResultObject);
- var insertManyExecutor = createTestExecutor(coll, 'insertMany', checkResultObject);
- var insertOneExecutor = createTestExecutor(coll, 'insertOne', checkResultObject);
- var replaceOneExecutor = createTestExecutor(coll, 'replaceOne', checkResultObject);
- var updateManyExecutor = createTestExecutor(coll, 'updateMany', checkResultObject);
- var updateOneExecutor = createTestExecutor(coll, 'updateOne', checkResultObject);
- var countExecutor = createTestExecutor(coll, 'count', assert.eq);
- var distinctExecutor =
- createTestExecutor(coll, 'distinct', (a, b) => assert(arrayEq(a, b)));
-
- //
- // BulkWrite
- //
-
- bulkWriteExecutor({
- insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}],
- params: [[
+    // Get the collection
+ var coll = db.crud_tests;
+
+ // Setup
+ function createTestExecutor(coll, method, verifyResult) {
+ return function(args) {
+ // Drop collection
+ coll.drop();
+ // Insert test data
+ var r = coll.insertMany(args.insert);
+ assert.eq(args.insert.length, r.insertedIds.length);
+
+ // Execute the method with arguments
+ r = coll[method].apply(coll, args.params);
+ verifyResult(args.result, r);
+
+ // Get all the results
+ assert.soonNoExcept(
+ function() {
+ var results = coll.find({}).sort({_id: 1}).toArray();
+ assert.docEq(args.expected, results);
+ return true;
+ },
+ function() {
+ return "collection never contained expected documents";
+ });
+ };
+ }
+
+ function checkResultObject(first, second) {
+ // Only assert on the "modifiedCount" property when write commands are enabled
+ if (db.getMongo().writeMode() === 'commands') {
+ assert.docEq(first, second);
+ } else {
+ var overrideModifiedCount = {modifiedCount: undefined};
+ assert.docEq(Object.merge(first, overrideModifiedCount),
+ Object.merge(second, overrideModifiedCount));
+ }
+ }
+
+ // Setup executors
+ var deleteManyExecutor = createTestExecutor(coll, 'deleteMany', checkResultObject);
+ var deleteOneExecutor = createTestExecutor(coll, 'deleteOne', checkResultObject);
+ var bulkWriteExecutor = createTestExecutor(coll, 'bulkWrite', checkResultObject);
+ var findOneAndDeleteExecutor = createTestExecutor(coll, 'findOneAndDelete', checkResultObject);
+ var findOneAndReplaceExecutor =
+ createTestExecutor(coll, 'findOneAndReplace', checkResultObject);
+ var findOneAndUpdateExecutor = createTestExecutor(coll, 'findOneAndUpdate', checkResultObject);
+ var insertManyExecutor = createTestExecutor(coll, 'insertMany', checkResultObject);
+ var insertOneExecutor = createTestExecutor(coll, 'insertOne', checkResultObject);
+ var replaceOneExecutor = createTestExecutor(coll, 'replaceOne', checkResultObject);
+ var updateManyExecutor = createTestExecutor(coll, 'updateMany', checkResultObject);
+ var updateOneExecutor = createTestExecutor(coll, 'updateOne', checkResultObject);
+ var countExecutor = createTestExecutor(coll, 'count', assert.eq);
+ var distinctExecutor = createTestExecutor(coll, 'distinct', (a, b) => assert(arrayEq(a, b)));
+
+ //
+ // BulkWrite
+ //
+
+ bulkWriteExecutor({
+ insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}],
+ params: [[
+ {insertOne: {document: {_id: 4, a: 1}}},
+ {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}},
+ {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}},
+ {deleteOne: {filter: {c: 1}}},
+ {insertOne: {document: {_id: 7, c: 2}}},
+ {deleteMany: {filter: {c: 2}}},
+ {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}}
+ ]],
+ result: {
+ acknowledged: true,
+ insertedCount: 2,
+ matchedCount: 1,
+ deletedCount: 3,
+ upsertedCount: 2,
+ insertedIds: {'0': 4, '4': 7},
+ upsertedIds: {'1': 5, '2': 6}
+ },
+ expected: [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}]
+ });
+
+ bulkWriteExecutor({
+ insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}],
+ params: [
+ [
{insertOne: {document: {_id: 4, a: 1}}},
{updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}},
{updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}},
{deleteOne: {filter: {c: 1}}},
- {insertOne: {document: {_id: 7, c: 2}}},
{deleteMany: {filter: {c: 2}}},
{replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}}
- ]],
- result: {
- acknowledged: true,
- insertedCount: 2,
- matchedCount: 1,
- deletedCount: 3,
- upsertedCount: 2,
- insertedIds: {'0': 4, '4': 7},
- upsertedIds: {'1': 5, '2': 6}
- },
- expected:
- [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}]
- });
-
- bulkWriteExecutor({
- insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}],
- params: [
- [
- {insertOne: {document: {_id: 4, a: 1}}},
- {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}},
- {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}},
- {deleteOne: {filter: {c: 1}}},
- {deleteMany: {filter: {c: 2}}},
- {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}}
- ],
- {ordered: false}
- ],
- result: {
- acknowledged: true,
- insertedCount: 1,
- matchedCount: 1,
- deletedCount: 2,
- upsertedCount: 2,
- insertedIds: {'0': 4},
- upsertedIds: {'1': 5, '2': 6}
- },
- expected:
- [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}]
- });
-
- // DeleteMany
- //
-
- // DeleteMany when many documents match
- deleteManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}],
- result: {acknowledged: true, deletedCount: 2},
- expected: [{_id: 1, x: 11}]
- });
- // DeleteMany when no document matches
- deleteManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}],
- result: {acknowledged: true, deletedCount: 0},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // DeleteMany when many documents match, no write concern
- deleteManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}]
- });
-
- //
- // DeleteOne
- //
-
- // DeleteOne when many documents match
- deleteOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}],
- result: {acknowledged: true, deletedCount: 1},
- expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
- });
- // DeleteOne when one document matches
- deleteOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 2}],
- result: {acknowledged: true, deletedCount: 1},
- expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
- });
- // DeleteOne when no documents match
- deleteOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}],
- result: {acknowledged: true, deletedCount: 0},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // DeleteOne when many documents match, no write concern
- deleteOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
- });
-
- //
- // FindOneAndDelete
- //
-
- // FindOneAndDelete when one document matches
- findOneAndDeleteExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 2}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: {x: 33},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
- });
- // FindOneAndDelete when one document matches
- findOneAndDeleteExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 2}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: {x: 22},
- expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
- });
- // FindOneAndDelete when no documents match
- findOneAndDeleteExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
-
- //
- // FindOneAndReplace
- //
-
- // FindOneAndReplace when many documents match returning the document before modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: {x: 22},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
- });
- // FindOneAndReplace when many documents match returning the document after modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: {$gt: 1}},
- {x: 32},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
- ],
- result: {x: 32},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
- });
- // FindOneAndReplace when one document matches returning the document before modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 2}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: {x: 22},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
- });
- // FindOneAndReplace when one document matches returning the document after modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 2},
- {x: 32},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
- ],
- result: {x: 32},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
- });
- // FindOneAndReplace when no documents match returning the document before modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // FindOneAndReplace when no documents match with upsert returning the document before
- // modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}]
- });
- // FindOneAndReplace when no documents match returning the document after modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 4},
- {x: 44},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
- ],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // FindOneAndReplace when no documents match with upsert returning the document after
- // modification
- findOneAndReplaceExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 4},
- {x: 44},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true}
- ],
- result: {x: 44},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}]
- });
-
- assert.throws(function() {
- coll.findOneAndReplace({a: 1}, {$set: {b: 1}});
- });
-
- //
- // FindOneAndUpdate
- //
-
- // FindOneAndUpdate when many documents match returning the document before modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: {x: 22},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
- });
- // FindOneAndUpdate when many documents match returning the document after modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: {$gt: 1}},
- {$inc: {x: 1}},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
- ],
- result: {x: 23},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
- });
- // FindOneAndUpdate when one document matches returning the document before modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 2}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: {x: 22},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
- });
- // FindOneAndUpdate when one document matches returning the document after modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 2},
- {$inc: {x: 1}},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
- ],
- result: {x: 23},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
- });
- // FindOneAndUpdate when no documents match returning the document before modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // FindOneAndUpdate when no documents match with upsert returning the document before
- // modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 4},
- {$inc: {x: 1}},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}
- ],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
- // FindOneAndUpdate when no documents match returning the document after modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 4},
- {$inc: {x: 1}},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
- ],
- result: null,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // FindOneAndUpdate when no documents match with upsert returning the document after
- // modification
- findOneAndUpdateExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [
- {_id: 4},
- {$inc: {x: 1}},
- {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true}
],
- result: {x: 1},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
-
- assert.throws(function() {
- coll.findOneAndUpdate({a: 1}, {});
- });
-
- assert.throws(function() {
- coll.findOneAndUpdate({a: 1}, {b: 1});
- });
-
- //
- // InsertMany
- //
-
- // InsertMany with non-existing documents
- insertManyExecutor({
- insert: [{_id: 1, x: 11}],
- params: [[{_id: 2, x: 22}, {_id: 3, x: 33}]],
- result: {acknowledged: true, insertedIds: [2, 3]},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // InsertMany with non-existing documents, no write concern
- insertManyExecutor({
- insert: [{_id: 1, x: 11}],
- params: [[{_id: 2, x: 22}, {_id: 3, x: 33}], {w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
-
- //
- // InsertOne
- //
-
- // InsertOne with non-existing documents
- insertOneExecutor({
- insert: [{_id: 1, x: 11}],
- params: [{_id: 2, x: 22}],
- result: {acknowledged: true, insertedId: 2},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
- });
- // InsertOne with non-existing documents, no write concern
- insertOneExecutor({
- insert: [{_id: 1, x: 11}],
- params: [{_id: 2, x: 22}, {w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
- });
-
- //
- // ReplaceOne
- //
-
- // ReplaceOne when many documents match
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {x: 111}],
- result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 111}, {_id: 3, x: 33}]
- });
- // ReplaceOne when one document matches
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 1}, {_id: 1, x: 111}],
- result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
- expected: [{_id: 1, x: 111}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // ReplaceOne when no documents match
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {_id: 4, x: 1}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // ReplaceOne with upsert when no documents match without an id specified
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {x: 1}, {upsert: true}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
- // ReplaceOne with upsert when no documents match with an id specified
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
- // ReplaceOne with upsert when no documents match with an id specified, no write concern
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
- // ReplaceOne with upsert when no documents match with an id specified, no write concern
- replaceOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, writeConcern: {w: 0}}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
-
- assert.throws(function() {
- coll.replaceOne({a: 1}, {$set: {b: 1}});
- });
-
- //
- // UpdateMany
- //
-
- // UpdateMany when many documents match
- updateManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {$inc: {x: 1}}],
- result: {acknowledged: true, matchedCount: 2, modifiedCount: 2},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 34}]
- });
- // UpdateMany when one document matches
- updateManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 1}, {$inc: {x: 1}}],
- result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
- expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // UpdateMany when no documents match
- updateManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {$inc: {x: 1}}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // UpdateMany with upsert when no documents match
- updateManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
- // UpdateMany with upsert when no documents match, no write concern
- updateManyExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true, w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
-
- assert.throws(function() {
- coll.updateMany({a: 1}, {});
- });
-
- assert.throws(function() {
- coll.updateMany({a: 1}, {b: 1});
- });
-
- //
- // UpdateOne
- //
-
- // UpdateOne when many documents match
- updateOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {$inc: {x: 1}}],
- result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
- });
- // UpdateOne when one document matches
- updateOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 1}, {$inc: {x: 1}}],
- result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
- expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // UpdateOne when no documents match
- updateOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {$inc: {x: 1}}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
-
- // UpdateOne with upsert when no documents match
- updateOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}],
- result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
- });
- // UpdateOne when many documents match, no write concern
- updateOneExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {w: 0}],
- result: {acknowledged: false},
- expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
- });
-
- assert.throws(function() {
- coll.updateOne({a: 1}, {});
- });
-
- assert.throws(function() {
- coll.updateOne({a: 1}, {b: 1});
- });
-
- //
- // Count
- //
-
- // Simple count of all elements
- countExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{}],
- result: 3,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple count no arguments
- countExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [],
- result: 3,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple count filtered
- countExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{_id: {$gt: 1}}],
- result: 2,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple count of all elements, applying limit
- countExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{}, {limit: 1}],
- result: 1,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple count of all elements, applying skip
- countExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{}, {skip: 1}],
- result: 2,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple count no arguments, applying hint
- countExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: [{}, {hint: {"_id": 1}}],
- result: 3,
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
-
- //
- // Distinct
- //
-
- // Simple distinct of field x no filter
- distinctExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: ['x'],
- result: [11, 22, 33],
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple distinct of field x
- distinctExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: ['x', {}],
- result: [11, 22, 33],
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple distinct of field x filtered
- distinctExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: ['x', {x: {$gt: 11}}],
- result: [22, 33],
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
- // Simple distinct of field x filtered with maxTimeMS
- distinctExecutor({
- insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
- params: ['x', {x: {$gt: 11}}, {maxTimeMS: 100000}],
- result: [22, 33],
- expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
- });
-
- //
- // Find
- //
-
- coll.deleteMany({});
- // Insert all of them
- coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]);
-
- // Simple projection
- var result =
- coll.find({}).sort({a: 1}).limit(1).skip(1).projection({_id: 0, a: 1}).toArray();
- assert.docEq(result, [{a: 1}]);
-
- // Simple tailable cursor
- var cursor = coll.find({}).sort({a: 1}).tailable();
- assert.eq(34, (cursor._options & ~DBQuery.Option.slaveOk));
- var cursor = coll.find({}).sort({a: 1}).tailable(false);
- assert.eq(2, (cursor._options & ~DBQuery.Option.slaveOk));
-
- // Check modifiers
- var cursor = coll.find({}).modifiers({$hint: 'a_1'});
- assert.eq('a_1', cursor._query['$hint']);
-
- // allowPartialResults
- var cursor = coll.find({}).allowPartialResults();
- assert.eq(128, (cursor._options & ~DBQuery.Option.slaveOk));
-
- // noCursorTimeout
- var cursor = coll.find({}).noCursorTimeout();
- assert.eq(16, (cursor._options & ~DBQuery.Option.slaveOk));
-
- // oplogReplay
- var cursor = coll.find({}).oplogReplay();
- assert.eq(8, (cursor._options & ~DBQuery.Option.slaveOk));
-
- //
- // Aggregation
- //
-
- coll.deleteMany({});
- // Insert all of them
- coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]);
-
- // Simple aggregation with useCursor
- var result = coll.aggregate([{$match: {}}], {useCursor: true}).toArray();
- assert.eq(2, result.length);
-
- // Simple aggregation with batchSize
- var result = coll.aggregate([{$match: {}}], {batchSize: 2}).toArray();
- assert.eq(2, result.length);
-
- // Drop collection
- coll.drop();
- coll.ensureIndex({a: 1}, {unique: true});
-
- // Should throw duplicate key error
- assert.throws(function() {
- coll.insertMany([{a: 0, b: 0}, {a: 0, b: 1}]);
- });
-
- assert(coll.findOne({a: 0, b: 0}) != null);
- assert.throws(function() {
- coll.insertOne({a: 0, b: 0});
- });
-
- assert.throws(function() {
- coll.updateOne({b: 2}, {$set: {a: 0}}, {upsert: true});
- });
-
- assert.throws(function() {
- coll.updateMany({b: 2}, {$set: {a: 0}}, {upsert: true});
- });
-
- assert.throws(function() {
- coll.deleteOne({$invalidFieldName: {a: 1}});
- });
-
- assert.throws(function() {
- coll.deleteMany({$set: {a: 1}});
- });
-
- assert.throws(function() {
- coll.bulkWrite([{insertOne: {document: {_id: 4, a: 0}}}]);
- });
- };
-
- crudAPISpecTests();
+ {ordered: false}
+ ],
+ result: {
+ acknowledged: true,
+ insertedCount: 1,
+ matchedCount: 1,
+ deletedCount: 2,
+ upsertedCount: 2,
+ insertedIds: {'0': 4},
+ upsertedIds: {'1': 5, '2': 6}
+ },
+ expected: [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}]
+ });
+
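+    // Note: the two bulkWrite cases above differ in two ways: the second op list omits the
+    // {_id: 7} insertOne (hence one fewer insert and one fewer matching delete in the result
+    // counts), and it passes {ordered: false}, which allows the server to continue past
+    // individual failures and to apply the operations in any order.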
+    //
+    // DeleteMany
+ //
+
+ // DeleteMany when many documents match
+ deleteManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}],
+ result: {acknowledged: true, deletedCount: 2},
+ expected: [{_id: 1, x: 11}]
+ });
+ // DeleteMany when no document matches
+ deleteManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}],
+ result: {acknowledged: true, deletedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // DeleteMany when many documents match, no write concern
+ deleteManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}]
+ });
+
+ //
+ // DeleteOne
+ //
+
+ // DeleteOne when many documents match
+ deleteOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}],
+ result: {acknowledged: true, deletedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
+ });
+ // DeleteOne when one document matches
+ deleteOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}],
+ result: {acknowledged: true, deletedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
+ });
+ // DeleteOne when no documents match
+ deleteOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}],
+ result: {acknowledged: true, deletedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // DeleteOne when many documents match, no write concern
+ deleteOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
+ });
+
+ //
+ // FindOneAndDelete
+ //
+
+ // FindOneAndDelete when one document matches
+ findOneAndDeleteExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 2}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 33},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
+ });
+ // FindOneAndDelete when one document matches
+ findOneAndDeleteExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
+ });
+ // FindOneAndDelete when no documents match
+ findOneAndDeleteExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+
+ //
+ // FindOneAndReplace
+ //
+
+ // FindOneAndReplace when many documents match returning the document before modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
+ });
+ // FindOneAndReplace when many documents match returning the document after modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: {$gt: 1}},
+ {x: 32},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
+ ],
+ result: {x: 32},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
+ });
+ // FindOneAndReplace when one document matches returning the document before modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
+ });
+ // FindOneAndReplace when one document matches returning the document after modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 2},
+ {x: 32},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
+ ],
+ result: {x: 32},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
+ });
+ // FindOneAndReplace when no documents match returning the document before modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // FindOneAndReplace when no documents match with upsert returning the document before
+ // modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}]
+ });
+ // FindOneAndReplace when no documents match returning the document after modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {x: 44},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
+ ],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // FindOneAndReplace when no documents match with upsert returning the document after
+ // modification
+ findOneAndReplaceExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {x: 44},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true}
+ ],
+ result: {x: 44},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}]
+ });
+
+ assert.throws(function() {
+ coll.findOneAndReplace({a: 1}, {$set: {b: 1}});
+ });
+
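+    // findOneAndReplace requires a full replacement document, so a document whose top-level
+    // fields are update operators such as $set is rejected client-side, which is what the
+    // assert.throws above exercises.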
+ //
+ // FindOneAndUpdate
+ //
+
+ // FindOneAndUpdate when many documents match returning the document before modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
+ });
+ // FindOneAndUpdate when many documents match returning the document after modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: {$gt: 1}},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
+ ],
+ result: {x: 23},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
+ });
+ // FindOneAndUpdate when one document matches returning the document before modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
+ });
+ // FindOneAndUpdate when one document matches returning the document after modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 2},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
+ ],
+ result: {x: 23},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
+ });
+ // FindOneAndUpdate when no documents match returning the document before modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // FindOneAndUpdate when no documents match with upsert returning the document before
+ // modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params:
+ [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+ // FindOneAndUpdate when no documents match returning the document after modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
+ ],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // FindOneAndUpdate when no documents match with upsert returning the document after
+ // modification
+ findOneAndUpdateExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true}
+ ],
+ result: {x: 1},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+
+ assert.throws(function() {
+ coll.findOneAndUpdate({a: 1}, {});
+ });
+
+ assert.throws(function() {
+ coll.findOneAndUpdate({a: 1}, {b: 1});
+ });
+
+ //
+ // InsertMany
+ //
+
+ // InsertMany with non-existing documents
+ insertManyExecutor({
+ insert: [{_id: 1, x: 11}],
+ params: [[{_id: 2, x: 22}, {_id: 3, x: 33}]],
+ result: {acknowledged: true, insertedIds: [2, 3]},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // InsertMany with non-existing documents, no write concern
+ insertManyExecutor({
+ insert: [{_id: 1, x: 11}],
+ params: [[{_id: 2, x: 22}, {_id: 3, x: 33}], {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+
+ //
+ // InsertOne
+ //
+
+ // InsertOne with non-existing documents
+ insertOneExecutor({
+ insert: [{_id: 1, x: 11}],
+ params: [{_id: 2, x: 22}],
+ result: {acknowledged: true, insertedId: 2},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
+ });
+ // InsertOne with non-existing documents, no write concern
+ insertOneExecutor({
+ insert: [{_id: 1, x: 11}],
+ params: [{_id: 2, x: 22}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
+ });
+
+ //
+ // ReplaceOne
+ //
+
+ // ReplaceOne when many documents match
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {x: 111}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 111}, {_id: 3, x: 33}]
+ });
+ // ReplaceOne when one document matches
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 1}, {_id: 1, x: 111}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 111}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // ReplaceOne when no documents match
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // ReplaceOne with upsert when no documents match without an id specified
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {x: 1}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+ // ReplaceOne with upsert when no documents match with an id specified
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+ // ReplaceOne with upsert when no documents match with an id specified, no write concern
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+ // ReplaceOne with upsert when no documents match with an id specified, no write concern
+ replaceOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, writeConcern: {w: 0}}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+
+ assert.throws(function() {
+ coll.replaceOne({a: 1}, {$set: {b: 1}});
+ });
+
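+    // Same client-side validation as findOneAndReplace above: replaceOne takes a full
+    // replacement document, so {$set: {b: 1}} throws rather than being sent to the server.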
+ //
+ // UpdateMany
+ //
+
+ // UpdateMany when many documents match
+ updateManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 2, modifiedCount: 2},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 34}]
+ });
+ // UpdateMany when one document matches
+ updateManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 1}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // UpdateMany when no documents match
+ updateManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // UpdateMany with upsert when no documents match
+ updateManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+ // UpdateMany with upsert when no documents match, no write concern
+ updateManyExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true, w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+
+ assert.throws(function() {
+ coll.updateMany({a: 1}, {});
+ });
+
+ assert.throws(function() {
+ coll.updateMany({a: 1}, {b: 1});
+ });
+
+ //
+ // UpdateOne
+ //
+
+ // UpdateOne when many documents match
+ updateOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
+ });
+ // UpdateOne when one document matches
+ updateOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 1}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // UpdateOne when no documents match
+ updateOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+
+ // UpdateOne with upsert when no documents match
+ updateOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
+ });
+ // UpdateOne when many documents match, no write concern
+ updateOneExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
+ });
+
+ assert.throws(function() {
+ coll.updateOne({a: 1}, {});
+ });
+
+ assert.throws(function() {
+ coll.updateOne({a: 1}, {b: 1});
+ });
+
+ //
+ // Count
+ //
+
+ // Simple count of all elements
+ countExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}],
+ result: 3,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple count no arguments
+ countExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [],
+ result: 3,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple count filtered
+ countExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}],
+ result: 2,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple count of all elements, applying limit
+ countExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}, {limit: 1}],
+ result: 1,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple count of all elements, applying skip
+ countExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}, {skip: 1}],
+ result: 2,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple count no arguments, applying hint
+ countExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}, {hint: {"_id": 1}}],
+ result: 3,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+
+ //
+ // Distinct
+ //
+
+ // Simple distinct of field x no filter
+ distinctExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x'],
+ result: [11, 22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple distinct of field x
+ distinctExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x', {}],
+ result: [11, 22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple distinct of field x filtered
+ distinctExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x', {x: {$gt: 11}}],
+ result: [22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+ // Simple distinct of field x filtered with maxTimeMS
+ distinctExecutor({
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x', {x: {$gt: 11}}, {maxTimeMS: 100000}],
+ result: [22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
+ });
+
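+    // maxTimeMS is a server-side execution time limit on the distinct command itself; the
+    // generous 100000 ms value exercises the option without making the test timing-sensitive.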
+ //
+ // Find
+ //
+
+ coll.deleteMany({});
+ // Insert all of them
+ coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]);
+
+ // Simple projection
+ var result = coll.find({}).sort({a: 1}).limit(1).skip(1).projection({_id: 0, a: 1}).toArray();
+ assert.docEq(result, [{a: 1}]);
+
+ // Simple tailable cursor
+ var cursor = coll.find({}).sort({a: 1}).tailable();
+ assert.eq(34, (cursor._options & ~DBQuery.Option.slaveOk));
+ var cursor = coll.find({}).sort({a: 1}).tailable(false);
+ assert.eq(2, (cursor._options & ~DBQuery.Option.slaveOk));
+
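+    // The magic numbers in these cursor checks are OP_QUERY wire-protocol flag bits (assuming
+    // the classic DBQuery.Option values: tailable=2, slaveOk=4, oplogReplay=8, noTimeout=16,
+    // awaitData=32, partial=128), so tailable() sets tailable|awaitData = 2 | 32 = 34 and
+    // tailable(false) leaves just the tailable bit, 2.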
+ // Check modifiers
+ var cursor = coll.find({}).modifiers({$hint: 'a_1'});
+ assert.eq('a_1', cursor._query['$hint']);
+
+ // allowPartialResults
+ var cursor = coll.find({}).allowPartialResults();
+ assert.eq(128, (cursor._options & ~DBQuery.Option.slaveOk));
+
+ // noCursorTimeout
+ var cursor = coll.find({}).noCursorTimeout();
+ assert.eq(16, (cursor._options & ~DBQuery.Option.slaveOk));
+
+ // oplogReplay
+ var cursor = coll.find({}).oplogReplay();
+ assert.eq(8, (cursor._options & ~DBQuery.Option.slaveOk));
+
+ //
+ // Aggregation
+ //
+
+ coll.deleteMany({});
+ // Insert all of them
+ coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]);
+
+ // Simple aggregation with useCursor
+ var result = coll.aggregate([{$match: {}}], {useCursor: true}).toArray();
+ assert.eq(2, result.length);
+
+ // Simple aggregation with batchSize
+ var result = coll.aggregate([{$match: {}}], {batchSize: 2}).toArray();
+ assert.eq(2, result.length);
+
+ // Drop collection
+ coll.drop();
+ coll.ensureIndex({a: 1}, {unique: true});
+
+ // Should throw duplicate key error
+ assert.throws(function() {
+ coll.insertMany([{a: 0, b: 0}, {a: 0, b: 1}]);
+ });
+
+ assert(coll.findOne({a: 0, b: 0}) != null);
+ assert.throws(function() {
+ coll.insertOne({a: 0, b: 0});
+ });
+
+ assert.throws(function() {
+ coll.updateOne({b: 2}, {$set: {a: 0}}, {upsert: true});
+ });
+
+ assert.throws(function() {
+ coll.updateMany({b: 2}, {$set: {a: 0}}, {upsert: true});
+ });
+
+ assert.throws(function() {
+ coll.deleteOne({$invalidFieldName: {a: 1}});
+ });
+
+ assert.throws(function() {
+ coll.deleteMany({$set: {a: 1}});
+ });
+
+ assert.throws(function() {
+ coll.bulkWrite([{insertOne: {document: {_id: 4, a: 0}}}]);
+ });
+};
+
+crudAPISpecTests();
})();
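
The createTestExecutor pattern above is a compact way to table-drive CRUD tests. A standalone sketch of the same drop/insert/execute/verify loop, assuming a scratch collection db.c:

    function makeExecutor(coll, method, verify) {
        return function(testCase) {
            coll.drop();                                 // start from a clean collection
            var res = coll.insertMany(testCase.insert);  // seed the fixture documents
            assert.eq(testCase.insert.length, res.insertedIds.length);
            verify(testCase.result, coll[method].apply(coll, testCase.params));
            assert.docEq(testCase.expected, coll.find({}).sort({_id: 1}).toArray());
        };
    }
    var deleteOneExecutor = makeExecutor(db.c, 'deleteOne', assert.docEq);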
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index 9345f900596..636fdee2cb3 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -9,48 +9,48 @@
*/
(function() {
- "use strict";
- const coll = db.jstests_currentop;
- coll.drop();
+"use strict";
+const coll = db.jstests_currentop;
+coll.drop();
- // We fsync+lock the server to cause all subsequent write operations to block.
- assert.commandWorked(db.fsyncLock());
+// We fsync+lock the server to cause all subsequent write operations to block.
+assert.commandWorked(db.fsyncLock());
- const awaitInsertShell = startParallelShell(function() {
- assert.writeOK(db.jstests_currentop.insert({}));
- });
+const awaitInsertShell = startParallelShell(function() {
+ assert.writeOK(db.jstests_currentop.insert({}));
+});
- // Wait until the write appears in the currentOp output reporting that it is waiting for a lock.
- assert.soon(
- function() {
- var lock_type = "";
- if (jsTest.options().storageEngine === "mobile") {
- lock_type = "W";
- } else {
- lock_type = "w";
- }
- const ops = db.currentOp({
- $and: [
- {"locks.Global": lock_type, waitingForLock: true},
- // Depending on whether CurOp::setNS_inlock() has been called, the "ns" field
- // may either be the full collection name or the command namespace.
- {
- $or: [
- {ns: coll.getFullName()},
- {ns: db.$cmd.getFullName(), "command.insert": coll.getName()}
- ]
- },
- {type: "op"}
- ]
- });
- return ops.inprog.length === 1;
- },
- function() {
- return "Failed to find blocked insert in currentOp() output: " + tojson(db.currentOp());
+// Wait until the write appears in the currentOp output reporting that it is waiting for a lock.
+assert.soon(
+ function() {
+ var lock_type = "";
+ if (jsTest.options().storageEngine === "mobile") {
+ lock_type = "W";
+ } else {
+ lock_type = "w";
+ }
+ const ops = db.currentOp({
+ $and: [
+ {"locks.Global": lock_type, waitingForLock: true},
+ // Depending on whether CurOp::setNS_inlock() has been called, the "ns" field
+ // may either be the full collection name or the command namespace.
+ {
+ $or: [
+ {ns: coll.getFullName()},
+ {ns: db.$cmd.getFullName(), "command.insert": coll.getName()}
+ ]
+ },
+ {type: "op"}
+ ]
});
+ return ops.inprog.length === 1;
+ },
+ function() {
+ return "Failed to find blocked insert in currentOp() output: " + tojson(db.currentOp());
+ });
- // Unlock the server and make sure the write finishes.
- const fsyncResponse = assert.commandWorked(db.fsyncUnlock());
- assert.eq(fsyncResponse.lockCount, 0);
- awaitInsertShell();
+// Unlock the server and make sure the write finishes.
+const fsyncResponse = assert.commandWorked(db.fsyncUnlock());
+assert.eq(fsyncResponse.lockCount, 0);
+awaitInsertShell();
}());
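
The fsyncLock/parallel-shell/currentOp sequence above is a reusable pattern for observing a blocked write. A stripped-down sketch, assuming a standalone mongod and a collection named c:

    assert.commandWorked(db.fsyncLock());     // block all subsequent writes
    const join = startParallelShell(function() {
        assert.writeOK(db.c.insert({}));      // queues behind the fsync lock
    });
    assert.soon(function() {                  // wait until currentOp reports it blocked
        return db.currentOp({waitingForLock: true, 'command.insert': 'c'}).inprog.length === 1;
    });
    assert.commandWorked(db.fsyncUnlock());   // release the lock; the insert completes
    join();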
diff --git a/jstests/core/currentop_cursors.js b/jstests/core/currentop_cursors.js
index 5db0c413f85..9cc7e37dcb2 100644
--- a/jstests/core/currentop_cursors.js
+++ b/jstests/core/currentop_cursors.js
@@ -6,256 +6,241 @@
*/
(function() {
- "use strict";
- const coll = db.jstests_currentop_cursors;
- // Will skip lsid tests if not in commands read mode.
- const commandReadMode = db.getMongo().readMode() == "commands";
-
- load("jstests/libs/fixture_helpers.js"); // for FixtureHelpers
-
-    // Avoid using the shell helper so as not to trigger implicit collection recreation.
- db.runCommand({drop: coll.getName()});
- assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1000}));
- for (let i = 0; i < 30; ++i) {
- assert.commandWorked(coll.insert({"val": i}));
- }
- /**
- * runTest creates a new collection called jstests_currentop_cursors and then runs the provided
- * find query. It calls $currentOp and does some basic assertions to make sure idleCursors is
- * behaving as intended in each case.
- * findFunc: A function that runs a find query. Is expected to return a cursorID.
- * Arbitrary code can be run in findFunc as long as it returns a cursorID.
- * assertFunc: A function that runs assertions against the results of the $currentOp.
- * Takes the following arguments
- * 'findOut': The cursorID returned from findFunc.
-     * 'result': The results from running $currentOp as an array of JSON objects.
- * Arbitrary code can be run in assertFunc, and there is no return value needed.
- */
- function runTest({findFunc, assertFunc}) {
- const adminDB = db.getSiblingDB("admin");
- const findOut = findFunc();
- const result =
- adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}}
- ])
- .toArray();
- assert.eq(result[0].ns, coll.getFullName(), result);
- assert.eq(result[0].cursor.originatingCommand.find, coll.getName(), result);
- assertFunc(findOut, result);
- const noIdle =
- adminDB
- .aggregate([
- {$currentOp: {allUsers: false, idleCursors: false}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}}
- ])
- .toArray();
- assert.eq(noIdle.length, 0, tojson(noIdle));
- const noFlag =
- adminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: {type: "idleCursor"}}])
- .toArray();
-
-        assert.eq(noFlag.length, 0, tojson(noFlag));
- }
-
- // Basic test with default values.
- runTest({
- findFunc: function() {
- return assert
- .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
- .cursor.id;
- },
- assertFunc: function(cursorId, result) {
- assert.eq(result.length, 1, result);
- // Plan summary does not exist on mongos, so skip this test on mongos.
- if (!FixtureHelpers.isMongos(db)) {
- assert.eq(result[0].planSummary, "COLLSCAN", result);
- } else {
- assert(!result[0].hasOwnProperty("planSummary"), result);
- }
- // Lsid will not exist if not in command read mode.
- if (commandReadMode) {
- assert(result[0].lsid.hasOwnProperty('id'), result);
- assert(result[0].lsid.hasOwnProperty('uid'), result);
- }
- const uri = new MongoURI(db.getMongo().host);
- assert(uri.servers.some((server) => {
- return result[0].host == getHostName() + ":" + server.port;
- }));
- const idleCursor = result[0].cursor;
- assert.eq(idleCursor.nDocsReturned, 2, result);
- assert.eq(idleCursor.nBatchesReturned, 1, result);
- assert.eq(idleCursor.tailable, false, result);
- assert.eq(idleCursor.awaitData, false, result);
- assert.eq(idleCursor.noCursorTimeout, false, result);
- assert.eq(idleCursor.originatingCommand.batchSize, 2, result);
- assert.lte(idleCursor.createdDate, idleCursor.lastAccessDate, result);
- // Make sure that the top level fields do not also appear in the cursor subobject.
- assert(!idleCursor.hasOwnProperty("planSummary"), result);
- assert(!idleCursor.hasOwnProperty('host'), result);
- assert(!idleCursor.hasOwnProperty('lsid'), result);
+"use strict";
+const coll = db.jstests_currentop_cursors;
+// Will skip lsid tests if not in commands read mode.
+const commandReadMode = db.getMongo().readMode() == "commands";
+
+load("jstests/libs/fixture_helpers.js"); // for FixtureHelpers
+
+// Avoid using the shell helper so as not to trigger implicit collection recreation.
+db.runCommand({drop: coll.getName()});
+assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1000}));
+for (let i = 0; i < 30; ++i) {
+ assert.commandWorked(coll.insert({"val": i}));
+}
+/**
+ * runTest runs the provided find query against the jstests_currentop_cursors collection
+ * created above, then calls $currentOp and makes some basic assertions to verify that
+ * idleCursors behaves as intended in each case.
+ * findFunc: A function that runs a find query and is expected to return a cursorID.
+ * Arbitrary code can be run in findFunc as long as it returns a cursorID.
+ * assertFunc: A function that runs assertions against the results of the $currentOp.
+ * It takes the following arguments:
+ * 'findOut': The cursorID returned from findFunc.
+ * 'result': The results from running $currentOp, as an array of JSON objects.
+ * Arbitrary code can be run in assertFunc; no return value is needed.
+ */
+function runTest({findFunc, assertFunc}) {
+ const adminDB = db.getSiblingDB("admin");
+ const findOut = findFunc();
+ const result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}}
+ ])
+ .toArray();
+ assert.eq(result[0].ns, coll.getFullName(), result);
+ assert.eq(result[0].cursor.originatingCommand.find, coll.getName(), result);
+ assertFunc(findOut, result);
+ const noIdle = adminDB
+ .aggregate([
+ {$currentOp: {allUsers: false, idleCursors: false}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}}
+ ])
+ .toArray();
+ assert.eq(noIdle.length, 0, tojson(noIdle));
+ const noFlag =
+ adminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: {type: "idleCursor"}}])
+ .toArray();
+
+ assert.eq(noFlag.length, 0, tojson(noFlag));
+}
+
+// Basic test with default values.
+runTest({
+ findFunc: function() {
+ return assert
+ .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
+ .cursor.id;
+ },
+ assertFunc: function(cursorId, result) {
+ assert.eq(result.length, 1, result);
+ // Plan summary does not exist on mongos, so skip this check there.
+ if (!FixtureHelpers.isMongos(db)) {
+ assert.eq(result[0].planSummary, "COLLSCAN", result);
+ } else {
+ assert(!result[0].hasOwnProperty("planSummary"), result);
}
- });
-
- // Test that tailable, awaitData, and noCursorTimeout are set.
- runTest({
- findFunc: function() {
- return assert
- .commandWorked(db.runCommand({
- find: "jstests_currentop_cursors",
- batchSize: 2,
- tailable: true,
- awaitData: true,
- noCursorTimeout: true
- }))
- .cursor.id;
- },
- assertFunc: function(cursorId, result) {
-
- assert.eq(result.length, 1, result);
- const idleCursor = result[0].cursor;
- assert.eq(idleCursor.tailable, true, result);
- assert.eq(idleCursor.awaitData, true, result);
- assert.eq(idleCursor.noCursorTimeout, true, result);
- assert.eq(idleCursor.originatingCommand.batchSize, 2, result);
+ // Lsid will not exist if not in commands read mode.
+ if (commandReadMode) {
+ assert(result[0].lsid.hasOwnProperty('id'), result);
+ assert(result[0].lsid.hasOwnProperty('uid'), result);
}
- });
-
- // Test that dates are set correctly.
- runTest({
- findFunc: function() {
- return assert
- .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
- .cursor.id;
- },
- assertFunc: function(cursorId, result) {
- const adminDB = db.getSiblingDB("admin");
- // Make sure the two cursors have different creation times.
- assert.soon(() => {
- const secondCursor = assert.commandWorked(
- db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}));
+ const uri = new MongoURI(db.getMongo().host);
+ assert(uri.servers.some((server) => {
+ return result[0].host == getHostName() + ":" + server.port;
+ }));
+ const idleCursor = result[0].cursor;
+ assert.eq(idleCursor.nDocsReturned, 2, result);
+ assert.eq(idleCursor.nBatchesReturned, 1, result);
+ assert.eq(idleCursor.tailable, false, result);
+ assert.eq(idleCursor.awaitData, false, result);
+ assert.eq(idleCursor.noCursorTimeout, false, result);
+ assert.eq(idleCursor.originatingCommand.batchSize, 2, result);
+ assert.lte(idleCursor.createdDate, idleCursor.lastAccessDate, result);
+ // Make sure that the top level fields do not also appear in the cursor subobject.
+ assert(!idleCursor.hasOwnProperty("planSummary"), result);
+ assert(!idleCursor.hasOwnProperty('host'), result);
+ assert(!idleCursor.hasOwnProperty('lsid'), result);
+ }
+});
+
+// Test that tailable, awaitData, and noCursorTimeout are set.
+runTest({
+ findFunc: function() {
+ return assert
+ .commandWorked(db.runCommand({
+ find: "jstests_currentop_cursors",
+ batchSize: 2,
+ tailable: true,
+ awaitData: true,
+ noCursorTimeout: true
+ }))
+ .cursor.id;
+ },
+ assertFunc: function(cursorId, result) {
+ assert.eq(result.length, 1, result);
+ const idleCursor = result[0].cursor;
+ assert.eq(idleCursor.tailable, true, result);
+ assert.eq(idleCursor.awaitData, true, result);
+ assert.eq(idleCursor.noCursorTimeout, true, result);
+ assert.eq(idleCursor.originatingCommand.batchSize, 2, result);
+ }
+});
+
+// Test that dates are set correctly.
+runTest({
+ findFunc: function() {
+ return assert
+ .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
+ .cursor.id;
+ },
+ assertFunc: function(cursorId, result) {
+ const adminDB = db.getSiblingDB("admin");
+ // Make sure the two cursors have different creation times.
+ assert.soon(() => {
+ const secondCursor = assert.commandWorked(
+ db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}));
- const secondResult =
- adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {
- $match: {
- $and: [
- {type: "idleCursor"},
- {"cursor.cursorId": secondCursor.cursor.id}
- ]
- }
+ const secondResult =
+ adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {
+ $match: {
+ $and: [
+ {type: "idleCursor"},
+ {"cursor.cursorId": secondCursor.cursor.id}
+ ]
}
- ])
- .toArray();
- return result[0].cursor.createdDate < secondResult[0].cursor.createdDate;
- });
- }
- });
+ }
+ ])
+ .toArray();
+ return result[0].cursor.createdDate < secondResult[0].cursor.createdDate;
+ });
+ }
+});
+
+// Test larger batch size.
+runTest({
+ findFunc: function() {
+ return assert
+ .commandWorked(db.runCommand(
+ {find: "jstests_currentop_cursors", batchSize: 4, noCursorTimeout: true}))
+ .cursor.id;
+ },
+ assertFunc: function(cursorId, result) {
+ const idleCursor = result[0].cursor;
+ assert.eq(result.length, 1, result);
+ assert.eq(idleCursor.nDocsReturned, 4, result);
+ assert.eq(idleCursor.nBatchesReturned, 1, result);
+ assert.eq(idleCursor.noCursorTimeout, true, result);
+ assert.eq(idleCursor.originatingCommand.batchSize, 4, result);
+ }
+});
+
+// Test batchSize and nDocs are incremented correctly.
+runTest({
+ findFunc: function() {
+ return assert
+ .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
+ .cursor.id;
+ },
+ assertFunc: function(cursorId, result) {
+ const adminDB = db.getSiblingDB("admin");
+ const originalAccess = result[0].cursor.lastAccessDate;
+ assert.commandWorked(db.runCommand(
+ {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2}));
+ result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
+ ])
+ .toArray();
+ let idleCursor = result[0].cursor;
+ assert.eq(idleCursor.nDocsReturned, 4, result);
+ assert.eq(idleCursor.nBatchesReturned, 2, result);
+ assert.eq(idleCursor.originatingCommand.batchSize, 2, result);
+ // Make sure that the getMore does not finish running in the same millisecond as the
+ // cursor creation.
+ assert.soon(() => {
+ assert.commandWorked(db.runCommand(
+ {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2}));
+ result = adminDB
+ .aggregate([
+ {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
+ {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
+ ])
+ .toArray();
+ idleCursor = result[0].cursor;
+ return idleCursor.createdDate < idleCursor.lastAccessDate &&
+ originalAccess < idleCursor.lastAccessDate;
+ });
+ }
+});
- // Test larger batch size.
+// planSummary does not exist on mongos, so skip this test.
+if (!FixtureHelpers.isMongos(db)) {
runTest({
findFunc: function() {
+ assert.commandWorked(coll.createIndex({"val": 1}));
return assert
.commandWorked(db.runCommand(
- {find: "jstests_currentop_cursors", batchSize: 4, noCursorTimeout: true}))
+ {find: "jstests_currentop_cursors", filter: {"val": {$gt: 2}}, batchSize: 2}))
.cursor.id;
},
assertFunc: function(cursorId, result) {
- const idleCursor = result[0].cursor;
assert.eq(result.length, 1, result);
- assert.eq(idleCursor.nDocsReturned, 4, result);
- assert.eq(idleCursor.nBatchesReturned, 1, result);
- assert.eq(idleCursor.noCursorTimeout, true, result);
- assert.eq(idleCursor.originatingCommand.batchSize, 4, result);
+ assert.eq(result[0].planSummary, "IXSCAN { val: 1 }", result);
}
});
-
- // Test batchSize and nDocs are incremented correctly.
+}
+// Test lsid.id value is correct if in commandReadMode.
+if (commandReadMode) {
+ const session = db.getMongo().startSession();
runTest({
findFunc: function() {
+ const sessionDB = session.getDatabase("test");
return assert
- .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
+ .commandWorked(
+ sessionDB.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
.cursor.id;
},
assertFunc: function(cursorId, result) {
- const adminDB = db.getSiblingDB("admin");
- const originalAccess = result[0].cursor.lastAccessDate;
- assert.commandWorked(db.runCommand(
- {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2}));
- result =
- adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}}
- ])
- .toArray();
- let idleCursor = result[0].cursor;
- assert.eq(idleCursor.nDocsReturned, 4, result);
- assert.eq(idleCursor.nBatchesReturned, 2, result);
- assert.eq(idleCursor.originatingCommand.batchSize, 2, result);
- // Make sure that the getMore will not finish running in the same milli as the cursor
- // creation.
- assert.soon(() => {
- assert.commandWorked(db.runCommand(
- {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2}));
- result =
- adminDB
- .aggregate([
- {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
- {
- $match:
- {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}
- }
- ])
- .toArray();
- idleCursor = result[0].cursor;
- return idleCursor.createdDate < idleCursor.lastAccessDate &&
- originalAccess < idleCursor.lastAccessDate;
-
- });
+ assert.eq(result.length, 1, result);
+ assert.eq(session.getSessionId().id, result[0].lsid.id);
}
});
-
- // planSummary does not exist on Mongos, so skip this test.
- if (!FixtureHelpers.isMongos(db)) {
- runTest({
- findFunc: function() {
- assert.commandWorked(coll.createIndex({"val": 1}));
- return assert
- .commandWorked(db.runCommand({
- find: "jstests_currentop_cursors",
- filter: {"val": {$gt: 2}},
- batchSize: 2
- }))
- .cursor.id;
-
- },
- assertFunc: function(cursorId, result) {
- assert.eq(result.length, 1, result);
- assert.eq(result[0].planSummary, "IXSCAN { val: 1 }", result);
-
- }
- });
- }
- // Test lsid.id value is correct if in commandReadMode.
- if (commandReadMode) {
- const session = db.getMongo().startSession();
- runTest({
- findFunc: function() {
- const sessionDB = session.getDatabase("test");
- return assert
- .commandWorked(
- sessionDB.runCommand({find: "jstests_currentop_cursors", batchSize: 2}))
- .cursor.id;
- },
- assertFunc: function(cursorId, result) {
- assert.eq(result.length, 1, result);
- assert.eq(session.getSessionId().id, result[0].lsid.id);
- }
- });
- }
-
+}
})();
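
As a point of reference, the idle-cursor lookup pattern this test exercises reduces to a
minimal sketch: open a cursor with a small batchSize so the server keeps it alive, then
match it by cursorId in $currentOp. The collection name "someColl" is hypothetical.

    const cursorId =
        assert.commandWorked(db.runCommand({find: "someColl", batchSize: 2})).cursor.id;
    const idle = db.getSiblingDB("admin")
                     .aggregate([
                         {$currentOp: {localOps: true, allUsers: false, idleCursors: true}},
                         {$match: {type: "idleCursor", "cursor.cursorId": cursorId}}
                     ])
                     .toArray();
    assert.eq(1, idle.length, tojson(idle));
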
diff --git a/jstests/core/currentop_predicate.js b/jstests/core/currentop_predicate.js
index 049d7d3ab30..ddda0570f87 100644
--- a/jstests/core/currentop_predicate.js
+++ b/jstests/core/currentop_predicate.js
@@ -2,13 +2,13 @@
// Tests the use of a match predicate with the currentOp command.
(function() {
- // Test a predicate that matches the currentOp operation we are running.
- var res = db.adminCommand("currentOp", {command: {$exists: true}});
- assert.commandWorked(res);
- assert.gt(res.inprog.length, 0, tojson(res));
+// Test a predicate that matches the currentOp operation we are running.
+var res = db.adminCommand("currentOp", {command: {$exists: true}});
+assert.commandWorked(res);
+assert.gt(res.inprog.length, 0, tojson(res));
- // Test a predicate that matches no operations.
- res = db.adminCommand("currentOp", {dummyCurOpField: {exists: true}});
- assert.commandWorked(res);
- assert.eq(res.inprog.length, 0, tojson(res));
+// Test a predicate that matches no operations.
+res = db.adminCommand("currentOp", {dummyCurOpField: {exists: true}});
+assert.commandWorked(res);
+assert.eq(res.inprog.length, 0, tojson(res));
})();
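
The command form above takes the predicate as a second argument; the same filter can also be
expressed through the $currentOp aggregation stage used elsewhere in this patch. A minimal
sketch, assuming the default $currentOp options are acceptable:

    const inprog = db.getSiblingDB("admin")
                       .aggregate([{$currentOp: {}}, {$match: {command: {$exists: true}}}])
                       .toArray();
    assert.gt(inprog.length, 0, tojson(inprog));  // at least the aggregate itself matches
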
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index 93113055497..3def8c6162f 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -8,47 +8,48 @@
// ]
(function() {
- "use strict";
-
- const t = db.cursora;
-
- function run(n) {
- if (!isNumber(n)) {
- assert(isNumber(n), "cursora.js isNumber");
- }
- t.drop();
-
- let bulk = t.initializeUnorderedBulkOp();
- for (let i = 0; i < n; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
-
- const join = startParallelShell("sleep(50);" + "db.cursora.remove({});");
-
- let num;
- try {
- let start = new Date();
- num = t.find(function() {
- let num = 2;
- for (let x = 0; x < 1000; x++)
- num += 2;
- return num > 0;
- })
- .sort({_id: -1})
- .itcount();
- } catch (e) {
- print("cursora.js FAIL " + e);
- join();
- throw e;
- }
+"use strict";
- join();
+const t = db.cursora;
- assert.eq(0, t.count());
- if (n == num)
- print("cursora.js warning: shouldn't have counted all n: " + n + " num: " + num);
+function run(n) {
+ assert(isNumber(n), "cursora.js isNumber");
+ t.drop();
+
+ let bulk = t.initializeUnorderedBulkOp();
+ for (let i = 0; i < n; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+
+ const join = startParallelShell("sleep(50);" +
+ "db.cursora.remove({});");
+
+ let num;
+ try {
+ let start = new Date();
+ num = t.find(function() {
+ let num = 2;
+ for (let x = 0; x < 1000; x++)
+ num += 2;
+ return num > 0;
+ })
+ .sort({_id: -1})
+ .itcount();
+ } catch (e) {
+ print("cursora.js FAIL " + e);
+ join();
+ throw e;
}
- run(1500);
- run(5000);
+ join();
+
+ assert.eq(0, t.count());
+ if (n == num)
+ print("cursora.js warning: shouldn't have counted all n: " + n + " num: " + num);
+}
+
+run(1500);
+run(5000);
})();
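
The parallel-shell idiom above is easy to get wrong: startParallelShell returns a join handle
that must be invoked exactly once, even when the racing read throws, or the test leaks the
shell. A minimal sketch of the same idiom using try/finally instead of the explicit
catch-and-rethrow, with a hypothetical collection name:

    const join = startParallelShell("sleep(50); db.someColl.remove({});");
    try {
        db.someColl.find().sort({_id: -1}).itcount();  // the racing read
    } finally {
        join();  // always reap the parallel shell and check its exit code
    }
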
diff --git a/jstests/core/datasize2.js b/jstests/core/datasize2.js
index a64bb62c287..2468e490602 100644
--- a/jstests/core/datasize2.js
+++ b/jstests/core/datasize2.js
@@ -8,32 +8,32 @@
//
(function() {
- "use strict";
+"use strict";
- var coll = db.foo;
- var adminDB = db.getSiblingDB('admin');
- coll.drop();
+var coll = db.foo;
+var adminDB = db.getSiblingDB('admin');
+coll.drop();
- var N = 1000;
- for (var i = 0; i < N; i++) {
- coll.insert({_id: i, s: "asdasdasdasdasdasdasd"});
- }
+var N = 1000;
+for (var i = 0; i < N; i++) {
+ coll.insert({_id: i, s: "asdasdasdasdasdasdasd"});
+}
- var dataSizeCommand =
- {"dataSize": "test.foo", "keyPattern": {"_id": 1}, "min": {"_id": 0}, "max": {"_id": N}};
+var dataSizeCommand =
+ {"dataSize": "test.foo", "keyPattern": {"_id": 1}, "min": {"_id": 0}, "max": {"_id": N}};
- assert.eq(N,
- db.runCommand(dataSizeCommand).numObjects,
- "dataSize command on 'test.foo' failed when called on the 'test' DB.");
- assert.eq(N,
- adminDB.runCommand(dataSizeCommand).numObjects,
- "dataSize command on 'test.foo' failed when called on the 'admin' DB.");
+assert.eq(N,
+ db.runCommand(dataSizeCommand).numObjects,
+ "dataSize command on 'test.foo' failed when called on the 'test' DB.");
+assert.eq(N,
+ adminDB.runCommand(dataSizeCommand).numObjects,
+ "dataSize command on 'test.foo' failed when called on the 'admin' DB.");
- dataSizeCommand.maxObjects = 100;
- assert.eq(101,
- db.runCommand(dataSizeCommand).numObjects,
- "dataSize command with max number of objects set failed on 'test' DB");
- assert.eq(101,
- db.runCommand(dataSizeCommand).numObjects,
- "dataSize command with max number of objects set failed on 'admin' DB");
+dataSizeCommand.maxObjects = 100;
+assert.eq(101,
+ db.runCommand(dataSizeCommand).numObjects,
+ "dataSize command with max number of objects set failed on 'test' DB");
+assert.eq(101,
+ db.runCommand(dataSizeCommand).numObjects,
+ "dataSize command with max number of objects set failed on 'admin' DB");
})();
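
One behavior worth calling out from the assertions above: with maxObjects set, dataSize counts
until it has gone one past the limit, so numObjects comes back as maxObjects + 1 whenever the
key range holds more documents than the cap. A minimal sketch of the same call:

    const res = db.runCommand({
        dataSize: "test.foo",
        keyPattern: {"_id": 1},
        min: {"_id": 0},
        max: {"_id": 1000},
        maxObjects: 100
    });
    assert.eq(101, res.numObjects, tojson(res));  // one past maxObjects, per the asserts above
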
diff --git a/jstests/core/dbadmin.js b/jstests/core/dbadmin.js
index 1dd042d863d..3d2483b0334 100644
--- a/jstests/core/dbadmin.js
+++ b/jstests/core/dbadmin.js
@@ -1,37 +1,35 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
+'use strict';
- var t = db.dbadmin;
- t.save({x: 1});
- t.save({x: 1});
+var t = db.dbadmin;
+t.save({x: 1});
+t.save({x: 1});
- var res = db.adminCommand("listDatabases");
- assert(res.databases && res.databases.length > 0, "listDatabases: " + tojson(res));
+var res = db.adminCommand("listDatabases");
+assert(res.databases && res.databases.length > 0, "listDatabases: " + tojson(res));
- var res = db.adminCommand({listDatabases: 1, nameOnly: true});
- assert(res.databases && res.databases.length > 0 && res.totalSize === undefined,
- "listDatabases nameOnly: " + tojson(res));
+var res = db.adminCommand({listDatabases: 1, nameOnly: true});
+assert(res.databases && res.databases.length > 0 && res.totalSize === undefined,
+ "listDatabases nameOnly: " + tojson(res));
- var now = new Date();
- var x = db._adminCommand("ismaster");
- assert(x.ismaster, "ismaster failed: " + tojson(x));
- assert(x.localTime, "ismaster didn't include time: " + tojson(x));
+var now = new Date();
+var x = db._adminCommand("ismaster");
+assert(x.ismaster, "ismaster failed: " + tojson(x));
+assert(x.localTime, "ismaster didn't include time: " + tojson(x));
- var localTimeSkew = x.localTime - now;
- if (localTimeSkew >= 50) {
- print("Warning: localTimeSkew " + localTimeSkew + " > 50ms.");
- }
- assert.lt(localTimeSkew, 500, "isMaster.localTime");
+var localTimeSkew = x.localTime - now;
+if (localTimeSkew >= 50) {
+ print("Warning: localTimeSkew " + localTimeSkew + " > 50ms.");
+}
+assert.lt(localTimeSkew, 500, "isMaster.localTime");
- var before = db.runCommand("serverStatus");
- print(before.uptimeEstimate);
- sleep(5000);
-
- var after = db.runCommand("serverStatus");
- print(after.uptimeEstimate);
- assert.gte(
- after.uptimeEstimate, before.uptimeEstimate, "uptime estimate should be non-decreasing");
+var before = db.runCommand("serverStatus");
+print(before.uptimeEstimate);
+sleep(5000);
+var after = db.runCommand("serverStatus");
+print(after.uptimeEstimate);
+assert.gte(after.uptimeEstimate, before.uptimeEstimate, "uptime estimate should be non-decreasing");
})();
diff --git a/jstests/core/dbref4.js b/jstests/core/dbref4.js
index d4648497218..0de94028e39 100644
--- a/jstests/core/dbref4.js
+++ b/jstests/core/dbref4.js
@@ -3,22 +3,22 @@
// Ensures round-trippability of int ids in DBRef's after a save/restore
(function() {
- "use strict";
+"use strict";
- const coll = db.dbref4;
- coll.drop();
+const coll = db.dbref4;
+coll.drop();
- coll.insert({
- "refInt": DBRef("DBRef", NumberInt(1), "Ref"),
- });
+coll.insert({
+ "refInt": DBRef("DBRef", NumberInt(1), "Ref"),
+});
- // we inserted something with an int
- assert(coll.findOne({'refInt.$id': {$type: 16}}));
+// we inserted something with an int ($type 16 is the BSON 32-bit int type)
+assert(coll.findOne({'refInt.$id': {$type: 16}}));
- var doc = coll.findOne();
- doc.x = 1;
- coll.save(doc);
+var doc = coll.findOne();
+doc.x = 1;
+coll.save(doc);
- // after pulling it back and saving it again, still has an int
- assert(coll.findOne({'refInt.$id': {$type: 16}}));
+// after pulling it back and saving it again, still has an int
+assert(coll.findOne({'refInt.$id': {$type: 16}}));
})();
diff --git a/jstests/core/dbstats.js b/jstests/core/dbstats.js
index 1a831e9ce42..9321fca40da 100644
--- a/jstests/core/dbstats.js
+++ b/jstests/core/dbstats.js
@@ -3,68 +3,70 @@
// @tags: [requires_dbstats]
(function() {
- "use strict";
-
- function serverIsMongos() {
- const res = db.runCommand("ismaster");
- assert.commandWorked(res);
- return res.msg === "isdbgrid";
- }
-
- function serverUsingPersistentStorage() {
- const res = db.runCommand("serverStatus");
- assert.commandWorked(res);
- return res.storageEngine.persistent === true;
- }
-
- const isMongoS = serverIsMongos();
- const isUsingPersistentStorage = !isMongoS && serverUsingPersistentStorage();
-
- let testDB = db.getSiblingDB("dbstats_js");
- assert.commandWorked(testDB.dropDatabase());
-
- let coll = testDB["testColl"];
- assert.commandWorked(coll.createIndex({x: 1}));
- const doc = {_id: 1, x: 1};
- assert.writeOK(coll.insert(doc));
-
- let dbStats = testDB.runCommand({dbStats: 1});
+"use strict";
+
+function serverIsMongos() {
+ const res = db.runCommand("ismaster");
+ assert.commandWorked(res);
+ return res.msg === "isdbgrid";
+}
+
+function serverUsingPersistentStorage() {
+ const res = db.runCommand("serverStatus");
+ assert.commandWorked(res);
+ return res.storageEngine.persistent === true;
+}
+
+const isMongoS = serverIsMongos();
+const isUsingPersistentStorage = !isMongoS && serverUsingPersistentStorage();
+
+let testDB = db.getSiblingDB("dbstats_js");
+assert.commandWorked(testDB.dropDatabase());
+
+let coll = testDB["testColl"];
+assert.commandWorked(coll.createIndex({x: 1}));
+const doc = {
+ _id: 1,
+ x: 1
+};
+assert.writeOK(coll.insert(doc));
+
+let dbStats = testDB.runCommand({dbStats: 1});
+assert.commandWorked(dbStats);
+
+assert.eq(1, dbStats.objects, tojson(dbStats)); // Includes testColl only
+const dataSize = Object.bsonsize(doc);
+assert.eq(dataSize, dbStats.avgObjSize, tojson(dbStats));
+assert.eq(dataSize, dbStats.dataSize, tojson(dbStats));
+
+// Index count will vary on mongoS if an additional index is needed to support sharding.
+if (isMongoS) {
+ assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats));
+} else {
+ assert.eq(2, dbStats.indexes, tojson(dbStats));
+}
+
+assert(dbStats.hasOwnProperty("storageSize"), tojson(dbStats));
+assert(dbStats.hasOwnProperty("numExtents"), tojson(dbStats));
+assert(dbStats.hasOwnProperty("indexSize"), tojson(dbStats));
+
+if (isUsingPersistentStorage) {
+ assert(dbStats.hasOwnProperty("fsUsedSize"), tojson(dbStats));
+ assert(dbStats.hasOwnProperty("fsTotalSize"), tojson(dbStats));
+}
+
+// Confirm collection and view counts on mongoD
+if (!isMongoS) {
+ assert.eq(testDB.getName(), dbStats.db, tojson(dbStats));
+
+ // We wait to add a view until this point as it allows more exact testing of avgObjSize for
+ // WiredTiger above. Having more than 1 document would require floating point comparison.
+ assert.commandWorked(testDB.createView("testView", "testColl", []));
+
+ dbStats = testDB.runCommand({dbStats: 1});
assert.commandWorked(dbStats);
- assert.eq(1, dbStats.objects, tojson(dbStats)); // Includes testColl only
- const dataSize = Object.bsonsize(doc);
- assert.eq(dataSize, dbStats.avgObjSize, tojson(dbStats));
- assert.eq(dataSize, dbStats.dataSize, tojson(dbStats));
-
- // Index count will vary on mongoS if an additional index is needed to support sharding.
- if (isMongoS) {
- assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats));
- } else {
- assert.eq(2, dbStats.indexes, tojson(dbStats));
- }
-
- assert(dbStats.hasOwnProperty("storageSize"), tojson(dbStats));
- assert(dbStats.hasOwnProperty("numExtents"), tojson(dbStats));
- assert(dbStats.hasOwnProperty("indexSize"), tojson(dbStats));
-
- if (isUsingPersistentStorage) {
- assert(dbStats.hasOwnProperty("fsUsedSize"), tojson(dbStats));
- assert(dbStats.hasOwnProperty("fsTotalSize"), tojson(dbStats));
- }
-
- // Confirm collection and view counts on mongoD
- if (!isMongoS) {
- assert.eq(testDB.getName(), dbStats.db, tojson(dbStats));
-
- // We wait to add a view until this point as it allows more exact testing of avgObjSize for
- // WiredTiger above. Having more than 1 document would require floating point comparison.
- assert.commandWorked(testDB.createView("testView", "testColl", []));
-
- dbStats = testDB.runCommand({dbStats: 1});
- assert.commandWorked(dbStats);
-
- assert.eq(2, dbStats.collections, tojson(dbStats)); // testColl + system.views
- assert.eq(1, dbStats.views, tojson(dbStats));
- }
-
+ assert.eq(2, dbStats.collections, tojson(dbStats)); // testColl + system.views
+ assert.eq(1, dbStats.views, tojson(dbStats));
+}
})();
diff --git a/jstests/core/diagdata.js b/jstests/core/diagdata.js
index f002004b5a5..2f2c224304e 100644
--- a/jstests/core/diagdata.js
+++ b/jstests/core/diagdata.js
@@ -7,10 +7,10 @@
load('jstests/libs/ftdc.js');
(function() {
- "use strict";
+"use strict";
- // Verify we require admin database
- assert.commandFailed(db.diagdata.runCommand("getDiagnosticData"));
+// Verify we require admin database
+assert.commandFailed(db.diagdata.runCommand("getDiagnosticData"));
- verifyGetDiagnosticData(db.getSiblingDB('admin'));
+verifyGetDiagnosticData(db.getSiblingDB('admin'));
})();
diff --git a/jstests/core/distinct1.js b/jstests/core/distinct1.js
index aee7b604926..1d4ccaab16c 100644
--- a/jstests/core/distinct1.js
+++ b/jstests/core/distinct1.js
@@ -1,69 +1,68 @@
(function() {
- "use strict";
- const collName = "distinct1";
- const coll = db.getCollection(collName);
- coll.drop();
+"use strict";
+const collName = "distinct1";
+const coll = db.getCollection(collName);
+coll.drop();
- assert.eq(0, coll.distinct("a").length, "test empty");
+assert.eq(0, coll.distinct("a").length, "test empty");
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({a: 3}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 3}));
- // Test that distinct returns all the distinct values.
- assert.eq([1, 2, 3], coll.distinct("a").sort(), "distinct returned unexpected results");
+// Test that distinct returns all the distinct values.
+assert.eq([1, 2, 3], coll.distinct("a").sort(), "distinct returned unexpected results");
- // Test that distinct respects the query condition.
- assert.eq([1, 2],
- coll.distinct("a", {a: {$lt: 3}}).sort(),
- "distinct with query returned unexpected results");
+// Test that distinct respects the query condition.
+assert.eq([1, 2],
+ coll.distinct("a", {a: {$lt: 3}}).sort(),
+ "distinct with query returned unexpected results");
- assert(coll.drop());
+assert(coll.drop());
- assert.writeOK(coll.insert({a: {b: "a"}, c: 12}));
- assert.writeOK(coll.insert({a: {b: "b"}, c: 12}));
- assert.writeOK(coll.insert({a: {b: "c"}, c: 12}));
- assert.writeOK(coll.insert({a: {b: "c"}, c: 12}));
+assert.writeOK(coll.insert({a: {b: "a"}, c: 12}));
+assert.writeOK(coll.insert({a: {b: "b"}, c: 12}));
+assert.writeOK(coll.insert({a: {b: "c"}, c: 12}));
+assert.writeOK(coll.insert({a: {b: "c"}, c: 12}));
- // Test that distinct works on fields in embedded documents.
- assert.eq(["a", "b", "c"],
- coll.distinct("a.b").sort(),
- "distinct on dotted field returned unexpected results");
+// Test that distinct works on fields in embedded documents.
+assert.eq(["a", "b", "c"],
+ coll.distinct("a.b").sort(),
+ "distinct on dotted field returned unexpected results");
- assert(coll.drop());
+assert(coll.drop());
- assert.writeOK(coll.insert({_id: 1, a: 1}));
- assert.writeOK(coll.insert({_id: 2, a: 2}));
+assert.writeOK(coll.insert({_id: 1, a: 1}));
+assert.writeOK(coll.insert({_id: 2, a: 2}));
- // Test that distinct works on the _id field.
- assert.eq([1, 2], coll.distinct("_id").sort(), "distinct on _id returned unexpected results");
+// Test that distinct works on the _id field.
+assert.eq([1, 2], coll.distinct("_id").sort(), "distinct on _id returned unexpected results");
- // Test that distinct works with a query on the _id field.
- assert.eq([1],
- coll.distinct("a", {_id: 1}),
- "distinct with query on _id returned unexpected results");
+// Test that distinct works with a query on the _id field.
+assert.eq(
+ [1], coll.distinct("a", {_id: 1}), "distinct with query on _id returned unexpected results");
- assert(coll.drop());
+assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 2, b: 2}));
- assert.writeOK(coll.insert({a: 2, b: 1}));
- assert.writeOK(coll.insert({a: 2, b: 2}));
- assert.writeOK(coll.insert({a: 3, b: 2}));
- assert.writeOK(coll.insert({a: 4, b: 1}));
- assert.writeOK(coll.insert({a: 4, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.writeOK(coll.insert({a: 2, b: 1}));
+assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.writeOK(coll.insert({a: 3, b: 2}));
+assert.writeOK(coll.insert({a: 4, b: 1}));
+assert.writeOK(coll.insert({a: 4, b: 1}));
- // Test running the distinct command directly, rather than via shell helper.
- let res = assert.commandWorked(db.runCommand({distinct: collName, key: "a"}));
- assert.eq([1, 2, 3, 4], res.values.sort());
+// Test running the distinct command directly, rather than via shell helper.
+let res = assert.commandWorked(db.runCommand({distinct: collName, key: "a"}));
+assert.eq([1, 2, 3, 4], res.values.sort());
- res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: null}));
- assert.eq([1, 2, 3, 4], res.values.sort());
+res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: null}));
+assert.eq([1, 2, 3, 4], res.values.sort());
- res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: {b: 2}}));
- assert.eq([1, 2, 3], res.values.sort());
+res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: {b: 2}}));
+assert.eq([1, 2, 3], res.values.sort());
- assert.commandFailed(db.runCommand({distinct: collName, key: "a", query: 1}));
+assert.commandFailed(db.runCommand({distinct: collName, key: "a", query: 1}));
}());
diff --git a/jstests/core/distinct4.js b/jstests/core/distinct4.js
index 2723e947d2d..a66022ecbaa 100644
--- a/jstests/core/distinct4.js
+++ b/jstests/core/distinct4.js
@@ -1,55 +1,54 @@
// Validate input to the distinct command. SERVER-12642
(function() {
- "use strict";
+"use strict";
- var t = db.distinct4;
+var t = db.distinct4;
- t.drop();
- t.save({a: null});
- t.save({a: 1});
- t.save({a: 1});
- t.save({a: 2});
- t.save({a: 3});
+t.drop();
+t.save({a: null});
+t.save({a: 1});
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 3});
- // first argument should be a string or error
+// first argument should be a string or error
- // from shell helper
- assert.throws(function() {
- t.distinct({a: 1});
- });
+// from shell helper
+assert.throws(function() {
+ t.distinct({a: 1});
+});
- // from command interface
- assert.commandFailedWithCode(t.runCommand("distinct", {"key": {a: 1}}),
- ErrorCodes.TypeMismatch);
+// from command interface
+assert.commandFailedWithCode(t.runCommand("distinct", {"key": {a: 1}}), ErrorCodes.TypeMismatch);
- // second argument should be a document or error
+// second argument should be a document or error
- // from shell helper
- assert.throws(function() {
- t.distinct('a', '1');
- });
+// from shell helper
+assert.throws(function() {
+ t.distinct('a', '1');
+});
- // from command interface
- assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a", "query": "a"}),
- ErrorCodes.TypeMismatch);
+// from command interface
+assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a", "query": "a"}),
+ ErrorCodes.TypeMismatch);
- // empty query clause should not cause error
+// empty query clause should not cause error
- // from shell helper
- var a = assert.doesNotThrow(function() {
- return t.distinct('a');
- });
- // [ null, 1, 2, 3 ]
- assert.eq(4, a.length, tojson(a));
- assert.contains(null, a);
- assert.contains(1, a);
- assert.contains(2, a);
- assert.contains(3, a);
+// from shell helper
+var a = assert.doesNotThrow(function() {
+ return t.distinct('a');
+});
+// [ null, 1, 2, 3 ]
+assert.eq(4, a.length, tojson(a));
+assert.contains(null, a);
+assert.contains(1, a);
+assert.contains(2, a);
+assert.contains(3, a);
- // from command interface
- assert.commandWorked(t.runCommand("distinct", {"key": "a"}));
+// from command interface
+assert.commandWorked(t.runCommand("distinct", {"key": "a"}));
- // embedded nulls are prohibited in the key field
- assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a\0b"}), 31032);
+// embedded nulls are prohibited in the key field
+assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a\0b"}), 31032);
})();
diff --git a/jstests/core/distinct_compound_index.js b/jstests/core/distinct_compound_index.js
index 0176e3581a0..6182267ea51 100644
--- a/jstests/core/distinct_compound_index.js
+++ b/jstests/core/distinct_compound_index.js
@@ -1,35 +1,34 @@
// @tags: [assumes_balancer_off]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/analyze_plan.js"); // For planHasStage.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/analyze_plan.js"); // For planHasStage.
- var coll = db.distinct_multikey_index;
+var coll = db.distinct_multikey_index;
- coll.drop();
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.save({a: 1, b: 1}));
- assert.writeOK(coll.save({a: 1, b: 2}));
- assert.writeOK(coll.save({a: 2, b: 1}));
- assert.writeOK(coll.save({a: 2, b: 3}));
- }
- coll.createIndex({a: 1, b: 1});
+coll.drop();
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.save({a: 1, b: 1}));
+ assert.writeOK(coll.save({a: 1, b: 2}));
+ assert.writeOK(coll.save({a: 2, b: 1}));
+ assert.writeOK(coll.save({a: 2, b: 3}));
+}
+coll.createIndex({a: 1, b: 1});
- var explain_distinct_with_query = coll.explain("executionStats").distinct('b', {a: 1});
- assert.commandWorked(explain_distinct_with_query);
- assert(planHasStage(db, explain_distinct_with_query.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- assert(planHasStage(
- db, explain_distinct_with_query.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- // If the collection is sharded, we expect at most 2 distinct values per shard. If the
- // collection is not sharded, we expect 2 returned.
- assert.lte(explain_distinct_with_query.executionStats.nReturned,
- 2 * FixtureHelpers.numberOfShardsForCollection(coll));
+var explain_distinct_with_query = coll.explain("executionStats").distinct('b', {a: 1});
+assert.commandWorked(explain_distinct_with_query);
+assert(planHasStage(db, explain_distinct_with_query.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+assert(
+ planHasStage(db, explain_distinct_with_query.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+// If the collection is sharded, we expect at most 2 distinct values per shard. If the
+// collection is not sharded, we expect 2 returned.
+assert.lte(explain_distinct_with_query.executionStats.nReturned,
+ 2 * FixtureHelpers.numberOfShardsForCollection(coll));
- var explain_distinct_without_query = coll.explain("executionStats").distinct('b');
- assert.commandWorked(explain_distinct_without_query);
- assert(planHasStage(db, explain_distinct_without_query.queryPlanner.winningPlan, "COLLSCAN"));
- assert(!planHasStage(
- db, explain_distinct_without_query.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- assert.eq(40, explain_distinct_without_query.executionStats.nReturned);
+var explain_distinct_without_query = coll.explain("executionStats").distinct('b');
+assert.commandWorked(explain_distinct_without_query);
+assert(planHasStage(db, explain_distinct_without_query.queryPlanner.winningPlan, "COLLSCAN"));
+assert(!planHasStage(db, explain_distinct_without_query.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+assert.eq(40, explain_distinct_without_query.executionStats.nReturned);
})();
diff --git a/jstests/core/distinct_index1.js b/jstests/core/distinct_index1.js
index bd1822b7a8c..e1cd721c7f4 100644
--- a/jstests/core/distinct_index1.js
+++ b/jstests/core/distinct_index1.js
@@ -3,79 +3,79 @@
* @tags: [assumes_balancer_off]
*/
(function() {
- load("jstests/libs/analyze_plan.js"); // For getPlanStage.
+load("jstests/libs/analyze_plan.js"); // For getPlanStage.
- const coll = db.distinct_index1;
- coll.drop();
+const coll = db.distinct_index1;
+coll.drop();
- function getHash(num) {
- return Math.floor(Math.sqrt(num * 123123)) % 10;
- }
+function getHash(num) {
+ return Math.floor(Math.sqrt(num * 123123)) % 10;
+}
- function getDistinctExplainWithExecutionStats(field, query) {
- const explain = coll.explain("executionStats").distinct(field, query || {});
- assert(explain.hasOwnProperty("executionStats"), explain);
- return explain;
- }
+function getDistinctExplainWithExecutionStats(field, query) {
+ const explain = coll.explain("executionStats").distinct(field, query || {});
+ assert(explain.hasOwnProperty("executionStats"), explain);
+ return explain;
+}
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 1000; i++) {
- bulk.insert({a: getHash(i * 5), b: getHash(i)});
- }
- assert.commandWorked(bulk.execute());
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 1000; i++) {
+ bulk.insert({a: getHash(i * 5), b: getHash(i)});
+}
+assert.commandWorked(bulk.execute());
- let explain = getDistinctExplainWithExecutionStats("a");
- // Collection scan looks at all 1000 documents and gets 1000 distinct values. Looks at 0 index
- // keys.
- assert.eq(1000, explain.executionStats.nReturned);
- assert.eq(0, explain.executionStats.totalKeysExamined);
- assert.eq(1000, explain.executionStats.totalDocsExamined);
+let explain = getDistinctExplainWithExecutionStats("a");
+// Collection scan looks at all 1000 documents and gets 1000 distinct values. Looks at 0 index
+// keys.
+assert.eq(1000, explain.executionStats.nReturned);
+assert.eq(0, explain.executionStats.totalKeysExamined);
+assert.eq(1000, explain.executionStats.totalDocsExamined);
- explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}});
- // Collection scan looks at all 1000 documents and gets 398 distinct values which match the
- // query. Looks at 0 index keys.
- assert.eq(398, explain.executionStats.nReturned);
- assert.eq(0, explain.executionStats.totalKeysExamined);
- assert.eq(1000, explain.executionStats.totalDocsExamined);
+explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}});
+// Collection scan looks at all 1000 documents and gets 398 distinct values which match the
+// query. Looks at 0 index keys.
+assert.eq(398, explain.executionStats.nReturned);
+assert.eq(0, explain.executionStats.totalKeysExamined);
+assert.eq(1000, explain.executionStats.totalDocsExamined);
- explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}});
- // Collection scan looks at all 1000 documents and gets 398 distinct values which match the
- // query. Looks at 0 index keys.
- assert.eq(398, explain.executionStats.nReturned);
- assert.eq(0, explain.executionStats.totalKeysExamined);
- assert.eq(1000, explain.executionStats.totalDocsExamined);
+explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}});
+// Collection scan looks at all 1000 documents and gets 398 distinct values which match the
+// query. Looks at 0 index keys.
+assert.eq(398, explain.executionStats.nReturned);
+assert.eq(0, explain.executionStats.totalKeysExamined);
+assert.eq(1000, explain.executionStats.totalDocsExamined);
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
- explain = getDistinctExplainWithExecutionStats("a");
- // There are only 10 values. We use the fast distinct hack and only examine each value once.
- assert.eq(10, explain.executionStats.nReturned);
- assert.lte(10, explain.executionStats.totalKeysExamined);
+explain = getDistinctExplainWithExecutionStats("a");
+// There are only 10 values. We use the fast distinct hack and only examine each value once.
+assert.eq(10, explain.executionStats.nReturned);
+assert.lte(10, explain.executionStats.totalKeysExamined);
- explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}});
- // Only 4 values of a are >= 5 and we use the fast distinct hack.
- assert.eq(4, explain.executionStats.nReturned);
- assert.eq(4, explain.executionStats.totalKeysExamined);
- assert.eq(0, explain.executionStats.totalDocsExamined);
+explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}});
+// Only 4 values of a are >= 5 and we use the fast distinct hack.
+assert.eq(4, explain.executionStats.nReturned);
+assert.eq(4, explain.executionStats.totalKeysExamined);
+assert.eq(0, explain.executionStats.totalDocsExamined);
- explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}});
- // We can't use the fast distinct hack here because we're distinct-ing over 'b'.
- assert.eq(398, explain.executionStats.nReturned);
- assert.eq(398, explain.executionStats.totalKeysExamined);
- assert.eq(398, explain.executionStats.totalDocsExamined);
+explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}});
+// We can't use the fast distinct hack here because we're distinct-ing over 'b'.
+assert.eq(398, explain.executionStats.nReturned);
+assert.eq(398, explain.executionStats.totalKeysExamined);
+assert.eq(398, explain.executionStats.totalDocsExamined);
- // Test that a distinct over a trailing field of the index can be covered.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}, b: {$gt: 5}});
- assert.lte(explain.executionStats.nReturned, 171);
- assert.eq(0, explain.executionStats.totalDocsExamined);
+// Test that a distinct over a trailing field of the index can be covered.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}, b: {$gt: 5}});
+assert.lte(explain.executionStats.nReturned, 171);
+assert.eq(0, explain.executionStats.totalDocsExamined);
- // Should use an index scan over the hashed index.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex({a: "hashed"}));
- explain = getDistinctExplainWithExecutionStats("a", {$or: [{a: 3}, {a: 5}]});
- assert.eq(188, explain.executionStats.nReturned);
- const indexScanStage = getPlanStage(explain.executionStats.executionStages, "IXSCAN");
- assert.eq("hashed", indexScanStage.keyPattern.a);
+// Should use an index scan over the hashed index.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.createIndex({a: "hashed"}));
+explain = getDistinctExplainWithExecutionStats("a", {$or: [{a: 3}, {a: 5}]});
+assert.eq(188, explain.executionStats.nReturned);
+const indexScanStage = getPlanStage(explain.executionStats.executionStages, "IXSCAN");
+assert.eq("hashed", indexScanStage.keyPattern.a);
})();
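
The "fast distinct hack" the comments above refer to is the DISTINCT_SCAN plan stage: instead
of touching every index key, the scan seeks from one distinct value directly to the next,
which is why totalKeysExamined stays near the 10 distinct values rather than the 1000
documents. A minimal sketch of confirming the stage from explain output, assuming a plain
{a: 1} index (not the hashed one created at the end) and the planHasStage helper from the
analyze_plan.js library loaded above:

    assert.commandWorked(coll.createIndex({a: 1}));
    const explainOut = coll.explain("executionStats").distinct("a");
    assert(planHasStage(db, explainOut.queryPlanner.winningPlan, "DISTINCT_SCAN"), explainOut);
    assert.eq(10, explainOut.executionStats.nReturned, explainOut);
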
diff --git a/jstests/core/distinct_multikey.js b/jstests/core/distinct_multikey.js
index 77aba774a1c..72acd2c342c 100644
--- a/jstests/core/distinct_multikey.js
+++ b/jstests/core/distinct_multikey.js
@@ -2,102 +2,102 @@
* Tests for distinct planning and execution in the presence of multikey indexes.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- let coll = db.jstest_distinct_multikey;
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.insert({a: [1, 2, 3]}));
- assert.writeOK(coll.insert({a: [2, 3, 4]}));
- assert.writeOK(coll.insert({a: [5, 6, 7]}));
+let coll = db.jstest_distinct_multikey;
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.writeOK(coll.insert({a: [1, 2, 3]}));
+assert.writeOK(coll.insert({a: [2, 3, 4]}));
+assert.writeOK(coll.insert({a: [5, 6, 7]}));
- // Test that distinct can correctly use a multikey index when there is no predicate.
- let result = coll.distinct("a");
- assert.eq([1, 2, 3, 4, 5, 6, 7], result.sort());
- let explain = coll.explain("queryPlanner").distinct("a");
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+// Test that distinct can correctly use a multikey index when there is no predicate.
+let result = coll.distinct("a");
+assert.eq([1, 2, 3, 4, 5, 6, 7], result.sort());
+let explain = coll.explain("queryPlanner").distinct("a");
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- // Test that distinct can correctly use a multikey index when there is a predicate. This query
- // should not be eligible for the distinct scan and cannot be covered.
- result = coll.distinct("a", {a: 3});
- assert.eq([1, 2, 3, 4], result.sort());
- explain = coll.explain("queryPlanner").distinct("a", {a: 3});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
+// Test that distinct can correctly use a multikey index when there is a predicate. This query
+// should not be eligible for the distinct scan and cannot be covered.
+result = coll.distinct("a", {a: 3});
+assert.eq([1, 2, 3, 4], result.sort());
+explain = coll.explain("queryPlanner").distinct("a", {a: 3});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
- // Test distinct over a dotted multikey field, with a predicate.
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.b": 1}));
- assert.writeOK(coll.insert({a: {b: [1, 2, 3]}}));
- assert.writeOK(coll.insert({a: {b: [2, 3, 4]}}));
+// Test distinct over a dotted multikey field, with a predicate.
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.b": 1}));
+assert.writeOK(coll.insert({a: {b: [1, 2, 3]}}));
+assert.writeOK(coll.insert({a: {b: [2, 3, 4]}}));
- result = coll.distinct("a.b", {"a.b": 3});
- assert.eq([1, 2, 3, 4], result.sort());
- explain = coll.explain("queryPlanner").distinct("a.b", {"a.b": 3});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
+result = coll.distinct("a.b", {"a.b": 3});
+assert.eq([1, 2, 3, 4], result.sort());
+explain = coll.explain("queryPlanner").distinct("a.b", {"a.b": 3});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
- // Test that the distinct scan can be used when there is a predicate and the index is not
- // multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({a: 3}));
+// Test that the distinct scan can be used when there is a predicate and the index is not
+// multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 3}));
- result = coll.distinct("a", {a: {$gte: 2}});
- assert.eq([2, 3], result.sort());
- explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+result = coll.distinct("a", {a: {$gte: 2}});
+assert.eq([2, 3], result.sort());
+explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- // Test a distinct which can use a multikey index, where the field being distinct'ed is not
- // multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 1, b: [2, 3]}));
- assert.writeOK(coll.insert({a: 8, b: [3, 4]}));
- assert.writeOK(coll.insert({a: 7, b: [4, 5]}));
+// Test a distinct which can use a multikey index, where the field being distinct'ed is not
+// multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: [2, 3]}));
+assert.writeOK(coll.insert({a: 8, b: [3, 4]}));
+assert.writeOK(coll.insert({a: 7, b: [4, 5]}));
- result = coll.distinct("a", {a: {$gte: 2}});
- assert.eq([7, 8], result.sort());
- explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+result = coll.distinct("a", {a: {$gte: 2}});
+assert.eq([7, 8], result.sort());
+explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- // Test distinct over a trailing multikey field.
- result = coll.distinct("b", {a: {$gte: 2}});
- assert.eq([3, 4, 5], result.sort());
- explain = coll.explain("queryPlanner").distinct("b", {a: {$gte: 2}});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
+// Test distinct over a trailing multikey field.
+result = coll.distinct("b", {a: {$gte: 2}});
+assert.eq([3, 4, 5], result.sort());
+explain = coll.explain("queryPlanner").distinct("b", {a: {$gte: 2}});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN"));
- // Test distinct over a trailing non-multikey field, where the leading field is multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: [2, 3], b: 1}));
- assert.writeOK(coll.insert({a: [3, 4], b: 8}));
- assert.writeOK(coll.insert({a: [3, 5], b: 7}));
+// Test distinct over a trailing non-multikey field, where the leading field is multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: [2, 3], b: 1}));
+assert.writeOK(coll.insert({a: [3, 4], b: 8}));
+assert.writeOK(coll.insert({a: [3, 5], b: 7}));
- result = coll.distinct("b", {a: 3});
- assert.eq([1, 7, 8], result.sort());
- explain = coll.explain("queryPlanner").distinct("b", {a: 3});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+result = coll.distinct("b", {a: 3});
+assert.eq([1, 7, 8], result.sort());
+explain = coll.explain("queryPlanner").distinct("b", {a: 3});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- // Test distinct over a trailing non-multikey dotted path where the leading field is multikey.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
- assert.writeOK(coll.insert({a: [2, 3], b: {c: 1}}));
- assert.writeOK(coll.insert({a: [3, 4], b: {c: 8}}));
- assert.writeOK(coll.insert({a: [3, 5], b: {c: 7}}));
+// Test distinct over a trailing non-multikey dotted path where the leading field is multikey.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
+assert.writeOK(coll.insert({a: [2, 3], b: {c: 1}}));
+assert.writeOK(coll.insert({a: [3, 4], b: {c: 8}}));
+assert.writeOK(coll.insert({a: [3, 5], b: {c: 7}}));
- result = coll.distinct("b.c", {a: 3});
- assert.eq([1, 7, 8], result.sort());
- explain = coll.explain("queryPlanner").distinct("b.c", {a: 3});
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_DEFAULT"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+result = coll.distinct("b.c", {a: 3});
+assert.eq([1, 7, 8], result.sort());
+explain = coll.explain("queryPlanner").distinct("b.c", {a: 3});
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_DEFAULT"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
}());
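
The eligibility rules these cases walk through reduce to a single hazard: a DISTINCT_SCAN
seeks past sibling array entries. With documents {a: [4, 5]} and {a: [4, 6]}, a distinct on
"a" with predicate {a: 4} must return [4, 5, 6], but a scan that jumped from the first key 4
to the next distinct key would skip one document's 5 or 6; hence the IXSCAN + FETCH fallback
asserted above whenever the distinct'ed field is multikey and a predicate is present. A
minimal sketch of that case, with a hypothetical collection name:

    const c = db.distinct_multikey_sketch;  // hypothetical
    c.drop();
    assert.commandWorked(c.createIndex({a: 1}));
    assert.writeOK(c.insert({a: [4, 5]}));
    assert.writeOK(c.insert({a: [4, 6]}));
    assert.sameMembers([4, 5, 6], c.distinct("a", {a: 4}));
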
diff --git a/jstests/core/distinct_multikey_dotted_path.js b/jstests/core/distinct_multikey_dotted_path.js
index dc770ec3a24..b06ca2d95ea 100644
--- a/jstests/core/distinct_multikey_dotted_path.js
+++ b/jstests/core/distinct_multikey_dotted_path.js
@@ -9,190 +9,192 @@
* @tags: [assumes_unsharded_collection, does_not_support_stepdowns]
*/
(function() {
- "use strict";
- load("jstests/libs/analyze_plan.js"); // For planHasStage().
-
- const coll = db.distinct_multikey;
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.b.c": 1}));
-
- assert.commandWorked(coll.insert({a: {b: {c: 1}}}));
- assert.commandWorked(coll.insert({a: {b: {c: 2}}}));
- assert.commandWorked(coll.insert({a: {b: {c: 3}}}));
- assert.commandWorked(coll.insert({a: {b: {notRelevant: 3}}}));
- assert.commandWorked(coll.insert({a: {notRelevant: 3}}));
-
- const numPredicate = {"a.b.c": {$gt: 0}};
-
- function getAggPipelineForDistinct(path) {
- return [{$group: {_id: "$" + path}}];
- }
-
- // Run an agg pipeline with a $group, and convert the results so they're equivalent
- // to what a distinct() would return.
- // Note that $group will treat an array as its own key rather than unwinding it. This means
- // that a $group on a field that's multikey will have different behavior than a distinct(), so
- // we only use this function for non-multikey fields.
- function distinctResultsFromPipeline(pipeline) {
- const res = coll.aggregate(pipeline).toArray();
- return res.map((x) => x._id);
- }
-
- // Be sure a distinct scan is used when the index is not multi key.
- (function testDistinctWithNonMultikeyIndex() {
- const results = coll.distinct("a.b.c");
- // TODO SERVER-14832: Returning 'null' here is inconsistent with the behavior when no index
- // is present.
- assert.sameMembers([1, 2, 3, null], results);
-
- const expl = coll.explain().distinct("a.b.c");
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
-
- // Do an equivalent query using $group.
- const pipeline = getAggPipelineForDistinct("a.b.c");
- const aggResults = distinctResultsFromPipeline(pipeline);
- assert.sameMembers(aggResults, results);
- const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline));
- assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0);
- })();
-
- // Distinct with a predicate.
- (function testDistinctWithPredWithNonMultikeyIndex() {
- const results = coll.distinct("a.b.c", numPredicate);
- assert.sameMembers([1, 2, 3], results);
-
- const expl = coll.explain().distinct("a.b.c", numPredicate);
-
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
-
- const pipeline = [{$match: numPredicate}].concat(getAggPipelineForDistinct("a.b.c"));
- const aggResults = distinctResultsFromPipeline(pipeline);
- assert.sameMembers(aggResults, results);
- const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline));
- assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0);
- })();
-
- // Make the index multi key.
- assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 5}]}}));
- assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 6}]}}));
- // Empty array is indexed as 'undefined'.
- assert.commandWorked(coll.insert({a: {b: {c: []}}}));
-
- // We should still use the index as long as the path we distinct() on is never an array
- // index.
- (function testDistinctWithMultikeyIndex() {
- const multiKeyResults = coll.distinct("a.b.c");
- // TODO SERVER-14832: Returning 'null' and 'undefined' here is inconsistent with the
- // behavior when no index is present.
- assert.sameMembers([1, 2, 3, 4, 5, 6, null, undefined], multiKeyResults);
- const expl = coll.explain().distinct("a.b.c");
-
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"));
-
- // Not running same query with $group now that the field is multikey. See comment above.
- })();
-
- // We cannot use the DISTINCT_SCAN optimization when there is a multikey path in the key and
- // there is a predicate. The reason is that we may have a predicate like {a: 4}, and two
- // documents: {a: [4, 5]}, {a: [4, 6]}. With a DISTINCT_SCAN, we would "skip over" one of the
- // documents, and leave out either '5' or '6', rather than providing the correct result of
- // [4, 5, 6]. The test below is for a similar case.
- (function testDistinctWithPredWithMultikeyIndex() {
- const pred = {"a.b.c": 4};
- const results = coll.distinct("a.b.c", pred);
- assert.sameMembers([4, 5, 6], results);
-
- const expl = coll.explain().distinct("a.b.c", pred);
- assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl);
-
- // Not running same query with $group now that the field is multikey. See comment above.
- })();
-
- // Perform a distinct on a path where the last component is multikey.
- (function testDistinctOnPathWhereLastComponentIsMultiKey() {
- assert.commandWorked(coll.createIndex({"a.b": 1}));
- const multiKeyResults = coll.distinct("a.b");
- assert.sameMembers(
- [
- null, // From the document with no 'b' field. TODO SERVER-14832: this is
- // inconsistent with behavior when no index is present.
- {c: 1},
- {c: 2},
- {c: 3},
- {c: 4},
- {c: 5},
- {c: 6},
- {c: []},
- {notRelevant: 3}
- ],
- multiKeyResults);
-
- const expl = coll.explain().distinct("a.b");
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"));
-
- // Not running same query with $group now that the field is multikey. See comment above.
- })();
-
- (function testDistinctOnPathWhereLastComponentIsMultiKeyWithPredicate() {
- assert.commandWorked(coll.createIndex({"a.b": 1}));
- const pred = {"a.b": {$type: "array"}};
- const multiKeyResults = coll.distinct("a.b", pred);
- assert.sameMembers(
- [
- {c: 4},
- {c: 5},
- {c: 6},
- ],
- multiKeyResults);
-
- const expl = coll.explain().distinct("a.b", pred);
- assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"));
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"));
-
- // Not running same query with $group now that the field is multikey. See comment above.
- })();
-
- // If the path we distinct() on includes an array index, a COLLSCAN should be used,
- // even if an index is available on the prefix to the array component ("a.b" in this case).
- (function testDistinctOnNumericMultikeyPathNoIndex() {
- const res = coll.distinct("a.b.0");
- assert.eq(res, [{c: 4}]);
-
- const expl = coll.explain().distinct("a.b.0");
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "COLLSCAN"), expl);
-
- // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
- // only treat '0' as a field name (not array index).
- })();
-
- // Creating an index on "a.b.0" and doing a distinct on it should be able to use DISTINCT_SCAN.
- (function testDistinctOnNumericMultikeyPathWithIndex() {
- assert.commandWorked(coll.createIndex({"a.b.0": 1}));
- assert.commandWorked(coll.insert({a: {b: {0: "hello world"}}}));
- const res = coll.distinct("a.b.0");
- assert.sameMembers(res, [{c: 4}, "hello world"]);
-
- const expl = coll.explain().distinct("a.b.0");
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
-
- // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
- // only treat '0' as a field name (not array index).
- })();
-
- // With a predicate, a distinct on "a.b.0" should use an IXSCAN rather than a DISTINCT_SCAN,
- // as "a.b" is multikey. See the explanation above about why a DISTINCT_SCAN cannot be used
- // when there is a predicate and the path given is multikey.
- (function testDistinctWithPredOnNumericMultikeyPathWithIndex() {
- const pred = {"a.b.0": {$type: "object"}};
- const res = coll.distinct("a.b.0", pred);
- assert.sameMembers(res, [{c: 4}]);
-
- const expl = coll.explain().distinct("a.b.0", pred);
- assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
- assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl);
-
- // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
- // only treat '0' as a field name (not array index).
- })();
+"use strict";
+load("jstests/libs/analyze_plan.js"); // For planHasStage().
+
+const coll = db.distinct_multikey;
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.b.c": 1}));
+
+assert.commandWorked(coll.insert({a: {b: {c: 1}}}));
+assert.commandWorked(coll.insert({a: {b: {c: 2}}}));
+assert.commandWorked(coll.insert({a: {b: {c: 3}}}));
+assert.commandWorked(coll.insert({a: {b: {notRelevant: 3}}}));
+assert.commandWorked(coll.insert({a: {notRelevant: 3}}));
+
+const numPredicate = {
+ "a.b.c": {$gt: 0}
+};
+
+function getAggPipelineForDistinct(path) {
+ return [{$group: {_id: "$" + path}}];
+}
+
+// Run an agg pipeline with a $group, and convert the results so they're equivalent
+// to what a distinct() would return.
+// Note that $group will treat an array as its own key rather than unwinding it. This means
+// that a $group on a field that's multikey will have different behavior than a distinct(), so
+// we only use this function for non-multikey fields.
+function distinctResultsFromPipeline(pipeline) {
+ const res = coll.aggregate(pipeline).toArray();
+ return res.map((x) => x._id);
+}
+
+// Be sure a distinct scan is used when the index is not multikey.
+(function testDistinctWithNonMultikeyIndex() {
+ const results = coll.distinct("a.b.c");
+ // TODO SERVER-14832: Returning 'null' here is inconsistent with the behavior when no index
+ // is present.
+ assert.sameMembers([1, 2, 3, null], results);
+
+ const expl = coll.explain().distinct("a.b.c");
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
+
+ // Do an equivalent query using $group.
+ const pipeline = getAggPipelineForDistinct("a.b.c");
+ const aggResults = distinctResultsFromPipeline(pipeline);
+ assert.sameMembers(aggResults, results);
+ const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline));
+ assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0);
+})();
+
+// Distinct with a predicate.
+(function testDistinctWithPredWithNonMultikeyIndex() {
+ const results = coll.distinct("a.b.c", numPredicate);
+ assert.sameMembers([1, 2, 3], results);
+
+ const expl = coll.explain().distinct("a.b.c", numPredicate);
+
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
+
+ const pipeline = [{$match: numPredicate}].concat(getAggPipelineForDistinct("a.b.c"));
+ const aggResults = distinctResultsFromPipeline(pipeline);
+ assert.sameMembers(aggResults, results);
+ const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline));
+ assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0);
+})();
+
+// Make the index multikey.
+assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 5}]}}));
+assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 6}]}}));
+// Empty array is indexed as 'undefined'.
+assert.commandWorked(coll.insert({a: {b: {c: []}}}));
+
+// We should still use the index as long as no component of the path we distinct() on is an
+// array index.
+(function testDistinctWithMultikeyIndex() {
+ const multiKeyResults = coll.distinct("a.b.c");
+ // TODO SERVER-14832: Returning 'null' and 'undefined' here is inconsistent with the
+ // behavior when no index is present.
+ assert.sameMembers([1, 2, 3, 4, 5, 6, null, undefined], multiKeyResults);
+ const expl = coll.explain().distinct("a.b.c");
+
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+
+ // Not running same query with $group now that the field is multikey. See comment above.
+})();
+
+// We cannot use the DISTINCT_SCAN optimization when there is a multikey path in the key and
+// there is a predicate. The reason is that we may have a predicate like {a: 4}, and two
+// documents: {a: [4, 5]}, {a: [4, 6]}. With a DISTINCT_SCAN, we would "skip over" one of the
+// documents, and leave out either '5' or '6', rather than providing the correct result of
+// [4, 5, 6]. The test below is for a similar case.
+(function testDistinctWithPredWithMultikeyIndex() {
+ const pred = {"a.b.c": 4};
+ const results = coll.distinct("a.b.c", pred);
+ assert.sameMembers([4, 5, 6], results);
+
+ const expl = coll.explain().distinct("a.b.c", pred);
+ assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl);
+
+ // Not running same query with $group now that the field is multikey. See comment above.
+})();
+
+// Perform a distinct on a path where the last component is multikey.
+(function testDistinctOnPathWhereLastComponentIsMultiKey() {
+ assert.commandWorked(coll.createIndex({"a.b": 1}));
+ const multiKeyResults = coll.distinct("a.b");
+ assert.sameMembers(
+ [
+ null, // From the document with no 'b' field. TODO SERVER-14832: this is
+ // inconsistent with behavior when no index is present.
+ {c: 1},
+ {c: 2},
+ {c: 3},
+ {c: 4},
+ {c: 5},
+ {c: 6},
+ {c: []},
+ {notRelevant: 3}
+ ],
+ multiKeyResults);
+
+ const expl = coll.explain().distinct("a.b");
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+
+ // Not running same query with $group now that the field is multikey. See comment above.
+})();
+
+(function testDistinctOnPathWhereLastComponentIsMultiKeyWithPredicate() {
+ assert.commandWorked(coll.createIndex({"a.b": 1}));
+ const pred = {"a.b": {$type: "array"}};
+ const multiKeyResults = coll.distinct("a.b", pred);
+ assert.sameMembers(
+ [
+ {c: 4},
+ {c: 5},
+ {c: 6},
+ ],
+ multiKeyResults);
+
+ const expl = coll.explain().distinct("a.b", pred);
+ assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"));
+
+ // Not running same query with $group now that the field is multikey. See comment above.
+})();
+
+// If the path we distinct() on includes an array index, a COLLSCAN should be used,
+// even if an index is available on the prefix to the array component ("a.b" in this case).
+(function testDistinctOnNumericMultikeyPathNoIndex() {
+ const res = coll.distinct("a.b.0");
+ assert.eq(res, [{c: 4}]);
+
+ const expl = coll.explain().distinct("a.b.0");
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "COLLSCAN"), expl);
+
+ // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
+ // only treat '0' as a field name (not array index).
+})();
+
+// Creating an index on "a.b.0" and doing a distinct on it should be able to use DISTINCT_SCAN.
+(function testDistinctOnNumericMultikeyPathWithIndex() {
+ assert.commandWorked(coll.createIndex({"a.b.0": 1}));
+ assert.commandWorked(coll.insert({a: {b: {0: "hello world"}}}));
+ const res = coll.distinct("a.b.0");
+ assert.sameMembers(res, [{c: 4}, "hello world"]);
+
+ const expl = coll.explain().distinct("a.b.0");
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
+
+ // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
+ // only treat '0' as a field name (not array index).
+})();
+
+// With a predicate, a distinct on "a.b.0" should use an IXSCAN rather than a DISTINCT_SCAN,
+// as "a.b" is multikey. See the explanation above about why a DISTINCT_SCAN cannot be used
+// when there is a predicate and the path given is multikey.
+(function testDistinctWithPredOnNumericMultikeyPathWithIndex() {
+ const pred = {"a.b.0": {$type: "object"}};
+ const res = coll.distinct("a.b.0", pred);
+ assert.sameMembers(res, [{c: 4}]);
+
+ const expl = coll.explain().distinct("a.b.0", pred);
+ assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl);
+ assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl);
+
+ // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
+ // only treat '0' as a field name (not array index).
+})();
})();
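
The comment above spells out why a predicate disables the DISTINCT_SCAN optimization on a multikey path. A minimal standalone sketch of that reasoning, assuming a hypothetical scratch collection that is not part of this patch:

    const demo = db.distinct_multikey_demo;  // hypothetical collection name
    demo.drop();
    assert.commandWorked(demo.createIndex({a: 1}));
    // Both documents produce an index key for 4, so the index is multikey and
    // the key 4 points at two different documents.
    assert.commandWorked(demo.insert({a: [4, 5]}));
    assert.commandWorked(demo.insert({a: [4, 6]}));
    // A DISTINCT_SCAN visits each distinct key once, so it could satisfy the
    // predicate {a: 4} from one document and never fetch the other, losing
    // either 5 or 6. Fetching every matching document is what makes the
    // correct answer [4, 5, 6] possible.
    assert.sameMembers([4, 5, 6], demo.distinct("a", {a: 4}));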
diff --git a/jstests/core/doc_validation.js b/jstests/core/doc_validation.js
index 9acfffae4e3..57f99adf48c 100644
--- a/jstests/core/doc_validation.js
+++ b/jstests/core/doc_validation.js
@@ -9,278 +9,275 @@
// Test basic inserts and updates with document validation.
(function() {
- "use strict";
+"use strict";
- function assertFailsValidation(res) {
- if (res instanceof WriteResult) {
- assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
- } else {
- assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
- }
+function assertFailsValidation(res) {
+ if (res instanceof WriteResult) {
+ assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
+ } else {
+ assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res));
}
+}
- const array = [];
- for (let i = 0; i < 2048; i++) {
- array.push({arbitrary: i});
- }
+const array = [];
+for (let i = 0; i < 2048; i++) {
+ array.push({arbitrary: i});
+}
- const collName = "doc_validation";
- const coll = db[collName];
-
- /**
- * Runs a series of document validation tests using the validator 'validator', which should
- * enforce the existence of a field "a".
- */
- function runInsertUpdateValidationTest(validator) {
- coll.drop();
-
- // Create a collection with document validator 'validator'.
- assert.commandWorked(db.createCollection(collName, {validator: validator}));
-
- // Insert and upsert documents that will pass validation.
- assert.writeOK(coll.insert({_id: "valid1", a: 1}));
- assert.writeOK(coll.update({_id: "valid2"}, {_id: "valid2", a: 2}, {upsert: true}));
- assert.writeOK(coll.runCommand(
- "findAndModify", {query: {_id: "valid3"}, update: {$set: {a: 3}}, upsert: true}));
-
- // Insert and upsert documents that will not pass validation.
- assertFailsValidation(coll.insert({_id: "invalid3", b: 1}));
- assertFailsValidation(
- coll.update({_id: "invalid4"}, {_id: "invalid4", b: 2}, {upsert: true}));
- assertFailsValidation(coll.runCommand(
- "findAndModify", {query: {_id: "invalid4"}, update: {$set: {b: 3}}, upsert: true}));
-
- // Assert that we can remove the document that passed validation.
- assert.writeOK(coll.remove({_id: "valid1"}));
-
- // Check that we can only update documents that pass validation. We insert a valid and an
- // invalid document, then set the validator.
- coll.drop();
- assert.writeOK(coll.insert({_id: "valid1", a: 1}));
- assert.writeOK(coll.insert({_id: "invalid2", b: 1}));
- assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
-
- // Assert that updates on a conforming document succeed when they affect fields not involved
- // in the validator.
- // Add a new field.
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: 2}}}));
- // In-place update.
- assert.writeOK(coll.update({_id: "valid1"}, {$inc: {z: 1}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$inc: {y: 1}}}));
- // Out-of-place update.
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: array}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: array}}}));
- // No-op update.
- assert.writeOK(coll.update({_id: "valid1"}, {a: 1}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {a: 1}}}));
-
- // Verify those same updates will fail on a non-conforming document.
- assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: 1}}));
- assertFailsValidation(coll.update({_id: "invalid2"}, {$inc: {z: 1}}));
- assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: array}}));
- assertFailsValidation(
- coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: 2}}}));
- assertFailsValidation(
- coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$inc: {y: 1}}}));
- assertFailsValidation(coll.runCommand(
- "findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: array}}}));
-
- // A no-op update of an invalid doc will succeed.
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {b: 1}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {b: 1}}}));
-
- // Verify that we can't make a conforming document fail validation, but can update a
- // non-conforming document to pass validation.
- coll.drop();
- assert.writeOK(coll.insert({_id: "valid1", a: 1}));
- assert.writeOK(coll.insert({_id: "invalid2", b: 1}));
- assert.writeOK(coll.insert({_id: "invalid3", b: 1}));
- assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
-
- assertFailsValidation(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
- assertFailsValidation(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "invalid3"}, update: {$set: {a: 1}}}));
-
- // Modify the collection to remove the document validator.
- assert.commandWorked(coll.runCommand("collMod", {validator: {}}));
-
- // Verify that no validation is applied to updates.
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}}));
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {z: 1}}));
- assert.writeOK(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
- assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {z: 2}}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {z: 2}}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}}));
- assert.writeOK(
- coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {a: 1}}}));
- }
+const collName = "doc_validation";
+const coll = db[collName];
- // Run the test with a normal validator.
- runInsertUpdateValidationTest({a: {$exists: true}});
-
- // Run the test again with an equivalent JSON Schema.
- runInsertUpdateValidationTest({$jsonSchema: {required: ["a"]}});
-
- /**
- * Runs a series of document validation tests involving collation using the validator
- * 'validator', which should enforce that the field "a" has the value "xyz".
- */
- function runCollationValidationTest(validator) {
- coll.drop();
- assert.commandWorked(db.createCollection(
- collName, {validator: validator, collation: {locale: "en_US", strength: 2}}));
-
- // An insert that matches the validator should succeed.
- assert.writeOK(coll.insert({_id: 0, a: "xyz", b: "foo"}));
-
- const isJSONSchema = validator.hasOwnProperty("$jsonSchema");
-
- // A normal validator should respect the collation and the inserts should succeed. A JSON
- // Schema validator ignores the collation and the inserts should fail.
- const assertCorrectResult =
- isJSONSchema ? res => assertFailsValidation(res) : res => assert.writeOK(res);
- assertCorrectResult(coll.insert({a: "XYZ"}));
- assertCorrectResult(coll.insert({a: "XyZ", b: "foo"}));
- assertCorrectResult(coll.update({_id: 0}, {a: "xyZ", b: "foo"}));
- assertCorrectResult(coll.update({_id: 0}, {$set: {a: "Xyz"}}));
- assertCorrectResult(
- coll.runCommand("findAndModify", {query: {_id: 0}, update: {a: "xyZ", b: "foo"}}));
- assertCorrectResult(
- coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "Xyz"}}}));
-
- // Test an insert and an update that should always fail.
- assertFailsValidation(coll.insert({a: "not xyz"}));
- assertFailsValidation(coll.update({_id: 0}, {$set: {a: "xyzz"}}));
- assertFailsValidation(
- coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "xyzz"}}}));
-
- // A normal validator expands leaf arrays, such that if "a" is an array containing "xyz", it
- // matches {a: "xyz"}. A JSON Schema validator does not expand leaf arrays and treats arrays
- // as a single array value.
- assertCorrectResult(coll.insert({a: ["xyz"]}));
- assertCorrectResult(coll.insert({a: ["XYZ"]}));
- assertCorrectResult(coll.insert({a: ["XyZ"], b: "foo"}));
- }
+/**
+ * Runs a series of document validation tests using the validator 'validator', which should
+ * enforce the existence of a field "a".
+ */
+function runInsertUpdateValidationTest(validator) {
+ coll.drop();
- runCollationValidationTest({a: "xyz"});
- runCollationValidationTest({$jsonSchema: {properties: {a: {enum: ["xyz"]}}}});
+ // Create a collection with document validator 'validator'.
+ assert.commandWorked(db.createCollection(collName, {validator: validator}));
- // The validator is allowed to contain $expr.
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {validator: {$expr: {$eq: ["$a", 5]}}}));
- assert.writeOK(coll.insert({a: 5}));
- assertFailsValidation(coll.insert({a: 4}));
- assert.commandWorked(
- db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", 4]}}}));
- assert.writeOK(coll.insert({a: 4}));
- assertFailsValidation(coll.insert({a: 5}));
-
- // The validator supports $expr with the date extraction expressions (with a timezone
- // specified).
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {
- validator:
- {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}}
- }));
- assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
- assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")}));
-
- // The validator supports $expr with a $dateToParts expression.
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {
- validator: {
- $expr: {
- $eq: [
- {
- "year": 2017,
- "month": 10,
- "day": 1,
- "hour": 18,
- "minute": 0,
- "second": 0,
- "millisecond": 0
- },
- {$dateToParts: {date: "$a", timezone: "America/New_York"}}
- ]
- }
- }
- }));
- assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
- assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")}));
+ // Insert and upsert documents that will pass validation.
+ assert.writeOK(coll.insert({_id: "valid1", a: 1}));
+ assert.writeOK(coll.update({_id: "valid2"}, {_id: "valid2", a: 2}, {upsert: true}));
+ assert.writeOK(coll.runCommand("findAndModify",
+ {query: {_id: "valid3"}, update: {$set: {a: 3}}, upsert: true}));
- // The validator supports $expr with $dateToString expression.
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {
- validator: {
- $expr: {
- $eq: [
- "2017-07-04 14:56:42 +0000 (0 minutes)",
- {
- $dateToString: {
- format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
- date: "$date",
- timezone: "$tz"
- }
- }
- ]
- }
- }
- }));
- assert.writeOK(coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"}));
- assertFailsValidation(
- coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"}));
+ // Insert and upsert documents that will not pass validation.
+ assertFailsValidation(coll.insert({_id: "invalid3", b: 1}));
+ assertFailsValidation(coll.update({_id: "invalid4"}, {_id: "invalid4", b: 2}, {upsert: true}));
+ assertFailsValidation(coll.runCommand(
+ "findAndModify", {query: {_id: "invalid4"}, update: {$set: {b: 3}}, upsert: true}));
+
+ // Assert that we can remove the document that passed validation.
+ assert.writeOK(coll.remove({_id: "valid1"}));
- // The validator supports $expr with $dateFromParts expression.
+ // Check that we can only update documents that pass validation. We insert a valid and an
+ // invalid document, then set the validator.
coll.drop();
- assert.commandWorked(db.createCollection(collName, {
- validator: {
- $expr: {
- $eq: [
- ISODate("2016-12-31T15:00:00Z"),
- {'$dateFromParts': {year: "$year", "timezone": "$timezone"}}
- ]
- }
- }
- }));
- assert.writeOK(coll.insert({_id: 0, year: 2017, month: 6, day: 19, timezone: "Asia/Tokyo"}));
+ assert.writeOK(coll.insert({_id: "valid1", a: 1}));
+ assert.writeOK(coll.insert({_id: "invalid2", b: 1}));
+ assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
+
+ // Assert that updates on a conforming document succeed when they affect fields not involved
+ // in the validator.
+ // Add a new field.
+ assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: 2}}}));
+ // In-place update.
+ assert.writeOK(coll.update({_id: "valid1"}, {$inc: {z: 1}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$inc: {y: 1}}}));
+ // Out-of-place update.
+ assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: array}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: array}}}));
+ // No-op update.
+ assert.writeOK(coll.update({_id: "valid1"}, {a: 1}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {a: 1}}}));
+
+ // Verify those same updates will fail on a non-conforming document.
+ assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: 1}}));
+ assertFailsValidation(coll.update({_id: "invalid2"}, {$inc: {z: 1}}));
+ assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: array}}));
assertFailsValidation(
- coll.insert({_id: 1, year: 2022, month: 1, day: 1, timezone: "America/New_York"}));
+ coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: 2}}}));
+ assertFailsValidation(
+ coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$inc: {y: 1}}}));
+ assertFailsValidation(
+ coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: array}}}));
- // The validator supports $expr with $dateFromString expression.
+ // A no-op update of an invalid doc will succeed.
+ assert.writeOK(coll.update({_id: "invalid2"}, {$set: {b: 1}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {b: 1}}}));
+
+ // Verify that we can't make a conforming document fail validation, but can update a
+ // non-conforming document to pass validation.
coll.drop();
- assert.commandWorked(db.createCollection(collName, {
- validator: {
- $expr: {
- $eq: [
- ISODate("2017-07-04T15:56:02Z"),
- {'$dateFromString': {dateString: "$date", timezone: 'America/New_York'}}
- ]
- }
- }
- }));
- assert.writeOK(coll.insert({_id: 0, date: "2017-07-04T11:56:02"}));
- assertFailsValidation(coll.insert({_id: 1, date: "2015-02-02T11:00:00"}));
+ assert.writeOK(coll.insert({_id: "valid1", a: 1}));
+ assert.writeOK(coll.insert({_id: "invalid2", b: 1}));
+ assert.writeOK(coll.insert({_id: "invalid3", b: 1}));
+ assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
- // The validator can contain an $expr that may throw at runtime.
+ assertFailsValidation(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
+ assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
+ assertFailsValidation(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "invalid3"}, update: {$set: {a: 1}}}));
+
+ // Modify the collection to remove the document validator.
+ assert.commandWorked(coll.runCommand("collMod", {validator: {}}));
+
+ // Verify that no validation is applied to updates.
+ assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}}));
+ assert.writeOK(coll.update({_id: "invalid2"}, {$set: {z: 1}}));
+ assert.writeOK(coll.update({_id: "valid1"}, {$unset: {a: 1}}));
+ assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {z: 2}}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {z: 2}}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}}));
+ assert.writeOK(
+ coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {a: 1}}}));
+}
+
+// Run the test with a normal validator.
+runInsertUpdateValidationTest({a: {$exists: true}});
+
+// Run the test again with an equivalent JSON Schema.
+runInsertUpdateValidationTest({$jsonSchema: {required: ["a"]}});
+
+/**
+ * Runs a series of document validation tests involving collation using the validator
+ * 'validator', which should enforce that the field "a" has the value "xyz".
+ */
+function runCollationValidationTest(validator) {
coll.drop();
- assert.commandWorked(
- db.createCollection(collName, {validator: {$expr: {$eq: ["$a", {$divide: [1, "$b"]}]}}}));
- assert.writeOK(coll.insert({a: 1, b: 1}));
- let res = coll.insert({a: 1, b: 0});
- assert.writeError(res);
- assert.eq(res.getWriteError().code, 16608);
- assert.writeOK(coll.insert({a: -1, b: -1}));
+ assert.commandWorked(db.createCollection(
+ collName, {validator: validator, collation: {locale: "en_US", strength: 2}}));
+
+ // An insert that matches the validator should succeed.
+ assert.writeOK(coll.insert({_id: 0, a: "xyz", b: "foo"}));
+
+ const isJSONSchema = validator.hasOwnProperty("$jsonSchema");
+
+ // A normal validator should respect the collation and the inserts should succeed. A JSON
+ // Schema validator ignores the collation and the inserts should fail.
+ const assertCorrectResult =
+ isJSONSchema ? res => assertFailsValidation(res) : res => assert.writeOK(res);
+ assertCorrectResult(coll.insert({a: "XYZ"}));
+ assertCorrectResult(coll.insert({a: "XyZ", b: "foo"}));
+ assertCorrectResult(coll.update({_id: 0}, {a: "xyZ", b: "foo"}));
+ assertCorrectResult(coll.update({_id: 0}, {$set: {a: "Xyz"}}));
+ assertCorrectResult(
+ coll.runCommand("findAndModify", {query: {_id: 0}, update: {a: "xyZ", b: "foo"}}));
+ assertCorrectResult(
+ coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "Xyz"}}}));
+
+ // Test an insert and an update that should always fail.
+ assertFailsValidation(coll.insert({a: "not xyz"}));
+ assertFailsValidation(coll.update({_id: 0}, {$set: {a: "xyzz"}}));
+ assertFailsValidation(
+ coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "xyzz"}}}));
+
+ // A normal validator expands leaf arrays, such that if "a" is an array containing "xyz", it
+ // matches {a: "xyz"}. A JSON Schema validator does not expand leaf arrays and treats arrays
+ // as a single array value.
+ assertCorrectResult(coll.insert({a: ["xyz"]}));
+ assertCorrectResult(coll.insert({a: ["XYZ"]}));
+ assertCorrectResult(coll.insert({a: ["XyZ"], b: "foo"}));
+}
+
+runCollationValidationTest({a: "xyz"});
+runCollationValidationTest({$jsonSchema: {properties: {a: {enum: ["xyz"]}}}});
+
+// The validator is allowed to contain $expr.
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {validator: {$expr: {$eq: ["$a", 5]}}}));
+assert.writeOK(coll.insert({a: 5}));
+assertFailsValidation(coll.insert({a: 4}));
+assert.commandWorked(db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", 4]}}}));
+assert.writeOK(coll.insert({a: 4}));
+assertFailsValidation(coll.insert({a: 5}));
+
+// The validator supports $expr with the date extraction expressions (with a timezone
+// specified).
+coll.drop();
+assert.commandWorked(db.createCollection(
+ collName,
+ {validator: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}}}));
+assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
+assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")}));
+
+// The validator supports $expr with a $dateToParts expression.
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {
+ validator: {
+ $expr: {
+ $eq: [
+ {
+ "year": 2017,
+ "month": 10,
+ "day": 1,
+ "hour": 18,
+ "minute": 0,
+ "second": 0,
+ "millisecond": 0
+ },
+ {$dateToParts: {date: "$a", timezone: "America/New_York"}}
+ ]
+ }
+ }
+}));
+assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
+assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")}));
+
+// The validator supports $expr with $dateToString expression.
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {
+ validator: {
+ $expr: {
+ $eq: [
+ "2017-07-04 14:56:42 +0000 (0 minutes)",
+ {
+ $dateToString: {
+ format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)",
+ date: "$date",
+ timezone: "$tz"
+ }
+ }
+ ]
+ }
+ }
+}));
+assert.writeOK(coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"}));
+assertFailsValidation(
+ coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"}));
+
+// The validator supports $expr with $dateFromParts expression.
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {
+ validator: {
+ $expr: {
+ $eq: [
+ ISODate("2016-12-31T15:00:00Z"),
+ {'$dateFromParts': {year: "$year", "timezone": "$timezone"}}
+ ]
+ }
+ }
+}));
+assert.writeOK(coll.insert({_id: 0, year: 2017, month: 6, day: 19, timezone: "Asia/Tokyo"}));
+assertFailsValidation(
+ coll.insert({_id: 1, year: 2022, month: 1, day: 1, timezone: "America/New_York"}));
+
+// The validator supports $expr with $dateFromString expression.
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {
+ validator: {
+ $expr: {
+ $eq: [
+ ISODate("2017-07-04T15:56:02Z"),
+ {'$dateFromString': {dateString: "$date", timezone: 'America/New_York'}}
+ ]
+ }
+ }
+}));
+assert.writeOK(coll.insert({_id: 0, date: "2017-07-04T11:56:02"}));
+assertFailsValidation(coll.insert({_id: 1, date: "2015-02-02T11:00:00"}));
+
+// The validator can contain an $expr that may throw at runtime.
+coll.drop();
+assert.commandWorked(
+ db.createCollection(collName, {validator: {$expr: {$eq: ["$a", {$divide: [1, "$b"]}]}}}));
+assert.writeOK(coll.insert({a: 1, b: 1}));
+let res = coll.insert({a: 1, b: 0});
+assert.writeError(res);
+assert.eq(res.getWriteError().code, 16608);
+assert.writeOK(coll.insert({a: -1, b: -1}));
})();
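
The collation cases above hinge on one asymmetry: a match-expression validator uses the collection's collation, while a $jsonSchema validator intentionally ignores it. A minimal sketch of just that difference, using a hypothetical collection name:

    const name = "doc_validation_collation_demo";  // hypothetical
    db[name].drop();
    // strength: 2 makes string comparison case-insensitive.
    assert.commandWorked(db.createCollection(
        name, {validator: {a: "xyz"}, collation: {locale: "en_US", strength: 2}}));
    assert.writeOK(db[name].insert({a: "XYZ"}));  // "XYZ" == "xyz" under the collation

    db[name].drop();
    assert.commandWorked(db.createCollection(name, {
        validator: {$jsonSchema: {properties: {a: {enum: ["xyz"]}}}},
        collation: {locale: "en_US", strength: 2}
    }));
    // $jsonSchema compares with simple binary semantics, so "XYZ" fails.
    assert.writeErrorWithCode(db[name].insert({a: "XYZ"}),
                              ErrorCodes.DocumentValidationFailure);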
diff --git a/jstests/core/doc_validation_invalid_validators.js b/jstests/core/doc_validation_invalid_validators.js
index 81d16ec5371..b09e2cb0a14 100644
--- a/jstests/core/doc_validation_invalid_validators.js
+++ b/jstests/core/doc_validation_invalid_validators.js
@@ -5,65 +5,62 @@
// Verify invalid validator statements won't work and that we
// can't create validated collections on restricted databases.
(function() {
- "use strict";
+"use strict";
- var collName = "doc_validation_invalid_validators";
- var coll = db[collName];
- coll.drop();
+var collName = "doc_validation_invalid_validators";
+var coll = db[collName];
+coll.drop();
- // Check a few invalid match statements for validator.
- assert.commandFailed(db.createCollection(collName, {validator: 7}));
- assert.commandFailed(db.createCollection(collName, {validator: "assert"}));
- assert.commandFailed(db.createCollection(collName, {validator: {$jsonSchema: {invalid: 1}}}));
+// Check a few invalid match statements for validator.
+assert.commandFailed(db.createCollection(collName, {validator: 7}));
+assert.commandFailed(db.createCollection(collName, {validator: "assert"}));
+assert.commandFailed(db.createCollection(collName, {validator: {$jsonSchema: {invalid: 1}}}));
- // Check some disallowed match statements.
- assert.commandFailed(db.createCollection(collName, {validator: {$text: "bob"}}));
- assert.commandFailed(db.createCollection(collName, {validator: {$where: "this.a == this.b"}}));
- assert.commandFailed(db.createCollection(collName, {validator: {$near: {place: "holder"}}}));
- assert.commandFailed(db.createCollection(collName, {validator: {$geoNear: {place: "holder"}}}));
- assert.commandFailed(
- db.createCollection(collName, {validator: {$nearSphere: {place: "holder"}}}));
- assert.commandFailed(
- db.createCollection(collName, {validator: {$expr: {$eq: ["$a", "$$unbound"]}}}));
+// Check some disallowed match statements.
+assert.commandFailed(db.createCollection(collName, {validator: {$text: "bob"}}));
+assert.commandFailed(db.createCollection(collName, {validator: {$where: "this.a == this.b"}}));
+assert.commandFailed(db.createCollection(collName, {validator: {$near: {place: "holder"}}}));
+assert.commandFailed(db.createCollection(collName, {validator: {$geoNear: {place: "holder"}}}));
+assert.commandFailed(db.createCollection(collName, {validator: {$nearSphere: {place: "holder"}}}));
+assert.commandFailed(
+ db.createCollection(collName, {validator: {$expr: {$eq: ["$a", "$$unbound"]}}}));
- // Verify we fail on admin, local and config databases.
- assert.commandFailed(
- db.getSiblingDB("admin").createCollection(collName, {validator: {a: {$exists: true}}}));
- if (!db.runCommand("isdbgrid").isdbgrid) {
- assert.commandFailed(
- db.getSiblingDB("local").createCollection(collName, {validator: {a: {$exists: true}}}));
- }
+// Verify we fail on admin, local and config databases.
+assert.commandFailed(
+ db.getSiblingDB("admin").createCollection(collName, {validator: {a: {$exists: true}}}));
+if (!db.runCommand("isdbgrid").isdbgrid) {
assert.commandFailed(
- db.getSiblingDB("config").createCollection(collName, {validator: {a: {$exists: true}}}));
+ db.getSiblingDB("local").createCollection(collName, {validator: {a: {$exists: true}}}));
+}
+assert.commandFailed(
+ db.getSiblingDB("config").createCollection(collName, {validator: {a: {$exists: true}}}));
- // Create collection with document validator.
- assert.commandWorked(db.createCollection(collName, {validator: {a: {$exists: true}}}));
+// Create collection with document validator.
+assert.commandWorked(db.createCollection(collName, {validator: {a: {$exists: true}}}));
- // Verify some invalid match statements can't be passed to collMod.
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$text: {$search: "bob"}}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$near: {place: "holder"}}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$geoNear: {place: "holder"}}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$nearSphere: {place: "holder"}}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", "$$unbound"]}}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$jsonSchema: {invalid: 7}}}));
+// Verify some invalid match statements can't be passed to collMod.
+assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: {$search: "bob"}}}));
+assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}}));
+assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$near: {place: "holder"}}}));
+assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$geoNear: {place: "holder"}}}));
+assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$nearSphere: {place: "holder"}}}));
+assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", "$$unbound"]}}}));
+assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$jsonSchema: {invalid: 7}}}));
- coll.drop();
+coll.drop();
- // Create collection without document validator.
- assert.commandWorked(db.createCollection(collName));
+// Create collection without document validator.
+assert.commandWorked(db.createCollection(collName));
- // Verify we can't add an invalid validator to a collection without a validator.
- assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: "bob"}}));
- assert.commandFailed(
- db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}}));
- assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}}));
- coll.drop();
+// Verify we can't add an invalid validator to a collection without a validator.
+assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: "bob"}}));
+assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}}));
+assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}}));
+coll.drop();
})();
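
The rejected operators above share a theme: anything that needs per-document JavaScript, text, or geo machinery is refused as a validator. A minimal sketch (hypothetical collection name) showing that the same check guards both entry points, createCollection and collMod:

    const demoName = "invalid_validator_demo";  // hypothetical
    db[demoName].drop();
    // Rejected up front when creating the collection.
    assert.commandFailed(
        db.createCollection(demoName, {validator: {$where: "this.a == this.b"}}));
    // The same check applies when attaching a validator to an existing collection.
    assert.commandWorked(db.createCollection(demoName));
    assert.commandFailed(
        db.runCommand({collMod: demoName, validator: {$where: "this.a == this.b"}}));
    db[demoName].drop();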
diff --git a/jstests/core/doc_validation_options.js b/jstests/core/doc_validation_options.js
index e9ba64d8029..50d8edfa671 100644
--- a/jstests/core/doc_validation_options.js
+++ b/jstests/core/doc_validation_options.js
@@ -3,56 +3,55 @@
// @tags: [assumes_no_implicit_collection_creation_after_drop, requires_non_retryable_commands]
(function() {
- "use strict";
+"use strict";
- function assertFailsValidation(res) {
- var DocumentValidationFailure = 121;
- assert.writeError(res);
- assert.eq(res.getWriteError().code, DocumentValidationFailure);
- }
+function assertFailsValidation(res) {
+ var DocumentValidationFailure = 121;
+ assert.writeError(res);
+ assert.eq(res.getWriteError().code, DocumentValidationFailure);
+}
- var t = db.doc_validation_options;
- t.drop();
+var t = db.doc_validation_options;
+t.drop();
- assert.commandWorked(db.createCollection(t.getName(), {validator: {a: 1}}));
+assert.commandWorked(db.createCollection(t.getName(), {validator: {a: 1}}));
- assertFailsValidation(t.insert({a: 2}));
- t.insert({a: 1});
- assert.eq(1, t.count());
+assertFailsValidation(t.insert({a: 2}));
+t.insert({a: 1});
+assert.eq(1, t.count());
- // test that validation defaults to strict enforcement
- assertFailsValidation(t.update({}, {$set: {a: 2}}));
- assert.eq(1, t.find({a: 1}).itcount());
+// test that validation defaults to strict enforcement
+assertFailsValidation(t.update({}, {$set: {a: 2}}));
+assert.eq(1, t.find({a: 1}).itcount());
- // check we can do a bad update in warn mode
- assert.commandWorked(t.runCommand("collMod", {validationAction: "warn"}));
- t.update({}, {$set: {a: 2}});
- assert.eq(1, t.find({a: 2}).itcount());
+// check we can do a bad update in warn mode
+assert.commandWorked(t.runCommand("collMod", {validationAction: "warn"}));
+t.update({}, {$set: {a: 2}});
+assert.eq(1, t.find({a: 2}).itcount());
- // TODO: check log for message?
+// TODO: check log for message?
- // make sure the 'warn' setting persisted
- var info = db.getCollectionInfos({name: t.getName()})[0];
- assert.eq("warn", info.options.validationAction, tojson(info));
+// make sure the 'warn' setting persisted
+var info = db.getCollectionInfos({name: t.getName()})[0];
+assert.eq("warn", info.options.validationAction, tojson(info));
- // check we can go back to enforcing strict validation
- assert.commandWorked(
- t.runCommand("collMod", {validationAction: "error", validationLevel: "strict"}));
- assertFailsValidation(t.update({}, {$set: {a: 3}}));
- assert.eq(1, t.find({a: 2}).itcount());
+// check we can go back to enforcing strict validation
+assert.commandWorked(
+ t.runCommand("collMod", {validationAction: "error", validationLevel: "strict"}));
+assertFailsValidation(t.update({}, {$set: {a: 3}}));
+assert.eq(1, t.find({a: 2}).itcount());
- // check that updating a non-conforming doc to another non-conforming state is ok under 'moderate'
- assert.commandWorked(t.runCommand("collMod", {validationLevel: "moderate"}));
- t.update({}, {$set: {a: 3}});
- assert.eq(1, t.find({a: 3}).itcount());
+// check that updating a non-conforming doc to another non-conforming state is ok under 'moderate'
+assert.commandWorked(t.runCommand("collMod", {validationLevel: "moderate"}));
+t.update({}, {$set: {a: 3}});
+assert.eq(1, t.find({a: 3}).itcount());
- // test create
- t.drop();
- assert.commandWorked(
- db.createCollection(t.getName(), {validator: {a: 1}, validationAction: "warn"}));
-
- t.insert({a: 2});
- t.insert({a: 1});
- assert.eq(2, t.count());
+// test create
+t.drop();
+assert.commandWorked(
+ db.createCollection(t.getName(), {validator: {a: 1}, validationAction: "warn"}));
+t.insert({a: 2});
+t.insert({a: 1});
+assert.eq(2, t.count());
})();
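
The 'moderate' case follows from how the option is defined: validation is applied only to documents that already conform, so a non-conforming document may move to another non-conforming state. A minimal sketch (hypothetical collection name) isolating that rule:

    const demo = db.doc_validation_moderate_demo;  // hypothetical
    demo.drop();
    assert.commandWorked(db.createCollection(demo.getName(), {validator: {a: 1}}));
    assert.writeOK(demo.insert({a: 1}));  // conforming document
    // Under 'warn' the violating update is allowed (and logged).
    assert.commandWorked(demo.runCommand("collMod", {validationAction: "warn"}));
    assert.writeOK(demo.update({a: 1}, {$set: {a: 2}}));
    // Back to 'error', but with level 'moderate': the document no longer
    // conforms, so its updates are not validated at all.
    assert.commandWorked(demo.runCommand(
        "collMod", {validationAction: "error", validationLevel: "moderate"}));
    assert.writeOK(demo.update({a: 2}, {$set: {a: 3}}));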
diff --git a/jstests/core/dotted_path_in_null.js b/jstests/core/dotted_path_in_null.js
index 31ffc11a562..f32f60a9ff2 100644
--- a/jstests/core/dotted_path_in_null.js
+++ b/jstests/core/dotted_path_in_null.js
@@ -1,23 +1,23 @@
(function() {
- "use strict";
+"use strict";
- const coll = db.dotted_path_in_null;
- coll.drop();
+const coll = db.dotted_path_in_null;
+coll.drop();
- assert.writeOK(coll.insert({_id: 1, a: [{b: 5}]}));
- assert.writeOK(coll.insert({_id: 2, a: [{}]}));
- assert.writeOK(coll.insert({_id: 3, a: []}));
- assert.writeOK(coll.insert({_id: 4, a: [{}, {b: 5}]}));
- assert.writeOK(coll.insert({_id: 5, a: [5, {b: 5}]}));
+assert.writeOK(coll.insert({_id: 1, a: [{b: 5}]}));
+assert.writeOK(coll.insert({_id: 2, a: [{}]}));
+assert.writeOK(coll.insert({_id: 3, a: []}));
+assert.writeOK(coll.insert({_id: 4, a: [{}, {b: 5}]}));
+assert.writeOK(coll.insert({_id: 5, a: [5, {b: 5}]}));
- function getIds(query) {
- let ids = [];
- coll.find(query).sort({_id: 1}).forEach(doc => ids.push(doc._id));
- return ids;
- }
+function getIds(query) {
+ let ids = [];
+ coll.find(query).sort({_id: 1}).forEach(doc => ids.push(doc._id));
+ return ids;
+}
- assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents");
+assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents");
- assert.commandWorked(coll.createIndex({"a.b": 1}));
- assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents");
+assert.commandWorked(coll.createIndex({"a.b": 1}));
+assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents");
}());
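
The expected answer [2, 4] pins down the null-matching rule for dotted paths into arrays: an object element lacking the field matches null, an empty array contributes no elements, and a scalar element does not produce a null match. A minimal sketch, under the assumption (consistent with the test) that $in: [null] behaves like an equality match against null:

    const demo = db.dotted_path_in_null_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({_id: 2, a: [{}]}));         // object element, no 'b'
    assert.writeOK(demo.insert({_id: 3, a: []}));           // no elements to match
    assert.writeOK(demo.insert({_id: 5, a: [5, {b: 5}]}));  // scalar + 'b' present
    assert.eq(
        [2],
        demo.find({"a.b": {$eq: null}}).sort({_id: 1}).toArray().map(d => d._id));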
diff --git a/jstests/core/drop_index.js b/jstests/core/drop_index.js
index 8bce5608773..83e03c5a8fd 100644
--- a/jstests/core/drop_index.js
+++ b/jstests/core/drop_index.js
@@ -2,85 +2,85 @@
// collection.
// @tags: [assumes_no_implicit_index_creation]
(function() {
- 'use strict';
+'use strict';
- const t = db.drop_index;
- t.drop();
+const t = db.drop_index;
+t.drop();
- /**
- * Returns the names of all indexes currently on the collection.
- */
- function getIndexNames() {
- return t.getIndexes().map(spec => spec.name);
- }
+/**
+ * Returns the names of all indexes currently on the collection.
+ */
+function getIndexNames() {
+ return t.getIndexes().map(spec => spec.name);
+}
- /**
- * Checks that the collection contains the given list of non-id indexes and nothing else.
- */
- function assertIndexes(expectedIndexNames, msg) {
- const actualIndexNames = getIndexNames();
- const testMsgSuffix = () => msg + ': expected ' + tojson(expectedIndexNames) + ' but got ' +
- tojson(actualIndexNames) + ' instead.';
- assert.eq(expectedIndexNames.length + 1,
- actualIndexNames.length,
- 'unexpected number of indexes after ' + testMsgSuffix());
- assert(actualIndexNames.includes('_id_'),
- '_id index missing after ' + msg + ': ' + tojson(actualIndexNames));
- for (let expectedIndexName of expectedIndexNames) {
- assert(actualIndexNames.includes(expectedIndexName),
- expectedIndexName + ' index missing after ' + testMsgSuffix());
- }
+/**
+ * Checks that the collection contains the given list of non-id indexes and nothing else.
+ */
+function assertIndexes(expectedIndexNames, msg) {
+ const actualIndexNames = getIndexNames();
+ const testMsgSuffix = () => msg + ': expected ' + tojson(expectedIndexNames) + ' but got ' +
+ tojson(actualIndexNames) + ' instead.';
+ assert.eq(expectedIndexNames.length + 1,
+ actualIndexNames.length,
+ 'unexpected number of indexes after ' + testMsgSuffix());
+ assert(actualIndexNames.includes('_id_'),
+ '_id index missing after ' + msg + ': ' + tojson(actualIndexNames));
+ for (let expectedIndexName of expectedIndexNames) {
+ assert(actualIndexNames.includes(expectedIndexName),
+ expectedIndexName + ' index missing after ' + testMsgSuffix());
}
+}
- assert.writeOK(t.insert({_id: 1, a: 2, b: 3, c: 1, d: 1, e: 1}));
- assertIndexes([], 'inserting test document');
+assert.writeOK(t.insert({_id: 1, a: 2, b: 3, c: 1, d: 1, e: 1}));
+assertIndexes([], 'inserting test document');
- assert.commandWorked(t.createIndex({a: 1}));
- assert.commandWorked(t.createIndex({b: 1}));
- assert.commandWorked(t.createIndex({c: 1}));
- assert.commandWorked(t.createIndex({d: 1}));
- assert.commandWorked(t.createIndex({e: 1}));
- assertIndexes(['a_1', 'b_1', 'c_1', 'd_1', 'e_1'], 'creating indexes');
+assert.commandWorked(t.createIndex({a: 1}));
+assert.commandWorked(t.createIndex({b: 1}));
+assert.commandWorked(t.createIndex({c: 1}));
+assert.commandWorked(t.createIndex({d: 1}));
+assert.commandWorked(t.createIndex({e: 1}));
+assertIndexes(['a_1', 'b_1', 'c_1', 'd_1', 'e_1'], 'creating indexes');
- // Drop single index by name.
- // Collection.dropIndex() throws if the dropIndexes command fails.
- t.dropIndex(t._genIndexName({a: 1}));
- assertIndexes(['b_1', 'c_1', 'd_1', 'e_1'], 'dropping {a: 1} by name');
+// Drop single index by name.
+// Collection.dropIndex() throws if the dropIndexes command fails.
+t.dropIndex(t._genIndexName({a: 1}));
+assertIndexes(['b_1', 'c_1', 'd_1', 'e_1'], 'dropping {a: 1} by name');
- // Drop single index by key pattern.
- t.dropIndex({b: 1});
- assertIndexes(['c_1', 'd_1', 'e_1'], 'dropping {b: 1} by key pattern');
+// Drop single index by key pattern.
+t.dropIndex({b: 1});
+assertIndexes(['c_1', 'd_1', 'e_1'], 'dropping {b: 1} by key pattern');
- // Not allowed to drop _id index.
- assert.commandFailedWithCode(t.dropIndex('_id_'), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(t.dropIndex({_id: 1}), ErrorCodes.InvalidOptions);
+// Not allowed to drop _id index.
+assert.commandFailedWithCode(t.dropIndex('_id_'), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(t.dropIndex({_id: 1}), ErrorCodes.InvalidOptions);
- // Ensure you can recreate indexes, even if you don't use the dropIndex method.
- // Prior to SERVER-7168, the shell used to cache names of indexes created using
- // Collection.ensureIndex().
- assert.commandWorked(t.createIndex({a: 1}));
- assertIndexes(['a_1', 'c_1', 'd_1', 'e_1'], 'recreating {a: 1}');
+// Ensure you can recreate indexes, even if you don't use the dropIndex method.
+// Prior to SERVER-7168, the shell used to cache names of indexes created using
+// Collection.ensureIndex().
+assert.commandWorked(t.createIndex({a: 1}));
+assertIndexes(['a_1', 'c_1', 'd_1', 'e_1'], 'recreating {a: 1}');
- // Drop multiple indexes.
- assert.commandWorked(t.dropIndexes(['c_1', 'd_1']));
- assertIndexes(['a_1', 'e_1'], 'dropping {c: 1} and {d: 1}');
+// Drop multiple indexes.
+assert.commandWorked(t.dropIndexes(['c_1', 'd_1']));
+assertIndexes(['a_1', 'e_1'], 'dropping {c: 1} and {d: 1}');
- // Must drop all the indexes provided or none at all - for example, if one of the index names
- // provided is invalid.
- let ex = assert.throws(() => {
- t.dropIndexes(['a_1', '_id_']);
- });
- assert.commandFailedWithCode(ex, ErrorCodes.InvalidOptions);
- assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with _id index');
+// Must drop all the indexes provided or none at all - for example, if one of the index names
+// provided is invalid.
+let ex = assert.throws(() => {
+ t.dropIndexes(['a_1', '_id_']);
+});
+assert.commandFailedWithCode(ex, ErrorCodes.InvalidOptions);
+assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with _id index');
- // List of index names must contain only strings.
- ex = assert.throws(() => {
- t.dropIndexes(['a_1', 123]);
- });
- assert.commandFailedWithCode(ex, ErrorCodes.TypeMismatch);
- assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with non-string index name');
+// List of index names must contain only strings.
+ex = assert.throws(() => {
+ t.dropIndexes(['a_1', 123]);
+});
+assert.commandFailedWithCode(ex, ErrorCodes.TypeMismatch);
+assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with non-string index name');
- // Drop all indexes.
- assert.commandWorked(t.dropIndexes());
- assertIndexes([], 'dropping all indexes');
+// Drop all indexes.
+assert.commandWorked(t.dropIndexes());
+assertIndexes([], 'dropping all indexes');
}());
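
The list form of dropIndexes is all-or-nothing, which is why the failed attempts above leave the index set untouched. A minimal sketch (hypothetical collection name):

    const demo = db.drop_index_demo;  // hypothetical
    demo.drop();
    assert.commandWorked(demo.createIndex({a: 1}));
    assert.commandWorked(demo.createIndex({b: 1}));
    // '_id_' may never be dropped, so the whole command fails...
    const ex = assert.throws(() => demo.dropIndexes(['a_1', '_id_']));
    assert.commandFailedWithCode(ex, ErrorCodes.InvalidOptions);
    // ...and both secondary indexes survive alongside the implicit _id index.
    assert.eq(3, demo.getIndexes().length);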
diff --git a/jstests/core/dropdb.js b/jstests/core/dropdb.js
index 1af56da34bf..1fd3fd10582 100644
--- a/jstests/core/dropdb.js
+++ b/jstests/core/dropdb.js
@@ -12,8 +12,8 @@ function check(shouldExist) {
var dbs = m.getDBNames();
assert.eq(Array.contains(dbs, baseName),
shouldExist,
- "DB " + baseName + " should " + (shouldExist ? "" : "not ") + "exist." + " dbs: " +
- tojson(dbs) + "\n" + tojson(m.getDBs()));
+ "DB " + baseName + " should " + (shouldExist ? "" : "not ") + "exist." +
+ " dbs: " + tojson(dbs) + "\n" + tojson(m.getDBs()));
}
ddb.c.save({});
diff --git a/jstests/core/elemMatchProjection.js b/jstests/core/elemMatchProjection.js
index f01b566d3b6..390b7aa5d17 100644
--- a/jstests/core/elemMatchProjection.js
+++ b/jstests/core/elemMatchProjection.js
@@ -2,247 +2,239 @@
// Tests for $elemMatch projections and $ positional operator projection.
(function() {
- "use strict";
-
- const coll = db.SERVER828Test;
- coll.drop();
-
- const date1 = new Date();
-
- // Generate monotonically increasing _id values. ObjectIds generated by the shell are not
- // guaranteed to be monotonically increasing, and we will depend on the _id sort order later in
- // the test.
- let currentId = 0;
- function nextId() {
- return ++currentId;
- }
-
- // Insert various styles of arrays.
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 100; i++) {
- bulk.insert({_id: nextId(), group: 1, x: [1, 2, 3, 4, 5]});
- bulk.insert({_id: nextId(), group: 2, x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}]});
- bulk.insert({
- _id: nextId(),
- group: 3,
- x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}],
- y: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}]
- });
- bulk.insert({_id: nextId(), group: 3, x: [{a: 1, b: 3}, {a: -6, c: 3}]});
- bulk.insert({_id: nextId(), group: 4, x: [{a: 1, b: 4}, {a: -6, c: 3}]});
- bulk.insert(
- {_id: nextId(), group: 5, x: [new Date(), 5, 10, 'string', new ObjectId(), 123.456]});
- bulk.insert({
- _id: nextId(),
- group: 6,
- x: [
- {a: 'string', b: date1},
- {a: new ObjectId(), b: 1.2345},
- {a: 'string2', b: date1}
- ]
- });
- bulk.insert({_id: nextId(), group: 7, x: [{y: [1, 2, 3, 4]}]});
- bulk.insert({_id: nextId(), group: 8, x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}]});
- bulk.insert({
- _id: nextId(),
- group: 9,
- x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}, {z: [{a: 1, b: 2}, {a: 3, b: 4}]}]
- });
- bulk.insert({
- _id: nextId(),
- group: 10,
- x: [{a: 1, b: 2}, {a: 3, b: 4}],
- y: [{c: 1, d: 2}, {c: 3, d: 4}]
- });
- bulk.insert({
- _id: nextId(),
- group: 10,
- x: [{a: 1, b: 2}, {a: 3, b: 4}],
- y: [{c: 1, d: 2}, {c: 3, d: 4}]
- });
- bulk.insert({
- _id: nextId(),
- group: 11,
- x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}],
- covered: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}]
- });
- bulk.insert({_id: nextId(), group: 12, x: {y: [{a: 1, b: 1}, {a: 1, b: 2}]}});
- bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 1}, {a: 1, b: 2}]});
- bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 2}, {a: 1, b: 1}]});
- }
- assert.writeOK(bulk.execute());
-
- assert.writeOK(coll.createIndex({group: 1, 'y.d': 1}));
- assert.writeOK(coll.createIndex({group: 1, covered: 1})); // for covered index test
-
- // Tests for the $-positional operator.
- assert.eq(1,
- coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length,
- "single object match (array length match)");
-
- assert.eq(2,
- coll.find({group: 3, 'x.a': 1}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x[0].b,
- "single object match first");
-
- assert.eq(undefined,
- coll.find({group: 3, 'x.a': 2}, {_id: 0, 'x.$': 1}).sort({_id: 1}).toArray()[0]._id,
- "single object match with filtered _id");
-
- assert.eq(1,
- coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length,
- "sorted single object match with filtered _id (array length match)");
-
- assert.eq(1,
- coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': 1})
- .toArray()[0]
- .x.length,
- "single object match with elemMatch");
-
- assert.eq(1,
- coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': 1}})
- .toArray()[0]
- .x.length,
- "single object match with elemMatch and positive slice");
-
- assert.eq(
- 1,
- coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': -1}})
- .toArray()[0]
- .x.length,
- "single object match with elemMatch and negative slice");
-
- assert.eq(1,
- coll.find({'group': 12, 'x.y.a': 1}, {'x.y.$': 1}).toArray()[0].x.y.length,
- "single object match with two level dot notation");
-
- assert.eq(1,
- coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({x: 1}).toArray()[0].x.length,
- "sorted object match (array length match)");
-
- assert.eq({aa: 1, dd: 5},
- coll.find({group: 3, 'y.dd': 5}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y[0],
- "single object match (value match)");
-
- assert.throws(function() {
- coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).toArray();
- }, [], "throw on invalid projection (field mismatch)");
-
- assert.throws(function() {
- coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).sort({x: 1}).toArray();
- }, [], "throw on invalid sorted projection (field mismatch)");
-
- assert.throws(function() {
- coll.find({group: 3, 'x.a': 2}, {'x.$': 1, group: 0}).sort({x: 1}).toArray();
- }, [], "throw on invalid projection combination (include and exclude)");
-
- assert.throws(function() {
- coll.find({group: 3, 'x.a': 1, 'y.aa': 1}, {'x.$': 1, 'y.$': 1}).toArray();
- }, [], "throw on multiple projections");
-
- assert.throws(function() {
- coll.find({group: 3}, {'g.$': 1}).toArray();
- }, [], "throw on invalid projection (non-array field)");
-
- assert.eq({aa: 1, dd: 5},
- coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}).toArray()[0].covered[0],
- "single object match (covered index)");
-
- assert.eq({aa: 1, dd: 5},
- coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1})
- .sort({covered: 1})
- .toArray()[0]
- .covered[0],
- "single object match (sorted covered index)");
-
- assert.eq(1,
- coll.find({group: 10, 'y.d': 4}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y.length,
- "single object match (regular index");
-
- // Tests for $elemMatch projection.
- assert.eq(-6,
- coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a,
- "single object match");
-
- assert.eq(1,
- coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x.length,
- "filters non-matching array elements");
-
- assert.eq(1,
- coll.find({group: 4}, {x: {$elemMatch: {a: -6, c: 3}}}).toArray()[0].x.length,
- "filters non-matching array elements with multiple elemMatch criteria");
-
- assert.eq(
- 1,
- coll.find({group: 13}, {'x': {'$elemMatch': {a: {$gt: 0, $lt: 2}}}})
- .sort({_id: 1})
- .toArray()[0]
- .x.length,
- "filters non-matching array elements with multiple criteria for a single element in the array");
-
- assert.eq(
- 3,
- coll.find({group: 4}, {x: {$elemMatch: {a: {$lt: 1}}}}).sort({_id: 1}).toArray()[0].x[0].c,
- "object operator match");
-
- assert.eq([4],
- coll.find({group: 1}, {x: {$elemMatch: {$in: [100, 4, -123]}}}).toArray()[0].x,
- "$in number match");
-
- assert.eq([{a: 1, b: 2}],
- coll.find({group: 2}, {x: {$elemMatch: {a: {$in: [1]}}}}).toArray()[0].x,
- "$in number match");
-
- assert.eq([1],
- coll.find({group: 1}, {x: {$elemMatch: {$nin: [4, 5, 6]}}}).toArray()[0].x,
- "$nin number match");
-
- assert.eq([1],
- coll.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x,
- "$in number match");
-
- assert.eq([{a: 'string', b: date1}],
- coll.find({group: 6}, {x: {$elemMatch: {a: 'string'}}}).toArray()[0].x,
- "mixed object match on string eq");
-
- assert.eq([{a: 'string2', b: date1}],
- coll.find({group: 6}, {x: {$elemMatch: {a: /ring2/}}}).toArray()[0].x,
- "mixed object match on regexp");
-
- assert.eq([{a: 'string', b: date1}],
- coll.find({group: 6}, {x: {$elemMatch: {a: {$type: 2}}}}).toArray()[0].x,
- "mixed object match on type");
-
- assert.eq([{a: 2, c: 3}],
- coll.find({group: 2}, {x: {$elemMatch: {a: {$ne: 1}}}}).toArray()[0].x,
- "mixed object match on ne");
-
- assert.eq([{a: 1, d: 5}],
- coll.find({group: 3}, {x: {$elemMatch: {d: {$exists: true}}}})
- .sort({_id: 1})
- .toArray()[0]
- .x,
- "mixed object match on exists");
-
- assert.eq(
- [{a: 2, c: 3}],
- coll.find({group: 3}, {x: {$elemMatch: {a: {$mod: [2, 0]}}}}).sort({_id: 1}).toArray()[0].x,
- "mixed object match on mod");
-
- assert.eq({"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]},
- coll.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}})
- .sort({_id: 1})
- .toArray()[0],
- "multiple $elemMatch on unique fields 1");
-
- // Tests involving getMore. Test the $-positional operator across multiple batches.
- let a = coll.find({group: 3, 'x.b': 2}, {'x.$': 1}).sort({_id: 1}).batchSize(1);
- while (a.hasNext()) {
- assert.eq(2, a.next().x[0].b, "positional getMore test");
- }
-
- // Test the $elemMatch operator across multiple batches.
- a = coll.find({group: 3}, {x: {$elemMatch: {a: 1}}}).sort({_id: 1}).batchSize(1);
- while (a.hasNext()) {
- assert.eq(1, a.next().x[0].a, "$elemMatch getMore test");
- }
+"use strict";
+
+const coll = db.SERVER828Test;
+coll.drop();
+
+const date1 = new Date();
+
+// Generate monotonically increasing _id values. ObjectIds generated by the shell are not
+// guaranteed to be monotonically increasing, and we will depend on the _id sort order later in
+// the test.
+let currentId = 0;
+function nextId() {
+ return ++currentId;
+}
+
+// Insert various styles of arrays.
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 100; i++) {
+ bulk.insert({_id: nextId(), group: 1, x: [1, 2, 3, 4, 5]});
+ bulk.insert({_id: nextId(), group: 2, x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}]});
+ bulk.insert({
+ _id: nextId(),
+ group: 3,
+ x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}],
+ y: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}]
+ });
+ bulk.insert({_id: nextId(), group: 3, x: [{a: 1, b: 3}, {a: -6, c: 3}]});
+ bulk.insert({_id: nextId(), group: 4, x: [{a: 1, b: 4}, {a: -6, c: 3}]});
+ bulk.insert(
+ {_id: nextId(), group: 5, x: [new Date(), 5, 10, 'string', new ObjectId(), 123.456]});
+ bulk.insert({
+ _id: nextId(),
+ group: 6,
+ x: [{a: 'string', b: date1}, {a: new ObjectId(), b: 1.2345}, {a: 'string2', b: date1}]
+ });
+ bulk.insert({_id: nextId(), group: 7, x: [{y: [1, 2, 3, 4]}]});
+ bulk.insert({_id: nextId(), group: 8, x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}]});
+ bulk.insert({
+ _id: nextId(),
+ group: 9,
+ x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}, {z: [{a: 1, b: 2}, {a: 3, b: 4}]}]
+ });
+ bulk.insert({
+ _id: nextId(),
+ group: 10,
+ x: [{a: 1, b: 2}, {a: 3, b: 4}],
+ y: [{c: 1, d: 2}, {c: 3, d: 4}]
+ });
+ bulk.insert({
+ _id: nextId(),
+ group: 10,
+ x: [{a: 1, b: 2}, {a: 3, b: 4}],
+ y: [{c: 1, d: 2}, {c: 3, d: 4}]
+ });
+ bulk.insert({
+ _id: nextId(),
+ group: 11,
+ x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}],
+ covered: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}]
+ });
+ bulk.insert({_id: nextId(), group: 12, x: {y: [{a: 1, b: 1}, {a: 1, b: 2}]}});
+ bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 1}, {a: 1, b: 2}]});
+ bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 2}, {a: 1, b: 1}]});
+}
+assert.writeOK(bulk.execute());
+
+assert.commandWorked(coll.createIndex({group: 1, 'y.d': 1}));
+assert.commandWorked(coll.createIndex({group: 1, covered: 1})); // for covered index test
+
+// Tests for the $-positional operator.
+assert.eq(1,
+ coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length,
+ "single object match (array length match)");
+
+assert.eq(2,
+ coll.find({group: 3, 'x.a': 1}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x[0].b,
+ "single object match first");
+
+assert.eq(undefined,
+ coll.find({group: 3, 'x.a': 2}, {_id: 0, 'x.$': 1}).sort({_id: 1}).toArray()[0]._id,
+ "single object match with filtered _id");
+
+assert.eq(1,
+ coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length,
+ "sorted single object match with filtered _id (array length match)");
+
+assert.eq(1,
+ coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': 1})
+ .toArray()[0]
+ .x.length,
+ "single object match with elemMatch");
+
+assert.eq(1,
+ coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': 1}})
+ .toArray()[0]
+ .x.length,
+ "single object match with elemMatch and positive slice");
+
+assert.eq(1,
+ coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': -1}})
+ .toArray()[0]
+ .x.length,
+ "single object match with elemMatch and negative slice");
+
+assert.eq(1,
+ coll.find({'group': 12, 'x.y.a': 1}, {'x.y.$': 1}).toArray()[0].x.y.length,
+ "single object match with two level dot notation");
+
+assert.eq(1,
+ coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({x: 1}).toArray()[0].x.length,
+ "sorted object match (array length match)");
+
+assert.eq({aa: 1, dd: 5},
+ coll.find({group: 3, 'y.dd': 5}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y[0],
+ "single object match (value match)");
+
+assert.throws(function() {
+ coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).toArray();
+}, [], "throw on invalid projection (field mismatch)");
+
+assert.throws(function() {
+ coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).sort({x: 1}).toArray();
+}, [], "throw on invalid sorted projection (field mismatch)");
+
+assert.throws(function() {
+ coll.find({group: 3, 'x.a': 2}, {'x.$': 1, group: 0}).sort({x: 1}).toArray();
+}, [], "throw on invalid projection combination (include and exclude)");
+
+assert.throws(function() {
+ coll.find({group: 3, 'x.a': 1, 'y.aa': 1}, {'x.$': 1, 'y.$': 1}).toArray();
+}, [], "throw on multiple projections");
+
+assert.throws(function() {
+ coll.find({group: 3}, {'g.$': 1}).toArray();
+}, [], "throw on invalid projection (non-array field)");
+
+assert.eq({aa: 1, dd: 5},
+ coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}).toArray()[0].covered[0],
+ "single object match (covered index)");
+
+assert.eq({aa: 1, dd: 5},
+ coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1})
+ .sort({covered: 1})
+ .toArray()[0]
+ .covered[0],
+ "single object match (sorted covered index)");
+
+assert.eq(1,
+ coll.find({group: 10, 'y.d': 4}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y.length,
+ "single object match (regular index");
+
+// Tests for $elemMatch projection.
+assert.eq(-6,
+ coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a,
+ "single object match");
+
+assert.eq(1,
+ coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x.length,
+ "filters non-matching array elements");
+
+assert.eq(1,
+ coll.find({group: 4}, {x: {$elemMatch: {a: -6, c: 3}}}).toArray()[0].x.length,
+ "filters non-matching array elements with multiple elemMatch criteria");
+
+assert.eq(
+ 1,
+ coll.find({group: 13}, {'x': {'$elemMatch': {a: {$gt: 0, $lt: 2}}}})
+ .sort({_id: 1})
+ .toArray()[0]
+ .x.length,
+ "filters non-matching array elements with multiple criteria for a single element in the array");
+
+assert.eq(
+ 3,
+ coll.find({group: 4}, {x: {$elemMatch: {a: {$lt: 1}}}}).sort({_id: 1}).toArray()[0].x[0].c,
+ "object operator match");
+
+assert.eq([4],
+ coll.find({group: 1}, {x: {$elemMatch: {$in: [100, 4, -123]}}}).toArray()[0].x,
+ "$in number match");
+
+assert.eq([{a: 1, b: 2}],
+ coll.find({group: 2}, {x: {$elemMatch: {a: {$in: [1]}}}}).toArray()[0].x,
+ "$in number match");
+
+assert.eq([1],
+ coll.find({group: 1}, {x: {$elemMatch: {$nin: [4, 5, 6]}}}).toArray()[0].x,
+ "$nin number match");
+
+assert.eq(
+ [1], coll.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x, "$all number match");
+
+assert.eq([{a: 'string', b: date1}],
+ coll.find({group: 6}, {x: {$elemMatch: {a: 'string'}}}).toArray()[0].x,
+ "mixed object match on string eq");
+
+assert.eq([{a: 'string2', b: date1}],
+ coll.find({group: 6}, {x: {$elemMatch: {a: /ring2/}}}).toArray()[0].x,
+ "mixed object match on regexp");
+
+assert.eq([{a: 'string', b: date1}],
+ coll.find({group: 6}, {x: {$elemMatch: {a: {$type: 2}}}}).toArray()[0].x,
+ "mixed object match on type");
+
+assert.eq([{a: 2, c: 3}],
+ coll.find({group: 2}, {x: {$elemMatch: {a: {$ne: 1}}}}).toArray()[0].x,
+ "mixed object match on ne");
+
+assert.eq(
+ [{a: 1, d: 5}],
+ coll.find({group: 3}, {x: {$elemMatch: {d: {$exists: true}}}}).sort({_id: 1}).toArray()[0].x,
+ "mixed object match on exists");
+
+assert.eq(
+ [{a: 2, c: 3}],
+ coll.find({group: 3}, {x: {$elemMatch: {a: {$mod: [2, 0]}}}}).sort({_id: 1}).toArray()[0].x,
+ "mixed object match on mod");
+
+assert.eq({"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]},
+ coll.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}})
+ .sort({_id: 1})
+ .toArray()[0],
+ "multiple $elemMatch on unique fields 1");
+
+// Tests involving getMore. Test the $-positional operator across multiple batches.
+let a = coll.find({group: 3, 'x.b': 2}, {'x.$': 1}).sort({_id: 1}).batchSize(1);
+while (a.hasNext()) {
+ assert.eq(2, a.next().x[0].b, "positional getMore test");
+}
+
+// Test the $elemMatch operator across multiple batches.
+a = coll.find({group: 3}, {x: {$elemMatch: {a: 1}}}).sort({_id: 1}).batchSize(1);
+while (a.hasNext()) {
+ assert.eq(1, a.next().x[0].a, "$elemMatch getMore test");
+}
}());
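
A minimal shell sketch of the two projection operators exercised above, using a hypothetical collection name: the $-positional operator keeps the array element matched by the query predicate, while $elemMatch projection applies its own predicate.

// Sketch only; collection name and data are hypothetical.
const demo = db.positional_projection_demo;
demo.drop();
assert.writeOK(demo.insert({_id: 1, x: [{a: 1, b: 2}, {a: 2, c: 3}]}));
// 'x.$' keeps the element that satisfied the query predicate on 'x.a'.
assert.eq({_id: 1, x: [{a: 2, c: 3}]}, demo.findOne({'x.a': 2}, {'x.$': 1}));
// $elemMatch projection filters by its own predicate instead.
assert.eq({_id: 1, x: [{a: 1, b: 2}]}, demo.findOne({_id: 1}, {x: {$elemMatch: {a: 1}}}));
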
diff --git a/jstests/core/elemmatch_or_pushdown.js b/jstests/core/elemmatch_or_pushdown.js
index b9a6d5bcc41..86888996b19 100644
--- a/jstests/core/elemmatch_or_pushdown.js
+++ b/jstests/core/elemmatch_or_pushdown.js
@@ -3,82 +3,84 @@
* SERVER-38164.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.elemmatch_or_pushdown;
- coll.drop();
+const coll = db.elemmatch_or_pushdown;
+coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 1, b: [{c: 4}]}));
- assert.writeOK(coll.insert({_id: 1, a: 2, b: [{c: 4}]}));
- assert.writeOK(coll.insert({_id: 2, a: 2, b: [{c: 5}]}));
- assert.writeOK(coll.insert({_id: 3, a: 1, b: [{c: 5}]}));
- assert.writeOK(coll.insert({_id: 4, a: 1, b: [{c: 6}]}));
- assert.writeOK(coll.insert({_id: 5, a: 1, b: [{c: 7}]}));
- assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
+assert.writeOK(coll.insert({_id: 0, a: 1, b: [{c: 4}]}));
+assert.writeOK(coll.insert({_id: 1, a: 2, b: [{c: 4}]}));
+assert.writeOK(coll.insert({_id: 2, a: 2, b: [{c: 5}]}));
+assert.writeOK(coll.insert({_id: 3, a: 1, b: [{c: 5}]}));
+assert.writeOK(coll.insert({_id: 4, a: 1, b: [{c: 6}]}));
+assert.writeOK(coll.insert({_id: 5, a: 1, b: [{c: 7}]}));
+assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
- assert.eq(coll.find({a: 1, b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}).sort({_id: 1}).toArray(),
- [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]);
- assert.eq(coll.find({a: 1, $or: [{a: 2}, {b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}]})
- .sort({_id: 1})
- .toArray(),
- [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]);
+assert.eq(coll.find({a: 1, b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}).sort({_id: 1}).toArray(),
+ [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]);
+assert.eq(coll.find({a: 1, $or: [{a: 2}, {b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}]})
+ .sort({_id: 1})
+ .toArray(),
+ [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]);
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}));
- assert.writeOK(coll.insert({_id: 1, a: 4, b: [{c: [{f: 8}], d: 6}]}));
- assert.writeOK(coll.insert({_id: 2, a: 5, b: [{c: [{f: 8}], d: 7}]}));
- assert.writeOK(coll.insert({_id: 3, a: 4, b: [{c: [{f: 9}], d: 6}]}));
- assert.writeOK(coll.insert({_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}));
- assert.writeOK(coll.insert({_id: 5, a: 4, b: [{c: [{f: 8}], e: 7}]}));
- assert.writeOK(coll.insert({_id: 6, a: 5, b: [{c: [{f: 8}], e: 8}]}));
- assert.writeOK(coll.insert({_id: 7, a: 5, b: [{c: [{f: 9}], e: 7}]}));
- assert.commandWorked(coll.createIndex({"b.d": 1, "b.c.f": 1}));
- assert.commandWorked(coll.createIndex({"b.e": 1, "b.c.f": 1}));
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}));
+assert.writeOK(coll.insert({_id: 1, a: 4, b: [{c: [{f: 8}], d: 6}]}));
+assert.writeOK(coll.insert({_id: 2, a: 5, b: [{c: [{f: 8}], d: 7}]}));
+assert.writeOK(coll.insert({_id: 3, a: 4, b: [{c: [{f: 9}], d: 6}]}));
+assert.writeOK(coll.insert({_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}));
+assert.writeOK(coll.insert({_id: 5, a: 4, b: [{c: [{f: 8}], e: 7}]}));
+assert.writeOK(coll.insert({_id: 6, a: 5, b: [{c: [{f: 8}], e: 8}]}));
+assert.writeOK(coll.insert({_id: 7, a: 5, b: [{c: [{f: 9}], e: 7}]}));
+assert.commandWorked(coll.createIndex({"b.d": 1, "b.c.f": 1}));
+assert.commandWorked(coll.createIndex({"b.e": 1, "b.c.f": 1}));
- assert.eq(coll.find({a: 5, b: {$elemMatch: {c: {$elemMatch: {f: 8}}, $or: [{d: 6}, {e: 7}]}}})
- .sort({_id: 1})
- .toArray(),
- [{_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}, {_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}]);
+assert.eq(coll.find({a: 5, b: {$elemMatch: {c: {$elemMatch: {f: 8}}, $or: [{d: 6}, {e: 7}]}}})
+ .sort({_id: 1})
+ .toArray(),
+ [{_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}, {_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}]);
- // Test that $not predicates in $elemMatch can be pushed into an $or sibling of the $elemMatch.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, arr: [{a: 0, b: 2}], c: 4, d: 5}));
- assert.commandWorked(coll.insert({_id: 1, arr: [{a: 1, b: 2}], c: 4, d: 5}));
- assert.commandWorked(coll.insert({_id: 2, arr: [{a: 0, b: 3}], c: 4, d: 5}));
- assert.commandWorked(coll.insert({_id: 3, arr: [{a: 1, b: 3}], c: 4, d: 5}));
- assert.commandWorked(coll.insert({_id: 4, arr: [{a: 0, b: 2}], c: 6, d: 7}));
- assert.commandWorked(coll.insert({_id: 5, arr: [{a: 1, b: 2}], c: 6, d: 7}));
- assert.commandWorked(coll.insert({_id: 6, arr: [{a: 0, b: 3}], c: 6, d: 7}));
- assert.commandWorked(coll.insert({_id: 7, arr: [{a: 1, b: 3}], c: 6, d: 7}));
+// Test that $not predicates in $elemMatch can be pushed into an $or sibling of the $elemMatch.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, arr: [{a: 0, b: 2}], c: 4, d: 5}));
+assert.commandWorked(coll.insert({_id: 1, arr: [{a: 1, b: 2}], c: 4, d: 5}));
+assert.commandWorked(coll.insert({_id: 2, arr: [{a: 0, b: 3}], c: 4, d: 5}));
+assert.commandWorked(coll.insert({_id: 3, arr: [{a: 1, b: 3}], c: 4, d: 5}));
+assert.commandWorked(coll.insert({_id: 4, arr: [{a: 0, b: 2}], c: 6, d: 7}));
+assert.commandWorked(coll.insert({_id: 5, arr: [{a: 1, b: 2}], c: 6, d: 7}));
+assert.commandWorked(coll.insert({_id: 6, arr: [{a: 0, b: 3}], c: 6, d: 7}));
+assert.commandWorked(coll.insert({_id: 7, arr: [{a: 1, b: 3}], c: 6, d: 7}));
- const keyPattern = {"arr.a": 1, "arr.b": 1, c: 1, d: 1};
- assert.commandWorked(coll.createIndex(keyPattern));
+const keyPattern = {
+ "arr.a": 1,
+ "arr.b": 1,
+ c: 1,
+ d: 1
+};
+assert.commandWorked(coll.createIndex(keyPattern));
- const elemMatchOr = {
- arr: {$elemMatch: {a: {$ne: 1}, $or: [{b: 2}, {b: 3}]}},
- $or: [
- {c: 4, d: 5},
- {c: 6, d: 7},
- ],
- };
+const elemMatchOr = {
+ arr: {$elemMatch: {a: {$ne: 1}, $or: [{b: 2}, {b: 3}]}},
+ $or: [
+ {c: 4, d: 5},
+ {c: 6, d: 7},
+ ],
+};
- // Confirm that we get the same results using the index and a COLLSCAN.
- for (let hint of[keyPattern, {$natural: 1}]) {
- assert.eq(coll.find(elemMatchOr, {_id: 1}).sort({_id: 1}).hint(hint).toArray(),
- [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]);
+// Confirm that we get the same results using the index and a COLLSCAN.
+for (let hint of [keyPattern, {$natural: 1}]) {
+ assert.eq(coll.find(elemMatchOr, {_id: 1}).sort({_id: 1}).hint(hint).toArray(),
+ [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]);
- assert.eq(
- coll.aggregate(
- [
- {
- $match:
- {arr: {$elemMatch: {a: {$ne: 1}}}, $or: [{c: 4, d: 5}, {c: 6, d: 7}]}
- },
- {$project: {_id: 1}},
- {$sort: {_id: 1}}
- ],
- {hint: hint})
- .toArray(),
- [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]);
- }
+ assert.eq(
+ coll.aggregate(
+ [
+ {$match: {arr: {$elemMatch: {a: {$ne: 1}}}, $or: [{c: 4, d: 5}, {c: 6, d: 7}]}},
+ {$project: {_id: 1}},
+ {$sort: {_id: 1}}
+ ],
+ {hint: hint})
+ .toArray(),
+ [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]);
+}
}());
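
The rewrite being exercised can be sketched in isolation with hypothetical names: when the $ne predicate inside $elemMatch is pushed into each $or branch, the query becomes answerable from an index on the $or fields (isCollscan comes from the analyze_plan.js helpers).

// Sketch only; collection name and data are hypothetical.
load("jstests/libs/analyze_plan.js");
const pd = db.elemmatch_or_pushdown_demo;
pd.drop();
assert.writeOK(pd.insert({arr: [{a: 0, b: 2}]}));
assert.commandWorked(pd.createIndex({"arr.b": 1}));
const plan = pd.find({arr: {$elemMatch: {a: {$ne: 1}, $or: [{b: 2}, {b: 3}]}}})
                 .explain()
                 .queryPlanner.winningPlan;
printjson(plan);  // expected: an indexed plan rather than a COLLSCAN
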
diff --git a/jstests/core/ensure_sorted.js b/jstests/core/ensure_sorted.js
index c2d29728c59..037eda45c19 100644
--- a/jstests/core/ensure_sorted.js
+++ b/jstests/core/ensure_sorted.js
@@ -6,25 +6,25 @@
// SERVER-17011 Tests whether queries which specify sort and batch size can generate results out of
// order due to the ntoreturn hack. The EnsureSortedStage should solve this problem.
(function() {
- 'use strict';
- var coll = db.ensure_sorted;
+'use strict';
+var coll = db.ensure_sorted;
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 1, b: 4}));
- assert.writeOK(coll.insert({a: 2, b: 3}));
- assert.writeOK(coll.insert({a: 3, b: 2}));
- assert.writeOK(coll.insert({a: 4, b: 1}));
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 4}));
+assert.writeOK(coll.insert({a: 2, b: 3}));
+assert.writeOK(coll.insert({a: 3, b: 2}));
+assert.writeOK(coll.insert({a: 4, b: 1}));
- var cursor = coll.find({a: {$lt: 5}}).sort({b: -1}).batchSize(2);
- cursor.next(); // {a: 1, b: 4}.
- cursor.next(); // {a: 2, b: 3}.
+var cursor = coll.find({a: {$lt: 5}}).sort({b: -1}).batchSize(2);
+cursor.next(); // {a: 1, b: 4}.
+cursor.next(); // {a: 2, b: 3}.
- assert.writeOK(coll.update({b: 2}, {$set: {b: 5}}));
- var result = cursor.next();
+assert.writeOK(coll.update({b: 2}, {$set: {b: 5}}));
+var result = cursor.next();
- // We might either drop the document where "b" is 2 from the result set, or we might include the
- // old version of this document (before the update is applied). Either is acceptable, but
- // out-of-order results are unacceptable.
- assert(result.b === 2 || result.b === 1, "cursor returned: " + printjson(result));
+// We might either drop the document where "b" is 2 from the result set, or we might include the
+// old version of this document (before the update is applied). Either is acceptable, but
+// out-of-order results are unacceptable.
+assert(result.b === 2 || result.b === 1, "cursor returned: " + printjson(result));
})();
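
The property under test generalizes to any sorted, batched cursor: sort keys must never go backwards across getMore batches. A small sketch with hypothetical names:

// Sketch only; collection name and data are hypothetical.
var es = db.ensure_sorted_demo;
es.drop();
[4, 3, 2, 1].forEach(function(b) {
    assert.writeOK(es.insert({b: b}));
});
var cur = es.find().sort({b: -1}).batchSize(2);
var prev = Infinity;
while (cur.hasNext()) {
    var doc = cur.next();
    assert.lte(doc.b, prev, "sorted cursor must not return out-of-order results");
    prev = doc.b;
}
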
diff --git a/jstests/core/exhaust.js b/jstests/core/exhaust.js
index fe76916ee95..125c70cefe8 100644
--- a/jstests/core/exhaust.js
+++ b/jstests/core/exhaust.js
@@ -1,26 +1,25 @@
// @tags: [requires_getmore]
(function() {
- 'use strict';
+'use strict';
- var c = db.exhaustColl;
- c.drop();
+var c = db.exhaustColl;
+c.drop();
- const docCount = 4;
- for (var i = 0; i < docCount; i++) {
- assert.writeOK(c.insert({a: i}));
- }
+const docCount = 4;
+for (var i = 0; i < docCount; i++) {
+ assert.writeOK(c.insert({a: i}));
+}
- // Check that the query works without exhaust set
- assert.eq(c.find().batchSize(1).itcount(), docCount);
-
- // Now try to run the same query with exhaust
- try {
- assert.eq(c.find().batchSize(1).addOption(DBQuery.Option.exhaust).itcount(), docCount);
- } catch (e) {
-        // The exhaust option is not valid against mongos; ensure that this query throws the right
-        // code.
- assert.eq(e.code, 18526, () => tojson(e));
- }
+// Check that the query works without exhaust set
+assert.eq(c.find().batchSize(1).itcount(), docCount);
+// Now try to run the same query with exhaust
+try {
+ assert.eq(c.find().batchSize(1).addOption(DBQuery.Option.exhaust).itcount(), docCount);
+} catch (e) {
+    // The exhaust option is not valid against mongos; ensure that this query throws the right
+    // code.
+ assert.eq(e.code, 18526, () => tojson(e));
+}
}());
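
A sketch of the exhaust option in isolation, with hypothetical names; per the test above, mongod streams every batch without further getMore round trips, while mongos rejects the option with code 18526.

// Sketch only; collection name and data are hypothetical.
var ex = db.exhaust_demo;
ex.drop();
for (var j = 0; j < 3; j++) {
    assert.writeOK(ex.insert({n: j}));
}
try {
    // Exhaust streams all batches from the initial query.
    assert.eq(3, ex.find().batchSize(1).addOption(DBQuery.Option.exhaust).itcount());
} catch (e) {
    assert.eq(e.code, 18526, () => tojson(e));  // expected only against mongos
}
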
diff --git a/jstests/core/existsa.js b/jstests/core/existsa.js
index d98fd3f2d68..66d0ded50d4 100644
--- a/jstests/core/existsa.js
+++ b/jstests/core/existsa.js
@@ -2,110 +2,110 @@
* Tests that sparse indexes are disallowed for $exists:false queries.
*/
(function() {
- "use strict";
-
- const coll = db.jstests_existsa;
- coll.drop();
-
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: {x: 1}, b: 1}));
-
- let indexKeySpec = {};
- let indexKeyField = '';
-
- /** Configure testing of an index { <indexKeyField>:1 }. */
- function setIndex(_indexKeyField) {
- indexKeyField = _indexKeyField;
- indexKeySpec = {};
- indexKeySpec[indexKeyField] = 1;
- coll.ensureIndex(indexKeySpec, {sparse: true});
- }
- setIndex('a');
-
- /** @return count when hinting the index to use. */
- function hintedCount(query) {
- return coll.find(query).hint(indexKeySpec).itcount();
- }
-
- /** The query field does not exist and the sparse index is not used without a hint. */
- function assertMissing(query, expectedMissing = 1, expectedIndexedMissing = 0) {
- assert.eq(expectedMissing, coll.count(query));
- // We also shouldn't get a different count depending on whether
- // an index is used or not.
- assert.eq(expectedIndexedMissing, hintedCount(query));
- }
-
- /** The query field exists and the sparse index is used without a hint. */
- function assertExists(query, expectedExists = 2) {
- assert.eq(expectedExists, coll.count(query));
- // An $exists:true predicate generates no index filters. Add another predicate on the index
- // key to trigger use of the index.
- let andClause = {};
- andClause[indexKeyField] = {$ne: null};
- Object.extend(query, {$and: [andClause]});
- assert.eq(expectedExists, coll.count(query));
- assert.eq(expectedExists, hintedCount(query));
- }
-
- /** The query field exists and the sparse index is not used without a hint. */
- function assertExistsUnindexed(query, expectedExists = 2) {
- assert.eq(expectedExists, coll.count(query));
- // Even with another predicate on the index key, the sparse index is disallowed.
- let andClause = {};
- andClause[indexKeyField] = {$ne: null};
- Object.extend(query, {$and: [andClause]});
- assert.eq(expectedExists, coll.count(query));
- assert.eq(expectedExists, hintedCount(query));
- }
-
- // $exists:false queries match the proper number of documents and disallow the sparse index.
- assertMissing({a: {$exists: false}});
- assertMissing({a: {$not: {$exists: true}}});
- assertMissing({$and: [{a: {$exists: false}}]});
- assertMissing({$or: [{a: {$exists: false}}]});
- assertMissing({$nor: [{a: {$exists: true}}]});
- assertMissing({'a.x': {$exists: false}}, 2, 1);
-
- // Currently a sparse index is disallowed even if the $exists:false query is on a different
- // field.
- assertMissing({b: {$exists: false}}, 2, 1);
- assertMissing({b: {$exists: false}, a: {$ne: 6}}, 2, 1);
- assertMissing({b: {$not: {$exists: true}}}, 2, 1);
-
- // Top level $exists:true queries match the proper number of documents
- // and use the sparse index on { a : 1 }.
- assertExists({a: {$exists: true}});
-
- // Nested $exists queries match the proper number of documents and disallow the sparse index.
- assertExistsUnindexed({$nor: [{a: {$exists: false}}]});
- assertExistsUnindexed({$nor: [{'a.x': {$exists: false}}]}, 1);
- assertExistsUnindexed({a: {$not: {$exists: false}}});
-
- // Nested $exists queries disallow the sparse index in some cases where it is not strictly
- // necessary to do so. (Descriptive tests.)
- assertExistsUnindexed({$nor: [{b: {$exists: false}}]}, 1); // Unindexed field.
- assertExists({$or: [{a: {$exists: true}}]}); // $exists:true not $exists:false.
-
- // Behavior is similar with $elemMatch.
- coll.drop();
- assert.writeOK(coll.insert({a: [{}]}));
- assert.writeOK(coll.insert({a: [{b: 1}]}));
- assert.writeOK(coll.insert({a: [{b: [1]}]}));
- setIndex('a.b');
-
- assertMissing({a: {$elemMatch: {b: {$exists: false}}}});
-
- // A $elemMatch predicate is treated as nested, and the index should be used for $exists:true.
- assertExists({a: {$elemMatch: {b: {$exists: true}}}});
-
- // A $not within $elemMatch should not attempt to use a sparse index for $exists:false.
- assertExistsUnindexed({'a.b': {$elemMatch: {$not: {$exists: false}}}}, 1);
- assertExistsUnindexed({'a.b': {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, 1);
-
- // A non-sparse index will not be disallowed.
- coll.drop();
- assert.writeOK(coll.insert({}));
- coll.ensureIndex({a: 1});
- assert.eq(1, coll.find({a: {$exists: false}}).itcount());
+"use strict";
+
+const coll = db.jstests_existsa;
+coll.drop();
+
+assert.writeOK(coll.insert({}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: {x: 1}, b: 1}));
+
+let indexKeySpec = {};
+let indexKeyField = '';
+
+/** Configure testing of an index { <indexKeyField>:1 }. */
+function setIndex(_indexKeyField) {
+ indexKeyField = _indexKeyField;
+ indexKeySpec = {};
+ indexKeySpec[indexKeyField] = 1;
+ coll.ensureIndex(indexKeySpec, {sparse: true});
+}
+setIndex('a');
+
+/** @return count when hinting the index to use. */
+function hintedCount(query) {
+ return coll.find(query).hint(indexKeySpec).itcount();
+}
+
+/** The query field does not exist and the sparse index is not used without a hint. */
+function assertMissing(query, expectedMissing = 1, expectedIndexedMissing = 0) {
+ assert.eq(expectedMissing, coll.count(query));
+ // We also shouldn't get a different count depending on whether
+ // an index is used or not.
+ assert.eq(expectedIndexedMissing, hintedCount(query));
+}
+
+/** The query field exists and the sparse index is used without a hint. */
+function assertExists(query, expectedExists = 2) {
+ assert.eq(expectedExists, coll.count(query));
+ // An $exists:true predicate generates no index filters. Add another predicate on the index
+ // key to trigger use of the index.
+ let andClause = {};
+ andClause[indexKeyField] = {$ne: null};
+ Object.extend(query, {$and: [andClause]});
+ assert.eq(expectedExists, coll.count(query));
+ assert.eq(expectedExists, hintedCount(query));
+}
+
+/** The query field exists and the sparse index is not used without a hint. */
+function assertExistsUnindexed(query, expectedExists = 2) {
+ assert.eq(expectedExists, coll.count(query));
+ // Even with another predicate on the index key, the sparse index is disallowed.
+ let andClause = {};
+ andClause[indexKeyField] = {$ne: null};
+ Object.extend(query, {$and: [andClause]});
+ assert.eq(expectedExists, coll.count(query));
+ assert.eq(expectedExists, hintedCount(query));
+}
+
+// $exists:false queries match the proper number of documents and disallow the sparse index.
+assertMissing({a: {$exists: false}});
+assertMissing({a: {$not: {$exists: true}}});
+assertMissing({$and: [{a: {$exists: false}}]});
+assertMissing({$or: [{a: {$exists: false}}]});
+assertMissing({$nor: [{a: {$exists: true}}]});
+assertMissing({'a.x': {$exists: false}}, 2, 1);
+
+// Currently a sparse index is disallowed even if the $exists:false query is on a different
+// field.
+assertMissing({b: {$exists: false}}, 2, 1);
+assertMissing({b: {$exists: false}, a: {$ne: 6}}, 2, 1);
+assertMissing({b: {$not: {$exists: true}}}, 2, 1);
+
+// Top level $exists:true queries match the proper number of documents
+// and use the sparse index on { a : 1 }.
+assertExists({a: {$exists: true}});
+
+// Nested $exists queries match the proper number of documents and disallow the sparse index.
+assertExistsUnindexed({$nor: [{a: {$exists: false}}]});
+assertExistsUnindexed({$nor: [{'a.x': {$exists: false}}]}, 1);
+assertExistsUnindexed({a: {$not: {$exists: false}}});
+
+// Nested $exists queries disallow the sparse index in some cases where it is not strictly
+// necessary to do so. (Descriptive tests.)
+assertExistsUnindexed({$nor: [{b: {$exists: false}}]}, 1); // Unindexed field.
+assertExists({$or: [{a: {$exists: true}}]}); // $exists:true not $exists:false.
+
+// Behavior is similar with $elemMatch.
+coll.drop();
+assert.writeOK(coll.insert({a: [{}]}));
+assert.writeOK(coll.insert({a: [{b: 1}]}));
+assert.writeOK(coll.insert({a: [{b: [1]}]}));
+setIndex('a.b');
+
+assertMissing({a: {$elemMatch: {b: {$exists: false}}}});
+
+// A $elemMatch predicate is treated as nested, and the index should be used for $exists:true.
+assertExists({a: {$elemMatch: {b: {$exists: true}}}});
+
+// A $not within $elemMatch should not attempt to use a sparse index for $exists:false.
+assertExistsUnindexed({'a.b': {$elemMatch: {$not: {$exists: false}}}}, 1);
+assertExistsUnindexed({'a.b': {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, 1);
+
+// A non-sparse index will not be disallowed.
+coll.drop();
+assert.writeOK(coll.insert({}));
+coll.ensureIndex({a: 1});
+assert.eq(1, coll.find({a: {$exists: false}}).itcount());
})();
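
Why the restriction exists, as a compact sketch with hypothetical names: a sparse index has no entries for documents missing the key, so scanning it for {$exists: false} misses exactly the documents that should match.

// Sketch only; collection name and data are hypothetical.
var sp = db.sparse_exists_demo;
sp.drop();
assert.writeOK(sp.insert({}));       // no 'a' field: absent from a sparse index
assert.writeOK(sp.insert({a: 1}));
sp.ensureIndex({a: 1}, {sparse: true});
assert.eq(1, sp.find({a: {$exists: false}}).itcount());               // unhinted: correct
assert.eq(0, sp.find({a: {$exists: false}}).hint({a: 1}).itcount());  // hinted: misses the doc
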
diff --git a/jstests/core/explain_agg_write_concern.js b/jstests/core/explain_agg_write_concern.js
index 5377d0011c3..cf28b097632 100644
--- a/jstests/core/explain_agg_write_concern.js
+++ b/jstests/core/explain_agg_write_concern.js
@@ -12,64 +12,62 @@
* Tests related to the aggregate commands behavior with writeConcern and writeConcern + explain.
*/
(function() {
- "use strict";
+"use strict";
- const collName = "explain_agg_write_concern";
- let coll = db[collName];
- let outColl = db[collName + "_out"];
- coll.drop();
- outColl.drop();
+const collName = "explain_agg_write_concern";
+let coll = db[collName];
+let outColl = db[collName + "_out"];
+coll.drop();
+outColl.drop();
- assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 1}));
- // Agg should accept writeConcern if the last stage is an $out.
- assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$out: outColl.getName()}],
- cursor: {},
- writeConcern: {w: 1}
- }));
- assert.eq(1, outColl.find().itcount());
- outColl.drop();
+// Agg should accept writeConcern if the last stage is an $out.
+assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: outColl.getName()}],
+ cursor: {},
+ writeConcern: {w: 1}
+}));
+assert.eq(1, outColl.find().itcount());
+outColl.drop();
- // Agg should accept writeConcern even if read-only.
- assert.commandWorked(
- db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {}, writeConcern: {w: 1}}));
+// Agg should accept writeConcern even if read-only.
+assert.commandWorked(
+ db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {}, writeConcern: {w: 1}}));
- // Agg should succeed if the last stage is an $out and the explain flag is set.
- assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$out: outColl.getName()}],
- explain: true,
- }));
- assert.eq(0, outColl.find().itcount());
- outColl.drop();
+// Agg should succeed if the last stage is an $out and the explain flag is set.
+assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: outColl.getName()}],
+ explain: true,
+}));
+assert.eq(0, outColl.find().itcount());
+outColl.drop();
- // Agg should fail if the last stage is an $out and both the explain flag and writeConcern are
- // set.
- assert.commandFailed(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$out: outColl.getName()}],
- explain: true,
- writeConcern: {w: 1}
- }));
+// Agg should fail if the last stage is an $out and both the explain flag and writeConcern are
+// set.
+assert.commandFailed(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: outColl.getName()}],
+ explain: true,
+ writeConcern: {w: 1}
+}));
- // Agg explain helpers with all verbosities (or verbosity omitted) should fail if the last stage
- // is an $out and writeConcern is set.
- assert.throws(function() {
- coll.explain().aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
- });
- assert.throws(function() {
- coll.explain("queryPlanner").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
- });
- assert.throws(function() {
- coll.explain("executionStats").aggregate([{$out: outColl.getName()}], {
- writeConcern: {w: 1}
- });
- });
- assert.throws(function() {
- coll.explain("allPlansExecution").aggregate([{$out: outColl.getName()}], {
- writeConcern: {w: 1}
- });
+// Agg explain helpers with all verbosities (or verbosity omitted) should fail if the last stage
+// is an $out and writeConcern is set.
+assert.throws(function() {
+ coll.explain().aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
+});
+assert.throws(function() {
+ coll.explain("queryPlanner").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
+});
+assert.throws(function() {
+ coll.explain("executionStats").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}});
+});
+assert.throws(function() {
+ coll.explain("allPlansExecution").aggregate([{$out: outColl.getName()}], {
+ writeConcern: {w: 1}
});
+});
}());
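
The failing shape in isolation, mirroring the test above: explain and writeConcern may each accompany an $out pipeline, but not both at once.

// Sketch only; names mirror the test above.
assert.commandFailed(db.runCommand({
    aggregate: "explain_agg_write_concern",
    pipeline: [{$out: "explain_agg_write_concern_out"}],
    explain: true,
    writeConcern: {w: 1}
}));
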
diff --git a/jstests/core/explain_db_mismatch.js b/jstests/core/explain_db_mismatch.js
index 13d54cae77f..09950f489d0 100644
--- a/jstests/core/explain_db_mismatch.js
+++ b/jstests/core/explain_db_mismatch.js
@@ -1,7 +1,6 @@
// Ensure that explain command errors if the inner command has a $db field that doesn't match the
// outer command.
(function() {
- assert.commandFailedWithCode(
- db.runCommand({explain: {find: 'some_collection', $db: 'not_my_db'}}),
- ErrorCodes.InvalidNamespace);
+assert.commandFailedWithCode(db.runCommand({explain: {find: 'some_collection', $db: 'not_my_db'}}),
+ ErrorCodes.InvalidNamespace);
}());
diff --git a/jstests/core/explain_delete.js b/jstests/core/explain_delete.js
index 9599c7df9b8..1863979faa5 100644
--- a/jstests/core/explain_delete.js
+++ b/jstests/core/explain_delete.js
@@ -2,66 +2,66 @@
// Tests for explaining the delete command.
(function() {
- "use strict";
+"use strict";
- var collName = "jstests_explain_delete";
- var t = db[collName];
- t.drop();
+var collName = "jstests_explain_delete";
+var t = db[collName];
+t.drop();
- var explain;
+var explain;
- /**
- * Verify that the explain command output 'explain' shows a DELETE stage with an nWouldDelete
- * value equal to 'nWouldDelete'.
- */
- function checkNWouldDelete(explain, nWouldDelete) {
- assert.commandWorked(explain);
- assert("executionStats" in explain);
- var executionStats = explain.executionStats;
- assert("executionStages" in executionStats);
+/**
+ * Verify that the explain command output 'explain' shows a DELETE stage with an nWouldDelete
+ * value equal to 'nWouldDelete'.
+ */
+function checkNWouldDelete(explain, nWouldDelete) {
+ assert.commandWorked(explain);
+ assert("executionStats" in explain);
+ var executionStats = explain.executionStats;
+ assert("executionStages" in executionStats);
- // If passed through mongos, then DELETE stage(s) should be below the SHARD_WRITE mongos
- // stage. Otherwise the DELETE stage is the root stage.
- var execStages = executionStats.executionStages;
- if ("SHARD_WRITE" === execStages.stage) {
- let totalToBeDeletedAcrossAllShards = 0;
- execStages.shards.forEach(function(shardExplain) {
- const rootStageName = shardExplain.executionStages.stage;
- assert.eq(rootStageName, "DELETE", tojson(execStages));
- totalToBeDeletedAcrossAllShards += shardExplain.executionStages.nWouldDelete;
- });
- assert.eq(totalToBeDeletedAcrossAllShards, nWouldDelete, explain);
- } else {
- assert.eq(execStages.stage, "DELETE", explain);
- assert.eq(execStages.nWouldDelete, nWouldDelete, explain);
- }
+ // If passed through mongos, then DELETE stage(s) should be below the SHARD_WRITE mongos
+ // stage. Otherwise the DELETE stage is the root stage.
+ var execStages = executionStats.executionStages;
+ if ("SHARD_WRITE" === execStages.stage) {
+ let totalToBeDeletedAcrossAllShards = 0;
+ execStages.shards.forEach(function(shardExplain) {
+ const rootStageName = shardExplain.executionStages.stage;
+ assert.eq(rootStageName, "DELETE", tojson(execStages));
+ totalToBeDeletedAcrossAllShards += shardExplain.executionStages.nWouldDelete;
+ });
+ assert.eq(totalToBeDeletedAcrossAllShards, nWouldDelete, explain);
+ } else {
+ assert.eq(execStages.stage, "DELETE", explain);
+ assert.eq(execStages.nWouldDelete, nWouldDelete, explain);
}
+}
- // Explain delete against an empty collection.
- assert.commandWorked(db.createCollection(t.getName()));
- explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
- checkNWouldDelete(explain, 0);
+// Explain delete against an empty collection.
+assert.commandWorked(db.createCollection(t.getName()));
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
+checkNWouldDelete(explain, 0);
- // Add an index but no data, and check that the explain still works.
- t.ensureIndex({a: 1});
- explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
- checkNWouldDelete(explain, 0);
+// Add an index but no data, and check that the explain still works.
+t.ensureIndex({a: 1});
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
+checkNWouldDelete(explain, 0);
- // Add some copies of the same document.
- for (var i = 0; i < 10; i++) {
- t.insert({a: 1});
- }
- assert.eq(10, t.count());
+// Add some copies of the same document.
+for (var i = 0; i < 10; i++) {
+ t.insert({a: 1});
+}
+assert.eq(10, t.count());
- // Run an explain which shows that all 10 documents *would* be deleted.
- explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
- checkNWouldDelete(explain, 10);
+// Run an explain which shows that all 10 documents *would* be deleted.
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
+checkNWouldDelete(explain, 10);
- // Make sure all 10 documents are still there.
- assert.eq(10, t.count());
+// Make sure all 10 documents are still there.
+assert.eq(10, t.count());
- // If we run the same thing without the explain, then all 10 docs should be deleted.
- var deleteResult = db.runCommand({delete: collName, deletes: [{q: {a: 1}, limit: 0}]});
- assert.commandWorked(deleteResult);
- assert.eq(0, t.count());
+// If we run the same thing without the explain, then all 10 docs should be deleted.
+var deleteResult = db.runCommand({delete: collName, deletes: [{q: {a: 1}, limit: 0}]});
+assert.commandWorked(deleteResult);
+assert.eq(0, t.count());
}());
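
The core guarantee in isolation, with hypothetical names: explaining a write reports what would happen without applying it.

// Sketch only; collection name and data are hypothetical.
var ed = db.explain_delete_demo;
ed.drop();
assert.writeOK(ed.insert({a: 1}));
var exp = db.runCommand(
    {explain: {delete: ed.getName(), deletes: [{q: {a: 1}, limit: 0}]}});
assert.commandWorked(exp);
assert.eq(1, ed.count());  // the explained delete removed nothing
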
diff --git a/jstests/core/explain_distinct.js b/jstests/core/explain_distinct.js
index a3cb6606d30..1c4d6612acb 100644
--- a/jstests/core/explain_distinct.js
+++ b/jstests/core/explain_distinct.js
@@ -6,86 +6,86 @@
* This test ensures that explain on the distinct command works.
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- var collName = "jstests_explain_distinct";
- var coll = db[collName];
+var collName = "jstests_explain_distinct";
+var coll = db[collName];
- function runDistinctExplain(collection, keyString, query) {
- var distinctCmd = {distinct: collection.getName(), key: keyString};
+function runDistinctExplain(collection, keyString, query) {
+ var distinctCmd = {distinct: collection.getName(), key: keyString};
- if (typeof query !== 'undefined') {
- distinctCmd.query = query;
- }
-
- return coll.runCommand({explain: distinctCmd, verbosity: 'executionStats'});
- }
-
- coll.drop();
-
- // Collection doesn't exist.
- var explain = runDistinctExplain(coll, 'a', {});
- assert.commandWorked(explain);
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"));
-
- // Insert the data to perform distinct() on.
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 2, c: 1}));
+ if (typeof query !== 'undefined') {
+ distinctCmd.query = query;
}
- assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString.
- assert.commandFailed(runDistinctExplain(coll, 'a', 'a')); // Bad query.
- assert.commandFailed(runDistinctExplain(coll, 'b', {$not: 1})); // Bad query.
- assert.commandFailed(runDistinctExplain(coll, 'a', {$not: 1})); // Bad query.
- assert.commandFailed(runDistinctExplain(coll, '_id', {$not: 1})); // Bad query.
-
- // Ensure that server accepts a distinct command with no 'query' field.
- assert.commandWorked(runDistinctExplain(coll, '', null));
- assert.commandWorked(runDistinctExplain(coll, ''));
-
- assert.eq([1], coll.distinct('b'));
- var explain = runDistinctExplain(coll, 'b', {});
- assert.commandWorked(explain);
- assert.eq(20, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- assert.commandWorked(coll.createIndex({a: 1}));
-
- assert.eq([1, 2], coll.distinct('a'));
- var explain = runDistinctExplain(coll, 'a', {});
- assert.commandWorked(explain);
- assert.eq(2, explain.executionStats.nReturned);
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
-
- // Check that the DISTINCT_SCAN stage has the correct stats.
- var stage = getPlanStage(explain.queryPlanner.winningPlan, "DISTINCT_SCAN");
- assert.eq({a: 1}, stage.keyPattern);
- assert.eq("a_1", stage.indexName);
- assert.eq(false, stage.isMultiKey);
- assert.eq(false, stage.isUnique);
- assert.eq(false, stage.isSparse);
- assert.eq(false, stage.isPartial);
- assert.lte(1, stage.indexVersion);
- assert("indexBounds" in stage);
-
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-
- assert.eq([1], coll.distinct('a', {a: 1}));
- var explain = runDistinctExplain(coll, 'a', {a: 1});
- assert.commandWorked(explain);
- assert.eq(1, explain.executionStats.nReturned);
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
-
- assert.eq([1], coll.distinct('b', {a: 1}));
- var explain = runDistinctExplain(coll, 'b', {a: 1});
- assert.commandWorked(explain);
- assert.eq(1, explain.executionStats.nReturned);
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+ return coll.runCommand({explain: distinctCmd, verbosity: 'executionStats'});
+}
+
+coll.drop();
+
+// Collection doesn't exist.
+var explain = runDistinctExplain(coll, 'a', {});
+assert.commandWorked(explain);
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"));
+
+// Insert the data to perform distinct() on.
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({a: 1, b: 1}));
+ assert.writeOK(coll.insert({a: 2, c: 1}));
+}
+
+assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString.
+assert.commandFailed(runDistinctExplain(coll, 'a', 'a')); // Bad query.
+assert.commandFailed(runDistinctExplain(coll, 'b', {$not: 1})); // Bad query.
+assert.commandFailed(runDistinctExplain(coll, 'a', {$not: 1})); // Bad query.
+assert.commandFailed(runDistinctExplain(coll, '_id', {$not: 1})); // Bad query.
+
+// Ensure that server accepts a distinct command with no 'query' field.
+assert.commandWorked(runDistinctExplain(coll, '', null));
+assert.commandWorked(runDistinctExplain(coll, ''));
+
+assert.eq([1], coll.distinct('b'));
+var explain = runDistinctExplain(coll, 'b', {});
+assert.commandWorked(explain);
+assert.eq(20, explain.executionStats.nReturned);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.eq([1, 2], coll.distinct('a'));
+var explain = runDistinctExplain(coll, 'a', {});
+assert.commandWorked(explain);
+assert.eq(2, explain.executionStats.nReturned);
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+
+// Check that the DISTINCT_SCAN stage has the correct stats.
+var stage = getPlanStage(explain.queryPlanner.winningPlan, "DISTINCT_SCAN");
+assert.eq({a: 1}, stage.keyPattern);
+assert.eq("a_1", stage.indexName);
+assert.eq(false, stage.isMultiKey);
+assert.eq(false, stage.isUnique);
+assert.eq(false, stage.isSparse);
+assert.eq(false, stage.isPartial);
+assert.lte(1, stage.indexVersion);
+assert("indexBounds" in stage);
+
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+
+assert.eq([1], coll.distinct('a', {a: 1}));
+var explain = runDistinctExplain(coll, 'a', {a: 1});
+assert.commandWorked(explain);
+assert.eq(1, explain.executionStats.nReturned);
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
+
+assert.eq([1], coll.distinct('b', {a: 1}));
+var explain = runDistinctExplain(coll, 'b', {a: 1});
+assert.commandWorked(explain);
+assert.eq(1, explain.executionStats.nReturned);
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED"));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
})();
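
A compact sketch of the DISTINCT_SCAN check with hypothetical names (planHasStage comes from the analyze_plan.js helpers loaded above): with an index on the distinct key, the winning plan should read distinct values straight from the index.

// Sketch only; collection name and data are hypothetical.
load("jstests/libs/analyze_plan.js");
var dd = db.explain_distinct_demo;
dd.drop();
assert.writeOK(dd.insert({a: 1}));
assert.commandWorked(dd.createIndex({a: 1}));
var distinctExplain =
    dd.runCommand({explain: {distinct: dd.getName(), key: 'a'}, verbosity: 'executionStats'});
assert.commandWorked(distinctExplain);
assert(planHasStage(db, distinctExplain.queryPlanner.winningPlan, "DISTINCT_SCAN"));
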
diff --git a/jstests/core/explain_find_and_modify.js b/jstests/core/explain_find_and_modify.js
index a0ba989dd0e..8b7c65d519e 100644
--- a/jstests/core/explain_find_and_modify.js
+++ b/jstests/core/explain_find_and_modify.js
@@ -12,300 +12,299 @@
* 5. The reported stats should reflect how the command would be executed.
*/
(function() {
- "use strict";
- var cName = "explain_find_and_modify";
- var t = db.getCollection(cName);
+"use strict";
+var cName = "explain_find_and_modify";
+var t = db.getCollection(cName);
- // Different types of findAndModify explain requests.
- var explainRemove = {explain: {findAndModify: cName, remove: true, query: {_id: 0}}};
- var explainUpdate = {explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}};
- var explainUpsert = {
- explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true}
- };
+// Different types of findAndModify explain requests.
+var explainRemove = {explain: {findAndModify: cName, remove: true, query: {_id: 0}}};
+var explainUpdate = {explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}};
+var explainUpsert = {
+ explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true}
+};
- // 1. Explaining findAndModify should never create a database.
+// 1. Explaining findAndModify should never create a database.
- // Make sure this one doesn't exist before we start.
- assert.commandWorked(db.getSiblingDB(cName).runCommand({dropDatabase: 1}));
- var newDB = db.getSiblingDB(cName);
+// Make sure this one doesn't exist before we start.
+assert.commandWorked(db.getSiblingDB(cName).runCommand({dropDatabase: 1}));
+var newDB = db.getSiblingDB(cName);
- // Explain the command, ensuring the database is not created.
- var err_msg = "Explaining findAndModify on a non-existent database should return an error.";
- assert.commandFailed(newDB.runCommand(explainRemove), err_msg);
- assertDBDoesNotExist(newDB, "Explaining a remove should not create a database.");
+// Explain the command, ensuring the database is not created.
+var err_msg = "Explaining findAndModify on a non-existent database should return an error.";
+assert.commandFailed(newDB.runCommand(explainRemove), err_msg);
+assertDBDoesNotExist(newDB, "Explaining a remove should not create a database.");
- assert.commandFailed(newDB.runCommand(explainUpsert), err_msg);
- assertDBDoesNotExist(newDB, "Explaining an upsert should not create a database.");
+assert.commandFailed(newDB.runCommand(explainUpsert), err_msg);
+assertDBDoesNotExist(newDB, "Explaining an upsert should not create a database.");
- // 2. Explaining findAndModify should never create a collection.
+// 2. Explaining findAndModify should never create a collection.
- // Insert a document to make sure the database exists.
- t.insert({'will': 'be dropped'});
- // Make sure the collection doesn't exist.
- t.drop();
+// Insert a document to make sure the database exists.
+t.insert({'will': 'be dropped'});
+// Make sure the collection doesn't exist.
+t.drop();
- // Explain the command, ensuring the collection is not created.
- assert.commandWorked(db.runCommand(explainRemove));
- assertCollDoesNotExist(cName, "explaining a remove should not create a new collection.");
+// Explain the command, ensuring the collection is not created.
+assert.commandWorked(db.runCommand(explainRemove));
+assertCollDoesNotExist(cName, "explaining a remove should not create a new collection.");
- assert.commandWorked(db.runCommand(explainUpsert));
- assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection.");
+assert.commandWorked(db.runCommand(explainUpsert));
+assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection.");
- assert.commandWorked(db.runCommand(Object.merge(explainUpsert, {fields: {x: 1}})));
- assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection.");
+assert.commandWorked(db.runCommand(Object.merge(explainUpsert, {fields: {x: 1}})));
+assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection.");
- // 3. Explaining findAndModify should not work with an invalid findAndModify command object.
+// 3. Explaining findAndModify should not work with an invalid findAndModify command object.
- // Specifying both remove and new is illegal.
- assert.commandFailed(db.runCommand({remove: true, new: true}));
+// Specifying both remove and new is illegal.
+assert.commandFailed(db.runCommand({remove: true, new: true}));
- // 4. Explaining findAndModify should not modify any contents of the collection.
- var onlyDoc = {_id: 0, i: 1};
- assert.writeOK(t.insert(onlyDoc));
+// 4. Explaining findAndModify should not modify any contents of the collection.
+var onlyDoc = {_id: 0, i: 1};
+assert.writeOK(t.insert(onlyDoc));
- // Explaining a delete should not delete anything.
- var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}};
- var res = db.runCommand({explain: matchingRemoveCmd});
- assert.commandWorked(res);
- assert.eq(t.find().itcount(), 1, "Explaining a remove should not remove any documents.");
+// Explaining a delete should not delete anything.
+var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}};
+var res = db.runCommand({explain: matchingRemoveCmd});
+assert.commandWorked(res);
+assert.eq(t.find().itcount(), 1, "Explaining a remove should not remove any documents.");
- // Explaining an update should not update anything.
- var matchingUpdateCmd = {findAndModify: cName, update: {x: "x"}, query: {_id: onlyDoc._id}};
- var res = db.runCommand({explain: matchingUpdateCmd});
- assert.commandWorked(res);
- assert.eq(t.findOne(), onlyDoc, "Explaining an update should not update any documents.");
+// Explaining an update should not update anything.
+var matchingUpdateCmd = {findAndModify: cName, update: {x: "x"}, query: {_id: onlyDoc._id}};
+var res = db.runCommand({explain: matchingUpdateCmd});
+assert.commandWorked(res);
+assert.eq(t.findOne(), onlyDoc, "Explaining an update should not update any documents.");
- // Explaining an upsert should not insert anything.
- var matchingUpsertCmd =
- {findAndModify: cName, update: {x: "x"}, query: {_id: "non-match"}, upsert: true};
- var res = db.runCommand({explain: matchingUpsertCmd});
- assert.commandWorked(res);
- assert.eq(t.find().itcount(), 1, "Explaining an upsert should not insert any documents.");
+// Explaining an upsert should not insert anything.
+var matchingUpsertCmd =
+ {findAndModify: cName, update: {x: "x"}, query: {_id: "non-match"}, upsert: true};
+var res = db.runCommand({explain: matchingUpsertCmd});
+assert.commandWorked(res);
+assert.eq(t.find().itcount(), 1, "Explaining an upsert should not insert any documents.");
- // 5. The reported stats should reflect how it would execute and what it would modify.
- var isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
+// 5. The reported stats should reflect how it would execute and what it would modify.
+var isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
- // List out the command to be explained, and the expected results of that explain.
- var testCases = [
- // -------------------------------------- Removes ----------------------------------------
- {
- // Non-matching remove command.
- cmd: {remove: true, query: {_id: "no-match"}},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {stage: "DELETE", nWouldDelete: 0}
- }
- }
- },
- {
- // Matching remove command.
- cmd: {remove: true, query: {_id: onlyDoc._id}},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {stage: "DELETE", nWouldDelete: 1}
- }
- }
- },
- // -------------------------------------- Updates ----------------------------------------
- {
- // Non-matching update query.
- cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false}
- }
- }
- },
- {
- // Non-matching update query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false}
- }
- }
- },
- {
- // Matching update query.
- cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
- }
- }
- },
- {
- // Matching update query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
- }
- }
- },
- // -------------------------------------- Upserts ----------------------------------------
- {
- // Non-matching upsert query.
- cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true}
- }
- }
- },
- {
- // Non-matching upsert query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true}
- }
- }
- },
- {
- // Matching upsert query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: onlyDoc._id}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
- }
- }
+// List the commands to be explained and the expected results of each explain.
+var testCases = [
+ // -------------------------------------- Removes ----------------------------------------
+ {
+ // Non-matching remove command.
+ cmd: {remove: true, query: {_id: "no-match"}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "DELETE", nWouldDelete: 0}
+ }
}
- ];
-
- // Apply all the same test cases, this time adding a projection stage.
- testCases = testCases.concat(testCases.map(function makeProjection(testCase) {
- return {
- cmd: Object.merge(testCase.cmd, {fields: {i: 0}}),
- expectedResult: {
- executionStats: {
-                    // nReturned shouldn't change.
- nReturned: testCase.expectedResult.executionStats.nReturned,
- executionStages: {
- stage: "PROJECTION_DEFAULT",
- transformBy: {i: 0},
-                        // Put the previous root stage under the projection stage.
- inputStage: testCase.expectedResult.executionStats.executionStages
- }
- }
+ },
+ {
+ // Matching remove command.
+ cmd: {remove: true, query: {_id: onlyDoc._id}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "DELETE", nWouldDelete: 1}
}
- };
- }));
- // Actually assert on the test cases.
- testCases.forEach(function(testCase) {
- assertExplainMatchedAllVerbosities(testCase.cmd, testCase.expectedResult);
- });
-
- // ----------------------------------------- Helpers -----------------------------------------
-
- /**
- * Helper to make this test work in the sharding passthrough suite.
- *
-     * If the explain output came from a mongos, transforms it to match the format
-     * produced by a mongod.
- */
- function transformIfSharded(explainOut) {
- if (!isMongos) {
- return explainOut;
}
+ },
+ // -------------------------------------- Updates ----------------------------------------
+ {
+ // Non-matching update query.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false}
+ }
+ }
+ },
+ {
+ // Non-matching update query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false}
+ }
+ }
+ },
+ {
+ // Matching update query.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
+ }
+ }
+ },
+ {
+ // Matching update query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
+ }
+ }
+ },
+ // -------------------------------------- Upserts ----------------------------------------
+ {
+ // Non-matching upsert query.
+ cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true}
+ }
+ }
+ },
+ {
+ // Non-matching upsert query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true}
+ }
+ }
+ },
+ {
+ // Matching upsert query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: onlyDoc._id}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
+ }
+ }
+ }
+];
- // Asserts that the explain command ran on a single shard and modifies the given
- // explain output to have a top-level UPDATE or DELETE stage by removing the
- // top-level SINGLE_SHARD stage.
- function replace(outerKey, innerKey) {
- assert(explainOut.hasOwnProperty(outerKey));
- assert(explainOut[outerKey].hasOwnProperty(innerKey));
-
- var shardStage = explainOut[outerKey][innerKey];
- assert.eq("SINGLE_SHARD", shardStage.stage);
- assert.eq(1, shardStage.shards.length);
- Object.extend(explainOut[outerKey], shardStage.shards[0], false);
+// Apply all the same test cases, this time adding a projection stage.
+testCases = testCases.concat(testCases.map(function makeProjection(testCase) {
+ return {
+ cmd: Object.merge(testCase.cmd, {fields: {i: 0}}),
+ expectedResult: {
+ executionStats: {
+                // nReturned shouldn't change.
+ nReturned: testCase.expectedResult.executionStats.nReturned,
+ executionStages: {
+ stage: "PROJECTION_DEFAULT",
+ transformBy: {i: 0},
+                    // Put the previous root stage under the projection stage.
+ inputStage: testCase.expectedResult.executionStats.executionStages
+ }
+ }
}
+ };
+}));
+// Actually assert on the test cases.
+testCases.forEach(function(testCase) {
+ assertExplainMatchedAllVerbosities(testCase.cmd, testCase.expectedResult);
+});
- replace("queryPlanner", "winningPlan");
- replace("executionStats", "executionStages");
+// ----------------------------------------- Helpers -----------------------------------------
+/**
+ * Helper to make this test work in the sharding passthrough suite.
+ *
+ * If the explain output came from a mongos, transforms it to match the format
+ * produced by a mongod.
+ */
+function transformIfSharded(explainOut) {
+ if (!isMongos) {
return explainOut;
}
- /**
- * Assert the results from running the explain match the expected results.
- *
- * Since we aren't expecting a perfect match (we only specify a subset of the fields we expect
- * to match), recursively go through the expected results, and make sure each one has a
- * corresponding field on the actual results, and that their values match.
- * Example doc for expectedMatches:
- * {executionStats: {nReturned: 0, executionStages: {isEOF: 1}}}
- */
- function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentPath) {
- // This is only used recursively, to keep track of where we are in the document.
- var isRootLevel = typeof currentPath === "undefined";
- Object.keys(expectedMatches).forEach(function(key) {
- var totalFieldName = isRootLevel ? key : currentPath + "." + key;
- assert(explainOut.hasOwnProperty(key),
- preMsg + "Explain's output does not have a value for " + key);
- if (typeof expectedMatches[key] === "object") {
-                // Sub-doc; recurse to match on its fields.
- assertExplainResultsMatch(
- explainOut[key], expectedMatches[key], preMsg, totalFieldName);
- } else {
- assert.eq(explainOut[key],
- expectedMatches[key],
- preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +
- " does not match expected value (" + expectedMatches[key] + ").");
- }
- });
- }
+ // Asserts that the explain command ran on a single shard and modifies the given
+ // explain output to have a top-level UPDATE or DELETE stage by removing the
+ // top-level SINGLE_SHARD stage.
+ function replace(outerKey, innerKey) {
+ assert(explainOut.hasOwnProperty(outerKey));
+ assert(explainOut[outerKey].hasOwnProperty(innerKey));
- /**
-     * Assert that running explain on the given findAndModify command matches the expected
-     * results at all verbosities (at the lowest verbosity, just assert that the command
-     * worked, since it has no useful stats).
- */
- function assertExplainMatchedAllVerbosities(findAndModifyArgs, expectedResult) {
- ["queryPlanner", "executionStats", "allPlansExecution"].forEach(function(verbosityMode) {
- var cmd = {
- explain: Object.merge({findAndModify: cName}, findAndModifyArgs),
- verbosity: verbosityMode
- };
- var msg = "Error after running command: " + tojson(cmd) + ": ";
- var explainOut = db.runCommand(cmd);
- assert.commandWorked(explainOut, "command: " + tojson(cmd));
- // Don't check explain results for queryPlanner mode, as that doesn't have any of the
- // interesting stats.
- if (verbosityMode !== "queryPlanner") {
- explainOut = transformIfSharded(explainOut);
- assertExplainResultsMatch(explainOut, expectedResult, msg);
- }
- });
+ var shardStage = explainOut[outerKey][innerKey];
+ assert.eq("SINGLE_SHARD", shardStage.stage);
+ assert.eq(1, shardStage.shards.length);
+ Object.extend(explainOut[outerKey], shardStage.shards[0], false);
}
- function assertDBDoesNotExist(db, msg) {
- assert.eq(db.getMongo().getDBNames().indexOf(db.getName()),
- -1,
- msg + "db " + db.getName() + " exists.");
- }
+ replace("queryPlanner", "winningPlan");
+ replace("executionStats", "executionStages");
- function assertCollDoesNotExist(cName, msg) {
- assert.eq(
- db.getCollectionNames().indexOf(cName), -1, msg + "collection " + cName + " exists.");
- }
+ return explainOut;
+}
+
+/**
+ * Assert the results from running the explain match the expected results.
+ *
+ * Since we aren't expecting a perfect match (we only specify a subset of the fields we expect
+ * to match), recursively go through the expected results, and make sure each one has a
+ * corresponding field on the actual results, and that their values match.
+ * Example doc for expectedMatches:
+ * {executionStats: {nReturned: 0, executionStages: {isEOF: 1}}}
+ */
+function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentPath) {
+ // This is only used recursively, to keep track of where we are in the document.
+ var isRootLevel = typeof currentPath === "undefined";
+ Object.keys(expectedMatches).forEach(function(key) {
+ var totalFieldName = isRootLevel ? key : currentPath + "." + key;
+ assert(explainOut.hasOwnProperty(key),
+ preMsg + "Explain's output does not have a value for " + key);
+ if (typeof expectedMatches[key] === "object") {
+            // Sub-doc; recurse to match on its fields.
+ assertExplainResultsMatch(
+ explainOut[key], expectedMatches[key], preMsg, totalFieldName);
+ } else {
+ assert.eq(explainOut[key],
+ expectedMatches[key],
+ preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +
+ " does not match expected value (" + expectedMatches[key] + ").");
+ }
+ });
+}
+
+/**
+ * Assert that running explain on the given findAndModify command matches the expected
+ * results at all verbosities (at the lowest verbosity, just assert that the command
+ * worked, since it has no useful stats).
+ */
+function assertExplainMatchedAllVerbosities(findAndModifyArgs, expectedResult) {
+ ["queryPlanner", "executionStats", "allPlansExecution"].forEach(function(verbosityMode) {
+ var cmd = {
+ explain: Object.merge({findAndModify: cName}, findAndModifyArgs),
+ verbosity: verbosityMode
+ };
+ var msg = "Error after running command: " + tojson(cmd) + ": ";
+ var explainOut = db.runCommand(cmd);
+ assert.commandWorked(explainOut, "command: " + tojson(cmd));
+ // Don't check explain results for queryPlanner mode, as that doesn't have any of the
+ // interesting stats.
+ if (verbosityMode !== "queryPlanner") {
+ explainOut = transformIfSharded(explainOut);
+ assertExplainResultsMatch(explainOut, expectedResult, msg);
+ }
+ });
+}
+
+function assertDBDoesNotExist(db, msg) {
+ assert.eq(db.getMongo().getDBNames().indexOf(db.getName()),
+ -1,
+ msg + "db " + db.getName() + " exists.");
+}
+
+function assertCollDoesNotExist(cName, msg) {
+ assert.eq(db.getCollectionNames().indexOf(cName), -1, msg + "collection " + cName + " exists.");
+}
})();
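// Aside: a minimal sketch of the explain shape these findAndModify cases exercise
// (collection name "sketch" and an unsharded deployment are assumed); explain only
// reports what the write *would* do, so nothing is actually modified.
var sketchColl = db.sketch;
sketchColl.drop();
assert.writeOK(sketchColl.insert({_id: 1, i: 0}));
var sketchOut = assert.commandWorked(db.runCommand({
    explain: {findAndModify: "sketch", query: {_id: 1}, update: {$inc: {i: 1}}},
    verbosity: "executionStats"
}));
// Expect something like {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}.
printjson(sketchOut.executionStats.executionStages);
assert.eq(0, sketchColl.findOne({_id: 1}).i);  // The document was left untouched.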
diff --git a/jstests/core/explain_missing_collection.js b/jstests/core/explain_missing_collection.js
index e129fb7f16a..c186d3015a4 100644
--- a/jstests/core/explain_missing_collection.js
+++ b/jstests/core/explain_missing_collection.js
@@ -3,45 +3,45 @@
* @tags: [assumes_no_implicit_collection_creation_after_drop]
*/
(function() {
- var missingColl = db.explain_null_collection;
+var missingColl = db.explain_null_collection;
- var explain;
- var explainColl;
+var explain;
+var explainColl;
- // .find()
- missingColl.drop();
- explain = missingColl.explain("executionStats").find().finish();
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .find()
+missingColl.drop();
+explain = missingColl.explain("executionStats").find().finish();
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .count()
- missingColl.drop();
- explain = missingColl.explain("executionStats").count();
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .count()
+missingColl.drop();
+explain = missingColl.explain("executionStats").count();
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .remove()
- missingColl.drop();
- explain = missingColl.explain("executionStats").remove({a: 1});
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .remove()
+missingColl.drop();
+explain = missingColl.explain("executionStats").remove({a: 1});
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .update() with upsert: false
- missingColl.drop();
- explainColl = missingColl.explain("executionStats");
- explain = explainColl.update({a: 1}, {b: 1});
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .update() with upsert: false
+missingColl.drop();
+explainColl = missingColl.explain("executionStats");
+explain = explainColl.update({a: 1}, {b: 1});
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .update() with upsert: true
- missingColl.drop();
- explainColl = missingColl.explain("executionStats");
- explain = explainColl.update({a: 1}, {b: 1}, {upsert: true});
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .update() with upsert: true
+missingColl.drop();
+explainColl = missingColl.explain("executionStats");
+explain = explainColl.update({a: 1}, {b: 1}, {upsert: true});
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .aggregate()
- missingColl.drop();
- explain = missingColl.explain("executionStats").aggregate([{$match: {a: 1}}]);
- assert.commandWorked(explain);
+// .aggregate()
+missingColl.drop();
+explain = missingColl.explain("executionStats").aggregate([{$match: {a: 1}}]);
+assert.commandWorked(explain);
}());
diff --git a/jstests/core/explain_missing_database.js b/jstests/core/explain_missing_database.js
index 93123086bde..a1eb89e10e4 100644
--- a/jstests/core/explain_missing_database.js
+++ b/jstests/core/explain_missing_database.js
@@ -1,44 +1,44 @@
// Test explain of various operations against a non-existent database
(function() {
- var explainMissingDb = db.getSiblingDB("explainMissingDb");
+var explainMissingDb = db.getSiblingDB("explainMissingDb");
- var explain;
- var explainColl;
+var explain;
+var explainColl;
- // .find()
- explainMissingDb.dropDatabase();
- explain = explainMissingDb.collection.explain("executionStats").find().finish();
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .find()
+explainMissingDb.dropDatabase();
+explain = explainMissingDb.collection.explain("executionStats").find().finish();
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .count()
- explainMissingDb.dropDatabase();
- explain = explainMissingDb.collection.explain("executionStats").count();
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .count()
+explainMissingDb.dropDatabase();
+explain = explainMissingDb.collection.explain("executionStats").count();
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .remove()
- explainMissingDb.dropDatabase();
- explain = explainMissingDb.collection.explain("executionStats").remove({a: 1});
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .remove()
+explainMissingDb.dropDatabase();
+explain = explainMissingDb.collection.explain("executionStats").remove({a: 1});
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .update() with upsert: false
- explainMissingDb.dropDatabase();
- explainColl = explainMissingDb.collection.explain("executionStats");
- explain = explainColl.update({a: 1}, {b: 1});
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .update() with upsert: false
+explainMissingDb.dropDatabase();
+explainColl = explainMissingDb.collection.explain("executionStats");
+explain = explainColl.update({a: 1}, {b: 1});
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .update() with upsert: true
- explainMissingDb.dropDatabase();
- explainColl = explainMissingDb.collection.explain("executionStats");
- explain = explainColl.update({a: 1}, {b: 1}, {upsert: true});
- assert.commandWorked(explain);
- assert("executionStats" in explain);
+// .update() with upsert: true
+explainMissingDb.dropDatabase();
+explainColl = explainMissingDb.collection.explain("executionStats");
+explain = explainColl.update({a: 1}, {b: 1}, {upsert: true});
+assert.commandWorked(explain);
+assert("executionStats" in explain);
- // .aggregate()
- explainMissingDb.dropDatabase();
- explain = explainMissingDb.collection.explain("executionStats").aggregate([{$match: {a: 1}}]);
- assert.commandWorked(explain);
+// .aggregate()
+explainMissingDb.dropDatabase();
+explain = explainMissingDb.collection.explain("executionStats").aggregate([{$match: {a: 1}}]);
+assert.commandWorked(explain);
}());
diff --git a/jstests/core/explain_multi_plan.js b/jstests/core/explain_multi_plan.js
index 956cc41e0df..1b2b0d6cdb6 100644
--- a/jstests/core/explain_multi_plan.js
+++ b/jstests/core/explain_multi_plan.js
@@ -10,71 +10,71 @@
* there are multiple plans available. This is a regression test for SERVER-20849 and SERVER-21376.
*/
(function() {
- "use strict";
- var coll = db.explainMultiPlan;
- coll.drop();
+"use strict";
+var coll = db.explainMultiPlan;
+coll.drop();
- // Create indices to ensure there are multiple plans available.
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
- assert.commandWorked(coll.ensureIndex({a: 1, b: -1}));
+// Create indices to ensure there are multiple plans available.
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
+assert.commandWorked(coll.ensureIndex({a: 1, b: -1}));
- // Insert some data to work with.
- var bulk = coll.initializeOrderedBulkOp();
- var nDocs = 100;
- for (var i = 0; i < nDocs; ++i) {
- bulk.insert({a: i, b: nDocs - i});
- }
- bulk.execute();
+// Insert some data to work with.
+var bulk = coll.initializeOrderedBulkOp();
+var nDocs = 100;
+for (var i = 0; i < nDocs; ++i) {
+ bulk.insert({a: i, b: nDocs - i});
+}
+bulk.execute();
- // SERVER-20849: The following commands should not crash the server.
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").update({a: {$gte: 1}}, {$set: {x: 0}});
- });
+// SERVER-20849: The following commands should not crash the server.
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").update({a: {$gte: 1}}, {$set: {x: 0}});
+});
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").remove({a: {$gte: 1}});
- });
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").remove({a: {$gte: 1}});
+});
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, remove: true});
- });
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, remove: true});
+});
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, update: {y: 1}});
- });
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, update: {y: 1}});
+});
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").find({a: {$gte: 1}}).finish();
- });
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").find({a: {$gte: 1}}).finish();
+});
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").count({a: {$gte: 1}});
- });
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").count({a: {$gte: 1}});
+});
- assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").distinct("a", {a: {$gte: 1}});
- });
+assert.doesNotThrow(function() {
+ coll.explain("allPlansExecution").distinct("a", {a: {$gte: 1}});
+});
- // SERVER-21376: Make sure the 'rejectedPlans' field is filled in appropriately.
- function assertHasRejectedPlans(explainOutput) {
- var queryPlannerOutput = explainOutput.queryPlanner;
+// SERVER-21376: Make sure the 'rejectedPlans' field is filled in appropriately.
+function assertHasRejectedPlans(explainOutput) {
+ var queryPlannerOutput = explainOutput.queryPlanner;
- // The 'rejectedPlans' section will be in a different place if passed through a mongos.
- if ("SINGLE_SHARD" == queryPlannerOutput.winningPlan.stage) {
- var shards = queryPlannerOutput.winningPlan.shards;
- shards.forEach(function assertShardHasRejectedPlans(shard) {
- assert.gt(shard.rejectedPlans.length, 0);
- });
- } else {
- assert.gt(queryPlannerOutput.rejectedPlans.length, 0);
- }
+ // The 'rejectedPlans' section will be in a different place if passed through a mongos.
+ if ("SINGLE_SHARD" == queryPlannerOutput.winningPlan.stage) {
+ var shards = queryPlannerOutput.winningPlan.shards;
+ shards.forEach(function assertShardHasRejectedPlans(shard) {
+ assert.gt(shard.rejectedPlans.length, 0);
+ });
+ } else {
+ assert.gt(queryPlannerOutput.rejectedPlans.length, 0);
}
+}
- var res = coll.explain("queryPlanner").find({a: {$gte: 1}}).finish();
- assert.commandWorked(res);
- assertHasRejectedPlans(res);
+var res = coll.explain("queryPlanner").find({a: {$gte: 1}}).finish();
+assert.commandWorked(res);
+assertHasRejectedPlans(res);
- res = coll.explain("executionStats").find({a: {$gte: 1}}).finish();
- assert.commandWorked(res);
- assertHasRejectedPlans(res);
+res = coll.explain("executionStats").find({a: {$gte: 1}}).finish();
+assert.commandWorked(res);
+assertHasRejectedPlans(res);
}());
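// Aside: a minimal sketch (unsharded deployment assumed) of pulling the rejected
// plans out of the explain output checked above; through a mongos they sit under
// the winningPlan's SINGLE_SHARD stage rather than at the queryPlanner top level.
var mpOut = db.explainMultiPlan.explain("queryPlanner").find({a: {$gte: 1}}).finish();
assert.commandWorked(mpOut);
printjson(mpOut.queryPlanner.rejectedPlans);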
diff --git a/jstests/core/explain_multikey.js b/jstests/core/explain_multikey.js
index 91763555ffb..1ec20bb4552 100644
--- a/jstests/core/explain_multikey.js
+++ b/jstests/core/explain_multikey.js
@@ -4,79 +4,79 @@
// be the case on all shards.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- var coll = db.explain_multikey;
- var keyPattern = {
- a: 1,
- "b.c": 1,
- "b.d": 1,
- };
+var coll = db.explain_multikey;
+var keyPattern = {
+ a: 1,
+ "b.c": 1,
+ "b.d": 1,
+};
- /**
- * Creates an index with a key pattern of 'keyPattern' on a collection containing a single
- * document and runs the specified command under explain.
- *
- * @param {Object} testOptions
- * @param {Object} testOptions.docToInsert - The document to insert into the collection.
- * @param {Object} testOptions.commandObj - The operation to run "explain" on.
- * @param {string} testOptions.stage - The plan summary name of the winning plan.
- *
- * @returns {Object} The "queryPlanner" information of the stage with the specified plan summary
- * name.
- */
- function createIndexAndRunExplain(testOptions) {
- coll.drop();
+/**
+ * Creates an index with a key pattern of 'keyPattern' on a collection containing a single
+ * document and runs the specified command under explain.
+ *
+ * @param {Object} testOptions
+ * @param {Object} testOptions.docToInsert - The document to insert into the collection.
+ * @param {Object} testOptions.commandObj - The operation to run "explain" on.
+ * @param {string} testOptions.stage - The plan summary name of the winning plan.
+ *
+ * @returns {Object} The "queryPlanner" information of the stage with the specified plan summary
+ * name.
+ */
+function createIndexAndRunExplain(testOptions) {
+ coll.drop();
- assert.commandWorked(coll.createIndex(keyPattern));
- assert.writeOK(coll.insert(testOptions.docToInsert));
+ assert.commandWorked(coll.createIndex(keyPattern));
+ assert.writeOK(coll.insert(testOptions.docToInsert));
- var explain = db.runCommand({explain: testOptions.commandObj});
- assert.commandWorked(explain);
+ var explain = db.runCommand({explain: testOptions.commandObj});
+ assert.commandWorked(explain);
- assert(planHasStage(db, explain.queryPlanner.winningPlan, testOptions.stage),
- "expected stage to be present: " + tojson(explain));
- return getPlanStage(explain.queryPlanner.winningPlan, testOptions.stage);
- }
+ assert(planHasStage(db, explain.queryPlanner.winningPlan, testOptions.stage),
+ "expected stage to be present: " + tojson(explain));
+ return getPlanStage(explain.queryPlanner.winningPlan, testOptions.stage);
+}
- // Calls createIndexAndRunExplain() twice: once with a document that causes the created index to
- // be multikey, and again with a document that doesn't cause the created index to be multikey.
- function verifyMultikeyInfoInExplainOutput(testOptions) {
- // Insert a document that should cause the index to be multikey.
- testOptions.docToInsert = {
- a: 1,
- b: [{c: ["w", "x"], d: 3}, {c: ["y", "z"], d: 4}],
- };
- var stage = createIndexAndRunExplain(testOptions);
+// Calls createIndexAndRunExplain() twice: once with a document that causes the created index to
+// be multikey, and again with a document that doesn't cause the created index to be multikey.
+function verifyMultikeyInfoInExplainOutput(testOptions) {
+ // Insert a document that should cause the index to be multikey.
+ testOptions.docToInsert = {
+ a: 1,
+ b: [{c: ["w", "x"], d: 3}, {c: ["y", "z"], d: 4}],
+ };
+ var stage = createIndexAndRunExplain(testOptions);
- assert.eq(true, stage.isMultiKey, "expected index to be multikey: " + tojson(stage));
- assert.eq({a: [], "b.c": ["b", "b.c"], "b.d": ["b"]}, stage.multiKeyPaths, tojson(stage));
+ assert.eq(true, stage.isMultiKey, "expected index to be multikey: " + tojson(stage));
+ assert.eq({a: [], "b.c": ["b", "b.c"], "b.d": ["b"]}, stage.multiKeyPaths, tojson(stage));
- // Drop the collection and insert a document that shouldn't cause the index to be multikey.
- testOptions.docToInsert = {
- a: 1,
- b: {c: "w", d: 4},
- };
- stage = createIndexAndRunExplain(testOptions);
+ // Drop the collection and insert a document that shouldn't cause the index to be multikey.
+ testOptions.docToInsert = {
+ a: 1,
+ b: {c: "w", d: 4},
+ };
+ stage = createIndexAndRunExplain(testOptions);
- assert.eq(false, stage.isMultiKey, "expected index not to be multikey: " + tojson(stage));
- assert.eq({a: [], "b.c": [], "b.d": []}, stage.multiKeyPaths, tojson(stage));
- }
+ assert.eq(false, stage.isMultiKey, "expected index not to be multikey: " + tojson(stage));
+ assert.eq({a: [], "b.c": [], "b.d": []}, stage.multiKeyPaths, tojson(stage));
+}
- verifyMultikeyInfoInExplainOutput({
- commandObj: {find: coll.getName(), hint: keyPattern},
- stage: "IXSCAN",
- });
+verifyMultikeyInfoInExplainOutput({
+ commandObj: {find: coll.getName(), hint: keyPattern},
+ stage: "IXSCAN",
+});
- verifyMultikeyInfoInExplainOutput({
- commandObj: {count: coll.getName(), hint: keyPattern},
- stage: "COUNT_SCAN",
- });
+verifyMultikeyInfoInExplainOutput({
+ commandObj: {count: coll.getName(), hint: keyPattern},
+ stage: "COUNT_SCAN",
+});
- verifyMultikeyInfoInExplainOutput({
- commandObj: {distinct: coll.getName(), key: "a"},
- stage: "DISTINCT_SCAN",
- });
+verifyMultikeyInfoInExplainOutput({
+ commandObj: {distinct: coll.getName(), key: "a"},
+ stage: "DISTINCT_SCAN",
+});
})();
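// Aside: a minimal sketch (collection and field names assumed) of how the multikey
// info above is surfaced; multiKeyPaths maps each indexed field to the path
// components that are arrays, so an empty array means that field is not multikey.
var mkColl = db.explain_multikey_sketch;
mkColl.drop();
assert.commandWorked(mkColl.createIndex({a: 1, "b.c": 1}));
assert.writeOK(mkColl.insert({a: 1, b: [{c: 1}, {c: 2}]}));
var mkExplain = mkColl.find({a: 1}).hint({a: 1, "b.c": 1}).explain();
// Expect isMultiKey: true and multiKeyPaths like {a: [], "b.c": ["b"]} on the IXSCAN.
printjson(mkExplain.queryPlanner.winningPlan);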
diff --git a/jstests/core/explain_sample.js b/jstests/core/explain_sample.js
index 8bc7b53906f..bb8ea6d54ef 100644
--- a/jstests/core/explain_sample.js
+++ b/jstests/core/explain_sample.js
@@ -4,39 +4,39 @@
* @tags: [requires_wiredtiger]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- // Although this test is tagged with 'requires_wiredtiger', this is not sufficient for ensuring
- // that the parallel suite runs this test only on WT configurations.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") {
- jsTest.log("Skipping test on non-WT storage engine: " + jsTest.options().storageEngine);
- return;
- }
+// Although this test is tagged with 'requires_wiredtiger', this is not sufficient for ensuring
+// that the parallel suite runs this test only on WT configurations.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") {
+ jsTest.log("Skipping test on non-WT storage engine: " + jsTest.options().storageEngine);
+ return;
+}
- const coll = db.explain_sample;
- coll.drop();
+const coll = db.explain_sample;
+coll.drop();
- let docsToInsert = [];
- for (let i = 0; i < 1000; ++i) {
- docsToInsert.push({_id: i});
- }
- assert.commandWorked(coll.insert(docsToInsert));
+let docsToInsert = [];
+for (let i = 0; i < 1000; ++i) {
+ docsToInsert.push({_id: i});
+}
+assert.commandWorked(coll.insert(docsToInsert));
- // Verify that explain reports execution stats for the MULTI_ITERATOR stage. This is designed to
- // reproduce SERVER-35973.
- const explain =
- assert.commandWorked(coll.explain("allPlansExecution").aggregate([{$sample: {size: 10}}]));
- const multiIteratorStages = getAggPlanStages(explain, "MULTI_ITERATOR");
- assert.gt(multiIteratorStages.length, 0, tojson(explain));
- assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.nReturned, 0),
- 0,
- tojson(multiIteratorStages));
- assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.advanced, 0),
- 0,
- tojson(multiIteratorStages));
- assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.works, 0),
- 0,
- tojson(multiIteratorStages));
+// Verify that explain reports execution stats for the MULTI_ITERATOR stage. This is designed to
+// reproduce SERVER-35973.
+const explain =
+ assert.commandWorked(coll.explain("allPlansExecution").aggregate([{$sample: {size: 10}}]));
+const multiIteratorStages = getAggPlanStages(explain, "MULTI_ITERATOR");
+assert.gt(multiIteratorStages.length, 0, tojson(explain));
+assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.nReturned, 0),
+ 0,
+ tojson(multiIteratorStages));
+assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.advanced, 0),
+ 0,
+ tojson(multiIteratorStages));
+assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.works, 0),
+ 0,
+ tojson(multiIteratorStages));
}());
diff --git a/jstests/core/explain_uuid.js b/jstests/core/explain_uuid.js
index be5e9e01adb..bee56d4b2a9 100644
--- a/jstests/core/explain_uuid.js
+++ b/jstests/core/explain_uuid.js
@@ -3,54 +3,53 @@
* cleanly.
*/
(function() {
- "use strict";
-
- // Use our own database so that we're guaranteed the only collection is this one.
- const explainDB = db.getSiblingDB("explain_uuid_db");
-
- assert.commandWorked(explainDB.dropDatabase());
-
- const coll = explainDB.explain_uuid;
- assert.commandWorked(coll.insert({a: 1}));
-
- const collInfos = explainDB.getCollectionInfos({name: coll.getName()});
- assert.eq(collInfos.length, 1, collInfos);
- const uuid = collInfos[0].info.uuid;
-
- // Run a find explain looking up by UUID.
- assert.commandFailedWithCode(explainDB.runCommand({explain: {find: uuid}}),
- ErrorCodes.InvalidNamespace);
-
- // Do similar for other commands.
- assert.commandFailedWithCode(explainDB.runCommand({explain: {aggregate: uuid, cursor: {}}}),
- ErrorCodes.TypeMismatch);
-
- assert.commandFailedWithCode(explainDB.runCommand({explain: {count: uuid}}),
- ErrorCodes.InvalidNamespace);
-
- assert.commandFailedWithCode(explainDB.runCommand({explain: {distinct: uuid, key: "x"}}),
- ErrorCodes.InvalidNamespace);
-
- // When auth is enabled, running findAndModify with an invalid namespace will produce a special
- // error during the auth check, rather than the generic 'InvalidNamespace' error.
- const expectedCode = TestData.auth ? 17137 : ErrorCodes.InvalidNamespace;
- assert.commandFailedWithCode(
- explainDB.runCommand({explain: {findAndModify: uuid, query: {a: 1}, remove: true}}),
- expectedCode);
-
- assert.commandFailedWithCode(
- explainDB.runCommand({explain: {delete: uuid, deletes: [{q: {}, limit: 1}]}}),
- ErrorCodes.BadValue);
-
- assert.commandFailedWithCode(explainDB.runCommand({
- explain: {
- update: uuid,
- updates: [{
- q: {a: 1},
- u: {$set: {b: 1}},
- }]
- }
- }),
- ErrorCodes.BadValue);
-
+"use strict";
+
+// Use our own database so that we're guaranteed the only collection is this one.
+const explainDB = db.getSiblingDB("explain_uuid_db");
+
+assert.commandWorked(explainDB.dropDatabase());
+
+const coll = explainDB.explain_uuid;
+assert.commandWorked(coll.insert({a: 1}));
+
+const collInfos = explainDB.getCollectionInfos({name: coll.getName()});
+assert.eq(collInfos.length, 1, collInfos);
+const uuid = collInfos[0].info.uuid;
+
+// Run a find explain looking up by UUID.
+assert.commandFailedWithCode(explainDB.runCommand({explain: {find: uuid}}),
+ ErrorCodes.InvalidNamespace);
+
+// Do similar for other commands.
+assert.commandFailedWithCode(explainDB.runCommand({explain: {aggregate: uuid, cursor: {}}}),
+ ErrorCodes.TypeMismatch);
+
+assert.commandFailedWithCode(explainDB.runCommand({explain: {count: uuid}}),
+ ErrorCodes.InvalidNamespace);
+
+assert.commandFailedWithCode(explainDB.runCommand({explain: {distinct: uuid, key: "x"}}),
+ ErrorCodes.InvalidNamespace);
+
+// When auth is enabled, running findAndModify with an invalid namespace will produce a special
+// error during the auth check, rather than the generic 'InvalidNamespace' error.
+const expectedCode = TestData.auth ? 17137 : ErrorCodes.InvalidNamespace;
+assert.commandFailedWithCode(
+ explainDB.runCommand({explain: {findAndModify: uuid, query: {a: 1}, remove: true}}),
+ expectedCode);
+
+assert.commandFailedWithCode(
+ explainDB.runCommand({explain: {delete: uuid, deletes: [{q: {}, limit: 1}]}}),
+ ErrorCodes.BadValue);
+
+assert.commandFailedWithCode(explainDB.runCommand({
+ explain: {
+ update: uuid,
+ updates: [{
+ q: {a: 1},
+ u: {$set: {b: 1}},
+ }]
+ }
+}),
+ ErrorCodes.BadValue);
})();
diff --git a/jstests/core/explain_writecmd_nonexistent_collection.js b/jstests/core/explain_writecmd_nonexistent_collection.js
index 2d3080357b5..2496f4b63a0 100644
--- a/jstests/core/explain_writecmd_nonexistent_collection.js
+++ b/jstests/core/explain_writecmd_nonexistent_collection.js
@@ -3,38 +3,38 @@
// @tags: [requires_non_retryable_writes, requires_fastcount,
// assumes_no_implicit_collection_creation_after_drop]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- function assertCollectionDoesNotExist(collName) {
- const collectionList = db.getCollectionInfos({name: collName});
- assert.eq(0, collectionList.length, collectionList);
- }
+function assertCollectionDoesNotExist(collName) {
+ const collectionList = db.getCollectionInfos({name: collName});
+ assert.eq(0, collectionList.length, collectionList);
+}
- const collName = "explain_delete_nonexistent_collection";
- const coll = db[collName];
- coll.drop();
+const collName = "explain_delete_nonexistent_collection";
+const coll = db[collName];
+coll.drop();
- // Explain of delete against a non-existent collection returns an EOF plan.
- let explain = assert.commandWorked(
- db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DELETE"), explain);
+// Explain of delete against a non-existent collection returns an EOF plan.
+let explain = assert.commandWorked(
+ db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DELETE"), explain);
- assertCollectionDoesNotExist(collName);
+assertCollectionDoesNotExist(collName);
- // Explain of an update with upsert:false returns an EOF plan.
- explain = assert.commandWorked(db.runCommand(
- {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: false}]}}));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain);
- assertCollectionDoesNotExist(collName);
+// Explain of an update with upsert:false returns an EOF plan.
+explain = assert.commandWorked(db.runCommand(
+ {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: false}]}}));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain);
+assertCollectionDoesNotExist(collName);
- // Explain of an update with upsert:true returns an EOF plan, and does not create a collection.
- explain = assert.commandWorked(db.runCommand(
- {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: true}]}}));
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain);
- assertCollectionDoesNotExist(collName);
+// Explain of an update with upsert:true returns an EOF plan, and does not create a collection.
+explain = assert.commandWorked(db.runCommand(
+ {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: true}]}}));
+assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain);
+assertCollectionDoesNotExist(collName);
}());
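// Aside: a minimal sketch of the invariant above, with an assumed collection name;
// explaining a write against a non-existent collection yields an EOF plan and must
// not implicitly create the collection.
var eofOut = assert.commandWorked(
    db.runCommand({explain: {delete: "no_such_collection", deletes: [{q: {a: 1}, limit: 0}]}}));
printjson(eofOut.queryPlanner.winningPlan);  // Expect an EOF stage.
assert.eq(0, db.getCollectionInfos({name: "no_such_collection"}).length);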
diff --git a/jstests/core/expr.js b/jstests/core/expr.js
index f0a463a22fb..78ef8b87f0c 100644
--- a/jstests/core/expr.js
+++ b/jstests/core/expr.js
@@ -6,317 +6,316 @@
// Tests for $expr in the CRUD commands.
(function() {
- "use strict";
-
- const coll = db.expr;
-
- const isMaster = db.runCommand("ismaster");
- assert.commandWorked(isMaster);
- const isMongos = (isMaster.msg === "isdbgrid");
-
- //
- // $expr in aggregate.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({a: 0}));
- assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$a", 0]}}}]).itcount());
- assert.throws(function() {
- coll.aggregate([{$match: {$expr: {$eq: ["$a", "$$unbound"]}}}]);
- });
- assert.throws(function() {
- coll.aggregate([{$match: {$expr: {$divide: [1, "$a"]}}}]);
- });
-
- //
- // $expr in count.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({a: 0}));
- assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).count());
- assert.throws(function() {
- coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).count();
- });
- assert.throws(function() {
- coll.find({$expr: {$divide: [1, "$a"]}}).count();
- });
-
- //
- // $expr in distinct.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({a: 0}));
- assert.eq(1, coll.distinct("a", {$expr: {$eq: ["$a", 0]}}).length);
- assert.throws(function() {
- coll.distinct("a", {$expr: {$eq: ["$a", "$$unbound"]}});
- });
- assert.throws(function() {
- coll.distinct("a", {$expr: {$divide: [1, "$a"]}});
- });
-
- //
- // $expr in find.
- //
-
- // $expr is allowed in query.
- coll.drop();
- assert.writeOK(coll.insert({a: 0}));
- assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).itcount());
-
- // $expr with time zone expression across getMore (SERVER-31664).
- coll.drop();
- assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
-
- let res = assert.commandWorked(db.runCommand({
- find: coll.getName(),
- filter: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}},
- batchSize: 0
- }));
- assert.eq(0, res.cursor.firstBatch.length);
-
- let cursorId = res.cursor.id;
- res = assert.commandWorked(db.runCommand({getMore: cursorId, collection: coll.getName()}));
- assert.eq(1, res.cursor.nextBatch.length);
-
- // $expr with unbound variable throws.
- assert.throws(function() {
- coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).itcount();
- });
-
- // $and with $expr child containing an invalid expression throws.
- assert.throws(function() {
- coll.find({$and: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount();
- });
-
- // $or with $expr child containing an invalid expression throws.
- assert.throws(function() {
- coll.find({$or: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount();
- });
-
- // $nor with $expr child containing an invalid expression throws.
- assert.throws(function() {
- coll.find({$nor: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount();
- });
-
- // $expr with division by zero throws.
- assert.throws(function() {
- coll.find({$expr: {$divide: [1, "$a"]}}).itcount();
- });
-
- // $expr is allowed in find with explain.
- assert.commandWorked(coll.find({$expr: {$eq: ["$a", 0]}}).explain());
-
- // $expr with unbound variable in find with explain throws.
- assert.throws(function() {
- coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).explain();
- });
-
- // $expr with division by zero in find with explain with executionStats throws.
- assert.throws(function() {
- coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats");
- });
-
- // $expr is not allowed in $elemMatch projection.
- coll.drop();
- assert.writeOK(coll.insert({a: [{b: 5}]}));
- assert.throws(function() {
- coll.find({}, {a: {$elemMatch: {$expr: {$eq: ["$b", 5]}}}}).itcount();
- });
-
- //
- // $expr in findAndModify.
- //
-
- // $expr is allowed in the query when upsert=false.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 0}));
- assert.eq({_id: 0, a: 0, b: 6},
- coll.findAndModify(
- {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, new: true}));
-
- // $expr with unbound variable throws.
- assert.throws(function() {
- coll.findAndModify(
- {query: {_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, update: {$set: {b: 6}}});
- });
-
- // $expr with division by zero throws.
- assert.throws(function() {
- coll.findAndModify({query: {_id: 0, $expr: {$divide: [1, "$a"]}}, update: {$set: {b: 6}}});
- });
-
- // $expr is not allowed in the query when upsert=true.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 0}));
- assert.throws(function() {
- coll.findAndModify(
- {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, upsert: true});
- });
-
- // $expr is not allowed in $pull filter.
+"use strict";
+
+const coll = db.expr;
+
+const isMaster = db.runCommand("ismaster");
+assert.commandWorked(isMaster);
+const isMongos = (isMaster.msg === "isdbgrid");
+
+//
+// $expr in aggregate.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({a: 0}));
+assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$a", 0]}}}]).itcount());
+assert.throws(function() {
+ coll.aggregate([{$match: {$expr: {$eq: ["$a", "$$unbound"]}}}]);
+});
+assert.throws(function() {
+ coll.aggregate([{$match: {$expr: {$divide: [1, "$a"]}}}]);
+});
+
+//
+// $expr in count.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({a: 0}));
+assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).count());
+assert.throws(function() {
+ coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).count();
+});
+assert.throws(function() {
+ coll.find({$expr: {$divide: [1, "$a"]}}).count();
+});
+
+//
+// $expr in distinct.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({a: 0}));
+assert.eq(1, coll.distinct("a", {$expr: {$eq: ["$a", 0]}}).length);
+assert.throws(function() {
+ coll.distinct("a", {$expr: {$eq: ["$a", "$$unbound"]}});
+});
+assert.throws(function() {
+ coll.distinct("a", {$expr: {$divide: [1, "$a"]}});
+});
+
+//
+// $expr in find.
+//
+
+// $expr is allowed in query.
+coll.drop();
+assert.writeOK(coll.insert({a: 0}));
+assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).itcount());
+
+// $expr with time zone expression across getMore (SERVER-31664).
+coll.drop();
+assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")}));
+
+let res = assert.commandWorked(db.runCommand({
+ find: coll.getName(),
+ filter: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}},
+ batchSize: 0
+}));
+assert.eq(0, res.cursor.firstBatch.length);
+
+let cursorId = res.cursor.id;
+res = assert.commandWorked(db.runCommand({getMore: cursorId, collection: coll.getName()}));
+assert.eq(1, res.cursor.nextBatch.length);
+
+// $expr with unbound variable throws.
+assert.throws(function() {
+ coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).itcount();
+});
+
+// $and with $expr child containing an invalid expression throws.
+assert.throws(function() {
+ coll.find({$and: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount();
+});
+
+// $or with $expr child containing an invalid expression throws.
+assert.throws(function() {
+ coll.find({$or: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount();
+});
+
+// $nor with $expr child containing an invalid expression throws.
+assert.throws(function() {
+ coll.find({$nor: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount();
+});
+
+// $expr with division by zero throws.
+assert.throws(function() {
+ coll.find({$expr: {$divide: [1, "$a"]}}).itcount();
+});
+
+// $expr is allowed in find with explain.
+assert.commandWorked(coll.find({$expr: {$eq: ["$a", 0]}}).explain());
+
+// $expr with unbound variable in find with explain throws.
+assert.throws(function() {
+ coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).explain();
+});
+
+// $expr with division by zero in find with explain with executionStats throws.
+assert.throws(function() {
+ coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats");
+});
+
+// $expr is not allowed in $elemMatch projection.
+coll.drop();
+assert.writeOK(coll.insert({a: [{b: 5}]}));
+assert.throws(function() {
+ coll.find({}, {a: {$elemMatch: {$expr: {$eq: ["$b", 5]}}}}).itcount();
+});
+
+//
+// $expr in findAndModify.
+//
+
+// $expr is allowed in the query when upsert=false.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.eq({_id: 0, a: 0, b: 6},
+ coll.findAndModify(
+ {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, new: true}));
+
+// $expr with unbound variable throws.
+assert.throws(function() {
+ coll.findAndModify(
+ {query: {_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, update: {$set: {b: 6}}});
+});
+
+// $expr with division by zero throws.
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 0, $expr: {$divide: [1, "$a"]}}, update: {$set: {b: 6}}});
+});
+
+// $expr is not allowed in the query when upsert=true.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.throws(function() {
+ coll.findAndModify(
+ {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, upsert: true});
+});
+
+// $expr is not allowed in $pull filter.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 0}, update: {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}});
+});
+
+// $expr is not allowed in arrayFilters.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, update: {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}});
- });
-
- // $expr is not allowed in arrayFilters.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
- assert.throws(function() {
- coll.findAndModify({
- query: {_id: 0},
- update: {$set: {"a.$[i].b": 6}},
- arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]
- });
+ coll.findAndModify({
+ query: {_id: 0},
+ update: {$set: {"a.$[i].b": 6}},
+ arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]
});
+ });
+}
+
+//
+// $expr in the $geoNear stage.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+assert.eq(1,
+ coll.aggregate({
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "dist",
+ spherical: true,
+ query: {$expr: {$eq: ["$a", 0]}}
+ }
+ })
+ .toArray()
+ .length);
+assert.throws(() => coll.aggregate({
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "dist",
+ spherical: true,
+ query: {$expr: {$eq: ["$a", "$$unbound"]}}
}
-
- //
- // $expr in the $geoNear stage.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}));
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- assert.eq(1,
- coll.aggregate({
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "dist",
- spherical: true,
- query: {$expr: {$eq: ["$a", 0]}}
- }
- })
- .toArray()
- .length);
- assert.throws(() => coll.aggregate({
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "dist",
- spherical: true,
- query: {$expr: {$eq: ["$a", "$$unbound"]}}
- }
- }));
- assert.throws(() => coll.aggregate({
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "dist",
- spherical: true,
- query: {$expr: {$divide: [1, "$a"]}}
- }
- }));
-
- //
- // $expr in mapReduce.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({a: 0}));
- let mapReduceOut = coll.mapReduce(
+}));
+assert.throws(() => coll.aggregate({
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ distanceField: "dist",
+ spherical: true,
+ query: {$expr: {$divide: [1, "$a"]}}
+ }
+}));
+
+//
+// $expr in mapReduce.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({a: 0}));
+let mapReduceOut = coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {$expr: {$eq: ["$a", 0]}}});
+assert.commandWorked(mapReduceOut);
+assert.eq(mapReduceOut.results.length, 1, tojson(mapReduceOut));
+assert.throws(function() {
+ coll.mapReduce(
function() {
emit(this.a, 1);
},
function(key, values) {
return Array.sum(values);
},
- {out: {inline: 1}, query: {$expr: {$eq: ["$a", 0]}}});
- assert.commandWorked(mapReduceOut);
- assert.eq(mapReduceOut.results.length, 1, tojson(mapReduceOut));
- assert.throws(function() {
- coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {$expr: {$eq: ["$a", "$$unbound"]}}});
- });
- assert.throws(function() {
- coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {$expr: {$divide: [1, "$a"]}}});
- });
-
- //
- // $expr in remove.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 0}));
- let writeRes = coll.remove({_id: 0, $expr: {$eq: ["$a", 0]}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- assert.writeError(coll.remove({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}));
- assert.writeOK(coll.insert({_id: 0, a: 0}));
- assert.writeError(coll.remove({_id: 0, $expr: {$divide: [1, "$a"]}}));
-
- // Any writes preceding the write that fails to parse are executed.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1}));
- writeRes = db.runCommand({
- delete: coll.getName(),
- deletes: [{q: {_id: 0}, limit: 1}, {q: {$expr: "$$unbound"}, limit: 1}]
- });
- assert.commandWorkedIgnoringWriteErrors(writeRes);
- assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes));
- assert.eq(writeRes.n, 1, tojson(writeRes));
-
- //
- // $expr in update.
- //
-
- // $expr is allowed in the query when upsert=false.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 0}));
- assert.writeOK(coll.update({_id: 0, $expr: {$eq: ["$a", 0]}}, {$set: {b: 6}}));
- assert.eq({_id: 0, a: 0, b: 6}, coll.findOne({_id: 0}));
-
- // $expr with unbound variable fails.
- assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, {$set: {b: 6}}));
-
- // $expr with division by zero fails.
- assert.writeError(coll.update({_id: 0, $expr: {$divide: [1, "$a"]}}, {$set: {b: 6}}));
-
- // $expr is not allowed in the query when upsert=true.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: 5}));
- assert.writeError(
- coll.update({_id: 0, $expr: {$eq: ["$a", 5]}}, {$set: {b: 6}}, {upsert: true}));
-
- // $expr is not allowed in $pull filter.
+ {out: {inline: 1}, query: {$expr: {$eq: ["$a", "$$unbound"]}}});
+});
+assert.throws(function() {
+ coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {$expr: {$divide: [1, "$a"]}}});
+});
+
+//
+// $expr in remove.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: 0}));
+let writeRes = coll.remove({_id: 0, $expr: {$eq: ["$a", 0]}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+assert.writeError(coll.remove({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}));
+assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.writeError(coll.remove({_id: 0, $expr: {$divide: [1, "$a"]}}));
+
+// Any writes preceding the write that fails to parse are executed.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1}));
+writeRes = db.runCommand({
+ delete: coll.getName(),
+ deletes: [{q: {_id: 0}, limit: 1}, {q: {$expr: "$$unbound"}, limit: 1}]
+});
+assert.commandWorkedIgnoringWriteErrors(writeRes);
+assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes));
+assert.eq(writeRes.n, 1, tojson(writeRes));
+
+//
+// $expr in update.
+//
+
+// $expr is allowed in the query when upsert=false.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: 0}));
+assert.writeOK(coll.update({_id: 0, $expr: {$eq: ["$a", 0]}}, {$set: {b: 6}}));
+assert.eq({_id: 0, a: 0, b: 6}, coll.findOne({_id: 0}));
+
+// $expr with unbound variable fails.
+assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, {$set: {b: 6}}));
+
+// $expr with division by zero fails.
+assert.writeError(coll.update({_id: 0, $expr: {$divide: [1, "$a"]}}, {$set: {b: 6}}));
+
+// $expr is not allowed in the query when upsert=true.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: 5}));
+assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", 5]}}, {$set: {b: 6}}, {upsert: true}));
+
+// $expr is not allowed in $pull filter.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
+assert.writeError(coll.update({_id: 0}, {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}));
+
+// $expr is not allowed in arrayFilters.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
- assert.writeError(coll.update({_id: 0}, {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}));
-
- // $expr is not allowed in arrayFilters.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]}));
- assert.writeError(coll.update({_id: 0},
- {$set: {"a.$[i].b": 6}},
- {arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]}));
- }
-
- // Any writes preceding the write that fails to parse are executed.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1}));
- writeRes = db.runCommand({
- update: coll.getName(),
- updates: [{q: {_id: 0}, u: {$set: {b: 6}}}, {q: {$expr: "$$unbound"}, u: {$set: {b: 6}}}]
- });
- assert.commandWorkedIgnoringWriteErrors(writeRes);
- assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes));
- assert.eq(writeRes.n, 1, tojson(writeRes));
+ assert.writeError(coll.update({_id: 0},
+ {$set: {"a.$[i].b": 6}},
+ {arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]}));
+}
+
+// Any writes preceding the write that fails to parse are executed.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1}));
+writeRes = db.runCommand({
+ update: coll.getName(),
+ updates: [{q: {_id: 0}, u: {$set: {b: 6}}}, {q: {$expr: "$$unbound"}, u: {$set: {b: 6}}}]
+});
+assert.commandWorkedIgnoringWriteErrors(writeRes);
+assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes));
+assert.eq(writeRes.n, 1, tojson(writeRes));
})();
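// Aside: the canonical use of $expr is comparing two fields of the same document,
// which plain query operators cannot express (collection and field names assumed).
var budgetColl = db.monthlyBudget;
budgetColl.drop();
assert.writeOK(budgetColl.insert({_id: 1, budget: 400, spent: 450}));
assert.eq(1, budgetColl.find({$expr: {$gt: ["$spent", "$budget"]}}).itcount());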
diff --git a/jstests/core/expr_index_use.js b/jstests/core/expr_index_use.js
index 79fe6d87b86..d0eb55656b2 100644
--- a/jstests/core/expr_index_use.js
+++ b/jstests/core/expr_index_use.js
@@ -1,239 +1,242 @@
// Confirms expected index use when performing a match with a $expr statement.
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const coll = db.expr_index_use;
- coll.drop();
-
- assert.writeOK(coll.insert({a: {b: 1}}));
- assert.writeOK(coll.insert({a: {b: [1]}}));
- assert.writeOK(coll.insert({a: [{b: 1}]}));
- assert.writeOK(coll.insert({a: [{b: [1]}]}));
- assert.commandWorked(coll.createIndex({"a.b": 1}));
-
- assert.writeOK(coll.insert({c: {d: 1}}));
- assert.commandWorked(coll.createIndex({"c.d": 1}));
-
- assert.writeOK(coll.insert({e: [{f: 1}]}));
- assert.commandWorked(coll.createIndex({"e.f": 1}));
-
- assert.writeOK(coll.insert({g: {h: [1]}}));
- assert.commandWorked(coll.createIndex({"g.h": 1}));
-
- assert.writeOK(coll.insert({i: 1, j: [1]}));
- assert.commandWorked(coll.createIndex({i: 1, j: 1}));
-
- assert.writeOK(coll.insert({k: 1, l: "abc"}));
- assert.commandWorked(coll.createIndex({k: 1, l: "text"}));
-
- assert.writeOK(coll.insert({x: 0}));
- assert.writeOK(coll.insert({x: 1, y: 1}));
- assert.writeOK(coll.insert({x: 2, y: 2}));
- assert.writeOK(coll.insert({x: 3, y: 10}));
- assert.writeOK(coll.insert({y: 20}));
- assert.commandWorked(coll.createIndex({x: 1, y: 1}));
-
- assert.writeOK(coll.insert({w: 123}));
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({w: null}));
- assert.writeOK(coll.insert({w: undefined}));
- assert.writeOK(coll.insert({w: NaN}));
- assert.writeOK(coll.insert({w: "foo"}));
- assert.writeOK(coll.insert({w: "FOO"}));
- assert.writeOK(coll.insert({w: {z: 1}}));
- assert.writeOK(coll.insert({w: {z: 2}}));
- assert.commandWorked(coll.createIndex({w: 1}));
- assert.commandWorked(coll.createIndex({"w.z": 1}));
-
- /**
- * Executes the expression 'expr' as both a find and an aggregate. Then confirms
- * 'metricsToCheck', which is an object containing:
- * - nReturned: The number of documents the pipeline is expected to return.
- * - expectedIndex: Either an index specification object when index use is expected or
- * 'null' if a collection scan is expected.
- */
- function confirmExpectedExprExecution(expr, metricsToCheck, collation) {
- assert(metricsToCheck.hasOwnProperty("nReturned"),
- "metricsToCheck must contain an nReturned field");
-
- let aggOptions = {};
- if (collation) {
- aggOptions.collation = collation;
- }
-
- const pipeline = [{$match: {$expr: expr}}];
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const coll = db.expr_index_use;
+coll.drop();
+
+assert.writeOK(coll.insert({a: {b: 1}}));
+assert.writeOK(coll.insert({a: {b: [1]}}));
+assert.writeOK(coll.insert({a: [{b: 1}]}));
+assert.writeOK(coll.insert({a: [{b: [1]}]}));
+assert.commandWorked(coll.createIndex({"a.b": 1}));
+
+assert.writeOK(coll.insert({c: {d: 1}}));
+assert.commandWorked(coll.createIndex({"c.d": 1}));
+
+assert.writeOK(coll.insert({e: [{f: 1}]}));
+assert.commandWorked(coll.createIndex({"e.f": 1}));
+
+assert.writeOK(coll.insert({g: {h: [1]}}));
+assert.commandWorked(coll.createIndex({"g.h": 1}));
+
+assert.writeOK(coll.insert({i: 1, j: [1]}));
+assert.commandWorked(coll.createIndex({i: 1, j: 1}));
+
+assert.writeOK(coll.insert({k: 1, l: "abc"}));
+assert.commandWorked(coll.createIndex({k: 1, l: "text"}));
+
+assert.writeOK(coll.insert({x: 0}));
+assert.writeOK(coll.insert({x: 1, y: 1}));
+assert.writeOK(coll.insert({x: 2, y: 2}));
+assert.writeOK(coll.insert({x: 3, y: 10}));
+assert.writeOK(coll.insert({y: 20}));
+assert.commandWorked(coll.createIndex({x: 1, y: 1}));
+
+assert.writeOK(coll.insert({w: 123}));
+assert.writeOK(coll.insert({}));
+assert.writeOK(coll.insert({w: null}));
+assert.writeOK(coll.insert({w: undefined}));
+assert.writeOK(coll.insert({w: NaN}));
+assert.writeOK(coll.insert({w: "foo"}));
+assert.writeOK(coll.insert({w: "FOO"}));
+assert.writeOK(coll.insert({w: {z: 1}}));
+assert.writeOK(coll.insert({w: {z: 2}}));
+assert.commandWorked(coll.createIndex({w: 1}));
+assert.commandWorked(coll.createIndex({"w.z": 1}));
+
+/**
+ * Executes the expression 'expr' as both a find and an aggregate. Then confirms
+ * 'metricsToCheck', which is an object containing:
+ * - nReturned: The number of documents the pipeline is expected to return.
+ * - expectedIndex: Either an index specification object when index use is expected or
+ * 'null' if a collection scan is expected.
+ */
+function confirmExpectedExprExecution(expr, metricsToCheck, collation) {
+ assert(metricsToCheck.hasOwnProperty("nReturned"),
+ "metricsToCheck must contain an nReturned field");
+
+ let aggOptions = {};
+ if (collation) {
+ aggOptions.collation = collation;
+ }
- // Verify that $expr returns the correct number of results when run inside the $match stage
- // of an aggregate.
- assert.eq(metricsToCheck.nReturned, coll.aggregate(pipeline, aggOptions).itcount());
+ const pipeline = [{$match: {$expr: expr}}];
- // Verify that $expr returns the correct number of results when run in a find command.
- let cursor = coll.find({$expr: expr});
- if (collation) {
- cursor = cursor.collation(collation);
- }
- assert.eq(metricsToCheck.nReturned, cursor.itcount());
-
- // Verify that $expr returns the correct number of results when evaluated inside a $project,
- // with optimizations inhibited. We expect the plan to be COLLSCAN.
- const pipelineWithProject = [
- {$_internalInhibitOptimization: {}},
- {$project: {result: {$cond: [expr, true, false]}}},
- {$match: {result: true}}
- ];
- assert.eq(metricsToCheck.nReturned,
- coll.aggregate(pipelineWithProject, aggOptions).itcount());
- let explain = coll.explain("executionStats").aggregate(pipelineWithProject, aggOptions);
- assert(getAggPlanStage(explain, "COLLSCAN"), tojson(explain));
-
- // Verifies that there are no rejected plans, and that the winning plan uses the expected
- // index.
- //
- // 'getPlanStageFunc' is a function which can be called to obtain stage-specific information
- // from the explain output. There are different versions of this function for find and
- // aggregate explain output.
- function verifyExplainOutput(explain, getPlanStageFunc) {
- assert(!hasRejectedPlans(explain), tojson(explain));
-
- if (metricsToCheck.hasOwnProperty("expectedIndex")) {
- const stage = getPlanStageFunc(explain, "IXSCAN");
- assert.neq(null, stage, tojson(explain));
- assert(stage.hasOwnProperty("keyPattern"), tojson(explain));
- assert.docEq(stage.keyPattern, metricsToCheck.expectedIndex, tojson(explain));
- } else {
- assert(getPlanStageFunc(explain, "COLLSCAN"), tojson(explain));
- }
- }
+ // Verify that $expr returns the correct number of results when run inside the $match stage
+ // of an aggregate.
+ assert.eq(metricsToCheck.nReturned, coll.aggregate(pipeline, aggOptions).itcount());
- explain =
- assert.commandWorked(coll.explain("executionStats").aggregate(pipeline, aggOptions));
- verifyExplainOutput(explain, getPlanStage);
-
- cursor = coll.explain("executionStats").find({$expr: expr});
- if (collation) {
- cursor = cursor.collation(collation);
+ // Verify that $expr returns the correct number of results when run in a find command.
+ let cursor = coll.find({$expr: expr});
+ if (collation) {
+ cursor = cursor.collation(collation);
+ }
+ assert.eq(metricsToCheck.nReturned, cursor.itcount());
+
+ // Verify that $expr returns the correct number of results when evaluated inside a $project,
+ // with optimizations inhibited. We expect the plan to be COLLSCAN.
+ const pipelineWithProject = [
+ {$_internalInhibitOptimization: {}},
+ {$project: {result: {$cond: [expr, true, false]}}},
+ {$match: {result: true}}
+ ];
+ assert.eq(metricsToCheck.nReturned, coll.aggregate(pipelineWithProject, aggOptions).itcount());
+ let explain = coll.explain("executionStats").aggregate(pipelineWithProject, aggOptions);
+ assert(getAggPlanStage(explain, "COLLSCAN"), tojson(explain));
+
+ // Verifies that there are no rejected plans, and that the winning plan uses the expected
+ // index.
+ //
+ // 'getPlanStageFunc' is a function which can be called to obtain stage-specific information
+ // from the explain output. There are different versions of this function for find and
+ // aggregate explain output.
+ function verifyExplainOutput(explain, getPlanStageFunc) {
+ assert(!hasRejectedPlans(explain), tojson(explain));
+
+ if (metricsToCheck.hasOwnProperty("expectedIndex")) {
+ const stage = getPlanStageFunc(explain, "IXSCAN");
+ assert.neq(null, stage, tojson(explain));
+ assert(stage.hasOwnProperty("keyPattern"), tojson(explain));
+ assert.docEq(stage.keyPattern, metricsToCheck.expectedIndex, tojson(explain));
+ } else {
+ assert(getPlanStageFunc(explain, "COLLSCAN"), tojson(explain));
}
- explain = assert.commandWorked(cursor.finish());
- verifyExplainOutput(explain, getPlanStage);
}
- // Comparison of field and constant.
- confirmExpectedExprExecution({$eq: ["$x", 1]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}});
- confirmExpectedExprExecution({$eq: [1, "$x"]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}});
-
- // $and with both children eligible for index use.
- confirmExpectedExprExecution({$and: [{$eq: ["$x", 2]}, {$eq: ["$y", 2]}]},
- {nReturned: 1, expectedIndex: {x: 1, y: 1}});
-
- // $and with one child eligible for index use and one that is not.
- confirmExpectedExprExecution({$and: [{$eq: ["$x", 1]}, {$eq: ["$x", "$y"]}]},
- {nReturned: 1, expectedIndex: {x: 1, y: 1}});
-
- // $and with one child eligible for index use and a second child containing an $or where
- // one of the two children is eligible.
- confirmExpectedExprExecution(
- {$and: [{$eq: ["$x", 1]}, {$or: [{$eq: ["$x", "$y"]}, {$eq: ["$y", 1]}]}]},
- {nReturned: 1, expectedIndex: {x: 1, y: 1}});
-
- // Equality comparison against non-multikey dotted path field is expected to use an index.
- confirmExpectedExprExecution({$eq: ["$c.d", 1]}, {nReturned: 1, expectedIndex: {"c.d": 1}});
-
- // $lt, $lte, $gt, $gte, $in, $ne, and $cmp are not expected to use an index. This is because we
- // have not yet implemented a rewrite of these operators to an indexable MatchExpression.
- confirmExpectedExprExecution({$lt: ["$x", 1]}, {nReturned: 20});
- confirmExpectedExprExecution({$lt: [1, "$x"]}, {nReturned: 2});
- confirmExpectedExprExecution({$lte: ["$x", 1]}, {nReturned: 21});
- confirmExpectedExprExecution({$lte: [1, "$x"]}, {nReturned: 3});
- confirmExpectedExprExecution({$gt: ["$x", 1]}, {nReturned: 2});
- confirmExpectedExprExecution({$gt: [1, "$x"]}, {nReturned: 20});
- confirmExpectedExprExecution({$gte: ["$x", 1]}, {nReturned: 3});
- confirmExpectedExprExecution({$gte: [1, "$x"]}, {nReturned: 21});
- confirmExpectedExprExecution({$in: ["$x", [1, 3]]}, {nReturned: 2});
- confirmExpectedExprExecution({$cmp: ["$x", 1]}, {nReturned: 22});
- confirmExpectedExprExecution({$ne: ["$x", 1]}, {nReturned: 22});
-
- // Comparison with an array value is not expected to use an index.
- confirmExpectedExprExecution({$eq: ["$a.b", [1]]}, {nReturned: 2});
- confirmExpectedExprExecution({$eq: ["$w", [1]]}, {nReturned: 0});
-
- // A constant expression is not expected to use an index.
- confirmExpectedExprExecution(1, {nReturned: 23});
- confirmExpectedExprExecution(false, {nReturned: 0});
- confirmExpectedExprExecution({$eq: [1, 1]}, {nReturned: 23});
- confirmExpectedExprExecution({$eq: [0, 1]}, {nReturned: 0});
-
- // Comparison of 2 fields is not expected to use an index.
- confirmExpectedExprExecution({$eq: ["$x", "$y"]}, {nReturned: 20});
-
- // Comparison against a multikey field is not expected to use an index.
- confirmExpectedExprExecution({$eq: ["$a.b", 1]}, {nReturned: 1});
- confirmExpectedExprExecution({$eq: ["$e.f", [1]]}, {nReturned: 1});
- confirmExpectedExprExecution({$eq: ["$e.f", 1]}, {nReturned: 0});
- confirmExpectedExprExecution({$eq: ["$g.h", [1]]}, {nReturned: 1});
- confirmExpectedExprExecution({$eq: ["$g.h", 1]}, {nReturned: 0});
-
- // Comparison against a non-multikey field of a multikey index can use an index.
- const metricsToCheck = {nReturned: 1};
- metricsToCheck.expectedIndex = {i: 1, j: 1};
- confirmExpectedExprExecution({$eq: ["$i", 1]}, metricsToCheck);
- metricsToCheck.nReturned = 0;
- confirmExpectedExprExecution({$eq: ["$i", 2]}, metricsToCheck);
-
- // Equality to NaN can use an index.
- confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: 1}});
-
- // Equality to undefined and equality to missing cannot use an index.
- confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16});
- confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16});
-
- // Equality to null can use an index.
- confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}});
-
- // Equality inside a nested object can use a non-multikey index.
- confirmExpectedExprExecution({$eq: ["$w.z", 2]}, {nReturned: 1, expectedIndex: {"w.z": 1}});
-
- // Test that the collation is respected. Since the collations do not match, we should not use
- // the index.
- const caseInsensitiveCollation = {locale: "en_US", strength: 2};
- if (db.getMongo().useReadCommands()) {
- confirmExpectedExprExecution(
- {$eq: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation);
- }
+ explain = assert.commandWorked(coll.explain("executionStats").aggregate(pipeline, aggOptions));
+ verifyExplainOutput(explain, getPlanStage);
- // Test equality queries against a hashed index.
- assert.commandWorked(coll.dropIndex({w: 1}));
- assert.commandWorked(coll.createIndex({w: "hashed"}));
- confirmExpectedExprExecution({$eq: ["$w", 123]}, {nReturned: 1, expectedIndex: {w: "hashed"}});
- confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: "hashed"}});
- confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: "hashed"}});
- confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16});
- confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16});
-
- // Test that equality to null queries can use a sparse index.
- assert.commandWorked(coll.dropIndex({w: "hashed"}));
- assert.commandWorked(coll.createIndex({w: 1}, {sparse: true}));
- confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}});
-
- // Equality match against a text index prefix is expected to fail. Equality predicates are
- // required against the prefix fields of a text index, but currently $eq inside $expr does not
- // qualify.
- assert.throws(() =>
- coll.aggregate([{$match: {$expr: {$eq: ["$k", 1]}, $text: {$search: "abc"}}}])
- .itcount());
-
- // Test that equality match in $expr respects the collection's default collation, both when
- // there is an index with a matching collation and when there isn't.
- assert.commandWorked(db.runCommand({drop: coll.getName()}));
- assert.commandWorked(
- db.createCollection(coll.getName(), {collation: caseInsensitiveCollation}));
- assert.writeOK(coll.insert({a: "foo", b: "bar"}));
- assert.writeOK(coll.insert({a: "FOO", b: "BAR"}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "simple"}}));
-
- confirmExpectedExprExecution({$eq: ["$a", "foo"]}, {nReturned: 2, expectedIndex: {a: 1}});
- confirmExpectedExprExecution({$eq: ["$b", "bar"]}, {nReturned: 2});
+ cursor = coll.explain("executionStats").find({$expr: expr});
+ if (collation) {
+ cursor = cursor.collation(collation);
+ }
+ explain = assert.commandWorked(cursor.finish());
+ verifyExplainOutput(explain, getPlanStage);
+}
+
+// Comparison of field and constant.
+confirmExpectedExprExecution({$eq: ["$x", 1]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}});
+confirmExpectedExprExecution({$eq: [1, "$x"]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}});
+
+// $and with both children eligible for index use.
+confirmExpectedExprExecution({$and: [{$eq: ["$x", 2]}, {$eq: ["$y", 2]}]},
+ {nReturned: 1, expectedIndex: {x: 1, y: 1}});
+
+// $and with one child eligible for index use and one that is not.
+confirmExpectedExprExecution({$and: [{$eq: ["$x", 1]}, {$eq: ["$x", "$y"]}]},
+ {nReturned: 1, expectedIndex: {x: 1, y: 1}});
+
+// $and with one child eligible for index use and a second child containing an $or where
+// one of the two children is eligible.
+confirmExpectedExprExecution(
+ {$and: [{$eq: ["$x", 1]}, {$or: [{$eq: ["$x", "$y"]}, {$eq: ["$y", 1]}]}]},
+ {nReturned: 1, expectedIndex: {x: 1, y: 1}});
+
+// Equality comparison against non-multikey dotted path field is expected to use an index.
+confirmExpectedExprExecution({$eq: ["$c.d", 1]}, {nReturned: 1, expectedIndex: {"c.d": 1}});
+
+// $lt, $lte, $gt, $gte, $in, $ne, and $cmp are not expected to use an index. This is because we
+// have not yet implemented a rewrite of these operators to an indexable MatchExpression.
+confirmExpectedExprExecution({$lt: ["$x", 1]}, {nReturned: 20});
+confirmExpectedExprExecution({$lt: [1, "$x"]}, {nReturned: 2});
+confirmExpectedExprExecution({$lte: ["$x", 1]}, {nReturned: 21});
+confirmExpectedExprExecution({$lte: [1, "$x"]}, {nReturned: 3});
+confirmExpectedExprExecution({$gt: ["$x", 1]}, {nReturned: 2});
+confirmExpectedExprExecution({$gt: [1, "$x"]}, {nReturned: 20});
+confirmExpectedExprExecution({$gte: ["$x", 1]}, {nReturned: 3});
+confirmExpectedExprExecution({$gte: [1, "$x"]}, {nReturned: 21});
+confirmExpectedExprExecution({$in: ["$x", [1, 3]]}, {nReturned: 2});
+confirmExpectedExprExecution({$cmp: ["$x", 1]}, {nReturned: 22});
+confirmExpectedExprExecution({$ne: ["$x", 1]}, {nReturned: 22});
+
+// Comparison with an array value is not expected to use an index.
+confirmExpectedExprExecution({$eq: ["$a.b", [1]]}, {nReturned: 2});
+confirmExpectedExprExecution({$eq: ["$w", [1]]}, {nReturned: 0});
+
+// A constant expression is not expected to use an index.
+confirmExpectedExprExecution(1, {nReturned: 23});
+confirmExpectedExprExecution(false, {nReturned: 0});
+confirmExpectedExprExecution({$eq: [1, 1]}, {nReturned: 23});
+confirmExpectedExprExecution({$eq: [0, 1]}, {nReturned: 0});
+
+// Comparison of 2 fields is not expected to use an index.
+confirmExpectedExprExecution({$eq: ["$x", "$y"]}, {nReturned: 20});
+
+// Comparison against a multikey field is not expected to use an index.
+confirmExpectedExprExecution({$eq: ["$a.b", 1]}, {nReturned: 1});
+confirmExpectedExprExecution({$eq: ["$e.f", [1]]}, {nReturned: 1});
+confirmExpectedExprExecution({$eq: ["$e.f", 1]}, {nReturned: 0});
+confirmExpectedExprExecution({$eq: ["$g.h", [1]]}, {nReturned: 1});
+confirmExpectedExprExecution({$eq: ["$g.h", 1]}, {nReturned: 0});
+
+// Comparison against a non-multikey field of a multikey index can use an index.
+const metricsToCheck = {
+ nReturned: 1
+};
+metricsToCheck.expectedIndex = {
+ i: 1,
+ j: 1
+};
+confirmExpectedExprExecution({$eq: ["$i", 1]}, metricsToCheck);
+metricsToCheck.nReturned = 0;
+confirmExpectedExprExecution({$eq: ["$i", 2]}, metricsToCheck);
+
+// Equality to NaN can use an index.
+confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: 1}});
+
+// Equality to undefined and equality to missing cannot use an index.
+confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16});
+confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16});
+
+// Equality to null can use an index.
+confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}});
+
+// Equality inside a nested object can use a non-multikey index.
+confirmExpectedExprExecution({$eq: ["$w.z", 2]}, {nReturned: 1, expectedIndex: {"w.z": 1}});
+
+// Test that the collation is respected. Since the collations do not match, we should not use
+// the index.
+const caseInsensitiveCollation = {
+ locale: "en_US",
+ strength: 2
+};
+if (db.getMongo().useReadCommands()) {
+ confirmExpectedExprExecution({$eq: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation);
+}
+
+// Test equality queries against a hashed index.
+assert.commandWorked(coll.dropIndex({w: 1}));
+assert.commandWorked(coll.createIndex({w: "hashed"}));
+confirmExpectedExprExecution({$eq: ["$w", 123]}, {nReturned: 1, expectedIndex: {w: "hashed"}});
+confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: "hashed"}});
+confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: "hashed"}});
+confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16});
+confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16});
+
+// Test that equality to null queries can use a sparse index.
+assert.commandWorked(coll.dropIndex({w: "hashed"}));
+assert.commandWorked(coll.createIndex({w: 1}, {sparse: true}));
+confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}});
+
+// Equality match against a text index prefix is expected to fail. Equality predicates are
+// required against the prefix fields of a text index, but currently $eq inside $expr does not
+// qualify.
+assert.throws(
+ () => coll.aggregate([{$match: {$expr: {$eq: ["$k", 1]}, $text: {$search: "abc"}}}]).itcount());
+
+// Test that equality match in $expr respects the collection's default collation, both when
+// there is an index with a matching collation and when there isn't.
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
+assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitiveCollation}));
+assert.writeOK(coll.insert({a: "foo", b: "bar"}));
+assert.writeOK(coll.insert({a: "FOO", b: "BAR"}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "simple"}}));
+
+confirmExpectedExprExecution({$eq: ["$a", "foo"]}, {nReturned: 2, expectedIndex: {a: 1}});
+confirmExpectedExprExecution({$eq: ["$b", "bar"]}, {nReturned: 2});
})();
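All of the eligibility cases above reduce to one observable: whether the winning plan contains an IXSCAN stage with the expected key pattern. Outside the test harness (which uses helpers from analyze_plan.js), a hedged sketch of the same check, assuming a non-sharded plan shape under explain.queryPlanner:

// Recursively search a query plan for a stage with the given name.
function planHasStage(plan, stageName) {
    if (!plan) {
        return false;
    }
    if (plan.stage === stageName) {
        return true;
    }
    const children = plan.inputStage ? [plan.inputStage] : (plan.inputStages || []);
    return children.some((child) => planHasStage(child, stageName));
}

const explain = db.expr_index_use.find({$expr: {$eq: ["$x", 1]}}).explain("queryPlanner");
print(planHasStage(explain.queryPlanner.winningPlan, "IXSCAN") ? "index scan" : "collection scan");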
diff --git a/jstests/core/expr_or_pushdown.js b/jstests/core/expr_or_pushdown.js
index 431e2932ae7..e2605e08c91 100644
--- a/jstests/core/expr_or_pushdown.js
+++ b/jstests/core/expr_or_pushdown.js
@@ -3,23 +3,23 @@
* as expected.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.expr_or_pushdown;
- coll.drop();
- assert.commandWorked(coll.createIndex({"a": 1, "b": 1}));
- assert.commandWorked(coll.insert({_id: 0, a: "a", b: "b", d: "d"}));
- assert.commandWorked(coll.insert({_id: 1, a: "a", b: "c", d: "d"}));
- assert.commandWorked(coll.insert({_id: 2, a: "a", b: "x", d: "d"}));
- assert.commandWorked(coll.insert({_id: 3, a: "x", b: "b", d: "d"}));
- assert.commandWorked(coll.insert({_id: 4, a: "a", b: "b", d: "x"}));
+const coll = db.expr_or_pushdown;
+coll.drop();
+assert.commandWorked(coll.createIndex({"a": 1, "b": 1}));
+assert.commandWorked(coll.insert({_id: 0, a: "a", b: "b", d: "d"}));
+assert.commandWorked(coll.insert({_id: 1, a: "a", b: "c", d: "d"}));
+assert.commandWorked(coll.insert({_id: 2, a: "a", b: "x", d: "d"}));
+assert.commandWorked(coll.insert({_id: 3, a: "x", b: "b", d: "d"}));
+assert.commandWorked(coll.insert({_id: 4, a: "a", b: "b", d: "x"}));
- const results = coll.find({
- $expr: {$and: [{$eq: ["$d", "d"]}, {$eq: ["$a", "a"]}]},
- $or: [{"b": "b"}, {"b": "c"}]
- })
- .sort({_id: 1})
- .toArray();
+const results = coll.find({
+ $expr: {$and: [{$eq: ["$d", "d"]}, {$eq: ["$a", "a"]}]},
+ $or: [{"b": "b"}, {"b": "c"}]
+ })
+ .sort({_id: 1})
+ .toArray();
- assert.eq(results, [{_id: 0, a: "a", b: "b", d: "d"}, {_id: 1, a: "a", b: "c", d: "d"}]);
+assert.eq(results, [{_id: 0, a: "a", b: "b", d: "d"}, {_id: 1, a: "a", b: "c", d: "d"}]);
}());
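The query above relies on the planner splitting the $expr-derived $and and pushing the top-level $or down toward the index. On this dataset, where every field is a scalar, the query is also semantically equivalent to the plain-match form, which makes a useful cross-check (a sketch against the same collection):

const pushed = db.expr_or_pushdown
                   .find({
                       $expr: {$and: [{$eq: ["$d", "d"]}, {$eq: ["$a", "a"]}]},
                       $or: [{b: "b"}, {b: "c"}]
                   })
                   .sort({_id: 1})
                   .toArray();
const plain =
    db.expr_or_pushdown.find({d: "d", a: "a", $or: [{b: "b"}, {b: "c"}]}).sort({_id: 1}).toArray();
// Both forms select the same documents; only the plan shape differs. Note that
// plain match semantics also traverse arrays, so the equivalence holds only
// for scalar-valued fields like these.
assert.eq(pushed, plain);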
diff --git a/jstests/core/expr_valid_positions.js b/jstests/core/expr_valid_positions.js
index df1f2470261..cd3ae2bf917 100644
--- a/jstests/core/expr_valid_positions.js
+++ b/jstests/core/expr_valid_positions.js
@@ -1,23 +1,23 @@
// Verify that $expr can be used in the top-level position, but not in subdocuments.
(function() {
- "use strict";
+"use strict";
- const coll = db.expr_valid_positions;
+const coll = db.expr_valid_positions;
- // Works at the BSON root level.
- assert.eq(0, coll.find({$expr: {$eq: ["$foo", "$bar"]}}).itcount());
+// Works at the BSON root level.
+assert.eq(0, coll.find({$expr: {$eq: ["$foo", "$bar"]}}).itcount());
- // Works inside a $or.
- assert.eq(0, coll.find({$or: [{$expr: {$eq: ["$foo", "$bar"]}}, {b: {$gt: 3}}]}).itcount());
+// Works inside a $or.
+assert.eq(0, coll.find({$or: [{$expr: {$eq: ["$foo", "$bar"]}}, {b: {$gt: 3}}]}).itcount());
- // Fails inside an elemMatch.
- assert.throws(function() {
- coll.find({a: {$elemMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount();
- });
+// Fails inside an elemMatch.
+assert.throws(function() {
+ coll.find({a: {$elemMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount();
+});
- // Fails inside an _internalSchemaObjectMatch.
- assert.throws(function() {
- coll.find({a: {$_internalSchemaObjectMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount();
- });
+// Fails inside an _internalSchemaObjectMatch.
+assert.throws(function() {
+ coll.find({a: {$_internalSchemaObjectMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount();
+});
}());
\ No newline at end of file
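The restriction tested here follows from where each operator is evaluated: $expr is applied to the document root, while $elemMatch and $_internalSchemaObjectMatch apply their operands to subdocuments, where $expr has no defined meaning. A quick interactive illustration (a sketch; the exact error text is version-dependent):

// Accepted: $expr at the top level of the filter.
db.expr_valid_positions.find({$expr: {$eq: ["$foo", "$bar"]}}).itcount();

// Rejected: the same $expr nested beneath $elemMatch.
try {
    db.expr_valid_positions.find({a: {$elemMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount();
} catch (e) {
    print("rejected as expected: " + e);
}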
diff --git a/jstests/core/failcommand_failpoint.js b/jstests/core/failcommand_failpoint.js
index 94712cbac9a..e78d39e3d50 100644
--- a/jstests/core/failcommand_failpoint.js
+++ b/jstests/core/failcommand_failpoint.js
@@ -2,288 +2,287 @@
* @tags: [assumes_read_concern_unchanged, assumes_read_preference_unchanged]
*/
(function() {
- "use strict";
+"use strict";
- const testDB = db.getSiblingDB("test_failcommand");
- const adminDB = db.getSiblingDB("admin");
+const testDB = db.getSiblingDB("test_failcommand");
+const adminDB = db.getSiblingDB("admin");
- const getThreadName = function() {
- let myUri = adminDB.runCommand({whatsmyuri: 1}).you;
- return adminDB.aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}])
- .toArray()[0]
- .desc;
- };
+const getThreadName = function() {
+ let myUri = adminDB.runCommand({whatsmyuri: 1}).you;
+ return adminDB.aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}])
+ .toArray()[0]
+ .desc;
+};
- let threadName = getThreadName();
+let threadName = getThreadName();
- // Test failing with a particular error code.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.NotMaster,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test failing with a particular error code.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ errorCode: ErrorCodes.NotMaster,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test that only commands specified in failCommands fail.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
- assert.commandWorked(testDB.runCommand({isMaster: 1}));
- assert.commandWorked(testDB.runCommand({buildinfo: 1}));
- assert.commandWorked(testDB.runCommand({find: "collection"}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test that only commands specified in failCommands fail.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ errorCode: ErrorCodes.BadValue,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
+assert.commandWorked(testDB.runCommand({isMaster: 1}));
+assert.commandWorked(testDB.runCommand({buildinfo: 1}));
+assert.commandWorked(testDB.runCommand({find: "collection"}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test failing with multiple commands specified in failCommands.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["ping", "isMaster"],
- threadName: threadName,
- }
- }));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
- assert.commandFailedWithCode(testDB.runCommand({isMaster: 1}), ErrorCodes.BadValue);
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test failing with multiple commands specified in failCommands.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ errorCode: ErrorCodes.BadValue,
+ failCommands: ["ping", "isMaster"],
+ threadName: threadName,
+ }
+}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
+assert.commandFailedWithCode(testDB.runCommand({isMaster: 1}), ErrorCodes.BadValue);
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test skip when failing with a particular error code.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {skip: 2},
- data: {
- errorCode: ErrorCodes.NotMaster,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test skip when failing with a particular error code.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {skip: 2},
+ data: {
+ errorCode: ErrorCodes.NotMaster,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test times when failing with a particular error code.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {
- errorCode: ErrorCodes.NotMaster,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test times when failing with a particular error code.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {
+ errorCode: ErrorCodes.NotMaster,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster);
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Commands not specified in failCommands are not counted for skip.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {skip: 1},
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({isMaster: 1}));
- assert.commandWorked(testDB.runCommand({buildinfo: 1}));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(testDB.runCommand({find: "c"}));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Commands not specified in failCommands are not counted for skip.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {skip: 1},
+ data: {
+ errorCode: ErrorCodes.BadValue,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({isMaster: 1}));
+assert.commandWorked(testDB.runCommand({buildinfo: 1}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(testDB.runCommand({find: "c"}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Commands not specified in failCommands are not counted for times.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({isMaster: 1}));
- assert.commandWorked(testDB.runCommand({buildinfo: 1}));
- assert.commandWorked(testDB.runCommand({find: "c"}));
- assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Commands not specified in failCommands are not counted for times.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ errorCode: ErrorCodes.BadValue,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({isMaster: 1}));
+assert.commandWorked(testDB.runCommand({buildinfo: 1}));
+assert.commandWorked(testDB.runCommand({find: "c"}));
+assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue);
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test closing connection.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- closeConnection: true,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.throws(() => testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test closing connection.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ closeConnection: true,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.throws(() => testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- threadName = getThreadName();
+threadName = getThreadName();
- // Test that only commands specified in failCommands fail when closing the connection.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- closeConnection: true,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({isMaster: 1}));
- assert.commandWorked(testDB.runCommand({buildinfo: 1}));
- assert.commandWorked(testDB.runCommand({find: "c"}));
- assert.throws(() => testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test that only commands specified in failCommands fail when closing the connection.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ closeConnection: true,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({isMaster: 1}));
+assert.commandWorked(testDB.runCommand({buildinfo: 1}));
+assert.commandWorked(testDB.runCommand({find: "c"}));
+assert.throws(() => testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- threadName = getThreadName();
+threadName = getThreadName();
- // Test skip when closing connection.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {skip: 2},
- data: {
- closeConnection: true,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.throws(() => testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test skip when closing connection.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {skip: 2},
+ data: {
+ closeConnection: true,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.throws(() => testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- threadName = getThreadName();
+threadName = getThreadName();
- // Commands not specified in failCommands are not counted for skip.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {skip: 1},
- data: {
- closeConnection: true,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({isMaster: 1}));
- assert.commandWorked(testDB.runCommand({buildinfo: 1}));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(testDB.runCommand({find: "c"}));
- assert.throws(() => testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Commands not specified in failCommands are not counted for skip.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {skip: 1},
+ data: {
+ closeConnection: true,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({isMaster: 1}));
+assert.commandWorked(testDB.runCommand({buildinfo: 1}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(testDB.runCommand({find: "c"}));
+assert.throws(() => testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- threadName = getThreadName();
+threadName = getThreadName();
- // Commands not specified in failCommands are not counted for times.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- closeConnection: true,
- failCommands: ["ping"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(testDB.runCommand({isMaster: 1}));
- assert.commandWorked(testDB.runCommand({buildinfo: 1}));
- assert.commandWorked(testDB.runCommand({find: "c"}));
- assert.throws(() => testDB.runCommand({ping: 1}));
- assert.commandWorked(testDB.runCommand({ping: 1}));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Commands not specified in failCommands are not counted for times.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ closeConnection: true,
+ failCommands: ["ping"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(testDB.runCommand({isMaster: 1}));
+assert.commandWorked(testDB.runCommand({buildinfo: 1}));
+assert.commandWorked(testDB.runCommand({find: "c"}));
+assert.throws(() => testDB.runCommand({ping: 1}));
+assert.commandWorked(testDB.runCommand({ping: 1}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- threadName = getThreadName();
+threadName = getThreadName();
- // Cannot fail on "configureFailPoint" command.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["configureFailPoint"],
- threadName: threadName,
- }
- }));
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Cannot fail on "configureFailPoint" command.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ errorCode: ErrorCodes.BadValue,
+ failCommands: ["configureFailPoint"],
+ threadName: threadName,
+ }
+}));
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test with success and writeConcernError.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- writeConcernError: {code: 12345, errmsg: "hello"},
- failCommands: ['insert', 'ping'],
- threadName: threadName,
- }
- }));
- // Commands that don't support writeConcern don't tick the counter.
- assert.commandWorked(testDB.runCommand({ping: 1}));
- // Unlisted commands don't tick the counter.
- assert.commandWorked(testDB.runCommand({update: "c", updates: [{q: {}, u: {}, upsert: true}]}));
- var res = testDB.runCommand({insert: "c", documents: [{}]});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
- assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again.
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test with success and writeConcernError.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ writeConcernError: {code: 12345, errmsg: "hello"},
+ failCommands: ['insert', 'ping'],
+ threadName: threadName,
+ }
+}));
+// Commands that don't support writeConcern don't tick the counter.
+assert.commandWorked(testDB.runCommand({ping: 1}));
+// Unlisted commands don't tick the counter.
+assert.commandWorked(testDB.runCommand({update: "c", updates: [{q: {}, u: {}, upsert: true}]}));
+var res = testDB.runCommand({insert: "c", documents: [{}]});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
+assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again.
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test with natural failure and writeConcernError.
+// Test with natural failure and writeConcernError.
- // This document is removed before testing the following insert to prevent a DuplicateKeyError
- // if the failcommand_failpoint test is run multiple times on the same fixture.
- testDB.c.remove({_id: 'dup'});
+// This document is removed before testing the following insert to prevent a DuplicateKeyError
+// if the failcommand_failpoint test is run multiple times on the same fixture.
+testDB.c.remove({_id: 'dup'});
- assert.commandWorked(testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]}));
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- writeConcernError: {code: 12345, errmsg: "hello"},
- failCommands: ['insert'],
- threadName: threadName,
- }
- }));
- var res = testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]});
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
- assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again.
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandWorked(testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]}));
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ writeConcernError: {code: 12345, errmsg: "hello"},
+ failCommands: ['insert'],
+ threadName: threadName,
+ }
+}));
+var res = testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]});
+assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
+assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again.
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Test that specifying both writeConcernError and closeConnection: false does not make
- // `times` decrement twice per operation.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {
- failCommands: ["insert"],
- closeConnection: false,
- writeConcernError: {code: 12345, errmsg: "hello"},
- threadName: threadName,
- }
- }));
-
- var res = testDB.runCommand({insert: "test", documents: [{a: "something"}]});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
- res = testDB.runCommand({insert: "test", documents: [{a: "something else"}]});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
- assert.commandWorked(testDB.runCommand({insert: "test", documents: [{b: "or_other"}]}));
+// Test that specifying both writeConcernError and closeConnection: false does not make
+// `times` decrement twice per operation.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {
+ failCommands: ["insert"],
+ closeConnection: false,
+ writeConcernError: {code: 12345, errmsg: "hello"},
+ threadName: threadName,
+ }
+}));
+var res = testDB.runCommand({insert: "test", documents: [{a: "something"}]});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
+res = testDB.runCommand({insert: "test", documents: [{a: "something else"}]});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"});
+assert.commandWorked(testDB.runCommand({insert: "test", documents: [{b: "or_other"}]}));
}());
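Every case in this test follows the same arm/exercise/disarm lifecycle, which generalizes to any use of the failCommand failpoint; a minimal sketch (threadName comes from the getThreadName() helper above and scopes the failpoint to this connection):

const admin = db.getSiblingDB("admin");

// Arm: fail the next two "ping" commands on this connection with NotMaster.
assert.commandWorked(admin.runCommand({
    configureFailPoint: "failCommand",
    mode: {times: 2},
    data: {errorCode: ErrorCodes.NotMaster, failCommands: ["ping"], threadName: threadName}
}));

// Exercise: the armed commands fail, after which behavior returns to normal.
assert.commandFailedWithCode(db.runCommand({ping: 1}), ErrorCodes.NotMaster);
assert.commandFailedWithCode(db.runCommand({ping: 1}), ErrorCodes.NotMaster);
assert.commandWorked(db.runCommand({ping: 1}));

// Disarm explicitly, even though {times: N} self-expires, so that a failed
// assertion above cannot leak an armed failpoint into later tests.
assert.commandWorked(admin.runCommand({configureFailPoint: "failCommand", mode: "off"}));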
diff --git a/jstests/core/field_name_validation.js b/jstests/core/field_name_validation.js
index 656e01886cc..72d346a0f0c 100644
--- a/jstests/core/field_name_validation.js
+++ b/jstests/core/field_name_validation.js
@@ -10,173 +10,170 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
-
- const coll = db.field_name_validation;
- coll.drop();
-
- //
- // Insert command field name validation.
- //
-
- // Test that dotted field names are allowed.
- assert.writeOK(coll.insert({"a.b": 1}));
- assert.writeOK(coll.insert({"_id.a": 1}));
- assert.writeOK(coll.insert({a: {"a.b": 1}}));
- assert.writeOK(coll.insert({_id: {"a.b": 1}}));
-
- // Test that _id cannot be a regex.
- assert.writeError(coll.insert({_id: /a/}));
-
- // Test that _id cannot be an array.
- assert.writeError(coll.insert({_id: [9]}));
-
- // Test that $-prefixed field names are allowed in embedded objects.
- assert.writeOK(coll.insert({a: {$b: 1}}));
- assert.eq(1, coll.find({"a.$b": 1}).itcount());
-
- // Test that $-prefixed field names are not allowed at the top level.
- assert.writeErrorWithCode(coll.insert({$a: 1}), ErrorCodes.BadValue);
- assert.writeErrorWithCode(coll.insert({valid: 1, $a: 1}), ErrorCodes.BadValue);
-
- // Test that reserved $-prefixed field names are also not allowed.
- assert.writeErrorWithCode(coll.insert({$ref: 1}), ErrorCodes.BadValue);
- assert.writeErrorWithCode(coll.insert({$id: 1}), ErrorCodes.BadValue);
- assert.writeErrorWithCode(coll.insert({$db: 1}), ErrorCodes.BadValue);
-
- // Test that _id cannot be an object with an element that has a $-prefixed field name.
- assert.writeErrorWithCode(coll.insert({_id: {$b: 1}}), ErrorCodes.DollarPrefixedFieldName);
- assert.writeErrorWithCode(coll.insert({_id: {a: 1, $b: 1}}),
- ErrorCodes.DollarPrefixedFieldName);
-
- // Should not enforce the same restrictions on an embedded _id field.
- assert.writeOK(coll.insert({a: {_id: [9]}}));
- assert.writeOK(coll.insert({a: {_id: /a/}}));
- assert.writeOK(coll.insert({a: {_id: {$b: 1}}}));
-
- //
- // Update command field name validation.
- //
- coll.drop();
-
- // Dotted fields are allowed in an update.
- assert.writeOK(coll.update({}, {"a.b": 1}, {upsert: true}));
- assert.eq(0, coll.find({"a.b": 1}).itcount());
- assert.eq(1, coll.find({}).itcount());
-
- // Dotted fields represent paths in $set.
- assert.writeOK(coll.update({}, {$set: {"a.b": 1}}, {upsert: true}));
- assert.eq(1, coll.find({"a.b": 1}).itcount());
-
- // Dotted fields represent paths in the query object.
- assert.writeOK(coll.update({"a.b": 1}, {$set: {"a.b": 2}}));
- assert.eq(1, coll.find({"a.b": 2}).itcount());
- assert.eq(1, coll.find({a: {b: 2}}).itcount());
-
- assert.writeOK(coll.update({"a.b": 2}, {"a.b": 3}));
- assert.eq(0, coll.find({"a.b": 3}).itcount());
-
- // $-prefixed field names are not allowed.
- assert.writeErrorWithCode(coll.update({"a.b": 1}, {$c: 1}, {upsert: true}),
- ErrorCodes.FailedToParse);
- assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {$c: 1}}, {upsert: true}),
- ErrorCodes.DollarPrefixedFieldName);
- assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {c: {$d: 1}}}, {upsert: true}),
- ErrorCodes.DollarPrefixedFieldName);
-
- // Reserved $-prefixed field names are also not allowed.
- assert.writeErrorWithCode(coll.update({"a.b": 1}, {$ref: 1}), ErrorCodes.FailedToParse);
- assert.writeErrorWithCode(coll.update({"a.b": 1}, {$id: 1}), ErrorCodes.FailedToParse);
- assert.writeErrorWithCode(coll.update({"a.b": 1}, {$db: 1}), ErrorCodes.FailedToParse);
-
- //
- // FindAndModify field name validation.
- //
- coll.drop();
-
- // Dotted fields are allowed in update object.
- coll.findAndModify({query: {_id: 0}, update: {_id: 0, "a.b": 1}, upsert: true});
- assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray());
-
- // Dotted fields represent paths in $set.
- coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, "a.b": 1}}, upsert: true});
- assert.eq([{_id: 1, a: {b: 1}}], coll.find({_id: 1}).toArray());
-
- // Dotted fields represent paths in the query object.
- coll.findAndModify({query: {_id: 0, "a.b": 1}, update: {"a.b": 2}});
- assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray());
-
- coll.findAndModify({query: {_id: 1, "a.b": 1}, update: {$set: {_id: 1, "a.b": 2}}});
- assert.eq([{_id: 1, a: {b: 2}}], coll.find({_id: 1}).toArray());
-
- // $-prefixed field names are not allowed.
- assert.throws(function() {
- coll.findAndModify({query: {_id: 1}, update: {_id: 1, $invalid: 1}});
- });
- assert.throws(function() {
- coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, $invalid: 1}}});
- });
-
- // Reserved $-prefixed field names are also not allowed.
- assert.throws(function() {
- coll.findAndModify({query: {_id: 1}, update: {_id: 1, $ref: 1}});
- });
- assert.throws(function() {
- coll.findAndModify({query: {_id: 1}, update: {_id: 1, $id: 1}});
- });
- assert.throws(function() {
- coll.findAndModify({query: {_id: 1}, update: {_id: 1, $db: 1}});
- });
-
- //
- // Aggregation field name validation.
- //
- coll.drop();
-
- assert.writeOK(coll.insert({_id: {a: 1, b: 2}, "c.d": 3}));
-
- // Dotted fields represent paths in an aggregation pipeline.
- assert.eq(coll.aggregate([{$match: {"_id.a": 1}}, {$project: {"_id.b": 1}}]).toArray(),
- [{_id: {b: 2}}]);
- assert.eq(coll.aggregate([{$match: {"c.d": 3}}, {$project: {"_id.b": 1}}]).toArray(), []);
-
- assert.eq(coll.aggregate([{$project: {"_id.a": 1}}]).toArray(), [{_id: {a: 1}}]);
- assert.eq(coll.aggregate([{$project: {"c.d": 1, _id: 0}}]).toArray(), [{}]);
-
- assert.eq(coll.aggregate([
- {$addFields: {"new.field": {$multiply: ["$c.d", "$_id.a"]}}},
- {$project: {"new.field": 1, _id: 0}}
- ])
- .toArray(),
- [{new: {field: null}}]);
-
- assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$_id.b"}}}]).toArray(),
- [{_id: 1, e: 2}]);
- assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$c.d"}}}]).toArray(),
- [{_id: 1, e: 0}]);
-
- // Accumulation statements cannot have a dotted field name.
- assert.commandFailed(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$group: {_id: "$_id.a", "e.f": {$sum: "$_id.b"}}}]
- }));
-
- // $-prefixed field names are not allowed in an aggregation pipeline.
- assert.commandFailed(
- db.runCommand({aggregate: coll.getName(), pipeline: [{$match: {"$invalid": 1}}]}));
-
- assert.commandFailed(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$project: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}]
- }));
-
- assert.commandFailed(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$addFields: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}]
- }));
-
- assert.commandFailed(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$group: {_id: "$_id.a", "$invalid": {$sum: "$_id.b"}}}]
- }));
+"use strict";
+
+const coll = db.field_name_validation;
+coll.drop();
+
+//
+// Insert command field name validation.
+//
+
+// Test that dotted field names are allowed.
+assert.writeOK(coll.insert({"a.b": 1}));
+assert.writeOK(coll.insert({"_id.a": 1}));
+assert.writeOK(coll.insert({a: {"a.b": 1}}));
+assert.writeOK(coll.insert({_id: {"a.b": 1}}));
+
+// Test that _id cannot be a regex.
+assert.writeError(coll.insert({_id: /a/}));
+
+// Test that _id cannot be an array.
+assert.writeError(coll.insert({_id: [9]}));
+
+// Test that $-prefixed field names are allowed in embedded objects.
+assert.writeOK(coll.insert({a: {$b: 1}}));
+assert.eq(1, coll.find({"a.$b": 1}).itcount());
+
+// Test that $-prefixed field names are not allowed at the top level.
+assert.writeErrorWithCode(coll.insert({$a: 1}), ErrorCodes.BadValue);
+assert.writeErrorWithCode(coll.insert({valid: 1, $a: 1}), ErrorCodes.BadValue);
+
+// Test that reserved $-prefixed field names are also not allowed.
+assert.writeErrorWithCode(coll.insert({$ref: 1}), ErrorCodes.BadValue);
+assert.writeErrorWithCode(coll.insert({$id: 1}), ErrorCodes.BadValue);
+assert.writeErrorWithCode(coll.insert({$db: 1}), ErrorCodes.BadValue);
+
+// Test that _id cannot be an object with an element that has a $-prefixed field name.
+assert.writeErrorWithCode(coll.insert({_id: {$b: 1}}), ErrorCodes.DollarPrefixedFieldName);
+assert.writeErrorWithCode(coll.insert({_id: {a: 1, $b: 1}}), ErrorCodes.DollarPrefixedFieldName);
+
+// Should not enforce the same restrictions on an embedded _id field.
+assert.writeOK(coll.insert({a: {_id: [9]}}));
+assert.writeOK(coll.insert({a: {_id: /a/}}));
+assert.writeOK(coll.insert({a: {_id: {$b: 1}}}));
+
+//
+// Update command field name validation.
+//
+coll.drop();
+
+// Dotted fields are allowed in an update.
+assert.writeOK(coll.update({}, {"a.b": 1}, {upsert: true}));
+assert.eq(0, coll.find({"a.b": 1}).itcount());
+assert.eq(1, coll.find({}).itcount());
+
+// Dotted fields represent paths in $set.
+assert.writeOK(coll.update({}, {$set: {"a.b": 1}}, {upsert: true}));
+assert.eq(1, coll.find({"a.b": 1}).itcount());
+
+// Dotted fields represent paths in the query object.
+assert.writeOK(coll.update({"a.b": 1}, {$set: {"a.b": 2}}));
+assert.eq(1, coll.find({"a.b": 2}).itcount());
+assert.eq(1, coll.find({a: {b: 2}}).itcount());
+
+assert.writeOK(coll.update({"a.b": 2}, {"a.b": 3}));
+assert.eq(0, coll.find({"a.b": 3}).itcount());
+
+// $-prefixed field names are not allowed.
+assert.writeErrorWithCode(coll.update({"a.b": 1}, {$c: 1}, {upsert: true}),
+ ErrorCodes.FailedToParse);
+assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {$c: 1}}, {upsert: true}),
+ ErrorCodes.DollarPrefixedFieldName);
+assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {c: {$d: 1}}}, {upsert: true}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+// Reserved $-prefixed field names are also not allowed.
+assert.writeErrorWithCode(coll.update({"a.b": 1}, {$ref: 1}), ErrorCodes.FailedToParse);
+assert.writeErrorWithCode(coll.update({"a.b": 1}, {$id: 1}), ErrorCodes.FailedToParse);
+assert.writeErrorWithCode(coll.update({"a.b": 1}, {$db: 1}), ErrorCodes.FailedToParse);
+
+//
+// FindAndModify field name validation.
+//
+coll.drop();
+
+// Dotted fields are allowed in update object.
+coll.findAndModify({query: {_id: 0}, update: {_id: 0, "a.b": 1}, upsert: true});
+assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray());
+
+// Dotted fields represent paths in $set.
+coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, "a.b": 1}}, upsert: true});
+assert.eq([{_id: 1, a: {b: 1}}], coll.find({_id: 1}).toArray());
+
+// Dotted fields represent paths in the query object.
+coll.findAndModify({query: {_id: 0, "a.b": 1}, update: {"a.b": 2}});
+assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray());
+
+coll.findAndModify({query: {_id: 1, "a.b": 1}, update: {$set: {_id: 1, "a.b": 2}}});
+assert.eq([{_id: 1, a: {b: 2}}], coll.find({_id: 1}).toArray());
+
+// $-prefixed field names are not allowed.
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 1}, update: {_id: 1, $invalid: 1}});
+});
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, $invalid: 1}}});
+});
+
+// Reserved $-prefixed field names are also not allowed.
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 1}, update: {_id: 1, $ref: 1}});
+});
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 1}, update: {_id: 1, $id: 1}});
+});
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 1}, update: {_id: 1, $db: 1}});
+});
+
+//
+// Aggregation field name validation.
+//
+coll.drop();
+
+assert.writeOK(coll.insert({_id: {a: 1, b: 2}, "c.d": 3}));
+
+// Dotted fields represent paths in an aggregation pipeline.
+assert.eq(coll.aggregate([{$match: {"_id.a": 1}}, {$project: {"_id.b": 1}}]).toArray(),
+ [{_id: {b: 2}}]);
+assert.eq(coll.aggregate([{$match: {"c.d": 3}}, {$project: {"_id.b": 1}}]).toArray(), []);
+
+assert.eq(coll.aggregate([{$project: {"_id.a": 1}}]).toArray(), [{_id: {a: 1}}]);
+assert.eq(coll.aggregate([{$project: {"c.d": 1, _id: 0}}]).toArray(), [{}]);
+
+assert.eq(coll.aggregate([
+ {$addFields: {"new.field": {$multiply: ["$c.d", "$_id.a"]}}},
+ {$project: {"new.field": 1, _id: 0}}
+ ])
+ .toArray(),
+ [{new: {field: null}}]);
+
+assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$_id.b"}}}]).toArray(),
+ [{_id: 1, e: 2}]);
+assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$c.d"}}}]).toArray(),
+ [{_id: 1, e: 0}]);
+
+// Accumulation statements cannot have a dotted field name.
+assert.commandFailed(db.runCommand(
+ {aggregate: coll.getName(), pipeline: [{$group: {_id: "$_id.a", "e.f": {$sum: "$_id.b"}}}]}));
+
+// $-prefixed field names are not allowed in an aggregation pipeline.
+assert.commandFailed(
+ db.runCommand({aggregate: coll.getName(), pipeline: [{$match: {"$invalid": 1}}]}));
+
+assert.commandFailed(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$project: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}]
+}));
+
+assert.commandFailed(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$addFields: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}]
+}));
+
+assert.commandFailed(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$group: {_id: "$_id.a", "$invalid": {$sum: "$_id.b"}}}]
+}));
})();
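
For reference, the test above pins down the server's $-prefixed field-name rules
across insert, update, findAndModify, and aggregate. A minimal standalone sketch
of the insert and update cases, assuming a mongo shell `db` handle and the
shell's assert helpers (the collection name `fieldNameDemo` is illustrative,
not part of this change):

    var demo = db.fieldNameDemo;
    demo.drop();

    // Top-level $-prefixed names are rejected on insert...
    assert.writeErrorWithCode(demo.insert({$bad: 1}), ErrorCodes.BadValue);

    // ...but are accepted below the top level, where they cannot be mistaken
    // for operators (mirrors the embedded _id cases asserted above).
    assert.writeOK(demo.insert({a: {$bad: 1}}));

    // In an update document, a top-level $-prefixed field is parsed as an
    // update operator, so an unknown one fails to parse.
    assert.writeErrorWithCode(demo.update({}, {$bad: {x: 1}}, {upsert: true}),
                              ErrorCodes.FailedToParse);
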
diff --git a/jstests/core/filemd5.js b/jstests/core/filemd5.js
index 4c8ad3cd754..9ea70283a73 100644
--- a/jstests/core/filemd5.js
+++ b/jstests/core/filemd5.js
@@ -9,20 +9,20 @@
// ]
(function() {
- "use strict";
+"use strict";
- db.fs.chunks.drop();
- assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")}));
+db.fs.chunks.drop();
+assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")}));
- assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.BadValue);
+assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.BadValue);
- db.fs.chunks.ensureIndex({files_id: 1, n: 1});
- assert.commandWorked(db.runCommand({filemd5: 1, root: "fs"}));
+db.fs.chunks.ensureIndex({files_id: 1, n: 1});
+assert.commandWorked(db.runCommand({filemd5: 1, root: "fs"}));
- assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs", partialOk: 1, md5state: 5}),
- 50847);
- assert.writeOK(db.fs.chunks.insert({files_id: 2, n: 0}));
- assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50848);
- assert.writeOK(db.fs.chunks.update({files_id: 2, n: 0}, {$set: {data: 5}}));
- assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50849);
+assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs", partialOk: 1, md5state: 5}),
+ 50847);
+assert.writeOK(db.fs.chunks.insert({files_id: 2, n: 0}));
+assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50848);
+assert.writeOK(db.fs.chunks.update({files_id: 2, n: 0}, {$set: {data: 5}}));
+assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50849);
}());
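
The filemd5 hunk is pure reformatting; the command itself hashes GridFS chunks
in (files_id, n) order and refuses to run without an index on {files_id: 1, n: 1},
which is what the BadValue assertion above checks. A happy-path sketch under the
same assumptions (shell `db`; the files_id value 42 and the base64 payload are
arbitrary):

    db.fs.chunks.drop();
    db.fs.chunks.ensureIndex({files_id: 1, n: 1});
    assert.writeOK(db.fs.chunks.insert({files_id: 42, n: 0, data: new BinData(0, "dGVzdA==")}));

    // With the index in place the command succeeds; the digest of the
    // concatenated chunk data is assumed to come back in the 'md5' field.
    var res = db.runCommand({filemd5: 42, root: "fs"});
    assert.commandWorked(res);
    assert(res.md5, "expected an md5 digest in the reply");
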
diff --git a/jstests/core/find4.js b/jstests/core/find4.js
index ad482916a19..3721763b358 100644
--- a/jstests/core/find4.js
+++ b/jstests/core/find4.js
@@ -1,42 +1,42 @@
(function() {
- "use strict";
+"use strict";
- const coll = db.find4;
- coll.drop();
+const coll = db.find4;
+coll.drop();
- assert.writeOK(coll.insert({a: 1123, b: 54332}));
+assert.writeOK(coll.insert({a: 1123, b: 54332}));
- let o = coll.findOne();
- assert.eq(1123, o.a, "A");
- assert.eq(54332, o.b, "B");
- assert(o._id.str, "C");
+let o = coll.findOne();
+assert.eq(1123, o.a, "A");
+assert.eq(54332, o.b, "B");
+assert(o._id.str, "C");
- o = coll.findOne({}, {a: 1});
- assert.eq(1123, o.a, "D");
- assert(o._id.str, "E");
- assert(!o.b, "F");
+o = coll.findOne({}, {a: 1});
+assert.eq(1123, o.a, "D");
+assert(o._id.str, "E");
+assert(!o.b, "F");
- o = coll.findOne({}, {b: 1});
- assert.eq(54332, o.b, "G");
- assert(o._id.str, "H");
- assert(!o.a, "I");
+o = coll.findOne({}, {b: 1});
+assert.eq(54332, o.b, "G");
+assert(o._id.str, "H");
+assert(!o.a, "I");
- assert(coll.drop());
+assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 2, b: 2}));
- assert.eq("1-1,2-2",
- coll.find()
- .sort({a: 1})
- .map(function(z) {
- return z.a + "-" + z.b;
- })
- .toString());
- assert.eq("1-undefined,2-undefined",
- coll.find({}, {a: 1})
- .sort({a: 1})
- .map(function(z) {
- return z.a + "-" + z.b;
- })
- .toString());
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.eq("1-1,2-2",
+ coll.find()
+ .sort({a: 1})
+ .map(function(z) {
+ return z.a + "-" + z.b;
+ })
+ .toString());
+assert.eq("1-undefined,2-undefined",
+ coll.find({}, {a: 1})
+ .sort({a: 1})
+ .map(function(z) {
+ return z.a + "-" + z.b;
+ })
+ .toString());
}());
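
find4.js above changes only indentation; the behavior it covers is the default
handling of _id in inclusion projections. A compact sketch (shell `db` assumed;
`find4_demo` is an illustrative name):

    var c = db.find4_demo;
    c.drop();
    assert.writeOK(c.insert({a: 1, b: 2}));

    // An inclusion projection keeps _id by default and drops every other
    // field that is not named.
    var doc = c.findOne({}, {a: 1});
    assert.eq(1, doc.a);
    assert(doc._id, "inclusion projection should keep _id");
    assert(!doc.b, "unprojected field should be absent");
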
diff --git a/jstests/core/find5.js b/jstests/core/find5.js
index 41ed0034b5c..f7e52c0ccc6 100644
--- a/jstests/core/find5.js
+++ b/jstests/core/find5.js
@@ -1,56 +1,56 @@
// @tags: [requires_fastcount]
(function() {
- "use strict";
+"use strict";
- const coll = db.find5;
- coll.drop();
+const coll = db.find5;
+coll.drop();
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({b: 5}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({b: 5}));
- assert.eq(2, coll.find({}, {b: 1}).count(), "A");
+assert.eq(2, coll.find({}, {b: 1}).count(), "A");
- function getIds(projection) {
- return coll.find({}, projection).map(doc => doc._id).sort();
- }
+function getIds(projection) {
+ return coll.find({}, projection).map(doc => doc._id).sort();
+}
- assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({})), "B1 ");
- assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({a: 1})), "B2 ");
- assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({b: 1})), "B3 ");
- assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({c: 1})), "B4 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({})), "B1 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({a: 1})), "B2 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({b: 1})), "B3 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({c: 1})), "B4 ");
- let results = coll.find({}, {a: 1}).sort({a: -1});
- let first = results[0];
- assert.eq(1, first.a, "C1");
- assert.isnull(first.b, "C2");
+let results = coll.find({}, {a: 1}).sort({a: -1});
+let first = results[0];
+assert.eq(1, first.a, "C1");
+assert.isnull(first.b, "C2");
- let second = results[1];
- assert.isnull(second.a, "C3");
- assert.isnull(second.b, "C4");
+let second = results[1];
+assert.isnull(second.a, "C3");
+assert.isnull(second.b, "C4");
- results = coll.find({}, {b: 1}).sort({a: -1});
- first = results[0];
- assert.isnull(first.a, "C5");
- assert.isnull(first.b, "C6");
+results = coll.find({}, {b: 1}).sort({a: -1});
+first = results[0];
+assert.isnull(first.a, "C5");
+assert.isnull(first.b, "C6");
- second = results[1];
- assert.isnull(second.a, "C7");
- assert.eq(5, second.b, "C8");
+second = results[1];
+assert.isnull(second.a, "C7");
+assert.eq(5, second.b, "C8");
- assert(coll.drop());
+assert(coll.drop());
- assert.writeOK(coll.insert({a: 1, b: {c: 2, d: 3, e: 4}}));
- assert.eq(2, coll.findOne({}, {"b.c": 1}).b.c, "D");
+assert.writeOK(coll.insert({a: 1, b: {c: 2, d: 3, e: 4}}));
+assert.eq(2, coll.findOne({}, {"b.c": 1}).b.c, "D");
- const o = coll.findOne({}, {"b.c": 1, "b.d": 1});
- assert(o.b.c, "E 1");
- assert(o.b.d, "E 2");
- assert(!o.b.e, "E 3");
+const o = coll.findOne({}, {"b.c": 1, "b.d": 1});
+assert(o.b.c, "E 1");
+assert(o.b.d, "E 2");
+assert(!o.b.e, "E 3");
- assert(!coll.findOne({}, {"b.c": 1}).b.d, "F");
+assert(!coll.findOne({}, {"b.c": 1}).b.d, "F");
- assert(coll.drop());
- assert.writeOK(coll.insert({a: {b: {c: 1}}}));
- assert.eq(1, coll.findOne({}, {"a.b.c": 1}).a.b.c, "G");
+assert(coll.drop());
+assert.writeOK(coll.insert({a: {b: {c: 1}}}));
+assert.eq(1, coll.findOne({}, {"a.b.c": 1}).a.b.c, "G");
}());
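
Similarly, find5.js exercises dotted paths in projections. The core behavior,
sketched standalone (`find5_demo` is an illustrative name):

    var c = db.find5_demo;
    c.drop();
    assert.writeOK(c.insert({a: 1, b: {c: 2, d: 3}}));

    // A dotted path in an inclusion projection selects one nested field and
    // drops its siblings.
    var doc = c.findOne({}, {"b.c": 1});
    assert.eq(2, doc.b.c);
    assert(!doc.b.d, "sibling of the projected nested field should be absent");
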
diff --git a/jstests/core/find_and_modify3.js b/jstests/core/find_and_modify3.js
index 3f8fc22d98d..a319aef7a2d 100644
--- a/jstests/core/find_and_modify3.js
+++ b/jstests/core/find_and_modify3.js
@@ -19,7 +19,8 @@ orig2 = t.findOne({_id: 2});
out = t.findAndModify({
query: {_id: 1, 'comments.i': 0},
- update: {$set: {'comments.$.j': 2}}, 'new': true,
+ update: {$set: {'comments.$.j': 2}},
+ 'new': true,
sort: {other: 1}
});
assert.eq(out.comments[0], {i: 0, j: 2});
@@ -29,7 +30,8 @@ assert.eq(t.findOne({_id: 2}), orig2);
out = t.findAndModify({
query: {other: 1, 'comments.i': 1},
- update: {$set: {'comments.$.j': 3}}, 'new': true,
+ update: {$set: {'comments.$.j': 3}},
+ 'new': true,
sort: {other: 1}
});
assert.eq(out.comments[0], {i: 0, j: 2});
diff --git a/jstests/core/find_and_modify4.js b/jstests/core/find_and_modify4.js
index 15fb93c8a9f..d5b3ae23cb2 100644
--- a/jstests/core/find_and_modify4.js
+++ b/jstests/core/find_and_modify4.js
@@ -11,7 +11,8 @@ function getNextVal(counterName) {
var ret = t.findAndModify({
query: {_id: counterName},
update: {$inc: {val: 1}},
- upsert: true, 'new': true,
+ upsert: true,
+ 'new': true,
});
return ret;
}
diff --git a/jstests/core/find_and_modify_concurrent_update.js b/jstests/core/find_and_modify_concurrent_update.js
index 80b737cfbed..9682bea4c65 100644
--- a/jstests/core/find_and_modify_concurrent_update.js
+++ b/jstests/core/find_and_modify_concurrent_update.js
@@ -11,34 +11,33 @@
// Ensures that find and modify will not apply an update to a document which, due to a concurrent
// modification, no longer matches the query predicate.
(function() {
- "use strict";
-
- // Repeat the test a few times, as the timing of the yield means it won't fail consistently.
- for (var i = 0; i < 3; i++) {
- var t = db.find_and_modify_concurrent;
- t.drop();
-
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.commandWorked(t.ensureIndex({b: 1}));
- assert.writeOK(t.insert({_id: 1, a: 1, b: 1}));
-
- var join = startParallelShell(
- "db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});");
-
- // Due to the sleep, we expect this find and modify to yield before updating the
- // document.
- var res = t.findAndModify(
- {query: {a: 1, b: 1, $where: "sleep(100); return true;"}, update: {$inc: {a: 1}}});
-
- join();
- var docs = t.find().toArray();
- assert.eq(docs.length, 1);
-
- // Both the find and modify and the update operations look for a document with a==1,
- // and then increment 'a' by 1. One should win the race and set a=2. The other should
- // fail to find a match. The assertion is that 'a' got incremented once (not zero times
- // and not twice).
- assert.eq(docs[0].a, 2);
- }
-
+"use strict";
+
+// Repeat the test a few times, as the timing of the yield means it won't fail consistently.
+for (var i = 0; i < 3; i++) {
+ var t = db.find_and_modify_concurrent;
+ t.drop();
+
+ assert.commandWorked(t.ensureIndex({a: 1}));
+ assert.commandWorked(t.ensureIndex({b: 1}));
+ assert.writeOK(t.insert({_id: 1, a: 1, b: 1}));
+
+ var join =
+ startParallelShell("db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});");
+
+ // Due to the sleep, we expect this find and modify to yield before updating the
+ // document.
+ var res = t.findAndModify(
+ {query: {a: 1, b: 1, $where: "sleep(100); return true;"}, update: {$inc: {a: 1}}});
+
+ join();
+ var docs = t.find().toArray();
+ assert.eq(docs.length, 1);
+
+ // Both the find and modify and the update operations look for a document with a==1,
+ // and then increment 'a' by 1. One should win the race and set a=2. The other should
+ // fail to find a match. The assertion is that 'a' got incremented once (not zero times
+ // and not twice).
+ assert.eq(docs[0].a, 2);
+}
})();
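
The race in find_and_modify_concurrent_update.js is worth spelling out:
whichever writer matches {a: 1} first bumps 'a' to 2, and the loser's predicate
then no longer matches, so every interleaving ends with a == 2. A
single-iteration sketch of the same race (`fam_race_demo` is an illustrative
name; `startParallelShell` is the shell helper used above):

    var t = db.fam_race_demo;
    t.drop();
    assert.commandWorked(t.ensureIndex({a: 1}));
    assert.writeOK(t.insert({_id: 1, a: 1}));

    // The $where sleep encourages this operation to yield mid-query while
    // the parallel shell races to update the same document.
    var join = startParallelShell("db.fam_race_demo.update({a: 1}, {$inc: {a: 1}});");
    t.findAndModify({query: {a: 1, $where: "sleep(100); return true;"}, update: {$inc: {a: 1}}});
    join();

    // Exactly one writer wins; 'a' is incremented once, never twice.
    assert.eq(2, t.findOne().a);
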
diff --git a/jstests/core/find_and_modify_empty_coll.js b/jstests/core/find_and_modify_empty_coll.js
index 7325d73583e..c47674c800a 100644
--- a/jstests/core/find_and_modify_empty_coll.js
+++ b/jstests/core/find_and_modify_empty_coll.js
@@ -7,19 +7,19 @@
* Test that findAndModify works against a non-existent collection.
*/
(function() {
- 'use strict';
- var coll = db.find_and_modify_server18054;
- coll.drop();
+'use strict';
+var coll = db.find_and_modify_server18054;
+coll.drop();
- assert.eq(null, coll.findAndModify({remove: true}));
- assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}}));
- var upserted =
- coll.findAndModify({query: {_id: 0}, update: {$inc: {i: 1}}, upsert: true, new: true});
- assert.eq(upserted, {_id: 0, i: 1});
+assert.eq(null, coll.findAndModify({remove: true}));
+assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}}));
+var upserted =
+ coll.findAndModify({query: {_id: 0}, update: {$inc: {i: 1}}, upsert: true, new: true});
+assert.eq(upserted, {_id: 0, i: 1});
- coll.drop();
+coll.drop();
- assert.eq(null, coll.findAndModify({remove: true, fields: {z: 1}}));
- assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, fields: {z: 1}}));
- assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, upsert: true, fields: {z: 1}}));
+assert.eq(null, coll.findAndModify({remove: true, fields: {z: 1}}));
+assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, fields: {z: 1}}));
+assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, upsert: true, fields: {z: 1}}));
})();
diff --git a/jstests/core/find_and_modify_invalid_query_params.js b/jstests/core/find_and_modify_invalid_query_params.js
index a54d9217b24..6c5d16c94f1 100644
--- a/jstests/core/find_and_modify_invalid_query_params.js
+++ b/jstests/core/find_and_modify_invalid_query_params.js
@@ -4,90 +4,89 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
-
- const coll = db.find_and_modify_invalid_inputs;
- coll.drop();
- coll.insert({_id: 0});
- coll.insert({_id: 1});
-
- function assertFailedWithCode(cmd, errorCode) {
- const err = assert.throws(() => coll.findAndModify(cmd));
- assert.eq(err.code, errorCode);
- }
-
- function assertWorked(cmd, expectedValue) {
- const out = assert.doesNotThrow(() => coll.findAndModify(cmd));
- assert.eq(out.value, expectedValue);
- }
-
- // Verify that the findAndModify command works when we supply a valid query.
- let out = coll.findAndModify({query: {_id: 1}, update: {$set: {value: "basic"}}, new: true});
- assert.eq(out, {_id: 1, value: "basic"});
-
- // Verify that invalid 'query' object fails.
- assertFailedWithCode({query: null, update: {value: 2}}, 31160);
- assertFailedWithCode({query: 1, update: {value: 2}}, 31160);
- assertFailedWithCode({query: "{_id: 1}", update: {value: 2}}, 31160);
- assertFailedWithCode({query: false, update: {value: 2}}, 31160);
-
- // Verify that missing and empty query object is allowed.
- assertWorked({update: {$set: {value: "missingQuery"}}, new: true}, "missingQuery");
- assertWorked({query: {}, update: {$set: {value: "emptyQuery"}}, new: true}, "emptyQuery");
-
- // Verify that command works when we supply a valid sort specification.
- assertWorked({sort: {_id: -1}, update: {$set: {value: "sort"}}, new: true}, "sort");
-
- // Verify that invaid 'sort' object fails.
- assertFailedWithCode({sort: null, update: {value: 2}}, 31174);
- assertFailedWithCode({sort: 1, update: {value: 2}}, 31174);
- assertFailedWithCode({sort: "{_id: 1}", update: {value: 2}}, 31174);
- assertFailedWithCode({sort: false, update: {value: 2}}, 31174);
-
- // Verify that missing and empty 'sort' object is allowed.
- assertWorked({update: {$set: {value: "missingSort"}}, new: true}, "missingSort");
- assertWorked({sort: {}, update: {$set: {value: "emptySort"}}, new: true}, "emptySort");
-
- // Verify that the 'fields' projection works.
- assertWorked({fields: {_id: 0}, update: {$set: {value: "project"}}, new: true}, "project");
-
- // Verify that invaid 'fields' object fails.
- assertFailedWithCode({fields: null, update: {value: 2}}, 31175);
- assertFailedWithCode({fields: 1, update: {value: 2}}, 31175);
- assertFailedWithCode({fields: "{_id: 1}", update: {value: 2}}, 31175);
- assertFailedWithCode({fields: false, update: {value: 2}}, 31175);
-
- // Verify that missing and empty 'fields' object is allowed. Also verify that the command
- // projects all the fields.
- assertWorked({update: {$set: {value: "missingFields"}}, new: true}, "missingFields");
- assertWorked({fields: {}, update: {$set: {value: "emptyFields"}}, new: true}, "emptyFields");
-
- // Verify that findOneAndDelete() shell helper throws the same errors as findAndModify().
- let err = assert.throws(() => coll.findOneAndDelete("{_id: 1}"));
- assert.eq(err.code, 31160);
- err = assert.throws(() => coll.findOneAndDelete(null, {sort: 1}));
- assert.eq(err.code, 31174);
-
- // Verify that findOneAndReplace() shell helper throws the same errors as findAndModify().
- err = assert.throws(() => coll.findOneAndReplace("{_id: 1}", {}));
- assert.eq(err.code, 31160);
- err = assert.throws(() => coll.findOneAndReplace(null, {}, {sort: 1}));
- assert.eq(err.code, 31174);
-
- // Verify that findOneAndUpdate() shell helper throws the same errors as findAndModify().
- err = assert.throws(() => coll.findOneAndUpdate("{_id: 1}", {$set: {value: "new"}}));
- assert.eq(err.code, 31160);
- err = assert.throws(() => coll.findOneAndUpdate(null, {$set: {value: "new"}}, {sort: 1}));
- assert.eq(err.code, 31174);
-
- // Verify that find and modify shell helpers allow null query object.
- out =
- coll.findOneAndUpdate(null, {$set: {value: "findOneAndUpdate"}}, {returnNewDocument: true});
- assert.eq(out.value, "findOneAndUpdate");
-
- out = coll.findOneAndReplace(null, {value: "findOneAndReplace"}, {returnNewDocument: true});
- assert.eq(out.value, "findOneAndReplace");
-
- out = coll.findOneAndDelete(null);
- assert.eq(out.value, "findOneAndReplace");
+"use strict";
+
+const coll = db.find_and_modify_invalid_inputs;
+coll.drop();
+coll.insert({_id: 0});
+coll.insert({_id: 1});
+
+function assertFailedWithCode(cmd, errorCode) {
+ const err = assert.throws(() => coll.findAndModify(cmd));
+ assert.eq(err.code, errorCode);
+}
+
+function assertWorked(cmd, expectedValue) {
+ const out = assert.doesNotThrow(() => coll.findAndModify(cmd));
+ assert.eq(out.value, expectedValue);
+}
+
+// Verify that the findAndModify command works when we supply a valid query.
+let out = coll.findAndModify({query: {_id: 1}, update: {$set: {value: "basic"}}, new: true});
+assert.eq(out, {_id: 1, value: "basic"});
+
+// Verify that invalid 'query' object fails.
+assertFailedWithCode({query: null, update: {value: 2}}, 31160);
+assertFailedWithCode({query: 1, update: {value: 2}}, 31160);
+assertFailedWithCode({query: "{_id: 1}", update: {value: 2}}, 31160);
+assertFailedWithCode({query: false, update: {value: 2}}, 31160);
+
+// Verify that missing and empty query object is allowed.
+assertWorked({update: {$set: {value: "missingQuery"}}, new: true}, "missingQuery");
+assertWorked({query: {}, update: {$set: {value: "emptyQuery"}}, new: true}, "emptyQuery");
+
+// Verify that command works when we supply a valid sort specification.
+assertWorked({sort: {_id: -1}, update: {$set: {value: "sort"}}, new: true}, "sort");
+
+// Verify that invalid 'sort' object fails.
+assertFailedWithCode({sort: null, update: {value: 2}}, 31174);
+assertFailedWithCode({sort: 1, update: {value: 2}}, 31174);
+assertFailedWithCode({sort: "{_id: 1}", update: {value: 2}}, 31174);
+assertFailedWithCode({sort: false, update: {value: 2}}, 31174);
+
+// Verify that missing and empty 'sort' object is allowed.
+assertWorked({update: {$set: {value: "missingSort"}}, new: true}, "missingSort");
+assertWorked({sort: {}, update: {$set: {value: "emptySort"}}, new: true}, "emptySort");
+
+// Verify that the 'fields' projection works.
+assertWorked({fields: {_id: 0}, update: {$set: {value: "project"}}, new: true}, "project");
+
+// Verify that invalid 'fields' object fails.
+assertFailedWithCode({fields: null, update: {value: 2}}, 31175);
+assertFailedWithCode({fields: 1, update: {value: 2}}, 31175);
+assertFailedWithCode({fields: "{_id: 1}", update: {value: 2}}, 31175);
+assertFailedWithCode({fields: false, update: {value: 2}}, 31175);
+
+// Verify that missing and empty 'fields' object is allowed. Also verify that the command
+// projects all the fields.
+assertWorked({update: {$set: {value: "missingFields"}}, new: true}, "missingFields");
+assertWorked({fields: {}, update: {$set: {value: "emptyFields"}}, new: true}, "emptyFields");
+
+// Verify that findOneAndDelete() shell helper throws the same errors as findAndModify().
+let err = assert.throws(() => coll.findOneAndDelete("{_id: 1}"));
+assert.eq(err.code, 31160);
+err = assert.throws(() => coll.findOneAndDelete(null, {sort: 1}));
+assert.eq(err.code, 31174);
+
+// Verify that findOneAndReplace() shell helper throws the same errors as findAndModify().
+err = assert.throws(() => coll.findOneAndReplace("{_id: 1}", {}));
+assert.eq(err.code, 31160);
+err = assert.throws(() => coll.findOneAndReplace(null, {}, {sort: 1}));
+assert.eq(err.code, 31174);
+
+// Verify that findOneAndUpdate() shell helper throws the same errors as findAndModify().
+err = assert.throws(() => coll.findOneAndUpdate("{_id: 1}", {$set: {value: "new"}}));
+assert.eq(err.code, 31160);
+err = assert.throws(() => coll.findOneAndUpdate(null, {$set: {value: "new"}}, {sort: 1}));
+assert.eq(err.code, 31174);
+
+// Verify that find and modify shell helpers allow null query object.
+out = coll.findOneAndUpdate(null, {$set: {value: "findOneAndUpdate"}}, {returnNewDocument: true});
+assert.eq(out.value, "findOneAndUpdate");
+
+out = coll.findOneAndReplace(null, {value: "findOneAndReplace"}, {returnNewDocument: true});
+assert.eq(out.value, "findOneAndReplace");
+
+out = coll.findOneAndDelete(null);
+assert.eq(out.value, "findOneAndReplace");
})();
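
One pattern in the hunk above worth calling out: the shell helpers rethrow
server-side parse failures with the server's numeric error code attached, which
is what the assertFailedWithCode helper relies on. Minimal form (`fam_params_demo`
is an illustrative name; 31160 is the query-validation code asserted above):

    var c = db.fam_params_demo;
    c.drop();
    assert.writeOK(c.insert({_id: 0}));

    // A non-object 'query' is rejected at parse time; the thrown error
    // carries the server's code.
    var err = assert.throws(() => c.findAndModify({query: "{_id: 0}", update: {$set: {x: 1}}}));
    assert.eq(err.code, 31160);
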
diff --git a/jstests/core/find_and_modify_pipeline_update.js b/jstests/core/find_and_modify_pipeline_update.js
index ba793fb2ce3..781b4d0335d 100644
--- a/jstests/core/find_and_modify_pipeline_update.js
+++ b/jstests/core/find_and_modify_pipeline_update.js
@@ -3,55 +3,52 @@
* @tags: [requires_non_retryable_writes]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- const coll = db.find_and_modify_pipeline_update;
- coll.drop();
+const coll = db.find_and_modify_pipeline_update;
+coll.drop();
- // Test that it generally works.
- assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}]));
- let found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 1}}]});
- assert.eq(found, {_id: 0});
- found = coll.findAndModify({query: {_id: 0}, update: [{$set: {z: 2}}], new: true});
- assert.eq(found, {_id: 0, y: 1, z: 2});
+// Test that it generally works.
+assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}]));
+let found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 1}}]});
+assert.eq(found, {_id: 0});
+found = coll.findAndModify({query: {_id: 0}, update: [{$set: {z: 2}}], new: true});
+assert.eq(found, {_id: 0, y: 1, z: 2});
- found = coll.findAndModify({query: {_id: 0}, update: [{$unset: ["z"]}], new: true});
- assert.eq(found, {_id: 0, y: 1});
+found = coll.findAndModify({query: {_id: 0}, update: [{$unset: ["z"]}], new: true});
+assert.eq(found, {_id: 0, y: 1});
- // Test that pipeline-style update supports the 'fields' argument.
+// Test that pipeline-style update supports the 'fields' argument.
+assert(coll.drop());
+assert.commandWorked(coll.insert([{_id: 0, x: 0}, {_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]));
+found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 0}}], fields: {x: 0}});
+assert.eq(found, {_id: 0});
+
+found = coll.findAndModify({query: {_id: 1}, update: [{$set: {y: 1}}], fields: {x: 1}});
+assert.eq(found, {_id: 1, x: 1});
+
+found = coll.findAndModify({query: {_id: 2}, update: [{$set: {y: 2}}], fields: {x: 0}, new: true});
+assert.eq(found, {_id: 2, y: 2});
+
+found = coll.findAndModify({query: {_id: 3}, update: [{$set: {y: 3}}], fields: {x: 1}, new: true});
+assert.eq(found, {_id: 3, x: 3});
+
+// We skip the following test for sharded fixtures, where it would fail because the
+// query for findAndModify must contain the shard key.
+if (!FixtureHelpers.isMongos(db)) {
+ // Test that 'sort' works with pipeline-style update.
assert(coll.drop());
assert.commandWorked(
- coll.insert([{_id: 0, x: 0}, {_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]));
- found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 0}}], fields: {x: 0}});
- assert.eq(found, {_id: 0});
-
- found = coll.findAndModify({query: {_id: 1}, update: [{$set: {y: 1}}], fields: {x: 1}});
- assert.eq(found, {_id: 1, x: 1});
-
- found =
- coll.findAndModify({query: {_id: 2}, update: [{$set: {y: 2}}], fields: {x: 0}, new: true});
- assert.eq(found, {_id: 2, y: 2});
-
- found =
- coll.findAndModify({query: {_id: 3}, update: [{$set: {y: 3}}], fields: {x: 1}, new: true});
- assert.eq(found, {_id: 3, x: 3});
-
- // We skip the following test for sharded fixtures as it will fail as the query for
- // findAndModify must contain the shard key.
- if (!FixtureHelpers.isMongos(db)) {
- // Test that 'sort' works with pipeline-style update.
- assert(coll.drop());
- assert.commandWorked(
- coll.insert([{_id: 0, x: 'b'}, {_id: 1, x: 'd'}, {_id: 2, x: 'a'}, {_id: 3, x: 'c'}]));
- found = coll.findAndModify({update: [{$set: {foo: "bar"}}], sort: {x: -1}, new: true});
- assert.eq(found, {_id: 1, x: 'd', foo: "bar"});
- }
-
- // Test that it rejects the combination of arrayFilters and a pipeline-style update.
- let err =
- assert.throws(() => coll.findAndModify(
- {query: {_id: 1}, update: [{$set: {y: 1}}], arrayFilters: [{"i.x": 4}]}));
- assert.eq(err.code, ErrorCodes.FailedToParse);
+ coll.insert([{_id: 0, x: 'b'}, {_id: 1, x: 'd'}, {_id: 2, x: 'a'}, {_id: 3, x: 'c'}]));
+ found = coll.findAndModify({update: [{$set: {foo: "bar"}}], sort: {x: -1}, new: true});
+ assert.eq(found, {_id: 1, x: 'd', foo: "bar"});
+}
+
+// Test that it rejects the combination of arrayFilters and a pipeline-style update.
+let err =
+ assert.throws(() => coll.findAndModify(
+ {query: {_id: 1}, update: [{$set: {y: 1}}], arrayFilters: [{"i.x": 4}]}));
+assert.eq(err.code, ErrorCodes.FailedToParse);
}());
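
For context on what "pipeline-style update" means in the hunk above: the update
argument is an array of aggregation stages run against the matched document. A
standalone sketch (`fam_pipeline_demo` is an illustrative name):

    var c = db.fam_pipeline_demo;
    c.drop();
    assert.writeOK(c.insert({_id: 0, x: 1}));

    // $set here is the aggregation stage, so it can compute new values from
    // existing fields; pipeline $unset takes an array of field names.
    var out = c.findAndModify({
        query: {_id: 0},
        update: [{$set: {doubled: {$multiply: ["$x", 2]}}}, {$unset: ["x"]}],
        new: true
    });
    assert.eq(out, {_id: 0, doubled: 2});
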
diff --git a/jstests/core/find_and_modify_server6226.js b/jstests/core/find_and_modify_server6226.js
index a4093142150..e99b910e178 100644
--- a/jstests/core/find_and_modify_server6226.js
+++ b/jstests/core/find_and_modify_server6226.js
@@ -1,9 +1,9 @@
(function() {
- 'use strict';
+'use strict';
- var t = db.find_and_modify_server6226;
- t.drop();
+var t = db.find_and_modify_server6226;
+t.drop();
- var ret = t.findAndModify({query: {_id: 1}, update: {"$inc": {i: 1}}, upsert: true});
- assert.isnull(ret);
+var ret = t.findAndModify({query: {_id: 1}, update: {"$inc": {i: 1}}, upsert: true});
+assert.isnull(ret);
})();
diff --git a/jstests/core/find_and_modify_server6865.js b/jstests/core/find_and_modify_server6865.js
index 98e5b28ee47..1c5d9363a6f 100644
--- a/jstests/core/find_and_modify_server6865.js
+++ b/jstests/core/find_and_modify_server6865.js
@@ -8,297 +8,286 @@
* when remove=true or new=false, but not when new=true.
*/
(function() {
- 'use strict';
+'use strict';
- var collName = 'find_and_modify_server6865';
- var t = db.getCollection(collName);
- t.drop();
+var collName = 'find_and_modify_server6865';
+var t = db.getCollection(collName);
+t.drop();
- /**
- * Asserts that the specified query and projection returns the expected
- * result, using both the find() operation and the findAndModify command.
- *
- * insert -- document to insert after dropping collection t
- * cmdObj -- arguments to the findAndModify command
- *
- * expected -- the document 'value' expected to be returned after the
- * projection is applied
- */
- function testFAMWorked(insert, cmdObj, expected) {
- t.drop();
- t.insert(insert);
-
- var res;
-
- if (!cmdObj['new']) {
- // Test that the find operation returns the expected result.
- res = t.findOne(cmdObj['query'], cmdObj['fields']);
- assert.eq(res, expected, 'positional projection failed for find');
- }
-
- // Test that the findAndModify command returns the expected result.
- res = t.runCommand('findAndModify', cmdObj);
- assert.commandWorked(res, 'findAndModify command failed');
- assert.eq(res.value, expected, 'positional projection failed for findAndModify');
-
- if (cmdObj['new']) {
- // Test that the find operation returns the expected result.
- res = t.findOne(cmdObj['query'], cmdObj['fields']);
- assert.eq(res, expected, 'positional projection failed for find');
- }
- }
+/**
+ * Asserts that the specified query and projection returns the expected
+ * result, using both the find() operation and the findAndModify command.
+ *
+ * insert -- document to insert after dropping collection t
+ * cmdObj -- arguments to the findAndModify command
+ *
+ * expected -- the document 'value' expected to be returned after the
+ * projection is applied
+ */
+function testFAMWorked(insert, cmdObj, expected) {
+ t.drop();
+ t.insert(insert);
- /**
- * Asserts that the specified findAndModify command returns an error.
- */
- function testFAMFailed(insert, cmdObj) {
- t.drop();
- t.insert(insert);
+ var res;
- var res = t.runCommand('findAndModify', cmdObj);
- assert.commandFailed(res, 'findAndModify command unexpectedly succeeded');
+ if (!cmdObj['new']) {
+ // Test that the find operation returns the expected result.
+ res = t.findOne(cmdObj['query'], cmdObj['fields']);
+ assert.eq(res, expected, 'positional projection failed for find');
}
- //
- // Delete operations
- //
-
- // Simple query that uses an inclusion projection.
- testFAMWorked({_id: 42, a: [1, 2], b: 3},
- {query: {_id: 42}, fields: {_id: 0, b: 1}, remove: true},
- {b: 3});
-
- // Simple query that uses an exclusion projection.
- testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
- {query: {_id: 42}, fields: {a: 0, b: 0}, remove: true},
- {_id: 42, c: 4});
+ // Test that the findAndModify command returns the expected result.
+ res = t.runCommand('findAndModify', cmdObj);
+ assert.commandWorked(res, 'findAndModify command failed');
+ assert.eq(res.value, expected, 'positional projection failed for findAndModify');
- // Simple query that uses $elemMatch in the projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true},
- {_id: 42, b: [{name: 'second', value: 2}]});
-
- // Query on an array of values while using a positional projection.
- testFAMWorked(
- {_id: 42, a: [1, 2]}, {query: {a: 2}, fields: {'a.$': 1}, remove: true}, {_id: 42, a: [2]});
+ if (cmdObj['new']) {
+ // Test that the find operation returns the expected result.
+ res = t.findOne(cmdObj['query'], cmdObj['fields']);
+ assert.eq(res, expected, 'positional projection failed for find');
+ }
+}
- // Query on an array of objects while using a positional projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true},
- {_id: 42, b: [{name: 'third', value: 3}]});
-
- // Query on an array of objects while using a position projection.
- // Verifies that the projection {'b.$.value': 1} is treated the
- // same as {'b.$': 1}.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true},
- {_id: 42, b: [{name: 'third', value: 3}]});
-
- // Query on an array of objects using $elemMatch while using an inclusion projection.
- testFAMWorked({
- _id: 42,
- a: 5,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- remove: true
- },
- {a: 5});
-
- // Query on an array of objects using $elemMatch while using the positional
- // operator in the projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- remove: true
- },
- {b: [{name: 'john', value: 1}]});
-
- //
- // Update operations with new=false
- //
-
- // Simple query that uses an inclusion projection.
- testFAMWorked({_id: 42, a: [1, 2], b: 3},
- {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: false},
- {b: 3});
-
- // Simple query that uses an exclusion projection.
- testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
- {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: false},
- {_id: 42, c: 4});
-
- // Simple query that uses $elemMatch in the projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}},
- update: {$set: {name: '2nd'}},
- new: false
- },
- {_id: 42, b: [{name: 'second', value: 2}]});
-
- // Query on an array of values while using a positional projection.
- testFAMWorked(
- {_id: 42, a: [1, 2]},
- {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: false},
- {_id: 42, a: [2]});
-
- // Query on an array of objects while using a positional projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42, 'b.name': 'third'},
- fields: {'b.$': 1},
- update: {$set: {'b.$.kind': 'xyz'}},
- new: false
- },
- {_id: 42, b: [{name: 'third', value: 3}]});
-
- // Query on an array of objects while using $elemMatch in the projection,
- // where the matched array element is modified.
- testFAMWorked(
- {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]},
- {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: false},
- {_id: 1, a: [{x: 1, y: 1}]});
-
- // Query on an array of objects using $elemMatch while using an inclusion projection.
- testFAMWorked({
- _id: 42,
- a: 5,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- update: {$inc: {a: 6}},
- new: false
- },
- {a: 5});
-
- // Query on an array of objects using $elemMatch while using the positional
- // operator in the projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- update: {$set: {name: 'james'}},
- new: false
- },
- {b: [{name: 'john', value: 1}]});
-
- //
- // Update operations with new=true
- //
-
- // Simple query that uses an inclusion projection.
- testFAMWorked({_id: 42, a: [1, 2], b: 3},
- {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: true},
- {b: 4});
-
- // Simple query that uses an exclusion projection.
- testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
- {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: true},
- {_id: 42, c: 5});
-
- // Simple query that uses $elemMatch in the projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}},
- update: {$set: {'b.1.name': '2nd'}},
- new: true
- },
- {_id: 42, b: [{name: '2nd', value: 2}]});
-
- // Query on an array of values while using a positional projection.
- testFAMFailed(
- {_id: 42, a: [1, 2]},
- {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true});
-
- // Query on an array of objects while using a positional projection.
- testFAMFailed({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42, 'b.name': 'third'},
- fields: {'b.$': 1},
- update: {$set: {'b.$.kind': 'xyz'}},
- new: true
- });
-
- // Query on an array of objects while using $elemMatch in the projection.
- testFAMWorked({
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}, c: 1},
- update: {$set: {c: 'xyz'}},
- new: true
- },
- {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'});
-
- // Query on an array of objects while using $elemMatch in the projection,
- // where the matched array element is modified.
- testFAMWorked(
- {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]},
- {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: true},
- {_id: 1, a: [{x: 1, y: 2}]});
-
- // Query on an array of objects using $elemMatch while using an inclusion projection.
- testFAMWorked({
- _id: 42,
- a: 5,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+/**
+ * Asserts that the specified findAndModify command returns an error.
+ */
+function testFAMFailed(insert, cmdObj) {
+ t.drop();
+ t.insert(insert);
+
+ var res = t.runCommand('findAndModify', cmdObj);
+ assert.commandFailed(res, 'findAndModify command unexpectedly succeeded');
+}
+
+//
+// Delete operations
+//
+
+// Simple query that uses an inclusion projection.
+testFAMWorked(
+ {_id: 42, a: [1, 2], b: 3}, {query: {_id: 42}, fields: {_id: 0, b: 1}, remove: true}, {b: 3});
+
+// Simple query that uses an exclusion projection.
+testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
+ {query: {_id: 42}, fields: {a: 0, b: 0}, remove: true},
+ {_id: 42, c: 4});
+
+// Simple query that uses $elemMatch in the projection.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true},
+ {_id: 42, b: [{name: 'second', value: 2}]});
+
+// Query on an array of values while using a positional projection.
+testFAMWorked(
+ {_id: 42, a: [1, 2]}, {query: {a: 2}, fields: {'a.$': 1}, remove: true}, {_id: 42, a: [2]});
+
+// Query on an array of objects while using a positional projection.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true},
+ {_id: 42, b: [{name: 'third', value: 3}]});
+
+// Query on an array of objects while using a positional projection.
+// Verifies that the projection {'b.$.value': 1} is treated the
+// same as {'b.$': 1}.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true},
+ {_id: 42, b: [{name: 'third', value: 3}]});
+
+// Query on an array of objects using $elemMatch while using an inclusion projection.
+testFAMWorked({
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+},
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ remove: true
+ },
+ {a: 5});
+
+// Query on an array of objects using $elemMatch while using the positional
+// operator in the projection.
+testFAMWorked(
+ {_id: 42, b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]},
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ remove: true
},
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- update: {$inc: {a: 6}},
- new: true
- },
- {a: 11});
-
- // Query on an array of objects using $elemMatch while using the positional
- // operator in the projection.
- testFAMFailed({
- _id: 42,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ {b: [{name: 'john', value: 1}]});
+
+//
+// Update operations with new=false
+//
+
+// Simple query that uses an inclusion projection.
+testFAMWorked({_id: 42, a: [1, 2], b: 3},
+ {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: false},
+ {b: 3});
+
+// Simple query that uses an exclusion projection.
+testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
+ {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: false},
+ {_id: 42, c: 4});
+
+// Simple query that uses $elemMatch in the projection.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}},
+ update: {$set: {name: '2nd'}},
+ new: false
+ },
+ {_id: 42, b: [{name: 'second', value: 2}]});
+
+// Query on an array of values while using a positional projection.
+testFAMWorked({_id: 42, a: [1, 2]},
+ {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: false},
+ {_id: 42, a: [2]});
+
+// Query on an array of objects while using a positional projection.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {
+ query: {_id: 42, 'b.name': 'third'},
+ fields: {'b.$': 1},
+ update: {$set: {'b.$.kind': 'xyz'}},
+ new: false
+ },
+ {_id: 42, b: [{name: 'third', value: 3}]});
+
+// Query on an array of objects while using $elemMatch in the projection,
+// where the matched array element is modified.
+testFAMWorked(
+ {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]},
+ {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: false},
+ {_id: 1, a: [{x: 1, y: 1}]});
+
+// Query on an array of objects using $elemMatch while using an inclusion projection.
+testFAMWorked({
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+},
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ update: {$inc: {a: 6}},
+ new: false
+ },
+ {a: 5});
+
+// Query on an array of objects using $elemMatch while using the positional
+// operator in the projection.
+testFAMWorked(
+ {_id: 42, b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]},
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ update: {$set: {name: 'james'}},
+ new: false
},
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- update: {$set: {name: 'james'}},
- new: true
- });
-
+ {b: [{name: 'john', value: 1}]});
+
+//
+// Update operations with new=true
+//
+
+// Simple query that uses an inclusion projection.
+testFAMWorked({_id: 42, a: [1, 2], b: 3},
+ {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: true},
+ {b: 4});
+
+// Simple query that uses an exclusion projection.
+testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
+ {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: true},
+ {_id: 42, c: 5});
+
+// Simple query that uses $elemMatch in the projection.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}},
+ update: {$set: {'b.1.name': '2nd'}},
+ new: true
+ },
+ {_id: 42, b: [{name: '2nd', value: 2}]});
+
+// Query on an array of values while using a positional projection.
+testFAMFailed({_id: 42, a: [1, 2]},
+ {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true});
+
+// Query on an array of objects while using a positional projection.
+testFAMFailed({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {
+ query: {_id: 42, 'b.name': 'third'},
+ fields: {'b.$': 1},
+ update: {$set: {'b.$.kind': 'xyz'}},
+ new: true
+ });
+
+// Query on an array of objects while using $elemMatch in the projection.
+testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+},
+ {
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}, c: 1},
+ update: {$set: {c: 'xyz'}},
+ new: true
+ },
+ {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'});
+
+// Query on an array of objects while using $elemMatch in the projection,
+// where the matched array element is modified.
+testFAMWorked(
+ {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]},
+ {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: true},
+ {_id: 1, a: [{x: 1, y: 2}]});
+
+// Query on an array of objects using $elemMatch while using an inclusion projection.
+testFAMWorked({
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+},
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ update: {$inc: {a: 6}},
+ new: true
+ },
+ {a: 11});
+
+// Query on an array of objects using $elemMatch while using the positional
+// operator in the projection.
+testFAMFailed(
+ {_id: 42, b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]}, {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ update: {$set: {name: 'james'}},
+ new: true
+ });
})();
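
The long hunk above reduces to one rule: a positional ('field.$') projection is
resolved against the document the query matched, so it composes with
remove: true and new: false but is rejected with new: true, where the
post-image has no match position. Sketched (`positional_demo` is an
illustrative name):

    var t = db.positional_demo;
    t.drop();
    assert.writeOK(t.insert({_id: 1, arr: [{k: 'a'}, {k: 'b'}]}));

    // new: false projects the pre-image, where the match position exists.
    var res = t.runCommand('findAndModify', {
        query: {'arr.k': 'b'},
        fields: {'arr.$': 1},
        update: {$set: {touched: true}},
        new: false
    });
    assert.commandWorked(res);
    assert.eq(res.value.arr, [{k: 'b'}]);

    // new: true asks for the post-image, so the command is rejected.
    res = t.runCommand('findAndModify', {
        query: {'arr.k': 'b'},
        fields: {'arr.$': 1},
        update: {$set: {touched: true}},
        new: true
    });
    assert.commandFailed(res);
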
diff --git a/jstests/core/find_dedup.js b/jstests/core/find_dedup.js
index a2ac0bfa3d8..df9dbfa9d12 100644
--- a/jstests/core/find_dedup.js
+++ b/jstests/core/find_dedup.js
@@ -3,45 +3,45 @@
// @tags: [requires_fastcount]
(function() {
- "use strict";
+"use strict";
- const coll = db.jstests_find_dedup;
+const coll = db.jstests_find_dedup;
- function checkDedup(query, idArray) {
- const resultsArr = coll.find(query).sort({_id: 1}).toArray();
- assert.eq(resultsArr.length, idArray.length, "same number of results");
+function checkDedup(query, idArray) {
+ const resultsArr = coll.find(query).sort({_id: 1}).toArray();
+ assert.eq(resultsArr.length, idArray.length, "same number of results");
- for (let i = 0; i < idArray.length; i++) {
- assert(("_id" in resultsArr[i]), "result doc missing _id");
- assert.eq(idArray[i], resultsArr[i]._id, "_id mismatch for doc " + i);
- }
+ for (let i = 0; i < idArray.length; i++) {
+ assert(("_id" in resultsArr[i]), "result doc missing _id");
+ assert.eq(idArray[i], resultsArr[i]._id, "_id mismatch for doc " + i);
}
-
- // Deduping $or
- coll.drop();
- coll.ensureIndex({a: 1, b: 1});
- assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
- assert.writeOK(coll.insert({_id: 2, a: 1, b: 1}));
- assert.writeOK(coll.insert({_id: 3, a: 2, b: 2}));
- assert.writeOK(coll.insert({_id: 4, a: 3, b: 3}));
- assert.writeOK(coll.insert({_id: 5, a: 3, b: 3}));
- checkDedup({
- $or: [
- {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}},
- {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}},
- {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}}
- ]
- },
- [1, 2, 3, 4, 5]);
-
- // Deduping multikey
- assert(coll.drop());
- assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3], b: [4, 5, 6]}));
- assert.writeOK(coll.insert({_id: 2, a: [1, 2, 3], b: [4, 5, 6]}));
- assert.eq(2, coll.count());
-
- checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
-
- assert.commandWorked(coll.createIndex({a: 1}));
- checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
+}
+
+// Deduping $or
+coll.drop();
+coll.ensureIndex({a: 1, b: 1});
+assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
+assert.writeOK(coll.insert({_id: 2, a: 1, b: 1}));
+assert.writeOK(coll.insert({_id: 3, a: 2, b: 2}));
+assert.writeOK(coll.insert({_id: 4, a: 3, b: 3}));
+assert.writeOK(coll.insert({_id: 5, a: 3, b: 3}));
+checkDedup({
+ $or: [
+ {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}},
+ {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}},
+ {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}}
+ ]
+},
+ [1, 2, 3, 4, 5]);
+
+// Deduping multikey
+assert(coll.drop());
+assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3], b: [4, 5, 6]}));
+assert.writeOK(coll.insert({_id: 2, a: [1, 2, 3], b: [4, 5, 6]}));
+assert.eq(2, coll.count());
+
+checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
+
+assert.commandWorked(coll.createIndex({a: 1}));
+checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
}());
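
find_dedup.js guards against a classic multi-plan pitfall: a document that
satisfies several $or branches, or several keys of a multikey index, must still
be returned exactly once. The essence (`dedup_demo` is an illustrative name):

    var c = db.dedup_demo;
    c.drop();
    assert.commandWorked(c.createIndex({a: 1}));
    assert.writeOK(c.insert({_id: 1, a: [1, 2, 3]}));

    // The document matches both branches and three index keys, but must be
    // counted exactly once.
    assert.eq(1, c.find({$or: [{a: 1}, {a: 2}]}).itcount());
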
diff --git a/jstests/core/find_getmore_bsonsize.js b/jstests/core/find_getmore_bsonsize.js
index 6b9008cec51..6a19dec4302 100644
--- a/jstests/core/find_getmore_bsonsize.js
+++ b/jstests/core/find_getmore_bsonsize.js
@@ -3,86 +3,86 @@
// Ensure that the find and getMore commands can handle documents nearing the 16 MB size limit for
// user-stored BSON documents.
(function() {
- 'use strict';
+'use strict';
- var cmdRes;
- var collName = 'find_getmore_bsonsize';
- var coll = db[collName];
+var cmdRes;
+var collName = 'find_getmore_bsonsize';
+var coll = db[collName];
- coll.drop();
+coll.drop();
- var oneKB = 1024;
- var oneMB = 1024 * oneKB;
+var oneKB = 1024;
+var oneMB = 1024 * oneKB;
- // Build a (1 MB - 1 KB) string.
- var smallStr = 'x';
- while (smallStr.length < oneMB) {
- smallStr += smallStr;
- }
- assert.eq(smallStr.length, oneMB);
- smallStr = smallStr.substring(0, oneMB - oneKB);
+// Build a (1 MB - 1 KB) string.
+var smallStr = 'x';
+while (smallStr.length < oneMB) {
+ smallStr += smallStr;
+}
+assert.eq(smallStr.length, oneMB);
+smallStr = smallStr.substring(0, oneMB - oneKB);
- // Build a (16 MB - 1 KB) string.
- var bigStr = 'y';
- while (bigStr.length < (16 * oneMB)) {
- bigStr += bigStr;
- }
- assert.eq(bigStr.length, 16 * oneMB);
- bigStr = bigStr.substring(0, (16 * oneMB) - oneKB);
+// Build a (16 MB - 1 KB) string.
+var bigStr = 'y';
+while (bigStr.length < (16 * oneMB)) {
+ bigStr += bigStr;
+}
+assert.eq(bigStr.length, 16 * oneMB);
+bigStr = bigStr.substring(0, (16 * oneMB) - oneKB);
- // Collection has one ~1 MB doc followed by one ~16 MB doc.
- assert.writeOK(coll.insert({_id: 0, padding: smallStr}));
- assert.writeOK(coll.insert({_id: 1, padding: bigStr}));
+// Collection has one ~1 MB doc followed by one ~16 MB doc.
+assert.writeOK(coll.insert({_id: 0, padding: smallStr}));
+assert.writeOK(coll.insert({_id: 1, padding: bigStr}));
- // Find command should just return the first doc, as adding the last would create an invalid
- // command response document.
- cmdRes = db.runCommand({find: collName});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
+// Find command should just return the first doc, as adding the last would create an invalid
+// command response document.
+cmdRes = db.runCommand({find: collName});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 1);
- // The 16 MB doc should be returned alone on getMore. This is the last document in the
- // collection, so the server should close the cursor.
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
+// The 16 MB doc should be returned alone on getMore. This is the last document in the
+// collection, so the server should close the cursor.
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 1);
- // Setup a cursor without returning any results (batchSize of zero).
- cmdRes = db.runCommand({find: collName, batchSize: 0});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 0);
+// Set up a cursor without returning any results (batchSize of zero).
+cmdRes = db.runCommand({find: collName, batchSize: 0});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 0);
- // First getMore should only return one doc, since both don't fit in the response.
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
+// First getMore should return only one doc, since both docs together don't fit in one response.
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 1);
- // Second getMore should return the second doc and a third will close the cursor.
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
+// Second getMore should return the second doc and a third will close the cursor.
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 1);
- coll.drop();
+coll.drop();
- // Insert a document of exactly 16MB and make sure the find command can return it.
- bigStr = 'y';
- while (bigStr.length < (16 * oneMB)) {
- bigStr += bigStr;
- }
- bigStr = bigStr.substring(0, (16 * oneMB) - 32);
- var maxSizeDoc = {_id: 0, padding: bigStr};
- assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB);
- assert.writeOK(coll.insert(maxSizeDoc));
+// Insert a document of exactly 16MB and make sure the find command can return it.
+bigStr = 'y';
+while (bigStr.length < (16 * oneMB)) {
+ bigStr += bigStr;
+}
+bigStr = bigStr.substring(0, (16 * oneMB) - 32);
+var maxSizeDoc = {_id: 0, padding: bigStr};
+assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB);
+assert.writeOK(coll.insert(maxSizeDoc));
- cmdRes = db.runCommand({find: collName});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
+cmdRes = db.runCommand({find: collName});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 1);
})();
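
The sizing logic tested above generalizes: a reply batch is cut off before it
would exceed the BSON size limit, so two documents near 16 MB always arrive in
separate batches regardless of the requested batchSize. Sketch (`bsonsize_demo`
is an illustrative name):

    var c = db.bsonsize_demo;
    c.drop();

    // Build a string just under 16 MB, the same way the test above does.
    var big = 'x';
    while (big.length < 16 * 1024 * 1024) {
        big += big;
    }
    big = big.substring(0, 16 * 1024 * 1024 - 1024);

    assert.writeOK(c.insert({_id: 0, pad: big}));
    assert.writeOK(c.insert({_id: 1, pad: big}));

    // Even with batchSize: 2, the first reply can hold only one document.
    var res = db.runCommand({find: c.getName(), batchSize: 2});
    assert.commandWorked(res);
    assert.eq(1, res.cursor.firstBatch.length);

    res = db.runCommand({getMore: res.cursor.id, collection: c.getName()});
    assert.commandWorked(res);
    assert.eq(1, res.cursor.nextBatch.length);
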
diff --git a/jstests/core/find_getmore_cmd.js b/jstests/core/find_getmore_cmd.js
index 5f815fb1d4c..55ad7a4a443 100644
--- a/jstests/core/find_getmore_cmd.js
+++ b/jstests/core/find_getmore_cmd.js
@@ -2,88 +2,85 @@
// Tests that explicitly invoke the find and getMore commands.
(function() {
- 'use strict';
+'use strict';
- var cmdRes;
- var cursorId;
- var defaultBatchSize = 101;
- var collName = 'find_getmore_cmd';
- var coll = db[collName];
+var cmdRes;
+var cursorId;
+var defaultBatchSize = 101;
+var collName = 'find_getmore_cmd';
+var coll = db[collName];
- coll.drop();
- for (var i = 0; i < 150; i++) {
- assert.writeOK(coll.insert({a: i}));
- }
+coll.drop();
+for (var i = 0; i < 150; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
- // Verify result of a find command that specifies none of the optional arguments.
- cmdRes = db.runCommand({find: collName});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, defaultBatchSize);
+// Verify result of a find command that specifies none of the optional arguments.
+cmdRes = db.runCommand({find: collName});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, defaultBatchSize);
- // Use a getMore command to get the next batch.
- cursorId = cmdRes.cursor.id;
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 150 - defaultBatchSize);
+// Use a getMore command to get the next batch.
+cursorId = cmdRes.cursor.id;
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 150 - defaultBatchSize);
- // Cursor should have been closed, so attempting to get another batch should fail.
- cmdRes = db.runCommand({getMore: cursorId, collection: collName});
- assert.commandFailed(cmdRes);
+// Cursor should have been closed, so attempting to get another batch should fail.
+cmdRes = db.runCommand({getMore: cursorId, collection: collName});
+assert.commandFailed(cmdRes);
- // Find command with limit.
- cmdRes = db.runCommand({find: collName, limit: 10});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 10);
+// Find command with limit.
+cmdRes = db.runCommand({find: collName, limit: 10});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 10);
- // Find command with positive batchSize followed by getMore command with positive batchSize.
- cmdRes = db.runCommand({find: collName, batchSize: 10});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 10);
- cmdRes =
- db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)});
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 5);
+// Find command with positive batchSize followed by getMore command with positive batchSize.
+cmdRes = db.runCommand({find: collName, batchSize: 10});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 10);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)});
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 5);
- // Find command with zero batchSize followed by getMore command (default batchSize).
- cmdRes = db.runCommand({find: collName, batchSize: 0});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 0);
- cmdRes =
- db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)});
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 5);
+// Find command with zero batchSize followed by getMore command (default batchSize).
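+// A batchSize of 0 establishes the cursor but returns no documents in the first batch.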
+cmdRes = db.runCommand({find: collName, batchSize: 0});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 0);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)});
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 5);
- // Batch size and limit together.
- cmdRes = db.runCommand({find: collName, batchSize: 10, limit: 20});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 10);
- cmdRes =
- db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(11)});
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 10);
+// Batch size and limit together.
+cmdRes = db.runCommand({find: collName, batchSize: 10, limit: 20});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 10);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(11)});
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 10);
- // Find command with batchSize and singleBatch.
- cmdRes = db.runCommand({find: collName, batchSize: 10, singleBatch: true});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 10);
+// Find command with batchSize and singleBatch.
+cmdRes = db.runCommand({find: collName, batchSize: 10, singleBatch: true});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 10);
- // Error on invalid collection name.
- assert.commandFailedWithCode(db.runCommand({find: ""}), ErrorCodes.InvalidNamespace);
+// Error on invalid collection name.
+assert.commandFailedWithCode(db.runCommand({find: ""}), ErrorCodes.InvalidNamespace);
})();
diff --git a/jstests/core/find_projection_meta_errors.js b/jstests/core/find_projection_meta_errors.js
index 6fd69cb9d04..c43590e3bad 100644
--- a/jstests/core/find_projection_meta_errors.js
+++ b/jstests/core/find_projection_meta_errors.js
@@ -1,23 +1,23 @@
// Basic tests for errors when parsing the $meta projection.
(function() {
- "use strict";
+"use strict";
- const coll = db.find_projection_meta_errors;
- coll.drop();
+const coll = db.find_projection_meta_errors;
+coll.drop();
- assert.commandWorked(coll.insert({a: 1}));
- assert.commandWorked(coll.insert({a: 2}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 2}));
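+// "searchScore" and "searchHighlights" are $meta keywords reserved for search queries, so a
+// plain find projection rejects them with BadValue, as it does any unrecognized argument.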
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchScore"}}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchScore"}}}),
+ ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchHighlights"}}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchHighlights"}}}),
+ ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), projection: {score: {$meta: "some garbage"}}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), projection: {score: {$meta: "some garbage"}}}),
+ ErrorCodes.BadValue);
}());
diff --git a/jstests/core/fsync.js b/jstests/core/fsync.js
index 2f116364c34..bd5526b8bc9 100644
--- a/jstests/core/fsync.js
+++ b/jstests/core/fsync.js
@@ -9,107 +9,107 @@
* @tags: [requires_fastcount, requires_fsync]
*/
(function() {
- "use strict";
-
- // Start with a clean DB.
- var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
- fsyncLockDB.dropDatabase();
-
- // Tests the db.fsyncLock/fsyncUnlock features.
- var storageEngine = db.serverStatus().storageEngine.name;
-
- // As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine
- // that does not support the begin/end backup commands.
- var supportsFsync = db.fsyncLock();
-
- if (!supportsFsync.ok) {
- assert.commandFailedWithCode(supportsFsync, ErrorCodes.CommandNotSupported);
- jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync");
- return;
- }
- db.fsyncUnlock();
-
- var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
-
- // Start with a clean DB
- var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
- fsyncLockDB.dropDatabase();
-
- // Test that a single, regular write works as expected.
- assert.writeOK(fsyncLockDB.coll.insert({x: 1}));
-
- // Test that fsyncLock doesn't work unless invoked against the admin DB.
- var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
- assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin.");
-
- // Uses admin automatically and locks the server for writes.
- var fsyncLockRes = db.fsyncLock();
- assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
- assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
-
- // Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it
- // is blocked. There is really no way to do that currently, so just check that the write didn't
- // go through.
- var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});");
- sleep(3000);
-
- // Make sure reads can still run even though there is a pending write and also that the write
- // didn't get through.
- assert.eq(1, fsyncLockDB.coll.find({}).itcount());
-
- // Unlock and make sure the insert succeeded.
- var fsyncUnlockRes = db.fsyncUnlock();
- assert(fsyncUnlockRes.ok, "fsyncUnlock command failed");
- assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp");
-
- // Make sure the db is unlocked and the initial write made it through.
- writeOpHandle();
- assert.writeOK(fsyncLockDB.coll.insert({x: 2}));
-
- assert.eq(3, fsyncLockDB.coll.count({}));
-
- // Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can
- // run this command repeatedly with no problems.
- var fsyncLockRes = db.fsyncLock();
- assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed");
-
- var fsyncUnlockRes = db.fsyncUnlock();
- assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed");
-
- // Make sure that insert attempts made during multiple fsyncLock requests will not execute until
- // all locks have been released.
- fsyncLockRes = db.fsyncLock();
- assert.commandWorked(fsyncLockRes);
- assert(fsyncLockRes.lockCount == 1, tojson(fsyncLockRes));
- let currentOp = db.currentOp();
- assert.commandWorked(currentOp);
- assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
-
- let shellHandle1 =
- startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});");
-
- fsyncLockRes = db.fsyncLock();
- assert.commandWorked(fsyncLockRes);
- assert(fsyncLockRes.lockCount == 2, tojson(fsyncLockRes));
- currentOp = db.currentOp();
- assert.commandWorked(currentOp);
- assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
-
- let shellHandle2 =
- startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});");
- sleep(3000);
- assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount());
-
- fsyncUnlockRes = db.fsyncUnlock();
- assert.commandWorked(fsyncUnlockRes);
- assert(fsyncUnlockRes.lockCount == 1, tojson(fsyncLockRes));
- sleep(3000);
- assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount());
-
- fsyncUnlockRes = db.fsyncUnlock();
- assert.commandWorked(fsyncUnlockRes);
- assert(fsyncUnlockRes.lockCount == 0, tojson(fsyncLockRes));
- shellHandle1();
- shellHandle2();
- assert.eq(2, fsyncLockDB.multipleLock.find({}).itcount());
+"use strict";
+
+// Start with a clean DB.
+var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
+fsyncLockDB.dropDatabase();
+
+// Tests the db.fsyncLock/fsyncUnlock features.
+var storageEngine = db.serverStatus().storageEngine.name;
+
+// As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine
+// that does not support the begin/end backup commands.
+var supportsFsync = db.fsyncLock();
+
+if (!supportsFsync.ok) {
+ assert.commandFailedWithCode(supportsFsync, ErrorCodes.CommandNotSupported);
+ jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync");
+ return;
+}
+db.fsyncUnlock();
+
+var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
+
+// Start with a clean DB
+var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
+fsyncLockDB.dropDatabase();
+
+// Test that a single, regular write works as expected.
+assert.writeOK(fsyncLockDB.coll.insert({x: 1}));
+
+// Test that fsyncLock doesn't work unless invoked against the admin DB.
+var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
+assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin.");
+
+// Uses admin automatically and locks the server for writes.
+var fsyncLockRes = db.fsyncLock();
+assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
+assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+
+// Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it
+// is blocked. There is really no way to do that currently, so just check that the write didn't
+// go through.
+var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});");
+sleep(3000);
+
+// Make sure reads can still run even though there is a pending write and also that the write
+// didn't get through.
+assert.eq(1, fsyncLockDB.coll.find({}).itcount());
+
+// Unlock and make sure the insert succeeded.
+var fsyncUnlockRes = db.fsyncUnlock();
+assert(fsyncUnlockRes.ok, "fsyncUnlock command failed");
+assert(db.currentOp().fsyncLock == null, "fsyncLock in db.currentOp should be null after fsyncUnlock");
+
+// Make sure the db is unlocked and the initial write made it through.
+writeOpHandle();
+assert.writeOK(fsyncLockDB.coll.insert({x: 2}));
+
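+// Three documents total: the initial insert, the parallel shell's insert, and the one above.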
+assert.eq(3, fsyncLockDB.coll.count({}));
+
+// Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can
+// run this command repeatedly with no problems.
+var fsyncLockRes = db.fsyncLock();
+assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed");
+
+var fsyncUnlockRes = db.fsyncUnlock();
+assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed");
+
+// Make sure that insert attempts made during multiple fsyncLock requests will not execute until
+// all locks have been released.
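+// fsyncLock calls nest: each call increments lockCount, and writes remain blocked until the
+// count drops back to zero.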
+fsyncLockRes = db.fsyncLock();
+assert.commandWorked(fsyncLockRes);
+assert(fsyncLockRes.lockCount == 1, tojson(fsyncLockRes));
+let currentOp = db.currentOp();
+assert.commandWorked(currentOp);
+assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+
+let shellHandle1 =
+ startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});");
+
+fsyncLockRes = db.fsyncLock();
+assert.commandWorked(fsyncLockRes);
+assert(fsyncLockRes.lockCount == 2, tojson(fsyncLockRes));
+currentOp = db.currentOp();
+assert.commandWorked(currentOp);
+assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+
+let shellHandle2 =
+ startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});");
+sleep(3000);
+assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount());
+
+fsyncUnlockRes = db.fsyncUnlock();
+assert.commandWorked(fsyncUnlockRes);
+assert(fsyncUnlockRes.lockCount == 1, tojson(fsyncUnlockRes));
+sleep(3000);
+assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount());
+
+fsyncUnlockRes = db.fsyncUnlock();
+assert.commandWorked(fsyncUnlockRes);
+assert(fsyncUnlockRes.lockCount == 0, tojson(fsyncUnlockRes));
+shellHandle1();
+shellHandle2();
+assert.eq(2, fsyncLockDB.multipleLock.find({}).itcount());
}());
diff --git a/jstests/core/fts1.js b/jstests/core/fts1.js
index 9b95fa8dc14..2ce50a22eeb 100644
--- a/jstests/core/fts1.js
+++ b/jstests/core/fts1.js
@@ -2,26 +2,26 @@
// collection.
// @tags: [assumes_no_implicit_index_creation]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fts.js");
+load("jstests/libs/fts.js");
- const coll = db.text1;
- coll.drop();
+const coll = db.text1;
+coll.drop();
- assert.commandWorked(coll.createIndex({x: "text"}, {name: "x_text"}));
+assert.commandWorked(coll.createIndex({x: "text"}, {name: "x_text"}));
- assert.eq([], queryIDS(coll, "az"), "A0");
+assert.eq([], queryIDS(coll, "az"), "A0");
- assert.writeOK(coll.insert({_id: 1, x: "az b c"}));
- assert.writeOK(coll.insert({_id: 2, x: "az b"}));
- assert.writeOK(coll.insert({_id: 3, x: "b c"}));
- assert.writeOK(coll.insert({_id: 4, x: "b c d"}));
+assert.writeOK(coll.insert({_id: 1, x: "az b c"}));
+assert.writeOK(coll.insert({_id: 2, x: "az b"}));
+assert.writeOK(coll.insert({_id: 3, x: "b c"}));
+assert.writeOK(coll.insert({_id: 4, x: "b c d"}));
- assert.eq([1, 2, 3, 4], queryIDS(coll, "c az").sort(), "A1");
- assert.eq([4], queryIDS(coll, "d"), "A2");
+assert.eq([1, 2, 3, 4], queryIDS(coll, "c az").sort(), "A1");
+assert.eq([4], queryIDS(coll, "d"), "A2");
- const index = coll.getIndexes().find(index => index.name === "x_text");
- assert.neq(index, undefined);
- assert.gte(index.textIndexVersion, 1, tojson(index));
+const index = coll.getIndexes().find(index => index.name === "x_text");
+assert.neq(index, undefined);
+assert.gte(index.textIndexVersion, 1, tojson(index));
}());
diff --git a/jstests/core/fts_array.js b/jstests/core/fts_array.js
index 967dd223392..16d51981908 100644
--- a/jstests/core/fts_array.js
+++ b/jstests/core/fts_array.js
@@ -2,54 +2,52 @@
* Tests for the interaction between FTS indexes and arrays.
*/
(function() {
- "use strict";
+"use strict";
- let coll = db.jstests_fts_array;
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.b": 1, words: "text"}));
+let coll = db.jstests_fts_array;
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.b": 1, words: "text"}));
- // Verify that the leading field of the index cannot contain an array.
- assert.writeErrorWithCode(coll.insert({a: {b: []}, words: "omnibus"}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({a: {b: [1]}, words: "omnibus"}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({a: {b: [1, 2]}, words: "omnibus"}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({a: [], words: "omnibus"}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({a: [{b: 1}], words: "omnibus"}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({a: [{b: 1}, {b: 2}], words: "omnibus"}),
- ErrorCodes.CannotBuildIndexKeys);
+// Verify that the leading field of the index cannot contain an array.
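+// The non-text fields of a compound text index may not be multikey, so an array anywhere
+// along the prefix path makes index key generation fail.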
+assert.writeErrorWithCode(coll.insert({a: {b: []}, words: "omnibus"}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({a: {b: [1]}, words: "omnibus"}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({a: {b: [1, 2]}, words: "omnibus"}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({a: [], words: "omnibus"}), ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({a: [{b: 1}], words: "omnibus"}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({a: [{b: 1}, {b: 2}], words: "omnibus"}),
+ ErrorCodes.CannotBuildIndexKeys);
- coll.drop();
- assert.commandWorked(coll.createIndex({words: "text", "y.z": 1}));
+coll.drop();
+assert.commandWorked(coll.createIndex({words: "text", "y.z": 1}));
- // Verify that the trailing field of the index cannot contain an array.
- assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: []}}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1]}}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1, 2]}}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({words: "macerate", y: []}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}]}),
- ErrorCodes.CannotBuildIndexKeys);
- assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}, {z: 2}]}),
- ErrorCodes.CannotBuildIndexKeys);
+// Verify that the trailing field of the index cannot contain an array.
+assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: []}}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1]}}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1, 2]}}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({words: "macerate", y: []}), ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}]}),
+ ErrorCodes.CannotBuildIndexKeys);
+assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}, {z: 2}]}),
+ ErrorCodes.CannotBuildIndexKeys);
- // Verify that array fields are allowed when positionally indexed.
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.0": 1, words: "text"}));
- assert.writeOK(coll.insert({a: [0, 1, 2], words: "dander"}));
- assert.eq({a: [0, 1, 2], words: "dander"},
- coll.findOne({"a.0": 0, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1}));
- assert.writeErrorWithCode(coll.insert({a: [[8, 9], 1, 2], words: "dander"}),
- ErrorCodes.CannotBuildIndexKeys);
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.0.1": 1, words: "text"}));
- assert.writeOK(coll.insert({a: [[8, 9], 1, 2], words: "dander"}));
- assert.eq({a: [[8, 9], 1, 2], words: "dander"},
- coll.findOne({"a.0.1": 9, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1}));
+// Verify that array fields are allowed when positionally indexed.
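+// A numeric path component such as "a.0" addresses one array element, so the indexed value
+// itself is a scalar unless that element is again an array.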
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.0": 1, words: "text"}));
+assert.writeOK(coll.insert({a: [0, 1, 2], words: "dander"}));
+assert.eq({a: [0, 1, 2], words: "dander"},
+ coll.findOne({"a.0": 0, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1}));
+assert.writeErrorWithCode(coll.insert({a: [[8, 9], 1, 2], words: "dander"}),
+ ErrorCodes.CannotBuildIndexKeys);
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.0.1": 1, words: "text"}));
+assert.writeOK(coll.insert({a: [[8, 9], 1, 2], words: "dander"}));
+assert.eq({a: [[8, 9], 1, 2], words: "dander"},
+ coll.findOne({"a.0.1": 9, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1}));
}());
diff --git a/jstests/core/fts_diacritic_and_caseinsensitive.js b/jstests/core/fts_diacritic_and_caseinsensitive.js
index 898735f3140..476fe9d2ca1 100644
--- a/jstests/core/fts_diacritic_and_caseinsensitive.js
+++ b/jstests/core/fts_diacritic_and_caseinsensitive.js
@@ -3,32 +3,29 @@
load('jstests/libs/fts.js');
(function() {
- "use strict";
- var coll = db.fts_diacritic_and_caseinsensitive;
-
- coll.drop();
-
- assert.writeOK(coll.insert({
- _id: 0,
- a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."
- }));
-
- assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
-
- assert.eq([0], queryIDS(coll, "proximo voo a", null));
- assert.eq([0], queryIDS(coll, "átlántico", null));
- assert.eq([0], queryIDS(coll, "\"proxIMO\"", null));
- assert.eq([0], queryIDS(coll, "\"poé\" atlânTico", null));
- assert.eq([0], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null));
- assert.eq([0], queryIDS(coll, "\"proximo voo\" -\"unico atlantico\"", null));
-
- assert.eq([], queryIDS(coll, "À", null));
- assert.eq([], queryIDS(coll, "próximoo", null));
- assert.eq([], queryIDS(coll, "proximoo vvôo àa", null));
- assert.eq([], queryIDS(coll, "À -próximo -Vôo", null));
- assert.eq([], queryIDS(coll, "à proximo -voo", null));
- assert.eq([], queryIDS(coll, "mo vo", null));
- assert.eq([], queryIDS(coll, "\"unico atlantico\"", null));
- assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"unico medico\"", null));
-
+"use strict";
+var coll = db.fts_diacritic_and_caseinsensitive;
+
+coll.drop();
+
+assert.writeOK(coll.insert(
+ {_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."}));
+
+assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
+
+assert.eq([0], queryIDS(coll, "proximo voo a", null));
+assert.eq([0], queryIDS(coll, "átlántico", null));
+assert.eq([0], queryIDS(coll, "\"proxIMO\"", null));
+assert.eq([0], queryIDS(coll, "\"poé\" atlânTico", null));
+assert.eq([0], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null));
+assert.eq([0], queryIDS(coll, "\"proximo voo\" -\"unico atlantico\"", null));
+
+assert.eq([], queryIDS(coll, "À", null));
+assert.eq([], queryIDS(coll, "próximoo", null));
+assert.eq([], queryIDS(coll, "proximoo vvôo àa", null));
+assert.eq([], queryIDS(coll, "À -próximo -Vôo", null));
+assert.eq([], queryIDS(coll, "à proximo -voo", null));
+assert.eq([], queryIDS(coll, "mo vo", null));
+assert.eq([], queryIDS(coll, "\"unico atlantico\"", null));
+assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"unico medico\"", null));
})();
diff --git a/jstests/core/fts_diacritic_and_casesensitive.js b/jstests/core/fts_diacritic_and_casesensitive.js
index d5c15034dbc..ae3c51c703b 100644
--- a/jstests/core/fts_diacritic_and_casesensitive.js
+++ b/jstests/core/fts_diacritic_and_casesensitive.js
@@ -4,62 +4,51 @@
load('jstests/libs/fts.js');
(function() {
- "use strict";
- var coll = db.fts_diacritic_and_casesensitive;
-
- coll.drop();
-
- assert.writeOK(coll.insert({
- _id: 0,
- a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."
- }));
-
- assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
-
- assert.eq(
- [0],
- queryIDS(coll, "próximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([0],
- queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [0],
- queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [0],
- queryIDS(
- coll, "\"Põe\" Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([0],
- queryIDS(coll,
- "\"próximo Vôo\" \"único Médico\"",
- null,
- {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([0],
- queryIDS(coll,
- "\"próximo Vôo\" -\"único médico\"",
- null,
- {$diacriticSensitive: true, $caseSensitive: true}));
-
- assert.eq([], queryIDS(coll, "À", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([],
- queryIDS(coll, "Próximo", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [],
- queryIDS(coll, "proximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [],
- queryIDS(coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [],
- queryIDS(coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([], queryIDS(coll, "mo Vô", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [],
- queryIDS(
- coll, "\"único médico\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([],
- queryIDS(coll,
- "\"próximo Vôo\" -\"único Médico\"",
- null,
- {$diacriticSensitive: true, $caseSensitive: true}));
-
+"use strict";
+var coll = db.fts_diacritic_and_casesensitive;
+
+coll.drop();
+
+assert.writeOK(coll.insert(
+ {_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."}));
+
+assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
+
+assert.eq([0],
+ queryIDS(coll, "próximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([0],
+ queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([0],
+ queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq(
+ [0],
+ queryIDS(coll, "\"Põe\" Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([0],
+ queryIDS(coll,
+ "\"próximo Vôo\" \"único Médico\"",
+ null,
+ {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([0],
+ queryIDS(coll,
+ "\"próximo Vôo\" -\"único médico\"",
+ null,
+ {$diacriticSensitive: true, $caseSensitive: true}));
+
+assert.eq([], queryIDS(coll, "À", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([], queryIDS(coll, "Próximo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([],
+ queryIDS(coll, "proximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq(
+ [], queryIDS(coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq(
+ [], queryIDS(coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([], queryIDS(coll, "mo Vô", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq(
+ [],
+ queryIDS(coll, "\"único médico\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
+assert.eq([],
+ queryIDS(coll,
+ "\"próximo Vôo\" -\"único Médico\"",
+ null,
+ {$diacriticSensitive: true, $caseSensitive: true}));
})();
\ No newline at end of file
diff --git a/jstests/core/fts_diacriticsensitive.js b/jstests/core/fts_diacriticsensitive.js
index e21d5360051..a377b810ea6 100644
--- a/jstests/core/fts_diacriticsensitive.js
+++ b/jstests/core/fts_diacriticsensitive.js
@@ -3,40 +3,36 @@
load('jstests/libs/fts.js');
(function() {
- "use strict";
- var coll = db.fts_diacriticsensitive;
-
- coll.drop();
-
- assert.writeOK(coll.insert({
- _id: 0,
- a: "O próximo vôo à noite sobre o Atlântico, põe freqüentemente o único médico."
- }));
-
- assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
-
- assert.throws(function() {
- queryIDS(coll, "hello", null, {$diacriticSensitive: "invalid"});
- });
-
- assert.eq([0], queryIDS(coll, "PRÓXIMO VÔO À", null, {$diacriticSensitive: true}));
- assert.eq([0], queryIDS(coll, "atlântico", null, {$diacriticSensitive: true}));
- assert.eq([0], queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true}));
- assert.eq([0], queryIDS(coll, "\"põe\" atlântico", null, {$diacriticSensitive: true}));
- assert.eq(
- [0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true}));
- assert.eq(
- [0],
- queryIDS(coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true}));
-
- assert.eq([], queryIDS(coll, "à", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(coll, "proximo", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(coll, "proximo voo à", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(coll, "à -PRÓXIMO -vôo", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(coll, "à proximo -vôo", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(coll, "mo vô", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(coll, "\"unico medico\"", null, {$diacriticSensitive: true}));
- assert.eq(
- [], queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true}));
-
+"use strict";
+var coll = db.fts_diacriticsensitive;
+
+coll.drop();
+
+assert.writeOK(coll.insert(
+ {_id: 0, a: "O próximo vôo à noite sobre o Atlântico, põe freqüentemente o único médico."}));
+
+assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
+
+assert.throws(function() {
+ queryIDS(coll, "hello", null, {$diacriticSensitive: "invalid"});
+});
+
+assert.eq([0], queryIDS(coll, "PRÓXIMO VÔO À", null, {$diacriticSensitive: true}));
+assert.eq([0], queryIDS(coll, "atlântico", null, {$diacriticSensitive: true}));
+assert.eq([0], queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true}));
+assert.eq([0], queryIDS(coll, "\"põe\" atlântico", null, {$diacriticSensitive: true}));
+assert.eq([0],
+ queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true}));
+assert.eq([0],
+ queryIDS(coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true}));
+
+assert.eq([], queryIDS(coll, "à", null, {$diacriticSensitive: true}));
+assert.eq([], queryIDS(coll, "proximo", null, {$diacriticSensitive: true}));
+assert.eq([], queryIDS(coll, "proximo voo à", null, {$diacriticSensitive: true}));
+assert.eq([], queryIDS(coll, "à -PRÓXIMO -vôo", null, {$diacriticSensitive: true}));
+assert.eq([], queryIDS(coll, "à proximo -vôo", null, {$diacriticSensitive: true}));
+assert.eq([], queryIDS(coll, "mo vô", null, {$diacriticSensitive: true}));
+assert.eq([], queryIDS(coll, "\"unico medico\"", null, {$diacriticSensitive: true}));
+assert.eq([],
+ queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true}));
})();
diff --git a/jstests/core/fts_dotted_prefix_fields.js b/jstests/core/fts_dotted_prefix_fields.js
index f811c4a7203..efbe3a91abf 100644
--- a/jstests/core/fts_dotted_prefix_fields.js
+++ b/jstests/core/fts_dotted_prefix_fields.js
@@ -1,15 +1,15 @@
// Test that text search works correctly when the text index has dotted paths as the non-text
// prefixes.
(function() {
- "use strict";
+"use strict";
- let coll = db.fts_dotted_prefix_fields;
- coll.drop();
- assert.commandWorked(coll.createIndex({"a.x": 1, "a.y": 1, "b.x": 1, "b.y": 1, words: "text"}));
- assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 3, y: 4}, words: "lorem ipsum dolor sit"}));
- assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 5, y: 4}, words: "lorem ipsum dolor sit"}));
+let coll = db.fts_dotted_prefix_fields;
+coll.drop();
+assert.commandWorked(coll.createIndex({"a.x": 1, "a.y": 1, "b.x": 1, "b.y": 1, words: "text"}));
+assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 3, y: 4}, words: "lorem ipsum dolor sit"}));
+assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 5, y: 4}, words: "lorem ipsum dolor sit"}));
- assert.eq(1,
- coll.find({$text: {$search: "lorem ipsum"}, "a.x": 1, "a.y": 2, "b.x": 3, "b.y": 4})
- .itcount());
+assert.eq(
+ 1,
+ coll.find({$text: {$search: "lorem ipsum"}, "a.x": 1, "a.y": 2, "b.x": 3, "b.y": 4}).itcount());
}());
diff --git a/jstests/core/fts_explain.js b/jstests/core/fts_explain.js
index 9245ac7ec52..0b147e5987a 100644
--- a/jstests/core/fts_explain.js
+++ b/jstests/core/fts_explain.js
@@ -5,35 +5,35 @@
// Test $text explain. SERVER-12037.
(function() {
- "use strict";
-
- const coll = db.fts_explain;
- let res;
-
- coll.drop();
- res = coll.ensureIndex({content: "text"}, {default_language: "none"});
- assert.commandWorked(res);
-
- res = coll.insert({content: "some data"});
- assert.writeOK(res);
-
- const explain =
- coll.find({$text: {$search: "\"a\" -b -\"c\""}}, {content: 1, score: {$meta: "textScore"}})
- .explain(true);
- let stage = explain.executionStats.executionStages;
- if ("SINGLE_SHARD" === stage.stage) {
- stage = stage.shards[0].executionStages;
- }
-
- assert.eq(stage.stage, "PROJECTION_DEFAULT");
-
- let textStage = stage.inputStage;
- assert.eq(textStage.stage, "TEXT");
- assert.gte(textStage.textIndexVersion, 1, "textIndexVersion incorrect or missing.");
- assert.eq(textStage.inputStage.stage, "TEXT_MATCH");
- assert.eq(textStage.inputStage.inputStage.stage, "TEXT_OR");
- assert.eq(textStage.parsedTextQuery.terms, ["a"]);
- assert.eq(textStage.parsedTextQuery.negatedTerms, ["b"]);
- assert.eq(textStage.parsedTextQuery.phrases, ["a"]);
- assert.eq(textStage.parsedTextQuery.negatedPhrases, ["c"]);
+"use strict";
+
+const coll = db.fts_explain;
+let res;
+
+coll.drop();
+res = coll.ensureIndex({content: "text"}, {default_language: "none"});
+assert.commandWorked(res);
+
+res = coll.insert({content: "some data"});
+assert.writeOK(res);
+
+const explain =
+ coll.find({$text: {$search: "\"a\" -b -\"c\""}}, {content: 1, score: {$meta: "textScore"}})
+ .explain(true);
+let stage = explain.executionStats.executionStages;
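+// When run against a sharded collection, the root stage is SINGLE_SHARD; unwrap it to reach
+// the shard's execution stages.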
+if ("SINGLE_SHARD" === stage.stage) {
+ stage = stage.shards[0].executionStages;
+}
+
+assert.eq(stage.stage, "PROJECTION_DEFAULT");
+
+let textStage = stage.inputStage;
+assert.eq(textStage.stage, "TEXT");
+assert.gte(textStage.textIndexVersion, 1, "textIndexVersion incorrect or missing.");
+assert.eq(textStage.inputStage.stage, "TEXT_MATCH");
+assert.eq(textStage.inputStage.inputStage.stage, "TEXT_OR");
+assert.eq(textStage.parsedTextQuery.terms, ["a"]);
+assert.eq(textStage.parsedTextQuery.negatedTerms, ["b"]);
+assert.eq(textStage.parsedTextQuery.phrases, ["a"]);
+assert.eq(textStage.parsedTextQuery.negatedPhrases, ["c"]);
})();
diff --git a/jstests/core/fts_index_version2.js b/jstests/core/fts_index_version2.js
index 05fecab36ee..f8c57f4e2d7 100644
--- a/jstests/core/fts_index_version2.js
+++ b/jstests/core/fts_index_version2.js
@@ -3,31 +3,28 @@
load('jstests/libs/fts.js');
(function() {
- "use strict";
- var coll = db.fts_index_version2;
-
- coll.drop();
-
- assert.writeOK(coll.insert({
- _id: 0,
- a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."
- }));
-
- assert.commandWorked(
- coll.ensureIndex({a: "text"}, {default_language: "portuguese", textIndexVersion: 2}));
-
- assert.eq([0], queryIDS(coll, "próximo vôo à", null));
- assert.eq([0], queryIDS(coll, "atlântico", null));
- assert.eq([0], queryIDS(coll, "\"próxIMO\"", null));
- assert.eq([0], queryIDS(coll, "\"põe\" atlânTico", null));
- assert.eq([0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null));
- assert.eq([0], queryIDS(coll, "\"próximo vôo\" -\"único Atlântico\"", null));
-
- assert.eq([], queryIDS(coll, "proximo voo a", null));
- assert.eq([], queryIDS(coll, "átlántico", null));
- assert.eq([], queryIDS(coll, "\"proxIMO\"", null));
- assert.eq([], queryIDS(coll, "\"poé\" atlânTico", null));
- assert.eq([], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null));
- assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"único Médico\"", null));
-
+"use strict";
+var coll = db.fts_index_version2;
+
+coll.drop();
+
+assert.writeOK(coll.insert(
+ {_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."}));
+
+assert.commandWorked(
+ coll.ensureIndex({a: "text"}, {default_language: "portuguese", textIndexVersion: 2}));
+
+assert.eq([0], queryIDS(coll, "próximo vôo à", null));
+assert.eq([0], queryIDS(coll, "atlântico", null));
+assert.eq([0], queryIDS(coll, "\"próxIMO\"", null));
+assert.eq([0], queryIDS(coll, "\"põe\" atlânTico", null));
+assert.eq([0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null));
+assert.eq([0], queryIDS(coll, "\"próximo vôo\" -\"único Atlântico\"", null));
+
+assert.eq([], queryIDS(coll, "proximo voo a", null));
+assert.eq([], queryIDS(coll, "átlántico", null));
+assert.eq([], queryIDS(coll, "\"proxIMO\"", null));
+assert.eq([], queryIDS(coll, "\"poé\" atlânTico", null));
+assert.eq([], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null));
+assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"único Médico\"", null));
})();
diff --git a/jstests/core/fts_querylang.js b/jstests/core/fts_querylang.js
index de27b65ba5b..a80258940ba 100644
--- a/jstests/core/fts_querylang.js
+++ b/jstests/core/fts_querylang.js
@@ -1,86 +1,91 @@
// Test the $text query operator.
// @tags: [requires_non_retryable_writes]
(function() {
- "use strict";
+"use strict";
- const coll = db.getCollection("fts_querylang");
- coll.drop();
+const coll = db.getCollection("fts_querylang");
+coll.drop();
- assert.commandWorked(coll.insert({_id: 0, unindexedField: 0, a: "textual content"}));
- assert.commandWorked(coll.insert({_id: 1, unindexedField: 1, a: "additional content"}));
- assert.commandWorked(coll.insert({_id: 2, unindexedField: 2, a: "irrelevant content"}));
- assert.commandWorked(coll.createIndex({a: "text"}));
+assert.commandWorked(coll.insert({_id: 0, unindexedField: 0, a: "textual content"}));
+assert.commandWorked(coll.insert({_id: 1, unindexedField: 1, a: "additional content"}));
+assert.commandWorked(coll.insert({_id: 2, unindexedField: 2, a: "irrelevant content"}));
+assert.commandWorked(coll.createIndex({a: "text"}));
- // Test text query with no results.
- assert.eq(false, coll.find({$text: {$search: "words"}}).hasNext());
+// Test text query with no results.
+assert.eq(false, coll.find({$text: {$search: "words"}}).hasNext());
- // Test basic text query.
- let results = coll.find({$text: {$search: "textual content -irrelevant"}}).toArray();
- assert.eq(results.length, 2, results);
- assert.neq(results[0]._id, 2, results);
- assert.neq(results[1]._id, 2, results);
+// Test basic text query.
+let results = coll.find({$text: {$search: "textual content -irrelevant"}}).toArray();
+assert.eq(results.length, 2, results);
+assert.neq(results[0]._id, 2, results);
+assert.neq(results[1]._id, 2, results);
- // Test sort with basic text query.
- results = coll.find({$text: {$search: "textual content -irrelevant"}})
- .sort({unindexedField: 1})
- .toArray();
- assert.eq(results.length, 2, results);
- assert.eq(results[0]._id, 0, results);
- assert.eq(results[1]._id, 1, results);
+// Test sort with basic text query.
+results = coll.find({$text: {$search: "textual content -irrelevant"}})
+ .sort({unindexedField: 1})
+ .toArray();
+assert.eq(results.length, 2, results);
+assert.eq(results[0]._id, 0, results);
+assert.eq(results[1]._id, 1, results);
- // Test skip with basic text query.
- results = coll.find({$text: {$search: "textual content -irrelevant"}})
- .sort({unindexedField: 1})
- .skip(1)
- .toArray();
- assert.eq(results.length, 1, results);
- assert.eq(results[0]._id, 1, results);
+// Test skip with basic text query.
+results = coll.find({$text: {$search: "textual content -irrelevant"}})
+ .sort({unindexedField: 1})
+ .skip(1)
+ .toArray();
+assert.eq(results.length, 1, results);
+assert.eq(results[0]._id, 1, results);
- // Test limit with basic text query.
- results = coll.find({$text: {$search: "textual content -irrelevant"}})
- .sort({unindexedField: 1})
- .limit(1)
- .toArray();
- assert.eq(results.length, 1, results);
- assert.eq(results[0]._id, 0, results);
+// Test limit with basic text query.
+results = coll.find({$text: {$search: "textual content -irrelevant"}})
+ .sort({unindexedField: 1})
+ .limit(1)
+ .toArray();
+assert.eq(results.length, 1, results);
+assert.eq(results[0]._id, 0, results);
- // Test $and of basic text query with indexed expression.
- results = coll.find({$text: {$search: "content -irrelevant"}, _id: 1}).toArray();
- assert.eq(results.length, 1, results);
- assert.eq(results[0]._id, 1, results);
+// Test $and of basic text query with indexed expression.
+results = coll.find({$text: {$search: "content -irrelevant"}, _id: 1}).toArray();
+assert.eq(results.length, 1, results);
+assert.eq(results[0]._id, 1, results);
- // Test $and of basic text query with indexed expression and bad language.
- assert.commandFailedWithCode(assert.throws(function() {
- coll.find({$text: {$search: "content -irrelevant", $language: "spanglish"}, _id: 1})
- .itcount();
- }),
- ErrorCodes.BadValue);
+// Test $and of basic text query with indexed expression and bad language.
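+// assert.throws returns the exception it caught, so the error code can be verified directly.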
+assert.commandFailedWithCode(
+ assert.throws(function() {
+ coll.find({
+ $text: {$search: "content -irrelevant", $language: "spanglish"},
+ _id: 1
+ })
+ .itcount();
+ }),
+ ErrorCodes.BadValue);
- // Test $and of basic text query with unindexed expression.
- results = coll.find({$text: {$search: "content -irrelevant"}, unindexedField: 1}).toArray();
- assert.eq(results.length, 1, results);
- assert.eq(results[0]._id, 1, results);
+// Test $and of basic text query with unindexed expression.
+results = coll.find({$text: {$search: "content -irrelevant"}, unindexedField: 1}).toArray();
+assert.eq(results.length, 1, results);
+assert.eq(results[0]._id, 1, results);
- // Test $language.
- let cursor = coll.find({$text: {$search: "contents", $language: "none"}});
- assert.eq(false, cursor.hasNext());
+// Test $language.
+let cursor = coll.find({$text: {$search: "contents", $language: "none"}});
+assert.eq(false, cursor.hasNext());
- cursor = coll.find({$text: {$search: "contents", $language: "EN"}});
- assert.eq(true, cursor.hasNext());
+cursor = coll.find({$text: {$search: "contents", $language: "EN"}});
+assert.eq(true, cursor.hasNext());
- cursor = coll.find({$text: {$search: "contents", $language: "spanglish"}});
- assert.commandFailedWithCode(assert.throws(function() {
- cursor.next();
- }),
- ErrorCodes.BadValue);
+cursor = coll.find({$text: {$search: "contents", $language: "spanglish"}});
+assert.commandFailedWithCode(assert.throws(function() {
+ cursor.next();
+ }),
+ ErrorCodes.BadValue);
- // Test update with $text.
- coll.update({$text: {$search: "textual content -irrelevant"}}, {$set: {b: 1}}, {multi: true});
- assert.eq(2, coll.find({b: 1}).itcount(), 'incorrect number of documents updated');
+// Test update with $text.
+coll.update({$text: {$search: "textual content -irrelevant"}}, {$set: {b: 1}}, {multi: true});
+assert.eq(2, coll.find({b: 1}).itcount(), 'incorrect number of documents updated');
- // $text cannot be contained within a $nor.
- assert.commandFailedWithCode(assert.throws(function() {
- coll.find({$nor: [{$text: {$search: 'a'}}]}).itcount();
- }),
- ErrorCodes.BadValue);
+// $text cannot be contained within a $nor.
+assert.commandFailedWithCode(
+ assert.throws(function() {
+ coll.find({$nor: [{$text: {$search: 'a'}}]}).itcount();
+ }),
+ ErrorCodes.BadValue);
}());
diff --git a/jstests/core/fts_score_sort.js b/jstests/core/fts_score_sort.js
index e074ca68ca2..9a4cc1a120b 100644
--- a/jstests/core/fts_score_sort.js
+++ b/jstests/core/fts_score_sort.js
@@ -1,61 +1,61 @@
// Test sorting with text score metadata.
(function() {
- "use strict";
-
- var t = db.getSiblingDB("test").getCollection("fts_score_sort");
- t.drop();
-
- assert.writeOK(t.insert({_id: 0, a: "textual content"}));
- assert.writeOK(t.insert({_id: 1, a: "additional content"}));
- assert.writeOK(t.insert({_id: 2, a: "irrelevant content"}));
- assert.commandWorked(t.ensureIndex({a: "text"}));
-
- // $meta sort specification should be rejected if it has additional keys.
- assert.throws(function() {
- t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
- .sort({score: {$meta: "textScore", extra: 1}})
- .itcount();
- });
-
- // $meta sort specification should be rejected if the type of meta sort is not known.
- assert.throws(function() {
- t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
- .sort({score: {$meta: "unknown"}})
- .itcount();
- });
-
- // Sort spefication should be rejected if a $-keyword other than $meta is used.
- assert.throws(function() {
- t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
- .sort({score: {$notMeta: "textScore"}})
- .itcount();
- });
-
- // Sort spefication should be rejected if it is a string, not an object with $meta.
- assert.throws(function() {
- t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
- .sort({score: "textScore"})
- .itcount();
- });
-
- // Sort by the text score.
- var results =
- t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}})
- .sort({score: {$meta: "textScore"}})
- .toArray();
- assert.eq(results.length, 2);
- assert.eq(results[0]._id, 0);
- assert.eq(results[1]._id, 1);
- assert.gt(results[0].score, results[1].score);
-
- // Sort by {_id descending, score} and verify the order is right.
- var results =
- t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}})
- .sort({_id: -1, score: {$meta: "textScore"}})
- .toArray();
- assert.eq(results.length, 2);
- assert.eq(results[0]._id, 1);
- assert.eq(results[1]._id, 0);
- // Note the reversal from above.
- assert.lt(results[0].score, results[1].score);
+"use strict";
+
+var t = db.getSiblingDB("test").getCollection("fts_score_sort");
+t.drop();
+
+assert.writeOK(t.insert({_id: 0, a: "textual content"}));
+assert.writeOK(t.insert({_id: 1, a: "additional content"}));
+assert.writeOK(t.insert({_id: 2, a: "irrelevant content"}));
+assert.commandWorked(t.ensureIndex({a: "text"}));
+
+// $meta sort specification should be rejected if it has additional keys.
+assert.throws(function() {
+ t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
+ .sort({score: {$meta: "textScore", extra: 1}})
+ .itcount();
+});
+
+// $meta sort specification should be rejected if the type of meta sort is not known.
+assert.throws(function() {
+ t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
+ .sort({score: {$meta: "unknown"}})
+ .itcount();
+});
+
+// Sort specification should be rejected if a $-keyword other than $meta is used.
+assert.throws(function() {
+ t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
+ .sort({score: {$notMeta: "textScore"}})
+ .itcount();
+});
+
+// Sort specification should be rejected if it is a string, not an object with $meta.
+assert.throws(function() {
+ t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}})
+ .sort({score: "textScore"})
+ .itcount();
+});
+
+// Sort by the text score.
+var results =
+ t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}})
+ .sort({score: {$meta: "textScore"}})
+ .toArray();
+assert.eq(results.length, 2);
+assert.eq(results[0]._id, 0);
+assert.eq(results[1]._id, 1);
+assert.gt(results[0].score, results[1].score);
+
+// Sort by {_id descending, score} and verify the order is right.
+var results =
+ t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}})
+ .sort({_id: -1, score: {$meta: "textScore"}})
+ .toArray();
+assert.eq(results.length, 2);
+assert.eq(results[0]._id, 1);
+assert.eq(results[1]._id, 0);
+// Note the reversal from above.
+assert.lt(results[0].score, results[1].score);
}());
diff --git a/jstests/core/fts_spanish.js b/jstests/core/fts_spanish.js
index 89915cf2889..264e1d9125b 100644
--- a/jstests/core/fts_spanish.js
+++ b/jstests/core/fts_spanish.js
@@ -1,37 +1,32 @@
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fts.js");
+load("jstests/libs/fts.js");
- const coll = db.text_spanish;
- coll.drop();
+const coll = db.text_spanish;
+coll.drop();
- assert.writeOK(coll.insert({_id: 1, title: "mi blog", text: "Este es un blog de prueba"}));
- assert.writeOK(
- coll.insert({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"}));
- assert.writeOK(coll.insert(
- {_id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed"}));
- assert.writeOK(coll.insert({
- _id: 4,
- language: "en",
- title: "My fourth blog",
- text: "This stemmed blog is in english"
- }));
+assert.writeOK(coll.insert({_id: 1, title: "mi blog", text: "Este es un blog de prueba"}));
+assert.writeOK(coll.insert({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"}));
+assert.writeOK(coll.insert(
+ {_id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed"}));
+assert.writeOK(coll.insert(
+ {_id: 4, language: "en", title: "My fourth blog", text: "This stemmed blog is in english"}));
- // Create a text index, giving more weight to the "title" field.
- assert.commandWorked(coll.createIndex({title: "text", text: "text"},
- {weights: {title: 10}, default_language: "es"}));
+// Create a text index, giving more weight to the "title" field.
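+// With a weight of 10 on "title" (and the default weight of 1 on "text"), title matches
+// contribute ten times as much to the text score.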
+assert.commandWorked(coll.createIndex({title: "text", text: "text"},
+ {weights: {title: 10}, default_language: "es"}));
- assert.eq(4, coll.count({$text: {$search: "blog"}}));
- assert.eq([4], queryIDS(coll, "stem"));
- assert.eq([3], queryIDS(coll, "stemmed"));
- assert.eq([4], queryIDS(coll, "stemmed", null, {"$language": "en"}));
- assert.eq([1, 2], queryIDS(coll, "prueba").sort());
+assert.eq(4, coll.count({$text: {$search: "blog"}}));
+assert.eq([4], queryIDS(coll, "stem"));
+assert.eq([3], queryIDS(coll, "stemmed"));
+assert.eq([4], queryIDS(coll, "stemmed", null, {"$language": "en"}));
+assert.eq([1, 2], queryIDS(coll, "prueba").sort());
- assert.writeError(coll.insert({_id: 5, language: "spanglish", title: "", text: ""}));
+assert.writeError(coll.insert({_id: 5, language: "spanglish", title: "", text: ""}));
- assert.commandWorked(coll.dropIndexes());
- assert.commandFailedWithCode(
- coll.createIndex({title: "text", text: "text"}, {default_language: "spanglish"}),
- ErrorCodes.CannotCreateIndex);
+assert.commandWorked(coll.dropIndexes());
+assert.commandFailedWithCode(
+ coll.createIndex({title: "text", text: "text"}, {default_language: "spanglish"}),
+ ErrorCodes.CannotCreateIndex);
}());
diff --git a/jstests/core/fts_trailing_fields.js b/jstests/core/fts_trailing_fields.js
index 2c7f79b423d..3f46cd1b1b7 100644
--- a/jstests/core/fts_trailing_fields.js
+++ b/jstests/core/fts_trailing_fields.js
@@ -1,22 +1,22 @@
// Tests for predicates which can use the trailing field of a text index.
(function() {
- "use strict";
+"use strict";
- const coll = db.fts_trailing_fields;
+const coll = db.fts_trailing_fields;
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}));
- assert.writeOK(coll.insert({a: 2, b: "lorem ipsum"}));
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}));
+assert.writeOK(coll.insert({a: 2, b: "lorem ipsum"}));
- assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: true}}).itcount());
- assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: null}).itcount());
- assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: false}}).itcount());
+assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: true}}).itcount());
+assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: null}).itcount());
+assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: false}}).itcount());
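+// Since "c" is absent from the document, it matches {c: null} and {$exists: false} but not
+// {$exists: true}.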
- // An equality predicate on the leading field isn't useful, but it shouldn't cause any problems.
- // Same with an $elemMatch predicate on one of the trailing fields.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, b: "text", "c.d": 1}));
- assert.writeOK(coll.insert({a: 2, b: "lorem ipsum", c: {d: 3}}));
- assert.eq(0, coll.find({a: [1, 2], $text: {$search: "lorem"}}).itcount());
- assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$elemMatch: {d: 3}}}).itcount());
+// An equality predicate on the leading field isn't useful, but it shouldn't cause any problems.
+// Same with an $elemMatch predicate on one of the trailing fields.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, b: "text", "c.d": 1}));
+assert.writeOK(coll.insert({a: 2, b: "lorem ipsum", c: {d: 3}}));
+assert.eq(0, coll.find({a: [1, 2], $text: {$search: "lorem"}}).itcount());
+assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$elemMatch: {d: 3}}}).itcount());
}());
diff --git a/jstests/core/function_string_representations.js b/jstests/core/function_string_representations.js
index 840038c766f..106cdcda9f2 100644
--- a/jstests/core/function_string_representations.js
+++ b/jstests/core/function_string_representations.js
@@ -2,40 +2,41 @@
// does_not_support_stepdowns,
// ]
-/** Demonstrate that mapReduce can accept functions represented by strings.
+/**
+ * Demonstrate that mapReduce can accept functions represented by strings.
* Some drivers do not have a type which represents a JavaScript function. These languages represent
* the arguments to mapReduce as strings.
*/
(function() {
- "use strict";
-
- var col = db.function_string_representations;
- col.drop();
- assert.writeOK(col.insert({
- _id: "abc123",
- ord_date: new Date("Oct 04, 2012"),
- status: 'A',
- price: 25,
- items: [{sku: "mmm", qty: 5, price: 2.5}, {sku: "nnn", qty: 5, price: 2.5}]
- }));
-
- var mapFunction = "function() {emit(this._id, this.price);}";
- var reduceFunction = "function(keyCustId, valuesPrices) {return Array.sum(valuesPrices);}";
- assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"}));
-
- // Provided strings may end with semicolons and/or whitespace
- mapFunction += " ; ";
- reduceFunction += " ; ";
- assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"}));
-
- // $where exhibits the same behavior
- var whereFunction = "function() {return this.price === 25;}";
- assert.eq(1, col.find({$where: whereFunction}).itcount());
-
- whereFunction += ";";
- assert.eq(1, col.find({$where: whereFunction}).itcount());
-
- // system.js does not need to be tested, as its contents types' are preserved, and
- // strings are not promoted into functions.
+"use strict";
+
+var col = db.function_string_representations;
+col.drop();
+assert.writeOK(col.insert({
+ _id: "abc123",
+ ord_date: new Date("Oct 04, 2012"),
+ status: 'A',
+ price: 25,
+ items: [{sku: "mmm", qty: 5, price: 2.5}, {sku: "nnn", qty: 5, price: 2.5}]
+}));
+
+var mapFunction = "function() {emit(this._id, this.price);}";
+var reduceFunction = "function(keyCustId, valuesPrices) {return Array.sum(valuesPrices);}";
+assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"}));
+
+// Provided strings may end with semicolons and/or whitespace
+mapFunction += " ; ";
+reduceFunction += " ; ";
+assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"}));
+
+// $where exhibits the same behavior
+var whereFunction = "function() {return this.price === 25;}";
+assert.eq(1, col.find({$where: whereFunction}).itcount());
+
+whereFunction += ";";
+assert.eq(1, col.find({$where: whereFunction}).itcount());
+
+// system.js does not need to be tested, as its contents' types are preserved, and
+// strings are not promoted into functions.
})();
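For comparison with the string-typed arguments exercised above, mapReduce also accepts genuine function objects; the two forms are equivalent. A minimal sketch (collection and output names are hypothetical):

var c = db.function_object_demo;
c.drop();
assert.writeOK(c.insert({_id: 1, price: 25}));
assert.commandWorked(c.mapReduce(
    function() {
        emit(this._id, this.price);
    },
    function(keyCustId, valuesPrices) {
        return Array.sum(valuesPrices);
    },
    {out: "map_reduce_object_demo"}));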
diff --git a/jstests/core/geo3.js b/jstests/core/geo3.js
index 6735eb78eb1..b5fec6769e9 100644
--- a/jstests/core/geo3.js
+++ b/jstests/core/geo3.js
@@ -1,77 +1,79 @@
// @tags: [requires_fastcount, operations_longer_than_stepdown_interval_in_txns]
(function() {
- t = db.geo3;
- t.drop();
-
- n = 1;
- arr = [];
- for (var x = -100; x < 100; x += 2) {
- for (var y = -100; y < 100; y += 2) {
- arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5});
- }
- }
- t.insert(arr);
- assert.eq(t.count(), 100 * 100);
- assert.eq(t.count(), n - 1);
-
- t.ensureIndex({loc: "2d"});
-
- // Test the "query" parameter in $geoNear.
-
- let res = t.aggregate([
- {$geoNear: {near: [50, 50], distanceField: "dist", query: {a: 2}}},
- {$limit: 10},
- ]).toArray();
- assert.eq(10, res.length, tojson(res));
- res.forEach(doc => assert.eq(2, doc.a, tojson(doc)));
-
- function avgA(q, len) {
- if (!len)
- len = 10;
- var realq = {loc: {$near: [50, 50]}};
- if (q)
- Object.extend(realq, q);
- var as = t.find(realq).limit(len).map(function(z) {
- return z.a;
- });
- assert.eq(len, as.length, "length in avgA");
- return Array.avg(as);
- }
-
- function testFiltering(msg) {
- assert.gt(2, avgA({}), msg + " testFiltering 1 ");
- assert.eq(2, avgA({a: 2}), msg + " testFiltering 2 ");
- assert.eq(4, avgA({a: 4}), msg + " testFiltering 3 ");
+t = db.geo3;
+t.drop();
+
+n = 1;
+arr = [];
+for (var x = -100; x < 100; x += 2) {
+ for (var y = -100; y < 100; y += 2) {
+ arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5});
}
+}
+t.insert(arr);
+assert.eq(t.count(), 100 * 100);
+assert.eq(t.count(), n - 1);
- testFiltering("just loc");
+t.ensureIndex({loc: "2d"});
- assert.commandWorked(t.dropIndex({loc: "2d"}));
- assert.commandWorked(t.ensureIndex({loc: "2d", a: 1}));
+// Test the "query" parameter in $geoNear.
- res = t.aggregate([
+let res = t.aggregate([
{$geoNear: {near: [50, 50], distanceField: "dist", query: {a: 2}}},
{$limit: 10},
]).toArray();
- assert.eq(10, res.length, "B3");
- res.forEach(doc => assert.eq(2, doc.a, tojson(doc)));
-
- testFiltering("loc and a");
-
- assert.commandWorked(t.dropIndex({loc: "2d", a: 1}));
- assert.commandWorked(t.ensureIndex({loc: "2d", b: 1}));
-
- testFiltering("loc and b");
-
- q = {loc: {$near: [50, 50]}};
- assert.eq(100, t.find(q).limit(100).itcount(), "D1");
- assert.eq(100, t.find(q).limit(100).size(), "D2");
-
- assert.eq(20, t.find(q).limit(20).itcount(), "D3");
- assert.eq(20, t.find(q).limit(20).size(), "D4");
-
- // SERVER-14039 Wrong limit after skip with $nearSphere, 2d index
- assert.eq(10, t.find(q).skip(10).limit(10).itcount(), "D5");
- assert.eq(10, t.find(q).skip(10).limit(10).size(), "D6");
+assert.eq(10, res.length, tojson(res));
+res.forEach(doc => assert.eq(2, doc.a, tojson(doc)));
+
+function avgA(q, len) {
+ if (!len)
+ len = 10;
+ var realq = {loc: {$near: [50, 50]}};
+ if (q)
+ Object.extend(realq, q);
+ var as = t.find(realq).limit(len).map(function(z) {
+ return z.a;
+ });
+ assert.eq(len, as.length, "length in avgA");
+ return Array.avg(as);
+}
+
+function testFiltering(msg) {
+ assert.gt(2, avgA({}), msg + " testFiltering 1 ");
+ assert.eq(2, avgA({a: 2}), msg + " testFiltering 2 ");
+ assert.eq(4, avgA({a: 4}), msg + " testFiltering 3 ");
+}
+
+testFiltering("just loc");
+
+assert.commandWorked(t.dropIndex({loc: "2d"}));
+assert.commandWorked(t.ensureIndex({loc: "2d", a: 1}));
+
+res = t.aggregate([
+ {$geoNear: {near: [50, 50], distanceField: "dist", query: {a: 2}}},
+ {$limit: 10},
+ ]).toArray();
+assert.eq(10, res.length, "B3");
+res.forEach(doc => assert.eq(2, doc.a, tojson(doc)));
+
+testFiltering("loc and a");
+
+assert.commandWorked(t.dropIndex({loc: "2d", a: 1}));
+assert.commandWorked(t.ensureIndex({loc: "2d", b: 1}));
+
+testFiltering("loc and b");
+
+q = {
+ loc: {$near: [50, 50]}
+};
+assert.eq(100, t.find(q).limit(100).itcount(), "D1");
+assert.eq(100, t.find(q).limit(100).size(), "D2");
+
+assert.eq(20, t.find(q).limit(20).itcount(), "D3");
+assert.eq(20, t.find(q).limit(20).size(), "D4");
+
+// SERVER-14039 Wrong limit after skip with $nearSphere, 2d index
+assert.eq(10, t.find(q).skip(10).limit(10).itcount(), "D5");
+assert.eq(10, t.find(q).skip(10).limit(10).size(), "D6");
}());
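A condensed sketch of the "query" option being tested: $geoNear applies the filter to candidates before returning them, so every result satisfies the predicate. Collection name and data here are hypothetical:

var g = db.geonear_query_demo;
g.drop();
assert.commandWorked(g.ensureIndex({loc: "2d"}));
g.insert([{loc: [1, 1], a: 2}, {loc: [2, 2], a: 3}]);
var out = g.aggregate([{$geoNear: {near: [0, 0], distanceField: "dist", query: {a: 2}}}]).toArray();
assert.eq(1, out.length);
out.forEach(doc => assert.eq(2, doc.a));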
diff --git a/jstests/core/geo_2d_trailing_fields.js b/jstests/core/geo_2d_trailing_fields.js
index 60c1e207ca7..3cb25a6e9ce 100644
--- a/jstests/core/geo_2d_trailing_fields.js
+++ b/jstests/core/geo_2d_trailing_fields.js
@@ -1,49 +1,47 @@
// Tests for predicates which can use the trailing field of a 2d index.
(function() {
- "use strict";
-
- const coll = db.geo_2d_trailing_fields;
-
- const isMaster = assert.commandWorked(db.adminCommand({isMaster: 1}));
-
- coll.drop();
- assert.commandWorked(coll.createIndex({a: "2d", b: 1}));
- assert.writeOK(coll.insert({a: [0, 0]}));
-
- // Verify that $near queries handle existence predicates over the trailing fields correctly.
- assert.eq(0, coll.find({a: {$near: [0, 0]}, b: {$exists: true}}).itcount());
- assert.eq(1, coll.find({a: {$near: [0, 0]}, b: null}).itcount());
- assert.eq(1, coll.find({a: {$near: [0, 0]}, b: {$exists: false}}).itcount());
-
- // Verify that non-near 2d queries handle existence predicates over the trailing fields
- // correctly.
- assert.eq(0,
- coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}).itcount());
- assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: null}).itcount());
- assert.eq(1,
- coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: false}}).itcount());
-
- coll.drop();
- assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1}));
- assert.writeOK(coll.insert({a: [0, 0], b: [{c: 2}, {c: 3}]}));
-
- // Verify that $near queries correctly handle predicates which cannot be covered due to array
- // semantics.
- assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": [2, 3]}).itcount());
- assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": {$type: "array"}}).itcount());
-
- // Verify that non-near 2d queries correctly handle predicates which cannot be covered due to
- // array semantics.
- assert.eq(0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": [2, 3]}).itcount());
- assert.eq(
- 0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": {$type: "array"}}).itcount());
-
- coll.drop();
- assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1}));
- assert.writeOK(coll.insert({a: [0, 0], b: [{c: 1}, {c: 2}]}));
-
- // Verify that non-near 2d queries correctly handle predicates which cannot be covered due to
- // array semantics.
- assert.eq(
- 1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}).itcount());
+"use strict";
+
+const coll = db.geo_2d_trailing_fields;
+
+const isMaster = assert.commandWorked(db.adminCommand({isMaster: 1}));
+
+coll.drop();
+assert.commandWorked(coll.createIndex({a: "2d", b: 1}));
+assert.writeOK(coll.insert({a: [0, 0]}));
+
+// Verify that $near queries handle existence predicates over the trailing fields correctly.
+assert.eq(0, coll.find({a: {$near: [0, 0]}, b: {$exists: true}}).itcount());
+assert.eq(1, coll.find({a: {$near: [0, 0]}, b: null}).itcount());
+assert.eq(1, coll.find({a: {$near: [0, 0]}, b: {$exists: false}}).itcount());
+
+// Verify that non-near 2d queries handle existence predicates over the trailing fields
+// correctly.
+assert.eq(0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}).itcount());
+assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: null}).itcount());
+assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: false}}).itcount());
+
+coll.drop();
+assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1}));
+assert.writeOK(coll.insert({a: [0, 0], b: [{c: 2}, {c: 3}]}));
+
+// Verify that $near queries correctly handle predicates which cannot be covered due to array
+// semantics.
+assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": [2, 3]}).itcount());
+assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": {$type: "array"}}).itcount());
+
+// Verify that non-near 2d queries correctly handle predicates which cannot be covered due to
+// array semantics.
+assert.eq(0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": [2, 3]}).itcount());
+assert.eq(0,
+ coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": {$type: "array"}}).itcount());
+
+coll.drop();
+assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1}));
+assert.writeOK(coll.insert({a: [0, 0], b: [{c: 1}, {c: 2}]}));
+
+// Verify that non-near 2d queries correctly handle predicates which cannot be covered due to
+// array semantics.
+assert.eq(1,
+ coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}).itcount());
}());
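The array-semantics caveat in the comments above comes down to index key generation: a multikey trailing field stores one key per array element, never the array as a whole, so whole-array predicates must be verified against the fetched document. A sketch under that assumption (hypothetical collection name):

var e = db.trailing_array_demo;
e.drop();
assert.commandWorked(e.createIndex({a: "2d", "b.c": 1}));
assert.writeOK(e.insert({a: [0, 0], b: [{c: 2}, {c: 3}]}));
assert.eq(1, e.find({"b.c": 2}).itcount());       // matches a single element
assert.eq(0, e.find({"b.c": [2, 3]}).itcount());  // no element's 'c' equals the array [2, 3]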
diff --git a/jstests/core/geo_2d_with_geojson_point.js b/jstests/core/geo_2d_with_geojson_point.js
index 23592e004f8..5c30d6e30ab 100644
--- a/jstests/core/geo_2d_with_geojson_point.js
+++ b/jstests/core/geo_2d_with_geojson_point.js
@@ -9,5 +9,5 @@ t.ensureIndex({loc: '2d'});
var geoJSONPoint = {type: 'Point', coordinates: [0, 0]};
print(assert.throws(function() {
- t.findOne({loc: {$near: {$geometry: geoJSONPoint}}});
-}, [], 'querying 2d index with GeoJSON point.'));
+ t.findOne({loc: {$near: {$geometry: geoJSONPoint}}});
+ }, [], 'querying 2d index with GeoJSON point.'));
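The throw above is specific to the legacy 2d index, which only understands [x, y] coordinate pairs; the same $geometry query should succeed against a 2dsphere index. A sketch (hypothetical collection name):

var s = db.geojson_2dsphere_demo;
s.drop();
assert.commandWorked(s.createIndex({loc: "2dsphere"}));
assert.writeOK(s.insert({loc: {type: 'Point', coordinates: [0, 0]}}));
assert.eq(1, s.find({loc: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}).itcount());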
diff --git a/jstests/core/geo_big_polygon2.js b/jstests/core/geo_big_polygon2.js
index 3f93d44ef0a..1c4e0d42b87 100644
--- a/jstests/core/geo_big_polygon2.js
+++ b/jstests/core/geo_big_polygon2.js
@@ -36,386 +36,387 @@ var objects = [
{name: "just north of equator", geo: {type: "Point", coordinates: [-97.9, 0.1]}},
{name: "just south of equator", geo: {type: "Point", coordinates: [-97.9, -0.1]}},
{
- name: "north pole - crs84CRS",
- geo: {type: "Point", coordinates: [-97.9, 90.0], crs: crs84CRS}
- },
- {
- name: "south pole - epsg4326CRS",
- geo: {type: "Point", coordinates: [-97.9, -90.0], crs: epsg4326CRS}
+ name: "north pole - crs84CRS",
+ geo: {type: "Point", coordinates: [-97.9, 90.0], crs: crs84CRS}
+ },
+ {
+ name: "south pole - epsg4326CRS",
+ geo: {type: "Point", coordinates: [-97.9, -90.0], crs: epsg4326CRS}
+ },
+ {
+ name: "short line string: PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
+ geo: {
+ type: "LineString",
+ coordinates: [
+ [-122.1611953, 37.4420407],
+ [-118.283638, 34.028517],
+ [-109.045223, 36.9990835],
+ [-97.850404, 30.3921555],
+ [-97.904187, 30.395457],
+ [-86.600836, 30.398147],
+ [-77.357837, 38.9589935],
+ [-73.987723, 40.7575074]
+ ]
+ }
+ },
+ {
+ name: "1024 point long line string from south pole to north pole",
+ geo: {type: "LineString", coordinates: genLonLine(2.349902, -90.0, 90.0, 180.0 / 1024)}
+ },
+ {
+ name: "line crossing equator - epsg4326CRS",
+ geo: {
+ type: "LineString",
+ coordinates: [[-77.0451853, -12.0553442], [-76.7784557, 18.0098528]],
+ crs: epsg4326CRS
+ }
+ },
+ {
+ name: "GeoJson polygon",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]]]
+ }
+ },
+ {
+ name: "polygon w/ hole",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]]
+ ]
+ }
+ },
+ {
+ name: "polygon w/ two holes",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]],
+ [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]]
+ ]
+ }
+ },
+ {
+ name: "polygon covering North pole",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, 89.0], [0.0, 89.0], [120.0, 89.0], [-120.0, 89.0]]]
+ }
+ },
+ {
+ name: "polygon covering South pole",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, -89.0], [0.0, -89.0], [120.0, -89.0], [-120.0, -89.0]]]
+ }
+ },
+ {
+ name: "big polygon/rectangle covering both poles",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-130.0, 89.0], [-120.0, 89.0], [-120.0, -89.0], [-130.0, -89.0], [-130.0, 89.0]]
+ ],
+ crs: strictCRS
+ }
+ },
+ {
+ name: "polygon (triangle) w/ hole at North pole",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-120.0, 80.0], [0.0, 80.0], [120.0, 80.0], [-120.0, 80.0]],
+ [[-120.0, 88.0], [0.0, 88.0], [120.0, 88.0], [-120.0, 88.0]]
+ ]
+ }
},
- {
- name: "short line string: PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
- geo: {
- type: "LineString",
- coordinates: [
- [-122.1611953, 37.4420407],
- [-118.283638, 34.028517],
- [-109.045223, 36.9990835],
- [-97.850404, 30.3921555],
- [-97.904187, 30.395457],
- [-86.600836, 30.398147],
- [-77.357837, 38.9589935],
- [-73.987723, 40.7575074]
- ]
- }
+ {
+ name: "polygon with edge on equator",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, 0.0], [120.0, 0.0], [0.0, 90.0], [-120.0, 0.0]]]
+ }
},
- {
- name: "1024 point long line string from south pole to north pole",
- geo: {type: "LineString", coordinates: genLonLine(2.349902, -90.0, 90.0, 180.0 / 1024)}
+ {
+ name: "polygon just inside single hemisphere (Northern) - China, California, Europe",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[120.0, 0.000001], [-120.0, 0.000001], [0.0, 0.000001], [120.0, 0.000001]]]
+ }
},
{
- name: "line crossing equator - epsg4326CRS",
- geo: {
- type: "LineString",
- coordinates: [[-77.0451853, -12.0553442], [-76.7784557, 18.0098528]],
- crs: epsg4326CRS
- }
+ name: "polygon inside Northern hemisphere",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[120.0, 80.0], [-120.0, 80.0], [0.0, 80.0], [120.0, 80.0]]]
+ }
},
{
- name: "GeoJson polygon",
- geo: {
- type: "Polygon",
- coordinates:
- [[[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]]]
- }
+ name: "polygon just inside a single hemisphere (Southern) - Pacific, Indonesia, Africa",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[-120.0, -0.000001], [120.0, -0.000001], [0.0, -0.000001], [-120.0, -0.000001]]]
+ }
},
{
- name: "polygon w/ hole",
- geo: {
- type: "Polygon",
- coordinates: [
- [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
- [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]]
- ]
- }
- },
- {
- name: "polygon w/ two holes",
- geo: {
- type: "Polygon",
- coordinates: [
- [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
- [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]],
- [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]]
- ]
- }
+ name: "polygon inside Southern hemisphere",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, -80.0], [120.0, -80.0], [0.0, -80.0], [-120.0, -80.0]]]
+ }
+ },
+ {
+ name: "single point (MultiPoint): Palo Alto",
+ geo: {type: "MultiPoint", coordinates: [[-122.1611953, 37.4420407]]}
+ },
+ {
+ name: "multiple points(MultiPoint): PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [
+ [-122.1611953, 37.4420407],
+ [-118.283638, 34.028517],
+ [-109.045223, 36.9990835],
+ [-97.850404, 30.3921555],
+ [-97.904187, 30.395457],
+ [-86.600836, 30.398147],
+ [-77.357837, 38.9589935],
+ [-73.987723, 40.7575074]
+ ]
+ }
},
{
- name: "polygon covering North pole",
- geo: {
- type: "Polygon",
- coordinates: [[[-120.0, 89.0], [0.0, 89.0], [120.0, 89.0], [-120.0, 89.0]]]
- }
+ name: "two points (MultiPoint): Shenzhen, Guangdong, China",
+ geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [114.022837, 22.44395]]}
+ },
+ {
+ name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China",
+ geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]}
+ },
+ {
+ name: "multi line string: new zealand bays",
+ geo: {
+ type: "MultiLineString",
+ coordinates: [
+ [
+ [172.803869, -43.592789],
+ [172.659335, -43.620348],
+ [172.684038, -43.636528],
+ [172.820922, -43.605325]
+ ],
+ [
+ [172.830497, -43.607768],
+ [172.813263, -43.656319],
+ [172.823096, -43.660996],
+ [172.850943, -43.607609]
+ ],
+ [
+ [172.912056, -43.623148],
+ [172.887696, -43.670897],
+ [172.900469, -43.676178],
+ [172.931735, -43.622839]
+ ]
+ ]
+ }
},
- {
- name: "polygon covering South pole",
- geo: {
- type: "Polygon",
- coordinates: [[[-120.0, -89.0], [0.0, -89.0], [120.0, -89.0], [-120.0, -89.0]]]
- }
- },
- {
- name: "big polygon/rectangle covering both poles",
- geo: {
- type: "Polygon",
- coordinates:
- [[[-130.0, 89.0], [-120.0, 89.0], [-120.0, -89.0], [-130.0, -89.0], [-130.0, 89.0]]],
- crs: strictCRS
- }
- },
- {
- name: "polygon (triangle) w/ hole at North pole",
- geo: {
- type: "Polygon",
- coordinates: [
- [[-120.0, 80.0], [0.0, 80.0], [120.0, 80.0], [-120.0, 80.0]],
- [[-120.0, 88.0], [0.0, 88.0], [120.0, 88.0], [-120.0, 88.0]]
- ]
- }
- },
- {
- name: "polygon with edge on equator",
- geo: {
- type: "Polygon",
- coordinates: [[[-120.0, 0.0], [120.0, 0.0], [0.0, 90.0], [-120.0, 0.0]]]
- }
- },
- {
- name: "polygon just inside single hemisphere (Northern) - China, California, Europe",
- geo: {
- type: "Polygon",
- coordinates:
- [[[120.0, 0.000001], [-120.0, 0.000001], [0.0, 0.000001], [120.0, 0.000001]]]
- }
- },
- {
- name: "polygon inside Northern hemisphere",
- geo: {
- type: "Polygon",
- coordinates: [[[120.0, 80.0], [-120.0, 80.0], [0.0, 80.0], [120.0, 80.0]]]
- }
- },
- {
- name: "polygon just inside a single hemisphere (Southern) - Pacific, Indonesia, Africa",
- geo: {
- type: "Polygon",
- coordinates:
- [[[-120.0, -0.000001], [120.0, -0.000001], [0.0, -0.000001], [-120.0, -0.000001]]]
- }
- },
- {
- name: "polygon inside Southern hemisphere",
- geo: {
- type: "Polygon",
- coordinates: [[[-120.0, -80.0], [120.0, -80.0], [0.0, -80.0], [-120.0, -80.0]]]
- }
- },
- {
- name: "single point (MultiPoint): Palo Alto",
- geo: {type: "MultiPoint", coordinates: [[-122.1611953, 37.4420407]]}
- },
- {
- name: "multiple points(MultiPoint): PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [-122.1611953, 37.4420407],
- [-118.283638, 34.028517],
- [-109.045223, 36.9990835],
- [-97.850404, 30.3921555],
- [-97.904187, 30.395457],
- [-86.600836, 30.398147],
- [-77.357837, 38.9589935],
- [-73.987723, 40.7575074]
- ]
- }
- },
- {
- name: "two points (MultiPoint): Shenzhen, Guangdong, China",
- geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [114.022837, 22.44395]]}
- },
- {
- name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China",
- geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]}
- },
- {
- name: "multi line string: new zealand bays",
- geo: {
- type: "MultiLineString",
- coordinates: [
- [
- [172.803869, -43.592789],
- [172.659335, -43.620348],
- [172.684038, -43.636528],
- [172.820922, -43.605325]
- ],
- [
- [172.830497, -43.607768],
- [172.813263, -43.656319],
- [172.823096, -43.660996],
- [172.850943, -43.607609]
- ],
- [
- [172.912056, -43.623148],
- [172.887696, -43.670897],
- [172.900469, -43.676178],
- [172.931735, -43.622839]
- ]
- ]
- }
- },
- {
- name: "multi polygon: new zealand north and south islands",
- geo: {
- type: "MultiPolygon",
- coordinates: [
- [[
- [165.773255, -45.902933],
- [169.398419, -47.261538],
- [174.672744, -41.767722],
- [172.288845, -39.897992],
- [165.773255, -45.902933]
- ]],
- [[
- [173.166448, -39.778262],
- [175.342744, -42.677333],
- [179.913373, -37.224362],
- [171.475953, -32.688871],
- [173.166448, -39.778262]
- ]]
- ]
- }
- },
- {
- name: "geometry collection: point in Australia and triangle around Australia",
- geo: {
- type: "GeometryCollection",
- geometries: [
- {name: "center of Australia", type: "Point", coordinates: [133.985885, -27.240790]},
- {
- name: "Triangle around Australia",
- type: "Polygon",
- coordinates: [[
- [97.423178, -44.735405],
- [169.845050, -38.432287],
- [143.824366, 15.966509],
- [97.423178, -44.735405]
+ {
+ name: "multi polygon: new zealand north and south islands",
+ geo: {
+ type: "MultiPolygon",
+ coordinates: [
+ [[
+ [165.773255, -45.902933],
+ [169.398419, -47.261538],
+ [174.672744, -41.767722],
+ [172.288845, -39.897992],
+ [165.773255, -45.902933]
+ ]],
+ [[
+ [173.166448, -39.778262],
+ [175.342744, -42.677333],
+ [179.913373, -37.224362],
+ [171.475953, -32.688871],
+ [173.166448, -39.778262]
]]
- }
- ]
- }
+ ]
+ }
+ },
+ {
+ name: "geometry collection: point in Australia and triangle around Australia",
+ geo: {
+ type: "GeometryCollection",
+ geometries: [
+ {name: "center of Australia", type: "Point", coordinates: [133.985885, -27.240790]},
+ {
+ name: "Triangle around Australia",
+ type: "Polygon",
+ coordinates: [[
+ [97.423178, -44.735405],
+ [169.845050, -38.432287],
+ [143.824366, 15.966509],
+ [97.423178, -44.735405]
+ ]]
+ }
+ ]
+ }
}
];
// Test various polygons which are not queryable
var badPolys = [
{
- name: "Polygon with bad CRS",
- type: "Polygon",
- coordinates: [[
- [114.0834046, 22.6648202],
- [113.8293457, 22.3819359],
- [114.2736054, 22.4047911],
- [114.0834046, 22.6648202]
- ]],
- crs: badCRS
- },
- {
- name: "Open polygon < 3 sides",
- type: "Polygon",
- coordinates: [[[114.0834046, 22.6648202], [113.8293457, 22.3819359]]],
- crs: strictCRS
- },
- {
- name: "Open polygon > 3 sides",
- type: "Polygon",
- coordinates: [[
- [114.0834046, 22.6648202],
- [113.8293457, 22.3819359],
- [114.2736054, 22.4047911],
- [114.1, 22.5]
- ]],
- crs: strictCRS
- },
- {
- name: "duplicate non-adjacent points",
- type: "Polygon",
- coordinates: [[
- [114.0834046, 22.6648202],
- [113.8293457, 22.3819359],
- [114.2736054, 22.4047911],
- [113.8293457, 22.3819359],
- [-65.9165954, 22.6648202],
- [114.0834046, 22.6648202]
- ]],
- crs: strictCRS
- },
- {
- name: "One hole in polygon",
- type: "Polygon",
- coordinates: [
- [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
- [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]]
- ],
- crs: strictCRS
- },
- {
- name: "2 holes in polygon",
- type: "Polygon",
- coordinates: [
- [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
- [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]],
- [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]]
- ],
- crs: strictCRS
- },
- {
- name: "complex polygon (edges cross)",
- type: "Polygon",
- coordinates: [[[10.0, 10.0], [20.0, 10.0], [10.0, 20.0], [20.0, 20.0], [10.0, 10.0]]],
- crs: strictCRS
+ name: "Polygon with bad CRS",
+ type: "Polygon",
+ coordinates: [[
+ [114.0834046, 22.6648202],
+ [113.8293457, 22.3819359],
+ [114.2736054, 22.4047911],
+ [114.0834046, 22.6648202]
+ ]],
+ crs: badCRS
+ },
+ {
+ name: "Open polygon < 3 sides",
+ type: "Polygon",
+ coordinates: [[[114.0834046, 22.6648202], [113.8293457, 22.3819359]]],
+ crs: strictCRS
+ },
+ {
+ name: "Open polygon > 3 sides",
+ type: "Polygon",
+ coordinates: [[
+ [114.0834046, 22.6648202],
+ [113.8293457, 22.3819359],
+ [114.2736054, 22.4047911],
+ [114.1, 22.5]
+ ]],
+ crs: strictCRS
+ },
+ {
+ name: "duplicate non-adjacent points",
+ type: "Polygon",
+ coordinates: [[
+ [114.0834046, 22.6648202],
+ [113.8293457, 22.3819359],
+ [114.2736054, 22.4047911],
+ [113.8293457, 22.3819359],
+ [-65.9165954, 22.6648202],
+ [114.0834046, 22.6648202]
+ ]],
+ crs: strictCRS
+ },
+ {
+ name: "One hole in polygon",
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]]
+ ],
+ crs: strictCRS
+ },
+ {
+ name: "2 holes in polygon",
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]],
+ [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]]
+ ],
+ crs: strictCRS
+ },
+ {
+ name: "complex polygon (edges cross)",
+ type: "Polygon",
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [10.0, 20.0], [20.0, 20.0], [10.0, 10.0]]],
+ crs: strictCRS
}
];
// Closed polygons used in query (3, 4, 5, 6-sided)
var polys = [
{
- name: "3 sided closed polygon",
- type: "Polygon", // triangle
- coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
- crs: strictCRS,
- nW: 0,
- nI: 1
- },
- {
- name: "3 sided closed polygon (non-big)",
- type: "Polygon", // triangle
- coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
- nW: 0,
- nI: 1
- },
- {
- name: "4 sided closed polygon",
- type: "Polygon", // rectangle
- coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]],
- crs: strictCRS,
- nW: 0,
- nI: 1
- },
- {
- name: "4 sided closed polygon (non-big)",
- type: "Polygon", // rectangle
- coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]],
- nW: 0,
- nI: 1
- },
- {
- name: "5 sided closed polygon",
- type: "Polygon", // pentagon
- coordinates:
- [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]],
- crs: strictCRS,
- nW: 0,
- nI: 1
- },
- {
- name: "5 sided closed polygon (non-big)",
- type: "Polygon", // pentagon
- coordinates:
- [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]],
- nW: 0,
- nI: 1
- },
- {
- name: "6 sided closed polygon",
- type: "Polygon", // hexagon
- coordinates: [[
- [10.0, 10.0],
- [15.0, 10.0],
- [22.0, 15.0],
- [15.0, 20.0],
- [10.0, 20.0],
- [7.0, 15.0],
- [10.0, 10.0]
- ]],
- crs: strictCRS,
- nW: 0,
- nI: 1
- },
- {
- name: "6 sided closed polygon (non-big)",
- type: "Polygon", // hexagon
- coordinates: [[
- [10.0, 10.0],
- [15.0, 10.0],
- [22.0, 15.0],
- [15.0, 20.0],
- [10.0, 20.0],
- [7.0, 15.0],
- [10.0, 10.0]
- ]],
- nW: 0,
- nI: 1
+ name: "3 sided closed polygon",
+ type: "Polygon", // triangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "3 sided closed polygon (non-big)",
+ type: "Polygon", // triangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "4 sided closed polygon",
+ type: "Polygon", // rectangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "4 sided closed polygon (non-big)",
+ type: "Polygon", // rectangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]],
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "5 sided closed polygon",
+ type: "Polygon", // pentagon
+ coordinates:
+ [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "5 sided closed polygon (non-big)",
+ type: "Polygon", // pentagon
+ coordinates:
+ [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]],
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "6 sided closed polygon",
+ type: "Polygon", // hexagon
+ coordinates: [[
+ [10.0, 10.0],
+ [15.0, 10.0],
+ [22.0, 15.0],
+ [15.0, 20.0],
+ [10.0, 20.0],
+ [7.0, 15.0],
+ [10.0, 10.0]
+ ]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "6 sided closed polygon (non-big)",
+ type: "Polygon", // hexagon
+ coordinates: [[
+ [10.0, 10.0],
+ [15.0, 10.0],
+ [22.0, 15.0],
+ [15.0, 20.0],
+ [10.0, 20.0],
+ [7.0, 15.0],
+ [10.0, 10.0]
+ ]],
+ nW: 0,
+ nI: 1
}
];
@@ -477,67 +478,67 @@ var totalObjects = getNumberOfValidObjects(objects);
var nsidedPolys = [
// Big Polygon centered on 0, 0
{
- name: "4 sided polygon centered on 0, 0",
- type: "Polygon",
- coordinates: [nGonGenerator(4, 30, true, 0, 0)],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects
+ name: "4 sided polygon centered on 0, 0",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 30, true, 0, 0)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects
},
// Non-big polygons have counterclockwise coordinates
{
- name: "4 sided polygon centered on 0, 0 (non-big)",
- type: "Polygon",
- coordinates: [nGonGenerator(4, 30, false, 0, 0)],
- nW: 0,
- nI: 3
+ name: "4 sided polygon centered on 0, 0 (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 30, false, 0, 0)],
+ nW: 0,
+ nI: 3
},
{
- name: "100 sided polygon centered on 0, 0",
- type: "Polygon",
- coordinates: [nGonGenerator(100, 20, true, 0, 0)],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects
+ name: "100 sided polygon centered on 0, 0",
+ type: "Polygon",
+ coordinates: [nGonGenerator(100, 20, true, 0, 0)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects
},
{
- name: "100 sided polygon centered on 0, 0 (non-big)",
- type: "Polygon",
- coordinates: [nGonGenerator(100, 20, false, 0, 0)],
- nW: 0,
- nI: 3
+ name: "100 sided polygon centered on 0, 0 (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(100, 20, false, 0, 0)],
+ nW: 0,
+ nI: 3
},
{
- name: "5000 sided polygon centered on 0, 0 (non-big)",
- type: "Polygon",
- coordinates: [nGonGenerator(5000, 89.99, false, 0, 0)],
- nW: 0,
- nI: 3
+ name: "5000 sided polygon centered on 0, 0 (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(5000, 89.99, false, 0, 0)],
+ nW: 0,
+ nI: 3
},
{
- name: "25000 sided polygon centered on 0, 0",
- type: "Polygon",
- coordinates: [nGonGenerator(25000, 89.99, true, 0, 0)],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects
+ name: "25000 sided polygon centered on 0, 0",
+ type: "Polygon",
+ coordinates: [nGonGenerator(25000, 89.99, true, 0, 0)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects
},
// Big polygon centered on Shenzhen
{
- name: "4 sided polygon centered on Shenzen",
- type: "Polygon",
- coordinates: [nGonGenerator(4, 5, true, 114.1, 22.55)],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects - 2
+ name: "4 sided polygon centered on Shenzen",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 5, true, 114.1, 22.55)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects - 2
},
{
- name: "4 sided polygon centered on Shenzen (non-big)",
- type: "Polygon",
- coordinates: [nGonGenerator(4, 5, false, 114.1, 22.55)],
- crs: strictCRS,
- nW: 2,
- nI: 3
+ name: "4 sided polygon centered on Shenzen (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 5, false, 114.1, 22.55)],
+ crs: strictCRS,
+ nW: 2,
+ nI: 3
}
];
@@ -567,7 +568,6 @@ totalObjects = coll.count();
var indexes = ["none", "2dsphere"];
indexes.forEach(function(index) {
-
// Reset indexes on collection
assert.commandWorked(coll.dropIndexes(), "drop indexes");
@@ -578,7 +578,6 @@ indexes.forEach(function(index) {
// These polygons should not be queryable
badPolys.forEach(function(p) {
-
// within
assert.throws(function() {
coll.count({geo: {$geoWithin: {$geometry: p}}});
@@ -592,7 +591,6 @@ indexes.forEach(function(index) {
// Tests for closed polygons
polys.forEach(function(p) {
-
// geoWithin query
var docArray = [];
var q = {geo: {$geoWithin: {$geometry: p}}};
@@ -622,19 +620,15 @@ indexes.forEach(function(index) {
bulk.insert(doc);
});
assert.eq(docArray.length, bulk.execute().nInserted, "reinsert " + p.name);
-
});
// test the n-sided closed polygons
nsidedPolys.forEach(function(p) {
-
// within
assert.eq(p.nW, coll.count({geo: {$geoWithin: {$geometry: p}}}), "within " + p.name);
// intersects
assert.eq(
p.nI, coll.count({geo: {$geoIntersects: {$geometry: p}}}), "intersection " + p.name);
-
});
-
});
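The convention these fixtures rely on (see the "Non-big polygons have counterclockwise coordinates" comment above): with the strict-winding CRS, ring direction is honored, so a clockwise ring denotes the "big polygon" covering everything outside it. A sketch under that assumption, with the CRS object inlined and a hypothetical collection name:

var strictCRS = {type: "name", properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}};
var bp = db.big_polygon_demo;
bp.drop();
assert.writeOK(bp.insert({geo: {type: "Point", coordinates: [100, 50]}}));  // far from the square
var bigPoly = {
    type: "Polygon",
    // Clockwise ring; with strictCRS the interior is everything outside this square.
    coordinates: [[[10, 10], [10, 20], [20, 20], [20, 10], [10, 10]]],
    crs: strictCRS
};
assert.eq(1, bp.count({geo: {$geoWithin: {$geometry: bigPoly}}}));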
diff --git a/jstests/core/geo_big_polygon3.js b/jstests/core/geo_big_polygon3.js
index 5adae06102e..424510f521a 100644
--- a/jstests/core/geo_big_polygon3.js
+++ b/jstests/core/geo_big_polygon3.js
@@ -28,21 +28,20 @@ coll.drop();
var objects = [
{name: "point with strictCRS", type: "Point", coordinates: [-97.9, 0], crs: strictCRS},
{
- name: "multipoint with strictCRS",
- type: "MultiPoint",
- coordinates: [[-97.9, 0], [-10.9, 0]],
- crs: strictCRS
+ name: "multipoint with strictCRS",
+ type: "MultiPoint",
+ coordinates: [[-97.9, 0], [-10.9, 0]],
+ crs: strictCRS
},
{
- name: "line with strictCRS",
- type: "LineString",
- coordinates: [[-122.1611953, 37.4420407], [-118.283638, 34.028517]],
- crs: strictCRS
+ name: "line with strictCRS",
+ type: "LineString",
+ coordinates: [[-122.1611953, 37.4420407], [-118.283638, 34.028517]],
+ crs: strictCRS
}
];
objects.forEach(function(o) {
-
// within
assert.throws(function() {
coll.count({geo: {$geoWithin: {$geometry: o}}});
@@ -99,34 +98,34 @@ assert.commandWorked(coll.dropIndex({geo: "2dsphere"}), "drop 2dsphere index");
objects = [
{
- name: "NYC Times Square - point",
- geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: strictCRS}
+ name: "NYC Times Square - point",
+ geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: strictCRS}
},
{
- name: "NYC CitiField & JFK - multipoint",
- geo: {
- type: "MultiPoint",
- coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
- crs: strictCRS
- }
+ name: "NYC CitiField & JFK - multipoint",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: strictCRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK - line/string",
- geo: {
- type: "LineString",
- coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
- crs: strictCRS
- }
+ name: "NYC - Times Square to CitiField to JFK - line/string",
+ geo: {
+ type: "LineString",
+ coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: strictCRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK to Times Square - polygon",
- geo: {
- type: "Polygon",
- coordinates: [
- [[-73.9857, 40.7577], [-73.7789, 40.6397], [-73.8458, 40.7569], [-73.9857, 40.7577]]
- ],
- crs: strictCRS
- }
+ name: "NYC - Times Square to CitiField to JFK to Times Square - polygon",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-73.9857, 40.7577], [-73.7789, 40.6397], [-73.8458, 40.7569], [-73.9857, 40.7577]]
+ ],
+ crs: strictCRS
+ }
}
];
@@ -165,44 +164,44 @@ coll.remove({});
// Objects should be found from query
objects = [
{
- name: "NYC Times Square - point crs84CRS",
- geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: crs84CRS}
+ name: "NYC Times Square - point crs84CRS",
+ geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: crs84CRS}
},
{
- name: "NYC Times Square - point epsg4326CRS",
- geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: epsg4326CRS}
+ name: "NYC Times Square - point epsg4326CRS",
+ geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: epsg4326CRS}
},
{
- name: "NYC CitiField & JFK - multipoint crs84CRS",
- geo: {
- type: "MultiPoint",
- coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
- crs: crs84CRS
- }
+ name: "NYC CitiField & JFK - multipoint crs84CRS",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: crs84CRS
+ }
},
{
- name: "NYC CitiField & JFK - multipoint epsg4326CRS",
- geo: {
- type: "MultiPoint",
- coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
- crs: epsg4326CRS
- }
+ name: "NYC CitiField & JFK - multipoint epsg4326CRS",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: epsg4326CRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK - line/string crs84CRS",
- geo: {
- type: "LineString",
- coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
- crs: crs84CRS
- }
+ name: "NYC - Times Square to CitiField to JFK - line/string crs84CRS",
+ geo: {
+ type: "LineString",
+ coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: crs84CRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK - line/string epsg4326CRS",
- geo: {
- type: "LineString",
- coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
- crs: epsg4326CRS
- }
+ name: "NYC - Times Square to CitiField to JFK - line/string epsg4326CRS",
+ geo: {
+ type: "LineString",
+ coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: epsg4326CRS
+ }
}
];
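The distinction this file exercises: crs84CRS and epsg4326CRS name the default WGS84 datum, so documents carrying them index and query like plain GeoJSON, whereas strictCRS geometries are query-only. A sketch of the expected behavior (hypothetical collection name; the CRS URNs are assumptions matching standard usage):

var crs84CRS = {type: "name", properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}};
var strictCRS = {type: "name", properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}};
var c3 = db.crs_demo;
c3.drop();
assert.commandWorked(c3.createIndex({geo: "2dsphere"}));
assert.writeOK(c3.insert({geo: {type: "Point", coordinates: [0, 0], crs: crs84CRS}}));
assert.writeError(c3.insert({geo: {type: "Point", coordinates: [1, 1], crs: strictCRS}}));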
diff --git a/jstests/core/geo_distinct.js b/jstests/core/geo_distinct.js
index f2064008ae9..965ec6f7a18 100644
--- a/jstests/core/geo_distinct.js
+++ b/jstests/core/geo_distinct.js
@@ -5,110 +5,113 @@
// @tags: [requires_fastcount]
(function() {
- "use strict";
- const coll = db.geo_distinct;
- let res;
-
- //
- // 1. Test distinct with geo values for 'key'.
- //
-
- coll.drop();
- assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
- assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
- assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
- assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
- assert.eq(4, coll.count());
-
- // Test distinct on GeoJSON points with/without a 2dsphere index.
-
- res = coll.runCommand('distinct', {key: 'loc'});
- assert.commandWorked(res);
- assert.eq(res.values.sort(bsonWoCompare),
- [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]);
-
- assert.commandWorked(coll.createIndex({loc: '2dsphere'}));
-
- res = coll.runCommand('distinct', {key: 'loc'});
- assert.commandWorked(res);
- assert.eq(res.values.sort(bsonWoCompare),
- [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]);
-
- // Test distinct on legacy points with/without a 2d index.
-
- // (Note that distinct on a 2d-indexed field doesn't produce a list of coordinate pairs, since
- // distinct logically operates on unique values in an array. Hence, the results are unintuitive
- // and not semantically meaningful.)
-
- assert.commandWorked(coll.dropIndexes());
-
- res = coll.runCommand('distinct', {key: 'loc.coordinates'});
- assert.commandWorked(res);
- assert.eq(res.values.sort(), [10, 20, 30]);
-
- assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'}));
-
- res = coll.runCommand('distinct', {key: 'loc.coordinates'});
- assert.commandWorked(res);
- assert.eq(res.values.sort(), [10, 20, 30]);
-
- //
- // 2. Test distinct with geo predicates for 'query'.
- //
-
- assert(coll.drop());
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 50; ++i) {
- bulk.insert({zone: 1, loc: {type: 'Point', coordinates: [-20, -20]}});
- bulk.insert({zone: 2, loc: {type: 'Point', coordinates: [-10, -10]}});
- bulk.insert({zone: 3, loc: {type: 'Point', coordinates: [0, 0]}});
- bulk.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}});
- bulk.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}});
- }
- assert.writeOK(bulk.execute());
-
- const originGeoJSON = {type: 'Point', coordinates: [0, 0]};
-
- // Test distinct with $nearSphere query predicate.
-
- // A. Unindexed key, no geo index on query predicate.
- res = coll.runCommand(
- 'distinct',
- {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
- assert.commandFailed(res);
- // B. Unindexed key, with 2dsphere index on query predicate.
- assert.commandWorked(coll.createIndex({loc: '2dsphere'}));
- res = coll.runCommand(
- 'distinct',
- {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
- assert.commandWorked(res);
- assert.eq(res.values.sort(), [3]);
- // C. Indexed key, with 2dsphere index on query predicate.
- assert.commandWorked(coll.createIndex({zone: 1}));
- res = coll.runCommand(
- 'distinct',
- {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
- assert.commandWorked(res);
- assert.eq(res.values.sort(), [3]);
-
- // Test distinct with $near query predicate.
-
- assert.commandWorked(coll.dropIndexes());
-
- // A. Unindexed key, no geo index on query predicate.
- res = coll.runCommand(
- 'distinct', {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
- assert.commandFailed(res);
- // B. Unindexed key, with 2d index on query predicate.
- assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'}));
- res = coll.runCommand(
- 'distinct', {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
- assert.commandWorked(res);
- assert.eq(res.values.sort(), [3]);
- // C. Indexed key, with 2d index on query predicate.
- assert.commandWorked(coll.createIndex({zone: 1}));
- res = coll.runCommand(
- 'distinct', {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
- assert.commandWorked(res);
- assert.eq(res.values.sort(), [3]);
+"use strict";
+const coll = db.geo_distinct;
+let res;
+
+//
+// 1. Test distinct with geo values for 'key'.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
+assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}}));
+assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
+assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}}));
+assert.eq(4, coll.count());
+
+// Test distinct on GeoJSON points with/without a 2dsphere index.
+
+res = coll.runCommand('distinct', {key: 'loc'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(bsonWoCompare),
+ [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]);
+
+assert.commandWorked(coll.createIndex({loc: '2dsphere'}));
+
+res = coll.runCommand('distinct', {key: 'loc'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(bsonWoCompare),
+ [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]);
+
+// Test distinct on legacy points with/without a 2d index.
+
+// (Note that distinct on a 2d-indexed field doesn't produce a list of coordinate pairs, since
+// distinct logically operates on unique values in an array. Hence, the results are unintuitive
+// and not semantically meaningful.)
+
+assert.commandWorked(coll.dropIndexes());
+
+res = coll.runCommand('distinct', {key: 'loc.coordinates'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [10, 20, 30]);
+
+assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'}));
+
+res = coll.runCommand('distinct', {key: 'loc.coordinates'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [10, 20, 30]);
+
+//
+// 2. Test distinct with geo predicates for 'query'.
+//
+
+assert(coll.drop());
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 50; ++i) {
+ bulk.insert({zone: 1, loc: {type: 'Point', coordinates: [-20, -20]}});
+ bulk.insert({zone: 2, loc: {type: 'Point', coordinates: [-10, -10]}});
+ bulk.insert({zone: 3, loc: {type: 'Point', coordinates: [0, 0]}});
+ bulk.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}});
+ bulk.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}});
+}
+assert.writeOK(bulk.execute());
+
+const originGeoJSON = {
+ type: 'Point',
+ coordinates: [0, 0]
+};
+
+// Test distinct with $nearSphere query predicate.
+
+// A. Unindexed key, no geo index on query predicate.
+res = coll.runCommand(
+ 'distinct',
+ {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
+assert.commandFailed(res);
+// B. Unindexed key, with 2dsphere index on query predicate.
+assert.commandWorked(coll.createIndex({loc: '2dsphere'}));
+res = coll.runCommand(
+ 'distinct',
+ {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
+// C. Indexed key, with 2dsphere index on query predicate.
+assert.commandWorked(coll.createIndex({zone: 1}));
+res = coll.runCommand(
+ 'distinct',
+ {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
+
+// Test distinct with $near query predicate.
+
+assert.commandWorked(coll.dropIndexes());
+
+// A. Unindexed key, no geo index on query predicate.
+res = coll.runCommand('distinct',
+ {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
+assert.commandFailed(res);
+// B. Unindexed key, with 2d index on query predicate.
+assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'}));
+res = coll.runCommand('distinct',
+ {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
+// C. Indexed key, with 2d index on query predicate.
+assert.commandWorked(coll.createIndex({zone: 1}));
+res = coll.runCommand('distinct',
+ {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
}());
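The pattern running through cases A-C above: a $near or $nearSphere predicate always requires a matching geo index, even when it only appears in another command's 'query', while an index on the distinct key itself is optional. A condensed sketch (hypothetical collection name):

var d = db.distinct_near_demo;
d.drop();
assert.writeOK(d.insert({zone: 3, loc: {type: 'Point', coordinates: [0, 0]}}));
var q = {loc: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}}}};
assert.commandFailed(d.runCommand('distinct', {key: 'zone', query: q}));
assert.commandWorked(d.createIndex({loc: '2dsphere'}));
assert.commandWorked(d.runCommand('distinct', {key: 'zone', query: q}));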
diff --git a/jstests/core/geo_fiddly_box.js b/jstests/core/geo_fiddly_box.js
index b99a28c01f7..efb185e2dfd 100644
--- a/jstests/core/geo_fiddly_box.js
+++ b/jstests/core/geo_fiddly_box.js
@@ -42,15 +42,14 @@ for (var x = min; x <= max; x += step) {
}
}
-assert.eq(numItems,
- t.count({
- loc: {
- $within: {
- $box: [
- [min - epsilon / 3, min - epsilon / 3],
- [max + epsilon / 3, max + epsilon / 3]
- ]
- }
- }
- }),
- "Not all locations found!");
+assert.eq(
+ numItems,
+ t.count({
+ loc: {
+ $within: {
+ $box:
+ [[min - epsilon / 3, min - epsilon / 3], [max + epsilon / 3, max + epsilon / 3]]
+ }
+ }
+ }),
+ "Not all locations found!");
diff --git a/jstests/core/geo_mapreduce2.js b/jstests/core/geo_mapreduce2.js
index d7f73ce3d69..43eaffed82e 100644
--- a/jstests/core/geo_mapreduce2.js
+++ b/jstests/core/geo_mapreduce2.js
@@ -21,7 +21,6 @@ m = function() {
// reduce function
r = function(key, values) {
-
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i].count;
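Worth noting about the reduce function above: the server may invoke reduce repeatedly on partial results, so it must be associative and return the same shape it consumes. A hypothetical map/reduce pair following that rule:

var mapFn = function() {
    emit(this.team, {count: 1});
};
var reduceFn = function(key, values) {
    var total = 0;
    for (var i = 0; i < values.length; i++) {
        total += values[i].count;
    }
    return {count: total};  // same {count: N} shape that map emits
};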
diff --git a/jstests/core/geo_mindistance.js b/jstests/core/geo_mindistance.js
index 92ccc617cf5..4ca58b26003 100644
--- a/jstests/core/geo_mindistance.js
+++ b/jstests/core/geo_mindistance.js
@@ -2,253 +2,247 @@
// @tags: [requires_fastcount, requires_getmore]
(function() {
- "use strict";
-
- load("jstests/libs/geo_math.js");
-
- var t = db.geo_mindistance;
- t.drop();
-
- const km = 1000;
-
- /**
- * Asserts that two numeric values are equal within some absolute error.
- */
- function assertApproxEqual(rhs, lhs, error, msg) {
- assert.lt(Math.abs(rhs - rhs), error, msg);
- }
-
- /**
- * Count documents within some radius of (0, 0), in kilometers. With this function we can use
- * the existing $maxDistance option to test the newer $minDistance option's behavior.
- */
- function n_docs_within(radius_km) {
- // $geoNear's distances are in meters for geoJSON points.
- return t
- .aggregate([
- {
- $geoNear: {
- near: {type: 'Point', coordinates: [0, 0]},
- distanceField: "dis",
- spherical: true,
- maxDistance: radius_km * km,
- }
- },
- {$limit: 1000}
- ])
- .itcount();
+"use strict";
+
+load("jstests/libs/geo_math.js");
+
+var t = db.geo_mindistance;
+t.drop();
+
+const km = 1000;
+
+/**
+ * Asserts that two numeric values are equal within some absolute error.
+ */
+function assertApproxEqual(rhs, lhs, error, msg) {
+    assert.lt(Math.abs(rhs - lhs), error, msg);
+}
+
+/**
+ * Count documents within some radius of (0, 0), in kilometers. With this function we can use
+ * the existing $maxDistance option to test the newer $minDistance option's behavior.
+ */
+function n_docs_within(radius_km) {
+    // $geoNear's distances are in meters for GeoJSON points.
+ return t
+ .aggregate([
+ {
+ $geoNear: {
+ near: {type: 'Point', coordinates: [0, 0]},
+ distanceField: "dis",
+ spherical: true,
+ maxDistance: radius_km * km,
+ }
+ },
+ {$limit: 1000}
+ ])
+ .itcount();
+}
+
+//
+// Setup.
+//
+
+/**
+ * Make 121 points from long, lat = (0, 0) (in Gulf of Guinea) to (10, 10) (inland Nigeria).
+ */
+for (var x = 0; x <= 10; x += 1) {
+ for (var y = 0; y <= 10; y += 1) {
+ t.insert({loc: [x, y]});
}
-
- //
- // Setup.
- //
-
- /**
- * Make 121 points from long, lat = (0, 0) (in Gulf of Guinea) to (10, 10) (inland Nigeria).
- */
- for (var x = 0; x <= 10; x += 1) {
- for (var y = 0; y <= 10; y += 1) {
- t.insert({loc: [x, y]});
- }
- }
-
- t.ensureIndex({loc: "2dsphere"});
-
- var n_docs = t.count(), geoJSONPoint = {type: 'Point', coordinates: [0, 0]},
- legacyPoint = [0, 0];
-
- //
- // Test $near with GeoJSON point (required for $near with 2dsphere index). min/maxDistance are
- // in meters.
- //
-
- var n_min1400_count =
- t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: 1400 * km}}}).count();
-
- assert.eq(n_docs - n_docs_within(1400),
- n_min1400_count,
- "Expected " + (n_docs - n_docs_within(1400)) +
- " points $near (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
-
- var n_bw500_and_1000_count =
- t.find({
- loc: {
- $near:
- {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}
- }
- }).count();
-
- assert.eq(n_docs_within(1000) - n_docs_within(500),
- n_bw500_and_1000_count,
- "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
- " points $near (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
- n_bw500_and_1000_count);
-
- //
- // $nearSphere with 2dsphere index can take a legacy or GeoJSON point. First test $nearSphere
- // with legacy point. min/maxDistance are in radians.
- //
-
- n_min1400_count =
- t.find({loc: {$nearSphere: legacyPoint, $minDistance: metersToRadians(1400 * km)}}).count();
-
- assert.eq(n_docs - n_docs_within(1400),
- n_min1400_count,
- "Expected " + (n_docs - n_docs_within(1400)) +
- " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
-
- n_bw500_and_1000_count = t.find({
- loc: {
- $nearSphere: legacyPoint,
- $minDistance: metersToRadians(500 * km),
- $maxDistance: metersToRadians(1000 * km)
- }
- }).count();
-
- assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- n_bw500_and_1000_count,
- "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
- " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
- n_bw500_and_1000_count);
-
- //
- // Test $nearSphere with GeoJSON point. min/maxDistance are in meters.
- //
-
- n_min1400_count = t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 1400 * km}}).count();
-
- assert.eq(n_docs - n_docs_within(1400),
- n_min1400_count,
- "Expected " + (n_docs - n_docs_within(1400)) +
- " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
-
- n_bw500_and_1000_count =
- t.find({
- loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}
- }).count();
-
- assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- n_bw500_and_1000_count,
- "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
- " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
- n_bw500_and_1000_count);
-
- //
- // Test $geoNear aggregation stage with GeoJSON point. Distances are in meters.
- //
-
- let geoNearCount = t.aggregate({
- $geoNear: {
- near: {type: 'Point', coordinates: [0, 0]},
- minDistance: 1400 * km,
- spherical: true,
- distanceField: "d",
- }
- }).itcount();
- assert.eq(n_docs - n_docs_within(1400),
- geoNearCount,
- "Expected " + (n_docs - n_docs_within(1400)) +
- " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount);
-
- geoNearCount = t.aggregate({
+}
+
+t.ensureIndex({loc: "2dsphere"});
+
+var n_docs = t.count(), geoJSONPoint = {type: 'Point', coordinates: [0, 0]}, legacyPoint = [0, 0];
+
+//
+// Test $near with GeoJSON point (required for $near with 2dsphere index). min/maxDistance are
+// in meters.
+//
+
+var n_min1400_count =
+ t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: 1400 * km}}}).count();
+
+assert.eq(n_docs - n_docs_within(1400),
+ n_min1400_count,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points $near (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
+
+var n_bw500_and_1000_count =
+ t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}}
+ }).count();
+
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ n_bw500_and_1000_count,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points $near (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ n_bw500_and_1000_count);
+
+//
+// $nearSphere with 2dsphere index can take a legacy or GeoJSON point. First test $nearSphere
+// with legacy point. min/maxDistance are in radians.
+//
+
+n_min1400_count =
+ t.find({loc: {$nearSphere: legacyPoint, $minDistance: metersToRadians(1400 * km)}}).count();
+
+assert.eq(n_docs - n_docs_within(1400),
+ n_min1400_count,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
+
+n_bw500_and_1000_count = t.find({
+ loc: {
+ $nearSphere: legacyPoint,
+ $minDistance: metersToRadians(500 * km),
+ $maxDistance: metersToRadians(1000 * km)
+ }
+ }).count();
+
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ n_bw500_and_1000_count,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ n_bw500_and_1000_count);
+
+//
+// Test $nearSphere with GeoJSON point. min/maxDistance are in meters.
+//
+
+n_min1400_count = t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 1400 * km}}).count();
+
+assert.eq(n_docs - n_docs_within(1400),
+ n_min1400_count,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
+
+n_bw500_and_1000_count =
+ t.find({
+ loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}
+ }).count();
+
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ n_bw500_and_1000_count,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ n_bw500_and_1000_count);
+
+//
+// Test $geoNear aggregation stage with GeoJSON point. Distances are in meters.
+//
+
+let geoNearCount = t.aggregate({
$geoNear: {
near: {type: 'Point', coordinates: [0, 0]},
- minDistance: 500 * km,
- maxDistance: 1000 * km,
- spherical: true,
- distanceField: "d",
- }
- }).itcount();
- assert.eq(n_docs_within(1000) - n_docs_within(500),
- geoNearCount,
- "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
- " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
- geoNearCount);
-
- //
- // Test $geoNear aggregation stage with legacy point. Distances are in radians.
- //
-
- geoNearCount = t.aggregate({
- $geoNear: {
- near: legacyPoint,
- minDistance: metersToRadians(1400 * km),
- spherical: true,
- distanceField: "d",
- }
- }).itcount();
- assert.eq(n_docs - n_docs_within(1400),
- geoNearCount,
- "Expected " + (n_docs - n_docs_within(1400)) +
- " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount);
-
- geoNearCount = t.aggregate({
- $geoNear: {
- near: legacyPoint,
- minDistance: metersToRadians(500 * km),
- maxDistance: metersToRadians(1000 * km),
+ minDistance: 1400 * km,
spherical: true,
distanceField: "d",
}
}).itcount();
- assert.eq(n_docs_within(1000) - n_docs_within(500),
- geoNearCount,
- "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
- " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
- geoNearCount);
-
- t.drop();
- assert.commandWorked(t.createIndex({loc: "2d"}));
- assert.writeOK(t.insert({loc: [0, 40]}));
- assert.writeOK(t.insert({loc: [0, 41]}));
- assert.writeOK(t.insert({loc: [0, 42]}));
-
- // Test minDistance for 2d index with $near.
- assert.eq(3, t.find({loc: {$near: [0, 0]}}).itcount());
- assert.eq(1, t.find({loc: {$near: [0, 0], $minDistance: 41.5}}).itcount());
-
- // Test minDistance for 2d index with $nearSphere. Distances are in radians.
- assert.eq(3, t.find({loc: {$nearSphere: [0, 0]}}).itcount());
- assert.eq(1, t.find({loc: {$nearSphere: [0, 0], $minDistance: deg2rad(41.5)}}).itcount());
-
- // Test minDistance for 2d index with $geoNear stage and spherical=false.
- let cmdResult =
- t.aggregate({$geoNear: {near: [0, 0], spherical: false, distanceField: "dis"}}).toArray();
- assert.eq(3, cmdResult.length);
- assert.eq(40, cmdResult[0].dis);
- assert.eq(41, cmdResult[1].dis);
- assert.eq(42, cmdResult[2].dis);
-
- cmdResult = t.aggregate({
- $geoNear: {
- near: [0, 0],
- minDistance: 41.5,
- spherical: false,
- distanceField: "dis",
- }
- }).toArray();
- assert.eq(1, cmdResult.length);
- assert.eq(42, cmdResult[0].dis);
-
- // Test minDistance for 2d index with $geoNear stage and spherical=true. Distances are in
- // radians.
- cmdResult =
- t.aggregate({$geoNear: {near: [0, 0], spherical: true, distanceField: "dis"}}).toArray();
- assert.eq(3, cmdResult.length);
- assertApproxEqual(deg2rad(40), cmdResult[0].dis, 1e-3);
- assertApproxEqual(deg2rad(41), cmdResult[1].dis, 1e-3);
- assertApproxEqual(deg2rad(42), cmdResult[2].dis, 1e-3);
-
- cmdResult = t.aggregate({
- $geoNear: {
- near: [0, 0],
- minDistance: deg2rad(41.5),
- spherical: true,
- distanceField: "dis",
- }
- }).toArray();
- assert.eq(1, cmdResult.length);
- assertApproxEqual(deg2rad(42), cmdResult[0].dis, 1e-3);
+assert.eq(n_docs - n_docs_within(1400),
+ geoNearCount,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount);
+
+geoNearCount = t.aggregate({
+ $geoNear: {
+ near: {type: 'Point', coordinates: [0, 0]},
+ minDistance: 500 * km,
+ maxDistance: 1000 * km,
+ spherical: true,
+ distanceField: "d",
+ }
+ }).itcount();
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ geoNearCount,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ geoNearCount);
+
+//
+// Test $geoNear aggregation stage with legacy point. Distances are in radians.
+//
+
+geoNearCount = t.aggregate({
+ $geoNear: {
+ near: legacyPoint,
+ minDistance: metersToRadians(1400 * km),
+ spherical: true,
+ distanceField: "d",
+ }
+ }).itcount();
+assert.eq(n_docs - n_docs_within(1400),
+ geoNearCount,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount);
+
+geoNearCount = t.aggregate({
+ $geoNear: {
+ near: legacyPoint,
+ minDistance: metersToRadians(500 * km),
+ maxDistance: metersToRadians(1000 * km),
+ spherical: true,
+ distanceField: "d",
+ }
+ }).itcount();
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ geoNearCount,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ geoNearCount);
+
+t.drop();
+assert.commandWorked(t.createIndex({loc: "2d"}));
+assert.writeOK(t.insert({loc: [0, 40]}));
+assert.writeOK(t.insert({loc: [0, 41]}));
+assert.writeOK(t.insert({loc: [0, 42]}));
+
+// Test minDistance for 2d index with $near.
+assert.eq(3, t.find({loc: {$near: [0, 0]}}).itcount());
+assert.eq(1, t.find({loc: {$near: [0, 0], $minDistance: 41.5}}).itcount());
+
+// Test minDistance for 2d index with $nearSphere. Distances are in radians.
+assert.eq(3, t.find({loc: {$nearSphere: [0, 0]}}).itcount());
+assert.eq(1, t.find({loc: {$nearSphere: [0, 0], $minDistance: deg2rad(41.5)}}).itcount());
+
+// Test minDistance for 2d index with $geoNear stage and spherical=false.
+let cmdResult =
+ t.aggregate({$geoNear: {near: [0, 0], spherical: false, distanceField: "dis"}}).toArray();
+assert.eq(3, cmdResult.length);
+assert.eq(40, cmdResult[0].dis);
+assert.eq(41, cmdResult[1].dis);
+assert.eq(42, cmdResult[2].dis);
+
+cmdResult = t.aggregate({
+ $geoNear: {
+ near: [0, 0],
+ minDistance: 41.5,
+ spherical: false,
+ distanceField: "dis",
+ }
+ }).toArray();
+assert.eq(1, cmdResult.length);
+assert.eq(42, cmdResult[0].dis);
+
+// Test minDistance for 2d index with $geoNear stage and spherical=true.
+// Distances are in radians.
+cmdResult =
+ t.aggregate({$geoNear: {near: [0, 0], spherical: true, distanceField: "dis"}}).toArray();
+assert.eq(3, cmdResult.length);
+assertApproxEqual(deg2rad(40), cmdResult[0].dis, 1e-3);
+assertApproxEqual(deg2rad(41), cmdResult[1].dis, 1e-3);
+assertApproxEqual(deg2rad(42), cmdResult[2].dis, 1e-3);
+
+cmdResult = t.aggregate({
+ $geoNear: {
+ near: [0, 0],
+ minDistance: deg2rad(41.5),
+ spherical: true,
+ distanceField: "dis",
+ }
+ }).toArray();
+assert.eq(1, cmdResult.length);
+assertApproxEqual(deg2rad(42), cmdResult[0].dis, 1e-3);
}());
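
For reference, a minimal sketch of the unit convention the assertions above depend on: legacy coordinate pairs take $minDistance/$maxDistance in radians, while GeoJSON points take them in meters. The scratch collection name and the metersToRadians helper below are illustrative, mirroring the helpers this test defines.

var km = 1000;
var earthRadiusMeters = 6378.1 * km;

function metersToRadians(m) {
    return m / earthRadiusMeters;  // arc length = radius * angle, so angle = length / radius
}

var coll = db.geo_units_sketch;  // scratch collection, not part of the suite
coll.drop();
assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
assert.writeOK(coll.insert({loc: [0, 9]}));  // roughly 1000 km from the origin

// The same cap expressed both ways should match the same documents.
var byRadians =
    coll.find({loc: {$nearSphere: [0, 0], $maxDistance: metersToRadians(1100 * km)}}).count();
var byMeters =
    coll.find({
            loc: {$nearSphere: {type: "Point", coordinates: [0, 0]}, $maxDistance: 1100 * km}
        }).count();
assert.eq(byRadians, byMeters);
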
diff --git a/jstests/core/geo_mindistance_boundaries.js b/jstests/core/geo_mindistance_boundaries.js
index 7e97732dfd1..32977ac4b12 100644
--- a/jstests/core/geo_mindistance_boundaries.js
+++ b/jstests/core/geo_mindistance_boundaries.js
@@ -1,6 +1,6 @@
/* Test boundary conditions for $minDistance option for $near and $nearSphere
* queries. SERVER-9395.
-*/
+ */
var t = db.geo_mindistance_boundaries;
t.drop();
t.insert({loc: [1, 0]}); // 1 degree of longitude from origin.
@@ -19,7 +19,7 @@ var km = 1000, earthRadiusMeters = 6378.1 * km, geoJSONPoint = {type: 'Point', c
/* Grow epsilon's exponent until epsilon exceeds the margin of error for the
* representation of degreeInMeters. The server uses 64-bit math, too, so we'll
* find the smallest epsilon the server can detect.
-*/
+ */
while (degreeInMeters + metersEpsilon == degreeInMeters) {
metersEpsilon *= 2;
}
@@ -37,19 +37,17 @@ assert.eq(1,
t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters}}}).itcount(),
"Expected to find (0, 1) within $minDistance 1 degree from origin");
-assert.eq(
- 1,
- t.find({
- loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}}
- }).itcount(),
- "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
+assert.eq(1,
+ t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}}
+ }).itcount(),
+ "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
-assert.eq(
- 0,
- t.find({
- loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}}
- }).itcount(),
- "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
+assert.eq(0,
+ t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}}
+ }).itcount(),
+ "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
//
// Test boundary conditions for $nearSphere and GeoJSON, in meters.
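
The epsilon used in these boundary checks comes from a doubling loop; a condensed sketch of the idea, assuming the usual definitions of km, earthRadiusMeters, and degreeInMeters from the top of the test:

var km = 1000;
var earthRadiusMeters = 6378.1 * km;
var degreeInMeters = 2 * Math.PI * earthRadiusMeters / 360;

// Grow metersEpsilon until degreeInMeters + metersEpsilon is a distinct
// 64-bit double; below that threshold the server could not tell the two
// values apart either, since it also uses 64-bit math.
var metersEpsilon = Number.MIN_VALUE;
while (degreeInMeters + metersEpsilon == degreeInMeters) {
    metersEpsilon *= 2;
}
assert.neq(degreeInMeters, degreeInMeters + metersEpsilon);
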
diff --git a/jstests/core/geo_nearwithin.js b/jstests/core/geo_nearwithin.js
index a63871c3195..49b8d155a44 100644
--- a/jstests/core/geo_nearwithin.js
+++ b/jstests/core/geo_nearwithin.js
@@ -1,40 +1,40 @@
// Test $near + $within.
(function() {
- t = db.geo_nearwithin;
- t.drop();
-
- points = 10;
- for (var x = -points; x < points; x += 1) {
- for (var y = -points; y < points; y += 1) {
- assert.commandWorked(t.insert({geo: [x, y]}));
- }
+t = db.geo_nearwithin;
+t.drop();
+
+points = 10;
+for (var x = -points; x < points; x += 1) {
+ for (var y = -points; y < points; y += 1) {
+ assert.commandWorked(t.insert({geo: [x, y]}));
}
+}
- assert.commandWorked(t.ensureIndex({geo: "2d"}));
+assert.commandWorked(t.ensureIndex({geo: "2d"}));
- const runQuery = (center) =>
- t.find({$and: [{geo: {$near: [0, 0]}}, {geo: {$within: {$center: center}}}]}).toArray();
+const runQuery = (center) =>
+ t.find({$and: [{geo: {$near: [0, 0]}}, {geo: {$within: {$center: center}}}]}).toArray();
- resNear = runQuery([[0, 0], 1]);
- assert.eq(resNear.length, 5);
+resNear = runQuery([[0, 0], 1]);
+assert.eq(resNear.length, 5);
- resNear = runQuery([[0, 0], 0]);
- assert.eq(resNear.length, 1);
+resNear = runQuery([[0, 0], 0]);
+assert.eq(resNear.length, 1);
- resNear = runQuery([[1, 0], 0.5]);
- assert.eq(resNear.length, 1);
+resNear = runQuery([[1, 0], 0.5]);
+assert.eq(resNear.length, 1);
- resNear = runQuery([[1, 0], 1.5]);
- assert.eq(resNear.length, 9);
+resNear = runQuery([[1, 0], 1.5]);
+assert.eq(resNear.length, 9);
- // We want everything distance >1 from us but <1.5
- // These points are (-+1, -+1)
- resNear = t.find({
- $and: [
- {geo: {$near: [0, 0]}},
- {geo: {$within: {$center: [[0, 0], 1.5]}}},
- {geo: {$not: {$within: {$center: [[0, 0], 1]}}}}
- ]
- }).toArray();
- assert.eq(resNear.length, 4);
+// We want everything at distance >1 from us but <1.5.
+// These points are (±1, ±1).
+resNear = t.find({
+ $and: [
+ {geo: {$near: [0, 0]}},
+ {geo: {$within: {$center: [[0, 0], 1.5]}}},
+ {geo: {$not: {$within: {$center: [[0, 0], 1]}}}}
+ ]
+ }).toArray();
+assert.eq(resNear.length, 4);
}());
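
A condensed sketch of the annulus trick in the last query above: intersecting an outer circle with a negated inner circle keeps only the ring between the two radii. The collection name and data are illustrative; on an integer grid, radius 1 excludes the four diagonal neighbours that radius 1.5 includes.

var coll = db.geo_annulus_sketch;
coll.drop();
assert.commandWorked(coll.createIndex({geo: "2d"}));
[[1, 1], [1, -1], [-1, 1], [-1, -1], [0, 0], [1, 0]].forEach(function(p) {
    assert.writeOK(coll.insert({geo: p}));
});

var ring = coll.find({
                   $and: [
                       {geo: {$within: {$center: [[0, 0], 1.5]}}},
                       {geo: {$not: {$within: {$center: [[0, 0], 1]}}}}
                   ]
               }).itcount();
assert.eq(4, ring);  // exactly the four diagonal points at distance sqrt(2)
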
diff --git a/jstests/core/geo_operator_crs.js b/jstests/core/geo_operator_crs.js
index b2cc8fe0439..063426b6b45 100644
--- a/jstests/core/geo_operator_crs.js
+++ b/jstests/core/geo_operator_crs.js
@@ -4,55 +4,55 @@
// Tests that the correct CRSes are used for geo queries (based on input geometry)
//
(function() {
- var coll = db.geo_operator_crs;
- coll.drop();
+var coll = db.geo_operator_crs;
+coll.drop();
- //
- // Test 2dsphere index
- //
+//
+// Test 2dsphere index
+//
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- var legacyZeroPt = [0, 0];
- var jsonZeroPt = {type: "Point", coordinates: [0, 0]};
- var legacy90Pt = [90, 0];
- var json90Pt = {type: "Point", coordinates: [90, 0]};
+var legacyZeroPt = [0, 0];
+var jsonZeroPt = {type: "Point", coordinates: [0, 0]};
+var legacy90Pt = [90, 0];
+var json90Pt = {type: "Point", coordinates: [90, 0]};
- assert.writeOK(coll.insert({geo: json90Pt}));
+assert.writeOK(coll.insert({geo: json90Pt}));
- var earthRadiusMeters = 6378.1 * 1000;
- var result = null;
+var earthRadiusMeters = 6378.1 * 1000;
+var result = null;
- const runQuery = (point) =>
- coll.find({geo: {$nearSphere: point}}, {dis: {$meta: "geoNearDistance"}}).toArray();
+const runQuery = (point) =>
+ coll.find({geo: {$nearSphere: point}}, {dis: {$meta: "geoNearDistance"}}).toArray();
- result = runQuery(legacyZeroPt);
- assert.close(result[0].dis, Math.PI / 2);
+result = runQuery(legacyZeroPt);
+assert.close(result[0].dis, Math.PI / 2);
- result = runQuery(jsonZeroPt);
- assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters);
+result = runQuery(jsonZeroPt);
+assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters);
- assert.writeOK(coll.remove({}));
- assert.commandWorked(coll.dropIndexes());
+assert.writeOK(coll.remove({}));
+assert.commandWorked(coll.dropIndexes());
- //
- // Test 2d Index
- //
+//
+// Test 2d Index
+//
- assert.commandWorked(coll.ensureIndex({geo: "2d"}));
+assert.commandWorked(coll.ensureIndex({geo: "2d"}));
- assert.writeOK(coll.insert({geo: legacy90Pt}));
+assert.writeOK(coll.insert({geo: legacy90Pt}));
- result = runQuery(legacyZeroPt);
- assert.close(result[0].dis, Math.PI / 2);
+result = runQuery(legacyZeroPt);
+assert.close(result[0].dis, Math.PI / 2);
- // GeoJSON not supported unless there's a 2dsphere index
+// GeoJSON not supported unless there's a 2dsphere index
- //
- // Test with a 2d and 2dsphere index using the aggregation $geoNear stage.
- //
+//
+// Test with a 2d and 2dsphere index using the aggregation $geoNear stage.
+//
- assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
- result = coll.aggregate({$geoNear: {near: jsonZeroPt, distanceField: "dis"}}).toArray();
- assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters);
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+result = coll.aggregate({$geoNear: {near: jsonZeroPt, distanceField: "dis"}}).toArray();
+assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters);
}());
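
A minimal sketch of the CRS rule this test exercises: the format of the query point, not the index, selects the coordinate system, so the reported distance comes back in radians for a legacy pair and in meters for a GeoJSON point. The collection name is illustrative.

var coll = db.geo_crs_sketch;
coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [90, 0]}}));

var earthRadiusMeters = 6378.1 * 1000;
var radians = coll.find({geo: {$nearSphere: [0, 0]}}, {dis: {$meta: "geoNearDistance"}})
                  .toArray()[0]
                  .dis;
var meters = coll.find({geo: {$nearSphere: {type: "Point", coordinates: [0, 0]}}},
                       {dis: {$meta: "geoNearDistance"}})
                 .toArray()[0]
                 .dis;
assert.close(radians, Math.PI / 2);
assert.close(meters, radians * earthRadiusMeters);
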
diff --git a/jstests/core/geo_or.js b/jstests/core/geo_or.js
index 1324d581be8..4da82d49ca2 100644
--- a/jstests/core/geo_or.js
+++ b/jstests/core/geo_or.js
@@ -42,10 +42,10 @@ assert.eq(
$or: [
{loc: {$geoIntersects: {$geometry: {type: 'LineString', coordinates: [p, q]}}}},
{
- loc: {
- $geoIntersects:
- {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}}
- }
+ loc: {
+ $geoIntersects:
+ {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}}
+ }
}
]
}).itcount(),
@@ -63,18 +63,18 @@ assert.eq(
t.find({
$or: [
{
- loc: {
- $geoIntersects:
- {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}}
- }
+ loc: {
+ $geoIntersects:
+ {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}}
+ }
},
{
- loc: {
- $geoIntersects: {
- $geometry:
- {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]}
- }
- }
+ loc: {
+ $geoIntersects: {
+ $geometry:
+ {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]}
+ }
+ }
}
]
}).itcount(),
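
For context, a small sketch of the pattern being reformatted above: $or unions independent $geoIntersects predicates, each carrying its own geometry, and a document matching either branch is returned once. Collection name and data are illustrative; the inserted point is a vertex of the first line string, so it certainly intersects it.

var coll = db.geo_or_sketch;
coll.drop();
assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
assert.writeOK(coll.insert({loc: {type: "Point", coordinates: [1, 1]}}));

assert.eq(1, coll.find({
                   $or: [
                       {
                           loc: {
                               $geoIntersects:
                                   {$geometry: {type: "LineString", coordinates: [[0, 0], [1, 1]]}}
                           }
                       },
                       {
                           loc: {
                               $geoIntersects:
                                   {$geometry: {type: "LineString", coordinates: [[5, 5], [6, 6]]}}
                           }
                       }
                   ]
               }).itcount());
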
diff --git a/jstests/core/geo_polygon1.js b/jstests/core/geo_polygon1.js
index 45f0eb71d64..34b0cafa1d4 100644
--- a/jstests/core/geo_polygon1.js
+++ b/jstests/core/geo_polygon1.js
@@ -25,10 +25,10 @@ boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test");
// Make sure we can add object-based polygons
-assert.eq(
- num, t.find({
- loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}
- }).count());
+assert.eq(num,
+ t.find({
+ loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}
+ }).count());
// Look in a box much bigger than the one we have data in
boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
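
A brief sketch of the object-based $polygon form this test checks: the vertices can be supplied either as an array of points or as an object whose values are points, and both describe the same boundary. The collection name is illustrative.

var coll = db.geo_polygon_sketch;
coll.drop();
assert.commandWorked(coll.createIndex({loc: "2d"}));
assert.writeOK(coll.insert({loc: [5, 5]}));

var asArray =
    coll.find({loc: {$within: {$polygon: [[0, 0], [0, 10], [10, 10], [10, 0]]}}}).count();
var asObject =
    coll.find({
            loc: {$within: {$polygon: {a: [0, 0], b: [0, 10], c: [10, 10], d: [10, 0]}}}
        }).count();
assert.eq(asArray, asObject);
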
diff --git a/jstests/core/geo_polygon1_noindex.js b/jstests/core/geo_polygon1_noindex.js
index e5aabb5043d..d9831a6990c 100644
--- a/jstests/core/geo_polygon1_noindex.js
+++ b/jstests/core/geo_polygon1_noindex.js
@@ -21,10 +21,10 @@ boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test");
// Make sure we can add object-based polygons
-assert.eq(
- num, t.find({
- loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}
- }).count());
+assert.eq(num,
+ t.find({
+ loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}
+ }).count());
// Look in a box much bigger than the one we have data in
boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
diff --git a/jstests/core/geo_polygon2.js b/jstests/core/geo_polygon2.js
index 6891cd7c8a8..2bfaf0b1087 100644
--- a/jstests/core/geo_polygon2.js
+++ b/jstests/core/geo_polygon2.js
@@ -22,7 +22,6 @@ for (var test = 0; test < numTests; test++) {
printjson({test: test, rotation: rotation, bits: bits});
var rotatePoint = function(x, y) {
-
if (y == undefined) {
y = x[1];
x = x[0];
@@ -46,7 +45,6 @@ for (var test = 0; test < numTests; test++) {
}
grid.toString = function() {
-
var gridStr = "";
for (var j = grid[0].length - 1; j >= -1; j--) {
for (var i = 0; i < grid.length; i++) {
@@ -81,7 +79,6 @@ for (var test = 0; test < numTests; test++) {
// print( grid.toString() )
var pickDirections = function() {
-
var up = Math.floor(Random.rand() * 3);
if (up == 2)
up = -1;
@@ -127,7 +124,6 @@ for (var test = 0; test < numTests; test++) {
turtlePath = [];
var nextSeg = function(currTurtle, prevTurtle) {
-
var pathX = currTurtle[0];
if (currTurtle[1] < prevTurtle[1]) {
diff --git a/jstests/core/geo_polygon3.js b/jstests/core/geo_polygon3.js
index ac668f37c04..f1e819e1920 100644
--- a/jstests/core/geo_polygon3.js
+++ b/jstests/core/geo_polygon3.js
@@ -3,65 +3,63 @@
//
(function() {
- "use strict";
+"use strict";
- const numTests = 31;
+const numTests = 31;
- for (let n = 0; n < numTests; n++) {
- let t = db.geo_polygon3;
- t.drop();
+for (let n = 0; n < numTests; n++) {
+ let t = db.geo_polygon3;
+ t.drop();
- let num = 0;
- for (let x = 1; x < 9; x++) {
- for (let y = 1; y < 9; y++) {
- let o = {_id: num++, loc: [x, y]};
- assert.writeOK(t.insert(o));
- }
+ let num = 0;
+ for (let x = 1; x < 9; x++) {
+ for (let y = 1; y < 9; y++) {
+ let o = {_id: num++, loc: [x, y]};
+ assert.writeOK(t.insert(o));
}
+ }
- assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n}));
+ assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n}));
- const triangle = [[0, 0], [1, 1], [0, 2]];
+ const triangle = [[0, 0], [1, 1], [0, 2]];
- // Look at only a small slice of the data within a triangle
- assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).itcount(), "Triangle Test");
+ // Look at only a small slice of the data within a triangle
+ assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).itcount(), "Triangle Test");
- let boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
+ let boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
- assert.eq(num,
- t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(),
- "Bounding Box Test");
+ assert.eq(
+ num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(), "Bounding Box Test");
- // Look in a box much bigger than the one we have data in
- boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
- assert.eq(num,
- t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(),
- "Big Bounding Box Test");
+ // Look in a box much bigger than the one we have data in
+ boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
+ assert.eq(num,
+ t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(),
+ "Big Bounding Box Test");
- assert(t.drop());
+ assert(t.drop());
- const pacman = [
- [0, 2],
- [0, 4],
- [2, 6],
- [4, 6], // Head
- [6, 4],
- [4, 3],
- [6, 2], // Mouth
- [4, 0],
- [2, 0] // Bottom
- ];
+ const pacman = [
+ [0, 2],
+ [0, 4],
+ [2, 6],
+ [4, 6], // Head
+ [6, 4],
+ [4, 3],
+ [6, 2], // Mouth
+ [4, 0],
+ [2, 0] // Bottom
+ ];
- assert.writeOK(t.insert({loc: [1, 3]})); // Add a point that's in
- assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n}));
+ assert.writeOK(t.insert({loc: [1, 3]})); // Add a point that's in
+ assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n}));
- assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman single point");
+ assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman single point");
- assert.writeOK(
- t.insert({loc: [5, 3]})); // Add a point that's out right in the mouth opening
- assert.writeOK(t.insert({loc: [3, 7]})); // Add a point above the center of the head
- assert.writeOK(t.insert({loc: [3, -1]})); // Add a point below the center of the bottom
+ assert.writeOK(t.insert({loc: [5, 3]})); // Add a point that's out right in the mouth opening
+ assert.writeOK(t.insert({loc: [3, 7]})); // Add a point above the center of the head
+ assert.writeOK(t.insert({loc: [3, -1]})); // Add a point below the center of the bottom
- assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman double point");
- }
+ assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman double point");
+}
})();
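
A sketch of the `bits` option the loop above sweeps: a 2d index stores geohashes at a configurable precision (1 to 32 bits per dimension, default 26). Coarser hashes produce more candidate documents to filter, but query results must not change, which is what running every assertion at `bits: 2 + n` verifies. The snippet below is illustrative.

var coll = db.geo_bits_sketch;
coll.drop();
assert.commandWorked(coll.createIndex({loc: "2d"}, {bits: 4}));  // deliberately coarse
assert.writeOK(coll.insert({loc: [1, 3]}));
// Even at low precision the filter stage keeps results exact.
assert.eq(1, coll.find({loc: {$within: {$polygon: [[0, 2], [2, 6], [6, 2]]}}}).itcount());
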
diff --git a/jstests/core/geo_s2disjoint_holes.js b/jstests/core/geo_s2disjoint_holes.js
index 0b088434b36..dd17dd29b1d 100644
--- a/jstests/core/geo_s2disjoint_holes.js
+++ b/jstests/core/geo_s2disjoint_holes.js
@@ -12,13 +12,14 @@
// http://geojson.org/geojson-spec.html#polygon
//
-var t = db.geo_s2disjoint_holes, coordinates =
- [
- // One square.
- [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],
- // Another disjoint square.
- [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
- ],
+var t = db.geo_s2disjoint_holes,
+ coordinates =
+ [
+ // One square.
+ [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],
+ // Another disjoint square.
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
+ ],
poly = {type: 'Polygon', coordinates: coordinates}, multiPoly = {
type: 'MultiPolygon',
// Multi-polygon's coordinates are wrapped in one more array.
@@ -33,12 +34,12 @@ jsTest.log("We're going to print some error messages, don't be alarmed.");
// Can't query with a polygon or multi-polygon that has a non-contained hole.
//
print(assert.throws(function() {
- t.findOne({geo: {$geoWithin: {$geometry: poly}}});
-}, [], "parsing a polygon with non-overlapping holes."));
+ t.findOne({geo: {$geoWithin: {$geometry: poly}}});
+ }, [], "parsing a polygon with non-overlapping holes."));
print(assert.throws(function() {
- t.findOne({geo: {$geoWithin: {$geometry: multiPoly}}});
-}, [], "parsing a multi-polygon with non-overlapping holes."));
+ t.findOne({geo: {$geoWithin: {$geometry: multiPoly}}});
+ }, [], "parsing a multi-polygon with non-overlapping holes."));
//
// Can't insert a bad polygon or a bad multi-polygon with a 2dsphere index.
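
A condensed sketch of the invariant above: GeoJSON requires every interior ring (hole) to lie inside the exterior ring, so a "hole" disjoint from the shell is rejected both at query time and, with a 2dsphere index, at insert time. The collection name is illustrative; the coordinates reuse the disjoint squares defined at the top of the test.

var coll = db.geo_badpoly_sketch;
coll.drop();
assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
var badPoly = {
    type: "Polygon",
    coordinates: [
        [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],  // shell
        [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]       // "hole" outside the shell
    ]
};
assert.writeError(coll.insert({geo: badPoly}));
assert.throws(function() {
    coll.findOne({geo: {$geoWithin: {$geometry: badPoly}}});
});
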
diff --git a/jstests/core/geo_s2dupe_points.js b/jstests/core/geo_s2dupe_points.js
index 406a7b1ff4c..faa06cabb9b 100644
--- a/jstests/core/geo_s2dupe_points.js
+++ b/jstests/core/geo_s2dupe_points.js
@@ -57,16 +57,23 @@ var multiPolygonWithDupes = {
coordinates: [
[[[102.0, 2.0], [103.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[
- [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
- [
- [100.2, 0.2],
- [100.8, 0.2],
- [100.8, 0.8],
- [100.8, 0.8],
- [100.8, 0.8],
- [100.2, 0.8],
- [100.2, 0.2]
- ]
+ [
+ [100.0, 0.0],
+ [101.0, 0.0],
+ [101.0, 1.0],
+ [101.0, 1.0],
+ [100.0, 1.0],
+ [100.0, 0.0]
+ ],
+ [
+ [100.2, 0.2],
+ [100.8, 0.2],
+ [100.8, 0.8],
+ [100.8, 0.8],
+ [100.8, 0.8],
+ [100.2, 0.8],
+ [100.2, 0.2]
+ ]
]
]
}
@@ -76,8 +83,8 @@ var multiPolygonWithoutDupes = {
coordinates: [
[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[
- [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
- [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
+ [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
]
]
};
diff --git a/jstests/core/geo_s2index.js b/jstests/core/geo_s2index.js
index 99c3852aae9..0b1644e41da 100644
--- a/jstests/core/geo_s2index.js
+++ b/jstests/core/geo_s2index.js
@@ -101,22 +101,14 @@ assert.throws(function() {
t.drop();
t.ensureIndex({loc: "2dsphere"});
res = t.insert({
- loc: {
- type: 'Point',
- coordinates: [40, 5],
- crs: {type: 'name', properties: {name: 'EPSG:2000'}}
- }
+ loc: {type: 'Point', coordinates: [40, 5], crs: {type: 'name', properties: {name: 'EPSG:2000'}}}
});
assert.writeError(res);
assert.eq(0, t.find().itcount());
res = t.insert({loc: {type: 'Point', coordinates: [40, 5]}});
assert.writeOK(res);
res = t.insert({
- loc: {
- type: 'Point',
- coordinates: [40, 5],
- crs: {type: 'name', properties: {name: 'EPSG:4326'}}
- }
+ loc: {type: 'Point', coordinates: [40, 5], crs: {type: 'name', properties: {name: 'EPSG:4326'}}}
});
assert.writeOK(res);
res = t.insert({
diff --git a/jstests/core/geo_s2indexversion1.js b/jstests/core/geo_s2indexversion1.js
index d9797a67505..7b17796f29f 100644
--- a/jstests/core/geo_s2indexversion1.js
+++ b/jstests/core/geo_s2indexversion1.js
@@ -136,11 +136,11 @@ var multiPolygonDoc = {
type: "MultiPolygon",
coordinates: [
[[
- [-73.958, 40.8003],
- [-73.9498, 40.7968],
- [-73.9737, 40.7648],
- [-73.9814, 40.7681],
- [-73.958, 40.8003]
+ [-73.958, 40.8003],
+ [-73.9498, 40.7968],
+ [-73.9737, 40.7648],
+ [-73.9814, 40.7681],
+ [-73.958, 40.8003]
]],
[[[-73.958, 40.8003], [-73.9498, 40.7968], [-73.9737, 40.7648], [-73.958, 40.8003]]]
]
@@ -151,22 +151,22 @@ var geometryCollectionDoc = {
type: "GeometryCollection",
geometries: [
{
- type: "MultiPoint",
- coordinates: [
- [-73.9580, 40.8003],
- [-73.9498, 40.7968],
- [-73.9737, 40.7648],
- [-73.9814, 40.7681]
- ]
+ type: "MultiPoint",
+ coordinates: [
+ [-73.9580, 40.8003],
+ [-73.9498, 40.7968],
+ [-73.9737, 40.7648],
+ [-73.9814, 40.7681]
+ ]
},
{
- type: "MultiLineString",
- coordinates: [
- [[-73.96943, 40.78519], [-73.96082, 40.78095]],
- [[-73.96415, 40.79229], [-73.95544, 40.78854]],
- [[-73.97162, 40.78205], [-73.96374, 40.77715]],
- [[-73.97880, 40.77247], [-73.97036, 40.76811]]
- ]
+ type: "MultiLineString",
+ coordinates: [
+ [[-73.96943, 40.78519], [-73.96082, 40.78095]],
+ [[-73.96415, 40.79229], [-73.95544, 40.78854]],
+ [[-73.97162, 40.78205], [-73.96374, 40.77715]],
+ [[-73.97880, 40.77247], [-73.97036, 40.76811]]
+ ]
}
]
}
diff --git a/jstests/core/geo_s2meridian.js b/jstests/core/geo_s2meridian.js
index 583b426845c..763067e8a34 100644
--- a/jstests/core/geo_s2meridian.js
+++ b/jstests/core/geo_s2meridian.js
@@ -45,8 +45,7 @@ t.insert(pointOnPositiveSideOfMeridian);
meridianCrossingPoly = {
type: "Polygon",
- coordinates:
- [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]]
+ coordinates: [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]]
};
result = t.find({geo: {$geoWithin: {$geometry: meridianCrossingPoly}}});
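
A minimal sketch of the meridian case: because 2dsphere geometries live on the sphere, a polygon running from longitude 178 to -178 is the narrow band across the antimeridian (the smaller region), not the band covering most of the globe, so it matches points on both sides of the +/-180 line. The collection name is illustrative.

var coll = db.geo_meridian_sketch;
coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [-179, 0]}}));
assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [179, 0]}}));
assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}}));

var band = {
    type: "Polygon",
    coordinates:
        [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]]
};
assert.eq(2, coll.find({geo: {$geoWithin: {$geometry: band}}}).itcount());
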
diff --git a/jstests/core/geo_s2multi.js b/jstests/core/geo_s2multi.js
index 8899c9d5561..d9a4032d070 100644
--- a/jstests/core/geo_s2multi.js
+++ b/jstests/core/geo_s2multi.js
@@ -21,8 +21,8 @@ multiPolygonA = {
"coordinates": [
[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[
- [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
- [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
+ [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
]
]
};
@@ -31,36 +31,33 @@ assert.writeOK(t.insert({geo: multiPolygonA}));
assert.eq(3, t.find({
geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100, 0]}}}
}).itcount());
-assert.eq(3,
- t.find({
- geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}}
- }).itcount());
+assert.eq(3, t.find({
+ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}}
+ }).itcount());
// Inside the hole in multiPolygonA
-assert.eq(
- 0, t.find({
- geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}}
- }).itcount());
+assert.eq(0,
+ t.find({
+ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}}
+ }).itcount());
// One point inside the hole, one out.
assert.eq(
- 3,
- t.find({
- geo: {
- $geoIntersects:
- {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}}
- }
- }).itcount());
+ 3, t.find({
+ geo: {
+ $geoIntersects:
+ {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}}
+ }
+ }).itcount());
assert.eq(
- 3,
- t.find({
- geo: {
- $geoIntersects: {
- $geometry:
- {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]}
- }
- }
- }).itcount());
+ 3, t.find({
+ geo: {
+ $geoIntersects: {
+ $geometry:
+ {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]}
+ }
+ }
+ }).itcount());
// Polygon contains itself and the multipoint.
assert.eq(2, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount());
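
A sketch of the hole semantics these counts rely on: a point inside an interior ring is outside the polygon, so it does not intersect, while a MultiPoint intersects as soon as any one of its members lies outside the hole. The polygon below reuses the ring coordinates from multiPolygonA; the collection name is illustrative.

var coll = db.geo_hole_sketch;
coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
var polyWithHole = {
    type: "Polygon",
    coordinates: [
        [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
        [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
    ]
};
assert.writeOK(coll.insert({geo: polyWithHole}));

// In the hole: no intersection.
assert.eq(0, coll.find({
                   geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [100.5, 0.5]}}}
               }).itcount());
// On the shell, outside the hole: intersects.
assert.eq(1, coll.find({
                   geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [100.1, 0.1]}}}
               }).itcount());
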
diff --git a/jstests/core/geo_s2near.js b/jstests/core/geo_s2near.js
index d0a591d45e6..86373c4aa11 100644
--- a/jstests/core/geo_s2near.js
+++ b/jstests/core/geo_s2near.js
@@ -2,142 +2,153 @@
// Test 2dsphere near search, called via find and $geoNear.
(function() {
- t = db.geo_s2near;
- t.drop();
+t = db.geo_s2near;
+t.drop();
- // Make sure that geoNear gives us back loc
- goldenPoint = {type: "Point", coordinates: [31.0, 41.0]};
- t.insert({geo: goldenPoint});
- t.ensureIndex({geo: "2dsphere"});
- resNear =
- t.aggregate([
- {$geoNear: {near: [30, 40], distanceField: "d", spherical: true, includeLocs: "loc"}},
- {$limit: 1}
- ]).toArray();
- assert.eq(resNear.length, 1, tojson(resNear));
- assert.eq(resNear[0].loc, goldenPoint);
+// Make sure that geoNear gives us back loc
+goldenPoint = {
+ type: "Point",
+ coordinates: [31.0, 41.0]
+};
+t.insert({geo: goldenPoint});
+t.ensureIndex({geo: "2dsphere"});
+resNear =
+ t.aggregate([
+ {$geoNear: {near: [30, 40], distanceField: "d", spherical: true, includeLocs: "loc"}},
+ {$limit: 1}
+ ]).toArray();
+assert.eq(resNear.length, 1, tojson(resNear));
+assert.eq(resNear[0].loc, goldenPoint);
- // FYI:
- // One degree of long @ 0 is 111km or so.
- // One degree of lat @ 0 is 110km or so.
- lat = 0;
- lng = 0;
- points = 10;
- for (var x = -points; x < points; x += 1) {
- for (var y = -points; y < points; y += 1) {
- t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}});
- }
+// FYI:
+// One degree of longitude at the equator is about 111 km.
+// One degree of latitude at the equator is about 110 km.
+lat = 0;
+lng = 0;
+points = 10;
+for (var x = -points; x < points; x += 1) {
+ for (var y = -points; y < points; y += 1) {
+ t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}});
}
+}
- origin = {"type": "Point", "coordinates": [lng, lat]};
+origin = {
+ "type": "Point",
+ "coordinates": [lng, lat]
+};
- t.ensureIndex({geo: "2dsphere"});
+t.ensureIndex({geo: "2dsphere"});
- // Near only works when the query is a point.
- someline = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]};
- somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]};
- assert.throws(function() {
- return t.find({"geo": {"$near": {"$geometry": someline}}}).count();
- });
- assert.throws(function() {
- return t.find({"geo": {"$near": {"$geometry": somepoly}}}).count();
- });
- assert.throws(function() {
- return t.aggregate({$geoNear: {near: someline, distanceField: "dis", spherical: true}});
- });
- assert.throws(function() {
- return t.aggregate({$geoNear: {near: somepoly, distanceField: "dis", spherical: true}});
- });
+// Near only works when the query is a point.
+someline = {
+ "type": "LineString",
+ "coordinates": [[40, 5], [41, 6]]
+};
+somepoly = {
+ "type": "Polygon",
+ "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
+};
+assert.throws(function() {
+ return t.find({"geo": {"$near": {"$geometry": someline}}}).count();
+});
+assert.throws(function() {
+ return t.find({"geo": {"$near": {"$geometry": somepoly}}}).count();
+});
+assert.throws(function() {
+ return t.aggregate({$geoNear: {near: someline, distanceField: "dis", spherical: true}});
+});
+assert.throws(function() {
+ return t.aggregate({$geoNear: {near: somepoly, distanceField: "dis", spherical: true}});
+});
- // Do some basic near searches.
- res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10);
- resNear = t.aggregate([
- {$geoNear: {near: [0, 0], distanceField: "dis", maxDistance: Math.PI, spherical: true}},
- {$limit: 10},
- ]);
- assert.eq(res.itcount(), resNear.itcount(), "10");
+// Do some basic near searches.
+res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10);
+resNear = t.aggregate([
+ {$geoNear: {near: [0, 0], distanceField: "dis", maxDistance: Math.PI, spherical: true}},
+ {$limit: 10},
+]);
+assert.eq(res.itcount(), resNear.itcount(), "10");
- res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10);
- resNear = t.aggregate([
- {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
- {$limit: 10},
- ]);
- assert.eq(res.itcount(), resNear.itcount(), "10");
+res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10);
+resNear = t.aggregate([
+ {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
+ {$limit: 10},
+]);
+assert.eq(res.itcount(), resNear.itcount(), "10");
- // Find all the points!
- res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000);
- resNear = t.aggregate([
- {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
- {$limit: 10000},
- ]);
- assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points)).toString());
+// Find all the points!
+res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000);
+resNear = t.aggregate([
+ {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
+ {$limit: 10000},
+]);
+assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points)).toString());
- // longitude goes -180 to 180
- // latitude goes -90 to 90
- // Let's put in some perverse (polar) data and make sure we get it back.
- // Points go long, lat.
- t.insert({geo: {"type": "Point", "coordinates": [-180, -90]}});
- t.insert({geo: {"type": "Point", "coordinates": [180, -90]}});
- t.insert({geo: {"type": "Point", "coordinates": [180, 90]}});
- t.insert({geo: {"type": "Point", "coordinates": [-180, 90]}});
- res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000);
- resNear = t.aggregate([
- {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
- {$limit: 10000},
- ]);
- assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points) + 4).toString());
+// longitude goes -180 to 180
+// latitude goes -90 to 90
+// Let's put in some perverse (polar) data and make sure we get it back.
+// Coordinates are ordered [longitude, latitude].
+t.insert({geo: {"type": "Point", "coordinates": [-180, -90]}});
+t.insert({geo: {"type": "Point", "coordinates": [180, -90]}});
+t.insert({geo: {"type": "Point", "coordinates": [180, 90]}});
+t.insert({geo: {"type": "Point", "coordinates": [-180, 90]}});
+res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000);
+resNear = t.aggregate([
+ {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}},
+ {$limit: 10000},
+]);
+assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points) + 4).toString());
- function testRadAndDegreesOK(distance) {
- // Distance for old style points is radians.
- resRadians =
- t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}});
- // Distance for new style points is meters.
- resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}});
- // And we should get the same # of results no matter what.
- assert.eq(resRadians.itcount(), resMeters.itcount());
+function testRadAndDegreesOK(distance) {
+ // Distance for old style points is radians.
+ resRadians = t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}});
+ // Distance for new style points is meters.
+ resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}});
+ // And we should get the same # of results no matter what.
+ assert.eq(resRadians.itcount(), resMeters.itcount());
- // Also, $geoNear should behave the same way.
- resGNMeters = t.aggregate({
- $geoNear: {
- near: origin,
- distanceField: "dis",
- maxDistance: distance,
- spherical: true,
- }
- }).toArray();
- resGNRadians = t.aggregate({
- $geoNear: {
- near: [0, 0],
- distanceField: "dis",
- maxDistance: (distance / (6378.1 * 1000)),
- spherical: true,
- }
- }).toArray();
- const errmsg = `$geoNear using meter distances returned ${tojson(resGNMeters)}, but ` +
- `$geoNear using radian distances returned ${tojson(resGNRadians)}`;
- assert.eq(resGNRadians.length, resGNMeters.length, errmsg);
- for (var i = 0; i < resGNRadians.length; ++i) {
- // Radius of earth * radians = distance in meters.
- assert.close(resGNRadians[i].dis * 6378.1 * 1000, resGNMeters[i].dis);
- }
+ // Also, $geoNear should behave the same way.
+ resGNMeters = t.aggregate({
+ $geoNear: {
+ near: origin,
+ distanceField: "dis",
+ maxDistance: distance,
+ spherical: true,
+ }
+ }).toArray();
+ resGNRadians = t.aggregate({
+ $geoNear: {
+ near: [0, 0],
+ distanceField: "dis",
+ maxDistance: (distance / (6378.1 * 1000)),
+ spherical: true,
+ }
+ }).toArray();
+ const errmsg = `$geoNear using meter distances returned ${tojson(resGNMeters)}, but ` +
+ `$geoNear using radian distances returned ${tojson(resGNRadians)}`;
+ assert.eq(resGNRadians.length, resGNMeters.length, errmsg);
+ for (var i = 0; i < resGNRadians.length; ++i) {
+ // Radius of earth * radians = distance in meters.
+ assert.close(resGNRadians[i].dis * 6378.1 * 1000, resGNMeters[i].dis);
}
+}
- testRadAndDegreesOK(1);
- testRadAndDegreesOK(10);
- testRadAndDegreesOK(50);
- testRadAndDegreesOK(10000);
+testRadAndDegreesOK(1);
+testRadAndDegreesOK(10);
+testRadAndDegreesOK(50);
+testRadAndDegreesOK(10000);
- // SERVER-13666 legacy coordinates must be in bounds for spherical near queries.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: t.getName(),
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: [1210.466, 31.2051],
- distanceField: "dis",
- spherical: true,
- }
- }]
- }),
- 17444);
+// SERVER-13666 legacy coordinates must be in bounds for spherical near queries.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: t.getName(),
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: [1210.466, 31.2051],
+ distanceField: "dis",
+ spherical: true,
+ }
+ }]
+}),
+ 17444);
}());
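
A quick sketch of the conversion that testRadAndDegreesOK checks: spherical distances for legacy points are radians, so multiplying by the earth radius used throughout these tests (6378.1 km) recovers the metric distance reported for GeoJSON points, which also explains the "111 km or so" per degree noted above. The helper name is illustrative.

var earthRadiusMeters = 6378.1 * 1000;
function radiansToMeters(r) {
    return r * earthRadiusMeters;  // arc length = radius * angle
}
var oneDegreeMeters = radiansToMeters(Math.PI / 180);
assert.lt(111000, oneDegreeMeters);  // ~111.3 km per degree at the equator
assert.gt(112000, oneDegreeMeters);
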
diff --git a/jstests/core/geo_s2near_equator_opposite.js b/jstests/core/geo_s2near_equator_opposite.js
index fb17310030b..485afc52fd4 100644
--- a/jstests/core/geo_s2near_equator_opposite.js
+++ b/jstests/core/geo_s2near_equator_opposite.js
@@ -2,55 +2,51 @@
// on the equator
// First reported in SERVER-11830 as a regression in 2.5
(function() {
- var t = db.geos2nearequatoropposite;
+var t = db.geos2nearequatoropposite;
- t.drop();
+t.drop();
- t.insert({loc: {type: 'Point', coordinates: [0, 0]}});
- t.insert({loc: {type: 'Point', coordinates: [-1, 0]}});
+t.insert({loc: {type: 'Point', coordinates: [0, 0]}});
+t.insert({loc: {type: 'Point', coordinates: [-1, 0]}});
- t.ensureIndex({loc: '2dsphere'});
+t.ensureIndex({loc: '2dsphere'});
- // upper bound for half of earth's circumference in meters
- var dist = 40075000 / 2 + 1;
+// upper bound for half of earth's circumference in meters
+var dist = 40075000 / 2 + 1;
- var nearSphereCount =
- t.find({
- loc: {
- $nearSphere:
- {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}
- }
- }).itcount();
- var nearCount =
- t.find({
- loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}
- }).itcount();
- var geoNearResult = t.aggregate([
- {
- $geoNear: {
- near: {type: 'Point', coordinates: [180, 0]},
- spherical: true,
- distanceField: "dist",
- }
- },
- {
- $group: {
- _id: null,
- nResults: {$sum: 1},
- maxDistance: {$max: "$distanceField"},
- }
+var nearSphereCount =
+ t.find({
+ loc: {$nearSphere: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}
+ }).itcount();
+var nearCount =
+ t.find({
+ loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}
+ }).itcount();
+var geoNearResult = t.aggregate([
+ {
+ $geoNear: {
+ near: {type: 'Point', coordinates: [180, 0]},
+ spherical: true,
+ distanceField: "dist",
}
- ]).toArray();
+ },
+ {
+ $group: {
+ _id: null,
+ nResults: {$sum: 1},
+ maxDistance: {$max: "$distanceField"},
+ }
+ }
+ ]).toArray();
- assert.eq(2, nearSphereCount, 'unexpected document count for nearSphere');
- assert.eq(2, nearCount, 'unexpected document count for near');
- assert.eq(1, geoNearResult.length, `unexpected $geoNear result: ${tojson(geoNearResult)}`);
+assert.eq(2, nearSphereCount, 'unexpected document count for nearSphere');
+assert.eq(2, nearCount, 'unexpected document count for near');
+assert.eq(1, geoNearResult.length, `unexpected $geoNear result: ${tojson(geoNearResult)}`);
- const geoNearStats = geoNearResult[0];
- assert.eq(2,
- geoNearStats.nResults,
- `unexpected document count for $geoNear: ${tojson(geoNearStats)}`);
- assert.gt(dist,
- geoNearStats.maxDistance,
- `unexpected maximum distance in $geoNear results: ${tojson(geoNearStats)}`);
+const geoNearStats = geoNearResult[0];
+assert.eq(
+ 2, geoNearStats.nResults, `unexpected document count for $geoNear: ${tojson(geoNearStats)}`);
+assert.gt(dist,
+ geoNearStats.maxDistance,
+ `unexpected maximum distance in $geoNear results: ${tojson(geoNearStats)}`);
}());
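
A sketch of the distance bound used above: no two points on the sphere are farther apart than half its circumference, so a $maxDistance just over 40,075 km / 2 matches every document regardless of where `near` sits. The collection name is illustrative.

var halfCircumference = 40075000 / 2 + 1;  // meters, rounded up
var coll = db.geo_antipode_sketch;
coll.drop();
assert.commandWorked(coll.ensureIndex({loc: "2dsphere"}));
assert.writeOK(coll.insert({loc: {type: "Point", coordinates: [0, 0]}}));
assert.eq(1,
          coll.find({
                  loc: {
                      $near: {
                          $geometry: {type: "Point", coordinates: [180, 0]},
                          $maxDistance: halfCircumference
                      }
                  }
              }).itcount());
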
diff --git a/jstests/core/geo_s2nearwithin.js b/jstests/core/geo_s2nearwithin.js
index 6df9a1940df..d8f15dcdb54 100644
--- a/jstests/core/geo_s2nearwithin.js
+++ b/jstests/core/geo_s2nearwithin.js
@@ -1,57 +1,57 @@
// Test $geoNear + $within.
(function() {
- t = db.geo_s2nearwithin;
- t.drop();
-
- points = 10;
- for (var x = -points; x < points; x += 1) {
- for (var y = -points; y < points; y += 1) {
- assert.commandWorked(t.insert({geo: [x, y]}));
- }
- }
+t = db.geo_s2nearwithin;
+t.drop();
- origin = {"type": "Point", "coordinates": [0, 0]};
-
- assert.commandWorked(t.ensureIndex({geo: "2dsphere"}));
- // Near requires an index, and 2dsphere is an index. Spherical isn't
- // specified so this doesn't work.
- let res = assert.commandFailedWithCode(t.runCommand("aggregate", {
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: [0, 0],
- distanceField: "d",
- query: {geo: {$within: {$center: [[0, 0], 1]}}}
- }
- }],
- }),
- ErrorCodes.BadValue);
- assert(res.errmsg.includes("unable to find index for $geoNear query"), tojson(res));
-
- // Spherical is specified so this does work. Old style points are weird
- // because you can use them with both $center and $centerSphere. Points are
- // the only things we will do this conversion for.
- const runGeoNear = (within) => t.aggregate({
- $geoNear: {
- near: [0, 0],
- distanceField: "d",
- spherical: true,
- query: {geo: {$within: within}},
- }
- }).toArray();
-
- resNear = runGeoNear({$center: [[0, 0], 1]});
- assert.eq(resNear.length, 5);
-
- resNear = runGeoNear({$centerSphere: [[0, 0], Math.PI / 180.0]});
- assert.eq(resNear.length, 5);
-
- resNear = runGeoNear({$centerSphere: [[0, 0], 0]});
- assert.eq(resNear.length, 1);
-
- resNear = runGeoNear({$centerSphere: [[1, 0], 0.5 * Math.PI / 180.0]});
- assert.eq(resNear.length, 1);
-
- resNear = runGeoNear({$center: [[1, 0], 1.5]});
- assert.eq(resNear.length, 9);
+points = 10;
+for (var x = -points; x < points; x += 1) {
+ for (var y = -points; y < points; y += 1) {
+ assert.commandWorked(t.insert({geo: [x, y]}));
+ }
+}
+
+origin = {
+ "type": "Point",
+ "coordinates": [0, 0]
+};
+
+assert.commandWorked(t.ensureIndex({geo: "2dsphere"}));
+// $geoNear requires a geo index, and a 2dsphere index exists. But spherical
+// isn't specified, so the query looks for a 2d index and fails.
+let res = assert.commandFailedWithCode(t.runCommand("aggregate", {
+ cursor: {},
+ pipeline: [{
+ $geoNear:
+ {near: [0, 0], distanceField: "d", query: {geo: {$within: {$center: [[0, 0], 1]}}}}
+ }],
+}),
+ ErrorCodes.BadValue);
+assert(res.errmsg.includes("unable to find index for $geoNear query"), tojson(res));
+
+// Spherical is specified, so this does work. Old-style points are unusual in
+// that they can be used with both $center and $centerSphere; points are the
+// only shape for which we do this conversion.
+const runGeoNear = (within) => t.aggregate({
+ $geoNear: {
+ near: [0, 0],
+ distanceField: "d",
+ spherical: true,
+ query: {geo: {$within: within}},
+ }
+ }).toArray();
+
+resNear = runGeoNear({$center: [[0, 0], 1]});
+assert.eq(resNear.length, 5);
+
+resNear = runGeoNear({$centerSphere: [[0, 0], Math.PI / 180.0]});
+assert.eq(resNear.length, 5);
+
+resNear = runGeoNear({$centerSphere: [[0, 0], 0]});
+assert.eq(resNear.length, 1);
+
+resNear = runGeoNear({$centerSphere: [[1, 0], 0.5 * Math.PI / 180.0]});
+assert.eq(resNear.length, 1);
+
+resNear = runGeoNear({$center: [[1, 0], 1.5]});
+assert.eq(resNear.length, 9);
}());
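
A minimal sketch of the $centerSphere radius convention behind those counts: the radius is always in radians, so Math.PI / 180 caps the search at one degree and 0 matches only points exactly at the center. The collection name is illustrative.

var coll = db.geo_centersphere_sketch;
coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.commandWorked(coll.insert({geo: [0, 0]}));
assert.commandWorked(coll.insert({geo: [0.5, 0]}));
assert.commandWorked(coll.insert({geo: [2, 0]}));

var oneDegree = Math.PI / 180.0;
assert.eq(2, coll.find({geo: {$geoWithin: {$centerSphere: [[0, 0], oneDegree]}}}).itcount());
assert.eq(1, coll.find({geo: {$geoWithin: {$centerSphere: [[0, 0], 0]}}}).itcount());
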
diff --git a/jstests/core/geo_s2ordering.js b/jstests/core/geo_s2ordering.js
index b20189e49b8..dc9f660ae6c 100644
--- a/jstests/core/geo_s2ordering.js
+++ b/jstests/core/geo_s2ordering.js
@@ -4,56 +4,56 @@
// for 2dsphere.
// @tags: [assumes_balancer_off, operations_longer_than_stepdown_interval_in_txns]
(function() {
- "use strict";
-
- const coll = db.geo_s2ordering;
- coll.drop();
-
- const needle = "hari";
-
- // We insert lots of points in a region and look for a non-geo key which is rare.
- function makepoints(needle) {
- const lat = 0;
- const lng = 0;
- const points = 50.0;
-
- const bulk = coll.initializeUnorderedBulkOp();
- for (let x = -points; x < points; x += 1) {
- for (let y = -points; y < points; y += 1) {
- bulk.insert({
- nongeo: x.toString() + "," + y.toString(),
- geo: {type: "Point", coordinates: [lng + x / points, lat + y / points]}
- });
- }
+"use strict";
+
+const coll = db.geo_s2ordering;
+coll.drop();
+
+const needle = "hari";
+
+// We insert lots of points in a region and then query for a non-geo value that is rare.
+function makepoints(needle) {
+ const lat = 0;
+ const lng = 0;
+ const points = 50.0;
+
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let x = -points; x < points; x += 1) {
+ for (let y = -points; y < points; y += 1) {
+ bulk.insert({
+ nongeo: x.toString() + "," + y.toString(),
+ geo: {type: "Point", coordinates: [lng + x / points, lat + y / points]}
+ });
}
- bulk.insert({nongeo: needle, geo: {type: "Point", coordinates: [0, 0]}});
- assert.writeOK(bulk.execute());
}
-
- function runTest(index) {
- assert.commandWorked(coll.ensureIndex(index));
- const cursor =
- coll.find({nongeo: needle, geo: {$within: {$centerSphere: [[0, 0], Math.PI / 180.0]}}});
- const stats = cursor.explain("executionStats").executionStats;
- assert.commandWorked(coll.dropIndex(index));
- return stats;
- }
-
- makepoints(needle);
-
- // Indexing non-geo first should be quicker.
- const fast = runTest({nongeo: 1, geo: "2dsphere"});
- const slow = runTest({geo: "2dsphere", nongeo: 1});
-
- // The nReturned should be the same.
- assert.eq(fast.nReturned, 1);
- assert.eq(slow.nReturned, 1);
-
- // Only one document is examined, since we use the index.
- assert.eq(fast.totalDocsExamined, 1);
- assert.eq(slow.totalDocsExamined, 1);
-
- // The ordering actually matters for lookup speed.
- // totalKeysExamined is a direct measure of its speed.
- assert.lt(fast.totalKeysExamined, slow.totalKeysExamined);
+ bulk.insert({nongeo: needle, geo: {type: "Point", coordinates: [0, 0]}});
+ assert.writeOK(bulk.execute());
+}
+
+function runTest(index) {
+ assert.commandWorked(coll.ensureIndex(index));
+ const cursor =
+ coll.find({nongeo: needle, geo: {$within: {$centerSphere: [[0, 0], Math.PI / 180.0]}}});
+ const stats = cursor.explain("executionStats").executionStats;
+ assert.commandWorked(coll.dropIndex(index));
+ return stats;
+}
+
+makepoints(needle);
+
+// Indexing non-geo first should be quicker.
+const fast = runTest({nongeo: 1, geo: "2dsphere"});
+const slow = runTest({geo: "2dsphere", nongeo: 1});
+
+// The nReturned should be the same.
+assert.eq(fast.nReturned, 1);
+assert.eq(slow.nReturned, 1);
+
+// Only one document is examined, since we use the index.
+assert.eq(fast.totalDocsExamined, 1);
+assert.eq(slow.totalDocsExamined, 1);
+
+// The ordering actually matters for lookup speed:
+// totalKeysExamined is a direct measure of how much work the lookup does.
+assert.lt(fast.totalKeysExamined, slow.totalKeysExamined);
}());
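
A sketch of the measurement technique: explain("executionStats") exposes totalKeysExamined, letting the two index orders be compared deterministically instead of by wall-clock time. The helper below is illustrative and assumes a collection shaped like the one above.

function keysExamined(coll, index, predicate) {
    assert.commandWorked(coll.ensureIndex(index));
    var stats = coll.find(predicate).explain("executionStats").executionStats;
    assert.commandWorked(coll.dropIndex(index));
    return stats.totalKeysExamined;
}
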
diff --git a/jstests/core/geo_s2polywithholes.js b/jstests/core/geo_s2polywithholes.js
index 80f7b0556c4..020ba350e85 100644
--- a/jstests/core/geo_s2polywithholes.js
+++ b/jstests/core/geo_s2polywithholes.js
@@ -73,9 +73,7 @@ assert.writeError(t.insert({geo: polyWithBiggerHole}));
// Test 6: Holes cannot share more than one vertex with exterior loop
var polySharedVertices = {
"type": "Polygon",
- "coordinates": [
- [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
- [[0, 0], [0.1, 0.9], [1, 1], [0.9, 0.1], [0, 0]]
- ]
+ "coordinates":
+ [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]], [[0, 0], [0.1, 0.9], [1, 1], [0.9, 0.1], [0, 0]]]
};
assert.writeError(t.insert({geo: polySharedVertices}));
diff --git a/jstests/core/geo_s2sparse.js b/jstests/core/geo_s2sparse.js
index 57f4f73fa3a..2fb93200c44 100644
--- a/jstests/core/geo_s2sparse.js
+++ b/jstests/core/geo_s2sparse.js
@@ -2,131 +2,130 @@
// All V2 2dsphere indices are sparse in the geo fields.
(function() {
- "use strict";
-
- var coll = db.geo_s2sparse;
- var point = {type: "Point", coordinates: [5, 5]};
- var indexSpec = {geo: "2dsphere", nonGeo: 1};
- var indexName = 'geo_2dsphere_nonGeo_1';
-
- //
- // V2 indices are "geo sparse" always.
- //
-
- // Clean up.
- coll.drop();
- coll.ensureIndex(indexSpec);
-
- var bulkInsertDocs = function(coll, numDocs, makeDocFn) {
- print("Bulk inserting " + numDocs + " documents");
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; ++i) {
- bulk.insert(makeDocFn(i));
- }
-
- assert.writeOK(bulk.execute());
-
- print("Bulk inserting " + numDocs + " documents completed");
- };
-
- // Insert N documents with the geo field.
- var N = 1000;
- bulkInsertDocs(coll, N, function(i) {
- return {geo: point, nonGeo: "point_" + i};
- });
-
- // Expect N keys.
- assert.eq(N, coll.validate().keysPerIndex[indexName]);
-
- // Insert N documents without the geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {wrongGeo: point, nonGeo: i};
- });
-
- // Still expect N keys as we didn't insert any geo stuff.
- assert.eq(N, coll.validate().keysPerIndex[indexName]);
-
- // Insert N documents with just the geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {geo: point};
- });
-
- // Expect 2N keys.
- assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
-
- // Add some "not geo" stuff.
- bulkInsertDocs(coll, N, function(i) {
- return {geo: null};
- });
- bulkInsertDocs(coll, N, function(i) {
- return {geo: []};
- });
- bulkInsertDocs(coll, N, function(i) {
- return {geo: undefined};
- });
- bulkInsertDocs(coll, N, function(i) {
- return {geo: {}};
- });
-
- // Still expect 2N keys.
- assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
-
- //
- // V1 indices are never sparse
- //
-
- coll.drop();
- coll.ensureIndex(indexSpec, {"2dsphereIndexVersion": 1});
-
- // Insert N documents with the geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {geo: point, nonGeo: "point_" + i};
- });
-
- // Expect N keys.
- assert.eq(N, coll.validate().keysPerIndex[indexName]);
-
- // Insert N documents without the geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {wrongGeo: point, nonGeo: i};
- });
-
- // Expect N keys as it's a V1 index.
- assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
-
- //
- // V2 indices with several 2dsphere-indexed fields are only sparse if all are missing.
- //
-
- // Clean up.
- coll.drop();
- coll.ensureIndex({geo: "2dsphere", otherGeo: "2dsphere"});
-
- indexName = 'geo_2dsphere_otherGeo_2dsphere';
-
- // Insert N documents with the first geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {geo: point};
- });
-
- // Expect N keys.
- assert.eq(N, coll.validate().keysPerIndex[indexName]);
-
- // Insert N documents with the second geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {otherGeo: point};
- });
-
- // They get inserted too.
- assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
-
- // Insert N documents with neither geo field.
- bulkInsertDocs(coll, N, function(i) {
- return {nonGeo: i};
- });
-
- // Still expect 2N keys as the neither geo docs were omitted from the index.
- assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
+"use strict";
+
+var coll = db.geo_s2sparse;
+var point = {type: "Point", coordinates: [5, 5]};
+var indexSpec = {geo: "2dsphere", nonGeo: 1};
+var indexName = 'geo_2dsphere_nonGeo_1';
+
+//
+// V2 indices are "geo sparse" always.
+//
+// Clean up.
+coll.drop();
+coll.ensureIndex(indexSpec);
+
+var bulkInsertDocs = function(coll, numDocs, makeDocFn) {
+ print("Bulk inserting " + numDocs + " documents");
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; ++i) {
+ bulk.insert(makeDocFn(i));
+ }
+
+ assert.writeOK(bulk.execute());
+
+ print("Bulk inserting " + numDocs + " documents completed");
+};
+
+// Insert N documents with the geo field.
+var N = 1000;
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: point, nonGeo: "point_" + i};
+});
+
+// Expect N keys.
+assert.eq(N, coll.validate().keysPerIndex[indexName]);
+
+// Insert N documents without the geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {wrongGeo: point, nonGeo: i};
+});
+
+// Still expect N keys as we didn't insert any geo stuff.
+assert.eq(N, coll.validate().keysPerIndex[indexName]);
+
+// Insert N documents with just the geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: point};
+});
+
+// Expect 2N keys.
+assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
+
+// Add some "not geo" stuff.
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: null};
+});
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: []};
+});
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: undefined};
+});
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: {}};
+});
+
+// Still expect 2N keys.
+assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
+
+//
+// V1 indices are never sparse
+//
+
+coll.drop();
+coll.ensureIndex(indexSpec, {"2dsphereIndexVersion": 1});
+
+// Insert N documents with the geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: point, nonGeo: "point_" + i};
+});
+
+// Expect N keys.
+assert.eq(N, coll.validate().keysPerIndex[indexName]);
+
+// Insert N documents without the geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {wrongGeo: point, nonGeo: i};
+});
+
+// Expect 2N keys, since V1 indices are never sparse.
+assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
+
+//
+// V2 indices with several 2dsphere-indexed fields are only sparse if all are missing.
+//
+
+// Clean up.
+coll.drop();
+coll.ensureIndex({geo: "2dsphere", otherGeo: "2dsphere"});
+
+indexName = 'geo_2dsphere_otherGeo_2dsphere';
+
+// Insert N documents with the first geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {geo: point};
+});
+
+// Expect N keys.
+assert.eq(N, coll.validate().keysPerIndex[indexName]);
+
+// Insert N documents with the second geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {otherGeo: point};
+});
+
+// They get inserted too.
+assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
+
+// Insert N documents with neither geo field.
+bulkInsertDocs(coll, N, function(i) {
+ return {nonGeo: i};
+});
+
+// Still expect 2N keys, as the documents with neither geo field were omitted from the index.
+assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
})();
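
A sketch of the bookkeeping used throughout this test: validate() reports per-index key counts, which is how sparse (V2) behaviour is distinguished from non-sparse (V1) without inspecting index internals. Collection and index names below are illustrative.

var coll = db.geo_sparse_sketch;
coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));  // V2+, geo-sparse
assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [5, 5]}}));
assert.writeOK(coll.insert({nonGeo: "no geo field"}));  // contributes no index key
assert.eq(1, coll.validate().keysPerIndex["geo_2dsphere"]);
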
diff --git a/jstests/core/geo_s2twofields.js b/jstests/core/geo_s2twofields.js
index c52b5d1d265..c50ca3c46b5 100644
--- a/jstests/core/geo_s2twofields.js
+++ b/jstests/core/geo_s2twofields.js
@@ -4,85 +4,83 @@
// @tags: [requires_fastcount, operations_longer_than_stepdown_interval_in_txns]
(function() {
- var t = db.geo_s2twofields;
- t.drop();
+var t = db.geo_s2twofields;
+t.drop();
- Random.setRandomSeed();
- var random = Random.rand;
- var PI = Math.PI;
+Random.setRandomSeed();
+var random = Random.rand;
+var PI = Math.PI;
- function randomCoord(center, minDistDeg, maxDistDeg) {
- var dx = random() * (maxDistDeg - minDistDeg) + minDistDeg;
- var dy = random() * (maxDistDeg - minDistDeg) + minDistDeg;
- return [center[0] + dx, center[1] + dy];
- }
+function randomCoord(center, minDistDeg, maxDistDeg) {
+ var dx = random() * (maxDistDeg - minDistDeg) + minDistDeg;
+ var dy = random() * (maxDistDeg - minDistDeg) + minDistDeg;
+ return [center[0] + dx, center[1] + dy];
+}
- var nyc = {type: "Point", coordinates: [-74.0064, 40.7142]};
- var miami = {type: "Point", coordinates: [-80.1303, 25.7903]};
- var maxPoints = 10000;
- var degrees = 5;
+var nyc = {type: "Point", coordinates: [-74.0064, 40.7142]};
+var miami = {type: "Point", coordinates: [-80.1303, 25.7903]};
+var maxPoints = 10000;
+var degrees = 5;
- var arr = [];
- for (var i = 0; i < maxPoints; ++i) {
- var fromCoord = randomCoord(nyc.coordinates, 0, degrees);
- var toCoord = randomCoord(miami.coordinates, 0, degrees);
+var arr = [];
+for (var i = 0; i < maxPoints; ++i) {
+ var fromCoord = randomCoord(nyc.coordinates, 0, degrees);
+ var toCoord = randomCoord(miami.coordinates, 0, degrees);
- arr.push({
- from: {type: "Point", coordinates: fromCoord},
- to: {type: "Point", coordinates: toCoord}
- });
- }
- res = t.insert(arr);
- assert.writeOK(res);
- assert.eq(t.count(), maxPoints);
+ arr.push(
+ {from: {type: "Point", coordinates: fromCoord}, to: {type: "Point", coordinates: toCoord}});
+}
+res = t.insert(arr);
+assert.writeOK(res);
+assert.eq(t.count(), maxPoints);
- function semiRigorousTime(func) {
- var lowestTime = func();
- var iter = 2;
- for (var i = 0; i < iter; ++i) {
- var run = func();
- if (run < lowestTime) {
- lowestTime = run;
- }
+function semiRigorousTime(func) {
+ var lowestTime = func();
+ var iter = 2;
+ for (var i = 0; i < iter; ++i) {
+ var run = func();
+ if (run < lowestTime) {
+ lowestTime = run;
}
- return lowestTime;
- }
-
- function timeWithoutAndWithAnIndex(index, query) {
- t.dropIndex(index);
- var withoutTime = semiRigorousTime(function() {
- return t.find(query).explain("executionStats").executionStats.executionTimeMillis;
- });
- t.ensureIndex(index);
- var withTime = semiRigorousTime(function() {
- return t.find(query).explain("executionStats").executionStats.executionTimeMillis;
- });
- t.dropIndex(index);
- return [withoutTime, withTime];
}
+ return lowestTime;
+}
- var maxQueryRad = 0.5 * PI / 180.0;
- // When we're not looking at ALL the data, anything indexed should beat not-indexed.
- var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"}, {
- from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
- to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+function timeWithoutAndWithAnIndex(index, query) {
+ t.dropIndex(index);
+ var withoutTime = semiRigorousTime(function() {
+ return t.find(query).explain("executionStats").executionStats.executionTimeMillis;
});
- print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
- // assert(smallQuery[0] > smallQuery[1]);
-
- // Let's just index one field.
- var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere"}, {
- from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
- to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+ t.ensureIndex(index);
+ var withTime = semiRigorousTime(function() {
+ return t.find(query).explain("executionStats").executionStats.executionTimeMillis;
});
- print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
- // assert(smallQuery[0] > smallQuery[1]);
+ t.dropIndex(index);
+ return [withoutTime, withTime];
+}
- // And the other one.
- var smallQuery = timeWithoutAndWithAnIndex({from: "2dsphere"}, {
- from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
- to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
- });
- print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
- // assert(smallQuery[0] > smallQuery[1]);
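+// Half a degree, expressed in radians.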
+var maxQueryRad = 0.5 * PI / 180.0;
+// When we're not looking at ALL the data, anything indexed should beat not-indexed.
+var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"}, {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+});
+print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
+// assert(smallQuery[0] > smallQuery[1]);
+
+// Let's just index one field.
+var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere"}, {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+});
+print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
+// assert(smallQuery[0] > smallQuery[1]);
+
+// And the other one.
+var smallQuery = timeWithoutAndWithAnIndex({from: "2dsphere"}, {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+});
+print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
+// assert(smallQuery[0] > smallQuery[1]);
}());
diff --git a/jstests/core/geo_s2within_line_polygon_sphere.js b/jstests/core/geo_s2within_line_polygon_sphere.js
index cbd95f7a717..17b89d25f9e 100644
--- a/jstests/core/geo_s2within_line_polygon_sphere.js
+++ b/jstests/core/geo_s2within_line_polygon_sphere.js
@@ -1,249 +1,240 @@
// Tests for $geowithin $centerSphere operator with LineString and Polygon.
(function() {
- function testGeoWithinCenterSphereLinePolygon(coll) {
- coll.drop();
-
- // Convenient test function for $geowithin $centerSphere.
- function testGeoWithinCenterSphere(centerSphere, expected) {
- let result = coll.find({geoField: {$geoWithin: {$centerSphere: centerSphere}}},
- {"name": 1, "_id": 0})
- .sort({"name": 1})
- .toArray();
- assert.eq(result, expected);
- }
+function testGeoWithinCenterSphereLinePolygon(coll) {
+ coll.drop();
+
+    // Convenience test function for $geoWithin $centerSphere.
+ function testGeoWithinCenterSphere(centerSphere, expected) {
+ let result = coll.find({geoField: {$geoWithin: {$centerSphere: centerSphere}}},
+ {"name": 1, "_id": 0})
+ .sort({"name": 1})
+ .toArray();
+ assert.eq(result, expected);
+ }
- // Basic tests.
- assert.writeOK(
- coll.insert({name: "Point1", geoField: {type: "Point", coordinates: [1, 1]}}));
- assert.writeOK(coll.insert(
- {name: "LineString1", geoField: {type: "LineString", coordinates: [[1, 1], [2, 2]]}}));
- assert.writeOK(coll.insert({
- name: "Polygon1",
- geoField: {type: "Polygon", coordinates: [[[1, 1], [2, 2], [2, 1], [1, 1]]]}
- }));
-
- // The second parameter of $centerSphere is in radian and the angle between [1, 1] and [2,2]
- // is about 0.0246 radian, much less than 1.
- testGeoWithinCenterSphere([[1, 1], 1],
- [{name: 'LineString1'}, {name: 'Point1'}, {name: 'Polygon1'}]);
-
- let geoDoc = {
- "name": "LineString2",
- "geoField": {
- "type": "LineString",
- "coordinates": [
- [151.0997772216797, -33.86157820443923],
- [151.21719360351562, -33.8952122494965]
- ]
- }
- };
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a LineString within a geowithin sphere.
- testGeoWithinCenterSphere([[151.16789425018004, -33.8508357122312], 0.0011167360027064348],
- [{name: "LineString2"}]);
-
- // Test for a LineString intersecting with geowithin sphere (should not return a match).
- testGeoWithinCenterSphere([[151.09822404831158, -33.85109290503663], 0.0013568277575574095],
- []);
-
- geoDoc = {
- "name": "LineString3",
- "geoField": {
- "type": "LineString",
- "coordinates": [
- [174.72896575927734, -36.86698689106876],
- [174.72965240478516, -36.90707799098374],
- [174.7808074951172, -36.9062544131224],
- [174.77840423583982, -36.88154294352893],
- [174.72827911376953, -36.88373984256185]
- ]
- }
- };
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a LineString forming a closed loop rectangle within a geowithin sphere.
- testGeoWithinCenterSphere([[174.75211152791763, -36.88962755605813], 0.000550933650273084],
- [{name: "LineString3"}]);
-
- // Test for a LineString intersecting with geowithin sphere (should not return a match).
- testGeoWithinCenterSphere([[174.75689891704758, -36.8998373317427], 0.0005315628331256537],
- []);
-
- // Test for a LineString outside of geowithin sphere (should not return a match).
- testGeoWithinCenterSphere([[174.8099591465865, -36.89409450096385], 0.00027296698925637807],
- []);
-
- // Test for a Polygon within a geowithin sphere.
- geoDoc = {
- "name": "Polygon2",
- "city": "Wellington",
- "geoField": {
- "type": "Polygon",
- "coordinates": [[
- [174.72930908203125, -41.281676559981676],
- [174.76261138916013, -41.34820622928743],
- [174.84329223632812, -41.32861539747227],
- [174.8312759399414, -41.280902559820895],
- [174.72930908203125, -41.281676559981676]
+ // Basic tests.
+ assert.writeOK(coll.insert({name: "Point1", geoField: {type: "Point", coordinates: [1, 1]}}));
+ assert.writeOK(coll.insert(
+ {name: "LineString1", geoField: {type: "LineString", coordinates: [[1, 1], [2, 2]]}}));
+ assert.writeOK(coll.insert({
+ name: "Polygon1",
+ geoField: {type: "Polygon", coordinates: [[[1, 1], [2, 2], [2, 1], [1, 1]]]}
+ }));
+
+    // The second parameter of $centerSphere is in radians, and the angle between [1, 1] and
+    // [2, 2] is about 0.0246 radians, much less than 1.
+ testGeoWithinCenterSphere([[1, 1], 1],
+ [{name: 'LineString1'}, {name: 'Point1'}, {name: 'Polygon1'}]);
+
+ let geoDoc = {
+ "name": "LineString2",
+ "geoField": {
+ "type": "LineString",
+ "coordinates":
+ [[151.0997772216797, -33.86157820443923], [151.21719360351562, -33.8952122494965]]
+ }
+ };
+ assert.writeOK(coll.insert(geoDoc));
+
+ // Test for a LineString within a geowithin sphere.
+ testGeoWithinCenterSphere([[151.16789425018004, -33.8508357122312], 0.0011167360027064348],
+ [{name: "LineString2"}]);
+
+ // Test for a LineString intersecting with geowithin sphere (should not return a match).
+ testGeoWithinCenterSphere([[151.09822404831158, -33.85109290503663], 0.0013568277575574095],
+ []);
+
+ geoDoc = {
+ "name": "LineString3",
+ "geoField": {
+ "type": "LineString",
+ "coordinates": [
+ [174.72896575927734, -36.86698689106876],
+ [174.72965240478516, -36.90707799098374],
+ [174.7808074951172, -36.9062544131224],
+ [174.77840423583982, -36.88154294352893],
+ [174.72827911376953, -36.88373984256185]
+ ]
+ }
+ };
+ assert.writeOK(coll.insert(geoDoc));
+
+ // Test for a LineString forming a closed loop rectangle within a geowithin sphere.
+ testGeoWithinCenterSphere([[174.75211152791763, -36.88962755605813], 0.000550933650273084],
+ [{name: "LineString3"}]);
+
+ // Test for a LineString intersecting with geowithin sphere (should not return a match).
+ testGeoWithinCenterSphere([[174.75689891704758, -36.8998373317427], 0.0005315628331256537], []);
+
+ // Test for a LineString outside of geowithin sphere (should not return a match).
+ testGeoWithinCenterSphere([[174.8099591465865, -36.89409450096385], 0.00027296698925637807],
+ []);
+
+ // Test for a Polygon within a geowithin sphere.
+ geoDoc = {
+ "name": "Polygon2",
+ "city": "Wellington",
+ "geoField": {
+ "type": "Polygon",
+ "coordinates": [[
+ [174.72930908203125, -41.281676559981676],
+ [174.76261138916013, -41.34820622928743],
+ [174.84329223632812, -41.32861539747227],
+ [174.8312759399414, -41.280902559820895],
+ [174.72930908203125, -41.281676559981676]
+ ]]
+ }
+ };
+ assert.writeOK(coll.insert(geoDoc));
+
+ // Test for a Polygon within a geowithin sphere.
+ testGeoWithinCenterSphere([[174.78536621904806, -41.30510816038769], 0.0009483659386360411],
+ [{name: "Polygon2"}]);
+
+ // Test for an empty query cap (radius 0) inside of a polygon that covers the centerSphere
+ // (should not return a match).
+ testGeoWithinCenterSphere([[174.79144274337722, -41.307682001033385], 0], []);
+
+ // Test for a Polygon intersecting with geowithin sphere (should not return a match).
+ testGeoWithinCenterSphere([[174.7599527533759, -41.27137819591382], 0.0011247013153526434], []);
+
+ // Test for a Polygon outside of geowithin sphere (should not return a match).
+ testGeoWithinCenterSphere([[174.80008799649448, -41.201484845543426], 0.0007748581633291528],
+ []);
+
+ geoDoc = {
+ "name": "MultiPolygon1",
+ "city": "Sydney",
+ "geoField": {
+ "type": "MultiPolygon",
+ "coordinates": [
+ [[
+ [151.21032714843747, -33.85074408022877],
+ [151.23367309570312, -33.84333046657819],
+ [151.20929718017578, -33.81680727566872],
+ [151.1876678466797, -33.829927301798676],
+ [151.21032714843747, -33.85074408022877]
+ ]],
+ [[
+ [151.20140075683594, -33.856446422184305],
+ [151.17565155029297, -33.88979749364442],
+ [151.2044906616211, -33.9151583833889],
+ [151.23058319091797, -33.87041555094182],
+ [151.20140075683594, -33.856446422184305]
]]
- }
- };
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a Polygon within a geowithin sphere.
- testGeoWithinCenterSphere([[174.78536621904806, -41.30510816038769], 0.0009483659386360411],
- [{name: "Polygon2"}]);
-
- // Test for an empty query cap (radius 0) inside of a polygon that covers the centerSphere
- // (should not return a match).
- testGeoWithinCenterSphere([[174.79144274337722, -41.307682001033385], 0], []);
-
- // Test for a Polygon intersecting with geowithin sphere (should not return a match).
- testGeoWithinCenterSphere([[174.7599527533759, -41.27137819591382], 0.0011247013153526434],
- []);
-
- // Test for a Polygon outside of geowithin sphere (should not return a match).
- testGeoWithinCenterSphere(
- [[174.80008799649448, -41.201484845543426], 0.0007748581633291528], []);
-
- geoDoc = {
- "name": "MultiPolygon1",
- "city": "Sydney",
- "geoField": {
- "type": "MultiPolygon",
- "coordinates": [
- [[
- [151.21032714843747, -33.85074408022877],
- [151.23367309570312, -33.84333046657819],
- [151.20929718017578, -33.81680727566872],
- [151.1876678466797, -33.829927301798676],
- [151.21032714843747, -33.85074408022877]
- ]],
- [[
- [151.20140075683594, -33.856446422184305],
- [151.17565155029297, -33.88979749364442],
- [151.2044906616211, -33.9151583833889],
- [151.23058319091797, -33.87041555094182],
- [151.20140075683594, -33.856446422184305]
- ]]
+ ]
+ }
+ };
+
+ assert.writeOK(coll.insert(geoDoc));
+
+    // Test for a MultiPolygon (two separate polygons) within a geowithin sphere.
+ testGeoWithinCenterSphere([[151.20821632978107, -33.865139891361636], 0.000981007241416606],
+ [{name: "MultiPolygon1"}]);
+
+    // Verify that a MultiPolygon does not match when only one of its polygons lies within
+    // the $centerSphere.
+ testGeoWithinCenterSphere([[151.20438542915883, -33.89006380099829], 0.0006390286437185907],
+ []);
+
+ geoDoc = {
+ "name": "MultiPolygon2",
+ "city": "Sydney",
+ "geoField": {
+ "type": "MultiPolygon",
+ "coordinates": [[
+ [
+ [151.203031539917, -33.87116383262648],
+ [151.20401859283447, -33.88270791866475],
+ [151.21891021728516, -33.88256540860479],
+ [151.2138032913208, -33.86817066653049],
+ [151.203031539917, -33.87116383262648]
+ ],
+ [
+ [151.21041297912598, -33.86980979429744],
+ [151.20938301086426, -33.8767579211837],
+ [151.2121295928955, -33.87722110953139],
+ [151.21315956115723, -33.86995232565932],
+ [151.21041297912598, -33.86980979429744]
]
- }
- };
-
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a MultiPolygon (two seperate polygons) within a geowithin sphere.
- testGeoWithinCenterSphere([[151.20821632978107, -33.865139891361636], 0.000981007241416606],
- [{name: "MultiPolygon1"}]);
-
- // Verify that only one of the polygons of a MultiPolygon in the $centerSphere does not
- // match
- testGeoWithinCenterSphere([[151.20438542915883, -33.89006380099829], 0.0006390286437185907],
- []);
-
- geoDoc = {
- "name": "MultiPolygon2",
- "city": "Sydney",
- "geoField": {
- "type": "MultiPolygon",
- "coordinates": [[
- [
- [151.203031539917, -33.87116383262648],
- [151.20401859283447, -33.88270791866475],
- [151.21891021728516, -33.88256540860479],
- [151.2138032913208, -33.86817066653049],
- [151.203031539917, -33.87116383262648]
- ],
- [
- [151.21041297912598, -33.86980979429744],
- [151.20938301086426, -33.8767579211837],
- [151.2121295928955, -33.87722110953139],
- [151.21315956115723, -33.86995232565932],
- [151.21041297912598, -33.86980979429744]
- ]
- ]]
- }
- };
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a MultiPolygon (with a hole) within a geowithin sphere.
- testGeoWithinCenterSphere(
- [[151.20936119647115, -33.875266834633265], 0.00020277354002627845],
- [{name: "MultiPolygon2"}]);
-
- // Test for centerSphere as big as earth radius (should return all).
- testGeoWithinCenterSphere(
- [[151.20936119647115, -33.875266834633265], 3.14159265358979323846], [
- {name: "LineString1"},
- {name: "LineString2"},
- {name: "LineString3"},
- {name: "MultiPolygon1"},
- {name: "MultiPolygon2"},
- {name: "Point1"},
- {name: "Polygon1"},
- {name: "Polygon2"}
- ]);
-
- // Test for a MultiPolygon with holes intersecting with geowithin sphere (should not return
- // a match).
- testGeoWithinCenterSphere(
- [[151.21028000820485, -33.87067923462358], 0.00013138775245714733], []);
-
- // Test for a MultiPolygon with holes with geowithin sphere inside the hole (should not
- // return a match).
- testGeoWithinCenterSphere(
- [[151.21093787887645, -33.87533330567804], 0.000016565456776516003], []);
-
- coll.drop();
-
- // Test for a large query cap containing both of line vertices but not the line itself.
- // (should not return a match).
- geoDoc = {
- "name": "HorizontalLongLine",
- "geoField": {
- "type": "LineString",
- "coordinates": [[96.328125, 5.61598581915534], [153.984375, -6.315298538330033]]
- }
- };
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a large query cap containing both of line vertices but not the line itself.
- // (should not return a match).
- testGeoWithinCenterSphere([[-59.80246852929814, -2.3633072488322853], 2.768403272464979],
- []);
-
- coll.drop();
-
- // Test for a large query cap containing all polygon vertices but not the whole polygon.
- // (should not return a match).
- geoDoc = {
- "name": "LargeRegion",
- "geoField": {
- "type": "Polygon",
- "coordinates": [[
- [98.96484375, -11.350796722383672],
- [135.35156249999997, -11.350796722383672],
- [135.35156249999997, 0.8788717828324276],
- [98.96484375, 0.8788717828324276],
- [98.96484375, -11.350796722383672]
- ]]
- }
- };
- assert.writeOK(coll.insert(geoDoc));
-
- // Test for a large query cap containing both of line vertices but not the line itself.
- // (should not return a match).
- testGeoWithinCenterSphere([[-61.52266094410311, 17.79937981451866], 2.9592242752161573],
- []);
- }
+ ]]
+ }
+ };
+ assert.writeOK(coll.insert(geoDoc));
+
+ // Test for a MultiPolygon (with a hole) within a geowithin sphere.
+ testGeoWithinCenterSphere([[151.20936119647115, -33.875266834633265], 0.00020277354002627845],
+ [{name: "MultiPolygon2"}]);
+
+ // Test for centerSphere as big as earth radius (should return all).
+ testGeoWithinCenterSphere([[151.20936119647115, -33.875266834633265], 3.14159265358979323846], [
+ {name: "LineString1"},
+ {name: "LineString2"},
+ {name: "LineString3"},
+ {name: "MultiPolygon1"},
+ {name: "MultiPolygon2"},
+ {name: "Point1"},
+ {name: "Polygon1"},
+ {name: "Polygon2"}
+ ]);
+
+ // Test for a MultiPolygon with holes intersecting with geowithin sphere (should not return
+ // a match).
+ testGeoWithinCenterSphere([[151.21028000820485, -33.87067923462358], 0.00013138775245714733],
+ []);
+
+    // Test for a MultiPolygon with holes where the geowithin sphere lies inside the hole
+    // (should not return a match).
+ testGeoWithinCenterSphere([[151.21093787887645, -33.87533330567804], 0.000016565456776516003],
+ []);
+
+ coll.drop();
+
+    // Test for a large query cap containing both of the line's vertices but not the line
+    // itself (should not return a match).
+ geoDoc = {
+ "name": "HorizontalLongLine",
+ "geoField": {
+ "type": "LineString",
+ "coordinates": [[96.328125, 5.61598581915534], [153.984375, -6.315298538330033]]
+ }
+ };
+ assert.writeOK(coll.insert(geoDoc));
+
+    // Test for a large query cap containing both of the line's vertices but not the line
+    // itself (should not return a match).
+ testGeoWithinCenterSphere([[-59.80246852929814, -2.3633072488322853], 2.768403272464979], []);
+
+ coll.drop();
+
+ // Test for a large query cap containing all polygon vertices but not the whole polygon.
+ // (should not return a match).
+ geoDoc = {
+ "name": "LargeRegion",
+ "geoField": {
+ "type": "Polygon",
+ "coordinates": [[
+ [98.96484375, -11.350796722383672],
+ [135.35156249999997, -11.350796722383672],
+ [135.35156249999997, 0.8788717828324276],
+ [98.96484375, 0.8788717828324276],
+ [98.96484375, -11.350796722383672]
+ ]]
+ }
+ };
+ assert.writeOK(coll.insert(geoDoc));
+
+    // Test for a large query cap containing all polygon vertices but not the whole polygon
+    // (should not return a match).
+ testGeoWithinCenterSphere([[-61.52266094410311, 17.79937981451866], 2.9592242752161573], []);
+}
- // Test $geowithin $centerSphere for LineString and Polygon without index.
- let coll = db.geo_s2within_line_polygon_sphere;
- testGeoWithinCenterSphereLinePolygon(coll);
+// Test $geowithin $centerSphere for LineString and Polygon without index.
+let coll = db.geo_s2within_line_polygon_sphere;
+testGeoWithinCenterSphereLinePolygon(coll);
- // Test $geowithin $centerSphere for LineString and Polygon with 2dsphere index.
- assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
- testGeoWithinCenterSphereLinePolygon(coll);
+// Test $geowithin $centerSphere for LineString and Polygon with 2dsphere index.
+assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
+testGeoWithinCenterSphereLinePolygon(coll);
})();
\ No newline at end of file
diff --git a/jstests/core/geo_update_btree.js b/jstests/core/geo_update_btree.js
index 476921b5c2f..981f0c629c3 100644
--- a/jstests/core/geo_update_btree.js
+++ b/jstests/core/geo_update_btree.js
@@ -20,14 +20,15 @@ if (testingReplication) {
Random.setRandomSeed();
var parallelInsert = startParallelShell(
- "Random.setRandomSeed();" + "for ( var i = 0; i < 1000; i++ ) {" +
+ "Random.setRandomSeed();" +
+ "for ( var i = 0; i < 1000; i++ ) {" +
" var doc = { loc: [ Random.rand() * 180, Random.rand() * 180 ], v: '' };" +
- " db.jstests_geo_update_btree.insert(doc);" + "}");
+ " db.jstests_geo_update_btree.insert(doc);" +
+ "}");
for (i = 0; i < 1000; i++) {
coll.update({
- loc:
- {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}}
+ loc: {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}}
},
{$set: {v: big}},
false,
diff --git a/jstests/core/geob.js b/jstests/core/geob.js
index 2664d6c5921..d1f01bf7b9c 100644
--- a/jstests/core/geob.js
+++ b/jstests/core/geob.js
@@ -1,38 +1,38 @@
(function() {
- "use strict";
- var t = db.geob;
- t.drop();
-
- var a = {p: [0, 0]};
- var b = {p: [1, 0]};
- var c = {p: [3, 4]};
- var d = {p: [0, 6]};
-
- t.save(a);
- t.save(b);
- t.save(c);
- t.save(d);
- t.ensureIndex({p: "2d"});
-
- let res = t.aggregate({$geoNear: {near: [0, 0], distanceField: "dis"}}).toArray();
-
- assert.close(0, res[0].dis, "B1");
- assert.eq(a._id, res[0]._id, "B2");
-
- assert.close(1, res[1].dis, "C1");
- assert.eq(b._id, res[1]._id, "C2");
-
- assert.close(5, res[2].dis, "D1");
- assert.eq(c._id, res[2]._id, "D2");
-
- assert.close(6, res[3].dis, "E1");
- assert.eq(d._id, res[3]._id, "E2");
-
- res = t.aggregate({
- $geoNear: {near: [0, 0], distanceField: "dis", distanceMultiplier: 2.0}
- }).toArray();
- assert.close(0, res[0].dis, "G");
- assert.close(2, res[1].dis, "H");
- assert.close(10, res[2].dis, "I");
- assert.close(12, res[3].dis, "J");
+"use strict";
+var t = db.geob;
+t.drop();
+
+var a = {p: [0, 0]};
+var b = {p: [1, 0]};
+var c = {p: [3, 4]};
+var d = {p: [0, 6]};
+
+t.save(a);
+t.save(b);
+t.save(c);
+t.save(d);
+t.ensureIndex({p: "2d"});
+
+let res = t.aggregate({$geoNear: {near: [0, 0], distanceField: "dis"}}).toArray();
+
+assert.close(0, res[0].dis, "B1");
+assert.eq(a._id, res[0]._id, "B2");
+
+assert.close(1, res[1].dis, "C1");
+assert.eq(b._id, res[1]._id, "C2");
+
+assert.close(5, res[2].dis, "D1");
+assert.eq(c._id, res[2]._id, "D2");
+
+assert.close(6, res[3].dis, "E1");
+assert.eq(d._id, res[3]._id, "E2");
+
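+// distanceMultiplier: 2.0 should double each of the distances checked above.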
+res = t.aggregate({
+ $geoNear: {near: [0, 0], distanceField: "dis", distanceMultiplier: 2.0}
+ }).toArray();
+assert.close(0, res[0].dis, "G");
+assert.close(2, res[1].dis, "H");
+assert.close(10, res[2].dis, "I");
+assert.close(12, res[3].dis, "J");
}());
diff --git a/jstests/core/geonear_cmd_input_validation.js b/jstests/core/geonear_cmd_input_validation.js
index 9cc82cb6f25..611e29b01d1 100644
--- a/jstests/core/geonear_cmd_input_validation.js
+++ b/jstests/core/geonear_cmd_input_validation.js
@@ -69,7 +69,6 @@ indexTypes.forEach(function(indexType) {
// Try several bad values for min/maxDistance.
badNumbers.concat(outOfRangeDistances).forEach(function(badDistance) {
-
var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription +
" and " + indexType + " index should've failed with " + optionName +
" " + badDistance);
@@ -80,7 +79,6 @@ indexTypes.forEach(function(indexType) {
// Bad values for limit / num.
['num', 'limit'].forEach(function(limitOptionName) {
[-1, 'foo'].forEach(function(badLimit) {
-
var msg =
("geoNear with spherical=" + spherical + " and " + pointDescription +
" and " + indexType + " index should've failed with '" +
@@ -94,7 +92,6 @@ indexTypes.forEach(function(indexType) {
// Bad values for distanceMultiplier.
badNumbers.forEach(function(badNumber) {
-
var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription +
" and " + indexType +
" index should've failed with distanceMultiplier " + badNumber);
diff --git a/jstests/core/geonear_key.js b/jstests/core/geonear_key.js
index 41fcfb0a5da..0238e012577 100644
--- a/jstests/core/geonear_key.js
+++ b/jstests/core/geonear_key.js
@@ -2,101 +2,99 @@
* Tests for the 'key' field accepted by the $geoNear aggregation stage.
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const coll = db.jstests_geonear_key;
- coll.drop();
-
- assert.writeOK(coll.insert({_id: 0, a: [1, 1]}));
- assert.writeOK(coll.insert({_id: 1, a: [1, 2]}));
- assert.writeOK(coll.insert({_id: 2, b: {c: [1, 1]}}));
- assert.writeOK(coll.insert({_id: 3, b: {c: [1, 2]}}));
- assert.writeOK(coll.insert({_id: 4, b: {d: [1, 1]}}));
- assert.writeOK(coll.insert({_id: 5, b: {d: [1, 2]}}));
-
- /**
- * Runs an aggregation consisting of a single $geoNear stage described by 'nearParams', and
- * returns the raw command result object. 'nearParams' consists of the parameters to the
- * $geoNear stage, but is expected to omit 'distanceField'.
- */
- function runNearAgg(nearParams) {
- let nearAggParams = Object.extend({distanceField: "dist"}, nearParams);
- let nearAggStage = {$geoNear: nearAggParams};
- let aggCmd = {aggregate: coll.getName(), pipeline: [nearAggStage], cursor: {}};
- return db.runCommand(aggCmd);
- }
+"use strict";
- /**
- * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the
- * operation fails with 'code'.
- */
- function assertGeoNearFails(nearParams, code) {
- assert.commandFailedWithCode(runNearAgg(nearParams), code);
- }
+load("jstests/libs/analyze_plan.js");
- /**
- * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the
- * operation returns the _id values in 'expectedIds', in order.
- */
- function assertGeoNearSucceedsAndReturnsIds(nearParams, expectedIds) {
- let aggResult = assert.commandWorked(runNearAgg(nearParams));
- let res = aggResult.cursor.firstBatch;
- let errfn = () => `expected ids ${tojson(expectedIds)}, but these documents were ` +
- `returned: ${tojson(res)}`;
-
- assert.eq(expectedIds.length, res.length, errfn);
- for (let i = 0; i < expectedIds.length; i++) {
- assert.eq(expectedIds[i], aggResult.cursor.firstBatch[i]._id, errfn);
- }
- }
+const coll = db.jstests_geonear_key;
+coll.drop();
+
+assert.writeOK(coll.insert({_id: 0, a: [1, 1]}));
+assert.writeOK(coll.insert({_id: 1, a: [1, 2]}));
+assert.writeOK(coll.insert({_id: 2, b: {c: [1, 1]}}));
+assert.writeOK(coll.insert({_id: 3, b: {c: [1, 2]}}));
+assert.writeOK(coll.insert({_id: 4, b: {d: [1, 1]}}));
+assert.writeOK(coll.insert({_id: 5, b: {d: [1, 2]}}));
+
+/**
+ * Runs an aggregation consisting of a single $geoNear stage described by 'nearParams', and
+ * returns the raw command result object. 'nearParams' consists of the parameters to the
+ * $geoNear stage, but is expected to omit 'distanceField'.
+ */
+function runNearAgg(nearParams) {
+ let nearAggParams = Object.extend({distanceField: "dist"}, nearParams);
+ let nearAggStage = {$geoNear: nearAggParams};
+ let aggCmd = {aggregate: coll.getName(), pipeline: [nearAggStage], cursor: {}};
+ return db.runCommand(aggCmd);
+}
- // Verify that $geoNear fails when the key field is not a string.
- assertGeoNearFails({near: [0, 0], key: 1}, ErrorCodes.TypeMismatch);
-
- // Verify that $geoNear fails when the key field the empty string.
- assertGeoNearFails({near: [0, 0], key: ""}, ErrorCodes.BadValue);
-
- // Verify that $geoNear fails when there are no eligible indexes.
- assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound);
-
- // Verify that the query system raises an error when an index is specified that doesn't exist.
- assertGeoNearFails({near: [0, 0], key: "a"}, ErrorCodes.BadValue);
-
- // Create a number of 2d and 2dsphere indexes.
- assert.commandWorked(coll.createIndex({a: "2d"}));
- assert.commandWorked(coll.createIndex({a: "2dsphere"}));
- assert.commandWorked(coll.createIndex({"b.c": "2d"}));
- assert.commandWorked(coll.createIndex({"b.d": "2dsphere"}));
-
- // Verify that $geoNear fails when the index to use is ambiguous because of the absence of the
- // key field.
- assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound);
-
- // Verify that the key field can correctly identify the index to use, when there is only a
- // single geo index on the relevant path.
- assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "b.c"}, [2, 3]);
- assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "b.d"},
- [4, 5]);
-
- // Verify that when the key path has both a 2d or 2dsphere index, the command still succeeds.
- assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "a"}, [0, 1]);
- assertGeoNearSucceedsAndReturnsIds({near: [0, 0], spherical: true, key: "a"}, [0, 1]);
- assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "a"},
- [0, 1]);
- assertGeoNearSucceedsAndReturnsIds(
- {near: {type: "Point", coordinates: [0, 0]}, spherical: true, key: "a"}, [0, 1]);
-
- // Verify that $geoNear fails when a GeoJSON point is used with a 'key' path that only has a 2d
- // index. GeoJSON points can only be used for spherical geometry.
- assertGeoNearFails({near: {type: "Point", coordinates: [0, 0]}, key: "b.c"},
- ErrorCodes.BadValue);
-
- // Verify that $geoNear fails when:
- // -- The only index available over the 'key' path is 2dsphere.
- // -- spherical=false.
- // -- The search point is a legacy coordinate pair.
- assertGeoNearFails({near: [0, 0], key: "b.d"}, ErrorCodes.BadValue);
- assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.BadValue);
+/**
+ * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the
+ * operation fails with 'code'.
+ */
+function assertGeoNearFails(nearParams, code) {
+ assert.commandFailedWithCode(runNearAgg(nearParams), code);
+}
+
+/**
+ * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the
+ * operation returns the _id values in 'expectedIds', in order.
+ */
+function assertGeoNearSucceedsAndReturnsIds(nearParams, expectedIds) {
+ let aggResult = assert.commandWorked(runNearAgg(nearParams));
+ let res = aggResult.cursor.firstBatch;
+ let errfn = () => `expected ids ${tojson(expectedIds)}, but these documents were ` +
+ `returned: ${tojson(res)}`;
+
+ assert.eq(expectedIds.length, res.length, errfn);
+ for (let i = 0; i < expectedIds.length; i++) {
+ assert.eq(expectedIds[i], aggResult.cursor.firstBatch[i]._id, errfn);
+ }
+}
+
+// Verify that $geoNear fails when the key field is not a string.
+assertGeoNearFails({near: [0, 0], key: 1}, ErrorCodes.TypeMismatch);
+
+// Verify that $geoNear fails when the key field is the empty string.
+assertGeoNearFails({near: [0, 0], key: ""}, ErrorCodes.BadValue);
+
+// Verify that $geoNear fails when there are no eligible indexes.
+assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound);
+
+// Verify that the query system raises an error when an index is specified that doesn't exist.
+assertGeoNearFails({near: [0, 0], key: "a"}, ErrorCodes.BadValue);
+
+// Create a number of 2d and 2dsphere indexes.
+assert.commandWorked(coll.createIndex({a: "2d"}));
+assert.commandWorked(coll.createIndex({a: "2dsphere"}));
+assert.commandWorked(coll.createIndex({"b.c": "2d"}));
+assert.commandWorked(coll.createIndex({"b.d": "2dsphere"}));
+
+// Verify that $geoNear fails when the index to use is ambiguous because of the absence of the
+// key field.
+assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound);
+
+// Verify that the key field can correctly identify the index to use, when there is only a
+// single geo index on the relevant path.
+assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "b.c"}, [2, 3]);
+assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "b.d"},
+ [4, 5]);
+
+// Verify that when the key path has both a 2d and a 2dsphere index, the command still succeeds.
+assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "a"}, [0, 1]);
+assertGeoNearSucceedsAndReturnsIds({near: [0, 0], spherical: true, key: "a"}, [0, 1]);
+assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "a"}, [0, 1]);
+assertGeoNearSucceedsAndReturnsIds(
+ {near: {type: "Point", coordinates: [0, 0]}, spherical: true, key: "a"}, [0, 1]);
+
+// Verify that $geoNear fails when a GeoJSON point is used with a 'key' path that only has a 2d
+// index. GeoJSON points can only be used for spherical geometry.
+assertGeoNearFails({near: {type: "Point", coordinates: [0, 0]}, key: "b.c"}, ErrorCodes.BadValue);
+
+// Verify that $geoNear fails when:
+// -- The only index available over the 'key' path is 2dsphere.
+// -- spherical=false.
+// -- The search point is a legacy coordinate pair.
+assertGeoNearFails({near: [0, 0], key: "b.d"}, ErrorCodes.BadValue);
+assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.BadValue);
}());
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index 4d9eebe9374..4d62966edfa 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -9,74 +9,74 @@
// ]
(function() {
- 'use strict';
+'use strict';
- // We turn off gossiping the mongo shell's clusterTime because it causes the slow command log
- // messages to get truncated since they'll exceed 512 characters. The truncated log messages
- // will fail to match the find and update patterns defined later on in this test.
- TestData.skipGossipingClusterTime = true;
+// We turn off gossiping the mongo shell's clusterTime because it causes the slow command log
+// messages to get truncated since they'll exceed 512 characters. The truncated log messages
+// will fail to match the find and update patterns defined later on in this test.
+TestData.skipGossipingClusterTime = true;
- const glcol = db.getLogTest2;
- glcol.drop();
+const glcol = db.getLogTest2;
+glcol.drop();
- function contains(arr, func) {
- let i = arr.length;
- while (i--) {
- if (func(arr[i])) {
- return true;
- }
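+// Returns true if 'func' returns a truthy value for any element of 'arr'.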
+function contains(arr, func) {
+ let i = arr.length;
+ while (i--) {
+ if (func(arr[i])) {
+ return true;
}
- return false;
}
+ return false;
+}
- // test doesn't work when talking to mongos
- if (db.isMaster().msg === "isdbgrid") {
- return;
- }
+// test doesn't work when talking to mongos
+if (db.isMaster().msg === "isdbgrid") {
+ return;
+}
- // 1. Run a slow query
- glcol.save({"SENTINEL": 1});
- glcol.findOne({
- "SENTINEL": 1,
- "$where": function() {
- sleep(1000);
- return true;
- }
- });
+// 1. Run a slow query
+glcol.save({"SENTINEL": 1});
+glcol.findOne({
+ "SENTINEL": 1,
+ "$where": function() {
+ sleep(1000);
+ return true;
+ }
+});
- const query = assert.commandWorked(db.adminCommand({getLog: "global"}));
- assert(query.log, "no log field");
- assert.gt(query.log.length, 0, "no log lines");
+const query = assert.commandWorked(db.adminCommand({getLog: "global"}));
+assert(query.log, "no log field");
+assert.gt(query.log.length, 0, "no log lines");
- // Ensure that slow query is logged in detail.
- assert(contains(query.log, function(v) {
- print(v);
- const opString = db.getMongo().useReadCommands() ? " find " : " query ";
- const filterString = db.getMongo().useReadCommands() ? "filter:" : "command:";
- return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
- v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
- v.indexOf("SENTINEL") != -1;
- }));
+// Ensure that slow query is logged in detail.
+assert(contains(query.log, function(v) {
+ print(v);
+ const opString = db.getMongo().useReadCommands() ? " find " : " query ";
+ const filterString = db.getMongo().useReadCommands() ? "filter:" : "command:";
+ return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
+ v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ v.indexOf("SENTINEL") != -1;
+}));
- // 2. Run a slow update
- glcol.update({
- "SENTINEL": 1,
- "$where": function() {
- sleep(1000);
- return true;
- }
- },
- {"x": "x"});
+// 2. Run a slow update
+glcol.update({
+ "SENTINEL": 1,
+ "$where": function() {
+ sleep(1000);
+ return true;
+ }
+},
+ {"x": "x"});
- const update = assert.commandWorked(db.adminCommand({getLog: "global"}));
- assert(update.log, "no log field");
- assert.gt(update.log.length, 0, "no log lines");
+const update = assert.commandWorked(db.adminCommand({getLog: "global"}));
+assert(update.log, "no log field");
+assert.gt(update.log.length, 0, "no log lines");
- // Ensure that slow update is logged in deail.
- assert(contains(update.log, function(v) {
- print(v);
- return v.indexOf(" update ") != -1 && v.indexOf("command") != -1 &&
- v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
- v.indexOf("SENTINEL") != -1;
- }));
+// Ensure that slow update is logged in detail.
+assert(contains(update.log, function(v) {
+ print(v);
+ return v.indexOf(" update ") != -1 && v.indexOf("command") != -1 &&
+ v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ v.indexOf("SENTINEL") != -1;
+}));
})();
diff --git a/jstests/core/getmore_cmd_maxtimems.js b/jstests/core/getmore_cmd_maxtimems.js
index 7b13f858bc1..1b8e20ba962 100644
--- a/jstests/core/getmore_cmd_maxtimems.js
+++ b/jstests/core/getmore_cmd_maxtimems.js
@@ -4,46 +4,46 @@
// Test attaching maxTimeMS to a getMore command.
(function() {
- 'use strict';
-
- var cmdRes;
- var collName = 'getmore_cmd_maxtimems';
- var coll = db[collName];
- coll.drop();
-
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- // Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a non-capped
- // collection.
- cmdRes = db.runCommand({find: collName, batchSize: 2});
- assert.commandWorked(cmdRes);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
- assert.commandFailed(cmdRes);
-
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- // Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a capped
- // collection.
- cmdRes = db.runCommand({find: collName, batchSize: 2});
- assert.commandWorked(cmdRes);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
- assert.commandFailed(cmdRes);
-
- // Can't attach maxTimeMS to a getMore command for a non-awaitData tailable cursor.
- cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true});
- assert.commandWorked(cmdRes);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
- assert.commandFailed(cmdRes);
-
- // Can attach maxTimeMS to a getMore command for an awaitData cursor.
- cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true, awaitData: true});
- assert.commandWorked(cmdRes);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
- assert.commandWorked(cmdRes);
+'use strict';
+
+var cmdRes;
+var collName = 'getmore_cmd_maxtimems';
+var coll = db[collName];
+coll.drop();
+
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+// Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a non-capped
+// collection.
+cmdRes = db.runCommand({find: collName, batchSize: 2});
+assert.commandWorked(cmdRes);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
+assert.commandFailed(cmdRes);
+
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+// Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a capped
+// collection.
+cmdRes = db.runCommand({find: collName, batchSize: 2});
+assert.commandWorked(cmdRes);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
+assert.commandFailed(cmdRes);
+
+// Can't attach maxTimeMS to a getMore command for a non-awaitData tailable cursor.
+cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true});
+assert.commandWorked(cmdRes);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
+assert.commandFailed(cmdRes);
+
+// Can attach maxTimeMS to a getMore command for an awaitData cursor.
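+// (For awaitData cursors, maxTimeMS governs how long the server waits for new results.)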
+cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true, awaitData: true});
+assert.commandWorked(cmdRes);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000});
+assert.commandWorked(cmdRes);
})();
diff --git a/jstests/core/getmore_invalidated_cursors.js b/jstests/core/getmore_invalidated_cursors.js
index c244b071716..43f27ed5e49 100644
--- a/jstests/core/getmore_invalidated_cursors.js
+++ b/jstests/core/getmore_invalidated_cursors.js
@@ -4,117 +4,116 @@
// Tests that running a getMore on a cursor that has been invalidated by something like a collection
// drop will return an appropriate error message.
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.
+load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.
- const testDB = db.getSiblingDB("getmore_invalidated_cursors");
- const coll = testDB.test;
+const testDB = db.getSiblingDB("getmore_invalidated_cursors");
+const coll = testDB.test;
- const nDocs = 100;
+const nDocs = 100;
- function setupCollection() {
- coll.drop();
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; ++i) {
- bulk.insert({_id: i, x: i});
- }
- assert.writeOK(bulk.execute());
- assert.commandWorked(coll.createIndex({x: 1}));
+function setupCollection() {
+ coll.drop();
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; ++i) {
+ bulk.insert({_id: i, x: i});
}
+ assert.writeOK(bulk.execute());
+ assert.commandWorked(coll.createIndex({x: 1}));
+}
- // Test that dropping the database between a find and a getMore will return an appropriate error
- // code and message.
- setupCollection();
-
- // Make sure the batch size is small enough to ensure a getMore will need to be sent to at least
- // one shard.
- const batchSize = (nDocs / FixtureHelpers.numberOfShardsForCollection(coll)) - 1;
-
- const isShardedCollection = coll.stats().sharded;
- const shellReadMode = testDB.getMongo().readMode();
+// Test that dropping the database between a find and a getMore will return an appropriate error
+// code and message.
+setupCollection();
- let cursor = coll.find().batchSize(batchSize);
- cursor.next(); // Send the query to the server.
+// Make sure the batch size is small enough to ensure a getMore will need to be sent to at least
+// one shard.
+const batchSize = (nDocs / FixtureHelpers.numberOfShardsForCollection(coll)) - 1;
- assert.commandWorked(testDB.dropDatabase());
+const isShardedCollection = coll.stats().sharded;
+const shellReadMode = testDB.getMongo().readMode();
- let error = assert.throws(() => cursor.itcount());
+let cursor = coll.find().batchSize(batchSize);
+cursor.next(); // Send the query to the server.
- if (testDB.runCommand({isdbgrid: 1}).isdbgrid && shellReadMode == 'legacy') {
- // The cursor will be invalidated on mongos, and we won't be able to find it.
- assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message);
- } else {
- assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
- assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
- }
+assert.commandWorked(testDB.dropDatabase());
- // Test that dropping the collection between a find and a getMore will return an appropriate
- // error code and message.
- setupCollection();
- cursor = coll.find().batchSize(batchSize);
- cursor.next(); // Send the query to the server.
+let error = assert.throws(() => cursor.itcount());
- coll.drop();
- error = assert.throws(() => cursor.itcount());
+if (testDB.runCommand({isdbgrid: 1}).isdbgrid && shellReadMode == 'legacy') {
+ // The cursor will be invalidated on mongos, and we won't be able to find it.
+ assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message);
+} else {
assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
- // In replica sets, collection drops are done in two phases, first renaming the collection to a
- // "drop pending" namespace, and then later reaping the collection. Therefore, we expect to
- // either see an error message related to a collection drop, or one related to a collection
- // rename.
- const droppedMsg = 'collection dropped';
- const renamedMsg = 'collection renamed';
- assert(-1 !== error.message.indexOf(droppedMsg) || -1 !== error.message.indexOf(renamedMsg),
- error.message);
-
- // Test that dropping an index between a find and a getMore has no effect on the query if the
- // query is not using the index.
+ assert.neq(-1, error.message.indexOf('collection dropped'), error.message);
+}
+
+// Test that dropping the collection between a find and a getMore will return an appropriate
+// error code and message.
+setupCollection();
+cursor = coll.find().batchSize(batchSize);
+cursor.next(); // Send the query to the server.
+
+coll.drop();
+error = assert.throws(() => cursor.itcount());
+assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
+// In replica sets, collection drops are done in two phases, first renaming the collection to a
+// "drop pending" namespace, and then later reaping the collection. Therefore, we expect to
+// either see an error message related to a collection drop, or one related to a collection
+// rename.
+const droppedMsg = 'collection dropped';
+const renamedMsg = 'collection renamed';
+assert(-1 !== error.message.indexOf(droppedMsg) || -1 !== error.message.indexOf(renamedMsg),
+ error.message);
+
+// Test that dropping an index between a find and a getMore has no effect on the query if the
+// query is not using the index.
+setupCollection();
+cursor = coll.find().batchSize(batchSize);
+cursor.next(); // Send the query to the server.
+assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}}));
+assert.eq(cursor.itcount(), nDocs - 1);
+
+// Test that dropping the index being scanned by a cursor between a find and a getMore kills the
+// query with the appropriate code and message.
+setupCollection();
+cursor = coll.find().hint({x: 1}).batchSize(batchSize);
+cursor.next(); // Send the query to the server.
+assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}}));
+error = assert.throws(() => cursor.itcount());
+assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
+assert.neq(-1, error.message.indexOf('index \'x_1\' dropped'), error.message);
+
+// Test that killing a cursor between a find and a getMore will return an appropriate error
+// code and message.
+
+setupCollection();
+// Use the find command so that we can extract the cursor id to pass to the killCursors command.
+let cursorId =
+ assert
+ .commandWorked(testDB.runCommand({find: coll.getName(), filter: {}, batchSize: batchSize}))
+ .cursor.id;
+assert.commandWorked(testDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CursorNotFound);
+
+// Test that all cursors on collections to be renamed get invalidated. Note that we can't do
+// renames on sharded collections.
+if (!isShardedCollection) {
setupCollection();
+ const collRenamed = testDB.test_rename;
+ collRenamed.drop();
cursor = coll.find().batchSize(batchSize);
- cursor.next(); // Send the query to the server.
- assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}}));
- assert.eq(cursor.itcount(), nDocs - 1);
+ assert(cursor.hasNext(), "Expected more data from find call on " + coll.getName());
+ assert.commandWorked(testDB.adminCommand({
+ renameCollection: testDB.getName() + "." + coll.getName(),
+ to: testDB.getName() + "." + collRenamed.getName()
+ }));
- // Test that dropping the index being scanned by a cursor between a find and a getMore kills the
- // query with the appropriate code and message.
- setupCollection();
- cursor = coll.find().hint({x: 1}).batchSize(batchSize);
- cursor.next(); // Send the query to the server.
- assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}}));
+ // Ensure getMore fails with an appropriate error code and message.
error = assert.throws(() => cursor.itcount());
assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
- assert.neq(-1, error.message.indexOf('index \'x_1\' dropped'), error.message);
-
- // Test that killing a cursor between a find and a getMore will return an appropriate error
- // code and message.
-
- setupCollection();
- // Use the find command so that we can extract the cursor id to pass to the killCursors command.
- let cursorId = assert
- .commandWorked(testDB.runCommand(
- {find: coll.getName(), filter: {}, batchSize: batchSize}))
- .cursor.id;
- assert.commandWorked(testDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: coll.getName()}),
- ErrorCodes.CursorNotFound);
-
- // Test that all cursors on collections to be renamed get invalidated. Note that we can't do
- // renames on sharded collections.
- if (!isShardedCollection) {
- setupCollection();
- const collRenamed = testDB.test_rename;
- collRenamed.drop();
- cursor = coll.find().batchSize(batchSize);
- assert(cursor.hasNext(), "Expected more data from find call on " + coll.getName());
- assert.commandWorked(testDB.adminCommand({
- renameCollection: testDB.getName() + "." + coll.getName(),
- to: testDB.getName() + "." + collRenamed.getName()
- }));
-
- // Ensure getMore fails with an appropriate error code and message.
- error = assert.throws(() => cursor.itcount());
- assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error));
- assert.neq(-1, error.message.indexOf('collection renamed'), error.message);
- }
-
+ assert.neq(-1, error.message.indexOf('collection renamed'), error.message);
+}
}());
diff --git a/jstests/core/getmore_invalidated_documents.js b/jstests/core/getmore_invalidated_documents.js
index 7d00748bbce..378fde3b02a 100644
--- a/jstests/core/getmore_invalidated_documents.js
+++ b/jstests/core/getmore_invalidated_documents.js
@@ -6,231 +6,230 @@
// Tests for invalidation during a getmore. This behavior is storage-engine dependent.
// See SERVER-16675.
(function() {
- "use strict";
-
- var t = db.getmore_invalidated_documents;
-
- var count;
- var cursor;
- var nextDoc;
- var x;
- var y;
-
- // Case #1: Text search with deletion invalidation.
- t.drop();
- assert.commandWorked(t.ensureIndex({a: "text"}));
- assert.writeOK(t.insert({_id: 1, a: "bar"}));
- assert.writeOK(t.insert({_id: 2, a: "bar"}));
- assert.writeOK(t.insert({_id: 3, a: "bar"}));
-
- cursor = t.find({$text: {$search: "bar"}}).batchSize(2);
- cursor.next();
- cursor.next();
-
- assert.writeOK(t.remove({_id: 3}));
-
- // We should get back the document or not (depending on the storage engine / concurrency model).
- // Either is fine as long as we don't crash.
- count = cursor.itcount();
- assert(count === 0 || count === 1);
-
- // Case #2: Text search with mutation invalidation.
- t.drop();
- assert.commandWorked(t.ensureIndex({a: "text"}));
- assert.writeOK(t.insert({_id: 1, a: "bar"}));
- assert.writeOK(t.insert({_id: 2, a: "bar"}));
- assert.writeOK(t.insert({_id: 3, a: "bar"}));
-
- cursor = t.find({$text: {$search: "bar"}}).batchSize(2);
- cursor.next();
- cursor.next();
-
- // Update the next matching doc so that it no longer matches.
- assert.writeOK(t.update({_id: 3}, {$set: {a: "nomatch"}}));
-
- // Either the cursor should skip the result that no longer matches, or we should get back the
- // old
- // version of the doc.
- assert(!cursor.hasNext() || cursor.next()["a"] === "bar");
-
- // Case #3: Merge sort with deletion invalidation.
- t.drop();
- assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
- assert.writeOK(t.insert({a: 1, b: 1}));
- assert.writeOK(t.insert({a: 1, b: 2}));
- assert.writeOK(t.insert({a: 2, b: 3}));
- assert.writeOK(t.insert({a: 2, b: 4}));
-
- cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
- cursor.next();
- cursor.next();
-
- assert.writeOK(t.remove({a: 2, b: 3}));
-
- count = cursor.itcount();
- assert(count === 1 || count === 2);
-
- // Case #4: Merge sort with mutation invalidation.
- t.drop();
- assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
- assert.writeOK(t.insert({a: 1, b: 1}));
- assert.writeOK(t.insert({a: 1, b: 2}));
- assert.writeOK(t.insert({a: 2, b: 3}));
- assert.writeOK(t.insert({a: 2, b: 4}));
-
- cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
- cursor.next();
- cursor.next();
-
- assert.writeOK(t.update({a: 2, b: 3}, {$set: {a: 6}}));
-
- // Either the cursor should skip the result that no longer matches, or we should get back the
- // old
- // version of the doc.
- assert(cursor.hasNext());
+"use strict";
+
+var t = db.getmore_invalidated_documents;
+
+var count;
+var cursor;
+var nextDoc;
+var x;
+var y;
+
+// Case #1: Text search with deletion invalidation.
+t.drop();
+assert.commandWorked(t.ensureIndex({a: "text"}));
+assert.writeOK(t.insert({_id: 1, a: "bar"}));
+assert.writeOK(t.insert({_id: 2, a: "bar"}));
+assert.writeOK(t.insert({_id: 3, a: "bar"}));
+
+cursor = t.find({$text: {$search: "bar"}}).batchSize(2);
+cursor.next();
+cursor.next();
+
+assert.writeOK(t.remove({_id: 3}));
+
+// We should get back the document or not (depending on the storage engine / concurrency model).
+// Either is fine as long as we don't crash.
+count = cursor.itcount();
+assert(count === 0 || count === 1);
+
+// Case #2: Text search with mutation invalidation.
+t.drop();
+assert.commandWorked(t.ensureIndex({a: "text"}));
+assert.writeOK(t.insert({_id: 1, a: "bar"}));
+assert.writeOK(t.insert({_id: 2, a: "bar"}));
+assert.writeOK(t.insert({_id: 3, a: "bar"}));
+
+cursor = t.find({$text: {$search: "bar"}}).batchSize(2);
+cursor.next();
+cursor.next();
+
+// Update the next matching doc so that it no longer matches.
+assert.writeOK(t.update({_id: 3}, {$set: {a: "nomatch"}}));
+
+// Either the cursor should skip the result that no longer matches, or we should get back the
+// old version of the doc.
+assert(!cursor.hasNext() || cursor.next()["a"] === "bar");
+
+// Case #3: Merge sort with deletion invalidation.
+t.drop();
+assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+assert.writeOK(t.insert({a: 1, b: 1}));
+assert.writeOK(t.insert({a: 1, b: 2}));
+assert.writeOK(t.insert({a: 2, b: 3}));
+assert.writeOK(t.insert({a: 2, b: 4}));
+
+cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
+cursor.next();
+cursor.next();
+
+assert.writeOK(t.remove({a: 2, b: 3}));
+
+count = cursor.itcount();
+assert(count === 1 || count === 2);
+
+// Case #4: Merge sort with mutation invalidation.
+t.drop();
+assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+assert.writeOK(t.insert({a: 1, b: 1}));
+assert.writeOK(t.insert({a: 1, b: 2}));
+assert.writeOK(t.insert({a: 2, b: 3}));
+assert.writeOK(t.insert({a: 2, b: 4}));
+
+cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
+cursor.next();
+cursor.next();
+
+assert.writeOK(t.update({a: 2, b: 3}, {$set: {a: 6}}));
+
+// Either the cursor should skip the result that no longer matches, or we should get back the
+// old version of the doc.
+assert(cursor.hasNext());
+assert(cursor.next()["a"] === 2);
+if (cursor.hasNext()) {
assert(cursor.next()["a"] === 2);
- if (cursor.hasNext()) {
- assert(cursor.next()["a"] === 2);
- }
- assert(!cursor.hasNext());
-
- // Case #5: 2d near with deletion invalidation.
- t.drop();
- t.ensureIndex({geo: "2d"});
- for (x = -1; x < 1; x++) {
- for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
- }
+}
+assert(!cursor.hasNext());
+
+// Case #5: 2d near with deletion invalidation.
+t.drop();
+t.ensureIndex({geo: "2d"});
+for (x = -1; x < 1; x++) {
+ for (y = -1; y < 1; y++) {
+ assert.writeOK(t.insert({geo: [x, y]}));
}
+}
- cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2);
- cursor.next();
- cursor.next();
+cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2);
+cursor.next();
+cursor.next();
- // Drop all documents in the collection.
- assert.writeOK(t.remove({}));
+// Drop all documents in the collection.
+assert.writeOK(t.remove({}));
- // Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered
- // because it is the same distance from the center point as a doc already returned).
- assert(cursor.hasNext());
+// Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered
+// because it is the same distance from the center point as a doc already returned).
+assert(cursor.hasNext());
- // Case #6: 2dsphere near with deletion invalidation.
- t.drop();
- t.ensureIndex({geo: "2dsphere"});
- for (x = -1; x < 1; x++) {
- for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
- }
+// Case #6: 2dsphere near with deletion invalidation.
+t.drop();
+t.ensureIndex({geo: "2dsphere"});
+for (x = -1; x < 1; x++) {
+ for (y = -1; y < 1; y++) {
+ assert.writeOK(t.insert({geo: [x, y]}));
}
-
- cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2);
- cursor.next();
- cursor.next();
-
- // Drop all documents in the collection.
- assert.writeOK(t.remove({}));
-
- // Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered
- // because it is the same distance from the center point as a doc already returned).
- assert(cursor.hasNext());
-
- // Case #7: 2dsphere near with deletion invalidation (again).
- t.drop();
- t.ensureIndex({geo: "2dsphere"});
- for (x = 0; x < 6; x++) {
- assert.writeOK(t.insert({geo: [x, x]}));
+}
+
+cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2);
+cursor.next();
+cursor.next();
+
+// Drop all documents in the collection.
+assert.writeOK(t.remove({}));
+
+// Both MMAP v1 and doc-locking storage engines should force-fetch the doc (it will be buffered
+// because it is the same distance from the center point as a doc already returned).
+assert(cursor.hasNext());
+
+// Case #7: 2dsphere near with deletion invalidation (again).
+t.drop();
+t.ensureIndex({geo: "2dsphere"});
+for (x = 0; x < 6; x++) {
+ assert.writeOK(t.insert({geo: [x, x]}));
+}
+
+cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 10}}).batchSize(2);
+cursor.next();
+cursor.next();
+
+// Drop all documents in the collection.
+assert.writeOK(t.remove({}));
+
+// We might force-fetch or we might skip over the deleted documents, depending on the internals
+// of the geo near search. Just make sure that we can exhaust the cursor without crashing.
+assert.gte(cursor.itcount(), 0);
+
+// Case #8: 2d near with mutation invalidation.
+t.drop();
+t.ensureIndex({geo: "2d"});
+for (x = -1; x < 1; x++) {
+ for (y = -1; y < 1; y++) {
+ assert.writeOK(t.insert({geo: [x, y]}));
}
-
- cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 10}}).batchSize(2);
- cursor.next();
- cursor.next();
-
- // Drop all documents in the collection.
- assert.writeOK(t.remove({}));
-
- // We might force-fetch or we might skip over the deleted documents, depending on the internals
- // of the geo near search. Just make sure that we can exhaust the cursor without crashing.
- assert.gte(cursor.itcount(), 0);
-
- // Case #8: 2d near with mutation invalidation.
- t.drop();
- t.ensureIndex({geo: "2d"});
- for (x = -1; x < 1; x++) {
- for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
- }
- }
-
- cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2);
- cursor.next();
- cursor.next();
-
- // Update all documents in the collection to have position [15, 15].
- assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true));
-
- // The old version of the document should be returned (the update should not be reflected in the
- // results of the near search).
- nextDoc = cursor.next();
- printjson(nextDoc);
- assert.neq([15, 15], nextDoc.geo);
- assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0);
-
- // Case #9: 2dsphere near with mutation invalidation.
- t.drop();
- t.ensureIndex({geo: "2dsphere"});
- for (x = -1; x < 1; x++) {
- for (y = -1; y < 1; y++) {
- assert.writeOK(t.insert({geo: [x, y]}));
- }
+}
+
+cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2);
+cursor.next();
+cursor.next();
+
+// Update all documents in the collection to have position [15, 15].
+assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true));
+
+// The old version of the document should be returned (the update should not be reflected in the
+// results of the near search).
+nextDoc = cursor.next();
+printjson(nextDoc);
+assert.neq([15, 15], nextDoc.geo);
+assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0);
+
+// Case #9: 2dsphere near with mutation invalidation.
+t.drop();
+t.ensureIndex({geo: "2dsphere"});
+for (x = -1; x < 1; x++) {
+ for (y = -1; y < 1; y++) {
+ assert.writeOK(t.insert({geo: [x, y]}));
}
-
- cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2);
- cursor.next();
- cursor.next();
-
- // Update all documents in the collection to have position [15, 15].
- assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true));
-
- // The old version of the document should be returned (the update should not be reflected in the
- // results of the near search).
- nextDoc = cursor.next();
- printjson(nextDoc);
- assert.neq([15, 15], nextDoc.geo);
- assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0);
-
- // Case #10: sort with deletion invalidation.
- t.drop();
- t.ensureIndex({a: 1});
- t.insert({a: 1, b: 2});
- t.insert({a: 3, b: 3});
- t.insert({a: 2, b: 1});
-
- cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
- cursor.next();
- cursor.next();
-
- assert.writeOK(t.remove({a: 2}));
-
- if (cursor.hasNext()) {
- assert.eq(cursor.next().b, 3);
- }
-
- // Case #11: sort with mutation invalidation.
- t.drop();
- t.ensureIndex({a: 1});
- t.insert({a: 1, b: 2});
- t.insert({a: 3, b: 3});
- t.insert({a: 2, b: 1});
-
- cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
- cursor.next();
- cursor.next();
-
- assert.writeOK(t.update({a: 2}, {$set: {a: 4}}));
-
- count = cursor.itcount();
- if (cursor.hasNext()) {
- assert.eq(cursor.next().b, 3);
- }
-
+}
+
+cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2);
+cursor.next();
+cursor.next();
+
+// Update all documents in the collection to have position [15, 15].
+assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true));
+
+// The old version of the document should be returned (the update should not be reflected in the
+// results of the near search).
+nextDoc = cursor.next();
+printjson(nextDoc);
+assert.neq([15, 15], nextDoc.geo);
+assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0);
+
+// Case #10: sort with deletion invalidation.
+t.drop();
+t.ensureIndex({a: 1});
+t.insert({a: 1, b: 2});
+t.insert({a: 3, b: 3});
+t.insert({a: 2, b: 1});
+
+cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
+cursor.next();
+cursor.next();
+
+assert.writeOK(t.remove({a: 2}));
+
+if (cursor.hasNext()) {
+ assert.eq(cursor.next().b, 3);
+}
+
+// Case #11: sort with mutation invalidation.
+t.drop();
+t.ensureIndex({a: 1});
+t.insert({a: 1, b: 2});
+t.insert({a: 3, b: 3});
+t.insert({a: 2, b: 1});
+
+cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
+cursor.next();
+cursor.next();
+
+assert.writeOK(t.update({a: 2}, {$set: {a: 4}}));
+
+count = cursor.itcount();
+if (cursor.hasNext()) {
+ assert.eq(cursor.next().b, 3);
+}
})();
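The invalidation cases above all follow one pattern: open a cursor with a small batchSize so some results stay buffered server-side, invalidate one of the buffered documents with a write, then verify that iteration completes without crashing and without returning an impossible result. A minimal sketch of that pattern, assuming a live mongo shell `db`; the collection name is hypothetical and not part of this patch:

// Sketch of the invalidation pattern exercised by cases #3-#11 above.
const sketch = db.cursor_invalidation_sketch;  // hypothetical collection
sketch.drop();
assert.commandWorked(sketch.createIndex({a: 1}));
for (let i = 0; i < 4; i++) {
    assert.writeOK(sketch.insert({a: i}));
}

// batchSize(2) hands two documents to the client and leaves the rest pending
// on the server, where a later write can invalidate them.
const cur = sketch.find({a: {$gte: 0}}).sort({a: 1}).batchSize(2);
cur.next();
cur.next();

// Invalidate a document the cursor has not returned yet.
assert.writeOK(sketch.remove({a: 2}));

// Whether the deleted document is still returned depends on the storage
// engine, so assert a range rather than an exact count.
const remaining = cur.itcount();
assert(remaining === 1 || remaining === 2);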
diff --git a/jstests/core/hash.js b/jstests/core/hash.js
index 2e8ad576159..4f7cebcdb5a 100644
--- a/jstests/core/hash.js
+++ b/jstests/core/hash.js
@@ -6,58 +6,58 @@
* architectures.
*/
(function() {
- 'use strict';
+'use strict';
- const hashOfMaxNumberLong = NumberLong("1136124329541638701");
- const hashOfLowestNumberLong = NumberLong("5744114172487291558");
- const hashOfZeroNumberLong = NumberLong("5574369198691456941");
+const hashOfMaxNumberLong = NumberLong("1136124329541638701");
+const hashOfLowestNumberLong = NumberLong("5744114172487291558");
+const hashOfZeroNumberLong = NumberLong("5574369198691456941");
- const hashTests = [
- // Hash value of a string.
- {key: "hashthis", expected: NumberLong("6271151123721111923")},
+const hashTests = [
+ // Hash value of a string.
+ {key: "hashthis", expected: NumberLong("6271151123721111923")},
- // The smallest positive double that overflows a 64-bit signed int. This is a special case,
- // as described in SERVER-37183.
- {key: Math.pow(2, 63), expected: hashOfLowestNumberLong},
+ // The smallest positive double that overflows a 64-bit signed int. This is a special case,
+ // as described in SERVER-37183.
+ {key: Math.pow(2, 63), expected: hashOfLowestNumberLong},
- // The next biggest number. Large doubles get clamped to the max 64-bit signed value before
- // being hashed.
- {key: Math.pow(2, 63) + Math.pow(2, 11), expected: hashOfMaxNumberLong},
+ // The next biggest number. Large doubles get clamped to the max 64-bit signed value before
+ // being hashed.
+ {key: Math.pow(2, 63) + Math.pow(2, 11), expected: hashOfMaxNumberLong},
- // Really large numbers and positive infinity also get clamped to the same value.
- {key: Math.pow(2, 500), expected: hashOfMaxNumberLong},
- {key: Infinity, expected: hashOfMaxNumberLong},
+ // Really large numbers and positive infinity also get clamped to the same value.
+ {key: Math.pow(2, 500), expected: hashOfMaxNumberLong},
+ {key: Infinity, expected: hashOfMaxNumberLong},
- // Just under the largest double that overflows a 64-bit signed int. This value gets
- // converted to a signed 64-bit int and then hashed.
- {key: Math.pow(2, 63) - Math.pow(2, 10), expected: NumberLong("-3954856262017896439")},
+ // Just under the largest double that overflows a 64-bit signed int. This value gets
+ // converted to a signed 64-bit int and then hashed.
+ {key: Math.pow(2, 63) - Math.pow(2, 10), expected: NumberLong("-3954856262017896439")},
- // Lowest negative double that does not overflow a 64-bit signed int.
- {key: -Math.pow(2, 63), expected: hashOfLowestNumberLong},
+ // Lowest negative double that does not overflow a 64-bit signed int.
+ {key: -Math.pow(2, 63), expected: hashOfLowestNumberLong},
- // Just above the lowest negative double that does not overflow a 64-bit signed int.
- {key: -(Math.pow(2, 63) - Math.pow(2, 10)), expected: NumberLong("-1762411739488908479")},
+ // Just above the lowest negative double that does not overflow a 64-bit signed int.
+ {key: -(Math.pow(2, 63) - Math.pow(2, 10)), expected: NumberLong("-1762411739488908479")},
- // A negative overflowing double gets clamped to -2^63 before being hashed.
- {key: -(Math.pow(2, 63) + Math.pow(2, 11)), expected: hashOfLowestNumberLong},
- {key: -Infinity, expected: hashOfLowestNumberLong},
+ // A negative overflowing double gets clamped to -2^63 before being hashed.
+ {key: -(Math.pow(2, 63) + Math.pow(2, 11)), expected: hashOfLowestNumberLong},
+ {key: -Infinity, expected: hashOfLowestNumberLong},
- // NaN values get converted to 0 and then hashed.
- {key: 0, expected: hashOfZeroNumberLong},
- {key: NumberLong("0"), expected: hashOfZeroNumberLong},
- {key: NaN, expected: hashOfZeroNumberLong},
- {key: -NaN, expected: hashOfZeroNumberLong},
+ // NaN values get converted to 0 and then hashed.
+ {key: 0, expected: hashOfZeroNumberLong},
+ {key: NumberLong("0"), expected: hashOfZeroNumberLong},
+ {key: NaN, expected: hashOfZeroNumberLong},
+ {key: -NaN, expected: hashOfZeroNumberLong},
- // Hash an object.
- {key: {a: 1, b: 2}, expected: NumberLong("-7076810813311352857")},
+ // Hash an object.
+ {key: {a: 1, b: 2}, expected: NumberLong("-7076810813311352857")},
- // Hash an object with some corner-case values.
- {key: {a: Math.pow(2, 63), b: NaN}, expected: NumberLong("1223292051903137684")},
- ];
+ // Hash an object with some corner-case values.
+ {key: {a: Math.pow(2, 63), b: NaN}, expected: NumberLong("1223292051903137684")},
+];
- hashTests.forEach(test => {
- const hashResult = db.runCommand({_hashBSONElement: test.key, seed: 1});
- assert.commandWorked(hashResult);
- assert.eq(test.expected, hashResult.out, tojson(test.key));
- });
+hashTests.forEach(test => {
+ const hashResult = db.runCommand({_hashBSONElement: test.key, seed: 1});
+ assert.commandWorked(hashResult);
+ assert.eq(test.expected, hashResult.out, tojson(test.key));
+});
})();
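The expectations above encode the clamping rules for doubles: values beyond 2^63 (including Infinity) clamp to the maximum 64-bit signed value before hashing, so they all hash alike. That can be checked directly with the same command the test uses; a sketch, noting that _hashBSONElement is an internal test command rather than stable API:

// Sketch: 2^500 and Infinity both clamp to the max 64-bit signed value,
// so _hashBSONElement produces the same hash for both (seed 1, as above).
const big = db.runCommand({_hashBSONElement: Math.pow(2, 500), seed: 1});
const inf = db.runCommand({_hashBSONElement: Infinity, seed: 1});
assert.commandWorked(big);
assert.commandWorked(inf);
assert.eq(big.out, inf.out);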
diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js
index e7d8a51bcc0..f4cbe2fe5f6 100644
--- a/jstests/core/idhack.js
+++ b/jstests/core/idhack.js
@@ -1,87 +1,89 @@
// @tags: [requires_non_retryable_writes, assumes_balancer_off]
(function() {
- "use strict";
-
- const t = db.idhack;
- t.drop();
-
- // Include helpers for analyzing explain output.
- load("jstests/libs/analyze_plan.js");
-
- assert.writeOK(t.insert({_id: {x: 1}, z: 1}));
- assert.writeOK(t.insert({_id: {x: 2}, z: 2}));
- assert.writeOK(t.insert({_id: {x: 3}, z: 3}));
- assert.writeOK(t.insert({_id: 1, z: 4}));
- assert.writeOK(t.insert({_id: 2, z: 5}));
- assert.writeOK(t.insert({_id: 3, z: 6}));
-
- assert.eq(2, t.findOne({_id: {x: 2}}).z);
- assert.eq(2, t.find({_id: {$gte: 2}}).count());
- assert.eq(2, t.find({_id: {$gte: 2}}).itcount());
-
- t.update({_id: {x: 2}}, {$set: {z: 7}});
- assert.eq(7, t.findOne({_id: {x: 2}}).z);
-
- t.update({_id: {$gte: 2}}, {$set: {z: 8}}, false, true);
- assert.eq(4, t.findOne({_id: 1}).z);
- assert.eq(8, t.findOne({_id: 2}).z);
- assert.eq(8, t.findOne({_id: 3}).z);
-
- // explain output should show that the ID hack was applied.
- const query = {_id: {x: 2}};
- let explain = t.find(query).explain(true);
- assert.eq(1, explain.executionStats.nReturned);
- assert.eq(1, explain.executionStats.totalKeysExamined);
- assert(isIdhack(db, explain.queryPlanner.winningPlan));
-
- // ID hack cannot be used with hint().
- t.ensureIndex({_id: 1, a: 1});
- explain = t.find(query).hint({_id: 1, a: 1}).explain();
- assert(!isIdhack(db, explain.queryPlanner.winningPlan));
-
- // ID hack cannot be used with skip().
- explain = t.find(query).skip(1).explain();
- assert(!isIdhack(db, explain.queryPlanner.winningPlan));
-
- // ID hack cannot be used with a regex predicate.
- assert.writeOK(t.insert({_id: "abc"}));
- explain = t.find({_id: /abc/}).explain();
- assert.eq({_id: "abc"}, t.findOne({_id: /abc/}));
- assert(!isIdhack(db, explain.queryPlanner.winningPlan));
-
- // Covered query returning _id field only can be handled by ID hack.
- explain = t.find(query, {_id: 1}).explain();
- assert(isIdhack(db, explain.queryPlanner.winningPlan));
- // Check doc from covered ID hack query.
- assert.eq({_id: {x: 2}}, t.findOne(query, {_id: 1}));
-
- //
- // Non-covered projection for idhack.
- //
-
- t.drop();
- assert.writeOK(t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]}));
- assert.writeOK(t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]}));
-
- // Simple inclusion.
- assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {a: 1}).next());
- assert.eq({a: 1}, t.find({_id: 1}, {_id: 0, a: 1}).next());
- assert.eq({_id: 0, a: 0}, t.find({_id: 0}, {_id: 1, a: 1}).next());
-
- // Non-simple: exclusion.
- assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {b: 0}).next());
- assert.eq({_id: 0}, t.find({_id: 0}, {a: 0, b: 0}).next());
-
- // Non-simple: dotted fields.
- assert.eq({b: [{c: 1}, {c: 2}]}, t.find({_id: 0}, {_id: 0, "b.c": 1}).next());
- assert.eq({_id: 1}, t.find({_id: 1}, {"foo.bar": 1}).next());
-
- // Non-simple: elemMatch projection.
- assert.eq({_id: 1, b: [{c: 4}]}, t.find({_id: 1}, {b: {$elemMatch: {c: 4}}}).next());
-
- // Non-simple: .returnKey().
- assert.eq({_id: 1}, t.find({_id: 1}).returnKey().next());
-
- // Non-simple: .returnKey() overrides other projections.
- assert.eq({_id: 1}, t.find({_id: 1}, {a: 1}).returnKey().next());
+"use strict";
+
+const t = db.idhack;
+t.drop();
+
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
+
+assert.writeOK(t.insert({_id: {x: 1}, z: 1}));
+assert.writeOK(t.insert({_id: {x: 2}, z: 2}));
+assert.writeOK(t.insert({_id: {x: 3}, z: 3}));
+assert.writeOK(t.insert({_id: 1, z: 4}));
+assert.writeOK(t.insert({_id: 2, z: 5}));
+assert.writeOK(t.insert({_id: 3, z: 6}));
+
+assert.eq(2, t.findOne({_id: {x: 2}}).z);
+assert.eq(2, t.find({_id: {$gte: 2}}).count());
+assert.eq(2, t.find({_id: {$gte: 2}}).itcount());
+
+t.update({_id: {x: 2}}, {$set: {z: 7}});
+assert.eq(7, t.findOne({_id: {x: 2}}).z);
+
+t.update({_id: {$gte: 2}}, {$set: {z: 8}}, false, true);
+assert.eq(4, t.findOne({_id: 1}).z);
+assert.eq(8, t.findOne({_id: 2}).z);
+assert.eq(8, t.findOne({_id: 3}).z);
+
+// explain output should show that the ID hack was applied.
+const query = {
+ _id: {x: 2}
+};
+let explain = t.find(query).explain(true);
+assert.eq(1, explain.executionStats.nReturned);
+assert.eq(1, explain.executionStats.totalKeysExamined);
+assert(isIdhack(db, explain.queryPlanner.winningPlan));
+
+// ID hack cannot be used with hint().
+t.ensureIndex({_id: 1, a: 1});
+explain = t.find(query).hint({_id: 1, a: 1}).explain();
+assert(!isIdhack(db, explain.queryPlanner.winningPlan));
+
+// ID hack cannot be used with skip().
+explain = t.find(query).skip(1).explain();
+assert(!isIdhack(db, explain.queryPlanner.winningPlan));
+
+// ID hack cannot be used with a regex predicate.
+assert.writeOK(t.insert({_id: "abc"}));
+explain = t.find({_id: /abc/}).explain();
+assert.eq({_id: "abc"}, t.findOne({_id: /abc/}));
+assert(!isIdhack(db, explain.queryPlanner.winningPlan));
+
+// Covered query returning _id field only can be handled by ID hack.
+explain = t.find(query, {_id: 1}).explain();
+assert(isIdhack(db, explain.queryPlanner.winningPlan));
+// Check doc from covered ID hack query.
+assert.eq({_id: {x: 2}}, t.findOne(query, {_id: 1}));
+
+//
+// Non-covered projection for idhack.
+//
+
+t.drop();
+assert.writeOK(t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]}));
+assert.writeOK(t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]}));
+
+// Simple inclusion.
+assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {a: 1}).next());
+assert.eq({a: 1}, t.find({_id: 1}, {_id: 0, a: 1}).next());
+assert.eq({_id: 0, a: 0}, t.find({_id: 0}, {_id: 1, a: 1}).next());
+
+// Non-simple: exclusion.
+assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {b: 0}).next());
+assert.eq({_id: 0}, t.find({_id: 0}, {a: 0, b: 0}).next());
+
+// Non-simple: dotted fields.
+assert.eq({b: [{c: 1}, {c: 2}]}, t.find({_id: 0}, {_id: 0, "b.c": 1}).next());
+assert.eq({_id: 1}, t.find({_id: 1}, {"foo.bar": 1}).next());
+
+// Non-simple: elemMatch projection.
+assert.eq({_id: 1, b: [{c: 4}]}, t.find({_id: 1}, {b: {$elemMatch: {c: 4}}}).next());
+
+// Non-simple: .returnKey().
+assert.eq({_id: 1}, t.find({_id: 1}).returnKey().next());
+
+// Non-simple: .returnKey() overrides other projections.
+assert.eq({_id: 1}, t.find({_id: 1}, {a: 1}).returnKey().next());
})();
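The structure of these checks generalizes: the ID hack applies to an exact _id equality and is abandoned as soon as the query needs planner features such as hint(), skip(), or a regex predicate. A compressed sketch of the two ends of that spectrum, assuming analyze_plan.js is loaded for isIdhack() and using a hypothetical collection name:

// Sketch: exact _id equality takes the IDHACK fast path; adding skip()
// forces the regular query planner instead.
const ih = db.idhack_sketch;  // hypothetical collection
ih.drop();
assert.writeOK(ih.insert({_id: 42, z: 1}));

let p = ih.find({_id: 42}).explain();
assert(isIdhack(db, p.queryPlanner.winningPlan));

p = ih.find({_id: 42}).skip(1).explain();
assert(!isIdhack(db, p.queryPlanner.winningPlan));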
diff --git a/jstests/core/index_bigkeys.js b/jstests/core/index_bigkeys.js
index 3b598333102..be8ae1f8d65 100644
--- a/jstests/core/index_bigkeys.js
+++ b/jstests/core/index_bigkeys.js
@@ -7,11 +7,11 @@
* @tags: [assumes_no_implicit_index_creation, requires_non_retryable_writes]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/index_bigkeys.js");
+load("jstests/libs/index_bigkeys.js");
- const collName = "index_bigkeys_foreground_test";
+const collName = "index_bigkeys_foreground_test";
- testAllInteractionsWithBigIndexKeys(db, collName, false);
+testAllInteractionsWithBigIndexKeys(db, collName, false);
}());
diff --git a/jstests/core/index_bigkeys_background.js b/jstests/core/index_bigkeys_background.js
index 88cfb7c1222..b7963f3235b 100644
--- a/jstests/core/index_bigkeys_background.js
+++ b/jstests/core/index_bigkeys_background.js
@@ -14,11 +14,11 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/index_bigkeys.js");
+load("jstests/libs/index_bigkeys.js");
- const collName = "index_bigkeys_background_test";
+const collName = "index_bigkeys_background_test";
- testAllInteractionsWithBigIndexKeys(db, collName, true);
+testAllInteractionsWithBigIndexKeys(db, collName, true);
}());
diff --git a/jstests/core/index_bounds_code.js b/jstests/core/index_bounds_code.js
index 5070c3fe0d0..cd1fa58b306 100644
--- a/jstests/core/index_bounds_code.js
+++ b/jstests/core/index_bounds_code.js
@@ -1,55 +1,50 @@
// Index bounds generation tests for Code/CodeWScope values.
// @tags: [requires_non_retryable_writes, assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
- const coll = db.index_bounds_code;
- coll.drop();
+const coll = db.index_bounds_code;
+coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- const insertedFunc = function() {
- return 1;
- };
- assert.writeOK(coll.insert({a: insertedFunc}));
+assert.commandWorked(coll.createIndex({a: 1}));
+const insertedFunc = function() {
+ return 1;
+};
+assert.writeOK(coll.insert({a: insertedFunc}));
- // Test that queries involving comparison operators with values of type Code are covered.
- const proj = {a: 1, _id: 0};
- const func = function() {
- return 2;
- };
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: func}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: func}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: func}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: func}}, project: proj, count: 1});
+// Test that queries involving comparison operators with values of type Code are covered.
+const proj = {
+ a: 1,
+ _id: 0
+};
+const func = function() {
+ return 2;
+};
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: func}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 1});
- // Test for equality against the original inserted function.
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: insertedFunc}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: insertedFunc}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: insertedFunc}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: insertedFunc}}, project: proj, count: 1});
+// Test for equality against the original inserted function.
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$gt: insertedFunc}}, project: proj, count: 0});
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$gte: insertedFunc}}, project: proj, count: 1});
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$lt: insertedFunc}}, project: proj, count: 0});
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$lte: insertedFunc}}, project: proj, count: 1});
- // Test that documents that lie outside of the generated index bounds are not returned.
- coll.remove({});
- assert.writeOK(coll.insert({a: "string"}));
- assert.writeOK(coll.insert({a: {b: 1}}));
- assert.writeOK(coll.insert({a: MaxKey}));
+// Test that documents that lie outside of the generated index bounds are not returned.
+coll.remove({});
+assert.writeOK(coll.insert({a: "string"}));
+assert.writeOK(coll.insert({a: {b: 1}}));
+assert.writeOK(coll.insert({a: MaxKey}));
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: func}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: func}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: func}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: func}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: func}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 0});
})();
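The covered-query property that assertCoveredQueryAndCount asserts can also be checked directly with isIndexOnly() from the same library: a plan is covered when the projection excludes _id and touches only indexed fields, so no document fetch is needed. A sketch under the same assumptions (analyze_plan.js loaded; the collection name is hypothetical):

// Sketch: excluding _id and projecting only indexed fields makes the
// comparison query answerable from the index alone.
const cv = db.covered_query_sketch;  // hypothetical collection
cv.drop();
assert.commandWorked(cv.createIndex({a: 1}));
assert.writeOK(cv.insert({a: 5}));

const winningPlan =
    cv.find({a: {$gt: 1}}, {a: 1, _id: 0}).explain().queryPlanner.winningPlan;
assert(isIndexOnly(db, winningPlan));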
diff --git a/jstests/core/index_bounds_maxkey.js b/jstests/core/index_bounds_maxkey.js
index b22af082b13..f7cd1eb2e66 100644
--- a/jstests/core/index_bounds_maxkey.js
+++ b/jstests/core/index_bounds_maxkey.js
@@ -1,39 +1,34 @@
// Index bounds generation tests for MaxKey values.
// @tags: [requires_non_retryable_writes, assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
- const coll = db.index_bounds_maxkey;
- coll.drop();
+const coll = db.index_bounds_maxkey;
+coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.insert({a: MaxKey}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.writeOK(coll.insert({a: MaxKey}));
- // Test that queries involving comparison operators with MaxKey are covered.
- const proj = {a: 1, _id: 0};
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 1});
+// Test that queries involving comparison operators with MaxKey are covered.
+const proj = {
+ a: 1,
+ _id: 0
+};
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 1});
- // Test that all documents are considered less than MaxKey, regardless of the presence of
- // the queried field 'a'.
- coll.remove({});
- assert.writeOK(coll.insert({a: "string"}));
- assert.writeOK(coll.insert({a: {b: 1}}));
- assert.writeOK(coll.insert({}));
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3});
+// Test that all documents are considered less than MaxKey, regardless of the presence of
+// the queried field 'a'.
+coll.remove({});
+assert.writeOK(coll.insert({a: "string"}));
+assert.writeOK(coll.insert({a: {b: 1}}));
+assert.writeOK(coll.insert({}));
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3});
})();
diff --git a/jstests/core/index_bounds_minkey.js b/jstests/core/index_bounds_minkey.js
index 6fa9d4f0d1e..31d38a2115e 100644
--- a/jstests/core/index_bounds_minkey.js
+++ b/jstests/core/index_bounds_minkey.js
@@ -1,39 +1,34 @@
// Index bounds generation tests for MinKey values.
// @tags: [requires_non_retryable_writes, assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
- const coll = db.index_bounds_minkey;
- coll.drop();
+const coll = db.index_bounds_minkey;
+coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.insert({a: MinKey}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.writeOK(coll.insert({a: MinKey}));
- // Test that queries involving comparison operators with MinKey are covered.
- const proj = {a: 1, _id: 0};
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 1});
+// Test that queries involving comparison operators with MinKey are covered.
+const proj = {
+ a: 1,
+ _id: 0
+};
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 1});
- // Test that all documents are considered greater than MinKey, regardless of the presence of
- // the queried field 'a'.
- coll.remove({});
- assert.writeOK(coll.insert({a: "string"}));
- assert.writeOK(coll.insert({a: {b: 1}}));
- assert.writeOK(coll.insert({}));
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0});
+// Test that all documents are considered greater than MinKey, regardless of the presence of
+// the queried field 'a'.
+coll.remove({});
+assert.writeOK(coll.insert({a: "string"}));
+assert.writeOK(coll.insert({a: {b: 1}}));
+assert.writeOK(coll.insert({}));
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0});
})();
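Both the MaxKey and MinKey files lean on the BSON total order, in which MinKey compares below every value and MaxKey above every value regardless of type, with a missing field treated as null for comparison purposes. A quick check of that ordering (a sketch; the collection name is hypothetical):

// Sketch: $gt: MinKey and $lt: MaxKey match every document, whatever the
// type of 'a' and even when 'a' is absent.
const mk = db.minmaxkey_sketch;  // hypothetical collection
mk.drop();
assert.writeOK(mk.insert({a: 'string'}));
assert.writeOK(mk.insert({a: {b: 1}}));
assert.writeOK(mk.insert({}));
assert.eq(3, mk.find({a: {$gt: MinKey}}).itcount());
assert.eq(3, mk.find({a: {$lt: MaxKey}}).itcount());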
diff --git a/jstests/core/index_bounds_object.js b/jstests/core/index_bounds_object.js
index 22a7f433efd..b1bdb2e9591 100644
--- a/jstests/core/index_bounds_object.js
+++ b/jstests/core/index_bounds_object.js
@@ -1,61 +1,59 @@
// Index bounds generation tests for Object values.
// @tags: [requires_non_retryable_writes, assumes_unsharded_collection]
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
-
- const coll = db.index_bounds_object;
- coll.drop();
-
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.writeOK(coll.insert({a: {b: 1}}));
-
- // Test that queries involving comparison operators with objects are covered.
- const proj = {a: 1, _id: 0};
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: {b: 0}}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: {b: 2}}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: {b: 1}}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: {b: 1, c: 2}}}, project: proj, count: 0});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: {b: 2}}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lte: {b: 1}}}, project: proj, count: 1});
-
- // Test that queries involving comparisons with an empty object are covered.
- assert.writeOK(coll.insert({a: {}}));
- assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 1});
- assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 2});
- assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0});
- assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 1});
-
- // Test that queries involving comparisons with a range of objects are covered.
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gt: {}, $lt: {b: 2}}}, project: proj, count: 1});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$gte: {}, $lt: {b: 2}}}, project: proj, count: 2});
- assertCoveredQueryAndCount(
- {collection: coll, query: {a: {$lt: {}, $gte: {}}}, project: proj, count: 0});
-
- // Test that documents that lie outside of the generated index bounds are not returned. Cannot
- // test empty array upper bounds since that would force the index to be multi-key.
- coll.remove({});
- assert.writeOK(coll.insert({a: "string"}));
- assert.writeOK(coll.insert({a: true}));
- assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 0});
- assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 0});
- assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0});
- assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 0});
-
- // Adding a document containing an array makes the index multi-key which can never be used for a
- // covered query.
- assert.writeOK(coll.insert({a: []}));
- assert(!isIndexOnly(db, coll.find({a: {$gt: {}}}, proj).explain().queryPlanner.winningPlan));
- assert(!isIndexOnly(db, coll.find({a: {$gte: {}}}, proj).explain().queryPlanner.winningPlan));
- assert(!isIndexOnly(db, coll.find({a: {$lt: {}}}, proj).explain().queryPlanner.winningPlan));
- assert(!isIndexOnly(db, coll.find({a: {$lte: {}}}, proj).explain().queryPlanner.winningPlan));
+"use strict";
+
+load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+
+const coll = db.index_bounds_object;
+coll.drop();
+
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.writeOK(coll.insert({a: {b: 1}}));
+
+// Test that queries involving comparison operators with objects are covered.
+const proj = {
+ a: 1,
+ _id: 0
+};
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {b: 0}}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {b: 2}}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {b: 1}}}, project: proj, count: 1});
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$gte: {b: 1, c: 2}}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {b: 2}}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {b: 1}}}, project: proj, count: 1});
+
+// Test that queries involving comparisons with an empty object are covered.
+assert.writeOK(coll.insert({a: {}}));
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 1});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 2});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 1});
+
+// Test that queries involving comparisons with a range of objects are covered.
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$gt: {}, $lt: {b: 2}}}, project: proj, count: 1});
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$gte: {}, $lt: {b: 2}}}, project: proj, count: 2});
+assertCoveredQueryAndCount(
+ {collection: coll, query: {a: {$lt: {}, $gte: {}}}, project: proj, count: 0});
+
+// Test that documents that lie outside of the generated index bounds are not returned. Cannot
+// test empty array upper bounds since that would force the index to be multi-key.
+coll.remove({});
+assert.writeOK(coll.insert({a: "string"}));
+assert.writeOK(coll.insert({a: true}));
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0});
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 0});
+
+// Adding a document containing an array makes the index multi-key which can never be used for a
+// covered query.
+assert.writeOK(coll.insert({a: []}));
+assert(!isIndexOnly(db, coll.find({a: {$gt: {}}}, proj).explain().queryPlanner.winningPlan));
+assert(!isIndexOnly(db, coll.find({a: {$gte: {}}}, proj).explain().queryPlanner.winningPlan));
+assert(!isIndexOnly(db, coll.find({a: {$lt: {}}}, proj).explain().queryPlanner.winningPlan));
+assert(!isIndexOnly(db, coll.find({a: {$lte: {}}}, proj).explain().queryPlanner.winningPlan));
})();
diff --git a/jstests/core/index_bounds_pipe.js b/jstests/core/index_bounds_pipe.js
index ee6cbd5b5f7..e0ef8cf915a 100644
--- a/jstests/core/index_bounds_pipe.js
+++ b/jstests/core/index_bounds_pipe.js
@@ -3,115 +3,112 @@
* non-escaped pipe '|' characters.
*/
(function() {
- 'use strict';
-
- load('jstests/libs/analyze_plan.js');
-
- const collName = 'index_bounds_pipe';
- const coll = db.getCollection(collName);
- coll.drop();
-
- assert.writeOK(coll.insert({_id: ''}));
- assert.writeOK(coll.insert({_id: '\\|'}));
- assert.writeOK(coll.insert({_id: 'a'}));
- assert.writeOK(coll.insert({_id: 'a|b'}));
- assert.writeOK(coll.insert({_id: 'b'}));
- assert.writeOK(coll.insert({_id: '|'}));
-
- /**
- * Asserts that a query on a field using 'params.regex' uses index bounds 'params.bounds' and
- * returns results identical to 'params.results'.
- *
- * Also tests that a query using 'params.regex' will return documents with a field of type regex
- * with an identical regular expression value.
- */
- function assertIndexBoundsAndResult(params) {
- const query = {_id: params.regex};
- const command = {find: collName, filter: query, projection: {_id: 1}, sort: {_id: 1}};
- const explain = db.runCommand({explain: command});
- assert.commandWorked(explain);
-
- // Check that the query uses correct index bounds. When run against a sharded cluster, there
- // may be multiple index scan stages, but each should have the same index bounds.
- const ixscans = getPlanStages(explain.queryPlanner.winningPlan, 'IXSCAN');
- assert.gt(ixscans.length, 0, 'Plan unexpectedly missing IXSCAN stage: ' + tojson(explain));
- for (let i = 0; i < ixscans.length; i++) {
- const ixscan = ixscans[i];
- assert.eq(ixscan.indexBounds._id,
- params.bounds,
- `Expected bounds of ${tojson(params.bounds)} but got ${
- tojson(ixscan.indexBounds._id)}. i=${i}, all output: ${tojson(explain)}`);
- }
-
- // Check that the query regex matches expected strings.
- const results = db.runCommand(command);
- assert.commandWorked(results);
- assert.eq(results.cursor.firstBatch,
- params.results,
- 'Regex query ' + tojson(query) + ' returned incorrect results');
-
- // Check that the query regex will exactly match identical regular expression objects.
- const collRegexValue = db.getCollection(collName + params.regex);
- collRegexValue.drop();
- assert.commandWorked(collRegexValue.createIndex({x: 1}));
-
- const doc = {_id: 0, x: params.regex};
- assert.writeOK(collRegexValue.insert(doc));
-
- const regexQuery = {x: params.regex};
- assert.eq(collRegexValue.findOne(regexQuery),
- doc,
- 'Regex query ' + tojson(regexQuery) +
- ' did not match document with identical regex value');
+'use strict';
+
+load('jstests/libs/analyze_plan.js');
+
+const collName = 'index_bounds_pipe';
+const coll = db.getCollection(collName);
+coll.drop();
+
+assert.writeOK(coll.insert({_id: ''}));
+assert.writeOK(coll.insert({_id: '\\|'}));
+assert.writeOK(coll.insert({_id: 'a'}));
+assert.writeOK(coll.insert({_id: 'a|b'}));
+assert.writeOK(coll.insert({_id: 'b'}));
+assert.writeOK(coll.insert({_id: '|'}));
+
+/**
+ * Asserts that a query on a field using 'params.regex' uses index bounds 'params.bounds' and
+ * returns results identical to 'params.results'.
+ *
+ * Also tests that a query using 'params.regex' will return documents with a field of type regex
+ * with an identical regular expression value.
+ */
+function assertIndexBoundsAndResult(params) {
+ const query = {_id: params.regex};
+ const command = {find: collName, filter: query, projection: {_id: 1}, sort: {_id: 1}};
+ const explain = db.runCommand({explain: command});
+ assert.commandWorked(explain);
+
+ // Check that the query uses correct index bounds. When run against a sharded cluster, there
+ // may be multiple index scan stages, but each should have the same index bounds.
+ const ixscans = getPlanStages(explain.queryPlanner.winningPlan, 'IXSCAN');
+ assert.gt(ixscans.length, 0, 'Plan unexpectedly missing IXSCAN stage: ' + tojson(explain));
+ for (let i = 0; i < ixscans.length; i++) {
+ const ixscan = ixscans[i];
+ assert.eq(ixscan.indexBounds._id,
+ params.bounds,
+ `Expected bounds of ${tojson(params.bounds)} but got ${
+ tojson(ixscan.indexBounds._id)}. i=${i}, all output: ${tojson(explain)}`);
}
- // An anchored regex that uses no special operators can use tight index bounds.
- assertIndexBoundsAndResult(
- {regex: /^a/, bounds: ['["a", "b")', '[/^a/, /^a/]'], results: [{_id: 'a'}, {_id: 'a|b'}]});
- assertIndexBoundsAndResult(
- {regex: /^\\/, bounds: ['["\\", "]")', '[/^\\\\/, /^\\\\/]'], results: [{_id: '\\|'}]});
-
- // An anchored regex using the alternation operator cannot use tight index bounds.
- assertIndexBoundsAndResult({
- regex: /^a|b/,
- bounds: ['["", {})', '[/^a|b/, /^a|b/]'],
- results: [{_id: 'a'}, {_id: 'a|b'}, {_id: 'b'}]
- });
-
- // An anchored regex that uses an escaped pipe character can use tight index bounds.
- assertIndexBoundsAndResult(
- {regex: /^a\|/, bounds: ['["a|", "a}")', '[/^a\\|/, /^a\\|/]'], results: [{_id: 'a|b'}]});
- assertIndexBoundsAndResult(
- {regex: /^\|/, bounds: ['["|", "}")', '[/^\\|/, /^\\|/]'], results: [{_id: '|'}]});
-
- // A pipe character that is preceded by an escaped backslash is correctly interpreted as the
- // alternation operator and cannot use tight index bounds.
- assertIndexBoundsAndResult({
- regex: /^\\|b/,
- bounds: ['["", {})', '[/^\\\\|b/, /^\\\\|b/]'],
- results: [{_id: '\\|'}, {_id: 'a|b'}, {_id: 'b'}]
- });
- assertIndexBoundsAndResult({
- regex: /^\\|^b/,
- bounds: ['["", {})', '[/^\\\\|^b/, /^\\\\|^b/]'],
- results: [{_id: '\\|'}, {_id: 'b'}]
- });
-
- // An escaped backslash immediately followed by an escaped pipe does not use tight index bounds.
- assertIndexBoundsAndResult({
- regex: /^\\\|/,
- bounds: ['["", {})', '[/^\\\\\\|/, /^\\\\\\|/]'],
- results: [{_id: '\\|'}]
- });
-
- // A pipe escaped with the \Q...\E escape sequence does not use tight index bounds.
- assertIndexBoundsAndResult(
- {regex: /^\Q|\E/, bounds: ['["", {})', '[/^\\Q|\\E/, /^\\Q|\\E/]'], results: [{_id: '|'}]});
-
- // An escaped pipe within \Q...\E can use tight index bounds.
- assertIndexBoundsAndResult({
- regex: /^\Q\|\E/,
- bounds: ['["\\|", "\\}")', '[/^\\Q\\|\\E/, /^\\Q\\|\\E/]'],
- results: [{_id: '\\|'}]
- });
+ // Check that the query regex matches expected strings.
+ const results = db.runCommand(command);
+ assert.commandWorked(results);
+ assert.eq(results.cursor.firstBatch,
+ params.results,
+ 'Regex query ' + tojson(query) + ' returned incorrect results');
+
+ // Check that the query regex will exactly match identical regular expression objects.
+ const collRegexValue = db.getCollection(collName + params.regex);
+ collRegexValue.drop();
+ assert.commandWorked(collRegexValue.createIndex({x: 1}));
+
+ const doc = {_id: 0, x: params.regex};
+ assert.writeOK(collRegexValue.insert(doc));
+
+ const regexQuery = {x: params.regex};
+ assert.eq(
+ collRegexValue.findOne(regexQuery),
+ doc,
+ 'Regex query ' + tojson(regexQuery) + ' did not match document with identical regex value');
+}
+
+// An anchored regex that uses no special operators can use tight index bounds.
+assertIndexBoundsAndResult(
+ {regex: /^a/, bounds: ['["a", "b")', '[/^a/, /^a/]'], results: [{_id: 'a'}, {_id: 'a|b'}]});
+assertIndexBoundsAndResult(
+ {regex: /^\\/, bounds: ['["\\", "]")', '[/^\\\\/, /^\\\\/]'], results: [{_id: '\\|'}]});
+
+// An anchored regex using the alternation operator cannot use tight index bounds.
+assertIndexBoundsAndResult({
+ regex: /^a|b/,
+ bounds: ['["", {})', '[/^a|b/, /^a|b/]'],
+ results: [{_id: 'a'}, {_id: 'a|b'}, {_id: 'b'}]
+});
+
+// An anchored regex that uses an escaped pipe character can use tight index bounds.
+assertIndexBoundsAndResult(
+ {regex: /^a\|/, bounds: ['["a|", "a}")', '[/^a\\|/, /^a\\|/]'], results: [{_id: 'a|b'}]});
+assertIndexBoundsAndResult(
+ {regex: /^\|/, bounds: ['["|", "}")', '[/^\\|/, /^\\|/]'], results: [{_id: '|'}]});
+
+// A pipe character that is preceded by an escaped backslash is correctly interpreted as the
+// alternation operator and cannot use tight index bounds.
+assertIndexBoundsAndResult({
+ regex: /^\\|b/,
+ bounds: ['["", {})', '[/^\\\\|b/, /^\\\\|b/]'],
+ results: [{_id: '\\|'}, {_id: 'a|b'}, {_id: 'b'}]
+});
+assertIndexBoundsAndResult({
+ regex: /^\\|^b/,
+ bounds: ['["", {})', '[/^\\\\|^b/, /^\\\\|^b/]'],
+ results: [{_id: '\\|'}, {_id: 'b'}]
+});
+
+// An escaped backslash immediately followed by an escaped pipe does not use tight index bounds.
+assertIndexBoundsAndResult(
+ {regex: /^\\\|/, bounds: ['["", {})', '[/^\\\\\\|/, /^\\\\\\|/]'], results: [{_id: '\\|'}]});
+
+// A pipe escaped with the \Q...\E escape sequence does not use tight index bounds.
+assertIndexBoundsAndResult(
+ {regex: /^\Q|\E/, bounds: ['["", {})', '[/^\\Q|\\E/, /^\\Q|\\E/]'], results: [{_id: '|'}]});
+
+// An escaped pipe within \Q...\E can use tight index bounds.
+assertIndexBoundsAndResult({
+ regex: /^\Q\|\E/,
+ bounds: ['["\\|", "\\}")', '[/^\\Q\\|\\E/, /^\\Q\\|\\E/]'],
+ results: [{_id: '\\|'}]
+});
}());
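The bounds strings above come straight out of explain output: a tight bound appears as a closed range over a string prefix, while any construct that defeats prefix analysis (such as unescaped alternation) falls back to the loose bound ["", {}) plus the regex itself. A sketch of reading those bounds, assuming analyze_plan.js is loaded for getPlanStages() and a hypothetical collection name:

// Sketch: unescaped alternation in an anchored regex forces the loose
// whole-string bound rather than a tight prefix range.
const rb = db.regex_bounds_sketch;  // hypothetical collection
rb.drop();
assert.writeOK(rb.insert({_id: 'abc'}));

const exp = rb.find({_id: /^a|b/}).explain();
const scans = getPlanStages(exp.queryPlanner.winningPlan, 'IXSCAN');
assert.gt(scans.length, 0);
assert.eq('["", {})', scans[0].indexBounds._id[0]);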
diff --git a/jstests/core/index_bounds_timestamp.js b/jstests/core/index_bounds_timestamp.js
index 1f7cc261c30..fe0acf12936 100644
--- a/jstests/core/index_bounds_timestamp.js
+++ b/jstests/core/index_bounds_timestamp.js
@@ -3,147 +3,136 @@
// inclusiveness and exactness.
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- // Setup the test collection.
- let coll = db.index_bounds_timestamp;
- coll.drop();
-
- // Create an index on the ts and _id fields.
- assert.commandWorked(coll.createIndex({ts: 1, _id: 1}));
-
- // Insert some test documents.
- // NOTE: Inserting Timestamp() or Timestamp(0, 0) into a collection creates a Timestamp for the
- // current time. Max Timestamp value is Timestamp(2^32 - 1, 2^32 - 1).
- const documents = [
- {_id: 0, ts: new Timestamp(0, 1)},
- {_id: 1, ts: new Timestamp(0, Math.pow(2, 31))},
- {_id: 2, ts: new Timestamp(0, Math.pow(2, 32) - 1)},
- {_id: 3, ts: new Timestamp(1, 0)},
- {_id: 4, ts: new Timestamp(Math.pow(2, 32) - 1, Math.pow(2, 32) - 1)}
- ];
- assert.writeOK(coll.insert(documents));
-
- // Sanity check the timestamp bounds generation plan.
- let plan;
-
- // Check that count over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query.
- plan = coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}).count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gt count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 5});
-
- // Check that find over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a
- // FETCH stage when the query is covered by an index.
- plan =
- coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}, {ts: 1, _id: 0}).finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gt find with project should be a covered query");
-
- // Check that count over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query.
- plan = coll.explain("executionStats").find({ts: {$gte: Timestamp(0, 0)}}).count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 5});
-
- // Check that find over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a
- // FETCH stage when the query is covered by an index.
- plan = coll.explain("executionStats")
- .find({ts: {$gte: Timestamp(0, 0)}}, {ts: 1, _id: 0})
- .finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte find with project should be a covered query");
-
- // Check that count over [Timestamp(0, 0), Timestamp(1, 0)) is a covered query.
- plan = coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}).count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $lt count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 3});
-
- // Check that find over [Timestamp(0, 0), Timestamp(1, 0)) does not require a FETCH stage when
- // the query is covered by an index.
- plan =
- coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}).finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $lt find with project should be a covered query");
-
- // Check that count over [Timestamp(0, 0), Timestamp(1, 0)] is a covered query.
- plan = coll.explain("executionStats").find({ts: {$lte: Timestamp(1, 0)}}).count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $lte count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 4});
-
- // Check that find over [Timestamp(0, 0), Timestamp(1, 0)] does not require a FETCH stage when
- // the query is covered by an index.
- plan = coll.explain("executionStats")
- .find({ts: {$lte: Timestamp(1, 0)}}, {ts: 1, _id: 0})
- .finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $lte find with project should be a covered query");
-
- // Check that count over (Timestamp(0, 1), Timestamp(1, 0)) is a covered query.
- plan = coll.explain("executionStats")
- .find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}})
- .count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gt, $lt count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 2});
-
- // Check that find over (Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when
- // the query is covered by an index.
- plan = coll.explain("executionStats")
- .find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0})
- .finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gt, $lt find with project should be a covered query");
-
- // Check that count over (Timestamp(0, 1), Timestamp(1, 0)] is a covered query.
- plan = coll.explain("executionStats")
- .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}})
- .count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gt, $lte count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 3});
-
- // Check that find over (Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when
- // the query is covered by an index.
- plan = coll.explain("executionStats")
- .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0})
- .finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gt, $lte find with project should be a covered query");
-
- // Check that count over [Timestamp(0, 1), Timestamp(1, 0)) is a covered query.
- plan = coll.explain("executionStats")
- .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}})
- .count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte, $lt count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 3});
-
- // Check that find over [Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when
- // the query is covered by an index.
- plan = coll.explain("executionStats")
- .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0})
- .finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte, $lt find with project should be a covered query");
-
- // Check that count over [Timestamp(0, 1), Timestamp(1, 0)] is a covered query.
- plan = coll.explain("executionStats")
- .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}})
- .count();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte, $lte count should be a covered query");
- assertExplainCount({explainResults: plan, expectedCount: 4});
-
- // Check that find over [Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when
- // the query is covered by an index.
- plan = coll.explain("executionStats")
- .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0})
- .finish();
- assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte, $lte find with project should be a covered query");
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+// Set up the test collection.
+let coll = db.index_bounds_timestamp;
+coll.drop();
+
+// Create an index on the ts and _id fields.
+assert.commandWorked(coll.createIndex({ts: 1, _id: 1}));
+
+// Insert some test documents.
+// NOTE: Inserting Timestamp() or Timestamp(0, 0) into a collection creates a Timestamp for the
+// current time. Max Timestamp value is Timestamp(2^32 - 1, 2^32 - 1).
+const documents = [
+ {_id: 0, ts: new Timestamp(0, 1)},
+ {_id: 1, ts: new Timestamp(0, Math.pow(2, 31))},
+ {_id: 2, ts: new Timestamp(0, Math.pow(2, 32) - 1)},
+ {_id: 3, ts: new Timestamp(1, 0)},
+ {_id: 4, ts: new Timestamp(Math.pow(2, 32) - 1, Math.pow(2, 32) - 1)}
+];
+assert.writeOK(coll.insert(documents));
+
+// Sanity check the timestamp bounds generation plan.
+let plan;
+
+// Check that count over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query.
+plan = coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}).count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $gt count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 5});
+
+// Check that find over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a
+// FETCH stage when the query is covered by an index.
+plan = coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}, {ts: 1, _id: 0}).finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gt find with project should be a covered query");
+
+// Check that count over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query.
+plan = coll.explain("executionStats").find({ts: {$gte: Timestamp(0, 0)}}).count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $gte count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 5});
+
+// Check that find over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a
+// FETCH stage when the query is covered by an index.
+plan = coll.explain("executionStats").find({ts: {$gte: Timestamp(0, 0)}}, {ts: 1, _id: 0}).finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gte find with project should be a covered query");
+
+// Check that count over [Timestamp(0, 0), Timestamp(1, 0)) is a covered query.
+plan = coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}).count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $lt count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 3});
+
+// Check that find over [Timestamp(0, 0), Timestamp(1, 0)) does not require a FETCH stage when
+// the query is covered by an index.
+plan = coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}).finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $lt find with project should be a covered query");
+
+// Check that count over [Timestamp(0, 0), Timestamp(1, 0)] is a covered query.
+plan = coll.explain("executionStats").find({ts: {$lte: Timestamp(1, 0)}}).count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $lte count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 4});
+
+// Check that find over [Timestamp(0, 0), Timestamp(1, 0)] does not require a FETCH stage when
+// the query is covered by an index.
+plan = coll.explain("executionStats").find({ts: {$lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}).finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $lte find with project should be a covered query");
+
+// Check that count over (Timestamp(0, 1), Timestamp(1, 0)) is a covered query.
+plan =
+ coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}).count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gt, $lt count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 2});
+
+// Check that find over (Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when
+// the query is covered by an index.
+plan = coll.explain("executionStats")
+ .find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0})
+ .finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gt, $lt find with project should be a covered query");
+
+// Check that count over (Timestamp(0, 1), Timestamp(1, 0)] is a covered query.
+plan = coll.explain("executionStats")
+ .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}})
+ .count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gt, $lte count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 3});
+
+// Check that find over (Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when
+// the query is covered by an index.
+plan = coll.explain("executionStats")
+ .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0})
+ .finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gt, $lte find with project should be a covered query");
+
+// Check that count over [Timestamp(0, 1), Timestamp(1, 0)) is a covered query.
+plan = coll.explain("executionStats")
+ .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}})
+ .count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gte, $lt count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 3});
+
+// Check that find over [Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when
+// the query is covered by an index.
+plan = coll.explain("executionStats")
+ .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0})
+ .finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gte, $lt find with project should be a covered query");
+
+// Check that count over [Timestamp(0, 1), Timestamp(1, 0)] is a covered query.
+plan = coll.explain("executionStats")
+ .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}})
+ .count();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gte, $lte count should be a covered query");
+assertExplainCount({explainResults: plan, expectedCount: 4});
+
+// Check that find over [Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when
+// the query is covered by an index.
+plan = coll.explain("executionStats")
+ .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0})
+ .finish();
+assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
+ "ts $gte, $lte find with project should be a covered query");
})();
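
The checks above all follow one pattern: index the Timestamp field, query it with a range
predicate, and project only that field (excluding _id) so the winning plan needs no FETCH
stage. A minimal self-contained sketch of that pattern, using a hypothetical collection name
ts_covered_sketch and the isIndexOnly() helper from jstests/libs/analyze_plan.js:

    (function() {
    'use strict';
    load("jstests/libs/analyze_plan.js");  // For isIndexOnly().

    const coll = db.ts_covered_sketch;  // Hypothetical collection for illustration.
    coll.drop();
    assert.commandWorked(coll.createIndex({ts: 1}));
    assert.writeOK(coll.insert({ts: Timestamp(0, 1)}));

    // Projecting only the indexed field and excluding _id lets the index alone
    // answer the query, so the winning plan contains no FETCH stage.
    const plan = coll.explain("executionStats")
                     .find({ts: {$gte: Timestamp(0, 0)}}, {ts: 1, _id: 0})
                     .finish();
    assert(isIndexOnly(db, plan.queryPlanner.winningPlan), tojson(plan));
    })();
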
diff --git a/jstests/core/index_check6.js b/jstests/core/index_check6.js
index fd4a7177ffb..6e5ccdb7a4c 100644
--- a/jstests/core/index_check6.js
+++ b/jstests/core/index_check6.js
@@ -38,7 +38,8 @@ assert.eq(
"D"); // SERVER-371
assert.eq.automsg("2",
- "t.find( { age:30, rating:{ $gte:4, $lte:5} } )" + ".explain('executionStats')" +
+ "t.find( { age:30, rating:{ $gte:4, $lte:5} } )" +
+ ".explain('executionStats')" +
".executionStats.totalKeysExamined");
t.drop();
diff --git a/jstests/core/index_create_with_nul_in_name.js b/jstests/core/index_create_with_nul_in_name.js
index c128dcc5880..c4894830fc8 100644
--- a/jstests/core/index_create_with_nul_in_name.js
+++ b/jstests/core/index_create_with_nul_in_name.js
@@ -1,14 +1,14 @@
// SERVER-16672 disallow creating indexes with NUL bytes in the name
(function() {
- 'use strict';
+'use strict';
- var coll = db.create_index_with_nul_in_name;
- coll.drop();
+var coll = db.create_index_with_nul_in_name;
+coll.drop();
- var idx = {key: {'a': 1}, name: 'foo\0bar', ns: coll.getFullName()};
+var idx = {key: {'a': 1}, name: 'foo\0bar', ns: coll.getFullName()};
- var res = coll.runCommand('createIndexes', {indexes: [idx]});
- assert.commandFailed(res, tojson(res));
- assert.eq(res.code, 67); // CannotCreateIndex
+var res = coll.runCommand('createIndexes', {indexes: [idx]});
+assert.commandFailed(res, tojson(res));
+assert.eq(res.code, 67); // CannotCreateIndex
}());
diff --git a/jstests/core/index_decimal.js b/jstests/core/index_decimal.js
index 1fbb62332a2..9736d8f0903 100644
--- a/jstests/core/index_decimal.js
+++ b/jstests/core/index_decimal.js
@@ -5,53 +5,52 @@
// Test indexing of decimal numbers
(function() {
- 'use strict';
-
- // Include helpers for analyzing explain output.
- load('jstests/libs/analyze_plan.js');
-
- var t = db.decimal_indexing;
- t.drop();
-
- // Create doubles and NumberDecimals. The double 0.1 is actually 0.10000000000000000555
- // and the double 0.3 is actually 0.2999999999999999888, so we can check ordering.
- assert.writeOK(t.insert({x: 0.1, y: NumberDecimal('0.3000')}));
- assert.writeOK(t.insert({x: 0.1}));
- assert.writeOK(t.insert({y: 0.3}));
-
- // Create an index on existing numbers.
- assert.commandWorked(t.createIndex({x: 1}));
- assert.commandWorked(t.createIndex({y: -1}));
-
- // Insert some more items after index creation. Use _id for decimal.
- assert.writeOK(t.insert({x: NumberDecimal('0.10')}));
- assert.writeOK(t.insert({_id: NumberDecimal('0E3')}));
- assert.writeError(t.insert({_id: -0.0}));
-
- // Check that we return exactly the right document, use an index to do so, and that the
- // result of the covered query has the right number of trailing zeros.
- var qres = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).toArray();
- var qplan = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).explain();
- assert.neq(tojson(NumberDecimal('0.1')),
- tojson(NumberDecimal('0.10')),
- 'trailing zeros are significant for exact equality');
- assert.eq(qres,
- [{x: NumberDecimal('0.10')}],
- 'query for x equal to decimal 0.10 returns wrong value');
- assert(isIndexOnly(db, qplan.queryPlanner.winningPlan),
- 'query on decimal should be covered: ' + tojson(qplan));
-
- // Check that queries for exact floating point numbers don't return nearby decimals.
- assert.eq(t.find({x: 0.1}, {_id: 0}).sort({x: 1, y: 1}).toArray(),
- [{x: 0.1}, {x: 0.1, y: NumberDecimal('0.3000')}],
- 'wrong result for querying {x: 0.1}');
- assert.eq(t.find({x: {$lt: 0.1}}, {_id: 0}).toArray(),
- [{x: NumberDecimal('0.10')}],
- 'querying for decimal less than double 0.1 should return decimal 0.10');
- assert.eq(t.find({y: {$lt: NumberDecimal('0.3')}}, {y: 1, _id: 0}).toArray(),
- [{y: 0.3}],
- 'querying for double less than decimal 0.3 should return double 0.3');
- assert.eq(t.find({_id: 0}, {_id: 1}).toArray(),
- [{_id: NumberDecimal('0E3')}],
- 'querying for zero does not return the correct decimal');
+'use strict';
+
+// Include helpers for analyzing explain output.
+load('jstests/libs/analyze_plan.js');
+
+var t = db.decimal_indexing;
+t.drop();
+
+// Create doubles and NumberDecimals. The double 0.1 is actually 0.10000000000000000555
+// and the double 0.3 is actually 0.2999999999999999888, so we can check ordering.
+assert.writeOK(t.insert({x: 0.1, y: NumberDecimal('0.3000')}));
+assert.writeOK(t.insert({x: 0.1}));
+assert.writeOK(t.insert({y: 0.3}));
+
+// Create an index on existing numbers.
+assert.commandWorked(t.createIndex({x: 1}));
+assert.commandWorked(t.createIndex({y: -1}));
+
+// Insert some more items after index creation. Use _id for decimal.
+assert.writeOK(t.insert({x: NumberDecimal('0.10')}));
+assert.writeOK(t.insert({_id: NumberDecimal('0E3')}));
+assert.writeError(t.insert({_id: -0.0}));
+
+// Check that we return exactly the right document, use an index to do so, and that the
+// result of the covered query has the right number of trailing zeros.
+var qres = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).toArray();
+var qplan = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).explain();
+assert.neq(tojson(NumberDecimal('0.1')),
+ tojson(NumberDecimal('0.10')),
+ 'trailing zeros are significant for exact equality');
+assert.eq(
+ qres, [{x: NumberDecimal('0.10')}], 'query for x equal to decimal 0.10 returns wrong value');
+assert(isIndexOnly(db, qplan.queryPlanner.winningPlan),
+ 'query on decimal should be covered: ' + tojson(qplan));
+
+// Check that queries for exact floating point numbers don't return nearby decimals.
+assert.eq(t.find({x: 0.1}, {_id: 0}).sort({x: 1, y: 1}).toArray(),
+ [{x: 0.1}, {x: 0.1, y: NumberDecimal('0.3000')}],
+ 'wrong result for querying {x: 0.1}');
+assert.eq(t.find({x: {$lt: 0.1}}, {_id: 0}).toArray(),
+ [{x: NumberDecimal('0.10')}],
+ 'querying for decimal less than double 0.1 should return decimal 0.10');
+assert.eq(t.find({y: {$lt: NumberDecimal('0.3')}}, {y: 1, _id: 0}).toArray(),
+ [{y: 0.3}],
+ 'querying for double less than decimal 0.3 should return double 0.3');
+assert.eq(t.find({_id: 0}, {_id: 1}).toArray(),
+ [{_id: NumberDecimal('0E3')}],
+ 'querying for zero does not return the correct decimal');
})();
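
The comparison rules that test relies on can be seen in isolation: NumberDecimal('0.1') and
NumberDecimal('0.10') are equal for matching purposes even though tojson() preserves the
trailing zero, while the double 0.1 is a slightly larger number and matches neither. A minimal
sketch, assuming a hypothetical collection name decimal_sketch:

    (function() {
    'use strict';
    const t = db.decimal_sketch;  // Hypothetical collection for illustration.
    t.drop();
    assert.commandWorked(t.createIndex({x: 1}));
    assert.writeOK(t.insert({x: NumberDecimal('0.10')}));

    // Trailing zeros are insignificant for matching, though preserved in output.
    assert.eq(1, t.find({x: NumberDecimal('0.1')}).itcount());
    assert.eq(1, t.find({x: NumberDecimal('0.10')}).itcount());

    // The double 0.1 is really ~0.10000000000000000555, strictly greater than
    // decimal 0.10, so an exact match on the double finds nothing.
    assert.eq(0, t.find({x: 0.1}).itcount());
    })();
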
diff --git a/jstests/core/index_elemmatch1.js b/jstests/core/index_elemmatch1.js
index 7b37c55d37d..3957e9d185c 100644
--- a/jstests/core/index_elemmatch1.js
+++ b/jstests/core/index_elemmatch1.js
@@ -3,30 +3,34 @@
* @tags: [assumes_balancer_off]
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.index_elemmatch1;
- coll.drop();
+const coll = db.index_elemmatch1;
+coll.drop();
- let x = 0;
- let y = 0;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let a = 0; a < 10; a++) {
- for (let b = 0; b < 10; b++) {
- bulk.insert({a: a, b: b % 10, arr: [{x: x++ % 10, y: y++ % 10}]});
- }
+let x = 0;
+let y = 0;
+const bulk = coll.initializeUnorderedBulkOp();
+for (let a = 0; a < 10; a++) {
+ for (let b = 0; b < 10; b++) {
+ bulk.insert({a: a, b: b % 10, arr: [{x: x++ % 10, y: y++ % 10}]});
}
- assert.commandWorked(bulk.execute());
+}
+assert.commandWorked(bulk.execute());
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.commandWorked(coll.createIndex({"arr.x": 1, a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({"arr.x": 1, a: 1}));
- const query = {a: 5, b: {$in: [1, 3, 5]}, arr: {$elemMatch: {x: 5, y: 5}}};
+const query = {
+ a: 5,
+ b: {$in: [1, 3, 5]},
+ arr: {$elemMatch: {x: 5, y: 5}}
+};
- const count = coll.find(query).itcount();
- assert.eq(count, 1);
+const count = coll.find(query).itcount();
+assert.eq(count, 1);
- const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats");
- assert.commandWorked(explain);
- assert.eq(count, explain.executionStats.totalKeysExamined, explain);
+const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats");
+assert.commandWorked(explain);
+assert.eq(count, explain.executionStats.totalKeysExamined, explain);
})();
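
The final assertion there cross-checks itcount() against the execution stats of a hinted plan.
Distilled to two documents (hypothetical collection name elemmatch_keys_sketch): both produce
the same index key {arr.x: 5, a: 5}, so the hinted scan examines two keys and the residual
$elemMatch filter leaves a single match.

    (function() {
    'use strict';
    const coll = db.elemmatch_keys_sketch;  // Hypothetical collection.
    coll.drop();
    assert.commandWorked(coll.createIndex({"arr.x": 1, a: 1}));
    assert.writeOK(coll.insert({a: 5, arr: [{x: 5, y: 5}]}));
    assert.writeOK(coll.insert({a: 5, arr: [{x: 5, y: 6}]}));

    const query = {a: 5, arr: {$elemMatch: {x: 5, y: 5}}};
    const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats");
    assert.commandWorked(explain);

    // Two identical keys are examined; only one document survives the filter.
    assert.eq(2, explain.executionStats.totalKeysExamined, tojson(explain));
    assert.eq(1, explain.executionStats.nReturned, tojson(explain));
    })();
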
diff --git a/jstests/core/index_elemmatch2.js b/jstests/core/index_elemmatch2.js
index d87b26e7642..d2ff872dc49 100644
--- a/jstests/core/index_elemmatch2.js
+++ b/jstests/core/index_elemmatch2.js
@@ -3,61 +3,62 @@
* compatible with the index.
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const coll = db.elemMatch_index;
- coll.drop();
-
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: [{}]}));
- assert.writeOK(coll.insert({a: [1, null]}));
- assert.writeOK(coll.insert({a: [{type: "Point", coordinates: [0, 0]}]}));
-
- assert.commandWorked(coll.createIndex({a: 1}, {sparse: true}));
-
- function assertIndexResults(coll, query, useIndex, nReturned) {
- const explainPlan = coll.find(query).explain("executionStats");
- assert.eq(isIxscan(db, explainPlan.queryPlanner.winningPlan), useIndex);
- assert.eq(explainPlan.executionStats.nReturned, nReturned);
- }
-
- assertIndexResults(coll, {a: {$elemMatch: {$exists: false}}}, false, 0);
-
- // An $elemMatch predicate is treated as nested, and the index should be used for $exists:true.
- assertIndexResults(coll, {a: {$elemMatch: {$exists: true}}}, true, 3);
-
- // $not within $elemMatch should not attempt to use a sparse index for $exists:false.
- assertIndexResults(coll, {a: {$elemMatch: {$not: {$exists: false}}}}, false, 3);
- assertIndexResults(coll, {a: {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, false, 1);
-
- // $geo within $elemMatch should not attempt to use a non-geo index.
- assertIndexResults(
- coll,
- {
- a: {
- $elemMatch: {
- $geoWithin: {
- $geometry:
- {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 0], [0, 0]]]}
- }
- }
- }
- },
- false,
- 1);
-
- // $in with a null value within $elemMatch should use a sparse index.
- assertIndexResults(coll, {a: {$elemMatch: {$in: [null]}}}, true, 1);
-
- // $eq with a null value within $elemMatch should use a sparse index.
- assertIndexResults(coll, {a: {$elemMatch: {$eq: null}}}, true, 1);
-
- // A negated regex within $elemMatch should not use an index, sparse or not.
- assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3);
-
- coll.dropIndexes();
- assert.commandWorked(coll.createIndex({a: 1}));
- assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3);
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const coll = db.elemMatch_index;
+coll.drop();
+
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: [{}]}));
+assert.writeOK(coll.insert({a: [1, null]}));
+assert.writeOK(coll.insert({a: [{type: "Point", coordinates: [0, 0]}]}));
+
+assert.commandWorked(coll.createIndex({a: 1}, {sparse: true}));
+
+function assertIndexResults(coll, query, useIndex, nReturned) {
+ const explainPlan = coll.find(query).explain("executionStats");
+ assert.eq(isIxscan(db, explainPlan.queryPlanner.winningPlan), useIndex);
+ assert.eq(explainPlan.executionStats.nReturned, nReturned);
+}
+
+assertIndexResults(coll, {a: {$elemMatch: {$exists: false}}}, false, 0);
+
+// An $elemMatch predicate is treated as nested, and the index should be used for $exists:true.
+assertIndexResults(coll, {a: {$elemMatch: {$exists: true}}}, true, 3);
+
+// $not within $elemMatch should not attempt to use a sparse index for $exists:false.
+assertIndexResults(coll, {a: {$elemMatch: {$not: {$exists: false}}}}, false, 3);
+assertIndexResults(coll, {a: {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, false, 1);
+
+// $geo within $elemMatch should not attempt to use a non-geo index.
+assertIndexResults(coll,
+ {
+ a: {
+ $elemMatch: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[0, 0], [0, 1], [1, 0], [0, 0]]]
+ }
+ }
+ }
+ }
+ },
+ false,
+ 1);
+
+// $in with a null value within $elemMatch should use a sparse index.
+assertIndexResults(coll, {a: {$elemMatch: {$in: [null]}}}, true, 1);
+
+// $eq with a null value within $elemMatch should use a sparse index.
+assertIndexResults(coll, {a: {$elemMatch: {$eq: null}}}, true, 1);
+
+// A negated regex within $elemMatch should not use an index, sparse or not.
+assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3);
+
+coll.dropIndexes();
+assert.commandWorked(coll.createIndex({a: 1}));
+assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3);
})();
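
One of those rules in isolation: an explicit null array element is present in a sparse index,
so {$elemMatch: {$eq: null}} can use the index without missing documents. A minimal sketch
(hypothetical collection name elemmatch_sparse_sketch):

    (function() {
    'use strict';
    load("jstests/libs/analyze_plan.js");  // For isIxscan().

    const coll = db.elemmatch_sparse_sketch;  // Hypothetical collection.
    coll.drop();
    assert.commandWorked(coll.createIndex({a: 1}, {sparse: true}));
    assert.writeOK(coll.insert({a: [1, null]}));
    assert.writeOK(coll.insert({b: 1}));  // No 'a', so absent from the sparse index.

    // The null array element is indexed, so the sparse index answers the
    // predicate and returns exactly the one array document.
    const explain = coll.find({a: {$elemMatch: {$eq: null}}}).explain("executionStats");
    assert(isIxscan(db, explain.queryPlanner.winningPlan), tojson(explain));
    assert.eq(1, explain.executionStats.nReturned, tojson(explain));
    })();
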
diff --git a/jstests/core/index_filter_catalog_independent.js b/jstests/core/index_filter_catalog_independent.js
index f3ea81a6627..32c7c1669a6 100644
--- a/jstests/core/index_filter_catalog_independent.js
+++ b/jstests/core/index_filter_catalog_independent.js
@@ -11,79 +11,79 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- const collName = "index_filter_catalog_independent";
- const coll = db[collName];
- coll.drop();
+const collName = "index_filter_catalog_independent";
+const coll = db[collName];
+coll.drop();
- /*
- * Check that there's one index filter on the given query which allows only 'indexes'.
- */
- function assertOneIndexFilter(query, indexes) {
- let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
- assert.eq(res.filters.length, 1);
- assert.eq(res.filters[0].query, query);
- assert.eq(res.filters[0].indexes, indexes);
- }
+/*
+ * Check that there's one index filter on the given query which allows only 'indexes'.
+ */
+function assertOneIndexFilter(query, indexes) {
+ let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
+ assert.eq(res.filters.length, 1);
+ assert.eq(res.filters[0].query, query);
+ assert.eq(res.filters[0].indexes, indexes);
+}
- function assertIsIxScanOnIndex(winningPlan, keyPattern) {
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0);
- ixScans.every((ixScan) => assert.eq(ixScan.keyPattern, keyPattern));
+function assertIsIxScanOnIndex(winningPlan, keyPattern) {
+ const ixScans = getPlanStages(winningPlan, "IXSCAN");
+ assert.gt(ixScans.length, 0);
+    // forEach, not every(): assert.eq() returns undefined, which would stop every() early.
+    ixScans.forEach((ixScan) => assert.eq(ixScan.keyPattern, keyPattern));
- const collScans = getPlanStages(winningPlan, "COLLSCAN");
- assert.eq(collScans.length, 0);
- }
+ const collScans = getPlanStages(winningPlan, "COLLSCAN");
+ assert.eq(collScans.length, 0);
+}
- function checkIndexFilterSet(explain, shouldBeSet) {
- if (explain.queryPlanner.winningPlan.shards) {
- for (let shard of explain.queryPlanner.winningPlan.shards) {
- assert.eq(shard.indexFilterSet, shouldBeSet);
- }
- } else {
- assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet);
+function checkIndexFilterSet(explain, shouldBeSet) {
+ if (explain.queryPlanner.winningPlan.shards) {
+ for (let shard of explain.queryPlanner.winningPlan.shards) {
+ assert.eq(shard.indexFilterSet, shouldBeSet);
}
+ } else {
+ assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet);
}
+}
- assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
- assert.commandWorked(
- db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
+assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
+assert.commandWorked(
+ db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
+assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
- let explain = assert.commandWorked(coll.find({x: 3}).explain());
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
+let explain = assert.commandWorked(coll.find({x: 3}).explain());
+checkIndexFilterSet(explain, true);
+assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
- // Drop an index. The filter should not change.
- assert.commandWorked(coll.dropIndex({x: 1, y: 1}));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
+// Drop an index. The filter should not change.
+assert.commandWorked(coll.dropIndex({x: 1, y: 1}));
+assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
- // The {x: 1} index _could_ be used, but should not be considered because of the filter.
- // Since we dropped the {x: 1, y: 1} index, a COLLSCAN must be used.
- explain = coll.find({x: 3}).explain();
- checkIndexFilterSet(explain, true);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
+// The {x: 1} index _could_ be used, but should not be considered because of the filter.
+// Since we dropped the {x: 1, y: 1} index, a COLLSCAN must be used.
+explain = coll.find({x: 3}).explain();
+checkIndexFilterSet(explain, true);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
- // Create another index. This should not change whether the index filter is applied.
- assert.commandWorked(coll.createIndex({x: 1, z: 1}));
- explain = assert.commandWorked(coll.find({x: 3}).explain());
- checkIndexFilterSet(explain, true);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
+// Create another index. This should not change whether the index filter is applied.
+assert.commandWorked(coll.createIndex({x: 1, z: 1}));
+explain = assert.commandWorked(coll.find({x: 3}).explain());
+checkIndexFilterSet(explain, true);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
- // Changing the catalog and then setting an index filter should not result in duplicate entries.
- assert.commandWorked(coll.createIndex({x: 1, a: 1}));
- assert.commandWorked(
- db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
+// Changing the catalog and then setting an index filter should not result in duplicate entries.
+assert.commandWorked(coll.createIndex({x: 1, a: 1}));
+assert.commandWorked(
+ db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
+assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
- // Recreate the {x: 1, y: 1} index and be sure that it's still used.
- assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
+// Recreate the {x: 1, y: 1} index and be sure that it's still used.
+assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
+assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
- explain = assert.commandWorked(coll.find({x: 3}).explain());
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
+explain = assert.commandWorked(coll.find({x: 3}).explain());
+checkIndexFilterSet(explain, true);
+assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
})();
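
The set/list pair used throughout that test has a companion command for cleanup. A minimal
sketch of the full filter lifecycle, assuming a hypothetical collection name
filter_lifecycle_sketch:

    (function() {
    'use strict';
    const collName = "filter_lifecycle_sketch";  // Hypothetical collection.
    const coll = db[collName];
    coll.drop();
    assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));

    // Install a filter, confirm it is listed, then clear that query shape.
    assert.commandWorked(
        db.runCommand({planCacheSetFilter: collName, query: {x: 3}, indexes: [{x: 1, y: 1}]}));
    let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
    assert.eq(1, res.filters.length);

    assert.commandWorked(db.runCommand({planCacheClearFilters: collName, query: {x: 3}}));
    res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
    assert.eq(0, res.filters.length);
    })();
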
diff --git a/jstests/core/index_filter_collation.js b/jstests/core/index_filter_collation.js
index 92bbe005ce4..d6fa0daaa73 100644
--- a/jstests/core/index_filter_collation.js
+++ b/jstests/core/index_filter_collation.js
@@ -11,73 +11,76 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- const collName = "index_filter_collation";
- const coll = db[collName];
+const collName = "index_filter_collation";
+const coll = db[collName];
- const caseInsensitive = {locale: "fr", strength: 2};
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {collation: caseInsensitive}));
+const caseInsensitive = {
+ locale: "fr",
+ strength: 2
+};
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {collation: caseInsensitive}));
- function checkIndexFilterSet(explain, shouldBeSet) {
- if (explain.queryPlanner.winningPlan.shards) {
- for (let shard of explain.queryPlanner.winningPlan.shards) {
- assert.eq(shard.indexFilterSet, shouldBeSet);
- }
- } else {
- assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet);
+function checkIndexFilterSet(explain, shouldBeSet) {
+ if (explain.queryPlanner.winningPlan.shards) {
+ for (let shard of explain.queryPlanner.winningPlan.shards) {
+ assert.eq(shard.indexFilterSet, shouldBeSet);
}
+ } else {
+ assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet);
}
+}
- // Now create an index filter on a query with no collation specified.
- assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
- assert.commandWorked(
- db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
+// Now create an index filter on a query with no collation specified.
+assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
+assert.commandWorked(
+ db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
- const listFilters = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
- assert.eq(listFilters.filters.length, 1);
- assert.eq(listFilters.filters[0].query, {x: 3});
- assert.eq(listFilters.filters[0].indexes, [{x: 1, y: 1}]);
+const listFilters = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
+assert.eq(listFilters.filters.length, 1);
+assert.eq(listFilters.filters[0].query, {x: 3});
+assert.eq(listFilters.filters[0].indexes, [{x: 1, y: 1}]);
- // Create an index filter on a query with the default collation specified.
- assert.commandWorked(db.runCommand({
- planCacheSetFilter: collName,
- query: {"x": 3},
- collation: caseInsensitive,
- indexes: [{x: 1}]
- }));
+// Create an index filter on a query with the default collation specified.
+assert.commandWorked(db.runCommand({
+ planCacheSetFilter: collName,
+ query: {"x": 3},
+ collation: caseInsensitive,
+ indexes: [{x: 1}]
+}));
- // Although these two queries would run with the same collation, they have different "shapes"
- // so we expect there to be two index filters present.
- let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
- assert.eq(res.filters.length, 2);
+// Although these two queries would run with the same collation, they have different "shapes",
+// so we expect there to be two index filters present.
+let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
+assert.eq(res.filters.length, 2);
- // One of the filters should only be applied to queries with the "fr" collation
- // and use the {x: 1} index.
- assert(res.filters.some((filter) => filter.hasOwnProperty("collation") &&
- filter.collation.locale === "fr" &&
- friendlyEqual(filter.indexes, [{x: 1}])));
+// One of the filters should only be applied to queries with the "fr" collation
+// and use the {x: 1} index.
+assert(res.filters.some((filter) => filter.hasOwnProperty("collation") &&
+ filter.collation.locale === "fr" &&
+ friendlyEqual(filter.indexes, [{x: 1}])));
- // The other should not have any collation, and allow the index {x: 1, y: 1}.
- assert(res.filters.some((filter) => !filter.hasOwnProperty("collation") &&
- friendlyEqual(filter.indexes, [{x: 1, y: 1}])));
+// The other should not have any collation, and allow the index {x: 1, y: 1}.
+assert(res.filters.some((filter) => !filter.hasOwnProperty("collation") &&
+ friendlyEqual(filter.indexes, [{x: 1, y: 1}])));
- function assertIsIxScanOnIndex(winningPlan, keyPattern) {
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0);
- assert.eq(ixScans[0].keyPattern, keyPattern);
- }
+function assertIsIxScanOnIndex(winningPlan, keyPattern) {
+ const ixScans = getPlanStages(winningPlan, "IXSCAN");
+ assert.gt(ixScans.length, 0);
+ assert.eq(ixScans[0].keyPattern, keyPattern);
+}
- // Run the queries and be sure the correct indexes are used.
- let explain = coll.find({x: 3}).explain();
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
+// Run the queries and be sure the correct indexes are used.
+let explain = coll.find({x: 3}).explain();
+checkIndexFilterSet(explain, true);
+assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
- // Run the queries and be sure the correct indexes are used.
- explain = coll.find({x: 3}).collation(caseInsensitive).explain();
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1});
+// Run the query with the case-insensitive collation and be sure its filter's index is used.
+explain = coll.find({x: 3}).collation(caseInsensitive).explain();
+checkIndexFilterSet(explain, true);
+assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1});
})();
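
The "shape" rule that the test depends on reduces to two commands: the same predicate with and
without a collation yields two distinct query shapes, hence two coexisting filters rather than
an overwrite. A minimal sketch (hypothetical collection name filter_shape_sketch):

    (function() {
    'use strict';
    const collName = "filter_shape_sketch";  // Hypothetical collection.
    const coll = db[collName];
    coll.drop();
    assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));

    assert.commandWorked(
        db.runCommand({planCacheSetFilter: collName, query: {x: 3}, indexes: [{x: 1, y: 1}]}));
    assert.commandWorked(db.runCommand({
        planCacheSetFilter: collName,
        query: {x: 3},
        collation: {locale: "fr", strength: 2},
        indexes: [{x: 1}]
    }));

    // Two filters, one per shape, not an overwrite of a single entry.
    const res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
    assert.eq(2, res.filters.length);
    })();
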
diff --git a/jstests/core/index_id_options.js b/jstests/core/index_id_options.js
index 7f16c7ba8c9..91bdce8090f 100644
--- a/jstests/core/index_id_options.js
+++ b/jstests/core/index_id_options.js
@@ -10,69 +10,66 @@
// - Non-_id indexes cannot have the name "_id_".
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- // Must use local db for testing because autoIndexId:false collections are not allowed in
- // replicated databases.
- var coll = db.getSiblingDB("local").index_id_options;
+// Must use local db for testing because autoIndexId:false collections are not allowed in
+// replicated databases.
+var coll = db.getSiblingDB("local").index_id_options;
- // _id indexes must have key pattern {_id: 1}.
- coll.drop();
- assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
- assert.commandFailed(coll.ensureIndex({_id: -1}, {name: "_id_"}));
+// _id indexes must have key pattern {_id: 1}.
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.commandFailed(coll.ensureIndex({_id: -1}, {name: "_id_"}));
- // The name of an _id index gets corrected to "_id_".
- coll.drop();
- assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
- assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "bad"}));
- var spec = GetIndexHelpers.findByKeyPattern(coll.getIndexes(), {_id: 1});
- assert.neq(null, spec, "_id index spec not found");
- assert.eq("_id_", spec.name, tojson(spec));
+// The name of an _id index gets corrected to "_id_".
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "bad"}));
+var spec = GetIndexHelpers.findByKeyPattern(coll.getIndexes(), {_id: 1});
+assert.neq(null, spec, "_id index spec not found");
+assert.eq("_id_", spec.name, tojson(spec));
- // _id indexes cannot have any options other than "key", "name", "ns", "v", and "collation."
- coll.drop();
- assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unique: true}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", sparse: false}));
- assert.commandFailed(
- coll.ensureIndex({_id: 1}, {name: "_id_", partialFilterExpression: {a: 1}}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", expireAfterSeconds: 3600}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", background: false}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unknown: true}));
- assert.commandWorked(coll.ensureIndex(
- {_id: 1}, {name: "_id_", ns: coll.getFullName(), v: 2, collation: {locale: "simple"}}));
+// _id indexes cannot have any options other than "key", "name", "ns", "v", and "collation."
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unique: true}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", sparse: false}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", partialFilterExpression: {a: 1}}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", expireAfterSeconds: 3600}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", background: false}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unknown: true}));
+assert.commandWorked(coll.ensureIndex(
+ {_id: 1}, {name: "_id_", ns: coll.getFullName(), v: 2, collation: {locale: "simple"}}));
- // _id indexes must have the collection default collation.
- coll.drop();
- assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}}));
+// _id indexes must have the collection default collation.
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}}));
- coll.drop();
- assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
- assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"}));
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"}));
- coll.drop();
- assert.commandWorked(
- coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}}));
- assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "fr_CA"}}));
- assert.commandWorked(
- coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US", strength: 3}}));
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}}));
+assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "fr_CA"}}));
+assert.commandWorked(
+ coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US", strength: 3}}));
- coll.drop();
- assert.commandWorked(
- coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}}));
- assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"}));
- spec = GetIndexHelpers.findByName(coll.getIndexes(), "_id_");
- assert.neq(null, spec, "_id index spec not found");
- assert.eq("en_US", spec.collation.locale, tojson(spec));
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}}));
+assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"}));
+spec = GetIndexHelpers.findByName(coll.getIndexes(), "_id_");
+assert.neq(null, spec, "_id index spec not found");
+assert.eq("en_US", spec.collation.locale, tojson(spec));
- // Non-_id indexes cannot have the name "_id_".
- coll.drop();
- assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
- assert.commandFailed(coll.ensureIndex({_id: "hashed"}, {name: "_id_"}));
- assert.commandFailed(coll.ensureIndex({a: 1}, {name: "_id_"}));
+// Non-_id indexes cannot have the name "_id_".
+coll.drop();
+assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));
+assert.commandFailed(coll.ensureIndex({_id: "hashed"}, {name: "_id_"}));
+assert.commandFailed(coll.ensureIndex({a: 1}, {name: "_id_"}));
})();
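
The name-correction rule in isolation: whatever name is requested for a {_id: 1} index, the
server stores it as "_id_". A minimal sketch, assuming a hypothetical collection name
index_id_sketch (in the local db, since autoIndexId:false needs a non-replicated database):

    (function() {
    'use strict';
    var coll = db.getSiblingDB("local").index_id_sketch;  // Hypothetical collection.
    coll.drop();
    assert.commandWorked(coll.runCommand("create", {autoIndexId: false}));

    // The requested name is ignored; the _id index is always named "_id_".
    assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "anything"}));
    var spec = coll.getIndexes().filter(function(ix) {
        return friendlyEqual(ix.key, {_id: 1});
    })[0];
    assert.eq("_id_", spec.name, tojson(spec));
    })();
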
diff --git a/jstests/core/index_multikey.js b/jstests/core/index_multikey.js
index 8adde5a4ef0..32ff3bd58c4 100644
--- a/jstests/core/index_multikey.js
+++ b/jstests/core/index_multikey.js
@@ -3,36 +3,35 @@
* successful and unsuccessful inserts.
*/
(function() {
- "use strict";
+"use strict";
- // For making assertions about explain output.
- load("jstests/libs/analyze_plan.js");
+// For making assertions about explain output.
+load("jstests/libs/analyze_plan.js");
- const coll = db.getCollection("index_multikey");
- coll.drop();
+const coll = db.getCollection("index_multikey");
+coll.drop();
- function getIndexScanExplainOutput() {
- const explain = coll.find().hint({a: 1, b: 1}).explain();
- assert.commandWorked(explain);
- return getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
- }
+function getIndexScanExplainOutput() {
+ const explain = coll.find().hint({a: 1, b: 1}).explain();
+ assert.commandWorked(explain);
+ return getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
+}
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- assert.commandWorked(coll.createIndex({"a.0.0": 1}));
- let ixscan = getIndexScanExplainOutput();
- assert.eq(ixscan.isMultiKey,
- false,
- "empty index should not be marked multikey; plan: " + tojson(ixscan));
- assert.eq(ixscan.multiKeyPaths,
- {a: [], b: []},
- "empty index should have no multiKeyPaths; plan: " + tojson(ixscan));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({"a.0.0": 1}));
+let ixscan = getIndexScanExplainOutput();
+assert.eq(
+ ixscan.isMultiKey, false, "empty index should not be marked multikey; plan: " + tojson(ixscan));
+assert.eq(ixscan.multiKeyPaths,
+ {a: [], b: []},
+ "empty index should have no multiKeyPaths; plan: " + tojson(ixscan));
- assert.commandWorked(coll.insert({a: [1, 2, 3]}));
- ixscan = getIndexScanExplainOutput();
- assert.eq(ixscan.isMultiKey,
- true,
- "index should have been marked as multikey after insert; plan: " + tojson(ixscan));
- assert.eq(ixscan.multiKeyPaths,
- {a: ["a"], b: []},
- "index has wrong multikey paths after insert; plan: " + ixscan);
+assert.commandWorked(coll.insert({a: [1, 2, 3]}));
+ixscan = getIndexScanExplainOutput();
+assert.eq(ixscan.isMultiKey,
+ true,
+ "index should have been marked as multikey after insert; plan: " + tojson(ixscan));
+assert.eq(ixscan.multiKeyPaths,
+ {a: ["a"], b: []},
+ "index has wrong multikey paths after insert; plan: " + ixscan);
})();
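
The transition observed there is one-way: the multikey flag flips from false to true on the
first array insert and is not reset by deletes. A minimal sketch (hypothetical collection name
multikey_flip_sketch):

    (function() {
    'use strict';
    load("jstests/libs/analyze_plan.js");  // For getPlanStage().

    const coll = db.multikey_flip_sketch;  // Hypothetical collection.
    coll.drop();
    assert.commandWorked(coll.createIndex({a: 1}));

    function ixscan() {
        const explain = coll.find().hint({a: 1}).explain();
        return getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
    }

    assert.writeOK(coll.insert({a: 1}));  // Scalar: still not multikey.
    assert.eq(false, ixscan().isMultiKey);

    assert.writeOK(coll.insert({a: [1, 2]}));  // First array flips the flag.
    assert.eq(true, ixscan().isMultiKey);

    assert.writeOK(coll.remove({a: [1, 2]}));  // Removal does not reset it.
    assert.eq(true, ixscan().isMultiKey);
    })();
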
diff --git a/jstests/core/index_multiple_compatibility.js b/jstests/core/index_multiple_compatibility.js
index 1ebe785abfc..8c203a49941 100644
--- a/jstests/core/index_multiple_compatibility.js
+++ b/jstests/core/index_multiple_compatibility.js
@@ -4,231 +4,236 @@
// Test that multiple indexes behave correctly together.
(function() {
- 'use strict';
- var coll = db.index_multiple_compatibility;
+'use strict';
+var coll = db.index_multiple_compatibility;
+coll.drop();
+
+const enUSStrength1 = {
+ locale: "en_US",
+ strength: 1
+};
+const enUSStrength2 = {
+ locale: "en_US",
+ strength: 2
+};
+const enUSStrength3 = {
+ locale: "en_US",
+ strength: 3
+};
+
+/**
+ * testIndexCompat runs a series of operations on two indexes to ensure that the two behave
+ * properly in combination.
+ *
+ * 'index1' and 'index2' take a document in the following format:
+ *
+ * {
+ * index: {key: Document, name: String, collation: Document, options...}
+ * doc: Document
+ * }
+ *
+ * The 'index' key indicates the index to create, and the optional 'doc' key indicates a
+ * document to insert into the collection and to look for in *only* this index. The 'index'
+ * value is passed directly to the createIndexes command.
+ *
+ * 'both' optionally provides a document to insert into the collection and to expect in both
+ * indexes.
+ *
+ * - Create both indexes.
+ * - Insert document in index1.
+ * - Check that it is present in index1, and absent in index2, using find and a hint.
+ * - Insert document in index2.
+ * - Check that it is present in index2, and absent in index1, using find and a hint.
+ * - Insert the document 'both', if it is provided. Check that it is inserted in both indexes.
+ * - Delete documents ensuring they are removed from the appropriate indexes.
+ */
+function testIndexCompat(coll, index1, index2, both) {
coll.drop();
- const enUSStrength1 = {locale: "en_US", strength: 1};
- const enUSStrength2 = {locale: "en_US", strength: 2};
- const enUSStrength3 = {locale: "en_US", strength: 3};
-
- /**
- * testIndexCompat runs a series of operations on two indexes to ensure that the two behave
- * properly in combination.
- *
- * 'index1' and 'index2' take a document in the following format:
- *
- * {
- * index: {key: Document, name: String, collation: Document, options...}
- * doc: Document
- * }
- *
- * The 'index' key indicates the index to create, and 'doc' (optional) indicates a document to
- * insert in the collection, and look for in *only* this index. The 'index' key will be passed
- * directly to the createIndexes command.
- *
- * 'both' optionally provides a document to insert into the collection, and expect in both
- * indexes.
- *
- * - Create both indexes.
- * - Insert document in index1.
- * - Check that it is present in index1, and absent in index2, using find and a hint.
- * - Insert document in index2.
- * - Check that it is present in index2, and absent in index1, using find and a hint.
- * - Insert the document 'both', if it is provided. Check that it is inserted in both indexes.
- * - Delete documents ensuring they are removed from the appropriate indexes.
- */
- function testIndexCompat(coll, index1, index2, both) {
- coll.drop();
-
- assert(index1.hasOwnProperty('index'));
- assert(index2.hasOwnProperty('index'));
-
- assert.commandWorked(
- db.runCommand({createIndexes: coll.getName(), indexes: [index1.index, index2.index]}));
-
- // Check index 1 document.
- if (index1.hasOwnProperty('doc')) {
- assert.writeOK(coll.insert(index1.doc));
- assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 1);
- assert.eq(coll.find(index1.doc).hint(index2.index.name).itcount(), 0);
- }
+ assert(index1.hasOwnProperty('index'));
+ assert(index2.hasOwnProperty('index'));
- // Check index 2 document.
- if (index2.hasOwnProperty('doc')) {
- assert.writeOK(coll.insert(index2.doc));
- assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 1);
- assert.eq(coll.find(index2.doc).hint(index1.index.name).itcount(), 0);
- }
+ assert.commandWorked(
+ db.runCommand({createIndexes: coll.getName(), indexes: [index1.index, index2.index]}));
- // Check for present of both in both index1 and index2.
- if (typeof both !== "undefined") {
- assert.writeOK(coll.insert(both));
- assert.eq(coll.find(both).hint(index1.index.name).itcount(), 1);
- assert.eq(coll.find(both).hint(index2.index.name).itcount(), 1);
- }
+ // Check index 1 document.
+ if (index1.hasOwnProperty('doc')) {
+ assert.writeOK(coll.insert(index1.doc));
+ assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 1);
+ assert.eq(coll.find(index1.doc).hint(index2.index.name).itcount(), 0);
+ }
- // Remove index 1 document.
- if (index1.hasOwnProperty('doc')) {
- assert.writeOK(coll.remove(index1.doc));
- assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 0);
- }
+ // Check index 2 document.
+ if (index2.hasOwnProperty('doc')) {
+ assert.writeOK(coll.insert(index2.doc));
+ assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 1);
+ assert.eq(coll.find(index2.doc).hint(index1.index.name).itcount(), 0);
+ }
- // Remove index 2 document.
- if (index2.hasOwnProperty('doc')) {
- assert.writeOK(coll.remove(index2.doc));
- assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 0);
- }
+    // Check for the presence of 'both' in both index1 and index2.
+ if (typeof both !== "undefined") {
+ assert.writeOK(coll.insert(both));
+ assert.eq(coll.find(both).hint(index1.index.name).itcount(), 1);
+ assert.eq(coll.find(both).hint(index2.index.name).itcount(), 1);
+ }
- // Remove both.
- if (typeof both !== "undefined") {
- assert.writeOK(coll.remove(both));
- assert.eq(coll.find(both).hint(index1.index.name).itcount(), 0);
- assert.eq(coll.find(both).hint(index2.index.name).itcount(), 0);
- }
+ // Remove index 1 document.
+ if (index1.hasOwnProperty('doc')) {
+ assert.writeOK(coll.remove(index1.doc));
+ assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 0);
}
- // Two identical partial indexes.
- testIndexCompat(coll,
- {
- index: {
- key: {a: 1},
- name: "a1",
- collation: enUSStrength1,
- partialFilterExpression: {a: {$type: 'string'}}
- }
- },
- {
- index: {
- key: {a: 1},
- name: "a2",
- collation: enUSStrength2,
- partialFilterExpression: {a: {$type: 'string'}}
- }
- },
- {a: "A"});
-
- // Two non-overlapping partial indexes.
- testIndexCompat(coll,
- {
- index: {
- key: {a: 1},
- name: "a1",
- collation: enUSStrength1,
- partialFilterExpression: {a: {$lt: 10}}
- },
- doc: {a: 5}
+ // Remove index 2 document.
+ if (index2.hasOwnProperty('doc')) {
+ assert.writeOK(coll.remove(index2.doc));
+ assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 0);
+ }
+
+ // Remove both.
+ if (typeof both !== "undefined") {
+ assert.writeOK(coll.remove(both));
+ assert.eq(coll.find(both).hint(index1.index.name).itcount(), 0);
+ assert.eq(coll.find(both).hint(index2.index.name).itcount(), 0);
+ }
+}
+
+// Two partial indexes with identical filters, distinguished only by collation strength.
+testIndexCompat(coll,
+ {
+ index: {
+ key: {a: 1},
+ name: "a1",
+ collation: enUSStrength1,
+ partialFilterExpression: {a: {$type: 'string'}}
+ }
+ },
+ {
+ index: {
+ key: {a: 1},
+ name: "a2",
+ collation: enUSStrength2,
+ partialFilterExpression: {a: {$type: 'string'}}
+ }
+ },
+ {a: "A"});
+
+// Two non-overlapping partial indexes.
+testIndexCompat(coll,
+ {
+ index: {
+ key: {a: 1},
+ name: "a1",
+ collation: enUSStrength1,
+ partialFilterExpression: {a: {$lt: 10}}
},
- {
- index: {
- key: {a: 1},
- name: "a2",
- collation: enUSStrength2,
- partialFilterExpression: {a: {$gt: 20}}
- },
- doc: {a: 25}
- });
-
- // Two partially overlapping partial indexes.
- testIndexCompat(coll,
- {
- index: {
- key: {a: 1},
- name: "a1",
- collation: enUSStrength1,
- partialFilterExpression: {a: {$lt: 10}},
- },
- doc: {a: -5}
+ doc: {a: 5}
+ },
+ {
+ index: {
+ key: {a: 1},
+ name: "a2",
+ collation: enUSStrength2,
+ partialFilterExpression: {a: {$gt: 20}}
},
- {
- index: {
- key: {a: 1},
- name: "a2",
- collation: enUSStrength2,
- partialFilterExpression: {a: {$gte: 0}}
- },
- doc: {a: 15}
+ doc: {a: 25}
+ });
+
+// Two partially overlapping partial indexes.
+testIndexCompat(coll,
+ {
+ index: {
+ key: {a: 1},
+ name: "a1",
+ collation: enUSStrength1,
+ partialFilterExpression: {a: {$lt: 10}},
},
- {a: 5});
-
- // A partial and sparse index.
- testIndexCompat(
- coll,
- {
- index:
- {key: {a: 1}, name: "a1", collation: enUSStrength1, partialFilterExpression: {b: 0}},
- doc: {b: 0}
- },
- {
- index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: true},
- doc: {a: 5, b: 1}
- },
- {a: -1, b: 0});
-
- // A sparse and non-sparse index.
- testIndexCompat(
- coll,
- {
- index: {key: {a: 1}, name: "a1", collation: enUSStrength1, sparse: true},
- },
- {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: false}, doc: {b: 0}},
- {a: 1});
-
- // A unique index and non-unique index.
- testIndexCompat(coll,
- {
- index: {key: {a: 1}, name: "unique", collation: enUSStrength1, unique: true},
+ doc: {a: -5}
+ },
+ {
+ index: {
+ key: {a: 1},
+ name: "a2",
+ collation: enUSStrength2,
+ partialFilterExpression: {a: {$gte: 0}}
},
- {index: {key: {a: 1}, name: "reg", collation: enUSStrength2, unique: false}},
- {a: "foo"});
-
- // Test that unique constraints are still enforced.
- assert.writeOK(coll.insert({a: "f"}));
- assert.writeError(coll.insert({a: "F"}));
-
- // A unique partial index and non-unique index.
- testIndexCompat(
- coll,
- {
- index: {
- key: {a: 1},
- name: "unique",
- collation: enUSStrength1,
- unique: true,
- partialFilterExpression: {a: {$type: 'number'}}
- }
- },
- {index: {key: {a: 1}, name: "a", collation: enUSStrength2, unique: false}, doc: {a: "foo"}},
- {a: 5});
-
- assert.writeOK(coll.insert({a: 5}));
- // Test that uniqueness is only enforced by the partial index.
- assert.writeOK(coll.insert({a: "foo"}));
- assert.writeOK(coll.insert({a: "foo"}));
- assert.writeError(coll.insert({a: 5}));
-
- // Two unique indexes with different collations.
- testIndexCompat(coll,
- {index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true}},
- {index: {key: {a: 1}, name: "a2", collation: enUSStrength3, unique: true}},
- {a: "a"});
-
- // Unique enforced on both indexes.
- assert.writeOK(coll.insert({a: "a"}));
- assert.writeError(coll.insert({a: "a"}));
- assert.writeError(coll.insert({a: "A"}));
-
- // A unique and sparse index.
- testIndexCompat(
- coll,
- {
- index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true, sparse: true},
- },
- {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, unique: false}, doc: {b: 0}},
- {a: "a"});
-
- assert.writeOK(coll.insert({a: "a"}));
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.insert({}));
- assert.writeError(coll.insert({a: "a"}));
+ doc: {a: 15}
+ },
+ {a: 5});
+
+// A partial and sparse index.
+testIndexCompat(
+ coll,
+ {
+ index: {key: {a: 1}, name: "a1", collation: enUSStrength1, partialFilterExpression: {b: 0}},
+ doc: {b: 0}
+ },
+ {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: true}, doc: {a: 5, b: 1}},
+ {a: -1, b: 0});
+
+// A sparse and non-sparse index.
+testIndexCompat(
+ coll,
+ {
+ index: {key: {a: 1}, name: "a1", collation: enUSStrength1, sparse: true},
+ },
+ {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: false}, doc: {b: 0}},
+ {a: 1});
+
+// A unique index and non-unique index.
+testIndexCompat(coll,
+ {
+ index: {key: {a: 1}, name: "unique", collation: enUSStrength1, unique: true},
+ },
+ {index: {key: {a: 1}, name: "reg", collation: enUSStrength2, unique: false}},
+ {a: "foo"});
+
+// Test that unique constraints are still enforced.
+assert.writeOK(coll.insert({a: "f"}));
+assert.writeError(coll.insert({a: "F"}));
+
+// A unique partial index and non-unique index.
+testIndexCompat(
+ coll,
+ {
+ index: {
+ key: {a: 1},
+ name: "unique",
+ collation: enUSStrength1,
+ unique: true,
+ partialFilterExpression: {a: {$type: 'number'}}
+ }
+ },
+ {index: {key: {a: 1}, name: "a", collation: enUSStrength2, unique: false}, doc: {a: "foo"}},
+ {a: 5});
+
+assert.writeOK(coll.insert({a: 5}));
+// Test that uniqueness is only enforced by the partial index.
+assert.writeOK(coll.insert({a: "foo"}));
+assert.writeOK(coll.insert({a: "foo"}));
+assert.writeError(coll.insert({a: 5}));
+
+// Two unique indexes with different collations.
+testIndexCompat(coll,
+ {index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true}},
+ {index: {key: {a: 1}, name: "a2", collation: enUSStrength3, unique: true}},
+ {a: "a"});
+
+// Unique enforced on both indexes.
+assert.writeOK(coll.insert({a: "a"}));
+assert.writeError(coll.insert({a: "a"}));
+assert.writeError(coll.insert({a: "A"}));
+
+// A unique and sparse index.
+testIndexCompat(
+ coll,
+ {
+ index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true, sparse: true},
+ },
+ {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, unique: false}, doc: {b: 0}},
+ {a: "a"});
+
+assert.writeOK(coll.insert({a: "a"}));
+assert.writeOK(coll.insert({}));
+assert.writeOK(coll.insert({}));
+assert.writeError(coll.insert({a: "a"}));
})();
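
The central trick in testIndexCompat is hinting by index name, so each query sees exactly one
index's view of the data. A stripped-down sketch of the disjoint-partial-filter case above
(hypothetical collection name index_compat_sketch; as in the test, the two indexes share a key
pattern and are distinguished by collation):

    (function() {
    'use strict';
    const coll = db.index_compat_sketch;  // Hypothetical collection.
    coll.drop();
    assert.commandWorked(db.runCommand({
        createIndexes: coll.getName(),
        indexes: [
            {
              key: {a: 1},
              name: "lo",
              collation: {locale: "en_US", strength: 1},
              partialFilterExpression: {a: {$lt: 10}}
            },
            {
              key: {a: 1},
              name: "hi",
              collation: {locale: "en_US", strength: 2},
              partialFilterExpression: {a: {$gt: 20}}
            }
        ]
    }));
    assert.writeOK(coll.insert({a: 5}));
    assert.writeOK(coll.insert({a: 25}));

    // Hinting by name scopes the query to a single index's contents.
    assert.eq(1, coll.find({a: 5}).hint("lo").itcount());
    assert.eq(0, coll.find({a: 5}).hint("hi").itcount());
    })();
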
diff --git a/jstests/core/index_partial_2dsphere.js b/jstests/core/index_partial_2dsphere.js
index 502f70aa556..15e6427667a 100644
--- a/jstests/core/index_partial_2dsphere.js
+++ b/jstests/core/index_partial_2dsphere.js
@@ -4,67 +4,64 @@
(function() {
- "use strict";
+"use strict";
- let coll = db.index_partial_2dsphere;
- coll.drop();
+let coll = db.index_partial_2dsphere;
+coll.drop();
- // Create a 2dsphere partial index for documents where isIndexed is greater than 0.
- let partialIndex = {geoJson: '2dsphere'};
- assert.commandWorked(
- coll.createIndex(partialIndex, {partialFilterExpression: {isIndexed: {$gt: 0}}}));
+// Create a 2dsphere partial index for documents where isIndexed is greater than 0.
+let partialIndex = {geoJson: '2dsphere'};
+assert.commandWorked(
+ coll.createIndex(partialIndex, {partialFilterExpression: {isIndexed: {$gt: 0}}}));
- // This document has an invalid geoJSON format (duplicated points), but will not be indexed.
- let unindexedDoc = {
- "_id": 0,
- "isIndexed": -1,
- "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]}
- };
+// This document has an invalid geoJSON format (duplicated points), but will not be indexed.
+let unindexedDoc = {
+ "_id": 0,
+ "isIndexed": -1,
+ "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]}
+};
- // This document has valid geoJson, and will be indexed.
- let indexedDoc = {
- "_id": 1,
- "isIndexed": 1,
- "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}
- };
+// This document has valid geoJson, and will be indexed.
+let indexedDoc = {
+ "_id": 1,
+ "isIndexed": 1,
+ "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}
+};
- assert.writeOK(coll.insert(unindexedDoc));
- assert.writeOK(coll.insert(indexedDoc));
+assert.writeOK(coll.insert(unindexedDoc));
+assert.writeOK(coll.insert(indexedDoc));
- // Return the one indexed document.
- assert.eq(1,
- coll.find({
- isIndexed: 1,
- geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
+// Return the one indexed document.
+assert.eq(
+ 1,
+ coll.find(
+ {isIndexed: 1, geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
- // Don't let an update to a document with an invalid geoJson succeed.
- assert.writeError(coll.update({_id: 0}, {$set: {isIndexed: 1}}));
+// Don't let an update to a document with an invalid geoJson succeed.
+assert.writeError(coll.update({_id: 0}, {$set: {isIndexed: 1}}));
- // Update the indexed document to remove it from the index.
- assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: -1}}));
+// Update the indexed document to remove it from the index.
+assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: -1}}));
- // This query should now return zero documents.
- assert.eq(0,
- coll.find({
- isIndexed: 1,
- geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
+// This query should now return zero documents.
+assert.eq(
+ 0,
+ coll.find(
+ {isIndexed: 1, geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
- // Re-index the document.
- assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: 1}}));
+// Re-index the document.
+assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: 1}}));
- // Remove both should succeed without error.
- assert.writeOK(coll.remove({_id: 0}));
- assert.writeOK(coll.remove({_id: 1}));
+// Removing both documents should succeed without error.
+assert.writeOK(coll.remove({_id: 0}));
+assert.writeOK(coll.remove({_id: 1}));
- assert.eq(0,
- coll.find({
- isIndexed: 1,
- geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
- .itcount());
- assert.commandWorked(coll.dropIndex(partialIndex));
+assert.eq(
+ 0,
+ coll.find(
+ {isIndexed: 1, geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}})
+ .itcount());
+assert.commandWorked(coll.dropIndex(partialIndex));
})();
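
The failure mode that test guards against, in isolation: a document with invalid geometry
inserts cleanly while it falls outside the partial filter, and only the update that moves it
inside the filter fails, because that is the point at which the bad geometry must be indexed.
A minimal sketch (hypothetical collection name partial_2dsphere_sketch):

    (function() {
    'use strict';
    const coll = db.partial_2dsphere_sketch;  // Hypothetical collection.
    coll.drop();
    assert.commandWorked(coll.createIndex({geoJson: '2dsphere'},
                                          {partialFilterExpression: {isIndexed: {$gt: 0}}}));

    // Invalid polygon (duplicated point), but the filter keeps it unindexed,
    // so the insert succeeds.
    assert.writeOK(coll.insert({
        _id: 0,
        isIndexed: -1,
        geoJson: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]}
    }));

    // Moving it inside the filter would require indexing the bad geometry.
    assert.writeError(coll.update({_id: 0}, {$set: {isIndexed: 1}}));
    })();
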
diff --git a/jstests/core/index_partial_create_drop.js b/jstests/core/index_partial_create_drop.js
index 55a6b06d117..0233f3fb8a0 100644
--- a/jstests/core/index_partial_create_drop.js
+++ b/jstests/core/index_partial_create_drop.js
@@ -10,73 +10,71 @@
// Test partial index creation and drops.
(function() {
- "use strict";
- var coll = db.index_partial_create_drop;
+"use strict";
+var coll = db.index_partial_create_drop;
- var getNumKeys = function(idxName) {
- var res = assert.commandWorked(coll.validate(true));
- var kpi;
+var getNumKeys = function(idxName) {
+ var res = assert.commandWorked(coll.validate(true));
+ var kpi;
- var isShardedNS = res.hasOwnProperty('raw');
- if (isShardedNS) {
- kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
- } else {
- kpi = res.keysPerIndex;
- }
- return kpi[idxName];
- };
+ var isShardedNS = res.hasOwnProperty('raw');
+ if (isShardedNS) {
+ kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
+ } else {
+ kpi = res.keysPerIndex;
+ }
+ return kpi[idxName];
+};
- coll.drop();
+coll.drop();
- // Check bad filter spec on create.
- assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: 5}));
- assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: {$asdasd: 3}}}));
- assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {$and: 5}}));
- assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: /abc/}}));
- assert.commandFailed(coll.ensureIndex({x: 1}, {
- partialFilterExpression:
- {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]}
- }));
- // Use of $expr is banned in a partial index filter.
- assert.commandFailed(
- coll.createIndex({x: 1}, {partialFilterExpression: {$expr: {$eq: ["$x", 5]}}}));
- assert.commandFailed(coll.createIndex(
- {x: 1}, {partialFilterExpression: {$expr: {$eq: [{$trim: {input: "$x"}}, "hi"]}}}));
+// Check bad filter spec on create.
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: 5}));
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: {$asdasd: 3}}}));
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {$and: 5}}));
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: /abc/}}));
+assert.commandFailed(coll.ensureIndex({x: 1}, {
+ partialFilterExpression: {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]}
+}));
+// Use of $expr is banned in a partial index filter.
+assert.commandFailed(
+ coll.createIndex({x: 1}, {partialFilterExpression: {$expr: {$eq: ["$x", 5]}}}));
+assert.commandFailed(coll.createIndex(
+ {x: 1}, {partialFilterExpression: {$expr: {$eq: [{$trim: {input: "$x"}}, "hi"]}}}));
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({x: i, a: i}));
- }
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({x: i, a: i}));
+}
- // Create partial index.
- assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}}));
- assert.eq(5, getNumKeys("x_1"));
- assert.commandWorked(coll.dropIndex({x: 1}));
- assert.eq(1, coll.getIndexes().length);
+// Create partial index.
+assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}}));
+assert.eq(5, getNumKeys("x_1"));
+assert.commandWorked(coll.dropIndex({x: 1}));
+assert.eq(1, coll.getIndexes().length);
- // Create partial index in background.
- assert.commandWorked(
- coll.ensureIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}}));
- assert.eq(5, getNumKeys("x_1"));
- assert.commandWorked(coll.dropIndex({x: 1}));
- assert.eq(1, coll.getIndexes().length);
+// Create partial index in background.
+assert.commandWorked(
+ coll.ensureIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}}));
+assert.eq(5, getNumKeys("x_1"));
+assert.commandWorked(coll.dropIndex({x: 1}));
+assert.eq(1, coll.getIndexes().length);
- // Create complete index, same key as previous indexes.
- assert.commandWorked(coll.ensureIndex({x: 1}));
- assert.eq(10, getNumKeys("x_1"));
- assert.commandWorked(coll.dropIndex({x: 1}));
- assert.eq(1, coll.getIndexes().length);
+// Create complete index, same key as previous indexes.
+assert.commandWorked(coll.ensureIndex({x: 1}));
+assert.eq(10, getNumKeys("x_1"));
+assert.commandWorked(coll.dropIndex({x: 1}));
+assert.eq(1, coll.getIndexes().length);
- // Partial indexes can't also be sparse indexes.
- assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: true}));
- assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: 1}));
- assert.commandWorked(
- coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: false}));
- assert.eq(2, coll.getIndexes().length);
- assert.commandWorked(coll.dropIndex({x: 1}));
- assert.eq(1, coll.getIndexes().length);
+// Partial indexes can't also be sparse indexes.
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: true}));
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: 1}));
+assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: false}));
+assert.eq(2, coll.getIndexes().length);
+assert.commandWorked(coll.dropIndex({x: 1}));
+assert.eq(1, coll.getIndexes().length);
- // SERVER-18858: Verify that query compatible w/ partial index succeeds after index drop.
- assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}}));
- assert.commandWorked(coll.dropIndex({x: 1}));
- assert.eq(1, coll.find({x: 0, a: 0}).itcount());
+// SERVER-18858: Verify that query compatible w/ partial index succeeds after index drop.
+assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}}));
+assert.commandWorked(coll.dropIndex({x: 1}));
+assert.eq(1, coll.find({x: 0, a: 0}).itcount());
})();
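// A minimal standalone sketch of the partial-index behavior exercised above;
// the collection and field names here are hypothetical, not from the test.
// Only documents matching partialFilterExpression get index entries, and the
// filter spec itself must be a valid match expression.
var demo = db.partial_index_demo;
demo.drop();
for (var i = 0; i < 10; i++) {
    demo.insert({x: i, a: i});
}
// 5 of the 10 documents satisfy {a: {$lt: 5}}, so the index holds 5 keys.
assert.commandWorked(demo.createIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}}));
// Invalid filter specs (non-objects, unknown operators, $expr) are rejected:
assert.commandFailed(demo.createIndex({y: 1}, {partialFilterExpression: 5}));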
diff --git a/jstests/core/index_partial_read_ops.js b/jstests/core/index_partial_read_ops.js
index 27fdb430fba..eba93a7ee23 100644
--- a/jstests/core/index_partial_read_ops.js
+++ b/jstests/core/index_partial_read_ops.js
@@ -8,75 +8,75 @@
load("jstests/libs/analyze_plan.js");
(function() {
- "use strict";
- var explain;
- var coll = db.index_partial_read_ops;
- coll.drop();
-
- assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}}));
- assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index.
- assert.writeOK(coll.insert({x: 6, a: 1})); // In index.
-
- //
- // Verify basic functionality with find().
- //
-
- // find() operations that should use index.
- explain = coll.explain('executionStats').find({x: 6, a: 1}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain('executionStats').find({x: {$gt: 1}, a: 1}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain('executionStats').find({x: 6, a: {$lte: 1}}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // find() operations that should not use index.
- explain = coll.explain('executionStats').find({x: 6, a: {$lt: 1.6}}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain('executionStats').find({x: 6}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- //
- // Verify basic functionality with the count command.
- //
-
- // Count operation that should use index.
- explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 1});
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // Count operation that should not use index.
- explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 2});
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- //
- // Verify basic functionality with the aggregate command.
- //
-
- // Aggregate operation that should use index.
- explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 1}}], {explain: true});
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // Aggregate operation that should not use index.
- explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 2}}], {explain: true});
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- //
- // Verify basic functionality with the findAndModify command.
- //
-
- // findAndModify operation that should use index.
- explain = coll.explain('executionStats')
- .findAndModify({query: {x: {$gt: 1}, a: 1}, update: {$inc: {x: 1}}});
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
-
- // findAndModify operation that should not use index.
- explain = coll.explain('executionStats')
- .findAndModify({query: {x: {$gt: 1}, a: 2}, update: {$inc: {x: 1}}});
- assert.eq(1, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
+"use strict";
+var explain;
+var coll = db.index_partial_read_ops;
+coll.drop();
+
+assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}}));
+assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index.
+assert.writeOK(coll.insert({x: 6, a: 1})); // In index.
+
+//
+// Verify basic functionality with find().
+//
+
+// find() operations that should use index.
+explain = coll.explain('executionStats').find({x: 6, a: 1}).finish();
+assert.eq(1, explain.executionStats.nReturned);
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+explain = coll.explain('executionStats').find({x: {$gt: 1}, a: 1}).finish();
+assert.eq(1, explain.executionStats.nReturned);
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+explain = coll.explain('executionStats').find({x: 6, a: {$lte: 1}}).finish();
+assert.eq(1, explain.executionStats.nReturned);
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// find() operations that should not use index.
+explain = coll.explain('executionStats').find({x: 6, a: {$lt: 1.6}}).finish();
+assert.eq(1, explain.executionStats.nReturned);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+explain = coll.explain('executionStats').find({x: 6}).finish();
+assert.eq(1, explain.executionStats.nReturned);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+//
+// Verify basic functionality with the count command.
+//
+
+// Count operation that should use index.
+explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 1});
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// Count operation that should not use index.
+explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 2});
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+//
+// Verify basic functionality with the aggregate command.
+//
+
+// Aggregate operation that should use index.
+explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 1}}], {explain: true});
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// Aggregate operation that should not use index.
+explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 2}}], {explain: true});
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
+
+//
+// Verify basic functionality with the findAndModify command.
+//
+
+// findAndModify operation that should use index.
+explain = coll.explain('executionStats')
+ .findAndModify({query: {x: {$gt: 1}, a: 1}, update: {$inc: {x: 1}}});
+assert.eq(1, explain.executionStats.nReturned);
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+
+// findAndModify operation that should not use index.
+explain = coll.explain('executionStats')
+ .findAndModify({query: {x: {$gt: 1}, a: 2}, update: {$inc: {x: 1}}});
+assert.eq(1, explain.executionStats.nReturned);
+assert(isCollscan(db, explain.queryPlanner.winningPlan));
})();
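// A hedged sketch of the planner rule the explain() assertions above rely on:
// a partial index is only eligible when the query predicate is provably
// contained in the partialFilterExpression. Names below are hypothetical.
var t = db.partial_read_demo;
t.drop();
assert.commandWorked(t.createIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}}));
assert.writeOK(t.insert({x: 6, a: 1}));
// {a: 1} implies {a: {$lte: 1.5}}, so an IXSCAN plan is eligible:
printjson(t.find({x: 6, a: 1}).explain().queryPlanner.winningPlan);
// {a: {$lt: 1.6}} is not contained (e.g. a = 1.55 matches the query but not
// the filter), so the planner must fall back to a collection scan:
printjson(t.find({x: 6, a: {$lt: 1.6}}).explain().queryPlanner.winningPlan);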
diff --git a/jstests/core/index_partial_validate.js b/jstests/core/index_partial_validate.js
index bd854de9751..321fede5c19 100644
--- a/jstests/core/index_partial_validate.js
+++ b/jstests/core/index_partial_validate.js
@@ -3,19 +3,19 @@
'use strict';
(function() {
- var t = db.index_partial_validate;
- t.drop();
+var t = db.index_partial_validate;
+t.drop();
- var res = t.ensureIndex({a: 1}, {partialFilterExpression: {a: {$lte: 1}}});
- assert.commandWorked(res);
+var res = t.ensureIndex({a: 1}, {partialFilterExpression: {a: {$lte: 1}}});
+assert.commandWorked(res);
- res = t.ensureIndex({b: 1});
- assert.commandWorked(res);
+res = t.ensureIndex({b: 1});
+assert.commandWorked(res);
- res = t.insert({non_indexed_field: 'x'});
- assert.writeOK(res);
+res = t.insert({non_indexed_field: 'x'});
+assert.writeOK(res);
- res = t.validate(true);
- assert.commandWorked(res);
- assert(res.valid, 'Validate failed with response:\n' + tojson(res));
+res = t.validate(true);
+assert.commandWorked(res);
+assert(res.valid, 'Validate failed with response:\n' + tojson(res));
})();
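// A short hedged sketch (hypothetical names) of the validate(true) contract
// checked above: full validation returns a document whose 'valid' field
// reports collection/index consistency, including for documents that fall
// outside a partial index.
var v = db.partial_validate_demo;
v.drop();
assert.commandWorked(v.createIndex({a: 1}, {partialFilterExpression: {a: {$lte: 1}}}));
assert.writeOK(v.insert({non_indexed_field: 'x'}));
var res = v.validate(true);
print(res.valid);  // true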
diff --git a/jstests/core/index_partial_write_ops.js b/jstests/core/index_partial_write_ops.js
index 730bcca5318..d79ce93155f 100644
--- a/jstests/core/index_partial_write_ops.js
+++ b/jstests/core/index_partial_write_ops.js
@@ -2,79 +2,78 @@
// @tags: [cannot_create_unique_index_when_using_hashed_shard_key, requires_non_retryable_writes]
(function() {
- "use strict";
- var coll = db.index_partial_write_ops;
-
- var getNumKeys = function(idxName) {
- var res = assert.commandWorked(coll.validate(true));
- var kpi;
-
- var isShardedNS = res.hasOwnProperty('raw');
- if (isShardedNS) {
- kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
- } else {
- kpi = res.keysPerIndex;
- }
- return kpi[idxName];
- };
+"use strict";
+var coll = db.index_partial_write_ops;
+
+var getNumKeys = function(idxName) {
+ var res = assert.commandWorked(coll.validate(true));
+ var kpi;
+
+ var isShardedNS = res.hasOwnProperty('raw');
+ if (isShardedNS) {
+ kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
+ } else {
+ kpi = res.keysPerIndex;
+ }
+ return kpi[idxName];
+};
- coll.drop();
+coll.drop();
- // Create partial index.
- assert.commandWorked(coll.ensureIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}}));
+// Create partial index.
+assert.commandWorked(coll.ensureIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}}));
- assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index.
- assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index.
+assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index.
+assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index.
- assert.eq(1, getNumKeys("x_1"));
+assert.eq(1, getNumKeys("x_1"));
- // Move into partial index, then back out.
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.eq(2, getNumKeys("x_1"));
+// Move into partial index, then back out.
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.eq(2, getNumKeys("x_1"));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 2}}));
- assert.eq(1, getNumKeys("x_1"));
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 2}}));
+assert.eq(1, getNumKeys("x_1"));
-    // Update an unindexed field on a doc in the partial index, and on one outside it.
- assert.writeOK(coll.update({_id: 2}, {$set: {b: 2}}));
- assert.eq(1, getNumKeys("x_1"));
+// Update an unindexed field on a doc in the partial index, and on one outside it.
+assert.writeOK(coll.update({_id: 2}, {$set: {b: 2}}));
+assert.eq(1, getNumKeys("x_1"));
- assert.writeOK(coll.update({_id: 1}, {$set: {b: 2}}));
- assert.eq(1, getNumKeys("x_1"));
+assert.writeOK(coll.update({_id: 1}, {$set: {b: 2}}));
+assert.eq(1, getNumKeys("x_1"));
- var array = [];
- for (var i = 0; i < 2048; i++) {
- array.push({arbitrary: i});
- }
+var array = [];
+for (var i = 0; i < 2048; i++) {
+ array.push({arbitrary: i});
+}
- // Update that causes record relocation.
- assert.writeOK(coll.update({_id: 2}, {$set: {b: array}}));
- assert.eq(1, getNumKeys("x_1"));
+// Update that causes record relocation.
+assert.writeOK(coll.update({_id: 2}, {$set: {b: array}}));
+assert.eq(1, getNumKeys("x_1"));
- assert.writeOK(coll.update({_id: 1}, {$set: {b: array}}));
- assert.eq(1, getNumKeys("x_1"));
+assert.writeOK(coll.update({_id: 1}, {$set: {b: array}}));
+assert.eq(1, getNumKeys("x_1"));
- // Delete that doesn't affect partial index.
- assert.writeOK(coll.remove({x: 5}));
- assert.eq(1, getNumKeys("x_1"));
+// Delete that doesn't affect partial index.
+assert.writeOK(coll.remove({x: 5}));
+assert.eq(1, getNumKeys("x_1"));
- // Delete that does affect partial index.
- assert.writeOK(coll.remove({x: 6}));
- assert.eq(0, getNumKeys("x_1"));
+// Delete that does affect partial index.
+assert.writeOK(coll.remove({x: 6}));
+assert.eq(0, getNumKeys("x_1"));
- // Documents with duplicate keys that straddle the index.
- assert.writeOK(coll.insert({_id: 3, x: 1, a: 1})); // In index.
- assert.writeOK(coll.insert({_id: 4, x: 1, a: 0})); // Not in index.
- assert.writeErrorWithCode(
- coll.insert({_id: 5, x: 1, a: 1}),
- ErrorCodes.DuplicateKey); // Duplicate key constraint prevents insertion.
+// Documents with duplicate keys that straddle the index.
+assert.writeOK(coll.insert({_id: 3, x: 1, a: 1})); // In index.
+assert.writeOK(coll.insert({_id: 4, x: 1, a: 0})); // Not in index.
+assert.writeErrorWithCode(coll.insert({_id: 5, x: 1, a: 1}),
+ ErrorCodes.DuplicateKey); // Duplicate key constraint prevents insertion.
- // Only _id 3 is in the index.
- assert.eq(1, getNumKeys("x_1"));
+// Only _id 3 is in the index.
+assert.eq(1, getNumKeys("x_1"));
- // Remove _id 4, _id 3 should remain in index.
- assert.writeOK(coll.remove({_id: 4}));
+// Remove _id 4, _id 3 should remain in index.
+assert.writeOK(coll.remove({_id: 4}));
- // _id 3 is still in the index.
- assert.eq(1, getNumKeys("x_1"));
+// _id 3 is still in the index.
+assert.eq(1, getNumKeys("x_1"));
})();
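// A hedged sketch (hypothetical names) of the unique-partial-index rule the
// duplicate-key assertions above depend on: uniqueness is enforced only among
// documents the filter actually places in the index.
var w = db.partial_write_demo;
w.drop();
assert.commandWorked(w.createIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}}));
assert.writeOK(w.insert({x: 1, a: 1}));  // In index.
assert.writeOK(w.insert({x: 1, a: 0}));  // Not in index, so no conflict.
var dup = w.insert({x: 1, a: 1});        // Indexed duplicate.
print(dup.getWriteError().code);         // 11000 (DuplicateKey)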
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index d815dde1b8d..ee99fdc4831 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -12,211 +12,211 @@
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- var colName = "jstests_index_stats";
- var col = db[colName];
- col.drop();
+var colName = "jstests_index_stats";
+var col = db[colName];
+col.drop();
- var getUsageCount = function(indexName, collection) {
- collection = collection || col;
- var cursor = collection.aggregate([{$indexStats: {}}]);
- while (cursor.hasNext()) {
- var doc = cursor.next();
+var getUsageCount = function(indexName, collection) {
+ collection = collection || col;
+ var cursor = collection.aggregate([{$indexStats: {}}]);
+ while (cursor.hasNext()) {
+ var doc = cursor.next();
- if (doc.name === indexName) {
- return doc.accesses.ops;
- }
+ if (doc.name === indexName) {
+ return doc.accesses.ops;
}
+ }
- return undefined;
- };
-
- var getIndexKey = function(indexName) {
- var cursor = col.aggregate([{$indexStats: {}}]);
- while (cursor.hasNext()) {
- var doc = cursor.next();
-
- if (doc.name === indexName) {
- return doc.key;
- }
- }
+ return undefined;
+};
- return undefined;
- };
+var getIndexKey = function(indexName) {
+ var cursor = col.aggregate([{$indexStats: {}}]);
+ while (cursor.hasNext()) {
+ var doc = cursor.next();
- var getIndexNamesForWinningPlan = function(explain) {
- var indexNameList = [];
- var winningStages = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN");
- for (var i = 0; i < winningStages.length; ++i) {
- indexNameList.push(winningStages[i].indexName);
+ if (doc.name === indexName) {
+ return doc.key;
}
+ }
- return indexNameList;
- };
-
- assert.writeOK(col.insert({a: 1, b: 1, c: 1}));
- assert.writeOK(col.insert({a: 2, b: 2, c: 2}));
- assert.writeOK(col.insert({a: 3, b: 3, c: 3}));
-
- //
- // Confirm no index stats object exists prior to index creation.
- //
- col.findOne({a: 1});
- assert.eq(undefined, getUsageCount("a_1"));
-
- //
- // Create indexes.
- //
- assert.commandWorked(col.createIndex({a: 1}, {name: "a_1"}));
- assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"}));
- var countA = 0; // Tracks expected index access for "a_1".
- var countB = 0; // Tracks expected index access for "b_1_c_1".
-
- //
- // Confirm a stats object exists post index creation (with 0 count).
- //
- assert.eq(countA, getUsageCount("a_1"));
- assert.eq({a: 1}, getIndexKey("a_1"));
-
- //
- // Confirm index stats tick on find().
- //
- col.findOne({a: 1});
- countA++;
-
- assert.eq(countA, getUsageCount("a_1"));
-
- //
- // Confirm index stats tick on findAndModify() update.
- //
- var res =
- db.runCommand({findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true});
- assert.commandWorked(res);
- countA++;
- assert.eq(countA, getUsageCount("a_1"));
+ return undefined;
+};
- //
- // Confirm index stats tick on findAndModify() delete.
- //
- res = db.runCommand({findAndModify: colName, query: {a: 2}, remove: true});
- assert.commandWorked(res);
- countA++;
- assert.eq(countA, getUsageCount("a_1"));
- assert.writeOK(col.insert(res.value));
-
- //
- // Confirm $and operation ticks indexes for winning plan, but not rejected plans.
- //
-
- // We cannot use explain() to determine which indexes would be used for this query, since
- // 1) explain() will not bump the access counters
- // 2) explain() always runs the multi planner, and the multi planner may choose a different
- // index each run. We therefore run the query, and check that only one of the indexes has its
- // counter bumped (assuming we never choose an index intersection plan).
- const results = col.find({a: 2, b: 2}).itcount();
- if (countA + 1 == getUsageCount("a_1")) {
- // Plan using index A was chosen. Index B should not have been used (assuming no index
- // intersection plans are used).
- countA++;
- } else {
- // Plan using index B was chosen. Index A should not have been used (assuming no index
- // intersection plans are used).
- assert.eq(++countB, getUsageCount("b_1_c_1"));
+var getIndexNamesForWinningPlan = function(explain) {
+ var indexNameList = [];
+ var winningStages = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN");
+ for (var i = 0; i < winningStages.length; ++i) {
+ indexNameList.push(winningStages[i].indexName);
}
- assert.eq(countA, getUsageCount("a_1"));
- assert.eq(countB, getUsageCount("b_1_c_1"));
- assert.eq(0, getUsageCount("_id_"));
-
- //
- // Confirm index stats tick on distinct().
- //
- res = db.runCommand({distinct: colName, key: "b", query: {b: 1}});
- assert.commandWorked(res);
- countB++;
- assert.eq(countB, getUsageCount("b_1_c_1"));
-
- //
- // Confirm index stats tick on aggregate w/ match.
- //
- res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}], cursor: {}});
- assert.commandWorked(res);
- countB++;
- assert.eq(countB, getUsageCount("b_1_c_1"));
-
- //
- // Confirm index stats tick on mapReduce with query.
- //
- res = db.runCommand({
- mapReduce: colName,
- map: function() {
- emit(this.b, this.c);
- },
- reduce: function(key, val) {
- return val;
- },
- query: {b: 2},
- out: {inline: true}
- });
- assert.commandWorked(res);
- countB++;
- assert.eq(countB, getUsageCount("b_1_c_1"));
-
- //
- // Confirm index stats tick on update().
- //
- assert.writeOK(col.update({a: 2}, {$set: {d: 2}}));
- countA++;
- assert.eq(countA, getUsageCount("a_1"));
-
- //
- // Confirm index stats tick on remove().
- //
- assert.writeOK(col.remove({a: 2}));
- countA++;
- assert.eq(countA, getUsageCount("a_1"));
- //
- // Confirm multiple index $or operation ticks all involved indexes.
- //
- col.findOne({$or: [{a: 1}, {b: 1, c: 1}]});
+ return indexNameList;
+};
+
+assert.writeOK(col.insert({a: 1, b: 1, c: 1}));
+assert.writeOK(col.insert({a: 2, b: 2, c: 2}));
+assert.writeOK(col.insert({a: 3, b: 3, c: 3}));
+
+//
+// Confirm no index stats object exists prior to index creation.
+//
+col.findOne({a: 1});
+assert.eq(undefined, getUsageCount("a_1"));
+
+//
+// Create indexes.
+//
+assert.commandWorked(col.createIndex({a: 1}, {name: "a_1"}));
+assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"}));
+var countA = 0; // Tracks expected index access for "a_1".
+var countB = 0; // Tracks expected index access for "b_1_c_1".
+
+//
+// Confirm a stats object exists post index creation (with 0 count).
+//
+assert.eq(countA, getUsageCount("a_1"));
+assert.eq({a: 1}, getIndexKey("a_1"));
+
+//
+// Confirm index stats tick on find().
+//
+col.findOne({a: 1});
+countA++;
+
+assert.eq(countA, getUsageCount("a_1"));
+
+//
+// Confirm index stats tick on findAndModify() update.
+//
+var res =
+ db.runCommand({findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true});
+assert.commandWorked(res);
+countA++;
+assert.eq(countA, getUsageCount("a_1"));
+
+//
+// Confirm index stats tick on findAndModify() delete.
+//
+res = db.runCommand({findAndModify: colName, query: {a: 2}, remove: true});
+assert.commandWorked(res);
+countA++;
+assert.eq(countA, getUsageCount("a_1"));
+assert.writeOK(col.insert(res.value));
+
+//
+// Confirm $and operation ticks indexes for winning plan, but not rejected plans.
+//
+
+// We cannot use explain() to determine which indexes would be used for this query, since
+// 1) explain() will not bump the access counters
+// 2) explain() always runs the multi planner, and the multi planner may choose a different
+// index each run. We therefore run the query, and check that only one of the indexes has its
+// counter bumped (assuming we never choose an index intersection plan).
+const results = col.find({a: 2, b: 2}).itcount();
+if (countA + 1 == getUsageCount("a_1")) {
+ // Plan using index A was chosen. Index B should not have been used (assuming no index
+ // intersection plans are used).
countA++;
- countB++;
- assert.eq(countA, getUsageCount("a_1"));
- assert.eq(countB, getUsageCount("b_1_c_1"));
-
- //
- // Confirm index stats object does not exist post index drop.
- //
- assert.commandWorked(col.dropIndex("b_1_c_1"));
- countB = 0;
- assert.eq(undefined, getUsageCount("b_1_c_1"));
-
- //
- // Confirm index stats object exists with count 0 once index is recreated.
- //
- assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"}));
- assert.eq(countB, getUsageCount("b_1_c_1"));
-
- //
- // Confirm that retrieval fails if $indexStats is not in the first pipeline position.
- //
- assert.throws(function() {
- col.aggregate([{$match: {}}, {$indexStats: {}}]);
- });
-
- //
- // Confirm index use is recorded for $lookup.
- //
- const foreignCollection = db[colName + "_foreign"];
- foreignCollection.drop();
- assert.writeOK(foreignCollection.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- col.drop();
- assert.writeOK(col.insert([{_id: 0, foreignId: 1}, {_id: 1, foreignId: 2}]));
- assert.eq(0, getUsageCount("_id_"));
- assert.eq(2,
+} else {
+ // Plan using index B was chosen. Index A should not have been used (assuming no index
+ // intersection plans are used).
+ assert.eq(++countB, getUsageCount("b_1_c_1"));
+}
+assert.eq(countA, getUsageCount("a_1"));
+assert.eq(countB, getUsageCount("b_1_c_1"));
+assert.eq(0, getUsageCount("_id_"));
+
+//
+// Confirm index stats tick on distinct().
+//
+res = db.runCommand({distinct: colName, key: "b", query: {b: 1}});
+assert.commandWorked(res);
+countB++;
+assert.eq(countB, getUsageCount("b_1_c_1"));
+
+//
+// Confirm index stats tick on aggregate w/ match.
+//
+res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}], cursor: {}});
+assert.commandWorked(res);
+countB++;
+assert.eq(countB, getUsageCount("b_1_c_1"));
+
+//
+// Confirm index stats tick on mapReduce with query.
+//
+res = db.runCommand({
+ mapReduce: colName,
+ map: function() {
+ emit(this.b, this.c);
+ },
+ reduce: function(key, val) {
+ return val;
+ },
+ query: {b: 2},
+ out: {inline: true}
+});
+assert.commandWorked(res);
+countB++;
+assert.eq(countB, getUsageCount("b_1_c_1"));
+
+//
+// Confirm index stats tick on update().
+//
+assert.writeOK(col.update({a: 2}, {$set: {d: 2}}));
+countA++;
+assert.eq(countA, getUsageCount("a_1"));
+
+//
+// Confirm index stats tick on remove().
+//
+assert.writeOK(col.remove({a: 2}));
+countA++;
+assert.eq(countA, getUsageCount("a_1"));
+
+//
+// Confirm multiple index $or operation ticks all involved indexes.
+//
+col.findOne({$or: [{a: 1}, {b: 1, c: 1}]});
+countA++;
+countB++;
+assert.eq(countA, getUsageCount("a_1"));
+assert.eq(countB, getUsageCount("b_1_c_1"));
+
+//
+// Confirm index stats object does not exist post index drop.
+//
+assert.commandWorked(col.dropIndex("b_1_c_1"));
+countB = 0;
+assert.eq(undefined, getUsageCount("b_1_c_1"));
+
+//
+// Confirm index stats object exists with count 0 once index is recreated.
+//
+assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"}));
+assert.eq(countB, getUsageCount("b_1_c_1"));
+
+//
+// Confirm that retrieval fails if $indexStats is not in the first pipeline position.
+//
+assert.throws(function() {
+ col.aggregate([{$match: {}}, {$indexStats: {}}]);
+});
+
+//
+// Confirm index use is recorded for $lookup.
+//
+const foreignCollection = db[colName + "_foreign"];
+foreignCollection.drop();
+assert.writeOK(foreignCollection.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+col.drop();
+assert.writeOK(col.insert([{_id: 0, foreignId: 1}, {_id: 1, foreignId: 2}]));
+assert.eq(0, getUsageCount("_id_"));
+assert.eq(2,
col.aggregate([
{$match: {_id: {$in: [0, 1]}}},
{
@@ -229,26 +229,26 @@
}
])
.itcount());
- assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index");
- assert.eq(2,
- getUsageCount("_id_", foreignCollection),
- "Expected each lookup to be tracked as an index use");
-
- //
- // Confirm index use is recorded for $graphLookup.
- //
- foreignCollection.drop();
- assert.writeOK(foreignCollection.insert([
- {_id: 0, connectedTo: 1},
- {_id: 1, connectedTo: "X"},
- {_id: 2, connectedTo: 3},
- {_id: 3, connectedTo: "Y"}, // Be sure to use a different value here to make sure
- // $graphLookup doesn't cache the query.
- ]));
- col.drop();
- assert.writeOK(col.insert([{_id: 0, foreignId: 0}, {_id: 1, foreignId: 2}]));
- assert.eq(0, getUsageCount("_id_"));
- assert.eq(2,
+assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index");
+assert.eq(2,
+ getUsageCount("_id_", foreignCollection),
+ "Expected each lookup to be tracked as an index use");
+
+//
+// Confirm index use is recorded for $graphLookup.
+//
+foreignCollection.drop();
+assert.writeOK(foreignCollection.insert([
+ {_id: 0, connectedTo: 1},
+ {_id: 1, connectedTo: "X"},
+ {_id: 2, connectedTo: 3},
+ {_id: 3, connectedTo: "Y"}, // Be sure to use a different value here to make sure
+ // $graphLookup doesn't cache the query.
+]));
+col.drop();
+assert.writeOK(col.insert([{_id: 0, foreignId: 0}, {_id: 1, foreignId: 2}]));
+assert.eq(0, getUsageCount("_id_"));
+assert.eq(2,
col.aggregate([
{$match: {_id: {$in: [0, 1]}}},
{
@@ -262,8 +262,8 @@
}
])
.itcount());
- assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index");
- assert.eq(2 * 3,
- getUsageCount("_id_", foreignCollection),
- "Expected each of two graph searches to issue 3 queries, each using the _id index");
+assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index");
+assert.eq(2 * 3,
+ getUsageCount("_id_", foreignCollection),
+ "Expected each of two graph searches to issue 3 queries, each using the _id index");
})();
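// A hedged sketch (hypothetical collection name) of the $indexStats shape the
// getUsageCount()/getIndexKey() helpers above consume: one document per index,
// with accesses.ops counting operations that used it since it was built.
var s = db.index_stats_demo;
s.drop();
assert.writeOK(s.insert({a: 1}));
assert.commandWorked(s.createIndex({a: 1}));
s.findOne({a: 1});  // Bumps accesses.ops for "a_1".
s.aggregate([{$indexStats: {}}]).forEach(function(doc) {
    print(doc.name + " -> " + doc.accesses.ops);
});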
diff --git a/jstests/core/index_type_change.js b/jstests/core/index_type_change.js
index ad2525fe015..af2671338a2 100644
--- a/jstests/core/index_type_change.js
+++ b/jstests/core/index_type_change.js
@@ -11,32 +11,32 @@
load("jstests/libs/analyze_plan.js"); // For 'isIndexOnly'.
(function() {
- "use strict";
+"use strict";
- var coll = db.index_type_change;
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1}));
+var coll = db.index_type_change;
+coll.drop();
+assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
- assert.eq(1, coll.find({a: {$type: "double"}}).itcount());
+assert.writeOK(coll.insert({a: 2}));
+assert.eq(1, coll.find({a: {$type: "double"}}).itcount());
- var newVal = new NumberLong(2);
- var res = coll.update({}, {a: newVal}); // Replacement update.
- assert.writeOK(res);
- assert.eq(res.nMatched, 1);
- if (coll.getMongo().writeMode() == "commands")
- assert.eq(res.nModified, 1);
+var newVal = new NumberLong(2);
+var res = coll.update({}, {a: newVal}); // Replacement update.
+assert.writeOK(res);
+assert.eq(res.nMatched, 1);
+if (coll.getMongo().writeMode() == "commands")
+ assert.eq(res.nModified, 1);
- // Make sure it actually changed the type.
- assert.eq(1, coll.find({a: {$type: "long"}}).itcount());
+// Make sure it actually changed the type.
+assert.eq(1, coll.find({a: {$type: "long"}}).itcount());
- // Now use a covered query to ensure the index entry has been updated.
+// Now use a covered query to ensure the index entry has been updated.
- // First make sure it's actually using a covered index scan.
- var explain = coll.explain().find({a: 2}, {_id: 0, a: 1});
- assert(isIndexOnly(db, explain));
+// First make sure it's actually using a covered index scan.
+var explain = coll.explain().find({a: 2}, {_id: 0, a: 1});
+assert(isIndexOnly(db, explain));
- var updated = coll.findOne({a: 2}, {_id: 0, a: 1});
+var updated = coll.findOne({a: 2}, {_id: 0, a: 1});
- assert(updated.a instanceof NumberLong, "Index entry did not change type");
+assert(updated.a instanceof NumberLong, "Index entry did not change type");
})();
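// A hedged sketch (hypothetical names) of the covered-query trick used above:
// a query answered entirely from index keys exposes the BSON type stored in
// the index entry, so it can confirm the entry was rewritten on type change.
var c = db.type_change_demo;
c.drop();
assert.commandWorked(c.createIndex({a: 1}));
assert.writeOK(c.insert({a: 2}));                  // Stored as a double.
assert.writeOK(c.update({}, {a: NumberLong(2)}));  // Replacement changes the type.
var viaIndex = c.findOne({a: 2}, {_id: 0, a: 1});  // Covered by {a: 1}.
print(viaIndex.a instanceof NumberLong);           // true once the key is rewritten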
diff --git a/jstests/core/indexes_multiple_commands.js b/jstests/core/indexes_multiple_commands.js
index 60bc2b69173..7058fd32019 100644
--- a/jstests/core/indexes_multiple_commands.js
+++ b/jstests/core/indexes_multiple_commands.js
@@ -5,162 +5,158 @@
// Test that commands behave correctly in the presence of multiple indexes with the same key
// pattern.
(function() {
- 'use strict';
-
- var coll = db.indexes_multiple_commands;
- var usingWriteCommands = db.getMongo().writeMode() === "commands";
-
- /**
- * Assert that the result of the index creation ('cmd') indicates that 'numIndexes' were
- * created.
- *
- * If omitted, 'numIndexes' defaults to 1.
- *
- * @param cmd {Function} A function to execute that attempts to create indexes.
- * @param numIndexes {Number} The expected number of indexes that cmd creates.
- */
- function assertIndexesCreated(cmd, numIndexes) {
- var cmdResult;
-
- if (typeof numIndexes === "undefined") {
- numIndexes = 1;
- }
-
- if (usingWriteCommands) {
- cmdResult = cmd();
- if (numIndexes == 0) {
- assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict);
- return;
- }
-
- assert.commandWorked(cmdResult);
- var isShardedNS = cmdResult.hasOwnProperty('raw');
- if (isShardedNS) {
- cmdResult = cmdResult['raw'][Object.getOwnPropertyNames(cmdResult['raw'])[0]];
- }
- assert.eq(cmdResult.numIndexesAfter - cmdResult.numIndexesBefore,
- numIndexes,
- tojson(cmdResult));
- } else {
- var nIndexesBefore = coll.getIndexes().length;
- cmdResult = cmd();
- if (numIndexes == 0) {
- assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict);
- return;
- }
-
- assert.commandWorked(cmdResult);
- var nIndexesAfter = coll.getIndexes().length;
- assert.eq(nIndexesAfter - nIndexesBefore, numIndexes, tojson(coll.getIndexes()));
- }
- }
-
- /**
- * Assert that the result of the index create command indicates no indexes were created since
- * the indexes were the same (collation and key pattern matched).
- *
- * (Index creation succeeds if none are created, as long as no options conflict.)
- *
-     * @param cmd {Function} A function to execute that attempts to create indexes.
- */
- function assertIndexNotCreated(cmd) {
- assertIndexesCreated(cmd, 0);
+'use strict';
+
+var coll = db.indexes_multiple_commands;
+var usingWriteCommands = db.getMongo().writeMode() === "commands";
+
+/**
+ * Assert that the result of the index creation ('cmd') indicates that 'numIndexes' were
+ * created.
+ *
+ * If omitted, 'numIndexes' defaults to 1.
+ *
+ * @param cmd {Function} A function to execute that attempts to create indexes.
+ * @param numIndexes {Number} The expected number of indexes that cmd creates.
+ */
+function assertIndexesCreated(cmd, numIndexes) {
+ var cmdResult;
+
+ if (typeof numIndexes === "undefined") {
+ numIndexes = 1;
}
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName()));
-
- // Test that multiple indexes with the same key pattern and different collation can be created.
-
- assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "a_1"}));
- // The requested index already exists, but with a different name, so the index is not created.
- assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "a_1:1"}));
-
- // Indexes with different collations and the same key pattern are allowed if the names are
- // not the same.
- assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "fr", collation: {locale: "fr"}}));
- assertIndexesCreated(
- () => coll.createIndex({a: 1}, {name: "en_US", collation: {locale: "en_US"}}));
-
- // The requested index doesn't yet exist, but the name is used, so this command fails.
- assert.commandFailed(coll.createIndex({a: 1}, {name: "a_1", collation: {locale: "en_US"}}));
-
- // The requested index already exists with a different name, so the index is not created.
- assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "fr2", collation: {locale: "fr"}}));
-
- // Options can differ on indexes with different collations.
- assertIndexesCreated(
- () => coll.createIndex(
- {a: 1}, {name: "fr1_sparse", collation: {locale: "fr", strength: 1}, sparse: true}));
-
- // The requested index already exists, but with different options, so the command fails.
- assert.commandFailed(
- coll.createIndex({a: 1}, {name: "fr_sparse", collation: {locale: "fr"}, sparse: true}));
-
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName()));
-
- // Multiple non-conflicting indexes can be created in one command.
- var multipleCreate = () => db.runCommand({
- createIndexes: coll.getName(),
- indexes: [
- {key: {a: 1}, name: "en_US", collation: {locale: "en_US"}},
- {key: {a: 1}, name: "en_US_1", collation: {locale: "en_US", strength: 1}}
- ]
- });
- assertIndexesCreated(multipleCreate, 2);
-
- // Cannot create another _id index.
- assert.commandFailed(coll.createIndex({_id: 1}, {name: "other", collation: {locale: "fr"}}));
-
- // Test that indexes must be dropped by name if the key pattern is ambiguous.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName()));
-
- // Create multiple indexes with the same key pattern and collation.
- assertIndexesCreated(() =>
- coll.createIndex({a: 1}, {name: "foo", collation: {locale: "en_US"}}));
- assertIndexesCreated(
- () => coll.createIndex({a: 1}, {name: "bar", collation: {locale: "en_US", strength: 1}}));
-
- // Indexes cannot be dropped by an ambiguous key pattern.
- assert.commandFailed(coll.dropIndex({a: 1}));
-
- // Indexes can be dropped by name.
- assert.commandWorked(coll.dropIndex("foo"));
- assert.commandWorked(coll.dropIndex("bar"));
-
- // Test that hint behaves correctly in the presence of multiple indexes.
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName()));
-
- assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "sbc"}));
- assertIndexesCreated(
- () => coll.createIndex(
- {a: 1}, {name: "caseInsensitive", collation: {locale: "en_US", strength: 2}}));
-
- assert.writeOK(coll.insert([{a: "a"}, {a: "A"}, {a: 20}]));
-
- // An ambiguous hint pattern fails.
- assert.throws(() => coll.find({a: 1}).hint({a: 1}).itcount());
- if (db.getMongo().useReadCommands()) {
- assert.throws(
- () =>
- coll.find({a: 1}).collation({locale: "en_US", strength: 2}).hint({a: 1}).itcount());
- }
+ if (usingWriteCommands) {
+ cmdResult = cmd();
+ if (numIndexes == 0) {
+ assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict);
+ return;
+ }
- // Index hint by name succeeds.
- assert.eq(coll.find({a: "a"}).hint("sbc").itcount(), 1);
- // A hint on an incompatible index does a whole index scan, and then filters using the query
- // collation.
- assert.eq(coll.find({a: "a"}).hint("caseInsensitive").itcount(), 1);
- if (db.getMongo().useReadCommands()) {
+ assert.commandWorked(cmdResult);
+ var isShardedNS = cmdResult.hasOwnProperty('raw');
+ if (isShardedNS) {
+ cmdResult = cmdResult['raw'][Object.getOwnPropertyNames(cmdResult['raw'])[0]];
+ }
assert.eq(
- coll.find({a: "a"}).collation({locale: "en_US", strength: 2}).hint("sbc").itcount(), 2);
+ cmdResult.numIndexesAfter - cmdResult.numIndexesBefore, numIndexes, tojson(cmdResult));
+ } else {
+ var nIndexesBefore = coll.getIndexes().length;
+ cmdResult = cmd();
+ if (numIndexes == 0) {
+ assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict);
+ return;
+ }
- // A non-ambiguous index hint by key pattern is allowed, even if the collation doesn't
- // match.
- assertIndexesCreated(() => coll.createIndex({b: 1}, {collation: {locale: "fr"}}));
- assert.eq(coll.find({a: "a"}).collation({locale: "en_US"}).hint({b: 1}).itcount(), 1);
+ assert.commandWorked(cmdResult);
+ var nIndexesAfter = coll.getIndexes().length;
+ assert.eq(nIndexesAfter - nIndexesBefore, numIndexes, tojson(coll.getIndexes()));
}
+}
+
+/**
+ * Assert that the result of the index create command indicates no indexes were created since
+ * the indexes were the same (collation and key pattern matched).
+ *
+ * (Index creation succeeds if none are created, as long as no options conflict.)
+ *
+ * @param cmd {Function} A function to execute that attempts to create indexes.
+ */
+function assertIndexNotCreated(cmd) {
+ assertIndexesCreated(cmd, 0);
+}
+
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName()));
+
+// Test that multiple indexes with the same key pattern and different collation can be created.
+
+assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "a_1"}));
+// The requested index already exists, but with a different name, so the index is not created.
+assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "a_1:1"}));
+
+// Indexes with different collations and the same key pattern are allowed if the names are
+// not the same.
+assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "fr", collation: {locale: "fr"}}));
+assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "en_US", collation: {locale: "en_US"}}));
+
+// The requested index doesn't yet exist, but the name is used, so this command fails.
+assert.commandFailed(coll.createIndex({a: 1}, {name: "a_1", collation: {locale: "en_US"}}));
+
+// The requested index already exists with a different name, so the index is not created.
+assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "fr2", collation: {locale: "fr"}}));
+
+// Options can differ on indexes with different collations.
+assertIndexesCreated(
+ () => coll.createIndex(
+ {a: 1}, {name: "fr1_sparse", collation: {locale: "fr", strength: 1}, sparse: true}));
+
+// The requested index already exists, but with different options, so the command fails.
+assert.commandFailed(
+ coll.createIndex({a: 1}, {name: "fr_sparse", collation: {locale: "fr"}, sparse: true}));
+
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName()));
+
+// Multiple non-conflicting indexes can be created in one command.
+var multipleCreate = () => db.runCommand({
+ createIndexes: coll.getName(),
+ indexes: [
+ {key: {a: 1}, name: "en_US", collation: {locale: "en_US"}},
+ {key: {a: 1}, name: "en_US_1", collation: {locale: "en_US", strength: 1}}
+ ]
+});
+assertIndexesCreated(multipleCreate, 2);
+
+// Cannot create another _id index.
+assert.commandFailed(coll.createIndex({_id: 1}, {name: "other", collation: {locale: "fr"}}));
+
+// Test that indexes must be dropped by name if the key pattern is ambiguous.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName()));
+
+// Create multiple indexes with the same key pattern and collation.
+assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "foo", collation: {locale: "en_US"}}));
+assertIndexesCreated(
+ () => coll.createIndex({a: 1}, {name: "bar", collation: {locale: "en_US", strength: 1}}));
+
+// Indexes cannot be dropped by an ambiguous key pattern.
+assert.commandFailed(coll.dropIndex({a: 1}));
+
+// Indexes can be dropped by name.
+assert.commandWorked(coll.dropIndex("foo"));
+assert.commandWorked(coll.dropIndex("bar"));
+
+// Test that hint behaves correctly in the presence of multiple indexes.
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName()));
+
+assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "sbc"}));
+assertIndexesCreated(
+ () => coll.createIndex({a: 1},
+ {name: "caseInsensitive", collation: {locale: "en_US", strength: 2}}));
+
+assert.writeOK(coll.insert([{a: "a"}, {a: "A"}, {a: 20}]));
+
+// An ambiguous hint pattern fails.
+assert.throws(() => coll.find({a: 1}).hint({a: 1}).itcount());
+if (db.getMongo().useReadCommands()) {
+ assert.throws(
+ () => coll.find({a: 1}).collation({locale: "en_US", strength: 2}).hint({a: 1}).itcount());
+}
+
+// Index hint by name succeeds.
+assert.eq(coll.find({a: "a"}).hint("sbc").itcount(), 1);
+// A hint on an incompatible index does a whole index scan, and then filters using the query
+// collation.
+assert.eq(coll.find({a: "a"}).hint("caseInsensitive").itcount(), 1);
+if (db.getMongo().useReadCommands()) {
+ assert.eq(coll.find({a: "a"}).collation({locale: "en_US", strength: 2}).hint("sbc").itcount(),
+ 2);
+
+ // A non-ambiguous index hint by key pattern is allowed, even if the collation doesn't
+ // match.
+ assertIndexesCreated(() => coll.createIndex({b: 1}, {collation: {locale: "fr"}}));
+ assert.eq(coll.find({a: "a"}).collation({locale: "en_US"}).hint({b: 1}).itcount(), 1);
+}
})();
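// A hedged sketch (hypothetical names) of why the test above drops and hints
// by name: two indexes can share the key pattern {a: 1} but differ in
// collation, making the key pattern alone ambiguous. The collation() call
// assumes a shell in read-commands mode, as the test's guard does.
var m = db.multi_index_demo;
m.drop();
assert.commandWorked(m.createIndex({a: 1}, {name: "simple"}));
assert.commandWorked(
    m.createIndex({a: 1}, {name: "ci", collation: {locale: "en_US", strength: 2}}));
assert.writeOK(m.insert([{a: "a"}, {a: "A"}]));
// m.dropIndex({a: 1}) would fail as ambiguous; hint and drop by name instead:
print(m.find({a: "a"}).hint("simple").itcount());  // 1 (binary match only)
print(m.find({a: "a"})
          .collation({locale: "en_US", strength: 2})
          .hint("ci")
          .itcount());  // 2 (case-insensitive)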
diff --git a/jstests/core/insert_one.js b/jstests/core/insert_one.js
index 0a50ee1b7f3..9a1a6d393f8 100644
--- a/jstests/core/insert_one.js
+++ b/jstests/core/insert_one.js
@@ -3,36 +3,36 @@
* object's prototype's methods.
*/
(function() {
- 'use strict';
- var col = db.insert_one_number;
- col.drop();
+'use strict';
+var col = db.insert_one_number;
+col.drop();
- assert.eq(col.find().itcount(), 0, "collection should be empty");
+assert.eq(col.find().itcount(), 0, "collection should be empty");
- assert.throws(function() {
- col.insertOne(1);
- }, [], "insertOne should only accept objects");
+assert.throws(function() {
+ col.insertOne(1);
+}, [], "insertOne should only accept objects");
- assert.eq(col.find().itcount(), 0, "collection should still be empty");
+assert.eq(col.find().itcount(), 0, "collection should still be empty");
- var result = col.insertOne({abc: 'def'});
- assert(result.acknowledged, "insertOne should succeed on documents");
+var result = col.insertOne({abc: 'def'});
+assert(result.acknowledged, "insertOne should succeed on documents");
- assert.docEq(col.findOne({_id: result.insertedId}),
- {_id: result.insertedId, abc: 'def'},
- "simple document not equal to collection find result");
+assert.docEq(col.findOne({_id: result.insertedId}),
+ {_id: result.insertedId, abc: 'def'},
+ "simple document not equal to collection find result");
- var doc = new Number();
- doc.x = 12;
- assert('zeroPad' in doc, "number object should have 'zeroPad' in prototype");
+var doc = new Number();
+doc.x = 12;
+assert('zeroPad' in doc, "number object should have 'zeroPad' in prototype");
- result = col.insertOne(doc);
- assert(result.acknowledged, "insertOne should succeed on documents");
+result = col.insertOne(doc);
+assert(result.acknowledged, "insertOne should succeed on documents");
- assert(!('zeroPad' in col.findOne({_id: result.insertedId})),
- "inserted result should not have functions from the number object's prototype");
+assert(!('zeroPad' in col.findOne({_id: result.insertedId})),
+ "inserted result should not have functions from the number object's prototype");
- assert.docEq(col.findOne({_id: result.insertedId}),
- {_id: result.insertedId, x: doc.x},
- "document with prototype not equal to collection find result");
+assert.docEq(col.findOne({_id: result.insertedId}),
+ {_id: result.insertedId, x: doc.x},
+ "document with prototype not equal to collection find result");
})();
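// A hedged sketch (hypothetical names) of the insertOne() contract asserted
// above: non-objects are rejected with a throw, and a successful insert
// returns {acknowledged, insertedId} for locating the new document.
var one = db.insert_one_demo;
one.drop();
var r = one.insertOne({abc: "def"});
print(r.acknowledged);                        // true
printjson(one.findOne({_id: r.insertedId}));  // {_id: ..., abc: "def"}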
diff --git a/jstests/core/invalid_collation_locale.js b/jstests/core/invalid_collation_locale.js
index 38209ed1f49..d520aef5920 100644
--- a/jstests/core/invalid_collation_locale.js
+++ b/jstests/core/invalid_collation_locale.js
@@ -1,30 +1,25 @@
// This test is meant to reproduce SERVER-38840, where the ICU library crashes on Windows when
// attempting to parse an invalid ID-prefixed locale.
(function() {
- "use strict";
+"use strict";
- const coll = db.invalid_collation_locale;
- coll.drop();
+const coll = db.invalid_collation_locale;
+coll.drop();
-    // Locales which start with "x" or "i" followed by a separator ("_" or "-") are considered
- // ID-prefixed.
- assert.commandFailedWithCode(
- db.createCollection(coll.getName(), {collation: {locale: "x_invalid"}}),
- ErrorCodes.BadValue);
+// Locales which start with "x" or "i" followed by a separator ("_" or "-") are considered
+// ID-prefixed.
+assert.commandFailedWithCode(
+ db.createCollection(coll.getName(), {collation: {locale: "x_invalid"}}), ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- db.createCollection(coll.getName(), {collation: {locale: "X_invalid"}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.createCollection(coll.getName(), {collation: {locale: "X_invalid"}}), ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- db.createCollection(coll.getName(), {collation: {locale: "i-invalid"}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.createCollection(coll.getName(), {collation: {locale: "i-invalid"}}), ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- db.createCollection(coll.getName(), {collation: {locale: "I-invalid"}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.createCollection(coll.getName(), {collation: {locale: "I-invalid"}}), ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- db.createCollection(coll.getName(), {collation: {locale: "xx_invalid"}}),
- ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ db.createCollection(coll.getName(), {collation: {locale: "xx_invalid"}}), ErrorCodes.BadValue);
})();
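// A hedged sketch of the rejection path tested above: locales beginning with
// "x" or "i" plus a separator are ID-prefixed, and creating a collection with
// one fails with BadValue (code 2). The collection name is hypothetical.
var res = db.runCommand({create: "locale_demo", collation: {locale: "x_invalid"}});
print(res.ok);    // 0
print(res.code);  // 2 (ErrorCodes.BadValue)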
diff --git a/jstests/core/invalid_db_name.js b/jstests/core/invalid_db_name.js
index 18da1e229f9..23cec76d446 100644
--- a/jstests/core/invalid_db_name.js
+++ b/jstests/core/invalid_db_name.js
@@ -3,16 +3,16 @@
// Can't shard collection with invalid db name.
// @tags: [assumes_unsharded_collection]
(function() {
- var invalidDB = db.getSiblingDB("NonExistentDB");
+var invalidDB = db.getSiblingDB("NonExistentDB");
- // This is a hack to bypass invalid database name checking by the DB constructor
- invalidDB._name = "Invalid DB Name";
+// This is a hack to bypass invalid database name checking by the DB constructor
+invalidDB._name = "Invalid DB Name";
- assert.writeError(invalidDB.coll.insert({x: 1}));
+assert.writeError(invalidDB.coll.insert({x: 1}));
- // Ensure that no database was created
- var dbList = db.getSiblingDB('admin').runCommand({listDatabases: 1}).databases;
- dbList.forEach(function(dbInfo) {
- assert.neq('Invalid DB Name', dbInfo.name, 'database with invalid name was created');
- });
+// Ensure that no database was created
+var dbList = db.getSiblingDB('admin').runCommand({listDatabases: 1}).databases;
+dbList.forEach(function(dbInfo) {
+ assert.neq('Invalid DB Name', dbInfo.name, 'database with invalid name was created');
+});
}());
diff --git a/jstests/core/js_jit.js b/jstests/core/js_jit.js
index 4ccdd2917ae..72290d45758 100644
--- a/jstests/core/js_jit.js
+++ b/jstests/core/js_jit.js
@@ -5,36 +5,36 @@
* implementations correctly. We force the JIT to kick in by using large loops.
*/
(function() {
- 'use strict';
+'use strict';
- function testDBCollection() {
- const c = new DBCollection(null, null, "foo", "test.foo");
- for (let i = 0; i < 100000; i++) {
- if (c.toString() != "test.foo") {
- throw i;
- }
+function testDBCollection() {
+ const c = new DBCollection(null, null, "foo", "test.foo");
+ for (let i = 0; i < 100000; i++) {
+ if (c.toString() != "test.foo") {
+ throw i;
}
}
+}
- function testDB() {
- const c = new DB(null, "test");
- for (let i = 0; i < 100000; i++) {
- if (c.toString() != "test") {
- throw i;
- }
+function testDB() {
+ const c = new DB(null, "test");
+ for (let i = 0; i < 100000; i++) {
+ if (c.toString() != "test") {
+ throw i;
}
}
+}
- function testDBQuery() {
- const c = DBQuery('a', 'b', 'c', 'd');
- for (let i = 0; i < 100000; i++) {
- if (c.toString() != "DBQuery: d -> null") {
- throw i;
- }
+function testDBQuery() {
+ const c = DBQuery('a', 'b', 'c', 'd');
+ for (let i = 0; i < 100000; i++) {
+ if (c.toString() != "DBQuery: d -> null") {
+ throw i;
}
}
+}
- testDBCollection();
- testDB();
- testDBQuery();
+testDBCollection();
+testDB();
+testDBQuery();
})();
\ No newline at end of file
diff --git a/jstests/core/json1.js b/jstests/core/json1.js
index 127795a5126..731bef9fcdc 100644
--- a/jstests/core/json1.js
+++ b/jstests/core/json1.js
@@ -5,7 +5,7 @@ x = {
};
eval("y = " + tojson(x));
assert.eq(tojson(x), tojson(y), "A");
-assert.eq(typeof(x.nulls), typeof(y.nulls), "B");
+assert.eq(typeof (x.nulls), typeof (y.nulls), "B");
// each type is parsed properly
x = {
diff --git a/jstests/core/json_schema/additional_items.js b/jstests/core/json_schema/additional_items.js
index c3866c88565..7165e9d4363 100644
--- a/jstests/core/json_schema/additional_items.js
+++ b/jstests/core/json_schema/additional_items.js
@@ -4,83 +4,88 @@
* Tests the JSON Schema "additionalItems" keyword.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.getCollection("json_schema_additional_items");
- coll.drop();
+const coll = db.getCollection("json_schema_additional_items");
+coll.drop();
- // Test that the JSON Schema fails to parse if "additionalItems" is not a boolean or object.
- assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1.0}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {additionalItems: "true"}}).itcount());
+// Test that the JSON Schema fails to parse if "additionalItems" is not a boolean or object.
+assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1.0}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {additionalItems: "true"}}).itcount());
- // Test that "additionalItems" has no effect at the top level (but is still accepted).
- assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: false}, {}, true);
- assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: true}, {}, true);
- assertSchemaMatch(
- coll, {items: [{type: "number"}], additionalItems: {type: "string"}}, {}, true);
+// Test that "additionalItems" has no effect at the top level (but is still accepted).
+assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: false}, {}, true);
+assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: true}, {}, true);
+assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: {type: "string"}}, {}, true);
- // Test that "additionalItems" has no effect when "items" is not present.
- let schema = {properties: {a: {additionalItems: false}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: "blah"}, true);
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
+// Test that "additionalItems" has no effect when "items" is not present.
+let schema = {properties: {a: {additionalItems: false}}};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: "blah"}, true);
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
- schema = {properties: {a: {additionalItems: {type: "object"}}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: "blah"}, true);
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
+schema = {
+ properties: {a: {additionalItems: {type: "object"}}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: "blah"}, true);
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
- // Test that "additionalItems" has no effect when "items" is a schema that applies to every
- // element in the array.
- schema = {properties: {a: {items: {}, additionalItems: false}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: "blah"}, true);
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
+// Test that "additionalItems" has no effect when "items" is a schema that applies to every
+// element in the array.
+schema = {
+ properties: {a: {items: {}, additionalItems: false}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: "blah"}, true);
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
- schema = {properties: {a: {items: {}, additionalItems: {type: "object"}}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: "blah"}, true);
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
+schema = {
+ properties: {a: {items: {}, additionalItems: {type: "object"}}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: "blah"}, true);
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
- // Test that {additionalItems: false} correctly bans array indexes not covered by "items".
- schema = {
- properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: false}}
- };
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [229]}, true);
- assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true);
- assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false);
+// Test that {additionalItems: false} correctly bans array indexes not covered by "items".
+schema = {
+ properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: false}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [229]}, true);
+assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true);
+assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false);
- // Test that {additionalItems: true} has no effect.
- assertSchemaMatch(
- coll,
- {properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: true}}},
- {a: [229, "West 43rd", "Street"]},
- true);
- assertSchemaMatch(
- coll, {properties: {a: {items: [{not: {}}], additionalItems: true}}}, {a: []}, true);
+// Test that {additionalItems: true} has no effect.
+assertSchemaMatch(
+ coll,
+ {properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: true}}},
+ {a: [229, "West 43rd", "Street"]},
+ true);
+assertSchemaMatch(
+ coll, {properties: {a: {items: [{not: {}}], additionalItems: true}}}, {a: []}, true);
- // Test that the "additionalItems" schema only applies to array indexes not covered by "items".
- schema = {
- properties:
- {a: {items: [{type: "number"}, {type: "string"}], additionalItems: {type: "object"}}}
- };
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [229]}, true);
- assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true);
- assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false);
- assertSchemaMatch(coll, schema, {a: [229, "West 43rd", {}]}, true);
+// Test that the "additionalItems" schema only applies to array indexes not covered by "items".
+schema = {
+ properties:
+ {a: {items: [{type: "number"}, {type: "string"}], additionalItems: {type: "object"}}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [229]}, true);
+assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true);
+assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false);
+assertSchemaMatch(coll, schema, {a: [229, "West 43rd", {}]}, true);
- // Test that an empty array does not fail against "additionalItems".
- assertSchemaMatch(
- coll, {properties: {a: {items: [{not: {}}], additionalItems: false}}}, {a: []}, true);
- assertSchemaMatch(
- coll, {properties: {a: {items: [{not: {}}], additionalItems: {not: {}}}}}, {a: []}, true);
+// Test that an empty array does not fail against "additionalItems".
+assertSchemaMatch(
+ coll, {properties: {a: {items: [{not: {}}], additionalItems: false}}}, {a: []}, true);
+assertSchemaMatch(
+ coll, {properties: {a: {items: [{not: {}}], additionalItems: {not: {}}}}}, {a: []}, true);
}());
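// A hedged sketch (hypothetical names) of the "additionalItems" semantics
// asserted above: with an array-form "items", additionalItems constrains only
// the array slots beyond those the tuple covers.
var j = db.additional_items_demo;
j.drop();
assert.writeOK(j.insert({_id: 1, a: [229, "West 43rd", {}]}));        // Matches.
assert.writeOK(j.insert({_id: 2, a: [229, "West 43rd", "Street"]}));  // Does not.
var schema = {
    properties:
        {a: {items: [{type: "number"}, {type: "string"}], additionalItems: {type: "object"}}}
};
print(j.find({$jsonSchema: schema}).itcount());  // 1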
diff --git a/jstests/core/json_schema/additional_properties.js b/jstests/core/json_schema/additional_properties.js
index ce699a6036c..0a78d2415ef 100644
--- a/jstests/core/json_schema/additional_properties.js
+++ b/jstests/core/json_schema/additional_properties.js
@@ -4,247 +4,234 @@
* Tests for the JSON Schema 'additionalProperties' keyword.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.schema_allowed_properties;
+const coll = db.schema_allowed_properties;
- // Tests for {additionalProperties:false} at the top level.
- assertSchemaMatch(
- coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1}, true);
- assertSchemaMatch(
- coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, a: 1}, true);
- assertSchemaMatch(
- coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, b: 1}, false);
- assertSchemaMatch(coll,
- {properties: {_id: {}, a: {}}, additionalProperties: false},
- {_id: 1, a: 1, b: 1},
- false);
+// Tests for {additionalProperties:false} at the top level.
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, a: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, b: 1}, false);
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, a: 1, b: 1}, false);
- // Tests for {additionalProperties:true} at the top level.
- assertSchemaMatch(
- coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1}, true);
- assertSchemaMatch(
- coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, a: 1}, true);
- assertSchemaMatch(
- coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, b: 1}, true);
- assertSchemaMatch(coll,
- {properties: {_id: {}, a: {}}, additionalProperties: true},
- {_id: 1, a: 1, b: 1},
- true);
+// Tests for {additionalProperties:true} at the top level.
+assertSchemaMatch(coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, a: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, b: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, a: 1, b: 1}, true);
- // Tests for additionalProperties with a nested schema at the top level.
- assertSchemaMatch(coll,
- {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
- {_id: 1},
- true);
- assertSchemaMatch(coll,
- {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
- {_id: 1, a: 1},
- true);
- assertSchemaMatch(coll,
- {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
- {_id: 1, b: 1},
- true);
- assertSchemaMatch(coll,
- {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
- {_id: 1, b: "str"},
- false);
+// Tests for additionalProperties with a nested schema at the top level.
+assertSchemaMatch(
+ coll, {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, {_id: 1}, true);
+assertSchemaMatch(coll,
+ {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
+ {_id: 1, a: 1},
+ true);
+assertSchemaMatch(coll,
+ {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
+ {_id: 1, b: 1},
+ true);
+assertSchemaMatch(coll,
+ {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}},
+ {_id: 1, b: "str"},
+ false);
- // Tests for additionalProperties together with patternProperties at the top level.
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- },
- {_id: 1},
- true);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- },
- {_id: 1, a: 1},
- true);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- },
- {_id: 1, a: 1, ba: "str"},
- true);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- },
- {_id: 1, a: 1, ba: "str", other: 1},
- true);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- },
- {_id: 1, a: 1, ba: "str", other: "str"},
- false);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- },
- {_id: 1, a: 1, ba: 1, other: 1},
- false);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: false
- },
- {_id: 1, a: 1, ba: "str"},
- true);
- assertSchemaMatch(coll,
- {
- properties: {_id: {}, a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: false
- },
- {_id: 1, a: 1, ba: "str", other: 1},
- false);
+// Tests for additionalProperties together with patternProperties at the top level.
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ },
+ {_id: 1},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ },
+ {_id: 1, a: 1},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ },
+ {_id: 1, a: 1, ba: "str"},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ },
+ {_id: 1, a: 1, ba: "str", other: 1},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ },
+ {_id: 1, a: 1, ba: "str", other: "str"},
+ false);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ },
+ {_id: 1, a: 1, ba: 1, other: 1},
+ false);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: false
+ },
+ {_id: 1, a: 1, ba: "str"},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {_id: {}, a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: false
+ },
+ {_id: 1, a: 1, ba: "str", other: 1},
+ false);
- // Tests for {additionalProperties:false} in a nested schema.
- assertSchemaMatch(
- coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {}, true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
- {obj: 1},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
- {obj: {}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
- {obj: {a: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
- {obj: {a: 1, b: 1}},
- false);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
- {obj: {b: 1}},
- false);
+// Tests for {additionalProperties:false} in a nested schema.
+assertSchemaMatch(
+ coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {obj: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {obj: {}}, true);
+assertSchemaMatch(coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
+ {obj: {a: 1}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
+ {obj: {a: 1, b: 1}},
+ false);
+assertSchemaMatch(coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: false}}},
+ {obj: {b: 1}},
+ false);
- // Tests for {additionalProperties:true} in a nested schema.
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
- {obj: {}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
- {obj: {a: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
- {obj: {a: 1, b: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
- {obj: {b: 1}},
- true);
+// Tests for {additionalProperties:true} in a nested schema.
+assertSchemaMatch(
+ coll, {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, {obj: {}}, true);
+assertSchemaMatch(coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
+ {obj: {a: 1}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
+ {obj: {a: 1, b: 1}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: true}}},
+ {obj: {b: 1}},
+ true);
- // Tests for additionalProperties whose value is a nested schema, which is itself contained
- // within a nested schema.
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {obj: 1},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {obj: {}},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {obj: {a: 1}},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {obj: {a: 1, b: 1}},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {obj: {a: 1, b: "str"}},
- false);
- assertSchemaMatch(
- coll,
- {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
- {obj: {b: "str"}},
- false);
+// Tests for additionalProperties whose value is a nested schema, which is itself contained
+// within a nested schema.
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {obj: 1},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {obj: {}},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {obj: {a: 1}},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {obj: {a: 1, b: 1}},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {obj: {a: 1, b: "str"}},
+ false);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}},
+ {obj: {b: "str"}},
+ false);
- // Tests for additionalProperties together with patternProperties, both inside a nested schema.
- assertSchemaMatch(coll,
- {
- properties: {
- obj: {
- properties: {a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- }
- }
- },
- {obj: {}},
- true);
- assertSchemaMatch(coll,
- {
- properties: {
- obj: {
- properties: {a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- }
- }
- },
- {obj: {a: 1, ba: "str", c: 1}},
- true);
- assertSchemaMatch(coll,
- {
- properties: {
- obj: {
- properties: {a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- }
- }
- },
- {obj: {a: 1, ba: 1, c: 1}},
- false);
- assertSchemaMatch(coll,
- {
- properties: {
- obj: {
- properties: {a: {}},
- patternProperties: {"^b": {type: "string"}},
- additionalProperties: {type: "number"}
- }
- }
- },
- {obj: {a: 1, ba: 1, c: "str"}},
- false);
+// Tests for additionalProperties together with patternProperties, both inside a nested schema.
+assertSchemaMatch(coll,
+ {
+ properties: {
+ obj: {
+ properties: {a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ }
+ }
+ },
+ {obj: {}},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {
+ obj: {
+ properties: {a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ }
+ }
+ },
+ {obj: {a: 1, ba: "str", c: 1}},
+ true);
+assertSchemaMatch(coll,
+ {
+ properties: {
+ obj: {
+ properties: {a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ }
+ }
+ },
+ {obj: {a: 1, ba: 1, c: 1}},
+ false);
+assertSchemaMatch(coll,
+ {
+ properties: {
+ obj: {
+ properties: {a: {}},
+ patternProperties: {"^b": {type: "string"}},
+ additionalProperties: {type: "number"}
+ }
+ }
+ },
+ {obj: {a: 1, ba: 1, c: "str"}},
+ false);
}());
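The interplay tested above can be condensed to one rule: "properties" and "patternProperties" claim the keys they match, and "additionalProperties" governs whatever remains. A hedged shell sketch, with an illustrative collection name:

// Keys _id/a are claimed by "properties", keys starting with "b" by
// "patternProperties"; every other key must satisfy "additionalProperties".
const apDemo = db.getSiblingDB("test").additional_properties_demo;
apDemo.drop();
assert.writeOK(apDemo.insert({_id: 1, a: 1, ba: "str", other: 1}));      // "other" is a number.
assert.writeOK(apDemo.insert({_id: 2, a: 1, ba: "str", other: "str"}));  // "other" is not.
const apSchema = {
    properties: {_id: {}, a: {}},
    patternProperties: {"^b": {type: "string"}},
    additionalProperties: {type: "number"}
};
assert.eq(1, apDemo.find({$jsonSchema: apSchema}).itcount());  // Only _id 1 matches.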
diff --git a/jstests/core/json_schema/bsontype.js b/jstests/core/json_schema/bsontype.js
index f5ec15a06a8..ac874c66788 100644
--- a/jstests/core/json_schema/bsontype.js
+++ b/jstests/core/json_schema/bsontype.js
@@ -4,301 +4,285 @@
* Tests for the non-standard 'bsonType' keyword in JSON Schema, as well as some tests for 'type'.
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- const coll = db.jstests_schema_bsontype;
-
- // bsonType "double".
- assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: 3}, true);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberLong(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberInt(3)}, false);
- assertSchemaMatch(
- coll, {properties: {num: {bsonType: "double"}}}, {num: NumberDecimal(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: {}}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: [3]}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {foo: {}}, true);
-
- // type "double" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "double"}}}}).itcount());
-
- // bsonType "string".
- assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: ""}, true);
- assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: true}, false);
- assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: [1, "foo"]}, false);
-
- // type "string".
- assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: ""}, true);
- assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: true}, false);
- assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: [1, "foo"]}, false);
-
- // bsonType "object".
- assertSchemaMatch(coll, {bsonType: "object"}, {}, true);
- assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: {}}, true);
- assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: true}, false);
- assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: [{}]}, false);
-
- // type "object".
- assertSchemaMatch(coll, {type: "object"}, {}, true);
- assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: {}}, true);
- assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: true}, false);
- assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: [{}]}, false);
-
- // bsonType "array".
- assertSchemaMatch(coll, {bsonType: "array"}, {arr: []}, false);
- assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: []}, true);
- assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: {}}, false);
-
- // type "array".
- assertSchemaMatch(coll, {type: "array"}, {arr: []}, false);
- assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: []}, true);
- assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: {}}, false);
-
- // bsonType "binData".
- assertSchemaMatch(coll,
- {properties: {bin: {bsonType: "binData"}}},
- {bin: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")},
- true);
- assertSchemaMatch(coll, {properties: {bin: {bsonType: "binData"}}}, {bin: {}}, false);
-
- // type "binData" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {bin: {type: "binData"}}}}).itcount());
-
- // bsonType "undefined".
- assertSchemaMatch(
- coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: undefined}, true);
- assertSchemaMatch(coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {}, false);
- assertSchemaMatch(
- coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: null}, false);
-
- // type "undefined" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {u: {type: "undefined"}}}}).itcount());
-
- // bsonType "objectId".
- assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: ObjectId()}, true);
- assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: 1}, false);
-
- // type "objectId" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {o: {type: "objectId"}}}}).itcount());
-
- // bsonType "bool".
- assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: true}, true);
- assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: false}, true);
- assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: 1}, false);
-
- // bsonType "boolean" should fail.
- assert.throws(() =>
- coll.find({$jsonSchema: {properties: {b: {bsonType: "boolean"}}}}).itcount());
-
- // type "boolean".
- assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: true}, true);
- assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: false}, true);
- assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: 1}, false);
-
- // type "bool" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "bool"}}}}).itcount());
-
- // bsonType "date".
- assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: new Date()}, true);
- assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: 1}, false);
-
- // type "date" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "date"}}}}).itcount());
-
- // bsonType "null".
- assertSchemaMatch(
- coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {n: null}, true);
- assertSchemaMatch(coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {}, false);
- assertSchemaMatch(
- coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {u: undefined}, false);
-
- // type "null".
- assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {n: null}, true);
- assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {}, false);
- assertSchemaMatch(
- coll, {properties: {n: {type: "null"}}, required: ["n"]}, {u: undefined}, false);
-
- // bsonType "regex".
- assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: /^abc/}, true);
- assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: "^abc"}, false);
-
- // type "regex" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {r: {type: "regex"}}}}).itcount());
-
- // bsonType "javascript".
- assertSchemaMatch(coll,
- {properties: {code: {bsonType: "javascript"}}},
- {code: Code("function() { return true; }")},
- true);
- assertSchemaMatch(coll, {properties: {code: {bsonType: "javascript"}}}, {code: 1}, false);
-
- // type "javascript" should fail.
- assert.throws(
- () => coll.find({$jsonSchema: {properties: {code: {type: "javascript"}}}}).itcount());
-
- // bsonType "javascriptWithScope".
- assertSchemaMatch(coll,
- {properties: {code: {bsonType: "javascriptWithScope"}}},
- {code: Code("function() { return true; }", {scope: true})},
- true);
- assertSchemaMatch(
- coll, {properties: {code: {bsonType: "javascriptWithScope"}}}, {code: 1}, false);
-
- // type "javascriptWithScope" should fail.
- assert.throws(() =>
- coll.find({$jsonSchema: {properties: {code: {type: "javascriptWithScope"}}}})
- .itcount());
-
- // bsonType "int".
- assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberInt(3)}, true);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberLong(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: 3}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberDecimal(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: {}}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {foo: {}}, true);
-
- // type "int" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "int"}}}}).itcount());
-
- // bsonType "integer" should fail.
- assert.throws(
- () => coll.find({$jsonSchema: {properties: {num: {bsonType: "integer"}}}}).itcount());
-
- // type "integer" is explicitly unsupported and should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "integer"}}}}).itcount());
-
- // bsonType "timestamp".
- assertSchemaMatch(
- coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: Timestamp(0, 1234)}, true);
- assertSchemaMatch(coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: new Date()}, false);
-
- // type "timestamp" should fail.
- assert.throws(() =>
- coll.find({$jsonSchema: {properties: {ts: {type: "timestamp"}}}}).itcount());
-
- // bsonType "long".
- assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberLong(3)}, true);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberInt(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: 3}, false);
- assertSchemaMatch(
- coll, {properties: {num: {bsonType: "long"}}}, {num: NumberDecimal(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: {}}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {foo: {}}, true);
-
- // type "long" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "long"}}}}).itcount());
-
- // bsonType "decimal".
- assertSchemaMatch(
- coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberDecimal(3)}, true);
- assertSchemaMatch(
- coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberLong(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberInt(3)}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: 3}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: {}}, false);
- assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {foo: {}}, true);
-
- // type "decimal" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "decimal"}}}}).itcount());
-
- // bsonType "minKey".
- assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MinKey()}, true);
- assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MaxKey()}, false);
-
- // type "minKey" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "minKey"}}}}).itcount());
-
- // bsonType "maxKey".
- assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MaxKey()}, true);
- assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MinKey()}, false);
-
- // type "maxKey" should fail.
- assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "maxKey"}}}}).itcount());
-
- // Test that 'bsonType' keyword rejects unknown type aliases.
- assert.throws(() =>
- coll.find({$jsonSchema: {properties: {f: {bsonType: "unknown"}}}}).itcount());
-
- // Test that 'type' keyword rejects unknown type aliases.
- assert.throws(() => coll.find({$jsonSchema: {properties: {f: {type: "unknown"}}}}).itcount());
-
- // Specifying both "type" and "bsonType" in the same schema should fail.
- assert.throws(() => coll.find({$jsonSchema: {bsonType: "string", type: "string"}}).itcount());
- assert.throws(
- () => coll.find({$jsonSchema: {properties: {a: {bsonType: "string", type: "string"}}}})
- .itcount());
-
- // "type" and "bsonType" are both allowed when they are not sibling keywords in the same
- // subschema.
- assertSchemaMatch(
- coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: {}}, true);
- assertSchemaMatch(
- coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: []}, false);
- assertSchemaMatch(coll,
- {properties: {a: {bsonType: "long"}, b: {type: "null"}}},
- {a: NumberLong(3), b: null},
- true);
- assertSchemaMatch(
- coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {a: NumberLong(3)}, true);
- assertSchemaMatch(
- coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {b: null}, true);
- assertSchemaMatch(coll,
- {properties: {a: {bsonType: "long"}, b: {type: "null"}}},
- {b: null},
- {a: 3, b: null},
- false);
- assertSchemaMatch(coll,
- {properties: {a: {bsonType: "long"}, b: {type: "null"}}},
- {b: null},
- {a: NumberLong(3), b: 3},
- false);
-
- // Test that the 'type' keyword rejects an array of aliases if one of those aliases is invalid.
- assert.throws(() => coll.find({$jsonSchema: {f: {type: ["number", "objectId"]}}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {f: {type: ["object", "unknown"]}}}).itcount());
-
- // Test that the 'bsonType' keyword rejects an array of aliases if one of those aliases is
- // invalid.
- assert.throws(() => coll.find({$jsonSchema: {f: {bsonType: ["number", "unknown"]}}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {bsonType: ["unknown"]}}).itcount());
-
- // Test that the 'type' keyword rejects an array which contains a numerical type alias.
- assert.throws(() => coll.find({$jsonSchema: {f: {type: ["number", 2]}}}).itcount());
-
- // Test that the 'bsonType' keyword rejects an array which contains a numerical type alias.
- assert.throws(() => coll.find({$jsonSchema: {f: {bsonType: ["number", 2]}}}).itcount());
-
- // Test that the 'type' keyword rejects an array which contains duplicate aliases.
- assert.throws(
- () => coll.find({$jsonSchema: {f: {type: ["number", "string", "number"]}}}).itcount());
-
- // Test that the 'bsonType' keyword rejects an array which contains duplicate aliases.
- assert.throws(
- () => coll.find({$jsonSchema: {f: {bsonType: ["number", "string", "number"]}}}).itcount());
-
- // Test that the 'type' keyword can accept an array of type aliases.
- assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: 1}, true);
- assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: "str"}, true);
- assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {}, true);
- assertSchemaMatch(
- coll, {properties: {f: {type: ["number", "string"]}}}, {f: ["str", 1]}, false);
- assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: {}}, false);
-
- // Test that the 'bsonType' keyword can accept an array of type aliases.
- assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: 1}, true);
- assertSchemaMatch(
- coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: ObjectId()}, true);
- assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {}, true);
- assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: [1]}, false);
- assertSchemaMatch(
- coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: NumberInt(1)}, false);
-
- // Test that the 'type' keyword with an array of types is valid at the top-level.
- assertSchemaMatch(coll, {type: ["object", "string"]}, {}, true);
- assertSchemaMatch(coll, {type: ["object", "string"]}, {foo: 1, bar: 1}, true);
-
- // Test that the 'bsonType' keyword with an array of types is valid at the top-level.
- assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {}, true);
- assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {foo: 1, bar: 1}, true);
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+const coll = db.jstests_schema_bsontype;
+
+// bsonType "double".
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: 3}, true);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberLong(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberInt(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberDecimal(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: {}}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: [3]}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {foo: {}}, true);
+
+// type "double" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "double"}}}}).itcount());
+
+// bsonType "string".
+assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: ""}, true);
+assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: true}, false);
+assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: [1, "foo"]}, false);
+
+// type "string".
+assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: ""}, true);
+assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: true}, false);
+assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: [1, "foo"]}, false);
+
+// bsonType "object".
+assertSchemaMatch(coll, {bsonType: "object"}, {}, true);
+assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: {}}, true);
+assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: true}, false);
+assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: [{}]}, false);
+
+// type "object".
+assertSchemaMatch(coll, {type: "object"}, {}, true);
+assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: {}}, true);
+assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: true}, false);
+assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: [{}]}, false);
+
+// bsonType "array".
+assertSchemaMatch(coll, {bsonType: "array"}, {arr: []}, false);
+assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: []}, true);
+assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: {}}, false);
+
+// type "array".
+assertSchemaMatch(coll, {type: "array"}, {arr: []}, false);
+assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: []}, true);
+assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: {}}, false);
+
+// bsonType "binData".
+assertSchemaMatch(coll,
+ {properties: {bin: {bsonType: "binData"}}},
+ {bin: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")},
+ true);
+assertSchemaMatch(coll, {properties: {bin: {bsonType: "binData"}}}, {bin: {}}, false);
+
+// type "binData" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {bin: {type: "binData"}}}}).itcount());
+
+// bsonType "undefined".
+assertSchemaMatch(
+ coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: undefined}, true);
+assertSchemaMatch(coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {}, false);
+assertSchemaMatch(
+ coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: null}, false);
+
+// type "undefined" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {u: {type: "undefined"}}}}).itcount());
+
+// bsonType "objectId".
+assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: ObjectId()}, true);
+assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: 1}, false);
+
+// type "objectId" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {o: {type: "objectId"}}}}).itcount());
+
+// bsonType "bool".
+assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: true}, true);
+assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: false}, true);
+assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: 1}, false);
+
+// bsonType "boolean" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {b: {bsonType: "boolean"}}}}).itcount());
+
+// type "boolean".
+assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: true}, true);
+assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: false}, true);
+assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: 1}, false);
+
+// type "bool" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "bool"}}}}).itcount());
+
+// bsonType "date".
+assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: new Date()}, true);
+assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: 1}, false);
+
+// type "date" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "date"}}}}).itcount());
+
+// bsonType "null".
+assertSchemaMatch(coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {n: null}, true);
+assertSchemaMatch(coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {}, false);
+assertSchemaMatch(
+ coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {u: undefined}, false);
+
+// type "null".
+assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {n: null}, true);
+assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {}, false);
+assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {u: undefined}, false);
+
+// bsonType "regex".
+assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: /^abc/}, true);
+assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: "^abc"}, false);
+
+// type "regex" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {r: {type: "regex"}}}}).itcount());
+
+// bsonType "javascript".
+assertSchemaMatch(coll,
+ {properties: {code: {bsonType: "javascript"}}},
+ {code: Code("function() { return true; }")},
+ true);
+assertSchemaMatch(coll, {properties: {code: {bsonType: "javascript"}}}, {code: 1}, false);
+
+// type "javascript" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {code: {type: "javascript"}}}}).itcount());
+
+// bsonType "javascriptWithScope".
+assertSchemaMatch(coll,
+ {properties: {code: {bsonType: "javascriptWithScope"}}},
+ {code: Code("function() { return true; }", {scope: true})},
+ true);
+assertSchemaMatch(coll, {properties: {code: {bsonType: "javascriptWithScope"}}}, {code: 1}, false);
+
+// type "javascriptWithScope" should fail.
+assert.throws(
+ () => coll.find({$jsonSchema: {properties: {code: {type: "javascriptWithScope"}}}}).itcount());
+
+// bsonType "int".
+assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberInt(3)}, true);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberLong(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: 3}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberDecimal(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: {}}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {foo: {}}, true);
+
+// type "int" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "int"}}}}).itcount());
+
+// bsonType "integer" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {bsonType: "integer"}}}}).itcount());
+
+// type "integer" is explicitly unsupported and should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "integer"}}}}).itcount());
+
+// bsonType "timestamp".
+assertSchemaMatch(
+ coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: Timestamp(0, 1234)}, true);
+assertSchemaMatch(coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: new Date()}, false);
+
+// type "timestamp" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {ts: {type: "timestamp"}}}}).itcount());
+
+// bsonType "long".
+assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberLong(3)}, true);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberInt(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: 3}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberDecimal(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: {}}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {foo: {}}, true);
+
+// type "long" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "long"}}}}).itcount());
+
+// bsonType "decimal".
+assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberDecimal(3)}, true);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberLong(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberInt(3)}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: 3}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: {}}, false);
+assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {foo: {}}, true);
+
+// type "decimal" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "decimal"}}}}).itcount());
+
+// bsonType "minKey".
+assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MinKey()}, true);
+assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MaxKey()}, false);
+
+// type "minKey" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "minKey"}}}}).itcount());
+
+// bsonType "maxKey".
+assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MaxKey()}, true);
+assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MinKey()}, false);
+
+// type "maxKey" should fail.
+assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "maxKey"}}}}).itcount());
+
+// Test that 'bsonType' keyword rejects unknown type aliases.
+assert.throws(() => coll.find({$jsonSchema: {properties: {f: {bsonType: "unknown"}}}}).itcount());
+
+// Test that 'type' keyword rejects unknown type aliases.
+assert.throws(() => coll.find({$jsonSchema: {properties: {f: {type: "unknown"}}}}).itcount());
+
+// Specifying both "type" and "bsonType" in the same schema should fail.
+assert.throws(() => coll.find({$jsonSchema: {bsonType: "string", type: "string"}}).itcount());
+assert.throws(() =>
+ coll.find({$jsonSchema: {properties: {a: {bsonType: "string", type: "string"}}}})
+ .itcount());
+
+// "type" and "bsonType" are both allowed when they are not sibling keywords in the same
+// subschema.
+assertSchemaMatch(coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: {}}, true);
+assertSchemaMatch(
+ coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: []}, false);
+assertSchemaMatch(coll,
+ {properties: {a: {bsonType: "long"}, b: {type: "null"}}},
+ {a: NumberLong(3), b: null},
+ true);
+assertSchemaMatch(
+ coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {a: NumberLong(3)}, true);
+assertSchemaMatch(coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {b: null}, true);
+assertSchemaMatch(coll,
+                  {properties: {a: {bsonType: "long"}, b: {type: "null"}}},
+                  {a: 3, b: null},
+                  false);
+assertSchemaMatch(coll,
+                  {properties: {a: {bsonType: "long"}, b: {type: "null"}}},
+                  {a: NumberLong(3), b: 3},
+                  false);
+
+// Test that the 'type' keyword rejects an array of aliases if one of those aliases is invalid.
+assert.throws(
+    () => coll.find({$jsonSchema: {properties: {f: {type: ["number", "objectId"]}}}}).itcount());
+assert.throws(
+    () => coll.find({$jsonSchema: {properties: {f: {type: ["object", "unknown"]}}}}).itcount());
+
+// Test that the 'bsonType' keyword rejects an array of aliases if one of those aliases is
+// invalid.
+assert.throws(() => coll.find({$jsonSchema: {properties: {f: {bsonType: ["number", "unknown"]}}}})
+                        .itcount());
+assert.throws(() => coll.find({$jsonSchema: {bsonType: ["unknown"]}}).itcount());
+
+// Test that the 'type' keyword rejects an array which contains a numerical type alias.
+assert.throws(() => coll.find({$jsonSchema: {properties: {f: {type: ["number", 2]}}}}).itcount());
+
+// Test that the 'bsonType' keyword rejects an array which contains a numerical type alias.
+assert.throws(
+    () => coll.find({$jsonSchema: {properties: {f: {bsonType: ["number", 2]}}}}).itcount());
+
+// Test that the 'type' keyword rejects an array which contains duplicate aliases.
+assert.throws(
+    () => coll.find({$jsonSchema: {properties: {f: {type: ["number", "string", "number"]}}}})
+              .itcount());
+
+// Test that the 'bsonType' keyword rejects an array which contains duplicate aliases.
+assert.throws(
+    () => coll.find({$jsonSchema: {properties: {f: {bsonType: ["number", "string", "number"]}}}})
+              .itcount());
+
+// Test that the 'type' keyword can accept an array of type aliases.
+assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: 1}, true);
+assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: "str"}, true);
+assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {}, true);
+assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: ["str", 1]}, false);
+assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: {}}, false);
+
+// Test that the 'bsonType' keyword can accept an array of type aliases.
+assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: ObjectId()}, true);
+assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {}, true);
+assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: [1]}, false);
+assertSchemaMatch(
+ coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: NumberInt(1)}, false);
+
+// Test that the 'type' keyword with an array of types is valid at the top-level.
+assertSchemaMatch(coll, {type: ["object", "string"]}, {}, true);
+assertSchemaMatch(coll, {type: ["object", "string"]}, {foo: 1, bar: 1}, true);
+
+// Test that the 'bsonType' keyword with an array of types is valid at the top-level.
+assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {}, true);
+assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {foo: 1, bar: 1}, true);
}());
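A compact illustration of the distinction this file tests repeatedly: the MongoDB-specific "bsonType" aliases are exact about numeric BSON types, while the standard "number" alias for "type" matches any of them. Sketch only; the collection name is illustrative:

// The shell stores bare numeric literals as doubles; NumberLong(3) is a long.
const btDemo = db.getSiblingDB("test").bsontype_demo;
btDemo.drop();
assert.writeOK(btDemo.insert({_id: 1, num: 3}));
assert.writeOK(btDemo.insert({_id: 2, num: NumberLong(3)}));
// bsonType "double" matches only the double representation...
assert.eq(1, btDemo.find({$jsonSchema: {properties: {num: {bsonType: "double"}}}}).itcount());
// ...whereas type "number" matches every numeric BSON type.
assert.eq(2, btDemo.find({$jsonSchema: {properties: {num: {type: "number"}}}}).itcount());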
diff --git a/jstests/core/json_schema/dependencies.js b/jstests/core/json_schema/dependencies.js
index 442976aa5fc..ffcd917889b 100644
--- a/jstests/core/json_schema/dependencies.js
+++ b/jstests/core/json_schema/dependencies.js
@@ -4,122 +4,107 @@
* Tests for the JSON Schema 'dependencies' keyword.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.jstests_schema_dependencies;
+const coll = db.jstests_schema_dependencies;
- // Top-level schema dependency.
- assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {}, true);
- assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1, bar: 1}, true);
- assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {bar: 1}, true);
- assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1}, false);
+// Top-level schema dependency.
+assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {}, true);
+assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1, bar: 1}, true);
+assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {bar: 1}, true);
+assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1}, false);
- assertSchemaMatch(
- coll,
- {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
- {},
- true);
- assertSchemaMatch(
- coll,
- {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
- {bar: 1},
- true);
- assertSchemaMatch(
- coll,
- {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
- {foo: 1, bar: 1},
- true);
- assertSchemaMatch(
- coll,
- {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
- {foo: 1, bar: 1, baz: 1},
- false);
- assertSchemaMatch(
- coll,
- {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
- {foo: 1, bar: 1, baz: "str"},
- true);
+assertSchemaMatch(coll,
+ {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
+ {},
+ true);
+assertSchemaMatch(coll,
+ {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
+ {bar: 1},
+ true);
+assertSchemaMatch(coll,
+ {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
+ {foo: 1, bar: 1},
+ true);
+assertSchemaMatch(coll,
+ {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
+ {foo: 1, bar: 1, baz: 1},
+ false);
+assertSchemaMatch(coll,
+ {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}},
+ {foo: 1, bar: 1, baz: "str"},
+ true);
- // Top-level property dependency.
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {}, true);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1}, true);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {baz: 1}, true);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1, baz: 1}, true);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1}, false);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1}, false);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, baz: 1}, false);
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1, baz: 1}, true);
+// Top-level property dependency.
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {}, true);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1}, true);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {baz: 1}, true);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1, baz: 1}, true);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1}, false);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1}, false);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, baz: 1}, false);
+assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1, baz: 1}, true);
- // Nested schema dependency.
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {}, true);
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: 1}, true);
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {}}, true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}},
- {obj: {bar: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}},
- {obj: {foo: 1}},
- false);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}},
- {obj: {foo: 1, bar: 1}},
- true);
+// Nested schema dependency.
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {}}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {bar: 1}}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {foo: 1}}, false);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}},
+ {obj: {foo: 1, bar: 1}},
+ true);
- // Nested property dependency.
- assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {}, true);
- assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: 1}, true);
- assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {}}, true);
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {bar: 1}}, true);
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1}}, false);
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1, bar: 1}}, true);
+// Nested property dependency.
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {}, true);
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: 1}, true);
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {}}, true);
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {bar: 1}}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1}}, false);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1, bar: 1}}, true);
- // Nested property dependency and nested schema dependency.
- assertSchemaMatch(
- coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {}, true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: 1},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {b: 1, d: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {a: 1, b: 1, c: 1}},
- false);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {a: 1, c: 0, d: 1}},
- false);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {b: 1, c: 1, d: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {a: 1, b: 1, d: 1}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
- {obj: {a: 1, b: 1, c: 1, d: 1}},
- true);
+// Nested property dependency and nested schema dependency.
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {obj: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {obj: {}}, true);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
+ {obj: {b: 1, d: 1}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
+ {obj: {a: 1, b: 1, c: 1}},
+ false);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
+ {obj: {a: 1, c: 0, d: 1}},
+ false);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
+ {obj: {b: 1, c: 1, d: 1}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
+ {obj: {a: 1, b: 1, d: 1}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}},
+ {obj: {a: 1, b: 1, c: 1, d: 1}},
+ true);
- // Empty dependencies matches everything.
- assertSchemaMatch(coll, {dependencies: {}}, {}, true);
- assertSchemaMatch(coll, {properties: {obj: {dependencies: {}}}}, {obj: {}}, true);
+// Empty dependencies matches everything.
+assertSchemaMatch(coll, {dependencies: {}}, {}, true);
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {}}}}, {obj: {}}, true);
}());
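The two "dependencies" forms tested above differ only in the value's shape: an array names properties that must co-occur with the key, while an object gives a subschema the whole document must then satisfy. A minimal sketch, with illustrative names:

// If "foo" is present, the array form requires "bar" to be present too;
// the object form requires the document to match {required: ["bar"]}.
const depDemo = db.getSiblingDB("test").dependencies_demo;
depDemo.drop();
assert.writeOK(depDemo.insert({_id: 1, foo: 1, bar: 1}));  // Satisfies both forms.
assert.writeOK(depDemo.insert({_id: 2, foo: 1}));          // Violates both forms.
assert.eq(1, depDemo.find({$jsonSchema: {dependencies: {foo: ["bar"]}}}).itcount());
assert.eq(1, depDemo.find({$jsonSchema: {dependencies: {foo: {required: ["bar"]}}}}).itcount());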
diff --git a/jstests/core/json_schema/encrypt.js b/jstests/core/json_schema/encrypt.js
index b7e5c5ce0d5..32d93f43da4 100644
--- a/jstests/core/json_schema/encrypt.js
+++ b/jstests/core/json_schema/encrypt.js
@@ -6,62 +6,60 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.jstests_schema_encrypt;
- const encryptedBinDataElement = BinData(6, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA");
- const nonEncryptedBinDataElement = BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA");
+const coll = db.jstests_schema_encrypt;
+const encryptedBinDataElement = BinData(6, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA");
+const nonEncryptedBinDataElement = BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA");
- // Only elements of type BinData with subtype '6' should match.
- assertSchemaMatch(
- coll, {properties: {bin: {encrypt: {}}}}, {bin: encryptedBinDataElement}, true);
- assertSchemaMatch(coll, {properties: {bin: {encrypt: {}}}}, {bin: {}}, false);
- assertSchemaMatch(
- coll, {properties: {bin: {encrypt: {}}}}, {bin: nonEncryptedBinDataElement}, false);
- // Nested in object.
- assertSchemaMatch(coll,
- {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}},
- {obj: {a: encryptedBinDataElement}},
- true);
- assertSchemaMatch(coll,
- {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}},
- {obj: {a: {}}},
- false);
- assertSchemaMatch(coll,
- {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}},
- {obj: {a: nonEncryptedBinDataElement}},
- false);
+// Only elements of type BinData with subtype '6' should match.
+assertSchemaMatch(coll, {properties: {bin: {encrypt: {}}}}, {bin: encryptedBinDataElement}, true);
+assertSchemaMatch(coll, {properties: {bin: {encrypt: {}}}}, {bin: {}}, false);
+assertSchemaMatch(
+ coll, {properties: {bin: {encrypt: {}}}}, {bin: nonEncryptedBinDataElement}, false);
+// Nested in object.
+assertSchemaMatch(coll,
+ {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}},
+ {obj: {a: encryptedBinDataElement}},
+ true);
+assertSchemaMatch(coll,
+ {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}},
+ {obj: {a: {}}},
+ false);
+assertSchemaMatch(coll,
+ {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}},
+ {obj: {a: nonEncryptedBinDataElement}},
+ false);
- // Nested in array.
- assertSchemaMatch(coll,
- {properties: {arr: {type: 'array', items: {encrypt: {}}}}},
- {arr: [encryptedBinDataElement, encryptedBinDataElement]},
- true);
- assertSchemaMatch(
- coll, {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, {arr: [{}, {}]}, false);
- assertSchemaMatch(coll,
- {properties: {arr: {type: 'array', items: {encrypt: {}}}}},
- {arr: [encryptedBinDataElement, nonEncryptedBinDataElement]},
- false);
+// Nested in array.
+assertSchemaMatch(coll,
+ {properties: {arr: {type: 'array', items: {encrypt: {}}}}},
+ {arr: [encryptedBinDataElement, encryptedBinDataElement]},
+ true);
+assertSchemaMatch(
+ coll, {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, {arr: [{}, {}]}, false);
+assertSchemaMatch(coll,
+ {properties: {arr: {type: 'array', items: {encrypt: {}}}}},
+ {arr: [encryptedBinDataElement, nonEncryptedBinDataElement]},
+ false);
- // If array is not specified, should not traverse array of encrypted BinData's.
- assertSchemaMatch(coll,
- {properties: {bin: {encrypt: {}}}},
- {bin: [encryptedBinDataElement, encryptedBinDataElement]},
- false);
+// If type "array" is not specified, the schema should not traverse an array of encrypted
+// BinData elements.
+assertSchemaMatch(coll,
+ {properties: {bin: {encrypt: {}}}},
+ {bin: [encryptedBinDataElement, encryptedBinDataElement]},
+ false);
- // Encrypt alongside type/bsonType should fail to parse.
- assert.commandFailedWithCode(coll.runCommand({
- find: "coll",
- filter: {$jsonSchema: {properties: {bin: {encrypt: {}, type: 'object'}}}}
- }),
- ErrorCodes.FailedToParse);
+// Encrypt alongside type/bsonType should fail to parse.
+assert.commandFailedWithCode(
+ coll.runCommand(
+ {find: "coll", filter: {$jsonSchema: {properties: {bin: {encrypt: {}, type: 'object'}}}}}),
+ ErrorCodes.FailedToParse);
- assert.commandFailedWithCode(coll.runCommand({
- find: "coll",
- filter: {$jsonSchema: {properties: {bin: {encrypt: {}, bsonType: 'object'}}}}
- }),
- ErrorCodes.FailedToParse);
+assert.commandFailedWithCode(coll.runCommand({
+ find: "coll",
+ filter: {$jsonSchema: {properties: {bin: {encrypt: {}, bsonType: 'object'}}}}
+}),
+ ErrorCodes.FailedToParse);
}());
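
For background on the hunk above: the 'encrypt' keyword matches only BinData values of
subtype 6, the BSON "encrypted value" subtype. A minimal shell sketch of that behavior,
assuming a mongo shell connected to a scratch database with the standard jstest assertion
helpers (the collection name is illustrative, not part of the patch):

// Only the subtype-6 BinData value should satisfy the 'encrypt' keyword.
const encDemo = db.encrypt_demo;
encDemo.drop();
assert.writeOK(encDemo.insert({_id: 0, bin: BinData(6, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}));
assert.writeOK(encDemo.insert({_id: 1, bin: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}));
assert.eq([{_id: 0}],
          encDemo.find({$jsonSchema: {properties: {bin: {encrypt: {}}}}}, {_id: 1}).toArray());
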
diff --git a/jstests/core/json_schema/items.js b/jstests/core/json_schema/items.js
index 57974a3d612..3bb71c79c7d 100644
--- a/jstests/core/json_schema/items.js
+++ b/jstests/core/json_schema/items.js
@@ -4,58 +4,64 @@
* Tests the JSON Schema "items" keyword.
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- const coll = db.getCollection("json_schema_items");
- coll.drop();
-
- // Test that the JSON Schema fails to parse if "items" is not an object or array.
- assert.throws(() => coll.find({$jsonSchema: {items: 1}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {items: 1.0}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {items: "true"}}).itcount());
-
- // Test that "items" has no effect at the top level (but is still accepted).
- assertSchemaMatch(coll, {items: {type: "number"}}, {}, true);
- assertSchemaMatch(coll, {items: [{type: "number"}]}, {}, true);
-
- // Test that "items" matches documents where the field is missing or not an array.
- assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {}, true);
- assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {a: -1}, true);
- assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {}, true);
- assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {a: -1}, true);
-
- // Test that when "items" is an object, the schema applies to all elements of the array.
- let schema = {properties: {a: {items: {pattern: "a+b"}}}};
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [7]}, true);
- assertSchemaMatch(coll, schema, {a: [null]}, true);
- assertSchemaMatch(coll, schema, {a: ["cab"]}, true);
- assertSchemaMatch(coll, schema, {a: ["cab", "caab"]}, true);
- assertSchemaMatch(coll, schema, {a: ["cab", "caab", "b"]}, false);
-
- // Test that when "items" is an array, each element schema only apply to elements at that
- // position.
- schema = {properties: {a: {items: [{multipleOf: 2}]}}};
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [2]}, true);
- assertSchemaMatch(coll, schema, {a: [2, 3]}, true);
- assertSchemaMatch(coll, schema, {a: [3]}, false);
-
- schema = {properties: {a: {items: [{maxLength: 1}, {maxLength: 2}]}}};
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: ["1"]}, true);
- assertSchemaMatch(coll, schema, {a: ["1"]}, true);
- assertSchemaMatch(coll, schema, {a: ["1", "12"]}, true);
- assertSchemaMatch(coll, schema, {a: ["1", "12", "123"]}, true);
- assertSchemaMatch(coll, schema, {a: ["12"]}, false);
- assertSchemaMatch(coll, schema, {a: ["1", "123"]}, false);
-
- // Test that "items" has no effect when it is an empty array (but is still accepted).
- schema = {properties: {a: {items: []}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: "blah"}, true);
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1, "foo", {}]}, true);
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+const coll = db.getCollection("json_schema_items");
+coll.drop();
+
+// Test that the JSON Schema fails to parse if "items" is not an object or array.
+assert.throws(() => coll.find({$jsonSchema: {items: 1}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {items: 1.0}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {items: "true"}}).itcount());
+
+// Test that "items" has no effect at the top level (but is still accepted).
+assertSchemaMatch(coll, {items: {type: "number"}}, {}, true);
+assertSchemaMatch(coll, {items: [{type: "number"}]}, {}, true);
+
+// Test that "items" matches documents where the field is missing or not an array.
+assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {}, true);
+assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {a: -1}, true);
+assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {}, true);
+assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {a: -1}, true);
+
+// Test that when "items" is an object, the schema applies to all elements of the array.
+let schema = {properties: {a: {items: {pattern: "a+b"}}}};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [7]}, true);
+assertSchemaMatch(coll, schema, {a: [null]}, true);
+assertSchemaMatch(coll, schema, {a: ["cab"]}, true);
+assertSchemaMatch(coll, schema, {a: ["cab", "caab"]}, true);
+assertSchemaMatch(coll, schema, {a: ["cab", "caab", "b"]}, false);
+
+// Test that when "items" is an array, each element schema only apply to elements at that
+// position.
+schema = {
+ properties: {a: {items: [{multipleOf: 2}]}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [2]}, true);
+assertSchemaMatch(coll, schema, {a: [2, 3]}, true);
+assertSchemaMatch(coll, schema, {a: [3]}, false);
+
+schema = {
+ properties: {a: {items: [{maxLength: 1}, {maxLength: 2}]}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: ["1"]}, true);
+assertSchemaMatch(coll, schema, {a: ["1"]}, true);
+assertSchemaMatch(coll, schema, {a: ["1", "12"]}, true);
+assertSchemaMatch(coll, schema, {a: ["1", "12", "123"]}, true);
+assertSchemaMatch(coll, schema, {a: ["12"]}, false);
+assertSchemaMatch(coll, schema, {a: ["1", "123"]}, false);
+
+// Test that "items" has no effect when it is an empty array (but is still accepted).
+schema = {
+ properties: {a: {items: []}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: "blah"}, true);
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1, "foo", {}]}, true);
}());
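
A note on the two forms of "items" exercised above: a single schema applies to every element
of the array, while an array of schemas applies positionally and leaves elements beyond its
length unconstrained. A minimal sketch under the same shell assumptions (collection name
illustrative):

const itemsDemo = db.items_demo;
itemsDemo.drop();
assert.writeOK(itemsDemo.insert({_id: 0, a: [2, 3]}));
// Object form: every element must satisfy {minimum: 3}, so [2, 3] does not match.
assert.eq(0, itemsDemo.find({$jsonSchema: {properties: {a: {items: {minimum: 3}}}}}).itcount());
// Array form: only a[0] is constrained, so [2, 3] matches.
assert.eq(1, itemsDemo.find({$jsonSchema: {properties: {a: {items: [{minimum: 2}]}}}}).itcount());
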
diff --git a/jstests/core/json_schema/json_schema.js b/jstests/core/json_schema/json_schema.js
index 613b14af226..13a10fde323 100644
--- a/jstests/core/json_schema/json_schema.js
+++ b/jstests/core/json_schema/json_schema.js
@@ -5,340 +5,335 @@
* Tests for JSON Schema document validation.
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- let coll = db.jstests_json_schema;
- coll.drop();
-
- assert.writeOK(coll.insert({_id: 0, num: 3}));
- assert.writeOK(coll.insert({_id: 1, num: -3}));
- assert.writeOK(coll.insert({_id: 2, num: NumberInt(2)}));
- assert.writeOK(coll.insert({_id: 3, num: NumberInt(-2)}));
- assert.writeOK(coll.insert({_id: 4, num: NumberLong(1)}));
- assert.writeOK(coll.insert({_id: 5, num: NumberLong(-1)}));
- assert.writeOK(coll.insert({_id: 6, num: {}}));
- assert.writeOK(coll.insert({_id: 7, num: "str"}));
- assert.writeOK(coll.insert({_id: 8, num: "string"}));
- assert.writeOK(coll.insert({_id: 9}));
-
- // Test that $jsonSchema fails to parse if its argument is not an object.
- assert.throws(function() {
- coll.find({$jsonSchema: "foo"}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: []}).itcount();
- });
-
- // Test that $jsonSchema fails to parse if the value for the "type" keyword is not a string.
- assert.throws(function() {
- coll.find({$jsonSchema: {type: 3}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {type: {}}}).itcount();
- });
-
- // Test that $jsonSchema fails to parse if the value for the "type" keyword is an unsupported
- // alias.
- assert.throws(function() {
- coll.find({$jsonSchema: {type: 'integer'}}).itcount();
- });
-
- // Test that $jsonSchema fails to parse if the value for the properties keyword is not an
- // object.
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: 3}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: []}}).itcount();
- });
-
- // Test that $jsonSchema fails to parse if one of the properties named inside the argument for
- // the properties keyword is not an object.
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: "number"}}}).itcount();
- });
-
- // Test that $jsonSchema fails to parse if the values for the maximum, maxLength, and
- // minLength keywords are not numbers.
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {maximum: "0"}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {maximum: {}}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {maxLength: "0"}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {maxLength: {}}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {minLength: "0"}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {minLength: {}}}}}).itcount();
- });
-
- // Test that the empty schema matches everything.
- assert.eq(10, coll.find({$jsonSchema: {}}).itcount());
-
- // Test that a schema just checking that the type of stored documents is "object" is legal and
- // matches everything.
- assert.eq(10, coll.find({$jsonSchema: {type: "object"}}).itcount());
-
- // Test that schemas whose top-level type is not object match nothing.
- assert.eq(0, coll.find({$jsonSchema: {type: "string"}}).itcount());
- assert.eq(0, coll.find({$jsonSchema: {bsonType: "long"}}).itcount());
- assert.eq(0, coll.find({$jsonSchema: {bsonType: "objectId"}}).itcount());
-
- // Test that type:"number" only matches numbers, or documents where the field is missing.
- assert.eq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "number"}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that maximum restriction is enforced correctly.
- assert.eq([{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "number", maximum: -1}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Repeat the test, but include an explicit top-level type:"object".
- assert.eq(
- [{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}],
- coll.find({$jsonSchema: {type: "object", properties: {num: {type: "number", maximum: -1}}}},
- {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that type:"long" only matches longs, or documents where the field is missing.
- assert.eq([{_id: 4}, {_id: 5}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {bsonType: "long"}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that maximum restriction is enforced correctly with bsonType:"long".
- assert.eq(
- [{_id: 5}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {bsonType: "long", maximum: 0}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that maximum restriction without a numeric type specified only applies to numbers.
- assert.eq([{_id: 1}, {_id: 3}, {_id: 5}, {_id: 6}, {_id: 7}, {_id: 8}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {maximum: 0}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that maximum restriction does nothing if a non-numeric type is also specified.
- assert.eq([{_id: 7}, {_id: 8}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "string", maximum: 0}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that maxLength restriction doesn't return strings with length greater than maxLength.
- assert.eq(
- [{_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 2}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that maxLength restriction returns strings with length less than or equal to maxLength.
- assert.eq(
- [{_id: 7}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 3}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that minLength restriction doesn't return strings with length less than minLength.
- assert.eq(
- [{_id: 8}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 4}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that minLength restriction returns strings with length greater than or equal to
- // minLength.
- assert.eq(
- [{_id: 7}, {_id: 8}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 3}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that $jsonSchema fails to parse if the value for the pattern keyword is not a string.
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {pattern: 0}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {num: {pattern: {}}}}}).itcount();
- });
-
- // Tests that the pattern keyword only returns strings that match the regex pattern.
- assert.eq(
- [{_id: 8}, {_id: 9}],
- coll.find({$jsonSchema: {properties: {num: {type: "string", pattern: "ing"}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, obj: 3}));
- assert.writeOK(coll.insert({_id: 1, obj: {f1: {f3: "str"}, f2: "str"}}));
- assert.writeOK(coll.insert({_id: 2, obj: {f1: "str", f2: "str"}}));
- assert.writeOK(coll.insert({_id: 3, obj: {f1: 1, f2: "str"}}));
-
- // Test that properties keyword can be used recursively, and that it does not apply when the
- // field does not contain an object.
- assert.eq([{_id: 0}, {_id: 1}],
- coll.find({
- $jsonSchema: {
- properties: {
- obj: {
- properties: {
- f1: {type: "object", properties: {f3: {type: "string"}}},
- f2: {type: "string"}
- }
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+let coll = db.jstests_json_schema;
+coll.drop();
+
+assert.writeOK(coll.insert({_id: 0, num: 3}));
+assert.writeOK(coll.insert({_id: 1, num: -3}));
+assert.writeOK(coll.insert({_id: 2, num: NumberInt(2)}));
+assert.writeOK(coll.insert({_id: 3, num: NumberInt(-2)}));
+assert.writeOK(coll.insert({_id: 4, num: NumberLong(1)}));
+assert.writeOK(coll.insert({_id: 5, num: NumberLong(-1)}));
+assert.writeOK(coll.insert({_id: 6, num: {}}));
+assert.writeOK(coll.insert({_id: 7, num: "str"}));
+assert.writeOK(coll.insert({_id: 8, num: "string"}));
+assert.writeOK(coll.insert({_id: 9}));
+
+// Test that $jsonSchema fails to parse if its argument is not an object.
+assert.throws(function() {
+ coll.find({$jsonSchema: "foo"}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: []}).itcount();
+});
+
+// Test that $jsonSchema fails to parse if the value for the "type" keyword is not a string.
+assert.throws(function() {
+ coll.find({$jsonSchema: {type: 3}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {type: {}}}).itcount();
+});
+
+// Test that $jsonSchema fails to parse if the value for the "type" keyword is an unsupported
+// alias.
+assert.throws(function() {
+ coll.find({$jsonSchema: {type: 'integer'}}).itcount();
+});
+
+// Test that $jsonSchema fails to parse if the value for the properties keyword is not an
+// object.
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: 3}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: []}}).itcount();
+});
+
+// Test that $jsonSchema fails to parse if one of the properties named inside the argument for
+// the properties keyword is not an object.
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: "number"}}}).itcount();
+});
+
+// Test that $jsonSchema fails to parse if the values for the maximum, maxLength, and
+// minLength keywords are not numbers.
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {maximum: "0"}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {maximum: {}}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {maxLength: "0"}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {maxLength: {}}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {minLength: "0"}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {minLength: {}}}}}).itcount();
+});
+
+// Test that the empty schema matches everything.
+assert.eq(10, coll.find({$jsonSchema: {}}).itcount());
+
+// Test that a schema just checking that the type of stored documents is "object" is legal and
+// matches everything.
+assert.eq(10, coll.find({$jsonSchema: {type: "object"}}).itcount());
+
+// Test that schemas whose top-level type is not object match nothing.
+assert.eq(0, coll.find({$jsonSchema: {type: "string"}}).itcount());
+assert.eq(0, coll.find({$jsonSchema: {bsonType: "long"}}).itcount());
+assert.eq(0, coll.find({$jsonSchema: {bsonType: "objectId"}}).itcount());
+
+// Test that type:"number" only matches numbers, or documents where the field is missing.
+assert.eq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "number"}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that maximum restriction is enforced correctly.
+assert.eq([{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "number", maximum: -1}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Repeat the test, but include an explicit top-level type:"object".
+assert.eq(
+ [{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}],
+ coll.find({$jsonSchema: {type: "object", properties: {num: {type: "number", maximum: -1}}}},
+ {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that type:"long" only matches longs, or documents where the field is missing.
+assert.eq([{_id: 4}, {_id: 5}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {bsonType: "long"}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that maximum restriction is enforced correctly with bsonType:"long".
+assert.eq([{_id: 5}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {bsonType: "long", maximum: 0}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that maximum restriction without a numeric type specified only applies to numbers.
+assert.eq(
+ [{_id: 1}, {_id: 3}, {_id: 5}, {_id: 6}, {_id: 7}, {_id: 8}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {maximum: 0}}}}, {_id: 1}).sort({_id: 1}).toArray());
+
+// Test that maximum restriction does nothing if a non-numeric type is also specified.
+assert.eq([{_id: 7}, {_id: 8}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "string", maximum: 0}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that maxLength restriction doesn't return strings with length greater than maxLength.
+assert.eq([{_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 2}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that maxLength restriction returns strings with length less than or equal to maxLength.
+assert.eq([{_id: 7}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 3}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that minLength restriction doesn't return strings with length less than minLength.
+assert.eq([{_id: 8}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 4}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that minLength restriction returns strings with length greater than or equal to
+// minLength.
+assert.eq([{_id: 7}, {_id: 8}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 3}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that $jsonSchema fails to parse if the value for the pattern keyword is not a string.
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {pattern: 0}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {num: {pattern: {}}}}}).itcount();
+});
+
+// Tests that the pattern keyword only returns strings that match the regex pattern.
+assert.eq([{_id: 8}, {_id: 9}],
+ coll.find({$jsonSchema: {properties: {num: {type: "string", pattern: "ing"}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, obj: 3}));
+assert.writeOK(coll.insert({_id: 1, obj: {f1: {f3: "str"}, f2: "str"}}));
+assert.writeOK(coll.insert({_id: 2, obj: {f1: "str", f2: "str"}}));
+assert.writeOK(coll.insert({_id: 3, obj: {f1: 1, f2: "str"}}));
+
+// Test that properties keyword can be used recursively, and that it does not apply when the
+// field does not contain an object.
+assert.eq([{_id: 0}, {_id: 1}],
+ coll.find({
+ $jsonSchema: {
+ properties: {
+ obj: {
+ properties: {
+ f1: {type: "object", properties: {f3: {type: "string"}}},
+ f2: {type: "string"}
}
}
}
- },
- {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that $jsonSchema can be combined with other operators in the match language.
- assert.eq(
- [{_id: 0}, {_id: 1}, {_id: 2}],
- coll.find({
- $or: [
- {"obj.f1": "str"},
- {
- $jsonSchema: {
- properties: {
- obj: {
- properties: {
- f1: {type: "object", properties: {f3: {type: "string"}}},
- f2: {type: "string"}
+ }
+ },
+ {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that $jsonSchema can be combined with other operators in the match language.
+assert.eq([{_id: 0}, {_id: 1}, {_id: 2}],
+ coll.find({
+ $or: [
+ {"obj.f1": "str"},
+ {
+ $jsonSchema: {
+ properties: {
+ obj: {
+ properties: {
+ f1: {type: "object", properties: {f3: {type: "string"}}},
+ f2: {type: "string"}
+ }
}
}
}
}
- }
- ]
- },
- {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, arr: 3}));
- assert.writeOK(coll.insert({_id: 1, arr: [1, "foo"]}));
- assert.writeOK(coll.insert({_id: 2, arr: [{a: 1}, {b: 2}]}));
- assert.writeOK(coll.insert({_id: 3, arr: []}));
- assert.writeOK(coll.insert({_id: 4, arr: {a: []}}));
-
- // Test that the type:"array" restriction works as expected.
- assert.eq([{_id: 1}, {_id: 2}, {_id: 3}],
- coll.find({$jsonSchema: {properties: {arr: {type: "array"}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that type:"number" works correctly in the presence of arrays.
- assert.eq([{_id: 0}],
- coll.find({$jsonSchema: {properties: {arr: {type: "number"}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray());
-
- // Test that the following keywords fail to parse although present in the spec:
- // - default
- // - definitions
- // - format
- // - id
- // - $ref
- // - $schema
- let res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- // Test that the following whitelisted keywords are verified as strings but otherwise ignored
- // in a top-level schema:
- // - description
- // - title
- assertSchemaMatch(coll, {description: "test"}, {}, true);
- assertSchemaMatch(coll, {title: "insert title"}, {}, true);
-
- // Repeat the test above with a nested schema.
- assertSchemaMatch(coll, {properties: {a: {description: "test"}}}, {a: {}}, true);
- assertSchemaMatch(coll, {properties: {a: {title: "this is a's title"}}}, {a: {}}, true);
-
- // Test that the $jsonSchema validator is correctly stored in the collection catalog.
- coll.drop();
- let schema = {properties: {a: {type: 'number'}, b: {minLength: 1}}};
- assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}}));
-
- let listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}});
- assert.commandWorked(listCollectionsOutput);
- assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema});
-
- // Repeat the test above using the whitelisted metadata keywords.
- coll.drop();
- schema = {title: "Test schema", description: "Metadata keyword test"};
- assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}}));
-
- listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}});
- assert.commandWorked(listCollectionsOutput);
- assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema});
-
- // Repeat again with a nested schema.
- coll.drop();
- schema = {properties: {a: {title: "Nested title", description: "Nested description"}}};
- assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}}));
-
- listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}});
- assert.commandWorked(listCollectionsOutput);
- assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema});
-
- // Test that $jsonSchema and various internal match expressions work correctly with sibling
- // predicates.
- coll.drop();
- assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
- assert.writeOK(coll.insert({_id: 2, a: 2, b: 2}));
-
- assert.eq(1,
- coll.find({$jsonSchema: {properties: {a: {type: "number"}}, required: ["a"]}, b: 1})
- .itcount());
- assert.eq(1, coll.find({$or: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount());
- assert.eq(1, coll.find({$and: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount());
-
- assert.eq(1, coll.find({$_internalSchemaMinProperties: 3, b: 2}).itcount());
- assert.eq(1, coll.find({$_internalSchemaMaxProperties: 3, b: 2}).itcount());
- assert.eq(1, coll.find({$alwaysTrue: 1, b: 2}).itcount());
- assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount());
+ ]
+ },
+ {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, arr: 3}));
+assert.writeOK(coll.insert({_id: 1, arr: [1, "foo"]}));
+assert.writeOK(coll.insert({_id: 2, arr: [{a: 1}, {b: 2}]}));
+assert.writeOK(coll.insert({_id: 3, arr: []}));
+assert.writeOK(coll.insert({_id: 4, arr: {a: []}}));
+
+// Test that the type:"array" restriction works as expected.
+assert.eq([{_id: 1}, {_id: 2}, {_id: 3}],
+ coll.find({$jsonSchema: {properties: {arr: {type: "array"}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that type:"number" works correctly in the presence of arrays.
+assert.eq([{_id: 0}],
+ coll.find({$jsonSchema: {properties: {arr: {type: "number"}}}}, {_id: 1})
+ .sort({_id: 1})
+ .toArray());
+
+// Test that the following keywords fail to parse although present in the spec:
+// - default
+// - definitions
+// - format
+// - id
+// - $ref
+// - $schema
+let res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+res = coll.runCommand(
+ {find: coll.getName(), query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}}
+});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}}
+});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+// Test that the following whitelisted keywords are verified as strings but otherwise ignored
+// in a top-level schema:
+// - description
+// - title
+assertSchemaMatch(coll, {description: "test"}, {}, true);
+assertSchemaMatch(coll, {title: "insert title"}, {}, true);
+
+// Repeat the test above with a nested schema.
+assertSchemaMatch(coll, {properties: {a: {description: "test"}}}, {a: {}}, true);
+assertSchemaMatch(coll, {properties: {a: {title: "this is a's title"}}}, {a: {}}, true);
+
+// Test that the $jsonSchema validator is correctly stored in the collection catalog.
+coll.drop();
+let schema = {properties: {a: {type: 'number'}, b: {minLength: 1}}};
+assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}}));
+
+let listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}});
+assert.commandWorked(listCollectionsOutput);
+assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema});
+
+// Repeat the test above using the whitelisted metadata keywords.
+coll.drop();
+schema = {
+ title: "Test schema",
+ description: "Metadata keyword test"
+};
+assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}}));
+
+listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}});
+assert.commandWorked(listCollectionsOutput);
+assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema});
+
+// Repeat again with a nested schema.
+coll.drop();
+schema = {
+ properties: {a: {title: "Nested title", description: "Nested description"}}
+};
+assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}}));
+
+listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}});
+assert.commandWorked(listCollectionsOutput);
+assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema});
+
+// Test that $jsonSchema and various internal match expressions work correctly with sibling
+// predicates.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
+assert.writeOK(coll.insert({_id: 2, a: 2, b: 2}));
+
+assert.eq(
+ 1,
+ coll.find({$jsonSchema: {properties: {a: {type: "number"}}, required: ["a"]}, b: 1}).itcount());
+assert.eq(1, coll.find({$or: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount());
+assert.eq(1, coll.find({$and: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount());
+
+assert.eq(1, coll.find({$_internalSchemaMinProperties: 3, b: 2}).itcount());
+assert.eq(1, coll.find({$_internalSchemaMaxProperties: 3, b: 2}).itcount());
+assert.eq(1, coll.find({$alwaysTrue: 1, b: 2}).itcount());
+assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount());
}());
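
The catalog round-trip checked at the end of the hunk above can be reproduced directly: a
$jsonSchema passed as a collection validator is stored verbatim, reported back by
listCollections, and enforced on writes. A minimal sketch under the same shell assumptions
(collection and field names illustrative):

const validatorDemo = db.getCollection("json_schema_validator_demo");
validatorDemo.drop();
const demoSchema = {properties: {num: {type: "number", maximum: 10}}};
assert.commandWorked(
    db.createCollection(validatorDemo.getName(), {validator: {$jsonSchema: demoSchema}}));
const output = db.runCommand({listCollections: 1, filter: {name: validatorDemo.getName()}});
assert.commandWorked(output);
assert.eq(output.cursor.firstBatch[0].options.validator, {$jsonSchema: demoSchema});
// The stored validator now gates inserts.
assert.writeOK(validatorDemo.insert({num: 5}));
assert.writeError(validatorDemo.insert({num: 11}));
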
diff --git a/jstests/core/json_schema/logical_keywords.js b/jstests/core/json_schema/logical_keywords.js
index 507123e2c69..3b7895f27cd 100644
--- a/jstests/core/json_schema/logical_keywords.js
+++ b/jstests/core/json_schema/logical_keywords.js
@@ -10,222 +10,268 @@
* - enum
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- const coll = db.jstests_json_schema_logical;
-
- // Test that $jsonSchema fails to parse if the values for the allOf, anyOf, and oneOf
- // keywords are not arrays of valid schemas.
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {allOf: {maximum: "0"}}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {allOf: [0]}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {allOf: [{invalid: "0"}]}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {anyOf: {maximum: "0"}}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {anyOf: [0]}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {anyOf: [{invalid: "0"}]}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {oneOf: {maximum: "0"}}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {oneOf: [0]}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {oneOf: [{invalid: "0"}]}}}}).itcount();
- });
-
- // Test that $jsonSchema fails to parse if the value for the 'not' keyword is not a
- // valid schema object.
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {not: {maximum: "0"}}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {not: [0]}}}}).itcount();
- });
- assert.throws(function() {
- coll.find({$jsonSchema: {properties: {foo: {not: [{}]}}}}).itcount();
- });
-
- // Test that the 'allOf' keyword correctly returns documents that match every schema in
- // the array.
- let schema = {properties: {foo: {allOf: [{minimum: 1}]}}};
- assertSchemaMatch(coll, schema, {foo: 1}, true);
- assertSchemaMatch(coll, schema, {foo: 0}, false);
- assertSchemaMatch(coll, schema, {foo: "string"}, true);
-
- schema = {properties: {foo: {allOf: [{}]}}};
- assertSchemaMatch(coll, schema, {foo: {}}, true);
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: 0}, true);
-
- schema = {properties: {foo: {allOf: [{type: 'number'}, {minimum: 0}]}}};
- assertSchemaMatch(coll, schema, {foo: 0}, true);
- assertSchemaMatch(coll, schema, {foo: "string"}, false);
- assertSchemaMatch(coll, schema, {foo: [0]}, false);
-
- // Test that a top-level 'allOf' keyword matches the correct documents.
- assertSchemaMatch(coll, {allOf: [{}]}, {}, true);
- assertSchemaMatch(coll, {allOf: [{}]}, {foo: 0}, true);
- assertSchemaMatch(coll, {allOf: [{type: 'string'}]}, {}, false);
- assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: "str"}, true);
- assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: 1}, false);
-
- // Test that 'allOf' in conjunction with another keyword matches the correct documents.
- assertSchemaMatch(
- coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: 1}, true);
- assertSchemaMatch(
- coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: "str"}, false);
-
- // Test that the 'anyOf' keyword correctly returns documents that match at least one schema
- // in the array.
- schema = {properties: {foo: {anyOf: [{type: 'string'}, {type: 'number', minimum: 1}]}}};
- assertSchemaMatch(coll, schema, {foo: "str"}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, true);
- assertSchemaMatch(coll, schema, {foo: 0}, false);
-
- schema = {properties: {foo: {anyOf: [{type: 'string'}, {type: 'object'}]}}};
- assertSchemaMatch(coll, schema, {foo: {}}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, true);
- assertSchemaMatch(coll, schema, {foo: [{}]}, false);
-
- schema = {properties: {foo: {anyOf: [{}]}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: {}}, true);
- assertSchemaMatch(coll, schema, {foo: 0}, true);
-
- // Test that a top-level 'anyOf' keyword matches the correct documents.
- schema = {anyOf: [{}]};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, true);
-
- schema = {anyOf: [{properties: {foo: {type: 'string'}}}]};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, false);
-
- // Test that 'anyOf' in conjunction with another keyword matches the correct documents.
- schema = {properties: {foo: {type: "number", anyOf: [{minimum: 1}]}}};
- assertSchemaMatch(coll, schema, {foo: 1}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, false);
-
- // Test that the 'oneOf' keyword correctly returns documents that match exactly one schema
- // in the array.
- schema = {properties: {foo: {oneOf: [{minimum: 0}, {maximum: 3}]}}};
- assertSchemaMatch(coll, schema, {foo: 4}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, false);
- assertSchemaMatch(coll, schema, {foo: "str"}, false);
-
- schema = {properties: {foo: {oneOf: [{type: 'string'}, {pattern: "ing"}]}}};
- assertSchemaMatch(coll, schema, {foo: "str"}, true);
- assertSchemaMatch(coll, schema, {foo: "string"}, false);
-
- schema = {properties: {foo: {oneOf: [{}]}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, true);
-
- // Test that a top-level 'oneOf' keyword matches the correct documents.
- schema = {oneOf: [{}]};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, true);
-
- schema = {oneOf: [{properties: {foo: {type: 'string'}}}]};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, false);
-
- assertSchemaMatch(coll, {oneOf: [{}, {}]}, {}, false);
-
- // Test that 'oneOf' in conjunction with another keyword matches the correct documents.
- schema = {properties: {foo: {type: "number", oneOf: [{minimum: 4}]}}};
- assertSchemaMatch(coll, schema, {foo: 4}, true);
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, false);
-
- // Test that the 'not' keyword correctly returns documents that do not match the given
- // schema.
- schema = {properties: {foo: {not: {type: 'number'}}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, false);
-
- schema = {properties: {foo: {not: {}}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: 1}, false);
-
- // Test that a top-level 'not' keyword matches the correct documents.
- assertSchemaMatch(coll, {not: {}}, {}, false);
-
- schema = {not: {properties: {foo: {type: 'string'}}}};
- assertSchemaMatch(coll, schema, {foo: 1}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, false);
- assertSchemaMatch(coll, schema, {}, false);
-
- // Test that 'not' in conjunction with another keyword matches the correct documents.
- schema = {properties: {foo: {type: "string", not: {maxLength: 4}}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {foo: "string"}, true);
- assertSchemaMatch(coll, schema, {foo: "str"}, false);
- assertSchemaMatch(coll, schema, {foo: 1}, false);
-
- // Test that the 'enum' keyword correctly matches scalar values.
- schema = {properties: {a: {enum: ["str", 5]}}};
- assertSchemaMatch(coll, schema, {a: "str"}, true);
- assertSchemaMatch(coll, schema, {a: 5}, true);
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: ["str"]}, false);
-
- // Test that the 'enum' keyword with a null value correctly matches literal null elements, but
- // not 'missing' or 'undefined'.
- schema = {properties: {a: {enum: [null]}}};
- assertSchemaMatch(coll, schema, {a: null}, true);
- assertSchemaMatch(coll, schema, {a: undefined}, false);
- assertSchemaMatch(coll, schema, {a: 1}, false);
- assertSchemaMatch(coll, {properties: {a: {enum: [null]}}, required: ['a']}, {}, false);
-
- // Test that the 'enum' keyword correctly matches array values.
- schema = {properties: {a: {enum: [[1, 2, "3"]]}}};
- assertSchemaMatch(coll, schema, {a: [1, 2, "3"]}, true);
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: [2, "3", 1]}, false);
-
- schema = {properties: {a: {enum: [[]]}}};
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: [1]}, false);
-
- // Test that the 'enum' keyword does not traverse arrays when matching.
- schema = {properties: {a: {enum: ["str", 1]}}};
- assertSchemaMatch(coll, schema, {a: ["str"]}, false);
- assertSchemaMatch(coll, schema, {a: [1]}, false);
-
- // Test that the 'enum' keyword matches objects regardless of the field ordering.
- schema = {properties: {a: {enum: [{name: "tiny", size: "large"}]}}};
- assertSchemaMatch(coll, schema, {a: {name: "tiny", size: "large"}}, true);
- assertSchemaMatch(coll, schema, {a: {size: "large", name: "tiny"}}, true);
-
- // Test that the 'enum' keyword does not match documents with additional fields.
- assertSchemaMatch(coll,
- {properties: {a: {enum: [{name: "tiny"}]}}},
- {a: {size: "large", name: "tiny"}},
- false);
-
- // Test that a top-level 'enum' matches the correct documents.
- assertSchemaMatch(coll, {enum: [{_id: 0}]}, {_id: 0}, true);
- assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str"}, true);
- assertSchemaMatch(coll, {enum: [{}]}, {}, false);
- assertSchemaMatch(coll, {enum: [null]}, {}, false);
- assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str", b: 1}, false);
- assertSchemaMatch(coll, {enum: [1, 2]}, {}, false);
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+const coll = db.jstests_json_schema_logical;
+
+// Test that $jsonSchema fails to parse if the values for the allOf, anyOf, and oneOf
+// keywords are not arrays of valid schemas.
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {allOf: {maximum: "0"}}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {allOf: [0]}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {allOf: [{invalid: "0"}]}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {anyOf: {maximum: "0"}}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {anyOf: [0]}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {anyOf: [{invalid: "0"}]}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {oneOf: {maximum: "0"}}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {oneOf: [0]}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {oneOf: [{invalid: "0"}]}}}}).itcount();
+});
+
+// Test that $jsonSchema fails to parse if the value for the 'not' keyword is not a
+// valid schema object.
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {not: {maximum: "0"}}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {not: [0]}}}}).itcount();
+});
+assert.throws(function() {
+ coll.find({$jsonSchema: {properties: {foo: {not: [{}]}}}}).itcount();
+});
+
+// Test that the 'allOf' keyword correctly returns documents that match every schema in
+// the array.
+let schema = {properties: {foo: {allOf: [{minimum: 1}]}}};
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+assertSchemaMatch(coll, schema, {foo: 0}, false);
+assertSchemaMatch(coll, schema, {foo: "string"}, true);
+
+schema = {
+ properties: {foo: {allOf: [{}]}}
+};
+assertSchemaMatch(coll, schema, {foo: {}}, true);
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: 0}, true);
+
+schema = {
+ properties: {foo: {allOf: [{type: 'number'}, {minimum: 0}]}}
+};
+assertSchemaMatch(coll, schema, {foo: 0}, true);
+assertSchemaMatch(coll, schema, {foo: "string"}, false);
+assertSchemaMatch(coll, schema, {foo: [0]}, false);
+
+// Test that a top-level 'allOf' keyword matches the correct documents.
+assertSchemaMatch(coll, {allOf: [{}]}, {}, true);
+assertSchemaMatch(coll, {allOf: [{}]}, {foo: 0}, true);
+assertSchemaMatch(coll, {allOf: [{type: 'string'}]}, {}, false);
+assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: "str"}, true);
+assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: 1}, false);
+
+// Test that 'allOf' in conjunction with another keyword matches the correct documents.
+assertSchemaMatch(
+ coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: "str"}, false);
+
+// Test that the 'anyOf' keyword correctly returns documents that match at least one schema
+// in the array.
+schema = {
+ properties: {foo: {anyOf: [{type: 'string'}, {type: 'number', minimum: 1}]}}
+};
+assertSchemaMatch(coll, schema, {foo: "str"}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+assertSchemaMatch(coll, schema, {foo: 0}, false);
+
+schema = {
+ properties: {foo: {anyOf: [{type: 'string'}, {type: 'object'}]}}
+};
+assertSchemaMatch(coll, schema, {foo: {}}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, true);
+assertSchemaMatch(coll, schema, {foo: [{}]}, false);
+
+schema = {
+ properties: {foo: {anyOf: [{}]}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: {}}, true);
+assertSchemaMatch(coll, schema, {foo: 0}, true);
+
+// Test that a top-level 'anyOf' keyword matches the correct documents.
+schema = {
+ anyOf: [{}]
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+
+schema = {
+ anyOf: [{properties: {foo: {type: 'string'}}}]
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, false);
+
+// Test that 'anyOf' in conjunction with another keyword matches the correct documents.
+schema = {
+ properties: {foo: {type: "number", anyOf: [{minimum: 1}]}}
+};
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, false);
+
+// Test that the 'oneOf' keyword correctly returns documents that match exactly one schema
+// in the array.
+schema = {
+ properties: {foo: {oneOf: [{minimum: 0}, {maximum: 3}]}}
+};
+assertSchemaMatch(coll, schema, {foo: 4}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, false);
+assertSchemaMatch(coll, schema, {foo: "str"}, false);
+
+schema = {
+ properties: {foo: {oneOf: [{type: 'string'}, {pattern: "ing"}]}}
+};
+assertSchemaMatch(coll, schema, {foo: "str"}, true);
+assertSchemaMatch(coll, schema, {foo: "string"}, false);
+
+schema = {
+ properties: {foo: {oneOf: [{}]}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+
+// Test that a top-level 'oneOf' keyword matches the correct documents.
+schema = {
+ oneOf: [{}]
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+
+schema = {
+ oneOf: [{properties: {foo: {type: 'string'}}}]
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, false);
+
+assertSchemaMatch(coll, {oneOf: [{}, {}]}, {}, false);
+
+// Test that 'oneOf' in conjunction with another keyword matches the correct documents.
+schema = {
+ properties: {foo: {type: "number", oneOf: [{minimum: 4}]}}
+};
+assertSchemaMatch(coll, schema, {foo: 4}, true);
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, false);
+
+// Test that the 'not' keyword correctly returns documents that do not match the given
+// schema.
+schema = {
+ properties: {foo: {not: {type: 'number'}}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, false);
+
+schema = {
+ properties: {foo: {not: {}}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: 1}, false);
+
+// Test that a top-level 'not' keyword matches the correct documents.
+assertSchemaMatch(coll, {not: {}}, {}, false);
+
+schema = {
+ not: {properties: {foo: {type: 'string'}}}
+};
+assertSchemaMatch(coll, schema, {foo: 1}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, false);
+assertSchemaMatch(coll, schema, {}, false);
+
+// Test that 'not' in conjunction with another keyword matches the correct documents.
+schema = {
+ properties: {foo: {type: "string", not: {maxLength: 4}}}
+};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {foo: "string"}, true);
+assertSchemaMatch(coll, schema, {foo: "str"}, false);
+assertSchemaMatch(coll, schema, {foo: 1}, false);
+
+// Test that the 'enum' keyword correctly matches scalar values.
+schema = {
+ properties: {a: {enum: ["str", 5]}}
+};
+assertSchemaMatch(coll, schema, {a: "str"}, true);
+assertSchemaMatch(coll, schema, {a: 5}, true);
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: ["str"]}, false);
+
+// Test that the 'enum' keyword with a null value correctly matches literal null elements, but
+// not 'missing' or 'undefined'.
+schema = {
+ properties: {a: {enum: [null]}}
+};
+assertSchemaMatch(coll, schema, {a: null}, true);
+assertSchemaMatch(coll, schema, {a: undefined}, false);
+assertSchemaMatch(coll, schema, {a: 1}, false);
+assertSchemaMatch(coll, {properties: {a: {enum: [null]}}, required: ['a']}, {}, false);
+
+// Test that the 'enum' keyword correctly matches array values.
+schema = {
+ properties: {a: {enum: [[1, 2, "3"]]}}
+};
+assertSchemaMatch(coll, schema, {a: [1, 2, "3"]}, true);
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: [2, "3", 1]}, false);
+
+schema = {
+ properties: {a: {enum: [[]]}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: [1]}, false);
+
+// Test that the 'enum' keyword does not traverse arrays when matching.
+schema = {
+ properties: {a: {enum: ["str", 1]}}
+};
+assertSchemaMatch(coll, schema, {a: ["str"]}, false);
+assertSchemaMatch(coll, schema, {a: [1]}, false);
+
+// Test that the 'enum' keyword matches objects regardless of the field ordering.
+schema = {
+ properties: {a: {enum: [{name: "tiny", size: "large"}]}}
+};
+assertSchemaMatch(coll, schema, {a: {name: "tiny", size: "large"}}, true);
+assertSchemaMatch(coll, schema, {a: {size: "large", name: "tiny"}}, true);
+
+// Test that the 'enum' keyword does not match documents with additional fields.
+assertSchemaMatch(
+ coll, {properties: {a: {enum: [{name: "tiny"}]}}}, {a: {size: "large", name: "tiny"}}, false);
+
+// Test that a top-level 'enum' matches the correct documents.
+assertSchemaMatch(coll, {enum: [{_id: 0}]}, {_id: 0}, true);
+assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str"}, true);
+assertSchemaMatch(coll, {enum: [{}]}, {}, false);
+assertSchemaMatch(coll, {enum: [null]}, {}, false);
+assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str", b: 1}, false);
+assertSchemaMatch(coll, {enum: [1, 2]}, {}, false);
}());
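
One subtlety worth spelling out from the hunk above: 'oneOf' requires that exactly one
subschema match, so a value satisfying several subschemas fails just as a value satisfying
none does. A minimal sketch under the same shell assumptions (collection name illustrative):

const logicalDemo = db.logical_keywords_demo;
logicalDemo.drop();
assert.writeOK(logicalDemo.insert({_id: 0, foo: 1}));  // satisfies both subschemas
assert.writeOK(logicalDemo.insert({_id: 1, foo: 4}));  // satisfies only {minimum: 0}
const oneOfSchema = {properties: {foo: {oneOf: [{minimum: 0}, {maximum: 3}]}}};
assert.eq([{_id: 1}],
          logicalDemo.find({$jsonSchema: oneOfSchema}, {_id: 1}).sort({_id: 1}).toArray());
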
diff --git a/jstests/core/json_schema/min_max_items.js b/jstests/core/json_schema/min_max_items.js
index 1dff469747d..3c27283a037 100644
--- a/jstests/core/json_schema/min_max_items.js
+++ b/jstests/core/json_schema/min_max_items.js
@@ -4,44 +4,44 @@
* Tests the JSON Schema keywords "minItems" and "maxItems".
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.getCollection("json_schema_min_max_items");
- coll.drop();
+const coll = db.getCollection("json_schema_min_max_items");
+coll.drop();
- // Test that the JSON Schema fails to parse if "minItems" is not a valid number.
- assert.throws(() => coll.find({$jsonSchema: {minItems: "blah"}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {minItems: -1}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {minItems: 12.5}}).itcount());
+// Test that the JSON Schema fails to parse if "minItems" is not a valid number.
+assert.throws(() => coll.find({$jsonSchema: {minItems: "blah"}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {minItems: -1}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {minItems: 12.5}}).itcount());
- // Test that "minItems" matches when the field is missing or not an array.
- assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {}, true);
- assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: "foo"}, true);
+// Test that "minItems" matches when the field is missing or not an array.
+assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {}, true);
+assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: "foo"}, true);
- // Test that "minItems" matches arrays with the requisite number of items.
- assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: []}, false);
- assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: ["x"]}, true);
- assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: [0, 1]}, true);
+// Test that "minItems" matches arrays with the requisite number of items.
+assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: []}, false);
+assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: ["x"]}, true);
+assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: [0, 1]}, true);
- // Test that "minItems" has no effect when specified at the top level.
- assertSchemaMatch(coll, {minItems: 2}, {}, true);
+// Test that "minItems" has no effect when specified at the top level.
+assertSchemaMatch(coll, {minItems: 2}, {}, true);
- // Test that the JSON Schema fails to parse if "maxItems" is not a valid number.
- assert.throws(() => coll.find({$jsonSchema: {maxItems: "blah"}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {maxItems: -1}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {maxItems: 12.5}}).itcount());
+// Test that the JSON Schema fails to parse if "maxItems" is not a valid number.
+assert.throws(() => coll.find({$jsonSchema: {maxItems: "blah"}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {maxItems: -1}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {maxItems: 12.5}}).itcount());
- // Test that "maxItems" matches when the field is missing or not an array.
- assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {}, true);
- assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: "foo"}, true);
+// Test that "maxItems" matches when the field is missing or not an array.
+assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {}, true);
+assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: "foo"}, true);
- // Test that "maxItems" matches arrays with the requisite number of items.
- assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: []}, true);
- assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: ["x"]}, true);
- assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: [0, 1]}, false);
+// Test that "maxItems" matches arrays with the requisite number of items.
+assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: []}, true);
+assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: ["x"]}, true);
+assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: [0, 1]}, false);
- // Test that "maxItems" has no effect when specified at the top level.
- assertSchemaMatch(coll, {maxItems: 2}, {}, true);
+// Test that "maxItems" has no effect when specified at the top level.
+assertSchemaMatch(coll, {maxItems: 2}, {}, true);
}());
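
As the hunk above demonstrates, "minItems" and "maxItems" constrain array length only;
non-array values and missing fields pass both bounds untouched. A minimal sketch under the
same shell assumptions (collection name illustrative):

const lengthDemo = db.min_max_items_demo;
lengthDemo.drop();
assert.writeOK(lengthDemo.insert({_id: 0, a: [1]}));
assert.writeOK(lengthDemo.insert({_id: 1, a: [1, 2, 3]}));
assert.writeOK(lengthDemo.insert({_id: 2, a: "not an array"}));
// The three-element array violates maxItems; the scalar is ignored by both keywords.
assert.eq([{_id: 0}, {_id: 2}],
          lengthDemo.find({$jsonSchema: {properties: {a: {minItems: 1, maxItems: 2}}}}, {_id: 1})
              .sort({_id: 1})
              .toArray());
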
diff --git a/jstests/core/json_schema/min_max_properties.js b/jstests/core/json_schema/min_max_properties.js
index fbfffceb96c..975a22fd527 100644
--- a/jstests/core/json_schema/min_max_properties.js
+++ b/jstests/core/json_schema/min_max_properties.js
@@ -4,46 +4,46 @@
* Tests for the JSON Schema 'minProperties' and 'maxProperties' keywords.
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- const coll = db.jstests_schema_min_max_properties;
-
- // Test that {minProperties: 0} matches any object.
- assertSchemaMatch(coll, {minProperties: 0}, {}, true);
- assertSchemaMatch(coll, {minProperties: 0}, {a: 1}, true);
- assertSchemaMatch(coll, {minProperties: 0}, {a: 1, b: 2}, true);
-
- // Test that {maxProperties: 0} matches nothing, since objects always must have the "_id" field
- // when inserted into a collection.
- assertSchemaMatch(coll, {maxProperties: 0}, {}, false);
- assertSchemaMatch(coll, {maxProperties: 0}, {a: 1}, false);
- assertSchemaMatch(coll, {maxProperties: 0}, {a: 1, b: 2}, false);
-
- // Test top-level minProperties greater than 0.
- assertSchemaMatch(coll, {minProperties: 2}, {_id: 0}, false);
- assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1}, true);
- assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1, b: 2}, true);
-
- // Test top-level maxProperties greater than 0.
- assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0}, true);
- assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1}, true);
- assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1, b: 2}, false);
-
- // Test nested maxProperties greater than 0.
- assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: 1}, true);
- assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {}}, true);
- assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1}}, true);
- assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1, c: 1}}, false);
-
- // Test nested maxProperties of 0.
- assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {}}, true);
- assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {b: 1}}, false);
-
- // Test nested minProperties greater than 0.
- assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: 1}, true);
- assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {}}, false);
- assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1}}, true);
- assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1, c: 1}}, true);
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+const coll = db.jstests_schema_min_max_properties;
+
+// Test that {minProperties: 0} matches any object.
+assertSchemaMatch(coll, {minProperties: 0}, {}, true);
+assertSchemaMatch(coll, {minProperties: 0}, {a: 1}, true);
+assertSchemaMatch(coll, {minProperties: 0}, {a: 1, b: 2}, true);
+
+// Test that {maxProperties: 0} matches nothing, since objects always must have the "_id" field
+// when inserted into a collection.
+assertSchemaMatch(coll, {maxProperties: 0}, {}, false);
+assertSchemaMatch(coll, {maxProperties: 0}, {a: 1}, false);
+assertSchemaMatch(coll, {maxProperties: 0}, {a: 1, b: 2}, false);
+
+// Test top-level minProperties greater than 0.
+assertSchemaMatch(coll, {minProperties: 2}, {_id: 0}, false);
+assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1}, true);
+assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1, b: 2}, true);
+
+// Test top-level maxProperties greater than 0.
+assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0}, true);
+assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1}, true);
+assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1, b: 2}, false);
+
+// Test nested maxProperties greater than 0.
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: 1}, true);
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {}}, true);
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1}}, true);
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1, c: 1}}, false);
+
+// Test nested maxProperties of 0.
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {}}, true);
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {b: 1}}, false);
+
+// Test nested minProperties greater than 0.
+assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: 1}, true);
+assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {}}, false);
+assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1}}, true);
+assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1, c: 1}}, true);
}());
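
For intuition on why {maxProperties: 0} above can never match: every document stored in
a collection carries an _id, so the top-level property count of a stored document is
always at least one. A standalone illustration in plain shell JavaScript (no server
round-trip involved):

// Property counts are taken over top-level fields only, and _id counts too.
const stored = {_id: 0, a: 1, b: 2};
assert.eq(3, Object.keys(stored).length);  // fails {maxProperties: 2}
assert(Object.keys(stored).length >= 2);   // satisfies {minProperties: 2}
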
diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js
index 5a126993902..7ac2fb60fb5 100644
--- a/jstests/core/json_schema/misc_validation.js
+++ b/jstests/core/json_schema/misc_validation.js
@@ -21,174 +21,177 @@
* ]
*/
(function() {
- "use strict";
-
- // For isWiredTiger.
- load("jstests/concurrency/fsm_workload_helpers/server_types.js");
- // For isReplSet
- load("jstests/libs/fixture_helpers.js");
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
-
- const testName = "json_schema_misc_validation";
- const testDB = db.getSiblingDB(testName);
- assert.commandWorked(testDB.dropDatabase());
- assert.commandWorked(testDB.createCollection(testName));
- const coll = testDB.getCollection(testName);
- coll.drop();
-
- const isMongos = (testDB.runCommand("ismaster").msg === "isdbgrid");
-
- // Test that $jsonSchema is rejected in an $elemMatch projection.
- assert.throws(function() {
- coll.find({}, {a: {$elemMatch: {$jsonSchema: {}}}}).itcount();
- });
-
- // Test that an invalid $jsonSchema fails to parse in a count command.
- const invalidSchema = {invalid: {}};
- assert.throws(function() {
- coll.count({$jsonSchema: invalidSchema});
- });
-
- // Test that an invalid $jsonSchema fails to parse in a $geoNear query.
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
- let res = testDB.runCommand({
- aggregate: coll.getName(),
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: [30, 40],
- distanceField: "dis",
- query: {$jsonSchema: invalidSchema},
- }
- }],
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
- assert.neq(-1,
- res.errmsg.indexOf("Unknown $jsonSchema keyword"),
- `$geoNear failed for a reason other than invalid query: ${tojson(res)}`);
-
- // Test that an invalid $jsonSchema fails to parse in a distinct command.
- assert.throws(function() {
- coll.distinct("a", {$jsonSchema: invalidSchema});
- });
-
- // Test that an invalid $jsonSchema fails to parse in a $match stage within a view.
- res = testDB.createView("invalid", coll.getName(), [{$match: {$jsonSchema: invalidSchema}}]);
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- // Test that an invalid $jsonSchema fails to parse in a listCollections command.
- res = testDB.runCommand({listCollections: 1, filter: {$jsonSchema: invalidSchema}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- // Test that a valid $jsonSchema is legal in a count command.
- coll.drop();
- assert.writeOK(coll.insert({a: 1, b: "str"}));
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.eq(1,
- coll.count({$jsonSchema: {properties: {a: {type: "number"}, b: {type: "string"}}}}));
-
- // Test that a valid $jsonSchema is legal in a $geoNear stage.
- const point = {type: "Point", coordinates: [31.0, 41.0]};
- assert.writeOK(coll.insert({geo: point, a: 1}));
- assert.writeOK(coll.insert({geo: point, a: 0}));
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
- res = coll.aggregate({
- $geoNear: {
- near: [30, 40],
- spherical: true,
- query: {$jsonSchema: {properties: {a: {minimum: 1}}}},
- distanceField: "dis",
- includeLocs: "loc",
- }
- })
- .toArray();
- assert.eq(1, res.length, tojson(res));
- assert.eq(res[0].loc, point, tojson(res));
-
- // Test that a valid $jsonSchema is legal in a distinct command.
- coll.drop();
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({a: "str"}));
- assert.writeOK(coll.insert({a: ["STR", "str"]}));
-
- assert(arrayEq([1, 2], coll.distinct("a", {$jsonSchema: {properties: {a: {type: "number"}}}})));
+"use strict";
+
+// For isWiredTiger.
+load("jstests/concurrency/fsm_workload_helpers/server_types.js");
+// For isReplSet
+load("jstests/libs/fixture_helpers.js");
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
+
+const testName = "json_schema_misc_validation";
+const testDB = db.getSiblingDB(testName);
+assert.commandWorked(testDB.dropDatabase());
+assert.commandWorked(testDB.createCollection(testName));
+const coll = testDB.getCollection(testName);
+coll.drop();
+
+const isMongos = (testDB.runCommand("ismaster").msg === "isdbgrid");
+
+// Test that $jsonSchema is rejected in an $elemMatch projection.
+assert.throws(function() {
+ coll.find({}, {a: {$elemMatch: {$jsonSchema: {}}}}).itcount();
+});
+
+// Test that an invalid $jsonSchema fails to parse in a count command.
+const invalidSchema = {
+ invalid: {}
+};
+assert.throws(function() {
+ coll.count({$jsonSchema: invalidSchema});
+});
+
+// Test that an invalid $jsonSchema fails to parse in a $geoNear query.
+assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
+let res = testDB.runCommand({
+ aggregate: coll.getName(),
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: [30, 40],
+ distanceField: "dis",
+ query: {$jsonSchema: invalidSchema},
+ }
+ }],
+});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+assert.neq(-1,
+ res.errmsg.indexOf("Unknown $jsonSchema keyword"),
+ `$geoNear failed for a reason other than invalid query: ${tojson(res)}`);
+
+// Test that an invalid $jsonSchema fails to parse in a distinct command.
+assert.throws(function() {
+ coll.distinct("a", {$jsonSchema: invalidSchema});
+});
+
+// Test that an invalid $jsonSchema fails to parse in a $match stage within a view.
+res = testDB.createView("invalid", coll.getName(), [{$match: {$jsonSchema: invalidSchema}}]);
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+// Test that an invalid $jsonSchema fails to parse in a listCollections command.
+res = testDB.runCommand({listCollections: 1, filter: {$jsonSchema: invalidSchema}});
+assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+// Test that a valid $jsonSchema is legal in a count command.
+coll.drop();
+assert.writeOK(coll.insert({a: 1, b: "str"}));
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.eq(1, coll.count({$jsonSchema: {properties: {a: {type: "number"}, b: {type: "string"}}}}));
+
+// Test that a valid $jsonSchema is legal in a $geoNear stage.
+const point = {
+ type: "Point",
+ coordinates: [31.0, 41.0]
+};
+assert.writeOK(coll.insert({geo: point, a: 1}));
+assert.writeOK(coll.insert({geo: point, a: 0}));
+assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
+res = coll.aggregate({
+ $geoNear: {
+ near: [30, 40],
+ spherical: true,
+ query: {$jsonSchema: {properties: {a: {minimum: 1}}}},
+ distanceField: "dis",
+ includeLocs: "loc",
+ }
+ })
+ .toArray();
+assert.eq(1, res.length, tojson(res));
+assert.eq(res[0].loc, point, tojson(res));
+
+// Test that a valid $jsonSchema is legal in a distinct command.
+coll.drop();
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: "str"}));
+assert.writeOK(coll.insert({a: ["STR", "str"]}));
+
+assert(arrayEq([1, 2], coll.distinct("a", {$jsonSchema: {properties: {a: {type: "number"}}}})));
+
+// Test that $jsonSchema in a query does not respect the collection-default collation.
+let schema = {properties: {a: {enum: ["STR"]}}};
+const caseInsensitiveCollation = {
+ locale: "en_US",
+ strength: 1
+};
+coll.drop();
+assert.commandWorked(
+ testDB.createCollection(coll.getName(), {collation: caseInsensitiveCollation}));
+assert.writeOK(coll.insert({a: "str"}));
+assert.writeOK(coll.insert({a: ["STR", "sTr"]}));
+assert.eq(0, coll.find({$jsonSchema: schema}).itcount());
+assert.eq(2, coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount());
+assert.eq(2, coll.find({a: "STR"}).itcount());
+
+// Test that $jsonSchema does not respect the collation set explicitly on a query.
+coll.drop();
+assert.writeOK(coll.insert({a: "str"}));
+assert.writeOK(coll.insert({a: ["STR", "sTr"]}));
+
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(0, coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount());
+ assert.eq(2,
+ coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}})
+ .collation(caseInsensitiveCollation)
+ .itcount());
+ assert.eq(2, coll.find({a: "STR"}).collation(caseInsensitiveCollation).itcount());
- // Test that $jsonSchema in a query does not respect the collection-default collation.
- let schema = {properties: {a: {enum: ["STR"]}}};
- const caseInsensitiveCollation = {locale: "en_US", strength: 1};
+ // Test that $jsonSchema can be used in a $match stage within a view.
coll.drop();
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {collation: caseInsensitiveCollation}));
- assert.writeOK(coll.insert({a: "str"}));
- assert.writeOK(coll.insert({a: ["STR", "sTr"]}));
- assert.eq(0, coll.find({$jsonSchema: schema}).itcount());
- assert.eq(2, coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount());
- assert.eq(2, coll.find({a: "STR"}).itcount());
-
- // Test that $jsonSchema does not respect the collation set explicitly on a query.
- coll.drop();
- assert.writeOK(coll.insert({a: "str"}));
- assert.writeOK(coll.insert({a: ["STR", "sTr"]}));
-
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(0,
- coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount());
- assert.eq(2,
- coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}})
- .collation(caseInsensitiveCollation)
- .itcount());
- assert.eq(2, coll.find({a: "STR"}).collation(caseInsensitiveCollation).itcount());
-
- // Test that $jsonSchema can be used in a $match stage within a view.
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({name: "Peter", age: 65});
- bulk.insert({name: "Paul", age: 105});
- bulk.insert({name: "Mary", age: 10});
- bulk.insert({name: "John", age: "unknown"});
- bulk.insert({name: "Mark"});
- bulk.insert({});
- assert.writeOK(bulk.execute());
-
- assert.commandWorked(testDB.createView(
- "seniorCitizens", coll.getName(), [{
- $match: {
- $jsonSchema: {
- required: ["name", "age"],
- properties:
- {name: {type: "string"}, age: {type: "number", minimum: 65}}
- }
+ let bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({name: "Peter", age: 65});
+ bulk.insert({name: "Paul", age: 105});
+ bulk.insert({name: "Mary", age: 10});
+ bulk.insert({name: "John", age: "unknown"});
+ bulk.insert({name: "Mark"});
+ bulk.insert({});
+ assert.writeOK(bulk.execute());
+
+ assert.commandWorked(testDB.createView(
+ "seniorCitizens", coll.getName(), [{
+ $match: {
+ $jsonSchema: {
+ required: ["name", "age"],
+ properties: {name: {type: "string"}, age: {type: "number", minimum: 65}}
}
- }]));
- assert.eq(2, testDB.seniorCitizens.find().itcount());
- }
-
- // Test that $jsonSchema can be used in the listCollections filter.
- res = testDB.runCommand({
- listCollections: 1,
- filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}
- });
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length);
-
- // Test that $jsonSchema can be used in the listDatabases filter.
- res = testDB.adminCommand(
- {listDatabases: 1, filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}});
- assert.commandWorked(res);
- assert.eq(1, res.databases.length);
-
- // Test that $jsonSchema can be used in the filter of a $graphLookup stage.
- const foreign = testDB.json_schema_foreign;
- foreign.drop();
- coll.drop();
- for (let i = 0; i < 10; i++) {
- assert.writeOK(foreign.insert({_id: i, n: [i - 1, i + 1]}));
- }
- assert.writeOK(coll.insert({starting: 0}));
-
- res = coll.aggregate({
+ }
+ }]));
+ assert.eq(2, testDB.seniorCitizens.find().itcount());
+}
+
+// Test that $jsonSchema can be used in the listCollections filter.
+res = testDB.runCommand(
+ {listCollections: 1, filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}});
+assert.commandWorked(res);
+assert.eq(1, res.cursor.firstBatch.length);
+
+// Test that $jsonSchema can be used in the listDatabases filter.
+res = testDB.adminCommand(
+ {listDatabases: 1, filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}});
+assert.commandWorked(res);
+assert.eq(1, res.databases.length);
+
+// Test that $jsonSchema can be used in the filter of a $graphLookup stage.
+const foreign = testDB.json_schema_foreign;
+foreign.drop();
+coll.drop();
+for (let i = 0; i < 10; i++) {
+ assert.writeOK(foreign.insert({_id: i, n: [i - 1, i + 1]}));
+}
+assert.writeOK(coll.insert({starting: 0}));
+
+res = coll.aggregate({
$graphLookup: {
from: foreign.getName(),
startWith: "$starting",
@@ -199,156 +202,155 @@
}
})
.toArray();
- assert.eq(1, res.length);
- assert.eq(res[0].integers.length, 5);
-
- // Test that $jsonSchema is legal in a delete command.
- coll.drop();
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({a: "str"}));
- assert.writeOK(coll.insert({a: [3]}));
-
- schema = {properties: {a: {type: "number", maximum: 2}}};
-
- res = coll.deleteMany({$jsonSchema: schema});
- assert.eq(2, res.deletedCount);
- assert.eq(0, coll.find({$jsonSchema: schema}).itcount());
-
- // Test that $jsonSchema does not respect the collation specified in a delete command.
- if (db.getMongo().writeMode() === "commands") {
- res = coll.deleteMany({$jsonSchema: {properties: {a: {enum: ["STR"]}}}},
- {collation: caseInsensitiveCollation});
- assert.eq(0, res.deletedCount);
- } else {
- res = testDB.runCommand({
- delete: coll.getName(),
- deletes: [{q: {$jsonSchema: {properties: {a: {enum: ["STR"]}}}}}],
- collation: caseInsensitiveCollation,
- });
- assert.eq(res.deletedCount);
- }
-
- // Test that $jsonSchema is legal in an update command.
- coll.drop();
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
-
- res = coll.update({$jsonSchema: schema}, {$inc: {a: 1}}, {multi: true});
- assert.writeOK(res);
- assert.eq(2, res.nMatched);
- assert.eq(1, coll.find({$jsonSchema: schema}).itcount());
-
- // Test that $jsonSchema is legal in a findAndModify command.
- coll.drop();
- assert.writeOK(coll.insert({a: "long_string"}));
- assert.writeOK(coll.insert({a: "short"}));
+assert.eq(1, res.length);
+assert.eq(res[0].integers.length, 5);
+
+// Test that $jsonSchema is legal in a delete command.
+coll.drop();
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: "str"}));
+assert.writeOK(coll.insert({a: [3]}));
+
+schema = {
+ properties: {a: {type: "number", maximum: 2}}
+};
+
+res = coll.deleteMany({$jsonSchema: schema});
+assert.eq(2, res.deletedCount);
+assert.eq(0, coll.find({$jsonSchema: schema}).itcount());
+
+// Test that $jsonSchema does not respect the collation specified in a delete command.
+if (db.getMongo().writeMode() === "commands") {
+ res = coll.deleteMany({$jsonSchema: {properties: {a: {enum: ["STR"]}}}},
+ {collation: caseInsensitiveCollation});
+ assert.eq(0, res.deletedCount);
+} else {
+ res = testDB.runCommand({
+ delete: coll.getName(),
+ deletes: [{q: {$jsonSchema: {properties: {a: {enum: ["STR"]}}}}}],
+ collation: caseInsensitiveCollation,
+ });
+    assert.eq(0, res.n);
+}
+
+// Test that $jsonSchema is legal in an update command.
+coll.drop();
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+
+res = coll.update({$jsonSchema: schema}, {$inc: {a: 1}}, {multi: true});
+assert.writeOK(res);
+assert.eq(2, res.nMatched);
+assert.eq(1, coll.find({$jsonSchema: schema}).itcount());
+
+// Test that $jsonSchema is legal in a findAndModify command.
+coll.drop();
+assert.writeOK(coll.insert({a: "long_string"}));
+assert.writeOK(coll.insert({a: "short"}));
+
+schema = {
+ properties: {a: {type: "string", minLength: 6}}
+};
+res = coll.findAndModify({query: {$jsonSchema: schema}, update: {$set: {a: "extra_long_string"}}});
+assert.eq("long_string", res.a);
+assert.eq(1, coll.find({$jsonSchema: schema}).itcount());
+
+// Test that $jsonSchema works correctly in the presence of a basic b-tree index.
+coll.drop();
+assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
+assert.writeOK(coll.insert({_id: 2, a: 2, b: 2, point: [5, 5]}));
+assert.writeOK(coll.insert({_id: 3, a: "temp text test"}));
+
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.eq(3, coll.find({$jsonSchema: {}}).itcount());
+assert.eq(2, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount());
+assert.eq(2,
+ coll.find({$jsonSchema: {required: ["a"], properties: {a: {type: "number"}}}}).itcount());
+assert.eq(2, coll.find({$or: [{$jsonSchema: {properties: {a: {minimum: 2}}}}, {b: 2}]}).itcount());
+
+// Test that $jsonSchema works correctly in the presence of a geo index.
+coll.dropIndexes();
+assert.commandWorked(coll.createIndex({point: "2dsphere"}));
+assert.eq(1, coll.find({$jsonSchema: {required: ["point"]}}).itcount());
+
+assert.eq(1,
+ coll.find({
+ $jsonSchema: {properties: {point: {minItems: 2}}},
+ point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}
+ })
+ .itcount());
- schema = {properties: {a: {type: "string", minLength: 6}}};
- res = coll.findAndModify(
- {query: {$jsonSchema: schema}, update: {$set: {a: "extra_long_string"}}});
- assert.eq("long_string", res.a);
- assert.eq(1, coll.find({$jsonSchema: schema}).itcount());
+coll.dropIndexes();
+assert.commandWorked(coll.createIndex({a: 1, point: "2dsphere"}));
+assert.eq(1, coll.find({$jsonSchema: {required: ["a", "point"]}}).itcount());
- // Test that $jsonSchema works correctly in the presence of a basic b-tree index.
+assert.eq(1,
+ coll.find({
+ $jsonSchema: {required: ["a"], properties: {a: {minLength: 3}}},
+ point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}
+ })
+ .itcount());
+
+assert.eq(1,
+ coll.find({
+ $and: [
+ {$jsonSchema: {properties: {point: {maxItems: 2}}}},
+ {point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}, a: 2}
+ ]
+ })
+ .itcount());
+
+// Test that $jsonSchema works correctly in the presence of a text index.
+coll.dropIndexes();
+assert.commandWorked(coll.createIndex({a: "text"}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.eq(3, coll.find({$jsonSchema: {properties: {a: {minLength: 5}}}}).itcount());
+assert.eq(1, coll.find({$jsonSchema: {required: ["a"]}, $text: {$search: "test"}}).itcount());
+assert.eq(
+ 3, coll.find({$or: [{$jsonSchema: {required: ["a"]}}, {$text: {$search: "TEST"}}]}).itcount());
+assert.eq(1, coll.find({$and: [{$jsonSchema: {}}, {$text: {$search: "TEST"}}]}).itcount());
+
+if (!isMongos) {
coll.drop();
- assert.writeOK(coll.insert({_id: 1, a: 1, b: 1}));
- assert.writeOK(coll.insert({_id: 2, a: 2, b: 2, point: [5, 5]}));
- assert.writeOK(coll.insert({_id: 3, a: "temp text test"}));
-
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.eq(3, coll.find({$jsonSchema: {}}).itcount());
- assert.eq(2, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount());
- assert.eq(
- 2,
- coll.find({$jsonSchema: {required: ["a"], properties: {a: {type: "number"}}}}).itcount());
- assert.eq(2,
- coll.find({$or: [{$jsonSchema: {properties: {a: {minimum: 2}}}}, {b: 2}]}).itcount());
-
- // Test that $jsonSchema works correctly in the presence of a geo index.
- coll.dropIndexes();
- assert.commandWorked(coll.createIndex({point: "2dsphere"}));
- assert.eq(1, coll.find({$jsonSchema: {required: ["point"]}}).itcount());
-
- assert.eq(1,
- coll.find({
- $jsonSchema: {properties: {point: {minItems: 2}}},
- point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}
- })
- .itcount());
-
- coll.dropIndexes();
- assert.commandWorked(coll.createIndex({a: 1, point: "2dsphere"}));
- assert.eq(1, coll.find({$jsonSchema: {required: ["a", "point"]}}).itcount());
-
- assert.eq(1,
- coll.find({
- $jsonSchema: {required: ["a"], properties: {a: {minLength: 3}}},
- point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}
- })
- .itcount());
-
- assert.eq(
- 1,
- coll.find({
- $and: [
- {$jsonSchema: {properties: {point: {maxItems: 2}}}},
- {point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}, a: 2}
- ]
- })
- .itcount());
-
- // Test that $jsonSchema works correctly in the presence of a text index.
- coll.dropIndexes();
- assert.commandWorked(coll.createIndex({a: "text"}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.eq(3, coll.find({$jsonSchema: {properties: {a: {minLength: 5}}}}).itcount());
- assert.eq(1, coll.find({$jsonSchema: {required: ["a"]}, $text: {$search: "test"}}).itcount());
- assert.eq(
- 3,
- coll.find({$or: [{$jsonSchema: {required: ["a"]}}, {$text: {$search: "TEST"}}]}).itcount());
- assert.eq(1, coll.find({$and: [{$jsonSchema: {}}, {$text: {$search: "TEST"}}]}).itcount());
-
- if (!isMongos) {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: true}));
-
- // Test $jsonSchema in the precondition checking for applyOps.
- res = testDB.adminCommand({
- applyOps: [
- {op: "u", ns: coll.getFullName(), o2: {_id: 0}, o: {$set: {a: false}}},
+ assert.writeOK(coll.insert({_id: 0, a: true}));
+
+ // Test $jsonSchema in the precondition checking for applyOps.
+ res = testDB.adminCommand({
+ applyOps: [
+ {op: "u", ns: coll.getFullName(), o2: {_id: 0}, o: {$set: {a: false}}},
+ ],
+ preCondition: [{
+ ns: coll.getFullName(),
+ q: {$jsonSchema: {properties: {a: {type: "boolean"}}}},
+ res: {a: true}
+ }]
+ });
+ assert.commandWorked(res);
+ assert.eq(1, res.applied);
+
+    // Use majority write concern to clear the drop-pending state, which can cause
+    // lock conflicts with transactions.
+ coll.drop({writeConcern: {w: "majority"}});
+ assert.writeOK(coll.insert({_id: 1, a: true}));
+
+ if (FixtureHelpers.isReplSet(db) && !isMongos && isWiredTiger(db)) {
+ // Test $jsonSchema in the precondition checking for doTxn.
+ const session = db.getMongo().startSession();
+ const sessionDb = session.getDatabase(testDB.getName());
+ res = sessionDb.adminCommand({
+ doTxn: [
+ {op: "u", ns: coll.getFullName(), o2: {_id: 1}, o: {$set: {a: false}}},
],
preCondition: [{
ns: coll.getFullName(),
q: {$jsonSchema: {properties: {a: {type: "boolean"}}}},
res: {a: true}
- }]
+ }],
+ txnNumber: NumberLong("0")
});
assert.commandWorked(res);
assert.eq(1, res.applied);
-
- // Use majority write concern to clear the drop-pending that can cause lock conflicts with
- // transactions.
- coll.drop({writeConcern: {w: "majority"}});
- assert.writeOK(coll.insert({_id: 1, a: true}));
-
- if (FixtureHelpers.isReplSet(db) && !isMongos && isWiredTiger(db)) {
- // Test $jsonSchema in the precondition checking for doTxn.
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase(testDB.getName());
- res = sessionDb.adminCommand({
- doTxn: [
- {op: "u", ns: coll.getFullName(), o2: {_id: 1}, o: {$set: {a: false}}},
- ],
- preCondition: [{
- ns: coll.getFullName(),
- q: {$jsonSchema: {properties: {a: {type: "boolean"}}}},
- res: {a: true}
- }],
- txnNumber: NumberLong("0")
- });
- assert.commandWorked(res);
- assert.eq(1, res.applied);
- }
}
+}
}());
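
One behavior worth isolating from the long file above: $jsonSchema string comparisons
(such as enum) ignore collation entirely, whether it comes from the collection default
or the query. A condensed, self-contained version of that check, assuming a connection
that supports per-query collation (the guard used in the test) and a hypothetical
scratch collection name:

const collationColl = db.jsonschema_collation_sketch;  // hypothetical scratch collection
collationColl.drop();
assert.writeOK(collationColl.insert({a: "str"}));
const caseInsensitive = {locale: "en_US", strength: 1};
// Plain equality respects the query collation...
assert.eq(1, collationColl.find({a: "STR"}).collation(caseInsensitive).itcount());
// ...but the $jsonSchema enum comparison does not.
assert.eq(0,
          collationColl.find({$jsonSchema: {properties: {a: {enum: ["STR"]}}}})
              .collation(caseInsensitive)
              .itcount());
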
diff --git a/jstests/core/json_schema/pattern_properties.js b/jstests/core/json_schema/pattern_properties.js
index b94987f2a4a..4c75b78f72a 100644
--- a/jstests/core/json_schema/pattern_properties.js
+++ b/jstests/core/json_schema/pattern_properties.js
@@ -4,87 +4,83 @@
* Tests for the JSON Schema 'patternProperties' keyword.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.schema_pattern_properties;
+const coll = db.schema_pattern_properties;
- // Test top-level patternProperties.
- assertSchemaMatch(
- coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {}, true);
- assertSchemaMatch(
- coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {c: 1}, true);
- assertSchemaMatch(coll,
- {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
- {ca: 1, cb: 1},
- true);
- assertSchemaMatch(coll,
- {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
- {a: "str", ca: 1, cb: 1},
- false);
- assertSchemaMatch(coll,
- {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
- {a: 1, b: 1, ca: 1, cb: 1},
- false);
- assertSchemaMatch(coll,
- {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
- {a: 1, b: "str", ca: 1, cb: 1},
- true);
+// Test top-level patternProperties.
+assertSchemaMatch(
+ coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {}, true);
+assertSchemaMatch(
+ coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {c: 1}, true);
+assertSchemaMatch(coll,
+ {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
+ {ca: 1, cb: 1},
+ true);
+assertSchemaMatch(coll,
+ {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
+ {a: "str", ca: 1, cb: 1},
+ false);
+assertSchemaMatch(coll,
+ {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
+ {a: 1, b: 1, ca: 1, cb: 1},
+ false);
+assertSchemaMatch(coll,
+ {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}},
+ {a: 1, b: "str", ca: 1, cb: 1},
+ true);
- // Test patternProperties within a nested schema.
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {obj: 1},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {obj: {}},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {obj: {ca: 1, cb: 1}},
- true);
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {obj: {ac: "str", ca: 1, cb: 1}},
- false);
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {obj: {ac: 1, bc: 1, ca: 1, cb: 1}},
- false);
- assertSchemaMatch(
- coll,
- {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
- {obj: {ac: 1, bc: "str", ca: 1, cb: 1}},
- true);
+// Test patternProperties within a nested schema.
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {obj: 1},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {obj: {}},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {obj: {ca: 1, cb: 1}},
+ true);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {obj: {ac: "str", ca: 1, cb: 1}},
+ false);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {obj: {ac: 1, bc: 1, ca: 1, cb: 1}},
+ false);
+assertSchemaMatch(
+ coll,
+ {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}},
+ {obj: {ac: 1, bc: "str", ca: 1, cb: 1}},
+ true);
- // Test that 'patternProperties' still applies, even if the field name also appears in
- // 'properties'.
- assertSchemaMatch(
- coll,
- {properties: {aa: {type: "number"}}, patternProperties: {"^a": {type: "string"}}},
- {aa: 1},
- false);
- assertSchemaMatch(coll,
- {
- properties: {
- obj: {
- properties: {aa: {type: "number"}},
- patternProperties: {"^a": {type: "string"}}
- }
- }
- },
- {obj: {aa: 1}},
- false);
+// Test that 'patternProperties' still applies, even if the field name also appears in
+// 'properties'.
+assertSchemaMatch(coll,
+ {properties: {aa: {type: "number"}}, patternProperties: {"^a": {type: "string"}}},
+ {aa: 1},
+ false);
+assertSchemaMatch(
+ coll,
+ {
+ properties:
+ {obj: {properties: {aa: {type: "number"}}, patternProperties: {"^a": {type: "string"}}}}
+ },
+ {obj: {aa: 1}},
+ false);
}());
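
The final two assertions above capture the key patternProperties rule: when a field
name is listed in 'properties' and also matches a 'patternProperties' regex, its value
must satisfy both schemas, so the number/string pair of constraints on "aa" is
unsatisfiable. A standalone sketch using a hypothetical scratch collection:

const patColl = db.pattern_properties_sketch;  // hypothetical scratch collection
patColl.drop();
assert.writeOK(patColl.insert({_id: 1, aa: 1}));    // number: violates patternProperties
assert.writeOK(patColl.insert({_id: 2, aa: "x"}));  // string: violates properties.aa
assert.writeOK(patColl.insert({_id: 3, b: 1}));     // nothing matches /^a/: vacuously ok
const contradictory = {
    properties: {aa: {type: "number"}},
    patternProperties: {"^a": {type: "string"}}
};
assert.eq(1, patColl.find({$jsonSchema: contradictory}).itcount());  // only _id: 3
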
diff --git a/jstests/core/json_schema/required.js b/jstests/core/json_schema/required.js
index 4ffc7438b48..a9a0cd67a48 100644
--- a/jstests/core/json_schema/required.js
+++ b/jstests/core/json_schema/required.js
@@ -4,24 +4,24 @@
* Tests for handling of the JSON Schema 'required' keyword.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.jstests_schema_required;
+const coll = db.jstests_schema_required;
- assertSchemaMatch(coll, {required: ["a"]}, {a: 1}, true);
- assertSchemaMatch(coll, {required: ["a"]}, {}, false);
- assertSchemaMatch(coll, {required: ["a"]}, {b: 1}, false);
- assertSchemaMatch(coll, {required: ["a"]}, {b: {a: 1}}, false);
+assertSchemaMatch(coll, {required: ["a"]}, {a: 1}, true);
+assertSchemaMatch(coll, {required: ["a"]}, {}, false);
+assertSchemaMatch(coll, {required: ["a"]}, {b: 1}, false);
+assertSchemaMatch(coll, {required: ["a"]}, {b: {a: 1}}, false);
- assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, b: 1, c: 1}, true);
- assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, c: 1}, false);
- assertSchemaMatch(coll, {required: ["a", "b"]}, {b: 1, c: 1}, false);
+assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, b: 1, c: 1}, true);
+assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, c: 1}, false);
+assertSchemaMatch(coll, {required: ["a", "b"]}, {b: 1, c: 1}, false);
- assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {}, true);
- assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: 1}, true);
- assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {b: 1}}, true);
- assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {c: 1}}, false);
- assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {}}, false);
+assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {}, true);
+assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: 1}, true);
+assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {b: 1}}, true);
+assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {c: 1}}, false);
+assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {}}, false);
}());
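
The nested cases above follow from 'required' applying only to object values: when the
parent field is missing or holds a scalar, the nested schema is vacuously satisfied.
Condensed into one runnable check against a hypothetical scratch collection:

const reqColl = db.required_sketch;  // hypothetical scratch collection
reqColl.drop();
assert.writeOK(reqColl.insert({_id: 1}));             // no "a" at all: matches
assert.writeOK(reqColl.insert({_id: 2, a: 7}));       // "a" is a scalar: matches
assert.writeOK(reqColl.insert({_id: 3, a: {c: 1}}));  // object missing "b": fails
assert.eq(2, reqColl.find({$jsonSchema: {properties: {a: {required: ["b"]}}}}).itcount());
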
diff --git a/jstests/core/json_schema/unique_items.js b/jstests/core/json_schema/unique_items.js
index 4e558b5db73..955bae74e88 100644
--- a/jstests/core/json_schema/unique_items.js
+++ b/jstests/core/json_schema/unique_items.js
@@ -4,62 +4,68 @@
* Tests the JSON Schema "uniqueItems" keyword.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.getCollection("json_schema_unique_items");
- coll.drop();
+const coll = db.getCollection("json_schema_unique_items");
+coll.drop();
- // Test that the JSON Schema fails to parse if "uniqueItems" is not a boolean.
- assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1.0}}).itcount());
- assert.throws(() => coll.find({$jsonSchema: {uniqueItems: "true"}}).itcount());
+// Test that the JSON Schema fails to parse if "uniqueItems" is not a boolean.
+assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1.0}}).itcount());
+assert.throws(() => coll.find({$jsonSchema: {uniqueItems: "true"}}).itcount());
- // Test that "uniqueItems" has no effect at the top level (but still succeeds).
- assertSchemaMatch(coll, {uniqueItems: true}, {}, true);
- assertSchemaMatch(coll, {uniqueItems: false}, {}, true);
+// Test that "uniqueItems" has no effect at the top level (but still succeeds).
+assertSchemaMatch(coll, {uniqueItems: true}, {}, true);
+assertSchemaMatch(coll, {uniqueItems: false}, {}, true);
- // Test that "uniqueItems" matches when the field is missing or not an array.
- let schema = {properties: {a: {uniqueItems: true}}};
- assertSchemaMatch(coll, schema, {}, true);
- assertSchemaMatch(coll, schema, {a: "foo"}, true);
- assertSchemaMatch(coll, schema, {a: {foo: [1, 1], bar: [2, 2]}}, true);
+// Test that "uniqueItems" matches when the field is missing or not an array.
+let schema = {properties: {a: {uniqueItems: true}}};
+assertSchemaMatch(coll, schema, {}, true);
+assertSchemaMatch(coll, schema, {a: "foo"}, true);
+assertSchemaMatch(coll, schema, {a: {foo: [1, 1], bar: [2, 2]}}, true);
- // Test that {uniqueItems: true} matches arrays whose items are all unique.
- schema = {properties: {a: {uniqueItems: true}}};
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1]}, true);
- assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
- assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true);
- assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true);
- assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true);
+// Test that {uniqueItems: true} matches arrays whose items are all unique.
+schema = {
+ properties: {a: {uniqueItems: true}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1]}, true);
+assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
+assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true);
+assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true);
+assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true);
- // Test that {uniqueItems: true} rejects arrays with duplicates.
- schema = {properties: {a: {uniqueItems: true}}};
- assertSchemaMatch(coll, schema, {a: [1, 1]}, false);
- assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, false);
- assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, false);
- assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, false);
- assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, false);
- assertSchemaMatch(coll, schema, {a: [null, null]}, false);
- assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, false);
- assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, false);
+// Test that {uniqueItems: true} rejects arrays with duplicates.
+schema = {
+ properties: {a: {uniqueItems: true}}
+};
+assertSchemaMatch(coll, schema, {a: [1, 1]}, false);
+assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, false);
+assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, false);
+assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, false);
+assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, false);
+assertSchemaMatch(coll, schema, {a: [null, null]}, false);
+assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, false);
+assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, false);
- // Test that {uniqueItems: false} has no effect.
- schema = {properties: {a: {uniqueItems: false}}};
- assertSchemaMatch(coll, schema, {a: []}, true);
- assertSchemaMatch(coll, schema, {a: [1]}, true);
- assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
- assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true);
- assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true);
- assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true);
- assertSchemaMatch(coll, schema, {a: [1, 1]}, true);
- assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, true);
- assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, true);
- assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, true);
- assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, true);
- assertSchemaMatch(coll, schema, {a: [null, null]}, true);
- assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, true);
- assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, true);
+// Test that {uniqueItems: false} has no effect.
+schema = {
+ properties: {a: {uniqueItems: false}}
+};
+assertSchemaMatch(coll, schema, {a: []}, true);
+assertSchemaMatch(coll, schema, {a: [1]}, true);
+assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true);
+assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true);
+assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true);
+assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true);
+assertSchemaMatch(coll, schema, {a: [1, 1]}, true);
+assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, true);
+assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, true);
+assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, true);
+assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, true);
+assertSchemaMatch(coll, schema, {a: [null, null]}, true);
+assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, true);
+assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, true);
}());
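
The duplicate cases above pin down the equality that uniqueItems uses: numeric types
compare by value (NumberLong(1) equals NumberInt(1)) and objects compare without regard
to field order, while differently-cased strings remain distinct. Condensed check
against a hypothetical scratch collection:

const uniqColl = db.unique_items_sketch;  // hypothetical scratch collection
uniqColl.drop();
assert.writeOK(uniqColl.insert({_id: 1, a: [NumberLong(1), NumberInt(1)]}));  // dup
assert.writeOK(uniqColl.insert({_id: 2, a: [{x: 1, y: 2}, {y: 2, x: 1}]}));   // dup
assert.writeOK(uniqColl.insert({_id: 3, a: ["foo", "FOO"]}));                 // unique
assert.eq(1, uniqColl.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount());
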
diff --git a/jstests/core/jssymbol.js b/jstests/core/jssymbol.js
index 6f216b52879..8a5e538aeeb 100644
--- a/jstests/core/jssymbol.js
+++ b/jstests/core/jssymbol.js
@@ -1,31 +1,31 @@
// Test Symbol.toPrimitive works for DB and BSON objects
//
(function() {
- // Exercise Symbol.toPrimitive on DB objects
- assert(`${db}` === 'test');
- assert(isNaN(+db));
+// Exercise Symbol.toPrimitive on DB objects
+assert(`${db}` === 'test');
+assert(isNaN(+db));
- // Exercise the special Symbol methods and make sure DB.getProperty handles them
- assert(db[Symbol.iterator] != 1);
- assert(db[Symbol.match] != 1);
- assert(db[Symbol.species] != 1);
- assert(db[Symbol.toPrimitive] != 1);
+// Exercise the special Symbol methods and make sure DB.getProperty handles them
+assert(db[Symbol.iterator] != 1);
+assert(db[Symbol.match] != 1);
+assert(db[Symbol.species] != 1);
+assert(db[Symbol.toPrimitive] != 1);
- // Exercise Symbol.toPrimitive on BSON objects
- col1 = db.jssymbol_col;
- col1.insert({});
- a = db.getCollection("jssymbol_col").getIndexes()[0];
+// Exercise Symbol.toPrimitive on BSON objects
+col1 = db.jssymbol_col;
+col1.insert({});
+a = db.getCollection("jssymbol_col").getIndexes()[0];
- assert(isNaN(+a));
- assert(+a.v >= 1);
- assert(`${a.v}` >= 1);
- assert(`${a}` == '[object BSON]');
+assert(isNaN(+a));
+assert(+a.v >= 1);
+assert(`${a.v}` >= 1);
+assert(`${a}` == '[object BSON]');
- // Exercise the special Symbol methods and make sure BSON.resolve handles them
- assert(db[Symbol.iterator] != 1);
- assert(db[Symbol.match] != 1);
- assert(db[Symbol.species] != 1);
- assert(db[Symbol.toPrimitive] != 1);
+// Exercise the special Symbol methods and make sure BSON.resolve handles them
+assert(a[Symbol.iterator] != 1);
+assert(a[Symbol.match] != 1);
+assert(a[Symbol.species] != 1);
+assert(a[Symbol.toPrimitive] != 1);
- col1.drop();
+col1.drop();
})();
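
For readers unfamiliar with the coercion being exercised: `${db}` invokes
Symbol.toPrimitive with a "string" hint and +db invokes it with a "number" hint, so an
object can answer each context differently. A plain-JavaScript sketch of that
mechanism, independent of the shell's DB and BSON types:

// Standalone ES6 illustration of the hint-driven coercion the test relies on.
const primitiveDemo = {
    [Symbol.toPrimitive](hint) {
        return hint === "number" ? NaN : "test";
    }
};
assert(`${primitiveDemo}` === "test");  // string hint
assert(isNaN(+primitiveDemo));          // number hint
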
diff --git a/jstests/core/kill_cursors.js b/jstests/core/kill_cursors.js
index 096d8962d2a..a65078028ed 100644
--- a/jstests/core/kill_cursors.js
+++ b/jstests/core/kill_cursors.js
@@ -10,74 +10,73 @@
//
// Test the killCursors command.
(function() {
- 'use strict';
+'use strict';
- var cmdRes;
- var cursor;
- var cursorId;
+var cmdRes;
+var cursor;
+var cursorId;
- var coll = db.jstest_killcursors;
- coll.drop();
+var coll = db.jstest_killcursors;
+coll.drop();
- for (var i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
+for (var i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- // killCursors command should fail if the collection name is not a string.
- cmdRes = db.runCommand(
- {killCursors: {foo: "bad collection param"}, cursors: [NumberLong(123), NumberLong(456)]});
- assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse);
+// killCursors command should fail if the collection name is not a string.
+cmdRes = db.runCommand(
+ {killCursors: {foo: "bad collection param"}, cursors: [NumberLong(123), NumberLong(456)]});
+assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse);
- // killCursors command should fail if the cursors parameter is not an array.
- cmdRes = db.runCommand(
- {killCursors: coll.getName(), cursors: {a: NumberLong(123), b: NumberLong(456)}});
- assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse);
+// killCursors command should fail if the cursors parameter is not an array.
+cmdRes =
+ db.runCommand({killCursors: coll.getName(), cursors: {a: NumberLong(123), b: NumberLong(456)}});
+assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse);
- // killCursors command should fail if the cursors parameter is an empty array.
- cmdRes = db.runCommand({killCursors: coll.getName(), cursors: []});
- assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue);
+// killCursors command should fail if the cursors parameter is an empty array.
+cmdRes = db.runCommand({killCursors: coll.getName(), cursors: []});
+assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue);
- // killCursors command should report cursors as not found if the collection does not exist.
- cmdRes = db.runCommand(
- {killCursors: "non-existent-collection", cursors: [NumberLong(123), NumberLong(456)]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, []);
- assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsUnknown, []);
+// killCursors command should report cursors as not found if the collection does not exist.
+cmdRes = db.runCommand(
+ {killCursors: "non-existent-collection", cursors: [NumberLong(123), NumberLong(456)]});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursorsKilled, []);
+assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]);
+assert.eq(cmdRes.cursorsAlive, []);
+assert.eq(cmdRes.cursorsUnknown, []);
- // killCursors command should report non-existent cursors as "not found".
- cmdRes =
- db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), NumberLong(456)]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, []);
- assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsUnknown, []);
+// killCursors command should report non-existent cursors as "not found".
+cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), NumberLong(456)]});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursorsKilled, []);
+assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]);
+assert.eq(cmdRes.cursorsAlive, []);
+assert.eq(cmdRes.cursorsUnknown, []);
- // Test a case where one cursors exists and is killed but the other does not exist.
- cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
- assert.commandWorked(cmdRes);
- cursorId = cmdRes.cursor.id;
- assert.neq(cursorId, NumberLong(0));
+// Test a case where one cursor exists and is killed but the other does not exist.
+cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
+assert.commandWorked(cmdRes);
+cursorId = cmdRes.cursor.id;
+assert.neq(cursorId, NumberLong(0));
- cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [cursorId]);
- assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsUnknown, []);
+cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursorsKilled, [cursorId]);
+assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
+assert.eq(cmdRes.cursorsAlive, []);
+assert.eq(cmdRes.cursorsUnknown, []);
- // Test killing a noTimeout cursor.
- cmdRes = db.runCommand({find: coll.getName(), batchSize: 2, noCursorTimeout: true});
- assert.commandWorked(cmdRes);
- cursorId = cmdRes.cursor.id;
- assert.neq(cursorId, NumberLong(0));
+// Test killing a noTimeout cursor.
+cmdRes = db.runCommand({find: coll.getName(), batchSize: 2, noCursorTimeout: true});
+assert.commandWorked(cmdRes);
+cursorId = cmdRes.cursor.id;
+assert.neq(cursorId, NumberLong(0));
- cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [cursorId]);
- assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsUnknown, []);
+cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursorsKilled, [cursorId]);
+assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
+assert.eq(cmdRes.cursorsAlive, []);
+assert.eq(cmdRes.cursorsUnknown, []);
})();
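
The success-path assertions above rely on killCursors partitioning every requested id
into exactly one of four response arrays (cursorsKilled, cursorsNotFound, cursorsAlive,
cursorsUnknown). A condensed end-to-end version against a hypothetical scratch
collection, with NumberLong(123) standing in for an id that was never issued:

const kcColl = db.kill_cursors_sketch;  // hypothetical scratch collection
kcColl.drop();
for (let i = 0; i < 5; i++) {
    assert.writeOK(kcColl.insert({_id: i}));
}
const findRes = assert.commandWorked(db.runCommand({find: kcColl.getName(), batchSize: 2}));
const liveId = findRes.cursor.id;  // non-zero: more results remain beyond the batch
const killRes = assert.commandWorked(
    db.runCommand({killCursors: kcColl.getName(), cursors: [liveId, NumberLong(123)]}));
assert.eq(killRes.cursorsKilled, [liveId]);
assert.eq(killRes.cursorsNotFound, [NumberLong(123)]);
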
diff --git a/jstests/core/killop_drop_collection.js b/jstests/core/killop_drop_collection.js
index 621b5c4ace9..b4efd13733b 100644
--- a/jstests/core/killop_drop_collection.js
+++ b/jstests/core/killop_drop_collection.js
@@ -11,64 +11,62 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- var collectionName = "killop_drop";
- let collection = db.getCollection(collectionName);
- collection.drop();
- for (let i = 0; i < 1000; i++) {
- assert.writeOK(collection.insert({x: i}));
- }
- assert.writeOK(collection.createIndex({x: 1}, {background: true}));
-
- // Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it.
- const storageEngine = jsTest.options().storageEngine;
- let fsyncRes = db.fsyncLock();
- if (!fsyncRes.ok) {
- assert.commandFailedWithCode(fsyncRes, ErrorCodes.CommandNotSupported);
- jsTest.log("Skipping test on storage engine " + storageEngine +
- ", which does not support fsyncLock.");
- return;
- }
+var collectionName = "killop_drop";
+let collection = db.getCollection(collectionName);
+collection.drop();
+for (let i = 0; i < 1000; i++) {
+ assert.writeOK(collection.insert({x: i}));
+}
+assert.writeOK(collection.createIndex({x: 1}, {background: true}));
- // Kick off a drop on the collection.
- const useDefaultPort = null;
- const noConnect = false;
- // The drop will occasionally, and legitimately be interrupted by killOp (and not succeed).
- let awaitDropCommand = startParallelShell(function() {
- let res = db.getSiblingDB("test").runCommand({drop: "killop_drop"});
- let collectionFound = db.getCollectionNames().includes("killop_drop");
- if (res.ok == 1) {
- // Ensure that the collection has been dropped.
- assert(
- !collectionFound,
- "Expected collection to not appear in listCollections output after being dropped");
- } else {
- // Ensure that the collection hasn't been dropped.
- assert(collectionFound,
- "Expected collection to appear in listCollections output after drop failed");
- }
- }, useDefaultPort, noConnect);
+// Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it.
+const storageEngine = jsTest.options().storageEngine;
+let fsyncRes = db.fsyncLock();
+if (!fsyncRes.ok) {
+ assert.commandFailedWithCode(fsyncRes, ErrorCodes.CommandNotSupported);
+ jsTest.log("Skipping test on storage engine " + storageEngine +
+ ", which does not support fsyncLock.");
+ return;
+}
- // Wait for the drop operation to appear in the db.currentOp() output.
- let dropCommandOpId = null;
- assert.soon(function() {
- let dropOpsInProgress = db.currentOp().inprog.filter(
- op => op.command && op.command.drop === collection.getName());
- if (dropOpsInProgress.length > 0) {
- dropCommandOpId = dropOpsInProgress[0].opid;
- }
- return dropCommandOpId;
- });
+// Kick off a drop on the collection.
+const useDefaultPort = null;
+const noConnect = false;
+// The drop will occasionally, and legitimately, be interrupted by killOp (and not succeed).
+let awaitDropCommand = startParallelShell(function() {
+ let res = db.getSiblingDB("test").runCommand({drop: "killop_drop"});
+ let collectionFound = db.getCollectionNames().includes("killop_drop");
+ if (res.ok == 1) {
+ // Ensure that the collection has been dropped.
+ assert(!collectionFound,
+ "Expected collection to not appear in listCollections output after being dropped");
+ } else {
+ // Ensure that the collection hasn't been dropped.
+ assert(collectionFound,
+ "Expected collection to appear in listCollections output after drop failed");
+ }
+}, useDefaultPort, noConnect);
- // Issue a killOp for the drop command, then unlock the server. We expect that the drop
- // operation was *not* killed, and that the collection was dropped successfully.
- assert.commandWorked(db.killOp(dropCommandOpId));
- let unlockRes = assert.commandWorked(db.fsyncUnlock());
- assert.eq(0,
- unlockRes.lockCount,
- "Expected the number of fsyncLocks to be zero after issuing fsyncUnlock");
+// Wait for the drop operation to appear in the db.currentOp() output.
+let dropCommandOpId = null;
+assert.soon(function() {
+ let dropOpsInProgress =
+ db.currentOp().inprog.filter(op => op.command && op.command.drop === collection.getName());
+ if (dropOpsInProgress.length > 0) {
+ dropCommandOpId = dropOpsInProgress[0].opid;
+ }
+ return dropCommandOpId;
+});
- awaitDropCommand();
+// Issue a killOp for the drop command, then unlock the server. We expect that the drop
+// operation was *not* killed, and that the collection was dropped successfully.
+assert.commandWorked(db.killOp(dropCommandOpId));
+let unlockRes = assert.commandWorked(db.fsyncUnlock());
+assert.eq(0,
+ unlockRes.lockCount,
+ "Expected the number of fsyncLocks to be zero after issuing fsyncUnlock");
+awaitDropCommand();
}());
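
The shape of the test above: fsyncLock blocks the parallel drop, currentOp is polled
until the blocked drop surfaces, and killOp is issued before unlocking, with the drop
expected to survive the kill. The opid lookup reduces to a filter like this
(standalone, assuming a drop of "killop_drop" is in flight):

// op.command.drop carries the target collection name for drop operations.
const inProgressDrops =
    db.currentOp().inprog.filter(op => op.command && op.command.drop === "killop_drop");
const dropOpId = inProgressDrops.length > 0 ? inProgressDrops[0].opid : null;
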
diff --git a/jstests/core/list_all_local_sessions.js b/jstests/core/list_all_local_sessions.js
index 78189b1b324..72226dfbb08 100644
--- a/jstests/core/list_all_local_sessions.js
+++ b/jstests/core/list_all_local_sessions.js
@@ -11,34 +11,34 @@
// ]
(function() {
- 'use strict';
+'use strict';
- const admin = db.getSisterDB('admin');
+const admin = db.getSisterDB('admin');
- // Get current log level.
- let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
+// Get current log level.
+let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
- try {
- const listAllLocalSessions = function() {
- return admin.aggregate([{'$listLocalSessions': {allUsers: true}}]);
- };
+try {
+ const listAllLocalSessions = function() {
+ return admin.aggregate([{'$listLocalSessions': {allUsers: true}}]);
+ };
- // Start a new session and capture its sessionId.
- const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
+ // Start a new session and capture its sessionId.
+ const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
- // Ensure that the cache now contains the session and is visible by admin.
- const resultArray = assert.doesNotThrow(listAllLocalSessions).toArray();
- assert.gte(resultArray.length, 1);
- const resultArrayMine = resultArray
- .map(function(sess) {
- return sess._id.id;
- })
- .filter(function(id) {
- return 0 == bsonWoCompare({x: id}, {x: myid});
- });
- assert.eq(resultArrayMine.length, 1);
- } finally {
- admin.setLogLevel(originalLogLevel);
- }
+    // Ensure that the cache now contains the session and is visible to admin.
+ const resultArray = assert.doesNotThrow(listAllLocalSessions).toArray();
+ assert.gte(resultArray.length, 1);
+ const resultArrayMine = resultArray
+ .map(function(sess) {
+ return sess._id.id;
+ })
+ .filter(function(id) {
+ return 0 == bsonWoCompare({x: id}, {x: myid});
+ });
+ assert.eq(resultArrayMine.length, 1);
+} finally {
+ admin.setLogLevel(originalLogLevel);
+}
})();
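
Context for the stage used above: $listLocalSessions reads this node's in-memory
session cache (no flush needed), must be the first stage of the pipeline, and runs
against a database rather than a collection, which is why the test aggregates on
'admin'. Minimal standalone form:

// Enumerate every session in this mongod's local cache, across all users.
const localSessions =
    db.getSiblingDB("admin").aggregate([{$listLocalSessions: {allUsers: true}}]).toArray();
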
diff --git a/jstests/core/list_all_sessions.js b/jstests/core/list_all_sessions.js
index 88bd83da628..9d02b99167b 100644
--- a/jstests/core/list_all_sessions.js
+++ b/jstests/core/list_all_sessions.js
@@ -8,51 +8,51 @@
// Basic tests for the $listSessions {allUsers:true} aggregation stage.
(function() {
- 'use strict';
- load('jstests/aggregation/extras/utils.js');
-
- const admin = db.getSiblingDB("admin");
- const config = db.getSiblingDB("config");
- const pipeline = [{'$listSessions': {allUsers: true}}];
- function listSessions() {
- return config.system.sessions.aggregate(pipeline);
- }
- function listSessionsWithFilter(filter) {
- return config.system.sessions.aggregate(
- [{'$listSessions': {allUsers: true}}, {$match: filter}]);
- }
-
- // Get current log level.
- let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
-
- try {
- // Start a new session and capture its sessionId.
- const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
-
- // Ensure that the cache now contains the session and is visible by admin.
- assert.soon(function() {
- const resultArray = listSessions().toArray();
- if (resultArray.length < 1) {
- return false;
- }
- const resultArrayMine = resultArray
- .map(function(sess) {
- return sess._id.id;
- })
- .filter(function(id) {
- return 0 == bsonWoCompare({x: id}, {x: myid});
- });
- return resultArrayMine.length == 1;
- }, "Failed to locate session in collection");
-
- const sessionList = listSessionsWithFilter({_id: "non_existent"}).toArray();
- assert.eq(0, sessionList.length, tojson(sessionList));
-
-    // Make sure pipelining other collections fails.
- assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
- } finally {
- admin.setLogLevel(originalLogLevel);
- }
+'use strict';
+load('jstests/aggregation/extras/utils.js');
+
+const admin = db.getSiblingDB("admin");
+const config = db.getSiblingDB("config");
+const pipeline = [{'$listSessions': {allUsers: true}}];
+function listSessions() {
+ return config.system.sessions.aggregate(pipeline);
+}
+function listSessionsWithFilter(filter) {
+ return config.system.sessions.aggregate(
+ [{'$listSessions': {allUsers: true}}, {$match: filter}]);
+}
+
+// Get current log level.
+let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
+
+try {
+ // Start a new session and capture its sessionId.
+ const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ // Ensure that the cache now contains the session and is visible by admin.
+ assert.soon(function() {
+ const resultArray = listSessions().toArray();
+ if (resultArray.length < 1) {
+ return false;
+ }
+ const resultArrayMine = resultArray
+ .map(function(sess) {
+ return sess._id.id;
+ })
+ .filter(function(id) {
+ return 0 == bsonWoCompare({x: id}, {x: myid});
+ });
+ return resultArrayMine.length == 1;
+ }, "Failed to locate session in collection");
+
+ const sessionList = listSessionsWithFilter({_id: "non_existent"}).toArray();
+ assert.eq(0, sessionList.length, tojson(sessionList));
+
+    // Make sure pipelining other collections fails.
+ assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
+} finally {
+ admin.setLogLevel(originalLogLevel);
+}
})();
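
Unlike $listLocalSessions, $listSessions reads the persisted config.system.sessions collection, which is why this test flushes the cache with refreshLogicalSessionCacheNow (a test-only command) and then polls with assert.soon. The core sequence, as a sketch:

// Persist the session cache, then query the durable sessions collection.
const adminDB = db.getSiblingDB("admin");
const configDB = db.getSiblingDB("config");
assert.commandWorked(adminDB.runCommand({startSession: 1}));
assert.commandWorked(adminDB.runCommand({refreshLogicalSessionCacheNow: 1}));
const sessions =
    configDB.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
assert.gte(sessions.length, 1);  // the refreshed session is now durable
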
diff --git a/jstests/core/list_collections1.js b/jstests/core/list_collections1.js
index ddc563cf852..39445a3ff91 100644
--- a/jstests/core/list_collections1.js
+++ b/jstests/core/list_collections1.js
@@ -12,291 +12,290 @@
// listCollections output.
(function() {
- "use strict";
-
- var mydb = db.getSiblingDB("list_collections1");
- var cursor;
- var res;
- var collObj;
-
- //
- // Test basic command output.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- res = mydb.runCommand("listCollections");
- assert.commandWorked(res);
- assert.eq('object', typeof(res.cursor));
- assert.eq(0, res.cursor.id);
- assert.eq('string', typeof(res.cursor.ns));
- collObj = res.cursor.firstBatch.filter(function(c) {
- return c.name === "foo";
- })[0];
- assert(collObj);
- assert.eq('object', typeof(collObj.options));
- assert.eq('collection', collObj.type, tojson(collObj));
- assert.eq(false, collObj.info.readOnly, tojson(collObj));
- assert.eq("object", typeof(collObj.idIndex), tojson(collObj));
- assert(collObj.idIndex.hasOwnProperty("v"), tojson(collObj));
-
- //
- // Test basic command output for views.
- //
-
- assert.commandWorked(mydb.createView("bar", "foo", []));
- res = mydb.runCommand("listCollections");
- assert.commandWorked(res);
- collObj = res.cursor.firstBatch.filter(function(c) {
- return c.name === "bar";
- })[0];
- assert(collObj);
- assert.eq("object", typeof(collObj.options), tojson(collObj));
- assert.eq("foo", collObj.options.viewOn, tojson(collObj));
- assert.eq([], collObj.options.pipeline, tojson(collObj));
- assert.eq("view", collObj.type, tojson(collObj));
- assert.eq(true, collObj.info.readOnly, tojson(collObj));
- assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj));
-
- //
- // Test basic usage with DBCommandCursor.
- //
-
- var getListCollectionsCursor = function(options, subsequentBatchSize) {
- return new DBCommandCursor(
- mydb, mydb.runCommand("listCollections", options), subsequentBatchSize);
- };
-
- var cursorCountMatching = function(cursor, pred) {
- return cursor.toArray().filter(pred).length;
- };
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
- return c.name === "foo";
- }));
-
- //
- // Test that the collection metadata object is returned correctly.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- assert.commandWorked(mydb.runCommand(
- {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]}));
- assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
- return c.name === "foo" && c.options.temp === undefined;
- }));
- assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
- return c.name === "bar" && c.options.temp === true;
- }));
-
- //
- // Test basic usage of "filter" option.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- assert.commandWorked(mydb.runCommand(
- {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]}));
- assert.eq(2, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
- assert.eq(2, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
- assert.eq(1, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
- assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
- mydb.foo.drop();
- assert.eq(1, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
- assert.eq(1, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
- assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
- assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
- mydb.bar.drop();
- assert.eq(0, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
- assert.eq(0, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
- assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
- assert.eq(0, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
-
- //
- // Test for invalid values of "filter".
- //
-
- assert.throws(function() {
- getListCollectionsCursor({filter: {$invalid: 1}});
- });
- assert.throws(function() {
- getListCollectionsCursor({filter: 0});
- });
- assert.throws(function() {
- getListCollectionsCursor({filter: 'x'});
- });
- assert.throws(function() {
- getListCollectionsCursor({filter: []});
- });
-
- //
- // Test basic usage of "cursor.batchSize" option.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- assert.commandWorked(mydb.createCollection("bar"));
- cursor = getListCollectionsCursor({cursor: {batchSize: 2}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
- cursor = getListCollectionsCursor({cursor: {batchSize: 1}});
- assert.eq(1, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
- cursor = getListCollectionsCursor({cursor: {batchSize: 0}});
- assert.eq(0, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
-
- cursor = getListCollectionsCursor({cursor: {batchSize: NumberInt(2)}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
- cursor = getListCollectionsCursor({cursor: {batchSize: NumberLong(2)}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
-
- // Test a large batch size, and assert that at least 2 results are returned in the initial
- // batch.
- cursor = getListCollectionsCursor({cursor: {batchSize: Math.pow(2, 62)}});
- assert.lte(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
-
- // Ensure that the server accepts an empty object for "cursor". This is equivalent to not
- // specifying "cursor" at all.
- //
- // We do not test for objsLeftInBatch() here, since the default batch size for this command
- // is not specified.
- cursor = getListCollectionsCursor({cursor: {}});
- assert.eq(2, cursorCountMatching(cursor, function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
-
- //
- // Test for invalid values of "cursor" and "cursor.batchSize".
- //
-
- assert.throws(function() {
- getListCollectionsCursor({cursor: 0});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: 'x'});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: []});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: {foo: 1}});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: {batchSize: -1}});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: {batchSize: 'x'}});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: {batchSize: {}}});
- });
- assert.throws(function() {
- getListCollectionsCursor({cursor: {batchSize: 2, foo: 1}});
- });
-
- //
- // Test more than 2 batches of results.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- assert.commandWorked(mydb.createCollection("bar"));
- assert.commandWorked(mydb.createCollection("baz"));
- assert.commandWorked(mydb.createCollection("quux"));
- cursor = getListCollectionsCursor({cursor: {batchSize: 0}}, 2);
- assert.eq(0, cursor.objsLeftInBatch());
- assert(cursor.hasNext());
- assert.eq(2, cursor.objsLeftInBatch());
- cursor.next();
- assert(cursor.hasNext());
- assert.eq(1, cursor.objsLeftInBatch());
- cursor.next();
- assert(cursor.hasNext());
- assert.eq(2, cursor.objsLeftInBatch());
- cursor.next();
- assert(cursor.hasNext());
- assert.eq(1, cursor.objsLeftInBatch());
-
- //
- // Test on non-existent database.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- cursor = getListCollectionsCursor();
- assert.eq(0, cursorCountMatching(cursor, function(c) {
- return c.name === "foo";
- }));
-
- //
- // Test on empty database.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- mydb.foo.drop();
- cursor = getListCollectionsCursor();
- assert.eq(0, cursorCountMatching(cursor, function(c) {
- return c.name === "foo";
- }));
-
- //
- // Test killCursors against a listCollections cursor.
- //
-
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- assert.commandWorked(mydb.createCollection("bar"));
- assert.commandWorked(mydb.createCollection("baz"));
- assert.commandWorked(mydb.createCollection("quux"));
-
- res = mydb.runCommand("listCollections", {cursor: {batchSize: 0}});
- cursor = new DBCommandCursor(mydb, res, 2);
- cursor.close();
- cursor = new DBCommandCursor(mydb, res, 2);
- assert.throws(function() {
- cursor.hasNext();
- });
-
- //
- // Test parsing of the 'includePendingDrops' flag. If included, its argument must be of
- // 'boolean' type. Functional testing of the 'includePendingDrops' flag is done in
- // "jstests/replsets".
- //
-
- // Bad argument types.
- assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: {}}),
- ErrorCodes.TypeMismatch);
- assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: "s"}),
- ErrorCodes.TypeMismatch);
-
- // Valid argument types.
- assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: 1}));
- assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true}));
- assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: false}));
+"use strict";
+var mydb = db.getSiblingDB("list_collections1");
+var cursor;
+var res;
+var collObj;
+
+//
+// Test basic command output.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+res = mydb.runCommand("listCollections");
+assert.commandWorked(res);
+assert.eq('object', typeof (res.cursor));
+assert.eq(0, res.cursor.id);
+assert.eq('string', typeof (res.cursor.ns));
+collObj = res.cursor.firstBatch.filter(function(c) {
+ return c.name === "foo";
+})[0];
+assert(collObj);
+assert.eq('object', typeof (collObj.options));
+assert.eq('collection', collObj.type, tojson(collObj));
+assert.eq(false, collObj.info.readOnly, tojson(collObj));
+assert.eq("object", typeof (collObj.idIndex), tojson(collObj));
+assert(collObj.idIndex.hasOwnProperty("v"), tojson(collObj));
+
+//
+// Test basic command output for views.
+//
+
+assert.commandWorked(mydb.createView("bar", "foo", []));
+res = mydb.runCommand("listCollections");
+assert.commandWorked(res);
+collObj = res.cursor.firstBatch.filter(function(c) {
+ return c.name === "bar";
+})[0];
+assert(collObj);
+assert.eq("object", typeof (collObj.options), tojson(collObj));
+assert.eq("foo", collObj.options.viewOn, tojson(collObj));
+assert.eq([], collObj.options.pipeline, tojson(collObj));
+assert.eq("view", collObj.type, tojson(collObj));
+assert.eq(true, collObj.info.readOnly, tojson(collObj));
+assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj));
+
+//
+// Test basic usage with DBCommandCursor.
+//
+
+var getListCollectionsCursor = function(options, subsequentBatchSize) {
+ return new DBCommandCursor(
+ mydb, mydb.runCommand("listCollections", options), subsequentBatchSize);
+};
+
+var cursorCountMatching = function(cursor, pred) {
+ return cursor.toArray().filter(pred).length;
+};
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
+ return c.name === "foo";
+ }));
+
+//
+// Test that the collection metadata object is returned correctly.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+assert.commandWorked(mydb.runCommand(
+ {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]}));
+assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
+ return c.name === "foo" && c.options.temp === undefined;
+ }));
+assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
+ return c.name === "bar" && c.options.temp === true;
+ }));
+
+//
+// Test basic usage of "filter" option.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+assert.commandWorked(mydb.runCommand(
+ {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]}));
+assert.eq(2, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+assert.eq(2, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
+assert.eq(1, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
+assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
+mydb.foo.drop();
+assert.eq(1, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+assert.eq(1, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
+assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
+assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
+mydb.bar.drop();
+assert.eq(0, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+assert.eq(0, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
+assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
+assert.eq(0, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
+
+//
+// Test for invalid values of "filter".
+//
+
+assert.throws(function() {
+ getListCollectionsCursor({filter: {$invalid: 1}});
+});
+assert.throws(function() {
+ getListCollectionsCursor({filter: 0});
+});
+assert.throws(function() {
+ getListCollectionsCursor({filter: 'x'});
+});
+assert.throws(function() {
+ getListCollectionsCursor({filter: []});
+});
+
+//
+// Test basic usage of "cursor.batchSize" option.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+assert.commandWorked(mydb.createCollection("bar"));
+cursor = getListCollectionsCursor({cursor: {batchSize: 2}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+cursor = getListCollectionsCursor({cursor: {batchSize: 1}});
+assert.eq(1, cursor.objsLeftInBatch());
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+cursor = getListCollectionsCursor({cursor: {batchSize: 0}});
+assert.eq(0, cursor.objsLeftInBatch());
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+
+cursor = getListCollectionsCursor({cursor: {batchSize: NumberInt(2)}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+cursor = getListCollectionsCursor({cursor: {batchSize: NumberLong(2)}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+
+// Test a large batch size, and assert that at least 2 results are returned in the initial
+// batch.
+cursor = getListCollectionsCursor({cursor: {batchSize: Math.pow(2, 62)}});
+assert.lte(2, cursor.objsLeftInBatch());
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+
+// Ensure that the server accepts an empty object for "cursor". This is equivalent to not
+// specifying "cursor" at all.
+//
+// We do not test for objsLeftInBatch() here, since the default batch size for this command
+// is not specified.
+cursor = getListCollectionsCursor({cursor: {}});
+assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
+
+//
+// Test for invalid values of "cursor" and "cursor.batchSize".
+//
+
+assert.throws(function() {
+ getListCollectionsCursor({cursor: 0});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: 'x'});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: []});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: {foo: 1}});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: -1}});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: 'x'}});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: {}}});
+});
+assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: 2, foo: 1}});
+});
+
+//
+// Test more than 2 batches of results.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+assert.commandWorked(mydb.createCollection("bar"));
+assert.commandWorked(mydb.createCollection("baz"));
+assert.commandWorked(mydb.createCollection("quux"));
+cursor = getListCollectionsCursor({cursor: {batchSize: 0}}, 2);
+assert.eq(0, cursor.objsLeftInBatch());
+assert(cursor.hasNext());
+assert.eq(2, cursor.objsLeftInBatch());
+cursor.next();
+assert(cursor.hasNext());
+assert.eq(1, cursor.objsLeftInBatch());
+cursor.next();
+assert(cursor.hasNext());
+assert.eq(2, cursor.objsLeftInBatch());
+cursor.next();
+assert(cursor.hasNext());
+assert.eq(1, cursor.objsLeftInBatch());
+
+//
+// Test on non-existent database.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+cursor = getListCollectionsCursor();
+assert.eq(0, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo";
+ }));
+
+//
+// Test on empty database.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+mydb.foo.drop();
+cursor = getListCollectionsCursor();
+assert.eq(0, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo";
+ }));
+
+//
+// Test killCursors against a listCollections cursor.
+//
+
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+assert.commandWorked(mydb.createCollection("bar"));
+assert.commandWorked(mydb.createCollection("baz"));
+assert.commandWorked(mydb.createCollection("quux"));
+
+res = mydb.runCommand("listCollections", {cursor: {batchSize: 0}});
+cursor = new DBCommandCursor(mydb, res, 2);
+cursor.close();
+cursor = new DBCommandCursor(mydb, res, 2);
+assert.throws(function() {
+ cursor.hasNext();
+});
+
+//
+// Test parsing of the 'includePendingDrops' flag. If included, its argument must be of
+// 'boolean' type. Functional testing of the 'includePendingDrops' flag is done in
+// "jstests/replsets".
+//
+
+// Bad argument types.
+assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: {}}),
+ ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: "s"}),
+ ErrorCodes.TypeMismatch);
+
+// Valid argument types.
+assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: 1}));
+assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true}));
+assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: false}));
}());
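
In the multi-batch block above, cursor.batchSize bounds only the first reply; the DBCommandCursor's extra argument sets the getMore batch size, which is what produces the alternating 2/1 objsLeftInBatch pattern. A condensed sketch:

// batchSize: 0 yields an empty first batch; each getMore then returns
// at most two documents (the cursor's subsequent batch size).
const listDB = db.getSiblingDB("list_collections1");
const reply =
    assert.commandWorked(listDB.runCommand("listCollections", {cursor: {batchSize: 0}}));
const cur = new DBCommandCursor(listDB, reply, 2 /* getMore batch size */);
let seen = 0;
while (cur.hasNext()) {
    cur.next();
    seen++;
}
print("collections seen: " + seen);
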
diff --git a/jstests/core/list_collections_filter.js b/jstests/core/list_collections_filter.js
index fdd1c85429c..e2f93f84ead 100644
--- a/jstests/core/list_collections_filter.js
+++ b/jstests/core/list_collections_filter.js
@@ -1,115 +1,111 @@
// Test SERVER-18622 listCollections should special case filtering by name.
// @tags: [requires_replication]
(function() {
- "use strict";
- var mydb = db.getSiblingDB("list_collections_filter");
- assert.commandWorked(mydb.dropDatabase());
+"use strict";
+var mydb = db.getSiblingDB("list_collections_filter");
+assert.commandWorked(mydb.dropDatabase());
- // Make some collections.
- assert.commandWorked(mydb.createCollection("lists"));
- assert.commandWorked(mydb.createCollection("ordered_sets"));
- assert.commandWorked(mydb.createCollection("unordered_sets"));
- assert.commandWorked(mydb.runCommand({
- applyOps:
- [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "arrays_temp", temp: true}}]
- }));
+// Make some collections.
+assert.commandWorked(mydb.createCollection("lists"));
+assert.commandWorked(mydb.createCollection("ordered_sets"));
+assert.commandWorked(mydb.createCollection("unordered_sets"));
+assert.commandWorked(mydb.runCommand(
+ {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "arrays_temp", temp: true}}]}));
- /**
- * Asserts that the names of the collections returned from running the listCollections
- * command with the given filter match the expected names.
- */
- function testListCollections(filter, expectedNames) {
- if (filter === undefined) {
- filter = {};
- }
+/**
+ * Asserts that the names of the collections returned from running the listCollections
+ * command with the given filter match the expected names.
+ */
+function testListCollections(filter, expectedNames) {
+ if (filter === undefined) {
+ filter = {};
+ }
- var cursor =
- new DBCommandCursor(mydb, mydb.runCommand("listCollections", {filter: filter}));
- function stripToName(result) {
- return result.name;
- }
- var cursorResultNames = cursor.toArray().map(stripToName);
+ var cursor = new DBCommandCursor(mydb, mydb.runCommand("listCollections", {filter: filter}));
+ function stripToName(result) {
+ return result.name;
+ }
+ var cursorResultNames = cursor.toArray().map(stripToName);
- assert.eq(cursorResultNames.sort(), expectedNames.sort());
+ assert.eq(cursorResultNames.sort(), expectedNames.sort());
- // Assert the shell helper returns the same list, but in sorted order.
- var shellResultNames = mydb.getCollectionInfos(filter).map(stripToName);
- assert.eq(shellResultNames, expectedNames.sort());
- }
+ // Assert the shell helper returns the same list, but in sorted order.
+ var shellResultNames = mydb.getCollectionInfos(filter).map(stripToName);
+ assert.eq(shellResultNames, expectedNames.sort());
+}
- // No filter.
- testListCollections({}, ["lists", "ordered_sets", "unordered_sets", "arrays_temp"]);
+// No filter.
+testListCollections({}, ["lists", "ordered_sets", "unordered_sets", "arrays_temp"]);
- // Filter without name.
- testListCollections({options: {}}, ["lists", "ordered_sets", "unordered_sets"]);
+// Filter without name.
+testListCollections({options: {}}, ["lists", "ordered_sets", "unordered_sets"]);
- // Filter with exact match on name.
- testListCollections({name: "lists"}, ["lists"]);
- testListCollections({name: "non-existent"}, []);
- testListCollections({name: ""}, []);
- testListCollections({name: 1234}, []);
+// Filter with exact match on name.
+testListCollections({name: "lists"}, ["lists"]);
+testListCollections({name: "non-existent"}, []);
+testListCollections({name: ""}, []);
+testListCollections({name: 1234}, []);
- // Filter with $in.
- testListCollections({name: {$in: ["lists"]}}, ["lists"]);
- testListCollections({name: {$in: []}}, []);
- testListCollections({name: {$in: ["lists", "ordered_sets", "non-existent", "", 1234]}},
- ["lists", "ordered_sets"]);
- // With a regex.
- testListCollections({name: {$in: ["lists", /.*_sets$/, "non-existent", "", 1234]}},
- ["lists", "ordered_sets", "unordered_sets"]);
+// Filter with $in.
+testListCollections({name: {$in: ["lists"]}}, ["lists"]);
+testListCollections({name: {$in: []}}, []);
+testListCollections({name: {$in: ["lists", "ordered_sets", "non-existent", "", 1234]}},
+ ["lists", "ordered_sets"]);
+// With a regex.
+testListCollections({name: {$in: ["lists", /.*_sets$/, "non-existent", "", 1234]}},
+ ["lists", "ordered_sets", "unordered_sets"]);
- // Filter with $and.
- testListCollections({name: "lists", options: {}}, ["lists"]);
- testListCollections({name: "lists", options: {temp: true}}, []);
- testListCollections({$and: [{name: "lists"}, {options: {temp: true}}]}, []);
- testListCollections({name: "arrays_temp", options: {temp: true}}, ["arrays_temp"]);
+// Filter with $and.
+testListCollections({name: "lists", options: {}}, ["lists"]);
+testListCollections({name: "lists", options: {temp: true}}, []);
+testListCollections({$and: [{name: "lists"}, {options: {temp: true}}]}, []);
+testListCollections({name: "arrays_temp", options: {temp: true}}, ["arrays_temp"]);
- // Filter with $and and $in.
- testListCollections({name: {$in: ["lists", /.*_sets$/]}, options: {}},
- ["lists", "ordered_sets", "unordered_sets"]);
- testListCollections({
- $and: [
- {name: {$in: ["lists", /.*_sets$/]}},
- {name: "lists"},
- {options: {}},
- ]
- },
- ["lists"]);
- testListCollections({
- $and: [
- {name: {$in: ["lists", /.*_sets$/]}},
- {name: "non-existent"},
- {options: {}},
- ]
- },
- []);
+// Filter with $and and $in.
+testListCollections({name: {$in: ["lists", /.*_sets$/]}, options: {}},
+ ["lists", "ordered_sets", "unordered_sets"]);
+testListCollections({
+ $and: [
+ {name: {$in: ["lists", /.*_sets$/]}},
+ {name: "lists"},
+ {options: {}},
+ ]
+},
+ ["lists"]);
+testListCollections({
+ $and: [
+ {name: {$in: ["lists", /.*_sets$/]}},
+ {name: "non-existent"},
+ {options: {}},
+ ]
+},
+ []);
- // Filter with $expr.
- testListCollections({$expr: {$eq: ["$name", "lists"]}}, ["lists"]);
+// Filter with $expr.
+testListCollections({$expr: {$eq: ["$name", "lists"]}}, ["lists"]);
- // Filter with $expr with an unbound variable.
- assert.throws(function() {
- mydb.getCollectionInfos({$expr: {$eq: ["$name", "$$unbound"]}});
- });
+// Filter with $expr with an unbound variable.
+assert.throws(function() {
+ mydb.getCollectionInfos({$expr: {$eq: ["$name", "$$unbound"]}});
+});
- // Filter with $expr with a runtime error.
- assert.throws(function() {
- mydb.getCollectionInfos({$expr: {$abs: "$name"}});
- });
+// Filter with $expr with a runtime error.
+assert.throws(function() {
+ mydb.getCollectionInfos({$expr: {$abs: "$name"}});
+});
- // No extensions are allowed in filters.
- assert.throws(function() {
- mydb.getCollectionInfos({$text: {$search: "str"}});
- });
- assert.throws(function() {
- mydb.getCollectionInfos({
- $where: function() {
- return true;
- }
- });
- });
- assert.throws(function() {
- mydb.getCollectionInfos(
- {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}});
+// No extensions are allowed in filters.
+assert.throws(function() {
+ mydb.getCollectionInfos({$text: {$search: "str"}});
+});
+assert.throws(function() {
+ mydb.getCollectionInfos({
+ $where: function() {
+ return true;
+ }
});
+});
+assert.throws(function() {
+ mydb.getCollectionInfos({a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}});
+});
}());
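
The same filters are available through the shell helper getCollectionInfos(), which the test exercises in its second assertion; the helper additionally returns the results sorted by name. For instance, mixing string and regex name filters (a sketch against a hypothetical demo database):

// getCollectionInfos() wraps listCollections and returns name-sorted infos.
const demoDB = db.getSiblingDB("list_collections_filter_demo");  // hypothetical name
assert.commandWorked(demoDB.dropDatabase());
assert.commandWorked(demoDB.createCollection("lists"));
assert.commandWorked(demoDB.createCollection("ordered_sets"));
const names = demoDB.getCollectionInfos({name: {$in: ["lists", /.*_sets$/]}})
                  .map((info) => info.name);
assert.eq(["lists", "ordered_sets"], names);
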
diff --git a/jstests/core/list_collections_name_only.js b/jstests/core/list_collections_name_only.js
index dd50398bcba..9a89fed9e20 100644
--- a/jstests/core/list_collections_name_only.js
+++ b/jstests/core/list_collections_name_only.js
@@ -1,34 +1,33 @@
// Test nameOnly option of listCollections
(function() {
- "use strict";
+"use strict";
- var mydb = db.getSiblingDB("list_collections_nameonly");
- var res;
- var collObj;
+var mydb = db.getSiblingDB("list_collections_nameonly");
+var res;
+var collObj;
- assert.commandWorked(mydb.dropDatabase());
- assert.commandWorked(mydb.createCollection("foo"));
- res = mydb.runCommand({listCollections: 1, nameOnly: true});
- assert.commandWorked(res);
- collObj = res.cursor.firstBatch[0];
- // collObj should only have name and type fields.
- assert.eq('foo', collObj.name);
- assert.eq('collection', collObj.type);
- assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj));
- assert(!collObj.hasOwnProperty("options"), tojson(collObj));
- assert(!collObj.hasOwnProperty("info"), tojson(collObj));
-
- // listCollections for views still works
- assert.commandWorked(mydb.createView("bar", "foo", []));
- res = mydb.runCommand({listCollections: 1, nameOnly: true});
- assert.commandWorked(res);
- print(tojson(res));
- collObj = res.cursor.firstBatch.filter(function(c) {
- return c.name === "bar";
- })[0];
- assert.eq('bar', collObj.name);
- assert.eq('view', collObj.type);
- assert(!collObj.hasOwnProperty("options"), tojson(collObj));
- assert(!collObj.hasOwnProperty("info"), tojson(collObj));
+assert.commandWorked(mydb.dropDatabase());
+assert.commandWorked(mydb.createCollection("foo"));
+res = mydb.runCommand({listCollections: 1, nameOnly: true});
+assert.commandWorked(res);
+collObj = res.cursor.firstBatch[0];
+// collObj should only have name and type fields.
+assert.eq('foo', collObj.name);
+assert.eq('collection', collObj.type);
+assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj));
+assert(!collObj.hasOwnProperty("options"), tojson(collObj));
+assert(!collObj.hasOwnProperty("info"), tojson(collObj));
+// listCollections for views still works
+assert.commandWorked(mydb.createView("bar", "foo", []));
+res = mydb.runCommand({listCollections: 1, nameOnly: true});
+assert.commandWorked(res);
+print(tojson(res));
+collObj = res.cursor.firstBatch.filter(function(c) {
+ return c.name === "bar";
+})[0];
+assert.eq('bar', collObj.name);
+assert.eq('view', collObj.type);
+assert(!collObj.hasOwnProperty("options"), tojson(collObj));
+assert(!collObj.hasOwnProperty("info"), tojson(collObj));
}());
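
With nameOnly: true each cursor entry is trimmed to its name and type, which the test verifies by asserting the absence of idIndex, options, and info. Stated positively, as a sketch:

// Every nameOnly result carries exactly the 'name' and 'type' fields.
const nameDB = db.getSiblingDB("list_collections_nameonly");
const reply = assert.commandWorked(nameDB.runCommand({listCollections: 1, nameOnly: true}));
reply.cursor.firstBatch.forEach(function(entry) {
    assert.eq(["name", "type"], Object.keys(entry).sort());
});
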
diff --git a/jstests/core/list_collections_no_views.js b/jstests/core/list_collections_no_views.js
index 1b454eb7978..ed2c1b95d02 100644
--- a/jstests/core/list_collections_no_views.js
+++ b/jstests/core/list_collections_no_views.js
@@ -3,132 +3,132 @@
// assumes_superuser_permissions,
// ]
(function() {
- 'use strict';
- let mydb = db.getSiblingDB('list_collections_no_views');
-
- assert.commandWorked(mydb.createCollection('foo'));
- assert.commandWorked(mydb.createView('bar', 'foo', []));
-
- let all = mydb.runCommand({listCollections: 1});
- assert.commandWorked(all);
-
- let allExpected = [
- {
- "name": "bar",
- "type": "view",
- },
- {
- "name": "foo",
- "type": "collection",
- },
- {
- "name": "system.views",
- "type": "collection",
- },
- ];
-
- assert.eq(allExpected,
- all.cursor.firstBatch
- .map(function(c) {
- return {name: c.name, type: c.type};
- })
- .sort(function(c1, c2) {
- if (c1.name > c2.name) {
- return 1;
- }
-
- if (c1.name < c2.name) {
- return -1;
- }
-
- return 0;
- }));
-
- // {type: {$exists: false}} is needed for versions <= 3.2
- let collOnlyCommand = {
- listCollections: 1,
- filter: {$or: [{type: 'collection'}, {type: {$exists: false}}]}
- };
-
- let collOnly = mydb.runCommand(collOnlyCommand);
- assert.commandWorked(collOnly);
-
- let collOnlyExpected = [
- {
- "name": "foo",
- "type": "collection",
- },
- {
- "name": "system.views",
- "type": "collection",
- },
- ];
-
- assert.eq(collOnlyExpected,
- collOnly.cursor.firstBatch
- .map(function(c) {
- return {name: c.name, type: c.type};
- })
- .sort(function(c1, c2) {
- if (c1.name > c2.name) {
- return 1;
- }
-
- if (c1.name < c2.name) {
- return -1;
- }
-
- return 0;
- }));
-
- let viewOnly = mydb.runCommand({listCollections: 1, filter: {type: 'view'}});
- assert.commandWorked(viewOnly);
- let viewOnlyExpected = [{
+'use strict';
+let mydb = db.getSiblingDB('list_collections_no_views');
+
+assert.commandWorked(mydb.createCollection('foo'));
+assert.commandWorked(mydb.createView('bar', 'foo', []));
+
+let all = mydb.runCommand({listCollections: 1});
+assert.commandWorked(all);
+
+let allExpected = [
+ {
"name": "bar",
"type": "view",
- }];
-
- assert.eq(viewOnlyExpected,
- viewOnly.cursor.firstBatch
- .map(function(c) {
- return {name: c.name, type: c.type};
- })
- .sort(function(c1, c2) {
- if (c1.name > c2.name) {
- return 1;
- }
-
- if (c1.name < c2.name) {
- return -1;
- }
-
- return 0;
- }));
-
- let views = mydb.getCollection('system.views');
- views.insertOne({invalid: NumberLong(1000)});
-
- let collOnlyInvalidView = mydb.runCommand(collOnlyCommand);
- assert.eq(collOnlyExpected,
- collOnlyInvalidView.cursor.firstBatch
- .map(function(c) {
- return {name: c.name, type: c.type};
- })
- .sort(function(c1, c2) {
- if (c1.name > c2.name) {
- return 1;
- }
-
- if (c1.name < c2.name) {
- return -1;
- }
-
- return 0;
- }));
-
- assert.commandFailed(mydb.runCommand({listCollections: 1}));
- assert.commandFailed(mydb.runCommand({listCollections: 1, filter: {type: 'view'}}));
-
- // Fix database state for end of test validation and burn-in tests
- mydb.dropDatabase();
+ },
+ {
+ "name": "foo",
+ "type": "collection",
+ },
+ {
+ "name": "system.views",
+ "type": "collection",
+ },
+];
+
+assert.eq(allExpected,
+ all.cursor.firstBatch
+ .map(function(c) {
+ return {name: c.name, type: c.type};
+ })
+ .sort(function(c1, c2) {
+ if (c1.name > c2.name) {
+ return 1;
+ }
+
+ if (c1.name < c2.name) {
+ return -1;
+ }
+
+ return 0;
+ }));
+
+// {type: {$exists: false}} is needed for versions <= 3.2
+let collOnlyCommand = {
+ listCollections: 1,
+ filter: {$or: [{type: 'collection'}, {type: {$exists: false}}]}
+};
+
+let collOnly = mydb.runCommand(collOnlyCommand);
+assert.commandWorked(collOnly);
+
+let collOnlyExpected = [
+ {
+ "name": "foo",
+ "type": "collection",
+ },
+ {
+ "name": "system.views",
+ "type": "collection",
+ },
+];
+
+assert.eq(collOnlyExpected,
+ collOnly.cursor.firstBatch
+ .map(function(c) {
+ return {name: c.name, type: c.type};
+ })
+ .sort(function(c1, c2) {
+ if (c1.name > c2.name) {
+ return 1;
+ }
+
+ if (c1.name < c2.name) {
+ return -1;
+ }
+
+ return 0;
+ }));
+
+let viewOnly = mydb.runCommand({listCollections: 1, filter: {type: 'view'}});
+assert.commandWorked(viewOnly);
+let viewOnlyExpected = [{
+ "name": "bar",
+ "type": "view",
+}];
+
+assert.eq(viewOnlyExpected,
+ viewOnly.cursor.firstBatch
+ .map(function(c) {
+ return {name: c.name, type: c.type};
+ })
+ .sort(function(c1, c2) {
+ if (c1.name > c2.name) {
+ return 1;
+ }
+
+ if (c1.name < c2.name) {
+ return -1;
+ }
+
+ return 0;
+ }));
+
+let views = mydb.getCollection('system.views');
+views.insertOne({invalid: NumberLong(1000)});
+
+let collOnlyInvalidView = mydb.runCommand(collOnlyCommand);
+assert.eq(collOnlyExpected,
+ collOnlyInvalidView.cursor.firstBatch
+ .map(function(c) {
+ return {name: c.name, type: c.type};
+ })
+ .sort(function(c1, c2) {
+ if (c1.name > c2.name) {
+ return 1;
+ }
+
+ if (c1.name < c2.name) {
+ return -1;
+ }
+
+ return 0;
+ }));
+
+assert.commandFailed(mydb.runCommand({listCollections: 1}));
+assert.commandFailed(mydb.runCommand({listCollections: 1, filter: {type: 'view'}}));
+
+// Fix database state for end of test validation and burn-in tests
+mydb.dropDatabase();
})();
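
The failure mode at the end is worth spelling out: once system.views contains a document that cannot be parsed as a view definition, any listCollections that must enumerate views fails, while a filter restricted to collections keeps working. A sketch, assuming write access to system.views as the test's tags require:

// An unparseable entry in system.views only breaks queries that touch views.
const viewsDB = db.getSiblingDB('list_collections_no_views_demo');  // hypothetical name
assert.commandWorked(viewsDB.createCollection('foo'));
viewsDB.getCollection('system.views').insertOne({invalid: NumberLong(1000)});
assert.commandFailed(viewsDB.runCommand({listCollections: 1}));
assert.commandWorked(viewsDB.runCommand(
    {listCollections: 1, filter: {$or: [{type: 'collection'}, {type: {$exists: false}}]}}));
viewsDB.dropDatabase();  // restore a clean state, as the test does
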
diff --git a/jstests/core/list_commands.js b/jstests/core/list_commands.js
index 94e85511a9b..a8ced3cb885 100644
--- a/jstests/core/list_commands.js
+++ b/jstests/core/list_commands.js
@@ -1,27 +1,27 @@
// Test for listCommands.
(function() {
- "use strict";
+"use strict";
- var commands = db.runCommand({listCommands: 1});
- assert.commandWorked(commands);
+var commands = db.runCommand({listCommands: 1});
+assert.commandWorked(commands);
- // Test that result is sorted.
- function isSorted(obj) {
- var previousProperty;
- for (var property in obj["commands"]) {
- if (previousProperty && (previousProperty > property)) {
- return false;
- }
- previousProperty = property;
+// Test that result is sorted.
+function isSorted(obj) {
+ var previousProperty;
+ for (var property in obj["commands"]) {
+ if (previousProperty && (previousProperty > property)) {
+ return false;
}
- return true;
+ previousProperty = property;
}
- assert(isSorted(commands));
+ return true;
+}
+assert(isSorted(commands));
- // Test that result contains basic commands.
- assert(commands.hasOwnProperty("commands"));
- assert(commands["commands"].hasOwnProperty("isMaster"));
- assert(commands["commands"].hasOwnProperty("insert"));
- assert(commands["commands"].hasOwnProperty("ping"));
+// Test that result contains basic commands.
+assert(commands.hasOwnProperty("commands"));
+assert(commands["commands"].hasOwnProperty("isMaster"));
+assert(commands["commands"].hasOwnProperty("insert"));
+assert(commands["commands"].hasOwnProperty("ping"));
})();
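
listCommands is mostly useful for feature detection: the reply's commands object is keyed by command name, so a presence check is cheap. A minimal sketch:

// Detect a command before relying on it.
const commands = assert.commandWorked(db.runCommand({listCommands: 1})).commands;
if (commands.hasOwnProperty("endSessions")) {
    print("server supports endSessions");
} else {
    print("endSessions unavailable on this server");
}
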
diff --git a/jstests/core/list_databases.js b/jstests/core/list_databases.js
index 930e6f36322..2a1db9fc5d1 100644
--- a/jstests/core/list_databases.js
+++ b/jstests/core/list_databases.js
@@ -2,92 +2,91 @@
* Tests for the listDatabases command.
*/
(function() {
- "use strict";
+"use strict";
- // Given the output from the listDatabases command, ensures that the total size reported is the
- // sum of the individual db sizes.
- function verifySizeSum(listDatabasesOut) {
- assert(listDatabasesOut.hasOwnProperty("databases"));
- const dbList = listDatabasesOut.databases;
- let sizeSum = 0;
- for (let i = 0; i < dbList.length; i++) {
- sizeSum += dbList[i].sizeOnDisk;
- }
- assert.eq(sizeSum, listDatabasesOut.totalSize);
+// Given the output from the listDatabases command, ensures that the total size reported is the
+// sum of the individual db sizes.
+function verifySizeSum(listDatabasesOut) {
+ assert(listDatabasesOut.hasOwnProperty("databases"));
+ const dbList = listDatabasesOut.databases;
+ let sizeSum = 0;
+ for (let i = 0; i < dbList.length; i++) {
+ sizeSum += dbList[i].sizeOnDisk;
}
+ assert.eq(sizeSum, listDatabasesOut.totalSize);
+}
- function verifyNameOnly(listDatabasesOut) {
- for (let field in listDatabasesOut) {
- assert(['databases', 'nameOnly', 'ok', 'operationTime', '$clusterTime'].some((f) => f ==
- field),
- 'unexpected field ' + field);
- }
- listDatabasesOut.databases.forEach((database) => {
- for (let field in database) {
- assert.eq(field, "name", "expected name only");
- }
- });
+function verifyNameOnly(listDatabasesOut) {
+ for (let field in listDatabasesOut) {
+ assert(['databases', 'nameOnly', 'ok', 'operationTime', '$clusterTime'].some((f) => f ==
+ field),
+ 'unexpected field ' + field);
}
+ listDatabasesOut.databases.forEach((database) => {
+ for (let field in database) {
+ assert.eq(field, "name", "expected name only");
+ }
+ });
+}
- // Make 4 test databases.
- db.getSiblingDB("jstest_list_databases_foo").coll.insert({});
- db.getSiblingDB("jstest_list_databases_bar").coll.insert({});
- db.getSiblingDB("jstest_list_databases_baz").coll.insert({});
- db.getSiblingDB("jstest_list_databases_zap").coll.insert({});
+// Make 4 test databases.
+db.getSiblingDB("jstest_list_databases_foo").coll.insert({});
+db.getSiblingDB("jstest_list_databases_bar").coll.insert({});
+db.getSiblingDB("jstest_list_databases_baz").coll.insert({});
+db.getSiblingDB("jstest_list_databases_zap").coll.insert({});
- let cmdRes = assert.commandWorked(
- db.adminCommand({listDatabases: 1, filter: {name: /jstest_list_databases/}}));
- assert.eq(4, cmdRes.databases.length);
- verifySizeSum(cmdRes);
+let cmdRes = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, filter: {name: /jstest_list_databases/}}));
+assert.eq(4, cmdRes.databases.length);
+verifySizeSum(cmdRes);
- // Now only list databases starting with a particular prefix.
- cmdRes = assert.commandWorked(
- db.adminCommand({listDatabases: 1, filter: {name: /^jstest_list_databases_ba/}}));
- assert.eq(2, cmdRes.databases.length);
- verifySizeSum(cmdRes);
+// Now only list databases starting with a particular prefix.
+cmdRes = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, filter: {name: /^jstest_list_databases_ba/}}));
+assert.eq(2, cmdRes.databases.length);
+verifySizeSum(cmdRes);
- // Now return only the admin database.
- cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, filter: {name: "admin"}}));
- assert.eq(1, cmdRes.databases.length);
- verifySizeSum(cmdRes);
+// Now return only the admin database.
+cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, filter: {name: "admin"}}));
+assert.eq(1, cmdRes.databases.length);
+verifySizeSum(cmdRes);
- // Now return only the names.
- cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, nameOnly: true}));
- assert.lte(4, cmdRes.databases.length, tojson(cmdRes));
- verifyNameOnly(cmdRes);
+// Now return only the names.
+cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, nameOnly: true}));
+assert.lte(4, cmdRes.databases.length, tojson(cmdRes));
+verifyNameOnly(cmdRes);
- // Now return only the name of the zap database.
- cmdRes = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: /zap/}}));
- assert.eq(1, cmdRes.databases.length, tojson(cmdRes));
- verifyNameOnly(cmdRes);
+// Now return only the name of the zap database.
+cmdRes = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: /zap/}}));
+assert.eq(1, cmdRes.databases.length, tojson(cmdRes));
+verifyNameOnly(cmdRes);
- // $expr in filter.
- cmdRes = assert.commandWorked(db.adminCommand(
- {listDatabases: 1, filter: {$expr: {$eq: ["$name", "jstest_list_databases_zap"]}}}));
- assert.eq(1, cmdRes.databases.length, tojson(cmdRes));
- assert.eq("jstest_list_databases_zap", cmdRes.databases[0].name, tojson(cmdRes));
+// $expr in filter.
+cmdRes = assert.commandWorked(db.adminCommand(
+ {listDatabases: 1, filter: {$expr: {$eq: ["$name", "jstest_list_databases_zap"]}}}));
+assert.eq(1, cmdRes.databases.length, tojson(cmdRes));
+assert.eq("jstest_list_databases_zap", cmdRes.databases[0].name, tojson(cmdRes));
- // $expr with an unbound variable in filter.
- assert.commandFailed(
- db.adminCommand({listDatabases: 1, filter: {$expr: {$eq: ["$name", "$$unbound"]}}}));
+// $expr with an unbound variable in filter.
+assert.commandFailed(
+ db.adminCommand({listDatabases: 1, filter: {$expr: {$eq: ["$name", "$$unbound"]}}}));
- // $expr with a filter that throws at runtime.
- assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$expr: {$abs: "$name"}}}));
+// $expr with a filter that throws at runtime.
+assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$expr: {$abs: "$name"}}}));
- // No extensions are allowed in filters.
- assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$text: {$search: "str"}}}));
- assert.commandFailed(db.adminCommand({
- listDatabases: 1,
- filter: {
- $where: function() {
- return true;
- }
+// No extensions are allowed in filters.
+assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$text: {$search: "str"}}}));
+assert.commandFailed(db.adminCommand({
+ listDatabases: 1,
+ filter: {
+ $where: function() {
+ return true;
}
- }));
- assert.commandFailed(db.adminCommand({
- listDatabases: 1,
- filter: {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}
- }));
-
+ }
+}));
+assert.commandFailed(db.adminCommand({
+ listDatabases: 1,
+ filter: {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}
+}));
}());
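
Computing sizeOnDisk forces the server to examine every database, so nameOnly: true is the cheap variant when only the names matter; that is also why verifyNameOnly insists each entry carries nothing but a name. A sketch:

// Cheap listing: names only, no size computation.
const out = assert.commandWorked(db.adminCommand({listDatabases: 1, nameOnly: true}));
const names = out.databases.map((d) => d.name);
assert(names.every((n) => typeof n === "string"));
printjson(names);
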
diff --git a/jstests/core/list_indexes.js b/jstests/core/list_indexes.js
index f32ff128dbf..b5c3ab4f8b9 100644
--- a/jstests/core/list_indexes.js
+++ b/jstests/core/list_indexes.js
@@ -7,177 +7,177 @@
load("jstests/libs/fixture_helpers.js");
(function() {
- "use strict";
-
- var coll = db.list_indexes1;
- var cursor;
- var res;
- var specs;
-
- //
- // Test basic command output.
- //
-
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- res = coll.runCommand("listIndexes");
- assert.commandWorked(res);
- assert.eq("object", typeof(res.cursor));
- assert.eq(0, res.cursor.id);
- assert.eq("string", typeof(res.cursor.ns));
- assert.eq(1, res.cursor.firstBatch.length);
- assert.eq("_id_", res.cursor.firstBatch[0].name);
-
- //
- // Test basic usage with DBCommandCursor.
- //
-
- var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
- return new DBCommandCursor(
- coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize);
- };
-
- var cursorGetIndexSpecs = function(cursor) {
- return cursor.toArray().sort(function(a, b) {
- return a.name > b.name;
- });
- };
-
- var cursorGetIndexNames = function(cursor) {
- return cursorGetIndexSpecs(cursor).map(function(spec) {
- return spec.name;
- });
- };
-
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- assert.eq(["_id_"], cursorGetIndexNames(getListIndexesCursor(coll)));
-
- //
- // Test that the index metadata object is returned correctly.
- //
-
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
- specs = cursorGetIndexSpecs(getListIndexesCursor(coll));
- assert.eq(2, specs.length);
- assert.eq("_id_", specs[0].name);
- assert.eq(coll.getFullName(), specs[0].ns);
- assert.eq({_id: 1}, specs[0].key);
- assert(!specs[0].hasOwnProperty("unique"));
- assert.eq("a_1", specs[1].name);
- assert.eq(coll.getFullName(), specs[1].ns);
- assert.eq({a: 1}, specs[1].key);
- assert.eq(true, specs[1].unique);
-
- //
- // Test that the command does not accept invalid values for the collection.
- //
-
- assert.commandFailed(coll.getDB().runCommand({listIndexes: ""}));
- assert.commandFailed(coll.getDB().runCommand({listIndexes: 1}));
- assert.commandFailed(coll.getDB().runCommand({listIndexes: {}}));
- assert.commandFailed(coll.getDB().runCommand({listIndexes: []}));
-
- //
- // Test basic usage of "cursor.batchSize" option.
- //
-
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: 2}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: 1}});
- assert.eq(1, cursor.objsLeftInBatch());
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}});
- assert.eq(0, cursor.objsLeftInBatch());
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberInt(2)}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberLong(2)}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: Math.pow(2, 62)}});
- assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- // Ensure that the server accepts an empty object for "cursor". This is equivalent to not
- // specifying "cursor" at all.
- //
- // We do not test for objsLeftInBatch() here, since the default batch size for this command is
- // not specified.
- cursor = getListIndexesCursor(coll, {cursor: {}});
- assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
-
- //
- // Test more than 2 batches of results.
- //
-
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
- assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true}));
- assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true}));
-
- cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}}, 2);
- assert.eq(0, cursor.objsLeftInBatch());
- assert(cursor.hasNext());
- assert.eq(2, cursor.objsLeftInBatch());
-
- cursor.next();
- assert(cursor.hasNext());
- assert.eq(1, cursor.objsLeftInBatch());
-
- cursor.next();
- assert(cursor.hasNext());
- assert.eq(2, cursor.objsLeftInBatch());
-
- cursor.next();
- assert(cursor.hasNext());
- assert.eq(1, cursor.objsLeftInBatch());
-
- cursor.next();
- assert(!cursor.hasNext());
-
- //
- // Test on collection with no indexes. The local database is not accessible via mongos.
- //
-
- if (!FixtureHelpers.isMongos(db)) {
- let localColl = db.getSiblingDB("local").getCollection("list_indexes1");
- localColl.drop();
- assert.commandWorked(
- localColl.getDB().createCollection(localColl.getName(), {autoIndexId: false}));
- assert.eq([], cursorGetIndexNames(getListIndexesCursor(localColl)));
- localColl.drop();
- }
-
- //
-    // Test killCursors against a listIndexes cursor.
- //
-
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
- assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true}));
- assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true}));
-
- res = coll.runCommand("listIndexes", {cursor: {batchSize: 0}});
- cursor = new DBCommandCursor(coll.getDB(), res, 2);
- cursor.close();
- cursor = new DBCommandCursor(coll.getDB(), res, 2);
- assert.throws(function() {
- cursor.hasNext();
+"use strict";
+
+var coll = db.list_indexes1;
+var cursor;
+var res;
+var specs;
+
+//
+// Test basic command output.
+//
+
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+res = coll.runCommand("listIndexes");
+assert.commandWorked(res);
+assert.eq("object", typeof (res.cursor));
+assert.eq(0, res.cursor.id);
+assert.eq("string", typeof (res.cursor.ns));
+assert.eq(1, res.cursor.firstBatch.length);
+assert.eq("_id_", res.cursor.firstBatch[0].name);
+
+//
+// Test basic usage with DBCommandCursor.
+//
+
+var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
+ return new DBCommandCursor(
+ coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize);
+};
+
+var cursorGetIndexSpecs = function(cursor) {
+ return cursor.toArray().sort(function(a, b) {
+ return a.name > b.name;
});
+};
+
+var cursorGetIndexNames = function(cursor) {
+ return cursorGetIndexSpecs(cursor).map(function(spec) {
+ return spec.name;
+ });
+};
+
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+assert.eq(["_id_"], cursorGetIndexNames(getListIndexesCursor(coll)));
+
+//
+// Test that the index metadata object is returned correctly.
+//
+
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
+specs = cursorGetIndexSpecs(getListIndexesCursor(coll));
+assert.eq(2, specs.length);
+assert.eq("_id_", specs[0].name);
+assert.eq(coll.getFullName(), specs[0].ns);
+assert.eq({_id: 1}, specs[0].key);
+assert(!specs[0].hasOwnProperty("unique"));
+assert.eq("a_1", specs[1].name);
+assert.eq(coll.getFullName(), specs[1].ns);
+assert.eq({a: 1}, specs[1].key);
+assert.eq(true, specs[1].unique);
+
+//
+// Test that the command does not accept invalid values for the collection.
+//
+
+assert.commandFailed(coll.getDB().runCommand({listIndexes: ""}));
+assert.commandFailed(coll.getDB().runCommand({listIndexes: 1}));
+assert.commandFailed(coll.getDB().runCommand({listIndexes: {}}));
+assert.commandFailed(coll.getDB().runCommand({listIndexes: []}));
+
+//
+// Test basic usage of "cursor.batchSize" option.
+//
+
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: 2}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: 1}});
+assert.eq(1, cursor.objsLeftInBatch());
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}});
+assert.eq(0, cursor.objsLeftInBatch());
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberInt(2)}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberLong(2)}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: Math.pow(2, 62)}});
+assert.eq(2, cursor.objsLeftInBatch());
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+// Ensure that the server accepts an empty object for "cursor". This is equivalent to not
+// specifying "cursor" at all.
+//
+// We do not test for objsLeftInBatch() here, since the default batch size for this command is
+// not specified.
+cursor = getListIndexesCursor(coll, {cursor: {}});
+assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor));
+
+//
+// Test more than 2 batches of results.
+//
+
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
+assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true}));
+assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true}));
+
+cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}}, 2);
+assert.eq(0, cursor.objsLeftInBatch());
+assert(cursor.hasNext());
+assert.eq(2, cursor.objsLeftInBatch());
+
+cursor.next();
+assert(cursor.hasNext());
+assert.eq(1, cursor.objsLeftInBatch());
+
+cursor.next();
+assert(cursor.hasNext());
+assert.eq(2, cursor.objsLeftInBatch());
+
+cursor.next();
+assert(cursor.hasNext());
+assert.eq(1, cursor.objsLeftInBatch());
+
+cursor.next();
+assert(!cursor.hasNext());
+
+//
+// Test on collection with no indexes. The local database is not accessible via mongos.
+//
+
+if (!FixtureHelpers.isMongos(db)) {
+ let localColl = db.getSiblingDB("local").getCollection("list_indexes1");
+ localColl.drop();
+ assert.commandWorked(
+ localColl.getDB().createCollection(localColl.getName(), {autoIndexId: false}));
+ assert.eq([], cursorGetIndexNames(getListIndexesCursor(localColl)));
+ localColl.drop();
+}
+
+//
+// Test killCursors against a listIndexes cursor.
+//
+
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
+assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true}));
+assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true}));
+
+res = coll.runCommand("listIndexes", {cursor: {batchSize: 0}});
+cursor = new DBCommandCursor(coll.getDB(), res, 2);
+cursor.close();
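+// close() kills the cursor on the server, so rebuilding a DBCommandCursor from the same command
+// response must fail as soon as it attempts a getMore.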
+cursor = new DBCommandCursor(coll.getDB(), res, 2);
+assert.throws(function() {
+ cursor.hasNext();
+});
}());
diff --git a/jstests/core/list_indexes_invalidation.js b/jstests/core/list_indexes_invalidation.js
index 38a70ce4005..85ab71eec42 100644
--- a/jstests/core/list_indexes_invalidation.js
+++ b/jstests/core/list_indexes_invalidation.js
@@ -3,38 +3,37 @@
// @tags: [assumes_unsharded_collection, requires_non_retryable_commands, requires_fastcount]
(function() {
- 'use strict';
- let collName = 'system_indexes_invalidations';
- let collNameRenamed = 'renamed_collection';
- let coll = db[collName];
- let collRenamed = db[collNameRenamed];
+'use strict';
+let collName = 'system_indexes_invalidations';
+let collNameRenamed = 'renamed_collection';
+let coll = db[collName];
+let collRenamed = db[collNameRenamed];
- function testIndexInvalidation(isRename) {
- coll.drop();
- collRenamed.drop();
- assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}, {c: 1}]));
+function testIndexInvalidation(isRename) {
+ coll.drop();
+ collRenamed.drop();
+ assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}, {c: 1}]));
- // Get the first two indexes.
- let cmd = {listIndexes: collName};
- Object.extend(cmd, {batchSize: 2});
- let res = db.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- printjson(res);
+ // Get the first two indexes.
+ let cmd = {listIndexes: collName};
+ Object.extend(cmd, {batchSize: 2});
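+    // With four indexes (_id plus a, b, and c) and a batch size of two, the cursor cannot be
+    // exhausted by the first batch, so it is still live when the collection is dropped or renamed.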
+ let res = db.runCommand(cmd);
+ assert.commandWorked(res, 'could not run ' + tojson(cmd));
+ printjson(res);
- // Ensure the cursor has data, rename or drop the collection, and exhaust the cursor.
- let cursor = new DBCommandCursor(db, res);
- let errMsg =
- 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res);
- assert(cursor.hasNext(), errMsg);
- if (isRename) {
- assert.commandWorked(coll.renameCollection(collNameRenamed));
- } else {
- assert(coll.drop());
- }
- assert.gt(cursor.itcount(), 0, errMsg);
+ // Ensure the cursor has data, rename or drop the collection, and exhaust the cursor.
+ let cursor = new DBCommandCursor(db, res);
+ let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res);
+ assert(cursor.hasNext(), errMsg);
+ if (isRename) {
+ assert.commandWorked(coll.renameCollection(collNameRenamed));
+ } else {
+ assert(coll.drop());
}
+ assert.gt(cursor.itcount(), 0, errMsg);
+}
- // Test that we invalidate indexes for both collection drops and renames.
- testIndexInvalidation(false);
- testIndexInvalidation(true);
+// Test that we invalidate indexes for both collection drops and renames.
+testIndexInvalidation(false);
+testIndexInvalidation(true);
}());
diff --git a/jstests/core/list_indexes_non_existent_ns.js b/jstests/core/list_indexes_non_existent_ns.js
index 0e134862c3a..11a82746b06 100644
--- a/jstests/core/list_indexes_non_existent_ns.js
+++ b/jstests/core/list_indexes_non_existent_ns.js
@@ -1,18 +1,18 @@
// Test the listIndexes command on non-existent collection.
(function() {
- var dbTest = db.getSiblingDB("list_indexes_non_existent_db");
- assert.commandWorked(dbTest.dropDatabase());
+var dbTest = db.getSiblingDB("list_indexes_non_existent_db");
+assert.commandWorked(dbTest.dropDatabase());
- var coll;
+var coll;
- // Non-existent database
- coll = dbTest.getCollection("list_indexes_non_existent_db");
- assert.commandFailed(coll.runCommand("listIndexes"));
+// Non-existent database
+coll = dbTest.getCollection("list_indexes_non_existent_db");
+assert.commandFailed(coll.runCommand("listIndexes"));
- // Creates the actual database that did not exist till now
- coll.insert({});
+// Create the database, which did not exist until now.
+coll.insert({});
- // Non-existent collection
- coll = dbTest.getCollection("list_indexes_non_existent_collection");
- assert.commandFailed(coll.runCommand("listIndexes"));
+// Non-existent collection
+coll = dbTest.getCollection("list_indexes_non_existent_collection");
+assert.commandFailed(coll.runCommand("listIndexes"));
}());
diff --git a/jstests/core/list_local_sessions.js b/jstests/core/list_local_sessions.js
index 3943ee66c73..c1ba5799c96 100644
--- a/jstests/core/list_local_sessions.js
+++ b/jstests/core/list_local_sessions.js
@@ -11,73 +11,73 @@
// ]
(function() {
- 'use strict';
+'use strict';
- const admin = db.getSisterDB('admin');
- function listLocalSessions() {
- return admin.aggregate([{'$listLocalSessions': {allUsers: false}}]);
- }
+const admin = db.getSisterDB('admin');
+function listLocalSessions() {
+ return admin.aggregate([{'$listLocalSessions': {allUsers: false}}]);
+}
- // Get current log level.
- let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
+// Get current log level.
+let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity;
- try {
- // Start a new session and capture its sessionId.
- const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
+try {
+ // Start a new session and capture its sessionId.
+ const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id;
+ assert(myid !== undefined);
- // Ensure that the cache now contains the session and is visible.
- const resultArray = assert.doesNotThrow(listLocalSessions).toArray();
- assert.gte(resultArray.length, 1);
- const resultArrayMine = resultArray
- .map(function(sess) {
- return sess._id.id;
- })
- .filter(function(id) {
- return 0 == bsonWoCompare({x: id}, {x: myid});
- });
- assert.eq(resultArrayMine.length, 1);
+ // Ensure that the cache now contains the session and is visible.
+ const resultArray = assert.doesNotThrow(listLocalSessions).toArray();
+ assert.gte(resultArray.length, 1);
+ const resultArrayMine = resultArray
+ .map(function(sess) {
+ return sess._id.id;
+ })
+ .filter(function(id) {
+ return 0 == bsonWoCompare({x: id}, {x: myid});
+ });
+ assert.eq(resultArrayMine.length, 1);
- // Try asking for the session by username.
- const myusername = (function() {
- if (0 == bsonWoCompare({x: resultArray[0]._id.uid}, {x: computeSHA256Block("")})) {
- // Code for "we're running in no-auth mode"
- return {user: "", db: ""};
- }
- const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1}));
- const authUsers = connstats.authInfo.authenticatedUsers;
- assert(authUsers !== undefined);
- assert.eq(authUsers.length, 1);
- assert(authUsers[0].user !== undefined);
- assert(authUsers[0].db !== undefined);
- return {user: authUsers[0].user, db: authUsers[0].db};
- })();
+ // Try asking for the session by username.
+ const myusername = (function() {
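+        // The uid of a logical session is the SHA-256 digest of the authenticated user string, so
+        // with auth disabled it equals the digest of the empty string.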
+ if (0 == bsonWoCompare({x: resultArray[0]._id.uid}, {x: computeSHA256Block("")})) {
+ // Code for "we're running in no-auth mode"
+ return {user: "", db: ""};
+ }
+ const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1}));
+ const authUsers = connstats.authInfo.authenticatedUsers;
+ assert(authUsers !== undefined);
+ assert.eq(authUsers.length, 1);
+ assert(authUsers[0].user !== undefined);
+ assert(authUsers[0].db !== undefined);
+ return {user: authUsers[0].user, db: authUsers[0].db};
+ })();
- const listMyLocalSessions = function() {
- return admin.aggregate([{'$listLocalSessions': {users: [myusername]}}]);
- };
+ const listMyLocalSessions = function() {
+ return admin.aggregate([{'$listLocalSessions': {users: [myusername]}}]);
+ };
- const myArray = assert.doesNotThrow(listMyLocalSessions)
- .toArray()
- .map(function(sess) {
- return sess._id.id;
- })
- .filter(function(id) {
- return 0 == bsonWoCompare({x: id}, {x: myid});
- });
- assert.eq(myArray.length, 1);
+ const myArray = assert.doesNotThrow(listMyLocalSessions)
+ .toArray()
+ .map(function(sess) {
+ return sess._id.id;
+ })
+ .filter(function(id) {
+ return 0 == bsonWoCompare({x: id}, {x: myid});
+ });
+ assert.eq(myArray.length, 1);
- print("sessions returned from $listLocalSessions filtered by user: [ " + myArray +
- " ]");
- print("sessions returned from un-filtered $listLocalSessions for this user: [ " +
- resultArrayMine + " ]");
+ print("sessions returned from $listLocalSessions filtered by user: [ " + myArray +
+ " ]");
+ print("sessions returned from un-filtered $listLocalSessions for this user: [ " +
+ resultArrayMine + " ]");
- assert.eq(
- 0,
- bsonWoCompare(myArray, resultArrayMine),
- "set of listed sessions for user contains different sessions from prior $listLocalSessions run");
+ assert.eq(
+ 0,
+ bsonWoCompare(myArray, resultArrayMine),
+ "set of listed sessions for user contains different sessions from prior $listLocalSessions run");
- } finally {
- admin.setLogLevel(originalLogLevel);
- }
+} finally {
+ admin.setLogLevel(originalLogLevel);
+}
})();
diff --git a/jstests/core/list_namespaces_invalidation.js b/jstests/core/list_namespaces_invalidation.js
index ebd5dd82542..4bfbdffd4e6 100644
--- a/jstests/core/list_namespaces_invalidation.js
+++ b/jstests/core/list_namespaces_invalidation.js
@@ -1,71 +1,70 @@
// @tags: [requires_non_retryable_commands, requires_fastcount]
(function() {
- 'use strict';
- let dbInvalidName = 'system_namespaces_invalidations';
- let dbInvalid = db.getSiblingDB(dbInvalidName);
- let num_collections = 3;
- let DROP = 1;
- let RENAME = 2;
- let MOVE = 3;
- function testNamespaceInvalidation(namespaceAction, batchSize) {
- dbInvalid.dropDatabase();
+'use strict';
+let dbInvalidName = 'system_namespaces_invalidations';
+let dbInvalid = db.getSiblingDB(dbInvalidName);
+let num_collections = 3;
+let DROP = 1;
+let RENAME = 2;
+let MOVE = 3;
+function testNamespaceInvalidation(namespaceAction, batchSize) {
+ dbInvalid.dropDatabase();
- // Create enough collections to necessitate multiple cursor batches.
- for (let i = 0; i < num_collections; i++) {
- assert.commandWorked(dbInvalid.createCollection('coll' + i.toString()));
- }
+ // Create enough collections to necessitate multiple cursor batches.
+ for (let i = 0; i < num_collections; i++) {
+ assert.commandWorked(dbInvalid.createCollection('coll' + i.toString()));
+ }
- // Get the first two namespaces using listCollections.
- let cmd = {listCollections: dbInvalidName};
- Object.extend(cmd, {batchSize: batchSize});
- let res = dbInvalid.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- printjson(res);
+ // Get the first two namespaces using listCollections.
+ let cmd = {listCollections: dbInvalidName};
+ Object.extend(cmd, {batchSize: batchSize});
+ let res = dbInvalid.runCommand(cmd);
+ assert.commandWorked(res, 'could not run ' + tojson(cmd));
+ printjson(res);
- // Ensure the cursor has data, invalidate the namespace, and exhaust the cursor.
- let cursor = new DBCommandCursor(dbInvalid, res);
- let errMsg =
- 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res);
- assert(cursor.hasNext(), errMsg);
- if (namespaceAction == RENAME) {
- // Rename the collection to something that does not fit in the previously allocated
- // memory for the record.
- assert.commandWorked(
- dbInvalid['coll1'].renameCollection('coll1' +
- 'lkdsahflaksjdhfsdkljhfskladhfkahfsakfla' +
- 'skfjhaslfaslfkhasklfjhsakljhdsjksahkldjslh'));
- } else if (namespaceAction == DROP) {
- assert(dbInvalid['coll1'].drop());
- } else if (namespaceAction == MOVE) {
- let modCmd = {
- collMod: 'coll1',
- validator: {
- $or: [
- {phone: {$type: "string"}},
- {email: {$regex: /@mongodb\.com$/}},
- {status: {$in: ["Unknown", "Incomplete"]}},
- {address: {$type: "string"}},
- {ssn: {$type: "string"}},
- {favoriteBook: {$type: "string"}},
- {favoriteColor: {$type: "string"}},
- {favoriteBeverage: {$type: "string"}},
- {favoriteDay: {$type: "string"}},
- {favoriteFood: {$type: "string"}},
- {favoriteSport: {$type: "string"}},
- {favoriteMovie: {$type: "string"}},
- {favoriteShow: {$type: "string"}}
- ]
- }
- };
- assert.commandWorked(dbInvalid.runCommand(modCmd));
- }
- assert.gt(cursor.itcount(), 0, errMsg);
- }
- // Test that we invalidate the old namespace record ID when we remove, rename, or move a
- // namespace record.
- for (let j = 2; j < 7; j++) {
- testNamespaceInvalidation(DROP, j);
- testNamespaceInvalidation(RENAME, j);
- testNamespaceInvalidation(MOVE, j);
+ // Ensure the cursor has data, invalidate the namespace, and exhaust the cursor.
+ let cursor = new DBCommandCursor(dbInvalid, res);
+ let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res);
+ assert(cursor.hasNext(), errMsg);
+ if (namespaceAction == RENAME) {
+ // Rename the collection to something that does not fit in the previously allocated
+ // memory for the record.
+ assert.commandWorked(
+ dbInvalid['coll1'].renameCollection('coll1' +
+ 'lkdsahflaksjdhfsdkljhfskladhfkahfsakfla' +
+ 'skfjhaslfaslfkhasklfjhsakljhdsjksahkldjslh'));
+ } else if (namespaceAction == DROP) {
+ assert(dbInvalid['coll1'].drop());
+ } else if (namespaceAction == MOVE) {
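+        // Attach a large validator so that the collMod grows the namespace record beyond its
+        // previously allocated space, forcing the record to move.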
+ let modCmd = {
+ collMod: 'coll1',
+ validator: {
+ $or: [
+ {phone: {$type: "string"}},
+ {email: {$regex: /@mongodb\.com$/}},
+ {status: {$in: ["Unknown", "Incomplete"]}},
+ {address: {$type: "string"}},
+ {ssn: {$type: "string"}},
+ {favoriteBook: {$type: "string"}},
+ {favoriteColor: {$type: "string"}},
+ {favoriteBeverage: {$type: "string"}},
+ {favoriteDay: {$type: "string"}},
+ {favoriteFood: {$type: "string"}},
+ {favoriteSport: {$type: "string"}},
+ {favoriteMovie: {$type: "string"}},
+ {favoriteShow: {$type: "string"}}
+ ]
+ }
+ };
+ assert.commandWorked(dbInvalid.runCommand(modCmd));
}
+ assert.gt(cursor.itcount(), 0, errMsg);
+}
+// Test that we invalidate the old namespace record ID when we remove, rename, or move a
+// namespace record.
+for (let j = 2; j < 7; j++) {
+ testNamespaceInvalidation(DROP, j);
+ testNamespaceInvalidation(RENAME, j);
+ testNamespaceInvalidation(MOVE, j);
+}
}());
diff --git a/jstests/core/list_sessions.js b/jstests/core/list_sessions.js
index 9b04d3c1aa5..65345ca7c01 100644
--- a/jstests/core/list_sessions.js
+++ b/jstests/core/list_sessions.js
@@ -8,65 +8,65 @@
// Basic tests for the $listSessions aggregation stage.
(function() {
- 'use strict';
- load('jstests/aggregation/extras/utils.js');
+'use strict';
+load('jstests/aggregation/extras/utils.js');
- const admin = db.getSiblingDB('admin');
- const config = db.getSiblingDB('config');
- const pipeline = [{'$listSessions': {}}];
- function listSessions() {
- return config.system.sessions.aggregate(pipeline);
- }
+const admin = db.getSiblingDB('admin');
+const config = db.getSiblingDB('config');
+const pipeline = [{'$listSessions': {}}];
+function listSessions() {
+ return config.system.sessions.aggregate(pipeline);
+}
- // Start a new session and capture its sessionId.
- const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
- assert(myid !== undefined);
+// Start a new session and capture its sessionId.
+const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id;
+assert(myid !== undefined);
- // Sync cache to collection and ensure it arrived.
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- var resultArrayMine;
- assert.soon(function() {
- const resultArray = listSessions().toArray();
- if (resultArray.length < 1) {
- return false;
- }
- resultArrayMine = resultArray
- .map(function(sess) {
- return sess._id;
- })
- .filter(function(id) {
- return 0 == bsonWoCompare({x: id.id}, {x: myid});
- });
- return resultArrayMine.length == 1;
- }, "Failed to locate session in collection");
+// Sync cache to collection and ensure it arrived.
+assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
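+// The refresh flushes the in-memory cache, but visibility in config.system.sessions may lag, so
+// poll with assert.soon instead of asserting on the first read.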
+var resultArrayMine;
+assert.soon(function() {
+ const resultArray = listSessions().toArray();
+ if (resultArray.length < 1) {
+ return false;
+ }
+ resultArrayMine = resultArray
+ .map(function(sess) {
+ return sess._id;
+ })
+ .filter(function(id) {
+ return 0 == bsonWoCompare({x: id.id}, {x: myid});
+ });
+ return resultArrayMine.length == 1;
+}, "Failed to locate session in collection");
- // Try asking for the session by username.
- const myusername = (function() {
- if (0 == bsonWoCompare({x: resultArrayMine[0].uid}, {x: computeSHA256Block("")})) {
- // Code for "we're running in no-auth mode"
- return {user: "", db: ""};
- }
- const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1}));
- const authUsers = connstats.authInfo.authenticatedUsers;
- assert(authUsers !== undefined);
- assert.eq(authUsers.length, 1);
- assert(authUsers[0].user !== undefined);
- assert(authUsers[0].db !== undefined);
- return {user: authUsers[0].user, db: authUsers[0].db};
- })();
- function listMySessions() {
- return config.system.sessions.aggregate([{'$listSessions': {users: [myusername]}}]);
+// Try asking for the session by username.
+const myusername = (function() {
+ if (0 == bsonWoCompare({x: resultArrayMine[0].uid}, {x: computeSHA256Block("")})) {
+ // Code for "we're running in no-auth mode"
+ return {user: "", db: ""};
}
- const myArray = listMySessions()
- .toArray()
- .map(function(sess) {
- return sess._id;
- })
- .filter(function(id) {
- return 0 == bsonWoCompare({x: id.id}, {x: myid});
- });
- assert.eq(0, bsonWoCompare(myArray, resultArrayMine));
+ const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1}));
+ const authUsers = connstats.authInfo.authenticatedUsers;
+ assert(authUsers !== undefined);
+ assert.eq(authUsers.length, 1);
+ assert(authUsers[0].user !== undefined);
+ assert(authUsers[0].db !== undefined);
+ return {user: authUsers[0].user, db: authUsers[0].db};
+})();
+function listMySessions() {
+ return config.system.sessions.aggregate([{'$listSessions': {users: [myusername]}}]);
+}
+const myArray = listMySessions()
+ .toArray()
+ .map(function(sess) {
+ return sess._id;
+ })
+ .filter(function(id) {
+ return 0 == bsonWoCompare({x: id.id}, {x: myid});
+ });
+assert.eq(0, bsonWoCompare(myArray, resultArrayMine));
- // Make sure pipelining other collections fail.
- assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
+// Make sure pipelining other collections fail.
+assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace);
})();
diff --git a/jstests/core/long_index_rename.js b/jstests/core/long_index_rename.js
index 06361a10cde..5bcdaf902e3 100644
--- a/jstests/core/long_index_rename.js
+++ b/jstests/core/long_index_rename.js
@@ -3,28 +3,28 @@
// @tags: [requires_non_retryable_commands, assumes_unsharded_collection]
(function() {
- 'use strict';
+'use strict';
- const coll = db.long_index_rename;
- coll.drop();
+const coll = db.long_index_rename;
+coll.drop();
- for (let i = 1; i < 10; i++) {
- coll.save({a: i});
- }
+for (let i = 1; i < 10; i++) {
+ coll.save({a: i});
+}
- // Compute maximum index name length for this collection under FCV 4.0.
- const maxNsLength = 127;
- const maxIndexNameLength = maxNsLength - (coll.getFullName() + ".$").length;
- jsTestLog('Max index name length under FCV 4.0 = ' + maxIndexNameLength);
+// Compute maximum index name length for this collection under FCV 4.0.
+const maxNsLength = 127;
+const maxIndexNameLength = maxNsLength - (coll.getFullName() + ".$").length;
+jsTestLog('Max index name length under FCV 4.0 = ' + maxIndexNameLength);
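+// For example, assuming the default "test" database, coll.getFullName() is
+// "test.long_index_rename" (22 characters), so 127 - 24 = 103 characters remain for the index name.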
- // Create an index with the longest name allowed for this collection.
- assert.commandWorked(coll.createIndex({a: 1}, {name: 'a'.repeat(maxIndexNameLength)}));
+// Create an index with the longest name allowed for this collection.
+assert.commandWorked(coll.createIndex({a: 1}, {name: 'a'.repeat(maxIndexNameLength)}));
- // Beginning with 4.2, index namespaces longer than 127 characters are acceptable.
- assert.commandWorked(coll.createIndex({b: 1}, {name: 'b'.repeat(maxIndexNameLength) + 1}));
+// Beginning with 4.2, index namespaces longer than 127 characters are acceptable.
+assert.commandWorked(coll.createIndex({b: 1}, {name: 'b'.repeat(maxIndexNameLength) + 1}));
- // Before 4.2, index namespace lengths were checked while renaming collections.
- const dest = db.long_index_rename2;
- dest.drop();
- assert.commandWorked(coll.renameCollection(dest.getName()));
+// Before 4.2, index namespace lengths were checked while renaming collections.
+const dest = db.long_index_rename2;
+dest.drop();
+assert.commandWorked(coll.renameCollection(dest.getName()));
})();
diff --git a/jstests/core/max_doc_size.js b/jstests/core/max_doc_size.js
index 775121a5c9f..859896c17f6 100644
--- a/jstests/core/max_doc_size.js
+++ b/jstests/core/max_doc_size.js
@@ -7,66 +7,66 @@
* - Documents over the maximum BSON size limit cannot be written.
*/
(function() {
- 'use strict';
+'use strict';
- const maxBsonObjectSize = db.isMaster().maxBsonObjectSize;
- const docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''});
- const maxStrSize = maxBsonObjectSize - docOverhead;
- const maxStr = 'a'.repeat(maxStrSize);
- const coll = db.max_doc_size;
+const maxBsonObjectSize = db.isMaster().maxBsonObjectSize;
+const docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''});
+const maxStrSize = maxBsonObjectSize - docOverhead;
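+// Sized so that {_id: new ObjectId(), x: maxStr} is exactly maxBsonObjectSize bytes; docOverhead
+// already accounts for the field names and the empty-string value.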
+const maxStr = 'a'.repeat(maxStrSize);
+const coll = db.max_doc_size;
- //
- // Test that documents at the size limit can be written and read back.
- //
- coll.drop();
- assert.commandWorked(
- db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]}));
- assert.eq(coll.find({}).itcount(), 1);
+//
+// Test that documents at the size limit can be written and read back.
+//
+coll.drop();
+assert.commandWorked(
+ db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]}));
+assert.eq(coll.find({}).itcount(), 1);
- coll.drop();
- const objectId = new ObjectId();
- assert.commandWorked(db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {_id: objectId}, u: {_id: objectId, x: maxStr}, upsert: true}]
- }));
- assert.eq(coll.find({}).itcount(), 1);
+coll.drop();
+const objectId = new ObjectId();
+assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: objectId}, u: {_id: objectId, x: maxStr}, upsert: true}]
+}));
+assert.eq(coll.find({}).itcount(), 1);
- coll.drop();
+coll.drop();
- assert.commandWorked(coll.insert({_id: objectId}));
- assert.commandWorked(db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {_id: objectId}, u: {$set: {x: maxStr}}}]
- }));
- assert.eq(coll.find({}).itcount(), 1);
+assert.commandWorked(coll.insert({_id: objectId}));
+assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: objectId}, u: {$set: {x: maxStr}}}]
+}));
+assert.eq(coll.find({}).itcount(), 1);
- //
- // Test that documents over the size limit cannot be written.
- //
- const largerThanMaxString = maxStr + 'a';
+//
+// Test that documents over the size limit cannot be written.
+//
+const largerThanMaxString = maxStr + 'a';
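+// A single extra character pushes the document one byte past maxBsonObjectSize, so each write path
+// below must reject it with its own error code.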
- coll.drop();
- assert.commandFailedWithCode(
- db.runCommand(
- {insert: coll.getName(), documents: [{_id: new ObjectId(), x: largerThanMaxString}]}),
- 2);
+coll.drop();
+assert.commandFailedWithCode(
+ db.runCommand(
+ {insert: coll.getName(), documents: [{_id: new ObjectId(), x: largerThanMaxString}]}),
+ 2);
- coll.drop();
- assert.commandFailedWithCode(db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {_id: objectId}, u: {_id: objectId, x: largerThanMaxString}, upsert: true}]
- }),
- 17420);
+coll.drop();
+assert.commandFailedWithCode(db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: objectId}, u: {_id: objectId, x: largerThanMaxString}, upsert: true}]
+}),
+ 17420);
- coll.drop();
- assert.commandWorked(coll.insert({_id: objectId}));
- assert.commandFailedWithCode(db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {_id: objectId}, u: {$set: {x: largerThanMaxString}}}]
- }),
- 17419);
+coll.drop();
+assert.commandWorked(coll.insert({_id: objectId}));
+assert.commandFailedWithCode(db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: objectId}, u: {$set: {x: largerThanMaxString}}}]
+}),
+ 17419);
})();
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index 8a539cc5493..470d281b080 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -230,14 +230,14 @@ assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(0)}).ok);
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(0)}).ok);
assert.throws.automsg(function() {
- t.find().maxTimeMS(-1).itcount();
-});
+ t.find().maxTimeMS(-1).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS(NumberInt(-1)).itcount();
-});
+ t.find().maxTimeMS(NumberInt(-1)).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS(NumberLong(-1)).itcount();
-});
+ t.find().maxTimeMS(NumberLong(-1)).itcount();
+ });
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: -1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(-1)}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(-1)}).ok);
@@ -260,37 +260,37 @@ assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue)}).ok)
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue)}).ok);
assert.throws.automsg(function() {
- t.find().maxTimeMS(maxValue + 1).itcount();
-});
+ t.find().maxTimeMS(maxValue + 1).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS(NumberInt(maxValue + 1)).itcount();
-});
+ t.find().maxTimeMS(NumberInt(maxValue + 1)).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS(NumberLong(maxValue + 1)).itcount();
-});
+ t.find().maxTimeMS(NumberLong(maxValue + 1)).itcount();
+ });
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: maxValue + 1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue + 1)}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue + 1)}).ok);
// Verify invalid values are rejected.
assert.throws.automsg(function() {
- t.find().maxTimeMS(0.1).itcount();
-});
+ t.find().maxTimeMS(0.1).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS(-0.1).itcount();
-});
+ t.find().maxTimeMS(-0.1).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS().itcount();
-});
+ t.find().maxTimeMS().itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS("").itcount();
-});
+ t.find().maxTimeMS("").itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS(true).itcount();
-});
+ t.find().maxTimeMS(true).itcount();
+ });
assert.throws.automsg(function() {
- t.find().maxTimeMS({}).itcount();
-});
+ t.find().maxTimeMS({}).itcount();
+ });
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: 0.1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: -0.1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: undefined}).ok);
@@ -323,8 +323,8 @@ assert.eq(
1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}).ok);
res = t.getDB().runCommand({ping: 1, maxTimeMS: 10 * 1000});
assert(res.ok == 0 && res.code == ErrorCodes.MaxTimeMSExpired,
- "expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok + ", code=" +
- res.code);
+ "expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok +
+ ", code=" + res.code);
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}).ok);
// maxTimeNeverTimeOut positive test for command.
@@ -333,8 +333,8 @@ assert.eq(1,
t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}).ok);
res = t.getDB().adminCommand({sleep: 1, millis: 300, maxTimeMS: 100});
assert(res.ok == 1,
- "expected command to trigger maxTimeNeverTimeOut fail point, ok=" + res.ok + ", code=" +
- res.code);
+ "expected command to trigger maxTimeNeverTimeOut fail point, ok=" + res.ok +
+ ", code=" + res.code);
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}).ok);
// maxTimeAlwaysTimeOut positive test for query.
diff --git a/jstests/core/min_max_bounds.js b/jstests/core/min_max_bounds.js
index 41e20157985..c2171df23fb 100644
--- a/jstests/core/min_max_bounds.js
+++ b/jstests/core/min_max_bounds.js
@@ -3,74 +3,74 @@
* @tags: [assumes_balancer_off]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.
- load('jstests/aggregation/extras/utils.js'); // For resultsEq.
+load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.
+load('jstests/aggregation/extras/utils.js'); // For resultsEq.
- var coll = db.query_bound_inclusion;
- coll.drop();
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 2, b: 2}));
- assert.writeOK(coll.insert({a: 3, b: 3}));
+var coll = db.query_bound_inclusion;
+coll.drop();
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.writeOK(coll.insert({a: 3, b: 3}));
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
- var res = coll.find().sort({a: 1}).toArray();
- assert.eq(res.length, 3);
+var res = coll.find().sort({a: 1}).toArray();
+assert.eq(res.length, 3);
+assert.eq(res[0].a, 1);
+assert.eq(res[1].a, 2);
+assert.eq(res[2].a, 3);
+
+res = coll.find().sort({a: -1}).toArray();
+assert.eq(res.length, 3);
+assert.eq(res[0].a, 3);
+assert.eq(res[1].a, 2);
+assert.eq(res[2].a, 1);
+
+res = coll.find().min({a: 1}).max({a: 3}).hint({a: 1}).toArray();
+assert.eq(res.length, 2);
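+// Only {a: 1} and {a: 2} are returned: min() is an inclusive lower bound, max() an exclusive one.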
+if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
assert.eq(res[0].a, 1);
assert.eq(res[1].a, 2);
- assert.eq(res[2].a, 3);
+} else {
+ // With more than one shard, we cannot assume the results will come back in order, since we
+ // did not request a sort.
+ assert(resultsEq(res.map((result) => result.a), [1, 2]));
+}
- res = coll.find().sort({a: -1}).toArray();
- assert.eq(res.length, 3);
- assert.eq(res[0].a, 3);
- assert.eq(res[1].a, 2);
- assert.eq(res[2].a, 1);
+res = coll.find().min({a: 1}).max({a: 3}).sort({a: -1}).hint({a: 1}).toArray();
+assert.eq(res.length, 2);
+assert.eq(res[0].a, 2);
+assert.eq(res[1].a, 1);
- res = coll.find().min({a: 1}).max({a: 3}).hint({a: 1}).toArray();
- assert.eq(res.length, 2);
- if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
- assert.eq(res[0].a, 1);
- assert.eq(res[1].a, 2);
- } else {
- // With more than one shard, we cannot assume the results will come back in order, since we
- // did not request a sort.
- assert(resultsEq(res.map((result) => result.a), [1, 2]));
- }
+assert.commandWorked(coll.createIndex({b: -1}));
- res = coll.find().min({a: 1}).max({a: 3}).sort({a: -1}).hint({a: 1}).toArray();
- assert.eq(res.length, 2);
- assert.eq(res[0].a, 2);
- assert.eq(res[1].a, 1);
+res = coll.find().sort({b: -1}).toArray();
+assert.eq(res.length, 3);
+assert.eq(res[0].b, 3);
+assert.eq(res[1].b, 2);
+assert.eq(res[2].b, 1);
- assert.commandWorked(coll.createIndex({b: -1}));
+res = coll.find().sort({b: 1}).toArray();
+assert.eq(res.length, 3);
+assert.eq(res[0].b, 1);
+assert.eq(res[1].b, 2);
+assert.eq(res[2].b, 3);
- res = coll.find().sort({b: -1}).toArray();
- assert.eq(res.length, 3);
+res = coll.find().min({b: 3}).max({b: 1}).hint({b: -1}).toArray();
+assert.eq(res.length, 2);
+if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
assert.eq(res[0].b, 3);
assert.eq(res[1].b, 2);
- assert.eq(res[2].b, 1);
-
- res = coll.find().sort({b: 1}).toArray();
- assert.eq(res.length, 3);
- assert.eq(res[0].b, 1);
- assert.eq(res[1].b, 2);
- assert.eq(res[2].b, 3);
-
- res = coll.find().min({b: 3}).max({b: 1}).hint({b: -1}).toArray();
- assert.eq(res.length, 2);
- if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
- assert.eq(res[0].b, 3);
- assert.eq(res[1].b, 2);
- } else {
- // With more than one shard, we cannot assume the results will come back in order, since we
- // did not request a sort.
- assert(resultsEq(res.map((result) => result.b), [3, 2]));
- }
+} else {
+ // With more than one shard, we cannot assume the results will come back in order, since we
+ // did not request a sort.
+ assert(resultsEq(res.map((result) => result.b), [3, 2]));
+}
- res = coll.find().min({b: 3}).max({b: 1}).sort({b: 1}).hint({b: -1}).toArray();
- assert.eq(res.length, 2);
- assert.eq(res[0].b, 2);
- assert.eq(res[1].b, 3);
+res = coll.find().min({b: 3}).max({b: 1}).sort({b: 1}).hint({b: -1}).toArray();
+assert.eq(res.length, 2);
+assert.eq(res[0].b, 2);
+assert.eq(res[1].b, 3);
})();
diff --git a/jstests/core/min_max_hashed_index.js b/jstests/core/min_max_hashed_index.js
index aacd1987f44..511f5a9ae62 100644
--- a/jstests/core/min_max_hashed_index.js
+++ b/jstests/core/min_max_hashed_index.js
@@ -2,17 +2,17 @@
* Check that min() and max() work with a hashed index.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.min_max_hashed_index;
- coll.drop();
- assert.commandWorked(coll.insert({a: "test"}));
- assert.commandWorked(coll.createIndex({a: 1}));
- const minWithNormalIndex = coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: 1}).toArray();
- assert.eq(minWithNormalIndex, [{a: "test"}]);
+const coll = db.min_max_hashed_index;
+coll.drop();
+assert.commandWorked(coll.insert({a: "test"}));
+assert.commandWorked(coll.createIndex({a: 1}));
+const minWithNormalIndex = coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: 1}).toArray();
+assert.eq(minWithNormalIndex, [{a: "test"}]);
- assert.commandWorked(coll.createIndex({a: "hashed"}));
- const minWithHashedIndex =
- coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: "hashed"}).toArray();
- assert.eq(minWithHashedIndex, [{a: "test"}]);
+assert.commandWorked(coll.createIndex({a: "hashed"}));
+const minWithHashedIndex =
+ coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: "hashed"}).toArray();
+assert.eq(minWithHashedIndex, [{a: "test"}]);
})();
diff --git a/jstests/core/min_max_key.js b/jstests/core/min_max_key.js
index d65d68292fa..e14a7ba4fda 100644
--- a/jstests/core/min_max_key.js
+++ b/jstests/core/min_max_key.js
@@ -1,98 +1,98 @@
// Tests the behavior of queries using MinKey and MaxKey
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'.
+load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'.
- const coll = db.test_min_max;
- coll.drop();
+const coll = db.test_min_max;
+coll.drop();
- const allElements = [
+const allElements = [
+ {_id: "a_max_key", a: MaxKey},
+ {_id: "a_min_key", a: MinKey},
+ {_id: "a_null", a: null},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject", a: {b: "hi"}},
+ {_id: "a_undefined", a: undefined},
+ {_id: "a_string", a: "hello"}
+];
+
+assert.writeOK(coll.insert(allElements));
+
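+// Comparisons against MinKey and MaxKey are exempt from type bracketing: $gt MinKey matches values
+// of every other type, and $lt MaxKey does the same from the other end.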
+function testQueriesWithMinOrMaxKey() {
+ const eqMinRes = coll.find({a: {$eq: MinKey}}).toArray();
+ const expectedEqMin = [{_id: "a_min_key", a: MinKey}];
+ assert(resultsEq(expectedEqMin, eqMinRes), tojson(eqMinRes));
+
+ const gtMinRes = coll.find({a: {$gt: MinKey}}).toArray();
+ const expectedGtMin = [
{_id: "a_max_key", a: MaxKey},
- {_id: "a_min_key", a: MinKey},
{_id: "a_null", a: null},
{_id: "a_number", a: 4},
{_id: "a_subobject", a: {b: "hi"}},
{_id: "a_undefined", a: undefined},
{_id: "a_string", a: "hello"}
];
+ assert(resultsEq(expectedGtMin, gtMinRes), tojson(gtMinRes));
+
+ const gteMinRes = coll.find({a: {$gte: MinKey}}).toArray();
+ assert(resultsEq(allElements, gteMinRes), tojson(gteMinRes));
+
+ const ltMinRes = coll.find({a: {$lt: MinKey}}).toArray();
+ assert(resultsEq([], ltMinRes), tojson(ltMinRes));
+
+ const lteMinRes = coll.find({a: {$lte: MinKey}}).toArray();
+ assert(resultsEq(expectedEqMin, lteMinRes), tojson(lteMinRes));
+
+ const eqMaxRes = coll.find({a: {$eq: MaxKey}}).toArray();
+ const expectedEqMax = [{_id: "a_max_key", a: MaxKey}];
+ assert(resultsEq(expectedEqMax, eqMaxRes), tojson(eqMaxRes));
+
+ const gtMaxRes = coll.find({a: {$gt: MaxKey}}).toArray();
+ assert(resultsEq([], gtMaxRes), tojson(gtMaxRes));
- assert.writeOK(coll.insert(allElements));
-
- function testQueriesWithMinOrMaxKey() {
- const eqMinRes = coll.find({a: {$eq: MinKey}}).toArray();
- const expectedEqMin = [{_id: "a_min_key", a: MinKey}];
- assert(resultsEq(expectedEqMin, eqMinRes), tojson(eqMinRes));
-
- const gtMinRes = coll.find({a: {$gt: MinKey}}).toArray();
- const expectedGtMin = [
- {_id: "a_max_key", a: MaxKey},
- {_id: "a_null", a: null},
- {_id: "a_number", a: 4},
- {_id: "a_subobject", a: {b: "hi"}},
- {_id: "a_undefined", a: undefined},
- {_id: "a_string", a: "hello"}
- ];
- assert(resultsEq(expectedGtMin, gtMinRes), tojson(gtMinRes));
-
- const gteMinRes = coll.find({a: {$gte: MinKey}}).toArray();
- assert(resultsEq(allElements, gteMinRes), tojson(gteMinRes));
-
- const ltMinRes = coll.find({a: {$lt: MinKey}}).toArray();
- assert(resultsEq([], ltMinRes), tojson(ltMinRes));
-
- const lteMinRes = coll.find({a: {$lte: MinKey}}).toArray();
- assert(resultsEq(expectedEqMin, lteMinRes), tojson(lteMinRes));
-
- const eqMaxRes = coll.find({a: {$eq: MaxKey}}).toArray();
- const expectedEqMax = [{_id: "a_max_key", a: MaxKey}];
- assert(resultsEq(expectedEqMax, eqMaxRes), tojson(eqMaxRes));
-
- const gtMaxRes = coll.find({a: {$gt: MaxKey}}).toArray();
- assert(resultsEq([], gtMaxRes), tojson(gtMaxRes));
-
- const gteMaxRes = coll.find({a: {$gte: MaxKey}}).toArray();
- assert(resultsEq(expectedEqMax, gteMaxRes), tojson(gteMaxRes));
-
- const ltMaxRes = coll.find({a: {$lt: MaxKey}}).toArray();
- const expectedLtMax = [
- {_id: "a_min_key", a: MinKey},
- {_id: "a_null", a: null},
- {_id: "a_number", a: 4},
- {_id: "a_subobject", a: {b: "hi"}},
- {_id: "a_undefined", a: undefined},
- {_id: "a_string", a: "hello"}
- ];
- assert(resultsEq(expectedLtMax, ltMaxRes), tojson(ltMaxRes));
-
- const lteMaxRes = coll.find({a: {$lte: MaxKey}}).toArray();
- assert(resultsEq(allElements, lteMaxRes), tojson(lteMaxRes));
- }
-
- function testTypeBracketedQueries() {
- // Queries that do not involve MinKey or MaxKey follow type bracketing and thus do not
- // return MinKey or MaxKey as results. These queries are being run to test this
- // functionality.
- const numRes = coll.find({a: {$gt: 3}}).toArray();
- const expectedNum = [{_id: "a_number", a: 4}];
- assert(resultsEq(expectedNum, numRes), tojson(numRes));
- const noNum = coll.find({a: {$lt: 3}}).toArray();
- assert(resultsEq([], noNum), tojson(noNum));
-
- const stringRes = coll.find({a: {$gt: "best"}}).toArray();
- const expectedString = [{_id: "a_string", a: "hello"}];
- assert(resultsEq(expectedString, stringRes), tojson(stringRes));
- }
-
- testQueriesWithMinOrMaxKey();
- testTypeBracketedQueries();
-
- assert.commandWorked(coll.createIndex({a: 1}));
- // TODO: SERVER-35921 The results of the queries above should not change based on the
- // presence of an index
- assert.commandWorked(coll.dropIndexes());
-
- testQueriesWithMinOrMaxKey();
- testTypeBracketedQueries();
+ const gteMaxRes = coll.find({a: {$gte: MaxKey}}).toArray();
+ assert(resultsEq(expectedEqMax, gteMaxRes), tojson(gteMaxRes));
+
+ const ltMaxRes = coll.find({a: {$lt: MaxKey}}).toArray();
+ const expectedLtMax = [
+ {_id: "a_min_key", a: MinKey},
+ {_id: "a_null", a: null},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject", a: {b: "hi"}},
+ {_id: "a_undefined", a: undefined},
+ {_id: "a_string", a: "hello"}
+ ];
+ assert(resultsEq(expectedLtMax, ltMaxRes), tojson(ltMaxRes));
+
+ const lteMaxRes = coll.find({a: {$lte: MaxKey}}).toArray();
+ assert(resultsEq(allElements, lteMaxRes), tojson(lteMaxRes));
+}
+
+function testTypeBracketedQueries() {
+ // Queries that do not involve MinKey or MaxKey follow type bracketing and thus do not
+ // return MinKey or MaxKey as results. These queries are being run to test this
+ // functionality.
+ const numRes = coll.find({a: {$gt: 3}}).toArray();
+ const expectedNum = [{_id: "a_number", a: 4}];
+ assert(resultsEq(expectedNum, numRes), tojson(numRes));
+ const noNum = coll.find({a: {$lt: 3}}).toArray();
+ assert(resultsEq([], noNum), tojson(noNum));
+
+ const stringRes = coll.find({a: {$gt: "best"}}).toArray();
+ const expectedString = [{_id: "a_string", a: "hello"}];
+ assert(resultsEq(expectedString, stringRes), tojson(stringRes));
+}
+
+testQueriesWithMinOrMaxKey();
+testTypeBracketedQueries();
+
+assert.commandWorked(coll.createIndex({a: 1}));
+// TODO: SERVER-35921 The results of the queries above should not change based on the
+// presence of an index.
+assert.commandWorked(coll.dropIndexes());
+
+testQueriesWithMinOrMaxKey();
+testTypeBracketedQueries();
}());
diff --git a/jstests/core/minmax.js b/jstests/core/minmax.js
index 1387d0adb4e..1a32fe9d059 100644
--- a/jstests/core/minmax.js
+++ b/jstests/core/minmax.js
@@ -1,159 +1,153 @@
// Test min / max query parameters.
// @tags: [assumes_balancer_off]
(function() {
- "use strict";
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/aggregation/extras/utils.js"); // For resultsEq.
-
- const coll = db.jstests_minmax;
- coll.drop();
-
- function addData() {
- assert.commandWorked(coll.save({a: 1, b: 1}));
- assert.commandWorked(coll.save({a: 1, b: 2}));
- assert.commandWorked(coll.save({a: 2, b: 1}));
- assert.commandWorked(coll.save({a: 2, b: 2}));
- }
-
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
- addData();
-
- assert.eq(1,
- coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1}).toArray().length);
- assert.eq(
- 2, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1.5}).toArray().length);
- assert.eq(2,
- coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 2}).toArray().length);
-
- // Single bound.
- assert.eq(3, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).toArray().length);
- assert.eq(3, coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).toArray().length);
- assert.eq(3,
- coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).hint({a: 1, b: 1}).toArray().length);
- assert.eq(
- 3, coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).hint({a: 1, b: 1}).toArray().length);
-
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1, b: -1}));
- addData();
- assert.eq(4, coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).toArray().length);
- assert.eq(4, coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).toArray().length);
- assert.eq(1, coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).toArray().length);
- assert.eq(1, coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).toArray().length);
- assert.eq(
- 4, coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).hint({a: 1, b: -1}).toArray().length);
- assert.eq(
- 4,
- coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).hint({a: 1, b: -1}).toArray().length);
- assert.eq(
- 1, coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).hint({a: 1, b: -1}).toArray().length);
- assert.eq(
- 1,
- coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).hint({a: 1, b: -1}).toArray().length);
-
- // Check that min/max requires a hint.
- let error = assert.throws(() => coll.find().min({a: 1, b: 2}).max({a: 2, b: 1}).toArray());
- assert.eq(error.code, 51173);
-
- // Hint doesn't match.
- error = assert.throws(function() {
- coll.find().min({a: 1}).hint({a: 1, b: -1}).toArray();
- });
- assert.eq(error.code, 51174, error);
-
- error = assert.throws(function() {
- coll.find().min({a: 1, b: 1}).max({a: 1}).hint({a: 1, b: -1}).toArray();
- });
- assert.eq(error.code, 51176, error);
-
- error = assert.throws(function() {
- coll.find().min({b: 1}).max({a: 1, b: 2}).hint({a: 1, b: -1}).toArray();
- });
- assert.eq(error.code, 51176, error);
-
- // No query solutions.
- error = assert.throws(function() {
- coll.find().min({a: 1}).hint({$natural: 1}).toArray();
- });
- assert.eq(error.code, ErrorCodes.BadValue, error);
-
- error = assert.throws(function() {
- coll.find().max({a: 1}).hint({$natural: 1}).toArray();
- });
- assert.eq(error.code, ErrorCodes.BadValue);
-
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1}));
- for (let i = 0; i < 10; ++i) {
- assert.commandWorked(coll.save({_id: i, a: i}));
- }
-
- // Reverse direction scan of the a:1 index between a:6 (inclusive) and a:3 (exclusive) is
- // expected to fail, as max must be > min.
- error = assert.throws(function() {
- coll.find().hint({a: 1}).min({a: 6}).max({a: 3}).sort({a: -1}).toArray();
- });
- assert.eq(error.code, 51175);
-
- // A find with identical min and max values is expected to fail, as max is exclusive.
- error = assert.throws(function() {
- coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).toArray();
- });
- assert.eq(error.code, 51175);
-
- error = assert.throws(function() {
- coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).sort({a: -1}).toArray();
- });
- assert.eq(error.code, 51175);
-
- coll.drop();
- addData();
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
-
- error = assert.throws(function() {
- coll.find().min({a: 1, b: 2}).max({a: 1, b: 2}).hint({a: 1, b: 1}).toArray();
- });
- assert.eq(error.code, 51175);
-
- // Test ascending index.
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.commandWorked(coll.insert({a: 3}));
- assert.commandWorked(coll.insert({a: 4}));
- assert.commandWorked(coll.insert({a: 5}));
-
- let cursor = coll.find().hint({a: 1}).min({a: 4});
- if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
- assert.eq(4, cursor.next().a);
- assert.eq(5, cursor.next().a);
- } else {
- // With more than one shard, we cannot assume the results will come back in order, since we
- // did not request a sort.
- assert(resultsEq([cursor.next().a, cursor.next().a], [4, 5]));
- }
- assert(!cursor.hasNext());
-
- cursor = coll.find().hint({a: 1}).max({a: 4});
- assert.eq(3, cursor.next()["a"]);
- assert(!cursor.hasNext());
-
- // Test descending index.
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({a: -1}));
-
- cursor = coll.find().hint({a: -1}).min({a: 4});
- if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
- assert.eq(4, cursor.next().a);
- assert.eq(3, cursor.next().a);
- } else {
- // With more than one shard, we cannot assume the results will come back in order, since we
- // did not request a sort.
- assert(resultsEq([cursor.next().a, cursor.next().a], [4, 3]));
- }
- assert(!cursor.hasNext());
-
- cursor = coll.find().hint({a: -1}).max({a: 4});
- assert.eq(5, cursor.next()["a"]);
- assert(!cursor.hasNext());
+"use strict";
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/aggregation/extras/utils.js"); // For resultsEq.
+
+const coll = db.jstests_minmax;
+coll.drop();
+
+function addData() {
+ assert.commandWorked(coll.save({a: 1, b: 1}));
+ assert.commandWorked(coll.save({a: 1, b: 2}));
+ assert.commandWorked(coll.save({a: 2, b: 1}));
+ assert.commandWorked(coll.save({a: 2, b: 2}));
+}
+
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
+addData();
+
+assert.eq(1, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1}).toArray().length);
+assert.eq(2, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1.5}).toArray().length);
+assert.eq(2, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 2}).toArray().length);
+
+// Single bound.
+assert.eq(3, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).toArray().length);
+assert.eq(3, coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).toArray().length);
+assert.eq(3, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).hint({a: 1, b: 1}).toArray().length);
+assert.eq(3,
+ coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).hint({a: 1, b: 1}).toArray().length);
+
+coll.drop();
+assert.commandWorked(coll.ensureIndex({a: 1, b: -1}));
+addData();
+assert.eq(4, coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).toArray().length);
+assert.eq(4, coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).toArray().length);
+assert.eq(1, coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).toArray().length);
+assert.eq(1, coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).toArray().length);
+assert.eq(4,
+ coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).hint({a: 1, b: -1}).toArray().length);
+assert.eq(4,
+ coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).hint({a: 1, b: -1}).toArray().length);
+assert.eq(1,
+ coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).hint({a: 1, b: -1}).toArray().length);
+assert.eq(1,
+ coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).hint({a: 1, b: -1}).toArray().length);
+
+// Check that min/max requires a hint.
+let error = assert.throws(() => coll.find().min({a: 1, b: 2}).max({a: 2, b: 1}).toArray());
+assert.eq(error.code, 51173);
+
+// Hint doesn't match.
+error = assert.throws(function() {
+ coll.find().min({a: 1}).hint({a: 1, b: -1}).toArray();
+});
+assert.eq(error.code, 51174, error);
+
+error = assert.throws(function() {
+ coll.find().min({a: 1, b: 1}).max({a: 1}).hint({a: 1, b: -1}).toArray();
+});
+assert.eq(error.code, 51176, error);
+
+error = assert.throws(function() {
+ coll.find().min({b: 1}).max({a: 1, b: 2}).hint({a: 1, b: -1}).toArray();
+});
+assert.eq(error.code, 51176, error);
+
+// No query solutions.
+error = assert.throws(function() {
+ coll.find().min({a: 1}).hint({$natural: 1}).toArray();
+});
+assert.eq(error.code, ErrorCodes.BadValue, error);
+
+error = assert.throws(function() {
+ coll.find().max({a: 1}).hint({$natural: 1}).toArray();
+});
+assert.eq(error.code, ErrorCodes.BadValue);
+
+coll.drop();
+assert.commandWorked(coll.ensureIndex({a: 1}));
+for (let i = 0; i < 10; ++i) {
+ assert.commandWorked(coll.save({_id: i, a: i}));
+}
+
+// Reverse direction scan of the a:1 index between a:6 (inclusive) and a:3 (exclusive) is
+// expected to fail, as max must be > min.
+error = assert.throws(function() {
+ coll.find().hint({a: 1}).min({a: 6}).max({a: 3}).sort({a: -1}).toArray();
+});
+assert.eq(error.code, 51175);
+
+// A find with identical min and max values is expected to fail, as max is exclusive.
+error = assert.throws(function() {
+ coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).toArray();
+});
+assert.eq(error.code, 51175);
+
+error = assert.throws(function() {
+ coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).sort({a: -1}).toArray();
+});
+assert.eq(error.code, 51175);
+
+coll.drop();
+addData();
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));
+
+error = assert.throws(function() {
+ coll.find().min({a: 1, b: 2}).max({a: 1, b: 2}).hint({a: 1, b: 1}).toArray();
+});
+assert.eq(error.code, 51175);
+
+// Test ascending index.
+coll.drop();
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.commandWorked(coll.insert({a: 3}));
+assert.commandWorked(coll.insert({a: 4}));
+assert.commandWorked(coll.insert({a: 5}));
+
+let cursor = coll.find().hint({a: 1}).min({a: 4});
+if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
+ assert.eq(4, cursor.next().a);
+ assert.eq(5, cursor.next().a);
+} else {
+ // With more than one shard, we cannot assume the results will come back in order, since we
+ // did not request a sort.
+ assert(resultsEq([cursor.next().a, cursor.next().a], [4, 5]));
+}
+assert(!cursor.hasNext());
+
+cursor = coll.find().hint({a: 1}).max({a: 4});
+assert.eq(3, cursor.next()["a"]);
+assert(!cursor.hasNext());
+
+// Test descending index.
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({a: -1}));
+
+cursor = coll.find().hint({a: -1}).min({a: 4});
+if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) {
+ assert.eq(4, cursor.next().a);
+ assert.eq(3, cursor.next().a);
+} else {
+ // With more than one shard, we cannot assume the results will come back in order, since we
+ // did not request a sort.
+ assert(resultsEq([cursor.next().a, cursor.next().a], [4, 3]));
+}
+assert(!cursor.hasNext());
+
+cursor = coll.find().hint({a: -1}).max({a: 4});
+assert.eq(5, cursor.next()["a"]);
+assert(!cursor.hasNext());
}());
diff --git a/jstests/core/minmax_edge.js b/jstests/core/minmax_edge.js
index cf75edaef80..081af7a347e 100644
--- a/jstests/core/minmax_edge.js
+++ b/jstests/core/minmax_edge.js
@@ -3,230 +3,242 @@
* Other edge cases are covered by C++ unit tests.
*/
(function() {
- const t = db.minmax_edge;
-
- /*
- * Function to verify that the results of a query match the expected results.
- * Results is the cursor toArray, expectedIds is a list of _ids
- */
- function verifyResultIds(results, expectedIds) {
- // check they are the same length
- assert.eq(results.length, expectedIds.length);
-
- function compare(a, b) {
- if (a._id < b._id)
- return -1;
- if (a._id > b._id)
- return 1;
- return 0;
- }
-
- results.sort(compare);
- expectedIds.sort();
-
- for (var i = 0; i < results.length; i++) {
- assert.eq(results._id, expectedIds._ids);
- }
- }
+const t = db.minmax_edge;
- /*
- * Shortcut to drop the collection and insert these 3 test docs. Used to change the indices
- * regardless of any previous indices.
- */
- function reset(t) {
- t.drop();
- assert.writeOK(t.insert({_id: 0, a: 1, b: 1}));
- assert.writeOK(t.insert({_id: 1, a: 1, b: 2}));
- assert.writeOK(t.insert({_id: 2, a: 1, b: 3}));
-
- assert.writeOK(t.insert({_id: 3, a: 2, b: 1}));
- assert.writeOK(t.insert({_id: 4, a: 2, b: 2}));
- assert.writeOK(t.insert({_id: 5, a: 2, b: 3}));
-
- assert.writeOK(t.insert({_id: 6, a: 3, b: 1}));
- assert.writeOK(t.insert({_id: 7, a: 3, b: 2}));
- assert.writeOK(t.insert({_id: 8, a: 3, b: 3}));
+/*
+ * Function to verify that the results of a query match the expected results.
+ * 'results' is the output of cursor.toArray(); 'expectedIds' is an array of expected _id values.
+ */
+function verifyResultIds(results, expectedIds) {
+    // Check that they have the same length.
+ assert.eq(results.length, expectedIds.length);
+
+ function compare(a, b) {
+ if (a._id < b._id)
+ return -1;
+ if (a._id > b._id)
+ return 1;
+ return 0;
}
- // Two helpers to save typing
- function verifyMin(minDoc, idx, expectedIds) {
- verifyResultIds(t.find().min(minDoc).hint(idx).toArray(), expectedIds);
- }
+ results.sort(compare);
+ expectedIds.sort();
- function verifyMax(minDoc, idx, expectedIds) {
- verifyResultIds(t.find().max(minDoc).hint(idx).toArray(), expectedIds);
+ for (var i = 0; i < results.length; i++) {
+        assert.eq(results[i]._id, expectedIds[i]);
}
+}
- // Basic ascending index.
- reset(t);
- let indexSpec = {a: 1};
- assert.commandWorked(t.createIndex(indexSpec));
-
- verifyMin({a: Infinity}, indexSpec, []);
- verifyMax({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: -Infinity}, indexSpec, []);
-
- // NaN < all ints.
- verifyMin({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: NaN}, indexSpec, []);
-
- // {a: 1} > all ints.
- verifyMin({a: {a: 1}}, indexSpec, []);
- verifyMax({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- // 'a' > all ints.
- verifyMin({a: 'a'}, indexSpec, []);
- verifyMax({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- // Now with a compound index.
- reset(t);
- indexSpec = {a: 1, b: -1};
-
- assert.commandWorked(t.createIndex(indexSpec));
-
- // Same as single-key index assertions, with b field present.
- verifyMin({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: NaN, b: 1}, indexSpec, []);
-
- verifyMin({a: Infinity, b: 1}, indexSpec, []);
- verifyMax({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: -Infinity, b: 1}, indexSpec, []);
-
- verifyMin({a: {a: 1}, b: 1}, indexSpec, []);
- verifyMax({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: 'a', b: 1}, indexSpec, []);
- verifyMax({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- // Edge cases on b values
- verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMin({a: 2, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMin({a: 3, b: Infinity}, indexSpec, [6, 7, 8]);
- verifyMax({a: 1, b: Infinity}, indexSpec, []);
- verifyMax({a: 2, b: Infinity}, indexSpec, [0, 1, 2]);
- verifyMax({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
-
- verifyMin({a: 1, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMin({a: 2, b: -Infinity}, indexSpec, [6, 7, 8]);
- verifyMin({a: 3, b: -Infinity}, indexSpec, []);
- verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2]);
- verifyMax({a: 2, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
- verifyMax({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: 2, b: NaN}, indexSpec, [6, 7, 8]);
- verifyMax({a: 2, b: NaN}, indexSpec, [0, 1, 2, 3, 4, 5]);
-
- verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]);
-
- verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]);
-
- // Test descending index.
- reset(t);
- indexSpec = {a: -1};
- assert.commandWorked(t.createIndex(indexSpec));
-
- verifyMin({a: NaN}, indexSpec, []);
- verifyMax({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: Infinity}, indexSpec, []);
-
- verifyMin({a: -Infinity}, indexSpec, []);
- verifyMax({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: {a: 1}}, indexSpec, []);
-
- verifyMin({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: 'a'}, indexSpec, []);
-
- // Now with a compound index.
- reset(t);
- indexSpec = {a: -1, b: -1};
- assert.commandWorked(t.createIndex(indexSpec));
-
- // Same as single-key index assertions, with b field present.
- verifyMin({a: NaN, b: 1}, indexSpec, []);
- verifyMax({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: Infinity, b: 1}, indexSpec, []);
-
- verifyMin({a: -Infinity, b: 1}, indexSpec, []);
- verifyMax({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-
- verifyMin({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: {a: 1}, b: 1}, indexSpec, []);
-
- verifyMin({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: 'a', b: 1}, indexSpec, []);
-
- // Edge cases on b values.
- verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2]);
- verifyMin({a: 2, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
- verifyMin({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: 1, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMax({a: 2, b: Infinity}, indexSpec, [6, 7, 8]);
- verifyMax({a: 3, b: Infinity}, indexSpec, []);
-
- verifyMin({a: 1, b: -Infinity}, indexSpec, []);
- verifyMin({a: 2, b: -Infinity}, indexSpec, [0, 1, 2]);
- verifyMin({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
- verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- verifyMax({a: 2, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMax({a: 3, b: -Infinity}, indexSpec, [6, 7, 8]);
+/*
+ * Shortcut to drop the collection and re-insert the nine test docs, so each
+ * index configuration below starts from a clean collection regardless of any
+ * previously created indexes.
+ */
+function reset(t) {
+ t.drop();
+ assert.writeOK(t.insert({_id: 0, a: 1, b: 1}));
+ assert.writeOK(t.insert({_id: 1, a: 1, b: 2}));
+ assert.writeOK(t.insert({_id: 2, a: 1, b: 3}));
+
+ assert.writeOK(t.insert({_id: 3, a: 2, b: 1}));
+ assert.writeOK(t.insert({_id: 4, a: 2, b: 2}));
+ assert.writeOK(t.insert({_id: 5, a: 2, b: 3}));
+
+ assert.writeOK(t.insert({_id: 6, a: 3, b: 1}));
+ assert.writeOK(t.insert({_id: 7, a: 3, b: 2}));
+ assert.writeOK(t.insert({_id: 8, a: 3, b: 3}));
+}
- verifyMin({a: 2, b: NaN}, indexSpec, [0, 1, 2]);
- verifyMax({a: 2, b: NaN}, indexSpec, [3, 4, 5, 6, 7, 8]);
+// Two helpers to save typing
+function verifyMin(minDoc, idx, expectedIds) {
+ verifyResultIds(t.find().min(minDoc).hint(idx).toArray(), expectedIds);
+}
- verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]);
+function verifyMax(minDoc, idx, expectedIds) {
+ verifyResultIds(t.find().max(minDoc).hint(idx).toArray(), expectedIds);
+}
+
+// Basic ascending index.
+reset(t);
+let indexSpec = {a: 1};
+assert.commandWorked(t.createIndex(indexSpec));
+
+verifyMin({a: Infinity}, indexSpec, []);
+verifyMax({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: -Infinity}, indexSpec, []);
+
+// NaN < all ints.
+verifyMin({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: NaN}, indexSpec, []);
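+
+// A combined-bound sketch: assuming min() gives an inclusive lower bound and
+// max() an exclusive upper bound on index keys, the pair below should select
+// exactly the a == 2 documents.
+verifyResultIds(t.find().min({a: 2}).max({a: 3}).hint(indexSpec).toArray(), [3, 4, 5]);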
- verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]);
- verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]);
+// {a: 1} > all ints.
+verifyMin({a: {a: 1}}, indexSpec, []);
+verifyMax({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
- // Now a couple cases with an extra compound index.
- t.drop();
- indexSpec = {a: 1, b: -1, c: 1};
- assert.commandWorked(t.createIndex(indexSpec));
- // The following documents are in order according to the index.
- t.insert({_id: 0, a: 1, b: 'b', c: 1});
- t.insert({_id: 1, a: 1, b: 'b', c: 2});
- t.insert({_id: 2, a: 1, b: 'a', c: 1});
- t.insert({_id: 3, a: 1, b: 'a', c: 2});
- t.insert({_id: 4, a: 2, b: 'b', c: 1});
- t.insert({_id: 5, a: 2, b: 'b', c: 2});
- t.insert({_id: 6, a: 2, b: 'a', c: 1});
- t.insert({_id: 7, a: 2, b: 'a', c: 2});
-
- verifyMin({a: 1, b: 'a', c: 1}, indexSpec, [2, 3, 4, 5, 6, 7]);
- verifyMin({a: 2, b: 'a', c: 2}, indexSpec, [7]);
- verifyMax({a: 1, b: 'a', c: 1}, indexSpec, [0, 1]);
- verifyMax({a: 2, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6]);
-
- verifyMin({a: Infinity, b: 'a', c: 2}, indexSpec, []);
- verifyMax({a: Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]);
-
- verifyMin({a: -Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]);
- verifyMax({a: -Infinity, b: 'a', c: 2}, indexSpec, []);
-
- // 'a' > Infinity, actually.
- verifyMin({a: 1, b: Infinity, c: 2}, indexSpec, [4, 5, 6, 7]);
- verifyMax({a: 1, b: Infinity, c: 2}, indexSpec, [0, 1, 2, 3]);
-
- // Also, 'a' > -Infinity.
- verifyMin({a: 1, b: -Infinity, c: 2}, indexSpec, [4, 5, 6, 7]);
- verifyMax({a: 1, b: -Infinity, c: 2}, indexSpec, [0, 1, 2, 3]);
-
- verifyMin({a: 1, b: 'a', c: Infinity}, indexSpec, [4, 5, 6, 7]);
- verifyMax({a: 1, b: 'a', c: Infinity}, indexSpec, [0, 1, 2, 3]);
-
- verifyMin({a: 1, b: 'a', c: -Infinity}, indexSpec, [2, 3, 4, 5, 6, 7]);
- verifyMax({a: 1, b: 'a', c: -Infinity}, indexSpec, [0, 1]);
+// 'a' > all ints.
+verifyMin({a: 'a'}, indexSpec, []);
+verifyMax({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+// Now with a compound index.
+reset(t);
+indexSpec = {
+ a: 1,
+ b: -1
+};
+
+assert.commandWorked(t.createIndex(indexSpec));
+
+// Same as single-key index assertions, with b field present.
+verifyMin({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: NaN, b: 1}, indexSpec, []);
+
+verifyMin({a: Infinity, b: 1}, indexSpec, []);
+verifyMax({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: -Infinity, b: 1}, indexSpec, []);
+
+verifyMin({a: {a: 1}, b: 1}, indexSpec, []);
+verifyMax({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: 'a', b: 1}, indexSpec, []);
+verifyMax({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+// Edge cases on b values.
+verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMin({a: 2, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMin({a: 3, b: Infinity}, indexSpec, [6, 7, 8]);
+verifyMax({a: 1, b: Infinity}, indexSpec, []);
+verifyMax({a: 2, b: Infinity}, indexSpec, [0, 1, 2]);
+verifyMax({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
+
+verifyMin({a: 1, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMin({a: 2, b: -Infinity}, indexSpec, [6, 7, 8]);
+verifyMin({a: 3, b: -Infinity}, indexSpec, []);
+verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2]);
+verifyMax({a: 2, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
+verifyMax({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: 2, b: NaN}, indexSpec, [6, 7, 8]);
+verifyMax({a: 2, b: NaN}, indexSpec, [0, 1, 2, 3, 4, 5]);
+
+verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]);
+
+verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]);
+
+// Test descending index.
+reset(t);
+indexSpec = {
+ a: -1
+};
+assert.commandWorked(t.createIndex(indexSpec));
+
+verifyMin({a: NaN}, indexSpec, []);
+verifyMax({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: Infinity}, indexSpec, []);
+
+verifyMin({a: -Infinity}, indexSpec, []);
+verifyMax({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: {a: 1}}, indexSpec, []);
+
+verifyMin({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: 'a'}, indexSpec, []);
+
+// Now with a compound index.
+reset(t);
+indexSpec = {
+ a: -1,
+ b: -1
+};
+assert.commandWorked(t.createIndex(indexSpec));
+
+// Same as single-key index assertions, with b field present.
+verifyMin({a: NaN, b: 1}, indexSpec, []);
+verifyMax({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: Infinity, b: 1}, indexSpec, []);
+
+verifyMin({a: -Infinity, b: 1}, indexSpec, []);
+verifyMax({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: {a: 1}, b: 1}, indexSpec, []);
+
+verifyMin({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: 'a', b: 1}, indexSpec, []);
+
+// Edge cases on b values.
+verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2]);
+verifyMin({a: 2, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
+verifyMin({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: 1, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: Infinity}, indexSpec, [6, 7, 8]);
+verifyMax({a: 3, b: Infinity}, indexSpec, []);
+
+verifyMin({a: 1, b: -Infinity}, indexSpec, []);
+verifyMin({a: 2, b: -Infinity}, indexSpec, [0, 1, 2]);
+verifyMin({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]);
+verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 3, b: -Infinity}, indexSpec, [6, 7, 8]);
+
+verifyMin({a: 2, b: NaN}, indexSpec, [0, 1, 2]);
+verifyMax({a: 2, b: NaN}, indexSpec, [3, 4, 5, 6, 7, 8]);
+
+verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]);
+
+verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]);
+
+// Now a couple cases with an extra compound index.
+t.drop();
+indexSpec = {
+ a: 1,
+ b: -1,
+ c: 1
+};
+assert.commandWorked(t.createIndex(indexSpec));
+// The following documents are in order according to the index.
+t.insert({_id: 0, a: 1, b: 'b', c: 1});
+t.insert({_id: 1, a: 1, b: 'b', c: 2});
+t.insert({_id: 2, a: 1, b: 'a', c: 1});
+t.insert({_id: 3, a: 1, b: 'a', c: 2});
+t.insert({_id: 4, a: 2, b: 'b', c: 1});
+t.insert({_id: 5, a: 2, b: 'b', c: 2});
+t.insert({_id: 6, a: 2, b: 'a', c: 1});
+t.insert({_id: 7, a: 2, b: 'a', c: 2});
+
+verifyMin({a: 1, b: 'a', c: 1}, indexSpec, [2, 3, 4, 5, 6, 7]);
+verifyMin({a: 2, b: 'a', c: 2}, indexSpec, [7]);
+verifyMax({a: 1, b: 'a', c: 1}, indexSpec, [0, 1]);
+verifyMax({a: 2, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6]);
+
+verifyMin({a: Infinity, b: 'a', c: 2}, indexSpec, []);
+verifyMax({a: Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]);
+
+verifyMin({a: -Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]);
+verifyMax({a: -Infinity, b: 'a', c: 2}, indexSpec, []);
+
+// The string 'a' sorts above every number, even Infinity, in BSON order.
+verifyMin({a: 1, b: Infinity, c: 2}, indexSpec, [4, 5, 6, 7]);
+verifyMax({a: 1, b: Infinity, c: 2}, indexSpec, [0, 1, 2, 3]);
+
+// Likewise, the string 'a' sorts above -Infinity.
+verifyMin({a: 1, b: -Infinity, c: 2}, indexSpec, [4, 5, 6, 7]);
+verifyMax({a: 1, b: -Infinity, c: 2}, indexSpec, [0, 1, 2, 3]);
+
+verifyMin({a: 1, b: 'a', c: Infinity}, indexSpec, [4, 5, 6, 7]);
+verifyMax({a: 1, b: 'a', c: Infinity}, indexSpec, [0, 1, 2, 3]);
+
+verifyMin({a: 1, b: 'a', c: -Infinity}, indexSpec, [2, 3, 4, 5, 6, 7]);
+verifyMax({a: 1, b: 'a', c: -Infinity}, indexSpec, [0, 1]);
})();
diff --git a/jstests/core/mr5.js b/jstests/core/mr5.js
index c78ce1d8f4e..1858eaa57a5 100644
--- a/jstests/core/mr5.js
+++ b/jstests/core/mr5.js
@@ -5,63 +5,63 @@
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For resultsEq.
+load("jstests/aggregation/extras/utils.js"); // For resultsEq.
- const t = db.mr5;
- t.drop();
+const t = db.mr5;
+t.drop();
- assert.writeOK(t.insert({"partner": 1, "visits": 9}));
- assert.writeOK(t.insert({"partner": 2, "visits": 9}));
- assert.writeOK(t.insert({"partner": 1, "visits": 11}));
- assert.writeOK(t.insert({"partner": 1, "visits": 30}));
- assert.writeOK(t.insert({"partner": 2, "visits": 41}));
- assert.writeOK(t.insert({"partner": 2, "visits": 41}));
+assert.writeOK(t.insert({"partner": 1, "visits": 9}));
+assert.writeOK(t.insert({"partner": 2, "visits": 9}));
+assert.writeOK(t.insert({"partner": 1, "visits": 11}));
+assert.writeOK(t.insert({"partner": 1, "visits": 30}));
+assert.writeOK(t.insert({"partner": 2, "visits": 41}));
+assert.writeOK(t.insert({"partner": 2, "visits": 41}));
- let mapper = function() {
- emit(this.partner, {stats: [this.visits]});
- };
+let mapper = function() {
+ emit(this.partner, {stats: [this.visits]});
+};
- const reducer = function(k, v) {
- var stats = [];
- var total = 0;
- for (var i = 0; i < v.length; i++) {
- for (var j in v[i].stats) {
- stats.push(v[i].stats[j]);
- total += v[i].stats[j];
- }
+const reducer = function(k, v) {
+ var stats = [];
+ var total = 0;
+ for (var i = 0; i < v.length; i++) {
+ for (var j in v[i].stats) {
+ stats.push(v[i].stats[j]);
+ total += v[i].stats[j];
}
- return {stats: stats, total: total};
- };
+ }
+ return {stats: stats, total: total};
+};
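+
+// A quick shape check of the reducer, with the partner-1 visits from the
+// inserts above grouped into two illustrative partial results.
+assert.eq({stats: [9, 11, 30], total: 50}, reducer(1, [{stats: [9]}, {stats: [11, 30]}]));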
- let res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}});
+let res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}});
- let resultAsObj = res.convertToSingleObject();
- assert.eq(2,
- Object.keySet(resultAsObj).length,
- `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`);
- // Use resultsEq() to avoid any assumptions about order.
- assert(resultsEq([9, 11, 30], resultAsObj["1"].stats));
- assert(resultsEq([9, 41, 41], resultAsObj["2"].stats));
+let resultAsObj = res.convertToSingleObject();
+assert.eq(2,
+ Object.keySet(resultAsObj).length,
+ `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`);
+// Use resultsEq() to avoid any assumptions about order.
+assert(resultsEq([9, 11, 30], resultAsObj["1"].stats));
+assert(resultsEq([9, 41, 41], resultAsObj["2"].stats));
- res.drop();
+res.drop();
- mapper = function() {
- var x = "partner";
- var y = "visits";
- emit(this[x], {stats: [this[y]]});
- };
+mapper = function() {
+ var x = "partner";
+ var y = "visits";
+ emit(this[x], {stats: [this[y]]});
+};
- res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}});
+res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}});
- resultAsObj = res.convertToSingleObject();
- assert.eq(2,
- Object.keySet(resultAsObj).length,
- `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`);
- // Use resultsEq() to avoid any assumptions about order.
- assert(resultsEq([9, 11, 30], resultAsObj["1"].stats));
- assert(resultsEq([9, 41, 41], resultAsObj["2"].stats));
+resultAsObj = res.convertToSingleObject();
+assert.eq(2,
+ Object.keySet(resultAsObj).length,
+ `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`);
+// Use resultsEq() to avoid any assumptions about order.
+assert(resultsEq([9, 11, 30], resultAsObj["1"].stats));
+assert(resultsEq([9, 41, 41], resultAsObj["2"].stats));
- res.drop();
+res.drop();
}());
diff --git a/jstests/core/mr_bigobject.js b/jstests/core/mr_bigobject.js
index 513d48d25a2..92865a04f0a 100644
--- a/jstests/core/mr_bigobject.js
+++ b/jstests/core/mr_bigobject.js
@@ -39,7 +39,7 @@ r = function(k, v) {
total = 0;
for (var i = 0; i < v.length; i++) {
var x = v[i];
- if (typeof(x) == "number")
+ if (typeof (x) == "number")
total += x;
else
total += x.length;
diff --git a/jstests/core/mr_bigobject_replace.js b/jstests/core/mr_bigobject_replace.js
index 3c32e6de8af..c02ee7f1fac 100644
--- a/jstests/core/mr_bigobject_replace.js
+++ b/jstests/core/mr_bigobject_replace.js
@@ -13,58 +13,58 @@
* "replace" action for the out collection.
*/
(function() {
- function mapper() {
- // Emit multiple values to ensure that the reducer gets called.
- emit(this._id, 1);
- emit(this._id, 1);
- }
-
- function createBigDocument() {
- // Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified
- // 'targetSize' in bytes.
- function makeDocWithSize(targetSize) {
- var doc = {_id: new ObjectId(), value: ''};
+function mapper() {
+ // Emit multiple values to ensure that the reducer gets called.
+ emit(this._id, 1);
+ emit(this._id, 1);
+}
- var size = Object.bsonsize(doc);
- assert.gte(targetSize, size);
+function createBigDocument() {
+ // Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified
+ // 'targetSize' in bytes.
+ function makeDocWithSize(targetSize) {
+ var doc = {_id: new ObjectId(), value: ''};
- // Set 'value' as a string with enough characters to make the whole document 'size'
- // bytes long.
- doc.value = new Array(targetSize - size + 1).join('x');
- assert.eq(targetSize, Object.bsonsize(doc));
+ var size = Object.bsonsize(doc);
+ assert.gte(targetSize, size);
- return doc;
- }
+ // Set 'value' as a string with enough characters to make the whole document 'size'
+ // bytes long.
+ doc.value = new Array(targetSize - size + 1).join('x');
+ assert.eq(targetSize, Object.bsonsize(doc));
- var maxDocSize = 16 * 1024 * 1024;
- return makeDocWithSize(maxDocSize + 1).value;
+ return doc;
}
- function runTest(testOptions) {
- db.input.drop();
- db.mr_bigobject_replace.drop();
+ var maxDocSize = 16 * 1024 * 1024;
+ return makeDocWithSize(maxDocSize + 1).value;
+}
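+
+// Sanity sketch: wrapping the returned string back into the document shape
+// used above reproduces a BSON object one byte over the 16MB limit.
+assert.eq(16 * 1024 * 1024 + 1,
+          Object.bsonsize({_id: new ObjectId(), value: createBigDocument()}));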
- // Insert a document so the mapper gets run.
- assert.writeOK(db.input.insert({}));
+function runTest(testOptions) {
+ db.input.drop();
+ db.mr_bigobject_replace.drop();
- var res = db.runCommand(Object.extend({
- mapReduce: "input",
- map: mapper,
- out: {replace: "mr_bigobject_replace"},
- },
- testOptions));
+ // Insert a document so the mapper gets run.
+ assert.writeOK(db.input.insert({}));
- assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
- assert.lte(0,
- res.errmsg.indexOf("object to insert too large"),
- "map-reduce command failed for a reason other than inserting a large document");
- }
+ var res = db.runCommand(Object.extend({
+ mapReduce: "input",
+ map: mapper,
+ out: {replace: "mr_bigobject_replace"},
+ },
+ testOptions));
+
+ assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
+ assert.lte(0,
+ res.errmsg.indexOf("object to insert too large"),
+ "map-reduce command failed for a reason other than inserting a large document");
+}
- runTest({reduce: createBigDocument});
- runTest({
- reduce: function() {
- return 1;
- },
- finalize: createBigDocument
- });
+runTest({reduce: createBigDocument});
+runTest({
+ reduce: function() {
+ return 1;
+ },
+ finalize: createBigDocument
+});
})();
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 56a025dc4b5..168b54a5f6d 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -60,17 +60,17 @@ function op(childLoop) {
}
/**
-* Run one map reduce with the specified parameters in a parallel shell, kill the
-* map reduce op or its child op with killOp, and wait for the map reduce op to
-* terminate.
-* @param childLoop - if true, a distinct $where op is killed rather than the map reduce op.
-* This is necessay for a child distinct $where of a map reduce op because child
-* ops currently mask parent ops in currentOp.
-*/
+ * Run one map reduce with the specified parameters in a parallel shell, kill the
+ * map reduce op or its child op with killOp, and wait for the map reduce op to
+ * terminate.
+ * @param childLoop - if true, a distinct $where op is killed rather than the map reduce op.
+ * This is necessary for a child distinct $where of a map reduce op because child
+ * ops currently mask parent ops in currentOp.
+ */
function testOne(map, reduce, finalize, scope, childLoop, wait) {
- debug("testOne - map = " + tojson(map) + "; reduce = " + tojson(reduce) + "; finalize = " +
- tojson(finalize) + "; scope = " + tojson(scope) + "; childLoop = " + childLoop +
- "; wait = " + wait);
+ debug("testOne - map = " + tojson(map) + "; reduce = " + tojson(reduce) +
+ "; finalize = " + tojson(finalize) + "; scope = " + tojson(scope) +
+ "; childLoop = " + childLoop + "; wait = " + wait);
t.drop();
t2.drop();
diff --git a/jstests/core/mr_stored.js b/jstests/core/mr_stored.js
index e2c8d1450ec..c1c38253727 100644
--- a/jstests/core/mr_stored.js
+++ b/jstests/core/mr_stored.js
@@ -8,81 +8,81 @@
// requires_non_retryable_writes,
// ]
(function() {
- "use strict";
+"use strict";
- // Use a unique database name to avoid conflicts with other tests that directly modify
- // system.js.
- const testDB = db.getSiblingDB("mr_stored");
- const coll = testDB.test;
- coll.drop();
+// Use a unique database name to avoid conflicts with other tests that directly modify
+// system.js.
+const testDB = db.getSiblingDB("mr_stored");
+const coll = testDB.test;
+coll.drop();
- assert.commandWorked(coll.insert({"partner": 1, "visits": 9}));
- assert.commandWorked(coll.insert({"partner": 2, "visits": 9}));
- assert.commandWorked(coll.insert({"partner": 1, "visits": 11}));
- assert.commandWorked(coll.insert({"partner": 1, "visits": 30}));
- assert.commandWorked(coll.insert({"partner": 2, "visits": 41}));
- assert.commandWorked(coll.insert({"partner": 2, "visits": 41}));
+assert.commandWorked(coll.insert({"partner": 1, "visits": 9}));
+assert.commandWorked(coll.insert({"partner": 2, "visits": 9}));
+assert.commandWorked(coll.insert({"partner": 1, "visits": 11}));
+assert.commandWorked(coll.insert({"partner": 1, "visits": 30}));
+assert.commandWorked(coll.insert({"partner": 2, "visits": 41}));
+assert.commandWorked(coll.insert({"partner": 2, "visits": 41}));
- let map = function(obj) {
- emit(obj.partner, {stats: [obj.visits]});
- };
+let map = function(obj) {
+ emit(obj.partner, {stats: [obj.visits]});
+};
- let reduce = function(k, v) {
- var stats = [];
- var total = 0;
- for (var i = 0; i < v.length; i++) {
- for (var j in v[i].stats) {
- stats.push(v[i].stats[j]);
- total += v[i].stats[j];
- }
+let reduce = function(k, v) {
+ var stats = [];
+ var total = 0;
+ for (var i = 0; i < v.length; i++) {
+ for (var j in v[i].stats) {
+ stats.push(v[i].stats[j]);
+ total += v[i].stats[j];
}
- return {stats: stats, total: total};
- };
+ }
+ return {stats: stats, total: total};
+};
- // Test that map reduce works with stored javascript
- assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_map", value: map}));
- assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_reduce", value: reduce}));
+// Test that map reduce works with stored javascript
+assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_map", value: map}));
+assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_reduce", value: reduce}));
- let res = coll.mapReduce(
- function() {
- mr_stored_map(this);
- },
- function(k, v) {
- return mr_stored_reduce(k, v);
- },
- {out: "mr_stored_out", scope: {xx: 1}});
+let res = coll.mapReduce(
+ function() {
+ mr_stored_map(this);
+ },
+ function(k, v) {
+ return mr_stored_reduce(k, v);
+ },
+ {out: "mr_stored_out", scope: {xx: 1}});
- let z = res.convertToSingleObject();
- assert.eq(2, Object.keySet(z).length);
- assert.eq([9, 11, 30], z["1"].stats);
- assert.eq([9, 41, 41], z["2"].stats);
+let z = res.convertToSingleObject();
+assert.eq(2, Object.keySet(z).length);
+assert.eq([9, 11, 30], z["1"].stats);
+assert.eq([9, 41, 41], z["2"].stats);
- res.drop();
+res.drop();
- map = function(obj) {
- var x = "partner";
- var y = "visits";
- emit(obj[x], {stats: [obj[y]]});
- };
+map = function(obj) {
+ var x = "partner";
+ var y = "visits";
+ emit(obj[x], {stats: [obj[y]]});
+};
- assert.commandWorked(testDB.system.js.save({_id: "mr_stored_map", value: map}));
+assert.commandWorked(testDB.system.js.save({_id: "mr_stored_map", value: map}));
- res = coll.mapReduce(
- function() {
- mr_stored_map(this);
- },
- function(k, v) {
- return mr_stored_reduce(k, v);
- },
- {out: "mr_stored_out", scope: {xx: 1}});
+res = coll.mapReduce(
+ function() {
+ mr_stored_map(this);
+ },
+ function(k, v) {
+ return mr_stored_reduce(k, v);
+ },
+ {out: "mr_stored_out", scope: {xx: 1}});
- z = res.convertToSingleObject();
- assert.eq(2, Object.keySet(z).length);
- assert.eq([9, 11, 30], z["1"].stats);
- assert.eq([9, 41, 41], z["2"].stats);
+z = res.convertToSingleObject();
+assert.eq(2, Object.keySet(z).length);
+assert.eq([9, 11, 30], z["1"].stats);
+assert.eq([9, 41, 41], z["2"].stats);
- assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_map"}));
- assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_reduce"}));
+assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_map"}));
+assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_reduce"}));
- res.drop();
+res.drop();
}());
diff --git a/jstests/core/mr_tolerates_js_exception.js b/jstests/core/mr_tolerates_js_exception.js
index 29de4cf795d..2689bce8433 100644
--- a/jstests/core/mr_tolerates_js_exception.js
+++ b/jstests/core/mr_tolerates_js_exception.js
@@ -10,63 +10,61 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- let coll = db.mr_tolerates_js_exception;
- coll.drop();
- for (let i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i, a: 1}));
- }
+let coll = db.mr_tolerates_js_exception;
+coll.drop();
+for (let i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({_id: i, a: 1}));
+}
- // Test that the command fails with a JS interpreter failure error when the reduce function
- // throws.
- let cmdOutput = db.runCommand({
- mapReduce: coll.getName(),
- map: function() {
- emit(this.a, 1);
- },
- reduce: function(key, value) {
- (function myFunction() {
- throw new Error("Intentionally thrown inside reduce function");
- })();
- },
- out: {inline: 1}
- });
- assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput));
- assert(/Intentionally thrown inside reduce function/.test(cmdOutput.errmsg),
- () => "mapReduce didn't include the message from the exception thrown: " +
- tojson(cmdOutput));
- assert(/myFunction@/.test(cmdOutput.errmsg),
- () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput));
- assert(
- !cmdOutput.hasOwnProperty("stack"),
- () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput));
- assert(!cmdOutput.hasOwnProperty("originalError"),
- () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput));
+// Test that the command fails with a JS interpreter failure error when the reduce function
+// throws.
+let cmdOutput = db.runCommand({
+ mapReduce: coll.getName(),
+ map: function() {
+ emit(this.a, 1);
+ },
+ reduce: function(key, value) {
+ (function myFunction() {
+ throw new Error("Intentionally thrown inside reduce function");
+ })();
+ },
+ out: {inline: 1}
+});
+assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput));
+assert(
+ /Intentionally thrown inside reduce function/.test(cmdOutput.errmsg),
+ () => "mapReduce didn't include the message from the exception thrown: " + tojson(cmdOutput));
+assert(/myFunction@/.test(cmdOutput.errmsg),
+ () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput));
+assert(!cmdOutput.hasOwnProperty("stack"),
+ () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput));
+assert(!cmdOutput.hasOwnProperty("originalError"),
+ () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput));
- // Test that the command fails with a JS interpreter failure error when the map function
- // throws.
- cmdOutput = db.runCommand({
- mapReduce: coll.getName(),
- map: function() {
- (function myFunction() {
- throw new Error("Intentionally thrown inside map function");
- })();
- },
- reduce: function(key, value) {
- return Array.sum(value);
- },
- out: {inline: 1}
- });
- assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput));
- assert(/Intentionally thrown inside map function/.test(cmdOutput.errmsg),
- () => "mapReduce didn't include the message from the exception thrown: " +
- tojson(cmdOutput));
- assert(/myFunction@/.test(cmdOutput.errmsg),
- () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput));
- assert(
- !cmdOutput.hasOwnProperty("stack"),
- () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput));
- assert(!cmdOutput.hasOwnProperty("originalError"),
- () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput));
+// Test that the command fails with a JS interpreter failure error when the map function
+// throws.
+cmdOutput = db.runCommand({
+ mapReduce: coll.getName(),
+ map: function() {
+ (function myFunction() {
+ throw new Error("Intentionally thrown inside map function");
+ })();
+ },
+ reduce: function(key, value) {
+ return Array.sum(value);
+ },
+ out: {inline: 1}
+});
+assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput));
+assert(
+ /Intentionally thrown inside map function/.test(cmdOutput.errmsg),
+ () => "mapReduce didn't include the message from the exception thrown: " + tojson(cmdOutput));
+assert(/myFunction@/.test(cmdOutput.errmsg),
+ () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput));
+assert(!cmdOutput.hasOwnProperty("stack"),
+ () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput));
+assert(!cmdOutput.hasOwnProperty("originalError"),
+ () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput));
}());
diff --git a/jstests/core/nan.js b/jstests/core/nan.js
index 1b34a53e64d..4cca00c4b66 100644
--- a/jstests/core/nan.js
+++ b/jstests/core/nan.js
@@ -2,59 +2,59 @@
* Tests basic NaN handling. Note that WiredTiger indexes handle -NaN and NaN differently.
*/
(function() {
- "use strict";
-
- const coll = db.jstests_nan;
- coll.drop();
-
- assert.writeOK(coll.insert({_id: 0, a: -Infinity}));
- assert.writeOK(coll.insert({_id: 1, a: -3}));
- assert.writeOK(coll.insert({_id: 2, a: 0}));
- assert.writeOK(coll.insert({_id: 3, a: 3}));
- assert.writeOK(coll.insert({_id: 4, a: Infinity}));
- assert.writeOK(coll.insert({_id: 5, a: NaN}));
- assert.writeOK(coll.insert({_id: 6, a: -NaN}));
- assert.writeOK(coll.insert({_id: 7, a: undefined}));
- assert.writeOK(coll.insert({_id: 8, a: null}));
- assert.writeOK(coll.insert({_id: 9, a: []}));
- assert.writeOK(coll.insert({_id: 10, a: {b: 1}}));
- assert.writeOK(coll.insert({_id: 11, a: {b: 1}}));
-
- /**
- * Ensures correct results for EQ, LT, LTE, GT, and GTE cases.
- */
- var testNaNComparisons = function() {
- // EQ
- let cursor = coll.find({a: NaN}).sort({_id: 1});
- assert.eq(5, cursor.next()["_id"]);
- assert.eq(6, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // LT
- cursor = coll.find({a: {$lt: NaN}});
- assert(!cursor.hasNext());
-
- // LTE
- cursor = coll.find({a: {$lte: NaN}}).sort({_id: 1});
- assert.eq(5, cursor.next()["_id"]);
- assert.eq(6, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // GT
- cursor = coll.find({a: {$gt: NaN}});
- assert(!cursor.hasNext());
-
- // GTE
- cursor = coll.find({a: {$gte: NaN}}).sort({_id: 1});
- assert.eq(5, cursor.next()["_id"]);
- assert.eq(6, cursor.next()["_id"]);
- assert(!cursor.hasNext());
- };
-
- // Unindexed.
- testNaNComparisons();
-
- // Indexed.
- assert.commandWorked(coll.createIndex({a: 1}));
- testNaNComparisons();
+"use strict";
+
+const coll = db.jstests_nan;
+coll.drop();
+
+assert.writeOK(coll.insert({_id: 0, a: -Infinity}));
+assert.writeOK(coll.insert({_id: 1, a: -3}));
+assert.writeOK(coll.insert({_id: 2, a: 0}));
+assert.writeOK(coll.insert({_id: 3, a: 3}));
+assert.writeOK(coll.insert({_id: 4, a: Infinity}));
+assert.writeOK(coll.insert({_id: 5, a: NaN}));
+assert.writeOK(coll.insert({_id: 6, a: -NaN}));
+assert.writeOK(coll.insert({_id: 7, a: undefined}));
+assert.writeOK(coll.insert({_id: 8, a: null}));
+assert.writeOK(coll.insert({_id: 9, a: []}));
+assert.writeOK(coll.insert({_id: 10, a: {b: 1}}));
+assert.writeOK(coll.insert({_id: 11, a: {b: 1}}));
+
+/**
+ * Ensures correct results for EQ, LT, LTE, GT, and GTE cases.
+ */
+var testNaNComparisons = function() {
+ // EQ
+ let cursor = coll.find({a: NaN}).sort({_id: 1});
+ assert.eq(5, cursor.next()["_id"]);
+ assert.eq(6, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // LT
+ cursor = coll.find({a: {$lt: NaN}});
+ assert(!cursor.hasNext());
+
+ // LTE
+ cursor = coll.find({a: {$lte: NaN}}).sort({_id: 1});
+ assert.eq(5, cursor.next()["_id"]);
+ assert.eq(6, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // GT
+ cursor = coll.find({a: {$gt: NaN}});
+ assert(!cursor.hasNext());
+
+ // GTE
+ cursor = coll.find({a: {$gte: NaN}}).sort({_id: 1});
+ assert.eq(5, cursor.next()["_id"]);
+ assert.eq(6, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+};
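+
+// Side sketch: NaN and -NaN compare equal in BSON ordering, which is why both
+// _id 5 and _id 6 satisfy the NaN predicates exercised by the helper above.
+assert.eq(2, coll.find({a: NaN}).itcount());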
+
+// Unindexed.
+testNaNComparisons();
+
+// Indexed.
+assert.commandWorked(coll.createIndex({a: 1}));
+testNaNComparisons();
}());
diff --git a/jstests/core/natural.js b/jstests/core/natural.js
index d972be22839..2471e2be495 100644
--- a/jstests/core/natural.js
+++ b/jstests/core/natural.js
@@ -1,26 +1,26 @@
// Tests for $natural sort and $natural hint.
(function() {
- 'use strict';
+'use strict';
- var results;
+var results;
- var coll = db.jstests_natural;
- coll.drop();
+var coll = db.jstests_natural;
+coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: 3}));
- assert.writeOK(coll.insert({_id: 2, a: 2}));
- assert.writeOK(coll.insert({_id: 3, a: 1}));
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.writeOK(coll.insert({_id: 1, a: 3}));
+assert.writeOK(coll.insert({_id: 2, a: 2}));
+assert.writeOK(coll.insert({_id: 3, a: 1}));
- // Regression test for SERVER-20660. Ensures that documents returned with $natural don't have
- // any extraneous fields.
- results = coll.find({a: 2}).sort({$natural: 1}).toArray();
- assert.eq(results.length, 1);
- assert.eq(results[0], {_id: 2, a: 2});
+// Regression test for SERVER-20660. Ensures that documents returned with $natural don't have
+// any extraneous fields.
+results = coll.find({a: 2}).sort({$natural: 1}).toArray();
+assert.eq(results.length, 1);
+assert.eq(results[0], {_id: 2, a: 2});
- // Regression test for SERVER-20660. Ensures that documents returned with $natural don't have
- // any extraneous fields.
- results = coll.find({a: 2}).hint({$natural: -1}).toArray();
- assert.eq(results.length, 1);
- assert.eq(results[0], {_id: 2, a: 2});
+// Regression test for SERVER-20660. Ensures that documents returned with $natural don't have
+// any extraneous fields.
+results = coll.find({a: 2}).hint({$natural: -1}).toArray();
+assert.eq(results.length, 1);
+assert.eq(results[0], {_id: 2, a: 2});
})();
diff --git a/jstests/core/ne_array.js b/jstests/core/ne_array.js
index 5e5b8c860dd..e703d4b13a4 100644
--- a/jstests/core/ne_array.js
+++ b/jstests/core/ne_array.js
@@ -4,62 +4,57 @@
// returned for this type of query when an index is present.
// @tags: [requires_non_retryable_writes]
(function() {
- const coll = db.ne_array;
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
-
- assert.commandWorked(coll.insert({_id: 0, a: [1]}));
- assert.commandWorked(coll.insert({_id: 1, a: [1, 3]}));
-
- assert.eq(coll.find({a: {$ne: [1, 3]}}, {_id: 1}).toArray(), [{_id: 0}]);
- assert.eq(coll.find({a: {$ne: [1]}}, {_id: 1}).toArray(), [{_id: 1}]);
-
- assert.eq(coll.find({a: {$not: {$in: [[1]]}}}, {_id: 1}).toArray(), [{_id: 1}]);
- assert.eq(coll.find({a: {$not: {$in: [[1, 3]]}}}, {_id: 1}).toArray(), [{_id: 0}]);
- assert.eq(coll.find({a: {$not: {$in: [[1], [1, 3]]}}}, {_id: 1}).toArray(), []);
- assert.eq(coll.find({a: {$not: {$in: ["scalar value", [1, 3]]}}}, {_id: 1}).toArray(),
- [{_id: 0}]);
-
- // Insert some documents which have nested arrays so we can test $elemMatch value.
- assert.commandWorked(coll.remove({}));
- assert.commandWorked(coll.insert({_id: 0, a: [[123]]}));
- assert.commandWorked(coll.insert({_id: 1, a: [4, 5, [123]]}));
- assert.commandWorked(coll.insert({_id: 2, a: [7, 8]}));
-
- // sort by _id in case we run on a sharded cluster which puts the documents on different
- // shards (and thus, might return them in any order).
- assert.eq(coll.find({a: {$elemMatch: {$not: {$eq: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
- [{_id: 1}, {_id: 2}]);
-
- assert.eq(
- coll.find({a: {$elemMatch: {$not: {$in: [[123]]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
- [{_id: 1}, {_id: 2}]);
-
- assert.eq(coll.find({a: {$not: {$elemMatch: {$eq: [123]}}}}, {_id: 1}).toArray(), [{_id: 2}]);
- assert.eq(coll.find({a: {$not: {$elemMatch: {$in: [[123]]}}}}, {_id: 1}).toArray(), [{_id: 2}]);
-
- // Test $elemMatch object.
- assert.commandWorked(coll.remove({}));
- coll.dropIndexes();
- assert.commandWorked(coll.createIndex({"a.b": 1}));
- assert.commandWorked(coll.insert({_id: 0, a: [[123]]}));
- assert.commandWorked(coll.insert({_id: 1, a: [{b: 123}]}));
- assert.commandWorked(coll.insert({_id: 2, a: [{b: [4, [123]]}]}));
- assert.commandWorked(coll.insert({_id: 3, a: [{b: [[123]]}]}));
-
- // Remember that $ne with an array will match arrays where _none_ of the elements match.
- assert.eq(coll.find({a: {$elemMatch: {b: {$ne: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
- [{_id: 0}, {_id: 1}]);
- assert.eq(coll.find({a: {$elemMatch: {b: {$not: {$in: [[123]]}}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray(),
- [{_id: 0}, {_id: 1}]);
-
- assert.eq(coll.find({a: {$not: {$elemMatch: {b: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
- [{_id: 0}, {_id: 1}]);
- assert.eq(coll.find({a: {$not: {$elemMatch: {b: {$in: [[123]]}}}}}, {_id: 1})
- .sort({_id: 1})
- .toArray(),
- [{_id: 0}, {_id: 1}]);
-
+const coll = db.ne_array;
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.commandWorked(coll.insert({_id: 0, a: [1]}));
+assert.commandWorked(coll.insert({_id: 1, a: [1, 3]}));
+
+assert.eq(coll.find({a: {$ne: [1, 3]}}, {_id: 1}).toArray(), [{_id: 0}]);
+assert.eq(coll.find({a: {$ne: [1]}}, {_id: 1}).toArray(), [{_id: 1}]);
+
+assert.eq(coll.find({a: {$not: {$in: [[1]]}}}, {_id: 1}).toArray(), [{_id: 1}]);
+assert.eq(coll.find({a: {$not: {$in: [[1, 3]]}}}, {_id: 1}).toArray(), [{_id: 0}]);
+assert.eq(coll.find({a: {$not: {$in: [[1], [1, 3]]}}}, {_id: 1}).toArray(), []);
+assert.eq(coll.find({a: {$not: {$in: ["scalar value", [1, 3]]}}}, {_id: 1}).toArray(), [{_id: 0}]);
+
+// Insert some documents which have nested arrays so we can test $elemMatch value.
+assert.commandWorked(coll.remove({}));
+assert.commandWorked(coll.insert({_id: 0, a: [[123]]}));
+assert.commandWorked(coll.insert({_id: 1, a: [4, 5, [123]]}));
+assert.commandWorked(coll.insert({_id: 2, a: [7, 8]}));
+
+// sort by _id in case we run on a sharded cluster which puts the documents on different
+// shards (and thus, might return them in any order).
+assert.eq(coll.find({a: {$elemMatch: {$not: {$eq: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
+ [{_id: 1}, {_id: 2}]);
+
+assert.eq(coll.find({a: {$elemMatch: {$not: {$in: [[123]]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
+ [{_id: 1}, {_id: 2}]);
+
+assert.eq(coll.find({a: {$not: {$elemMatch: {$eq: [123]}}}}, {_id: 1}).toArray(), [{_id: 2}]);
+assert.eq(coll.find({a: {$not: {$elemMatch: {$in: [[123]]}}}}, {_id: 1}).toArray(), [{_id: 2}]);
+
+// Test $elemMatch object.
+assert.commandWorked(coll.remove({}));
+coll.dropIndexes();
+assert.commandWorked(coll.createIndex({"a.b": 1}));
+assert.commandWorked(coll.insert({_id: 0, a: [[123]]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{b: 123}]}));
+assert.commandWorked(coll.insert({_id: 2, a: [{b: [4, [123]]}]}));
+assert.commandWorked(coll.insert({_id: 3, a: [{b: [[123]]}]}));
+
+// Remember that $ne with an array will match arrays where _none_ of the elements match.
+assert.eq(coll.find({a: {$elemMatch: {b: {$ne: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
+ [{_id: 0}, {_id: 1}]);
+assert.eq(
+ coll.find({a: {$elemMatch: {b: {$not: {$in: [[123]]}}}}}, {_id: 1}).sort({_id: 1}).toArray(),
+ [{_id: 0}, {_id: 1}]);
+
+assert.eq(coll.find({a: {$not: {$elemMatch: {b: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(),
+ [{_id: 0}, {_id: 1}]);
+assert.eq(
+ coll.find({a: {$not: {$elemMatch: {b: {$in: [[123]]}}}}}, {_id: 1}).sort({_id: 1}).toArray(),
+ [{_id: 0}, {_id: 1}]);
})();
diff --git a/jstests/core/nestedarr1.js b/jstests/core/nestedarr1.js
index 9fc8ef3c582..7edef9db512 100644
--- a/jstests/core/nestedarr1.js
+++ b/jstests/core/nestedarr1.js
@@ -6,42 +6,41 @@
* supported BSON nesting depth, as well as maintaining index consistency.
*/
(function() {
- "use strict";
+"use strict";
- function makeNestArr(depth) {
- if (depth == 1) {
- return {a: 1};
- } else if (depth == 2) {
- return {a: [1]};
- } else {
- return {a: [makeNestArr(depth - 2)]};
- }
+function makeNestArr(depth) {
+ if (depth == 1) {
+ return {a: 1};
+ } else if (depth == 2) {
+ return {a: [1]};
+ } else {
+ return {a: [makeNestArr(depth - 2)]};
}
+}
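+
+// Shape sketch: the nesting alternates object and array levels, e.g.
+assert.eq({a: [{a: [1]}]}, makeNestArr(4));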
- let collection = db.arrNestTest;
- collection.drop();
+let collection = db.arrNestTest;
+collection.drop();
- assert.commandWorked(collection.ensureIndex({a: 1}));
+assert.commandWorked(collection.ensureIndex({a: 1}));
- const kMaxDocumentDepthSoftLimit = 100;
- const kJavaScriptMaxDepthLimit = 150;
+const kMaxDocumentDepthSoftLimit = 100;
+const kJavaScriptMaxDepthLimit = 150;
- let level;
- for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) {
- let res = db.runCommand({insert: collection.getName(), documents: [makeNestArr(level)]});
- if (!res.ok) {
- assert.commandFailedWithCode(
- res, 17280, "Expected insertion to fail only because key is too large to index");
- break;
- }
+let level;
+for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) {
+ let res = db.runCommand({insert: collection.getName(), documents: [makeNestArr(level)]});
+ if (!res.ok) {
+ assert.commandFailedWithCode(
+ res, 17280, "Expected insertion to fail only because key is too large to index");
+ break;
}
+}
- assert.gt(level,
- kMaxDocumentDepthSoftLimit,
- "Unable to insert a document nested with " + level +
- " levels, which is less than the supported limit of " +
- kMaxDocumentDepthSoftLimit);
- assert.eq(collection.count(),
- collection.find().hint({a: 1}).itcount(),
- "Number of documents in collection does not match number of entries in index");
+assert.gt(level,
+ kMaxDocumentDepthSoftLimit,
+ "Unable to insert a document nested with " + level +
+ " levels, which is less than the supported limit of " + kMaxDocumentDepthSoftLimit);
+assert.eq(collection.count(),
+ collection.find().hint({a: 1}).itcount(),
+ "Number of documents in collection does not match number of entries in index");
}());
diff --git a/jstests/core/nestedobj1.js b/jstests/core/nestedobj1.js
index ea1984e7954..44fdd4599d7 100644
--- a/jstests/core/nestedobj1.js
+++ b/jstests/core/nestedobj1.js
@@ -6,41 +6,40 @@
* supported BSON nesting depth, as well as maintaining index consistency.
*/
(function() {
- "use strict";
+"use strict";
- function makeNestObj(depth) {
- if (depth == 1) {
- return {a: 1};
- } else {
- return {a: makeNestObj(depth - 1)};
- }
+function makeNestObj(depth) {
+ if (depth == 1) {
+ return {a: 1};
+ } else {
+ return {a: makeNestObj(depth - 1)};
}
+}
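+
+// Shape sketch: each level adds one layer of object nesting, e.g.
+assert.eq({a: {a: {a: 1}}}, makeNestObj(3));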
- let collection = db.objNestTest;
- collection.drop();
+let collection = db.objNestTest;
+collection.drop();
- assert.commandWorked(collection.ensureIndex({a: 1}));
+assert.commandWorked(collection.ensureIndex({a: 1}));
- const kMaxDocumentDepthSoftLimit = 100;
- const kJavaScriptMaxDepthLimit = 150;
+const kMaxDocumentDepthSoftLimit = 100;
+const kJavaScriptMaxDepthLimit = 150;
- let level;
- for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) {
- let object = makeNestObj(level);
- let res = db.runCommand({insert: collection.getName(), documents: [makeNestObj(level)]});
- if (!res.ok) {
- assert.commandFailedWithCode(
- res, 17280, "Expected insertion to fail only because key is too large to index");
- break;
- }
+let level;
+for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) {
+ let object = makeNestObj(level);
+ let res = db.runCommand({insert: collection.getName(), documents: [makeNestObj(level)]});
+ if (!res.ok) {
+ assert.commandFailedWithCode(
+ res, 17280, "Expected insertion to fail only because key is too large to index");
+ break;
}
+}
- assert.gt(level,
- kMaxDocumentDepthSoftLimit,
- "Unable to insert a document nested with " + level +
- " levels, which is less than the supported limit of " +
- kMaxDocumentDepthSoftLimit);
- assert.eq(collection.count(),
- collection.find().hint({a: 1}).itcount(),
- "Number of documents in collection does not match number of entries in index");
+assert.gt(level,
+ kMaxDocumentDepthSoftLimit,
+ "Unable to insert a document nested with " + level +
+ " levels, which is less than the supported limit of " + kMaxDocumentDepthSoftLimit);
+assert.eq(collection.count(),
+ collection.find().hint({a: 1}).itcount(),
+ "Number of documents in collection does not match number of entries in index");
}());
diff --git a/jstests/core/nin.js b/jstests/core/nin.js
index 36ed47550e0..d8a254357ba 100644
--- a/jstests/core/nin.js
+++ b/jstests/core/nin.js
@@ -12,12 +12,11 @@ function checkEqual(name, key, value) {
assert.eq(t.find().count(),
i + n,
- "checkEqual " + name + " $in + $nin != total | " + i + " + " + n + " != " +
- t.find().count());
+ "checkEqual " + name + " $in + $nin != total | " + i + " + " + n +
+ " != " + t.find().count());
}
doTest = function(n) {
-
t.save({a: [1, 2, 3]});
t.save({a: [1, 2, 4]});
t.save({a: [1, 8, 5]});
diff --git a/jstests/core/no_db_created.js b/jstests/core/no_db_created.js
index 231e8ffe581..e563a7cd468 100644
--- a/jstests/core/no_db_created.js
+++ b/jstests/core/no_db_created.js
@@ -3,35 +3,35 @@
// checks that operations do not create a database
(function() {
- "use strict";
- var adminDB = db.getSiblingDB("admin");
- var noDB = function(db) {
- var dbName = db.getName();
- var dbsRes = assert.commandWorked(adminDB.runCommand("listDatabases"));
- dbsRes.databases.forEach(function(e) {
- assert.neq(
- dbName, e.name, "Found db which shouldn't exist:" + dbName + "; " + tojson(dbsRes));
- });
- };
- var mydb = db.getSiblingDB("neverCreated");
- mydb.dropDatabase();
- noDB(mydb);
+"use strict";
+var adminDB = db.getSiblingDB("admin");
+var noDB = function(db) {
+ var dbName = db.getName();
+ var dbsRes = assert.commandWorked(adminDB.runCommand("listDatabases"));
+ dbsRes.databases.forEach(function(e) {
+ assert.neq(
+ dbName, e.name, "Found db which shouldn't exist:" + dbName + "; " + tojson(dbsRes));
+ });
+};
+var mydb = db.getSiblingDB("neverCreated");
+mydb.dropDatabase();
+noDB(mydb);
- var coll = mydb.fake;
+var coll = mydb.fake;
- // force:true is for replset passthroughs
- assert.commandFailed(coll.runCommand("compact", {force: true}));
- noDB(mydb);
- assert.writeOK(coll.insert({}));
- mydb.dropDatabase();
+// force:true is for replset passthroughs
+assert.commandFailed(coll.runCommand("compact", {force: true}));
+noDB(mydb);
+assert.writeOK(coll.insert({}));
+mydb.dropDatabase();
- assert.commandFailed(coll.runCommand("dropIndexes"));
- noDB(mydb);
- assert.writeOK(coll.insert({}));
- mydb.dropDatabase();
+assert.commandFailed(coll.runCommand("dropIndexes"));
+noDB(mydb);
+assert.writeOK(coll.insert({}));
+mydb.dropDatabase();
- assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds: 1}));
- noDB(mydb);
- assert.writeOK(coll.insert({}));
- mydb.dropDatabase();
+assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds: 1}));
+noDB(mydb);
+assert.writeOK(coll.insert({}));
+mydb.dropDatabase();
}()); \ No newline at end of file
diff --git a/jstests/core/not2.js b/jstests/core/not2.js
index 610d79c4d8f..8f0f91da1d5 100644
--- a/jstests/core/not2.js
+++ b/jstests/core/not2.js
@@ -1,89 +1,89 @@
// @tags: [requires_non_retryable_writes]
(function() {
- "use strict";
+"use strict";
- const coll = db.jstests_not2;
- coll.drop();
+const coll = db.jstests_not2;
+coll.drop();
- function check(query, expected) {
- const resultList = coll.find(query).sort({i: 1}).toArray();
- assert.eq(expected.length, resultList.length, query);
+function check(query, expected) {
+ const resultList = coll.find(query).sort({i: 1}).toArray();
+ assert.eq(expected.length, resultList.length, query);
- for (let x = 0; x < expected.length; ++x) {
- assert.eq(expected[x], resultList[x].i, query);
- }
+ for (let x = 0; x < expected.length; ++x) {
+ assert.eq(expected[x], resultList[x].i, query);
}
+}
- function fail(query) {
- assert.throws(() => coll.find(query).itcount());
- }
+function fail(query) {
+ assert.throws(() => coll.find(query).itcount());
+}
- function doTest() {
- assert.writeOK(coll.remove({}));
+function doTest() {
+ assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: "a"}));
- assert.writeOK(coll.insert({i: "b"}));
+ assert.writeOK(coll.insert({i: "a"}));
+ assert.writeOK(coll.insert({i: "b"}));
- // TODO SERVER-12735: We currently do not handle double negatives during query
- // canonicalization.
- fail({i: {$not: {$not: "a"}}});
- check({i: {$not: {$not: {$gt: "a"}}}}, ["b"]);
+ // TODO SERVER-12735: We currently do not handle double negatives during query
+ // canonicalization.
+ fail({i: {$not: {$not: "a"}}});
+ check({i: {$not: {$not: {$gt: "a"}}}}, ["b"]);
- fail({i: {$not: "a"}});
- fail({i: {$not: {$ref: "foo"}}});
- fail({i: {$not: {}}});
- check({i: {$gt: "a"}}, ["b"]);
- check({i: {$not: {$gt: "a"}}}, ["a"]);
- check({i: {$not: {$ne: "a"}}}, ["a"]);
- check({i: {$not: {$gte: "b"}}}, ["a"]);
- check({i: {$exists: true}}, ["a", "b"]);
- check({i: {$not: {$exists: true}}}, []);
- check({j: {$not: {$exists: false}}}, []);
- check({j: {$not: {$exists: true}}}, ["a", "b"]);
- check({i: {$not: {$in: ["a"]}}}, ["b"]);
- check({i: {$not: {$in: ["a", "b"]}}}, []);
- check({i: {$not: {$in: ["g"]}}}, ["a", "b"]);
- check({i: {$not: {$nin: ["a"]}}}, ["a"]);
- check({i: {$not: /a/}}, ["b"]);
- check({i: {$not: /(a|b)/}}, []);
- check({i: {$not: /a/, $regex: "a"}}, []);
- check({i: {$not: /aa/}}, ["a", "b"]);
- check({i: {$not: {$regex: "a"}}}, ["b"]);
- check({i: {$not: {$regex: "A", $options: "i"}}}, ["b"]);
- check({i: {$not: {$regex: "[ab]"}}}, []);
- check({i: {$not: {$regex: "^foo"}}}, ["a", "b"]);
- fail({i: {$not: {$options: "a"}}});
- check({i: {$type: 2}}, ["a", "b"]);
- check({i: {$not: {$type: 1}}}, ["a", "b"]);
- check({i: {$not: {$type: 2}}}, []);
+ fail({i: {$not: "a"}});
+ fail({i: {$not: {$ref: "foo"}}});
+ fail({i: {$not: {}}});
+ check({i: {$gt: "a"}}, ["b"]);
+ check({i: {$not: {$gt: "a"}}}, ["a"]);
+ check({i: {$not: {$ne: "a"}}}, ["a"]);
+ check({i: {$not: {$gte: "b"}}}, ["a"]);
+ check({i: {$exists: true}}, ["a", "b"]);
+ check({i: {$not: {$exists: true}}}, []);
+ check({j: {$not: {$exists: false}}}, []);
+ check({j: {$not: {$exists: true}}}, ["a", "b"]);
+ check({i: {$not: {$in: ["a"]}}}, ["b"]);
+ check({i: {$not: {$in: ["a", "b"]}}}, []);
+ check({i: {$not: {$in: ["g"]}}}, ["a", "b"]);
+ check({i: {$not: {$nin: ["a"]}}}, ["a"]);
+ check({i: {$not: /a/}}, ["b"]);
+ check({i: {$not: /(a|b)/}}, []);
+ check({i: {$not: /a/, $regex: "a"}}, []);
+ check({i: {$not: /aa/}}, ["a", "b"]);
+ check({i: {$not: {$regex: "a"}}}, ["b"]);
+ check({i: {$not: {$regex: "A", $options: "i"}}}, ["b"]);
+ check({i: {$not: {$regex: "[ab]"}}}, []);
+ check({i: {$not: {$regex: "^foo"}}}, ["a", "b"]);
+ fail({i: {$not: {$options: "a"}}});
+ check({i: {$type: 2}}, ["a", "b"]);
+ check({i: {$not: {$type: 1}}}, ["a", "b"]);
+ check({i: {$not: {$type: 2}}}, []);
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: 1}));
- check({i: {$not: {$mod: [5, 1]}}}, []);
- check({i: {$mod: [5, 2]}}, []);
- check({i: {$not: {$mod: [5, 2]}}}, [1]);
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.insert({i: 1}));
+ check({i: {$not: {$mod: [5, 1]}}}, []);
+ check({i: {$mod: [5, 2]}}, []);
+ check({i: {$not: {$mod: [5, 2]}}}, [1]);
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: ["a", "b"]}));
- check({i: {$not: {$size: 2}}}, []);
- check({i: {$not: {$size: 3}}}, [["a", "b"]]);
- check({i: {$not: {$gt: "a"}}}, []);
- check({i: {$not: {$gt: "c"}}}, [["a", "b"]]);
- check({i: {$not: {$all: ["a", "b"]}}}, []);
- check({i: {$not: {$all: ["c"]}}}, [["a", "b"]]);
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.insert({i: ["a", "b"]}));
+ check({i: {$not: {$size: 2}}}, []);
+ check({i: {$not: {$size: 3}}}, [["a", "b"]]);
+ check({i: {$not: {$gt: "a"}}}, []);
+ check({i: {$not: {$gt: "c"}}}, [["a", "b"]]);
+ check({i: {$not: {$all: ["a", "b"]}}}, []);
+ check({i: {$not: {$all: ["c"]}}}, [["a", "b"]]);
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({i: [{j: "a"}]}));
- assert.writeOK(coll.insert({i: [{j: "b"}]}));
- check({i: {$not: {$elemMatch: {j: "a"}}}}, [[{j: "b"}]]);
- check({i: {$not: {$elemMatch: {j: "f"}}}}, [[{j: "a"}], [{j: "b"}]]);
- }
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.insert({i: [{j: "a"}]}));
+ assert.writeOK(coll.insert({i: [{j: "b"}]}));
+ check({i: {$not: {$elemMatch: {j: "a"}}}}, [[{j: "b"}]]);
+ check({i: {$not: {$elemMatch: {j: "f"}}}}, [[{j: "a"}], [{j: "b"}]]);
+}
- // Run the test without any index.
- doTest();
+// Run the test without any index.
+doTest();
- // Run the test with an index present.
- assert.commandWorked(coll.ensureIndex({i: 1}));
- doTest();
+// Run the test with an index present.
+assert.commandWorked(coll.ensureIndex({i: 1}));
+doTest();
}());
diff --git a/jstests/core/ns_length.js b/jstests/core/ns_length.js
index 405d319630a..61ddf18343d 100644
--- a/jstests/core/ns_length.js
+++ b/jstests/core/ns_length.js
@@ -1,95 +1,96 @@
-/** SERVER-7282 Faulty logic when testing maximum collection name length.
+/**
+ * SERVER-7282 Faulty logic when testing maximum collection name length.
* @tags: [requires_non_retryable_commands, assumes_unsharded_collection]
*/
(function() {
- 'use strict';
+'use strict';
- // constants from server
- const maxNsLength = 127;
- const maxNsCollectionLength = 120;
+// constants from server
+const maxNsLength = 127;
+const maxNsCollectionLength = 120;
- const myDb = db.getSiblingDB("ns_length");
- myDb.dropDatabase(); // start empty
+const myDb = db.getSiblingDB("ns_length");
+myDb.dropDatabase(); // start empty
- function mkStr(length) {
- let s = "";
- while (s.length < length) {
- s += "x";
- }
- return s;
+function mkStr(length) {
+ let s = "";
+ while (s.length < length) {
+ s += "x";
}
+ return s;
+}
- function canMakeCollectionWithName(name) {
- assert.eq(myDb.stats().storageSize, 0, "initial conditions");
+function canMakeCollectionWithName(name) {
+ assert.eq(myDb.stats().storageSize, 0, "initial conditions");
- let success = false;
- try {
- // may either throw or return an error
- success = !(myDb[name].insert({}).hasWriteError());
- } catch (e) {
- success = false;
- }
-
- if (!success) {
- assert.eq(myDb.stats().storageSize, 0, "no files should be created on error");
- return false;
- }
-
- myDb.dropDatabase();
- return true;
+ let success = false;
+ try {
+ // may either throw or return an error
+ success = !(myDb[name].insert({}).hasWriteError());
+ } catch (e) {
+ success = false;
}
- function canMakeIndexWithName(collection, name) {
- var success = collection.ensureIndex({x: 1}, {name: name}).ok;
- if (success) {
- assert.commandWorked(collection.dropIndex(name));
- }
- return success;
+ if (!success) {
+ assert.eq(myDb.stats().storageSize, 0, "no files should be created on error");
+ return false;
}
- function canRenameCollection(from, to) {
- var success = myDb[from].renameCollection(to).ok;
- if (success) {
- // put it back
- assert.commandWorked(myDb[to].renameCollection(from));
- }
- return success;
- }
+ myDb.dropDatabase();
+ return true;
+}
- // test making collections around the name limit
- const prefixOverhead = (myDb.getName() + ".").length;
- const maxCollectionNameLength = maxNsCollectionLength - prefixOverhead;
- for (let i = maxCollectionNameLength - 3; i <= maxCollectionNameLength + 3; i++) {
- assert.eq(canMakeCollectionWithName(mkStr(i)),
- i <= maxCollectionNameLength,
- "ns name length = " + (prefixOverhead + i));
+function canMakeIndexWithName(collection, name) {
+ var success = collection.ensureIndex({x: 1}, {name: name}).ok;
+ if (success) {
+ assert.commandWorked(collection.dropIndex(name));
}
+ return success;
+}
- // test making indexes around the name limit
- const collection = myDb.collection;
- collection.insert({});
- const maxIndexNameLength = maxNsLength - (collection.getFullName() + ".$").length;
- for (let i = maxIndexNameLength - 3; i <= maxIndexNameLength + 3; i++) {
- assert(canMakeIndexWithName(collection, mkStr(i)),
- "index ns name length = " + ((collection.getFullName() + ".$").length + i));
+function canRenameCollection(from, to) {
+ var success = myDb[from].renameCollection(to).ok;
+ if (success) {
+ // put it back
+ assert.commandWorked(myDb[to].renameCollection(from));
}
+ return success;
+}
- // test renaming collections with the destination around the name limit
- myDb.from.insert({});
- for (let i = maxCollectionNameLength - 3; i <= maxCollectionNameLength + 3; i++) {
- assert.eq(canRenameCollection("from", mkStr(i)),
- i <= maxCollectionNameLength,
- "new ns name length = " + (prefixOverhead + i));
- }
+// test making collections around the name limit
+const prefixOverhead = (myDb.getName() + ".").length;
+const maxCollectionNameLength = maxNsCollectionLength - prefixOverhead;
+for (let i = maxCollectionNameLength - 3; i <= maxCollectionNameLength + 3; i++) {
+ assert.eq(canMakeCollectionWithName(mkStr(i)),
+ i <= maxCollectionNameLength,
+ "ns name length = " + (prefixOverhead + i));
+}
- // test renaming collections with the destination around the name limit due to long indexe names
- myDb.from.ensureIndex({a: 1}, {name: mkStr(100)});
- const indexNsNameOverhead =
- (myDb.getName() + "..$").length + 100; // index ns name - collection name
- var maxCollectionNameWithIndex = maxNsLength - indexNsNameOverhead;
- for (let i = maxCollectionNameWithIndex - 3; i <= maxCollectionNameWithIndex + 3; i++) {
- assert(canRenameCollection("from", mkStr(i)),
- "index ns name length = " + (indexNsNameOverhead + i));
- }
+// test making indexes around the name limit
+const collection = myDb.collection;
+collection.insert({});
+const maxIndexNameLength = maxNsLength - (collection.getFullName() + ".$").length;
+for (let i = maxIndexNameLength - 3; i <= maxIndexNameLength + 3; i++) {
+ assert(canMakeIndexWithName(collection, mkStr(i)),
+ "index ns name length = " + ((collection.getFullName() + ".$").length + i));
+}
+
+// test renaming collections with the destination around the name limit
+myDb.from.insert({});
+for (let i = maxCollectionNameLength - 3; i <= maxCollectionNameLength + 3; i++) {
+ assert.eq(canRenameCollection("from", mkStr(i)),
+ i <= maxCollectionNameLength,
+ "new ns name length = " + (prefixOverhead + i));
+}
+
+// test renaming collections with the destination around the name limit due to long indexe names
+myDb.from.ensureIndex({a: 1}, {name: mkStr(100)});
+const indexNsNameOverhead =
+ (myDb.getName() + "..$").length + 100; // index ns name - collection name
+var maxCollectionNameWithIndex = maxNsLength - indexNsNameOverhead;
+for (let i = maxCollectionNameWithIndex - 3; i <= maxCollectionNameWithIndex + 3; i++) {
+ assert(canRenameCollection("from", mkStr(i)),
+ "index ns name length = " + (indexNsNameOverhead + i));
+}
})();
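For reference, the arithmetic this test leans on: a namespace is "<db>.<collection>", so the longest legal collection name is maxNsCollectionLength minus the "<db>." prefix, while index name limits are computed against the full maxNsLength of 127 via the "<collection ns>.$<index name>" form. A quick sketch using the test's own constants:

// Longest creatable collection name in the "ns_length" database.
const maxNsCollectionLength = 120;           // server constant, as in the test
const prefixOverhead = "ns_length.".length;  // 10 characters: db name plus "."
assert.eq(110, maxNsCollectionLength - prefixOverhead);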
diff --git a/jstests/core/null_query_semantics.js b/jstests/core/null_query_semantics.js
index 7aa7a2a585f..8f664a6d80b 100644
--- a/jstests/core/null_query_semantics.js
+++ b/jstests/core/null_query_semantics.js
@@ -1,201 +1,308 @@
// Tests the behavior of queries with a {$eq: null} or {$ne: null} predicate.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'.
+load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'.
- const coll = db.not_equals_null;
- coll.drop();
+const coll = db.not_equals_null;
+coll.drop();
- function extractAValues(results) {
- return results.map(function(res) {
- if (!res.hasOwnProperty("a")) {
- return {};
- }
- return {a: res.a};
- });
- }
+function extractAValues(results) {
+ return results.map(function(res) {
+ if (!res.hasOwnProperty("a")) {
+ return {};
+ }
+ return {a: res.a};
+ });
+}
+
+function testNotEqualsNullSemantics() {
+ // For the first portion of the test, only insert documents without arrays. This will avoid
+ // making the indexes multi-key, which may allow an index to be used to answer the queries.
+ assert.writeOK(coll.insert([
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_null", a: null},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ {_id: "a_undefined", a: undefined},
+ {_id: "no_a"},
+ ]));
+
+ // Throughout this test we will run queries with a projection which may allow the planner to
+ // consider an index-only plan. Checking the results of those queries will test that the
+ // query system will never choose such an optimization if it is incorrect.
+ const projectToOnlyA = {_id: 0, a: 1};
+ const projectToOnlyADotB = {_id: 0, "a.b": 1};
+
+ // Test the semantics of the query {a: {$eq: null}}.
+ (function testBasicNullQuery() {
+ const noProjectResults = coll.find({a: {$eq: null}}).toArray();
+ const expected =
+ [{_id: "a_null", a: null}, {_id: "a_undefined", a: undefined}, {_id: "no_a"}];
+ assert(resultsEq(expected, noProjectResults), tojson(noProjectResults));
+
+ const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {a: {$ne: null}}.
+ (function testBasicNotEqualsNullQuery() {
+ const noProjectResults = coll.find({a: {$ne: null}}).toArray();
+ const expected = [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ ];
+ assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
+
+ const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {a: {$nin: [null, <number>]}}.
+ (function testNotInNullQuery() {
+ const query = {a: {$nin: [null, 4]}};
+ const noProjectResults = coll.find(query).toArray();
+ const expected = [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ ];
+
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
+ const expectedWithUndefined = expected.concat([
+ {_id: "a_undefined", a: undefined},
+ ]);
+ assert(resultsEq(noProjectResults, expected) ||
+ resultsEq(noProjectResults, expectedWithUndefined),
+ noProjectResults);
+
+ const projectResults = coll.find(query, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)) ||
+ resultsEq(projectResults, extractAValues(expectedWithUndefined)),
+ projectResults);
+ }());
+
+ (function testNotInNullAndRegexQuery() {
+ // While $nin: [null, ...] can be indexed, $nin: [<regex>] cannot. Ensure that we get
+ // the correct results in this case.
+ const query = {a: {$nin: [null, /^hi.*/]}};
+ const noProjectResults = coll.find(query).toArray();
+ const expected = [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_empty_subobject", a: 4},
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
+ {_id: "a_undefined", a: undefined},
+ ];
+ assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
- function testNotEqualsNullSemantics() {
- // For the first portion of the test, only insert documents without arrays. This will avoid
- // making the indexes multi-key, which may allow an index to be used to answer the queries.
- assert.writeOK(coll.insert([
+ const projectResults = coll.find(query, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
+ }());
+
+ (function testExistsFalse() {
+ const noProjectResults = coll.find({a: {$exists: false}}).toArray();
+ const expected = [
+ {_id: "no_a"},
+ ];
+ assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
+
+ const projectResults = coll.find({a: {$exists: false}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {"a.b": {$eq: null}}.
+ (function testDottedEqualsNull() {
+ const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray();
+ assert(resultsEq(noProjectResults,
+ [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_null", a: null},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ {_id: "a_undefined", a: undefined},
+ {_id: "no_a"}
+ ]),
+ tojson(noProjectResults));
+
+ const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults,
+ [{a: {}}, {}, {}, {a: {b: null}}, {a: {b: undefined}}, {}, {}]),
+ tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {"a.b": {$ne: null}}.
+ (function testDottedNotEqualsNull() {
+ const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray();
+ assert(resultsEq(noProjectResults, [{_id: "a_subobject_b_not_null", a: {b: "hi"}}]),
+ tojson(noProjectResults));
+
+ const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults, [{a: {b: "hi"}}]), tojson(projectResults));
+ }());
+
+ (function testDottedExistsFalse() {
+ const noProjectResults = coll.find({"a.b": {$exists: false}}).toArray();
+ const expected = [
+ {_id: "no_a"},
{_id: "a_empty_subobject", a: {}},
{_id: "a_null", a: null},
{_id: "a_number", a: 4},
+ {_id: "a_undefined", a: undefined},
+ ];
+ assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
+
+ const projectResults = coll.find({"a.b": {$exists: false}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults, [{}, {a: {}}, {}, {}, {}]), tojson(projectResults));
+ }());
+
+ // Test similar queries, but with an $elemMatch. These queries should have no results since
+ // an $elemMatch requires an array.
+ (function testElemMatchQueriesWithNoArrays() {
+ for (let elemMatchQuery of [{a: {$elemMatch: {$eq: null}}},
+ {a: {$elemMatch: {$ne: null}}},
+ {"a.b": {$elemMatch: {$eq: null}}},
+ {"a.b": {$elemMatch: {$ne: null}}},
+ {a: {$elemMatch: {b: {$eq: null}}}},
+ {a: {$elemMatch: {b: {$ne: null}}}},
+ ]) {
+ const noProjectResults = coll.find(elemMatchQuery).toArray();
+ assert(resultsEq(noProjectResults, []),
+ `Expected no results for query ${tojson(elemMatchQuery)}, got ` +
+ tojson(noProjectResults));
+
+ let projectResults = coll.find(elemMatchQuery, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, []),
+ `Expected no results for query ${tojson(elemMatchQuery)}, got ` +
+ tojson(projectResults));
+
+ projectResults = coll.find(elemMatchQuery, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults, []),
+ `Expected no results for query ${tojson(elemMatchQuery)}, got ` +
+ tojson(projectResults));
+ }
+ }());
+
+ // An index which includes "a" or a sub-path of "a" will become multi-key after this insert.
+ const writeResult = coll.insert([
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_empty_array", a: []},
+ {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ {_id: "a_value_array_all_nulls", a: [null, null]},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
+ ]);
+ if (writeResult.hasWriteErrors()) {
+ // We're testing a hashed index which is incompatible with arrays. Skip the multi-key
+ // portion of this test for this index.
+ assert.eq(writeResult.getWriteErrors().length, 1, tojson(writeResult));
+ assert.eq(writeResult.getWriteErrors()[0].code, 16766, tojson(writeResult));
+ return;
+ }
+ assert.writeOK(writeResult);
+
+ // Test the semantics of the query {a: {$eq: null}}.
+ (function testBasicNullQuery() {
+ const noProjectResults = coll.find({a: {$eq: null}}).toArray();
+ const expected = [
+ {_id: "a_null", a: null},
+ {_id: "a_undefined", a: undefined},
+ {_id: "a_value_array_all_nulls", a: [null, null]},
+ {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
+ {_id: "no_a"},
+ ];
+ assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
+
+ const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {a: {$ne: null}}.
+ (function testBasicNotEqualsNullQuery() {
+ const noProjectResults = coll.find({a: {$ne: null}}).toArray();
+ const expected = [
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_empty_array", a: []},
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_number", a: 4},
+ {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
{_id: "a_subobject_b_not_null", a: {b: "hi"}},
{_id: "a_subobject_b_null", a: {b: null}},
{_id: "a_subobject_b_undefined", a: {b: undefined}},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ ];
+ assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
+
+ const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {a: {$nin: [null, <number>]}}.
+ (function testNotInNullQuery() {
+ const query = {a: {$nin: [null, 75]}};
+ const noProjectResults = coll.find(query).toArray();
+ const expected = [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_empty_array", a: []},
+ {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ ];
+
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
+ const expectedWithUndefined = expected.concat([
{_id: "a_undefined", a: undefined},
- {_id: "no_a"},
- ]));
-
- // Throughout this test we will run queries with a projection which may allow the planner to
- // consider an index-only plan. Checking the results of those queries will test that the
- // query system will never choose such an optimization if it is incorrect.
- const projectToOnlyA = {_id: 0, a: 1};
- const projectToOnlyADotB = {_id: 0, "a.b": 1};
-
- // Test the semantics of the query {a: {$eq: null}}.
- (function testBasicNullQuery() {
- const noProjectResults = coll.find({a: {$eq: null}}).toArray();
- const expected =
- [{_id: "a_null", a: null}, {_id: "a_undefined", a: undefined}, {_id: "no_a"}];
- assert(resultsEq(expected, noProjectResults), tojson(noProjectResults));
-
- const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
- }());
-
- // Test the semantics of the query {a: {$ne: null}}.
- (function testBasicNotEqualsNullQuery() {
- const noProjectResults = coll.find({a: {$ne: null}}).toArray();
- const expected = [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_number", a: 4},
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- ];
- assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
-
- const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
- }());
-
- // Test the semantics of the query {a: {$nin: [null, <number>]}}.
- (function testNotInNullQuery() {
- const query = {a: {$nin: [null, 4]}};
- const noProjectResults = coll.find(query).toArray();
- const expected = [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- ];
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
+ ]);
+ assert(resultsEq(noProjectResults, expected) ||
+ resultsEq(noProjectResults, expectedWithUndefined),
+ noProjectResults);
+
+ const projectResults = coll.find(query, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)) ||
+ resultsEq(projectResults, extractAValues(expectedWithUndefined)),
+ projectResults);
+ }());
+
+ (function testNotInNullAndRegexQuery() {
+ const query = {a: {$nin: [null, /^str.*/]}};
+ const noProjectResults = coll.find(query).toArray();
+ const expected = [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
- // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
- const expectedWithUndefined = expected.concat([
- {_id: "a_undefined", a: undefined},
- ]);
- assert(resultsEq(noProjectResults, expected) ||
- resultsEq(noProjectResults, expectedWithUndefined),
- noProjectResults);
-
- const projectResults = coll.find(query, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)) ||
- resultsEq(projectResults, extractAValues(expectedWithUndefined)),
- projectResults);
- }());
-
- (function testNotInNullAndRegexQuery() {
- // While $nin: [null, ...] can be indexed, $nin: [<regex>] cannot. Ensure that we get
- // the correct results in this case.
- const query = {a: {$nin: [null, /^hi.*/]}};
- const noProjectResults = coll.find(query).toArray();
- const expected = [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_empty_subobject", a: 4},
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
-
- // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
- {_id: "a_undefined", a: undefined},
- ];
- assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
-
- const projectResults = coll.find(query, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
- }());
-
- (function testExistsFalse() {
- const noProjectResults = coll.find({a: {$exists: false}}).toArray();
- const expected = [
- {_id: "no_a"},
- ];
- assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
-
- const projectResults = coll.find({a: {$exists: false}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
- }());
-
- // Test the semantics of the query {"a.b": {$eq: null}}.
- (function testDottedEqualsNull() {
- const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray();
- assert(resultsEq(noProjectResults,
- [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_null", a: null},
- {_id: "a_number", a: 4},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- {_id: "a_undefined", a: undefined},
- {_id: "no_a"}
- ]),
- tojson(noProjectResults));
-
- const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults,
- [{a: {}}, {}, {}, {a: {b: null}}, {a: {b: undefined}}, {}, {}]),
- tojson(projectResults));
- }());
-
- // Test the semantics of the query {"a.b": {$ne: null}}.
- (function testDottedNotEqualsNull() {
- const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray();
- assert(resultsEq(noProjectResults, [{_id: "a_subobject_b_not_null", a: {b: "hi"}}]),
- tojson(noProjectResults));
-
- const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults, [{a: {b: "hi"}}]), tojson(projectResults));
- }());
-
- (function testDottedExistsFalse() {
- const noProjectResults = coll.find({"a.b": {$exists: false}}).toArray();
- const expected = [
- {_id: "no_a"},
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_null", a: null},
- {_id: "a_number", a: 4},
- {_id: "a_undefined", a: undefined},
- ];
- assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
-
- const projectResults =
- coll.find({"a.b": {$exists: false}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults, [{}, {a: {}}, {}, {}, {}]), tojson(projectResults));
- }());
-
- // Test similar queries, but with an $elemMatch. These queries should have no results since
- // an $elemMatch requires an array.
- (function testElemMatchQueriesWithNoArrays() {
- for (let elemMatchQuery of[{a: {$elemMatch: {$eq: null}}},
- {a: {$elemMatch: {$ne: null}}},
- {"a.b": {$elemMatch: {$eq: null}}},
- {"a.b": {$elemMatch: {$ne: null}}},
- {a: {$elemMatch: {b: {$eq: null}}}},
- {a: {$elemMatch: {b: {$ne: null}}}},
- ]) {
- const noProjectResults = coll.find(elemMatchQuery).toArray();
- assert(resultsEq(noProjectResults, []),
- `Expected no results for query ${tojson(elemMatchQuery)}, got ` +
- tojson(noProjectResults));
-
- let projectResults = coll.find(elemMatchQuery, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, []),
- `Expected no results for query ${tojson(elemMatchQuery)}, got ` +
- tojson(projectResults));
-
- projectResults = coll.find(elemMatchQuery, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults, []),
- `Expected no results for query ${tojson(elemMatchQuery)}, got ` +
- tojson(projectResults));
- }
- }());
-
- // An index which includes "a" or a sub-path of "a" will become multi-key after this insert.
- const writeResult = coll.insert([
{_id: "a_double_array", a: [[]]},
{_id: "a_empty_array", a: []},
{_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
@@ -203,394 +310,283 @@
{_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
{_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
{_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ ];
+
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
+ const expectedWithUndefined = expected.concat([
+ {_id: "a_undefined", a: undefined},
+ ]);
+
+ assert(resultsEq(noProjectResults, expected) ||
+ resultsEq(noProjectResults, expectedWithUndefined),
+ noProjectResults);
+
+ const projectResults = coll.find(query, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)) ||
+ resultsEq(projectResults, extractAValues(expectedWithUndefined)),
+ projectResults);
+ }());
+
+ // Test the results of similar queries with an $elemMatch.
+ (function testElemMatchValue() {
+ // Test $elemMatch with equality to null.
+ let noProjectResults = coll.find({a: {$elemMatch: {$eq: null}}}).toArray();
+ const expectedEqualToNull = [
{_id: "a_value_array_all_nulls", a: [null, null]},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
{_id: "a_value_array_with_null", a: [1, "string", null, 4]},
{_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- ]);
- if (writeResult.hasWriteErrors()) {
- // We're testing a hashed index which is incompatible with arrays. Skip the multi-key
- // portion of this test for this index.
- assert.eq(writeResult.getWriteErrors().length, 1, tojson(writeResult));
- assert.eq(writeResult.getWriteErrors()[0].code, 16766, tojson(writeResult));
- return;
- }
- assert.writeOK(writeResult);
-
- // Test the semantics of the query {a: {$eq: null}}.
- (function testBasicNullQuery() {
- const noProjectResults = coll.find({a: {$eq: null}}).toArray();
- const expected = [
- {_id: "a_null", a: null},
- {_id: "a_undefined", a: undefined},
- {_id: "a_value_array_all_nulls", a: [null, null]},
- {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- {_id: "no_a"},
- ];
- assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
-
- const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
- }());
-
- // Test the semantics of the query {a: {$ne: null}}.
- (function testBasicNotEqualsNullQuery() {
- const noProjectResults = coll.find({a: {$ne: null}}).toArray();
- const expected = [
- {_id: "a_double_array", a: [[]]},
- {_id: "a_empty_array", a: []},
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_number", a: 4},
- {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
- ];
- assert(resultsEq(noProjectResults, expected), tojson(noProjectResults));
-
- const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults));
- }());
-
- // Test the semantics of the query {a: {$nin: [null, <number>]}}.
- (function testNotInNullQuery() {
- const query = {a: {$nin: [null, 75]}};
- const noProjectResults = coll.find(query).toArray();
- const expected = [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_number", a: 4},
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
-
- {_id: "a_double_array", a: [[]]},
- {_id: "a_empty_array", a: []},
- {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
- ];
+ ];
+ assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults));
- // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
- const expectedWithUndefined = expected.concat([
- {_id: "a_undefined", a: undefined},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- ]);
- assert(resultsEq(noProjectResults, expected) ||
- resultsEq(noProjectResults, expectedWithUndefined),
- noProjectResults);
-
- const projectResults = coll.find(query, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)) ||
- resultsEq(projectResults, extractAValues(expectedWithUndefined)),
- projectResults);
- }());
-
- (function testNotInNullAndRegexQuery() {
- const query = {a: {$nin: [null, /^str.*/]}};
- const noProjectResults = coll.find(query).toArray();
- const expected = [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_number", a: 4},
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
-
- {_id: "a_double_array", a: [[]]},
- {_id: "a_empty_array", a: []},
- {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- ];
+ let projectResults = coll.find({a: {$elemMatch: {$eq: null}}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expectedEqualToNull)),
+ tojson(projectResults));
- // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
- const expectedWithUndefined = expected.concat([
- {_id: "a_undefined", a: undefined},
- ]);
-
- assert(resultsEq(noProjectResults, expected) ||
- resultsEq(noProjectResults, expectedWithUndefined),
- noProjectResults);
-
- const projectResults = coll.find(query, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)) ||
- resultsEq(projectResults, extractAValues(expectedWithUndefined)),
- projectResults);
- }());
-
- // Test the results of similar queries with an $elemMatch.
- (function testElemMatchValue() {
- // Test $elemMatch with equality to null.
- let noProjectResults = coll.find({a: {$elemMatch: {$eq: null}}}).toArray();
- const expectedEqualToNull = [
- {_id: "a_value_array_all_nulls", a: [null, null]},
- {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- ];
- assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults));
-
- let projectResults =
- coll.find({a: {$elemMatch: {$eq: null}}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expectedEqualToNull)),
- tojson(projectResults));
-
- // Test $elemMatch with not equal to null.
- noProjectResults = coll.find({a: {$elemMatch: {$ne: null}}}).toArray();
- const expectedNotEqualToNull = [
- {_id: "a_double_array", a: [[]]},
- {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
- ];
- assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults));
-
- projectResults = coll.find({a: {$elemMatch: {$ne: null}}}, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expectedNotEqualToNull)),
- tojson(projectResults));
- }());
-
- // Test the semantics of the query {"a.b": {$eq: null}}. The semantics here are to return
- // those documents which have one of the following properties:
- // - A non-object, non-array value for "a"
- // - A subobject "a" with a missing, null, or undefined value for "b"
- // - An array which has at least one object in it which has a missing, null, or undefined
- // value for "b".
- (function testDottedEqualsNull() {
- const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray();
- assert(
- resultsEq(noProjectResults,
- [
- {_id: "a_empty_subobject", a: {}},
- {_id: "a_null", a: null},
- {_id: "a_number", a: 4},
- {_id: "a_subobject_b_null", a: {b: null}},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- {_id: "a_undefined", a: undefined},
- {_id: "no_a"},
- {
+ // Test $elemMatch with not equal to null.
+ noProjectResults = coll.find({a: {$elemMatch: {$ne: null}}}).toArray();
+ const expectedNotEqualToNull = [
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
+ {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
+ ];
+ assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults));
+
+ projectResults = coll.find({a: {$elemMatch: {$ne: null}}}, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expectedNotEqualToNull)),
+ tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {"a.b": {$eq: null}}. The semantics here are to return
+ // those documents which have one of the following properties:
+ // - A non-object, non-array value for "a"
+ // - A subobject "a" with a missing, null, or undefined value for "b"
+ // - An array which has at least one object in it which has a missing, null, or undefined
+ // value for "b".
+ (function testDottedEqualsNull() {
+ const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray();
+ assert(
+ resultsEq(noProjectResults,
+ [
+ {_id: "a_empty_subobject", a: {}},
+ {_id: "a_null", a: null},
+ {_id: "a_number", a: 4},
+ {_id: "a_subobject_b_null", a: {b: null}},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ {_id: "a_undefined", a: undefined},
+ {_id: "no_a"},
+ {
_id: "a_object_array_all_b_nulls",
a: [{b: null}, {b: undefined}, {b: null}, {}]
- },
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- ]),
- tojson(noProjectResults));
-
- const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults,
- [
- {a: {}},
- {},
- {},
- {a: {b: null}},
- {a: {b: undefined}},
- {},
- {},
- {a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {a: [{b: null}, {b: 3}, {b: null}]},
- {a: [{b: undefined}, {b: 3}]},
- {a: [{b: 3}, {}]},
- ]),
- tojson(projectResults));
- }());
-
- // Test the semantics of the query {"a.b": {$ne: null}}.
- (function testDottedNotEqualsNull() {
- const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray();
- assert(
- resultsEq(noProjectResults,
- [
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_double_array", a: [[]]},
- {_id: "a_empty_array", a: []},
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_value_array_all_nulls", a: [null, null]},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
- {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}
- ]),
- tojson(noProjectResults));
-
- const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults,
- [
- {a: {b: "hi"}},
- {a: [[]]},
- {a: []},
- {a: [{b: 1}, {b: 3}, {b: "string"}]},
- {a: []},
- {a: []},
- {a: []},
- {a: []}
- ]),
- tojson(projectResults));
- }());
-
- // Test the semantics of the query {a.b: {$nin: [null, <number>]}}.
- (function testDottedNotInNullQuery() {
- const query = {"a.b": {$nin: [null, 75]}};
- const noProjectResults = coll.find(query).toArray();
- const expected = [
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_double_array", a: [[]]},
- {_id: "a_empty_array", a: []},
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_value_array_all_nulls", a: [null, null]},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
- {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- ];
-
- // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
- const expectedWithUndefined = expected.concat([
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- ]);
- assert(resultsEq(noProjectResults, expected) ||
- resultsEq(noProjectResults, expectedWithUndefined),
- noProjectResults);
-
- const projectResults = coll.find(query, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)) ||
- resultsEq(projectResults, extractAValues(expectedWithUndefined)),
- projectResults);
- }());
-
- // Test the semantics of the query {a.b: {$nin: [null, <regex>]}}.
- (function testDottedNotInNullAndRegexQuery() {
- const query = {"a.b": {$nin: [null, /^str.*/]}};
- const noProjectResults = coll.find(query).toArray();
- const expected = [
- {_id: "a_subobject_b_not_null", a: {b: "hi"}},
- {_id: "a_double_array", a: [[]]},
- {_id: "a_empty_array", a: []},
- {_id: "a_value_array_all_nulls", a: [null, null]},
- {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
- {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
- {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
- ];
+ },
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ ]),
+ tojson(noProjectResults));
+
+ const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults,
+ [
+ {a: {}},
+ {},
+ {},
+ {a: {b: null}},
+ {a: {b: undefined}},
+ {},
+ {},
+ {a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {a: [{b: null}, {b: 3}, {b: null}]},
+ {a: [{b: undefined}, {b: 3}]},
+ {a: [{b: 3}, {}]},
+ ]),
+ tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {"a.b": {$ne: null}}.
+ (function testDottedNotEqualsNull() {
+ const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray();
+ assert(resultsEq(noProjectResults,
+ [
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_empty_array", a: []},
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_value_array_all_nulls", a: [null, null]},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}
+ ]),
+ tojson(noProjectResults));
+
+ const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults,
+ [
+ {a: {b: "hi"}},
+ {a: [[]]},
+ {a: []},
+ {a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {a: []},
+ {a: []},
+ {a: []},
+ {a: []}
+ ]),
+ tojson(projectResults));
+ }());
+
+ // Test the semantics of the query {a.b: {$nin: [null, <number>]}}.
+ (function testDottedNotInNullQuery() {
+ const query = {"a.b": {$nin: [null, 75]}};
+ const noProjectResults = coll.find(query).toArray();
+ const expected = [
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_empty_array", a: []},
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_value_array_all_nulls", a: [null, null]},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
+ ];
- // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
- const expectedWithUndefined = expected.concat([
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_subobject_b_undefined", a: {b: undefined}},
- ]);
- assert(resultsEq(noProjectResults, expected) ||
- resultsEq(noProjectResults, expectedWithUndefined),
- noProjectResults);
-
- const projectResults = coll.find(query, projectToOnlyA).toArray();
- assert(resultsEq(projectResults, extractAValues(expected)) ||
- resultsEq(projectResults, extractAValues(expectedWithUndefined)),
- projectResults);
- }());
-
- // Test the results of similar dotted queries with an $elemMatch. These should have no
- // results since none of our documents have an array at the path "a.b".
- (function testDottedElemMatchValue() {
- let results = coll.find({"a.b": {$elemMatch: {$eq: null}}}).toArray();
- assert(resultsEq(results, []), tojson(results));
-
- results = coll.find({"a.b": {$elemMatch: {$ne: null}}}).toArray();
- assert(resultsEq(results, []), tojson(results));
- }());
-
- // Test null semantics within an $elemMatch object.
- (function testElemMatchObject() {
- // Test $elemMatch with equality to null.
- let noProjectResults = coll.find({a: {$elemMatch: {b: {$eq: null}}}}).toArray();
- const expectedEqualToNull = [
- {_id: "a_double_array", a: [[]]},
- {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- ];
- assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults));
-
- let projectResults =
- coll.find({a: {$elemMatch: {b: {$eq: null}}}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults,
- [
- {a: [[]]},
- {a: [{b: null}, {b: undefined}, {b: null}, {}]},
- {a: [{b: null}, {b: 3}, {b: null}]},
- {a: [{b: undefined}, {b: 3}]},
- {a: [{b: 3}, {}]},
- ]),
- tojson(projectResults));
-
- // Test $elemMatch with not equal to null.
- noProjectResults = coll.find({a: {$elemMatch: {b: {$ne: null}}}}).toArray();
- const expectedNotEqualToNull = [
- {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
- {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
- {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
- {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
- ];
- assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults));
-
- projectResults =
- coll.find({a: {$elemMatch: {b: {$ne: null}}}}, projectToOnlyADotB).toArray();
- assert(resultsEq(projectResults,
- [
- {a: [{b: 1}, {b: 3}, {b: "string"}]},
- {a: [{b: null}, {b: 3}, {b: null}]},
- {a: [{b: undefined}, {b: 3}]},
- {a: [{b: 3}, {}]},
- ]),
- tojson(projectResults));
- }());
- }
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
+ const expectedWithUndefined = expected.concat([
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ ]);
+ assert(resultsEq(noProjectResults, expected) ||
+ resultsEq(noProjectResults, expectedWithUndefined),
+ noProjectResults);
+
+ const projectResults = coll.find(query, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)) ||
+ resultsEq(projectResults, extractAValues(expectedWithUndefined)),
+ projectResults);
+ }());
+
+ // Test the semantics of the query {a.b: {$nin: [null, <regex>]}}.
+ (function testDottedNotInNullAndRegexQuery() {
+ const query = {"a.b": {$nin: [null, /^str.*/]}};
+ const noProjectResults = coll.find(query).toArray();
+ const expected = [
+ {_id: "a_subobject_b_not_null", a: {b: "hi"}},
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_empty_array", a: []},
+ {_id: "a_value_array_all_nulls", a: [null, null]},
+ {_id: "a_value_array_no_nulls", a: [1, "string", 4]},
+ {_id: "a_value_array_with_null", a: [1, "string", null, 4]},
+ {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]},
+ ];
- // Test without any indexes.
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined".
+ const expectedWithUndefined = expected.concat([
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_subobject_b_undefined", a: {b: undefined}},
+ ]);
+ assert(resultsEq(noProjectResults, expected) ||
+ resultsEq(noProjectResults, expectedWithUndefined),
+ noProjectResults);
+
+ const projectResults = coll.find(query, projectToOnlyA).toArray();
+ assert(resultsEq(projectResults, extractAValues(expected)) ||
+ resultsEq(projectResults, extractAValues(expectedWithUndefined)),
+ projectResults);
+ }());
+
+ // Test the results of similar dotted queries with an $elemMatch. These should have no
+ // results since none of our documents have an array at the path "a.b".
+ (function testDottedElemMatchValue() {
+ let results = coll.find({"a.b": {$elemMatch: {$eq: null}}}).toArray();
+ assert(resultsEq(results, []), tojson(results));
+
+ results = coll.find({"a.b": {$elemMatch: {$ne: null}}}).toArray();
+ assert(resultsEq(results, []), tojson(results));
+ }());
+
+ // Test null semantics within an $elemMatch object.
+ (function testElemMatchObject() {
+ // Test $elemMatch with equality to null.
+ let noProjectResults = coll.find({a: {$elemMatch: {b: {$eq: null}}}}).toArray();
+ const expectedEqualToNull = [
+ {_id: "a_double_array", a: [[]]},
+ {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ ];
+ assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults));
+
+ let projectResults =
+ coll.find({a: {$elemMatch: {b: {$eq: null}}}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults,
+ [
+ {a: [[]]},
+ {a: [{b: null}, {b: undefined}, {b: null}, {}]},
+ {a: [{b: null}, {b: 3}, {b: null}]},
+ {a: [{b: undefined}, {b: 3}]},
+ {a: [{b: 3}, {}]},
+ ]),
+ tojson(projectResults));
+
+ // Test $elemMatch with not equal to null.
+ noProjectResults = coll.find({a: {$elemMatch: {b: {$ne: null}}}}).toArray();
+ const expectedNotEqualToNull = [
+ {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]},
+ {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]},
+ {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]},
+ ];
+ assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults));
+
+ projectResults =
+ coll.find({a: {$elemMatch: {b: {$ne: null}}}}, projectToOnlyADotB).toArray();
+ assert(resultsEq(projectResults,
+ [
+ {a: [{b: 1}, {b: 3}, {b: "string"}]},
+ {a: [{b: null}, {b: 3}, {b: null}]},
+ {a: [{b: undefined}, {b: 3}]},
+ {a: [{b: 3}, {}]},
+ ]),
+ tojson(projectResults));
+ }());
+}
+
+// Test without any indexes.
+testNotEqualsNullSemantics(coll);
+
+const keyPatterns = [
+ {keyPattern: {a: 1}},
+ {keyPattern: {a: -1}},
+ {keyPattern: {a: "hashed"}},
+ {keyPattern: {a: 1}, options: {partialFilterExpression: {a: {$exists: true}}}},
+ {keyPattern: {a: 1}, options: {sparse: true}},
+ {keyPattern: {"a.b": 1}},
+ {keyPattern: {_id: 1, "a.b": 1}},
+ {keyPattern: {"a.b": 1, _id: 1}},
+ {keyPattern: {"a.b": 1}, options: {partialFilterExpression: {a: {$exists: true}}}},
+ {keyPattern: {"a.b": 1, _id: 1}, options: {sparse: true}},
+ {keyPattern: {"$**": 1}},
+ {keyPattern: {"a.$**": 1}}
+];
+
+// Test with a variety of other indexes.
+for (let indexSpec of keyPatterns) {
+ coll.drop();
+ jsTestLog(`Index spec: ${tojson(indexSpec)}`);
+ assert.commandWorked(coll.createIndex(indexSpec.keyPattern, indexSpec.options));
testNotEqualsNullSemantics(coll);
+}
- const keyPatterns = [
- {keyPattern: {a: 1}},
- {keyPattern: {a: -1}},
- {keyPattern: {a: "hashed"}},
- {keyPattern: {a: 1}, options: {partialFilterExpression: {a: {$exists: true}}}},
- {keyPattern: {a: 1}, options: {sparse: true}},
- {keyPattern: {"a.b": 1}},
- {keyPattern: {_id: 1, "a.b": 1}},
- {keyPattern: {"a.b": 1, _id: 1}},
- {keyPattern: {"a.b": 1}, options: {partialFilterExpression: {a: {$exists: true}}}},
- {keyPattern: {"a.b": 1, _id: 1}, options: {sparse: true}},
- {keyPattern: {"$**": 1}},
- {keyPattern: {"a.$**": 1}}
- ];
-
- // Test with a variety of other indexes.
- for (let indexSpec of keyPatterns) {
- coll.drop();
- jsTestLog(`Index spec: ${tojson(indexSpec)}`);
- assert.commandWorked(coll.createIndex(indexSpec.keyPattern, indexSpec.options));
- testNotEqualsNullSemantics(coll);
- }
-
- // Test that you cannot use a $ne: null predicate in a partial filter expression.
- assert.commandFailedWithCode(
- coll.createIndex({a: 1}, {partialFilterExpression: {a: {$ne: null}}}),
- ErrorCodes.CannotCreateIndex);
+// Test that you cannot use a $ne: null predicate in a partial filter expression.
+assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression: {a: {$ne: null}}}),
+ ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(
- coll.createIndex({a: 1}, {partialFilterExpression: {a: {$elemMatch: {$ne: null}}}}),
- ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(
+ coll.createIndex({a: 1}, {partialFilterExpression: {a: {$elemMatch: {$ne: null}}}}),
+ ErrorCodes.CannotCreateIndex);
}());
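The comment above testDottedEqualsNull spells out the dotted-null matching rules; here is a compact way to see two of them in the shell (the collection name `nulls_demo` is illustrative only):

// {"a.b": {$eq: null}} matches when the path is missing or explicitly null.
const demo = db.nulls_demo;
demo.drop();
assert.writeOK(demo.insert([
    {_id: "scalar_a", a: 4},       // non-object, non-array "a": matches
    {_id: "b_missing", a: {}},     // subobject with no "b": matches
    {_id: "b_set", a: {b: "hi"}},  // "b" present and non-null: no match
]));
const ids = demo.find({"a.b": {$eq: null}}).sort({_id: 1}).toArray().map(d => d._id);
assert.eq(["b_missing", "scalar_a"], ids);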
diff --git a/jstests/core/numberlong.js b/jstests/core/numberlong.js
index c50fc8599c3..a7dfd014539 100644
--- a/jstests/core/numberlong.js
+++ b/jstests/core/numberlong.js
@@ -132,14 +132,14 @@ for (var i = 0; i < badValues.length; i++) {
// parsing
assert.throws.automsg(function() {
- new NumberLong("");
-});
+ new NumberLong("");
+ });
assert.throws.automsg(function() {
- new NumberLong("y");
-});
+ new NumberLong("y");
+ });
assert.throws.automsg(function() {
- new NumberLong("11111111111111111111");
-});
+ new NumberLong("11111111111111111111");
+ });
// create NumberLong from NumberInt (SERVER-9973)
assert.doesNotThrow.automsg(function() {
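The parse-failure cases above sit at the 64-bit boundary: NumberLong holds a signed 64-bit integer, so 9223372036854775807 (2^63 - 1) is the largest decimal string that can parse, and the 20-digit string in the test overflows it. A hedged sketch of that boundary:

// The maximum signed 64-bit value parses; one past it should throw, like the
// 20-digit string in the test above.
assert.doesNotThrow(function() {
    NumberLong("9223372036854775807");  // 2^63 - 1
});
assert.throws(function() {
    NumberLong("9223372036854775808");  // 2^63, out of range
});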
diff --git a/jstests/core/numberlong3.js b/jstests/core/numberlong3.js
index b62d1865ff4..0dabdec2a05 100644
--- a/jstests/core/numberlong3.js
+++ b/jstests/core/numberlong3.js
@@ -13,7 +13,7 @@ for (i = 10; i >= 0; --i) {
}
ret = t.find().sort({x: 1}).toArray().filter(function(x) {
- return typeof(x.x.floatApprox) != 'undefined';
+ return typeof (x.x.floatApprox) != 'undefined';
});
// printjson( ret );
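The floatApprox filter above selects NumberLong values that are not exactly representable as a double; assuming the legacy shell's representation, that property appears once the magnitude passes 2^53, the limit of exact integer doubles. A small sketch of that assumption:

// 2^53 is still an exact double; 2^53 + 1 is not, so only the latter should
// carry a floatApprox field.
const exact = NumberLong("9007199254740992");
const inexact = NumberLong("9007199254740993");
assert.eq('undefined', typeof (exact.floatApprox));
assert.neq('undefined', typeof (inexact.floatApprox));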
diff --git a/jstests/core/objid6.js b/jstests/core/objid6.js
index 0165d0c8e37..28be2a3fa42 100644
--- a/jstests/core/objid6.js
+++ b/jstests/core/objid6.js
@@ -1,10 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- var o = new ObjectId();
- assert(o.getTimestamp);
+var o = new ObjectId();
+assert(o.getTimestamp);
- var a = new ObjectId("4c17f616a707427266a2801a");
- var b = new ObjectId("4c17f616a707428966a2801c");
- assert.eq(a.getTimestamp(), b.getTimestamp());
+var a = new ObjectId("4c17f616a707427266a2801a");
+var b = new ObjectId("4c17f616a707428966a2801c");
+assert.eq(a.getTimestamp(), b.getTimestamp());
})();
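The final assertion holds because an ObjectId's timestamp is just its first four bytes read as big-endian seconds since the Unix epoch, and both hex strings above share the prefix 4c17f616. A quick sketch:

// The leading 8 hex digits encode creation time in epoch seconds.
var oid = new ObjectId("4c17f616a707427266a2801a");
var seconds = parseInt("4c17f616", 16);
assert.eq(seconds * 1000, oid.getTimestamp().getTime());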
diff --git a/jstests/core/opcounters_active.js b/jstests/core/opcounters_active.js
index 9c93adcc719..4e4fe7ff326 100644
--- a/jstests/core/opcounters_active.js
+++ b/jstests/core/opcounters_active.js
@@ -3,33 +3,33 @@
// ]
(function() {
- "use strict";
- // Test the getActiveCommands function
- // Should remove the listCollections section but keep the rest
- var testInput = {
- "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)},
- "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}},
- "listCollections": {"failed": NumberLong(0), "total": NumberLong(0)}
- };
- var testExpected = {
- "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)},
- "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}}
- };
- var testResult = getActiveCommands(testInput);
+"use strict";
+// Test the getActiveCommands function
+// Should remove the listCollections section but keep the rest
+var testInput = {
+ "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)},
+ "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}},
+ "listCollections": {"failed": NumberLong(0), "total": NumberLong(0)}
+};
+var testExpected = {
+ "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)},
+ "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}}
+};
+var testResult = getActiveCommands(testInput);
- assert.eq(testResult, testExpected, "getActiveCommands did not return the expected result");
+assert.eq(testResult, testExpected, "getActiveCommands did not return the expected result");
- // Test that the serverstatus helper works
- var result = db.serverStatus();
- assert.neq(undefined, result, tojson(result));
- // Test that the metrics tree returns
- assert.neq(undefined, result.metrics, tojson(result));
- // Test that the metrics.commands tree returns
- assert.neq(undefined, result.metrics.commands, tojson(result));
- // Test that the metrics.commands.serverStatus value is non-zero
- assert.neq(0, result.metrics.commands.serverStatus.total, tojson(result));
+// Test that the serverstatus helper works
+var result = db.serverStatus();
+assert.neq(undefined, result, tojson(result));
+// Test that the metrics tree is returned
+assert.neq(undefined, result.metrics, tojson(result));
+// Test that the metrics.commands tree is returned
+assert.neq(undefined, result.metrics.commands, tojson(result));
+// Test that the metrics.commands.serverStatus value is non-zero
+assert.neq(0, result.metrics.commands.serverStatus.total, tojson(result));
- // Test that the command returns successfully when no metrics tree is present
- var result = db.serverStatus({"metrics": 0});
- assert.eq(undefined, result.metrics, tojson(result));
+// Test that the command returns successfully when no metrics tree is present
+var result = db.serverStatus({"metrics": 0});
+assert.eq(undefined, result.metrics, tojson(result));
}());
\ No newline at end of file
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index d3bce1305c9..d81308192ac 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -15,170 +15,169 @@
// tag incompatible_with_embedded.
(function() {
- "use strict";
-
- load("jstests/libs/stats.js");
- var name = "operationalLatencyHistogramTest";
-
- var testDB = db.getSiblingDB(name);
- var testColl = testDB[name + "coll"];
-
- testColl.drop();
-
- // Test aggregation command output format.
- var commandResult = testDB.runCommand(
- {aggregate: testColl.getName(), pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}});
- assert.commandWorked(commandResult);
- assert(commandResult.cursor.firstBatch.length == 1);
-
- var stats = commandResult.cursor.firstBatch[0];
- var histogramTypes = ["reads", "writes", "commands"];
-
- assert(stats.hasOwnProperty("localTime"));
- assert(stats.hasOwnProperty("latencyStats"));
-
- histogramTypes.forEach(function(key) {
- assert(stats.latencyStats.hasOwnProperty(key));
- assert(stats.latencyStats[key].hasOwnProperty("ops"));
- assert(stats.latencyStats[key].hasOwnProperty("latency"));
- });
-
- var lastHistogram = getHistogramStats(testColl);
-
- // Insert
- var numRecords = 100;
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
-
- // Update
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
-
- // Find
- var cursors = [];
- for (var i = 0; i < numRecords; i++) {
- cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2);
- assert.eq(cursors[i].next()._id, i);
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
-
- // GetMore
- for (var i = 0; i < numRecords / 2; i++) {
- // Trigger two getmore commands.
- assert.eq(cursors[i].next()._id, i + 1);
- assert.eq(cursors[i].next()._id, i + 2);
- assert.eq(cursors[i].next()._id, i + 3);
- assert.eq(cursors[i].next()._id, i + 4);
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
-
- // KillCursors
- // The last cursor has no additional results, hence does not need to be closed.
- for (var i = 0; i < numRecords - 1; i++) {
- cursors[i].close();
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords - 1);
-
- // Remove
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: i}));
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
-
- // Upsert
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
-
- // Aggregate
- for (var i = 0; i < numRecords; i++) {
- testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
-
- // Count
- for (var i = 0; i < numRecords; i++) {
- testColl.count({x: i});
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
-
- // FindAndModify
- testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}});
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 1, 0);
-
- // CreateIndex
- assert.commandWorked(testColl.createIndex({pt: "2dsphere"}));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // $geoNear aggregation stage
- assert.commandWorked(testDB.runCommand({
- aggregate: testColl.getName(),
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- distanceField: "dist",
- }
- }],
- cursor: {},
- }));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 1, 0, 0);
-
- // GetIndexes
- testColl.getIndexes();
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // Reindex
- assert.commandWorked(testColl.reIndex());
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // DropIndex
- assert.commandWorked(testColl.dropIndex({pt: "2dsphere"}));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // Explain
- testColl.explain().find().next();
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // CollStats
- assert.commandWorked(testDB.runCommand({collStats: testColl.getName()}));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // CollMod
- assert.commandWorked(
- testDB.runCommand({collStats: testColl.getName(), validationLevel: "off"}));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // Compact
- // Use force:true in case we're in replset.
- var commandResult = testDB.runCommand({compact: testColl.getName(), force: true});
- // If storage engine supports compact, it should count as a command.
- if (!commandResult.ok) {
- assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported);
- }
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // DataSize
- testColl.dataSize();
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // PlanCache
- testColl.getPlanCache().listQueryShapes();
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
-
- // Commands which occur on the database only should not effect the collection stats.
- assert.commandWorked(testDB.serverStatus());
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
-
- assert.commandWorked(testColl.runCommand("whatsmyuri"));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
-
- // Test non-command.
- assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
+"use strict";
+
+load("jstests/libs/stats.js");
+var name = "operationalLatencyHistogramTest";
+
+var testDB = db.getSiblingDB(name);
+var testColl = testDB[name + "coll"];
+
+testColl.drop();
+
+// Test aggregation command output format.
+var commandResult = testDB.runCommand(
+ {aggregate: testColl.getName(), pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}});
+assert.commandWorked(commandResult);
+assert(commandResult.cursor.firstBatch.length == 1);
+
+var stats = commandResult.cursor.firstBatch[0];
+var histogramTypes = ["reads", "writes", "commands"];
+
+assert(stats.hasOwnProperty("localTime"));
+assert(stats.hasOwnProperty("latencyStats"));
+
+histogramTypes.forEach(function(key) {
+ assert(stats.latencyStats.hasOwnProperty(key));
+ assert(stats.latencyStats[key].hasOwnProperty("ops"));
+ assert(stats.latencyStats[key].hasOwnProperty("latency"));
+});
+
+var lastHistogram = getHistogramStats(testColl);
+
+// Insert
+var numRecords = 100;
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.insert({_id: i}));
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
+
+// Update
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}));
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
+
+// Find
+var cursors = [];
+for (var i = 0; i < numRecords; i++) {
+ cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2);
+ assert.eq(cursors[i].next()._id, i);
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
+
+// GetMore
+for (var i = 0; i < numRecords / 2; i++) {
+ // Trigger two getmore commands.
+ assert.eq(cursors[i].next()._id, i + 1);
+ assert.eq(cursors[i].next()._id, i + 2);
+ assert.eq(cursors[i].next()._id, i + 3);
+ assert.eq(cursors[i].next()._id, i + 4);
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
+
+// KillCursors
+// The last cursor has no additional results, hence does not need to be closed.
+for (var i = 0; i < numRecords - 1; i++) {
+ cursors[i].close();
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords - 1);
+
+// Remove
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.remove({_id: i}));
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
+
+// Upsert
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0);
+
+// Aggregate
+for (var i = 0; i < numRecords; i++) {
+ testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
+
+// Count
+for (var i = 0; i < numRecords; i++) {
+ testColl.count({x: i});
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
+
+// FindAndModify
+testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}});
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 1, 0);
+
+// CreateIndex
+assert.commandWorked(testColl.createIndex({pt: "2dsphere"}));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// $geoNear aggregation stage
+assert.commandWorked(testDB.runCommand({
+ aggregate: testColl.getName(),
+ pipeline: [{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ distanceField: "dist",
+ }
+ }],
+ cursor: {},
+}));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 1, 0, 0);
+
+// GetIndexes
+testColl.getIndexes();
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// Reindex
+assert.commandWorked(testColl.reIndex());
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// DropIndex
+assert.commandWorked(testColl.dropIndex({pt: "2dsphere"}));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// Explain
+testColl.explain().find().next();
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// CollStats
+assert.commandWorked(testDB.runCommand({collStats: testColl.getName()}));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// CollMod
+assert.commandWorked(testDB.runCommand({collMod: testColl.getName(), validationLevel: "off"}));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// Compact
+// Use force:true in case we're in replset.
+var commandResult = testDB.runCommand({compact: testColl.getName(), force: true});
+// If the storage engine supports compact, it should count as a command.
+if (!commandResult.ok) {
+ assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported);
+}
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// DataSize
+testColl.dataSize();
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// PlanCache
+testColl.getPlanCache().listQueryShapes();
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
+
+// Commands which occur on the database only should not affect the collection stats.
+assert.commandWorked(testDB.serverStatus());
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
+
+assert.commandWorked(testColl.runCommand("whatsmyuri"));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
+
+// Test a nonexistent command.
+assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
+lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
}());
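
Note: getHistogramStats and assertHistogramDiffEq come from jstests/libs/stats.js,
which is not part of this diff. The sketch below is a minimal version consistent
with how the test uses them; the latencyStats field names are taken from the
$collStats assertions above, and the helper bodies are an assumption, not the
library's actual code.

    function getHistogramStats(coll) {
        const res = coll.getDB().runCommand(
            {aggregate: coll.getName(), pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}});
        assert.commandWorked(res);
        return res.cursor.firstBatch[0].latencyStats;
    }

    function assertHistogramDiffEq(coll, lastHistogram, readDiff, writeDiff, commandDiff) {
        // Read the current histogram and check the per-bucket op-count deltas.
        const current = getHistogramStats(coll);
        assert.eq(current.reads.ops - lastHistogram.reads.ops, readDiff);
        assert.eq(current.writes.ops - lastHistogram.writes.ops, writeDiff);
        assert.eq(current.commands.ops - lastHistogram.commands.ops, commandDiff);
        return current;  // Callers chain this as the next baseline.
    }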
diff --git a/jstests/core/optime_cmp.js b/jstests/core/optime_cmp.js
index 436039946ce..db58f856f76 100644
--- a/jstests/core/optime_cmp.js
+++ b/jstests/core/optime_cmp.js
@@ -1,15 +1,14 @@
(function() {
- 'use strict';
+'use strict';
- // PV1
- assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(3, 1), t: 2}));
- assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(2, 4), t: 2}));
- assert.eq(-1, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(2, 0), t: 3}));
+// PV1
+assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(3, 1), t: 2}));
+assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(2, 4), t: 2}));
+assert.eq(-1, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(2, 0), t: 3}));
- assert.eq(0, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(3, 0), t: 2}));
-
- assert.eq(1, rs.compareOpTimes({ts: Timestamp(3, 1), t: 2}, {ts: Timestamp(2, 2), t: 2}));
- assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 4), t: 2}, {ts: Timestamp(2, 2), t: 2}));
- assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 0), t: 3}, {ts: Timestamp(3, 0), t: 2}));
+assert.eq(0, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(3, 0), t: 2}));
+assert.eq(1, rs.compareOpTimes({ts: Timestamp(3, 1), t: 2}, {ts: Timestamp(2, 2), t: 2}));
+assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 4), t: 2}, {ts: Timestamp(2, 2), t: 2}));
+assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 0), t: 3}, {ts: Timestamp(3, 0), t: 2}));
})();
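
The assertions above pin down the comparison order: the term field t dominates,
and the timestamp only breaks ties within a term. A hedged re-implementation for
illustration (not the shell's actual rs.compareOpTimes):

    function compareOpTimesSketch(a, b) {
        if (a.t !== b.t) {
            return a.t < b.t ? -1 : 1;  // A higher term always wins.
        }
        // Same term: compare Timestamp seconds, then the increment.
        if (a.ts.t !== b.ts.t) {
            return a.ts.t < b.ts.t ? -1 : 1;
        }
        if (a.ts.i !== b.ts.i) {
            return a.ts.i < b.ts.i ? -1 : 1;
        }
        return 0;
    }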
diff --git a/jstests/core/optimized_match_explain.js b/jstests/core/optimized_match_explain.js
index 6f73349e08c..5575b8498bb 100644
--- a/jstests/core/optimized_match_explain.js
+++ b/jstests/core/optimized_match_explain.js
@@ -4,22 +4,22 @@
* Tests that the explain output for $match reflects any optimizations.
*/
(function() {
- "use strict";
- load("jstests/libs/analyze_plan.js");
+"use strict";
+load("jstests/libs/analyze_plan.js");
- const coll = db.match_explain;
- coll.drop();
+const coll = db.match_explain;
+coll.drop();
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 2, b: 3}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 1, b: 4}));
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 2, b: 3}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 1, b: 4}));
- // Explain output should reflect optimizations.
- // $and should not be in the explain output because it is optimized out.
- let explain = coll.explain().aggregate(
- [{$sort: {b: -1}}, {$addFields: {c: {$mod: ["$a", 4]}}}, {$match: {$and: [{c: 1}]}}]);
+// Explain output should reflect optimizations.
+// $and should not be in the explain output because it is optimized out.
+let explain = coll.explain().aggregate(
+ [{$sort: {b: -1}}, {$addFields: {c: {$mod: ["$a", 4]}}}, {$match: {$and: [{c: 1}]}}]);
- assert.commandWorked(explain);
- assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}});
+assert.commandWorked(explain);
+assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}});
}());
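
getAggPlanStage is defined in jstests/libs/analyze_plan.js, outside this diff. A
simplified sketch of the behavior the test relies on, under the assumption that
pipeline explain output lists each stage as a one-key object under 'stages':

    function getAggPlanStageSketch(explain, stageName) {
        for (let stage of (explain.stages || [])) {
            if (stage.hasOwnProperty(stageName)) {
                return stage;  // e.g. {$match: {c: {$eq: 1}}}
            }
        }
        return null;
    }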
diff --git a/jstests/core/or1.js b/jstests/core/or1.js
index e7c417800b6..c5975a058f2 100644
--- a/jstests/core/or1.js
+++ b/jstests/core/or1.js
@@ -17,7 +17,6 @@ checkArrs = function(a, b) {
};
doTest = function() {
-
t.save({_id: 0, a: 1});
t.save({_id: 1, a: 2});
t.save({_id: 2, b: 1});
@@ -43,11 +42,11 @@ doTest = function() {
a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray();
checkArrs(
[
- {_id: 0, a: 1},
- {_id: 3, b: 2},
- {_id: 4, a: 1, b: 1},
- {_id: 5, a: 1, b: 2},
- {_id: 7, a: 2, b: 2}
+ {_id: 0, a: 1},
+ {_id: 3, b: 2},
+ {_id: 4, a: 1, b: 1},
+ {_id: 5, a: 1, b: 2},
+ {_id: 7, a: 2, b: 2}
],
a1b2);
@@ -56,7 +55,6 @@ doTest = function() {
assert.eq(1, t.find({$or: [{a: {$in: [0, 1]}}]}).toArray().length);
assert.eq(1, t.find({$or: [{b: {$in: [0, 1]}}]}).toArray().length);
assert.eq(1, t.find({$or: [{a: {$in: [0, 1]}}, {b: {$in: [0, 1]}}]}).toArray().length);
-
};
doTest();
diff --git a/jstests/core/or4.js b/jstests/core/or4.js
index b71f4254c79..8e07a42efa7 100644
--- a/jstests/core/or4.js
+++ b/jstests/core/or4.js
@@ -6,80 +6,80 @@
// ]
(function() {
- "use strict";
+"use strict";
- const coll = db.or4;
- coll.drop();
+const coll = db.or4;
+coll.drop();
- coll.ensureIndex({a: 1});
- coll.ensureIndex({b: 1});
+coll.ensureIndex({a: 1});
+coll.ensureIndex({b: 1});
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({b: 3}));
- assert.writeOK(coll.insert({b: 3}));
- assert.writeOK(coll.insert({a: 2, b: 3}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({b: 3}));
+assert.writeOK(coll.insert({b: 3}));
+assert.writeOK(coll.insert({a: 2, b: 3}));
- assert.eq(4, coll.count({$or: [{a: 2}, {b: 3}]}));
- assert.eq(2, coll.count({$or: [{a: 2}, {a: 2}]}));
+assert.eq(4, coll.count({$or: [{a: 2}, {b: 3}]}));
+assert.eq(2, coll.count({$or: [{a: 2}, {a: 2}]}));
- assert.eq(2, coll.find({}).skip(2).count(true));
- assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(2).count(true));
- assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).skip(3).count(true));
+assert.eq(2, coll.find({}).skip(2).count(true));
+assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(2).count(true));
+assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).skip(3).count(true));
- assert.eq(2, coll.find({}).limit(2).count(true));
- assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).limit(1).count(true));
- assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).limit(2).count(true));
- assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).limit(3).count(true));
- assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).count(true));
+assert.eq(2, coll.find({}).limit(2).count(true));
+assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).limit(1).count(true));
+assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).limit(2).count(true));
+assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).limit(3).count(true));
+assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).count(true));
- coll.remove({$or: [{a: 2}, {b: 3}]});
- assert.eq(0, coll.count());
+coll.remove({$or: [{a: 2}, {b: 3}]});
+assert.eq(0, coll.count());
- assert.writeOK(coll.insert({b: 3}));
- coll.remove({$or: [{a: 2}, {b: 3}]});
- assert.eq(0, coll.count());
+assert.writeOK(coll.insert({b: 3}));
+coll.remove({$or: [{a: 2}, {b: 3}]});
+assert.eq(0, coll.count());
- assert.writeOK(coll.insert({a: 2}));
- assert.writeOK(coll.insert({b: 3}));
- assert.writeOK(coll.insert({a: 2, b: 3}));
+assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({b: 3}));
+assert.writeOK(coll.insert({a: 2, b: 3}));
- coll.update({$or: [{a: 2}, {b: 3}]}, {$set: {z: 1}}, false, true);
- assert.eq(3, coll.count({z: 1}));
+coll.update({$or: [{a: 2}, {b: 3}]}, {$set: {z: 1}}, false, true);
+assert.eq(3, coll.count({z: 1}));
- assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).toArray().length);
- assert.eq(coll.find().sort({_id: 1}).toArray(),
- coll.find({$or: [{a: 2}, {b: 3}]}).sort({_id: 1}).toArray());
- assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(1).toArray().length);
+assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).toArray().length);
+assert.eq(coll.find().sort({_id: 1}).toArray(),
+ coll.find({$or: [{a: 2}, {b: 3}]}).sort({_id: 1}).toArray());
+assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(1).toArray().length);
- assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length);
+assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length);
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({b: 4}));
- assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({b: 4}));
+assert.writeOK(coll.insert({a: 2}));
- assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length);
+assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length);
- assert.writeOK(coll.insert({a: 1, b: 3}));
- assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).toArray().length);
+assert.writeOK(coll.insert({a: 1, b: 3}));
+assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).toArray().length);
- assert.eq([1, 2], Array.sort(coll.distinct('a', {$or: [{a: 2}, {b: 3}]})));
+assert.eq([1, 2], Array.sort(coll.distinct('a', {$or: [{a: 2}, {b: 3}]})));
- assert.eq(5,
- coll.mapReduce(
- function() {
- emit('a', this.a);
- },
- function(key, vals) {
- return vals.length;
- },
- {out: {inline: true}, query: {$or: [{a: 2}, {b: 3}]}})
- .counts.input);
+assert.eq(5,
+ coll.mapReduce(
+ function() {
+ emit('a', this.a);
+ },
+ function(key, vals) {
+ return vals.length;
+ },
+ {out: {inline: true}, query: {$or: [{a: 2}, {b: 3}]}})
+ .counts.input);
- coll.remove({});
+coll.remove({});
- assert.writeOK(coll.insert({a: [1, 2]}));
- assert.eq(1, coll.find({$or: [{a: 1}, {a: 2}]}).toArray().length);
- assert.eq(1, coll.count({$or: [{a: 1}, {a: 2}]}));
- assert.eq(1, coll.find({$or: [{a: 2}, {a: 1}]}).toArray().length);
- assert.eq(1, coll.count({$or: [{a: 2}, {a: 1}]}));
+assert.writeOK(coll.insert({a: [1, 2]}));
+assert.eq(1, coll.find({$or: [{a: 1}, {a: 2}]}).toArray().length);
+assert.eq(1, coll.count({$or: [{a: 1}, {a: 2}]}));
+assert.eq(1, coll.find({$or: [{a: 2}, {a: 1}]}).toArray().length);
+assert.eq(1, coll.count({$or: [{a: 2}, {a: 1}]}));
})();
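
The last block above is the subtle case: {a: [1, 2]} matches both $or branches,
but a rooted $or dedupes on the document, so it is returned exactly once. A
standalone repro (or4_dedup_demo is a hypothetical scratch collection):

    const dedup = db.or4_dedup_demo;
    dedup.drop();
    assert.writeOK(dedup.insert({a: [1, 2]}));
    // One document back, even though two branches match it.
    assert.eq(1, dedup.find({$or: [{a: 1}, {a: 2}]}).itcount());
    dedup.drop();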
diff --git a/jstests/core/or5.js b/jstests/core/or5.js
index 7e61f9bf8cf..e0af20752f0 100644
--- a/jstests/core/or5.js
+++ b/jstests/core/or5.js
@@ -36,8 +36,8 @@ for (i = 2; i <= 7; ++i) {
t.ensureIndex({z: "2d"});
assert.throws.automsg(function() {
- return t.find({$or: [{z: {$near: [50, 50]}}, {a: 2}]}).toArray();
-});
+ return t.find({$or: [{z: {$near: [50, 50]}}, {a: 2}]}).toArray();
+ });
function reset() {
t.drop();
diff --git a/jstests/core/or_always_false.js b/jstests/core/or_always_false.js
index eb479486eac..6760ee37775 100644
--- a/jstests/core/or_always_false.js
+++ b/jstests/core/or_always_false.js
@@ -1,17 +1,16 @@
// Tests that a rooted-$or query with each clause provably false will not return any results.
(function() {
- "use strict";
+"use strict";
- const coll = db.or_always_false;
- coll.drop();
+const coll = db.or_always_false;
+coll.drop();
- assert.writeOK(coll.insert([{}, {}, {}]));
- const emptyOrError = assert.throws(() => coll.find({$or: []}).itcount());
- assert.eq(emptyOrError.code, ErrorCodes.BadValue);
+assert.writeOK(coll.insert([{}, {}, {}]));
+const emptyOrError = assert.throws(() => coll.find({$or: []}).itcount());
+assert.eq(emptyOrError.code, ErrorCodes.BadValue);
- assert.eq(coll.find({$or: [{$alwaysFalse: 1}]}).itcount(), 0);
- assert.eq(coll.find({$or: [{a: {$all: []}}]}).itcount(), 0);
- assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {$alwaysFalse: 1}]}).itcount(), 0);
- assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {a: {$all: []}}, {$alwaysFalse: 1}]}).itcount(),
- 0);
+assert.eq(coll.find({$or: [{$alwaysFalse: 1}]}).itcount(), 0);
+assert.eq(coll.find({$or: [{a: {$all: []}}]}).itcount(), 0);
+assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {$alwaysFalse: 1}]}).itcount(), 0);
+assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {a: {$all: []}}, {$alwaysFalse: 1}]}).itcount(), 0);
}());
diff --git a/jstests/core/or_inexact.js b/jstests/core/or_inexact.js
index 3e7e374d7f5..17aeea618b3 100644
--- a/jstests/core/or_inexact.js
+++ b/jstests/core/or_inexact.js
@@ -119,26 +119,26 @@ t.insert({_id: 1, pre: 4, loc: {type: "Point", coordinates: [0, 0]}});
cursor = t.find({
$or: [
{
- pre: 3,
- loc: {
- $geoWithin: {
- $geometry: {
- type: "Polygon",
- coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
- }
- }
- }
+ pre: 3,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
+ }
+ }
+ }
},
{
- pre: 4,
- loc: {
- $geoWithin: {
- $geometry: {
- type: "Polygon",
- coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
- }
- }
- }
+ pre: 4,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
+ }
+ }
+ }
}
]
});
@@ -152,26 +152,26 @@ t.insert({_id: 1, pre: 4, loc: {type: "Point", coordinates: [0, 0]}});
cursor = t.find({
$or: [
{
- pre: 3,
- loc: {
- $geoWithin: {
- $geometry: {
- type: "Polygon",
- coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
- }
- }
- }
+ pre: 3,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
+ }
+ }
+ }
},
{
- pre: 4,
- loc: {
- $geoWithin: {
- $geometry: {
- type: "Polygon",
- coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
- }
- }
- }
+ pre: 4,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
+ }
+ }
+ }
}
]
});
@@ -259,24 +259,24 @@ t.insert({_id: 1, loc: {type: "Point", coordinates: [0, 0]}});
cursor = t.find({
$or: [
{
- loc: {
- $geoWithin: {
- $geometry: {
- type: "Polygon",
- coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
- }
- }
- }
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
+ }
+ }
+ }
},
{
- loc: {
- $geoWithin: {
- $geometry: {
- type: "Polygon",
- coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
- }
- }
- }
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
+ }
+ }
+ }
}
]
});
diff --git a/jstests/core/ord.js b/jstests/core/ord.js
index 640f5de13cc..2cd2cef0a66 100644
--- a/jstests/core/ord.js
+++ b/jstests/core/ord.js
@@ -6,48 +6,48 @@
// behavior is changed.
(function() {
- "use strict";
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- const t = db.jstests_ord;
- t.drop();
-
- t.ensureIndex({a: 1});
- t.ensureIndex({b: 1});
-
- for (let i = 0; i < 80; ++i) {
- t.save({a: 1});
- }
-
- for (let i = 0; i < 100; ++i) {
- t.save({b: 1});
- }
-
- const c = t.find({$or: [{a: 1}, {b: 1}]}).batchSize(100);
- for (let i = 0; i < 100; ++i) {
- c.next();
- }
- // At this point, our initial query has ended and there is a client cursor waiting
- // to read additional documents from index {b:1}. Deduping is performed against
- // the index key {a:1}.
-
- t.dropIndex({a: 1});
-
- // Dropping an index kills all cursors on the indexed namespace, not just those
- // cursors using the dropped index.
- if (FixtureHelpers.isMongos(db)) {
- // mongos may have some data left from a previous batch stored in memory, so it might not
- // return an error immediately, but it should eventually.
- assert.soon(function() {
- try {
- c.next();
- return false; // We didn't throw an error yet.
- } catch (e) {
- return true;
- }
- });
- } else {
- assert.throws(() => c.next());
- }
+"use strict";
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+const t = db.jstests_ord;
+t.drop();
+
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+
+for (let i = 0; i < 80; ++i) {
+ t.save({a: 1});
+}
+
+for (let i = 0; i < 100; ++i) {
+ t.save({b: 1});
+}
+
+const c = t.find({$or: [{a: 1}, {b: 1}]}).batchSize(100);
+for (let i = 0; i < 100; ++i) {
+ c.next();
+}
+// At this point, our initial query has ended and there is a client cursor waiting
+// to read additional documents from index {b:1}. Deduping is performed against
+// the index key {a:1}.
+
+t.dropIndex({a: 1});
+
+// Dropping an index kills all cursors on the indexed namespace, not just those
+// cursors using the dropped index.
+if (FixtureHelpers.isMongos(db)) {
+ // mongos may have some data left from a previous batch stored in memory, so it might not
+ // return an error immediately, but it should eventually.
+ assert.soon(function() {
+ try {
+ c.next();
+ return false; // We didn't throw an error yet.
+ } catch (e) {
+ return true;
+ }
+ });
+} else {
+ assert.throws(() => c.next());
+}
})();
diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js
index 5ac1ca775c3..8170aa9595d 100644
--- a/jstests/core/plan_cache_clear.js
+++ b/jstests/core/plan_cache_clear.js
@@ -12,100 +12,97 @@
// ]
(function() {
- var t = db.jstests_plan_cache_clear;
- t.drop();
+var t = db.jstests_plan_cache_clear;
+t.drop();
- // Utility function to list query shapes in cache.
- function getShapes(collection) {
- if (collection == undefined) {
- collection = t;
- }
- var res = collection.runCommand('planCacheListQueryShapes');
- print('planCacheListQueryShapes() = ' + tojson(res));
- assert.commandWorked(res, 'planCacheListQueryShapes failed');
- assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
- return res.shapes;
+// Utility function to list query shapes in cache.
+function getShapes(collection) {
+ if (collection == undefined) {
+ collection = t;
}
+ var res = collection.runCommand('planCacheListQueryShapes');
+ print('planCacheListQueryShapes() = ' + tojson(res));
+ assert.commandWorked(res, 'planCacheListQueryShapes failed');
+ assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
+ return res.shapes;
+}
- t.save({a: 1, b: 1});
- t.save({a: 1, b: 2});
- t.save({a: 1, b: 2});
- t.save({a: 2, b: 2});
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 2});
+t.save({a: 1, b: 2});
+t.save({a: 2, b: 2});
- // We need two indices so that the MultiPlanRunner is executed.
- t.ensureIndex({a: 1});
- t.ensureIndex({a: 1, b: 1});
+// We need two indices so that the MultiPlanRunner is executed.
+t.ensureIndex({a: 1});
+t.ensureIndex({a: 1, b: 1});
- // Run a query so that an entry is inserted into the cache.
- assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
-
- // Invalid key should be a no-op.
- assert.commandWorked(t.runCommand('planCacheClear', {query: {unknownfield: 1}}));
- assert.eq(
- 1, getShapes().length, 'removing unknown query should not affecting exisiting entries');
+// Run a query so that an entry is inserted into the cache.
+assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
- // Run a new query shape and drop it from the cache
- assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query');
- assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}}));
- assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache');
+// Invalid key should be a no-op.
+assert.commandWorked(t.runCommand('planCacheClear', {query: {unknownfield: 1}}));
+assert.eq(1, getShapes().length, 'removing an unknown query should not affect existing entries');
- // planCacheClear can clear $expr queries.
- assert.eq(
- 1, t.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount(), 'unexpected document count');
- assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query');
- assert.commandWorked(
- t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}}));
- assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache');
+// Run a new query shape and drop it from the cache
+assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query');
+assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}}));
+assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache');
- // planCacheClear fails with an $expr query with an unbound variable.
- assert.commandFailed(
- t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}}));
+// planCacheClear can clear $expr queries.
+assert.eq(1, t.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount(), 'unexpected document count');
+assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query');
+assert.commandWorked(
+ t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}}));
+assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache');
- // Insert two more shapes into the cache.
- assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
- assert.eq(1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount(), 'unexpected document count');
+// planCacheClear fails with an $expr query with an unbound variable.
+assert.commandFailed(
+ t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}}));
- // Drop query cache. This clears all cached queries in the collection.
- res = t.runCommand('planCacheClear');
- print('planCacheClear() = ' + tojson(res));
- assert.commandWorked(res, 'planCacheClear failed');
- assert.eq(
- 0, getShapes().length, 'plan cache should be empty after successful planCacheClear()');
+// Insert two more shapes into the cache.
+assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
+assert.eq(1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount(), 'unexpected document count');
- //
- // Query Plan Revision
- // http://docs.mongodb.org/manual/core/query-plans/#query-plan-revision
- // As collections change over time, the query optimizer deletes the query plan and re-evaluates
- // after any of the following events:
- // - The reIndex rebuilds the index.
- // - You add or drop an index.
- // - The mongod process restarts.
- //
+// Drop query cache. This clears all cached queries in the collection.
+res = t.runCommand('planCacheClear');
+print('planCacheClear() = ' + tojson(res));
+assert.commandWorked(res, 'planCacheClear failed');
+assert.eq(0, getShapes().length, 'plan cache should be empty after successful planCacheClear()');
- // Case 1: The reIndex rebuilds the index.
- // Steps:
- // Populate the cache with 1 entry.
- // Run reIndex on the collection.
- // Confirm that cache is empty.
- const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
- if (!isMongos) {
- assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
- assert.eq(1, getShapes().length, 'plan cache should not be empty after query');
- res = t.reIndex();
- print('reIndex result = ' + tojson(res));
- assert.eq(0, getShapes().length, 'plan cache should be empty after reIndex operation');
- }
+//
+// Query Plan Revision
+// http://docs.mongodb.org/manual/core/query-plans/#query-plan-revision
+// As collections change over time, the query optimizer deletes the query plan and re-evaluates
+// after any of the following events:
+// - The reIndex rebuilds the index.
+// - You add or drop an index.
+// - The mongod process restarts.
+//
- // Case 2: You add or drop an index.
- // Steps:
- // Populate the cache with 1 entry.
- // Add an index.
- // Confirm that cache is empty.
+// Case 1: The reIndex rebuilds the index.
+// Steps:
+// Populate the cache with 1 entry.
+// Run reIndex on the collection.
+// Confirm that cache is empty.
+const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
+if (!isMongos) {
assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
assert.eq(1, getShapes().length, 'plan cache should not be empty after query');
- t.ensureIndex({b: 1});
- assert.eq(0, getShapes().length, 'plan cache should be empty after adding index');
+ res = t.reIndex();
+ print('reIndex result = ' + tojson(res));
+ assert.eq(0, getShapes().length, 'plan cache should be empty after reIndex operation');
+}
+
+// Case 2: You add or drop an index.
+// Steps:
+// Populate the cache with 1 entry.
+// Add an index.
+// Confirm that cache is empty.
+assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
+assert.eq(1, getShapes().length, 'plan cache should not be empty after query');
+t.ensureIndex({b: 1});
+assert.eq(0, getShapes().length, 'plan cache should be empty after adding index');
- // Case 3: The mongod process restarts
- // Not applicable.
+// Case 3: The mongod process restarts
+// Not applicable.
})();
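
For reference, the two planCacheClear modes exercised above: passing a query
(plus optional sort/projection) drops a single cached shape, while passing no
filter drops every entry for the collection.

    assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}}));  // one shape
    assert.commandWorked(t.runCommand('planCacheClear'));  // all shapes for the collection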
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index b0ae8497ba4..a077f9fafbe 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -13,118 +13,115 @@
// ]
(function() {
- "use strict";
- let t = db.jstests_plan_cache_list_plans;
- t.drop();
-
- function getPlansForCacheEntry(query, sort, projection) {
- let key = {query: query, sort: sort, projection: projection};
- let res = t.runCommand('planCacheListPlans', key);
- assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'),
- 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
- return res;
- }
-
- // Assert that timeOfCreation exists in the cache entry. The difference between the current time
- // and the time a plan was cached should not be larger than an hour.
- function checkTimeOfCreation(query, sort, projection, date) {
- let key = {query: query, sort: sort, projection: projection};
- let res = t.runCommand('planCacheListPlans', key);
- assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('timeOfCreation'),
- 'timeOfCreation missing from planCacheListPlans');
- let kMillisecondsPerHour = 1000 * 60 * 60;
- assert.lte(Math.abs(date - res.timeOfCreation.getTime()),
- kMillisecondsPerHour,
- 'timeOfCreation value is incorrect');
- }
-
- assert.commandWorked(t.save({a: 1, b: 1}));
- assert.commandWorked(t.save({a: 1, b: 2}));
- assert.commandWorked(t.save({a: 1, b: 2}));
- assert.commandWorked(t.save({a: 2, b: 2}));
-
- // We need two indices so that the MultiPlanRunner is executed.
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
-
- // Invalid key should be an error.
- assert.eq([],
- getPlansForCacheEntry({unknownfield: 1}, {}, {}).plans,
- 'planCacheListPlans should return empty results on unknown query shape');
-
- // Create a cache entry.
- assert.eq(1,
- t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(),
- 'unexpected document count');
-
- let now = (new Date()).getTime();
- checkTimeOfCreation({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}, now);
-
- // Retrieve plans for valid cache entry.
- let entry = getPlansForCacheEntry({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1});
- assert(entry.hasOwnProperty('works'),
- 'works missing from planCacheListPlans() result ' + tojson(entry));
- assert.eq(entry.isActive, false);
-
- let plans = entry.plans;
- assert.eq(2, plans.length, 'unexpected number of plans cached for query');
-
- // Print every plan.
- // Plan details/feedback verified separately in section after Query Plan Revision tests.
- print('planCacheListPlans result:');
- for (let i = 0; i < plans.length; i++) {
- print('plan ' + i + ': ' + tojson(plans[i]));
- }
+"use strict";
+let t = db.jstests_plan_cache_list_plans;
+t.drop();
+
+function getPlansForCacheEntry(query, sort, projection) {
+ let key = {query: query, sort: sort, projection: projection};
+ let res = t.runCommand('planCacheListPlans', key);
+    assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ') failed');
+ assert(res.hasOwnProperty('plans'),
+ 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
+ return res;
+}
+
+// Assert that timeOfCreation exists in the cache entry. The difference between the current time
+// and the time a plan was cached should not be larger than an hour.
+function checkTimeOfCreation(query, sort, projection, date) {
+ let key = {query: query, sort: sort, projection: projection};
+ let res = t.runCommand('planCacheListPlans', key);
+    assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ') failed');
+ assert(res.hasOwnProperty('timeOfCreation'), 'timeOfCreation missing from planCacheListPlans');
+ let kMillisecondsPerHour = 1000 * 60 * 60;
+ assert.lte(Math.abs(date - res.timeOfCreation.getTime()),
+ kMillisecondsPerHour,
+ 'timeOfCreation value is incorrect');
+}
+
+assert.commandWorked(t.save({a: 1, b: 1}));
+assert.commandWorked(t.save({a: 1, b: 2}));
+assert.commandWorked(t.save({a: 1, b: 2}));
+assert.commandWorked(t.save({a: 2, b: 2}));
+
+// We need two indices so that the MultiPlanRunner is executed.
+assert.commandWorked(t.ensureIndex({a: 1}));
+assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+
+// Invalid key should be an error.
+assert.eq([],
+ getPlansForCacheEntry({unknownfield: 1}, {}, {}).plans,
+ 'planCacheListPlans should return empty results on unknown query shape');
+
+// Create a cache entry.
+assert.eq(
+ 1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'unexpected document count');
+
+let now = (new Date()).getTime();
+checkTimeOfCreation({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}, now);
+
+// Retrieve plans for valid cache entry.
+let entry = getPlansForCacheEntry({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1});
+assert(entry.hasOwnProperty('works'),
+ 'works missing from planCacheListPlans() result ' + tojson(entry));
+assert.eq(entry.isActive, false);
+
+let plans = entry.plans;
+assert.eq(2, plans.length, 'unexpected number of plans cached for query');
+
+// Print every plan.
+// Plan details/feedback verified separately in section after Query Plan Revision tests.
+print('planCacheListPlans result:');
+for (let i = 0; i < plans.length; i++) {
+ print('plan ' + i + ': ' + tojson(plans[i]));
+}
+
+// Test the queryHash and planCacheKey properties by comparing entries for two different
+// query shapes.
+assert.eq(0, t.find({a: 132}).sort({b: -1, a: 1}).itcount(), 'unexpected document count');
+let entryNewShape = getPlansForCacheEntry({a: 123}, {b: -1, a: 1}, {});
+assert.eq(entry.hasOwnProperty("queryHash"), true);
+assert.eq(entryNewShape.hasOwnProperty("queryHash"), true);
+assert.neq(entry["queryHash"], entryNewShape["queryHash"]);
+assert.eq(entry.hasOwnProperty("planCacheKey"), true);
+assert.eq(entryNewShape.hasOwnProperty("planCacheKey"), true);
+assert.neq(entry["planCacheKey"], entryNewShape["planCacheKey"]);
- // Test the queryHash and planCacheKey property by comparing entries for two different
- // query shapes.
- assert.eq(0, t.find({a: 132}).sort({b: -1, a: 1}).itcount(), 'unexpected document count');
- let entryNewShape = getPlansForCacheEntry({a: 123}, {b: -1, a: 1}, {});
- assert.eq(entry.hasOwnProperty("queryHash"), true);
- assert.eq(entryNewShape.hasOwnProperty("queryHash"), true);
- assert.neq(entry["queryHash"], entryNewShape["queryHash"]);
- assert.eq(entry.hasOwnProperty("planCacheKey"), true);
- assert.eq(entryNewShape.hasOwnProperty("planCacheKey"), true);
- assert.neq(entry["planCacheKey"], entryNewShape["planCacheKey"]);
-
- //
- // Tests for plan reason and feedback in planCacheListPlans
- //
-
- // Generate more plans for test query by adding indexes (compound and sparse). This will also
- // clear the plan cache.
- assert.commandWorked(t.ensureIndex({a: -1}, {sparse: true}));
- assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
-
- // Implementation note: feedback stats is calculated after 20 executions. See
- // PlanCacheEntry::kMaxFeedback.
- let numExecutions = 100;
- for (let i = 0; i < numExecutions; i++) {
- assert.eq(0, t.find({a: 3, b: 3}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'query failed');
- }
+//
+// Tests for plan reason and feedback in planCacheListPlans
+//
- now = (new Date()).getTime();
- checkTimeOfCreation({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}, now);
-
- entry = getPlansForCacheEntry({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1});
- assert(entry.hasOwnProperty('works'), 'works missing from planCacheListPlans() result');
- assert.eq(entry.isActive, true);
- plans = entry.plans;
-
- // This should be obvious but feedback is available only for the first (winning) plan.
- print('planCacheListPlans result (after adding indexes and completing 20 executions):');
- for (let i = 0; i < plans.length; i++) {
- print('plan ' + i + ': ' + tojson(plans[i]));
- assert.gt(plans[i].reason.score, 0, 'plan ' + i + ' score is invalid');
- if (i > 0) {
- assert.lte(plans[i].reason.score,
- plans[i - 1].reason.score,
- 'plans not sorted by score in descending order. ' +
- 'plan ' + i +
- ' has a score that is greater than that of the previous plan');
- }
- assert(plans[i].reason.stats.hasOwnProperty('stage'), 'no stats inserted for plan ' + i);
+// Generate more plans for test query by adding indexes (compound and sparse). This will also
+// clear the plan cache.
+assert.commandWorked(t.ensureIndex({a: -1}, {sparse: true}));
+assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+
+// Implementation note: feedback stats are calculated after 20 executions. See
+// PlanCacheEntry::kMaxFeedback.
+let numExecutions = 100;
+for (let i = 0; i < numExecutions; i++) {
+ assert.eq(0, t.find({a: 3, b: 3}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'query failed');
+}
+
+now = (new Date()).getTime();
+checkTimeOfCreation({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}, now);
+
+entry = getPlansForCacheEntry({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1});
+assert(entry.hasOwnProperty('works'), 'works missing from planCacheListPlans() result');
+assert.eq(entry.isActive, true);
+plans = entry.plans;
+
+// This should be obvious but feedback is available only for the first (winning) plan.
+print('planCacheListPlans result (after adding indexes and completing 20 executions):');
+for (let i = 0; i < plans.length; i++) {
+ print('plan ' + i + ': ' + tojson(plans[i]));
+ assert.gt(plans[i].reason.score, 0, 'plan ' + i + ' score is invalid');
+ if (i > 0) {
+ assert.lte(plans[i].reason.score,
+ plans[i - 1].reason.score,
+ 'plans not sorted by score in descending order. ' +
+ 'plan ' + i + ' has a score that is greater than that of the previous plan');
}
+ assert(plans[i].reason.stats.hasOwnProperty('stage'), 'no stats inserted for plan ' + i);
+}
})();
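
The key passed to planCacheListPlans throughout is the shape triple. A minimal
standalone call mirroring the test (assumes the same collection, data, and
indexes as above):

    const listed = t.runCommand(
        'planCacheListPlans',
        {query: {a: 3, b: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}});
    assert.commandWorked(listed);
    // Plans come back sorted by score, winner first.
    listed.plans.forEach((plan, i) => print('plan ' + i + ' score: ' + plan.reason.score));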
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index bda2a40b073..89b9c900354 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -11,85 +11,85 @@
// assumes_balancer_off,
// ]
(function() {
- const t = db.jstests_plan_cache_list_shapes;
- t.drop();
+const t = db.jstests_plan_cache_list_shapes;
+t.drop();
- // Utility function to list query shapes in cache.
- function getShapes(collection) {
- if (collection === undefined) {
- collection = t;
- }
- const res = collection.runCommand('planCacheListQueryShapes');
- print('planCacheListQueryShapes() = ' + tojson(res));
- assert.commandWorked(res, 'planCacheListQueryShapes failed');
- assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
- return res.shapes;
+// Utility function to list query shapes in cache.
+function getShapes(collection) {
+ if (collection === undefined) {
+ collection = t;
}
+ const res = collection.runCommand('planCacheListQueryShapes');
+ print('planCacheListQueryShapes() = ' + tojson(res));
+ assert.commandWorked(res, 'planCacheListQueryShapes failed');
+ assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
+ return res.shapes;
+}
- // Attempting to retrieve cache information on non-existent collection is not an error and
- // should return an empty array of query shapes.
- const missingCollection = db.jstests_query_cache_missing;
- missingCollection.drop();
- assert.eq(0,
- getShapes(missingCollection).length,
- 'planCacheListQueryShapes should return empty array on non-existent collection');
+// Attempting to retrieve cache information on non-existent collection is not an error and
+// should return an empty array of query shapes.
+const missingCollection = db.jstests_query_cache_missing;
+missingCollection.drop();
+assert.eq(0,
+ getShapes(missingCollection).length,
+ 'planCacheListQueryShapes should return empty array on non-existent collection');
- assert.commandWorked(t.save({a: 1, b: 1}));
- assert.commandWorked(t.save({a: 1, b: 2}));
- assert.commandWorked(t.save({a: 1, b: 2}));
- assert.commandWorked(t.save({a: 2, b: 2}));
+assert.commandWorked(t.save({a: 1, b: 1}));
+assert.commandWorked(t.save({a: 1, b: 2}));
+assert.commandWorked(t.save({a: 1, b: 2}));
+assert.commandWorked(t.save({a: 2, b: 2}));
- // We need two indices so that the MultiPlanRunner is executed.
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+// We need two indices so that the MultiPlanRunner is executed.
+assert.commandWorked(t.ensureIndex({a: 1}));
+assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
- // Run a query.
- assert.eq(1,
- t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(),
- 'unexpected document count');
+// Run a query.
+assert.eq(
+ 1, t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(), 'unexpected document count');
- // We now expect the two indices to be compared and a cache entry to exist. Retrieve query
- // shapes from the test collection Number of shapes should match queries executed by multi-plan
- // runner.
- let shapes = getShapes();
- assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
- // Since the queryHash is computed in the server, we filter it out when matching query shapes
- // here.
- let filteredShape0 = shapes[0];
- delete filteredShape0.queryHash;
- assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}},
- filteredShape0,
- 'unexpected query shape returned from planCacheListQueryShapes');
+// We now expect the two indices to be compared and a cache entry to exist. Retrieve query
+// shapes from the test collection. The number of shapes should match the queries executed by
+// the multi-plan runner.
+let shapes = getShapes();
+assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+// Since the queryHash is computed in the server, we filter it out when matching query shapes
+// here.
+let filteredShape0 = shapes[0];
+delete filteredShape0.queryHash;
+assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}},
+ filteredShape0,
+ 'unexpected query shape returned from planCacheListQueryShapes');
- // Running a different query shape should cause another entry to be cached.
- assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
- shapes = getShapes();
- assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+// Running a different query shape should cause another entry to be cached.
+assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
+shapes = getShapes();
+assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
- // Check that each shape has a unique queryHash.
- assert.neq(shapes[0]["queryHash"], shapes[1]["queryHash"]);
+// Check that each shape has a unique queryHash.
+assert.neq(shapes[0]["queryHash"], shapes[1]["queryHash"]);
- // Check that queries with different regex options have distinct shapes.
+// Check that queries with different regex options have distinct shapes.
- // Insert some documents with strings so we have something to search for.
- for (let i = 0; i < 5; i++) {
- assert.commandWorked(t.insert({a: 3, s: 'hello world'}));
- }
- assert.commandWorked(t.insert({a: 3, s: 'hElLo wOrLd'}));
+// Insert some documents with strings so we have something to search for.
+for (let i = 0; i < 5; i++) {
+ assert.commandWorked(t.insert({a: 3, s: 'hello world'}));
+}
+assert.commandWorked(t.insert({a: 3, s: 'hElLo wOrLd'}));
- // Run a query with a regex. Also must include 'a' so that the query may use more than one
- // index, and thus, must use the MultiPlanner.
- const regexQuery = {s: {$regex: 'hello world', $options: 'm'}, a: 3};
- assert.eq(5, t.find(regexQuery).itcount());
+// Run a query with a regex. The query must also include 'a' so that it may use more than one
+// index, and thus must use the MultiPlanner.
+const regexQuery = {
+ s: {$regex: 'hello world', $options: 'm'},
+ a: 3
+};
+assert.eq(5, t.find(regexQuery).itcount());
- assert.eq(
- 3, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result ');
+assert.eq(3, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result');
- // Run the same query, but with different regex options. We expect that this should cause a
- // shape to get added.
- regexQuery.s.$options = 'mi';
- // There is one more result since the query is now case sensitive.
- assert.eq(6, t.find(regexQuery).itcount());
- assert.eq(
- 4, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result');
+// Run the same query, but with different regex options. We expect this to cause another
+// shape to be added.
+regexQuery.s.$options = 'mi';
+// There is one more result since the query is now case-insensitive.
+assert.eq(6, t.find(regexQuery).itcount());
+assert.eq(4, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result');
})();
diff --git a/jstests/core/profile1.js b/jstests/core/profile1.js
index 09f1655937c..485b26f29fa 100644
--- a/jstests/core/profile1.js
+++ b/jstests/core/profile1.js
@@ -9,103 +9,103 @@
// ]
(function() {
- "use strict";
- function profileCursor(query) {
- query = query || {};
- Object.extend(query, {user: username + "@" + db.getName()});
- return db.system.profile.find(query);
- }
-
- function getProfileAString() {
- var s = "\n";
- profileCursor().forEach(function(z) {
- s += tojson(z) + " ,\n";
- });
- return s;
- }
-
- function resetProfile(level, slowms) {
- db.setProfilingLevel(0);
- db.system.profile.drop();
- db.setProfilingLevel(level, slowms);
- }
-
- // special db so that it can be run in parallel tests
- var stddb = db;
- db = db.getSisterDB("profile1");
- var username = "jstests_profile1_user";
-
- db.dropUser(username);
- db.dropDatabase();
-
- try {
- db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
- db.auth(username, "password");
-
- // With pre-created system.profile (capped)
- db.runCommand({profile: 0});
+"use strict";
+function profileCursor(query) {
+ query = query || {};
+ Object.extend(query, {user: username + "@" + db.getName()});
+ return db.system.profile.find(query);
+}
+
+function getProfileAString() {
+ var s = "\n";
+ profileCursor().forEach(function(z) {
+ s += tojson(z) + " ,\n";
+ });
+ return s;
+}
+
+function resetProfile(level, slowms) {
+ db.setProfilingLevel(0);
+ db.system.profile.drop();
+ db.setProfilingLevel(level, slowms);
+}
+
+// Use a dedicated db so that this test can run in parallel test suites.
+var stddb = db;
+db = db.getSisterDB("profile1");
+var username = "jstests_profile1_user";
+
+db.dropUser(username);
+db.dropDatabase();
+
+try {
+ db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
+ db.auth(username, "password");
+
+ // With pre-created system.profile (capped)
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "A");
+
+ // Create 32MB profile (capped) collection
+ db.system.profile.drop();
+ db.createCollection("system.profile", {capped: true, size: 32 * 1024 * 1024});
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "B");
+ assert.eq(1, db.system.profile.stats().capped, "C");
+
+ db.foo.findOne();
+
+ var profileItems = profileCursor().toArray();
+
+ // create a msg for later if there is a failure.
+ var msg = "";
+ profileItems.forEach(function(d) {
+ msg += "profile doc: " + d.ns + " " + d.op + " " + tojson(d.query ? d.query : d.command) +
+ '\n';
+ });
+ msg += tojson(db.system.profile.stats());
+
+    // If these numbers don't match, it is possible the collection has rolled over
+ // (set to 32MB above in the hope this doesn't happen)
+ assert.eq(2, profileItems.length, "E2 -- " + msg);
+
+ // Make sure we can't drop if profiling is still on
+ assert.throws(function(z) {
db.getCollection("system.profile").drop();
- assert.eq(0, db.runCommand({profile: -1}).was, "A");
-
- // Create 32MB profile (capped) collection
- db.system.profile.drop();
- db.createCollection("system.profile", {capped: true, size: 32 * 1024 * 1024});
- db.runCommand({profile: 2});
- assert.eq(2, db.runCommand({profile: -1}).was, "B");
- assert.eq(1, db.system.profile.stats().capped, "C");
-
- db.foo.findOne();
-
- var profileItems = profileCursor().toArray();
-
- // create a msg for later if there is a failure.
- var msg = "";
- profileItems.forEach(function(d) {
- msg += "profile doc: " + d.ns + " " + d.op + " " +
- tojson(d.query ? d.query : d.command) + '\n';
- });
- msg += tojson(db.system.profile.stats());
-
- // If these nunmbers don't match, it is possible the collection has rolled over
- // (set to 32MB above in the hope this doesn't happen)
- assert.eq(2, profileItems.length, "E2 -- " + msg);
-
- // Make sure we can't drop if profiling is still on
- assert.throws(function(z) {
- db.getCollection("system.profile").drop();
- });
-
- // With pre-created system.profile (un-capped)
- db.runCommand({profile: 0});
- db.getCollection("system.profile").drop();
- assert.eq(0, db.runCommand({profile: -1}).was, "F");
-
- db.createCollection("system.profile");
- assert.eq(0, db.runCommand({profile: 2}).ok);
- assert.eq(0, db.runCommand({profile: -1}).was, "G");
- assert(!db.system.profile.stats().capped, "G1");
-
- // With no system.profile collection
- db.runCommand({profile: 0});
- db.getCollection("system.profile").drop();
- assert.eq(0, db.runCommand({profile: -1}).was, "H");
-
- db.runCommand({profile: 2});
- assert.eq(2, db.runCommand({profile: -1}).was, "I");
- assert.eq(1, db.system.profile.stats().capped, "J");
-
- resetProfile(2);
- db.profile1.drop();
- var q = {_id: 5};
- var u = {$inc: {x: 1}};
- db.profile1.update(q, u);
- var r = profileCursor({ns: db.profile1.getFullName()}).sort({$natural: -1})[0];
- assert.eq({q: q, u: u, multi: false, upsert: false}, r.command, tojson(r));
- assert.eq("update", r.op, tojson(r));
- assert.eq("profile1.profile1", r.ns, tojson(r));
- } finally {
- // disable profiling for subsequent tests
- assert.commandWorked(db.runCommand({profile: 0}));
- db = stddb;
- }
+ });
+
+ // With pre-created system.profile (un-capped)
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "F");
+
+ db.createCollection("system.profile");
+ assert.eq(0, db.runCommand({profile: 2}).ok);
+ assert.eq(0, db.runCommand({profile: -1}).was, "G");
+ assert(!db.system.profile.stats().capped, "G1");
+
+ // With no system.profile collection
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "H");
+
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "I");
+ assert.eq(1, db.system.profile.stats().capped, "J");
+
+ resetProfile(2);
+ db.profile1.drop();
+ var q = {_id: 5};
+ var u = {$inc: {x: 1}};
+ db.profile1.update(q, u);
+ var r = profileCursor({ns: db.profile1.getFullName()}).sort({$natural: -1})[0];
+ assert.eq({q: q, u: u, multi: false, upsert: false}, r.command, tojson(r));
+ assert.eq("update", r.op, tojson(r));
+ assert.eq("profile1.profile1", r.ns, tojson(r));
+} finally {
+ // disable profiling for subsequent tests
+ assert.commandWorked(db.runCommand({profile: 0}));
+ db = stddb;
+}
}());
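
The profiling levels toggled above, for reference: 0 disables profiling, 1
records only operations slower than slowms, and 2 records every operation. A
typical toggle sequence:

    db.setProfilingLevel(2);       // profile every operation
    db.setProfilingLevel(1, 100);  // profile only operations slower than 100 ms
    db.setProfilingLevel(0);       // off; system.profile may now be dropped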
diff --git a/jstests/core/profile2.js b/jstests/core/profile2.js
index 788f20f79a1..d71471b2e5f 100644
--- a/jstests/core/profile2.js
+++ b/jstests/core/profile2.js
@@ -24,7 +24,7 @@ var result = results[0];
assert(result.hasOwnProperty('ns'));
assert(result.hasOwnProperty('millis'));
assert(result.hasOwnProperty('command'));
-assert.eq('string', typeof(result.command.$truncated));
+assert.eq('string', typeof (result.command.$truncated));
// String value is truncated.
assert(result.command.$truncated.match(/filter: { a: "a+\.\.\." }/));
@@ -40,7 +40,7 @@ var result = results[0];
assert(result.hasOwnProperty('ns'));
assert(result.hasOwnProperty('millis'));
assert(result.hasOwnProperty('command'));
-assert.eq('string', typeof(result.command.$truncated));
+assert.eq('string', typeof (result.command.$truncated));
// String value is truncated.
assert(result.command.$truncated.match(
/^{ q: { a: "a+\.\.\." }, u: {}, multi: false, upsert: false }$/));
@@ -57,7 +57,7 @@ var result = results[0];
assert(result.hasOwnProperty('ns'));
assert(result.hasOwnProperty('millis'));
assert(result.hasOwnProperty('command'));
-assert.eq('string', typeof(result.command.$truncated));
+assert.eq('string', typeof (result.command.$truncated));
// String value is truncated.
assert(result.command.$truncated.match(
/^{ q: {}, u: { a: "a+\.\.\." }, multi: false, upsert: false }$/));
@@ -78,7 +78,7 @@ var result = results[0];
assert(result.hasOwnProperty('ns'));
assert(result.hasOwnProperty('millis'));
assert(result.hasOwnProperty('command'));
-assert.eq('string', typeof(result.command.$truncated));
+assert.eq('string', typeof (result.command.$truncated));
// Query object itself is truncated.
assert(result.command.$truncated.match(/filter: { a0: 1\.0, a1: .*\.\.\.$/));
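
The profile2.js changes are whitespace-only: clang-format 7 writes `typeof (x)` because typeof is a JavaScript operator rather than a function, so the parentheses are just an ordinary parenthesized operand. A quick sketch confirming the spellings are equivalent:

    // All three expressions evaluate identically; only the whitespace differs.
    var s = "abc";
    assert.eq("string", typeof s);
    assert.eq("string", typeof (s));  // the style this commit standardizes on
    assert.eq(typeof s, typeof (s));
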
diff --git a/jstests/core/profile_agg.js b/jstests/core/profile_agg.js
index 1224105109b..02a29500cf6 100644
--- a/jstests/core/profile_agg.js
+++ b/jstests/core/profile_agg.js
@@ -3,95 +3,95 @@
// Confirms that profiled aggregation execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
-
- var testDB = db.getSiblingDB("profile_agg");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Confirm metrics for agg w/ $match.
- //
- var i;
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- assert.eq(8,
- coll.aggregate([{$match: {a: {$gte: 2}}}],
- {collation: {locale: "fr"}, comment: "agg_comment"})
- .itcount());
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.nreturned, 8, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 8, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 8, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert.eq(profileObj.protocol,
- getProfilerProtocolStringForCommand(testDB.getMongo()),
- tojson(profileObj));
- assert.eq(profileObj.command.aggregate, coll.getName(), tojson(profileObj));
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- assert.eq(profileObj.command.comment, "agg_comment", tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert(!profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj));
- // Testing that 'usedDisk' is set when disk is used requires either using a lot of data or
- // configuring a server parameter, which could mess up other tests. That testing is
- // done elsewhere so that this test can stay in the core suite.
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}]).itcount());
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
-
- //
- // Confirm that the "hint" modifier is in the profiler document.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}], {hint: {_id: 1}}).itcount());
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj));
-
- //
- // Confirm that aggregations are truncated in the profiler as { $truncated: <string>, comment:
- // <string> } when a comment parameter is provided.
- //
- let matchPredicate = {};
-
- for (let i = 0; i < 501; i++) {
- matchPredicate[i] = "a".repeat(150);
- }
-
- assert.eq(coll.aggregate([{$match: matchPredicate}], {comment: "profile_agg"}).itcount(), 0);
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj));
- assert.eq(profileObj.command.comment, "profile_agg", tojson(profileObj));
+"use strict";
+
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+
+var testDB = db.getSiblingDB("profile_agg");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Confirm metrics for agg w/ $match.
+//
+var i;
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.eq(
+ 8,
+ coll.aggregate([{$match: {a: {$gte: 2}}}], {collation: {locale: "fr"}, comment: "agg_comment"})
+ .itcount());
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.nreturned, 8, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 8, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 8, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert.eq(profileObj.protocol,
+ getProfilerProtocolStringForCommand(testDB.getMongo()),
+ tojson(profileObj));
+assert.eq(profileObj.command.aggregate, coll.getName(), tojson(profileObj));
+assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+assert.eq(profileObj.command.comment, "agg_comment", tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert(!profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj));
+// Testing that 'usedDisk' is set when disk is used requires either using a lot of data or
+// configuring a server parameter, which could mess up other tests. That testing is
+// done elsewhere so that this test can stay in the core suite.
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}]).itcount());
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+
+//
+// Confirm that the "hint" modifier is in the profiler document.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}], {hint: {_id: 1}}).itcount());
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj));
+
+//
+// Confirm that aggregations are truncated in the profiler as { $truncated: <string>, comment:
+// <string> } when a comment parameter is provided.
+//
+let matchPredicate = {};
+
+for (let i = 0; i < 501; i++) {
+ matchPredicate[i] = "a".repeat(150);
+}
+
+assert.eq(coll.aggregate([{$match: matchPredicate}], {comment: "profile_agg"}).itcount(), 0);
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj));
+assert.eq(profileObj.command.comment, "profile_agg", tojson(profileObj));
})();
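
profile_agg.js and the tests that follow all fetch their assertions' input with getLatestProfilerEntry from jstests/libs/profiler.js. Conceptually it returns the newest system.profile document matching a filter; a simplified sketch under that assumption (the real helper is more defensive, e.g. about the profiler's own reads):

    // Simplified sketch of the library helper used throughout these tests.
    function getLatestProfilerEntrySketch(profileDB, filter) {
        var cursor = profileDB.system.profile.find(filter || {})
                         .sort({$natural: -1})  // newest entry first
                         .limit(1);
        assert(cursor.hasNext(), "no profiler entry matched " + tojson(filter));
        return cursor.next();
    }
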
diff --git a/jstests/core/profile_count.js b/jstests/core/profile_count.js
index 4ef361e06e9..103a08cb728 100644
--- a/jstests/core/profile_count.js
+++ b/jstests/core/profile_count.js
@@ -3,90 +3,92 @@
// Confirms that profiled count execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
-
- var testDB = db.getSiblingDB("profile_count");
- assert.commandWorked(testDB.dropDatabase());
- var conn = testDB.getMongo();
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Collection-level count.
- //
- var i;
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- assert.eq(10, coll.count({}, {collation: {locale: "fr"}}));
-
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
- assert.eq(profileObj.command.count, coll.getName(), tojson(profileObj));
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- assert.eq(profileObj.planSummary, "RECORD_STORE_FAST_COUNT", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Count with non-indexed query.
- //
- coll.drop();
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- var query = {a: {$gte: 5}};
- assert.eq(5, coll.count(query));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.command.query, query, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 10, tojson(profileObj));
-
- //
- // Count with indexed query.
- //
- coll.drop();
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- query = {a: {$gte: 5}};
- assert.eq(5, coll.count(query));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.command.query, query, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 6, tojson(profileObj));
- assert.eq(profileObj.planSummary, "COUNT_SCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- assert.eq(1, coll.count({a: 3, b: 3}));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+"use strict";
+
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+
+var testDB = db.getSiblingDB("profile_count");
+assert.commandWorked(testDB.dropDatabase());
+var conn = testDB.getMongo();
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Collection-level count.
+//
+var i;
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+assert.eq(10, coll.count({}, {collation: {locale: "fr"}}));
+
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
+assert.eq(profileObj.command.count, coll.getName(), tojson(profileObj));
+assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+assert.eq(profileObj.planSummary, "RECORD_STORE_FAST_COUNT", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Count with non-indexed query.
+//
+coll.drop();
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+var query = {a: {$gte: 5}};
+assert.eq(5, coll.count(query));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.command.query, query, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 10, tojson(profileObj));
+
+//
+// Count with indexed query.
+//
+coll.drop();
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+query = {
+ a: {$gte: 5}
+};
+assert.eq(5, coll.count(query));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.command.query, query, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 6, tojson(profileObj));
+assert.eq(profileObj.planSummary, "COUNT_SCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+assert.eq(1, coll.count({a: 3, b: 3}));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
})();
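
One number in profile_count.js that is easy to misread: the indexed count of {a: {$gte: 5}} over a = 0..9 asserts keysExamined == 6 even though only five documents match. The extra key is consistent with the COUNT_SCAN reading one key past the end of the range to know it is done; treat that reading as an inference from the asserted values. A scratch illustration of the arithmetic (hypothetical namespace, not part of the original test):

    var c = db.getSiblingDB("profile_count_sketch").getCollection("t");
    c.drop();
    for (var i = 0; i < 10; ++i) {
        assert.writeOK(c.insert({a: i}));
    }
    assert.commandWorked(c.createIndex({a: 1}));
    assert.eq(5, c.count({a: {$gte: 5}}));  // 5 matching keys, 6 keys examined above
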
diff --git a/jstests/core/profile_delete.js b/jstests/core/profile_delete.js
index c860ddb36f4..29f3b3ff5e7 100644
--- a/jstests/core/profile_delete.js
+++ b/jstests/core/profile_delete.js
@@ -3,100 +3,100 @@
// Confirms that profiled delete execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- // Set up test db and collection.
- var testDB = db.getSiblingDB("profile_delete");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Confirm metrics for single document delete.
- //
- var i;
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- assert.writeOK(coll.remove({a: {$gte: 2}, b: {$gte: 2}},
- db.getMongo().writeMode() === "commands"
- ? {justOne: true, collation: {locale: "fr"}}
- : {justOne: true}));
-
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "remove", tojson(profileObj));
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- }
- assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
- assert.eq(profileObj.keysDeleted, 2, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm metrics for multiple document delete.
- //
- coll.drop();
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- assert.writeOK(coll.remove({a: {$gte: 2}}));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ndeleted, 8, tojson(profileObj));
- assert.eq(profileObj.keysDeleted, 8, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- assert.writeOK(coll.remove({a: 3, b: 3}));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm killing a remove operation will not log 'ndeleted' to the profiler.
- //
- assert(coll.drop());
-
- for (let i = 0; i < 100; ++i) {
- assert.commandWorked(coll.insert({a: 1}));
- }
-
- const deleteResult = testDB.runCommand({
- delete: coll.getName(),
- deletes: [{q: {$where: "sleep(1000);return true", a: 1}, limit: 0}],
- maxTimeMS: 1
- });
-
- // This command will time out before completing.
- assert.commandFailedWithCode(deleteResult, ErrorCodes.MaxTimeMSExpired);
-
- profileObj = getLatestProfilerEntry(testDB);
-
- // 'ndeleted' should not be defined.
- assert(!profileObj.hasOwnProperty("ndeleted"), profileObj);
+"use strict";
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+// Set up test db and collection.
+var testDB = db.getSiblingDB("profile_delete");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Confirm metrics for single document delete.
+//
+var i;
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.writeOK(coll.remove({a: {$gte: 2}, b: {$gte: 2}},
+ db.getMongo().writeMode() === "commands"
+ ? {justOne: true, collation: {locale: "fr"}}
+ : {justOne: true}));
+
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "remove", tojson(profileObj));
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+}
+assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
+assert.eq(profileObj.keysDeleted, 2, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm metrics for multiple document delete.
+//
+coll.drop();
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+assert.writeOK(coll.remove({a: {$gte: 2}}));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ndeleted, 8, tojson(profileObj));
+assert.eq(profileObj.keysDeleted, 8, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+assert.writeOK(coll.remove({a: 3, b: 3}));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm killing a remove operation will not log 'ndeleted' to the profiler.
+//
+assert(coll.drop());
+
+for (let i = 0; i < 100; ++i) {
+ assert.commandWorked(coll.insert({a: 1}));
+}
+
+const deleteResult = testDB.runCommand({
+ delete: coll.getName(),
+ deletes: [{q: {$where: "sleep(1000);return true", a: 1}, limit: 0}],
+ maxTimeMS: 1
+});
+
+// This command will time out before completing.
+assert.commandFailedWithCode(deleteResult, ErrorCodes.MaxTimeMSExpired);
+
+profileObj = getLatestProfilerEntry(testDB);
+
+// 'ndeleted' should not be defined.
+assert(!profileObj.hasOwnProperty("ndeleted"), profileObj);
})();
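
In the single-document delete above, keysDeleted == 2 while ndeleted == 1: the removed document owns one key in the implicit {_id: 1} index and one in the {a: 1} index the test creates, so deleting one document removes two index keys. The same accounting in miniature (scratch namespace, illustrative only):

    var d = db.getSiblingDB("profile_delete_sketch").getCollection("t");
    d.drop();
    assert.writeOK(d.insert({_id: 0, a: 0, b: 0}));
    assert.commandWorked(d.createIndex({a: 1}));
    assert.eq(2, d.getIndexes().length);  // {_id: 1} plus {a: 1}
    // Removing the document therefore deletes one key from each index.
    assert.writeOK(d.remove({_id: 0}));
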
diff --git a/jstests/core/profile_distinct.js b/jstests/core/profile_distinct.js
index 72d010636d6..6a2272e0f8a 100644
--- a/jstests/core/profile_distinct.js
+++ b/jstests/core/profile_distinct.js
@@ -3,58 +3,58 @@
// Confirms that profiled distinct execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
-
- var testDB = db.getSiblingDB("profile_distinct");
- assert.commandWorked(testDB.dropDatabase());
- var conn = testDB.getMongo();
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Confirm metrics for distinct with query.
- //
- var i;
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i % 5, b: i}));
- }
- assert.commandWorked(coll.createIndex({b: 1}));
-
- coll.distinct("a", {b: {$gte: 5}}, {collation: {locale: "fr"}});
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.keysExamined, 5, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 5, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { b: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
- assert.eq(coll.getName(), profileObj.command.distinct, tojson(profileObj));
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- coll.distinct("a", {a: 3, b: 3});
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+"use strict";
+
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+
+var testDB = db.getSiblingDB("profile_distinct");
+assert.commandWorked(testDB.dropDatabase());
+var conn = testDB.getMongo();
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Confirm metrics for distinct with query.
+//
+var i;
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i % 5, b: i}));
+}
+assert.commandWorked(coll.createIndex({b: 1}));
+
+coll.distinct("a", {b: {$gte: 5}}, {collation: {locale: "fr"}});
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.keysExamined, 5, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 5, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { b: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
+assert.eq(coll.getName(), profileObj.command.distinct, tojson(profileObj));
+assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+coll.distinct("a", {a: 3, b: 3});
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
})();
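
The distinct metrics above follow directly from the data shape: documents are {a: i % 5, b: i} for i = 0..9, so the predicate {b: {$gte: 5}} selects i = 5..9. The {b: 1} IXSCAN touches exactly those five keys and fetches five documents, which is why keysExamined == docsExamined == 5. The same arithmetic in miniature:

    // The five matching documents that the IXSCAN on {b: 1} visits.
    var matches = [];
    for (var i = 5; i < 10; ++i) {
        matches.push({a: i % 5, b: i});
    }
    assert.eq(5, matches.length);  // keysExamined == docsExamined == 5 above
    // Their 'a' values are i % 5, i.e. 0..4 -- the expected distinct set.
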
diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js
index cfdf1c995ec..23bcf96e2ad 100644
--- a/jstests/core/profile_find.js
+++ b/jstests/core/profile_find.js
@@ -3,182 +3,184 @@
// Confirms that profiled find execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
-
- var testDB = db.getSiblingDB("profile_find");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
- var isLegacyReadMode = (testDB.getMongo().readMode() === "legacy");
-
- testDB.setProfilingLevel(2);
- const profileEntryFilter = {op: "query"};
-
- //
- // Confirm most metrics on single document read.
- //
- var i;
- for (i = 0; i < 3; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr"}}));
-
- if (!isLegacyReadMode) {
- assert.eq(coll.find({a: 1}).collation({locale: "fr"}).limit(1).itcount(), 1);
- } else {
- assert.neq(coll.findOne({a: 1}), null);
- }
-
- var profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
- assert.eq(profileObj.nreturned, 1, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.command.filter, {a: 1}, tojson(profileObj));
- if (isLegacyReadMode) {
- assert.eq(profileObj.command.ntoreturn, -1, tojson(profileObj));
- } else {
- assert.eq(profileObj.command.limit, 1, tojson(profileObj));
- assert.eq(profileObj.protocol,
- getProfilerProtocolStringForCommand(testDB.getMongo()),
- tojson(profileObj));
- }
-
- if (!isLegacyReadMode) {
- assert.eq(profileObj.command.collation, {locale: "fr"});
- }
- assert.eq(profileObj.cursorExhausted, true, tojson(profileObj));
- assert(!profileObj.hasOwnProperty("cursorid"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj));
- assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj));
- assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "cursorId" and "hasSortStage" metrics.
- //
- coll.drop();
- for (i = 0; i < 3; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
+"use strict";
+
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+
+var testDB = db.getSiblingDB("profile_find");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+var isLegacyReadMode = (testDB.getMongo().readMode() === "legacy");
+
+testDB.setProfilingLevel(2);
+const profileEntryFilter = {
+ op: "query"
+};
+
+//
+// Confirm most metrics on single document read.
+//
+var i;
+for (i = 0; i < 3; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr"}}));
+
+if (!isLegacyReadMode) {
+ assert.eq(coll.find({a: 1}).collation({locale: "fr"}).limit(1).itcount(), 1);
+} else {
assert.neq(coll.findOne({a: 1}), null);
-
- assert.neq(coll.find({a: {$gte: 0}}).sort({b: 1}).batchSize(1).next(), null);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
-
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
- assert(profileObj.hasOwnProperty("cursorid"), tojson(profileObj));
- assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- assert.neq(coll.findOne({a: 3, b: 3}), null);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "replanned" metric.
- // We should ideally be using a fail-point to trigger "replanned" rather than relying on
- // knowledge of current query planner behavior to set up a scenario. SERVER-23620 has been
- // entered to add this fail-point and to update the appropriate tests.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 20; ++i) {
- assert.writeOK(coll.insert({a: 5, b: i}));
- assert.writeOK(coll.insert({a: i, b: 10}));
- }
-
- // Until we get the failpoint described in the above comment (regarding SERVER-23620), we must
- // run the query twice. The first time will create an inactive cache entry. The second run will
- // take the same number of works, and create an active cache entry.
- assert.neq(coll.findOne({a: 5, b: 15}), null);
- assert.neq(coll.findOne({a: 5, b: 15}), null);
-
- // Run a query with the same shape, but with different parameters. The plan cached for the
- // query above will perform poorly (since the selectivities are different) and we will be
- // forced to replan.
- assert.neq(coll.findOne({a: 15, b: 10}), null);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
-
- assert.eq(profileObj.replanned, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm that query modifiers such as "hint" are in the profiler document.
- //
- coll.drop();
- assert.writeOK(coll.insert({_id: 2}));
-
- assert.eq(coll.find().hint({_id: 1}).itcount(), 1);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj));
-
- assert.eq(coll.find().comment("a comment").itcount(), 1);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.comment, "a comment", tojson(profileObj));
-
- var maxTimeMS = 100000;
- assert.eq(coll.find().maxTimeMS(maxTimeMS).itcount(), 1);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.maxTimeMS, maxTimeMS, tojson(profileObj));
-
- assert.eq(coll.find().max({_id: 3}).hint({_id: 1}).itcount(), 1);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.max, {_id: 3}, tojson(profileObj));
-
- assert.eq(coll.find().min({_id: 0}).hint({_id: 1}).itcount(), 1);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.min, {_id: 0}, tojson(profileObj));
-
- assert.eq(coll.find().returnKey().itcount(), 1);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.returnKey, true, tojson(profileObj));
-
- //
- // Confirm that queries are truncated in the profiler as { $truncated: <string>, comment:
- // <string> }
- //
- let queryPredicate = {};
-
- for (let i = 0; i < 501; i++) {
- queryPredicate[i] = "a".repeat(150);
- }
-
- assert.eq(coll.find(queryPredicate).comment("profile_find").itcount(), 0);
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj));
- assert.eq(profileObj.command.comment, "profile_find", tojson(profileObj));
-
- //
- // Confirm that a query whose filter contains a field named 'query' appears as expected in the
- // profiler. This test ensures that upconverting a legacy query correctly identifies this as a
- // user field rather than a wrapped filter spec.
- //
- coll.find({query: "foo"}).itcount();
- profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
- assert.eq(profileObj.command.filter, {query: "foo"}, tojson(profileObj));
+}
+
+var profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
+assert.eq(profileObj.nreturned, 1, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.command.filter, {a: 1}, tojson(profileObj));
+if (isLegacyReadMode) {
+ assert.eq(profileObj.command.ntoreturn, -1, tojson(profileObj));
+} else {
+ assert.eq(profileObj.command.limit, 1, tojson(profileObj));
+ assert.eq(profileObj.protocol,
+ getProfilerProtocolStringForCommand(testDB.getMongo()),
+ tojson(profileObj));
+}
+
+if (!isLegacyReadMode) {
+ assert.eq(profileObj.command.collation, {locale: "fr"});
+}
+assert.eq(profileObj.cursorExhausted, true, tojson(profileObj));
+assert(!profileObj.hasOwnProperty("cursorid"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj));
+assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj));
+assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "cursorId" and "hasSortStage" metrics.
+//
+coll.drop();
+for (i = 0; i < 3; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.neq(coll.findOne({a: 1}), null);
+
+assert.neq(coll.find({a: {$gte: 0}}).sort({b: 1}).batchSize(1).next(), null);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+assert(profileObj.hasOwnProperty("cursorid"), tojson(profileObj));
+assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+assert.neq(coll.findOne({a: 3, b: 3}), null);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "replanned" metric.
+// We should ideally be using a fail-point to trigger "replanned" rather than relying on
+// knowledge of current query planner behavior to set up a scenario. SERVER-23620 has been
+// entered to add this fail-point and to update the appropriate tests.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 20; ++i) {
+ assert.writeOK(coll.insert({a: 5, b: i}));
+ assert.writeOK(coll.insert({a: i, b: 10}));
+}
+
+// Until we get the failpoint described in the above comment (regarding SERVER-23620), we must
+// run the query twice. The first time will create an inactive cache entry. The second run will
+// take the same number of works, and create an active cache entry.
+assert.neq(coll.findOne({a: 5, b: 15}), null);
+assert.neq(coll.findOne({a: 5, b: 15}), null);
+
+// Run a query with the same shape, but with different parameters. The plan cached for the
+// query above will perform poorly (since the selectivities are different) and we will be
+// forced to replan.
+assert.neq(coll.findOne({a: 15, b: 10}), null);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+
+assert.eq(profileObj.replanned, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm that query modifiers such as "hint" are in the profiler document.
+//
+coll.drop();
+assert.writeOK(coll.insert({_id: 2}));
+
+assert.eq(coll.find().hint({_id: 1}).itcount(), 1);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj));
+
+assert.eq(coll.find().comment("a comment").itcount(), 1);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.comment, "a comment", tojson(profileObj));
+
+var maxTimeMS = 100000;
+assert.eq(coll.find().maxTimeMS(maxTimeMS).itcount(), 1);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.maxTimeMS, maxTimeMS, tojson(profileObj));
+
+assert.eq(coll.find().max({_id: 3}).hint({_id: 1}).itcount(), 1);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.max, {_id: 3}, tojson(profileObj));
+
+assert.eq(coll.find().min({_id: 0}).hint({_id: 1}).itcount(), 1);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.min, {_id: 0}, tojson(profileObj));
+
+assert.eq(coll.find().returnKey().itcount(), 1);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.returnKey, true, tojson(profileObj));
+
+//
+// Confirm that queries are truncated in the profiler as { $truncated: <string>, comment:
+// <string> }
+//
+let queryPredicate = {};
+
+for (let i = 0; i < 501; i++) {
+ queryPredicate[i] = "a".repeat(150);
+}
+
+assert.eq(coll.find(queryPredicate).comment("profile_find").itcount(), 0);
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj));
+assert.eq(profileObj.command.comment, "profile_find", tojson(profileObj));
+
+//
+// Confirm that a query whose filter contains a field named 'query' appears as expected in the
+// profiler. This test ensures that upconverting a legacy query correctly identifies this as a
+// user field rather than a wrapped filter spec.
+//
+coll.find({query: "foo"}).itcount();
+profileObj = getLatestProfilerEntry(testDB, profileEntryFilter);
+assert.eq(profileObj.command.filter, {query: "foo"}, tojson(profileObj));
})();
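
The replan scenario in profile_find.js hinges on selectivity flipping between two queries of the same shape. With the data above, {a: 5, b: 15} matches one document via b but twenty-one via a, so a {b: 1} plan wins and is cached (after the two warm-up runs that promote the cache entry from inactive to active). {a: 15, b: 10} inverts that: twenty-one documents have b == 10 and only one has a == 15, so the cached plan exceeds its works budget and the server replans. A rough sketch of the selectivity flip (real plan-cache shapes also account for sort and projection):

    var countWhere = function(docs, pred) {
        return docs.filter(pred).length;
    };
    var docs = [];
    for (var i = 0; i < 20; ++i) {
        docs.push({a: 5, b: i});
        docs.push({a: i, b: 10});
    }
    // First query {a: 5, b: 15}: the b predicate is selective, the a predicate is not.
    assert.eq(1, countWhere(docs, function(d) { return d.b === 15; }));
    assert.eq(21, countWhere(docs, function(d) { return d.a === 5; }));
    // Second query {a: 15, b: 10}: selectivity is inverted, defeating the cached plan.
    assert.eq(21, countWhere(docs, function(d) { return d.b === 10; }));
    assert.eq(1, countWhere(docs, function(d) { return d.a === 15; }));
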
diff --git a/jstests/core/profile_findandmodify.js b/jstests/core/profile_findandmodify.js
index 56e673ae639..3c646d6468c 100644
--- a/jstests/core/profile_findandmodify.js
+++ b/jstests/core/profile_findandmodify.js
@@ -2,184 +2,183 @@
// @tags: [requires_profiling]
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- var testDB = db.getSiblingDB("profile_findandmodify");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Update as findAndModify.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i, b: [0]}));
- }
- assert.commandWorked(coll.createIndex({b: 1}));
-
- assert.eq({_id: 2, a: 2, b: [0]}, coll.findAndModify({
- query: {a: 2},
- update: {$inc: {"b.$[i]": 1}},
- collation: {locale: "fr"},
- arrayFilters: [{i: 0}]
- }));
-
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
- assert.eq(profileObj.command.update, {$inc: {"b.$[i]": 1}}, tojson(profileObj));
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
- assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
- assert.eq(profileObj.keysDeleted, 1, tojson(profileObj));
- assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Delete as findAndModify.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
- }
-
- assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {a: 2}, remove: true}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
- assert.eq(profileObj.command.remove, true, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
- assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
- assert.eq(profileObj.keysDeleted, 1, tojson(profileObj));
- assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Update with {upsert: true} as findAndModify.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
- }
-
- assert.eq(
- {_id: 4, a: 1},
- coll.findAndModify({query: {_id: 4}, update: {$inc: {a: 1}}, upsert: true, new: true}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.query, {_id: 4}, tojson(profileObj));
- assert.eq(profileObj.command.update, {$inc: {a: 1}}, tojson(profileObj));
- assert.eq(profileObj.command.upsert, true, tojson(profileObj));
- assert.eq(profileObj.command.new, true, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 0, tojson(profileObj));
- assert.eq(profileObj.nMatched, 0, tojson(profileObj));
- assert.eq(profileObj.nModified, 0, tojson(profileObj));
- assert.eq(profileObj.upsert, true, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Idhack update as findAndModify.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
- }
-
- assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {_id: 2}, update: {$inc: {b: 1}}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IDHACK", tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Update as findAndModify with projection.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
- }
-
- assert.eq({a: 2},
- coll.findAndModify({query: {a: 2}, update: {$inc: {b: 1}}, fields: {_id: 0, a: 1}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
- assert.eq(profileObj.command.update, {$inc: {b: 1}}, tojson(profileObj));
- assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Delete as findAndModify with projection.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
- }
-
- assert.eq({a: 2}, coll.findAndModify({query: {a: 2}, remove: true, fields: {_id: 0, a: 1}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
- assert.eq(profileObj.command.remove, true, tojson(profileObj));
- assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj));
- assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "hasSortStage" on findAndModify with sort.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({_id: i, a: i}));
- }
-
- assert.eq({_id: 0, a: 0},
- coll.findAndModify({query: {a: {$gte: 0}}, sort: {a: 1}, update: {$inc: {b: 1}}}));
-
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- coll.findAndModify({query: {a: 3, b: 3}, update: {$set: {c: 1}}});
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+"use strict";
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+var testDB = db.getSiblingDB("profile_findandmodify");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Update as findAndModify.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i, b: [0]}));
+}
+assert.commandWorked(coll.createIndex({b: 1}));
+
+assert.eq({_id: 2, a: 2, b: [0]}, coll.findAndModify({
+ query: {a: 2},
+ update: {$inc: {"b.$[i]": 1}},
+ collation: {locale: "fr"},
+ arrayFilters: [{i: 0}]
+}));
+
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
+assert.eq(profileObj.command.update, {$inc: {"b.$[i]": 1}}, tojson(profileObj));
+assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
+assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
+assert.eq(profileObj.keysDeleted, 1, tojson(profileObj));
+assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Delete as findAndModify.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
+}
+
+assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {a: 2}, remove: true}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
+assert.eq(profileObj.command.remove, true, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
+assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
+assert.eq(profileObj.keysDeleted, 1, tojson(profileObj));
+assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Update with {upsert: true} as findAndModify.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
+}
+
+assert.eq({_id: 4, a: 1},
+ coll.findAndModify({query: {_id: 4}, update: {$inc: {a: 1}}, upsert: true, new: true}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.query, {_id: 4}, tojson(profileObj));
+assert.eq(profileObj.command.update, {$inc: {a: 1}}, tojson(profileObj));
+assert.eq(profileObj.command.upsert, true, tojson(profileObj));
+assert.eq(profileObj.command.new, true, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 0, tojson(profileObj));
+assert.eq(profileObj.nMatched, 0, tojson(profileObj));
+assert.eq(profileObj.nModified, 0, tojson(profileObj));
+assert.eq(profileObj.upsert, true, tojson(profileObj));
+assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Idhack update as findAndModify.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
+}
+
+assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {_id: 2}, update: {$inc: {b: 1}}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IDHACK", tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Update as findAndModify with projection.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
+}
+
+assert.eq({a: 2},
+ coll.findAndModify({query: {a: 2}, update: {$inc: {b: 1}}, fields: {_id: 0, a: 1}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
+assert.eq(profileObj.command.update, {$inc: {b: 1}}, tojson(profileObj));
+assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Delete as findAndModify with projection.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
+}
+
+assert.eq({a: 2}, coll.findAndModify({query: {a: 2}, remove: true, fields: {_id: 0, a: 1}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj));
+assert.eq(profileObj.command.remove, true, tojson(profileObj));
+assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj));
+assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "hasSortStage" on findAndModify with sort.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
+}
+
+assert.eq({_id: 0, a: 0},
+ coll.findAndModify({query: {a: {$gte: 0}}, sort: {a: 1}, update: {$inc: {b: 1}}}));
+
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+coll.findAndModify({query: {a: 3, b: 3}, update: {$set: {c: 1}}});
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
})();
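
The first findAndModify above exercises arrayFilters: the positional $[i] in the update targets every array element matching the filter {i: 0}, i.e. every element equal to 0. A standalone sketch of the same update semantics (scratch namespace and values chosen for illustration):

    var f = db.getSiblingDB("profile_fam_sketch").getCollection("t");
    f.drop();
    assert.writeOK(f.insert({_id: 1, b: [0, 5, 0]}));
    // $[i] with arrayFilters: [{i: 0}] increments only the elements equal to 0.
    assert.writeOK(f.update({_id: 1}, {$inc: {"b.$[i]": 1}}, {arrayFilters: [{i: 0}]}));
    assert.eq([1, 5, 1], f.findOne({_id: 1}).b);
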
diff --git a/jstests/core/profile_getmore.js b/jstests/core/profile_getmore.js
index 74c62f0176b..5cdc3a51ffe 100644
--- a/jstests/core/profile_getmore.js
+++ b/jstests/core/profile_getmore.js
@@ -3,148 +3,146 @@
// Confirms that profiled getMore execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- var testDB = db.getSiblingDB("profile_getmore");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Confirm basic metrics on getMore with a not-exhausted cursor.
- //
- var i;
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- var cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2);
- cursor.next(); // Perform initial query and consume first of 2 docs returned.
-
- var cursorId =
- getLatestProfilerEntry(testDB, {op: "query"}).cursorid; // Save cursorid from find.
-
- cursor.next(); // Consume second of 2 docs from initial query.
- cursor.next(); // getMore performed, leaving open cursor.
-
- var profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "getmore", tojson(profileObj));
- assert.eq(profileObj.keysExamined, 2, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 2, tojson(profileObj));
- assert.eq(profileObj.cursorid, cursorId, tojson(profileObj));
- assert.eq(profileObj.nreturned, 2, tojson(profileObj));
- assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj));
- assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj));
- assert.eq(profileObj.command.batchSize, 2, tojson(profileObj));
- assert.eq(profileObj.originatingCommand.filter, {a: {$gt: 0}});
- assert.eq(profileObj.originatingCommand.sort, {a: 1});
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.hasOwnProperty("execStats"), tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj));
- assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj));
- assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm hasSortStage on getMore with a not-exhausted cursor and in-memory sort.
- //
- coll.drop();
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2);
- cursor.next(); // Perform initial query and consume first of 2 docs returned.
- cursor.next(); // Consume second of 2 docs from initial query.
- cursor.next(); // getMore performed, leaving open cursor.
-
- profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
-
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
-
- //
- // Confirm "cursorExhausted" metric.
- //
- coll.drop();
- for (i = 0; i < 3; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- cursor = coll.find().batchSize(2);
- cursor.next(); // Perform initial query and consume first of 3 docs returned.
- cursor.itcount(); // Exhaust the cursor.
-
- profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
-
- assert(profileObj.hasOwnProperty("cursorid"),
- tojson(profileObj)); // cursorid should always be present on getMore.
- assert.neq(0, profileObj.cursorid, tojson(profileObj));
- assert.eq(profileObj.cursorExhausted, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm getMore on aggregation.
- //
- coll.drop();
- for (i = 0; i < 20; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- var cursor = coll.aggregate([{$match: {a: {$gte: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
- var cursorId = getLatestProfilerEntry(testDB, {"command.aggregate": coll.getName()}).cursorid;
- assert.neq(0, cursorId);
-
- cursor.next(); // Consume the result set.
-
- profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "getmore", tojson(profileObj));
- assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj));
- assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj));
- assert.eq(
- profileObj.originatingCommand.pipeline[0], {$match: {a: {$gte: 0}}}, tojson(profileObj));
- assert.eq(profileObj.cursorid, cursorId, tojson(profileObj));
- assert.eq(profileObj.nreturned, 20, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert.eq(profileObj.cursorExhausted, true, tojson(profileObj));
- assert.eq(profileObj.keysExamined, 20, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 20, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
- assert.eq(profileObj.originatingCommand.hint, {a: 1}, tojson(profileObj));
-
- //
- // Confirm that originatingCommand is truncated in the profiler as { $truncated: <string>,
- // comment: <string> }
- //
- let docToInsert = {};
-
- for (i = 0; i < 501; i++) {
- docToInsert[i] = "a".repeat(150);
- }
-
- coll.drop();
- for (i = 0; i < 4; i++) {
- assert.writeOK(coll.insert(docToInsert));
- }
-
- cursor = coll.find(docToInsert).comment("profile_getmore").batchSize(2);
- assert.eq(cursor.itcount(), 4); // Consume result set and trigger getMore.
-
- profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
- assert.eq((typeof profileObj.originatingCommand.$truncated), "string", tojson(profileObj));
- assert.eq(profileObj.originatingCommand.comment, "profile_getmore", tojson(profileObj));
+"use strict";
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+var testDB = db.getSiblingDB("profile_getmore");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Confirm basic metrics on getMore with a not-exhausted cursor.
+//
+var i;
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+var cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2);
+cursor.next(); // Perform initial query and consume first of 2 docs returned.
+
+var cursorId = getLatestProfilerEntry(testDB, {op: "query"}).cursorid; // Save cursorid from find.
+
+cursor.next(); // Consume second of 2 docs from initial query.
+cursor.next(); // getMore performed, leaving open cursor.
+
+var profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "getmore", tojson(profileObj));
+assert.eq(profileObj.keysExamined, 2, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 2, tojson(profileObj));
+assert.eq(profileObj.cursorid, cursorId, tojson(profileObj));
+assert.eq(profileObj.nreturned, 2, tojson(profileObj));
+assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj));
+assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj));
+assert.eq(profileObj.command.batchSize, 2, tojson(profileObj));
+assert.eq(profileObj.originatingCommand.filter, {a: {$gt: 0}});
+assert.eq(profileObj.originatingCommand.sort, {a: 1});
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.hasOwnProperty("execStats"), tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj));
+assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj));
+assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
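+// Note that "cursorExhausted" must be absent here: this getMore left the cursor open, and
+// the profiler records that flag only once a cursor has been fully consumed.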
+
+//
+// Confirm hasSortStage on getMore with a not-exhausted cursor and in-memory sort.
+//
+coll.drop();
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2);
+cursor.next(); // Perform initial query and consume first of 2 docs returned.
+cursor.next(); // Consume second of 2 docs from initial query.
+cursor.next(); // getMore performed, leaving open cursor.
+
+profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
+
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
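+// With the {a: 1} index gone after the drop above, the sort must be performed in memory,
+// and the resulting "hasSortStage" flag carries through to the getMore's profile entry.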
+
+//
+// Confirm "cursorExhausted" metric.
+//
+coll.drop();
+for (i = 0; i < 3; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+cursor = coll.find().batchSize(2);
+cursor.next(); // Perform initial query and consume first of 3 docs returned.
+cursor.itcount(); // Exhaust the cursor.
+
+profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
+
+assert(profileObj.hasOwnProperty("cursorid"),
+ tojson(profileObj)); // cursorid should always be present on getMore.
+assert.neq(0, profileObj.cursorid, tojson(profileObj));
+assert.eq(profileObj.cursorExhausted, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm getMore on aggregation.
+//
+coll.drop();
+for (i = 0; i < 20; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+var cursor = coll.aggregate([{$match: {a: {$gte: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
+var cursorId = getLatestProfilerEntry(testDB, {"command.aggregate": coll.getName()}).cursorid;
+assert.neq(0, cursorId);
+
+cursor.next(); // Consume the result set.
+
+profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "getmore", tojson(profileObj));
+assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj));
+assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj));
+assert.eq(profileObj.originatingCommand.pipeline[0], {$match: {a: {$gte: 0}}}, tojson(profileObj));
+assert.eq(profileObj.cursorid, cursorId, tojson(profileObj));
+assert.eq(profileObj.nreturned, 20, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert.eq(profileObj.cursorExhausted, true, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 20, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 20, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+assert.eq(profileObj.originatingCommand.hint, {a: 1}, tojson(profileObj));
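+// Since the aggregate was issued with batchSize: 0, its initial reply carried no documents;
+// the subsequent getMore returned all 20 results and exhausted the cursor.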
+
+//
+// Confirm that originatingCommand is truncated in the profiler as { $truncated: <string>,
+// comment: <string> }
+//
+let docToInsert = {};
+
+for (i = 0; i < 501; i++) {
+ docToInsert[i] = "a".repeat(150);
+}
+
+coll.drop();
+for (i = 0; i < 4; i++) {
+ assert.writeOK(coll.insert(docToInsert));
+}
+
+cursor = coll.find(docToInsert).comment("profile_getmore").batchSize(2);
+assert.eq(cursor.itcount(), 4); // Consume result set and trigger getMore.
+
+profileObj = getLatestProfilerEntry(testDB, {op: "getmore"});
+assert.eq((typeof profileObj.originatingCommand.$truncated), "string", tojson(profileObj));
+assert.eq(profileObj.originatingCommand.comment, "profile_getmore", tojson(profileObj));
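+// The filter document built above (501 fields of 150 characters each) pushes the
+// originatingCommand past the profiler's size limit, so only a truncated string plus the
+// comment field is preserved.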
})();
diff --git a/jstests/core/profile_insert.js b/jstests/core/profile_insert.js
index 5f1bff8e2ea..fa53801521d 100644
--- a/jstests/core/profile_insert.js
+++ b/jstests/core/profile_insert.js
@@ -7,89 +7,91 @@
// ]
(function() {
- "use strict";
+"use strict";
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
- var testDB = db.getSiblingDB("profile_insert");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
- var isWriteCommand = (db.getMongo().writeMode() === "commands");
+var testDB = db.getSiblingDB("profile_insert");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+var isWriteCommand = (db.getMongo().writeMode() === "commands");
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- //
- // Test single insert.
- //
- var doc = {_id: 1};
- var result = coll.insert(doc);
- if (isWriteCommand) {
- assert.writeOK(result);
- }
+//
+// Test single insert.
+//
+var doc = {_id: 1};
+var result = coll.insert(doc);
+if (isWriteCommand) {
+ assert.writeOK(result);
+}
- var profileObj = getLatestProfilerEntry(testDB);
+var profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "insert", tojson(profileObj));
- assert.eq(profileObj.ninserted, 1, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
- if (isWriteCommand) {
- assert.eq(profileObj.command.ordered, true, tojson(profileObj));
- assert.eq(profileObj.protocol,
- getProfilerProtocolStringForCommand(testDB.getMongo()),
- tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- }
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "insert", tojson(profileObj));
+assert.eq(profileObj.ninserted, 1, tojson(profileObj));
+assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
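+// (With only the implicit _id index present, one inserted document writes one index key.)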
+if (isWriteCommand) {
+ assert.eq(profileObj.command.ordered, true, tojson(profileObj));
+ assert.eq(profileObj.protocol,
+ getProfilerProtocolStringForCommand(testDB.getMongo()),
+ tojson(profileObj));
+ assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+}
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("ts"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("client"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("ts"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("client"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
- //
- // Test multi-insert.
- //
- coll.drop();
+//
+// Test multi-insert.
+//
+coll.drop();
- var docArray = [{_id: 1}, {_id: 2}];
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert(docArray[0]);
- bulk.insert(docArray[1]);
- result = bulk.execute();
- if (isWriteCommand) {
- assert.writeOK(result);
- }
+var docArray = [{_id: 1}, {_id: 2}];
+var bulk = coll.initializeUnorderedBulkOp();
+bulk.insert(docArray[0]);
+bulk.insert(docArray[1]);
+result = bulk.execute();
+if (isWriteCommand) {
+ assert.writeOK(result);
+}
- profileObj = getLatestProfilerEntry(testDB);
+profileObj = getLatestProfilerEntry(testDB);
- if (isWriteCommand) {
- assert.eq(profileObj.ninserted, 2, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 2, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
- } else {
- // Documents were inserted one at a time.
- assert.eq(profileObj.ninserted, 1, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
- }
+if (isWriteCommand) {
+ assert.eq(profileObj.ninserted, 2, tojson(profileObj));
+ assert.eq(profileObj.keysInserted, 2, tojson(profileObj));
+ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+} else {
+ // Documents were inserted one at a time.
+ assert.eq(profileObj.ninserted, 1, tojson(profileObj));
+ assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
+ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+}
- //
- // Test insert options.
- //
- coll.drop();
- doc = {_id: 1};
- var wtimeout = 60000;
- assert.writeOK(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false}));
+//
+// Test insert options.
+//
+coll.drop();
+doc = {
+ _id: 1
+};
+var wtimeout = 60000;
+assert.writeOK(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false}));
- profileObj = getLatestProfilerEntry(testDB);
+profileObj = getLatestProfilerEntry(testDB);
- if (isWriteCommand) {
- assert.eq(profileObj.command.ordered, false, tojson(profileObj));
- assert.eq(profileObj.command.writeConcern.w, 1, tojson(profileObj));
- assert.eq(profileObj.command.writeConcern.wtimeout, wtimeout, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
- }
+if (isWriteCommand) {
+ assert.eq(profileObj.command.ordered, false, tojson(profileObj));
+ assert.eq(profileObj.command.writeConcern.w, 1, tojson(profileObj));
+ assert.eq(profileObj.command.writeConcern.wtimeout, wtimeout, tojson(profileObj));
+ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+}
})();
diff --git a/jstests/core/profile_list_collections.js b/jstests/core/profile_list_collections.js
index 3db9e7971c9..cf6132e71c7 100644
--- a/jstests/core/profile_list_collections.js
+++ b/jstests/core/profile_list_collections.js
@@ -3,37 +3,39 @@
// Confirms that a listCollections command is not profiled.
(function() {
- "use strict";
+"use strict";
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand.
- load("jstests/libs/profiler.js");
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand.
+load("jstests/libs/profiler.js");
- var testDB = db.getSiblingDB("profile_list_collections");
- assert.commandWorked(testDB.dropDatabase());
- const numCollections = 5;
- for (let i = 0; i < numCollections; ++i) {
- assert.commandWorked(testDB.runCommand({create: "test_" + i}));
- }
+var testDB = db.getSiblingDB("profile_list_collections");
+assert.commandWorked(testDB.dropDatabase());
+const numCollections = 5;
+for (let i = 0; i < numCollections; ++i) {
+ assert.commandWorked(testDB.runCommand({create: "test_" + i}));
+}
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- const profileEntryFilter = {op: "command", command: "listCollections"};
+const profileEntryFilter = {
+ op: "command",
+ command: "listCollections"
+};
- let cmdRes =
- assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 1}}));
+let cmdRes = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 1}}));
- // We don't profile listCollections commands.
- assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(),
- 0,
- "Did not expect any profile entry for a listCollections command");
+// We don't profile listCollections commands.
+assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(),
+ 0,
+ "Did not expect any profile entry for a listCollections command");
- const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1);
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName}));
+const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1);
+cmdRes = assert.commandWorked(
+ testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName}));
- // A listCollections cursor doesn't really have a namespace to use to record profile entries, so
- // does not get recorded in the profile.
- assert.throws(() => getLatestProfilerEntry(testDB, {op: "getmore"}),
- [],
- "Did not expect to find entry for getMore on a listCollections cursor");
+// A listCollections cursor doesn't really have a namespace to use to record profile entries, so
+// does not get recorded in the profile.
+assert.throws(() => getLatestProfilerEntry(testDB, {op: "getmore"}),
+ [],
+ "Did not expect to find entry for getMore on a listCollections cursor");
})();
diff --git a/jstests/core/profile_list_indexes.js b/jstests/core/profile_list_indexes.js
index 2876a58ae90..b94ee6b422b 100644
--- a/jstests/core/profile_list_indexes.js
+++ b/jstests/core/profile_list_indexes.js
@@ -3,41 +3,46 @@
// Confirms that a listIndexes command and subsequent getMores of its cursor are profiled correctly.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand.
- load("jstests/libs/profiler.js");
-
- var testDB = db.getSiblingDB("profile_list_indexes");
- var testColl = testDB.testColl;
- assert.commandWorked(testDB.dropDatabase());
- const numIndexes = 5;
- for (let i = 0; i < numIndexes; ++i) {
- let indexSpec = {};
- indexSpec["fakeField_" + i] = 1;
- assert.commandWorked(testColl.ensureIndex(indexSpec));
- }
-
- testDB.setProfilingLevel(2);
-
- const listIndexesCommand = {listIndexes: testColl.getName(), cursor: {batchSize: 1}};
- const profileEntryFilter = {op: "command"};
- for (var field in listIndexesCommand) {
- profileEntryFilter['command.' + field] = listIndexesCommand[field];
- }
-
- let cmdRes = assert.commandWorked(testDB.runCommand(listIndexesCommand));
-
- assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(),
- 1,
- "Expected to find profile entry for a listIndexes command");
-
- const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1);
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName}));
-
- const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"});
- for (var field in listIndexesCommand) {
- assert.eq(getMoreProfileEntry.originatingCommand[field], listIndexesCommand[field], field);
- }
+"use strict";
+
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand.
+load("jstests/libs/profiler.js");
+
+var testDB = db.getSiblingDB("profile_list_indexes");
+var testColl = testDB.testColl;
+assert.commandWorked(testDB.dropDatabase());
+const numIndexes = 5;
+for (let i = 0; i < numIndexes; ++i) {
+ let indexSpec = {};
+ indexSpec["fakeField_" + i] = 1;
+ assert.commandWorked(testColl.ensureIndex(indexSpec));
+}
+
+testDB.setProfilingLevel(2);
+
+const listIndexesCommand = {
+ listIndexes: testColl.getName(),
+ cursor: {batchSize: 1}
+};
+const profileEntryFilter = {
+ op: "command"
+};
+for (var field in listIndexesCommand) {
+ profileEntryFilter['command.' + field] = listIndexesCommand[field];
+}
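+// The dotted paths built above (e.g. "command.listIndexes") let the filter match individual
+// fields of the nested command document recorded in system.profile.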
+
+let cmdRes = assert.commandWorked(testDB.runCommand(listIndexesCommand));
+
+assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(),
+ 1,
+ "Expected to find profile entry for a listIndexes command");
+
+const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1);
+cmdRes = assert.commandWorked(
+ testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName}));
+
+const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"});
+for (var field in listIndexesCommand) {
+ assert.eq(getMoreProfileEntry.originatingCommand[field], listIndexesCommand[field], field);
+}
})();
diff --git a/jstests/core/profile_mapreduce.js b/jstests/core/profile_mapreduce.js
index 117689c4d73..0dc3c81a0c2 100644
--- a/jstests/core/profile_mapreduce.js
+++ b/jstests/core/profile_mapreduce.js
@@ -8,97 +8,97 @@
// Confirms that profiled mapReduce execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
-
- var testDB = db.getSiblingDB("profile_mapreduce");
- assert.commandWorked(testDB.dropDatabase());
- var conn = testDB.getMongo();
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- var mapFunction = function() {
- emit(this.a, this.b);
- };
-
- var reduceFunction = function(a, b) {
- return Array.sum(b);
- };
-
- //
- // Confirm metrics for mapReduce with query.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- coll.mapReduce(mapFunction,
- reduceFunction,
- {query: {a: {$gte: 0}}, out: {inline: 1}, collation: {locale: "fr"}});
-
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.keysExamined, 3, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
- assert.eq(coll.getName(), profileObj.command.mapreduce, tojson(profileObj));
- assert.eq({locale: "fr"}, profileObj.command.collation, tojson(profileObj));
- assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm metrics for mapReduce with sort stage.
- //
- coll.drop();
- for (var i = 0; i < 5; i++) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- coll.mapReduce(mapFunction, reduceFunction, {sort: {b: 1}, out: {inline: 1}});
-
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm namespace field is correct when output is a collection.
- //
- coll.drop();
- for (var i = 0; i < 3; i++) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- var outputCollectionName = "output_col";
- coll.mapReduce(mapFunction, reduceFunction, {query: {a: {$gte: 0}}, out: outputCollectionName});
-
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- coll.mapReduce(mapFunction, reduceFunction, {query: {a: 3, b: 3}, out: {inline: 1}});
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+"use strict";
+
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+
+var testDB = db.getSiblingDB("profile_mapreduce");
+assert.commandWorked(testDB.dropDatabase());
+var conn = testDB.getMongo();
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+var mapFunction = function() {
+ emit(this.a, this.b);
+};
+
+var reduceFunction = function(a, b) {
+ return Array.sum(b);
+};
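+// mapReduce may re-invoke reduce on its own partial output, so the reduce function must be
+// associative and tolerate repeated application; summing the values satisfies both.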
+
+//
+// Confirm metrics for mapReduce with query.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+coll.mapReduce(mapFunction,
+ reduceFunction,
+ {query: {a: {$gte: 0}}, out: {inline: 1}, collation: {locale: "fr"}});
+
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.keysExamined, 3, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 3, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj));
+assert.eq(coll.getName(), profileObj.command.mapreduce, tojson(profileObj));
+assert.eq({locale: "fr"}, profileObj.command.collation, tojson(profileObj));
+assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm metrics for mapReduce with sort stage.
+//
+coll.drop();
+for (var i = 0; i < 5; i++) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+coll.mapReduce(mapFunction, reduceFunction, {sort: {b: 1}, out: {inline: 1}});
+
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm namespace field is correct when output is a collection.
+//
+coll.drop();
+for (var i = 0; i < 3; i++) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+var outputCollectionName = "output_col";
+coll.mapReduce(mapFunction, reduceFunction, {query: {a: {$gte: 0}}, out: outputCollectionName});
+
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+coll.mapReduce(mapFunction, reduceFunction, {query: {a: 3, b: 3}, out: {inline: 1}});
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
})();
diff --git a/jstests/core/profile_no_such_db.js b/jstests/core/profile_no_such_db.js
index 905c49ae409..1ac30b7c22e 100644
--- a/jstests/core/profile_no_such_db.js
+++ b/jstests/core/profile_no_such_db.js
@@ -2,39 +2,38 @@
// Test that reading the profiling level doesn't create databases, but setting it does.
(function(db) {
- 'use strict';
+'use strict';
- function dbExists() {
- return Array.contains(db.getMongo().getDBNames(), db.getName());
- }
+function dbExists() {
+ return Array.contains(db.getMongo().getDBNames(), db.getName());
+}
- db = db.getSiblingDB('profile_no_such_db'); // Note: changes db argument not global var.
- assert.commandWorked(db.dropDatabase());
- assert(!dbExists());
-
- // Reading the profiling level shouldn't create the database.
- var defaultProfilingLevel = db.getProfilingLevel();
- assert(!dbExists());
+db = db.getSiblingDB('profile_no_such_db'); // Note: changes db argument not global var.
+assert.commandWorked(db.dropDatabase());
+assert(!dbExists());
- // This test assumes that the default profiling level hasn't been changed.
- assert.eq(defaultProfilingLevel, 0);
+// Reading the profiling level shouldn't create the database.
+var defaultProfilingLevel = db.getProfilingLevel();
+assert(!dbExists());
- [0, 1, 2].forEach(function(level) {
- jsTest.log('Testing profiling level ' + level);
+// This test assumes that the default profiling level hasn't been changed.
+assert.eq(defaultProfilingLevel, 0);
- // Setting the profiling level creates the database.
- // Note: setting the profiling level to 0 puts the database in a weird state where it
- // exists internally, but doesn't show up in listDatabases, and won't exist if you
- // restart the server.
- var res = db.setProfilingLevel(level);
- assert.eq(res.was, defaultProfilingLevel);
- assert(dbExists() || level == 0);
- assert.eq(db.getProfilingLevel(), level);
+[0, 1, 2].forEach(function(level) {
+ jsTest.log('Testing profiling level ' + level);
- // Dropping the db reverts the profiling level to the default.
- assert.commandWorked(db.dropDatabase());
- assert.eq(db.getProfilingLevel(), defaultProfilingLevel);
- assert(!dbExists());
- });
+ // Setting the profiling level creates the database.
+ // Note: setting the profiling level to 0 puts the database in a weird state where it
+ // exists internally, but doesn't show up in listDatabases, and won't exist if you
+ // restart the server.
+ var res = db.setProfilingLevel(level);
+ assert.eq(res.was, defaultProfilingLevel);
+ assert(dbExists() || level == 0);
+ assert.eq(db.getProfilingLevel(), level);
+ // Dropping the db reverts the profiling level to the default.
+ assert.commandWorked(db.dropDatabase());
+ assert.eq(db.getProfilingLevel(), defaultProfilingLevel);
+ assert(!dbExists());
+});
}(db));
diff --git a/jstests/core/profile_query_hash.js b/jstests/core/profile_query_hash.js
index 4c7b3e23ab7..e635c7b6b56 100644
--- a/jstests/core/profile_query_hash.js
+++ b/jstests/core/profile_query_hash.js
@@ -3,109 +3,106 @@
// Confirms that profile entries for find commands contain the appropriate query hash.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry
- load("jstests/libs/profiler.js");
-
- const testDB = db.getSiblingDB("query_hash");
- assert.commandWorked(testDB.dropDatabase());
-
- const coll = testDB.test;
-
- // Utility function to list query shapes in cache. The length of the list of query shapes
- // returned is used to validate the number of query hashes accumulated.
- function getShapes(collection) {
- const res = collection.runCommand('planCacheListQueryShapes');
- return res.shapes;
- }
-
- assert.writeOK(coll.insert({a: 1, b: 1}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 1, b: 2}));
- assert.writeOK(coll.insert({a: 2, b: 2}));
-
- // We need two indices since we do not currently create cache entries for queries with a single
- // candidate plan.
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-
- assert.commandWorked(testDB.setProfilingLevel(2));
-
- // Executes query0 and gets the corresponding system.profile entry.
- assert.eq(
- 1,
- coll.find({a: 1, b: 1}, {a: 1}).sort({a: -1}).comment("Query0 find command").itcount(),
- 'unexpected document count');
- const profileObj0 =
- getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query0 find command"});
- assert(profileObj0.hasOwnProperty("planCacheKey"), tojson(profileObj0));
- let shapes = getShapes(coll);
- assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
-
- // Executes query1 and gets the corresponding system.profile entry.
- assert.eq(
- 0,
- coll.find({a: 2, b: 1}, {a: 1}).sort({a: -1}).comment("Query1 find command").itcount(),
- 'unexpected document count');
- const profileObj1 =
- getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query1 find command"});
- assert(profileObj1.hasOwnProperty("planCacheKey"), tojson(profileObj1));
-
- // Since the query shapes are the same, we only expect there to be one query shape present in
- // the plan cache commands output.
- shapes = getShapes(coll);
- assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
- assert.eq(
-        profileObj0.planCacheKey, profileObj1.planCacheKey, 'expected query hashes to match');
-
- // Test that the planCacheKey is the same in explain output for query0 and query1 as it was
- // in system.profile output.
- const explainQuery0 = assert.commandWorked(coll.find({a: 1, b: 1}, {a: 1})
- .sort({a: -1})
- .comment("Query0 find command")
- .explain("queryPlanner"));
- assert.eq(explainQuery0.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery0);
- const explainQuery1 = assert.commandWorked(coll.find({a: 2, b: 1}, {a: 1})
- .sort({a: -1})
- .comment("Query1 find command")
- .explain("queryPlanner"));
- assert.eq(explainQuery1.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery1);
-
- // Check that the 'planCacheKey' is the same for both query 0 and query 1.
- assert.eq(explainQuery0.queryPlanner.planCacheKey, explainQuery1.queryPlanner.planCacheKey);
-
- // Executes query2 and gets the corresponding system.profile entry.
- assert.eq(0,
- coll.find({a: 12000, b: 1}).comment("Query2 find command").itcount(),
- 'unexpected document count');
- const profileObj2 =
- getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query2 find command"});
- assert(profileObj2.hasOwnProperty("planCacheKey"), tojson(profileObj2));
-
- // Query0 and query1 should both have the same query hash for the given indexes. Whereas, query2
- // should have a unique hash. Asserts that a total of two distinct hashes results in two query
- // shapes.
- shapes = getShapes(coll);
- assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
- assert.neq(
- profileObj0.planCacheKey, profileObj2.planCacheKey, 'unexpected matching query hashes');
-
- // The planCacheKey in explain should be different for query2 than the hash from query0 and
- // query1.
- const explainQuery2 = assert.commandWorked(
- coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner"));
- assert(explainQuery2.queryPlanner.hasOwnProperty("planCacheKey"));
- assert.neq(explainQuery2.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery2);
- assert.eq(explainQuery2.queryPlanner.planCacheKey, profileObj2.planCacheKey, explainQuery2);
-
- // Now drop an index. This should change the 'planCacheKey' value for queries, but not the
- // 'queryHash'.
- assert.commandWorked(coll.dropIndex({a: 1}));
- const explainQuery2PostCatalogChange = assert.commandWorked(
- coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner"));
- assert.eq(explainQuery2.queryPlanner.queryHash,
- explainQuery2PostCatalogChange.queryPlanner.queryHash);
- assert.neq(explainQuery2.queryPlanner.planCacheKey,
- explainQuery2PostCatalogChange.queryPlanner.planCacheKey);
+"use strict";
+
+// For getLatestProfilerEntry
+load("jstests/libs/profiler.js");
+
+const testDB = db.getSiblingDB("query_hash");
+assert.commandWorked(testDB.dropDatabase());
+
+const coll = testDB.test;
+
+// Utility function to list query shapes in cache. The length of the list of query shapes
+// returned is used to validate the number of query hashes accumulated.
+function getShapes(collection) {
+ const res = collection.runCommand('planCacheListQueryShapes');
+ return res.shapes;
+}
+
+assert.writeOK(coll.insert({a: 1, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 1, b: 2}));
+assert.writeOK(coll.insert({a: 2, b: 2}));
+
+// We need two indices since we do not currently create cache entries for queries with a single
+// candidate plan.
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+
+assert.commandWorked(testDB.setProfilingLevel(2));
+
+// Executes query0 and gets the corresponding system.profile entry.
+assert.eq(1,
+ coll.find({a: 1, b: 1}, {a: 1}).sort({a: -1}).comment("Query0 find command").itcount(),
+ 'unexpected document count');
+const profileObj0 =
+ getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query0 find command"});
+assert(profileObj0.hasOwnProperty("planCacheKey"), tojson(profileObj0));
+let shapes = getShapes(coll);
+assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+
+// Executes query1 and gets the corresponding system.profile entry.
+assert.eq(0,
+ coll.find({a: 2, b: 1}, {a: 1}).sort({a: -1}).comment("Query1 find command").itcount(),
+ 'unexpected document count');
+const profileObj1 =
+ getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query1 find command"});
+assert(profileObj1.hasOwnProperty("planCacheKey"), tojson(profileObj1));
+
+// Since the query shapes are the same, we only expect there to be one query shape present in
+// the plan cache commands output.
+shapes = getShapes(coll);
+assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+assert.eq(
+    profileObj0.planCacheKey, profileObj1.planCacheKey, 'expected query hashes to match');
+
+// Test that the planCacheKey is the same in explain output for query0 and query1 as it was
+// in system.profile output.
+const explainQuery0 = assert.commandWorked(coll.find({a: 1, b: 1}, {a: 1})
+ .sort({a: -1})
+ .comment("Query0 find command")
+ .explain("queryPlanner"));
+assert.eq(explainQuery0.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery0);
+const explainQuery1 = assert.commandWorked(coll.find({a: 2, b: 1}, {a: 1})
+ .sort({a: -1})
+ .comment("Query1 find command")
+ .explain("queryPlanner"));
+assert.eq(explainQuery1.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery1);
+
+// Check that the 'planCacheKey' is the same for both query 0 and query 1.
+assert.eq(explainQuery0.queryPlanner.planCacheKey, explainQuery1.queryPlanner.planCacheKey);
+
+// Executes query2 and gets the corresponding system.profile entry.
+assert.eq(0,
+ coll.find({a: 12000, b: 1}).comment("Query2 find command").itcount(),
+ 'unexpected document count');
+const profileObj2 =
+ getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query2 find command"});
+assert(profileObj2.hasOwnProperty("planCacheKey"), tojson(profileObj2));
+
+// Query0 and query1 should both have the same query hash for the given indexes. Whereas, query2
+// should have a unique hash. Asserts that a total of two distinct hashes results in two query
+// shapes.
+shapes = getShapes(coll);
+assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+assert.neq(profileObj0.planCacheKey, profileObj2.planCacheKey, 'unexpected matching query hashes');
+
+// The planCacheKey in explain should be different for query2 than the hash from query0 and
+// query1.
+const explainQuery2 = assert.commandWorked(
+ coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner"));
+assert(explainQuery2.queryPlanner.hasOwnProperty("planCacheKey"));
+assert.neq(explainQuery2.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery2);
+assert.eq(explainQuery2.queryPlanner.planCacheKey, profileObj2.planCacheKey, explainQuery2);
+
+// Now drop an index. This should change the 'planCacheKey' value for queries, but not the
+// 'queryHash'.
+assert.commandWorked(coll.dropIndex({a: 1}));
+const explainQuery2PostCatalogChange = assert.commandWorked(
+ coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner"));
+assert.eq(explainQuery2.queryPlanner.queryHash,
+ explainQuery2PostCatalogChange.queryPlanner.queryHash);
+assert.neq(explainQuery2.queryPlanner.planCacheKey,
+ explainQuery2PostCatalogChange.queryPlanner.planCacheKey);
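+// "queryHash" identifies only the query shape, while "planCacheKey" also reflects the set of
+// usable indexes; dropping {a: 1} therefore changes the latter without affecting the former.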
})();
diff --git a/jstests/core/profile_repair_cursor.js b/jstests/core/profile_repair_cursor.js
index f22399c58ff..c0b3a34a929 100644
--- a/jstests/core/profile_repair_cursor.js
+++ b/jstests/core/profile_repair_cursor.js
@@ -4,37 +4,40 @@
// correctly.
(function() {
- "use strict";
+"use strict";
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
- var testDB = db.getSiblingDB("profile_repair_cursor");
- var testColl = testDB.testColl;
- assert.commandWorked(testDB.dropDatabase());
+var testDB = db.getSiblingDB("profile_repair_cursor");
+var testColl = testDB.testColl;
+assert.commandWorked(testDB.dropDatabase());
- // Insert some data to scan over.
- assert.writeOK(testColl.insert([{}, {}, {}, {}]));
+// Insert some data to scan over.
+assert.writeOK(testColl.insert([{}, {}, {}, {}]));
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- const profileEntryFilter = {op: "command", "command.repairCursor": testColl.getName()};
+const profileEntryFilter = {
+ op: "command",
+ "command.repairCursor": testColl.getName()
+};
- let cmdRes = testDB.runCommand({repairCursor: testColl.getName()});
- if (cmdRes.code === ErrorCodes.CommandNotSupported) {
- // Some storage engines do not support this command, so we can skip this test.
- return;
- }
- assert.commandWorked(cmdRes);
+let cmdRes = testDB.runCommand({repairCursor: testColl.getName()});
+if (cmdRes.code === ErrorCodes.CommandNotSupported) {
+ // Some storage engines do not support this command, so we can skip this test.
+ return;
+}
+assert.commandWorked(cmdRes);
- assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(),
- 1,
- "expected to find profile entry for a repairCursor command");
+assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(),
+ 1,
+ "expected to find profile entry for a repairCursor command");
- const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1);
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName}));
+const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1);
+cmdRes = assert.commandWorked(
+ testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName}));
- const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"});
- assert.eq(getMoreProfileEntry.originatingCommand.repairCursor, testColl.getName());
+const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"});
+assert.eq(getMoreProfileEntry.originatingCommand.repairCursor, testColl.getName());
})();
diff --git a/jstests/core/profile_sampling.js b/jstests/core/profile_sampling.js
index 9b37e274055..2bd2261031e 100644
--- a/jstests/core/profile_sampling.js
+++ b/jstests/core/profile_sampling.js
@@ -1,64 +1,64 @@
// Confirms that the number of profiled operations is consistent with the sampleRate, if set.
// @tags: [does_not_support_stepdowns, requires_fastcount, requires_profiling]
(function() {
- "use strict";
+"use strict";
- // Use a special db to support running other tests in parallel.
- const profileDB = db.getSisterDB("profile_sampling");
- const coll = profileDB.profile_sampling;
+// Use a special db to support running other tests in parallel.
+const profileDB = db.getSisterDB("profile_sampling");
+const coll = profileDB.profile_sampling;
- profileDB.dropDatabase();
+profileDB.dropDatabase();
- let originalProfilingSettings;
- try {
- originalProfilingSettings = assert.commandWorked(profileDB.setProfilingLevel(0));
- profileDB.system.profile.drop();
- assert.eq(0, profileDB.system.profile.count());
+let originalProfilingSettings;
+try {
+ originalProfilingSettings = assert.commandWorked(profileDB.setProfilingLevel(0));
+ profileDB.system.profile.drop();
+ assert.eq(0, profileDB.system.profile.count());
- profileDB.createCollection(coll.getName());
- assert.writeOK(coll.insert({x: 1}));
+ profileDB.createCollection(coll.getName());
+ assert.writeOK(coll.insert({x: 1}));
- assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0, slowms: -1}));
+ assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0, slowms: -1}));
- assert.neq(null, coll.findOne({x: 1}));
- assert.eq(1, coll.find({x: 1}).count());
- assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
-
- assert.commandWorked(profileDB.setProfilingLevel(0));
+ assert.neq(null, coll.findOne({x: 1}));
+ assert.eq(1, coll.find({x: 1}).count());
+ assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
- assert.eq(0, profileDB.system.profile.count());
+ assert.commandWorked(profileDB.setProfilingLevel(0));
- profileDB.system.profile.drop();
- assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0.5, slowms: -1}));
+ assert.eq(0, profileDB.system.profile.count());
- // This should generate about 500 profile log entries.
- for (let i = 0; i < 500; i++) {
- assert.neq(null, coll.findOne({x: 1}));
- assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
- }
+ profileDB.system.profile.drop();
+ assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0.5, slowms: -1}));
- assert.commandWorked(profileDB.setProfilingLevel(0));
+ // This should generate about 500 profile log entries.
+ for (let i = 0; i < 500; i++) {
+ assert.neq(null, coll.findOne({x: 1}));
+ assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
+ }
- assert.between(10, profileDB.system.profile.count(), 990);
- profileDB.system.profile.drop();
+ assert.commandWorked(profileDB.setProfilingLevel(0));
- // Profiling level of 2 should log all operations, regardless of sample rate setting.
- assert.commandWorked(profileDB.setProfilingLevel(2, {sampleRate: 0}));
-    // This should generate exactly 10 profile log entries.
- for (let i = 0; i < 5; i++) {
- assert.neq(null, coll.findOne({x: 1}));
- assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
- }
- assert.commandWorked(profileDB.setProfilingLevel(0));
- assert.eq(10, profileDB.system.profile.count());
- profileDB.system.profile.drop();
+ assert.between(10, profileDB.system.profile.count(), 990);
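+    // The wide bounds above allow for binomial variance around the expected ~500 entries.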
+ profileDB.system.profile.drop();
- } finally {
- let profileCmd = {};
- profileCmd.profile = originalProfilingSettings.was;
- profileCmd = Object.extend(profileCmd, originalProfilingSettings);
- delete profileCmd.was;
- delete profileCmd.ok;
- assert.commandWorked(profileDB.runCommand(profileCmd));
+ // Profiling level of 2 should log all operations, regardless of sample rate setting.
+ assert.commandWorked(profileDB.setProfilingLevel(2, {sampleRate: 0}));
+    // This should generate exactly 10 profile log entries.
+ for (let i = 0; i < 5; i++) {
+ assert.neq(null, coll.findOne({x: 1}));
+ assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}}));
}
+ assert.commandWorked(profileDB.setProfilingLevel(0));
+ assert.eq(10, profileDB.system.profile.count());
+ profileDB.system.profile.drop();
+
+} finally {
+ let profileCmd = {};
+ profileCmd.profile = originalProfilingSettings.was;
+ profileCmd = Object.extend(profileCmd, originalProfilingSettings);
+ delete profileCmd.was;
+ delete profileCmd.ok;
+ assert.commandWorked(profileDB.runCommand(profileCmd));
+}
}());
diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js
index 685594cb45f..8cde2ea6784 100644
--- a/jstests/core/profile_update.js
+++ b/jstests/core/profile_update.js
@@ -3,124 +3,124 @@
// Confirms that profiled update execution contains all expected metrics with proper values.
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
-    // Set up test db and collection.
- var testDB = db.getSiblingDB("profile_update");
- assert.commandWorked(testDB.dropDatabase());
- var coll = testDB.getCollection("test");
-
- testDB.setProfilingLevel(2);
-
- //
- // Confirm metrics for single document update.
- //
- var i;
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
-
- assert.writeOK(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}}));
-
- var profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "update", tojson(profileObj));
- assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
- assert.eq(profileObj.keysDeleted, 1, tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
- assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm metrics for parameters that require "commands" mode.
- //
-
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0]}));
-
- assert.writeOK(coll.update(
- {_id: 0}, {$set: {"a.$[i]": 1}}, {collation: {locale: "fr"}, arrayFilters: [{i: 0}]}));
-
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "update", tojson(profileObj));
- assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
- assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
- }
-
- //
- // Confirm metrics for multiple indexed document update.
- //
+"use strict";
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+// Set up test db and collection.
+var testDB = db.getSiblingDB("profile_update");
+assert.commandWorked(testDB.dropDatabase());
+var coll = testDB.getCollection("test");
+
+testDB.setProfilingLevel(2);
+
+//
+// Confirm metrics for single document update.
+//
+var i;
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.writeOK(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}}));
+
+var profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "update", tojson(profileObj));
+assert.eq(profileObj.keysExamined, 1, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 1, tojson(profileObj));
+assert.eq(profileObj.keysInserted, 1, tojson(profileObj));
+assert.eq(profileObj.keysDeleted, 1, tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("millis"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj));
+assert(profileObj.hasOwnProperty("locks"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm metrics for parameters that require "commands" mode.
+//
+
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
+ assert.writeOK(coll.insert({_id: 0, a: [0]}));
- assert.writeOK(coll.update({a: {$gte: 5}}, {$set: {c: 1}, $inc: {a: -10}}, {multi: true}));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.keysExamined, 5, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 5, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 5, tojson(profileObj));
- assert.eq(profileObj.keysDeleted, 5, tojson(profileObj));
- assert.eq(profileObj.nMatched, 5, tojson(profileObj));
- assert.eq(profileObj.nModified, 5, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm metrics for insert on update with "upsert: true".
- //
- coll.drop();
- for (i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 1}}, {collation: {locale: "fr"}, arrayFilters: [{i: 0}]}));
- assert.writeOK(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true}));
profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.command,
- {q: {_id: "new value", a: 4}, u: {$inc: {b: 1}}, multi: false, upsert: true},
- tojson(profileObj));
- assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 0, tojson(profileObj));
- assert.eq(profileObj.keysInserted, 2, tojson(profileObj));
- assert.eq(profileObj.nMatched, 0, tojson(profileObj));
- assert.eq(profileObj.nModified, 0, tojson(profileObj));
- assert.eq(profileObj.upsert, true, tojson(profileObj));
- assert.eq(profileObj.planSummary, "IXSCAN { _id: 1 }", tojson(profileObj));
- assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
-
- //
- // Confirm "fromMultiPlanner" metric.
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
- for (i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
-
- assert.writeOK(coll.update({a: 3, b: 3}, {$set: {c: 1}}));
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
- assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+ assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj));
+ assert.eq(profileObj.op, "update", tojson(profileObj));
+ assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj));
+ assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj));
+}
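+// (collation and arrayFilters can only be expressed through the update command, which is why
+// the block above is gated on the "commands" write mode.)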
+
+//
+// Confirm metrics for multiple indexed document update.
+//
+coll.drop();
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.writeOK(coll.update({a: {$gte: 5}}, {$set: {c: 1}, $inc: {a: -10}}, {multi: true}));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.keysExamined, 5, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 5, tojson(profileObj));
+assert.eq(profileObj.keysInserted, 5, tojson(profileObj));
+assert.eq(profileObj.keysDeleted, 5, tojson(profileObj));
+assert.eq(profileObj.nMatched, 5, tojson(profileObj));
+assert.eq(profileObj.nModified, 5, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm metrics for insert on update with "upsert: true".
+//
+coll.drop();
+for (i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+
+assert.writeOK(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true}));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.command,
+ {q: {_id: "new value", a: 4}, u: {$inc: {b: 1}}, multi: false, upsert: true},
+ tojson(profileObj));
+assert.eq(profileObj.keysExamined, 0, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 0, tojson(profileObj));
+assert.eq(profileObj.keysInserted, 2, tojson(profileObj));
+assert.eq(profileObj.nMatched, 0, tojson(profileObj));
+assert.eq(profileObj.nModified, 0, tojson(profileObj));
+assert.eq(profileObj.upsert, true, tojson(profileObj));
+assert.eq(profileObj.planSummary, "IXSCAN { _id: 1 }", tojson(profileObj));
+assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
+
+//
+// Confirm "fromMultiPlanner" metric.
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+for (i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
+
+assert.writeOK(coll.update({a: 3, b: 3}, {$set: {c: 1}}));
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj));
+assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj));
})();
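
For context on the assertions above: profiler entries only appear once profiling is enabled on the database under test. A minimal, self-contained sketch of that setup (the database and collection names here are illustrative, not taken from the test):

const profDB = db.getSiblingDB("profile_sketch");
// Enable profiling of all operations, run a write, then read back the
// newest profiler entry -- the same shape of document asserted on above.
assert.commandWorked(profDB.setProfilingLevel(2));
assert.writeOK(profDB.c.update({a: 1}, {$set: {b: 1}}, {upsert: true}));
const entry = profDB.system.profile.find({op: "update"}).sort({ts: -1}).limit(1).next();
printjson(entry);  // includes op, ns, nMatched, planSummary, millis, appName, ...
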
diff --git a/jstests/core/projection_dotted_paths.js b/jstests/core/projection_dotted_paths.js
index e76feb7a2ee..5af357bde02 100644
--- a/jstests/core/projection_dotted_paths.js
+++ b/jstests/core/projection_dotted_paths.js
@@ -7,90 +7,89 @@
* when appropriate.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- let coll = db["projection_dotted_paths"];
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, "b.c": 1, "b.d": 1, c: 1}));
- assert.writeOK(coll.insert({_id: 1, a: 1, b: {c: 1, d: 1, e: 1}, c: 1, e: 1}));
+let coll = db["projection_dotted_paths"];
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, "b.c": 1, "b.d": 1, c: 1}));
+assert.writeOK(coll.insert({_id: 1, a: 1, b: {c: 1, d: 1, e: 1}, c: 1, e: 1}));
- // Project exactly the set of fields in the index. Verify that the projection is computed
- // correctly and that the plan is covered.
- let resultDoc = coll.findOne({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1});
- assert.eq(resultDoc, {a: 1, b: {c: 1, d: 1}, c: 1});
- let explain =
- coll.find({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Project exactly the set of fields in the index. Verify that the projection is computed
+// correctly and that the plan is covered.
+let resultDoc = coll.findOne({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1});
+assert.eq(resultDoc, {a: 1, b: {c: 1, d: 1}, c: 1});
+let explain = coll.find({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Project a subset of the indexed fields. Verify that the projection is computed correctly and
- // that the plan is covered.
- resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, c: 1});
- assert.eq(resultDoc, {b: {c: 1}, c: 1});
- explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Project a subset of the indexed fields. Verify that the projection is computed correctly and
+// that the plan is covered.
+resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, c: 1});
+assert.eq(resultDoc, {b: {c: 1}, c: 1});
+explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Project exactly the set of fields in the index but also include _id. Verify that the
- // projection is computed correctly and that the plan cannot be covered.
- resultDoc = coll.findOne({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1});
- assert.eq(resultDoc, {_id: 1, a: 1, b: {c: 1, d: 1}, c: 1});
- explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner");
- explain = coll.find({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Project exactly the set of fields in the index but also include _id. Verify that the
+// projection is computed correctly and that the plan cannot be covered.
+resultDoc = coll.findOne({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1});
+assert.eq(resultDoc, {_id: 1, a: 1, b: {c: 1, d: 1}, c: 1});
+explain = coll.find({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Project a not-indexed field that exists in the collection. The plan should not be covered.
- resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1});
- assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1});
- explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Project a non-indexed field that exists in the collection. The plan should not be covered.
+resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1});
+assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1});
+explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Project a not-indexed field that does not exist in the collection. The plan should not be
- // covered.
- resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1});
- assert.eq(resultDoc, {b: {c: 1}, c: 1});
- explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Project a non-indexed field that does not exist in the collection. The plan should not be
+// covered.
+resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1});
+assert.eq(resultDoc, {b: {c: 1}, c: 1});
+explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Verify that the correct projection is computed with an idhack query.
- resultDoc = coll.findOne({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1});
- assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1});
- explain = coll.find({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner");
- assert(isIdhack(db, explain.queryPlanner.winningPlan));
+// Verify that the correct projection is computed with an idhack query.
+resultDoc = coll.findOne({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1});
+assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1});
+explain = coll.find({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner");
+assert(isIdhack(db, explain.queryPlanner.winningPlan));
- // If we make a dotted path multikey, projections using that path cannot be covered. But
- // projections which do not include the multikey path can still be covered.
- assert.writeOK(coll.insert({a: 2, b: {c: 1, d: [1, 2, 3]}}));
+// If we make a dotted path multikey, projections using that path cannot be covered. But
+// projections which do not include the multikey path can still be covered.
+assert.writeOK(coll.insert({a: 2, b: {c: 1, d: [1, 2, 3]}}));
- resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1, "b.d": 1});
- assert.eq(resultDoc, {b: {c: 1, d: [1, 2, 3]}});
- explain = coll.find({a: 2}, {_id: 0, "b.c": 1, "b.d": 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
+resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1, "b.d": 1});
+assert.eq(resultDoc, {b: {c: 1, d: [1, 2, 3]}});
+explain = coll.find({a: 2}, {_id: 0, "b.c": 1, "b.d": 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
- resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1});
- assert.eq(resultDoc, {b: {c: 1}});
- explain = coll.find({a: 2}, {_id: 0, "b.c": 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- // Path-level multikey info allows for generating a covered plan.
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1});
+assert.eq(resultDoc, {b: {c: 1}});
+explain = coll.find({a: 2}, {_id: 0, "b.c": 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+// Path-level multikey info allows for generating a covered plan.
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Verify that dotted projections work for multiple levels of nesting.
- assert.commandWorked(coll.createIndex({a: 1, "x.y.y": 1, "x.y.z": 1, "x.z": 1}));
- assert.writeOK(coll.insert({a: 3, x: {y: {y: 1, f: 1, z: 1}, f: 1, z: 1}}));
- resultDoc = coll.findOne({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1});
- assert.eq(resultDoc, {x: {y: {y: 1, z: 1}, z: 1}});
- explain = coll.find({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}).explain("queryPlanner");
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Verify that dotted projections work for multiple levels of nesting.
+assert.commandWorked(coll.createIndex({a: 1, "x.y.y": 1, "x.y.z": 1, "x.z": 1}));
+assert.writeOK(coll.insert({a: 3, x: {y: {y: 1, f: 1, z: 1}, f: 1, z: 1}}));
+resultDoc = coll.findOne({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1});
+assert.eq(resultDoc, {x: {y: {y: 1, z: 1}, z: 1}});
+explain = coll.find({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}).explain("queryPlanner");
+assert(isIxscan(db, explain.queryPlanner.winningPlan));
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // If projected nested paths do not exist in the indexed document, then they will get filled in
- // with nulls. This is a bug tracked by SERVER-23229.
- resultDoc = coll.findOne({a: 1}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1});
- assert.eq(resultDoc, {x: {y: {y: null, z: null}, z: null}});
+// If projected nested paths do not exist in the indexed document, then they will get filled in
+// with nulls. This is a bug tracked by SERVER-23229.
+resultDoc = coll.findOne({a: 1}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1});
+assert.eq(resultDoc, {x: {y: {y: null, z: null}, z: null}});
}());
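
The isIndexOnly() checks above come from jstests/libs/analyze_plan.js. As a rough standalone illustration of what they verify -- a covered plan reads only index keys and therefore contains no FETCH stage -- here is a sketch, assuming explain trees nest via inputStage/inputStages and using an illustrative collection name:

const cov = db.covered_sketch;
cov.drop();
assert.commandWorked(cov.createIndex({a: 1, "b.c": 1}));
assert.writeOK(cov.insert({a: 1, b: {c: 1}}));

// Walk the winning plan looking for a FETCH stage.
function hasFetch(stage) {
    if (stage.stage === "FETCH")
        return true;
    const kids = stage.inputStage ? [stage.inputStage] : (stage.inputStages || []);
    return kids.some(hasFetch);
}

const exp = cov.find({a: 1}, {_id: 0, a: 1, "b.c": 1}).explain("queryPlanner");
assert(!hasFetch(exp.queryPlanner.winningPlan), "expected an index-only (covered) plan");
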
diff --git a/jstests/core/push2.js b/jstests/core/push2.js
index c8d8e7be64c..10669aa2581 100644
--- a/jstests/core/push2.js
+++ b/jstests/core/push2.js
@@ -1,22 +1,22 @@
(function() {
- t = db.push2;
- t.drop();
+t = db.push2;
+t.drop();
- t.save({_id: 1, a: []});
+t.save({_id: 1, a: []});
- s = new Array(700000).toString();
+s = new Array(700000).toString();
- gotError = null;
+gotError = null;
- for (x = 0; x < 100; x++) {
- print(x + " pushes");
- var res = t.update({}, {$push: {a: s}});
- gotError = res.hasWriteError();
- if (gotError)
- break;
- }
+for (x = 0; x < 100; x++) {
+ print(x + " pushes");
+ var res = t.update({}, {$push: {a: s}});
+ gotError = res.hasWriteError();
+ if (gotError)
+ break;
+}
- assert(gotError, "should have gotten error");
+assert(gotError, "should have gotten error");
- t.drop();
+t.drop();
})();
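
The write error the loop above waits for is the 16MB BSON document limit: new Array(700000).toString() yields 699,999 commas, so roughly two dozen pushes overflow the cap. A quick sketch of the arithmetic, using the shell's Object.bsonsize() (collection name illustrative):

const big = db.push2_sketch;
big.drop();
big.save({_id: 1, a: []});
const str = new Array(700000).toString();  // 699999 bytes of commas
assert.eq(699999, str.length);
assert.writeOK(big.update({}, {$push: {a: str}}));
print(Object.bsonsize(big.findOne()));             // ~0.7MB after one push; the cap is 16MB
print(Math.floor(16 * 1024 * 1024 / str.length));  // 23: pushes that fit under the cap
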
diff --git a/jstests/core/query_hash_stability.js b/jstests/core/query_hash_stability.js
index 14ae20fdb98..4efa9b74e4e 100644
--- a/jstests/core/query_hash_stability.js
+++ b/jstests/core/query_hash_stability.js
@@ -3,54 +3,55 @@
* across catalog changes.
*/
(function() {
- "use strict";
- load('jstests/libs/fixture_helpers.js'); // For and isMongos().
-
- const collName = "query_hash_stability";
- const coll = db[collName];
- coll.drop();
- // Be sure the collection exists.
- assert.commandWorked(coll.insert({x: 5}));
-
- function getPlanCacheKeyFromExplain(explainRes) {
- const hash = FixtureHelpers.isMongos(db)
- ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
- : explainRes.queryPlanner.planCacheKey;
- assert.eq(typeof(hash), "string");
- return hash;
- }
-
- function getQueryHashFromExplain(explainRes) {
- const hash = FixtureHelpers.isMongos(db)
- ? explainRes.queryPlanner.winningPlan.shards[0].queryHash
- : explainRes.queryPlanner.queryHash;
- assert.eq(typeof(hash), "string");
- return hash;
- }
-
- const query = {x: 3};
-
- const initialExplain = coll.find(query).explain();
-
- // Add a sparse index.
- assert.commandWorked(coll.createIndex({x: 1}, {sparse: true}));
-
- const withIndexExplain = coll.find(query).explain();
-
- // 'queryHash' shouldn't change across catalog changes.
- assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(withIndexExplain));
- // We added an index so the plan cache key changed.
- assert.neq(getPlanCacheKeyFromExplain(initialExplain),
- getPlanCacheKeyFromExplain(withIndexExplain));
-
- // Drop the index.
- assert.commandWorked(coll.dropIndex({x: 1}));
- const postDropExplain = coll.find(query).explain();
-
- // 'queryHash' shouldn't change across catalog changes.
- assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(postDropExplain));
-
- // The 'planCacheKey' should be the same as what it was before we dropped the index.
- assert.eq(getPlanCacheKeyFromExplain(initialExplain),
- getPlanCacheKeyFromExplain(postDropExplain));
+"use strict";
+load('jstests/libs/fixture_helpers.js');  // For FixtureHelpers.isMongos().
+
+const collName = "query_hash_stability";
+const coll = db[collName];
+coll.drop();
+// Be sure the collection exists.
+assert.commandWorked(coll.insert({x: 5}));
+
+function getPlanCacheKeyFromExplain(explainRes) {
+ const hash = FixtureHelpers.isMongos(db)
+ ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
+ : explainRes.queryPlanner.planCacheKey;
+ assert.eq(typeof (hash), "string");
+ return hash;
+}
+
+function getQueryHashFromExplain(explainRes) {
+ const hash = FixtureHelpers.isMongos(db)
+ ? explainRes.queryPlanner.winningPlan.shards[0].queryHash
+ : explainRes.queryPlanner.queryHash;
+ assert.eq(typeof (hash), "string");
+ return hash;
+}
+
+const query = {
+ x: 3
+};
+
+const initialExplain = coll.find(query).explain();
+
+// Add a sparse index.
+assert.commandWorked(coll.createIndex({x: 1}, {sparse: true}));
+
+const withIndexExplain = coll.find(query).explain();
+
+// 'queryHash' shouldn't change across catalog changes.
+assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(withIndexExplain));
+// We added an index so the plan cache key changed.
+assert.neq(getPlanCacheKeyFromExplain(initialExplain),
+ getPlanCacheKeyFromExplain(withIndexExplain));
+
+// Drop the index.
+assert.commandWorked(coll.dropIndex({x: 1}));
+const postDropExplain = coll.find(query).explain();
+
+// 'queryHash' shouldn't change across catalog changes.
+assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(postDropExplain));
+
+// The 'planCacheKey' should be the same as what it was before we dropped the index.
+assert.eq(getPlanCacheKeyFromExplain(initialExplain), getPlanCacheKeyFromExplain(postDropExplain));
})();
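
As background for the assertions above: queryHash fingerprints the query shape alone, while planCacheKey additionally folds in the set of usable indexes, so only the latter should change when an index is added or dropped. A standalone sketch, assuming an unsharded deployment (on mongos the fields live under winningPlan.shards[0] instead) and an illustrative collection name:

const qh = db.hash_sketch;
qh.drop();
assert.commandWorked(qh.insert({x: 5}));
const before = qh.find({x: 3}).explain().queryPlanner;
assert.commandWorked(qh.createIndex({x: 1}));
const after = qh.find({x: 3}).explain().queryPlanner;
assert.eq(before.queryHash, after.queryHash);         // same query shape
assert.neq(before.planCacheKey, after.planCacheKey);  // different index set
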
diff --git a/jstests/core/queryoptimizer3.js b/jstests/core/queryoptimizer3.js
index 277ad738ce1..9fa0585991a 100644
--- a/jstests/core/queryoptimizer3.js
+++ b/jstests/core/queryoptimizer3.js
@@ -13,57 +13,57 @@
// ]
(function() {
- 'use strict';
+'use strict';
- var coll = db.jstests_queryoptimizer3;
+var coll = db.jstests_queryoptimizer3;
- var shellWaitHandle = startParallelShell(function() {
- for (var i = 0; i < 400; ++i) {
- sleep(50);
- try {
- db.jstests_queryoptimizer3.drop();
- } catch (e) {
- if (e.code === ErrorCodes.BackgroundOperationInProgressForNamespace) {
- print("Background operation temporarily in progress while attempting to drop " +
- "collection.");
- continue;
- }
- throw e;
+var shellWaitHandle = startParallelShell(function() {
+ for (var i = 0; i < 400; ++i) {
+ sleep(50);
+ try {
+ db.jstests_queryoptimizer3.drop();
+ } catch (e) {
+ if (e.code === ErrorCodes.BackgroundOperationInProgressForNamespace) {
+ print("Background operation temporarily in progress while attempting to drop " +
+ "collection.");
+ continue;
}
+ throw e;
}
- });
+ }
+});
- for (var i = 0; i < 100; ++i) {
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.commandWorked(coll.ensureIndex({b: 1}));
+for (var i = 0; i < 100; ++i) {
+ coll.drop();
+ assert.commandWorked(coll.ensureIndex({a: 1}));
+ assert.commandWorked(coll.ensureIndex({b: 1}));
- var bulk = coll.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; ++j) {
- bulk.insert({a: j, b: j});
- }
- assert.commandWorked(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 100; ++j) {
+ bulk.insert({a: j, b: j});
+ }
+ assert.commandWorked(bulk.execute());
- try {
- var m = i % 5;
- if (m == 0) {
- coll.count({a: {$gte: 0}, b: {$gte: 0}});
- } else if (m == 1) {
- coll.find({a: {$gte: 0}, b: {$gte: 0}}).itcount();
- } else if (m == 2) {
- coll.remove({a: {$gte: 0}, b: {$gte: 0}});
- } else if (m == 3) {
- coll.update({a: {$gte: 0}, b: {$gte: 0}}, {});
- } else if (m == 4) {
- coll.distinct('x', {a: {$gte: 0}, b: {$gte: 0}});
- }
- } catch (e) {
- print("Op killed during yield: " + e.message);
+ try {
+ var m = i % 5;
+ if (m == 0) {
+ coll.count({a: {$gte: 0}, b: {$gte: 0}});
+ } else if (m == 1) {
+ coll.find({a: {$gte: 0}, b: {$gte: 0}}).itcount();
+ } else if (m == 2) {
+ coll.remove({a: {$gte: 0}, b: {$gte: 0}});
+ } else if (m == 3) {
+ coll.update({a: {$gte: 0}, b: {$gte: 0}}, {});
+ } else if (m == 4) {
+ coll.distinct('x', {a: {$gte: 0}, b: {$gte: 0}});
}
+ } catch (e) {
+ print("Op killed during yield: " + e.message);
}
+}
- shellWaitHandle();
+shellWaitHandle();
- // Ensure that the server is still responding
- assert.commandWorked(db.runCommand({isMaster: 1}));
+// Ensure that the server is still responding
+assert.commandWorked(db.runCommand({isMaster: 1}));
})();
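
startParallelShell, used above and in several later tests, spawns the given function in a separate mongo shell against the same server and returns a handle to join on. A minimal sketch of the pattern, assuming both shells use the default "test" database (collection name illustrative):

db.parallel_sketch.drop();
const join = startParallelShell(function() {
    // This body runs in its own shell process against the same mongod.
    assert.writeOK(db.parallel_sketch.insert({from: "parallel"}));
});
join();  // Blocks until the parallel shell exits (and asserts a clean exit).
assert.eq(1, db.parallel_sketch.count({from: "parallel"}));
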
diff --git a/jstests/core/read_after_optime.js b/jstests/core/read_after_optime.js
index 33c5594d742..15ca380de47 100644
--- a/jstests/core/read_after_optime.js
+++ b/jstests/core/read_after_optime.js
@@ -1,14 +1,14 @@
// Test that attempting to read after optime fails if replication is not enabled.
(function() {
- "use strict";
+"use strict";
- var currentTime = new Date();
+var currentTime = new Date();
- var futureOpTime = new Timestamp((currentTime / 1000 + 3600), 0);
+var futureOpTime = new Timestamp((currentTime / 1000 + 3600), 0);
- assert.commandFailedWithCode(
- db.runCommand(
- {find: 'user', filter: {x: 1}, readConcern: {afterOpTime: {ts: futureOpTime, t: 0}}}),
- [ErrorCodes.NotAReplicaSet, ErrorCodes.NotImplemented]);
+assert.commandFailedWithCode(
+ db.runCommand(
+ {find: 'user', filter: {x: 1}, readConcern: {afterOpTime: {ts: futureOpTime, t: 0}}}),
+ [ErrorCodes.NotAReplicaSet, ErrorCodes.NotImplemented]);
})();
diff --git a/jstests/core/record_store_count.js b/jstests/core/record_store_count.js
index 2748b451031..61a1680fa94 100644
--- a/jstests/core/record_store_count.js
+++ b/jstests/core/record_store_count.js
@@ -7,78 +7,81 @@ load("jstests/libs/analyze_plan.js"); // For 'planHasStage'.
load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded.
(function() {
- "use strict";
+"use strict";
- var coll = db.record_store_count;
- coll.drop();
+var coll = db.record_store_count;
+coll.drop();
- assert.writeOK(coll.insert({x: 0}));
- assert.writeOK(coll.insert({x: 1}));
+assert.writeOK(coll.insert({x: 0}));
+assert.writeOK(coll.insert({x: 1}));
- assert.commandWorked(coll.ensureIndex({x: 1}));
+assert.commandWorked(coll.ensureIndex({x: 1}));
- //
- // Logically empty predicates should use the record store's count.
- //
- // If the collection is sharded, however, then we can't use fast count, since we need to perform
- // shard filtering to avoid counting data that is not logically owned by the shard.
- //
- var explain = coll.explain().count({});
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN"));
- if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT"));
- }
+//
+// Logically empty predicates should use the record store's count.
+//
+// If the collection is sharded, however, then we can't use fast count, since we need to perform
+// shard filtering to avoid counting data that is not logically owned by the shard.
+//
+var explain = coll.explain().count({});
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN"));
+if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
+ assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT"));
+}
- explain = coll.explain().count({$comment: "hi"});
- assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN"));
- if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT"));
- }
+explain = coll.explain().count({$comment: "hi"});
+assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN"));
+if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
+ assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT"));
+}
- //
- // A non-empty query predicate should prevent the use of the record store's count.
- //
+//
+// A non-empty query predicate should prevent the use of the record store's count.
+//
- function checkPlan(plan, expectedStages, unexpectedStages) {
- for (let stage of expectedStages) {
- assert(planHasStage(db, plan, stage));
- }
- for (let stage of unexpectedStages) {
- assert(!planHasStage(db, plan, stage));
- }
+function checkPlan(plan, expectedStages, unexpectedStages) {
+ for (let stage of expectedStages) {
+ assert(planHasStage(db, plan, stage));
+ }
+ for (let stage of unexpectedStages) {
+ assert(!planHasStage(db, plan, stage));
}
+}
- function testExplainAndExpectStage({expectedStages, unexpectedStages, hintIndex}) {
- explain = coll.explain().find({x: 0}).hint(hintIndex).count();
- checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
+function testExplainAndExpectStage({expectedStages, unexpectedStages, hintIndex}) {
+ explain = coll.explain().find({x: 0}).hint(hintIndex).count();
+ checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
- explain = coll.explain().find({x: 0, $comment: "hi"}).hint(hintIndex).count();
- checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
- }
+ explain = coll.explain().find({x: 0, $comment: "hi"}).hint(hintIndex).count();
+ checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
+}
- if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
- // In an unsharded collection we can use the COUNT_SCAN stage.
- testExplainAndExpectStage(
- {expectedStages: ["COUNT_SCAN"], unexpectedStages: [], hintIndex: {x: 1}});
- return;
- }
+if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
+ // In an unsharded collection we can use the COUNT_SCAN stage.
+ testExplainAndExpectStage(
+ {expectedStages: ["COUNT_SCAN"], unexpectedStages: [], hintIndex: {x: 1}});
+ return;
+}
- // The remainder of the test is only relevant for sharded clusters.
+// The remainder of the test is only relevant for sharded clusters.
- // Without an index on the shard key, the entire document will have to be fetched.
- testExplainAndExpectStage({
- expectedStages: ["COUNT", "SHARDING_FILTER", "FETCH"],
- unexpectedStages: [],
- hintIndex: {x: 1}
- });
+// Without an index on the shard key, the entire document will have to be fetched.
+testExplainAndExpectStage({
+ expectedStages: ["COUNT", "SHARDING_FILTER", "FETCH"],
+ unexpectedStages: [],
+ hintIndex: {x: 1}
+});
- // Add an index which includes the shard key. This means the FETCH should no longer be necesary
- // since the SHARDING_FILTER can get the shard key straight from the index.
- const kNewIndexSpec = {x: 1, _id: 1};
- assert.commandWorked(coll.ensureIndex(kNewIndexSpec));
- testExplainAndExpectStage({
- expectedStages: ["COUNT", "SHARDING_FILTER"],
- unexpectedStages: ["FETCH"],
- hintIndex: kNewIndexSpec
- });
+// Add an index which includes the shard key. This means the FETCH should no longer be necessary
+// since the SHARDING_FILTER can get the shard key straight from the index.
+const kNewIndexSpec = {
+ x: 1,
+ _id: 1
+};
+assert.commandWorked(coll.ensureIndex(kNewIndexSpec));
+testExplainAndExpectStage({
+ expectedStages: ["COUNT", "SHARDING_FILTER"],
+ unexpectedStages: ["FETCH"],
+ hintIndex: kNewIndexSpec
+});
})();
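
For context on the stage names asserted above: an empty count predicate lets the server answer from the record store's cached document count, while a predicate on an indexed field is typically answered by a COUNT_SCAN over the index without fetching documents. A sketch on an unsharded collection (names illustrative; the exact stages can vary by deployment):

const cnt = db.count_sketch;
cnt.drop();
assert.writeOK(cnt.insert([{x: 0}, {x: 1}]));
assert.commandWorked(cnt.createIndex({x: 1}));

// Empty predicate: eligible for RECORD_STORE_FAST_COUNT.
printjson(cnt.explain().count({}).queryPlanner.winningPlan);

// Indexed predicate: a COUNT_SCAN over {x: 1}, no document fetch needed.
printjson(cnt.explain().find({x: 0}).hint({x: 1}).count().queryPlanner.winningPlan);
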
diff --git a/jstests/core/recursion.js b/jstests/core/recursion.js
index 6f6e5c906af..617a51edccd 100644
--- a/jstests/core/recursion.js
+++ b/jstests/core/recursion.js
@@ -7,29 +7,28 @@
// ]
(function() {
- "use strict";
+"use strict";
- db.recursion.drop();
+db.recursion.drop();
- // Make sure the shell doesn't blow up
- function shellRecursion() {
- shellRecursion.apply();
- }
- assert.throws(shellRecursion);
+// Make sure the shell doesn't blow up
+function shellRecursion() {
+ shellRecursion.apply();
+}
+assert.throws(shellRecursion);
- // Make sure mapReduce doesn't blow up
- function mapReduceRecursion() {
- db.recursion.mapReduce(
- function() {
- (function recursion() {
- recursion.apply();
- })();
- },
- function() {},
- {out: 'inline'});
- }
+// Make sure mapReduce doesn't blow up
+function mapReduceRecursion() {
+ db.recursion.mapReduce(
+ function() {
+ (function recursion() {
+ recursion.apply();
+ })();
+ },
+ function() {},
+ {out: 'inline'});
+}
- db.recursion.insert({});
- assert.commandFailedWithCode(assert.throws(mapReduceRecursion),
- ErrorCodes.JSInterpreterFailure);
+db.recursion.insert({});
+assert.commandFailedWithCode(assert.throws(mapReduceRecursion), ErrorCodes.JSInterpreterFailure);
}());
diff --git a/jstests/core/regex.js b/jstests/core/regex.js
index 1c6a9d6a3bb..488d41f41d0 100644
--- a/jstests/core/regex.js
+++ b/jstests/core/regex.js
@@ -1,83 +1,85 @@
(function() {
- 'use strict';
-
- const t = db.jstests_regex;
-
- const isMaster = db.runCommand("ismaster");
- assert.commandWorked(isMaster);
- const isMongos = (isMaster.msg === "isdbgrid");
-
- t.drop();
- assert.writeOK(t.save({a: "bcd"}));
- assert.eq(1, t.count({a: /b/}), "A");
- assert.eq(1, t.count({a: /bc/}), "B");
- assert.eq(1, t.count({a: /bcd/}), "C");
- assert.eq(0, t.count({a: /bcde/}), "D");
-
- t.drop();
- assert.writeOK(t.save({a: {b: "cde"}}));
- assert.eq(1, t.count({'a.b': /de/}), "E");
-
- t.drop();
- assert.writeOK(t.save({a: {b: ["cde"]}}));
- assert.eq(1, t.count({'a.b': /de/}), "F");
-
- t.drop();
- assert.writeOK(t.save({a: [{b: "cde"}]}));
- assert.eq(1, t.count({'a.b': /de/}), "G");
-
- t.drop();
- assert.writeOK(t.save({a: [{b: ["cde"]}]}));
- assert.eq(1, t.count({'a.b': /de/}), "H");
-
- //
- // Confirm match and explain serialization for $elemMatch with $regex.
- //
- t.drop();
- assert.writeOK(t.insert({x: ["abc"]}));
-
- const query = {x: {$elemMatch: {$regex: 'ABC', $options: 'i'}}};
- assert.eq(1, t.count(query));
-
- const result = t.find(query).explain();
- assert.commandWorked(result);
-
- if (!isMongos) {
- assert(result.hasOwnProperty("queryPlanner"));
- assert(result.queryPlanner.hasOwnProperty("parsedQuery"), tojson(result));
- assert.eq(result.queryPlanner.parsedQuery, query);
- }
-
- //
- // Disallow embedded null bytes when using $regex syntax.
- //
- t.drop();
- assert.throws(function() {
- t.find({a: {$regex: "a\0b", $options: "i"}}).itcount();
- });
- assert.throws(function() {
- t.find({a: {$regex: "ab", $options: "i\0"}}).itcount();
- });
- assert.throws(function() {
- t.find({key: {$regex: 'abcd\0xyz'}}).explain();
- });
-
- //
- // Confirm $options and mode specified in $regex are not allowed to be specified together.
- //
- t.drop();
- assert.commandWorked(t.insert({x: ["abc"]}));
-
- let regexFirst = assert.throws(() => t.find({x: {$regex: /ab/i, $options: 's'}}).itcount());
- assert.commandFailedWithCode(regexFirst, 51075);
-
- let optsFirst = assert.throws(() => t.find({x: {$options: 's', $regex: /ab/i}}).itcount());
- assert.commandFailedWithCode(optsFirst, 51074);
-
- t.drop();
- assert.commandWorked(t.save({x: ["abc"]}));
-
- assert.eq(1, t.count({x: {$regex: /ABC/i}}));
- assert.eq(1, t.count({x: {$regex: /ABC/, $options: 'i'}}));
- assert.eq(1, t.count({x: {$options: 'i', $regex: /ABC/}}));
+'use strict';
+
+const t = db.jstests_regex;
+
+const isMaster = db.runCommand("ismaster");
+assert.commandWorked(isMaster);
+const isMongos = (isMaster.msg === "isdbgrid");
+
+t.drop();
+assert.writeOK(t.save({a: "bcd"}));
+assert.eq(1, t.count({a: /b/}), "A");
+assert.eq(1, t.count({a: /bc/}), "B");
+assert.eq(1, t.count({a: /bcd/}), "C");
+assert.eq(0, t.count({a: /bcde/}), "D");
+
+t.drop();
+assert.writeOK(t.save({a: {b: "cde"}}));
+assert.eq(1, t.count({'a.b': /de/}), "E");
+
+t.drop();
+assert.writeOK(t.save({a: {b: ["cde"]}}));
+assert.eq(1, t.count({'a.b': /de/}), "F");
+
+t.drop();
+assert.writeOK(t.save({a: [{b: "cde"}]}));
+assert.eq(1, t.count({'a.b': /de/}), "G");
+
+t.drop();
+assert.writeOK(t.save({a: [{b: ["cde"]}]}));
+assert.eq(1, t.count({'a.b': /de/}), "H");
+
+//
+// Confirm match and explain serialization for $elemMatch with $regex.
+//
+t.drop();
+assert.writeOK(t.insert({x: ["abc"]}));
+
+const query = {
+ x: {$elemMatch: {$regex: 'ABC', $options: 'i'}}
+};
+assert.eq(1, t.count(query));
+
+const result = t.find(query).explain();
+assert.commandWorked(result);
+
+if (!isMongos) {
+ assert(result.hasOwnProperty("queryPlanner"));
+ assert(result.queryPlanner.hasOwnProperty("parsedQuery"), tojson(result));
+ assert.eq(result.queryPlanner.parsedQuery, query);
+}
+
+//
+// Disallow embedded null bytes when using $regex syntax.
+//
+t.drop();
+assert.throws(function() {
+ t.find({a: {$regex: "a\0b", $options: "i"}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$regex: "ab", $options: "i\0"}}).itcount();
+});
+assert.throws(function() {
+ t.find({key: {$regex: 'abcd\0xyz'}}).explain();
+});
+
+//
+// Confirm $options and mode specified in $regex are not allowed to be specified together.
+//
+t.drop();
+assert.commandWorked(t.insert({x: ["abc"]}));
+
+let regexFirst = assert.throws(() => t.find({x: {$regex: /ab/i, $options: 's'}}).itcount());
+assert.commandFailedWithCode(regexFirst, 51075);
+
+let optsFirst = assert.throws(() => t.find({x: {$options: 's', $regex: /ab/i}}).itcount());
+assert.commandFailedWithCode(optsFirst, 51074);
+
+t.drop();
+assert.commandWorked(t.save({x: ["abc"]}));
+
+assert.eq(1, t.count({x: {$regex: /ABC/i}}));
+assert.eq(1, t.count({x: {$regex: /ABC/, $options: 'i'}}));
+assert.eq(1, t.count({x: {$options: 'i', $regex: /ABC/}}));
})();
diff --git a/jstests/core/regex5.js b/jstests/core/regex5.js
index 6d11fce5578..69537e149e5 100644
--- a/jstests/core/regex5.js
+++ b/jstests/core/regex5.js
@@ -15,7 +15,6 @@ a = /.*b.*c/;
x = /.*y.*/;
doit = function() {
-
assert.eq(1, t.find({x: a}).count(), "A");
assert.eq(2, t.find({x: x}).count(), "B");
assert.eq(2, t.find({x: {$in: [x]}}).count(), "C"); // SERVER-322
diff --git a/jstests/core/regex_error.js b/jstests/core/regex_error.js
index a6deb56c460..19e191d754c 100644
--- a/jstests/core/regex_error.js
+++ b/jstests/core/regex_error.js
@@ -2,14 +2,13 @@
* Test that the server errors when given an invalid regex.
*/
(function() {
- const coll = db.regex_error;
- coll.drop();
+const coll = db.regex_error;
+coll.drop();
- // Run some invalid regexes.
- assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "[)"}}}), 51091);
- assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "ab\0c"}}}),
- ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- coll.runCommand("find", {filter: {a: {$regex: "ab", $options: "\0i"}}}),
- ErrorCodes.BadValue);
+// Run some invalid regexes.
+assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "[)"}}}), 51091);
+assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "ab\0c"}}}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ coll.runCommand("find", {filter: {a: {$regex: "ab", $options: "\0i"}}}), ErrorCodes.BadValue);
})();
diff --git a/jstests/core/regex_limit.js b/jstests/core/regex_limit.js
index 71a8c4e915c..31f72b758f4 100644
--- a/jstests/core/regex_limit.js
+++ b/jstests/core/regex_limit.js
@@ -2,25 +2,25 @@
* Test the behavior of very, very long regex patterns.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.regex_limit;
- coll.drop();
+const coll = db.regex_limit;
+coll.drop();
- const kMaxRegexPatternLen = 32761;
+const kMaxRegexPatternLen = 32761;
- // Populate the collection with a document containing a very long string.
- assert.commandWorked(coll.insert({z: "c".repeat(100000)}));
+// Populate the collection with a document containing a very long string.
+assert.commandWorked(coll.insert({z: "c".repeat(100000)}));
- // Test that a regex exactly at the maximum allowable pattern length can find a document.
- const patternMaxLen = "c".repeat(kMaxRegexPatternLen);
- assert.eq(1, coll.find({z: {$regex: patternMaxLen}}).itcount());
- assert.eq(1, coll.find({z: {$in: [new RegExp(patternMaxLen)]}}).itcount());
+// Test that a regex exactly at the maximum allowable pattern length can find a document.
+const patternMaxLen = "c".repeat(kMaxRegexPatternLen);
+assert.eq(1, coll.find({z: {$regex: patternMaxLen}}).itcount());
+assert.eq(1, coll.find({z: {$in: [new RegExp(patternMaxLen)]}}).itcount());
- // Test that a regex pattern exceeding the limit fails.
- const patternTooLong = "c".repeat(kMaxRegexPatternLen + 1);
- assert.commandFailedWithCode(coll.runCommand("find", {filter: {z: {$regex: patternTooLong}}}),
- 51091);
- assert.commandFailedWithCode(
- coll.runCommand("find", {filter: {z: {$in: [new RegExp(patternTooLong)]}}}), 51091);
+// Test that a regex pattern exceeding the limit fails.
+const patternTooLong = "c".repeat(kMaxRegexPatternLen + 1);
+assert.commandFailedWithCode(coll.runCommand("find", {filter: {z: {$regex: patternTooLong}}}),
+ 51091);
+assert.commandFailedWithCode(
+ coll.runCommand("find", {filter: {z: {$in: [new RegExp(patternTooLong)]}}}), 51091);
}());
diff --git a/jstests/core/regex_unicode.js b/jstests/core/regex_unicode.js
index 32a3d177831..2befd6f700c 100644
--- a/jstests/core/regex_unicode.js
+++ b/jstests/core/regex_unicode.js
@@ -2,113 +2,122 @@
* Test regexes with various Unicode options.
*/
(function() {
- "use strict";
-
- const coll = db.getCollection("regex_unicode");
- coll.drop();
-
- // Populate the collection with strings containing ASCII and non-ASCII characters.
- let docAllAscii = {_id: 0, text: "kyle"};
- let docNoAscii = {_id: 1, text: "박정수"};
- let docMixed = {_id: 2, text: "suárez"};
- [docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc)));
-
- /**
- * Helper function that asserts that a find command with a filter on the "text" field using
- * 'regex' returns 'expected' when sorting by _id ascending.
- */
- function assertFindResultsEq(regex, expected) {
- const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray();
- const errfn =
- `Regex query "${regex}" returned ${tojson(res)} ` + `but expected ${tojson(expected)}`;
- assert.eq(res, expected, errfn);
- }
-
- // Sanity check on exact characters.
- assertFindResultsEq("y", [docAllAscii]);
- assertFindResultsEq("e", [docAllAscii, docMixed]);
- assertFindResultsEq("á", [docMixed]);
- assertFindResultsEq("정", [docNoAscii]);
-
- // Test that the (*UTF) and (*UTF8) options are accepted.
- assertFindResultsEq("(*UTF)e", [docAllAscii, docMixed]);
- assertFindResultsEq("(*UTF)á", [docMixed]);
- assertFindResultsEq("(*UTF)정", [docNoAscii]);
- assertFindResultsEq("(*UTF8)e", [docAllAscii, docMixed]);
- assertFindResultsEq("(*UTF8)á", [docMixed]);
- assertFindResultsEq("(*UTF8)정", [docNoAscii]);
-
- // Test that regexes support Unicode character properties.
- assertFindResultsEq(String.raw `\p{Latin}`, [docAllAscii, docMixed]);
- assertFindResultsEq(String.raw `^\p{Latin}+$`, [docAllAscii, docMixed]);
- assertFindResultsEq(String.raw `\p{Hangul}`, [docNoAscii]);
- assertFindResultsEq(String.raw `^\p{Hangul}+$`, [docNoAscii]);
- assertFindResultsEq(String.raw `^\p{L}+$`, [docAllAscii, docNoAscii, docMixed]);
- assertFindResultsEq(String.raw `^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]);
-
- // Tests for the '\w' character type, which matches any "word" character. In the default mode,
- // characters outside of the ASCII code point range are excluded.
-
- // An unanchored regex should match the two documents that contain at least one ASCII character.
- assertFindResultsEq(String.raw `\w`, [docAllAscii, docMixed]);
-
- // This anchored regex will only match the document with exclusively ASCII characters, since the
- // Unicode character in the mixed document will prevent it from being considered all "word"
- // characters.
- assertFindResultsEq(String.raw `^\w+$`, [docAllAscii]);
-
- // When the (*UCP) option is specified, Unicode "word" characters are included in the '\w'
- // character type, so all three documents should match.
- assertFindResultsEq(String.raw `(*UCP)\w`, [docAllAscii, docNoAscii, docMixed]);
- assertFindResultsEq(String.raw `(*UCP)^\w+$`, [docAllAscii, docNoAscii, docMixed]);
-
- // By default, the [:alpha:] character class matches ASCII alphabetic characters.
- assertFindResultsEq("[[:alpha:]]", [docAllAscii, docMixed]);
- assertFindResultsEq("^[[:alpha:]]+$", [docAllAscii]);
-
- // When the (*UCP) option is specified, [:alpha:] becomes \p{L} and matches all Unicode
- // alphabetic characters.
- assertFindResultsEq("(*UCP)[[:alpha:]]", [docAllAscii, docNoAscii, docMixed]);
- assertFindResultsEq("(*UCP)^[[:alpha:]]+$", [docAllAscii, docNoAscii, docMixed]);
-
- // Drop the collection and repopulate it with numerical characters.
- coll.drop();
- docAllAscii = {_id: 0, text: "02191996"};
- docNoAscii = {_id: 1, text: "༢༣༤༥"};
- docMixed = {_id: 2, text: "9୩୪୬୯6"};
- [docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc)));
-
- // Sanity check on exact characters.
- assertFindResultsEq("1", [docAllAscii]);
- assertFindResultsEq("9", [docAllAscii, docMixed]);
- assertFindResultsEq("୪", [docMixed]);
- assertFindResultsEq("༣", [docNoAscii]);
-
- // Test that the regexes are matched by the numeric Unicode character property.
- assertFindResultsEq(String.raw `^\p{N}+$`, [docAllAscii, docNoAscii, docMixed]);
- assertFindResultsEq(String.raw `^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]);
-
- // Tests for the '\d' character type, which matches any "digit" character. In the default mode,
- // characters outside of the ASCII code point range are excluded.
- // An unanchored regex should match the two documents that contain at least one ASCII character.
- assertFindResultsEq(String.raw `\d`, [docAllAscii, docMixed]);
-
- // This anchored regex will only match the document with exclusively ASCII characters, since the
- // Unicode character in the mixed document will prevent it from being considered all "digit"
- // characters.
- assertFindResultsEq(String.raw `^\d+$`, [docAllAscii]);
-
- // When the (*UCP) option is specified, Unicode "digit" characters are included in the '\d'
- // character type, so all three documents should match.
- assertFindResultsEq(String.raw `(*UCP)\d`, [docAllAscii, docNoAscii, docMixed]);
- assertFindResultsEq(String.raw `(*UCP)^\d+$`, [docAllAscii, docNoAscii, docMixed]);
-
- // By default, the [:digit:] character class matches ASCII decimal digit characters.
- assertFindResultsEq("[[:digit:]]", [docAllAscii, docMixed]);
- assertFindResultsEq("^[[:digit:]]+$", [docAllAscii]);
-
- // When the (*UCP) option is specified, [:digit:] becomes \p{N} and matches all Unicode
- // decimal digit characters.
- assertFindResultsEq("(*UCP)[[:digit:]]", [docAllAscii, docNoAscii, docMixed]);
- assertFindResultsEq("(*UCP)^[[:digit:]]+$", [docAllAscii, docNoAscii, docMixed]);
+"use strict";
+
+const coll = db.getCollection("regex_unicode");
+coll.drop();
+
+// Populate the collection with strings containing ASCII and non-ASCII characters.
+let docAllAscii = {_id: 0, text: "kyle"};
+let docNoAscii = {_id: 1, text: "박정수"};
+let docMixed = {_id: 2, text: "suárez"};
+[docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc)));
+
+/**
+ * Helper function that asserts that a find command with a filter on the "text" field using
+ * 'regex' returns 'expected' when sorting by _id ascending.
+ */
+function assertFindResultsEq(regex, expected) {
+ const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray();
+ const errfn = `Regex query "${regex}" returned ${tojson(res)} ` +
+ `but expected ${tojson(expected)}`;
+ assert.eq(res, expected, errfn);
+}
+
+// Sanity check on exact characters.
+assertFindResultsEq("y", [docAllAscii]);
+assertFindResultsEq("e", [docAllAscii, docMixed]);
+assertFindResultsEq("á", [docMixed]);
+assertFindResultsEq("정", [docNoAscii]);
+
+// Test that the (*UTF) and (*UTF8) options are accepted.
+assertFindResultsEq("(*UTF)e", [docAllAscii, docMixed]);
+assertFindResultsEq("(*UTF)á", [docMixed]);
+assertFindResultsEq("(*UTF)정", [docNoAscii]);
+assertFindResultsEq("(*UTF8)e", [docAllAscii, docMixed]);
+assertFindResultsEq("(*UTF8)á", [docMixed]);
+assertFindResultsEq("(*UTF8)정", [docNoAscii]);
+
+// Test that regexes support Unicode character properties.
+assertFindResultsEq(String.raw`\p{Latin}`, [docAllAscii, docMixed]);
+assertFindResultsEq(String.raw`^\p{Latin}+$`, [docAllAscii, docMixed]);
+assertFindResultsEq(String.raw`\p{Hangul}`, [docNoAscii]);
+assertFindResultsEq(String.raw`^\p{Hangul}+$`, [docNoAscii]);
+assertFindResultsEq(String.raw`^\p{L}+$`, [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq(String.raw`^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]);
+
+// Tests for the '\w' character type, which matches any "word" character. In the default mode,
+// characters outside of the ASCII code point range are excluded.
+
+// An unanchored regex matches the two documents that contain at least one ASCII word character.
+assertFindResultsEq(String.raw`\w`, [docAllAscii, docMixed]);
+
+// This anchored regex will only match the document with exclusively ASCII characters, since the
+// Unicode character in the mixed document will prevent it from being considered all "word"
+// characters.
+assertFindResultsEq(String.raw`^\w+$`, [docAllAscii]);
+
+// When the (*UCP) option is specified, Unicode "word" characters are included in the '\w'
+// character type, so all three documents should match.
+assertFindResultsEq(String.raw`(*UCP)\w`, [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq(String.raw`(*UCP)^\w+$`, [docAllAscii, docNoAscii, docMixed]);
+
+// By default, the [:alpha:] character class matches ASCII alphabetic characters.
+assertFindResultsEq("[[:alpha:]]", [docAllAscii, docMixed]);
+assertFindResultsEq("^[[:alpha:]]+$", [docAllAscii]);
+
+// When the (*UCP) option is specified, [:alpha:] becomes \p{L} and matches all Unicode
+// alphabetic characters.
+assertFindResultsEq("(*UCP)[[:alpha:]]", [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq("(*UCP)^[[:alpha:]]+$", [docAllAscii, docNoAscii, docMixed]);
+
+// Drop the collection and repopulate it with numerical characters.
+coll.drop();
+docAllAscii = {
+ _id: 0,
+ text: "02191996"
+};
+docNoAscii = {
+ _id: 1,
+ text: "༢༣༤༥"
+};
+docMixed = {
+ _id: 2,
+ text: "9୩୪୬୯6"
+};
+[docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc)));
+
+// Sanity check on exact characters.
+assertFindResultsEq("1", [docAllAscii]);
+assertFindResultsEq("9", [docAllAscii, docMixed]);
+assertFindResultsEq("୪", [docMixed]);
+assertFindResultsEq("༣", [docNoAscii]);
+
+// Test that the regexes are matched by the numeric Unicode character property.
+assertFindResultsEq(String.raw`^\p{N}+$`, [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq(String.raw`^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]);
+
+// Tests for the '\d' character type, which matches any "digit" character. In the default mode,
+// characters outside of the ASCII code point range are excluded.
+// An unanchored regex matches the two documents that contain at least one ASCII digit character.
+assertFindResultsEq(String.raw`\d`, [docAllAscii, docMixed]);
+
+// This anchored regex will only match the document with exclusively ASCII characters, since the
+// Unicode character in the mixed document will prevent it from being considered all "digit"
+// characters.
+assertFindResultsEq(String.raw`^\d+$`, [docAllAscii]);
+
+// When the (*UCP) option is specified, Unicode "digit" characters are included in the '\d'
+// character type, so all three documents should match.
+assertFindResultsEq(String.raw`(*UCP)\d`, [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq(String.raw`(*UCP)^\d+$`, [docAllAscii, docNoAscii, docMixed]);
+
+// By default, the [:digit:] character class matches ASCII decimal digit characters.
+assertFindResultsEq("[[:digit:]]", [docAllAscii, docMixed]);
+assertFindResultsEq("^[[:digit:]]+$", [docAllAscii]);
+
+// When the (*UCP) option is specified, [:digit:] becomes \p{N} and matches all Unicode
+// decimal digit characters.
+assertFindResultsEq("(*UCP)[[:digit:]]", [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq("(*UCP)^[[:digit:]]+$", [docAllAscii, docNoAscii, docMixed]);
}());
diff --git a/jstests/core/regex_util.js b/jstests/core/regex_util.js
index b0c7791b6c1..7d87ac5f283 100644
--- a/jstests/core/regex_util.js
+++ b/jstests/core/regex_util.js
@@ -1,26 +1,26 @@
// Tests for RegExp.escape
(function() {
- var TEST_STRINGS = [
- "[db]",
- "{ab}",
- "<c2>",
- "(abc)",
- "^first^",
- "&addr",
- "k@10gen.com",
- "#4",
- "!b",
- "<>3",
- "****word+",
- "\t| |\n\r",
- "Mongo-db",
- "[{(<>)}]!@#%^&*+\\"
- ];
+var TEST_STRINGS = [
+ "[db]",
+ "{ab}",
+ "<c2>",
+ "(abc)",
+ "^first^",
+ "&addr",
+ "k@10gen.com",
+ "#4",
+ "!b",
+ "<>3",
+ "****word+",
+ "\t| |\n\r",
+ "Mongo-db",
+ "[{(<>)}]!@#%^&*+\\"
+];
- TEST_STRINGS.forEach(function(str) {
- var escaped = RegExp.escape(str);
- var regex = new RegExp(escaped);
- assert(regex.test(str), "Wrong escape for " + str);
- });
+TEST_STRINGS.forEach(function(str) {
+ var escaped = RegExp.escape(str);
+ var regex = new RegExp(escaped);
+ assert(regex.test(str), "Wrong escape for " + str);
+});
})();
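
Note that RegExp.escape here is a helper provided by the mongo shell itself (plain ECMAScript had no such function at the time of this commit); it backslash-escapes regex metacharacters so the escaped pattern matches the original string literally:

const raw = "[{(<>)}]!@#%^&*+\\";
const esc = RegExp.escape(raw);     // mongo shell helper, not standard JavaScript
assert(new RegExp(esc).test(raw));  // the escaped pattern matches raw literally
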
diff --git a/jstests/core/regex_verbs.js b/jstests/core/regex_verbs.js
index 92a03af1b4d..52ac9bb07bf 100644
--- a/jstests/core/regex_verbs.js
+++ b/jstests/core/regex_verbs.js
@@ -2,48 +2,54 @@
* Tests regular expressions and the use of various UCP verbs.
*/
(function() {
- "use strict";
-
- const coll = db.getCollection("regex_backtracking_verbs");
- coll.drop();
-
- const docA = {_id: 0, text: "a"};
- const docB = {_id: 1, text: "b"};
- [docA, docB].forEach(doc => assert.commandWorked(coll.insert(doc)));
-
- /**
- * Helper function that asserts that a find command with a filter on the "text" field using
- * 'regex' returns 'expected' when sorting by _id ascending.
- */
- function assertFindResultsEq(regex, expected) {
- const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray();
- const errfn = `Regex query ${tojson(regex)} returned ${tojson(res)} ` +
- `but expected ${tojson(expected)}`;
- assert.eq(res, expected, errfn);
- }
-
- const assertMatchesEverything = (regex) => assertFindResultsEq(regex, [docA, docB]);
- const assertMatchesNothing = (regex) => assertFindResultsEq(regex, []);
-
- // On encountering FAIL, the pattern immediately does not match.
- assertMatchesNothing("(*FAIL)");
- assertMatchesNothing("a(*FAIL)");
- assertMatchesNothing("(*FAIL)b");
-
- // On encountering ACCEPT, the pattern immediately matches.
- assertMatchesEverything("(*ACCEPT)");
- assertMatchesEverything("(*ACCEPT)a");
- assertMatchesEverything("(*ACCEPT)c");
- assertFindResultsEq("b(*ACCEPT)", [docB]);
-
- // The following tests simply assert that the backtracking verbs are accepted and do not
- // influence matching.
- ["COMMIT", "PRUNE", "PRUNE:FOO", "SKIP", "SKIP:BAR", "THEN", "THEN:BAZ"].forEach(verb => {
- // Verb by itself is the same as an empty regex and matches everything.
- assertMatchesEverything(`(*${verb})`);
-
- // Verb with pattern does not affect the "matchiness" of the pattern.
- assertFindResultsEq(`(*${verb})a`, [docA]);
- assertFindResultsEq(`(*${verb})[Bb]`, [docB]);
- });
+"use strict";
+
+const coll = db.getCollection("regex_backtracking_verbs");
+coll.drop();
+
+const docA = {
+ _id: 0,
+ text: "a"
+};
+const docB = {
+ _id: 1,
+ text: "b"
+};
+[docA, docB].forEach(doc => assert.commandWorked(coll.insert(doc)));
+
+/**
+ * Helper function that asserts that a find command with a filter on the "text" field using
+ * 'regex' returns 'expected' when sorting by _id ascending.
+ */
+function assertFindResultsEq(regex, expected) {
+ const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray();
+ const errfn = `Regex query ${tojson(regex)} returned ${tojson(res)} ` +
+ `but expected ${tojson(expected)}`;
+ assert.eq(res, expected, errfn);
+}
+
+const assertMatchesEverything = (regex) => assertFindResultsEq(regex, [docA, docB]);
+const assertMatchesNothing = (regex) => assertFindResultsEq(regex, []);
+
+// On encountering FAIL, the pattern immediately does not match.
+assertMatchesNothing("(*FAIL)");
+assertMatchesNothing("a(*FAIL)");
+assertMatchesNothing("(*FAIL)b");
+
+// On encountering ACCEPT, the pattern immediately matches.
+assertMatchesEverything("(*ACCEPT)");
+assertMatchesEverything("(*ACCEPT)a");
+assertMatchesEverything("(*ACCEPT)c");
+assertFindResultsEq("b(*ACCEPT)", [docB]);
+
+// The following tests simply assert that the backtracking verbs are accepted and do not
+// influence matching.
+["COMMIT", "PRUNE", "PRUNE:FOO", "SKIP", "SKIP:BAR", "THEN", "THEN:BAZ"].forEach(verb => {
+ // Verb by itself is the same as an empty regex and matches everything.
+ assertMatchesEverything(`(*${verb})`);
+
+ // Verb with pattern does not affect the "matchiness" of the pattern.
+ assertFindResultsEq(`(*${verb})a`, [docA]);
+ assertFindResultsEq(`(*${verb})[Bb]`, [docB]);
+});
}());
diff --git a/jstests/core/remove2.js b/jstests/core/remove2.js
index 601684f5041..50fe507c134 100644
--- a/jstests/core/remove2.js
+++ b/jstests/core/remove2.js
@@ -3,45 +3,45 @@
// remove2.js
// a unit test for db remove
(function() {
- "use strict";
-
- const t = db.removetest2;
-
- function f() {
- t.save({
- x: [3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6],
- z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- });
- t.save({x: 9});
- t.save({x: 1});
-
- t.remove({x: 3});
-
- assert(t.findOne({x: 3}) == null);
- assert(t.validate().valid);
- }
-
- function g() {
- t.save({x: [3, 4, 5, 6], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
- t.save({x: [7, 8, 9], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
-
- const res = t.remove({x: {$gte: 3}});
-
- assert.writeOK(res);
- assert(t.findOne({x: 3}) == null);
- assert(t.findOne({x: 8}) == null);
- assert(t.validate().valid);
- }
-
- t.drop();
- f();
- t.drop();
- g();
-
- t.ensureIndex({x: 1});
- t.remove({});
- f();
- t.drop();
- t.ensureIndex({x: 1});
- g();
+"use strict";
+
+const t = db.removetest2;
+
+function f() {
+ t.save({
+ x: [3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6],
+ z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ });
+ t.save({x: 9});
+ t.save({x: 1});
+
+ t.remove({x: 3});
+
+ assert(t.findOne({x: 3}) == null);
+ assert(t.validate().valid);
+}
+
+function g() {
+ t.save({x: [3, 4, 5, 6], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
+ t.save({x: [7, 8, 9], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
+
+ const res = t.remove({x: {$gte: 3}});
+
+ assert.writeOK(res);
+ assert(t.findOne({x: 3}) == null);
+ assert(t.findOne({x: 8}) == null);
+ assert(t.validate().valid);
+}
+
+t.drop();
+f();
+t.drop();
+g();
+
+t.ensureIndex({x: 1});
+t.remove({});
+f();
+t.drop();
+t.ensureIndex({x: 1});
+g();
})();
diff --git a/jstests/core/remove9.js b/jstests/core/remove9.js
index 8762e3944ff..888625764ec 100644
--- a/jstests/core/remove9.js
+++ b/jstests/core/remove9.js
@@ -7,32 +7,32 @@
// SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries.
(function() {
- "use strict";
+"use strict";
- const t = db.jstests_remove9;
- t.drop();
- t.ensureIndex({i: 1});
-
- const bulk = t.initializeUnorderedBulkOp();
- for (let i = 0; i < 1000; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
+const t = db.jstests_remove9;
+t.drop();
+t.ensureIndex({i: 1});
- const s = startParallelShell(function() {
- const t = db.jstests_remove9;
- Random.setRandomSeed();
- for (let j = 0; j < 5000; ++j) {
- const i = Random.randInt(499) * 2;
- t.update({i: i}, {$set: {i: 2000}});
- t.remove({i: 2000});
- t.save({i: i});
- }
- });
+const bulk = t.initializeUnorderedBulkOp();
+for (let i = 0; i < 1000; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute());
- for (let i = 0; i < 1000; ++i) {
- assert.eq(500, t.find({i: {$gte: 0, $mod: [2, 1]}}).hint({i: 1}).itcount());
+const s = startParallelShell(function() {
+ const t = db.jstests_remove9;
+ Random.setRandomSeed();
+ for (let j = 0; j < 5000; ++j) {
+ const i = Random.randInt(499) * 2;
+ t.update({i: i}, {$set: {i: 2000}});
+ t.remove({i: 2000});
+ t.save({i: i});
}
+});
+
+for (let i = 0; i < 1000; ++i) {
+ assert.eq(500, t.find({i: {$gte: 0, $mod: [2, 1]}}).hint({i: 1}).itcount());
+}
- s();
+s();
})();
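
The $mod predicate above is easy to misread: it takes [divisor, remainder] and matches documents where field % divisor == remainder, so {$mod: [2, 1]} selects the odd values. A tiny sketch (collection name illustrative):

const modColl = db.mod_sketch;
modColl.drop();
assert.writeOK(modColl.insert([{i: 0}, {i: 1}, {i: 2}, {i: 3}]));
assert.eq(2, modColl.find({i: {$mod: [2, 1]}}).itcount());  // matches i = 1 and i = 3
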
diff --git a/jstests/core/remove_undefined.js b/jstests/core/remove_undefined.js
index c0c031a5763..6b97cc5d053 100644
--- a/jstests/core/remove_undefined.js
+++ b/jstests/core/remove_undefined.js
@@ -1,32 +1,35 @@
// @tags: [requires_non_retryable_writes, requires_fastcount]
(function() {
- "use strict";
+"use strict";
- const coll = db.remove_undefined;
- coll.drop();
+const coll = db.remove_undefined;
+coll.drop();
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: null}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 2}));
+assert.writeOK(coll.insert({_id: null}));
- const obj = {foo: 1, nullElem: null};
+const obj = {
+ foo: 1,
+ nullElem: null
+};
- coll.remove({x: obj.bar});
- assert.eq(3, coll.count());
+coll.remove({x: obj.bar});
+assert.eq(3, coll.count());
- coll.remove({x: undefined});
- assert.eq(3, coll.count());
+coll.remove({x: undefined});
+assert.eq(3, coll.count());
- assert.writeErrorWithCode(coll.remove({_id: obj.bar}), ErrorCodes.BadValue);
- assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue);
+assert.writeErrorWithCode(coll.remove({_id: obj.bar}), ErrorCodes.BadValue);
+assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue);
- coll.remove({_id: obj.nullElem});
- assert.eq(2, coll.count());
+coll.remove({_id: obj.nullElem});
+assert.eq(2, coll.count());
- assert.writeOK(coll.insert({_id: null}));
- assert.eq(3, coll.count());
+assert.writeOK(coll.insert({_id: null}));
+assert.eq(3, coll.count());
- assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue);
- assert.eq(3, coll.count());
+assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue);
+assert.eq(3, coll.count());
})();
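
A brief recap of the semantics remove_undefined.js pins down, as a minimal sketch (the collection name is hypothetical, and it assumes a mongo shell connected as `db`): an `undefined` filter value never deletes anything and is rejected with BadValue when used against `_id`, whereas `null` is an ordinary query value.

const c = db.remove_undefined_sketch;  // hypothetical scratch collection
c.drop();
assert.writeOK(c.insert({_id: 1}));
assert.writeOK(c.insert({_id: null}));

// An undefined filter value never deletes anything...
c.remove({x: undefined});
assert.eq(2, c.count());
// ...and against _id it is reported as an error.
assert.writeErrorWithCode(c.remove({_id: undefined}), ErrorCodes.BadValue);

// null is an ordinary query value: it matches the stored null _id.
c.remove({_id: null});
assert.eq(1, c.count());
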
diff --git a/jstests/core/removea.js b/jstests/core/removea.js
index 082833b503a..ee914662d92 100644
--- a/jstests/core/removea.js
+++ b/jstests/core/removea.js
@@ -2,31 +2,31 @@
// Test removal of a substantial proportion of inserted documents.
(function() {
- "use strict";
+"use strict";
- const t = db.jstests_removea;
+const t = db.jstests_removea;
- Random.setRandomSeed();
+Random.setRandomSeed();
- for (let v = 0; v < 2; ++v) { // Try each index version.
- t.drop();
- t.ensureIndex({a: 1}, {v: v});
- const S = 100;
- const B = 100;
- for (let x = 0; x < S; x++) {
- let batch = [];
- for (let y = 0; y < B; y++) {
- let i = y + (B * x);
- batch.push({a: i});
- }
- assert.writeOK(t.insert(batch));
+for (let v = 0; v < 2; ++v) { // Try each index version.
+ t.drop();
+ t.ensureIndex({a: 1}, {v: v});
+ const S = 100;
+ const B = 100;
+ for (let x = 0; x < S; x++) {
+ let batch = [];
+ for (let y = 0; y < B; y++) {
+ let i = y + (B * x);
+ batch.push({a: i});
}
- assert.eq(t.count(), S * B);
+ assert.writeOK(t.insert(batch));
+ }
+ assert.eq(t.count(), S * B);
- let toDrop = [];
- for (let i = 0; i < S * B; ++i) {
- toDrop.push(Random.randInt(10000)); // Dups in the query will be ignored.
- }
- assert.writeOK(t.remove({a: {$in: toDrop}}));
+ let toDrop = [];
+ for (let i = 0; i < S * B; ++i) {
+ toDrop.push(Random.randInt(10000)); // Dups in the query will be ignored.
}
+ assert.writeOK(t.remove({a: {$in: toDrop}}));
+}
})();
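
The "Dups in the query will be ignored" comment above refers to $in semantics: repeated values in the $in array do not change which documents match. A small illustrative sketch (collection name hypothetical):

const dup = db.removea_sketch;
dup.drop();
assert.writeOK(dup.insert([{a: 1}, {a: 2}]));
// [1, 1, 1] matches exactly the same documents as [1].
assert.writeOK(dup.remove({a: {$in: [1, 1, 1]}}));
assert.eq(1, dup.count());
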
diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js
index 4cf00d46ffa..eeed0fc30bc 100644
--- a/jstests/core/removeb.js
+++ b/jstests/core/removeb.js
@@ -7,57 +7,57 @@
// Test removal of Records that have been reused since the remove operation began. SERVER-5198
(function() {
- "use strict";
+"use strict";
- const t = db.jstests_removeb;
- t.drop();
+const t = db.jstests_removeb;
+t.drop();
- t.ensureIndex({a: 1});
+t.ensureIndex({a: 1});
- // Make the index multikey to trigger cursor dedup checking.
- t.insert({a: [-1, -2]});
- t.remove({});
+// Make the index multikey to trigger cursor dedup checking.
+t.insert({a: [-1, -2]});
+t.remove({});
- const insertDocs = function(collection, nDocs) {
- print("Bulk inserting " + nDocs + " documents");
+const insertDocs = function(collection, nDocs) {
+ print("Bulk inserting " + nDocs + " documents");
- const bulk = collection.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; ++i) {
- bulk.insert({a: i});
- }
+ const bulk = collection.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; ++i) {
+ bulk.insert({a: i});
+ }
- assert.writeOK(bulk.execute());
+ assert.writeOK(bulk.execute());
- print("Bulk insert " + nDocs + " documents completed");
- };
+ print("Bulk insert " + nDocs + " documents completed");
+};
- insertDocs(t, 20000);
+insertDocs(t, 20000);
- const p = startParallelShell(function() {
- // Wait until the remove operation (below) begins running.
- while (db.jstests_removeb.count() === 20000) {
- }
+const p = startParallelShell(function() {
+ // Wait until the remove operation (below) begins running.
+ while (db.jstests_removeb.count() === 20000) {
+ }
- // Insert documents with increasing 'a' values. These inserted documents may
- // reuse Records freed by the remove operation in progress and will be
- // visited by the remove operation if it has not completed.
- for (let i = 20000; i < 40000; i += 100) {
- const bulk = db.jstests_removeb.initializeUnorderedBulkOp();
- for (let j = 0; j < 100; ++j) {
- bulk.insert({a: i + j});
- }
- assert.writeOK(bulk.execute());
- if (i % 1000 === 0) {
- print(i - 20000 + " of second set of 20000 documents inserted");
- }
+ // Insert documents with increasing 'a' values. These inserted documents may
+ // reuse Records freed by the remove operation in progress and will be
+ // visited by the remove operation if it has not completed.
+ for (let i = 20000; i < 40000; i += 100) {
+ const bulk = db.jstests_removeb.initializeUnorderedBulkOp();
+ for (let j = 0; j < 100; ++j) {
+ bulk.insert({a: i + j});
+ }
+ assert.writeOK(bulk.execute());
+ if (i % 1000 === 0) {
+ print(i - 20000 + " of second set of 20000 documents inserted");
}
- });
+ }
+});
- // Remove using the a:1 index in ascending direction.
- var res = t.remove({a: {$gte: 0}});
- assert(!res.hasWriteError(), 'The remove operation failed.');
+// Remove using the a:1 index in ascending direction.
+var res = t.remove({a: {$gte: 0}});
+assert(!res.hasWriteError(), 'The remove operation failed.');
- p();
+p();
- t.drop();
+t.drop();
})();
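
The pattern above, spawn a second shell, busy-wait until the main shell's operation becomes observable, then join via the returned function, is the standard startParallelShell idiom. A stripped-down sketch under the same assumptions (collection name hypothetical):

const join = startParallelShell(function() {
    // Poll until the main shell's insert is visible, then do the racing work.
    assert.soon(function() {
        return db.parallel_sketch.count() > 0;
    });
    assert.writeOK(db.parallel_sketch.insert({from: "parallel"}));
});
assert.writeOK(db.parallel_sketch.insert({from: "main"}));
join();  // Join: throws if the parallel shell exited with a non-zero code.
db.parallel_sketch.drop();
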
diff --git a/jstests/core/rename6.js b/jstests/core/rename6.js
index dbdf677a811..faa36a448dd 100644
--- a/jstests/core/rename6.js
+++ b/jstests/core/rename6.js
@@ -6,33 +6,33 @@
// @tags: [requires_non_retryable_commands, assumes_unsharded_collection]
(function() {
- 'use strict';
+'use strict';
- const testDB = db.getSiblingDB("test");
- const c = "rename2c";
- const dbc = testDB.getCollection(c);
- const d = "dest4567890123456789012345678901234567890123456789012345678901234567890";
- const dbd = testDB.getCollection(d);
+const testDB = db.getSiblingDB("test");
+const c = "rename2c";
+const dbc = testDB.getCollection(c);
+const d = "dest4567890123456789012345678901234567890123456789012345678901234567890";
+const dbd = testDB.getCollection(d);
- dbc.drop();
- dbd.drop();
+dbc.drop();
+dbd.drop();
- dbc.ensureIndex({
- "name": 1,
- "date": 1,
- "time": 1,
- "renameCollection": 1,
- "mongodb": 1,
- "testing": 1,
- "data": 1
- });
+dbc.ensureIndex({
+ "name": 1,
+ "date": 1,
+ "time": 1,
+ "renameCollection": 1,
+ "mongodb": 1,
+ "testing": 1,
+ "data": 1
+});
- // Checking for the newly created index and the _id index in the original collection.
- assert.eq(2, dbc.getIndexes().length, "Long Rename Init");
- // Should succeed in renaming collection as the long index namespace is acceptable.
- assert.commandWorked(dbc.renameCollection(d), "Long Rename Exec");
- // Since we succeeded, we should have the 2 indexes moved and no indexes under the old collection
- // name.
- assert.eq(0, dbc.getIndexes().length, "Long Rename Result 1");
- assert.eq(2, dbd.getIndexes().length, "Long Rename Result 2");
+// Checking for the newly created index and the _id index in the original collection.
+assert.eq(2, dbc.getIndexes().length, "Long Rename Init");
+// Should succeed in renaming collection as the long index namespace is acceptable.
+assert.commandWorked(dbc.renameCollection(d), "Long Rename Exec");
+// Since we succeeded, we should have the 2 indexes moved and no indexes under the old collection
+// name.
+assert.eq(0, dbc.getIndexes().length, "Long Rename Result 1");
+assert.eq(2, dbd.getIndexes().length, "Long Rename Result 2");
})();
diff --git a/jstests/core/rename_change_target_type.js b/jstests/core/rename_change_target_type.js
index 25fbcfb0f01..859e1add0b2 100644
--- a/jstests/core/rename_change_target_type.js
+++ b/jstests/core/rename_change_target_type.js
@@ -1,15 +1,15 @@
// Test that a rename that overwrites its destination with an equivalent value of a different type
// updates the type of the destination (SERVER-32109).
(function() {
- "use strict";
+"use strict";
- let coll = db.rename_change_target_type;
- coll.drop();
+let coll = db.rename_change_target_type;
+coll.drop();
- assert.writeOK(coll.insert({to: NumberLong(100), from: 100}));
- assert.writeOK(coll.update({}, {$rename: {from: "to"}}));
+assert.writeOK(coll.insert({to: NumberLong(100), from: 100}));
+assert.writeOK(coll.update({}, {$rename: {from: "to"}}));
- let aggResult = coll.aggregate([{$project: {toType: {$type: "$to"}}}]).toArray();
- assert.eq(aggResult.length, 1);
- assert.eq(aggResult[0].toType, "double", "Incorrect type resulting from $rename");
+let aggResult = coll.aggregate([{$project: {toType: {$type: "$to"}}}]).toArray();
+assert.eq(aggResult.length, 1);
+assert.eq(aggResult[0].toType, "double", "Incorrect type resulting from $rename");
})();
diff --git a/jstests/core/restart_catalog.js b/jstests/core/restart_catalog.js
index 19bd0f9f27c..bf254537239 100644
--- a/jstests/core/restart_catalog.js
+++ b/jstests/core/restart_catalog.js
@@ -12,127 +12,128 @@
* ]
*/
(function() {
- "use strict";
-
- // Only run this test if the storage engine is "wiredTiger" or "inMemory".
- const acceptedStorageEngines = ["wiredTiger", "inMemory"];
- const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (!acceptedStorageEngines.includes(currentStorageEngine)) {
- jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
- " storage engine");
- return;
- }
-
- // Helper function for sorting documents in JavaScript.
- function sortOnId(doc1, doc2) {
- return bsonWoCompare({_: doc1._id}, {_: doc2._id});
- }
-
- const testDB = db.getSiblingDB("restart_catalog");
- const artistsColl = testDB.getCollection("artists");
- const songsColl = testDB.getCollection("songs");
- artistsColl.drop();
- songsColl.drop();
-
- // Populate some data into the collection.
- const artists = [
- {_id: "beyonce"},
- {_id: "fenech-soler"},
- {_id: "gallant"},
- ];
- for (let artist of artists) {
- assert.commandWorked(artistsColl.insert(artist));
- }
-
- const songs = [
- {_id: "flawless", artist: "beyonce", sales: 5000},
- {_id: "conversation", artist: "fenech-soler", sales: 75.5},
- {_id: "kaleidoscope", artist: "fenech-soler", sales: 30.0},
- {_id: "miyazaki", artist: "gallant", sales: 400.3},
- {_id: "percogesic", artist: "gallant", sales: 550.8},
- {_id: "shotgun", artist: "gallant", sales: 300.0},
- ];
- for (let song of songs) {
- assert.commandWorked(songsColl.insert(song, {writeConcern: {w: "majority"}}));
- }
-
- // Perform some queries.
- function assertQueriesFindExpectedData() {
- assert.eq(artistsColl.find().sort({_id: 1}).toArray(), artists);
- assert.eq(songsColl.find().sort({_id: 1}).toArray(), songs.sort(sortOnId));
-
- const songsWithLotsOfSales = songs.filter(song => song.sales > 500).sort(sortOnId);
- assert.eq(songsColl.find({sales: {$gt: 500}}).sort({_id: 1}).toArray(),
- songsWithLotsOfSales);
-
- const songsByGallant = songs.filter(song => song.artist === "gallant").sort(sortOnId);
- assert.eq(songsColl.aggregate([{$match: {artist: "gallant"}}, {$sort: {_id: 1}}]).toArray(),
- songsByGallant);
-
- const initialValue = 0;
- const totalSales = songs.reduce((total, song) => total + song.sales, initialValue);
- assert.eq(songsColl
- .aggregate([{$group: {_id: null, totalSales: {$sum: "$sales"}}}],
- {readConcern: {level: "majority"}})
- .toArray(),
- [{_id: null, totalSales: totalSales}]);
- }
- assertQueriesFindExpectedData();
-
- // Remember what indexes are present, then restart the catalog.
- const songIndexesBeforeRestart = songsColl.getIndexes().sort(sortOnId);
- const artistIndexesBeforeRestart = artistsColl.getIndexes().sort(sortOnId);
- assert.commandWorked(db.adminCommand({restartCatalog: 1}));
-
- // Access the query plan cache. (This makes no assumptions about the state of the plan cache
- // after restart; however, the database definitely should not crash.)
- [songsColl, artistsColl].forEach(coll => {
- assert.commandWorked(coll.runCommand("planCacheListPlans", {query: {_id: 1}}));
- assert.commandWorked(coll.runCommand("planCacheListQueryShapes"));
- assert.commandWorked(coll.runCommand("planCacheClear"));
- });
-
- // Verify that the data in the collections has not changed.
- assertQueriesFindExpectedData();
-
- // Verify that both collections have the same indexes as prior to the restart.
- const songIndexesAfterRestart = songsColl.getIndexes().sort(sortOnId);
- assert.eq(songIndexesBeforeRestart, songIndexesAfterRestart);
- const artistIndexesAfterRestart = artistsColl.getIndexes().sort(sortOnId);
- assert.eq(artistIndexesBeforeRestart, artistIndexesAfterRestart);
-
- // Create new indexes and run more queries.
- assert.commandWorked(songsColl.createIndex({sales: 1}));
- assert.commandWorked(songsColl.createIndex({artist: 1, sales: 1}));
- assertQueriesFindExpectedData();
-
- // Modify an existing collection.
- assert.commandWorked(artistsColl.runCommand("collMod", {validator: {_id: {$type: "string"}}}));
- assert.writeErrorWithCode(artistsColl.insert({_id: 7}), ErrorCodes.DocumentValidationFailure);
-
- // Perform another write, implicitly creating a new collection and database.
- const secondTestDB = db.getSiblingDB("restart_catalog_2");
- const foodColl = secondTestDB.getCollection("food");
- foodColl.drop();
- const doc = {_id: "apple", category: "fruit"};
- assert.commandWorked(foodColl.insert(doc));
- assert.eq(foodColl.find().toArray(), [doc]);
-
- // Build a new index on the new collection.
- assert.commandWorked(foodColl.createIndex({category: -1}));
- assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
-
- // The restartCatalog command kills all cursors. Test that a getMore on a cursor that existed
- // during restartCatalog fails with the appropriate error code. We insert a second document so
- // that we can make a query happen in two batches.
- assert.commandWorked(foodColl.insert({_id: "orange"}));
- let cursorResponse = assert.commandWorked(
- secondTestDB.runCommand({find: foodColl.getName(), filter: {}, batchSize: 1}));
- assert.eq(cursorResponse.cursor.firstBatch.length, 1);
- assert.neq(cursorResponse.cursor.id, 0);
- assert.commandWorked(secondTestDB.adminCommand({restartCatalog: 1}));
- assert.commandFailedWithCode(
- secondTestDB.runCommand(
- {getMore: cursorResponse.cursor.id, collection: foodColl.getName()}),
- ErrorCodes.QueryPlanKilled);
+"use strict";
+
+// Only run this test if the storage engine is "wiredTiger" or "inMemory".
+const acceptedStorageEngines = ["wiredTiger", "inMemory"];
+const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (!acceptedStorageEngines.includes(currentStorageEngine)) {
+ jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
+ " storage engine");
+ return;
+}
+
+// Helper function for sorting documents in JavaScript.
+function sortOnId(doc1, doc2) {
+ return bsonWoCompare({_: doc1._id}, {_: doc2._id});
+}
+
+const testDB = db.getSiblingDB("restart_catalog");
+const artistsColl = testDB.getCollection("artists");
+const songsColl = testDB.getCollection("songs");
+artistsColl.drop();
+songsColl.drop();
+
+// Populate some data into the collection.
+const artists = [
+ {_id: "beyonce"},
+ {_id: "fenech-soler"},
+ {_id: "gallant"},
+];
+for (let artist of artists) {
+ assert.commandWorked(artistsColl.insert(artist));
+}
+
+const songs = [
+ {_id: "flawless", artist: "beyonce", sales: 5000},
+ {_id: "conversation", artist: "fenech-soler", sales: 75.5},
+ {_id: "kaleidoscope", artist: "fenech-soler", sales: 30.0},
+ {_id: "miyazaki", artist: "gallant", sales: 400.3},
+ {_id: "percogesic", artist: "gallant", sales: 550.8},
+ {_id: "shotgun", artist: "gallant", sales: 300.0},
+];
+for (let song of songs) {
+ assert.commandWorked(songsColl.insert(song, {writeConcern: {w: "majority"}}));
+}
+
+// Perform some queries.
+function assertQueriesFindExpectedData() {
+ assert.eq(artistsColl.find().sort({_id: 1}).toArray(), artists);
+ assert.eq(songsColl.find().sort({_id: 1}).toArray(), songs.sort(sortOnId));
+
+ const songsWithLotsOfSales = songs.filter(song => song.sales > 500).sort(sortOnId);
+ assert.eq(songsColl.find({sales: {$gt: 500}}).sort({_id: 1}).toArray(), songsWithLotsOfSales);
+
+ const songsByGallant = songs.filter(song => song.artist === "gallant").sort(sortOnId);
+ assert.eq(songsColl.aggregate([{$match: {artist: "gallant"}}, {$sort: {_id: 1}}]).toArray(),
+ songsByGallant);
+
+ const initialValue = 0;
+ const totalSales = songs.reduce((total, song) => total + song.sales, initialValue);
+ assert.eq(songsColl
+ .aggregate([{$group: {_id: null, totalSales: {$sum: "$sales"}}}],
+ {readConcern: {level: "majority"}})
+ .toArray(),
+ [{_id: null, totalSales: totalSales}]);
+}
+assertQueriesFindExpectedData();
+
+// Remember what indexes are present, then restart the catalog.
+const songIndexesBeforeRestart = songsColl.getIndexes().sort(sortOnId);
+const artistIndexesBeforeRestart = artistsColl.getIndexes().sort(sortOnId);
+assert.commandWorked(db.adminCommand({restartCatalog: 1}));
+
+// Access the query plan cache. (This makes no assumptions about the state of the plan cache
+// after restart; however, the database definitely should not crash.)
+[songsColl, artistsColl].forEach(coll => {
+ assert.commandWorked(coll.runCommand("planCacheListPlans", {query: {_id: 1}}));
+ assert.commandWorked(coll.runCommand("planCacheListQueryShapes"));
+ assert.commandWorked(coll.runCommand("planCacheClear"));
+});
+
+// Verify that the data in the collections has not changed.
+assertQueriesFindExpectedData();
+
+// Verify that both collections have the same indexes as prior to the restart.
+const songIndexesAfterRestart = songsColl.getIndexes().sort(sortOnId);
+assert.eq(songIndexesBeforeRestart, songIndexesAfterRestart);
+const artistIndexesAfterRestart = artistsColl.getIndexes().sort(sortOnId);
+assert.eq(artistIndexesBeforeRestart, artistIndexesAfterRestart);
+
+// Create new indexes and run more queries.
+assert.commandWorked(songsColl.createIndex({sales: 1}));
+assert.commandWorked(songsColl.createIndex({artist: 1, sales: 1}));
+assertQueriesFindExpectedData();
+
+// Modify an existing collection.
+assert.commandWorked(artistsColl.runCommand("collMod", {validator: {_id: {$type: "string"}}}));
+assert.writeErrorWithCode(artistsColl.insert({_id: 7}), ErrorCodes.DocumentValidationFailure);
+
+// Perform another write, implicitly creating a new collection and database.
+const secondTestDB = db.getSiblingDB("restart_catalog_2");
+const foodColl = secondTestDB.getCollection("food");
+foodColl.drop();
+const doc = {
+ _id: "apple",
+ category: "fruit"
+};
+assert.commandWorked(foodColl.insert(doc));
+assert.eq(foodColl.find().toArray(), [doc]);
+
+// Build a new index on the new collection.
+assert.commandWorked(foodColl.createIndex({category: -1}));
+assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
+
+// The restartCatalog command kills all cursors. Test that a getMore on a cursor that existed
+// during restartCatalog fails with the appropriate error code. We insert a second document so
+// that we can make a query happen in two batches.
+assert.commandWorked(foodColl.insert({_id: "orange"}));
+let cursorResponse = assert.commandWorked(
+ secondTestDB.runCommand({find: foodColl.getName(), filter: {}, batchSize: 1}));
+assert.eq(cursorResponse.cursor.firstBatch.length, 1);
+assert.neq(cursorResponse.cursor.id, 0);
+assert.commandWorked(secondTestDB.adminCommand({restartCatalog: 1}));
+assert.commandFailedWithCode(
+ secondTestDB.runCommand({getMore: cursorResponse.cursor.id, collection: foodColl.getName()}),
+ ErrorCodes.QueryPlanKilled);
}());
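
The final check above relies on a common two-batch trick: request the first batch with batchSize: 1 so the server keeps the cursor open, invalidate the cursor, and then confirm that getMore fails instead of silently returning stale data. Sketched in isolation (names hypothetical; the invalidating command is whatever kills cursors in context):

const tc = db.cursor_kill_sketch;
tc.drop();
assert.commandWorked(tc.insert({_id: 1}));
assert.commandWorked(tc.insert({_id: 2}));

// batchSize: 1 returns one document and leaves the cursor open server-side.
const res = assert.commandWorked(
    db.runCommand({find: tc.getName(), filter: {}, batchSize: 1}));
assert.eq(1, res.cursor.firstBatch.length);
assert.neq(0, res.cursor.id);

// ...run the invalidating command here (e.g. {restartCatalog: 1})...
// afterwards the getMore is expected to fail rather than return stale results:
// db.runCommand({getMore: res.cursor.id, collection: tc.getName()});
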
diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js
index 38843eaf0a3..26dd01082b6 100644
--- a/jstests/core/return_key.js
+++ b/jstests/core/return_key.js
@@ -9,78 +9,76 @@
load("jstests/libs/analyze_plan.js");
(function() {
- 'use strict';
+'use strict';
- var results;
- var explain;
+var results;
+var explain;
- var coll = db.jstests_returnkey;
- coll.drop();
+var coll = db.jstests_returnkey;
+coll.drop();
- assert.writeOK(coll.insert({a: 1, b: 3}));
- assert.writeOK(coll.insert({a: 2, b: 2}));
- assert.writeOK(coll.insert({a: 3, b: 1}));
+assert.writeOK(coll.insert({a: 1, b: 3}));
+assert.writeOK(coll.insert({a: 2, b: 2}));
+assert.writeOK(coll.insert({a: 3, b: 1}));
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.commandWorked(coll.ensureIndex({b: 1}));
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.commandWorked(coll.ensureIndex({b: 1}));
- // Basic returnKey.
- results = coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray();
- assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]);
- results = coll.find().hint({a: 1}).sort({a: -1}).returnKey().toArray();
- assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
+// Basic returnKey.
+results = coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray();
+assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]);
+results = coll.find().hint({a: 1}).sort({a: -1}).returnKey().toArray();
+assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
- // Check that the plan is covered.
- explain = coll.find().hint({a: 1}).sort({a: 1}).returnKey().explain();
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- explain = coll.find().hint({a: 1}).sort({a: -1}).returnKey().explain();
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Check that the plan is covered.
+explain = coll.find().hint({a: 1}).sort({a: 1}).returnKey().explain();
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+explain = coll.find().hint({a: 1}).sort({a: -1}).returnKey().explain();
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // returnKey with an in-memory sort.
- results = coll.find().hint({a: 1}).sort({b: 1}).returnKey().toArray();
- assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
- results = coll.find().hint({a: 1}).sort({b: -1}).returnKey().toArray();
- assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]);
+// returnKey with an in-memory sort.
+results = coll.find().hint({a: 1}).sort({b: 1}).returnKey().toArray();
+assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
+results = coll.find().hint({a: 1}).sort({b: -1}).returnKey().toArray();
+assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]);
- // Check that the plan is not covered.
- explain = coll.find().hint({a: 1}).sort({b: 1}).returnKey().explain();
- assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
- explain = coll.find().hint({a: 1}).sort({b: -1}).returnKey().explain();
- assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
+// Check that the plan is not covered.
+explain = coll.find().hint({a: 1}).sort({b: 1}).returnKey().explain();
+assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
+explain = coll.find().hint({a: 1}).sort({b: -1}).returnKey().explain();
+assert(!isIndexOnly(db, explain.queryPlanner.winningPlan));
- // returnKey takes precedence over a regular inclusion projection. Should still be
- // covered.
- results = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
- assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
- explain = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().explain();
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+// returnKey takes precedence over a regular inclusion projection. Should still be
+// covered.
+results = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
+assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
+explain = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().explain();
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // returnKey takes precedence over a regular exclusion projection. Should still be
- // covered.
- results = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
- assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
- explain = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().explain();
- assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
+// returnKey takes precedence over a regular exclusion projection. Should still be
+// covered.
+results = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
+assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]);
+explain = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().explain();
+assert(isIndexOnly(db, explain.queryPlanner.winningPlan));
- // Unlike other projections, sortKey meta-projection can co-exist with returnKey.
- results =
- coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
- assert.eq(results, [{a: 3, c: {'': 3}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 1}}]);
+// Unlike other projections, sortKey meta-projection can co-exist with returnKey.
+results = coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
+assert.eq(results, [{a: 3, c: {'': 3}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 1}}]);
- // returnKey with sortKey $meta where there is an in-memory sort.
- results =
- coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({b: 1}).returnKey().toArray();
- assert.eq(results, [{a: 3, c: {'': 1}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 3}}]);
+// returnKey with sortKey $meta where there is an in-memory sort.
+results = coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({b: 1}).returnKey().toArray();
+assert.eq(results, [{a: 3, c: {'': 1}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 3}}]);
- // returnKey with multiple sortKey $meta projections.
- results = coll.find({}, {c: {$meta: 'sortKey'}, d: {$meta: 'sortKey'}})
- .hint({a: 1})
- .sort({b: 1})
- .returnKey()
- .toArray();
- assert.eq(results, [
- {a: 3, c: {'': 1}, d: {'': 1}},
- {a: 2, c: {'': 2}, d: {'': 2}},
- {a: 1, c: {'': 3}, d: {'': 3}}
- ]);
+// returnKey with multiple sortKey $meta projections.
+results = coll.find({}, {c: {$meta: 'sortKey'}, d: {$meta: 'sortKey'}})
+ .hint({a: 1})
+ .sort({b: 1})
+ .returnKey()
+ .toArray();
+assert.eq(results, [
+ {a: 3, c: {'': 1}, d: {'': 1}},
+ {a: 2, c: {'': 2}, d: {'': 2}},
+ {a: 1, c: {'': 3}, d: {'': 3}}
+]);
})();
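
For readers unfamiliar with returnKey(): it replaces each result document with the index key entry that produced it, which is why the projections above are overridden and why the plans stay covered. A minimal sketch (collection name hypothetical):

const rk = db.returnkey_sketch;
rk.drop();
assert.writeOK(rk.insert({a: 1, b: 2}));
assert.commandWorked(rk.ensureIndex({a: 1}));
// Only the index key comes back, not the stored document.
assert.eq([{a: 1}], rk.find().hint({a: 1}).returnKey().toArray());
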
diff --git a/jstests/core/role_management_helpers.js b/jstests/core/role_management_helpers.js
index b0f1762acf9..f59ff425d52 100644
--- a/jstests/core/role_management_helpers.js
+++ b/jstests/core/role_management_helpers.js
@@ -34,121 +34,119 @@ function assertHasPrivilege(privilegeArray, privilege) {
}
}
assert(false,
- "Privilege " + tojson(privilege) + " not found in privilege array: " +
- tojson(privilegeArray));
+ "Privilege " + tojson(privilege) +
+ " not found in privilege array: " + tojson(privilegeArray));
}
(function(db) {
- var db = db.getSiblingDB("role_management_helpers");
- db.dropDatabase();
- db.dropAllRoles();
-
- db.createRole({
- role: 'roleA',
- roles: [],
- privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
- });
- db.createRole({role: 'roleB', privileges: [], roles: ["roleA"]});
- db.createRole({role: 'roleC', privileges: [], roles: []});
-
- // Test getRole
- var roleObj = db.getRole("roleA");
- assert.eq(0, roleObj.roles.length);
- assert.eq(null, roleObj.privileges);
- roleObj = db.getRole("roleA", {showPrivileges: true});
- assert.eq(1, roleObj.privileges.length);
- assertHasPrivilege(roleObj.privileges,
- {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(1, roleObj.inheritedPrivileges.length); // inherited from roleA
- assertHasPrivilege(roleObj.inheritedPrivileges,
- {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleA", db.getName());
-
- // Test getRoles
- var roles = db.getRoles();
- assert.eq(3, roles.length);
- printjson(roles);
- assert(roles[0].role == 'roleA' || roles[1].role == 'roleA' || roles[2].role == 'roleA');
- assert(roles[0].role == 'roleB' || roles[1].role == 'roleB' || roles[2].role == 'roleB');
- assert(roles[0].role == 'roleC' || roles[1].role == 'roleC' || roles[2].role == 'roleC');
- assert.eq(null, roles[0].inheritedPrivileges);
- var roles = db.getRoles({showPrivileges: true, showBuiltinRoles: true});
- assert.eq(9, roles.length);
- assert.neq(null, roles[0].inheritedPrivileges);
-
- // Granting roles to nonexistent role fails
- assert.throws(function() {
- db.grantRolesToRole("fakeRole", ['dbAdmin']);
- });
- // Granting roles to built-in role fails
- assert.throws(function() {
- db.grantRolesToRole("readWrite", ['dbAdmin']);
- });
- // Granting non-existent role fails
- assert.throws(function() {
- db.grantRolesToRole("roleB", ['dbAdmin', 'fakeRole']);
- });
-
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(1, roleObj.inheritedPrivileges.length);
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleA", db.getName());
-
- // Granting a role you already have is no problem
- db.grantRolesToRole("roleB", ['readWrite', 'roleC']);
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.gt(roleObj.inheritedPrivileges.length, 1); // Got privileges from readWrite role
- assert.eq(3, roleObj.roles.length);
- assertHasRole(roleObj.roles, "readWrite", db.getName());
- assertHasRole(roleObj.roles, "roleA", db.getName());
- assertHasRole(roleObj.roles, "roleC", db.getName());
-
- // Revoking roles the role doesn't have is fine
- db.revokeRolesFromRole("roleB", ['roleA', 'readWrite', 'dbAdmin']);
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(0, roleObj.inheritedPrivileges.length);
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleC", db.getName());
-
- // Privileges on the same resource get collapsed
- db.grantPrivilegesToRole("roleA", [
- {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']},
- {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']}
- ]);
- roleObj = db.getRole("roleA", {showPrivileges: true});
- assert.eq(0, roleObj.roles.length);
- assert.eq(2, roleObj.privileges.length);
- assertHasPrivilege(
- roleObj.privileges,
- {resource: {db: db.getName(), collection: "foo"}, actions: ['find', 'insert']});
- assertHasPrivilege(roleObj.privileges,
- {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']});
-
- // Update role
- db.updateRole("roleA", {
- roles: ['roleB'],
- privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
- });
- roleObj = db.getRole("roleA", {showPrivileges: true});
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleB", db.getName());
- assert.eq(1, roleObj.privileges.length);
- assertHasPrivilege(roleObj.privileges,
- {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
-
- // Test dropRole
- db.dropRole('roleC');
- assert.eq(null, db.getRole('roleC'));
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(0, roleObj.privileges.length);
- assert.eq(0, roleObj.roles.length);
-
- // Test dropAllRoles
- db.dropAllRoles();
- assert.eq(null, db.getRole('roleA'));
- assert.eq(null, db.getRole('roleB'));
- assert.eq(null, db.getRole('roleC'));
-
+var db = db.getSiblingDB("role_management_helpers");
+db.dropDatabase();
+db.dropAllRoles();
+
+db.createRole({
+ role: 'roleA',
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
+});
+db.createRole({role: 'roleB', privileges: [], roles: ["roleA"]});
+db.createRole({role: 'roleC', privileges: [], roles: []});
+
+// Test getRole
+var roleObj = db.getRole("roleA");
+assert.eq(0, roleObj.roles.length);
+assert.eq(null, roleObj.privileges);
+roleObj = db.getRole("roleA", {showPrivileges: true});
+assert.eq(1, roleObj.privileges.length);
+assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
+roleObj = db.getRole("roleB", {showPrivileges: true});
+assert.eq(1, roleObj.inheritedPrivileges.length); // inherited from roleA
+assertHasPrivilege(roleObj.inheritedPrivileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
+assert.eq(1, roleObj.roles.length);
+assertHasRole(roleObj.roles, "roleA", db.getName());
+
+// Test getRoles
+var roles = db.getRoles();
+assert.eq(3, roles.length);
+printjson(roles);
+assert(roles[0].role == 'roleA' || roles[1].role == 'roleA' || roles[2].role == 'roleA');
+assert(roles[0].role == 'roleB' || roles[1].role == 'roleB' || roles[2].role == 'roleB');
+assert(roles[0].role == 'roleC' || roles[1].role == 'roleC' || roles[2].role == 'roleC');
+assert.eq(null, roles[0].inheritedPrivileges);
+var roles = db.getRoles({showPrivileges: true, showBuiltinRoles: true});
+assert.eq(9, roles.length);
+assert.neq(null, roles[0].inheritedPrivileges);
+
+// Granting roles to nonexistent role fails
+assert.throws(function() {
+ db.grantRolesToRole("fakeRole", ['dbAdmin']);
+});
+// Granting roles to built-in role fails
+assert.throws(function() {
+ db.grantRolesToRole("readWrite", ['dbAdmin']);
+});
+// Granting non-existent role fails
+assert.throws(function() {
+ db.grantRolesToRole("roleB", ['dbAdmin', 'fakeRole']);
+});
+
+roleObj = db.getRole("roleB", {showPrivileges: true});
+assert.eq(1, roleObj.inheritedPrivileges.length);
+assert.eq(1, roleObj.roles.length);
+assertHasRole(roleObj.roles, "roleA", db.getName());
+
+// Granting a role you already have is no problem
+db.grantRolesToRole("roleB", ['readWrite', 'roleC']);
+roleObj = db.getRole("roleB", {showPrivileges: true});
+assert.gt(roleObj.inheritedPrivileges.length, 1); // Got privileges from readWrite role
+assert.eq(3, roleObj.roles.length);
+assertHasRole(roleObj.roles, "readWrite", db.getName());
+assertHasRole(roleObj.roles, "roleA", db.getName());
+assertHasRole(roleObj.roles, "roleC", db.getName());
+
+// Revoking roles the role doesn't have is fine
+db.revokeRolesFromRole("roleB", ['roleA', 'readWrite', 'dbAdmin']);
+roleObj = db.getRole("roleB", {showPrivileges: true});
+assert.eq(0, roleObj.inheritedPrivileges.length);
+assert.eq(1, roleObj.roles.length);
+assertHasRole(roleObj.roles, "roleC", db.getName());
+
+// Privileges on the same resource get collapsed
+db.grantPrivilegesToRole("roleA", [
+ {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']},
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']}
+]);
+roleObj = db.getRole("roleA", {showPrivileges: true});
+assert.eq(0, roleObj.roles.length);
+assert.eq(2, roleObj.privileges.length);
+assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find', 'insert']});
+assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']});
+
+// Update role
+db.updateRole("roleA", {
+ roles: ['roleB'],
+ privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
+});
+roleObj = db.getRole("roleA", {showPrivileges: true});
+assert.eq(1, roleObj.roles.length);
+assertHasRole(roleObj.roles, "roleB", db.getName());
+assert.eq(1, roleObj.privileges.length);
+assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
+
+// Test dropRole
+db.dropRole('roleC');
+assert.eq(null, db.getRole('roleC'));
+roleObj = db.getRole("roleB", {showPrivileges: true});
+assert.eq(0, roleObj.privileges.length);
+assert.eq(0, roleObj.roles.length);
+
+// Test dropAllRoles
+db.dropAllRoles();
+assert.eq(null, db.getRole('roleA'));
+assert.eq(null, db.getRole('roleB'));
+assert.eq(null, db.getRole('roleC'));
}(db));
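
The "privileges on the same resource get collapsed" behavior exercised above means successive grants merge into a single privilege whose actions are the union. Sketched on its own (role and collection names hypothetical):

db.createRole({role: "sketchRole", roles: [], privileges: []});
db.grantPrivilegesToRole(
    "sketchRole", [{resource: {db: db.getName(), collection: "c"}, actions: ["find"]}]);
db.grantPrivilegesToRole(
    "sketchRole", [{resource: {db: db.getName(), collection: "c"}, actions: ["insert"]}]);
const sketchRole = db.getRole("sketchRole", {showPrivileges: true});
// One privilege entry remains, with actions ['find', 'insert'].
assert.eq(1, sketchRole.privileges.length);
db.dropRole("sketchRole");
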
diff --git a/jstests/core/rollback_index_drop.js b/jstests/core/rollback_index_drop.js
index 3e7c3a97952..6f999f56209 100644
--- a/jstests/core/rollback_index_drop.js
+++ b/jstests/core/rollback_index_drop.js
@@ -5,31 +5,30 @@
//
// @tags: [does_not_support_stepdowns, assumes_unsharded_collection]
(function() {
- "use strict";
+"use strict";
- const coll = db.rollback_index_drop;
- coll.drop();
+const coll = db.rollback_index_drop;
+coll.drop();
- assert.commandWorked(coll.insert([{a: 1}, {a: 2}, {a: 3}]));
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.insert([{a: 1}, {a: 2}, {a: 3}]));
+assert.commandWorked(coll.createIndex({a: 1}));
- // Verify that the index has the expected set of keys.
- assert.eq([{a: 1}, {a: 2}, {a: 3}],
- coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray());
+// Verify that the index has the expected set of keys.
+assert.eq([{a: 1}, {a: 2}, {a: 3}], coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray());
- // Run a dropIndexes command that attempts to drop both {a: 1} and an invalid index. This should
- // cause the drop of {a: 1} to roll back, since the set of index drops happens atomically.
- assert.commandFailedWithCode(
- db.runCommand({dropIndexes: coll.getName(), index: ["a_1", "unknown"]}),
- ErrorCodes.IndexNotFound);
+// Run a dropIndexes command that attempts to drop both {a: 1} and an invalid index. This should
+// cause the drop of {a: 1} to roll back, since the set of index drops happens atomically.
+assert.commandFailedWithCode(
+ db.runCommand({dropIndexes: coll.getName(), index: ["a_1", "unknown"]}),
+ ErrorCodes.IndexNotFound);
- // Verify that the {a: 1} index is still present in listIndexes output.
- const indexList = coll.getIndexes();
- assert.neq(undefined, indexList.find((idx) => idx.name === "a_1"), indexList);
+// Verify that the {a: 1} index is still present in listIndexes output.
+const indexList = coll.getIndexes();
+assert.neq(undefined, indexList.find((idx) => idx.name === "a_1"), indexList);
- // Write to the collection and ensure that the resulting set of index keys is correct.
- assert.commandWorked(coll.update({a: 3}, {$inc: {a: 1}}));
- assert.commandWorked(coll.insert({a: 5}));
- assert.eq([{a: 1}, {a: 2}, {a: 4}, {a: 5}],
- coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray());
+// Write to the collection and ensure that the resulting set of index keys is correct.
+assert.commandWorked(coll.update({a: 3}, {$inc: {a: 1}}));
+assert.commandWorked(coll.insert({a: 5}));
+assert.eq([{a: 1}, {a: 2}, {a: 4}, {a: 5}],
+ coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray());
}());
diff --git a/jstests/core/server1470.js b/jstests/core/server1470.js
index 040eda4228f..41f7bfea7e0 100644
--- a/jstests/core/server1470.js
+++ b/jstests/core/server1470.js
@@ -12,7 +12,7 @@ q = {
};
t.update(q, {$set: {x: 1}}, true, true);
ref = t.findOne().pic;
-assert.eq("object", typeof(ref));
+assert.eq("object", typeof (ref));
assert.eq(q.pic["$ref"], ref["$ref"]);
assert.eq(q.pic["$id"], ref["$id"]);
diff --git a/jstests/core/server14747.js b/jstests/core/server14747.js
index e75407a7fdf..c6d77e6adb4 100644
--- a/jstests/core/server14747.js
+++ b/jstests/core/server14747.js
@@ -3,15 +3,14 @@
(function() {
- "use strict";
- var t = db.jstests_server14747;
-
- t.drop();
- t.ensureIndex({a: 1, b: 1});
- t.ensureIndex({a: 1, c: 1});
- t.insert({a: 1});
- for (var i = 0; i < 10; i++) {
- t.find({a: 1}).explain(true);
- }
+"use strict";
+var t = db.jstests_server14747;
+t.drop();
+t.ensureIndex({a: 1, b: 1});
+t.ensureIndex({a: 1, c: 1});
+t.insert({a: 1});
+for (var i = 0; i < 10; i++) {
+ t.find({a: 1}).explain(true);
+}
}());
diff --git a/jstests/core/server14753.js b/jstests/core/server14753.js
index cd6ea309399..e8de183f2d7 100644
--- a/jstests/core/server14753.js
+++ b/jstests/core/server14753.js
@@ -3,17 +3,16 @@
(function() {
- "use strict";
- var t = db.jstests_server14753;
-
- t.drop();
- t.ensureIndex({a: 1});
- t.ensureIndex({b: 1});
- for (var i = 0; i < 20; i++) {
- t.insert({b: i});
- }
- for (var i = 0; i < 20; i++) {
- t.find({b: 1}).sort({a: 1}).next();
- }
+"use strict";
+var t = db.jstests_server14753;
+t.drop();
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+for (var i = 0; i < 20; i++) {
+ t.insert({b: i});
+}
+for (var i = 0; i < 20; i++) {
+ t.find({b: 1}).sort({a: 1}).next();
+}
}());
diff --git a/jstests/core/server22053.js b/jstests/core/server22053.js
index d295a72cc9b..d803c732b86 100644
--- a/jstests/core/server22053.js
+++ b/jstests/core/server22053.js
@@ -1,19 +1,19 @@
(function() {
- "use strict";
- var t = db.jstests_server22053;
+"use strict";
+var t = db.jstests_server22053;
- /* eslint-disable no-sparse-arrays */
- var s0 = [, , 3, , , 6];
- t.coll.insert({mys: s0});
+/* eslint-disable no-sparse-arrays */
+var s0 = [, , 3, , , 6];
+t.coll.insert({mys: s0});
- var cur = t.coll.find();
- var doc = cur.next();
- assert.eq(6, doc['mys'].length);
- assert.eq(undefined, doc['mys'][0]);
- assert.eq(undefined, doc['mys'][1]);
- assert.eq(3, doc['mys'][2]);
- assert.eq(undefined, doc['mys'][3]);
- assert.eq(undefined, doc['mys'][4]);
- assert.eq(6, doc['mys'][5]);
+var cur = t.coll.find();
+var doc = cur.next();
+assert.eq(6, doc['mys'].length);
+assert.eq(undefined, doc['mys'][0]);
+assert.eq(undefined, doc['mys'][1]);
+assert.eq(3, doc['mys'][2]);
+assert.eq(undefined, doc['mys'][3]);
+assert.eq(undefined, doc['mys'][4]);
+assert.eq(6, doc['mys'][5]);
}());
\ No newline at end of file
diff --git a/jstests/core/server25192.js b/jstests/core/server25192.js
index e07cfdcf50c..a275c768f71 100644
--- a/jstests/core/server25192.js
+++ b/jstests/core/server25192.js
@@ -1,12 +1,12 @@
(function() {
- "use strict";
+"use strict";
- var x = {};
+var x = {};
- assert.doesNotThrow(function() {
- Object.extend(x, {a: null}, true);
- }, [], "Extending an object with a null field does not throw");
+assert.doesNotThrow(function() {
+ Object.extend(x, {a: null}, true);
+}, [], "Extending an object with a null field does not throw");
- assert.eq(x.a, null);
+assert.eq(x.a, null);
}());
diff --git a/jstests/core/set_param1.js b/jstests/core/set_param1.js
index 7910c01aa96..6484f2241e2 100644
--- a/jstests/core/set_param1.js
+++ b/jstests/core/set_param1.js
@@ -48,78 +48,74 @@ assert.commandFailed(
// Set multiple component log levels at once.
(function() {
- assert.commandWorked(db.adminCommand({
- "setParameter": 1,
- logComponentVerbosity: {
- verbosity: 2,
- accessControl: {verbosity: 0},
- storage: {verbosity: 3, journal: {verbosity: 5}}
- }
- }));
-
- var result =
- assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
- .logComponentVerbosity;
-
- assert.eq(2, result.verbosity);
- assert.eq(0, result.accessControl.verbosity);
- assert.eq(3, result.storage.verbosity);
- assert.eq(5, result.storage.journal.verbosity);
+assert.commandWorked(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {
+ verbosity: 2,
+ accessControl: {verbosity: 0},
+ storage: {verbosity: 3, journal: {verbosity: 5}}
+ }
+}));
+
+var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+assert.eq(2, result.verbosity);
+assert.eq(0, result.accessControl.verbosity);
+assert.eq(3, result.storage.verbosity);
+assert.eq(5, result.storage.journal.verbosity);
})();
// Set multiple component log levels at once.
// Unrecognized field names not mapping to a log component shall be rejected.
// No changes shall apply.
(function() {
- assert.commandFailed(db.adminCommand({
- "setParameter": 1,
- logComponentVerbosity: {
- verbosity: 6,
- accessControl: {verbosity: 5},
- storage: {verbosity: 4, journal: {verbosity: 6}},
- NoSuchComponent: {verbosity: 2},
- extraField: 123
- }
- }));
-
- var result =
- assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
- .logComponentVerbosity;
-
- assert.eq(2, result.verbosity);
- assert.eq(0, result.accessControl.verbosity);
- assert.eq(3, result.storage.verbosity);
- assert.eq(5, result.storage.journal.verbosity);
+assert.commandFailed(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {
+ verbosity: 6,
+ accessControl: {verbosity: 5},
+ storage: {verbosity: 4, journal: {verbosity: 6}},
+ NoSuchComponent: {verbosity: 2},
+ extraField: 123
+ }
+}));
+
+var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+assert.eq(2, result.verbosity);
+assert.eq(0, result.accessControl.verbosity);
+assert.eq(3, result.storage.verbosity);
+assert.eq(5, result.storage.journal.verbosity);
})();
// Clear verbosity for default and journal.
(function() {
- assert.commandWorked(db.adminCommand({
- "setParameter": 1,
- logComponentVerbosity: {verbosity: -1, storage: {journal: {verbosity: -1}}}
- }));
-
- var result =
- assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
- .logComponentVerbosity;
-
- assert.eq(0, result.verbosity);
- assert.eq(0, result.accessControl.verbosity);
- assert.eq(3, result.storage.verbosity);
- assert.eq(-1, result.storage.journal.verbosity);
+assert.commandWorked(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {verbosity: -1, storage: {journal: {verbosity: -1}}}
+}));
+
+var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+assert.eq(0, result.verbosity);
+assert.eq(0, result.accessControl.verbosity);
+assert.eq(3, result.storage.verbosity);
+assert.eq(-1, result.storage.journal.verbosity);
})();
// Set accessControl verbosity using numerical level instead of
// subdocument with 'verbosity' field.
(function() {
- assert.commandWorked(
- db.adminCommand({"setParameter": 1, logComponentVerbosity: {accessControl: 5}}));
+assert.commandWorked(
+ db.adminCommand({"setParameter": 1, logComponentVerbosity: {accessControl: 5}}));
- var result =
- assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
- .logComponentVerbosity;
+var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
- assert.eq(5, result.accessControl.verbosity);
+assert.eq(5, result.accessControl.verbosity);
})();
// Restore old verbosity values.
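
As the hunks above show, logComponentVerbosity accepts either a numeric level or a nested subdocument per component, and -1 clears an override so the component inherits from its parent again. A compact sketch:

// Set storage verbosity with a bare number (shorthand for {verbosity: 1}).
assert.commandWorked(
    db.adminCommand({setParameter: 1, logComponentVerbosity: {storage: 1}}));
// -1 removes the override; storage falls back to the global default.
assert.commandWorked(
    db.adminCommand({setParameter: 1, logComponentVerbosity: {storage: -1}}));
const v = assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
              .logComponentVerbosity;
assert.eq(-1, v.storage.verbosity);
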
diff --git a/jstests/core/set_type_change.js b/jstests/core/set_type_change.js
index 565da8be12e..5b06449dce4 100644
--- a/jstests/core/set_type_change.js
+++ b/jstests/core/set_type_change.js
@@ -8,21 +8,21 @@
* the document, including any relevant indices.
*/
(function() {
- "use strict";
+"use strict";
- var coll = db.set_type_change;
- coll.drop();
- assert.commandWorked(coll.ensureIndex({a: 1}));
+var coll = db.set_type_change;
+coll.drop();
+assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 2}));
- var newVal = new NumberLong(2);
- var res = coll.update({}, {$set: {a: newVal}});
- assert.eq(res.nMatched, 1);
- if (coll.getMongo().writeMode() == "commands")
- assert.eq(res.nModified, 1);
+var newVal = new NumberLong(2);
+var res = coll.update({}, {$set: {a: newVal}});
+assert.eq(res.nMatched, 1);
+if (coll.getMongo().writeMode() == "commands")
+ assert.eq(res.nModified, 1);
- // Make sure it actually changed the type.
- var updated = coll.findOne();
- assert(updated.a instanceof NumberLong, "$set did not update type of value: " + updated.a);
+// Make sure it actually changed the type.
+var updated = coll.findOne();
+assert(updated.a instanceof NumberLong, "$set did not update type of value: " + updated.a);
})();
diff --git a/jstests/core/shell_connection_strings.js b/jstests/core/shell_connection_strings.js
index ff9aa727480..0cf2f3867d5 100644
--- a/jstests/core/shell_connection_strings.js
+++ b/jstests/core/shell_connection_strings.js
@@ -3,34 +3,33 @@
// uses_multiple_connections,
// ]
(function() {
- 'use strict';
+'use strict';
- const mongod = new MongoURI(db.getMongo().host).servers[0];
- const host = mongod.host;
- const port = mongod.port;
+const mongod = new MongoURI(db.getMongo().host).servers[0];
+const host = mongod.host;
+const port = mongod.port;
- function testConnect(ok, ...args) {
- const exitCode = runMongoProgram('mongo', '--eval', ';', ...args);
- if (ok) {
- assert.eq(exitCode, 0, "failed to connect with `" + args.join(' ') + "`");
- } else {
- assert.neq(
- exitCode, 0, "unexpectedly succeeded connecting with `" + args.join(' ') + "`");
- }
+function testConnect(ok, ...args) {
+ const exitCode = runMongoProgram('mongo', '--eval', ';', ...args);
+ if (ok) {
+ assert.eq(exitCode, 0, "failed to connect with `" + args.join(' ') + "`");
+ } else {
+ assert.neq(exitCode, 0, "unexpectedly succeeded connecting with `" + args.join(' ') + "`");
}
+}
- testConnect(true, `${host}:${port}`);
- testConnect(true, `${host}:${port}/test`);
- testConnect(true, `${host}:${port}/admin`);
- testConnect(true, host, '--port', port);
- testConnect(true, '--host', host, '--port', port, 'test');
- testConnect(true, '--host', host, '--port', port, 'admin');
- testConnect(true, `mongodb://${host}:${port}/test`);
- testConnect(true, `mongodb://${host}:${port}/test?connectTimeoutMS=10000`);
+testConnect(true, `${host}:${port}`);
+testConnect(true, `${host}:${port}/test`);
+testConnect(true, `${host}:${port}/admin`);
+testConnect(true, host, '--port', port);
+testConnect(true, '--host', host, '--port', port, 'test');
+testConnect(true, '--host', host, '--port', port, 'admin');
+testConnect(true, `mongodb://${host}:${port}/test`);
+testConnect(true, `mongodb://${host}:${port}/test?connectTimeoutMS=10000`);
- // if a full URI is provided, you cannot also specify host or port
- testConnect(false, `${host}/test`, '--port', port);
- testConnect(false, `mongodb://${host}:${port}/test`, '--port', port);
- testConnect(false, `mongodb://${host}:${port}/test`, '--host', host);
- testConnect(false, `mongodb://${host}:${port}/test`, '--host', host, '--port', port);
+// if a full URI is provided, you cannot also specify host or port
+testConnect(false, `${host}/test`, '--port', port);
+testConnect(false, `mongodb://${host}:${port}/test`, '--port', port);
+testConnect(false, `mongodb://${host}:${port}/test`, '--host', host);
+testConnect(false, `mongodb://${host}:${port}/test`, '--host', host, '--port', port);
})();
diff --git a/jstests/core/single_batch.js b/jstests/core/single_batch.js
index ccf9f73362f..b06e5ce7aa5 100644
--- a/jstests/core/single_batch.js
+++ b/jstests/core/single_batch.js
@@ -1,21 +1,21 @@
// Test the "single batch" semantics of negative limit.
(function() {
- 'use strict';
+'use strict';
- var coll = db.jstests_single_batch;
- coll.drop();
+var coll = db.jstests_single_batch;
+coll.drop();
- // Approximately 1 MB.
- var padding = new Array(1024 * 1024).join("x");
+// Approximately 1 MB.
+var padding = new Array(1024 * 1024).join("x");
- // Insert ~20 MB of data.
- for (var i = 0; i < 20; i++) {
- assert.writeOK(coll.insert({_id: i, padding: padding}));
- }
+// Insert ~20 MB of data.
+for (var i = 0; i < 20; i++) {
+ assert.writeOK(coll.insert({_id: i, padding: padding}));
+}
- // The limit is 18, but we should end up with fewer documents since 18 docs won't fit in a
- // single 16 MB batch.
- var numResults = coll.find().limit(-18).itcount();
- assert.lt(numResults, 18);
- assert.gt(numResults, 0);
+// The limit is 18, but we should end up with fewer documents since 18 docs won't fit in a
+// single 16 MB batch.
+var numResults = coll.find().limit(-18).itcount();
+assert.lt(numResults, 18);
+assert.gt(numResults, 0);
})();
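
Negative limit is the shell's way of requesting a single batch: the server returns at most |n| documents, truncated to whatever fits in one 16 MB reply, and then closes the cursor, which is what the test verifies with oversized documents. On small documents the full |n| comes back, as in this sketch (collection name hypothetical):

const sb = db.single_batch_sketch;
sb.drop();
for (let i = 0; i < 5; i++) {
    assert.writeOK(sb.insert({_id: i}));
}
// Five small documents easily fit in one batch, so all five are returned.
assert.eq(5, sb.find().limit(-5).itcount());
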
diff --git a/jstests/core/sort1.js b/jstests/core/sort1.js
index 50599ad340f..edd787306b0 100644
--- a/jstests/core/sort1.js
+++ b/jstests/core/sort1.js
@@ -1,70 +1,69 @@
(function() {
- 'use strict';
+'use strict';
- var coll = db.sort1;
- coll.drop();
+var coll = db.sort1;
+coll.drop();
- coll.save({x: 3, z: 33});
- coll.save({x: 5, z: 33});
- coll.save({x: 2, z: 33});
- coll.save({x: 3, z: 33});
- coll.save({x: 1, z: 33});
+coll.save({x: 3, z: 33});
+coll.save({x: 5, z: 33});
+coll.save({x: 2, z: 33});
+coll.save({x: 3, z: 33});
+coll.save({x: 1, z: 33});
- for (var pass = 0; pass < 2; pass++) {
- assert(coll.find().sort({x: 1})[0].x == 1);
- assert(coll.find().sort({x: 1}).skip(1)[0].x == 2);
- assert(coll.find().sort({x: -1})[0].x == 5);
- assert(coll.find().sort({x: -1})[1].x == 3);
- assert.eq(coll.find().sort({x: -1}).skip(0)[0].x, 5);
- assert.eq(coll.find().sort({x: -1}).skip(1)[0].x, 3);
- coll.ensureIndex({x: 1});
- }
+for (var pass = 0; pass < 2; pass++) {
+ assert(coll.find().sort({x: 1})[0].x == 1);
+ assert(coll.find().sort({x: 1}).skip(1)[0].x == 2);
+ assert(coll.find().sort({x: -1})[0].x == 5);
+ assert(coll.find().sort({x: -1})[1].x == 3);
+ assert.eq(coll.find().sort({x: -1}).skip(0)[0].x, 5);
+ assert.eq(coll.find().sort({x: -1}).skip(1)[0].x, 3);
+ coll.ensureIndex({x: 1});
+}
- assert(coll.validate().valid);
+assert(coll.validate().valid);
- coll.drop();
- coll.save({x: 'a'});
- coll.save({x: 'aba'});
- coll.save({x: 'zed'});
- coll.save({x: 'foo'});
+coll.drop();
+coll.save({x: 'a'});
+coll.save({x: 'aba'});
+coll.save({x: 'zed'});
+coll.save({x: 'foo'});
- for (var pass = 0; pass < 2; pass++) {
- assert.eq("a", coll.find().sort({'x': 1}).limit(1).next().x, "c.1");
- assert.eq("a", coll.find().sort({'x': 1}).next().x, "c.2");
- assert.eq("zed", coll.find().sort({'x': -1}).limit(1).next().x, "c.3");
- assert.eq("zed", coll.find().sort({'x': -1}).next().x, "c.4");
- coll.ensureIndex({x: 1});
- }
+for (var pass = 0; pass < 2; pass++) {
+ assert.eq("a", coll.find().sort({'x': 1}).limit(1).next().x, "c.1");
+ assert.eq("a", coll.find().sort({'x': 1}).next().x, "c.2");
+ assert.eq("zed", coll.find().sort({'x': -1}).limit(1).next().x, "c.3");
+ assert.eq("zed", coll.find().sort({'x': -1}).next().x, "c.4");
+ coll.ensureIndex({x: 1});
+}
- assert(coll.validate().valid);
+assert(coll.validate().valid);
- // Ensure that sorts with a collation and no index return the correct ordering. Here we use the
- // 'numericOrdering' option which orders number-like strings by their numerical values.
- if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, str: '1000'}));
- assert.writeOK(coll.insert({_id: 1, str: '5'}));
- assert.writeOK(coll.insert({_id: 2, str: '200'}));
+// Ensure that sorts with a collation and no index return the correct ordering. Here we use the
+// 'numericOrdering' option which orders number-like strings by their numerical values.
+if (db.getMongo().useReadCommands()) {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, str: '1000'}));
+ assert.writeOK(coll.insert({_id: 1, str: '5'}));
+ assert.writeOK(coll.insert({_id: 2, str: '200'}));
- var cursor =
- coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true});
- assert.eq(cursor.next(), {_id: 0, str: '1000'});
- assert.eq(cursor.next(), {_id: 2, str: '200'});
- assert.eq(cursor.next(), {_id: 1, str: '5'});
- assert(!cursor.hasNext());
- }
+ var cursor = coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true});
+ assert.eq(cursor.next(), {_id: 0, str: '1000'});
+ assert.eq(cursor.next(), {_id: 2, str: '200'});
+ assert.eq(cursor.next(), {_id: 1, str: '5'});
+ assert(!cursor.hasNext());
+}
- // Ensure that sorting of arrays correctly respects a collation with numeric ordering.
- if (db.getMongo().useReadCommands()) {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, strs: ['1000', '500']}));
- assert.writeOK(coll.insert({_id: 1, strs: ['2000', '60']}));
- cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({
- locale: 'en_US',
- numericOrdering: true
- });
- assert.eq(cursor.next(), {_id: 1, strs: ['2000', '60']});
- assert.eq(cursor.next(), {_id: 0, strs: ['1000', '500']});
- assert(!cursor.hasNext());
- }
+// Ensure that sorting of arrays correctly respects a collation with numeric ordering.
+if (db.getMongo().useReadCommands()) {
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, strs: ['1000', '500']}));
+ assert.writeOK(coll.insert({_id: 1, strs: ['2000', '60']}));
+ cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({
+ locale: 'en_US',
+ numericOrdering: true
+ });
+ assert.eq(cursor.next(), {_id: 1, strs: ['2000', '60']});
+ assert.eq(cursor.next(), {_id: 0, strs: ['1000', '500']});
+ assert(!cursor.hasNext());
+}
})();
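
To make the numericOrdering behavior above concrete: lexicographic order puts "1000" before "5", while numeric ordering compares the parsed values. A minimal sketch (collection name hypothetical):

const ns = db.numeric_sort_sketch;
ns.drop();
assert.writeOK(ns.insert({s: "9"}));
assert.writeOK(ns.insert({s: "10"}));
// Lexicographically "10" < "9"; with numericOrdering, 9 < 10.
const ordered = ns.find()
                    .sort({s: 1})
                    .collation({locale: "en_US", numericOrdering: true})
                    .toArray()
                    .map(d => d.s);
assert.eq(["9", "10"], ordered);
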
diff --git a/jstests/core/sort3.js b/jstests/core/sort3.js
index 1a1df005fb3..5e5f3313f51 100644
--- a/jstests/core/sort3.js
+++ b/jstests/core/sort3.js
@@ -1,13 +1,13 @@
(function() {
- "use strict";
+"use strict";
- const coll = db.sort3;
- coll.drop();
+const coll = db.sort3;
+coll.drop();
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 5}));
- assert.writeOK(coll.insert({a: 3}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 5}));
+assert.writeOK(coll.insert({a: 3}));
- assert.eq([1, 3, 5], coll.find().sort({a: 1}).toArray().map(doc => doc.a));
- assert.eq([5, 3, 1], coll.find().sort({a: -1}).toArray().map(doc => doc.a));
+assert.eq([1, 3, 5], coll.find().sort({a: 1}).toArray().map(doc => doc.a));
+assert.eq([5, 3, 1], coll.find().sort({a: -1}).toArray().map(doc => doc.a));
}());
diff --git a/jstests/core/sort4.js b/jstests/core/sort4.js
index ef33e779d8e..63d7f3810bd 100644
--- a/jstests/core/sort4.js
+++ b/jstests/core/sort4.js
@@ -1,45 +1,45 @@
(function() {
- "use strict";
-
- const coll = db.sort4;
- coll.drop();
-
- function nice(sort, correct, extra) {
- const c = coll.find().sort(sort);
- let s = "";
- c.forEach(function(z) {
- if (s.length) {
- s += ",";
- }
- s += z.name;
- if (z.prename) {
- s += z.prename;
- }
- });
- if (correct) {
- assert.eq(correct, s, tojson(sort) + "(" + extra + ")");
+"use strict";
+
+const coll = db.sort4;
+coll.drop();
+
+function nice(sort, correct, extra) {
+ const c = coll.find().sort(sort);
+ let s = "";
+ c.forEach(function(z) {
+ if (s.length) {
+ s += ",";
+ }
+ s += z.name;
+ if (z.prename) {
+ s += z.prename;
}
- return s;
+ });
+ if (correct) {
+ assert.eq(correct, s, tojson(sort) + "(" + extra + ")");
}
+ return s;
+}
- assert.writeOK(coll.insert({name: 'A', prename: 'B'}));
- assert.writeOK(coll.insert({name: 'A', prename: 'C'}));
- assert.writeOK(coll.insert({name: 'B', prename: 'B'}));
- assert.writeOK(coll.insert({name: 'B', prename: 'D'}));
+assert.writeOK(coll.insert({name: 'A', prename: 'B'}));
+assert.writeOK(coll.insert({name: 'A', prename: 'C'}));
+assert.writeOK(coll.insert({name: 'B', prename: 'B'}));
+assert.writeOK(coll.insert({name: 'B', prename: 'D'}));
- nice({name: 1, prename: 1}, "AB,AC,BB,BD", "s3");
- nice({prename: 1, name: 1}, "AB,BB,AC,BD", "s3");
+nice({name: 1, prename: 1}, "AB,AC,BB,BD", "s3");
+nice({prename: 1, name: 1}, "AB,BB,AC,BD", "s3");
- assert.writeOK(coll.insert({name: 'A'}));
- nice({name: 1, prename: 1}, "A,AB,AC,BB,BD", "e1");
+assert.writeOK(coll.insert({name: 'A'}));
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD", "e1");
- assert.writeOK(coll.insert({name: 'C'}));
- nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2"); // SERVER-282
+assert.writeOK(coll.insert({name: 'C'}));
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2"); // SERVER-282
- assert.commandWorked(coll.ensureIndex({name: 1, prename: 1}));
- nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ia"); // SERVER-282
+assert.commandWorked(coll.ensureIndex({name: 1, prename: 1}));
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ia"); // SERVER-282
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({name: 1}));
- nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ib"); // SERVER-282
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({name: 1}));
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ib"); // SERVER-282
}());
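
sort4.js leans on the rule (see the SERVER-282 references above) that a document missing the sort field sorts as if the field were null, which orders before any string in an ascending sort. A hedged sketch (collection name hypothetical):

    // Sketch: a missing sort field behaves like null and sorts first ascending.
    var demo = db.sort_missing_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({name: 'B'}));
    assert.writeOK(demo.insert({name: 'A', prename: 'Z'}));
    assert.writeOK(demo.insert({name: 'A'}));  // no 'prename'
    var order = demo.find()
                    .sort({name: 1, prename: 1})
                    .toArray()
                    .map(d => d.name + (d.prename || ''));
    assert.eq(['A', 'AZ', 'B'], order);  // bare 'A' precedes 'AZ'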
diff --git a/jstests/core/sort_array.js b/jstests/core/sort_array.js
index 48ccdea93c4..20ae0187693 100644
--- a/jstests/core/sort_array.js
+++ b/jstests/core/sort_array.js
@@ -4,225 +4,193 @@
* Tests for sorting documents by fields that contain arrays.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- let coll = db.jstests_array_sort;
+let coll = db.jstests_array_sort;
- /**
- * Runs a $match-$sort-$project query as both a find and then an aggregate. Asserts that the
- * result set, after being converted to an array, is equal to 'expected'. Also asserts that the
- * find plan uses the SORT stage and the agg plan uses the "$sort" agg stage.
- */
- function testAggAndFindSort({filter, sort, project, hint, expected}) {
- let cursor = coll.find(filter, project).sort(sort);
- assert.eq(cursor.toArray(), expected);
- if (hint) {
- // If there was a hint specified, make sure we get the same results with the hint.
- cursor = coll.find(filter, project).sort(sort).hint(hint);
- assert.eq(cursor.toArray(), expected);
- }
- let explain = coll.find(filter, project).sort(sort).explain();
- assert(planHasStage(db, explain, "SORT"));
-
- let pipeline = [
- {$_internalInhibitOptimization: {}},
- {$match: filter},
- {$sort: sort},
- {$project: project},
- ];
- cursor = coll.aggregate(pipeline);
+/**
+ * Runs a $match-$sort-$project query as both a find and then an aggregate. Asserts that the
+ * result set, after being converted to an array, is equal to 'expected'. Also asserts that the
+ * find plan uses the SORT stage and the agg plan uses the "$sort" agg stage.
+ */
+function testAggAndFindSort({filter, sort, project, hint, expected}) {
+ let cursor = coll.find(filter, project).sort(sort);
+ assert.eq(cursor.toArray(), expected);
+ if (hint) {
+ // If there was a hint specified, make sure we get the same results with the hint.
+ cursor = coll.find(filter, project).sort(sort).hint(hint);
assert.eq(cursor.toArray(), expected);
- explain = coll.explain().aggregate(pipeline);
- assert(aggPlanHasStage(explain, "$sort"));
}
-
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
- assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]}));
-
- // Sanity check that a sort on "_id" is usually pushed down into the query layer, but that
- // $_internalInhibitOptimization prevents this from happening. This makes sure that this test is
- // actually exercising the agg blocking sort implementation.
- let explain = coll.explain().aggregate([{$sort: {_id: 1}}]);
- assert(!aggPlanHasStage(explain, "$sort"));
- explain = coll.explain().aggregate([{$_internalInhibitOptimization: {}}, {$sort: {_id: 1}}]);
- assert(aggPlanHasStage(explain, "$sort"));
-
- // Ascending sort, without an index.
- testAggAndFindSort({
- filter: {a: {$gte: 2}},
- sort: {a: 1},
- project: {_id: 1, a: 1},
- expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
- });
-
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
- assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]}));
-
- // Descending sort, without an index.
- testAggAndFindSort({
- filter: {a: {$gte: 2}},
- sort: {a: -1},
- project: {_id: 1, a: 1},
- expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
- });
-
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
- assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]}));
- assert.commandWorked(coll.createIndex({a: 1}));
-
- // Ascending sort, in the presence of an index. The multikey index should not be used to provide
- // the sort.
- testAggAndFindSort({
- filter: {a: {$gte: 2}},
- sort: {a: 1},
- project: {_id: 1, a: 1},
- expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
- });
-
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
- assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]}));
-
- // Descending sort, in the presence of an index.
- testAggAndFindSort({
- filter: {a: {$gte: 2}},
- sort: {a: -1},
- project: {_id: 1, a: 1},
- expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
- });
-
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, x: [{y: [4, 0, 1], z: 7}, {y: 0, z: 9}]}));
- assert.writeOK(coll.insert({_id: 1, x: [{y: 1, z: 7}, {y: 0, z: [8, 6]}]}));
-
- // Compound mixed ascending/descending sorts, without an index. Sort key for doc with _id: 0 is
- // {'': 0, '': 9}. Sort key for doc with _id: 1 is {'': 0, '': 8}.
- testAggAndFindSort({
- filter: {},
- sort: {"x.y": 1, "x.z": -1},
- project: {_id: 1},
- expected: [{_id: 0}, {_id: 1}]
- });
-
- // Sort key for doc with _id: 0 is {'': 4, '': 7}. Sort key for doc with _id: 1 is {'': 1, '':
- // 7}.
- testAggAndFindSort({
- filter: {},
- sort: {"x.y": -1, "x.z": 1},
- project: {_id: 1},
- expected: [{_id: 0}, {_id: 1}]
- });
-
- assert.commandWorked(coll.createIndex({"x.y": 1, "x.z": -1}));
-
- // Compound mixed ascending/descending sorts, with an index.
- testAggAndFindSort({
- filter: {},
- sort: {"x.y": 1, "x.z": -1},
- project: {_id: 1},
- expected: [{_id: 0}, {_id: 1}]
- });
- testAggAndFindSort({
- filter: {},
- sort: {"x.y": -1, "x.z": 1},
- project: {_id: 1},
- expected: [{_id: 0}, {_id: 1}]
- });
-
- // Test that a multikey index can provide a sort over a non-multikey field.
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
- assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}}));
- explain = coll.find({a: 2}).sort({"b.c": -1}).explain();
- assert(planHasStage(db, explain, "IXSCAN"));
- assert(!planHasStage(db, explain, "SORT"));
-
- const pipeline = [{$match: {a: 2}}, {$sort: {"b.c": -1}}];
+ let explain = coll.find(filter, project).sort(sort).explain();
+ assert(planHasStage(db, explain, "SORT"));
+
+ let pipeline = [
+ {$_internalInhibitOptimization: {}},
+ {$match: filter},
+ {$sort: sort},
+ {$project: project},
+ ];
+ cursor = coll.aggregate(pipeline);
+ assert.eq(cursor.toArray(), expected);
explain = coll.explain().aggregate(pipeline);
- assert(isQueryPlan(explain));
- assert(planHasStage(db, explain, "IXSCAN"));
- assert(!planHasStage(db, explain, "SORT"));
-
- // Test that we can correctly sort by an array field in agg when there are additional fields not
- // involved in the sort pattern.
- coll.drop();
- assert.writeOK(coll.insert(
- {_id: 0, a: 1, b: {c: 1}, d: [{e: {f: 1, g: [6, 5, 4]}}, {e: {g: [3, 2, 1]}}]}));
- assert.writeOK(coll.insert(
- {_id: 1, a: 2, b: {c: 2}, d: [{e: {f: 2, g: [5, 4, 3]}}, {e: {g: [2, 1, 0]}}]}));
-
- testAggAndFindSort(
- {filter: {}, sort: {"d.e.g": 1}, project: {_id: 1}, expected: [{_id: 1}, {_id: 0}]});
-
- // Test a sort over the trailing field of a compound index, where the two fields of the index
- // share a path prefix. This is designed as a regression test for SERVER-31858.
- coll.drop();
- assert.writeOK(coll.insert({_id: 2, a: [{b: 1, c: 2}, {b: 2, c: 3}]}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: 2, c: 0}, {b: 1, c: 4}]}));
- assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 5}, {b: 2, c: 1}]}));
- assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1}));
- testAggAndFindSort({
- filter: {"a.b": 1},
- project: {_id: 1},
- sort: {"a.c": 1},
- expected: [{_id: 0}, {_id: 1}, {_id: 2}]
- });
-
- // Test that an indexed and unindexed sort return the same thing for a path "a.x" which
- // traverses through an array.
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}]}));
- assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}]}));
- assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}]}));
- testAggAndFindSort({
- filter: {},
- project: {_id: 1},
- sort: {"a.x": 1},
- expected: [{_id: 1}, {_id: 0}, {_id: 2}]
- });
- assert.commandWorked(coll.createIndex({"a.x": 1}));
- testAggAndFindSort({
- filter: {},
- project: {_id: 1},
- sort: {"a.x": 1},
- expected: [{_id: 1}, {_id: 0}, {_id: 2}]
- });
- testAggAndFindSort({
- filter: {},
- project: {_id: 1},
- sort: {"a.x": 1},
- hint: {"a.x": 1},
- expected: [{_id: 1}, {_id: 0}, {_id: 2}]
- });
-
- // Now repeat the test with multiple entries along the path "a.x".
- coll.drop();
- assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}, {x: 3}]}));
- assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}, {x: 4}]}));
- assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}, {x: 4}]}));
- testAggAndFindSort({
- filter: {},
- project: {_id: 1},
- sort: {"a.x": 1},
- expected: [{_id: 1}, {_id: 0}, {_id: 2}]
- });
- assert.commandWorked(coll.createIndex({"a.x": 1}));
- testAggAndFindSort({
- filter: {},
- project: {_id: 1},
- sort: {"a.x": 1},
- expected: [{_id: 1}, {_id: 0}, {_id: 2}]
- });
- testAggAndFindSort({
- filter: {},
- project: {_id: 1},
- sort: {"a.x": 1},
- hint: {"a.x": 1},
- expected: [{_id: 1}, {_id: 0}, {_id: 2}]
- });
+ assert(aggPlanHasStage(explain, "$sort"));
+}
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]}));
+
+// Sanity check that a sort on "_id" is usually pushed down into the query layer, but that
+// $_internalInhibitOptimization prevents this from happening. This makes sure that this test is
+// actually exercising the agg blocking sort implementation.
+let explain = coll.explain().aggregate([{$sort: {_id: 1}}]);
+assert(!aggPlanHasStage(explain, "$sort"));
+explain = coll.explain().aggregate([{$_internalInhibitOptimization: {}}, {$sort: {_id: 1}}]);
+assert(aggPlanHasStage(explain, "$sort"));
+
+// Ascending sort, without an index.
+testAggAndFindSort({
+ filter: {a: {$gte: 2}},
+ sort: {a: 1},
+ project: {_id: 1, a: 1},
+ expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
+});
+
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]}));
+
+// Descending sort, without an index.
+testAggAndFindSort({
+ filter: {a: {$gte: 2}},
+ sort: {a: -1},
+ project: {_id: 1, a: 1},
+ expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
+});
+
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]}));
+assert.commandWorked(coll.createIndex({a: 1}));
+
+// Ascending sort, in the presence of an index. The multikey index should not be used to provide
+// the sort.
+testAggAndFindSort({
+ filter: {a: {$gte: 2}},
+ sort: {a: 1},
+ project: {_id: 1, a: 1},
+ expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
+});
+
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]}));
+assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]}));
+
+// Descending sort, in the presence of an index.
+testAggAndFindSort({
+ filter: {a: {$gte: 2}},
+ sort: {a: -1},
+ project: {_id: 1, a: 1},
+ expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}]
+});
+
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, x: [{y: [4, 0, 1], z: 7}, {y: 0, z: 9}]}));
+assert.writeOK(coll.insert({_id: 1, x: [{y: 1, z: 7}, {y: 0, z: [8, 6]}]}));
+
+// Compound mixed ascending/descending sorts, without an index. Sort key for doc with _id: 0 is
+// {'': 0, '': 9}. Sort key for doc with _id: 1 is {'': 0, '': 8}.
+testAggAndFindSort(
+ {filter: {}, sort: {"x.y": 1, "x.z": -1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]});
+
+// Sort key for doc with _id: 0 is {'': 4, '': 7}.
+// Sort key for doc with _id: 1 is {'': 1, '': 7}.
+testAggAndFindSort(
+ {filter: {}, sort: {"x.y": -1, "x.z": 1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]});
+
+assert.commandWorked(coll.createIndex({"x.y": 1, "x.z": -1}));
+
+// Compound mixed ascending/descending sorts, with an index.
+testAggAndFindSort(
+ {filter: {}, sort: {"x.y": 1, "x.z": -1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]});
+testAggAndFindSort(
+ {filter: {}, sort: {"x.y": -1, "x.z": 1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]});
+
+// Test that a multikey index can provide a sort over a non-multikey field.
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1, "b.c": 1}));
+assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}}));
+explain = coll.find({a: 2}).sort({"b.c": -1}).explain();
+assert(planHasStage(db, explain, "IXSCAN"));
+assert(!planHasStage(db, explain, "SORT"));
+
+const pipeline = [{$match: {a: 2}}, {$sort: {"b.c": -1}}];
+explain = coll.explain().aggregate(pipeline);
+assert(isQueryPlan(explain));
+assert(planHasStage(db, explain, "IXSCAN"));
+assert(!planHasStage(db, explain, "SORT"));
+
+// Test that we can correctly sort by an array field in agg when there are additional fields not
+// involved in the sort pattern.
+coll.drop();
+assert.writeOK(
+ coll.insert({_id: 0, a: 1, b: {c: 1}, d: [{e: {f: 1, g: [6, 5, 4]}}, {e: {g: [3, 2, 1]}}]}));
+assert.writeOK(
+ coll.insert({_id: 1, a: 2, b: {c: 2}, d: [{e: {f: 2, g: [5, 4, 3]}}, {e: {g: [2, 1, 0]}}]}));
+
+testAggAndFindSort(
+ {filter: {}, sort: {"d.e.g": 1}, project: {_id: 1}, expected: [{_id: 1}, {_id: 0}]});
+
+// Test a sort over the trailing field of a compound index, where the two fields of the index
+// share a path prefix. This is designed as a regression test for SERVER-31858.
+coll.drop();
+assert.writeOK(coll.insert({_id: 2, a: [{b: 1, c: 2}, {b: 2, c: 3}]}));
+assert.writeOK(coll.insert({_id: 0, a: [{b: 2, c: 0}, {b: 1, c: 4}]}));
+assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 5}, {b: 2, c: 1}]}));
+assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1}));
+testAggAndFindSort({
+ filter: {"a.b": 1},
+ project: {_id: 1},
+ sort: {"a.c": 1},
+ expected: [{_id: 0}, {_id: 1}, {_id: 2}]
+});
+
+// Test that indexed and unindexed sorts return the same results for a path "a.x" which
+// traverses an array.
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}]}));
+assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}]}));
+testAggAndFindSort(
+ {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]});
+assert.commandWorked(coll.createIndex({"a.x": 1}));
+testAggAndFindSort(
+ {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]});
+testAggAndFindSort({
+ filter: {},
+ project: {_id: 1},
+ sort: {"a.x": 1},
+ hint: {"a.x": 1},
+ expected: [{_id: 1}, {_id: 0}, {_id: 2}]
+});
+
+// Now repeat the test with multiple entries along the path "a.x".
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}, {x: 3}]}));
+assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}, {x: 4}]}));
+assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}, {x: 4}]}));
+testAggAndFindSort(
+ {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]});
+assert.commandWorked(coll.createIndex({"a.x": 1}));
+testAggAndFindSort(
+ {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]});
+testAggAndFindSort({
+ filter: {},
+ project: {_id: 1},
+ sort: {"a.x": 1},
+ hint: {"a.x": 1},
+ expected: [{_id: 1}, {_id: 0}, {_id: 2}]
+});
}());
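
The sort keys quoted in the sort_array.js comments follow the multikey rule: an ascending sort keys each document by the smallest element along the path, a descending sort by the largest. A short sketch under that assumption (collection name hypothetical):

    // Sketch: array sort keys take the min element ascending, max descending.
    var demo = db.array_sortkey_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({_id: 0, a: [3, 0, 1]}));   // asc key 0, desc key 3
    assert.writeOK(demo.insert({_id: 1, a: [8, 4, -1]}));  // asc key -1, desc key 8
    assert.eq([1, 0], demo.find().sort({a: 1}).toArray().map(d => d._id));   // -1 < 0
    assert.eq([1, 0], demo.find().sort({a: -1}).toArray().map(d => d._id));  // 8 > 3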
diff --git a/jstests/core/sorta.js b/jstests/core/sorta.js
index 91f36ba3621..f030cc6a673 100644
--- a/jstests/core/sorta.js
+++ b/jstests/core/sorta.js
@@ -1,30 +1,30 @@
// SERVER-2905 sorting with missing fields
(function() {
- 'use strict';
+'use strict';
- var coll = db.jstests_sorta;
- coll.drop();
+var coll = db.jstests_sorta;
+coll.drop();
- const docs = [
- {_id: 0, a: MinKey},
- {_id: 1, a: []},
- {_id: 2, a: []},
- {_id: 3, a: null},
- {_id: 4},
- {_id: 5, a: null},
- {_id: 6, a: 1},
- {_id: 7, a: [2]},
- {_id: 8, a: MaxKey}
- ];
- const bulk = coll.initializeUnorderedBulkOp();
- for (let doc of docs) {
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
+const docs = [
+ {_id: 0, a: MinKey},
+ {_id: 1, a: []},
+ {_id: 2, a: []},
+ {_id: 3, a: null},
+ {_id: 4},
+ {_id: 5, a: null},
+ {_id: 6, a: 1},
+ {_id: 7, a: [2]},
+ {_id: 8, a: MaxKey}
+];
+const bulk = coll.initializeUnorderedBulkOp();
+for (let doc of docs) {
+ bulk.insert(doc);
+}
+assert.writeOK(bulk.execute());
- assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs);
+assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs);
- assert.commandWorked(coll.createIndex({a: 1, _id: 1}));
- assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs);
+assert.commandWorked(coll.createIndex({a: 1, _id: 1}));
+assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs);
})();
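
The expected order in sorta.js encodes two subtleties: an empty array is keyed as BSON undefined for sorting, which orders before null, while a missing field and an explicit null compare equal, so _id breaks the tie. A hedged sketch of that reading (collection name hypothetical):

    // Sketch: [] sorts before null; missing ties with null, broken by _id.
    var demo = db.sorta_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({_id: 0, a: null}));
    assert.writeOK(demo.insert({_id: 1}));  // 'a' missing
    assert.writeOK(demo.insert({_id: 2, a: []}));
    assert.eq([2, 0, 1], demo.find().sort({a: 1, _id: 1}).toArray().map(d => d._id));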
diff --git a/jstests/core/sortc.js b/jstests/core/sortc.js
index 2b1070b8b6b..3b6213a11c7 100644
--- a/jstests/core/sortc.js
+++ b/jstests/core/sortc.js
@@ -1,34 +1,34 @@
// Test sorting with skipping and multiple candidate query plans.
(function() {
- "use strict";
+"use strict";
- const coll = db.jstests_sortc;
- coll.drop();
+const coll = db.jstests_sortc;
+coll.drop();
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
- function checkA(a, sort, skip, query) {
- query = query || {};
- assert.eq(a, coll.find(query).sort(sort).skip(skip)[0].a);
- }
+function checkA(a, sort, skip, query) {
+ query = query || {};
+ assert.eq(a, coll.find(query).sort(sort).skip(skip)[0].a);
+}
- function checkSortAndSkip() {
- checkA(1, {a: 1}, 0);
- checkA(2, {a: 1}, 1);
+function checkSortAndSkip() {
+ checkA(1, {a: 1}, 0);
+ checkA(2, {a: 1}, 1);
- checkA(1, {a: 1}, 0, {a: {$gt: 0}, b: null});
- checkA(2, {a: 1}, 1, {a: {$gt: 0}, b: null});
+ checkA(1, {a: 1}, 0, {a: {$gt: 0}, b: null});
+ checkA(2, {a: 1}, 1, {a: {$gt: 0}, b: null});
- checkA(2, {a: -1}, 0);
- checkA(1, {a: -1}, 1);
+ checkA(2, {a: -1}, 0);
+ checkA(1, {a: -1}, 1);
- checkA(2, {a: -1}, 0, {a: {$gt: 0}, b: null});
- checkA(1, {a: -1}, 1, {a: {$gt: 0}, b: null});
- }
+ checkA(2, {a: -1}, 0, {a: {$gt: 0}, b: null});
+ checkA(1, {a: -1}, 1, {a: {$gt: 0}, b: null});
+}
- checkSortAndSkip();
+checkSortAndSkip();
- assert.commandWorked(coll.createIndex({a: 1}));
- checkSortAndSkip();
+assert.commandWorked(coll.createIndex({a: 1}));
+checkSortAndSkip();
}());
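
sortc.js pins down that skip() composes with sort() identically whether the order comes from a blocking sort or an index scan; a quick sketch (collection name hypothetical):

    // Sketch: sort + skip picks the nth document in sorted order,
    // with or without an index on the sort field.
    var demo = db.sortc_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({a: 1}));
    assert.writeOK(demo.insert({a: 2}));
    assert.eq(2, demo.find().sort({a: 1}).skip(1)[0].a);
    assert.commandWorked(demo.createIndex({a: 1}));
    assert.eq(2, demo.find().sort({a: 1}).skip(1)[0].a);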
diff --git a/jstests/core/sorth.js b/jstests/core/sorth.js
index 9d6519613d3..c096f265882 100644
--- a/jstests/core/sorth.js
+++ b/jstests/core/sorth.js
@@ -1,277 +1,244 @@
// Tests for the $in/sort/limit optimization combined with inequality bounds. SERVER-5777
(function() {
- "use strict";
-
- var t = db.jstests_sorth;
- t.drop();
-
- // These can be set to modify the query run by the helper find().
- var _sort;
- var _limit;
- var _hint;
-
- /**
- * Generate a cursor using global parameters '_sort', '_hint', and '_limit'.
- */
- function find(query) {
- return t.find(query, {_id: 0}).sort(_sort).limit(_limit).hint(_hint);
+"use strict";
+
+var t = db.jstests_sorth;
+t.drop();
+
+// These can be set to modify the query run by the helper find().
+var _sort;
+var _limit;
+var _hint;
+
+/**
+ * Generate a cursor using global parameters '_sort', '_hint', and '_limit'.
+ */
+function find(query) {
+ return t.find(query, {_id: 0}).sort(_sort).limit(_limit).hint(_hint);
+}
+
+/**
+ * Returns true if the elements of 'expectedMatches' match element by element with
+ * 'actualMatches', only considering the fields 'a' and 'b'.
+ *
+ * @param {Array} expectedMatches - expected results from a query.
+ * @param {Array} actualMatches - the actual results from that query.
+ */
+function resultsMatch(expectedMatches, actualMatches) {
+ if (expectedMatches.length !== actualMatches.length) {
+ return false;
}
- /**
- * Returns true if the elements of 'expectedMatches' match element by element with
- * 'actualMatches', only considering the fields 'a' and 'b'.
- *
- * @param {Array} expectedMatches - expected results from a query.
- * @param {Array} actualMatches - the actual results from that query.
- */
- function resultsMatch(expectedMatches, actualMatches) {
- if (expectedMatches.length !== actualMatches.length) {
+ for (var i = 0; i < expectedMatches.length; ++i) {
+ if ((expectedMatches[i].a !== actualMatches[i].a) ||
+ (expectedMatches[i].b !== actualMatches[i].b)) {
return false;
}
-
- for (var i = 0; i < expectedMatches.length; ++i) {
- if ((expectedMatches[i].a !== actualMatches[i].a) ||
- (expectedMatches[i].b !== actualMatches[i].b)) {
- return false;
- }
- }
- return true;
}
-
- /**
- * Asserts that the given query returns results that are expected.
- *
- * @param {Object} options.query - the query to run.
- * @param {Array.<Object>} options.expectedQueryResults - the expected results from the query.
- * @param {Array.<Array>} [options.acceptableQueryResults=[options.expectedQueryResults]] - An
- * array of acceptable outcomes of the query. This can be used if there are multiple results
- * that are considered correct for the query.
- */
- function assertMatches(options) {
- const results = find(options.query).toArray();
- const acceptableQueryResults =
- options.acceptableQueryResults || [options.expectedQueryResults];
- assert.gte(acceptableQueryResults.length, 1);
- for (var i = 0; i < acceptableQueryResults.length; ++i) {
- const validResultSet = acceptableQueryResults[i];
-
- // All results should have the same number of results.
- assert.eq(validResultSet.length,
- results.length,
- "Expected " + results.length + " results from query " +
- tojson(options.query) + " but found " + validResultSet.length);
-
- if (resultsMatch(validResultSet, results)) {
- return;
- }
+ return true;
+}
+
+/**
+ * Asserts that the given query returns results that are expected.
+ *
+ * @param {Object} options.query - the query to run.
+ * @param {Array.<Object>} options.expectedQueryResults - the expected results from the query.
+ * @param {Array.<Array>} [options.acceptableQueryResults=[options.expectedQueryResults]] - An
+ * array of acceptable outcomes of the query. This can be used if there are multiple results
+ * that are considered correct for the query.
+ */
+function assertMatches(options) {
+ const results = find(options.query).toArray();
+ const acceptableQueryResults = options.acceptableQueryResults || [options.expectedQueryResults];
+ assert.gte(acceptableQueryResults.length, 1);
+ for (var i = 0; i < acceptableQueryResults.length; ++i) {
+ const validResultSet = acceptableQueryResults[i];
+
+        // Every acceptable result set should have the same number of results as the query
+        // returned.
+        assert.eq(validResultSet.length,
+                  results.length,
+                  "Expected " + validResultSet.length + " results from query " +
+                      tojson(options.query) + " but found " + results.length);
+
+ if (resultsMatch(validResultSet, results)) {
+ return;
}
- throw new Error("Unexpected results for query " + tojson(options.query) + ": " +
- tojson(results) + ", acceptable results were: " +
- tojson(acceptableQueryResults));
- }
-
- /**
- * Reset data, index, and _sort and _hint globals.
- */
- function reset(sort, index) {
- t.drop();
- t.save({a: 1, b: 1});
- t.save({a: 1, b: 2});
- t.save({a: 1, b: 3});
- t.save({a: 2, b: 0});
- t.save({a: 2, b: 3});
- t.save({a: 2, b: 5});
- t.ensureIndex(index);
- _sort = sort;
- _hint = index;
- }
-
- function checkForwardDirection(options) {
- // All callers specify a sort that is prefixed by b, ascending.
- assert.eq(Object.keys(options.sort)[0], "b");
- assert.eq(options.sort.b, 1);
-
- // None of the callers specify a sort on "a".
- assert(!options.sort.hasOwnProperty("a"));
-
- reset(options.sort, options.index);
-
- _limit = -1;
-
- // Lower bound checks.
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}});
- assertMatches(
- {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gt: 0}}});
- assertMatches(
- {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gte: 1}}});
- assertMatches(
- {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gt: 1}}});
- assertMatches(
- {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gte: 2}}});
-
- // Since we are sorting on the field "b", and the sort specification doesn't include the
- // field "a", any query that is expected to result in a document with a value of 3 for "b"
- // has two acceptable results, since there are two documents with a value of 3 for "b". The
- // same argument applies for all assertions below involving a result with a value of 3 for
- // the field "b".
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$gt: 2}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$gte: 3}}
- });
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 3}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 4}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}});
-
- // Upper bound checks.
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 0}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 1}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 1}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}});
-
- // Lower and upper bounds checks.
- assertMatches({
- expectedQueryResults: [{a: 2, b: 0}],
- query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 0}}
- });
- assertMatches({
- expectedQueryResults: [{a: 2, b: 0}],
- query: {a: {$in: [1, 2]}, b: {$gte: 0, $lt: 1}}
- });
- assertMatches({
- expectedQueryResults: [{a: 2, b: 0}],
- query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 1}}
- });
- assertMatches({
- expectedQueryResults: [{a: 1, b: 1}],
- query: {a: {$in: [1, 2]}, b: {$gt: 0, $lte: 1}}
- });
- assertMatches({
- expectedQueryResults: [{a: 1, b: 2}],
- query: {a: {$in: [1, 2]}, b: {$gte: 2, $lt: 3}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$gte: 2.5, $lte: 3}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$gt: 2.5, $lte: 3}}
- });
-
- // Limit is -2.
- _limit = -2;
- assertMatches({
- expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}],
- query: {a: {$in: [1, 2]}, b: {$gte: 0}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 2}, {a: 2, b: 3}], [{a: 1, b: 2}, {a: 1, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$gt: 1}}
- });
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}});
-
- // With an additional document between the $in values.
- t.save({a: 1.5, b: 3});
- assertMatches({
- expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}],
- query: {a: {$in: [1, 2]}, b: {$gte: 0}}
- });
- }
-
- // Basic test with an index suffix order.
- checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1}});
- // With an additional index field.
- checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: 1}});
- // With an additional reverse direction index field.
- checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: -1}});
- // With an additional ordered index field.
- checkForwardDirection({sort: {b: 1, c: 1}, index: {a: 1, b: 1, c: 1}});
- // With an additional reverse direction ordered index field.
- checkForwardDirection({sort: {b: 1, c: -1}, index: {a: 1, b: 1, c: -1}});
-
- function checkReverseDirection(options) {
- // All callers specify a sort that is prefixed by "b", descending.
- assert.eq(Object.keys(options.sort)[0], "b");
- assert.eq(options.sort.b, -1);
- // None of the callers specify a sort on "a".
- assert(!options.sort.hasOwnProperty("a"));
-
- reset(options.sort, options.index);
- _limit = -1;
-
- // For matching documents, highest value of 'b' is 5.
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}});
- assertMatches(
- {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$lte: 5}}});
- assertMatches({
- expectedQueryResults: [{a: 2, b: 5}],
- query: {a: {$in: [1, 2]}, b: {$lte: 5, $gte: 5}}
- });
-
- // For matching documents, highest value of 'b' is 2.
- assertMatches(
- {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}});
- assertMatches(
- {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3, $gt: 1}}});
-
- // For matching documents, highest value of 'b' is 1.
- assertMatches({
- expectedQueryResults: [{a: 1, b: 1}],
- query: {a: {$in: [1, 2]}, b: {$lt: 2, $gte: 1}}
- });
-
- // These queries expect 3 as the highest value of 'b' among matching documents, but there
- // are two documents with a value of 3 for the field 'b'. Either document is acceptable,
- // since there is no sort order on any other existing fields.
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$lt: 5}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$lt: 3.1}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$lt: 3.5}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$lte: 3}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$lt: 3.5, $gte: 3}}
- });
- assertMatches({
- acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
- query: {a: {$in: [1, 2]}, b: {$lte: 3, $gt: 0}}
- });
}
-
- // With a descending order index.
- checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1}});
- checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: 1}});
- checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: -1}});
- checkReverseDirection({sort: {b: -1, c: 1}, index: {a: 1, b: -1, c: 1}});
- checkReverseDirection({sort: {b: -1, c: -1}, index: {a: 1, b: -1, c: -1}});
+ throw new Error("Unexpected results for query " + tojson(options.query) + ": " +
+ tojson(results) +
+ ", acceptable results were: " + tojson(acceptableQueryResults));
+}
+
+/**
+ * Reset data, index, and _sort and _hint globals.
+ */
+function reset(sort, index) {
+ t.drop();
+ t.save({a: 1, b: 1});
+ t.save({a: 1, b: 2});
+ t.save({a: 1, b: 3});
+ t.save({a: 2, b: 0});
+ t.save({a: 2, b: 3});
+ t.save({a: 2, b: 5});
+ t.ensureIndex(index);
+ _sort = sort;
+ _hint = index;
+}
+
+function checkForwardDirection(options) {
+ // All callers specify a sort that is prefixed by b, ascending.
+ assert.eq(Object.keys(options.sort)[0], "b");
+ assert.eq(options.sort.b, 1);
+
+ // None of the callers specify a sort on "a".
+ assert(!options.sort.hasOwnProperty("a"));
+
+ reset(options.sort, options.index);
+
+ _limit = -1;
+
+ // Lower bound checks.
+ assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}});
+ assertMatches({expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gt: 0}}});
+ assertMatches({expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gte: 1}}});
+ assertMatches({expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gt: 1}}});
+ assertMatches({expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gte: 2}}});
+
+ // Since we are sorting on the field "b", and the sort specification doesn't include the
+ // field "a", any query that is expected to result in a document with a value of 3 for "b"
+ // has two acceptable results, since there are two documents with a value of 3 for "b". The
+ // same argument applies for all assertions below involving a result with a value of 3 for
+ // the field "b".
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$gt: 2}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$gte: 3}}
+ });
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 3}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 4}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}});
+
+ // Upper bound checks.
+ assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 0}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 1}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 1}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}});
+
+ // Lower and upper bounds checks.
+ assertMatches(
+ {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 0}}});
+ assertMatches(
+ {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0, $lt: 1}}});
+ assertMatches(
+ {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 1}}});
+ assertMatches(
+ {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gt: 0, $lte: 1}}});
+ assertMatches(
+ {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gte: 2, $lt: 3}}});
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$gte: 2.5, $lte: 3}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$gt: 2.5, $lte: 3}}
+ });
+
+ // Limit is -2.
+ _limit = -2;
+ assertMatches({
+ expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}],
+ query: {a: {$in: [1, 2]}, b: {$gte: 0}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 2}, {a: 2, b: 3}], [{a: 1, b: 2}, {a: 1, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$gt: 1}}
+ });
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}});
+
+ // With an additional document between the $in values.
+ t.save({a: 1.5, b: 3});
+ assertMatches({
+ expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}],
+ query: {a: {$in: [1, 2]}, b: {$gte: 0}}
+ });
+}
+
+// Basic test with an index suffix order.
+checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1}});
+// With an additional index field.
+checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: 1}});
+// With an additional reverse direction index field.
+checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: -1}});
+// With an additional ordered index field.
+checkForwardDirection({sort: {b: 1, c: 1}, index: {a: 1, b: 1, c: 1}});
+// With an additional reverse direction ordered index field.
+checkForwardDirection({sort: {b: 1, c: -1}, index: {a: 1, b: 1, c: -1}});
+
+function checkReverseDirection(options) {
+ // All callers specify a sort that is prefixed by "b", descending.
+ assert.eq(Object.keys(options.sort)[0], "b");
+ assert.eq(options.sort.b, -1);
+ // None of the callers specify a sort on "a".
+ assert(!options.sort.hasOwnProperty("a"));
+
+ reset(options.sort, options.index);
+ _limit = -1;
+
+ // For matching documents, highest value of 'b' is 5.
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}});
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$lte: 5}}});
+ assertMatches(
+ {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$lte: 5, $gte: 5}}});
+
+ // For matching documents, highest value of 'b' is 2.
+ assertMatches({expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}});
+ assertMatches(
+ {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3, $gt: 1}}});
+
+ // For matching documents, highest value of 'b' is 1.
+ assertMatches(
+ {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$lt: 2, $gte: 1}}});
+
+ // These queries expect 3 as the highest value of 'b' among matching documents, but there
+ // are two documents with a value of 3 for the field 'b'. Either document is acceptable,
+ // since there is no sort order on any other existing fields.
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$lt: 5}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$lt: 3.1}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$lt: 3.5}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$lte: 3}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$lt: 3.5, $gte: 3}}
+ });
+ assertMatches({
+ acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]],
+ query: {a: {$in: [1, 2]}, b: {$lte: 3, $gt: 0}}
+ });
+}
+
+// With a descending order index.
+checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1}});
+checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: 1}});
+checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: -1}});
+checkReverseDirection({sort: {b: -1, c: 1}, index: {a: 1, b: -1, c: 1}});
+checkReverseDirection({sort: {b: -1, c: -1}, index: {a: 1, b: -1, c: -1}});
}());
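
A note for readers of sorth.js: _limit is negative throughout because a negative limit in the shell requests a single batch of at most that many documents, so limit(-1) returns at most one result, matching the one-element expected sets. A sketch of the $in/sort/limit pattern (SERVER-5777) the test exercises (collection name hypothetical):

    // Sketch: $in + sort on the index suffix + limit(-1) yields the single
    // smallest match across all $in bounds.
    var demo = db.sorth_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({a: 1, b: 1}));
    assert.writeOK(demo.insert({a: 2, b: 0}));
    assert.commandWorked(demo.createIndex({a: 1, b: 1}));
    var first = demo.find({a: {$in: [1, 2]}, b: {$gte: 0}}).sort({b: 1}).limit(-1).next();
    assert.eq(0, first.b);  // {a: 2, b: 0} has the smallest 'b'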
diff --git a/jstests/core/sortl.js b/jstests/core/sortl.js
index 247a175a6f0..d0d94473460 100644
--- a/jstests/core/sortl.js
+++ b/jstests/core/sortl.js
@@ -1,36 +1,36 @@
// Tests equality query on _id with a sort, intended to be tested on both mongos and mongod. For
// SERVER-20641.
(function() {
- 'use strict';
- var coll = db.sortl;
- coll.drop();
+'use strict';
+var coll = db.sortl;
+coll.drop();
- assert.writeOK(coll.insert({_id: 1, a: 2}));
- var res = coll.find({_id: 1}).sort({a: 1});
- assert.eq(res.next(), {_id: 1, a: 2});
- assert.eq(res.hasNext(), false);
+assert.writeOK(coll.insert({_id: 1, a: 2}));
+var res = coll.find({_id: 1}).sort({a: 1});
+assert.eq(res.next(), {_id: 1, a: 2});
+assert.eq(res.hasNext(), false);
- res = coll.find({_id: 1}, {b: {$meta: "sortKey"}}).sort({a: 1});
- assert.eq(res.next(), {_id: 1, a: 2, b: {"": 2}});
- assert.eq(res.hasNext(), false);
+res = coll.find({_id: 1}, {b: {$meta: "sortKey"}}).sort({a: 1});
+assert.eq(res.next(), {_id: 1, a: 2, b: {"": 2}});
+assert.eq(res.hasNext(), false);
- res = db.runCommand({
- findAndModify: coll.getName(),
- query: {_id: 1},
- update: {$set: {b: 1}},
- sort: {a: 1},
- fields: {c: {$meta: "sortKey"}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey update");
+res = db.runCommand({
+ findAndModify: coll.getName(),
+ query: {_id: 1},
+ update: {$set: {b: 1}},
+ sort: {a: 1},
+ fields: {c: {$meta: "sortKey"}}
+});
+assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey update");
- res = db.runCommand({
- findAndModify: coll.getName(),
- query: {_id: 1},
- remove: true,
- sort: {b: 1},
- fields: {c: {$meta: "sortKey"}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey delete");
+res = db.runCommand({
+ findAndModify: coll.getName(),
+ query: {_id: 1},
+ remove: true,
+ sort: {b: 1},
+ fields: {c: {$meta: "sortKey"}}
+});
+assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey delete");
- coll.drop();
+coll.drop();
})();
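
Beyond the equality-on-_id case, sortl.js shows the $meta: "sortKey" projection, which surfaces the computed sort key as an object whose field names are empty strings, one per sort component. A minimal sketch (collection name hypothetical):

    // Sketch: {$meta: "sortKey"} exposes the key the sort actually used.
    var demo = db.sortkey_demo;  // hypothetical collection name
    demo.drop();
    assert.writeOK(demo.insert({_id: 1, a: 2}));
    var doc = demo.find({_id: 1}, {b: {$meta: "sortKey"}}).sort({a: 1}).next();
    assert.eq({'': 2}, doc.b);  // one empty-named field per sort component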
diff --git a/jstests/core/sparse_index_supports_ne_null.js b/jstests/core/sparse_index_supports_ne_null.js
index a46616bbad9..590bacc5021 100644
--- a/jstests/core/sparse_index_supports_ne_null.js
+++ b/jstests/core/sparse_index_supports_ne_null.js
@@ -8,200 +8,193 @@
* @tags: [assumes_unsharded_collection]
*/
(function() {
- "use strict";
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+"use strict";
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- const coll = db.sparse_index_supports_ne_null;
- coll.drop();
+const coll = db.sparse_index_supports_ne_null;
+coll.drop();
- function checkQuery({query, shouldUseIndex, nResultsExpected, indexKeyPattern}) {
- const explain = assert.commandWorked(coll.find(query).explain());
- const ixScans = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN");
+function checkQuery({query, shouldUseIndex, nResultsExpected, indexKeyPattern}) {
+ const explain = assert.commandWorked(coll.find(query).explain());
+ const ixScans = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN");
- if (shouldUseIndex) {
- assert.gte(ixScans.length, 1, explain);
- assert.eq(ixScans[0].keyPattern, indexKeyPattern);
- } else {
- assert.eq(ixScans.length, 0, explain);
- }
-
- assert.eq(coll.find(query).itcount(), nResultsExpected);
+ if (shouldUseIndex) {
+ assert.gte(ixScans.length, 1, explain);
+ assert.eq(ixScans[0].keyPattern, indexKeyPattern);
+ } else {
+ assert.eq(ixScans.length, 0, explain);
}
- // Non compound case.
- (function() {
- const query = {a: {$ne: null}};
- const elemMatchQuery = {a: {$elemMatch: {$ne: null}}};
- const keyPattern = {a: 1};
-
- assert.commandWorked(coll.insert({a: 1}));
- assert.commandWorked(coll.insert({a: {x: 1}}));
- assert.commandWorked(coll.insert({a: null}));
- assert.commandWorked(coll.insert({a: undefined}));
-
- assert.commandWorked(coll.createIndex(keyPattern, {sparse: true}));
-
- // Be sure the index is used.
- checkQuery(
- {query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern});
- checkQuery({
- query: elemMatchQuery,
- shouldUseIndex: true,
- nResultsExpected: 0,
- indexKeyPattern: keyPattern
- });
-
- // When the index becomes multikey, it cannot support {$ne: null} queries.
- assert.commandWorked(coll.insert({a: [1, 2, 3]}));
- checkQuery({
- query: query,
- shouldUseIndex: false,
- nResultsExpected: 3,
- indexKeyPattern: keyPattern
- });
- // But it can support queries with {$ne: null} within an $elemMatch.
- checkQuery({
- query: elemMatchQuery,
- shouldUseIndex: true,
- nResultsExpected: 1,
- indexKeyPattern: keyPattern
- });
- })();
-
- // Compound case.
- (function() {
- const query = {a: {$ne: null}};
- const elemMatchQuery = {a: {$elemMatch: {$ne: null}}};
- const keyPattern = {a: 1, b: 1};
-
- coll.drop();
- assert.commandWorked(coll.insert({a: 1, b: 1}));
- assert.commandWorked(coll.insert({a: {x: 1}, b: 1}));
- assert.commandWorked(coll.insert({a: null, b: 1}));
- assert.commandWorked(coll.insert({a: undefined, b: 1}));
-
- assert.commandWorked(coll.createIndex(keyPattern, {sparse: true}));
-
- // Be sure the index is used.
- checkQuery(
- {query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern});
- checkQuery({
- query: elemMatchQuery,
- shouldUseIndex: true,
- nResultsExpected: 0,
- indexKeyPattern: keyPattern
- });
-
- // When the index becomes multikey on the second field, it should still be usable.
- assert.commandWorked(coll.insert({a: 1, b: [1, 2, 3]}));
- checkQuery(
- {query: query, shouldUseIndex: true, nResultsExpected: 3, indexKeyPattern: keyPattern});
- checkQuery({
- query: elemMatchQuery,
- shouldUseIndex: true,
- nResultsExpected: 0,
- indexKeyPattern: keyPattern
- });
-
- // When the index becomes multikey on the first field, it should no longer be usable.
- assert.commandWorked(coll.insert({a: [1, 2, 3], b: 1}));
- checkQuery({
- query: query,
- shouldUseIndex: false,
- nResultsExpected: 4,
- indexKeyPattern: keyPattern
- });
- // Queries which use a $elemMatch should still be able to use the index.
- checkQuery({
- query: elemMatchQuery,
- shouldUseIndex: true,
- nResultsExpected: 1,
- indexKeyPattern: keyPattern
- });
- })();
-
- // Nested field multikey with $elemMatch.
- (function() {
- const keyPattern = {"a.b.c.d": 1};
- coll.drop();
- assert.commandWorked(coll.insert({a: {b: [{c: {d: 1}}]}}));
- assert.commandWorked(coll.insert({a: {b: [{c: {d: {e: 1}}}]}}));
- assert.commandWorked(coll.insert({a: {b: [{c: {d: null}}]}}));
- assert.commandWorked(coll.insert({a: {b: [{c: {d: undefined}}]}}));
-
- assert.commandWorked(coll.createIndex(keyPattern, {sparse: true}));
-
- const query = {"a.b.c.d": {$ne: null}};
- // $elemMatch object can only use the index when none of the paths below the $elemMatch is
- // not multikey.
- const elemMatchObjectQuery = {"a.b": {$elemMatch: {"c.d": {$ne: null}}}};
- // $elemMatch value can always use the index.
- const elemMatchValueQuery = {"a.b.c.d": {$elemMatch: {$ne: null}}};
-
- // 'a.b' is multikey, so the index isn't used.
- checkQuery({
- query: query,
- shouldUseIndex: false,
- nResultsExpected: 2,
- indexKeyPattern: keyPattern
- });
- // Since the multikey portion is above the $elemMatch, the $elemMatch query may use the
- // index.
- checkQuery({
- query: elemMatchObjectQuery,
- shouldUseIndex: true,
- nResultsExpected: 2,
- indexKeyPattern: keyPattern
- });
- checkQuery({
- query: elemMatchValueQuery,
- shouldUseIndex: true,
- nResultsExpected: 0,
- indexKeyPattern: keyPattern
- });
-
- // Make the index become multikey on 'a' (another field above the $elemMatch).
- assert.commandWorked(coll.insert({a: [{b: [{c: {d: 1}}]}]}));
- checkQuery({
- query: query,
- shouldUseIndex: false,
- nResultsExpected: 3,
- indexKeyPattern: keyPattern
- });
- // The only multikey paths are still above the $elemMatch, queries which use a $elemMatch
- // should still be able to use the index.
- checkQuery({
- query: elemMatchObjectQuery,
- shouldUseIndex: true,
- nResultsExpected: 3,
- indexKeyPattern: keyPattern
- });
- checkQuery({
- query: elemMatchValueQuery,
- shouldUseIndex: true,
- nResultsExpected: 0,
- indexKeyPattern: keyPattern
- });
-
- // Make the index multikey for 'a.b.c'. Now the $elemMatch query may not use the index.
- assert.commandWorked(coll.insert({a: {b: [{c: [{d: 1}]}]}}));
- checkQuery({
- query: query,
- shouldUseIndex: false,
- nResultsExpected: 4,
- indexKeyPattern: keyPattern
- });
- checkQuery({
- query: elemMatchObjectQuery,
- shouldUseIndex: false,
- nResultsExpected: 4,
- indexKeyPattern: keyPattern
- });
- checkQuery({
- query: elemMatchValueQuery,
- shouldUseIndex: true,
- nResultsExpected: 0,
- indexKeyPattern: keyPattern
- });
- })();
+ assert.eq(coll.find(query).itcount(), nResultsExpected);
+}
+
+// Non-compound case.
+(function() {
+const query = {
+ a: {$ne: null}
+};
+const elemMatchQuery = {
+ a: {$elemMatch: {$ne: null}}
+};
+const keyPattern = {
+ a: 1
+};
+
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: {x: 1}}));
+assert.commandWorked(coll.insert({a: null}));
+assert.commandWorked(coll.insert({a: undefined}));
+
+assert.commandWorked(coll.createIndex(keyPattern, {sparse: true}));
+
+// Be sure the index is used.
+checkQuery({query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern});
+checkQuery({
+ query: elemMatchQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 0,
+ indexKeyPattern: keyPattern
+});
+
+// When the index becomes multikey, it cannot support {$ne: null} queries.
+assert.commandWorked(coll.insert({a: [1, 2, 3]}));
+checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 3, indexKeyPattern: keyPattern});
+// But it can support queries with {$ne: null} within an $elemMatch.
+checkQuery({
+ query: elemMatchQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 1,
+ indexKeyPattern: keyPattern
+});
+})();
+
+// Compound case.
+(function() {
+const query = {
+ a: {$ne: null}
+};
+const elemMatchQuery = {
+ a: {$elemMatch: {$ne: null}}
+};
+const keyPattern = {
+ a: 1,
+ b: 1
+};
+
+coll.drop();
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: {x: 1}, b: 1}));
+assert.commandWorked(coll.insert({a: null, b: 1}));
+assert.commandWorked(coll.insert({a: undefined, b: 1}));
+
+assert.commandWorked(coll.createIndex(keyPattern, {sparse: true}));
+
+// Be sure the index is used.
+checkQuery({query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern});
+checkQuery({
+ query: elemMatchQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 0,
+ indexKeyPattern: keyPattern
+});
+
+// When the index becomes multikey on the second field, it should still be usable.
+assert.commandWorked(coll.insert({a: 1, b: [1, 2, 3]}));
+checkQuery({query: query, shouldUseIndex: true, nResultsExpected: 3, indexKeyPattern: keyPattern});
+checkQuery({
+ query: elemMatchQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 0,
+ indexKeyPattern: keyPattern
+});
+
+// When the index becomes multikey on the first field, it should no longer be usable.
+assert.commandWorked(coll.insert({a: [1, 2, 3], b: 1}));
+checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 4, indexKeyPattern: keyPattern});
+// Queries which use a $elemMatch should still be able to use the index.
+checkQuery({
+ query: elemMatchQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 1,
+ indexKeyPattern: keyPattern
+});
+})();
+
+// Nested field multikey with $elemMatch.
+(function() {
+const keyPattern = {
+ "a.b.c.d": 1
+};
+coll.drop();
+assert.commandWorked(coll.insert({a: {b: [{c: {d: 1}}]}}));
+assert.commandWorked(coll.insert({a: {b: [{c: {d: {e: 1}}}]}}));
+assert.commandWorked(coll.insert({a: {b: [{c: {d: null}}]}}));
+assert.commandWorked(coll.insert({a: {b: [{c: {d: undefined}}]}}));
+
+assert.commandWorked(coll.createIndex(keyPattern, {sparse: true}));
+
+const query = {
+ "a.b.c.d": {$ne: null}
+};
+// $elemMatch object can only use the index when none of the paths below the $elemMatch
+// is multikey.
+const elemMatchObjectQuery = {
+ "a.b": {$elemMatch: {"c.d": {$ne: null}}}
+};
+// $elemMatch value can always use the index.
+const elemMatchValueQuery = {
+ "a.b.c.d": {$elemMatch: {$ne: null}}
+};
+
+// 'a.b' is multikey, so the index isn't used.
+checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 2, indexKeyPattern: keyPattern});
+// Since the multikey portion is above the $elemMatch, the $elemMatch query may use the
+// index.
+checkQuery({
+ query: elemMatchObjectQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 2,
+ indexKeyPattern: keyPattern
+});
+checkQuery({
+ query: elemMatchValueQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 0,
+ indexKeyPattern: keyPattern
+});
+
+// Make the index become multikey on 'a' (another field above the $elemMatch).
+assert.commandWorked(coll.insert({a: [{b: [{c: {d: 1}}]}]}));
+checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 3, indexKeyPattern: keyPattern});
+// The only multikey paths are still above the $elemMatch, so queries which use a $elemMatch
+// should still be able to use the index.
+checkQuery({
+ query: elemMatchObjectQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 3,
+ indexKeyPattern: keyPattern
+});
+checkQuery({
+ query: elemMatchValueQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 0,
+ indexKeyPattern: keyPattern
+});
+
+// Make the index multikey for 'a.b.c'. Now the $elemMatch query may not use the index.
+assert.commandWorked(coll.insert({a: {b: [{c: [{d: 1}]}]}}));
+checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 4, indexKeyPattern: keyPattern});
+checkQuery({
+ query: elemMatchObjectQuery,
+ shouldUseIndex: false,
+ nResultsExpected: 4,
+ indexKeyPattern: keyPattern
+});
+checkQuery({
+ query: elemMatchValueQuery,
+ shouldUseIndex: true,
+ nResultsExpected: 0,
+ indexKeyPattern: keyPattern
+});
+})();
})();
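
The invariant sparse_index_supports_ne_null.js establishes: a sparse index omits exactly the documents whose indexed field is missing, which is a subset of what {$ne: null} excludes, so the index remains usable for that predicate until the queried path becomes multikey. A minimal sketch (collection name hypothetical):

    // Sketch: a sparse index can answer {a: {$ne: null}} while 'a' is not
    // multikey, because docs missing 'a' are absent from the index anyway.
    load("jstests/libs/analyze_plan.js");  // for getPlanStages
    var demo = db.sparse_ne_null_demo;  // hypothetical collection name
    demo.drop();
    assert.commandWorked(demo.insert({a: 1}));
    assert.commandWorked(demo.createIndex({a: 1}, {sparse: true}));
    var plan = demo.find({a: {$ne: null}}).explain().queryPlanner.winningPlan;
    assert.gte(getPlanStages(plan, "IXSCAN").length, 1);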
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index 1c66e463f43..0da9f636a3b 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -13,103 +13,101 @@
load('jstests/aggregation/extras/utils.js');
(function() {
- 'use strict';
+'use strict';
- // Check that smallArray is entirely contained by largeArray
- // returns false if a member of smallArray is not in largeArray
- function arrayIsSubset(smallArray, largeArray) {
- for (var i = 0; i < smallArray.length; i++) {
- if (!Array.contains(largeArray, smallArray[i])) {
- print("Could not find " + smallArray[i] + " in largeArray");
- return false;
- }
+// Check that smallArray is entirely contained by largeArray;
+// returns false if a member of smallArray is not in largeArray.
+function arrayIsSubset(smallArray, largeArray) {
+ for (var i = 0; i < smallArray.length; i++) {
+ if (!Array.contains(largeArray, smallArray[i])) {
+ print("Could not find " + smallArray[i] + " in largeArray");
+ return false;
}
-
- return true;
}
- // Test startup_log
- var stats = db.getSisterDB("local").startup_log.stats();
- assert(stats.capped);
+ return true;
+}
- var latestStartUpLog =
- db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
- var serverStatus = db._adminCommand("serverStatus");
- var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
+// Test startup_log
+var stats = db.getSisterDB("local").startup_log.stats();
+assert(stats.capped);
- // Test that the startup log has the expected keys
- var verbose = false;
- var expectedKeys =
- ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
- var keys = Object.keySet(latestStartUpLog);
- assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
+var latestStartUpLog =
+ db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
+var serverStatus = db._adminCommand("serverStatus");
+var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
- // Tests _id implicitly - should be comprised of host-timestamp
- // Setup expected startTime and startTimeLocal from the supplied timestamp
- var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
- var _idUptime = _id.pop();
- var _idHost = _id.join('-');
- var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
- var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
+// Test that the startup log has the expected keys
+var verbose = false;
+var expectedKeys =
+ ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
+var keys = Object.keySet(latestStartUpLog);
+assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
- assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
- assert.eq(serverStatus.host.split(':')[0],
- latestStartUpLog.hostname,
- "Hostname doesn't match one in server status");
- assert.closeWithinMS(startTime,
- latestStartUpLog.startTime,
- "StartTime doesn't match one from _id",
- 2000); // Expect less than 2 sec delta
- assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
- assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
+// Tests _id implicitly - should be composed of host-timestamp
+// Set up expected startTime and startTimeLocal from the supplied timestamp
+var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
+var _idUptime = _id.pop();
+var _idHost = _id.join('-');
+var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
+var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
- // Test buildinfo
- var buildinfo = db.runCommand("buildinfo");
- delete buildinfo.ok; // Delete extra meta info not in startup_log
- delete buildinfo.operationTime; // Delete extra meta info not in startup_log
- delete buildinfo.$clusterTime; // Delete extra meta info not in startup_log
- var isMaster = db._adminCommand("ismaster");
+assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
+assert.eq(serverStatus.host.split(':')[0],
+ latestStartUpLog.hostname,
+ "Hostname doesn't match one in server status");
+assert.closeWithinMS(startTime,
+ latestStartUpLog.startTime,
+ "StartTime doesn't match one from _id",
+ 2000); // Expect less than 2 sec delta
+assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
+assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
- // Test buildinfo has the expected keys
- var expectedKeys = [
- "version",
- "gitVersion",
- "allocator",
- "versionArray",
- "javascriptEngine",
- "openssl",
- "buildEnvironment",
- "debug",
- "maxBsonObjectSize",
- "bits",
- "modules"
- ];
+// Test buildinfo
+var buildinfo = db.runCommand("buildinfo");
+delete buildinfo.ok; // Delete extra meta info not in startup_log
+delete buildinfo.operationTime; // Delete extra meta info not in startup_log
+delete buildinfo.$clusterTime; // Delete extra meta info not in startup_log
+var isMaster = db._adminCommand("ismaster");
- var keys = Object.keySet(latestStartUpLog.buildinfo);
- // Disabled to check
- assert(arrayIsSubset(expectedKeys, keys),
- "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
- assert.eq(buildinfo,
- latestStartUpLog.buildinfo,
- "buildinfo doesn't match that from buildinfo command");
+// Test buildinfo has the expected keys
+var expectedKeys = [
+ "version",
+ "gitVersion",
+ "allocator",
+ "versionArray",
+ "javascriptEngine",
+ "openssl",
+ "buildEnvironment",
+ "debug",
+ "maxBsonObjectSize",
+ "bits",
+ "modules"
+];
- // Test version and version Array
- var version = latestStartUpLog.buildinfo.version.split('-')[0];
- var versionArray = latestStartUpLog.buildinfo.versionArray;
- var versionArrayCleaned = versionArray.slice(0, 3);
- if (versionArray[3] == -100) {
- versionArrayCleaned[2] -= 1;
- }
+var keys = Object.keySet(latestStartUpLog.buildinfo);
+// Check that the expected keys are a subset of the actual buildinfo keys.
+assert(arrayIsSubset(expectedKeys, keys),
+ "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
+assert.eq(
+ buildinfo, latestStartUpLog.buildinfo, "buildinfo doesn't match that from buildinfo command");
- assert.eq(serverStatus.version,
- latestStartUpLog.buildinfo.version,
- "Mongo version doesn't match that from ServerStatus");
- assert.eq(
- version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
- var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
- assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
- assert.eq(isMaster.maxBsonObjectSize,
- latestStartUpLog.buildinfo.maxBsonObjectSize,
- "maxBsonObjectSize doesn't match one from ismaster");
+// Test version and versionArray
+var version = latestStartUpLog.buildinfo.version.split('-')[0];
+var versionArray = latestStartUpLog.buildinfo.versionArray;
+var versionArrayCleaned = versionArray.slice(0, 3);
+if (versionArray[3] == -100) {
+ versionArrayCleaned[2] -= 1;
+}
+assert.eq(serverStatus.version,
+ latestStartUpLog.buildinfo.version,
+ "Mongo version doesn't match that from ServerStatus");
+assert.eq(
+ version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
+var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
+assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
+assert.eq(isMaster.maxBsonObjectSize,
+ latestStartUpLog.buildinfo.maxBsonObjectSize,
+ "maxBsonObjectSize doesn't match one from ismaster");
})();
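
The _id parsing in this test relies on the startup_log convention that _id is the hostname joined by '-' to the startup time in milliseconds since the epoch. A minimal standalone sketch of reading the newest entry, assuming a mongo shell connected to a mongod with a populated local.startup_log:

    // {$natural: -1} walks the capped collection in reverse insertion order.
    var entry = db.getSiblingDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
    // _id is "<hostname>-<startTimeMillis>"; the hostname itself may contain '-',
    // so pop the timestamp off the end and rejoin the rest.
    var parts = entry._id.split('-');
    var startMillis = parseInt(parts.pop(), 10);
    print(parts.join('-') + " started at " + new Date(startMillis));
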
diff --git a/jstests/core/tailable_cursor_invalidation.js b/jstests/core/tailable_cursor_invalidation.js
index 97ea96bb8d0..2424bce64f7 100644
--- a/jstests/core/tailable_cursor_invalidation.js
+++ b/jstests/core/tailable_cursor_invalidation.js
@@ -3,70 +3,69 @@
// Tests for the behavior of tailable cursors when a collection is dropped or the cursor is
// otherwise invalidated.
(function() {
- "use strict";
+"use strict";
- const collName = "tailable_cursor_invalidation";
- const coll = db[collName];
- coll.drop();
+const collName = "tailable_cursor_invalidation";
+const coll = db[collName];
+coll.drop();
- // Test that you cannot open a tailable cursor on a non-existent collection.
- assert.eq(0, assert.commandWorked(db.runCommand({find: collName})).cursor.id);
- assert.eq(0, assert.commandWorked(db.runCommand({find: collName, tailable: true})).cursor.id);
- assert.eq(0,
- assert.commandWorked(db.runCommand({find: collName, tailable: true, awaitData: true}))
- .cursor.id);
- const emptyBatchCursorId =
- assert
- .commandWorked(
- db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: 0}))
- .cursor.id;
- const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
- if (isMongos) {
- // Mongos will let you establish a cursor with batch size 0 and return to you before it
- // realizes the shard's cursor is exhausted. The next getMore should return a 0 cursor id
- // though.
- assert.neq(emptyBatchCursorId, 0);
- assert.eq(
- 0,
- assert.commandWorked(db.runCommand({getMore: emptyBatchCursorId, collection: collName}))
- .cursor.id);
- } else {
- // A mongod should know immediately that the collection doesn't exist, and return a 0 cursor
- // id.
- assert.eq(0, emptyBatchCursorId);
- }
+// Test that you cannot open a tailable cursor on a non-existent collection.
+assert.eq(0, assert.commandWorked(db.runCommand({find: collName})).cursor.id);
+assert.eq(0, assert.commandWorked(db.runCommand({find: collName, tailable: true})).cursor.id);
+assert.eq(0,
+ assert.commandWorked(db.runCommand({find: collName, tailable: true, awaitData: true}))
+ .cursor.id);
+const emptyBatchCursorId = assert
+ .commandWorked(db.runCommand(
+ {find: collName, tailable: true, awaitData: true, batchSize: 0}))
+ .cursor.id;
+const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
+if (isMongos) {
+    // Mongos will let you establish a cursor with batch size 0 and return control to the
+    // client before it realizes the shard's cursor is exhausted. The next getMore should
+    // return a cursor id of 0, though.
+ assert.neq(emptyBatchCursorId, 0);
+ assert.eq(
+ 0,
+ assert.commandWorked(db.runCommand({getMore: emptyBatchCursorId, collection: collName}))
+ .cursor.id);
+} else {
+ // A mongod should know immediately that the collection doesn't exist, and return a 0 cursor
+ // id.
+ assert.eq(0, emptyBatchCursorId);
+}
- function dropAndRecreateColl() {
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
- const numDocs = 4;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+function dropAndRecreateColl() {
+ coll.drop();
+ assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
+ const numDocs = 4;
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: i});
}
- dropAndRecreateColl();
+ assert.writeOK(bulk.execute());
+}
+dropAndRecreateColl();
- /**
- * Runs a find command to establish a cursor. Asserts that the command worked and that the
- * cursor id is not 0, then returns the cursor id.
- */
- function openCursor({tailable, awaitData}) {
- const findRes = assert.commandWorked(
- db.runCommand({find: collName, tailable: tailable, awaitData: awaitData}));
- assert.neq(findRes.cursor.id, 0);
- assert.eq(findRes.cursor.ns, coll.getFullName());
- return findRes.cursor.id;
- }
+/**
+ * Runs a find command to establish a cursor. Asserts that the command worked and that the
+ * cursor id is not 0, then returns the cursor id.
+ */
+function openCursor({tailable, awaitData}) {
+ const findRes = assert.commandWorked(
+ db.runCommand({find: collName, tailable: tailable, awaitData: awaitData}));
+ assert.neq(findRes.cursor.id, 0);
+ assert.eq(findRes.cursor.ns, coll.getFullName());
+ return findRes.cursor.id;
+}
- // Test that the cursor dies on getMore if the collection has been dropped.
- let cursorId = openCursor({tailable: true, awaitData: false});
- dropAndRecreateColl();
- assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
- ErrorCodes.QueryPlanKilled);
- cursorId = openCursor({tailable: true, awaitData: true});
- dropAndRecreateColl();
- assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
- ErrorCodes.QueryPlanKilled);
+// Test that the cursor dies on getMore if the collection has been dropped.
+let cursorId = openCursor({tailable: true, awaitData: false});
+dropAndRecreateColl();
+assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
+ ErrorCodes.QueryPlanKilled);
+cursorId = openCursor({tailable: true, awaitData: true});
+dropAndRecreateColl();
+assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
+ ErrorCodes.QueryPlanKilled);
}());
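
For a client, the QueryPlanKilled errors asserted above are the cue to re-establish a tailable cursor after a drop. A sketch of that recovery pattern, reusing the collName handle from this test; tailOnce is a hypothetical helper, not part of the test suite:

    // Pull one batch from a tailable cursor, re-opening it if it was invalidated.
    function tailOnce(collName, cursorId) {
        const res = db.runCommand({getMore: cursorId, collection: collName});
        if (res.ok === 1) {
            return {cursorId: res.cursor.id, docs: res.cursor.nextBatch};
        }
        if (res.code === ErrorCodes.QueryPlanKilled || res.code === ErrorCodes.CursorNotFound) {
            // The collection was dropped out from under us; start a fresh cursor.
            const findRes = assert.commandWorked(
                db.runCommand({find: collName, tailable: true, awaitData: true}));
            return {cursorId: findRes.cursor.id, docs: findRes.cursor.firstBatch};
        }
        throw Error("unexpected getMore failure: " + tojson(res));
    }
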
diff --git a/jstests/core/tailable_getmore_batch_size.js b/jstests/core/tailable_getmore_batch_size.js
index 9e96f6f68a3..466fa25a686 100644
--- a/jstests/core/tailable_getmore_batch_size.js
+++ b/jstests/core/tailable_getmore_batch_size.js
@@ -3,94 +3,94 @@
// Tests for the behavior of combining the tailable and awaitData options to the getMore command
// with the batchSize option.
(function() {
- "use strict";
+"use strict";
- const collName = "tailable_getmore_batch_size";
- const coll = db[collName];
- const batchSize = 2;
+const collName = "tailable_getmore_batch_size";
+const coll = db[collName];
+const batchSize = 2;
- function dropAndRecreateColl({numDocs}) {
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+function dropAndRecreateColl({numDocs}) {
+ coll.drop();
+ assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: i});
}
+ assert.writeOK(bulk.execute());
+}
- // Test that running a find with the 'tailable' option will return results immediately, even if
- // there are fewer than the specified batch size.
- dropAndRecreateColl({numDocs: batchSize - 1});
- let findRes =
- assert.commandWorked(db.runCommand({find: collName, tailable: true, batchSize: batchSize}));
- assert.eq(findRes.cursor.firstBatch.length, batchSize - 1);
- assert.neq(findRes.cursor.id, 0);
- // Test that the same is true for a find with the 'tailable' and 'awaitData' options set.
- findRes = assert.commandWorked(
- db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: batchSize}));
- assert.eq(findRes.cursor.firstBatch.length, batchSize - 1);
- assert.neq(findRes.cursor.id, 0);
+// Test that running a find with the 'tailable' option will return results immediately, even if
+// there are fewer results than the specified batch size.
+dropAndRecreateColl({numDocs: batchSize - 1});
+let findRes =
+ assert.commandWorked(db.runCommand({find: collName, tailable: true, batchSize: batchSize}));
+assert.eq(findRes.cursor.firstBatch.length, batchSize - 1);
+assert.neq(findRes.cursor.id, 0);
+// Test that the same is true for a find with the 'tailable' and 'awaitData' options set.
+findRes = assert.commandWorked(
+ db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: batchSize}));
+assert.eq(findRes.cursor.firstBatch.length, batchSize - 1);
+assert.neq(findRes.cursor.id, 0);
- /**
- * Runs a find command with a batchSize of 'batchSize' to establish a cursor. Asserts that the
- * command worked and that the cursor id is not 0, then returns the cursor id.
- */
- function openCursor({batchSize, tailable, awaitData}) {
- const findRes = assert.commandWorked(db.runCommand(
- {find: collName, tailable: tailable, awaitData: awaitData, batchSize: batchSize}));
- assert.eq(findRes.cursor.firstBatch.length, batchSize);
- assert.neq(findRes.cursor.id, 0);
- assert.eq(findRes.cursor.ns, coll.getFullName());
- return findRes.cursor.id;
- }
+/**
+ * Runs a find command with a batchSize of 'batchSize' to establish a cursor. Asserts that the
+ * command worked and that the cursor id is not 0, then returns the cursor id.
+ */
+function openCursor({batchSize, tailable, awaitData}) {
+ const findRes = assert.commandWorked(db.runCommand(
+ {find: collName, tailable: tailable, awaitData: awaitData, batchSize: batchSize}));
+ assert.eq(findRes.cursor.firstBatch.length, batchSize);
+ assert.neq(findRes.cursor.id, 0);
+ assert.eq(findRes.cursor.ns, coll.getFullName());
+ return findRes.cursor.id;
+}
- // Test that specifying a batch size to a getMore on a tailable cursor produces a batch of the
- // desired size when the number of results is larger than the batch size.
+// Test that specifying a batch size to a getMore on a tailable cursor produces a batch of the
+// desired size when the number of results is larger than the batch size.
- // One batch's worth for the find and one more than one batch's worth for the getMore.
- dropAndRecreateColl({numDocs: batchSize + (batchSize + 1)});
- let cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false});
- let getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
+// One batch's worth for the find and one more than one batch's worth for the getMore.
+dropAndRecreateColl({numDocs: batchSize + (batchSize + 1)});
+let cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false});
+let getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
- // Test that the same is true for a tailable, *awaitData* cursor.
- cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true});
- getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
+// Test that the same is true for a tailable, *awaitData* cursor.
+cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true});
+getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
- // Test that specifying a batch size to a getMore on a tailable cursor returns all
- // new results immediately, even if the batch size is larger than the number of new results.
- // One batch's worth for the find and one less than one batch's worth for the getMore.
- dropAndRecreateColl({numDocs: batchSize + (batchSize - 1)});
- cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false});
- getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1);
+// Test that specifying a batch size to a getMore on a tailable cursor returns all
+// new results immediately, even if the batch size is larger than the number of new results.
+// One batch's worth for the find and one less than one batch's worth for the getMore.
+dropAndRecreateColl({numDocs: batchSize + (batchSize - 1)});
+cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false});
+getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1);
- // Test that the same is true for a tailable, *awaitData* cursor.
- cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true});
- getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1);
+// Test that the same is true for a tailable, *awaitData* cursor.
+cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true});
+getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1);
- // Test that using a smaller batch size than there are results will return all results without
- // empty batches in between (SERVER-30799).
- dropAndRecreateColl({numDocs: batchSize * 3});
- cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false});
- getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
- getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
- getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
- assert.eq(getMoreRes.cursor.nextBatch.length, 0);
+// Test that using a smaller batch size than there are results will return all results without
+// empty batches in between (SERVER-30799).
+dropAndRecreateColl({numDocs: batchSize * 3});
+cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false});
+getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
+getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, batchSize);
+getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize}));
+assert.eq(getMoreRes.cursor.nextBatch.length, 0);
- // Avoid leaving the cursor open. Cursors above are killed by drops, but we'll avoid dropping
- // the collection at the end so other consistency checks like validate can be run against it.
- assert.commandWorked(db.runCommand({killCursors: collName, cursors: [getMoreRes.cursor.id]}));
+// Avoid leaving the cursor open. Cursors above are killed by drops, but we'll avoid dropping
+// the collection at the end so other consistency checks like validate can be run against it.
+assert.commandWorked(db.runCommand({killCursors: collName, cursors: [getMoreRes.cursor.id]}));
}());
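
To make the batching behavior concrete: each getMore on a tailable cursor returns at most batchSize documents, and (per SERVER-30799) does not interleave empty batches while results remain. A sketch of draining the capped collection with raw commands, assuming the collName used above:

    const firstRes = assert.commandWorked(
        db.runCommand({find: collName, tailable: true, batchSize: 2}));
    let nextCursorId = firstRes.cursor.id;
    let batch = firstRes.cursor.firstBatch;
    while (batch.length > 0) {
        printjson(batch);  // At most two documents per batch.
        const more = assert.commandWorked(
            db.runCommand({getMore: nextCursorId, collection: collName, batchSize: 2}));
        nextCursorId = more.cursor.id;
        batch = more.cursor.nextBatch;
    }
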
diff --git a/jstests/core/tailable_skip_limit.js b/jstests/core/tailable_skip_limit.js
index 8669e29a836..672a52aeb3d 100644
--- a/jstests/core/tailable_skip_limit.js
+++ b/jstests/core/tailable_skip_limit.js
@@ -2,92 +2,92 @@
// Test that tailable cursors work correctly with skip and limit.
(function() {
- "use strict";
+"use strict";
- // Setup the capped collection.
- var collname = "jstests_tailable_skip_limit";
- var t = db[collname];
- t.drop();
- assert.commandWorked(db.createCollection(collname, {capped: true, size: 1024}));
+// Set up the capped collection.
+var collname = "jstests_tailable_skip_limit";
+var t = db[collname];
+t.drop();
+assert.commandWorked(db.createCollection(collname, {capped: true, size: 1024}));
- assert.writeOK(t.insert({_id: 1}));
- assert.writeOK(t.insert({_id: 2}));
+assert.writeOK(t.insert({_id: 1}));
+assert.writeOK(t.insert({_id: 2}));
- // Non-tailable with skip
- var cursor = t.find().skip(1);
- assert.eq(2, cursor.next()["_id"]);
- assert(!cursor.hasNext());
- assert.writeOK(t.insert({_id: 3}));
- assert(!cursor.hasNext());
+// Non-tailable with skip
+var cursor = t.find().skip(1);
+assert.eq(2, cursor.next()["_id"]);
+assert(!cursor.hasNext());
+assert.writeOK(t.insert({_id: 3}));
+assert(!cursor.hasNext());
- // Non-tailable with limit
- var cursor = t.find().limit(100);
- for (var i = 1; i <= 3; i++) {
- assert.eq(i, cursor.next()["_id"]);
- }
- assert(!cursor.hasNext());
- assert.writeOK(t.insert({_id: 4}));
- assert(!cursor.hasNext());
+// Non-tailable with limit
+var cursor = t.find().limit(100);
+for (var i = 1; i <= 3; i++) {
+ assert.eq(i, cursor.next()["_id"]);
+}
+assert(!cursor.hasNext());
+assert.writeOK(t.insert({_id: 4}));
+assert(!cursor.hasNext());
- // Non-tailable with negative limit
- var cursor = t.find().limit(-100);
- for (var i = 1; i <= 4; i++) {
- assert.eq(i, cursor.next()["_id"]);
- }
- assert(!cursor.hasNext());
- assert.writeOK(t.insert({_id: 5}));
- assert(!cursor.hasNext());
+// Non-tailable with negative limit
+var cursor = t.find().limit(-100);
+for (var i = 1; i <= 4; i++) {
+ assert.eq(i, cursor.next()["_id"]);
+}
+assert(!cursor.hasNext());
+assert.writeOK(t.insert({_id: 5}));
+assert(!cursor.hasNext());
- // Tailable with skip
- cursor = t.find().addOption(2).skip(4);
- assert.eq(5, cursor.next()["_id"]);
- assert(!cursor.hasNext());
- assert.writeOK(t.insert({_id: 6}));
- assert(cursor.hasNext());
- assert.eq(6, cursor.next()["_id"]);
+// Tailable with skip
+cursor = t.find().addOption(2).skip(4);
+assert.eq(5, cursor.next()["_id"]);
+assert(!cursor.hasNext());
+assert.writeOK(t.insert({_id: 6}));
+assert(cursor.hasNext());
+assert.eq(6, cursor.next()["_id"]);
- // Tailable with limit
- var cursor = t.find().addOption(2).limit(100);
- for (var i = 1; i <= 6; i++) {
- assert.eq(i, cursor.next()["_id"]);
- }
- assert(!cursor.hasNext());
- assert.writeOK(t.insert({_id: 7}));
- assert(cursor.hasNext());
- assert.eq(7, cursor.next()["_id"]);
+// Tailable with limit
+var cursor = t.find().addOption(2).limit(100);
+for (var i = 1; i <= 6; i++) {
+ assert.eq(i, cursor.next()["_id"]);
+}
+assert(!cursor.hasNext());
+assert.writeOK(t.insert({_id: 7}));
+assert(cursor.hasNext());
+assert.eq(7, cursor.next()["_id"]);
- // Tailable with negative limit is an error.
- assert.throws(function() {
- t.find().addOption(2).limit(-100).next();
- });
+// Tailable with negative limit is an error.
+assert.throws(function() {
+ t.find().addOption(2).limit(-100).next();
+});
+assert.throws(function() {
+ t.find().addOption(2).limit(-1).itcount();
+});
+
+// When using read commands, a limit of 1 with the tailable option is allowed. In legacy
+// readMode, an ntoreturn of 1 means the same thing as ntoreturn -1 and is disallowed with
+// tailable.
+if (db.getMongo().useReadCommands()) {
+ assert.eq(1, t.find().addOption(2).limit(1).itcount());
+} else {
assert.throws(function() {
- t.find().addOption(2).limit(-1).itcount();
+ t.find().addOption(2).limit(1).itcount();
});
+}
- // When using read commands, a limit of 1 with the tailable option is allowed. In legacy
- // readMode, an ntoreturn of 1 means the same thing as ntoreturn -1 and is disallowed with
- // tailable.
- if (db.getMongo().useReadCommands()) {
- assert.eq(1, t.find().addOption(2).limit(1).itcount());
- } else {
- assert.throws(function() {
- t.find().addOption(2).limit(1).itcount();
- });
- }
-
- // Tests that a tailable cursor over an empty capped collection produces a dead cursor, intended
- // to be run on both mongod and mongos. For SERVER-20720.
- t.drop();
- assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 1024}));
+// Tests that a tailable cursor over an empty capped collection produces a dead cursor, intended
+// to be run on both mongod and mongos. For SERVER-20720.
+t.drop();
+assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 1024}));
- var cmdRes = db.runCommand({find: t.getName(), tailable: true});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, t.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 0);
+var cmdRes = db.runCommand({find: t.getName(), tailable: true});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, t.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 0);
- // Test that the cursor works in the shell.
- assert.eq(t.find().addOption(2).itcount(), 0);
- assert.writeOK(t.insert({a: 1}));
- assert.eq(t.find().addOption(2).itcount(), 1);
+// Test that the cursor works in the shell.
+assert.eq(t.find().addOption(2).itcount(), 0);
+assert.writeOK(t.insert({a: 1}));
+assert.eq(t.find().addOption(2).itcount(), 1);
})();
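
A note on the magic number in this test: addOption(2) sets the tailable wire-protocol flag. The legacy shell exposes named constants for these flags, so an equivalent, more readable form is:

    // DBQuery.Option.tailable == 2; flags combine bitwise.
    var tailCursor = t.find().addOption(DBQuery.Option.tailable).skip(4);
    var awaitCursor = t.find().addOption(DBQuery.Option.tailable | DBQuery.Option.awaitData);
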
diff --git a/jstests/core/text_covered_matching.js b/jstests/core/text_covered_matching.js
index f3fe9908d1b..a81dfd84e09 100644
--- a/jstests/core/text_covered_matching.js
+++ b/jstests/core/text_covered_matching.js
@@ -12,176 +12,176 @@
load("jstests/libs/analyze_plan.js");
(function() {
- "use strict";
- const coll = db.text_covered_matching;
+"use strict";
+const coll = db.text_covered_matching;
- coll.drop();
- assert.commandWorked(coll.createIndex({a: "text", b: 1}));
- assert.writeOK(coll.insert({a: "hello", b: 1, c: 1}));
- assert.writeOK(coll.insert({a: "world", b: 2, c: 2}));
- assert.writeOK(coll.insert({a: "hello world", b: 3, c: 3}));
+coll.drop();
+assert.commandWorked(coll.createIndex({a: "text", b: 1}));
+assert.writeOK(coll.insert({a: "hello", b: 1, c: 1}));
+assert.writeOK(coll.insert({a: "world", b: 2, c: 2}));
+assert.writeOK(coll.insert({a: "hello world", b: 3, c: 3}));
- //
- // Test the query {$text: {$search: "hello"}, b: 1} with and without the 'textScore' in the
- // output.
- //
+//
+// Test the query {$text: {$search: "hello"}, b: 1} with and without the 'textScore' in the
+// output.
+//
- // Expected result:
- // - We examine two keys, for the two documents with "hello" in their text;
- // - we examine only one document, because covered matching rejects the index entry for
- // which b != 1;
- // - we return exactly one document.
- let explainResult = coll.find({$text: {$search: "hello"}, b: 1}).explain("executionStats");
- assert.commandWorked(explainResult);
- assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// Expected result:
+// - We examine two keys, for the two documents with "hello" in their text;
+// - we examine only one document, because covered matching rejects the index entry for
+// which b != 1;
+// - we return exactly one document.
+let explainResult = coll.find({$text: {$search: "hello"}, b: 1}).explain("executionStats");
+assert.commandWorked(explainResult);
+assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- // When we include the text score in the projection, we use a TEXT_OR instead of an OR in our
- // query plan, which changes how filtering is done. We should get the same result, however.
- explainResult = coll.find({$text: {$search: "hello"}, b: 1},
- {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
- .explain("executionStats");
- assert.commandWorked(explainResult);
- assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR"));
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// When we include the text score in the projection, we use a TEXT_OR instead of an OR in our
+// query plan, which changes how filtering is done. We should get the same result, however.
+explainResult = coll.find({$text: {$search: "hello"}, b: 1},
+ {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
+ .explain("executionStats");
+assert.commandWorked(explainResult);
+assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR"));
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- //
- // Test the query {$text: {$search: "hello"}, c: 1} with and without the 'textScore' in the
- // output.
- //
+//
+// Test the query {$text: {$search: "hello"}, c: 1} with and without the 'textScore' in the
+// output.
+//
- // Expected result:
- // - We examine two keys, for the two documents with "hello" in their text;
- // - we examine more than just the matching document, because we need to fetch documents in
- // order to examine the non-covered 'c' field;
- // - we return exactly one document.
- explainResult = coll.find({$text: {$search: "hello"}, c: 1}).explain("executionStats");
- assert.commandWorked(explainResult);
- assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.gt(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// Expected result:
+// - We examine two keys, for the two documents with "hello" in their text;
+// - we examine more than just the matching document, because we need to fetch documents in
+// order to examine the non-covered 'c' field;
+// - we return exactly one document.
+explainResult = coll.find({$text: {$search: "hello"}, c: 1}).explain("executionStats");
+assert.commandWorked(explainResult);
+assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.gt(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- // As before, including the text score in the projection changes how filtering occurs, but we
- // still expect the same result.
- explainResult = coll.find({$text: {$search: "hello"}, c: 1},
- {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
- .explain("executionStats");
- assert.commandWorked(explainResult);
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.gt(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// As before, including the text score in the projection changes how filtering occurs, but we
+// still expect the same result.
+explainResult = coll.find({$text: {$search: "hello"}, c: 1},
+ {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
+ .explain("executionStats");
+assert.commandWorked(explainResult);
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.gt(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- //
- // Test the first query again, but this time, use dotted fields to make sure they don't confuse
- // the query planner:
- // {$text: {$search: "hello"}, "b.d": 1}
- //
- coll.drop();
- assert.commandWorked(coll.createIndex({a: "text", "b.d": 1}));
- assert.writeOK(coll.insert({a: "hello", b: {d: 1}, c: {e: 1}}));
- assert.writeOK(coll.insert({a: "world", b: {d: 2}, c: {e: 2}}));
- assert.writeOK(coll.insert({a: "hello world", b: {d: 3}, c: {e: 3}}));
+//
+// Test the first query again, but this time, use dotted fields to make sure they don't confuse
+// the query planner:
+// {$text: {$search: "hello"}, "b.d": 1}
+//
+coll.drop();
+assert.commandWorked(coll.createIndex({a: "text", "b.d": 1}));
+assert.writeOK(coll.insert({a: "hello", b: {d: 1}, c: {e: 1}}));
+assert.writeOK(coll.insert({a: "world", b: {d: 2}, c: {e: 2}}));
+assert.writeOK(coll.insert({a: "hello world", b: {d: 3}, c: {e: 3}}));
- // Expected result:
- // - We examine two keys, for the two documents with "hello" in their text;
- // - we examine only one document, because covered matching rejects the index entry for
- // which b != 1;
- // - we return exactly one document.
- explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1}).explain("executionStats");
- assert.commandWorked(explainResult);
- assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// Expected result:
+// - We examine two keys, for the two documents with "hello" in their text;
+// - we examine only one document, because covered matching rejects the index entry for
+// which b != 1;
+// - we return exactly one document.
+explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1}).explain("executionStats");
+assert.commandWorked(explainResult);
+assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- // When we include the text score in the projection, we use a TEXT_OR instead of an OR in our
- // query plan, which changes how filtering is done. We should get the same result, however.
- explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1},
- {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
- .explain("executionStats");
- assert.commandWorked(explainResult);
- assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR"));
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// When we include the text score in the projection, we use a TEXT_OR instead of an OR in our
+// query plan, which changes how filtering is done. We should get the same result, however.
+explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1},
+ {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
+ .explain("executionStats");
+assert.commandWorked(explainResult);
+assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR"));
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- //
- // Test the second query again, this time with dotted fields:
- // {$text: {$search: "hello"}, "c.e": 1}
- //
+//
+// Test the second query again, this time with dotted fields:
+// {$text: {$search: "hello"}, "c.e": 1}
+//
- // Expected result:
- // - We examine two keys, for the two documents with "hello" in their text;
- // - we examine more than just the matching document, because we need to fetch documents in
- // order to examine the non-covered 'c' field;
- // - we return exactly one document.
- explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1}).explain("executionStats");
- assert.commandWorked(explainResult);
- assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.gt(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// Expected result:
+// - We examine two keys, for the two documents with "hello" in their text;
+// - we examine more than just the matching document, because we need to fetch documents in
+// order to examine the non-covered 'c' field;
+// - we return exactly one document.
+explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1}).explain("executionStats");
+assert.commandWorked(explainResult);
+assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR"));
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.gt(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
- // As before, including the text score in the projection changes how filtering occurs, but we
- // still expect the same result.
- explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1},
- {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
- .explain("executionStats");
- assert.commandWorked(explainResult);
- assert.eq(explainResult.executionStats.totalKeysExamined,
- 2,
- "Unexpected number of keys examined: " + tojson(explainResult));
- assert.gt(explainResult.executionStats.totalDocsExamined,
- 1,
- "Unexpected number of documents examined: " + tojson(explainResult));
- assert.eq(explainResult.executionStats.nReturned,
- 1,
- "Unexpected number of results returned: " + tojson(explainResult));
+// As before, including the text score in the projection changes how filtering occurs, but we
+// still expect the same result.
+explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1},
+ {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}})
+ .explain("executionStats");
+assert.commandWorked(explainResult);
+assert.eq(explainResult.executionStats.totalKeysExamined,
+ 2,
+ "Unexpected number of keys examined: " + tojson(explainResult));
+assert.gt(explainResult.executionStats.totalDocsExamined,
+ 1,
+ "Unexpected number of documents examined: " + tojson(explainResult));
+assert.eq(explainResult.executionStats.nReturned,
+ 1,
+ "Unexpected number of results returned: " + tojson(explainResult));
})();
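
Each assertion block above re-reads the same three executionStats fields. A small hypothetical helper (not part of the test) makes the covered-versus-fetched distinction easier to eyeball: when totalDocsExamined equals nReturned, the non-text predicate was filtered from the index, while a larger value means documents had to be fetched.

    function summarizeExecution(explainResult) {
        const stats = explainResult.executionStats;
        return {keys: stats.totalKeysExamined, docs: stats.totalDocsExamined, returned: stats.nReturned};
    }
    printjson(summarizeExecution(
        coll.find({$text: {$search: "hello"}, b: 1}).explain("executionStats")));
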
diff --git a/jstests/core/text_index_limits.js b/jstests/core/text_index_limits.js
index 73df159b4b5..69a9be2f751 100644
--- a/jstests/core/text_index_limits.js
+++ b/jstests/core/text_index_limits.js
@@ -6,43 +6,42 @@
* @tags: [does_not_support_stepdowns]
*/
(function() {
- "use strict";
+"use strict";
- var t = db.text_index_limits;
- t.drop();
+var t = db.text_index_limits;
+t.drop();
- assert.commandWorked(t.createIndex({comments: "text"}));
+assert.commandWorked(t.createIndex({comments: "text"}));
- // 1. Test number of unique terms exceeds 400,000
- let commentsWithALotOfUniqueWords = "";
- // 26^4 = 456,976 > 400,000
- for (let ch1 = 97; ch1 < 123; ch1++) {
- for (let ch2 = 97; ch2 < 123; ch2++) {
- for (let ch3 = 97; ch3 < 123; ch3++) {
- for (let ch4 = 97; ch4 < 123; ch4++) {
- let word = String.fromCharCode(ch1, ch2, ch3, ch4);
- commentsWithALotOfUniqueWords += word + " ";
- }
+// 1. Test number of unique terms exceeds 400,000
+let commentsWithALotOfUniqueWords = "";
+// 26^4 = 456,976 > 400,000
+for (let ch1 = 97; ch1 < 123; ch1++) {
+ for (let ch2 = 97; ch2 < 123; ch2++) {
+ for (let ch3 = 97; ch3 < 123; ch3++) {
+ for (let ch4 = 97; ch4 < 123; ch4++) {
+ let word = String.fromCharCode(ch1, ch2, ch3, ch4);
+ commentsWithALotOfUniqueWords += word + " ";
}
}
}
- assert.commandWorked(db.runCommand(
- {insert: t.getName(), documents: [{_id: 1, comments: commentsWithALotOfUniqueWords}]}));
+}
+assert.commandWorked(db.runCommand(
+ {insert: t.getName(), documents: [{_id: 1, comments: commentsWithALotOfUniqueWords}]}));
- // 2. Test total size of index keys for unique terms exceeds 4MB
+// 2. Test total size of index keys for unique terms exceeds 4MB
- // 26^3 = 17576 < 400,000
- let prefix = "a".repeat(400);
- let commentsWithWordsOfLargeSize = "";
- for (let ch1 = 97; ch1 < 123; ch1++) {
- for (let ch2 = 97; ch2 < 123; ch2++) {
- for (let ch3 = 97; ch3 < 123; ch3++) {
- let word = String.fromCharCode(ch1, ch2, ch3);
- commentsWithWordsOfLargeSize += prefix + word + " ";
- }
+// 26^3 = 17,576 terms stays safely under the 400,000 unique-term limit; the 400-character
+// prefix instead pushes the total size of the index keys past the 4MB limit.
+let prefix = "a".repeat(400);
+let commentsWithWordsOfLargeSize = "";
+for (let ch1 = 97; ch1 < 123; ch1++) {
+ for (let ch2 = 97; ch2 < 123; ch2++) {
+ for (let ch3 = 97; ch3 < 123; ch3++) {
+ let word = String.fromCharCode(ch1, ch2, ch3);
+ commentsWithWordsOfLargeSize += prefix + word + " ";
}
}
- assert.commandWorked(db.runCommand(
- {insert: t.getName(), documents: [{_id: 2, comments: commentsWithWordsOfLargeSize}]}));
-
+}
+assert.commandWorked(db.runCommand(
+ {insert: t.getName(), documents: [{_id: 2, comments: commentsWithWordsOfLargeSize}]}));
}());
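
The loop bounds encode the limits arithmetically: 26^4 = 456,976 four-letter words exceeds the 400,000 unique-term cap, while 26^3 = 17,576 words of roughly 403 characters each total about 7MB of index keys, past the 4MB cap. A sketch of the same generation logic as a reusable helper (hypothetical, assuming the shell's ES6 support used elsewhere in these tests):

    // Generate every lowercase word of the given length ("aa..a" through "zz..z").
    function allLowercaseWords(length) {
        let words = [""];
        for (let pos = 0; pos < length; pos++) {
            const next = [];
            for (const w of words) {
                for (let ch = 97; ch < 123; ch++) {  // char codes for 'a'..'z'
                    next.push(w + String.fromCharCode(ch));
                }
            }
            words = next;
        }
        return words;
    }
    assert.eq(allLowercaseWords(3).length, 17576);  // 26^3
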
diff --git a/jstests/core/throw_big.js b/jstests/core/throw_big.js
index 422ee93a6ae..ef9554966e0 100644
--- a/jstests/core/throw_big.js
+++ b/jstests/core/throw_big.js
@@ -2,15 +2,14 @@
* Test that verifies the javascript integration can handle large string exception messages.
*/
(function() {
- 'use strict';
+'use strict';
- var len = 65 * 1024 * 1024;
- var str = new Array(len + 1).join('b');
-
- // We expect to successfully throw and catch this large exception message.
- // We do not want the mongo shell to terminate.
- assert.throws(function() {
- throw str;
- });
+var len = 65 * 1024 * 1024;
+var str = new Array(len + 1).join('b');
+// We expect to successfully throw and catch this large exception message.
+// We do not want the mongo shell to terminate.
+assert.throws(function() {
+ throw str;
+});
})();
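
The same behavior can be observed directly by catching the value; a minimal sketch:

    var big = new Array(65 * 1024 * 1024 + 1).join('b');
    try {
        throw big;
    } catch (e) {
        assert.eq(e.length, 65 * 1024 * 1024);  // The full 65MB message survives intact.
    }
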
diff --git a/jstests/core/top.js b/jstests/core/top.js
index eca4570472f..2d8fd8297c3 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -14,110 +14,110 @@
*/
(function() {
- load("jstests/libs/stats.js");
-
- var name = "toptest";
-
- var testDB = db.getSiblingDB(name);
- var testColl = testDB[name + "coll"];
- testColl.drop();
-
- // Perform an operation on the collection so that it is present in the "top" command's output.
- assert.eq(testColl.find({}).itcount(), 0);
-
- // This variable is used to get differential output
- var lastTop = getTop(testColl);
-
- var numRecords = 100;
-
- // Insert
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
- }
- assertTopDiffEq(testColl, lastTop, "insert", numRecords);
- lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", numRecords);
-
- // Update
- for (i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords);
-
- // Queries
- var query = {};
- for (i = 0; i < numRecords; i++) {
- query[i] = testColl.find({x: {$gte: i}}).batchSize(2);
- assert.eq(query[i].next()._id, i);
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "queries", numRecords);
-
- // Getmore
- for (i = 0; i < numRecords / 2; i++) {
- assert.eq(query[i].next()._id, i + 1);
- assert.eq(query[i].next()._id, i + 2);
- assert.eq(query[i].next()._id, i + 3);
- assert.eq(query[i].next()._id, i + 4);
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "getmore", numRecords);
-
- // Remove
- for (i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: 1}));
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "remove", numRecords);
-
- // Upsert, note that these are counted as updates, not inserts
- for (i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords);
-
- // Commands
- var res;
-
- // "count" command
- lastTop = getTop(testColl); // ignore any commands before this
- for (i = 0; i < numRecords; i++) {
- res = assert.commandWorked(testDB.runCommand({count: testColl.getName()}));
- assert.eq(res.n, numRecords, tojson(res));
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords);
-
- // "findAndModify" command
- lastTop = getTop(testColl);
- for (i = 0; i < numRecords; i++) {
- res = assert.commandWorked(testDB.runCommand({
- findAndModify: testColl.getName(),
- query: {_id: i},
- update: {$inc: {x: 1}},
- }));
- assert.eq(res.value.x, i, tojson(res));
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords);
-
- lastTop = getTop(testColl);
- for (i = 0; i < numRecords; i++) {
- res = assert.commandWorked(testDB.runCommand({
- findAndModify: testColl.getName(),
- query: {_id: i},
- remove: true,
- }));
- assert.eq(res.value.x, i + 1, tojson(res));
- }
- lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords);
-
- // getIndexes
- assert.eq(1, testColl.getIndexes().length);
- assertTopDiffEq(testColl, lastTop, "commands", 1);
- lastTop = assertTopDiffEq(testColl, lastTop, "readLock", 1);
-
- // createIndex
- res = assert.commandWorked(testColl.createIndex({x: 1}));
- assertTopDiffEq(testColl, lastTop, "writeLock", 1);
- lastTop = assertTopDiffEq(testColl, lastTop, "commands", 1);
-
- // dropIndex
- res = assert.commandWorked(testColl.dropIndex({x: 1}));
- assertTopDiffEq(testColl, lastTop, "commands", 1);
- lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", 1);
+load("jstests/libs/stats.js");
+
+var name = "toptest";
+
+var testDB = db.getSiblingDB(name);
+var testColl = testDB[name + "coll"];
+testColl.drop();
+
+// Perform an operation on the collection so that it is present in the "top" command's output.
+assert.eq(testColl.find({}).itcount(), 0);
+
+// This variable is used to get differential output
+var lastTop = getTop(testColl);
+
+var numRecords = 100;
+
+// Insert
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.insert({_id: i}));
+}
+assertTopDiffEq(testColl, lastTop, "insert", numRecords);
+lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", numRecords);
+
+// Update
+for (i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}));
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords);
+
+// Queries
+var query = {};
+for (i = 0; i < numRecords; i++) {
+ query[i] = testColl.find({x: {$gte: i}}).batchSize(2);
+ assert.eq(query[i].next()._id, i);
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "queries", numRecords);
+
+// Getmore
+for (i = 0; i < numRecords / 2; i++) {
+ assert.eq(query[i].next()._id, i + 1);
+ assert.eq(query[i].next()._id, i + 2);
+ assert.eq(query[i].next()._id, i + 3);
+ assert.eq(query[i].next()._id, i + 4);
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "getmore", numRecords);
+
+// Remove
+for (i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.remove({_id: 1}));
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "remove", numRecords);
+
+// Upsert; note that these are counted as updates, not inserts
+for (i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords);
+
+// Commands
+var res;
+
+// "count" command
+lastTop = getTop(testColl); // ignore any commands before this
+for (i = 0; i < numRecords; i++) {
+ res = assert.commandWorked(testDB.runCommand({count: testColl.getName()}));
+ assert.eq(res.n, numRecords, tojson(res));
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords);
+
+// "findAndModify" command
+lastTop = getTop(testColl);
+for (i = 0; i < numRecords; i++) {
+ res = assert.commandWorked(testDB.runCommand({
+ findAndModify: testColl.getName(),
+ query: {_id: i},
+ update: {$inc: {x: 1}},
+ }));
+ assert.eq(res.value.x, i, tojson(res));
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords);
+
+lastTop = getTop(testColl);
+for (i = 0; i < numRecords; i++) {
+ res = assert.commandWorked(testDB.runCommand({
+ findAndModify: testColl.getName(),
+ query: {_id: i},
+ remove: true,
+ }));
+ assert.eq(res.value.x, i + 1, tojson(res));
+}
+lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords);
+
+// getIndexes
+assert.eq(1, testColl.getIndexes().length);
+assertTopDiffEq(testColl, lastTop, "commands", 1);
+lastTop = assertTopDiffEq(testColl, lastTop, "readLock", 1);
+
+// createIndex
+res = assert.commandWorked(testColl.createIndex({x: 1}));
+assertTopDiffEq(testColl, lastTop, "writeLock", 1);
+lastTop = assertTopDiffEq(testColl, lastTop, "commands", 1);
+
+// dropIndex
+res = assert.commandWorked(testColl.dropIndex({x: 1}));
+assertTopDiffEq(testColl, lastTop, "commands", 1);
+lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", 1);
}());
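
The getTop and assertTopDiffEq helpers loaded from jstests/libs/stats.js wrap the top command, whose output keys per-namespace totals by operation class. A sketch of inspecting one namespace directly, assuming the testDB/testColl handles above:

    // Each operation class reports {time: <microseconds>, count: <operations>}.
    const topRes = assert.commandWorked(testDB.adminCommand({top: 1}));
    const nsTotals = topRes.totals[testColl.getFullName()];
    printjson({inserts: nsTotals.insert.count, updates: nsTotals.update.count});
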
diff --git a/jstests/core/ts1.js b/jstests/core/ts1.js
index 79a2db95dca..a52995dd4c8 100644
--- a/jstests/core/ts1.js
+++ b/jstests/core/ts1.js
@@ -3,43 +3,42 @@
// if the inserts are into a sharded collection.
// @tags: [assumes_unsharded_collection]
(function() {
- "use strict";
- const t = db.ts1;
- t.drop();
-
- const N = 20;
-
- for (let i = 0; i < N; i++) {
- assert.writeOK(t.insert({_id: i, x: new Timestamp()}));
- sleep(100);
- }
-
- function get(i) {
- return t.findOne({_id: i}).x;
- }
-
- function cmp(a, b) {
- if (a.t < b.t)
- return -1;
- if (a.t > b.t)
- return 1;
-
- return a.i - b.i;
- }
-
- for (let i = 0; i < N - 1; i++) {
- const a = get(i);
- const b = get(i + 1);
- assert.gt(0,
- cmp(a, b),
- `Expected ${tojson(a)} to be smaller than ${tojson(b)} (at iteration ${i})`);
- }
-
- assert.eq(N, t.find({x: {$type: 17}}).itcount());
- assert.eq(0, t.find({x: {$type: 3}}).itcount());
-
- assert.writeOK(t.insert({_id: 100, x: new Timestamp(123456, 50)}));
- const x = t.findOne({_id: 100}).x;
- assert.eq(123456, x.t);
- assert.eq(50, x.i);
+"use strict";
+const t = db.ts1;
+t.drop();
+
+const N = 20;
+
+for (let i = 0; i < N; i++) {
+ assert.writeOK(t.insert({_id: i, x: new Timestamp()}));
+ sleep(100);
+}
+
+function get(i) {
+ return t.findOne({_id: i}).x;
+}
+
+function cmp(a, b) {
+ if (a.t < b.t)
+ return -1;
+ if (a.t > b.t)
+ return 1;
+
+ return a.i - b.i;
+}
+
+for (let i = 0; i < N - 1; i++) {
+ const a = get(i);
+ const b = get(i + 1);
+ assert.gt(
+ 0, cmp(a, b), `Expected ${tojson(a)} to be smaller than ${tojson(b)} (at iteration ${i})`);
+}
+
+assert.eq(N, t.find({x: {$type: 17}}).itcount());
+assert.eq(0, t.find({x: {$type: 3}}).itcount());
+
+assert.writeOK(t.insert({_id: 100, x: new Timestamp(123456, 50)}));
+const x = t.findOne({_id: 100}).x;
+assert.eq(123456, x.t);
+assert.eq(50, x.i);
}());
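
The cmp function in this test mirrors how BSON timestamps order: by the seconds field t first, then by the increment i. A short sketch, assuming the t collection above ($type 17 is the numeric code whose string alias is "timestamp"):

    const tsA = new Timestamp(123456, 1);
    const tsB = new Timestamp(123456, 2);
    assert.eq(tsA.t, tsB.t);  // Same seconds component...
    assert.lt(tsA.i, tsB.i);  // ...so ordering falls through to the increment.
    assert.eq(t.find({x: {$type: "timestamp"}}).itcount(), t.find({x: {$type: 17}}).itcount());
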
diff --git a/jstests/core/ttl_index_options.js b/jstests/core/ttl_index_options.js
index f4d3c4b3e42..47ae2709073 100644
--- a/jstests/core/ttl_index_options.js
+++ b/jstests/core/ttl_index_options.js
@@ -4,42 +4,40 @@
* @tags: [requires_ttl_index]
*/
(function() {
- 'use strict';
-
- let coll = db.core_ttl_index_options;
- coll.drop();
-
- // Ensure that any overflows are caught when converting from seconds to milliseconds.
- assert.commandFailedWithCode(
- coll.createIndexes([{x: 1}], {expireAfterSeconds: 9223372036854775808}),
- ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(
- coll.createIndexes([{x: 1}], {expireAfterSeconds: 9999999999999999}),
- ErrorCodes.CannotCreateIndex);
-
- // Ensure that we cannot provide a time that is larger than the current epoch time.
- let secondsSinceEpoch = Date.now() / 1000;
- assert.commandFailedWithCode(
- coll.createIndexes([{x: 1}], {expireAfterSeconds: secondsSinceEpoch + 1000}),
- ErrorCodes.CannotCreateIndex);
-
- // 'expireAfterSeconds' cannot be less than 0.
- assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: -1}),
- ErrorCodes.CannotCreateIndex);
- assert.commandWorked(coll.createIndexes([{z: 1}], {expireAfterSeconds: 0}));
-
- // Compound indexes are not support with TTL indexes.
- assert.commandFailedWithCode(coll.createIndexes([{x: 1, y: 1}], {expireAfterSeconds: 100}),
- ErrorCodes.CannotCreateIndex);
-
- // 'expireAfterSeconds' should be a number.
- assert.commandFailedWithCode(
- coll.createIndexes([{x: 1}], {expireAfterSeconds: "invalidOption"}),
- ErrorCodes.CannotCreateIndex);
-
- // Using 'expireAfterSeconds' as an index key is valid, but doesn't create a TTL index.
- assert.commandWorked(coll.createIndexes([{x: 1, expireAfterSeconds: 3600}]));
-
- // Create a valid TTL index.
- assert.commandWorked(coll.createIndexes([{x: 1}, {y: 1}], {expireAfterSeconds: 3600}));
+'use strict';
+
+let coll = db.core_ttl_index_options;
+coll.drop();
+
+// Ensure that any overflows are caught when converting from seconds to milliseconds.
+assert.commandFailedWithCode(
+ coll.createIndexes([{x: 1}], {expireAfterSeconds: 9223372036854775808}),
+ ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: 9999999999999999}),
+ ErrorCodes.CannotCreateIndex);
+
+// Ensure that we cannot provide a time that is larger than the current epoch time.
+let secondsSinceEpoch = Date.now() / 1000;
+assert.commandFailedWithCode(
+ coll.createIndexes([{x: 1}], {expireAfterSeconds: secondsSinceEpoch + 1000}),
+ ErrorCodes.CannotCreateIndex);
+
+// 'expireAfterSeconds' cannot be less than 0.
+assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: -1}),
+ ErrorCodes.CannotCreateIndex);
+assert.commandWorked(coll.createIndexes([{z: 1}], {expireAfterSeconds: 0}));
+
+// Compound indexes are not supported with TTL indexes.
+assert.commandFailedWithCode(coll.createIndexes([{x: 1, y: 1}], {expireAfterSeconds: 100}),
+ ErrorCodes.CannotCreateIndex);
+
+// 'expireAfterSeconds' should be a number.
+assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: "invalidOption"}),
+ ErrorCodes.CannotCreateIndex);
+
+// Using 'expireAfterSeconds' as an index key is valid, but doesn't create a TTL index.
+assert.commandWorked(coll.createIndexes([{x: 1, expireAfterSeconds: 3600}]));
+
+// Create a valid TTL index.
+assert.commandWorked(coll.createIndexes([{x: 1}, {y: 1}], {expireAfterSeconds: 3600}));
}());
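
On the overflow case above: 9223372036854775808 is 2^63, one past the largest signed 64-bit integer, so it cannot survive the seconds-to-milliseconds conversion. For contrast with the failure cases, a minimal well-formed TTL index is a single-key index with a numeric expireAfterSeconds; 'createdAt' here is an illustrative field name:

    // Documents whose createdAt is more than an hour old become eligible for removal
    // by the TTL monitor, which runs periodically, so deletion is not instantaneous.
    assert.commandWorked(coll.createIndex({createdAt: 1}, {expireAfterSeconds: 3600}));
    assert.writeOK(coll.insert({createdAt: new Date()}));
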
diff --git a/jstests/core/txns/abort_expired_transaction.js b/jstests/core/txns/abort_expired_transaction.js
index 3022080d1b4..c64ed7407e5 100644
--- a/jstests/core/txns/abort_expired_transaction.js
+++ b/jstests/core/txns/abort_expired_transaction.js
@@ -5,85 +5,83 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const testDBName = "testDB";
- const testCollName = "abort_expired_transaction";
- const ns = testDBName + "." + testCollName;
- const testDB = db.getSiblingDB(testDBName);
- const testColl = testDB[testCollName];
- testColl.drop({writeConcern: {w: "majority"}});
+const testDBName = "testDB";
+const testCollName = "abort_expired_transaction";
+const ns = testDBName + "." + testCollName;
+const testDB = db.getSiblingDB(testDBName);
+const testColl = testDB[testCollName];
+testColl.drop({writeConcern: {w: "majority"}});
- // Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the
- // end of the test.
- const res = assert.commandWorked(
- db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
- const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds;
+// Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the
+// end of the test.
+const res =
+ assert.commandWorked(db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
+const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds;
- try {
- jsTest.log("Decrease transactionLifetimeLimitSeconds from " +
- originalTransactionLifetimeLimitSeconds + " to 1 second.");
- assert.commandWorked(
- db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
+try {
+ jsTest.log("Decrease transactionLifetimeLimitSeconds from " +
+ originalTransactionLifetimeLimitSeconds + " to 1 second.");
+ assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
- jsTest.log("Create a collection '" + ns + "' outside of the transaction.");
- assert.writeOK(testColl.insert({foo: "bar"}, {writeConcern: {w: "majority"}}));
+ jsTest.log("Create a collection '" + ns + "' outside of the transaction.");
+ assert.writeOK(testColl.insert({foo: "bar"}, {writeConcern: {w: "majority"}}));
- jsTest.log("Set up the session.");
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(testDBName);
+ jsTest.log("Set up the session.");
+ const sessionOptions = {causalConsistency: false};
+ const session = db.getMongo().startSession(sessionOptions);
+ const sessionDb = session.getDatabase(testDBName);
- let txnNumber = 0;
+ let txnNumber = 0;
- jsTest.log("Insert a document starting a transaction.");
- assert.commandWorked(sessionDb.runCommand({
- insert: testCollName,
- documents: [{_id: "insert-1"}],
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false,
- }));
+ jsTest.log("Insert a document starting a transaction.");
+ assert.commandWorked(sessionDb.runCommand({
+ insert: testCollName,
+ documents: [{_id: "insert-1"}],
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ }));
- // We can deterministically wait for the transaction to be aborted by waiting for currentOp
- // to cease reporting the inactive transaction: the transaction should disappear from the
- // currentOp results once aborted.
- jsTest.log("Wait for the transaction to expire and be aborted.");
- assert.soon(
- function() {
- const sessionFilter = {
- active: false,
- opid: {$exists: false},
- desc: "inactive transaction",
- "transaction.parameters.txnNumber": NumberLong(txnNumber),
- "lsid.id": session.getSessionId().id
- };
- const res = db.getSiblingDB("admin").aggregate(
- [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]);
- return (res.itcount() == 0);
+ // We can deterministically wait for the transaction to be aborted by waiting for currentOp
+ // to cease reporting the inactive transaction: the transaction should disappear from the
+ // currentOp results once aborted.
+ jsTest.log("Wait for the transaction to expire and be aborted.");
+ assert.soon(
+ function() {
+ const sessionFilter = {
+ active: false,
+ opid: {$exists: false},
+ desc: "inactive transaction",
+ "transaction.parameters.txnNumber": NumberLong(txnNumber),
+ "lsid.id": session.getSessionId().id
+ };
+ const res = db.getSiblingDB("admin").aggregate(
+ [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]);
+ return (res.itcount() == 0);
+ },
+ "currentOp reports that the idle transaction still exists, it has not been " +
+ "aborted as expected.");
- },
- "currentOp reports that the idle transaction still exists, it has not been " +
- "aborted as expected.");
+ jsTest.log(
+ "Attempt to do a write in the transaction, which should fail because the transaction " +
+ "was aborted");
+ assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: testCollName,
+ documents: [{_id: "insert-2"}],
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
- jsTest.log(
- "Attempt to do a write in the transaction, which should fail because the transaction " +
- "was aborted");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: testCollName,
- documents: [{_id: "insert-2"}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- session.endSession();
- } finally {
- // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact
- // other tests in the suite.
- assert.commandWorked(db.adminCommand({
- setParameter: 1,
- transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
- }));
- }
+ session.endSession();
+} finally {
+ // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact
+ // other tests in the suite.
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
+ }));
+}
}());
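The assert.soon predicate above is the reusable piece: the transaction has been aborted exactly when $currentOp stops reporting the idle session. A compact sketch of that wait as a helper, with the filter fields taken from the test:

// Sketch: wait until an idle transaction disappears from $currentOp.
function awaitTransactionAborted(lsidId, txnNum) {
    assert.soon(() => {
        const cursor = db.getSiblingDB("admin").aggregate([
            {$currentOp: {allUsers: true, idleSessions: true}},
            {$match: {"transaction.parameters.txnNumber": NumberLong(txnNum), "lsid.id": lsidId}}
        ]);
        return cursor.itcount() === 0;
    }, "transaction " + txnNum + " was not aborted in time");
}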
diff --git a/jstests/core/txns/abort_prepared_transaction.js b/jstests/core/txns/abort_prepared_transaction.js
index 365a4d852bf..3f2a21f98c1 100644
--- a/jstests/core/txns/abort_prepared_transaction.js
+++ b/jstests/core/txns/abort_prepared_transaction.js
@@ -4,79 +4,85 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "abort_prepared_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "abort_prepared_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const doc1 = {_id: 1, x: 1};
+const doc1 = {
+ _id: 1,
+ x: 1
+};
- // ---- Test 1. Insert a single document and run prepare. ----
+// ---- Test 1. Insert a single document and run prepare. ----
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
- // Insert should not be visible outside the session.
- assert.eq(null, testColl.findOne(doc1));
+// Insert should not be visible outside the session.
+assert.eq(null, testColl.findOne(doc1));
- // Insert should be visible in this session.
- assert.eq(doc1, sessionColl.findOne(doc1));
+// Insert should be visible in this session.
+assert.eq(doc1, sessionColl.findOne(doc1));
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
+PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(session.abortTransaction_forTesting());
- // After abort the insert is rolled back.
- assert.eq(null, testColl.findOne(doc1));
+// After abort the insert is rolled back.
+assert.eq(null, testColl.findOne(doc1));
- // ---- Test 2. Update a document and run prepare. ----
+// ---- Test 2. Update a document and run prepare. ----
- // Insert a document to update.
- assert.commandWorked(sessionColl.insert(doc1, {writeConcern: {w: "majority"}}));
+// Insert a document to update.
+assert.commandWorked(sessionColl.insert(doc1, {writeConcern: {w: "majority"}}));
- session.startTransaction();
- assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}));
+session.startTransaction();
+assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}));
- const doc2 = {_id: 1, x: 2};
+const doc2 = {
+ _id: 1,
+ x: 2
+};
- // Update should not be visible outside the session.
- assert.eq(null, testColl.findOne(doc2));
+// Update should not be visible outside the session.
+assert.eq(null, testColl.findOne(doc2));
- // Update should be visible in this session.
- assert.eq(doc2, sessionColl.findOne(doc2));
+// Update should be visible in this session.
+assert.eq(doc2, sessionColl.findOne(doc2));
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
+PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(session.abortTransaction_forTesting());
- // After abort the update is rolled back.
- assert.eq(doc1, testColl.findOne({_id: 1}));
+// After abort the update is rolled back.
+assert.eq(doc1, testColl.findOne({_id: 1}));
- // ---- Test 3. Delete a document and run prepare. ----
+// ---- Test 3. Delete a document and run prepare. ----
- // Update the document.
- assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}, {writeConcern: {w: "majority"}}));
+// Update the document.
+assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}, {writeConcern: {w: "majority"}}));
- session.startTransaction();
- assert.commandWorked(sessionColl.remove(doc2, {justOne: true}));
+session.startTransaction();
+assert.commandWorked(sessionColl.remove(doc2, {justOne: true}));
- // Delete should not be visible outside the session, so the document should be.
- assert.eq(doc2, testColl.findOne(doc2));
+// Delete should not be visible outside the session, so the document should be.
+assert.eq(doc2, testColl.findOne(doc2));
- // Document should not be visible in this session, since the delete should be visible.
- assert.eq(null, sessionColl.findOne(doc2));
+// Document should not be visible in this session, since the delete should be visible.
+assert.eq(null, sessionColl.findOne(doc2));
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
+PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(session.abortTransaction_forTesting());
- // After abort the delete is rolled back.
- assert.eq(doc2, testColl.findOne(doc2));
+// After abort the delete is rolled back.
+assert.eq(doc2, testColl.findOne(doc2));
}());
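All three cases above share the same lifecycle; a minimal sketch of it, reusing the handles defined in this test (the _id value is illustrative):

// Sketch: a prepared transaction aborts like an unprepared one; its writes roll back.
session.startTransaction();
assert.commandWorked(sessionColl.insert({_id: 99}));
PrepareHelpers.prepareTransaction(session);  // returns the prepareTimestamp
assert.commandWorked(session.abortTransaction_forTesting());
assert.eq(null, testColl.findOne({_id: 99}));  // the insert was rolled back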
diff --git a/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js b/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
index 297f0bbe902..5b899e73689 100644
--- a/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
+++ b/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
@@ -6,104 +6,104 @@
//
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "abort_transaction_thread_does_not_block_on_locks";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
- const sessionOptions = {causalConsistency: false};
+const dbName = "test";
+const collName = "abort_transaction_thread_does_not_block_on_locks";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
- let dropRes = testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- if (!dropRes.ok) {
- assert.commandFailedWithCode(dropRes, ErrorCodes.NamespaceNotFound);
- }
+let dropRes = testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+if (!dropRes.ok) {
+ assert.commandFailedWithCode(dropRes, ErrorCodes.NamespaceNotFound);
+}
- const bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 4; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
+const bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 4; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
- const res = assert.commandWorked(
- db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
- const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds;
+const res =
+ assert.commandWorked(db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
+const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds;
- try {
- let transactionLifeTime = 10;
- jsTest.log("Decrease transactionLifetimeLimitSeconds to " + transactionLifeTime +
- " seconds.");
- assert.commandWorked(db.adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
+try {
+ let transactionLifeTime = 10;
+ jsTest.log("Decrease transactionLifetimeLimitSeconds to " + transactionLifeTime + " seconds.");
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
- // Set up two transactions with IX locks and cursors.
+ // Set up two transactions with IX locks and cursors.
- let session1 = db.getMongo().startSession(sessionOptions);
- let sessionDb1 = session1.getDatabase(dbName);
- let sessionColl1 = sessionDb1[collName];
+ let session1 = db.getMongo().startSession(sessionOptions);
+ let sessionDb1 = session1.getDatabase(dbName);
+ let sessionColl1 = sessionDb1[collName];
- let session2 = db.getMongo().startSession(sessionOptions);
- let sessionDb2 = session2.getDatabase(dbName);
- let sessionColl2 = sessionDb2[collName];
+ let session2 = db.getMongo().startSession(sessionOptions);
+ let sessionDb2 = session2.getDatabase(dbName);
+ let sessionColl2 = sessionDb2[collName];
- let firstTxnNumber = 1;
- let secondTxnNumber = 2;
+ let firstTxnNumber = 1;
+ let secondTxnNumber = 2;
- jsTest.log("Setting up first transaction with an open cursor and IX lock");
- let cursorRes1 = assert.commandWorked(sessionDb1.runCommand({
- find: collName,
- batchSize: 2,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(firstTxnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1));
- assert.neq(0, cursorRes1.cursor.id, tojson(cursorRes1));
+ jsTest.log("Setting up first transaction with an open cursor and IX lock");
+ let cursorRes1 = assert.commandWorked(sessionDb1.runCommand({
+ find: collName,
+ batchSize: 2,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(firstTxnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1));
+ assert.neq(0, cursorRes1.cursor.id, tojson(cursorRes1));
- jsTest.log("Setting up second transaction with an open cursor and IX lock");
- let cursorRes2 = assert.commandWorked(sessionDb2.runCommand({
- find: collName,
- batchSize: 2,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(secondTxnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2));
- assert.neq(0, cursorRes2.cursor.id, tojson(cursorRes2));
+ jsTest.log("Setting up second transaction with an open cursor and IX lock");
+ let cursorRes2 = assert.commandWorked(sessionDb2.runCommand({
+ find: collName,
+ batchSize: 2,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(secondTxnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2));
+ assert.neq(0, cursorRes2.cursor.id, tojson(cursorRes2));
- jsTest.log("Perform a drop. This will block until both transactions finish. The " +
- "transactions should expire in " + transactionLifeTime * 1.5 +
- " seconds or less.");
- assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
+ jsTest.log("Perform a drop. This will block until both transactions finish. The " +
+ "transactions should expire in " + transactionLifeTime * 1.5 + " seconds or less.");
+ assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
- // Verify and cleanup.
+ // Verify and cleanup.
- jsTest.log("Drop finished. Verifying that the transactions were aborted as expected");
- assert.commandFailedWithCode(sessionDb1.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(firstTxnNumber),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(sessionDb2.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(secondTxnNumber),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
+ jsTest.log("Drop finished. Verifying that the transactions were aborted as expected");
+ assert.commandFailedWithCode(sessionDb1.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(firstTxnNumber),
+ stmtId: NumberInt(2),
+ autocommit: false
+ }),
+ ErrorCodes.NoSuchTransaction);
+ assert.commandFailedWithCode(sessionDb2.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(secondTxnNumber),
+ stmtId: NumberInt(2),
+ autocommit: false
+ }),
+ ErrorCodes.NoSuchTransaction);
- session1.endSession();
- session2.endSession();
- } finally {
- assert.commandWorked(db.adminCommand({
- setParameter: 1,
- transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
- }));
- }
+ session1.endSession();
+ session2.endSession();
+} finally {
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
+ }));
+}
}());
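The open snapshot cursors are what pin each transaction's IX lock until the expiry thread aborts them. For context, a sketch of how such a cursor would be continued inside the transaction; the command shape is assumed from the find above and is not part of this test:

// Sketch (assumed shape): fetch the next batch on the transaction's snapshot cursor.
let getMoreRes = sessionDb1.runCommand({
    getMore: cursorRes1.cursor.id,
    collection: collName,
    batchSize: 2,
    txnNumber: NumberLong(firstTxnNumber),
    autocommit: false
});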
diff --git a/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js b/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js
index cfedbb158b1..aadda6561a9 100644
--- a/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js
+++ b/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js
@@ -1,45 +1,47 @@
// Test that open unprepared transactions are aborted on FCV downgrade.
// @tags: [uses_transactions]
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
-
- const dbName = "test";
- const collName = "abort_unprepared_transactions_on_FCV_downgrade";
- const testDB = db.getSiblingDB(dbName);
- const adminDB = db.getSiblingDB("admin");
- testDB[collName].drop({writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDB = session.getDatabase(dbName);
-
- try {
- jsTestLog("Start a transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"}));
-
- jsTestLog("Attempt to drop the collection. This should fail due to the open transaction.");
- assert.commandFailedWithCode(testDB.runCommand({drop: collName, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Downgrade the featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
-
- jsTestLog("Drop the collection. This should succeed, since the transaction was aborted.");
- assert.commandWorked(testDB.runCommand({drop: collName}));
-
- jsTestLog("Test that committing the transaction fails, since it was aborted.");
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } finally {
- jsTestLog("Restore the original featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
- }
-
- session.endSession();
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+
+const dbName = "test";
+const collName = "abort_unprepared_transactions_on_FCV_downgrade";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB("admin");
+testDB[collName].drop({writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDB = session.getDatabase(dbName);
+
+try {
+ jsTestLog("Start a transaction.");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"}));
+
+ jsTestLog("Attempt to drop the collection. This should fail due to the open transaction.");
+ assert.commandFailedWithCode(testDB.runCommand({drop: collName, maxTimeMS: 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Downgrade the featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(adminDB, lastStableFCV);
+
+ jsTestLog("Drop the collection. This should succeed, since the transaction was aborted.");
+ assert.commandWorked(testDB.runCommand({drop: collName}));
+
+ jsTestLog("Test that committing the transaction fails, since it was aborted.");
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} finally {
+ jsTestLog("Restore the original featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ checkFCV(adminDB, latestFCV);
+}
+
+session.endSession();
}());
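checkFCV, loaded from feature_compatibility_version.js, boils down to reading the server's FCV document. A sketch of the equivalent direct read; the document's location is a server convention, not something this test asserts:

// Sketch: the FCV state lives in admin.system.version.
let fcvDoc = db.getSiblingDB("admin").system.version.findOne({_id: "featureCompatibilityVersion"});
jsTestLog("FCV document: " + tojson(fcvDoc));  // e.g. {version: lastStableFCV}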
diff --git a/jstests/core/txns/aggregation_in_transaction.js b/jstests/core/txns/aggregation_in_transaction.js
index c14c4276fa8..76c5f4d6a0a 100644
--- a/jstests/core/txns/aggregation_in_transaction.js
+++ b/jstests/core/txns/aggregation_in_transaction.js
@@ -1,56 +1,62 @@
// Tests that aggregation is supported in transactions.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
-
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const testDB = session.getDatabase("test");
- const coll = testDB.getCollection("aggregation_in_transaction");
- const foreignColl = testDB.getCollection("aggregation_in_transaction_lookup");
-
- [coll, foreignColl].forEach(col => {
- const reply = col.runCommand("drop", {writeConcern: {w: "majority"}});
- if (reply.ok !== 1) {
- assert.commandFailedWithCode(reply, ErrorCodes.NamespaceNotFound);
- }
- });
-
- // Populate the collections.
- const testDoc = {_id: 0, foreignKey: "orange"};
- assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}}));
- const foreignDoc = {_id: "orange", val: 9};
- assert.commandWorked(foreignColl.insert(foreignDoc, {writeConcern: {w: "majority"}}));
-
- const isForeignSharded = FixtureHelpers.isSharded(foreignColl);
-
- // Run a dummy find to start the transaction.
- jsTestLog("Starting transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}});
- let cursor = coll.find();
- cursor.next();
-
- // Insert a document outside of the transaction. Subsequent aggregations should not see this
- // document.
- jsTestLog("Inserting document outside of transaction.");
- assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({
- _id: "not_visible_in_transaction",
- foreignKey: "orange",
- }));
-
- // Perform an aggregation that is fed by a cursor on the underlying collection. Only the
- // majority-committed document present at the start of the transaction should be found.
- jsTestLog("Starting aggregations inside of the transaction.");
- cursor = coll.aggregate({$match: {}});
- assert.docEq(testDoc, cursor.next());
- assert(!cursor.hasNext());
+"use strict";
+
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
- // Perform aggregations that look at other collections.
- // TODO: SERVER-39162 Sharded $lookup is not supported in transactions.
- if (!isForeignSharded) {
- const lookupDoc = Object.extend(testDoc, {lookup: [foreignDoc]});
- cursor = coll.aggregate({
+const session = db.getMongo().startSession({causalConsistency: false});
+const testDB = session.getDatabase("test");
+const coll = testDB.getCollection("aggregation_in_transaction");
+const foreignColl = testDB.getCollection("aggregation_in_transaction_lookup");
+
+[coll, foreignColl].forEach(col => {
+ const reply = col.runCommand("drop", {writeConcern: {w: "majority"}});
+ if (reply.ok !== 1) {
+ assert.commandFailedWithCode(reply, ErrorCodes.NamespaceNotFound);
+ }
+});
+
+// Populate the collections.
+const testDoc = {
+ _id: 0,
+ foreignKey: "orange"
+};
+assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}}));
+const foreignDoc = {
+ _id: "orange",
+ val: 9
+};
+assert.commandWorked(foreignColl.insert(foreignDoc, {writeConcern: {w: "majority"}}));
+
+const isForeignSharded = FixtureHelpers.isSharded(foreignColl);
+
+// Run a dummy find to start the transaction.
+jsTestLog("Starting transaction.");
+session.startTransaction({readConcern: {level: "snapshot"}});
+let cursor = coll.find();
+cursor.next();
+
+// Insert a document outside of the transaction. Subsequent aggregations should not see this
+// document.
+jsTestLog("Inserting document outside of transaction.");
+assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({
+ _id: "not_visible_in_transaction",
+ foreignKey: "orange",
+}));
+
+// Perform an aggregation that is fed by a cursor on the underlying collection. Only the
+// majority-committed document present at the start of the transaction should be found.
+jsTestLog("Starting aggregations inside of the transaction.");
+cursor = coll.aggregate({$match: {}});
+assert.docEq(testDoc, cursor.next());
+assert(!cursor.hasNext());
+
+// Perform aggregations that look at other collections.
+// TODO: SERVER-39162 Sharded $lookup is not supported in transactions.
+if (!isForeignSharded) {
+ const lookupDoc = Object.extend(testDoc, {lookup: [foreignDoc]});
+ cursor = coll.aggregate({
$lookup: {
from: foreignColl.getName(),
localField: "foreignKey",
@@ -58,10 +64,10 @@
as: "lookup",
}
});
- assert.docEq(cursor.next(), lookupDoc);
- assert(!cursor.hasNext());
+ assert.docEq(cursor.next(), lookupDoc);
+ assert(!cursor.hasNext());
- cursor = coll.aggregate({
+ cursor = coll.aggregate({
$graphLookup: {
from: foreignColl.getName(),
startWith: "$foreignKey",
@@ -70,50 +76,47 @@
as: "lookup"
}
});
- assert.docEq(cursor.next(), lookupDoc);
- assert(!cursor.hasNext());
- } else {
- // TODO SERVER-39048: Test that $lookup on sharded collection is banned
- // within a transaction.
- }
-
- jsTestLog("Testing $count within a transaction.");
-
- let countRes = coll.aggregate([{$count: "count"}]).toArray();
- assert.eq(countRes.length, 1, tojson(countRes));
- assert.eq(countRes[0].count, 1, tojson(countRes));
-
- assert.commandWorked(coll.insert({a: 2}));
- countRes = coll.aggregate([{$count: "count"}]).toArray();
- assert.eq(countRes.length, 1, tojson(countRes));
- assert.eq(countRes[0].count, 2, tojson(countRes));
-
- assert.commandWorked(
- db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({a: 3}));
- countRes = coll.aggregate([{$count: "count"}]).toArray();
- assert.eq(countRes.length, 1, tojson(countRes));
- assert.eq(countRes[0].count, 2, tojson(countRes));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Transaction committed.");
-
- // Perform aggregations with non-cursor initial sources and assert that they are not supported
- // in transactions.
- jsTestLog("Running aggregations in transactions that are expected to throw and fail.");
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.throws(() => coll.aggregate({$currentOp: {allUsers: true, localOps: true}}).next());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.throws(
- () => coll.aggregate({$collStats: {latencyStats: {histograms: true}, storageStats: {}}})
- .next());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.throws(() => coll.aggregate({$indexStats: {}}).next());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ assert.docEq(cursor.next(), lookupDoc);
+ assert(!cursor.hasNext());
+} else {
+ // TODO SERVER-39048: Test that $lookup on sharded collection is banned
+ // within a transaction.
+}
+
+jsTestLog("Testing $count within a transaction.");
+
+let countRes = coll.aggregate([{$count: "count"}]).toArray();
+assert.eq(countRes.length, 1, tojson(countRes));
+assert.eq(countRes[0].count, 1, tojson(countRes));
+
+assert.commandWorked(coll.insert({a: 2}));
+countRes = coll.aggregate([{$count: "count"}]).toArray();
+assert.eq(countRes.length, 1, tojson(countRes));
+assert.eq(countRes[0].count, 2, tojson(countRes));
+
+assert.commandWorked(
+ db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({a: 3}));
+countRes = coll.aggregate([{$count: "count"}]).toArray();
+assert.eq(countRes.length, 1, tojson(countRes));
+assert.eq(countRes[0].count, 2, tojson(countRes));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+jsTestLog("Transaction committed.");
+
+// Perform aggregations with non-cursor initial sources and assert that they are not supported
+// in transactions.
+jsTestLog("Running aggregations in transactions that are expected to throw and fail.");
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.throws(() => coll.aggregate({$currentOp: {allUsers: true, localOps: true}}).next());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.throws(
+ () =>
+ coll.aggregate({$collStats: {latencyStats: {histograms: true}, storageStats: {}}}).next());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.throws(() => coll.aggregate({$indexStats: {}}).next());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
}());
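The $count assertions above are snapshot isolation in miniature: writes made inside the transaction are visible to its aggregations, while concurrent outside writes are not. A condensed sketch of that rule, assuming a fresh snapshot transaction on a non-empty collection:

// Sketch: snapshot isolation as observed through $count.
session.startTransaction({readConcern: {level: "snapshot"}});
const before = coll.aggregate([{$count: "n"}]).toArray()[0].n;
assert.commandWorked(coll.insert({a: 99}));  // inside the transaction: counted
assert.commandWorked(
    db.getSiblingDB("test")[coll.getName()].insert({a: 100}));  // outside: not counted
assert.eq(before + 1, coll.aggregate([{$count: "n"}]).toArray()[0].n);
assert.commandWorked(session.commitTransaction_forTesting());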
diff --git a/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js b/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js
index a6623101196..32e5822519c 100644
--- a/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js
+++ b/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js
@@ -3,68 +3,68 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "await_prepared_transactions_on_FCV_downgrade";
- const testDB = db.getSiblingDB(dbName);
- const adminDB = db.getSiblingDB("admin");
+const dbName = "test";
+const collName = "await_prepared_transactions_on_FCV_downgrade";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB("admin");
- testDB[collName].drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testDB[collName].drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = testDB.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = testDB.getMongo().startSession();
+const sessionDB = session.getDatabase(dbName);
- try {
- jsTestLog("Start a transaction.");
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({"a": 1}));
+try {
+ jsTestLog("Start a transaction.");
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({"a": 1}));
- jsTestLog("Put that transaction into a prepared state.");
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ jsTestLog("Put that transaction into a prepared state.");
+ let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- // The setFCV command will need to acquire a global S lock to complete. The global
- // lock is currently held by prepare, so that will block. We use a failpoint to make that
- // command fail immediately when it tries to get the lock.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
+ // The setFCV command will need to acquire a global S lock to complete. The global
+ // lock is currently held by prepare, so that will block. We use a failpoint to make that
+ // command fail immediately when it tries to get the lock.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
- jsTestLog("Attempt to downgrade the featureCompatibilityVersion.");
- assert.commandFailedWithCode(
- testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}),
- ErrorCodes.LockTimeout);
+ jsTestLog("Attempt to downgrade the featureCompatibilityVersion.");
+ assert.commandFailedWithCode(
+ testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}),
+ ErrorCodes.LockTimeout);
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
- jsTestLog("Verify that the setFCV command set the target version to 'lastStable'.");
- checkFCV(adminDB, lastStableFCV, lastStableFCV);
+ jsTestLog("Verify that the setFCV command set the target version to 'lastStable'.");
+ checkFCV(adminDB, lastStableFCV, lastStableFCV);
- jsTestLog("Commit the prepared transaction.");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+ jsTestLog("Commit the prepared transaction.");
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- jsTestLog("Rerun the setFCV command and let it complete successfully.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
+ jsTestLog("Rerun the setFCV command and let it complete successfully.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(adminDB, lastStableFCV);
- jsTestLog("Verify that we are not allowed to prepare a transaction after downgrading.");
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({"b": 2}));
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.CommandNotSupported);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } finally {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
+ jsTestLog("Verify that we are not allowed to prepare a transaction after downgrading.");
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({"b": 2}));
+ assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.CommandNotSupported);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} finally {
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
- jsTestLog("Restore the original featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
- }
+ jsTestLog("Restore the original featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ checkFCV(adminDB, latestFCV);
+}
- session.endSession();
+session.endSession();
}());
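The failpoint toggling above follows the standard on/off pattern; a generic sketch of it, with the failpoint name taken from this test:

// Sketch: scope a failpoint so it is always turned off again.
assert.commandWorked(db.adminCommand(
    {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
try {
    // ... run the command expected to fail fast instead of waiting on a lock ...
} finally {
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
}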
diff --git a/jstests/core/txns/banned_txn_dbs.js b/jstests/core/txns/banned_txn_dbs.js
index 4422d19ea0a..78bcef608a5 100644
--- a/jstests/core/txns/banned_txn_dbs.js
+++ b/jstests/core/txns/banned_txn_dbs.js
@@ -2,36 +2,36 @@
// transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const session = db.getMongo().startSession({causalConsistency: false});
- const collName = "banned_txn_dbs";
+const session = db.getMongo().startSession({causalConsistency: false});
+const collName = "banned_txn_dbs";
- function runTest(sessionDB) {
- jsTest.log("Testing database " + sessionDB.getName());
+function runTest(sessionDB) {
+ jsTest.log("Testing database " + sessionDB.getName());
- let sessionColl = sessionDB[collName];
- sessionColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(sessionDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+ let sessionColl = sessionDB[collName];
+ sessionColl.drop({writeConcern: {w: "majority"}});
+ assert.commandWorked(sessionDB.createCollection(collName, {writeConcern: {w: "majority"}}));
- jsTest.log("Testing read commands are forbidden.");
- session.startTransaction();
- let error = assert.throws(() => sessionColl.find().itcount());
- assert.commandFailedWithCode(error, ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ jsTest.log("Testing read commands are forbidden.");
+ session.startTransaction();
+ let error = assert.throws(() => sessionColl.find().itcount());
+ assert.commandFailedWithCode(error, ErrorCodes.OperationNotSupportedInTransaction);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- jsTest.log("Testing write commands are forbidden.");
- session.startTransaction();
- assert.commandFailedWithCode(sessionColl.insert({}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
+ jsTest.log("Testing write commands are forbidden.");
+ session.startTransaction();
+ assert.commandFailedWithCode(sessionColl.insert({}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+}
- runTest(session.getDatabase("config"));
- runTest(session.getDatabase("admin"));
- runTest(session.getDatabase("local"));
+runTest(session.getDatabase("config"));
+runTest(session.getDatabase("admin"));
+runTest(session.getDatabase("local"));
- session.endSession();
+session.endSession();
}());
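For contrast with the banned databases, the same pattern succeeds against an ordinary database. A minimal sketch reusing the session above; it assumes the target collection already exists, since creating one inside a transaction is not supported here:

// Sketch: transactions behave normally on a non-banned database.
const okDB = session.getDatabase("test");
session.startTransaction();
assert.commandWorked(okDB[collName].insert({}));
assert.commandWorked(session.commitTransaction_forTesting());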
diff --git a/jstests/core/txns/basic_causal_consistency.js b/jstests/core/txns/basic_causal_consistency.js
index 84f1520a105..5a78ddc0900 100644
--- a/jstests/core/txns/basic_causal_consistency.js
+++ b/jstests/core/txns/basic_causal_consistency.js
@@ -1,33 +1,35 @@
// Test that the shell helper supports causal consistency.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "basic_causal_consistency";
- const testDB = db.getSiblingDB(dbName);
+const dbName = "test";
+const collName = "basic_causal_consistency";
+const testDB = db.getSiblingDB(dbName);
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: true};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
+const sessionOptions = {
+ causalConsistency: true
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
- session.startTransaction({readConcern: {level: "snapshot"}});
+session.startTransaction({readConcern: {level: "snapshot"}});
- // Performing a read first should work when snapshot readConcern is specified.
- assert.docEq(null, sessionColl.findOne({_id: "insert-1"}));
+// Performing a read first should work when snapshot readConcern is specified.
+assert.docEq(null, sessionColl.findOne({_id: "insert-1"}));
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- assert.docEq(null, sessionColl.findOne({_id: "insert-2"}));
+assert.docEq(null, sessionColl.findOne({_id: "insert-2"}));
- assert.docEq({_id: "insert-1"}, sessionColl.findOne({_id: "insert-1"}));
+assert.docEq({_id: "insert-1"}, sessionColl.findOne({_id: "insert-1"}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+session.endSession();
}());
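Under the hood, a causally consistent session forwards the cluster time of each write into subsequent reads. A sketch of the equivalent manual handshake, with field names as they appear in command replies; this is illustrative and not part of the test:

// Sketch: read-your-writes by hand with afterClusterTime.
let writeRes = assert.commandWorked(
    testDB.runCommand({insert: collName, documents: [{_id: "manual-cc"}]}));
assert.commandWorked(testDB.runCommand({
    find: collName,
    filter: {_id: "manual-cc"},
    readConcern: {level: "local", afterClusterTime: writeRes.operationTime}
}));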
diff --git a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
index 08a8551fb09..7f611ce2869 100644
--- a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
+++ b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
@@ -3,59 +3,59 @@
// requires_document_locking,
// ]
(function() {
- "use strict";
+"use strict";
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("admin");
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase("admin");
- const nonRetryableWriteCommands = [
- // Commands that are allowed in transactions.
- {aggregate: 1},
- {commitTransaction: 1},
- {distinct: "c"},
- {find: "c"},
- {getMore: NumberLong(1), collection: "c"},
- {killCursors: 1},
- // A selection of commands that are not allowed in transactions.
- {count: 1},
- {explain: {find: "c"}},
- {filemd5: 1},
- {isMaster: 1},
- {buildInfo: 1},
- {ping: 1},
- {listCommands: 1},
- {create: "c"},
- {drop: 1},
- {createIndexes: 1},
- {mapReduce: "c"}
- ];
+const nonRetryableWriteCommands = [
+ // Commands that are allowed in transactions.
+ {aggregate: 1},
+ {commitTransaction: 1},
+ {distinct: "c"},
+ {find: "c"},
+ {getMore: NumberLong(1), collection: "c"},
+ {killCursors: 1},
+ // A selection of commands that are not allowed in transactions.
+ {count: 1},
+ {explain: {find: "c"}},
+ {filemd5: 1},
+ {isMaster: 1},
+ {buildInfo: 1},
+ {ping: 1},
+ {listCommands: 1},
+ {create: "c"},
+ {drop: 1},
+ {createIndexes: 1},
+ {mapReduce: "c"}
+];
- const nonRetryableWriteCommandsMongodOnly = [
- // Commands that are allowed in transactions.
- {coordinateCommitTransaction: 1, participants: []},
- {geoSearch: 1},
- {prepareTransaction: 1},
- // A selection of commands that are not allowed in transactions.
- {applyOps: 1}
- ];
+const nonRetryableWriteCommandsMongodOnly = [
+ // Commands that are allowed in transactions.
+ {coordinateCommitTransaction: 1, participants: []},
+ {geoSearch: 1},
+ {prepareTransaction: 1},
+ // A selection of commands that are not allowed in transactions.
+ {applyOps: 1}
+];
- nonRetryableWriteCommands.forEach(function(command) {
+nonRetryableWriteCommands.forEach(function(command) {
+ jsTest.log("Testing command: " + tojson(command));
+ assert.commandFailedWithCode(
+ sessionDb.runCommand(Object.assign({}, command, {txnNumber: NumberLong(0)})),
+ [50768, 50889]);
+});
+
+if (!isMongos) {
+ nonRetryableWriteCommandsMongodOnly.forEach(function(command) {
jsTest.log("Testing command: " + tojson(command));
assert.commandFailedWithCode(
sessionDb.runCommand(Object.assign({}, command, {txnNumber: NumberLong(0)})),
[50768, 50889]);
});
+}
- if (!isMongos) {
- nonRetryableWriteCommandsMongodOnly.forEach(function(command) {
- jsTest.log("Testing command: " + tojson(command));
- assert.commandFailedWithCode(
- sessionDb.runCommand(Object.assign({}, command, {txnNumber: NumberLong(0)})),
- [50768, 50889]);
- });
- }
-
- session.endSession();
+session.endSession();
}());
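The one non-transaction context in which 'txnNumber' is accepted is a retryable write. For contrast, a sketch against a hypothetical collection name:

// Sketch: a retryable write on a session legitimately carries txnNumber.
const retryableDb = session.getDatabase("test");
assert.commandWorked(retryableDb.runCommand({
    insert: "txnnumber_outside_txn_demo",  // hypothetical collection
    documents: [{_id: 1}],
    txnNumber: NumberLong(1)
}));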
diff --git a/jstests/core/txns/commands_not_allowed_in_txn.js b/jstests/core/txns/commands_not_allowed_in_txn.js
index d7ae9e51698..41260615d91 100644
--- a/jstests/core/txns/commands_not_allowed_in_txn.js
+++ b/jstests/core/txns/commands_not_allowed_in_txn.js
@@ -4,192 +4,189 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "commands_not_allowed_in_txn";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- let txnNumber = 0;
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
-
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
-
+"use strict";
+
+const dbName = "test";
+const collName = "commands_not_allowed_in_txn";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+let txnNumber = 0;
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
+
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {name: "geo_2d", key: {geo: "2d"}},
+ {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
+ ],
+ writeConcern: {w: "majority"}
+}));
+
+function setup() {
+ testColl.dropIndex({a: 1});
+ testDB.runCommand({drop: "create_collection", writeConcern: {w: "majority"}});
+ testDB.runCommand({drop: "drop_collection", writeConcern: {w: "majority"}});
assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [
- {name: "geo_2d", key: {geo: "2d"}},
- {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
- ],
- writeConcern: {w: "majority"}
- }));
-
- function setup() {
- testColl.dropIndex({a: 1});
- testDB.runCommand({drop: "create_collection", writeConcern: {w: "majority"}});
- testDB.runCommand({drop: "drop_collection", writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection("drop_collection", {writeConcern: {w: "majority"}}));
- }
-
- function testCommand(command) {
- jsTest.log("Testing command: " + tojson(command));
- const errmsgRegExp = new RegExp(
- 'Cannot run .* in a multi-document transaction.\|This command is not supported in transactions');
-
- // Check that the command runs successfully outside transactions.
- setup();
- assert.commandWorked(sessionDb.runCommand(command));
-
- // Check that the command cannot be used to start a transaction.
- setup();
- let res = assert.commandFailedWithCode(sessionDb.runCommand(Object.assign({}, command, {
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- })),
- ErrorCodes.OperationNotSupportedInTransaction);
- // Check that the command fails with expected error message.
- assert(res.errmsg.match(errmsgRegExp), res);
-
- // Mongos has special handling for commitTransaction to support commit recovery.
- if (!isMongos) {
- assert.commandFailedWithCode(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- }
-
- // Check that the command fails inside a transaction, but does not abort the transaction.
- setup();
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- res = assert.commandFailedWithCode(
- sessionDb.runCommand(Object.assign(
- {},
- command,
- {txnNumber: NumberLong(txnNumber), stmtId: NumberInt(1), autocommit: false})),
- ErrorCodes.OperationNotSupportedInTransaction);
- // Check that the command fails with expected error message.
- assert(res.errmsg.match(errmsgRegExp), res);
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(2),
- autocommit: false
- }));
- }
+ testDB.createCollection("drop_collection", {writeConcern: {w: "majority"}}));
+}
- //
- // Test a selection of commands that are not allowed in transactions.
- //
-
- const commands = [
- {count: collName},
- {count: collName, query: {a: 1}},
- {explain: {find: collName}},
- {filemd5: 1, root: "fs"},
- {isMaster: 1},
- {buildInfo: 1},
- {ping: 1},
- {listCommands: 1},
- {create: "create_collection", writeConcern: {w: "majority"}},
- {drop: "drop_collection", writeConcern: {w: "majority"}},
- {
- createIndexes: collName,
- indexes: [{name: "a_1", key: {a: 1}}],
- writeConcern: {w: "majority"}
- },
- // Output inline so the implicitly shard accessed collections override won't drop the
- // output collection during the active transaction test case, which would hang indefinitely
- // waiting for a database exclusive lock.
- {mapReduce: collName, map: function() {}, reduce: function(key, vals) {}, out: {inline: 1}},
- ];
-
- // There is no applyOps command on mongos.
- if (!isMongos) {
- commands.push(
- {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}]});
- }
+function testCommand(command) {
+ jsTest.log("Testing command: " + tojson(command));
+ const errmsgRegExp = new RegExp(
+ 'Cannot run .* in a multi-document transaction.\|This command is not supported in transactions');
- commands.forEach(testCommand);
+ // Check that the command runs successfully outside transactions.
+ setup();
+ assert.commandWorked(sessionDb.runCommand(command));
- //
- // Test that doTxn is not allowed at positions after the first in transactions.
- //
+ // Check that the command cannot be used to start a transaction.
+ setup();
+ let res = assert.commandFailedWithCode(sessionDb.runCommand(Object.assign({}, command, {
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ })),
+ ErrorCodes.OperationNotSupportedInTransaction);
+ // Check that the command fails with expected error message.
+ assert(res.errmsg.match(errmsgRegExp), res);
- // There is no doTxn command on mongos.
+ // Mongos has special handling for commitTransaction to support commit recovery.
if (!isMongos) {
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- assert.commandFailedWithCode(sessionDb.runCommand({
- doTxn: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}],
+ assert.commandFailedWithCode(sessionDb.adminCommand({
+ commitTransaction: 1,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(1),
autocommit: false
}),
- ErrorCodes.OperationNotSupportedInTransaction);
-
- // It is still possible to commit the transaction. The rejected command does not abort the
- // transaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(2),
- autocommit: false
- }));
+ ErrorCodes.NoSuchTransaction);
}
- //
- // Test that a find command with the read-once cursor option is not allowed in a transaction.
- //
- assert.commandFailedWithCode(sessionDb.runCommand({
+ // Check that the command fails inside a transaction, but does not abort the transaction.
+ setup();
+ assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+ }));
+ res = assert.commandFailedWithCode(
+ sessionDb.runCommand(Object.assign(
+ {},
+ command,
+ {txnNumber: NumberLong(txnNumber), stmtId: NumberInt(1), autocommit: false})),
+ ErrorCodes.OperationNotSupportedInTransaction);
+ // Check that the command fails with expected error message.
+ assert(res.errmsg.match(errmsgRegExp), res);
+ assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(2),
+ autocommit: false
+ }));
+}
+
+//
+// Test a selection of commands that are not allowed in transactions.
+//
+
+const commands = [
+ {count: collName},
+ {count: collName, query: {a: 1}},
+ {explain: {find: collName}},
+ {filemd5: 1, root: "fs"},
+ {isMaster: 1},
+ {buildInfo: 1},
+ {ping: 1},
+ {listCommands: 1},
+ {create: "create_collection", writeConcern: {w: "majority"}},
+ {drop: "drop_collection", writeConcern: {w: "majority"}},
+ {createIndexes: collName, indexes: [{name: "a_1", key: {a: 1}}], writeConcern: {w: "majority"}},
+ // Output inline so the implicitly shard accessed collections override won't drop the
+ // output collection during the active transaction test case, which would hang indefinitely
+ // waiting for a database exclusive lock.
+ {mapReduce: collName, map: function() {}, reduce: function(key, vals) {}, out: {inline: 1}},
+];
+
+// There is no applyOps command on mongos.
+if (!isMongos) {
+ commands.push(
+ {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}]});
+}
+
+commands.forEach(testCommand);
+
+//
+// Test that doTxn is not allowed at positions after the first in transactions.
+//
+
+// There is no doTxn command on mongos.
+if (!isMongos) {
+ assert.commandWorked(sessionDb.runCommand({
find: collName,
- readOnce: true,
readConcern: {level: "snapshot"},
txnNumber: NumberLong(++txnNumber),
stmtId: NumberInt(0),
startTransaction: true,
autocommit: false
+ }));
+ assert.commandFailedWithCode(sessionDb.runCommand({
+ doTxn: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(1),
+ autocommit: false
}),
ErrorCodes.OperationNotSupportedInTransaction);
- // Mongos has special handling for commitTransaction to support commit recovery.
- if (!isMongos) {
- // The failed find should abort the transaction so a commit should fail.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- commitTransaction: 1,
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(1),
- }),
- ErrorCodes.NoSuchTransaction);
- }
+ // It is still possible to commit the transaction. The rejected command does not abort the
+ // transaction.
+ assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(2),
+ autocommit: false
+ }));
+}
+
+//
+// Test that a find command with the read-once cursor option is not allowed in a transaction.
+//
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ readOnce: true,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+
+// Mongos has special handling for commitTransaction to support commit recovery.
+if (!isMongos) {
+ // The failed find should abort the transaction so a commit should fail.
+ assert.commandFailedWithCode(sessionDb.adminCommand({
+ commitTransaction: 1,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(1),
+ }),
+ ErrorCodes.NoSuchTransaction);
+}
- session.endSession();
+session.endSession();
}());
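Each transactional attempt above is built by layering session fields over a base command; a one-step sketch of that merge, with illustrative values:

// Sketch: decorate a base command with the fields that start a transaction.
let baseCmd = {count: "commands_not_allowed_in_txn"};
let txnCmd = Object.assign({}, baseCmd, {
    readConcern: {level: "snapshot"},
    txnNumber: NumberLong(7),  // illustrative
    stmtId: NumberInt(0),
    startTransaction: true,
    autocommit: false
});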
diff --git a/jstests/core/txns/commit_and_abort_large_prepared_transactions.js b/jstests/core/txns/commit_and_abort_large_prepared_transactions.js
index 448c2bc79b5..d7505dfb043 100644
--- a/jstests/core/txns/commit_and_abort_large_prepared_transactions.js
+++ b/jstests/core/txns/commit_and_abort_large_prepared_transactions.js
@@ -5,48 +5,47 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const dbName = "test";
- const collName = "large_prepared_transactions";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // As we are not able to send a single request larger than 16MB, we insert two documents
- // of 10MB each to create a "large" transaction.
- const kSize10MB = 10 * 1024 * 1024;
- function createLargeDocument(id) {
- return {_id: id, longString: new Array(kSize10MB).join("a")};
- }
-
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // Test preparing and committing a large transaction with two 10MB inserts.
- let doc1 = createLargeDocument(1);
- let doc2 = createLargeDocument(2);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
- assert.commandWorked(sessionColl.insert(doc2));
-
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]);
-
- // Test preparing and aborting a large transaction with two 10MB inserts.
- let doc3 = createLargeDocument(3);
- let doc4 = createLargeDocument(4);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc3));
- assert.commandWorked(sessionColl.insert(doc4));
-
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []);
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const dbName = "test";
+const collName = "large_prepared_transactions";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// As we are not able to send a single request larger than 16MB, we insert two documents
+// of 10MB each to create a "large" transaction.
+const kSize10MB = 10 * 1024 * 1024;
+function createLargeDocument(id) {
+ return {_id: id, longString: new Array(kSize10MB).join("a")};
+}
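+// Note: new Array(n).join("a") yields a string of n - 1 characters, so each
+// document is just under 10MB before BSON field overhead.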
+
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// Test preparing and committing a large transaction with two 10MB inserts.
+let doc1 = createLargeDocument(1);
+let doc2 = createLargeDocument(2);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
+assert.commandWorked(sessionColl.insert(doc2));
+
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]);
+
+// Test preparing and aborting a large transaction with two 10MB inserts.
+let doc3 = createLargeDocument(3);
+let doc4 = createLargeDocument(4);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc3));
+assert.commandWorked(sessionColl.insert(doc4));
+
+PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []);
}());
diff --git a/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js b/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js
index 2ddda9ed3f3..feb09ef4656 100644
--- a/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js
+++ b/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js
@@ -5,45 +5,44 @@
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "large_unprepared_transactions";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // As we are not able to send a single request larger than 16MB, we insert two documents
- // of 10MB each to create a "large" transaction.
- const kSize10MB = 10 * 1024 * 1024;
- function createLargeDocument(id) {
- return {_id: id, longString: new Array(kSize10MB).join("a")};
- }
-
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // Test committing an unprepared large transaction with two 10MB inserts.
- let doc1 = createLargeDocument(1);
- let doc2 = createLargeDocument(2);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
- assert.commandWorked(sessionColl.insert(doc2));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]);
-
- // Test aborting an unprepared large transaction with two 10MB inserts.
- let doc3 = createLargeDocument(3);
- let doc4 = createLargeDocument(4);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc3));
- assert.commandWorked(sessionColl.insert(doc4));
-
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []);
+"use strict";
+
+const dbName = "test";
+const collName = "large_unprepared_transactions";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// As we are not able to send a single request larger than 16MB, we insert two documents
+// of 10MB each to create a "large" transaction.
+const kSize10MB = 10 * 1024 * 1024;
+function createLargeDocument(id) {
+ return {_id: id, longString: new Array(kSize10MB).join("a")};
+}
+
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// Test committing an unprepared large transaction with two 10MB inserts.
+let doc1 = createLargeDocument(1);
+let doc2 = createLargeDocument(2);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
+assert.commandWorked(sessionColl.insert(doc2));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]);
+
+// Test aborting an unprepared large transaction with two 10MB inserts.
+let doc3 = createLargeDocument(3);
+let doc4 = createLargeDocument(4);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc3));
+assert.commandWorked(sessionColl.insert(doc4));
+
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []);
}());
diff --git a/jstests/core/txns/commit_prepared_transaction.js b/jstests/core/txns/commit_prepared_transaction.js
index 39e9fa6c3b7..d8bd4908943 100644
--- a/jstests/core/txns/commit_prepared_transaction.js
+++ b/jstests/core/txns/commit_prepared_transaction.js
@@ -7,85 +7,91 @@
load("jstests/core/txns/libs/prepare_helpers.js");
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "commit_prepared_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_prepared_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const doc1 = {_id: 1, x: 1};
+const doc1 = {
+ _id: 1,
+ x: 1
+};
- // ---- Test 1. Insert a single document and run prepare. ----
+// ---- Test 1. Insert a single document and run prepare. ----
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
- // Insert should not be visible outside the session.
- assert.eq(null, testColl.findOne(doc1));
+// Insert should not be visible outside the session.
+assert.eq(null, testColl.findOne(doc1));
- // Insert should be visible in this session.
- assert.eq(doc1, sessionColl.findOne(doc1));
+// Insert should be visible in this session.
+assert.eq(doc1, sessionColl.findOne(doc1));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-    // Users should not be allowed to modify config.transactions entries for prepared transactions.
- // This portion of the test needs to run on a connection without implicit sessions, because
- // writes to `config.transactions` are disallowed under sessions.
- {
- var conn = new Mongo(db.getMongo().host);
- conn._setDummyDefaultSession();
- var configDB = conn.getDB('config');
- assert.commandFailed(configDB.transactions.remove({"_id.id": session.getSessionId().id}));
- assert.commandFailed(configDB.transactions.update({"_id.id": session.getSessionId().id},
- {$set: {extraField: 1}}));
- }
+// Users should not be allowed to modify config.transactions entries for prepared transactions.
+// This portion of the test needs to run on a connection without implicit sessions, because
+// writes to `config.transactions` are disallowed under sessions.
+{
+ var conn = new Mongo(db.getMongo().host);
+ conn._setDummyDefaultSession();
+ var configDB = conn.getDB('config');
+ assert.commandFailed(configDB.transactions.remove({"_id.id": session.getSessionId().id}));
+ assert.commandFailed(configDB.transactions.update({"_id.id": session.getSessionId().id},
+ {$set: {extraField: 1}}));
+}
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // After commit the insert persists.
- assert.eq(doc1, testColl.findOne(doc1));
+// After commit the insert persists.
+assert.eq(doc1, testColl.findOne(doc1));
- // ---- Test 2. Update a document and run prepare. ----
+// ---- Test 2. Update a document and run prepare. ----
- session.startTransaction();
- assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}));
+session.startTransaction();
+assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}));
- const doc2 = {_id: 1, x: 2};
+const doc2 = {
+ _id: 1,
+ x: 2
+};
- // Update should not be visible outside the session.
- assert.eq(null, testColl.findOne(doc2));
+// Update should not be visible outside the session.
+assert.eq(null, testColl.findOne(doc2));
- // Update should be visible in this session.
- assert.eq(doc2, sessionColl.findOne(doc2));
+// Update should be visible in this session.
+assert.eq(doc2, sessionColl.findOne(doc2));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // After commit the update persists.
- assert.eq(doc2, testColl.findOne({_id: 1}));
+// After commit the update persists.
+assert.eq(doc2, testColl.findOne({_id: 1}));
- // ---- Test 3. Delete a document and run prepare. ----
+// ---- Test 3. Delete a document and run prepare. ----
- session.startTransaction();
- assert.commandWorked(sessionColl.remove(doc2, {justOne: true}));
+session.startTransaction();
+assert.commandWorked(sessionColl.remove(doc2, {justOne: true}));
- // Delete should not be visible outside the session, so the document should be.
- assert.eq(doc2, testColl.findOne(doc2));
+// Delete should not be visible outside the session, so the document should be.
+assert.eq(doc2, testColl.findOne(doc2));
- // Document should not be visible in this session, since the delete should be visible.
- assert.eq(null, sessionColl.findOne(doc2));
+// Document should not be visible in this session, since the delete should be visible.
+assert.eq(null, sessionColl.findOne(doc2));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // After commit the delete persists.
- assert.eq(null, testColl.findOne(doc2));
+// After commit the delete persists.
+assert.eq(null, testColl.findOne(doc2));
}());
diff --git a/jstests/core/txns/commit_prepared_transaction_errors.js b/jstests/core/txns/commit_prepared_transaction_errors.js
index 97ecd5bf8e9..64b27f3c16c 100644
--- a/jstests/core/txns/commit_prepared_transaction_errors.js
+++ b/jstests/core/txns/commit_prepared_transaction_errors.js
@@ -4,70 +4,69 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "commit_prepared_transaction_errors";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_prepared_transaction_errors";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const doc = {_id: 1};
+const doc = {
+ _id: 1
+};
- jsTestLog("Test committing a prepared transaction with no 'commitTimestamp'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}),
- ErrorCodes.InvalidOptions);
- // Make sure the transaction is still running by observing write conflicts.
- const anotherSession = db.getMongo().startSession({causalConsistency: false});
- anotherSession.startTransaction();
- assert.commandFailedWithCode(
- anotherSession.getDatabase(dbName).getCollection(collName).insert(doc),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(anotherSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- // Abort the original transaction.
- assert.commandWorked(session.abortTransaction_forTesting());
+jsTestLog("Test committing a prepared transaction with no 'commitTimestamp'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+PrepareHelpers.prepareTransaction(session);
+assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}),
+ ErrorCodes.InvalidOptions);
+// Make sure the transaction is still running by observing write conflicts.
+const anotherSession = db.getMongo().startSession({causalConsistency: false});
+anotherSession.startTransaction();
+assert.commandFailedWithCode(anotherSession.getDatabase(dbName).getCollection(collName).insert(doc),
+ ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(anotherSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+// Abort the original transaction.
+assert.commandWorked(session.abortTransaction_forTesting());
- jsTestLog("Test committing a prepared transaction with an invalid 'commitTimestamp'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5),
- ErrorCodes.TypeMismatch);
+jsTestLog("Test committing a prepared transaction with an invalid 'commitTimestamp'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+PrepareHelpers.prepareTransaction(session);
+assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5), ErrorCodes.TypeMismatch);
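+// (5 is not a BSON Timestamp, hence TypeMismatch; contrast the null
+// Timestamp(0, 0) case below, which parses but is rejected with InvalidOptions.)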
- jsTestLog("Test committing a prepared transaction with a null 'commitTimestamp'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)),
- ErrorCodes.InvalidOptions);
+jsTestLog("Test committing a prepared transaction with a null 'commitTimestamp'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+PrepareHelpers.prepareTransaction(session);
+assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Test committing an unprepared transaction with a 'commitTimestamp'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- let res = assert.commandFailedWithCode(
- PrepareHelpers.commitTransaction(session, Timestamp(3, 3)), ErrorCodes.InvalidOptions);
- assert(res.errmsg.includes("cannot provide commitTimestamp to unprepared transaction"), res);
+jsTestLog("Test committing an unprepared transaction with a 'commitTimestamp'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+let res = assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(3, 3)),
+ ErrorCodes.InvalidOptions);
+assert(res.errmsg.includes("cannot provide commitTimestamp to unprepared transaction"), res);
- jsTestLog("Test committing an unprepared transaction with a null 'commitTimestamp'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)),
- ErrorCodes.InvalidOptions);
+jsTestLog("Test committing an unprepared transaction with a null 'commitTimestamp'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Test committing an unprepared transaction with an invalid 'commitTimestamp'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5),
- ErrorCodes.TypeMismatch);
+jsTestLog("Test committing an unprepared transaction with an invalid 'commitTimestamp'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5), ErrorCodes.TypeMismatch);
}());
diff --git a/jstests/core/txns/concurrent_drops_and_creates.js b/jstests/core/txns/concurrent_drops_and_creates.js
index b025f5a33c3..101deb76a9b 100644
--- a/jstests/core/txns/concurrent_drops_and_creates.js
+++ b/jstests/core/txns/concurrent_drops_and_creates.js
@@ -2,77 +2,75 @@
// transaction started.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
+"use strict";
- const dbName1 = "test1";
- const dbName2 = "test2";
- const collNameA = "coll_A";
- const collNameB = "coll_B";
+const dbName1 = "test1";
+const dbName2 = "test2";
+const collNameA = "coll_A";
+const collNameB = "coll_B";
- const sessionOutsideTxn = db.getMongo().startSession({causalConsistency: true});
- const testDB1 = sessionOutsideTxn.getDatabase(dbName1);
- const testDB2 = sessionOutsideTxn.getDatabase(dbName2);
- testDB1.runCommand({drop: collNameA, writeConcern: {w: "majority"}});
- testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}});
+const sessionOutsideTxn = db.getMongo().startSession({causalConsistency: true});
+const testDB1 = sessionOutsideTxn.getDatabase(dbName1);
+const testDB2 = sessionOutsideTxn.getDatabase(dbName2);
+testDB1.runCommand({drop: collNameA, writeConcern: {w: "majority"}});
+testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}});
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB1 = session.getDatabase(dbName1);
- const sessionDB2 = session.getDatabase(dbName2);
- const sessionCollA = sessionDB1[collNameA];
- const sessionCollB = sessionDB2[collNameB];
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB1 = session.getDatabase(dbName1);
+const sessionDB2 = session.getDatabase(dbName2);
+const sessionCollA = sessionDB1[collNameA];
+const sessionCollB = sessionDB2[collNameB];
- //
- // A transaction with snapshot read concern cannot write to a collection that has been dropped
- // since the transaction started.
- //
+//
+// A transaction with snapshot read concern cannot write to a collection that has been dropped
+// since the transaction started.
+//
- // Ensure collection A and collection B exist.
- assert.commandWorked(sessionCollA.insert({}));
- assert.commandWorked(sessionCollB.insert({}));
+// Ensure collection A and collection B exist.
+assert.commandWorked(sessionCollA.insert({}));
+assert.commandWorked(sessionCollB.insert({}));
- // Start the transaction with a write to collection A.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionCollA.insert({}));
+// Start the transaction with a write to collection A.
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandWorked(sessionCollA.insert({}));
- // Drop collection B outside of the transaction. Advance the cluster time of the session
- // performing the drop to ensure it happens at a later cluster time than the transaction began.
- sessionOutsideTxn.advanceClusterTime(session.getClusterTime());
- assert.commandWorked(testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}));
+// Drop collection B outside of the transaction. Advance the cluster time of the session
+// performing the drop to ensure it happens at a later cluster time than the transaction began.
+sessionOutsideTxn.advanceClusterTime(session.getClusterTime());
+assert.commandWorked(testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}));
- // We cannot write to collection B in the transaction, since it is illegal to implicitly create
- // collections in transactions. The collection drop is visible to the transaction in this way,
- // since our implementation of the in-memory collection catalog always has the most recent
- // collection metadata.
- assert.commandFailedWithCode(sessionCollB.insert({}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// We cannot write to collection B in the transaction, since it is illegal to implicitly create
+// collections in transactions. The collection drop is visible to the transaction in this way,
+// since our implementation of the in-memory collection catalog always has the most recent
+// collection metadata.
+assert.commandFailedWithCode(sessionCollB.insert({}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
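+// (The rejected insert above has already aborted the transaction, so the
+// explicit abort reports NoSuchTransaction.)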
- //
- // A transaction with snapshot read concern cannot write to a collection that has been created
- // since the transaction started.
- //
+//
+// A transaction with snapshot read concern cannot write to a collection that has been created
+// since the transaction started.
+//
- // Ensure collection A exists and collection B does not exist.
- assert.commandWorked(sessionCollA.insert({}));
- testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}});
+// Ensure collection A exists and collection B does not exist.
+assert.commandWorked(sessionCollA.insert({}));
+testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}});
- // Start the transaction with a write to collection A.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionCollA.insert({}));
+// Start the transaction with a write to collection A.
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandWorked(sessionCollA.insert({}));
- // Create collection B outside of the transaction. Advance the cluster time of the session
-    // performing the create to ensure it happens at a later cluster time than the transaction began.
- sessionOutsideTxn.advanceClusterTime(session.getClusterTime());
- assert.commandWorked(testDB2.runCommand({create: collNameB}));
+// Create collection B outside of the transaction. Advance the cluster time of the session
+// performing the create to ensure it happens at a later cluster time than the transaction began.
+sessionOutsideTxn.advanceClusterTime(session.getClusterTime());
+assert.commandWorked(testDB2.runCommand({create: collNameB}));
- // We cannot write to collection B in the transaction, since it experienced catalog changes
-    // after the transaction's read timestamp. Because our implementation of the in-memory collection
- // catalog always has the most recent collection metadata, we do not allow you to read from a
- // collection at a time prior to its most recent catalog changes.
- assert.commandFailedWithCode(sessionCollB.insert({}), ErrorCodes.SnapshotUnavailable);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// We cannot write to collection B in the transaction, since it experienced catalog changes
+// after the transaction's read timestamp. Because our implementation of the in-memory collection
+// catalog always has the most recent collection metadata, we do not allow you to read from a
+// collection at a time prior to its most recent catalog changes.
+assert.commandFailedWithCode(sessionCollB.insert({}), ErrorCodes.SnapshotUnavailable);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/create_collection_not_blocked_by_txn.js b/jstests/core/txns/create_collection_not_blocked_by_txn.js
index 679004631da..ba043977bd4 100644
--- a/jstests/core/txns/create_collection_not_blocked_by_txn.js
+++ b/jstests/core/txns/create_collection_not_blocked_by_txn.js
@@ -5,29 +5,29 @@
*/
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let db = rst.getPrimary().getDB("test");
+let db = rst.getPrimary().getDB("test");
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("test");
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase("test");
- session.startTransaction();
- // This holds a database IX lock and a collection IX lock on "a".
- sessionDb.a.insert({y: 1});
+session.startTransaction();
+// This holds a database IX lock and a collection IX lock on "a".
+sessionDb.a.insert({y: 1});
- // This only requires database IX lock.
- assert.commandWorked(db.createCollection("b"));
- // Implicit creation.
- assert.commandWorked(db.runCommand({insert: "c", documents: [{x: 2}]}));
+// This only requires database IX lock.
+assert.commandWorked(db.createCollection("b"));
+// Implicit creation.
+assert.commandWorked(db.runCommand({insert: "c", documents: [{x: 2}]}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/core/txns/currentop_blocked_operations.js b/jstests/core/txns/currentop_blocked_operations.js
index 8e51334bdff..01a5026a668 100644
--- a/jstests/core/txns/currentop_blocked_operations.js
+++ b/jstests/core/txns/currentop_blocked_operations.js
@@ -4,80 +4,79 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "currentop_blocked_operations";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "currentop_blocked_operations";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- // Returns when the operation matching the 'matchExpr' is blocked, as evaluated by the
- // 'isBlockedFunc'.
- let waitForBlockedOp = function(matchExpr, isBlockedFunc) {
- assert.soon(function() {
- let cursor =
- db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: matchExpr}]);
- if (cursor.hasNext()) {
- let op = cursor.next();
- printjson(op);
- return isBlockedFunc(op);
- }
- return false;
- });
- };
+// Returns when the operation matching the 'matchExpr' is blocked, as evaluated by the
+// 'isBlockedFunc'.
+let waitForBlockedOp = function(matchExpr, isBlockedFunc) {
+ assert.soon(function() {
+ let cursor = db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: matchExpr}]);
+ if (cursor.hasNext()) {
+ let op = cursor.next();
+ printjson(op);
+ return isBlockedFunc(op);
+ }
+ return false;
+ });
+};
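+// (assert.soon repeatedly re-evaluates the supplied function until it returns
+// true or a timeout elapses, so waitForBlockedOp polls $currentOp until the
+// blocked state is observed.)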
- // This transaction will block conflicting non-transactional operations.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 2222}));
+// This transaction will block conflicting non-transactional operations.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 2222}));
- // This insert operation will encounter a WriteConflictException due to the unique key
- // violation. It will block in an infinite write conflict loop until the transaction completes.
- TestData.dbName = dbName;
- TestData.collName = collName;
- let awaitInsert = startParallelShell(function() {
- let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName);
- assert.commandWorked(coll.insert({_id: 2222, x: 0}));
- });
+// This insert operation will encounter a WriteConflictException due to the unique key
+// violation. It will block in an infinite write conflict loop until the transaction completes.
+TestData.dbName = dbName;
+TestData.collName = collName;
+let awaitInsert = startParallelShell(function() {
+ let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName);
+ assert.commandWorked(coll.insert({_id: 2222, x: 0}));
+});
- // Wait for the counter to reach a high enough number to confirm the operation is retrying
- // constantly.
- waitForBlockedOp({"command.insert": collName}, function(op) {
- return op.writeConflicts > 20;
- });
+// Wait for the counter to reach a high enough number to confirm the operation is retrying
+// constantly.
+waitForBlockedOp({"command.insert": collName}, function(op) {
+ return op.writeConflicts > 20;
+});
- assert.commandWorked(session.abortTransaction_forTesting());
- awaitInsert();
- assert.eq(1, testColl.find({_id: 2222, x: 0}).itcount());
+assert.commandWorked(session.abortTransaction_forTesting());
+awaitInsert();
+assert.eq(1, testColl.find({_id: 2222, x: 0}).itcount());
- // This prepared transaction will block conflicting non-transactional operations.
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 2222}, {$set: {x: 1}}));
- PrepareHelpers.prepareTransaction(session);
+// This prepared transaction will block conflicting non-transactional operations.
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 2222}, {$set: {x: 1}}));
+PrepareHelpers.prepareTransaction(session);
- // This update operation will encounter a prepare conflict due to the prepared transaction's
- // modification to the same document. It will block without retrying until the prepared
- // transaction completes.
- TestData.dbName = dbName;
- TestData.collName = collName;
- let awaitUpdate = startParallelShell(function() {
- let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName);
- assert.commandWorked(coll.update({_id: 2222}, {$set: {x: 999}}));
- });
+// This update operation will encounter a prepare conflict due to the prepared transaction's
+// modification to the same document. It will block without retrying until the prepared
+// transaction completes.
+TestData.dbName = dbName;
+TestData.collName = collName;
+let awaitUpdate = startParallelShell(function() {
+ let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName);
+ assert.commandWorked(coll.update({_id: 2222}, {$set: {x: 999}}));
+});
- // Expect at least one prepare conflict.
- waitForBlockedOp({ns: testColl.getFullName(), op: "update"}, function(op) {
- return op.prepareReadConflicts > 0;
- });
+// Expect at least one prepare conflict.
+waitForBlockedOp({ns: testColl.getFullName(), op: "update"}, function(op) {
+ return op.prepareReadConflicts > 0;
+});
- assert.commandWorked(session.abortTransaction_forTesting());
- awaitUpdate();
- assert.eq(1, testColl.find({_id: 2222, x: 999}).itcount());
+assert.commandWorked(session.abortTransaction_forTesting());
+awaitUpdate();
+assert.eq(1, testColl.find({_id: 2222, x: 999}).itcount());
})();
diff --git a/jstests/core/txns/dbstats_not_blocked_by_txn.js b/jstests/core/txns/dbstats_not_blocked_by_txn.js
index 4da7b2ccbe0..6adf567b191 100644
--- a/jstests/core/txns/dbstats_not_blocked_by_txn.js
+++ b/jstests/core/txns/dbstats_not_blocked_by_txn.js
@@ -4,31 +4,31 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
- var dbName = 'dbstats_not_blocked_by_txn';
- var mydb = db.getSiblingDB(dbName);
+"use strict";
+var dbName = 'dbstats_not_blocked_by_txn';
+var mydb = db.getSiblingDB(dbName);
- mydb.foo.drop({writeConcern: {w: "majority"}});
- mydb.createCollection("foo", {writeConcern: {w: "majority"}});
+mydb.foo.drop({writeConcern: {w: "majority"}});
+mydb.createCollection("foo", {writeConcern: {w: "majority"}});
- var session = db.getMongo().startSession();
- var sessionDb = session.getDatabase(dbName);
+var session = db.getMongo().startSession();
+var sessionDb = session.getDatabase(dbName);
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
- if (isMongos) {
- // Before starting the transaction below, access the collection so it can be implicitly
- // sharded and force all shards to refresh their database versions because the refresh
- // requires an exclusive lock and would block behind the transaction.
- assert.eq(sessionDb.foo.find().itcount(), 0);
- assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true}));
- }
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
+if (isMongos) {
+ // Before starting the transaction below, access the collection so it can be implicitly
+ // sharded and force all shards to refresh their database versions because the refresh
+ // requires an exclusive lock and would block behind the transaction.
+ assert.eq(sessionDb.foo.find().itcount(), 0);
+ assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true}));
+}
- session.startTransaction();
- assert.commandWorked(sessionDb.foo.insert({x: 1}));
+session.startTransaction();
+assert.commandWorked(sessionDb.foo.insert({x: 1}));
- let res = mydb.runCommand({dbstats: 1, maxTimeMS: 10 * 1000});
- assert.commandWorked(res, "dbstats should have succeeded and not timed out");
+let res = mydb.runCommand({dbstats: 1, maxTimeMS: 10 * 1000});
+assert.commandWorked(res, "dbstats should have succeeded and not timed out");
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
}());
diff --git a/jstests/core/txns/default_read_concern.js b/jstests/core/txns/default_read_concern.js
index fbdfb3fb6f1..d593ec73332 100644
--- a/jstests/core/txns/default_read_concern.js
+++ b/jstests/core/txns/default_read_concern.js
@@ -4,45 +4,45 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "default_read_concern";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- // Prepare the collection
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
-
- jsTestLog("Start a transaction with default readConcern");
- session.startTransaction();
-
- // Inserts outside transaction aren't visible, even after they are
- // majority-committed. (It is not a requirement that transactions with local
- // readConcern do not see writes from another session. At some point, it
- // would be desirable to have a transaction with readConcern local or
- // majority see writes from other sessions. However, our current
- // implementation of ensuring any data we read does not get rolled back
- // relies on the fact that we read from a single WT snapshot, since we
- // choose the timestamp to wait on in the first command of the
- // transaction.)
- let assertSameMembers = (members) => {
- assert.sameMembers(members, sessionColl.find().toArray());
- };
-
- assertSameMembers([{_id: 0}]);
- assert.commandWorked(testColl.insert({_id: 1}));
- assertSameMembers([{_id: 0}]);
- assert.commandWorked(testColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assertSameMembers([{_id: 0}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assertSameMembers([{_id: 0}, {_id: 1}, {_id: 2}]);
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const collName = "default_read_concern";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+// Prepare the collection
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
+
+jsTestLog("Start a transaction with default readConcern");
+session.startTransaction();
+
+// Inserts outside transaction aren't visible, even after they are
+// majority-committed. (It is not a requirement that transactions with local
+// readConcern do not see writes from another session. At some point, it
+// would be desirable to have a transaction with readConcern local or
+// majority see writes from other sessions. However, our current
+// implementation of ensuring any data we read does not get rolled back
+// relies on the fact that we read from a single WT snapshot, since we
+// choose the timestamp to wait on in the first command of the
+// transaction.)
+let assertSameMembers = (members) => {
+ assert.sameMembers(members, sessionColl.find().toArray());
+};
+
+assertSameMembers([{_id: 0}]);
+assert.commandWorked(testColl.insert({_id: 1}));
+assertSameMembers([{_id: 0}]);
+assert.commandWorked(testColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assertSameMembers([{_id: 0}]);
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assertSameMembers([{_id: 0}, {_id: 1}, {_id: 2}]);
+session.endSession();
}());
diff --git a/jstests/core/txns/disallow_operations_on_prepared_transaction.js b/jstests/core/txns/disallow_operations_on_prepared_transaction.js
index 6263c4d6ccb..13d423ab4c1 100644
--- a/jstests/core/txns/disallow_operations_on_prepared_transaction.js
+++ b/jstests/core/txns/disallow_operations_on_prepared_transaction.js
@@ -7,126 +7,126 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const dbName = "test";
- const collName = "disallow_operations_on_prepared_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Test that you can call prepareTransaction on a prepared transaction.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- let firstTimestamp = PrepareHelpers.prepareTransaction(session);
- let secondTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.eq(firstTimestamp, secondTimestamp);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- jsTestLog("Test that you can call commitTransaction on a prepared transaction.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 2}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Test that you can call abortTransaction on a prepared transaction.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 3}));
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 4}));
- PrepareHelpers.prepareTransaction(session);
-
- jsTestLog("Test that you can't run an aggregation on a prepared transaction.");
- assert.commandFailedWithCode(assert.throws(function() {
- sessionColl.aggregate({$match: {}});
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Test that you can't run delete on a prepared transaction.");
- var res = assert.commandFailedWithCode(sessionColl.remove({_id: 4}),
- ErrorCodes.PreparedTransactionInProgress);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- jsTestLog("Test that you can't run distinct on a prepared transaction.");
- assert.commandFailedWithCode(assert.throws(function() {
- sessionColl.distinct("_id");
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
-    // This fails with OperationNotSupportedInTransaction instead of PreparedTransactionInProgress
-    // because doTxn always runs with startTransaction = true.
- jsTestLog("Test that you can't run doTxn on a prepared transaction.");
- assert.commandFailedWithCode(sessionDB.runCommand({
- doTxn: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}],
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.OperationNotSupportedInTransaction);
-
- jsTestLog("Test that you can't run find on a prepared transaction.");
- assert.commandFailedWithCode(assert.throws(function() {
- sessionColl.find({}).toArray();
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Test that you can't run findandmodify on a prepared transaction.");
- assert.commandFailedWithCode(sessionDB.runCommand({
- findandmodify: collName,
- remove: true,
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Test that you can't run findAndModify on a prepared transaction.");
- assert.commandFailedWithCode(assert.throws(function() {
- sessionColl.findAndModify({query: {_id: 4}, remove: true});
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Test that you can't run geoSearch on a prepared transaction.");
- assert.commandFailedWithCode(
- sessionDB.runCommand({geoSearch: collName, near: [0, 0], search: {a: 1}}),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Test that you can't insert on a prepared transaction.");
- res = assert.commandFailedWithCode(sessionColl.insert({_id: 5}),
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const dbName = "test";
+const collName = "disallow_operations_on_prepared_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Test that you can call prepareTransaction on a prepared transaction.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+let firstTimestamp = PrepareHelpers.prepareTransaction(session);
+let secondTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.eq(firstTimestamp, secondTimestamp);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+jsTestLog("Test that you can call commitTransaction on a prepared transaction.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 2}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+jsTestLog("Test that you can call abortTransaction on a prepared transaction.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 3}));
+PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 4}));
+PrepareHelpers.prepareTransaction(session);
+
+jsTestLog("Test that you can't run an aggregation on a prepared transaction.");
+assert.commandFailedWithCode(assert.throws(function() {
+ sessionColl.aggregate({$match: {}});
+ }),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Test that you can't run delete on a prepared transaction.");
+var res = assert.commandFailedWithCode(sessionColl.remove({_id: 4}),
ErrorCodes.PreparedTransactionInProgress);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- jsTestLog("Test that you can't run update on a prepared transaction.");
- res = assert.commandFailedWithCode(sessionColl.update({_id: 4}, {a: 1}),
- ErrorCodes.PreparedTransactionInProgress);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- jsTestLog("Test that you can't run getMore on a prepared transaction.");
- session.startTransaction();
- res = assert.commandWorked(sessionDB.runCommand({find: collName, batchSize: 1}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(
- sessionDB.runCommand({getMore: res.cursor.id, collection: collName}),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Test that you can't run killCursors on a prepared transaction.");
- assert.commandFailedWithCode(
- sessionDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}),
- ErrorCodes.PreparedTransactionInProgress);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- session.endSession();
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+jsTestLog("Test that you can't run distinct on a prepared transaction.");
+assert.commandFailedWithCode(assert.throws(function() {
+ sessionColl.distinct("_id");
+ }),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// This fails with OperationNotSupportedInTransaction instead of PreparedTransactionInProgress
+// because doTxn always runs with startTransaction = true.
+jsTestLog("Test that you can't run doTxn on a prepared transaction.");
+assert.commandFailedWithCode(sessionDB.runCommand({
+ doTxn: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}],
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ stmtId: NumberInt(1),
+ autocommit: false
+}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+
+jsTestLog("Test that you can't run find on a prepared transaction.");
+assert.commandFailedWithCode(assert.throws(function() {
+ sessionColl.find({}).toArray();
+ }),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Test that you can't run findandmodify on a prepared transaction.");
+assert.commandFailedWithCode(sessionDB.runCommand({
+ findandmodify: collName,
+ remove: true,
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ stmtId: NumberInt(1),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Test that you can't run findAndModify on a prepared transaction.");
+assert.commandFailedWithCode(
+ assert.throws(function() {
+ sessionColl.findAndModify({query: {_id: 4}, remove: true});
+ }),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Test that you can't run geoSearch on a prepared transaction.");
+assert.commandFailedWithCode(
+ sessionDB.runCommand({geoSearch: collName, near: [0, 0], search: {a: 1}}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Test that you can't insert on a prepared transaction.");
+res = assert.commandFailedWithCode(sessionColl.insert({_id: 5}),
+ ErrorCodes.PreparedTransactionInProgress);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+jsTestLog("Test that you can't run update on a prepared transaction.");
+res = assert.commandFailedWithCode(sessionColl.update({_id: 4}, {a: 1}),
+ ErrorCodes.PreparedTransactionInProgress);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+jsTestLog("Test that you can't run getMore on a prepared transaction.");
+session.startTransaction();
+res = assert.commandWorked(sessionDB.runCommand({find: collName, batchSize: 1}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+PrepareHelpers.prepareTransaction(session);
+assert.commandFailedWithCode(sessionDB.runCommand({getMore: res.cursor.id, collection: collName}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Test that you can't run killCursors on a prepared transaction.");
+assert.commandFailedWithCode(
+ sessionDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}),
+ ErrorCodes.PreparedTransactionInProgress);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+session.endSession();
}());
diff --git a/jstests/core/txns/do_txn_atomicity.js b/jstests/core/txns/do_txn_atomicity.js
index 59307a4641b..72d2591dbb6 100644
--- a/jstests/core/txns/do_txn_atomicity.js
+++ b/jstests/core/txns/do_txn_atomicity.js
@@ -2,87 +2,87 @@
// Tests that doTxn is atomic for CRUD operations
(function() {
- 'use strict';
+'use strict';
- var session = db.getMongo().startSession();
- var sessionDb = session.getDatabase("test");
- var txnNumber = 0;
+var session = db.getMongo().startSession();
+var sessionDb = session.getDatabase("test");
+var txnNumber = 0;
- var t = db.doTxn;
- t.drop({writeConcern: {w: "majority"}});
- assert.writeOK(t.insert({_id: 1}));
+var t = db.doTxn;
+t.drop({writeConcern: {w: "majority"}});
+assert.writeOK(t.insert({_id: 1}));
- // Operations including commands are not allowed and should be rejected completely.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- doTxn: [
- {op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
- {op: 'c', ns: "invalid", o: {create: "t"}},
- ],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidOptions);
- assert.eq(t.count({x: 1}), 0);
+// Operations including commands are not allowed and should be rejected completely.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ doTxn: [
+ {op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
+ {op: 'c', ns: "invalid", o: {create: "t"}},
+ ],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.InvalidOptions);
+assert.eq(t.count({x: 1}), 0);
- // Operations only including CRUD commands should be atomic, so the next insert will fail.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- doTxn: [
- {op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
- {op: 'i', ns: "invalid", o: {_id: ObjectId(), x: 1}},
- ],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidNamespace);
- assert.eq(t.count({x: 1}), 0);
+// Operations only including CRUD commands should be atomic, so the next insert will fail.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ doTxn: [
+ {op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
+ {op: 'i', ns: "invalid", o: {_id: ObjectId(), x: 1}},
+ ],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.InvalidNamespace);
+assert.eq(t.count({x: 1}), 0);
- // Operations on non-existent databases cannot be atomic.
- var newDBName = "do_txn_atomicity";
- var newDB = sessionDb.getSiblingDB(newDBName);
- assert.commandWorked(newDB.dropDatabase());
- // Updates on a non-existent database no longer implicitly create collections and will fail with
- // a NamespaceNotFound error.
- assert.commandFailedWithCode(newDB.runCommand({
- doTxn: [{op: "u", ns: newDBName + ".foo", o: {_id: 5, x: 17}, o2: {_id: 5, x: 16}}],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.NamespaceNotFound);
+// Operations on non-existent databases cannot be atomic.
+var newDBName = "do_txn_atomicity";
+var newDB = sessionDb.getSiblingDB(newDBName);
+assert.commandWorked(newDB.dropDatabase());
+// Updates on a non-existent database no longer implicitly create collections and will fail with
+// a NamespaceNotFound error.
+assert.commandFailedWithCode(newDB.runCommand({
+ doTxn: [{op: "u", ns: newDBName + ".foo", o: {_id: 5, x: 17}, o2: {_id: 5, x: 16}}],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.NamespaceNotFound);
- var sawTooManyLocksError = false;
+var sawTooManyLocksError = false;
- function applyWithManyLocks(n) {
- let cappedOps = [];
- let multiOps = [];
+function applyWithManyLocks(n) {
+ let cappedOps = [];
+ let multiOps = [];
- for (let i = 0; i < n; i++) {
- // Write to a capped collection, as that may require a lock for serialization.
- let cappedName = "capped" + n + "-" + i;
- assert.commandWorked(newDB.createCollection(cappedName, {capped: true, size: 100}));
- cappedOps.push({op: 'i', ns: newDBName + "." + cappedName, o: {_id: 0}});
+ for (let i = 0; i < n; i++) {
+ // Write to a capped collection, as that may require a lock for serialization.
+ let cappedName = "capped" + n + "-" + i;
+ assert.commandWorked(newDB.createCollection(cappedName, {capped: true, size: 100}));
+ cappedOps.push({op: 'i', ns: newDBName + "." + cappedName, o: {_id: 0}});
- // Make an index multi-key, as that may require a lock for updating the catalog.
- let multiName = "multi" + n + "-" + i;
- assert.commandWorked(newDB[multiName].createIndex({x: 1}));
- multiOps.push({op: 'i', ns: newDBName + "." + multiName, o: {_id: 0, x: [0, 1]}});
- }
-
- let res = [cappedOps, multiOps].map(
- (doTxn) => newDB.runCommand({doTxn: doTxn, txnNumber: NumberLong(txnNumber++)}));
- sawTooManyLocksError |= res.some((res) => res.code === ErrorCodes.TooManyLocks);
- // Transactions involving just two collections should succeed.
- if (n <= 2)
- res.every((res) => res.ok);
- // All transactions should either completely succeed or completely fail.
- assert(res.every((res) => res.results.every((result) => result == res.ok)));
- assert(res.every((res) => !res.ok || res.applied == n));
+ // Make an index multi-key, as that may require a lock for updating the catalog.
+ let multiName = "multi" + n + "-" + i;
+ assert.commandWorked(newDB[multiName].createIndex({x: 1}));
+ multiOps.push({op: 'i', ns: newDBName + "." + multiName, o: {_id: 0, x: [0, 1]}});
}
- // Try requiring different numbers of collection accesses in a single operation to cover
- // all edge cases, so we run out of available locks in different code paths such as during
- // oplog application.
- applyWithManyLocks(1);
- applyWithManyLocks(2);
+ let res = [cappedOps, multiOps].map(
+ (doTxn) => newDB.runCommand({doTxn: doTxn, txnNumber: NumberLong(txnNumber++)}));
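+    // Note: `|=` coerces the boolean into a number (0 or 1), so the final assert
+    // below relies only on sawTooManyLocksError's truthiness.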
+ sawTooManyLocksError |= res.some((res) => res.code === ErrorCodes.TooManyLocks);
+ // Transactions involving just two collections should succeed.
+    if (n <= 2)
+        assert(res.every((res) => res.ok));
+ // All transactions should either completely succeed or completely fail.
+ assert(res.every((res) => res.results.every((result) => result == res.ok)));
+ assert(res.every((res) => !res.ok || res.applied == n));
+}
- for (let i = 9; i < 20; i++) {
- applyWithManyLocks(i);
- }
- assert(!sawTooManyLocksError, "test should not exhaust the max number of locks held at once");
+// Try different numbers of collection accesses in a single operation to cover the
+// edge cases, so that we run out of available locks in different code paths, such as
+// during oplog application.
+applyWithManyLocks(1);
+applyWithManyLocks(2);
+
+for (let i = 9; i < 20; i++) {
+ applyWithManyLocks(i);
+}
+assert(!sawTooManyLocksError, "test should not exhaust the max number of locks held at once");
})();
diff --git a/jstests/core/txns/do_txn_basic.js b/jstests/core/txns/do_txn_basic.js
index 1325db840b5..1b0cc100644 100644
--- a/jstests/core/txns/do_txn_basic.js
+++ b/jstests/core/txns/do_txn_basic.js
@@ -1,347 +1,339 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const t = db.do_txn1;
-
- var session = db.getMongo().startSession();
- db = session.getDatabase("test");
- var txnNumber = 0;
-
- // Use majority write concern to clear the drop-pending that can cause lock conflicts with
- // transactions.
- t.drop({writeConcern: {w: "majority"}});
-
- //
- // Input validation tests
- //
-
- jsTestLog("Empty array of operations.");
- assert.commandFailedWithCode(db.adminCommand({doTxn: [], txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.InvalidOptions,
- 'doTxn should fail on empty array of operations');
-
- jsTestLog("Non-array type for operations.");
- assert.commandFailedWithCode(
- db.adminCommand({doTxn: "not an array", txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.TypeMismatch,
- 'doTxn should fail on non-array type for operations');
-
- jsTestLog("Missing 'op' field in an operation.");
- assert.commandFailedWithCode(
- db.adminCommand(
- {doTxn: [{ns: t.getFullName(), o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on operation without "op" field');
-
- jsTestLog("Non-string 'op' field in an operation.");
- assert.commandFailedWithCode(db.adminCommand({
- doTxn: [{op: 12345, ns: t.getFullName(), o: {_id: 0}}],
+"use strict";
+
+const t = db.do_txn1;
+
+var session = db.getMongo().startSession();
+db = session.getDatabase("test");
+var txnNumber = 0;
+
+// Use majority write concern to clear the drop-pending that can cause lock conflicts with
+// transactions.
+t.drop({writeConcern: {w: "majority"}});
+
+//
+// Input validation tests
+//
+
+jsTestLog("Empty array of operations.");
+assert.commandFailedWithCode(db.adminCommand({doTxn: [], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.InvalidOptions,
+ 'doTxn should fail on empty array of operations');
+
+jsTestLog("Non-array type for operations.");
+assert.commandFailedWithCode(
+ db.adminCommand({doTxn: "not an array", txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.TypeMismatch,
+ 'doTxn should fail on non-array type for operations');
+
+jsTestLog("Missing 'op' field in an operation.");
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {doTxn: [{ns: t.getFullName(), o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on operation without "op" field');
+
+jsTestLog("Non-string 'op' field in an operation.");
+assert.commandFailedWithCode(db.adminCommand({
+ doTxn: [{op: 12345, ns: t.getFullName(), o: {_id: 0}}],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on operation with non-string "op" field');
+
+jsTestLog("Empty 'op' field value in an operation.");
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {doTxn: [{op: '', ns: t.getFullName(), o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on operation with empty "op" field value');
+
+jsTestLog("Missing 'ns' field in an operation.");
+assert.commandFailedWithCode(
+ db.adminCommand({doTxn: [{op: 'u', o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on operation without "ns" field');
+
+jsTestLog("Missing 'o' field in an operation.");
+assert.commandFailedWithCode(
+ db.adminCommand({doTxn: [{op: 'u', ns: t.getFullName()}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on operation without "o" field');
+
+jsTestLog("Non-string 'ns' field in an operation.");
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {doTxn: [{op: 'u', ns: 12345, o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on operation with non-string "ns" field');
+
+jsTestLog("Missing dbname in 'ns' field.");
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {doTxn: [{op: 'd', ns: t.getName(), o: {_id: 1}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.InvalidNamespace,
+ 'doTxn should fail with a missing dbname in the "ns" field value');
+
+jsTestLog("Empty 'ns' field value.");
+assert.commandFailed(
+ db.adminCommand({doTxn: [{op: 'u', ns: '', o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ 'doTxn should fail with empty "ns" field value');
+
+jsTestLog("Valid 'ns' field value in unknown operation type 'x'.");
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {doTxn: [{op: 'x', ns: t.getFullName(), o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on unknown operation type "x" with valid "ns" value');
+
+jsTestLog("Illegal operation type 'n' (no-op).");
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {doTxn: [{op: 'n', ns: t.getFullName(), o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
+ ErrorCodes.InvalidOptions,
+ 'doTxn should fail on "no op" operations.');
+
+jsTestLog("Illegal operation type 'c' (command).");
+assert.commandFailedWithCode(db.adminCommand({
+ doTxn: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {applyOps: []}}],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.InvalidOptions,
+ 'doTxn should fail on commands.');
+
+jsTestLog("No transaction number in an otherwise valid operation.");
+assert.commandFailedWithCode(
+ db.adminCommand({doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}),
+ ErrorCodes.InvalidOptions,
+ 'doTxn should fail when no transaction number is given.');
+
+jsTestLog("Session IDs and transaction numbers on sub-ops are not allowed");
+jsTestLog("doTxn should fail when inner transaction contains session id.");
+var lsid = {id: UUID()};
+res = assert.commandFailedWithCode(db.runCommand({
+ doTxn: [{
+ op: "i",
+ ns: t.getFullName(),
+ o: {_id: 7, x: 24},
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ }],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail when inner transaction contains session id.');
+
+jsTestLog("doTxn should fail when inner transaction contains transaction number.");
+res = assert.commandFailedWithCode(
+ db.runCommand({
+ doTxn: [{
+ op: "u",
+ ns: t.getFullName(),
+ o2: {_id: 7},
+ o: {$set: {x: 25}},
+ txnNumber: NumberLong(1),
+ }],
txnNumber: NumberLong(txnNumber++)
}),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on operation with non-string "op" field');
-
- jsTestLog("Empty 'op' field value in an operation.");
- assert.commandFailedWithCode(db.adminCommand({
- doTxn: [{op: '', ns: t.getFullName(), o: {_id: 0}}],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on operation with empty "op" field value');
-
- jsTestLog("Missing 'ns' field in an operation.");
- assert.commandFailedWithCode(
- db.adminCommand({doTxn: [{op: 'u', o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on operation without "ns" field');
-
- jsTestLog("Missing 'o' field in an operation.");
- assert.commandFailedWithCode(
- db.adminCommand(
- {doTxn: [{op: 'u', ns: t.getFullName()}], txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on operation without "o" field');
-
- jsTestLog("Non-string 'ns' field in an operation.");
- assert.commandFailedWithCode(
- db.adminCommand(
- {doTxn: [{op: 'u', ns: 12345, o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on operation with non-string "ns" field');
-
- jsTestLog("Missing dbname in 'ns' field.");
- assert.commandFailedWithCode(
- db.adminCommand(
- {doTxn: [{op: 'd', ns: t.getName(), o: {_id: 1}}], txnNumber: NumberLong(txnNumber++)}),
- ErrorCodes.InvalidNamespace,
- 'doTxn should fail with a missing dbname in the "ns" field value');
-
- jsTestLog("Empty 'ns' field value.");
- assert.commandFailed(
- db.adminCommand(
- {doTxn: [{op: 'u', ns: '', o: {_id: 0}}], txnNumber: NumberLong(txnNumber++)}),
- 'doTxn should fail with empty "ns" field value');
-
- jsTestLog("Valid 'ns' field value in unknown operation type 'x'.");
- assert.commandFailedWithCode(
- db.adminCommand({
- doTxn: [{op: 'x', ns: t.getFullName(), o: {_id: 0}}],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.FailedToParse,
- 'doTxn should fail on unknown operation type "x" with valid "ns" value');
-
- jsTestLog("Illegal operation type 'n' (no-op).");
- assert.commandFailedWithCode(db.adminCommand({
- doTxn: [{op: 'n', ns: t.getFullName(), o: {_id: 0}}],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidOptions,
- 'doTxn should fail on "no op" operations.');
-
- jsTestLog("Illegal operation type 'c' (command).");
- assert.commandFailedWithCode(db.adminCommand({
- doTxn: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {applyOps: []}}],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.InvalidOptions,
- 'doTxn should fail on commands.');
-
- jsTestLog("No transaction number in an otherwise valid operation.");
- assert.commandFailedWithCode(
- db.adminCommand({doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}),
- ErrorCodes.InvalidOptions,
- 'doTxn should fail when no transaction number is given.');
-
- jsTestLog("Session IDs and transaction numbers on sub-ops are not allowed");
- jsTestLog("doTxn should fail when inner transaction contains session id.");
- var lsid = {id: UUID()};
- res = assert.commandFailedWithCode(
- db.runCommand({
- doTxn: [{
- op: "i",
- ns: t.getFullName(),
- o: {_id: 7, x: 24},
- lsid: lsid,
- txnNumber: NumberLong(1),
- }],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.FailedToParse,
- 'doTxn should fail when inner transaction contains session id.');
-
- jsTestLog("doTxn should fail when inner transaction contains transaction number.");
- res = assert.commandFailedWithCode(
- db.runCommand({
- doTxn: [{
- op: "u",
- ns: t.getFullName(),
- o2: {_id: 7},
- o: {$set: {x: 25}},
- txnNumber: NumberLong(1),
- }],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.FailedToParse,
- 'doTxn should fail when inner transaction contains transaction number.');
-
- jsTestLog("doTxn should fail when inner transaction contains statement id.");
- res = assert.commandFailedWithCode(
- db.runCommand({
- doTxn: [{
- op: "d",
- ns: t.getFullName(),
- o: {_id: 7},
- stmtId: 0,
- }],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.FailedToParse,
- 'doTxn should fail when inner transaction contains statement id.');
-
- jsTestLog("Malformed operation with unexpected field 'x'.");
- assert.commandFailedWithCode(db.adminCommand({
- doTxn: [{op: 'i', ns: t.getFullName(), o: {_id: 0}, x: 1}],
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail when inner transaction contains transaction number.');
+
+jsTestLog("doTxn should fail when inner transaction contains statement id.");
+res =
+ assert.commandFailedWithCode(db.runCommand({
+ doTxn: [{
+ op: "d",
+ ns: t.getFullName(),
+ o: {_id: 7},
+ stmtId: 0,
+ }],
txnNumber: NumberLong(txnNumber++)
}),
ErrorCodes.FailedToParse,
- 'doTxn should fail on malformed operations.');
-
- assert.eq(0, t.find().count(), "Non-zero amount of documents in collection to start");
-
- /**
- * Test function for running CRUD operations on non-existent namespaces using various
- * combinations of invalid namespaces (collection/database)
- *
- * Leave 'expectedErrorCode' undefined if this command is expected to run successfully.
- */
- function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) {
- expectedErrorCode = expectedErrorCode || ErrorCodes.OK;
- const t2 = db.getSiblingDB('do_txn1_no_such_db').getCollection('t');
- [t, t2].forEach(coll => {
- const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2};
- const cmd = {doTxn: [op], txnNumber: NumberLong(txnNumber++)};
- jsTestLog('Testing doTxn on non-existent namespace: ' + tojson(cmd));
- if (expectedErrorCode === ErrorCodes.OK) {
- assert.commandWorked(db.adminCommand(cmd));
- } else {
- assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode);
- }
- });
- }
-
- // Insert, delete, and update operations on non-existent collections/databases should return
- // NamespaceNotFound.
- jsTestLog("testCrudOperationOnNonExistentNamespace");
- testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
- testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
- testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound);
-
- jsTestLog("Valid insert");
- assert.commandWorked(db.createCollection(t.getName()));
- var a = assert.commandWorked(db.adminCommand({
- doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}],
- txnNumber: NumberLong(txnNumber++)
- }));
- assert.eq(1, t.find().count(), "Valid insert failed");
- assert.eq(true, a.results[0], "Bad result value for valid insert");
-
- jsTestLog("Duplicate insert");
- a = assert.commandFailedWithCode(db.adminCommand({
- doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}],
- txnNumber: NumberLong(txnNumber++)
- }),
- ErrorCodes.DuplicateKey);
- assert.eq(1,
- t.find().count(),
- "The number of documents changed despite the duplicate insert failing");
- assert.eq(false, a.results[0], "Bad result value for duplicate insert");
-
- var o = {_id: 5, x: 17};
- assert.eq(o, t.findOne(), "Mismatching document inserted.");
-
- jsTestLog("doTxn should fail on insert of object with empty array element");
- // 'o' field is an empty array.
- assert.commandFailed(
- db.adminCommand(
- {doTxn: [{op: 'i', ns: t.getFullName(), o: []}], txnNumber: NumberLong(txnNumber++)}),
- 'doTxn should fail on insert of object with empty array element');
-
- jsTestLog("two valid updates");
- var res = assert.commandWorked(db.runCommand({
- doTxn: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}}
- ],
- txnNumber: NumberLong(txnNumber++)
- }));
-
- o.x++;
- o.x++;
-
- assert.eq(1, t.find().count(), "Updates increased number of documents");
- assert.eq(o, t.findOne(), "Document doesn't match expected");
- assert.eq(true, res.results[0], "Bad result value for valid update");
- assert.eq(true, res.results[1], "Bad result value for valid update");
-
- jsTestLog("preCondition fully matches");
- res = assert.commandWorked(db.runCommand({
- doTxn: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}}
- ],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}],
- txnNumber: NumberLong(txnNumber++)
- }));
-
- o.x++;
- o.x++;
-
- assert.eq(1, t.find().count(), "Updates increased number of documents");
- assert.eq(o, t.findOne(), "Document doesn't match expected");
- assert.eq(true, res.results[0], "Bad result value for valid update");
- assert.eq(true, res.results[1], "Bad result value for valid update");
-
- jsTestLog("preCondition doesn't match ns");
- res = assert.commandFailed(db.runCommand({
- doTxn: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
- ],
- preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}],
- txnNumber: NumberLong(txnNumber++)
- }));
-
- assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
-
- jsTestLog("preCondition doesn't match query");
- res = assert.commandFailed(db.runCommand({
- doTxn: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
- ],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}],
- txnNumber: NumberLong(txnNumber++)
- }));
-
- assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
-
- jsTestLog("upsert disallowed");
- res = assert.commandFailed(db.runCommand({
- doTxn: [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}}
- ],
- txnNumber: NumberLong(txnNumber++)
- }));
-
- assert.eq(false, res.results[0], "Op required upsert, which should be disallowed.");
- assert.eq(false, res.results[1], "Op required upsert, which should be disallowed.");
-
- // When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set
- // operations add new fields in lexicographic order.
- jsTestLog("$set field addition order");
- res = assert.commandWorked(db.adminCommand({
- doTxn: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 6}},
- {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}}
- ],
- txnNumber: NumberLong(txnNumber++)
- }));
- assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
-
- // 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail.
- jsTestLog("Fail update with {$v:0}");
- res = assert.commandFailed(db.adminCommand({
- doTxn: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 7}},
- {
- "op": "u",
- "ns": t.getFullName(),
- "o2": {_id: 7},
- "o": {$v: NumberLong(0), $set: {z: 1, a: 2}}
- }
- ],
- txnNumber: NumberLong(txnNumber++),
- }));
- assert.eq(res.code, 40682);
-
- // When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set
- // operations get performed in lexicographic order.
- jsTestLog("update with {$v:1}");
- res = assert.commandWorked(db.adminCommand({
- doTxn: [
- {"op": "i", "ns": t.getFullName(), "o": {_id: 8}},
- {
- "op": "u",
- "ns": t.getFullName(),
- "o2": {_id: 8},
- "o": {$v: NumberLong(1), $set: {z: 1, a: 2}}
- }
- ],
- txnNumber: NumberLong(txnNumber++),
- }));
- assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
+ 'doTxn should fail when inner transaction contains statement id.');
+
+jsTestLog("Malformed operation with unexpected field 'x'.");
+assert.commandFailedWithCode(db.adminCommand({
+ doTxn: [{op: 'i', ns: t.getFullName(), o: {_id: 0}, x: 1}],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.FailedToParse,
+ 'doTxn should fail on malformed operations.');
+
+assert.eq(0, t.find().count(), "Non-zero number of documents in collection to start");
+
+/**
+ * Test function that runs a CRUD operation against non-existent namespaces, covering
+ * both a missing collection and a missing database.
+ *
+ * Leave 'expectedErrorCode' undefined if this command is expected to run successfully.
+ */
+function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) {
+ expectedErrorCode = expectedErrorCode || ErrorCodes.OK;
+ const t2 = db.getSiblingDB('do_txn1_no_such_db').getCollection('t');
+ [t, t2].forEach(coll => {
+ const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2};
+ const cmd = {doTxn: [op], txnNumber: NumberLong(txnNumber++)};
+ jsTestLog('Testing doTxn on non-existent namespace: ' + tojson(cmd));
+ if (expectedErrorCode === ErrorCodes.OK) {
+ assert.commandWorked(db.adminCommand(cmd));
+ } else {
+ assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode);
+ }
+ });
+}
+
+// Insert, delete, and update operations on non-existent collections/databases should return
+// NamespaceNotFound.
+jsTestLog("testCrudOperationOnNonExistentNamespace");
+testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
+testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
+testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound);
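+// For example, the 'i' case above issues a command shaped like the following
+// (existing-database namespace shown; txnNumber elided):
+//     {doTxn: [{op: 'i', ns: 'test.do_txn1', o: {_id: 0}, o2: {}}],
+//      txnNumber: NumberLong(...)}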
+
+jsTestLog("Valid insert");
+assert.commandWorked(db.createCollection(t.getName()));
+var a = assert.commandWorked(db.adminCommand({
+ doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}],
+ txnNumber: NumberLong(txnNumber++)
+}));
+assert.eq(1, t.find().count(), "Valid insert failed");
+assert.eq(true, a.results[0], "Bad result value for valid insert");
+
+jsTestLog("Duplicate insert");
+a = assert.commandFailedWithCode(db.adminCommand({
+ doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}],
+ txnNumber: NumberLong(txnNumber++)
+}),
+ ErrorCodes.DuplicateKey);
+assert.eq(
+ 1, t.find().count(), "The number of documents changed despite the duplicate insert failing");
+assert.eq(false, a.results[0], "Bad result value for duplicate insert");
+
+var o = {_id: 5, x: 17};
+assert.eq(o, t.findOne(), "Mismatching document inserted.");
+
+jsTestLog("doTxn should fail on insert of object with empty array element");
+// 'o' field is an empty array.
+assert.commandFailed(
+ db.adminCommand(
+ {doTxn: [{op: 'i', ns: t.getFullName(), o: []}], txnNumber: NumberLong(txnNumber++)}),
+ 'doTxn should fail on insert of object with empty array element');
+
+jsTestLog("two valid updates");
+var res = assert.commandWorked(db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}}
+ ],
+ txnNumber: NumberLong(txnNumber++)
+}));
+
+o.x++;
+o.x++;
+
+assert.eq(1, t.find().count(), "Updates increased number of documents");
+assert.eq(o, t.findOne(), "Document doesn't match expected");
+assert.eq(true, res.results[0], "Bad result value for valid update");
+assert.eq(true, res.results[1], "Bad result value for valid update");
+
+jsTestLog("preCondition fully matches");
+res = assert.commandWorked(db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}],
+ txnNumber: NumberLong(txnNumber++)
+}));
+
+o.x++;
+o.x++;
+
+assert.eq(1, t.find().count(), "Updates increased number of documents");
+assert.eq(o, t.findOne(), "Document doesn't match expected");
+assert.eq(true, res.results[0], "Bad result value for valid update");
+assert.eq(true, res.results[1], "Bad result value for valid update");
+
+jsTestLog("preCondition doesn't match ns");
+res = assert.commandFailed(db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
+ ],
+ preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}],
+ txnNumber: NumberLong(txnNumber++)
+}));
+
+assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
+
+jsTestLog("preCondition doesn't match query");
+res = assert.commandFailed(db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}],
+ txnNumber: NumberLong(txnNumber++)
+}));
+
+assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
+
+jsTestLog("upsert disallowed");
+res = assert.commandFailed(db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}}
+ ],
+ txnNumber: NumberLong(txnNumber++)
+}));
+
+assert.eq(false, res.results[0], "Op required upsert, which should be disallowed.");
+assert.eq(false, res.results[1], "Op required upsert, which should be disallowed.");
+
+// When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set
+// operations add new fields in lexicographic order.
+jsTestLog("$set field addition order");
+res = assert.commandWorked(db.adminCommand({
+ doTxn: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 6}},
+ {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}}
+ ],
+ txnNumber: NumberLong(txnNumber++)
+}));
+assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
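+// An explicit {$v: 1} selects these same semantics, while {$v: 0} (the old
+// 'ModifierInterface' semantics) is rejected, as the next two tests show.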
+
+// 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail.
+jsTestLog("Fail update with {$v:0}");
+res = assert.commandFailed(db.adminCommand({
+ doTxn: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 7}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 7},
+ "o": {$v: NumberLong(0), $set: {z: 1, a: 2}}
+ }
+ ],
+ txnNumber: NumberLong(txnNumber++),
+}));
+assert.eq(res.code, 40682);
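+// Per the comment above, 40682 is the parse failure for requesting the
+// unsupported {$v: 0} update-semantics version.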
+
+// When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set
+// operations get performed in lexicographic order.
+jsTestLog("update with {$v:1}");
+res = assert.commandWorked(db.adminCommand({
+ doTxn: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 8}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 8},
+ "o": {$v: NumberLong(1), $set: {z: 1, a: 2}}
+ }
+ ],
+ txnNumber: NumberLong(txnNumber++),
+}));
+assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
})();
diff --git a/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js b/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js
index a53457fc6e5..3bbd380770e 100644
--- a/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js
+++ b/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js
@@ -7,69 +7,66 @@
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "downgrade_fcv_while_large_partial_txn_in_progress";
- const testDB = db.getSiblingDB(dbName);
+const dbName = "test";
+const collName = "downgrade_fcv_while_large_partial_txn_in_progress";
+const testDB = db.getSiblingDB(dbName);
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade",
- mode: "alwaysOn"
- }));
+assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", mode: "alwaysOn"}));
- // As we are not able to send a single request larger than 16MB, we insert two documents
- // of 10MB each to create a "large" transaction.
- const kSize10MB = 10 * 1024 * 1024;
- function createLargeDocument(id) {
- return {_id: id, longString: new Array(kSize10MB).join("a")};
- }
+// Since a single request cannot be larger than 16MB, we insert two documents of 10MB
+// each to build up a "large" transaction.
+const kSize10MB = 10 * 1024 * 1024;
+function createLargeDocument(id) {
+ return {_id: id, longString: new Array(kSize10MB).join("a")};
+}
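+// Size check: new Array(kSize10MB).join("a") yields a string of kSize10MB - 1
+// characters, so each document is just under 10MB and the two together total
+// roughly 20MB, safely above the 16MB that a single oplog entry can hold.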
- testDB[collName].drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+testDB[collName].drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- let doc1 = createLargeDocument(1);
- let doc2 = createLargeDocument(2);
+let doc1 = createLargeDocument(1);
+let doc2 = createLargeDocument(2);
- jsTestLog("Start a transaction and insert documents with sizes that add up to more than 16MB.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
- assert.commandWorked(sessionColl.insert(doc2));
+jsTestLog("Start a transaction and insert documents with sizes that add up to more than 16MB.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
+assert.commandWorked(sessionColl.insert(doc2));
- let downgradeFCV = startParallelShell(function() {
- load("jstests/libs/feature_compatibility_version.js");
+let downgradeFCV = startParallelShell(function() {
+ load("jstests/libs/feature_compatibility_version.js");
- const testDB = db.getSiblingDB("test");
- const adminDB = db.getSiblingDB("admin");
- try {
- jsTestLog("Downgrade to FCV4.0.");
- assert.commandWorked(
- testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
- } finally {
- jsTestLog("Restore back to FCV4.2.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
- }
- });
+ const testDB = db.getSiblingDB("test");
+ const adminDB = db.getSiblingDB("admin");
+ try {
+ jsTestLog("Downgrade to FCV4.0.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(adminDB, lastStableFCV);
+ } finally {
+ jsTestLog("Restore back to FCV4.2.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ checkFCV(adminDB, latestFCV);
+ }
+});
- // Wait until the in-memory FCV state has been changed to 4.0.
- assert.soon(function() {
- const adminDB = db.getSiblingDB("admin");
- let res = adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1});
- assert.commandWorked(res);
- return "4.0" === res.featureCompatibilityVersion.version;
- }, "Failed to detect the FCV change to 4.0 from server status.");
+// Wait until the in-memory FCV state has been changed to 4.0.
+assert.soon(function() {
+ const adminDB = db.getSiblingDB("admin");
+ let res = adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ assert.commandWorked(res);
+ return "4.0" === res.featureCompatibilityVersion.version;
+}, "Failed to detect the FCV change to 4.0 from server status.");
- jsTestLog("Attempt to commit the large transaction using the FCV4.0 oplog format.");
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.TransactionTooLarge);
+jsTestLog("Attempt to commit the large transaction using the FCV4.0 oplog format.");
+assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.TransactionTooLarge);
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", mode: "off"}));
- downgradeFCV();
+assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", mode: "off"}));
+downgradeFCV();
}());
diff --git a/jstests/core/txns/drop_collection_not_blocked_by_txn.js b/jstests/core/txns/drop_collection_not_blocked_by_txn.js
index c32f7372506..85dcda1b8e1 100644
--- a/jstests/core/txns/drop_collection_not_blocked_by_txn.js
+++ b/jstests/core/txns/drop_collection_not_blocked_by_txn.js
@@ -5,28 +5,28 @@
*/
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let db = rst.getPrimary().getDB("test");
+let db = rst.getPrimary().getDB("test");
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]}));
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("test");
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase("test");
- session.startTransaction();
- // This holds a database IX lock and a collection IX lock on "a".
- sessionDb.a.insert({y: 1});
+session.startTransaction();
+// This holds a database IX lock and a collection IX lock on "a".
+assert.writeOK(sessionDb.a.insert({y: 1}));
- // This only requires database IX lock.
- assert.commandWorked(db.runCommand({drop: "b"}));
+// This only requires a database IX lock.
+assert.commandWorked(db.runCommand({drop: "b"}));
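+// IX locks are mutually compatible at the database level, and the transaction's
+// collection lock is on "a", not "b", so the drop is not blocked.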
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/core/txns/empty_commit_abort.js b/jstests/core/txns/empty_commit_abort.js
index d496cf41623..4882b477df2 100644
--- a/jstests/core/txns/empty_commit_abort.js
+++ b/jstests/core/txns/empty_commit_abort.js
@@ -4,61 +4,62 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "empty_commit_abort";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "empty_commit_abort";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const doc = {_id: 1, a: 1, b: 1};
- assert.commandWorked(testColl.insert(doc));
+const doc = {
+ _id: 1,
+ a: 1,
+ b: 1
+};
+assert.commandWorked(testColl.insert(doc));
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- // ---- Test 1. No operations before commit ----
- session.startTransaction();
- assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// ---- Test 1. No operations before commit ----
+session.startTransaction();
+assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- // ---- Test 2. No operations before abort ----
- session.startTransaction();
- assert.commandFailedWithCode(sessionDB.adminCommand({abortTransaction: 1}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// ---- Test 2. No operations before abort ----
+session.startTransaction();
+assert.commandFailedWithCode(sessionDB.adminCommand({abortTransaction: 1}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- // ---- Test 3. Only reads before commit ----
- session.startTransaction();
- assert.eq(doc, sessionColl.findOne({a: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
+// ---- Test 3. Only reads before commit ----
+session.startTransaction();
+assert.eq(doc, sessionColl.findOne({a: 1}));
+assert.commandWorked(session.commitTransaction_forTesting());
- // ---- Test 4. Only reads before abort ----
- session.startTransaction();
- assert.eq(doc, sessionColl.findOne({a: 1}));
- assert.commandWorked(session.abortTransaction_forTesting());
+// ---- Test 4. Only reads before abort ----
+session.startTransaction();
+assert.eq(doc, sessionColl.findOne({a: 1}));
+assert.commandWorked(session.abortTransaction_forTesting());
- // ---- Test 5. Noop writes before commit ----
- session.startTransaction();
- let res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}}));
- assert.eq(res.nMatched, 1, tojson(res));
- assert.eq(res.nModified, 0, tojson(res));
- assert.eq(res.nUpserted, 0, tojson(res));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // ---- Test 6. Noop writes before abort ----
- session.startTransaction();
- res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}}));
- assert.eq(res.nMatched, 1, tojson(res));
- assert.eq(res.nModified, 0, tojson(res));
- assert.eq(res.nUpserted, 0, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+// ---- Test 5. Noop writes before commit ----
+session.startTransaction();
+let res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}}));
+assert.eq(res.nMatched, 1, tojson(res));
+assert.eq(res.nModified, 0, tojson(res));
+assert.eq(res.nUpserted, 0, tojson(res));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// ---- Test 6. Noop writes before abort ----
+session.startTransaction();
+res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}}));
+assert.eq(res.nMatched, 1, tojson(res));
+assert.eq(res.nModified, 0, tojson(res));
+assert.eq(res.nUpserted, 0, tojson(res));
+assert.commandWorked(session.abortTransaction_forTesting());
}());
diff --git a/jstests/core/txns/empty_prepare.js b/jstests/core/txns/empty_prepare.js
index cb1b616c9fc..59c0bce1f54 100644
--- a/jstests/core/txns/empty_prepare.js
+++ b/jstests/core/txns/empty_prepare.js
@@ -4,50 +4,52 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "empty_prepare";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const doc = {_id: 1, a: 1, b: 1};
- assert.commandWorked(testColl.insert(doc));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // ---- Test 1. No operations before prepare ----
-
- session.startTransaction();
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // ---- Test 2. Only reads before prepare ----
-
- session.startTransaction();
- assert.eq(doc, sessionColl.findOne({a: 1}));
- let res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
- // Makes sure prepareTransaction returns prepareTimestamp in its response.
- assert(res.hasOwnProperty("prepareTimestamp"), tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
-
- // ---- Test 3. Noop writes before prepare ----
-
- session.startTransaction();
- res = assert.commandWorked(sessionColl.update({a: 1}, {$set: {b: 1}}));
- assert.eq(res.nMatched, 1, tojson(res));
- assert.eq(res.nModified, 0, tojson(res));
- assert.eq(res.nUpserted, 0, tojson(res));
- res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
- // Makes sure prepareTransaction returns prepareTimestamp in its response.
- assert(res.hasOwnProperty("prepareTimestamp"), tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
-
+"use strict";
+
+const dbName = "test";
+const collName = "empty_prepare";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const doc = {
+ _id: 1,
+ a: 1,
+ b: 1
+};
+assert.commandWorked(testColl.insert(doc));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// ---- Test 1. No operations before prepare ----
+
+session.startTransaction();
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// ---- Test 2. Only reads before prepare ----
+
+session.startTransaction();
+assert.eq(doc, sessionColl.findOne({a: 1}));
+let res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
+// Makes sure prepareTransaction returns prepareTimestamp in its response.
+assert(res.hasOwnProperty("prepareTimestamp"), tojson(res));
+assert.commandWorked(session.abortTransaction_forTesting());
+
+// ---- Test 3. Noop writes before prepare ----
+
+session.startTransaction();
+res = assert.commandWorked(sessionColl.update({a: 1}, {$set: {b: 1}}));
+assert.eq(res.nMatched, 1, tojson(res));
+assert.eq(res.nModified, 0, tojson(res));
+assert.eq(res.nUpserted, 0, tojson(res));
+res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
+// Makes sure prepareTransaction returns prepareTimestamp in its response.
+assert(res.hasOwnProperty("prepareTimestamp"), tojson(res));
+assert.commandWorked(session.abortTransaction_forTesting());
}());
diff --git a/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js b/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js
index a6ef6ab7c77..2847d139c89 100644
--- a/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js
+++ b/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js
@@ -5,58 +5,48 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const dbName = "test";
- const collName = "ensure_active_txn_for_prepare_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Test that we can't call prepareTransaction if there was never a transaction on " +
- "the session");
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog(
- "Test that we can't call prepareTransaction if the most recent transaction was aborted");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog(
- "Test that we can't call prepareTransaction if the most recent transaction was committed");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(1),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
-
- session.endSession();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const dbName = "test";
+const collName = "ensure_active_txn_for_prepare_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Test that we can't call prepareTransaction if there was never a transaction on " +
+ "the session");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(0), stmtId: NumberInt(1), autocommit: false}),
+ ErrorCodes.NoSuchTransaction);
+
+jsTestLog("Test that we can't call prepareTransaction if the most recent transaction was aborted");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(session.abortTransaction_forTesting());
+
+assert.commandFailedWithCode(
+ sessionDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(0), stmtId: NumberInt(1), autocommit: false}),
+ ErrorCodes.NoSuchTransaction);
+
+jsTestLog(
+ "Test that we can't call prepareTransaction if the most recent transaction was committed");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.commandFailedWithCode(
+ sessionDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(1), stmtId: NumberInt(1), autocommit: false}),
+ ErrorCodes.TransactionCommitted);
+
+session.endSession();
}());
diff --git a/jstests/core/txns/errors_on_committed_transaction.js b/jstests/core/txns/errors_on_committed_transaction.js
index 6425fd1239e..2734f7fa11a 100644
--- a/jstests/core/txns/errors_on_committed_transaction.js
+++ b/jstests/core/txns/errors_on_committed_transaction.js
@@ -4,72 +4,71 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "prepare_committed_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "prepare_committed_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const doc = {x: 1};
+const doc = {
+ x: 1
+};
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- assert.commandWorked(session.commitTransaction_forTesting());
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+assert.commandWorked(session.commitTransaction_forTesting());
- const txnNumber = NumberLong(session.getTxnNumber_forTesting());
+const txnNumber = NumberLong(session.getTxnNumber_forTesting());
- // Call prepare on committed transaction.
- jsTestLog("Test that calling prepare on a committed transaction fails.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.TransactionCommitted);
+// Call prepare on committed transaction.
+jsTestLog("Test that calling prepare on a committed transaction fails.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.TransactionCommitted);
- jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " +
- "providing txnNumber to prepareTransaction.");
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}),
- ErrorCodes.InvalidOptions);
+jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " +
+ "providing txnNumber to prepareTransaction.");
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " +
- "providing autocommit to prepareTransaction.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber}), 50768);
+jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " +
+ "providing autocommit to prepareTransaction.");
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber}),
+ 50768);
- jsTestLog("Test the error precedence when calling prepare on a committed transaction and " +
- "providing startTransaction to prepareTransaction.");
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: txnNumber,
- autocommit: false,
- startTransaction: true
- }),
- ErrorCodes.OperationNotSupportedInTransaction);
+jsTestLog("Test the error precedence when calling prepare on a committed transaction and " +
+ "providing startTransaction to prepareTransaction.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: txnNumber, autocommit: false, startTransaction: true}),
+ ErrorCodes.OperationNotSupportedInTransaction);
- // Call commit on committed transaction without shell helper.
- jsTestLog("Test that calling commit with invalid fields on a committed transaction fails.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand(
- {commitTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}),
- 40415 /* IDL unknown field error */);
+// Call commit on committed transaction without shell helper.
+jsTestLog("Test that calling commit with invalid fields on a committed transaction fails.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand(
+ {commitTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}),
+ 40415 /* IDL unknown field error */);
- // Call abort on committed transaction without shell helper.
- jsTestLog("Test that calling abort on a committed transaction fails.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand({abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.TransactionCommitted);
+// Call abort on committed transaction without shell helper.
+jsTestLog("Test that calling abort on a committed transaction fails.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand({abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.TransactionCommitted);
- jsTestLog("Test that calling abort with invalid fields on a committed transaction fails.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand(
- {abortTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.TransactionCommitted);
+jsTestLog("Test that calling abort with invalid fields on a committed transaction fails.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand(
+ {abortTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.TransactionCommitted);
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/find_and_modify_in_transaction.js b/jstests/core/txns/find_and_modify_in_transaction.js
index 8724b13c85d..02c5a1639e1 100644
--- a/jstests/core/txns/find_and_modify_in_transaction.js
+++ b/jstests/core/txns/find_and_modify_in_transaction.js
@@ -1,151 +1,151 @@
// Test transactions including find-and-modify
// @tags: [assumes_unsharded_collection, uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "find_and_modify_in_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "find_and_modify_in_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
- jsTest.log("Prepopulate the collection.");
- assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}],
- {writeConcern: {w: "majority"}}));
+jsTest.log("Prepopulate the collection.");
+assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}],
+ {writeConcern: {w: "majority"}}));
- /***********************************************************************************************
- * Do a non-matching find-and-modify with remove.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a non-matching find-and-modify with remove.
+ **********************************************************************************************/
- jsTest.log("Do a non-matching find-and-modify with remove.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a non-matching find-and-modify with remove.");
+session.startTransaction({writeConcern: {w: "majority"}});
- // Do a findAndModify that affects no documents.
- let res = sessionColl.findAndModify({query: {a: 99}, remove: true});
- assert.eq(null, res);
- let docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]);
+// Do a findAndModify that affects no documents.
+let res = sessionColl.findAndModify({query: {a: 99}, remove: true});
+assert.eq(null, res);
+let docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a non-matching find-and-modify with update.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a non-matching find-and-modify with update.
+ **********************************************************************************************/
- jsTest.log("Do a non-matching find-and-modify with update.");
+jsTest.log("Do a non-matching find-and-modify with update.");
- session.startTransaction({writeConcern: {w: "majority"}});
+session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify({query: {a: 99}, update: {$inc: {a: 100}}});
- assert.eq(null, res);
- docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]);
+res = sessionColl.findAndModify({query: {a: 99}, update: {$inc: {a: 100}}});
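+// As with the remove above, a non-matching update returns null and leaves every document untouched.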
+assert.eq(null, res);
+docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a matching find-and-modify with remove.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a matching find-and-modify with remove.
+ **********************************************************************************************/
- jsTest.log("Do a matching find-and-modify with remove.");
+jsTest.log("Do a matching find-and-modify with remove.");
- session.startTransaction({writeConcern: {w: "majority"}});
+session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify({query: {a: 0}, remove: true});
- assert.eq({_id: 0, a: 0}, res);
- docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [{_id: 1, a: 1}, {_id: 2, a: 2}]);
+res = sessionColl.findAndModify({query: {a: 0}, remove: true});
+assert.eq({_id: 0, a: 0}, res);
+docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [{_id: 1, a: 1}, {_id: 2, a: 2}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a matching find-and-modify with update, requesting the old doc.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a matching find-and-modify with update, requesting the old doc.
+ **********************************************************************************************/
- jsTest.log("Do a matching find-and-modify with update, requesting the old doc.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a matching find-and-modify with update, requesting the old doc.");
+session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify({query: {a: 1}, update: {$inc: {a: 100}}});
- assert.eq({_id: 1, a: 1}, res);
- docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 2}]);
+res = sessionColl.findAndModify({query: {a: 1}, update: {$inc: {a: 100}}});
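+// By default, findAndModify returns the document as it looked before the update.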
+assert.eq({_id: 1, a: 1}, res);
+docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 2}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a matching find-and-modify with update, requesting the new doc.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a matching find-and-modify with update, requesting the new doc.
+ **********************************************************************************************/
- jsTest.log("Do a matching find-and-modify with update, requesting the new doc.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a matching find-and-modify with update, requesting the new doc.");
+session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify({query: {a: 2}, update: {$inc: {a: 100}}, new: true});
- assert.eq({_id: 2, a: 102}, res);
- docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 102}]);
+res = sessionColl.findAndModify({query: {a: 2}, update: {$inc: {a: 100}}, new: true});
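+// With "new: true", findAndModify returns the document as it looks after the update.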
+assert.eq({_id: 2, a: 102}, res);
+docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 102}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a matching find-and-modify with upsert, requesting the new doc.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a matching find-and-modify with upsert, requesting the new doc.
+ **********************************************************************************************/
- jsTest.log("Do a matching find-and-modify with upsert, requesting the new doc.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a matching find-and-modify with upsert, requesting the new doc.");
+session.startTransaction({writeConcern: {w: "majority"}});
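+// {_id: 2} already exists with a: 102 from the previous case, so this upsert acts as a plain
+// update and $inc yields a: 202.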
- res = sessionColl.findAndModify(
- {query: {_id: 2}, update: {$inc: {a: 100}}, upsert: true, new: true});
- assert.eq({_id: 2, a: 202}, res);
- docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 202}]);
+res =
+ sessionColl.findAndModify({query: {_id: 2}, update: {$inc: {a: 100}}, upsert: true, new: true});
+assert.eq({_id: 2, a: 202}, res);
+docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 202}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a non-matching find-and-modify with upsert, requesting the old doc.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a non-matching find-and-modify with upsert, requesting the old doc.
+ **********************************************************************************************/
- jsTest.log("Do a non-matching find-and-modify with upsert, requesting the old doc.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a non-matching find-and-modify with upsert, requesting the old doc.");
+session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify({query: {a: 3}, update: {$inc: {a: 100}}, upsert: true});
- assert.eq(null, res);
- docs = sessionColl.find({a: 103}, {_id: 0}).toArray();
- assert.sameMembers(docs, [{a: 103}]);
+res = sessionColl.findAndModify({query: {a: 3}, update: {$inc: {a: 100}}, upsert: true});
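+// Without "new: true", an upsert that inserts returns null: the new document has no pre-image.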
+assert.eq(null, res);
+docs = sessionColl.find({a: 103}, {_id: 0}).toArray();
+assert.sameMembers(docs, [{a: 103}]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- /***********************************************************************************************
- * Do a non-matching find-and-modify with upsert, requesting the new doc.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Do a non-matching find-and-modify with upsert, requesting the new doc.
+ **********************************************************************************************/
- jsTest.log("Do a non-matching find-and-modify with upsert, requesting the new doc.");
- session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify(
- {query: {a: 4}, update: {$inc: {a: 200}}, upsert: true, new: true});
+jsTest.log("Do a non-matching find-and-modify with upsert, requesting the new doc.");
+session.startTransaction({writeConcern: {w: "majority"}});
+res = sessionColl.findAndModify({query: {a: 4}, update: {$inc: {a: 200}}, upsert: true, new: true});
- const newdoc = res;
- assert.eq(204, newdoc.a);
- docs = sessionColl.find({a: 204}).toArray();
- assert.sameMembers(docs, [newdoc]);
+const newdoc = res;
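+// The upsert seeds the new document from the query ({a: 4}), then $inc adds 200, so a is 204.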
+assert.eq(204, newdoc.a);
+docs = sessionColl.find({a: 204}).toArray();
+assert.sameMembers(docs, [newdoc]);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
}());
diff --git a/jstests/core/txns/finished_transaction_error_handling.js b/jstests/core/txns/finished_transaction_error_handling.js
index f0907998578..7cabb693fe5 100644
--- a/jstests/core/txns/finished_transaction_error_handling.js
+++ b/jstests/core/txns/finished_transaction_error_handling.js
@@ -1,140 +1,145 @@
// Test committed and aborted transactions cannot be changed but commitTransaction is retryable.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "finished_transaction_error_handling";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "finished_transaction_error_handling";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- const writeConcern = {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS};
- testDB.runCommand({drop: collName, writeConcern: writeConcern});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: writeConcern}));
+const writeConcern = {
+ w: "majority",
+ wtimeout: ReplSetTest.kDefaultTimeoutMS
+};
+testDB.runCommand({drop: collName, writeConcern: writeConcern});
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: writeConcern}));
- let txnNumber = 0;
- let stmtId = 0;
+let txnNumber = 0;
+let stmtId = 0;
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- jsTestLog("Test aborted transaction number cannot be reused.");
- txnNumber++;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "abort-txn-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }));
+jsTestLog("Test aborted transaction number cannot be reused.");
+txnNumber++;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "abort-txn-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}));
+assert.commandWorked(sessionDb.adminCommand({
+ abortTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}));
- jsTestLog("Attempt to commit an aborted transaction");
- assert.commandFailedWithCode(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
+jsTestLog("Attempt to commit an aborted transaction");
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
- jsTestLog("Attempt to abort an aborted transaction");
- assert.commandFailedWithCode(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
+jsTestLog("Attempt to abort an aborted transaction");
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
- jsTestLog("Attempt to continue an aborted transaction");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "abort-txn-2"}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
+jsTestLog("Attempt to continue an aborted transaction");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "abort-txn-2"}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
- jsTestLog("Attempt to restart an aborted transaction");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "abort-txn-2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.ConflictingOperationInProgress);
+jsTestLog("Attempt to restart an aborted transaction");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "abort-txn-2"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.ConflictingOperationInProgress);
- jsTest.log("Test commitTransaction command is retryable");
- txnNumber++;
- stmtId = 0;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }));
+jsTest.log("Test commitTransaction command is retryable");
+txnNumber++;
+stmtId = 0;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}));
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}));
- jsTestLog("Retry commitTransaction command on a committed transaction");
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false
- }));
+jsTestLog("Retry commitTransaction command on a committed transaction");
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false
+}));
- jsTestLog("Attempt to abort a committed transaction");
- assert.commandFailedWithCode(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
+jsTestLog("Attempt to abort a committed transaction");
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.TransactionCommitted);
- jsTestLog("Attempt to continue a committed transaction");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-2"}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
+jsTestLog("Attempt to continue a committed transaction");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-2"}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.TransactionCommitted);
- jsTestLog("Attempt to restart a committed transaction");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.ConflictingOperationInProgress);
+jsTestLog("Attempt to restart a committed transaction");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-2"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.ConflictingOperationInProgress);
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/indexing_not_blocked_by_txn.js b/jstests/core/txns/indexing_not_blocked_by_txn.js
index c08a6a7e495..020c16b28b8 100644
--- a/jstests/core/txns/indexing_not_blocked_by_txn.js
+++ b/jstests/core/txns/indexing_not_blocked_by_txn.js
@@ -7,35 +7,37 @@
* @tags: [uses_transactions, assumes_unsharded_collection]
*/
(function() {
- "use strict";
- var dbName = 'indexing_not_blocked_by_txn';
- var mydb = db.getSiblingDB(dbName);
- const wcMajority = {writeConcern: {w: "majority"}};
+"use strict";
+var dbName = 'indexing_not_blocked_by_txn';
+var mydb = db.getSiblingDB(dbName);
+const wcMajority = {
+ writeConcern: {w: "majority"}
+};
- mydb.foo.drop(wcMajority);
- mydb.bar.drop(wcMajority);
- assert.commandWorked(mydb.createCollection("foo", wcMajority));
- assert.commandWorked(mydb.foo.createIndex({x: 1}));
- assert.commandWorked(mydb.createCollection("bar", wcMajority));
+mydb.foo.drop(wcMajority);
+mydb.bar.drop(wcMajority);
+assert.commandWorked(mydb.createCollection("foo", wcMajority));
+assert.commandWorked(mydb.foo.createIndex({x: 1}));
+assert.commandWorked(mydb.createCollection("bar", wcMajority));
- var session = db.getMongo().startSession();
- var sessionDb = session.getDatabase(dbName);
+var session = db.getMongo().startSession();
+var sessionDb = session.getDatabase(dbName);
- session.startTransaction();
- assert.commandWorked(sessionDb.foo.insert({x: 1}));
+session.startTransaction();
+assert.commandWorked(sessionDb.foo.insert({x: 1}));
- // Creating already existing index is a no-op that shouldn't take strong locks.
- assert.commandWorked(mydb.foo.createIndex({x: 1}));
+// Creating already existing index is a no-op that shouldn't take strong locks.
+assert.commandWorked(mydb.foo.createIndex({x: 1}));
- // Creating an index on a different collection should not conflict.
- assert.commandWorked(mydb.bar.createIndex({x: 1}));
+// Creating an index on a different collection should not conflict.
+assert.commandWorked(mydb.bar.createIndex({x: 1}));
- // Dropping shouldn't either.
- assert.commandWorked(mydb.bar.dropIndex({x: 1}));
+// Dropping shouldn't either.
+assert.commandWorked(mydb.bar.dropIndex({x: 1}));
- // Creating an index on a non-existent collection in an existing database should not conflict.
- assert.commandWorked(mydb.baz.createIndex({x: 1}));
+// Creating an index on a non-existent collection in an existing database should not conflict.
+assert.commandWorked(mydb.baz.createIndex({x: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
}());
diff --git a/jstests/core/txns/kill_cursors_in_transaction.js b/jstests/core/txns/kill_cursors_in_transaction.js
index 84a58bfdc33..f0dbe8330f1 100644
--- a/jstests/core/txns/kill_cursors_in_transaction.js
+++ b/jstests/core/txns/kill_cursors_in_transaction.js
@@ -1,78 +1,76 @@
// Tests that the killCursors command is allowed in transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "kill_cursors_in_transaction";
- const testDB = db.getSiblingDB(dbName);
- const adminDB = db.getSiblingDB("admin");
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
+const dbName = "test";
+const collName = "kill_cursors_in_transaction";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB("admin");
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
- sessionColl.drop({writeConcern: {w: "majority"}});
- for (let i = 0; i < 4; ++i) {
- assert.commandWorked(sessionColl.insert({_id: i}));
- }
+sessionColl.drop({writeConcern: {w: "majority"}});
+for (let i = 0; i < 4; ++i) {
+ assert.commandWorked(sessionColl.insert({_id: i}));
+}
- jsTest.log("Test that the killCursors command is allowed in transactions.");
+jsTest.log("Test that the killCursors command is allowed in transactions.");
- session.startTransaction();
- let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
- assert.commandWorked(session.commitTransaction_forTesting());
+session.startTransaction();
+let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+assert.commandWorked(session.commitTransaction_forTesting());
- jsTest.log("Test that the killCursors cannot be the first operation in a transaction.");
- res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+jsTest.log("Test that the killCursors cannot be the first operation in a transaction.");
+res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+session.startTransaction();
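+// killCursors is not allowed to start a transaction, so this fails and no transaction ever
+// starts on the server; the abort below therefore returns NoSuchTransaction.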
+assert.commandFailedWithCode(
+ sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- jsTest.log("killCursors must not block on locks held by the transaction in which it is run.");
+jsTest.log("killCursors must not block on locks held by the transaction in which it is run.");
- session.startTransaction();
+session.startTransaction();
- // Open a cursor on the collection.
- res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
+// Open a cursor on the collection.
+res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
- // Start a drop, which will hang.
- let awaitDrop = startParallelShell(function() {
- db.getSiblingDB("test")["kill_cursors_in_transaction"].drop(
- {writeConcern: {w: "majority"}});
- });
+// Start a drop, which will hang.
+let awaitDrop = startParallelShell(function() {
+ db.getSiblingDB("test")["kill_cursors_in_transaction"].drop({writeConcern: {w: "majority"}});
+});
- // Wait for the drop to have a pending MODE_X lock on the database.
- assert.soon(
- function() {
- return adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {"command.drop": collName, waitingForLock: true}}
- ])
- .itcount() === 1;
- },
- function() {
- return "Failed to find drop in currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
- });
+// Wait for the drop to have a pending MODE_X lock on the database.
+assert.soon(
+ function() {
+ return adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"command.drop": collName, waitingForLock: true}}
+ ])
+ .itcount() === 1;
+ },
+ function() {
+ return "Failed to find drop in currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
+ });
- // killCursors does not block behind the pending MODE_X lock.
- assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+// killCursors does not block behind the pending MODE_X lock.
+assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Once the transaction has committed, the drop can proceed.
- awaitDrop();
+// Once the transaction has committed, the drop can proceed.
+awaitDrop();
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/kill_op_on_txn_expiry.js b/jstests/core/txns/kill_op_on_txn_expiry.js
index 298b5d0926e..dde4930bfae 100644
--- a/jstests/core/txns/kill_op_on_txn_expiry.js
+++ b/jstests/core/txns/kill_op_on_txn_expiry.js
@@ -1,95 +1,95 @@
// Test that ongoing operations in a transaction are interrupted when the transaction expires.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load('jstests/libs/parallelTester.js');
- load("jstests/libs/check_log.js");
-
- const dbName = "test";
- const collName = "kill_op_on_txn_expiry";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+"use strict";
+
+load('jstests/libs/parallelTester.js');
+load("jstests/libs/check_log.js");
+
+const dbName = "test";
+const collName = "kill_op_on_txn_expiry";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+// Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the
+// end of the test.
+const res =
+ assert.commandWorked(db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
+const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds;
+
+// Decrease transactionLifetimeLimitSeconds so transactions expire faster.
+jsTest.log("Decrease transactionLifetimeLimitSeconds from " +
+ originalTransactionLifetimeLimitSeconds + " to 30 seconds.");
+assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 30}));
+
+try {
+ jsTestLog("Starting transaction");
+
+ let txnNumber = 0;
+ assert.commandWorked(testColl.runCommand({
+ insert: collName,
+ documents: [{_id: 0}],
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ lsid: session.getSessionId(),
+ }));
+
+ jsTestLog("Enabling fail point to block batch inserts");
assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- // Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the
- // end of the test.
- const res = assert.commandWorked(
- db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
- const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds;
-
- // Decrease transactionLifetimeLimitSeconds so it expires faster
- jsTest.log("Decrease transactionLifetimeLimitSeconds from " +
- originalTransactionLifetimeLimitSeconds + " to 30 seconds.");
- assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 30}));
-
- try {
- jsTestLog("Starting transaction");
-
- let txnNumber = 0;
- assert.commandWorked(testColl.runCommand({
+ testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "alwaysOn"}));
+ // Clear ramlog so checkLog can't find log messages from previous times this fail point was
+ // enabled.
+ assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
+
+ jsTestLog("Starting insert operation in parallel thread");
+ let workerThread = new ScopedThread((sessionId, txnNumber, dbName, collName) => {
+ // Deserialize the session ID from its string representation.
+ sessionId = eval("(" + sessionId + ")");
+
+ let coll = db.getSiblingDB(dbName).getCollection(collName);
+ assert.commandFailedWithCode(coll.runCommand({
insert: collName,
- documents: [{_id: 0}],
+ documents: [{_id: 1}],
txnNumber: NumberLong(txnNumber),
- startTransaction: true,
autocommit: false,
- lsid: session.getSessionId(),
- }));
-
- jsTestLog("Enabling fail point to block batch inserts");
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "alwaysOn"}));
- // Clear ramlog so checkLog can't find log messages from previous times this fail point was
- // enabled.
- assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
-
- jsTestLog("Starting insert operation in parallel thread");
- let workerThread = new ScopedThread((sessionId, txnNumber, dbName, collName) => {
- // Deserialize the session ID from its string representation.
- sessionId = eval("(" + sessionId + ")");
-
- let coll = db.getSiblingDB(dbName).getCollection(collName);
- assert.commandFailedWithCode(coll.runCommand({
- insert: collName,
- documents: [{_id: 1}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- lsid: sessionId
- }),
- ErrorCodes.ExceededTimeLimit);
-
- }, tojson(session.getSessionId()), txnNumber, dbName, collName);
- workerThread.start();
-
- jsTestLog("Wait for insert to be blocked");
- checkLog.contains(db.getMongo(), "hangDuringBatchInsert fail point enabled");
-
- jsTestLog("Wait for the transaction to expire");
- checkLog.contains(db.getMongo(), "Aborting transaction with txnNumber " + txnNumber);
-
- jsTestLog("Disabling fail point to enable insert to proceed and detect that the session " +
- "has been killed");
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
-
- workerThread.join();
- assert(!workerThread.hasFailed());
- } finally {
- // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact
- // other tests in the suite.
- assert.commandWorked(db.adminCommand({
- setParameter: 1,
- transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
- }));
- }
-
- session.endSession();
+ lsid: sessionId
+ }),
+ ErrorCodes.ExceededTimeLimit);
+ }, tojson(session.getSessionId()), txnNumber, dbName, collName);
+ workerThread.start();
+
+ jsTestLog("Wait for insert to be blocked");
+ checkLog.contains(db.getMongo(), "hangDuringBatchInsert fail point enabled");
+
+ jsTestLog("Wait for the transaction to expire");
+ checkLog.contains(db.getMongo(), "Aborting transaction with txnNumber " + txnNumber);
+
+ jsTestLog("Disabling fail point to enable insert to proceed and detect that the session " +
+ "has been killed");
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
+
+ workerThread.join();
+ assert(!workerThread.hasFailed());
+} finally {
+ // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact
+ // other tests in the suite.
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
+ }));
+}
+
+session.endSession();
}());
diff --git a/jstests/core/txns/kill_sessions_kills_transaction.js b/jstests/core/txns/kill_sessions_kills_transaction.js
index 4b4e7ee9afb..bd03a124624 100644
--- a/jstests/core/txns/kill_sessions_kills_transaction.js
+++ b/jstests/core/txns/kill_sessions_kills_transaction.js
@@ -1,77 +1,77 @@
// Tests that killSessions kills inactive transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "kill_sessions_kills_transaction";
- const testDB = db.getSiblingDB(dbName);
- const adminDB = db.getSiblingDB("admin");
- const testColl = testDB[collName];
- const sessionOptions = {causalConsistency: false};
+const dbName = "test";
+const collName = "kill_sessions_kills_transaction";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB("admin");
+const testColl = testDB[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- const bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 4; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
+const bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 4; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
- jsTest.log("Test that killing a session kills an inactive transaction.");
- let session = db.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb[collName];
+jsTest.log("Test that killing a session kills an inactive transaction.");
+let session = db.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb[collName];
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 5}));
- assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 5}));
+assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
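+// killSessions aborts the inactive transaction, so the commit below fails.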
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- session.endSession();
+session.endSession();
- jsTest.log("killSessions must not block on locks held by a transaction it plans to kill.");
- session = db.getMongo().startSession(sessionOptions);
- sessionDb = session.getDatabase(dbName);
- sessionColl = sessionDb[collName];
+jsTest.log("killSessions must not block on locks held by a transaction it plans to kill.");
+session = db.getMongo().startSession(sessionOptions);
+sessionDb = session.getDatabase(dbName);
+sessionColl = sessionDb[collName];
- session.startTransaction();
- // Open a cursor on the collection.
- assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+session.startTransaction();
+// Open a cursor on the collection.
+assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- // Start a drop, which will hang.
- let awaitDrop = startParallelShell(function() {
- db.getSiblingDB("test")["kill_sessions_kills_transaction"].drop(
- {writeConcern: {w: "majority"}});
- });
+// Start a drop, which will hang.
+let awaitDrop = startParallelShell(function() {
+ db.getSiblingDB("test")["kill_sessions_kills_transaction"].drop(
+ {writeConcern: {w: "majority"}});
+});
- // Wait for the drop to have a pending MODE_X lock on the database.
- assert.soon(
- function() {
- return adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {"command.drop": collName, waitingForLock: true}}
- ])
- .itcount() === 1;
- },
- function() {
- return "Failed to find drop in currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
- });
+// Wait for the drop to have a pending MODE_X lock on the database.
+assert.soon(
+ function() {
+ return adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"command.drop": collName, waitingForLock: true}}
+ ])
+ .itcount() === 1;
+ },
+ function() {
+ return "Failed to find drop in currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
+ });
- // killSessions needs to acquire a MODE_IS lock on the collection in order to kill the open
- // cursor. However, the transaction is holding a MODE_IX lock on the collection, which will
- // block the drop from obtaining a MODE_X lock on the database, which will block the
- // killSessions from taking a MODE_IS lock on the collection. In order to avoid hanging,
- // killSessions must first kill the transaction, so that it releases its MODE_IX collection
- // lock. This allows the drop to proceed and obtain and release the MODE_X lock. Finally,
- // killSessions can obtain a MODE_IS collection lock and kill the cursor.
- assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
- awaitDrop();
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// killSessions needs to acquire a MODE_IS lock on the collection in order to kill the open
+// cursor. However, the transaction is holding a MODE_IX lock on the collection, which will
+// block the drop from obtaining a MODE_X lock on the database, which will block the
+// killSessions from taking a MODE_IS lock on the collection. In order to avoid hanging,
+// killSessions must first kill the transaction, so that it releases its MODE_IX collection
+// lock. This allows the drop to proceed and obtain and release the MODE_X lock. Finally,
+// killSessions can obtain a MODE_IS collection lock and kill the cursor.
+assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
+awaitDrop();
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/kill_transaction_cursors_after_commit.js b/jstests/core/txns/kill_transaction_cursors_after_commit.js
index 003158c3e52..0910b6fb1b7 100644
--- a/jstests/core/txns/kill_transaction_cursors_after_commit.js
+++ b/jstests/core/txns/kill_transaction_cursors_after_commit.js
@@ -1,35 +1,35 @@
// Tests that cursors created in transactions may be killed outside of the transaction.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "kill_transaction_cursors";
- const testDB = db.getSiblingDB(dbName);
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
+const dbName = "test";
+const collName = "kill_transaction_cursors";
+const testDB = db.getSiblingDB(dbName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
- sessionColl.drop({writeConcern: {w: "majority"}});
- for (let i = 0; i < 4; ++i) {
- assert.commandWorked(sessionColl.insert({_id: i}));
- }
+sessionColl.drop({writeConcern: {w: "majority"}});
+for (let i = 0; i < 4; ++i) {
+ assert.commandWorked(sessionColl.insert({_id: i}));
+}
- jsTest.log("Test that cursors created in transactions may be kill outside of the transaction.");
- session.startTransaction();
- let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+jsTest.log("Test that cursors created in transactions may be kill outside of the transaction.");
+session.startTransaction();
+let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
- jsTest.log("Test that cursors created in transactions may be kill outside of the session.");
- session.startTransaction();
- res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+jsTest.log("Test that cursors created in transactions may be kill outside of the session.");
+session.startTransaction();
+res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/kill_txn_cursor.js b/jstests/core/txns/kill_txn_cursor.js
index cd2332b6d24..0dc68af52c1 100644
--- a/jstests/core/txns/kill_txn_cursor.js
+++ b/jstests/core/txns/kill_txn_cursor.js
@@ -1,63 +1,64 @@
// Tests that killing a cursor created in a transaction does not abort the transaction.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "kill_txn_cursor";
- const testDB = db.getSiblingDB(dbName);
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- const bulk = sessionColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 4; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- jsTest.log("Start a transaction.");
- session.startTransaction({writeConcern: {w: "majority"}});
-
- // Open cursor 1, and do not exhaust the cursor.
- let cursorRes1 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1));
- assert(cursorRes1.cursor.hasOwnProperty("id"), tojson(cursorRes1));
- let cursorId1 = cursorRes1.cursor.id;
- jsTest.log("Opened cursor 1 with id " + cursorId1);
-
- // Open cursor 2, and do not exhaust the cursor.
- let cursorRes2 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
- assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2));
- assert(cursorRes2.cursor.hasOwnProperty("id"), tojson(cursorRes2));
- let cursorId2 = cursorRes2.cursor.id;
- jsTest.log("Opened cursor 2 with id " + cursorId2);
-
- jsTest.log("Kill cursor 1 outside of the transaction.");
- // Kill cursor 1. We check that the kill was successful by asserting that the killCursors
- // command worked. We could run a getMore and check that we get a CursorNotFound error, but this
- // error would abort the transaction and kill cursor 2, and we want to check that cursor 2 is
- // still alive.
- assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [cursorId1]}));
-
- jsTest.log("Cursor 2 is still alive.");
- cursorRes2 =
- assert.commandWorked(sessionDb.runCommand({getMore: cursorId2, collection: collName}));
- assert(cursorRes2.hasOwnProperty("cursor"));
- assert(cursorRes2.cursor.hasOwnProperty("nextBatch"));
- assert.sameMembers(cursorRes2.cursor.nextBatch, [{_id: 2}, {_id: 3}]);
-
- jsTest.log("Can still write in the transaction");
- assert.commandWorked(sessionColl.insert({_id: 4}));
-
- jsTest.log("Commit transaction.");
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.sameMembers([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
- sessionColl.find().toArray());
-
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const collName = "kill_txn_cursor";
+const testDB = db.getSiblingDB(dbName);
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+const bulk = sessionColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 4; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
+
+jsTest.log("Start a transaction.");
+session.startTransaction({writeConcern: {w: "majority"}});
+
+// Open cursor 1, and do not exhaust the cursor.
+let cursorRes1 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1));
+assert(cursorRes1.cursor.hasOwnProperty("id"), tojson(cursorRes1));
+let cursorId1 = cursorRes1.cursor.id;
+jsTest.log("Opened cursor 1 with id " + cursorId1);
+
+// Open cursor 2, and do not exhaust the cursor.
+let cursorRes2 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2));
+assert(cursorRes2.cursor.hasOwnProperty("id"), tojson(cursorRes2));
+let cursorId2 = cursorRes2.cursor.id;
+jsTest.log("Opened cursor 2 with id " + cursorId2);
+
+jsTest.log("Kill cursor 1 outside of the transaction.");
+// Kill cursor 1. We check that the kill was successful by asserting that the killCursors
+// command worked. We could run a getMore and check that we get a CursorNotFound error, but this
+// error would abort the transaction and kill cursor 2, and we want to check that cursor 2 is
+// still alive.
+assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [cursorId1]}));
+
+jsTest.log("Cursor 2 is still alive.");
+cursorRes2 = assert.commandWorked(sessionDb.runCommand({getMore: cursorId2, collection: collName}));
+assert(cursorRes2.hasOwnProperty("cursor"));
+assert(cursorRes2.cursor.hasOwnProperty("nextBatch"));
+assert.sameMembers(cursorRes2.cursor.nextBatch, [{_id: 2}, {_id: 3}]);
+
+jsTest.log("Can still write in the transaction");
+assert.commandWorked(sessionColl.insert({_id: 4}));
+
+jsTest.log("Commit transaction.");
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.sameMembers([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
+ sessionColl.find().toArray());
+
+session.endSession();
}());
diff --git a/jstests/core/txns/large_transactions_require_fcv42.js b/jstests/core/txns/large_transactions_require_fcv42.js
index ce24c8c0c39..cbfa89f6e73 100644
--- a/jstests/core/txns/large_transactions_require_fcv42.js
+++ b/jstests/core/txns/large_transactions_require_fcv42.js
@@ -4,72 +4,73 @@
* @tags: [uses_transactions]
*/
(function() {
- "uses strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+"uses strict";
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "large_transactions_require_fcv42";
- const testDB = db.getSiblingDB(dbName);
- const adminDB = db.getSiblingDB('admin');
+const dbName = "test";
+const collName = "large_transactions_require_fcv42";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB('admin');
- testDB[collName].drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testDB[collName].drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- // As we are not able to send a single request larger than 16MB, we insert two documents
- // of 10MB each to create a "large" transaction.
- const kSize10MB = 10 * 1024 * 1024;
- function createLargeDocument(id) {
- return {_id: id, longString: "a".repeat(kSize10MB)};
- }
+// As we are not able to send a single request larger than 16MB, we insert two documents
+// of 10MB each to create a "large" transaction.
+const kSize10MB = 10 * 1024 * 1024;
+function createLargeDocument(id) {
+ return {_id: id, longString: "a".repeat(kSize10MB)};
+}
- try {
- jsTestLog("Test that creating a transaction larger than 16MB succeeds in FCV 4.2.");
- let doc1 = createLargeDocument(1);
- let doc2 = createLargeDocument(2);
+try {
+ jsTestLog("Test that creating a transaction larger than 16MB succeeds in FCV 4.2.");
+ let doc1 = createLargeDocument(1);
+ let doc2 = createLargeDocument(2);
- checkFCV(adminDB, latestFCV);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
- assert.commandWorked(sessionColl.insert(doc2));
- assert.commandWorked(session.commitTransaction_forTesting());
+ checkFCV(adminDB, latestFCV);
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert(doc1));
+ assert.commandWorked(sessionColl.insert(doc2));
+ assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Downgrade the featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
+ jsTestLog("Downgrade the featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(adminDB, lastStableFCV);
- jsTestLog("Test that trying to create a transaction larger than 16MB fails in FCV 4.0.");
- let doc3 = createLargeDocument(3);
- let doc4 = createLargeDocument(4);
+ jsTestLog("Test that trying to create a transaction larger than 16MB fails in FCV 4.0.");
+ let doc3 = createLargeDocument(3);
+ let doc4 = createLargeDocument(4);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc3));
- assert.commandFailedWithCode(sessionColl.insert(doc4), ErrorCodes.TransactionTooLarge);
- // We have to call 'abortTransaction' here to clear the transaction state in the shell.
- // Otherwise, the later call to 'startTransaction' will fail with 'Transaction already in
- // progress'.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } finally {
- jsTestLog("Restore to FCV 4.2.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
- }
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert(doc3));
+ assert.commandFailedWithCode(sessionColl.insert(doc4), ErrorCodes.TransactionTooLarge);
+ // We have to call 'abortTransaction' here to clear the transaction state in the shell.
+ // Otherwise, the later call to 'startTransaction' will fail with 'Transaction already in
+ // progress'.
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} finally {
+ jsTestLog("Restore to FCV 4.2.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ checkFCV(adminDB, latestFCV);
+}
- jsTestLog(
- "Test that creating a transaction larger than 16MB succeeds after upgrading to FCV 4.2.");
- let doc5 = createLargeDocument(5);
- let doc6 = createLargeDocument(6);
+jsTestLog("Test that creating a transaction larger than 16MB succeeds after upgrading to FCV 4.2.");
+let doc5 = createLargeDocument(5);
+let doc6 = createLargeDocument(6);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc5));
- assert.commandWorked(sessionColl.insert(doc6));
- assert.commandWorked(session.commitTransaction_forTesting());
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc5));
+assert.commandWorked(sessionColl.insert(doc6));
+assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/libs/prepare_helpers.js b/jstests/core/txns/libs/prepare_helpers.js
index 9fca9e20cfc..c6b22220a94 100644
--- a/jstests/core/txns/libs/prepare_helpers.js
+++ b/jstests/core/txns/libs/prepare_helpers.js
@@ -5,7 +5,6 @@
*
*/
const PrepareHelpers = (function() {
-
/**
* Prepares the active transaction on the session. This expects the 'prepareTransaction' command
* to succeed and return a non-null 'prepareTimestamp'.
@@ -106,7 +105,7 @@ const PrepareHelpers = (function() {
assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}}));
}
- for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) {
+ for (let [nodeName, oplog] of [["primary", primaryOplog], ["secondary", secondaryOplog]]) {
assert.soon(function() {
const dataSize = oplog.dataSize();
const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null);
diff --git a/jstests/core/txns/libs/write_conflicts.js b/jstests/core/txns/libs/write_conflicts.js
index 5496464d9ed..fdaf2114d82 100644
--- a/jstests/core/txns/libs/write_conflicts.js
+++ b/jstests/core/txns/libs/write_conflicts.js
@@ -5,7 +5,6 @@
*
*/
var WriteConflictHelpers = (function() {
-
/**
* Write conflict test cases.
*
@@ -125,8 +124,8 @@ var WriteConflictHelpers = (function() {
const session2 = conn.startSession(sessionOptions);
jsTestLog("Executing write conflict test, case '" + writeConflictTestCase.name +
- "'. \n transaction 1 op: " + tojson(txn1Op) + "\n transaction 2 op: " +
- tojson(txn2Op));
+ "'. \n transaction 1 op: " + tojson(txn1Op) +
+ "\n transaction 2 op: " + tojson(txn2Op));
// Run the specified write conflict test.
try {
diff --git a/jstests/core/txns/list_collections_not_blocked_by_txn.js b/jstests/core/txns/list_collections_not_blocked_by_txn.js
index 6f23c1ea88b..faf095129d2 100644
--- a/jstests/core/txns/list_collections_not_blocked_by_txn.js
+++ b/jstests/core/txns/list_collections_not_blocked_by_txn.js
@@ -2,42 +2,42 @@
// This test ensures that listCollections does not conflict with multi-statement transactions
// as a result of taking MODE_S locks that are incompatible with MODE_IX needed for writes.
(function() {
- "use strict";
- var dbName = 'list_collections_not_blocked';
- var mydb = db.getSiblingDB(dbName);
- var session = db.getMongo().startSession({causalConsistency: false});
- var sessionDb = session.getDatabase(dbName);
+"use strict";
+var dbName = 'list_collections_not_blocked';
+var mydb = db.getSiblingDB(dbName);
+var session = db.getMongo().startSession({causalConsistency: false});
+var sessionDb = session.getDatabase(dbName);
- mydb.foo.drop({writeConcern: {w: "majority"}});
+mydb.foo.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(mydb.createCollection("foo", {writeConcern: {w: "majority"}}));
+assert.commandWorked(mydb.createCollection("foo", {writeConcern: {w: "majority"}}));
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
- if (isMongos) {
- // Before starting the transaction below, access the collection so it can be implicitly
- // sharded and force all shards to refresh their database versions because the refresh
- // requires an exclusive lock and would block behind the transaction.
- assert.eq(sessionDb.foo.find().itcount(), 0);
- assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true}));
- }
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
+if (isMongos) {
+ // Before starting the transaction below, access the collection so it can be implicitly
+ // sharded and force all shards to refresh their database versions because the refresh
+ // requires an exclusive lock and would block behind the transaction.
+ assert.eq(sessionDb.foo.find().itcount(), 0);
+ assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true}));
+}
- session.startTransaction({readConcern: {level: "snapshot"}});
+session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDb.foo.insert({x: 1}));
+assert.commandWorked(sessionDb.foo.insert({x: 1}));
- for (let nameOnly of[false, true]) {
- // Check that both the nameOnly and full versions of listCollections don't block.
- let res = mydb.runCommand({listCollections: 1, nameOnly, maxTimeMS: 20 * 1000});
- assert.commandWorked(res, "listCollections should have succeeded and not timed out");
- let collObj = res.cursor.firstBatch[0];
- // collObj should only have name and type fields.
- assert.eq('foo', collObj.name);
- assert.eq('collection', collObj.type);
- assert(collObj.hasOwnProperty("idIndex") == !nameOnly, tojson(collObj));
- assert(collObj.hasOwnProperty("options") == !nameOnly, tojson(collObj));
- assert(collObj.hasOwnProperty("info") == !nameOnly, tojson(collObj));
- }
+for (let nameOnly of [false, true]) {
+ // Check that both the nameOnly and full versions of listCollections don't block.
+ let res = mydb.runCommand({listCollections: 1, nameOnly, maxTimeMS: 20 * 1000});
+ assert.commandWorked(res, "listCollections should have succeeded and not timed out");
+ let collObj = res.cursor.firstBatch[0];
+ // collObj should only have name and type fields.
+ assert.eq('foo', collObj.name);
+ assert.eq('collection', collObj.type);
+ assert(collObj.hasOwnProperty("idIndex") == !nameOnly, tojson(collObj));
+ assert(collObj.hasOwnProperty("options") == !nameOnly, tojson(collObj));
+ assert(collObj.hasOwnProperty("info") == !nameOnly, tojson(collObj));
+}
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
}());
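As a quick illustration of the nameOnly contract asserted above (any mongod; the collection name is illustrative): nameOnly:true strips everything but name and type, while the full form also carries idIndex, options, and info.

const nameOnlyDB = db.getSiblingDB("test");
nameOnlyDB.name_only_demo.drop();
assert.commandWorked(nameOnlyDB.createCollection("name_only_demo"));
const fullRes = assert.commandWorked(nameOnlyDB.runCommand({listCollections: 1, nameOnly: false}));
const nameRes = assert.commandWorked(nameOnlyDB.runCommand({listCollections: 1, nameOnly: true}));
printjson(fullRes.cursor.firstBatch[0]);  // name, type, plus idIndex/options/info
printjson(nameRes.cursor.firstBatch[0]);  // name and type only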
diff --git a/jstests/core/txns/listcollections_autocomplete.js b/jstests/core/txns/listcollections_autocomplete.js
index 9020ded9ca0..01921406f43 100644
--- a/jstests/core/txns/listcollections_autocomplete.js
+++ b/jstests/core/txns/listcollections_autocomplete.js
@@ -4,56 +4,55 @@
* @tags: [uses_transactions, assumes_unsharded_collection]
*/
(function() {
- 'use strict';
+'use strict';
- function testAutoComplete() {
- // This method updates a global object with an array of strings on success.
- assert.soon(() => {
- shellAutocomplete("db.");
- return true;
- }, null, 30 * 1000);
- return __autocomplete__;
- }
+function testAutoComplete() {
+ // This method updates a global object with an array of strings on success.
+ assert.soon(() => {
+ shellAutocomplete("db.");
+ return true;
+ }, null, 30 * 1000);
+ return __autocomplete__;
+}
- // Create a collection.
- const collName = 'listcollections_autocomplete';
- assert.commandWorked(db[collName].insertOne({}, {writeConcern: {w: 'majority'}}));
+// Create a collection.
+const collName = 'listcollections_autocomplete';
+assert.commandWorked(db[collName].insertOne({}, {writeConcern: {w: 'majority'}}));
- jsTestLog("Start transaction");
+jsTestLog("Start transaction");
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase('test');
- const sessionColl = sessionDb[collName];
- session.startTransaction_forTesting();
- assert.commandWorked(sessionColl.insertOne({}));
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase('test');
+const sessionColl = sessionDb[collName];
+session.startTransaction_forTesting();
+assert.commandWorked(sessionColl.insertOne({}));
- jsTestLog("Start dropDatabase in parallel shell");
+jsTestLog("Start dropDatabase in parallel shell");
- // Wait for global X lock while blocked behind transaction with global IX lock.
- var awaitShell = startParallelShell(function() {
- db.getSiblingDB("test2").dropDatabase();
- });
+// Wait for global X lock while blocked behind transaction with global IX lock.
+var awaitShell = startParallelShell(function() {
+ db.getSiblingDB("test2").dropDatabase();
+});
- jsTestLog("Wait for dropDatabase to appear in currentOp");
+jsTestLog("Wait for dropDatabase to appear in currentOp");
- assert.soon(() => {
- return db.currentOp({'command.dropDatabase': 1}).inprog;
- });
+assert.soon(() => {
+ return db.currentOp({'command.dropDatabase': 1}).inprog;
+});
- jsTestLog("Test that autocompleting collection names fails quickly");
+jsTestLog("Test that autocompleting collection names fails quickly");
- let db_stuff = testAutoComplete();
- assert(!db_stuff.includes(collName),
- `Completions should not include "${collName}": ${db_stuff}`);
+let db_stuff = testAutoComplete();
+assert(!db_stuff.includes(collName), `Completions should not include "${collName}": ${db_stuff}`);
- // Verify we have some results despite the timeout.
- assert.contains('db.adminCommand(', db_stuff);
+// Verify we have some results despite the timeout.
+assert.contains('db.adminCommand(', db_stuff);
- jsTestLog("Abort transaction");
+jsTestLog("Abort transaction");
- assert.commandWorked(session.abortTransaction_forTesting());
- awaitShell();
- db_stuff = testAutoComplete();
- assert.contains('db.adminCommand(', db_stuff);
- assert.contains(`db.${collName}`, db_stuff);
+assert.commandWorked(session.abortTransaction_forTesting());
+awaitShell();
+db_stuff = testAutoComplete();
+assert.contains('db.adminCommand(', db_stuff);
+assert.contains(`db.${collName}`, db_stuff);
})();
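The moving part in the test above is a pair of mongo shell built-ins: shellAutocomplete(prefix) computes the completions for a prefix and stores them in the global __autocomplete__ array. A minimal sketch:

shellAutocomplete("db.");
assert.contains("db.adminCommand(", __autocomplete__);
print("got " + __autocomplete__.length + " completions for 'db.'");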
diff --git a/jstests/core/txns/many_txns.js b/jstests/core/txns/many_txns.js
index 9516b046a5c..2dfda376423 100644
--- a/jstests/core/txns/many_txns.js
+++ b/jstests/core/txns/many_txns.js
@@ -2,90 +2,95 @@
// many resources (like "write tickets") and don't prevent other operations from succeeding.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "many_txns";
- const numTxns = 150;
-
- const testDB = db.getSiblingDB(dbName);
- const coll = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
-
- const startTime = new Date();
-
- // Non-transactional write to give something to find.
- const initialDoc = {_id: "pretransaction1", x: 0};
- assert.commandWorked(coll.insert(initialDoc, {writeConcern: {w: "majority"}}));
-
- // Start many transactions, each inserting two documents.
- jsTest.log("Start " + numTxns + " transactions, each inserting two documents");
- var sessions = [];
- for (let txnNr = 0; txnNr < numTxns; ++txnNr) {
- const session = testDB.getMongo().startSession(sessionOptions);
- sessions[txnNr] = session;
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
- let doc = seq => ({_id: "txn-" + txnNr + "-" + seq});
-
- session.startTransaction();
-
- let docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [initialDoc]);
-
- // Insert a doc within the transaction.
- assert.commandWorked(sessionColl.insert(doc(1)));
-
- // Read in the same transaction returns the doc, but not from other txns.
- docs = sessionColl.find({_id: {$ne: initialDoc._id}}).toArray();
- assert.sameMembers(docs, [doc(1)]);
-
- // Insert a doc within a transaction.
- assert.commandWorked(sessionColl.insert(doc(2)));
+"use strict";
+
+const dbName = "test";
+const collName = "many_txns";
+const numTxns = 150;
+
+const testDB = db.getSiblingDB(dbName);
+const coll = testDB[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+
+const startTime = new Date();
+
+// Non-transactional write to give something to find.
+const initialDoc = {
+ _id: "pretransaction1",
+ x: 0
+};
+assert.commandWorked(coll.insert(initialDoc, {writeConcern: {w: "majority"}}));
+
+// Start many transactions, each inserting two documents.
+jsTest.log("Start " + numTxns + " transactions, each inserting two documents");
+var sessions = [];
+for (let txnNr = 0; txnNr < numTxns; ++txnNr) {
+ const session = testDB.getMongo().startSession(sessionOptions);
+ sessions[txnNr] = session;
+ const sessionDb = session.getDatabase(dbName);
+ const sessionColl = sessionDb[collName];
+ let doc = seq => ({_id: "txn-" + txnNr + "-" + seq});
+
+ session.startTransaction();
+
+ let docs = sessionColl.find({}).toArray();
+ assert.sameMembers(docs, [initialDoc]);
+
+ // Insert a doc within the transaction.
+ assert.commandWorked(sessionColl.insert(doc(1)));
+
+ // Read in the same transaction returns the doc, but not from other txns.
+ docs = sessionColl.find({_id: {$ne: initialDoc._id}}).toArray();
+ assert.sameMembers(docs, [doc(1)]);
+
+ // Insert a doc within a transaction.
+ assert.commandWorked(sessionColl.insert(doc(2)));
+}
+const secondDoc = {
+ _id: "midtransactions",
+ x: 1
+};
+assert.commandWorked(coll.insert(secondDoc, {writeConcern: {w: "majority"}}));
+
+// Commit all sessions.
+jsTest.log("Commit all transactions.");
+let numAborted = 0;
+for (let txnNr = 0; txnNr < numTxns; ++txnNr) {
+ // First check that a non-transactional operation conflicts and times out quickly.
+ let doc = seq => ({_id: "txn-" + txnNr + "-" + seq});
+ let insertCmd = {insert: collName, documents: [doc(1)], maxTimeMS: 10};
+ let insertRes = testDB.runCommand(insertCmd);
+
+ const session = sessions[txnNr];
+ let commitRes = session.commitTransaction_forTesting();
+ if (commitRes.code === ErrorCodes.NoSuchTransaction) {
+ ++numAborted;
+ continue;
}
- const secondDoc = {_id: "midtransactions", x: 1};
- assert.commandWorked(coll.insert(secondDoc, {writeConcern: {w: "majority"}}));
-
- // Commit all sessions.
- jsTest.log("Commit all transactions.");
- let numAborted = 0;
- for (let txnNr = 0; txnNr < numTxns; ++txnNr) {
- // First check that a non-transactional operation conflicts and times out quickly.
- let doc = seq => ({_id: "txn-" + txnNr + "-" + seq});
- let insertCmd = {insert: collName, documents: [doc(1)], maxTimeMS: 10};
- let insertRes = testDB.runCommand(insertCmd);
-
- const session = sessions[txnNr];
- let commitRes = session.commitTransaction_forTesting();
- if (commitRes.code === ErrorCodes.NoSuchTransaction) {
- ++numAborted;
- continue;
- }
- assert.commandWorked(commitRes, "couldn't commit transaction " + txnNr);
- assert.commandFailedWithCode(insertRes, ErrorCodes.MaxTimeMSExpired, tojson({insertCmd}));
-
- // Read with default read concern sees the committed transaction.
- assert.eq(doc(1), coll.findOne(doc(1)));
- assert.eq(doc(2), coll.findOne(doc(2)));
- session.endSession();
- }
-
- assert.eq(initialDoc, coll.findOne(initialDoc));
- assert.eq(secondDoc, coll.findOne(secondDoc));
-
- const elapsedTime = new Date() - startTime;
- jsTest.log("Test completed with " + numAborted + " aborted transactions in " + elapsedTime +
- " ms");
-
- // Check whether we should expect aborts. If the parameter doesn't exist (mongos) don't check.
- const getParamRes = db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1});
- if (getParamRes.ok && elapsedTime < getParamRes.transactionLifetimeLimitSeconds)
- assert.eq(numAborted,
- 0,
- "should not get aborts when transactionLifetimeLimitSeconds not exceeded");
+ assert.commandWorked(commitRes, "couldn't commit transaction " + txnNr);
+ assert.commandFailedWithCode(insertRes, ErrorCodes.MaxTimeMSExpired, tojson({insertCmd}));
+
+ // Read with default read concern sees the committed transaction.
+ assert.eq(doc(1), coll.findOne(doc(1)));
+ assert.eq(doc(2), coll.findOne(doc(2)));
+ session.endSession();
+}
+
+assert.eq(initialDoc, coll.findOne(initialDoc));
+assert.eq(secondDoc, coll.findOne(secondDoc));
+
+const elapsedTime = new Date() - startTime;
+jsTest.log("Test completed with " + numAborted + " aborted transactions in " + elapsedTime + " ms");
+
+// Check whether we should expect aborts. If the parameter doesn't exist (mongos) don't check.
+const getParamRes = db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1});
+if (getParamRes.ok && elapsedTime < getParamRes.transactionLifetimeLimitSeconds)
+ assert.eq(
+ numAborted, 0, "should not get aborts when transactionLifetimeLimitSeconds not exceeded");
}());
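The probe at the heart of the commit loop above deserves a standalone sketch: a non-transactional write that conflicts with an open transaction does not block indefinitely, it gives up with MaxTimeMSExpired once its maxTimeMS elapses. Replica set assumed; names are illustrative.

const probeDB = db.getSiblingDB("test");
probeDB.txn_conflict_demo.drop();
assert.commandWorked(probeDB.createCollection("txn_conflict_demo"));
const probeSession = db.getMongo().startSession({causalConsistency: false});
const probeColl = probeSession.getDatabase("test").txn_conflict_demo;
probeSession.startTransaction();
assert.commandWorked(probeColl.insert({_id: 1}));
// Conflicts with the uncommitted transactional insert; fails fast instead of blocking.
const probeRes = probeDB.runCommand(
    {insert: "txn_conflict_demo", documents: [{_id: 1}], maxTimeMS: 10});
assert.commandFailedWithCode(probeRes, ErrorCodes.MaxTimeMSExpired, tojson(probeRes));
probeSession.commitTransaction();
probeSession.endSession();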
diff --git a/jstests/core/txns/multi_delete_in_transaction.js b/jstests/core/txns/multi_delete_in_transaction.js
index 5dcfab97217..c8aad0c5c79 100644
--- a/jstests/core/txns/multi_delete_in_transaction.js
+++ b/jstests/core/txns/multi_delete_in_transaction.js
@@ -1,60 +1,61 @@
// Test transactions including multi-deletes
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "multi_delete_in_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "multi_delete_in_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
- jsTest.log("Prepopulate the collection.");
- assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
- {writeConcern: {w: "majority"}}));
+jsTest.log("Prepopulate the collection.");
+assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
+ {writeConcern: {w: "majority"}}));
- jsTest.log("Do an empty multi-delete.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do an empty multi-delete.");
+session.startTransaction({writeConcern: {w: "majority"}});
- // Remove no docs.
- let res = sessionColl.remove({a: 99}, {justOne: false});
- assert.eq(0, res.nRemoved);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]);
+// Remove no docs.
+let res = sessionColl.remove({a: 99}, {justOne: false});
+assert.eq(0, res.nRemoved);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]);
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- jsTest.log("Do a single-result multi-delete.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a single-result multi-delete.");
+session.startTransaction({writeConcern: {w: "majority"}});
- // Remove one doc.
- res = sessionColl.remove({a: 1}, {justOne: false});
- assert.eq(1, res.nRemoved);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}]);
+// Remove one doc.
+res = sessionColl.remove({a: 1}, {justOne: false});
+assert.eq(1, res.nRemoved);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}]);
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- jsTest.log("Do a multiple-result multi-delete.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTest.log("Do a multiple-result multi-delete.");
+session.startTransaction({writeConcern: {w: "majority"}});
- // Remove 2 docs.
- res = sessionColl.remove({a: 0}, {justOne: false});
- assert.eq(2, res.nRemoved);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(), []);
+// Remove 2 docs.
+res = sessionColl.remove({a: 0}, {justOne: false});
+assert.eq(2, res.nRemoved);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(), []);
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Collection should be empty.
- assert.eq(0, testColl.find().itcount());
+// Collection should be empty.
+assert.eq(0, testColl.find().itcount());
}());
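Condensed sketch of the multi-delete semantics the test walks through (replica set assumed; names illustrative): remove with justOne:false reports nRemoved for every matched document, and the deletions become visible to outside readers only at commit.

const delDB = db.getSiblingDB("test");
delDB.multi_delete_demo.drop();
assert.commandWorked(delDB.createCollection("multi_delete_demo"));
assert.commandWorked(delDB.multi_delete_demo.insert([{a: 0}, {a: 0}, {a: 1}]));
const delSession = db.getMongo().startSession({causalConsistency: false});
const delColl = delSession.getDatabase("test").multi_delete_demo;
delSession.startTransaction({writeConcern: {w: "majority"}});
const delRes = delColl.remove({a: 0}, {justOne: false});
assert.eq(2, delRes.nRemoved);                            // both matching docs removed
assert.eq(3, delDB.multi_delete_demo.find().itcount());   // still visible outside the txn
delSession.commitTransaction();
assert.eq(1, delDB.multi_delete_demo.find().itcount());   // only {a: 1} survives
delSession.endSession();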
diff --git a/jstests/core/txns/multi_statement_transaction.js b/jstests/core/txns/multi_statement_transaction.js
index 5a29e81d4d7..37d25b56b26 100644
--- a/jstests/core/txns/multi_statement_transaction.js
+++ b/jstests/core/txns/multi_statement_transaction.js
@@ -1,159 +1,160 @@
// Test basic multi-statement transaction.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "multi_statement_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "multi_statement_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
- /***********************************************************************************************
- * Insert two documents in a transaction.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Insert two documents in a transaction.
+ **********************************************************************************************/
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- jsTest.log("Insert two documents in a transaction");
+jsTest.log("Insert two documents in a transaction");
- session.startTransaction();
+session.startTransaction();
- // Insert a doc within the transaction.
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+// Insert a doc within the transaction.
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-1"}));
- // But read in the same transaction returns the doc.
- assert.docEq({_id: "insert-1"}, sessionColl.findOne());
+// Cannot read with default read concern.
+assert.eq(null, testColl.findOne({_id: "insert-1"}));
+// But read in the same transaction returns the doc.
+assert.docEq({_id: "insert-1"}, sessionColl.findOne());
- // Read with aggregation also returns the document.
- let docs = sessionColl.aggregate([{$match: {_id: "insert-1"}}]).toArray();
- assert.sameMembers([{_id: "insert-1"}], docs);
+// Read with aggregation also returns the document.
+let docs = sessionColl.aggregate([{$match: {_id: "insert-1"}}]).toArray();
+assert.sameMembers([{_id: "insert-1"}], docs);
- // Insert a doc within a transaction.
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+// Insert a doc within a transaction.
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-1"}));
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
+// Cannot read with default read concern.
+assert.eq(null, testColl.findOne({_id: "insert-1"}));
+// Cannot read with default read concern.
+assert.eq(null, testColl.findOne({_id: "insert-2"}));
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read with default read concern sees the committed transaction.
- assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"}));
- assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"}));
+// Read with default read concern sees the committed transaction.
+assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"}));
+assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"}));
- /***********************************************************************************************
- * Update documents in a transaction.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Update documents in a transaction.
+ **********************************************************************************************/
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- jsTest.log("Update documents in a transaction");
+jsTest.log("Update documents in a transaction");
- // Insert the docs to be updated.
- assert.commandWorked(sessionColl.insert([{_id: "update-1", a: 0}, {_id: "update-2", a: 0}],
- {writeConcern: {w: "majority"}}));
+// Insert the docs to be updated.
+assert.commandWorked(sessionColl.insert([{_id: "update-1", a: 0}, {_id: "update-2", a: 0}],
+ {writeConcern: {w: "majority"}}));
- // Update the docs in a new transaction.
- session.startTransaction();
+// Update the docs in a new transaction.
+session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: "update-1"}, {$inc: {a: 1}}));
+assert.commandWorked(sessionColl.update({_id: "update-1"}, {$inc: {a: 1}}));
- // Batch update in transaction.
- let bulk = sessionColl.initializeUnorderedBulkOp();
- bulk.find({_id: "update-1"}).updateOne({$inc: {a: 1}});
- bulk.find({_id: "update-2"}).updateOne({$inc: {a: 1}});
- assert.commandWorked(bulk.execute());
+// Batch update in transaction.
+let bulk = sessionColl.initializeUnorderedBulkOp();
+bulk.find({_id: "update-1"}).updateOne({$inc: {a: 1}});
+bulk.find({_id: "update-2"}).updateOne({$inc: {a: 1}});
+assert.commandWorked(bulk.execute());
- // Cannot read with default read concern.
- assert.eq({_id: "update-1", a: 0}, testColl.findOne({_id: "update-1"}));
- assert.eq({_id: "update-2", a: 0}, testColl.findOne({_id: "update-2"}));
+// Cannot read with default read concern.
+assert.eq({_id: "update-1", a: 0}, testColl.findOne({_id: "update-1"}));
+assert.eq({_id: "update-2", a: 0}, testColl.findOne({_id: "update-2"}));
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read with default read concern sees the committed transaction.
- assert.eq({_id: "update-1", a: 2}, testColl.findOne({_id: "update-1"}));
- assert.eq({_id: "update-2", a: 1}, testColl.findOne({_id: "update-2"}));
+// Read with default read concern sees the committed transaction.
+assert.eq({_id: "update-1", a: 2}, testColl.findOne({_id: "update-1"}));
+assert.eq({_id: "update-2", a: 1}, testColl.findOne({_id: "update-2"}));
- /***********************************************************************************************
- * Insert, update and read documents in a transaction.
- **********************************************************************************************/
+/***********************************************************************************************
+ * Insert, update and read documents in a transaction.
+ **********************************************************************************************/
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- jsTest.log("Insert, update and read documents in a transaction");
+jsTest.log("Insert, update and read documents in a transaction");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert([{_id: "doc-1"}, {_id: "doc-2"}]));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert([{_id: "doc-1"}, {_id: "doc-2"}]));
- // Update the two docs in transaction.
- assert.commandWorked(sessionColl.update({_id: "doc-1"}, {$inc: {a: 1}}));
- assert.commandWorked(sessionColl.update({_id: "doc-2"}, {$inc: {a: 1}}));
+// Update the two docs in transaction.
+assert.commandWorked(sessionColl.update({_id: "doc-1"}, {$inc: {a: 1}}));
+assert.commandWorked(sessionColl.update({_id: "doc-2"}, {$inc: {a: 1}}));
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "doc-1"}));
- assert.eq(null, testColl.findOne({_id: "doc-2"}));
+// Cannot read with default read concern.
+assert.eq(null, testColl.findOne({_id: "doc-1"}));
+assert.eq(null, testColl.findOne({_id: "doc-2"}));
- // But read in the same transaction returns the docs.
- docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}]}).toArray();
- assert.sameMembers([{_id: "doc-1", a: 1}, {_id: "doc-2", a: 1}], docs);
+// But read in the same transaction returns the docs.
+docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}]}).toArray();
+assert.sameMembers([{_id: "doc-1", a: 1}, {_id: "doc-2", a: 1}], docs);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read with default read concern sees the committed transaction.
- assert.eq({_id: "doc-1", a: 1}, testColl.findOne({_id: "doc-1"}));
- assert.eq({_id: "doc-2", a: 1}, testColl.findOne({_id: "doc-2"}));
+// Read with default read concern sees the committed transaction.
+assert.eq({_id: "doc-1", a: 1}, testColl.findOne({_id: "doc-1"}));
+assert.eq({_id: "doc-2", a: 1}, testColl.findOne({_id: "doc-2"}));
- jsTest.log("Insert and delete documents in a transaction");
+jsTest.log("Insert and delete documents in a transaction");
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(
- testColl.insert([{_id: "doc-1"}, {_id: "doc-2"}], {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ testColl.insert([{_id: "doc-1"}, {_id: "doc-2"}], {writeConcern: {w: "majority"}}));
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "doc-3"}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "doc-3"}));
- // Remove three docs in transaction.
- assert.commandWorked(sessionColl.remove({_id: "doc-1"}));
+// Remove three docs in transaction.
+assert.commandWorked(sessionColl.remove({_id: "doc-1"}));
- // Batch delete.
- bulk = sessionColl.initializeUnorderedBulkOp();
- bulk.find({_id: "doc-2"}).removeOne();
- bulk.find({_id: "doc-3"}).removeOne();
- assert.commandWorked(bulk.execute());
+// Batch delete.
+bulk = sessionColl.initializeUnorderedBulkOp();
+bulk.find({_id: "doc-2"}).removeOne();
+bulk.find({_id: "doc-3"}).removeOne();
+assert.commandWorked(bulk.execute());
- // Cannot read the new doc and still see the to-be removed docs with default read concern.
- assert.eq({_id: "doc-1"}, testColl.findOne({_id: "doc-1"}));
- assert.eq({_id: "doc-2"}, testColl.findOne({_id: "doc-2"}));
- assert.eq(null, testColl.findOne({_id: "doc-3"}));
+// With default read concern, the new doc is not yet visible and the to-be-removed docs are still visible.
+assert.eq({_id: "doc-1"}, testColl.findOne({_id: "doc-1"}));
+assert.eq({_id: "doc-2"}, testColl.findOne({_id: "doc-2"}));
+assert.eq(null, testColl.findOne({_id: "doc-3"}));
- // But read in the same transaction sees the docs get deleted.
- docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}, {_id: "doc-3"}]}).toArray();
- assert.sameMembers([], docs);
+// But read in the same transaction sees the docs get deleted.
+docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}, {_id: "doc-3"}]}).toArray();
+assert.sameMembers([], docs);
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read with default read concern sees the commmitted transaction.
- assert.eq(null, testColl.findOne({_id: "doc-1"}));
- assert.eq(null, testColl.findOne({_id: "doc-2"}));
- assert.eq(null, testColl.findOne({_id: "doc-3"}));
+// Read with default read concern sees the committed transaction.
+assert.eq(null, testColl.findOne({_id: "doc-1"}));
+assert.eq(null, testColl.findOne({_id: "doc-2"}));
+assert.eq(null, testColl.findOne({_id: "doc-3"}));
- session.endSession();
+session.endSession();
}());
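The recurring assertion pattern above boils down to snapshot isolation: a write inside an open transaction is visible to reads in that transaction and invisible outside it until commit. A compact sketch (replica set assumed; names illustrative):

const isoDB = db.getSiblingDB("test");
isoDB.txn_iso_demo.drop();
assert.commandWorked(isoDB.createCollection("txn_iso_demo"));
const isoSession = db.getMongo().startSession();
const isoColl = isoSession.getDatabase("test").txn_iso_demo;
isoSession.startTransaction();
assert.commandWorked(isoColl.insert({_id: "in-txn"}));
assert.docEq({_id: "in-txn"}, isoColl.findOne());  // visible inside the transaction
assert.eq(null, isoDB.txn_iso_demo.findOne());     // invisible outside until commit
isoSession.commitTransaction();
assert.docEq({_id: "in-txn"}, isoDB.txn_iso_demo.findOne());
isoSession.endSession();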
diff --git a/jstests/core/txns/multi_statement_transaction_abort.js b/jstests/core/txns/multi_statement_transaction_abort.js
index 3e8e8a62758..a7946af8eda 100644
--- a/jstests/core/txns/multi_statement_transaction_abort.js
+++ b/jstests/core/txns/multi_statement_transaction_abort.js
@@ -1,255 +1,257 @@
// Test basic multi-statement transaction abort.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "multi_statement_transaction_abort";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- let txnNumber = 0;
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
-
- jsTest.log("Insert two documents in a transaction and abort");
-
- // Insert a doc within the transaction.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Insert a doc within a transaction.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-2"}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-1"}));
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
-
- // abortTransaction can only be run on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Read with default read concern cannot see the aborted transaction.
- assert.eq(null, testColl.findOne({_id: "insert-1"}));
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
-
- jsTest.log("Insert two documents in a transaction and commit");
-
- // Insert a doc with the same _id in a new transaction should work.
- txnNumber++;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1"}, {_id: "insert-2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
- // commitTransaction can only be called on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
- // Read with default read concern sees the committed transaction.
- assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"}));
- assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"}));
-
- jsTest.log("Cannot abort empty transaction because it's not in progress");
- txnNumber++;
- // abortTransaction can only be called on the admin database.
- let res = sessionDb.adminCommand({
- abortTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- });
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- jsTest.log("Abort transaction on duplicated key errors");
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(testColl.insert({_id: "insert-1"}, {writeConcern: {w: "majority"}}));
- txnNumber++;
- // The first insert works well.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
- // But the second insert throws duplicated index key error.
- res = assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1", x: 0}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }),
- ErrorCodes.DuplicateKey);
- // DuplicateKey is not a transient error.
- assert.eq(res.errorLabels, null);
-
- // The error aborts the transaction.
- // commitTransaction can only be called on the admin database.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- // Verify the documents are the same.
- assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"}));
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
-
- jsTest.log("Abort transaction on write conflict errors");
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- txnNumber++;
- const session2 = testDB.getMongo().startSession(sessionOptions);
- const sessionDb2 = session2.getDatabase(dbName);
- // Insert a doc from session 1.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1", from: 1}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
- let txnNumber2 = 0;
- // Insert a doc from session 2 that doesn't conflict with session 1.
- assert.commandWorked(sessionDb2.runCommand({
- insert: collName,
- documents: [{_id: "insert-2", from: 2}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber2),
- startTransaction: true,
- autocommit: false
- }));
- // Insert a doc from session 2 that conflicts with session 1.
- res = sessionDb2.runCommand({
- insert: collName,
- documents: [{_id: "insert-1", from: 2}],
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- });
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- // Session 1 isn't affected.
- // commitTransaction can only be called on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
- // Transaction on session 2 is aborted.
- assert.commandFailedWithCode(sessionDb2.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- // Verify the documents only reflect the first transaction.
- assert.eq({_id: "insert-1", from: 1}, testColl.findOne({_id: "insert-1"}));
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
-
- jsTest.log("Higher transaction number aborts existing running transaction.");
- txnNumber++;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "running-txn-1"}, {_id: "running-txn-2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
- // A higher txnNumber aborts the old and inserts the same document.
- txnNumber++;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "running-txn-2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
- // commitTransaction can only be called on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
- // Read with default read concern sees the committed transaction but cannot see the aborted one.
- assert.eq(null, testColl.findOne({_id: "running-txn-1"}));
- assert.eq({_id: "running-txn-2"}, testColl.findOne({_id: "running-txn-2"}));
-
- jsTest.log("Higher transaction number aborts existing running snapshot read.");
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(
- testColl.insert([{doc: 1}, {doc: 2}, {doc: 3}], {writeConcern: {w: "majority"}}));
- txnNumber++;
- // Perform a snapshot read under a new transaction.
- let runningReadResult = assert.commandWorked(sessionDb.runCommand({
- find: collName,
- batchSize: 2,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // The cursor has not been exhausted.
- assert(runningReadResult.hasOwnProperty("cursor"), tojson(runningReadResult));
- assert.neq(0, runningReadResult.cursor.id, tojson(runningReadResult));
-
- txnNumber++;
- // Perform a second snapshot read under a new transaction.
- let newReadResult = assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // The cursor has been exhausted.
- assert(newReadResult.hasOwnProperty("cursor"), tojson(newReadResult));
- assert.eq(0, newReadResult.cursor.id, tojson(newReadResult));
- // commitTransaction can only be called on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const collName = "multi_statement_transaction_abort";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+let txnNumber = 0;
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+
+jsTest.log("Insert two documents in a transaction and abort");
+
+// Insert a doc within the transaction.
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Insert a doc within a transaction.
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-2"}],
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+
+// Cannot read with default read concern.
+assert.eq(null, testColl.findOne({_id: "insert-1"}));
+// Cannot read with default read concern.
+assert.eq(null, testColl.findOne({_id: "insert-2"}));
+
+// abortTransaction can only be run on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ abortTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+
+// Read with default read concern cannot see the aborted transaction.
+assert.eq(null, testColl.findOne({_id: "insert-1"}));
+assert.eq(null, testColl.findOne({_id: "insert-2"}));
+
+jsTest.log("Insert two documents in a transaction and commit");
+
+// Inserting a doc with the same _id in a new transaction should work.
+txnNumber++;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1"}, {_id: "insert-2"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+// commitTransaction can only be called on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+// Read with default read concern sees the committed transaction.
+assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"}));
+assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"}));
+
+jsTest.log("Cannot abort empty transaction because it's not in progress");
+txnNumber++;
+// abortTransaction can only be called on the admin database.
+let res = sessionDb.adminCommand({
+ abortTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+jsTest.log("Abort transaction on duplicated key errors");
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.insert({_id: "insert-1"}, {writeConcern: {w: "majority"}}));
+txnNumber++;
+// The first insert succeeds.
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-2"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+// But the second insert throws a duplicate key error.
+res = assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1", x: 0}],
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}),
+ ErrorCodes.DuplicateKey);
+// DuplicateKey is not a transient error.
+assert.eq(res.errorLabels, null);
+
+// The error aborts the transaction.
+// commitTransaction can only be called on the admin database.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+// Verify the documents are the same.
+assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"}));
+assert.eq(null, testColl.findOne({_id: "insert-2"}));
+
+jsTest.log("Abort transaction on write conflict errors");
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+txnNumber++;
+const session2 = testDB.getMongo().startSession(sessionOptions);
+const sessionDb2 = session2.getDatabase(dbName);
+// Insert a doc from session 1.
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1", from: 1}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+let txnNumber2 = 0;
+// Insert a doc from session 2 that doesn't conflict with session 1.
+assert.commandWorked(sessionDb2.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-2", from: 2}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber2),
+ startTransaction: true,
+ autocommit: false
+}));
+// Insert a doc from session 2 that conflicts with session 1.
+res = sessionDb2.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1", from: 2}],
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+// Session 1 isn't affected.
+// commitTransaction can only be called on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+// Transaction on session 2 is aborted.
+assert.commandFailedWithCode(sessionDb2.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+// Verify the documents only reflect the first transaction.
+assert.eq({_id: "insert-1", from: 1}, testColl.findOne({_id: "insert-1"}));
+assert.eq(null, testColl.findOne({_id: "insert-2"}));
+
+jsTest.log("Higher transaction number aborts existing running transaction.");
+txnNumber++;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "running-txn-1"}, {_id: "running-txn-2"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+// A higher txnNumber aborts the old transaction and inserts the same document.
+txnNumber++;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "running-txn-2"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+// commitTransaction can only be called on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+// Read with default read concern sees the committed transaction but cannot see the aborted one.
+assert.eq(null, testColl.findOne({_id: "running-txn-1"}));
+assert.eq({_id: "running-txn-2"}, testColl.findOne({_id: "running-txn-2"}));
+
+jsTest.log("Higher transaction number aborts existing running snapshot read.");
+assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ testColl.insert([{doc: 1}, {doc: 2}, {doc: 3}], {writeConcern: {w: "majority"}}));
+txnNumber++;
+// Perform a snapshot read under a new transaction.
+let runningReadResult = assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ batchSize: 2,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// The cursor has not been exhausted.
+assert(runningReadResult.hasOwnProperty("cursor"), tojson(runningReadResult));
+assert.neq(0, runningReadResult.cursor.id, tojson(runningReadResult));
+
+txnNumber++;
+// Perform a second snapshot read under a new transaction.
+let newReadResult = assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// The cursor has been exhausted.
+assert(newReadResult.hasOwnProperty("cursor"), tojson(newReadResult));
+assert.eq(0, newReadResult.cursor.id, tojson(newReadResult));
+// commitTransaction can only be called on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+
+session.endSession();
}());
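Stripped of the assertions, the raw-command protocol this test drives has a fixed shape: the first statement carries startTransaction:true, every statement carries the session's txnNumber and autocommit:false, and commitTransaction/abortTransaction must be sent to the admin database. A skeleton (replica set assumed; names illustrative):

const rawDB = db.getSiblingDB("test");
rawDB.raw_txn_demo.drop();
assert.commandWorked(rawDB.createCollection("raw_txn_demo"));
const rawSession = db.getMongo().startSession({causalConsistency: false});
const rawSessionDb = rawSession.getDatabase("test");
const rawTxnNumber = NumberLong(0);
assert.commandWorked(rawSessionDb.runCommand({
    insert: "raw_txn_demo",
    documents: [{_id: 1}],
    readConcern: {level: "snapshot"},
    txnNumber: rawTxnNumber,
    startTransaction: true,  // first statement only
    autocommit: false        // every statement
}));
// Swap commitTransaction for abortTransaction to roll back instead.
assert.commandWorked(rawSessionDb.adminCommand({
    commitTransaction: 1,
    txnNumber: rawTxnNumber,
    autocommit: false,
    writeConcern: {w: "majority"}
}));
rawSession.endSession();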
diff --git a/jstests/core/txns/multi_statement_transaction_command_args.js b/jstests/core/txns/multi_statement_transaction_command_args.js
index f0e4ce29759..ef176a4a28b 100644
--- a/jstests/core/txns/multi_statement_transaction_command_args.js
+++ b/jstests/core/txns/multi_statement_transaction_command_args.js
@@ -5,310 +5,308 @@
*/
(function() {
- "use strict";
- load('jstests/libs/uuid_util.js');
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Makes assertions on commands run without logical session ids.
- TestData.disableImplicitSessions = true;
-
- const dbName = "test";
- const collName = "multi_statement_transaction_command_args";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
- let txnNumber = 0;
-
- // Set up the test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- // Initiate the session.
- const sessionOptions = {causalConsistency: false};
- let session = db.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
-
- /***********************************************************************************************
- * Verify that fields are not accepted unless their preconditional fields are present in
- * this hierarchy: lsid -> txnNumber -> autocommit -> startTransaction
- * Omitted fields are commented out explicitly.
- **********************************************************************************************/
-
- // lsid -> txnNumber.
- // Running a command through 'sessionDb' implicitly attaches an 'lsid' to commands,
- // so 'testDB' is used instead.
- jsTestLog("Try to begin a transaction with txnNumber but no lsid");
- txnNumber++;
- let res = assert.commandFailedWithCode(testDB.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- // autocommit: false,
- // startTransaction: true
- }),
- ErrorCodes.InvalidOptions);
- assert(res.errmsg.includes("Transaction number requires a session ID"));
-
- // txnNumber -> autocommit
- jsTestLog("Try to begin a transaction with autocommit but no txnNumber");
- txnNumber++;
- res = assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- // txnNumber: NumberLong(txnNumber),
- autocommit: false,
- // startTransaction: true
- }),
- ErrorCodes.InvalidOptions);
- assert(res.errmsg.includes("'autocommit' field requires a transaction number"));
-
- // autocommit -> startTransaction
- jsTestLog("Try to begin a transaction with startTransaction but no autocommit");
- txnNumber++;
- res = assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- // autocommit: false,
- startTransaction: true
- }),
+"use strict";
+load('jstests/libs/uuid_util.js');
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Makes assertions on commands run without logical session ids.
+TestData.disableImplicitSessions = true;
+
+const dbName = "test";
+const collName = "multi_statement_transaction_command_args";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+let txnNumber = 0;
+
+// Set up the test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+// Initiate the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = db.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+
+/***********************************************************************************************
+ * Verify that fields are not accepted unless their preconditional fields are present in
+ * this hierarchy: lsid -> txnNumber -> autocommit -> startTransaction
+ * Omitted fields are commented out explicitly.
+ **********************************************************************************************/
+
+// lsid -> txnNumber.
+// Running a command through 'sessionDb' implicitly attaches an 'lsid' to commands,
+// so 'testDB' is used instead.
+jsTestLog("Try to begin a transaction with txnNumber but no lsid");
+txnNumber++;
+let res = assert.commandFailedWithCode(testDB.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ // autocommit: false,
+ // startTransaction: true
+}),
ErrorCodes.InvalidOptions);
- assert(res.errmsg.includes("'startTransaction' field requires 'autocommit' field"));
-
- /***********************************************************************************************
- * Verify that the 'startTransaction' argument works correctly.
- **********************************************************************************************/
-
- jsTestLog("Begin a transaction with startTransaction=true and autocommit=false");
- txnNumber++;
-
- // Start the transaction.
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Commit the transaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
-
- jsTestLog("Try to start an already in progress transaction.");
- txnNumber++;
-
- // Start the transaction.
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Try to start the transaction again.
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }),
- ErrorCodes.ConflictingOperationInProgress);
-
- // Commit the transaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
-
- jsTestLog(
- "Try to begin a transaction by omitting 'startTransaction' and setting autocommit=false");
- txnNumber++;
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }),
- [ErrorCodes.InvalidOptions, ErrorCodes.NoSuchTransaction]);
-
- jsTestLog("Try to begin a transaction with startTransaction=false and autocommit=false");
- txnNumber++;
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: false,
- autocommit: false
- }),
- ErrorCodes.InvalidOptions);
-
- /***********************************************************************************************
- * Setting autocommit=true or omitting autocommit on a transaction operation fails.
- **********************************************************************************************/
-
- jsTestLog("Run an initial transaction operation with autocommit=true");
- txnNumber++;
-
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: true
- }),
- ErrorCodes.InvalidOptions);
-
- // Mongos has special handling for commitTransaction to support commit recovery.
- if (!FixtureHelpers.isMongos(sessionDb)) {
- // Committing the transaction should fail.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.NoSuchTransaction);
- }
-
- jsTestLog("Run a non-initial transaction operation with autocommit=true");
- txnNumber++;
-
- // Start the transaction with an insert.
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Try to execute a transaction operation with autocommit=true. It should fail without affecting
- // the transaction.
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: txnNumber + "_1"}],
- txnNumber: NumberLong(txnNumber),
- autocommit: true
- }),
- ErrorCodes.InvalidOptions);
-
- // Try to execute a transaction operation without an autocommit field. It should fail without
- // affecting the transaction.
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: txnNumber + "_2"}],
- txnNumber: NumberLong(txnNumber),
- }),
- ErrorCodes.IncompleteTransactionHistory);
-
- // Committing the transaction should succeed.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
-
- /***********************************************************************************************
- * Invalid to include autocommit field on an operation not inside a transaction.
- **********************************************************************************************/
-
- jsTestLog("Run an operation with autocommit=false outside of a transaction.");
- txnNumber++;
-
- assert.commandWorked(sessionDb.runCommand({find: collName, filter: {}}));
-
- assert.commandFailedWithCode(
- sessionDb.runCommand(
- {find: collName, filter: {}, txnNumber: NumberLong(txnNumber), autocommit: false}),
- ErrorCodes.NoSuchTransaction);
-
- /***********************************************************************************************
- * The 'autocommit' field must be specified on commit/abort commands.
- **********************************************************************************************/
-
- jsTestLog("Run a commitTransaction command with valid and invalid 'autocommit' field values.");
- txnNumber++;
-
- // Start the transaction with an insert.
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Committing the transaction should fail if 'autocommit' is omitted.
+assert(res.errmsg.includes("Transaction number requires a session ID"));
+
+// txnNumber -> autocommit
+jsTestLog("Try to begin a transaction with autocommit but no txnNumber");
+txnNumber++;
+res = assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ // txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ // startTransaction: true
+}),
+ ErrorCodes.InvalidOptions);
+assert(res.errmsg.includes("'autocommit' field requires a transaction number"));
+
+// autocommit -> startTransaction
+jsTestLog("Try to begin a transaction with startTransaction but no autocommit");
+txnNumber++;
+res = assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ // autocommit: false,
+ startTransaction: true
+}),
+ ErrorCodes.InvalidOptions);
+assert(res.errmsg.includes("'startTransaction' field requires 'autocommit' field"));
+
+/***********************************************************************************************
+ * Verify that the 'startTransaction' argument works correctly.
+ **********************************************************************************************/
+
+jsTestLog("Begin a transaction with startTransaction=true and autocommit=false");
+txnNumber++;
+
+// Start the transaction.
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Commit the transaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+
+jsTestLog("Try to start an already in progress transaction.");
+txnNumber++;
+
+// Start the transaction.
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Try to start the transaction again.
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}),
+ ErrorCodes.ConflictingOperationInProgress);
+
+// Commit the transaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+
+jsTestLog("Try to begin a transaction by omitting 'startTransaction' and setting autocommit=false");
+txnNumber++;
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}),
+ [ErrorCodes.InvalidOptions, ErrorCodes.NoSuchTransaction]);
+
+jsTestLog("Try to begin a transaction with startTransaction=false and autocommit=false");
+txnNumber++;
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: false,
+ autocommit: false
+}),
+ ErrorCodes.InvalidOptions);
+
+/***********************************************************************************************
+ * Setting autocommit=true or omitting autocommit on a transaction operation fails.
+ **********************************************************************************************/
+
+jsTestLog("Run an initial transaction operation with autocommit=true");
+txnNumber++;
+
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: true
+}),
+ ErrorCodes.InvalidOptions);
+
+// Mongos has special handling for commitTransaction to support commit recovery.
+if (!FixtureHelpers.isMongos(sessionDb)) {
+ // Committing the transaction should fail.
assert.commandFailedWithCode(sessionDb.adminCommand({
commitTransaction: 1,
txnNumber: NumberLong(txnNumber),
- writeConcern: {w: "majority"}
- }),
- 50768);
-
- // Committing the transaction should fail if autocommit=true.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: "majority"},
- autocommit: true
- }),
- ErrorCodes.InvalidOptions);
-
- // Committing the transaction should succeed.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
autocommit: false,
writeConcern: {w: "majority"}
- }));
-
- jsTestLog("Run an abortTransaction command with and without an 'autocommit' field");
- txnNumber++;
-
- // Start the transaction with an insert.
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false,
- }));
-
- // Aborting the transaction should fail if 'autocommit' is omitted.
- assert.commandFailedWithCode(
- sessionDb.adminCommand({abortTransaction: 1, txnNumber: NumberLong(txnNumber)}), 50768);
-
- // Aborting the transaction should fail if autocommit=true.
- assert.commandFailedWithCode(
- sessionDb.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: true}),
- ErrorCodes.InvalidOptions);
-
- // Aborting the transaction should succeed.
- assert.commandWorked(sessionDb.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+ }),
+ ErrorCodes.NoSuchTransaction);
+}
+
+jsTestLog("Run a non-initial transaction operation with autocommit=true");
+txnNumber++;
+
+// Start the transaction with an insert.
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Try to execute a transaction operation with autocommit=true. It should fail without affecting
+// the transaction.
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: txnNumber + "_1"}],
+ txnNumber: NumberLong(txnNumber),
+ autocommit: true
+}),
+ ErrorCodes.InvalidOptions);
+
+// Try to execute a transaction operation without an autocommit field. It should fail without
+// affecting the transaction.
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: txnNumber + "_2"}],
+ txnNumber: NumberLong(txnNumber),
+}),
+ ErrorCodes.IncompleteTransactionHistory);
+
+// Committing the transaction should succeed.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+
+/***********************************************************************************************
+ * Invalid to include autocommit field on an operation not inside a transaction.
+ **********************************************************************************************/
+
+jsTestLog("Run an operation with autocommit=false outside of a transaction.");
+txnNumber++;
+
+assert.commandWorked(sessionDb.runCommand({find: collName, filter: {}}));
+
+assert.commandFailedWithCode(
+ sessionDb.runCommand(
+ {find: collName, filter: {}, txnNumber: NumberLong(txnNumber), autocommit: false}),
+ ErrorCodes.NoSuchTransaction);
+
+/***********************************************************************************************
+ * The 'autocommit' field must be specified on commit/abort commands.
+ **********************************************************************************************/
+
+jsTestLog("Run a commitTransaction command with valid and invalid 'autocommit' field values.");
+txnNumber++;
+
+// Start the transaction with an insert.
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Committing the transaction should fail if 'autocommit' is omitted.
+assert.commandFailedWithCode(
+ sessionDb.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), writeConcern: {w: "majority"}}),
+ 50768);
+
+// Committing the transaction should fail if autocommit=true.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: "majority"},
+ autocommit: true
+}),
+ ErrorCodes.InvalidOptions);
+
+// Committing the transaction should succeed.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+
+jsTestLog("Run an abortTransaction command with and without an 'autocommit' field");
+txnNumber++;
+
+// Start the transaction with an insert.
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false,
+}));
+
+// Aborting the transaction should fail if 'autocommit' is omitted.
+assert.commandFailedWithCode(
+ sessionDb.adminCommand({abortTransaction: 1, txnNumber: NumberLong(txnNumber)}), 50768);
+
+// Aborting the transaction should fail if autocommit=true.
+assert.commandFailedWithCode(
+ sessionDb.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: true}),
+ ErrorCodes.InvalidOptions);
+
+// Aborting the transaction should succeed.
+assert.commandWorked(sessionDb.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
}());
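// For reference, a minimal sketch of the raw-command transaction lifecycle that the
// argument checks above exercise piecewise. This assumes a shell `db` connected to a
// replica set; the database and collection names here are illustrative, not part of
// the test above.
const rawSession = db.getMongo().startSession({causalConsistency: false});
const rawDb = rawSession.getDatabase("test");
// Every in-transaction statement carries the session's lsid (implicit via rawDb), the
// same txnNumber, and autocommit: false; only the first statement sets
// startTransaction: true.
assert.commandWorked(rawDb.runCommand({
    find: "example",
    filter: {},
    readConcern: {level: "snapshot"},
    txnNumber: NumberLong(0),
    startTransaction: true,
    autocommit: false
}));
// commitTransaction is an admin command and must also carry autocommit: false.
assert.commandWorked(rawDb.adminCommand({
    commitTransaction: 1,
    txnNumber: NumberLong(0),
    autocommit: false,
    writeConcern: {w: "majority"}
}));
rawSession.endSession();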
diff --git a/jstests/core/txns/multi_statement_transaction_using_api.js b/jstests/core/txns/multi_statement_transaction_using_api.js
index d9b440c355c..910fa45c68b 100644
--- a/jstests/core/txns/multi_statement_transaction_using_api.js
+++ b/jstests/core/txns/multi_statement_transaction_using_api.js
@@ -1,114 +1,116 @@
// Test basic transaction write ops, reads, and commit/abort using the shell helper.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "multi_transaction_test_using_api";
- const testDB = db.getSiblingDB(dbName);
+const dbName = "test";
+const collName = "multi_transaction_test_using_api";
+const testDB = db.getSiblingDB(dbName);
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
- //
- // Test that calling abortTransaction as the first statement in a transaction is allowed and
- // modifies the state accordingly.
- //
- jsTestLog("Call abortTransaction as the first statement in a transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+//
+// Test that calling abortTransaction as the first statement in a transaction is allowed and
+// modifies the state accordingly.
+//
+jsTestLog("Call abortTransaction as the first statement in a transaction");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- // Successfully call abortTransaction.
- assert.commandWorked(session.abortTransaction_forTesting());
+// Successfully call abortTransaction.
+assert.commandWorked(session.abortTransaction_forTesting());
- //
- // Test that calling commitTransaction as the first statement in a transaction is allowed and
- // modifies the state accordingly.
- //
- jsTestLog("Call commitTransaction as the first statement in a transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+//
+// Test that calling commitTransaction as the first statement in a transaction is allowed and
+// modifies the state accordingly.
+//
+jsTestLog("Call commitTransaction as the first statement in a transaction");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- // Successfully call commitTransaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Successfully call commitTransaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Run CRUD ops, read ops, and commit transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+jsTestLog("Run CRUD ops, read ops, and commit transaction.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- // Performing a read first should work when snapshot readConcern is specified.
- assert.docEq(null, sessionColl.findOne({_id: "insert-1"}));
+// Performing a read first should work when snapshot readConcern is specified.
+assert.docEq(null, sessionColl.findOne({_id: "insert-1"}));
- assert.commandWorked(sessionColl.insert({_id: "insert-1", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-1", a: 0}));
- assert.commandWorked(sessionColl.insert({_id: "insert-2", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-2", a: 0}));
- assert.commandWorked(sessionColl.insert({_id: "insert-3", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-3", a: 0}));
- assert.commandWorked(sessionColl.update({_id: "insert-1"}, {$inc: {a: 1}}));
+assert.commandWorked(sessionColl.update({_id: "insert-1"}, {$inc: {a: 1}}));
- assert.commandWorked(sessionColl.deleteOne({_id: "insert-2"}));
+assert.commandWorked(sessionColl.deleteOne({_id: "insert-2"}));
- sessionColl.findAndModify({query: {_id: "insert-3"}, update: {$set: {a: 2}}});
+sessionColl.findAndModify({query: {_id: "insert-3"}, update: {$set: {a: 2}}});
- // Try to find a document within a transaction.
- let cursor = sessionColl.find({_id: "insert-1"});
- assert.docEq({_id: "insert-1", a: 1}, cursor.next());
- assert(!cursor.hasNext());
+// Try to find a document within a transaction.
+let cursor = sessionColl.find({_id: "insert-1"});
+assert.docEq({_id: "insert-1", a: 1}, cursor.next());
+assert(!cursor.hasNext());
- // Try to find a document using findOne within a transaction
- assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
+// Try to find a document using findOne within a transaction
+assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
- // Find a document with the aggregation shell helper within a transaction.
- cursor = sessionColl.aggregate({$match: {_id: "insert-1"}});
- assert.docEq({_id: "insert-1", a: 1}, cursor.next());
- assert(!cursor.hasNext());
+// Find a document with the aggregation shell helper within a transaction.
+cursor = sessionColl.aggregate({$match: {_id: "insert-1"}});
+assert.docEq({_id: "insert-1", a: 1}, cursor.next());
+assert(!cursor.hasNext());
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Make sure the correct documents exist after committing the transaction.
- assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
- assert.eq({_id: "insert-3", a: 2}, sessionColl.findOne({_id: "insert-3"}));
- assert.eq(null, sessionColl.findOne({_id: "insert-2"}));
+// Make sure the correct documents exist after committing the transaction.
+assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
+assert.eq({_id: "insert-3", a: 2}, sessionColl.findOne({_id: "insert-3"}));
+assert.eq(null, sessionColl.findOne({_id: "insert-2"}));
- jsTestLog("Insert a doc and abort transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+jsTestLog("Insert a doc and abort transaction.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.insert({_id: "insert-4", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-4", a: 0}));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // Verify that we cannot see the document we tried to insert.
- assert.eq(null, sessionColl.findOne({_id: "insert-4"}));
+// Verify that we cannot see the document we tried to insert.
+assert.eq(null, sessionColl.findOne({_id: "insert-4"}));
- jsTestLog("Bulk insert and update operations within transaction.");
+jsTestLog("Bulk insert and update operations within transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- let bulk = sessionColl.initializeUnorderedBulkOp();
- bulk.insert({_id: "bulk-1"});
- bulk.insert({_id: "bulk-2"});
- bulk.find({_id: "bulk-1"}).updateOne({$set: {status: "bulk"}});
- bulk.find({_id: "bulk-2"}).updateOne({$set: {status: "bulk"}});
- assert.commandWorked(bulk.execute());
- assert.commandWorked(session.commitTransaction_forTesting());
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+let bulk = sessionColl.initializeUnorderedBulkOp();
+bulk.insert({_id: "bulk-1"});
+bulk.insert({_id: "bulk-2"});
+bulk.find({_id: "bulk-1"}).updateOne({$set: {status: "bulk"}});
+bulk.find({_id: "bulk-2"}).updateOne({$set: {status: "bulk"}});
+assert.commandWorked(bulk.execute());
+assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq({_id: "bulk-1", status: "bulk"}, sessionColl.findOne({_id: "bulk-1"}));
- assert.eq({_id: "bulk-2", status: "bulk"}, sessionColl.findOne({_id: "bulk-2"}));
+assert.eq({_id: "bulk-1", status: "bulk"}, sessionColl.findOne({_id: "bulk-1"}));
+assert.eq({_id: "bulk-2", status: "bulk"}, sessionColl.findOne({_id: "bulk-2"}));
- jsTestLog("Bulk delete operations within transaction.");
+jsTestLog("Bulk delete operations within transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- bulk = sessionColl.initializeUnorderedBulkOp();
- bulk.find({_id: "bulk-1"}).removeOne();
- bulk.find({_id: "bulk-2"}).removeOne();
- assert.commandWorked(bulk.execute());
- assert.commandWorked(session.commitTransaction_forTesting());
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+bulk = sessionColl.initializeUnorderedBulkOp();
+bulk.find({_id: "bulk-1"}).removeOne();
+bulk.find({_id: "bulk-2"}).removeOne();
+assert.commandWorked(bulk.execute());
+assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(null, sessionColl.findOne({_id: "bulk-1"}));
- assert.eq(null, sessionColl.findOne({_id: "bulk-2"}));
+assert.eq(null, sessionColl.findOne({_id: "bulk-1"}));
+assert.eq(null, sessionColl.findOne({_id: "bulk-2"}));
- session.endSession();
+session.endSession();
}());
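// For reference, the session helpers used above are thin wrappers over the raw
// commands exercised by the previous test. A sketch of the rough correspondence,
// assuming a replica-set `db` (names illustrative):
const apiDb = db.getSiblingDB("test");
apiDb.runCommand({drop: "api_example", writeConcern: {w: "majority"}});
assert.commandWorked(apiDb.createCollection("api_example", {writeConcern: {w: "majority"}}));
const apiSession = db.getMongo().startSession({causalConsistency: false});
apiSession.startTransaction({readConcern: {level: "snapshot"}});  // local state only
const apiColl = apiSession.getDatabase("test").getCollection("api_example");
// The first operation is what actually sends startTransaction: true, autocommit: false.
assert.commandWorked(apiColl.insert({_id: "api-1"}));
// Sends {commitTransaction: 1, autocommit: false} to the admin database.
assert.commandWorked(apiSession.commitTransaction_forTesting());
apiSession.endSession();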
diff --git a/jstests/core/txns/multi_statement_transaction_write_error.js b/jstests/core/txns/multi_statement_transaction_write_error.js
index c8828a2d735..bea6fda153d 100644
--- a/jstests/core/txns/multi_statement_transaction_write_error.js
+++ b/jstests/core/txns/multi_statement_transaction_write_error.js
@@ -4,187 +4,187 @@
* @tags: [requires_capped, uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const testDB = db.getSiblingDB(dbName);
- const testCollName = "transactions_write_errors";
- const cappedCollName = "capped_transactions_write_errors";
- const testColl = testDB[testCollName];
- const cappedColl = testDB[cappedCollName];
-
- testDB.runCommand({drop: testCollName, writeConcern: {w: "majority"}});
- testDB.runCommand({drop: cappedCollName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.createCollection(testColl.getName()));
- assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 1000}));
-
- // Assert that "cmd" fails with error "code" after "nExpected" operations, or fail with "msg"
- function runInTxn({cmd, msg, code, nExpected, expectedErrorIndex}) {
- const session = db.getMongo().startSession();
- session.startTransaction();
+"use strict";
+
+const dbName = "test";
+const testDB = db.getSiblingDB(dbName);
+const testCollName = "transactions_write_errors";
+const cappedCollName = "capped_transactions_write_errors";
+const testColl = testDB[testCollName];
+const cappedColl = testDB[cappedCollName];
+
+testDB.runCommand({drop: testCollName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: cappedCollName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.createCollection(testColl.getName()));
+assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 1000}));
+
+// Assert that "cmd" fails with error "code" after "nExpected" operations, or fail with "msg"
+function runInTxn({cmd, msg, code, nExpected, expectedErrorIndex}) {
+ const session = db.getMongo().startSession();
+ session.startTransaction();
+ try {
+ var res = session.getDatabase(dbName).runCommand(cmd);
try {
- var res = session.getDatabase(dbName).runCommand(cmd);
- try {
- // Writes reply with ok: 1 and a writeErrors array
- assert.eq(res.ok, 1, "reply.ok : " + msg);
- assert.eq(res.n, nExpected, "reply.n : " + msg);
- // The first and only error comes after nExpected successful writes in the batch
- assert.eq(res.writeErrors.length, 1, "number of write errors : " + msg);
- assert.eq(res.writeErrors[0].index, expectedErrorIndex, "error index : " + msg);
- assert.eq(res.writeErrors[0].code, code, "error code : " + msg);
- assert(!res.hasOwnProperty("errorLabels"), msg);
- } catch (e) {
- printjson(cmd);
- printjson(res);
- throw e;
- }
- } finally {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ // Writes reply with ok: 1 and a writeErrors array
+ assert.eq(res.ok, 1, "reply.ok : " + msg);
+ assert.eq(res.n, nExpected, "reply.n : " + msg);
+ // The first and only error comes after nExpected successful writes in the batch
+ assert.eq(res.writeErrors.length, 1, "number of write errors : " + msg);
+ assert.eq(res.writeErrors[0].index, expectedErrorIndex, "error index : " + msg);
+ assert.eq(res.writeErrors[0].code, code, "error code : " + msg);
+ assert(!res.hasOwnProperty("errorLabels"), msg);
+ } catch (e) {
+ printjson(cmd);
+ printjson(res);
+ throw e;
}
+ } finally {
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
}
+}
+
+// Run "cmdName" against each collection in "collNames", with combos of "goodOp" and "badOp" in
+// a batch, it should fail with "code".
+function exerciseWriteInTxn({collNames, cmdName, goodOp, badOp, code}) {
+ for (let collName of collNames) {
+ for (let ordered of [true, false]) {
+ let docsField;
+ switch (cmdName) {
+ case "insert":
+ docsField = "documents";
+ break;
+ case "update":
+ docsField = "updates";
+ break;
+ case "delete":
+ docsField = "deletes";
+ break;
+ }
- // Run "cmdName" against each collection in "collNames", with combos of "goodOp" and "badOp" in
- // a batch, it should fail with "code".
- function exerciseWriteInTxn({collNames, cmdName, goodOp, badOp, code}) {
- for (let collName of collNames) {
- for (let ordered of[true, false]) {
- let docsField;
- switch (cmdName) {
- case "insert":
- docsField = "documents";
- break;
- case "update":
- docsField = "updates";
- break;
- case "delete":
- docsField = "deletes";
- break;
+ // Construct command like {insert: collectionName, documents: [...]}
+ let newCmd = () => {
+ var cmd = {};
+ cmd[cmdName] = collName;
+ if (!ordered) {
+ cmd.ordered = false;
}
- // Construct command like {insert: collectionName, documents: [...]}
- let newCmd = () => {
- var cmd = {};
- cmd[cmdName] = collName;
- if (!ordered) {
- cmd.ordered = false;
- }
-
- return cmd;
- };
-
- var cmd = newCmd();
- cmd[docsField] = [badOp];
- runInTxn({
- cmd: cmd,
- msg: `one bad ${cmdName} on ${collName} collection, ordered ${ordered}`,
- code: code,
- nExpected: 0,
- expectedErrorIndex: 0
- });
-
- cmd = newCmd();
- cmd[docsField] = [goodOp, badOp];
- let expected = 1;
- if (cmdName == 'delete' && db.getMongo().isMongos()) {
- // The bad delete write will cause mongos to fail during targeting and not
- // do any write at all.
- expected = 0;
- }
- runInTxn({
- cmd: cmd,
- msg:
- `one bad ${cmdName} after a good one on ${collName} collection, ordered ${ordered}`,
- code: code,
- nExpected: expected,
- expectedErrorIndex: 1
- });
-
- cmd = newCmd();
- cmd[docsField] = [goodOp, goodOp, badOp];
- expected = 2;
- if (cmdName == 'delete' && db.getMongo().isMongos()) {
- // The bad delete write will cause mongos to fail during targeting and not
- // do any write at all.
- expected = 0;
- }
- runInTxn({
- cmd: cmd,
- msg:
- `one bad ${cmdName} after two good ones on ${collName} collection, ordered ${ordered}`,
- code: code,
- nExpected: expected,
- expectedErrorIndex: 2
- });
-
- cmd = newCmd();
- cmd[docsField] = [goodOp, goodOp, badOp, badOp];
- expected = 2;
- if (cmdName == 'delete' && db.getMongo().isMongos()) {
- // The bad delete write will cause mongos to fail during targeting and not
- // do any write at all.
- expected = 0;
- }
- runInTxn({
- cmd: cmd,
- msg:
- `two bad ${cmdName}s after two good ones on ${collName} collection, ordered ${ordered}`,
- code: code,
- nExpected: expected,
- expectedErrorIndex: 2
- });
-
- cmd = newCmd();
- cmd[docsField] = [badOp, goodOp];
- runInTxn({
- cmd: cmd,
- msg:
- `good ${cmdName} after a bad one on ${collName} collection, ordered ${ordered}`,
- code: code,
- nExpected: 0,
- expectedErrorIndex: 0
- });
+ return cmd;
+ };
+
+ var cmd = newCmd();
+ cmd[docsField] = [badOp];
+ runInTxn({
+ cmd: cmd,
+ msg: `one bad ${cmdName} on ${collName} collection, ordered ${ordered}`,
+ code: code,
+ nExpected: 0,
+ expectedErrorIndex: 0
+ });
+
+ cmd = newCmd();
+ cmd[docsField] = [goodOp, badOp];
+ let expected = 1;
+ if (cmdName == 'delete' && db.getMongo().isMongos()) {
+ // The bad delete write will cause mongos to fail during targeting and not
+ // do any write at all.
+ expected = 0;
}
+ runInTxn({
+ cmd: cmd,
+ msg: `one bad ${cmdName} after a good one on ${collName} collection, ordered ${
+ ordered}`,
+ code: code,
+ nExpected: expected,
+ expectedErrorIndex: 1
+ });
+
+ cmd = newCmd();
+ cmd[docsField] = [goodOp, goodOp, badOp];
+ expected = 2;
+ if (cmdName == 'delete' && db.getMongo().isMongos()) {
+ // The bad delete write will cause mongos to fail during targeting and not
+ // do any write at all.
+ expected = 0;
+ }
+ runInTxn({
+ cmd: cmd,
+ msg: `one bad ${cmdName} after two good ones on ${collName} collection, ordered ${
+ ordered}`,
+ code: code,
+ nExpected: expected,
+ expectedErrorIndex: 2
+ });
+
+ cmd = newCmd();
+ cmd[docsField] = [goodOp, goodOp, badOp, badOp];
+ expected = 2;
+ if (cmdName == 'delete' && db.getMongo().isMongos()) {
+ // The bad delete write will cause mongos to fail during targeting and not
+ // do any write at all.
+ expected = 0;
+ }
+ runInTxn({
+ cmd: cmd,
+ msg: `two bad ${cmdName}s after two good ones on ${collName} collection, ordered ${
+ ordered}`,
+ code: code,
+ nExpected: expected,
+ expectedErrorIndex: 2
+ });
+
+ cmd = newCmd();
+ cmd[docsField] = [badOp, goodOp];
+ runInTxn({
+ cmd: cmd,
+ msg:
+ `good ${cmdName} after a bad one on ${collName} collection, ordered ${ordered}`,
+ code: code,
+ nExpected: 0,
+ expectedErrorIndex: 0
+ });
}
}
-
- // Set up a document so we can get a DuplicateKey error trying to insert it again.
- assert.commandWorked(testColl.insert({_id: 5}));
- exerciseWriteInTxn({
- collNames: [testCollName],
- cmdName: "insert",
- goodOp: {},
- badOp: {_id: 5},
- code: ErrorCodes.DuplicateKey
- });
-
- // Set up a document with a string field so we can update it but fail to increment it.
- assert.commandWorked(testColl.insertOne({_id: 0, x: "string"}));
- exerciseWriteInTxn({
- collNames: [testCollName],
- cmdName: "update",
- goodOp: {q: {_id: 0}, u: {$set: {x: "STRING"}}},
- badOp: {q: {_id: 0}, u: {$inc: {x: 1}}},
- code: ErrorCodes.TypeMismatch
- });
-
- // Give the good delete operation some documents to delete
- assert.commandWorked(testColl.insertMany([{}, {}, {}, {}]));
- exerciseWriteInTxn({
- collNames: [testCollName],
- cmdName: "delete",
- goodOp: {q: {}, limit: 1},
- badOp: {q: {$foo: 1}, limit: 1},
- code: ErrorCodes.BadValue
- });
-
- // Capped deletes are prohibited
- runInTxn({
- cmd: {delete: cappedCollName, deletes: [{q: {}, limit: 1}]},
- msg: `delete from ${cappedCollName}`,
- code: ErrorCodes.IllegalOperation,
- nExpected: 0,
- expectedErrorIndex: 0
- });
+}
+
+// Set up a document so we can get a DuplicateKey error trying to insert it again.
+assert.commandWorked(testColl.insert({_id: 5}));
+exerciseWriteInTxn({
+ collNames: [testCollName],
+ cmdName: "insert",
+ goodOp: {},
+ badOp: {_id: 5},
+ code: ErrorCodes.DuplicateKey
+});
+
+// Set up a document with a string field so we can update it but fail to increment it.
+assert.commandWorked(testColl.insertOne({_id: 0, x: "string"}));
+exerciseWriteInTxn({
+ collNames: [testCollName],
+ cmdName: "update",
+ goodOp: {q: {_id: 0}, u: {$set: {x: "STRING"}}},
+ badOp: {q: {_id: 0}, u: {$inc: {x: 1}}},
+ code: ErrorCodes.TypeMismatch
+});
+
+// Give the good delete operation some documents to delete
+assert.commandWorked(testColl.insertMany([{}, {}, {}, {}]));
+exerciseWriteInTxn({
+ collNames: [testCollName],
+ cmdName: "delete",
+ goodOp: {q: {}, limit: 1},
+ badOp: {q: {$foo: 1}, limit: 1},
+ code: ErrorCodes.BadValue
+});
+
+// Capped deletes are prohibited
+runInTxn({
+ cmd: {delete: cappedCollName, deletes: [{q: {}, limit: 1}]},
+ msg: `delete from ${cappedCollName}`,
+ code: ErrorCodes.IllegalOperation,
+ nExpected: 0,
+ expectedErrorIndex: 0
+});
}());
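// For reference, a minimal sketch of the reply shape runInTxn asserts on, shown
// outside a transaction for simplicity (collection name illustrative):
const dupDb = db.getSiblingDB("test");
dupDb.dup_example.drop();
assert.commandWorked(dupDb.dup_example.insert({_id: 5}));
const dupRes = dupDb.runCommand({insert: "dup_example", documents: [{_id: 6}, {_id: 5}]});
assert.eq(1, dupRes.ok);                    // command-level success despite the write error
assert.eq(1, dupRes.n);                     // one document was written before the error
assert.eq(1, dupRes.writeErrors[0].index);  // the error points at the offending document
assert.eq(ErrorCodes.DuplicateKey, dupRes.writeErrors[0].code);
// Inside a transaction, the same write error also aborts the transaction, which is why
// runInTxn expects the subsequent abortTransaction to fail with NoSuchTransaction.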
diff --git a/jstests/core/txns/multi_update_in_transaction.js b/jstests/core/txns/multi_update_in_transaction.js
index 3b309194a73..c6d9f3e994c 100644
--- a/jstests/core/txns/multi_update_in_transaction.js
+++ b/jstests/core/txns/multi_update_in_transaction.js
@@ -1,91 +1,92 @@
// Test transactions including multi-updates.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "multi_update_in_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- jsTest.log("Prepopulate the collection.");
- assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
- {writeConcern: {w: "majority"}}));
-
- jsTest.log("Do an empty multi-update.");
- session.startTransaction({writeConcern: {w: "majority"}});
-
- // Update 0 docs.
- let res = sessionColl.update({a: 99}, {$set: {b: 1}}, {multi: true});
- assert.eq(0, res.nModified);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTest.log("Do a single-result multi-update.");
- session.startTransaction({writeConcern: {w: "majority"}});
-
- // Update 1 doc.
- res = sessionColl.update({a: 1}, {$set: {b: 1}}, {multi: true});
- assert.eq(1, res.nModified);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1, b: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTest.log("Do a multiple-result multi-update.");
- session.startTransaction({writeConcern: {w: "majority"}});
-
- // Update 2 docs.
- res = sessionColl.update({a: 0}, {$set: {b: 2}}, {multi: true});
- assert.eq(2, res.nModified);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(),
- [{_id: 0, a: 0, b: 2}, {_id: 1, a: 0, b: 2}, {_id: 2, a: 1, b: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTest.log("Do a multiple-query multi-update.");
- session.startTransaction({writeConcern: {w: "majority"}});
-
- // Bulk update 3 docs.
- let bulk = sessionColl.initializeUnorderedBulkOp();
- bulk.find({a: 0}).update({$set: {c: 1}});
- bulk.find({_id: 2}).update({$set: {c: 2}});
- res = assert.commandWorked(bulk.execute());
- assert.eq(3, res.nModified);
-
- res = sessionColl.find({});
- assert.sameMembers(
- res.toArray(),
- [{_id: 0, a: 0, b: 2, c: 1}, {_id: 1, a: 0, b: 2, c: 1}, {_id: 2, a: 1, b: 1, c: 2}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTest.log("Do a multi-update with upsert.");
- session.startTransaction({writeConcern: {w: "majority"}});
-
- // Upsert 1 doc.
- res = sessionColl.update({_id: 3}, {$set: {d: 1}}, {multi: true, upsert: true});
- assert.eq(1, res.nUpserted);
- res = sessionColl.find({});
- assert.sameMembers(res.toArray(), [
- {_id: 0, a: 0, b: 2, c: 1},
- {_id: 1, a: 0, b: 2, c: 1},
- {_id: 2, a: 1, b: 1, c: 2},
- {_id: 3, d: 1}
- ]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
+"use strict";
+
+const dbName = "test";
+const collName = "multi_update_in_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+jsTest.log("Prepopulate the collection.");
+assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}],
+ {writeConcern: {w: "majority"}}));
+
+jsTest.log("Do an empty multi-update.");
+session.startTransaction({writeConcern: {w: "majority"}});
+
+// Update 0 docs.
+let res = sessionColl.update({a: 99}, {$set: {b: 1}}, {multi: true});
+assert.eq(0, res.nModified);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]);
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTest.log("Do a single-result multi-update.");
+session.startTransaction({writeConcern: {w: "majority"}});
+
+// Update 1 doc.
+res = sessionColl.update({a: 1}, {$set: {b: 1}}, {multi: true});
+assert.eq(1, res.nModified);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1, b: 1}]);
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTest.log("Do a multiple-result multi-update.");
+session.startTransaction({writeConcern: {w: "majority"}});
+
+// Update 2 docs.
+res = sessionColl.update({a: 0}, {$set: {b: 2}}, {multi: true});
+assert.eq(2, res.nModified);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(),
+ [{_id: 0, a: 0, b: 2}, {_id: 1, a: 0, b: 2}, {_id: 2, a: 1, b: 1}]);
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTest.log("Do a multiple-query multi-update.");
+session.startTransaction({writeConcern: {w: "majority"}});
+
+// Bulk update 3 docs.
+let bulk = sessionColl.initializeUnorderedBulkOp();
+bulk.find({a: 0}).update({$set: {c: 1}});
+bulk.find({_id: 2}).update({$set: {c: 2}});
+res = assert.commandWorked(bulk.execute());
+assert.eq(3, res.nModified);
+
+res = sessionColl.find({});
+assert.sameMembers(
+ res.toArray(),
+ [{_id: 0, a: 0, b: 2, c: 1}, {_id: 1, a: 0, b: 2, c: 1}, {_id: 2, a: 1, b: 1, c: 2}]);
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTest.log("Do a multi-update with upsert.");
+session.startTransaction({writeConcern: {w: "majority"}});
+
+// Upsert 1 doc.
+res = sessionColl.update({_id: 3}, {$set: {d: 1}}, {multi: true, upsert: true});
+assert.eq(1, res.nUpserted);
+res = sessionColl.find({});
+assert.sameMembers(res.toArray(), [
+ {_id: 0, a: 0, b: 2, c: 1},
+ {_id: 1, a: 0, b: 2, c: 1},
+ {_id: 2, a: 1, b: 1, c: 2},
+ {_id: 3, d: 1}
+]);
+
+assert.commandWorked(session.commitTransaction_forTesting());
}());
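// For reference, the shell's {multi: true} option maps onto the per-statement "multi"
// flag of the raw update command. A sketch (collection name illustrative):
const multiDb = db.getSiblingDB("test");
multiDb.multi_example.drop();
assert.commandWorked(multiDb.multi_example.insert([{a: 0}, {a: 0}]));
const multiRes = assert.commandWorked(multiDb.runCommand({
    update: "multi_example",
    updates: [{q: {a: 0}, u: {$set: {b: 2}}, multi: true}]
}));
assert.eq(2, multiRes.nModified);  // both matching documents updated in one statement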
diff --git a/jstests/core/txns/no_implicit_collection_creation_in_txn.js b/jstests/core/txns/no_implicit_collection_creation_in_txn.js
index 40de017b421..42494d50958 100644
--- a/jstests/core/txns/no_implicit_collection_creation_in_txn.js
+++ b/jstests/core/txns/no_implicit_collection_creation_in_txn.js
@@ -2,107 +2,102 @@
// multi-document transaction.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "no_implicit_collection_creation_in_txn";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- jsTest.log("Cannot implicitly create a collection in a transaction using insert.");
-
- // Insert succeeds when the collection exists.
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- session.startTransaction({writeConcern: {w: "majority"}});
- sessionColl.insert({_id: "doc"});
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq({_id: "doc"}, testColl.findOne({_id: "doc"}));
-
- // Insert fails when the collection does not exist.
- assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
-
- session.startTransaction({writeConcern: {w: "majority"}});
- assert.commandFailedWithCode(sessionColl.insert({_id: "doc"}),
- ErrorCodes.OperationNotSupportedInTransaction);
-
- // Committing the transaction should fail, since it should never have been started.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(null, testColl.findOne({_id: "doc"}));
-
- jsTest.log("Cannot implicitly create a collection in a transaction using update.");
-
- // Update with upsert=true succeeds when the collection exists.
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- session.startTransaction({writeConcern: {w: "majority"}});
- sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true});
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"}));
-
- // Update with upsert=true fails when the collection does not exist.
- assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
-
- session.startTransaction({writeConcern: {w: "majority"}});
- assert.commandFailedWithCode(
- sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true}),
- ErrorCodes.OperationNotSupportedInTransaction);
-
- // Committing the transaction should fail, since it should never have been started.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(null, testColl.findOne({_id: "doc"}));
-
- // Update with upsert=false succeeds when the collection does not exist.
- session.startTransaction({writeConcern: {w: "majority"}});
- assert.commandWorked(
- sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: false}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(null, testColl.findOne({_id: "doc"}));
-
- jsTest.log("Cannot implicitly create a collection in a transaction using findAndModify.");
-
- // findAndModify with upsert=true succeeds when the collection exists.
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
-
- session.startTransaction({writeConcern: {w: "majority"}});
- let res = sessionColl.findAndModify(
- {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true});
- assert.eq(null, res);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"}));
-
- // findAndModify with upsert=true fails when the collection does not exist.
- assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
-
- session.startTransaction({writeConcern: {w: "majority"}});
- res = assert.throws(() => sessionColl.findAndModify(
- {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true}));
- assert.commandFailedWithCode(res, ErrorCodes.OperationNotSupportedInTransaction);
-
- // Committing the transaction should fail, since it should never have been started.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(null, testColl.findOne({_id: "doc"}));
-
- // findAndModify with upsert=false succeeds when the collection does not exist.
- session.startTransaction({writeConcern: {w: "majority"}});
- res = sessionColl.findAndModify(
- {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: false});
- assert.eq(null, res);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(null, testColl.findOne({_id: "doc"}));
-
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const collName = "no_implicit_collection_creation_in_txn";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+jsTest.log("Cannot implicitly create a collection in a transaction using insert.");
+
+// Insert succeeds when the collection exists.
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+session.startTransaction({writeConcern: {w: "majority"}});
+sessionColl.insert({_id: "doc"});
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq({_id: "doc"}, testColl.findOne({_id: "doc"}));
+
+// Insert fails when the collection does not exist.
+assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
+
+session.startTransaction({writeConcern: {w: "majority"}});
+assert.commandFailedWithCode(sessionColl.insert({_id: "doc"}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+
+// Committing the transaction should fail, since it should never have been started.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+assert.eq(null, testColl.findOne({_id: "doc"}));
+
+jsTest.log("Cannot implicitly create a collection in a transaction using update.");
+
+// Update with upsert=true succeeds when the collection exists.
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+session.startTransaction({writeConcern: {w: "majority"}});
+sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true});
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"}));
+
+// Update with upsert=true fails when the collection does not exist.
+assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
+
+session.startTransaction({writeConcern: {w: "majority"}});
+assert.commandFailedWithCode(
+ sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+
+// Committing the transaction should fail, since it should never have been started.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+assert.eq(null, testColl.findOne({_id: "doc"}));
+
+// Update with upsert=false succeeds when the collection does not exist.
+session.startTransaction({writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: false}));
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(null, testColl.findOne({_id: "doc"}));
+
+jsTest.log("Cannot implicitly create a collection in a transaction using findAndModify.");
+
+// findAndModify with upsert=true succeeds when the collection exists.
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+
+session.startTransaction({writeConcern: {w: "majority"}});
+let res =
+ sessionColl.findAndModify({query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true});
+assert.eq(null, res);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"}));
+
+// findAndModify with upsert=true fails when the collection does not exist.
+assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
+
+session.startTransaction({writeConcern: {w: "majority"}});
+res = assert.throws(() => sessionColl.findAndModify(
+ {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true}));
+assert.commandFailedWithCode(res, ErrorCodes.OperationNotSupportedInTransaction);
+
+// Committing the transaction should fail, since it should never have been started.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+assert.eq(null, testColl.findOne({_id: "doc"}));
+
+// findAndModify with upsert=false succeeds when the collection does not exist.
+session.startTransaction({writeConcern: {w: "majority"}});
+res = sessionColl.findAndModify(
+ {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: false});
+assert.eq(null, res);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(null, testColl.findOne({_id: "doc"}));
+
+session.endSession();
}());
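// For reference, the pattern this test enforces: create the collection with majority
// write concern before the transaction, then write to it inside one. A sketch (names
// illustrative):
const preDb = db.getSiblingDB("test");
preDb.runCommand({drop: "pre_created", writeConcern: {w: "majority"}});
assert.commandWorked(preDb.createCollection("pre_created", {writeConcern: {w: "majority"}}));
const preSession = db.getMongo().startSession({causalConsistency: false});
preSession.startTransaction({writeConcern: {w: "majority"}});
assert.commandWorked(preSession.getDatabase("test").pre_created.insert({_id: 1}));
assert.commandWorked(preSession.commitTransaction_forTesting());
preSession.endSession();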
diff --git a/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js b/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js
index 2aa272a0d2b..ce41fb98620 100644
--- a/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js
+++ b/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js
@@ -5,51 +5,52 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const dbName = "test";
- const collName = "no_new_transactions_when_prepared_transaction_in_progress";
- const testDB = db.getSiblingDB(dbName);
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
-
- jsTestLog(
- "Test starting a new transaction while an existing prepared transaction exists on the " +
- "session.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "cannot_start"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(1),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog(
- "Test error precedence when executing a malformed command during a prepared transaction.");
- // The following command specifies txnNumber: 2 without startTransaction: true.
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "no_such_txn"}],
- txnNumber: NumberLong(2),
- stmtId: NumberInt(0),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- session.endSession();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const dbName = "test";
+const collName = "no_new_transactions_when_prepared_transaction_in_progress";
+const testDB = db.getSiblingDB(dbName);
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
+
+jsTestLog("Test starting a new transaction while an existing prepared transaction exists on the " +
+ "session.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+PrepareHelpers.prepareTransaction(session);
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "cannot_start"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(1),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog(
+ "Test error precedence when executing a malformed command during a prepared transaction.");
+// The following command specifies txnNumber: 2 without startTransaction: true.
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "no_such_txn"}],
+ txnNumber: NumberLong(2),
+ stmtId: NumberInt(0),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+session.endSession();
}());
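// For reference, a prepared transaction must be committed or aborted before its
// session can start a new transaction. A sketch using the same helpers, assuming a
// replica set that supports prepared transactions (names illustrative; the commit
// timestamp comes from the prepare step):
load("jstests/core/txns/libs/prepare_helpers.js");
const prepDb = db.getSiblingDB("test");
prepDb.runCommand({drop: "prep_example", writeConcern: {w: "majority"}});
assert.commandWorked(prepDb.createCollection("prep_example", {writeConcern: {w: "majority"}}));
const prepSession = db.getMongo().startSession({causalConsistency: false});
const prepColl = prepSession.getDatabase("test").getCollection("prep_example");
prepSession.startTransaction();
assert.commandWorked(prepColl.insert({_id: "p1"}));
const prepareTimestamp = PrepareHelpers.prepareTransaction(prepSession);
// Committing at the prepare timestamp resolves the transaction and frees the session.
PrepareHelpers.commitTransaction(prepSession, prepareTimestamp);
prepSession.endSession();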
diff --git a/jstests/core/txns/no_read_concern_snapshot_outside_txn.js b/jstests/core/txns/no_read_concern_snapshot_outside_txn.js
index 2b69510ecde..7840a538b74 100644
--- a/jstests/core/txns/no_read_concern_snapshot_outside_txn.js
+++ b/jstests/core/txns/no_read_concern_snapshot_outside_txn.js
@@ -5,67 +5,69 @@
*/
(function() {
- "use strict";
- const dbName = "test";
- const collName = "no_read_concern_snapshot_outside_txn";
- const testDB = db.getSiblingDB(dbName);
+"use strict";
+const dbName = "test";
+const collName = "no_read_concern_snapshot_outside_txn";
+const testDB = db.getSiblingDB(dbName);
- // Set up the test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+// Set up the test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
- // Initiate the session.
- const sessionOptions = {causalConsistency: false};
- let session = db.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let txnNumber = 0;
- let stmtId = 0;
+// Initiate the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = db.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let txnNumber = 0;
+let stmtId = 0;
- function tryCommands({testDB, message}) {
- jsTestLog("Verify that inserts cannot use readConcern snapshot " + message);
- let cmd = {
- insert: collName,
- documents: [{_id: 0}],
- readConcern: {level: "snapshot"},
- };
- assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+function tryCommands({testDB, message}) {
+ jsTestLog("Verify that inserts cannot use readConcern snapshot " + message);
+ let cmd = {
+ insert: collName,
+ documents: [{_id: 0}],
+ readConcern: {level: "snapshot"},
+ };
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
- jsTestLog("Verify that updates cannot use readConcern snapshot " + message);
- cmd = {
- update: collName,
- updates: [{q: {_id: 0}, u: {$set: {x: 1}}}],
- readConcern: {level: "snapshot"},
- };
- assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+ jsTestLog("Verify that updates cannot use readConcern snapshot " + message);
+ cmd = {
+ update: collName,
+ updates: [{q: {_id: 0}, u: {$set: {x: 1}}}],
+ readConcern: {level: "snapshot"},
+ };
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
- jsTestLog("Verify that deletes cannot use readConcern snapshot " + message);
- cmd = {
- delete: collName,
- deletes: [{q: {_id: 0}, limit: 1}],
- readConcern: {level: "snapshot"},
- };
- assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+ jsTestLog("Verify that deletes cannot use readConcern snapshot " + message);
+ cmd = {
+ delete: collName,
+ deletes: [{q: {_id: 0}, limit: 1}],
+ readConcern: {level: "snapshot"},
+ };
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
- jsTestLog("Verify that findAndModify cannot use readConcern snapshot " + message);
- cmd = {
- findAndModify: collName,
- query: {_id: 0},
- remove: true,
- readConcern: {level: "snapshot"},
- };
- assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+ jsTestLog("Verify that findAndModify cannot use readConcern snapshot " + message);
+ cmd = {
+ findAndModify: collName,
+ query: {_id: 0},
+ remove: true,
+ readConcern: {level: "snapshot"},
+ };
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
- jsTestLog("Verify that finds cannot use readConcern snapshot " + message);
- cmd = {find: collName, readConcern: {level: "snapshot"}};
- assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+ jsTestLog("Verify that finds cannot use readConcern snapshot " + message);
+ cmd = {find: collName, readConcern: {level: "snapshot"}};
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
- jsTestLog("Verify that aggregate cannot use readConcern snapshot " + message);
- cmd = {aggregate: collName, pipeline: [], readConcern: {level: "snapshot"}};
- assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
- }
- tryCommands({testDB: sessionDb, message: "in session."});
- tryCommands({testDB: testDB, message: "outside session."});
+ jsTestLog("Verify that aggregate cannot use readConcern snapshot " + message);
+ cmd = {aggregate: collName, pipeline: [], readConcern: {level: "snapshot"}};
+ assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions);
+}
+tryCommands({testDB: sessionDb, message: "in session."});
+tryCommands({testDB: testDB, message: "outside session."});
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/no_read_or_write_concern_inside_txn.js b/jstests/core/txns/no_read_or_write_concern_inside_txn.js
index 31e29117f81..b8333eed92f 100644
--- a/jstests/core/txns/no_read_or_write_concern_inside_txn.js
+++ b/jstests/core/txns/no_read_or_write_concern_inside_txn.js
@@ -6,156 +6,158 @@
*/
(function() {
- "use strict";
- const dbName = "test";
- const collName = "no_read_or_write_concerns_inside_txn";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- // Set up the test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- // Initiate the session.
- const sessionOptions = {causalConsistency: false};
- let session = db.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let txnNumber = 0;
- let stmtId = 0;
-
- jsTestLog("Starting first transaction");
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 0}],
- readConcern: {level: "snapshot"},
- startTransaction: true,
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }));
-
- jsTestLog("Attempting to insert with readConcern: snapshot within a transaction.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 1}],
- readConcern: {level: "snapshot"},
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Attempting to insert with readConcern:majority within a transaction.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 2}],
- readConcern: {level: "majority"},
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Attempting to insert with readConcern:local within a transaction.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- readConcern: {level: "local"},
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Transaction should still commit.");
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- autocommit: false,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }));
- assert.sameMembers(testColl.find().toArray(), [{_id: 0}]);
-
- // Drop and re-create collection to keep parts of test isolated from one another.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- txnNumber++;
- stmtId = 0;
-
- jsTestLog("Attempting to start transaction with local writeConcern.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 4}],
- readConcern: {level: "snapshot"},
- writeConcern: {w: 1},
- startTransaction: true,
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
- txnNumber++;
- stmtId = 0;
-
- jsTestLog("Attempting to start transaction with majority writeConcern.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 5}],
- readConcern: {level: "snapshot"},
- writeConcern: {w: "majority"},
- startTransaction: true,
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
- txnNumber++;
- stmtId = 0;
-
- jsTestLog("Starting transaction normally.");
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 6}],
- readConcern: {level: "snapshot"},
- startTransaction: true,
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }));
-
- jsTestLog("Attempting to write within transaction with majority write concern.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 7}],
- writeConcern: {w: "majority"},
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Attempting to write within transaction with local write concern.");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: 8}],
- writeConcern: {w: 1},
- autocommit: false,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Transaction should still commit.");
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- autocommit: false,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++)
- }));
- assert.sameMembers(testColl.find().toArray(), [{_id: 6}]);
- session.endSession();
+"use strict";
+const dbName = "test";
+const collName = "no_read_or_write_concerns_inside_txn";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+// Set up the test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+// Initiate the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = db.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let txnNumber = 0;
+let stmtId = 0;
+
+jsTestLog("Starting first transaction");
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 0}],
+ readConcern: {level: "snapshot"},
+ startTransaction: true,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}));
+
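+// A readConcern is only accepted on the statement that starts the transaction; statements that
+// continue the transaction must not specify one.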
+jsTestLog("Attempting to insert with readConcern: snapshot within a transaction.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 1}],
+ readConcern: {level: "snapshot"},
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+
+jsTestLog("Attempting to insert with readConcern:majority within a transaction.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 2}],
+ readConcern: {level: "majority"},
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+
+jsTestLog("Attempting to insert with readConcern:local within a transaction.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ readConcern: {level: "local"},
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+
+jsTestLog("Transaction should still commit.");
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ autocommit: false,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}));
+assert.sameMembers(testColl.find().toArray(), [{_id: 0}]);
+
+// Drop and re-create the collection to keep parts of the test isolated from one another.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+txnNumber++;
+stmtId = 0;
+
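+// A writeConcern is only accepted on commitTransaction and abortTransaction; supplying one when
+// starting or continuing a transaction is rejected.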
+jsTestLog("Attempting to start transaction with local writeConcern.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 4}],
+ readConcern: {level: "snapshot"},
+ writeConcern: {w: 1},
+ startTransaction: true,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+txnNumber++;
+stmtId = 0;
+
+jsTestLog("Attempting to start transaction with majority writeConcern.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 5}],
+ readConcern: {level: "snapshot"},
+ writeConcern: {w: "majority"},
+ startTransaction: true,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+txnNumber++;
+stmtId = 0;
+
+jsTestLog("Starting transaction normally.");
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 6}],
+ readConcern: {level: "snapshot"},
+ startTransaction: true,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}));
+
+jsTestLog("Attempting to write within transaction with majority write concern.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 7}],
+ writeConcern: {w: "majority"},
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+
+jsTestLog("Attempting to write within transaction with local write concern.");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: 8}],
+ writeConcern: {w: 1},
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}),
+ ErrorCodes.InvalidOptions);
+
+jsTestLog("Transaction should still commit.");
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ autocommit: false,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++)
+}));
+assert.sameMembers(testColl.find().toArray(), [{_id: 6}]);
+session.endSession();
}());
diff --git a/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js b/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js
index 280b4f0f1bf..808bc8dbb72 100644
--- a/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js
+++ b/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js
@@ -1,43 +1,40 @@
// Tests that it is illegal to read from system.views within a transaction.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
+load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
- const session = db.getMongo().startSession({causalConsistency: false});
+const session = db.getMongo().startSession({causalConsistency: false});
- // Use a custom database to avoid conflict with other tests that use system.views.
- const testDB = session.getDatabase("no_reads_from_system_dot_views_in_txn");
- assert.commandWorked(testDB.dropDatabase());
+// Use a custom database to avoid conflict with other tests that use system.views.
+const testDB = session.getDatabase("no_reads_from_system_dot_views_in_txn");
+assert.commandWorked(testDB.dropDatabase());
- testDB.runCommand({create: "foo", viewOn: "bar", pipeline: []});
+testDB.runCommand({create: "foo", viewOn: "bar", pipeline: []});
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(testDB.runCommand({find: "system.views", filter: {}}), 51071);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(testDB.runCommand({find: "system.views", filter: {}}), 51071);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- if (FixtureHelpers.isMongos(testDB)) {
- // The rest of the test is concerned with a find by UUID which is not supported against
- // mongos.
- return;
- }
+if (FixtureHelpers.isMongos(testDB)) {
+    // The rest of the test is concerned with a find by UUID, which is not supported
+    // against mongos.
+ return;
+}
- const collectionInfos =
- new DBCommandCursor(testDB, assert.commandWorked(testDB.runCommand({listCollections: 1})));
- let systemViewsUUID = null;
- while (collectionInfos.hasNext()) {
- const next = collectionInfos.next();
- if (next.name === "system.views") {
- systemViewsUUID = next.info.uuid;
- }
+const collectionInfos =
+ new DBCommandCursor(testDB, assert.commandWorked(testDB.runCommand({listCollections: 1})));
+let systemViewsUUID = null;
+while (collectionInfos.hasNext()) {
+ const next = collectionInfos.next();
+ if (next.name === "system.views") {
+ systemViewsUUID = next.info.uuid;
}
- assert.neq(null, systemViewsUUID, "did not find UUID for system.views");
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(testDB.runCommand({find: systemViewsUUID, filter: {}}), 51070);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+}
+assert.neq(null, systemViewsUUID, "did not find UUID for system.views");
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(testDB.runCommand({find: systemViewsUUID, filter: {}}), 51070);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
}());
diff --git a/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js b/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js
index 3c909583cfe..067bf1482fa 100644
--- a/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js
+++ b/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js
@@ -6,95 +6,90 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- TestData.disableImplicitSessions = true;
-
- const dbName = "test";
- const collName = "no_writes_to_config_transactions_with_prepared_transaction";
- const collName2 = "no_writes_to_config_transactions_with_prepared_transaction2";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- const config = db.getSiblingDB("config");
- const transactionsColl = config.getCollection("transactions");
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}}));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- const sessionConfigDB = session.getDatabase("config");
-
- // Start a transaction using runCommand so that we can run commands on the session but outside
- // the transaction.
- assert.commandWorked(sessionDB.runCommand({
- insert: collName,
- documents: [{_id: 1}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- assert.commandWorked(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(0),
- stmtId: NumberInt(1),
- autocommit: false
- }));
-
- let transactionEntry = config.transactions.findOne();
- const txnNum = transactionEntry.txnNum;
-
- jsTestLog("Test that updates to config.transactions fails when there is a prepared " +
- "transaction on the session");
- assert.commandFailedWithCode(
- sessionConfigDB.transactions.update({_id: transactionEntry._id},
- {$set: {"txnNumber": NumberLong(23)}}),
- 40528);
-
- // Make sure that the txnNumber wasn't modified.
- transactionEntry = config.transactions.findOne();
- assert.eq(transactionEntry.txnNum, NumberLong(txnNum));
-
- jsTestLog("Test that deletes to config.transactions fails when there is a prepared " +
- "transaction on the session");
- assert.commandFailedWithCode(sessionConfigDB.transactions.remove({_id: transactionEntry._id}),
- 40528);
-
- // Make sure that the entry in config.transactions wasn't removed.
- transactionEntry = config.transactions.findOne();
- assert(transactionEntry);
-
- jsTestLog("Test that dropping config.transactions fails when there is a prepared transaction" +
- " on the session");
- assert.commandFailedWithCode(assert.throws(function() {
- sessionConfigDB.transactions.drop();
- }),
- 40528);
-
- jsTestLog("Test that we can prepare a transaction on a different session");
- const session2 = db.getMongo().startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName2);
-
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 1}));
- // This will cause an insertion into config.transactions
- PrepareHelpers.prepareTransaction(session2);
-
- assert.commandWorked(sessionDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(0), stmtid: NumberInt(2), autocommit: false}));
- session.endSession();
-
- assert.commandWorked(session2.abortTransaction_forTesting());
- session2.endSession();
-
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+TestData.disableImplicitSessions = true;
+
+const dbName = "test";
+const collName = "no_writes_to_config_transactions_with_prepared_transaction";
+const collName2 = "no_writes_to_config_transactions_with_prepared_transaction2";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+const config = db.getSiblingDB("config");
+const transactionsColl = config.getCollection("transactions");
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}}));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+const sessionConfigDB = session.getDatabase("config");
+
+// Start a transaction using runCommand so that we can run commands on the session but outside
+// the transaction.
+assert.commandWorked(sessionDB.runCommand({
+ insert: collName,
+ documents: [{_id: 1}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+assert.commandWorked(sessionDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(0), stmtId: NumberInt(1), autocommit: false}));
+
+let transactionEntry = config.transactions.findOne();
+const txnNum = transactionEntry.txnNum;
+
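+// config.transactions holds one durable record per session; the writes attempted below target
+// the record backing this session's prepared transaction.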
+jsTestLog("Test that updates to config.transactions fails when there is a prepared " +
+ "transaction on the session");
+assert.commandFailedWithCode(
+ sessionConfigDB.transactions.update({_id: transactionEntry._id},
+ {$set: {"txnNumber": NumberLong(23)}}),
+ 40528);
+
+// Make sure that the txnNumber wasn't modified.
+transactionEntry = config.transactions.findOne();
+assert.eq(transactionEntry.txnNum, NumberLong(txnNum));
+
+jsTestLog("Test that deletes to config.transactions fails when there is a prepared " +
+ "transaction on the session");
+assert.commandFailedWithCode(sessionConfigDB.transactions.remove({_id: transactionEntry._id}),
+ 40528);
+
+// Make sure that the entry in config.transactions wasn't removed.
+transactionEntry = config.transactions.findOne();
+assert(transactionEntry);
+
+jsTestLog("Test that dropping config.transactions fails when there is a prepared transaction" +
+ " on the session");
+assert.commandFailedWithCode(assert.throws(function() {
+ sessionConfigDB.transactions.drop();
+ }),
+ 40528);
+
+jsTestLog("Test that we can prepare a transaction on a different session");
+const session2 = db.getMongo().startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName2);
+
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 1}));
+// This will cause an insertion into config.transactions.
+PrepareHelpers.prepareTransaction(session2);
+
+assert.commandWorked(sessionDB.adminCommand(
+    {abortTransaction: 1, txnNumber: NumberLong(0), stmtId: NumberInt(2), autocommit: false}));
+session.endSession();
+
+assert.commandWorked(session2.abortTransaction_forTesting());
+session2.endSession();
}());
diff --git a/jstests/core/txns/no_writes_to_system_collections_in_txn.js b/jstests/core/txns/no_writes_to_system_collections_in_txn.js
index 4b13908773e..a5956723b1e 100644
--- a/jstests/core/txns/no_writes_to_system_collections_in_txn.js
+++ b/jstests/core/txns/no_writes_to_system_collections_in_txn.js
@@ -1,58 +1,51 @@
// Tests that it is illegal to write to system collections within a transaction.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
-
- const session = db.getMongo().startSession({causalConsistency: false});
-
- // Use a custom database, to avoid conflict with other tests that use the system.js collection.
- const testDB = session.getDatabase("no_writes_system_collections_in_txn");
- assert.commandWorked(testDB.dropDatabase());
- const systemColl = testDB.getCollection("system.js");
- const systemDotViews = testDB.getCollection("system.views");
-
- // Ensure that a collection exists with at least one document.
- assert.commandWorked(systemColl.insert({name: 0}, {writeConcern: {w: "majority"}}));
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- let error = assert.throws(() => systemColl.findAndModify({query: {}, update: {}}));
- assert.commandFailedWithCode(error, 50781);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- error = assert.throws(() => systemColl.findAndModify({query: {}, remove: true}));
- assert.commandFailedWithCode(error, 50781);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(systemColl.insert({name: "new"}), 50791);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(
- systemDotViews.insert({_id: "new.view", viewOn: "bar", pipeline: []}), 50791);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(systemColl.update({name: 0}, {$set: {name: "jungsoo"}}), 50791);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(
- systemColl.update({name: "nonexistent"}, {$set: {name: "jungsoo"}}, {upsert: true}), 50791);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(systemColl.remove({name: 0}), 50791);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(systemColl.remove({_id: {$exists: true}}));
- assert.eq(systemColl.find().itcount(), 0);
+"use strict";
+
+const session = db.getMongo().startSession({causalConsistency: false});
+
+// Use a custom database to avoid conflict with other tests that use the system.js collection.
+const testDB = session.getDatabase("no_writes_system_collections_in_txn");
+assert.commandWorked(testDB.dropDatabase());
+const systemColl = testDB.getCollection("system.js");
+const systemDotViews = testDB.getCollection("system.views");
+
+// Ensure that a collection exists with at least one document.
+assert.commandWorked(systemColl.insert({name: 0}, {writeConcern: {w: "majority"}}));
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+let error = assert.throws(() => systemColl.findAndModify({query: {}, update: {}}));
+assert.commandFailedWithCode(error, 50781);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+error = assert.throws(() => systemColl.findAndModify({query: {}, remove: true}));
+assert.commandFailedWithCode(error, 50781);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(systemColl.insert({name: "new"}), 50791);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(systemDotViews.insert({_id: "new.view", viewOn: "bar", pipeline: []}),
+ 50791);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(systemColl.update({name: 0}, {$set: {name: "jungsoo"}}), 50791);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(
+ systemColl.update({name: "nonexistent"}, {$set: {name: "jungsoo"}}, {upsert: true}), 50791);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(systemColl.remove({name: 0}), 50791);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+assert.commandWorked(systemColl.remove({_id: {$exists: true}}));
+assert.eq(systemColl.find().itcount(), 0);
}());
diff --git a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
index 74ef4228362..8fb9b6b5a3e 100644
--- a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
+++ b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
@@ -7,94 +7,99 @@
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "non_transactional_operations_on_session_with_transactions";
-
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- // Clean up and create test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- let txnNumber = 0;
-
- /**
- * Asserts that the given result cursor has the expected contents and that it is exhausted if
- * specified.
- */
- function assertCursorBatchContents(result, expectedContents, isExhausted) {
- assert.gt(expectedContents.length, 0, "Non-empty expected contents required.");
- assert(result.hasOwnProperty("cursor"), tojson(result));
- assert(result["cursor"].hasOwnProperty("firstBatch"), tojson(result));
- assert.eq(expectedContents.length, result["cursor"]["firstBatch"].length, tojson(result));
- for (let i = 0; i < expectedContents.length; i++) {
- assert.docEq(expectedContents[i], result["cursor"]["firstBatch"][i], tojson(result));
- }
- assert.eq(isExhausted, result["cursor"]["id"] === 0, tojson(result));
- }
+"use strict";
+
+const dbName = "test";
+const collName = "non_transactional_operations_on_session_with_transactions";
+
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+// Clean up and create the test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const doc1 = {_id: "insert-1"};
- const doc2 = {_id: "insert-2"};
-
- // Insert a document in a transaction.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [doc1],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Test that we cannot observe the insert outside of the transaction.
- assert.eq(null, testColl.findOne(doc1));
- assert.eq(null, sessionColl.findOne(doc1));
- assert.eq(null, testColl.findOne(doc2));
- assert.eq(null, sessionColl.findOne(doc2));
-
- // Test that we observe the insert inside of the transaction.
- assertCursorBatchContents(
- assert.commandWorked(sessionDb.runCommand(
- {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})),
- [doc1],
- false);
-
- // Insert a document on the session outside of the transaction.
- assert.commandWorked(sessionDb.runCommand({insert: collName, documents: [doc2]}));
-
- // Test that we observe the insert outside of the transaction.
- assert.eq(null, testColl.findOne(doc1));
- assert.eq(null, sessionColl.findOne(doc1));
- assert.docEq(doc2, testColl.findOne(doc2));
- assert.docEq(doc2, sessionColl.findOne(doc2));
-
- // Test that we do not observe the new insert inside of the transaction.
- assertCursorBatchContents(
- assert.commandWorked(sessionDb.runCommand(
- {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})),
- [doc1],
- false);
-
- // Commit the transaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Test that we see both documents outside of the transaction.
- assert.docEq(doc1, testColl.findOne(doc1));
- assert.docEq(doc1, sessionColl.findOne(doc1));
- assert.docEq(doc2, testColl.findOne(doc2));
- assert.docEq(doc2, sessionColl.findOne(doc2));
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+let txnNumber = 0;
+
+/**
+ * Asserts that the given result cursor has the expected contents and that it is exhausted if
+ * specified.
+ */
+function assertCursorBatchContents(result, expectedContents, isExhausted) {
+ assert.gt(expectedContents.length, 0, "Non-empty expected contents required.");
+ assert(result.hasOwnProperty("cursor"), tojson(result));
+ assert(result["cursor"].hasOwnProperty("firstBatch"), tojson(result));
+ assert.eq(expectedContents.length, result["cursor"]["firstBatch"].length, tojson(result));
+ for (let i = 0; i < expectedContents.length; i++) {
+ assert.docEq(expectedContents[i], result["cursor"]["firstBatch"][i], tojson(result));
+ }
+ assert.eq(isExhausted, result["cursor"]["id"] === 0, tojson(result));
+}
+
+const doc1 = {
+ _id: "insert-1"
+};
+const doc2 = {
+ _id: "insert-2"
+};
+
+// Insert a document in a transaction.
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [doc1],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Test that we cannot observe the insert outside of the transaction.
+assert.eq(null, testColl.findOne(doc1));
+assert.eq(null, sessionColl.findOne(doc1));
+assert.eq(null, testColl.findOne(doc2));
+assert.eq(null, sessionColl.findOne(doc2));
+
+// Test that we observe the insert inside of the transaction.
+assertCursorBatchContents(
+ assert.commandWorked(sessionDb.runCommand(
+ {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})),
+ [doc1],
+ false);
+
+// Insert a document on the session outside of the transaction.
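+// (It runs outside the transaction because it omits the 'txnNumber' and 'autocommit' fields.)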
+assert.commandWorked(sessionDb.runCommand({insert: collName, documents: [doc2]}));
+
+// Test that we observe the insert outside of the transaction.
+assert.eq(null, testColl.findOne(doc1));
+assert.eq(null, sessionColl.findOne(doc1));
+assert.docEq(doc2, testColl.findOne(doc2));
+assert.docEq(doc2, sessionColl.findOne(doc2));
+
+// Test that we do not observe the new insert inside of the transaction.
+assertCursorBatchContents(
+ assert.commandWorked(sessionDb.runCommand(
+ {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})),
+ [doc1],
+ false);
+
+// Commit the transaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+
+// Test that we see both documents outside of the transaction.
+assert.docEq(doc1, testColl.findOne(doc1));
+assert.docEq(doc1, sessionColl.findOne(doc1));
+assert.docEq(doc2, testColl.findOne(doc2));
+assert.docEq(doc2, sessionColl.findOne(doc2));
}());
\ No newline at end of file
diff --git a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js
index 52a98d1e69c..675db8b65f3 100644
--- a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js
+++ b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js
@@ -1,53 +1,56 @@
// Tests that no-op createIndex commands do not block behind transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = 'noop_createIndexes_not_blocked';
- const collName = 'test';
- const testDB = db.getSiblingDB(dbName);
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
-
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
- if (isMongos) {
- // Access the collection before creating indexes so it can be implicitly sharded.
- assert.eq(sessionDB[collName].find().itcount(), 0);
- }
-
- const createIndexesCommand = {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]};
- assert.commandWorked(sessionDB.runCommand(createIndexesCommand));
-
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({a: 5, b: 6}));
-
- // This should not block because an identical index exists.
- let res = testDB.runCommand(createIndexesCommand);
- assert.commandWorked(res);
- assert.eq(res.numIndexesBefore, res.numIndexesAfter);
-
- // This should not block but return an error because the index exists with different options.
- res = testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {a: 1}, name: "unique_a_1", unique: true}],
- });
- assert.commandFailedWithCode(res, ErrorCodes.IndexOptionsConflict);
-
- // This should block and time out because the index does not already exist.
- res = testDB.runCommand(
- {createIndexes: collName, indexes: [{key: {b: 1}, name: "b_1"}], maxTimeMS: 500});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // This should block and time out because one of the indexes does not already exist.
- res = testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {a: 1}, name: "a_1"}, {key: {b: 1}, name: "b_1"}],
- maxTimeMS: 500
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- assert.commandWorked(session.commitTransaction_forTesting());
+"use strict";
+
+const dbName = 'noop_createIndexes_not_blocked';
+const collName = 'test';
+const testDB = db.getSiblingDB(dbName);
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
+if (isMongos) {
+ // Access the collection before creating indexes so it can be implicitly sharded.
+ assert.eq(sessionDB[collName].find().itcount(), 0);
+}
+
+const createIndexesCommand = {
+ createIndexes: collName,
+ indexes: [{key: {a: 1}, name: "a_1"}]
+};
+assert.commandWorked(sessionDB.runCommand(createIndexesCommand));
+
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].insert({a: 5, b: 6}));
+
+// This should not block because an identical index exists.
+let res = testDB.runCommand(createIndexesCommand);
+assert.commandWorked(res);
+assert.eq(res.numIndexesBefore, res.numIndexesAfter);
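+// Matching before/after index counts confirm the command was a no-op; only a no-op can return
+// without waiting for the open transaction to release its locks.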
+
+// This should not block but return an error because the index exists with different options.
+res = testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {a: 1}, name: "unique_a_1", unique: true}],
+});
+assert.commandFailedWithCode(res, ErrorCodes.IndexOptionsConflict);
+
+// This should block and time out because the index does not already exist.
+res = testDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {b: 1}, name: "b_1"}], maxTimeMS: 500});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// This should block and time out because one of the indexes does not already exist.
+res = testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {a: 1}, name: "a_1"}, {key: {b: 1}, name: "b_1"}],
+ maxTimeMS: 500
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+assert.commandWorked(session.commitTransaction_forTesting());
}());
diff --git a/jstests/core/txns/prepare_conflict.js b/jstests/core/txns/prepare_conflict.js
index eade0e15d1e..7eb4212c5c3 100644
--- a/jstests/core/txns/prepare_conflict.js
+++ b/jstests/core/txns/prepare_conflict.js
@@ -4,95 +4,101 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const dbName = "test";
- const collName = "prepare_conflict";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- function assertPrepareConflict(filter, clusterTime) {
- // Use a 5 second timeout so that there is enough time for the prepared transaction to
- // release its locks and for the command to obtain those locks.
- assert.commandFailedWithCode(
- // Use afterClusterTime read to make sure that it will block on a prepare conflict.
- testDB.runCommand({
- find: collName,
- filter: filter,
- readConcern: {afterClusterTime: clusterTime},
- maxTimeMS: 5000
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- let prepareConflicted = false;
- const cur =
- testDB.system.profile.find({"ns": testColl.getFullName(), "command.filter": filter});
- while (cur.hasNext()) {
- const n = cur.next();
- print("op: " + JSON.stringify(n));
- if (n.prepareReadConflicts > 0) {
- prepareConflicted = true;
- }
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const dbName = "test";
+const collName = "prepare_conflict";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
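+// A read that depends on a document written by a prepared transaction blocks until that
+// transaction commits or aborts; with maxTimeMS set, such a read times out instead.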
+function assertPrepareConflict(filter, clusterTime) {
+    // Use a 5-second timeout so that there is enough time for the prepared transaction to
+ // release its locks and for the command to obtain those locks.
+ assert.commandFailedWithCode(
+ // Use afterClusterTime read to make sure that it will block on a prepare conflict.
+ testDB.runCommand({
+ find: collName,
+ filter: filter,
+ readConcern: {afterClusterTime: clusterTime},
+ maxTimeMS: 5000
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+
+ let prepareConflicted = false;
+ const cur =
+ testDB.system.profile.find({"ns": testColl.getFullName(), "command.filter": filter});
+ while (cur.hasNext()) {
+ const n = cur.next();
+ print("op: " + JSON.stringify(n));
+ if (n.prepareReadConflicts > 0) {
+ prepareConflicted = true;
}
- assert(prepareConflicted);
}
-
- // Insert a document modified by the transaction.
- const txnDoc = {_id: 1, x: 1};
- assert.commandWorked(testColl.insert(txnDoc));
-
- // Insert a document unmodified by the transaction.
- const otherDoc = {_id: 2, y: 2};
- assert.commandWorked(testColl.insert(otherDoc, {writeConcern: {w: "majority"}}));
-
- // Create an index on 'y' to avoid conflicts on the field.
- assert.commandWorked(testColl.createIndex({y: 1}));
-
- // Enable the profiler to log slow queries. We expect a 'find' to hang until the prepare
- // conflict is resolved.
- assert.commandWorked(testDB.runCommand({profile: 1, level: 1, slowms: 100}));
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand({
- update: collName,
- updates: [{q: txnDoc, u: {$inc: {x: 1}}}],
- }));
-
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Conflict on _id of prepared document.
- assertPrepareConflict({_id: txnDoc._id}, prepareTimestamp);
-
- // Conflict on field that could be added to a prepared document.
- assertPrepareConflict({randomField: "random"}, prepareTimestamp);
-
- // No conflict on _id of a non-prepared document.
- assert.commandWorked(testDB.runCommand({find: collName, filter: {_id: otherDoc._id}}));
-
- // No conflict on indexed field of a non-prepared document.
- assert.commandWorked(testDB.runCommand({find: collName, filter: {y: otherDoc.y}}));
-
- // At this point, we can guarantee all subsequent reads will conflict. Do a read in a parallel
- // shell, abort the transaction, then ensure the read succeeded with the old document.
- TestData.collName = collName;
- TestData.dbName = dbName;
- TestData.txnDoc = txnDoc;
- const findAwait = startParallelShell(function() {
- const it = db.getSiblingDB(TestData.dbName)
- .runCommand({find: TestData.collName, filter: {_id: TestData.txnDoc._id}});
- }, db.getMongo().port);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- // The find command should be successful.
- findAwait({checkExitSuccess: true});
-
- // The document should be unmodified, because we aborted.
- assert.eq(txnDoc, testColl.findOne(txnDoc));
+ assert(prepareConflicted);
+}
+
+// Insert a document modified by the transaction.
+const txnDoc = {
+ _id: 1,
+ x: 1
+};
+assert.commandWorked(testColl.insert(txnDoc));
+
+// Insert a document unmodified by the transaction.
+const otherDoc = {
+ _id: 2,
+ y: 2
+};
+assert.commandWorked(testColl.insert(otherDoc, {writeConcern: {w: "majority"}}));
+
+// Create an index on 'y' to avoid conflicts on the field.
+assert.commandWorked(testColl.createIndex({y: 1}));
+
+// Enable the profiler to log slow queries. We expect a 'find' to hang until the prepare
+// conflict is resolved.
+assert.commandWorked(testDB.runCommand({profile: 1, level: 1, slowms: 100}));
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandWorked(sessionDB.runCommand({
+ update: collName,
+ updates: [{q: txnDoc, u: {$inc: {x: 1}}}],
+}));
+
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
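+// assertPrepareConflict reads at this timestamp via afterClusterTime, which forces the find to
+// observe the prepared transaction's effects and therefore conflict.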
+
+// Conflict on _id of prepared document.
+assertPrepareConflict({_id: txnDoc._id}, prepareTimestamp);
+
+// Conflict on field that could be added to a prepared document.
+assertPrepareConflict({randomField: "random"}, prepareTimestamp);
+
+// No conflict on _id of a non-prepared document.
+assert.commandWorked(testDB.runCommand({find: collName, filter: {_id: otherDoc._id}}));
+
+// No conflict on indexed field of a non-prepared document.
+assert.commandWorked(testDB.runCommand({find: collName, filter: {y: otherDoc.y}}));
+
+// At this point, we can guarantee all subsequent reads will conflict. Do a read in a parallel
+// shell, abort the transaction, then ensure the read succeeded with the old document.
+TestData.collName = collName;
+TestData.dbName = dbName;
+TestData.txnDoc = txnDoc;
+const findAwait = startParallelShell(function() {
+ const it = db.getSiblingDB(TestData.dbName)
+ .runCommand({find: TestData.collName, filter: {_id: TestData.txnDoc._id}});
+}, db.getMongo().port);
+
+assert.commandWorked(session.abortTransaction_forTesting());
+
+// The find command should be successful.
+findAwait({checkExitSuccess: true});
+
+// The document should be unmodified, because we aborted.
+assert.eq(txnDoc, testColl.findOne(txnDoc));
})();
diff --git a/jstests/core/txns/prepare_conflict_aggregation_behavior.js b/jstests/core/txns/prepare_conflict_aggregation_behavior.js
index 37c9984d042..c62b7370dc6 100644
--- a/jstests/core/txns/prepare_conflict_aggregation_behavior.js
+++ b/jstests/core/txns/prepare_conflict_aggregation_behavior.js
@@ -6,85 +6,85 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const failureTimeout = 5 * 1000; // 5 seconds.
- const dbName = "test";
- const collName = "prepare_conflict_aggregation_behavior";
- const outCollName = collName + "_out";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
- const outColl = testDB.getCollection(outCollName);
+const failureTimeout = 5 * 1000; // 5 seconds.
+const dbName = "test";
+const collName = "prepare_conflict_aggregation_behavior";
+const outCollName = collName + "_out";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+const outColl = testDB.getCollection(outCollName);
- testColl.drop({writeConcern: {w: "majority"}});
- outColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- assert.commandWorked(testDB.runCommand({create: outCollName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+outColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: outCollName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- const sessionOutColl = sessionDB.getCollection(outCollName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+const sessionOutColl = sessionDB.getCollection(outCollName);
- assert.commandWorked(testColl.insert({_id: 1}));
- assert.commandWorked(outColl.insert({_id: 0}));
+assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(outColl.insert({_id: 0}));
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {a: 1}));
- assert.commandWorked(sessionOutColl.update({_id: 0}, {a: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {a: 1}));
+assert.commandWorked(sessionOutColl.update({_id: 0}, {a: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Test that reads from an aggregation pipeline with $merge don't block on prepare" +
- " conflicts");
- testColl.aggregate([
- {$addFields: {b: 1}},
- {$merge: {into: outCollName, whenMatched: "fail", whenNotMatched: "insert"}}
- ]);
+jsTestLog("Test that reads from an aggregation pipeline with $merge don't block on prepare" +
+ " conflicts");
+testColl.aggregate([
+ {$addFields: {b: 1}},
+ {$merge: {into: outCollName, whenMatched: "fail", whenNotMatched: "insert"}}
+]);
- // Make sure that we can see the inserts from the aggregation but not the updates from the
- // prepared transaction.
- assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray());
+// Make sure that we can see the inserts from the aggregation but not the updates from the
+// prepared transaction.
+assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray());
- assert.commandWorked(session.abortTransaction_forTesting());
- session.startTransaction();
- assert.commandWorked(sessionOutColl.update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(session.abortTransaction_forTesting());
+session.startTransaction();
+assert.commandWorked(sessionOutColl.update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Test that writes from an aggregation pipeline block on prepare conflicts");
- let pipeline = [
- {$addFields: {c: 1}},
- {$merge: {into: outCollName, whenMatched: "replace", whenNotMatched: "insert"}}
- ];
- assert.commandFailedWithCode(testDB.runCommand({
- aggregate: collName,
- pipeline: pipeline,
- cursor: {},
- maxTimeMS: failureTimeout,
- }),
- ErrorCodes.MaxTimeMSExpired);
+jsTestLog("Test that writes from an aggregation pipeline block on prepare conflicts");
+let pipeline = [
+ {$addFields: {c: 1}},
+ {$merge: {into: outCollName, whenMatched: "replace", whenNotMatched: "insert"}}
+];
+assert.commandFailedWithCode(testDB.runCommand({
+ aggregate: collName,
+ pipeline: pipeline,
+ cursor: {},
+ maxTimeMS: failureTimeout,
+}),
+ ErrorCodes.MaxTimeMSExpired);
- // Make sure that we can't see the update from the aggregation or the prepared transaction.
- assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray());
+// Make sure that we can't see the update from the aggregation or the prepared transaction.
+assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray());
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // Make sure that the $merge pipeline works once the transaction is committed.
- testColl.aggregate(pipeline);
- assert.eq([{_id: 0}, {_id: 1, c: 1}], outColl.find().toArray());
+// Make sure that the $merge pipeline works once the transaction is committed.
+testColl.aggregate(pipeline);
+assert.eq([{_id: 0}, {_id: 1, c: 1}], outColl.find().toArray());
- // At the time of this writing, change streams can sometimes adjust the readConcern to
- // 'majority' after receiving the command and thus need to wait for read concern again. When
- // doing this, we assume that a change stream with a stage which performs writes is not allowed.
- // Test that this is true.
- pipeline = [{$changeStream: {}}, {$addFields: {d: 1}}, {$out: outCollName}];
- assert.commandFailedWithCode(testDB.runCommand({
- aggregate: collName,
- pipeline: pipeline,
- cursor: {},
- maxTimeMS: failureTimeout,
- }),
- ErrorCodes.IllegalOperation);
+// At the time of this writing, change streams can sometimes adjust the readConcern to
+// 'majority' after receiving the command and thus need to wait for read concern again. When
+// doing this, we assume that a change stream with a stage that performs writes is not allowed.
+// Test that this is true.
+pipeline = [{$changeStream: {}}, {$addFields: {d: 1}}, {$out: outCollName}];
+assert.commandFailedWithCode(testDB.runCommand({
+ aggregate: collName,
+ pipeline: pipeline,
+ cursor: {},
+ maxTimeMS: failureTimeout,
+}),
+ ErrorCodes.IllegalOperation);
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/prepare_nonexistent_transaction.js b/jstests/core/txns/prepare_nonexistent_transaction.js
index 40e0b540354..c68c9164556 100644
--- a/jstests/core/txns/prepare_nonexistent_transaction.js
+++ b/jstests/core/txns/prepare_nonexistent_transaction.js
@@ -4,98 +4,97 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "prepare_nonexistent_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "prepare_nonexistent_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const doc = {x: 1};
+const doc = {
+ x: 1
+};
- jsTestLog("Test that if there is no transaction active on the current session, errors with " +
- "'NoSuchTransaction'.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}),
- ErrorCodes.NoSuchTransaction);
+jsTestLog("Test that if there is no transaction active on the current session, errors with " +
+ "'NoSuchTransaction'.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand({prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}),
+ ErrorCodes.NoSuchTransaction);
- jsTestLog("Test that if there is a transaction running on the current session and the " +
- "'txnNumber' given is greater than the current transaction, errors with " +
- "'NoSuchTransaction'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(session.abortTransaction_forTesting());
+jsTestLog("Test that if there is a transaction running on the current session and the " +
+ "'txnNumber' given is greater than the current transaction, errors with " +
+ "'NoSuchTransaction'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+assert.commandFailedWithCode(sessionDB.adminCommand({
+ prepareTransaction: 1,
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+assert.commandWorked(session.abortTransaction_forTesting());
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- assert.commandWorked(session.abortTransaction_forTesting());
- jsTestLog("Test that if there is no transaction active on the current session, the " +
- "'txnNumber' given matches the last known transaction for this session and the " +
- "last known transaction was aborted then it errors with 'NoSuchTransaction'.");
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+assert.commandWorked(session.abortTransaction_forTesting());
+jsTestLog("Test that if there is no transaction active on the current session, the " +
+ "'txnNumber' given matches the last known transaction for this session and the " +
+ "last known transaction was aborted then it errors with 'NoSuchTransaction'.");
+assert.commandFailedWithCode(sessionDB.adminCommand({
+ prepareTransaction: 1,
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
- jsTestLog("Test that if there is a transaction running on the current session and the " +
- "'txnNumber' given is less than the current transaction, errors with " +
- "'TransactionTooOld'.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc));
- assert.commandFailedWithCode(
- sessionDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}),
- ErrorCodes.TransactionTooOld);
- assert.commandWorked(session.abortTransaction_forTesting());
+jsTestLog("Test that if there is a transaction running on the current session and the " +
+ "'txnNumber' given is less than the current transaction, errors with " +
+ "'TransactionTooOld'.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc));
+assert.commandFailedWithCode(
+ sessionDB.adminCommand({prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}),
+ ErrorCodes.TransactionTooOld);
+assert.commandWorked(session.abortTransaction_forTesting());
- jsTestLog("Test that if there is no transaction active on the current session and the " +
- "'txnNumber' given is less than the current transaction, errors with " +
- "'TransactionTooOld'.");
- assert.commandFailedWithCode(
- sessionDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}),
- ErrorCodes.TransactionTooOld);
+jsTestLog("Test that if there is no transaction active on the current session and the " +
+ "'txnNumber' given is less than the current transaction, errors with " +
+ "'TransactionTooOld'.");
+assert.commandFailedWithCode(
+ sessionDB.adminCommand({prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}),
+ ErrorCodes.TransactionTooOld);
- jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " +
- "not providing txnNumber to prepareTransaction.");
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}),
- ErrorCodes.InvalidOptions);
+jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " +
+ "not providing txnNumber to prepareTransaction.");
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " +
- "not providing autocommit to prepareTransaction.");
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- }),
- 50768);
+jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " +
+ "not providing autocommit to prepareTransaction.");
+assert.commandFailedWithCode(sessionDB.adminCommand({
+ prepareTransaction: 1,
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+}),
+ 50768);
- jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction and " +
- "providing startTransaction to prepareTransaction.");
- assert.commandFailedWithCode(sessionDB.adminCommand({
- prepareTransaction: 1,
- // The last txnNumber we used was saved on the server's session, so we use a txnNumber that
- // is greater than that to make sure it has never been seen before.
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 2),
- autocommit: false,
- startTransaction: true
- }),
- ErrorCodes.OperationNotSupportedInTransaction);
+jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction and " +
+ "providing startTransaction to prepareTransaction.");
+assert.commandFailedWithCode(sessionDB.adminCommand({
+ prepareTransaction: 1,
+ // The last txnNumber we used was saved on the server's session, so we use a txnNumber that
+ // is greater than that to make sure it has never been seen before.
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 2),
+ autocommit: false,
+ startTransaction: true
+}),
+ ErrorCodes.OperationNotSupportedInTransaction);
- session.endSession();
+session.endSession();
}());
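None of the error cases above exercise a successful prepare. A minimal sketch of the happy path, assuming a replica set (prepareTransaction is not available on standalone servers) and assuming the reply carries a prepareTimestamp field, as the helper-based tests below rely on; sketchColl is a placeholder name:

const sketchSession = db.getMongo().startSession({causalConsistency: false});
const sketchDB = sketchSession.getDatabase("test");
sketchSession.startTransaction();
assert.commandWorked(sketchDB.getCollection("sketchColl").insert({x: 1}));
// Prepare must name the active transaction and pass autocommit explicitly.
const prepareRes = assert.commandWorked(sketchDB.adminCommand({
    prepareTransaction: 1,
    txnNumber: NumberLong(sketchSession.getTxnNumber_forTesting()),
    autocommit: false
}));
// The reply's prepareTimestamp marks the point at which the transaction prepared.
assert.neq(prepareRes.prepareTimestamp, undefined);
assert.commandWorked(sketchSession.abortTransaction_forTesting());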
diff --git a/jstests/core/txns/prepare_prepared_transaction.js b/jstests/core/txns/prepare_prepared_transaction.js
index 8032e885e3e..e7148349f53 100644
--- a/jstests/core/txns/prepare_prepared_transaction.js
+++ b/jstests/core/txns/prepare_prepared_transaction.js
@@ -4,34 +4,37 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "prepare_prepared_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "prepare_prepared_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const doc1 = {_id: 1, x: 1};
+const doc1 = {
+ _id: 1,
+ x: 1
+};
- // Attempting to prepare an already prepared transaction should return successfully with a
- // prepareTimestamp.
+// Attempting to prepare an already prepared transaction should return successfully with a
+// prepareTimestamp.
- // Client's opTime is later than the prepareOpTime, so just return the prepareTimestamp.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
- const firstTimestamp = PrepareHelpers.prepareTransaction(session);
- const secondTimestamp = PrepareHelpers.prepareTransaction(session);
- // Both prepareTimestamps should be equal.
- assert.eq(firstTimestamp, secondTimestamp);
- assert.commandWorked(session.abortTransaction_forTesting());
+// Client's opTime is later than the prepareOpTime, so just return the prepareTimestamp.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
+const firstTimestamp = PrepareHelpers.prepareTransaction(session);
+const secondTimestamp = PrepareHelpers.prepareTransaction(session);
+// Both prepareTimestamps should be equal.
+assert.eq(firstTimestamp, secondTimestamp);
+assert.commandWorked(session.abortTransaction_forTesting());
- session.endSession();
+session.endSession();
}());
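PrepareHelpers.prepareTransaction itself is defined in jstests/core/txns/libs/prepare_helpers.js and is not part of this diff. A hedged sketch of the behavior the assertions above assume, namely running prepareTransaction on the session and handing back the reply's prepareTimestamp:

// Hypothetical stand-in for PrepareHelpers.prepareTransaction; see
// jstests/core/txns/libs/prepare_helpers.js for the real helper.
function prepareTransactionSketch(session) {
    const res = assert.commandWorked(session.getDatabase("admin").runCommand({
        prepareTransaction: 1,
        txnNumber: NumberLong(session.getTxnNumber_forTesting()),
        autocommit: false
    }));
    return res.prepareTimestamp;
}

Under that reading, preparing twice reissues an idempotent command, which is why firstTimestamp and secondTimestamp compare equal.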
diff --git a/jstests/core/txns/prepare_requires_fcv42.js b/jstests/core/txns/prepare_requires_fcv42.js
index a7be765a969..6f32918d332 100644
--- a/jstests/core/txns/prepare_requires_fcv42.js
+++ b/jstests/core/txns/prepare_requires_fcv42.js
@@ -4,54 +4,56 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const dbName = "test";
- const collName = "prepare_requires_fcv42";
- const testDB = db.getSiblingDB(dbName);
- const adminDB = db.getSiblingDB('admin');
-
- testDB[collName].drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDB = session.getDatabase(dbName);
-
- try {
- jsTestLog("Transaction succeeds in latest FCV.");
- checkFCV(adminDB, latestFCV);
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: "a"}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Downgrade the featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
-
- jsTestLog("Transaction fails to prepare in last stable FCV.");
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: "b"}));
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.CommandNotSupported);
- // Abort the transaction in the shell.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- } finally {
- jsTestLog("Restore the original featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
- }
-
- jsTestLog("Transaction succeeds in latest FCV after upgrade.");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const dbName = "test";
+const collName = "prepare_requires_fcv42";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB('admin');
+
+testDB[collName].drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDB = session.getDatabase(dbName);
+
+try {
+ jsTestLog("Transaction succeeds in latest FCV.");
+ checkFCV(adminDB, latestFCV);
session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: "c"}));
+ assert.commandWorked(sessionDB[collName].insert({_id: "a"}));
let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- session.endSession();
+ jsTestLog("Downgrade the featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(adminDB, lastStableFCV);
+
+ jsTestLog("Transaction fails to prepare in last stable FCV.");
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({_id: "b"}));
+ assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.CommandNotSupported);
+ // Abort the transaction in the shell.
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+} finally {
+ jsTestLog("Restore the original featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ checkFCV(adminDB, latestFCV);
+}
+
+jsTestLog("Transaction succeeds in latest FCV after upgrade.");
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].insert({_id: "c"}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+session.endSession();
}());
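The try/finally shape is the load-bearing part of this test: an assertion failure mid-test must not leave the suite running at the downgraded FCV. Reduced to the toggle itself (latestFCV, lastStableFCV, and checkFCV all come from jstests/libs/feature_compatibility_version.js):

// Downgrade: prepareTransaction should now fail with CommandNotSupported.
assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
checkFCV(db.getSiblingDB("admin"), lastStableFCV);
// Upgrade back, restoring prepare support.
assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
checkFCV(db.getSiblingDB("admin"), latestFCV);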
diff --git a/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js b/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js
index 091665d2509..14ba3cb7926 100644
--- a/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js
+++ b/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js
@@ -8,31 +8,30 @@
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const tempCollName = "prepare_transaction_fails_on_temp_collections";
- const testDB = db.getSiblingDB(dbName);
- const testTempColl = testDB.getCollection(tempCollName);
+const dbName = "test";
+const tempCollName = "prepare_transaction_fails_on_temp_collections";
+const testDB = db.getSiblingDB(dbName);
+const testTempColl = testDB.getCollection(tempCollName);
- testTempColl.drop({writeConcern: {w: "majority"}});
+testTempColl.drop({writeConcern: {w: "majority"}});
- jsTest.log("Creating a temporary collection.");
- assert.commandWorked(testDB.runCommand({
- applyOps:
- [{op: "c", ns: testDB.getName() + ".$cmd", o: {create: tempCollName, temp: true}}]
- }));
+jsTest.log("Creating a temporary collection.");
+assert.commandWorked(testDB.runCommand({
+ applyOps: [{op: "c", ns: testDB.getName() + ".$cmd", o: {create: tempCollName, temp: true}}]
+}));
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionTempColl = sessionDB.getCollection(tempCollName);
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionTempColl = sessionDB.getCollection(tempCollName);
- jsTest.log("Setting up a transaction with an operation on a temporary collection.");
- session.startTransaction();
- assert.commandWorked(sessionTempColl.insert({x: 1000}));
+jsTest.log("Setting up a transaction with an operation on a temporary collection.");
+session.startTransaction();
+assert.commandWorked(sessionTempColl.insert({x: 1000}));
- jsTest.log("Calling prepareTransaction for a transaction with operations against a " +
- "temporary collection should now fail.");
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.OperationNotSupportedInTransaction);
+jsTest.log("Calling prepareTransaction for a transaction with operations against a " +
+ "temporary collection should now fail.");
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.OperationNotSupportedInTransaction);
})();
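The detour through applyOps is deliberate: the temp flag is internal and is not exposed through the plain create command used elsewhere in these tests, so the test applies the create oplog entry directly. Reduced to the pattern (someTempColl is a placeholder name):

assert.commandWorked(db.getSiblingDB("test").runCommand({
    // op "c" applies a command oplog entry; o is the create command it carries.
    applyOps: [{op: "c", ns: "test.$cmd", o: {create: "someTempColl", temp: true}}]
}));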
diff --git a/jstests/core/txns/prepare_transaction_unique_index_conflict.js b/jstests/core/txns/prepare_transaction_unique_index_conflict.js
index e364a1e0a0e..9fc0dae7a0b 100644
--- a/jstests/core/txns/prepare_transaction_unique_index_conflict.js
+++ b/jstests/core/txns/prepare_transaction_unique_index_conflict.js
@@ -9,36 +9,36 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "prepare_transaction_unique_index_conflict";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "prepare_transaction_unique_index_conflict";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1, a: 0}));
+assert.commandWorked(testColl.insert({_id: 1, a: 0}));
- // Ensure that the "a" field is unique.
- assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true}));
+// Ensure that the "a" field is unique.
+assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true}));
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 2, a: 1}));
- assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 2, a: 1}));
+assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- // While trying to insert this document, the node will have to perform reads to check if it
- // violates the unique index, which should cause a prepare conflict.
- assert.commandFailedWithCode(
- testDB.runCommand({insert: collName, documents: [{_id: 3, a: 1}], maxTimeMS: 5000}),
- ErrorCodes.MaxTimeMSExpired);
+// While trying to insert this document, the node will have to perform reads to check if it
+// violates the unique index, which should cause a prepare conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [{_id: 3, a: 1}], maxTimeMS: 5000}),
+ ErrorCodes.MaxTimeMSExpired);
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
})();
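The assertion style is worth spelling out: a prepare conflict never reaches the client as its own error code. The insert must read the unique index on "a" to check for duplicates, finds the prepared-but-uncommitted entry, and blocks until the prepared transaction resolves, so the test bounds the wait with maxTimeMS and asserts on the timeout instead:

// Sketch of the same probe with a fresh _id (hypothetical values); only the
// time limit ends the wait while the transaction above stays prepared.
assert.commandFailedWithCode(
    testDB.runCommand({insert: collName, documents: [{_id: 4, a: 1}], maxTimeMS: 5000}),
    ErrorCodes.MaxTimeMSExpired);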
diff --git a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js
index da915154ee8..0458f213960 100644
--- a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js
+++ b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js
@@ -1,65 +1,78 @@
// Test that prepared transactions don't block DDL operations on non-conflicting collections.
// @tags: [uses_transactions, uses_prepare_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "prepared_transactions_do_not_block_non_conflicting_ddl";
- const collName = "transactions_collection";
- const otherDBName = "prepared_transactions_do_not_block_non_conflicting_ddl_other";
- const otherCollName = "transactions_collection_other";
- const testDB = db.getSiblingDB(dbName);
- const otherDB = db.getSiblingDB(otherDBName);
+load("jstests/core/txns/libs/prepare_helpers.js");
+const dbName = "prepared_transactions_do_not_block_non_conflicting_ddl";
+const collName = "transactions_collection";
+const otherDBName = "prepared_transactions_do_not_block_non_conflicting_ddl_other";
+const otherCollName = "transactions_collection_other";
+const testDB = db.getSiblingDB(dbName);
+const otherDB = db.getSiblingDB(otherDBName);
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB[collName];
- // Setup.
- testDB.dropDatabase();
- otherDB.dropDatabase();
- assert.commandWorked(sessionColl.insert({_id: 1, x: 0}));
+// Setup.
+testDB.dropDatabase();
+otherDB.dropDatabase();
+assert.commandWorked(sessionColl.insert({_id: 1, x: 0}));
- /**
- * Tests that DDL operations on non-conflicting namespaces don't block on transactions.
- */
- function testSuccess(cmdDBName, ddlCmd) {
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(testDB.getSiblingDB(cmdDBName).runCommand(ddlCmd));
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- }
+/**
+ * Tests that DDL operations on non-conflicting namespaces don't block on transactions.
+ */
+function testSuccess(cmdDBName, ddlCmd) {
+ session.startTransaction();
+ assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(testDB.getSiblingDB(cmdDBName).runCommand(ddlCmd));
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+}
- jsTest.log("Test 'create'.");
- const createCmd = {create: collName};
- testSuccess(otherDBName, createCmd);
+jsTest.log("Test 'create'.");
+const createCmd = {
+ create: collName
+};
+testSuccess(otherDBName, createCmd);
- jsTest.log("Test 'createIndexes'.");
- const createIndexesCmd = {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]};
- testSuccess(otherDBName, createIndexesCmd);
+jsTest.log("Test 'createIndexes'.");
+const createIndexesCmd = {
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: "x_1"}]
+};
+testSuccess(otherDBName, createIndexesCmd);
- jsTest.log("Test 'dropIndexes'.");
- const dropIndexesCmd = {dropIndexes: collName, index: "x_1"};
- testSuccess(otherDBName, dropIndexesCmd);
+jsTest.log("Test 'dropIndexes'.");
+const dropIndexesCmd = {
+ dropIndexes: collName,
+ index: "x_1"
+};
+testSuccess(otherDBName, dropIndexesCmd);
- sessionColl.createIndex({multiKeyField: 1});
- jsTest.log("Test 'insert' that enables multi-key index on the same collection.");
- const insertAndSetMultiKeyCmd = {insert: collName, documents: [{multiKeyField: [1, 2]}]};
- testSuccess(dbName, insertAndSetMultiKeyCmd);
+sessionColl.createIndex({multiKeyField: 1});
+jsTest.log("Test 'insert' that enables multi-key index on the same collection.");
+const insertAndSetMultiKeyCmd = {
+ insert: collName,
+ documents: [{multiKeyField: [1, 2]}]
+};
+testSuccess(dbName, insertAndSetMultiKeyCmd);
- jsTest.log("Test 'drop'.");
- const dropCmd = {drop: collName};
- testSuccess(otherDBName, dropCmd);
+jsTest.log("Test 'drop'.");
+const dropCmd = {
+ drop: collName
+};
+testSuccess(otherDBName, dropCmd);
- jsTest.log("Test 'renameCollection'.");
- assert.commandWorked(otherDB.getCollection(collName).insert({x: "doc-for-rename-collection"}));
- otherDB.runCommand({drop: otherCollName});
- const renameCollectionCmd = {
- renameCollection: otherDBName + "." + collName,
- to: otherDBName + "." + otherCollName
- };
- testSuccess("admin", renameCollectionCmd);
+jsTest.log("Test 'renameCollection'.");
+assert.commandWorked(otherDB.getCollection(collName).insert({x: "doc-for-rename-collection"}));
+otherDB.runCommand({drop: otherCollName});
+const renameCollectionCmd = {
+ renameCollection: otherDBName + "." + collName,
+ to: otherDBName + "." + otherCollName
+};
+testSuccess("admin", renameCollectionCmd);
- session.endSession();
+session.endSession();
}());
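testSuccess(cmdDBName, ddlCmd) is generic over the DDL command, so further non-conflicting commands slot in the same way. A hypothetical extra case, not part of the test, reusing the same names:

// Hypothetical: collMod on the renamed collection in the other database.
const collModCmd = {
    collMod: otherCollName,
    validationLevel: "off"
};
testSuccess(otherDBName, collModCmd);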
diff --git a/jstests/core/txns/read_concerns.js b/jstests/core/txns/read_concerns.js
index ffdd381128a..409236a5ffd 100644
--- a/jstests/core/txns/read_concerns.js
+++ b/jstests/core/txns/read_concerns.js
@@ -2,61 +2,61 @@
//
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "supported_read_concern_levels";
-
- function runTest(level, sessionOptions, supported) {
- jsTestLog("Testing transactions with read concern level: " + level +
- " and sessionOptions: " + tojson(sessionOptions));
-
- db.getSiblingDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- // Set up the collection.
- assert.writeOK(sessionColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- if (level) {
- session.startTransaction({readConcern: {level: level}});
- } else {
- session.startTransaction();
- }
-
- const res = sessionDB.runCommand({find: collName});
- if (supported) {
- assert.commandWorked(res,
- "expected success, read concern level: " + level +
- ", sessionOptions: " + tojson(sessionOptions));
- assert.commandWorked(session.commitTransaction_forTesting());
- } else {
- assert.commandFailedWithCode(res,
- ErrorCodes.InvalidOptions,
- "expected failure, read concern level: " + level +
- ", sessionOptions: " + tojson(sessionOptions));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
-
- session.endSession();
- }
+"use strict";
+
+const dbName = "test";
+const collName = "supported_read_concern_levels";
+
+function runTest(level, sessionOptions, supported) {
+ jsTestLog("Testing transactions with read concern level: " + level +
+ " and sessionOptions: " + tojson(sessionOptions));
+
+ db.getSiblingDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+ const session = db.getMongo().startSession(sessionOptions);
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
- // Starting a txn with no read concern level is allowed.
- runTest(undefined, {causalConsistency: false}, true /*supported*/);
- runTest(undefined, {causalConsistency: true}, true /*supported*/);
+ // Set up the collection.
+ assert.writeOK(sessionColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- const kSupportedLevels = ["local", "majority", "snapshot"];
- for (let level of kSupportedLevels) {
- runTest(level, {causalConsistency: false}, true /*supported*/);
- runTest(level, {causalConsistency: true}, true /*supported*/);
+ if (level) {
+ session.startTransaction({readConcern: {level: level}});
+ } else {
+ session.startTransaction();
}
- const kUnsupportedLevels = ["available", "linearizable"];
- for (let level of kUnsupportedLevels) {
- runTest(level, {causalConsistency: false}, false /*supported*/);
- runTest(level, {causalConsistency: true}, false /*supported*/);
+ const res = sessionDB.runCommand({find: collName});
+ if (supported) {
+ assert.commandWorked(res,
+ "expected success, read concern level: " + level +
+ ", sessionOptions: " + tojson(sessionOptions));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ } else {
+ assert.commandFailedWithCode(res,
+ ErrorCodes.InvalidOptions,
+ "expected failure, read concern level: " + level +
+ ", sessionOptions: " + tojson(sessionOptions));
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
}
+
+ session.endSession();
+}
+
+// Starting a txn with no read concern level is allowed.
+runTest(undefined, {causalConsistency: false}, true /*supported*/);
+runTest(undefined, {causalConsistency: true}, true /*supported*/);
+
+const kSupportedLevels = ["local", "majority", "snapshot"];
+for (let level of kSupportedLevels) {
+ runTest(level, {causalConsistency: false}, true /*supported*/);
+ runTest(level, {causalConsistency: true}, true /*supported*/);
+}
+
+const kUnsupportedLevels = ["available", "linearizable"];
+for (let level of kUnsupportedLevels) {
+ runTest(level, {causalConsistency: false}, false /*supported*/);
+ runTest(level, {causalConsistency: true}, false /*supported*/);
+}
}());
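One detail of runTest deserves a note: for an unsupported level, the rejected find has already aborted the transaction server-side, which is why the shell's own abort is asserted to fail as well. Compressed to those two steps (inside runTest's scope):

session.startTransaction({readConcern: {level: "linearizable"}});
// The read is rejected outright...
assert.commandFailedWithCode(sessionDB.runCommand({find: collName}), ErrorCodes.InvalidOptions);
// ...and by then the server no longer has a transaction to abort.
assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);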
diff --git a/jstests/core/txns/read_own_multikey_writes.js b/jstests/core/txns/read_own_multikey_writes.js
index 69f6f035f3e..9af97dc4baa 100644
--- a/jstests/core/txns/read_own_multikey_writes.js
+++ b/jstests/core/txns/read_own_multikey_writes.js
@@ -1,32 +1,32 @@
// Tests that multikey updates made inside a transaction are visible to that transaction's reads.
// @tags: [assumes_unsharded_collection, uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = 'test';
- const collName = 'testReadOwnMultikeyWrites';
- // Use majority write concern to clear the drop-pending state that can cause lock conflicts
- // with transactions.
- db.getSiblingDB(dbName).getCollection(collName).drop({writeConcern: {w: "majority"}});
+const dbName = 'test';
+const collName = 'testReadOwnMultikeyWrites';
+// Use majority write concern to clear the drop-pending state that can cause lock conflicts
+// with transactions.
+db.getSiblingDB(dbName).getCollection(collName).drop({writeConcern: {w: "majority"}});
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
- assert.commandWorked(sessionDb.runCommand({create: collName}));
+assert.commandWorked(sessionDb.runCommand({create: collName}));
- assert.writeOK(sessionColl.insert({a: 1}));
- assert.commandWorked(sessionColl.createIndex({a: 1}));
+assert.writeOK(sessionColl.insert({a: 1}));
+assert.commandWorked(sessionColl.createIndex({a: 1}));
- session.startTransaction();
- assert.writeOK(sessionColl.update({}, {$set: {a: [1, 2, 3]}}));
- assert.eq(1, sessionColl.find({}, {_id: 0, a: 1}).sort({a: 1}).itcount());
- assert.commandWorked(session.commitTransaction_forTesting());
+session.startTransaction();
+assert.writeOK(sessionColl.update({}, {$set: {a: [1, 2, 3]}}));
+assert.eq(1, sessionColl.find({}, {_id: 0, a: 1}).sort({a: 1}).itcount());
+assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(1,
- db.getSiblingDB(dbName)
- .getCollection(collName)
- .find({}, {_id: 0, a: 1})
- .sort({a: 1})
- .itcount());
+assert.eq(1,
+ db.getSiblingDB(dbName)
+ .getCollection(collName)
+ .find({}, {_id: 0, a: 1})
+ .sort({a: 1})
+ .itcount());
})();
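The essential motion here: the $set replaces a scalar with an array, which flips the {a: 1} index to multikey inside the transaction, and the subsequent indexed, sorted read must already see that flip through the transaction's own snapshot. The same three lines, with comments:

session.startTransaction();
// A scalar-to-array update makes the {a: 1} index multikey mid-transaction.
assert.writeOK(sessionColl.update({}, {$set: {a: [1, 2, 3]}}));
// The sorted read must treat the index as multikey already, returning the
// document once rather than once per array element.
assert.eq(1, sessionColl.find({}, {_id: 0, a: 1}).sort({a: 1}).itcount());
assert.commandWorked(session.commitTransaction_forTesting());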
diff --git a/jstests/core/txns/rename_collection_not_blocked_by_txn.js b/jstests/core/txns/rename_collection_not_blocked_by_txn.js
index b5c6cb4c0c4..4c3921d6c12 100644
--- a/jstests/core/txns/rename_collection_not_blocked_by_txn.js
+++ b/jstests/core/txns/rename_collection_not_blocked_by_txn.js
@@ -5,31 +5,30 @@
*/
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let db = rst.getPrimary().getDB("test");
+let db = rst.getPrimary().getDB("test");
- assert.commandWorked(db.runCommand({insert: "t", documents: [{x: 1}]}));
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "t", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]}));
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("test");
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase("test");
- session.startTransaction();
- // This holds a database IX lock and a collection IX lock on "test.t".
- sessionDb.t.insert({y: 1});
+session.startTransaction();
+// This holds a database IX lock and a collection IX lock on "test.t".
+sessionDb.t.insert({y: 1});
- // This only requires database IX lock.
- assert.commandWorked(
- db.adminCommand({renameCollection: "test.a", to: "test.b", dropTarget: true}));
- assert.commandWorked(db.adminCommand({renameCollection: "test.b", to: "test.c"}));
+// This only requires database IX lock.
+assert.commandWorked(db.adminCommand({renameCollection: "test.a", to: "test.b", dropTarget: true}));
+assert.commandWorked(db.adminCommand({renameCollection: "test.b", to: "test.c"}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- rst.stopSet();
+rst.stopSet();
})();
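The two comments above carry the whole argument: the open transaction holds a database IX lock plus a collection IX lock on test.t, while the renames need only lock modes compatible with those (per the comment, database IX), so neither rename queues behind the transaction. To watch the granted modes while the transaction is open, a diagnostic such as:

// Hypothetical diagnostic, not part of the test: lockInfo reports currently
// granted locks and any pending requests.
printjson(assert.commandWorked(db.adminCommand({lockInfo: 1})));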
diff --git a/jstests/core/txns/repeatable_reads_in_transaction.js b/jstests/core/txns/repeatable_reads_in_transaction.js
index 2aa80d4cc71..3286b6e72cb 100644
--- a/jstests/core/txns/repeatable_reads_in_transaction.js
+++ b/jstests/core/txns/repeatable_reads_in_transaction.js
@@ -2,75 +2,75 @@
// read the same data even if it was modified outside of the transaction.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "repeatable_reads_in_transaction";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "repeatable_reads_in_transaction";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
- // Initialize second session variables.
- const session2 = testDB.getMongo().startSession(sessionOptions);
- const session2Db = session2.getDatabase(dbName);
- const session2Coll = session2Db.getCollection(collName);
+// Initialize second session variables.
+const session2 = testDB.getMongo().startSession(sessionOptions);
+const session2Db = session2.getDatabase(dbName);
+const session2Coll = session2Db.getCollection(collName);
- jsTest.log("Prepopulate the collection.");
- assert.writeOK(
- testColl.insert([{_id: 0}, {_id: 1}, {_id: 2}], {writeConcern: {w: "majority"}}));
+jsTest.log("Prepopulate the collection.");
+assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}, {_id: 2}], {writeConcern: {w: "majority"}}));
- // Create a constant array of documents we expect to be returned during a read-only transaction.
- // The value should not change since external changes should not be visible within this
- // transaction.
- const expectedDocs = [{_id: 0}, {_id: 1}, {_id: 2}];
+// Create a constant array of documents we expect to be returned during a read-only transaction.
+// The value should not change since external changes should not be visible within this
+// transaction.
+const expectedDocs = [{_id: 0}, {_id: 1}, {_id: 2}];
- jsTestLog("Start a read-only transaction on the first session.");
- session.startTransaction({writeConcern: {w: "majority"}});
+jsTestLog("Start a read-only transaction on the first session.");
+session.startTransaction({writeConcern: {w: "majority"}});
- assert.sameMembers(expectedDocs, sessionColl.find().toArray());
+assert.sameMembers(expectedDocs, sessionColl.find().toArray());
- jsTestLog("Start a transaction on the second session that modifies the same collection.");
- session2.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+jsTestLog("Start a transaction on the second session that modifies the same collection.");
+session2.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(session2Coll.insert({_id: 3}));
- assert.commandWorked(session2Coll.update({_id: 1}, {$set: {a: 1}}));
- assert.commandWorked(session2Coll.deleteOne({_id: 2}));
+assert.commandWorked(session2Coll.insert({_id: 3}));
+assert.commandWorked(session2Coll.update({_id: 1}, {$set: {a: 1}}));
+assert.commandWorked(session2Coll.deleteOne({_id: 2}));
- jsTestLog(
- "Continue reading in the first transaction. Changes from the second transaction should not be visible.");
+jsTestLog(
+ "Continue reading in the first transaction. Changes from the second transaction should not be visible.");
- assert.sameMembers(expectedDocs, sessionColl.find().toArray());
+assert.sameMembers(expectedDocs, sessionColl.find().toArray());
- jsTestLog("Committing the second transaction.");
- assert.commandWorked(session2.commitTransaction_forTesting());
+jsTestLog("Committing the second transaction.");
+assert.commandWorked(session2.commitTransaction_forTesting());
- jsTestLog(
- "Committed changes from the second transaction should still not be visible to the first.");
+jsTestLog(
+ "Committed changes from the second transaction should still not be visible to the first.");
- assert.sameMembers(expectedDocs, sessionColl.find().toArray());
+assert.sameMembers(expectedDocs, sessionColl.find().toArray());
- jsTestLog(
- "Writes that occur outside of a transaction should not be visible to a read only transaction.");
+jsTestLog(
+ "Writes that occur outside of a transaction should not be visible to a read only transaction.");
- assert.writeOK(testColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(testColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
- assert.sameMembers(expectedDocs, sessionColl.find().toArray());
+assert.sameMembers(expectedDocs, sessionColl.find().toArray());
- jsTestLog("Committing first transaction.");
- assert.commandWorked(session.commitTransaction_forTesting());
+jsTestLog("Committing first transaction.");
+assert.commandWorked(session.commitTransaction_forTesting());
- // Make sure the correct documents are visible now that both transactions have committed.
- assert.sameMembers([{_id: 0}, {_id: 1, a: 1}, {_id: 3}, {_id: 4}],
- sessionColl.find().toArray());
+// Make sure the correct documents are visible now that both transactions have committed.
+assert.sameMembers([{_id: 0}, {_id: 1, a: 1}, {_id: 3}, {_id: 4}], sessionColl.find().toArray());
- session.endSession();
- session2.endSession();
+session.endSession();
+session2.endSession();
}());
diff --git a/jstests/core/txns/shell_prompt_in_transaction.js b/jstests/core/txns/shell_prompt_in_transaction.js
index 019ea5595de..ab96bef95f2 100644
--- a/jstests/core/txns/shell_prompt_in_transaction.js
+++ b/jstests/core/txns/shell_prompt_in_transaction.js
@@ -2,42 +2,44 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const collName = "shell_prompt_in_transaction";
-
- db.getCollection(collName).drop({writeConcern: {w: "majority"}});
- assert.commandWorked(db.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Override the global "db".
- const session = db.getMongo().startSession();
- db = session.getDatabase(db.getName());
- const coll = db.getCollection(collName);
-
- function simulatePrompt() {
- __promptWrapper__(defaultPrompt);
- }
-
- // Start a transaction, so the session will attach txn info to the commands running on it.
- session.startTransaction();
- jsTestLog("Run shell prompt to simulate a user hitting enter.");
- simulatePrompt();
- const doc = {_id: "shell-write"};
- assert.commandWorked(coll.insert(doc));
- assert.docEq(doc, coll.findOne());
- simulatePrompt();
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.docEq(null, coll.findOne());
-
- // Start a transaction, so the session has a running transaction now.
- simulatePrompt();
- session.startTransaction();
- jsTestLog("Run shell prompt to simulate a user hitting enter.");
- simulatePrompt();
- assert.commandWorked(coll.insert(doc));
- simulatePrompt();
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.docEq(doc, coll.findOne());
-
- coll.drop({writeConcern: {w: "majority"}});
+"use strict";
+
+const collName = "shell_prompt_in_transaction";
+
+db.getCollection(collName).drop({writeConcern: {w: "majority"}});
+assert.commandWorked(db.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Override the global "db".
+const session = db.getMongo().startSession();
+db = session.getDatabase(db.getName());
+const coll = db.getCollection(collName);
+
+function simulatePrompt() {
+ __promptWrapper__(defaultPrompt);
+}
+
+// Start a transaction, so the session will attach txn info to the commands running on it.
+session.startTransaction();
+jsTestLog("Run shell prompt to simulate a user hitting enter.");
+simulatePrompt();
+const doc = {
+ _id: "shell-write"
+};
+assert.commandWorked(coll.insert(doc));
+assert.docEq(doc, coll.findOne());
+simulatePrompt();
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.docEq(null, coll.findOne());
+
+// Start a transaction, so the session has a running transaction now.
+simulatePrompt();
+session.startTransaction();
+jsTestLog("Run shell prompt to simulate a user hitting enter.");
+simulatePrompt();
+assert.commandWorked(coll.insert(doc));
+simulatePrompt();
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.docEq(doc, coll.findOne());
+
+coll.drop({writeConcern: {w: "majority"}});
})();
diff --git a/jstests/core/txns/speculative_snapshot_includes_all_writes.js b/jstests/core/txns/speculative_snapshot_includes_all_writes.js
index d3c3b01f827..efeefdfa889 100644
--- a/jstests/core/txns/speculative_snapshot_includes_all_writes.js
+++ b/jstests/core/txns/speculative_snapshot_includes_all_writes.js
@@ -4,111 +4,111 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- const dbName = "test";
- const collName = "speculative_snapshot_includes_all_writes_1";
- const collName2 = "speculative_snapshot_includes_all_writes_2";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
- const testColl2 = testDB[collName2];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
- assert.commandWorked(testDB.createCollection(collName2, {writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
-
- function startSessionAndTransaction(readConcernLevel) {
- let session = db.getMongo().startSession(sessionOptions);
- jsTestLog("Start a transaction with readConcern " + readConcernLevel.level + ".");
- session.startTransaction({readConcern: readConcernLevel});
- return session;
- }
-
- let checkReads = (session, collExpected, coll2Expected) => {
- let sessionDb = session.getDatabase(dbName);
- let coll = sessionDb.getCollection(collName);
- let coll2 = sessionDb.getCollection(collName2);
- assert.sameMembers(collExpected, coll.find().toArray());
- assert.sameMembers(coll2Expected, coll2.find().toArray());
- };
-
- // Clear ramlog so checkLog can't find log messages from previous times this fail point was
- // enabled.
- assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
-
- jsTest.log("Prepopulate the collections.");
- assert.commandWorked(testColl.insert([{_id: 0}], {writeConcern: {w: "majority"}}));
- assert.commandWorked(testColl2.insert([{_id: "a"}], {writeConcern: {w: "majority"}}));
-
- jsTest.log("Create the uncommitted write.");
-
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: testColl2.getFullName()}
- }));
-
- const joinHungWrite = startParallelShell(() => {
- assert.commandWorked(
- db.getSiblingDB("test").speculative_snapshot_includes_all_writes_2.insert(
- {_id: "b"}, {writeConcern: {w: "majority"}}));
- });
-
- checkLog.contains(
- db.getMongo(),
- "hangAfterCollectionInserts fail point enabled for " + testColl2.getFullName());
-
- jsTest.log("Create a write following the uncommitted write.");
- // Note this write must use local write concern; it cannot be majority committed until
- // the prior uncommitted write is committed.
- assert.commandWorked(testColl.insert([{_id: 1}]));
-
- const snapshotSession = startSessionAndTransaction({level: "snapshot"});
- checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]);
-
- const majoritySession = startSessionAndTransaction({level: "majority"});
- checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
-
- const localSession = startSessionAndTransaction({level: "local"});
- checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
-
- const defaultSession = startSessionAndTransaction({});
- checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
-
- jsTestLog("Allow the uncommitted write to finish.");
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "off",
- }));
-
- joinHungWrite();
-
- jsTestLog("Double-checking that writes not committed at start of snapshot cannot appear.");
- checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]);
-
- jsTestLog(
-     "Double-checking that writes performed before the start of a transaction with read concern 'majority' or lower must appear.");
- checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
- checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
- checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
-
- jsTestLog("Committing transactions.");
- assert.commandWorked(snapshotSession.commitTransaction_forTesting());
- assert.commandWorked(majoritySession.commitTransaction_forTesting());
- assert.commandWorked(localSession.commitTransaction_forTesting());
- assert.commandWorked(defaultSession.commitTransaction_forTesting());
-
- jsTestLog("A new local read must see all committed writes.");
- checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}, {_id: "b"}]);
-
- snapshotSession.endSession();
- majoritySession.endSession();
- localSession.endSession();
- defaultSession.endSession();
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+const dbName = "test";
+const collName = "speculative_snapshot_includes_all_writes_1";
+const collName2 = "speculative_snapshot_includes_all_writes_2";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+const testColl2 = testDB[collName2];
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(collName2, {writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+
+function startSessionAndTransaction(readConcernLevel) {
+ let session = db.getMongo().startSession(sessionOptions);
+ jsTestLog("Start a transaction with readConcern " + readConcernLevel.level + ".");
+ session.startTransaction({readConcern: readConcernLevel});
+ return session;
+}
+
+let checkReads = (session, collExpected, coll2Expected) => {
+ let sessionDb = session.getDatabase(dbName);
+ let coll = sessionDb.getCollection(collName);
+ let coll2 = sessionDb.getCollection(collName2);
+ assert.sameMembers(collExpected, coll.find().toArray());
+ assert.sameMembers(coll2Expected, coll2.find().toArray());
+};
+
+// Clear ramlog so checkLog can't find log messages from previous times this fail point was
+// enabled.
+assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
+
+jsTest.log("Prepopulate the collections.");
+assert.commandWorked(testColl.insert([{_id: 0}], {writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl2.insert([{_id: "a"}], {writeConcern: {w: "majority"}}));
+
+jsTest.log("Create the uncommitted write.");
+
+assert.commandWorked(db.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: testColl2.getFullName()}
+}));
+
+const joinHungWrite = startParallelShell(() => {
+ assert.commandWorked(db.getSiblingDB("test").speculative_snapshot_includes_all_writes_2.insert(
+ {_id: "b"}, {writeConcern: {w: "majority"}}));
+});
+
+checkLog.contains(db.getMongo(),
+ "hangAfterCollectionInserts fail point enabled for " + testColl2.getFullName());
+
+jsTest.log("Create a write following the uncommitted write.");
+// Note this write must use local write concern; it cannot be majority committed until
+// the prior uncommitted write is committed.
+assert.commandWorked(testColl.insert([{_id: 1}]));
+
+const snapshotSession = startSessionAndTransaction({level: "snapshot"});
+checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]);
+
+const majoritySession = startSessionAndTransaction({level: "majority"});
+checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
+
+const localSession = startSessionAndTransaction({level: "local"});
+checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
+
+const defaultSession = startSessionAndTransaction({});
+checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
+
+jsTestLog("Allow the uncommitted write to finish.");
+assert.commandWorked(db.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "off",
+}));
+
+joinHungWrite();
+
+jsTestLog("Double-checking that writes not committed at start of snapshot cannot appear.");
+checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]);
+
+jsTestLog(
+    "Double-checking that writes performed before the start of a transaction with read concern 'majority' or lower must appear.");
+checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
+checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
+checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]);
+
+jsTestLog("Committing transactions.");
+assert.commandWorked(snapshotSession.commitTransaction_forTesting());
+assert.commandWorked(majoritySession.commitTransaction_forTesting());
+assert.commandWorked(localSession.commitTransaction_forTesting());
+assert.commandWorked(defaultSession.commitTransaction_forTesting());
+
+jsTestLog("A new local read must see all committed writes.");
+checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}, {_id: "b"}]);
+
+snapshotSession.endSession();
+majoritySession.endSession();
+localSession.endSession();
+defaultSession.endSession();
}());
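The scaffolding above is a reusable pattern: enable a hang fail point, start the write in a parallel shell, wait on the log line, assert while the write is invisible, then release. Its skeleton, detached from this test (someColl is a placeholder; checkLog comes from jstests/libs/check_log.js):

assert.commandWorked(
    db.adminCommand({configureFailPoint: "hangAfterCollectionInserts", mode: "alwaysOn"}));
const joinShell = startParallelShell(() => {
    assert.commandWorked(db.getSiblingDB("test").someColl.insert({_id: "hung"}));
});
// Poll the ramlog until the fail point reports the insert is hanging.
checkLog.contains(db.getMongo(), "hangAfterCollectionInserts fail point enabled");
// ... assertions against the not-yet-visible write go here ...
assert.commandWorked(
    db.adminCommand({configureFailPoint: "hangAfterCollectionInserts", mode: "off"}));
joinShell();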
diff --git a/jstests/core/txns/start_transaction_with_read.js b/jstests/core/txns/start_transaction_with_read.js
index f49a4518171..045b9af1083 100644
--- a/jstests/core/txns/start_transaction_with_read.js
+++ b/jstests/core/txns/start_transaction_with_read.js
@@ -1,52 +1,57 @@
// Test transaction starting with read.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "start_transaction_with_read";
+const dbName = "test";
+const collName = "start_transaction_with_read";
- const testDB = db.getSiblingDB(dbName);
- const coll = testDB[collName];
+const testDB = db.getSiblingDB(dbName);
+const coll = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}});
+testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}});
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
- // Non-transactional write to give something to find.
- const initialDoc = {_id: "pretransaction1", x: 0};
- assert.writeOK(sessionColl.insert(initialDoc, {writeConcern: {w: "majority"}}));
+// Non-transactional write to give something to find.
+const initialDoc = {
+ _id: "pretransaction1",
+ x: 0
+};
+assert.writeOK(sessionColl.insert(initialDoc, {writeConcern: {w: "majority"}}));
- jsTest.log("Start a transaction with a read");
+jsTest.log("Start a transaction with a read");
- session.startTransaction();
+session.startTransaction();
- let docs = sessionColl.find({}).toArray();
- assert.sameMembers(docs, [initialDoc]);
+let docs = sessionColl.find({}).toArray();
+assert.sameMembers(docs, [initialDoc]);
- jsTest.log("Insert two documents in a transaction");
+jsTest.log("Insert two documents in a transaction");
- // Insert a doc within the transaction.
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+// Insert a doc within the transaction.
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- // Read in the same transaction returns the doc.
- docs = sessionColl.find({_id: "insert-1"}).toArray();
- assert.sameMembers(docs, [{_id: "insert-1"}]);
+// Read in the same transaction returns the doc.
+docs = sessionColl.find({_id: "insert-1"}).toArray();
+assert.sameMembers(docs, [{_id: "insert-1"}]);
- // Insert a doc within a transaction.
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+// Insert a doc within a transaction.
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read with default read concern sees the committed transaction.
- assert.eq({_id: "insert-1"}, coll.findOne({_id: "insert-1"}));
- assert.eq({_id: "insert-2"}, coll.findOne({_id: "insert-2"}));
- assert.eq(initialDoc, coll.findOne(initialDoc));
+// Read with default read concern sees the committed transaction.
+assert.eq({_id: "insert-1"}, coll.findOne({_id: "insert-1"}));
+assert.eq({_id: "insert-2"}, coll.findOne({_id: "insert-2"}));
+assert.eq(initialDoc, coll.findOne(initialDoc));
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/statement_ids_accepted.js b/jstests/core/txns/statement_ids_accepted.js
index 55aa90ce782..05640ead1f8 100644
--- a/jstests/core/txns/statement_ids_accepted.js
+++ b/jstests/core/txns/statement_ids_accepted.js
@@ -2,122 +2,199 @@
// commands that are allowed in transactions.
// @tags: [uses_transactions, uses_prepare_transaction]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "statement_ids_accepted";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "statement_ids_accepted";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- let txnNumber = 0;
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+let txnNumber = 0;
- jsTestLog("Check that abortTransaction accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- // abortTransaction can only be run on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(1),
- autocommit: false
- }));
+jsTestLog("Check that abortTransaction accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+// abortTransaction can only be run on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(1),
+ autocommit: false
+}));
- jsTestLog("Check that aggregate accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- aggregate: collName,
- cursor: {},
- pipeline: [{$match: {}}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
+jsTestLog("Check that aggregate accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ aggregate: collName,
+ cursor: {},
+ pipeline: [{$match: {}}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
- // The applyOps command is intentionally left out.
+// The applyOps command is intentionally left out.
- jsTestLog("Check that commitTransaction accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- // commitTransaction can only be run on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(1),
- autocommit: false
- }));
+jsTestLog("Check that commitTransaction accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+// commitTransaction can only be run on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(1),
+ autocommit: false
+}));
- jsTestLog("Check that delete accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- delete: collName,
- deletes: [{q: {}, limit: 1}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
+jsTestLog("Check that delete accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ delete: collName,
+ deletes: [{q: {}, limit: 1}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
- jsTestLog("Check that distinct accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- distinct: collName,
- key: "x",
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
+jsTestLog("Check that distinct accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ distinct: collName,
+ key: "x",
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
- // The doTxn command is intentionally left out.
+// The doTxn command is intentionally left out.
- jsTestLog("Check that find and getmore accept a statement ID");
- // Put in some data to find so getMore has a cursor to use.
- assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}], {writeConcern: {w: "majority"}}));
- let res = assert.commandWorked(sessionDb.runCommand({
- find: collName,
- batchSize: 1,
- filter: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
+jsTestLog("Check that find and getmore accept a statement ID");
+// Put in some data to find so getMore has a cursor to use.
+assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}], {writeConcern: {w: "majority"}}));
+let res = assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ batchSize: 1,
+ filter: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
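+// The getMore continues the transaction begun by the find above: it reuses the
+// same txnNumber and takes the next stmtId rather than setting startTransaction.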
+assert.commandWorked(sessionDb.runCommand({
+ getMore: res.cursor.id,
+ collection: collName,
+ batchSize: 1,
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(1),
+ autocommit: false
+}));
+
+jsTestLog("Check that findandmodify accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ findandmodify: collName,
+ remove: true,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
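+// 'findAndModify' is the camelCase alias of the same command; both spellings
+// must accept a statement ID.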
+jsTestLog("Check that findAndModify accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ findAndModify: collName,
+ remove: true,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Abort the transaction to release locks.
+// abortTransaction can only be run on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(0),
+ autocommit: false
+}));
+jsTestLog("Check that insert accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "doc1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Abort the transaction to release locks.
+// abortTransaction can only be run on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(1),
+ autocommit: false
+}));
+
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
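+// mongos reports msg: "isdbgrid" in its ismaster response, distinguishing it
+// from a mongod.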
+if (!isMongos) {
+ // Skip commands that do not exist on mongos.
+
+ jsTestLog("Check that geoSearch accepts a statement ID");
+    assert.writeOK(testColl.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0},
+                                   {writeConcern: {w: "majority"}}));
+    assert.writeOK(testColl.insert({geoh: {lat: 0, long: 0}, b: 0},
+                                   {writeConcern: {w: "majority"}}));
assert.commandWorked(sessionDb.runCommand({
- getMore: res.cursor.id,
- collection: collName,
- batchSize: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(1),
- autocommit: false
+ createIndexes: collName,
+ indexes: [
+ {name: "geo", key: {geo: "2dsphere"}},
+ {name: "geoh", key: {geoh: "geoHaystack", b: 1}, bucketSize: 1}
+ ],
+ writeConcern: {w: "majority"}
}));
+    // Ensure reads succeed following the index creation. find() alone only
+    // builds a cursor, so run the query with itcount().
+    assert.soonNoExcept(function() {
+        testColl.find().itcount();
+        return true;
+    });
- jsTestLog("Check that findandmodify accepts a statement ID");
+ jsTestLog("Check that geoSearch accepts a statement ID");
assert.commandWorked(sessionDb.runCommand({
- findandmodify: collName,
- remove: true,
+ geoSearch: collName,
+ search: {b: 0},
+ near: [0, 0],
+ maxDistance: 1,
readConcern: {level: "snapshot"},
txnNumber: NumberLong(txnNumber++),
stmtId: NumberInt(0),
@@ -125,136 +202,60 @@
autocommit: false
}));
- jsTestLog("Check that findAndModify accepts a statement ID");
+ jsTestLog("Check that prepareTransaction accepts a statement ID");
assert.commandWorked(sessionDb.runCommand({
- findAndModify: collName,
- remove: true,
+ insert: collName,
+ documents: [{_id: "doc2"}],
readConcern: {level: "snapshot"},
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
startTransaction: true,
autocommit: false
}));
-
- // Abort the transaction to release locks.
- // abortTransaction can only be run on the admin database.
+ // prepareTransaction can only be run on the admin database.
assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- autocommit: false
- }));
-
- jsTestLog("Check that insert accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "doc1"}],
- readConcern: {level: "snapshot"},
+ prepareTransaction: 1,
txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
+ stmtId: NumberInt(1),
autocommit: false
}));
-
- // Abort the transaction to release locks.
- // abortTransaction can only be run on the admin database.
assert.commandWorked(sessionDb.adminCommand({
abortTransaction: 1,
txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(1),
+ stmtId: NumberInt(2),
autocommit: false
}));
-
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
- if (!isMongos) {
- // Skip commands that do not exist on mongos.
-
- jsTestLog("Check that geoSearch accepts a statement ID");
- assert.writeOK(testColl.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}),
- {writeConcern: {w: "majority"}});
- assert.writeOK(testColl.insert({geoh: {lat: 0, long: 0}, b: 0}),
- {writeConcern: {w: "majority"}});
- assert.commandWorked(sessionDb.runCommand({
- createIndexes: collName,
- indexes: [
- {name: "geo", key: {geo: "2dsphere"}},
- {name: "geoh", key: {geoh: "geoHaystack", b: 1}, bucketSize: 1}
- ],
- writeConcern: {w: "majority"}
- }));
- // Ensure the snapshot is available following the index creation.
- assert.soonNoExcept(function() {
- testColl.find({}, {readConcern: {level: "snapshot"}});
- return true;
- });
-
- jsTestLog("Check that geoSearch accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- geoSearch: collName,
- search: {b: 0},
- near: [0, 0],
- maxDistance: 1,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
-
- jsTestLog("Check that prepareTransaction accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "doc2"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false
- }));
- // prepareTransaction can only be run on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(1),
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(2),
- autocommit: false
- }));
- assert.commandFailedWithCode(sessionDb.runCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(0),
- autocommit: false
- }),
- ErrorCodes.Unauthorized);
- }
-
- // refreshLogicalSessionCacheNow is intentionally omitted.
-
- jsTestLog("Check that update accepts a statement ID");
- assert.commandWorked(sessionDb.runCommand({
- update: collName,
- updates: [{q: {_id: "doc1"}, u: {$inc: {a: 1}}}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
+ assert.commandFailedWithCode(sessionDb.runCommand({
+ prepareTransaction: 1,
+ txnNumber: NumberLong(txnNumber++),
stmtId: NumberInt(0),
- startTransaction: true,
autocommit: false
- }));
+ }),
+ ErrorCodes.Unauthorized);
+}
- // Abort the last transaction because it appears the system stalls during shutdown if
- // a transaction is open.
- // abortTransaction can only be run on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber++),
- stmtId: NumberInt(1),
- autocommit: false
- }));
+// refreshLogicalSessionCacheNow is intentionally omitted.
+
+jsTestLog("Check that update accepts a statement ID");
+assert.commandWorked(sessionDb.runCommand({
+ update: collName,
+ updates: [{q: {_id: "doc1"}, u: {$inc: {a: 1}}}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false
+}));
+
+// Abort the last transaction because it appears the system stalls during shutdown if
+// a transaction is open.
+// abortTransaction can only be run on the admin database.
+assert.commandWorked(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber++),
+ stmtId: NumberInt(1),
+ autocommit: false
+}));
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
index e26f88b85c3..6910dd88b68 100644
--- a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
+++ b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
@@ -5,235 +5,236 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- 'use strict';
+'use strict';
+load("jstests/libs/check_log.js");
+load('jstests/core/txns/libs/prepare_helpers.js');
+load('jstests/libs/parallel_shell_helpers.js');
+
+TestData.dbName = 'test';
+const baseCollName = 'timestamped_reads_wait_for_prepare_oplog_visibility';
+const testDB = db.getSiblingDB(TestData.dbName);
+TestData.failureTimeout = 1 * 1000; // 1 second.
+TestData.successTimeout = 5 * 60 * 1000; // 5 minutes.
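+// The transaction below modifies 'txnDoc', so reads on it can hit prepare
+// conflicts; 'otherDoc' is untouched, so reads on it should always succeed.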
+TestData.txnDoc = {
+ _id: 1,
+ x: 1
+};
+TestData.otherDoc = {
+ _id: 2,
+ y: 7
+};
+TestData.txnDocFilter = {
+ _id: TestData.txnDoc._id
+};
+TestData.otherDocFilter = {
+ _id: TestData.otherDoc._id
+};
+
+/**
+ * A function that accepts a 'readFunc' and a collection name. 'readFunc' accepts a collection
+ * name and returns an object with an 'oplogVisibility' test field and a 'prepareConflict' test
+ * field. This function is run in a separate thread and tests that oplog visibility blocks
+ * certain reads and that prepare conflicts block other types of reads.
+ */
+const readThreadFunc = function(readFunc, _collName) {
load("jstests/libs/check_log.js");
- load('jstests/core/txns/libs/prepare_helpers.js');
- load('jstests/libs/parallel_shell_helpers.js');
-
- TestData.dbName = 'test';
- const baseCollName = 'timestamped_reads_wait_for_prepare_oplog_visibility';
- const testDB = db.getSiblingDB(TestData.dbName);
- TestData.failureTimeout = 1 * 1000; // 1 second.
- TestData.successTimeout = 5 * 60 * 1000; // 5 minutes.
- TestData.txnDoc = {_id: 1, x: 1};
- TestData.otherDoc = {_id: 2, y: 7};
- TestData.txnDocFilter = {_id: TestData.txnDoc._id};
- TestData.otherDocFilter = {_id: TestData.otherDoc._id};
-
- /**
- * A function that accepts a 'readFunc' and a collection name. 'readFunc' accepts a collection
- * name and returns an object with an 'oplogVisibility' test field and a 'prepareConflict' test
- * field. This function is run in a separate thread and tests that oplog visibility blocks
- * certain reads and that prepare conflicts block other types of reads.
- */
- const readThreadFunc = function(readFunc, _collName) {
- load("jstests/libs/check_log.js");
-
- // Do not start reads until we are blocked in 'prepareTransaction'.
- checkLog.contains(db.getMongo(), "hangAfterReservingPrepareTimestamp fail point enabled");
-
- // Create a 'readFuncObj' from the 'readFunc'.
- const readFuncObj = readFunc(_collName);
- readFuncObj.oplogVisibility();
-
- // Let the transaction finish preparing and wait for 'prepareTransaction' to complete.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'off'}));
- checkLog.contains(db.getMongo(), "command: prepareTransaction");
-
- readFuncObj.prepareConflict();
- };
- function runTest(prefix, readFunc) {
- // Reset the log history between tests.
- assert.commandWorked(db.adminCommand({clearLog: 'global'}));
+ // Do not start reads until we are blocked in 'prepareTransaction'.
+ checkLog.contains(db.getMongo(), "hangAfterReservingPrepareTimestamp fail point enabled");
+
+ // Create a 'readFuncObj' from the 'readFunc'.
+ const readFuncObj = readFunc(_collName);
+ readFuncObj.oplogVisibility();
+
+ // Let the transaction finish preparing and wait for 'prepareTransaction' to complete.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'off'}));
+ checkLog.contains(db.getMongo(), "command: prepareTransaction");
+
+ readFuncObj.prepareConflict();
+};
+
+function runTest(prefix, readFunc) {
+ // Reset the log history between tests.
+ assert.commandWorked(db.adminCommand({clearLog: 'global'}));
+
+ jsTestLog('Testing oplog visibility for ' + prefix);
+ const collName = baseCollName + '_' + prefix;
+ const testColl = testDB.getCollection(collName);
+
+ testColl.drop({writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: 'majority'}}));
+
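+    // This failpoint makes prepareTransaction hang after reserving its prepare
+    // timestamp but before writing the oplog entry, leaving a visibility "hole"
+    // that the oplog visibility checks below wait on.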
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'alwaysOn'}));
+
+ // Insert a document for the transaction.
+ assert.commandWorked(testColl.insert(TestData.txnDoc));
+ // Insert a document untouched by the transaction.
+    assert.commandWorked(testColl.insert(TestData.otherDoc, {writeConcern: {w: "majority"}}));
+
+ // Start a transaction with a single update on the 'txnDoc'.
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase(TestData.dbName);
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ assert.commandWorked(sessionDB[collName].update(TestData.txnDoc, {$inc: {x: 1}}));
+
+ // We set the log level up to know when 'prepareTransaction' completes.
+ db.setLogLevel(1);
- jsTestLog('Testing oplog visibility for ' + prefix);
- const collName = baseCollName + '_' + prefix;
- const testColl = testDB.getCollection(collName);
+ // Clear the log history to ensure we only see the most recent 'prepareTransaction'
+ // failpoint log message.
+ assert.commandWorked(db.adminCommand({clearLog: 'global'}));
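+    // funWithArgs (from parallel_shell_helpers.js) serializes readThreadFunc with
+    // its arguments so the parallel shell can invoke it.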
+ const joinReadThread = startParallelShell(funWithArgs(readThreadFunc, readFunc, collName));
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: 'majority'}}));
+ jsTestLog("Preparing the transaction for " + prefix);
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'alwaysOn'}));
+ db.setLogLevel(0);
+ joinReadThread({checkExitSuccess: true});
- // Insert a document for the transaction.
- assert.commandWorked(testColl.insert(TestData.txnDoc));
- // Insert a document untouched by the transaction.
- assert.commandWorked(testColl.insert(TestData.otherDoc, {writeconcern: {w: "majority"}}));
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+}
+
+const snapshotRead = function(_collName) {
+ const _db = db.getSiblingDB(TestData.dbName);
+
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase(TestData.dbName);
+
+ const oplogVisibility = function() {
+ jsTestLog("Snapshot reads should not block on oplog visibility.");
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ let cursor = assert.commandWorked(sessionDB.runCommand(
+ {find: _collName, filter: TestData.txnDocFilter, maxTimeMS: TestData.successTimeout}));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
+ assert.commandWorked(session.abortTransaction_forTesting());
- // Start a transaction with a single update on the 'txnDoc'.
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(TestData.dbName);
session.startTransaction({readConcern: {level: 'snapshot'}});
- assert.commandWorked(sessionDB[collName].update(TestData.txnDoc, {$inc: {x: 1}}));
-
- // We set the log level up to know when 'prepareTransaction' completes.
- db.setLogLevel(1);
-
- // Clear the log history to ensure we only see the most recent 'prepareTransaction'
- // failpoint log message.
- assert.commandWorked(db.adminCommand({clearLog: 'global'}));
- const joinReadThread = startParallelShell(funWithArgs(readThreadFunc, readFunc, collName));
-
- jsTestLog("Preparing the transaction for " + prefix);
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- db.setLogLevel(0);
- joinReadThread({checkExitSuccess: true});
-
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- }
-
- const snapshotRead = function(_collName) {
- const _db = db.getSiblingDB(TestData.dbName);
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(TestData.dbName);
-
- const oplogVisibility = function() {
- jsTestLog("Snapshot reads should not block on oplog visibility.");
- session.startTransaction({readConcern: {level: 'snapshot'}});
- let cursor = assert.commandWorked(sessionDB.runCommand({
- find: _collName,
- filter: TestData.txnDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
- assert.commandWorked(session.abortTransaction_forTesting());
-
- session.startTransaction({readConcern: {level: 'snapshot'}});
- cursor = assert.commandWorked(sessionDB.runCommand({
- find: _collName,
- filter: TestData.otherDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
- assert.commandWorked(session.abortTransaction_forTesting());
- };
-
- const prepareConflict = function() {
- jsTestLog("Snapshot reads should block on prepared transactions for " +
- "conflicting documents.");
- session.startTransaction({readConcern: {level: 'snapshot'}});
- let cursor = assert.commandFailedWithCode(sessionDB.runCommand({
- find: _collName,
- filter: TestData.txnDocFilter,
- maxTimeMS: TestData.failureTimeout
- }),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Snapshot reads should succeed on non-conflicting documents while a " +
- "transaction is in prepare.");
- session.startTransaction({readConcern: {level: 'snapshot'}});
- cursor = assert.commandWorked(sessionDB.runCommand({
- find: _collName,
- filter: TestData.otherDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
- assert.commandWorked(session.abortTransaction_forTesting());
- };
-
- return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+ cursor = assert.commandWorked(sessionDB.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ maxTimeMS: TestData.successTimeout
+ }));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
+ assert.commandWorked(session.abortTransaction_forTesting());
};
- const afterClusterTime = function(_collName) {
- const _db = db.getSiblingDB(TestData.dbName);
-
- // Advance the cluster time with an arbitrary other insert.
- let res = assert.commandWorked(
- _db.runCommand({insert: _collName, documents: [{advanceClusterTime: 1}]}));
- assert(res.hasOwnProperty("$clusterTime"), tojson(res));
- assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res));
- const clusterTime = res.$clusterTime.clusterTime;
- jsTestLog("Using afterClusterTime: " + clusterTime);
-
- const oplogVisibility = function() {
- jsTestLog("afterClusterTime reads should block on oplog visibility.");
- assert.commandFailedWithCode(_db.runCommand({
- find: _collName,
- filter: TestData.txnDocFilter,
- readConcern: {afterClusterTime: clusterTime},
- maxTimeMS: TestData.failureTimeout
- }),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(_db.runCommand({
- find: _collName,
- filter: TestData.otherDocFilter,
- readConcern: {afterClusterTime: clusterTime},
- maxTimeMS: TestData.failureTimeout
- }),
- ErrorCodes.MaxTimeMSExpired);
- };
-
- const prepareConflict = function() {
- jsTestLog("afterClusterTime reads should block on prepared transactions for " +
- "conflicting documents.");
- assert.commandFailedWithCode(_db.runCommand({
- find: _collName,
- filter: TestData.txnDocFilter,
- readConcern: {afterClusterTime: clusterTime},
- maxTimeMS: TestData.failureTimeout
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("afterClusterTime reads should succeed on non-conflicting documents " +
- "while transaction is in prepare.");
- let cursor = assert.commandWorked(_db.runCommand({
- find: _collName,
- filter: TestData.otherDocFilter,
- readConcern: {afterClusterTime: clusterTime},
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
- };
-
- return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+ const prepareConflict = function() {
+ jsTestLog("Snapshot reads should block on prepared transactions for " +
+ "conflicting documents.");
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ let cursor = assert.commandFailedWithCode(sessionDB.runCommand({
+ find: _collName,
+ filter: TestData.txnDocFilter,
+ maxTimeMS: TestData.failureTimeout
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Snapshot reads should succeed on non-conflicting documents while a " +
+ "transaction is in prepare.");
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ cursor = assert.commandWorked(sessionDB.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ maxTimeMS: TestData.successTimeout
+ }));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
+ assert.commandWorked(session.abortTransaction_forTesting());
};
- const normalRead = function(_collName) {
- const _db = db.getSiblingDB(TestData.dbName);
-
- const oplogVisibility = function() {
- jsTestLog("Ordinary reads should not block on oplog visibility.");
- let cursor = assert.commandWorked(_db.runCommand({
- find: _collName,
- filter: TestData.txnDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
- cursor = assert.commandWorked(_db.runCommand({
- find: _collName,
- filter: TestData.otherDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
- };
-
- const prepareConflict = function() {
- jsTestLog("Ordinary reads should not block on prepared transactions.");
- let cursor = assert.commandWorked(_db.runCommand({
- find: _collName,
- filter: TestData.txnDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
- cursor = assert.commandWorked(_db.runCommand({
- find: _collName,
- filter: TestData.otherDocFilter,
- maxTimeMS: TestData.successTimeout
- }));
- assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
- };
-
- return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+ return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+};
+
+const afterClusterTime = function(_collName) {
+ const _db = db.getSiblingDB(TestData.dbName);
+
+ // Advance the cluster time with an arbitrary other insert.
+ let res = assert.commandWorked(
+ _db.runCommand({insert: _collName, documents: [{advanceClusterTime: 1}]}));
+ assert(res.hasOwnProperty("$clusterTime"), tojson(res));
+ assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res));
+ const clusterTime = res.$clusterTime.clusterTime;
+ jsTestLog("Using afterClusterTime: " + clusterTime);
+
+ const oplogVisibility = function() {
+ jsTestLog("afterClusterTime reads should block on oplog visibility.");
+ assert.commandFailedWithCode(_db.runCommand({
+ find: _collName,
+ filter: TestData.txnDocFilter,
+ readConcern: {afterClusterTime: clusterTime},
+ maxTimeMS: TestData.failureTimeout
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(_db.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ readConcern: {afterClusterTime: clusterTime},
+ maxTimeMS: TestData.failureTimeout
+ }),
+ ErrorCodes.MaxTimeMSExpired);
};
- runTest('normal_reads', normalRead);
- runTest('snapshot_reads', snapshotRead);
- runTest('afterClusterTime', afterClusterTime);
+ const prepareConflict = function() {
+ jsTestLog("afterClusterTime reads should block on prepared transactions for " +
+ "conflicting documents.");
+ assert.commandFailedWithCode(_db.runCommand({
+ find: _collName,
+ filter: TestData.txnDocFilter,
+ readConcern: {afterClusterTime: clusterTime},
+ maxTimeMS: TestData.failureTimeout
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("afterClusterTime reads should succeed on non-conflicting documents " +
+ "while transaction is in prepare.");
+ let cursor = assert.commandWorked(_db.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ readConcern: {afterClusterTime: clusterTime},
+ maxTimeMS: TestData.successTimeout
+ }));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
+ };
+
+ return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+};
+
+const normalRead = function(_collName) {
+ const _db = db.getSiblingDB(TestData.dbName);
+
+ const oplogVisibility = function() {
+ jsTestLog("Ordinary reads should not block on oplog visibility.");
+ let cursor = assert.commandWorked(_db.runCommand(
+ {find: _collName, filter: TestData.txnDocFilter, maxTimeMS: TestData.successTimeout}));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
+ cursor = assert.commandWorked(_db.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ maxTimeMS: TestData.successTimeout
+ }));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
+ };
+
+ const prepareConflict = function() {
+ jsTestLog("Ordinary reads should not block on prepared transactions.");
+ let cursor = assert.commandWorked(_db.runCommand(
+ {find: _collName, filter: TestData.txnDocFilter, maxTimeMS: TestData.successTimeout}));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
+ cursor = assert.commandWorked(_db.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ maxTimeMS: TestData.successTimeout
+ }));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
+ };
+
+ return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+};
+
+runTest('normal_reads', normalRead);
+runTest('snapshot_reads', snapshotRead);
+runTest('afterClusterTime', afterClusterTime);
})();
diff --git a/jstests/core/txns/transaction_error_handling.js b/jstests/core/txns/transaction_error_handling.js
index 26ccd742934..74852bd58f8 100644
--- a/jstests/core/txns/transaction_error_handling.js
+++ b/jstests/core/txns/transaction_error_handling.js
@@ -1,122 +1,124 @@
// Test basic transaction error handling.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "transaction_error_handling";
- const testDB = db.getSiblingDB(dbName);
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
-
- jsTestLog("Test that we cannot abort or commit a nonexistant transaction.");
- // Cannot abort or commit a nonexistant transaction.
- try {
- assert.commandWorked(session.commitTransaction_forTesting());
- } catch (e) {
- assert.eq(e.message, "There is no active transaction to commit on this session.");
- }
-
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "There is no active transaction to abort on this session.");
- }
-
- // Try to start a transaction when the state is 'active'.
- jsTestLog("Test that we cannot start a transaction with one already started or in progress.");
- session.startTransaction();
- try {
- session.startTransaction();
- } catch (e) {
- assert.eq(e.message, "Transaction already in progress on this session.");
- }
-
- // Try starting a transaction after inserting something.
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- // Try to start a transaction when the state is 'active'.
- try {
- session.startTransaction();
- } catch (e) {
- assert.eq(e.message, "Transaction already in progress on this session.");
- }
-
- // At this point, the transaction is still 'active'. We will commit this transaction and test
- // that calling commitTransaction again should work while calling abortTransaction should not.
- assert.commandWorked(session.commitTransaction_forTesting());
+"use strict";
- jsTestLog("Test that we can commit a transaction more than once.");
- // The transaction state is 'committed'. We can call commitTransaction again in this state.
- assert.commandWorked(session.commitTransaction_forTesting());
+const dbName = "test";
+const collName = "transaction_error_handling";
+const testDB = db.getSiblingDB(dbName);
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- jsTestLog("Test that we cannot abort a transaction that has already been committed");
- // We cannot call abortTransaction on a transaction that has already been committed.
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "Cannot call abortTransaction after calling commitTransaction.");
- }
-
- // Start a new transaction that will be aborted. Test that we cannot call commit or
- // abortTransaction on a transaction that is in the 'aborted' state.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
+
+jsTestLog("Test that we cannot abort or commit a nonexistant transaction.");
+// Cannot abort or commit a nonexistant transaction.
+try {
+ assert.commandWorked(session.commitTransaction_forTesting());
+} catch (e) {
+ assert.eq(e.message, "There is no active transaction to commit on this session.");
+}
+
+try {
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} catch (e) {
+ assert.eq(e.message, "There is no active transaction to abort on this session.");
+}
+
+// Try to start a transaction when the state is 'active'.
+jsTestLog("Test that we cannot start a transaction with one already started or in progress.");
+session.startTransaction();
+try {
session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
- assert.commandWorked(session.abortTransaction_forTesting());
-
- jsTestLog("Test that we cannot commit a transaction that has already been aborted.");
- // We cannot call commitTransaction on a transaction that has already been aborted.
- try {
- assert.commandWorked(session.commitTransaction_forTesting());
- } catch (e) {
- assert.eq(e.message, "Cannot call commitTransaction after calling abortTransaction.");
- }
-
- jsTestLog("Test that we cannot abort a transaction that has already been aborted.");
- // We also cannot call abortTransaction on a transaction that has already been aborted.
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "Cannot call abortTransaction twice.");
- }
-
- jsTestLog(
- "Test that a normal operation after committing a transaction changes the state to inactive.");
+} catch (e) {
+ assert.eq(e.message, "Transaction already in progress on this session.");
+}
+
+// Try starting a transaction after inserting something.
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+// Try to start a transaction when the state is 'active'.
+try {
session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
- // The transaction state should be changed to 'committed'.
+} catch (e) {
+ assert.eq(e.message, "Transaction already in progress on this session.");
+}
+
+// At this point, the transaction is still 'active'. We will commit it and test
+// that calling commitTransaction again succeeds while calling abortTransaction fails.
+assert.commandWorked(session.commitTransaction_forTesting());
+
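+// The session's transaction state goes from 'active' (after startTransaction) to
+// 'committed' or 'aborted' (after commit/abort), and back to 'inactive' once a
+// normal operation runs on the session.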
+jsTestLog("Test that we can commit a transaction more than once.");
+// The transaction state is 'committed'. We can call commitTransaction again in this state.
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTestLog("Test that we cannot abort a transaction that has already been committed");
+// We cannot call abortTransaction on a transaction that has already been committed.
+try {
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} catch (e) {
+ assert.eq(e.message, "Cannot call abortTransaction after calling commitTransaction.");
+}
+
+// Start a new transaction that will be aborted. Test that we cannot call commit or
+// abortTransaction on a transaction that is in the 'aborted' state.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+assert.commandWorked(session.abortTransaction_forTesting());
+
+jsTestLog("Test that we cannot commit a transaction that has already been aborted.");
+// We cannot call commitTransaction on a transaction that has already been aborted.
+try {
assert.commandWorked(session.commitTransaction_forTesting());
- // The transaction state should be changed to 'inactive'.
- assert.commandWorked(sessionColl.insert({_id: "normal-insert"}));
- try {
- assert.commandWorked(session.commitTransaction_forTesting());
- } catch (e) {
- assert.eq(e.message, "There is no active transaction to commit on this session.");
- }
-
- jsTestLog(
- "Test that a normal operation after aborting a transaction changes the state to inactive.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-4"}));
- // The transaction state should be changed to 'aborted'.
- assert.commandWorked(session.abortTransaction_forTesting());
- // The transaction state should be changed to 'inactive'.
- assert.commandWorked(sessionColl.insert({_id: "normal-insert-2"}));
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "There is no active transaction to abort on this session.");
- }
-
- session.endSession();
+} catch (e) {
+ assert.eq(e.message, "Cannot call commitTransaction after calling abortTransaction.");
+}
+
+jsTestLog("Test that we cannot abort a transaction that has already been aborted.");
+// We also cannot call abortTransaction on a transaction that has already been aborted.
+try {
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} catch (e) {
+ assert.eq(e.message, "Cannot call abortTransaction twice.");
+}
+
+jsTestLog(
+ "Test that a normal operation after committing a transaction changes the state to inactive.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
+// The transaction state should be changed to 'committed'.
+assert.commandWorked(session.commitTransaction_forTesting());
+// The transaction state should be changed to 'inactive'.
+assert.commandWorked(sessionColl.insert({_id: "normal-insert"}));
+try {
+ assert.commandWorked(session.commitTransaction_forTesting());
+} catch (e) {
+ assert.eq(e.message, "There is no active transaction to commit on this session.");
+}
+
+jsTestLog(
+ "Test that a normal operation after aborting a transaction changes the state to inactive.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-4"}));
+// The transaction state should be changed to 'aborted'.
+assert.commandWorked(session.abortTransaction_forTesting());
+// The transaction state should be changed to 'inactive'.
+assert.commandWorked(sessionColl.insert({_id: "normal-insert-2"}));
+try {
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} catch (e) {
+ assert.eq(e.message, "There is no active transaction to abort on this session.");
+}
+
+session.endSession();
}());
diff --git a/jstests/core/txns/transaction_ops_against_capped_collection.js b/jstests/core/txns/transaction_ops_against_capped_collection.js
index b7a0720a875..86c7c4f3383 100644
--- a/jstests/core/txns/transaction_ops_against_capped_collection.js
+++ b/jstests/core/txns/transaction_ops_against_capped_collection.js
@@ -6,99 +6,95 @@
* @tags: [requires_capped, uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const cappedCollName = "transaction_ops_against_capped_collection";
- const testDB = db.getSiblingDB(dbName);
- const cappedTestColl = testDB.getCollection(cappedCollName);
- const testDocument = {"a": 1};
-
- cappedTestColl.drop({writeConcern: {w: "majority"}});
-
- jsTest.log("Creating a capped collection '" + dbName + "." + cappedCollName + "'.");
- assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 500}));
-
- jsTest.log("Adding a document to the capped collection so that the update op can be tested " +
- "in the subsequent transaction attempts");
- assert.commandWorked(cappedTestColl.insert(testDocument));
-
- jsTest.log("Setting up a transaction in which to execute transaction ops.");
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionCappedColl = sessionDB.getCollection(cappedCollName);
-
- jsTest.log(
- "Starting individual transactions for writes against capped collections that should " +
- " fail.");
-
- /*
- * Write ops (should fail):
- */
-
- jsTest.log("About to try: insert");
- session.startTransaction();
- assert.commandFailedWithCode(sessionCappedColl.insert({"x": 55}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log("About to try: update");
- session.startTransaction();
- assert.commandFailedWithCode(sessionCappedColl.update(testDocument, {"a": 1000}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log("About to try: findAndModify (update version)");
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDB.runCommand(
- {findAndModify: cappedCollName, query: testDocument, update: {"$set": {"a": 1000}}}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log("About to try: findAndModify (remove version)");
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDB.runCommand({findAndModify: cappedCollName, query: testDocument, remove: true}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // Deletes do not work against capped collections so we will not test them in transactions.
-
- jsTest.log(
- "Starting individual transactions for reads against capped collections that should " +
- " succeed.");
-
- /*
- * Read ops (should succeed):
- */
-
- jsTest.log("About to try: find");
- session.startTransaction();
- let findRes = assert.commandWorked(sessionDB.runCommand({"find": cappedCollName}));
- assert.eq(1, findRes.cursor.firstBatch[0].a);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- jsTest.log("About to try: distinct");
- session.startTransaction();
- let distinctRes =
- assert.commandWorked(sessionDB.runCommand({"distinct": cappedCollName, "key": "a"}));
- assert.eq(1, distinctRes.values);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- jsTest.log("About to try: aggregate");
- session.startTransaction();
- let aggRes = assert.commandWorked(sessionDB.runCommand({
- aggregate: cappedCollName,
- pipeline: [{$match: {"a": 1}}],
- cursor: {},
- }));
- assert.eq(1, aggRes.cursor.firstBatch[0].a);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const cappedCollName = "transaction_ops_against_capped_collection";
+const testDB = db.getSiblingDB(dbName);
+const cappedTestColl = testDB.getCollection(cappedCollName);
+const testDocument = {
+ "a": 1
+};
+
+cappedTestColl.drop({writeConcern: {w: "majority"}});
+
+jsTest.log("Creating a capped collection '" + dbName + "." + cappedCollName + "'.");
+assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 500}));
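+// 'size' is in bytes. Writes to capped collections are not supported inside
+// multi-document transactions, which the write attempts below verify.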
+
+jsTest.log("Adding a document to the capped collection so that the update op can be tested " +
+ "in the subsequent transaction attempts");
+assert.commandWorked(cappedTestColl.insert(testDocument));
+
+jsTest.log("Setting up a transaction in which to execute transaction ops.");
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionCappedColl = sessionDB.getCollection(cappedCollName);
+
+jsTest.log("Starting individual transactions for writes against capped collections that should " +
+ " fail.");
+
+/*
+ * Write ops (should fail):
+ */
+
+jsTest.log("About to try: insert");
+session.startTransaction();
+assert.commandFailedWithCode(sessionCappedColl.insert({"x": 55}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log("About to try: update");
+session.startTransaction();
+assert.commandFailedWithCode(sessionCappedColl.update(testDocument, {"a": 1000}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log("About to try: findAndModify (update version)");
+session.startTransaction();
+assert.commandFailedWithCode(
+ sessionDB.runCommand(
+ {findAndModify: cappedCollName, query: testDocument, update: {"$set": {"a": 1000}}}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log("About to try: findAndModify (remove version)");
+session.startTransaction();
+assert.commandFailedWithCode(
+ sessionDB.runCommand({findAndModify: cappedCollName, query: testDocument, remove: true}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// Deletes do not work against capped collections so we will not test them in transactions.
+
+jsTest.log("Starting individual transactions for reads against capped collections that should " +
+ " succeed.");
+
+/*
+ * Read ops (should succeed):
+ */
+
+jsTest.log("About to try: find");
+session.startTransaction();
+let findRes = assert.commandWorked(sessionDB.runCommand({"find": cappedCollName}));
+assert.eq(1, findRes.cursor.firstBatch[0].a);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+jsTest.log("About to try: distinct");
+session.startTransaction();
+let distinctRes =
+ assert.commandWorked(sessionDB.runCommand({"distinct": cappedCollName, "key": "a"}));
+assert.eq(1, distinctRes.values);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+jsTest.log("About to try: aggregate");
+session.startTransaction();
+let aggRes = assert.commandWorked(sessionDB.runCommand({
+ aggregate: cappedCollName,
+ pipeline: [{$match: {"a": 1}}],
+ cursor: {},
+}));
+assert.eq(1, aggRes.cursor.firstBatch[0].a);
+assert.commandWorked(session.abortTransaction_forTesting());
+
+session.endSession();
})();
diff --git a/jstests/core/txns/transactions_block_ddl.js b/jstests/core/txns/transactions_block_ddl.js
index 70b085c6b71..5e34a4b84be 100644
--- a/jstests/core/txns/transactions_block_ddl.js
+++ b/jstests/core/txns/transactions_block_ddl.js
@@ -1,121 +1,129 @@
// Test that open transactions block DDL operations on the involved collections.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
- const dbName = "transactions_block_ddl";
- const collName = "transactions_block_ddl";
- const otherDBName = "transactions_block_ddl_other";
- const otherCollName = "transactions_block_ddl_other";
- const testDB = db.getSiblingDB(dbName);
+const dbName = "transactions_block_ddl";
+const collName = "transactions_block_ddl";
+const otherDBName = "transactions_block_ddl_other";
+const otherCollName = "transactions_block_ddl_other";
+const testDB = db.getSiblingDB(dbName);
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB[collName];
- /**
- * Tests that DDL operations block on transactions and fail when their maxTimeMS expires.
- */
- function testTimeout(cmdDBName, ddlCmd) {
- // Setup.
- sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));
+/**
+ * Tests that DDL operations block on transactions and fail when their maxTimeMS expires.
+ */
+function testTimeout(cmdDBName, ddlCmd) {
+ // Setup.
+ sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
- assert.commandFailedWithCode(
- testDB.getSiblingDB(cmdDBName).runCommand(Object.assign({}, ddlCmd, {maxTimeMS: 500})),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
+ assert.commandFailedWithCode(
+ testDB.getSiblingDB(cmdDBName).runCommand(Object.assign({}, ddlCmd, {maxTimeMS: 500})),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
- /**
- * Tests that DDL operations block on transactions but can succeed once the transaction commits.
- */
- function testSuccessOnTxnCommit(cmdDBName, ddlCmd, currentOpFilter) {
- // Setup.
- sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));
+/**
+ * Tests that DDL operations block on transactions but can succeed once the transaction commits.
+ */
+function testSuccessOnTxnCommit(cmdDBName, ddlCmd, currentOpFilter) {
+ // Setup.
+ sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));
- jsTestLog("About to start tranasction");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
- jsTestLog("Transaction started, running ddl operation " + ddlCmd);
- let thread = new ScopedThread(function(cmdDBName, ddlCmd) {
- return db.getSiblingDB(cmdDBName).runCommand(ddlCmd);
- }, cmdDBName, ddlCmd);
- thread.start();
- // Wait for the DDL operation to have pending locks.
- assert.soon(
- function() {
- // Note that we cannot use the $currentOp agg stage because it acquires locks
- // (SERVER-35289).
- return testDB.currentOp({$and: [currentOpFilter, {waitingForLock: true}]})
- .inprog.length === 1;
- },
- function() {
- return "Failed to find DDL command in currentOp output: " +
- tojson(testDB.currentOp().inprog);
- });
- jsTestLog("Committing transaction");
- assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Transaction committed, waiting for ddl operation to complete.");
- thread.join();
- assert.commandWorked(thread.returnData());
- }
+ jsTestLog("About to start tranasction");
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
+ jsTestLog("Transaction started, running ddl operation " + ddlCmd);
+ let thread = new ScopedThread(function(cmdDBName, ddlCmd) {
+ return db.getSiblingDB(cmdDBName).runCommand(ddlCmd);
+ }, cmdDBName, ddlCmd);
+ thread.start();
+ // Wait for the DDL operation to have pending locks.
+ assert.soon(
+ function() {
+ // Note that we cannot use the $currentOp agg stage because it acquires locks
+ // (SERVER-35289).
+ return testDB.currentOp({$and: [currentOpFilter, {waitingForLock: true}]})
+ .inprog.length === 1;
+ },
+ function() {
+ return "Failed to find DDL command in currentOp output: " +
+ tojson(testDB.currentOp().inprog);
+ });
+ jsTestLog("Committing transaction");
+ assert.commandWorked(session.commitTransaction_forTesting());
+ jsTestLog("Transaction committed, waiting for ddl operation to complete.");
+ thread.join();
+ assert.commandWorked(thread.returnData());
+}
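+// Each DDL command below is exercised twice: testTimeout confirms it blocks until
+// its maxTimeMS expires, and testSuccessOnTxnCommit confirms it completes once
+// the transaction commits.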
- jsTestLog("Testing that 'drop' blocks on transactions");
- const dropCmd = {drop: collName, writeConcern: {w: "majority"}};
- testTimeout(dbName, dropCmd);
- testSuccessOnTxnCommit(dbName, dropCmd, {"command.drop": collName});
+jsTestLog("Testing that 'drop' blocks on transactions");
+const dropCmd = {
+ drop: collName,
+ writeConcern: {w: "majority"}
+};
+testTimeout(dbName, dropCmd);
+testSuccessOnTxnCommit(dbName, dropCmd, {"command.drop": collName});
- jsTestLog("Testing that 'dropDatabase' blocks on transactions");
- const dropDatabaseCmd = {dropDatabase: 1, writeConcern: {w: "majority"}};
- testTimeout(dbName, dropDatabaseCmd);
- testSuccessOnTxnCommit(dbName, dropDatabaseCmd, {"command.dropDatabase": 1});
+jsTestLog("Testing that 'dropDatabase' blocks on transactions");
+const dropDatabaseCmd = {
+ dropDatabase: 1,
+ writeConcern: {w: "majority"}
+};
+testTimeout(dbName, dropDatabaseCmd);
+testSuccessOnTxnCommit(dbName, dropDatabaseCmd, {"command.dropDatabase": 1});
- jsTestLog("Testing that 'renameCollection' within databases blocks on transactions");
- testDB.runCommand({drop: otherCollName, writeConcern: {w: "majority"}});
- const renameCollectionCmdSameDB = {
- renameCollection: sessionColl.getFullName(),
- to: dbName + "." + otherCollName,
- writeConcern: {w: "majority"}
- };
- testTimeout("admin", renameCollectionCmdSameDB);
- testSuccessOnTxnCommit("admin",
- renameCollectionCmdSameDB,
- {"command.renameCollection": sessionColl.getFullName()});
+jsTestLog("Testing that 'renameCollection' within databases blocks on transactions");
+testDB.runCommand({drop: otherCollName, writeConcern: {w: "majority"}});
+const renameCollectionCmdSameDB = {
+ renameCollection: sessionColl.getFullName(),
+ to: dbName + "." + otherCollName,
+ writeConcern: {w: "majority"}
+};
+testTimeout("admin", renameCollectionCmdSameDB);
+testSuccessOnTxnCommit(
+ "admin", renameCollectionCmdSameDB, {"command.renameCollection": sessionColl.getFullName()});
- jsTestLog("Testing that 'renameCollection' across databases blocks on transactions");
- testDB.getSiblingDB(otherDBName)
- .runCommand({drop: otherCollName, writeConcern: {w: "majority"}});
- const renameCollectionCmdDifferentDB = {
- renameCollection: sessionColl.getFullName(),
- to: otherDBName + "." + otherCollName,
- writeConcern: {w: "majority"}
- };
- testTimeout("admin", renameCollectionCmdDifferentDB);
- testSuccessOnTxnCommit("admin",
- renameCollectionCmdDifferentDB,
- {"command.renameCollection": sessionColl.getFullName()});
+jsTestLog("Testing that 'renameCollection' across databases blocks on transactions");
+testDB.getSiblingDB(otherDBName).runCommand({drop: otherCollName, writeConcern: {w: "majority"}});
+const renameCollectionCmdDifferentDB = {
+ renameCollection: sessionColl.getFullName(),
+ to: otherDBName + "." + otherCollName,
+ writeConcern: {w: "majority"}
+};
+testTimeout("admin", renameCollectionCmdDifferentDB);
+testSuccessOnTxnCommit("admin",
+ renameCollectionCmdDifferentDB,
+ {"command.renameCollection": sessionColl.getFullName()});
- jsTestLog("Testing that 'createIndexes' blocks on transactions");
- // The transaction will insert a document that has a field 'a'.
- const createIndexesCmd = {
- createIndexes: collName,
- indexes: [{key: {a: 1}, name: "a_1"}],
- writeConcern: {w: "majority"}
- };
- testTimeout(dbName, createIndexesCmd);
- testSuccessOnTxnCommit(dbName, createIndexesCmd, {"command.createIndexes": collName});
+jsTestLog("Testing that 'createIndexes' blocks on transactions");
+// The transaction will insert a document that has a field 'a'.
+const createIndexesCmd = {
+ createIndexes: collName,
+ indexes: [{key: {a: 1}, name: "a_1"}],
+ writeConcern: {w: "majority"}
+};
+testTimeout(dbName, createIndexesCmd);
+testSuccessOnTxnCommit(dbName, createIndexesCmd, {"command.createIndexes": collName});
- jsTestLog("Testing that 'dropIndexes' blocks on transactions");
- // The setup creates an index on {b: 1} called 'b_1'. The transaction will insert a document
- // that has a field 'b'.
- const dropIndexesCmd = {dropIndexes: collName, index: "b_1", writeConcern: {w: "majority"}};
- testTimeout(dbName, dropIndexesCmd);
- testSuccessOnTxnCommit(dbName, dropIndexesCmd, {"command.dropIndexes": collName});
- session.endSession();
+jsTestLog("Testing that 'dropIndexes' blocks on transactions");
+// The setup creates an index on {b: 1} called 'b_1'. The transaction will insert a document
+// that has a field 'b'.
+const dropIndexesCmd = {
+ dropIndexes: collName,
+ index: "b_1",
+ writeConcern: {w: "majority"}
+};
+testTimeout(dbName, dropIndexesCmd);
+testSuccessOnTxnCommit(dbName, dropIndexesCmd, {"command.dropIndexes": collName});
+session.endSession();
}());
diff --git a/jstests/core/txns/transactions_profiling.js b/jstests/core/txns/transactions_profiling.js
index 548c800eeb4..55f63ab6cfc 100644
--- a/jstests/core/txns/transactions_profiling.js
+++ b/jstests/core/txns/transactions_profiling.js
@@ -1,248 +1,242 @@
// Test profiling for commands in multi-document transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- const dbName = "test";
- const collName = "transactions_profiling";
- const testDB = db.getSiblingDB(dbName);
- testDB[collName].drop({writeConcern: {w: "majority"}});
-
- testDB.setProfilingLevel(2);
-
- const sessionOptions = {causalConsistency: false};
- let session = testDB.getMongo().startSession(sessionOptions);
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB[collName];
-
- assert.commandWorked(sessionColl.insert({_id: "findAndModify-doc"}));
- assert.commandWorked(sessionColl.insert({_id: "delete-doc"}));
- assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-1"}));
- assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-2"}));
- assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-3"}));
- assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-4"}));
- assert.commandWorked(sessionColl.insert({_id: "read-doc"}));
- assert.commandWorked(sessionColl.insert({_id: "update-doc"}));
- assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-1"}));
- assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-2"}));
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}],
- writeConcern: {w: "majority"}
- }));
-
- jsTestLog("Test commands that can use shell helpers.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-
- jsTestLog("Test aggregate.");
- assert.eq(1, sessionColl.aggregate([{$match: {_id: "read-doc"}}]).itcount());
- let profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.command.aggregate, sessionColl.getName(), tojson(profileObj));
- assert.eq(profileObj.nreturned, 1, tojson(profileObj));
-
- jsTestLog("Test delete.");
- assert.commandWorked(sessionColl.deleteOne({_id: "delete-doc"}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "remove", tojson(profileObj));
- assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
-
- jsTestLog("Test multi delete.");
- assert.commandWorked(
- sessionColl.deleteMany({_id: {$in: ["multi-delete-doc-1", "multi-delete-doc-2"]}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "remove", tojson(profileObj));
- assert.eq(profileObj.ndeleted, 2, tojson(profileObj));
-
- jsTestLog("Test batch delete.");
- assert.commandWorked(sessionDB.runCommand({
- delete: collName,
- deletes: [
- {q: {_id: "multi-delete-doc-3"}, limit: 1},
- {q: {_id: "multi-delete-doc-4"}, limit: 1}
- ]
- }));
- // We see the profile entry from the second delete.
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "remove", tojson(profileObj));
- assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
-
- jsTestLog("Test distinct.");
- assert.eq(["read-doc"], sessionColl.distinct("_id", {_id: "read-doc"}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.command.distinct, sessionColl.getName(), tojson(profileObj));
-
- jsTestLog("Test find.");
- assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "query", tojson(profileObj));
- assert.eq(profileObj.nreturned, 1, tojson(profileObj));
-
- jsTestLog("Test findAndModify.");
- assert.eq({_id: "findAndModify-doc", updated: true},
- sessionColl.findAndModify(
- {query: {_id: "findAndModify-doc"}, update: {$set: {updated: true}}, new: true}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
-
- jsTestLog("Test geoSearch.");
- assert.commandWorked(
- sessionDB.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.geoSearch, sessionColl.getName(), tojson(profileObj));
-
- jsTestLog("Test getMore.");
- let res = assert.commandWorked(
- sessionDB.runCommand({find: collName, filter: {_id: "read-doc"}, batchSize: 0}));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- let cursorId = res.cursor.id;
- res = assert.commandWorked(sessionDB.runCommand({getMore: cursorId, collection: collName}));
- assert.eq([{_id: "read-doc"}], res.cursor.nextBatch, tojson(res));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "getmore", tojson(profileObj));
- assert.eq(profileObj.nreturned, 1, tojson(profileObj));
-
- jsTestLog("Test insert.");
- assert.commandWorked(sessionColl.insert({_id: "insert-doc"}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "insert", tojson(profileObj));
- assert.eq(profileObj.ninserted, 1, tojson(profileObj));
-
- jsTestLog("Test update.");
- assert.commandWorked(sessionColl.update({_id: "update-doc"}, {$set: {updated: true}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "update", tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
-
- jsTestLog("Test multi update.");
- assert.commandWorked(sessionColl.updateMany(
- {_id: {$in: ["multi-update-doc-1", "multi-update-doc-2"]}}, {$set: {updated: true}}));
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "update", tojson(profileObj));
- assert.eq(profileObj.nMatched, 2, tojson(profileObj));
- assert.eq(profileObj.nModified, 2, tojson(profileObj));
-
- jsTestLog("Test batch update.");
- assert.commandWorked(sessionDB.runCommand({
- update: collName,
- updates: [
- {q: {_id: "multi-update-doc-1"}, u: {$set: {batch_updated: true}}},
- {q: {_id: "multi-update-doc-2"}, u: {$set: {batch_updated: true}}}
- ]
- }));
- // We see the profile entry from the second update.
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "update", tojson(profileObj));
- assert.eq(profileObj.nMatched, 1, tojson(profileObj));
- assert.eq(profileObj.nModified, 1, tojson(profileObj));
-
- jsTestLog("Committing transaction.");
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTestLog("Test delete with a write conflict.");
- assert.commandWorked(sessionColl.insert({_id: "delete-doc"}, {writeConcern: {w: "majority"}}));
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-
- // Perform an operation in the transaction to establish the snapshot.
- assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
-
- // Update the document outside of the transaction.
- assert.commandWorked(testDB[collName].update({_id: "delete-doc"}, {$set: {conflict: true}}));
-
- // Deleting the document in the transaction fails, but profiling is still successful.
- assert.throws(function() {
- sessionColl.deleteOne({_id: "delete-doc"});
- });
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "remove", tojson(profileObj));
- assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test findAndModify with a write conflict.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-
- // Perform an operation in the transaction to establish the snapshot.
- assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
-
- // Update the document outside of the transaction.
- assert.commandWorked(
- testDB[collName].update({_id: "findAndModify-doc"}, {$set: {conflict: true}}));
-
- // Modifying the document in the transaction fails, but profiling is still successful.
- assert.throws(function() {
- sessionColl.findAndModify(
- {query: {_id: "findAndModify-doc"}, update: {$set: {conflict: false}}});
- });
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.op, "command", tojson(profileObj));
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj));
- assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test insert with a write conflict.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-
- // Perform an operation in the transaction to establish the snapshot.
- assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
-
- // Insert a document outside of the transaction.
- assert.commandWorked(testDB[collName].insert({_id: "conflict-doc"}));
-
- // Inserting a document with the same _id in the transaction fails, but profiling is still
- // successful.
- assert.commandFailedWithCode(sessionColl.insert({_id: "conflict-doc"}),
- ErrorCodes.WriteConflict);
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "insert", tojson(profileObj));
- assert.eq(profileObj.ninserted, 0, tojson(profileObj));
- assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test update with a write conflict.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-
- // Perform an operation in the transaction to establish the snapshot.
- assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
-
- // Update the document outside of the transaction.
- assert.commandWorked(testDB[collName].update({_id: "update-doc"}, {$set: {conflict: true}}));
-
- // Updating the document in the transaction fails, but profiling is still successful.
- assert.commandFailedWithCode(sessionColl.update({_id: "update-doc"}, {$set: {conflict: false}}),
- ErrorCodes.WriteConflict);
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
- assert.eq(profileObj.op, "update", tojson(profileObj));
- assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- session.endSession();
+"use strict";
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+const dbName = "test";
+const collName = "transactions_profiling";
+const testDB = db.getSiblingDB(dbName);
+testDB[collName].drop({writeConcern: {w: "majority"}});
+
+testDB.setProfilingLevel(2);
+
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = testDB.getMongo().startSession(sessionOptions);
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB[collName];
+
+assert.commandWorked(sessionColl.insert({_id: "findAndModify-doc"}));
+assert.commandWorked(sessionColl.insert({_id: "delete-doc"}));
+assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-1"}));
+assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-2"}));
+assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-3"}));
+assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-4"}));
+assert.commandWorked(sessionColl.insert({_id: "read-doc"}));
+assert.commandWorked(sessionColl.insert({_id: "update-doc"}));
+assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-1"}));
+assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-2"}));
+assert.commandWorked(testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}],
+ writeConcern: {w: "majority"}
+}));
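+// The haystack index above gives the 'geoSearch' command exercised below an
+// index to run against; 'geoSearch' requires a geoHaystack index.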
+
+jsTestLog("Test commands that can use shell helpers.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+
+jsTestLog("Test aggregate.");
+assert.eq(1, sessionColl.aggregate([{$match: {_id: "read-doc"}}]).itcount());
+let profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.command.aggregate, sessionColl.getName(), tojson(profileObj));
+assert.eq(profileObj.nreturned, 1, tojson(profileObj));
+
+jsTestLog("Test delete.");
+assert.commandWorked(sessionColl.deleteOne({_id: "delete-doc"}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "remove", tojson(profileObj));
+assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
+
+jsTestLog("Test multi delete.");
+assert.commandWorked(
+ sessionColl.deleteMany({_id: {$in: ["multi-delete-doc-1", "multi-delete-doc-2"]}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "remove", tojson(profileObj));
+assert.eq(profileObj.ndeleted, 2, tojson(profileObj));
+
+jsTestLog("Test batch delete.");
+assert.commandWorked(sessionDB.runCommand({
+ delete: collName,
+ deletes:
+ [{q: {_id: "multi-delete-doc-3"}, limit: 1}, {q: {_id: "multi-delete-doc-4"}, limit: 1}]
+}));
+// getLatestProfilerEntry returns the most recent entry, which comes from the
+// second delete in the batch.
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "remove", tojson(profileObj));
+assert.eq(profileObj.ndeleted, 1, tojson(profileObj));
+
+jsTestLog("Test distinct.");
+assert.eq(["read-doc"], sessionColl.distinct("_id", {_id: "read-doc"}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.command.distinct, sessionColl.getName(), tojson(profileObj));
+
+jsTestLog("Test find.");
+assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "query", tojson(profileObj));
+assert.eq(profileObj.nreturned, 1, tojson(profileObj));
+
+jsTestLog("Test findAndModify.");
+assert.eq({_id: "findAndModify-doc", updated: true},
+ sessionColl.findAndModify(
+ {query: {_id: "findAndModify-doc"}, update: {$set: {updated: true}}, new: true}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+
+jsTestLog("Test geoSearch.");
+assert.commandWorked(
+ sessionDB.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.geoSearch, sessionColl.getName(), tojson(profileObj));
+
+jsTestLog("Test getMore.");
+let res = assert.commandWorked(
+ sessionDB.runCommand({find: collName, filter: {_id: "read-doc"}, batchSize: 0}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+let cursorId = res.cursor.id;
+res = assert.commandWorked(sessionDB.runCommand({getMore: cursorId, collection: collName}));
+assert.eq([{_id: "read-doc"}], res.cursor.nextBatch, tojson(res));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "getmore", tojson(profileObj));
+assert.eq(profileObj.nreturned, 1, tojson(profileObj));
+
+jsTestLog("Test insert.");
+assert.commandWorked(sessionColl.insert({_id: "insert-doc"}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "insert", tojson(profileObj));
+assert.eq(profileObj.ninserted, 1, tojson(profileObj));
+
+jsTestLog("Test update.");
+assert.commandWorked(sessionColl.update({_id: "update-doc"}, {$set: {updated: true}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "update", tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+
+jsTestLog("Test multi update.");
+assert.commandWorked(sessionColl.updateMany(
+ {_id: {$in: ["multi-update-doc-1", "multi-update-doc-2"]}}, {$set: {updated: true}}));
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "update", tojson(profileObj));
+assert.eq(profileObj.nMatched, 2, tojson(profileObj));
+assert.eq(profileObj.nModified, 2, tojson(profileObj));
+
+jsTestLog("Test batch update.");
+assert.commandWorked(sessionDB.runCommand({
+ update: collName,
+ updates: [
+ {q: {_id: "multi-update-doc-1"}, u: {$set: {batch_updated: true}}},
+ {q: {_id: "multi-update-doc-2"}, u: {$set: {batch_updated: true}}}
+ ]
+}));
+// getLatestProfilerEntry returns the most recent entry, which comes from the
+// second update in the batch.
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "update", tojson(profileObj));
+assert.eq(profileObj.nMatched, 1, tojson(profileObj));
+assert.eq(profileObj.nModified, 1, tojson(profileObj));
+
+jsTestLog("Committing transaction.");
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTestLog("Test delete with a write conflict.");
+assert.commandWorked(sessionColl.insert({_id: "delete-doc"}, {writeConcern: {w: "majority"}}));
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+
+// Perform an operation in the transaction to establish the snapshot.
+assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
+
+// Update the document outside of the transaction.
+assert.commandWorked(testDB[collName].update({_id: "delete-doc"}, {$set: {conflict: true}}));
+
+// Deleting the document in the transaction fails, but profiling is still successful.
+assert.throws(function() {
+ sessionColl.deleteOne({_id: "delete-doc"});
+});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "remove", tojson(profileObj));
+assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTestLog("Test findAndModify with a write conflict.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+
+// Perform an operation in the transaction to establish the snapshot.
+assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
+
+// Update the document outside of the transaction.
+assert.commandWorked(testDB[collName].update({_id: "findAndModify-doc"}, {$set: {conflict: true}}));
+
+// Modifying the document in the transaction fails, but profiling is still successful.
+assert.throws(function() {
+ sessionColl.findAndModify(
+ {query: {_id: "findAndModify-doc"}, update: {$set: {conflict: false}}});
+});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.op, "command", tojson(profileObj));
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj));
+assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTestLog("Test insert with a write conflict.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+
+// Perform an operation in the transaction to establish the snapshot.
+assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
+
+// Insert a document outside of the transaction.
+assert.commandWorked(testDB[collName].insert({_id: "conflict-doc"}));
+
+// Inserting a document with the same _id in the transaction fails, but profiling is still
+// successful.
+assert.commandFailedWithCode(sessionColl.insert({_id: "conflict-doc"}), ErrorCodes.WriteConflict);
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "insert", tojson(profileObj));
+assert.eq(profileObj.ninserted, 0, tojson(profileObj));
+assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTestLog("Test update with a write conflict.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+
+// Perform an operation in the transaction to establish the snapshot.
+assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount());
+
+// Update the document outside of the transaction.
+assert.commandWorked(testDB[collName].update({_id: "update-doc"}, {$set: {conflict: true}}));
+
+// Updating the document in the transaction fails, but profiling is still successful.
+assert.commandFailedWithCode(sessionColl.update({_id: "update-doc"}, {$set: {conflict: false}}),
+ ErrorCodes.WriteConflict);
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj));
+assert.eq(profileObj.op, "update", tojson(profileObj));
+assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.endSession();
}());
diff --git a/jstests/core/txns/transactions_profiling_with_drops.js b/jstests/core/txns/transactions_profiling_with_drops.js
index ee25f5cc442..03fea946b35 100644
--- a/jstests/core/txns/transactions_profiling_with_drops.js
+++ b/jstests/core/txns/transactions_profiling_with_drops.js
@@ -1,112 +1,112 @@
// Tests that locks acquisitions for profiling in a transaction have a 0-second timeout.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- const dbName = "test";
- const collName = "transactions_profiling_with_drops";
- const adminDB = db.getSiblingDB("admin");
- const testDB = db.getSiblingDB(dbName);
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- sessionDb.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.insert({_id: "doc"}, {w: "majority"}));
- assert.commandWorked(sessionDb.runCommand({profile: 1, slowms: 1}));
-
- jsTest.log("Test read profiling with operation holding database X lock.");
-
- jsTest.log("Start transaction.");
- session.startTransaction();
-
- jsTest.log("Run a slow read. Profiling in the transaction should succeed.");
- assert.sameMembers(
- [{_id: "doc"}],
- sessionColl.find({$where: "sleep(1000); return true;"}).comment("read success").toArray());
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: testDB, filter: {"command.comment": "read success"}});
-
- // Lock 'test' database in X mode.
- let lockShell = startParallelShell(function() {
- assert.commandFailed(db.adminCommand({
- sleep: 1,
- secs: 500,
- lock: "w",
- lockTarget: "test",
- $comment: "transaction_profiling_with_drops lock sleep"
- }));
+"use strict";
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+const dbName = "test";
+const collName = "transactions_profiling_with_drops";
+const adminDB = db.getSiblingDB("admin");
+const testDB = db.getSiblingDB(dbName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+sessionDb.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.insert({_id: "doc"}, {w: "majority"}));
+assert.commandWorked(sessionDb.runCommand({profile: 1, slowms: 1}));
+
+jsTest.log("Test read profiling with operation holding database X lock.");
+
+jsTest.log("Start transaction.");
+session.startTransaction();
+
+jsTest.log("Run a slow read. Profiling in the transaction should succeed.");
+assert.sameMembers(
+ [{_id: "doc"}],
+ sessionColl.find({$where: "sleep(1000); return true;"}).comment("read success").toArray());
+profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: testDB, filter: {"command.comment": "read success"}});
+
+// Lock 'test' database in X mode.
+let lockShell = startParallelShell(function() {
+ assert.commandFailed(db.adminCommand({
+ sleep: 1,
+ secs: 500,
+ lock: "w",
+ lockTarget: "test",
+ $comment: "transaction_profiling_with_drops lock sleep"
+ }));
+});
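+// The sleep command above is expected to fail: once the blocking behavior has
+// been verified, the test kills it via killOp, which is why the parallel shell
+// wraps it in assert.commandFailed.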
+
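+// Poll currentOp until exactly one in-progress operation matches 'opFilter',
+// then return its opid so the test can kill it with killOp once it is done.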
+const waitForCommand = function(opFilter) {
+ let opId = -1;
+ assert.soon(function() {
+ const curopRes = testDB.currentOp();
+ assert.commandWorked(curopRes);
+ const foundOp = curopRes["inprog"].filter(opFilter);
+
+ if (foundOp.length == 1) {
+ opId = foundOp[0]["opid"];
+ }
+ return (foundOp.length == 1);
});
+ return opId;
+};
- const waitForCommand = function(opFilter) {
- let opId = -1;
- assert.soon(function() {
- const curopRes = testDB.currentOp();
- assert.commandWorked(curopRes);
- const foundOp = curopRes["inprog"].filter(opFilter);
-
- if (foundOp.length == 1) {
- opId = foundOp[0]["opid"];
- }
- return (foundOp.length == 1);
- });
- return opId;
- };
-
- // Wait for sleep to appear in currentOp
- let opId = waitForCommand(
- op => (op["ns"] == "admin.$cmd" &&
- op["command"]["$comment"] == "transaction_profiling_with_drops lock sleep"));
-
- jsTest.log("Run a slow read. Profiling in the transaction should fail.");
- assert.sameMembers(
- [{_id: "doc"}],
- sessionColl.find({$where: "sleep(1000); return true;"}).comment("read failure").toArray());
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
-
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: testDB, filter: {"command.comment": "read failure"}});
-
- jsTest.log("Test write profiling with operation holding database X lock.");
-
- jsTest.log("Start transaction.");
- session.startTransaction();
-
- jsTest.log("Run a slow write. Profiling in the transaction should succeed.");
- assert.commandWorked(sessionColl.update(
- {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "en"}}));
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: testDB, filter: {"command.collation": {locale: "en"}}});
-
- // Lock 'test' database in X mode.
- lockShell = startParallelShell(function() {
- assert.commandFailed(db.getSiblingDB("test").adminCommand(
- {sleep: 1, secs: 500, lock: "w", lockTarget: "test", $comment: "lock sleep"}));
- });
+// Wait for the sleep command to appear in currentOp.
+let opId = waitForCommand(
+ op => (op["ns"] == "admin.$cmd" &&
+ op["command"]["$comment"] == "transaction_profiling_with_drops lock sleep"));
+
+jsTest.log("Run a slow read. Profiling in the transaction should fail.");
+assert.sameMembers(
+ [{_id: "doc"}],
+ sessionColl.find({$where: "sleep(1000); return true;"}).comment("read failure").toArray());
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.commandWorked(testDB.killOp(opId));
+lockShell();
+
+profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: testDB, filter: {"command.comment": "read failure"}});
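+// No profile entry was written: lock acquisitions for profiling inside a
+// transaction use a 0-second timeout (see the comment at the top of this
+// file), and the parallel shell held the database lock in X mode.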
+
+jsTest.log("Test write profiling with operation holding database X lock.");
+
+jsTest.log("Start transaction.");
+session.startTransaction();
+
+jsTest.log("Run a slow write. Profiling in the transaction should succeed.");
+assert.commandWorked(sessionColl.update(
+ {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "en"}}));
+profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: testDB, filter: {"command.collation": {locale: "en"}}});
+
+// Lock 'test' database in X mode.
+lockShell = startParallelShell(function() {
+ assert.commandFailed(db.getSiblingDB("test").adminCommand(
+ {sleep: 1, secs: 500, lock: "w", lockTarget: "test", $comment: "lock sleep"}));
+});
- // Wait for sleep to appear in currentOp
- opId = waitForCommand(
- op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "lock sleep"));
+// Wait for the sleep command to appear in currentOp.
+opId =
+ waitForCommand(op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "lock sleep"));
- jsTest.log("Run a slow write. Profiling in the transaction should still succeed " +
- "since the transaction already has an IX DB lock.");
- assert.commandWorked(sessionColl.update(
- {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "fr"}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+jsTest.log("Run a slow write. Profiling in the transaction should still succeed " +
+ "since the transaction already has an IX DB lock.");
+assert.commandWorked(sessionColl.update(
+ {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "fr"}}));
+assert.commandWorked(session.commitTransaction_forTesting());
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
+assert.commandWorked(testDB.killOp(opId));
+lockShell();
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: testDB, filter: {"command.collation": {locale: "fr"}}});
+profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: testDB, filter: {"command.collation": {locale: "fr"}}});
- jsTest.log("Both writes should succeed");
- assert.docEq({_id: "doc", good: 2}, sessionColl.findOne());
+jsTest.log("Both writes should succeed");
+assert.docEq({_id: "doc", good: 2}, sessionColl.findOne());
- session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/transactions_write_conflicts.js b/jstests/core/txns/transactions_write_conflicts.js
index dc4b742deeb..531477353fc 100644
--- a/jstests/core/txns/transactions_write_conflicts.js
+++ b/jstests/core/txns/transactions_write_conflicts.js
@@ -32,164 +32,232 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'.
-
- const dbName = "test";
- const collName = "transactions_write_conflicts";
-
- const testDB = db.getSiblingDB(dbName);
- const coll = testDB[collName];
-
- // Clean up and create test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}});
-
- /***********************************************************************************************
- * Single document write conflicts.
- **********************************************************************************************/
-
- jsTestLog("Test single document write conflicts.");
-
- print("insert-insert conflict.");
- let t1Op = {insert: collName, documents: [{_id: 1, t1: 1}]};
- let t2Op = {insert: collName, documents: [{_id: 1, t2: 1}]};
- let expectedDocs1 = [{_id: 1, t1: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
- let expectedDocs2 = [{_id: 1, t2: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
-
- print("update-update conflict");
- let initOp = {insert: collName, documents: [{_id: 1}]}; // the document to update.
- t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}]};
- t2Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}]};
- expectedDocs1 = [{_id: 1, t1: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1, t2: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("upsert-upsert conflict");
- t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t1: 1}}, upsert: true}]};
- t2Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t2: 1}}, upsert: true}]};
- expectedDocs1 = [{_id: 1, t1: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1, t2: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("delete-delete conflict");
- initOp = {insert: collName, documents: [{_id: 1}]}; // the document to delete.
- t1Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]};
- t2Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]};
- expectedDocs1 = [];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("update-delete conflict");
- initOp = {insert: collName, documents: [{_id: 1}]}; // the document to delete/update.
- t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}]};
- t2Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]};
- expectedDocs1 = [{_id: 1, t1: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("delete-update conflict");
- initOp = {insert: collName, documents: [{_id: 1}]}; // the document to delete/update.
- t1Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]};
- t2Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}]};
- expectedDocs1 = [];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1, t2: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- /***********************************************************************************************
- * Multi-document and predicate based write conflicts.
- **********************************************************************************************/
-
- jsTestLog("Test multi-document and predicate based write conflicts.");
-
- print("batch insert-batch insert conflict");
- t1Op = {insert: collName, documents: [{_id: 1}, {_id: 2}, {_id: 3}]};
- t2Op = {insert: collName, documents: [{_id: 2}, {_id: 3}, {_id: 4}]};
- expectedDocs1 = [{_id: 1}, {_id: 2}, {_id: 3}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
- expectedDocs2 = [{_id: 2}, {_id: 3}, {_id: 4}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
-
- print("multiupdate-multiupdate conflict");
- initOp = {
- insert: collName,
- documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete.
- };
- // Predicate intersection: [{_id: 2}, {_id: 3}]
- t1Op = {update: collName, updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}]};
- t2Op = {update: collName, updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}]};
- expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("multiupdate-multidelete conflict");
- initOp = {
- insert: collName,
- documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete.
- };
- // Predicate intersection: [{_id: 2}, {_id: 3}]
- t1Op = {update: collName, updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}]};
- t2Op = {delete: collName, deletes: [{q: {_id: {$gte: 2}}, limit: 0}]};
- expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("multidelete-multiupdate conflict");
- initOp = {
- insert: collName,
- documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete.
- };
- // Predicate intersection: [{_id: 2}, {_id: 3}]
- t1Op = {delete: collName, deletes: [{q: {_id: {$lte: 3}}, limit: 0}]};
- t2Op = {update: collName, updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}]};
- expectedDocs1 = [{_id: 4}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("multidelete-multidelete conflict");
- initOp = {
- insert: collName,
- documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to delete.
- };
- // Predicate intersection: [{_id: 2}, {_id: 3}]
- t1Op = {delete: collName, deletes: [{q: {_id: {$lte: 3}}, limit: 0}]};
- t2Op = {delete: collName, deletes: [{q: {_id: {$gte: 2}}, limit: 0}]};
- expectedDocs1 = [{_id: 4}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+"use strict";
+load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'.
+
+const dbName = "test";
+const collName = "transactions_write_conflicts";
+
+const testDB = db.getSiblingDB(dbName);
+const coll = testDB[collName];
+
+// Clean up and create test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}});
+
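+// What WriteConflictHelpers.writeConflictTest presumably does, inferred from
+// its call sites below (the real implementation lives in
+// jstests/core/txns/libs/write_conflicts.js): apply 'initOp' outside any
+// transaction, run 't1Op' and 't2Op' in two concurrent transactions, let the
+// ordering function (T1StartsFirstAndWins / T2StartsSecondAndWins) pick which
+// transaction commits while the loser aborts with a WriteConflict, then verify
+// that the collection contents equal 'expectedDocs'.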
+/***********************************************************************************************
+ * Single document write conflicts.
+ **********************************************************************************************/
+
+jsTestLog("Test single document write conflicts.");
+
+print("insert-insert conflict.");
+let t1Op = {insert: collName, documents: [{_id: 1, t1: 1}]};
+let t2Op = {insert: collName, documents: [{_id: 1, t2: 1}]};
+let expectedDocs1 = [{_id: 1, t1: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
+let expectedDocs2 = [{_id: 1, t2: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
+
+print("update-update conflict");
+let initOp = {insert: collName, documents: [{_id: 1}]}; // the document to update.
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}]
+};
+expectedDocs1 = [{_id: 1, t1: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1, t2: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("upsert-upsert conflict");
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {t1: 1}}, upsert: true}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {t2: 1}}, upsert: true}]
+};
+expectedDocs1 = [{_id: 1, t1: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1, t2: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("delete-delete conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}]
+}; // the document to delete.
+t1Op = {
+ delete: collName,
+ deletes: [{q: {_id: 1}, limit: 1}]
+};
+t2Op = {
+ delete: collName,
+ deletes: [{q: {_id: 1}, limit: 1}]
+};
+expectedDocs1 = [];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("update-delete conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}]
+}; // the document to delete/update.
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}]
+};
+t2Op = {
+ delete: collName,
+ deletes: [{q: {_id: 1}, limit: 1}]
+};
+expectedDocs1 = [{_id: 1, t1: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("delete-update conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}]
+}; // the document to delete/update.
+t1Op = {
+ delete: collName,
+ deletes: [{q: {_id: 1}, limit: 1}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}]
+};
+expectedDocs1 = [];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1, t2: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+/***********************************************************************************************
+ * Multi-document and predicate based write conflicts.
+ **********************************************************************************************/
+
+jsTestLog("Test multi-document and predicate based write conflicts.");
+
+print("batch insert-batch insert conflict");
+t1Op = {
+ insert: collName,
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}]
+};
+t2Op = {
+ insert: collName,
+ documents: [{_id: 2}, {_id: 3}, {_id: 4}]
+};
+expectedDocs1 = [{_id: 1}, {_id: 2}, {_id: 3}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
+expectedDocs2 = [{_id: 2}, {_id: 3}, {_id: 4}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
+
+print("multiupdate-multiupdate conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete.
+};
+// Predicate intersection: [{_id: 2}, {_id: 3}]
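+// Both multi-ops touch documents 2 and 3, so the two transactions are
+// guaranteed to conflict no matter which one starts first.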
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}]
+};
+expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("multiupdate-multidelete conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete.
+};
+// Predicate intersection: [{_id: 2}, {_id: 3}]
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}]
+};
+t2Op = {
+ delete: collName,
+ deletes: [{q: {_id: {$gte: 2}}, limit: 0}]
+};
+expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("multidelete-multiupdate conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete.
+};
+// Predicate intersection: [{_id: 2}, {_id: 3}]
+t1Op = {
+ delete: collName,
+ deletes: [{q: {_id: {$lte: 3}}, limit: 0}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}]
+};
+expectedDocs1 = [{_id: 4}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("multidelete-multidelete conflict");
+initOp = {
+ insert: collName,
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to delete.
+};
+// Predicate intersection: [{_id: 2}, {_id: 3}]
+t1Op = {
+ delete: collName,
+ deletes: [{q: {_id: {$lte: 3}}, limit: 0}]
+};
+t2Op = {
+ delete: collName,
+ deletes: [{q: {_id: {$gte: 2}}, limit: 0}]
+};
+expectedDocs1 = [{_id: 4}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
}());
diff --git a/jstests/core/txns/transactions_write_conflicts_unique_indexes.js b/jstests/core/txns/transactions_write_conflicts_unique_indexes.js
index 4ce4a2d5eb1..53158d7dd88 100644
--- a/jstests/core/txns/transactions_write_conflicts_unique_indexes.js
+++ b/jstests/core/txns/transactions_write_conflicts_unique_indexes.js
@@ -5,117 +5,140 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'.
-
- const dbName = "test";
- const collName = "transactions_write_conflicts_unique_indexes";
-
- const testDB = db.getSiblingDB(dbName);
- const coll = testDB[collName];
-
- // Clean up and create test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}});
-
- // Create a unique index on field 'x'.
- assert.commandWorked(coll.createIndex({x: 1}, {unique: true}));
-
- /***********************************************************************************************
- * Single document conflicts.
- **********************************************************************************************/
-
- jsTestLog("Test single document write conflicts.");
-
- print("insert-insert conflict.");
-
- let t1Op = {insert: collName, documents: [{_id: 1, x: 1}]};
- let t2Op = {insert: collName, documents: [{_id: 2, x: 1}]};
- let expectedDocs1 = [{_id: 1, x: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
- let expectedDocs2 = [{_id: 2, x: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
-
- print("update-update conflict");
- let initOp = {
- insert: collName,
- documents: [{_id: 1, x: 1}, {_id: 2, x: 2}]
- }; // the document to update.
- t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: 3}}}]};
- t2Op = {update: collName, updates: [{q: {_id: 2}, u: {$set: {x: 3}}}]};
- expectedDocs1 = [{_id: 1, x: 3}, {_id: 2, x: 2}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [{_id: 1, x: 1}, {_id: 2, x: 3}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
- print("upsert-upsert conflict");
- t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: 1}}, upsert: true}]};
- t2Op = {update: collName, updates: [{q: {_id: 2}, u: {$set: {x: 1}}, upsert: true}]};
- expectedDocs1 = [{_id: 1, x: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
- expectedDocs2 = [{_id: 2, x: 1}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
-
- /***********************************************************************************************
- * Multi-document and predicate based conflicts.
- **********************************************************************************************/
-
- jsTestLog("Test multi-document and predicate based write conflicts.");
-
- print("batch insert-batch insert conflict");
- t1Op = {insert: collName, documents: [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]};
- t2Op = {insert: collName, documents: [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}]};
- expectedDocs1 = [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
- expectedDocs2 = [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
-
- print("multiupdate-multiupdate conflict");
- // Update disjoint sets of documents such that the post-image of each set would create a unique
- // index violation.
- initOp = {
- insert: collName,
- documents: [
- // Set 1
- {_id: 1, x: 1},
- {_id: 2, x: 2},
- {_id: 3, x: 3},
- // Set 2
- {_id: 4, x: 10},
- {_id: 5, x: 11},
- {_id: 6, x: 12}
- ] // the documents to update.
- };
- t1Op = {update: collName, updates: [{q: {_id: {$lte: 3}}, u: {$inc: {x: 4}}, multi: true}]};
- t2Op = {update: collName, updates: [{q: {_id: {$gte: 4}}, u: {$inc: {x: -4}}, multi: true}]};
- expectedDocs1 = [
- {_id: 1, x: 5},
- {_id: 2, x: 6},
- {_id: 3, x: 7},
- {_id: 4, x: 10},
- {_id: 5, x: 11},
- {_id: 6, x: 12}
- ];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
- expectedDocs2 = [
+"use strict";
+
+load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'.
+
+const dbName = "test";
+const collName = "transactions_write_conflicts_unique_indexes";
+
+const testDB = db.getSiblingDB(dbName);
+const coll = testDB[collName];
+
+// Clean up and create test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}});
+
+// Create a unique index on field 'x'.
+assert.commandWorked(coll.createIndex({x: 1}, {unique: true}));
+
+/***********************************************************************************************
+ * Single document conflicts.
+ **********************************************************************************************/
+
+jsTestLog("Test single document write conflicts.");
+
+print("insert-insert conflict.");
+
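+// The two inserts use distinct _ids but the same value for the unique field
+// 'x', so the conflict comes from the unique index rather than from _id.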
+let t1Op = {insert: collName, documents: [{_id: 1, x: 1}]};
+let t2Op = {insert: collName, documents: [{_id: 2, x: 1}]};
+let expectedDocs1 = [{_id: 1, x: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
+let expectedDocs2 = [{_id: 2, x: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
+
+print("update-update conflict");
+let initOp = {
+ insert: collName,
+ documents: [{_id: 1, x: 1}, {_id: 2, x: 2}]
+};  // the documents to update.
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {x: 3}}}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: 2}, u: {$set: {x: 3}}}]
+};
+expectedDocs1 = [{_id: 1, x: 3}, {_id: 2, x: 2}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [{_id: 1, x: 1}, {_id: 2, x: 3}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
+
+print("upsert-upsert conflict");
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {x: 1}}, upsert: true}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: 2}, u: {$set: {x: 1}}, upsert: true}]
+};
+expectedDocs1 = [{_id: 1, x: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
+expectedDocs2 = [{_id: 2, x: 1}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
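+// As with the inserts above, both upserts create a document with x: 1, so the
+// unique index on 'x' is what makes them conflict.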
+
+/***********************************************************************************************
+ * Multi-document and predicate based conflicts.
+ **********************************************************************************************/
+
+jsTestLog("Test multi-document and predicate based write conflicts.");
+
+print("batch insert-batch insert conflict");
+t1Op = {
+ insert: collName,
+ documents: [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]
+};
+t2Op = {
+ insert: collName,
+ documents: [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}]
+};
+expectedDocs1 = [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins);
+expectedDocs2 = [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins);
+
+print("multiupdate-multiupdate conflict");
+// Update disjoint sets of documents such that the post-image of each set would create a unique
+// index violation.
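+// Concretely: set 1 {x: 1, 2, 3} incremented by 4 becomes {x: 5, 6, 7}, and
+// set 2 {x: 10, 11, 12} decremented by 4 becomes {x: 6, 7, 8}; if both
+// transactions committed, the x values 6 and 7 would each appear twice.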
+initOp = {
+ insert: collName,
+ documents: [
+ // Set 1
{_id: 1, x: 1},
{_id: 2, x: 2},
{_id: 3, x: 3},
- {_id: 4, x: 6},
- {_id: 5, x: 7},
- {_id: 6, x: 8}
- ];
- WriteConflictHelpers.writeConflictTest(
- coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
-
+ // Set 2
+ {_id: 4, x: 10},
+ {_id: 5, x: 11},
+ {_id: 6, x: 12}
+ ] // the documents to update.
+};
+t1Op = {
+ update: collName,
+ updates: [{q: {_id: {$lte: 3}}, u: {$inc: {x: 4}}, multi: true}]
+};
+t2Op = {
+ update: collName,
+ updates: [{q: {_id: {$gte: 4}}, u: {$inc: {x: -4}}, multi: true}]
+};
+expectedDocs1 = [
+ {_id: 1, x: 5},
+ {_id: 2, x: 6},
+ {_id: 3, x: 7},
+ {_id: 4, x: 10},
+ {_id: 5, x: 11},
+ {_id: 6, x: 12}
+];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp);
+expectedDocs2 = [
+ {_id: 1, x: 1},
+ {_id: 2, x: 2},
+ {_id: 3, x: 3},
+ {_id: 4, x: 6},
+ {_id: 5, x: 7},
+ {_id: 6, x: 8}
+];
+WriteConflictHelpers.writeConflictTest(
+ coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp);
}());
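For context on the helper driving the assertions above, here is a minimal sketch of the transaction-versus-transaction conflict pattern it exercises. This is not the WriteConflictHelpers implementation; the collection name is illustrative, and it assumes a mongo shell connected to a replica set that supports transactions.

const sketchDB = db.getSiblingDB("test");
sketchDB.write_conflict_sketch.drop();
assert.commandWorked(sketchDB.createCollection("write_conflict_sketch"));

const sketchS1 = db.getMongo().startSession({causalConsistency: false});
const sketchS2 = db.getMongo().startSession({causalConsistency: false});
const sketchColl1 = sketchS1.getDatabase("test").write_conflict_sketch;
const sketchColl2 = sketchS2.getDatabase("test").write_conflict_sketch;

sketchS1.startTransaction();
sketchS2.startTransaction();

// T1 writes the document first, so T2's write to the same _id conflicts.
assert.commandWorked(sketchColl1.insert({_id: 1}));
assert.commandFailedWithCode(sketchColl2.insert({_id: 1}), ErrorCodes.WriteConflict);

// T1 can still commit; the conflict already aborted T2.
assert.commandWorked(sketchS1.commitTransaction_forTesting());
assert.commandFailedWithCode(sketchS2.abortTransaction_forTesting(),
                             ErrorCodes.NoSuchTransaction);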
diff --git a/jstests/core/txns/upconvert_read_concern.js b/jstests/core/txns/upconvert_read_concern.js
index 15d166dde08..2f49280e128 100644
--- a/jstests/core/txns/upconvert_read_concern.js
+++ b/jstests/core/txns/upconvert_read_concern.js
@@ -2,97 +2,94 @@
// 'snapshot'.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "upconvert_read_concern";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
+const dbName = "test";
+const collName = "upconvert_read_concern";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- function testUpconvertReadConcern(readConcern) {
- jsTest.log("Test that the following readConcern is upconverted: " + tojson(readConcern));
- assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
+function testUpconvertReadConcern(readConcern) {
+ jsTest.log("Test that the following readConcern is upconverted: " + tojson(readConcern));
+ assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}}));
- // Start a new transaction with the given readConcern.
- session.startTransaction();
- let command = {find: collName};
- if (readConcern) {
- Object.extend(command, {readConcern: readConcern});
- }
- assert.commandWorked(sessionDb.runCommand(command));
+ // Start a new transaction with the given readConcern.
+ session.startTransaction();
+ let command = {find: collName};
+ if (readConcern) {
+ Object.extend(command, {readConcern: readConcern});
+ }
+ assert.commandWorked(sessionDb.runCommand(command));
- // Insert a document outside of the transaction.
- assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+ // Insert a document outside of the transaction.
+ assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- // Test that the transaction does not see the new document (it has snapshot isolation).
- let res = assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.eq(res.cursor.firstBatch.length, 0, tojson(res));
+ // Test that the transaction does not see the new document (it has snapshot isolation).
+ let res = assert.commandWorked(sessionDb.runCommand({find: collName}));
+ assert.eq(res.cursor.firstBatch.length, 0, tojson(res));
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- testUpconvertReadConcern(null);
- testUpconvertReadConcern({});
- testUpconvertReadConcern({level: "local"});
- testUpconvertReadConcern({level: "majority"});
- testUpconvertReadConcern({level: "snapshot"});
-
- function testCannotUpconvertReadConcern(readConcern) {
- jsTest.log("Test that the following readConcern cannot be upconverted: " + readConcern);
-
- // Start a new transaction with the given readConcern.
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDb.runCommand({find: collName, readConcern: readConcern}),
- ErrorCodes.InvalidOptions);
-
- // No more operations are allowed in the transaction.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
+ // Commit the transaction.
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
- testCannotUpconvertReadConcern({level: "available"});
- testCannotUpconvertReadConcern({level: "linearizable"});
+testUpconvertReadConcern(null);
+testUpconvertReadConcern({});
+testUpconvertReadConcern({level: "local"});
+testUpconvertReadConcern({level: "majority"});
+testUpconvertReadConcern({level: "snapshot"});
- jsTest.log("Test starting a transaction with an invalid readConcern");
+function testCannotUpconvertReadConcern(readConcern) {
+ jsTest.log("Test that the following readConcern cannot be upconverted: " + tojson(readConcern));
// Start a new transaction with the given readConcern.
session.startTransaction();
- assert.commandFailedWithCode(
- sessionDb.runCommand({find: collName, readConcern: {level: "bad"}}),
- ErrorCodes.FailedToParse);
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName, readConcern: readConcern}),
+ ErrorCodes.InvalidOptions);
// No more operations are allowed in the transaction.
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.NoSuchTransaction);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
+}
- jsTest.log("Test specifying readConcern on the second statement in a transaction");
+testCannotUpconvertReadConcern({level: "available"});
+testCannotUpconvertReadConcern({level: "linearizable"});
- // Start a new transaction with snapshot readConcern.
- session.startTransaction();
- assert.commandWorked(sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}));
+jsTest.log("Test starting a transaction with an invalid readConcern");
- // The second statement cannot specify a readConcern.
- assert.commandFailedWithCode(
- sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}),
- ErrorCodes.InvalidOptions);
+// Start a new transaction with the given readConcern.
+session.startTransaction();
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName, readConcern: {level: "bad"}}),
+ ErrorCodes.FailedToParse);
- // The transaction is still active and can be committed.
- assert.commandWorked(session.commitTransaction_forTesting());
+// No more operations are allowed in the transaction.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log("Test specifying readConcern on the second statement in a transaction");
+
+// Start a new transaction with snapshot readConcern.
+session.startTransaction();
+assert.commandWorked(sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}));
+
+// The second statement cannot specify a readConcern.
+assert.commandFailedWithCode(
+ sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}),
+ ErrorCodes.InvalidOptions);
+
+// The transaction is still active and can be committed.
+assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+session.endSession();
}());
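As a usage note, the readConcern being upconverted here can equivalently be passed to startTransaction() instead of the transaction's first command. A minimal sketch, with an illustrative collection name:

const upcSession = db.getMongo().startSession({causalConsistency: false});
const upcDb = upcSession.getDatabase("test");
upcDb.upconvert_sketch.drop();
assert.commandWorked(upcDb.createCollection("upconvert_sketch"));

// 'majority' is upconverted, so the transaction reads from a single snapshot.
upcSession.startTransaction({readConcern: {level: "majority"}});
assert.commandWorked(upcDb.runCommand({find: "upconvert_sketch"}));
assert.commandWorked(upcSession.commitTransaction_forTesting());
upcSession.endSession();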
diff --git a/jstests/core/txns/view_reads_in_transaction.js b/jstests/core/txns/view_reads_in_transaction.js
index 9e7ab14cd95..5a1a08761b8 100644
--- a/jstests/core/txns/view_reads_in_transaction.js
+++ b/jstests/core/txns/view_reads_in_transaction.js
@@ -1,60 +1,62 @@
// Tests that reads on views are supported in transactions.
// @tags: [uses_transactions, uses_snapshot_read_concern]
(function() {
- "use strict";
-
- const session = db.getMongo().startSession({causalConsistency: false});
- const testDB = session.getDatabase("test");
- const coll = testDB.getCollection("view_reads_in_transaction_data_coll");
- const view = testDB.getCollection("view_reads_in_transaction_actual_view");
-
- coll.drop({writeConcern: {w: "majority"}});
- view.drop({writeConcern: {w: "majority"}});
-
- // Populate the backing collection.
- const testDoc = {_id: "kyle"};
- assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}}));
-
- // Create an identity view on the data-bearing collection.
- assert.commandWorked(view.runCommand(
- "create", {viewOn: coll.getName(), pipeline: [], writeConcern: {w: "majority"}}));
-
- const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
- if (isMongos) {
- // Refresh the router's and shard's database versions so the distinct run below can succeed.
- // This is necessary because shards always abort their local transaction on stale version
- // errors and mongos is not allowed to retry on these errors in a transaction if the stale
- // shard has completed at least one earlier statement.
- assert.eq(view.distinct("_id"), ["kyle"]);
- }
-
- // Run a dummy find to start the transaction.
- jsTestLog("Starting transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}});
- let cursor = coll.find();
- cursor.next();
-
- // Insert a document outside of the transaction. Subsequent reads should not see this document.
- jsTestLog("Inserting document outside of transaction.");
- assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({
- _id: "not_visible_in_transaction",
- }));
-
- // Perform reads on views, which will be transformed into aggregations on the backing
- // collection.
- jsTestLog("Performing reads on the view inside the transaction.");
- cursor = view.find();
- assert.docEq(testDoc, cursor.next());
- assert(!cursor.hasNext());
-
- cursor = view.aggregate({$match: {}});
- assert.docEq(testDoc, cursor.next());
- assert(!cursor.hasNext());
-
- assert.eq(view.find({_id: {$exists: 1}}).itcount(), 1);
-
+"use strict";
+
+const session = db.getMongo().startSession({causalConsistency: false});
+const testDB = session.getDatabase("test");
+const coll = testDB.getCollection("view_reads_in_transaction_data_coll");
+const view = testDB.getCollection("view_reads_in_transaction_actual_view");
+
+coll.drop({writeConcern: {w: "majority"}});
+view.drop({writeConcern: {w: "majority"}});
+
+// Populate the backing collection.
+const testDoc = {
+ _id: "kyle"
+};
+assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}}));
+
+// Create an identity view on the data-bearing collection.
+assert.commandWorked(view.runCommand(
+ "create", {viewOn: coll.getName(), pipeline: [], writeConcern: {w: "majority"}}));
+
+const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid";
+if (isMongos) {
+ // Refresh the router's and shard's database versions so the distinct run below can succeed.
+ // This is necessary because shards always abort their local transaction on stale version
+ // errors and mongos is not allowed to retry on these errors in a transaction if the stale
+ // shard has completed at least one earlier statement.
assert.eq(view.distinct("_id"), ["kyle"]);
+}
+
+// Run a dummy find to start the transaction.
+jsTestLog("Starting transaction.");
+session.startTransaction({readConcern: {level: "snapshot"}});
+let cursor = coll.find();
+cursor.next();
+
+// Insert a document outside of the transaction. Subsequent reads should not see this document.
+jsTestLog("Inserting document outside of transaction.");
+assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({
+ _id: "not_visible_in_transaction",
+}));
+
+// Perform reads on views, which will be transformed into aggregations on the backing
+// collection.
+jsTestLog("Performing reads on the view inside the transaction.");
+cursor = view.find();
+assert.docEq(testDoc, cursor.next());
+assert(!cursor.hasNext());
+
+cursor = view.aggregate({$match: {}});
+assert.docEq(testDoc, cursor.next());
+assert(!cursor.hasNext());
+
+assert.eq(view.find({_id: {$exists: 1}}).itcount(), 1);
+
+assert.eq(view.distinct("_id"), ["kyle"]);
- assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Transaction committed.");
+assert.commandWorked(session.commitTransaction_forTesting());
+jsTestLog("Transaction committed.");
}());
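The rewrite the comments above refer to can be seen directly: a find on an identity view returns the same documents as the equivalent aggregation on the backing collection. A small sketch outside any transaction, with illustrative names:

const vrBacking = db.view_sketch_backing;
vrBacking.drop();
db.view_sketch.drop();
assert.commandWorked(vrBacking.insert({_id: "kyle"}));
assert.commandWorked(db.createView("view_sketch", vrBacking.getName(), []));

// Reading the view is equivalent to aggregating over the backing collection.
assert.eq(db.view_sketch.find().toArray(),
          vrBacking.aggregate([{$match: {}}]).toArray());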
diff --git a/jstests/core/txns/write_conflicts_with_non_txns.js b/jstests/core/txns/write_conflicts_with_non_txns.js
index 451e7a6ae29..e8c3f9fcd47 100644
--- a/jstests/core/txns/write_conflicts_with_non_txns.js
+++ b/jstests/core/txns/write_conflicts_with_non_txns.js
@@ -19,129 +19,134 @@
(function() {
- "use strict";
-
- load('jstests/libs/parallelTester.js'); // for ScopedThread.
-
- const dbName = "test";
- const collName = "write_conflicts_with_non_txns";
-
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- // Clean up and create test collection.
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- // Two conflicting documents to be inserted by a multi-document transaction and a
- // non-transactional write, respectively.
- const txnDoc = {_id: 1};
- const nonTxnDoc = {_id: 1, nonTxn: true};
-
- // Performs a single document insert on the test collection. Returns the command result object.
- function singleDocWrite(dbName, collName, doc) {
- const testColl = db.getSiblingDB(dbName)[collName];
- return testColl.runCommand({insert: collName, documents: [doc]});
- }
-
- // Returns true if a single document insert has started running on the server.
- function writeStarted() {
- return testDB.currentOp().inprog.some(op => {
- return op.active && (op.ns === testColl.getFullName()) && (op.op === "insert") &&
- (op.writeConflicts > 0);
- });
- }
-
- /**
- * A non-transactional (single document) write should keep retrying when attempting to insert a
- * document that conflicts with a previous write done by a running transaction, and should be
- * allowed to continue after the transaction commits. If 'maxTimeMS' is specified, a single
- * document write should timeout after the given time limit if there is a write conflict.
- */
-
- jsTestLog("Start a multi-document transaction with a document insert.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(txnDoc));
-
- jsTestLog("Do a conflicting single document insert outside of transaction with maxTimeMS.");
- assert.commandFailedWithCode(
- testColl.runCommand({insert: collName, documents: [nonTxnDoc], maxTimeMS: 100}),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Doing conflicting single document write in separate thread.");
- let thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc);
- thread.start();
-
- // Wait for the single doc write to start.
- assert.soon(writeStarted);
-
- // Commit the transaction, which should allow the single document write to finish. Since the
- // single doc write should get serialized after the transaction, we expect it to fail with a
- // duplicate key error.
- jsTestLog("Commit the multi-document transaction.");
- assert.commandWorked(session.commitTransaction_forTesting());
- thread.join();
- assert.commandFailedWithCode(thread.returnData(), ErrorCodes.DuplicateKey);
-
- // Check the final documents.
- assert.sameMembers([txnDoc], testColl.find().toArray());
-
- // Clean up the test collection.
- assert.commandWorked(testColl.remove({}));
-
- /**
- * A non-transactional (single document) write should keep retrying when attempting to insert a
- * document that conflicts with a previous write done by a running transaction, and should be
- * allowed to continue and complete successfully after the transaction aborts.
- */
-
- jsTestLog("Start a multi-document transaction with a document insert.");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(txnDoc));
-
- jsTestLog("Doing conflicting single document write in separate thread.");
- thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc);
- thread.start();
-
- // Wait for the single doc write to start.
- assert.soon(writeStarted);
-
- // Abort the transaction, which should allow the single document write to finish and insert its
- // document successfully.
- jsTestLog("Abort the multi-document transaction.");
- assert.commandWorked(session.abortTransaction_forTesting());
- thread.join();
- assert.commandWorked(thread.returnData());
-
- // Check the final documents.
- assert.sameMembers([nonTxnDoc], testColl.find().toArray());
-
- // Clean up the test collection.
- assert.commandWorked(testColl.remove({}));
-
- /**
- * A transaction that tries to write to a document that was updated by a non-transaction after
- * it started should fail with a WriteConflict.
- */
-
- jsTestLog("Start a multi-document transaction.");
- session.startTransaction();
- assert.commandWorked(sessionColl.runCommand({find: collName}));
-
- jsTestLog("Do a single document insert outside of the transaction.");
- assert.commandWorked(testColl.insert(nonTxnDoc));
-
- jsTestLog("Insert a conflicting document inside the multi-document transaction.");
- assert.commandFailedWithCode(sessionColl.insert(txnDoc), ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // Check the final documents.
- assert.sameMembers([nonTxnDoc], testColl.find().toArray());
+"use strict";
+
+load('jstests/libs/parallelTester.js'); // for ScopedThread.
+
+const dbName = "test";
+const collName = "write_conflicts_with_non_txns";
+
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB[collName];
+
+// Clean up and create test collection.
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+// Two conflicting documents to be inserted by a multi-document transaction and a
+// non-transactional write, respectively.
+const txnDoc = {
+ _id: 1
+};
+const nonTxnDoc = {
+ _id: 1,
+ nonTxn: true
+};
+
+// Performs a single document insert on the test collection. Returns the command result object.
+function singleDocWrite(dbName, collName, doc) {
+ const testColl = db.getSiblingDB(dbName)[collName];
+ return testColl.runCommand({insert: collName, documents: [doc]});
+}
+
+// Returns true if a single document insert has started running on the server.
+function writeStarted() {
+ return testDB.currentOp().inprog.some(op => {
+ return op.active && (op.ns === testColl.getFullName()) && (op.op === "insert") &&
+ (op.writeConflicts > 0);
+ });
+}
+
+/**
+ * A non-transactional (single document) write should keep retrying when attempting to insert a
+ * document that conflicts with a previous write done by a running transaction, and should be
+ * allowed to continue after the transaction commits. If 'maxTimeMS' is specified, a single
+ * document write should timeout after the given time limit if there is a write conflict.
+ */
+
+jsTestLog("Start a multi-document transaction with a document insert.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(txnDoc));
+
+jsTestLog("Do a conflicting single document insert outside of transaction with maxTimeMS.");
+assert.commandFailedWithCode(
+ testColl.runCommand({insert: collName, documents: [nonTxnDoc], maxTimeMS: 100}),
+ ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Doing conflicting single document write in separate thread.");
+let thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc);
+thread.start();
+
+// Wait for the single doc write to start.
+assert.soon(writeStarted);
+
+// Commit the transaction, which should allow the single document write to finish. Since the
+// single doc write should get serialized after the transaction, we expect it to fail with a
+// duplicate key error.
+jsTestLog("Commit the multi-document transaction.");
+assert.commandWorked(session.commitTransaction_forTesting());
+thread.join();
+assert.commandFailedWithCode(thread.returnData(), ErrorCodes.DuplicateKey);
+
+// Check the final documents.
+assert.sameMembers([txnDoc], testColl.find().toArray());
+
+// Clean up the test collection.
+assert.commandWorked(testColl.remove({}));
+
+/**
+ * A non-transactional (single document) write should keep retrying when attempting to insert a
+ * document that conflicts with a previous write done by a running transaction, and should be
+ * allowed to continue and complete successfully after the transaction aborts.
+ */
+
+jsTestLog("Start a multi-document transaction with a document insert.");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(txnDoc));
+
+jsTestLog("Doing conflicting single document write in separate thread.");
+thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc);
+thread.start();
+
+// Wait for the single doc write to start.
+assert.soon(writeStarted);
+
+// Abort the transaction, which should allow the single document write to finish and insert its
+// document successfully.
+jsTestLog("Abort the multi-document transaction.");
+assert.commandWorked(session.abortTransaction_forTesting());
+thread.join();
+assert.commandWorked(thread.returnData());
+
+// Check the final documents.
+assert.sameMembers([nonTxnDoc], testColl.find().toArray());
+
+// Clean up the test collection.
+assert.commandWorked(testColl.remove({}));
+
+/**
+ * A transaction that tries to write to a document that was updated by a non-transaction after
+ * it started should fail with a WriteConflict.
+ */
+
+jsTestLog("Start a multi-document transaction.");
+session.startTransaction();
+assert.commandWorked(sessionColl.runCommand({find: collName}));
+
+jsTestLog("Do a single document insert outside of the transaction.");
+assert.commandWorked(testColl.insert(nonTxnDoc));
+
+jsTestLog("Insert a conflicting document inside the multi-document transaction.");
+assert.commandFailedWithCode(sessionColl.insert(txnDoc), ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// Check the final documents.
+assert.sameMembers([nonTxnDoc], testColl.find().toArray());
}());
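For readers unfamiliar with ScopedThread, the pattern used above boils down to the following sketch: the worker function runs on its own connection with its own 'db' global, and its return value is harvested with returnData() after join(). Function and collection names here are illustrative.

load('jstests/libs/parallelTester.js');  // for ScopedThread.

function sketchWorker(dbName, collName) {
    // Runs in a separate thread with a fresh 'db' global.
    return db.getSiblingDB(dbName)[collName].runCommand(
        {insert: collName, documents: [{_id: "from-thread"}]});
}

const worker = new ScopedThread(sketchWorker, "test", "thread_sketch");
worker.start();
worker.join();
assert.commandWorked(worker.returnData());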
diff --git a/jstests/core/type4.js b/jstests/core/type4.js
index 82197d4f1e2..7f3adf6645c 100644
--- a/jstests/core/type4.js
+++ b/jstests/core/type4.js
@@ -1,42 +1,42 @@
(function() {
- "use strict";
+"use strict";
- // Tests for SERVER-20080
- //
- // Verify that various types cannot be invoked as constructors
+// Tests for SERVER-20080
+//
+// Verify that various types cannot be invoked as constructors
- var t = db.jstests_type4;
- t.drop();
- t.insert({});
- t.insert({});
- t.insert({});
+var t = db.jstests_type4;
+t.drop();
+t.insert({});
+t.insert({});
+t.insert({});
- var oldReadMode = db.getMongo().readMode();
+var oldReadMode = db.getMongo().readMode();
- assert.throws(function() {
- (new _rand())();
- }, [], "invoke constructor on natively injected function");
+assert.throws(function() {
+ (new _rand())();
+}, [], "invoke constructor on natively injected function");
- assert.throws(function() {
- var doc = db.test.findOne();
- new doc();
- }, [], "invoke constructor on BSON");
+assert.throws(function() {
+ var doc = db.test.findOne();
+ new doc();
+}, [], "invoke constructor on BSON");
- assert.throws(function() {
- db.getMongo().forceReadMode("commands");
- var cursor = t.find();
- cursor.next();
+assert.throws(function() {
+ db.getMongo().forceReadMode("commands");
+ var cursor = t.find();
+ cursor.next();
- new cursor._cursor._cursorHandle();
- }, [], "invoke constructor on CursorHandle");
+ new cursor._cursor._cursorHandle();
+}, [], "invoke constructor on CursorHandle");
- assert.throws(function() {
- db.getMongo().forceReadMode("legacy");
- var cursor = t.find();
- cursor.next();
+assert.throws(function() {
+ db.getMongo().forceReadMode("legacy");
+ var cursor = t.find();
+ cursor.next();
- new cursor._cursor();
- }, [], "invoke constructor on Cursor");
+ new cursor._cursor();
+}, [], "invoke constructor on Cursor");
- db.getMongo().forceReadMode(oldReadMode);
+db.getMongo().forceReadMode(oldReadMode);
})();
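A note on the three-argument assert.throws form used throughout these files: the second argument is an array of arguments the function is applied to, and the third is the failure message. A self-contained sketch:

assert.throws(function(x) {
    throw new Error("boom: " + x);
}, [42], "the function should throw when called with 42");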
diff --git a/jstests/core/type5.js b/jstests/core/type5.js
index d4dfc42d9f6..b0f84f4885b 100644
--- a/jstests/core/type5.js
+++ b/jstests/core/type5.js
@@ -1,22 +1,21 @@
(function() {
- "use strict";
+"use strict";
- // This checks SERVER-20375 - Constrain JS method thisv
- //
- // Check to make sure we can't invoke methods on incorrect types, or on
- // prototypes of objects that aren't intended to have methods invoked on
- // them.
-
- assert.throws(function() {
- HexData(0, "aaaa").hex.apply({});
- }, [], "invoke method on object of incorrect type");
- assert.throws(function() {
- var x = HexData(0, "aaaa");
- x.hex.apply(10);
- }, [], "invoke method on incorrect type");
- assert.throws(function() {
- var x = HexData(0, "aaaa");
- x.hex.apply(x.__proto__);
- }, [], "invoke method on prototype of correct type");
+// This checks SERVER-20375 - Constrain JS method thisv
+//
+// Check to make sure we can't invoke methods on incorrect types, or on
+// prototypes of objects that aren't intended to have methods invoked on
+// them.
+assert.throws(function() {
+ HexData(0, "aaaa").hex.apply({});
+}, [], "invoke method on object of incorrect type");
+assert.throws(function() {
+ var x = HexData(0, "aaaa");
+ x.hex.apply(10);
+}, [], "invoke method on incorrect type");
+assert.throws(function() {
+ var x = HexData(0, "aaaa");
+ x.hex.apply(x.__proto__);
+}, [], "invoke method on prototype of correct type");
})();
diff --git a/jstests/core/type6.js b/jstests/core/type6.js
index 39c3e2567bb..8dbc1770cc3 100644
--- a/jstests/core/type6.js
+++ b/jstests/core/type6.js
@@ -1,17 +1,17 @@
(function() {
- "use strict";
+"use strict";
- // SERVER-20319 Min/MaxKey check type of singleton
- //
- // make sure swapping min/max key's prototype doesn't blow things up
+// SERVER-20319 Min/MaxKey check type of singleton
+//
+// make sure swapping min/max key's prototype doesn't blow things up
- assert.throws(function() {
- MinKey().__proto__.singleton = 1000;
- MinKey();
- }, [], "make sure manipulating MinKey's proto is safe");
+assert.throws(function() {
+ MinKey().__proto__.singleton = 1000;
+ MinKey();
+}, [], "make sure manipulating MinKey's proto is safe");
- assert.throws(function() {
- MaxKey().__proto__.singleton = 1000;
- MaxKey();
- }, [], "make sure manipulating MaxKey's proto is safe");
+assert.throws(function() {
+ MaxKey().__proto__.singleton = 1000;
+ MaxKey();
+}, [], "make sure manipulating MaxKey's proto is safe");
})();
diff --git a/jstests/core/type7.js b/jstests/core/type7.js
index 1d67922d491..a9e0d67c3b0 100644
--- a/jstests/core/type7.js
+++ b/jstests/core/type7.js
@@ -1,46 +1,46 @@
(function() {
- "use strict";
+"use strict";
- // SERVER-20332 make JS NumberLong more robust
- //
- // Make sure swapping floatApprox, top and bottom don't break NumberLong
+// SERVER-20332 make JS NumberLong more robust
+//
+// Make sure swapping floatApprox, top and bottom doesn't break NumberLong
- // Picking 2^54 because it's representable as a double (as a power of
- // two), but big enough that the NumberLong code doesn't know it (numbers
- // over 2^53 can lose precision)
- var number = NumberLong("18014398509481984");
+// Picking 2^54 because it's representable as a double (as a power of
+// two), but big enough that the NumberLong code doesn't know it (numbers
+// over 2^53 can lose precision)
+var number = NumberLong("18014398509481984");
- {
- // Make sure all elements in a new NumberLong are valid
+{
+ // Make sure all elements in a new NumberLong are valid
- assert.eq(number.floatApprox, 18014398509481984);
- assert.eq(number.top, 4194304);
- assert.eq(number.bottom, 0);
- assert.eq(number.valueOf(), 18014398509481984);
- }
+ assert.eq(number.floatApprox, 18014398509481984);
+ assert.eq(number.top, 4194304);
+ assert.eq(number.bottom, 0);
+ assert.eq(number.valueOf(), 18014398509481984);
+}
- {
- // Make sure that floatApprox, top and bottom cannot be set
+{
+ // Make sure that floatApprox, top and bottom cannot be set
- assert.throws(function() {
- number.floatApprox = "a";
- }, [], "floatApprox should not be setable.");
+ assert.throws(function() {
+ number.floatApprox = "a";
+ }, [], "floatApprox should not be setable.");
- assert.throws(function() {
- number.top = "a";
- }, [], "top should not be setable.");
+ assert.throws(function() {
+ number.top = "a";
+ }, [], "top should not be setable.");
- assert.throws(function() {
- number.bottom = "a";
- }, [], "bottom should not be setable.");
- }
+ assert.throws(function() {
+ number.bottom = "a";
+ }, [], "bottom should not be setable.");
+}
- {
- // Make sure we fall back to floatApprox
+{
+ // Make sure we fall back to floatApprox
- delete number.top;
- delete number.bottom;
+ delete number.top;
+ delete number.bottom;
- assert.eq(number.valueOf(), 18014398509481984);
- }
+ assert.eq(number.valueOf(), 18014398509481984);
+}
})();
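The constants asserted above follow from splitting the 64-bit value into 32-bit halves: top = 2^54 / 2^32 = 2^22 = 4194304, and bottom = 2^54 mod 2^32 = 0. A sketch of that arithmetic, with illustrative variable names:

var sketchLong = NumberLong("18014398509481984");  // 2^54
var TWO_32 = 4294967296;                           // 2^32
assert.eq(sketchLong.top, Math.floor(18014398509481984 / TWO_32));  // 4194304
assert.eq(sketchLong.bottom, 18014398509481984 % TWO_32);           // 0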
diff --git a/jstests/core/type8.js b/jstests/core/type8.js
index ceb4993ecb1..e540cc901c7 100644
--- a/jstests/core/type8.js
+++ b/jstests/core/type8.js
@@ -1,18 +1,18 @@
(function() {
- "use strict";
+"use strict";
- // SERVER-8246 Min/MaxKey should be comparable
- //
- // make sure that the MinKey MaxKey JS types are comparable
+// SERVER-8246 Min/MaxKey should be comparable
+//
+// make sure that the MinKey MaxKey JS types are comparable
- function testType(t1, t2) {
- db.minmaxcmp.save({_id: t1});
- var doc = db.minmaxcmp.findOne({_id: t1});
- assert.eq(doc._id, t1, "Value for " + t1 + " did not round-trip to DB correctly");
- assert.neq(doc._id, t2, "Value for " + t1 + " should not equal " + t2);
- assert(doc._id instanceof t1, "Value for " + t1 + "should be instance of" + t1);
- assert(!(doc._id instanceof t2), "Value for " + t1 + "shouldn't be instance of" + t2);
- }
- testType(MinKey, MaxKey);
- testType(MaxKey, MinKey);
+function testType(t1, t2) {
+ db.minmaxcmp.save({_id: t1});
+ var doc = db.minmaxcmp.findOne({_id: t1});
+ assert.eq(doc._id, t1, "Value for " + t1 + " did not round-trip to DB correctly");
+ assert.neq(doc._id, t2, "Value for " + t1 + " should not equal " + t2);
+ assert(doc._id instanceof t1, "Value for " + t1 + " should be instance of " + t1);
+ assert(!(doc._id instanceof t2), "Value for " + t1 + " shouldn't be instance of " + t2);
+}
+testType(MinKey, MaxKey);
+testType(MaxKey, MinKey);
})();
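The comparability being tested is what makes MinKey and MaxKey usable as sort sentinels. A hedged sketch with an illustrative collection name:

db.minmaxcmp_sketch.drop();
assert.writeOK(db.minmaxcmp_sketch.insert([{_id: MinKey}, {_id: 1}, {_id: MaxKey}]));
var ordered = db.minmaxcmp_sketch.find().sort({_id: 1}).toArray();
assert(ordered[0]._id instanceof MinKey, "MinKey should sort before other values");
assert(ordered[2]._id instanceof MaxKey, "MaxKey should sort after other values");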
diff --git a/jstests/core/type_array.js b/jstests/core/type_array.js
index 49ebf26764e..0bc3dc0f6f3 100644
--- a/jstests/core/type_array.js
+++ b/jstests/core/type_array.js
@@ -4,70 +4,69 @@
* Tests for the array-related behavior of the $type query operator.
*/
(function() {
- "use strict";
+"use strict";
- let coll = db.jstest_type_array;
- coll.drop();
+let coll = db.jstest_type_array;
+coll.drop();
- /**
- * Iterates 'cursor' and returns a sorted array of the '_id' fields for the returned documents.
- */
- function extractSortedIdsFromCursor(cursor) {
- let ids = [];
- while (cursor.hasNext()) {
- ids.push(cursor.next()._id);
- }
- return ids.sort();
+/**
+ * Iterates 'cursor' and returns a sorted array of the '_id' fields for the returned documents.
+ */
+function extractSortedIdsFromCursor(cursor) {
+ let ids = [];
+ while (cursor.hasNext()) {
+ ids.push(cursor.next()._id);
}
+ return ids.sort();
+}
- function runTests() {
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]}));
- assert.writeOK(coll.insert({_id: 2, a: [1, "foo", 3]}));
- assert.writeOK(coll.insert({_id: 3, a: []}));
- assert.writeOK(coll.insert({_id: 4, a: [[]]}));
- assert.writeOK(coll.insert({_id: 5, a: [[[]]]}));
- assert.writeOK(coll.insert({_id: 6, a: 1}));
- assert.writeOK(coll.insert({_id: 7, a: "foo"}));
+function runTests() {
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]}));
+ assert.writeOK(coll.insert({_id: 2, a: [1, "foo", 3]}));
+ assert.writeOK(coll.insert({_id: 3, a: []}));
+ assert.writeOK(coll.insert({_id: 4, a: [[]]}));
+ assert.writeOK(coll.insert({_id: 5, a: [[[]]]}));
+ assert.writeOK(coll.insert({_id: 6, a: 1}));
+ assert.writeOK(coll.insert({_id: 7, a: "foo"}));
- assert.eq([1, 2, 6], extractSortedIdsFromCursor(coll.find({a: {$type: "number"}})));
- assert.eq([2, 7], extractSortedIdsFromCursor(coll.find({a: {$type: "string"}})));
- assert.eq([1, 2, 3, 4, 5], extractSortedIdsFromCursor(coll.find({a: {$type: "array"}})));
- assert.eq([4, 5], extractSortedIdsFromCursor(coll.find({"a.0": {$type: "array"}})));
- assert.eq([5], extractSortedIdsFromCursor(coll.find({"a.0.0": {$type: "array"}})));
+ assert.eq([1, 2, 6], extractSortedIdsFromCursor(coll.find({a: {$type: "number"}})));
+ assert.eq([2, 7], extractSortedIdsFromCursor(coll.find({a: {$type: "string"}})));
+ assert.eq([1, 2, 3, 4, 5], extractSortedIdsFromCursor(coll.find({a: {$type: "array"}})));
+ assert.eq([4, 5], extractSortedIdsFromCursor(coll.find({"a.0": {$type: "array"}})));
+ assert.eq([5], extractSortedIdsFromCursor(coll.find({"a.0.0": {$type: "array"}})));
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: NumberInt(1)}));
- assert.writeOK(coll.insert({_id: 2, a: NumberLong(1)}));
- assert.writeOK(coll.insert({_id: 3, a: "str"}));
- assert.writeOK(coll.insert({_id: 4, a: []}));
- assert.writeOK(coll.insert({_id: 5, a: [NumberInt(1), "str"]}));
- assert.writeOK(coll.insert({_id: 6}));
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: NumberInt(1)}));
+ assert.writeOK(coll.insert({_id: 2, a: NumberLong(1)}));
+ assert.writeOK(coll.insert({_id: 3, a: "str"}));
+ assert.writeOK(coll.insert({_id: 4, a: []}));
+ assert.writeOK(coll.insert({_id: 5, a: [NumberInt(1), "str"]}));
+ assert.writeOK(coll.insert({_id: 6}));
- // Test that $type fails when given array that contains an element that is neither a string
- // nor a number.
- assert.throws(() => coll.find({a: {$type: ["string", null]}}).itcount());
- assert.throws(() => coll.find({a: {$type: [{}, "string"]}}).itcount());
+ // Test that $type fails when given an array that contains an element that is neither a string
+ // nor a number.
+ assert.throws(() => coll.find({a: {$type: ["string", null]}}).itcount());
+ assert.throws(() => coll.find({a: {$type: [{}, "string"]}}).itcount());
- // Test that $type with an array of types can accept both string aliases and numerical type
- // codes. Also verifies matching behavior for arrays and for missing values.
- assert.eq([2, 3, 5], extractSortedIdsFromCursor(coll.find({a: {$type: ["long", 2]}})));
+ // Test that $type with an array of types can accept both string aliases and numerical type
+ // codes. Also verifies matching behavior for arrays and for missing values.
+ assert.eq([2, 3, 5], extractSortedIdsFromCursor(coll.find({a: {$type: ["long", 2]}})));
- // Test $type with an array of types, where one of those types is itself "array".
- assert.eq([2, 4, 5],
- extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array"]}})));
+ // Test $type with an array of types, where one of those types is itself "array".
+ assert.eq([2, 4, 5], extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array"]}})));
- // Test that duplicate types are allowed in the array.
- assert.eq([2, 4, 5],
- extractSortedIdsFromCursor(
- coll.find({a: {$type: ["long", "array", "long", "array"]}})));
- assert.eq([2, 4, 5],
- extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array", 18, 4]}})));
- }
+ // Test that duplicate types are allowed in the array.
+ assert.eq(
+ [2, 4, 5],
+ extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array", "long", "array"]}})));
+ assert.eq([2, 4, 5],
+ extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array", 18, 4]}})));
+}
- // Verify $type queries both with and without an index.
- runTests();
- assert.writeOK(coll.createIndex({a: 1}));
- runTests();
+// Verify $type queries both with and without an index.
+runTests();
+assert.writeOK(coll.createIndex({a: 1}));
+runTests();
}());
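The array semantics of $type exercised above reduce to two rules: against an array field the predicate matches if any element has the named type, and the alias "array" also matches the array value itself. A minimal sketch with an illustrative collection name:

var sketchTypeColl = db.jstest_type_array_sketch;
sketchTypeColl.drop();
assert.writeOK(sketchTypeColl.insert({_id: 1, a: [1, "foo"]}));

// The element "foo" matches "string" ...
assert.eq(1, sketchTypeColl.find({a: {$type: "string"}}).itcount());
// ... and the array value itself matches "array".
assert.eq(1, sketchTypeColl.find({a: {$type: "array"}}).itcount());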
diff --git a/jstests/core/uniqueness.js b/jstests/core/uniqueness.js
index e25c8a48d70..54a5a71c276 100644
--- a/jstests/core/uniqueness.js
+++ b/jstests/core/uniqueness.js
@@ -9,76 +9,78 @@
// ]
(function() {
- "use strict";
+"use strict";
- var res;
+var res;
- let t = db.jstests_uniqueness;
+let t = db.jstests_uniqueness;
- t.drop();
+t.drop();
- // test uniqueness of _id
+// test uniqueness of _id
- res = t.save({_id: 3});
- assert.writeOK(res);
+res = t.save({_id: 3});
+assert.writeOK(res);
- // this should yield an error
- res = t.insert({_id: 3});
- assert.writeError(res);
- assert.eq(1, t.count());
+// this should yield an error
+res = t.insert({_id: 3});
+assert.writeError(res);
+assert.eq(1, t.count());
- res = t.insert({_id: 4, x: 99});
- assert.writeOK(res);
+res = t.insert({_id: 4, x: 99});
+assert.writeOK(res);
- // this should yield an error
- res = t.update({_id: 4}, {_id: 3, x: 99});
- assert.writeError(res);
- assert(t.findOne({_id: 4}));
+// this should yield an error
+res = t.update({_id: 4}, {_id: 3, x: 99});
+assert.writeError(res);
+assert(t.findOne({_id: 4}));
- // Check for an error message when we index and there are dups
- db.jstests_uniqueness2.drop();
- db.jstests_uniqueness2.insert({a: 3});
- db.jstests_uniqueness2.insert({a: 3});
- assert.eq(2, db.jstests_uniqueness2.count());
- res = db.jstests_uniqueness2.ensureIndex({a: 1}, true);
- assert.commandFailed(res);
- assert(res.errmsg.match(/E11000/));
+// Check for an error message when we index and there are dups
+db.jstests_uniqueness2.drop();
+db.jstests_uniqueness2.insert({a: 3});
+db.jstests_uniqueness2.insert({a: 3});
+assert.eq(2, db.jstests_uniqueness2.count());
+res = db.jstests_uniqueness2.ensureIndex({a: 1}, true);
+assert.commandFailed(res);
+assert(res.errmsg.match(/E11000/));
- // Check for an error message when we index in the background and there are dups
- db.jstests_uniqueness2.drop();
- db.jstests_uniqueness2.insert({a: 3});
- db.jstests_uniqueness2.insert({a: 3});
- assert.eq(2, db.jstests_uniqueness2.count());
- res = db.jstests_uniqueness2.ensureIndex({a: 1}, {unique: true, background: true});
- assert.commandFailed(res);
- assert(res.errmsg.match(/E11000/));
+// Check for an error message when we index in the background and there are dups
+db.jstests_uniqueness2.drop();
+db.jstests_uniqueness2.insert({a: 3});
+db.jstests_uniqueness2.insert({a: 3});
+assert.eq(2, db.jstests_uniqueness2.count());
+res = db.jstests_uniqueness2.ensureIndex({a: 1}, {unique: true, background: true});
+assert.commandFailed(res);
+assert(res.errmsg.match(/E11000/));
- // Verify that duplicate key errors follow a fixed format, including field information.
- const coll = db.checkDupErrorMessage;
- const key = {_id: 1};
- const expectedMessage =
- 'E11000 duplicate key error collection: ' + coll + ' index: _id_ dup key: { _id: 1.0 }';
- coll.drop();
- assert.commandWorked(coll.insert(key));
- res = coll.insert(key);
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- assert.eq(res.nInserted, 0, tojson(res));
- const writeError = res.getWriteError();
- assert.eq(writeError.errmsg,
- expectedMessage,
- "The duplicate key error message must exactly match." + tojson(res));
+// Verify that duplicate key errors follow a fixed format, including field information.
+const coll = db.checkDupErrorMessage;
+const key = {
+ _id: 1
+};
+const expectedMessage =
+ 'E11000 duplicate key error collection: ' + coll + ' index: _id_ dup key: { _id: 1.0 }';
+coll.drop();
+assert.commandWorked(coll.insert(key));
+res = coll.insert(key);
+assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+assert.eq(res.nInserted, 0, tojson(res));
+const writeError = res.getWriteError();
+assert.eq(writeError.errmsg,
+ expectedMessage,
+ "The duplicate key error message must exactly match." + tojson(res));
- /* Check that if we update and remove _id, it gets added back by the DB */
+/* Check that if we update and remove _id, it gets added back by the DB */
- /* - test when object grows */
- t.drop();
- t.save({_id: 'Z'});
- t.update({}, {k: 2});
- assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id");
+/* - test when object grows */
+t.drop();
+t.save({_id: 'Z'});
+t.update({}, {k: 2});
+assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id");
- /* - test when doesn't grow */
- t.drop();
- t.save({_id: 'Z', k: 3});
- t.update({}, {k: 2});
- assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id (2)");
+/* - test when doesn't grow */
+t.drop();
+t.save({_id: 'Z', k: 3});
+t.update({}, {k: 2});
+assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id (2)");
})();
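The legacy ensureIndex(spec, true) call above predates the current helper; the equivalent modern spelling, which fails the same way on duplicate data, is sketched below with an illustrative collection name:

db.jstests_uniqueness_sketch.drop();
db.jstests_uniqueness_sketch.insert({a: 3});
db.jstests_uniqueness_sketch.insert({a: 3});
var sketchRes = db.jstests_uniqueness_sketch.createIndex({a: 1}, {unique: true});
assert.commandFailed(sketchRes);
assert(sketchRes.errmsg.match(/E11000/));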
diff --git a/jstests/core/update_affects_indexes.js b/jstests/core/update_affects_indexes.js
index a396fc29079..956efadbf0e 100644
--- a/jstests/core/update_affects_indexes.js
+++ b/jstests/core/update_affects_indexes.js
@@ -1,94 +1,94 @@
// This is a regression test for SERVER-32048. It checks that index keys are correctly updated when
// an update modifier implicitly creates a new array element.
(function() {
- "use strict";
+"use strict";
- let coll = db.update_affects_indexes;
- coll.drop();
- let indexKeyPattern = {"a.b": 1};
- assert.commandWorked(coll.createIndex(indexKeyPattern));
+let coll = db.update_affects_indexes;
+coll.drop();
+let indexKeyPattern = {"a.b": 1};
+assert.commandWorked(coll.createIndex(indexKeyPattern));
- // Tests that the document 'docId' has all the index keys in 'expectedKeys' and none of the
- // index keys in 'unexpectedKeys'.
- function assertExpectedIndexKeys(docId, expectedKeys, unexpectedKeys) {
- for (let key of expectedKeys) {
- let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray();
- assert.eq(1, res.length, tojson(res));
- assert.eq(key, res[0]);
- }
+// Tests that the document 'docId' has all the index keys in 'expectedKeys' and none of the
+// index keys in 'unexpectedKeys'.
+function assertExpectedIndexKeys(docId, expectedKeys, unexpectedKeys) {
+ for (let key of expectedKeys) {
+ let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray();
+ assert.eq(1, res.length, tojson(res));
+ assert.eq(key, res[0]);
+ }
- for (let key of unexpectedKeys) {
- let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray();
- if (res.length > 0) {
- assert.eq(1, res.length, tojson(res));
- assert.neq(0, bsonWoCompare(key, res[0]), tojson(res[0]));
- }
+ for (let key of unexpectedKeys) {
+ let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray();
+ if (res.length > 0) {
+ assert.eq(1, res.length, tojson(res));
+ assert.neq(0, bsonWoCompare(key, res[0]), tojson(res[0]));
}
}
+}
- // $set implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}, {"a.b": null}], []);
+// $set implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 0}, {$set: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}, {"a.b": null}], []);
- // $set implicitly creates array element beyond end of array.
- assert.writeOK(coll.insert({_id: 1, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 1}, {$set: {"a.3.c": 0}}));
- assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}, {"a.b": null}], []);
+// $set implicitly creates array element beyond end of array.
+assert.writeOK(coll.insert({_id: 1, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 1}, {$set: {"a.3.c": 0}}));
+assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}, {"a.b": null}], []);
- // $set implicitly creates array element in empty array (no index key changes needed).
- assert.writeOK(coll.insert({_id: 2, a: []}));
- assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []);
- assert.writeOK(coll.update({_id: 2}, {$set: {"a.0.c": 0}}));
- assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []);
+// $set implicitly creates array element in empty array (no index key changes needed).
+assert.writeOK(coll.insert({_id: 2, a: []}));
+assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []);
+assert.writeOK(coll.update({_id: 2}, {$set: {"a.0.c": 0}}));
+assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []);
- // $inc implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 3, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 3}, {$inc: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}, {"a.b": null}], []);
+// $inc implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 3, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 3}, {$inc: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}, {"a.b": null}], []);
- // $mul implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 4, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 4}, {$mul: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}, {"a.b": null}], []);
+// $mul implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 4, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 4}, {$mul: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}, {"a.b": null}], []);
- // $addToSet implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 5, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 5}, {$addToSet: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}, {"a.b": null}], []);
+// $addToSet implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 5, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 5}, {$addToSet: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}, {"a.b": null}], []);
- // $bit implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 6, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 6}, {$bit: {"a.1.c": {and: NumberInt(1)}}}));
- assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}, {"a.b": null}], []);
+// $bit implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 6, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 6}, {$bit: {"a.1.c": {and: NumberInt(1)}}}));
+assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}, {"a.b": null}], []);
- // $min implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 7, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 7}, {$min: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}, {"a.b": null}], []);
+// $min implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 7, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 7}, {$min: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}, {"a.b": null}], []);
- // $max implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 8, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 8}, {$max: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}, {"a.b": null}], []);
+// $max implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 8, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 8}, {$max: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}, {"a.b": null}], []);
- // $currentDate implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 9, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 9}, {$currentDate: {"a.1.c": true}}));
- assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}, {"a.b": null}], []);
+// $currentDate implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 9, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 9}, {$currentDate: {"a.1.c": true}}));
+assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}, {"a.b": null}], []);
- // $push implicitly creates array element at end of array.
- assert.writeOK(coll.insert({_id: 10, a: [{b: 0}]}));
- assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}], [{"a.b": null}]);
- assert.writeOK(coll.update({_id: 10}, {$push: {"a.1.c": 0}}));
- assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}, {"a.b": null}], []);
+// $push implicitly creates array element at end of array.
+assert.writeOK(coll.insert({_id: 10, a: [{b: 0}]}));
+assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}], [{"a.b": null}]);
+assert.writeOK(coll.update({_id: 10}, {$push: {"a.1.c": 0}}));
+assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}, {"a.b": null}], []);
}());
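The reason every modifier above produces an extra {"a.b": null} index key is the shape of the implicitly created element: it has no 'b' field. A sketch of the post-image, with an illustrative collection name:

var sketchIdxColl = db.update_affects_indexes_sketch;
sketchIdxColl.drop();
assert.writeOK(sketchIdxColl.insert({_id: 0, a: [{b: 0}]}));

// $set to "a.1.c" appends an element without a 'b' field.
assert.writeOK(sketchIdxColl.update({_id: 0}, {$set: {"a.1.c": 0}}));
assert.docEq({_id: 0, a: [{b: 0}, {c: 0}]}, sketchIdxColl.findOne());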
diff --git a/jstests/core/update_arrayFilters.js b/jstests/core/update_arrayFilters.js
index a59b135e75d..55d7614495d 100644
--- a/jstests/core/update_arrayFilters.js
+++ b/jstests/core/update_arrayFilters.js
@@ -4,714 +4,695 @@
// Tests for the arrayFilters option to update and findAndModify.
(function() {
- "use strict";
+"use strict";
- const collName = "update_arrayFilters";
- let coll = db[collName];
- coll.drop();
- assert.commandWorked(db.createCollection(collName));
- let res;
-
- //
- // Tests for update.
- //
-
- if (db.getMongo().writeMode() !== "commands") {
- assert.throws(function() {
- coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
- });
- } else {
- // Non-array arrayFilters fails to parse.
- assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: {i: 0}}),
- ErrorCodes.TypeMismatch);
-
- // Non-object array filter fails to parse.
- assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: ["bad"]}),
- ErrorCodes.TypeMismatch);
-
- // Bad array filter fails to parse.
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0, j: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
- assert.neq(-1,
- res.getWriteError().errmsg.indexOf("Expected a single top-level field name"),
- "update failed for a reason other than failing to parse array filters");
-
- // Multiple array filters with the same id fails to parse.
- res = coll.update(
- {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}, {i: 1}]});
- assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "Found multiple array filters with the same top-level field name"),
- "update failed for a reason other than multiple array filters with the same top-level field name");
-
- // Unused array filter fails to parse.
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}, {j: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "The array filter for identifier 'j' was not used in the update { $set: { a.$[i]: 5.0 } }"),
- "update failed for a reason other than unused array filter");
-
- // Array filter without a top-level field name fails to parse.
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$alwaysTrue: 1}]});
- assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "Cannot use an expression without a top-level field name in arrayFilters"),
- "update failed for a reason other than missing a top-level field name in arrayFilter");
-
- // Array filter with $text inside fails to parse.
- res = coll.update(
- {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$text: {$search: "foo"}}]});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
-
- // Array filter with $where inside fails to parse.
- res =
- coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$where: "this.a == 2"}]});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
-
- // Array filter with $geoNear inside fails to parse.
- res = coll.update(
- {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{loc: {$geoNear: [50, 50]}}]});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
-
- // Array filter with $expr inside fails to parse.
- res = coll.update(
- {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$expr: {$eq: ["$foo", "$bar"]}}]});
- assert.writeErrorWithCode(res, ErrorCodes.QueryFeatureNotAllowed);
-
- // Good value for arrayFilters succeeds.
- assert.writeOK(coll.update(
- {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}]}));
- assert.writeOK(coll.update(
- {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]}));
- }
-
- //
- // Tests for findAndModify.
- //
+const collName = "update_arrayFilters";
+let coll = db[collName];
+coll.drop();
+assert.commandWorked(db.createCollection(collName));
+let res;
- // Non-array arrayFilters fails to parse.
- assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: {i: 0}});
- });
+//
+// Tests for update.
+//
- // Non-object array filter fails to parse.
+if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: ["bad"]});
+ coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
});
+} else {
+ // Non-array arrayFilters fails to parse.
+ assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: {i: 0}}),
+ ErrorCodes.TypeMismatch);
- // arrayFilters option not allowed with remove=true.
- assert.throws(function() {
- coll.findAndModify({query: {_id: 0}, remove: true, arrayFilters: [{i: 0}]});
- });
+ // Non-object array filter fails to parse.
+ assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: ["bad"]}),
+ ErrorCodes.TypeMismatch);
// Bad array filter fails to parse.
- assert.throws(function() {
- coll.findAndModify(
- {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0, j: 0}]});
- });
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0, j: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf("Expected a single top-level field name"),
+ "update failed for a reason other than failing to parse array filters");
// Multiple array filters with the same id fails to parse.
- assert.throws(function() {
- coll.findAndModify({
- query: {_id: 0},
- update: {$set: {"a.$[i]": 5, "a.$[j]": 6}},
- arrayFilters: [{i: 0}, {j: 0}, {i: 1}]
- });
- });
+ res = coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}, {i: 1}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "Found multiple array filters with the same top-level field name"),
+ "update failed for a reason other than multiple array filters with the same top-level field name");
// Unused array filter fails to parse.
- assert.throws(function() {
- coll.findAndModify(
- {query: {_id: 0}, update: {$set: {"a.$[i]": 5}, arrayFilters: [{i: 0}, {j: 0}]}});
- });
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}, {j: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "The array filter for identifier 'j' was not used in the update { $set: { a.$[i]: 5.0 } }"),
+ "update failed for a reason other than unused array filter");
+
+ // Array filter without a top-level field name fails to parse.
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$alwaysTrue: 1}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "Cannot use an expression without a top-level field name in arrayFilters"),
+ "update failed for a reason other than missing a top-level field name in arrayFilter");
+
+ // Array filter with $text inside fails to parse.
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$text: {$search: "foo"}}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+
+ // Array filter with $where inside fails to parse.
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$where: "this.a == 2"}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+
+ // Array filter with $geoNear inside fails to parse.
+ res =
+ coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{loc: {$geoNear: [50, 50]}}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+
+ // Array filter with $expr inside fails to parse.
+ res = coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$expr: {$eq: ["$foo", "$bar"]}}]});
+ assert.writeErrorWithCode(res, ErrorCodes.QueryFeatureNotAllowed);
// Good value for arrayFilters succeeds.
- assert.eq(null, coll.findAndModify({
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}]}));
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]}));
+}
+
+//
+// Tests for findAndModify.
+//
+
+// Non-array arrayFilters fails to parse.
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: {i: 0}});
+});
+
+// Non-object array filter fails to parse.
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: ["bad"]});
+});
+
+// arrayFilters option not allowed with remove=true.
+assert.throws(function() {
+ coll.findAndModify({query: {_id: 0}, remove: true, arrayFilters: [{i: 0}]});
+});
+
+// Bad array filter fails to parse.
+assert.throws(function() {
+ coll.findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0, j: 0}]});
+});
+
+// Multiple array filters with the same id fails to parse.
+assert.throws(function() {
+ coll.findAndModify({
query: {_id: 0},
update: {$set: {"a.$[i]": 5, "a.$[j]": 6}},
- arrayFilters: [{i: 0}, {j: 0}]
- }));
- assert.eq(null, coll.findAndModify({
- query: {_id: 0},
- update: {$set: {"a.$[i]": 5}},
- arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]
- }));
-
- //
- // Tests for the bulk API.
- //
-
- if (db.getMongo().writeMode() !== "commands") {
- let bulk = coll.initializeUnorderedBulkOp();
- bulk.find({});
- assert.throws(function() {
- bulk.arrayFilters([{i: 0}]);
- });
- } else {
- // update().
- let bulk = coll.initializeUnorderedBulkOp();
- bulk.find({}).arrayFilters("bad").update({$set: {"a.$[i]": 5}});
- assert.throws(function() {
- bulk.execute();
- });
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({}).arrayFilters([{i: 0}]).update({$set: {"a.$[i]": 5}});
- assert.writeOK(bulk.execute());
-
- // updateOne().
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).arrayFilters("bad").updateOne({$set: {"a.$[i]": 5}});
- assert.throws(function() {
- bulk.execute();
- });
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {"a.$[i]": 5}});
- assert.writeOK(bulk.execute());
- }
-
- //
- // Tests for the CRUD API.
- //
-
- // findOneAndUpdate().
+ arrayFilters: [{i: 0}, {j: 0}, {i: 1}]
+ });
+});
+
+// Unused array filter fails to parse.
+assert.throws(function() {
+    coll.findAndModify(
+        {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}, {j: 0}]});
+});
+
+// Good value for arrayFilters succeeds.
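+// (findAndModify returns null here because no document matches {_id: 0}; only parsing is
+// being exercised.)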
+assert.eq(null, coll.findAndModify({
+ query: {_id: 0},
+ update: {$set: {"a.$[i]": 5, "a.$[j]": 6}},
+ arrayFilters: [{i: 0}, {j: 0}]
+}));
+assert.eq(null, coll.findAndModify({
+ query: {_id: 0},
+ update: {$set: {"a.$[i]": 5}},
+ arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]
+}));
+
+//
+// Tests for the bulk API.
+//
+
+if (db.getMongo().writeMode() !== "commands") {
+ let bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({});
assert.throws(function() {
- coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
+ bulk.arrayFilters([{i: 0}]);
});
- assert.eq(null,
- coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
+} else {
+ // update().
+ let bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({}).arrayFilters("bad").update({$set: {"a.$[i]": 5}});
+ assert.throws(function() {
+ bulk.execute();
+ });
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({}).arrayFilters([{i: 0}]).update({$set: {"a.$[i]": 5}});
+ assert.writeOK(bulk.execute());
// updateOne().
- if (db.getMongo().writeMode() !== "commands") {
- assert.throws(function() {
- coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
- });
- } else {
- assert.throws(function() {
- coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
- });
- res = coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
- assert.eq(0, res.modifiedCount);
- }
-
- // updateMany().
- if (db.getMongo().writeMode() !== "commands") {
- assert.throws(function() {
- coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
- });
- } else {
- assert.throws(function() {
- coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
- });
- res = coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
- assert.eq(0, res.modifiedCount);
- }
-
- // updateOne with bulkWrite().
- if (db.getMongo().writeMode() !== "commands") {
- assert.throws(function() {
- coll.bulkWrite([{
- updateOne:
- {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}
- }]);
- });
- } else {
- assert.throws(function() {
- coll.bulkWrite([{
- updateOne:
- {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}
- }]);
- });
- res = coll.bulkWrite([{
- updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}
- }]);
- assert.eq(0, res.matchedCount);
- }
-
- // updateMany with bulkWrite().
- if (db.getMongo().writeMode() !== "commands") {
- assert.throws(function() {
- coll.bulkWrite([
- {updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}
- ]);
- });
- } else {
- assert.throws(function() {
- coll.bulkWrite(
- [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]);
- });
- res = coll.bulkWrite(
- [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]);
- assert.eq(0, res.matchedCount);
- }
-
- //
- // Tests for explain().
- //
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({_id: 0}).arrayFilters("bad").updateOne({$set: {"a.$[i]": 5}});
+ assert.throws(function() {
+ bulk.execute();
+ });
+ bulk = coll.initializeUnorderedBulkOp();
+ bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {"a.$[i]": 5}});
+ assert.writeOK(bulk.execute());
+}
+
+//
+// Tests for the CRUD API.
+//
+
+// findOneAndUpdate().
+assert.throws(function() {
+ coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
+});
+assert.eq(null, coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
+
+// updateOne().
+if (db.getMongo().writeMode() !== "commands") {
+ assert.throws(function() {
+ coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
+ });
+} else {
+ assert.throws(function() {
+ coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
+ });
+ res = coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
+ assert.eq(0, res.modifiedCount);
+}
- // update().
- if (db.getMongo().writeMode() !== "commands") {
- assert.throws(function() {
- coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
- });
- } else {
- assert.throws(function() {
- coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
- });
- assert.commandWorked(
- coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
- }
-
- // findAndModify().
+
+// updateMany().
+if (db.getMongo().writeMode() !== "commands") {
assert.throws(function() {
- coll.explain().findAndModify(
- {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"});
+ coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
});
- assert.commandWorked(coll.explain().findAndModify(
- {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}));
+} else {
+ assert.throws(function() {
+ coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
+ });
+ res = coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
+ assert.eq(0, res.modifiedCount);
+}
- //
- // Tests for individual update modifiers.
- //
+
+// updateOne with bulkWrite().
+if (db.getMongo().writeMode() !== "commands") {
+ assert.throws(function() {
+ coll.bulkWrite([
+ {updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}
+ ]);
+ });
+} else {
+ assert.throws(function() {
+ coll.bulkWrite(
+ [{updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]);
+ });
+ res = coll.bulkWrite(
+ [{updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]);
+ assert.eq(0, res.matchedCount);
+}
- // $set.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2, 1]});
- }
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[]": 3}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
-
- // $unset.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, 1, null, 1]});
- }
- assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[]": true}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, null, null, null]});
-
- // $inc.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2, 0, 2]});
- }
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
- assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[]": 1}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1, 2, 1, 2]});
+
+// updateMany with bulkWrite().
+if (db.getMongo().writeMode() !== "commands") {
+ assert.throws(function() {
+ coll.bulkWrite(
+ [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]);
+ });
+} else {
+ assert.throws(function() {
+ coll.bulkWrite(
+ [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]);
+ });
+ res = coll.bulkWrite(
+ [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]);
+ assert.eq(0, res.matchedCount);
+}
- // $mul.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 2, 0, 2]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 6, 0, 6]});
- }
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [1, 2, 1, 2]}));
- assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[]": 3}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 6, 3, 6]});
+
+//
+// Tests for explain().
+//
- // $rename.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4]}));
- if (db.getMongo().writeMode() === "commands") {
- res = coll.update({_id: 0}, {$rename: {"a.$[i]": "b"}}, {arrayFilters: [{i: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
- assert.neq(-1,
- res.getWriteError().errmsg.indexOf(
- "The source field for $rename may not be dynamic: a.$[i]"),
- "update failed for a reason other than using $[] syntax in $rename path");
- res = coll.update({id: 0}, {$rename: {"a": "b"}}, {arrayFilters: [{i: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "The array filter for identifier 'i' was not used in the update { $rename: { a: \"b\" } }"),
- "updated failed for reason other than unused array filter");
- }
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0], b: [1]}));
- res = coll.update({_id: 0}, {$rename: {"a.$[]": "b"}});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
- assert.neq(-1,
- res.getWriteError().errmsg.indexOf(
- "The source field for $rename may not be dynamic: a.$[]"),
- "update failed for a reason other than using array updates with $rename");
- res = coll.update({_id: 0}, {$rename: {"a": "b.$[]"}});
+// update().
+if (db.getMongo().writeMode() !== "commands") {
+ assert.throws(function() {
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]});
+ });
+} else {
+ assert.throws(function() {
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"});
+ });
+ assert.commandWorked(
+ coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}));
+}
+
+// findAndModify().
+assert.throws(function() {
+ coll.explain().findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"});
+});
+assert.commandWorked(coll.explain().findAndModify(
+ {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}));
+
+//
+// Tests for individual update modifiers.
+//
+
+// $set.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2, 1]});
+}
+assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[]": 3}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
+
+// $unset.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, 1, null, 1]});
+}
+assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[]": true}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, null, null, null]});
+
+// $inc.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2, 0, 2]});
+}
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[]": 1}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1, 2, 1, 2]});
+
+// $mul.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 2, 0, 2]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 6, 0, 6]});
+}
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [1, 2, 1, 2]}));
+assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[]": 3}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 6, 3, 6]});
+
+// $rename.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4]}));
+if (db.getMongo().writeMode() === "commands") {
+ res = coll.update({_id: 0}, {$rename: {"a.$[i]": "b"}}, {arrayFilters: [{i: 0}]});
assert.writeErrorWithCode(res, ErrorCodes.BadValue);
assert.neq(-1,
res.getWriteError().errmsg.indexOf(
- "The destination field for $rename may not be dynamic: b.$[]"),
- "update failed for a reason other than using array updates with $rename");
- assert.writeOK(coll.update({_id: 0}, {$rename: {"a": "b"}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, b: [0]});
-
- // $setOnInsert.
- coll.drop();
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0, a: [0]},
- {$setOnInsert: {"a.$[i]": 1}},
- {arrayFilters: [{i: 0}], upsert: true}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]});
- }
- coll.drop();
- assert.writeOK(coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true}));
+ "The source field for $rename may not be dynamic: a.$[i]"),
+ "update failed for a reason other than using $[] syntax in $rename path");
+    res = coll.update({_id: 0}, {$rename: {"a": "b"}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.FailedToParse);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "The array filter for identifier 'i' was not used in the update { $rename: { a: \"b\" } }"),
+ "updated failed for reason other than unused array filter");
+}
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0], b: [1]}));
+res = coll.update({_id: 0}, {$rename: {"a.$[]": "b"}});
+assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("The source field for $rename may not be dynamic: a.$[]"),
+ "update failed for a reason other than using array updates with $rename");
+res = coll.update({_id: 0}, {$rename: {"a": "b.$[]"}});
+assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+assert.neq(-1,
+ res.getWriteError().errmsg.indexOf(
+ "The destination field for $rename may not be dynamic: b.$[]"),
+ "update failed for a reason other than using array updates with $rename");
+assert.writeOK(coll.update({_id: 0}, {$rename: {"a": "b"}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, b: [0]});
+
+// $setOnInsert.
+coll.drop();
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update(
+ {_id: 0, a: [0]}, {$setOnInsert: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}], upsert: true}));
assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]});
-
- // $min.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: 1}]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
- coll.update({_id: 0}, {$min: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 1}]});
- }
- assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[].c": 0}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 0}]});
-
- // $max.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: -1}]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
- coll.update({_id: 0}, {$max: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: -1}]});
- }
- assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[].c": 0}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: 0}]});
-
- // $currentDate.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
- coll.update({_id: 0}, {$currentDate: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
- let doc = coll.findOne({_id: 0});
- assert(doc.a[0].constructor == Date, tojson(doc));
- assert.eq(doc.a[1], 1, printjson(doc));
- }
- assert.writeOK(coll.update({_id: 0}, {$currentDate: {"a.$[]": true}}));
+}
+coll.drop();
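+// On upsert, the new document is seeded from the query's equality fields ({_id: 0, a: [0]}),
+// so the "a.$[]" path in $setOnInsert has an array element to overwrite.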
+assert.writeOK(coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]});
+
+// $min.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: 1}]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 1}]});
+}
+assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[].c": 0}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 0}]});
+
+// $max.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: -1}]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: -1}]});
+}
+assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[].c": 0}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: 0}]});
+
+// $currentDate.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(
+ coll.update({_id: 0}, {$currentDate: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]}));
let doc = coll.findOne({_id: 0});
assert(doc.a[0].constructor == Date, tojson(doc));
- assert(doc.a[1].constructor == Date, tojson(doc));
-
- // $addToSet.
- coll.drop();
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]}));
- assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1]]});
- }
- coll.drop();
+    assert.eq(doc.a[1], 1, tojson(doc));
+}
+assert.writeOK(coll.update({_id: 0}, {$currentDate: {"a.$[]": true}}));
+let doc = coll.findOne({_id: 0});
+assert(doc.a[0].constructor == Date, tojson(doc));
+assert(doc.a[1].constructor == Date, tojson(doc));
+
+// $addToSet.
+coll.drop();
+if (db.getMongo().writeMode() === "commands") {
assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]}));
- assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[]": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1, 2]]});
-
- // $pop.
+ assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1]]});
+}
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]}));
+assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[]": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1, 2]]});
+
+// $pop.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
+if (db.getMongo().writeMode() === "commands") {
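+    // {i: 0} matches subarrays containing 0; $pop with 1 removes the last element of each match.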
+ assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]}));
+ assert.eq({_id: 0, a: [[0], [1, 2]]}, coll.findOne());
+}
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[]": 1}}));
+assert.eq({_id: 0, a: [[]]}, coll.findOne());
+
+// $pullAll.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0, 2]}}, {arrayFilters: [{i: 0}]}));
+ assert.eq({_id: 0, a: [[1, 3], [1, 2, 3, 4]]}, coll.findOne());
+}
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
+assert.writeOK(coll.update({_id: 0}, {$pullAll: {"a.$[]": [0, 2]}}));
+assert.eq({_id: 0, a: [[1, 3], [1, 3, 4]]}, coll.findOne());
+
+// $pull.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[i]": 1}}, {arrayFilters: [{i: 2}]}));
+ assert.eq({_id: 0, a: [[0, 1], [2]]}, coll.findOne());
+}
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
+assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[]": 1}}));
+assert.eq({_id: 0, a: [[0], [2]]}, coll.findOne());
+
+// $push.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[i]": 4}}, {arrayFilters: [{i: 0}]}));
+ assert.eq({_id: 0, a: [[0, 1, 4], [2, 3]]}, coll.findOne());
+}
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
+assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[]": 4}}));
+assert.eq({_id: 0, a: [[0, 1, 4], [2, 3, 4]]}, coll.findOne());
+
+// $bit.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
+if (db.getMongo().writeMode() === "commands") {
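+    // 0 | 10 == 10; the second element (2) is excluded by the filter {i: 0}.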
+ assert.writeOK(
+ coll.update({_id: 0}, {$bit: {"a.$[i]": {or: NumberInt(10)}}}, {arrayFilters: [{i: 0}]}));
+ assert.eq({_id: 0, a: [NumberInt(10), NumberInt(2)]}, coll.findOne());
+}
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
+assert.writeOK(coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}}));
+assert.eq({_id: 0, a: [NumberInt(10), NumberInt(10)]}, coll.findOne());
+
+//
+// Multi update tests.
+//
+
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
+assert.writeOK(coll.insert({_id: 1, a: [0, 2, 0, 2]}));
+if (db.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 1, 3, 1]});
+ assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 2, 3, 2]});
+}
+assert.writeOK(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
+assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]});
+
+//
+// Collation tests.
+//
+
+if (db.getMongo().writeMode() === "commands") {
+ // arrayFilters respect operation collation.
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]}));
- assert.eq({_id: 0, a: [[0], [1, 2]]}, coll.findOne());
- }
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
- assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[]": 1}}));
- assert.eq({_id: 0, a: [[]]}, coll.findOne());
-
- // $pullAll.
+ assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
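+    // With strength: 2 (case-insensitive), the filter {i: "foo"} matches "FOO" as well.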
+ assert.writeOK(
+ coll.update({_id: 0},
+ {$set: {"a.$[i]": "bar"}},
+ {arrayFilters: [{i: "foo"}], collation: {locale: "en_US", strength: 2}}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
+
+ // arrayFilters respect the collection default collation.
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
- coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0, 2]}}, {arrayFilters: [{i: 0}]}));
- assert.eq({_id: 0, a: [[1, 3], [1, 2, 3, 4]]}, coll.findOne());
- }
+ assert.commandWorked(
+ db.createCollection(collName, {collation: {locale: "en_US", strength: 2}}));
+ coll = db[collName];
+ assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
+}
+
+//
+// Examples.
+//
+
+// Update all documents in array.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].b": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 2}]});
+
+// Update all matching documents in array.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]}));
- res = coll.update({_id: 0}, {$pullAll: {"a.$[]": [0, 2]}});
- assert.eq({_id: 0, a: [[1, 3], [1, 3, 4]]}, coll.findOne());
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 1}]});
+}
- // $pull.
+
+// Update all matching scalars in array.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[i]": 1}}, {arrayFilters: [{i: 2}]}));
- assert.eq({_id: 0, a: [[0, 1], [2]]}, coll.findOne());
- }
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]}));
- assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[]": 1}}));
- assert.eq({_id: 0, a: [[0], [2]]}, coll.findOne());
-
- // $push.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[i]": 4}}, {arrayFilters: [{i: 0}]}));
- assert.eq({_id: 0, a: [[0, 1, 4], [2, 3]]}, coll.findOne());
- }
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]}));
- assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[]": 4}}));
- assert.eq({_id: 0, a: [[0, 1, 4], [2, 3, 4]]}, coll.findOne());
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1]});
+}
- // $bit.
+
+// Update all matching scalars in array of arrays.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.update(
- {_id: 0}, {$bit: {"a.$[i]": {or: NumberInt(10)}}}, {arrayFilters: [{i: 0}]}));
- assert.eq({_id: 0, a: [NumberInt(10), NumberInt(2)]}, coll.findOne());
- }
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]}));
- assert.writeOK(coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}}));
- assert.eq({_id: 0, a: [NumberInt(10), NumberInt(10)]}, coll.findOne());
-
- //
- // Multi update tests.
- //
+ assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [0, 1]]}));
+ assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[2, 1], [2, 1]]});
+}
+// Update all matching documents in nested array.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]}));
- assert.writeOK(coll.insert({_id: 1, a: [0, 2, 0, 2]}));
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(
- coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 1, 3, 1]});
- assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 2, 3, 2]});
- }
- assert.writeOK(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]});
- assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]});
-
- //
- // Collation tests.
- //
-
- if (db.getMongo().writeMode() === "commands") {
- // arrayFilters respect operation collation.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
- assert.writeOK(
- coll.update({_id: 0},
- {$set: {"a.$[i]": "bar"}},
- {arrayFilters: [{i: "foo"}], collation: {locale: "en_US", strength: 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
-
- // arrayFilters respect the collection default collation.
- coll.drop();
- assert.commandWorked(
- db.createCollection(collName, {collation: {locale: "en_US", strength: 2}}));
- coll = db[collName];
- assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]}));
- assert.writeOK(
- coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]});
- }
-
- //
- // Examples.
- //
-
- // Update all documents in array.
+ assert.writeOK(
+ coll.insert({_id: 0, a: [{b: 0, c: [{d: 0}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]}));
+ assert.writeOK(coll.update(
+ {_id: 0}, {$set: {"a.$[i].c.$[j].d": 2}}, {arrayFilters: [{"i.b": 0}, {"j.d": 0}]}));
+ assert.eq(coll.findOne({_id: 0}),
+ {_id: 0, a: [{b: 0, c: [{d: 2}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]});
+}
+
+// Update all scalars in array matching a logical predicate.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].b": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 2}]});
-
- // Update all matching documents in array.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
- assert.writeOK(
- coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 1}]});
- }
-
- // Update all matching scalars in array.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1]});
- }
-
- // Update all matching scalars in array of arrays.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [0, 1]]}));
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[2, 1], [2, 1]]});
- }
-
- // Update all matching documents in nested array.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(
- coll.insert({_id: 0, a: [{b: 0, c: [{d: 0}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]}));
- assert.writeOK(coll.update(
- {_id: 0}, {$set: {"a.$[i].c.$[j].d": 2}}, {arrayFilters: [{"i.b": 0}, {"j.d": 0}]}));
- assert.eq(coll.findOne({_id: 0}),
- {_id: 0, a: [{b: 0, c: [{d: 2}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]});
- }
-
- // Update all scalars in array matching a logical predicate.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 3]}));
- assert.writeOK(coll.update(
- {_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{$or: [{i: 0}, {i: 3}]}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2]});
- }
-
- //
- // Error cases.
- //
-
- // Provide an <id> with no array filter.
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 3]}));
+ assert.writeOK(
+ coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{$or: [{i: 0}, {i: 3}]}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2]});
+}
+
+//
+// Error cases.
+//
+
+// Provide an <id> with no array filter.
+coll.drop();
+res = coll.update({_id: 0}, {$set: {"a.$[i]": 0}});
+assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("No array filter found for identifier 'i' in path 'a.$[i]'"),
+ "update failed for a reason other than missing array filter");
+
+// Use an <id> at the same position as a $, integer, or field name.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 0}});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.$": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "Updating the path 'a.$' would create a conflict at 'a'"),
+ "update failed for a reason other than conflicting array update and positional operator");
+
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.0": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf(
+ "Updating the path 'a.0' would create a conflict at 'a'"),
+ "update failed for a reason other than conflicting array update and integer field name");
+
+ res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.b": 0}}, {arrayFilters: [{i: 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
assert.neq(-1,
res.getWriteError().errmsg.indexOf(
- "No array filter found for identifier 'i' in path 'a.$[i]'"),
- "update failed for a reason other than missing array filter");
-
- // Use an <id> at the same position as a $, integer, or field name.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.$": 0}}, {arrayFilters: [{i: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "Updating the path 'a.$' would create a conflict at 'a'"),
- "update failed for a reason other than conflicting array update and positional operator");
-
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.0": 0}}, {arrayFilters: [{i: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf(
- "Updating the path 'a.0' would create a conflict at 'a'"),
- "update failed for a reason other than conflicting array update and integer field name");
-
- res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.b": 0}}, {arrayFilters: [{i: 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
- assert.neq(-1,
- res.getWriteError().errmsg.indexOf(
- "Updating the path 'a.b' would create a conflict at 'a'"),
- "update failed for a reason other than conflicting array update and field name");
- }
-
- // Include an implicit array traversal in a path in an update modifier.
+ "Updating the path 'a.b' would create a conflict at 'a'"),
+ "update failed for a reason other than conflicting array update and field name");
+}
+
+// Include an implicit array traversal in a path in an update modifier.
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+res = coll.update({_id: 0}, {$set: {"a.b": 1}});
+assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
+assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot create field 'b' in element {a: [ { b: 0.0 } ]}"),
+ "update failed for a reason other than implicit array traversal");
+
+// <id> contains special characters or does not begin with a lowercase letter.
+if (db.getMongo().writeMode() === "commands") {
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
- res = coll.update({_id: 0}, {$set: {"a.b": 1}});
+
+ res = coll.update({_id: 0}, {$set: {"a.$[$i]": 1}}, {arrayFilters: [{"$i": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf("unknown top level operator: $i"),
+ "update failed for a reason other than bad array filter identifier");
+
+ res = coll.update({_id: 0}, {$set: {"a.$[I]": 1}}, {arrayFilters: [{"I": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.BadValue);
+ assert(res.getWriteError().errmsg.startsWith("Error parsing array filter") &&
+ res.getWriteError().errmsg.endsWith(
+ "The top-level field name must be an alphanumeric " +
+ "string beginning with a lowercase letter, found 'I'"),
+ "update failed for a reason other than bad array filter identifier: " +
+ tojson(res.getWriteError()));
+
+ assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]}));
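+    // 'i.j' is not a valid identifier, so "a.$[i.j]" is treated as a literal field path rather
+    // than an array filter reference.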
+ res = coll.update({_id: 0}, {$set: {"a.$[i.j]": 1, "b.$[i]": 1}}, {arrayFilters: [{"i.j": 0}]});
assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
+ assert.neq(
+ -1,
+ res.getWriteError().errmsg.indexOf("Cannot create field '$[i' in element {a: [ 0.0 ]}"),
+ "update failed for a reason other than bad array filter identifier");
+}
+
+//
+// Nested array update conflict detection.
+//
+
+if (db.getMongo().writeMode() === "commands") {
+ // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].d" are not a conflict, even if i and j are not
+ // disjoint.
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]}));
+ assert.writeOK(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}));
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{x: 0, b: [{y: 0, c: 1, d: 1}]}]});
+
+ // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].c" are a conflict iff i and j are not disjoint.
+ coll.drop();
+ assert.writeOK(
+ coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}]}]}));
+
+ res = coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
assert.neq(-1,
- res.getWriteError().errmsg.indexOf(
- "Cannot create field 'b' in element {a: [ { b: 0.0 } ]}"),
- "update failed for a reason other than implicit array traversal");
-
- // <id> contains special characters or does not begin with a lowercase letter.
- if (db.getMongo().writeMode() === "commands") {
- coll.drop();
-
- res = coll.update({_id: 0}, {$set: {"a.$[$i]": 1}}, {arrayFilters: [{"$i": 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
- assert.neq(-1,
- res.getWriteError().errmsg.indexOf("unknown top level operator: $i"),
- "update failed for a reason other than bad array filter identifier");
-
- res = coll.update({_id: 0}, {$set: {"a.$[I]": 1}}, {arrayFilters: [{"I": 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.BadValue);
- assert(res.getWriteError().errmsg.startsWith("Error parsing array filter") &&
- res.getWriteError().errmsg.endsWith(
- "The top-level field name must be an alphanumeric " +
- "string beginning with a lowercase letter, found 'I'"),
- "update failed for a reason other than bad array filter identifier: " +
- tojson(res.getWriteError()));
-
- assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]}));
- res = coll.update(
- {_id: 0}, {$set: {"a.$[i.j]": 1, "b.$[i]": 1}}, {arrayFilters: [{"i.j": 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.PathNotViable);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf("Cannot create field '$[i' in element {a: [ 0.0 ]}"),
- "update failed for a reason other than bad array filter identifier");
- }
-
- //
- // Nested array update conflict detection.
- //
-
- if (db.getMongo().writeMode() === "commands") {
- // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].d" are not a conflict, even if i and j are not
- // disjoint.
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]}));
- assert.writeOK(coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}},
- {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{x: 0, b: [{y: 0, c: 1, d: 1}]}]});
-
- // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].c" are a conflict iff i and j are not disjoint.
- coll.drop();
- assert.writeOK(
- coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}]}]}));
-
- res = coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
- {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
- assert.neq(
- -1,
- res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.$[k].c'"),
- "update failed for a reason other than conflicting array updates");
-
- assert.writeOK(coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
- {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]}));
- assert.eq(coll.findOne({_id: 0}),
- {_id: 0, a: [{x: 0, b: [{y: 0, c: 1}]}, {x: 1, b: [{y: 0, c: 2}]}]});
-
- // "a.$[i].b.$[k].c" and "a.$[j].b.$[m].c" are a conflict iff k and m intersect for some
- // element of a matching i and j.
- coll.drop();
- assert.writeOK(coll.insert(
- {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}, {y: 1, c: 0}]}]}));
-
- res = coll.update({_id: 0},
- {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}},
- {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}, {"m.y": 0}]});
- assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
- assert.neq(-1,
- res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.0.c'"),
- "update failed for a reason other than conflicting array updates");
-
- assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}}, {
- arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}]
- }));
- assert.eq(
- coll.findOne({_id: 0}),
- {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 1}, {y: 1, c: 2}]}]});
- }
+ res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.$[k].c'"),
+ "update failed for a reason other than conflicting array updates");
+
+ assert.writeOK(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]}));
+ assert.eq(coll.findOne({_id: 0}),
+ {_id: 0, a: [{x: 0, b: [{y: 0, c: 1}]}, {x: 1, b: [{y: 0, c: 2}]}]});
+ // "a.$[i].b.$[k].c" and "a.$[j].b.$[m].c" are a conflict iff k and m intersect for some
+ // element of a matching i and j.
+ coll.drop();
+ assert.writeOK(coll.insert(
+ {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}, {y: 1, c: 0}]}]}));
+
+ res = coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}},
+ {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}, {"m.y": 0}]});
+ assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators);
+ assert.neq(-1,
+ res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.0.c'"),
+ "update failed for a reason other than conflicting array updates");
+
+ assert.writeOK(coll.update({_id: 0},
+ {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}},
+ {arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}]}));
+ assert.eq(coll.findOne({_id: 0}),
+ {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 1}, {y: 1, c: 2}]}]});
+}
})();
diff --git a/jstests/core/update_array_offset_positional.js b/jstests/core/update_array_offset_positional.js
index 216399c86a1..8e433831c01 100644
--- a/jstests/core/update_array_offset_positional.js
+++ b/jstests/core/update_array_offset_positional.js
@@ -2,68 +2,68 @@
* Tests that array offset matches are not used to provide values for the positional operator.
*/
(function() {
- "use strict";
+"use strict";
- let coll = db.jstest_update_array_offset_positional;
- coll.drop();
+let coll = db.jstest_update_array_offset_positional;
+coll.drop();
- //
- // If there is no implicit array traversal, the positional operator cannot be used.
- //
+//
+// If there is no implicit array traversal, the positional operator cannot be used.
+//
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0]}));
- assert.writeError(coll.update({_id: 0, "a.0": 0}, {$set: {"a.$": 1}}));
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0]}));
+assert.writeError(coll.update({_id: 0, "a.0": 0}, {$set: {"a.$": 1}}));
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
- assert.writeError(coll.update({_id: 0, "a.0.b": 0}, {$set: {"a.$.b": 1}}));
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]}));
+assert.writeError(coll.update({_id: 0, "a.0.b": 0}, {$set: {"a.$.b": 1}}));
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
- assert.writeError(coll.update({_id: 0, "a.0.0": 0}, {$set: {"a.$.0": 1}}));
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [[0]]}));
+assert.writeError(coll.update({_id: 0, "a.0.0": 0}, {$set: {"a.$.0": 1}}));
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: [0]}]}));
- assert.writeError(coll.update({_id: 0, "a.0.b.0": 0}, {$set: {"a.$.b.0": 1}}));
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: [0]}]}));
+assert.writeError(coll.update({_id: 0, "a.0.b.0": 0}, {$set: {"a.$.b.0": 1}}));
- //
- // Array offset matches are not used to provide values for the positional operator on the same
- // path.
- //
+//
+// Array offset matches are not used to provide values for the positional operator on the same
+// path.
+//
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
- assert.writeOK(coll.update({_id: 0, "a.0.b": 1}, {$set: {"a.0.b.$": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
+assert.writeOK(coll.update({_id: 0, "a.0.b": 1}, {$set: {"a.0.b.$": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
- assert.writeOK(coll.update({_id: 0, "a.b.1": 1}, {$set: {"a.$.b.1": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]}));
+assert.writeOK(coll.update({_id: 0, "a.b.1": 1}, {$set: {"a.$.b.1": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]});
- //
- // Array offset matches are not used to provide values for the positional operator on a
- // different path.
- //
+//
+// Array offset matches are not used to provide values for the positional operator on a
+// different path.
+//
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [0]}));
- assert.writeOK(coll.update({_id: 0, a: 1, "b.0": 0}, {$set: {"a.$": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [0]});
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [0]}));
+assert.writeOK(coll.update({_id: 0, a: 1, "b.0": 0}, {$set: {"a.$": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [0]});
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: 0}]}));
- assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c": 0}, {$set: {"a.$": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: 0}]});
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: 0}]}));
+assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c": 0}, {$set: {"a.$": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: 0}]});
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [[0]]}));
- assert.writeOK(coll.update({_id: 0, a: 1, "b.0.0": 0}, {$set: {"a.$": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [[0]]});
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [[0]]}));
+assert.writeOK(coll.update({_id: 0, a: 1, "b.0.0": 0}, {$set: {"a.$": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [[0]]});
- coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: [0]}]}));
- assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c.0": 0}, {$set: {"a.$": 2}}));
- assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: [0]}]});
+coll.drop();
+assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: [0]}]}));
+assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c.0": 0}, {$set: {"a.$": 2}}));
+assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: [0]}]});
}());
diff --git a/jstests/core/update_blank1.js b/jstests/core/update_blank1.js
index 0a42114ed1c..cd8f7433ebe 100644
--- a/jstests/core/update_blank1.js
+++ b/jstests/core/update_blank1.js
@@ -8,7 +8,9 @@ t.drop();
orig = {
"": 1,
- _id: 2, "a": 3, "b": 4
+ _id: 2,
+ "a": 3,
+ "b": 4
};
t.insert(orig);
var res = t.update({}, {$set: {"c": 5}});
diff --git a/jstests/core/update_hint.js b/jstests/core/update_hint.js
index 64841ab2803..9412f84d71f 100644
--- a/jstests/core/update_hint.js
+++ b/jstests/core/update_hint.js
@@ -8,74 +8,88 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const coll = db.jstests_update_hint;
- coll.drop();
-
- assert.commandWorked(coll.insert({x: 1, y: 1}));
- assert.commandWorked(coll.insert({x: 1, y: 1}));
-
- assert.commandWorked(coll.createIndex({x: 1}));
- assert.commandWorked(coll.createIndex({y: -1}));
-
- function assertCommandUsesIndex(command, expectedHintKeyPattern) {
- const out = assert.commandWorked(coll.runCommand({explain: command}));
- const planStage = getPlanStage(out, "IXSCAN");
- assert.neq(null, planStage);
- assert.eq(planStage.keyPattern, expectedHintKeyPattern, tojson(planStage));
- }
-
- const updateCmd = {
- update: 'jstests_update_hint',
- };
-
- const updates = [{q: {x: 1}, u: {$set: {y: 1}}, hint: {x: 1}}];
-
- updateCmd.updates = updates;
- // Hint using a key pattern.
- assertCommandUsesIndex(updateCmd, {x: 1});
-
- // Hint using an index name.
- updates[0].hint = 'y_-1';
- assertCommandUsesIndex(updateCmd, {y: -1});
-
- // Passing a hint should not use the idhack fast-path.
- updates[0].q = {_id: 1};
- assertCommandUsesIndex(updateCmd, {y: -1});
-
- // Create a sparse index.
- assert.commandWorked(coll.createIndex({s: 1}, {sparse: true}));
-
- // Hint should be respected, even on incomplete indexes.
- updates[0].hint = {s: 1};
- assertCommandUsesIndex(updateCmd, {s: 1});
-
- // Command should fail with incorrectly formatted hints.
- updates[0].hint = 1;
- assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse);
- updates[0].hint = true;
- assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse);
-
- // Command should fail with hints to non-existent indexes.
- updates[0].hint = {badHint: 1};
- assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.BadValue);
-
- // Insert document that will be in the sparse index.
- assert.commandWorked(coll.insert({x: 1, s: 0}));
-
- // Update hinting a sparse index updates only the document in the sparse index.
- updates[0] = {q: {}, u: {$set: {s: 1}}, hint: {s: 1}};
- assert.commandWorked(coll.runCommand(updateCmd));
- assert.eq(1, coll.count({s: 1}));
-
- // Update hinting a sparse index with upsert option can result in an insert even if the correct
- // behaviour would be to update an existing document.
- assert.commandWorked(coll.insert({x: 2}));
- updates[0] = {q: {x: 2}, u: {$set: {s: 1}}, hint: {s: 1}, upsert: true};
- assert.commandWorked(coll.runCommand(updateCmd));
- assert.eq(2, coll.count({x: 2}));
-
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const coll = db.jstests_update_hint;
+coll.drop();
+
+assert.commandWorked(coll.insert({x: 1, y: 1}));
+assert.commandWorked(coll.insert({x: 1, y: 1}));
+
+assert.commandWorked(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({y: -1}));
+
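+// Runs 'command' through explain and asserts that the plan uses an IXSCAN over the index with
+// the expected key pattern.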
+function assertCommandUsesIndex(command, expectedHintKeyPattern) {
+ const out = assert.commandWorked(coll.runCommand({explain: command}));
+ const planStage = getPlanStage(out, "IXSCAN");
+ assert.neq(null, planStage);
+ assert.eq(planStage.keyPattern, expectedHintKeyPattern, tojson(planStage));
+}
+
+const updateCmd = {
+ update: 'jstests_update_hint',
+};
+
+const updates = [{q: {x: 1}, u: {$set: {y: 1}}, hint: {x: 1}}];
+
+updateCmd.updates = updates;
+// Hint using a key pattern.
+assertCommandUsesIndex(updateCmd, {x: 1});
+
+// Hint using an index name.
+updates[0].hint = 'y_-1';
+assertCommandUsesIndex(updateCmd, {y: -1});
+
+// Passing a hint should not use the idhack fast-path.
+updates[0].q = {
+ _id: 1
+};
+assertCommandUsesIndex(updateCmd, {y: -1});
+
+// Create a sparse index.
+assert.commandWorked(coll.createIndex({s: 1}, {sparse: true}));
+
+// Hint should be respected, even on incomplete indexes.
+updates[0].hint = {
+ s: 1
+};
+assertCommandUsesIndex(updateCmd, {s: 1});
+
+// Command should fail with incorrectly formatted hints.
+updates[0].hint = 1;
+assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse);
+updates[0].hint = true;
+assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse);
+
+// Command should fail with hints to non-existent indexes.
+updates[0].hint = {
+ badHint: 1
+};
+assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.BadValue);
+
+// Insert a document that will be included in the sparse index.
+assert.commandWorked(coll.insert({x: 1, s: 0}));
+
+// Update hinting a sparse index updates only the document in the sparse index.
+updates[0] = {
+ q: {},
+ u: {$set: {s: 1}},
+ hint: {s: 1}
+};
+assert.commandWorked(coll.runCommand(updateCmd));
+assert.eq(1, coll.count({s: 1}));
+
+// Update hinting a sparse index with upsert option can result in an insert even if the correct
+// behaviour would be to update an existing document.
+assert.commandWorked(coll.insert({x: 2}));
+updates[0] = {
+ q: {x: 2},
+ u: {$set: {s: 1}},
+ hint: {s: 1},
+ upsert: true
+};
+assert.commandWorked(coll.runCommand(updateCmd));
+assert.eq(2, coll.count({x: 2}));
})();
diff --git a/jstests/core/update_min_max_examples.js b/jstests/core/update_min_max_examples.js
index 62e870147bf..3ec86705a1f 100644
--- a/jstests/core/update_min_max_examples.js
+++ b/jstests/core/update_min_max_examples.js
@@ -1,71 +1,74 @@
// Basic examples for $min/$max
(function() {
- "use strict";
+"use strict";
- let res;
- const coll = db.update_min_max;
- coll.drop();
+let res;
+const coll = db.update_min_max;
+coll.drop();
- // $min for number
- coll.insert({_id: 1, a: 2});
- res = coll.update({_id: 1}, {$min: {a: 1}});
- assert.writeOK(res);
- assert.eq(coll.findOne({_id: 1}).a, 1);
+// $min for number
+coll.insert({_id: 1, a: 2});
+res = coll.update({_id: 1}, {$min: {a: 1}});
+assert.writeOK(res);
+assert.eq(coll.findOne({_id: 1}).a, 1);
- // $max for number
- coll.insert({_id: 2, a: 2});
- res = coll.update({_id: 2}, {$max: {a: 1}});
- assert.writeOK(res);
- assert.eq(coll.findOne({_id: 2}).a, 2);
+// $max for number
+coll.insert({_id: 2, a: 2});
+res = coll.update({_id: 2}, {$max: {a: 1}});
+assert.writeOK(res);
+assert.eq(coll.findOne({_id: 2}).a, 2);
- // $min for Date
- let date = new Date();
- coll.insert({_id: 3, a: date});
- // setMilliseconds() will roll over to change seconds if necessary.
- date.setMilliseconds(date.getMilliseconds() + 2);
- // Test that we have advanced the date and it's no longer the same as the one we inserted.
- assert.eq(null, coll.findOne({_id: 3, a: date}));
- const origDoc = coll.findOne({_id: 3});
- assert.commandWorked(coll.update({_id: 3}, {$min: {a: date}}));
- assert.eq(coll.findOne({_id: 3}).a, origDoc.a);
+// $min for Date
+let date = new Date();
+coll.insert({_id: 3, a: date});
+// setMilliseconds() will roll over to change seconds if necessary.
+date.setMilliseconds(date.getMilliseconds() + 2);
+// Test that we have advanced the date and it's no longer the same as the one we inserted.
+assert.eq(null, coll.findOne({_id: 3, a: date}));
+const origDoc = coll.findOne({_id: 3});
+assert.commandWorked(coll.update({_id: 3}, {$min: {a: date}}));
+assert.eq(coll.findOne({_id: 3}).a, origDoc.a);
- // $max for Date
- coll.insert({_id: 4, a: date});
- // setMilliseconds() will roll over to change seconds if necessary.
- date.setMilliseconds(date.getMilliseconds() + 2);
- // Test that we have advanced the date and it's no longer the same as the one we inserted.
- assert.eq(null, coll.findOne({_id: 4, a: date}));
- res = coll.update({_id: 4}, {$max: {a: date}});
- assert.writeOK(res);
- assert.eq(coll.findOne({_id: 4}).a, date);
+// $max for Date
+coll.insert({_id: 4, a: date});
+// setMilliseconds() will roll over to change seconds if necessary.
+date.setMilliseconds(date.getMilliseconds() + 2);
+// Test that we have advanced the date and it's no longer the same as the one we inserted.
+assert.eq(null, coll.findOne({_id: 4, a: date}));
+res = coll.update({_id: 4}, {$max: {a: date}});
+assert.writeOK(res);
+assert.eq(coll.findOne({_id: 4}).a, date);
- // $max for small number
- coll.insert({_id: 5, a: 1e-15});
- // Slightly bigger than 1e-15.
- const biggerval = 0.000000000000001000000000000001;
- res = coll.update({_id: 5}, {$max: {a: biggerval}});
- assert.writeOK(res);
- assert.eq(coll.findOne({_id: 5}).a, biggerval);
+// $max for small number
+coll.insert({_id: 5, a: 1e-15});
+// Slightly bigger than 1e-15.
+const biggerval = 0.000000000000001000000000000001;
+res = coll.update({_id: 5}, {$max: {a: biggerval}});
+assert.writeOK(res);
+assert.eq(coll.findOne({_id: 5}).a, biggerval);
- // $min for a small number
- coll.insert({_id: 6, a: biggerval});
- res = coll.update({_id: 6}, {$min: {a: 1e-15}});
- assert.writeOK(res);
- assert.eq(coll.findOne({_id: 6}).a, 1e-15);
+// $min for a small number
+coll.insert({_id: 6, a: biggerval});
+res = coll.update({_id: 6}, {$min: {a: 1e-15}});
+assert.writeOK(res);
+assert.eq(coll.findOne({_id: 6}).a, 1e-15);
- // $max with positional operator
- let insertdoc = {_id: 7, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]};
- coll.insert(insertdoc);
- res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}});
- assert.writeOK(res);
- insertdoc.y[1].a = 7;
- assert.docEq(coll.findOne({_id: 7}), insertdoc);
+// $max with positional operator
+let insertdoc = {_id: 7, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]};
+coll.insert(insertdoc);
+res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}});
+assert.writeOK(res);
+insertdoc.y[1].a = 7;
+assert.docEq(coll.findOne({_id: 7}), insertdoc);
- // $min with positional operator
- insertdoc = {_id: 8, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]};
- coll.insert(insertdoc);
- res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}});
- assert.writeOK(res);
- insertdoc.y[1].a = 5;
- assert.docEq(coll.findOne({_id: 8}), insertdoc);
+// $min with positional operator
+insertdoc = {
+ _id: 8,
+ y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]
+};
+coll.insert(insertdoc);
+res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}});
+assert.writeOK(res);
+insertdoc.y[1].a = 5;
+assert.docEq(coll.findOne({_id: 8}), insertdoc);
}());
diff --git a/jstests/core/update_modifier_pop.js b/jstests/core/update_modifier_pop.js
index c74d7f254bf..77c6bae702c 100644
--- a/jstests/core/update_modifier_pop.js
+++ b/jstests/core/update_modifier_pop.js
@@ -1,115 +1,112 @@
// @tags: [requires_non_retryable_writes]
(function() {
- "use strict";
-
- let coll = db.update_modifier_pop;
- coll.drop();
-
- assert.writeOK(coll.insert({_id: 0}));
-
- // $pop with value of 0 fails to parse.
- assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 0}}), ErrorCodes.FailedToParse);
-
- // $pop with value of -2 fails to parse.
- assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": -2}}), ErrorCodes.FailedToParse);
-
- // $pop with value of 2.5 fails to parse.
- assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 2.5}}),
- ErrorCodes.FailedToParse);
-
- // $pop with value of 1.1 fails to parse.
- assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 1.1}}),
- ErrorCodes.FailedToParse);
-
- // $pop with a nested object fails to parse.
- assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {a: {b: 1}}}), ErrorCodes.FailedToParse);
-
- // $pop is a no-op when the path does not exist.
- let writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
- assert.eq(writeRes.nMatched, 1);
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(writeRes.nModified, 0);
- }
-
- // $pop is a no-op when the path partially exists.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {c: 1}}));
- writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
- assert.eq(writeRes.nMatched, 1);
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(writeRes.nModified, 0);
- }
-
- // $pop fails when the path is blocked by a scalar element.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
- assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}}));
-
- // $pop fails when the path is blocked by an array element.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2]}}));
- assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}}));
-
- // $pop fails when the path exists but is not an array.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: {c: 1}}}));
- assert.writeError(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
-
- // $pop is a no-op when the path contains an empty array.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: []}}));
- writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
- assert.eq(writeRes.nMatched, 1);
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(writeRes.nModified, 0);
- }
-
- // Successfully pop from the end of an array.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2, 3]}}));
- writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
- assert.eq(writeRes.nMatched, 1);
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(writeRes.nModified, 1);
- }
- assert.eq({_id: 0, a: {b: [1, 2]}}, coll.findOne());
-
- // Successfully pop from the beginning of an array.
- writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": -1}}));
- assert.eq(writeRes.nMatched, 1);
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(writeRes.nModified, 1);
- }
- assert.eq({_id: 0, a: {b: [2]}}, coll.findOne());
-
- // $pop with the positional ($) operator.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5, 6]}]}));
- assert.writeOK(coll.update({_id: 0, "a.b": 5}, {$pop: {"a.$.b": 1}}));
- assert.eq({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5]}]}, coll.findOne());
-
- // $pop with arrayFilters.
- if (db.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2]}, {b: [4, 5]}, {b: [2, 3]}]}));
- assert.writeOK(
- coll.update({_id: 0}, {$pop: {"a.$[i].b": -1}}, {arrayFilters: [{"i.b": 2}]}));
- assert.eq({_id: 0, a: [{b: [2]}, {b: [4, 5]}, {b: [3]}]}, coll.findOne());
- }
-
- // $pop from a nested array.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [1, [2, 3, 4]]}));
- assert.writeOK(coll.update({_id: 0}, {$pop: {"a.1": 1}}));
- assert.eq({_id: 0, a: [1, [2, 3]]}, coll.findOne());
-
- // $pop is a no-op when array element in path does not exist.
+"use strict";
+
+let coll = db.update_modifier_pop;
+coll.drop();
+
+assert.writeOK(coll.insert({_id: 0}));
+
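+// $pop accepts only 1 (remove the last element) or -1 (remove the first element).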
+// $pop with value of 0 fails to parse.
+assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 0}}), ErrorCodes.FailedToParse);
+
+// $pop with value of -2 fails to parse.
+assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": -2}}), ErrorCodes.FailedToParse);
+
+// $pop with value of 2.5 fails to parse.
+assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 2.5}}), ErrorCodes.FailedToParse);
+
+// $pop with value of 1.1 fails to parse.
+assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 1.1}}), ErrorCodes.FailedToParse);
+
+// $pop with a nested object fails to parse.
+assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {a: {b: 1}}}), ErrorCodes.FailedToParse);
+
+// $pop is a no-op when the path does not exist.
+let writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.eq(writeRes.nMatched, 1);
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(writeRes.nModified, 0);
+}
+
+// $pop is a no-op when the path partially exists.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: {c: 1}}));
+writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.eq(writeRes.nMatched, 1);
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(writeRes.nModified, 0);
+}
+
+// $pop fails when the path is blocked by a scalar element.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
+assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}}));
+
+// $pop fails when the path is blocked by an array element.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2]}}));
+assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}}));
+
+// $pop fails when the path exists but is not an array.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: {b: {c: 1}}}));
+assert.writeError(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+
+// $pop is a no-op when the path contains an empty array.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: {b: []}}));
+writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.eq(writeRes.nMatched, 1);
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(writeRes.nModified, 0);
+}
+
+// Successfully pop from the end of an array.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2, 3]}}));
+writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}}));
+assert.eq(writeRes.nMatched, 1);
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(writeRes.nModified, 1);
+}
+assert.eq({_id: 0, a: {b: [1, 2]}}, coll.findOne());
+
+// Successfully pop from the beginning of an array.
+writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": -1}}));
+assert.eq(writeRes.nMatched, 1);
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(writeRes.nModified, 1);
+}
+assert.eq({_id: 0, a: {b: [2]}}, coll.findOne());
+
+// $pop with the positional ($) operator.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5, 6]}]}));
+assert.writeOK(coll.update({_id: 0, "a.b": 5}, {$pop: {"a.$.b": 1}}));
+assert.eq({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5]}]}, coll.findOne());
+
+// $pop with arrayFilters.
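+// The $[i] placeholder restricts the $pop to array elements matching the 'i' filter.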
+if (db.getMongo().writeMode() === "commands") {
assert.writeOK(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
- writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.2.b": 1}}));
- assert.eq(writeRes.nMatched, 1);
- if (db.getMongo().writeMode() === "commands") {
- assert.eq(writeRes.nModified, 0);
- }
+ assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2]}, {b: [4, 5]}, {b: [2, 3]}]}));
+ assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i].b": -1}}, {arrayFilters: [{"i.b": 2}]}));
+ assert.eq({_id: 0, a: [{b: [2]}, {b: [4, 5]}, {b: [3]}]}, coll.findOne());
+}
+
+// $pop from a nested array.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [1, [2, 3, 4]]}));
+assert.writeOK(coll.update({_id: 0}, {$pop: {"a.1": 1}}));
+assert.eq({_id: 0, a: [1, [2, 3]]}, coll.findOne());
+
+// $pop is a no-op when array element in path does not exist.
+assert.writeOK(coll.remove({}));
+assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]}));
+writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.2.b": 1}}));
+assert.eq(writeRes.nMatched, 1);
+if (db.getMongo().writeMode() === "commands") {
+ assert.eq(writeRes.nModified, 0);
+}
}());
diff --git a/jstests/core/update_multi5.js b/jstests/core/update_multi5.js
index 871f10cbc07..8f797d8de2f 100644
--- a/jstests/core/update_multi5.js
+++ b/jstests/core/update_multi5.js
@@ -2,19 +2,18 @@
// tests that $addToSet works in a multi-update.
(function() {
- "use strict";
- var t = db.update_multi5;
- t.drop();
+"use strict";
+var t = db.update_multi5;
+t.drop();
- assert.writeOK(t.insert({path: 'r1', subscribers: [1, 2]}));
- assert.writeOK(t.insert({path: 'r2', subscribers: [3, 4]}));
+assert.writeOK(t.insert({path: 'r1', subscribers: [1, 2]}));
+assert.writeOK(t.insert({path: 'r2', subscribers: [3, 4]}));
- var res =
- assert.writeOK(t.update({}, {$addToSet: {subscribers: 5}}, {upsert: false, multi: true}));
+var res = assert.writeOK(t.update({}, {$addToSet: {subscribers: 5}}, {upsert: false, multi: true}));
- assert.eq(res.nMatched, 2, tojson(res));
+assert.eq(res.nMatched, 2, tojson(res));
- t.find().forEach(function(z) {
- assert.eq(3, z.subscribers.length, tojson(z));
- });
+t.find().forEach(function(z) {
+ assert.eq(3, z.subscribers.length, tojson(z));
+});
})();
diff --git a/jstests/core/update_numeric_field_name.js b/jstests/core/update_numeric_field_name.js
index 2d1a4899adc..33b72e69f2f 100644
--- a/jstests/core/update_numeric_field_name.js
+++ b/jstests/core/update_numeric_field_name.js
@@ -1,29 +1,29 @@
// Test that update operations correctly fail if they violate the "ambiguous field name in array"
// constraint for indexes. This is designed to reproduce SERVER-37058.
(function() {
- "use strict";
+"use strict";
- const coll = db.update_numeric_field_name;
- coll.drop();
+const coll = db.update_numeric_field_name;
+coll.drop();
- assert.commandWorked(coll.insert({_id: 0, 'a': [{}]}));
- assert.commandWorked(coll.createIndex({'a.0.c': 1}));
+assert.commandWorked(coll.insert({_id: 0, 'a': [{}]}));
+assert.commandWorked(coll.createIndex({'a.0.c': 1}));
- // Attempt to insert a field name '0'. The first '0' refers to the first element of the array
- // 'a'.
- assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0': 1}}), 16746);
+// Attempt to insert a field name '0'. The first '0' refers to the first element of the array
+// 'a'.
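+// The second '0' is then an ambiguous field name: it could be an array index or an object field.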
+assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0': 1}}), 16746);
- // Verify that the indexes were not affected.
- let res = assert.commandWorked(coll.validate(true));
- assert(res.valid, tojson(res));
+// Verify that the indexes were not affected.
+let res = assert.commandWorked(coll.validate(true));
+assert(res.valid, tojson(res));
- assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0.b': 1}}), 16746);
- res = assert.commandWorked(coll.validate(true));
- assert(res.valid, tojson(res));
+assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0.b': 1}}), 16746);
+res = assert.commandWorked(coll.validate(true));
+assert(res.valid, tojson(res));
- // An update which does not violate the ambiguous field name in array constraint should succeed.
- assert.commandWorked(coll.update({_id: 0}, {$set: {'a.1.b.0.0': 1}}));
+// An update which does not violate the ambiguous field name in array constraint should succeed.
+assert.commandWorked(coll.update({_id: 0}, {$set: {'a.1.b.0.0': 1}}));
- res = assert.commandWorked(coll.validate(true));
- assert(res.valid, tojson(res));
+res = assert.commandWorked(coll.validate(true));
+assert(res.valid, tojson(res));
})();
diff --git a/jstests/core/update_pipeline_shell_helpers.js b/jstests/core/update_pipeline_shell_helpers.js
index d45830a06b5..d8bb7d7eb3d 100644
--- a/jstests/core/update_pipeline_shell_helpers.js
+++ b/jstests/core/update_pipeline_shell_helpers.js
@@ -7,84 +7,84 @@
* @tags: [requires_find_command, requires_non_retryable_writes, assumes_write_concern_unchanged]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For 'arrayEq'.
+load("jstests/aggregation/extras/utils.js"); // For 'arrayEq'.
- // Make sure that the test collection is empty before starting the test.
- const testColl = db.update_pipeline_shell_helpers_test;
- testColl.drop();
+// Make sure that the test collection is empty before starting the test.
+const testColl = db.update_pipeline_shell_helpers_test;
+testColl.drop();
- // Insert some test documents.
- assert.commandWorked(testColl.insert({_id: 1, a: 1, b: 2}));
- assert.commandWorked(testColl.insert({_id: 2, a: 2, b: 3}));
+// Insert some test documents.
+assert.commandWorked(testColl.insert({_id: 1, a: 1, b: 2}));
+assert.commandWorked(testColl.insert({_id: 2, a: 2, b: 3}));
- // Test that each of the update shell helpers permits pipeline-style updates.
- assert.commandWorked(testColl.update({_id: 1}, [{$set: {update: true}}]));
- assert.commandWorked(testColl.update({}, [{$set: {updateMulti: true}}], {multi: true}));
- assert.commandWorked(testColl.updateOne({_id: 1}, [{$set: {updateOne: true}}]));
- assert.commandWorked(testColl.updateMany({}, [{$set: {updateMany: true}}]));
- assert.commandWorked(testColl.bulkWrite([
- {updateOne: {filter: {_id: 1}, update: [{$set: {bulkWriteUpdateOne: true}}]}},
- {updateMany: {filter: {}, update: [{$set: {bulkWriteUpdateMany: true}}]}}
- ]));
+// Test that each of the update shell helpers permits pipeline-style updates.
+assert.commandWorked(testColl.update({_id: 1}, [{$set: {update: true}}]));
+assert.commandWorked(testColl.update({}, [{$set: {updateMulti: true}}], {multi: true}));
+assert.commandWorked(testColl.updateOne({_id: 1}, [{$set: {updateOne: true}}]));
+assert.commandWorked(testColl.updateMany({}, [{$set: {updateMany: true}}]));
+assert.commandWorked(testColl.bulkWrite([
+ {updateOne: {filter: {_id: 1}, update: [{$set: {bulkWriteUpdateOne: true}}]}},
+ {updateMany: {filter: {}, update: [{$set: {bulkWriteUpdateMany: true}}]}}
+]));
- // Test that each of the Bulk API update functions correctly handles pipeline syntax.
- const unorderedBulkOp = testColl.initializeUnorderedBulkOp();
- const orderedBulkOp = testColl.initializeOrderedBulkOp();
+// Test that each of the Bulk API update functions correctly handles pipeline syntax.
+const unorderedBulkOp = testColl.initializeUnorderedBulkOp();
+const orderedBulkOp = testColl.initializeOrderedBulkOp();
- unorderedBulkOp.find({_id: 1}).updateOne([{$set: {unorderedBulkOpUpdateOne: true}}]);
- unorderedBulkOp.find({}).update([{$set: {unorderedBulkOpUpdateMulti: true}}]);
- orderedBulkOp.find({_id: 1}).updateOne([{$set: {orderedBulkOpUpdateOne: true}}]);
- orderedBulkOp.find({}).update([{$set: {orderedBulkOpUpdateMulti: true}}]);
- assert.commandWorked(unorderedBulkOp.execute());
- assert.commandWorked(orderedBulkOp.execute());
+unorderedBulkOp.find({_id: 1}).updateOne([{$set: {unorderedBulkOpUpdateOne: true}}]);
+unorderedBulkOp.find({}).update([{$set: {unorderedBulkOpUpdateMulti: true}}]);
+orderedBulkOp.find({_id: 1}).updateOne([{$set: {orderedBulkOpUpdateOne: true}}]);
+orderedBulkOp.find({}).update([{$set: {orderedBulkOpUpdateMulti: true}}]);
+assert.commandWorked(unorderedBulkOp.execute());
+assert.commandWorked(orderedBulkOp.execute());
- // Verify that the results of the various update operations are as expected.
- const observedResults = testColl.find().toArray();
- const expectedResults = [
- {
- _id: 1,
- a: 1,
- b: 2,
- update: true,
- updateMulti: true,
- updateOne: true,
- updateMany: true,
- bulkWriteUpdateOne: true,
- bulkWriteUpdateMany: true,
- unorderedBulkOpUpdateOne: true,
- unorderedBulkOpUpdateMulti: true,
- orderedBulkOpUpdateOne: true,
- orderedBulkOpUpdateMulti: true
- },
- {
- _id: 2,
- a: 2,
- b: 3,
- updateMulti: true,
- updateMany: true,
- bulkWriteUpdateMany: true,
- unorderedBulkOpUpdateMulti: true,
- orderedBulkOpUpdateMulti: true
- }
- ];
- assert(arrayEq(observedResults, expectedResults));
+// Verify that the results of the various update operations are as expected.
+const observedResults = testColl.find().toArray();
+const expectedResults = [
+ {
+ _id: 1,
+ a: 1,
+ b: 2,
+ update: true,
+ updateMulti: true,
+ updateOne: true,
+ updateMany: true,
+ bulkWriteUpdateOne: true,
+ bulkWriteUpdateMany: true,
+ unorderedBulkOpUpdateOne: true,
+ unorderedBulkOpUpdateMulti: true,
+ orderedBulkOpUpdateOne: true,
+ orderedBulkOpUpdateMulti: true
+ },
+ {
+ _id: 2,
+ a: 2,
+ b: 3,
+ updateMulti: true,
+ updateMany: true,
+ bulkWriteUpdateMany: true,
+ unorderedBulkOpUpdateMulti: true,
+ orderedBulkOpUpdateMulti: true
+ }
+];
+assert(arrayEq(observedResults, expectedResults));
- // Test that findAndModify and associated helpers correctly handle pipeline syntax.
- const expectedFindAndModifyPostImage = Object.merge(expectedResults[0], {findAndModify: true});
- const expectedFindOneAndUpdatePostImage =
- Object.merge(expectedFindAndModifyPostImage, {findOneAndUpdate: true});
- const findAndModifyPostImage = testColl.findAndModify(
- {query: {_id: 1}, update: [{$set: {findAndModify: true}}], new: true});
- assert.docEq(findAndModifyPostImage, expectedFindAndModifyPostImage);
- const findOneAndUpdatePostImage = testColl.findOneAndUpdate(
- {_id: 1}, [{$set: {findOneAndUpdate: true}}], {returnNewDocument: true});
- assert.docEq(findOneAndUpdatePostImage, expectedFindOneAndUpdatePostImage);
+// Test that findAndModify and associated helpers correctly handle pipeline syntax.
+const expectedFindAndModifyPostImage = Object.merge(expectedResults[0], {findAndModify: true});
+const expectedFindOneAndUpdatePostImage =
+ Object.merge(expectedFindAndModifyPostImage, {findOneAndUpdate: true});
+const findAndModifyPostImage =
+ testColl.findAndModify({query: {_id: 1}, update: [{$set: {findAndModify: true}}], new: true});
+assert.docEq(findAndModifyPostImage, expectedFindAndModifyPostImage);
+const findOneAndUpdatePostImage = testColl.findOneAndUpdate(
+ {_id: 1}, [{$set: {findOneAndUpdate: true}}], {returnNewDocument: true});
+assert.docEq(findOneAndUpdatePostImage, expectedFindOneAndUpdatePostImage);
- // Shell helpers for replacement updates should reject pipeline-style updates.
- assert.throws(() => testColl.replaceOne({_id: 1}, [{$replaceWith: {}}]));
- assert.throws(() => testColl.findOneAndReplace({_id: 1}, [{$replaceWith: {}}]));
- assert.throws(() => testColl.bulkWrite(
- [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}]));
+// Shell helpers for replacement updates should reject pipeline-style updates.
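+// These helpers expect a replacement document, so passing a pipeline array makes them throw.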
+assert.throws(() => testColl.replaceOne({_id: 1}, [{$replaceWith: {}}]));
+assert.throws(() => testColl.findOneAndReplace({_id: 1}, [{$replaceWith: {}}]));
+assert.throws(() => testColl.bulkWrite(
+ [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}]));
})();
diff --git a/jstests/core/update_with_pipeline.js b/jstests/core/update_with_pipeline.js
index 07d92c718df..963d72b6592 100644
--- a/jstests/core/update_with_pipeline.js
+++ b/jstests/core/update_with_pipeline.js
@@ -7,227 +7,214 @@
* @tags: [requires_find_command, requires_non_retryable_writes]
*/
(function() {
- "use strict";
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- const collName = "update_with_pipeline";
- const coll = db[collName];
-
- assert.commandWorked(coll.createIndex({x: 1}));
- assert.commandWorked(coll.createIndex({"y.$**": 1}));
-
- /**
- * Confirms that an update returns the expected set of documents. 'nModified' documents from
- * 'resultDocList' must match. 'nModified' may be smaller than the number of elements in
- * 'resultDocList'. This allows for the case where there are multiple documents that could be
- * updated, but only one is actually updated due to a 'multi: false' argument. Constant values
- * for the update command are passed in the 'constants' argument.
- */
- function testUpdate({
- query,
- initialDocumentList,
- update,
- resultDocList,
- nModified,
- options = {},
- constants = undefined
- }) {
- assert.eq(initialDocumentList.length, resultDocList.length);
- assert.commandWorked(coll.remove({}));
- assert.commandWorked(coll.insert(initialDocumentList));
- const upd = Object.assign({q: query, u: update}, options);
- if (constants !== undefined) {
- upd.c = constants;
- }
- const res = assert.commandWorked(db.runCommand({update: collName, updates: [upd]}));
- assert.eq(nModified, res.nModified);
-
- let nMatched = 0;
- for (let i = 0; i < resultDocList.length; ++i) {
- if (0 === bsonWoCompare(coll.findOne(resultDocList[i]), resultDocList[i])) {
- ++nMatched;
- }
- }
- assert.eq(
- nModified, nMatched, `actual=${coll.find().toArray()}, expected=${resultDocList}`);
- }
+"use strict";
- function testUpsertDoesInsert(query, update, resultDoc) {
- assert.commandWorked(coll.remove({}));
- assert.commandWorked(coll.update(query, update, {upsert: true}));
- assert.eq(coll.findOne({}), resultDoc, coll.find({}).toArray());
- }
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- // Update with existing document.
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1}],
- update: [{$set: {foo: 4}}],
- resultDocList: [{_id: 1, x: 1, foo: 4}],
- nModified: 1
- });
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1, y: 1}],
- update: [{$project: {x: 1}}],
- resultDocList: [{_id: 1, x: 1}],
- nModified: 1
- });
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1, y: [{z: 1, foo: 1}]}],
- update: [{$unset: ["x", "y.z"]}],
- resultDocList: [{_id: 1, y: [{foo: 1}]}],
- nModified: 1
- });
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1, t: {u: {v: 1}}}],
- update: [{$replaceWith: "$t"}],
- resultDocList: [{_id: 1, u: {v: 1}}],
- nModified: 1
- });
+const collName = "update_with_pipeline";
+const coll = db[collName];
- // Multi-update.
- testUpdate({
- query: {x: 1},
- initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 1}],
- update: [{$set: {bar: 4}}],
- resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 1, bar: 4}],
- nModified: 2,
- options: {multi: true}
- });
+assert.commandWorked(coll.createIndex({x: 1}));
+assert.commandWorked(coll.createIndex({"y.$**": 1}));
- // This test would fail in a sharded cluster when the two initial documents live on different
- // shards, so it is skipped when connected to a mongos.
- if (!FixtureHelpers.isMongos(db)) {
- testUpdate({
- query: {_id: {$in: [1, 2]}},
- initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 2}],
- update: [{$set: {bar: 4}}],
- resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 2, bar: 4}],
- nModified: 1,
- options: {multi: false}
- });
+/**
+ * Confirms that an update returns the expected set of documents. 'nModified' documents from
+ * 'resultDocList' must match. 'nModified' may be smaller than the number of elements in
+ * 'resultDocList'. This allows for the case where there are multiple documents that could be
+ * updated, but only one is actually updated due to a 'multi: false' argument. Constant values
+ * for the update command are passed in the 'constants' argument.
+ */
+function testUpdate({
+ query,
+ initialDocumentList,
+ update,
+ resultDocList,
+ nModified,
+ options = {},
+ constants = undefined
+}) {
+ assert.eq(initialDocumentList.length, resultDocList.length);
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.insert(initialDocumentList));
+ const upd = Object.assign({q: query, u: update}, options);
+ if (constants !== undefined) {
+ upd.c = constants;
}
+ const res = assert.commandWorked(db.runCommand({update: collName, updates: [upd]}));
+ assert.eq(nModified, res.nModified);
- // Upsert performs insert.
- testUpsertDoesInsert({_id: 1, x: 1}, [{$set: {foo: 4}}], {_id: 1, x: 1, foo: 4});
- testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: 1}}], {_id: 1, x: 1});
- testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: "foo"}}], {_id: 1, x: "foo"});
- testUpsertDoesInsert({_id: 1, x: 1, y: 1}, [{$unset: ["x"]}], {_id: 1, y: 1});
-
- // Update fails when an invalid stage is specified. This is a sanity check rather than an
- // exhaustive test of all stages.
- assert.commandFailedWithCode(coll.update({x: 1}, [{$match: {x: 1}}]),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(coll.update({x: 1}, [{$sort: {x: 1}}]), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(coll.update({x: 1}, [{$facet: {a: [{$match: {x: 1}}]}}]),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(coll.update({x: 1}, [{$indexStats: {}}]),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(coll.update({x: 1}, [{
- $bucket: {
- groupBy: "$a",
- boundaries: [0, 1],
- default: "foo",
- output: {count: {$sum: 1}}
- }
- }]),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- coll.update({x: 1},
- [{$lookup: {from: "foo", as: "as", localField: "a", foreignField: "b"}}]),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(coll.update({x: 1}, [{
- $graphLookup: {
- from: "foo",
- startWith: "$a",
- connectFromField: "a",
- connectToField: "b",
- as: "as"
- }
- }]),
- ErrorCodes.InvalidOptions);
-
- // Update fails when a supported agg stage is specified outside of a pipeline.
- assert.commandFailedWithCode(coll.update({_id: 1}, {$addFields: {x: 1}}),
- ErrorCodes.FailedToParse);
-
- // The 'arrayFilters' option is not valid for pipeline updates.
- assert.commandFailedWithCode(
- coll.update({_id: 1}, [{$set: {x: 1}}], {arrayFilters: [{x: {$eq: 1}}]}),
- ErrorCodes.FailedToParse);
-
- // Constants can be specified with pipeline-style updates.
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1}],
- useUpdateCommand: true,
- constants: {foo: "bar"},
- update: [{$set: {foo: "$$foo"}}],
- resultDocList: [{_id: 1, x: 1, foo: "bar"}],
- nModified: 1
- });
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1}],
- useUpdateCommand: true,
- constants: {foo: {a: {b: {c: "bar"}}}},
- update: [{$set: {foo: "$$foo"}}],
- resultDocList: [{_id: 1, x: 1, foo: {a: {b: {c: "bar"}}}}],
- nModified: 1
- });
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1}],
- useUpdateCommand: true,
- constants: {foo: [1, 2, 3]},
- update: [{$set: {foo: {$arrayElemAt: ["$$foo", 2]}}}],
- resultDocList: [{_id: 1, x: 1, foo: 3}],
- nModified: 1
- });
-
- const largeStr = "x".repeat(1000);
- testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1}],
- useUpdateCommand: true,
- constants: {largeStr: largeStr},
- update: [{$set: {foo: "$$largeStr"}}],
- resultDocList: [{_id: 1, x: 1, foo: largeStr}],
- nModified: 1
- });
-
- // References to document fields are not resolved in constants.
+ let nMatched = 0;
+ for (let i = 0; i < resultDocList.length; ++i) {
+ if (0 === bsonWoCompare(coll.findOne(resultDocList[i]), resultDocList[i])) {
+ ++nMatched;
+ }
+ }
+ assert.eq(nModified, nMatched, `actual=${coll.find().toArray()}, expected=${resultDocList}`);
+}
+
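+// Removes all documents, performs the upsert, and checks that the single inserted
+// document equals 'resultDoc'.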
+function testUpsertDoesInsert(query, update, resultDoc) {
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(coll.update(query, update, {upsert: true}));
+ assert.eq(coll.findOne({}), resultDoc, coll.find({}).toArray());
+}
+
+// Update with existing document.
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1}],
+ update: [{$set: {foo: 4}}],
+ resultDocList: [{_id: 1, x: 1, foo: 4}],
+ nModified: 1
+});
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1, y: 1}],
+ update: [{$project: {x: 1}}],
+ resultDocList: [{_id: 1, x: 1}],
+ nModified: 1
+});
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1, y: [{z: 1, foo: 1}]}],
+ update: [{$unset: ["x", "y.z"]}],
+ resultDocList: [{_id: 1, y: [{foo: 1}]}],
+ nModified: 1
+});
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1, t: {u: {v: 1}}}],
+ update: [{$replaceWith: "$t"}],
+ resultDocList: [{_id: 1, u: {v: 1}}],
+ nModified: 1
+});
+
+// Multi-update.
+testUpdate({
+ query: {x: 1},
+ initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 1}],
+ update: [{$set: {bar: 4}}],
+ resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 1, bar: 4}],
+ nModified: 2,
+ options: {multi: true}
+});
+
+// This test would fail in a sharded cluster when the two initial documents live on different
+// shards, so it is skipped when connected to a mongos.
+if (!FixtureHelpers.isMongos(db)) {
testUpdate({
- query: {_id: 1},
- initialDocumentList: [{_id: 1, x: 1}],
- useUpdateCommand: true,
- constants: {foo: "$x"},
- update: [{$set: {foo: "$$foo"}}],
- resultDocList: [{_id: 1, x: 1, foo: "$x"}],
- nModified: 1
+ query: {_id: {$in: [1, 2]}},
+ initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 2}],
+ update: [{$set: {bar: 4}}],
+ resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 2, bar: 4}],
+ nModified: 1,
+ options: {multi: false}
});
-
- // Cannot use expressions in constants.
- assert.commandFailedWithCode(db.runCommand({
- update: collName,
- updates: [{q: {_id: 1}, u: [{$set: {x: "$$foo"}}], c: {foo: {$add: [1, 2]}}}]
- }),
- ErrorCodes.DollarPrefixedFieldName);
-
- // Cannot use constants with regular updates.
- assert.commandFailedWithCode(
- db.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {x: "$$foo"}, c: {foo: "bar"}}]}),
- 51198);
- assert.commandFailedWithCode(
- db.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "$$foo"}}, c: {foo: "bar"}}]}),
- 51198);
- assert.commandFailedWithCode(
- db.runCommand({update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "1"}}, c: {}}]}),
- 51198);
+}
+
+// Upsert performs insert.
+testUpsertDoesInsert({_id: 1, x: 1}, [{$set: {foo: 4}}], {_id: 1, x: 1, foo: 4});
+testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: 1}}], {_id: 1, x: 1});
+testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: "foo"}}], {_id: 1, x: "foo"});
+testUpsertDoesInsert({_id: 1, x: 1, y: 1}, [{$unset: ["x"]}], {_id: 1, y: 1});
+
+// Update fails when an invalid stage is specified. This is a sanity check rather than an
+// exhaustive test of all stages.
+assert.commandFailedWithCode(coll.update({x: 1}, [{$match: {x: 1}}]), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(coll.update({x: 1}, [{$sort: {x: 1}}]), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(coll.update({x: 1}, [{$facet: {a: [{$match: {x: 1}}]}}]),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(coll.update({x: 1}, [{$indexStats: {}}]), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ coll.update(
+ {x: 1}, [{
+ $bucket: {groupBy: "$a", boundaries: [0, 1], default: "foo", output: {count: {$sum: 1}}}
+ }]),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ coll.update({x: 1}, [{$lookup: {from: "foo", as: "as", localField: "a", foreignField: "b"}}]),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ coll.update(
+ {x: 1}, [{
+ $graphLookup:
+ {from: "foo", startWith: "$a", connectFromField: "a", connectToField: "b", as: "as"}
+ }]),
+ ErrorCodes.InvalidOptions);
+
+// Update fails when a supported agg stage is specified outside of a pipeline.
+assert.commandFailedWithCode(coll.update({_id: 1}, {$addFields: {x: 1}}), ErrorCodes.FailedToParse);
+
+// The 'arrayFilters' option is not valid for pipeline updates.
+assert.commandFailedWithCode(
+ coll.update({_id: 1}, [{$set: {x: 1}}], {arrayFilters: [{x: {$eq: 1}}]}),
+ ErrorCodes.FailedToParse);
+
+// Constants can be specified with pipeline-style updates.
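+// Constants are supplied via the update's 'c' field and referenced as '$$'-prefixed variables.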
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1}],
+ useUpdateCommand: true,
+ constants: {foo: "bar"},
+ update: [{$set: {foo: "$$foo"}}],
+ resultDocList: [{_id: 1, x: 1, foo: "bar"}],
+ nModified: 1
+});
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1}],
+ useUpdateCommand: true,
+ constants: {foo: {a: {b: {c: "bar"}}}},
+ update: [{$set: {foo: "$$foo"}}],
+ resultDocList: [{_id: 1, x: 1, foo: {a: {b: {c: "bar"}}}}],
+ nModified: 1
+});
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1}],
+ useUpdateCommand: true,
+ constants: {foo: [1, 2, 3]},
+ update: [{$set: {foo: {$arrayElemAt: ["$$foo", 2]}}}],
+ resultDocList: [{_id: 1, x: 1, foo: 3}],
+ nModified: 1
+});
+
+const largeStr = "x".repeat(1000);
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1}],
+ useUpdateCommand: true,
+ constants: {largeStr: largeStr},
+ update: [{$set: {foo: "$$largeStr"}}],
+ resultDocList: [{_id: 1, x: 1, foo: largeStr}],
+ nModified: 1
+});
+
+// References to document fields are not resolved in constants.
+testUpdate({
+ query: {_id: 1},
+ initialDocumentList: [{_id: 1, x: 1}],
+ useUpdateCommand: true,
+ constants: {foo: "$x"},
+ update: [{$set: {foo: "$$foo"}}],
+ resultDocList: [{_id: 1, x: 1, foo: "$x"}],
+ nModified: 1
+});
+
+// Cannot use expressions in constants.
+assert.commandFailedWithCode(db.runCommand({
+ update: collName,
+ updates: [{q: {_id: 1}, u: [{$set: {x: "$$foo"}}], c: {foo: {$add: [1, 2]}}}]
+}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+// Cannot use constants with regular updates.
+assert.commandFailedWithCode(
+ db.runCommand({update: collName, updates: [{q: {_id: 1}, u: {x: "$$foo"}, c: {foo: "bar"}}]}),
+ 51198);
+assert.commandFailedWithCode(
+ db.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "$$foo"}}, c: {foo: "bar"}}]}),
+ 51198);
+assert.commandFailedWithCode(
+ db.runCommand({update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "1"}}, c: {}}]}), 51198);
})();
diff --git a/jstests/core/views/duplicate_ns.js b/jstests/core/views/duplicate_ns.js
index 2ef02cd6bc1..f7693549164 100644
--- a/jstests/core/views/duplicate_ns.js
+++ b/jstests/core/views/duplicate_ns.js
@@ -6,25 +6,25 @@
// Test the creation of view with a duplicate name to a collection.
(function() {
- "use strict";
+"use strict";
- const dbName = "views_duplicate_ns";
- const viewsDb = db.getSiblingDB(dbName);
- const collName = "myns";
- const viewId = dbName + "." + collName;
+const dbName = "views_duplicate_ns";
+const viewsDb = db.getSiblingDB(dbName);
+const collName = "myns";
+const viewId = dbName + "." + collName;
- assert.commandWorked(viewsDb.dropDatabase());
- assert.writeOK(viewsDb.system.views.remove({_id: viewId}));
- assert.commandWorked(viewsDb.runCommand({create: collName}));
- assert.writeOK(viewsDb.system.views.insert({
- _id: viewId,
- viewOn: "coll",
- pipeline: [],
- }));
- assert.eq(2,
- viewsDb.getCollectionInfos()
- .filter(coll => {
- return coll.name === collName;
- })
- .length);
+assert.commandWorked(viewsDb.dropDatabase());
+assert.writeOK(viewsDb.system.views.remove({_id: viewId}));
+assert.commandWorked(viewsDb.runCommand({create: collName}));
+assert.writeOK(viewsDb.system.views.insert({
+ _id: viewId,
+ viewOn: "coll",
+ pipeline: [],
+}));
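+// listCollections now reports two entries named 'myns': the collection and the duplicate view.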
+assert.eq(2,
+ viewsDb.getCollectionInfos()
+ .filter(coll => {
+ return coll.name === collName;
+ })
+ .length);
}());
\ No newline at end of file
diff --git a/jstests/core/views/invalid_system_views.js b/jstests/core/views/invalid_system_views.js
index c7d758415a7..cdfd8240589 100644
--- a/jstests/core/views/invalid_system_views.js
+++ b/jstests/core/views/invalid_system_views.js
@@ -12,69 +12,66 @@
*/
(function() {
- "use strict";
- const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
-
- function runTest(badViewDefinition) {
- let viewsDB = db.getSiblingDB("invalid_system_views");
- assert.commandWorked(viewsDB.dropDatabase());
-
- // Create a regular collection, then insert an invalid view into system.views.
- assert.writeOK(viewsDB.collection.insert({x: 1}));
- assert.commandWorked(viewsDB.runCommand({create: "collection2"}));
- assert.commandWorked(viewsDB.runCommand({create: "collection3"}));
- assert.commandWorked(viewsDB.collection.createIndex({x: 1}));
- assert.writeOK(viewsDB.system.views.insert(badViewDefinition),
- "failed to insert " + tojson(badViewDefinition));
-
- // Test that a command involving views properly fails with a views-specific error code.
- assert.commandFailedWithCode(
- viewsDB.runCommand({listCollections: 1}),
- ErrorCodes.InvalidViewDefinition,
- "listCollections should have failed in the presence of an invalid view");
-
- // Helper function to create a message to use if an assertion fails.
- function makeErrorMessage(msg) {
- return msg +
- " should work on a valid, existing collection, despite the presence of bad views" +
- " in system.views";
- }
-
- if (!isMongos) {
- // Commands that run on existing regular collections should not be impacted by the
- // presence of invalid views. However, applyOps doesn't work on mongos.
- assert.commandWorked(
- db.adminCommand( //
- {
- applyOps:
- [{op: "c", ns: "invalid_system_views.$cmd", o: {drop: "collection3"}}]
- }),
- makeErrorMessage("applyOps"));
- }
-
- assert.writeOK(viewsDB.collection.insert({y: "baz"}), makeErrorMessage("insert"));
-
- assert.writeOK(viewsDB.collection.update({y: "baz"}, {$set: {y: "qux"}}),
- makeErrorMessage("update"));
-
- assert.writeOK(viewsDB.collection.remove({y: "baz"}), makeErrorMessage("remove"));
+"use strict";
+const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
+
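+// Inserts 'badViewDefinition' directly into system.views, then verifies that commands on
+// regular collections still succeed despite the invalid view.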
+function runTest(badViewDefinition) {
+ let viewsDB = db.getSiblingDB("invalid_system_views");
+ assert.commandWorked(viewsDB.dropDatabase());
+
+ // Create a regular collection, then insert an invalid view into system.views.
+ assert.writeOK(viewsDB.collection.insert({x: 1}));
+ assert.commandWorked(viewsDB.runCommand({create: "collection2"}));
+ assert.commandWorked(viewsDB.runCommand({create: "collection3"}));
+ assert.commandWorked(viewsDB.collection.createIndex({x: 1}));
+ assert.writeOK(viewsDB.system.views.insert(badViewDefinition),
+ "failed to insert " + tojson(badViewDefinition));
+
+ // Test that a command involving views properly fails with a views-specific error code.
+ assert.commandFailedWithCode(
+ viewsDB.runCommand({listCollections: 1}),
+ ErrorCodes.InvalidViewDefinition,
+ "listCollections should have failed in the presence of an invalid view");
+
+ // Helper function to create a message to use if an assertion fails.
+ function makeErrorMessage(msg) {
+ return msg +
+ " should work on a valid, existing collection, despite the presence of bad views" +
+ " in system.views";
+ }
+ if (!isMongos) {
+ // Commands that run on existing regular collections should not be impacted by the
+ // presence of invalid views. However, applyOps doesn't work on mongos.
assert.commandWorked(
- viewsDB.runCommand({findAndModify: "collection", query: {x: 1}, update: {x: 2}}),
- makeErrorMessage("findAndModify with update"));
+ db.adminCommand( //
+ {applyOps: [{op: "c", ns: "invalid_system_views.$cmd", o: {drop: "collection3"}}]}),
+ makeErrorMessage("applyOps"));
+ }
- assert.commandWorked(
- viewsDB.runCommand({findAndModify: "collection", query: {x: 2}, remove: true}),
- makeErrorMessage("findAndModify with remove"));
+ assert.writeOK(viewsDB.collection.insert({y: "baz"}), makeErrorMessage("insert"));
- const lookup = {
- $lookup: {from: "collection2", localField: "_id", foreignField: "_id", as: "match"}
- };
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "collection", pipeline: [lookup], cursor: {}}),
- makeErrorMessage("aggregate with $lookup"));
+ assert.writeOK(viewsDB.collection.update({y: "baz"}, {$set: {y: "qux"}}),
+ makeErrorMessage("update"));
+
+ assert.writeOK(viewsDB.collection.remove({y: "baz"}), makeErrorMessage("remove"));
+
+ assert.commandWorked(
+ viewsDB.runCommand({findAndModify: "collection", query: {x: 1}, update: {x: 2}}),
+ makeErrorMessage("findAndModify with update"));
+
+ assert.commandWorked(
+ viewsDB.runCommand({findAndModify: "collection", query: {x: 2}, remove: true}),
+ makeErrorMessage("findAndModify with remove"));
+
+ const lookup = {
+ $lookup: {from: "collection2", localField: "_id", foreignField: "_id", as: "match"}
+ };
+ assert.commandWorked(
+ viewsDB.runCommand({aggregate: "collection", pipeline: [lookup], cursor: {}}),
+ makeErrorMessage("aggregate with $lookup"));
- const graphLookup = {
+ const graphLookup = {
$graphLookup: {
from: "collection2",
startWith: "$_id",
@@ -83,68 +80,61 @@
as: "match"
}
};
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "collection", pipeline: [graphLookup], cursor: {}}),
- makeErrorMessage("aggregate with $graphLookup"));
-
- assert.commandWorked(viewsDB.runCommand({dropIndexes: "collection", index: "x_1"}),
- makeErrorMessage("dropIndexes"));
-
- assert.commandWorked(viewsDB.collection.createIndex({x: 1}),
- makeErrorMessage("createIndexes"));
-
- if (!isMongos) {
- assert.commandWorked(viewsDB.collection.reIndex(), makeErrorMessage("reIndex"));
- }
+ assert.commandWorked(
+ viewsDB.runCommand({aggregate: "collection", pipeline: [graphLookup], cursor: {}}),
+ makeErrorMessage("aggregate with $graphLookup"));
- const storageEngine = jsTest.options().storageEngine;
- if (isMongos || storageEngine === "ephemeralForTest" || storageEngine === "inMemory" ||
- storageEngine === "biggie") {
- print("Not testing compact command on mongos or ephemeral storage engine");
- } else {
- assert.commandWorked(viewsDB.runCommand({compact: "collection", force: true}),
- makeErrorMessage("compact"));
- }
+ assert.commandWorked(viewsDB.runCommand({dropIndexes: "collection", index: "x_1"}),
+ makeErrorMessage("dropIndexes"));
- assert.commandWorked(
- viewsDB.runCommand({collMod: "collection", validator: {x: {$type: "string"}}}),
- makeErrorMessage("collMod"));
-
- const renameCommand = {
- renameCollection: "invalid_system_views.collection",
- to: "invalid_system_views.collection2",
- dropTarget: true
- };
- assert.commandWorked(viewsDB.adminCommand(renameCommand),
- makeErrorMessage("renameCollection"));
+ assert.commandWorked(viewsDB.collection.createIndex({x: 1}), makeErrorMessage("createIndexes"));
- assert.commandWorked(viewsDB.runCommand({drop: "collection2"}), makeErrorMessage("drop"));
+ if (!isMongos) {
+ assert.commandWorked(viewsDB.collection.reIndex(), makeErrorMessage("reIndex"));
+ }
- // Drop the offending view so that the validate hook succeeds.
- assert.writeOK(viewsDB.system.views.remove(badViewDefinition));
+ const storageEngine = jsTest.options().storageEngine;
+ if (isMongos || storageEngine === "ephemeralForTest" || storageEngine === "inMemory" ||
+ storageEngine === "biggie") {
+ print("Not testing compact command on mongos or ephemeral storage engine");
+ } else {
+ assert.commandWorked(viewsDB.runCommand({compact: "collection", force: true}),
+ makeErrorMessage("compact"));
}
- runTest(
- {_id: "invalid_system_views.badViewStringPipeline", viewOn: "collection", pipeline: "bad"});
- runTest({
- _id: "invalid_system_views.badViewEmptyObjectPipeline",
- viewOn: "collection",
- pipeline: {}
- });
- runTest(
- {_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "collection", pipeline: 7});
- runTest({
- _id: "invalid_system_views.badViewArrayWithIntegerPipeline",
- viewOn: "collection",
- pipeline: [1]
- });
- runTest({
- _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline",
- viewOn: "collection",
- pipeline: [[]]
- });
- runTest({_id: 7, viewOn: "collection", pipeline: []});
- runTest({_id: "invalid_system_views.embedded\0null", viewOn: "collection", pipeline: []});
- runTest({_id: "invalidNotFullyQualifiedNs", viewOn: "collection", pipeline: []});
- runTest({_id: "invalid_system_views.missingViewOnField", pipeline: []});
+ assert.commandWorked(
+ viewsDB.runCommand({collMod: "collection", validator: {x: {$type: "string"}}}),
+ makeErrorMessage("collMod"));
+
+ const renameCommand = {
+ renameCollection: "invalid_system_views.collection",
+ to: "invalid_system_views.collection2",
+ dropTarget: true
+ };
+ assert.commandWorked(viewsDB.adminCommand(renameCommand), makeErrorMessage("renameCollection"));
+
+ assert.commandWorked(viewsDB.runCommand({drop: "collection2"}), makeErrorMessage("drop"));
+
+ // Drop the offending view so that the validate hook succeeds.
+ assert.writeOK(viewsDB.system.views.remove(badViewDefinition));
+}
+
+runTest({_id: "invalid_system_views.badViewStringPipeline", viewOn: "collection", pipeline: "bad"});
+runTest(
+ {_id: "invalid_system_views.badViewEmptyObjectPipeline", viewOn: "collection", pipeline: {}});
+runTest({_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "collection", pipeline: 7});
+runTest({
+ _id: "invalid_system_views.badViewArrayWithIntegerPipeline",
+ viewOn: "collection",
+ pipeline: [1]
+});
+runTest({
+ _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline",
+ viewOn: "collection",
+ pipeline: [[]]
+});
+runTest({_id: 7, viewOn: "collection", pipeline: []});
+runTest({_id: "invalid_system_views.embedded\0null", viewOn: "collection", pipeline: []});
+runTest({_id: "invalidNotFullyQualifiedNs", viewOn: "collection", pipeline: []});
+runTest({_id: "invalid_system_views.missingViewOnField", pipeline: []});
}());
diff --git a/jstests/core/views/view_with_invalid_dbname.js b/jstests/core/views/view_with_invalid_dbname.js
index a39ca49e934..8305c9970f4 100644
--- a/jstests/core/views/view_with_invalid_dbname.js
+++ b/jstests/core/views/view_with_invalid_dbname.js
@@ -6,24 +6,28 @@
// @tags: [ incompatible_with_embedded, SERVER-38379 ]
(function() {
- "use strict";
+"use strict";
- // Create a view whose dbname has an invalid embedded NULL character. That's not possible with
- // the 'create' command, but it is possible by manually inserting into the 'system.views'
- // collection.
- const viewName = "dbNameWithEmbedded\0Character.collectionName";
- const collName = "viewOnForViewWithInvalidDBNameTest";
- const viewDef = {_id: viewName, viewOn: collName, pipeline: []};
- assert.commandWorked(db.system.views.insert(viewDef));
+// Create a view whose dbname has an invalid embedded NULL character. That's not possible with
+// the 'create' command, but it is possible by manually inserting into the 'system.views'
+// collection.
+const viewName = "dbNameWithEmbedded\0Character.collectionName";
+const collName = "viewOnForViewWithInvalidDBNameTest";
+const viewDef = {
+ _id: viewName,
+ viewOn: collName,
+ pipeline: []
+};
+assert.commandWorked(db.system.views.insert(viewDef));
- // If the reinitialization of the durable view catalog tries to create a NamespaceString using
- // the 'viewName' field, it will throw an exception in a place that is not exception safe,
- // resulting in an invariant failure. This previously occurred because validation was only
- // checking the collection part of the namespace, not the dbname part. With correct validation
- // in place, reinitialization succeeds despite the invalid name.
- assert.commandWorked(db.adminCommand({restartCatalog: 1}));
+// If the reinitialization of the durable view catalog tries to create a NamespaceString using
+// the 'viewName' field, it will throw an exception in a place that is not exception safe,
+// resulting in an invariant failure. This previously occurred because validation was only
+// checking the collection part of the namespace, not the dbname part. With correct validation
+// in place, reinitialization succeeds despite the invalid name.
+assert.commandWorked(db.adminCommand({restartCatalog: 1}));
- // Don't let the bogus view stick around, or else it will cause an error in validation.
- const res = db.system.views.deleteOne({_id: viewName});
- assert.eq(1, res.deletedCount);
+// Don't let the bogus view stick around, or else it will cause an error in validation.
+const res = db.system.views.deleteOne({_id: viewName});
+assert.eq(1, res.deletedCount);
}());
diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js
index 2b7f78f319f..fb78211307f 100644
--- a/jstests/core/views/views_aggregation.js
+++ b/jstests/core/views/views_aggregation.js
@@ -4,205 +4,206 @@
* requires_non_retryable_commands]
*/
(function() {
- "use strict";
-
- // For assertMergeFailsForAllModesWithCode.
- load("jstests/aggregation/extras/merge_helpers.js");
- load("jstests/aggregation/extras/utils.js"); // For arrayEq, assertErrorCode, and
- // orderedArrayEq.
-
- let viewsDB = db.getSiblingDB("views_aggregation");
- assert.commandWorked(viewsDB.dropDatabase());
-
- // Helper functions.
- let assertAggResultEq = function(collection, pipeline, expected, ordered) {
- let coll = viewsDB.getCollection(collection);
- let arr = coll.aggregate(pipeline).toArray();
- let success = (typeof(ordered) === "undefined" || !ordered) ? arrayEq(arr, expected)
- : orderedArrayEq(arr, expected);
- assert(success, tojson({got: arr, expected: expected}));
- };
- let byPopulation = function(a, b) {
- if (a.pop < b.pop)
- return -1;
- else if (a.pop > b.pop)
- return 1;
- else
- return 0;
- };
-
- // Populate a collection with some test data.
- let allDocuments = [];
- allDocuments.push({_id: "New York", state: "NY", pop: 7});
- allDocuments.push({_id: "Newark", state: "NJ", pop: 3});
- allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10});
- allDocuments.push({_id: "San Francisco", state: "CA", pop: 4});
- allDocuments.push({_id: "Trenton", state: "NJ", pop: 5});
-
- let coll = viewsDB.coll;
- let bulk = coll.initializeUnorderedBulkOp();
- allDocuments.forEach(function(doc) {
- bulk.insert(doc);
+"use strict";
+
+// For assertMergeFailsForAllModesWithCode.
+load("jstests/aggregation/extras/merge_helpers.js");
+load("jstests/aggregation/extras/utils.js"); // For arrayEq, assertErrorCode, and
+ // orderedArrayEq.
+
+let viewsDB = db.getSiblingDB("views_aggregation");
+assert.commandWorked(viewsDB.dropDatabase());
+
+// Helper functions.
+let assertAggResultEq = function(collection, pipeline, expected, ordered) {
+ let coll = viewsDB.getCollection(collection);
+ let arr = coll.aggregate(pipeline).toArray();
+ let success = (typeof (ordered) === "undefined" || !ordered) ? arrayEq(arr, expected)
+ : orderedArrayEq(arr, expected);
+ assert(success, tojson({got: arr, expected: expected}));
+};
+let byPopulation = function(a, b) {
+ if (a.pop < b.pop)
+ return -1;
+ else if (a.pop > b.pop)
+ return 1;
+ else
+ return 0;
+};
+
+// Populate a collection with some test data.
+let allDocuments = [];
+allDocuments.push({_id: "New York", state: "NY", pop: 7});
+allDocuments.push({_id: "Newark", state: "NJ", pop: 3});
+allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10});
+allDocuments.push({_id: "San Francisco", state: "CA", pop: 4});
+allDocuments.push({_id: "Trenton", state: "NJ", pop: 5});
+
+let coll = viewsDB.coll;
+let bulk = coll.initializeUnorderedBulkOp();
+allDocuments.forEach(function(doc) {
+ bulk.insert(doc);
+});
+assert.writeOK(bulk.execute());
+
+// Create views on the data.
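+// 'emptyPipelineView' omits the pipeline field; the tests below expect it to behave like an
+// identity view over 'coll'.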
+assert.commandWorked(viewsDB.runCommand({create: "emptyPipelineView", viewOn: "coll"}));
+assert.commandWorked(
+ viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]}));
+assert.commandWorked(viewsDB.runCommand(
+ {create: "noIdView", viewOn: "coll", pipeline: [{$project: {_id: 0, state: 1, pop: 1}}]}));
+assert.commandWorked(viewsDB.runCommand({
+ create: "popSortedView",
+ viewOn: "identityView",
+ pipeline: [{$match: {pop: {$gte: 0}}}, {$sort: {pop: 1}}]
+}));
+
+// Find all documents with empty aggregations.
+assertAggResultEq("emptyPipelineView", [], allDocuments);
+assertAggResultEq("identityView", [], allDocuments);
+assertAggResultEq("identityView", [{$match: {}}], allDocuments);
+
+// Filter documents on a view with $match.
+assertAggResultEq(
+ "popSortedView", [{$match: {state: "NY"}}], [{_id: "New York", state: "NY", pop: 7}]);
+
+// An aggregation still works on a view that strips _id.
+assertAggResultEq("noIdView", [{$match: {state: "NY"}}], [{state: "NY", pop: 7}]);
+
+// Aggregations work on views that sort.
+const doOrderedSort = true;
+assertAggResultEq("popSortedView", [], allDocuments.sort(byPopulation), doOrderedSort);
+assertAggResultEq("popSortedView", [{$limit: 1}, {$project: {_id: 1}}], [{_id: "Palo Alto"}]);
+
+// Test that the $out stage errors when writing to a view namespace.
+assertErrorCode(coll, [{$out: "emptyPipelineView"}], ErrorCodes.CommandNotSupportedOnView);
+
+// Test that the $merge stage errors when writing to a view namespace.
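+// (assertMergeFailsForAllModesWithCode is assumed to attempt the $merge under every
+// 'whenMatched'/'whenNotMatched' mode combination and assert that each attempt fails with one of
+// the listed error codes.)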
+assertMergeFailsForAllModesWithCode({
+ source: viewsDB.coll,
+ target: viewsDB.emptyPipelineView,
+ errorCodes: [ErrorCodes.CommandNotSupportedOnView]
+});
+
+// Test that the $merge stage errors when writing to a view namespace in a foreign database.
+let foreignDB = db.getSiblingDB("views_aggregation_foreign");
+foreignDB.view.drop();
+assert.commandWorked(foreignDB.createView("view", "coll", []));
+
+assertMergeFailsForAllModesWithCode({
+ source: viewsDB.coll,
+ target: foreignDB.view,
+ errorCodes: [ErrorCodes.CommandNotSupportedOnView]
+});
+
+// Test that an aggregate on a view propagates the 'bypassDocumentValidation' option.
+const validatedCollName = "collectionWithValidator";
+viewsDB[validatedCollName].drop();
+assert.commandWorked(
+ viewsDB.createCollection(validatedCollName, {validator: {illegalField: {$exists: false}}}));
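+// The validator rejects any document containing 'illegalField', so the $out below can only
+// succeed if document validation is bypassed.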
+
+viewsDB.invalidDocs.drop();
+viewsDB.invalidDocsView.drop();
+assert.writeOK(viewsDB.invalidDocs.insert({illegalField: "present"}));
+assert.commandWorked(viewsDB.createView("invalidDocsView", "invalidDocs", []));
+
+assert.commandWorked(
+ viewsDB.runCommand({
+ aggregate: "invalidDocsView",
+ pipeline: [{$out: validatedCollName}],
+ cursor: {},
+ bypassDocumentValidation: true
+ }),
+ "Expected $out insertions to succeed since 'bypassDocumentValidation' was specified");
+
+// Test that an aggregate on a view propagates the 'allowDiskUse' option.
+const extSortLimit = 100 * 1024 * 1024;
+const largeStrSize = 10 * 1024 * 1024;
+const largeStr = new Array(largeStrSize).join('x');
+viewsDB.largeColl.drop();
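+// Each document carries a roughly 10 MB string, and we insert one more than
+// extSortLimit / largeStrSize of them, so sorting the collection in memory must exceed the
+// 100 MB limit.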
+for (let i = 0; i <= extSortLimit / largeStrSize; ++i) {
+ assert.writeOK(viewsDB.largeColl.insert({x: i, largeStr: largeStr}));
+}
+assertErrorCode(viewsDB.largeColl,
+ [{$sort: {x: -1}}],
+ 16819,
+ "Expected in-memory sort to fail due to excessive memory usage");
+viewsDB.largeView.drop();
+assert.commandWorked(viewsDB.createView("largeView", "largeColl", []));
+assertErrorCode(viewsDB.largeView,
+ [{$sort: {x: -1}}],
+ 16819,
+ "Expected in-memory sort to fail due to excessive memory usage");
+
+assert.commandWorked(
+ viewsDB.runCommand(
+ {aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}),
+ "Expected aggregate to succeed since 'allowDiskUse' was specified");
+
+// Test explain modes on a view.
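+// Since a view is resolved into an aggregation over its backing collection, the explain output
+// below reports the underlying namespace "views_aggregation.coll".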
+let explainPlan = assert.commandWorked(
+ viewsDB.popSortedView.explain("queryPlanner").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+explainPlan = assert.commandWorked(
+ viewsDB.popSortedView.explain("executionStats").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
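+// The view's $sort runs before the user's $limit, so the underlying cursor presumably has to
+// produce all five documents here, whereas against the base collection (below) the $limit is
+// absorbed into the cursor and only one document is returned.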
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
+assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("allPlansExecution")
+ .aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
+assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+// Passing a value of true for the explain option to the aggregation command, without using the
+// shell explain helper, should continue to work.
+explainPlan = assert.commandWorked(
+ viewsDB.popSortedView.aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true}));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+// Test allPlansExecution explain mode on the base collection.
+explainPlan = assert.commandWorked(
+ viewsDB.coll.explain("allPlansExecution").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 1);
+assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+// The explain:true option should not work when paired with the explain shell helper.
+assert.throws(function() {
+ viewsDB.popSortedView.explain("executionStats").aggregate([{$limit: 1}, {$match: {pop: 3}}], {
+ explain: true
});
- assert.writeOK(bulk.execute());
-
- // Create views on the data.
- assert.commandWorked(viewsDB.runCommand({create: "emptyPipelineView", viewOn: "coll"}));
- assert.commandWorked(
- viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]}));
- assert.commandWorked(viewsDB.runCommand(
- {create: "noIdView", viewOn: "coll", pipeline: [{$project: {_id: 0, state: 1, pop: 1}}]}));
- assert.commandWorked(viewsDB.runCommand({
- create: "popSortedView",
- viewOn: "identityView",
- pipeline: [{$match: {pop: {$gte: 0}}}, {$sort: {pop: 1}}]
- }));
-
- // Find all documents using empty aggregation pipelines.
- assertAggResultEq("emptyPipelineView", [], allDocuments);
- assertAggResultEq("identityView", [], allDocuments);
- assertAggResultEq("identityView", [{$match: {}}], allDocuments);
-
- // Filter documents on a view with $match.
- assertAggResultEq(
- "popSortedView", [{$match: {state: "NY"}}], [{_id: "New York", state: "NY", pop: 7}]);
-
- // An aggregation still works on a view that strips _id.
- assertAggResultEq("noIdView", [{$match: {state: "NY"}}], [{state: "NY", pop: 7}]);
-
- // Aggregations work on views that sort.
- const doOrderedSort = true;
- assertAggResultEq("popSortedView", [], allDocuments.sort(byPopulation), doOrderedSort);
- assertAggResultEq("popSortedView", [{$limit: 1}, {$project: {_id: 1}}], [{_id: "Palo Alto"}]);
-
- // Test that the $out stage errors when writing to a view namespace.
- assertErrorCode(coll, [{$out: "emptyPipelineView"}], ErrorCodes.CommandNotSupportedOnView);
-
- // Test that the $merge stage errors when writing to a view namespace.
- assertMergeFailsForAllModesWithCode({
- source: viewsDB.coll,
- target: viewsDB.emptyPipelineView,
- errorCodes: [ErrorCodes.CommandNotSupportedOnView]
- });
-
- // Test that the $merge stage errors when writing to a view namespace in a foreign database.
- let foreignDB = db.getSiblingDB("views_aggregation_foreign");
- foreignDB.view.drop();
- assert.commandWorked(foreignDB.createView("view", "coll", []));
-
- assertMergeFailsForAllModesWithCode({
- source: viewsDB.coll,
- target: foreignDB.view,
- errorCodes: [ErrorCodes.CommandNotSupportedOnView]
- });
-
- // Test that an aggregate on a view propagates the 'bypassDocumentValidation' option.
- const validatedCollName = "collectionWithValidator";
- viewsDB[validatedCollName].drop();
- assert.commandWorked(
- viewsDB.createCollection(validatedCollName, {validator: {illegalField: {$exists: false}}}));
-
- viewsDB.invalidDocs.drop();
- viewsDB.invalidDocsView.drop();
- assert.writeOK(viewsDB.invalidDocs.insert({illegalField: "present"}));
- assert.commandWorked(viewsDB.createView("invalidDocsView", "invalidDocs", []));
-
- assert.commandWorked(
- viewsDB.runCommand({
- aggregate: "invalidDocsView",
- pipeline: [{$out: validatedCollName}],
- cursor: {},
- bypassDocumentValidation: true
- }),
- "Expected $out insertions to succeed since 'bypassDocumentValidation' was specified");
-
- // Test that an aggregate on a view propagates the 'allowDiskUse' option.
- const extSortLimit = 100 * 1024 * 1024;
- const largeStrSize = 10 * 1024 * 1024;
- const largeStr = new Array(largeStrSize).join('x');
- viewsDB.largeColl.drop();
- for (let i = 0; i <= extSortLimit / largeStrSize; ++i) {
- assert.writeOK(viewsDB.largeColl.insert({x: i, largeStr: largeStr}));
- }
- assertErrorCode(viewsDB.largeColl,
- [{$sort: {x: -1}}],
- 16819,
- "Expected in-memory sort to fail due to excessive memory usage");
- viewsDB.largeView.drop();
- assert.commandWorked(viewsDB.createView("largeView", "largeColl", []));
- assertErrorCode(viewsDB.largeView,
- [{$sort: {x: -1}}],
- 16819,
- "Expected in-memory sort to fail due to excessive memory usage");
-
- assert.commandWorked(
- viewsDB.runCommand(
- {aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}),
- "Expected aggregate to succeed since 'allowDiskUse' was specified");
-
- // Test explain modes on a view.
- let explainPlan = assert.commandWorked(
- viewsDB.popSortedView.explain("queryPlanner").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
- assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
-
- explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("executionStats")
- .aggregate([{$limit: 1}, {$match: {pop: 3}}]));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
- assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
-
- explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("allPlansExecution")
- .aggregate([{$limit: 1}, {$match: {pop: 3}}]));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5);
- assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
-
- // Passing a value of true for the explain option to the aggregation command, without using the
- // shell explain helper, should continue to work.
- explainPlan = assert.commandWorked(
- viewsDB.popSortedView.aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true}));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
- assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
-
- // Test allPlansExecution explain mode on the base collection.
- explainPlan = assert.commandWorked(
- viewsDB.coll.explain("allPlansExecution").aggregate([{$limit: 1}, {$match: {pop: 3}}]));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 1);
- assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
-
- // The explain:true option should not work when paired with the explain shell helper.
- assert.throws(function() {
- viewsDB.popSortedView.explain("executionStats")
- .aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true});
- });
-
- // The remaining tests involve $lookup and $graphLookup. Since these stages cannot read from
- // sharded collections, skip these tests when running in a sharded configuration.
- let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster"));
- const isMongos = (isMasterResponse.msg === "isdbgrid");
- if (isMongos) {
- jsTest.log("Tests are being run on a mongos; skipping all $lookup and $graphLookup tests.");
- return;
- }
-
- // Test that the $lookup stage resolves the view namespace referenced in the 'from' field.
- assertAggResultEq(
- coll.getName(),
- [
- {$match: {_id: "New York"}},
- {$lookup: {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}},
- {$unwind: "$matched"},
- {$project: {_id: 1, matchedId: "$matched._id"}}
- ],
- [{_id: "New York", matchedId: "New York"}]);
+});
+
+// The remaining tests involve $lookup and $graphLookup. Since these stages cannot read from
+// sharded collections, skip these tests when running in a sharded configuration.
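+// (A mongos identifies itself by returning msg: "isdbgrid" from isMaster.)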
+let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster"));
+const isMongos = (isMasterResponse.msg === "isdbgrid");
+if (isMongos) {
+ jsTest.log("Tests are being run on a mongos; skipping all $lookup and $graphLookup tests.");
+ return;
+}
+
+// Test that the $lookup stage resolves the view namespace referenced in the 'from' field.
+assertAggResultEq(
+ coll.getName(),
+ [
+ {$match: {_id: "New York"}},
+ {$lookup: {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}},
+ {$unwind: "$matched"},
+ {$project: {_id: 1, matchedId: "$matched._id"}}
+ ],
+ [{_id: "New York", matchedId: "New York"}]);
- // Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field.
- assertAggResultEq(coll.getName(),
+// Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field.
+assertAggResultEq(coll.getName(),
[
{$match: {_id: "New York"}},
{
@@ -219,22 +220,19 @@
],
[{_id: "New York", matchedId: "New York"}]);
- // Test that the $lookup stage resolves the view namespace referenced in the 'from' field of
- // another $lookup stage nested inside of it.
- assert.commandWorked(viewsDB.runCommand({
- create: "viewWithLookupInside",
- viewOn: coll.getName(),
- pipeline: [
- {
- $lookup:
- {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}
- },
- {$unwind: "$matched"},
- {$project: {_id: 1, matchedId: "$matched._id"}}
- ]
- }));
-
- assertAggResultEq(
+// Test that the $lookup stage resolves the view namespace referenced in the 'from' field of
+// another $lookup stage nested inside of it.
+assert.commandWorked(viewsDB.runCommand({
+ create: "viewWithLookupInside",
+ viewOn: coll.getName(),
+ pipeline: [
+ {$lookup: {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}},
+ {$unwind: "$matched"},
+ {$project: {_id: 1, matchedId: "$matched._id"}}
+ ]
+}));
+
+assertAggResultEq(
coll.getName(),
[
{$match: {_id: "New York"}},
@@ -251,9 +249,9 @@
],
[{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]);
- // Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field
- // of a $lookup stage nested inside of it.
- let graphLookupPipeline = [
+// Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field
+// of a $lookup stage nested inside of it.
+let graphLookupPipeline = [
{$match: {_id: "New York"}},
{
$graphLookup: {
@@ -268,13 +266,13 @@
{$project: {_id: 1, matchedId1: "$matched._id", matchedId2: "$matched.matchedId"}}
];
- assertAggResultEq(coll.getName(),
- graphLookupPipeline,
- [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]);
+assertAggResultEq(coll.getName(),
+ graphLookupPipeline,
+ [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]);
- // Test that the $lookup stage on a view with a nested $lookup on a different view resolves the
- // view namespaces referenced in their respective 'from' fields.
- assertAggResultEq(
+// Test that the $lookup stage on a view with a nested $lookup on a different view resolves the
+// view namespaces referenced in their respective 'from' fields.
+assertAggResultEq(
coll.getName(),
[
{$match: {_id: "Trenton"}},
@@ -307,11 +305,9 @@
}]
}]);
- // Test that the $facet stage resolves the view namespace referenced in the 'from' field of a
- // $lookup stage nested inside of a $graphLookup stage.
- assertAggResultEq(
- coll.getName(),
- [{$facet: {nested: graphLookupPipeline}}],
- [{nested: [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]}]);
-
+// Test that the $facet stage resolves the view namespace referenced in the 'from' field of a
+// $lookup stage nested inside of a $graphLookup stage.
+assertAggResultEq(coll.getName(),
+ [{$facet: {nested: graphLookupPipeline}}],
+ [{nested: [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]}]);
}());
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 916728d2c90..30019797aa9 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -60,585 +60,578 @@
*/
(function() {
- "use strict";
+"use strict";
- // Pre-written reasons for skipping a test.
- const isAnInternalCommand = "internal command";
- const isUnrelated = "is unrelated";
+// Pre-written reasons for skipping a test.
+const isAnInternalCommand = "internal command";
+const isUnrelated = "is unrelated";
- let viewsCommandTests = {
- _addShard: {skip: isAnInternalCommand},
- _cloneCatalogData: {skip: isAnInternalCommand},
- _cloneCollectionOptionsFromPrimaryShard: {skip: isAnInternalCommand},
- _configsvrAddShard: {skip: isAnInternalCommand},
- _configsvrAddShardToZone: {skip: isAnInternalCommand},
- _configsvrBalancerStart: {skip: isAnInternalCommand},
- _configsvrBalancerStatus: {skip: isAnInternalCommand},
- _configsvrBalancerStop: {skip: isAnInternalCommand},
- _configsvrCommitChunkMerge: {skip: isAnInternalCommand},
- _configsvrCommitChunkMigration: {skip: isAnInternalCommand},
- _configsvrCommitChunkSplit: {skip: isAnInternalCommand},
- _configsvrCommitMovePrimary: {skip: isAnInternalCommand},
- _configsvrCreateCollection: {skip: isAnInternalCommand},
- _configsvrCreateDatabase: {skip: isAnInternalCommand},
- _configsvrDropCollection: {skip: isAnInternalCommand},
- _configsvrDropDatabase: {skip: isAnInternalCommand},
- _configsvrEnableSharding: {skip: isAnInternalCommand},
- _configsvrMoveChunk: {skip: isAnInternalCommand},
- _configsvrMovePrimary: {skip: isAnInternalCommand},
- _configsvrRemoveShard: {skip: isAnInternalCommand},
- _configsvrRemoveShardFromZone: {skip: isAnInternalCommand},
- _configsvrShardCollection: {skip: isAnInternalCommand},
- _configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand},
- _cpuProfilerStart: {skip: isAnInternalCommand},
- _cpuProfilerStop: {skip: isAnInternalCommand},
- _flushDatabaseCacheUpdates: {skip: isUnrelated},
- _flushRoutingTableCacheUpdates: {skip: isUnrelated},
- _getNextSessionMods: {skip: isAnInternalCommand},
- _getUserCacheGeneration: {skip: isAnInternalCommand},
- _hashBSONElement: {skip: isAnInternalCommand},
- _isSelf: {skip: isAnInternalCommand},
- _mergeAuthzCollections: {skip: isAnInternalCommand},
- _migrateClone: {skip: isAnInternalCommand},
- _movePrimary: {skip: isAnInternalCommand},
- _recvChunkAbort: {skip: isAnInternalCommand},
- _recvChunkCommit: {skip: isAnInternalCommand},
- _recvChunkStart: {skip: isAnInternalCommand},
- _recvChunkStatus: {skip: isAnInternalCommand},
- _shardsvrShardCollection: {skip: isAnInternalCommand},
- _transferMods: {skip: isAnInternalCommand},
- abortTransaction: {skip: isUnrelated},
- addShard: {skip: isUnrelated},
- addShardToZone: {skip: isUnrelated},
- aggregate: {command: {aggregate: "view", pipeline: [{$match: {}}], cursor: {}}},
- appendOplogNote: {skip: isUnrelated},
- applyOps: {
- command: {applyOps: [{op: "i", o: {_id: 1}, ns: "test.view"}]},
- expectFailure: true,
- skipSharded: true,
- },
- authenticate: {skip: isUnrelated},
- availableQueryOptions: {skip: isAnInternalCommand},
- balancerStart: {skip: isUnrelated},
- balancerStatus: {skip: isUnrelated},
- balancerStop: {skip: isUnrelated},
- buildInfo: {skip: isUnrelated},
- captrunc: {
- command: {captrunc: "view", n: 2, inc: false},
- expectFailure: true,
- },
- checkShardingIndex: {skip: isUnrelated},
- cleanupOrphaned: {
- skip: "Tested in views/views_sharded.js",
- },
- clearLog: {skip: isUnrelated},
- cloneCollection: {skip: "Tested in noPassthroughWithMongod/clonecollection.js"},
- cloneCollectionAsCapped: {
- command: {cloneCollectionAsCapped: "view", toCollection: "testcapped", size: 10240},
- expectFailure: true,
- },
- collMod: {command: {collMod: "view", viewOn: "other", pipeline: []}},
- collStats: {skip: "Tested in views/views_coll_stats.js"},
- commitTransaction: {skip: isUnrelated},
- compact: {command: {compact: "view", force: true}, expectFailure: true, skipSharded: true},
- configureFailPoint: {skip: isUnrelated},
- connPoolStats: {skip: isUnrelated},
- connPoolSync: {skip: isUnrelated},
- connectionStatus: {skip: isUnrelated},
- convertToCapped: {command: {convertToCapped: "view", size: 12345}, expectFailure: true},
- coordinateCommitTransaction: {skip: isUnrelated},
- count: {command: {count: "view"}},
- cpuload: {skip: isAnInternalCommand},
- create: {skip: "tested in views/views_creation.js"},
- createIndexes: {
- command: {createIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]},
- expectFailure: true,
+let viewsCommandTests = {
+ _addShard: {skip: isAnInternalCommand},
+ _cloneCatalogData: {skip: isAnInternalCommand},
+ _cloneCollectionOptionsFromPrimaryShard: {skip: isAnInternalCommand},
+ _configsvrAddShard: {skip: isAnInternalCommand},
+ _configsvrAddShardToZone: {skip: isAnInternalCommand},
+ _configsvrBalancerStart: {skip: isAnInternalCommand},
+ _configsvrBalancerStatus: {skip: isAnInternalCommand},
+ _configsvrBalancerStop: {skip: isAnInternalCommand},
+ _configsvrCommitChunkMerge: {skip: isAnInternalCommand},
+ _configsvrCommitChunkMigration: {skip: isAnInternalCommand},
+ _configsvrCommitChunkSplit: {skip: isAnInternalCommand},
+ _configsvrCommitMovePrimary: {skip: isAnInternalCommand},
+ _configsvrCreateCollection: {skip: isAnInternalCommand},
+ _configsvrCreateDatabase: {skip: isAnInternalCommand},
+ _configsvrDropCollection: {skip: isAnInternalCommand},
+ _configsvrDropDatabase: {skip: isAnInternalCommand},
+ _configsvrEnableSharding: {skip: isAnInternalCommand},
+ _configsvrMoveChunk: {skip: isAnInternalCommand},
+ _configsvrMovePrimary: {skip: isAnInternalCommand},
+ _configsvrRemoveShard: {skip: isAnInternalCommand},
+ _configsvrRemoveShardFromZone: {skip: isAnInternalCommand},
+ _configsvrShardCollection: {skip: isAnInternalCommand},
+ _configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand},
+ _cpuProfilerStart: {skip: isAnInternalCommand},
+ _cpuProfilerStop: {skip: isAnInternalCommand},
+ _flushDatabaseCacheUpdates: {skip: isUnrelated},
+ _flushRoutingTableCacheUpdates: {skip: isUnrelated},
+ _getNextSessionMods: {skip: isAnInternalCommand},
+ _getUserCacheGeneration: {skip: isAnInternalCommand},
+ _hashBSONElement: {skip: isAnInternalCommand},
+ _isSelf: {skip: isAnInternalCommand},
+ _mergeAuthzCollections: {skip: isAnInternalCommand},
+ _migrateClone: {skip: isAnInternalCommand},
+ _movePrimary: {skip: isAnInternalCommand},
+ _recvChunkAbort: {skip: isAnInternalCommand},
+ _recvChunkCommit: {skip: isAnInternalCommand},
+ _recvChunkStart: {skip: isAnInternalCommand},
+ _recvChunkStatus: {skip: isAnInternalCommand},
+ _shardsvrShardCollection: {skip: isAnInternalCommand},
+ _transferMods: {skip: isAnInternalCommand},
+ abortTransaction: {skip: isUnrelated},
+ addShard: {skip: isUnrelated},
+ addShardToZone: {skip: isUnrelated},
+ aggregate: {command: {aggregate: "view", pipeline: [{$match: {}}], cursor: {}}},
+ appendOplogNote: {skip: isUnrelated},
+ applyOps: {
+ command: {applyOps: [{op: "i", o: {_id: 1}, ns: "test.view"}]},
+ expectFailure: true,
+ skipSharded: true,
+ },
+ authenticate: {skip: isUnrelated},
+ availableQueryOptions: {skip: isAnInternalCommand},
+ balancerStart: {skip: isUnrelated},
+ balancerStatus: {skip: isUnrelated},
+ balancerStop: {skip: isUnrelated},
+ buildInfo: {skip: isUnrelated},
+ captrunc: {
+ command: {captrunc: "view", n: 2, inc: false},
+ expectFailure: true,
+ },
+ checkShardingIndex: {skip: isUnrelated},
+ cleanupOrphaned: {
+ skip: "Tested in views/views_sharded.js",
+ },
+ clearLog: {skip: isUnrelated},
+ cloneCollection: {skip: "Tested in noPassthroughWithMongod/clonecollection.js"},
+ cloneCollectionAsCapped: {
+ command: {cloneCollectionAsCapped: "view", toCollection: "testcapped", size: 10240},
+ expectFailure: true,
+ },
+ collMod: {command: {collMod: "view", viewOn: "other", pipeline: []}},
+ collStats: {skip: "Tested in views/views_coll_stats.js"},
+ commitTransaction: {skip: isUnrelated},
+ compact: {command: {compact: "view", force: true}, expectFailure: true, skipSharded: true},
+ configureFailPoint: {skip: isUnrelated},
+ connPoolStats: {skip: isUnrelated},
+ connPoolSync: {skip: isUnrelated},
+ connectionStatus: {skip: isUnrelated},
+ convertToCapped: {command: {convertToCapped: "view", size: 12345}, expectFailure: true},
+ coordinateCommitTransaction: {skip: isUnrelated},
+ count: {command: {count: "view"}},
+ cpuload: {skip: isAnInternalCommand},
+ create: {skip: "tested in views/views_creation.js"},
+ createIndexes: {
+ command: {createIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]},
+ expectFailure: true,
+ },
+ createRole: {
+ command: {createRole: "testrole", privileges: [], roles: []},
+ setup: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
},
- createRole: {
- command: {createRole: "testrole", privileges: [], roles: []},
- setup: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
- },
- teardown: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
- }
- },
- createUser: {
- command: {createUser: "testuser", pwd: "testpass", roles: []},
- setup: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1}));
- },
- teardown: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1}));
- }
- },
- currentOp: {skip: isUnrelated},
- dataSize: {
- command: {dataSize: "test.view"},
- expectFailure: true,
- },
- dbCheck: {command: {dbCheck: "view"}, expectFailure: true},
- dbHash: {
- command: function(conn) {
- let getHash = function() {
- let cmd = {dbHash: 1};
- let res = conn.runCommand(cmd);
- assert.commandWorked(res, tojson(cmd));
- return res.collections["system.views"];
- };
- // The checksum below should change if we change the views, but not otherwise.
- let hash1 = getHash();
- assert.commandWorked(conn.runCommand({create: "view2", viewOn: "view"}),
- "could not create view 'view2' on 'view'");
- let hash2 = getHash();
- assert.neq(hash1, hash2, "expected hash to change after creating new view");
- assert.commandWorked(conn.runCommand({drop: "view2"}), "problem dropping view2");
- let hash3 = getHash();
- assert.eq(hash1, hash3, "hash should be the same again after removing 'view2'");
- }
- },
- dbStats: {skip: "TODO(SERVER-25948)"},
- delete: {command: {delete: "view", deletes: [{q: {x: 1}, limit: 1}]}, expectFailure: true},
- distinct: {command: {distinct: "view", key: "_id"}},
- doTxn: {
- command: {
- doTxn: [{op: "i", o: {_id: 1}, ns: "test.view"}],
- txnNumber: NumberLong("0"),
- lsid: {id: UUID()}
- },
- expectFailure: true,
- expectedErrorCode: [
- ErrorCodes.CommandNotSupportedOnView,
- ErrorCodes.CommandNotSupported,
- ErrorCodes.IllegalOperation
- ],
- skipSharded: true,
- },
- driverOIDTest: {skip: isUnrelated},
- drop: {command: {drop: "view"}},
- dropAllRolesFromDatabase: {skip: isUnrelated},
- dropAllUsersFromDatabase: {skip: isUnrelated},
- dropConnections: {skip: isUnrelated},
- dropDatabase: {command: {dropDatabase: 1}},
- dropIndexes: {command: {dropIndexes: "view"}, expectFailure: true},
- dropRole: {
- command: {dropRole: "testrole"},
- setup: function(conn) {
- assert.commandWorked(
- conn.runCommand({createRole: "testrole", privileges: [], roles: []}));
- },
- teardown: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
- }
- },
- dropUser: {skip: isUnrelated},
- echo: {skip: isUnrelated},
- emptycapped: {
- command: {emptycapped: "view"},
- expectFailure: true,
+ teardown: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
+ }
+ },
+ createUser: {
+ command: {createUser: "testuser", pwd: "testpass", roles: []},
+ setup: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1}));
},
- enableSharding: {skip: "Tested as part of shardCollection"},
- endSessions: {skip: isUnrelated},
- explain: {command: {explain: {count: "view"}}},
- features: {skip: isUnrelated},
- filemd5: {skip: isUnrelated},
- find: {skip: "tested in views/views_find.js & views/views_sharded.js"},
- findAndModify: {
- command: {findAndModify: "view", query: {a: 1}, update: {$set: {a: 2}}},
- expectFailure: true
+ teardown: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1}));
+ }
+ },
+ currentOp: {skip: isUnrelated},
+ dataSize: {
+ command: {dataSize: "test.view"},
+ expectFailure: true,
+ },
+ dbCheck: {command: {dbCheck: "view"}, expectFailure: true},
+ dbHash: {
+ command: function(conn) {
+ let getHash = function() {
+ let cmd = {dbHash: 1};
+ let res = conn.runCommand(cmd);
+ assert.commandWorked(res, tojson(cmd));
+ return res.collections["system.views"];
+ };
+ // The checksum below should change if we change the views, but not otherwise.
+ let hash1 = getHash();
+ assert.commandWorked(conn.runCommand({create: "view2", viewOn: "view"}),
+ "could not create view 'view2' on 'view'");
+ let hash2 = getHash();
+ assert.neq(hash1, hash2, "expected hash to change after creating new view");
+ assert.commandWorked(conn.runCommand({drop: "view2"}), "problem dropping view2");
+ let hash3 = getHash();
+ assert.eq(hash1, hash3, "hash should be the same again after removing 'view2'");
+ }
+ },
+ dbStats: {skip: "TODO(SERVER-25948)"},
+ delete: {command: {delete: "view", deletes: [{q: {x: 1}, limit: 1}]}, expectFailure: true},
+ distinct: {command: {distinct: "view", key: "_id"}},
+ doTxn: {
+ command: {
+ doTxn: [{op: "i", o: {_id: 1}, ns: "test.view"}],
+ txnNumber: NumberLong("0"),
+ lsid: {id: UUID()}
},
- flushRouterConfig: {skip: isUnrelated},
- fsync: {skip: isUnrelated},
- fsyncUnlock: {skip: isUnrelated},
- getDatabaseVersion: {skip: isUnrelated},
- geoSearch: {
- command: {
- geoSearch: "view",
- search: {},
- near: [-50, 37],
- },
- expectFailure: true
+ expectFailure: true,
+ expectedErrorCode: [
+ ErrorCodes.CommandNotSupportedOnView,
+ ErrorCodes.CommandNotSupported,
+ ErrorCodes.IllegalOperation
+ ],
+ skipSharded: true,
+ },
+ driverOIDTest: {skip: isUnrelated},
+ drop: {command: {drop: "view"}},
+ dropAllRolesFromDatabase: {skip: isUnrelated},
+ dropAllUsersFromDatabase: {skip: isUnrelated},
+ dropConnections: {skip: isUnrelated},
+ dropDatabase: {command: {dropDatabase: 1}},
+ dropIndexes: {command: {dropIndexes: "view"}, expectFailure: true},
+ dropRole: {
+ command: {dropRole: "testrole"},
+ setup: function(conn) {
+ assert.commandWorked(
+ conn.runCommand({createRole: "testrole", privileges: [], roles: []}));
},
- getCmdLineOpts: {skip: isUnrelated},
- getDiagnosticData: {skip: isUnrelated},
- getFreeMonitoringStatus: {skip: isUnrelated},
- getLastError: {skip: isUnrelated},
- getLog: {skip: isUnrelated},
- getMore: {
- setup: function(conn) {
- assert.writeOK(conn.collection.remove({}));
- assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
- },
- command: function(conn) {
- function testGetMoreForCommand(cmd) {
- let res = conn.runCommand(cmd);
- assert.commandWorked(res, tojson(cmd));
- let cursor = res.cursor;
- assert.eq(cursor.ns,
- "test.view",
- "expected view namespace in cursor: " + tojson(cursor));
- let expectedFirstBatch = [{_id: 1}, {_id: 2}];
- assert.eq(cursor.firstBatch, expectedFirstBatch, "returned wrong firstBatch");
- let getmoreCmd = {getMore: cursor.id, collection: "view"};
- res = conn.runCommand(getmoreCmd);
-
- assert.commandWorked(res, tojson(getmoreCmd));
- assert.eq("test.view",
- res.cursor.ns,
- "expected view namespace in cursor: " + tojson(res));
- }
- // find command.
- let findCmd = {find: "view", filter: {_id: {$gt: 0}}, batchSize: 2};
- testGetMoreForCommand(findCmd);
-
- // aggregate command.
- let aggCmd = {
- aggregate: "view",
- pipeline: [{$match: {_id: {$gt: 0}}}],
- cursor: {batchSize: 2}
- };
- testGetMoreForCommand(aggCmd);
- }
+ teardown: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
+ }
+ },
+ dropUser: {skip: isUnrelated},
+ echo: {skip: isUnrelated},
+ emptycapped: {
+ command: {emptycapped: "view"},
+ expectFailure: true,
+ },
+ enableSharding: {skip: "Tested as part of shardCollection"},
+ endSessions: {skip: isUnrelated},
+ explain: {command: {explain: {count: "view"}}},
+ features: {skip: isUnrelated},
+ filemd5: {skip: isUnrelated},
+ find: {skip: "tested in views/views_find.js & views/views_sharded.js"},
+ findAndModify: {
+ command: {findAndModify: "view", query: {a: 1}, update: {$set: {a: 2}}},
+ expectFailure: true
+ },
+ flushRouterConfig: {skip: isUnrelated},
+ fsync: {skip: isUnrelated},
+ fsyncUnlock: {skip: isUnrelated},
+ getDatabaseVersion: {skip: isUnrelated},
+ geoSearch: {
+ command: {
+ geoSearch: "view",
+ search: {},
+ near: [-50, 37],
},
- getParameter: {skip: isUnrelated},
- getShardMap: {skip: isUnrelated},
- getShardVersion: {
- command: {getShardVersion: "test.view"},
- isAdminCommand: true,
- expectFailure: true,
- skipSharded: true, // mongos is tested in views/views_sharded.js
+ expectFailure: true
+ },
+ getCmdLineOpts: {skip: isUnrelated},
+ getDiagnosticData: {skip: isUnrelated},
+ getFreeMonitoringStatus: {skip: isUnrelated},
+ getLastError: {skip: isUnrelated},
+ getLog: {skip: isUnrelated},
+ getMore: {
+ setup: function(conn) {
+ assert.writeOK(conn.collection.remove({}));
+ assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
},
- getnonce: {skip: isUnrelated},
- godinsert: {skip: isAnInternalCommand},
- grantPrivilegesToRole: {skip: "tested in auth/commands_user_defined_roles.js"},
- grantRolesToRole: {skip: isUnrelated},
- grantRolesToUser: {skip: isUnrelated},
- handshake: {skip: isUnrelated},
- hostInfo: {skip: isUnrelated},
- httpClientRequest: {skip: isAnInternalCommand},
- insert: {command: {insert: "view", documents: [{x: 1}]}, expectFailure: true},
- invalidateUserCache: {skip: isUnrelated},
- isdbgrid: {skip: isUnrelated},
- isMaster: {skip: isUnrelated},
- killCursors: {
- setup: function(conn) {
- assert.writeOK(conn.collection.remove({}));
- assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
- },
- command: function(conn) {
- // First get and check a partial result for an aggregate command.
- let aggCmd = {
- aggregate: "view",
- pipeline: [{$sort: {_id: 1}}],
- cursor: {batchSize: 2}
- };
- let res = conn.runCommand(aggCmd);
- assert.commandWorked(res, tojson(aggCmd));
+ command: function(conn) {
+ function testGetMoreForCommand(cmd) {
+ let res = conn.runCommand(cmd);
+ assert.commandWorked(res, tojson(cmd));
let cursor = res.cursor;
assert.eq(
cursor.ns, "test.view", "expected view namespace in cursor: " + tojson(cursor));
let expectedFirstBatch = [{_id: 1}, {_id: 2}];
- assert.eq(
- cursor.firstBatch, expectedFirstBatch, "aggregate returned wrong firstBatch");
+ assert.eq(cursor.firstBatch, expectedFirstBatch, "returned wrong firstBatch");
+ let getmoreCmd = {getMore: cursor.id, collection: "view"};
+ res = conn.runCommand(getmoreCmd);
- // Then check correct execution of the killCursors command.
- let killCursorsCmd = {killCursors: "view", cursors: [cursor.id]};
- res = conn.runCommand(killCursorsCmd);
- assert.commandWorked(res, tojson(killCursorsCmd));
- let expectedRes = {
- cursorsKilled: [cursor.id],
- cursorsNotFound: [],
- cursorsAlive: [],
- cursorsUnknown: [],
- ok: 1
- };
- delete res.operationTime;
- delete res.$clusterTime;
- assert.eq(expectedRes, res, "unexpected result for: " + tojson(killCursorsCmd));
+ assert.commandWorked(res, tojson(getmoreCmd));
+ assert.eq("test.view",
+ res.cursor.ns,
+ "expected view namespace in cursor: " + tojson(res));
}
+ // find command.
+ let findCmd = {find: "view", filter: {_id: {$gt: 0}}, batchSize: 2};
+ testGetMoreForCommand(findCmd);
+
+ // aggregate command.
+ let aggCmd = {
+ aggregate: "view",
+ pipeline: [{$match: {_id: {$gt: 0}}}],
+ cursor: {batchSize: 2}
+ };
+ testGetMoreForCommand(aggCmd);
+ }
+ },
+ getParameter: {skip: isUnrelated},
+ getShardMap: {skip: isUnrelated},
+ getShardVersion: {
+ command: {getShardVersion: "test.view"},
+ isAdminCommand: true,
+ expectFailure: true,
+ skipSharded: true, // mongos is tested in views/views_sharded.js
+ },
+ getnonce: {skip: isUnrelated},
+ godinsert: {skip: isAnInternalCommand},
+ grantPrivilegesToRole: {skip: "tested in auth/commands_user_defined_roles.js"},
+ grantRolesToRole: {skip: isUnrelated},
+ grantRolesToUser: {skip: isUnrelated},
+ handshake: {skip: isUnrelated},
+ hostInfo: {skip: isUnrelated},
+ httpClientRequest: {skip: isAnInternalCommand},
+ insert: {command: {insert: "view", documents: [{x: 1}]}, expectFailure: true},
+ invalidateUserCache: {skip: isUnrelated},
+ isdbgrid: {skip: isUnrelated},
+ isMaster: {skip: isUnrelated},
+ killCursors: {
+ setup: function(conn) {
+ assert.writeOK(conn.collection.remove({}));
+ assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
},
- killOp: {skip: isUnrelated},
- killSessions: {skip: isUnrelated},
- killAllSessions: {skip: isUnrelated},
- killAllSessionsByPattern: {skip: isUnrelated},
- listCollections: {skip: "tested in views/views_creation.js"},
- listCommands: {skip: isUnrelated},
- listDatabases: {skip: isUnrelated},
- listIndexes: {command: {listIndexes: "view"}, expectFailure: true},
- listShards: {skip: isUnrelated},
- lockInfo: {skip: isUnrelated},
- logApplicationMessage: {skip: isUnrelated},
- logRotate: {skip: isUnrelated},
- logout: {skip: isUnrelated},
- makeSnapshot: {skip: isAnInternalCommand},
- mapReduce: {
- command:
- {mapReduce: "view", map: function() {}, reduce: function(key, vals) {}, out: "out"},
- expectFailure: true
- },
- "mapreduce.shardedfinish": {skip: isAnInternalCommand},
- mergeChunks: {
- command: {mergeChunks: "test.view", bounds: [{x: 0}, {x: 10}]},
- skipStandalone: true,
+ command: function(conn) {
+ // First get and check a partial result for an aggregate command.
+ let aggCmd = {aggregate: "view", pipeline: [{$sort: {_id: 1}}], cursor: {batchSize: 2}};
+ let res = conn.runCommand(aggCmd);
+ assert.commandWorked(res, tojson(aggCmd));
+ let cursor = res.cursor;
+ assert.eq(
+ cursor.ns, "test.view", "expected view namespace in cursor: " + tojson(cursor));
+ let expectedFirstBatch = [{_id: 1}, {_id: 2}];
+ assert.eq(cursor.firstBatch, expectedFirstBatch, "aggregate returned wrong firstBatch");
+
+ // Then check correct execution of the killCursors command.
+ let killCursorsCmd = {killCursors: "view", cursors: [cursor.id]};
+ res = conn.runCommand(killCursorsCmd);
+ assert.commandWorked(res, tojson(killCursorsCmd));
+ let expectedRes = {
+ cursorsKilled: [cursor.id],
+ cursorsNotFound: [],
+ cursorsAlive: [],
+ cursorsUnknown: [],
+ ok: 1
+ };
+ delete res.operationTime;
+ delete res.$clusterTime;
+ assert.eq(expectedRes, res, "unexpected result for: " + tojson(killCursorsCmd));
+ }
+ },
+ killOp: {skip: isUnrelated},
+ killSessions: {skip: isUnrelated},
+ killAllSessions: {skip: isUnrelated},
+ killAllSessionsByPattern: {skip: isUnrelated},
+ listCollections: {skip: "tested in views/views_creation.js"},
+ listCommands: {skip: isUnrelated},
+ listDatabases: {skip: isUnrelated},
+ listIndexes: {command: {listIndexes: "view"}, expectFailure: true},
+ listShards: {skip: isUnrelated},
+ lockInfo: {skip: isUnrelated},
+ logApplicationMessage: {skip: isUnrelated},
+ logRotate: {skip: isUnrelated},
+ logout: {skip: isUnrelated},
+ makeSnapshot: {skip: isAnInternalCommand},
+ mapReduce: {
+ command:
+ {mapReduce: "view", map: function() {}, reduce: function(key, vals) {}, out: "out"},
+ expectFailure: true
+ },
+ "mapreduce.shardedfinish": {skip: isAnInternalCommand},
+ mergeChunks: {
+ command: {mergeChunks: "test.view", bounds: [{x: 0}, {x: 10}]},
+ skipStandalone: true,
+ isAdminCommand: true,
+ expectFailure: true,
+ expectedErrorCode: ErrorCodes.NamespaceNotSharded,
+ },
+ moveChunk: {
+ command: {moveChunk: "test.view"},
+ skipStandalone: true,
+ isAdminCommand: true,
+ expectFailure: true,
+ expectedErrorCode: ErrorCodes.NamespaceNotSharded,
+ },
+ movePrimary: {skip: "Tested in sharding/movePrimary1.js"},
+ multicast: {skip: isUnrelated},
+ netstat: {skip: isAnInternalCommand},
+ ping: {command: {ping: 1}},
+ planCacheClear: {command: {planCacheClear: "view"}, expectFailure: true},
+ planCacheClearFilters: {command: {planCacheClearFilters: "view"}, expectFailure: true},
+ planCacheListFilters: {command: {planCacheListFilters: "view"}, expectFailure: true},
+ planCacheListPlans: {command: {planCacheListPlans: "view"}, expectFailure: true},
+ planCacheListQueryShapes: {command: {planCacheListQueryShapes: "view"}, expectFailure: true},
+ planCacheSetFilter: {command: {planCacheSetFilter: "view"}, expectFailure: true},
+ prepareTransaction: {skip: isUnrelated},
+ profile: {skip: isUnrelated},
+ refreshLogicalSessionCacheNow: {skip: isAnInternalCommand},
+ reapLogicalSessionCacheNow: {skip: isAnInternalCommand},
+ refreshSessions: {skip: isUnrelated},
+ restartCatalog: {skip: isAnInternalCommand},
+ reIndex: {command: {reIndex: "view"}, expectFailure: true},
+ removeShard: {skip: isUnrelated},
+ removeShardFromZone: {skip: isUnrelated},
+ renameCollection: [
+ {
isAdminCommand: true,
+ command: {renameCollection: "test.view", to: "test.otherview"},
expectFailure: true,
- expectedErrorCode: ErrorCodes.NamespaceNotSharded,
+ skipSharded: true,
},
- moveChunk: {
- command: {moveChunk: "test.view"},
- skipStandalone: true,
+ {
isAdminCommand: true,
+ command: {renameCollection: "test.collection", to: "test.view"},
expectFailure: true,
- expectedErrorCode: ErrorCodes.NamespaceNotSharded,
- },
- movePrimary: {skip: "Tested in sharding/movePrimary1.js"},
- multicast: {skip: isUnrelated},
- netstat: {skip: isAnInternalCommand},
- ping: {command: {ping: 1}},
- planCacheClear: {command: {planCacheClear: "view"}, expectFailure: true},
- planCacheClearFilters: {command: {planCacheClearFilters: "view"}, expectFailure: true},
- planCacheListFilters: {command: {planCacheListFilters: "view"}, expectFailure: true},
- planCacheListPlans: {command: {planCacheListPlans: "view"}, expectFailure: true},
- planCacheListQueryShapes:
- {command: {planCacheListQueryShapes: "view"}, expectFailure: true},
- planCacheSetFilter: {command: {planCacheSetFilter: "view"}, expectFailure: true},
- prepareTransaction: {skip: isUnrelated},
- profile: {skip: isUnrelated},
- refreshLogicalSessionCacheNow: {skip: isAnInternalCommand},
- reapLogicalSessionCacheNow: {skip: isAnInternalCommand},
- refreshSessions: {skip: isUnrelated},
- restartCatalog: {skip: isAnInternalCommand},
- reIndex: {command: {reIndex: "view"}, expectFailure: true},
- removeShard: {skip: isUnrelated},
- removeShardFromZone: {skip: isUnrelated},
- renameCollection: [
- {
- isAdminCommand: true,
- command: {renameCollection: "test.view", to: "test.otherview"},
- expectFailure: true,
- skipSharded: true,
- },
- {
- isAdminCommand: true,
- command: {renameCollection: "test.collection", to: "test.view"},
- expectFailure: true,
- expectedErrorCode: ErrorCodes.NamespaceExists,
- skipSharded: true,
- }
- ],
- repairCursor: {command: {repairCursor: "view"}, expectFailure: true},
- repairDatabase: {skip: isUnrelated},
- replSetAbortPrimaryCatchUp: {skip: isUnrelated},
- replSetFreeze: {skip: isUnrelated},
- replSetGetConfig: {skip: isUnrelated},
- replSetGetRBID: {skip: isUnrelated},
- replSetGetStatus: {skip: isUnrelated},
- replSetHeartbeat: {skip: isUnrelated},
- replSetInitiate: {skip: isUnrelated},
- replSetMaintenance: {skip: isUnrelated},
- replSetReconfig: {skip: isUnrelated},
- replSetRequestVotes: {skip: isUnrelated},
- replSetStepDown: {skip: isUnrelated},
- replSetStepUp: {skip: isUnrelated},
- replSetSyncFrom: {skip: isUnrelated},
- replSetTest: {skip: isUnrelated},
- replSetUpdatePosition: {skip: isUnrelated},
- replSetResizeOplog: {skip: isUnrelated},
- resetError: {skip: isUnrelated},
- revokePrivilegesFromRole: {
- command: {
- revokePrivilegesFromRole: "testrole",
- privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}]
- },
- setup: function(conn) {
- assert.commandWorked(
- conn.runCommand({createRole: "testrole", privileges: [], roles: []}));
- },
- teardown: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
- }
- },
- revokeRolesFromRole: {skip: isUnrelated},
- revokeRolesFromUser: {skip: isUnrelated},
- rolesInfo: {skip: isUnrelated},
- saslContinue: {skip: isUnrelated},
- saslStart: {skip: isUnrelated},
- serverStatus: {command: {serverStatus: 1}, skip: isUnrelated},
- setIndexCommitQuorum: {skip: isUnrelated},
- setCommittedSnapshot: {skip: isAnInternalCommand},
- setFeatureCompatibilityVersion: {skip: isUnrelated},
- setFreeMonitoring: {skip: isUnrelated},
- setParameter: {skip: isUnrelated},
- setShardVersion: {skip: isUnrelated},
- shardCollection: {
- command: {shardCollection: "test.view", key: {_id: 1}},
- setup: function(conn) {
- assert.commandWorked(conn.adminCommand({enableSharding: "test"}));
- },
- skipStandalone: true,
- expectFailure: true,
- isAdminCommand: true,
+ expectedErrorCode: ErrorCodes.NamespaceExists,
+ skipSharded: true,
+ }
+ ],
+ repairCursor: {command: {repairCursor: "view"}, expectFailure: true},
+ repairDatabase: {skip: isUnrelated},
+ replSetAbortPrimaryCatchUp: {skip: isUnrelated},
+ replSetFreeze: {skip: isUnrelated},
+ replSetGetConfig: {skip: isUnrelated},
+ replSetGetRBID: {skip: isUnrelated},
+ replSetGetStatus: {skip: isUnrelated},
+ replSetHeartbeat: {skip: isUnrelated},
+ replSetInitiate: {skip: isUnrelated},
+ replSetMaintenance: {skip: isUnrelated},
+ replSetReconfig: {skip: isUnrelated},
+ replSetRequestVotes: {skip: isUnrelated},
+ replSetStepDown: {skip: isUnrelated},
+ replSetStepUp: {skip: isUnrelated},
+ replSetSyncFrom: {skip: isUnrelated},
+ replSetTest: {skip: isUnrelated},
+ replSetUpdatePosition: {skip: isUnrelated},
+ replSetResizeOplog: {skip: isUnrelated},
+ resetError: {skip: isUnrelated},
+ revokePrivilegesFromRole: {
+ command: {
+ revokePrivilegesFromRole: "testrole",
+ privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}]
},
- shardConnPoolStats: {skip: isUnrelated},
- shardingState: {skip: isUnrelated},
- shutdown: {skip: isUnrelated},
- sleep: {skip: isUnrelated},
- split: {
- command: {split: "test.view", find: {_id: 1}},
- skipStandalone: true,
- expectFailure: true,
- expectedErrorCode: ErrorCodes.NamespaceNotSharded,
- isAdminCommand: true,
+ setup: function(conn) {
+ assert.commandWorked(
+ conn.runCommand({createRole: "testrole", privileges: [], roles: []}));
},
- splitChunk: {
- command: {
- splitChunk: "test.view",
- from: "shard0000",
- min: {x: MinKey},
- max: {x: 0},
- keyPattern: {x: 1},
- splitKeys: [{x: -2}, {x: -1}],
- shardVersion: [Timestamp(1, 2), ObjectId()]
- },
- skipSharded: true,
- expectFailure: true,
- expectedErrorCode: 193,
- isAdminCommand: true,
+ teardown: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
+ }
+ },
+ revokeRolesFromRole: {skip: isUnrelated},
+ revokeRolesFromUser: {skip: isUnrelated},
+ rolesInfo: {skip: isUnrelated},
+ saslContinue: {skip: isUnrelated},
+ saslStart: {skip: isUnrelated},
+ serverStatus: {command: {serverStatus: 1}, skip: isUnrelated},
+ setIndexCommitQuorum: {skip: isUnrelated},
+ setCommittedSnapshot: {skip: isAnInternalCommand},
+ setFeatureCompatibilityVersion: {skip: isUnrelated},
+ setFreeMonitoring: {skip: isUnrelated},
+ setParameter: {skip: isUnrelated},
+ setShardVersion: {skip: isUnrelated},
+ shardCollection: {
+ command: {shardCollection: "test.view", key: {_id: 1}},
+ setup: function(conn) {
+ assert.commandWorked(conn.adminCommand({enableSharding: "test"}));
},
- splitVector: {
- command: {
- splitVector: "test.view",
- keyPattern: {x: 1},
- maxChunkSize: 1,
- },
- expectFailure: true,
+ skipStandalone: true,
+ expectFailure: true,
+ isAdminCommand: true,
+ },
+ shardConnPoolStats: {skip: isUnrelated},
+ shardingState: {skip: isUnrelated},
+ shutdown: {skip: isUnrelated},
+ sleep: {skip: isUnrelated},
+ split: {
+ command: {split: "test.view", find: {_id: 1}},
+ skipStandalone: true,
+ expectFailure: true,
+ expectedErrorCode: ErrorCodes.NamespaceNotSharded,
+ isAdminCommand: true,
+ },
+ splitChunk: {
+ command: {
+ splitChunk: "test.view",
+ from: "shard0000",
+ min: {x: MinKey},
+ max: {x: 0},
+ keyPattern: {x: 1},
+ splitKeys: [{x: -2}, {x: -1}],
+ shardVersion: [Timestamp(1, 2), ObjectId()]
},
- stageDebug: {skip: isAnInternalCommand},
- startRecordingTraffic: {skip: isUnrelated},
- startSession: {skip: isAnInternalCommand},
- stopRecordingTraffic: {skip: isUnrelated},
- top: {skip: "tested in views/views_stats.js"},
- touch: {
- command: {touch: "view", data: true},
- expectFailure: true,
+ skipSharded: true,
+ expectFailure: true,
+ expectedErrorCode: 193,
+ isAdminCommand: true,
+ },
+ splitVector: {
+ command: {
+ splitVector: "test.view",
+ keyPattern: {x: 1},
+ maxChunkSize: 1,
},
- twoPhaseCreateIndexes: {
- command: {twoPhaseCreateIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]},
- expectFailure: true,
+ expectFailure: true,
+ },
+ stageDebug: {skip: isAnInternalCommand},
+ startRecordingTraffic: {skip: isUnrelated},
+ startSession: {skip: isAnInternalCommand},
+ stopRecordingTraffic: {skip: isUnrelated},
+ top: {skip: "tested in views/views_stats.js"},
+ touch: {
+ command: {touch: "view", data: true},
+ expectFailure: true,
+ },
+ twoPhaseCreateIndexes: {
+ command: {twoPhaseCreateIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]},
+ expectFailure: true,
+ },
+ unsetSharding: {skip: isAnInternalCommand},
+ update: {command: {update: "view", updates: [{q: {x: 1}, u: {x: 2}}]}, expectFailure: true},
+ updateRole: {
+ command: {
+ updateRole: "testrole",
+ privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}]
},
- unsetSharding: {skip: isAnInternalCommand},
- update: {command: {update: "view", updates: [{q: {x: 1}, u: {x: 2}}]}, expectFailure: true},
- updateRole: {
- command: {
- updateRole: "testrole",
- privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}]
- },
- setup: function(conn) {
- assert.commandWorked(
- conn.runCommand({createRole: "testrole", privileges: [], roles: []}));
- },
- teardown: function(conn) {
- assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
- }
+ setup: function(conn) {
+ assert.commandWorked(
+ conn.runCommand({createRole: "testrole", privileges: [], roles: []}));
},
- updateUser: {skip: isUnrelated},
- updateZoneKeyRange: {skip: isUnrelated},
- usersInfo: {skip: isUnrelated},
- validate: {command: {validate: "view"}, expectFailure: true},
- waitForOngoingChunkSplits: {skip: isUnrelated},
- voteCommitIndexBuild: {skip: isUnrelated},
- voteCommitTransaction: {skip: isUnrelated},
- voteAbortTransaction: {skip: isUnrelated},
- whatsmyuri: {skip: isUnrelated}
- };
+ teardown: function(conn) {
+ assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1}));
+ }
+ },
+ updateUser: {skip: isUnrelated},
+ updateZoneKeyRange: {skip: isUnrelated},
+ usersInfo: {skip: isUnrelated},
+ validate: {command: {validate: "view"}, expectFailure: true},
+ waitForOngoingChunkSplits: {skip: isUnrelated},
+ voteCommitIndexBuild: {skip: isUnrelated},
+ voteCommitTransaction: {skip: isUnrelated},
+ voteAbortTransaction: {skip: isUnrelated},
+ whatsmyuri: {skip: isUnrelated}
+};
- /**
- * Helper for commands or writes that are expected to fail; checks the result 'res' of either.
- * If 'code' is null we only check for failure; otherwise we also confirm that the error code
- * matches. On assertion failure, 'msg' is printed.
- */
- let assertCommandOrWriteFailed = function(res, code, msg) {
- if (res.writeErrors !== undefined)
- assert.neq(0, res.writeErrors.length, msg);
- else if (res.code !== null)
- assert.commandFailedWithCode(res, code, msg);
- else
- assert.commandFailed(res, msg);
- };
+/**
+ * Helper for commands or writes that are expected to fail; checks the result 'res' of either.
+ * If 'code' is null we only check for failure; otherwise we also confirm that the error code
+ * matches. On assertion failure, 'msg' is printed.
+ */
+let assertCommandOrWriteFailed = function(res, code, msg) {
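+    // Write commands report per-document failures through a 'writeErrors' array rather than a
+    // top-level error code, so check for that shape first.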
+ if (res.writeErrors !== undefined)
+ assert.neq(0, res.writeErrors.length, msg);
+ else if (res.code !== null)
+ assert.commandFailedWithCode(res, code, msg);
+ else
+ assert.commandFailed(res, msg);
+};
- // Are we on a mongos?
- var isMaster = db.runCommand("ismaster");
- assert.commandWorked(isMaster);
- var isMongos = (isMaster.msg === "isdbgrid");
+// Are we on a mongos?
+var isMaster = db.runCommand("ismaster");
+assert.commandWorked(isMaster);
+var isMongos = (isMaster.msg === "isdbgrid");
- // Obtain a list of all commands.
- let res = db.runCommand({listCommands: 1});
- assert.commandWorked(res);
+// Obtain a list of all commands.
+let res = db.runCommand({listCommands: 1});
+assert.commandWorked(res);
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = viewsCommandTests[command];
- assert(test !== undefined,
- "Coverage failure: must explicitly define a views test for " + command);
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = viewsCommandTests[command];
+ assert(test !== undefined,
+ "Coverage failure: must explicitly define a views test for " + command);
- if (!(test instanceof Array))
- test = [test];
- let subtest_nr = 0;
- for (let subtest of test) {
- // Tests can be explicitly skipped. Print the name of the skipped test, as well as
- // the reason why.
- if (subtest.skip !== undefined) {
- print("Skipping " + command + ": " + subtest.skip);
- continue;
- }
+ if (!(test instanceof Array))
+ test = [test];
+ let subtest_nr = 0;
+ for (let subtest of test) {
+ // Tests can be explicitly skipped. Print the name of the skipped test, as well as
+ // the reason why.
+ if (subtest.skip !== undefined) {
+ print("Skipping " + command + ": " + subtest.skip);
+ continue;
+ }
- let dbHandle = db.getSiblingDB("test");
- let commandHandle = dbHandle;
+ let dbHandle = db.getSiblingDB("test");
+ let commandHandle = dbHandle;
- // Skip tests depending on sharding configuration.
- if (subtest.skipSharded && isMongos) {
- print("Skipping " + command + ": not applicable to mongoS");
- continue;
- }
+ // Skip tests depending on sharding configuration.
+ if (subtest.skipSharded && isMongos) {
+ print("Skipping " + command + ": not applicable to mongoS");
+ continue;
+ }
- if (subtest.skipStandalone && !isMongos) {
- print("Skipping " + command + ": not applicable to mongoD");
- continue;
- }
+ if (subtest.skipStandalone && !isMongos) {
+ print("Skipping " + command + ": not applicable to mongoD");
+ continue;
+ }
- // Perform test setup, and call any additional setup callbacks provided by the test.
- // All tests assume that there exists a view named 'view' that is backed by
- // 'collection'.
- assert.commandWorked(dbHandle.dropDatabase());
- assert.commandWorked(dbHandle.runCommand({create: "view", viewOn: "collection"}));
- assert.writeOK(dbHandle.collection.insert({x: 1}));
- if (subtest.setup !== undefined)
- subtest.setup(dbHandle);
+ // Perform test setup, and call any additional setup callbacks provided by the test.
+ // All tests assume that there exists a view named 'view' that is backed by
+ // 'collection'.
+ assert.commandWorked(dbHandle.dropDatabase());
+ assert.commandWorked(dbHandle.runCommand({create: "view", viewOn: "collection"}));
+ assert.writeOK(dbHandle.collection.insert({x: 1}));
+ if (subtest.setup !== undefined)
+ subtest.setup(dbHandle);
- // Execute the command. Print the command name for the first subtest, as otherwise
- // it may be hard to figure out what command caused a failure.
- if (!subtest_nr++)
- print("Testing " + command);
+ // Execute the command. Print the command name for the first subtest, as otherwise
+ // it may be hard to figure out what command caused a failure.
+ if (!subtest_nr++)
+ print("Testing " + command);
- if (subtest.isAdminCommand)
- commandHandle = db.getSiblingDB("admin");
+ if (subtest.isAdminCommand)
+ commandHandle = db.getSiblingDB("admin");
- if (subtest.expectFailure) {
- let expectedErrorCode = subtest.expectedErrorCode;
- if (expectedErrorCode === undefined)
- expectedErrorCode = ErrorCodes.CommandNotSupportedOnView;
+ if (subtest.expectFailure) {
+ let expectedErrorCode = subtest.expectedErrorCode;
+ if (expectedErrorCode === undefined)
+ expectedErrorCode = ErrorCodes.CommandNotSupportedOnView;
- assertCommandOrWriteFailed(commandHandle.runCommand(subtest.command),
- expectedErrorCode,
- tojson(subtest.command));
- } else if (subtest.command instanceof Function)
- subtest.command(commandHandle);
- else
- assert.commandWorked(commandHandle.runCommand(subtest.command),
- tojson(subtest.command));
+ assertCommandOrWriteFailed(commandHandle.runCommand(subtest.command),
+ expectedErrorCode,
+ tojson(subtest.command));
+ } else if (subtest.command instanceof Function)
+ subtest.command(commandHandle);
+ else
+ assert.commandWorked(commandHandle.runCommand(subtest.command),
+ tojson(subtest.command));
- if (subtest.teardown !== undefined)
- subtest.teardown(dbHandle);
- }
+ if (subtest.teardown !== undefined)
+ subtest.teardown(dbHandle);
}
+}
}());
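The loop that closes views_all_commands.js above enforces exhaustive coverage: every command name returned by listCommands must have a matching entry in the test's viewsCommandTests table, or the run fails. A stripped-down sketch of that pattern follows, with a deliberately tiny table; the table contents and the print fallback are illustrative, not part of the patch.

// Sketch only: assumes a running shell with the global db handle.
const demoTests = {
    ping: {command: {ping: 1}},
    shutdown: {skip: "not applicable to views"},
};
const listed = assert.commandWorked(db.runCommand({listCommands: 1}));
for (let name of Object.keys(listed.commands)) {
    // The real test asserts here; this sketch merely reports the coverage gap.
    if (demoTests[name] === undefined)
        print("No views test defined for " + name);
}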
diff --git a/jstests/core/views/views_basic.js b/jstests/core/views/views_basic.js
index 18bd486df8f..1186dbcd779 100644
--- a/jstests/core/views/views_basic.js
+++ b/jstests/core/views/views_basic.js
@@ -1,54 +1,53 @@
// Tests basic functionality of read-only, non-materialized views.
(function() {
- "use strict";
-
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
-
- let viewsDB = db.getSiblingDB("views_basic");
- assert.commandWorked(viewsDB.dropDatabase());
-
- let assertCmdResultEq = function(cmd, expected) {
- let res = viewsDB.runCommand(cmd);
- assert.commandWorked(res);
-
- let cursor = new DBCommandCursor(db, res, 5);
- let actual = cursor.toArray();
- assert(arrayEq(actual, expected),
- "actual: " + tojson(cursor.toArray()) + ", expected:" + tojson(expected));
- };
-
- // Insert some control documents.
- let coll = viewsDB.getCollection("collection");
- let bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({_id: "New York", state: "NY", pop: 7});
- bulk.insert({_id: "Oakland", state: "CA", pop: 3});
- bulk.insert({_id: "Palo Alto", state: "CA", pop: 10});
- bulk.insert({_id: "San Francisco", state: "CA", pop: 4});
- bulk.insert({_id: "Trenton", state: "NJ", pop: 5});
- assert.writeOK(bulk.execute());
-
- // Test creating views on both collections and other views, using the database command and the
- // shell helper.
- assert.commandWorked(viewsDB.runCommand(
- {create: "californiaCities", viewOn: "collection", pipeline: [{$match: {state: "CA"}}]}));
- assert.commandWorked(viewsDB.createView("largeCaliforniaCities",
- "californiaCities",
- [{$match: {pop: {$gte: 10}}}, {$sort: {pop: 1}}]));
-
- // Use the find command on a view with various options.
- assertCmdResultEq(
- {find: "californiaCities", filter: {}, projection: {_id: 1, pop: 1}},
- [{_id: "Oakland", pop: 3}, {_id: "Palo Alto", pop: 10}, {_id: "San Francisco", pop: 4}]);
- assertCmdResultEq({find: "largeCaliforniaCities", filter: {pop: {$lt: 50}}, limit: 1},
- [{_id: "Palo Alto", state: "CA", pop: 10}]);
-
- // Use aggregation on a view.
- assertCmdResultEq({
- aggregate: "californiaCities",
- pipeline: [{$group: {_id: "$state", totalPop: {$sum: "$pop"}}}],
- cursor: {}
- },
- [{_id: "CA", totalPop: 17}]);
+"use strict";
+
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
+
+let viewsDB = db.getSiblingDB("views_basic");
+assert.commandWorked(viewsDB.dropDatabase());
+
+let assertCmdResultEq = function(cmd, expected) {
+ let res = viewsDB.runCommand(cmd);
+ assert.commandWorked(res);
+
+ let cursor = new DBCommandCursor(db, res, 5);
+ let actual = cursor.toArray();
+ assert(arrayEq(actual, expected),
+ "actual: " + tojson(cursor.toArray()) + ", expected:" + tojson(expected));
+};
+
+// Insert some control documents.
+let coll = viewsDB.getCollection("collection");
+let bulk = coll.initializeUnorderedBulkOp();
+bulk.insert({_id: "New York", state: "NY", pop: 7});
+bulk.insert({_id: "Oakland", state: "CA", pop: 3});
+bulk.insert({_id: "Palo Alto", state: "CA", pop: 10});
+bulk.insert({_id: "San Francisco", state: "CA", pop: 4});
+bulk.insert({_id: "Trenton", state: "NJ", pop: 5});
+assert.writeOK(bulk.execute());
+
+// Test creating views on both collections and other views, using the database command and the
+// shell helper.
+assert.commandWorked(viewsDB.runCommand(
+ {create: "californiaCities", viewOn: "collection", pipeline: [{$match: {state: "CA"}}]}));
+assert.commandWorked(viewsDB.createView(
+ "largeCaliforniaCities", "californiaCities", [{$match: {pop: {$gte: 10}}}, {$sort: {pop: 1}}]));
+
+// Use the find command on a view with various options.
+assertCmdResultEq(
+ {find: "californiaCities", filter: {}, projection: {_id: 1, pop: 1}},
+ [{_id: "Oakland", pop: 3}, {_id: "Palo Alto", pop: 10}, {_id: "San Francisco", pop: 4}]);
+assertCmdResultEq({find: "largeCaliforniaCities", filter: {pop: {$lt: 50}}, limit: 1},
+ [{_id: "Palo Alto", state: "CA", pop: 10}]);
+
+// Use aggregation on a view.
+assertCmdResultEq({
+ aggregate: "californiaCities",
+ pipeline: [{$group: {_id: "$state", totalPop: {$sum: "$pop"}}}],
+ cursor: {}
+},
+ [{_id: "CA", totalPop: 17}]);
}());
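For readers skimming the views_basic.js hunk above: the test defines views both through the raw create command and through the createView shell helper, then reads through them with find and aggregate. A minimal standalone sketch of that pattern, under illustrative names (views_demo, cities) that are not part of the patch:

// Sketch only: illustrative names, assumes the shell's global db handle.
let demoDB = db.getSiblingDB("views_demo");
assert.commandWorked(demoDB.dropDatabase());
assert.writeOK(demoDB.cities.insert({_id: "Oakland", state: "CA", pop: 3}));

// The database command and the shell helper define views equivalently.
assert.commandWorked(demoDB.runCommand(
    {create: "caCities", viewOn: "cities", pipeline: [{$match: {state: "CA"}}]}));
assert.commandWorked(demoDB.createView("bigCaCities", "caCities", [{$match: {pop: {$gte: 1}}}]));

// Reads through a view behave like reads against the backing collection.
assert.eq(1, demoDB.caCities.find().itcount());
assert.eq(1, demoDB.bigCaCities.aggregate([]).itcount());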
diff --git a/jstests/core/views/views_change.js b/jstests/core/views/views_change.js
index f3bd8880a8c..94521013136 100644
--- a/jstests/core/views/views_change.js
+++ b/jstests/core/views/views_change.js
@@ -7,96 +7,96 @@
* ]
*/
(function() {
- "use strict";
-
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
-
- let viewDB = db.getSiblingDB("views_change");
- let collection = viewDB.collection;
- let view = viewDB.view;
- let viewOnView = viewDB.viewOnView;
-
- // Convenience functions.
- let resetCollectionAndViews = function() {
- viewDB.runCommand({drop: "collection"});
- viewDB.runCommand({drop: "view"});
- viewDB.runCommand({drop: "viewOnView"});
- assert.commandWorked(viewDB.runCommand({create: "collection"}));
- assert.commandWorked(viewDB.runCommand(
- {create: "view", viewOn: "collection", pipeline: [{$match: {a: 1}}]}));
- assert.commandWorked(viewDB.runCommand(
- {create: "viewOnView", viewOn: "view", pipeline: [{$match: {b: 1}}]}));
- };
- let assertFindResultEq = function(collName, expected) {
- let res = viewDB.runCommand({find: collName, filter: {}, projection: {_id: 0, a: 1, b: 1}});
- assert.commandWorked(res);
- let arr = new DBCommandCursor(db, res).toArray();
- let errmsg = tojson({expected: expected, got: arr});
- assert(arrayEq(arr, expected), errmsg);
- };
-
- let doc = {a: 1, b: 1};
-
- resetCollectionAndViews();
-
- // A view is updated when its viewOn is modified. When auth is enabled, we expect collMod to
- // fail when specifying "viewOn" but not "pipeline".
- assert.writeOK(collection.insert(doc));
- assertFindResultEq("view", [doc]);
- let res = viewDB.runCommand({collMod: "view", viewOn: "nonexistent"});
- if (jsTest.options().auth) {
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- } else {
- assert.commandWorked(res);
- assertFindResultEq("view", []);
- }
-
- resetCollectionAndViews();
-
- // A view is updated when its pipeline is modified. When auth is enabled, we expect collMod to
- // fail when specifying "pipeline" but not "viewOn".
- assert.writeOK(collection.insert(doc));
- assert.writeOK(collection.insert({a: 7}));
- assertFindResultEq("view", [doc]);
- res = viewDB.runCommand({collMod: "view", pipeline: [{$match: {a: {$gt: 4}}}]});
- if (jsTest.options().auth) {
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- } else {
- assert.commandWorked(res);
- assertFindResultEq("view", [{a: 7}]);
- }
-
- resetCollectionAndViews();
-
- // A view is updated when the backing collection is updated.
- assert.writeOK(collection.insert(doc));
- assertFindResultEq("view", [doc]);
- assert.writeOK(collection.update({a: 1}, {$set: {a: 2}}));
+"use strict";
+
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
+
+let viewDB = db.getSiblingDB("views_change");
+let collection = viewDB.collection;
+let view = viewDB.view;
+let viewOnView = viewDB.viewOnView;
+
+// Convenience functions.
+let resetCollectionAndViews = function() {
+ viewDB.runCommand({drop: "collection"});
+ viewDB.runCommand({drop: "view"});
+ viewDB.runCommand({drop: "viewOnView"});
+ assert.commandWorked(viewDB.runCommand({create: "collection"}));
+ assert.commandWorked(
+ viewDB.runCommand({create: "view", viewOn: "collection", pipeline: [{$match: {a: 1}}]}));
+ assert.commandWorked(
+ viewDB.runCommand({create: "viewOnView", viewOn: "view", pipeline: [{$match: {b: 1}}]}));
+};
+let assertFindResultEq = function(collName, expected) {
+ let res = viewDB.runCommand({find: collName, filter: {}, projection: {_id: 0, a: 1, b: 1}});
+ assert.commandWorked(res);
+ let arr = new DBCommandCursor(db, res).toArray();
+ let errmsg = tojson({expected: expected, got: arr});
+ assert(arrayEq(arr, expected), errmsg);
+};
+
+let doc = {a: 1, b: 1};
+
+resetCollectionAndViews();
+
+// A view is updated when its viewOn is modified. When auth is enabled, we expect collMod to
+// fail when specifying "viewOn" but not "pipeline".
+assert.writeOK(collection.insert(doc));
+assertFindResultEq("view", [doc]);
+let res = viewDB.runCommand({collMod: "view", viewOn: "nonexistent"});
+if (jsTest.options().auth) {
+ assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+} else {
+ assert.commandWorked(res);
assertFindResultEq("view", []);
-
- resetCollectionAndViews();
-
- // A view is updated when a backing view is updated.
- assert.writeOK(collection.insert(doc));
- assertFindResultEq("viewOnView", [doc]);
- assert.commandWorked(viewDB.runCommand(
- {collMod: "view", viewOn: "collection", pipeline: [{$match: {nonexistent: 1}}]}));
- assertFindResultEq("viewOnView", []);
-
- resetCollectionAndViews();
-
- // A view appears empty if the backing collection is dropped.
- assert.writeOK(collection.insert(doc));
- assertFindResultEq("view", [doc]);
- assert.commandWorked(viewDB.runCommand({drop: "collection"}));
- assertFindResultEq("view", []);
-
- resetCollectionAndViews();
-
- // A view appears empty if a backing view is dropped.
- assert.writeOK(collection.insert(doc));
- assertFindResultEq("viewOnView", [doc]);
- assert.commandWorked(viewDB.runCommand({drop: "view"}));
- assertFindResultEq("viewOnView", []);
+}
+
+resetCollectionAndViews();
+
+// A view is updated when its pipeline is modified. When auth is enabled, we expect collMod to
+// fail when specifying "pipeline" but not "viewOn".
+assert.writeOK(collection.insert(doc));
+assert.writeOK(collection.insert({a: 7}));
+assertFindResultEq("view", [doc]);
+res = viewDB.runCommand({collMod: "view", pipeline: [{$match: {a: {$gt: 4}}}]});
+if (jsTest.options().auth) {
+ assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+} else {
+ assert.commandWorked(res);
+ assertFindResultEq("view", [{a: 7}]);
+}
+
+resetCollectionAndViews();
+
+// A view is updated when the backing collection is updated.
+assert.writeOK(collection.insert(doc));
+assertFindResultEq("view", [doc]);
+assert.writeOK(collection.update({a: 1}, {$set: {a: 2}}));
+assertFindResultEq("view", []);
+
+resetCollectionAndViews();
+
+// A view is updated when a backing view is updated.
+assert.writeOK(collection.insert(doc));
+assertFindResultEq("viewOnView", [doc]);
+assert.commandWorked(viewDB.runCommand(
+ {collMod: "view", viewOn: "collection", pipeline: [{$match: {nonexistent: 1}}]}));
+assertFindResultEq("viewOnView", []);
+
+resetCollectionAndViews();
+
+// A view appears empty if the backing collection is dropped.
+assert.writeOK(collection.insert(doc));
+assertFindResultEq("view", [doc]);
+assert.commandWorked(viewDB.runCommand({drop: "collection"}));
+assertFindResultEq("view", []);
+
+resetCollectionAndViews();
+
+// A view appears empty if a backing view is dropped.
+assert.writeOK(collection.insert(doc));
+assertFindResultEq("viewOnView", [doc]);
+assert.commandWorked(viewDB.runCommand({drop: "view"}));
+assertFindResultEq("viewOnView", []);
}());
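The views_change.js hunks above rest on one behavior: a view is a stored definition, so collMod can redefine it in place and subsequent reads immediately reflect the new viewOn/pipeline pair. A compact sketch under illustrative names, passing both viewOn and pipeline together, which the test notes is required once auth is enabled:

// Sketch only: illustrative names, assumes the shell's global db handle.
let changeDB = db.getSiblingDB("views_change_demo");
assert.commandWorked(changeDB.dropDatabase());
assert.writeOK(changeDB.base.insert({a: 1}));
assert.commandWorked(
    changeDB.runCommand({create: "v", viewOn: "base", pipeline: [{$match: {a: 1}}]}));
assert.eq(1, changeDB.v.find().itcount());

// Redefine the view; reads now go through the new pipeline.
assert.commandWorked(
    changeDB.runCommand({collMod: "v", viewOn: "base", pipeline: [{$match: {a: {$gt: 5}}}]}));
assert.eq(0, changeDB.v.find().itcount());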
diff --git a/jstests/core/views/views_coll_stats.js b/jstests/core/views/views_coll_stats.js
index bae2aa4e41f..cb09c41bb70 100644
--- a/jstests/core/views/views_coll_stats.js
+++ b/jstests/core/views/views_coll_stats.js
@@ -1,83 +1,85 @@
// Test that $collStats works on a view and in view pipelines as expected.
(function() {
- "use strict";
+"use strict";
- let viewsDB = db.getSiblingDB("views_coll_stats");
- const matchStage = {$match: {}};
- const collStatsStage = {$collStats: {latencyStats: {}}};
+let viewsDB = db.getSiblingDB("views_coll_stats");
+const matchStage = {
+ $match: {}
+};
+const collStatsStage = {
+ $collStats: {latencyStats: {}}
+};
- function clear() {
- assert.commandWorked(viewsDB.dropDatabase());
- }
+function clear() {
+ assert.commandWorked(viewsDB.dropDatabase());
+}
- function getCollStats(ns) {
- return viewsDB[ns].latencyStats().next();
- }
+function getCollStats(ns) {
+ return viewsDB[ns].latencyStats().next();
+}
- function checkCollStatsBelongTo(stats, expectedNs) {
- assert.eq(stats.ns,
- viewsDB[expectedNs].getFullName(),
- "Expected coll stats for " + expectedNs + " but got " + stats.ns);
- }
+function checkCollStatsBelongTo(stats, expectedNs) {
+ assert.eq(stats.ns,
+ viewsDB[expectedNs].getFullName(),
+ "Expected coll stats for " + expectedNs + " but got " + stats.ns);
+}
- function makeView(viewNs, viewOnNs, pipeline) {
- if (!pipeline) {
- pipeline = [];
- }
- let res = viewsDB.runCommand({create: viewNs, viewOn: viewOnNs, pipeline: pipeline});
- assert.commandWorked(res);
+function makeView(viewNs, viewOnNs, pipeline) {
+ if (!pipeline) {
+ pipeline = [];
}
+ let res = viewsDB.runCommand({create: viewNs, viewOn: viewOnNs, pipeline: pipeline});
+ assert.commandWorked(res);
+}
- clear();
-
- // Check basic latency stats on a view.
- makeView("a", "b");
- checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
- clear();
+clear();
- // Check that latency stats does not prepend the view pipeline.
- makeView("a", "b", [matchStage]);
- checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
- clear();
+// Check basic latency stats on a view.
+makeView("a", "b");
+checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
+clear();
- // Check that latency stats works inside a pipeline.
- makeView("a", "b", [collStatsStage]);
- checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
- checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b");
- // Since the $collStats stage is in the pipeline, it should refer to the viewOn namespace.
- checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b");
- clear();
+// Check that latency stats does not prepend the view pipeline.
+makeView("a", "b", [matchStage]);
+checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
+clear();
- // Check that the first $collStats pipeline stage found will not resolve further views.
- makeView("a", "b", [collStatsStage, matchStage]);
- makeView("b", "c", [collStatsStage]);
- checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
- checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b");
- checkCollStatsBelongTo(viewsDB["c"].latencyStats().next(), "c");
- checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b");
- checkCollStatsBelongTo(viewsDB["b"].aggregate().next(), "c");
- clear();
+// Check that latency stats works inside a pipeline.
+makeView("a", "b", [collStatsStage]);
+checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
+checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b");
+// Since the $collStats stage is in the pipeline, it should refer to the viewOn namespace.
+checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b");
+clear();
- // Assert that attempting to retrieve storageStats fails.
- makeView("a", "b");
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "a", pipeline: [{$collStats: {storageStats: {}}}], cursor: {}}),
- ErrorCodes.CommandNotSupportedOnView);
- clear();
+// Check that the first $collStats pipeline stage found will not resolve further views.
+makeView("a", "b", [collStatsStage, matchStage]);
+makeView("b", "c", [collStatsStage]);
+checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
+checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b");
+checkCollStatsBelongTo(viewsDB["c"].latencyStats().next(), "c");
+checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b");
+checkCollStatsBelongTo(viewsDB["b"].aggregate().next(), "c");
+clear();
- // Assert that attempting to retrieve collection record count on an identity view fails.
- makeView("a", "b");
- assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
- ErrorCodes.CommandNotSupportedOnView);
- clear();
+// Assert that attempting to retrieve storageStats fails.
+makeView("a", "b");
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {storageStats: {}}}], cursor: {}}),
+ ErrorCodes.CommandNotSupportedOnView);
+clear();
- // Assert that attempting to retrieve collection record count on a non-identity view fails.
- makeView("a", "b", [{$match: {a: 0}}]);
- assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
- ErrorCodes.CommandNotSupportedOnView);
- clear();
+// Assert that attempting to retrieve collection record count on an identity view fails.
+makeView("a", "b");
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
+ ErrorCodes.CommandNotSupportedOnView);
+clear();
+
+// Assert that attempting to retrieve collection record count on a non-identity view fails.
+makeView("a", "b", [{$match: {a: 0}}]);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
+ ErrorCodes.CommandNotSupportedOnView);
+clear();
}());
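The views_coll_stats.js hunks above hinge on where $collStats resolves: issued by the user as the first stage against a view, it reports on the view's own namespace, whereas a $collStats embedded in the view definition reports on the viewOn namespace, since it runs as part of the stored pipeline. A minimal sketch of the user-issued case, under illustrative names:

// Sketch only: illustrative names, assumes the shell's global db handle.
let statsDB = db.getSiblingDB("coll_stats_demo");
assert.commandWorked(statsDB.dropDatabase());
assert.commandWorked(statsDB.runCommand({create: "v", viewOn: "backing", pipeline: []}));

// User-issued $collStats as the first stage: attributed to the view itself.
let stats = statsDB.v.aggregate([{$collStats: {latencyStats: {}}}]).next();
assert.eq(stats.ns, statsDB.v.getFullName());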
diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js
index 32b103ae2fb..9c18c27a41b 100644
--- a/jstests/core/views/views_collation.js
+++ b/jstests/core/views/views_collation.js
@@ -4,164 +4,157 @@
* Tests the behavior of operations when interacting with a view's default collation.
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- let viewsDB = db.getSiblingDB("views_collation");
- assert.commandWorked(viewsDB.dropDatabase());
- assert.commandWorked(viewsDB.runCommand({create: "simpleCollection"}));
- assert.commandWorked(viewsDB.runCommand({create: "ukCollection", collation: {locale: "uk"}}));
- assert.commandWorked(viewsDB.runCommand({create: "filCollection", collation: {locale: "fil"}}));
-
- // Creating a view without specifying a collation defaults to the simple collation.
- assert.commandWorked(viewsDB.runCommand({create: "simpleView", viewOn: "ukCollection"}));
- let listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}});
- assert.commandWorked(listCollectionsOutput);
- assert(!listCollectionsOutput.cursor.firstBatch[0].options.hasOwnProperty("collation"));
-
- // Operations that do not specify a collation succeed.
- assert.commandWorked(viewsDB.runCommand({aggregate: "simpleView", pipeline: [], cursor: {}}));
- assert.commandWorked(viewsDB.runCommand({find: "simpleView"}));
- assert.commandWorked(viewsDB.runCommand({count: "simpleView"}));
- assert.commandWorked(viewsDB.runCommand({distinct: "simpleView", key: "x"}));
-
- // Operations that explicitly ask for the "simple" locale succeed against a view with the
- // simple collation.
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "simple"}}));
- assert.commandWorked(viewsDB.runCommand({find: "simpleView", collation: {locale: "simple"}}));
- assert.commandWorked(viewsDB.runCommand({count: "simpleView", collation: {locale: "simple"}}));
- assert.commandWorked(
- viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "simple"}}));
-
- // Attempting to override a view's simple collation fails.
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "en"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({find: "simpleView", collation: {locale: "fr"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({count: "simpleView", collation: {locale: "fil"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "es"}}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // Create a view with an explicit, non-simple collation.
- assert.commandWorked(
- viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}}));
- listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {name: "filView"}});
- assert.commandWorked(listCollectionsOutput);
- assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.collation.locale, "fil");
-
- // Operations that do not specify a collation succeed.
- assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}}));
- assert.commandWorked(viewsDB.runCommand({find: "filView"}));
- assert.commandWorked(viewsDB.runCommand({count: "filView"}));
- assert.commandWorked(viewsDB.runCommand({distinct: "filView", key: "x"}));
-
- // Explain of operations that do not specify a collation succeed.
- assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], explain: true}));
- assert.commandWorked(
- viewsDB.runCommand({explain: {find: "filView"}, verbosity: "allPlansExecution"}));
- assert.commandWorked(
- viewsDB.runCommand({explain: {count: "filView"}, verbosity: "allPlansExecution"}));
- assert.commandWorked(viewsDB.runCommand(
- {explain: {distinct: "filView", key: "x"}, verbosity: "allPlansExecution"}));
-
- // Operations with a matching collation succeed.
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "fil"}}));
- assert.commandWorked(viewsDB.runCommand({find: "filView", collation: {locale: "fil"}}));
- assert.commandWorked(viewsDB.runCommand({count: "filView", collation: {locale: "fil"}}));
- assert.commandWorked(
- viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "fil"}}));
-
- // Explain of operations with a matching collation succeed.
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "fil"}}));
- assert.commandWorked(viewsDB.runCommand(
- {explain: {find: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"}));
- assert.commandWorked(viewsDB.runCommand(
- {explain: {count: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"}));
- assert.commandWorked(viewsDB.runCommand({
- explain: {distinct: "filView", key: "x", collation: {locale: "fil"}},
- verbosity: "allPlansExecution"
- }));
-
- // Attempting to override the non-simple default collation of a view fails.
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "en"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "simple"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "fr"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({find: "filView", collation: {locale: "simple"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({count: "filView", collation: {locale: "zh"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({count: "filView", collation: {locale: "simple"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "es"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "simple"}}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // Attempting to override the default collation of a view with explain fails.
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "en"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "simple"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- explain: {find: "filView", collation: {locale: "fr"}},
- verbosity: "allPlansExecution"
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- explain: {find: "filView", collation: {locale: "simple"}},
- verbosity: "allPlansExecution"
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- explain: {count: "filView", collation: {locale: "zh"}},
- verbosity: "allPlansExecution"
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- explain: {count: "filView", collation: {locale: "simple"}},
- verbosity: "allPlansExecution"
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- explain: {distinct: "filView", key: "x", collation: {locale: "es"}},
- verbosity: "allPlansExecution"
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- explain: {distinct: "filView", key: "x", collation: {locale: "simple"}},
- verbosity: "allPlansExecution"
- }),
- ErrorCodes.OptionNotSupportedOnView);
-
- const lookupSimpleView = {
- $lookup: {from: "simpleView", localField: "x", foreignField: "x", as: "result"}
- };
- const nestedLookupSimpleView = {
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+let viewsDB = db.getSiblingDB("views_collation");
+assert.commandWorked(viewsDB.dropDatabase());
+assert.commandWorked(viewsDB.runCommand({create: "simpleCollection"}));
+assert.commandWorked(viewsDB.runCommand({create: "ukCollection", collation: {locale: "uk"}}));
+assert.commandWorked(viewsDB.runCommand({create: "filCollection", collation: {locale: "fil"}}));
+
+// Creating a view without specifying a collation defaults to the simple collation.
+assert.commandWorked(viewsDB.runCommand({create: "simpleView", viewOn: "ukCollection"}));
+let listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}});
+assert.commandWorked(listCollectionsOutput);
+assert(!listCollectionsOutput.cursor.firstBatch[0].options.hasOwnProperty("collation"));
+
+// Operations that do not specify a collation succeed.
+assert.commandWorked(viewsDB.runCommand({aggregate: "simpleView", pipeline: [], cursor: {}}));
+assert.commandWorked(viewsDB.runCommand({find: "simpleView"}));
+assert.commandWorked(viewsDB.runCommand({count: "simpleView"}));
+assert.commandWorked(viewsDB.runCommand({distinct: "simpleView", key: "x"}));
+
+// Operations that explicitly ask for the "simple" locale succeed against a view with the
+// simple collation.
+assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "simple"}}));
+assert.commandWorked(viewsDB.runCommand({find: "simpleView", collation: {locale: "simple"}}));
+assert.commandWorked(viewsDB.runCommand({count: "simpleView", collation: {locale: "simple"}}));
+assert.commandWorked(
+ viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "simple"}}));
+
+// Attempting to override a view's simple collation fails.
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "en"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({find: "simpleView", collation: {locale: "fr"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({count: "simpleView", collation: {locale: "fil"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "es"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// Create a view with an explicit, non-simple collation.
+assert.commandWorked(
+ viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}}));
+listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {name: "filView"}});
+assert.commandWorked(listCollectionsOutput);
+assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.collation.locale, "fil");
+
+// Operations that do not specify a collation succeed.
+assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}}));
+assert.commandWorked(viewsDB.runCommand({find: "filView"}));
+assert.commandWorked(viewsDB.runCommand({count: "filView"}));
+assert.commandWorked(viewsDB.runCommand({distinct: "filView", key: "x"}));
+
+// Explain of operations that do not specify a collation succeed.
+assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], explain: true}));
+assert.commandWorked(
+ viewsDB.runCommand({explain: {find: "filView"}, verbosity: "allPlansExecution"}));
+assert.commandWorked(
+ viewsDB.runCommand({explain: {count: "filView"}, verbosity: "allPlansExecution"}));
+assert.commandWorked(
+ viewsDB.runCommand({explain: {distinct: "filView", key: "x"}, verbosity: "allPlansExecution"}));
+
+// Operations with a matching collation succeed.
+assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "fil"}}));
+assert.commandWorked(viewsDB.runCommand({find: "filView", collation: {locale: "fil"}}));
+assert.commandWorked(viewsDB.runCommand({count: "filView", collation: {locale: "fil"}}));
+assert.commandWorked(
+ viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "fil"}}));
+
+// Explain of operations with a matching collation succeed.
+assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "fil"}}));
+assert.commandWorked(viewsDB.runCommand(
+ {explain: {find: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"}));
+assert.commandWorked(viewsDB.runCommand(
+ {explain: {count: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"}));
+assert.commandWorked(viewsDB.runCommand({
+ explain: {distinct: "filView", key: "x", collation: {locale: "fil"}},
+ verbosity: "allPlansExecution"
+}));
+
+// Attempting to override the non-simple default collation of a view fails.
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "en"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "simple"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "fr"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "simple"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({count: "filView", collation: {locale: "zh"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({count: "filView", collation: {locale: "simple"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "es"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "simple"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// Attempting to override the default collation of a view with explain fails.
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "en"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "simple"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {explain: {find: "filView", collation: {locale: "fr"}}, verbosity: "allPlansExecution"}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ explain: {find: "filView", collation: {locale: "simple"}},
+ verbosity: "allPlansExecution"
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {explain: {count: "filView", collation: {locale: "zh"}}, verbosity: "allPlansExecution"}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ explain: {count: "filView", collation: {locale: "simple"}},
+ verbosity: "allPlansExecution"
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ explain: {distinct: "filView", key: "x", collation: {locale: "es"}},
+ verbosity: "allPlansExecution"
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ explain: {distinct: "filView", key: "x", collation: {locale: "simple"}},
+ verbosity: "allPlansExecution"
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+const lookupSimpleView = {
+ $lookup: {from: "simpleView", localField: "x", foreignField: "x", as: "result"}
+};
+const nestedLookupSimpleView = {
$lookup: {
from: "simpleCollection",
pipeline: [{
@@ -171,7 +164,7 @@
as: "result"
}
};
- const graphLookupSimpleView = {
+const graphLookupSimpleView = {
$graphLookup: {
from: "simpleView",
startWith: "$_id",
@@ -181,65 +174,65 @@
}
};
- // You can lookup into a view with the simple collation if the collection also has the same
- // default collation.
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "simpleCollection", pipeline: [lookupSimpleView], cursor: {}}));
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "simpleCollection", pipeline: [nestedLookupSimpleView], cursor: {}}));
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "simpleCollection", pipeline: [graphLookupSimpleView], cursor: {}}));
-
- // You can lookup into a view with the simple collation if the operation has a matching
- // collation.
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "ukCollection",
- pipeline: [lookupSimpleView],
- cursor: {},
- collation: {locale: "simple"}
- }));
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "ukCollection",
- pipeline: [nestedLookupSimpleView],
- cursor: {},
- collation: {locale: "simple"}
- }));
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "ukCollection",
- pipeline: [graphLookupSimpleView],
- cursor: {},
- collation: {locale: "simple"}
- }));
-
- // You can't lookup into a view with the simple collation if the operation has a conflicting
- // collation.
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "simpleCollection",
- pipeline: [lookupSimpleView],
- cursor: {},
- collation: {locale: "en"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "simpleCollection",
- pipeline: [nestedLookupSimpleView],
- cursor: {},
- collation: {locale: "en"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "simpleCollection",
- pipeline: [graphLookupSimpleView],
- cursor: {},
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
-
- const lookupFilView = {
- $lookup: {from: "filView", localField: "x", foreignField: "x", as: "result"}
- };
- function makeNestedLookupFilView(sourceCollName) {
- return {
+// You can lookup into a view with the simple collation if the collection also has the same
+// default collation.
+assert.commandWorked(
+ viewsDB.runCommand({aggregate: "simpleCollection", pipeline: [lookupSimpleView], cursor: {}}));
+assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleCollection", pipeline: [nestedLookupSimpleView], cursor: {}}));
+assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleCollection", pipeline: [graphLookupSimpleView], cursor: {}}));
+
+// You can lookup into a view with the simple collation if the operation has a matching
+// collation.
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [lookupSimpleView],
+ cursor: {},
+ collation: {locale: "simple"}
+}));
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [nestedLookupSimpleView],
+ cursor: {},
+ collation: {locale: "simple"}
+}));
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [graphLookupSimpleView],
+ cursor: {},
+ collation: {locale: "simple"}
+}));
+
+// You can't lookup into a view with the simple collation if the operation has a conflicting
+// collation.
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "simpleCollection",
+ pipeline: [lookupSimpleView],
+ cursor: {},
+ collation: {locale: "en"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "simpleCollection",
+ pipeline: [nestedLookupSimpleView],
+ cursor: {},
+ collation: {locale: "en"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "simpleCollection",
+ pipeline: [graphLookupSimpleView],
+ cursor: {},
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+const lookupFilView = {
+ $lookup: {from: "filView", localField: "x", foreignField: "x", as: "result"}
+};
+function makeNestedLookupFilView(sourceCollName) {
+ return {
$lookup: {
from: sourceCollName,
pipeline: [{
@@ -249,8 +242,8 @@
as: "result"
}
};
- }
- const graphLookupFilView = {
+}
+const graphLookupFilView = {
$graphLookup: {
from: "filView",
startWith: "$_id",
@@ -260,91 +253,90 @@
}
};
- // You can lookup into a view with no operation collation specified if the collection's
- // collation matches the collation of the view.
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "filCollection", pipeline: [lookupFilView], cursor: {}}));
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "filCollection",
- pipeline: [makeNestedLookupFilView("filCollection")],
- cursor: {}
- }));
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "filCollection", pipeline: [graphLookupFilView], cursor: {}}));
-
- // You can lookup into a view with a non-simple collation if the operation's collation
- // matches.
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "ukCollection",
- pipeline: [lookupFilView],
- cursor: {},
- collation: {locale: "fil"}
- }));
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "ukCollection",
- pipeline: [makeNestedLookupFilView("ukCollection")],
- cursor: {},
- collation: {locale: "fil"}
- }));
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "ukCollection",
- pipeline: [graphLookupFilView],
- cursor: {},
- collation: {locale: "fil"}
- }));
-
- // You can't lookup into a view when aggregating a collection whose default collation does
- // not match the view's default collation.
- assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [lookupFilView]}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "simpleCollection",
- cursor: {},
- pipeline: [makeNestedLookupFilView("simpleCollection")]
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "simpleCollection", cursor: {}, pipeline: [graphLookupFilView]}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // You can't lookup into a view when aggregating a collection and the operation's collation
- // does not match the view's default collation.
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "filCollection",
- pipeline: [lookupFilView],
- cursor: {},
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "filCollection",
- pipeline: [makeNestedLookupFilView("filCollection")],
- cursor: {},
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "filCollection",
- pipeline: [graphLookupFilView],
- cursor: {},
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
-
- // You may perform an aggregation involving multiple views if they all have the same default
- // collation.
- assert.commandWorked(viewsDB.runCommand(
- {create: "simpleView2", viewOn: "simpleCollection", collation: {locale: "simple"}}));
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "simpleView2", pipeline: [lookupSimpleView], cursor: {}}));
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "simpleView2", pipeline: [graphLookupSimpleView], cursor: {}}));
-
- // You may perform an aggregation involving multiple views and collections if all the views
- // have the same default collation.
- const graphLookupUkCollection = {
+// You can lookup into a view with no operation collation specified if the collection's
+// collation matches the collation of the view.
+assert.commandWorked(
+ viewsDB.runCommand({aggregate: "filCollection", pipeline: [lookupFilView], cursor: {}}));
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "filCollection",
+ pipeline: [makeNestedLookupFilView("filCollection")],
+ cursor: {}
+}));
+assert.commandWorked(
+ viewsDB.runCommand({aggregate: "filCollection", pipeline: [graphLookupFilView], cursor: {}}));
+
+// You can lookup into a view with a non-simple collation if the operation's collation
+// matches.
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [lookupFilView],
+ cursor: {},
+ collation: {locale: "fil"}
+}));
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [makeNestedLookupFilView("ukCollection")],
+ cursor: {},
+ collation: {locale: "fil"}
+}));
+assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [graphLookupFilView],
+ cursor: {},
+ collation: {locale: "fil"}
+}));
+
+// You can't lookup into a view when aggregating a collection whose default collation does
+// not match the view's default collation.
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [lookupFilView]}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "simpleCollection",
+ cursor: {},
+ pipeline: [makeNestedLookupFilView("simpleCollection")]
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [graphLookupFilView]}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// You can't lookup into a view when aggregating a collection and the operation's collation
+// does not match the view's default collation.
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "filCollection",
+ pipeline: [lookupFilView],
+ cursor: {},
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "filCollection",
+ pipeline: [makeNestedLookupFilView("filCollection")],
+ cursor: {},
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "filCollection",
+ pipeline: [graphLookupFilView],
+ cursor: {},
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// You may perform an aggregation involving multiple views if they all have the same default
+// collation.
+assert.commandWorked(viewsDB.runCommand(
+ {create: "simpleView2", viewOn: "simpleCollection", collation: {locale: "simple"}}));
+assert.commandWorked(
+ viewsDB.runCommand({aggregate: "simpleView2", pipeline: [lookupSimpleView], cursor: {}}));
+assert.commandWorked(
+ viewsDB.runCommand({aggregate: "simpleView2", pipeline: [graphLookupSimpleView], cursor: {}}));
+
+// You may perform an aggregation involving multiple views and collections if all the views
+// have the same default collation.
+const graphLookupUkCollection = {
$graphLookup: {
from: "ukCollection",
startWith: "$_id",
@@ -353,181 +345,170 @@
as: "matched"
}
};
- assert.commandWorked(viewsDB.runCommand({
- aggregate: "simpleView2",
- pipeline: [lookupSimpleView, graphLookupUkCollection],
- cursor: {}
- }));
-
- // You cannot perform an aggregation involving multiple views if the views don't all have
- // the same default collation.
- assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "filView", pipeline: [lookupSimpleView], cursor: {}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "simpleView", pipeline: [lookupFilView], cursor: {}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- aggregate: "simpleCollection",
- pipeline: [lookupFilView, graphLookupSimpleView],
- cursor: {}
- }),
- ErrorCodes.OptionNotSupportedOnView);
-
- // You cannot create a view that depends on another view with a different default collation.
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "zhView", viewOn: "filView", collation: {locale: "zh"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- create: "zhView",
- viewOn: "simpleCollection",
- pipeline: [lookupFilView],
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- create: "zhView",
- viewOn: "simpleCollection",
- pipeline: [makeNestedLookupFilView("zhView")],
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- create: "zhView",
- viewOn: "simpleCollection",
- pipeline: [graphLookupSimpleView],
- collation: {locale: "zh"}
- }),
- ErrorCodes.OptionNotSupportedOnView);
-
- // You cannot modify a view to depend on another view with a different default collation.
- assert.commandWorked(viewsDB.runCommand(
- {create: "esView", viewOn: "simpleCollection", collation: {locale: "es"}}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({collMod: "esView", viewOn: "filView", pipeline: []}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {collMod: "esView", viewOn: "simpleCollection", pipeline: [lookupSimpleView]}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {collMod: "esView", viewOn: "simpleCollection", pipeline: [graphLookupFilView]}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // Views cannot be dropped and recreated with a different collation if other views depend on
- // that view.
- assert.commandWorked(
- viewsDB.runCommand({create: "filView2", viewOn: "filView", collation: {locale: "fil"}}));
- assert.commandWorked(viewsDB.runCommand({drop: "filView"}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandWorked(
- viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}}));
-
- // Views cannot be dropped and recreated with a different collation if other views depend on
- // that view via $lookup or $graphLookup.
- assert.commandWorked(viewsDB.runCommand(
- {collMod: "filView2", viewOn: "simpleCollection", pipeline: [lookupFilView]}));
- assert.commandWorked(viewsDB.runCommand({drop: "filView"}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandWorked(viewsDB.runCommand(
- {create: "filView", viewOn: "ukCollection", pipeline: [], collation: {locale: "fil"}}));
-
- assert.commandWorked(viewsDB.runCommand(
- {collMod: "filView2", viewOn: "simpleCollection", pipeline: [graphLookupFilView]}));
- assert.commandWorked(viewsDB.runCommand({drop: "filView"}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // If two views "A" and "C" have different collations and depend on the namespace "B", then "B"
- // cannot be created as a view.
- assert.commandWorked(
- viewsDB.runCommand({create: "A", viewOn: "B", collation: {locale: "hsb"}}));
- assert.commandWorked(
- viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandWorked(viewsDB.runCommand({drop: "B"}));
- assert.commandWorked(
- viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "wae"}}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({create: "B", viewOn: "other"}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // Make sure that when an operation does not specify the collation, it correctly uses the
- // default collation associated with the view. For this, we set up a new backing collection with
- // a case-insensitive view.
- assert.commandWorked(viewsDB.runCommand({create: "case_sensitive_coll"}));
- assert.commandWorked(viewsDB.runCommand({
- create: "case_insensitive_view",
- viewOn: "case_sensitive_coll",
- collation: {locale: "en", strength: 1}
- }));
-
- assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "case"}));
- assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "Case"}));
- assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "CASE"}));
-
- let explain, cursorStage;
-
- // Test that aggregate against a view with a default collation correctly uses the collation.
- // We expect the pipeline to be optimized away, so there should be no pipeline stages in
- // the explain.
- assert.eq(1, viewsDB.case_sensitive_coll.aggregate([{$match: {f: "case"}}]).itcount());
- assert.eq(3, viewsDB.case_insensitive_view.aggregate([{$match: {f: "case"}}]).itcount());
- explain = viewsDB.case_insensitive_view.explain().aggregate([{$match: {f: "case"}}]);
- assert.neq(null, explain.queryPlanner, tojson(explain));
- assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain));
-
- // Test that count against a view with a default collation correctly uses the collation.
- assert.eq(1, viewsDB.case_sensitive_coll.count({f: "case"}));
- assert.eq(3, viewsDB.case_insensitive_view.count({f: "case"}));
- explain = viewsDB.case_insensitive_view.explain().count({f: "case"});
- cursorStage = getAggPlanStage(explain, "$cursor");
- assert.neq(null, cursorStage, tojson(explain));
- assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage));
-
- // Test that distinct against a view with a default collation correctly uses the collation.
- assert.eq(3, viewsDB.case_sensitive_coll.distinct("f").length);
- assert.eq(1, viewsDB.case_insensitive_view.distinct("f").length);
- explain = viewsDB.case_insensitive_view.explain().distinct("f");
- cursorStage = getAggPlanStage(explain, "$cursor");
- assert.neq(null, cursorStage, tojson(explain));
- assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage));
-
- // Test that find against a view with a default collation correctly uses the collation.
- // We expect the pipeline to be optimized away, so there should be no pipeline stages in
- // the explain output.
- let findRes = viewsDB.runCommand({find: "case_sensitive_coll", filter: {f: "case"}});
- assert.commandWorked(findRes);
- assert.eq(1, findRes.cursor.firstBatch.length);
- findRes = viewsDB.runCommand({find: "case_insensitive_view", filter: {f: "case"}});
- assert.commandWorked(findRes);
- assert.eq(3, findRes.cursor.firstBatch.length);
- explain = viewsDB.runCommand({explain: {find: "case_insensitive_view", filter: {f: "case"}}});
- assert.neq(null, explain.queryPlanner, tojson(explain));
- assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain));
+assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleView2", pipeline: [lookupSimpleView, graphLookupUkCollection], cursor: {}}));
+
+// You cannot perform an aggregation involving multiple views if the views don't all have
+// the same default collation.
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "filView", pipeline: [lookupSimpleView], cursor: {}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({aggregate: "simpleView", pipeline: [lookupFilView], cursor: {}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "simpleCollection",
+ pipeline: [lookupFilView, graphLookupSimpleView],
+ cursor: {}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// You cannot create a view that depends on another view with a different default collation.
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "zhView", viewOn: "filView", collation: {locale: "zh"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ create: "zhView",
+ viewOn: "simpleCollection",
+ pipeline: [lookupFilView],
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ create: "zhView",
+ viewOn: "simpleCollection",
+ pipeline: [makeNestedLookupFilView("zhView")],
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ create: "zhView",
+ viewOn: "simpleCollection",
+ pipeline: [graphLookupSimpleView],
+ collation: {locale: "zh"}
+}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// You cannot modify a view to depend on another view with a different default collation.
+assert.commandWorked(
+ viewsDB.runCommand({create: "esView", viewOn: "simpleCollection", collation: {locale: "es"}}));
+assert.commandFailedWithCode(
+ viewsDB.runCommand({collMod: "esView", viewOn: "filView", pipeline: []}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {collMod: "esView", viewOn: "simpleCollection", pipeline: [lookupSimpleView]}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand(
+ {collMod: "esView", viewOn: "simpleCollection", pipeline: [graphLookupFilView]}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// Views cannot be dropped and recreated with a different collation if other views depend on
+// that view.
+assert.commandWorked(
+ viewsDB.runCommand({create: "filView2", viewOn: "filView", collation: {locale: "fil"}}));
+assert.commandWorked(viewsDB.runCommand({drop: "filView"}));
+assert.commandFailedWithCode(viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandWorked(
+ viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}}));
+
+// Views cannot be dropped and recreated with a different collation if other views depend on
+// that view via $lookup or $graphLookup.
+assert.commandWorked(viewsDB.runCommand(
+ {collMod: "filView2", viewOn: "simpleCollection", pipeline: [lookupFilView]}));
+assert.commandWorked(viewsDB.runCommand({drop: "filView"}));
+assert.commandFailedWithCode(viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandWorked(viewsDB.runCommand(
+ {create: "filView", viewOn: "ukCollection", pipeline: [], collation: {locale: "fil"}}));
+
+assert.commandWorked(viewsDB.runCommand(
+ {collMod: "filView2", viewOn: "simpleCollection", pipeline: [graphLookupFilView]}));
+assert.commandWorked(viewsDB.runCommand({drop: "filView"}));
+assert.commandFailedWithCode(viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// If two views "A" and "C" have different collations and depend on the namespace "B", then "B"
+// cannot be created as a view.
+assert.commandWorked(viewsDB.runCommand({create: "A", viewOn: "B", collation: {locale: "hsb"}}));
+assert.commandWorked(
+ viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}}));
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandWorked(viewsDB.runCommand({drop: "B"}));
+assert.commandWorked(viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}}));
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "wae"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({create: "B", viewOn: "other"}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// Make sure that when an operation does not specify a collation, it correctly uses the
+// default collation associated with the view. For this, we set up a new backing collection
+// and a case-insensitive view on it.
+assert.commandWorked(viewsDB.runCommand({create: "case_sensitive_coll"}));
+assert.commandWorked(viewsDB.runCommand({
+ create: "case_insensitive_view",
+ viewOn: "case_sensitive_coll",
+ collation: {locale: "en", strength: 1}
+}));
+
+assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "case"}));
+assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "Case"}));
+assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "CASE"}));
+
+let explain, cursorStage;
+
+// Test that aggregate against a view with a default collation correctly uses the collation.
+// We expect the pipeline to be optimized away, so there should be no pipeline stages in
+// the explain output.
+assert.eq(1, viewsDB.case_sensitive_coll.aggregate([{$match: {f: "case"}}]).itcount());
+assert.eq(3, viewsDB.case_insensitive_view.aggregate([{$match: {f: "case"}}]).itcount());
+explain = viewsDB.case_insensitive_view.explain().aggregate([{$match: {f: "case"}}]);
+assert.neq(null, explain.queryPlanner, tojson(explain));
+assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain));
+
+// Test that count against a view with a default collation correctly uses the collation.
+assert.eq(1, viewsDB.case_sensitive_coll.count({f: "case"}));
+assert.eq(3, viewsDB.case_insensitive_view.count({f: "case"}));
+explain = viewsDB.case_insensitive_view.explain().count({f: "case"});
+cursorStage = getAggPlanStage(explain, "$cursor");
+assert.neq(null, cursorStage, tojson(explain));
+assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage));
+
+// Test that distinct against a view with a default collation correctly uses the collation.
+assert.eq(3, viewsDB.case_sensitive_coll.distinct("f").length);
+assert.eq(1, viewsDB.case_insensitive_view.distinct("f").length);
+explain = viewsDB.case_insensitive_view.explain().distinct("f");
+cursorStage = getAggPlanStage(explain, "$cursor");
+assert.neq(null, cursorStage, tojson(explain));
+assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage));
+
+// Test that find against a view with a default collation correctly uses the collation.
+// We expect the pipeline to be optimized away, so there should be no pipeline stages in
+// the explain output.
+let findRes = viewsDB.runCommand({find: "case_sensitive_coll", filter: {f: "case"}});
+assert.commandWorked(findRes);
+assert.eq(1, findRes.cursor.firstBatch.length);
+findRes = viewsDB.runCommand({find: "case_insensitive_view", filter: {f: "case"}});
+assert.commandWorked(findRes);
+assert.eq(3, findRes.cursor.firstBatch.length);
+explain = viewsDB.runCommand({explain: {find: "case_insensitive_view", filter: {f: "case"}}});
+assert.neq(null, explain.queryPlanner, tojson(explain));
+assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain));
}());
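
A note on the collation used above: strength: 1 selects ICU primary strength, which
compares base characters only and ignores case and diacritics. A minimal shell sketch of
the comparison this implies, using a hypothetical scratch collection (the
"collation_sketch" name is illustrative, not part of the test suite):

    const sketchColl = db.getSiblingDB("views_collation").collation_sketch;
    sketchColl.drop();
    assert.writeOK(sketchColl.insert([{f: "case"}, {f: "Case"}, {f: "CASE"}]));
    // Under primary strength, all three values compare equal to "case"...
    assert.eq(3,
              sketchColl.find({f: "case"}).collation({locale: "en", strength: 1}).itcount());
    // ...while the default binary comparison matches only the exact string.
    assert.eq(1, sketchColl.find({f: "case"}).itcount());
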
diff --git a/jstests/core/views/views_count.js b/jstests/core/views/views_count.js
index cfef3775569..8fa24191959 100644
--- a/jstests/core/views/views_count.js
+++ b/jstests/core/views/views_count.js
@@ -3,83 +3,82 @@
// @tags: [requires_fastcount]
(function() {
- "use strict";
+"use strict";
- var viewsDB = db.getSiblingDB("views_count");
- assert.commandWorked(viewsDB.dropDatabase());
+var viewsDB = db.getSiblingDB("views_count");
+assert.commandWorked(viewsDB.dropDatabase());
- // Insert documents into a collection.
- let coll = viewsDB.getCollection("coll");
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; i++) {
- bulk.insert({x: i});
- }
- assert.writeOK(bulk.execute());
+// Insert documents into a collection.
+let coll = viewsDB.getCollection("coll");
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 10; i++) {
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute());
- // Create views on the data.
- assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"}));
- assert.commandWorked(viewsDB.runCommand(
- {create: "greaterThanThreeView", viewOn: "coll", pipeline: [{$match: {x: {$gt: 3}}}]}));
- assert.commandWorked(viewsDB.runCommand({
- create: "lessThanSevenView",
- viewOn: "greaterThanThreeView",
- pipeline: [{$match: {x: {$lt: 7}}}]
- }));
- let identityView = viewsDB.getCollection("identityView");
- let greaterThanThreeView = viewsDB.getCollection("greaterThanThreeView");
- let lessThanSevenView = viewsDB.getCollection("lessThanSevenView");
+// Create views on the data.
+assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"}));
+assert.commandWorked(viewsDB.runCommand(
+ {create: "greaterThanThreeView", viewOn: "coll", pipeline: [{$match: {x: {$gt: 3}}}]}));
+assert.commandWorked(viewsDB.runCommand({
+ create: "lessThanSevenView",
+ viewOn: "greaterThanThreeView",
+ pipeline: [{$match: {x: {$lt: 7}}}]
+}));
+let identityView = viewsDB.getCollection("identityView");
+let greaterThanThreeView = viewsDB.getCollection("greaterThanThreeView");
+let lessThanSevenView = viewsDB.getCollection("lessThanSevenView");
- // Count on a view, with or without a query.
- assert.eq(coll.count(), identityView.count());
- assert.eq(coll.count({}), identityView.count({}));
- assert.eq(coll.count({x: {$exists: true}}), identityView.count({x: {$exists: true}}));
- assert.eq(coll.count({x: 0}), identityView.count({x: 0}));
- assert.eq(6, greaterThanThreeView.count());
- assert.eq(6, greaterThanThreeView.count({}));
- assert.eq(3, lessThanSevenView.count());
- assert.eq(3, lessThanSevenView.count({}));
+// Count on a view, with or without a query.
+assert.eq(coll.count(), identityView.count());
+assert.eq(coll.count({}), identityView.count({}));
+assert.eq(coll.count({x: {$exists: true}}), identityView.count({x: {$exists: true}}));
+assert.eq(coll.count({x: 0}), identityView.count({x: 0}));
+assert.eq(6, greaterThanThreeView.count());
+assert.eq(6, greaterThanThreeView.count({}));
+assert.eq(3, lessThanSevenView.count());
+assert.eq(3, lessThanSevenView.count({}));
- // Test empty counts.
- assert.eq(coll.count({x: -1}), identityView.count({x: -1}));
- assert.eq(0, greaterThanThreeView.count({x: 2}));
- assert.eq(0, lessThanSevenView.count({x: 9}));
+// Test empty counts.
+assert.eq(coll.count({x: -1}), identityView.count({x: -1}));
+assert.eq(0, greaterThanThreeView.count({x: 2}));
+assert.eq(0, lessThanSevenView.count({x: 9}));
- // Counting on views works with limit and skip.
- assert.eq(7, identityView.count({x: {$exists: true}}, {skip: 3}));
- assert.eq(3, greaterThanThreeView.count({x: {$lt: 100}}, {limit: 3}));
- assert.eq(1, lessThanSevenView.count({}, {skip: 1, limit: 1}));
+// Counting on views works with limit and skip.
+assert.eq(7, identityView.count({x: {$exists: true}}, {skip: 3}));
+assert.eq(3, greaterThanThreeView.count({x: {$lt: 100}}, {limit: 3}));
+assert.eq(1, lessThanSevenView.count({}, {skip: 1, limit: 1}));
- // Count with explain works on a view.
- assert.commandWorked(lessThanSevenView.explain().count());
- assert.commandWorked(greaterThanThreeView.explain().count({x: 6}));
- let explainPlan = lessThanSevenView.explain().count({foo: "bar"});
- assert.commandWorked(explainPlan);
- assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_count.coll");
+// Count with explain works on a view.
+assert.commandWorked(lessThanSevenView.explain().count());
+assert.commandWorked(greaterThanThreeView.explain().count({x: 6}));
+let explainPlan = lessThanSevenView.explain().count({foo: "bar"});
+assert.commandWorked(explainPlan);
+assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_count.coll");
- // Count with explicit explain modes works on a view.
- explainPlan =
- assert.commandWorked(lessThanSevenView.explain("queryPlanner").count({x: {$gte: 5}}));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
- assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+// Count with explicit explain modes works on a view.
+explainPlan = assert.commandWorked(lessThanSevenView.explain("queryPlanner").count({x: {$gte: 5}}));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
+assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- explainPlan =
- assert.commandWorked(lessThanSevenView.explain("executionStats").count({x: {$gte: 5}}));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
- assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+explainPlan =
+ assert.commandWorked(lessThanSevenView.explain("executionStats").count({x: {$gte: 5}}));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
- explainPlan =
- assert.commandWorked(lessThanSevenView.explain("allPlansExecution").count({x: {$gte: 5}}));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
- assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+explainPlan =
+ assert.commandWorked(lessThanSevenView.explain("allPlansExecution").count({x: {$gte: 5}}));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
- // Count with hint works on a view.
- assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"}));
+// Count with hint works on a view.
+assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({count: "identityView", collation: {locale: "en_US"}}),
- ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({count: "identityView", collation: {locale: "en_US"}}),
+ ErrorCodes.OptionNotSupportedOnView);
}());
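
Count against a view is answered by rewriting the request into the view's aggregation
pipeline, so the skip/limit cases above behave like $skip, $limit and $count stages
appended to that pipeline. A sketch of the equivalence, assuming the views_count data
created above is still in place:

    const countDB = db.getSiblingDB("views_count");
    const viaAggregate =
        countDB.lessThanSevenView.aggregate([{$skip: 1}, {$limit: 1}, {$count: "n"}])
            .toArray()[0]
            .n;
    // 3 documents pass the view pipeline; skip 1, cap at 1, leaving a count of 1.
    assert.eq(1, viaAggregate);
    assert.eq(viaAggregate, countDB.lessThanSevenView.count({}, {skip: 1, limit: 1}));
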
diff --git a/jstests/core/views/views_creation.js b/jstests/core/views/views_creation.js
index 2312b78e646..1765b9c4182 100644
--- a/jstests/core/views/views_creation.js
+++ b/jstests/core/views/views_creation.js
@@ -4,112 +4,110 @@
// ]
(function() {
- "use strict";
-
- // For arrayEq.
- load("jstests/aggregation/extras/utils.js");
-
- const viewsDBName = "views_creation";
-
- let viewsDB = db.getSiblingDB(viewsDBName);
- assert.commandWorked(viewsDB.dropDatabase());
-
- let collNames = viewsDB.getCollectionNames();
- assert.eq(0, collNames.length, tojson(collNames));
-
- // You cannot create a view that starts with 'system.'.
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "system.special", viewOn: "collection"}),
- ErrorCodes.InvalidNamespace,
- "Created an illegal view named 'system.views'");
-
- // Collections that start with 'system.' that are not special to MongoDB fail with a different
- // error code.
- assert.commandFailedWithCode(viewsDB.runCommand({create: "system.foo", viewOn: "collection"}),
- ErrorCodes.InvalidNamespace,
- "Created an illegal view named 'system.foo'");
-
- // Create a collection for test purposes.
- assert.commandWorked(viewsDB.runCommand({create: "collection"}));
-
- let pipe = [{$match: {}}];
-
- // Create a "regular" view on a collection.
- assert.commandWorked(
- viewsDB.runCommand({create: "view", viewOn: "collection", pipeline: pipe}));
-
- collNames = viewsDB.getCollectionNames().filter((function(coll) {
- return !coll.startsWith("system.");
- }));
- assert.eq(2, collNames.length, tojson(collNames));
- let res = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}});
- assert.commandWorked(res);
-
- // Ensure that the output of listCollections has all the expected options for a view.
- let expectedListCollectionsOutput = [{
- name: "view",
- type: "view",
- options: {viewOn: "collection", pipeline: pipe},
- info: {readOnly: true}
- }];
- assert(arrayEq(res.cursor.firstBatch, expectedListCollectionsOutput), tojson({
- expectedListCollectionsOutput: expectedListCollectionsOutput,
- got: res.cursor.firstBatch
- }));
-
- // Create a view on a non-existent collection.
- assert.commandWorked(
- viewsDB.runCommand({create: "viewOnNonexistent", viewOn: "nonexistent", pipeline: pipe}));
-
- // Create a view but don't specify a pipeline; this should default to something sane.
- assert.commandWorked(
- viewsDB.runCommand({create: "viewWithDefaultPipeline", viewOn: "collection"}));
-
- // Specifying a pipeline but no view namespace must fail.
- assert.commandFailed(viewsDB.runCommand({create: "viewNoViewNamespace", pipeline: pipe}));
-
- // Create a view on another view.
- assert.commandWorked(
- viewsDB.runCommand({create: "viewOnView", viewOn: "view", pipeline: pipe}));
-
- // View names are constrained to the same limitations as collection names.
- assert.commandFailed(viewsDB.runCommand({create: "", viewOn: "collection", pipeline: pipe}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "system.local.new", viewOn: "collection", pipeline: pipe}),
- ErrorCodes.InvalidNamespace);
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "dollar$", viewOn: "collection", pipeline: pipe}),
- ErrorCodes.InvalidNamespace);
-
- // You cannot create a view with a $out stage, by itself or nested inside of a different stage.
- const ERROR_CODE_OUT_BANNED_IN_LOOKUP = 51047;
- const outStage = {$out: "nonExistentCollection"};
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "viewWithOut", viewOn: "collection", pipeline: [outStage]}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(viewsDB.runCommand({
- create: "viewWithOutInLookup",
- viewOn: "collection",
- pipeline: [{$lookup: {from: "other", pipeline: [outStage], as: "result"}}]
- }),
- ERROR_CODE_OUT_BANNED_IN_LOOKUP);
- assert.commandFailedWithCode(viewsDB.runCommand({
- create: "viewWithOutInFacet",
- viewOn: "collection",
- pipeline: [{$facet: {output: [outStage]}}]
- }),
- 40600);
-
- // These test that, when an existing view in system.views is invalid because of a $out in the
- // pipeline, the database errors on creation of a new view.
- assert.commandWorked(viewsDB.system.views.insert({
- _id: `${viewsDBName}.invalidView`,
- viewOn: "collection",
- pipeline: [{$project: {_id: false}}, {$out: "notExistingCollection"}]
- }));
- assert.commandFailedWithCode(
- viewsDB.runCommand({create: "viewWithBadViewCatalog", viewOn: "collection", pipeline: []}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandWorked(
- viewsDB.system.views.remove({_id: `${viewsDBName}.invalidView`}, {justOne: true}));
+"use strict";
+
+// For arrayEq.
+load("jstests/aggregation/extras/utils.js");
+
+const viewsDBName = "views_creation";
+
+let viewsDB = db.getSiblingDB(viewsDBName);
+assert.commandWorked(viewsDB.dropDatabase());
+
+let collNames = viewsDB.getCollectionNames();
+assert.eq(0, collNames.length, tojson(collNames));
+
+// You cannot create a view that starts with 'system.'.
+assert.commandFailedWithCode(viewsDB.runCommand({create: "system.special", viewOn: "collection"}),
+ ErrorCodes.InvalidNamespace,
+                              "Created an illegal view named 'system.special'");
+
+// Names that start with 'system.' are rejected as view names even when they are not
+// otherwise special to MongoDB.
+assert.commandFailedWithCode(viewsDB.runCommand({create: "system.foo", viewOn: "collection"}),
+ ErrorCodes.InvalidNamespace,
+ "Created an illegal view named 'system.foo'");
+
+// Create a collection for test purposes.
+assert.commandWorked(viewsDB.runCommand({create: "collection"}));
+
+let pipe = [{$match: {}}];
+
+// Create a "regular" view on a collection.
+assert.commandWorked(viewsDB.runCommand({create: "view", viewOn: "collection", pipeline: pipe}));
+
+collNames = viewsDB.getCollectionNames().filter((function(coll) {
+ return !coll.startsWith("system.");
+}));
+assert.eq(2, collNames.length, tojson(collNames));
+let res = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}});
+assert.commandWorked(res);
+
+// Ensure that the output of listCollections has all the expected options for a view.
+let expectedListCollectionsOutput = [{
+ name: "view",
+ type: "view",
+ options: {viewOn: "collection", pipeline: pipe},
+ info: {readOnly: true}
+}];
+assert(arrayEq(res.cursor.firstBatch, expectedListCollectionsOutput), tojson({
+ expectedListCollectionsOutput: expectedListCollectionsOutput,
+ got: res.cursor.firstBatch
+ }));
+
+// Create a view on a non-existent collection.
+assert.commandWorked(
+ viewsDB.runCommand({create: "viewOnNonexistent", viewOn: "nonexistent", pipeline: pipe}));
+
+// Create a view but don't specify a pipeline; this should default to something sane.
+assert.commandWorked(viewsDB.runCommand({create: "viewWithDefaultPipeline", viewOn: "collection"}));
+
+// Specifying a pipeline but no view namespace must fail.
+assert.commandFailed(viewsDB.runCommand({create: "viewNoViewNamespace", pipeline: pipe}));
+
+// Create a view on another view.
+assert.commandWorked(viewsDB.runCommand({create: "viewOnView", viewOn: "view", pipeline: pipe}));
+
+// View names are constrained to the same limitations as collection names.
+assert.commandFailed(viewsDB.runCommand({create: "", viewOn: "collection", pipeline: pipe}));
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "system.local.new", viewOn: "collection", pipeline: pipe}),
+ ErrorCodes.InvalidNamespace);
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "dollar$", viewOn: "collection", pipeline: pipe}),
+ ErrorCodes.InvalidNamespace);
+
+// You cannot create a view with a $out stage, by itself or nested inside of a different stage.
+const ERROR_CODE_OUT_BANNED_IN_LOOKUP = 51047;
+const outStage = {
+ $out: "nonExistentCollection"
+};
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "viewWithOut", viewOn: "collection", pipeline: [outStage]}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ create: "viewWithOutInLookup",
+ viewOn: "collection",
+ pipeline: [{$lookup: {from: "other", pipeline: [outStage], as: "result"}}]
+}),
+ ERROR_CODE_OUT_BANNED_IN_LOOKUP);
+assert.commandFailedWithCode(viewsDB.runCommand({
+ create: "viewWithOutInFacet",
+ viewOn: "collection",
+ pipeline: [{$facet: {output: [outStage]}}]
+}),
+ 40600);
+
+// This tests that, when an existing view in system.views is invalid because of a $out in its
+// pipeline, the database errors on creation of any new view.
+assert.commandWorked(viewsDB.system.views.insert({
+ _id: `${viewsDBName}.invalidView`,
+ viewOn: "collection",
+ pipeline: [{$project: {_id: false}}, {$out: "notExistingCollection"}]
+}));
+assert.commandFailedWithCode(
+ viewsDB.runCommand({create: "viewWithBadViewCatalog", viewOn: "collection", pipeline: []}),
+ ErrorCodes.OptionNotSupportedOnView);
+assert.commandWorked(
+ viewsDB.system.views.remove({_id: `${viewsDBName}.invalidView`}, {justOne: true}));
}());
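
The direct writes to system.views above work because each view is durably recorded there
as a single catalog document of the shape {_id: "<db>.<view>", viewOn: <source>,
pipeline: <array>}. A sketch that inspects the entry produced by the earlier create of
"view", assuming the database has not been dropped in the meantime:

    const creationDB = db.getSiblingDB("views_creation");
    const entry = creationDB.system.views.findOne({_id: "views_creation.view"});
    assert.eq("collection", entry.viewOn);
    assert.eq([{$match: {}}], entry.pipeline);
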
diff --git a/jstests/core/views/views_distinct.js b/jstests/core/views/views_distinct.js
index 29ddcdc5269..8ef9e208a81 100644
--- a/jstests/core/views/views_distinct.js
+++ b/jstests/core/views/views_distinct.js
@@ -1,142 +1,140 @@
// Test the distinct command with views.
(function() {
- "use strict";
-
- // For arrayEq. We don't use array.eq as it does an ordered comparison on arrays but we don't
- // care about order in the distinct response.
- load("jstests/aggregation/extras/utils.js");
-
- var viewsDB = db.getSiblingDB("views_distinct");
- assert.commandWorked(viewsDB.dropDatabase());
-
- // Populate a collection with some test data.
- let allDocuments = [];
- allDocuments.push({_id: "New York", state: "NY", pop: 7});
- allDocuments.push({_id: "Newark", state: "NJ", pop: 3});
- allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10});
- allDocuments.push({_id: "San Francisco", state: "CA", pop: 4});
- allDocuments.push({_id: "Trenton", state: "NJ", pop: 5});
-
- let coll = viewsDB.getCollection("coll");
- let bulk = coll.initializeUnorderedBulkOp();
- allDocuments.forEach(function(doc) {
- bulk.insert(doc);
- });
- assert.writeOK(bulk.execute());
-
- // Create views on the data.
- assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"}));
- assert.commandWorked(viewsDB.runCommand(
- {create: "largePopView", viewOn: "identityView", pipeline: [{$match: {pop: {$gt: 5}}}]}));
- let identityView = viewsDB.getCollection("identityView");
- let largePopView = viewsDB.getCollection("largePopView");
-
- function assertIdentityViewDistinctMatchesCollection(key, query) {
- query = (query === undefined) ? {} : query;
- const collDistinct = coll.distinct(key, query);
- const viewDistinct = identityView.distinct(key, query);
- assert(arrayEq(collDistinct, viewDistinct),
- "Distinct on a collection did not match distinct on its identity view; got " +
- tojson(viewDistinct) + " but expected " + tojson(collDistinct));
- }
-
- // Test basic distinct requests on known fields without a query.
- assertIdentityViewDistinctMatchesCollection("pop");
- assertIdentityViewDistinctMatchesCollection("_id");
- assert(arrayEq([7, 10], largePopView.distinct("pop")));
- assert(arrayEq(["New York", "Palo Alto"], largePopView.distinct("_id")));
-
- // Test distinct with the presence of a query.
- assertIdentityViewDistinctMatchesCollection("state", {});
- assertIdentityViewDistinctMatchesCollection("pop", {pop: {$exists: true}});
- assertIdentityViewDistinctMatchesCollection("state", {pop: {$gt: 3}});
- assertIdentityViewDistinctMatchesCollection("_id", {state: "CA"});
- assert(arrayEq(["CA"], largePopView.distinct("state", {pop: {$gte: 8}})));
- assert(arrayEq([7], largePopView.distinct("pop", {state: "NY"})));
-
- // Test distinct where we expect an empty set response.
- assertIdentityViewDistinctMatchesCollection("nonexistent");
- assertIdentityViewDistinctMatchesCollection("pop", {pop: {$gt: 1000}});
- assert.eq([], largePopView.distinct("nonexistent"));
- assert.eq([], largePopView.distinct("_id", {state: "FL"}));
-
- // Explain works with distinct.
- assert.commandWorked(identityView.explain().distinct("_id"));
- assert.commandWorked(largePopView.explain().distinct("pop", {state: "CA"}));
- let explainPlan = largePopView.explain().count({foo: "bar"});
- assert.commandWorked(explainPlan);
- assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"],
- "views_distinct.coll");
-
- // Distinct with explicit explain modes works on a view.
- explainPlan = assert.commandWorked(largePopView.explain("queryPlanner").distinct("pop"));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
- assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
-
- explainPlan = assert.commandWorked(largePopView.explain("executionStats").distinct("pop"));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
- assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
-
- explainPlan = assert.commandWorked(largePopView.explain("allPlansExecution").distinct("pop"));
- assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
- assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
- assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
-
- // Distinct commands fail when they try to change the collation of a view.
- assert.commandFailedWithCode(
- viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}),
- ErrorCodes.OptionNotSupportedOnView);
-
- // Test distinct on nested objects, nested arrays and nullish values.
- coll.drop();
- allDocuments = [];
- allDocuments.push({a: 1, b: [2, 3, [4, 5], {c: 6}], d: {e: [1, 2]}});
- allDocuments.push({a: [1], b: [2, 3, 4, [5]], c: 6, d: {e: 1}});
- allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [[{e: 1}]]});
- allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [{e: {f: 1}}]});
- allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: {e: [[{f: 1}]]}});
- allDocuments.push({a: [1, 2], b: 3, c: [6], d: [{e: 1}, {e: [1, 2]}, {e: {someObject: 1}}]});
- allDocuments.push({a: [1, 2], b: [4, 5], c: [undefined], d: [1]});
- allDocuments.push({a: null, b: [4, 5, null, undefined], c: [], d: {e: null}});
- allDocuments.push({a: undefined, b: null, c: [null], d: {e: undefined}});
-
- bulk = coll.initializeUnorderedBulkOp();
- allDocuments.forEach(function(doc) {
- bulk.insert(doc);
- });
- assert.writeOK(bulk.execute());
-
- assertIdentityViewDistinctMatchesCollection("a");
- assertIdentityViewDistinctMatchesCollection("b");
- assertIdentityViewDistinctMatchesCollection("c");
- assertIdentityViewDistinctMatchesCollection("d");
- assertIdentityViewDistinctMatchesCollection("e");
- assertIdentityViewDistinctMatchesCollection("d.e");
- assertIdentityViewDistinctMatchesCollection("d.e.f");
-
- // Test distinct on a deeply nested object through arrays.
- coll.drop();
- assert.commandWorked(coll.insert({
- a: [
- {b: [{c: [{d: 1}]}]},
- {b: {c: "not leaf"}},
- {b: {c: [{d: 2, "not leaf": "not leaf"}]}},
- {b: [{c: {d: 3}}]},
- {b: {c: {d: 4}}, "not leaf": "not leaf"},
- "not leaf",
- // The documents below should not get traversed by the distinct() because of the
- // doubly-nested arrays.
- [[{b: {c: {d: "not leaf"}}}]],
- [{b: {c: [[{d: "not leaf"}]]}}],
- ]
- }));
- assert.commandWorked(coll.insert({a: "not leaf"}));
- assertIdentityViewDistinctMatchesCollection("a");
- assertIdentityViewDistinctMatchesCollection("a.b");
- assertIdentityViewDistinctMatchesCollection("a.b.c");
- assertIdentityViewDistinctMatchesCollection("a.b.c.d");
-
+"use strict";
+
+// For arrayEq. We don't use array.eq because it does an ordered comparison on arrays, and we
+// don't care about order in the distinct response.
+load("jstests/aggregation/extras/utils.js");
+
+var viewsDB = db.getSiblingDB("views_distinct");
+assert.commandWorked(viewsDB.dropDatabase());
+
+// Populate a collection with some test data.
+let allDocuments = [];
+allDocuments.push({_id: "New York", state: "NY", pop: 7});
+allDocuments.push({_id: "Newark", state: "NJ", pop: 3});
+allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10});
+allDocuments.push({_id: "San Francisco", state: "CA", pop: 4});
+allDocuments.push({_id: "Trenton", state: "NJ", pop: 5});
+
+let coll = viewsDB.getCollection("coll");
+let bulk = coll.initializeUnorderedBulkOp();
+allDocuments.forEach(function(doc) {
+ bulk.insert(doc);
+});
+assert.writeOK(bulk.execute());
+
+// Create views on the data.
+assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"}));
+assert.commandWorked(viewsDB.runCommand(
+ {create: "largePopView", viewOn: "identityView", pipeline: [{$match: {pop: {$gt: 5}}}]}));
+let identityView = viewsDB.getCollection("identityView");
+let largePopView = viewsDB.getCollection("largePopView");
+
+function assertIdentityViewDistinctMatchesCollection(key, query) {
+ query = (query === undefined) ? {} : query;
+ const collDistinct = coll.distinct(key, query);
+ const viewDistinct = identityView.distinct(key, query);
+ assert(arrayEq(collDistinct, viewDistinct),
+ "Distinct on a collection did not match distinct on its identity view; got " +
+ tojson(viewDistinct) + " but expected " + tojson(collDistinct));
+}
+
+// Test basic distinct requests on known fields without a query.
+assertIdentityViewDistinctMatchesCollection("pop");
+assertIdentityViewDistinctMatchesCollection("_id");
+assert(arrayEq([7, 10], largePopView.distinct("pop")));
+assert(arrayEq(["New York", "Palo Alto"], largePopView.distinct("_id")));
+
+// Test distinct with the presence of a query.
+assertIdentityViewDistinctMatchesCollection("state", {});
+assertIdentityViewDistinctMatchesCollection("pop", {pop: {$exists: true}});
+assertIdentityViewDistinctMatchesCollection("state", {pop: {$gt: 3}});
+assertIdentityViewDistinctMatchesCollection("_id", {state: "CA"});
+assert(arrayEq(["CA"], largePopView.distinct("state", {pop: {$gte: 8}})));
+assert(arrayEq([7], largePopView.distinct("pop", {state: "NY"})));
+
+// Test distinct where we expect an empty set response.
+assertIdentityViewDistinctMatchesCollection("nonexistent");
+assertIdentityViewDistinctMatchesCollection("pop", {pop: {$gt: 1000}});
+assert.eq([], largePopView.distinct("nonexistent"));
+assert.eq([], largePopView.distinct("_id", {state: "FL"}));
+
+// Explain works with distinct.
+assert.commandWorked(identityView.explain().distinct("_id"));
+assert.commandWorked(largePopView.explain().distinct("pop", {state: "CA"}));
+let explainPlan = largePopView.explain().count({foo: "bar"});
+assert.commandWorked(explainPlan);
+assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_distinct.coll");
+
+// Distinct with explicit explain modes works on a view.
+explainPlan = assert.commandWorked(largePopView.explain("queryPlanner").distinct("pop"));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
+assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+
+explainPlan = assert.commandWorked(largePopView.explain("executionStats").distinct("pop"));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+explainPlan = assert.commandWorked(largePopView.explain("allPlansExecution").distinct("pop"));
+assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll");
+assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
+assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+
+// Distinct commands fail when they try to change the collation of a view.
+assert.commandFailedWithCode(
+ viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}),
+ ErrorCodes.OptionNotSupportedOnView);
+
+// Test distinct on nested objects, nested arrays and nullish values.
+coll.drop();
+allDocuments = [];
+allDocuments.push({a: 1, b: [2, 3, [4, 5], {c: 6}], d: {e: [1, 2]}});
+allDocuments.push({a: [1], b: [2, 3, 4, [5]], c: 6, d: {e: 1}});
+allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [[{e: 1}]]});
+allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [{e: {f: 1}}]});
+allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: {e: [[{f: 1}]]}});
+allDocuments.push({a: [1, 2], b: 3, c: [6], d: [{e: 1}, {e: [1, 2]}, {e: {someObject: 1}}]});
+allDocuments.push({a: [1, 2], b: [4, 5], c: [undefined], d: [1]});
+allDocuments.push({a: null, b: [4, 5, null, undefined], c: [], d: {e: null}});
+allDocuments.push({a: undefined, b: null, c: [null], d: {e: undefined}});
+
+bulk = coll.initializeUnorderedBulkOp();
+allDocuments.forEach(function(doc) {
+ bulk.insert(doc);
+});
+assert.writeOK(bulk.execute());
+
+assertIdentityViewDistinctMatchesCollection("a");
+assertIdentityViewDistinctMatchesCollection("b");
+assertIdentityViewDistinctMatchesCollection("c");
+assertIdentityViewDistinctMatchesCollection("d");
+assertIdentityViewDistinctMatchesCollection("e");
+assertIdentityViewDistinctMatchesCollection("d.e");
+assertIdentityViewDistinctMatchesCollection("d.e.f");
+
+// Test distinct on a deeply nested object through arrays.
+coll.drop();
+assert.commandWorked(coll.insert({
+ a: [
+ {b: [{c: [{d: 1}]}]},
+ {b: {c: "not leaf"}},
+ {b: {c: [{d: 2, "not leaf": "not leaf"}]}},
+ {b: [{c: {d: 3}}]},
+ {b: {c: {d: 4}}, "not leaf": "not leaf"},
+ "not leaf",
+ // The documents below should not get traversed by the distinct() because of the
+ // doubly-nested arrays.
+ [[{b: {c: {d: "not leaf"}}}]],
+ [{b: {c: [[{d: "not leaf"}]]}}],
+ ]
+}));
+assert.commandWorked(coll.insert({a: "not leaf"}));
+assertIdentityViewDistinctMatchesCollection("a");
+assertIdentityViewDistinctMatchesCollection("a.b");
+assertIdentityViewDistinctMatchesCollection("a.b.c");
+assertIdentityViewDistinctMatchesCollection("a.b.c.d");
}());
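
The "not leaf" cases above follow from distinct unwinding exactly one level of arrays
along the key path. A minimal sketch of that rule on a hypothetical scratch collection
("distinct_sketch" is illustrative; arrayEq comes from the utils.js loaded above):

    const scratch = db.getSiblingDB("views_distinct").distinct_sketch;
    scratch.drop();
    assert.writeOK(scratch.insert({a: [1, [2]]}));
    // 1 is reached by unwinding the outer array; the nested [2] stays an array value.
    assert(arrayEq([1, [2]], scratch.distinct("a")));
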
diff --git a/jstests/core/views/views_drop.js b/jstests/core/views/views_drop.js
index d93def18eae..2f0b9b7e62e 100644
--- a/jstests/core/views/views_drop.js
+++ b/jstests/core/views/views_drop.js
@@ -7,30 +7,29 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- let viewsDBName = "views_drop";
- let viewsDB = db.getSiblingDB(viewsDBName);
- viewsDB.dropDatabase();
+let viewsDBName = "views_drop";
+let viewsDB = db.getSiblingDB(viewsDBName);
+viewsDB.dropDatabase();
- // Create collection and a view on it.
- assert.writeOK(viewsDB.coll.insert({x: 1}));
- assert.commandWorked(viewsDB.createView("view", "coll", []));
- assert.eq(
- viewsDB.view.find({}, {_id: 0}).toArray(), [{x: 1}], "couldn't find expected doc in view");
+// Create collection and a view on it.
+assert.writeOK(viewsDB.coll.insert({x: 1}));
+assert.commandWorked(viewsDB.createView("view", "coll", []));
+assert.eq(
+ viewsDB.view.find({}, {_id: 0}).toArray(), [{x: 1}], "couldn't find expected doc in view");
- // Drop collection, view and system.views in that order, checking along the way.
- assert(viewsDB.coll.drop(), "couldn't drop coll");
- assert.eq(viewsDB.view.find().toArray(), [], "view isn't empty after dropping coll");
- assert(viewsDB.view.drop(), "couldn't drop view");
- assert.eq(
- viewsDB.system.views.find().toArray(), [], "system.views isn't empty after dropping view");
- assert(viewsDB.system.views.drop(), "couldn't drop system.views");
+// Drop collection, view and system.views in that order, checking along the way.
+assert(viewsDB.coll.drop(), "couldn't drop coll");
+assert.eq(viewsDB.view.find().toArray(), [], "view isn't empty after dropping coll");
+assert(viewsDB.view.drop(), "couldn't drop view");
+assert.eq(
+ viewsDB.system.views.find().toArray(), [], "system.views isn't empty after dropping view");
+assert(viewsDB.system.views.drop(), "couldn't drop system.views");
- // Database should now be empty.
- let res = viewsDB.runCommand({listCollections: 1});
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch,
- [],
- viewsDBName + " is not empty after deleting views and system.views");
+// Database should now be empty.
+let res = viewsDB.runCommand({listCollections: 1});
+assert.commandWorked(res);
+assert.eq(
+ res.cursor.firstBatch, [], viewsDBName + " is not empty after deleting views and system.views");
})();
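
Dropping system.views removes every view in the database at once, since view definitions
exist only as documents in that catalog collection. A short sketch on a hypothetical
scratch database ("views_drop_sketch" is illustrative):

    const dropDB = db.getSiblingDB("views_drop_sketch");
    dropDB.dropDatabase();
    assert.commandWorked(dropDB.createView("v1", "c", []));
    assert.commandWorked(dropDB.createView("v2", "c", []));
    assert.eq(2, dropDB.getCollectionInfos({type: "view"}).length);
    assert(dropDB.system.views.drop());
    assert.eq(0, dropDB.getCollectionInfos({type: "view"}).length);
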
diff --git a/jstests/core/views/views_find.js b/jstests/core/views/views_find.js
index f4a9785a0d8..3a7f5f80ce6 100644
--- a/jstests/core/views/views_find.js
+++ b/jstests/core/views/views_find.js
@@ -3,111 +3,110 @@
* @tags: [requires_find_command, requires_getmore]
*/
(function() {
- "use strict";
-
- // For arrayEq and orderedArrayEq.
- load("jstests/aggregation/extras/utils.js");
-
- let viewsDB = db.getSiblingDB("views_find");
- assert.commandWorked(viewsDB.dropDatabase());
-
- // Helper functions.
- let assertFindResultEq = function(cmd, expected, ordered) {
- let res = viewsDB.runCommand(cmd);
- assert.commandWorked(res);
- let arr = new DBCommandCursor(viewsDB, res, 5).toArray();
- let errmsg = tojson({expected: expected, got: arr});
-
- if (typeof(ordered) === "undefined" || !ordered)
- assert(arrayEq(arr, expected), errmsg);
- else
- assert(orderedArrayEq(arr, expected), errmsg);
- };
-
- // Populate a collection with some test data.
- let allDocuments = [];
- allDocuments.push({_id: "New York", state: "NY", pop: 7});
- allDocuments.push({_id: "Newark", state: "NJ", pop: 3});
- allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10});
- allDocuments.push({_id: "San Francisco", state: "CA", pop: 4});
- allDocuments.push({_id: "Trenton", state: "NJ", pop: 5});
-
- let coll = viewsDB.coll;
- let bulk = coll.initializeUnorderedBulkOp();
- allDocuments.forEach(function(doc) {
- bulk.insert(doc);
- });
- assert.writeOK(bulk.execute());
-
- // Create views on the data.
- assert.commandWorked(
- viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]}));
- assert.commandWorked(viewsDB.runCommand({
- create: "noIdView",
- viewOn: "coll",
- pipeline: [{$match: {}}, {$project: {_id: 0, state: 1, pop: 1}}]
- }));
-
- // Filters and "simple" projections.
- assertFindResultEq({find: "identityView"}, allDocuments);
- assertFindResultEq({find: "identityView", filter: {state: "NJ"}, projection: {_id: 1}},
- [{_id: "Trenton"}, {_id: "Newark"}]);
-
- // A view that projects out the _id should still work with the find command.
- assertFindResultEq({find: "noIdView", filter: {state: "NY"}, projection: {pop: 1}}, [{pop: 7}]);
-
- // Sort, limit and batchSize.
- const doOrderedSort = true;
- assertFindResultEq({find: "identityView", sort: {_id: 1}}, allDocuments, doOrderedSort);
- assertFindResultEq(
- {find: "identityView", limit: 1, batchSize: 1, sort: {_id: 1}, projection: {_id: 1}},
- [{_id: "New York"}]);
- assert.commandFailedWithCode(viewsDB.runCommand({find: "identityView", sort: {$natural: 1}}),
- ErrorCodes.InvalidPipelineOperator);
-
- // Negative batch size and limit should fail.
- assert.commandFailed(viewsDB.runCommand({find: "identityView", batchSize: -1}));
- assert.commandFailed(viewsDB.runCommand({find: "identityView", limit: -1}));
-
- // Comment should succeed.
- assert.commandWorked(
- viewsDB.runCommand({find: "identityView", filter: {}, comment: "views_find"}));
-
- // Views support find with explain.
- assert.commandWorked(viewsDB.identityView.find().explain());
-
- // Find with explicit explain modes works on a view.
- let explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("queryPlanner"));
- assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll");
- assert(!explainPlan.hasOwnProperty("executionStats"));
-
- explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("executionStats"));
- assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll");
- assert(explainPlan.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.executionStats.nReturned, 5);
- assert(!explainPlan.executionStats.hasOwnProperty("allPlansExecution"));
-
- explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("allPlansExecution"));
- assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll");
- assert(explainPlan.hasOwnProperty("executionStats"));
- assert.eq(explainPlan.executionStats.nReturned, 5);
- assert(explainPlan.executionStats.hasOwnProperty("allPlansExecution"));
-
- // Only simple 0 or 1 projections are allowed on views.
- assert.writeOK(viewsDB.coll.insert({arr: [{x: 1}]}));
- assert.commandFailedWithCode(
- viewsDB.runCommand({find: "identityView", projection: {arr: {$elemMatch: {x: 1}}}}),
- ErrorCodes.InvalidPipelineOperator);
-
- // Views can support a "findOne" if singleBatch: true and limit: 1.
- assertFindResultEq({find: "identityView", filter: {state: "NY"}, singleBatch: true, limit: 1},
- [{_id: "New York", state: "NY", pop: 7}]);
- assert.eq(viewsDB.identityView.findOne({_id: "San Francisco"}),
- {_id: "San Francisco", state: "CA", pop: 4});
-
- // The readOnce cursor option is not allowed on views. But if we're in a transaction,
- // the error code saying that it's not allowed in a transaction takes precedence.
- assert.commandFailedWithCode(
- viewsDB.runCommand({find: "identityView", readOnce: true}),
- [ErrorCodes.OperationNotSupportedInTransaction, ErrorCodes.InvalidPipelineOperator]);
+"use strict";
+
+// For arrayEq and orderedArrayEq.
+load("jstests/aggregation/extras/utils.js");
+
+let viewsDB = db.getSiblingDB("views_find");
+assert.commandWorked(viewsDB.dropDatabase());
+
+// Helper functions.
+let assertFindResultEq = function(cmd, expected, ordered) {
+ let res = viewsDB.runCommand(cmd);
+ assert.commandWorked(res);
+ let arr = new DBCommandCursor(viewsDB, res, 5).toArray();
+ let errmsg = tojson({expected: expected, got: arr});
+
+ if (typeof (ordered) === "undefined" || !ordered)
+ assert(arrayEq(arr, expected), errmsg);
+ else
+ assert(orderedArrayEq(arr, expected), errmsg);
+};
+
+// Populate a collection with some test data.
+let allDocuments = [];
+allDocuments.push({_id: "New York", state: "NY", pop: 7});
+allDocuments.push({_id: "Newark", state: "NJ", pop: 3});
+allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10});
+allDocuments.push({_id: "San Francisco", state: "CA", pop: 4});
+allDocuments.push({_id: "Trenton", state: "NJ", pop: 5});
+
+let coll = viewsDB.coll;
+let bulk = coll.initializeUnorderedBulkOp();
+allDocuments.forEach(function(doc) {
+ bulk.insert(doc);
+});
+assert.writeOK(bulk.execute());
+
+// Create views on the data.
+assert.commandWorked(
+ viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]}));
+assert.commandWorked(viewsDB.runCommand({
+ create: "noIdView",
+ viewOn: "coll",
+ pipeline: [{$match: {}}, {$project: {_id: 0, state: 1, pop: 1}}]
+}));
+
+// Filters and "simple" projections.
+assertFindResultEq({find: "identityView"}, allDocuments);
+assertFindResultEq({find: "identityView", filter: {state: "NJ"}, projection: {_id: 1}},
+ [{_id: "Trenton"}, {_id: "Newark"}]);
+
+// A view that projects out the _id should still work with the find command.
+assertFindResultEq({find: "noIdView", filter: {state: "NY"}, projection: {pop: 1}}, [{pop: 7}]);
+
+// Sort, limit and batchSize.
+const doOrderedSort = true;
+assertFindResultEq({find: "identityView", sort: {_id: 1}}, allDocuments, doOrderedSort);
+assertFindResultEq(
+ {find: "identityView", limit: 1, batchSize: 1, sort: {_id: 1}, projection: {_id: 1}},
+ [{_id: "New York"}]);
+assert.commandFailedWithCode(viewsDB.runCommand({find: "identityView", sort: {$natural: 1}}),
+ ErrorCodes.InvalidPipelineOperator);
+
+// Negative batch size and limit should fail.
+assert.commandFailed(viewsDB.runCommand({find: "identityView", batchSize: -1}));
+assert.commandFailed(viewsDB.runCommand({find: "identityView", limit: -1}));
+
+// Comment should succeed.
+assert.commandWorked(viewsDB.runCommand({find: "identityView", filter: {}, comment: "views_find"}));
+
+// Views support find with explain.
+assert.commandWorked(viewsDB.identityView.find().explain());
+
+// Find with explicit explain modes works on a view.
+let explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("queryPlanner"));
+assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll");
+assert(!explainPlan.hasOwnProperty("executionStats"));
+
+explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("executionStats"));
+assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll");
+assert(explainPlan.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.executionStats.nReturned, 5);
+assert(!explainPlan.executionStats.hasOwnProperty("allPlansExecution"));
+
+explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("allPlansExecution"));
+assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll");
+assert(explainPlan.hasOwnProperty("executionStats"));
+assert.eq(explainPlan.executionStats.nReturned, 5);
+assert(explainPlan.executionStats.hasOwnProperty("allPlansExecution"));
+
+// Only simple 0 or 1 projections are allowed on views.
+assert.writeOK(viewsDB.coll.insert({arr: [{x: 1}]}));
+assert.commandFailedWithCode(
+ viewsDB.runCommand({find: "identityView", projection: {arr: {$elemMatch: {x: 1}}}}),
+ ErrorCodes.InvalidPipelineOperator);
+
+// Views can support a "findOne" if singleBatch: true and limit: 1.
+assertFindResultEq({find: "identityView", filter: {state: "NY"}, singleBatch: true, limit: 1},
+ [{_id: "New York", state: "NY", pop: 7}]);
+assert.eq(viewsDB.identityView.findOne({_id: "San Francisco"}),
+ {_id: "San Francisco", state: "CA", pop: 4});
+
+// The readOnce cursor option is not allowed on views. But if we're in a transaction,
+// the error code saying that it's not allowed in a transaction takes precedence.
+assert.commandFailedWithCode(
+ viewsDB.runCommand({find: "identityView", readOnce: true}),
+ [ErrorCodes.OperationNotSupportedInTransaction, ErrorCodes.InvalidPipelineOperator]);
}());
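
The singleBatch/limit combination above is roughly what the shell's findOne() helper
issues under the hood. A sketch of the raw command, assuming the views_find data created
above is still present:

    const findDB = db.getSiblingDB("views_find");
    const res = assert.commandWorked(findDB.runCommand(
        {find: "identityView", filter: {_id: "San Francisco"}, singleBatch: true, limit: 1}));
    assert.eq(1, res.cursor.firstBatch.length);
    assert.eq(res.cursor.firstBatch[0], findDB.identityView.findOne({_id: "San Francisco"}));
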
diff --git a/jstests/core/views/views_rename.js b/jstests/core/views/views_rename.js
index 3ece5d8269c..9d4f1238810 100644
--- a/jstests/core/views/views_rename.js
+++ b/jstests/core/views/views_rename.js
@@ -5,23 +5,23 @@
// ]
(function() {
- // SERVER-30406 Test that renaming system.views correctly invalidates the view catalog
- 'use strict';
+// SERVER-30406 Test that renaming system.views correctly invalidates the view catalog
+'use strict';
- const collName = "views_rename_test";
- let coll = db.getCollection(collName);
+const collName = "views_rename_test";
+let coll = db.getCollection(collName);
- db.view.drop();
- coll.drop();
- assert.commandWorked(db.createView("view", collName, []));
- assert.writeOK(coll.insert({_id: 1}));
- assert.eq(db.view.find().count(), 1, "couldn't find document in view");
- assert.commandWorked(db.system.views.renameCollection("views", /*dropTarget*/ true));
- assert.eq(db.view.find().count(),
- 0,
- "find on view should have returned no results after renaming away system.views");
- assert.commandWorked(db.views.renameCollection("system.views"));
- assert.eq(db.view.find().count(),
- 1,
- "find on view should have worked again after renaming system.views back in place");
+db.view.drop();
+coll.drop();
+assert.commandWorked(db.createView("view", collName, []));
+assert.writeOK(coll.insert({_id: 1}));
+assert.eq(db.view.find().count(), 1, "couldn't find document in view");
+assert.commandWorked(db.system.views.renameCollection("views", /*dropTarget*/ true));
+assert.eq(db.view.find().count(),
+ 0,
+ "find on view should have returned no results after renaming away system.views");
+assert.commandWorked(db.views.renameCollection("system.views"));
+assert.eq(db.view.find().count(),
+ 1,
+ "find on view should have worked again after renaming system.views back in place");
})();
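
The renameCollection helper used above wraps the admin command of the same name, with
dropTarget: true overwriting an existing target instead of failing. A sketch of the raw
form, on a hypothetical scratch database (namespaces are full "<db>.<coll>" strings):

    const renameDB = db.getSiblingDB("views_rename_sketch");
    renameDB.dropDatabase();
    assert.writeOK(renameDB.source.insert({x: 1}));
    assert.commandWorked(renameDB.adminCommand({
        renameCollection: "views_rename_sketch.source",
        to: "views_rename_sketch.target",
        dropTarget: true
    }));
    assert.eq(1, renameDB.target.count());
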
diff --git a/jstests/core/views/views_stats.js b/jstests/core/views/views_stats.js
index 6c1b4b976d6..017d546bb4d 100644
--- a/jstests/core/views/views_stats.js
+++ b/jstests/core/views/views_stats.js
@@ -12,62 +12,62 @@
// ]
(function() {
- "use strict";
- load("jstests/libs/stats.js");
+"use strict";
+load("jstests/libs/stats.js");
- let viewsDB = db.getSiblingDB("views_stats");
- assert.commandWorked(viewsDB.dropDatabase());
- assert.commandWorked(viewsDB.runCommand({create: "view", viewOn: "collection"}));
+let viewsDB = db.getSiblingDB("views_stats");
+assert.commandWorked(viewsDB.dropDatabase());
+assert.commandWorked(viewsDB.runCommand({create: "view", viewOn: "collection"}));
- let view = viewsDB["view"];
- let coll = viewsDB["collection"];
+let view = viewsDB["view"];
+let coll = viewsDB["collection"];
- // Check the histogram counters.
- let lastHistogram = getHistogramStats(view);
- view.aggregate([{$match: {}}]);
- lastHistogram = assertHistogramDiffEq(view, lastHistogram, 1, 0, 0);
+// Check the histogram counters.
+let lastHistogram = getHistogramStats(view);
+view.aggregate([{$match: {}}]);
+lastHistogram = assertHistogramDiffEq(view, lastHistogram, 1, 0, 0);
- // Check that failed inserts, updates, and deletes are counted.
- assert.writeError(view.insert({}));
- lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
+// Check that failed inserts, updates, and deletes are counted.
+assert.writeError(view.insert({}));
+lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
- assert.writeError(view.remove({}));
- lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
+assert.writeError(view.remove({}));
+lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
- assert.writeError(view.update({}, {}));
- lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
+assert.writeError(view.update({}, {}));
+lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
- let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster"));
- const isMongos = (isMasterResponse.msg === "isdbgrid");
- if (isMongos) {
- jsTest.log("Tests are being run on a mongos; skipping top tests.");
- return;
- }
+let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster"));
+const isMongos = (isMasterResponse.msg === "isdbgrid");
+if (isMongos) {
+ jsTest.log("Tests are being run on a mongos; skipping top tests.");
+ return;
+}
- // Check the top counters.
- let lastTop = getTop(view);
- view.aggregate([{$match: {}}]);
- lastTop = assertTopDiffEq(view, lastTop, "commands", 1);
+// Check the top counters.
+let lastTop = getTop(view);
+view.aggregate([{$match: {}}]);
+lastTop = assertTopDiffEq(view, lastTop, "commands", 1);
- assert.writeError(view.insert({}));
- lastTop = assertTopDiffEq(view, lastTop, "insert", 1);
+assert.writeError(view.insert({}));
+lastTop = assertTopDiffEq(view, lastTop, "insert", 1);
- assert.writeError(view.remove({}));
- lastTop = assertTopDiffEq(view, lastTop, "remove", 1);
+assert.writeError(view.remove({}));
+lastTop = assertTopDiffEq(view, lastTop, "remove", 1);
- assert.writeError(view.update({}, {}));
- lastTop = assertTopDiffEq(view, lastTop, "update", 1);
+assert.writeError(view.update({}, {}));
+lastTop = assertTopDiffEq(view, lastTop, "update", 1);
- // Check that operations on the backing collection do not modify the view stats.
- lastTop = getTop(view);
- lastHistogram = getHistogramStats(view);
- assert.writeOK(coll.insert({}));
- assert.writeOK(coll.update({}, {$set: {x: 1}}));
- coll.aggregate([{$match: {}}]);
- assert.writeOK(coll.remove({}));
+// Check that operations on the backing collection do not modify the view stats.
+lastTop = getTop(view);
+lastHistogram = getHistogramStats(view);
+assert.writeOK(coll.insert({}));
+assert.writeOK(coll.update({}, {$set: {x: 1}}));
+coll.aggregate([{$match: {}}]);
+assert.writeOK(coll.remove({}));
- assertTopDiffEq(view, lastTop, "insert", 0);
- assertTopDiffEq(view, lastTop, "update", 0);
- assertTopDiffEq(view, lastTop, "remove", 0);
- assertHistogramDiffEq(view, lastHistogram, 0, 0, 0);
+assertTopDiffEq(view, lastTop, "insert", 0);
+assertTopDiffEq(view, lastTop, "update", 0);
+assertTopDiffEq(view, lastTop, "remove", 0);
+assertHistogramDiffEq(view, lastHistogram, 0, 0, 0);
}());
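
From the call sites above, the three trailing arguments to assertHistogramDiffEq read as
the expected deltas in reads, writes and commands since the previous snapshot; that is an
inference from usage here, with jstests/libs/stats.js being the authority. A sketch of
the pattern:

    let snapshot = getHistogramStats(view);
    view.aggregate([{$match: {}}]);  // one read against the view
    // Expect deltas of (reads: 1, writes: 0, commands: 0) under that assumption.
    snapshot = assertHistogramDiffEq(view, snapshot, 1, 0, 0);
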
diff --git a/jstests/core/views/views_validation.js b/jstests/core/views/views_validation.js
index a0e02d0b2c7..e0c8aca80ea 100644
--- a/jstests/core/views/views_validation.js
+++ b/jstests/core/views/views_validation.js
@@ -1,35 +1,34 @@
// @tags: [requires_non_retryable_commands]
(function() {
- "use strict";
- let viewsDb = db.getSiblingDB("views_validation");
- const kMaxViewDepth = 20;
-
- function makeView(viewName, viewOn, pipeline, expectedErrorCode) {
- let options = {create: viewName, viewOn: viewOn};
- if (pipeline) {
- options["pipeline"] = pipeline;
- }
- let res = viewsDb.runCommand(options);
- if (expectedErrorCode !== undefined) {
- assert.commandFailedWithCode(
- res, expectedErrorCode, "Invalid view created " + tojson(options));
- } else {
- assert.commandWorked(res, "Could not create view " + tojson(options));
- }
-
- return viewsDb.getCollection(viewName);
+"use strict";
+let viewsDb = db.getSiblingDB("views_validation");
+const kMaxViewDepth = 20;
+
+function makeView(viewName, viewOn, pipeline, expectedErrorCode) {
+ let options = {create: viewName, viewOn: viewOn};
+ if (pipeline) {
+ options["pipeline"] = pipeline;
}
-
- function makeLookup(from) {
- return {
- $lookup:
- {from: from, as: "as", localField: "localField", foreignField: "foreignField"}
- };
+ let res = viewsDb.runCommand(options);
+ if (expectedErrorCode !== undefined) {
+ assert.commandFailedWithCode(
+ res, expectedErrorCode, "Invalid view created " + tojson(options));
+ } else {
+ assert.commandWorked(res, "Could not create view " + tojson(options));
}
- function makeGraphLookup(from) {
- return {
+ return viewsDb.getCollection(viewName);
+}
+
+function makeLookup(from) {
+ return {
+ $lookup: {from: from, as: "as", localField: "localField", foreignField: "foreignField"}
+ };
+}
+
+function makeGraphLookup(from) {
+ return {
$graphLookup: {
from: from,
as: "as",
@@ -38,96 +37,96 @@
connectToField: "connectToField"
}
};
- }
-
- function makeFacet(from) {
- return {$facet: {"Facet Key": [makeLookup(from)]}};
- }
-
- function clear() {
- assert.commandWorked(viewsDb.dropDatabase());
- }
-
- clear();
-
- // Check that simple cycles are disallowed.
- makeView("a", "a", [], ErrorCodes.GraphContainsCycle);
- makeView("a", "b", [makeLookup("a")], ErrorCodes.GraphContainsCycle);
- clear();
-
- makeView("a", "b", ErrorCodes.OK);
- makeView("b", "a", [], ErrorCodes.GraphContainsCycle);
- makeView("b", "c", [makeLookup("a")], ErrorCodes.GraphContainsCycle);
- clear();
-
- makeView("a", "b");
- makeView("b", "c");
- makeView("c", "a", [], ErrorCodes.GraphContainsCycle);
- clear();
-
- /*
- * Check that view validation does not naively recurse on already visited views.
- *
- * Make a tree of depth 20 as with one view per level follows:
- * 1
- * -----------------------------
- * 2 2 2 2
- * ----- ----- ----- -----
- * 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
- * ... ... ... ...
- *
- * So view i depends on the view (i+1) four times. Since it should only need to recurse
- * down one branch completely for each creation, since this should only need to check a maximum
- * of 20 views instead of 4^20 views.
- */
-
- for (let i = 1; i <= kMaxViewDepth; i++) {
- let childView = "v" + (i + 1);
- makeView("v" + i,
- childView,
- [makeLookup(childView), makeGraphLookup(childView), makeFacet(childView)]);
- }
-
- // Check that any higher depth leads to failure
- makeView("v21", "v22", [], ErrorCodes.ViewDepthLimitExceeded);
- makeView("v0", "v1", [], ErrorCodes.ViewDepthLimitExceeded);
- makeView("v0", "ok", [makeLookup("v1")], ErrorCodes.ViewDepthLimitExceeded);
-
- // But adding to the middle should be ok.
- makeView("vMid", "v10");
- clear();
-
- // Check that $graphLookup and $facet also check for cycles.
- makeView("a", "b", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle);
- makeView("a", "b", [makeGraphLookup("b")]);
- makeView("b", "c", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle);
- clear();
-
- makeView("a", "b", [makeFacet("a")], ErrorCodes.GraphContainsCycle);
- makeView("a", "b", [makeFacet("b")]);
- makeView("b", "c", [makeFacet("a")], ErrorCodes.GraphContainsCycle);
- clear();
-
- // Check that collMod also checks for cycles.
- makeView("a", "b");
- makeView("b", "c");
- assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "a", pipeline: []}),
- ErrorCodes.GraphContainsCycle,
- "collmod changed view to create a cycle");
-
- // Check that collMod disallows the specification of invalid pipelines.
- assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {}}),
- ErrorCodes.InvalidOptions,
- "collMod modified view to have invalid pipeline");
- assert.commandFailedWithCode(
- viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {0: {$limit: 7}}}),
- ErrorCodes.InvalidOptions,
- "collMod modified view to have invalid pipeline");
- clear();
-
- // Check that invalid pipelines are disallowed. The following $lookup is missing the 'as' field.
- makeView("a",
- "b",
- [{"$lookup": {from: "a", localField: "b", foreignField: "c"}}],
- ErrorCodes.FailedToParse);
+}
+
+function makeFacet(from) {
+ return {$facet: {"Facet Key": [makeLookup(from)]}};
+}
+
+function clear() {
+ assert.commandWorked(viewsDb.dropDatabase());
+}
+
+clear();
+
+// Check that simple cycles are disallowed.
+makeView("a", "a", [], ErrorCodes.GraphContainsCycle);
+makeView("a", "b", [makeLookup("a")], ErrorCodes.GraphContainsCycle);
+clear();
+
+makeView("a", "b", ErrorCodes.OK);
+makeView("b", "a", [], ErrorCodes.GraphContainsCycle);
+makeView("b", "c", [makeLookup("a")], ErrorCodes.GraphContainsCycle);
+clear();
+
+makeView("a", "b");
+makeView("b", "c");
+makeView("c", "a", [], ErrorCodes.GraphContainsCycle);
+clear();
+
+/*
+ * Check that view validation does not naively recurse on already visited views.
+ *
+ * Make a tree of depth 20, with one view per level, as follows:
+ * 1
+ * -----------------------------
+ * 2 2 2 2
+ * ----- ----- ----- -----
+ * 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
+ * ... ... ... ...
+ *
+ * So view i depends on view (i+1) four times. Since validation only needs to recurse
+ * down one branch completely for each creation, it should check a maximum of 20 views
+ * instead of 4^20 views.
+ */
+
+for (let i = 1; i <= kMaxViewDepth; i++) {
+ let childView = "v" + (i + 1);
+ makeView("v" + i,
+ childView,
+ [makeLookup(childView), makeGraphLookup(childView), makeFacet(childView)]);
+}
+
+// Check that any higher depth leads to failure.
+makeView("v21", "v22", [], ErrorCodes.ViewDepthLimitExceeded);
+makeView("v0", "v1", [], ErrorCodes.ViewDepthLimitExceeded);
+makeView("v0", "ok", [makeLookup("v1")], ErrorCodes.ViewDepthLimitExceeded);
+
+// But adding to the middle should be ok.
+makeView("vMid", "v10");
+clear();
+
+// Check that $graphLookup and $facet also check for cycles.
+makeView("a", "b", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle);
+makeView("a", "b", [makeGraphLookup("b")]);
+makeView("b", "c", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle);
+clear();
+
+makeView("a", "b", [makeFacet("a")], ErrorCodes.GraphContainsCycle);
+makeView("a", "b", [makeFacet("b")]);
+makeView("b", "c", [makeFacet("a")], ErrorCodes.GraphContainsCycle);
+clear();
+
+// Check that collMod also checks for cycles.
+makeView("a", "b");
+makeView("b", "c");
+assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "a", pipeline: []}),
+ ErrorCodes.GraphContainsCycle,
+ "collmod changed view to create a cycle");
+
+// Check that collMod disallows the specification of invalid pipelines.
+assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {}}),
+ ErrorCodes.InvalidOptions,
+ "collMod modified view to have invalid pipeline");
+assert.commandFailedWithCode(
+ viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {0: {$limit: 7}}}),
+ ErrorCodes.InvalidOptions,
+ "collMod modified view to have invalid pipeline");
+clear();
+
+// Check that invalid pipelines are disallowed. The following $lookup is missing the 'as' field.
+makeView("a",
+ "b",
+ [{"$lookup": {from: "a", localField: "b", foreignField: "c"}}],
+ ErrorCodes.FailedToParse);
}());
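The cycle check exercised above is easy to reproduce in isolation. A minimal sketch, assuming
a mongo shell connected to a scratch database (the database name "views_validation_demo" is
hypothetical): creating a view whose own pipeline performs a $lookup back into the view being
created fails at create time with GraphContainsCycle.

    const demoDb = db.getSiblingDB("views_validation_demo");
    assert.commandWorked(demoDb.dropDatabase());
    // A view that reads from itself via $lookup is rejected when it is created.
    const res = demoDb.runCommand({
        create: "selfView",
        viewOn: "base",
        pipeline: [{$lookup: {from: "selfView", as: "as", localField: "a", foreignField: "b"}}]
    });
    assert.commandFailedWithCode(res, ErrorCodes.GraphContainsCycle);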
diff --git a/jstests/core/where_tolerates_js_exception.js b/jstests/core/where_tolerates_js_exception.js
index b12a7c0a65e..ed11b3e64a5 100644
--- a/jstests/core/where_tolerates_js_exception.js
+++ b/jstests/core/where_tolerates_js_exception.js
@@ -8,28 +8,28 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- const collection = db.where_tolerates_js_exception;
- collection.drop();
+const collection = db.where_tolerates_js_exception;
+collection.drop();
- assert.commandWorked(collection.save({a: 1}));
+assert.commandWorked(collection.save({a: 1}));
- const res = collection.runCommand("find", {
- filter: {
- $where: function myFunction() {
- return a();
- }
+const res = collection.runCommand("find", {
+ filter: {
+ $where: function myFunction() {
+ return a();
}
- });
+ }
+});
- assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure);
- assert(/ReferenceError/.test(res.errmsg),
- () => "$where didn't failed with a ReferenceError: " + tojson(res));
- assert(/myFunction@/.test(res.errmsg),
- () => "$where didn't return the JavaScript stacktrace: " + tojson(res));
- assert(!res.hasOwnProperty("stack"),
- () => "$where shouldn't return JavaScript stacktrace separately: " + tojson(res));
- assert(!res.hasOwnProperty("originalError"),
- () => "$where shouldn't return wrapped version of the error: " + tojson(res));
+assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure);
+assert(/ReferenceError/.test(res.errmsg),
+ () => "$where didn't failed with a ReferenceError: " + tojson(res));
+assert(/myFunction@/.test(res.errmsg),
+ () => "$where didn't return the JavaScript stacktrace: " + tojson(res));
+assert(!res.hasOwnProperty("stack"),
+ () => "$where shouldn't return JavaScript stacktrace separately: " + tojson(res));
+assert(!res.hasOwnProperty("originalError"),
+ () => "$where shouldn't return wrapped version of the error: " + tojson(res));
})();
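For contrast with the failure path above, a well-formed $where evaluates its function once per
document, with the document bound to 'this', and matches wherever the function returns a truthy
value. A minimal sketch (the collection name "where_demo" is hypothetical):

    const demo = db.where_demo;
    demo.drop();
    assert.commandWorked(demo.insert({a: 1}));
    // The function body references the document via 'this', so no ReferenceError is thrown.
    assert.eq(1, demo.find({$where: function() { return this.a === 1; }}).itcount());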
diff --git a/jstests/core/wildcard_and_text_indexes.js b/jstests/core/wildcard_and_text_indexes.js
index a4b552e3220..639450b174c 100644
--- a/jstests/core/wildcard_and_text_indexes.js
+++ b/jstests/core/wildcard_and_text_indexes.js
@@ -3,81 +3,80 @@
* @tags: [assumes_balancer_off]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/libs/analyze_plan.js"); // For getPlanStages and planHasStage.
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages and planHasStage.
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
- const coll = db.wildcard_and_text_indexes;
- coll.drop();
+const coll = db.wildcard_and_text_indexes;
+coll.drop();
- // Runs a single wildcard query test, confirming that an indexed solution exists, that the $**
- // index on the given 'expectedPath' was used to answer the query, and that the results are
- // identical to those obtained via COLLSCAN.
- function assertWildcardQuery(query, expectedPath) {
- // Explain the query, and determine whether an indexed solution is available.
- const explainOutput = coll.find(query).explain("executionStats");
- const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN");
- // Verify that the winning plan uses the $** index with the expected path.
- assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1});
- // Verify that the results obtained from the $** index are identical to a COLLSCAN.
- assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray());
- }
+// Runs a single wildcard query test, confirming that an indexed solution exists, that the $**
+// index on the given 'expectedPath' was used to answer the query, and that the results are
+// identical to those obtained via COLLSCAN.
+function assertWildcardQuery(query, expectedPath) {
+ // Explain the query, and determine whether an indexed solution is available.
+ const explainOutput = coll.find(query).explain("executionStats");
+ const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN");
+ // Verify that the winning plan uses the $** index with the expected path.
+ assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
+ assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1});
+ // Verify that the results obtained from the $** index are identical to a COLLSCAN.
+ assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray());
+}
- // Insert documents containing the field '_fts', which is reserved when using a $text index.
- assert.commandWorked(coll.insert({_id: 1, a: 1, _fts: 1, textToSearch: "banana"}));
- assert.commandWorked(coll.insert({_id: 2, a: 1, _fts: 2, textToSearch: "bananas"}));
- assert.commandWorked(coll.insert({_id: 3, a: 1, _fts: 3}));
+// Insert documents containing the field '_fts', which is reserved when using a $text index.
+assert.commandWorked(coll.insert({_id: 1, a: 1, _fts: 1, textToSearch: "banana"}));
+assert.commandWorked(coll.insert({_id: 2, a: 1, _fts: 2, textToSearch: "bananas"}));
+assert.commandWorked(coll.insert({_id: 3, a: 1, _fts: 3}));
- // Build a wildcard index, and verify that it can be used to query for the field '_fts'.
- assert.commandWorked(coll.createIndex({"$**": 1}));
- assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts');
+// Build a wildcard index, and verify that it can be used to query for the field '_fts'.
+assert.commandWorked(coll.createIndex({"$**": 1}));
+assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts');
- // Perform the tests below for simple and compound $text indexes.
- for (let textIndex of[{'$**': 'text'}, {a: 1, '$**': 'text'}]) {
- // Build the appropriate text index.
- assert.commandWorked(coll.createIndex(textIndex, {name: "textIndex"}));
+// Perform the tests below for simple and compound $text indexes.
+for (let textIndex of [{'$**': 'text'}, {a: 1, '$**': 'text'}]) {
+ // Build the appropriate text index.
+ assert.commandWorked(coll.createIndex(textIndex, {name: "textIndex"}));
- // Confirm that the $** index can still be used to query for the '_fts' field outside of
- // $text queries.
- assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts');
+ // Confirm that the $** index can still be used to query for the '_fts' field outside of
+ // $text queries.
+ assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts');
- // Confirm that $** does not generate a candidate plan for $text search, including cases
- // when the query filter contains a compound field in the $text index.
- const textQuery = Object.assign(textIndex.a ? {a: 1} : {}, {$text: {$search: 'banana'}});
- let explainOut = assert.commandWorked(coll.find(textQuery).explain("executionStats"));
- assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT"));
- assert.eq(getRejectedPlans(explainOut).length, 0);
- assert.eq(explainOut.executionStats.nReturned, 2);
+ // Confirm that $** does not generate a candidate plan for $text search, including cases
+ // when the query filter contains a compound field in the $text index.
+ const textQuery = Object.assign(textIndex.a ? {a: 1} : {}, {$text: {$search: 'banana'}});
+ let explainOut = assert.commandWorked(coll.find(textQuery).explain("executionStats"));
+ assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT"));
+ assert.eq(getRejectedPlans(explainOut).length, 0);
+ assert.eq(explainOut.executionStats.nReturned, 2);
- // Confirm that $** does not generate a candidate plan for $text search, including cases
- // where the query filter contains a field which is not present in the text index.
- explainOut =
- assert.commandWorked(coll.find(Object.assign({_fts: {$gt: 0, $lt: 4}}, textQuery))
- .explain("executionStats"));
- assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT"));
- assert.eq(getRejectedPlans(explainOut).length, 0);
- assert.eq(explainOut.executionStats.nReturned, 2);
+ // Confirm that $** does not generate a candidate plan for $text search, including cases
+ // where the query filter contains a field which is not present in the text index.
+ explainOut = assert.commandWorked(
+ coll.find(Object.assign({_fts: {$gt: 0, $lt: 4}}, textQuery)).explain("executionStats"));
+ assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT"));
+ assert.eq(getRejectedPlans(explainOut).length, 0);
+ assert.eq(explainOut.executionStats.nReturned, 2);
- // Confirm that the $** index can be used alongside a $text predicate in an $or.
- explainOut = assert.commandWorked(
- coll.find({$or: [{_fts: 3}, textQuery]}).explain("executionStats"));
- assert.eq(getRejectedPlans(explainOut).length, 0);
- assert.eq(explainOut.executionStats.nReturned, 3);
+ // Confirm that the $** index can be used alongside a $text predicate in an $or.
+ explainOut =
+ assert.commandWorked(coll.find({$or: [{_fts: 3}, textQuery]}).explain("executionStats"));
+ assert.eq(getRejectedPlans(explainOut).length, 0);
+ assert.eq(explainOut.executionStats.nReturned, 3);
- const textOrWildcard = getPlanStages(explainOut.queryPlanner.winningPlan, "OR").shift();
- assert.eq(textOrWildcard.inputStages.length, 2);
- const textBranch = (textOrWildcard.inputStages[0].stage === "TEXT" ? 0 : 1);
- const wildcardBranch = (textBranch + 1) % 2;
- assert.eq(textOrWildcard.inputStages[textBranch].stage, "TEXT");
- assert.eq(textOrWildcard.inputStages[wildcardBranch].stage, "IXSCAN");
- assert.eq(textOrWildcard.inputStages[wildcardBranch].keyPattern, {$_path: 1, _fts: 1});
+ const textOrWildcard = getPlanStages(explainOut.queryPlanner.winningPlan, "OR").shift();
+ assert.eq(textOrWildcard.inputStages.length, 2);
+ const textBranch = (textOrWildcard.inputStages[0].stage === "TEXT" ? 0 : 1);
+ const wildcardBranch = (textBranch + 1) % 2;
+ assert.eq(textOrWildcard.inputStages[textBranch].stage, "TEXT");
+ assert.eq(textOrWildcard.inputStages[wildcardBranch].stage, "IXSCAN");
+ assert.eq(textOrWildcard.inputStages[wildcardBranch].keyPattern, {$_path: 1, _fts: 1});
- // Drop the index so that a different text index can be created.
- assert.commandWorked(coll.dropIndex("textIndex"));
- }
+ // Drop the index so that a different text index can be created.
+ assert.commandWorked(coll.dropIndex("textIndex"));
+}
})();
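The division of labour tested above can be demonstrated directly: a $** index serves ordinary
predicates, while $text queries must be answered by the text index, since wildcard indexes do
not index text terms. A minimal sketch (the collection name "wild_text_demo" is hypothetical):

    const demo = db.wild_text_demo;
    demo.drop();
    assert.commandWorked(demo.insert({msg: "banana split"}));
    assert.commandWorked(demo.createIndex({"$**": 1}));
    assert.commandWorked(demo.createIndex({msg: "text"}));
    // The $text predicate requires the text index; the range predicate can use the $** index.
    assert.eq(1, demo.find({$text: {$search: "banana"}}).itcount());
    assert.eq(1, demo.find({msg: {$gt: ""}}).itcount());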
diff --git a/jstests/core/wildcard_index_basic_index_bounds.js b/jstests/core/wildcard_index_basic_index_bounds.js
index e2ee7da8710..a685898ead7 100644
--- a/jstests/core/wildcard_index_basic_index_bounds.js
+++ b/jstests/core/wildcard_index_basic_index_bounds.js
@@ -7,229 +7,227 @@
* @tags: [does_not_support_stepdowns, assumes_balancer_off]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- load("jstests/libs/fixture_helpers.js"); // For isMongos and numberOfShardsForCollection.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+load("jstests/libs/fixture_helpers.js"); // For isMongos and numberOfShardsForCollection.
- // Asserts that the given cursors produce identical result sets.
- function assertResultsEq(cursor1, cursor2) {
- while (cursor1.hasNext()) {
- assert(cursor2.hasNext());
- assert.eq(cursor1.next()._id, cursor2.next()._id);
- }
- assert(!cursor2.hasNext());
- }
-
- const coll = db.wildcard_index_bounds;
- coll.drop();
-
- // Template document which defines the 'schema' of the documents in the test collection.
- const templateDoc = {a: 0, b: {c: 0, d: {e: 0}, f: {}}};
- const pathList = ['a', 'b.c', 'b.d.e', 'b.f'];
-
- // Insert a set of documents into the collection, based on the template document and populated
- // with an increasing sequence of values. This is to ensure that the range of values present for
- // each field in the dataset is not entirely homogeneous.
- for (let i = 0; i < 10; i++) {
- (function populateDoc(doc, value) {
- for (let key in doc) {
- if (typeof doc[key] === 'object')
- value = populateDoc(doc[key], value);
- else
- doc[key] = value++;
- }
- return value;
- })(templateDoc, i);
-
- assert.commandWorked(coll.insert(templateDoc));
+// Asserts that the given cursors produce identical result sets.
+function assertResultsEq(cursor1, cursor2) {
+ while (cursor1.hasNext()) {
+ assert(cursor2.hasNext());
+ assert.eq(cursor1.next()._id, cursor2.next()._id);
}
-
- // For sharded passthroughs, we need to know the number of shards occupied by the collection.
- const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
-
- // Set of operations which will be applied to each field in the index in turn. If the 'bounds'
- // property is null, this indicates that the operation is not supported by $** indexes. The
- // 'subpathBounds' property indicates whether the bounds for '$_path' are supposed to contain
- // all subpaths rather than a single point-interval, i.e. ["path.to.field.", "path.to.field/").
- const operationList = [
- {expression: {$gte: 3}, bounds: ['[3.0, inf.0]']},
- {expression: {$gt: 3}, bounds: ['(3.0, inf.0]']},
- {expression: {$lt: 7}, bounds: ['[-inf.0, 7.0)']},
- {expression: {$lte: 7}, bounds: ['[-inf.0, 7.0]']},
- {expression: {$eq: 5}, bounds: ['[5.0, 5.0]']},
- {
- expression: {$in: [3, 5, 7, 9]},
- bounds: ['[3.0, 3.0]', '[5.0, 5.0]', '[7.0, 7.0]', '[9.0, 9.0]']
- },
- {expression: {$exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true},
- {
- expression: {$gte: MinKey, $lte: MaxKey},
- bounds: ['[MinKey, MaxKey]'],
- subpathBounds: true
- },
- {expression: {$exists: false}, bounds: null},
- {expression: {$eq: null}, bounds: null},
- {expression: {$eq: {abc: 1}}, bounds: null},
- {expression: {$lt: {abc: 1}}, bounds: null},
- {expression: {$ne: {abc: 1}}, bounds: null},
- {expression: {$lt: {abc: 1}, $gt: {abc: 1}}, bounds: null},
- {expression: {$in: [{abc: 1}, 1, 2, 3]}, bounds: null},
- {expression: {$in: [null, 1, 2, 3]}, bounds: null},
- {expression: {$ne: null}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true},
- {expression: {$ne: null, $exists: true}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true},
- // In principle we could have tighter bounds for this. See SERVER-36765.
- {expression: {$eq: null, $exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true},
- {expression: {$eq: []}, bounds: ['[undefined, undefined]', '[[], []]']}
- ];
-
- // Given a keyPattern and (optional) pathProjection, this function builds a $** index on the
- // collection and then tests each of the match expression in the 'operationList' on each indexed
- // field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have
- // been indexed based on the spec; this function will confirm that only the appropriate paths
- // are present in the $** index. Finally, for each match expression it will perform a rooted-$or
- // with one predicate on each expected path, and a rooted $and over all predicates and paths.
- function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) {
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex(
- keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {}));
-
- // The 'expectedPaths' argument is the set of paths which we expect to be indexed, based on
- // the keyPattern and projection. Make sure that the caller has provided this argument.
- assert(expectedPaths);
-
- // Verify the expected behaviour for every combination of path and operator.
- for (let op of operationList) {
- // Build up a list of operations that will later be used to test rooted $or.
- const multiFieldPreds = [];
- const orQueryBounds = [];
-
- for (let path of pathList) {
- // The bounds on '$_path' will always include a point-interval on the path, i.e.
- // ["path.to.field", "path.to.field"]. If 'subpathBounds' is 'true' for this
- // operation, then we add bounds that include all subpaths as well, i.e.
- // ["path.to.field.", "path.to.field/")
- const pointPathBound = `["${path}", "${path}"]`;
- const pathBounds = op.subpathBounds ? [pointPathBound, `["${path}.", "${path}/")`]
- : [pointPathBound];
- // {$_path: pathBounds, path.to.field: [[computed bounds]]}
- const expectedBounds = {$_path: pathBounds, [path]: op.bounds};
- const query = {[path]: op.expression};
-
- // Explain the query, and determine whether an indexed solution is available.
- const ixScans =
- getPlanStages(coll.find(query).explain().queryPlanner.winningPlan, "IXSCAN");
-
- // If we expect the current path to have been excluded based on the $** keyPattern
- // and projection, or if the current operation is not supported by $** indexes,
- // confirm that no indexed solution was found.
- if (!expectedPaths.includes(path) || op.bounds === null) {
- assert.eq(ixScans.length,
- 0,
- () => "Bounds check for operation: " + tojson(op) +
- " failed. Expected no IXSCAN plans to be generated, but got " +
- tojson(ixScans));
- continue;
- }
-
- // Verify that the winning plan uses the $** index with the expected bounds.
- assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.docEq(ixScans[0].keyPattern, {$_path: 1, [path]: 1});
- assert.docEq(ixScans[0].indexBounds, expectedBounds);
-
- // Verify that the results obtained from the $** index are identical to a COLLSCAN.
- // We must explicitly hint the wildcard index, because we also sort on {_id: 1} to
- // ensure that both result sets are in the same order.
- assertResultsEq(coll.find(query).sort({_id: 1}).hint(keyPattern),
- coll.find(query).sort({_id: 1}).hint({$natural: 1}));
-
- // Push the query into the $or and $and predicate arrays.
- orQueryBounds.push(expectedBounds);
- multiFieldPreds.push(query);
- }
-
- // If the current operation could not use the $** index, skip to the next op.
- if (multiFieldPreds.length === 0) {
+ assert(!cursor2.hasNext());
+}
+
+const coll = db.wildcard_index_bounds;
+coll.drop();
+
+// Template document which defines the 'schema' of the documents in the test collection.
+const templateDoc = {
+ a: 0,
+ b: {c: 0, d: {e: 0}, f: {}}
+};
+const pathList = ['a', 'b.c', 'b.d.e', 'b.f'];
+
+// Insert a set of documents into the collection, based on the template document and populated
+// with an increasing sequence of values. This is to ensure that the range of values present for
+// each field in the dataset is not entirely homogeneous.
+for (let i = 0; i < 10; i++) {
+ (function populateDoc(doc, value) {
+ for (let key in doc) {
+ if (typeof doc[key] === 'object')
+ value = populateDoc(doc[key], value);
+ else
+ doc[key] = value++;
+ }
+ return value;
+ })(templateDoc, i);
+
+ assert.commandWorked(coll.insert(templateDoc));
+}
+
+// For sharded passthroughs, we need to know the number of shards occupied by the collection.
+const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
+
+// Set of operations which will be applied to each field in the index in turn. If the 'bounds'
+// property is null, this indicates that the operation is not supported by $** indexes. The
+// 'subpathBounds' property indicates whether the bounds for '$_path' are supposed to contain
+// all subpaths rather than a single point-interval, i.e. ["path.to.field.", "path.to.field/").
+const operationList = [
+ {expression: {$gte: 3}, bounds: ['[3.0, inf.0]']},
+ {expression: {$gt: 3}, bounds: ['(3.0, inf.0]']},
+ {expression: {$lt: 7}, bounds: ['[-inf.0, 7.0)']},
+ {expression: {$lte: 7}, bounds: ['[-inf.0, 7.0]']},
+ {expression: {$eq: 5}, bounds: ['[5.0, 5.0]']},
+ {
+ expression: {$in: [3, 5, 7, 9]},
+ bounds: ['[3.0, 3.0]', '[5.0, 5.0]', '[7.0, 7.0]', '[9.0, 9.0]']
+ },
+ {expression: {$exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true},
+ {expression: {$gte: MinKey, $lte: MaxKey}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true},
+ {expression: {$exists: false}, bounds: null},
+ {expression: {$eq: null}, bounds: null},
+ {expression: {$eq: {abc: 1}}, bounds: null},
+ {expression: {$lt: {abc: 1}}, bounds: null},
+ {expression: {$ne: {abc: 1}}, bounds: null},
+ {expression: {$lt: {abc: 1}, $gt: {abc: 1}}, bounds: null},
+ {expression: {$in: [{abc: 1}, 1, 2, 3]}, bounds: null},
+ {expression: {$in: [null, 1, 2, 3]}, bounds: null},
+ {expression: {$ne: null}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true},
+ {expression: {$ne: null, $exists: true}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true},
+ // In principle we could have tighter bounds for this. See SERVER-36765.
+ {expression: {$eq: null, $exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true},
+ {expression: {$eq: []}, bounds: ['[undefined, undefined]', '[[], []]']}
+];
+
+// Given a keyPattern and (optional) pathProjection, this function builds a $** index on the
+// collection and then tests each of the match expressions in the 'operationList' on each indexed
+// field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have
+// been indexed based on the spec; this function will confirm that only the appropriate paths
+// are present in the $** index. Finally, for each match expression it will perform a rooted-$or
+// with one predicate on each expected path, and a rooted $and over all predicates and paths.
+function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) {
+ assert.commandWorked(coll.dropIndexes());
+ assert.commandWorked(
+ coll.createIndex(keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {}));
+
+ // The 'expectedPaths' argument is the set of paths which we expect to be indexed, based on
+ // the keyPattern and projection. Make sure that the caller has provided this argument.
+ assert(expectedPaths);
+
+ // Verify the expected behaviour for every combination of path and operator.
+ for (let op of operationList) {
+ // Build up a list of operations that will later be used to test rooted $or.
+ const multiFieldPreds = [];
+ const orQueryBounds = [];
+
+ for (let path of pathList) {
+ // The bounds on '$_path' will always include a point-interval on the path, i.e.
+ // ["path.to.field", "path.to.field"]. If 'subpathBounds' is 'true' for this
+ // operation, then we add bounds that include all subpaths as well, i.e.
+ // ["path.to.field.", "path.to.field/")
+ const pointPathBound = `["${path}", "${path}"]`;
+ const pathBounds =
+ op.subpathBounds ? [pointPathBound, `["${path}.", "${path}/")`] : [pointPathBound];
+ // {$_path: pathBounds, path.to.field: [[computed bounds]]}
+ const expectedBounds = {$_path: pathBounds, [path]: op.bounds};
+ const query = {[path]: op.expression};
+
+ // Explain the query, and determine whether an indexed solution is available.
+ const ixScans =
+ getPlanStages(coll.find(query).explain().queryPlanner.winningPlan, "IXSCAN");
+
+ // If we expect the current path to have been excluded based on the $** keyPattern
+ // and projection, or if the current operation is not supported by $** indexes,
+ // confirm that no indexed solution was found.
+ if (!expectedPaths.includes(path) || op.bounds === null) {
+ assert.eq(ixScans.length,
+ 0,
+ () => "Bounds check for operation: " + tojson(op) +
+ " failed. Expected no IXSCAN plans to be generated, but got " +
+ tojson(ixScans));
continue;
}
- // Perform a rooted $or for this operation across all indexed fields; for instance:
- // {$or: [{a: {$eq: 25}}, {'b.c': {$eq: 25}}, {'b.d.e': {$eq: 25}}]}.
- const explainedOr = assert.commandWorked(coll.find({$or: multiFieldPreds}).explain());
-
- // Obtain the list of index bounds from each individual IXSCAN stage across all shards.
- const ixScanBounds = getPlanStages(explainedOr.queryPlanner.winningPlan, "IXSCAN")
- .map(elem => elem.indexBounds);
-
- // We should find that each branch of the $or has used a separate $** sub-index. In the
- // sharded passthroughs, we expect to have 'orQueryBounds' on each shard.
- assert.eq(ixScanBounds.length, orQueryBounds.length * numShards);
- for (let offset = 0; offset < ixScanBounds.length; offset += orQueryBounds.length) {
- const ixBounds = ixScanBounds.slice(offset, offset + orQueryBounds.length);
- orQueryBounds.forEach(
- exBound => assert(ixBounds.some(ixBound => !bsonWoCompare(ixBound, exBound))));
- }
+ // Verify that the winning plan uses the $** index with the expected bounds.
+ assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
+ assert.docEq(ixScans[0].keyPattern, {$_path: 1, [path]: 1});
+ assert.docEq(ixScans[0].indexBounds, expectedBounds);
// Verify that the results obtained from the $** index are identical to a COLLSCAN.
- assertResultsEq(coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint(keyPattern),
- coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1}));
-
- // Perform an $and for this operation across all indexed fields; for instance:
- // {$and: [{a: {$gte: 50}}, {'b.c': {$gte: 50}}, {'b.d.e': {$gte: 50}}]}.
- const explainedAnd = coll.find({$and: multiFieldPreds}).explain();
- const winningIxScan = getPlanStages(explainedAnd.queryPlanner.winningPlan, "IXSCAN");
-
- // Extract information about the rejected plans. We should have one IXSCAN for each $**
- // candidate that wasn't the winner. Before SERVER-36521 banned them for $** indexes, a
- // number of AND_SORTED plans would also be generated here; we search for these in order
- // to verify that no such plans now exist.
- const rejectedPlans = getRejectedPlans(explainedAnd);
- let rejectedIxScans = [], rejectedAndSorted = [];
- for (let rejectedPlan of rejectedPlans) {
- rejectedAndSorted =
- rejectedAndSorted.concat(getPlanStages(rejectedPlan, "AND_SORTED"));
- rejectedIxScans = rejectedIxScans.concat(getPlanStages(rejectedPlan, "IXSCAN"));
- }
+ // We must explicitly hint the wildcard index, because we also sort on {_id: 1} to
+ // ensure that both result sets are in the same order.
+ assertResultsEq(coll.find(query).sort({_id: 1}).hint(keyPattern),
+ coll.find(query).sort({_id: 1}).hint({$natural: 1}));
+
+ // Push the query into the $or and $and predicate arrays.
+ orQueryBounds.push(expectedBounds);
+ multiFieldPreds.push(query);
+ }
- // Confirm that no AND_SORTED plans were generated.
- assert.eq(rejectedAndSorted.length, 0);
+ // If the current operation could not use the $** index, skip to the next op.
+ if (multiFieldPreds.length === 0) {
+ continue;
+ }
- // We should find that one of the available $** subindexes has been chosen as the
- // winner, and all other candidate $** indexes are present in 'rejectedPlans'.
- assert.eq(winningIxScan.length, numShards);
- assert.eq(rejectedIxScans.length, numShards * (expectedPaths.length - 1));
+ // Perform a rooted $or for this operation across all indexed fields; for instance:
+ // {$or: [{a: {$eq: 25}}, {'b.c': {$eq: 25}}, {'b.d.e': {$eq: 25}}]}.
+ const explainedOr = assert.commandWorked(coll.find({$or: multiFieldPreds}).explain());
+
+ // Obtain the list of index bounds from each individual IXSCAN stage across all shards.
+ const ixScanBounds = getPlanStages(explainedOr.queryPlanner.winningPlan, "IXSCAN")
+ .map(elem => elem.indexBounds);
+
+ // We should find that each branch of the $or has used a separate $** sub-index. In the
+ // sharded passthroughs, we expect to have 'orQueryBounds' on each shard.
+ assert.eq(ixScanBounds.length, orQueryBounds.length * numShards);
+ for (let offset = 0; offset < ixScanBounds.length; offset += orQueryBounds.length) {
+ const ixBounds = ixScanBounds.slice(offset, offset + orQueryBounds.length);
+ orQueryBounds.forEach(
+ exBound => assert(ixBounds.some(ixBound => !bsonWoCompare(ixBound, exBound))));
+ }
- // Verify that each of the IXSCANs have the expected bounds and $_path key.
- for (let ixScan of winningIxScan.concat(rejectedIxScans)) {
- // {$_path: ["['path.to.field', 'path.to.field']"], path.to.field: [[bounds]]}
- const ixScanPath = JSON.parse(ixScan.indexBounds.$_path[0])[0];
- assert.eq(ixScan.indexBounds[ixScanPath], op.bounds);
- assert(expectedPaths.includes(ixScanPath));
- }
+ // Verify that the results obtained from the $** index are identical to a COLLSCAN.
+ assertResultsEq(coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint(keyPattern),
+ coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1}));
+
+ // Perform an $and for this operation across all indexed fields; for instance:
+ // {$and: [{a: {$gte: 50}}, {'b.c': {$gte: 50}}, {'b.d.e': {$gte: 50}}]}.
+ const explainedAnd = coll.find({$and: multiFieldPreds}).explain();
+ const winningIxScan = getPlanStages(explainedAnd.queryPlanner.winningPlan, "IXSCAN");
+
+ // Extract information about the rejected plans. We should have one IXSCAN for each $**
+ // candidate that wasn't the winner. Before SERVER-36521 banned them for $** indexes, a
+ // number of AND_SORTED plans would also be generated here; we search for these in order
+ // to verify that no such plans now exist.
+ const rejectedPlans = getRejectedPlans(explainedAnd);
+ let rejectedIxScans = [], rejectedAndSorted = [];
+ for (let rejectedPlan of rejectedPlans) {
+ rejectedAndSorted = rejectedAndSorted.concat(getPlanStages(rejectedPlan, "AND_SORTED"));
+ rejectedIxScans = rejectedIxScans.concat(getPlanStages(rejectedPlan, "IXSCAN"));
+ }
- // Verify that the results obtained from the $** index are identical to a COLLSCAN.
- assertResultsEq(coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint(keyPattern),
- coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1}));
+ // Confirm that no AND_SORTED plans were generated.
+ assert.eq(rejectedAndSorted.length, 0);
+
+ // We should find that one of the available $** subindexes has been chosen as the
+ // winner, and all other candidate $** indexes are present in 'rejectedPlans'.
+ assert.eq(winningIxScan.length, numShards);
+ assert.eq(rejectedIxScans.length, numShards * (expectedPaths.length - 1));
+
+    // Verify that each of the IXSCANs has the expected bounds and $_path key.
+ for (let ixScan of winningIxScan.concat(rejectedIxScans)) {
+ // {$_path: ["['path.to.field', 'path.to.field']"], path.to.field: [[bounds]]}
+ const ixScanPath = JSON.parse(ixScan.indexBounds.$_path[0])[0];
+ assert.eq(ixScan.indexBounds[ixScanPath], op.bounds);
+ assert(expectedPaths.includes(ixScanPath));
}
- }
- // Test a $** index that indexes the entire document.
- runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e', 'b.f']);
-
- // Test a $** index on a single subtree.
- runWildcardIndexTest({'a.$**': 1}, null, ['a']);
- runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e', 'b.f']);
- runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']);
-
- // Test a $** index which includes a subset of paths.
- runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']);
- runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e', 'b.f']);
- runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']);
- runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']);
-
- // Test a $** index which excludes a subset of paths.
- runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e', 'b.f']);
- runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']);
- runWildcardIndexTest({'$**': 1}, {'b.d': 0}, ['a', 'b.c', 'b.f']);
- runWildcardIndexTest({'$**': 1}, {a: 0, 'b.d': 0}, ['b.c', 'b.f']);
+ // Verify that the results obtained from the $** index are identical to a COLLSCAN.
+ assertResultsEq(coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint(keyPattern),
+ coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1}));
+ }
+}
+
+// Test a $** index that indexes the entire document.
+runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e', 'b.f']);
+
+// Test a $** index on a single subtree.
+runWildcardIndexTest({'a.$**': 1}, null, ['a']);
+runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e', 'b.f']);
+runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']);
+
+// Test a $** index which includes a subset of paths.
+runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']);
+runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e', 'b.f']);
+runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']);
+runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']);
+
+// Test a $** index which excludes a subset of paths.
+runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e', 'b.f']);
+runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']);
+runWildcardIndexTest({'$**': 1}, {'b.d': 0}, ['a', 'b.c', 'b.f']);
+runWildcardIndexTest({'$**': 1}, {a: 0, 'b.d': 0}, ['b.c', 'b.f']);
})();
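The expected bounds asserted above are read straight out of explain() output: each IXSCAN
stage reports its keyPattern and per-field indexBounds, including the synthetic '$_path' field
that a $** index adds. A minimal sketch, assuming an unsharded collection (the name
"bounds_demo" is hypothetical) and that jstests/libs/analyze_plan.js has been loaded:

    const demo = db.bounds_demo;
    demo.drop();
    assert.commandWorked(demo.insert({a: 5}));
    assert.commandWorked(demo.createIndex({"$**": 1}));
    const winning = demo.find({a: {$gte: 3}}).explain().queryPlanner.winningPlan;
    // getPlanStages() is provided by jstests/libs/analyze_plan.js.
    const scans = getPlanStages(winning, "IXSCAN");
    assert.eq(scans[0].keyPattern, {$_path: 1, a: 1});
    assert.eq(scans[0].indexBounds.a, ["[3.0, inf.0]"]);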
diff --git a/jstests/core/wildcard_index_cached_plans.js b/jstests/core/wildcard_index_cached_plans.js
index 634b7cb0368..9b42413955c 100644
--- a/jstests/core/wildcard_index_cached_plans.js
+++ b/jstests/core/wildcard_index_cached_plans.js
@@ -15,140 +15,142 @@
* ]
*/
(function() {
- "use strict";
-
- load('jstests/libs/analyze_plan.js'); // For getPlanStage().
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load('jstests/libs/fixture_helpers.js'); // For getPrimaryForNodeHostingDatabase and isMongos.
-
- const coll = db.wildcard_cached_plans;
- coll.drop();
-
- assert.commandWorked(coll.createIndex({"b.$**": 1}));
- assert.commandWorked(coll.createIndex({"a": 1}));
-
- // In order for the plan cache to be used, there must be more than one plan available. Insert
- // data into the collection such that the b.$** index will be far more selective than the index
- // on 'a' for the query {a: 1, b: 1}.
- for (let i = 0; i < 1000; i++) {
- assert.commandWorked(coll.insert({a: 1}));
- }
- assert.commandWorked(coll.insert({a: 1, b: 1}));
-
- function getCacheEntryForQuery(query) {
- const aggRes =
- FixtureHelpers.getPrimaryForNodeHostingDatabase(db)
- .getCollection(coll.getFullName())
- .aggregate([
- {$planCacheStats: {}},
- {$match: {createdFromQuery: {query: query, sort: {}, projection: {}}}}
- ])
- .toArray();
- assert.lte(aggRes.length, 1);
- if (aggRes.length > 0) {
- return aggRes[0];
- }
- return null;
+"use strict";
+
+load('jstests/libs/analyze_plan.js'); // For getPlanStage().
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load('jstests/libs/fixture_helpers.js'); // For getPrimaryForNodeHostingDatabase and isMongos.
+
+const coll = db.wildcard_cached_plans;
+coll.drop();
+
+assert.commandWorked(coll.createIndex({"b.$**": 1}));
+assert.commandWorked(coll.createIndex({"a": 1}));
+
+// In order for the plan cache to be used, there must be more than one plan available. Insert
+// data into the collection such that the b.$** index will be far more selective than the index
+// on 'a' for the query {a: 1, b: 1}.
+for (let i = 0; i < 1000; i++) {
+ assert.commandWorked(coll.insert({a: 1}));
+}
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+
+function getCacheEntryForQuery(query) {
+ const aggRes = FixtureHelpers.getPrimaryForNodeHostingDatabase(db)
+ .getCollection(coll.getFullName())
+ .aggregate([
+ {$planCacheStats: {}},
+ {$match: {createdFromQuery: {query: query, sort: {}, projection: {}}}}
+ ])
+ .toArray();
+ assert.lte(aggRes.length, 1);
+ if (aggRes.length > 0) {
+ return aggRes[0];
}
-
- function getPlanCacheKeyFromExplain(explainRes) {
- const hash = FixtureHelpers.isMongos(db)
- ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
- : explainRes.queryPlanner.planCacheKey;
- assert.eq(typeof(hash), "string");
- return hash;
- }
-
- function getPlanCacheKey(query) {
- return getPlanCacheKeyFromExplain(
- assert.commandWorked(coll.explain().find(query).finish()));
- }
-
- const query = {a: 1, b: 1};
-
- // The plan cache should be empty.
- assert.eq(getCacheEntryForQuery(query), null);
-
- // Run the query twice, once to create the cache entry, and again to make the cache entry
- // active.
- for (let i = 0; i < 2; i++) {
- assert.eq(coll.find(query).itcount(), 1);
- }
-
- // The plan cache should no longer be empty. Check that the chosen plan uses the b.$** index.
- const cacheEntry = getCacheEntryForQuery(query);
- assert.neq(cacheEntry, null);
- assert.eq(cacheEntry.isActive, true);
- // Should be at least two plans: one using the {a: 1} index and the other using the b.$** index.
- assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans));
- const plan = cacheEntry.creationExecStats[0].executionStages;
- const ixScanStage = getPlanStage(plan, "IXSCAN");
- assert.neq(ixScanStage, null, () => tojson(plan));
- assert.eq(ixScanStage.keyPattern, {"$_path": 1, "b": 1}, () => tojson(plan));
-
- // Run the query again. This time it should use the cached plan. We should get the same result
- // as earlier.
+ return null;
+}
+
+function getPlanCacheKeyFromExplain(explainRes) {
+ const hash = FixtureHelpers.isMongos(db)
+ ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
+ : explainRes.queryPlanner.planCacheKey;
+ assert.eq(typeof (hash), "string");
+ return hash;
+}
+
+function getPlanCacheKey(query) {
+ return getPlanCacheKeyFromExplain(assert.commandWorked(coll.explain().find(query).finish()));
+}
+
+const query = {
+ a: 1,
+ b: 1
+};
+
+// The plan cache should be empty.
+assert.eq(getCacheEntryForQuery(query), null);
+
+// Run the query twice, once to create the cache entry, and again to make the cache entry
+// active.
+for (let i = 0; i < 2; i++) {
assert.eq(coll.find(query).itcount(), 1);
+}
+
+// The plan cache should no longer be empty. Check that the chosen plan uses the b.$** index.
+const cacheEntry = getCacheEntryForQuery(query);
+assert.neq(cacheEntry, null);
+assert.eq(cacheEntry.isActive, true);
+// Should be at least two plans: one using the {a: 1} index and the other using the b.$** index.
+assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans));
+const plan = cacheEntry.creationExecStats[0].executionStages;
+const ixScanStage = getPlanStage(plan, "IXSCAN");
+assert.neq(ixScanStage, null, () => tojson(plan));
+assert.eq(ixScanStage.keyPattern, {"$_path": 1, "b": 1}, () => tojson(plan));
+
+// Run the query again. This time it should use the cached plan. We should get the same result
+// as earlier.
+assert.eq(coll.find(query).itcount(), 1);
+
+// Now run a query where b is null. This should have a different shape key from the previous
+// query since $** indexes are sparse.
+const queryWithBNull = {
+ a: 1,
+ b: null
+};
+for (let i = 0; i < 2; i++) {
+ assert.eq(coll.find({a: 1, b: null}).itcount(), 1000);
+}
+assert.neq(getPlanCacheKey(queryWithBNull), getPlanCacheKey(query));
+
+// There should only have been one solution for the above query, so it would not get cached.
+assert.eq(getCacheEntryForQuery({a: 1, b: null}), null);
+
+// Check that indexability discriminators work with collations.
+(function() {
+// Create wildcard index with a collation.
+assertDropAndRecreateCollection(db, coll.getName(), {collation: {locale: "en_US", strength: 1}});
+assert.commandWorked(coll.createIndex({"b.$**": 1}));
+
+// Run a query which uses a different collation from that of the index, but does not use
+// string bounds.
+const queryWithoutStringExplain =
+ coll.explain().find({a: 5, b: 5}).collation({locale: "fr"}).finish();
+let ixScans = getPlanStages(queryWithoutStringExplain.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
+assert.eq(ixScans[0].keyPattern, {$_path: 1, b: 1});
+
+// Run a query which uses a different collation from that of the index and does have string
+// bounds.
+const queryWithStringExplain =
+ coll.explain().find({a: 5, b: "a string"}).collation({locale: "fr"}).finish();
+ixScans = getPlanStages(queryWithStringExplain.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(ixScans.length, 0);
+
+// Check that the shapes are different since the query which matches on a string will not
+// be eligible to use the b.$** index (since the index has a different collation).
+assert.neq(getPlanCacheKeyFromExplain(queryWithoutStringExplain),
+ getPlanCacheKeyFromExplain(queryWithStringExplain));
+})();
- // Now run a query where b is null. This should have a different shape key from the previous
- // query since $** indexes are sparse.
- const queryWithBNull = {a: 1, b: null};
- for (let i = 0; i < 2; i++) {
- assert.eq(coll.find({a: 1, b: null}).itcount(), 1000);
- }
- assert.neq(getPlanCacheKey(queryWithBNull), getPlanCacheKey(query));
-
- // There should only have been one solution for the above query, so it would not get cached.
- assert.eq(getCacheEntryForQuery({a: 1, b: null}), null);
-
- // Check that indexability discriminators work with collations.
- (function() {
- // Create wildcard index with a collation.
- assertDropAndRecreateCollection(
- db, coll.getName(), {collation: {locale: "en_US", strength: 1}});
- assert.commandWorked(coll.createIndex({"b.$**": 1}));
-
- // Run a query which uses a different collation from that of the index, but does not use
- // string bounds.
- const queryWithoutStringExplain =
- coll.explain().find({a: 5, b: 5}).collation({locale: "fr"}).finish();
- let ixScans = getPlanStages(queryWithoutStringExplain.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.eq(ixScans[0].keyPattern, {$_path: 1, b: 1});
-
- // Run a query which uses a different collation from that of the index and does have string
- // bounds.
- const queryWithStringExplain =
- coll.explain().find({a: 5, b: "a string"}).collation({locale: "fr"}).finish();
- ixScans = getPlanStages(queryWithStringExplain.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(ixScans.length, 0);
-
- // Check that the shapes are different since the query which matches on a string will not
- // be eligible to use the b.$** index (since the index has a different collation).
- assert.neq(getPlanCacheKeyFromExplain(queryWithoutStringExplain),
- getPlanCacheKeyFromExplain(queryWithStringExplain));
- })();
-
- // Check that indexability discriminators work with partial wildcard indexes.
- (function() {
- assertDropAndRecreateCollection(db, coll.getName());
- assert.commandWorked(
- coll.createIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 5}}}));
-
- // Run a query for a value included by the partial filter expression.
- const queryIndexedExplain = coll.find({a: 4}).explain();
- let ixScans = getPlanStages(queryIndexedExplain.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.eq(ixScans[0].keyPattern, {$_path: 1, a: 1});
-
- // Run a query which tries to get a value not included by the partial filter expression.
- const queryUnindexedExplain = coll.find({a: 100}).explain();
- ixScans = getPlanStages(queryUnindexedExplain.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(ixScans.length, 0);
-
- // Check that the shapes are different since the query which searches for a value not
- // included by the partial filter expression won't be eligible to use the $** index.
- assert.neq(getPlanCacheKeyFromExplain(queryIndexedExplain),
- getPlanCacheKeyFromExplain(queryUnindexedExplain));
- })();
+// Check that indexability discriminators work with partial wildcard indexes.
+(function() {
+assertDropAndRecreateCollection(db, coll.getName());
+assert.commandWorked(coll.createIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 5}}}));
+
+// Run a query for a value included by the partial filter expression.
+const queryIndexedExplain = coll.find({a: 4}).explain();
+let ixScans = getPlanStages(queryIndexedExplain.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
+assert.eq(ixScans[0].keyPattern, {$_path: 1, a: 1});
+
+// Run a query which tries to get a value not included by the partial filter expression.
+const queryUnindexedExplain = coll.find({a: 100}).explain();
+ixScans = getPlanStages(queryUnindexedExplain.queryPlanner.winningPlan, "IXSCAN");
+assert.eq(ixScans.length, 0);
+
+// Check that the shapes are different since the query which searches for a value not
+// included by the partial filter expression won't be eligible to use the $** index.
+assert.neq(getPlanCacheKeyFromExplain(queryIndexedExplain),
+ getPlanCacheKeyFromExplain(queryUnindexedExplain));
+})();
})();
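The $planCacheStats lookup used above generalizes to any shape: once a query with more than
one candidate plan has been run (twice, to activate its entry), the entry can be matched by
the query shape it was created from. A minimal sketch against an unsharded collection (the
name "cache_demo" is hypothetical):

    // Matches the cache entry created from the shape {a: 1, b: 1} with no sort or projection.
    const entries = db.cache_demo
                        .aggregate([
                            {$planCacheStats: {}},
                            {$match: {createdFromQuery: {query: {a: 1, b: 1}, sort: {}, projection: {}}}}
                        ])
                        .toArray();
    // Empty until the query has actually been planned and cached.
    assert.lte(entries.length, 1);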
diff --git a/jstests/core/wildcard_index_collation.js b/jstests/core/wildcard_index_collation.js
index 5e71100c7c2..9ccbc0181dd 100644
--- a/jstests/core/wildcard_index_collation.js
+++ b/jstests/core/wildcard_index_collation.js
@@ -9,116 +9,116 @@
* requires_non_retryable_writes]
*/
(function() {
- "user strict";
+"user strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- const assertArrayEq = (l, r) => assert(arrayEq(l, r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r));
- // Create the collection and assign it a default case-insensitive collation.
- const coll = assertDropAndRecreateCollection(
- db, "wildcard_collation", {collation: {locale: "en_US", strength: 1}});
+// Create the collection and assign it a default case-insensitive collation.
+const coll = assertDropAndRecreateCollection(
+ db, "wildcard_collation", {collation: {locale: "en_US", strength: 1}});
- // Extracts the winning plan for the given query and projection from the explain output.
- const winningPlan = (query, proj) => FixtureHelpers.isMongos(db)
- ? coll.find(query, proj).explain().queryPlanner.winningPlan.shards[0].winningPlan
- : coll.find(query, proj).explain().queryPlanner.winningPlan;
+// Extracts the winning plan for the given query and projection from the explain output.
+const winningPlan = (query, proj) => FixtureHelpers.isMongos(db)
+ ? coll.find(query, proj).explain().queryPlanner.winningPlan.shards[0].winningPlan
+ : coll.find(query, proj).explain().queryPlanner.winningPlan;
- // Runs the given query and confirms that: (1) the $** was used to answer the query, (2) the
- // results produced by the $** index match the given 'expectedResults', and (3) the same output
- // is produced by a COLLSCAN with the same collation.
- function assertWildcardIndexAnswersQuery(query, expectedResults, projection) {
- // Verify that the $** index can answer this query.
- const ixScans = getPlanStages(winningPlan(query, (projection || {_id: 0})), "IXSCAN");
- assert.gt(ixScans.length, 0, tojson(coll.find(query).explain()));
- ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path));
+// Runs the given query and confirms that: (1) the $** index was used to answer the query, (2) the
+// results produced by the $** index match the given 'expectedResults', and (3) the same output
+// is produced by a COLLSCAN with the same collation.
+function assertWildcardIndexAnswersQuery(query, expectedResults, projection) {
+ // Verify that the $** index can answer this query.
+ const ixScans = getPlanStages(winningPlan(query, (projection || {_id: 0})), "IXSCAN");
+ assert.gt(ixScans.length, 0, tojson(coll.find(query).explain()));
+ ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path));
- // Assert that the $** index produces the expected results, and that these are the same
- // as those produced by a COLLSCAN with the same collation.
- const wildcardResults = coll.find(query, (projection || {_id: 0})).toArray();
- assertArrayEq(wildcardResults, expectedResults);
- assertArrayEq(wildcardResults,
- coll.find(query, (projection || {_id: 0}))
- .collation({locale: "en_US", strength: 1})
- .hint({$natural: 1})
- .toArray());
- }
+ // Assert that the $** index produces the expected results, and that these are the same
+ // as those produced by a COLLSCAN with the same collation.
+ const wildcardResults = coll.find(query, (projection || {_id: 0})).toArray();
+ assertArrayEq(wildcardResults, expectedResults);
+ assertArrayEq(wildcardResults,
+ coll.find(query, (projection || {_id: 0}))
+ .collation({locale: "en_US", strength: 1})
+ .hint({$natural: 1})
+ .toArray());
+}
- // Confirms that the index matching the given keyPattern has the specified collation.
- function assertIndexHasCollation(keyPattern, collation) {
- var indexSpecs = coll.getIndexes();
- var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation);
- assert.neq(null,
- found,
- "Index with key pattern " + tojson(keyPattern) + " and collation " +
- tojson(collation) + " not found: " + tojson(indexSpecs));
- }
+// Confirms that the index matching the given keyPattern has the specified collation.
+function assertIndexHasCollation(keyPattern, collation) {
+ var indexSpecs = coll.getIndexes();
+ var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation);
+ assert.neq(null,
+ found,
+ "Index with key pattern " + tojson(keyPattern) + " and collation " +
+ tojson(collation) + " not found: " + tojson(indexSpecs));
+}
- // Confirm that the $** index inherits the collection's default collation.
- assert.commandWorked(coll.createIndex({"$**": 1}));
- assertIndexHasCollation({"$**": 1}, {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 1,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false,
- version: "57.1",
- });
+// Confirm that the $** index inherits the collection's default collation.
+assert.commandWorked(coll.createIndex({"$**": 1}));
+assertIndexHasCollation({"$**": 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 1,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false,
+ version: "57.1",
+});
- // Insert a series of documents whose fieldnames and values differ only by case.
- assert.commandWorked(coll.insert({a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5}));
- assert.commandWorked(coll.insert({a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5}));
- assert.commandWorked(coll.insert({A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5}));
- assert.commandWorked(coll.insert({A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5}));
+// Insert a series of documents whose fieldnames and values differ only by case.
+assert.commandWorked(coll.insert({a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5}));
+assert.commandWorked(coll.insert({a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5}));
+assert.commandWorked(coll.insert({A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5}));
+assert.commandWorked(coll.insert({A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5}));
- // Confirm that only the document's values adhere to the case-insensitive collation. The field
- // paths, which are also present in the $** index keys, are evaluated using simple binary
- // comparison; so for instance, path "a.b" does *not* match path "A.B".
- assertWildcardIndexAnswersQuery({"a.b": "string"}, [
- {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5},
- {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5}
- ]);
- assertWildcardIndexAnswersQuery({"A.B": "string"}, [
- {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5},
- {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5}
- ]);
+// Confirm that only the document's values adhere to the case-insensitive collation. The field
+// paths, which are also present in the $** index keys, are evaluated using simple binary
+// comparison; so for instance, path "a.b" does *not* match path "A.B".
+assertWildcardIndexAnswersQuery({"a.b": "string"}, [
+ {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5},
+ {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5}
+]);
+assertWildcardIndexAnswersQuery({"A.B": "string"}, [
+ {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5},
+ {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5}
+]);
- // All documents in the collection are returned if we query over both upper- and lower-case
- // fieldnames, or when the fieldname has a consistent case across all documents.
- const allDocs = coll.find({}, {_id: 0}).toArray();
- assertWildcardIndexAnswersQuery({$or: [{"a.c": "string"}, {"A.C": "string"}]}, allDocs);
- assertWildcardIndexAnswersQuery({d: "string"}, allDocs);
+// All documents in the collection are returned if we query over both upper- and lower-case
+// fieldnames, or when the fieldname has a consistent case across all documents.
+const allDocs = coll.find({}, {_id: 0}).toArray();
+assertWildcardIndexAnswersQuery({$or: [{"a.c": "string"}, {"A.C": "string"}]}, allDocs);
+assertWildcardIndexAnswersQuery({d: "string"}, allDocs);
- // Confirm that the $** index also differentiates between upper and lower fieldname case when
- // querying fields which do not contain string values.
- assertWildcardIndexAnswersQuery({e: 5}, [
- {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5},
- {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5}
- ]);
- assertWildcardIndexAnswersQuery({E: 5}, [
- {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5},
- {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5}
- ]);
+// Confirm that the $** index also differentiates between upper and lower fieldname case when
+// querying fields which do not contain string values.
+assertWildcardIndexAnswersQuery({e: 5}, [
+ {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5},
+ {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5}
+]);
+assertWildcardIndexAnswersQuery({E: 5}, [
+ {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5},
+ {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5}
+]);
- // Confirm that the $** index produces a covered plan for a query on non-string, non-object,
- // non-array values.
- assert(isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1})));
- assert(isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, E: 1})));
+// Confirm that the $** index produces a covered plan for a query on non-string, non-object,
+// non-array values.
+assert(isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1})));
+assert(isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, E: 1})));
- // Confirm that the $** index differentiates fieldname case when attempting to cover.
- assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, E: 1})));
- assert(!isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, e: 1})));
+// Confirm that the $** index differentiates fieldname case when attempting to cover.
+assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, E: 1})));
+assert(!isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, e: 1})));
- // Confirm that attempting to project the virtual $_path field which is present in $** index
- // keys produces a non-covered solution, which nonetheless returns the correct results.
- assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1, $_path: 1})));
- assertWildcardIndexAnswersQuery({e: 5}, [{e: 5}, {e: 5}], {_id: 0, e: 1, $_path: 1});
+// Confirm that attempting to project the virtual $_path field, which is present in $** index
+// keys, produces a non-covered solution that nonetheless returns the correct results.
+assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1, $_path: 1})));
+assertWildcardIndexAnswersQuery({e: 5}, [{e: 5}, {e: 5}], {_id: 0, e: 1, $_path: 1});
})();
\ No newline at end of file
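The behaviour exercised by this test reduces to a short sketch. The scratch collection name 'case_demo' below is an assumption for illustration, not part of the patch:

(function() {
"use strict";

// A scratch collection whose default collation is case-insensitive (strength 1).
const caseDemo = db.case_demo;
caseDemo.drop();
assert.commandWorked(
    db.createCollection(caseDemo.getName(), {collation: {locale: "en_US", strength: 1}}));
assert.commandWorked(caseDemo.createIndex({"$**": 1}));
assert.commandWorked(caseDemo.insert({a: "string"}));

// Values are compared under the collation, so case is ignored...
assert.eq(1, caseDemo.find({a: "STRING"}).itcount());
// ...but field paths use simple binary comparison, so path "A" does not match path "a".
assert.eq(0, caseDemo.find({A: "string"}).itcount());
})();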
diff --git a/jstests/core/wildcard_index_count.js b/jstests/core/wildcard_index_count.js
index 7b684b29f3c..6c9fa8f05a3 100644
--- a/jstests/core/wildcard_index_count.js
+++ b/jstests/core/wildcard_index_count.js
@@ -6,90 +6,90 @@
// for retrying on interrupt is not prepared to handle aggregation explain.
// @tags: [assumes_unsharded_collection, does_not_support_stepdowns]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js");
+load("jstests/libs/analyze_plan.js");
- const coll = db.wildcard_index_count;
- coll.drop();
+const coll = db.wildcard_index_count;
+coll.drop();
- assert.commandWorked(coll.insert([
- {a: 3},
- {a: null},
- {a: [-1, 0]},
- {a: [4, -3, 5]},
- {},
- {a: {b: 4}},
- {a: []},
- {a: [[], {}]},
- {a: {}},
- ]));
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.insert([
+ {a: 3},
+ {a: null},
+ {a: [-1, 0]},
+ {a: [4, -3, 5]},
+ {},
+ {a: {b: 4}},
+ {a: []},
+ {a: [[], {}]},
+ {a: {}},
+]));
+assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.eq(2, coll.count({a: {$gt: 0}}));
- assert.eq(2, coll.find({a: {$gt: 0}}).itcount());
- assert.eq(2, coll.aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]).next().count);
+assert.eq(2, coll.count({a: {$gt: 0}}));
+assert.eq(2, coll.find({a: {$gt: 0}}).itcount());
+assert.eq(2, coll.aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]).next().count);
- // Verify that this query uses a COUNT_SCAN.
- let explain = coll.explain().count({a: {$gt: 0}});
- let countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN");
- assert.neq(null, countScan, explain);
- assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan);
+// Verify that this query uses a COUNT_SCAN.
+let explain = coll.explain().count({a: {$gt: 0}});
+let countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN");
+assert.neq(null, countScan, explain);
+assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan);
- // Query should also COUNT_SCAN when expressed as an aggregation.
- explain = coll.explain().aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]);
- countScan = getAggPlanStage(explain, "COUNT_SCAN");
- assert.neq(null, countScan, explain);
- assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan);
+// The query should also use a COUNT_SCAN when expressed as an aggregation.
+explain = coll.explain().aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]);
+countScan = getAggPlanStage(explain, "COUNT_SCAN");
+assert.neq(null, countScan, explain);
+assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan);
- // $count of entire collection does not COUNT_SCAN.
- assert.eq(9, coll.find().itcount());
- assert.eq(9, coll.aggregate([{$count: "count"}]).next().count);
- explain = coll.explain().aggregate([{$count: "count"}]);
- countScan = getAggPlanStage(explain, "COUNT_SCAN");
- assert.eq(null, countScan, explain);
+// A $count over the entire collection does not use a COUNT_SCAN.
+assert.eq(9, coll.find().itcount());
+assert.eq(9, coll.aggregate([{$count: "count"}]).next().count);
+explain = coll.explain().aggregate([{$count: "count"}]);
+countScan = getAggPlanStage(explain, "COUNT_SCAN");
+assert.eq(null, countScan, explain);
- // When the count consists of multiple intervals, we cannot use COUNT_SCAN.
- assert.eq(2, coll.count({a: {$in: [3, 4]}}));
- assert.eq(2, coll.find({a: {$in: [3, 4]}}).itcount());
- assert.eq(2, coll.aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]).next().count);
- explain = coll.explain().aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]);
- countScan = getAggPlanStage(explain, "COUNT_SCAN");
- assert.eq(null, countScan, explain);
- let ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, explain);
- assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan);
+// When the count consists of multiple intervals, we cannot use COUNT_SCAN.
+assert.eq(2, coll.count({a: {$in: [3, 4]}}));
+assert.eq(2, coll.find({a: {$in: [3, 4]}}).itcount());
+assert.eq(2, coll.aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]).next().count);
+explain = coll.explain().aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]);
+countScan = getAggPlanStage(explain, "COUNT_SCAN");
+assert.eq(null, countScan, explain);
+let ixscan = getAggPlanStage(explain, "IXSCAN");
+assert.neq(null, ixscan, explain);
+assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan);
- // Count with an equality match on an empty array cannot use COUNT_SCAN.
- assert.eq(2, coll.count({a: {$eq: []}}));
- assert.eq(2, coll.find({a: {$eq: []}}).itcount());
- assert.eq(2, coll.aggregate([{$match: {a: {$eq: []}}}, {$count: "count"}]).next().count);
- explain = coll.explain().count({a: {$eq: []}});
- countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN");
- assert.eq(null, countScan, explain);
- ixscan = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
- assert.neq(null, ixscan, explain);
- assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan);
+// Count with an equality match on an empty array cannot use COUNT_SCAN.
+assert.eq(2, coll.count({a: {$eq: []}}));
+assert.eq(2, coll.find({a: {$eq: []}}).itcount());
+assert.eq(2, coll.aggregate([{$match: {a: {$eq: []}}}, {$count: "count"}]).next().count);
+explain = coll.explain().count({a: {$eq: []}});
+countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN");
+assert.eq(null, countScan, explain);
+ixscan = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
+assert.neq(null, ixscan, explain);
+assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan);
- // Count with an equality match on an empty object can use COUNT_SCAN.
- assert.eq(2, coll.count({a: {$eq: {}}}));
- assert.eq(2, coll.find({a: {$eq: {}}}).itcount());
- assert.eq(2, coll.aggregate([{$match: {a: {$eq: {}}}}, {$count: "count"}]).next().count);
- explain = coll.explain().count({a: {$eq: {}}});
- countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN");
- assert.eq({$_path: 1, a: 1}, countScan.keyPattern, explain);
+// Count with an equality match on an empty object can use COUNT_SCAN.
+assert.eq(2, coll.count({a: {$eq: {}}}));
+assert.eq(2, coll.find({a: {$eq: {}}}).itcount());
+assert.eq(2, coll.aggregate([{$match: {a: {$eq: {}}}}, {$count: "count"}]).next().count);
+explain = coll.explain().count({a: {$eq: {}}});
+countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN");
+assert.eq({$_path: 1, a: 1}, countScan.keyPattern, explain);
- // Count with equality to a non-empty object cannot use the wildcard index.
- assert.eq(1, coll.count({a: {b: 4}}));
- assert.eq(1, coll.find({a: {b: 4}}).itcount());
- assert.eq(1, coll.aggregate([{$match: {a: {b: 4}}}, {$count: "count"}]).next().count);
- explain = coll.explain().count({a: {b: 4}});
- assert(isCollscan(db, explain.queryPlanner.winningPlan), explain);
+// Count with equality to a non-empty object cannot use the wildcard index.
+assert.eq(1, coll.count({a: {b: 4}}));
+assert.eq(1, coll.find({a: {b: 4}}).itcount());
+assert.eq(1, coll.aggregate([{$match: {a: {b: 4}}}, {$count: "count"}]).next().count);
+explain = coll.explain().count({a: {b: 4}});
+assert(isCollscan(db, explain.queryPlanner.winningPlan), explain);
- // Count with equality to a non-empty array cannot use the wildcard index.
- assert.eq(1, coll.count({a: [-1, 0]}));
- assert.eq(1, coll.find({a: [-1, 0]}).itcount());
- assert.eq(1, coll.aggregate([{$match: {a: [-1, 0]}}, {$count: "count"}]).next().count);
- explain = coll.explain().count({a: [-1, 0]});
- assert(isCollscan(db, explain.queryPlanner.winningPlan), explain);
+// Count with equality to a non-empty array cannot use the wildcard index.
+assert.eq(1, coll.count({a: [-1, 0]}));
+assert.eq(1, coll.find({a: [-1, 0]}).itcount());
+assert.eq(1, coll.aggregate([{$match: {a: [-1, 0]}}, {$count: "count"}]).next().count);
+explain = coll.explain().count({a: [-1, 0]});
+assert(isCollscan(db, explain.queryPlanner.winningPlan), explain);
}());
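To see concretely why the $in predicate above rules out a COUNT_SCAN, the interval bounds of the winning plan can be inspected. A minimal sketch, assuming the collection and the analyze_plan.js helpers loaded in the test:

const countColl = db.wildcard_index_count;
const inExplain = countColl.explain().find({a: {$in: [3, 4]}}).finish();
const inScan = getPlanStage(inExplain.queryPlanner.winningPlan, "IXSCAN");
// Two disjoint point intervals on 'a' (roughly ["[3.0, 3.0]", "[4.0, 4.0]"]); a single
// COUNT_SCAN range cannot cover both.
printjson(inScan.indexBounds);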
diff --git a/jstests/core/wildcard_index_covered_queries.js b/jstests/core/wildcard_index_covered_queries.js
index df6142e859e..1e4451e8710 100644
--- a/jstests/core/wildcard_index_covered_queries.js
+++ b/jstests/core/wildcard_index_covered_queries.js
@@ -8,76 +8,76 @@
* @tags: [assumes_unsharded_collection, does_not_support_stepdowns]
*/
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/libs/analyze_plan.js"); // For getPlanStages and isIndexOnly.
-
- const assertArrayEq = (l, r) => assert(arrayEq(l, r));
-
- const coll = db.wildcard_covered_query;
- coll.drop();
-
- // Confirms that the $** index can answer the given query and projection, that it produces a
- // covered solution, and that the results are identical to those obtained by a COLLSCAN. If
- // 'shouldFailToCover' is true, inverts the assertion and confirms that the given query and
- // projection do *not* produce a covered plan.
- function assertWildcardProvidesCoveredSolution(query, proj, shouldFailToCover = false) {
- // Obtain the explain output for the given query and projection. We run the explain with
- // 'executionStats' so that we can subsequently validate the number of documents examined.
- const explainOut = assert.commandWorked(coll.find(query, proj).explain("executionStats"));
- const winningPlan = explainOut.queryPlanner.winningPlan;
-
- // Verify that the $** index provided the winning solution for this query.
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0, tojson(explainOut));
- ixScans.forEach((ixScan) => assert(ixScan.keyPattern.hasOwnProperty("$_path")));
-
- // Verify that the solution is covered, and that no documents were examined. If the argument
- // 'shouldFailToCover' is true, invert the validation to confirm that it is NOT covered.
- assert.eq(!!explainOut.executionStats.totalDocsExamined, shouldFailToCover);
- assert.eq(isIndexOnly(coll.getDB(), winningPlan), !shouldFailToCover);
-
- // Verify that the query covered by the $** index produces the same results as a COLLSCAN.
- assertArrayEq(coll.find(query, proj).toArray(),
- coll.find(query, proj).hint({$natural: 1}).toArray());
- }
-
- // Create a new collection and build a $** index on it.
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 200; i++) {
- bulk.insert({a: {b: i, c: `${(i+1)}`}, d: (i + 2)});
- }
- assert.commandWorked(bulk.execute());
- assert.commandWorked(coll.createIndex({"$**": 1}));
-
- // Verify that the $** index can cover an exact match on an integer value.
- assertWildcardProvidesCoveredSolution({"a.b": 10}, {_id: 0, "a.b": 1});
-
- // Verify that the $** index can cover an exact match on a string value.
- assertWildcardProvidesCoveredSolution({"a.c": "10"}, {_id: 0, "a.c": 1});
-
- // Verify that the $** index can cover a range query for integer values.
- assertWildcardProvidesCoveredSolution({"a.b": {$gt: 10, $lt: 99}}, {_id: 0, "a.b": 1});
-
- // Verify that the $** index can cover a range query for string values.
- assertWildcardProvidesCoveredSolution({"a.c": {$gt: "10", $lt: "99"}}, {_id: 0, "a.c": 1});
-
- // Verify that the $** index can cover an $in query for integer values.
- assertWildcardProvidesCoveredSolution({"a.b": {$in: [0, 50, 100, 150]}}, {_id: 0, "a.b": 1});
-
- // Verify that the $** index can cover an $in query for string values.
- assertWildcardProvidesCoveredSolution({"a.c": {$in: ["0", "50", "100", "150"]}},
- {_id: 0, "a.c": 1});
-
- // Verify that attempting to project the virtual $_path field from the $** keyPattern will fail
- // to do so and will instead produce a non-covered query. However, this query will nonetheless
- // output the correct results.
- const shouldFailToCover = true;
- assertWildcardProvidesCoveredSolution(
- {d: {$in: [0, 25, 50, 75, 100]}}, {_id: 0, d: 1, $_path: 1}, shouldFailToCover);
-
- // Verify that predicates which produce inexact-fetch bounds are not covered by a $** index.
- assertWildcardProvidesCoveredSolution(
- {d: {$elemMatch: {$eq: 50}}}, {_id: 0, d: 1}, shouldFailToCover);
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages and isIndexOnly.
+
+const assertArrayEq = (l, r) => assert(arrayEq(l, r));
+
+const coll = db.wildcard_covered_query;
+coll.drop();
+
+// Confirms that the $** index can answer the given query and projection, that it produces a
+// covered solution, and that the results are identical to those obtained by a COLLSCAN. If
+// 'shouldFailToCover' is true, inverts the assertion and confirms that the given query and
+// projection do *not* produce a covered plan.
+function assertWildcardProvidesCoveredSolution(query, proj, shouldFailToCover = false) {
+ // Obtain the explain output for the given query and projection. We run the explain with
+ // 'executionStats' so that we can subsequently validate the number of documents examined.
+ const explainOut = assert.commandWorked(coll.find(query, proj).explain("executionStats"));
+ const winningPlan = explainOut.queryPlanner.winningPlan;
+
+ // Verify that the $** index provided the winning solution for this query.
+ const ixScans = getPlanStages(winningPlan, "IXSCAN");
+ assert.gt(ixScans.length, 0, tojson(explainOut));
+ ixScans.forEach((ixScan) => assert(ixScan.keyPattern.hasOwnProperty("$_path")));
+
+ // Verify that the solution is covered, and that no documents were examined. If the argument
+ // 'shouldFailToCover' is true, invert the validation to confirm that it is NOT covered.
+ assert.eq(!!explainOut.executionStats.totalDocsExamined, shouldFailToCover);
+ assert.eq(isIndexOnly(coll.getDB(), winningPlan), !shouldFailToCover);
+
+ // Verify that the query covered by the $** index produces the same results as a COLLSCAN.
+ assertArrayEq(coll.find(query, proj).toArray(),
+ coll.find(query, proj).hint({$natural: 1}).toArray());
+}
+
+// Create a new collection and build a $** index on it.
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 200; i++) {
+ bulk.insert({a: {b: i, c: `${(i + 1)}`}, d: (i + 2)});
+}
+assert.commandWorked(bulk.execute());
+assert.commandWorked(coll.createIndex({"$**": 1}));
+
+// Verify that the $** index can cover an exact match on an integer value.
+assertWildcardProvidesCoveredSolution({"a.b": 10}, {_id: 0, "a.b": 1});
+
+// Verify that the $** index can cover an exact match on a string value.
+assertWildcardProvidesCoveredSolution({"a.c": "10"}, {_id: 0, "a.c": 1});
+
+// Verify that the $** index can cover a range query for integer values.
+assertWildcardProvidesCoveredSolution({"a.b": {$gt: 10, $lt: 99}}, {_id: 0, "a.b": 1});
+
+// Verify that the $** index can cover a range query for string values.
+assertWildcardProvidesCoveredSolution({"a.c": {$gt: "10", $lt: "99"}}, {_id: 0, "a.c": 1});
+
+// Verify that the $** index can cover an $in query for integer values.
+assertWildcardProvidesCoveredSolution({"a.b": {$in: [0, 50, 100, 150]}}, {_id: 0, "a.b": 1});
+
+// Verify that the $** index can cover an $in query for string values.
+assertWildcardProvidesCoveredSolution({"a.c": {$in: ["0", "50", "100", "150"]}},
+ {_id: 0, "a.c": 1});
+
+// Verify that attempting to project the virtual $_path field from the $** keyPattern fails to
+// cover the query, instead producing a non-covered plan that nonetheless outputs the correct
+// results.
+const shouldFailToCover = true;
+assertWildcardProvidesCoveredSolution(
+ {d: {$in: [0, 25, 50, 75, 100]}}, {_id: 0, d: 1, $_path: 1}, shouldFailToCover);
+
+// Verify that predicates which produce inexact-fetch bounds are not covered by a $** index.
+assertWildcardProvidesCoveredSolution(
+ {d: {$elemMatch: {$eq: 50}}}, {_id: 0, d: 1}, shouldFailToCover);
})();
\ No newline at end of file
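The covered/non-covered distinction asserted above can also be read straight from the explain output. A brief sketch under the same assumptions (collection populated, analyze_plan.js loaded):

const covExplain = assert.commandWorked(
    db.wildcard_covered_query.find({"a.b": 10}, {_id: 0, "a.b": 1}).explain("executionStats"));
// A covered wildcard plan has no FETCH stage and examines no documents at all.
assert(!planHasStage(db, covExplain.queryPlanner.winningPlan, "FETCH"));
assert.eq(0, covExplain.executionStats.totalDocsExamined);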
diff --git a/jstests/core/wildcard_index_dedup.js b/jstests/core/wildcard_index_dedup.js
index 4fd5bf5df8e..093d3e9d219 100644
--- a/jstests/core/wildcard_index_dedup.js
+++ b/jstests/core/wildcard_index_dedup.js
@@ -5,24 +5,24 @@
* scanned and return only a single object.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.wildcard_index_dedup;
- coll.drop();
+const coll = db.wildcard_index_dedup;
+coll.drop();
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(coll.insert({a: {b: 1, c: {f: 1, g: 1}}, d: {e: [1, 2, 3]}}));
+assert.commandWorked(coll.insert({a: {b: 1, c: {f: 1, g: 1}}, d: {e: [1, 2, 3]}}));
- // An $exists that matches multiple $** index paths from nested objects does not return
- // duplicates of the same object.
- assert.eq(1, coll.find({a: {$exists: true}}).hint({"$**": 1}).itcount());
+// An $exists that matches multiple $** index paths from nested objects does not return
+// duplicates of the same object.
+assert.eq(1, coll.find({a: {$exists: true}}).hint({"$**": 1}).itcount());
- // An $exists that matches multiple $** index paths from nested array does not return
- // duplicates of the same object.
- assert.eq(1, coll.find({d: {$exists: true}}).hint({"$**": 1}).itcount());
+// An $exists that matches multiple $** index paths from a nested array does not return
+// duplicates of the same object.
+assert.eq(1, coll.find({d: {$exists: true}}).hint({"$**": 1}).itcount());
- // An $exists with dotted path that matches multiple $** index paths from nested objects
- // does not return duplicates of the same object.
- assert.eq(1, coll.find({"a.c": {$exists: true}}).hint({"$**": 1}).itcount());
+// An $exists with a dotted path that matches multiple $** index paths from nested objects
+// does not return duplicates of the same object.
+assert.eq(1, coll.find({"a.c": {$exists: true}}).hint({"$**": 1}).itcount());
})();
\ No newline at end of file
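The deduplication can likewise be observed in executionStats. A sketch assuming the collection and index built in the test above:

const dedupExplain = assert.commandWorked(db.wildcard_index_dedup.find({a: {$exists: true}})
                                              .hint({"$**": 1})
                                              .explain("executionStats"));
// Several index keys (one per indexed path under 'a') funnel into one returned document.
print("keys examined: " + dedupExplain.executionStats.totalKeysExamined);
assert.eq(1, dedupExplain.executionStats.nReturned);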
diff --git a/jstests/core/wildcard_index_distinct_scan.js b/jstests/core/wildcard_index_distinct_scan.js
index df831cbc5c9..f1e0fa67f3f 100644
--- a/jstests/core/wildcard_index_distinct_scan.js
+++ b/jstests/core/wildcard_index_distinct_scan.js
@@ -2,197 +2,197 @@
* Tests that a $** index can provide a DISTINCT_SCAN or indexed solution where appropriate.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/libs/analyze_plan.js"); // For planHasStage and getPlanStages.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/libs/analyze_plan.js"); // For planHasStage and getPlanStages.
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
- const coll = db.all_paths_distinct_scan;
- coll.drop();
+const coll = db.all_paths_distinct_scan;
+coll.drop();
+
+// Records whether the field which we are distinct-ing over is multikey.
+let distinctFieldIsMultikey = false;
- // Records whether the field which we are distinct-ing over is multikey.
- let distinctFieldIsMultikey = false;
-
- // Insert a set of documents into the collection. The 'listOfValues' argument contains values of
- // various types, and we insert numerous documents containing each of the values. This allows us
- // to confirm that 'distinct' with a wildcard index (1) can return values of any type, (2) will
- // only return the set of unique values, and (3) handles multikey values appropriately in cases
- // where 'listOfValues' includes an array.
- function insertTestData(fieldName, listOfValues) {
- distinctFieldIsMultikey = listOfValues.some((val) => Array.isArray(val));
- const bulk = coll.initializeUnorderedBulkOp();
- coll.drop();
- for (let i = 0; i < 200; i++) {
- const didx = (i % listOfValues.length);
- bulk.insert({[fieldName]: listOfValues[didx], b: didx, c: (-i)});
- }
- assert.commandWorked(bulk.execute());
+// Insert a set of documents into the collection. The 'listOfValues' argument contains values of
+// various types, and we insert numerous documents containing each of the values. This allows us
+// to confirm that 'distinct' with a wildcard index (1) can return values of any type, (2) will
+// only return the set of unique values, and (3) handles multikey values appropriately in cases
+// where 'listOfValues' includes an array.
+function insertTestData(fieldName, listOfValues) {
+ distinctFieldIsMultikey = listOfValues.some((val) => Array.isArray(val));
+ const bulk = coll.initializeUnorderedBulkOp();
+ coll.drop();
+ for (let i = 0; i < 200; i++) {
+ const didx = (i % listOfValues.length);
+ bulk.insert({[fieldName]: listOfValues[didx], b: didx, c: (-i)});
}
+ assert.commandWorked(bulk.execute());
+}
- /**
- * Runs a single wildcard distinct scan test. If 'expectedPath' is non-null, verifies that there
- * is an indexed solution that uses the $** index with the given path string. If 'expectedPath'
- * is null, verifies that no indexed solution was found.
- */
- function assertWildcardDistinctScan(
- {distinctKey, query, pathProjection, expectedScanType, expectedResults, expectedPath}) {
- // Drop all indexes before running the test. This allows us to perform the distinct with a
- // COLLSCAN at first, to confirm that the results are as expected.
- assert.commandWorked(coll.dropIndexes());
-
- // Confirm that the distinct runs with a COLLSCAN.
- let winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan;
+/**
+ * Runs a single wildcard distinct scan test. If 'expectedPath' is non-null, verifies that there
+ * is an indexed solution that uses the $** index with the given path string. If 'expectedPath'
+ * is null, verifies that no indexed solution was found.
+ */
+function assertWildcardDistinctScan(
+ {distinctKey, query, pathProjection, expectedScanType, expectedResults, expectedPath}) {
+ // Drop all indexes before running the test. This allows us to perform the distinct with a
+ // COLLSCAN at first, to confirm that the results are as expected.
+ assert.commandWorked(coll.dropIndexes());
+
+ // Confirm that the distinct runs with a COLLSCAN.
+ let winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan;
+ assert(planHasStage(coll.getDB(), winningPlan, "COLLSCAN"));
+ // Run the distinct and confirm that it produces the expected results.
+ assertArrayEq(coll.distinct(distinctKey, query), expectedResults);
+
+ // Build a wildcard index on the collection and re-run the test.
+ const options = (pathProjection ? {wildcardProjection: pathProjection} : {});
+ assert.commandWorked(coll.createIndex({"$**": 1}, options));
+
+ // We expect the following outcomes for a 'distinct' that attempts to use a $** index:
+ // - No query: COLLSCAN.
+ // - Query for object value on distinct field: COLLSCAN.
+ // - Query for non-object value on non-multikey distinct field: DISTINCT_SCAN.
+ // - Query for non-object value on multikey distinct field: IXSCAN with FETCH.
+ // - Query for non-object value on field other than the distinct field: IXSCAN with FETCH.
+ const fetchIsExpected = (expectedScanType !== "DISTINCT_SCAN");
+
+ // Explain the query, and determine whether an indexed solution is available. If
+ // 'expectedPath' is null, then we do not expect the $** index to provide a plan.
+ winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan;
+ if (!expectedPath) {
assert(planHasStage(coll.getDB(), winningPlan, "COLLSCAN"));
- // Run the distinct and confirm that it produces the expected results.
- assertArrayEq(coll.distinct(distinctKey, query), expectedResults);
-
- // Build a wildcard index on the collection and re-run the test.
- const options = (pathProjection ? {wildcardProjection: pathProjection} : {});
- assert.commandWorked(coll.createIndex({"$**": 1}, options));
-
- // We expect the following outcomes for a 'distinct' that attempts to use a $** index:
- // - No query: COLLSCAN.
- // - Query for object value on distinct field: COLLSCAN.
- // - Query for non-object value on non-multikey distinct field: DISTINCT_SCAN.
- // - Query for non-object value on multikey distinct field: IXSCAN with FETCH.
- // - Query for non-object value on field other than the distinct field: IXSCAN with FETCH.
- const fetchIsExpected = (expectedScanType !== "DISTINCT_SCAN");
-
- // Explain the query, and determine whether an indexed solution is available. If
- // 'expectedPath' is null, then we do not expect the $** index to provide a plan.
- winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan;
- if (!expectedPath) {
- assert(planHasStage(coll.getDB(), winningPlan, "COLLSCAN"));
- assert.eq(expectedScanType, "COLLSCAN");
- return;
- }
-
- // Confirm that the $** distinct scan produces the expected results.
- assertArrayEq(coll.distinct(distinctKey, query), expectedResults);
- // Confirm that the $** plan adheres to 'fetchIsExpected' and 'expectedScanType'.
- assert.eq(planHasStage(coll.getDB(), winningPlan, "FETCH"), fetchIsExpected);
- assert(planHasStage(coll.getDB(), winningPlan, expectedScanType));
- assert.docEq({$_path: 1, [expectedPath]: 1},
- getPlanStages(winningPlan, expectedScanType).shift().keyPattern);
+ assert.eq(expectedScanType, "COLLSCAN");
+ return;
}
- // The set of distinct values that should be produced by each of the test listed below.
- const distinctValues = [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}];
-
- // Define the set of values that the distinct field may take. The first test case consists
- // entirely of non-multikey fields, while the second includes multikey fields.
- const testCases = [
- // Non-multikey field values.
- {
- insertField: "a",
- queryField: "a",
- fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}]
- },
- // Multikey field values. Note that values within arrays are unwrapped by the distinct
- // scan, and empty arrays are thus not included.
- {
- insertField: "a",
- queryField: "a",
- fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, [], [9, 10], [{e: 11}]]
- },
- // Non-multikey dotted field values.
- {
- insertField: "a",
- queryField: "a.x",
- fieldValues: [
- {x: 1},
- {x: 2},
- {x: "3"},
- {x: null},
- {x: {c: 5, d: 6}},
- {x: {d: 6, c: 5}},
- {x: {}},
- {x: 9},
- {x: 10},
- {x: {e: 11}}
- ]
- },
- // Multikey dotted field values.
- {
- insertField: "a",
- queryField: "a.x",
- fieldValues: [
- [{x: 1}],
- [{x: 2}],
- [{x: "3"}],
- [{x: null}],
- [{x: {c: 5, d: 6}}],
- [{x: {d: 6, c: 5}}],
- [{x: {}}],
- [{x: []}],
- [{x: 9}, {x: 10}],
- [{x: [{e: 11}]}]
- ]
- }
- ];
-
- // Run all combinations of query, no-query, multikey and non-multikey distinct tests.
- for (let testCase of testCases) {
- // Log the start of the test and create the dataset.
- jsTestLog("Test case: " + tojson(testCase));
- insertTestData(testCase.insertField, testCase.fieldValues);
-
- // Test that a $** index cannot provide an indexed 'distinct' without a query.
- assertWildcardDistinctScan({
- distinctKey: testCase.queryField,
- query: {},
- expectedScanType: "COLLSCAN",
- expectedResults: distinctValues,
- expectedPath: null
- });
-
- // Test that a $** index can provide an indexed 'distinct' for distinct-key queries.
- assertWildcardDistinctScan({
- distinctKey: testCase.queryField,
- query: {[testCase.queryField]: {$lt: 3}},
- expectedScanType: (distinctFieldIsMultikey ? "IXSCAN" : "DISTINCT_SCAN"),
- expectedResults: [1, 2],
- expectedPath: testCase.queryField
- });
-
- // Test that a $** index can provide an indexed 'distinct' for a query on another field.
- const offset = Math.floor(testCase.fieldValues.length / 2);
- assertWildcardDistinctScan({
- distinctKey: testCase.queryField,
- query: {b: {$gte: offset}},
- expectedScanType: "IXSCAN",
- expectedResults: distinctValues.slice(offset),
- expectedPath: "b"
- });
-
- // Test that a $** index cannot provide an indexed 'distinct' for object value queries.
- assertWildcardDistinctScan({
- distinctKey: testCase.queryField,
- query: {[testCase.queryField]: {$gte: {c: 5}}},
- expectedScanType: "COLLSCAN",
- expectedResults: [{c: 5, d: 6}, {d: 6, c: 5}, {e: 11}],
- expectedPath: null
- });
-
- // Test that a $** index can provide an indexed 'distinct' for a MinMax query.
- assertWildcardDistinctScan({
- distinctKey: testCase.queryField,
- query: {[testCase.queryField]: {$gte: MinKey, $lte: MaxKey}},
- expectedScanType: "IXSCAN",
- expectedResults: distinctValues,
- expectedPath: testCase.queryField
- });
-
- // Test that a $** index cannot provide an indexed 'distinct' for excluded fields.
- assertWildcardDistinctScan({
- distinctKey: testCase.queryField,
- query: {c: {$lt: 0}},
- pathProjection: {c: 0},
- expectedScanType: "COLLSCAN",
- expectedResults: distinctValues,
- expectedPath: null
- });
+ // Confirm that the $** distinct scan produces the expected results.
+ assertArrayEq(coll.distinct(distinctKey, query), expectedResults);
+ // Confirm that the $** plan adheres to 'fetchIsExpected' and 'expectedScanType'.
+ assert.eq(planHasStage(coll.getDB(), winningPlan, "FETCH"), fetchIsExpected);
+ assert(planHasStage(coll.getDB(), winningPlan, expectedScanType));
+ assert.docEq({$_path: 1, [expectedPath]: 1},
+ getPlanStages(winningPlan, expectedScanType).shift().keyPattern);
+}
+
+// The set of distinct values that should be produced by each of the tests listed below.
+const distinctValues = [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}];
+
+// Define the set of values that the distinct field may take. The first test case consists
+// entirely of non-multikey fields, while the second includes multikey fields.
+const testCases = [
+ // Non-multikey field values.
+ {
+ insertField: "a",
+ queryField: "a",
+ fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}]
+ },
+ // Multikey field values. Note that values within arrays are unwrapped by the distinct
+ // scan, and empty arrays are thus not included.
+ {
+ insertField: "a",
+ queryField: "a",
+ fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, [], [9, 10], [{e: 11}]]
+ },
+ // Non-multikey dotted field values.
+ {
+ insertField: "a",
+ queryField: "a.x",
+ fieldValues: [
+ {x: 1},
+ {x: 2},
+ {x: "3"},
+ {x: null},
+ {x: {c: 5, d: 6}},
+ {x: {d: 6, c: 5}},
+ {x: {}},
+ {x: 9},
+ {x: 10},
+ {x: {e: 11}}
+ ]
+ },
+ // Multikey dotted field values.
+ {
+ insertField: "a",
+ queryField: "a.x",
+ fieldValues: [
+ [{x: 1}],
+ [{x: 2}],
+ [{x: "3"}],
+ [{x: null}],
+ [{x: {c: 5, d: 6}}],
+ [{x: {d: 6, c: 5}}],
+ [{x: {}}],
+ [{x: []}],
+ [{x: 9}, {x: 10}],
+ [{x: [{e: 11}]}]
+ ]
}
+];
+
+// Run all combinations of query, no-query, multikey and non-multikey distinct tests.
+for (let testCase of testCases) {
+ // Log the start of the test and create the dataset.
+ jsTestLog("Test case: " + tojson(testCase));
+ insertTestData(testCase.insertField, testCase.fieldValues);
+
+ // Test that a $** index cannot provide an indexed 'distinct' without a query.
+ assertWildcardDistinctScan({
+ distinctKey: testCase.queryField,
+ query: {},
+ expectedScanType: "COLLSCAN",
+ expectedResults: distinctValues,
+ expectedPath: null
+ });
+
+ // Test that a $** index can provide an indexed 'distinct' for distinct-key queries.
+ assertWildcardDistinctScan({
+ distinctKey: testCase.queryField,
+ query: {[testCase.queryField]: {$lt: 3}},
+ expectedScanType: (distinctFieldIsMultikey ? "IXSCAN" : "DISTINCT_SCAN"),
+ expectedResults: [1, 2],
+ expectedPath: testCase.queryField
+ });
+
+ // Test that a $** index can provide an indexed 'distinct' for a query on another field.
+ const offset = Math.floor(testCase.fieldValues.length / 2);
+ assertWildcardDistinctScan({
+ distinctKey: testCase.queryField,
+ query: {b: {$gte: offset}},
+ expectedScanType: "IXSCAN",
+ expectedResults: distinctValues.slice(offset),
+ expectedPath: "b"
+ });
+
+ // Test that a $** index cannot provide an indexed 'distinct' for object value queries.
+ assertWildcardDistinctScan({
+ distinctKey: testCase.queryField,
+ query: {[testCase.queryField]: {$gte: {c: 5}}},
+ expectedScanType: "COLLSCAN",
+ expectedResults: [{c: 5, d: 6}, {d: 6, c: 5}, {e: 11}],
+ expectedPath: null
+ });
+
+ // Test that a $** index can provide an indexed 'distinct' for a MinMax query.
+ assertWildcardDistinctScan({
+ distinctKey: testCase.queryField,
+ query: {[testCase.queryField]: {$gte: MinKey, $lte: MaxKey}},
+ expectedScanType: "IXSCAN",
+ expectedResults: distinctValues,
+ expectedPath: testCase.queryField
+ });
+
+ // Test that a $** index cannot provide an indexed 'distinct' for excluded fields.
+ assertWildcardDistinctScan({
+ distinctKey: testCase.queryField,
+ query: {c: {$lt: 0}},
+ pathProjection: {c: 0},
+ expectedScanType: "COLLSCAN",
+ expectedResults: distinctValues,
+ expectedPath: null
+ });
+}
})();
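The happy path from the table of expected outcomes (a non-object predicate on a non-multikey distinct field) reduces to a sketch. The 'distinct_demo' collection name is an assumption, and planHasStage comes from analyze_plan.js as loaded in the test:

const distinctDemo = db.distinct_demo;
distinctDemo.drop();
assert.commandWorked(distinctDemo.insert([{a: 1}, {a: 2}, {a: 2}]));
assert.commandWorked(distinctDemo.createIndex({"$**": 1}));
const demoPlan = distinctDemo.explain().distinct("a", {a: {$lt: 3}}).queryPlanner.winningPlan;
// A non-object predicate on a non-multikey distinct field yields a DISTINCT_SCAN.
assert(planHasStage(db, demoPlan, "DISTINCT_SCAN"));
printjson(distinctDemo.distinct("a", {a: {$lt: 3}}));  // [1, 2]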
diff --git a/jstests/core/wildcard_index_empty_arrays.js b/jstests/core/wildcard_index_empty_arrays.js
index 7b5e763bbad..cfea1495a48 100644
--- a/jstests/core/wildcard_index_empty_arrays.js
+++ b/jstests/core/wildcard_index_empty_arrays.js
@@ -2,40 +2,42 @@
* Tests that wildcard indexes will correctly match for empty arrays.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- const coll = db.wildcard_empty_arrays;
- coll.drop();
+const coll = db.wildcard_empty_arrays;
+coll.drop();
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
- const indexWildcard = {"$**": 1};
- assert.commandWorked(coll.createIndex(indexWildcard));
+const indexWildcard = {
+ "$**": 1
+};
+assert.commandWorked(coll.createIndex(indexWildcard));
- assert.commandWorked(coll.insert({a: 1, b: 1, c: [], d: {e: [5, 6]}}));
- assert.commandWorked(coll.insert({a: 2, b: 2, c: [1, 2], d: {e: []}}));
- assert.commandWorked(coll.insert({a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]}));
- assert.commandWorked(coll.insert({a: 2, b: [[]], c: 1, d: 4}));
+assert.commandWorked(coll.insert({a: 1, b: 1, c: [], d: {e: [5, 6]}}));
+assert.commandWorked(coll.insert({a: 2, b: 2, c: [1, 2], d: {e: []}}));
+assert.commandWorked(coll.insert({a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]}));
+assert.commandWorked(coll.insert({a: 2, b: [[]], c: 1, d: 4}));
- // $** index matches empty array.
- assertArrayEq(coll.find({c: []}, {_id: 0}).hint(indexWildcard).toArray(),
- [{a: 1, b: 1, c: [], d: {e: [5, 6]}}]);
+// $** index matches empty array.
+assertArrayEq(coll.find({c: []}, {_id: 0}).hint(indexWildcard).toArray(),
+ [{a: 1, b: 1, c: [], d: {e: [5, 6]}}]);
- // $** index supports equality to array offset.
- assertArrayEq(coll.find({"c.0": 1}, {_id: 0}).hint(indexWildcard).toArray(),
- [{a: 2, b: 2, c: [1, 2], d: {e: []}}]);
+// $** index supports equality to array offset.
+assertArrayEq(coll.find({"c.0": 1}, {_id: 0}).hint(indexWildcard).toArray(),
+ [{a: 2, b: 2, c: [1, 2], d: {e: []}}]);
- // $** index matches empty array nested in object.
- assertArrayEq(coll.find({"d.e": []}, {_id: 0}).hint(indexWildcard).toArray(),
- [{a: 2, b: 2, c: [1, 2], d: {e: []}}]);
+// $** index matches empty array nested in object.
+assertArrayEq(coll.find({"d.e": []}, {_id: 0}).hint(indexWildcard).toArray(),
+ [{a: 2, b: 2, c: [1, 2], d: {e: []}}]);
- // $** index matches empty array nested within an array of objects.
- assertArrayEq(coll.find({"f.0.g": []}, {_id: 0}).hint(indexWildcard).toArray(),
- [{a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]}]);
+// $** index matches empty array nested within an array of objects.
+assertArrayEq(coll.find({"f.0.g": []}, {_id: 0}).hint(indexWildcard).toArray(),
+ [{a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]}]);
- // $** index matches empty array nested within an array.
- assertArrayEq(coll.find({"b": []}, {_id: 0}).hint(indexWildcard).toArray(),
- [{a: 2, b: [[]], c: 1, d: 4}]);
+// $** index matches empty array nested within an array.
+assertArrayEq(coll.find({"b": []}, {_id: 0}).hint(indexWildcard).toArray(),
+ [{a: 2, b: [[]], c: 1, d: 4}]);
})();
\ No newline at end of file
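A sketch of the bounds behind the empty-array matches above, assuming getPlanStage from jstests/libs/analyze_plan.js is also loaded:

const eaExplain = db.wildcard_empty_arrays.find({c: []}).hint({"$**": 1}).explain();
const eaScan = getPlanStage(eaExplain.queryPlanner.winningPlan, "IXSCAN");
// Equality to [] expands to two point intervals, [undefined, undefined] and [[], []].
printjson(eaScan.indexBounds);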
diff --git a/jstests/core/wildcard_index_equality_to_empty_obj.js b/jstests/core/wildcard_index_equality_to_empty_obj.js
index 28e99534147..c6801bcdcb6 100644
--- a/jstests/core/wildcard_index_equality_to_empty_obj.js
+++ b/jstests/core/wildcard_index_equality_to_empty_obj.js
@@ -2,75 +2,73 @@
* Tests that a $** index can support queries which test for equality to empty nested objects.
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.wildcard_index_equality_to_empty_obj;
- coll.drop();
+const coll = db.wildcard_index_equality_to_empty_obj;
+coll.drop();
- assert.commandWorked(coll.insert([
- {_id: 0},
- {_id: 1, a: null},
- {_id: 2, a: []},
- {_id: 3, a: {}},
- {_id: 4, a: [{}]},
- {_id: 5, a: [[{}]]},
- {_id: 6, a: [1, 2, {}]},
- {_id: 7, a: {b: 1}},
- {_id: 8, a: 3},
- {_id: 9, a: {b: {}}},
- {_id: 10, a: [0, {b: {}}]},
- ]));
+assert.commandWorked(coll.insert([
+ {_id: 0},
+ {_id: 1, a: null},
+ {_id: 2, a: []},
+ {_id: 3, a: {}},
+ {_id: 4, a: [{}]},
+ {_id: 5, a: [[{}]]},
+ {_id: 6, a: [1, 2, {}]},
+ {_id: 7, a: {b: 1}},
+ {_id: 8, a: 3},
+ {_id: 9, a: {b: {}}},
+ {_id: 10, a: [0, {b: {}}]},
+]));
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
- // Test that a comparison to empty object query returns the expected results when the $** index
- // is hinted.
- let results = coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
- assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]);
+// Test that a comparison-to-empty-object query returns the expected results when the $** index
+// is hinted.
+let results = coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
+assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]);
- // Result set should be the same as when hinting a COLLSCAN and with no hint.
- assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
- assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).toArray());
+// Result set should be the same as when hinting a COLLSCAN and with no hint.
+assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
+assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).toArray());
- // Repeat the above query, but express it using $lte:{}, which is a synonym for $eq:{}.
- results = coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
- assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]);
- assert.eq(results,
- coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
- assert.eq(results, coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).toArray());
+// Repeat the above query, but express it using $lte:{}, which is a synonym for $eq:{}.
+results = coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
+assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]);
+assert.eq(results,
+ coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
+assert.eq(results, coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).toArray());
- // Test that an inequality to empty object query results in an error when the $** index is
- // hinted.
- assert.throws(
- () => coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray());
+// Test that an inequality-to-empty-object query results in an error when the $** index is
+// hinted.
+assert.throws(() => coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray());
- // Test that an inequality to empty object query returns the expected results in the presence of
- // the $** index.
- results = coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray();
- assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 7}, {_id: 9}, {_id: 10}]);
+// Test that an inequality-to-empty-object query returns the expected results in the presence
+// of the $** index.
+results = coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray();
+assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 7}, {_id: 9}, {_id: 10}]);
- // Result set should be the same as when hinting a COLLSCAN and with no hint.
- assert.eq(results,
- coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
- assert.eq(results, coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray());
+// Result set should be the same as when hinting a COLLSCAN and with no hint.
+assert.eq(results,
+ coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
+assert.eq(results, coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray());
- // Test that an $in with an empty object returns the expected results when the $** index is
- // hinted.
- results = coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
- assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 8}]);
+// Test that an $in with an empty object returns the expected results when the $** index is
+// hinted.
+results = coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
+assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 8}]);
- // Result set should be the same as when hinting a COLLSCAN and with no hint.
- assert.eq(
- results,
- coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
- assert.eq(results, coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).toArray());
+// Result set should be the same as when hinting a COLLSCAN and with no hint.
+assert.eq(results,
+ coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
+assert.eq(results, coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).toArray());
- // Test that a wildcard index can support equality to an empty object on a dotted field.
- results = coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
- assert.eq(results, [{_id: 9}, {_id: 10}]);
+// Test that a wildcard index can support equality to an empty object on a dotted field.
+results = coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray();
+assert.eq(results, [{_id: 9}, {_id: 10}]);
- // Result set should be the same as when hinting a COLLSCAN and with no hint.
- assert.eq(results,
- coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
- assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray());
+// Result set should be the same as when hinting a COLLSCAN and with no hint.
+assert.eq(results,
+ coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
+assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray());
}());
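The $lte:{} / $eq:{} synonymy noted above follows from type bracketing: a comparison against an object operand only considers object values, and {} is the minimal object. A quick check, assuming the collection above:

const eqColl = db.wildcard_index_equality_to_empty_obj;
const byEq = eqColl.find({a: {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray();
const byLte = eqColl.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).toArray();
// Both predicates select exactly the documents containing {}, directly or as an array element.
assert.eq(byEq, byLte);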
diff --git a/jstests/core/wildcard_index_filter.js b/jstests/core/wildcard_index_filter.js
index 74c81edf462..fc1f1efdc6f 100644
--- a/jstests/core/wildcard_index_filter.js
+++ b/jstests/core/wildcard_index_filter.js
@@ -6,88 +6,95 @@
* @tags: [does_not_support_stepdowns]
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const coll = db.wildcard_index_filter;
-
- // Utility function to list index filters.
- function getFilters() {
- const res = assert.commandWorked(coll.runCommand('planCacheListFilters'));
- assert(res.hasOwnProperty('filters'), 'filters missing from planCacheListFilters result');
- return res.filters;
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const coll = db.wildcard_index_filter;
+
+// Utility function to list index filters.
+function getFilters() {
+ const res = assert.commandWorked(coll.runCommand('planCacheListFilters'));
+ assert(res.hasOwnProperty('filters'), 'filters missing from planCacheListFilters result');
+ return res.filters;
+}
+
+// Sets an index filter for the given query shape, then confirms that the expected index was
+// used to answer a query.
+function assertExpectedIndexAnswersQueryWithFilter(
+ filterQuery, filterIndexes, query, expectedIndexName, hint) {
+ // Clear existing cache filters.
+ assert.commandWorked(coll.runCommand('planCacheClearFilters'), 'planCacheClearFilters failed');
+
+ // Make sure that the filter is set correctly.
+ assert.commandWorked(
+ coll.runCommand('planCacheSetFilter', {query: filterQuery, indexes: filterIndexes}));
+ assert.eq(1,
+ getFilters().length,
+ 'no change in query settings after successfully setting index filters');
+
+    // Check that the index named by 'expectedIndexName' was used over any competing index.
+ let explain;
+ if (hint == undefined) {
+ explain = assert.commandWorked(coll.explain("executionStats").find(query).finish());
+ } else {
+ explain =
+ assert.commandWorked(coll.explain("executionStats").find(query).hint(hint).finish());
}
- // Sets an index filter given a query shape then confirms that the expected index was used to
- // answer a query.
- function assertExpectedIndexAnswersQueryWithFilter(
- filterQuery, filterIndexes, query, expectedIndexName, hint) {
- // Clear existing cache filters.
- assert.commandWorked(coll.runCommand('planCacheClearFilters'),
- 'planCacheClearFilters failed');
-
- // Make sure that the filter is set correctly.
- assert.commandWorked(
- coll.runCommand('planCacheSetFilter', {query: filterQuery, indexes: filterIndexes}));
- assert.eq(1,
- getFilters().length,
- 'no change in query settings after successfully setting index filters');
-
- // Check that expectedIndex index was used over another index.
- let explain;
- if (hint == undefined) {
- explain = assert.commandWorked(coll.explain("executionStats").find(query).finish());
- } else {
- explain = assert.commandWorked(
- coll.explain("executionStats").find(query).hint(hint).finish());
- }
-
- const executionStages = getExecutionStages(explain).shift();
- let planStage = getPlanStage(executionStages, 'IXSCAN');
- assert.neq(null, planStage);
- assert.eq(planStage.indexName, expectedIndexName, tojson(planStage));
- }
+ const executionStages = getExecutionStages(explain).shift();
+ let planStage = getPlanStage(executionStages, 'IXSCAN');
+ assert.neq(null, planStage);
+ assert.eq(planStage.indexName, expectedIndexName, tojson(planStage));
+}
- const indexWildcard = {"$**": 1};
- const indexA = {"a": 1};
- assert.commandWorked(coll.createIndex(indexWildcard));
- assert.commandWorked(coll.createIndex(indexA));
+const indexWildcard = {
+ "$**": 1
+};
+const indexA = {
+ "a": 1
+};
+assert.commandWorked(coll.createIndex(indexWildcard));
+assert.commandWorked(coll.createIndex(indexA));
- assert.commandWorked(coll.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
- // Filtering on $** index. $** index is used over another index.
- assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1");
+// Filtering on $** index. $** index is used over another index.
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1");
- // Filtering on regular index. $** index is not used over another index.
- assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1");
+// Filtering on regular index. $** index is not used over another index.
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1");
- assert.commandWorked(coll.insert({a: "a", b: "b"}));
+assert.commandWorked(coll.insert({a: "a", b: "b"}));
- const indexAB = {"a": 1, "b": 1};
- assert.commandWorked(coll.createIndex(indexAB));
+const indexAB = {
+ "a": 1,
+ "b": 1
+};
+assert.commandWorked(coll.createIndex(indexAB));
- // Filtering on $** index. $** index is used over another index for compound query.
- assertExpectedIndexAnswersQueryWithFilter(
- {a: "a", b: "b"}, [indexWildcard], {a: "a", b: "b"}, "$**_1");
+// Filtering on $** index. $** index is used over another index for compound query.
+assertExpectedIndexAnswersQueryWithFilter(
+ {a: "a", b: "b"}, [indexWildcard], {a: "a", b: "b"}, "$**_1");
- // Filtering on regular compound index. Check that $** index is not used over another index
- // for compound query.
- assertExpectedIndexAnswersQueryWithFilter(
- {a: "a", b: "b"}, [indexAB], {a: "a", b: "b"}, "a_1_b_1");
+// Filtering on regular compound index. Check that $** index is not used over another index
+// for compound query.
+assertExpectedIndexAnswersQueryWithFilter({a: "a", b: "b"}, [indexAB], {a: "a", b: "b"}, "a_1_b_1");
- // Filtering on $** index while hinting on another index. Index filter is prioritized.
- assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1", indexA);
+// Filtering on $** index while hinting on another index. Index filter is prioritized.
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1", indexA);
- // Filtering on regular index while hinting on $** index. Index filter is prioritized.
- assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1", indexWildcard);
+// Filtering on regular index while hinting on $** index. Index filter is prioritized.
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1", indexWildcard);
- // Index filter for $** index does not apply when query does not match filter query shape.
- assertExpectedIndexAnswersQueryWithFilter({b: "b"}, [indexWildcard], {a: "a"}, "a_1", indexA);
+// Index filter for $** index does not apply when query does not match filter query shape.
+assertExpectedIndexAnswersQueryWithFilter({b: "b"}, [indexWildcard], {a: "a"}, "a_1", indexA);
- const indexAWildcard = {"a.$**": 1};
- assert.commandWorked(coll.createIndex(indexAWildcard));
+const indexAWildcard = {
+ "a.$**": 1
+};
+assert.commandWorked(coll.createIndex(indexAWildcard));
- // Filtering on a path specified $** index. Check that the $** is used over other indices.
- assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1");
+// Filtering on a path-specified $** index. Check that the $** index is used over other indices.
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1");
})();
\ No newline at end of file
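The filter state these helpers manipulate can be inspected directly. A sketch assuming the collection above, where the final call leaves one filter in place:

const filterRes =
    assert.commandWorked(db.wildcard_index_filter.runCommand('planCacheListFilters'));
// One filter remains, mapping the {a: "a"} query shape to the path-specified wildcard index.
printjson(filterRes.filters);
assert.eq(1, filterRes.filters.length);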
diff --git a/jstests/core/wildcard_index_hint.js b/jstests/core/wildcard_index_hint.js
index 3f1ac41b42f..f20e2b238c1 100644
--- a/jstests/core/wildcard_index_hint.js
+++ b/jstests/core/wildcard_index_hint.js
@@ -2,104 +2,96 @@
* Tests that $** indexes obey hinting.
*/
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-
- const coll = db.wildcard_hint;
- coll.drop();
-
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
-
- // Extracts the winning plan for the given query and hint from the explain output.
- const winningPlan = (query, hint) =>
- assert.commandWorked(coll.find(query).hint(hint).explain()).queryPlanner.winningPlan;
-
- // Runs the given query and confirms that:
- // (1) the expected index was used to answer the query, and
- // (2) the results produced by the index match the given 'expectedResults'.
- function assertExpectedIndexAnswersQueryWithHint(
- query, hint, expectedIndexName, expectedResults) {
- const ixScans = getPlanStages(winningPlan(query, hint), "IXSCAN");
- assert.gt(ixScans.length, 0, tojson(coll.find(query).hint(hint).explain()));
- ixScans.forEach((ixScan) => assert.eq(ixScan.indexName, expectedIndexName));
-
- const wildcardResults = coll.find(query, {_id: 0}).hint(hint).toArray();
- assertArrayEq(wildcardResults, expectedResults);
- }
-
- assert.commandWorked(db.createCollection(coll.getName()));
-
- // Check that error is thrown if the hinted index doesn't exist.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"$**": 1}}),
- ErrorCodes.BadValue);
-
- assert.commandWorked(coll.createIndex({"$**": 1}));
-
- assert.commandWorked(coll.insert({_id: 10, a: 1, b: 1, c: {d: 1, e: 1}}));
- assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 2}}));
- assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}}));
- assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}}));
- assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}}));
-
- // Hint a $** index without a competing index.
- assertExpectedIndexAnswersQueryWithHint(
- {"a": 1},
- {"$**": 1},
- "$**_1",
- [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
-
- assert.commandWorked(coll.createIndex({"a": 1}));
-
- // Hint a $** index with a competing index.
- assertExpectedIndexAnswersQueryWithHint(
- {"a": 1},
- {"$**": 1},
- "$**_1",
- [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
-
- // Hint a $** index with a competing _id index.
- assertExpectedIndexAnswersQueryWithHint(
- {"a": 1, "_id": 10}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]);
-
- // Hint a regular index with a competing $** index.
- assertExpectedIndexAnswersQueryWithHint(
- {"a": 1}, {"a": 1}, "a_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
-
- // Query on fields that not all documents in the collection have with $** index hint.
- assertExpectedIndexAnswersQueryWithHint(
- {"c.d": 1},
- {"$**": 1},
- "$**_1",
- [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 2, b: 2, c: {d: 1, e: 2}}]);
-
- // Adding another wildcard index with a path specified.
- assert.commandWorked(coll.createIndex({"c.$**": 1}));
-
-    // Hint on a path that is not in the query predicate.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"c.$**": 1}}),
- ErrorCodes.BadValue);
-
-    // Hint on a path-specified $** index.
- assertExpectedIndexAnswersQueryWithHint(
- {"c.d": 1},
- {"c.$**": 1},
- "c.$**_1",
- [{a: 2, b: 2, c: {d: 1, e: 2}}, {a: 1, b: 1, c: {d: 1, e: 1}}]);
-
- // Min/max with $** index hint.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"b": 1}, min: {"a": 1}, hint: {"$**": 1}}),
- 51174);
-
- // Hint a $** index on a query with compound fields.
- assertExpectedIndexAnswersQueryWithHint(
- {"a": 1, "c.e": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]);
-
- // Hint a $** index by name.
- assertExpectedIndexAnswersQueryWithHint(
- {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+
+const coll = db.wildcard_hint;
+coll.drop();
+
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+
+// Extracts the winning plan for the given query and hint from the explain output.
+const winningPlan = (query, hint) =>
+ assert.commandWorked(coll.find(query).hint(hint).explain()).queryPlanner.winningPlan;
+
+// Runs the given query and confirms that:
+// (1) the expected index was used to answer the query, and
+// (2) the results produced by the index match the given 'expectedResults'.
+function assertExpectedIndexAnswersQueryWithHint(query, hint, expectedIndexName, expectedResults) {
+ const ixScans = getPlanStages(winningPlan(query, hint), "IXSCAN");
+ assert.gt(ixScans.length, 0, tojson(coll.find(query).hint(hint).explain()));
+ ixScans.forEach((ixScan) => assert.eq(ixScan.indexName, expectedIndexName));
+
+ const wildcardResults = coll.find(query, {_id: 0}).hint(hint).toArray();
+ assertArrayEq(wildcardResults, expectedResults);
+}
+
+assert.commandWorked(db.createCollection(coll.getName()));
+
+// Check that error is thrown if the hinted index doesn't exist.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"$**": 1}}), ErrorCodes.BadValue);
+
+assert.commandWorked(coll.createIndex({"$**": 1}));
+
+assert.commandWorked(coll.insert({_id: 10, a: 1, b: 1, c: {d: 1, e: 1}}));
+assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 2}}));
+assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}}));
+assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}}));
+assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}}));
+
+// Hint a $** index without a competing index.
+assertExpectedIndexAnswersQueryWithHint(
+ {"a": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
+
+assert.commandWorked(coll.createIndex({"a": 1}));
+
+// Hint a $** index with a competing index.
+assertExpectedIndexAnswersQueryWithHint(
+ {"a": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
+
+// Hint a $** index with a competing _id index.
+assertExpectedIndexAnswersQueryWithHint(
+ {"a": 1, "_id": 10}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]);
+
+// Hint a regular index with a competing $** index.
+assertExpectedIndexAnswersQueryWithHint(
+ {"a": 1}, {"a": 1}, "a_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
+
+// Query, using a $** index hint, on fields that not all documents in the collection have.
+assertExpectedIndexAnswersQueryWithHint(
+ {"c.d": 1},
+ {"$**": 1},
+ "$**_1",
+ [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 2, b: 2, c: {d: 1, e: 2}}]);
+
+// Add another wildcard index with a specified path.
+assert.commandWorked(coll.createIndex({"c.$**": 1}));
+
+// Hint on a path that is not in the query predicate.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"c.$**": 1}}),
+ ErrorCodes.BadValue);
+
+// Hint on a path-specified $** index.
+assertExpectedIndexAnswersQueryWithHint(
+ {"c.d": 1},
+ {"c.$**": 1},
+ "c.$**_1",
+ [{a: 2, b: 2, c: {d: 1, e: 2}}, {a: 1, b: 1, c: {d: 1, e: 1}}]);
+
+// Min/max with $** index hint.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"b": 1}, min: {"a": 1}, hint: {"$**": 1}}),
+ 51174);
+
+// Hint a $** index on a query with compound fields.
+assertExpectedIndexAnswersQueryWithHint(
+ {"a": 1, "c.e": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]);
+
+// Hint a $** index by name.
+assertExpectedIndexAnswersQueryWithHint(
+ {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
})();
diff --git a/jstests/core/wildcard_index_minmax.js b/jstests/core/wildcard_index_minmax.js
index b13d2c81b94..f7baf8a0713 100644
--- a/jstests/core/wildcard_index_minmax.js
+++ b/jstests/core/wildcard_index_minmax.js
@@ -2,77 +2,73 @@
 * Tests that min/max is not supported for wildcard indexes.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- const coll = db.wildcard_index_minmax;
- coll.drop();
+const coll = db.wildcard_index_minmax;
+coll.drop();
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
- assert.commandWorked(coll.insert({a: 1, b: 1}));
- assert.commandWorked(coll.insert({a: 1, b: 2}));
- assert.commandWorked(coll.insert({a: 2, b: 1}));
- assert.commandWorked(coll.insert({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 1}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
- assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(coll.createIndex({"a": 1}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.createIndex({"a": 1}));
- // Throws error for $** index min.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), min: {"a": 0.5}, hint: {"$**": 1}}), 51174);
+// Throws error for $** index min.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), min: {"a": 0.5}, hint: {"$**": 1}}), 51174);
- // Throws error for $** index max.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), max: {"a": 1.5}, hint: {"$**": 1}}), 51174);
+// Throws error for $** index max.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), max: {"a": 1.5}, hint: {"$**": 1}}), 51174);
- // Throws error for $** index min/max.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), min: {"a": 0.5}, max: {"a": 1.5}, hint: {"$**": 1}}),
- 51174);
+// Throws error for $** index min/max.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), min: {"a": 0.5}, max: {"a": 1.5}, hint: {"$**": 1}}),
+ 51174);
- // Throws error for $** index min with filter of a different value.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"a": 2}, min: {"a": 1}, hint: {"$**": 1}}),
- 51174);
+// Throws error for $** index min with filter of a different value.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"a": 2}, min: {"a": 1}, hint: {"$**": 1}}),
+ 51174);
- // Throws error for $** index max with filter of a different value.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1.5}, hint: {"$**": 1}}),
- 51174);
+// Throws error for $** index max with filter of a different value.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1.5}, hint: {"$**": 1}}),
+ 51174);
- // Throws error for $** index min and max with filter of a different value.
- assert.commandFailedWithCode(db.runCommand({
- find: coll.getName(),
- filter: {"a": 1},
- min: {"a": 0.5},
- max: {"a": 1.5},
- hint: {"$**": 1}
- }),
- 51174);
+// Throws error for $** index min and max with filter of a different value.
+assert.commandFailedWithCode(db.runCommand({
+ find: coll.getName(),
+ filter: {"a": 1},
+ min: {"a": 0.5},
+ max: {"a": 1.5},
+ hint: {"$**": 1}
+}),
+ 51174);
- // Throws error for $** index min with filter of the same value.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"a": 1}, min: {"a": 1}, hint: {"$**": 1}}),
- 51174);
+// Throws error for $** index min with filter of the same value.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"a": 1}, min: {"a": 1}, hint: {"$**": 1}}),
+ 51174);
- // Throws error for $** index max with filter of the same value.
- assert.commandFailedWithCode(
- db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1}, hint: {"$**": 1}}),
- 51174);
+// Throws error for $** index max with filter of the same value.
+assert.commandFailedWithCode(
+ db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1}, hint: {"$**": 1}}),
+ 51174);
- // Throws error for $** index min and max with filter of the same value.
- assert.commandFailedWithCode(db.runCommand({
- find: coll.getName(),
- filter: {"a": 1},
- min: {"a": 1},
- max: {"a": 1},
- hint: {"$**": 1}
- }),
- 51174);
+// Throws error for $** index min and max with filter of the same value.
+assert.commandFailedWithCode(
+ db.runCommand(
+ {find: coll.getName(), filter: {"a": 1}, min: {"a": 1}, max: {"a": 1}, hint: {"$**": 1}}),
+ 51174);
- // $** index does not interfere with valid min/max.
- assertArrayEq(coll.find({}, {_id: 0}).min({"a": 0.5}).max({"a": 1.5}).hint({a: 1}).toArray(),
- [{a: 1, b: 1}, {a: 1, b: 2}]);
+// $** index does not interfere with valid min/max.
+assertArrayEq(coll.find({}, {_id: 0}).min({"a": 0.5}).max({"a": 1.5}).hint({a: 1}).toArray(),
+ [{a: 1, b: 1}, {a: 1, b: 2}]);
})();
diff --git a/jstests/core/wildcard_index_multikey.js b/jstests/core/wildcard_index_multikey.js
index 039c5176ff4..ce6a7151ad1 100644
--- a/jstests/core/wildcard_index_multikey.js
+++ b/jstests/core/wildcard_index_multikey.js
@@ -3,265 +3,266 @@
* @tags: [assumes_balancer_off]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
- const coll = db.wildcard_multikey_index;
- coll.drop();
+const coll = db.wildcard_multikey_index;
+coll.drop();
- // Template document which defines the 'schema' of the documents in the test collection.
- const templateDoc = {a: [], b: {c: [], d: [{e: 0}]}};
- const pathList = ["a", "b.c", "b.d.e"];
+// Template document which defines the 'schema' of the documents in the test collection.
+const templateDoc = {
+ a: [],
+ b: {c: [], d: [{e: 0}]}
+};
+const pathList = ["a", "b.c", "b.d.e"];
- // Insert a set of documents into the collection, based on the template document and populated
- // with an increasing sequence of values. This is to ensure that the range of values present for
- // each field in the dataset is not entirely homogeneous.
- for (let i = 0; i < 50; i++) {
- (function populateDoc(doc, value) {
- for (let key in doc) {
- if (typeof doc[key] === "object") {
- if (Array.isArray(doc[key])) {
- if (typeof doc[key][0] === "object") {
- value = populateDoc(doc[key][0], value);
- } else {
- doc[key] = [++value, ++value];
- }
+// Insert a set of documents into the collection, based on the template document and populated
+// with an increasing sequence of values. This is to ensure that the range of values present for
+// each field in the dataset is not entirely homogeneous.
+for (let i = 0; i < 50; i++) {
+ (function populateDoc(doc, value) {
+ for (let key in doc) {
+ if (typeof doc[key] === "object") {
+ if (Array.isArray(doc[key])) {
+ if (typeof doc[key][0] === "object") {
+ value = populateDoc(doc[key][0], value);
} else {
- value = populateDoc(doc[key], value);
+ doc[key] = [++value, ++value];
}
} else {
- doc[key] = ++value;
+ value = populateDoc(doc[key], value);
}
+ } else {
+ doc[key] = ++value;
}
- return value;
- })(templateDoc, i);
- assert.commandWorked(coll.insert(templateDoc));
- }
+ }
+ return value;
+ })(templateDoc, i);
+ assert.commandWorked(coll.insert(templateDoc));
+}
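+
+// (For example, the first pass (i = 0) fills the template as
+// {a: [1, 2], b: {c: [3, 4], d: [{e: 5}]}}; later passes overwrite it with larger values.)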
- // Set of operations which will be applied to each field in the index in turn.
- const operationList = [
- {expression: {$gte: 10}},
- {expression: {$gt: 10}},
- {expression: {$lt: 40}},
- {expression: {$lte: 40}},
- {expression: {$gt: 10, $lt: 40}},
- {expression: {$eq: 25}},
- {expression: {$in: [5, 15, 35, 40]}},
- {expression: {$elemMatch: {$gte: 10, $lte: 40}}},
- ];
+// Set of operations which will be applied to each field in the index in turn.
+const operationList = [
+ {expression: {$gte: 10}},
+ {expression: {$gt: 10}},
+ {expression: {$lt: 40}},
+ {expression: {$lte: 40}},
+ {expression: {$gt: 10, $lt: 40}},
+ {expression: {$eq: 25}},
+ {expression: {$in: [5, 15, 35, 40]}},
+ {expression: {$elemMatch: {$gte: 10, $lte: 40}}},
+];
- // Given a keyPattern and (optional) pathProjection, this function builds a $** index on the
-    // collection and then tests each of the match expressions in the 'operationList' on each indexed
- // field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have
- // been indexed based on the spec; this function will confirm that only the appropriate paths
- // are present in the $** index.
- function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) {
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex(
- keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {}));
- assert(expectedPaths);
- // Verify the expected behaviour for every combination of path and operator.
- for (let op of operationList) {
- for (let path of pathList) {
- const query = {[path]: op.expression};
- assertWildcardQuery(query, expectedPaths.includes(path) ? path : null);
- }
+// Given a keyPattern and (optional) pathProjection, this function builds a $** index on the
+// collection and then tests each of the match expressions in the 'operationList' on each indexed
+// field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have
+// been indexed based on the spec; this function will confirm that only the appropriate paths
+// are present in the $** index.
+function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) {
+ assert.commandWorked(coll.dropIndexes());
+ assert.commandWorked(
+ coll.createIndex(keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {}));
+ assert(expectedPaths);
+ // Verify the expected behaviour for every combination of path and operator.
+ for (let op of operationList) {
+ for (let path of pathList) {
+ const query = {[path]: op.expression};
+ assertWildcardQuery(query, expectedPaths.includes(path) ? path : null);
}
}
+}
- // Runs a single wildcard query test. If 'expectedPath' is non-null, verifies that there is an
- // indexed solution that uses the $** index with the given path string. If 'expectedPath' is
- // null, verifies that no indexed solution was found. If 'explainStats' is non-empty, verifies
- // that the query's explain output reflects the given stats.
- function assertWildcardQuery(query, expectedPath, explainStats = {}) {
- // Explain the query, and determine whether an indexed solution is available.
- const explainOutput = coll.find(query).explain("executionStats");
- // If we expect the current path to have been excluded based on the $** keyPattern
- // or projection, confirm that no indexed solution was found.
- if (!expectedPath) {
- assert.gt(getPlanStages(explainOutput.queryPlanner.winningPlan, "COLLSCAN").length, 0);
- return;
- }
- // Verify that the winning plan uses the $** index with the expected path.
- const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN");
- assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1});
- // Verify that the results obtained from the $** index are identical to a COLLSCAN.
- assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray());
- // Verify that the explain output reflects the given 'explainStats'.
- for (let stat in explainStats) {
- assert.eq(explainStats[stat],
- stat.split('.').reduce((obj, i) => obj[i], explainOutput),
- explainOutput);
- }
+// Runs a single wildcard query test. If 'expectedPath' is non-null, verifies that there is an
+// indexed solution that uses the $** index with the given path string. If 'expectedPath' is
+// null, verifies that no indexed solution was found. If 'explainStats' is non-empty, verifies
+// that the query's explain output reflects the given stats.
+function assertWildcardQuery(query, expectedPath, explainStats = {}) {
+ // Explain the query, and determine whether an indexed solution is available.
+ const explainOutput = coll.find(query).explain("executionStats");
+ // If we expect the current path to have been excluded based on the $** keyPattern
+ // or projection, confirm that no indexed solution was found.
+ if (!expectedPath) {
+ assert.gt(getPlanStages(explainOutput.queryPlanner.winningPlan, "COLLSCAN").length, 0);
+ return;
}
+ // Verify that the winning plan uses the $** index with the expected path.
+ const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN");
+ assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
+ assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1});
+ // Verify that the results obtained from the $** index are identical to a COLLSCAN.
+ assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray());
+ // Verify that the explain output reflects the given 'explainStats'.
+ for (let stat in explainStats) {
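+    // (Each key in 'explainStats' is a dotted path such as 'executionStats.nReturned';
+    // the reduce() below walks that path through the explain document.)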
+ assert.eq(explainStats[stat],
+ stat.split('.').reduce((obj, i) => obj[i], explainOutput),
+ explainOutput);
+ }
+}
- // Test a $** index that indexes the entire document.
- runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e']);
- // Test a $** index on a single subtree.
- runWildcardIndexTest({'a.$**': 1}, null, ['a']);
- runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e']);
- runWildcardIndexTest({'b.c.$**': 1}, null, ['b.c']);
- runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']);
- // Test a $** index which includes a subset of paths.
- runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']);
- runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e']);
- runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']);
- runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']);
- // Test a $** index which excludes a subset of paths.
- runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e']);
- runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']);
- runWildcardIndexTest({'$**': 1}, {'b.c': 0}, ['a', 'b.d.e']);
- runWildcardIndexTest({'$**': 1}, {a: 0, 'b.c': 0}, ['b.d.e']);
-
- // Sanity check that a few queries which need to be planned specially in the multikey case
- // return the correct results.
- coll.drop();
- assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(coll.insert({a: [-5, 15]}));
- assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).itcount());
- assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount());
- assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).itcount());
- assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).hint({$natural: 1}).itcount());
+// Test a $** index that indexes the entire document.
+runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e']);
+// Test a $** index on a single subtree.
+runWildcardIndexTest({'a.$**': 1}, null, ['a']);
+runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e']);
+runWildcardIndexTest({'b.c.$**': 1}, null, ['b.c']);
+runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']);
+// Test a $** index which includes a subset of paths.
+runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']);
+runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e']);
+runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']);
+runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']);
+// Test a $** index which excludes a subset of paths.
+runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e']);
+runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']);
+runWildcardIndexTest({'$**': 1}, {'b.c': 0}, ['a', 'b.d.e']);
+runWildcardIndexTest({'$**': 1}, {a: 0, 'b.c': 0}, ['b.d.e']);
- assert.commandWorked(coll.insert({b: {c: {d: [{e: {f: -5}}, {e: {f: 15}}]}}}));
- assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).itcount());
- assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount());
- assert.eq(0, coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}).itcount());
- assert.eq(0,
- coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}})
- .hint({$natural: 1})
- .itcount());
+// Sanity check that a few queries which need to be planned specially in the multikey case
+// return the correct results.
+coll.drop();
+assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.insert({a: [-5, 15]}));
+assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).itcount());
+assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount());
+assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).itcount());
+assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).hint({$natural: 1}).itcount());
- // Fieldname-or-array-index query tests.
- assert(coll.drop());
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.insert({b: {c: {d: [{e: {f: -5}}, {e: {f: 15}}]}}}));
+assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).itcount());
+assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount());
+assert.eq(0, coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}).itcount());
+assert.eq(
+ 0, coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}).hint({$natural: 1}).itcount());
- // Insert some documents that exhibit a mix of numeric fieldnames and array indices.
- assert.commandWorked(coll.insert({_id: 1, a: [{b: [{c: 1}]}]}));
- assert.commandWorked(coll.insert({_id: 2, a: [{b: [{c: 0}, {c: 1}]}]}));
- assert.commandWorked(coll.insert({_id: 3, a: {'0': [{b: {'1': {c: 1}}}, {d: 1}]}}));
- assert.commandWorked(coll.insert({_id: 4, a: [{b: [{1: {c: 1}}]}]}));
- assert.commandWorked(
- coll.insert({_id: 5, a: [{b: [{'1': {c: {'2': {d: [0, 1, 2, 3, {e: 1}]}}}}]}]}));
+// Fieldname-or-array-index query tests.
+assert(coll.drop());
+assert.commandWorked(coll.createIndex({"$**": 1}));
- /*
- * Multikey Metadata Keys:
- * {'': 1, '': 'a'}
- * {'': 1, '': 'a.0'}
- * {'': 1, '': 'a.b'}
- * {'': 1, '': 'a.b.1.c.2.d'}
- * Keys:
- * {'': 'a.b.c', '': 1} // _id: 1, a,b multikey
- * {'': 'a.b.c', '': 0} // _id: 2, a,b multikey
- * {'': 'a.b.c', '': 1} // _id: 2, a,b multikey
- * {'': 'a.0.b.1.c', '': 1} // _id: 3, '0, 1' are fieldnames, a.0 multikey
- * {'': 'a.0.d', '': 1} // _id: 3, '0' is fieldname, a.0 multikey
- * {'': 'a.b.1.c', '': 1} // _id: 4, '1' is fieldname, a,b multikey
- * {'': 'a.b.1.c.2.d', '': 0} // _id: 5, a,b,a.b.1.c.2.d multikey, '1' is fieldname
- * {'': 'a.b.1.c.2.d', '': 1} // _id: 5
- * {'': 'a.b.1.c.2.d', '': 2} // _id: 5
- * {'': 'a.b.1.c.2.d', '': 3} // _id: 5
- * {'': 'a.b.1.c.2.d.e', '': 1} // _id: 5
- */
+// Insert some documents that exhibit a mix of numeric fieldnames and array indices.
+assert.commandWorked(coll.insert({_id: 1, a: [{b: [{c: 1}]}]}));
+assert.commandWorked(coll.insert({_id: 2, a: [{b: [{c: 0}, {c: 1}]}]}));
+assert.commandWorked(coll.insert({_id: 3, a: {'0': [{b: {'1': {c: 1}}}, {d: 1}]}}));
+assert.commandWorked(coll.insert({_id: 4, a: [{b: [{1: {c: 1}}]}]}));
+assert.commandWorked(
+ coll.insert({_id: 5, a: [{b: [{'1': {c: {'2': {d: [0, 1, 2, 3, {e: 1}]}}}}]}]}));
- // Test that a query with multiple numeric path components returns all relevant documents,
- // whether the numeric path component refers to a fieldname or array index in each doc:
- //
- // _id:1 will be captured by the special fieldname-or-array-index bounds 'a.b.c', but will be
- // filtered out by the INEXACT_FETCH since it has no array index or fieldname 'b.1'.
- // _id:2 will match both 'a.0' and 'b.1' by array index.
- // _id:3 will match both 'a.0' and 'b.1' by fieldname.
- // _id:4 will match 'a.0' by array index and 'b.1' by fieldname.
- // _id:5 is not captured by the special fieldname-or-array-index bounds.
- //
- // We examine the solution's 'nReturned' versus 'totalDocsExamined' to confirm this.
- // totalDocsExamined: [_id:1, _id:2, _id:3, _id:4], nReturned: [_id:2, _id:3, _id:4]
- assertWildcardQuery({'a.0.b.1.c': 1},
- 'a.0.b.1.c',
- {'executionStats.nReturned': 3, 'executionStats.totalDocsExamined': 4});
+/*
+ * Multikey Metadata Keys:
+ * {'': 1, '': 'a'}
+ * {'': 1, '': 'a.0'}
+ * {'': 1, '': 'a.b'}
+ * {'': 1, '': 'a.b.1.c.2.d'}
+ * Keys:
+ * {'': 'a.b.c', '': 1} // _id: 1, a,b multikey
+ * {'': 'a.b.c', '': 0} // _id: 2, a,b multikey
+ * {'': 'a.b.c', '': 1} // _id: 2, a,b multikey
+ * {'': 'a.0.b.1.c', '': 1} // _id: 3, '0, 1' are fieldnames, a.0 multikey
+ * {'': 'a.0.d', '': 1} // _id: 3, '0' is fieldname, a.0 multikey
+ * {'': 'a.b.1.c', '': 1} // _id: 4, '1' is fieldname, a,b multikey
+ * {'': 'a.b.1.c.2.d', '': 0} // _id: 5, a,b,a.b.1.c.2.d multikey, '1' is fieldname
+ * {'': 'a.b.1.c.2.d', '': 1} // _id: 5
+ * {'': 'a.b.1.c.2.d', '': 2} // _id: 5
+ * {'': 'a.b.1.c.2.d', '': 3} // _id: 5
+ * {'': 'a.b.1.c.2.d.e', '': 1} // _id: 5
+ */
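+
+// (Sketch, reusing the {"$**": 1} index above: cursor.returnKey() surfaces the keys the
+// index produced for a match, shaped as {$_path: <path>, <path>: <value>} rather than the
+// raw {'': <path>, '': <value>} form listed here. For example,
+//     coll.find({"a.b.c": 1}).returnKey()
+// would be expected to include {"$_path": "a.b.c", "a.b.c": 1} for _id: 1 and _id: 2.)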
- // Test that we can query a specific field of an array whose fieldname is itself numeric.
- assertWildcardQuery({'a.0.1.d': 1},
- 'a.0.1.d',
- {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1});
+// Test that a query with multiple numeric path components returns all relevant documents,
+// whether the numeric path component refers to a fieldname or array index in each doc:
+//
+// _id:1 will be captured by the special fieldname-or-array-index bounds 'a.b.c', but will be
+// filtered out by the INEXACT_FETCH since it has no array index or fieldname 'b.1'.
+// _id:2 will match both 'a.0' and 'b.1' by array index.
+// _id:3 will match both 'a.0' and 'b.1' by fieldname.
+// _id:4 will match 'a.0' by array index and 'b.1' by fieldname.
+// _id:5 is not captured by the special fieldname-or-array-index bounds.
+//
+// We examine the solution's 'nReturned' versus 'totalDocsExamined' to confirm this.
+// totalDocsExamined: [_id:1, _id:2, _id:3, _id:4], nReturned: [_id:2, _id:3, _id:4]
+assertWildcardQuery({'a.0.b.1.c': 1},
+ 'a.0.b.1.c',
+ {'executionStats.nReturned': 3, 'executionStats.totalDocsExamined': 4});
- // Test that we can query a primitive value at a specific array index.
- assertWildcardQuery({'a.0.b.1.c.2.d.3': 3},
- 'a.0.b.1.c.2.d.3',
- {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1});
+// Test that we can query a specific field of an array whose fieldname is itself numeric.
+assertWildcardQuery({'a.0.1.d': 1},
+ 'a.0.1.d',
+ {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1});
- // Test that a $** index can't be used for a query through more than 8 nested array indices.
- assert.commandWorked(
- coll.insert({_id: 6, a: [{b: [{c: [{d: [{e: [{f: [{g: [{h: [{i: [1]}]}]}]}]}]}]}]}]}));
- // We can query up to a depth of 8 arrays via specific indices, but not through 9 or more.
- assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i': 1},
- 'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i');
- assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i.0': 1}, null);
+// Test that we can query a primitive value at a specific array index.
+assertWildcardQuery({'a.0.b.1.c.2.d.3': 3},
+ 'a.0.b.1.c.2.d.3',
+ {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1});
- // Test that fieldname-or-array-index queries do not inappropriately trim predicates; that is,
- // all predicates on the field are added to a FETCH filter above the IXSCAN.
- assert(coll.drop());
- assert.commandWorked(coll.createIndex({"$**": 1}));
+// Test that a $** index can't be used for a query through more than 8 nested array indices.
+assert.commandWorked(
+ coll.insert({_id: 6, a: [{b: [{c: [{d: [{e: [{f: [{g: [{h: [{i: [1]}]}]}]}]}]}]}]}]}));
+// We can query up to a depth of 8 arrays via specific indices, but not through 9 or more.
+assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i': 1}, 'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i');
+assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i.0': 1}, null);
- assert.commandWorked(coll.insert({_id: 1, a: [0, 1, 2]}));
- assert.commandWorked(coll.insert({_id: 2, a: [1, 2, 3]}));
- assert.commandWorked(coll.insert({_id: 3, a: [2, 3, 4], b: [5, 6, 7]}));
- assert.commandWorked(coll.insert({_id: 4, a: [3, 4, 5], b: [6, 7, 8], c: {'0': 9}}));
- assert.commandWorked(coll.insert({_id: 5, a: [4, 5, 6], b: [7, 8, 9], c: {'0': 10}}));
- assert.commandWorked(coll.insert({_id: 6, a: [5, 6, 7], b: [8, 9, 10], c: {'0': 11}}));
+// Test that fieldname-or-array-index queries do not inappropriately trim predicates; that is,
+// all predicates on the field are added to a FETCH filter above the IXSCAN.
+assert(coll.drop());
+assert.commandWorked(coll.createIndex({"$**": 1}));
- assertWildcardQuery({"a.0": {$gt: 1, $lt: 4}}, 'a.0', {'executionStats.nReturned': 2});
- assertWildcardQuery({"a.1": {$gte: 1, $lte: 4}}, 'a.1', {'executionStats.nReturned': 4});
- assertWildcardQuery({"b.2": {$in: [5, 9]}}, 'b.2', {'executionStats.nReturned': 1});
- assertWildcardQuery({"c.0": {$in: [10, 11]}}, 'c.0', {'executionStats.nReturned': 2});
+assert.commandWorked(coll.insert({_id: 1, a: [0, 1, 2]}));
+assert.commandWorked(coll.insert({_id: 2, a: [1, 2, 3]}));
+assert.commandWorked(coll.insert({_id: 3, a: [2, 3, 4], b: [5, 6, 7]}));
+assert.commandWorked(coll.insert({_id: 4, a: [3, 4, 5], b: [6, 7, 8], c: {'0': 9}}));
+assert.commandWorked(coll.insert({_id: 5, a: [4, 5, 6], b: [7, 8, 9], c: {'0': 10}}));
+assert.commandWorked(coll.insert({_id: 6, a: [5, 6, 7], b: [8, 9, 10], c: {'0': 11}}));
- // Test that the $** index doesn't trim predicates when planning across multiple nested $and/$or
- // expressions on various fieldname-or-array-index paths.
- const trimTestQuery = {
- $or: [
- {"a.0": {$gte: 0, $lt: 3}, "a.1": {$in: [2, 3, 4]}},
- {"b.1": {$gt: 6, $lte: 9}, "c.0": {$gt: 9, $lt: 12}}
- ]
- };
- const trimTestExplain = coll.find(trimTestQuery).explain("executionStats");
- // Verify that the expected number of documents were matched, and the $** index was used.
- // Matched documents: [_id:2, _id:3, _id:5, _id:6]
- assert.eq(trimTestExplain.executionStats.nReturned, 4);
- const trimTestIxScans = getPlanStages(trimTestExplain.queryPlanner.winningPlan, "IXSCAN");
- for (let ixScan of trimTestIxScans) {
- assert.eq(ixScan.keyPattern["$_path"], 1);
- }
- // Finally, confirm that a collection scan produces the same results.
- assertArrayEq(coll.find(trimTestQuery).toArray(),
- coll.find(trimTestQuery).hint({$natural: 1}).toArray());
+assertWildcardQuery({"a.0": {$gt: 1, $lt: 4}}, 'a.0', {'executionStats.nReturned': 2});
+assertWildcardQuery({"a.1": {$gte: 1, $lte: 4}}, 'a.1', {'executionStats.nReturned': 4});
+assertWildcardQuery({"b.2": {$in: [5, 9]}}, 'b.2', {'executionStats.nReturned': 1});
+assertWildcardQuery({"c.0": {$in: [10, 11]}}, 'c.0', {'executionStats.nReturned': 2});
- // Verify that no overlapping bounds are generated and all the expected documents are returned
- // for fieldname-or-array-index queries.
- const existenceQuery = {"a.0.1": {$exists: true}};
- assert.commandWorked(coll.insert({a: [{1: "exists"}, 1]}));
- assert.commandWorked(coll.insert({a: {0: {1: "exists"}}}));
- assert.commandWorked(coll.insert({a: {0: [2, "exists"]}}));
- assert.commandWorked(coll.insert({a: {0: [2, {"object_exists": 1}]}}));
- assert.commandWorked(coll.insert({a: {0: [2, ["array_exists"]]}}));
- assert.commandWorked(coll.insert({a: {0: [{1: "exists"}]}}));
- assert.commandWorked(coll.insert({a: {0: [{1: []}]}}));
- assert.commandWorked(coll.insert({a: {0: [{1: {}}]}}));
- assert.commandWorked(coll.insert({a: [{0: [{1: ["exists"]}]}]}));
- assert.commandWorked(coll.insert({a: [{}, {0: [{1: ["exists"]}]}]}));
- assert.commandWorked(coll.insert({a: [{}, {0: [[], {}, {1: ["exists"]}]}]}));
+// Test that the $** index doesn't trim predicates when planning across multiple nested $and/$or
+// expressions on various fieldname-or-array-index paths.
+const trimTestQuery = {
+ $or: [
+ {"a.0": {$gte: 0, $lt: 3}, "a.1": {$in: [2, 3, 4]}},
+ {"b.1": {$gt: 6, $lte: 9}, "c.0": {$gt: 9, $lt: 12}}
+ ]
+};
+const trimTestExplain = coll.find(trimTestQuery).explain("executionStats");
+// Verify that the expected number of documents were matched, and the $** index was used.
+// Matched documents: [_id:2, _id:3, _id:5, _id:6]
+assert.eq(trimTestExplain.executionStats.nReturned, 4);
+const trimTestIxScans = getPlanStages(trimTestExplain.queryPlanner.winningPlan, "IXSCAN");
+for (let ixScan of trimTestIxScans) {
+ assert.eq(ixScan.keyPattern["$_path"], 1);
+}
+// Finally, confirm that a collection scan produces the same results.
+assertArrayEq(coll.find(trimTestQuery).toArray(),
+ coll.find(trimTestQuery).hint({$natural: 1}).toArray());
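+
+// (Spot-check sketch: the untrimmed predicates should survive as a residual 'filter' on
+// the FETCH stage sitting above each IXSCAN, rather than being folded into the bounds:
+//     getPlanStages(trimTestExplain.queryPlanner.winningPlan, "FETCH")
+//         .forEach((fetch) => printjson(fetch.filter));
+// 'filter' is the standard explain field for residual predicates.)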
- assert.commandWorked(coll.insert({a: {0: ["not_exist"]}}));
- assert.commandWorked(coll.insert({a: {"01": ["not_exist"]}}));
- assert.commandWorked(coll.insert({a: [{11: "not_exist"}]}));
+// Verify that no overlapping bounds are generated and all the expected documents are returned
+// for fieldname-or-array-index queries.
+const existenceQuery = {
+ "a.0.1": {$exists: true}
+};
+assert.commandWorked(coll.insert({a: [{1: "exists"}, 1]}));
+assert.commandWorked(coll.insert({a: {0: {1: "exists"}}}));
+assert.commandWorked(coll.insert({a: {0: [2, "exists"]}}));
+assert.commandWorked(coll.insert({a: {0: [2, {"object_exists": 1}]}}));
+assert.commandWorked(coll.insert({a: {0: [2, ["array_exists"]]}}));
+assert.commandWorked(coll.insert({a: {0: [{1: "exists"}]}}));
+assert.commandWorked(coll.insert({a: {0: [{1: []}]}}));
+assert.commandWorked(coll.insert({a: {0: [{1: {}}]}}));
+assert.commandWorked(coll.insert({a: [{0: [{1: ["exists"]}]}]}));
+assert.commandWorked(coll.insert({a: [{}, {0: [{1: ["exists"]}]}]}));
+assert.commandWorked(coll.insert({a: [{}, {0: [[], {}, {1: ["exists"]}]}]}));
- assertWildcardQuery(existenceQuery, 'a.0.1', {'executionStats.nReturned': 11});
- // Finally, confirm that a collection scan produces the same results.
- assertArrayEq(coll.find(existenceQuery).toArray(),
- coll.find(existenceQuery).hint({$natural: 1}).toArray());
+assert.commandWorked(coll.insert({a: {0: ["not_exist"]}}));
+assert.commandWorked(coll.insert({a: {"01": ["not_exist"]}}));
+assert.commandWorked(coll.insert({a: [{11: "not_exist"}]}));
+assertWildcardQuery(existenceQuery, 'a.0.1', {'executionStats.nReturned': 11});
+// Finally, confirm that a collection scan produces the same results.
+assertArrayEq(coll.find(existenceQuery).toArray(),
+ coll.find(existenceQuery).hint({$natural: 1}).toArray());
})();
diff --git a/jstests/core/wildcard_index_nonblocking_sort.js b/jstests/core/wildcard_index_nonblocking_sort.js
index c21a0cacdb5..2537906b412 100644
--- a/jstests/core/wildcard_index_nonblocking_sort.js
+++ b/jstests/core/wildcard_index_nonblocking_sort.js
@@ -1,85 +1,83 @@
// @tags: [assumes_balancer_off]
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For arrayEq().
- load("jstests/libs/analyze_plan.js"); // For getPlanStages().
- load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
+load("jstests/aggregation/extras/utils.js"); // For arrayEq().
+load("jstests/libs/analyze_plan.js"); // For getPlanStages().
+load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
- const coll = db.wildcard_nonblocking_sort;
+const coll = db.wildcard_nonblocking_sort;
- assert.commandWorked(coll.createIndex({"$**": 1}, {wildcardProjection: {"excludedField": 0}}));
+assert.commandWorked(coll.createIndex({"$**": 1}, {wildcardProjection: {"excludedField": 0}}));
- for (let i = 0; i < 50; i++) {
- assert.commandWorked(coll.insert({a: i, b: -i, x: [123], excludedField: i}));
- }
+for (let i = 0; i < 50; i++) {
+ assert.commandWorked(coll.insert({a: i, b: -i, x: [123], excludedField: i}));
+}
- function checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection) {
- const l = coll.find(query, projection).sort(sort).toArray();
- const r = coll.find(query, projection).sort(sort).hint({$natural: 1}).toArray();
- assert(arrayEq(l, r));
- }
+function checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection) {
+ const l = coll.find(query, projection).sort(sort).toArray();
+ const r = coll.find(query, projection).sort(sort).hint({$natural: 1}).toArray();
+ assert(arrayEq(l, r));
+}
- function checkQueryUsesSortType(query, sort, projection, isBlocking) {
- const explain = assert.commandWorked(coll.find(query, projection).sort(sort).explain());
- const plan = explain.queryPlanner.winningPlan;
+function checkQueryUsesSortType(query, sort, projection, isBlocking) {
+ const explain = assert.commandWorked(coll.find(query, projection).sort(sort).explain());
+ const plan = explain.queryPlanner.winningPlan;
- const ixScans = getPlanStages(plan, "IXSCAN");
- const sorts = getPlanStages(plan, "SORT");
+ const ixScans = getPlanStages(plan, "IXSCAN");
+ const sorts = getPlanStages(plan, "SORT");
- if (isBlocking) {
- assert.eq(sorts.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.eq(sorts[0].sortPattern, sort);
+ if (isBlocking) {
+ assert.eq(sorts.length, FixtureHelpers.numberOfShardsForCollection(coll));
+ assert.eq(sorts[0].sortPattern, sort);
- // A blocking sort may or may not use the index, so we don't check the length of
- // 'ixScans'.
- } else {
- assert.eq(sorts.length, 0);
- assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
+ // A blocking sort may or may not use the index, so we don't check the length of
+ // 'ixScans'.
+ } else {
+ assert.eq(sorts.length, 0);
+ assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- const sortKey = Object.keys(sort)[0];
- assert.docEq(ixScans[0].keyPattern, {$_path: 1, [sortKey]: 1});
- }
+ const sortKey = Object.keys(sort)[0];
+ assert.docEq(ixScans[0].keyPattern, {$_path: 1, [sortKey]: 1});
}
-
- function checkQueryUsesNonBlockingSortAndGetsCorrectResults(query, sort, projection) {
- checkQueryUsesSortType(query, sort, projection, false);
- checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection);
- }
-
- function checkQueryUsesBlockingSortAndGetsCorrectResults(query, sort, projection) {
- checkQueryUsesSortType(query, sort, projection, true);
- checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection);
- }
-
- function runSortTests(dir, proj) {
- // Test that the $** index can provide a non-blocking sort where appropriate.
- checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir}, proj);
- checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}, x: 123}, {a: dir}, proj);
-
- // Test that the $** index can produce a solution with a blocking sort where appropriate.
- checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: dir}, proj);
- checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: -dir}, proj);
- checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: -dir, b: dir}, proj);
- checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$exists: true}}, {a: dir}, proj);
- checkQueryUsesBlockingSortAndGetsCorrectResults({}, {a: dir}, proj);
-
- // Test sorted queries on a field that is excluded by the $** index's wildcardProjection.
- checkQueryUsesBlockingSortAndGetsCorrectResults(
- {excludedField: {$gte: 0}}, {excludedField: dir}, proj);
-
- // Test sorted queries on a multikey field, with and without $elemMatch.
- checkQueryUsesBlockingSortAndGetsCorrectResults({x: 123}, {a: dir}, proj);
- checkQueryUsesBlockingSortAndGetsCorrectResults(
- {x: {$elemMatch: {$eq: 123}}}, {x: dir}, proj);
- checkQueryUsesBlockingSortAndGetsCorrectResults(
- {x: {$elemMatch: {$eq: 123}}}, {a: dir}, proj);
- }
-
- // Run each test for both ascending and descending sorts, with and without a projection.
- for (let dir of[1, -1]) {
- for (let proj of[{}, {_id: 0, a: 1}]) {
- runSortTests(dir, proj);
- }
+}
+
+function checkQueryUsesNonBlockingSortAndGetsCorrectResults(query, sort, projection) {
+ checkQueryUsesSortType(query, sort, projection, false);
+ checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection);
+}
+
+function checkQueryUsesBlockingSortAndGetsCorrectResults(query, sort, projection) {
+ checkQueryUsesSortType(query, sort, projection, true);
+ checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection);
+}
+
+function runSortTests(dir, proj) {
+ // Test that the $** index can provide a non-blocking sort where appropriate.
+ checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir}, proj);
+ checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}, x: 123}, {a: dir}, proj);
+
+ // Test that the $** index can produce a solution with a blocking sort where appropriate.
+ checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: dir}, proj);
+ checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: -dir}, proj);
+ checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: -dir, b: dir}, proj);
+ checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$exists: true}}, {a: dir}, proj);
+ checkQueryUsesBlockingSortAndGetsCorrectResults({}, {a: dir}, proj);
+
+ // Test sorted queries on a field that is excluded by the $** index's wildcardProjection.
+ checkQueryUsesBlockingSortAndGetsCorrectResults(
+ {excludedField: {$gte: 0}}, {excludedField: dir}, proj);
+
+ // Test sorted queries on a multikey field, with and without $elemMatch.
+ checkQueryUsesBlockingSortAndGetsCorrectResults({x: 123}, {a: dir}, proj);
+ checkQueryUsesBlockingSortAndGetsCorrectResults({x: {$elemMatch: {$eq: 123}}}, {x: dir}, proj);
+ checkQueryUsesBlockingSortAndGetsCorrectResults({x: {$elemMatch: {$eq: 123}}}, {a: dir}, proj);
+}
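+
+// Illustrative sketch (assumes an unsharded deployment): a sort the $** index can provide
+// shows no SORT stage in the winning plan, while an unsupported compound sort falls back
+// to a blocking SORT:
+//     let plan = coll.find({a: {$gte: 0}}).sort({a: 1}).explain().queryPlanner.winningPlan;
+//     assert.eq(getPlanStages(plan, "SORT").length, 0);   // non-blocking
+//     plan = coll.find({a: {$gte: 0}}).sort({a: 1, b: 1}).explain().queryPlanner.winningPlan;
+//     assert.gt(getPlanStages(plan, "SORT").length, 0);   // blocking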
+
+// Run each test for both ascending and descending sorts, with and without a projection.
+for (let dir of [1, -1]) {
+ for (let proj of [{}, {_id: 0, a: 1}]) {
+ runSortTests(dir, proj);
}
+}
})();
diff --git a/jstests/core/wildcard_index_partial_index.js b/jstests/core/wildcard_index_partial_index.js
index 5961caea87a..fa76746d9f9 100644
--- a/jstests/core/wildcard_index_partial_index.js
+++ b/jstests/core/wildcard_index_partial_index.js
@@ -2,47 +2,47 @@
* Test that $** indexes work when provided with a partial filter expression.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For isIxScan, isCollscan.
+load("jstests/libs/analyze_plan.js"); // For isIxScan, isCollscan.
- const coll = db.wildcard_partial_index;
+const coll = db.wildcard_partial_index;
- function testPartialWildcardIndex(indexKeyPattern, indexOptions) {
- coll.drop();
+function testPartialWildcardIndex(indexKeyPattern, indexOptions) {
+ coll.drop();
- assert.commandWorked(coll.createIndex(indexKeyPattern, indexOptions));
- assert.commandWorked(coll.insert({x: 5, a: 2})); // Not in index.
- assert.commandWorked(coll.insert({x: 6, a: 1})); // In index.
+ assert.commandWorked(coll.createIndex(indexKeyPattern, indexOptions));
+ assert.commandWorked(coll.insert({x: 5, a: 2})); // Not in index.
+ assert.commandWorked(coll.insert({x: 6, a: 1})); // In index.
- // find() operations that should use the index.
- let explain = coll.explain("executionStats").find({x: 6, a: 1}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain("executionStats").find({x: {$gt: 1}, a: 1}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain("executionStats").find({x: 6, a: {$lte: 1}}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isIxscan(db, explain.queryPlanner.winningPlan));
+ // find() operations that should use the index.
+ let explain = coll.explain("executionStats").find({x: 6, a: 1}).finish();
+ assert.eq(1, explain.executionStats.nReturned);
+ assert(isIxscan(db, explain.queryPlanner.winningPlan));
+ explain = coll.explain("executionStats").find({x: {$gt: 1}, a: 1}).finish();
+ assert.eq(1, explain.executionStats.nReturned);
+ assert(isIxscan(db, explain.queryPlanner.winningPlan));
+ explain = coll.explain("executionStats").find({x: 6, a: {$lte: 1}}).finish();
+ assert.eq(1, explain.executionStats.nReturned);
+ assert(isIxscan(db, explain.queryPlanner.winningPlan));
- // find() operations that should not use the index.
- explain = coll.explain("executionStats").find({x: 6, a: {$lt: 1.6}}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
+ // find() operations that should not use the index.
+ explain = coll.explain("executionStats").find({x: 6, a: {$lt: 1.6}}).finish();
+ assert.eq(1, explain.executionStats.nReturned);
+ assert(isCollscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain("executionStats").find({x: 6}).finish();
- assert.eq(1, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
+ explain = coll.explain("executionStats").find({x: 6}).finish();
+ assert.eq(1, explain.executionStats.nReturned);
+ assert(isCollscan(db, explain.queryPlanner.winningPlan));
- explain = coll.explain("executionStats").find({a: {$gte: 0}}).finish();
- assert.eq(2, explain.executionStats.nReturned);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
- }
+ explain = coll.explain("executionStats").find({a: {$gte: 0}}).finish();
+ assert.eq(2, explain.executionStats.nReturned);
+ assert(isCollscan(db, explain.queryPlanner.winningPlan));
+}
- // Case where the partial filter expression is on a field in the index.
- testPartialWildcardIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}});
+// Case where the partial filter expression is on a field in the index.
+testPartialWildcardIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}});
- // Case where the partial filter expression is on a field not included in the index.
- testPartialWildcardIndex({"x.$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}});
+// Case where the partial filter expression is on a field not included in the index.
+testPartialWildcardIndex({"x.$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}});
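+
+// (Note on the second case: the partial filter gates which documents receive index entries
+// even though 'a' lies outside the "x.$**" subtree, so {x: 6, a: 1} is indexed under 'x'
+// while {x: 5, a: 2} produces no index keys at all.)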
})();
diff --git a/jstests/core/wildcard_index_return_key.js b/jstests/core/wildcard_index_return_key.js
index ceaf691aad8..53f7da8c09c 100644
--- a/jstests/core/wildcard_index_return_key.js
+++ b/jstests/core/wildcard_index_return_key.js
@@ -2,58 +2,57 @@
 * Tests that $** indexes work with the returnKey option.
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/aggregation/extras/utils.js");
+load("jstests/aggregation/extras/utils.js");
- const coll = db.wildcard_return_key;
- coll.drop();
+const coll = db.wildcard_return_key;
+coll.drop();
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
- const assertArrayNotEq = (l, r) => assert(!arrayEq(l, r), tojson(l) + " == " + tojson(r));
+const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
+const assertArrayNotEq = (l, r) => assert(!arrayEq(l, r), tojson(l) + " == " + tojson(r));
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 1}}));
- assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}}));
- assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}}));
- assert.commandWorked(coll.insert({a: 1, b: 1, c: {e: 2}}));
+assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 1}}));
+assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}}));
+assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}}));
+assert.commandWorked(coll.insert({a: 1, b: 1, c: {e: 2}}));
- // $** index return key with one field argument.
- assertArrayEq(coll.find({a: 1}).returnKey().toArray(),
- [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]);
+// $** index return key with one field argument.
+assertArrayEq(coll.find({a: 1}).returnKey().toArray(),
+ [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]);
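+// (Two identical keys come back because two of the documents above have a: 1.)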
- // $** index return key with dot path argument.
- assertArrayEq(coll.find({"c.e": 1}).returnKey().toArray(), [{"$_path": "c.e", "c.e": 1}]);
+// $** index return key with dot path argument.
+assertArrayEq(coll.find({"c.e": 1}).returnKey().toArray(), [{"$_path": "c.e", "c.e": 1}]);
- assert.commandWorked(coll.createIndex({"a": 1}));
+assert.commandWorked(coll.createIndex({"a": 1}));
- // $** index return key with competing regular index.
- assertArrayEq(coll.find({a: 1}).hint({"$**": 1}).returnKey().toArray(),
- [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]);
+// $** index return key with competing regular index.
+assertArrayEq(coll.find({a: 1}).hint({"$**": 1}).returnKey().toArray(),
+ [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]);
- assert.commandWorked(coll.createIndex({"a": 1, "b": 1}));
+assert.commandWorked(coll.createIndex({"a": 1, "b": 1}));
- // $** index return key with competing compound index.
- assertArrayNotEq(coll.find({a: 1, b: 1}).hint({"$**": 1}).returnKey().toArray(),
- [{a: 1, b: 1}]);
+// $** index return key with competing compound index.
+assertArrayNotEq(coll.find({a: 1, b: 1}).hint({"$**": 1}).returnKey().toArray(), [{a: 1, b: 1}]);
- assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, f: [1, 2, 3]}));
- assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, g: [{h: 1}, {i: 2}]}));
+assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, f: [1, 2, 3]}));
+assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, g: [{h: 1}, {i: 2}]}));
- // Multikey path $** index return key.
- assertArrayEq(coll.find({f: 1}).returnKey().toArray(), [{"$_path": "f", f: 1}]);
+// Multikey path $** index return key.
+assertArrayEq(coll.find({f: 1}).returnKey().toArray(), [{"$_path": "f", f: 1}]);
- // Multikey subobject $** index return key.
- assertArrayEq(coll.find({"g.h": 1}).returnKey().toArray(), [{"$_path": "g.h", "g.h": 1}]);
+// Multikey subobject $** index return key.
+assertArrayEq(coll.find({"g.h": 1}).returnKey().toArray(), [{"$_path": "g.h", "g.h": 1}]);
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.createIndex({"c.$**": 1}));
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.createIndex({"c.$**": 1}));
-    // Path-specified $** index return key.
- assertArrayEq(coll.find({"c.d": 1}).returnKey().toArray(), [{"$_path": "c.d", "c.d": 1}]);
+// Path specified $** index return key.
+assertArrayEq(coll.find({"c.d": 1}).returnKey().toArray(), [{"$_path": "c.d", "c.d": 1}]);
-    // Path-specified $** index return key with an irrelevant query. We expect this query to be
- // answered with a COLLSCAN, in which case returnKey is expected to return empty objects.
- assertArrayEq(coll.find({a: 1, b: 1}).returnKey().toArray(), [{}]);
+// Path-specified $** index return key with an irrelevant query. We expect this query to be
+// answered with a COLLSCAN, in which case returnKey is expected to return empty objects.
+assertArrayEq(coll.find({a: 1, b: 1}).returnKey().toArray(), [{}]);
})();
\ No newline at end of file
diff --git a/jstests/core/wildcard_index_type.js b/jstests/core/wildcard_index_type.js
index 4e8d5c68939..34831c3f320 100644
--- a/jstests/core/wildcard_index_type.js
+++ b/jstests/core/wildcard_index_type.js
@@ -2,144 +2,143 @@
* Test $** support for the $type operator.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+load("jstests/libs/analyze_plan.js"); // For getPlanStages.
- const coll = db.wildcard_index_type;
+const coll = db.wildcard_index_type;
+coll.drop();
+
+const indexWildcard = {
+ "$**": 1
+};
+
+// Inserts the given document and runs the given query to confirm that:
+// (1) the query matches the given document if 'match' is true,
+// (2) the winning plan does a wildcard index scan, and
+// (3) the resulting index bounds match 'expectedBounds' if given.
+function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, expectedBounds) {
coll.drop();
+ assert.commandWorked(coll.createIndex(indexWildcard));
+ assert.commandWorked(coll.insert(doc));
+
+ // Check that a wildcard index scan is being used to answer query.
+ const explain = coll.explain("executionStats").find(query).finish();
+ if (!match) {
+ assert.eq(0, explain.executionStats.nReturned, explain);
+ return;
+ }
- const indexWildcard = {"$**": 1};
-
- // Inserts the given document and runs the given query to confirm that:
-    // (1) the query matches the given document if 'match' is true,
-    // (2) the winning plan does a wildcard index scan, and
-    // (3) the resulting index bounds match 'expectedBounds' if given.
- function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, expectedBounds) {
- coll.drop();
- assert.commandWorked(coll.createIndex(indexWildcard));
- assert.commandWorked(coll.insert(doc));
-
- // Check that a wildcard index scan is being used to answer query.
- const explain = coll.explain("executionStats").find(query).finish();
- if (!match) {
- assert.eq(0, explain.executionStats.nReturned, explain);
- return;
- }
-
- // Check that the query returns the document.
- assert.eq(1, explain.executionStats.nReturned, explain);
-
- // Winning plan uses a wildcard index scan.
- const winningPlan = explain.queryPlanner.winningPlan;
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0, explain);
- ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path));
-
- // Expected bounds were used.
- if (expectedBounds !== undefined) {
- ixScans.forEach((ixScan) => assert.docEq(ixScan.indexBounds, expectedBounds));
- }
+ // Check that the query returns the document.
+ assert.eq(1, explain.executionStats.nReturned, explain);
+
+ // Winning plan uses a wildcard index scan.
+ const winningPlan = explain.queryPlanner.winningPlan;
+ const ixScans = getPlanStages(winningPlan, "IXSCAN");
+ assert.gt(ixScans.length, 0, explain);
+ ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path));
+
+ // Expected bounds were used.
+ if (expectedBounds !== undefined) {
+ ixScans.forEach((ixScan) => assert.docEq(ixScan.indexBounds, expectedBounds));
+ }
+}
+
+// A $type of 'string' will match a string value.
+assertExpectedDocAnswersWildcardIndexQuery({a: "a"}, {a: {$type: "string"}}, true);
+
+// A $type of 'double' will match a double.
+assertExpectedDocAnswersWildcardIndexQuery({a: 1.1}, {a: {$type: "double"}}, true);
+
+// A $type of 'boolean' will match a boolean.
+assertExpectedDocAnswersWildcardIndexQuery({a: true}, {a: {$type: "bool"}}, true);
+
+// A $type of 'string' will match a multifield document with a string value.
+assertExpectedDocAnswersWildcardIndexQuery({a: "a", b: 1.1, c: true}, {a: {$type: "string"}}, true);
+
+// A compound $type of 'string' and 'double' will match a multifield document with a string and
+// double value.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: "a", b: 1.1, c: true}, {a: {$type: "string"}, b: {$type: "double"}}, true);
+
+// A compound $type of 'string' and 'double' won't match a multifield document with a string but
+// no double value.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: "a", b: "b", c: true}, {a: {$type: "string"}, b: {$type: "double"}}, false);
+
+// A $type of 'object' will match an object.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: {"": ""}},
+ {a: {$type: "object"}},
+ true,
+ {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
+
+// A $type of 'object' will match an empty object.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: {}},
+ {a: {$type: "object"}},
+ true,
+ {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
+
+// A $type of 'object' will match a nested object.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {b: {a: {}}},
+ {"b.a": {$type: "object"}},
+ true,
+ {$_path: [`["b.a", "b.a"]`, `["b.a.", "b.a/")`], "b.a": [`[MinKey, MaxKey]`]});
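+
+// (The two $_path bounds above show how $type: "object" is answered by the wildcard index:
+// the point range ["b.a", "b.a"] matches the path itself, while the half-open range
+// ["b.a.", "b.a/") covers every deeper subpath, '/' being the byte immediately after '.'.)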
+
+// A $type of 'array' will match an empty array.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: [[]]},
+ {a: {$type: "array"}},
+ true,
+ {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
+
+// A $type of 'array' will match an array.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: [["c"]]},
+ {a: {$type: "array"}},
+ true,
+ {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
+
+// A $type of 'regex' will match a regex.
+assertExpectedDocAnswersWildcardIndexQuery({a: /r/}, {a: {$type: "regex"}}, true);
+
+// A $type of 'null' will match a null value.
+assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "null"}}, true);
+
+// A $type of 'undefined' will match undefined.
+assertExpectedDocAnswersWildcardIndexQuery({a: undefined}, {a: {$type: "undefined"}}, true);
+
+// A $type of 'undefined' won't match a null value.
+assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "undefined"}}, false);
+
+// A $type of 'code' will match a function value.
+assertExpectedDocAnswersWildcardIndexQuery({
+ a: function() {
+ var a = 0;
}
+},
+ {a: {$type: "javascript"}},
+ true);
+
+// A $type of 'binData' will match a binData value.
+assertExpectedDocAnswersWildcardIndexQuery({a: new BinData(0, "")}, {a: {$type: "binData"}}, true);
+
+// A $type of 'timestamp' will match an empty timestamp value.
+assertExpectedDocAnswersWildcardIndexQuery({a: new Timestamp()}, {a: {$type: "timestamp"}}, true);
+
+// A $type of 'timestamp' will match a timestamp value.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: new Timestamp(0x80008000, 0)}, {a: {$type: "timestamp"}}, true);
+
+// A $type of 'date' won't match a timestamp value.
+assertExpectedDocAnswersWildcardIndexQuery(
+ {a: new Timestamp(0x80008000, 0)}, {a: {$type: "date"}}, false);
+
+// A $type of 'date' will match a date value.
+assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "date"}}, true);
- // A $type of 'string' will match a string value.
- assertExpectedDocAnswersWildcardIndexQuery({a: "a"}, {a: {$type: "string"}}, true);
-
- // A $type of 'double' will match a double.
- assertExpectedDocAnswersWildcardIndexQuery({a: 1.1}, {a: {$type: "double"}}, true);
-
-    // A $type of 'bool' will match a boolean.
- assertExpectedDocAnswersWildcardIndexQuery({a: true}, {a: {$type: "bool"}}, true);
-
- // A $type of 'string' will match a multifield document with a string value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: "a", b: 1.1, c: true}, {a: {$type: "string"}}, true);
-
- // A compound $type of 'string' and 'double' will match a multifield document with a string and
- // double value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: "a", b: 1.1, c: true}, {a: {$type: "string"}, b: {$type: "double"}}, true);
-
- // A compound $type of 'string' and 'double' won't match a multifield document with a string but
- // no double value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: "a", b: "b", c: true}, {a: {$type: "string"}, b: {$type: "double"}}, false);
-
-    // A $type of 'object' will match an object.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: {"": ""}},
- {a: {$type: "object"}},
- true,
- {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
-
- // A $type of 'object' will match an empty object.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: {}},
- {a: {$type: "object"}},
- true,
- {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
-
- // A $type of 'object' will match a nested object.
- assertExpectedDocAnswersWildcardIndexQuery(
- {b: {a: {}}},
- {"b.a": {$type: "object"}},
- true,
- {$_path: [`["b.a", "b.a"]`, `["b.a.", "b.a/")`], "b.a": [`[MinKey, MaxKey]`]});
-
- // A $type of 'array' will match an empty array.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: [[]]},
- {a: {$type: "array"}},
- true,
- {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
-
- // A $type of 'array' will match an array.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: [["c"]]},
- {a: {$type: "array"}},
- true,
- {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]});
-
-    // A $type of 'regex' will match a regex.
- assertExpectedDocAnswersWildcardIndexQuery({a: /r/}, {a: {$type: "regex"}}, true);
-
- // A $type of 'null' will match a null value.
- assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "null"}}, true);
-
- // A $type of 'undefined' will match undefined.
- assertExpectedDocAnswersWildcardIndexQuery({a: undefined}, {a: {$type: "undefined"}}, true);
-
- // A $type of 'undefined' won't match a null value.
- assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "undefined"}}, false);
-
- // A $type of 'code' will match a function value.
- assertExpectedDocAnswersWildcardIndexQuery({
- a: function() {
- var a = 0;
- }
- },
- {a: {$type: "javascript"}},
- true);
-
- // A $type of 'binData' will match a binData value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: new BinData(0, "")}, {a: {$type: "binData"}}, true);
-
- // A $type of 'timestamp' will match an empty timestamp value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: new Timestamp()}, {a: {$type: "timestamp"}}, true);
-
- // A $type of 'timestamp' will match a timestamp value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: new Timestamp(0x80008000, 0)}, {a: {$type: "timestamp"}}, true);
-
- // A $type of 'date' won't match a timestamp value.
- assertExpectedDocAnswersWildcardIndexQuery(
- {a: new Timestamp(0x80008000, 0)}, {a: {$type: "date"}}, false);
-
- // A $type of 'date' will match a date value.
- assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "date"}}, true);
-
- // A $type of 'timestamp' won't match a date value.
- assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "timestamp"}}, false);
+// A $type of 'timestamp' won't match a date value.
+assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "timestamp"}}, false);
})();
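A minimal sketch of the pattern these tests exercise, runnable by hand in the mongo shell (the collection name is hypothetical; getPlanStages() comes from the same plan-analysis helper library the tests use):

    load("jstests/libs/analyze_plan.js");  // for getPlanStages()

    const demo = db.wildcard_type_demo;  // hypothetical collection
    demo.drop();
    assert.commandWorked(demo.createIndex({"$**": 1}));
    assert.commandWorked(demo.insert({a: "a"}));

    // $type predicates are answered from the wildcard index; every IXSCAN in the
    // winning plan keys on the virtual $_path field that wildcard indexes add.
    const explain = demo.explain("executionStats").find({a: {$type: "string"}}).finish();
    assert.eq(1, explain.executionStats.nReturned, explain);
    const ixScans = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN");
    assert.gt(ixScans.length, 0, explain);
    ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path));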
diff --git a/jstests/core/wildcard_index_validindex.js b/jstests/core/wildcard_index_validindex.js
index 647986f55b4..f647bbcc969 100644
--- a/jstests/core/wildcard_index_validindex.js
+++ b/jstests/core/wildcard_index_validindex.js
@@ -6,145 +6,139 @@
* ]
*/
(function() {
- "use strict";
-
- const kCollectionName = "wildcard_validindex";
- const coll = db.getCollection(kCollectionName);
-
- const kIndexName = "wildcard_validindex";
-
- const createIndexHelper = function(key, parameters) {
- return db.runCommand(
- {createIndexes: kCollectionName, indexes: [Object.assign({key: key}, parameters)]});
- };
-
- const createIndexAndVerifyWithDrop = function(key, parameters) {
- coll.dropIndexes();
- createIndexHelper(key, parameters);
- assert.eq(coll.getIndexes()
- .filter((index) => {
- return index.name == parameters.name;
- })
- .length,
- 1);
- };
-
- // Can create a valid wildcard index.
- createIndexAndVerifyWithDrop({"$**": 1}, {name: kIndexName});
-
- // Can create a valid wildcard index with subpaths.
- createIndexAndVerifyWithDrop({"a.$**": 1}, {name: kIndexName});
-
- // Can create a wildcard index with partialFilterExpression.
- createIndexAndVerifyWithDrop({"$**": 1},
- {name: kIndexName, partialFilterExpression: {a: {"$gt": 0}}});
-
- // Can create a wildcard index with foreground & background construction.
- createIndexAndVerifyWithDrop({"$**": 1}, {background: false, name: kIndexName});
- createIndexAndVerifyWithDrop({"$**": 1}, {background: true, name: kIndexName});
-
- // Can create a wildcard index with index level collation.
- createIndexAndVerifyWithDrop({"$**": 1}, {collation: {locale: "fr"}, name: kIndexName});
-
- // Can create a wildcard index with an inclusion projection.
- createIndexAndVerifyWithDrop({"$**": 1},
- {wildcardProjection: {a: 1, b: 1, c: 1}, name: kIndexName});
- // Can create a wildcard index with an exclusion projection.
- createIndexAndVerifyWithDrop({"$**": 1},
- {wildcardProjection: {a: 0, b: 0, c: 0}, name: kIndexName});
- // Can include _id in an exclusion.
- createIndexAndVerifyWithDrop(
- {"$**": 1}, {wildcardProjection: {_id: 1, a: 0, b: 0, c: 0}, name: kIndexName});
-    // Can exclude _id in an inclusion.
- createIndexAndVerifyWithDrop(
- {"$**": 1}, {wildcardProjection: {_id: 0, a: 1, b: 1, c: 1}, name: kIndexName});
-
- // Cannot create a wildcard index with a non-positive numeric key value.
+"use strict";
+
+const kCollectionName = "wildcard_validindex";
+const coll = db.getCollection(kCollectionName);
+
+const kIndexName = "wildcard_validindex";
+
+const createIndexHelper = function(key, parameters) {
+ return db.runCommand(
+ {createIndexes: kCollectionName, indexes: [Object.assign({key: key}, parameters)]});
+};
+
+const createIndexAndVerifyWithDrop = function(key, parameters) {
coll.dropIndexes();
- assert.commandFailedWithCode(coll.createIndex({"$**": 0}), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**": -1}), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**": -2}), ErrorCodes.CannotCreateIndex);
-
-    // Cannot create a wildcard index with the sparse option.
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {sparse: true}),
- ErrorCodes.CannotCreateIndex);
-
-    // Cannot create a wildcard index with a v0 or v1 index version.
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 0}),
- ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 1}),
- ErrorCodes.CannotCreateIndex);
-
- // Cannot create a unique index.
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {unique: true}),
- ErrorCodes.CannotCreateIndex);
-
- // Cannot create a hashed wildcard index.
- assert.commandFailedWithCode(coll.createIndex({"$**": "hashed"}), ErrorCodes.CannotCreateIndex);
-
- // Cannot create a TTL wildcard index.
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {expireAfterSeconds: 3600}),
- ErrorCodes.CannotCreateIndex);
-
- // Cannot create a geoSpatial wildcard index.
- assert.commandFailedWithCode(coll.createIndex({"$**": "2dsphere"}),
- ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**": "2d"}), ErrorCodes.CannotCreateIndex);
-
- // Cannot create a text wildcard index using single sub-path syntax.
- assert.commandFailedWithCode(coll.createIndex({"a.$**": "text"}), ErrorCodes.CannotCreateIndex);
-
- // Cannot specify plugin by string.
- assert.commandFailedWithCode(coll.createIndex({"a": "wildcard"}), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**": "wildcard"}),
- ErrorCodes.CannotCreateIndex);
-
- // Cannot create a compound wildcard index.
- assert.commandFailedWithCode(coll.createIndex({"$**": 1, "a": 1}),
- ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"a": 1, "$**": 1}),
- ErrorCodes.CannotCreateIndex);
-
-    // Cannot create a wildcard index with an invalid spec.
- assert.commandFailedWithCode(coll.createIndex({"a.$**.$**": 1}), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**.$**": 1}), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.createIndex({"$**": "hello"}), ErrorCodes.CannotCreateIndex);
-
-    // Cannot create a wildcard index with mixed inclusion and exclusion.
- assert.commandFailedWithCode(
- createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 0}}), 40178);
-    // Cannot create a wildcard index with computed fields.
- assert.commandFailedWithCode(
- createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: "string"}}),
- ErrorCodes.FailedToParse);
-    // Cannot create a wildcard index with an empty projection.
- assert.commandFailedWithCode(
- createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {}}),
- ErrorCodes.FailedToParse);
-    // Cannot create another index type with a "wildcardProjection" projection.
- assert.commandFailedWithCode(
- createIndexHelper({"a": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}),
- ErrorCodes.BadValue);
- // Cannot create a text index with a "wildcardProjection" projection.
- assert.commandFailedWithCode(
- createIndexHelper({"$**": "text"}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}),
- ErrorCodes.BadValue);
-    // Cannot create a wildcard index with a non-object "wildcardProjection" projection.
- assert.commandFailedWithCode(
- createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: "string"}),
- ErrorCodes.TypeMismatch);
-    // Cannot exclude a subfield of _id in an inclusion.
- assert.commandFailedWithCode(createIndexHelper({"_id.id": 0, a: 1, b: 1, c: 1}),
- ErrorCodes.CannotCreateIndex);
-    // Cannot include a subfield of _id in an exclusion.
- assert.commandFailedWithCode(createIndexHelper({"_id.id": 1, a: 0, b: 0, c: 0}),
- ErrorCodes.CannotCreateIndex);
-
- // Cannot specify both a subpath and a projection.
- assert.commandFailedWithCode(
- createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {a: 1}}),
- ErrorCodes.FailedToParse);
- assert.commandFailedWithCode(
- createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {b: 0}}),
- ErrorCodes.FailedToParse);
+ createIndexHelper(key, parameters);
+ assert.eq(coll.getIndexes()
+ .filter((index) => {
+ return index.name == parameters.name;
+ })
+ .length,
+ 1);
+};
+
+// Can create a valid wildcard index.
+createIndexAndVerifyWithDrop({"$**": 1}, {name: kIndexName});
+
+// Can create a valid wildcard index with subpaths.
+createIndexAndVerifyWithDrop({"a.$**": 1}, {name: kIndexName});
+
+// Can create a wildcard index with partialFilterExpression.
+createIndexAndVerifyWithDrop({"$**": 1},
+ {name: kIndexName, partialFilterExpression: {a: {"$gt": 0}}});
+
+// Can create a wildcard index with foreground & background construction.
+createIndexAndVerifyWithDrop({"$**": 1}, {background: false, name: kIndexName});
+createIndexAndVerifyWithDrop({"$**": 1}, {background: true, name: kIndexName});
+
+// Can create a wildcard index with index level collation.
+createIndexAndVerifyWithDrop({"$**": 1}, {collation: {locale: "fr"}, name: kIndexName});
+
+// Can create a wildcard index with an inclusion projection.
+createIndexAndVerifyWithDrop({"$**": 1},
+ {wildcardProjection: {a: 1, b: 1, c: 1}, name: kIndexName});
+// Can create a wildcard index with an exclusion projection.
+createIndexAndVerifyWithDrop({"$**": 1},
+ {wildcardProjection: {a: 0, b: 0, c: 0}, name: kIndexName});
+// Can include _id in an exclusion.
+createIndexAndVerifyWithDrop({"$**": 1},
+ {wildcardProjection: {_id: 1, a: 0, b: 0, c: 0}, name: kIndexName});
+// Can exclude _id in an inclusion.
+createIndexAndVerifyWithDrop({"$**": 1},
+ {wildcardProjection: {_id: 0, a: 1, b: 1, c: 1}, name: kIndexName});
+
+// Cannot create a wildcard index with a non-positive numeric key value.
+coll.dropIndexes();
+assert.commandFailedWithCode(coll.createIndex({"$**": 0}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**": -1}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**": -2}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a wildcard index with the sparse option.
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {sparse: true}),
+ ErrorCodes.CannotCreateIndex);
+
+// Cannot create a wildcard index with a v0 or v1 index version.
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 0}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 1}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a unique index.
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {unique: true}),
+ ErrorCodes.CannotCreateIndex);
+
+// Cannot create a hashed wildcard index.
+assert.commandFailedWithCode(coll.createIndex({"$**": "hashed"}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a TTL wildcard index.
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {expireAfterSeconds: 3600}),
+ ErrorCodes.CannotCreateIndex);
+
+// Cannot create a geoSpatial wildcard index.
+assert.commandFailedWithCode(coll.createIndex({"$**": "2dsphere"}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**": "2d"}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a text wildcard index using single sub-path syntax.
+assert.commandFailedWithCode(coll.createIndex({"a.$**": "text"}), ErrorCodes.CannotCreateIndex);
+
+// Cannot specify plugin by string.
+assert.commandFailedWithCode(coll.createIndex({"a": "wildcard"}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**": "wildcard"}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a compound wildcard index.
+assert.commandFailedWithCode(coll.createIndex({"$**": 1, "a": 1}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"a": 1, "$**": 1}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a wildcard index with an invalid spec.
+assert.commandFailedWithCode(coll.createIndex({"a.$**.$**": 1}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**.$**": 1}), ErrorCodes.CannotCreateIndex);
+assert.commandFailedWithCode(coll.createIndex({"$**": "hello"}), ErrorCodes.CannotCreateIndex);
+
+// Cannot create a wildcard index with mixed inclusion and exclusion.
+assert.commandFailedWithCode(
+ createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 0}}), 40178);
+// Cannot create a wildcard index with computed fields.
+assert.commandFailedWithCode(
+ createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: "string"}}),
+ ErrorCodes.FailedToParse);
+// Cannot create a wildcard index with an empty projection.
+assert.commandFailedWithCode(
+ createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {}}),
+ ErrorCodes.FailedToParse);
+// Cannot create another index type with a "wildcardProjection" projection.
+assert.commandFailedWithCode(
+ createIndexHelper({"a": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}),
+ ErrorCodes.BadValue);
+// Cannot create a text index with a "wildcardProjection" projection.
+assert.commandFailedWithCode(
+ createIndexHelper({"$**": "text"}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}),
+ ErrorCodes.BadValue);
+// Cannot create a wildcard index with a non-object "wildcardProjection" projection.
+assert.commandFailedWithCode(
+ createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: "string"}),
+ ErrorCodes.TypeMismatch);
+// Cannot exclude a subfield of _id in an inclusion.
+assert.commandFailedWithCode(createIndexHelper({"_id.id": 0, a: 1, b: 1, c: 1}),
+ ErrorCodes.CannotCreateIndex);
+// Cannot include a subfield of _id in an exclusion.
+assert.commandFailedWithCode(createIndexHelper({"_id.id": 1, a: 0, b: 0, c: 0}),
+ ErrorCodes.CannotCreateIndex);
+
+// Cannot specify both a subpath and a projection.
+assert.commandFailedWithCode(
+ createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {a: 1}}),
+ ErrorCodes.FailedToParse);
+assert.commandFailedWithCode(
+ createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {b: 0}}),
+ ErrorCodes.FailedToParse);
})();
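Condensed, the dividing line above: a bare "$**" key accepts a wildcardProjection, while compound keys and subpath-plus-projection combinations are rejected. A shell sketch (collection name hypothetical):

    const demo = db.wildcard_valid_demo;  // hypothetical collection
    demo.dropIndexes();

    // Valid: bare wildcard key with an inclusion projection.
    assert.commandWorked(demo.createIndex({"$**": 1}, {wildcardProjection: {a: 1, b: 1}}));
    demo.dropIndexes();

    // Invalid: compound wildcard key; subpath key combined with a projection.
    assert.commandFailedWithCode(demo.createIndex({"$**": 1, a: 1}), ErrorCodes.CannotCreateIndex);
    assert.commandFailedWithCode(demo.createIndex({"a.$**": 1}, {wildcardProjection: {b: 0}}),
                                 ErrorCodes.FailedToParse);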
diff --git a/jstests/core/write_commands_reject_unknown_fields.js b/jstests/core/write_commands_reject_unknown_fields.js
index d21cf2ed9f3..a7f834280d1 100644
--- a/jstests/core/write_commands_reject_unknown_fields.js
+++ b/jstests/core/write_commands_reject_unknown_fields.js
@@ -3,19 +3,19 @@
// SERVER-23129 Write commands should reject unknown fields. This is run in passthrough tests to
// ensure that both mongos and mongod reject these commands.
(function() {
- 'use strict';
+'use strict';
- var coll = db.write_commands_reject_unknown_fields;
+var coll = db.write_commands_reject_unknown_fields;
- // All commands must reject fields at the top-level.
- assert.commandFailed(coll.runCommand('insert', {documents: [{}], asdf: true}));
- assert.commandFailed(
- coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}}], asdf: true}));
- assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0}], asdf: true}));
+// All commands must reject fields at the top-level.
+assert.commandFailed(coll.runCommand('insert', {documents: [{}], asdf: true}));
+assert.commandFailed(
+ coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}}], asdf: true}));
+assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0}], asdf: true}));
- // The inner objects in update and delete must also reject unknown fields. Insert isn't included
- // because its inner objects are the raw objects to insert and can have any fields.
- assert.commandFailed(
- coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}, asdf: true}]}));
- assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0, asdf: true}]}));
+// The inner objects in update and delete must also reject unknown fields. Insert isn't included
+// because its inner objects are the raw objects to insert and can have any fields.
+assert.commandFailed(
+ coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}, asdf: true}]}));
+assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0, asdf: true}]}));
}());
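The asymmetry being tested, in one sketch (hypothetical collection name): unknown fields are rejected at the top level and inside update/delete statements, but fields inside inserted documents are plain user data and go through:

    var demo = db.unknown_fields_demo;  // hypothetical collection
    assert.commandFailed(demo.runCommand('insert', {documents: [{}], asdf: true}));
    assert.commandFailed(demo.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}, asdf: true}]}));
    assert.commandWorked(demo.runCommand('insert', {documents: [{asdf: true}]}));  // data, not an option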
diff --git a/jstests/core_standalone/read_concern.js b/jstests/core_standalone/read_concern.js
index 1646ac71d11..769189396b1 100644
--- a/jstests/core_standalone/read_concern.js
+++ b/jstests/core_standalone/read_concern.js
@@ -1,48 +1,46 @@
// This test verifies readConcern behavior on a standalone mongod or embedded mongoed.
// @tags: [requires_majority_read_concern]
(function() {
- 'use strict';
-
- // For isWiredTiger.
- load("jstests/concurrency/fsm_workload_helpers/server_types.js");
-
- var t = db.read_concern;
- t.drop();
-
- assert.commandWorked(t.runCommand({insert: "read_concern", documents: [{x: 1}]}));
-
-    // Local readConcern succeeds.
- assert.commandWorked(t.runCommand({find: "read_concern", readConcern: {level: "local"}}),
- "expected local readConcern to succeed on standalone mongod");
-
-    // Available readConcern succeeds.
- assert.commandWorked(t.runCommand({find: "read_concern", readConcern: {level: "available"}}),
- "expected available readConcern to succeed on standalone mongod");
-
- var majority_result = t.runCommand({find: "read_concern", readConcern: {level: "majority"}});
- if (isWiredTiger(db) || (isEphemeral(db) && !isEphemeralForTest(db))) {
-        // Majority readConcern succeeds.
- assert.commandWorked(majority_result,
- "expected majority readConcern to succeed on standalone mongod");
- } else {
-        // Majority readConcern fails.
- assert.commandFailedWithCode(
- majority_result,
- [ErrorCodes.ReadConcernMajorityNotEnabled, ErrorCodes.NotImplemented],
- "expected majority readConcern to fail on standalone mongod");
- }
-
-    // Snapshot readConcern fails.
+'use strict';
+
+// For isWiredTiger.
+load("jstests/concurrency/fsm_workload_helpers/server_types.js");
+
+var t = db.read_concern;
+t.drop();
+
+assert.commandWorked(t.runCommand({insert: "read_concern", documents: [{x: 1}]}));
+
+// Local readConcern succeeds.
+assert.commandWorked(t.runCommand({find: "read_concern", readConcern: {level: "local"}}),
+ "expected local readConcern to succeed on standalone mongod");
+
+// Available readConcern succeeds.
+assert.commandWorked(t.runCommand({find: "read_concern", readConcern: {level: "available"}}),
+ "expected available readConcern to succeed on standalone mongod");
+
+var majority_result = t.runCommand({find: "read_concern", readConcern: {level: "majority"}});
+if (isWiredTiger(db) || (isEphemeral(db) && !isEphemeralForTest(db))) {
+    // Majority readConcern succeeds.
+ assert.commandWorked(majority_result,
+ "expected majority readConcern to succeed on standalone mongod");
+} else {
+    // Majority readConcern fails.
assert.commandFailedWithCode(
- t.runCommand({find: "read_concern", readConcern: {level: "snapshot"}}),
- [ErrorCodes.InvalidOptions, ErrorCodes.NotImplemented],
- "expected snapshot readConcern to fail on standalone mongod");
-
- // Standalones don't support any operations with clusterTime.
- assert.commandFailedWithCode(t.runCommand({
- find: "read_concern",
- readConcern: {level: "local", afterClusterTime: Timestamp(0, 1)}
- }),
- [ErrorCodes.IllegalOperation, ErrorCodes.NotImplemented],
- "expected afterClusterTime read to fail on standalone mongod");
+ majority_result,
+ [ErrorCodes.ReadConcernMajorityNotEnabled, ErrorCodes.NotImplemented],
+ "expected majority readConcern to fail on standalone mongod");
+}
+
+// Snapshot readConcern fails.
+assert.commandFailedWithCode(t.runCommand({find: "read_concern", readConcern: {level: "snapshot"}}),
+ [ErrorCodes.InvalidOptions, ErrorCodes.NotImplemented],
+ "expected snapshot readConcern to fail on standalone mongod");
+
+// Standalones don't support any operations with clusterTime.
+assert.commandFailedWithCode(
+ t.runCommand(
+ {find: "read_concern", readConcern: {level: "local", afterClusterTime: Timestamp(0, 1)}}),
+ [ErrorCodes.IllegalOperation, ErrorCodes.NotImplemented],
+ "expected afterClusterTime read to fail on standalone mongod");
})();
\ No newline at end of file
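For reference, the raw command shapes under test, as one would issue them by hand against a standalone (same database as the test collection):

    // Accepted on a standalone:
    db.runCommand({find: "read_concern", readConcern: {level: "local"}});
    db.runCommand({find: "read_concern", readConcern: {level: "available"}});
    // Rejected on a standalone (no snapshots, no cluster time):
    db.runCommand({find: "read_concern", readConcern: {level: "snapshot"}});
    db.runCommand(
        {find: "read_concern", readConcern: {level: "local", afterClusterTime: Timestamp(0, 1)}});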
diff --git a/jstests/core_standalone/write_concern.js b/jstests/core_standalone/write_concern.js
index fd4036e6925..c02da6bb6f8 100644
--- a/jstests/core_standalone/write_concern.js
+++ b/jstests/core_standalone/write_concern.js
@@ -1,18 +1,16 @@
// This test verifies writeConcern behavior on a standalone mongod or embedded mongoed.
(function() {
- 'use strict';
+'use strict';
- var col = db.write_concern;
- col.drop();
+var col = db.write_concern;
+col.drop();
- // Supported writeConcern on standalone
- assert.commandWorked(col.insert({_id: 0}, {writeConcern: {w: 0}}));
- assert.commandWorked(col.insert({_id: 1}, {writeConcern: {w: 1}}));
- assert.commandWorked(col.insert({_id: "majority"}, {writeConcern: {w: "majority"}}));
-
- // writeConcern: 2 should not work on standalone
- assert.writeError(col.insert({_id: 2}, {writeConcern: {w: 2}}),
- "expected writeConcern: 2 to fail");
+// Supported writeConcern on standalone
+assert.commandWorked(col.insert({_id: 0}, {writeConcern: {w: 0}}));
+assert.commandWorked(col.insert({_id: 1}, {writeConcern: {w: 1}}));
+assert.commandWorked(col.insert({_id: "majority"}, {writeConcern: {w: "majority"}}));
+// writeConcern: 2 should not work on standalone
+assert.writeError(col.insert({_id: 2}, {writeConcern: {w: 2}}), "expected writeConcern: 2 to fail");
})();
\ No newline at end of file
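In shorthand, the write concerns a single node can and cannot satisfy (collection name hypothetical):

    var demo = db.write_concern_demo;  // hypothetical collection
    demo.drop();
    // w: 0, w: 1, and w: "majority" are satisfiable by one node (it is its own majority).
    assert.commandWorked(demo.insert({_id: 1}, {writeConcern: {w: 1}}));
    assert.commandWorked(demo.insert({_id: "m"}, {writeConcern: {w: "majority"}}));
    // w: 2 requires a second data-bearing member, so it fails on a standalone.
    assert.writeError(demo.insert({_id: 2}, {writeConcern: {w: 2}}));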
diff --git a/jstests/decimal/decimal128_test1.js b/jstests/decimal/decimal128_test1.js
index 2febf2bf125..6cf083341e4 100644
--- a/jstests/decimal/decimal128_test1.js
+++ b/jstests/decimal/decimal128_test1.js
@@ -3,184 +3,161 @@
*/
(function() {
- "use strict";
+"use strict";
- var testData = [
- {"description": "Special - Canonical NaN", "input": "NaN"},
- {"description": "Special - Negative NaN", "input": "NaN", "lossy": true},
- {
- "description": "Special - Negative NaN",
- "expected": "NaN",
- "input": "-NaN",
- "lossy": true
- },
- {"description": "Special - Canonical SNaN", "input": "NaN", "lossy": true},
- {"description": "Special - Negative SNaN", "input": "NaN", "lossy": true},
- {"description": "Special - NaN with a payload", "input": "NaN", "lossy": true},
- {"description": "Special - Canonical Positive Infinity", "input": "Infinity"},
- {"description": "Special - Canonical Negative Infinity", "input": "-Infinity"},
- {
- "description": "Special - Invalid representation treated as 0",
- "input": "0",
- "lossy": true
- },
- {
- "description": "Special - Invalid representation treated as -0",
- "input": "-0",
- "lossy": true
- },
- {
- "description": "Special - Invalid representation treated as 0E3",
- "input": "0E+3",
- "lossy": true
- },
- {
- "description": "Regular - Adjusted Exponent Limit",
- "input": "0.000001234567890123456789012345678901234"
- },
- {"description": "Regular - Smallest", "input": "0.001234"},
- {"description": "Regular - Smallest with Trailing Zeros", "input": "0.00123400000"},
- {"description": "Regular - 0.1", "input": "0.1"},
- {
- "description": "Regular - 0.1234567890123456789012345678901234",
- "input": "0.1234567890123456789012345678901234"
- },
- {"description": "Regular - 0", "input": "0"},
- {"description": "Regular - -0", "input": "-0"},
- {"description": "Regular - -0.0", "input": "-0.0"},
- {"description": "Regular - 2", "input": "2"},
- {"description": "Regular - 2.000", "input": "2.000"},
- {"description": "Regular - Largest", "input": "1234567890123456789012345678901234"},
- {
- "description": "Scientific - Tiniest",
- "input": "9.999999999999999999999999999999999E-6143"
- },
- {"description": "Scientific - Tiny", "input": "1E-6176"},
- {"description": "Scientific - Negative Tiny", "input": "-1E-6176"},
- {
- "description": "Scientific - Adjusted Exponent Limit",
- "input": "1.234567890123456789012345678901234E-7"
- },
- {"description": "Scientific - Fractional", "input": "-1.00E-8"},
- {"description": "Scientific - 0 with Exponent", "input": "0E+6000"},
- {"description": "Scientific - 0 with Negative Exponent", "input": "0E-611"},
- {"description": "Scientific - No Decimal with Signed Exponent", "input": "1E+3"},
- {"description": "Scientific - Trailing Zero", "input": "1.050E+4"},
- {"description": "Scientific - With Decimal", "input": "1.05E+3"},
- {"description": "Scientific - Full", "input": "5192296858534827628530496329220095"},
- {"description": "Scientific - Large", "input": "1.000000000000000000000000000000000E+6144"},
- {
- "description": "Scientific - Largest",
- "input": "9.999999999999999999999999999999999E+6144"
- },
- {
- "description": "Non-Canonical Parsing - Exponent Normalization",
- "input": "-100E-10",
- "expected": "-1.00E-8"
- },
- {
- "description": "Non-Canonical Parsing - Unsigned Positive Exponent",
- "input": "1E3",
- "expected": "1E+3"
- },
- {
- "description": "Non-Canonical Parsing - Lowercase Exponent Identifier",
- "input": "1e+3",
- "expected": "1E+3"
- },
- {
- "description": "Non-Canonical Parsing - Long Significand with Exponent",
- "input": "12345689012345789012345E+12",
- "expected": "1.2345689012345789012345E+34"
- },
- {
- "description": "Non-Canonical Parsing - Positive Sign",
- "input": "+1234567890123456789012345678901234",
- "expected": "1234567890123456789012345678901234"
- },
- {
- "description": "Non-Canonical Parsing - Long Decimal String",
- "input":
- ".0000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "0000000000000000000000000000000000000001",
- "expected": "1E-999"
- },
- {"description": "Non-Canonical Parsing - nan", "input": "nan", "expected": "NaN"},
- {"description": "Non-Canonical Parsing - nAn", "input": "nAn", "expected": "NaN"},
- {
- "description": "Non-Canonical Parsing - +infinity",
- "input": "+infinity",
- "expected": "Infinity"
- },
- {
- "description": "Non-Canonical Parsing - infinity",
- "input": "infinity",
- "expected": "Infinity"
- },
- {
- "description": "Non-Canonical Parsing - infiniTY",
- "input": "infiniTY",
- "expected": "Infinity"
- },
- {"description": "Non-Canonical Parsing - inf", "input": "inf", "expected": "Infinity"},
- {"description": "Non-Canonical Parsing - inF", "input": "inF", "expected": "Infinity"},
- {
- "description": "Non-Canonical Parsing - -infinity",
- "input": "-infinity",
- "expected": "-Infinity"
- },
- {
- "description": "Non-Canonical Parsing - -infiniTy",
- "input": "-infiniTy",
- "expected": "-Infinity"
- },
- {
- "description": "Non-Canonical Parsing - -Inf",
- "input": "-Infinity",
- "expected": "-Infinity"
- },
- {"description": "Non-Canonical Parsing - -inf", "input": "-inf", "expected": "-Infinity"},
- {"description": "Non-Canonical Parsing - -inF", "input": "-inF", "expected": "-Infinity"},
- {"description": "Rounded Subnormal number", "input": "10E-6177", "expected": "1E-6176"},
- {"description": "Clamped", "input": "1E6112", "expected": "1.0E+6112"},
- {
- "description": "Exact rounding",
- "input":
- "100000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
- "0000000000000000000000000000",
- "expected": "1.000000000000000000000000000000000E+999"
- }
- ];
+var testData = [
+ {"description": "Special - Canonical NaN", "input": "NaN"},
+ {"description": "Special - Negative NaN", "input": "NaN", "lossy": true},
+ {"description": "Special - Negative NaN", "expected": "NaN", "input": "-NaN", "lossy": true},
+ {"description": "Special - Canonical SNaN", "input": "NaN", "lossy": true},
+ {"description": "Special - Negative SNaN", "input": "NaN", "lossy": true},
+ {"description": "Special - NaN with a payload", "input": "NaN", "lossy": true},
+ {"description": "Special - Canonical Positive Infinity", "input": "Infinity"},
+ {"description": "Special - Canonical Negative Infinity", "input": "-Infinity"},
+ {"description": "Special - Invalid representation treated as 0", "input": "0", "lossy": true},
+ {"description": "Special - Invalid representation treated as -0", "input": "-0", "lossy": true},
+ {
+ "description": "Special - Invalid representation treated as 0E3",
+ "input": "0E+3",
+ "lossy": true
+ },
+ {
+ "description": "Regular - Adjusted Exponent Limit",
+ "input": "0.000001234567890123456789012345678901234"
+ },
+ {"description": "Regular - Smallest", "input": "0.001234"},
+ {"description": "Regular - Smallest with Trailing Zeros", "input": "0.00123400000"},
+ {"description": "Regular - 0.1", "input": "0.1"},
+ {
+ "description": "Regular - 0.1234567890123456789012345678901234",
+ "input": "0.1234567890123456789012345678901234"
+ },
+ {"description": "Regular - 0", "input": "0"},
+ {"description": "Regular - -0", "input": "-0"},
+ {"description": "Regular - -0.0", "input": "-0.0"},
+ {"description": "Regular - 2", "input": "2"},
+ {"description": "Regular - 2.000", "input": "2.000"},
+ {"description": "Regular - Largest", "input": "1234567890123456789012345678901234"},
+ {"description": "Scientific - Tiniest", "input": "9.999999999999999999999999999999999E-6143"},
+ {"description": "Scientific - Tiny", "input": "1E-6176"},
+ {"description": "Scientific - Negative Tiny", "input": "-1E-6176"},
+ {
+ "description": "Scientific - Adjusted Exponent Limit",
+ "input": "1.234567890123456789012345678901234E-7"
+ },
+ {"description": "Scientific - Fractional", "input": "-1.00E-8"},
+ {"description": "Scientific - 0 with Exponent", "input": "0E+6000"},
+ {"description": "Scientific - 0 with Negative Exponent", "input": "0E-611"},
+ {"description": "Scientific - No Decimal with Signed Exponent", "input": "1E+3"},
+ {"description": "Scientific - Trailing Zero", "input": "1.050E+4"},
+ {"description": "Scientific - With Decimal", "input": "1.05E+3"},
+ {"description": "Scientific - Full", "input": "5192296858534827628530496329220095"},
+ {"description": "Scientific - Large", "input": "1.000000000000000000000000000000000E+6144"},
+ {"description": "Scientific - Largest", "input": "9.999999999999999999999999999999999E+6144"},
+ {
+ "description": "Non-Canonical Parsing - Exponent Normalization",
+ "input": "-100E-10",
+ "expected": "-1.00E-8"
+ },
+ {
+ "description": "Non-Canonical Parsing - Unsigned Positive Exponent",
+ "input": "1E3",
+ "expected": "1E+3"
+ },
+ {
+ "description": "Non-Canonical Parsing - Lowercase Exponent Identifier",
+ "input": "1e+3",
+ "expected": "1E+3"
+ },
+ {
+ "description": "Non-Canonical Parsing - Long Significand with Exponent",
+ "input": "12345689012345789012345E+12",
+ "expected": "1.2345689012345789012345E+34"
+ },
+ {
+ "description": "Non-Canonical Parsing - Positive Sign",
+ "input": "+1234567890123456789012345678901234",
+ "expected": "1234567890123456789012345678901234"
+ },
+ {
+ "description": "Non-Canonical Parsing - Long Decimal String",
+ "input":
+ ".0000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "00000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000000000000001",
+ "expected": "1E-999"
+ },
+ {"description": "Non-Canonical Parsing - nan", "input": "nan", "expected": "NaN"},
+ {"description": "Non-Canonical Parsing - nAn", "input": "nAn", "expected": "NaN"},
+ {
+ "description": "Non-Canonical Parsing - +infinity",
+ "input": "+infinity",
+ "expected": "Infinity"
+ },
+ {
+ "description": "Non-Canonical Parsing - infinity",
+ "input": "infinity",
+ "expected": "Infinity"
+ },
+ {
+ "description": "Non-Canonical Parsing - infiniTY",
+ "input": "infiniTY",
+ "expected": "Infinity"
+ },
+ {"description": "Non-Canonical Parsing - inf", "input": "inf", "expected": "Infinity"},
+ {"description": "Non-Canonical Parsing - inF", "input": "inF", "expected": "Infinity"},
+ {
+ "description": "Non-Canonical Parsing - -infinity",
+ "input": "-infinity",
+ "expected": "-Infinity"
+ },
+ {
+ "description": "Non-Canonical Parsing - -infiniTy",
+ "input": "-infiniTy",
+ "expected": "-Infinity"
+ },
+ {"description": "Non-Canonical Parsing - -Inf", "input": "-Infinity", "expected": "-Infinity"},
+ {"description": "Non-Canonical Parsing - -inf", "input": "-inf", "expected": "-Infinity"},
+ {"description": "Non-Canonical Parsing - -inF", "input": "-inF", "expected": "-Infinity"},
+ {"description": "Rounded Subnormal number", "input": "10E-6177", "expected": "1E-6176"},
+ {"description": "Clamped", "input": "1E6112", "expected": "1.0E+6112"},
+ {
+ "description": "Exact rounding",
+ "input":
+ "100000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000",
+ "expected": "1.000000000000000000000000000000000E+999"
+ }
+];
- testData.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- var output = NumberDecimal(testCase.input).toString();
- if (testCase.expected) {
- assert.eq(output, `NumberDecimal("${testCase.expected}")`);
- } else {
- assert.eq(output, `NumberDecimal("${testCase.input}")`);
- }
- });
+testData.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ var output = NumberDecimal(testCase.input).toString();
+ if (testCase.expected) {
+ assert.eq(output, `NumberDecimal("${testCase.expected}")`);
+ } else {
+ assert.eq(output, `NumberDecimal("${testCase.input}")`);
+ }
+});
}());
\ No newline at end of file
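The round-trip convention the loop relies on, spelled out with a few cases from the table above (shell sketch):

    // toString() re-serializes in canonical form; 'expected' entries capture exactly this.
    assert.eq(NumberDecimal("1e+3").toString(), 'NumberDecimal("1E+3")');    // exponent sign added, 'e' uppercased
    assert.eq(NumberDecimal("nAn").toString(), 'NumberDecimal("NaN")');      // specials are case-folded
    assert.eq(NumberDecimal("2.000").toString(), 'NumberDecimal("2.000")');  // trailing zeros preserved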
diff --git a/jstests/decimal/decimal128_test2.js b/jstests/decimal/decimal128_test2.js
index cb1ad11c0f8..ffbc3704225 100644
--- a/jstests/decimal/decimal128_test2.js
+++ b/jstests/decimal/decimal128_test2.js
@@ -3,317 +3,302 @@
*/
(function() {
- var data = [
- {"description": "[decq021] Normality", "input": "-1234567890123456789012345678901234"},
- {
- "description": "[decq823] values around [u]int32 edges (zeros done earlier)",
- "input": "-2147483649"
- },
- {
- "description": "[decq822] values around [u]int32 edges (zeros done earlier)",
- "input": "-2147483648"
- },
- {
- "description": "[decq821] values around [u]int32 edges (zeros done earlier)",
- "input": "-2147483647"
- },
- {
- "description": "[decq820] values around [u]int32 edges (zeros done earlier)",
- "input": "-2147483646"
- },
- {"description": "[decq152] fold-downs (more below)", "input": "-12345"},
- {"description": "[decq154] fold-downs (more below)", "input": "-1234"},
- {"description": "[decq006] derivative canonical plain strings", "input": "-750"},
- {"description": "[decq164] fold-downs (more below)", "input": "-123.45"},
- {"description": "[decq156] fold-downs (more below)", "input": "-123"},
- {"description": "[decq008] derivative canonical plain strings", "input": "-75.0"},
- {"description": "[decq158] fold-downs (more below)", "input": "-12"},
- {
- "description": "[decq122] Nmax and similar",
- "input": "-9.999999999999999999999999999999999E+6144"
- },
- {
- "description": "[decq002] (mostly derived from the Strawman 4 document and examples)",
- "input": "-7.50"
- },
- {"description": "[decq004] derivative canonical plain strings", "input": "-7.50E+3"},
- {"description": "[decq018] derivative canonical plain strings", "input": "-7.50E-7"},
- {
- "description": "[decq125] Nmax and similar",
- "input": "-1.234567890123456789012345678901234E+6144"
- },
- {
- "description": "[decq131] fold-downs (more below)",
- "input": "-1.230000000000000000000000000000000E+6144"
- },
- {"description": "[decq162] fold-downs (more below)", "input": "-1.23"},
- {
- "description": "[decq176] Nmin and below",
- "input": "-1.000000000000000000000000000000001E-6143"
- },
- {
- "description": "[decq174] Nmin and below",
- "input": "-1.000000000000000000000000000000000E-6143"
- },
- {
- "description": "[decq133] fold-downs (more below)",
- "input": "-1.000000000000000000000000000000000E+6144"
- },
- {"description": "[decq160] fold-downs (more below)", "input": "-1"},
- {"description": "[decq172] Nmin and below", "input": "-1E-6143"},
- {"description": "[decq010] derivative canonical plain strings", "input": "-0.750"},
- {"description": "[decq012] derivative canonical plain strings", "input": "-0.0750"},
- {"description": "[decq014] derivative canonical plain strings", "input": "-0.000750"},
- {"description": "[decq016] derivative canonical plain strings", "input": "-0.00000750"},
- {"description": "[decq404] zeros", "input": "0E-6176"},
- {"description": "[decq424] negative zeros", "input": "-0E-6176"},
- {"description": "[decq407] zeros", "input": "0.00"},
- {"description": "[decq427] negative zeros", "input": "-0.00"},
- {"description": "[decq409] zeros", "input": "0"},
- {"description": "[decq428] negative zeros", "input": "-0"},
- {"description": "[decq700] Selected DPD codes", "input": "0"},
- {"description": "[decq406] zeros", "input": "0.00"},
- {"description": "[decq426] negative zeros", "input": "-0.00"},
- {"description": "[decq410] zeros", "input": "0E+3"},
- {"description": "[decq431] negative zeros", "input": "-0E+3"},
- {"description": "[decq419] clamped zeros...", "input": "0E+6111"},
- {"description": "[decq432] negative zeros", "input": "-0E+6111"},
- {"description": "[decq405] zeros", "input": "0E-6176"},
- {"description": "[decq425] negative zeros", "input": "-0E-6176"},
- {"description": "[decq508] Specials", "input": "Infinity"},
- {"description": "[decq528] Specials", "input": "-Infinity"},
- {"description": "[decq541] Specials", "input": "NaN"},
- {
- "description": "[decq074] Nmin and below",
- "input": "1.000000000000000000000000000000000E-6143"
- },
- {
- "description": "[decq602] fold-down full sequence",
- "input": "1.000000000000000000000000000000000E+6144"
- },
- {
- "description": "[decq604] fold-down full sequence",
- "input": "1.00000000000000000000000000000000E+6143"
- },
- {
- "description": "[decq606] fold-down full sequence",
- "input": "1.0000000000000000000000000000000E+6142"
- },
- {
- "description": "[decq608] fold-down full sequence",
- "input": "1.000000000000000000000000000000E+6141"
- },
- {
- "description": "[decq610] fold-down full sequence",
- "input": "1.00000000000000000000000000000E+6140"
- },
- {
- "description": "[decq612] fold-down full sequence",
- "input": "1.0000000000000000000000000000E+6139"
- },
- {
- "description": "[decq614] fold-down full sequence",
- "input": "1.000000000000000000000000000E+6138"
- },
- {
- "description": "[decq616] fold-down full sequence",
- "input": "1.00000000000000000000000000E+6137"
- },
- {
- "description": "[decq618] fold-down full sequence",
- "input": "1.0000000000000000000000000E+6136"
- },
- {
- "description": "[decq620] fold-down full sequence",
- "input": "1.000000000000000000000000E+6135"
- },
- {
- "description": "[decq622] fold-down full sequence",
- "input": "1.00000000000000000000000E+6134"
- },
- {
- "description": "[decq624] fold-down full sequence",
- "input": "1.0000000000000000000000E+6133"
- },
- {
- "description": "[decq626] fold-down full sequence",
- "input": "1.000000000000000000000E+6132"
- },
- {
- "description": "[decq628] fold-down full sequence",
- "input": "1.00000000000000000000E+6131"
- },
- {
- "description": "[decq630] fold-down full sequence",
- "input": "1.0000000000000000000E+6130"
- },
- {"description": "[decq632] fold-down full sequence", "input": "1.000000000000000000E+6129"},
- {"description": "[decq634] fold-down full sequence", "input": "1.00000000000000000E+6128"},
- {"description": "[decq636] fold-down full sequence", "input": "1.0000000000000000E+6127"},
- {"description": "[decq638] fold-down full sequence", "input": "1.000000000000000E+6126"},
- {"description": "[decq640] fold-down full sequence", "input": "1.00000000000000E+6125"},
- {"description": "[decq642] fold-down full sequence", "input": "1.0000000000000E+6124"},
- {"description": "[decq644] fold-down full sequence", "input": "1.000000000000E+6123"},
- {"description": "[decq646] fold-down full sequence", "input": "1.00000000000E+6122"},
- {"description": "[decq648] fold-down full sequence", "input": "1.0000000000E+6121"},
- {"description": "[decq650] fold-down full sequence", "input": "1.000000000E+6120"},
- {"description": "[decq652] fold-down full sequence", "input": "1.00000000E+6119"},
- {"description": "[decq654] fold-down full sequence", "input": "1.0000000E+6118"},
- {"description": "[decq656] fold-down full sequence", "input": "1.000000E+6117"},
- {"description": "[decq658] fold-down full sequence", "input": "1.00000E+6116"},
- {"description": "[decq660] fold-down full sequence", "input": "1.0000E+6115"},
- {"description": "[decq662] fold-down full sequence", "input": "1.000E+6114"},
- {"description": "[decq664] fold-down full sequence", "input": "1.00E+6113"},
- {"description": "[decq666] fold-down full sequence", "input": "1.0E+6112"},
- {"description": "[decq060] fold-downs (more below)", "input": "1"},
- {"description": "[decq670] fold-down full sequence", "input": "1E+6110"},
- {"description": "[decq668] fold-down full sequence", "input": "1E+6111"},
- {"description": "[decq072] Nmin and below", "input": "1E-6143"},
- {
- "description": "[decq076] Nmin and below",
- "input": "1.000000000000000000000000000000001E-6143"
- },
- {
- "description": "[decq036] fold-downs (more below)",
- "input": "1.230000000000000000000000000000000E+6144"
- },
- {"description": "[decq062] fold-downs (more below)", "input": "1.23"},
- {
- "description": "[decq034] Nmax and similar",
- "input": "1.234567890123456789012345678901234E+6144"
- },
- {"description": "[decq441] exponent lengths", "input": "7"},
- {"description": "[decq449] exponent lengths", "input": "7E+5999"},
- {"description": "[decq447] exponent lengths", "input": "7E+999"},
- {"description": "[decq445] exponent lengths", "input": "7E+99"},
- {"description": "[decq443] exponent lengths", "input": "7E+9"},
- {
- "description": "[decq842] VG testcase",
- "input": "7.049000000000010795488000000000000E-3097"
- },
- {"description": "[decq841] VG testcase", "input": "8.000000000000000000E-1550"},
- {"description": "[decq840] VG testcase", "input": "8.81125000000001349436E-1548"},
- {"description": "[decq701] Selected DPD codes", "input": "9"},
- {
- "description": "[decq032] Nmax and similar",
- "input": "9.999999999999999999999999999999999E+6144"
- },
- {"description": "[decq702] Selected DPD codes", "input": "10"},
- {"description": "[decq057] fold-downs (more below)", "input": "12"},
- {"description": "[decq703] Selected DPD codes", "input": "19"},
- {"description": "[decq704] Selected DPD codes", "input": "20"},
- {"description": "[decq705] Selected DPD codes", "input": "29"},
- {"description": "[decq706] Selected DPD codes", "input": "30"},
- {"description": "[decq707] Selected DPD codes", "input": "39"},
- {"description": "[decq708] Selected DPD codes", "input": "40"},
- {"description": "[decq709] Selected DPD codes", "input": "49"},
- {"description": "[decq710] Selected DPD codes", "input": "50"},
- {"description": "[decq711] Selected DPD codes", "input": "59"},
- {"description": "[decq712] Selected DPD codes", "input": "60"},
- {"description": "[decq713] Selected DPD codes", "input": "69"},
- {"description": "[decq714] Selected DPD codes", "input": "70"},
- {"description": "[decq715] Selected DPD codes", "input": "71"},
- {"description": "[decq716] Selected DPD codes", "input": "72"},
- {"description": "[decq717] Selected DPD codes", "input": "73"},
- {"description": "[decq718] Selected DPD codes", "input": "74"},
- {"description": "[decq719] Selected DPD codes", "input": "75"},
- {"description": "[decq720] Selected DPD codes", "input": "76"},
- {"description": "[decq721] Selected DPD codes", "input": "77"},
- {"description": "[decq722] Selected DPD codes", "input": "78"},
- {"description": "[decq723] Selected DPD codes", "input": "79"},
- {"description": "[decq056] fold-downs (more below)", "input": "123"},
- {"description": "[decq064] fold-downs (more below)", "input": "123.45"},
- {"description": "[decq732] Selected DPD codes", "input": "520"},
- {"description": "[decq733] Selected DPD codes", "input": "521"},
- {"description": "[decq740] DPD: one of each of the huffman groups", "input": "777"},
- {"description": "[decq741] DPD: one of each of the huffman groups", "input": "778"},
- {"description": "[decq742] DPD: one of each of the huffman groups", "input": "787"},
- {"description": "[decq746] DPD: one of each of the huffman groups", "input": "799"},
- {"description": "[decq743] DPD: one of each of the huffman groups", "input": "877"},
- {
- "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "888"
- },
- {
- "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "889"
- },
- {
- "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "898"
- },
- {
- "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "899"
- },
- {"description": "[decq745] DPD: one of each of the huffman groups", "input": "979"},
- {
- "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "988"
- },
- {
- "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "989"
- },
- {"description": "[decq730] Selected DPD codes", "input": "994"},
- {"description": "[decq731] Selected DPD codes", "input": "995"},
- {"description": "[decq744] DPD: one of each of the huffman groups", "input": "997"},
- {
- "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "998"
- },
- {
- "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)",
- "input": "999"
- },
- {"description": "[decq053] fold-downs (more below)", "input": "1234"},
- {"description": "[decq052] fold-downs (more below)", "input": "12345"},
- {"description": "[decq792] Miscellaneous (testers' queries, etc.)", "input": "30000"},
- {"description": "[decq793] Miscellaneous (testers' queries, etc.)", "input": "890000"},
- {
- "description": "[decq824] values around [u]int32 edges (zeros done earlier)",
- "input": "2147483646"
- },
- {
- "description": "[decq825] values around [u]int32 edges (zeros done earlier)",
- "input": "2147483647"
- },
- {
- "description": "[decq826] values around [u]int32 edges (zeros done earlier)",
- "input": "2147483648"
- },
- {
- "description": "[decq827] values around [u]int32 edges (zeros done earlier)",
- "input": "2147483649"
- },
- {
- "description": "[decq828] values around [u]int32 edges (zeros done earlier)",
- "input": "4294967294"
- },
- {
- "description": "[decq829] values around [u]int32 edges (zeros done earlier)",
- "input": "4294967295"
- },
- {
- "description": "[decq830] values around [u]int32 edges (zeros done earlier)",
- "input": "4294967296"
- },
- {
- "description": "[decq831] values around [u]int32 edges (zeros done earlier)",
- "input": "4294967297"
- },
- {"description": "[decq022] Normality", "input": "1111111111111111111111111111111111"},
- {"description": "[decq020] Normality", "input": "1234567890123456789012345678901234"},
- {"description": "[decq550] Specials", "input": "9999999999999999999999999999999999"}
- ];
+var data = [
+ {"description": "[decq021] Normality", "input": "-1234567890123456789012345678901234"},
+ {
+ "description": "[decq823] values around [u]int32 edges (zeros done earlier)",
+ "input": "-2147483649"
+ },
+ {
+ "description": "[decq822] values around [u]int32 edges (zeros done earlier)",
+ "input": "-2147483648"
+ },
+ {
+ "description": "[decq821] values around [u]int32 edges (zeros done earlier)",
+ "input": "-2147483647"
+ },
+ {
+ "description": "[decq820] values around [u]int32 edges (zeros done earlier)",
+ "input": "-2147483646"
+ },
+ {"description": "[decq152] fold-downs (more below)", "input": "-12345"},
+ {"description": "[decq154] fold-downs (more below)", "input": "-1234"},
+ {"description": "[decq006] derivative canonical plain strings", "input": "-750"},
+ {"description": "[decq164] fold-downs (more below)", "input": "-123.45"},
+ {"description": "[decq156] fold-downs (more below)", "input": "-123"},
+ {"description": "[decq008] derivative canonical plain strings", "input": "-75.0"},
+ {"description": "[decq158] fold-downs (more below)", "input": "-12"},
+ {
+ "description": "[decq122] Nmax and similar",
+ "input": "-9.999999999999999999999999999999999E+6144"
+ },
+ {
+ "description": "[decq002] (mostly derived from the Strawman 4 document and examples)",
+ "input": "-7.50"
+ },
+ {"description": "[decq004] derivative canonical plain strings", "input": "-7.50E+3"},
+ {"description": "[decq018] derivative canonical plain strings", "input": "-7.50E-7"},
+ {
+ "description": "[decq125] Nmax and similar",
+ "input": "-1.234567890123456789012345678901234E+6144"
+ },
+ {
+ "description": "[decq131] fold-downs (more below)",
+ "input": "-1.230000000000000000000000000000000E+6144"
+ },
+ {"description": "[decq162] fold-downs (more below)", "input": "-1.23"},
+ {
+ "description": "[decq176] Nmin and below",
+ "input": "-1.000000000000000000000000000000001E-6143"
+ },
+ {
+ "description": "[decq174] Nmin and below",
+ "input": "-1.000000000000000000000000000000000E-6143"
+ },
+ {
+ "description": "[decq133] fold-downs (more below)",
+ "input": "-1.000000000000000000000000000000000E+6144"
+ },
+ {"description": "[decq160] fold-downs (more below)", "input": "-1"},
+ {"description": "[decq172] Nmin and below", "input": "-1E-6143"},
+ {"description": "[decq010] derivative canonical plain strings", "input": "-0.750"},
+ {"description": "[decq012] derivative canonical plain strings", "input": "-0.0750"},
+ {"description": "[decq014] derivative canonical plain strings", "input": "-0.000750"},
+ {"description": "[decq016] derivative canonical plain strings", "input": "-0.00000750"},
+ {"description": "[decq404] zeros", "input": "0E-6176"},
+ {"description": "[decq424] negative zeros", "input": "-0E-6176"},
+ {"description": "[decq407] zeros", "input": "0.00"},
+ {"description": "[decq427] negative zeros", "input": "-0.00"},
+ {"description": "[decq409] zeros", "input": "0"},
+ {"description": "[decq428] negative zeros", "input": "-0"},
+ {"description": "[decq700] Selected DPD codes", "input": "0"},
+ {"description": "[decq406] zeros", "input": "0.00"},
+ {"description": "[decq426] negative zeros", "input": "-0.00"},
+ {"description": "[decq410] zeros", "input": "0E+3"},
+ {"description": "[decq431] negative zeros", "input": "-0E+3"},
+ {"description": "[decq419] clamped zeros...", "input": "0E+6111"},
+ {"description": "[decq432] negative zeros", "input": "-0E+6111"},
+ {"description": "[decq405] zeros", "input": "0E-6176"},
+ {"description": "[decq425] negative zeros", "input": "-0E-6176"},
+ {"description": "[decq508] Specials", "input": "Infinity"},
+ {"description": "[decq528] Specials", "input": "-Infinity"},
+ {"description": "[decq541] Specials", "input": "NaN"},
+ {
+ "description": "[decq074] Nmin and below",
+ "input": "1.000000000000000000000000000000000E-6143"
+ },
+ {
+ "description": "[decq602] fold-down full sequence",
+ "input": "1.000000000000000000000000000000000E+6144"
+ },
+ {
+ "description": "[decq604] fold-down full sequence",
+ "input": "1.00000000000000000000000000000000E+6143"
+ },
+ {
+ "description": "[decq606] fold-down full sequence",
+ "input": "1.0000000000000000000000000000000E+6142"
+ },
+ {
+ "description": "[decq608] fold-down full sequence",
+ "input": "1.000000000000000000000000000000E+6141"
+ },
+ {
+ "description": "[decq610] fold-down full sequence",
+ "input": "1.00000000000000000000000000000E+6140"
+ },
+ {
+ "description": "[decq612] fold-down full sequence",
+ "input": "1.0000000000000000000000000000E+6139"
+ },
+ {
+ "description": "[decq614] fold-down full sequence",
+ "input": "1.000000000000000000000000000E+6138"
+ },
+ {
+ "description": "[decq616] fold-down full sequence",
+ "input": "1.00000000000000000000000000E+6137"
+ },
+ {
+ "description": "[decq618] fold-down full sequence",
+ "input": "1.0000000000000000000000000E+6136"
+ },
+ {
+ "description": "[decq620] fold-down full sequence",
+ "input": "1.000000000000000000000000E+6135"
+ },
+ {
+ "description": "[decq622] fold-down full sequence",
+ "input": "1.00000000000000000000000E+6134"
+ },
+ {"description": "[decq624] fold-down full sequence", "input": "1.0000000000000000000000E+6133"},
+ {"description": "[decq626] fold-down full sequence", "input": "1.000000000000000000000E+6132"},
+ {"description": "[decq628] fold-down full sequence", "input": "1.00000000000000000000E+6131"},
+ {"description": "[decq630] fold-down full sequence", "input": "1.0000000000000000000E+6130"},
+ {"description": "[decq632] fold-down full sequence", "input": "1.000000000000000000E+6129"},
+ {"description": "[decq634] fold-down full sequence", "input": "1.00000000000000000E+6128"},
+ {"description": "[decq636] fold-down full sequence", "input": "1.0000000000000000E+6127"},
+ {"description": "[decq638] fold-down full sequence", "input": "1.000000000000000E+6126"},
+ {"description": "[decq640] fold-down full sequence", "input": "1.00000000000000E+6125"},
+ {"description": "[decq642] fold-down full sequence", "input": "1.0000000000000E+6124"},
+ {"description": "[decq644] fold-down full sequence", "input": "1.000000000000E+6123"},
+ {"description": "[decq646] fold-down full sequence", "input": "1.00000000000E+6122"},
+ {"description": "[decq648] fold-down full sequence", "input": "1.0000000000E+6121"},
+ {"description": "[decq650] fold-down full sequence", "input": "1.000000000E+6120"},
+ {"description": "[decq652] fold-down full sequence", "input": "1.00000000E+6119"},
+ {"description": "[decq654] fold-down full sequence", "input": "1.0000000E+6118"},
+ {"description": "[decq656] fold-down full sequence", "input": "1.000000E+6117"},
+ {"description": "[decq658] fold-down full sequence", "input": "1.00000E+6116"},
+ {"description": "[decq660] fold-down full sequence", "input": "1.0000E+6115"},
+ {"description": "[decq662] fold-down full sequence", "input": "1.000E+6114"},
+ {"description": "[decq664] fold-down full sequence", "input": "1.00E+6113"},
+ {"description": "[decq666] fold-down full sequence", "input": "1.0E+6112"},
+ {"description": "[decq060] fold-downs (more below)", "input": "1"},
+ {"description": "[decq670] fold-down full sequence", "input": "1E+6110"},
+ {"description": "[decq668] fold-down full sequence", "input": "1E+6111"},
+ {"description": "[decq072] Nmin and below", "input": "1E-6143"},
+ {
+ "description": "[decq076] Nmin and below",
+ "input": "1.000000000000000000000000000000001E-6143"
+ },
+ {
+ "description": "[decq036] fold-downs (more below)",
+ "input": "1.230000000000000000000000000000000E+6144"
+ },
+ {"description": "[decq062] fold-downs (more below)", "input": "1.23"},
+ {
+ "description": "[decq034] Nmax and similar",
+ "input": "1.234567890123456789012345678901234E+6144"
+ },
+ {"description": "[decq441] exponent lengths", "input": "7"},
+ {"description": "[decq449] exponent lengths", "input": "7E+5999"},
+ {"description": "[decq447] exponent lengths", "input": "7E+999"},
+ {"description": "[decq445] exponent lengths", "input": "7E+99"},
+ {"description": "[decq443] exponent lengths", "input": "7E+9"},
+ {"description": "[decq842] VG testcase", "input": "7.049000000000010795488000000000000E-3097"},
+ {"description": "[decq841] VG testcase", "input": "8.000000000000000000E-1550"},
+ {"description": "[decq840] VG testcase", "input": "8.81125000000001349436E-1548"},
+ {"description": "[decq701] Selected DPD codes", "input": "9"},
+ {
+ "description": "[decq032] Nmax and similar",
+ "input": "9.999999999999999999999999999999999E+6144"
+ },
+ {"description": "[decq702] Selected DPD codes", "input": "10"},
+ {"description": "[decq057] fold-downs (more below)", "input": "12"},
+ {"description": "[decq703] Selected DPD codes", "input": "19"},
+ {"description": "[decq704] Selected DPD codes", "input": "20"},
+ {"description": "[decq705] Selected DPD codes", "input": "29"},
+ {"description": "[decq706] Selected DPD codes", "input": "30"},
+ {"description": "[decq707] Selected DPD codes", "input": "39"},
+ {"description": "[decq708] Selected DPD codes", "input": "40"},
+ {"description": "[decq709] Selected DPD codes", "input": "49"},
+ {"description": "[decq710] Selected DPD codes", "input": "50"},
+ {"description": "[decq711] Selected DPD codes", "input": "59"},
+ {"description": "[decq712] Selected DPD codes", "input": "60"},
+ {"description": "[decq713] Selected DPD codes", "input": "69"},
+ {"description": "[decq714] Selected DPD codes", "input": "70"},
+ {"description": "[decq715] Selected DPD codes", "input": "71"},
+ {"description": "[decq716] Selected DPD codes", "input": "72"},
+ {"description": "[decq717] Selected DPD codes", "input": "73"},
+ {"description": "[decq718] Selected DPD codes", "input": "74"},
+ {"description": "[decq719] Selected DPD codes", "input": "75"},
+ {"description": "[decq720] Selected DPD codes", "input": "76"},
+ {"description": "[decq721] Selected DPD codes", "input": "77"},
+ {"description": "[decq722] Selected DPD codes", "input": "78"},
+ {"description": "[decq723] Selected DPD codes", "input": "79"},
+ {"description": "[decq056] fold-downs (more below)", "input": "123"},
+ {"description": "[decq064] fold-downs (more below)", "input": "123.45"},
+ {"description": "[decq732] Selected DPD codes", "input": "520"},
+ {"description": "[decq733] Selected DPD codes", "input": "521"},
+ {"description": "[decq740] DPD: one of each of the huffman groups", "input": "777"},
+ {"description": "[decq741] DPD: one of each of the huffman groups", "input": "778"},
+ {"description": "[decq742] DPD: one of each of the huffman groups", "input": "787"},
+ {"description": "[decq746] DPD: one of each of the huffman groups", "input": "799"},
+ {"description": "[decq743] DPD: one of each of the huffman groups", "input": "877"},
+ {
+ "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "888"
+ },
+ {
+ "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "889"
+ },
+ {
+ "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "898"
+ },
+ {
+ "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "899"
+ },
+ {"description": "[decq745] DPD: one of each of the huffman groups", "input": "979"},
+ {
+ "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "988"
+ },
+ {
+ "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "989"
+ },
+ {"description": "[decq730] Selected DPD codes", "input": "994"},
+ {"description": "[decq731] Selected DPD codes", "input": "995"},
+ {"description": "[decq744] DPD: one of each of the huffman groups", "input": "997"},
+ {
+ "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "998"
+ },
+ {
+ "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)",
+ "input": "999"
+ },
+ {"description": "[decq053] fold-downs (more below)", "input": "1234"},
+ {"description": "[decq052] fold-downs (more below)", "input": "12345"},
+ {"description": "[decq792] Miscellaneous (testers' queries, etc.)", "input": "30000"},
+ {"description": "[decq793] Miscellaneous (testers' queries, etc.)", "input": "890000"},
+ {
+ "description": "[decq824] values around [u]int32 edges (zeros done earlier)",
+ "input": "2147483646"
+ },
+ {
+ "description": "[decq825] values around [u]int32 edges (zeros done earlier)",
+ "input": "2147483647"
+ },
+ {
+ "description": "[decq826] values around [u]int32 edges (zeros done earlier)",
+ "input": "2147483648"
+ },
+ {
+ "description": "[decq827] values around [u]int32 edges (zeros done earlier)",
+ "input": "2147483649"
+ },
+ {
+ "description": "[decq828] values around [u]int32 edges (zeros done earlier)",
+ "input": "4294967294"
+ },
+ {
+ "description": "[decq829] values around [u]int32 edges (zeros done earlier)",
+ "input": "4294967295"
+ },
+ {
+ "description": "[decq830] values around [u]int32 edges (zeros done earlier)",
+ "input": "4294967296"
+ },
+ {
+ "description": "[decq831] values around [u]int32 edges (zeros done earlier)",
+ "input": "4294967297"
+ },
+ {"description": "[decq022] Normality", "input": "1111111111111111111111111111111111"},
+ {"description": "[decq020] Normality", "input": "1234567890123456789012345678901234"},
+ {"description": "[decq550] Specials", "input": "9999999999999999999999999999999999"}
+];
- data.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- var output = NumberDecimal(testCase.input).toString();
- if (testCase.expected) {
- assert.eq(output, `NumberDecimal("${testCase.expected}")`);
- } else {
- assert.eq(output, `NumberDecimal("${testCase.input}")`);
- }
- });
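+// Round-trip each test input through NumberDecimal: parse the string, convert
+// it back to a string, and check the result against the expected canonical
+// form (or against the original input when no "expected" value is given).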
+data.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ var output = NumberDecimal(testCase.input).toString();
+ if (testCase.expected) {
+ assert.eq(output, `NumberDecimal("${testCase.expected}")`);
+ } else {
+ assert.eq(output, `NumberDecimal("${testCase.input}")`);
+ }
+});
}());
\ No newline at end of file
diff --git a/jstests/decimal/decimal128_test3.js b/jstests/decimal/decimal128_test3.js
index b4184615b5f..b50f3e45bd1 100644
--- a/jstests/decimal/decimal128_test3.js
+++ b/jstests/decimal/decimal128_test3.js
@@ -3,670 +3,579 @@
*/
(function() {
- var data = [
- {
- "description": "[basx066] strings without E cannot generate E in result",
- "input": "-00345678.5432",
- "expected": "-345678.5432"
- },
- {
- "description": "[basx065] strings without E cannot generate E in result",
- "input": "-0345678.5432",
- "expected": "-345678.5432"
- },
- {
- "description": "[basx064] strings without E cannot generate E in result",
- "input": "-345678.5432"
- },
- {"description": "[basx041] strings without E cannot generate E in result", "input": "-76"},
- {
- "description": "[basx027] conform to rules and exponent will be in permitted range).",
- "input": "-9.999"
- },
- {
- "description": "[basx026] conform to rules and exponent will be in permitted range).",
- "input": "-9.119"
- },
- {
- "description": "[basx025] conform to rules and exponent will be in permitted range).",
- "input": "-9.11"
- },
- {
- "description": "[basx024] conform to rules and exponent will be in permitted range).",
- "input": "-9.1"
- },
- {
- "description": "[dqbsr531] negatives (Rounded)",
- "input": "-1.1111111111111111111111111111123450",
- "expected": "-1.111111111111111111111111111112345"
- },
- {
- "description": "[basx022] conform to rules and exponent will be in permitted range).",
- "input": "-1.0"
- },
- {
- "description": "[basx021] conform to rules and exponent will be in permitted range).",
- "input": "-1"
- },
- {"description": "[basx601] Zeros", "input": "0.000000000", "expected": "0E-9"},
- {"description": "[basx622] Zeros", "input": "-0.000000000", "expected": "-0E-9"},
- {"description": "[basx602] Zeros", "input": "0.00000000", "expected": "0E-8"},
- {"description": "[basx621] Zeros", "input": "-0.00000000", "expected": "-0E-8"},
- {"description": "[basx603] Zeros", "input": "0.0000000", "expected": "0E-7"},
- {"description": "[basx620] Zeros", "input": "-0.0000000", "expected": "-0E-7"},
- {"description": "[basx604] Zeros", "input": "0.000000"},
- {"description": "[basx619] Zeros", "input": "-0.000000"},
- {"description": "[basx605] Zeros", "input": "0.00000"},
- {"description": "[basx618] Zeros", "input": "-0.00000"},
- {"description": "[basx680] Zeros", "input": "000000.", "expected": "0"},
- {"description": "[basx606] Zeros", "input": "0.0000"},
- {"description": "[basx617] Zeros", "input": "-0.0000"},
- {"description": "[basx681] Zeros", "input": "00000.", "expected": "0"},
- {"description": "[basx686] Zeros", "input": "+00000.", "expected": "0"},
- {"description": "[basx687] Zeros", "input": "-00000.", "expected": "-0"},
- {
- "description": "[basx019] conform to rules and exponent will be in permitted range).",
- "input": "-00.00",
- "expected": "-0.00"
- },
- {"description": "[basx607] Zeros", "input": "0.000"},
- {"description": "[basx616] Zeros", "input": "-0.000"},
- {"description": "[basx682] Zeros", "input": "0000.", "expected": "0"},
- {"description": "[basx155] Numbers with E", "input": "0.000e+0", "expected": "0.000"},
- {"description": "[basx130] Numbers with E", "input": "0.000E-1", "expected": "0.0000"},
- {
- "description": "[basx290] some more negative zeros [systematic tests below]",
- "input": "-0.000E-1",
- "expected": "-0.0000"
- },
- {"description": "[basx131] Numbers with E", "input": "0.000E-2", "expected": "0.00000"},
- {
- "description": "[basx291] some more negative zeros [systematic tests below]",
- "input": "-0.000E-2",
- "expected": "-0.00000"
- },
- {"description": "[basx132] Numbers with E", "input": "0.000E-3", "expected": "0.000000"},
- {
- "description": "[basx292] some more negative zeros [systematic tests below]",
- "input": "-0.000E-3",
- "expected": "-0.000000"
- },
- {"description": "[basx133] Numbers with E", "input": "0.000E-4", "expected": "0E-7"},
- {
- "description": "[basx293] some more negative zeros [systematic tests below]",
- "input": "-0.000E-4",
- "expected": "-0E-7"
- },
- {"description": "[basx608] Zeros", "input": "0.00"},
- {"description": "[basx615] Zeros", "input": "-0.00"},
- {"description": "[basx683] Zeros", "input": "000.", "expected": "0"},
- {"description": "[basx630] Zeros", "input": "0.00E+0", "expected": "0.00"},
- {"description": "[basx670] Zeros", "input": "0.00E-0", "expected": "0.00"},
- {"description": "[basx631] Zeros", "input": "0.00E+1", "expected": "0.0"},
- {"description": "[basx671] Zeros", "input": "0.00E-1", "expected": "0.000"},
- {"description": "[basx134] Numbers with E", "input": "0.00E-2", "expected": "0.0000"},
- {
- "description": "[basx294] some more negative zeros [systematic tests below]",
- "input": "-0.00E-2",
- "expected": "-0.0000"
- },
- {"description": "[basx632] Zeros", "input": "0.00E+2", "expected": "0"},
- {"description": "[basx672] Zeros", "input": "0.00E-2", "expected": "0.0000"},
- {"description": "[basx135] Numbers with E", "input": "0.00E-3", "expected": "0.00000"},
- {
- "description": "[basx295] some more negative zeros [systematic tests below]",
- "input": "-0.00E-3",
- "expected": "-0.00000"
- },
- {"description": "[basx633] Zeros", "input": "0.00E+3", "expected": "0E+1"},
- {"description": "[basx673] Zeros", "input": "0.00E-3", "expected": "0.00000"},
- {"description": "[basx136] Numbers with E", "input": "0.00E-4", "expected": "0.000000"},
- {"description": "[basx674] Zeros", "input": "0.00E-4", "expected": "0.000000"},
- {"description": "[basx634] Zeros", "input": "0.00E+4", "expected": "0E+2"},
- {"description": "[basx137] Numbers with E", "input": "0.00E-5", "expected": "0E-7"},
- {"description": "[basx635] Zeros", "input": "0.00E+5", "expected": "0E+3"},
- {"description": "[basx675] Zeros", "input": "0.00E-5", "expected": "0E-7"},
- {"description": "[basx636] Zeros", "input": "0.00E+6", "expected": "0E+4"},
- {"description": "[basx676] Zeros", "input": "0.00E-6", "expected": "0E-8"},
- {"description": "[basx637] Zeros", "input": "0.00E+7", "expected": "0E+5"},
- {"description": "[basx677] Zeros", "input": "0.00E-7", "expected": "0E-9"},
- {"description": "[basx638] Zeros", "input": "0.00E+8", "expected": "0E+6"},
- {"description": "[basx678] Zeros", "input": "0.00E-8", "expected": "0E-10"},
- {"description": "[basx149] Numbers with E", "input": "000E+9", "expected": "0E+9"},
- {"description": "[basx639] Zeros", "input": "0.00E+9", "expected": "0E+7"},
- {"description": "[basx679] Zeros", "input": "0.00E-9", "expected": "0E-11"},
- {
- "description": "[basx063] strings without E cannot generate E in result",
- "input": "+00345678.5432",
- "expected": "345678.5432"
- },
- {
- "description": "[basx018] conform to rules and exponent will be in permitted range).",
- "input": "-0.0"
- },
- {"description": "[basx609] Zeros", "input": "0.0"},
- {"description": "[basx614] Zeros", "input": "-0.0"},
- {"description": "[basx684] Zeros", "input": "00.", "expected": "0"},
- {"description": "[basx640] Zeros", "input": "0.0E+0", "expected": "0.0"},
- {"description": "[basx660] Zeros", "input": "0.0E-0", "expected": "0.0"},
- {"description": "[basx641] Zeros", "input": "0.0E+1", "expected": "0"},
- {"description": "[basx661] Zeros", "input": "0.0E-1", "expected": "0.00"},
- {
- "description": "[basx296] some more negative zeros [systematic tests below]",
- "input": "-0.0E-2",
- "expected": "-0.000"
- },
- {"description": "[basx642] Zeros", "input": "0.0E+2", "expected": "0E+1"},
- {"description": "[basx662] Zeros", "input": "0.0E-2", "expected": "0.000"},
- {
- "description": "[basx297] some more negative zeros [systematic tests below]",
- "input": "-0.0E-3",
- "expected": "-0.0000"
- },
- {"description": "[basx643] Zeros", "input": "0.0E+3", "expected": "0E+2"},
- {"description": "[basx663] Zeros", "input": "0.0E-3", "expected": "0.0000"},
- {"description": "[basx644] Zeros", "input": "0.0E+4", "expected": "0E+3"},
- {"description": "[basx664] Zeros", "input": "0.0E-4", "expected": "0.00000"},
- {"description": "[basx645] Zeros", "input": "0.0E+5", "expected": "0E+4"},
- {"description": "[basx665] Zeros", "input": "0.0E-5", "expected": "0.000000"},
- {"description": "[basx646] Zeros", "input": "0.0E+6", "expected": "0E+5"},
- {"description": "[basx666] Zeros", "input": "0.0E-6", "expected": "0E-7"},
- {"description": "[basx647] Zeros", "input": "0.0E+7", "expected": "0E+6"},
- {"description": "[basx667] Zeros", "input": "0.0E-7", "expected": "0E-8"},
- {"description": "[basx648] Zeros", "input": "0.0E+8", "expected": "0E+7"},
- {"description": "[basx668] Zeros", "input": "0.0E-8", "expected": "0E-9"},
- {"description": "[basx160] Numbers with E", "input": "00E+9", "expected": "0E+9"},
- {"description": "[basx161] Numbers with E", "input": "00E-9", "expected": "0E-9"},
- {"description": "[basx649] Zeros", "input": "0.0E+9", "expected": "0E+8"},
- {"description": "[basx669] Zeros", "input": "0.0E-9", "expected": "0E-10"},
- {
- "description": "[basx062] strings without E cannot generate E in result",
- "input": "+0345678.5432",
- "expected": "345678.5432"
- },
- {
- "description": "[basx001] conform to rules and exponent will be in permitted range).",
- "input": "0"
- },
- {
- "description": "[basx017] conform to rules and exponent will be in permitted range).",
- "input": "-0"
- },
- {"description": "[basx611] Zeros", "input": "0.", "expected": "0"},
- {"description": "[basx613] Zeros", "input": "-0.", "expected": "-0"},
- {"description": "[basx685] Zeros", "input": "0.", "expected": "0"},
- {"description": "[basx688] Zeros", "input": "+0.", "expected": "0"},
- {"description": "[basx689] Zeros", "input": "-0.", "expected": "-0"},
- {"description": "[basx650] Zeros", "input": "0E+0", "expected": "0"},
- {"description": "[basx651] Zeros", "input": "0E+1"},
- {
- "description": "[basx298] some more negative zeros [systematic tests below]",
- "input": "-0E-2",
- "expected": "-0.00"
- },
- {"description": "[basx652] Zeros", "input": "0E+2"},
- {
- "description": "[basx299] some more negative zeros [systematic tests below]",
- "input": "-0E-3",
- "expected": "-0.000"
- },
- {"description": "[basx653] Zeros", "input": "0E+3"},
- {"description": "[basx654] Zeros", "input": "0E+4"},
- {"description": "[basx655] Zeros", "input": "0E+5"},
- {"description": "[basx656] Zeros", "input": "0E+6"},
- {"description": "[basx657] Zeros", "input": "0E+7"},
- {"description": "[basx658] Zeros", "input": "0E+8"},
- {"description": "[basx138] Numbers with E", "input": "+0E+9", "expected": "0E+9"},
- {"description": "[basx139] Numbers with E", "input": "-0E+9"},
- {"description": "[basx144] Numbers with E", "input": "0E+9"},
- {"description": "[basx154] Numbers with E", "input": "0E9", "expected": "0E+9"},
- {"description": "[basx659] Zeros", "input": "0E+9"},
- {
- "description": "[basx042] strings without E cannot generate E in result",
- "input": "+12.76",
- "expected": "12.76"
- },
- {"description": "[basx143] Numbers with E", "input": "+1E+009", "expected": "1E+9"},
- {
- "description": "[basx061] strings without E cannot generate E in result",
- "input": "+345678.5432",
- "expected": "345678.5432"
- },
- {
- "description": "[basx036] conform to rules and exponent will be in permitted range).",
- "input": "0.0000000123456789",
- "expected": "1.23456789E-8"
- },
- {
- "description": "[basx035] conform to rules and exponent will be in permitted range).",
- "input": "0.000000123456789",
- "expected": "1.23456789E-7"
- },
- {
- "description": "[basx034] conform to rules and exponent will be in permitted range).",
- "input": "0.00000123456789"
- },
- {
- "description": "[basx053] strings without E cannot generate E in result",
- "input": "0.0000050"
- },
- {
- "description": "[basx033] conform to rules and exponent will be in permitted range).",
- "input": "0.0000123456789"
- },
- {
- "description": "[basx016] conform to rules and exponent will be in permitted range).",
- "input": "0.012"
- },
- {
- "description": "[basx015] conform to rules and exponent will be in permitted range).",
- "input": "0.123"
- },
- {
- "description": "[basx037] conform to rules and exponent will be in permitted range).",
- "input": "0.123456789012344"
- },
- {
- "description": "[basx038] conform to rules and exponent will be in permitted range).",
- "input": "0.123456789012345"
- },
- {"description": "[basx250] Numbers with E", "input": "0.1265"},
- {"description": "[basx257] Numbers with E", "input": "0.1265E-0", "expected": "0.1265"},
- {"description": "[basx256] Numbers with E", "input": "0.1265E-1", "expected": "0.01265"},
- {"description": "[basx258] Numbers with E", "input": "0.1265E+1", "expected": "1.265"},
- {"description": "[basx251] Numbers with E", "input": "0.1265E-20", "expected": "1.265E-21"},
- {"description": "[basx263] Numbers with E", "input": "0.1265E+20", "expected": "1.265E+19"},
- {"description": "[basx255] Numbers with E", "input": "0.1265E-2", "expected": "0.001265"},
- {"description": "[basx259] Numbers with E", "input": "0.1265E+2", "expected": "12.65"},
- {"description": "[basx254] Numbers with E", "input": "0.1265E-3", "expected": "0.0001265"},
- {"description": "[basx260] Numbers with E", "input": "0.1265E+3", "expected": "126.5"},
- {"description": "[basx253] Numbers with E", "input": "0.1265E-4", "expected": "0.00001265"},
- {"description": "[basx261] Numbers with E", "input": "0.1265E+4", "expected": "1265"},
- {"description": "[basx252] Numbers with E", "input": "0.1265E-8", "expected": "1.265E-9"},
- {"description": "[basx262] Numbers with E", "input": "0.1265E+8", "expected": "1.265E+7"},
- {"description": "[basx159] Numbers with E", "input": "0.73e-7", "expected": "7.3E-8"},
- {
- "description": "[basx004] conform to rules and exponent will be in permitted range).",
- "input": "1.00"
- },
- {
- "description": "[basx003] conform to rules and exponent will be in permitted range).",
- "input": "1.0"
- },
- {
- "description": "[basx002] conform to rules and exponent will be in permitted range).",
- "input": "1"
- },
- {"description": "[basx148] Numbers with E", "input": "1E+009", "expected": "1E+9"},
- {"description": "[basx153] Numbers with E", "input": "1E009", "expected": "1E+9"},
- {"description": "[basx141] Numbers with E", "input": "1e+09", "expected": "1E+9"},
- {"description": "[basx146] Numbers with E", "input": "1E+09", "expected": "1E+9"},
- {"description": "[basx151] Numbers with E", "input": "1e09", "expected": "1E+9"},
- {"description": "[basx142] Numbers with E", "input": "1E+90"},
- {"description": "[basx147] Numbers with E", "input": "1e+90", "expected": "1E+90"},
- {"description": "[basx152] Numbers with E", "input": "1E90", "expected": "1E+90"},
- {"description": "[basx140] Numbers with E", "input": "1E+9"},
- {"description": "[basx150] Numbers with E", "input": "1E9", "expected": "1E+9"},
- {
- "description": "[basx014] conform to rules and exponent will be in permitted range).",
- "input": "1.234"
- },
- {"description": "[basx170] Numbers with E", "input": "1.265"},
- {"description": "[basx177] Numbers with E", "input": "1.265E-0", "expected": "1.265"},
- {"description": "[basx176] Numbers with E", "input": "1.265E-1", "expected": "0.1265"},
- {"description": "[basx178] Numbers with E", "input": "1.265E+1", "expected": "12.65"},
- {"description": "[basx171] Numbers with E", "input": "1.265E-20"},
- {"description": "[basx183] Numbers with E", "input": "1.265E+20"},
- {"description": "[basx175] Numbers with E", "input": "1.265E-2", "expected": "0.01265"},
- {"description": "[basx179] Numbers with E", "input": "1.265E+2", "expected": "126.5"},
- {"description": "[basx174] Numbers with E", "input": "1.265E-3", "expected": "0.001265"},
- {"description": "[basx180] Numbers with E", "input": "1.265E+3", "expected": "1265"},
- {"description": "[basx173] Numbers with E", "input": "1.265E-4", "expected": "0.0001265"},
- {"description": "[basx181] Numbers with E", "input": "1.265E+4"},
- {"description": "[basx172] Numbers with E", "input": "1.265E-8"},
- {"description": "[basx182] Numbers with E", "input": "1.265E+8"},
- {"description": "[basx157] Numbers with E", "input": "4E+9"},
- {"description": "[basx067] examples", "input": "5E-6", "expected": "0.000005"},
- {"description": "[basx069] examples", "input": "5E-7"},
- {"description": "[basx385] Engineering notation tests", "input": "7E0", "expected": "7"},
- {
- "description": "[basx365] Engineering notation tests",
- "input": "7E10",
- "expected": "7E+10"
- },
- {"description": "[basx405] Engineering notation tests", "input": "7E-10"},
- {
- "description": "[basx363] Engineering notation tests",
- "input": "7E11",
- "expected": "7E+11"
- },
- {"description": "[basx407] Engineering notation tests", "input": "7E-11"},
- {
- "description": "[basx361] Engineering notation tests",
- "input": "7E12",
- "expected": "7E+12"
- },
- {"description": "[basx409] Engineering notation tests", "input": "7E-12"},
- {"description": "[basx411] Engineering notation tests", "input": "7E-13"},
- {"description": "[basx383] Engineering notation tests", "input": "7E1", "expected": "7E+1"},
- {"description": "[basx387] Engineering notation tests", "input": "7E-1", "expected": "0.7"},
- {"description": "[basx381] Engineering notation tests", "input": "7E2", "expected": "7E+2"},
- {
- "description": "[basx389] Engineering notation tests",
- "input": "7E-2",
- "expected": "0.07"
- },
- {"description": "[basx379] Engineering notation tests", "input": "7E3", "expected": "7E+3"},
- {
- "description": "[basx391] Engineering notation tests",
- "input": "7E-3",
- "expected": "0.007"
- },
- {"description": "[basx377] Engineering notation tests", "input": "7E4", "expected": "7E+4"},
- {
- "description": "[basx393] Engineering notation tests",
- "input": "7E-4",
- "expected": "0.0007"
- },
- {"description": "[basx375] Engineering notation tests", "input": "7E5", "expected": "7E+5"},
- {
- "description": "[basx395] Engineering notation tests",
- "input": "7E-5",
- "expected": "0.00007"
- },
- {"description": "[basx373] Engineering notation tests", "input": "7E6", "expected": "7E+6"},
- {
- "description": "[basx397] Engineering notation tests",
- "input": "7E-6",
- "expected": "0.000007"
- },
- {"description": "[basx371] Engineering notation tests", "input": "7E7", "expected": "7E+7"},
- {"description": "[basx399] Engineering notation tests", "input": "7E-7"},
- {"description": "[basx369] Engineering notation tests", "input": "7E8", "expected": "7E+8"},
- {"description": "[basx401] Engineering notation tests", "input": "7E-8"},
- {"description": "[basx367] Engineering notation tests", "input": "7E9", "expected": "7E+9"},
- {"description": "[basx403] Engineering notation tests", "input": "7E-9"},
- {
- "description": "[basx007] conform to rules and exponent will be in permitted range).",
- "input": "10.0"
- },
- {
- "description": "[basx005] conform to rules and exponent will be in permitted range).",
- "input": "10"
- },
- {"description": "[basx165] Numbers with E", "input": "10E+009", "expected": "1.0E+10"},
- {"description": "[basx163] Numbers with E", "input": "10E+09", "expected": "1.0E+10"},
- {"description": "[basx325] Engineering notation tests", "input": "10e0", "expected": "10"},
- {
- "description": "[basx305] Engineering notation tests",
- "input": "10e10",
- "expected": "1.0E+11"
- },
- {
- "description": "[basx345] Engineering notation tests",
- "input": "10e-10",
- "expected": "1.0E-9"
- },
- {
- "description": "[basx303] Engineering notation tests",
- "input": "10e11",
- "expected": "1.0E+12"
- },
- {
- "description": "[basx347] Engineering notation tests",
- "input": "10e-11",
- "expected": "1.0E-10"
- },
- {
- "description": "[basx301] Engineering notation tests",
- "input": "10e12",
- "expected": "1.0E+13"
- },
- {
- "description": "[basx349] Engineering notation tests",
- "input": "10e-12",
- "expected": "1.0E-11"
- },
- {
- "description": "[basx351] Engineering notation tests",
- "input": "10e-13",
- "expected": "1.0E-12"
- },
- {
- "description": "[basx323] Engineering notation tests",
- "input": "10e1",
- "expected": "1.0E+2"
- },
- {
- "description": "[basx327] Engineering notation tests",
- "input": "10e-1",
- "expected": "1.0"
- },
- {
- "description": "[basx321] Engineering notation tests",
- "input": "10e2",
- "expected": "1.0E+3"
- },
- {
- "description": "[basx329] Engineering notation tests",
- "input": "10e-2",
- "expected": "0.10"
- },
- {
- "description": "[basx319] Engineering notation tests",
- "input": "10e3",
- "expected": "1.0E+4"
- },
- {
- "description": "[basx331] Engineering notation tests",
- "input": "10e-3",
- "expected": "0.010"
- },
- {
- "description": "[basx317] Engineering notation tests",
- "input": "10e4",
- "expected": "1.0E+5"
- },
- {
- "description": "[basx333] Engineering notation tests",
- "input": "10e-4",
- "expected": "0.0010"
- },
- {
- "description": "[basx315] Engineering notation tests",
- "input": "10e5",
- "expected": "1.0E+6"
- },
- {
- "description": "[basx335] Engineering notation tests",
- "input": "10e-5",
- "expected": "0.00010"
- },
- {
- "description": "[basx313] Engineering notation tests",
- "input": "10e6",
- "expected": "1.0E+7"
- },
- {
- "description": "[basx337] Engineering notation tests",
- "input": "10e-6",
- "expected": "0.000010"
- },
- {
- "description": "[basx311] Engineering notation tests",
- "input": "10e7",
- "expected": "1.0E+8"
- },
- {
- "description": "[basx339] Engineering notation tests",
- "input": "10e-7",
- "expected": "0.0000010"
- },
- {
- "description": "[basx309] Engineering notation tests",
- "input": "10e8",
- "expected": "1.0E+9"
- },
- {
- "description": "[basx341] Engineering notation tests",
- "input": "10e-8",
- "expected": "1.0E-7"
- },
- {"description": "[basx164] Numbers with E", "input": "10e+90", "expected": "1.0E+91"},
- {"description": "[basx162] Numbers with E", "input": "10E+9", "expected": "1.0E+10"},
- {
- "description": "[basx307] Engineering notation tests",
- "input": "10e9",
- "expected": "1.0E+10"
- },
- {
- "description": "[basx343] Engineering notation tests",
- "input": "10e-9",
- "expected": "1.0E-8"
- },
- {
- "description": "[basx008] conform to rules and exponent will be in permitted range).",
- "input": "10.1"
- },
- {
- "description": "[basx009] conform to rules and exponent will be in permitted range).",
- "input": "10.4"
- },
- {
- "description": "[basx010] conform to rules and exponent will be in permitted range).",
- "input": "10.5"
- },
- {
- "description": "[basx011] conform to rules and exponent will be in permitted range).",
- "input": "10.6"
- },
- {
- "description": "[basx012] conform to rules and exponent will be in permitted range).",
- "input": "10.9"
- },
- {
- "description": "[basx013] conform to rules and exponent will be in permitted range).",
- "input": "11.0"
- },
- {"description": "[basx040] strings without E cannot generate E in result", "input": "12"},
- {"description": "[basx190] Numbers with E", "input": "12.65"},
- {"description": "[basx197] Numbers with E", "input": "12.65E-0", "expected": "12.65"},
- {"description": "[basx196] Numbers with E", "input": "12.65E-1", "expected": "1.265"},
- {"description": "[basx198] Numbers with E", "input": "12.65E+1", "expected": "126.5"},
- {"description": "[basx191] Numbers with E", "input": "12.65E-20", "expected": "1.265E-19"},
- {"description": "[basx203] Numbers with E", "input": "12.65E+20", "expected": "1.265E+21"},
- {"description": "[basx195] Numbers with E", "input": "12.65E-2", "expected": "0.1265"},
- {"description": "[basx199] Numbers with E", "input": "12.65E+2", "expected": "1265"},
- {"description": "[basx194] Numbers with E", "input": "12.65E-3", "expected": "0.01265"},
- {"description": "[basx200] Numbers with E", "input": "12.65E+3", "expected": "1.265E+4"},
- {"description": "[basx193] Numbers with E", "input": "12.65E-4", "expected": "0.001265"},
- {"description": "[basx201] Numbers with E", "input": "12.65E+4", "expected": "1.265E+5"},
- {"description": "[basx192] Numbers with E", "input": "12.65E-8", "expected": "1.265E-7"},
- {"description": "[basx202] Numbers with E", "input": "12.65E+8", "expected": "1.265E+9"},
- {
- "description": "[basx044] strings without E cannot generate E in result",
- "input": "012.76",
- "expected": "12.76"
- },
- {
- "description": "[basx042] strings without E cannot generate E in result",
- "input": "12.76"
- },
- {
- "description": "[basx046] strings without E cannot generate E in result",
- "input": "17.",
- "expected": "17"
- },
- {
- "description": "[basx049] strings without E cannot generate E in result",
- "input": "0044",
- "expected": "44"
- },
- {
- "description": "[basx048] strings without E cannot generate E in result",
- "input": "044",
- "expected": "44"
- },
- {"description": "[basx158] Numbers with E", "input": "44E+9", "expected": "4.4E+10"},
- {"description": "[basx068] examples", "input": "50E-7", "expected": "0.0000050"},
- {"description": "[basx169] Numbers with E", "input": "100e+009", "expected": "1.00E+11"},
- {"description": "[basx167] Numbers with E", "input": "100e+09", "expected": "1.00E+11"},
- {"description": "[basx168] Numbers with E", "input": "100E+90", "expected": "1.00E+92"},
- {"description": "[basx166] Numbers with E", "input": "100e+9", "expected": "1.00E+11"},
- {"description": "[basx210] Numbers with E", "input": "126.5"},
- {"description": "[basx217] Numbers with E", "input": "126.5E-0", "expected": "126.5"},
- {"description": "[basx216] Numbers with E", "input": "126.5E-1", "expected": "12.65"},
- {"description": "[basx218] Numbers with E", "input": "126.5E+1", "expected": "1265"},
- {"description": "[basx211] Numbers with E", "input": "126.5E-20", "expected": "1.265E-18"},
- {"description": "[basx223] Numbers with E", "input": "126.5E+20", "expected": "1.265E+22"},
- {"description": "[basx215] Numbers with E", "input": "126.5E-2", "expected": "1.265"},
- {"description": "[basx219] Numbers with E", "input": "126.5E+2", "expected": "1.265E+4"},
- {"description": "[basx214] Numbers with E", "input": "126.5E-3", "expected": "0.1265"},
- {"description": "[basx220] Numbers with E", "input": "126.5E+3", "expected": "1.265E+5"},
- {"description": "[basx213] Numbers with E", "input": "126.5E-4", "expected": "0.01265"},
- {"description": "[basx221] Numbers with E", "input": "126.5E+4", "expected": "1.265E+6"},
- {"description": "[basx212] Numbers with E", "input": "126.5E-8", "expected": "0.000001265"},
- {"description": "[basx222] Numbers with E", "input": "126.5E+8", "expected": "1.265E+10"},
- {
- "description": "[basx006] conform to rules and exponent will be in permitted range).",
- "input": "1000"
- },
- {"description": "[basx230] Numbers with E", "input": "1265"},
- {"description": "[basx237] Numbers with E", "input": "1265E-0", "expected": "1265"},
- {"description": "[basx236] Numbers with E", "input": "1265E-1", "expected": "126.5"},
- {"description": "[basx238] Numbers with E", "input": "1265E+1", "expected": "1.265E+4"},
- {"description": "[basx231] Numbers with E", "input": "1265E-20", "expected": "1.265E-17"},
- {"description": "[basx243] Numbers with E", "input": "1265E+20", "expected": "1.265E+23"},
- {"description": "[basx235] Numbers with E", "input": "1265E-2", "expected": "12.65"},
- {"description": "[basx239] Numbers with E", "input": "1265E+2", "expected": "1.265E+5"},
- {"description": "[basx234] Numbers with E", "input": "1265E-3", "expected": "1.265"},
- {"description": "[basx240] Numbers with E", "input": "1265E+3", "expected": "1.265E+6"},
- {"description": "[basx233] Numbers with E", "input": "1265E-4", "expected": "0.1265"},
- {"description": "[basx241] Numbers with E", "input": "1265E+4", "expected": "1.265E+7"},
- {"description": "[basx232] Numbers with E", "input": "1265E-8", "expected": "0.00001265"},
- {"description": "[basx242] Numbers with E", "input": "1265E+8", "expected": "1.265E+11"},
- {
- "description": "[basx060] strings without E cannot generate E in result",
- "input": "345678.5432"
- },
- {
- "description": "[basx059] strings without E cannot generate E in result",
- "input": "0345678.54321",
- "expected": "345678.54321"
- },
- {
- "description": "[basx058] strings without E cannot generate E in result",
- "input": "345678.543210"
- },
- {
- "description": "[basx057] strings without E cannot generate E in result",
- "input": "2345678.543210"
- },
- {
- "description": "[basx056] strings without E cannot generate E in result",
- "input": "12345678.543210"
- },
- {
- "description": "[basx031] conform to rules and exponent will be in permitted range).",
- "input": "123456789.000000"
- },
- {
- "description": "[basx030] conform to rules and exponent will be in permitted range).",
- "input": "123456789.123456"
- },
- {
- "description": "[basx032] conform to rules and exponent will be in permitted range).",
- "input": "123456789123456"
- }
- ];
+var data = [
+ {
+ "description": "[basx066] strings without E cannot generate E in result",
+ "input": "-00345678.5432",
+ "expected": "-345678.5432"
+ },
+ {
+ "description": "[basx065] strings without E cannot generate E in result",
+ "input": "-0345678.5432",
+ "expected": "-345678.5432"
+ },
+ {
+ "description": "[basx064] strings without E cannot generate E in result",
+ "input": "-345678.5432"
+ },
+ {"description": "[basx041] strings without E cannot generate E in result", "input": "-76"},
+ {
+ "description": "[basx027] conform to rules and exponent will be in permitted range).",
+ "input": "-9.999"
+ },
+ {
+ "description": "[basx026] conform to rules and exponent will be in permitted range).",
+ "input": "-9.119"
+ },
+ {
+ "description": "[basx025] conform to rules and exponent will be in permitted range).",
+ "input": "-9.11"
+ },
+ {
+ "description": "[basx024] conform to rules and exponent will be in permitted range).",
+ "input": "-9.1"
+ },
+ {
+ "description": "[dqbsr531] negatives (Rounded)",
+ "input": "-1.1111111111111111111111111111123450",
+ "expected": "-1.111111111111111111111111111112345"
+ },
+ {
+ "description": "[basx022] conform to rules and exponent will be in permitted range).",
+ "input": "-1.0"
+ },
+ {
+ "description": "[basx021] conform to rules and exponent will be in permitted range).",
+ "input": "-1"
+ },
+ {"description": "[basx601] Zeros", "input": "0.000000000", "expected": "0E-9"},
+ {"description": "[basx622] Zeros", "input": "-0.000000000", "expected": "-0E-9"},
+ {"description": "[basx602] Zeros", "input": "0.00000000", "expected": "0E-8"},
+ {"description": "[basx621] Zeros", "input": "-0.00000000", "expected": "-0E-8"},
+ {"description": "[basx603] Zeros", "input": "0.0000000", "expected": "0E-7"},
+ {"description": "[basx620] Zeros", "input": "-0.0000000", "expected": "-0E-7"},
+ {"description": "[basx604] Zeros", "input": "0.000000"},
+ {"description": "[basx619] Zeros", "input": "-0.000000"},
+ {"description": "[basx605] Zeros", "input": "0.00000"},
+ {"description": "[basx618] Zeros", "input": "-0.00000"},
+ {"description": "[basx680] Zeros", "input": "000000.", "expected": "0"},
+ {"description": "[basx606] Zeros", "input": "0.0000"},
+ {"description": "[basx617] Zeros", "input": "-0.0000"},
+ {"description": "[basx681] Zeros", "input": "00000.", "expected": "0"},
+ {"description": "[basx686] Zeros", "input": "+00000.", "expected": "0"},
+ {"description": "[basx687] Zeros", "input": "-00000.", "expected": "-0"},
+ {
+ "description": "[basx019] conform to rules and exponent will be in permitted range).",
+ "input": "-00.00",
+ "expected": "-0.00"
+ },
+ {"description": "[basx607] Zeros", "input": "0.000"},
+ {"description": "[basx616] Zeros", "input": "-0.000"},
+ {"description": "[basx682] Zeros", "input": "0000.", "expected": "0"},
+ {"description": "[basx155] Numbers with E", "input": "0.000e+0", "expected": "0.000"},
+ {"description": "[basx130] Numbers with E", "input": "0.000E-1", "expected": "0.0000"},
+ {
+ "description": "[basx290] some more negative zeros [systematic tests below]",
+ "input": "-0.000E-1",
+ "expected": "-0.0000"
+ },
+ {"description": "[basx131] Numbers with E", "input": "0.000E-2", "expected": "0.00000"},
+ {
+ "description": "[basx291] some more negative zeros [systematic tests below]",
+ "input": "-0.000E-2",
+ "expected": "-0.00000"
+ },
+ {"description": "[basx132] Numbers with E", "input": "0.000E-3", "expected": "0.000000"},
+ {
+ "description": "[basx292] some more negative zeros [systematic tests below]",
+ "input": "-0.000E-3",
+ "expected": "-0.000000"
+ },
+ {"description": "[basx133] Numbers with E", "input": "0.000E-4", "expected": "0E-7"},
+ {
+ "description": "[basx293] some more negative zeros [systematic tests below]",
+ "input": "-0.000E-4",
+ "expected": "-0E-7"
+ },
+ {"description": "[basx608] Zeros", "input": "0.00"},
+ {"description": "[basx615] Zeros", "input": "-0.00"},
+ {"description": "[basx683] Zeros", "input": "000.", "expected": "0"},
+ {"description": "[basx630] Zeros", "input": "0.00E+0", "expected": "0.00"},
+ {"description": "[basx670] Zeros", "input": "0.00E-0", "expected": "0.00"},
+ {"description": "[basx631] Zeros", "input": "0.00E+1", "expected": "0.0"},
+ {"description": "[basx671] Zeros", "input": "0.00E-1", "expected": "0.000"},
+ {"description": "[basx134] Numbers with E", "input": "0.00E-2", "expected": "0.0000"},
+ {
+ "description": "[basx294] some more negative zeros [systematic tests below]",
+ "input": "-0.00E-2",
+ "expected": "-0.0000"
+ },
+ {"description": "[basx632] Zeros", "input": "0.00E+2", "expected": "0"},
+ {"description": "[basx672] Zeros", "input": "0.00E-2", "expected": "0.0000"},
+ {"description": "[basx135] Numbers with E", "input": "0.00E-3", "expected": "0.00000"},
+ {
+ "description": "[basx295] some more negative zeros [systematic tests below]",
+ "input": "-0.00E-3",
+ "expected": "-0.00000"
+ },
+ {"description": "[basx633] Zeros", "input": "0.00E+3", "expected": "0E+1"},
+ {"description": "[basx673] Zeros", "input": "0.00E-3", "expected": "0.00000"},
+ {"description": "[basx136] Numbers with E", "input": "0.00E-4", "expected": "0.000000"},
+ {"description": "[basx674] Zeros", "input": "0.00E-4", "expected": "0.000000"},
+ {"description": "[basx634] Zeros", "input": "0.00E+4", "expected": "0E+2"},
+ {"description": "[basx137] Numbers with E", "input": "0.00E-5", "expected": "0E-7"},
+ {"description": "[basx635] Zeros", "input": "0.00E+5", "expected": "0E+3"},
+ {"description": "[basx675] Zeros", "input": "0.00E-5", "expected": "0E-7"},
+ {"description": "[basx636] Zeros", "input": "0.00E+6", "expected": "0E+4"},
+ {"description": "[basx676] Zeros", "input": "0.00E-6", "expected": "0E-8"},
+ {"description": "[basx637] Zeros", "input": "0.00E+7", "expected": "0E+5"},
+ {"description": "[basx677] Zeros", "input": "0.00E-7", "expected": "0E-9"},
+ {"description": "[basx638] Zeros", "input": "0.00E+8", "expected": "0E+6"},
+ {"description": "[basx678] Zeros", "input": "0.00E-8", "expected": "0E-10"},
+ {"description": "[basx149] Numbers with E", "input": "000E+9", "expected": "0E+9"},
+ {"description": "[basx639] Zeros", "input": "0.00E+9", "expected": "0E+7"},
+ {"description": "[basx679] Zeros", "input": "0.00E-9", "expected": "0E-11"},
+ {
+ "description": "[basx063] strings without E cannot generate E in result",
+ "input": "+00345678.5432",
+ "expected": "345678.5432"
+ },
+ {
+ "description": "[basx018] conform to rules and exponent will be in permitted range).",
+ "input": "-0.0"
+ },
+ {"description": "[basx609] Zeros", "input": "0.0"},
+ {"description": "[basx614] Zeros", "input": "-0.0"},
+ {"description": "[basx684] Zeros", "input": "00.", "expected": "0"},
+ {"description": "[basx640] Zeros", "input": "0.0E+0", "expected": "0.0"},
+ {"description": "[basx660] Zeros", "input": "0.0E-0", "expected": "0.0"},
+ {"description": "[basx641] Zeros", "input": "0.0E+1", "expected": "0"},
+ {"description": "[basx661] Zeros", "input": "0.0E-1", "expected": "0.00"},
+ {
+ "description": "[basx296] some more negative zeros [systematic tests below]",
+ "input": "-0.0E-2",
+ "expected": "-0.000"
+ },
+ {"description": "[basx642] Zeros", "input": "0.0E+2", "expected": "0E+1"},
+ {"description": "[basx662] Zeros", "input": "0.0E-2", "expected": "0.000"},
+ {
+ "description": "[basx297] some more negative zeros [systematic tests below]",
+ "input": "-0.0E-3",
+ "expected": "-0.0000"
+ },
+ {"description": "[basx643] Zeros", "input": "0.0E+3", "expected": "0E+2"},
+ {"description": "[basx663] Zeros", "input": "0.0E-3", "expected": "0.0000"},
+ {"description": "[basx644] Zeros", "input": "0.0E+4", "expected": "0E+3"},
+ {"description": "[basx664] Zeros", "input": "0.0E-4", "expected": "0.00000"},
+ {"description": "[basx645] Zeros", "input": "0.0E+5", "expected": "0E+4"},
+ {"description": "[basx665] Zeros", "input": "0.0E-5", "expected": "0.000000"},
+ {"description": "[basx646] Zeros", "input": "0.0E+6", "expected": "0E+5"},
+ {"description": "[basx666] Zeros", "input": "0.0E-6", "expected": "0E-7"},
+ {"description": "[basx647] Zeros", "input": "0.0E+7", "expected": "0E+6"},
+ {"description": "[basx667] Zeros", "input": "0.0E-7", "expected": "0E-8"},
+ {"description": "[basx648] Zeros", "input": "0.0E+8", "expected": "0E+7"},
+ {"description": "[basx668] Zeros", "input": "0.0E-8", "expected": "0E-9"},
+ {"description": "[basx160] Numbers with E", "input": "00E+9", "expected": "0E+9"},
+ {"description": "[basx161] Numbers with E", "input": "00E-9", "expected": "0E-9"},
+ {"description": "[basx649] Zeros", "input": "0.0E+9", "expected": "0E+8"},
+ {"description": "[basx669] Zeros", "input": "0.0E-9", "expected": "0E-10"},
+ {
+ "description": "[basx062] strings without E cannot generate E in result",
+ "input": "+0345678.5432",
+ "expected": "345678.5432"
+ },
+ {
+ "description": "[basx001] conform to rules and exponent will be in permitted range).",
+ "input": "0"
+ },
+ {
+ "description": "[basx017] conform to rules and exponent will be in permitted range).",
+ "input": "-0"
+ },
+ {"description": "[basx611] Zeros", "input": "0.", "expected": "0"},
+ {"description": "[basx613] Zeros", "input": "-0.", "expected": "-0"},
+ {"description": "[basx685] Zeros", "input": "0.", "expected": "0"},
+ {"description": "[basx688] Zeros", "input": "+0.", "expected": "0"},
+ {"description": "[basx689] Zeros", "input": "-0.", "expected": "-0"},
+ {"description": "[basx650] Zeros", "input": "0E+0", "expected": "0"},
+ {"description": "[basx651] Zeros", "input": "0E+1"},
+ {
+ "description": "[basx298] some more negative zeros [systematic tests below]",
+ "input": "-0E-2",
+ "expected": "-0.00"
+ },
+ {"description": "[basx652] Zeros", "input": "0E+2"},
+ {
+ "description": "[basx299] some more negative zeros [systematic tests below]",
+ "input": "-0E-3",
+ "expected": "-0.000"
+ },
+ {"description": "[basx653] Zeros", "input": "0E+3"},
+ {"description": "[basx654] Zeros", "input": "0E+4"},
+ {"description": "[basx655] Zeros", "input": "0E+5"},
+ {"description": "[basx656] Zeros", "input": "0E+6"},
+ {"description": "[basx657] Zeros", "input": "0E+7"},
+ {"description": "[basx658] Zeros", "input": "0E+8"},
+ {"description": "[basx138] Numbers with E", "input": "+0E+9", "expected": "0E+9"},
+ {"description": "[basx139] Numbers with E", "input": "-0E+9"},
+ {"description": "[basx144] Numbers with E", "input": "0E+9"},
+ {"description": "[basx154] Numbers with E", "input": "0E9", "expected": "0E+9"},
+ {"description": "[basx659] Zeros", "input": "0E+9"},
+ {
+ "description": "[basx042] strings without E cannot generate E in result",
+ "input": "+12.76",
+ "expected": "12.76"
+ },
+ {"description": "[basx143] Numbers with E", "input": "+1E+009", "expected": "1E+9"},
+ {
+ "description": "[basx061] strings without E cannot generate E in result",
+ "input": "+345678.5432",
+ "expected": "345678.5432"
+ },
+ {
+ "description": "[basx036] conform to rules and exponent will be in permitted range).",
+ "input": "0.0000000123456789",
+ "expected": "1.23456789E-8"
+ },
+ {
+ "description": "[basx035] conform to rules and exponent will be in permitted range).",
+ "input": "0.000000123456789",
+ "expected": "1.23456789E-7"
+ },
+ {
+ "description": "[basx034] conform to rules and exponent will be in permitted range).",
+ "input": "0.00000123456789"
+ },
+ {
+ "description": "[basx053] strings without E cannot generate E in result",
+ "input": "0.0000050"
+ },
+ {
+ "description": "[basx033] conform to rules and exponent will be in permitted range).",
+ "input": "0.0000123456789"
+ },
+ {
+ "description": "[basx016] conform to rules and exponent will be in permitted range).",
+ "input": "0.012"
+ },
+ {
+ "description": "[basx015] conform to rules and exponent will be in permitted range).",
+ "input": "0.123"
+ },
+ {
+ "description": "[basx037] conform to rules and exponent will be in permitted range).",
+ "input": "0.123456789012344"
+ },
+ {
+ "description": "[basx038] conform to rules and exponent will be in permitted range).",
+ "input": "0.123456789012345"
+ },
+ {"description": "[basx250] Numbers with E", "input": "0.1265"},
+ {"description": "[basx257] Numbers with E", "input": "0.1265E-0", "expected": "0.1265"},
+ {"description": "[basx256] Numbers with E", "input": "0.1265E-1", "expected": "0.01265"},
+ {"description": "[basx258] Numbers with E", "input": "0.1265E+1", "expected": "1.265"},
+ {"description": "[basx251] Numbers with E", "input": "0.1265E-20", "expected": "1.265E-21"},
+ {"description": "[basx263] Numbers with E", "input": "0.1265E+20", "expected": "1.265E+19"},
+ {"description": "[basx255] Numbers with E", "input": "0.1265E-2", "expected": "0.001265"},
+ {"description": "[basx259] Numbers with E", "input": "0.1265E+2", "expected": "12.65"},
+ {"description": "[basx254] Numbers with E", "input": "0.1265E-3", "expected": "0.0001265"},
+ {"description": "[basx260] Numbers with E", "input": "0.1265E+3", "expected": "126.5"},
+ {"description": "[basx253] Numbers with E", "input": "0.1265E-4", "expected": "0.00001265"},
+ {"description": "[basx261] Numbers with E", "input": "0.1265E+4", "expected": "1265"},
+ {"description": "[basx252] Numbers with E", "input": "0.1265E-8", "expected": "1.265E-9"},
+ {"description": "[basx262] Numbers with E", "input": "0.1265E+8", "expected": "1.265E+7"},
+ {"description": "[basx159] Numbers with E", "input": "0.73e-7", "expected": "7.3E-8"},
+ {
+ "description": "[basx004] conform to rules and exponent will be in permitted range).",
+ "input": "1.00"
+ },
+ {
+ "description": "[basx003] conform to rules and exponent will be in permitted range).",
+ "input": "1.0"
+ },
+ {
+ "description": "[basx002] conform to rules and exponent will be in permitted range).",
+ "input": "1"
+ },
+ {"description": "[basx148] Numbers with E", "input": "1E+009", "expected": "1E+9"},
+ {"description": "[basx153] Numbers with E", "input": "1E009", "expected": "1E+9"},
+ {"description": "[basx141] Numbers with E", "input": "1e+09", "expected": "1E+9"},
+ {"description": "[basx146] Numbers with E", "input": "1E+09", "expected": "1E+9"},
+ {"description": "[basx151] Numbers with E", "input": "1e09", "expected": "1E+9"},
+ {"description": "[basx142] Numbers with E", "input": "1E+90"},
+ {"description": "[basx147] Numbers with E", "input": "1e+90", "expected": "1E+90"},
+ {"description": "[basx152] Numbers with E", "input": "1E90", "expected": "1E+90"},
+ {"description": "[basx140] Numbers with E", "input": "1E+9"},
+ {"description": "[basx150] Numbers with E", "input": "1E9", "expected": "1E+9"},
+ {
+ "description": "[basx014] conform to rules and exponent will be in permitted range).",
+ "input": "1.234"
+ },
+ {"description": "[basx170] Numbers with E", "input": "1.265"},
+ {"description": "[basx177] Numbers with E", "input": "1.265E-0", "expected": "1.265"},
+ {"description": "[basx176] Numbers with E", "input": "1.265E-1", "expected": "0.1265"},
+ {"description": "[basx178] Numbers with E", "input": "1.265E+1", "expected": "12.65"},
+ {"description": "[basx171] Numbers with E", "input": "1.265E-20"},
+ {"description": "[basx183] Numbers with E", "input": "1.265E+20"},
+ {"description": "[basx175] Numbers with E", "input": "1.265E-2", "expected": "0.01265"},
+ {"description": "[basx179] Numbers with E", "input": "1.265E+2", "expected": "126.5"},
+ {"description": "[basx174] Numbers with E", "input": "1.265E-3", "expected": "0.001265"},
+ {"description": "[basx180] Numbers with E", "input": "1.265E+3", "expected": "1265"},
+ {"description": "[basx173] Numbers with E", "input": "1.265E-4", "expected": "0.0001265"},
+ {"description": "[basx181] Numbers with E", "input": "1.265E+4"},
+ {"description": "[basx172] Numbers with E", "input": "1.265E-8"},
+ {"description": "[basx182] Numbers with E", "input": "1.265E+8"},
+ {"description": "[basx157] Numbers with E", "input": "4E+9"},
+ {"description": "[basx067] examples", "input": "5E-6", "expected": "0.000005"},
+ {"description": "[basx069] examples", "input": "5E-7"},
+ {"description": "[basx385] Engineering notation tests", "input": "7E0", "expected": "7"},
+ {"description": "[basx365] Engineering notation tests", "input": "7E10", "expected": "7E+10"},
+ {"description": "[basx405] Engineering notation tests", "input": "7E-10"},
+ {"description": "[basx363] Engineering notation tests", "input": "7E11", "expected": "7E+11"},
+ {"description": "[basx407] Engineering notation tests", "input": "7E-11"},
+ {"description": "[basx361] Engineering notation tests", "input": "7E12", "expected": "7E+12"},
+ {"description": "[basx409] Engineering notation tests", "input": "7E-12"},
+ {"description": "[basx411] Engineering notation tests", "input": "7E-13"},
+ {"description": "[basx383] Engineering notation tests", "input": "7E1", "expected": "7E+1"},
+ {"description": "[basx387] Engineering notation tests", "input": "7E-1", "expected": "0.7"},
+ {"description": "[basx381] Engineering notation tests", "input": "7E2", "expected": "7E+2"},
+ {"description": "[basx389] Engineering notation tests", "input": "7E-2", "expected": "0.07"},
+ {"description": "[basx379] Engineering notation tests", "input": "7E3", "expected": "7E+3"},
+ {"description": "[basx391] Engineering notation tests", "input": "7E-3", "expected": "0.007"},
+ {"description": "[basx377] Engineering notation tests", "input": "7E4", "expected": "7E+4"},
+ {"description": "[basx393] Engineering notation tests", "input": "7E-4", "expected": "0.0007"},
+ {"description": "[basx375] Engineering notation tests", "input": "7E5", "expected": "7E+5"},
+ {"description": "[basx395] Engineering notation tests", "input": "7E-5", "expected": "0.00007"},
+ {"description": "[basx373] Engineering notation tests", "input": "7E6", "expected": "7E+6"},
+ {
+ "description": "[basx397] Engineering notation tests",
+ "input": "7E-6",
+ "expected": "0.000007"
+ },
+ {"description": "[basx371] Engineering notation tests", "input": "7E7", "expected": "7E+7"},
+ {"description": "[basx399] Engineering notation tests", "input": "7E-7"},
+ {"description": "[basx369] Engineering notation tests", "input": "7E8", "expected": "7E+8"},
+ {"description": "[basx401] Engineering notation tests", "input": "7E-8"},
+ {"description": "[basx367] Engineering notation tests", "input": "7E9", "expected": "7E+9"},
+ {"description": "[basx403] Engineering notation tests", "input": "7E-9"},
+ {
+ "description": "[basx007] conform to rules and exponent will be in permitted range).",
+ "input": "10.0"
+ },
+ {
+ "description": "[basx005] conform to rules and exponent will be in permitted range).",
+ "input": "10"
+ },
+ {"description": "[basx165] Numbers with E", "input": "10E+009", "expected": "1.0E+10"},
+ {"description": "[basx163] Numbers with E", "input": "10E+09", "expected": "1.0E+10"},
+ {"description": "[basx325] Engineering notation tests", "input": "10e0", "expected": "10"},
+ {
+ "description": "[basx305] Engineering notation tests",
+ "input": "10e10",
+ "expected": "1.0E+11"
+ },
+ {
+ "description": "[basx345] Engineering notation tests",
+ "input": "10e-10",
+ "expected": "1.0E-9"
+ },
+ {
+ "description": "[basx303] Engineering notation tests",
+ "input": "10e11",
+ "expected": "1.0E+12"
+ },
+ {
+ "description": "[basx347] Engineering notation tests",
+ "input": "10e-11",
+ "expected": "1.0E-10"
+ },
+ {
+ "description": "[basx301] Engineering notation tests",
+ "input": "10e12",
+ "expected": "1.0E+13"
+ },
+ {
+ "description": "[basx349] Engineering notation tests",
+ "input": "10e-12",
+ "expected": "1.0E-11"
+ },
+ {
+ "description": "[basx351] Engineering notation tests",
+ "input": "10e-13",
+ "expected": "1.0E-12"
+ },
+ {"description": "[basx323] Engineering notation tests", "input": "10e1", "expected": "1.0E+2"},
+ {"description": "[basx327] Engineering notation tests", "input": "10e-1", "expected": "1.0"},
+ {"description": "[basx321] Engineering notation tests", "input": "10e2", "expected": "1.0E+3"},
+ {"description": "[basx329] Engineering notation tests", "input": "10e-2", "expected": "0.10"},
+ {"description": "[basx319] Engineering notation tests", "input": "10e3", "expected": "1.0E+4"},
+ {"description": "[basx331] Engineering notation tests", "input": "10e-3", "expected": "0.010"},
+ {"description": "[basx317] Engineering notation tests", "input": "10e4", "expected": "1.0E+5"},
+ {"description": "[basx333] Engineering notation tests", "input": "10e-4", "expected": "0.0010"},
+ {"description": "[basx315] Engineering notation tests", "input": "10e5", "expected": "1.0E+6"},
+ {
+ "description": "[basx335] Engineering notation tests",
+ "input": "10e-5",
+ "expected": "0.00010"
+ },
+ {"description": "[basx313] Engineering notation tests", "input": "10e6", "expected": "1.0E+7"},
+ {
+ "description": "[basx337] Engineering notation tests",
+ "input": "10e-6",
+ "expected": "0.000010"
+ },
+ {"description": "[basx311] Engineering notation tests", "input": "10e7", "expected": "1.0E+8"},
+ {
+ "description": "[basx339] Engineering notation tests",
+ "input": "10e-7",
+ "expected": "0.0000010"
+ },
+ {"description": "[basx309] Engineering notation tests", "input": "10e8", "expected": "1.0E+9"},
+ {"description": "[basx341] Engineering notation tests", "input": "10e-8", "expected": "1.0E-7"},
+ {"description": "[basx164] Numbers with E", "input": "10e+90", "expected": "1.0E+91"},
+ {"description": "[basx162] Numbers with E", "input": "10E+9", "expected": "1.0E+10"},
+ {"description": "[basx307] Engineering notation tests", "input": "10e9", "expected": "1.0E+10"},
+ {"description": "[basx343] Engineering notation tests", "input": "10e-9", "expected": "1.0E-8"},
+ {
+ "description": "[basx008] conform to rules and exponent will be in permitted range).",
+ "input": "10.1"
+ },
+ {
+ "description": "[basx009] conform to rules and exponent will be in permitted range).",
+ "input": "10.4"
+ },
+ {
+ "description": "[basx010] conform to rules and exponent will be in permitted range).",
+ "input": "10.5"
+ },
+ {
+ "description": "[basx011] conform to rules and exponent will be in permitted range).",
+ "input": "10.6"
+ },
+ {
+ "description": "[basx012] conform to rules and exponent will be in permitted range).",
+ "input": "10.9"
+ },
+ {
+ "description": "[basx013] conform to rules and exponent will be in permitted range).",
+ "input": "11.0"
+ },
+ {"description": "[basx040] strings without E cannot generate E in result", "input": "12"},
+ {"description": "[basx190] Numbers with E", "input": "12.65"},
+ {"description": "[basx197] Numbers with E", "input": "12.65E-0", "expected": "12.65"},
+ {"description": "[basx196] Numbers with E", "input": "12.65E-1", "expected": "1.265"},
+ {"description": "[basx198] Numbers with E", "input": "12.65E+1", "expected": "126.5"},
+ {"description": "[basx191] Numbers with E", "input": "12.65E-20", "expected": "1.265E-19"},
+ {"description": "[basx203] Numbers with E", "input": "12.65E+20", "expected": "1.265E+21"},
+ {"description": "[basx195] Numbers with E", "input": "12.65E-2", "expected": "0.1265"},
+ {"description": "[basx199] Numbers with E", "input": "12.65E+2", "expected": "1265"},
+ {"description": "[basx194] Numbers with E", "input": "12.65E-3", "expected": "0.01265"},
+ {"description": "[basx200] Numbers with E", "input": "12.65E+3", "expected": "1.265E+4"},
+ {"description": "[basx193] Numbers with E", "input": "12.65E-4", "expected": "0.001265"},
+ {"description": "[basx201] Numbers with E", "input": "12.65E+4", "expected": "1.265E+5"},
+ {"description": "[basx192] Numbers with E", "input": "12.65E-8", "expected": "1.265E-7"},
+ {"description": "[basx202] Numbers with E", "input": "12.65E+8", "expected": "1.265E+9"},
+ {
+ "description": "[basx044] strings without E cannot generate E in result",
+ "input": "012.76",
+ "expected": "12.76"
+ },
+ {"description": "[basx042] strings without E cannot generate E in result", "input": "12.76"},
+ {
+ "description": "[basx046] strings without E cannot generate E in result",
+ "input": "17.",
+ "expected": "17"
+ },
+ {
+ "description": "[basx049] strings without E cannot generate E in result",
+ "input": "0044",
+ "expected": "44"
+ },
+ {
+ "description": "[basx048] strings without E cannot generate E in result",
+ "input": "044",
+ "expected": "44"
+ },
+ {"description": "[basx158] Numbers with E", "input": "44E+9", "expected": "4.4E+10"},
+ {"description": "[basx068] examples", "input": "50E-7", "expected": "0.0000050"},
+ {"description": "[basx169] Numbers with E", "input": "100e+009", "expected": "1.00E+11"},
+ {"description": "[basx167] Numbers with E", "input": "100e+09", "expected": "1.00E+11"},
+ {"description": "[basx168] Numbers with E", "input": "100E+90", "expected": "1.00E+92"},
+ {"description": "[basx166] Numbers with E", "input": "100e+9", "expected": "1.00E+11"},
+ {"description": "[basx210] Numbers with E", "input": "126.5"},
+ {"description": "[basx217] Numbers with E", "input": "126.5E-0", "expected": "126.5"},
+ {"description": "[basx216] Numbers with E", "input": "126.5E-1", "expected": "12.65"},
+ {"description": "[basx218] Numbers with E", "input": "126.5E+1", "expected": "1265"},
+ {"description": "[basx211] Numbers with E", "input": "126.5E-20", "expected": "1.265E-18"},
+ {"description": "[basx223] Numbers with E", "input": "126.5E+20", "expected": "1.265E+22"},
+ {"description": "[basx215] Numbers with E", "input": "126.5E-2", "expected": "1.265"},
+ {"description": "[basx219] Numbers with E", "input": "126.5E+2", "expected": "1.265E+4"},
+ {"description": "[basx214] Numbers with E", "input": "126.5E-3", "expected": "0.1265"},
+ {"description": "[basx220] Numbers with E", "input": "126.5E+3", "expected": "1.265E+5"},
+ {"description": "[basx213] Numbers with E", "input": "126.5E-4", "expected": "0.01265"},
+ {"description": "[basx221] Numbers with E", "input": "126.5E+4", "expected": "1.265E+6"},
+ {"description": "[basx212] Numbers with E", "input": "126.5E-8", "expected": "0.000001265"},
+ {"description": "[basx222] Numbers with E", "input": "126.5E+8", "expected": "1.265E+10"},
+ {
+ "description": "[basx006] conform to rules and exponent will be in permitted range).",
+ "input": "1000"
+ },
+ {"description": "[basx230] Numbers with E", "input": "1265"},
+ {"description": "[basx237] Numbers with E", "input": "1265E-0", "expected": "1265"},
+ {"description": "[basx236] Numbers with E", "input": "1265E-1", "expected": "126.5"},
+ {"description": "[basx238] Numbers with E", "input": "1265E+1", "expected": "1.265E+4"},
+ {"description": "[basx231] Numbers with E", "input": "1265E-20", "expected": "1.265E-17"},
+ {"description": "[basx243] Numbers with E", "input": "1265E+20", "expected": "1.265E+23"},
+ {"description": "[basx235] Numbers with E", "input": "1265E-2", "expected": "12.65"},
+ {"description": "[basx239] Numbers with E", "input": "1265E+2", "expected": "1.265E+5"},
+ {"description": "[basx234] Numbers with E", "input": "1265E-3", "expected": "1.265"},
+ {"description": "[basx240] Numbers with E", "input": "1265E+3", "expected": "1.265E+6"},
+ {"description": "[basx233] Numbers with E", "input": "1265E-4", "expected": "0.1265"},
+ {"description": "[basx241] Numbers with E", "input": "1265E+4", "expected": "1.265E+7"},
+ {"description": "[basx232] Numbers with E", "input": "1265E-8", "expected": "0.00001265"},
+ {"description": "[basx242] Numbers with E", "input": "1265E+8", "expected": "1.265E+11"},
+ {
+ "description": "[basx060] strings without E cannot generate E in result",
+ "input": "345678.5432"
+ },
+ {
+ "description": "[basx059] strings without E cannot generate E in result",
+ "input": "0345678.54321",
+ "expected": "345678.54321"
+ },
+ {
+ "description": "[basx058] strings without E cannot generate E in result",
+ "input": "345678.543210"
+ },
+ {
+ "description": "[basx057] strings without E cannot generate E in result",
+ "input": "2345678.543210"
+ },
+ {
+ "description": "[basx056] strings without E cannot generate E in result",
+ "input": "12345678.543210"
+ },
+ {
+ "description": "[basx031] conform to rules and exponent will be in permitted range).",
+ "input": "123456789.000000"
+ },
+ {
+ "description": "[basx030] conform to rules and exponent will be in permitted range).",
+ "input": "123456789.123456"
+ },
+ {
+ "description": "[basx032] conform to rules and exponent will be in permitted range).",
+ "input": "123456789123456"
+ }
+];
- data.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- var output = NumberDecimal(testCase.input).toString();
- if (testCase.expected) {
- assert.eq(output, `NumberDecimal("${testCase.expected}")`);
- } else {
- assert.eq(output, `NumberDecimal("${testCase.input}")`);
- }
- });
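+// Each case round-trips through NumberDecimal: toString() must yield the
+// canonical 'expected' form, or echo 'input' when no 'expected' is given.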
+data.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ var output = NumberDecimal(testCase.input).toString();
+ if (testCase.expected) {
+ assert.eq(output, `NumberDecimal("${testCase.expected}")`);
+ } else {
+ assert.eq(output, `NumberDecimal("${testCase.input}")`);
+ }
+});
}());
\ No newline at end of file
diff --git a/jstests/decimal/decimal128_test4.js b/jstests/decimal/decimal128_test4.js
index 1999ecd67aa..7ec4f14c303 100644
--- a/jstests/decimal/decimal128_test4.js
+++ b/jstests/decimal/decimal128_test4.js
@@ -3,143 +3,137 @@
*/
(function() {
- "use strict";
+"use strict";
- var testData = [
- {
- "description": "[basx023] conform to rules and exponent will be in permitted range).",
- "input": "-0.1"
- },
+var testData = [
+ {
+ "description": "[basx023] conform to rules and exponent will be in permitted range).",
+ "input": "-0.1"
+ },
- {
- "description": "[basx045] strings without E cannot generate E in result",
- "input": "+0.003",
- "expected": "0.003"
- },
- {"description": "[basx610] Zeros", "input": ".0", "expected": "0.0"},
- {"description": "[basx612] Zeros", "input": "-.0", "expected": "-0.0"},
- {
- "description": "[basx043] strings without E cannot generate E in result",
- "input": "+12.76",
- "expected": "12.76"
- },
- {
- "description": "[basx055] strings without E cannot generate E in result",
- "input": "0.00000005",
- "expected": "5E-8"
- },
- {
- "description": "[basx054] strings without E cannot generate E in result",
- "input": "0.0000005",
- "expected": "5E-7"
- },
- {
- "description": "[basx052] strings without E cannot generate E in result",
- "input": "0.000005"
- },
- {
- "description": "[basx051] strings without E cannot generate E in result",
- "input": "00.00005",
- "expected": "0.00005"
- },
- {
- "description": "[basx050] strings without E cannot generate E in result",
- "input": "0.0005"
- },
- {
- "description": "[basx047] strings without E cannot generate E in result",
- "input": ".5",
- "expected": "0.5"
- },
- {
- "description": "[dqbsr431] check rounding modes heeded (Rounded)",
- "input": "1.1111111111111111111111111111123450",
- "expected": "1.111111111111111111111111111112345"
- },
- {
- "description": "OK2",
- "input": ".100000000000000000000000000000000000000000000000000000000000",
- "expected": "0.1000000000000000000000000000000000"
- }
- ];
+ {
+ "description": "[basx045] strings without E cannot generate E in result",
+ "input": "+0.003",
+ "expected": "0.003"
+ },
+ {"description": "[basx610] Zeros", "input": ".0", "expected": "0.0"},
+ {"description": "[basx612] Zeros", "input": "-.0", "expected": "-0.0"},
+ {
+ "description": "[basx043] strings without E cannot generate E in result",
+ "input": "+12.76",
+ "expected": "12.76"
+ },
+ {
+ "description": "[basx055] strings without E cannot generate E in result",
+ "input": "0.00000005",
+ "expected": "5E-8"
+ },
+ {
+ "description": "[basx054] strings without E cannot generate E in result",
+ "input": "0.0000005",
+ "expected": "5E-7"
+ },
+ {"description": "[basx052] strings without E cannot generate E in result", "input": "0.000005"},
+ {
+ "description": "[basx051] strings without E cannot generate E in result",
+ "input": "00.00005",
+ "expected": "0.00005"
+ },
+ {"description": "[basx050] strings without E cannot generate E in result", "input": "0.0005"},
+ {
+ "description": "[basx047] strings without E cannot generate E in result",
+ "input": ".5",
+ "expected": "0.5"
+ },
+ {
+ "description": "[dqbsr431] check rounding modes heeded (Rounded)",
+ "input": "1.1111111111111111111111111111123450",
+ "expected": "1.111111111111111111111111111112345"
+ },
+ {
+ "description": "OK2",
+ "input": ".100000000000000000000000000000000000000000000000000000000000",
+ "expected": "0.1000000000000000000000000000000000"
+ }
+];
- var parseErrors = [
- {"description": "[basx564] Near-specials (Conversion_syntax)", "string": "Infi"},
- {"description": "[basx565] Near-specials (Conversion_syntax)", "string": "Infin"},
- {"description": "[basx566] Near-specials (Conversion_syntax)", "string": "Infini"},
- {"description": "[basx567] Near-specials (Conversion_syntax)", "string": "Infinit"},
- {"description": "[basx568] Near-specials (Conversion_syntax)", "string": "-Infinit"},
- {
- "description":
- "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)",
- "string": ".Infinity"
- },
- {"description": "[basx562] Near-specials (Conversion_syntax)", "string": "NaNq"},
- {"description": "[basx563] Near-specials (Conversion_syntax)", "string": "NaNs"},
- {
- "description": "[dqbas939] overflow results at different rounding modes " +
- "(Overflow & Inexact & Rounded)",
- "string": "-7e10000"
- },
- {
- "description": "[dqbsr534] negatives (Rounded & Inexact)",
- "string": "-1.11111111111111111111111111111234650"
- },
- {
- "description": "[dqbsr535] negatives (Rounded & Inexact)",
- "string": "-1.11111111111111111111111111111234551"
- },
- {
- "description": "[dqbsr533] negatives (Rounded & Inexact)",
- "string": "-1.11111111111111111111111111111234550"
- },
- {
- "description": "[dqbsr532] negatives (Rounded & Inexact)",
- "string": "-1.11111111111111111111111111111234549"
- },
- {
- "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)",
- "string": "1.11111111111111111111111111111234549"
- },
- {
- "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)",
- "string": "1.11111111111111111111111111111234550"
- },
- {
- "description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)",
- "string": "1.11111111111111111111111111111234551"
- },
- {
- "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)",
- "string": "1.11111111111111111111111111111234650"
- },
- {
- "description": "[dqbas938] overflow results at different rounding modes " +
- "(Overflow & Inexact & Rounded)",
- "string": "7e10000"
- },
- {
- "description": "Inexact rounding#1",
- "string": "100000000000000000000000000000000000000000000000000000000001"
- },
- {"description": "Inexact rounding#2", "string": "1E-6177"}
- ];
+var parseErrors = [
+ {"description": "[basx564] Near-specials (Conversion_syntax)", "string": "Infi"},
+ {"description": "[basx565] Near-specials (Conversion_syntax)", "string": "Infin"},
+ {"description": "[basx566] Near-specials (Conversion_syntax)", "string": "Infini"},
+ {"description": "[basx567] Near-specials (Conversion_syntax)", "string": "Infinit"},
+ {"description": "[basx568] Near-specials (Conversion_syntax)", "string": "-Infinit"},
+ {
+ "description":
+ "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": ".Infinity"
+ },
+ {"description": "[basx562] Near-specials (Conversion_syntax)", "string": "NaNq"},
+ {"description": "[basx563] Near-specials (Conversion_syntax)", "string": "NaNs"},
+ {
+ "description": "[dqbas939] overflow results at different rounding modes " +
+ "(Overflow & Inexact & Rounded)",
+ "string": "-7e10000"
+ },
+ {
+ "description": "[dqbsr534] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234650"
+ },
+ {
+ "description": "[dqbsr535] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234551"
+ },
+ {
+ "description": "[dqbsr533] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234550"
+ },
+ {
+ "description": "[dqbsr532] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234549"
+ },
+ {
+ "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234549"
+ },
+ {
+ "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234550"
+ },
+ {
+ "description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234551"
+ },
+ {
+ "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234650"
+ },
+ {
+ "description": "[dqbas938] overflow results at different rounding modes " +
+ "(Overflow & Inexact & Rounded)",
+ "string": "7e10000"
+ },
+ {
+ "description": "Inexact rounding#1",
+ "string": "100000000000000000000000000000000000000000000000000000000001"
+ },
+ {"description": "Inexact rounding#2", "string": "1E-6177"}
+];
- testData.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- var output = NumberDecimal(testCase.input).toString();
- if (testCase.expected) {
- assert.eq(output, `NumberDecimal("${testCase.expected}")`);
- } else {
- assert.eq(output, `NumberDecimal("${testCase.input}")`);
- }
- });
+testData.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ var output = NumberDecimal(testCase.input).toString();
+ if (testCase.expected) {
+ assert.eq(output, `NumberDecimal("${testCase.expected}")`);
+ } else {
+ assert.eq(output, `NumberDecimal("${testCase.input}")`);
+ }
+});
- parseErrors.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- function test() {
- NumberDecimal(testCase.string);
- }
- assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
- });
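+// Each malformed string must cause NumberDecimal() to throw a parse error.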
+parseErrors.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ function test() {
+ NumberDecimal(testCase.string);
+ }
+ assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
+});
}());
\ No newline at end of file
diff --git a/jstests/decimal/decimal128_test5.js b/jstests/decimal/decimal128_test5.js
index d51ab1282c2..2e264cdca6e 100644
--- a/jstests/decimal/decimal128_test5.js
+++ b/jstests/decimal/decimal128_test5.js
@@ -3,322 +3,321 @@
*/
(function() {
- "use strict";
+"use strict";
- var testData = [
- {
- "description": "[decq035] fold-downs (more below) (Clamped)",
- "input": "1.23E+6144",
- "expected": "1.230000000000000000000000000000000E+6144"
- },
- {
- "description": "[decq037] fold-downs (more below) (Clamped)",
- "input": "1E+6144",
- "expected": "1.000000000000000000000000000000000E+6144"
- },
- {
- "description": "[decq077] Nmin and below (Subnormal)",
- "input": "0.100000000000000000000000000000000E-6143",
- "expected": "1.00000000000000000000000000000000E-6144"
- },
- {
- "description": "[decq078] Nmin and below (Subnormal)",
- "input": "1.00000000000000000000000000000000E-6144"
- },
- {
- "description": "[decq079] Nmin and below (Subnormal)",
- "input": "0.000000000000000000000000000000010E-6143",
- "expected": "1.0E-6175"
- },
- {"description": "[decq080] Nmin and below (Subnormal)", "input": "1.0E-6175"},
- {
- "description": "[decq081] Nmin and below (Subnormal)",
- "input": "0.00000000000000000000000000000001E-6143",
- "expected": "1E-6175"
- },
- {"description": "[decq082] Nmin and below (Subnormal)", "input": "1E-6175"},
- {
- "description": "[decq083] Nmin and below (Subnormal)",
- "input": "0.000000000000000000000000000000001E-6143",
- "expected": "1E-6176"
- },
- {"description": "[decq084] Nmin and below (Subnormal)", "input": "1E-6176"},
- {
- "description": "[decq090] underflows cannot be tested for simple copies, " +
- "check edge cases (Subnormal)",
- "input": "1e-6176",
- "expected": "1E-6176"
- },
- {
- "description": "[decq100] underflows cannot be tested for simple copies, " +
- "check edge cases (Subnormal)",
- "input": "999999999999999999999999999999999e-6176",
- "expected": "9.99999999999999999999999999999999E-6144"
- },
- {
- "description": "[decq130] fold-downs (more below) (Clamped)",
- "input": "-1.23E+6144",
- "expected": "-1.230000000000000000000000000000000E+6144"
- },
- {
- "description": "[decq132] fold-downs (more below) (Clamped)",
- "input": "-1E+6144",
- "expected": "-1.000000000000000000000000000000000E+6144"
- },
- {
- "description": "[decq177] Nmin and below (Subnormal)",
- "input": "-0.100000000000000000000000000000000E-6143",
- "expected": "-1.00000000000000000000000000000000E-6144"
- },
- {
- "description": "[decq178] Nmin and below (Subnormal)",
- "input": "-1.00000000000000000000000000000000E-6144"
- },
- {
- "description": "[decq179] Nmin and below (Subnormal)",
- "input": "-0.000000000000000000000000000000010E-6143",
- "expected": "-1.0E-6175"
- },
- {"description": "[decq180] Nmin and below (Subnormal)", "input": "-1.0E-6175"},
- {
- "description": "[decq181] Nmin and below (Subnormal)",
- "input": "-0.00000000000000000000000000000001E-6143",
- "expected": "-1E-6175"
- },
- {"description": "[decq182] Nmin and below (Subnormal)", "input": "-1E-6175"},
- {
- "description": "[decq183] Nmin and below (Subnormal)",
- "input": "-0.000000000000000000000000000000001E-6143",
- "expected": "-1E-6176"
- },
- {"description": "[decq184] Nmin and below (Subnormal)", "input": "-1E-6176"},
- {
- "description": "[decq190] underflow edge cases (Subnormal)",
- "input": "-1e-6176",
- "expected": "-1E-6176"
- },
- {
- "description": "[decq200] underflow edge cases (Subnormal)",
- "input": "-999999999999999999999999999999999e-6176",
- "expected": "-9.99999999999999999999999999999999E-6144"
- },
- {"description": "[decq400] zeros (Clamped)", "input": "0E-8000", "expected": "0E-6176"},
- {"description": "[decq401] zeros (Clamped)", "input": "0E-6177", "expected": "0E-6176"},
- {
- "description": "[decq414] clamped zeros... (Clamped)",
- "input": "0E+6112",
- "expected": "0E+6111"
- },
- {
- "description": "[decq416] clamped zeros... (Clamped)",
- "input": "0E+6144",
- "expected": "0E+6111"
- },
- {
- "description": "[decq418] clamped zeros... (Clamped)",
- "input": "0E+8000",
- "expected": "0E+6111"
- },
- {
- "description": "[decq420] negative zeros (Clamped)",
- "input": "-0E-8000",
- "expected": "-0E-6176"
- },
- {
- "description": "[decq421] negative zeros (Clamped)",
- "input": "-0E-6177",
- "expected": "-0E-6176"
- },
- {
- "description": "[decq434] clamped zeros... (Clamped)",
- "input": "-0E+6112",
- "expected": "-0E+6111"
- },
- {
- "description": "[decq436] clamped zeros... (Clamped)",
- "input": "-0E+6144",
- "expected": "-0E+6111"
- },
- {
- "description": "[decq438] clamped zeros... (Clamped)",
- "input": "-0E+8000",
- "expected": "-0E+6111"
- },
- {
- "description": "[decq601] fold-down full sequence (Clamped)",
- "input": "1E+6144",
- "expected": "1.000000000000000000000000000000000E+6144"
- },
- {
- "description": "[decq603] fold-down full sequence (Clamped)",
- "input": "1E+6143",
- "expected": "1.00000000000000000000000000000000E+6143"
- },
- {
- "description": "[decq605] fold-down full sequence (Clamped)",
- "input": "1E+6142",
- "expected": "1.0000000000000000000000000000000E+6142"
- },
- {
- "description": "[decq607] fold-down full sequence (Clamped)",
- "input": "1E+6141",
- "expected": "1.000000000000000000000000000000E+6141"
- },
- {
- "description": "[decq609] fold-down full sequence (Clamped)",
- "input": "1E+6140",
- "expected": "1.00000000000000000000000000000E+6140"
- },
- {
- "description": "[decq611] fold-down full sequence (Clamped)",
- "input": "1E+6139",
- "expected": "1.0000000000000000000000000000E+6139"
- },
- {
- "description": "[decq613] fold-down full sequence (Clamped)",
- "input": "1E+6138",
- "expected": "1.000000000000000000000000000E+6138"
- },
- {
- "description": "[decq615] fold-down full sequence (Clamped)",
- "input": "1E+6137",
- "expected": "1.00000000000000000000000000E+6137"
- },
- {
- "description": "[decq617] fold-down full sequence (Clamped)",
- "input": "1E+6136",
- "expected": "1.0000000000000000000000000E+6136"
- },
- {
- "description": "[decq619] fold-down full sequence (Clamped)",
- "input": "1E+6135",
- "expected": "1.000000000000000000000000E+6135"
- },
- {
- "description": "[decq621] fold-down full sequence (Clamped)",
- "input": "1E+6134",
- "expected": "1.00000000000000000000000E+6134"
- },
- {
- "description": "[decq623] fold-down full sequence (Clamped)",
- "input": "1E+6133",
- "expected": "1.0000000000000000000000E+6133"
- },
- {
- "description": "[decq625] fold-down full sequence (Clamped)",
- "input": "1E+6132",
- "expected": "1.000000000000000000000E+6132"
- },
- {
- "description": "[decq627] fold-down full sequence (Clamped)",
- "input": "1E+6131",
- "expected": "1.00000000000000000000E+6131"
- },
- {
- "description": "[decq629] fold-down full sequence (Clamped)",
- "input": "1E+6130",
- "expected": "1.0000000000000000000E+6130"
- },
- {
- "description": "[decq631] fold-down full sequence (Clamped)",
- "input": "1E+6129",
- "expected": "1.000000000000000000E+6129"
- },
- {
- "description": "[decq633] fold-down full sequence (Clamped)",
- "input": "1E+6128",
- "expected": "1.00000000000000000E+6128"
- },
- {
- "description": "[decq635] fold-down full sequence (Clamped)",
- "input": "1E+6127",
- "expected": "1.0000000000000000E+6127"
- },
- {
- "description": "[decq637] fold-down full sequence (Clamped)",
- "input": "1E+6126",
- "expected": "1.000000000000000E+6126"
- },
- {
- "description": "[decq639] fold-down full sequence (Clamped)",
- "input": "1E+6125",
- "expected": "1.00000000000000E+6125"
- },
- {
- "description": "[decq641] fold-down full sequence (Clamped)",
- "input": "1E+6124",
- "expected": "1.0000000000000E+6124"
- },
- {
- "description": "[decq643] fold-down full sequence (Clamped)",
- "input": "1E+6123",
- "expected": "1.000000000000E+6123"
- },
- {
- "description": "[decq645] fold-down full sequence (Clamped)",
- "input": "1E+6122",
- "expected": "1.00000000000E+6122"
- },
- {
- "description": "[decq647] fold-down full sequence (Clamped)",
- "input": "1E+6121",
- "expected": "1.0000000000E+6121"
- },
- {
- "description": "[decq649] fold-down full sequence (Clamped)",
- "input": "1E+6120",
- "expected": "1.000000000E+6120"
- },
- {
- "description": "[decq651] fold-down full sequence (Clamped)",
- "input": "1E+6119",
- "expected": "1.00000000E+6119"
- },
- {
- "description": "[decq653] fold-down full sequence (Clamped)",
- "input": "1E+6118",
- "expected": "1.0000000E+6118"
- },
- {
- "description": "[decq655] fold-down full sequence (Clamped)",
- "input": "1E+6117",
- "expected": "1.000000E+6117"
- },
- {
- "description": "[decq657] fold-down full sequence (Clamped)",
- "input": "1E+6116",
- "expected": "1.00000E+6116"
- },
- {
- "description": "[decq659] fold-down full sequence (Clamped)",
- "input": "1E+6115",
- "expected": "1.0000E+6115"
- },
- {
- "description": "[decq661] fold-down full sequence (Clamped)",
- "input": "1E+6114",
- "expected": "1.000E+6114"
- },
- {
- "description": "[decq663] fold-down full sequence (Clamped)",
- "input": "1E+6113",
- "expected": "1.00E+6113"
- },
- {
- "description": "[decq665] fold-down full sequence (Clamped)",
- "input": "1E+6112",
- "expected": "1.0E+6112"
- }
- ];
-
- testData.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- var output = NumberDecimal(testCase.input).toString();
- if (testCase.expected) {
- assert.eq(output, `NumberDecimal("${testCase.expected}")`);
- } else {
- assert.eq(output, `NumberDecimal("${testCase.input}")`);
- }
- });
+var testData = [
+ {
+ "description": "[decq035] fold-downs (more below) (Clamped)",
+ "input": "1.23E+6144",
+ "expected": "1.230000000000000000000000000000000E+6144"
+ },
+ {
+ "description": "[decq037] fold-downs (more below) (Clamped)",
+ "input": "1E+6144",
+ "expected": "1.000000000000000000000000000000000E+6144"
+ },
+ {
+ "description": "[decq077] Nmin and below (Subnormal)",
+ "input": "0.100000000000000000000000000000000E-6143",
+ "expected": "1.00000000000000000000000000000000E-6144"
+ },
+ {
+ "description": "[decq078] Nmin and below (Subnormal)",
+ "input": "1.00000000000000000000000000000000E-6144"
+ },
+ {
+ "description": "[decq079] Nmin and below (Subnormal)",
+ "input": "0.000000000000000000000000000000010E-6143",
+ "expected": "1.0E-6175"
+ },
+ {"description": "[decq080] Nmin and below (Subnormal)", "input": "1.0E-6175"},
+ {
+ "description": "[decq081] Nmin and below (Subnormal)",
+ "input": "0.00000000000000000000000000000001E-6143",
+ "expected": "1E-6175"
+ },
+ {"description": "[decq082] Nmin and below (Subnormal)", "input": "1E-6175"},
+ {
+ "description": "[decq083] Nmin and below (Subnormal)",
+ "input": "0.000000000000000000000000000000001E-6143",
+ "expected": "1E-6176"
+ },
+ {"description": "[decq084] Nmin and below (Subnormal)", "input": "1E-6176"},
+ {
+ "description": "[decq090] underflows cannot be tested for simple copies, " +
+ "check edge cases (Subnormal)",
+ "input": "1e-6176",
+ "expected": "1E-6176"
+ },
+ {
+ "description": "[decq100] underflows cannot be tested for simple copies, " +
+ "check edge cases (Subnormal)",
+ "input": "999999999999999999999999999999999e-6176",
+ "expected": "9.99999999999999999999999999999999E-6144"
+ },
+ {
+ "description": "[decq130] fold-downs (more below) (Clamped)",
+ "input": "-1.23E+6144",
+ "expected": "-1.230000000000000000000000000000000E+6144"
+ },
+ {
+ "description": "[decq132] fold-downs (more below) (Clamped)",
+ "input": "-1E+6144",
+ "expected": "-1.000000000000000000000000000000000E+6144"
+ },
+ {
+ "description": "[decq177] Nmin and below (Subnormal)",
+ "input": "-0.100000000000000000000000000000000E-6143",
+ "expected": "-1.00000000000000000000000000000000E-6144"
+ },
+ {
+ "description": "[decq178] Nmin and below (Subnormal)",
+ "input": "-1.00000000000000000000000000000000E-6144"
+ },
+ {
+ "description": "[decq179] Nmin and below (Subnormal)",
+ "input": "-0.000000000000000000000000000000010E-6143",
+ "expected": "-1.0E-6175"
+ },
+ {"description": "[decq180] Nmin and below (Subnormal)", "input": "-1.0E-6175"},
+ {
+ "description": "[decq181] Nmin and below (Subnormal)",
+ "input": "-0.00000000000000000000000000000001E-6143",
+ "expected": "-1E-6175"
+ },
+ {"description": "[decq182] Nmin and below (Subnormal)", "input": "-1E-6175"},
+ {
+ "description": "[decq183] Nmin and below (Subnormal)",
+ "input": "-0.000000000000000000000000000000001E-6143",
+ "expected": "-1E-6176"
+ },
+ {"description": "[decq184] Nmin and below (Subnormal)", "input": "-1E-6176"},
+ {
+ "description": "[decq190] underflow edge cases (Subnormal)",
+ "input": "-1e-6176",
+ "expected": "-1E-6176"
+ },
+ {
+ "description": "[decq200] underflow edge cases (Subnormal)",
+ "input": "-999999999999999999999999999999999e-6176",
+ "expected": "-9.99999999999999999999999999999999E-6144"
+ },
+ {"description": "[decq400] zeros (Clamped)", "input": "0E-8000", "expected": "0E-6176"},
+ {"description": "[decq401] zeros (Clamped)", "input": "0E-6177", "expected": "0E-6176"},
+ {
+ "description": "[decq414] clamped zeros... (Clamped)",
+ "input": "0E+6112",
+ "expected": "0E+6111"
+ },
+ {
+ "description": "[decq416] clamped zeros... (Clamped)",
+ "input": "0E+6144",
+ "expected": "0E+6111"
+ },
+ {
+ "description": "[decq418] clamped zeros... (Clamped)",
+ "input": "0E+8000",
+ "expected": "0E+6111"
+ },
+ {
+ "description": "[decq420] negative zeros (Clamped)",
+ "input": "-0E-8000",
+ "expected": "-0E-6176"
+ },
+ {
+ "description": "[decq421] negative zeros (Clamped)",
+ "input": "-0E-6177",
+ "expected": "-0E-6176"
+ },
+ {
+ "description": "[decq434] clamped zeros... (Clamped)",
+ "input": "-0E+6112",
+ "expected": "-0E+6111"
+ },
+ {
+ "description": "[decq436] clamped zeros... (Clamped)",
+ "input": "-0E+6144",
+ "expected": "-0E+6111"
+ },
+ {
+ "description": "[decq438] clamped zeros... (Clamped)",
+ "input": "-0E+8000",
+ "expected": "-0E+6111"
+ },
+ {
+ "description": "[decq601] fold-down full sequence (Clamped)",
+ "input": "1E+6144",
+ "expected": "1.000000000000000000000000000000000E+6144"
+ },
+ {
+ "description": "[decq603] fold-down full sequence (Clamped)",
+ "input": "1E+6143",
+ "expected": "1.00000000000000000000000000000000E+6143"
+ },
+ {
+ "description": "[decq605] fold-down full sequence (Clamped)",
+ "input": "1E+6142",
+ "expected": "1.0000000000000000000000000000000E+6142"
+ },
+ {
+ "description": "[decq607] fold-down full sequence (Clamped)",
+ "input": "1E+6141",
+ "expected": "1.000000000000000000000000000000E+6141"
+ },
+ {
+ "description": "[decq609] fold-down full sequence (Clamped)",
+ "input": "1E+6140",
+ "expected": "1.00000000000000000000000000000E+6140"
+ },
+ {
+ "description": "[decq611] fold-down full sequence (Clamped)",
+ "input": "1E+6139",
+ "expected": "1.0000000000000000000000000000E+6139"
+ },
+ {
+ "description": "[decq613] fold-down full sequence (Clamped)",
+ "input": "1E+6138",
+ "expected": "1.000000000000000000000000000E+6138"
+ },
+ {
+ "description": "[decq615] fold-down full sequence (Clamped)",
+ "input": "1E+6137",
+ "expected": "1.00000000000000000000000000E+6137"
+ },
+ {
+ "description": "[decq617] fold-down full sequence (Clamped)",
+ "input": "1E+6136",
+ "expected": "1.0000000000000000000000000E+6136"
+ },
+ {
+ "description": "[decq619] fold-down full sequence (Clamped)",
+ "input": "1E+6135",
+ "expected": "1.000000000000000000000000E+6135"
+ },
+ {
+ "description": "[decq621] fold-down full sequence (Clamped)",
+ "input": "1E+6134",
+ "expected": "1.00000000000000000000000E+6134"
+ },
+ {
+ "description": "[decq623] fold-down full sequence (Clamped)",
+ "input": "1E+6133",
+ "expected": "1.0000000000000000000000E+6133"
+ },
+ {
+ "description": "[decq625] fold-down full sequence (Clamped)",
+ "input": "1E+6132",
+ "expected": "1.000000000000000000000E+6132"
+ },
+ {
+ "description": "[decq627] fold-down full sequence (Clamped)",
+ "input": "1E+6131",
+ "expected": "1.00000000000000000000E+6131"
+ },
+ {
+ "description": "[decq629] fold-down full sequence (Clamped)",
+ "input": "1E+6130",
+ "expected": "1.0000000000000000000E+6130"
+ },
+ {
+ "description": "[decq631] fold-down full sequence (Clamped)",
+ "input": "1E+6129",
+ "expected": "1.000000000000000000E+6129"
+ },
+ {
+ "description": "[decq633] fold-down full sequence (Clamped)",
+ "input": "1E+6128",
+ "expected": "1.00000000000000000E+6128"
+ },
+ {
+ "description": "[decq635] fold-down full sequence (Clamped)",
+ "input": "1E+6127",
+ "expected": "1.0000000000000000E+6127"
+ },
+ {
+ "description": "[decq637] fold-down full sequence (Clamped)",
+ "input": "1E+6126",
+ "expected": "1.000000000000000E+6126"
+ },
+ {
+ "description": "[decq639] fold-down full sequence (Clamped)",
+ "input": "1E+6125",
+ "expected": "1.00000000000000E+6125"
+ },
+ {
+ "description": "[decq641] fold-down full sequence (Clamped)",
+ "input": "1E+6124",
+ "expected": "1.0000000000000E+6124"
+ },
+ {
+ "description": "[decq643] fold-down full sequence (Clamped)",
+ "input": "1E+6123",
+ "expected": "1.000000000000E+6123"
+ },
+ {
+ "description": "[decq645] fold-down full sequence (Clamped)",
+ "input": "1E+6122",
+ "expected": "1.00000000000E+6122"
+ },
+ {
+ "description": "[decq647] fold-down full sequence (Clamped)",
+ "input": "1E+6121",
+ "expected": "1.0000000000E+6121"
+ },
+ {
+ "description": "[decq649] fold-down full sequence (Clamped)",
+ "input": "1E+6120",
+ "expected": "1.000000000E+6120"
+ },
+ {
+ "description": "[decq651] fold-down full sequence (Clamped)",
+ "input": "1E+6119",
+ "expected": "1.00000000E+6119"
+ },
+ {
+ "description": "[decq653] fold-down full sequence (Clamped)",
+ "input": "1E+6118",
+ "expected": "1.0000000E+6118"
+ },
+ {
+ "description": "[decq655] fold-down full sequence (Clamped)",
+ "input": "1E+6117",
+ "expected": "1.000000E+6117"
+ },
+ {
+ "description": "[decq657] fold-down full sequence (Clamped)",
+ "input": "1E+6116",
+ "expected": "1.00000E+6116"
+ },
+ {
+ "description": "[decq659] fold-down full sequence (Clamped)",
+ "input": "1E+6115",
+ "expected": "1.0000E+6115"
+ },
+ {
+ "description": "[decq661] fold-down full sequence (Clamped)",
+ "input": "1E+6114",
+ "expected": "1.000E+6114"
+ },
+ {
+ "description": "[decq663] fold-down full sequence (Clamped)",
+ "input": "1E+6113",
+ "expected": "1.00E+6113"
+ },
+ {
+ "description": "[decq665] fold-down full sequence (Clamped)",
+ "input": "1E+6112",
+ "expected": "1.0E+6112"
+ }
+];
+testData.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ var output = NumberDecimal(testCase.input).toString();
+ if (testCase.expected) {
+ assert.eq(output, `NumberDecimal("${testCase.expected}")`);
+ } else {
+ assert.eq(output, `NumberDecimal("${testCase.input}")`);
+ }
+});
}());
diff --git a/jstests/decimal/decimal128_test6.js b/jstests/decimal/decimal128_test6.js
index 63bf6ea92ef..07a52669e33 100644
--- a/jstests/decimal/decimal128_test6.js
+++ b/jstests/decimal/decimal128_test6.js
@@ -3,47 +3,47 @@
*/
(function() {
- "use strict";
+"use strict";
- var parseErrors = [
- {"description": "Incomplete Exponent", "string": "1e"},
- {"description": "Exponent at the beginning", "string": "E01"},
- {"description": "Just a decimal place", "string": "."},
- {"description": "2 decimal places", "string": "..3"},
- {"description": "2 decimal places", "string": ".13.3"},
- {"description": "2 decimal places", "string": "1..3"},
- {"description": "2 decimal places", "string": "1.3.4"},
- {"description": "2 decimal places", "string": "1.34."},
- {"description": "Decimal with no digits", "string": ".e"},
- {"description": "2 signs", "string": "+-32.4"},
- {"description": "2 signs", "string": "-+32.4"},
- {"description": "2 negative signs", "string": "--32.4"},
- {"description": "2 negative signs", "string": "-32.-4"},
- {"description": "End in negative sign", "string": "32.0-"},
- {"description": "2 negative signs", "string": "32.4E--21"},
- {"description": "2 negative signs", "string": "32.4E-2-1"},
- {"description": "2 signs", "string": "32.4E+-21"},
- {"description": "Empty string", "string": ""},
- {"description": "leading white space positive number", "string": " 1"},
- {"description": "leading white space negative number", "string": " -1"},
- {"description": "trailing white space", "string": "1 "},
- {"description": "Invalid", "string": "E"},
- {"description": "Invalid", "string": "invalid"},
- {"description": "Invalid", "string": "i"},
- {"description": "Invalid", "string": "in"},
- {"description": "Invalid", "string": "-in"},
- {"description": "Invalid", "string": "Na"},
- {"description": "Invalid", "string": "-Na"},
- {"description": "Invalid", "string": "1.23abc"},
- {"description": "Invalid", "string": "1.23abcE+02"},
- {"description": "Invalid", "string": "1.23E+0aabs2"}
- ];
+var parseErrors = [
+ {"description": "Incomplete Exponent", "string": "1e"},
+ {"description": "Exponent at the beginning", "string": "E01"},
+ {"description": "Just a decimal place", "string": "."},
+ {"description": "2 decimal places", "string": "..3"},
+ {"description": "2 decimal places", "string": ".13.3"},
+ {"description": "2 decimal places", "string": "1..3"},
+ {"description": "2 decimal places", "string": "1.3.4"},
+ {"description": "2 decimal places", "string": "1.34."},
+ {"description": "Decimal with no digits", "string": ".e"},
+ {"description": "2 signs", "string": "+-32.4"},
+ {"description": "2 signs", "string": "-+32.4"},
+ {"description": "2 negative signs", "string": "--32.4"},
+ {"description": "2 negative signs", "string": "-32.-4"},
+ {"description": "End in negative sign", "string": "32.0-"},
+ {"description": "2 negative signs", "string": "32.4E--21"},
+ {"description": "2 negative signs", "string": "32.4E-2-1"},
+ {"description": "2 signs", "string": "32.4E+-21"},
+ {"description": "Empty string", "string": ""},
+ {"description": "leading white space positive number", "string": " 1"},
+ {"description": "leading white space negative number", "string": " -1"},
+ {"description": "trailing white space", "string": "1 "},
+ {"description": "Invalid", "string": "E"},
+ {"description": "Invalid", "string": "invalid"},
+ {"description": "Invalid", "string": "i"},
+ {"description": "Invalid", "string": "in"},
+ {"description": "Invalid", "string": "-in"},
+ {"description": "Invalid", "string": "Na"},
+ {"description": "Invalid", "string": "-Na"},
+ {"description": "Invalid", "string": "1.23abc"},
+ {"description": "Invalid", "string": "1.23abcE+02"},
+ {"description": "Invalid", "string": "1.23E+0aabs2"}
+];
- parseErrors.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- function test() {
- NumberDecimal(testCase.string);
- }
- assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
- });
+parseErrors.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ function test() {
+ NumberDecimal(testCase.string);
+ }
+ assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
+});
}());
\ No newline at end of file
diff --git a/jstests/decimal/decimal128_test7.js b/jstests/decimal/decimal128_test7.js
index a0ea63d3efb..d9ff5774ade 100644
--- a/jstests/decimal/decimal128_test7.js
+++ b/jstests/decimal/decimal128_test7.js
@@ -3,384 +3,416 @@
*/
(function() {
- "use strict";
+"use strict";
- var parseErrors = [
- {"description": "[basx572] Near-specials " + "(Conversion_syntax)", "string": "-9Inf"},
- {
- "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "-1-"
- },
- {
- "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "0000.."
- },
- {
- "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": ".0000."
- },
- {
- "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "00..00"
- },
- {"description": "[basx569] Near-specials " + "(Conversion_syntax)", "string": "0Inf"},
- {"description": "[basx571] Near-specials " + "(Conversion_syntax)", "string": "-0Inf"},
- {"description": "[basx575] Near-specials " + "(Conversion_syntax)", "string": "0sNaN"},
- {
- "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "++1"
- },
- {
- "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "--1"
- },
- {
- "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "-+1"
- },
- {
- "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "+-1"
- },
- {
- "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": " +1"
- },
- {
- "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": " + 1"
- },
- {
- "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": " - 1"
- },
- {
- "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "."
- },
- {
- "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": ".."
- },
- {
- "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": ""
- },
- {
- "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "e100"
- },
- {
- "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "e+1"
- },
- {
- "description": "[basx577] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": ".e+1"
- },
- {
- "description": "[basx578] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "+.e+1"
- },
- {
- "description": "[basx581] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "E+1"
- },
- {
- "description": "[basx582] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": ".E+1"
- },
- {
- "description": "[basx583] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "+.E+1"
- },
- {
- "description": "[basx579] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "-.e+"
- },
- {
- "description": "[basx580] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "-.e"
- },
- {
- "description": "[basx584] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "-.E+"
- },
- {
- "description": "[basx585] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "-.E"
- },
- {
- "description": "[basx589] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "+.Inf"
- },
- {
- "description": "[basx586] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": ".NaN"
- },
- {
- "description": "[basx587] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "-.NaN"
- },
- {
- "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "ONE"
- },
- {"description": "[basx561] Near-specials " + "(Conversion_syntax)", "string": "qNaN"},
- {"description": "[basx573] Near-specials " + "(Conversion_syntax)", "string": "-sNa"},
- {
- "description": "[basx588] some baddies with dots and Es and dots and specials " +
- "(Conversion_syntax)",
- "string": "+.sNaN"
- },
- {
- "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "ten"
- },
- {
- "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "u0b65"
- },
- {
- "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "u0e5a"
- },
- {
- "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "x"
- },
- {"description": "[basx574] Near-specials " + "(Conversion_syntax)", "string": "xNaN"},
- {
- "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": ".123.5"
- },
- {
- "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1..2"
- },
- {
- "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1e1.0"
- },
- {
- "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E+1.2.3"
- },
- {
- "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1e123e"
- },
- {
- "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E+1.2"
- },
- {
- "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1e.1"
- },
- {
- "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1e1."
- },
- {
- "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E++1"
- },
- {
- "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E--1"
- },
- {
- "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E+-1"
- },
- {
- "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E-+1"
- },
- {
- "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E'1"
- },
- {
- "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E\"1"
- },
- {
- "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1e-"
- },
- {
- "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1E"
- },
- {
- "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1ee"
- },
- {
- "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1.2.1"
- },
- {
- "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1.23.4"
- },
- {
- "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "1.34.5"
- },
- {
- "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "01.35."
- },
- {
- "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "01.35-"
- },
- {
- "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "3+"
- },
- {
- "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "7e99999a"
- },
- {"description": "[basx570] Near-specials " + "(Conversion_syntax)", "string": "9Inf"},
- {
- "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "12 "
- },
- {
- "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "12-"
- },
- {
- "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "12e"
- },
- {
- "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "12e++"
- },
- {
- "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "12f4"
- },
- {
- "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "111e*123"
- },
- {
- "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "111e123-"
- },
- {
- "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "111e1*23"
- },
- {
- "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "111e+12+"
- },
- {
- "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "111e1-3-"
- },
- {
- "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "111E1e+3"
- },
- {
- "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "123,65"
- },
- {
- "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "7e12356789012x"
- },
- {
- "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
- "(Conversion_syntax)",
- "string": "7e123567890x"
- }
- ];
+var parseErrors = [
+ {
+ "description": "[basx572] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "-9Inf"
+ },
+ {
+ "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "-1-"
+ },
+ {
+ "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "0000.."
+ },
+ {
+ "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": ".0000."
+ },
+ {
+ "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "00..00"
+ },
+ {
+ "description": "[basx569] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "0Inf"
+ },
+ {
+ "description": "[basx571] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "-0Inf"
+ },
+ {
+ "description": "[basx575] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "0sNaN"
+ },
+ {
+ "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "++1"
+ },
+ {
+ "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "--1"
+ },
+ {
+ "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "-+1"
+ },
+ {
+ "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "+-1"
+ },
+ {
+ "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": " +1"
+ },
+ {
+ "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": " + 1"
+ },
+ {
+ "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": " - 1"
+ },
+ {
+ "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "."
+ },
+ {
+ "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": ".."
+ },
+ {
+ "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": ""
+ },
+ {
+ "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "e100"
+ },
+ {
+ "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "e+1"
+ },
+ {
+ "description": "[basx577] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": ".e+1"
+ },
+ {
+ "description": "[basx578] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "+.e+1"
+ },
+ {
+ "description": "[basx581] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "E+1"
+ },
+ {
+ "description": "[basx582] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": ".E+1"
+ },
+ {
+ "description": "[basx583] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "+.E+1"
+ },
+ {
+ "description": "[basx579] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "-.e+"
+ },
+ {
+ "description": "[basx580] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "-.e"
+ },
+ {
+ "description": "[basx584] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "-.E+"
+ },
+ {
+ "description": "[basx585] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "-.E"
+ },
+ {
+ "description": "[basx589] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "+.Inf"
+ },
+ {
+ "description": "[basx586] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": ".NaN"
+ },
+ {
+ "description": "[basx587] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "-.NaN"
+ },
+ {
+ "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "ONE"
+ },
+ {
+ "description": "[basx561] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "qNaN"
+ },
+ {
+ "description": "[basx573] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "-sNa"
+ },
+ {
+ "description": "[basx588] some baddies with dots and Es and dots and specials " +
+ "(Conversion_syntax)",
+ "string": "+.sNaN"
+ },
+ {
+ "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "ten"
+ },
+ {
+ "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "u0b65"
+ },
+ {
+ "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "u0e5a"
+ },
+ {
+ "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "x"
+ },
+ {
+ "description": "[basx574] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "xNaN"
+ },
+ {
+ "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": ".123.5"
+ },
+ {
+ "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1..2"
+ },
+ {
+ "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1e1.0"
+ },
+ {
+ "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E+1.2.3"
+ },
+ {
+ "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1e123e"
+ },
+ {
+ "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E+1.2"
+ },
+ {
+ "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1e.1"
+ },
+ {
+ "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1e1."
+ },
+ {
+ "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E++1"
+ },
+ {
+ "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E--1"
+ },
+ {
+ "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E+-1"
+ },
+ {
+ "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E-+1"
+ },
+ {
+ "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E'1"
+ },
+ {
+ "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E\"1"
+ },
+ {
+ "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1e-"
+ },
+ {
+ "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1E"
+ },
+ {
+ "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1ee"
+ },
+ {
+ "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1.2.1"
+ },
+ {
+ "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1.23.4"
+ },
+ {
+ "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "1.34.5"
+ },
+ {
+ "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "01.35."
+ },
+ {
+ "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "01.35-"
+ },
+ {
+ "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "3+"
+ },
+ {
+ "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "7e99999a"
+ },
+ {
+ "description": "[basx570] Near-specials " +
+ "(Conversion_syntax)",
+ "string": "9Inf"
+ },
+ {
+ "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "12 "
+ },
+ {
+ "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "12-"
+ },
+ {
+ "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "12e"
+ },
+ {
+ "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "12e++"
+ },
+ {
+ "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "12f4"
+ },
+ {
+ "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "111e*123"
+ },
+ {
+ "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "111e123-"
+ },
+ {
+ "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "111e1*23"
+ },
+ {
+ "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "111e+12+"
+ },
+ {
+ "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "111e1-3-"
+ },
+ {
+ "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "111E1e+3"
+ },
+ {
+ "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "123,65"
+ },
+ {
+ "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "7e12356789012x"
+ },
+ {
+ "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones " +
+ "(Conversion_syntax)",
+ "string": "7e123567890x"
+ }
+];
- parseErrors.forEach(function(testCase) {
- print(`Test - ${testCase.description}`);
- function test() {
- NumberDecimal(testCase.string);
- }
- assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
- });
+parseErrors.forEach(function(testCase) {
+ print(`Test - ${testCase.description}`);
+ function test() {
+ NumberDecimal(testCase.string);
+ }
+ assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
+});
}());
\ No newline at end of file
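Note on the hunk above: it feeds every string in parseErrors to NumberDecimal() and expects the
shell's Decimal128 parser to throw. A minimal standalone sketch of the same pattern, runnable in
the mongo shell (the helper name expectParseError is illustrative; the two sample strings come
from the list above):

    // Sketch: NumberDecimal() must reject strings that are not valid Decimal128
    // syntax; assert.throws runs the thunk and expects it to fail.
    function expectParseError(s) {
        assert.throws(function() {
            NumberDecimal(s);
        }, [], "expected '" + s + "' to fail Decimal128 parsing");
    }
    expectParseError("1ee");   // [basx548] doubled exponent marker
    expectParseError("1..2");  // [basx500] two decimal points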
diff --git a/jstests/decimal/decimal_constructors.js b/jstests/decimal/decimal_constructors.js
index dd8e6cfef75..93a5bfe9703 100644
--- a/jstests/decimal/decimal_constructors.js
+++ b/jstests/decimal/decimal_constructors.js
@@ -1,42 +1,40 @@
// Tests constructing NumberDecimal with various types
(function() {
- 'use strict';
- var col = db.d_constructors;
- col.drop();
+'use strict';
+var col = db.d_constructors;
+col.drop();
- // Insert some sample data.
+// Insert some sample data.
- assert.writeOK(col.insert([
- {d: NumberDecimal('1')},
- {d: NumberDecimal(1)},
- {d: NumberDecimal(NumberLong('1'))},
- {d: NumberDecimal(NumberInt('1'))},
- {d: NumberDecimal('NaN')},
- {d: NumberDecimal('-NaN')}
- ]),
- 'Initial insertion of decimals failed');
+assert.writeOK(col.insert([
+ {d: NumberDecimal('1')},
+ {d: NumberDecimal(1)},
+ {d: NumberDecimal(NumberLong('1'))},
+ {d: NumberDecimal(NumberInt('1'))},
+ {d: NumberDecimal('NaN')},
+ {d: NumberDecimal('-NaN')}
+]),
+ 'Initial insertion of decimals failed');
- var exactDoubleString = "1427247692705959881058285969449495136382746624";
- var exactDoubleTinyString =
- "0.00000000000000000000000000000000000000000000000000000000000062230152778611417071440640537801242405902521687211671331011166147896988340353834411839448231257136169569665895551224821247160434722900390625";
+var exactDoubleString = "1427247692705959881058285969449495136382746624";
+var exactDoubleTinyString =
+ "0.00000000000000000000000000000000000000000000000000000000000062230152778611417071440640537801242405902521687211671331011166147896988340353834411839448231257136169569665895551224821247160434722900390625";
- assert.throws(
- NumberDecimal, [exactDoubleString], 'Unexpected success in creating invalid Decimal128');
- assert.throws(NumberDecimal,
- [exactDoubleTinyString],
- 'Unexpected success in creating invalid Decimal128');
- assert.throws(
- NumberDecimal, ['some garbage'], 'Unexpected success in creating invalid Decimal128');
+assert.throws(
+ NumberDecimal, [exactDoubleString], 'Unexpected success in creating invalid Decimal128');
+assert.throws(
+ NumberDecimal, [exactDoubleTinyString], 'Unexpected success in creating invalid Decimal128');
+assert.throws(NumberDecimal, ['some garbage'], 'Unexpected success in creating invalid Decimal128');
- // Find values with various types and NumberDecimal constructed types
- assert.eq(col.find({'d': NumberDecimal('1')}).count(), '4');
- assert.eq(col.find({'d': NumberDecimal(1)}).count(), '4');
- assert.eq(col.find({'d': NumberDecimal(NumberLong(1))}).count(), '4');
- assert.eq(col.find({'d': NumberDecimal(NumberInt(1))}).count(), '4');
- assert.eq(col.find({'d': 1}).count(), '4');
- assert.eq(col.find({'d': NumberLong(1)}).count(), '4');
- assert.eq(col.find({'d': NumberInt(1)}).count(), '4');
- // NaN and -NaN are both evaluated to NaN
- assert.eq(col.find({'d': NumberDecimal('NaN')}).count(), 2);
+// Find values with various types and NumberDecimal constructed types
+assert.eq(col.find({'d': NumberDecimal('1')}).count(), '4');
+assert.eq(col.find({'d': NumberDecimal(1)}).count(), '4');
+assert.eq(col.find({'d': NumberDecimal(NumberLong(1))}).count(), '4');
+assert.eq(col.find({'d': NumberDecimal(NumberInt(1))}).count(), '4');
+assert.eq(col.find({'d': 1}).count(), '4');
+assert.eq(col.find({'d': NumberLong(1)}).count(), '4');
+assert.eq(col.find({'d': NumberInt(1)}).count(), '4');
+// NaN and -NaN are both evaluated to NaN
+assert.eq(col.find({'d': NumberDecimal('NaN')}).count(), 2);
}());
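For context on what decimal_constructors.js relies on: the string, double, NumberLong, and
NumberInt constructor forms all produce the same Decimal128 value, which is why a single equality
query matches four documents. A hedged shell sketch of that equivalence:

    // Sketch: all four constructor forms compare equal by value.
    var forms = [
        NumberDecimal('1'),
        NumberDecimal(1),
        NumberDecimal(NumberLong('1')),
        NumberDecimal(NumberInt('1'))
    ];
    forms.forEach(function(d) {
        assert.eq(bsonWoCompare({d: d}, {d: forms[0]}), 0, 'constructor forms should be equal');
    });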
diff --git a/jstests/decimal/decimal_find_basic.js b/jstests/decimal/decimal_find_basic.js
index a6a92419616..28a9040a912 100644
--- a/jstests/decimal/decimal_find_basic.js
+++ b/jstests/decimal/decimal_find_basic.js
@@ -1,55 +1,54 @@
// Tests finding NumberDecimal from the shell.
(function() {
- "use strict";
- var col = db.decimal_find_basic;
- col.drop();
-
- // Insert some sample data.
-
- assert.writeOK(col.insert([
- {"decimal": NumberDecimal("0")},
- {"decimal": NumberDecimal("0.00")},
- {"decimal": NumberDecimal("-0")},
- {"decimal": NumberDecimal("1.0")},
- {"decimal": NumberDecimal("1.00")},
- {"decimal": NumberDecimal("2.00")},
- {"decimal": NumberDecimal("12345678901234567890.12345678901234")},
- {"decimal": NumberDecimal("NaN")},
- {"decimal": NumberDecimal("-NaN")},
- {"decimal": NumberDecimal("-Infinity")},
- {"decimal": NumberDecimal("Infinity")},
- ]),
- "Initial insertion of decimals failed");
-
- // Zeros
- assert.eq(col.find({"decimal": NumberDecimal("0")}).count(), "3");
-
- // NaNs
- assert.eq(col.find({"decimal": NumberDecimal("NaN")}).count(), 2, "NaN find failed");
-
- var theNaNs = [{"decimal": NumberDecimal("NaN")}, {"decimal": NumberDecimal("-NaN")}];
-
- assert(bsonWoCompare(theNaNs[0], theNaNs[1]) == 0, "NaN compares equal");
-
- // Infinity
- assert.eq(col.find({"decimal": NumberDecimal("Infinity")}).count(), 1, "Infinity count wrong");
- assert.eq(col.find({"decimal": NumberDecimal("-Infinity")}).count(), 1, "Infinity count wrong");
-
- // Maximum Precision
- assert.eq(col.find({"decimal": NumberDecimal("12345678901234567890.12345678901234")}).count(),
- 1,
- "Maximum precision decimal not found.");
-
- col.drop();
-
- // Maximum and Minimum Values
- assert.writeOK(col.insert([
- {"max": NumberDecimal("9999999999999999999999999999999999E6111")},
- {"min": NumberDecimal("1E-6176")}
- ]));
-
- assert.eq(col.find({"max": NumberDecimal("9999999999999999999999999999999999E6111")}).count(),
- 1);
- assert.eq(col.find({"min": NumberDecimal("1E-6176")}).count(), 1);
+"use strict";
+var col = db.decimal_find_basic;
+col.drop();
+
+// Insert some sample data.
+
+assert.writeOK(col.insert([
+ {"decimal": NumberDecimal("0")},
+ {"decimal": NumberDecimal("0.00")},
+ {"decimal": NumberDecimal("-0")},
+ {"decimal": NumberDecimal("1.0")},
+ {"decimal": NumberDecimal("1.00")},
+ {"decimal": NumberDecimal("2.00")},
+ {"decimal": NumberDecimal("12345678901234567890.12345678901234")},
+ {"decimal": NumberDecimal("NaN")},
+ {"decimal": NumberDecimal("-NaN")},
+ {"decimal": NumberDecimal("-Infinity")},
+ {"decimal": NumberDecimal("Infinity")},
+]),
+ "Initial insertion of decimals failed");
+
+// Zeros
+assert.eq(col.find({"decimal": NumberDecimal("0")}).count(), "3");
+
+// NaNs
+assert.eq(col.find({"decimal": NumberDecimal("NaN")}).count(), 2, "NaN find failed");
+
+var theNaNs = [{"decimal": NumberDecimal("NaN")}, {"decimal": NumberDecimal("-NaN")}];
+
+assert(bsonWoCompare(theNaNs[0], theNaNs[1]) == 0, "NaN compares equal");
+
+// Infinity
+assert.eq(col.find({"decimal": NumberDecimal("Infinity")}).count(), 1, "Infinity count wrong");
+assert.eq(col.find({"decimal": NumberDecimal("-Infinity")}).count(), 1, "Infinity count wrong");
+
+// Maximum Precision
+assert.eq(col.find({"decimal": NumberDecimal("12345678901234567890.12345678901234")}).count(),
+ 1,
+ "Maximum precision decimal not found.");
+
+col.drop();
+
+// Maximum and Minimum Values
+assert.writeOK(col.insert([
+ {"max": NumberDecimal("9999999999999999999999999999999999E6111")},
+ {"min": NumberDecimal("1E-6176")}
+]));
+
+assert.eq(col.find({"max": NumberDecimal("9999999999999999999999999999999999E6111")}).count(), 1);
+assert.eq(col.find({"min": NumberDecimal("1E-6176")}).count(), 1);
}());
diff --git a/jstests/decimal/decimal_find_mixed.js b/jstests/decimal/decimal_find_mixed.js
index 078ada3ad0f..0224c1b3fd8 100644
--- a/jstests/decimal/decimal_find_mixed.js
+++ b/jstests/decimal/decimal_find_mixed.js
@@ -1,70 +1,70 @@
// Tests finding NumberDecimal from the shell in mixed collections.
(function() {
- "use strict";
- var col = db.decimal_find_mixed;
- col.drop();
+"use strict";
+var col = db.decimal_find_mixed;
+col.drop();
- // Insert some sample data.
+// Insert some sample data.
- assert.writeOK(col.insert([
- {"a": -1},
- {"a": NumberDecimal("-1")},
- {"a": NumberLong("-1")},
- {"a": NumberInt("-1")},
- {"a": -0.3},
- {"a": NumberDecimal("-0.3")},
- {"a": -0.1},
- {"a": NumberDecimal("-0.1")},
- {"a": NumberDecimal("0")},
- {"a": 0},
- {"a": NumberDecimal("-0")},
- {"a": NumberDecimal("0.00")},
- {"a": NumberDecimal("0.1")},
- {"a": 0.1},
- {"a": NumberDecimal("0.3")},
- {"a": 0.3},
- {"a": NumberDecimal("0.5")},
- {"a": 0.5},
- {"a": NumberDecimal("1.0")},
- {"a": NumberLong("1")},
- {"a": NumberDecimal("1.00")},
- {"a": NumberDecimal("2.00")},
- {"a": NumberDecimal("12345678901234567890.12345678901234")},
- {"a": NumberDecimal("NaN")},
- {"a": NumberDecimal("-NaN")},
- {"a": NaN},
- {"a": NumberDecimal("Infinity")},
- {"a": Infinity}
- ]),
- "Initial decimal insertion failed");
+assert.writeOK(col.insert([
+ {"a": -1},
+ {"a": NumberDecimal("-1")},
+ {"a": NumberLong("-1")},
+ {"a": NumberInt("-1")},
+ {"a": -0.3},
+ {"a": NumberDecimal("-0.3")},
+ {"a": -0.1},
+ {"a": NumberDecimal("-0.1")},
+ {"a": NumberDecimal("0")},
+ {"a": 0},
+ {"a": NumberDecimal("-0")},
+ {"a": NumberDecimal("0.00")},
+ {"a": NumberDecimal("0.1")},
+ {"a": 0.1},
+ {"a": NumberDecimal("0.3")},
+ {"a": 0.3},
+ {"a": NumberDecimal("0.5")},
+ {"a": 0.5},
+ {"a": NumberDecimal("1.0")},
+ {"a": NumberLong("1")},
+ {"a": NumberDecimal("1.00")},
+ {"a": NumberDecimal("2.00")},
+ {"a": NumberDecimal("12345678901234567890.12345678901234")},
+ {"a": NumberDecimal("NaN")},
+ {"a": NumberDecimal("-NaN")},
+ {"a": NaN},
+ {"a": NumberDecimal("Infinity")},
+ {"a": Infinity}
+]),
+ "Initial decimal insertion failed");
- // Simple finds
- assert.eq(col.find({"a": -1}).count(), 4, "A1");
- assert.eq(col.find({"a": NumberLong("-1")}).count(), 4, "A2");
- assert.eq(col.find({"a": NumberInt("-1")}).count(), 4, "A3");
- assert.eq(col.find({"a": NumberDecimal("-1")}).count(), 4, "A4");
+// Simple finds
+assert.eq(col.find({"a": -1}).count(), 4, "A1");
+assert.eq(col.find({"a": NumberLong("-1")}).count(), 4, "A2");
+assert.eq(col.find({"a": NumberInt("-1")}).count(), 4, "A3");
+assert.eq(col.find({"a": NumberDecimal("-1")}).count(), 4, "A4");
- assert.eq(col.find({"a": NaN}).count(), 3, "B1");
- assert.eq(col.find({"a": NumberDecimal("NaN")}).count(), 3, "B2");
- assert.eq(col.find({"a": Infinity}).count(), 2, "B3");
- assert.eq(col.find({"a": NumberDecimal("Infinity")}).count(), 2, "B4");
+assert.eq(col.find({"a": NaN}).count(), 3, "B1");
+assert.eq(col.find({"a": NumberDecimal("NaN")}).count(), 3, "B2");
+assert.eq(col.find({"a": Infinity}).count(), 2, "B3");
+assert.eq(col.find({"a": NumberDecimal("Infinity")}).count(), 2, "B4");
- assert.eq(col.find({$and: [{"a": {$gte: 0}}, {"a": {$lte: 2}}]}).count(), 14, "C1");
+assert.eq(col.find({$and: [{"a": {$gte: 0}}, {"a": {$lte: 2}}]}).count(), 14, "C1");
- // Proper mixed ordering of decimals and doubles
- col.drop();
- assert.writeOK(col.insert([{"a": NumberDecimal("0.3")}, {"a": 0.3}], "2 insertion failed"));
+// Proper mixed ordering of decimals and doubles
+col.drop();
+assert.writeOK(col.insert([{"a": NumberDecimal("0.3")}, {"a": 0.3}], "2 insertion failed"));
- assert.eq(col.find({"a": {$lt: NumberDecimal("0.3")}}).count(), 1, "D1");
- assert.eq(col.find({"a": {$gt: 0.3}}).count(), 1, "D1");
+assert.eq(col.find({"a": {$lt: NumberDecimal("0.3")}}).count(), 1, "D1");
+assert.eq(col.find({"a": {$gt: 0.3}}).count(), 1, "D1");
- // Find with NumberLong, but not Double
- col.drop();
- assert.writeOK(col.insert([{"a": NumberDecimal("36028797018963967")}], "3 insertion failed"));
+// Find with NumberLong, but not Double
+col.drop();
+assert.writeOK(col.insert([{"a": NumberDecimal("36028797018963967")}], "3 insertion failed"));
- assert.eq(col.find({"a": NumberDecimal("36028797018963967")}).count(), 1, "E1");
- // Not representable as double
- assert.eq(col.find({"a": 36028797018963967}).count(), 0, "E2");
- assert.eq(col.find({"a": NumberLong("36028797018963967")}).count(), 1, "E3");
+assert.eq(col.find({"a": NumberDecimal("36028797018963967")}).count(), 1, "E1");
+// Not representable as double
+assert.eq(col.find({"a": 36028797018963967}).count(), 0, "E2");
+assert.eq(col.find({"a": NumberLong("36028797018963967")}).count(), 1, "E3");
}());
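A note on the E2 case above: 36028797018963967 is 2^55 - 1, which needs 55 significand bits, so
the double literal silently rounds up to 2^55 before the query runs and can never match the
stored decimal. A small sketch of the rounding, runnable in the shell:

    // Sketch: the double literal loses its trailing 7; the decimal forms stay distinct.
    assert.eq(36028797018963967, 36028797018963968);  // both parse to 2^55 as doubles
    assert.neq(tojson(NumberDecimal("36028797018963967")),
               tojson(NumberDecimal("36028797018963968")));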
diff --git a/jstests/decimal/decimal_find_query.js b/jstests/decimal/decimal_find_query.js
index 7f3d8c10284..e584e9e1f47 100644
--- a/jstests/decimal/decimal_find_query.js
+++ b/jstests/decimal/decimal_find_query.js
@@ -1,49 +1,49 @@
// Find the decimal using query operators
(function() {
- 'use strict';
- var col = db.decimal_find_query;
- col.drop();
+'use strict';
+var col = db.decimal_find_query;
+col.drop();
- // Insert some sample data.
+// Insert some sample data.
- assert.writeOK(col.insert([
- {'decimal': NumberDecimal('0')},
- {'decimal': NumberDecimal('0.00')},
- {'decimal': NumberDecimal('-0')},
- {'decimal': NumberDecimal('1.0')},
- {'decimal': NumberDecimal('1.00')},
- {'decimal': NumberDecimal('2.00')},
- {'decimal': NumberDecimal('12345678901234.56789012345678901234')},
- {'decimal': NumberDecimal('NaN')},
- {'decimal': NumberDecimal('-NaN')},
- {'decimal': NumberDecimal('Infinity')},
- {'decimal': NumberDecimal('-Infinity')},
- ]),
- 'Initial insertion failed');
+assert.writeOK(col.insert([
+ {'decimal': NumberDecimal('0')},
+ {'decimal': NumberDecimal('0.00')},
+ {'decimal': NumberDecimal('-0')},
+ {'decimal': NumberDecimal('1.0')},
+ {'decimal': NumberDecimal('1.00')},
+ {'decimal': NumberDecimal('2.00')},
+ {'decimal': NumberDecimal('12345678901234.56789012345678901234')},
+ {'decimal': NumberDecimal('NaN')},
+ {'decimal': NumberDecimal('-NaN')},
+ {'decimal': NumberDecimal('Infinity')},
+ {'decimal': NumberDecimal('-Infinity')},
+]),
+ 'Initial insertion failed');
- assert.eq(col.find({'decimal': {$eq: NumberDecimal('1')}}).count(), '2');
- assert.eq(col.find({'decimal': {$lt: NumberDecimal('1.00000000000001')}}).count(), 6);
- assert.eq(col.find({'decimal': {$gt: NumberDecimal('1.5')}}).count(), 3);
+assert.eq(col.find({'decimal': {$eq: NumberDecimal('1')}}).count(), '2');
+assert.eq(col.find({'decimal': {$lt: NumberDecimal('1.00000000000001')}}).count(), 6);
+assert.eq(col.find({'decimal': {$gt: NumberDecimal('1.5')}}).count(), 3);
- assert.eq(col.find({'decimal': {$gte: NumberDecimal('2.000')}}).count(), 3);
- assert.eq(col.find({'decimal': {$lte: NumberDecimal('0.9999999999999999')}}).count(), 4);
+assert.eq(col.find({'decimal': {$gte: NumberDecimal('2.000')}}).count(), 3);
+assert.eq(col.find({'decimal': {$lte: NumberDecimal('0.9999999999999999')}}).count(), 4);
- assert.eq(col.find({'decimal': {$nin: [NumberDecimal('Infinity'), NumberDecimal('-Infinity')]}})
- .count(),
- 9,
- 'Infinity count incorrect');
+assert.eq(
+ col.find({'decimal': {$nin: [NumberDecimal('Infinity'), NumberDecimal('-Infinity')]}}).count(),
+ 9,
+ 'Infinity count incorrect');
- // Test $mod
- col.drop();
- assert.writeOK(col.insert([
- {'decimal': NumberDecimal('0')},
- {'decimal': NumberDecimal('0.00')},
- {'decimal': NumberDecimal('-0')},
- {'decimal': NumberDecimal('1.0')},
- {'decimal': NumberDecimal('1.00')},
- {'decimal': NumberDecimal('2.00')},
- ]),
- '2 insertion failed');
- assert.eq(col.find({'decimal': {$mod: [2, 0]}}).count(), 4, "$mod count incorrect");
+// Test $mod
+col.drop();
+assert.writeOK(col.insert([
+ {'decimal': NumberDecimal('0')},
+ {'decimal': NumberDecimal('0.00')},
+ {'decimal': NumberDecimal('-0')},
+ {'decimal': NumberDecimal('1.0')},
+ {'decimal': NumberDecimal('1.00')},
+ {'decimal': NumberDecimal('2.00')},
+]),
+ '2 insertion failed');
+assert.eq(col.find({'decimal': {$mod: [2, 0]}}).count(), 4, "$mod count incorrect");
}());
diff --git a/jstests/decimal/decimal_roundtrip_basic.js b/jstests/decimal/decimal_roundtrip_basic.js
index b130eeebccb..612b6fcd631 100644
--- a/jstests/decimal/decimal_roundtrip_basic.js
+++ b/jstests/decimal/decimal_roundtrip_basic.js
@@ -1,52 +1,52 @@
// Tests doing simple round-trip operations from the shell.
(function() {
- "use strict";
- var col = db.roundtrip_basic;
- col.drop();
+"use strict";
+var col = db.roundtrip_basic;
+col.drop();
- // Insert some sample data.
+// Insert some sample data.
- assert.writeOK(col.insert([
- {"decimal": NumberDecimal("0")},
- {"decimal": NumberDecimal("0.00")},
- {"decimal": NumberDecimal("-0")},
- {"decimal": NumberDecimal("1.0")},
- {"decimal": NumberDecimal("0.10")},
- {"decimal": NumberDecimal("2.00")},
- {"decimal": NumberDecimal("12345678901234567890.12345678901234")},
- {"decimal": NumberDecimal("NaN")},
- {"decimal": NumberDecimal("-NaN")},
- {"decimal": NumberDecimal("Infinity")},
- {"decimal": NumberDecimal("-Infinity")},
- {"decimal": NumberDecimal("9999999999999999999999999999999999E6111")},
- {"decimal": NumberDecimal("1E-6176")},
- ]),
- "Initial insertion of decimals failed");
+assert.writeOK(col.insert([
+ {"decimal": NumberDecimal("0")},
+ {"decimal": NumberDecimal("0.00")},
+ {"decimal": NumberDecimal("-0")},
+ {"decimal": NumberDecimal("1.0")},
+ {"decimal": NumberDecimal("0.10")},
+ {"decimal": NumberDecimal("2.00")},
+ {"decimal": NumberDecimal("12345678901234567890.12345678901234")},
+ {"decimal": NumberDecimal("NaN")},
+ {"decimal": NumberDecimal("-NaN")},
+ {"decimal": NumberDecimal("Infinity")},
+ {"decimal": NumberDecimal("-Infinity")},
+ {"decimal": NumberDecimal("9999999999999999999999999999999999E6111")},
+ {"decimal": NumberDecimal("1E-6176")},
+]),
+ "Initial insertion of decimals failed");
- // Check that searching for queryValue results in finding expectedValues.
- // All arguments are string representations of NumberDecimal values.
- function checkDecimals(queryValue, expectedValues) {
- queryValue = NumberDecimal(queryValue);
- expectedValues = expectedValues.map((function(string) {
- return NumberDecimal(string);
- }));
- var docs = col.find({decimal: queryValue}, {_id: 0}).sort({decimal: 1, _id: 1}).toArray();
- var actualValues = docs.map((function(item) {
- return item.decimal;
- }));
- assert.eq(actualValues, expectedValues, "problem retrieving " + queryValue.toString());
- }
+// Check that searching for queryValue results in finding expectedValues.
+// All arguments are string representations of NumberDecimal values.
+function checkDecimals(queryValue, expectedValues) {
+ queryValue = NumberDecimal(queryValue);
+ expectedValues = expectedValues.map((function(string) {
+ return NumberDecimal(string);
+ }));
+ var docs = col.find({decimal: queryValue}, {_id: 0}).sort({decimal: 1, _id: 1}).toArray();
+ var actualValues = docs.map((function(item) {
+ return item.decimal;
+ }));
+ assert.eq(actualValues, expectedValues, "problem retrieving " + queryValue.toString());
+}
- checkDecimals("0", ["0", "0.00", "-0"]);
- checkDecimals("1.0", ["1.0"]);
- checkDecimals("0.1", ["0.10"]);
- checkDecimals("2", ["2.00"]);
- checkDecimals("12345678901234567890.12345678901234", ["12345678901234567890.12345678901234"]);
- checkDecimals("NaN", ["NaN", "-NaN"]);
- checkDecimals("Infinity", ["Infinity"]);
- checkDecimals("-Infinity", ["-Infinity"]);
- checkDecimals("9999999999999999999999999999999999E6111",
- ["9999999999999999999999999999999999E6111"]);
- checkDecimals("1E-6176", ["1E-6176"]);
+checkDecimals("0", ["0", "0.00", "-0"]);
+checkDecimals("1.0", ["1.0"]);
+checkDecimals("0.1", ["0.10"]);
+checkDecimals("2", ["2.00"]);
+checkDecimals("12345678901234567890.12345678901234", ["12345678901234567890.12345678901234"]);
+checkDecimals("NaN", ["NaN", "-NaN"]);
+checkDecimals("Infinity", ["Infinity"]);
+checkDecimals("-Infinity", ["-Infinity"]);
+checkDecimals("9999999999999999999999999999999999E6111",
+ ["9999999999999999999999999999999999E6111"]);
+checkDecimals("1E-6176", ["1E-6176"]);
}());
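The checkDecimals helper above leans on Decimal128 equality being by numeric value while the
stored representation round-trips unchanged: "0", "0.00", and "-0" form one equality cohort, yet
each document keeps its original precision. A brief sketch of that distinction:

    // Sketch: equal by value, distinct in representation.
    assert.eq(bsonWoCompare({d: NumberDecimal("0")}, {d: NumberDecimal("0.00")}), 0);
    assert.eq(bsonWoCompare({d: NumberDecimal("0")}, {d: NumberDecimal("-0")}), 0);
    assert.neq(tojson(NumberDecimal("0")), tojson(NumberDecimal("0.00")));  // precision preserved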
diff --git a/jstests/decimal/decimal_update.js b/jstests/decimal/decimal_update.js
index 6be2bd9e3e6..f50994ce32b 100644
--- a/jstests/decimal/decimal_update.js
+++ b/jstests/decimal/decimal_update.js
@@ -1,40 +1,40 @@
// Test decimal updates
(function() {
- "use strict";
- var col = db.decimal_updates;
- col.drop();
+"use strict";
+var col = db.decimal_updates;
+col.drop();
- // Insert some sample data.
- var docs = [
- {'a': NumberDecimal("1.0")},
- {'a': NumberDecimal("0.0")},
- {'a': NumberDecimal("1.00")},
- {'a': NumberLong("1")},
- {'a': 1}
- ];
+// Insert some sample data.
+var docs = [
+ {'a': NumberDecimal("1.0")},
+ {'a': NumberDecimal("0.0")},
+ {'a': NumberDecimal("1.00")},
+ {'a': NumberLong("1")},
+ {'a': 1}
+];
- assert.writeOK(col.insert(docs), "Initial insertion failed");
+assert.writeOK(col.insert(docs), "Initial insertion failed");
- assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("10")}}, {multi: true}),
- "update $inc failed");
- assert.eq(col.find({a: 11}).count(), 4, "count after $inc incorrect");
- assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("0")}}, {multi: true}),
- "update $inc 0 failed");
- assert.eq(col.find({a: 11}).count(), 4, "count after $inc 0 incorrect");
+assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("10")}}, {multi: true}),
+ "update $inc failed");
+assert.eq(col.find({a: 11}).count(), 4, "count after $inc incorrect");
+assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("0")}}, {multi: true}),
+ "update $inc 0 failed");
+assert.eq(col.find({a: 11}).count(), 4, "count after $inc 0 incorrect");
- col.drop();
- assert.writeOK(col.insert(docs), "Second insertion failed");
+col.drop();
+assert.writeOK(col.insert(docs), "Second insertion failed");
- assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("1")}}, {multi: true}),
- "update $mul failed");
- assert.eq(col.find({a: 1}).count(), 4, "count after $mul incorrect");
- assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("2")}}, {multi: true}),
- "update $mul 2 failed");
- assert.eq(col.find({a: 2}).count(), 4, "count after $mul incorrect");
- assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("0")}}, {multi: true}),
- "update $mul 0 failed");
- assert.eq(col.find({a: 0}).count(), 5, "count after $mul 0 incorrect");
+assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("1")}}, {multi: true}),
+ "update $mul failed");
+assert.eq(col.find({a: 1}).count(), 4, "count after $mul incorrect");
+assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("2")}}, {multi: true}),
+ "update $mul 2 failed");
+assert.eq(col.find({a: 2}).count(), 4, "count after $mul incorrect");
+assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("0")}}, {multi: true}),
+ "update $mul 0 failed");
+assert.eq(col.find({a: 0}).count(), 5, "count after $mul 0 incorrect");
- assert.writeError(col.update({}, {$bit: {'a': {and: 1}}}, {multi: true}), "$bit should fail");
+assert.writeError(col.update({}, {$bit: {'a': {and: 1}}}, {multi: true}), "$bit should fail");
}());
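On the final assertion above: $bit write-errors on decimal fields because bitwise updates are
defined only for 32- and 64-bit integer types. A hedged sketch (the scratch collection name is
illustrative):

    // Sketch: $bit succeeds on an int field but write-errors on a decimal field.
    var scratch = db.decimal_update_sketch;  // illustrative collection name
    scratch.drop();
    assert.writeOK(scratch.insert({a: NumberInt(5), b: NumberDecimal("1")}));
    assert.writeOK(scratch.update({}, {$bit: {a: {and: NumberInt(1)}}}));
    assert.writeError(scratch.update({}, {$bit: {b: {and: NumberInt(1)}}}));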
diff --git a/jstests/disk/repair_does_not_invalidate_config_on_standalone.js b/jstests/disk/repair_does_not_invalidate_config_on_standalone.js
index 70f0cde97b8..aba146fb37c 100644
--- a/jstests/disk/repair_does_not_invalidate_config_on_standalone.js
+++ b/jstests/disk/repair_does_not_invalidate_config_on_standalone.js
@@ -6,37 +6,37 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const baseName = "repair_does_not_invalidate_config_on_standalone";
- const dbName = baseName;
- const collName = "test";
+const baseName = "repair_does_not_invalidate_config_on_standalone";
+const dbName = baseName;
+const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
- resetDbpath(dbpath);
+const dbpath = MongoRunner.dataPath + baseName + "/";
+resetDbpath(dbpath);
- let mongod = MongoRunner.runMongod({dbpath: dbpath});
- const port = mongod.port;
+let mongod = MongoRunner.runMongod({dbpath: dbpath});
+const port = mongod.port;
- let testColl = mongod.getDB(dbName)[collName];
+let testColl = mongod.getDB(dbName)[collName];
- assert.commandWorked(testColl.insert({_id: 0, foo: "bar"}));
+assert.commandWorked(testColl.insert({_id: 0, foo: "bar"}));
- let collUri = getUriForColl(testColl);
- let collFile = dbpath + "/" + collUri + ".wt";
+let collUri = getUriForColl(testColl);
+let collFile = dbpath + "/" + collUri + ".wt";
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- jsTestLog("Deleting collection file: " + collFile);
- removeFile(collFile);
+jsTestLog("Deleting collection file: " + collFile);
+removeFile(collFile);
- assertRepairSucceeds(dbpath, port);
+assertRepairSucceeds(dbpath, port);
- assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
- let nodeDB = node.getDB(dbName);
- assert(nodeDB[collName].exists());
- assert.eq(nodeDB[collName].find().itcount(), 0);
+assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
+ let nodeDB = node.getDB(dbName);
+ assert(nodeDB[collName].exists());
+ assert.eq(nodeDB[collName].find().itcount(), 0);
- assert(!nodeDB.getSiblingDB("local")["system.replset"].exists());
- });
+ assert(!nodeDB.getSiblingDB("local")["system.replset"].exists());
+});
})();
diff --git a/jstests/disk/repair_failure_is_recoverable.js b/jstests/disk/repair_failure_is_recoverable.js
index cbfc12e5b4b..2ede4bfe36d 100644
--- a/jstests/disk/repair_failure_is_recoverable.js
+++ b/jstests/disk/repair_failure_is_recoverable.js
@@ -7,56 +7,56 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const exitBeforeRepairParameter = "exitBeforeDataRepair";
- const exitBeforeRepairInvalidatesConfigParameter = "exitBeforeRepairInvalidatesConfig";
+const exitBeforeRepairParameter = "exitBeforeDataRepair";
+const exitBeforeRepairInvalidatesConfigParameter = "exitBeforeRepairInvalidatesConfig";
- const baseName = "repair_failure_is_recoverable";
- const dbName = "repair_failure_is_recoverable";
- const collName = "test";
+const baseName = "repair_failure_is_recoverable";
+const dbName = "repair_failure_is_recoverable";
+const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
- resetDbpath(dbpath);
+const dbpath = MongoRunner.dataPath + baseName + "/";
+resetDbpath(dbpath);
- let mongod = MongoRunner.runMongod({dbpath: dbpath});
- const port = mongod.port;
+let mongod = MongoRunner.runMongod({dbpath: dbpath});
+const port = mongod.port;
- let testColl = mongod.getDB(dbName)[collName];
+let testColl = mongod.getDB(dbName)[collName];
- assert.commandWorked(testColl.insert({_id: 0, foo: "bar"}));
+assert.commandWorked(testColl.insert({_id: 0, foo: "bar"}));
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- /**
- * Test 1. Cause an exit before repairing data. MongoDB should not be able to restart without
- * --repair.
- */
- assertRepairFailsWithFailpoint(dbpath, port, exitBeforeRepairParameter);
+/**
+ * Test 1. Cause an exit before repairing data. MongoDB should not be able to restart without
+ * --repair.
+ */
+assertRepairFailsWithFailpoint(dbpath, port, exitBeforeRepairParameter);
- assertErrorOnStartupAfterIncompleteRepair(dbpath, port);
+assertErrorOnStartupAfterIncompleteRepair(dbpath, port);
- assertRepairSucceeds(dbpath, port);
+assertRepairSucceeds(dbpath, port);
- assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
- let nodeDB = node.getDB(dbName);
- assert(nodeDB[collName].exists());
- assert.eq(nodeDB[collName].find().itcount(), 1);
- });
+assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
+ let nodeDB = node.getDB(dbName);
+ assert(nodeDB[collName].exists());
+ assert.eq(nodeDB[collName].find().itcount(), 1);
+});
- /**
- * Test 2. Fail after repairing data, before invalidating the replica set config. MongoDB should
- * not be able to restart without --repair.
- */
- assertRepairFailsWithFailpoint(dbpath, port, exitBeforeRepairInvalidatesConfigParameter);
+/**
+ * Test 2. Fail after repairing data, before invalidating the replica set config. MongoDB should
+ * not be able to restart without --repair.
+ */
+assertRepairFailsWithFailpoint(dbpath, port, exitBeforeRepairInvalidatesConfigParameter);
- assertErrorOnStartupAfterIncompleteRepair(dbpath, port);
+assertErrorOnStartupAfterIncompleteRepair(dbpath, port);
- assertRepairSucceeds(dbpath, port);
+assertRepairSucceeds(dbpath, port);
- assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
- let nodeDB = node.getDB(dbName);
- assert(nodeDB[collName].exists());
- assert.eq(nodeDB[collName].find().itcount(), 1);
- });
+assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
+ let nodeDB = node.getDB(dbName);
+ assert(nodeDB[collName].exists());
+ assert.eq(nodeDB[collName].find().itcount(), 1);
+});
})();
diff --git a/jstests/disk/repair_invalidates_replica_set_config.js b/jstests/disk/repair_invalidates_replica_set_config.js
index a110ce79857..2e67012f68f 100644
--- a/jstests/disk/repair_invalidates_replica_set_config.js
+++ b/jstests/disk/repair_invalidates_replica_set_config.js
@@ -7,122 +7,118 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const dbName = "repair_invalidates_replica_set_config";
- const collName = "test";
+const dbName = "repair_invalidates_replica_set_config";
+const collName = "test";
- let replSet = new ReplSetTest({nodes: 2});
- replSet.startSet();
- replSet.initiate();
- replSet.awaitReplication();
+let replSet = new ReplSetTest({nodes: 2});
+replSet.startSet();
+replSet.initiate();
+replSet.awaitReplication();
- const originalSecondary = replSet.getSecondary();
+const originalSecondary = replSet.getSecondary();
- let primaryDB = replSet.getPrimary().getDB(dbName);
- let secondaryDB = originalSecondary.getDB(dbName);
+let primaryDB = replSet.getPrimary().getDB(dbName);
+let secondaryDB = originalSecondary.getDB(dbName);
- assert.commandWorked(primaryDB[collName].insert({_id: 0, foo: "bar"}));
- replSet.awaitLastOpCommitted();
+assert.commandWorked(primaryDB[collName].insert({_id: 0, foo: "bar"}));
+replSet.awaitLastOpCommitted();
- const secondaryPort = originalSecondary.port;
- const secondaryDbpath = originalSecondary.dbpath;
+const secondaryPort = originalSecondary.port;
+const secondaryDbpath = originalSecondary.dbpath;
- let secondary = originalSecondary;
+let secondary = originalSecondary;
- //
- // 1. This repairs the data on a clean data directory and asserts that the node is still able
- // to re-join its original replica set without an initial sync.
- //
+//
+// 1. This repairs the data on a clean data directory and asserts that the node is still able
+// to re-join its original replica set without an initial sync.
+//
- // Shut down the secondary.
- MongoRunner.stopMongod(secondary);
+// Shut down the secondary.
+MongoRunner.stopMongod(secondary);
- // Ensure the secondary can be repaired successfully.
- assertRepairSucceeds(secondaryDbpath, secondaryPort);
+// Ensure the secondary can be repaired successfully.
+assertRepairSucceeds(secondaryDbpath, secondaryPort);
- // Starting up without --replSet should not fail, and the collection should exist with its data.
- assertStartAndStopStandaloneOnExistingDbpath(secondaryDbpath, secondaryPort, function(node) {
+// Starting up without --replSet should not fail, and the collection should exist with its data.
+assertStartAndStopStandaloneOnExistingDbpath(secondaryDbpath, secondaryPort, function(node) {
+ let nodeDB = node.getDB(dbName);
+ assert(nodeDB[collName].exists());
+ assert.eq(nodeDB[collName].find().itcount(), 1);
+});
+
+// Starting the secondary with the same data directory should succeed with the same data.
+secondary = assertStartInReplSet(
+ replSet, originalSecondary, false /* cleanData */, false /* expectResync */, function(node) {
let nodeDB = node.getDB(dbName);
- assert(nodeDB[collName].exists());
assert.eq(nodeDB[collName].find().itcount(), 1);
});
-
- // Starting the secondary with the same data directory should succeed with the same data.
- secondary = assertStartInReplSet(replSet,
- originalSecondary,
- false /* cleanData */,
- false /* expectResync */,
- function(node) {
- let nodeDB = node.getDB(dbName);
- assert.eq(nodeDB[collName].find().itcount(), 1);
- });
- secondaryDB = secondary.getDB(dbName);
-
- //
- // 2. This test corrupts WiredTiger data files on a secondary, repairs the data, and asserts
- // that the node is unable to re-join its original replica set without an initial sync.
- //
-
- let secondaryCollUri = getUriForColl(secondaryDB[collName]);
- let secondaryCollFile = secondaryDbpath + "/" + secondaryCollUri + ".wt";
- // Shut down the secondary. Delete the collection's data file.
- MongoRunner.stopMongod(secondary);
- jsTestLog("Deleting secondary collection file: " + secondaryCollFile);
- removeFile(secondaryCollFile);
-
- // Ensure the secondary can be repaired successfully.
- assertRepairSucceeds(secondaryDbpath, secondaryPort);
-
- // Starting up with --replSet should fail with a specific error.
- assertErrorOnStartupWhenStartingAsReplSet(
- secondaryDbpath, secondaryPort, replSet.getReplSetConfig()._id);
-
- // Starting up without --replSet should not fail, but the collection should exist with no data.
- assertStartAndStopStandaloneOnExistingDbpath(secondaryDbpath, secondaryPort, function(node) {
+secondaryDB = secondary.getDB(dbName);
+
+//
+// 2. This test corrupts WiredTiger data files on a secondary, repairs the data, and asserts
+// that the node is unable to re-join its original replica set without an initial sync.
+//
+
+let secondaryCollUri = getUriForColl(secondaryDB[collName]);
+let secondaryCollFile = secondaryDbpath + "/" + secondaryCollUri + ".wt";
+// Shut down the secondary. Delete the collection's data file.
+MongoRunner.stopMongod(secondary);
+jsTestLog("Deleting secondary collection file: " + secondaryCollFile);
+removeFile(secondaryCollFile);
+
+// Ensure the secondary can be repaired successfully.
+assertRepairSucceeds(secondaryDbpath, secondaryPort);
+
+// Starting up with --replSet should fail with a specific error.
+assertErrorOnStartupWhenStartingAsReplSet(
+ secondaryDbpath, secondaryPort, replSet.getReplSetConfig()._id);
+
+// Starting up without --replSet should not fail, but the collection should exist with no data.
+assertStartAndStopStandaloneOnExistingDbpath(secondaryDbpath, secondaryPort, function(node) {
+ let nodeDB = node.getDB(dbName);
+ assert(nodeDB[collName].exists());
+ assert.eq(nodeDB[collName].find().itcount(), 0);
+});
+
+// Starting the secondary with a wiped data directory should force an initial sync.
+secondary = assertStartInReplSet(
+ replSet, originalSecondary, true /* cleanData */, true /* expectResync */, function(node) {
let nodeDB = node.getDB(dbName);
- assert(nodeDB[collName].exists());
- assert.eq(nodeDB[collName].find().itcount(), 0);
+ assert.eq(nodeDB[collName].find().itcount(), 1);
});
-
- // Starting the secondary with a wiped data directory should force an initial sync.
- secondary = assertStartInReplSet(
- replSet, originalSecondary, true /* cleanData */, true /* expectResync */, function(node) {
- let nodeDB = node.getDB(dbName);
- assert.eq(nodeDB[collName].find().itcount(), 1);
- });
- secondaryDB = secondary.getDB(dbName);
-
- //
- // 3. This test corrupts the _mdb_catalog file on a secondary and repairs the data. Because the
- // node's local.system.replset collection gets deleted, we assert that it is able to start up
- // and re-sync on the existing data directory immediately.
-
- // Shut down the secondary. Delete the catalog file.
- MongoRunner.stopMongod(secondary);
- let mdbCatalogFile = secondaryDbpath + "/_mdb_catalog.wt";
- jsTestLog("Deleting secondary catalog file: " + mdbCatalogFile);
- removeFile(mdbCatalogFile);
-
- // Ensure the secondary can be repaired successfully.
- assertRepairSucceeds(secondaryDbpath, secondaryPort);
-
- // Starting up without --replSet should not fail, but the collection should exist with no data.
- assertStartAndStopStandaloneOnExistingDbpath(secondaryDbpath, secondaryPort, function(node) {
+secondaryDB = secondary.getDB(dbName);
+
+//
+// 3. This test corrupts the _mdb_catalog file on a secondary and repairs the data. Because the
+// node's local.system.replset collection gets deleted, we assert that it is able to start up
+// and re-sync on the existing data directory immediately.
+
+// Shut down the secondary. Delete the catalog file.
+MongoRunner.stopMongod(secondary);
+let mdbCatalogFile = secondaryDbpath + "/_mdb_catalog.wt";
+jsTestLog("Deleting secondary catalog file: " + mdbCatalogFile);
+removeFile(mdbCatalogFile);
+
+// Ensure the secondary can be repaired successfully.
+assertRepairSucceeds(secondaryDbpath, secondaryPort);
+
+// Starting up without --replSet should not fail, but the collection should exist with no data.
+assertStartAndStopStandaloneOnExistingDbpath(secondaryDbpath, secondaryPort, function(node) {
+ let nodeDB = node.getDB(dbName);
+ assert(!nodeDB[collName].exists());
+ assert(!nodeDB.getSiblingDB("local")["system.replset"].exists());
+});
+
+// The node's local.system.replset collection has been deleted, so it's perfectly okay that it
+// is able to start up and re-sync.
+// Starting the secondary with the same data directory should force an initial sync.
+secondary = assertStartInReplSet(
+ replSet, originalSecondary, false /* cleanData */, true /* expectResync */, function(node) {
let nodeDB = node.getDB(dbName);
- assert(!nodeDB[collName].exists());
- assert(!nodeDB.getSiblingDB("local")["system.replset"].exists());
+ assert.eq(nodeDB[collName].find().itcount(), 1);
});
- // The node's local.system.replset collection has been deleted, so it's perfectly okay that it
- // is able to start up and re-sync.
- // Starting the secondary with the same data directory should force an initial sync.
- secondary = assertStartInReplSet(
- replSet, originalSecondary, false /* cleanData */, true /* expectResync */, function(node) {
- let nodeDB = node.getDB(dbName);
- assert.eq(nodeDB[collName].find().itcount(), 1);
- });
-
- replSet.stopSet();
-
+replSet.stopSet();
})();
diff --git a/jstests/disk/wt_corrupt_file_errors.js b/jstests/disk/wt_corrupt_file_errors.js
index 5f829e5de8f..c45b5ae80df 100644
--- a/jstests/disk/wt_corrupt_file_errors.js
+++ b/jstests/disk/wt_corrupt_file_errors.js
@@ -6,73 +6,69 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const baseName = "wt_corrupt_file_errors";
- const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
+const baseName = "wt_corrupt_file_errors";
+const collName = "test";
+const dbpath = MongoRunner.dataPath + baseName + "/";
- /**
- * Test 1. Corrupt a collection's .wt file.
- */
-
- assertErrorOnStartupWhenFilesAreCorruptOrMissing(
- dbpath, baseName, collName, (mongod, testColl) => {
- const testCollUri = getUriForColl(testColl);
- const testCollFile = dbpath + testCollUri + ".wt";
- MongoRunner.stopMongod(mongod);
- jsTestLog("corrupting collection file: " + testCollFile);
- corruptFile(testCollFile);
- }, "Fatal Assertion 50882");
+/**
+ * Test 1. Corrupt a collection's .wt file.
+ */
- /**
- * Test 2. Corrupt the _mdb_catalog.
- */
+assertErrorOnStartupWhenFilesAreCorruptOrMissing(dbpath, baseName, collName, (mongod, testColl) => {
+ const testCollUri = getUriForColl(testColl);
+ const testCollFile = dbpath + testCollUri + ".wt";
+ MongoRunner.stopMongod(mongod);
+ jsTestLog("corrupting collection file: " + testCollFile);
+ corruptFile(testCollFile);
+}, "Fatal Assertion 50882");
- assertErrorOnStartupWhenFilesAreCorruptOrMissing(
- dbpath, baseName, collName, (mongod, testColl) => {
- MongoRunner.stopMongod(mongod);
- const mdbCatalogFile = dbpath + "_mdb_catalog.wt";
- jsTestLog("corrupting catalog file: " + mdbCatalogFile);
- corruptFile(mdbCatalogFile);
- }, "Fatal Assertion 50882");
+/**
+ * Test 2. Corrupt the _mdb_catalog.
+ */
- /**
- * Test 3. Corrupt the WiredTiger.wt.
- */
+assertErrorOnStartupWhenFilesAreCorruptOrMissing(dbpath, baseName, collName, (mongod, testColl) => {
+ MongoRunner.stopMongod(mongod);
+ const mdbCatalogFile = dbpath + "_mdb_catalog.wt";
+ jsTestLog("corrupting catalog file: " + mdbCatalogFile);
+ corruptFile(mdbCatalogFile);
+}, "Fatal Assertion 50882");
- assertErrorOnStartupWhenFilesAreCorruptOrMissing(
- dbpath, baseName, collName, (mongod, testColl) => {
- MongoRunner.stopMongod(mongod);
- const WiredTigerWTFile = dbpath + "WiredTiger.wt";
- jsTestLog("corrupting WiredTiger.wt");
- corruptFile(WiredTigerWTFile);
- }, "Fatal Assertion 50944");
+/**
+ * Test 3. Corrupt the WiredTiger.wt.
+ */
- /**
- * Test 4. Corrupt an index file.
- */
+assertErrorOnStartupWhenFilesAreCorruptOrMissing(dbpath, baseName, collName, (mongod, testColl) => {
+ MongoRunner.stopMongod(mongod);
+ const WiredTigerWTFile = dbpath + "WiredTiger.wt";
+ jsTestLog("corrupting WiredTiger.wt");
+ corruptFile(WiredTigerWTFile);
+}, "Fatal Assertion 50944");
- assertErrorOnRequestWhenFilesAreCorruptOrMissing(
- dbpath,
- baseName,
- collName,
- (mongod, testColl) => {
- const indexName = "a_1";
- assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
- const indexUri = getUriForIndex(testColl, indexName);
- MongoRunner.stopMongod(mongod);
- const indexFile = dbpath + indexUri + ".wt";
- jsTestLog("corrupting index file: " + indexFile);
- corruptFile(indexFile);
- },
- (testColl) => {
- // This insert will crash the server because it triggers the code path
- // of looking for the index file.
- assert.throws(function() {
- testColl.insert({a: 1});
- });
- },
- "Fatal Assertion 50882");
+/**
+ * Test 4. Corrupt an index file.
+ */
+assertErrorOnRequestWhenFilesAreCorruptOrMissing(
+ dbpath,
+ baseName,
+ collName,
+ (mongod, testColl) => {
+ const indexName = "a_1";
+ assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
+ const indexUri = getUriForIndex(testColl, indexName);
+ MongoRunner.stopMongod(mongod);
+ const indexFile = dbpath + indexUri + ".wt";
+ jsTestLog("corrupting index file: " + indexFile);
+ corruptFile(indexFile);
+ },
+ (testColl) => {
+ // This insert will crash the server because it triggers the code path
+ // of looking for the index file.
+ assert.throws(function() {
+ testColl.insert({a: 1});
+ });
+ },
+ "Fatal Assertion 50882");
})();
diff --git a/jstests/disk/wt_missing_file_errors.js b/jstests/disk/wt_missing_file_errors.js
index 3a9c783aad6..1a28ff17f9c 100644
--- a/jstests/disk/wt_missing_file_errors.js
+++ b/jstests/disk/wt_missing_file_errors.js
@@ -6,73 +6,69 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const baseName = "wt_missing_file_errors";
- const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
+const baseName = "wt_missing_file_errors";
+const collName = "test";
+const dbpath = MongoRunner.dataPath + baseName + "/";
- /**
- * Test 1. Delete a collection's .wt file.
- */
-
- assertErrorOnStartupWhenFilesAreCorruptOrMissing(
- dbpath, baseName, collName, (mongod, testColl) => {
- const testCollUri = getUriForColl(testColl);
- const testCollFile = dbpath + testCollUri + ".wt";
- MongoRunner.stopMongod(mongod);
- jsTestLog("deleting collection file: " + testCollFile);
- removeFile(testCollFile);
- }, "Fatal Assertion 50882");
+/**
+ * Test 1. Delete a collection's .wt file.
+ */
- /**
- * Test 2. Delete the _mdb_catalog.
- */
+assertErrorOnStartupWhenFilesAreCorruptOrMissing(dbpath, baseName, collName, (mongod, testColl) => {
+ const testCollUri = getUriForColl(testColl);
+ const testCollFile = dbpath + testCollUri + ".wt";
+ MongoRunner.stopMongod(mongod);
+ jsTestLog("deleting collection file: " + testCollFile);
+ removeFile(testCollFile);
+}, "Fatal Assertion 50882");
- assertErrorOnStartupWhenFilesAreCorruptOrMissing(
- dbpath, baseName, collName, (mongod, testColl) => {
- MongoRunner.stopMongod(mongod);
- let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
- jsTestLog("deleting catalog file: " + mdbCatalogFile);
- removeFile(mdbCatalogFile);
- }, "Fatal Assertion 50882");
+/**
+ * Test 2. Delete the _mdb_catalog.
+ */
- /**
- * Test 3. Delete the WiredTiger.wt.
- */
+assertErrorOnStartupWhenFilesAreCorruptOrMissing(dbpath, baseName, collName, (mongod, testColl) => {
+ MongoRunner.stopMongod(mongod);
+ let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
+ jsTestLog("deleting catalog file: " + mdbCatalogFile);
+ removeFile(mdbCatalogFile);
+}, "Fatal Assertion 50882");
- assertErrorOnStartupWhenFilesAreCorruptOrMissing(
- dbpath, baseName, collName, (mongod, testColl) => {
- MongoRunner.stopMongod(mongod);
- let WiredTigerWTFile = dbpath + "WiredTiger.wt";
- jsTestLog("deleting WiredTiger.wt");
- removeFile(WiredTigerWTFile);
- }, "Fatal Assertion 28595");
+/**
+ * Test 3. Delete the WiredTiger.wt.
+ */
- /**
- * Test 4. Delete an index file.
- */
+assertErrorOnStartupWhenFilesAreCorruptOrMissing(dbpath, baseName, collName, (mongod, testColl) => {
+ MongoRunner.stopMongod(mongod);
+ let WiredTigerWTFile = dbpath + "WiredTiger.wt";
+ jsTestLog("deleting WiredTiger.wt");
+ removeFile(WiredTigerWTFile);
+}, "Fatal Assertion 28595");
- assertErrorOnRequestWhenFilesAreCorruptOrMissing(
- dbpath,
- baseName,
- collName,
- (mongod, testColl) => {
- const indexName = "a_1";
- assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
- const indexUri = getUriForIndex(testColl, indexName);
- MongoRunner.stopMongod(mongod);
- const indexFile = dbpath + indexUri + ".wt";
- jsTestLog("deleting index file: " + indexFile);
- removeFile(indexFile);
- },
- (testColl) => {
- // This insert will crash the server because it triggers the code path
- // of looking for the index file.
- assert.throws(function() {
- testColl.insert({a: 1});
- });
- },
- "Fatal Assertion 50882");
+/**
+ * Test 4. Delete an index file.
+ */
+assertErrorOnRequestWhenFilesAreCorruptOrMissing(
+ dbpath,
+ baseName,
+ collName,
+ (mongod, testColl) => {
+ const indexName = "a_1";
+ assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
+ const indexUri = getUriForIndex(testColl, indexName);
+ MongoRunner.stopMongod(mongod);
+ const indexFile = dbpath + indexUri + ".wt";
+ jsTestLog("deleting index file: " + indexFile);
+ removeFile(indexFile);
+ },
+ (testColl) => {
+        // This insert will crash the server because it triggers the code path
+        // that looks up the index file.
+ assert.throws(function() {
+ testColl.insert({a: 1});
+ });
+ },
+ "Fatal Assertion 50882");
})();
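The assertErrorOnStartupWhenFilesAreCorruptOrMissing() helper these tests lean on is likewise defined in jstests/disk/libs/wt_file_helper.js. A hedged sketch of its shape, built only from shell primitives used elsewhere in this patch (runMongod returning null on a failed startup, rawMongoProgramOutput() for log scraping):

let assertStartupFailsSketch = function(dbpath, baseName, collName, mutateFn, assertionText) {
    // Seed a collection, let the caller damage or delete a file, then expect the
    // restart to die with the given fatal assertion in its output.
    let mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true});
    let testColl = mongod.getDB(baseName)[collName];
    assert.commandWorked(testColl.insert({a: 1}));
    mutateFn(mongod, testColl);  // the callbacks above stop mongod and remove a file
    clearRawMongoProgramOutput();
    assert.eq(null, MongoRunner.runMongod({dbpath: dbpath, noCleanData: true}));
    assert(rawMongoProgramOutput().includes(assertionText), "expected " + assertionText);
};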
diff --git a/jstests/disk/wt_repair_corrupt_files.js b/jstests/disk/wt_repair_corrupt_files.js
index c19a6d672d1..839dda32ab7 100644
--- a/jstests/disk/wt_repair_corrupt_files.js
+++ b/jstests/disk/wt_repair_corrupt_files.js
@@ -7,109 +7,109 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const baseName = "wt_repair_corrupt_files";
- const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
+const baseName = "wt_repair_corrupt_files";
+const collName = "test";
+const dbpath = MongoRunner.dataPath + baseName + "/";
+
+/**
+ * Run the test by supplying additional parameters to MongoRunner.runMongod with 'mongodOptions'.
+ */
+let runTest = function(mongodOptions) {
+ resetDbpath(dbpath);
+ jsTestLog("Running test with args: " + tojson(mongodOptions));
/**
- * Run the test by supplying additional paramters to MongoRunner.runMongod with 'mongodOptions'.
+ * Test 1. Create a collection, corrupt its .wt file in an unrecoverable way, run repair.
+ * Verify that repair succeeds at rebuilding it. An empty collection should be visible on
+ * normal startup.
*/
- let runTest = function(mongodOptions) {
- resetDbpath(dbpath);
- jsTestLog("Running test with args: " + tojson(mongodOptions));
-
- /**
- * Test 1. Create a collection, corrupt its .wt file in an unrecoverable way, run repair.
- * Verify that repair succeeds at rebuilding it. An empty collection should be visible on
- * normal startup.
- */
- let mongod = startMongodOnExistingPath(dbpath, mongodOptions);
- let testColl = mongod.getDB(baseName)[collName];
+ let mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+ let testColl = mongod.getDB(baseName)[collName];
- const doc = {a: 1};
- assert.commandWorked(testColl.insert(doc));
+ const doc = {a: 1};
+ assert.commandWorked(testColl.insert(doc));
- let testCollUri = getUriForColl(testColl);
- let testCollFile = dbpath + testCollUri + ".wt";
+ let testCollUri = getUriForColl(testColl);
+ let testCollFile = dbpath + testCollUri + ".wt";
- MongoRunner.stopMongod(mongod);
+ MongoRunner.stopMongod(mongod);
- jsTestLog("corrupting collection file: " + testCollFile);
- corruptFile(testCollFile);
+ jsTestLog("corrupting collection file: " + testCollFile);
+ corruptFile(testCollFile);
- assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
+ assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
- mongod = startMongodOnExistingPath(dbpath, mongodOptions);
- testColl = mongod.getDB(baseName)[collName];
+ mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+ testColl = mongod.getDB(baseName)[collName];
- assert.eq(testCollUri, getUriForColl(testColl));
- assert.eq(testColl.find({}).itcount(), 0);
- assert.eq(testColl.count(), 0);
+ assert.eq(testCollUri, getUriForColl(testColl));
+ assert.eq(testColl.find({}).itcount(), 0);
+ assert.eq(testColl.count(), 0);
- /**
- * Test 2. Corrupt an index file in an unrecoverable way. Verify that repair rebuilds and
- * allows MongoDB to start up normally.
- */
+ /**
+ * Test 2. Corrupt an index file in an unrecoverable way. Verify that repair rebuilds and
+ * allows MongoDB to start up normally.
+ */
- assert.commandWorked(testColl.insert(doc));
+ assert.commandWorked(testColl.insert(doc));
- const indexName = "a_1";
- assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
- assertQueryUsesIndex(testColl, doc, indexName);
+ const indexName = "a_1";
+ assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
+ assertQueryUsesIndex(testColl, doc, indexName);
- let indexUri = getUriForIndex(testColl, indexName);
+ let indexUri = getUriForIndex(testColl, indexName);
- MongoRunner.stopMongod(mongod);
+ MongoRunner.stopMongod(mongod);
- let indexFile = dbpath + indexUri + ".wt";
- jsTestLog("corrupting index file: " + indexFile);
- corruptFile(indexFile);
+ let indexFile = dbpath + indexUri + ".wt";
+ jsTestLog("corrupting index file: " + indexFile);
+ corruptFile(indexFile);
- assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
- mongod = startMongodOnExistingPath(dbpath, mongodOptions);
- testColl = mongod.getDB(baseName)[collName];
+ assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
+ mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+ testColl = mongod.getDB(baseName)[collName];
- // Repair creates new idents.
- assert.neq(indexUri, getUriForIndex(testColl, indexName));
+ // Repair creates new idents.
+ assert.neq(indexUri, getUriForIndex(testColl, indexName));
- assertQueryUsesIndex(testColl, doc, indexName);
- assert.eq(testColl.find(doc).itcount(), 1);
- assert.eq(testColl.count(), 1);
+ assertQueryUsesIndex(testColl, doc, indexName);
+ assert.eq(testColl.find(doc).itcount(), 1);
+ assert.eq(testColl.count(), 1);
- MongoRunner.stopMongod(mongod);
+ MongoRunner.stopMongod(mongod);
- /**
- * Test 3. Corrupt the _mdb_catalog in an unrecoverable way. Verify that repair suceeds
- * in creating an empty catalog and recovers the orphaned testColl, which will still be
- * accessible in the 'local.orphan-' namespace.
- */
+ /**
+     * Test 3. Corrupt the _mdb_catalog in an unrecoverable way. Verify that repair succeeds
+ * in creating an empty catalog and recovers the orphaned testColl, which will still be
+ * accessible in the 'local.orphan-' namespace.
+ */
- let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
- jsTestLog("corrupting catalog file: " + mdbCatalogFile);
- corruptFile(mdbCatalogFile);
+ let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
+ jsTestLog("corrupting catalog file: " + mdbCatalogFile);
+ corruptFile(mdbCatalogFile);
- assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
+ assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
- mongod = startMongodOnExistingPath(dbpath, mongodOptions);
- testColl = mongod.getDB(baseName)[collName];
- assert.isnull(testColl.exists());
- assert.eq(testColl.find(doc).itcount(), 0);
- assert.eq(testColl.count(), 0);
+ mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+ testColl = mongod.getDB(baseName)[collName];
+ assert.isnull(testColl.exists());
+ assert.eq(testColl.find(doc).itcount(), 0);
+ assert.eq(testColl.count(), 0);
- // Ensure the collection orphan was created with the existing document.
- const orphanCollName = "orphan." + testCollUri.replace(/-/g, "_");
- let orphanColl = mongod.getDB('local').getCollection(orphanCollName);
- assert(orphanColl.exists());
- assert.eq(orphanColl.find(doc).itcount(), 1);
- assert.eq(orphanColl.count(), 1);
+ // Ensure the collection orphan was created with the existing document.
+ const orphanCollName = "orphan." + testCollUri.replace(/-/g, "_");
+ let orphanColl = mongod.getDB('local').getCollection(orphanCollName);
+ assert(orphanColl.exists());
+ assert.eq(orphanColl.find(doc).itcount(), 1);
+ assert.eq(orphanColl.count(), 1);
- MongoRunner.stopMongod(mongod);
- };
+ MongoRunner.stopMongod(mongod);
+};
- runTest({});
- runTest({directoryperdb: ""});
- runTest({wiredTigerDirectoryForIndexes: ""});
+runTest({});
+runTest({directoryperdb: ""});
+runTest({wiredTigerDirectoryForIndexes: ""});
})();
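assertRepairSucceeds(), used throughout the file above, is another wt_file_helper.js utility. Judging by the explicit runMongoProgram() invocation in wt_repair_orphaned_idents.js further down, a plausible minimal version (ignoring the extra mongodOptions plumbing) is simply:

let assertRepairSucceedsSketch = function(dbpath, port) {
    jsTestLog("running mongod --repair on " + dbpath);
    assert.eq(0, runMongoProgram("mongod", "--repair", "--port", port, "--dbpath", dbpath));
};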
diff --git a/jstests/disk/wt_repair_corrupt_metadata.js b/jstests/disk/wt_repair_corrupt_metadata.js
index 0b4bc6a6083..2f8edfe2608 100644
--- a/jstests/disk/wt_repair_corrupt_metadata.js
+++ b/jstests/disk/wt_repair_corrupt_metadata.js
@@ -7,112 +7,112 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
-
- const baseName = "wt_repair_corrupt_metadata";
- const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
-
- /**
- * This test runs repair using a version of the WiredTiger.turtle file that has checkpoint
- * information before the collection was created. The turtle file contains checkpoint
- * information about the WiredTiger.wt file, so if these two files become out of sync,
- * WiredTiger will have to attempt a salvage operation on the .wt file and rebuild the .turtle
- * file.
- *
- * The expectation is that the metadata salvage will be successful, and that the collection will
- * be recreated with all of its data.
- */
- let runTest = function(mongodOptions) {
- // Unfortunately using --nojournal triggers a WT_PANIC and aborts in debug builds, which the
- // following test case can exercise.
- // TODO: This return can be removed once WT-4310 is completed.
- let isDebug = db.adminCommand('buildInfo').debug;
- if (isDebug) {
- jsTestLog("Skipping test case because this is a debug build");
- return;
- }
-
- resetDbpath(dbpath);
- jsTestLog("Running test with args: " + tojson(mongodOptions));
+load('jstests/disk/libs/wt_file_helper.js');
- const turtleFile = dbpath + "WiredTiger.turtle";
- const turtleFileWithoutCollection = dbpath + "WiredTiger.turtle.1";
+const baseName = "wt_repair_corrupt_metadata";
+const collName = "test";
+const dbpath = MongoRunner.dataPath + baseName + "/";
- let mongod = startMongodOnExistingPath(dbpath, mongodOptions);
-
- // Force a checkpoint and make a copy of the turtle file.
- assert.commandWorked(mongod.getDB(baseName).adminCommand({fsync: 1}));
- jsTestLog("Making copy of metadata file before creating the collection: " +
- turtleFileWithoutCollection);
- copyFile(turtleFile, turtleFileWithoutCollection);
-
- let testColl = mongod.getDB(baseName)[collName];
- assert.commandWorked(testColl.insert({a: 1}));
+/**
+ * This test runs repair using a version of the WiredTiger.turtle file that has checkpoint
+ * information before the collection was created. The turtle file contains checkpoint
+ * information about the WiredTiger.wt file, so if these two files become out of sync,
+ * WiredTiger will have to attempt a salvage operation on the .wt file and rebuild the .turtle
+ * file.
+ *
+ * The expectation is that the metadata salvage will be successful, and that the collection will
+ * be recreated with all of its data.
+ */
+let runTest = function(mongodOptions) {
+    // Unfortunately, running with --nojournal can trigger a WT_PANIC and abort in debug
+    // builds, a path the following test case can exercise.
+ // TODO: This return can be removed once WT-4310 is completed.
+ let isDebug = db.adminCommand('buildInfo').debug;
+ if (isDebug) {
+ jsTestLog("Skipping test case because this is a debug build");
+ return;
+ }
+
+ resetDbpath(dbpath);
+ jsTestLog("Running test with args: " + tojson(mongodOptions));
+
+ const turtleFile = dbpath + "WiredTiger.turtle";
+ const turtleFileWithoutCollection = dbpath + "WiredTiger.turtle.1";
+
+ let mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+
+ // Force a checkpoint and make a copy of the turtle file.
+ assert.commandWorked(mongod.getDB(baseName).adminCommand({fsync: 1}));
+ jsTestLog("Making copy of metadata file before creating the collection: " +
+ turtleFileWithoutCollection);
+ copyFile(turtleFile, turtleFileWithoutCollection);
+
+ let testColl = mongod.getDB(baseName)[collName];
+ assert.commandWorked(testColl.insert({a: 1}));
+
+ // Force another checkpoint before a clean shutdown.
+ assert.commandWorked(mongod.getDB(baseName).adminCommand({fsync: 1}));
+ MongoRunner.stopMongod(mongod);
+
+ // Guarantee the turtle files changed between checkpoints.
+ assert.neq(md5sumFile(turtleFileWithoutCollection), md5sumFile(turtleFile));
+
+ jsTestLog("Replacing metadata file with a version before the collection existed.");
+ removeFile(turtleFile);
+ copyFile(turtleFileWithoutCollection, turtleFile);
+
+ // This test characterizes the current WiredTiger salvage behaviour, which may be subject to
+ // change in the future. See SERVER-41667.
+ assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
+
+ mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+ testColl = mongod.getDB(baseName)[collName];
+
+ // The collection exists despite using an older turtle file because salvage is able to find
+ // the table in the WiredTiger.wt file.
+ assert(testColl.exists());
+ // We can assert that the data exists because the salvage only took place on the metadata,
+ // not the data.
+ assert.eq(testColl.find({}).itcount(), 1);
+ MongoRunner.stopMongod(mongod);
+
+ // Corrupt the .turtle file in a very specific way such that the log sequence numbers are
+ // invalid.
+ if (mongodOptions.hasOwnProperty('journal')) {
+ // TODO: This return can be removed once WT-4459 is completed.
+ if (_isAddressSanitizerActive()) {
+ jsTestLog("Skipping log file corruption because the address sanitizer is active.");
+ return;
+ }
- // Force another checkpoint before a clean shutdown.
- assert.commandWorked(mongod.getDB(baseName).adminCommand({fsync: 1}));
- MongoRunner.stopMongod(mongod);
+ jsTestLog("Corrupting log file metadata");
- // Guarantee the turtle files changed between checkpoints.
- assert.neq(md5sumFile(turtleFileWithoutCollection), md5sumFile(turtleFile));
+ let data = cat(turtleFile, true /* useBinaryMode */);
+ let re = /checkpoint_lsn=\(([0-9,]+)\)/g;
+ let newData = data.replace(re, "checkpoint_lsn=(1,2)");
- jsTestLog("Replacing metadata file with a version before the collection existed.");
+ print('writing data to new turtle file: \n' + newData);
removeFile(turtleFile);
- copyFile(turtleFileWithoutCollection, turtleFile);
+ writeFile(turtleFile, newData, true /* useBinaryMode */);
- // This test characterizes the current WiredTiger salvage behaviour, which may be subject to
- // change in the future. See SERVER-41667.
assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
mongod = startMongodOnExistingPath(dbpath, mongodOptions);
testColl = mongod.getDB(baseName)[collName];
- // The collection exists despite using an older turtle file because salvage is able to find
- // the table in the WiredTiger.wt file.
+ // The collection exists despite using a salvaged turtle file because salvage is able to
+ // find the table in the WiredTiger.wt file.
assert(testColl.exists());
- // We can assert that the data exists because the salvage only took place on the metadata,
- // not the data.
+
+ // We can assert that the data exists because the salvage only took place on the
+ // metadata, not the data.
assert.eq(testColl.find({}).itcount(), 1);
MongoRunner.stopMongod(mongod);
+ }
+};
- // Corrupt the .turtle file in a very specific way such that the log sequence numbers are
- // invalid.
- if (mongodOptions.hasOwnProperty('journal')) {
- // TODO: This return can be removed once WT-4459 is completed.
- if (_isAddressSanitizerActive()) {
- jsTestLog("Skipping log file corruption because the address sanitizer is active.");
- return;
- }
-
- jsTestLog("Corrupting log file metadata");
-
- let data = cat(turtleFile, true /* useBinaryMode */);
- let re = /checkpoint_lsn=\(([0-9,]+)\)/g;
- let newData = data.replace(re, "checkpoint_lsn=(1,2)");
-
- print('writing data to new turtle file: \n' + newData);
- removeFile(turtleFile);
- writeFile(turtleFile, newData, true /* useBinaryMode */);
-
- assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
-
- mongod = startMongodOnExistingPath(dbpath, mongodOptions);
- testColl = mongod.getDB(baseName)[collName];
-
- // The collection exists despite using a salvaged turtle file because salvage is able to
- // find the table in the WiredTiger.wt file.
- assert(testColl.exists());
-
- // We can assert that the data exists because the salvage only took place on the
- // metadata, not the data.
- assert.eq(testColl.find({}).itcount(), 1);
- MongoRunner.stopMongod(mongod);
- }
- };
-
- // Repair may behave differently with journaling enabled or disabled, but the end result should
- // be the same.
- runTest({journal: ""});
- runTest({nojournal: ""});
+// Repair may behave differently with journaling enabled or disabled, but the end result should
+// be the same.
+runTest({journal: ""});
+runTest({nojournal: ""});
})();
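To make the turtle-file corruption above concrete: the regex rewrites whatever checkpoint LSN the metadata recorded into one that cannot match the log, forcing salvage on the next repair. A standalone illustration (the sample value is made up, not a real WiredTiger.turtle excerpt):

let re = /checkpoint_lsn=\(([0-9,]+)\)/g;
let sample = "checkpoint_lsn=(4,8960)";  // hypothetical LSN for illustration
print(sample.replace(re, "checkpoint_lsn=(1,2)"));  // prints: checkpoint_lsn=(1,2)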
diff --git a/jstests/disk/wt_repair_missing_files.js b/jstests/disk/wt_repair_missing_files.js
index 7e2f2fa2bf8..e9d505458e9 100644
--- a/jstests/disk/wt_repair_missing_files.js
+++ b/jstests/disk/wt_repair_missing_files.js
@@ -7,138 +7,140 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
+load('jstests/disk/libs/wt_file_helper.js');
- const baseName = "wt_repair_missing_files";
- const collName = "test";
- const dbpath = MongoRunner.dataPath + baseName + "/";
+const baseName = "wt_repair_missing_files";
+const collName = "test";
+const dbpath = MongoRunner.dataPath + baseName + "/";
- resetDbpath(dbpath);
+resetDbpath(dbpath);
- /**
- * Test 1. Create a collection, delete it's .wt file, run repair. Verify that repair succeeds at
- * re-creating it. The collection should be visible on normal startup.
- */
+/**
+ * Test 1. Create a collection, delete its .wt file, run repair. Verify that repair succeeds at
+ * re-creating it. The collection should be visible on normal startup.
+ */
- let mongod = startMongodOnExistingPath(dbpath);
- let testColl = mongod.getDB(baseName)[collName];
+let mongod = startMongodOnExistingPath(dbpath);
+let testColl = mongod.getDB(baseName)[collName];
- const doc = {a: 1};
- assert.commandWorked(testColl.insert(doc));
+const doc = {
+ a: 1
+};
+assert.commandWorked(testColl.insert(doc));
- let testCollUri = getUriForColl(testColl);
- let testCollFile = dbpath + testCollUri + ".wt";
+let testCollUri = getUriForColl(testColl);
+let testCollFile = dbpath + testCollUri + ".wt";
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- jsTestLog("deleting collection file: " + testCollFile);
- removeFile(testCollFile);
+jsTestLog("deleting collection file: " + testCollFile);
+removeFile(testCollFile);
- assertRepairSucceeds(dbpath, mongod.port);
+assertRepairSucceeds(dbpath, mongod.port);
- mongod = startMongodOnExistingPath(dbpath);
- testColl = mongod.getDB(baseName)[collName];
+mongod = startMongodOnExistingPath(dbpath);
+testColl = mongod.getDB(baseName)[collName];
- assert.eq(testCollUri, getUriForColl(testColl));
- assert.eq(testColl.find({}).itcount(), 0);
- assert.eq(testColl.count(), 0);
+assert.eq(testCollUri, getUriForColl(testColl));
+assert.eq(testColl.find({}).itcount(), 0);
+assert.eq(testColl.count(), 0);
- /**
- * Test 2. Delete an index file. Verify that repair rebuilds and allows MongoDB to start up
- * normally.
- */
+/**
+ * Test 2. Delete an index file. Verify that repair rebuilds and allows MongoDB to start up
+ * normally.
+ */
- assert.commandWorked(testColl.insert(doc));
+assert.commandWorked(testColl.insert(doc));
- const indexName = "a_1";
- assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
- assertQueryUsesIndex(testColl, doc, indexName);
+const indexName = "a_1";
+assert.commandWorked(testColl.createIndex({a: 1}, {name: indexName}));
+assertQueryUsesIndex(testColl, doc, indexName);
- let indexUri = getUriForIndex(testColl, indexName);
+let indexUri = getUriForIndex(testColl, indexName);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- let indexFile = dbpath + indexUri + ".wt";
- jsTestLog("deleting index file: " + indexFile);
- removeFile(indexFile);
+let indexFile = dbpath + indexUri + ".wt";
+jsTestLog("deleting index file: " + indexFile);
+removeFile(indexFile);
- assertRepairSucceeds(dbpath, mongod.port);
- mongod = startMongodOnExistingPath(dbpath);
- testColl = mongod.getDB(baseName)[collName];
+assertRepairSucceeds(dbpath, mongod.port);
+mongod = startMongodOnExistingPath(dbpath);
+testColl = mongod.getDB(baseName)[collName];
- // Repair creates new idents.
- assert.neq(indexUri, getUriForIndex(testColl, indexName));
+// Repair creates new idents.
+assert.neq(indexUri, getUriForIndex(testColl, indexName));
- assertQueryUsesIndex(testColl, doc, indexName);
- assert.eq(testColl.find(doc).itcount(), 1);
- assert.eq(testColl.count(), 1);
+assertQueryUsesIndex(testColl, doc, indexName);
+assert.eq(testColl.find(doc).itcount(), 1);
+assert.eq(testColl.count(), 1);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- /**
- * Test 3. Delete the sizeStorer. Verify that repair suceeds in recreating it.
- */
+/**
+ * Test 3. Delete the sizeStorer. Verify that repair succeeds in recreating it.
+ */
- let sizeStorerFile = dbpath + "sizeStorer.wt";
- jsTestLog("deleting size storer file: " + sizeStorerFile);
- removeFile(sizeStorerFile);
+let sizeStorerFile = dbpath + "sizeStorer.wt";
+jsTestLog("deleting size storer file: " + sizeStorerFile);
+removeFile(sizeStorerFile);
- assertRepairSucceeds(dbpath, mongod.port);
+assertRepairSucceeds(dbpath, mongod.port);
- mongod = startMongodOnExistingPath(dbpath);
- testColl = mongod.getDB(baseName)[collName];
+mongod = startMongodOnExistingPath(dbpath);
+testColl = mongod.getDB(baseName)[collName];
- assert.eq(testColl.find(doc).itcount(), 1);
- assert.eq(testColl.count(), 1);
- MongoRunner.stopMongod(mongod);
+assert.eq(testColl.find(doc).itcount(), 1);
+assert.eq(testColl.count(), 1);
+MongoRunner.stopMongod(mongod);
- /**
- * Test 4. Delete the _mdb_catalog. Verify that repair suceeds in creating an empty catalog and
- * MongoDB starts up normally with no data.
- */
+/**
+ * Test 4. Delete the _mdb_catalog. Verify that repair succeeds in creating an empty catalog and
+ * MongoDB starts up normally with no data.
+ */
- let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
- jsTestLog("deleting catalog file: " + mdbCatalogFile);
- removeFile(mdbCatalogFile);
+let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
+jsTestLog("deleting catalog file: " + mdbCatalogFile);
+removeFile(mdbCatalogFile);
- assertRepairSucceeds(dbpath, mongod.port);
+assertRepairSucceeds(dbpath, mongod.port);
- mongod = startMongodOnExistingPath(dbpath);
- testColl = mongod.getDB(baseName)[collName];
- assert.isnull(testColl.exists());
+mongod = startMongodOnExistingPath(dbpath);
+testColl = mongod.getDB(baseName)[collName];
+assert.isnull(testColl.exists());
- assert.eq(testColl.find(doc).itcount(), 0);
- assert.eq(testColl.count(), 0);
+assert.eq(testColl.find(doc).itcount(), 0);
+assert.eq(testColl.count(), 0);
- /**
- * Test 5. Verify that using repair with --directoryperdb creates a missing directory and its
- * files, allowing MongoDB to start up normally.
- */
+/**
+ * Test 5. Verify that using repair with --directoryperdb creates a missing directory and its
+ * files, allowing MongoDB to start up normally.
+ */
- MongoRunner.stopMongod(mongod);
- resetDbpath(dbpath);
+MongoRunner.stopMongod(mongod);
+resetDbpath(dbpath);
- mongod = startMongodOnExistingPath(dbpath, {directoryperdb: ""});
- testColl = mongod.getDB(baseName)[collName];
+mongod = startMongodOnExistingPath(dbpath, {directoryperdb: ""});
+testColl = mongod.getDB(baseName)[collName];
- assert.commandWorked(testColl.insert(doc));
+assert.commandWorked(testColl.insert(doc));
- testCollUri = getUriForColl(testColl);
+testCollUri = getUriForColl(testColl);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- let dataDir = dbpath + baseName;
- jsTestLog("deleting data directory: " + dataDir);
- removeFile(dataDir);
+let dataDir = dbpath + baseName;
+jsTestLog("deleting data directory: " + dataDir);
+removeFile(dataDir);
- assertRepairSucceeds(dbpath, mongod.port, {directoryperdb: ""});
+assertRepairSucceeds(dbpath, mongod.port, {directoryperdb: ""});
- mongod = startMongodOnExistingPath(dbpath, {directoryperdb: ""});
- testColl = mongod.getDB(baseName)[collName];
+mongod = startMongodOnExistingPath(dbpath, {directoryperdb: ""});
+testColl = mongod.getDB(baseName)[collName];
- assert.eq(testCollUri, getUriForColl(testColl));
- assert.eq(testColl.find({}).itcount(), 0);
- assert.eq(testColl.count(), 0);
+assert.eq(testCollUri, getUriForColl(testColl));
+assert.eq(testColl.find({}).itcount(), 0);
+assert.eq(testColl.count(), 0);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
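assertQueryUsesIndex() is also supplied by jstests/disk/libs/wt_file_helper.js. One plausible implementation, assuming the standard explain() output shape rather than the helper's actual code, checks the winning plan for an IXSCAN stage over the expected index:

let assertQueryUsesIndexSketch = function(coll, query, indexName) {
    const winningPlan = coll.find(query).explain().queryPlanner.winningPlan;
    // A simple indexed query typically plans as FETCH -> IXSCAN.
    const ixscan = winningPlan.inputStage ? winningPlan.inputStage : winningPlan;
    assert.eq("IXSCAN", ixscan.stage);
    assert.eq(indexName, ixscan.indexName);
};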
diff --git a/jstests/disk/wt_repair_orphaned_idents.js b/jstests/disk/wt_repair_orphaned_idents.js
index 43d57f1ff28..83d3bfee424 100644
--- a/jstests/disk/wt_repair_orphaned_idents.js
+++ b/jstests/disk/wt_repair_orphaned_idents.js
@@ -6,83 +6,81 @@
(function() {
- load('jstests/disk/libs/wt_file_helper.js');
-
- const baseName = "wt_repair_orphaned_idents";
- const dbpath = MongoRunner.dataPath + baseName + "/";
-
- resetDbpath(dbpath);
-
- // Create a collection and insert a doc.
- let mongod = MongoRunner.runMongod({dbpath: dbpath});
- const importantCollName = "importantColl";
- const importantDocId = "importantDoc";
- const importantColl = mongod.getDB("test")[importantCollName];
- assert.commandWorked(importantColl.insert({_id: importantDocId}));
- const importantCollIdent = getUriForColl(importantColl);
- MongoRunner.stopMongod(mongod);
-
- // Delete the _mdb_catalog.
- let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
- jsTestLog("deleting catalog file: " + mdbCatalogFile);
- removeFile(mdbCatalogFile);
-
- // Repair crates the _mdb_catalog and catalog entries for all the orphaned idents.
- jsTestLog("running mongod with --repair");
- assert.eq(0, runMongoProgram("mongod", "--repair", "--port", mongod.port, "--dbpath", dbpath));
-
- jsTestLog("restarting mongod");
- mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true});
-
- let localDb = mongod.getDB("local");
- let res = localDb.runCommand({listCollections: 1});
- assert.commandWorked(res, tojson(res));
-
- // This is the function that 'show collections' uses.
- let collNames = localDb.getCollectionNames();
-
- const orphanPrefix = "orphan.";
- let recoveredCount = 0;
- const orphanedImportantCollName = "orphan." + importantCollIdent.replace(/-/g, "_");
- for (let collName of collNames) {
- if (collName.startsWith(orphanPrefix)) {
- // Manually create the _id index.
- assert.commandWorked(localDb[collName].createIndex({_id: 1}));
-
- if (collName == orphanedImportantCollName) {
- assert.commandWorked(localDb.adminCommand(
- {renameCollection: "local." + collName, to: "test." + importantCollName}));
- } else {
- assert.commandWorked(localDb.adminCommand({
- renameCollection: "local." + collName,
- to: "test.recovered" + recoveredCount
- }));
- }
- recoveredCount++;
+load('jstests/disk/libs/wt_file_helper.js');
+
+const baseName = "wt_repair_orphaned_idents";
+const dbpath = MongoRunner.dataPath + baseName + "/";
+
+resetDbpath(dbpath);
+
+// Create a collection and insert a doc.
+let mongod = MongoRunner.runMongod({dbpath: dbpath});
+const importantCollName = "importantColl";
+const importantDocId = "importantDoc";
+const importantColl = mongod.getDB("test")[importantCollName];
+assert.commandWorked(importantColl.insert({_id: importantDocId}));
+const importantCollIdent = getUriForColl(importantColl);
+MongoRunner.stopMongod(mongod);
+
+// Delete the _mdb_catalog.
+let mdbCatalogFile = dbpath + "_mdb_catalog.wt";
+jsTestLog("deleting catalog file: " + mdbCatalogFile);
+removeFile(mdbCatalogFile);
+
+// Repair creates the _mdb_catalog and catalog entries for all the orphaned idents.
+jsTestLog("running mongod with --repair");
+assert.eq(0, runMongoProgram("mongod", "--repair", "--port", mongod.port, "--dbpath", dbpath));
+
+jsTestLog("restarting mongod");
+mongod = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true});
+
+let localDb = mongod.getDB("local");
+let res = localDb.runCommand({listCollections: 1});
+assert.commandWorked(res, tojson(res));
+
+// This is the function that 'show collections' uses.
+let collNames = localDb.getCollectionNames();
+
+const orphanPrefix = "orphan.";
+let recoveredCount = 0;
+const orphanedImportantCollName = "orphan." + importantCollIdent.replace(/-/g, "_");
+for (let collName of collNames) {
+ if (collName.startsWith(orphanPrefix)) {
+ // Manually create the _id index.
+ assert.commandWorked(localDb[collName].createIndex({_id: 1}));
+
+ if (collName == orphanedImportantCollName) {
+ assert.commandWorked(localDb.adminCommand(
+ {renameCollection: "local." + collName, to: "test." + importantCollName}));
+ } else {
+ assert.commandWorked(localDb.adminCommand(
+ {renameCollection: "local." + collName, to: "test.recovered" + recoveredCount}));
}
+ recoveredCount++;
}
- assert.gt(recoveredCount, 0);
+}
+assert.gt(recoveredCount, 0);
- let testDb = mongod.getDB("test");
+let testDb = mongod.getDB("test");
- // Assert the recovered collection still has the original document.
- assert.eq(testDb[importantCollName].find({_id: importantDocId}).count(), 1);
+// Assert the recovered collection still has the original document.
+assert.eq(testDb[importantCollName].find({_id: importantDocId}).count(), 1);
- res = testDb.runCommand({listCollections: 1});
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, recoveredCount);
- for (let entry of res.cursor.firstBatch) {
- let collName = entry.name;
- assert(collName.startsWith("recovered") || collName == importantCollName);
+res = testDb.runCommand({listCollections: 1});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, recoveredCount);
+for (let entry of res.cursor.firstBatch) {
+ let collName = entry.name;
+ assert(collName.startsWith("recovered") || collName == importantCollName);
- // Assert _id index has been successfully created.
- assert("idIndex" in entry);
+ // Assert _id index has been successfully created.
+ assert("idIndex" in entry);
- // Make sure we can interact with the recovered collections.
- assert.commandWorked(testDb.runCommand({find: collName}));
- assert.commandWorked(testDb[collName].insert({x: 1}));
- assert(testDb[collName].drop());
- }
+ // Make sure we can interact with the recovered collections.
+ assert.commandWorked(testDb.runCommand({find: collName}));
+ assert.commandWorked(testDb[collName].insert({x: 1}));
+ assert(testDb[collName].drop());
+}
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
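The orphan collection names asserted above are derived mechanically from the collection's ident: repair parks the recovered data in the 'local' database under "orphan." plus the ident with dashes mapped to underscores, which is why both this test and wt_repair_corrupt_files.js rebuild the name with replace(). With a made-up ident:

const ident = "collection-7-123456789";  // hypothetical ident, not from a real run
print("orphan." + ident.replace(/-/g, "_"));  // prints: orphan.collection_7_123456789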
diff --git a/jstests/fail_point/fail_point.js b/jstests/fail_point/fail_point.js
index c39238344a3..6cd53fe5ad1 100644
--- a/jstests/fail_point/fail_point.js
+++ b/jstests/fail_point/fail_point.js
@@ -1,93 +1,93 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
-
- /**
- * Performs basic checks on the configureFailPoint command. Also check
- * mongo/util/fail_point_test.cpp for unit tests.
- *
- * @param adminDB {DB} the admin database database object
- */
- function runTest(adminDB) {
- function expectFailPointState(fpState, expectedMode, expectedData) {
- assert.eq(expectedMode, fpState.mode);
-
- // Check that all expected data is present.
- for (var field in expectedData) { // Valid only for 1 level field checks
- assert.eq(expectedData[field], fpState.data[field]);
- }
-
- // Check that all present data is expected.
- for (field in fpState.data) {
- assert.eq(expectedData[field], fpState.data[field]);
- }
+'use strict';
+
+/**
+ * Performs basic checks on the configureFailPoint command. Also check
+ * mongo/util/fail_point_test.cpp for unit tests.
+ *
+ * @param adminDB {DB} the admin database object
+ */
+function runTest(adminDB) {
+ function expectFailPointState(fpState, expectedMode, expectedData) {
+ assert.eq(expectedMode, fpState.mode);
+
+ // Check that all expected data is present.
+        for (var field in expectedData) {  // Valid only for 1-level field checks.
+ assert.eq(expectedData[field], fpState.data[field]);
}
- var res;
-
- // A failpoint's state can be read through getParameter by prefixing its name with
- // "failpoint"
-
- // Test non-existing fail point
- assert.commandFailed(
- adminDB.runCommand({configureFailPoint: 'fpNotExist', mode: 'alwaysOn', data: {x: 1}}));
-
- // Test bad mode string
- assert.commandFailed(
- adminDB.runCommand({configureFailPoint: 'dummy', mode: 'badMode', data: {x: 1}}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 0, {});
-
- // Test bad mode obj
- assert.commandFailed(
- adminDB.runCommand({configureFailPoint: 'dummy', mode: {foo: 3}, data: {x: 1}}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 0, {});
-
- // Test bad mode type
- assert.commandFailed(
- adminDB.runCommand({configureFailPoint: 'dummy', mode: true, data: {x: 1}}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 0, {});
-
- // Test bad data type
- assert.commandFailed(
- adminDB.runCommand({configureFailPoint: 'dummy', mode: 'alwaysOn', data: 'data'}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 0, {});
-
- // Test setting mode to off.
- assert.commandWorked(adminDB.runCommand({configureFailPoint: 'dummy', mode: 'off'}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 0, {});
-
- // Test setting mode to skip.
- assert.commandWorked(adminDB.runCommand({configureFailPoint: 'dummy', mode: {skip: 2}}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 4, {});
-
- // Test good command w/ data
- assert.commandWorked(
- adminDB.runCommand({configureFailPoint: 'dummy', mode: 'alwaysOn', data: {x: 1}}));
- res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.commandWorked(res);
- expectFailPointState(res["failpoint.dummy"], 1, {x: 1});
+ // Check that all present data is expected.
+ for (field in fpState.data) {
+ assert.eq(expectedData[field], fpState.data[field]);
+ }
}
- var conn = MongoRunner.runMongod();
- runTest(conn.getDB('admin'));
- MongoRunner.stopMongod(conn);
-
- ///////////////////////////////////////////////////////////
- // Test mongos
- var st = new ShardingTest({shards: 1});
- runTest(st.s.getDB('admin'));
- st.stop();
+ var res;
+
+ // A failpoint's state can be read through getParameter by prefixing its name with
+ // "failpoint"
+
+ // Test non-existing fail point
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'fpNotExist', mode: 'alwaysOn', data: {x: 1}}));
+
+ // Test bad mode string
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: 'badMode', data: {x: 1}}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 0, {});
+
+ // Test bad mode obj
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: {foo: 3}, data: {x: 1}}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 0, {});
+
+ // Test bad mode type
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: true, data: {x: 1}}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 0, {});
+
+ // Test bad data type
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: 'alwaysOn', data: 'data'}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 0, {});
+
+ // Test setting mode to off.
+ assert.commandWorked(adminDB.runCommand({configureFailPoint: 'dummy', mode: 'off'}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 0, {});
+
+ // Test setting mode to skip.
+ assert.commandWorked(adminDB.runCommand({configureFailPoint: 'dummy', mode: {skip: 2}}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 4, {});
+
+ // Test good command w/ data
+ assert.commandWorked(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: 'alwaysOn', data: {x: 1}}));
+ res = adminDB.runCommand({getParameter: 1, "failpoint.dummy": 1});
+ assert.commandWorked(res);
+ expectFailPointState(res["failpoint.dummy"], 1, {x: 1});
+}
+
+var conn = MongoRunner.runMongod();
+runTest(conn.getDB('admin'));
+MongoRunner.stopMongod(conn);
+
+///////////////////////////////////////////////////////////
+// Test mongos
+var st = new ShardingTest({shards: 1});
+runTest(st.s.getDB('admin'));
+st.stop();
})();
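As the assertions above encode, a failpoint's state reads back through getParameter as an internal enum: 'off' reports mode 0, 'alwaysOn' mode 1, and {skip: N} mode 4. A compact round-trip under the same assumptions as this test (a standalone with test commands enabled):

const fpConn = MongoRunner.runMongod();
const fpAdmin = fpConn.getDB('admin');
assert.commandWorked(fpAdmin.runCommand({configureFailPoint: 'dummy', mode: {skip: 2}}));
const fpState = fpAdmin.runCommand({getParameter: 1, "failpoint.dummy": 1})["failpoint.dummy"];
assert.eq(4, fpState.mode);  // 'skip' reads back as enum value 4
assert.commandWorked(fpAdmin.runCommand({configureFailPoint: 'dummy', mode: 'off'}));
MongoRunner.stopMongod(fpConn);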
diff --git a/jstests/fail_point/set_failpoint_through_set_parameter.js b/jstests/fail_point/set_failpoint_through_set_parameter.js
index df25309c2b6..d081913555a 100644
--- a/jstests/fail_point/set_failpoint_through_set_parameter.js
+++ b/jstests/fail_point/set_failpoint_through_set_parameter.js
@@ -4,137 +4,137 @@
*/
(function() {
- "use strict";
-
- var assertStartupSucceeds = function(conn) {
- assert.commandWorked(conn.adminCommand({ismaster: 1}));
- };
-
- var assertStartupFails = function(conn) {
- assert.eq(null, conn);
- };
-
- var validFailpointPayload = {'mode': 'alwaysOn'};
- var validFailpointPayloadWithData = {'mode': 'alwaysOn', 'data': {x: 1}};
- var invalidFailpointPayload = "notJSON";
-
- // In order to be able connect to a mongos that starts up successfully, start a config replica
- // set so that we can provide a valid config connection string to the mongos.
- var configRS = new ReplSetTest({nodes: 3});
- configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
- configRS.initiate();
-
- // Setting a failpoint via --setParameter fails if enableTestCommands is not on.
- jsTest.setOption('enableTestCommands', false);
- assertStartupFails(
- MongoRunner.runMongod({setParameter: "failpoint.dummy=" + tojson(validFailpointPayload)}));
- assertStartupFails(MongoRunner.runMongos({
- setParameter: "failpoint.dummy=" + tojson(validFailpointPayload),
- configdb: configRS.getURL()
- }));
- jsTest.setOption('enableTestCommands', true);
-
- // Passing an invalid failpoint payload fails.
- assertStartupFails(MongoRunner.runMongod(
- {setParameter: "failpoint.dummy=" + tojson(invalidFailpointPayload)}));
- assertStartupFails(MongoRunner.runMongos({
- setParameter: "failpoint.dummy=" + tojson(invalidFailpointPayload),
- configdb: configRS.getURL()
- }));
-
- // Valid startup configurations succeed.
- var mongod =
- MongoRunner.runMongod({setParameter: "failpoint.dummy=" + tojson(validFailpointPayload)});
- assertStartupSucceeds(mongod);
- MongoRunner.stopMongod(mongod);
-
- var mongos = MongoRunner.runMongos({
- setParameter: "failpoint.dummy=" + tojson(validFailpointPayload),
- configdb: configRS.getURL()
- });
- assertStartupSucceeds(mongos);
- MongoRunner.stopMongos(mongos);
-
- mongod = MongoRunner.runMongod(
- {setParameter: "failpoint.dummy=" + tojson(validFailpointPayloadWithData)});
- assertStartupSucceeds(mongod);
-
- mongos = MongoRunner.runMongos({
- setParameter: "failpoint.dummy=" + tojson(validFailpointPayloadWithData),
- configdb: configRS.getURL()
- });
- assertStartupSucceeds(mongos);
-
- // The failpoint shows up with the correct data in the results of getParameter.
-
- var res = mongod.adminCommand({getParameter: "*"});
- assert.neq(null, res);
- assert.neq(null, res["failpoint.dummy"]);
- assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
- assert.eq(validFailpointPayloadWithData.data, res["failpoint.dummy"].data);
-
- res = mongos.adminCommand({getParameter: "*"});
- assert.neq(null, res);
- assert.neq(null, res["failpoint.dummy"]);
- assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
- assert.eq(validFailpointPayloadWithData.data, res["failpoint.dummy"].data);
-
- // The failpoint cannot be set by the setParameter command.
- assert.commandFailed(mongod.adminCommand({setParameter: 1, "dummy": validFailpointPayload}));
- assert.commandFailed(mongos.adminCommand({setParameter: 1, "dummy": validFailpointPayload}));
-
- // After changing the failpoint's state through the configureFailPoint command, the changes are
- // reflected in the output of the getParameter command.
-
- var newData = {x: 2};
-
- mongod.adminCommand({configureFailPoint: "dummy", mode: "alwaysOn", data: newData});
- res = mongod.adminCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.neq(null, res);
- assert.neq(null, res["failpoint.dummy"]);
- assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
- assert.eq(newData, res["failpoint.dummy"].data);
-
- mongos.adminCommand({configureFailPoint: "dummy", mode: "alwaysOn", data: newData});
- res = mongos.adminCommand({getParameter: 1, "failpoint.dummy": 1});
- assert.neq(null, res);
- assert.neq(null, res["failpoint.dummy"]);
- assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
- assert.eq(newData, res["failpoint.dummy"].data);
-
- MongoRunner.stopMongod(mongod);
- MongoRunner.stopMongos(mongos);
-
- // Failpoint server parameters do not show up in the output of getParameter when not running
- // with enableTestCommands=1.
-
- jsTest.setOption('enableTestCommands', false);
- TestData.roleGraphInvalidationIsFatal = false;
-
- mongod = MongoRunner.runMongod();
- assertStartupSucceeds(mongod);
-
- mongos = MongoRunner.runMongos({configdb: configRS.getURL()});
- assertStartupSucceeds(mongos);
-
- // Doing getParameter for a specific failpoint fails.
- assert.commandFailed(mongod.adminCommand({getParameter: 1, "failpoint.dummy": 1}));
- assert.commandFailed(mongos.adminCommand({getParameter: 1, "failpoint.dummy": 1}));
-
- // No failpoint parameters show up when listing all parameters through getParameter.
- res = mongod.adminCommand({getParameter: "*"});
- assert.neq(null, res);
- for (var parameter in res) { // for-in loop valid only for top-level field checks.
- assert(!parameter.includes("failpoint."));
- }
-
- res = mongos.adminCommand({getParameter: "*"});
- assert.neq(null, res);
- for (var parameter in res) { // for-in loop valid only for top-level field checks.
- assert(!parameter.includes("failpoint."));
- }
-
- MongoRunner.stopMongod(mongod);
- MongoRunner.stopMongos(mongos);
- configRS.stopSet();
+"use strict";
+
+var assertStartupSucceeds = function(conn) {
+ assert.commandWorked(conn.adminCommand({ismaster: 1}));
+};
+
+var assertStartupFails = function(conn) {
+ assert.eq(null, conn);
+};
+
+var validFailpointPayload = {'mode': 'alwaysOn'};
+var validFailpointPayloadWithData = {'mode': 'alwaysOn', 'data': {x: 1}};
+var invalidFailpointPayload = "notJSON";
+
+// In order to be able to connect to a mongos that starts up successfully, start a config replica
+// set so that we can provide a valid config connection string to the mongos.
+var configRS = new ReplSetTest({nodes: 3});
+configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
+configRS.initiate();
+
+// Setting a failpoint via --setParameter fails if enableTestCommands is not on.
+jsTest.setOption('enableTestCommands', false);
+assertStartupFails(
+ MongoRunner.runMongod({setParameter: "failpoint.dummy=" + tojson(validFailpointPayload)}));
+assertStartupFails(MongoRunner.runMongos({
+ setParameter: "failpoint.dummy=" + tojson(validFailpointPayload),
+ configdb: configRS.getURL()
+}));
+jsTest.setOption('enableTestCommands', true);
+
+// Passing an invalid failpoint payload fails.
+assertStartupFails(
+ MongoRunner.runMongod({setParameter: "failpoint.dummy=" + tojson(invalidFailpointPayload)}));
+assertStartupFails(MongoRunner.runMongos({
+ setParameter: "failpoint.dummy=" + tojson(invalidFailpointPayload),
+ configdb: configRS.getURL()
+}));
+
+// Valid startup configurations succeed.
+var mongod =
+ MongoRunner.runMongod({setParameter: "failpoint.dummy=" + tojson(validFailpointPayload)});
+assertStartupSucceeds(mongod);
+MongoRunner.stopMongod(mongod);
+
+var mongos = MongoRunner.runMongos({
+ setParameter: "failpoint.dummy=" + tojson(validFailpointPayload),
+ configdb: configRS.getURL()
+});
+assertStartupSucceeds(mongos);
+MongoRunner.stopMongos(mongos);
+
+mongod = MongoRunner.runMongod(
+ {setParameter: "failpoint.dummy=" + tojson(validFailpointPayloadWithData)});
+assertStartupSucceeds(mongod);
+
+mongos = MongoRunner.runMongos({
+ setParameter: "failpoint.dummy=" + tojson(validFailpointPayloadWithData),
+ configdb: configRS.getURL()
+});
+assertStartupSucceeds(mongos);
+
+// The failpoint shows up with the correct data in the results of getParameter.
+
+var res = mongod.adminCommand({getParameter: "*"});
+assert.neq(null, res);
+assert.neq(null, res["failpoint.dummy"]);
+assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
+assert.eq(validFailpointPayloadWithData.data, res["failpoint.dummy"].data);
+
+res = mongos.adminCommand({getParameter: "*"});
+assert.neq(null, res);
+assert.neq(null, res["failpoint.dummy"]);
+assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
+assert.eq(validFailpointPayloadWithData.data, res["failpoint.dummy"].data);
+
+// The failpoint cannot be set by the setParameter command.
+assert.commandFailed(mongod.adminCommand({setParameter: 1, "dummy": validFailpointPayload}));
+assert.commandFailed(mongos.adminCommand({setParameter: 1, "dummy": validFailpointPayload}));
+
+// After changing the failpoint's state through the configureFailPoint command, the changes are
+// reflected in the output of the getParameter command.
+
+var newData = {x: 2};
+
+mongod.adminCommand({configureFailPoint: "dummy", mode: "alwaysOn", data: newData});
+res = mongod.adminCommand({getParameter: 1, "failpoint.dummy": 1});
+assert.neq(null, res);
+assert.neq(null, res["failpoint.dummy"]);
+assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
+assert.eq(newData, res["failpoint.dummy"].data);
+
+mongos.adminCommand({configureFailPoint: "dummy", mode: "alwaysOn", data: newData});
+res = mongos.adminCommand({getParameter: 1, "failpoint.dummy": 1});
+assert.neq(null, res);
+assert.neq(null, res["failpoint.dummy"]);
+assert.eq(1, res["failpoint.dummy"].mode); // the 'mode' is an enum internally; 'alwaysOn' is 1
+assert.eq(newData, res["failpoint.dummy"].data);
+
+MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongos(mongos);
+
+// Failpoint server parameters do not show up in the output of getParameter when not running
+// with enableTestCommands=1.
+
+jsTest.setOption('enableTestCommands', false);
+TestData.roleGraphInvalidationIsFatal = false;
+
+mongod = MongoRunner.runMongod();
+assertStartupSucceeds(mongod);
+
+mongos = MongoRunner.runMongos({configdb: configRS.getURL()});
+assertStartupSucceeds(mongos);
+
+// Doing getParameter for a specific failpoint fails.
+assert.commandFailed(mongod.adminCommand({getParameter: 1, "failpoint.dummy": 1}));
+assert.commandFailed(mongos.adminCommand({getParameter: 1, "failpoint.dummy": 1}));
+
+// No failpoint parameters show up when listing all parameters through getParameter.
+res = mongod.adminCommand({getParameter: "*"});
+assert.neq(null, res);
+for (var parameter in res) { // for-in loop valid only for top-level field checks.
+ assert(!parameter.includes("failpoint."));
+}
+
+res = mongos.adminCommand({getParameter: "*"});
+assert.neq(null, res);
+for (var parameter in res) { // for-in loop valid only for top-level field checks.
+ assert(!parameter.includes("failpoint."));
+}
+
+MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongos(mongos);
+configRS.stopSet();
})();
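The --setParameter strings assembled above are just "failpoint.<name>=" followed by a JSON payload; at startup the server parses the value into the same shape configureFailPoint accepts. Purely illustrative:

const payload = {'mode': 'alwaysOn', 'data': {x: 1}};
// Prints the exact argument value the test hands to MongoRunner, i.e. the string
// a user would pass as --setParameter on the mongod/mongos command line.
print("failpoint.dummy=" + tojson(payload));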
diff --git a/jstests/free_mon/free_mon_announce.js b/jstests/free_mon/free_mon_announce.js
index 55b6a978917..d78d0b58608 100644
--- a/jstests/free_mon/free_mon_announce.js
+++ b/jstests/free_mon/free_mon_announce.js
@@ -3,33 +3,33 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- const mock_web = new FreeMonWebServer();
- mock_web.start();
+const mock_web = new FreeMonWebServer();
+mock_web.start();
- const mongod = MongoRunner.runMongod({
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- });
- assert.neq(mongod, null, 'mongod not running');
- const admin = mongod.getDB('admin');
+const mongod = MongoRunner.runMongod({
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+});
+assert.neq(mongod, null, 'mongod not running');
+const admin = mongod.getDB('admin');
- function getConnectAnnounce() {
- // Capture message as it'd be presented to a user.
- clearRawMongoProgramOutput();
- const exitCode = runMongoProgram(
- 'mongo', '--port', mongod.port, '--eval', "shellHelper( 'show', 'freeMonitoring' );");
- assert.eq(exitCode, 0);
- return rawMongoProgramOutput();
- }
+function getConnectAnnounce() {
+ // Capture message as it'd be presented to a user.
+ clearRawMongoProgramOutput();
+ const exitCode = runMongoProgram(
+ 'mongo', '--port', mongod.port, '--eval', "shellHelper( 'show', 'freeMonitoring' );");
+ assert.eq(exitCode, 0);
+ return rawMongoProgramOutput();
+}
- // state === 'enabled'.
- admin.enableFreeMonitoring();
- WaitForRegistration(mongod);
- const reminder = "To see your monitoring data";
- assert.neq(getConnectAnnounce().search(reminder), -1, 'userReminder not found');
+// state === 'enabled'.
+admin.enableFreeMonitoring();
+WaitForRegistration(mongod);
+const reminder = "To see your monitoring data";
+assert.neq(getConnectAnnounce().search(reminder), -1, 'userReminder not found');
- // Cleanup.
- MongoRunner.stopMongod(mongod);
- mock_web.stop();
+// Cleanup.
+MongoRunner.stopMongod(mongod);
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_disable.js b/jstests/free_mon/free_mon_disable.js
index 2de9de1c651..4bbc2236407 100644
--- a/jstests/free_mon/free_mon_disable.js
+++ b/jstests/free_mon/free_mon_disable.js
@@ -3,33 +3,33 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- freeMonitoringTag: "foo",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ freeMonitoringTag: "foo",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "disable"}));
+assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "disable"}));
- const stats = mock_web.queryStats();
- print(tojson(stats));
+const stats = mock_web.queryStats();
+print(tojson(stats));
- assert.eq(stats.registers, 0);
+assert.eq(stats.registers, 0);
- assert.eq(FreeMonGetStatus(conn).state, "disabled");
+assert.eq(FreeMonGetStatus(conn).state, "disabled");
- assert.eq(FreeMonGetServerStatus(conn).state, "disabled");
+assert.eq(FreeMonGetServerStatus(conn).state, "disabled");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_http_down.js b/jstests/free_mon/free_mon_http_down.js
index 019b50f23eb..2cdf535a2e0 100644
--- a/jstests/free_mon/free_mon_http_down.js
+++ b/jstests/free_mon/free_mon_http_down.js
@@ -3,28 +3,28 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_FAIL_REGISTER);
+let mock_web = new FreeMonWebServer(FAULT_FAIL_REGISTER);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
- const admin = conn.getDB('admin');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
+const admin = conn.getDB('admin');
- mock_web.waitRegisters(3);
+mock_web.waitRegisters(3);
- const freeMonStats = assert.commandWorked(admin.runCommand({serverStatus: 1})).freeMonitoring;
- assert.gte(freeMonStats.registerErrors, 3);
+const freeMonStats = assert.commandWorked(admin.runCommand({serverStatus: 1})).freeMonitoring;
+assert.gte(freeMonStats.registerErrors, 3);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_http_validate.js b/jstests/free_mon/free_mon_http_validate.js
index aff5ad5e8ec..30f521903cf 100644
--- a/jstests/free_mon/free_mon_http_validate.js
+++ b/jstests/free_mon/free_mon_http_validate.js
@@ -3,33 +3,33 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_INVALID_REGISTER);
+let mock_web = new FreeMonWebServer(FAULT_INVALID_REGISTER);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- mock_web.waitRegisters(1);
+mock_web.waitRegisters(1);
- // Sleep for some more time in case free monitoring would still try to register
- sleep(20 * 1000);
+// Sleep for some more time in case free monitoring still tries to register.
+sleep(20 * 1000);
- // Ensure it only tried to register once since we gave it a bad response.
- const stats = mock_web.queryStats();
- print(tojson(stats));
+// Ensure it only tried to register once since we gave it a bad response.
+const stats = mock_web.queryStats();
+print(tojson(stats));
- assert.eq(stats.registers, 1);
+assert.eq(stats.registers, 1);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_metrics_halt.js b/jstests/free_mon/free_mon_metrics_halt.js
index 0059b85705b..b0251201926 100644
--- a/jstests/free_mon/free_mon_metrics_halt.js
+++ b/jstests/free_mon/free_mon_metrics_halt.js
@@ -3,29 +3,29 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_HALT_METRICS_5);
+let mock_web = new FreeMonWebServer(FAULT_HALT_METRICS_5);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- mock_web.waitMetrics(6);
+mock_web.waitMetrics(6);
- // It gets marked as disabled on halt
- const reg = FreeMonGetRegistration(conn);
- print(tojson(reg));
- assert.eq(reg.state, "disabled");
+// It gets marked as disabled on halt
+const reg = FreeMonGetRegistration(conn);
+print(tojson(reg));
+assert.eq(reg.state, "disabled");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_metrics_perm_del.js b/jstests/free_mon/free_mon_metrics_perm_del.js
index 369a788159d..a66d6f1e0f1 100644
--- a/jstests/free_mon/free_mon_metrics_perm_del.js
+++ b/jstests/free_mon/free_mon_metrics_perm_del.js
@@ -3,29 +3,29 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_PERMANENTLY_DELETE_AFTER_3);
+let mock_web = new FreeMonWebServer(FAULT_PERMANENTLY_DELETE_AFTER_3);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- mock_web.waitMetrics(4);
+mock_web.waitMetrics(4);
- // Make sure the registration document gets removed
- const reg = FreeMonGetRegistration(conn);
- print(tojson(reg));
- assert.eq(reg, undefined);
+// Make sure the registration document gets removed
+const reg = FreeMonGetRegistration(conn);
+print(tojson(reg));
+assert.eq(reg, undefined);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_register.js b/jstests/free_mon/free_mon_register.js
index 6d1ae50274a..19bb2e59244 100644
--- a/jstests/free_mon/free_mon_register.js
+++ b/jstests/free_mon/free_mon_register.js
@@ -3,51 +3,51 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- const localTime = Date.now();
+const localTime = Date.now();
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- freeMonitoringTag: "foo",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ freeMonitoringTag: "foo",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- WaitForRegistration(conn);
+WaitForRegistration(conn);
- const stats = mock_web.queryStats();
- print(tojson(stats));
+const stats = mock_web.queryStats();
+print(tojson(stats));
- assert.eq(stats.registers, 1);
+assert.eq(stats.registers, 1);
- const last_register = mock_web.query("last_register");
- print(tojson(last_register));
+const last_register = mock_web.query("last_register");
+print(tojson(last_register));
- assert.eq(last_register.version, 2);
- assert.gt(new Date().setTime(last_register.localTime["$date"]), localTime);
- assert.eq(last_register.payload.buildInfo.bits, 64);
- assert.eq(last_register.payload.buildInfo.ok, 1);
- assert.eq(last_register.payload.storageEngine.readOnly, false);
- assert.eq(last_register.payload.isMaster.ok, 1);
- assert.eq(last_register.tags, ["foo"]);
+assert.eq(last_register.version, 2);
+assert.gt(new Date().setTime(last_register.localTime["$date"]), localTime);
+assert.eq(last_register.payload.buildInfo.bits, 64);
+assert.eq(last_register.payload.buildInfo.ok, 1);
+assert.eq(last_register.payload.storageEngine.readOnly, false);
+assert.eq(last_register.payload.isMaster.ok, 1);
+assert.eq(last_register.tags, ["foo"]);
- mock_web.waitMetrics(2);
+mock_web.waitMetrics(2);
- const last_metrics = mock_web.query("last_metrics");
- print(tojson(last_metrics));
+const last_metrics = mock_web.query("last_metrics");
+print(tojson(last_metrics));
- assert.eq(last_metrics.version, 2);
- assert.gt(new Date().setTime(last_metrics.localTime["$date"]), localTime);
+assert.eq(last_metrics.version, 2);
+assert.gt(new Date().setTime(last_metrics.localTime["$date"]), localTime);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_register_cmd.js b/jstests/free_mon/free_mon_register_cmd.js
index 35fdc9397af..654a3d6ff91 100644
--- a/jstests/free_mon/free_mon_register_cmd.js
+++ b/jstests/free_mon/free_mon_register_cmd.js
@@ -3,70 +3,70 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- // Wait an arbitrary amount of time to allow the processor loop to start.
- sleep(10 * 1000);
+// Wait an arbitrary amount of time to allow the processor loop to start.
+sleep(10 * 1000);
- // Then verify that no registrations happened since we haven't runtime enabled yed.
- assert.eq('undecided',
- conn.getDB('admin').getFreeMonitoringStatus().state,
- "Initial state should be 'undecided'");
- assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mod");
+// Then verify that no registrations happened since we haven't enabled it at runtime yet.
+assert.eq('undecided',
+ conn.getDB('admin').getFreeMonitoringStatus().state,
+ "Initial state should be 'undecided'");
+assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mon");
- assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "enable"}));
+assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "enable"}));
- // The command should either timeout or suceed after registration is complete
- const retStatus1 = conn.adminCommand({getFreeMonitoringStatus: 1});
- assert.commandWorked(retStatus1);
- assert.eq(retStatus1.state, "enabled", tojson(retStatus1));
+// The command should either time out or succeed after registration is complete
+const retStatus1 = conn.adminCommand({getFreeMonitoringStatus: 1});
+assert.commandWorked(retStatus1);
+assert.eq(retStatus1.state, "enabled", tojson(retStatus1));
- const stats = mock_web.queryStats();
- print(tojson(stats));
+const stats = mock_web.queryStats();
+print(tojson(stats));
- assert.eq(stats.registers, 1);
+assert.eq(stats.registers, 1);
- const last_register = mock_web.query("last_register");
- print(tojson(last_register));
+const last_register = mock_web.query("last_register");
+print(tojson(last_register));
- assert.eq(last_register.version, 2);
- assert.eq(last_register.payload.buildInfo.bits, 64);
- assert.eq(last_register.payload.buildInfo.ok, 1);
- assert.eq(last_register.payload.storageEngine.readOnly, false);
- assert.eq(last_register.payload.isMaster.ok, 1);
+assert.eq(last_register.version, 2);
+assert.eq(last_register.payload.buildInfo.bits, 64);
+assert.eq(last_register.payload.buildInfo.ok, 1);
+assert.eq(last_register.payload.storageEngine.readOnly, false);
+assert.eq(last_register.payload.isMaster.ok, 1);
- mock_web.waitMetrics(2);
+mock_web.waitMetrics(2);
- const last_metrics = mock_web.query("last_metrics");
- print(tojson(last_metrics));
+const last_metrics = mock_web.query("last_metrics");
+print(tojson(last_metrics));
- assert.eq(last_metrics.version, 2);
+assert.eq(last_metrics.version, 2);
- assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "disable"}));
+assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "disable"}));
- // Wait for unregistration to occur
- assert.soon(function() {
- const regDoc = FreeMonGetRegistration(conn);
- return regDoc.state == "disabled";
- }, "Failed to unregister", 60 * 1000);
+// Wait for unregistration to occur
+assert.soon(function() {
+ const regDoc = FreeMonGetRegistration(conn);
+ return regDoc.state == "disabled";
+}, "Failed to unregister", 60 * 1000);
- const retStatus2 = conn.adminCommand({getFreeMonitoringStatus: 1});
- assert.commandWorked(retStatus2);
- assert.eq(retStatus2.state, "disabled", tojson(retStatus1));
+const retStatus2 = conn.adminCommand({getFreeMonitoringStatus: 1});
+assert.commandWorked(retStatus2);
+assert.eq(retStatus2.state, "disabled", tojson(retStatus2));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_register_off.js b/jstests/free_mon/free_mon_register_off.js
index 9f4abd193bc..591f95f0a09 100644
--- a/jstests/free_mon/free_mon_register_off.js
+++ b/jstests/free_mon/free_mon_register_off.js
@@ -3,38 +3,38 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "off",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "off",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- assert.commandFailed(conn.adminCommand({setFreeMonitoring: 1, action: "enable"}));
+assert.commandFailed(conn.adminCommand({setFreeMonitoring: 1, action: "enable"}));
- // If it some time in case it actually started to process something.
- sleep(10 * 1000);
+// Give it some time in case it actually started to process something.
+sleep(10 * 1000);
- const retStatus1 = conn.adminCommand({getFreeMonitoringStatus: 1});
- assert.commandWorked(retStatus1);
- assert.eq(retStatus1.state, "disabled", tojson(retStatus1));
+const retStatus1 = conn.adminCommand({getFreeMonitoringStatus: 1});
+assert.commandWorked(retStatus1);
+assert.eq(retStatus1.state, "disabled", tojson(retStatus1));
- const stats = mock_web.queryStats();
- print(tojson(stats));
+const stats = mock_web.queryStats();
+print(tojson(stats));
- assert.eq(stats.registers, 0);
+assert.eq(stats.registers, 0);
- assert.commandFailed(conn.adminCommand({setFreeMonitoring: 1, action: "disable"}));
+assert.commandFailed(conn.adminCommand({setFreeMonitoring: 1, action: "disable"}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_register_resend.js b/jstests/free_mon/free_mon_register_resend.js
index de98e038ac6..ab9af561a4d 100644
--- a/jstests/free_mon/free_mon_register_resend.js
+++ b/jstests/free_mon/free_mon_register_resend.js
@@ -3,26 +3,26 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_ONCE);
+let mock_web = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_ONCE);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up');
- WaitForRegistration(conn);
+WaitForRegistration(conn);
- mock_web.waitRegisters(2);
+mock_web.waitRegisters(2);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_rs_corrupt.js b/jstests/free_mon/free_mon_rs_corrupt.js
index 7f28bf94f5a..c8e3099447c 100644
--- a/jstests/free_mon/free_mon_rs_corrupt.js
+++ b/jstests/free_mon/free_mon_rs_corrupt.js
@@ -3,35 +3,35 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getPrimary());
- mock_web.waitRegisters(2);
+mock_web.waitRegisters(2);
- // For kicks, corrupt the free monitoring storage state to knock free mon offline
- // and make sure the node does not crash
- rst.getPrimary().getDB("admin").system.version.update({_id: "free_monitoring"},
- {$set: {version: 2}});
+// For kicks, corrupt the free monitoring storage state to knock free mon offline
+// and make sure the node does not crash
+rst.getPrimary().getDB("admin").system.version.update({_id: "free_monitoring"},
+ {$set: {version: 2}});
- sleep(20 * 1000);
+sleep(20 * 1000);
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
+mock_web.stop();
})();
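The "corrupt" step above works because free monitoring persists its registration state as an ordinary document in admin.system.version. A minimal sketch of inspecting that document directly (assumption: the FreeMonGetRegistration helper in jstests/free_mon/libs/free_mon.js reads this same document):

    // Hypothetical snippet; 'conn' is any connection to a replica set node.
    const regDoc = conn.getDB("admin").system.version.findOne({_id: "free_monitoring"});
    printjson(regDoc);  // the tests above rely on its 'state' and 'version' fields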
diff --git a/jstests/free_mon/free_mon_rs_delete.js b/jstests/free_mon/free_mon_rs_delete.js
index 0995d05c3f8..139cb5a2414 100644
--- a/jstests/free_mon/free_mon_rs_delete.js
+++ b/jstests/free_mon/free_mon_rs_delete.js
@@ -3,59 +3,59 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getPrimary());
- mock_web.waitRegisters(2);
+mock_web.waitRegisters(2);
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- const qs1 = mock_web.queryStats();
+const qs1 = mock_web.queryStats();
- // For kicks, delete the free monitoring storage state to knock free mon offline
- // and make sure the node does not crash
- rst.getPrimary().getDB("admin").system.version.remove({_id: "free_monitoring"});
+// For kicks, delete the free monitoring storage state to knock free mon offline
+// and make sure the node does not crash
+rst.getPrimary().getDB("admin").system.version.remove({_id: "free_monitoring"});
- sleep(20 * 1000);
+sleep(20 * 1000);
- const qs2 = mock_web.queryStats();
+const qs2 = mock_web.queryStats();
- // Verify free monitoring stops but tolerate one additional collection
- assert.gte(qs1.metrics + 2, qs2.metrics);
- assert.eq(qs1.registers, qs2.registers);
+// Verify free monitoring stops, but tolerate one additional metrics collection
+assert.gte(qs1.metrics + 2, qs2.metrics);
+assert.eq(qs1.registers, qs2.registers);
- // Make sure we are back to the initial state.
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'undecided');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'undecided');
+// Make sure we are back to the initial state.
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'undecided');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'undecided');
- // Enable it again to be sure we can resume
- assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+// Enable it again to be sure we can resume
+assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- sleep(20 * 1000);
+sleep(20 * 1000);
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_rs_halt.js b/jstests/free_mon/free_mon_rs_halt.js
index 5a5b94accc2..3962391826b 100644
--- a/jstests/free_mon/free_mon_rs_halt.js
+++ b/jstests/free_mon/free_mon_rs_halt.js
@@ -2,61 +2,61 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_HALT_METRICS_5, true);
+let mock_web = new FreeMonWebServer(FAULT_HALT_METRICS_5, true);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getPrimary());
- mock_web.waitRegisters(2);
+mock_web.waitRegisters(2);
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
- mock_web.enableFaults();
- mock_web.waitFaults(1);
+mock_web.enableFaults();
+mock_web.waitFaults(1);
- const qs1 = mock_web.queryStats();
+const qs1 = mock_web.queryStats();
- sleep(20 * 1000);
+sleep(20 * 1000);
- const qs2 = mock_web.queryStats();
+const qs2 = mock_web.queryStats();
- // Verify free monitoring stops but tolerate one additional collection
- assert.gte(qs1.metrics + 1, qs2.metrics);
- assert.eq(qs1.registers, qs2.registers);
+// Verify free monitoring stops, but tolerate one additional metrics collection
+assert.gte(qs1.metrics + 1, qs2.metrics);
+assert.eq(qs1.registers, qs2.registers);
- // Halt causes us to disable free monitoring, not return it to initial state.
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'disabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'disabled');
+// Halt causes us to disable free monitoring, not return it to initial state.
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'disabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'disabled');
- // Disable the fault so we can re-enable again
- mock_web.disableFaults();
+// Disable the fault so we can re-enable again
+mock_web.disableFaults();
- // Enable it again to be sure we can resume
- assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+// Enable it again to be sure we can resume
+assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- sleep(20 * 1000);
+sleep(20 * 1000);
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_rs_off.js b/jstests/free_mon/free_mon_rs_off.js
index a15519bd2ac..2b4d34fc87e 100644
--- a/jstests/free_mon/free_mon_rs_off.js
+++ b/jstests/free_mon/free_mon_rs_off.js
@@ -3,36 +3,36 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "off",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "off",
+ verbose: 1,
+};
- const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- const retStatus1 = rst.getPrimary().adminCommand({getFreeMonitoringStatus: 1});
- assert.commandWorked(retStatus1);
- assert.eq(retStatus1.state, "disabled", tojson(retStatus1));
+const retStatus1 = rst.getPrimary().adminCommand({getFreeMonitoringStatus: 1});
+assert.commandWorked(retStatus1);
+assert.eq(retStatus1.state, "disabled", tojson(retStatus1));
- const stats = mock_web.queryStats();
- print(tojson(stats));
+const stats = mock_web.queryStats();
+print(tojson(stats));
- assert.eq(stats.registers, 0);
+assert.eq(stats.registers, 0);
- assert.commandFailed(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "disable"}));
+assert.commandFailed(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "disable"}));
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_rs_perm_del.js b/jstests/free_mon/free_mon_rs_perm_del.js
index b8cb3e73a59..baea0675bb7 100644
--- a/jstests/free_mon/free_mon_rs_perm_del.js
+++ b/jstests/free_mon/free_mon_rs_perm_del.js
@@ -3,54 +3,54 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_PERMANENTLY_DELETE_AFTER_3, true);
+let mock_web = new FreeMonWebServer(FAULT_PERMANENTLY_DELETE_AFTER_3, true);
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- enableFreeMonitoring: "on",
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ enableFreeMonitoring: "on",
+ verbose: 1,
+};
- const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getPrimary());
- mock_web.waitRegisters(2);
+mock_web.waitRegisters(2);
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- mock_web.enableFaults();
- mock_web.waitFaults(1);
+mock_web.enableFaults();
+mock_web.waitFaults(1);
- sleep(20 * 1000);
+sleep(20 * 1000);
- // Make sure we are back to the initial state.
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'undecided');
+// Make sure we are back to the initial state.
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'undecided');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'undecided');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'undecided');
- // Disable the fault so we can re-enable again
- mock_web.disableFaults();
+// Disable the fault so we can re-enable again
+mock_web.disableFaults();
- // Enable it again to be sure we can resume
- assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+// Enable it again to be sure we can resume
+assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- sleep(20 * 1000);
+sleep(20 * 1000);
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
+mock_web.stop();
})();
diff --git a/jstests/free_mon/free_mon_rs_register.js b/jstests/free_mon/free_mon_rs_register.js
index 60b6dee5a72..5094835490d 100644
--- a/jstests/free_mon/free_mon_rs_register.js
+++ b/jstests/free_mon/free_mon_rs_register.js
@@ -3,92 +3,92 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer();
+let mock_web = new FreeMonWebServer();
- mock_web.start();
+mock_web.start();
- let options = {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- verbose: 1,
- };
+let options = {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ verbose: 1,
+};
- const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: options});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- sleep(10 * 1000);
- assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mod");
+sleep(10 * 1000);
+assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mon");
- assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
- WaitForRegistration(rst.getPrimary());
+assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
+WaitForRegistration(rst.getPrimary());
- mock_web.waitRegisters(2);
+mock_web.waitRegisters(2);
- WaitForRegistration(rst.getPrimary());
- WaitForRegistration(rst.getSecondary());
+WaitForRegistration(rst.getPrimary());
+WaitForRegistration(rst.getSecondary());
- const last_register = mock_web.query("last_register");
- print(tojson(last_register));
+const last_register = mock_web.query("last_register");
+print(tojson(last_register));
- assert.eq(last_register.version, 2);
- assert.eq(last_register.payload.buildInfo.bits, 64);
- assert.eq(last_register.payload.buildInfo.ok, 1);
- assert.eq(last_register.payload.storageEngine.readOnly, false);
- assert.eq(last_register.payload.isMaster.ok, 1);
- assert.eq(last_register.payload.replSetGetConfig.config.version, 2);
+assert.eq(last_register.version, 2);
+assert.eq(last_register.payload.buildInfo.bits, 64);
+assert.eq(last_register.payload.buildInfo.ok, 1);
+assert.eq(last_register.payload.storageEngine.readOnly, false);
+assert.eq(last_register.payload.isMaster.ok, 1);
+assert.eq(last_register.payload.replSetGetConfig.config.version, 2);
- function isUUID(val) {
- // Mock webserver gives us back unpacked BinData/UUID in the form:
- // { '$uuid': '0123456789abcdef0123456789abcdef' }.
- if ((typeof val) !== 'object') {
- return false;
- }
- const uuid = val['$uuid'];
- if ((typeof uuid) !== 'string') {
- return false;
- }
- return uuid.match(/^[0-9a-fA-F]{32}$/) !== null;
+function isUUID(val) {
+ // Mock webserver gives us back unpacked BinData/UUID in the form:
+ // { '$uuid': '0123456789abcdef0123456789abcdef' }.
+ if ((typeof val) !== 'object') {
+ return false;
}
- assert.eq(isUUID(last_register.payload.uuid['local.oplog.rs']), true);
+ const uuid = val['$uuid'];
+ if ((typeof uuid) !== 'string') {
+ return false;
+ }
+ return uuid.match(/^[0-9a-fA-F]{32}$/) !== null;
+}
+assert.eq(isUUID(last_register.payload.uuid['local.oplog.rs']), true);
- // Restart the secondary
- var s1 = rst._slaves[0];
- var s1Id = rst.getNodeId(s1);
+// Restart the secondary
+var s1 = rst._slaves[0];
+var s1Id = rst.getNodeId(s1);
- rst.stop(s1Id);
- rst.waitForState(s1, ReplSetTest.State.DOWN);
+rst.stop(s1Id);
+rst.waitForState(s1, ReplSetTest.State.DOWN);
- rst.restart(s1Id);
+rst.restart(s1Id);
- mock_web.waitRegisters(3);
+mock_web.waitRegisters(3);
- // Now disable it
- assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "disable"}));
+// Now disable it
+assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "disable"}));
- WaitForUnRegistration(rst.getPrimary());
- WaitForUnRegistration(rst.getSecondary());
+WaitForUnRegistration(rst.getPrimary());
+WaitForUnRegistration(rst.getSecondary());
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'disabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'disabled');
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'disabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'disabled');
- // Restart the secondary with it disabled
- var s1 = rst._slaves[0];
- var s1Id = rst.getNodeId(s1);
+// Restart the secondary with it disabled
+var s1 = rst._slaves[0];
+var s1Id = rst.getNodeId(s1);
- rst.stop(s1Id);
- rst.waitForState(s1, ReplSetTest.State.DOWN);
+rst.stop(s1Id);
+rst.waitForState(s1, ReplSetTest.State.DOWN);
- rst.restart(s1Id);
+rst.restart(s1Id);
- // Make sure it is disabled
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'disabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'disabled');
+// Make sure it is disabled
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'disabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'disabled');
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
+mock_web.stop();
})();
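The isUUID helper above documents the mock webserver's JSON convention for unpacked BinData. Purely as an illustration of that convention (a hypothetical check, not part of the patch):

    assert(isUUID({'$uuid': '0123456789abcdef0123456789abcdef'}));  // 32 hex chars under '$uuid'
    assert(!isUUID('0123456789abcdef0123456789abcdef'));            // a bare string is rejected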
diff --git a/jstests/free_mon/free_mon_rs_resend.js b/jstests/free_mon/free_mon_rs_resend.js
index 464caf396e5..2241524f37f 100644
--- a/jstests/free_mon/free_mon_rs_resend.js
+++ b/jstests/free_mon/free_mon_rs_resend.js
@@ -3,64 +3,63 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
+'use strict';
- let mock_web = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_AT_3);
- let mock_web_sec = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_ONCE, true);
+let mock_web = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_AT_3);
+let mock_web_sec = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_ONCE, true);
- mock_web.start();
- mock_web_sec.start();
+mock_web.start();
+mock_web_sec.start();
- const rst = new ReplSetTest({
- name: "free_mon_rs_register",
- nodes: [
- {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- verbose: 1,
- },
- {
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web_sec.getURL(),
- verbose: 1,
- }
- ]
- });
+const rst = new ReplSetTest({
+ name: "free_mon_rs_register",
+ nodes: [
+ {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+ verbose: 1,
+ },
+ {
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web_sec.getURL(),
+ verbose: 1,
+ }
+ ]
+});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- sleep(10 * 1000);
- assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mod");
- assert.eq(
- 0, mock_web_sec.queryStats().registers, "mongod registered without enabling free_mod");
+sleep(10 * 1000);
+assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mon");
+assert.eq(0, mock_web_sec.queryStats().registers, "mongod registered without enabling free_mon");
- assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
- WaitForRegistration(rst.getPrimary());
+assert.commandWorked(rst.getPrimary().adminCommand({setFreeMonitoring: 1, action: "enable"}));
+WaitForRegistration(rst.getPrimary());
- mock_web.waitRegisters(1);
- mock_web_sec.waitRegisters(1);
+mock_web.waitRegisters(1);
+mock_web_sec.waitRegisters(1);
- assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
- assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getPrimary()).state, 'enabled');
+assert.eq(FreeMonGetServerStatus(rst.getSecondary()).state, 'enabled');
- mock_web.waitRegisters(2);
- mock_web_sec.waitRegisters(2);
- mock_web_sec.disableFaults();
+mock_web.waitRegisters(2);
+mock_web_sec.waitRegisters(2);
+mock_web_sec.disableFaults();
- // Trigger resend on the secondary only
- mock_web_sec.enableFaults();
- mock_web_sec.waitFaults(1);
- mock_web_sec.waitRegisters(3);
+// Trigger resend on the secondary only
+mock_web_sec.enableFaults();
+mock_web_sec.waitFaults(1);
+mock_web_sec.waitRegisters(3);
- // Double check registers were as expected
- const stats = mock_web.queryStats();
- assert.eq(stats.registers, 2);
+// Double check registers were as expected
+const stats = mock_web.queryStats();
+assert.eq(stats.registers, 2);
- const stats_sec = mock_web_sec.queryStats();
- assert.gte(stats_sec.registers, 3);
+const stats_sec = mock_web_sec.queryStats();
+assert.gte(stats_sec.registers, 3);
- rst.stopSet();
+rst.stopSet();
- mock_web.stop();
- mock_web_sec.stop();
+mock_web.stop();
+mock_web_sec.stop();
})();
diff --git a/jstests/free_mon/free_mon_server_status.js b/jstests/free_mon/free_mon_server_status.js
index 0dce068a494..8ec2c426da5 100644
--- a/jstests/free_mon/free_mon_server_status.js
+++ b/jstests/free_mon/free_mon_server_status.js
@@ -3,46 +3,46 @@
load("jstests/free_mon/libs/free_mon.js");
(function() {
- 'use strict';
-
- const mock_web = new FreeMonWebServer();
- mock_web.start();
-
- const mongod = MongoRunner.runMongod({
- setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
- });
- assert.neq(mongod, null, 'mongod not running');
- const admin = mongod.getDB('admin');
-
- const kRetryIntervalSecs = 1;
- function freeMonStats() {
- return assert.commandWorked(admin.runCommand({serverStatus: 1})).freeMonitoring;
- }
-
- // Initial state.
- assert.eq(freeMonStats().state, 'undecided');
-
- admin.enableFreeMonitoring();
- WaitForRegistration(mongod);
-
- // Enabled.
- const enabled = freeMonStats();
- assert.eq(enabled.state, 'enabled');
- assert.eq(enabled.retryIntervalSecs, kRetryIntervalSecs);
- assert.eq(enabled.registerErrors, 0);
- assert.eq(enabled.metricsErrors, 0);
-
- // Explicitly disabled.
- admin.disableFreeMonitoring();
- sleep(2); // Give the async command time to run.
-
- const disabled = freeMonStats();
- assert.eq(disabled.state, 'disabled');
- assert.eq(disabled.retryIntervalSecs, kRetryIntervalSecs);
- assert.eq(disabled.registerErrors, 0);
- assert.eq(disabled.metricsErrors, 0);
-
- // Cleanup.
- MongoRunner.stopMongod(mongod);
- mock_web.stop();
+'use strict';
+
+const mock_web = new FreeMonWebServer();
+mock_web.start();
+
+const mongod = MongoRunner.runMongod({
+ setParameter: "cloudFreeMonitoringEndpointURL=" + mock_web.getURL(),
+});
+assert.neq(mongod, null, 'mongod not running');
+const admin = mongod.getDB('admin');
+
+const kRetryIntervalSecs = 1;
+function freeMonStats() {
+ return assert.commandWorked(admin.runCommand({serverStatus: 1})).freeMonitoring;
+}
+
+// Initial state.
+assert.eq(freeMonStats().state, 'undecided');
+
+admin.enableFreeMonitoring();
+WaitForRegistration(mongod);
+
+// Enabled.
+const enabled = freeMonStats();
+assert.eq(enabled.state, 'enabled');
+assert.eq(enabled.retryIntervalSecs, kRetryIntervalSecs);
+assert.eq(enabled.registerErrors, 0);
+assert.eq(enabled.metricsErrors, 0);
+
+// Explicitly disabled.
+admin.disableFreeMonitoring();
+sleep(2); // Give the async command time to run.
+
+const disabled = freeMonStats();
+assert.eq(disabled.state, 'disabled');
+assert.eq(disabled.retryIntervalSecs, kRetryIntervalSecs);
+assert.eq(disabled.registerErrors, 0);
+assert.eq(disabled.metricsErrors, 0);
+
+// Cleanup.
+MongoRunner.stopMongod(mongod);
+mock_web.stop();
})();
diff --git a/jstests/free_mon/libs/free_mon.js b/jstests/free_mon/libs/free_mon.js
index 52de6a22921..5f45c4b3315 100644
--- a/jstests/free_mon/libs/free_mon.js
+++ b/jstests/free_mon/libs/free_mon.js
@@ -17,11 +17,11 @@ const ENABLE_FAULTS = "enable_faults";
class FreeMonWebServer {
/**
- * Create a new webserver.
- *
- * @param {string} fault_type
- * @param {bool} disableFaultsOnStartup optionally disable fault on startup
- */
+ * Create a new webserver.
+ *
+ * @param {string} fault_type
+ * @param {bool} disableFaultsOnStartup optionally disable fault on startup
+ */
constructor(fault_type, disableFaultsOnStartup) {
this.python = "python3";
this.disableFaultsOnStartup = disableFaultsOnStartup || false;
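The reflowed doc comment above covers the two constructor arguments; a minimal usage sketch, assuming only the FreeMonWebServer methods exercised by the tests in this patch:

    // Hypothetical snippet; every call below appears in the tests above.
    const web = new FreeMonWebServer(FAULT_RESEND_REGISTRATION_ONCE, true);  // faults start disabled
    web.start();
    jsTest.log("endpoint: " + web.getURL());  // wired to mongod via cloudFreeMonitoringEndpointURL
    web.enableFaults();
    web.waitFaults(1);
    printjson(web.queryStats());  // exposes the 'registers' and 'metrics' counters
    web.stop();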
diff --git a/jstests/gle/create_index_gle.js b/jstests/gle/create_index_gle.js
index c3f69f18a03..b8b63182924 100644
--- a/jstests/gle/create_index_gle.js
+++ b/jstests/gle/create_index_gle.js
@@ -1,53 +1,52 @@
load('jstests/replsets/rslib.js');
(function() {
- "use strict";
-
- var st = new ShardingTest({
- name: "zzz",
- shards: {
- rs0: {
- nodes: {n0: {}, n1: {rsConfig: {priority: 0}}},
- oplogSize: 10,
- }
- },
- verbose: 3,
- other: {rsOptions: {verbose: 1}}
- });
- var replTest = st.rs0;
-
- var config = replTest.getReplSetConfig();
- // Add a delay long enough so getLastError would actually 'wait' for write concern.
- config.members[1].slaveDelay = 3;
- config.version = replTest.getReplSetConfigFromNode().version + 1;
-
- reconfig(replTest, config, true);
-
- assert.soon(function() {
- var secConn = replTest.getSecondary();
- var config = secConn.getDB('local').system.replset.findOne();
- return config.members[1].slaveDelay == 3;
- });
-
- replTest.awaitSecondaryNodes();
-
- var testDB = st.s.getDB('test');
- testDB.adminCommand({connPoolSync: 1});
-
+"use strict";
+
+var st = new ShardingTest({
+ name: "zzz",
+ shards: {
+ rs0: {
+ nodes: {n0: {}, n1: {rsConfig: {priority: 0}}},
+ oplogSize: 10,
+ }
+ },
+ verbose: 3,
+ other: {rsOptions: {verbose: 1}}
+});
+var replTest = st.rs0;
+
+var config = replTest.getReplSetConfig();
+// Add a delay long enough so getLastError would actually 'wait' for write concern.
+config.members[1].slaveDelay = 3;
+config.version = replTest.getReplSetConfigFromNode().version + 1;
+
+reconfig(replTest, config, true);
+
+assert.soon(function() {
var secConn = replTest.getSecondary();
- var testDB2 = secConn.getDB('test');
+ var config = secConn.getDB('local').system.replset.findOne();
+ return config.members[1].slaveDelay == 3;
+});
+
+replTest.awaitSecondaryNodes();
+
+var testDB = st.s.getDB('test');
+testDB.adminCommand({connPoolSync: 1});
- testDB.user.insert({x: 1});
+var secConn = replTest.getSecondary();
+var testDB2 = secConn.getDB('test');
- testDB.user.ensureIndex({x: 1});
- assert.gleOK(testDB.runCommand({getLastError: 1, w: 2}));
+testDB.user.insert({x: 1});
- replTest.waitForAllIndexBuildsToFinish('test', 'user');
+testDB.user.ensureIndex({x: 1});
+assert.gleOK(testDB.runCommand({getLastError: 1, w: 2}));
- var priIdx = testDB.user.getIndexes();
- var secIdx = testDB2.user.getIndexes();
+replTest.waitForAllIndexBuildsToFinish('test', 'user');
- assert.eq(priIdx.length, secIdx.length, 'pri: ' + tojson(priIdx) + ', sec: ' + tojson(secIdx));
+var priIdx = testDB.user.getIndexes();
+var secIdx = testDB2.user.getIndexes();
- st.stop();
+assert.eq(priIdx.length, secIdx.length, 'pri: ' + tojson(priIdx) + ', sec: ' + tojson(secIdx));
+st.stop();
}());
diff --git a/jstests/gle/gle_sharded_wc.js b/jstests/gle/gle_sharded_wc.js
index d7b01e08994..6e3cc919212 100644
--- a/jstests/gle/gle_sharded_wc.js
+++ b/jstests/gle/gle_sharded_wc.js
@@ -11,138 +11,138 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
-
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
-
- // Options for a cluster with two replica set shards, the first with two nodes the second with
- // one
- // This lets us try a number of GLE scenarios
- var options = {
- rs: true,
- rsOptions: {nojournal: ""},
- // Options for each replica set shard
- rs0: {nodes: 3},
- rs1: {nodes: 3}
- };
-
- var st = new ShardingTest({shards: 2, other: options});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection(jsTestName() + ".coll");
- var shards = config.shards.find().toArray();
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
- printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
- assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
-
- st.printShardingStatus();
-
- var gle = null;
-
- //
- // No journal insert, GLE fails
- coll.remove({});
- coll.insert({_id: 1});
- printjson(gle = coll.getDB().runCommand({getLastError: 1, j: true}));
- assert(!gle.ok);
- assert(gle.errmsg);
-
- //
- // Successful insert, write concern mode invalid
- coll.remove({});
- coll.insert({_id: -1});
- printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
- assert(!gle.ok);
- assert(!gle.err);
- assert(gle.errmsg);
- assert.eq(gle.code, 79); // UnknownReplWriteConcern - needed for backwards compatibility
- assert.eq(coll.count(), 1);
-
- //
- // Error on insert (dup key), write concern error not reported
- coll.remove({});
- coll.insert({_id: -1});
- coll.insert({_id: -1});
- printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
- assert(gle.ok);
- assert(gle.err);
- assert(gle.code);
- assert(!gle.errmsg);
- assert.eq(coll.count(), 1);
-
- //
- // Successful remove on one shard, write concern timeout on the other
- var s0Id = st.rs0.getNodeId(st.rs0._slaves[0]);
- st.rs0.stop(s0Id);
- coll.remove({});
- st.rs1.awaitReplication(); // To ensure the first shard won't timeout
- printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
- assert(gle.ok);
- assert.eq(gle.err, 'timeout');
- assert(gle.wtimeout);
- assert(gle.shards);
- assert.eq(coll.count(), 0);
-
- //
- // Successful remove on two hosts, write concern timeout on both
- // We don't aggregate two timeouts together
- var s1Id = st.rs1.getNodeId(st.rs1._slaves[0]);
- st.rs1.stop(s1Id);
- // new writes to both shards to ensure that remove will do something on both of them
- coll.insert({_id: -1});
- coll.insert({_id: 1});
-
- coll.remove({});
- printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
-
- assert(!gle.ok);
- assert(gle.errmsg);
- assert.eq(gle.code, 64); // WriteConcernFailed - needed for backwards compatibility
- assert(!gle.wtimeout);
- assert(gle.shards);
- assert(gle.errs);
- assert.eq(coll.count(), 0);
-
- //
- // First replica set with no primary
- //
-
- //
- // Successful bulk insert on two hosts, host changes before gle (error contacting host)
- coll.remove({});
- coll.insert([{_id: 1}, {_id: -1}]);
- // Wait for write to be written to shards before shutting it down.
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- st.rs0.stop(st.rs0.getPrimary(), true); // wait for stop
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- // Should get an error about contacting dead host.
- assert(!gle.ok);
- assert(gle.errmsg);
- assert.eq(coll.count({_id: 1}), 1);
-
- //
- // Failed insert on two hosts, first replica set with no primary
- // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
- // successful writes from.
- coll.remove({_id: 1});
- coll.insert([{_id: 1}, {_id: -1}]);
-
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert(gle.err);
- assert.eq(coll.count({_id: 1}), 1);
-
- st.stop();
+"use strict";
+
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
+
+// Options for a cluster with two replica set shards, each with three nodes.
+// This lets us try a number of GLE scenarios
+var options = {
+ rs: true,
+ rsOptions: {nojournal: ""},
+ // Options for each replica set shard
+ rs0: {nodes: 3},
+ rs1: {nodes: 3}
+};
+
+var st = new ShardingTest({shards: 2, other: options});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection(jsTestName() + ".coll");
+var shards = config.shards.find().toArray();
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+st.printShardingStatus();
+
+var gle = null;
+
+//
+// No journal insert, GLE fails
+coll.remove({});
+coll.insert({_id: 1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1, j: true}));
+assert(!gle.ok);
+assert(gle.errmsg);
+
+//
+// Successful insert, write concern mode invalid
+coll.remove({});
+coll.insert({_id: -1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
+assert(!gle.ok);
+assert(!gle.err);
+assert(gle.errmsg);
+assert.eq(gle.code, 79); // UnknownReplWriteConcern - needed for backwards compatibility
+assert.eq(coll.count(), 1);
+
+//
+// Error on insert (dup key), write concern error not reported
+coll.remove({});
+coll.insert({_id: -1});
+coll.insert({_id: -1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert.eq(coll.count(), 1);
+
+//
+// Successful remove on one shard, write concern timeout on the other
+var s0Id = st.rs0.getNodeId(st.rs0._slaves[0]);
+st.rs0.stop(s0Id);
+coll.remove({});
+st.rs1.awaitReplication();  // To ensure the first shard won't time out
+printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
+assert(gle.ok);
+assert.eq(gle.err, 'timeout');
+assert(gle.wtimeout);
+assert(gle.shards);
+assert.eq(coll.count(), 0);
+
+//
+// Successful remove on two hosts, write concern timeout on both
+// We don't aggregate two timeouts together
+var s1Id = st.rs1.getNodeId(st.rs1._slaves[0]);
+st.rs1.stop(s1Id);
+// New writes to both shards to ensure that the remove will do something on both of them
+coll.insert({_id: -1});
+coll.insert({_id: 1});
+
+coll.remove({});
+printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
+
+assert(!gle.ok);
+assert(gle.errmsg);
+assert.eq(gle.code, 64); // WriteConcernFailed - needed for backwards compatibility
+assert(!gle.wtimeout);
+assert(gle.shards);
+assert(gle.errs);
+assert.eq(coll.count(), 0);
+
+//
+// First replica set with no primary
+//
+
+//
+// Successful bulk insert on two hosts, host changes before gle (error contacting host)
+coll.remove({});
+coll.insert([{_id: 1}, {_id: -1}]);
+// Wait for write to be written to shards before shutting it down.
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+st.rs0.stop(st.rs0.getPrimary(), true); // wait for stop
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+// Should get an error about contacting dead host.
+assert(!gle.ok);
+assert(gle.errmsg);
+assert.eq(coll.count({_id: 1}), 1);
+
+//
+// Failed insert on two hosts, first replica set with no primary
+// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
+// successful writes from.
+coll.remove({_id: 1});
+coll.insert([{_id: 1}, {_id: -1}]);
+
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert(gle.err);
+assert.eq(coll.count({_id: 1}), 1);
+
+st.stop();
})();
diff --git a/jstests/gle/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js
index 159982fc575..537787aa1fe 100644
--- a/jstests/gle/gle_sharded_write.js
+++ b/jstests/gle/gle_sharded_write.js
@@ -7,171 +7,171 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 1, verbose: 3});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection(jsTestName() + ".coll");
- var shards = config.shards.find().toArray();
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
- printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
- assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
-
- st.printShardingStatus();
-
- var gle = null;
-
- //
- // Successful insert
- coll.remove({});
- coll.insert({_id: -1});
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert('err' in gle);
- assert(!gle.err);
- assert.eq(coll.count(), 1);
-
- //
- // Successful update
- coll.remove({});
- coll.insert({_id: 1});
- coll.update({_id: 1}, {$set: {foo: "bar"}});
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert('err' in gle);
- assert(!gle.err);
- assert(gle.updatedExisting);
- assert.eq(gle.n, 1);
- assert.eq(coll.count(), 1);
-
- //
- // Successful multi-update
- coll.remove({});
- coll.insert({_id: 1});
- coll.update({}, {$set: {foo: "bar"}}, false, true);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert('err' in gle);
- assert(!gle.err);
- assert(gle.updatedExisting);
- assert.eq(gle.n, 1);
- assert.eq(coll.count(), 1);
-
- //
- // Successful upsert
- coll.remove({});
- coll.update({_id: 1}, {_id: 1}, true);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert('err' in gle);
- assert(!gle.err);
- assert(!gle.updatedExisting);
- assert.eq(gle.n, 1);
- assert.eq(gle.upserted, 1);
- assert.eq(coll.count(), 1);
-
- //
- // Successful upserts
- coll.remove({});
- coll.update({_id: -1}, {_id: -1}, true);
- coll.update({_id: 1}, {_id: 1}, true);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert('err' in gle);
- assert(!gle.err);
- assert(!gle.updatedExisting);
- assert.eq(gle.n, 1);
- assert.eq(gle.upserted, 1);
- assert.eq(coll.count(), 2);
-
- //
- // Successful remove
- coll.remove({});
- coll.insert({_id: 1});
- coll.remove({_id: 1});
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert('err' in gle);
- assert(!gle.err);
- assert.eq(gle.n, 1);
- assert.eq(coll.count(), 0);
-
- //
- // Error on one host during update
- coll.remove({});
- coll.update({_id: 1}, {$invalid: "xxx"}, true);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert(gle.err);
- assert(gle.code);
- assert(!gle.errmsg);
- assert(gle.singleShard);
- assert.eq(coll.count(), 0);
-
- //
- // Error on two hosts during remove
- coll.remove({});
- coll.remove({$invalid: 'remove'});
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert(gle.err);
- assert(gle.code);
- assert(!gle.errmsg);
- assert(gle.shards);
- assert.eq(coll.count(), 0);
-
- //
- // Repeated calls to GLE should work
- coll.remove({});
- coll.update({_id: 1}, {$invalid: "xxx"}, true);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert(gle.err);
- assert(gle.code);
- assert(!gle.errmsg);
- assert(gle.singleShard);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert(gle.err);
- assert(gle.code);
- assert(!gle.errmsg);
- assert(gle.singleShard);
- assert.eq(coll.count(), 0);
-
- //
- // First shard down
- //
-
- //
- // Successful bulk insert on two hosts, host dies before gle (error contacting host)
- coll.remove({});
- coll.insert([{_id: 1}, {_id: -1}]);
- // Wait for write to be written to shards before shutting it down.
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- st.rs0.stopSet();
-
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- // Should get an error about contacting dead host.
- assert(!gle.ok);
- assert(gle.errmsg);
-
- //
- // Failed insert on two hosts, first host dead
- // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
- // successful writes from.
- coll.remove({_id: 1});
- coll.insert([{_id: 1}, {_id: -1}]);
- printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- assert(gle.ok);
- assert(gle.err);
- assert.eq(coll.count({_id: 1}), 1);
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 1, verbose: 3});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection(jsTestName() + ".coll");
+var shards = config.shards.find().toArray();
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+st.printShardingStatus();
+
+var gle = null;
+
+//
+// Successful insert
+coll.remove({});
+coll.insert({_id: -1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert.eq(coll.count(), 1);
+
+//
+// Successful update
+coll.remove({});
+coll.insert({_id: 1});
+coll.update({_id: 1}, {$set: {foo: "bar"}});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(coll.count(), 1);
+
+//
+// Successful multi-update
+coll.remove({});
+coll.insert({_id: 1});
+coll.update({}, {$set: {foo: "bar"}}, false, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(coll.count(), 1);
+
+//
+// Successful upsert
+coll.remove({});
+coll.update({_id: 1}, {_id: 1}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(!gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(gle.upserted, 1);
+assert.eq(coll.count(), 1);
+
+//
+// Successful upserts
+coll.remove({});
+coll.update({_id: -1}, {_id: -1}, true);
+coll.update({_id: 1}, {_id: 1}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(!gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(gle.upserted, 1);
+assert.eq(coll.count(), 2);
+
+//
+// Successful remove
+coll.remove({});
+coll.insert({_id: 1});
+coll.remove({_id: 1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert.eq(gle.n, 1);
+assert.eq(coll.count(), 0);
+
+//
+// Error on one host during update
+coll.remove({});
+coll.update({_id: 1}, {$invalid: "xxx"}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.singleShard);
+assert.eq(coll.count(), 0);
+
+//
+// Error on two hosts during remove
+coll.remove({});
+coll.remove({$invalid: 'remove'});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.shards);
+assert.eq(coll.count(), 0);
+
+//
+// Repeated calls to GLE should work
+coll.remove({});
+coll.update({_id: 1}, {$invalid: "xxx"}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.singleShard);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.singleShard);
+assert.eq(coll.count(), 0);
+
+//
+// First shard down
+//
+
+//
+// Successful bulk insert on two hosts, host dies before gle (error contacting host)
+coll.remove({});
+coll.insert([{_id: 1}, {_id: -1}]);
+// Wait for the writes to reach the shards before shutting the first shard down.
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+st.rs0.stopSet();
+
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+// Should get an error about contacting dead host.
+assert(!gle.ok);
+assert(gle.errmsg);
+
+//
+// Failed insert on two hosts, first host dead
+// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
+// successful writes from.
+coll.remove({_id: 1});
+coll.insert([{_id: 1}, {_id: -1}]);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+assert(gle.ok);
+assert(gle.err);
+assert.eq(coll.count({_id: 1}), 1);
+
+st.stop();
})();
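
Every assertion block in the test above follows the same write-then-GLE shape: issue a write,
then ask the same connection to report its outcome. A minimal hedged sketch of that pattern
(the "demo" collection name and the reuse of the 'mongos' connection are illustrative only):

    // Issue a write, then fetch its outcome over the same connection.
    var demoColl = mongos.getCollection("test.demo");
    demoColl.insert({_id: 1});
    var gleRes = demoColl.getDB().runCommand({getLastError: 1});
    assert(gleRes.ok);        // the getLastError command itself succeeded
    assert('err' in gleRes);  // the report always carries an 'err' field...
    assert(!gleRes.err);      // ...which is null when the write succeeded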
diff --git a/jstests/gle/updated_existing.js b/jstests/gle/updated_existing.js
index 3838cb33b6f..609676305a9 100644
--- a/jstests/gle/updated_existing.js
+++ b/jstests/gle/updated_existing.js
@@ -1,7 +1,7 @@
/**
-* SERVER-5872 : This test checks that the return message "updatedExisting" of
-* an upsert is not missing when autosplit takes place.
-*/
+ * SERVER-5872 : This test checks that the return message "updatedExisting" of
+ * an upsert is not missing when autosplit takes place.
+ */
var st = new ShardingTest({shards: 1, mongos: 1, verbose: 1, chunkSize: 1});
diff --git a/jstests/hooks/drop_sharded_collections.js b/jstests/hooks/drop_sharded_collections.js
index 5758e3027e5..dc9bc12a5d3 100644
--- a/jstests/hooks/drop_sharded_collections.js
+++ b/jstests/hooks/drop_sharded_collections.js
@@ -3,31 +3,31 @@
* like config.system.sessions).
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
- assert(FixtureHelpers.isMongos(db), "not connected to mongos");
+assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+assert(FixtureHelpers.isMongos(db), "not connected to mongos");
- let balSettingResult = assert.commandWorked(db.adminCommand({balancerStatus: 1}));
- if (balSettingResult.mode !== 'off') {
- assert.commandWorked(db.adminCommand({balancerStop: 1}));
- }
-
- db.getSiblingDB('config').collections.find().forEach(collEntry => {
- if (collEntry._id !== 'config.system.sessions') {
- let nsSplit = collEntry._id.split('.');
- const dbName = nsSplit.shift();
- const collName = nsSplit.join('.');
+let balSettingResult = assert.commandWorked(db.adminCommand({balancerStatus: 1}));
+if (balSettingResult.mode !== 'off') {
+ assert.commandWorked(db.adminCommand({balancerStop: 1}));
+}
- // Note: drop also cleans up tags and chunks associated with ns.
- assert.commandWorked(db.getSiblingDB(dbName).runCommand({drop: collName}));
- }
- });
+db.getSiblingDB('config').collections.find().forEach(collEntry => {
+ if (collEntry._id !== 'config.system.sessions') {
+ let nsSplit = collEntry._id.split('.');
+ const dbName = nsSplit.shift();
+ const collName = nsSplit.join('.');
- // Turn balancer back on if it was not off earlier.
- if (balSettingResult.mode !== 'off') {
- assert.commandWorked(db.adminCommand({balancerStart: 1}));
+ // Note: drop also cleans up tags and chunks associated with ns.
+ assert.commandWorked(db.getSiblingDB(dbName).runCommand({drop: collName}));
}
+});
+
+// Turn balancer back on if it was not off earlier.
+if (balSettingResult.mode !== 'off') {
+ assert.commandWorked(db.adminCommand({balancerStart: 1}));
+}
})();
diff --git a/jstests/hooks/run_check_repl_dbhash.js b/jstests/hooks/run_check_repl_dbhash.js
index bae3943964e..9067d4359ad 100644
--- a/jstests/hooks/run_check_repl_dbhash.js
+++ b/jstests/hooks/run_check_repl_dbhash.js
@@ -3,106 +3,105 @@
'use strict';
(function() {
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
- load('jstests/libs/parallelTester.js'); // For ScopedThread.
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/libs/parallelTester.js'); // For ScopedThread.
- function checkReplicatedDataHashesThread(hosts) {
- load('jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js');
+function checkReplicatedDataHashesThread(hosts) {
+ load('jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js');
- try {
- const excludedDBs = jsTest.options().excludedDBsFromDBHash;
- const rst = new ReplSetTest(hosts[0]);
- rst.checkReplicatedDataHashes(undefined, excludedDBs);
- if (TestData.checkCollectionCounts) {
- rst.checkCollectionCounts();
- }
- return {ok: 1};
- } catch (e) {
- return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
+ try {
+ const excludedDBs = jsTest.options().excludedDBsFromDBHash;
+ const rst = new ReplSetTest(hosts[0]);
+ rst.checkReplicatedDataHashes(undefined, excludedDBs);
+ if (TestData.checkCollectionCounts) {
+ rst.checkCollectionCounts();
}
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
}
+}
- const startTime = Date.now();
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+const startTime = Date.now();
+assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
- let skipped = false;
- try {
- const conn = db.getMongo();
- const topology = DiscoverTopology.findConnectedNodes(conn);
+let skipped = false;
+try {
+ const conn = db.getMongo();
+ const topology = DiscoverTopology.findConnectedNodes(conn);
- if (topology.type === Topology.kStandalone) {
+ if (topology.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for cluster because we are connected to a' +
+ ' stand-alone mongod: ' + tojsononeline(topology));
+ skipped = true;
+ return;
+ }
+
+ if (topology.type === Topology.kReplicaSet) {
+ if (topology.nodes.length === 1) {
print('Skipping data consistency checks for cluster because we are connected to a' +
- ' stand-alone mongod: ' + tojsononeline(topology));
+ ' 1-node replica set: ' + tojsononeline(topology));
skipped = true;
return;
}
- if (topology.type === Topology.kReplicaSet) {
- if (topology.nodes.length === 1) {
- print('Skipping data consistency checks for cluster because we are connected to a' +
- ' 1-node replica set: ' + tojsononeline(topology));
- skipped = true;
- return;
- }
+ const excludedDBs = jsTest.options().excludedDBsFromDBHash;
+ new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes(undefined, excludedDBs);
+ return;
+ }
- const excludedDBs = jsTest.options().excludedDBsFromDBHash;
- new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes(undefined, excludedDBs);
- return;
- }
+ if (topology.type !== Topology.kShardedCluster) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
- if (topology.type !== Topology.kShardedCluster) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
+ const threads = [];
+ try {
+ if (topology.configsvr.nodes.length > 1) {
+ const thread =
+ new ScopedThread(checkReplicatedDataHashesThread, topology.configsvr.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node CSRS: ' + tojsononeline(topology));
}
- const threads = [];
- try {
- if (topology.configsvr.nodes.length > 1) {
- const thread =
- new ScopedThread(checkReplicatedDataHashesThread, topology.configsvr.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for 1-node CSRS: ' +
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
+
+ if (shard.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for stand-alone shard: ' +
tojsononeline(topology));
+ continue;
}
- for (let shardName of Object.keys(topology.shards)) {
- const shard = topology.shards[shardName];
-
- if (shard.type === Topology.kStandalone) {
- print('Skipping data consistency checks for stand-alone shard: ' +
- tojsononeline(topology));
- continue;
- }
-
- if (shard.type !== Topology.kReplicaSet) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
- }
-
- if (shard.nodes.length > 1) {
- const thread = new ScopedThread(checkReplicatedDataHashesThread, shard.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for 1-node replica set shard: ' +
- tojsononeline(topology));
- }
+ if (shard.type !== Topology.kReplicaSet) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
- } finally {
- // Wait for each thread to finish. Throw an error if any thread fails.
- const returnData = threads.map(thread => {
- thread.join();
- return thread.returnData();
- });
- returnData.forEach(res => {
- assert.commandWorked(res, 'data consistency checks failed');
- });
+ if (shard.nodes.length > 1) {
+ const thread = new ScopedThread(checkReplicatedDataHashesThread, shard.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node replica set shard: ' +
+ tojsononeline(topology));
+ }
}
} finally {
- if (!skipped) {
- const totalTime = Date.now() - startTime;
- print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
- }
+ // Wait for each thread to finish. Throw an error if any thread fails.
+ const returnData = threads.map(thread => {
+ thread.join();
+ return thread.returnData();
+ });
+
+ returnData.forEach(res => {
+ assert.commandWorked(res, 'data consistency checks failed');
+ });
+ }
+} finally {
+ if (!skipped) {
+ const totalTime = Date.now() - startTime;
+ print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
}
+}
})();
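
The hook above fans the dbhash check out with one ScopedThread per multi-node replica set and
joins every thread in a finally block, so a mid-loop failure still waits for all workers. A
condensed sketch of that pattern, with 'checkFn' and 'nodeLists' as hypothetical stand-ins for
the real thread body and the topology-derived host lists:

    load('jstests/libs/parallelTester.js');  // For ScopedThread.

    function runChecksInParallel(checkFn, nodeLists) {
        const threads = [];
        try {
            for (let nodes of nodeLists) {
                const thread = new ScopedThread(checkFn, nodes);
                threads.push(thread);
                thread.start();
            }
        } finally {
            // Join every thread before inspecting results, even after a failure above.
            const returnData = threads.map(thread => {
                thread.join();
                return thread.returnData();
            });
            returnData.forEach(res => assert.commandWorked(res, 'data consistency checks failed'));
        }
    }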
diff --git a/jstests/hooks/run_check_repl_dbhash_background.js b/jstests/hooks/run_check_repl_dbhash_background.js
index 5d7e2698780..d20c5f78c21 100644
--- a/jstests/hooks/run_check_repl_dbhash_background.js
+++ b/jstests/hooks/run_check_repl_dbhash_background.js
@@ -18,478 +18,472 @@
'use strict';
(function() {
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
- load('jstests/libs/parallelTester.js'); // For ScopedThread.
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/libs/parallelTester.js'); // For ScopedThread.
- if (typeof db === 'undefined') {
- throw new Error(
- "Expected mongo shell to be connected a server, but global 'db' object isn't defined");
- }
+if (typeof db === 'undefined') {
+ throw new Error(
+        "Expected mongo shell to be connected to a server, but global 'db' object isn't defined");
+}
- // We turn off printing the JavaScript stacktrace in doassert() to avoid generating an
- // overwhelming amount of log messages when handling transient errors.
- TestData = TestData || {};
- TestData.traceExceptions = false;
+// We turn off printing the JavaScript stacktrace in doassert() to avoid generating an
+// overwhelming amount of log messages when handling transient errors.
+TestData = TestData || {};
+TestData.traceExceptions = false;
- const conn = db.getMongo();
- const topology = DiscoverTopology.findConnectedNodes(conn);
+const conn = db.getMongo();
+const topology = DiscoverTopology.findConnectedNodes(conn);
- function checkReplDbhashBackgroundThread(hosts) {
- let debugInfo = [];
+function checkReplDbhashBackgroundThread(hosts) {
+ let debugInfo = [];
- // Calls 'func' with the print() function overridden to be a no-op.
- const quietly = (func) => {
- const printOriginal = print;
- try {
- print = Function.prototype;
- func();
- } finally {
- print = printOriginal;
- }
- };
+ // Calls 'func' with the print() function overridden to be a no-op.
+ const quietly = (func) => {
+ const printOriginal = print;
+ try {
+ print = Function.prototype;
+ func();
+ } finally {
+ print = printOriginal;
+ }
+ };
+
+ let rst;
+ // We construct the ReplSetTest instance with the print() function overridden to be a no-op
+ // in order to suppress the log messages about the replica set configuration. The
+ // run_check_repl_dbhash_background.js hook is executed frequently by resmoke.py and would
+ // otherwise lead to generating an overwhelming amount of log messages.
+ quietly(() => {
+ rst = new ReplSetTest(hosts[0]);
+ });
+
+ if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+ print("Skipping data consistency checks for replica set: " + rst.getURL() +
+ " because storage engine does not support snapshot reads.");
+ return {ok: 1};
+ }
+ print("Running data consistency checks for replica set: " + rst.getURL());
+
+ const sessions = [
+ rst.getPrimary(),
+ ...rst.getSecondaries().filter(conn => {
+ return !conn.adminCommand({isMaster: 1}).arbiterOnly;
+ })
+ ].map(conn => conn.startSession({causalConsistency: false}));
+
+ const resetFns = [];
+ const kForeverSeconds = 1e9;
+ const dbNames = new Set();
+
+    // We enable the "WTPreserveSnapshotHistoryIndefinitely" failpoint to ensure that the same
+    // snapshot will be available to read on both the primary and the secondaries.
+ for (let session of sessions) {
+ const db = session.getDatabase('admin');
+
+ let preserveRes = assert.commandWorked(db.runCommand({
+ configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
+ mode: 'alwaysOn',
+ }),
+ debugInfo);
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "preserveFailPointOpTime": preserveRes['operationTime']
+ });
- let rst;
- // We construct the ReplSetTest instance with the print() function overridden to be a no-op
- // in order to suppress the log messages about the replica set configuration. The
- // run_check_repl_dbhash_background.js hook is executed frequently by resmoke.py and would
- // otherwise lead to generating an overwhelming amount of log messages.
- quietly(() => {
- rst = new ReplSetTest(hosts[0]);
+ resetFns.push(() => {
+ assert.commandWorked(db.runCommand({
+ configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
+ mode: 'off',
+ }));
});
+ }
- if (!rst.getPrimary()
- .adminCommand("serverStatus")
- .storageEngine.supportsSnapshotReadConcern) {
- print("Skipping data consistency checks for replica set: " + rst.getURL() +
- " because storage engine does not support snapshot reads.");
- return {ok: 1};
+ for (let session of sessions) {
+ const db = session.getDatabase('admin');
+ const res = assert.commandWorked(db.runCommand({listDatabases: 1, nameOnly: true}));
+ for (let dbInfo of res.databases) {
+ dbNames.add(dbInfo.name);
}
- print("Running data consistency checks for replica set: " + rst.getURL());
-
- const sessions = [
- rst.getPrimary(),
- ...rst.getSecondaries().filter(conn => {
- return !conn.adminCommand({isMaster: 1}).arbiterOnly;
- })
- ].map(conn => conn.startSession({causalConsistency: false}));
-
- const resetFns = [];
- const kForeverSeconds = 1e9;
- const dbNames = new Set();
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "listDatabaseOpTime": res['operationTime']
+ });
+ }
- // We enable the "WTPreserveSnapshotHistoryIndefinitely" failpoint to ensure that the same
- // snapshot will be available to read at on the primary and secondaries.
- for (let session of sessions) {
+ // Transactions cannot be run on the following databases so we don't attempt to read at a
+ // clusterTime on them either. (The "local" database is also not replicated.)
+ dbNames.delete('admin');
+ dbNames.delete('config');
+ dbNames.delete('local');
+
+ const results = [];
+
+ // The waitForSecondaries() function waits for all secondaries to have applied up to
+ // 'clusterTime' locally. This ensures that a later $_internalReadAtClusterTime read doesn't
+ // fail as a result of the secondary's clusterTime being behind 'clusterTime'.
+ const waitForSecondaries = (clusterTime, signedClusterTime) => {
+ debugInfo.push({"waitForSecondaries": clusterTime, "signedClusterTime": signedClusterTime});
+ for (let i = 1; i < sessions.length; ++i) {
+ const session = sessions[i];
const db = session.getDatabase('admin');
- let preserveRes = assert.commandWorked(db.runCommand({
- configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
- mode: 'alwaysOn',
- }),
- debugInfo);
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "preserveFailPointOpTime": preserveRes['operationTime']
- });
-
- resetFns.push(() => {
- assert.commandWorked(db.runCommand({
- configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
- mode: 'off',
- }));
- });
- }
+ // We advance the clusterTime on the secondary's session to ensure that
+ // 'clusterTime' doesn't exceed the node's notion of the latest clusterTime.
+ session.advanceClusterTime(signedClusterTime);
+
+ // We need to make sure the secondary has applied up to 'clusterTime' and advanced
+ // its majority commit point.
+
+ if (jsTest.options().enableMajorityReadConcern !== false) {
+ // If majority reads are supported, we can issue an afterClusterTime read on
+ // a nonexistent collection and wait on it. This has the advantage of being
+ // easier to debug in case of a timeout.
+ let res = assert.commandWorked(db.runCommand({
+ find: 'run_check_repl_dbhash_background',
+ readConcern: {level: 'majority', afterClusterTime: clusterTime},
+ limit: 1,
+ singleBatch: true,
+ }),
+ debugInfo);
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "majorityReadOpTime": res['operationTime']
+ });
+ } else {
+                // If majority reads are not supported, then our only option is to poll until
+                // the secondary's appliedOpTime catches up.
+ assert.soon(
+ function() {
+ const rsStatus =
+ assert.commandWorked(db.adminCommand({replSetGetStatus: 1}));
+
+ // The 'atClusterTime' waits for the appliedOpTime to advance to
+ // 'clusterTime'.
+ const appliedOpTime = rsStatus.optimes.appliedOpTime;
+ if (bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0) {
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "appliedOpTime": appliedOpTime.ts
+ });
+ }
- for (let session of sessions) {
- const db = session.getDatabase('admin');
- const res = assert.commandWorked(db.runCommand({listDatabases: 1, nameOnly: true}));
- for (let dbInfo of res.databases) {
- dbNames.add(dbInfo.name);
+ return bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0;
+ },
+ "The majority commit point on secondary " + i + " failed to reach " +
+ clusterTime,
+ 10 * 60 * 1000);
}
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "listDatabaseOpTime": res['operationTime']
- });
}
-
- // Transactions cannot be run on the following databases so we don't attempt to read at a
- // clusterTime on them either. (The "local" database is also not replicated.)
- dbNames.delete('admin');
- dbNames.delete('config');
- dbNames.delete('local');
-
- const results = [];
-
- // The waitForSecondaries() function waits for all secondaries to have applied up to
- // 'clusterTime' locally. This ensures that a later $_internalReadAtClusterTime read doesn't
- // fail as a result of the secondary's clusterTime being behind 'clusterTime'.
- const waitForSecondaries = (clusterTime, signedClusterTime) => {
- debugInfo.push(
- {"waitForSecondaries": clusterTime, "signedClusterTime": signedClusterTime});
- for (let i = 1; i < sessions.length; ++i) {
- const session = sessions[i];
- const db = session.getDatabase('admin');
-
- // We advance the clusterTime on the secondary's session to ensure that
- // 'clusterTime' doesn't exceed the node's notion of the latest clusterTime.
- session.advanceClusterTime(signedClusterTime);
-
- // We need to make sure the secondary has applied up to 'clusterTime' and advanced
- // its majority commit point.
-
- if (jsTest.options().enableMajorityReadConcern !== false) {
- // If majority reads are supported, we can issue an afterClusterTime read on
- // a nonexistent collection and wait on it. This has the advantage of being
- // easier to debug in case of a timeout.
- let res = assert.commandWorked(db.runCommand({
- find: 'run_check_repl_dbhash_background',
- readConcern: {level: 'majority', afterClusterTime: clusterTime},
- limit: 1,
- singleBatch: true,
- }),
- debugInfo);
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "majorityReadOpTime": res['operationTime']
- });
- } else {
- // If majority reads are not supported, then our only option is to poll for the
- // appliedOpTime on the secondary to catch up.
- assert.soon(
- function() {
- const rsStatus =
- assert.commandWorked(db.adminCommand({replSetGetStatus: 1}));
-
- // The 'atClusterTime' waits for the appliedOpTime to advance to
- // 'clusterTime'.
- const appliedOpTime = rsStatus.optimes.appliedOpTime;
- if (bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0) {
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "appliedOpTime": appliedOpTime.ts
- });
- }
-
- return bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0;
- },
- "The majority commit point on secondary " + i + " failed to reach " +
- clusterTime,
- 10 * 60 * 1000);
+ };
+
+    // The checkCollectionHashesForDB() function identifies a collection by its UUID, and ignores
+    // the case where a collection isn't present on a node because the collection catalog isn't
+    // multi-versioned. Unlike with ReplSetTest#checkReplicatedDataHashes(), it is possible for a
+    // collection catalog operation (e.g. a drop or rename) to have been applied on the primary
+    // but not yet applied on the secondary.
+ const checkCollectionHashesForDB = (dbName, clusterTime) => {
+ const result = [];
+ const hashes =
+ rst.getHashesUsingSessions(sessions, dbName, {readAtClusterTime: clusterTime});
+ const hashesByUUID = hashes.map((response, i) => {
+ const info = {};
+
+ for (let collName of Object.keys(response.collections)) {
+ const hash = response.collections[collName];
+ const uuid = response.uuids[collName];
+ if (uuid !== undefined) {
+ info[uuid.toString()] = {
+ host: sessions[i].getClient().host,
+ hash,
+ collName,
+ uuid,
+ };
}
}
- };
- // The checkCollectionHashesForDB() function identifies a collection by its UUID and ignores
- // the case where a collection isn't present on a node to work around how the collection
- // catalog isn't multi-versioned. Unlike with ReplSetTest#checkReplicatedDataHashes(), it is
- // possible for a collection catalog operation (e.g. a drop or rename) to have been applied
- // on the primary but not yet applied on the secondary.
- const checkCollectionHashesForDB = (dbName, clusterTime) => {
- const result = [];
- const hashes =
- rst.getHashesUsingSessions(sessions, dbName, {readAtClusterTime: clusterTime});
- const hashesByUUID = hashes.map((response, i) => {
- const info = {};
-
- for (let collName of Object.keys(response.collections)) {
- const hash = response.collections[collName];
- const uuid = response.uuids[collName];
- if (uuid !== undefined) {
- info[uuid.toString()] = {
- host: sessions[i].getClient().host,
- hash,
- collName,
- uuid,
- };
- }
- }
+ return Object.assign({}, response, {hashesByUUID: info});
+ });
- return Object.assign({}, response, {hashesByUUID: info});
- });
-
- const primarySession = sessions[0];
- for (let i = 1; i < hashes.length; ++i) {
- const uuids = new Set([
- ...Object.keys(hashesByUUID[0].hashesByUUID),
- ...Object.keys(hashesByUUID[i].hashesByUUID),
- ]);
-
- const secondarySession = sessions[i];
- for (let uuid of uuids) {
- const primaryInfo = hashesByUUID[0].hashesByUUID[uuid];
- const secondaryInfo = hashesByUUID[i].hashesByUUID[uuid];
-
- if (primaryInfo === undefined) {
- print("Skipping collection because it doesn't exist on the primary: " +
- tojsononeline(secondaryInfo));
- continue;
- }
+ const primarySession = sessions[0];
+ for (let i = 1; i < hashes.length; ++i) {
+ const uuids = new Set([
+ ...Object.keys(hashesByUUID[0].hashesByUUID),
+ ...Object.keys(hashesByUUID[i].hashesByUUID),
+ ]);
+
+ const secondarySession = sessions[i];
+ for (let uuid of uuids) {
+ const primaryInfo = hashesByUUID[0].hashesByUUID[uuid];
+ const secondaryInfo = hashesByUUID[i].hashesByUUID[uuid];
+
+ if (primaryInfo === undefined) {
+ print("Skipping collection because it doesn't exist on the primary: " +
+ tojsononeline(secondaryInfo));
+ continue;
+ }
- if (secondaryInfo === undefined) {
- print("Skipping collection because it doesn't exist on the secondary: " +
- tojsononeline(primaryInfo));
- continue;
- }
+ if (secondaryInfo === undefined) {
+ print("Skipping collection because it doesn't exist on the secondary: " +
+ tojsononeline(primaryInfo));
+ continue;
+ }
- if (primaryInfo.hash !== secondaryInfo.hash) {
- print("DBHash mismatch found for collection with uuid: " + uuid +
- ". Primary info: " + tojsononeline(primaryInfo) +
- ". Secondary info: " + tojsononeline(secondaryInfo));
- const diff = rst.getCollectionDiffUsingSessions(
- primarySession, secondarySession, dbName, primaryInfo.uuid);
-
- result.push({
- primary: primaryInfo,
- secondary: secondaryInfo,
- dbName: dbName,
- diff: diff,
- });
- }
+ if (primaryInfo.hash !== secondaryInfo.hash) {
+ print("DBHash mismatch found for collection with uuid: " + uuid +
+ ". Primary info: " + tojsononeline(primaryInfo) +
+ ". Secondary info: " + tojsononeline(secondaryInfo));
+ const diff = rst.getCollectionDiffUsingSessions(
+ primarySession, secondarySession, dbName, primaryInfo.uuid);
+
+ result.push({
+ primary: primaryInfo,
+ secondary: secondaryInfo,
+ dbName: dbName,
+ diff: diff,
+ });
}
}
+ }
- return result;
- };
+ return result;
+ };
+
+ for (let dbName of dbNames) {
+ let result;
+ let clusterTime;
+ let previousClusterTime;
+ let hasTransientError;
+ let performNoopWrite;
+
+ // The isTransientError() function is responsible for setting hasTransientError to true.
+ const isTransientError = (e) => {
+ // It is possible for the ReplSetTest#getHashesUsingSessions() function to be
+ // interrupted due to active sessions being killed by a test running concurrently.
+ // We treat this as a transient error and simply retry running the dbHash check.
+ //
+ // Note that unlike auto_retry_transaction.js, we do not treat CursorKilled or
+ // CursorNotFound error responses as transient errors because the
+ // run_check_repl_dbhash_background.js hook would only establish a cursor via
+ // ReplSetTest#getCollectionDiffUsingSessions() upon detecting a dbHash mismatch. It
+            // is presumed to still be useful to know that a bug exists even if we cannot get more
+ // diagnostics for it.
+ if (e.code === ErrorCodes.Interrupted) {
+ hasTransientError = true;
+ }
- for (let dbName of dbNames) {
- let result;
- let clusterTime;
- let previousClusterTime;
- let hasTransientError;
- let performNoopWrite;
-
- // The isTransientError() function is responsible for setting hasTransientError to true.
- const isTransientError = (e) => {
- // It is possible for the ReplSetTest#getHashesUsingSessions() function to be
- // interrupted due to active sessions being killed by a test running concurrently.
- // We treat this as a transient error and simply retry running the dbHash check.
- //
- // Note that unlike auto_retry_transaction.js, we do not treat CursorKilled or
- // CursorNotFound error responses as transient errors because the
- // run_check_repl_dbhash_background.js hook would only establish a cursor via
- // ReplSetTest#getCollectionDiffUsingSessions() upon detecting a dbHash mismatch. It
- // is presumed to still useful to know that a bug exists even if we cannot get more
- // diagnostics for it.
- if (e.code === ErrorCodes.Interrupted) {
- hasTransientError = true;
+            // Perform a no-op write to the primary if the clusterTime between each call remains
+            // the same and we encounter the SnapshotUnavailable error, as the secondary's
+            // minimum timestamp can be greater than the primary's minimum timestamp.
+ if (e.code === ErrorCodes.SnapshotUnavailable) {
+ if (bsonBinaryEqual(clusterTime, previousClusterTime)) {
+ performNoopWrite = true;
}
+ hasTransientError = true;
+ }
- // Perform a no-op write to the primary if the clusterTime between each call remain
- // the same and if we encounter the SnapshotUnavailable error as the secondaries
- // minimum timestamp can be greater than the primaries minimum timestamp.
- if (e.code === ErrorCodes.SnapshotUnavailable) {
- if (bsonBinaryEqual(clusterTime, previousClusterTime)) {
- performNoopWrite = true;
- }
- hasTransientError = true;
- }
+ // InvalidOptions can be returned when $_internalReadAtClusterTime is greater than
+ // the all-committed timestamp. As the dbHash command is running in the background
+            // at varying times, it's possible that we may run dbHash while a prepared
+            // transaction has yet to commit or abort.
+ if (e.code === ErrorCodes.InvalidOptions) {
+ hasTransientError = true;
+ }
- // InvalidOptions can be returned when $_internalReadAtClusterTime is greater than
- // the all-committed timestamp. As the dbHash command is running in the background
- // at varying times, it's possible that we may run dbHash while a prepared
- // transactions has yet to commit or abort.
- if (e.code === ErrorCodes.InvalidOptions) {
- hasTransientError = true;
- }
+ return hasTransientError;
+ };
- return hasTransientError;
- };
-
- do {
- // SERVER-38928: Due to races around advancing last applied, there's technically no
- // guarantee that a primary will report a later operation time than its
- // secondaries. Perform the snapshot read at the latest reported operation time.
- previousClusterTime = clusterTime;
- clusterTime = sessions[0].getOperationTime();
- let signedClusterTime = sessions[0].getClusterTime();
- for (let sess of sessions.slice(1)) {
- let ts = sess.getOperationTime();
- if (timestampCmp(ts, clusterTime) > 0) {
- clusterTime = ts;
- signedClusterTime = sess.getClusterTime();
- }
+ do {
+ // SERVER-38928: Due to races around advancing last applied, there's technically no
+ // guarantee that a primary will report a later operation time than its
+ // secondaries. Perform the snapshot read at the latest reported operation time.
+ previousClusterTime = clusterTime;
+ clusterTime = sessions[0].getOperationTime();
+ let signedClusterTime = sessions[0].getClusterTime();
+ for (let sess of sessions.slice(1)) {
+ let ts = sess.getOperationTime();
+ if (timestampCmp(ts, clusterTime) > 0) {
+ clusterTime = ts;
+ signedClusterTime = sess.getClusterTime();
}
- waitForSecondaries(clusterTime, signedClusterTime);
+ }
+ waitForSecondaries(clusterTime, signedClusterTime);
- for (let session of sessions) {
- debugInfo.push({
- "node": session.getClient(),
- "session": session,
- "readAtClusterTime": clusterTime
- });
- }
+ for (let session of sessions) {
+ debugInfo.push({
+ "node": session.getClient(),
+ "session": session,
+ "readAtClusterTime": clusterTime
+ });
+ }
- hasTransientError = false;
- performNoopWrite = false;
-
- try {
- result = checkCollectionHashesForDB(dbName, clusterTime);
- } catch (e) {
- if (isTransientError(e)) {
- if (performNoopWrite) {
- const primarySession = sessions[0];
-
- // If the no-op write fails due to the global lock not being able to be
- // acquired within 1 millisecond, retry the operation again at a later
- // time.
- assert.commandWorkedOrFailedWithCode(
- primarySession.getDatabase(dbName).adminCommand(
- {appendOplogNote: 1, data: {}}),
- ErrorCodes.LockFailed);
- }
+ hasTransientError = false;
+ performNoopWrite = false;
- debugInfo.push({"transientError": e, "performNoopWrite": performNoopWrite});
- continue;
+ try {
+ result = checkCollectionHashesForDB(dbName, clusterTime);
+ } catch (e) {
+ if (isTransientError(e)) {
+ if (performNoopWrite) {
+ const primarySession = sessions[0];
+
+                        // If the no-op write fails because the global lock could not be acquired
+                        // within 1 millisecond, retry the operation at a later time.
+ assert.commandWorkedOrFailedWithCode(
+ primarySession.getDatabase(dbName).adminCommand(
+ {appendOplogNote: 1, data: {}}),
+ ErrorCodes.LockFailed);
}
- jsTestLog(debugInfo);
- throw e;
+ debugInfo.push({"transientError": e, "performNoopWrite": performNoopWrite});
+ continue;
}
- } while (hasTransientError);
- for (let mismatchInfo of result) {
- mismatchInfo.atClusterTime = clusterTime;
- results.push(mismatchInfo);
+ jsTestLog(debugInfo);
+ throw e;
}
- }
+ } while (hasTransientError);
- for (let resetFn of resetFns) {
- resetFn();
+ for (let mismatchInfo of result) {
+ mismatchInfo.atClusterTime = clusterTime;
+ results.push(mismatchInfo);
}
+ }
- const headings = [];
- let errorBlob = '';
+ for (let resetFn of resetFns) {
+ resetFn();
+ }
- for (let mismatchInfo of results) {
- const diff = mismatchInfo.diff;
- delete mismatchInfo.diff;
+ const headings = [];
+ let errorBlob = '';
- const heading =
- `dbhash mismatch for ${mismatchInfo.dbName}.${mismatchInfo.primary.collName}`;
+ for (let mismatchInfo of results) {
+ const diff = mismatchInfo.diff;
+ delete mismatchInfo.diff;
- headings.push(heading);
+ const heading =
+ `dbhash mismatch for ${mismatchInfo.dbName}.${mismatchInfo.primary.collName}`;
- if (headings.length > 1) {
- errorBlob += '\n\n';
- }
- errorBlob += heading;
- errorBlob += `: ${tojson(mismatchInfo)}`;
-
- if (diff.docsWithDifferentContents.length > 0) {
- errorBlob +=
- '\nThe following documents have different contents on the primary and' +
- ' secondary:';
- for (let {
- primary, secondary
- } of diff.docsWithDifferentContents) {
- errorBlob += `\n primary: ${tojsononeline(primary)}`;
- errorBlob += `\n secondary: ${tojsononeline(secondary)}`;
- }
- } else {
- errorBlob += '\nNo documents have different contents on the primary and secondary';
- }
+ headings.push(heading);
- if (diff.docsMissingOnPrimary.length > 0) {
- errorBlob += "\nThe following documents aren't present on the primary:";
- for (let doc of diff.docsMissingOnPrimary) {
- errorBlob += `\n ${tojsononeline(doc)}`;
- }
- } else {
- errorBlob += '\nNo documents are missing from the primary';
+ if (headings.length > 1) {
+ errorBlob += '\n\n';
+ }
+ errorBlob += heading;
+ errorBlob += `: ${tojson(mismatchInfo)}`;
+
+ if (diff.docsWithDifferentContents.length > 0) {
+ errorBlob += '\nThe following documents have different contents on the primary and' +
+ ' secondary:';
+ for (let {primary, secondary} of diff.docsWithDifferentContents) {
+ errorBlob += `\n primary: ${tojsononeline(primary)}`;
+ errorBlob += `\n secondary: ${tojsononeline(secondary)}`;
}
+ } else {
+ errorBlob += '\nNo documents have different contents on the primary and secondary';
+ }
- if (diff.docsMissingOnSecondary.length > 0) {
- errorBlob += "\nThe following documents aren't present on the secondary:";
- for (let doc of diff.docsMissingOnSecondary) {
- errorBlob += `\n ${tojsononeline(doc)}`;
- }
- } else {
- errorBlob += '\nNo documents are missing from the secondary';
+ if (diff.docsMissingOnPrimary.length > 0) {
+ errorBlob += "\nThe following documents aren't present on the primary:";
+ for (let doc of diff.docsMissingOnPrimary) {
+ errorBlob += `\n ${tojsononeline(doc)}`;
}
+ } else {
+ errorBlob += '\nNo documents are missing from the primary';
}
- if (headings.length > 0) {
- for (let session of sessions) {
- const query = {};
- const limit = 100;
- rst.dumpOplog(session.getClient(), query, limit);
+ if (diff.docsMissingOnSecondary.length > 0) {
+ errorBlob += "\nThe following documents aren't present on the secondary:";
+ for (let doc of diff.docsMissingOnSecondary) {
+ errorBlob += `\n ${tojsononeline(doc)}`;
}
+ } else {
+ errorBlob += '\nNo documents are missing from the secondary';
+ }
+ }
- print(errorBlob);
- return {
- ok: 0,
- hosts: hosts,
- error: `dbhash mismatch (search for the following headings): ${tojson(headings)}`
- };
+ if (headings.length > 0) {
+ for (let session of sessions) {
+ const query = {};
+ const limit = 100;
+ rst.dumpOplog(session.getClient(), query, limit);
}
- return {ok: 1};
+ print(errorBlob);
+ return {
+ ok: 0,
+ hosts: hosts,
+ error: `dbhash mismatch (search for the following headings): ${tojson(headings)}`
+ };
}
- if (topology.type === Topology.kReplicaSet) {
- let res = checkReplDbhashBackgroundThread(topology.nodes);
- assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
- } else if (topology.type === Topology.kShardedCluster) {
- const threads = [];
- try {
- if (topology.configsvr.nodes.length > 1) {
- const thread =
- new ScopedThread(checkReplDbhashBackgroundThread, topology.configsvr.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for 1-node CSRS: ' +
- tojsononeline(topology.configsvr));
- }
+ return {ok: 1};
+}
+
+if (topology.type === Topology.kReplicaSet) {
+ let res = checkReplDbhashBackgroundThread(topology.nodes);
+ assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
+} else if (topology.type === Topology.kShardedCluster) {
+ const threads = [];
+ try {
+ if (topology.configsvr.nodes.length > 1) {
+ const thread =
+ new ScopedThread(checkReplDbhashBackgroundThread, topology.configsvr.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node CSRS: ' +
+ tojsononeline(topology.configsvr));
+ }
- for (let shardName of Object.keys(topology.shards)) {
- const shard = topology.shards[shardName];
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
- if (shard.type === Topology.kStandalone) {
- print('Skipping data consistency checks for stand-alone shard ' + shardName +
- ": " + tojsononeline(shard));
- continue;
- }
+ if (shard.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for stand-alone shard ' + shardName + ": " +
+ tojsononeline(shard));
+ continue;
+ }
- if (shard.type !== Topology.kReplicaSet) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
- }
+ if (shard.type !== Topology.kReplicaSet) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
- if (shard.nodes.length > 1) {
- const thread = new ScopedThread(checkReplDbhashBackgroundThread, shard.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for stand-alone shard ' + shardName +
- ": " + tojsononeline(shard));
- }
+ if (shard.nodes.length > 1) {
+ const thread = new ScopedThread(checkReplDbhashBackgroundThread, shard.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+                print('Skipping data consistency checks for 1-node replica set shard ' +
+                      shardName + ": " + tojsononeline(shard));
}
- } finally {
- // Wait for each thread to finish. Throw an error if any thread fails.
- let exception;
- const returnData = threads.map(thread => {
- try {
- thread.join();
- return thread.returnData();
- } catch (e) {
- if (!exception) {
- exception = e;
- }
+ }
+ } finally {
+ // Wait for each thread to finish. Throw an error if any thread fails.
+ let exception;
+ const returnData = threads.map(thread => {
+ try {
+ thread.join();
+ return thread.returnData();
+ } catch (e) {
+ if (!exception) {
+ exception = e;
}
- });
- if (exception) {
- throw exception;
}
-
- returnData.forEach(res => {
- assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
- });
+ });
+ if (exception) {
+ throw exception;
}
- } else {
- throw new Error('Unsupported topology configuration: ' + tojson(topology));
+
+ returnData.forEach(res => {
+ assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
+ });
}
+} else {
+ throw new Error('Unsupported topology configuration: ' + tojson(topology));
+}
})();
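
The retry loop in the hook above reduces to a simple scheme: run the check, let
isTransientError() classify any exception, retry on the transient codes (Interrupted,
SnapshotUnavailable, InvalidOptions), and rethrow everything else. A simplified sketch, where
'runCheck' is a hypothetical stand-in for the per-database dbhash body:

    function withTransientRetry(runCheck) {
        let hasTransientError;
        do {
            hasTransientError = false;
            try {
                return runCheck();
            } catch (e) {
                // The same codes the hook treats as transient.
                if (e.code === ErrorCodes.Interrupted ||
                    e.code === ErrorCodes.SnapshotUnavailable ||
                    e.code === ErrorCodes.InvalidOptions) {
                    hasTransientError = true;
                    continue;  // re-evaluate the loop condition and retry
                }
                throw e;  // anything else is a real failure
            }
        } while (hasTransientError);
    }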
diff --git a/jstests/hooks/run_check_repl_oplogs.js b/jstests/hooks/run_check_repl_oplogs.js
index 40fe76ab4ea..95a03105d0a 100644
--- a/jstests/hooks/run_check_repl_oplogs.js
+++ b/jstests/hooks/run_check_repl_oplogs.js
@@ -3,36 +3,36 @@
'use strict';
(function() {
- var startTime = Date.now();
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
-
- let runCheckOnReplSet = function(db) {
- let primaryInfo = db.isMaster();
-
- assert(primaryInfo.ismaster,
- 'shell is not connected to the primary or master node: ' + tojson(primaryInfo));
-
- let testFixture = new ReplSetTest(db.getMongo().host);
- testFixture.checkOplogs();
- };
-
- if (db.getMongo().isMongos()) {
- let configDB = db.getSiblingDB('config');
-
- // Run check on every shard.
- configDB.shards.find().forEach(shardEntry => {
- let newConn = new Mongo(shardEntry.host);
- runCheckOnReplSet(newConn.getDB('test'));
- });
-
- // Run check on config server.
- let cmdLineOpts = db.adminCommand({getCmdLineOpts: 1});
- let configConn = new Mongo(cmdLineOpts.parsed.sharding.configDB);
- runCheckOnReplSet(configConn.getDB('test'));
- } else {
- runCheckOnReplSet(db);
- }
-
- var totalTime = Date.now() - startTime;
- print('Finished consistency oplog checks of cluster in ' + totalTime + ' ms.');
+var startTime = Date.now();
+assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+
+let runCheckOnReplSet = function(db) {
+ let primaryInfo = db.isMaster();
+
+ assert(primaryInfo.ismaster,
+ 'shell is not connected to the primary or master node: ' + tojson(primaryInfo));
+
+ let testFixture = new ReplSetTest(db.getMongo().host);
+ testFixture.checkOplogs();
+};
+
+if (db.getMongo().isMongos()) {
+ let configDB = db.getSiblingDB('config');
+
+ // Run check on every shard.
+ configDB.shards.find().forEach(shardEntry => {
+ let newConn = new Mongo(shardEntry.host);
+ runCheckOnReplSet(newConn.getDB('test'));
+ });
+
+ // Run check on config server.
+ let cmdLineOpts = db.adminCommand({getCmdLineOpts: 1});
+ let configConn = new Mongo(cmdLineOpts.parsed.sharding.configDB);
+ runCheckOnReplSet(configConn.getDB('test'));
+} else {
+ runCheckOnReplSet(db);
+}
+
+var totalTime = Date.now() - startTime;
+print('Finished consistency oplog checks of cluster in ' + totalTime + ' ms.');
})();
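
When connected to a mongos, the hook above discovers every shard from config.shards and repeats
the oplog check against each shard's replica set, plus the config servers. A compact sketch of
just the shard fan-out (assuming, as the hook does, a shell global 'db' connected to a mongos):

    db.getSiblingDB('config').shards.find().forEach(shardEntry => {
        // shardEntry.host is a 'setName/host:port,...' connection string.
        const shardConn = new Mongo(shardEntry.host);
        new ReplSetTest(shardConn.host).checkOplogs();
    });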
diff --git a/jstests/hooks/run_initial_sync_node_validation.js b/jstests/hooks/run_initial_sync_node_validation.js
index c0a9cc362a1..b624267f28a 100644
--- a/jstests/hooks/run_initial_sync_node_validation.js
+++ b/jstests/hooks/run_initial_sync_node_validation.js
@@ -3,48 +3,47 @@
'use strict';
(function() {
- var startTime = Date.now();
-
- var primaryInfo = db.isMaster();
- assert(primaryInfo.ismaster,
- 'shell is not connected to the primary node: ' + tojson(primaryInfo));
-
- var cmdLineOpts = db.adminCommand('getCmdLineOpts');
- assert.commandWorked(cmdLineOpts);
-
- // The initial sync hooks only work for replica sets.
- var rst = new ReplSetTest(db.getMongo().host);
-
- // Call getPrimary to populate rst with information about the nodes.
- var primary = rst.getPrimary();
- assert(primary, 'calling getPrimary() failed');
-
- // Find the hidden node.
- var hiddenNode;
- for (var secondary of rst._slaves) {
- var isMasterRes = secondary.getDB('admin').isMaster();
- if (isMasterRes.hidden) {
- hiddenNode = secondary;
- break;
- }
+var startTime = Date.now();
+
+var primaryInfo = db.isMaster();
+assert(primaryInfo.ismaster, 'shell is not connected to the primary node: ' + tojson(primaryInfo));
+
+var cmdLineOpts = db.adminCommand('getCmdLineOpts');
+assert.commandWorked(cmdLineOpts);
+
+// The initial sync hooks only work for replica sets.
+var rst = new ReplSetTest(db.getMongo().host);
+
+// Call getPrimary to populate rst with information about the nodes.
+var primary = rst.getPrimary();
+assert(primary, 'calling getPrimary() failed');
+
+// Find the hidden node.
+var hiddenNode;
+for (var secondary of rst._slaves) {
+ var isMasterRes = secondary.getDB('admin').isMaster();
+ if (isMasterRes.hidden) {
+ hiddenNode = secondary;
+ break;
}
+}
- assert(hiddenNode, 'No hidden initial sync node was found in the replica set');
+assert(hiddenNode, 'No hidden initial sync node was found in the replica set');
- // Confirm that the hidden node is in SECONDARY state.
- var res = assert.commandWorked(hiddenNode.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.myState, ReplSetTest.State.SECONDARY, tojson(res));
+// Confirm that the hidden node is in SECONDARY state.
+var res = assert.commandWorked(hiddenNode.adminCommand({replSetGetStatus: 1}));
+assert.eq(res.myState, ReplSetTest.State.SECONDARY, tojson(res));
- /* The checkReplicatedDataHashes call waits until all operations have replicated to and
- have been applied on the secondaries, so we run the validation script after it
- to ensure we're validating the entire contents of the collection */
+/* The checkReplicatedDataHashes call waits until all operations have replicated to and
+ have been applied on the secondaries, so we run the validation script after it
+ to ensure we're validating the entire contents of the collection */
- // For checkDBHashes
- const excludedDBs = jsTest.options().excludedDBsFromDBHash;
- rst.checkReplicatedDataHashes(undefined, excludedDBs);
+// For checkDBHashes
+const excludedDBs = jsTest.options().excludedDBsFromDBHash;
+rst.checkReplicatedDataHashes(undefined, excludedDBs);
- load('jstests/hooks/run_validate_collections.js');
+load('jstests/hooks/run_validate_collections.js');
- var totalTime = Date.now() - startTime;
- print('Finished consistency checks of initial sync node in ' + totalTime + ' ms.');
+var totalTime = Date.now() - startTime;
+print('Finished consistency checks of initial sync node in ' + totalTime + ' ms.');
})();
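
The hook above identifies the initial sync node by probing each secondary's isMaster response
for the 'hidden' flag. A tiny sketch of that probe (assuming an already-populated ReplSetTest
handle 'rst'; like the hook, it reaches into the internal '_slaves' member):

    const hiddenNodes = rst._slaves.filter(
        secondary => secondary.getDB('admin').isMaster().hidden);
    assert(hiddenNodes.length > 0, 'no hidden initial sync node found');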
diff --git a/jstests/hooks/run_validate_collections.js b/jstests/hooks/run_validate_collections.js
index 171e3cd7c00..eeabba7e10e 100644
--- a/jstests/hooks/run_validate_collections.js
+++ b/jstests/hooks/run_validate_collections.js
@@ -3,41 +3,40 @@
'use strict';
(function() {
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
- load('jstests/hooks/validate_collections.js'); // For CollectionValidator.
-
- assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
- const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
-
- const hostList = [];
- let setFCVHost;
-
- if (topology.type === Topology.kStandalone) {
- hostList.push(topology.mongod);
- setFCVHost = topology.mongod;
- } else if (topology.type === Topology.kReplicaSet) {
- hostList.push(...topology.nodes);
- setFCVHost = topology.primary;
- } else if (topology.type === Topology.kShardedCluster) {
- hostList.push(...topology.configsvr.nodes);
-
- for (let shardName of Object.keys(topology.shards)) {
- const shard = topology.shards[shardName];
-
- if (shard.type === Topology.kStandalone) {
- hostList.push(shard.mongod);
- } else if (shard.type === Topology.kReplicaSet) {
- hostList.push(...shard.nodes);
- } else {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
- }
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/hooks/validate_collections.js'); // For CollectionValidator.
+
+assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
+const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
+
+const hostList = [];
+let setFCVHost;
+
+if (topology.type === Topology.kStandalone) {
+ hostList.push(topology.mongod);
+ setFCVHost = topology.mongod;
+} else if (topology.type === Topology.kReplicaSet) {
+ hostList.push(...topology.nodes);
+ setFCVHost = topology.primary;
+} else if (topology.type === Topology.kShardedCluster) {
+ hostList.push(...topology.configsvr.nodes);
+
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
+
+ if (shard.type === Topology.kStandalone) {
+ hostList.push(shard.mongod);
+ } else if (shard.type === Topology.kReplicaSet) {
+ hostList.push(...shard.nodes);
+ } else {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
- // Any of the mongos instances can be used for setting FCV.
- setFCVHost = topology.mongos.nodes[0];
- } else {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
}
+ // Any of the mongos instances can be used for setting FCV.
+ setFCVHost = topology.mongos.nodes[0];
+} else {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+}
- new CollectionValidator().validateNodes(hostList, setFCVHost);
-
+new CollectionValidator().validateNodes(hostList, setFCVHost);
})();
diff --git a/jstests/hooks/validate_collections.js b/jstests/hooks/validate_collections.js
index 5bfd118dcb5..a5aa67eb72e 100644
--- a/jstests/hooks/validate_collections.js
+++ b/jstests/hooks/validate_collections.js
@@ -73,8 +73,8 @@ function CollectionValidator() {
continue;
}
const host = db.getMongo().host;
- print('Collection validation failed on host ' + host + ' with response: ' +
- tojson(res));
+ print('Collection validation failed on host ' + host +
+ ' with response: ' + tojson(res));
dumpCollection(coll, 100);
full_res.failed_res.push(res);
full_res.ok = 0;
diff --git a/jstests/httpinterface/sharding_configdb_on_default_ports.js b/jstests/httpinterface/sharding_configdb_on_default_ports.js
index 4ea1c461304..c8265245003 100644
--- a/jstests/httpinterface/sharding_configdb_on_default_ports.js
+++ b/jstests/httpinterface/sharding_configdb_on_default_ports.js
@@ -10,20 +10,20 @@
// CSRS config servers.
(function() {
- "use strict";
+"use strict";
- function getHostPart(hostAndPort) {
- return hostAndPort.substr(0, hostAndPort.lastIndexOf(':'));
- }
- var c1, c2, c3;
+function getHostPart(hostAndPort) {
+ return hostAndPort.substr(0, hostAndPort.lastIndexOf(':'));
+}
+var c1, c2, c3;
- // The config servers must support readConcern: majority to be run as a replica set, so
- // explicitly set storage engine to wiredTiger.
- c1 = MongoRunner.runMongod(
- {configsvr: "", port: 27019, replSet: "csrs", storageEngine: "wiredTiger"});
- assert.commandWorked(c1.adminCommand("replSetInitiate"));
- c2 = MongoRunner.runMongod({configsvr: "", storageEngine: "wiredTiger"});
- c3 = MongoRunner.runMongod({configsvr: "", storageEngine: "wiredTiger"});
+// The config servers must support readConcern: majority to be run as a replica set, so
+// explicitly set storage engine to wiredTiger.
+c1 = MongoRunner.runMongod(
+ {configsvr: "", port: 27019, replSet: "csrs", storageEngine: "wiredTiger"});
+assert.commandWorked(c1.adminCommand("replSetInitiate"));
+c2 = MongoRunner.runMongod({configsvr: "", storageEngine: "wiredTiger"});
+c3 = MongoRunner.runMongod({configsvr: "", storageEngine: "wiredTiger"});
- assert(MongoRunner.runMongos({configdb: "csrs/" + getHostPart(c1.host)}));
+assert(MongoRunner.runMongos({configdb: "csrs/" + getHostPart(c1.host)}));
}());
diff --git a/jstests/libs/change_stream_util.js b/jstests/libs/change_stream_util.js
index e1914ab3ce5..c505e47f39f 100644
--- a/jstests/libs/change_stream_util.js
+++ b/jstests/libs/change_stream_util.js
@@ -305,7 +305,6 @@ function ChangeStreamTest(_db, name = "ChangeStreamTest") {
}));
}
}
-
};
/**
diff --git a/jstests/libs/check_log.js b/jstests/libs/check_log.js
index 9940924e2dd..c21a885c2db 100644
--- a/jstests/libs/check_log.js
+++ b/jstests/libs/check_log.js
@@ -4,134 +4,134 @@
var checkLog;
(function() {
- "use strict";
+"use strict";
- if (checkLog) {
- return; // Protect against this file being double-loaded.
- }
+if (checkLog) {
+ return; // Protect against this file being double-loaded.
+}
- checkLog = (function() {
- let getGlobalLog = function(conn) {
- let cmdRes;
- try {
- cmdRes = conn.adminCommand({getLog: 'global'});
- } catch (e) {
- // Retry with network errors.
- print("checkLog ignoring failure: " + e);
- return null;
- }
+checkLog = (function() {
+ let getGlobalLog = function(conn) {
+ let cmdRes;
+ try {
+ cmdRes = conn.adminCommand({getLog: 'global'});
+ } catch (e) {
+            // Ignore network errors; the caller will retry.
+ print("checkLog ignoring failure: " + e);
+ return null;
+ }
- return assert.commandWorked(cmdRes).log;
- };
+ return assert.commandWorked(cmdRes).log;
+ };
- /*
- * Calls the 'getLog' function on the provided connection 'conn' to see if the provided msg
- * is found in the logs. Note: this function does not throw an exception, so the return
- * value should not be ignored.
- */
- const checkContainsOnce = function(conn, msg) {
- const logMessages = getGlobalLog(conn);
- if (logMessages === null) {
- return false;
- }
- for (let logMsg of logMessages) {
- if (logMsg.includes(msg)) {
- return true;
- }
- }
+ /*
+ * Calls the 'getLog' function on the provided connection 'conn' to see if the provided msg
+ * is found in the logs. Note: this function does not throw an exception, so the return
+ * value should not be ignored.
+ */
+ const checkContainsOnce = function(conn, msg) {
+ const logMessages = getGlobalLog(conn);
+ if (logMessages === null) {
return false;
- };
+ }
+ for (let logMsg of logMessages) {
+ if (logMsg.includes(msg)) {
+ return true;
+ }
+ }
+ return false;
+ };
- /*
- * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
- * the provided 'msg' is found in the logs, or it times out. Throws an exception on timeout.
- */
- let contains = function(conn, msg, timeout = 5 * 60 * 1000) {
- assert.soon(function() {
- return checkContainsOnce(conn, msg);
- }, 'Could not find log entries containing the following message: ' + msg, timeout, 300);
- };
+ /*
+ * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
+ * the provided 'msg' is found in the logs, or it times out. Throws an exception on timeout.
+ */
+ let contains = function(conn, msg, timeout = 5 * 60 * 1000) {
+ assert.soon(function() {
+ return checkContainsOnce(conn, msg);
+ }, 'Could not find log entries containing the following message: ' + msg, timeout, 300);
+ };
- /*
- * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
- * the provided 'msg' is found in the logs 'expectedCount' times, or it times out.
- * Throws an exception on timeout. If 'exact' is true, checks whether the count is exactly
- * equal to 'expectedCount'. Otherwise, checks whether the count is at least equal to
- * 'expectedCount'. Early returns when at least 'expectedCount' entries are found.
- */
- let containsWithCount = function(
- conn, msg, expectedCount, timeout = 5 * 60 * 1000, exact = true) {
- let expectedStr = exact ? 'exactly ' : 'at least ';
- assert.soon(
- function() {
- let count = 0;
- let logMessages = getGlobalLog(conn);
- if (logMessages === null) {
- return false;
+ /*
+ * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
+ * the provided 'msg' is found in the logs 'expectedCount' times, or it times out.
+ * Throws an exception on timeout. If 'exact' is true, checks whether the count is exactly
+ * equal to 'expectedCount'. Otherwise, checks whether the count is at least equal to
+     * 'expectedCount'. Returns early when at least 'expectedCount' entries are found.
+ */
+ let containsWithCount = function(
+ conn, msg, expectedCount, timeout = 5 * 60 * 1000, exact = true) {
+ let expectedStr = exact ? 'exactly ' : 'at least ';
+ assert.soon(
+ function() {
+ let count = 0;
+ let logMessages = getGlobalLog(conn);
+ if (logMessages === null) {
+ return false;
+ }
+ for (let i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ count++;
}
- for (let i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- count++;
- }
- if (!exact && count >= expectedCount) {
- print("checkLog found at least " + expectedCount +
- " log entries containing the following message: " + msg);
- return true;
- }
+ if (!exact && count >= expectedCount) {
+ print("checkLog found at least " + expectedCount +
+ " log entries containing the following message: " + msg);
+ return true;
}
+ }
- return exact ? expectedCount === count : expectedCount <= count;
- },
- 'Did not find ' + expectedStr + expectedCount + ' log entries containing the ' +
- 'following message: ' + msg,
- timeout,
- 300);
- };
+ return exact ? expectedCount === count : expectedCount <= count;
+ },
+ 'Did not find ' + expectedStr + expectedCount + ' log entries containing the ' +
+ 'following message: ' + msg,
+ timeout,
+ 300);
+ };
- /*
- * Similar to containsWithCount, but checks whether there are at least 'expectedCount'
- * instances of 'msg' in the logs.
- */
- let containsWithAtLeastCount = function(conn, msg, expectedCount, timeout = 5 * 60 * 1000) {
- containsWithCount(conn, msg, expectedCount, timeout, /*exact*/ false);
- };
+ /*
+ * Similar to containsWithCount, but checks whether there are at least 'expectedCount'
+ * instances of 'msg' in the logs.
+ */
+ let containsWithAtLeastCount = function(conn, msg, expectedCount, timeout = 5 * 60 * 1000) {
+ containsWithCount(conn, msg, expectedCount, timeout, /*exact*/ false);
+ };
- /*
- * Converts a scalar or object to a string format suitable for matching against log output.
- * Field names are not quoted, and by default strings which are not within an enclosing
- * object are not escaped. Similarly, integer values without an enclosing object are
- * serialized as integers, while those within an object are serialized as floats to one
- * decimal point. NumberLongs are unwrapped prior to serialization.
- */
- const formatAsLogLine = function(value, escapeStrings, toDecimal) {
- if (typeof value === "string") {
- return (escapeStrings ? `"${value}"` : value);
- } else if (typeof value === "number") {
- return (Number.isInteger(value) && toDecimal ? value.toFixed(1) : value);
- } else if (value instanceof NumberLong) {
- return `${value}`.match(/NumberLong..(.*)../m)[1];
- } else if (typeof value !== "object") {
- return value;
- } else if (Object.keys(value).length === 0) {
- return Array.isArray(value) ? "[]" : "{}";
- }
- let serialized = [];
- escapeStrings = toDecimal = true;
- for (let fieldName in value) {
- const valueStr = formatAsLogLine(value[fieldName], escapeStrings, toDecimal);
- serialized.push(Array.isArray(value) ? valueStr : `${fieldName}: ${valueStr}`);
- }
- return (Array.isArray(value) ? `[ ${serialized.join(', ')} ]`
- : `{ ${serialized.join(', ')} }`);
- };
+ /*
+ * Converts a scalar or object to a string format suitable for matching against log output.
+ * Field names are not quoted, and by default strings which are not within an enclosing
+ * object are not escaped. Similarly, integer values without an enclosing object are
+ * serialized as integers, while those within an object are serialized as floats to one
+ * decimal point. NumberLongs are unwrapped prior to serialization.
+ */
+ const formatAsLogLine = function(value, escapeStrings, toDecimal) {
+ if (typeof value === "string") {
+ return (escapeStrings ? `"${value}"` : value);
+ } else if (typeof value === "number") {
+ return (Number.isInteger(value) && toDecimal ? value.toFixed(1) : value);
+ } else if (value instanceof NumberLong) {
+ return `${value}`.match(/NumberLong..(.*)../m)[1];
+ } else if (typeof value !== "object") {
+ return value;
+ } else if (Object.keys(value).length === 0) {
+ return Array.isArray(value) ? "[]" : "{}";
+ }
+ let serialized = [];
+ escapeStrings = toDecimal = true;
+ for (let fieldName in value) {
+ const valueStr = formatAsLogLine(value[fieldName], escapeStrings, toDecimal);
+ serialized.push(Array.isArray(value) ? valueStr : `${fieldName}: ${valueStr}`);
+ }
+ return (Array.isArray(value) ? `[ ${serialized.join(', ')} ]`
+ : `{ ${serialized.join(', ')} }`);
+ };
- return {
- getGlobalLog: getGlobalLog,
- checkContainsOnce: checkContainsOnce,
- contains: contains,
- containsWithCount: containsWithCount,
- containsWithAtLeastCount: containsWithAtLeastCount,
- formatAsLogLine: formatAsLogLine
- };
- })();
+ return {
+ getGlobalLog: getGlobalLog,
+ checkContainsOnce: checkContainsOnce,
+ contains: contains,
+ containsWithCount: containsWithCount,
+ containsWithAtLeastCount: containsWithAtLeastCount,
+ formatAsLogLine: formatAsLogLine
+ };
+})();
})();
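A minimal usage sketch of the helpers above (the connection, log text, and counts are illustrative, not from the source):

// Block until the message shows up in conn's global log, or time out.
checkLog.contains(conn, "index build: done building");
// Require exactly two matching entries, or at least two with the relaxed variant.
checkLog.containsWithCount(conn, "slow query", 2);
checkLog.containsWithAtLeastCount(conn, "slow query", 2);
// formatAsLogLine renders values the way the server logs them, e.g.
// checkLog.formatAsLogLine({a: 1, b: "x"}) returns '{ a: 1.0, b: "x" }'.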
diff --git a/jstests/libs/csrs_upgrade_util.js b/jstests/libs/csrs_upgrade_util.js
index 8a4b3582f9f..9d4d158eca2 100644
--- a/jstests/libs/csrs_upgrade_util.js
+++ b/jstests/libs/csrs_upgrade_util.js
@@ -1,9 +1,9 @@
/**
-* This file defines a class, CSRSUpgradeCoordinator, which contains logic for spinning up a
-* sharded cluster using SCCC config servers and for upgrading that cluster to CSRS.
-* Include this file and use the CSRSUpgradeCoordinator class in any targetted jstests of csrs
-* upgrade behavior.
-*/
+ * This file defines a class, CSRSUpgradeCoordinator, which contains logic for spinning up a
+ * sharded cluster using SCCC config servers and for upgrading that cluster to CSRS.
+ * Include this file and use the CSRSUpgradeCoordinator class in any targeted jstests of CSRS
+ * upgrade behavior.
+ */
load("jstests/replsets/rslib.js");
@@ -204,5 +204,4 @@ var CSRSUpgradeCoordinator = function() {
jsTest.log("Shutting down final SCCC config server now that upgrade is complete");
MongoRunner.stopMongod(st.c1);
};
-
};
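A hedged sketch of how a targeted test includes this helper; the constructor is taken from the hunk above, while any upgrade-step methods are hypothetical and therefore omitted:

load("jstests/libs/csrs_upgrade_util.js");
var coordinator = new CSRSUpgradeCoordinator();
// ... drive the coordinator's SCCC setup and CSRS upgrade steps, asserting
// on cluster state between phases ...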
diff --git a/jstests/libs/dateutil.js b/jstests/libs/dateutil.js
index 485e07020ee..99e535eaa25 100644
--- a/jstests/libs/dateutil.js
+++ b/jstests/libs/dateutil.js
@@ -4,7 +4,6 @@
* Helpers for generating test dates for aggregations
*/
var DateUtil = (function() {
-
/**
* local function to add leading 0 to month or day if needed.
*/
diff --git a/jstests/libs/feature_compatibility_version.js b/jstests/libs/feature_compatibility_version.js
index b9bb718fe73..869dce125b7 100644
--- a/jstests/libs/feature_compatibility_version.js
+++ b/jstests/libs/feature_compatibility_version.js
@@ -46,8 +46,7 @@ function removeFCVDocument(adminDB) {
let dropOriginalAdminSystemVersionCollection =
{op: "c", ns: "admin.$cmd", ui: originalUUID, o: {drop: "admin.tmp_system_version"}};
assert.commandWorked(adminDB.runCommand({
- applyOps:
- [createNewAdminSystemVersionCollection, dropOriginalAdminSystemVersionCollection]
+ applyOps: [createNewAdminSystemVersionCollection, dropOriginalAdminSystemVersionCollection]
}));
res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
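For reference, each applyOps entry above follows the oplog entry format; a sketch of one such command entry (the UUID is a placeholder):

var dropOp = {
    op: "c",             // command entry
    ns: "admin.$cmd",    // commands are applied against the database's $cmd namespace
    ui: collectionUUID,  // placeholder: UUID tying the op to a specific collection
    o: {drop: "admin.tmp_system_version"},
};
assert.commandWorked(adminDB.runCommand({applyOps: [dropOp]}));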
diff --git a/jstests/libs/fsm_serial_client.js b/jstests/libs/fsm_serial_client.js
index 8279524d8e9..8c6c6fcb690 100644
--- a/jstests/libs/fsm_serial_client.js
+++ b/jstests/libs/fsm_serial_client.js
@@ -21,7 +21,7 @@ runWorkloadsSerially(workloadList.filter(function(file) {
{},
{dbNamePrefix: dbNamePrefix},
{
- keepExistingDatabases: true,
- dropDatabaseBlacklist: fsmDbBlacklist,
- validateCollections: validateCollectionsOnCleanup
+ keepExistingDatabases: true,
+ dropDatabaseBlacklist: fsmDbBlacklist,
+ validateCollections: validateCollectionsOnCleanup
});
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index d5de7aa70a7..fd1fe36e799 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -24,7 +24,6 @@ GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds) {
(Random.rand() * (range - eps) + eps) + indexBounds.min
];
}
-
};
GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
@@ -74,7 +73,7 @@ GeoNearRandomTest.prototype.testPt = function(pt, opts) {
let last = runQuery(1);
for (var i = 2; i <= opts.nToTest; i++) {
let ret = runQuery(i);
- this.assertIsPrefix(last, ret, `Unexpected result when comparing ${i-1} and ${i}`);
+ this.assertIsPrefix(last, ret, `Unexpected result when comparing ${i - 1} and ${i}`);
// Make sure distances are in increasing order.
assert.gte(ret[ret.length - 1].dis, last[last.length - 1].dis);
diff --git a/jstests/libs/get_index_helpers.js b/jstests/libs/get_index_helpers.js
index 77468ab17cb..15a18fa7409 100644
--- a/jstests/libs/get_index_helpers.js
+++ b/jstests/libs/get_index_helpers.js
@@ -4,7 +4,6 @@
* Helpers for filtering the index specifications returned by DBCollection.prototype.getIndexes().
*/
var GetIndexHelpers = (function() {
-
/**
* Returns the index specification with the name 'indexName' if it is present in the
* 'indexSpecs' array, and returns null otherwise.
@@ -17,8 +16,8 @@ var GetIndexHelpers = (function() {
const found = indexSpecs.filter(spec => spec.name === indexName);
if (found.length > 1) {
- throw new Error("Found multiple indexes with name '" + indexName + "': " +
- tojson(indexSpecs));
+ throw new Error("Found multiple indexes with name '" + indexName +
+ "': " + tojson(indexSpecs));
}
return (found.length === 1) ? found[0] : null;
}
@@ -38,9 +37,9 @@ var GetIndexHelpers = (function() {
if (!collationWasSpecified) {
if (foundByKeyPattern.length > 1) {
- throw new Error("Found multiple indexes with key pattern " + tojson(keyPattern) +
- " and 'collation' parameter was not specified: " +
- tojson(indexSpecs));
+ throw new Error(
+ "Found multiple indexes with key pattern " + tojson(keyPattern) +
+ " and 'collation' parameter was not specified: " + tojson(indexSpecs));
}
return (foundByKeyPattern.length === 1) ? foundByKeyPattern[0] : null;
}
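The lookup shown above returns the single matching spec or null and throws on duplicate names; a sketch of a call site (the exported name findByName is an assumption, since the hunk omits the signature):

const spec = GetIndexHelpers.findByName(db.c.getIndexes(), "a_1");
assert(spec === null || spec.name === "a_1");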
diff --git a/jstests/libs/json_schema_test_runner.js b/jstests/libs/json_schema_test_runner.js
index 8955f188c2a..b82cc9365ea 100644
--- a/jstests/libs/json_schema_test_runner.js
+++ b/jstests/libs/json_schema_test_runner.js
@@ -2,49 +2,47 @@
* Test runner responsible for parsing and executing a JSON-Schema-Test-Suite json file.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.json_schema_test_corpus;
- coll.drop();
+const coll = db.json_schema_test_corpus;
+coll.drop();
- const jsonFilename = jsTestOptions().jsonSchemaTestFile;
+const jsonFilename = jsTestOptions().jsonSchemaTestFile;
- if (jsonFilename === undefined) {
- throw new Error('JSON Schema tests must be run through resmoke.py');
- }
+if (jsonFilename === undefined) {
+ throw new Error('JSON Schema tests must be run through resmoke.py');
+}
+
+function runSchemaTest(test, schema, banFromTopLevel) {
+ assert(test.hasOwnProperty("data"), "JSON Schema test requires 'data'");
+ assert(test.hasOwnProperty("valid"), "JSON Schema test requires 'valid'");
+ const data = test["data"];
+ const valid = test["valid"];
- function runSchemaTest(test, schema, banFromTopLevel) {
- assert(test.hasOwnProperty("data"), "JSON Schema test requires 'data'");
- assert(test.hasOwnProperty("valid"), "JSON Schema test requires 'valid'");
- const data = test["data"];
- const valid = test["valid"];
-
- try {
- assertSchemaMatch(coll,
- {properties: {schema_test_wrapper: schema}},
- {schema_test_wrapper: data},
- valid);
-
- // Run against a top-level schema if the data is an object, since MongoDB only stores
- // records as documents.
- // (Note: JS notion of an 'object' includes arrays and null.)
- if (typeof data === "object" && !Array.isArray(data) && data !== null &&
- banFromTopLevel !== true) {
- assertSchemaMatch(coll, schema, data, valid);
- }
- } catch (e) {
- throw new Error(tojson(e) + "\n\nJSON Schema test failed for schema " + tojson(schema) +
- " and data " + tojson(data));
+ try {
+ assertSchemaMatch(
+ coll, {properties: {schema_test_wrapper: schema}}, {schema_test_wrapper: data}, valid);
+
+ // Run against a top-level schema if the data is an object, since MongoDB only stores
+ // records as documents.
+ // (Note: JS notion of an 'object' includes arrays and null.)
+ if (typeof data === "object" && !Array.isArray(data) && data !== null &&
+ banFromTopLevel !== true) {
+ assertSchemaMatch(coll, schema, data, valid);
}
+ } catch (e) {
+ throw new Error(tojson(e) + "\n\nJSON Schema test failed for schema " + tojson(schema) +
+ " and data " + tojson(data));
}
-
- const testGroupList = JSON.parse(cat(jsonFilename));
- testGroupList.forEach(function(testGroup) {
- assert(testGroup.hasOwnProperty("schema"), "JSON Schema test requires a 'schema'");
- assert(testGroup.hasOwnProperty("tests"), "JSON Schema test requires a 'tests' list");
- testGroup["tests"].forEach(
- test => runSchemaTest(test, testGroup["schema"], testGroup["banFromTopLevel"]));
- });
+}
+
+const testGroupList = JSON.parse(cat(jsonFilename));
+testGroupList.forEach(function(testGroup) {
+ assert(testGroup.hasOwnProperty("schema"), "JSON Schema test requires a 'schema'");
+ assert(testGroup.hasOwnProperty("tests"), "JSON Schema test requires a 'tests' list");
+ testGroup["tests"].forEach(
+ test => runSchemaTest(test, testGroup["schema"], testGroup["banFromTopLevel"]));
+});
}());
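For reference, one entry of the test-suite file consumed above looks like this (field names come from the runner; the values are illustrative):

const exampleGroup = {
    schema: {type: "object", properties: {a: {type: "number"}}},
    banFromTopLevel: false,  // optional; set true to skip the top-level schema pass
    tests: [
        {data: {a: 1}, valid: true},
        {data: {a: "str"}, valid: false},
    ],
};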
diff --git a/jstests/libs/jstestfuzz/check_for_interrupt_hook.js b/jstests/libs/jstestfuzz/check_for_interrupt_hook.js
index 8612824a637..0a2a3add0d0 100644
--- a/jstests/libs/jstestfuzz/check_for_interrupt_hook.js
+++ b/jstests/libs/jstestfuzz/check_for_interrupt_hook.js
@@ -2,46 +2,46 @@
// the failpoint for the duration of the serverInfo section of the fuzzer's preamble.
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/jstestfuzz/hook_utils.js');
+load('jstests/libs/jstestfuzz/hook_utils.js');
- let threadName;
+let threadName;
- const disableCheckForInterruptFailFP = function() {
- // There is no synchronization between fuzzer clients so this hook cannot run with the
- // concurrent fuzzer.
- assert.eq(TestData.numTestClients,
- 1,
- 'Cannot run the check for interrupt hook when there is more than 1 client');
+const disableCheckForInterruptFailFP = function() {
+ // There is no synchronization between fuzzer clients so this hook cannot run with the
+ // concurrent fuzzer.
+ assert.eq(TestData.numTestClients,
+ 1,
+ 'Cannot run the check for interrupt hook when there is more than 1 client');
- const myUriRes = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
- const myUri = myUriRes.you;
+ const myUriRes = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
+ const myUri = myUriRes.you;
- const curOpRes = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
- threadName = curOpRes.inprog[0].desc;
+ const curOpRes = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
+ threadName = curOpRes.inprog[0].desc;
- assert.commandWorked(db.adminCommand({
- configureFailPoint: 'checkForInterruptFail',
- mode: 'off',
- }));
- };
+ assert.commandWorked(db.adminCommand({
+ configureFailPoint: 'checkForInterruptFail',
+ mode: 'off',
+ }));
+};
- const enableCheckForInterruptFailFP = function() {
- const chance = TestData.checkForInterruptFailpointChance;
+const enableCheckForInterruptFailFP = function() {
+ const chance = TestData.checkForInterruptFailpointChance;
- assert.gte(chance, 0, "checkForInterruptFailpointChance must be >= 0");
- assert.lte(chance, 1, "checkForInterruptFailpointChance must be <= 1");
+ assert.gte(chance, 0, "checkForInterruptFailpointChance must be >= 0");
+ assert.lte(chance, 1, "checkForInterruptFailpointChance must be <= 1");
- assert.commandWorked(db.adminCommand({
- configureFailPoint: 'checkForInterruptFail',
- mode: 'alwaysOn',
- data: {threadName, chance},
- }));
- };
+ assert.commandWorked(db.adminCommand({
+ configureFailPoint: 'checkForInterruptFail',
+ mode: 'alwaysOn',
+ data: {threadName, chance},
+ }));
+};
- defineFuzzerHooks({
- beforeServerInfo: disableCheckForInterruptFailFP,
- afterServerInfo: enableCheckForInterruptFailFP,
- });
+defineFuzzerHooks({
+ beforeServerInfo: disableCheckForInterruptFailFP,
+ afterServerInfo: enableCheckForInterruptFailFP,
+});
})();
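A sketch of the knobs this hook consumes; the TestData values are normally set by resmoke and are illustrative here:

TestData.numTestClients = 1;  // the hook refuses to run with more than one client
TestData.checkForInterruptFailpointChance = 0.05;  // must lie in [0, 1]
// After the serverInfo preamble, the hook re-arms the failpoint roughly as:
// db.adminCommand({configureFailPoint: 'checkForInterruptFail', mode: 'alwaysOn',
//                  data: {threadName: threadName, chance: chance}});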
diff --git a/jstests/libs/kill_sessions.js b/jstests/libs/kill_sessions.js
index 6ee642eddbc..ae9978c271e 100644
--- a/jstests/libs/kill_sessions.js
+++ b/jstests/libs/kill_sessions.js
@@ -403,41 +403,40 @@ var _kill_sessions_api_module = (function() {
});
[[
- // Verifies that we can killSessions by lsid
- "killSessions",
- function(x) {
- if (!x.uid) {
- return {
- id: x.id,
- uid: computeSHA256Block(""),
- };
- } else {
- return x;
- }
- }
+ // Verifies that we can killSessions by lsid
+ "killSessions",
+ function(x) {
+ if (!x.uid) {
+ return {
+ id: x.id,
+ uid: computeSHA256Block(""),
+ };
+ } else {
+ return x;
+ }
+ }
],
[
- // Verifies that we can kill by pattern by lsid
- "killAllSessionsByPattern",
- function(x) {
- if (!x.uid) {
- return {
- lsid: {
- id: x.id,
- uid: computeSHA256Block(""),
- }
- };
- } else {
- return {lsid: x};
- }
- }
+ // Verifies that we can kill by pattern by lsid
+ "killAllSessionsByPattern",
+ function(x) {
+ if (!x.uid) {
+ return {
+ lsid: {
+ id: x.id,
+ uid: computeSHA256Block(""),
+ }
+ };
+ } else {
+ return {lsid: x};
+ }
+ }
]].forEach(function(cmd) {
noAuth = noAuth.concat(makeNoAuthArgKill.apply({}, cmd));
});
KillSessionsTestHelper.runNoAuth = function(
clientToExecuteVia, clientToKillVia, clientsToVerifyVia) {
-
var fixture = new Fixture(clientToExecuteVia, clientToKillVia, clientsToVerifyVia);
for (var i = 0; i < noAuth.length; ++i) {
@@ -564,102 +563,102 @@ var _kill_sessions_api_module = (function() {
// Tests for makeAuthNoArgKill
[[
- // We can kill our own sessions
- "killSessions",
- "simple",
- "simple",
+ // We can kill our own sessions
+ "killSessions",
+ "simple",
+ "simple",
],
[
- // We can kill all sessions
- "killAllSessions",
- "simple",
- "killAny",
+ // We can kill all sessions
+ "killAllSessions",
+ "simple",
+ "killAny",
],
[
- // We can kill all sessions by pattern
- "killAllSessionsByPattern",
- "simple",
- "killAny",
+ // We can kill all sessions by pattern
+ "killAllSessionsByPattern",
+ "simple",
+ "killAny",
]].forEach(function(cmd) {
auth = auth.concat(makeAuthNoArgKill.apply({}, cmd));
});
// Tests for makeAuthArgKill
[[
- // We can kill our own sessions by id (spoofing our own id)
- "killSessions",
- "simple",
- "simple",
- "killAny",
- function() {
- return function(x) {
- if (!x.uid) {
- return {
- id: x.id,
- uid: computeSHA256Block("simple@admin"),
- };
- } else {
- return x;
- }
- };
- }
+ // We can kill our own sessions by id (spoofing our own id)
+ "killSessions",
+ "simple",
+ "simple",
+ "killAny",
+ function() {
+ return function(x) {
+ if (!x.uid) {
+ return {
+ id: x.id,
+ uid: computeSHA256Block("simple@admin"),
+ };
+ } else {
+ return x;
+ }
+ };
+ }
],
[
- // We can kill our own sessions without spoofing
- "killSessions",
- "simple",
- "simple",
- "simple",
- function() {
- return function(x) {
- return x;
- };
- }
+ // We can kill our own sessions without spoofing
+ "killSessions",
+ "simple",
+ "simple",
+ "simple",
+ function() {
+ return function(x) {
+ return x;
+ };
+ }
],
[
- // We can kill by pattern by id
- "killAllSessionsByPattern",
- "simple",
- "simple",
- "killAny",
- function() {
- return function(x) {
- if (!x.uid) {
- return {
- lsid: {
- id: x.id,
- uid: computeSHA256Block("simple@admin"),
- }
- };
- } else {
- return {lsid: x};
- }
- };
- }
+ // We can kill by pattern by id
+ "killAllSessionsByPattern",
+ "simple",
+ "simple",
+ "killAny",
+ function() {
+ return function(x) {
+ if (!x.uid) {
+ return {
+ lsid: {
+ id: x.id,
+ uid: computeSHA256Block("simple@admin"),
+ }
+ };
+ } else {
+ return {lsid: x};
+ }
+ };
+ }
],
[
- // We can kill any by user
- "killAllSessions",
- "simple",
- "simple2",
- "killAny",
- function(user) {
- return function(x) {
- return {db: "admin", user: user};
- };
- }
+ // We can kill any by user
+ "killAllSessions",
+ "simple",
+ "simple2",
+ "killAny",
+ function(user) {
+ return function(x) {
+ return {db: "admin", user: user};
+ };
+ }
],
[
- // We can kill any by pattern by user
- "killAllSessionsByPattern",
- "simple",
- "simple2",
- "killAny",
- function(user) {
- return function(x) {
- return {uid: computeSHA256Block(user + "@admin")};
- };
- }
+ // We can kill any by pattern by user
+ "killAllSessionsByPattern",
+ "simple",
+ "simple2",
+ "killAny",
+ function(user) {
+ return function(x) {
+ return {uid: computeSHA256Block(user + "@admin")};
+ };
+ }
]].forEach(function(cmd) {
auth = auth.concat(makeAuthArgKill.apply({}, cmd));
});
@@ -683,32 +682,32 @@ var _kill_sessions_api_module = (function() {
// Tests for makeAuthArgKillFailure
[[
- // We can't kill another users sessions
- "killSessions",
- "simple",
- "simple2",
- function(user) {
- return function(x) {
- return {
- id: x.id,
- uid: computeSHA256Block(user + "@admin"),
- };
- };
- },
+ // We can't kill another user's sessions
+ "killSessions",
+ "simple",
+ "simple2",
+ function(user) {
+ return function(x) {
+ return {
+ id: x.id,
+ uid: computeSHA256Block(user + "@admin"),
+ };
+ };
+ },
],
[
- // We can't impersonate without impersonate
- "killAllSessionsByPattern",
- "simple",
- "killAny",
- function(user) {
- return function(x) {
- return {
- users: {},
- roles: {},
- };
- };
- },
+ // We can't impersonate without the impersonate privilege
+ "killAllSessionsByPattern",
+ "simple",
+ "killAny",
+ function(user) {
+ return function(x) {
+ return {
+ users: {},
+ roles: {},
+ };
+ };
+ },
]].forEach(function(cmd) {
auth = auth.concat(makeAuthArgKillFailure.apply({}, cmd));
});
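The fixtures above reduce to command shapes like the following; the session id here is hypothetical:

const lsid = {id: UUID(), uid: computeSHA256Block("")};  // "" for an unauthenticated user
// Kill one session by lsid:
assert.commandWorked(db.adminCommand({killSessions: [lsid]}));
// Kill by pattern, matching the full lsid:
assert.commandWorked(db.adminCommand({killAllSessionsByPattern: [{lsid: lsid}]}));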
diff --git a/jstests/libs/mongoebench.js b/jstests/libs/mongoebench.js
index f6feb4eb9f0..a0d6f1b512d 100644
--- a/jstests/libs/mongoebench.js
+++ b/jstests/libs/mongoebench.js
@@ -1,7 +1,6 @@
"use strict";
var {runMongoeBench} = (function() {
-
/**
* Spawns a mongoebench process with the specified options.
*
diff --git a/jstests/libs/mql_model_mongod_test_runner.js b/jstests/libs/mql_model_mongod_test_runner.js
index f19e2ce1f12..4485c81cdc1 100644
--- a/jstests/libs/mql_model_mongod_test_runner.js
+++ b/jstests/libs/mql_model_mongod_test_runner.js
@@ -2,56 +2,56 @@
* Test runner responsible for parsing and executing a MQL MongoD model test json file.
*/
(function() {
- "use strict";
+"use strict";
- const jsonFilename = jsTestOptions().mqlTestFile;
- const mqlRootPath = jsTestOptions().mqlRootPath;
+const jsonFilename = jsTestOptions().mqlTestFile;
+const mqlRootPath = jsTestOptions().mqlRootPath;
- if (jsonFilename === undefined) {
- throw new Error('Undefined JSON file name: MQL Model tests must be run through resmoke.py');
- }
+if (jsonFilename === undefined) {
+ throw new Error('Undefined JSON file name: MQL Model tests must be run through resmoke.py');
+}
- // Populate collections with data fetched from the dataFile.
- function populateCollections(dataFile) {
- const data = JSON.parse(cat(mqlRootPath + dataFile));
+// Populate collections with data fetched from the dataFile.
+function populateCollections(dataFile) {
+ const data = JSON.parse(cat(mqlRootPath + dataFile));
- data.forEach(function(singleColl) {
- assert(singleColl.hasOwnProperty("namespace"), "MQL data model requires a 'namespace'");
- assert(singleColl.hasOwnProperty("data"), "MQL data model requires a 'data'");
+ data.forEach(function(singleColl) {
+ assert(singleColl.hasOwnProperty("namespace"), "MQL data model requires a 'namespace'");
+ assert(singleColl.hasOwnProperty("data"), "MQL data model requires a 'data'");
- const coll = db.getCollection(singleColl["namespace"]);
- coll.drop();
+ const coll = db.getCollection(singleColl["namespace"]);
+ coll.drop();
- singleColl["data"].forEach(function(doc) {
- assert.commandWorked(coll.insert(doc));
- });
+ singleColl["data"].forEach(function(doc) {
+ assert.commandWorked(coll.insert(doc));
});
- }
+ });
+}
- // Run a single find test.
- function runFindTest(testFile, dataFile, expected) {
- populateCollections(dataFile);
+// Run a single find test.
+function runFindTest(testFile, dataFile, expected) {
+ populateCollections(dataFile);
- const test = JSON.parse(cat(mqlRootPath + testFile));
+ const test = JSON.parse(cat(mqlRootPath + testFile));
- const results = db.getCollection(test["find"]).find(test["filter"], {_id: 0}).toArray();
+ const results = db.getCollection(test["find"]).find(test["filter"], {_id: 0}).toArray();
- assert.eq(results, expected);
- }
+ assert.eq(results, expected);
+}
- // Read a list of tests from the jsonFilename and execute them.
- const testList = JSON.parse(cat(jsonFilename));
- testList.forEach(function(singleTest) {
- if (singleTest.hasOwnProperty("match")) {
- // Skip the match test type as it is not directly supported by mongod.
- } else if (singleTest.hasOwnProperty("find")) {
- // Run the find test type.
- assert(singleTest.hasOwnProperty("data"), "MQL model test requires a 'data'");
- assert(singleTest.hasOwnProperty("expected"), "MQL model test requires a 'expected'");
-
- runFindTest(singleTest["find"], singleTest["data"], singleTest["expected"]);
- } else {
- throw new Error("Unknown test type: " + tojson(singleTest));
- }
- });
+// Read a list of tests from the jsonFilename and execute them.
+const testList = JSON.parse(cat(jsonFilename));
+testList.forEach(function(singleTest) {
+ if (singleTest.hasOwnProperty("match")) {
+ // Skip the match test type as it is not directly supported by mongod.
+ } else if (singleTest.hasOwnProperty("find")) {
+ // Run the find test type.
+ assert(singleTest.hasOwnProperty("data"), "MQL model test requires a 'data'");
+ assert(singleTest.hasOwnProperty("expected"), "MQL model test requires a 'expected'");
+
+ runFindTest(singleTest["find"], singleTest["data"], singleTest["expected"]);
+ } else {
+ throw new Error("Unknown test type: " + tojson(singleTest));
+ }
+});
}());
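For reference, the three JSON shapes the runner above consumes (file names and values are illustrative):

// entry in jsonFilename: {"find": "t1.json", "data": "d1.json", "expected": [{"x": 1}]}
// data file d1.json:     [{"namespace": "c1", "data": [{"x": 1}, {"x": -1}]}]
// test file t1.json:     {"find": "c1", "filter": {"x": {"$gt": 0}}}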
diff --git a/jstests/libs/override_methods/causally_consistent_index_builds.js b/jstests/libs/override_methods/causally_consistent_index_builds.js
index ec20a87f588..cacd1312f80 100644
--- a/jstests/libs/override_methods/causally_consistent_index_builds.js
+++ b/jstests/libs/override_methods/causally_consistent_index_builds.js
@@ -3,48 +3,48 @@
* TODO: SERVER-38961 This override is not necessary when two-phase index builds are complete.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- // This override runs a collMod after a createIndexes command. After collMod completes
- // we can guarantee the background index build started earlier has also completed. We update the
- // command response operationTime and $clusterTime so causally consistent reads only read from
- // that point onwards.
- function runCommandWithCollMod(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+// This override runs a collMod after a createIndexes command. After collMod completes
+// we can guarantee the background index build started earlier has also completed. We update the
+// command response operationTime and $clusterTime so causally consistent reads only read from
+// that point onwards.
+function runCommandWithCollMod(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- let res = func.apply(conn, makeFuncArgs(commandObj));
- if (commandName !== "createIndexes") {
- return res;
- }
- if (!res.ok) {
- return res;
- }
+ let res = func.apply(conn, makeFuncArgs(commandObj));
+ if (commandName !== "createIndexes") {
+ return res;
+ }
+ if (!res.ok) {
+ return res;
+ }
- let collModCmd = {collMod: commandObj[commandName]};
- let collModRes = func.apply(conn, makeFuncArgs(collModCmd));
+ let collModCmd = {collMod: commandObj[commandName]};
+ let collModRes = func.apply(conn, makeFuncArgs(collModCmd));
- // If a follow-up collMod fails, another command was likely able to execute after the
- // createIndexes command. That means it is safe to use the latest operationTime for
- // causal consistency purposes.
- if (!collModRes.ok) {
- print('note: ignoring collMod failure after sending createIndex command: ' +
- tojson(collModRes));
- }
+ // If a follow-up collMod fails, another command was likely able to execute after the
+ // createIndexes command. That means it is safe to use the latest operationTime for
+ // causal consistency purposes.
+ if (!collModRes.ok) {
+ print('note: ignoring collMod failure after sending createIndex command: ' +
+ tojson(collModRes));
+ }
- // Overwrite the createIndex command's operation and cluster times, so that the owning
- // session can perform causal reads.
- if (collModRes.hasOwnProperty("operationTime")) {
- res.operationTime = collModRes["operationTime"];
- }
- if (collModRes.hasOwnProperty("$clusterTime")) {
- res.$clusterTime = collModRes["$clusterTime"];
- }
- return res;
+ // Overwrite the createIndex command's operation and cluster times, so that the owning
+ // session can perform causal reads.
+ if (collModRes.hasOwnProperty("operationTime")) {
+ res.operationTime = collModRes["operationTime"];
+ }
+ if (collModRes.hasOwnProperty("$clusterTime")) {
+ res.$clusterTime = collModRes["$clusterTime"];
}
+ return res;
+}
- OverrideHelpers.overrideRunCommand(runCommandWithCollMod);
+OverrideHelpers.overrideRunCommand(runCommandWithCollMod);
})();
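In effect the override turns one command into two; a sketch of the sequence for a hypothetical collection "c":

// Command issued by the test:
db.runCommand({createIndexes: "c", indexes: [{key: {a: 1}, name: "a_1"}]});
// Follow-up the override appends; its operationTime and $clusterTime are
// copied back onto the createIndexes response:
db.runCommand({collMod: "c"});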
diff --git a/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js b/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js
index 355e3f53a55..57ab445896d 100644
--- a/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js
+++ b/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js
@@ -6,38 +6,36 @@
* InvalidOptions or TransientTransactionError.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/error_code_utils.js");
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/error_code_utils.js");
+load("jstests/libs/override_methods/override_helpers.js");
- function runCommandCheckForOperationNotSupportedInTransaction(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- let res = func.apply(conn, makeFuncArgs(commandObj));
- const isTransient =
- (res.errorLabels && res.errorLabels.includes('TransientTransactionError') &&
- !includesErrorCode(res, ErrorCodes.NoSuchTransaction));
+function runCommandCheckForOperationNotSupportedInTransaction(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ let res = func.apply(conn, makeFuncArgs(commandObj));
+ const isTransient = (res.errorLabels && res.errorLabels.includes('TransientTransactionError') &&
+ !includesErrorCode(res, ErrorCodes.NoSuchTransaction));
- const isNotSupported =
- (includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction) ||
- includesErrorCode(res, ErrorCodes.InvalidOptions));
+ const isNotSupported = (includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction) ||
+ includesErrorCode(res, ErrorCodes.InvalidOptions));
- if (isTransient || isNotSupported) {
- // Generate an exception, store some info for fsm.js to inspect, and rethrow.
- try {
- assert.commandWorked(res);
- } catch (ex) {
- ex.isTransient = isTransient;
- ex.isNotSupported = isNotSupported;
- throw ex;
- }
+ if (isTransient || isNotSupported) {
+ // Generate an exception, store some info for fsm.js to inspect, and rethrow.
+ try {
+ assert.commandWorked(res);
+ } catch (ex) {
+ ex.isTransient = isTransient;
+ ex.isNotSupported = isNotSupported;
+ throw ex;
}
-
- return res;
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js");
+ return res;
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js");
- OverrideHelpers.overrideRunCommand(runCommandCheckForOperationNotSupportedInTransaction);
+OverrideHelpers.overrideRunCommand(runCommandCheckForOperationNotSupportedInTransaction);
})();
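A sketch of how a caller such as fsm.js can consume the annotated exception; the command shown is illustrative:

try {
    assert.commandWorked(sessionDb.runCommand({create: "c"}));  // e.g. unsupported in a txn
} catch (ex) {
    if (ex.isNotSupported) {
        print("skipping operation not supported in a transaction");
    } else if (ex.isTransient) {
        print("observed a transient transaction error");
    } else {
        throw ex;
    }
}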
diff --git a/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js b/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js
index ec9894303b4..4e3b6ae8599 100644
--- a/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js
+++ b/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js
@@ -123,7 +123,7 @@ ShardingTest.prototype.checkUUIDsConsistentAcrossCluster = function() {
for (let authoritativeCollMetadata of authoritativeCollMetadataArr) {
const ns = authoritativeCollMetadata._id;
- const[dbName, collName] = parseNs(ns);
+ const [dbName, collName] = parseNs(ns);
for (let shardConnString of authoritativeCollMetadata.shardConnStrings) {
// A connection the shard may not be cached in ShardingTest if the shard was added
diff --git a/jstests/libs/override_methods/continuous_stepdown.js b/jstests/libs/override_methods/continuous_stepdown.js
index cbd5687dbda..9c7881f9d4a 100644
--- a/jstests/libs/override_methods/continuous_stepdown.js
+++ b/jstests/libs/override_methods/continuous_stepdown.js
@@ -28,392 +28,387 @@
let ContinuousStepdown;
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // ScopedThread and CountDownLatch
- load("jstests/replsets/rslib.js"); // reconfig
+load("jstests/libs/parallelTester.js"); // ScopedThread and CountDownLatch
+load("jstests/replsets/rslib.js"); // reconfig
+
+/**
+ * Helper class to manage the ScopedThread instance that will continuously step down the primary
+ * node.
+ */
+const StepdownThread = function() {
+ let _counter = null;
+ let _thread = null;
/**
- * Helper class to manage the ScopedThread instance that will continuously step down the primary
- * node.
+ * This function is intended to be called in a separate thread and it continuously
+ * steps down the current primary for a number of attempts.
+ *
+ * @param {CountDownLatch} stopCounter Object, which can be used to stop the thread.
+ *
+ * @param {string} seedNode The connection string of a node from which to discover
+ * the primary of the replica set.
+ *
+ * @param {Object} options Configuration object with the following fields:
+ * stepdownDurationSecs {integer}: The number of seconds after stepping down the
+ * primary for which the node is not re-electable.
+ * stepdownIntervalMS {integer}: The number of milliseconds to wait after
+ * issuing a step down command.
+ *
+ * @return Object with the following fields:
+ * ok {integer}: 0 if it failed, 1 if it succeeded.
+ * error {string}: Only present if ok == 0. Contains the cause for the error.
+ * stack {string}: Only present if ok == 0. Contains the stack at the time of
+ * the error.
*/
- const StepdownThread = function() {
- let _counter = null;
- let _thread = null;
-
- /**
- * This function is intended to be called in a separate thread and it continuously
- * steps down the current primary for a number of attempts.
- *
- * @param {CountDownLatch} stopCounter Object, which can be used to stop the thread.
- *
- * @param {string} seedNode The connection string of a node from which to discover
- * the primary of the replica set.
- *
- * @param {Object} options Configuration object with the following fields:
- * stepdownDurationSecs {integer}: The number of seconds after stepping down the
- * primary for which the node is not re-electable.
- * stepdownIntervalMS {integer}: The number of milliseconds to wait after
- * issuing a step down command.
- *
- * @return Object with the following fields:
- * ok {integer}: 0 if it failed, 1 if it succeeded.
- * error {string}: Only present if ok == 0. Contains the cause for the error.
- * stack {string}: Only present if ok == 0. Contains the stack at the time of
- * the error.
- */
- function _continuousPrimaryStepdownFn(stopCounter, seedNode, options) {
- "use strict";
+ function _continuousPrimaryStepdownFn(stopCounter, seedNode, options) {
+ "use strict";
- print("*** Continuous stepdown thread running with seed node " + seedNode);
+ print("*** Continuous stepdown thread running with seed node " + seedNode);
- try {
- // The config primary may unexpectedly step down during startup if under heavy
- // load and too slowly processing heartbeats.
- const replSet = new ReplSetTest(seedNode);
+ try {
+ // The config primary may unexpectedly step down during startup if under heavy
+ // load and too slowly processing heartbeats.
+ const replSet = new ReplSetTest(seedNode);
- let primary = replSet.getPrimary();
+ let primary = replSet.getPrimary();
- while (stopCounter.getCount() > 0) {
- print("*** Stepping down " + primary);
+ while (stopCounter.getCount() > 0) {
+ print("*** Stepping down " + primary);
- // The command may fail if the node is no longer primary or is in the process of
- // stepping down.
- assert.commandWorkedOrFailedWithCode(
- primary.adminCommand(
- {replSetStepDown: options.stepdownDurationSecs, force: true}),
- [ErrorCodes.NotMaster, ErrorCodes.ConflictingOperationInProgress]);
+ // The command may fail if the node is no longer primary or is in the process of
+ // stepping down.
+ assert.commandWorkedOrFailedWithCode(
+ primary.adminCommand(
+ {replSetStepDown: options.stepdownDurationSecs, force: true}),
+ [ErrorCodes.NotMaster, ErrorCodes.ConflictingOperationInProgress]);
- // Wait for primary to get elected and allow the test to make some progress
- // before attempting another stepdown.
- if (stopCounter.getCount() > 0) {
- primary = replSet.getPrimary();
- }
-
- if (stopCounter.getCount() > 0) {
- sleep(options.stepdownIntervalMS);
- }
+ // Wait for primary to get elected and allow the test to make some progress
+ // before attempting another stepdown.
+ if (stopCounter.getCount() > 0) {
+ primary = replSet.getPrimary();
}
- print("*** Continuous stepdown thread completed successfully");
- return {ok: 1};
- } catch (e) {
- print("*** Continuous stepdown thread caught exception: " + tojson(e));
- return {ok: 0, error: e.toString(), stack: e.stack};
+ if (stopCounter.getCount() > 0) {
+ sleep(options.stepdownIntervalMS);
+ }
}
+
+ print("*** Continuous stepdown thread completed successfully");
+ return {ok: 1};
+ } catch (e) {
+ print("*** Continuous stepdown thread caught exception: " + tojson(e));
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+ }
+
+ /**
+ * Returns true if the stepdown thread has been created and started.
+ */
+ this.hasStarted = function() {
+ return !!_thread;
+ };
+
+ /**
+ * Spawns a ScopedThread using the given seedNode to discover the replica set.
+ */
+ this.start = function(seedNode, options) {
+ if (_thread) {
+ throw new Error("Continuous stepdown thread is already active");
+ }
+
+ _counter = new CountDownLatch(1);
+ _thread = new ScopedThread(_continuousPrimaryStepdownFn, _counter, seedNode, options);
+ _thread.start();
+ };
+
+ /**
+ * Sets the stepdown thread's counter to 0, and waits for it to finish. Throws if the
+ * stepdown thread did not exit successfully.
+ */
+ this.stop = function() {
+ if (!_thread) {
+ throw new Error("Continuous stepdown thread is not active");
}
+ _counter.countDown();
+ _counter = null;
+
+ _thread.join();
+
+ const retVal = _thread.returnData();
+ _thread = null;
+
+ assert.commandWorked(retVal);
+ };
+};
+
+ContinuousStepdown = {};
+
+/**
+ * Defines two methods on ReplSetTest, startContinuousFailover and stopContinuousFailover, that
+ * allow starting and stopping a separate thread that will periodically step down the replica
+ * set's primary node. Also defines these methods on ShardingTest, which allow starting and
+ * stopping a stepdown thread for the test's config server replica set and each of the shard
+ * replica sets, as specified by the given stepdownOptions object.
+ */
+ContinuousStepdown.configure = function(stepdownOptions,
+ {verbositySetting: verbositySetting = {}} = {}) {
+ const defaultOptions = {
+ configStepdown: true,
+ electionTimeoutMS: 5 * 1000,
+ shardStepdown: true,
+ stepdownDurationSecs: 10,
+ stepdownIntervalMS: 8 * 1000,
+ catchUpTimeoutMS: 0,
+ };
+ stepdownOptions = Object.merge(defaultOptions, stepdownOptions);
+
+ verbositySetting = tojson(verbositySetting);
+
+ // Preserve the original ReplSetTest and ShardingTest constructors, because they are being
+ // overridden.
+ const originalReplSetTest = ReplSetTest;
+ const originalShardingTest = ShardingTest;
+
+ /**
+ * Overrides the ReplSetTest constructor to start the continuous primary stepdown thread.
+ */
+ ReplSetTest = function ReplSetTestWithContinuousPrimaryStepdown() {
+ // Construct the original object
+ originalReplSetTest.apply(this, arguments);
+
+ // Preserve the original versions of functions that are overridden below.
+ const _originalStartSetFn = this.startSet;
+ const _originalStopSetFn = this.stopSet;
+ const _originalAwaitLastOpCommitted = this.awaitLastOpCommitted;
+
/**
- * Returns true if the stepdown thread has been created and started.
+ * Overrides startSet call to increase logging verbosity.
*/
- this.hasStarted = function() {
- return !!_thread;
+ this.startSet = function() {
+ let options = arguments[0] || {};
+
+ if (typeof (options.setParameter) === "string") {
+ var eqIdx = options.setParameter.indexOf("=");
+ if (eqIdx != -1) {
+ var param = options.setParameter.substring(0, eqIdx);
+ var value = options.setParameter.substring(eqIdx + 1);
+ options.setParameter = {};
+ options.setParameter[param] = value;
+ }
+ }
+ arguments[0] = options;
+
+ options.setParameter = options.setParameter || {};
+ options.setParameter.logComponentVerbosity = verbositySetting;
+ return _originalStartSetFn.apply(this, arguments);
};
/**
- * Spawns a ScopedThread using the given seedNode to discover the replica set.
+ * Overrides stopSet to terminate the failover thread.
*/
- this.start = function(seedNode, options) {
- if (_thread) {
- throw new Error("Continuous stepdown thread is already active");
- }
+ this.stopSet = function() {
+ this.stopContinuousFailover({waitForPrimary: false});
+ _originalStopSetFn.apply(this, arguments);
+ };
- _counter = new CountDownLatch(1);
- _thread = new ScopedThread(_continuousPrimaryStepdownFn, _counter, seedNode, options);
- _thread.start();
+ /**
+ * Overrides awaitLastOpCommitted to retry on network errors.
+ */
+ this.awaitLastOpCommitted = function() {
+ return retryOnNetworkError(_originalAwaitLastOpCommitted.bind(this));
};
+ // Handle for the continuous stepdown thread.
+ const _stepdownThread = new StepdownThread();
+
/**
- * Sets the stepdown thread's counter to 0, and waits for it to finish. Throws if the
- * stepdown thread did not exit successfully.
+ * Reconfigures the replica set, then starts the stepdown thread. As part of the new
+ * config, this sets:
+ * - electionTimeoutMillis to stepdownOptions.electionTimeoutMS so a new primary can
+ * get elected before the stepdownOptions.stepdownIntervalMS period would cause one
+ * to step down again.
+ * - catchUpTimeoutMillis to stepdownOptions.catchUpTimeoutMS. Lower values increase
+ * the likelihood and volume of rollbacks.
*/
- this.stop = function() {
- if (!_thread) {
- throw new Error("Continuous stepdown thread is not active");
+ this.startContinuousFailover = function() {
+ if (_stepdownThread.hasStarted()) {
+ throw new Error("Continuous failover thread is already active");
}
- _counter.countDown();
- _counter = null;
+ const rsconfig = this.getReplSetConfigFromNode();
- _thread.join();
+ const shouldUpdateElectionTimeout =
+ (rsconfig.settings.electionTimeoutMillis !== stepdownOptions.electionTimeoutMS);
+ const shouldUpdateCatchUpTimeout =
+ (rsconfig.settings.catchUpTimeoutMillis !== stepdownOptions.catchUpTimeoutMS);
- const retVal = _thread.returnData();
- _thread = null;
+ if (shouldUpdateElectionTimeout || shouldUpdateCatchUpTimeout) {
+ rsconfig.settings.electionTimeoutMillis = stepdownOptions.electionTimeoutMS;
+ rsconfig.settings.catchUpTimeoutMillis = stepdownOptions.catchUpTimeoutMS;
- assert.commandWorked(retVal);
- };
- };
+ rsconfig.version += 1;
+ reconfig(this, rsconfig);
- ContinuousStepdown = {};
+ const newSettings = this.getReplSetConfigFromNode().settings;
- /**
- * Defines two methods on ReplSetTest, startContinuousFailover and stopContinuousFailover, that
- * allow starting and stopping a separate thread that will periodically step down the replica
- * set's primary node. Also defines these methods on ShardingTest, which allow starting and
- * stopping a stepdown thread for the test's config server replica set and each of the shard
- * replica sets, as specified by the given stepdownOptions object.
- */
- ContinuousStepdown.configure = function(stepdownOptions,
- {verbositySetting: verbositySetting = {}} = {}) {
- const defaultOptions = {
- configStepdown: true,
- electionTimeoutMS: 5 * 1000,
- shardStepdown: true,
- stepdownDurationSecs: 10,
- stepdownIntervalMS: 8 * 1000,
- catchUpTimeoutMS: 0,
- };
- stepdownOptions = Object.merge(defaultOptions, stepdownOptions);
-
- verbositySetting = tojson(verbositySetting);
+ assert.eq(newSettings.electionTimeoutMillis,
+ stepdownOptions.electionTimeoutMS,
+ "Failed to set the electionTimeoutMillis to " +
+ stepdownOptions.electionTimeoutMS + " milliseconds.");
+ assert.eq(newSettings.catchUpTimeoutMillis,
+ stepdownOptions.catchUpTimeoutMS,
+ "Failed to set the catchUpTimeoutMillis to " +
+ stepdownOptions.catchUpTimeoutMS + " milliseconds.");
+ }
- // Preserve the original ReplSetTest and ShardingTest constructors, because they are being
- // overriden.
- const originalReplSetTest = ReplSetTest;
- const originalShardingTest = ShardingTest;
+ _stepdownThread.start(this.nodes[0].host, stepdownOptions);
+ };
/**
- * Overrides the ReplSetTest constructor to start the continuous primary stepdown thread.
+ * Blocking method, which tells the thread running continuousPrimaryStepdownFn to stop
+ * and waits for it to terminate.
+ *
+ * If waitForPrimary is true, blocks until a new primary has been elected.
*/
- ReplSetTest = function ReplSetTestWithContinuousPrimaryStepdown() {
- // Construct the original object
- originalReplSetTest.apply(this, arguments);
-
- // Preserve the original versions of functions that are overrided below.
- const _originalStartSetFn = this.startSet;
- const _originalStopSetFn = this.stopSet;
- const _originalAwaitLastOpCommitted = this.awaitLastOpCommitted;
-
- /**
- * Overrides startSet call to increase logging verbosity.
- */
- this.startSet = function() {
- let options = arguments[0] || {};
-
- if (typeof(options.setParameter) === "string") {
- var eqIdx = options.setParameter.indexOf("=");
- if (eqIdx != -1) {
- var param = options.setParameter.substring(0, eqIdx);
- var value = options.setParameter.substring(eqIdx + 1);
- options.setParameter = {};
- options.setParameter[param] = value;
- }
- }
- arguments[0] = options;
-
- options.setParameter = options.setParameter || {};
- options.setParameter.logComponentVerbosity = verbositySetting;
- return _originalStartSetFn.apply(this, arguments);
- };
-
- /**
- * Overrides stopSet to terminate the failover thread.
- */
- this.stopSet = function() {
- this.stopContinuousFailover({waitForPrimary: false});
- _originalStopSetFn.apply(this, arguments);
- };
-
- /**
- * Overrides awaitLastOpCommitted to retry on network errors.
- */
- this.awaitLastOpCommitted = function() {
- return retryOnNetworkError(_originalAwaitLastOpCommitted.bind(this));
- };
-
- // Handle for the continuous stepdown thread.
- const _stepdownThread = new StepdownThread();
-
- /**
- * Reconfigures the replica set, then starts the stepdown thread. As part of the new
- * config, this sets:
- * - electionTimeoutMillis to stepdownOptions.electionTimeoutMS so a new primary can
- * get elected before the stepdownOptions.stepdownIntervalMS period would cause one
- * to step down again.
- * - catchUpTimeoutMillis to stepdownOptions.catchUpTimeoutMS. Lower values increase
- * the likelihood and volume of rollbacks.
- */
- this.startContinuousFailover = function() {
- if (_stepdownThread.hasStarted()) {
- throw new Error("Continuous failover thread is already active");
- }
-
- const rsconfig = this.getReplSetConfigFromNode();
+ this.stopContinuousFailover = function({waitForPrimary: waitForPrimary = false} = {}) {
+ if (!_stepdownThread.hasStarted()) {
+ return;
+ }
- const shouldUpdateElectionTimeout =
- (rsconfig.settings.electionTimeoutMillis !== stepdownOptions.electionTimeoutMS);
- const shouldUpdateCatchUpTimeout =
- (rsconfig.settings.catchUpTimeoutMillis !== stepdownOptions.catchUpTimeoutMS);
+ _stepdownThread.stop();
- if (shouldUpdateElectionTimeout || shouldUpdateCatchUpTimeout) {
- rsconfig.settings.electionTimeoutMillis = stepdownOptions.electionTimeoutMS;
- rsconfig.settings.catchUpTimeoutMillis = stepdownOptions.catchUpTimeoutMS;
+ if (waitForPrimary) {
+ this.getPrimary();
+ }
+ };
+ };
- rsconfig.version += 1;
- reconfig(this, rsconfig);
+ Object.extend(ReplSetTest, originalReplSetTest);
- const newSettings = this.getReplSetConfigFromNode().settings;
+ /**
+ * Overrides the ShardingTest constructor to start the continuous primary stepdown thread.
+ */
+ ShardingTest = function ShardingTestWithContinuousPrimaryStepdown(params) {
+ params.other = params.other || {};
- assert.eq(newSettings.electionTimeoutMillis,
- stepdownOptions.electionTimeoutMS,
- "Failed to set the electionTimeoutMillis to " +
- stepdownOptions.electionTimeoutMS + " milliseconds.");
- assert.eq(newSettings.catchUpTimeoutMillis,
- stepdownOptions.catchUpTimeoutMS,
- "Failed to set the catchUpTimeoutMillis to " +
- stepdownOptions.catchUpTimeoutMS + " milliseconds.");
- }
+ if (stepdownOptions.configStepdown) {
+ params.other.configOptions = params.other.configOptions || {};
+ params.other.configOptions.setParameter = params.other.configOptions.setParameter || {};
+ params.other.configOptions.setParameter.logComponentVerbosity = verbositySetting;
+ }
- _stepdownThread.start(this.nodes[0].host, stepdownOptions);
- };
-
- /**
- * Blocking method, which tells the thread running continuousPrimaryStepdownFn to stop
- * and waits for it to terminate.
- *
- * If waitForPrimary is true, blocks until a new primary has been elected.
- */
- this.stopContinuousFailover = function({waitForPrimary: waitForPrimary = false} = {}) {
- if (!_stepdownThread.hasStarted()) {
- return;
- }
+ if (stepdownOptions.shardStepdown) {
+ params.other.shardOptions = params.other.shardOptions || {};
+ params.other.shardOptions.setParameter = params.other.shardOptions.setParameter || {};
+ params.other.shardOptions.setParameter.logComponentVerbosity = verbositySetting;
+ }
- _stepdownThread.stop();
+ // Construct the original object.
+ originalShardingTest.apply(this, arguments);
- if (waitForPrimary) {
- this.getPrimary();
- }
- };
- };
+ // Validate the stepdown options.
+ if (stepdownOptions.configStepdown && !this.configRS) {
+ throw new Error("Continuous config server primary step down only available with CSRS");
+ }
- Object.extend(ReplSetTest, originalReplSetTest);
+ if (stepdownOptions.shardStepdown && this._rs.some(rst => !rst)) {
+ throw new Error(
+ "Continuous shard primary step down only available with replica set shards");
+ }
/**
- * Overrides the ShardingTest constructor to start the continuous primary stepdown thread.
+ * Calls startContinuousFailover on the config server and/or each shard replica set as
+ * specified by the stepdownOptions object.
*/
- ShardingTest = function ShardingTestWithContinuousPrimaryStepdown(params) {
- params.other = params.other || {};
-
+ this.startContinuousFailover = function() {
if (stepdownOptions.configStepdown) {
- params.other.configOptions = params.other.configOptions || {};
- params.other.configOptions.setParameter =
- params.other.configOptions.setParameter || {};
- params.other.configOptions.setParameter.logComponentVerbosity = verbositySetting;
+ this.configRS.startContinuousFailover();
}
if (stepdownOptions.shardStepdown) {
- params.other.shardOptions = params.other.shardOptions || {};
- params.other.shardOptions.setParameter =
- params.other.shardOptions.setParameter || {};
- params.other.shardOptions.setParameter.logComponentVerbosity = verbositySetting;
+ this._rs.forEach(function(rst) {
+ rst.test.startContinuousFailover();
+ });
}
+ };
- // Construct the original object.
- originalShardingTest.apply(this, arguments);
-
- // Validate the stepdown options.
- if (stepdownOptions.configStepdown && !this.configRS) {
- throw new Error(
- "Continuous config server primary step down only available with CSRS");
+ /**
+ * Calls stopContinuousFailover on the config server and each shard replica set as
+ * specified by the stepdownOptions object.
+ *
+ * If waitForPrimary is true, blocks until each replica set has elected a primary.
+ * If waitForMongosRetarget is true, blocks until each mongos has an up to date view of
+ * the cluster.
+ */
+ this.stopContinuousFailover = function({
+ waitForPrimary: waitForPrimary = false,
+ waitForMongosRetarget: waitForMongosRetarget = false
+ } = {}) {
+ if (stepdownOptions.configStepdown) {
+ this.configRS.stopContinuousFailover({waitForPrimary: waitForPrimary});
}
- if (stepdownOptions.shardStepdown && this._rs.some(rst => !rst)) {
- throw new Error(
- "Continuous shard primary step down only available with replica set shards");
+ if (stepdownOptions.shardStepdown) {
+ this._rs.forEach(function(rst) {
+ rst.test.stopContinuousFailover({waitForPrimary: waitForPrimary});
+ });
}
- /**
- * Calls startContinuousFailover on the config server and/or each shard replica set as
- * specifed by the stepdownOptions object.
- */
- this.startContinuousFailover = function() {
- if (stepdownOptions.configStepdown) {
- this.configRS.startContinuousFailover();
- }
-
- if (stepdownOptions.shardStepdown) {
- this._rs.forEach(function(rst) {
- rst.test.startContinuousFailover();
- });
- }
- };
-
- /**
- * Calls stopContinuousFailover on the config server and each shard replica set as
- * specified by the stepdownOptions object.
- *
- * If waitForPrimary is true, blocks until each replica set has elected a primary.
- * If waitForMongosRetarget is true, blocks until each mongos has an up to date view of
- * the cluster.
- */
- this.stopContinuousFailover = function({
- waitForPrimary: waitForPrimary = false,
- waitForMongosRetarget: waitForMongosRetarget = false
- } = {}) {
- if (stepdownOptions.configStepdown) {
- this.configRS.stopContinuousFailover({waitForPrimary: waitForPrimary});
- }
-
- if (stepdownOptions.shardStepdown) {
- this._rs.forEach(function(rst) {
- rst.test.stopContinuousFailover({waitForPrimary: waitForPrimary});
- });
- }
-
- if (waitForMongosRetarget) {
- // Run validate on each collection in each database to ensure mongos can target
- // the primary for each shard with data, including the config servers.
- this._mongos.forEach(s => {
- const res = assert.commandWorked(s.adminCommand({listDatabases: 1}));
- res.databases.forEach(dbInfo => {
- const startTime = Date.now();
- print("Waiting for mongos: " + s.host + " to retarget db: " +
- dbInfo.name);
-
- const db = s.getDB(dbInfo.name);
- assert.soon(() => {
- let collInfo;
- try {
- collInfo = db.getCollectionInfos();
- } catch (e) {
- if (ErrorCodes.isNotMasterError(e.code)) {
- return false;
- }
- throw e;
+ if (waitForMongosRetarget) {
+ // Run validate on each collection in each database to ensure mongos can target
+ // the primary for each shard with data, including the config servers.
+ this._mongos.forEach(s => {
+ const res = assert.commandWorked(s.adminCommand({listDatabases: 1}));
+ res.databases.forEach(dbInfo => {
+ const startTime = Date.now();
+ print("Waiting for mongos: " + s.host + " to retarget db: " + dbInfo.name);
+
+ const db = s.getDB(dbInfo.name);
+ assert.soon(() => {
+ let collInfo;
+ try {
+ collInfo = db.getCollectionInfos();
+ } catch (e) {
+ if (ErrorCodes.isNotMasterError(e.code)) {
+ return false;
}
+ throw e;
+ }
- collInfo.forEach(collDoc => {
- const res = db.runCommand({collStats: collDoc["name"]});
- if (ErrorCodes.isNotMasterError(res.code)) {
- return false;
- }
- assert.commandWorked(res);
- });
-
- return true;
+ collInfo.forEach(collDoc => {
+ const res = db.runCommand({collStats: collDoc["name"]});
+ if (ErrorCodes.isNotMasterError(res.code)) {
+ return false;
+ }
+ assert.commandWorked(res);
});
- const totalTime = Date.now() - startTime;
- print("Finished waiting for mongos: " + s.host + " to retarget db: " +
- dbInfo.name + ", in " + totalTime + " ms");
+
+ return true;
});
+ const totalTime = Date.now() - startTime;
+ print("Finished waiting for mongos: " + s.host +
+ " to retarget db: " + dbInfo.name + ", in " + totalTime + " ms");
});
- }
-
- };
-
- /**
- * This method is disabled because it runs aggregation, which doesn't handle config
- * server stepdown correctly.
- */
- this.printShardingStatus = function() {};
+ });
+ }
};
- Object.extend(ShardingTest, originalShardingTest);
-
- // The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype,
- // but ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the
- // override to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's
- // prototype.
- load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
+ /**
+ * This method is disabled because it runs aggregation, which doesn't handle config
+ * server stepdown correctly.
+ */
+ this.printShardingStatus = function() {};
};
+
+ Object.extend(ShardingTest, originalShardingTest);
+
+ // The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype,
+ // but ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the
+ // override to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's
+ // prototype.
+ load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
+};
})();
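
The hunk above swaps in a replacement ShardingTest constructor and then restores the
original's static properties. A minimal sketch of that wrap-and-extend pattern, using a
hypothetical TestFixture global and a hypothetical kDefaultTimeout static (not the real
override):

    const originalTestFixture = TestFixture;
    TestFixture = function() {
        // Run the real constructor first, then layer per-instance overrides on top.
        originalTestFixture.apply(this, arguments);
        this.printStatus = function() {};  // example per-instance stub
    };
    // Object.extend() (a mongo shell helper) copies statics, e.g. the hypothetical
    // TestFixture.kDefaultTimeout, from the original onto the replacement.
    Object.extend(TestFixture, originalTestFixture);
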
diff --git a/jstests/libs/override_methods/detect_spawning_own_mongod.js b/jstests/libs/override_methods/detect_spawning_own_mongod.js
index f741c086cdd..42b95a58d51 100644
--- a/jstests/libs/override_methods/detect_spawning_own_mongod.js
+++ b/jstests/libs/override_methods/detect_spawning_own_mongod.js
@@ -3,40 +3,38 @@
* suites should not contain JS tests that start their own mongod/s.
*/
(function() {
- 'use strict';
+'use strict';
- MongoRunner.runMongod = function() {
- throw new Error(
- "Detected MongoRunner.runMongod() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+MongoRunner.runMongod = function() {
+ throw new Error("Detected MongoRunner.runMongod() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- MongoRunner.runMongos = function() {
- throw new Error(
- "Detected MongoRunner.runMongos() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+MongoRunner.runMongos = function() {
+ throw new Error("Detected MongoRunner.runMongos() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- const STOverrideConstructor = function() {
- throw new Error("Detected ShardingTest() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+const STOverrideConstructor = function() {
+ throw new Error("Detected ShardingTest() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- // This Object.assign() lets us modify ShardingTest to use the new overridden constructor but
- // still keep any static properties it has.
- ShardingTest = Object.assign(STOverrideConstructor, ShardingTest);
+// This Object.assign() lets us modify ShardingTest to use the new overridden constructor but
+// still keep any static properties it has.
+ShardingTest = Object.assign(STOverrideConstructor, ShardingTest);
- const RSTOverrideConstructor = function() {
- throw new Error("Detected ReplSetTest() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+const RSTOverrideConstructor = function() {
+ throw new Error("Detected ReplSetTest() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- // Same as the above Object.assign() call. In particular, we want to preserve the
- // ReplSetTest.kDefaultTimeoutMS property, which should be accessible to tests in the
- // passthrough suite.
- ReplSetTest = Object.assign(RSTOverrideConstructor, ReplSetTest);
+// Same as the above Object.assign() call. In particular, we want to preserve the
+// ReplSetTest.kDefaultTimeoutMS property, which should be accessible to tests in the
+// passthrough suite.
+ReplSetTest = Object.assign(RSTOverrideConstructor, ReplSetTest);
})();
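
detect_spawning_own_mongod.js relies on replacing a constructor with a throwing stub
while keeping its statics readable. A minimal sketch of that idiom with a hypothetical
SomeTest fixture:

    const SomeTestOverride = function() {
        throw new Error("Detected SomeTest() call in a passthrough suite.");
    };
    // Object.assign() copies enumerable statics from the real constructor onto the
    // stub, so reads like SomeTest.kDefaultTimeoutMS keep working even though
    // calling new SomeTest() now throws.
    SomeTest = Object.assign(SomeTestOverride, SomeTest);
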
diff --git a/jstests/libs/override_methods/enable_causal_consistency.js b/jstests/libs/override_methods/enable_causal_consistency.js
index 26c861baa9c..cb9eb52db06 100644
--- a/jstests/libs/override_methods/enable_causal_consistency.js
+++ b/jstests/libs/override_methods/enable_causal_consistency.js
@@ -2,14 +2,14 @@
* Enables causal consistency on the connections.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
- load('jstests/libs/override_methods/set_read_preference_secondary.js');
- load('jstests/libs/override_methods/causally_consistent_index_builds.js');
+load("jstests/libs/override_methods/override_helpers.js");
+load('jstests/libs/override_methods/set_read_preference_secondary.js');
+load('jstests/libs/override_methods/causally_consistent_index_builds.js');
- db.getMongo().setCausalConsistency();
+db.getMongo().setCausalConsistency();
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/enable_causal_consistency.js");
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/enable_causal_consistency.js");
})();
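
A usage sketch of the override above; it assumes the shell helper
Mongo.prototype.isCausalConsistency() and a test running with a global db:

    load("jstests/libs/override_methods/enable_causal_consistency.js");
    assert(db.getMongo().isCausalConsistency());  // assumed shell helper

    // Because the override prepends itself in parallel shells, a shell spawned
    // by the test is also causally consistent.
    const awaitShell =
        startParallelShell("assert(db.getMongo().isCausalConsistency());");
    awaitShell();
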
diff --git a/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js b/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js
index 4da6c596ef6..96860afb3f0 100644
--- a/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js
+++ b/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js
@@ -2,12 +2,12 @@
* Enables causal consistency on the connections without setting the read preference to secondary.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- db.getMongo().setCausalConsistency();
+db.getMongo().setCausalConsistency();
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js");
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js");
})();
diff --git a/jstests/libs/override_methods/enable_sessions.js b/jstests/libs/override_methods/enable_sessions.js
index 85bb57d7e94..846143da999 100644
--- a/jstests/libs/override_methods/enable_sessions.js
+++ b/jstests/libs/override_methods/enable_sessions.js
@@ -2,67 +2,65 @@
* Enables sessions on the db object
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- const getDBOriginal = Mongo.prototype.getDB;
+const getDBOriginal = Mongo.prototype.getDB;
- const sessionMap = new WeakMap();
- const sessionOptions = TestData.sessionOptions;
+const sessionMap = new WeakMap();
+const sessionOptions = TestData.sessionOptions;
- // Override the runCommand to check for any command obj that does not contain a logical session
- // and throw an error.
- function runCommandWithLsidCheck(conn, dbName, cmdName, cmdObj, func, makeFuncArgs) {
- if (jsTest.options().disableEnableSessions) {
- return func.apply(conn, makeFuncArgs(cmdObj));
- }
-
- // If the command is in a wrapped form, then we look for the actual command object
- // inside the query/$query object.
- let cmdObjUnwrapped = cmdObj;
- if (cmdName === "query" || cmdName === "$query") {
- cmdObj[cmdName] = Object.assign({}, cmdObj[cmdName]);
- cmdObjUnwrapped = cmdObj[cmdName];
- }
-
- if (!cmdObjUnwrapped.hasOwnProperty("lsid")) {
- // TODO: SERVER-30848 fixes getMore requests to use a session in the mongo shell.
- // Until that happens, we bypass throwing an error for getMore and only throw an error
- // for other requests not using sessions.
- if (cmdName !== "getMore") {
- throw new Error("command object does not have session id: " + tojson(cmdObj));
- }
- }
+// Override the runCommand to check for any command obj that does not contain a logical session
+// and throw an error.
+function runCommandWithLsidCheck(conn, dbName, cmdName, cmdObj, func, makeFuncArgs) {
+ if (jsTest.options().disableEnableSessions) {
return func.apply(conn, makeFuncArgs(cmdObj));
}
- // Override the getDB to return a db object with the correct driverSession. We use a WeakMap
- // to cache the session for each connection instance so we can retrieve the same session on
- // subsequent calls to getDB.
- Mongo.prototype.getDB = function(dbName) {
- if (jsTest.options().disableEnableSessions) {
- return getDBOriginal.apply(this, arguments);
- }
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ let cmdObjUnwrapped = cmdObj;
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObj[cmdName] = Object.assign({}, cmdObj[cmdName]);
+ cmdObjUnwrapped = cmdObj[cmdName];
+ }
- if (!sessionMap.has(this)) {
- const session = this.startSession(sessionOptions);
- // Override the endSession function to be a no-op so jstestfuzz doesn't accidentally
- // end the session.
- session.endSession = Function.prototype;
- sessionMap.set(this, session);
+ if (!cmdObjUnwrapped.hasOwnProperty("lsid")) {
+ // TODO: SERVER-30848 fixes getMore requests to use a session in the mongo shell.
+ // Until that happens, we bypass throwing an error for getMore and only throw an error
+ // for other requests not using sessions.
+ if (cmdName !== "getMore") {
+ throw new Error("command object does not have session id: " + tojson(cmdObj));
}
+ }
+ return func.apply(conn, makeFuncArgs(cmdObj));
+}
- const db = getDBOriginal.apply(this, arguments);
- db._session = sessionMap.get(this);
- return db;
- };
+// Override the getDB to return a db object with the correct driverSession. We use a WeakMap
+// to cache the session for each connection instance so we can retrieve the same session on
+// subsequent calls to getDB.
+Mongo.prototype.getDB = function(dbName) {
+ if (jsTest.options().disableEnableSessions) {
+ return getDBOriginal.apply(this, arguments);
+ }
+
+ if (!sessionMap.has(this)) {
+ const session = this.startSession(sessionOptions);
+ // Override the endSession function to be a no-op so jstestfuzz doesn't accidentally
+ // end the session.
+ session.endSession = Function.prototype;
+ sessionMap.set(this, session);
+ }
- // Override the global `db` object to be part of a session.
- db = db.getMongo().getDB(db.getName());
+ const db = getDBOriginal.apply(this, arguments);
+ db._session = sessionMap.get(this);
+ return db;
+};
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/enable_sessions.js");
- OverrideHelpers.overrideRunCommand(runCommandWithLsidCheck);
+// Override the global `db` object to be part of a session.
+db = db.getMongo().getDB(db.getName());
+OverrideHelpers.prependOverrideInParallelShell("jstests/libs/override_methods/enable_sessions.js");
+OverrideHelpers.overrideRunCommand(runCommandWithLsidCheck);
})();
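
The heart of this override is caching one session per connection in a WeakMap so that
repeated getDB() calls reuse it. A minimal sketch of the idiom, with makeSession()
standing in (hypothetically) for this.startSession(sessionOptions):

    const sessionMap = new WeakMap();

    function sessionFor(conn) {
        if (!sessionMap.has(conn)) {
            sessionMap.set(conn, makeSession(conn));  // hypothetical factory
        }
        // The same connection object yields the same cached session, and the
        // entry is collectable once the connection itself becomes unreachable.
        return sessionMap.get(conn);
    }
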
diff --git a/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js b/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js
index 2feca955c3d..858a345ae5a 100644
--- a/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js
+++ b/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js
@@ -4,46 +4,47 @@
* inaccurate results.
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
-
- function runCommandFailUncleanShutdownIncompatibleCommands(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
-
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
- }
-
- if (commandName === "count" && (!commandObjUnwrapped.hasOwnProperty("query") ||
- Object.keys(commandObjUnwrapped["query"]).length === 0)) {
- throw new Error("Cowardly fail if fastcount is run with a mongod that had an unclean" +
- " shutdown: " + tojson(commandObjUnwrapped));
- }
-
- if (commandName === "dataSize" && !commandObjUnwrapped.hasOwnProperty("min") &&
- !commandObjUnwrapped.hasOwnProperty("max")) {
- throw new Error("Cowardly fail if unbounded dataSize is run with a mongod that had an" +
- " unclean shutdown: " + tojson(commandObjUnwrapped));
- }
-
- if (commandName === "collStats" || commandName === "dbStats") {
- throw new Error("Cowardly fail if " + commandName + " is run with a mongod that had" +
- " an unclean shutdown: " + tojson(commandObjUnwrapped));
- }
+"use strict";
+load("jstests/libs/override_methods/override_helpers.js");
+
+function runCommandFailUncleanShutdownIncompatibleCommands(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
return func.apply(conn, makeFuncArgs(commandObj));
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
+
+ if (commandName === "count" &&
+ (!commandObjUnwrapped.hasOwnProperty("query") ||
+ Object.keys(commandObjUnwrapped["query"]).length === 0)) {
+ throw new Error("Cowardly fail if fastcount is run with a mongod that had an unclean" +
+ " shutdown: " + tojson(commandObjUnwrapped));
+ }
+
+ if (commandName === "dataSize" && !commandObjUnwrapped.hasOwnProperty("min") &&
+ !commandObjUnwrapped.hasOwnProperty("max")) {
+ throw new Error("Cowardly fail if unbounded dataSize is run with a mongod that had an" +
+ " unclean shutdown: " + tojson(commandObjUnwrapped));
+ }
+
+ if (commandName === "collStats" || commandName === "dbStats") {
+ throw new Error("Cowardly fail if " + commandName + " is run with a mongod that had" +
+ " an unclean shutdown: " + tojson(commandObjUnwrapped));
+ }
+
+ return func.apply(conn, makeFuncArgs(commandObj));
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
- OverrideHelpers.overrideRunCommand(runCommandFailUncleanShutdownIncompatibleCommands);
+OverrideHelpers.overrideRunCommand(runCommandFailUncleanShutdownIncompatibleCommands);
})();
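
Several overrides in this directory share the unwrapping step shown above. A sketch of
just that step, assuming a command may arrive in the legacy wrapped form such as
{query: {count: "c"}}:

    let commandName = Object.keys(commandObj)[0];
    let commandObjUnwrapped = commandObj;
    if (commandName === "query" || commandName === "$query") {
        // The real command sits one level down in the wrapped form.
        commandObjUnwrapped = commandObj[commandName];
        commandName = Object.keys(commandObjUnwrapped)[0];
    }
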
diff --git a/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js b/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js
index fabeff4915f..5a6b04a308b 100644
--- a/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js
+++ b/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js
@@ -3,10 +3,10 @@
* unclean shutdown and won't be restarted when the node is restarted.
*/
(function() {
- "use strict";
+"use strict";
- startParallelShell = function(jsCode, port, noConnect) {
- throw new Error("Cowardly fail if startParallelShell is run with a mongod that had" +
- " an unclean shutdown.");
- };
+startParallelShell = function(jsCode, port, noConnect) {
+ throw new Error("Cowardly fail if startParallelShell is run with a mongod that had" +
+ " an unclean shutdown.");
+};
})();
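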
diff --git a/jstests/libs/override_methods/find_batch_size.js b/jstests/libs/override_methods/find_batch_size.js
index ab773ded7ed..9636be036fd 100644
--- a/jstests/libs/override_methods/find_batch_size.js
+++ b/jstests/libs/override_methods/find_batch_size.js
@@ -10,14 +10,14 @@
// TODO: Add support for overriding batch sizes in the bulk API.
(function() {
- 'use strict';
+'use strict';
- // Save a reference to the original find method in the IIFE's scope.
- // This scoping allows the original method to be called by the find override below.
- var originalFind = DBCollection.prototype.find;
+// Save a reference to the original find method in the IIFE's scope.
+// This scoping allows the original method to be called by the find override below.
+var originalFind = DBCollection.prototype.find;
- DBCollection.prototype.find = function(query, fields, limit, skip, batchSize, options) {
- var batchSizeDefault = batchSize || (TestData && TestData.batchSize);
- return originalFind.call(this, query, fields, limit, skip, batchSizeDefault, options);
- };
+DBCollection.prototype.find = function(query, fields, limit, skip, batchSize, options) {
+ var batchSizeDefault = batchSize || (TestData && TestData.batchSize);
+ return originalFind.call(this, query, fields, limit, skip, batchSizeDefault, options);
+};
}());
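
A usage sketch for the batch size override, assuming the suite sets TestData.batchSize
before loading it:

    TestData.batchSize = 2;
    load("jstests/libs/override_methods/find_batch_size.js");

    // An explicit batchSize argument still wins; otherwise TestData.batchSize
    // is substituted.
    const c1 = db.coll.find();                        // sent with batchSize 2
    const c2 = db.coll.find({}, undefined, 0, 0, 5);  // sent with batchSize 5
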
diff --git a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
index c7ba66763a2..9e13e0d0847 100644
--- a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
+++ b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
@@ -24,14 +24,14 @@ ChangeStreamPassthroughHelpers.nsMatchFilter = function(db, collName) {
$match: {
$or: [
{
- "ns.db": db.getName(),
- "ns.coll": (isSingleCollectionStream ? collName : {$exists: true})
+ "ns.db": db.getName(),
+ "ns.coll": (isSingleCollectionStream ? collName : {$exists: true})
},
// Add a clause to detect if the collection being watched is the target of a
// renameCollection command, since that is expected to return a "rename" entry.
{
- "to.db": db.getName(),
- "to.coll": (isSingleCollectionStream ? collName : {$exists: true})
+ "to.db": db.getName(),
+ "to.coll": (isSingleCollectionStream ? collName : {$exists: true})
},
{operationType: "invalidate"}
]
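
For a single-collection passthrough watching test.coll, the filter above resolves to
roughly this stage (a sketch of the generated object, not literal source):

    {
        $match: {
            $or: [
                {"ns.db": "test", "ns.coll": "coll"},
                // Catches renameCollection events targeting the watched namespace.
                {"to.db": "test", "to.coll": "coll"},
                {operationType: "invalidate"}
            ]
        }
    }
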
diff --git a/jstests/libs/override_methods/implicit_whole_db_changestreams.js b/jstests/libs/override_methods/implicit_whole_db_changestreams.js
index 93f485e4564..e5fe87c5287 100644
--- a/jstests/libs/override_methods/implicit_whole_db_changestreams.js
+++ b/jstests/libs/override_methods/implicit_whole_db_changestreams.js
@@ -106,56 +106,55 @@ const ChangeStreamPassthroughHelpers = {
};
(function() {
- 'use strict';
+'use strict';
- const originalRunCommandImpl = DB.prototype._runCommandImpl;
- const originalRunCommand = DB.prototype.runCommand;
+const originalRunCommandImpl = DB.prototype._runCommandImpl;
+const originalRunCommand = DB.prototype.runCommand;
- const upconvertedCursors = new Set();
+const upconvertedCursors = new Set();
- const db = null;
+const db = null;
- const passthroughRunCommandImpl = function(dbName, cmdObj, options) {
- // Check whether this command is an upconvertable $changeStream request.
- const upconvertCursor =
- ChangeStreamPassthroughHelpers.isUpconvertableChangeStreamRequest(this, cmdObj);
- if (upconvertCursor) {
- [dbName, cmdObj] =
- ChangeStreamPassthroughHelpers.upconvertChangeStreamRequest(this, cmdObj);
- }
+const passthroughRunCommandImpl = function(dbName, cmdObj, options) {
+ // Check whether this command is an upconvertable $changeStream request.
+ const upconvertCursor =
+ ChangeStreamPassthroughHelpers.isUpconvertableChangeStreamRequest(this, cmdObj);
+ if (upconvertCursor) {
+ [dbName, cmdObj] =
+ ChangeStreamPassthroughHelpers.upconvertChangeStreamRequest(this, cmdObj);
+ }
- // If the command is a getMore, it may be a $changeStream that we upconverted to run
- // whole-db. Ensure that we update the 'collection' field to be the collectionless
- // namespace.
- if (cmdObj && cmdObj.getMore && upconvertedCursors.has(cmdObj.getMore.toString())) {
- [dbName, cmdObj] = ChangeStreamPassthroughHelpers.upconvertGetMoreRequest(this, cmdObj);
- }
+ // If the command is a getMore, it may be a $changeStream that we upconverted to run
+ // whole-db. Ensure that we update the 'collection' field to be the collectionless
+ // namespace.
+ if (cmdObj && cmdObj.getMore && upconvertedCursors.has(cmdObj.getMore.toString())) {
+ [dbName, cmdObj] = ChangeStreamPassthroughHelpers.upconvertGetMoreRequest(this, cmdObj);
+ }
- // Pass the modified command to the original runCommand implementation.
- const res = originalRunCommandImpl.apply(this, [dbName, cmdObj, options]);
+ // Pass the modified command to the original runCommand implementation.
+ const res = originalRunCommandImpl.apply(this, [dbName, cmdObj, options]);
- // Record the upconverted cursor ID so that we can adjust subsequent getMores.
- if (upconvertCursor && res.cursor && res.cursor.id > 0) {
- upconvertedCursors.add(res.cursor.id.toString());
- }
+ // Record the upconverted cursor ID so that we can adjust subsequent getMores.
+ if (upconvertCursor && res.cursor && res.cursor.id > 0) {
+ upconvertedCursors.add(res.cursor.id.toString());
+ }
- return res;
- };
-
- // Redirect the Collection's 'watch' function to use the whole-DB version. Although calls to the
- // shell helpers will ultimately resolve to the overridden runCommand anyway, we need to
- // override the helpers to ensure that the DB.watch function itself is exercised by the
- // passthrough wherever Collection.watch is called.
- DBCollection.prototype.watch = function(pipeline, options) {
- pipeline = Object.assign([], pipeline);
- pipeline.unshift(
- ChangeStreamPassthroughHelpers.nsMatchFilter(this.getDB(), this.getName()));
- return this.getDB().watch(pipeline, options);
- };
-
- // Override DB.runCommand to use the custom or original _runCommandImpl.
- DB.prototype.runCommand = function(cmdObj, extra, queryOptions, noPassthrough) {
- this._runCommandImpl = (noPassthrough ? originalRunCommandImpl : passthroughRunCommandImpl);
- return originalRunCommand.apply(this, [cmdObj, extra, queryOptions]);
- };
+ return res;
+};
+
+// Redirect the Collection's 'watch' function to use the whole-DB version. Although calls to the
+// shell helpers will ultimately resolve to the overridden runCommand anyway, we need to
+// override the helpers to ensure that the DB.watch function itself is exercised by the
+// passthrough wherever Collection.watch is called.
+DBCollection.prototype.watch = function(pipeline, options) {
+ pipeline = Object.assign([], pipeline);
+ pipeline.unshift(ChangeStreamPassthroughHelpers.nsMatchFilter(this.getDB(), this.getName()));
+ return this.getDB().watch(pipeline, options);
+};
+
+// Override DB.runCommand to use the custom or original _runCommandImpl.
+DB.prototype.runCommand = function(cmdObj, extra, queryOptions, noPassthrough) {
+ this._runCommandImpl = (noPassthrough ? originalRunCommandImpl : passthroughRunCommandImpl);
+ return originalRunCommand.apply(this, [cmdObj, extra, queryOptions]);
+};
}());
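
The cursor bookkeeping above can be sketched on its own: record the ids of upconverted
change stream cursors so their getMores are rewritten too. The '$cmd.aggregate'
collection name below is an assumption about the collectionless namespace form:

    const upconvertedCursors = new Set();

    // After running the upconverted whole-db $changeStream aggregate:
    if (res.cursor && res.cursor.id > 0) {
        upconvertedCursors.add(res.cursor.id.toString());
    }

    // Before forwarding a later getMore:
    if (cmdObj.getMore && upconvertedCursors.has(cmdObj.getMore.toString())) {
        cmdObj.collection = "$cmd.aggregate";  // assumed collectionless namespace
    }
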
diff --git a/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js b/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js
index 7ec99e0fd14..c8ca76eb08a 100644
--- a/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js
+++ b/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js
@@ -3,135 +3,134 @@
* codes automatically retry.
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
-
- // These are all commands that can return BackgroundOperationInProgress error codes.
- const commandWhitelist = new Set([
- "cloneCollectionAsCapped",
- "collMod",
- "compact",
- "convertToCapped",
- "createIndexes",
- "drop",
- "dropDatabase",
- "dropIndexes",
- "renameCollection",
- ]);
-
-    // Whitelisted errors that commands may encounter when retried on a sharded cluster. Shards
-    // may return different responses, so errors associated with repeated executions of a command
-    // may be ignored.
- const acceptableCommandErrors = {
- "drop": [ErrorCodes.NamespaceNotFound],
- "dropIndexes": [ErrorCodes.IndexNotFound],
- "renameCollection": [ErrorCodes.NamespaceNotFound],
- };
-
- const kTimeout = 10 * 60 * 1000;
- const kInterval = 200;
-
-    // Named constants make it easier to see whether the assert.soon predicate stops or retries.
- const kNoRetry = true;
- const kRetry = false;
-
- function hasBackgroundOpInProgress(res) {
- // Only these are retryable.
- return res.code === ErrorCodes.BackgroundOperationInProgressForNamespace ||
- res.code === ErrorCodes.BackgroundOperationInProgressForDatabase;
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+
+// These are all commands that can return BackgroundOperationInProgress error codes.
+const commandWhitelist = new Set([
+ "cloneCollectionAsCapped",
+ "collMod",
+ "compact",
+ "convertToCapped",
+ "createIndexes",
+ "drop",
+ "dropDatabase",
+ "dropIndexes",
+ "renameCollection",
+]);
+
+// Whitelisted errors that commands may encounter when retried on a sharded cluster. Shards may
+// return different responses, so errors associated with repeated executions of a command may be
+// ignored.
+const acceptableCommandErrors = {
+ "drop": [ErrorCodes.NamespaceNotFound],
+ "dropIndexes": [ErrorCodes.IndexNotFound],
+ "renameCollection": [ErrorCodes.NamespaceNotFound],
+};
+
+const kTimeout = 10 * 60 * 1000;
+const kInterval = 200;
+
+// Named constants make it easier to see whether the assert.soon predicate stops or retries.
+const kNoRetry = true;
+const kRetry = false;
+
+function hasBackgroundOpInProgress(res) {
+ // Only these are retryable.
+ return res.code === ErrorCodes.BackgroundOperationInProgressForNamespace ||
+ res.code === ErrorCodes.BackgroundOperationInProgressForDatabase;
+}
+
+function runCommandWithRetries(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
}
- function runCommandWithRetries(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+ let res;
+ let attempt = 0;
- let res;
- let attempt = 0;
+ assert.soon(
+ () => {
+ attempt++;
- assert.soon(
- () => {
- attempt++;
+ res = func.apply(conn, makeFuncArgs(commandObj));
+ if (res.ok === 1) {
+ return kNoRetry;
+ }
- res = func.apply(conn, makeFuncArgs(commandObj));
- if (res.ok === 1) {
- return kNoRetry;
- }
+ // Commands that are not in the whitelist should never fail with this error code.
+ if (!commandWhitelist.has(commandName)) {
+ return kNoRetry;
+ }
- // Commands that are not in the whitelist should never fail with this error code.
- if (!commandWhitelist.has(commandName)) {
- return kNoRetry;
- }
+ let message = "Retrying the " + commandName +
+ " command because a background operation is in progress (attempt " + attempt + ")";
- let message = "Retrying the " + commandName +
- " command because a background operation is in progress (attempt " + attempt +
- ")";
-
- // This handles the retry case when run against a standalone, replica set, or mongos
- // where both shards returned the same response.
- if (hasBackgroundOpInProgress(res)) {
- print(message);
- return kRetry;
+ // This handles the retry case when run against a standalone, replica set, or mongos
+ // where both shards returned the same response.
+ if (hasBackgroundOpInProgress(res)) {
+ print(message);
+ return kRetry;
+ }
+
+ // The following logic only applies to sharded clusters.
+ if (!conn.isMongos() || !res.raw) {
+ // We don't attempt to retry commands for which mongos doesn't expose the raw
+ // responses from the shards.
+ return kNoRetry;
+ }
+
+ // In certain cases, retrying a command on a sharded cluster may result in a
+ // scenario where one shard has executed the command and another still has a
+ // background operation in progress. Retry, ignoring whitelisted errors on a
+ // command-by-command basis.
+ let shardsWithBackgroundOps = [];
+
+ // If any shard has a background operation in progress and the other shards sent
+ // whitelisted errors after a first attempt, retry the entire command.
+ for (let shard in res.raw) {
+ let shardRes = res.raw[shard];
+ if (shardRes.ok) {
+ continue;
}
- // The following logic only applies to sharded clusters.
- if (!conn.isMongos() || !res.raw) {
- // We don't attempt to retry commands for which mongos doesn't expose the raw
- // responses from the shards.
- return kNoRetry;
+ if (hasBackgroundOpInProgress(shardRes)) {
+ shardsWithBackgroundOps.push(shard);
+ continue;
}
- // In certain cases, retrying a command on a sharded cluster may result in a
- // scenario where one shard has executed the command and another still has a
- // background operation in progress. Retry, ignoring whitelisted errors on a
- // command-by-command basis.
- let shardsWithBackgroundOps = [];
-
- // If any shard has a background operation in progress and the other shards sent
- // whitelisted errors after a first attempt, retry the entire command.
- for (let shard in res.raw) {
- let shardRes = res.raw[shard];
- if (shardRes.ok) {
- continue;
- }
-
- if (hasBackgroundOpInProgress(shardRes)) {
- shardsWithBackgroundOps.push(shard);
- continue;
- }
-
- // If any of the shards return an error that is not whitelisted or even if a
- // whitelisted error is received on the first attempt, do not retry.
- let acceptableErrors = acceptableCommandErrors[commandName] || [];
- if (!acceptableErrors.includes(shardRes.code)) {
- return kNoRetry;
- }
- // Whitelisted errors can only occur from running a command more than once, so
- // it would be unexpected to receive an error on the first attempt.
- if (attempt === 1) {
- return kNoRetry;
- }
+ // If any of the shards return an error that is not whitelisted or even if a
+ // whitelisted error is received on the first attempt, do not retry.
+ let acceptableErrors = acceptableCommandErrors[commandName] || [];
+ if (!acceptableErrors.includes(shardRes.code)) {
+ return kNoRetry;
}
-
-            // At this point, every shard has returned a whitelisted error for the retried
-            // whitelisted command, so fake a successful response.
- if (shardsWithBackgroundOps.length === 0) {
- print("done retrying " + commandName +
- " command because all shards have responded with acceptable errors");
- res.ok = 1;
+ // Whitelisted errors can only occur from running a command more than once, so
+ // it would be unexpected to receive an error on the first attempt.
+ if (attempt === 1) {
return kNoRetry;
}
-
- print(message + " on shards: " + tojson(shardsWithBackgroundOps));
- return kRetry;
- },
- () => "Timed out while retrying command '" + tojson(commandObj) + "', response: " +
- tojson(res),
- kTimeout,
- kInterval);
- return res;
- }
-
- OverrideHelpers.overrideRunCommand(runCommandWithRetries);
+ }
+
+            // At this point, every shard has returned a whitelisted error for the retried
+            // whitelisted command, so fake a successful response.
+ if (shardsWithBackgroundOps.length === 0) {
+ print("done retrying " + commandName +
+ " command because all shards have responded with acceptable errors");
+ res.ok = 1;
+ return kNoRetry;
+ }
+
+ print(message + " on shards: " + tojson(shardsWithBackgroundOps));
+ return kRetry;
+ },
+ () => "Timed out while retrying command '" + tojson(commandObj) +
+ "', response: " + tojson(res),
+ kTimeout,
+ kInterval);
+ return res;
+}
+
+OverrideHelpers.overrideRunCommand(runCommandWithRetries);
})();
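
The retry loop above hinges on assert.soon() re-running its predicate until it returns
true, with kNoRetry/kRetry naming the two outcomes. A minimal, self-contained sketch
against an arbitrary collMod command:

    const kNoRetry = true;
    const kRetry = false;
    let res;
    assert.soon(
        () => {
            res = db.runCommand({collMod: "mycoll"});  // illustrative command
            if (res.ok === 1) {
                return kNoRetry;  // success: stop looping
            }
            if (res.code === ErrorCodes.BackgroundOperationInProgressForNamespace ||
                res.code === ErrorCodes.BackgroundOperationInProgressForDatabase) {
                return kRetry;  // transient: try again after the interval
            }
            return kNoRetry;  // non-retryable: let the caller inspect res
        },
        () => "Timed out retrying collMod, last response: " + tojson(res),
        10 * 60 * 1000,  // timeout
        200);            // interval between attempts
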
diff --git a/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js b/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js
index c605f9336d6..534d52a76f3 100644
--- a/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js
+++ b/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js
@@ -3,178 +3,175 @@
* "DatabaseDropPending" error response are automatically retried until they succeed.
*/
(function() {
- "use strict";
-
- const defaultTimeout = 10 * 60 * 1000;
-
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
-
- function awaitLatestOperationMajorityConfirmed(primary) {
- // Get the latest optime from the primary.
- const replSetStatus = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}),
- "error getting replication status from primary");
- const primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
- assert(primaryInfo !== undefined,
- "failed to find self in replication status: " + tojson(replSetStatus));
-
- // Wait for all operations until 'primaryInfo.optime' to be applied by a majority of the
- // replica set.
- assert.commandWorked( //
- primary.adminCommand({
- getLastError: 1,
- w: "majority",
- wtimeout: defaultTimeout,
- wOpTime: primaryInfo.optime,
- }),
- "error awaiting replication");
+"use strict";
+
+const defaultTimeout = 10 * 60 * 1000;
+
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
+
+function awaitLatestOperationMajorityConfirmed(primary) {
+ // Get the latest optime from the primary.
+ const replSetStatus = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}),
+ "error getting replication status from primary");
+ const primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
+ assert(primaryInfo !== undefined,
+ "failed to find self in replication status: " + tojson(replSetStatus));
+
+ // Wait for all operations until 'primaryInfo.optime' to be applied by a majority of the
+ // replica set.
+ assert.commandWorked( //
+ primary.adminCommand({
+ getLastError: 1,
+ w: "majority",
+ wtimeout: defaultTimeout,
+ wOpTime: primaryInfo.optime,
+ }),
+ "error awaiting replication");
+}
+
+function runCommandWithRetries(conn, dbName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
}
- function runCommandWithRetries(conn, dbName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
-
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller specified.
- // Instead, we use the makeFuncArgs() function to build the array of arguments to 'func' by
- // giving it the 'commandObj' that should be used. This is done to work around the
- // difference in the order of parameters for the Mongo.prototype.runCommand() and
- // Mongo.prototype.runCommandWithMetadata() functions.
- commandObj = Object.assign({}, commandObj);
- const commandName = Object.keys(commandObj)[0];
- let resPrevious;
- let res;
-
- assert.soon(
- () => {
- resPrevious = res;
- res = func.apply(conn, makeFuncArgs(commandObj));
-
- if (commandName === "insert" || commandName === "update") {
- let opsExecuted;
- const opsToRetry = [];
-
-                    // We merge the statistics returned by the server about the number of documents
- // inserted and updated.
- if (commandName === "insert") {
- // We make 'commandObj.documents' refer to 'opsToRetry' to consolidate the
- // logic for how we retry insert and update operations.
- opsExecuted = commandObj.documents;
- commandObj.documents = opsToRetry;
-
- if (resPrevious !== undefined) {
- res.n += resPrevious.n;
- }
- } else if (commandName === "update") {
- // We make 'commandObj.updates' refer to 'opsToRetry' to consolidate the
- // logic for how we retry insert and update operations.
- opsExecuted = commandObj.updates;
- commandObj.updates = opsToRetry;
-
- // The 'upserted' property isn't defined in the response if there weren't
- // any documents upserted, but we define it as an empty array for
- // convenience when merging results from 'resPrevious'.
- res.upserted = res.upserted || [];
-
- if (resPrevious !== undefined) {
- res.n += resPrevious.n;
- res.nModified += resPrevious.nModified;
-
- // We translate the 'upsertInfo.index' back to its index in the original
-                            // operations that were sent to the server by finding the object's
- // reference (i.e. using strict-equality) in 'originalOps'.
- for (let upsertInfo of res.upserted) {
- upsertInfo.index =
- originalOps.indexOf(opsToRetry[upsertInfo.index]);
- }
-
- res.upserted.push(...resPrevious.upserted);
- }
- }
-
- if (res.ok !== 1 || !res.hasOwnProperty("writeErrors")) {
- // If the operation succeeded or failed for another reason, then we simply
- // return and let the caller deal with the response.
- return true;
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller specified.
+ // Instead, we use the makeFuncArgs() function to build the array of arguments to 'func' by
+ // giving it the 'commandObj' that should be used. This is done to work around the
+ // difference in the order of parameters for the Mongo.prototype.runCommand() and
+ // Mongo.prototype.runCommandWithMetadata() functions.
+ commandObj = Object.assign({}, commandObj);
+ const commandName = Object.keys(commandObj)[0];
+ let resPrevious;
+ let res;
+
+ assert.soon(
+ () => {
+ resPrevious = res;
+ res = func.apply(conn, makeFuncArgs(commandObj));
+
+ if (commandName === "insert" || commandName === "update") {
+ let opsExecuted;
+ const opsToRetry = [];
+
+                // We merge the statistics returned by the server about the number of documents
+ // inserted and updated.
+ if (commandName === "insert") {
+ // We make 'commandObj.documents' refer to 'opsToRetry' to consolidate the
+ // logic for how we retry insert and update operations.
+ opsExecuted = commandObj.documents;
+ commandObj.documents = opsToRetry;
+
+ if (resPrevious !== undefined) {
+ res.n += resPrevious.n;
}
-
- for (let writeError of res.writeErrors) {
- if (writeError.code !== ErrorCodes.DatabaseDropPending) {
- // If the operation failed for a reason other than a
- // "DatabaseDropPending" error response, then we simply return and let
- // the caller deal with the response.
- return true;
+ } else if (commandName === "update") {
+ // We make 'commandObj.updates' refer to 'opsToRetry' to consolidate the
+ // logic for how we retry insert and update operations.
+ opsExecuted = commandObj.updates;
+ commandObj.updates = opsToRetry;
+
+ // The 'upserted' property isn't defined in the response if there weren't
+ // any documents upserted, but we define it as an empty array for
+ // convenience when merging results from 'resPrevious'.
+ res.upserted = res.upserted || [];
+
+ if (resPrevious !== undefined) {
+ res.n += resPrevious.n;
+ res.nModified += resPrevious.nModified;
+
+ // We translate the 'upsertInfo.index' back to its index in the original
+                    // operations that were sent to the server by finding the object's
+ // reference (i.e. using strict-equality) in 'originalOps'.
+ for (let upsertInfo of res.upserted) {
+ upsertInfo.index = originalOps.indexOf(opsToRetry[upsertInfo.index]);
}
- }
- // We filter out operations that didn't produce a write error to avoid causing a
- // duplicate key error when retrying the operations. We cache the error message
- // for the assertion below to avoid the expense of serializing the server's
- // response as a JSON string repeatedly. (There may be up to 1000 write errors
- // in the server's response.)
- const errorMsg =
- "A write error was returned for an operation outside the list of" +
- " operations executed: " + tojson(res);
-
- for (let writeError of res.writeErrors) {
- assert.lt(writeError.index, opsExecuted.length, errorMsg);
- opsToRetry.push(opsExecuted[writeError.index]);
+ res.upserted.push(...resPrevious.upserted);
}
- } else if (res.ok === 1 || res.code !== ErrorCodes.DatabaseDropPending) {
- return true;
}
- let msg = commandName + " command";
- if (commandName !== "insert" && commandName !== "update") {
- // We intentionally omit the command object in the diagnostic message for
- // "insert" and "update" commands being retried to avoid printing a large blob
- // and hurting readability of the logs.
- msg += " " + tojsononeline(commandObj);
- }
-
- msg += " failed due to the " + dbName + " database being marked as drop-pending." +
- " Waiting for the latest operation to become majority confirmed before trying" +
- " again.";
- print(msg);
-
- // We wait for the primary's latest operation to become majority confirmed.
- // However, we may still need to retry more than once because the primary may not
- // yet have generated the oplog entry for the "dropDatabase" operation while it is
- // dropping each intermediate collection.
- awaitLatestOperationMajorityConfirmed(conn);
-
- if (TestData.skipDropDatabaseOnDatabaseDropPending &&
- commandName === "dropDatabase") {
- // We avoid retrying the "dropDatabase" command when another "dropDatabase"
- // command was already in progress for the database. This reduces the likelihood
- // that other clients would observe another DatabaseDropPending error response
- // when they go to retry, and therefore reduces the risk that repeatedly
- // retrying an individual operation would take longer than the 'defaultTimeout'
- // period.
- res = {ok: 1, dropped: dbName};
+ if (res.ok !== 1 || !res.hasOwnProperty("writeErrors")) {
+ // If the operation succeeded or failed for another reason, then we simply
+ // return and let the caller deal with the response.
return true;
}
- },
- "timed out while retrying '" + commandName +
- "' operation on DatabaseDropPending error response for '" + dbName + "' database",
- defaultTimeout);
- return res;
- }
+ for (let writeError of res.writeErrors) {
+ if (writeError.code !== ErrorCodes.DatabaseDropPending) {
+ // If the operation failed for a reason other than a
+ // "DatabaseDropPending" error response, then we simply return and let
+ // the caller deal with the response.
+ return true;
+ }
+ }
- Mongo.prototype.runCommand = function(dbName, commandObj, options) {
- return runCommandWithRetries(this,
- dbName,
- commandObj,
- mongoRunCommandOriginal,
- (commandObj) => [dbName, commandObj, options]);
- };
-
- Mongo.prototype.runCommandWithMetadata = function(dbName, metadata, commandArgs) {
- return runCommandWithRetries(this,
- dbName,
- commandArgs,
- mongoRunCommandWithMetadataOriginal,
- (commandArgs) => [dbName, metadata, commandArgs]);
- };
+ // We filter out operations that didn't produce a write error to avoid causing a
+ // duplicate key error when retrying the operations. We cache the error message
+ // for the assertion below to avoid the expense of serializing the server's
+ // response as a JSON string repeatedly. (There may be up to 1000 write errors
+ // in the server's response.)
+ const errorMsg = "A write error was returned for an operation outside the list of" +
+ " operations executed: " + tojson(res);
+
+ for (let writeError of res.writeErrors) {
+ assert.lt(writeError.index, opsExecuted.length, errorMsg);
+ opsToRetry.push(opsExecuted[writeError.index]);
+ }
+ } else if (res.ok === 1 || res.code !== ErrorCodes.DatabaseDropPending) {
+ return true;
+ }
+
+ let msg = commandName + " command";
+ if (commandName !== "insert" && commandName !== "update") {
+ // We intentionally omit the command object in the diagnostic message for
+ // "insert" and "update" commands being retried to avoid printing a large blob
+ // and hurting readability of the logs.
+ msg += " " + tojsononeline(commandObj);
+ }
+
+ msg += " failed due to the " + dbName + " database being marked as drop-pending." +
+ " Waiting for the latest operation to become majority confirmed before trying" +
+ " again.";
+ print(msg);
+
+ // We wait for the primary's latest operation to become majority confirmed.
+ // However, we may still need to retry more than once because the primary may not
+ // yet have generated the oplog entry for the "dropDatabase" operation while it is
+ // dropping each intermediate collection.
+ awaitLatestOperationMajorityConfirmed(conn);
+
+ if (TestData.skipDropDatabaseOnDatabaseDropPending && commandName === "dropDatabase") {
+ // We avoid retrying the "dropDatabase" command when another "dropDatabase"
+ // command was already in progress for the database. This reduces the likelihood
+ // that other clients would observe another DatabaseDropPending error response
+ // when they go to retry, and therefore reduces the risk that repeatedly
+ // retrying an individual operation would take longer than the 'defaultTimeout'
+ // period.
+ res = {ok: 1, dropped: dbName};
+ return true;
+ }
+ },
+ "timed out while retrying '" + commandName +
+ "' operation on DatabaseDropPending error response for '" + dbName + "' database",
+ defaultTimeout);
+
+ return res;
+}
+
+Mongo.prototype.runCommand = function(dbName, commandObj, options) {
+ return runCommandWithRetries(this,
+ dbName,
+ commandObj,
+ mongoRunCommandOriginal,
+ (commandObj) => [dbName, commandObj, options]);
+};
+
+Mongo.prototype.runCommandWithMetadata = function(dbName, metadata, commandArgs) {
+ return runCommandWithRetries(this,
+ dbName,
+ commandArgs,
+ mongoRunCommandWithMetadataOriginal,
+ (commandArgs) => [dbName, metadata, commandArgs]);
+};
})();
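
The subtle part of this override is merging partial write results across retries. A
simplified sketch of the insert case, assuming every entry in res.writeErrors failed
with DatabaseDropPending:

    // Re-send only the documents whose writes failed; documents that were
    // already inserted must not be retried, or they would hit duplicate key
    // errors on the next attempt.
    const opsToRetry = [];
    for (let writeError of res.writeErrors) {
        opsToRetry.push(commandObj.documents[writeError.index]);
    }
    commandObj.documents = opsToRetry;

    // On the following attempt, fold the previous attempt's insert count into
    // the new response so the caller sees one cumulative result.
    if (resPrevious !== undefined) {
        res.n += resPrevious.n;
    }
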
diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
index a6cb5a6c2a0..fc83df394cb 100644
--- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
+++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
@@ -34,190 +34,189 @@ const ImplicitlyShardAccessCollSettings = (function() {
})();
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'.
+load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'.
- // Save a reference to the original methods in the IIFE's scope.
- // This scoping allows the original methods to be called by the overrides below.
- var originalGetCollection = DB.prototype.getCollection;
- var originalDBCollectionDrop = DBCollection.prototype.drop;
- var originalStartParallelShell = startParallelShell;
- var originalRunCommand = Mongo.prototype.runCommand;
+// Save a reference to the original methods in the IIFE's scope.
+// This scoping allows the original methods to be called by the overrides below.
+var originalGetCollection = DB.prototype.getCollection;
+var originalDBCollectionDrop = DBCollection.prototype.drop;
+var originalStartParallelShell = startParallelShell;
+var originalRunCommand = Mongo.prototype.runCommand;
- var testMayRunDropInParallel = false;
+var testMayRunDropInParallel = false;
- // Blacklisted namespaces that should not be sharded.
- var blacklistedNamespaces = [
- /\$cmd/,
- /^admin\./,
- /^config\./,
- /\.system\./,
- ];
+// Blacklisted namespaces that should not be sharded.
+var blacklistedNamespaces = [
+ /\$cmd/,
+ /^admin\./,
+ /^config\./,
+ /\.system\./,
+];
- const kZoneName = 'moveToHereForMigrationPassthrough';
+const kZoneName = 'moveToHereForMigrationPassthrough';
- function shardCollection(collection) {
- var db = collection.getDB();
- var dbName = db.getName();
- var fullName = collection.getFullName();
+function shardCollection(collection) {
+ var db = collection.getDB();
+ var dbName = db.getName();
+ var fullName = collection.getFullName();
- for (var ns of blacklistedNamespaces) {
- if (fullName.match(ns)) {
- return;
- }
+ for (var ns of blacklistedNamespaces) {
+ if (fullName.match(ns)) {
+ return;
}
+ }
- var res = db.adminCommand({enableSharding: dbName});
-
- // enableSharding may only be called once for a database.
- if (res.code !== ErrorCodes.AlreadyInitialized) {
- assert.commandWorked(res, "enabling sharding on the '" + dbName + "' db failed");
- }
+ var res = db.adminCommand({enableSharding: dbName});
- res = db.adminCommand(
- {shardCollection: fullName, key: {_id: 'hashed'}, collation: {locale: "simple"}});
-
- let checkResult = function(res, opDescription) {
- if (res.ok === 0 && testMayRunDropInParallel) {
- // We ignore ConflictingOperationInProgress error responses from the
- // "shardCollection" command if it's possible the test was running a "drop" command
- // concurrently. We could retry running the "shardCollection" command, but tests
- // that are likely to trigger this case are also likely running the "drop" command
- // in a loop. We therefore just let the test continue with the collection being
- // unsharded.
- assert.commandFailedWithCode(res, ErrorCodes.ConflictingOperationInProgress);
- jsTest.log("Ignoring failure while " + opDescription +
- " due to a concurrent drop operation: " + tojson(res));
- } else {
- assert.commandWorked(res, opDescription + " failed");
- }
- };
-
- checkResult(res, 'shard ' + fullName);
-
-        // Set the entire chunk range to a single zone, so the balancer will be forced to move
-        // the evenly distributed chunks to a shard (selected at random).
- if (res.ok === 1 &&
- ImplicitlyShardAccessCollSettings.getMode() ===
- ImplicitlyShardAccessCollSettings.Modes.kHashedMoveToSingleShard) {
- let shardName =
- db.getSiblingDB('config').shards.aggregate([{$sample: {size: 1}}]).toArray()[0]._id;
-
- checkResult(db.adminCommand({addShardToZone: shardName, zone: kZoneName}),
- 'add ' + shardName + ' to zone ' + kZoneName);
- checkResult(db.adminCommand({
- updateZoneKeyRange: fullName,
- min: {_id: MinKey},
- max: {_id: MaxKey},
- zone: kZoneName
- }),
- 'set zone for ' + fullName);
-
- // Wake up the balancer.
- checkResult(db.adminCommand({balancerStart: 1}), 'turn on balancer');
- }
+ // enableSharding may only be called once for a database.
+ if (res.code !== ErrorCodes.AlreadyInitialized) {
+ assert.commandWorked(res, "enabling sharding on the '" + dbName + "' db failed");
}
- DB.prototype.getCollection = function() {
- var collection = originalGetCollection.apply(this, arguments);
-
- // The following "collStats" command can behave unexpectedly when running in a causal
- // consistency suite with secondary read preference. "collStats" does not support causal
- // consistency, making it possible to see a stale view of the collection if run on a
- // secondary, potentially causing shardCollection() to be called when it shouldn't.
- // E.g. if the collection has just been sharded but not yet visible on the
- // secondary, we could end up calling shardCollection on it again, which would fail.
- //
- // The workaround is to use a TestData flag to temporarily bypass the read preference
- // override.
- const testDataDoNotOverrideReadPreferenceOriginal = TestData.doNotOverrideReadPreference;
- let collStats;
-
- try {
- TestData.doNotOverrideReadPreference = true;
- collStats = this.runCommand({collStats: collection.getName()});
- } finally {
- TestData.doNotOverrideReadPreference = testDataDoNotOverrideReadPreferenceOriginal;
- }
-
- // If the collection is already sharded or is non-empty, do not attempt to shard.
- if (collStats.sharded || collStats.count > 0) {
- return collection;
+ res = db.adminCommand(
+ {shardCollection: fullName, key: {_id: 'hashed'}, collation: {locale: "simple"}});
+
+ let checkResult = function(res, opDescription) {
+ if (res.ok === 0 && testMayRunDropInParallel) {
+ // We ignore ConflictingOperationInProgress error responses from the
+ // "shardCollection" command if it's possible the test was running a "drop" command
+ // concurrently. We could retry running the "shardCollection" command, but tests
+ // that are likely to trigger this case are also likely running the "drop" command
+ // in a loop. We therefore just let the test continue with the collection being
+ // unsharded.
+ assert.commandFailedWithCode(res, ErrorCodes.ConflictingOperationInProgress);
+ jsTest.log("Ignoring failure while " + opDescription +
+ " due to a concurrent drop operation: " + tojson(res));
+ } else {
+ assert.commandWorked(res, opDescription + " failed");
}
+ };
- // Attempt to enable sharding on database and collection if not already done.
- shardCollection(collection);
+ checkResult(res, 'shard ' + fullName);
+
+    // Set the entire chunk range to a single zone, so the balancer will be forced to move the
+    // evenly distributed chunks to a shard (selected at random).
+ if (res.ok === 1 &&
+ ImplicitlyShardAccessCollSettings.getMode() ===
+ ImplicitlyShardAccessCollSettings.Modes.kHashedMoveToSingleShard) {
+ let shardName =
+ db.getSiblingDB('config').shards.aggregate([{$sample: {size: 1}}]).toArray()[0]._id;
+
+ checkResult(db.adminCommand({addShardToZone: shardName, zone: kZoneName}),
+ 'add ' + shardName + ' to zone ' + kZoneName);
+ checkResult(db.adminCommand({
+ updateZoneKeyRange: fullName,
+ min: {_id: MinKey},
+ max: {_id: MaxKey},
+ zone: kZoneName
+ }),
+ 'set zone for ' + fullName);
+
+ // Wake up the balancer.
+ checkResult(db.adminCommand({balancerStart: 1}), 'turn on balancer');
+ }
+}
+
+DB.prototype.getCollection = function() {
+ var collection = originalGetCollection.apply(this, arguments);
+
+ // The following "collStats" command can behave unexpectedly when running in a causal
+ // consistency suite with secondary read preference. "collStats" does not support causal
+ // consistency, making it possible to see a stale view of the collection if run on a
+ // secondary, potentially causing shardCollection() to be called when it shouldn't.
+ // E.g. if the collection has just been sharded but not yet visible on the
+ // secondary, we could end up calling shardCollection on it again, which would fail.
+ //
+ // The workaround is to use a TestData flag to temporarily bypass the read preference
+ // override.
+ const testDataDoNotOverrideReadPreferenceOriginal = TestData.doNotOverrideReadPreference;
+ let collStats;
+
+ try {
+ TestData.doNotOverrideReadPreference = true;
+ collStats = this.runCommand({collStats: collection.getName()});
+ } finally {
+ TestData.doNotOverrideReadPreference = testDataDoNotOverrideReadPreferenceOriginal;
+ }
+ // If the collection is already sharded or is non-empty, do not attempt to shard.
+ if (collStats.sharded || collStats.count > 0) {
return collection;
- };
+ }
- DBCollection.prototype.drop = function() {
- var dropResult = originalDBCollectionDrop.apply(this, arguments);
+ // Attempt to enable sharding on database and collection if not already done.
+ shardCollection(collection);
- // Attempt to enable sharding on database and collection if not already done.
- shardCollection(this);
+ return collection;
+};
- return dropResult;
- };
+DBCollection.prototype.drop = function() {
+ var dropResult = originalDBCollectionDrop.apply(this, arguments);
- // The mapReduce command has a special requirement where the command must indicate the output
- // collection is sharded, so we must be sure to add this information in this passthrough.
- Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
- // Skip any commands that are not mapReduce or do not have an 'out' option.
- if (typeof cmdObj !== 'object' || cmdObj === null ||
- (!cmdObj.hasOwnProperty('mapreduce') && !cmdObj.hasOwnProperty('mapReduce')) ||
- !cmdObj.hasOwnProperty('out')) {
- return originalRunCommand.apply(this, arguments);
- }
+ // Attempt to enable sharding on database and collection if not already done.
+ shardCollection(this);
- const originalCmdObj = Object.merge({}, cmdObj);
+ return dropResult;
+};
- // SERVER-5448 'jsMode' is not supported through mongos. The 'jsMode' should not impact the
-        // results at all, so it can be safely deleted in the sharded environment.
- delete cmdObj.jsMode;
+// The mapReduce command has a special requirement where the command must indicate the output
+// collection is sharded, so we must be sure to add this information in this passthrough.
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ // Skip any commands that are not mapReduce or do not have an 'out' option.
+ if (typeof cmdObj !== 'object' || cmdObj === null ||
+ (!cmdObj.hasOwnProperty('mapreduce') && !cmdObj.hasOwnProperty('mapReduce')) ||
+ !cmdObj.hasOwnProperty('out')) {
+ return originalRunCommand.apply(this, arguments);
+ }
- // Modify the output options to specify that the collection is sharded.
- let outputSpec = cmdObj.out;
- if (typeof(outputSpec) === "string") {
- this.getDB(dbName)[outputSpec].drop(); // This will implicitly shard it.
- outputSpec = {replace: outputSpec, sharded: true};
- } else if (typeof(outputSpec) !== "object") {
- // This is a malformed command, just send it along.
- return originalRunCommand.apply(this, arguments);
- } else if (!outputSpec.hasOwnProperty("sharded")) {
- let outputColl = null;
- if (outputSpec.hasOwnProperty("replace")) {
- outputColl = outputSpec.replace;
- } else if (outputSpec.hasOwnProperty("merge")) {
- outputColl = outputSpec.merge;
- } else if (outputSpec.hasOwnProperty("reduce")) {
- outputColl = outputSpec.reduce;
- }
+ const originalCmdObj = Object.merge({}, cmdObj);
- if (outputColl === null) {
- // This is a malformed command, just send it along.
- return originalRunCommand.apply(this, arguments);
- }
- this.getDB(dbName)[outputColl].drop(); // This will implicitly shard it.
- outputSpec.sharded = true;
- }
+ // SERVER-5448 'jsMode' is not supported through mongos. The 'jsMode' should not impact the
+ // results at all, so can be safely deleted in the sharded environment.
+ delete cmdObj.jsMode;
- cmdObj.out = outputSpec;
- jsTestLog('Overriding mapReduce command. Original command: ' + tojson(originalCmdObj) +
- ' New command: ' + tojson(cmdObj));
+ // Modify the output options to specify that the collection is sharded.
+ let outputSpec = cmdObj.out;
+ if (typeof (outputSpec) === "string") {
+ this.getDB(dbName)[outputSpec].drop(); // This will implicitly shard it.
+ outputSpec = {replace: outputSpec, sharded: true};
+ } else if (typeof (outputSpec) !== "object") {
+ // This is a malformed command, just send it along.
return originalRunCommand.apply(this, arguments);
- };
-
- // Tests may use a parallel shell to run the "drop" command concurrently with other
- // operations. This can cause the "shardCollection" command to return a
- // ConflictingOperationInProgress error response.
- startParallelShell = function() {
- testMayRunDropInParallel = true;
- return originalStartParallelShell.apply(this, arguments);
- };
+ } else if (!outputSpec.hasOwnProperty("sharded")) {
+ let outputColl = null;
+ if (outputSpec.hasOwnProperty("replace")) {
+ outputColl = outputSpec.replace;
+ } else if (outputSpec.hasOwnProperty("merge")) {
+ outputColl = outputSpec.merge;
+ } else if (outputSpec.hasOwnProperty("reduce")) {
+ outputColl = outputSpec.reduce;
+ }
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
+ if (outputColl === null) {
+ // This is a malformed command, just send it along.
+ return originalRunCommand.apply(this, arguments);
+ }
+ this.getDB(dbName)[outputColl].drop(); // This will implicitly shard it.
+ outputSpec.sharded = true;
+ }
+ cmdObj.out = outputSpec;
+ jsTestLog('Overriding mapReduce command. Original command: ' + tojson(originalCmdObj) +
+ ' New command: ' + tojson(cmdObj));
+ return originalRunCommand.apply(this, arguments);
+};
+
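To make the 'out' rewrite above concrete, a hedged sketch of just the normalization step as a hypothetical pure function (it omits the drop of the target collection that the override performs to implicitly shard it):

    // Hypothetical: return the sharded equivalent of a mapReduce 'out' option,
    // or null when the spec is malformed and should be forwarded untouched.
    function shardedOutSpec(out) {
        if (typeof out === "string") {
            return {replace: out, sharded: true};
        }
        if (typeof out !== "object" || out === null) {
            return null;  // malformed
        }
        if (!out.hasOwnProperty("sharded")) {
            if (!out.replace && !out.merge && !out.reduce) {
                return null;  // no recognizable output collection; also malformed
            }
            out.sharded = true;
        }
        return out;
    }
    // shardedOutSpec("target")     -> {replace: "target", sharded: true}
    // shardedOutSpec({merge: "t"}) -> {merge: "t", sharded: true}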
+// Tests may use a parallel shell to run the "drop" command concurrently with other
+// operations. This can cause the "shardCollection" command to return a
+// ConflictingOperationInProgress error response.
+startParallelShell = function() {
+ testMayRunDropInParallel = true;
+ return originalStartParallelShell.apply(this, arguments);
+};
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
}());
diff --git a/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js b/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js
index 84da15b1b8f..55bc6f36f06 100644
--- a/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js
+++ b/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js
@@ -4,73 +4,72 @@
* yield the same results, but stress the logic of the $facet stage.
*/
(function() {
- 'use strict';
+'use strict';
- // Set the batch size of the $facet stage's buffer to be lower. This will further stress the
- // batching logic, since most pipelines will fall below the default size of 100MB.
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryFacetBufferSizeBytes: 1000}));
+// Lower the $facet stage's buffer size. This further stresses the batching logic, since most
+// pipelines would otherwise fall well below the default 100MB buffer.
+assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryFacetBufferSizeBytes: 1000}));
- // Save a reference to the original runCommand method in the IIFE's scope.
- // This scoping allows the original method to be called by the override below.
- var originalRunCommand = Mongo.prototype.runCommand;
+// Save a reference to the original runCommand method in the IIFE's scope.
+// This scoping allows the original method to be called by the override below.
+var originalRunCommand = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
- // Skip wrapping the pipeline in a $facet stage if it's not an aggregation, or if it's
- // possibly an invalid one without a pipeline.
- if (typeof cmdObj !== 'object' || cmdObj === null || !cmdObj.hasOwnProperty('aggregate') ||
- !cmdObj.hasOwnProperty('pipeline') || !Array.isArray(cmdObj.pipeline)) {
- return originalRunCommand.apply(this, arguments);
- }
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ // Skip wrapping the pipeline in a $facet stage if it's not an aggregation, or if it's
+ // possibly an invalid one without a pipeline.
+ if (typeof cmdObj !== 'object' || cmdObj === null || !cmdObj.hasOwnProperty('aggregate') ||
+ !cmdObj.hasOwnProperty('pipeline') || !Array.isArray(cmdObj.pipeline)) {
+ return originalRunCommand.apply(this, arguments);
+ }
- var originalPipeline = cmdObj.pipeline;
+ var originalPipeline = cmdObj.pipeline;
+
+ if (originalPipeline.length === 0) {
+ // Empty pipelines are disallowed within a $facet stage.
+ print('Not wrapping empty pipeline in a $facet stage');
+ return originalRunCommand.apply(this, arguments);
+ }
- if (originalPipeline.length === 0) {
- // Empty pipelines are disallowed within a $facet stage.
- print('Not wrapping empty pipeline in a $facet stage');
+ const stagesDisallowedInsideFacet =
+ ['$changeStream', '$collStats', '$facet', '$geoNear', '$indexStats', '$merge', '$out'];
+ for (let stageSpec of originalPipeline) {
+ // Skip wrapping the pipeline in a $facet stage if it has an invalid stage
+ // specification.
+ if (typeof stageSpec !== 'object' || stageSpec === null) {
+ print('Not wrapping invalid pipeline in a $facet stage');
return originalRunCommand.apply(this, arguments);
}
- const stagesDisallowedInsideFacet =
- ['$changeStream', '$collStats', '$facet', '$geoNear', '$indexStats', '$merge', '$out'];
- for (let stageSpec of originalPipeline) {
- // Skip wrapping the pipeline in a $facet stage if it has an invalid stage
- // specification.
- if (typeof stageSpec !== 'object' || stageSpec === null) {
- print('Not wrapping invalid pipeline in a $facet stage');
+ if (stageSpec.hasOwnProperty('$match') && typeof stageSpec.$match === 'object' &&
+ stageSpec.$match !== null) {
+ if (stageSpec.$match.hasOwnProperty('$text')) {
+ // A $text search is disallowed within a $facet stage.
+ print('Not wrapping $text in a $facet stage');
return originalRunCommand.apply(this, arguments);
}
-
- if (stageSpec.hasOwnProperty('$match') && typeof stageSpec.$match === 'object' &&
- stageSpec.$match !== null) {
- if (stageSpec.$match.hasOwnProperty('$text')) {
- // A $text search is disallowed within a $facet stage.
- print('Not wrapping $text in a $facet stage');
- return originalRunCommand.apply(this, arguments);
- }
- if (Object.keys(stageSpec.$match).length === 0) {
- // Skip wrapping an empty $match stage, since it can be optimized out, resulting
- // in an empty pipeline which is disallowed within a $facet stage.
- print('Not wrapping empty $match in a $facet stage');
- return originalRunCommand.apply(this, arguments);
- }
+ if (Object.keys(stageSpec.$match).length === 0) {
+ // Skip wrapping an empty $match stage, since it can be optimized out, resulting
+ // in an empty pipeline which is disallowed within a $facet stage.
+ print('Not wrapping empty $match in a $facet stage');
+ return originalRunCommand.apply(this, arguments);
}
+ }
- // Skip wrapping the pipeline in a $facet stage if it contains a stage disallowed inside
- // a $facet.
- for (let disallowedStage of stagesDisallowedInsideFacet) {
- if (stageSpec.hasOwnProperty(disallowedStage)) {
- print('Not wrapping ' + disallowedStage + ' in a $facet stage');
- return originalRunCommand.apply(this, arguments);
- }
+ // Skip wrapping the pipeline in a $facet stage if it contains a stage disallowed inside
+ // a $facet.
+ for (let disallowedStage of stagesDisallowedInsideFacet) {
+ if (stageSpec.hasOwnProperty(disallowedStage)) {
+ print('Not wrapping ' + disallowedStage + ' in a $facet stage');
+ return originalRunCommand.apply(this, arguments);
}
}
+ }
- cmdObj.pipeline = [
- {$facet: {originalPipeline: originalPipeline}},
- {$unwind: '$originalPipeline'},
- {$replaceRoot: {newRoot: '$originalPipeline'}},
- ];
- return originalRunCommand.apply(this, arguments);
- };
+ cmdObj.pipeline = [
+ {$facet: {originalPipeline: originalPipeline}},
+ {$unwind: '$originalPipeline'},
+ {$replaceRoot: {newRoot: '$originalPipeline'}},
+ ];
+ return originalRunCommand.apply(this, arguments);
+};
}());
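Why the wrapped pipeline is result-preserving, on a hypothetical example: $facet collects every result of the inner pipeline into the 'originalPipeline' array, $unwind emits those elements one at a time, and $replaceRoot promotes each back to the document root, so the client sees the same stream of documents (subject to the facet buffer limit being stressed above):

    // Hypothetical pipeline legal inside $facet...
    const original = [{$match: {x: {$gt: 0}}}, {$project: {x: 1}}];
    // ...and the equivalent wrapped form produced by the override.
    const wrapped = [
        {$facet: {originalPipeline: original}},
        {$unwind: '$originalPipeline'},
        {$replaceRoot: {newRoot: '$originalPipeline'}},
    ];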
diff --git a/jstests/libs/override_methods/mongos_manual_intervention_actions.js b/jstests/libs/override_methods/mongos_manual_intervention_actions.js
index 802778b6ec1..fb0a7080585 100644
--- a/jstests/libs/override_methods/mongos_manual_intervention_actions.js
+++ b/jstests/libs/override_methods/mongos_manual_intervention_actions.js
@@ -41,7 +41,7 @@ var ManualInterventionActions = (function() {
", dropping the collection, and retrying the command.");
removeChunks(mongosConn, ns);
- const[dbName, collName] = ns.split(".");
+ const [dbName, collName] = ns.split(".");
assert.commandWorked(
mongosConn.getDB(dbName).runCommand({"drop": collName, writeConcern: {w: "majority"}}));
};
@@ -51,64 +51,63 @@ var ManualInterventionActions = (function() {
(function() {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
- const cmdName = Object.keys(cmdObj)[0];
- const commandsToRetry =
- new Set(["mapReduce", "mapreduce", "shardCollection", "shardcollection"]);
+Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
+ const cmdName = Object.keys(cmdObj)[0];
+ const commandsToRetry =
+ new Set(["mapReduce", "mapreduce", "shardCollection", "shardcollection"]);
- if (!commandsToRetry.has(cmdName)) {
- return mongoRunCommandOriginal.apply(this, arguments);
- }
+ if (!commandsToRetry.has(cmdName)) {
+ return mongoRunCommandOriginal.apply(this, arguments);
+ }
+
+ const maxAttempts = 10;
+ let numAttempts = 0;
+ let res;
- const maxAttempts = 10;
- let numAttempts = 0;
- let res;
+ while (numAttempts < maxAttempts) {
+ res = mongoRunCommandOriginal.apply(this, arguments);
+ ++numAttempts;
- while (numAttempts < maxAttempts) {
- res = mongoRunCommandOriginal.apply(this, arguments);
- ++numAttempts;
+ if (res.ok === 1 || res.code !== ErrorCodes.ManualInterventionRequired ||
+ numAttempts === maxAttempts) {
+ break;
+ }
- if (res.ok === 1 || res.code !== ErrorCodes.ManualInterventionRequired ||
- numAttempts === maxAttempts) {
+ print("Manual intervention retry attempt# " + numAttempts +
+ " because of error: " + tojson(res));
+
+ if (cmdName === "shardCollection" || cmdName === "shardcollection") {
+ const ns = cmdObj[cmdName];
+ ManualInterventionActions.removePartiallyWrittenChunks(this, ns, cmdObj, numAttempts);
+ } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
+ const out = cmdObj.out;
+
+ // The output collection can be specified as a string argument to the mapReduce
+ // command's 'out' option, or nested under 'out.replace', 'out.merge', or
+ // 'out.reduce'.
+ let outCollName;
+ if (typeof out === "string") {
+ outCollName = out;
+ } else if (typeof out === "object") {
+ outCollName = out.replace || out.merge || out.reduce;
+ } else {
+ print("Could not parse the output collection's name from 'out' option in " +
+ tojson(cmdObj) + "; not retrying on ManualInterventionRequired error " +
+ tojson(res));
break;
}
- print("Manual intervention retry attempt# " + numAttempts + " because of error: " +
- tojson(res));
-
- if (cmdName === "shardCollection" || cmdName === "shardcollection") {
- const ns = cmdObj[cmdName];
- ManualInterventionActions.removePartiallyWrittenChunks(
- this, ns, cmdObj, numAttempts);
- } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
- const out = cmdObj.out;
-
- // The output collection can be specified as a string argument to the mapReduce
- // command's 'out' option, or nested under 'out.replace', 'out.merge', or
- // 'out.reduce'.
- let outCollName;
- if (typeof out === "string") {
- outCollName = out;
- } else if (typeof out === "object") {
- outCollName = out.replace || out.merge || out.reduce;
- } else {
- print("Could not parse the output collection's name from 'out' option in " +
- tojson(cmdObj) + "; not retrying on ManualInterventionRequired error " +
- tojson(res));
- break;
- }
-
- // The output collection's database can optionally be specified under 'out.db',
- // else it defaults to the input collection's database.
- const outDbName = out.db || dbName;
-
- const ns = outDbName + "." + outCollName;
- ManualInterventionActions.removePartiallyWrittenChunksAndDropCollection(
- this, ns, cmdObj, numAttempts);
- }
+ // The output collection's database can optionally be specified under 'out.db',
+ // else it defaults to the input collection's database.
+ const outDbName = out.db || dbName;
+
+ const ns = outDbName + "." + outCollName;
+ ManualInterventionActions.removePartiallyWrittenChunksAndDropCollection(
+ this, ns, cmdObj, numAttempts);
}
- return res;
- };
+ }
+ return res;
+};
})();
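The retry shape above, bounded attempts, retrying only on ManualInterventionRequired, with per-command cleanup between attempts, can be sketched on its own. Hypothetical names throughout; assumes the shell's ErrorCodes global:

    // Hypothetical reduction of the bounded-retry loop above.
    function runWithManualInterventionRetry(runOnce, cleanup, maxAttempts = 10) {
        let res;
        for (let attempt = 1; attempt <= maxAttempts; ++attempt) {
            res = runOnce();
            if (res.ok === 1 || res.code !== ErrorCodes.ManualInterventionRequired ||
                attempt === maxAttempts) {
                break;  // success, a non-retryable error, or out of attempts
            }
            cleanup(res, attempt);  // e.g. remove partially written chunks
        }
        return res;
    }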
diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js
index 440f10d3c50..56cea366daa 100644
--- a/jstests/libs/override_methods/network_error_and_txn_override.js
+++ b/jstests/libs/override_methods/network_error_and_txn_override.js
@@ -26,1092 +26,1078 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/error_code_utils.js");
- load('jstests/libs/override_methods/override_helpers.js');
- load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
- load("jstests/libs/retryable_writes_util.js");
- load("jstests/libs/transactions_util.js");
-
- // Truncates the 'print' output if it's too long to print.
- const kMaxPrintLength = 5000;
- const kNumPrintEndChars = kMaxPrintLength / 2;
- const originalPrint = print;
- print = function(msg) {
- if (typeof msg !== "string") {
- originalPrint(msg);
- return;
- }
-
- const len = msg.length;
- if (len <= kMaxPrintLength) {
- originalPrint(msg);
- return;
- }
-
- originalPrint(
- `${msg.substr(0, kNumPrintEndChars)}...${msg.substr(len - kNumPrintEndChars)}`);
- };
-
- function configuredForNetworkRetry() {
- assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
- return TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors &&
- !jsTest.options().skipRetryOnNetworkError;
- }
-
- function configuredForTxnOverride() {
- assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
- return TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions;
+"use strict";
+
+load("jstests/libs/error_code_utils.js");
+load('jstests/libs/override_methods/override_helpers.js');
+load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
+load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/transactions_util.js");
+
+// Truncates the 'print' output if it's too long to print.
+const kMaxPrintLength = 5000;
+const kNumPrintEndChars = kMaxPrintLength / 2;
+const originalPrint = print;
+print = function(msg) {
+ if (typeof msg !== "string") {
+ originalPrint(msg);
+ return;
}
- // Commands assumed to not be blindly retryable.
- const kNonRetryableCommands = new Set([
- // Commands that take write concern and do not support txnNumbers.
- "_configsvrAddShard",
- "_configsvrAddShardToZone",
- "_configsvrCommitChunkMerge",
- "_configsvrCommitChunkMigration",
- "_configsvrCommitChunkSplit",
- "_configsvrCreateDatabase",
- "_configsvrEnableSharding",
- "_configsvrMoveChunk",
- "_configsvrMovePrimary",
- "_configsvrRemoveShard",
- "_configsvrRemoveShardFromZone",
- "_configsvrShardCollection",
- "_configsvrUpdateZoneKeyRange",
- "_mergeAuthzCollections",
- "_recvChunkStart",
- "appendOplogNote",
- "applyOps",
- "captrunc",
- "cleanupOrphaned",
- "clone",
- "cloneCollection",
- "cloneCollectionAsCapped",
- "collMod",
- "convertToCapped",
- "create",
- "createIndexes",
- "createRole",
- "createUser",
- "deleteIndexes",
- "drop",
- "dropAllRolesFromDatabase",
- "dropAllUsersFromDatabase",
- "dropDatabase",
- "dropIndexes",
- "dropRole",
- "dropUser",
- "emptycapped",
- "godinsert",
- "grantPrivilegesToRole",
- "grantRolesToRole",
- "grantRolesToUser",
- "mapreduce.shardedfinish",
- "moveChunk",
- "renameCollection",
- "revokePrivilegesFromRole",
- "revokeRolesFromRole",
- "revokeRolesFromUser",
- "updateRole",
- "updateUser",
- ]);
-
- // These commands are not idempotent because they return errors if retried after successfully
- // completing (like IndexNotFound, NamespaceExists, etc.), but because they only take effect
- // once, and many tests use them to set up state, their errors on retries are handled specially.
- const kAcceptableNonRetryableCommands = new Set([
- "create",
- "createIndexes",
- "deleteIndexes",
- "drop",
- "dropDatabase", // Already ignores NamespaceNotFound errors, so not handled below.
- "dropIndexes",
- ]);
-
- // Returns if the given failed response is a safe response to ignore when retrying the
- // given command type.
- function isAcceptableRetryFailedResponse(cmdName, res) {
- assert(!res.ok, res);
- return ((cmdName === "create" && res.code === ErrorCodes.NamespaceExists) ||
- (cmdName === "createIndexes" && res.code === ErrorCodes.IndexAlreadyExists) ||
- (cmdName === "drop" && res.code === ErrorCodes.NamespaceNotFound) ||
- ((cmdName === "dropIndexes" || cmdName === "deleteIndexes") &&
- res.code === ErrorCodes.IndexNotFound));
+ const len = msg.length;
+ if (len <= kMaxPrintLength) {
+ originalPrint(msg);
+ return;
}
- const kCmdsThatInsert = new Set([
- 'insert',
- 'update',
- 'findAndModify',
- 'findandmodify',
- ]);
-
- // Commands that may return different values or fail if retried on a new primary after a
- // failover.
- const kNonFailoverTolerantCommands = new Set([
- "currentOp", // Failovers can change currentOp output.
- "getLog", // The log is different on different servers.
- "killOp", // Failovers may interrupt operations intended to be killed later in the test.
- "logRotate",
- "planCacheClear", // The plan cache isn't replicated.
- "planCacheClearFilters",
- "planCacheListFilters",
- "planCacheListPlans",
- "planCacheListQueryShapes",
- "planCacheSetFilter",
- "profile", // Not replicated, so can't tolerate failovers.
- "setParameter", // Not replicated, so can't tolerate failovers.
- "stageDebug",
- "startSession", // Sessions are flushed to disk asynchronously.
- ]);
-
- function isCommitOrAbort(cmdName) {
- return cmdName === "commitTransaction" || cmdName === "abortTransaction";
+ originalPrint(`${msg.substr(0, kNumPrintEndChars)}...${msg.substr(len - kNumPrintEndChars)}`);
+};
+
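The truncation above keeps the first and last kNumPrintEndChars characters of an oversized message. A standalone sketch of the same arithmetic (hypothetical function name):

    function truncateForPrint(msg, maxLen = 5000) {
        if (typeof msg !== "string" || msg.length <= maxLen) {
            return msg;
        }
        const keep = maxLen / 2;  // characters kept at each end
        return `${msg.substr(0, keep)}...${msg.substr(msg.length - keep)}`;
    }
    // truncateForPrint("a".repeat(6000)).length === 5003  (2500 + "..." + 2500)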
+function configuredForNetworkRetry() {
+ assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
+ return TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors &&
+ !jsTest.options().skipRetryOnNetworkError;
+}
+
+function configuredForTxnOverride() {
+ assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
+ return TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions;
+}
+
+// Commands assumed not to be blindly retryable.
+const kNonRetryableCommands = new Set([
+ // Commands that take write concern and do not support txnNumbers.
+ "_configsvrAddShard",
+ "_configsvrAddShardToZone",
+ "_configsvrCommitChunkMerge",
+ "_configsvrCommitChunkMigration",
+ "_configsvrCommitChunkSplit",
+ "_configsvrCreateDatabase",
+ "_configsvrEnableSharding",
+ "_configsvrMoveChunk",
+ "_configsvrMovePrimary",
+ "_configsvrRemoveShard",
+ "_configsvrRemoveShardFromZone",
+ "_configsvrShardCollection",
+ "_configsvrUpdateZoneKeyRange",
+ "_mergeAuthzCollections",
+ "_recvChunkStart",
+ "appendOplogNote",
+ "applyOps",
+ "captrunc",
+ "cleanupOrphaned",
+ "clone",
+ "cloneCollection",
+ "cloneCollectionAsCapped",
+ "collMod",
+ "convertToCapped",
+ "create",
+ "createIndexes",
+ "createRole",
+ "createUser",
+ "deleteIndexes",
+ "drop",
+ "dropAllRolesFromDatabase",
+ "dropAllUsersFromDatabase",
+ "dropDatabase",
+ "dropIndexes",
+ "dropRole",
+ "dropUser",
+ "emptycapped",
+ "godinsert",
+ "grantPrivilegesToRole",
+ "grantRolesToRole",
+ "grantRolesToUser",
+ "mapreduce.shardedfinish",
+ "moveChunk",
+ "renameCollection",
+ "revokePrivilegesFromRole",
+ "revokeRolesFromRole",
+ "revokeRolesFromUser",
+ "updateRole",
+ "updateUser",
+]);
+
+// These commands are not idempotent: they return errors (like IndexNotFound, NamespaceExists,
+// etc.) if retried after completing successfully. But because they only take effect once, and
+// many tests use them to set up state, their errors on retries are handled specially.
+const kAcceptableNonRetryableCommands = new Set([
+ "create",
+ "createIndexes",
+ "deleteIndexes",
+ "drop",
+ "dropDatabase", // Already ignores NamespaceNotFound errors, so not handled below.
+ "dropIndexes",
+]);
+
+// Returns whether the given failed response is safe to ignore when retrying the given
+// command type.
+function isAcceptableRetryFailedResponse(cmdName, res) {
+ assert(!res.ok, res);
+ return ((cmdName === "create" && res.code === ErrorCodes.NamespaceExists) ||
+ (cmdName === "createIndexes" && res.code === ErrorCodes.IndexAlreadyExists) ||
+ (cmdName === "drop" && res.code === ErrorCodes.NamespaceNotFound) ||
+ ((cmdName === "dropIndexes" || cmdName === "deleteIndexes") &&
+ res.code === ErrorCodes.IndexNotFound));
+}
+
+const kCmdsThatInsert = new Set([
+ 'insert',
+ 'update',
+ 'findAndModify',
+ 'findandmodify',
+]);
+
+// Commands that may return different values or fail if retried on a new primary after a
+// failover.
+const kNonFailoverTolerantCommands = new Set([
+ "currentOp", // Failovers can change currentOp output.
+ "getLog", // The log is different on different servers.
+ "killOp", // Failovers may interrupt operations intended to be killed later in the test.
+ "logRotate",
+ "planCacheClear", // The plan cache isn't replicated.
+ "planCacheClearFilters",
+ "planCacheListFilters",
+ "planCacheListPlans",
+ "planCacheListQueryShapes",
+ "planCacheSetFilter",
+ "profile", // Not replicated, so can't tolerate failovers.
+ "setParameter", // Not replicated, so can't tolerate failovers.
+ "stageDebug",
+ "startSession", // Sessions are flushed to disk asynchronously.
+]);
+
+function isCommitOrAbort(cmdName) {
+ return cmdName === "commitTransaction" || cmdName === "abortTransaction";
+}
+
+function isCmdInTransaction(cmdObj) {
+ return cmdObj.hasOwnProperty("autocommit");
+}
+
+// Returns whether the given command on the given database can retry network errors.
+function canRetryNetworkErrorForCommand(cmdName, cmdObj) {
+ if (!configuredForNetworkRetry()) {
+ return false;
}
- function isCmdInTransaction(cmdObj) {
- return cmdObj.hasOwnProperty("autocommit");
+ if (isCmdInTransaction(cmdObj)) {
+ // Commands in transactions cannot be retried at the statement level, except for the
+ // commit and abort.
+ return isCommitOrAbort(cmdName);
}
- // Returns if the given command on the given database can retry network errors.
- function canRetryNetworkErrorForCommand(cmdName, cmdObj) {
- if (!configuredForNetworkRetry()) {
- return false;
+ return true;
+}
+
+// Several commands that use the plan executor swallow the actual error code from a failed plan
+// into their error message and instead return OperationFailed.
+//
+// TODO SERVER-32208: Remove this function once it is no longer needed.
+function isRetryableExecutorCodeAndMessage(code, msg) {
+ return code === ErrorCodes.OperationFailed && typeof msg !== "undefined" &&
+ msg.indexOf("InterruptedDueToReplStateChange") >= 0;
+}
+
+// Returns true if the given response could have come from shardCollection being interrupted by
+// a failover.
+function isRetryableShardCollectionResponse(res) {
+ // shardCollection can bury the original error code in the error message.
+ return RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg) ||
+ // shardCollection creates collections on each shard that will receive a chunk using
+ // _cloneCollectionsOptionsFromPrimaryShard, which may fail with either of the following
+ // codes if interrupted by a failover.
+ res.code === ErrorCodes.CallbackCanceled || res.code === 17405;
+}
+
+function hasError(res) {
+ return res.ok !== 1 || res.writeErrors;
+}
+
+function hasWriteConcernError(res) {
+ return res.hasOwnProperty("writeConcernError");
+}
+
+// Tracks whether the current command is being run in a network retry. This applies only to
+// retries that this file initiates, not ones that the retryable writes machinery initiates.
+let inCommandNetworkErrorRetry = false;
+
+// "Command ID" is an identifier for a given command being overridden. This is to track what log
+// messages come from what commands. This override is highly recursive and this is helpful for
+// debugging that recursion and following what commands initiated other commands.
+let currentCommandID = [];
+let newestCommandID = 0;
+
+// The "nesting level" specifies if this is a top level command or a command being recursively
+// run by the override itself.
+let nestingLevel = 0;
+function isNested() {
+ assert.gt(nestingLevel, 0);
+ return nestingLevel !== 1;
+}
+
+// An object that tracks the current stmtId and txnNumber of the most recently run transaction.
+let txnOptions = {
+ stmtId: new NumberInt(0),
+ txnNumber: new NumberLong(-1),
+};
+
+// Array to hold pairs of (dbName, cmdObj) that will be iterated over when retrying an entire
+// transaction.
+let ops = [];
+function clearOpsList() {
+ ops = [];
+}
+
+// The (initially empty) set of cursors belonging to aggregation operations that executed
+// outside of a transaction. Any getMore operations on these cursors must also execute outside
+// of a transaction. The set stores key/value pairs where the key is a cursor id and the
+// value is the boolean true.
+let nonTxnAggCursorSet = {};
+
+// Set the max number of operations to run in a transaction. Once we've hit this number of
+// operations, we will commit the transaction. This is to prevent having to retry an extremely
+// long-running transaction.
+const maxOpsInTransaction = 10;
+
+const kLogPrefix = "=-=-=-=";
+
+function logErrorFull(msg, cmdName, cmdObj, res) {
+ print(`${kLogPrefix} ${msg} :: ${cmdName}, CommandID: ${currentCommandID},` +
+ ` error: ${tojsononeline(res)}, command: ${tojsononeline(cmdObj)}`);
+ assert.eq(nestingLevel, currentCommandID.length);
+}
+
+function logMsgFull(msgHeader, msgFooter) {
+ print(`${kLogPrefix} ${msgHeader} :: CommandID: ${currentCommandID}, msg: ${msgFooter}`);
+ assert.eq(nestingLevel, currentCommandID.length);
+}
+
+// Validate the command before running it, to prevent tests with non-retryable commands
+// from being run.
+function validateCmdNetworkErrorCompatibility(cmdName, cmdObj) {
+ assert(!inCommandNetworkErrorRetry);
+ assert(!isNested());
+
+ const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
+ const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
+ const logSuffix = " CmdName: " + cmdName + ", CmdObj: " + tojson(cmdObj);
+
+ if (isRetryableWriteCmd && !canRetryWrites) {
+ throw new Error("Refusing to run a test that issues non-retryable write operations" +
+ " since the test likely makes assertions on the write results and" +
+ " can lead to spurious failures if a network error occurs." + logSuffix);
+ } else if (cmdName === "getMore") {
+ throw new Error(
+ "Refusing to run a test that issues a getMore command since if a network error" +
+ " occurs during it then we won't know whether the cursor was advanced or not." +
+ logSuffix);
+ } else if (kNonRetryableCommands.has(cmdName) &&
+ !kAcceptableNonRetryableCommands.has(cmdName)) {
+ throw new Error(
+ "Refusing to run a test that issues commands that are not blindly retryable, " +
+ logSuffix);
+ } else if (kNonFailoverTolerantCommands.has(cmdName)) {
+ throw new Error(
+ "Refusing to run a test that issues commands that may return different values" +
+ " after a failover, " + logSuffix);
+ } else if (cmdName === "aggregate") {
+ var stages = cmdObj.pipeline;
+
+ // $listLocalSessions must be the first stage in the pipeline.
+ const firstStage =
+ stages && Array.isArray(stages) && (stages.length > 0) ? stages[0] : undefined;
+ const hasListLocalStage = firstStage && (typeof firstStage === "object") &&
+ firstStage.hasOwnProperty("$listLocalSessions");
+ if (hasListLocalStage) {
+ throw new Error("Refusing to run a test that issues an aggregation command with" +
+ " $listLocalSessions because it relies on in-memory" +
+ " state that may not survive failovers." + logSuffix);
}
- if (isCmdInTransaction(cmdObj)) {
- // Commands in transactions cannot be retried at the statement level, except for the
- // commit and abort.
- return isCommitOrAbort(cmdName);
+ // Aggregate can be either a read or a write depending on whether it has a $out stage.
+ // $out is required to be the last stage of the pipeline.
+ const lastStage = stages && Array.isArray(stages) && (stages.length !== 0)
+ ? stages[stages.length - 1]
+ : undefined;
+ const hasOut =
+ lastStage && (typeof lastStage === "object") && lastStage.hasOwnProperty("$out");
+ if (hasOut) {
+ throw new Error("Refusing to run a test that issues an aggregation command" +
+ " with $out because it is not retryable." + logSuffix);
}
- return true;
- }
-
- // Several commands that use the plan executor swallow the actual error code from a failed plan
- // into their error message and instead return OperationFailed.
- //
- // TODO SERVER-32208: Remove this function once it is no longer needed.
- function isRetryableExecutorCodeAndMessage(code, msg) {
- return code === ErrorCodes.OperationFailed && typeof msg !== "undefined" &&
- msg.indexOf("InterruptedDueToReplStateChange") >= 0;
- }
-
- // Returns true if the given response could have come from shardCollection being interrupted by
- // a failover.
- function isRetryableShardCollectionResponse(res) {
- // shardCollection can bury the original error code in the error message.
- return RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg) ||
- // shardCollection creates collections on each shard that will receive a chunk using
- // _cloneCollectionsOptionsFromPrimaryShard, which may fail with either of the following
- // codes if interrupted by a failover.
- res.code === ErrorCodes.CallbackCanceled || res.code === 17405;
- }
-
- function hasError(res) {
- return res.ok !== 1 || res.writeErrors;
- }
-
- function hasWriteConcernError(res) {
- return res.hasOwnProperty("writeConcernError");
- }
-
- // Tracks if the current command is being run in a network retry. This is specifically for
- // retries that this file initiates, not ones that retryable writes initiates.
- let inCommandNetworkErrorRetry = false;
-
- // "Command ID" is an identifier for a given command being overridden. This is to track what log
- // messages come from what commands. This override is highly recursive and this is helpful for
- // debugging that recursion and following what commands initiated other commands.
- let currentCommandID = [];
- let newestCommandID = 0;
-
- // The "nesting level" specifies if this is a top level command or a command being recursively
- // run by the override itself.
- let nestingLevel = 0;
- function isNested() {
- assert.gt(nestingLevel, 0);
- return nestingLevel !== 1;
- }
-
- // An object that tracks the current stmtId and txnNumber of the most recently run transaction.
- let txnOptions = {
- stmtId: new NumberInt(0),
- txnNumber: new NumberLong(-1),
- };
-
- // Array to hold pairs of (dbName, cmdObj) that will be iterated over when retrying an entire
- // transaction.
- let ops = [];
- function clearOpsList() {
- ops = [];
- }
-
- // The (initially empty) set of cursors belonging to aggregation operations that executed
- // outside of a transaction. Any getMore operations on these cursors must also execute outside
- // of a transaction. The set stores key/value pairs where the key is a cursor id and the value
- // is the true boolean value.
- let nonTxnAggCursorSet = {};
-
- // Set the max number of operations to run in a transaction. Once we've hit this number of
- // operations, we will commit the transaction. This is to prevent having to retry an extremely
- // long running transaction.
- const maxOpsInTransaction = 10;
-
- const kLogPrefix = "=-=-=-=";
-
- function logErrorFull(msg, cmdName, cmdObj, res) {
- print(`${kLogPrefix} ${msg} :: ${cmdName}, CommandID: ${currentCommandID},` +
- ` error: ${tojsononeline(res)}, command: ${tojsononeline(cmdObj)}`);
- assert.eq(nestingLevel, currentCommandID.length);
- }
-
- function logMsgFull(msgHeader, msgFooter) {
- print(`${kLogPrefix} ${msgHeader} :: CommandID: ${currentCommandID}, msg: ${msgFooter}`);
- assert.eq(nestingLevel, currentCommandID.length);
- }
-
- // Validate the command before running it, to prevent tests with non-retryable commands
- // from being run.
- function validateCmdNetworkErrorCompatibility(cmdName, cmdObj) {
- assert(!inCommandNetworkErrorRetry);
- assert(!isNested());
-
- const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
- const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
- const logSuffix = " CmdName: " + cmdName + ", CmdObj: " + tojson(cmdObj);
-
- if (isRetryableWriteCmd && !canRetryWrites) {
- throw new Error("Refusing to run a test that issues non-retryable write operations" +
- " since the test likely makes assertions on the write results and" +
- " can lead to spurious failures if a network error occurs." +
- logSuffix);
- } else if (cmdName === "getMore") {
- throw new Error(
- "Refusing to run a test that issues a getMore command since if a network error" +
- " occurs during it then we won't know whether the cursor was advanced or not." +
- logSuffix);
- } else if (kNonRetryableCommands.has(cmdName) &&
- !kAcceptableNonRetryableCommands.has(cmdName)) {
+ const hasExplain = cmdObj.hasOwnProperty("explain");
+ if (hasExplain) {
throw new Error(
- "Refusing to run a test that issues commands that are not blindly retryable, " +
+ "Refusing to run a test that issues an aggregation command with explain" +
+ " because it may return incomplete results if interrupted by a stepdown." +
logSuffix);
- } else if (kNonFailoverTolerantCommands.has(cmdName)) {
- throw new Error(
- "Refusing to run a test that issues commands that may return different values" +
- " after a failover, " + logSuffix);
- } else if (cmdName === "aggregate") {
- var stages = cmdObj.pipeline;
-
- // $listLocalSessions must be the first stage in the pipeline.
- const firstStage =
- stages && Array.isArray(stages) && (stages.length > 0) ? stages[0] : undefined;
- const hasListLocalStage = firstStage && (typeof firstStage === "object") &&
- firstStage.hasOwnProperty("$listLocalSessions");
- if (hasListLocalStage) {
- throw new Error("Refusing to run a test that issues an aggregation command with" +
- " $listLocalSessions because it relies on in-memory" +
- " state that may not survive failovers." + logSuffix);
- }
-
- // Aggregate can be either a read or a write depending on whether it has a $out stage.
- // $out is required to be the last stage of the pipeline.
- const lastStage = stages && Array.isArray(stages) && (stages.length !== 0)
- ? stages[stages.length - 1]
- : undefined;
- const hasOut =
- lastStage && (typeof lastStage === "object") && lastStage.hasOwnProperty("$out");
- if (hasOut) {
- throw new Error("Refusing to run a test that issues an aggregation command" +
- " with $out because it is not retryable." + logSuffix);
- }
-
- const hasExplain = cmdObj.hasOwnProperty("explain");
- if (hasExplain) {
- throw new Error(
- "Refusing to run a test that issues an aggregation command with explain" +
- " because it may return incomplete results if interrupted by a stepdown." +
- logSuffix);
- }
- } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
- throw new Error(
- "Refusing to run a test that issues a mapReduce command, because it calls " +
- " std::terminate() if interrupted by a stepdown." + logSuffix);
}
+ } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
+ throw new Error(
+ "Refusing to run a test that issues a mapReduce command, because it calls " +
+ " std::terminate() if interrupted by a stepdown." + logSuffix);
+ }
+}
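As concrete (hypothetical) illustrations of what the validation above rejects, both of these calls would throw:

    // A top-level getMore: a network error mid-getMore leaves the cursor position unknown.
    // validateCmdNetworkErrorCompatibility("getMore",
    //                                      {getMore: NumberLong(123), collection: "c"});
    // An aggregate whose last stage is $out, which is not retryable.
    // validateCmdNetworkErrorCompatibility(
    //     "aggregate", {aggregate: "c", pipeline: [{$match: {}}, {$out: "target"}], cursor: {}});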
+
+// Default read concern level to use for transactions. Snapshot read concern is not supported in
+// sharded transactions when majority reads are disabled.
+const kDefaultTransactionReadConcernLevel =
+ TestData.hasOwnProperty("defaultTransactionReadConcernLevel")
+ ? TestData.defaultTransactionReadConcernLevel
+ : (TestData.enableMajorityReadConcern !== false ? "snapshot" : "local");
+
+const kDefaultTransactionWriteConcernW = TestData.hasOwnProperty("defaultTransactionWriteConcernW")
+ ? TestData.defaultTransactionWriteConcernW
+ : "majority";
+
+// Default read concern level to use for commands that are not transactions.
+const kDefaultReadConcernLevel = (function() {
+ if (TestData.hasOwnProperty("defaultReadConcernLevel")) {
+ return TestData.defaultReadConcernLevel;
}
- // Default read concern level to use for transactions. Snapshot read concern is not supported in
- // sharded transactions when majority reads are disabled.
- const kDefaultTransactionReadConcernLevel =
- TestData.hasOwnProperty("defaultTransactionReadConcernLevel")
- ? TestData.defaultTransactionReadConcernLevel
- : (TestData.enableMajorityReadConcern !== false ? "snapshot" : "local");
-
- const kDefaultTransactionWriteConcernW =
- TestData.hasOwnProperty("defaultTransactionWriteConcernW")
- ? TestData.defaultTransactionWriteConcernW
- : "majority";
-
- // Default read concern level to use for commands that are not transactions.
- const kDefaultReadConcernLevel = (function() {
- if (TestData.hasOwnProperty("defaultReadConcernLevel")) {
- return TestData.defaultReadConcernLevel;
- }
-
- // Use majority if the suite didn't specify a level, unless the variant doesn't support it.
- return TestData.enableMajorityReadConcern !== false ? "majority" : "local";
- })();
+ // Use majority if the suite didn't specify a level, unless the variant doesn't support it.
+ return TestData.enableMajorityReadConcern !== false ? "majority" : "local";
+})();
- // Default write concern w to use for both transactions and non-transactions.
- const kDefaultWriteConcernW = TestData.hasOwnProperty("defaultWriteConcernW")
- ? TestData.defaultWriteConcernW
- : "majority";
+// Default write concern w to use for both transactions and non-transactions.
+const kDefaultWriteConcernW =
+ TestData.hasOwnProperty("defaultWriteConcernW") ? TestData.defaultWriteConcernW : "majority";
- // Use a "signature" value that won't typically match a value assigned in normal use. This way
- // the wtimeout set by this override is distinguishable in the server logs.
- const kDefaultWtimeout = 5 * 60 * 1000 + 567;
+// Use a "signature" value that won't typically match a value assigned in normal use. This way
+// the wtimeout set by this override is distinguishable in the server logs.
+const kDefaultWtimeout = 5 * 60 * 1000 + 567;
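For reference, 5 * 60 * 1000 + 567 evaluates to 300567 ms: five minutes plus a deliberately odd 567 ms tail, so a wtimeout of 300567 appearing in server logs almost certainly came from this override.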
- function appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj) {
- let shouldForceReadConcern = kCommandsSupportingReadConcern.has(cmdName);
- let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(cmdName);
+function appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj) {
+ let shouldForceReadConcern = kCommandsSupportingReadConcern.has(cmdName);
+ let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(cmdName);
- if (isCmdInTransaction(cmdObj)) {
- shouldForceReadConcern = false;
- if (cmdObj.startTransaction === true) {
- shouldForceReadConcern = true;
- }
- if (!kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
- shouldForceWriteConcern = false;
- }
- } else if (cmdName === "aggregate") {
- if (OverrideHelpers.isAggregationWithListLocalSessionsStage(cmdName, cmdObj) ||
- OverrideHelpers.isAggregationWithChangeStreamStage(cmdName, cmdObj)) {
- // The $listLocalSessions stage can only be used with readConcern={level: "local"},
- // and the $changeStream stage can only be used with
- // readConcern={level: "majority"}.
- shouldForceReadConcern = false;
- }
-
- if (OverrideHelpers.isAggregationWithOutOrMergeStage(cmdName, cmdObj)) {
- // The $out stage can only be used with readConcern={level: "local"}.
- shouldForceReadConcern = false;
- } else {
- // A writeConcern can only be used with a $out stage.
- shouldForceWriteConcern = false;
- }
-
- if (cmdObj.explain) {
- // Attempting to specify a readConcern while explaining an aggregation would always
- // return an error prior to SERVER-30582 and it is otherwise only compatible with
- // readConcern={level: "local"}.
- shouldForceReadConcern = false;
- }
- } else if (OverrideHelpers.isMapReduceWithInlineOutput(cmdName, cmdObj)) {
- // A writeConcern can only be used with non-inline output.
+ if (isCmdInTransaction(cmdObj)) {
+ shouldForceReadConcern = false;
+ if (cmdObj.startTransaction === true) {
+ shouldForceReadConcern = true;
+ }
+ if (!kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
shouldForceWriteConcern = false;
}
+ } else if (cmdName === "aggregate") {
+ if (OverrideHelpers.isAggregationWithListLocalSessionsStage(cmdName, cmdObj) ||
+ OverrideHelpers.isAggregationWithChangeStreamStage(cmdName, cmdObj)) {
+ // The $listLocalSessions stage can only be used with readConcern={level: "local"},
+ // and the $changeStream stage can only be used with
+ // readConcern={level: "majority"}.
+ shouldForceReadConcern = false;
+ }
- // If we're retrying on network errors the write concern should already be majority.
- if ((cmdName === 'drop' || cmdName === 'convertToCapped') && configuredForTxnOverride() &&
- !configuredForNetworkRetry()) {
- // Convert all collection drops to w:majority so they won't prevent subsequent
- // operations in transactions from failing when failing to acquire collection locks.
- cmdObj.writeConcern =
- cmdObj.writeConcern || {w: "majority", wtimeout: kDefaultWtimeout};
+ if (OverrideHelpers.isAggregationWithOutOrMergeStage(cmdName, cmdObj)) {
+ // The $out stage can only be used with readConcern={level: "local"}.
+ shouldForceReadConcern = false;
+ } else {
+ // A writeConcern can only be used with a $out stage.
shouldForceWriteConcern = false;
}
- if (shouldForceReadConcern) {
- let readConcernLevel;
- if (cmdObj.startTransaction === true) {
- readConcernLevel = kDefaultTransactionReadConcernLevel;
- } else {
- readConcernLevel = kDefaultReadConcernLevel;
- }
+ if (cmdObj.explain) {
+ // Attempting to specify a readConcern while explaining an aggregation would always
+ // return an error prior to SERVER-30582 and it is otherwise only compatible with
+ // readConcern={level: "local"}.
+ shouldForceReadConcern = false;
+ }
+ } else if (OverrideHelpers.isMapReduceWithInlineOutput(cmdName, cmdObj)) {
+ // A writeConcern can only be used with non-inline output.
+ shouldForceWriteConcern = false;
+ }
- if (cmdObj.hasOwnProperty("readConcern") &&
- cmdObj.readConcern.hasOwnProperty("level") &&
- cmdObj.readConcern.level !== readConcernLevel) {
- throw new Error("refusing to override existing readConcern " +
- cmdObj.readConcern.level + " with readConcern " + readConcernLevel);
- } else {
- cmdObj.readConcern = {level: readConcernLevel};
- }
+ // If we're retrying on network errors the write concern should already be majority.
+ if ((cmdName === 'drop' || cmdName === 'convertToCapped') && configuredForTxnOverride() &&
+ !configuredForNetworkRetry()) {
+ // Convert all collection drops to w:majority so they won't prevent subsequent
+ // operations in transactions from failing when failing to acquire collection locks.
+ cmdObj.writeConcern = cmdObj.writeConcern || {w: "majority", wtimeout: kDefaultWtimeout};
+ shouldForceWriteConcern = false;
+ }
- // Only attach afterClusterTime if causal consistency is explicitly enabled. Note, it is
- // OK to send a readConcern with only afterClusterTime, which is interpreted as local
- // read concern by the server.
- if (TestData.hasOwnProperty("sessionOptions") &&
- TestData.sessionOptions.causalConsistency === true) {
- const driverSession = conn.getDB(dbName).getSession();
- const operationTime = driverSession.getOperationTime();
- if (operationTime !== undefined) {
- // The command object should always have a readConcern by this point.
- cmdObj.readConcern.afterClusterTime = operationTime;
- }
- }
+ if (shouldForceReadConcern) {
+ let readConcernLevel;
+ if (cmdObj.startTransaction === true) {
+ readConcernLevel = kDefaultTransactionReadConcernLevel;
+ } else {
+ readConcernLevel = kDefaultReadConcernLevel;
}
- if (shouldForceWriteConcern) {
- if (cmdObj.hasOwnProperty("writeConcern")) {
- let writeConcern = cmdObj.writeConcern;
- if (typeof writeConcern !== "object" || writeConcern === null ||
- (writeConcern.hasOwnProperty("w") &&
- bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcernW}) !== 0)) {
- throw new Error("Cowardly refusing to override write concern of command: " +
- tojson(cmdObj));
- }
- }
+ if (cmdObj.hasOwnProperty("readConcern") && cmdObj.readConcern.hasOwnProperty("level") &&
+ cmdObj.readConcern.level !== readConcernLevel) {
+ throw new Error("refusing to override existing readConcern " +
+ cmdObj.readConcern.level + " with readConcern " + readConcernLevel);
+ } else {
+ cmdObj.readConcern = {level: readConcernLevel};
+ }
- if (kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
- cmdObj.writeConcern = {
- w: kDefaultTransactionWriteConcernW,
- wtimeout: kDefaultWtimeout
- };
- } else {
- cmdObj.writeConcern = {w: kDefaultWriteConcernW, wtimeout: kDefaultWtimeout};
+ // Only attach afterClusterTime if causal consistency is explicitly enabled. Note, it is
+ // OK to send a readConcern with only afterClusterTime, which is interpreted as local
+ // read concern by the server.
+ if (TestData.hasOwnProperty("sessionOptions") &&
+ TestData.sessionOptions.causalConsistency === true) {
+ const driverSession = conn.getDB(dbName).getSession();
+ const operationTime = driverSession.getOperationTime();
+ if (operationTime !== undefined) {
+ // The command object should always have a readConcern by this point.
+ cmdObj.readConcern.afterClusterTime = operationTime;
}
}
}
- // Commits the given transaction. Throws on failure to commit.
- function commitTransaction(conn, lsid, txnNumber) {
- assert(configuredForTxnOverride());
- assert.gte(txnNumber, 0);
-
- logMsgFull('commitTransaction',
- `Committing transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
-
- // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
- // as needed.
- assert.commandWorked(conn.adminCommand({
- commitTransaction: 1,
- autocommit: false,
- lsid: lsid,
- txnNumber: txnNumber,
- }));
-
- // We've successfully committed the transaction, so we can forget the ops we've successfully
- // run.
- clearOpsList();
- }
-
- function abortTransaction(conn, lsid, txnNumber) {
- assert(configuredForTxnOverride());
- assert.gte(txnNumber, 0);
-
- logMsgFull('abortTransaction',
- `Aborting transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
-
- // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
- // as needed.
- const res = conn.adminCommand({
- abortTransaction: 1,
- autocommit: false,
- lsid: lsid,
- txnNumber: txnNumber,
- });
+ if (shouldForceWriteConcern) {
+ if (cmdObj.hasOwnProperty("writeConcern")) {
+ let writeConcern = cmdObj.writeConcern;
+ if (typeof writeConcern !== "object" || writeConcern === null ||
+ (writeConcern.hasOwnProperty("w") &&
+ bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcernW}) !== 0)) {
+ throw new Error("Cowardly refusing to override write concern of command: " +
+ tojson(cmdObj));
+ }
+ }
- // Transient transaction errors mean the transaction has aborted, so consider it a success.
- if (TransactionsUtil.isTransientTransactionError(res)) {
- return;
+ if (kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
+ cmdObj.writeConcern = {w: kDefaultTransactionWriteConcernW, wtimeout: kDefaultWtimeout};
+ } else {
+ cmdObj.writeConcern = {w: kDefaultWriteConcernW, wtimeout: kDefaultWtimeout};
}
- assert.commandWorked(res);
}
-
- function startNewTransaction(conn, cmdObj) {
- // Bump the txnNumber and reset the stmtId.
- txnOptions.txnNumber = new NumberLong(txnOptions.txnNumber + 1);
- txnOptions.stmtId = new NumberInt(1);
-
- // Used to communicate the txnNumber to unittests.
- TestData.currentTxnOverrideTxnNumber = txnOptions.txnNumber;
-
- cmdObj.startTransaction = true;
- return txnOptions.txnNumber;
+}
+
+// Commits the given transaction. Throws on failure to commit.
+function commitTransaction(conn, lsid, txnNumber) {
+ assert(configuredForTxnOverride());
+ assert.gte(txnNumber, 0);
+
+ logMsgFull('commitTransaction',
+ `Committing transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
+
+ // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
+ // as needed.
+ assert.commandWorked(conn.adminCommand({
+ commitTransaction: 1,
+ autocommit: false,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ }));
+
+ // We've successfully committed the transaction, so we can forget the ops we've successfully
+ // run.
+ clearOpsList();
+}
+
+function abortTransaction(conn, lsid, txnNumber) {
+ assert(configuredForTxnOverride());
+ assert.gte(txnNumber, 0);
+
+ logMsgFull('abortTransaction',
+ `Aborting transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
+
+ // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
+ // as needed.
+ const res = conn.adminCommand({
+ abortTransaction: 1,
+ autocommit: false,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ });
+
+ // Transient transaction errors mean the transaction has aborted, so consider it a success.
+ if (TransactionsUtil.isTransientTransactionError(res)) {
+ return;
}
-
- function calculateStmtIdInc(cmdName, cmdObj) {
- // Reserve the statement ids for batch writes.
- try {
- switch (cmdName) {
- case "insert":
- return cmdObj.documents.length;
- case "update":
- return cmdObj.updates.length;
- case "delete":
- return cmdObj.deletes.length;
- default:
- return 1;
- }
- } catch (e) {
- // Malformed command objects can cause errors to be thrown.
- return 1;
+ assert.commandWorked(res);
+}
+
+function startNewTransaction(conn, cmdObj) {
+ // Bump the txnNumber and reset the stmtId.
+ txnOptions.txnNumber = new NumberLong(txnOptions.txnNumber + 1);
+ txnOptions.stmtId = new NumberInt(1);
+
+ // Used to communicate the txnNumber to unittests.
+ TestData.currentTxnOverrideTxnNumber = txnOptions.txnNumber;
+
+ cmdObj.startTransaction = true;
+ return txnOptions.txnNumber;
+}
+
+function calculateStmtIdInc(cmdName, cmdObj) {
+ // Reserve the statement ids for batch writes.
+ try {
+ switch (cmdName) {
+ case "insert":
+ return cmdObj.documents.length;
+ case "update":
+ return cmdObj.updates.length;
+ case "delete":
+ return cmdObj.deletes.length;
+ default:
+ return 1;
}
+ } catch (e) {
+ // Malformed command objects can cause errors to be thrown.
+ return 1;
}
+}
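To make the statement-id reservation concrete, a few hypothetical invocations and the increments they return (continueTransaction below advances stmtId by exactly this amount after each statement):

    // calculateStmtIdInc("insert", {insert: "c", documents: [{}, {}, {}]})   -> 3
    // calculateStmtIdInc("update", {update: "c", updates: [{q: {}, u: {}}]}) -> 1
    // calculateStmtIdInc("find",   {find: "c"})                              -> 1 (default)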
- function continueTransaction(conn, dbName, cmdName, cmdObj) {
- cmdObj.txnNumber = txnOptions.txnNumber;
- cmdObj.stmtId = txnOptions.stmtId;
- cmdObj.autocommit = false;
+function continueTransaction(conn, dbName, cmdName, cmdObj) {
+ cmdObj.txnNumber = txnOptions.txnNumber;
+ cmdObj.stmtId = txnOptions.stmtId;
+ cmdObj.autocommit = false;
- // Bump the stmtId for the next statement. We do this after so that the stmtIds start at 1.
- txnOptions.stmtId = new NumberInt(txnOptions.stmtId + calculateStmtIdInc(cmdName, cmdObj));
+ // Bump the stmtId for the next statement. We do this afterwards so that the stmtIds start at 1.
+ txnOptions.stmtId = new NumberInt(txnOptions.stmtId + calculateStmtIdInc(cmdName, cmdObj));
- // This function expects to get a command without any read or write concern properties.
- assert(!cmdObj.hasOwnProperty('readConcern'), cmdObj);
- assert(!cmdObj.hasOwnProperty('writeConcern'), cmdObj);
+ // This function expects to get a command without any read or write concern properties.
+ assert(!cmdObj.hasOwnProperty('readConcern'), cmdObj);
+ assert(!cmdObj.hasOwnProperty('writeConcern'), cmdObj);
- // If this is the first time we are running this command, push it to the ops array.
- if (!isNested() && !inCommandNetworkErrorRetry) {
- // Make a copy so the command does not get changed by the test.
- const objCopy = TransactionsUtil.deepCopyObject({}, cmdObj);
+ // If this is the first time we are running this command, push it to the ops array.
+ if (!isNested() && !inCommandNetworkErrorRetry) {
+ // Make a copy so the command does not get changed by the test.
+ const objCopy = TransactionsUtil.deepCopyObject({}, cmdObj);
- // Empty transaction state that needs to be refreshed. The stmtId and startTransaction
- // fields shouldn't need to be refreshed.
- delete objCopy.txnNumber;
- delete objCopy.$clusterTime;
+ // Empty transaction state that needs to be refreshed. The stmtId and startTransaction
+ // fields shouldn't need to be refreshed.
+ delete objCopy.txnNumber;
+ delete objCopy.$clusterTime;
- ops.push({
- dbName: dbName,
- cmdObj: objCopy,
- });
- }
+ ops.push({
+ dbName: dbName,
+ cmdObj: objCopy,
+ });
}
-
- // Returns true iff a command is a "getMore" on a cursor that is in the `nonTxnAggCursorSet`
- // dictionary of cursors that were created outside of any transaction.
- function isCommandNonTxnGetMore(cmdName, cmdObj) {
- return cmdName === "getMore" && nonTxnAggCursorSet[cmdObj.getMore];
+}
+
+// Returns true iff a command is a "getMore" on a cursor that is in the `nonTxnAggCursorSet`
+// dictionary of cursors that were created outside of any transaction.
+function isCommandNonTxnGetMore(cmdName, cmdObj) {
+ return cmdName === "getMore" && nonTxnAggCursorSet[cmdObj.getMore];
+}
+
+function setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid) {
+ // We want to overwrite whatever read and write concern is already set.
+ delete cmdObj.readConcern;
+ delete cmdObj.writeConcern;
+
+ // If sessions are explicitly disabled for this command, we skip overriding it to
+ // use transactions.
+ const driverSession = conn.getDB(dbName).getSession();
+ const commandSupportsTransaction = TransactionsUtil.commandSupportsTxn(dbName, cmdName, cmdObj);
+ if (commandSupportsTransaction && driverSession.getSessionId() !== null &&
+ !isCommandNonTxnGetMore(cmdName, cmdObj)) {
+ if (isNested()) {
+ // Nested commands should never start a new transaction.
+ } else if (ops.length === 0) {
+ // We should never end a transaction on a getMore.
+ assert.neq(cmdName, "getMore", cmdObj);
+ startNewTransaction(conn, cmdObj);
+ } else if (cmdName === "getMore") {
+ // If the command is a getMore, we cannot consider ending the transaction.
+ } else if (ops.length >= maxOpsInTransaction) {
+ logMsgFull('setupTransactionCommand',
+ `Committing transaction ${txnOptions.txnNumber} on session` +
+ ` ${tojsononeline(lsid)} because we have hit max ops length`);
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
+ startNewTransaction(conn, cmdObj);
+ }
+ continueTransaction(conn, dbName, cmdName, cmdObj);
+
+ } else {
+ if (ops.length > 0 && !isNested()) {
+ logMsgFull('setupTransactionCommand',
+ `Committing transaction ${txnOptions.txnNumber} on session` +
+ ` ${tojsononeline(lsid)} to run a command that does not support` +
+ ` transactions: ${cmdName}`);
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
+ }
}
+ appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj);
+}
+
+// Retries the entire transaction without committing it. Returns immediately on an error with
+// the response from the failed command. This may recursively retry the entire transaction, in
+// which case parent retries are completed early.
+function retryEntireTransaction(conn, lsid) {
+ // Re-run every command in the ops array.
+ assert.gt(ops.length, 0);
+
+ // Keep track of what txnNumber this retry is attempting.
+ const retriedTxnNumber = startNewTransaction(conn, {"ignored object": 1});
+
+ logMsgFull('Retrying entire transaction',
+ `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)}`);
+ let res;
+ for (let op of ops) {
+ logMsgFull('Retrying op',
+ `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)},` +
+ ` db: ${op.dbName}, op: ${tojsononeline(op.cmdObj)}`);
+        // Running the command on conn will reenter from the top of `runCommandOverride`;
+ // individual statement retries will be suppressed by tracking nesting level.
+ res = conn.getDB(op.dbName).runCommand(op.cmdObj);
+
+ if (hasError(res) || hasWriteConcernError(res)) {
+ return res;
+ }
+ // Sanity check that we checked for an error correctly.
+ assert.commandWorked(res);
- function setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid) {
- // We want to overwrite whatever read and write concern is already set.
- delete cmdObj.readConcern;
- delete cmdObj.writeConcern;
-
- // If sessions are explicitly disabled for this command, we skip overriding it to
- // use transactions.
- const driverSession = conn.getDB(dbName).getSession();
- const commandSupportsTransaction =
- TransactionsUtil.commandSupportsTxn(dbName, cmdName, cmdObj);
- if (commandSupportsTransaction && driverSession.getSessionId() !== null &&
- !isCommandNonTxnGetMore(cmdName, cmdObj)) {
- if (isNested()) {
- // Nested commands should never start a new transaction.
- } else if (ops.length === 0) {
- // We should never end a transaction on a getMore.
- assert.neq(cmdName, "getMore", cmdObj);
- startNewTransaction(conn, cmdObj);
- } else if (cmdName === "getMore") {
- // If the command is a getMore, we cannot consider ending the transaction.
- } else if (ops.length >= maxOpsInTransaction) {
- logMsgFull('setupTransactionCommand',
- `Committing transaction ${txnOptions.txnNumber} on session` +
- ` ${tojsononeline(lsid)} because we have hit max ops length`);
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- startNewTransaction(conn, cmdObj);
- }
- continueTransaction(conn, dbName, cmdName, cmdObj);
-
- } else {
- if (ops.length > 0 && !isNested()) {
- logMsgFull('setupTransactionCommand',
- `Committing transaction ${txnOptions.txnNumber} on session` +
- ` ${tojsononeline(lsid)} to run a command that does not support` +
- ` transactions: ${cmdName}`);
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- }
+ // If we recursively retried the entire transaction, we do not want to continue this
+ // retry. We just pass up the response from the retry that completed.
+ if (txnOptions.txnNumber !== retriedTxnNumber) {
+ return res;
}
- appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj);
}
- // Retries the entire transaction without committing it. Returns immediately on an error with
-    // the response from the failed command. This may recursively retry the entire transaction, in
- // which case parent retries are completed early.
- function retryEntireTransaction(conn, lsid) {
- // Re-run every command in the ops array.
- assert.gt(ops.length, 0);
+ // We do not commit the transaction and let it continue in the next operation.
+ return res;
+}
+
+// Creates the given collection, retrying if needed. Throws on failure.
+function createCollectionExplicitly(conn, dbName, collName, lsid) {
+ logMsgFull(
+ 'create',
+ `Explicitly creating collection ${dbName}.${collName} and then retrying transaction`);
+
+ // Always majority commit the create because this is not expected to roll back once
+ // successful.
+ const createCmdObj = {
+ create: collName,
+ lsid: lsid,
+ writeConcern: {w: 'majority'},
+ };
- // Keep track of what txnNumber this retry is attempting.
- const retriedTxnNumber = startNewTransaction(conn, {"ignored object": 1});
-
- logMsgFull('Retrying entire transaction',
- `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)}`);
- let res;
- for (let op of ops) {
- logMsgFull('Retrying op',
- `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)},` +
- ` db: ${op.dbName}, op: ${tojsononeline(op.cmdObj)}`);
-            // Running the command on conn will reenter from the top of `runCommandOverride`;
- // individual statement retries will be suppressed by tracking nesting level.
- res = conn.getDB(op.dbName).runCommand(op.cmdObj);
-
- if (hasError(res) || hasWriteConcernError(res)) {
- return res;
- }
- // Sanity check that we checked for an error correctly.
- assert.commandWorked(res);
+ // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
+ // as needed. If an error returned by `create` were tolerable, it would already have been
+ // retried by the time it surfaced here.
+ assert.commandWorked(conn.getDB(dbName).runCommand(createCmdObj));
+}
+
+// Processes the response to the command if we are configured for txn override. Performs retries
+// if necessary for implicit collection creation or transient transaction errors.
+// Returns the last response received from the command or any retry of it.
+function retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError) {
+ assert(configuredForTxnOverride());
+
+ const failedOnCRUDStatement =
+ hasError(res) && !isCommitOrAbort(cmdName) && isCmdInTransaction(cmdObj);
+ if (failedOnCRUDStatement) {
+ assert.gt(ops.length, 0);
+ abortTransaction(conn, lsid, txnOptions.txnNumber);
+
+ // If the command inserted data and is not supported in a transaction, we assume it
+ // failed because the collection did not exist. We will create the collection and retry
+ // the entire transaction. We should not receive this error in this override for any
+ // other reason.
+ // Tests that expect collections to not exist will have to be skipped.
+ if (kCmdsThatInsert.has(cmdName) &&
+ includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction)) {
+ const collName = cmdObj[cmdName];
+ createCollectionExplicitly(conn, dbName, collName, lsid);
- // If we recursively retried the entire transaction, we do not want to continue this
- // retry. We just pass up the response from the retry that completed.
- if (txnOptions.txnNumber !== retriedTxnNumber) {
- return res;
- }
+ return retryEntireTransaction(conn, lsid);
}
- // We do not commit the transaction and let it continue in the next operation.
- return res;
+ // Transaction statements cannot be retried, but retryable codes are expected to succeed
+ // on full transaction retry.
+ if (configuredForNetworkRetry() && RetryableWritesUtil.isRetryableCode(res.code)) {
+ logError("Retrying on retryable error for transaction statement");
+ return retryEntireTransaction(conn, lsid);
+ }
}
- // Creates the given collection, retrying if needed. Throws on failure.
- function createCollectionExplicitly(conn, dbName, collName, lsid) {
- logMsgFull(
- 'create',
- `Explicitly creating collection ${dbName}.${collName} and then retrying transaction`);
-
- // Always majority commit the create because this is not expected to roll back once
- // successful.
- const createCmdObj = {
- create: collName,
- lsid: lsid,
- writeConcern: {w: 'majority'},
- };
+ // Transient transaction errors should retry the entire transaction. A
+ // TransientTransactionError on "abortTransaction" is considered a success.
+ if (TransactionsUtil.isTransientTransactionError(res) && cmdName !== "abortTransaction") {
+ logError("Retrying on TransientTransactionError response");
+ res = retryEntireTransaction(conn, lsid);
- // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
- // as needed. If an error returned by `create` were tolerable, it would already have been
- // retried by the time it surfaced here.
- assert.commandWorked(conn.getDB(dbName).runCommand(createCmdObj));
+        // If we got a TransientTransactionError on 'commitTransaction', retrying the transaction
+ // will not retry it, so we retry it here.
+ if (!hasError(res) && cmdName === "commitTransaction") {
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
+ }
+ return res;
}
- // Processes the response to the command if we are configured for txn override. Performs retries
- // if necessary for implicit collection creation or transient transaction errors.
-    // Returns the last response received from the command or any retry of it.
- function retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError) {
- assert(configuredForTxnOverride());
+ return res;
+}
- const failedOnCRUDStatement =
- hasError(res) && !isCommitOrAbort(cmdName) && isCmdInTransaction(cmdObj);
- if (failedOnCRUDStatement) {
- assert.gt(ops.length, 0);
- abortTransaction(conn, lsid, txnOptions.txnNumber);
+// Returns true if any error code in a response's "raw" field is retryable.
+function rawResponseHasRetryableError(rawRes, cmdName, logError) {
+ for (let shard in rawRes) {
+ const shardRes = rawRes[shard];
- // If the command inserted data and is not supported in a transaction, we assume it
- // failed because the collection did not exist. We will create the collection and retry
- // the entire transaction. We should not receive this error in this override for any
- // other reason.
- // Tests that expect collections to not exist will have to be skipped.
- if (kCmdsThatInsert.has(cmdName) &&
- includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction)) {
- const collName = cmdObj[cmdName];
- createCollectionExplicitly(conn, dbName, collName, lsid);
-
- return retryEntireTransaction(conn, lsid);
- }
+ const logShardError = (msg) => {
+ const msgWithShardPrefix = `Processing raw response from shard: ${shard} :: ${msg}`;
+ logError(msgWithShardPrefix);
+ };
- // Transaction statements cannot be retried, but retryable codes are expected to succeed
- // on full transaction retry.
- if (configuredForNetworkRetry() && RetryableWritesUtil.isRetryableCode(res.code)) {
- logError("Retrying on retryable error for transaction statement");
- return retryEntireTransaction(conn, lsid);
- }
+ // Don't override the responses from each shard because only the top-level code in a
+ // response is used to determine if a command succeeded or not.
+ const networkRetryShardRes = shouldRetryWithNetworkErrorOverride(
+ shardRes, cmdName, logShardError, false /* shouldOverrideAcceptableError */);
+ if (networkRetryShardRes === kContinue) {
+ return true;
}
-
- // Transient transaction errors should retry the entire transaction. A
- // TransientTransactionError on "abortTransaction" is considered a success.
- if (TransactionsUtil.isTransientTransactionError(res) && cmdName !== "abortTransaction") {
- logError("Retrying on TransientTransactionError response");
- res = retryEntireTransaction(conn, lsid);
-
-            // If we got a TransientTransactionError on 'commitTransaction', retrying the transaction
- // will not retry it, so we retry it here.
- if (!hasError(res) && cmdName === "commitTransaction") {
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- }
- return res;
+ }
+ return false;
+}
+
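+// Sentinel returned by the retry helpers below to signal "retry the current command". It is
+// compared by identity, so no real command response can ever be mistaken for it.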
+const kContinue = Object.create(null);
+
+// Processes the command response if we are configured for network error retries. Returns the
+// provided response if we should not retry in this override. Returns kContinue if we should
+// retry the current command without subtracting from our retry allocation. By default sets ok=1
+// for failures with acceptable error codes, unless shouldOverrideAcceptableError is false.
+function shouldRetryWithNetworkErrorOverride(
+ res, cmdName, logError, shouldOverrideAcceptableError = true) {
+ assert(configuredForNetworkRetry());
+
+ if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
+ if ((cmdName === "findandmodify" || cmdName === "findAndModify") &&
+ isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
+ // findAndModify can fail during the find stage and return an executor error.
+ logError("Retrying because of executor interruption");
+ return kContinue;
}
+ // Don't interfere with retryable writes.
return res;
}
- // Returns true if any error code in a response's "raw" field is retryable.
- function rawResponseHasRetryableError(rawRes, cmdName, logError) {
- for (let shard in rawRes) {
- const shardRes = rawRes[shard];
-
- const logShardError = (msg) => {
- const msgWithShardPrefix = `Processing raw response from shard: ${shard} :: ${msg}`;
- logError(msgWithShardPrefix);
- };
-
- // Don't override the responses from each shard because only the top-level code in a
- // response is used to determine if a command succeeded or not.
- const networkRetryShardRes = shouldRetryWithNetworkErrorOverride(
- shardRes, cmdName, logShardError, false /* shouldOverrideAcceptableError */);
- if (networkRetryShardRes === kContinue) {
- return true;
- }
- }
- return false;
+ // commitTransaction should be retried on any write concern error.
+ if (cmdName === "commitTransaction" && hasWriteConcernError(res)) {
+ logError("Retrying write concern error response for commitTransaction");
+ return kContinue;
}
- const kContinue = Object.create(null);
-
- // Processes the command response if we are configured for network error retries. Returns the
- // provided response if we should not retry in this override. Returns kContinue if we should
- // retry the current command without subtracting from our retry allocation. By default sets ok=1
- // for failures with acceptable error codes, unless shouldOverrideAcceptableError is false.
- function shouldRetryWithNetworkErrorOverride(
- res, cmdName, logError, shouldOverrideAcceptableError = true) {
- assert(configuredForNetworkRetry());
-
- if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
- if ((cmdName === "findandmodify" || cmdName === "findAndModify") &&
- isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
- // findAndModify can fail during the find stage and return an executor error.
- logError("Retrying because of executor interruption");
- return kContinue;
- }
-
- // Don't interfere with retryable writes.
- return res;
+ if (cmdName === "explain") {
+ // If an explain is interrupted by a stepdown, and it returns before its connection is
+ // closed, it will return incomplete results. To prevent failing the test, force retries
+ // of interrupted explains.
+ if (res.hasOwnProperty("executionStats") && !res.executionStats.executionSuccess &&
+ (RetryableWritesUtil.isRetryableCode(res.executionStats.errorCode) ||
+ isRetryableExecutorCodeAndMessage(res.executionStats.errorCode,
+ res.executionStats.errorMessage))) {
+ logError("Forcing retry of interrupted explain");
+ return kContinue;
}
- // commitTransaction should be retried on any write concern error.
- if (cmdName === "commitTransaction" && hasWriteConcernError(res)) {
- logError("Retrying write concern error response for commitTransaction");
+ // An explain command can fail if its child command cannot be run on the current server.
+        // This can be hit if a primary-only or not explicitly slaveOk command is accepted by a
+ // primary node that then steps down and returns before having its connection closed.
+ if (!res.ok && res.errmsg.indexOf("child command cannot run on this node") >= 0) {
+ logError("Forcing retry of explain likely interrupted by transition to secondary");
return kContinue;
}
+ }
- if (cmdName === "explain") {
- // If an explain is interrupted by a stepdown, and it returns before its connection is
- // closed, it will return incomplete results. To prevent failing the test, force retries
- // of interrupted explains.
- if (res.hasOwnProperty("executionStats") && !res.executionStats.executionSuccess &&
- (RetryableWritesUtil.isRetryableCode(res.executionStats.errorCode) ||
- isRetryableExecutorCodeAndMessage(res.executionStats.errorCode,
- res.executionStats.errorMessage))) {
- logError("Forcing retry of interrupted explain");
- return kContinue;
- }
-
- // An explain command can fail if its child command cannot be run on the current server.
-            // This can be hit if a primary-only or not explicitly slaveOk command is accepted by a
- // primary node that then steps down and returns before having its connection closed.
- if (!res.ok && res.errmsg.indexOf("child command cannot run on this node") >= 0) {
- logError("Forcing retry of explain likely interrupted by transition to secondary");
- return kContinue;
- }
+ if (!res.ok) {
+ if (RetryableWritesUtil.isRetryableCode(res.code)) {
+ // Don't decrement retries, because the command returned before the connection was
+ // closed, so a subsequent attempt will receive a network error (or NotMaster error)
+ // and need to retry.
+ logError("Retrying failed response with retryable code");
+ return kContinue;
}
- if (!res.ok) {
- if (RetryableWritesUtil.isRetryableCode(res.code)) {
- // Don't decrement retries, because the command returned before the connection was
- // closed, so a subsequent attempt will receive a network error (or NotMaster error)
- // and need to retry.
- logError("Retrying failed response with retryable code");
- return kContinue;
- }
-
- if (isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
- logError("Retrying because of executor interruption");
- return kContinue;
- }
+ if (isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
+ logError("Retrying because of executor interruption");
+ return kContinue;
+ }
- // listCollections and listIndexes called through mongos may return OperationFailed if
- // the request to establish a cursor on the targeted shard fails with a network error.
- //
- // TODO SERVER-30949: Remove this check once those two commands retry on retryable
- // errors automatically.
- if ((cmdName === "listCollections" || cmdName === "listIndexes") &&
- res.code === ErrorCodes.OperationFailed && res.hasOwnProperty("errmsg") &&
- res.errmsg.indexOf("failed to read command response from shard") >= 0) {
- logError("Retrying failed mongos cursor command");
- return kContinue;
- }
+ // listCollections and listIndexes called through mongos may return OperationFailed if
+ // the request to establish a cursor on the targeted shard fails with a network error.
+ //
+ // TODO SERVER-30949: Remove this check once those two commands retry on retryable
+ // errors automatically.
+ if ((cmdName === "listCollections" || cmdName === "listIndexes") &&
+ res.code === ErrorCodes.OperationFailed && res.hasOwnProperty("errmsg") &&
+ res.errmsg.indexOf("failed to read command response from shard") >= 0) {
+ logError("Retrying failed mongos cursor command");
+ return kContinue;
+ }
- // Thrown when an index build is interrupted during its collection scan.
- if (cmdName === "createIndexes" && res.codeName === "InterruptedDueToReplStateChange") {
- logError("Retrying because of interrupted collection scan");
- return kContinue;
- }
+ // Thrown when an index build is interrupted during its collection scan.
+ if (cmdName === "createIndexes" && res.codeName === "InterruptedDueToReplStateChange") {
+ logError("Retrying because of interrupted collection scan");
+ return kContinue;
+ }
- // Some sharding commands return raw responses from all contacted shards and there won't
- // be a top level code if shards returned more than one error code, in which case retry
- // if any error is retryable.
- if (res.hasOwnProperty("raw") && !res.hasOwnProperty("code") &&
- rawResponseHasRetryableError(res.raw, cmdName, logError)) {
- logError("Retrying because of retryable code in raw response");
- return kContinue;
- }
+ // Some sharding commands return raw responses from all contacted shards and there won't
+ // be a top level code if shards returned more than one error code, in which case retry
+ // if any error is retryable.
+ if (res.hasOwnProperty("raw") && !res.hasOwnProperty("code") &&
+ rawResponseHasRetryableError(res.raw, cmdName, logError)) {
+ logError("Retrying because of retryable code in raw response");
+ return kContinue;
+ }
- // Check for the retryable error codes from an interrupted shardCollection.
- if (cmdName === "shardCollection" && isRetryableShardCollectionResponse(res)) {
- logError("Retrying interrupted shardCollection");
- return kContinue;
- }
+ // Check for the retryable error codes from an interrupted shardCollection.
+ if (cmdName === "shardCollection" && isRetryableShardCollectionResponse(res)) {
+ logError("Retrying interrupted shardCollection");
+ return kContinue;
+ }
- // In a sharded cluster, drop may bury the original error code in the error message if
- // interrupted.
- if (cmdName === "drop" &&
- RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg)) {
- logError("Retrying interrupted drop");
- return kContinue;
- }
+ // In a sharded cluster, drop may bury the original error code in the error message if
+ // interrupted.
+ if (cmdName === "drop" && RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg)) {
+ logError("Retrying interrupted drop");
+ return kContinue;
+ }
- if (!shouldOverrideAcceptableError || !isAcceptableRetryFailedResponse(cmdName, res)) {
- // Pass up unretryable errors.
- return res;
- }
+ if (!shouldOverrideAcceptableError || !isAcceptableRetryFailedResponse(cmdName, res)) {
+ // Pass up unretryable errors.
+ return res;
+ }
- // Swallow safe errors that may come from a retry since the command may have completed
- // before the connection was closed.
- logError("Overriding safe failed response for");
- res.ok = 1;
+ // Swallow safe errors that may come from a retry since the command may have completed
+ // before the connection was closed.
+ logError("Overriding safe failed response for");
+ res.ok = 1;
- // Fall through to retry on write concern errors if needed.
- }
+ // Fall through to retry on write concern errors if needed.
+ }
- // Do not retry on a write concern error at this point if there is an actual error.
- // TransientTransactionErrors would already have been retried at an earlier point.
- if (hasWriteConcernError(res) && !hasError(res)) {
- if (RetryableWritesUtil.isRetryableCode(res.writeConcernError.code)) {
- logError("Retrying write concern error response with retryable code");
- return kContinue;
- }
+ // Do not retry on a write concern error at this point if there is an actual error.
+ // TransientTransactionErrors would already have been retried at an earlier point.
+ if (hasWriteConcernError(res) && !hasError(res)) {
+ if (RetryableWritesUtil.isRetryableCode(res.writeConcernError.code)) {
+ logError("Retrying write concern error response with retryable code");
+ return kContinue;
}
-
- return res;
}
- // Processes exceptions if configured for txn override. Retries the entire transaction on
- // transient transaction errors or network errors if configured for network errors as well.
-    // Returns the retry's response if a retry was attempted, or null for further exception processing.
- function retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError) {
- assert(configuredForTxnOverride());
+ return res;
+}
- if (TransactionsUtil.isTransientTransactionError(e) && cmdName !== "abortTransaction") {
- logError("Retrying on TransientTransactionError exception for command");
- const res = retryEntireTransaction(conn, lsid);
+// Processes exceptions if configured for txn override. Retries the entire transaction on
+// transient transaction errors or network errors if configured for network errors as well.
+// Returns the retry's response if a retry was attempted, or null for further exception processing.
+function retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError) {
+ assert(configuredForTxnOverride());
-        // If we got a TransientTransactionError on 'commitTransaction', retrying the transaction
- // will not retry it, so we retry it here.
- if (!hasError(res) && cmdName === "commitTransaction") {
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- }
- return res;
- }
+ if (TransactionsUtil.isTransientTransactionError(e) && cmdName !== "abortTransaction") {
+ logError("Retrying on TransientTransactionError exception for command");
+ const res = retryEntireTransaction(conn, lsid);
- if (configuredForNetworkRetry() && isNetworkError(e) &&
- !canRetryNetworkErrorForCommand(cmdName, cmdObj)) {
- logError("Retrying on network exception for transaction statement");
- return retryEntireTransaction(conn, lsid);
+    // If we got a TransientTransactionError on 'commitTransaction', retrying the transaction
+ // will not retry it, so we retry it here.
+ if (!hasError(res) && cmdName === "commitTransaction") {
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
}
- return null;
+ return res;
}
- // Processes exceptions if configured for network error retry. Returns whether to subtract one
- // from the number of command retries this override counts. Throws if we should not retry.
- function shouldRetryWithNetworkExceptionOverride(
- e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError) {
- assert(configuredForNetworkRetry());
-
- const kReplicaSetMonitorError =
- /^Could not find host matching read preference.*mode: "primary"/;
- if (numNetworkErrorRetries === 0) {
- logError("No retries, throwing");
- throw e;
- } else if (e.message.match(kReplicaSetMonitorError) &&
- Date.now() - startTime < 5 * 60 * 1000) {
- // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
- // primary of the replica set. It is possible for the step up attempt of another
- // node in the replica set to take longer than 15 seconds so we allow retrying
- // for up to 5 minutes.
- logError("Failed to find primary when attempting to run command," +
- " will retry for another 15 seconds");
- return false;
- } else if ((e.message.indexOf("writeConcernError") >= 0) && isRetryableError(e)) {
- logError("Retrying write concern error exception with retryable code");
- return false;
- } else if (!isNetworkError(e)) {
- logError("Not a network error, throwing");
+ if (configuredForNetworkRetry() && isNetworkError(e) &&
+ !canRetryNetworkErrorForCommand(cmdName, cmdObj)) {
+ logError("Retrying on network exception for transaction statement");
+ return retryEntireTransaction(conn, lsid);
+ }
+ return null;
+}
+
+// Processes exceptions if configured for network error retry. Returns whether to subtract one
+// from the number of command retries this override counts. Throws if we should not retry.
+function shouldRetryWithNetworkExceptionOverride(
+ e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError) {
+ assert(configuredForNetworkRetry());
+
+ const kReplicaSetMonitorError =
+ /^Could not find host matching read preference.*mode: "primary"/;
+ if (numNetworkErrorRetries === 0) {
+ logError("No retries, throwing");
+ throw e;
+ } else if (e.message.match(kReplicaSetMonitorError) && Date.now() - startTime < 5 * 60 * 1000) {
+ // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
+ // primary of the replica set. It is possible for the step up attempt of another
+ // node in the replica set to take longer than 15 seconds so we allow retrying
+ // for up to 5 minutes.
+ logError("Failed to find primary when attempting to run command," +
+ " will retry for another 15 seconds");
+ return false;
+ } else if ((e.message.indexOf("writeConcernError") >= 0) && isRetryableError(e)) {
+ logError("Retrying write concern error exception with retryable code");
+ return false;
+ } else if (!isNetworkError(e)) {
+ logError("Not a network error, throwing");
+ throw e;
+ } else if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
+ if (_ServerSession.canRetryWrites(cmdObj)) {
+ // If the command is retryable, assume the command has already gone through
+ // or will go through the retry logic in SessionAwareClient, so propagate
+ // the error.
+ logError("Letting retryable writes code retry, throwing");
throw e;
- } else if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
- if (_ServerSession.canRetryWrites(cmdObj)) {
- // If the command is retryable, assume the command has already gone through
- // or will go through the retry logic in SessionAwareClient, so propagate
- // the error.
- logError("Letting retryable writes code retry, throwing");
- throw e;
- }
}
-
- logError("Retrying on ordinary network error, subtracting from retry count");
- return true;
}
- const kMaxNumRetries = 3;
+ logError("Retrying on ordinary network error, subtracting from retry count");
+ return true;
+}
- // This function is the heart of the override with the main error retry loop.
- function runCommandOverrideBody(
- conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs) {
- const startTime = Date.now();
+const kMaxNumRetries = 3;
- const isTxnStatement = isCmdInTransaction(cmdObj);
+// This function is the heart of the override with the main error retry loop.
+function runCommandOverrideBody(conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs) {
+ const startTime = Date.now();
- if (configuredForNetworkRetry() && !isNested() && !isTxnStatement) {
- // If this is a top level command, make sure that the command supports network error
- // retries. Don't validate transaction statements because their encompassing transaction
- // can be retried at a higher level, even if each statement isn't retryable on its own.
- validateCmdNetworkErrorCompatibility(cmdName, cmdObj);
- }
+ const isTxnStatement = isCmdInTransaction(cmdObj);
- if (configuredForTxnOverride()) {
- setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid);
- }
+ if (configuredForNetworkRetry() && !isNested() && !isTxnStatement) {
+ // If this is a top level command, make sure that the command supports network error
+ // retries. Don't validate transaction statements because their encompassing transaction
+ // can be retried at a higher level, even if each statement isn't retryable on its own.
+ validateCmdNetworkErrorCompatibility(cmdName, cmdObj);
+ }
- const canRetryNetworkError = canRetryNetworkErrorForCommand(cmdName, cmdObj);
- let numNetworkErrorRetries = canRetryNetworkError ? kMaxNumRetries : 0;
- do {
- try {
- // Actually run the provided command.
- let res = clientFunction.apply(conn, makeFuncArgs(cmdObj));
- if (configuredForTxnOverride()) {
- logMsgFull("Override got response",
- `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
-
- if (!hasError(res) &&
- TransactionsUtil.commandIsNonTxnAggregation(cmdName, cmdObj)) {
- nonTxnAggCursorSet[res.cursor.id] = true;
- }
- }
+ if (configuredForTxnOverride()) {
+ setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid);
+ }
- const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, res);
+ const canRetryNetworkError = canRetryNetworkErrorForCommand(cmdName, cmdObj);
+ let numNetworkErrorRetries = canRetryNetworkError ? kMaxNumRetries : 0;
+ do {
+ try {
+ // Actually run the provided command.
+ let res = clientFunction.apply(conn, makeFuncArgs(cmdObj));
+ if (configuredForTxnOverride()) {
+ logMsgFull("Override got response",
+ `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
- if (configuredForTxnOverride()) {
- res = retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError);
+ if (!hasError(res) &&
+ TransactionsUtil.commandIsNonTxnAggregation(cmdName, cmdObj)) {
+ nonTxnAggCursorSet[res.cursor.id] = true;
}
+ }
+
+ const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, res);
+
+ if (configuredForTxnOverride()) {
+ res = retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError);
+ }
- if (canRetryNetworkError) {
- const networkRetryRes =
- shouldRetryWithNetworkErrorOverride(res, cmdName, logError);
- if (networkRetryRes === kContinue) {
- continue;
- } else {
- res = networkRetryRes;
- }
+ if (canRetryNetworkError) {
+ const networkRetryRes = shouldRetryWithNetworkErrorOverride(res, cmdName, logError);
+ if (networkRetryRes === kContinue) {
+ continue;
+ } else {
+ res = networkRetryRes;
}
+ }
- return res;
+ return res;
- } catch (e) {
- const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, e);
+ } catch (e) {
+ const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, e);
- if (configuredForTxnOverride()) {
- const txnRetryOnException =
- retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError);
- if (txnRetryOnException) {
- return txnRetryOnException;
- }
+ if (configuredForTxnOverride()) {
+ const txnRetryOnException =
+ retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError);
+ if (txnRetryOnException) {
+ return txnRetryOnException;
}
+ }
- if (canRetryNetworkError) {
- const decrementRetryCount = shouldRetryWithNetworkExceptionOverride(
- e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError);
- if (decrementRetryCount) {
- --numNetworkErrorRetries;
- logMsgFull("Decrementing command network error retry count",
- `New count: ${numNetworkErrorRetries}`);
- }
-
- logErrorFull("Retrying on network error for command", cmdName, cmdObj, e);
- inCommandNetworkErrorRetry = true;
- continue;
+ if (canRetryNetworkError) {
+ const decrementRetryCount = shouldRetryWithNetworkExceptionOverride(
+ e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError);
+ if (decrementRetryCount) {
+ --numNetworkErrorRetries;
+ logMsgFull("Decrementing command network error retry count",
+ `New count: ${numNetworkErrorRetries}`);
}
- throw e;
+ logErrorFull("Retrying on network error for command", cmdName, cmdObj, e);
+ inCommandNetworkErrorRetry = true;
+ continue;
}
- } while (numNetworkErrorRetries >= 0);
- throw new Error("MONGO UNREACHABLE");
- }
- // Top level runCommand override function.
- function runCommandOverride(conn, dbName, cmdName, cmdObj, clientFunction, makeFuncArgs) {
- currentCommandID.push(newestCommandID++);
- nestingLevel++;
-
- // If the command is in a wrapped form, then we look for the actual command object
- // inside the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObj = cmdObj[cmdName];
- cmdName = Object.keys(cmdObj)[0];
+ throw e;
}
+ } while (numNetworkErrorRetries >= 0);
+ throw new Error("MONGO UNREACHABLE");
+}
+
+// Top level runCommand override function.
+function runCommandOverride(conn, dbName, cmdName, cmdObj, clientFunction, makeFuncArgs) {
+ currentCommandID.push(newestCommandID++);
+ nestingLevel++;
+
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObj = cmdObj[cmdName];
+ cmdName = Object.keys(cmdObj)[0];
+ }
- const lsid = cmdObj.lsid;
- try {
- const res = runCommandOverrideBody(
- conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs);
-
- // Many tests run queries that are expected to fail. In this case, when we wrap CRUD ops
- // in transactions, the transaction including the failed query will not be able to
- // commit. This override expects transactions to be able to commit. Rather than
- // blacklisting all tests containing queries that are expected to fail, we clear the ops
- // list when we return an error to the test so we do not retry the failed query.
- if (configuredForTxnOverride() && !isNested() && hasError(res) && (ops.length > 0)) {
- logMsgFull("Clearing ops on failed command",
- `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
- clearOpsList();
- abortTransaction(conn, lsid, txnOptions.txnNumber);
- }
-
- return res;
- } finally {
- // Reset recursion and retry state tracking.
- nestingLevel--;
- currentCommandID.pop();
- inCommandNetworkErrorRetry = false;
+ const lsid = cmdObj.lsid;
+ try {
+ const res = runCommandOverrideBody(
+ conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs);
+
+ // Many tests run queries that are expected to fail. In this case, when we wrap CRUD ops
+ // in transactions, the transaction including the failed query will not be able to
+ // commit. This override expects transactions to be able to commit. Rather than
+ // blacklisting all tests containing queries that are expected to fail, we clear the ops
+ // list when we return an error to the test so we do not retry the failed query.
+ if (configuredForTxnOverride() && !isNested() && hasError(res) && (ops.length > 0)) {
+ logMsgFull("Clearing ops on failed command",
+ `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
+ clearOpsList();
+ abortTransaction(conn, lsid, txnOptions.txnNumber);
}
- }
- if (configuredForNetworkRetry()) {
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/network_error_and_txn_override.js");
-
- const connectOriginal = connect;
-
- connect = function(url, user, pass) {
- let retVal;
-
- let connectionAttempts = 0;
- assert.soon(
- () => {
- try {
- connectionAttempts += 1;
- retVal = connectOriginal.apply(this, arguments);
- return true;
- } catch (e) {
- print(kLogPrefix + " Retrying connection to: " + url + ", attempts: " +
- connectionAttempts + ", failed with: " + tojson(e));
- }
- },
- "Failed connecting to url: " + tojson(url),
- undefined, // Default timeout.
- 2000); // 2 second interval.
-
- return retVal;
- };
+ return res;
+ } finally {
+ // Reset recursion and retry state tracking.
+ nestingLevel--;
+ currentCommandID.pop();
+ inCommandNetworkErrorRetry = false;
}
+}
+
+if (configuredForNetworkRetry()) {
+ OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/network_error_and_txn_override.js");
+
+ const connectOriginal = connect;
+
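+    // Also wrap the global connect() so that establishing the initial connection is retried on
+    // network errors; assert.soon polls every 2 seconds until its default timeout expires.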
+ connect = function(url, user, pass) {
+ let retVal;
+
+ let connectionAttempts = 0;
+ assert.soon(
+ () => {
+ try {
+ connectionAttempts += 1;
+ retVal = connectOriginal.apply(this, arguments);
+ return true;
+ } catch (e) {
+ print(kLogPrefix + " Retrying connection to: " + url +
+ ", attempts: " + connectionAttempts + ", failed with: " + tojson(e));
+ }
+ },
+ "Failed connecting to url: " + tojson(url),
+ undefined, // Default timeout.
+ 2000); // 2 second interval.
- if (configuredForTxnOverride()) {
- startParallelShell = function() {
- throw new Error(
- "Cowardly refusing to run test with transaction override enabled when it uses" +
- "startParalleShell()");
- };
- }
+ return retVal;
+ };
+}
+
+if (configuredForTxnOverride()) {
+ startParallelShell = function() {
+ throw new Error(
+ "Cowardly refusing to run test with transaction override enabled when it uses" +
+ "startParalleShell()");
+ };
+}
- OverrideHelpers.overrideRunCommand(runCommandOverride);
+OverrideHelpers.overrideRunCommand(runCommandOverride);
})();
diff --git a/jstests/libs/override_methods/retry_writes_at_least_once.js b/jstests/libs/override_methods/retry_writes_at_least_once.js
index f122769eadc..cde81b5cc7f 100644
--- a/jstests/libs/override_methods/retry_writes_at_least_once.js
+++ b/jstests/libs/override_methods/retry_writes_at_least_once.js
@@ -4,56 +4,55 @@
* command. Returns the result of the latest attempt.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/retryable_writes_util.js");
- Random.setRandomSeed();
+Random.setRandomSeed();
- const kExtraRetryProbability = 0.2;
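+// After the mandatory first retry of a retryable write, each additional retry happens with this
+// probability, so every such write is sent at least twice and occasionally more.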
+const kExtraRetryProbability = 0.2;
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
- Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
- return runWithRetries(this, cmdObj, mongoRunCommandOriginal, arguments);
- };
+Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
+ return runWithRetries(this, cmdObj, mongoRunCommandOriginal, arguments);
+};
- Mongo.prototype.runCommandWithMetadata = function runCommandWithMetadata(
- dbName, metadata, cmdObj) {
- return runWithRetries(this, cmdObj, mongoRunCommandWithMetadataOriginal, arguments);
- };
+Mongo.prototype.runCommandWithMetadata = function runCommandWithMetadata(dbName, metadata, cmdObj) {
+ return runWithRetries(this, cmdObj, mongoRunCommandWithMetadataOriginal, arguments);
+};
- function runWithRetries(mongo, cmdObj, clientFunction, clientFunctionArguments) {
- let cmdName = Object.keys(cmdObj)[0];
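+// Runs the command once and, if it is a retryable write on a session that can retry writes,
+// reruns it at least once (see kExtraRetryProbability), returning the last response.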
+function runWithRetries(mongo, cmdObj, clientFunction, clientFunctionArguments) {
+ let cmdName = Object.keys(cmdObj)[0];
- // If the command is in a wrapped form, then we look for the actual command object
- // inside the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObj = cmdObj[cmdName];
- cmdName = Object.keys(cmdObj)[0];
- }
-
- const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
- const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObj = cmdObj[cmdName];
+ cmdName = Object.keys(cmdObj)[0];
+ }
- let res = clientFunction.apply(mongo, clientFunctionArguments);
+ const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
+ const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
- if (isRetryableWriteCmd && canRetryWrites) {
- let retryAttempt = 1;
- do {
- print("*** Retry attempt: " + retryAttempt + ", for command: " + cmdName +
- " with txnNumber: " + tojson(cmdObj.txnNumber) + ", and lsid: " +
- tojson(cmdObj.lsid));
- ++retryAttempt;
- res = clientFunction.apply(mongo, clientFunctionArguments);
- } while (Random.rand() <= kExtraRetryProbability);
- }
+ let res = clientFunction.apply(mongo, clientFunctionArguments);
- return res;
+ if (isRetryableWriteCmd && canRetryWrites) {
+ let retryAttempt = 1;
+ do {
+ print("*** Retry attempt: " + retryAttempt + ", for command: " + cmdName +
+ " with txnNumber: " + tojson(cmdObj.txnNumber) +
+ ", and lsid: " + tojson(cmdObj.lsid));
+ ++retryAttempt;
+ res = clientFunction.apply(mongo, clientFunctionArguments);
+ } while (Random.rand() <= kExtraRetryProbability);
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/retry_writes_at_least_once.js");
+ return res;
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/retry_writes_at_least_once.js");
})();
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index 4b9adfb23fa..19c799714e7 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -15,153 +15,153 @@
*
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
- load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
+
+if (typeof TestData === "undefined" || !TestData.hasOwnProperty("defaultReadConcernLevel")) {
+ throw new Error("The readConcern level to use must be set as the 'defaultReadConcernLevel'" +
+ " property on the global TestData object");
+}
+
+// If the default read concern level is null, that indicates that no read concern overrides
+// should be applied.
+const kDefaultReadConcern = {
+ level: TestData.defaultReadConcernLevel
+};
+const kDefaultWriteConcern =
+ (TestData.hasOwnProperty("defaultWriteConcern")) ? TestData.defaultWriteConcern : {
+ w: "majority",
+ // Use a "signature" value that won't typically match a value assigned in normal use.
+ // This way the wtimeout set by this override is distinguishable in the server logs.
+ wtimeout: 5 * 60 * 1000 + 321, // 300321ms
+ };
+
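+// Applies the configured default read and write concern to each command that supports them,
+// refusing to silently overwrite a conflicting concern the test specified itself.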
+function runCommandWithReadAndWriteConcerns(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- if (typeof TestData === "undefined" || !TestData.hasOwnProperty("defaultReadConcernLevel")) {
- throw new Error(
- "The readConcern level to use must be set as the 'defaultReadConcernLevel'" +
- " property on the global TestData object");
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
}
- // If the default read concern level is null, that indicates that no read concern overrides
- // should be applied.
- const kDefaultReadConcern = {level: TestData.defaultReadConcernLevel};
- const kDefaultWriteConcern =
- (TestData.hasOwnProperty("defaultWriteConcern")) ? TestData.defaultWriteConcern : {
- w: "majority",
- // Use a "signature" value that won't typically match a value assigned in normal use.
- // This way the wtimeout set by this override is distinguishable in the server logs.
- wtimeout: 5 * 60 * 1000 + 321, // 300321ms
- };
-
- function runCommandWithReadAndWriteConcerns(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+ let shouldForceReadConcern = kCommandsSupportingReadConcern.has(commandName);
+ let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(commandName);
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
+ // All commands in a multi-document transaction have the autocommit property.
+ if (commandObj.hasOwnProperty("autocommit")) {
+ shouldForceReadConcern = false;
+ if (!kCommandsSupportingWriteConcernInTransaction.has(commandName)) {
+ shouldForceWriteConcern = false;
}
-
- let shouldForceReadConcern = kCommandsSupportingReadConcern.has(commandName);
- let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(commandName);
-
- // All commands in a multi-document transaction have the autocommit property.
- if (commandObj.hasOwnProperty("autocommit")) {
+ }
+ if (commandName === "aggregate") {
+ if (OverrideHelpers.isAggregationWithListLocalSessionsStage(commandName,
+ commandObjUnwrapped)) {
+ // The $listLocalSessions stage can only be used with readConcern={level: "local"}.
shouldForceReadConcern = false;
- if (!kCommandsSupportingWriteConcernInTransaction.has(commandName)) {
- shouldForceWriteConcern = false;
- }
}
- if (commandName === "aggregate") {
- if (OverrideHelpers.isAggregationWithListLocalSessionsStage(commandName,
- commandObjUnwrapped)) {
- // The $listLocalSessions stage can only be used with readConcern={level: "local"}.
- shouldForceReadConcern = false;
- }
- if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName,
- commandObjUnwrapped)) {
- // The $out stage can only be used with readConcern={level: "local"} or
- // readConcern={level: "majority"}
- if (TestData.defaultReadConcernLevel === "linearizable") {
- shouldForceReadConcern = false;
- }
- } else {
- // A writeConcern can only be used with a $out stage.
- shouldForceWriteConcern = false;
- }
-
- if (commandObjUnwrapped.explain) {
- // Attempting to specify a readConcern while explaining an aggregation would always
-                // return an error prior to SERVER-30582 and is otherwise only compatible with
- // readConcern={level: "local"}.
+ if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName, commandObjUnwrapped)) {
+ // The $out stage can only be used with readConcern={level: "local"} or
+ // readConcern={level: "majority"}
+ if (TestData.defaultReadConcernLevel === "linearizable") {
shouldForceReadConcern = false;
}
- } else if (OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
- // A writeConcern can only be used with non-inline output.
+ } else {
+ // A writeConcern can only be used with a $out stage.
shouldForceWriteConcern = false;
}
- if (kCommandsOnlySupportingReadConcernSnapshot.has(commandName) &&
- kDefaultReadConcern.level === "snapshot") {
- shouldForceReadConcern = true;
+ if (commandObjUnwrapped.explain) {
+ // Attempting to specify a readConcern while explaining an aggregation would always
+            // return an error prior to SERVER-30582 and is otherwise only compatible with
+ // readConcern={level: "local"}.
+ shouldForceReadConcern = false;
}
+ } else if (OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A writeConcern can only be used with non-inline output.
+ shouldForceWriteConcern = false;
+ }
- const inWrappedForm = commandObj !== commandObjUnwrapped;
-
- // Only override read concern if an override level was specified.
- if (shouldForceReadConcern && (kDefaultReadConcern.level !== null)) {
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller
- // specified.
- commandObj = Object.assign({}, commandObj);
- if (inWrappedForm) {
- commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
- commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
- } else {
- commandObjUnwrapped = commandObj;
- }
+ if (kCommandsOnlySupportingReadConcernSnapshot.has(commandName) &&
+ kDefaultReadConcern.level === "snapshot") {
+ shouldForceReadConcern = true;
+ }
- let readConcern;
- if (commandObjUnwrapped.hasOwnProperty("readConcern")) {
- readConcern = commandObjUnwrapped.readConcern;
+ const inWrappedForm = commandObj !== commandObjUnwrapped;
+
+ // Only override read concern if an override level was specified.
+ if (shouldForceReadConcern && (kDefaultReadConcern.level !== null)) {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ if (inWrappedForm) {
+ commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
+ commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
+ } else {
+ commandObjUnwrapped = commandObj;
+ }
- if (typeof readConcern !== "object" || readConcern === null ||
- (readConcern.hasOwnProperty("level") &&
- bsonWoCompare({_: readConcern.level}, {_: kDefaultReadConcern.level}) !== 0)) {
- throw new Error("Cowardly refusing to override read concern of command: " +
- tojson(commandObj));
- }
- }
+ let readConcern;
+ if (commandObjUnwrapped.hasOwnProperty("readConcern")) {
+ readConcern = commandObjUnwrapped.readConcern;
- // We create a copy of the readConcern object to avoid mutating the parameter the
- // caller specified.
- readConcern = Object.assign({}, readConcern, kDefaultReadConcern);
- commandObjUnwrapped.readConcern = readConcern;
+ if (typeof readConcern !== "object" || readConcern === null ||
+ (readConcern.hasOwnProperty("level") &&
+ bsonWoCompare({_: readConcern.level}, {_: kDefaultReadConcern.level}) !== 0)) {
+ throw new Error("Cowardly refusing to override read concern of command: " +
+ tojson(commandObj));
+ }
}
- if (shouldForceWriteConcern) {
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller
- // specified.
- commandObj = Object.assign({}, commandObj);
- if (inWrappedForm) {
- commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
- commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
- } else {
- commandObjUnwrapped = commandObj;
- }
+ // We create a copy of the readConcern object to avoid mutating the parameter the
+ // caller specified.
+ readConcern = Object.assign({}, readConcern, kDefaultReadConcern);
+ commandObjUnwrapped.readConcern = readConcern;
+ }
- let writeConcern;
- if (commandObjUnwrapped.hasOwnProperty("writeConcern")) {
- writeConcern = commandObjUnwrapped.writeConcern;
+ if (shouldForceWriteConcern) {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ if (inWrappedForm) {
+ commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
+ commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
+ } else {
+ commandObjUnwrapped = commandObj;
+ }
- if (typeof writeConcern !== "object" || writeConcern === null ||
- (writeConcern.hasOwnProperty("w") &&
- bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcern.w}) !== 0)) {
- throw new Error("Cowardly refusing to override write concern of command: " +
- tojson(commandObj));
- }
- }
+ let writeConcern;
+ if (commandObjUnwrapped.hasOwnProperty("writeConcern")) {
+ writeConcern = commandObjUnwrapped.writeConcern;
- // We create a copy of the writeConcern object to avoid mutating the parameter the
- // caller specified.
- writeConcern = Object.assign({}, writeConcern, kDefaultWriteConcern);
- commandObjUnwrapped.writeConcern = writeConcern;
+ if (typeof writeConcern !== "object" || writeConcern === null ||
+ (writeConcern.hasOwnProperty("w") &&
+ bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcern.w}) !== 0)) {
+ throw new Error("Cowardly refusing to override write concern of command: " +
+ tojson(commandObj));
+ }
}
- return func.apply(conn, makeFuncArgs(commandObj));
+ // We create a copy of the writeConcern object to avoid mutating the parameter the
+ // caller specified.
+ writeConcern = Object.assign({}, writeConcern, kDefaultWriteConcern);
+ commandObjUnwrapped.writeConcern = writeConcern;
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/set_read_and_write_concerns.js");
+ return func.apply(conn, makeFuncArgs(commandObj));
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/set_read_and_write_concerns.js");
- OverrideHelpers.overrideRunCommand(runCommandWithReadAndWriteConcerns);
+OverrideHelpers.overrideRunCommand(runCommandWithReadAndWriteConcerns);
})();
diff --git a/jstests/libs/override_methods/set_read_preference_secondary.js b/jstests/libs/override_methods/set_read_preference_secondary.js
index 562711776f7..e962437140b 100644
--- a/jstests/libs/override_methods/set_read_preference_secondary.js
+++ b/jstests/libs/override_methods/set_read_preference_secondary.js
@@ -2,167 +2,166 @@
* Use prototype overrides to set read preference to "secondary" when running tests.
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
-
- const kReadPreferenceSecondary = {mode: "secondary"};
- const kCommandsSupportingReadPreference = new Set([
- "aggregate",
- "collStats",
- "count",
- "dbStats",
- "distinct",
- "find",
- "geoSearch",
- ]);
- const kDatabasesOnConfigServers = new Set(["config", "admin"]);
-
- // This list of cursor-generating commands is incomplete. For example, "listCollections",
- // "listIndexes", and "repairCursor" are all missing from this list.
- // If we ever add tests that attempt to run getMore or killCursors on cursors generated from
- // those commands, then we should update the contents of this list and also handle any
- // differences in the server's response format.
- const kCursorGeneratingCommands = new Set(["aggregate", "find"]);
-
- const CursorTracker = (function() {
- const kNoCursor = new NumberLong(0);
-
- const connectionsByCursorId = {};
-
- return {
- getConnectionUsedForCursor: function getConnectionUsedForCursor(cursorId) {
- return (cursorId instanceof NumberLong) ? connectionsByCursorId[cursorId]
- : undefined;
- },
-
- setConnectionUsedForCursor: function setConnectionUsedForCursor(cursorId, cursorConn) {
- if (cursorId instanceof NumberLong &&
- !bsonBinaryEqual({_: cursorId}, {_: kNoCursor})) {
- connectionsByCursorId[cursorId] = cursorConn;
- }
- },
- };
- })();
-
- function runCommandWithReadPreferenceSecondary(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+
+const kReadPreferenceSecondary = {
+ mode: "secondary"
+};
+const kCommandsSupportingReadPreference = new Set([
+ "aggregate",
+ "collStats",
+ "count",
+ "dbStats",
+ "distinct",
+ "find",
+ "geoSearch",
+]);
+const kDatabasesOnConfigServers = new Set(["config", "admin"]);
+
+// This list of cursor-generating commands is incomplete. For example, "listCollections",
+// "listIndexes", and "repairCursor" are all missing from this list.
+// If we ever add tests that attempt to run getMore or killCursors on cursors generated from
+// those commands, then we should update the contents of this list and also handle any
+// differences in the server's response format.
+const kCursorGeneratingCommands = new Set(["aggregate", "find"]);
+
+const CursorTracker = (function() {
+ const kNoCursor = new NumberLong(0);
+
+ const connectionsByCursorId = {};
+
+ return {
+ getConnectionUsedForCursor: function getConnectionUsedForCursor(cursorId) {
+ return (cursorId instanceof NumberLong) ? connectionsByCursorId[cursorId] : undefined;
+ },
+
+ setConnectionUsedForCursor: function setConnectionUsedForCursor(cursorId, cursorConn) {
+ if (cursorId instanceof NumberLong && !bsonBinaryEqual({_: cursorId}, {_: kNoCursor})) {
+ connectionsByCursorId[cursorId] = cursorConn;
+ }
+ },
+ };
+})();
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
- }
+function runCommandWithReadPreferenceSecondary(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- if (commandObj[commandName] === "system.profile" || commandName === 'profile') {
- throw new Error(
- "Cowardly refusing to run test that interacts with the system profiler as the " +
- "'system.profile' collection is not replicated" + tojson(commandObj));
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
- if (conn.isReplicaSetConnection()) {
- // When a "getMore" or "killCursors" command is issued on a replica set connection, we
- // attempt to automatically route the command to the server the cursor(s) were
- // originally established on. This makes it possible to use the
- // set_read_preference_secondary.js override without needing to update calls of
- // DB#runCommand() to explicitly track the connection that was used. If the connection
- // is actually a direct connection to a mongod or mongos process, or if the cursor id
- // cannot be found in the CursorTracker, then we'll fall back to using DBClientRS's
- // server selection and send the operation to the current primary. It is possible that
- // the test is trying to exercise the behavior around when an unknown cursor id is sent
- // to the server.
- if (commandName === "getMore") {
- const cursorId = commandObjUnwrapped[commandName];
- const cursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
- if (cursorConn !== undefined) {
- return func.apply(cursorConn, makeFuncArgs(commandObj));
- }
- } else if (commandName === "killCursors") {
- const cursorIds = commandObjUnwrapped.cursors;
- if (Array.isArray(cursorIds)) {
- let cursorConn;
-
- for (let cursorId of cursorIds) {
- const otherCursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
- if (cursorConn === undefined) {
- cursorConn = otherCursorConn;
- } else if (otherCursorConn !== undefined) {
- // We set 'cursorConn' back to undefined and break out of the loop so
- // that we don't attempt to automatically route the "killCursors"
- // command when there are cursors from different servers.
- cursorConn = undefined;
- break;
- }
- }
+ if (commandObj[commandName] === "system.profile" || commandName === 'profile') {
+ throw new Error(
+ "Cowardly refusing to run test that interacts with the system profiler as the " +
+ "'system.profile' collection is not replicated" + tojson(commandObj));
+ }
- if (cursorConn !== undefined) {
- return func.apply(cursorConn, makeFuncArgs(commandObj));
+ if (conn.isReplicaSetConnection()) {
+ // When a "getMore" or "killCursors" command is issued on a replica set connection, we
+ // attempt to automatically route the command to the server the cursor(s) were
+ // originally established on. This makes it possible to use the
+ // set_read_preference_secondary.js override without needing to update calls of
+ // DB#runCommand() to explicitly track the connection that was used. If the connection
+ // is actually a direct connection to a mongod or mongos process, or if the cursor id
+ // cannot be found in the CursorTracker, then we'll fall back to using DBClientRS's
+ // server selection and send the operation to the current primary. It is possible that
+ // the test is trying to exercise the behavior around when an unknown cursor id is sent
+ // to the server.
+ if (commandName === "getMore") {
+ const cursorId = commandObjUnwrapped[commandName];
+ const cursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
+ if (cursorConn !== undefined) {
+ return func.apply(cursorConn, makeFuncArgs(commandObj));
+ }
+ } else if (commandName === "killCursors") {
+ const cursorIds = commandObjUnwrapped.cursors;
+ if (Array.isArray(cursorIds)) {
+ let cursorConn;
+
+ for (let cursorId of cursorIds) {
+ const otherCursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
+ if (cursorConn === undefined) {
+ cursorConn = otherCursorConn;
+ } else if (otherCursorConn !== undefined) {
+ // We set 'cursorConn' back to undefined and break out of the loop so
+ // that we don't attempt to automatically route the "killCursors"
+ // command when there are cursors from different servers.
+ cursorConn = undefined;
+ break;
}
}
- }
- }
- let shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
- if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName, commandObjUnwrapped)) {
- // An aggregation with a $out stage must be sent to the primary.
- shouldForceReadPreference = false;
- } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
- !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
- // A map-reduce operation with non-inline output must be sent to the primary.
- shouldForceReadPreference = false;
- } else if (conn.isMongos() && kDatabasesOnConfigServers.has(dbName)) {
- // Avoid overriding the read preference for config server since there may only be one
- // of them.
- shouldForceReadPreference = false;
+ if (cursorConn !== undefined) {
+ return func.apply(cursorConn, makeFuncArgs(commandObj));
+ }
+ }
}
+ }
- if (TestData.doNotOverrideReadPreference) {
- // Use this TestData flag to allow certain runCommands to be exempted from
- // setting secondary read preference.
- shouldForceReadPreference = false;
- }
+ let shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
+ if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName, commandObjUnwrapped)) {
+        // An aggregation with a $out or $merge stage must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
+ !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A map-reduce operation with non-inline output must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if (conn.isMongos() && kDatabasesOnConfigServers.has(dbName)) {
+        // Avoid overriding the read preference for the config server since there may be only
+        // one of them.
+ shouldForceReadPreference = false;
+ }
- if (shouldForceReadPreference) {
- if (commandObj === commandObjUnwrapped) {
- // We wrap the command object using a "query" field rather than a "$query" field to
- // match the implementation of DB.prototype._attachReadPreferenceToCommand().
- commandObj = {query: commandObj};
- } else {
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller
- // specified.
- commandObj = Object.assign({}, commandObj);
- }
+ if (TestData.doNotOverrideReadPreference) {
+ // Use this TestData flag to allow certain runCommands to be exempted from
+ // setting secondary read preference.
+ shouldForceReadPreference = false;
+ }
- if (commandObj.hasOwnProperty("$readPreference") &&
- !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
- throw new Error("Cowardly refusing to override read preference of command: " +
- tojson(commandObj));
- }
+ if (shouldForceReadPreference) {
+ if (commandObj === commandObjUnwrapped) {
+ // We wrap the command object using a "query" field rather than a "$query" field to
+ // match the implementation of DB.prototype._attachReadPreferenceToCommand().
+ commandObj = {query: commandObj};
+ } else {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ }
- commandObj.$readPreference = kReadPreferenceSecondary;
+ if (commandObj.hasOwnProperty("$readPreference") &&
+ !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
+ throw new Error("Cowardly refusing to override read preference of command: " +
+ tojson(commandObj));
}
- const serverResponse = func.apply(conn, makeFuncArgs(commandObj));
+ commandObj.$readPreference = kReadPreferenceSecondary;
+ }
- if (conn.isReplicaSetConnection() && kCursorGeneratingCommands.has(commandName) &&
- serverResponse.ok === 1 && serverResponse.hasOwnProperty("cursor")) {
- // We associate the cursor id returned by the server with the connection that was used
- // to establish it so that we can attempt to automatically route subsequent "getMore"
- // and "killCursors" commands.
- CursorTracker.setConnectionUsedForCursor(serverResponse.cursor.id,
- serverResponse._mongo);
- }
+ const serverResponse = func.apply(conn, makeFuncArgs(commandObj));
- return serverResponse;
+ if (conn.isReplicaSetConnection() && kCursorGeneratingCommands.has(commandName) &&
+ serverResponse.ok === 1 && serverResponse.hasOwnProperty("cursor")) {
+ // We associate the cursor id returned by the server with the connection that was used
+ // to establish it so that we can attempt to automatically route subsequent "getMore"
+ // and "killCursors" commands.
+ CursorTracker.setConnectionUsedForCursor(serverResponse.cursor.id, serverResponse._mongo);
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/set_read_preference_secondary.js");
+ return serverResponse;
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/set_read_preference_secondary.js");
- OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
+OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
})();
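The routing machinery in this file reduces to one piece of bookkeeping: remember which connection produced each cursor id, then send "getMore" and "killCursors" back there. A stripped-down sketch of that id-to-connection map (plain numbers stand in for the shell's NumberLong, and the ambiguity handling for multi-cursor killCursors is omitted):

    var CursorTrackerSketch = (function() {
        var kNoCursor = 0;               // cursor id 0 means "no cursor left open"
        var connectionsByCursorId = {};
        return {
            getConnectionUsedForCursor: function(cursorId) {
                return connectionsByCursorId[cursorId];
            },
            setConnectionUsedForCursor: function(cursorId, cursorConn) {
                if (cursorId !== kNoCursor) {
                    connectionsByCursorId[cursorId] = cursorConn;
                }
            },
        };
    })();

    CursorTrackerSketch.setConnectionUsedForCursor(12345, "conn-to-secondary-A");
    // A later getMore for cursor 12345 is routed to "conn-to-secondary-A".
    CursorTrackerSketch.getConnectionUsedForCursor(12345);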
diff --git a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
index 362310b5248..ad0e8e3d6de 100644
--- a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
+++ b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
@@ -1,36 +1,35 @@
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/continuous_stepdown.js");
- load("jstests/libs/override_methods/mongos_manual_intervention_actions.js");
+load("jstests/libs/override_methods/continuous_stepdown.js");
+load("jstests/libs/override_methods/mongos_manual_intervention_actions.js");
- ContinuousStepdown.configure({
- configStepdown: true,
- electionTimeoutMS: 5 * 1000,
- shardStepdown: false,
- stepdownDurationSecs: 10,
- stepdownIntervalMS: 8 * 1000,
- },
- {
- verbositySetting: {
- verbosity: 0,
- command: {verbosity: 1},
- network: {verbosity: 1, asio: {verbosity: 2}},
- tracking: {verbosity: 0}
- }
- });
+ContinuousStepdown.configure({
+ configStepdown: true,
+ electionTimeoutMS: 5 * 1000,
+ shardStepdown: false,
+ stepdownDurationSecs: 10,
+ stepdownIntervalMS: 8 * 1000,
+},
+ {
+ verbositySetting: {
+ verbosity: 0,
+ command: {verbosity: 1},
+ network: {verbosity: 1, asio: {verbosity: 2}},
+ tracking: {verbosity: 0}
+ }
+ });
- const originalShardingTest = ShardingTest;
- ShardingTest = function() {
- originalShardingTest.apply(this, arguments);
+const originalShardingTest = ShardingTest;
+ShardingTest = function() {
+ originalShardingTest.apply(this, arguments);
- // Automatically start the continuous stepdown thread on the config server replica set.
- this.startContinuousFailover();
- };
-
- // The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype, but
- // ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the override
- // to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's prototype.
- load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
+ // Automatically start the continuous stepdown thread on the config server replica set.
+ this.startContinuousFailover();
+};
+// The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype, but
+// ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the override
+// to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's prototype.
+load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
})();
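The ShardingTest reassignment above is the standard wrap-a-global-constructor move, with one trap the comment calls out: the replacement function has a fresh prototype, so anything previously defined on the old prototype disappears. The test code fixes that by reloading the script that defines the prototype method; the sketch below shows the equivalent effect by copying (names here are illustrative):

    var OriginalCtor = function() {};
    OriginalCtor.prototype.helper = function() { return "from prototype"; };

    var WrappedCtor = function() {
        OriginalCtor.apply(this, arguments);  // run the original setup first
        this.extra = true;                    // then layer on the new behavior
    };
    // Without restoring it, instances of WrappedCtor would lose helper():
    WrappedCtor.prototype.helper = OriginalCtor.prototype.helper;

    new WrappedCtor().helper();  // "from prototype"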
diff --git a/jstests/libs/override_methods/txn_passthrough_cmd_massage.js b/jstests/libs/override_methods/txn_passthrough_cmd_massage.js
index 374578da166..114e26c2b46 100644
--- a/jstests/libs/override_methods/txn_passthrough_cmd_massage.js
+++ b/jstests/libs/override_methods/txn_passthrough_cmd_massage.js
@@ -3,72 +3,72 @@
* statement transaction suites.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- function runCommandInMultiStmtTxnPassthrough(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+function runCommandInMultiStmtTxnPassthrough(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
- // Ignore all commands that are part of multi statement transactions.
- if (commandObj.hasOwnProperty("autocommit")) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+ // Ignore all commands that are part of multi statement transactions.
+ if (commandObj.hasOwnProperty("autocommit")) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- const majority = {w: 'majority'};
- let massagedCmd = Object.extend(commandObjUnwrapped, {});
+ const majority = {w: 'majority'};
+ let massagedCmd = Object.extend(commandObjUnwrapped, {});
- // Adjust mapReduce and drop to use { w: majority } to make sure that all pending drops that
- // occurred while running these commands are finished after the command returns. This
- // is done to make sure that the pending drop of the two phase drop won't try to contest
- // with db/coll locks in the background.
+    // Adjust mapReduce and drop to use { w: majority } to make sure that all pending drops that
+    // occurred while running these commands are finished after the command returns. This
+    // ensures that the pending drop of the two-phase drop won't contend
+    // with db/coll locks in the background.
- if (commandName === "mapReduce" || commandName === "mapreduce") {
- if (typeof massagedCmd.out === 'string') {
- massagedCmd.out = {replace: commandObjUnwrapped.out, writeConcern: majority};
- } else if (typeof massagedCmd.out === 'object') {
- let outOptions = massagedCmd.out;
- if (!outOptions.hasOwnProperty('inline')) {
- if (outOptions.hasOwnProperty('writeConcern')) {
- if (outOptions.writeConcern.w !== 'majority') {
- throw new Error(
- 'Running mapReduce with non majority write concern: ' +
- tojson(commandObj) + '. Consider blacklisting the test ' +
- 'since the 2 phase drop can interfere with lock acquisitions.');
- }
- } else {
- outOptions.writeConcern = majority;
+ if (commandName === "mapReduce" || commandName === "mapreduce") {
+ if (typeof massagedCmd.out === 'string') {
+ massagedCmd.out = {replace: commandObjUnwrapped.out, writeConcern: majority};
+ } else if (typeof massagedCmd.out === 'object') {
+ let outOptions = massagedCmd.out;
+ if (!outOptions.hasOwnProperty('inline')) {
+ if (outOptions.hasOwnProperty('writeConcern')) {
+ if (outOptions.writeConcern.w !== 'majority') {
+ throw new Error(
+ 'Running mapReduce with non majority write concern: ' +
+ tojson(commandObj) + '. Consider blacklisting the test ' +
+ 'since the 2 phase drop can interfere with lock acquisitions.');
}
+ } else {
+ outOptions.writeConcern = majority;
}
}
- } else if (commandName === 'drop') {
- if (massagedCmd.hasOwnProperty('writeConcern')) {
- if (massagedCmd.writeConcern.w !== 'majority') {
- throw new Error('Running drop with non majority write concern: ' +
- tojson(commandObj) + '. Consider blacklisting the test ' +
- 'since the 2 phase drop can interfere with lock acquisitions.');
- }
- } else {
- massagedCmd.writeConcern = majority;
+ }
+ } else if (commandName === 'drop') {
+ if (massagedCmd.hasOwnProperty('writeConcern')) {
+ if (massagedCmd.writeConcern.w !== 'majority') {
+ throw new Error('Running drop with non majority write concern: ' +
+ tojson(commandObj) + '. Consider blacklisting the test ' +
+ 'since the 2 phase drop can interfere with lock acquisitions.');
}
+ } else {
+ massagedCmd.writeConcern = majority;
}
-
- return func.apply(conn, makeFuncArgs(massagedCmd));
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/txn_passthrough_cmd_massage.js");
+ return func.apply(conn, makeFuncArgs(massagedCmd));
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/txn_passthrough_cmd_massage.js");
- OverrideHelpers.overrideRunCommand(runCommandInMultiStmtTxnPassthrough);
+OverrideHelpers.overrideRunCommand(runCommandInMultiStmtTxnPassthrough);
})();
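The out-field massaging above has three cases: a string out becomes a replace spec carrying majority write concern, an inline out is left alone, and any other object either gains the majority write concern or is rejected if it already carries a weaker one. A hedged sketch of just that decision (the function name is invented for illustration):

    var majority = {w: 'majority'};

    function massageMapReduceOut(out) {
        if (typeof out === 'string') {
            return {replace: out, writeConcern: majority};
        }
        if (typeof out === 'object' && !out.hasOwnProperty('inline')) {
            if (!out.hasOwnProperty('writeConcern')) {
                out.writeConcern = majority;
            } else if (out.writeConcern.w !== 'majority') {
                throw new Error('mapReduce out has a non-majority write concern');
            }
        }
        return out;
    }

    massageMapReduceOut('target');           // {replace: 'target', writeConcern: {w: 'majority'}}
    massageMapReduceOut({merge: 'target'});  // gains writeConcern: {w: 'majority'}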
diff --git a/jstests/libs/override_methods/validate_collections_on_shutdown.js b/jstests/libs/override_methods/validate_collections_on_shutdown.js
index 49036790739..130404dfe6f 100644
--- a/jstests/libs/override_methods/validate_collections_on_shutdown.js
+++ b/jstests/libs/override_methods/validate_collections_on_shutdown.js
@@ -4,116 +4,110 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/command_sequence_with_retries.js"); // for CommandSequenceWithRetries
+load("jstests/libs/command_sequence_with_retries.js"); // for CommandSequenceWithRetries
- MongoRunner.validateCollectionsCallback = function(port) {
- if (jsTest.options().skipCollectionAndIndexValidation) {
- print("Skipping collection validation during mongod shutdown");
- return;
- }
+MongoRunner.validateCollectionsCallback = function(port) {
+ if (jsTest.options().skipCollectionAndIndexValidation) {
+ print("Skipping collection validation during mongod shutdown");
+ return;
+ }
- let conn;
- try {
- conn = new Mongo("localhost:" + port);
- } catch (e) {
- print(
- "Skipping collection validation because we couldn't establish a connection to the" +
- " server on port " + port);
- return;
- }
+ let conn;
+ try {
+ conn = new Mongo("localhost:" + port);
+ } catch (e) {
+ print("Skipping collection validation because we couldn't establish a connection to the" +
+ " server on port " + port);
+ return;
+ }
- // Set slaveOk=true so that we can run commands against any secondaries.
- conn.setSlaveOk();
+ // Set slaveOk=true so that we can run commands against any secondaries.
+ conn.setSlaveOk();
- let dbNames;
- let result =
- new CommandSequenceWithRetries(conn)
- .then("running the isMaster command",
- function(conn) {
- const res = assert.commandWorked(conn.adminCommand({isMaster: 1}));
- if (res.msg === "isdbgrid") {
- return {
- shouldStop: true,
- reason: "not running validate against mongos"
- };
- } else if (!res.ismaster && !res.secondary) {
- return {
- shouldStop: true,
- reason: "not running validate since mongod isn't in the PRIMARY" +
- " or SECONDARY states"
- };
- }
- })
- .then("authenticating",
- function(conn) {
- if (jsTest.options().keyFile) {
- jsTest.authenticate(conn);
- }
- })
- .then(
- "best effort to step down node forever",
- function(conn) {
- if (conn.isReplicaSetMember()) {
- // This node should never run for election again. If the node has not
- // been initialized yet, then it cannot get elected.
- const kFreezeTimeSecs = 24 * 60 * 60; // 24 hours.
+ let dbNames;
+ let result =
+ new CommandSequenceWithRetries(conn)
+ .then("running the isMaster command",
+ function(conn) {
+ const res = assert.commandWorked(conn.adminCommand({isMaster: 1}));
+ if (res.msg === "isdbgrid") {
+ return {shouldStop: true, reason: "not running validate against mongos"};
+ } else if (!res.ismaster && !res.secondary) {
+ return {
+ shouldStop: true,
+ reason: "not running validate since mongod isn't in the PRIMARY" +
+ " or SECONDARY states"
+ };
+ }
+ })
+ .then("authenticating",
+ function(conn) {
+ if (jsTest.options().keyFile) {
+ jsTest.authenticate(conn);
+ }
+ })
+ .then("best effort to step down node forever",
+ function(conn) {
+ if (conn.isReplicaSetMember()) {
+ // This node should never run for election again. If the node has not
+ // been initialized yet, then it cannot get elected.
+ const kFreezeTimeSecs = 24 * 60 * 60; // 24 hours.
- assert.commandWorkedOrFailedWithCode(
- conn.adminCommand({replSetStepDown: kFreezeTimeSecs, force: true}),
- [
+ assert.commandWorkedOrFailedWithCode(
+ conn.adminCommand({replSetStepDown: kFreezeTimeSecs, force: true}), [
ErrorCodes.NotMaster,
ErrorCodes.NotYetInitialized,
ErrorCodes.Unauthorized
- ]);
+ ]);
- assert.commandWorkedOrFailedWithCode(
- conn.adminCommand({replSetFreeze: kFreezeTimeSecs}), [
- ErrorCodes.NotYetInitialized,
- ErrorCodes.Unauthorized,
- // We include "NotSecondary" because if replSetStepDown receives
- // "NotYetInitialized", then this command will fail with
- // "NotSecondary". This is why this is a "best-effort".
- ErrorCodes.NotSecondary
- ]);
- }
- })
- .then("getting the list of databases",
- function(conn) {
- const res = conn.adminCommand({listDatabases: 1});
- if (!res.ok) {
- // TODO: SERVER-31916 for the KeyNotFound error
- assert.commandFailedWithCode(
- res, [ErrorCodes.Unauthorized, ErrorCodes.KeyNotFound]);
- return {shouldStop: true, reason: "cannot run listDatabases"};
- }
- assert.commandWorked(res);
- dbNames = res.databases.map(dbInfo => dbInfo.name);
- })
- .execute();
+ assert.commandWorkedOrFailedWithCode(
+ conn.adminCommand({replSetFreeze: kFreezeTimeSecs}), [
+ ErrorCodes.NotYetInitialized,
+ ErrorCodes.Unauthorized,
+ // We include "NotSecondary" because if replSetStepDown receives
+ // "NotYetInitialized", then this command will fail with
+ // "NotSecondary". This is why this is a "best-effort".
+ ErrorCodes.NotSecondary
+ ]);
+ }
+ })
+ .then("getting the list of databases",
+ function(conn) {
+ const res = conn.adminCommand({listDatabases: 1});
+ if (!res.ok) {
+ // TODO: SERVER-31916 for the KeyNotFound error
+ assert.commandFailedWithCode(
+ res, [ErrorCodes.Unauthorized, ErrorCodes.KeyNotFound]);
+ return {shouldStop: true, reason: "cannot run listDatabases"};
+ }
+ assert.commandWorked(res);
+ dbNames = res.databases.map(dbInfo => dbInfo.name);
+ })
+ .execute();
- if (!result.ok) {
- print("Skipping collection validation: " + result.msg);
- return;
- }
+ if (!result.ok) {
+ print("Skipping collection validation: " + result.msg);
+ return;
+ }
- load('jstests/hooks/validate_collections.js'); // for validateCollections
+ load('jstests/hooks/validate_collections.js'); // for validateCollections
- const cmds = new CommandSequenceWithRetries(conn);
- for (let i = 0; i < dbNames.length; ++i) {
- const dbName = dbNames[i];
- cmds.then("validating " + dbName, function(conn) {
- const validate_res = validateCollections(conn.getDB(dbName), {full: true});
- if (!validate_res.ok) {
- return {
- shouldStop: true,
- reason: "collection validation failed " + tojson(validate_res)
- };
- }
- });
- }
+ const cmds = new CommandSequenceWithRetries(conn);
+ for (let i = 0; i < dbNames.length; ++i) {
+ const dbName = dbNames[i];
+ cmds.then("validating " + dbName, function(conn) {
+ const validate_res = validateCollections(conn.getDB(dbName), {full: true});
+ if (!validate_res.ok) {
+ return {
+ shouldStop: true,
+ reason: "collection validation failed " + tojson(validate_res)
+ };
+ }
+ });
+ }
- assert.commandWorked(cmds.execute());
- };
+ assert.commandWorked(cmds.execute());
+};
})();
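The validation callback above leans on the CommandSequenceWithRetries contract: each .then(description, fn) step receives the connection, and returning {shouldStop: true, reason} short-circuits the remaining steps. A toy model of that contract, with the retry machinery deliberately left out:

    function SequenceSketch(conn) {
        var steps = [];
        return {
            then: function(description, fn) {
                steps.push({description: description, fn: fn});
                return this;  // allow chaining
            },
            execute: function() {
                for (var i = 0; i < steps.length; ++i) {
                    var res = steps[i].fn(conn);
                    if (res && res.shouldStop) {
                        return {ok: 0, msg: res.reason};  // short-circuit
                    }
                }
                return {ok: 1};
            },
        };
    }

    SequenceSketch({}).then("always stops", function() {
        return {shouldStop: true, reason: "demo"};
    }).execute();  // {ok: 0, msg: "demo"}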
diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js
index dd2f75a9da5..1393871e1ab 100644
--- a/jstests/libs/test_background_ops.js
+++ b/jstests/libs/test_background_ops.js
@@ -6,7 +6,6 @@
* Allows synchronization between background ops and the test operations
*/
var waitForLock = function(mongo, name) {
-
var ts = new ObjectId();
var lockColl = mongo.getCollection("config.testLocks");
@@ -32,13 +31,13 @@ var waitForLock = function(mongo, name) {
return gleObj.n == 1 || gleObj.updatedExisting;
}, "could not acquire lock", 30 * 1000, 100);
- print("Acquired lock " + tojson({_id: name, ts: ts}) + " curr : " +
- tojson(lockColl.findOne({_id: name})));
+ print("Acquired lock " + tojson({_id: name, ts: ts}) +
+ " curr : " + tojson(lockColl.findOne({_id: name})));
// Set the state back to 0
var unlock = function() {
- print("Releasing lock " + tojson({_id: name, ts: ts}) + " curr : " +
- tojson(lockColl.findOne({_id: name})));
+ print("Releasing lock " + tojson({_id: name, ts: ts}) +
+ " curr : " + tojson(lockColl.findOne({_id: name})));
lockColl.update({_id: name, ts: ts}, {$set: {state: 0}});
};
@@ -101,7 +100,6 @@ function startParallelShell(jsCode, port) {
}
startParallelOps = function(mongo, proc, args, context) {
-
var procName = proc.name + "-" + new ObjectId();
var seed = new ObjectId(new ObjectId().valueOf().split("").reverse().join(""))
.getTimestamp()
@@ -121,7 +119,6 @@ startParallelOps = function(mongo, proc, args, context) {
setResult: setResult,
setup: function(context, stored) {
-
waitForLock = function() {
return context.waitForLock(db.getMongo(), context.procName);
};
@@ -138,7 +135,6 @@ startParallelOps = function(mongo, proc, args, context) {
};
var bootstrapper = function(stored) {
-
var procContext = stored.procContext;
eval("procContext = " + procContext);
procContext.setup(procContext, stored);
@@ -147,7 +143,7 @@ startParallelOps = function(mongo, proc, args, context) {
eval("contexts = " + contexts);
for (var i = 0; i < contexts.length; i++) {
- if (typeof(contexts[i]) != "undefined") {
+ if (typeof (contexts[i]) != "undefined") {
// Evaluate all contexts
contexts[i](procContext);
}
@@ -188,8 +184,11 @@ startParallelOps = function(mongo, proc, args, context) {
var bootstrapStartup = "{ var procName = '" + procName + "'; " +
"var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
- ".findOne({ _id : procName }); " + "var bootstrapper = stored.bootstrapper; " +
- "eval( 'bootstrapper = ' + bootstrapper ); " + "bootstrapper( stored ); " + "}";
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}";
// Save the global db object if it exists, so that we can restore it after starting the parallel
// shell.
@@ -236,7 +235,6 @@ startParallelOps = function(mongo, proc, args, context) {
};
var RandomFunctionContext = function(context) {
-
Random.srand(context.seed);
Random.randBool = function() {
@@ -244,7 +242,6 @@ var RandomFunctionContext = function(context) {
};
Random.randInt = function(min, max) {
-
if (max == undefined) {
max = min;
min = 0;
@@ -254,7 +251,6 @@ var RandomFunctionContext = function(context) {
};
Random.randShardKey = function() {
-
var numFields = 2; // Random.randInt(1, 3)
var key = {};
@@ -267,7 +263,6 @@ var RandomFunctionContext = function(context) {
};
Random.randShardKeyValue = function(shardKey) {
-
var keyValue = {};
for (field in shardKey) {
keyValue[field] = Random.randInt(1, 100);
@@ -277,7 +272,6 @@ var RandomFunctionContext = function(context) {
};
Random.randCluster = function() {
-
var numShards = 2; // Random.randInt( 1, 10 )
var rs = false; // Random.randBool()
var st = new ShardingTest({shards: numShards, mongos: 4, other: {rs: rs}});
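The lock in waitForLock above is just a document in config.testLocks flipped from state 0 to state 1 with the caller's ObjectId as the ts, and released by the matching ts. A hedged sketch of that acquire/release shape, assuming a shell connection named mongo and modern WriteResult semantics (the original uses legacy getLastError-style results, and retries the acquire in assert.soon):

    var lockColl = mongo.getCollection("config.testLocks");
    var ts = new ObjectId();

    // Acquire: succeeds only if no other holder currently has state 1.
    var res = lockColl.update({_id: "demo-lock", state: 0},
                              {$set: {state: 1, ts: ts}},
                              {upsert: true});
    var acquired = !res.hasWriteError() && (res.nUpserted === 1 || res.nModified === 1);

    // Release: only the holder whose ts matches may flip the state back to 0.
    if (acquired) {
        lockColl.update({_id: "demo-lock", ts: ts}, {$set: {state: 0}});
    }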
diff --git a/jstests/libs/transactions_util.js b/jstests/libs/transactions_util.js
index af9cccb44c0..3ddb09d5c28 100644
--- a/jstests/libs/transactions_util.js
+++ b/jstests/libs/transactions_util.js
@@ -76,7 +76,7 @@ var TransactionsUtil = (function() {
function deepCopyObject(dst, src) {
for (var k in src) {
var v = src[k];
- if (typeof(v) == "object" && v !== null) {
+ if (typeof (v) == "object" && v !== null) {
if (v.constructor === ObjectId) { // convert ObjectId properly
eval("v = " + tojson(v));
} else if (v instanceof NumberLong) { // convert NumberLong properly
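The typeof tweak in this hunk is purely cosmetic, but the surrounding deepCopyObject logic is worth spelling out: plain objects are recursed into, while BSON wrapper types such as ObjectId and NumberLong must be rebuilt, not copied field by field. A simplified sketch assuming a mongo shell environment (arrays and the original's dst parameter are out of scope here):

    function deepCopySketch(src) {
        var dst = {};
        for (var k in src) {
            var v = src[k];
            if (typeof v === "object" && v !== null) {
                if (v.constructor === ObjectId || v instanceof NumberLong) {
                    // Round-trip through tojson to recreate the wrapper intact.
                    dst[k] = eval("(" + tojson(v) + ")");
                } else {
                    dst[k] = deepCopySketch(v);
                }
            } else {
                dst[k] = v;
            }
        }
        return dst;
    }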
diff --git a/jstests/libs/txns/txn_passthrough_runner.js b/jstests/libs/txns/txn_passthrough_runner.js
index 1e2640cd11b..43d1ecf6575 100644
--- a/jstests/libs/txns/txn_passthrough_runner.js
+++ b/jstests/libs/txns/txn_passthrough_runner.js
@@ -1,13 +1,13 @@
(function() {
- 'use strict';
+'use strict';
- const testFile = TestData.multiStmtTxnTestFile;
+const testFile = TestData.multiStmtTxnTestFile;
- try {
- load(testFile);
- } finally {
- // Run a lightweight command to allow the override file to commit the last command.
- // Ensure this command runs even if the test errors.
- assert.commandWorked(db.runCommand({ping: 1}));
- }
+try {
+ load(testFile);
+} finally {
+ // Run a lightweight command to allow the override file to commit the last command.
+ // Ensure this command runs even if the test errors.
+ assert.commandWorked(db.runCommand({ping: 1}));
+}
})();
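The try/finally above exists so that a trailing no-op command always runs, even when the loaded test throws; the transaction override treats that command as its cue to commit the last open transaction. The same shape with the moving parts named (shell helpers like assert.commandWorked assumed):

    function runWithFinalCommit(loadTestFn, db) {
        try {
            loadTestFn();  // may throw while the override still has an open txn
        } finally {
            // Any cheap command gives the override a chance to commit it.
            assert.commandWorked(db.runCommand({ping: 1}));
        }
    }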
diff --git a/jstests/libs/txns/txn_passthrough_runner_selftest.js b/jstests/libs/txns/txn_passthrough_runner_selftest.js
index 86308f45188..b7836315b59 100644
--- a/jstests/libs/txns/txn_passthrough_runner_selftest.js
+++ b/jstests/libs/txns/txn_passthrough_runner_selftest.js
@@ -2,32 +2,31 @@
// check that operation is not visible immediately, but is visible after the transaction commits.
(function() {
- 'use strict';
+'use strict';
- const testName = jsTest.name();
+const testName = jsTest.name();
- // Use a unique db for every test so burn_in_tests can run this test multiple times.
- db = db.getSiblingDB('txn_self_test' + Random.srand());
+// Use a unique db for every test so burn_in_tests can run this test multiple times.
+db = db.getSiblingDB('txn_self_test' + Random.srand());
- // Profile all commands.
- db.setProfilingLevel(2);
+// Profile all commands.
+db.setProfilingLevel(2);
- const coll = db[testName];
+const coll = db[testName];
- assert.commandWorked(coll.insert({x: 1}));
- let commands = db.system.profile.find().toArray();
- // Check that the insert is not visible because the txn has not committed.
- assert.eq(commands.length, 1);
- assert.eq(commands[0].command.create, testName);
+assert.commandWorked(coll.insert({x: 1}));
+let commands = db.system.profile.find().toArray();
+// Check that the insert is not visible because the txn has not committed.
+assert.eq(commands.length, 1);
+assert.eq(commands[0].command.create, testName);
- // Use a dummy, unrelated operation to signal the txn runner to commit the transaction.
- assert.commandWorked(db.runCommand({ping: 1}));
-
- commands = db.system.profile.find().toArray();
- // Assert the insert is now visible.
- assert.eq(commands.length, 3);
- assert.eq(commands[0].command.create, testName);
- assert.eq(commands[1].command.insert, testName);
- assert.eq(commands[2].command.find, 'system.profile');
+// Use a dummy, unrelated operation to signal the txn runner to commit the transaction.
+assert.commandWorked(db.runCommand({ping: 1}));
+commands = db.system.profile.find().toArray();
+// Assert the insert is now visible.
+assert.eq(commands.length, 3);
+assert.eq(commands[0].command.create, testName);
+assert.eq(commands[1].command.insert, testName);
+assert.eq(commands[2].command.find, 'system.profile');
})();
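The selftest drives every visibility assertion off the profiler: at profiling level 2 each command lands in db.system.profile, so "is the insert visible yet" becomes a count of profile entries. A minimal profiler round-trip, shell environment assumed and the collection name invented:

    db.setProfilingLevel(2);                       // profile all operations
    assert.commandWorked(db.demo.insert({x: 1}));
    db.system.profile.find().toArray().forEach(function(entry) {
        print(entry.op + " on " + entry.ns);       // e.g. "insert on test.demo"
    });
    db.setProfilingLevel(0);                       // profiler off again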
diff --git a/jstests/multiVersion/2_test_launching_cluster.js b/jstests/multiVersion/2_test_launching_cluster.js
index f26d3e78ac0..30da8d39a73 100644
--- a/jstests/multiVersion/2_test_launching_cluster.js
+++ b/jstests/multiVersion/2_test_launching_cluster.js
@@ -13,49 +13,49 @@
load('./jstests/multiVersion/libs/verify_versions.js');
(function() {
- "use strict";
- // Check our latest versions
- var versionsToCheck = ["last-stable", "latest"];
- var versionsToCheckConfig = ["latest"];
- var versionsToCheckMongos = ["last-stable"];
-
- jsTest.log("Testing mixed versions...");
-
- // Set up a multi-version cluster
- var st = new ShardingTest({
- shards: 2,
- mongos: 2,
- other: {
- mongosOptions: {binVersion: versionsToCheckMongos},
- configOptions: {binVersion: versionsToCheckConfig},
- shardOptions: {binVersion: versionsToCheck},
- enableBalancer: true,
- shardAsReplicaSet: false
- }
- });
-
- var shards = [st.shard0, st.shard1];
- var mongoses = [st.s0, st.s1];
- var configs = [st.config0, st.config1, st.config2];
-
- // Make sure we have hosts of all the different versions
- var versionsFound = [];
- for (var j = 0; j < shards.length; j++)
- versionsFound.push(shards[j].getBinVersion());
-
- assert.allBinVersions(versionsToCheck, versionsFound);
-
- versionsFound = [];
- for (var j = 0; j < mongoses.length; j++)
- versionsFound.push(mongoses[j].getBinVersion());
-
- assert.allBinVersions(versionsToCheckMongos, versionsFound);
-
- versionsFound = [];
- for (var j = 0; j < configs.length; j++)
- versionsFound.push(configs[j].getBinVersion());
-
- assert.allBinVersions(versionsToCheckConfig, versionsFound);
-
- st.stop();
+"use strict";
+// Check our latest versions
+var versionsToCheck = ["last-stable", "latest"];
+var versionsToCheckConfig = ["latest"];
+var versionsToCheckMongos = ["last-stable"];
+
+jsTest.log("Testing mixed versions...");
+
+// Set up a multi-version cluster
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ other: {
+ mongosOptions: {binVersion: versionsToCheckMongos},
+ configOptions: {binVersion: versionsToCheckConfig},
+ shardOptions: {binVersion: versionsToCheck},
+ enableBalancer: true,
+ shardAsReplicaSet: false
+ }
+});
+
+var shards = [st.shard0, st.shard1];
+var mongoses = [st.s0, st.s1];
+var configs = [st.config0, st.config1, st.config2];
+
+// Make sure we have hosts of all the different versions
+var versionsFound = [];
+for (var j = 0; j < shards.length; j++)
+ versionsFound.push(shards[j].getBinVersion());
+
+assert.allBinVersions(versionsToCheck, versionsFound);
+
+versionsFound = [];
+for (var j = 0; j < mongoses.length; j++)
+ versionsFound.push(mongoses[j].getBinVersion());
+
+assert.allBinVersions(versionsToCheckMongos, versionsFound);
+
+versionsFound = [];
+for (var j = 0; j < configs.length; j++)
+ versionsFound.push(configs[j].getBinVersion());
+
+assert.allBinVersions(versionsToCheckConfig, versionsFound);
+
+st.stop();
})();
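The version audit in this test reduces to: collect getBinVersion() from every node in a group and check that each expected version appears at least once. A plain-JS sketch of that containment check (the real helper is assert.allBinVersions, which also resolves version aliases; this name is invented):

    function coversAllBinVersions(versionsToCheck, versionsFound) {
        return versionsToCheck.every(function(version) {
            return versionsFound.indexOf(version) !== -1;
        });
    }

    coversAllBinVersions(["last-stable", "latest"], ["latest", "last-stable"]);  // true
    coversAllBinVersions(["last-stable", "latest"], ["latest", "latest"]);       // false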
diff --git a/jstests/multiVersion/add_invalid_shard.js b/jstests/multiVersion/add_invalid_shard.js
index caaeb23b839..1f9dfc9c40f 100644
--- a/jstests/multiVersion/add_invalid_shard.js
+++ b/jstests/multiVersion/add_invalid_shard.js
@@ -3,49 +3,48 @@
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardDoc = configDB.shards.findOne();
+var configDB = st.s.getDB('config');
+var shardDoc = configDB.shards.findOne();
- // Can't add mongos as shard.
- assert.commandFailedWithCode(st.admin.runCommand({addshard: st.s.host}),
- ErrorCodes.IllegalOperation);
+// Can't add mongos as shard.
+assert.commandFailedWithCode(st.admin.runCommand({addshard: st.s.host}),
+ ErrorCodes.IllegalOperation);
- // Can't add a mongod with a lower binary version than our featureCompatibilityVersion.
- var lastStableMongod = MongoRunner.runMongod({binVersion: "last-stable", shardsvr: ""});
- assert.commandFailedWithCode(st.admin.runCommand({addshard: lastStableMongod.host}),
- ErrorCodes.IncompatibleServerVersion);
- MongoRunner.stopMongod(lastStableMongod);
+// Can't add a mongod with a lower binary version than our featureCompatibilityVersion.
+var lastStableMongod = MongoRunner.runMongod({binVersion: "last-stable", shardsvr: ""});
+assert.commandFailedWithCode(st.admin.runCommand({addshard: lastStableMongod.host}),
+ ErrorCodes.IncompatibleServerVersion);
+MongoRunner.stopMongod(lastStableMongod);
- // Can't add config servers as shard.
- assert.commandFailed(st.admin.runCommand({addshard: st._configDB}));
+// Can't add config servers as shard.
+assert.commandFailed(st.admin.runCommand({addshard: st._configDB}));
- var replTest = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
- replTest.startSet({oplogSize: 10});
- replTest.initiate();
+var replTest = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
+replTest.startSet({oplogSize: 10});
+replTest.initiate();
- var rsConnStr = replTest.getURL();
- // Can't add replSet as shard if the name doesn't match the replSet config.
- assert.commandFailed(st.admin.runCommand({addshard: "prefix_" + rsConnStr}));
+var rsConnStr = replTest.getURL();
+// Can't add replSet as shard if the name doesn't match the replSet config.
+assert.commandFailed(st.admin.runCommand({addshard: "prefix_" + rsConnStr}));
- assert.commandWorked(st.admin.runCommand({addshard: rsConnStr, name: 'dummyRS'}));
+assert.commandWorked(st.admin.runCommand({addshard: rsConnStr, name: 'dummyRS'}));
- // Cannot add the same replSet shard host twice when using a unique shard name.
- assert.commandFailed(st.admin.runCommand({addshard: rsConnStr, name: 'dupRS'}));
+// Cannot add the same replSet shard host twice when using a unique shard name.
+assert.commandFailed(st.admin.runCommand({addshard: rsConnStr, name: 'dupRS'}));
- // Cannot add the same stand alone shard host twice with a unique shard name.
- assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host, name: 'dupShard'}));
+// Cannot add the same standalone shard host twice with a unique shard name.
+assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host, name: 'dupShard'}));
- // Cannot add a replica set connection string containing a member that isn't actually part of
- // the replica set.
- var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
- assert.commandFailed(
- st.admin.runCommand({addshard: truncatedRSConnStr + 'fakehost', name: 'dummyRS'}));
-
- replTest.stopSet();
- st.stop();
+// Cannot add a replica set connection string containing a member that isn't actually part of
+// the replica set.
+var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
+assert.commandFailed(
+ st.admin.runCommand({addshard: truncatedRSConnStr + 'fakehost', name: 'dummyRS'}));
+replTest.stopSet();
+st.stop();
})();
diff --git a/jstests/multiVersion/change_streams_feature_compatibility_version.js b/jstests/multiVersion/change_streams_feature_compatibility_version.js
index 23c489893e8..37c8ac7621b 100644
--- a/jstests/multiVersion/change_streams_feature_compatibility_version.js
+++ b/jstests/multiVersion/change_streams_feature_compatibility_version.js
@@ -3,103 +3,101 @@
// stream after network errors.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({
- nodes: 2,
- nodeOptions: {binVersion: "last-stable"},
- });
+const rst = new ReplSetTest({
+ nodes: 2,
+ nodeOptions: {binVersion: "last-stable"},
+});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
-
- rst.initiate();
-
- let testDB = rst.getPrimary().getDB(jsTestName());
- let coll = testDB.change_stream_upgrade;
-
- // Open a change stream against a 4.0 binary. We will use the resume token from this stream to
- // resume the stream once the set has been upgraded.
- let streamStartedOnOldVersion = coll.watch();
- assert.commandWorked(coll.insert({_id: "first insert, just for resume token"}));
-
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- let change = streamStartedOnOldVersion.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "first insert, just for resume token", tojson(change));
- const resumeTokenFromLastStable = change._id;
-
- assert.commandWorked(coll.insert({_id: "before binary upgrade"}));
- // Upgrade the set to the new binary version, but keep the feature compatibility version at 4.0.
- rst.upgradeSet({binVersion: "latest"});
- testDB = rst.getPrimary().getDB(jsTestName());
- coll = testDB.change_stream_upgrade;
-
- // Test that we can resume the stream on the new binaries.
- streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- change = streamStartedOnOldVersion.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
-
- let streamStartedOnNewVersionOldFCV = coll.watch();
-
- assert.commandWorked(coll.insert({_id: "after binary upgrade, before fcv switch"}));
-
- let resumeTokenFromNewVersionOldFCV;
- [streamStartedOnOldVersion, streamStartedOnNewVersionOldFCV].forEach(stream => {
- assert.soon(() => stream.hasNext());
- change = stream.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(
- change.documentKey._id, "after binary upgrade, before fcv switch", tojson(change));
- if (resumeTokenFromNewVersionOldFCV === undefined) {
- resumeTokenFromNewVersionOldFCV = change._id;
- } else {
- assert.eq(resumeTokenFromNewVersionOldFCV, change._id);
- }
- });
-
- // Explicitly set feature compatibility version to 4.2.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- const streamStartedOnNewVersion = coll.watch();
-
- // Test that we can still resume with the token from the old version. We should see the same
- // document again.
- streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- change = streamStartedOnOldVersion.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
-
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- change = streamStartedOnOldVersion.next();
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(jsTestName());
+let coll = testDB.change_stream_upgrade;
+
+// Open a change stream against a 4.0 binary. We will use the resume token from this stream to
+// resume the stream once the set has been upgraded.
+let streamStartedOnOldVersion = coll.watch();
+assert.commandWorked(coll.insert({_id: "first insert, just for resume token"}));
+
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+let change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "first insert, just for resume token", tojson(change));
+const resumeTokenFromLastStable = change._id;
+
+assert.commandWorked(coll.insert({_id: "before binary upgrade"}));
+// Upgrade the set to the new binary version, but keep the feature compatibility version at 4.0.
+rst.upgradeSet({binVersion: "latest"});
+testDB = rst.getPrimary().getDB(jsTestName());
+coll = testDB.change_stream_upgrade;
+
+// Test that we can resume the stream on the new binaries.
+streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
+
+let streamStartedOnNewVersionOldFCV = coll.watch();
+
+assert.commandWorked(coll.insert({_id: "after binary upgrade, before fcv switch"}));
+
+let resumeTokenFromNewVersionOldFCV;
+[streamStartedOnOldVersion, streamStartedOnNewVersionOldFCV].forEach(stream => {
+ assert.soon(() => stream.hasNext());
+ change = stream.next();
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.documentKey._id, "after binary upgrade, before fcv switch", tojson(change));
-
- assert.commandWorked(coll.insert({_id: "after fcv upgrade"}));
- const resumedStreamOnNewVersion =
- coll.watch([], {resumeAfter: resumeTokenFromNewVersionOldFCV});
-
- // Test that all open streams continue to produce change events, and that the newly resumed
- // stream sees the write that just happened since it comes after the resume token used.
- for (let stream of[streamStartedOnOldVersion,
- streamStartedOnNewVersionOldFCV,
- streamStartedOnNewVersion,
- resumedStreamOnNewVersion]) {
- assert.soon(() => stream.hasNext());
- change = stream.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "after fcv upgrade", tojson(change));
- stream.close();
+ if (resumeTokenFromNewVersionOldFCV === undefined) {
+ resumeTokenFromNewVersionOldFCV = change._id;
+ } else {
+ assert.eq(resumeTokenFromNewVersionOldFCV, change._id);
}
+});
+
+// Explicitly set feature compatibility version to 4.2.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+const streamStartedOnNewVersion = coll.watch();
+
+// Test that we can still resume with the token from the old version. We should see the same
+// document again.
+streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
+
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "after binary upgrade, before fcv switch", tojson(change));
+
+assert.commandWorked(coll.insert({_id: "after fcv upgrade"}));
+const resumedStreamOnNewVersion = coll.watch([], {resumeAfter: resumeTokenFromNewVersionOldFCV});
+
+// Test that all open streams continue to produce change events, and that the newly resumed
+// stream sees the write that just happened since it comes after the resume token used.
+for (let stream of [streamStartedOnOldVersion,
+ streamStartedOnNewVersionOldFCV,
+ streamStartedOnNewVersion,
+ resumedStreamOnNewVersion]) {
+ assert.soon(() => stream.hasNext());
+ change = stream.next();
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.documentKey._id, "after fcv upgrade", tojson(change));
+ stream.close();
+}
- rst.stopSet();
+rst.stopSet();
}());
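The resume-token contract exercised throughout this test: every change event carries an _id token, and a new stream opened with {resumeAfter: token} replays events after that point, across binary and FCV changes. A minimal shape of that round trip (shell environment and a replica-set connection assumed; the collection name is invented):

    var stream = db.demo.watch();                 // change streams require a replica set
    assert.commandWorked(db.demo.insert({_id: "a"}));
    assert.soon(function() { return stream.hasNext(); });
    var token = stream.next()._id;                // save the resume token

    assert.commandWorked(db.demo.insert({_id: "b"}));
    var resumed = db.demo.watch([], {resumeAfter: token});
    assert.soon(function() { return resumed.hasNext(); });
    assert.eq(resumed.next().documentKey._id, "b");  // replay picks up after "a"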
diff --git a/jstests/multiVersion/change_streams_high_water_mark_cluster.js b/jstests/multiVersion/change_streams_high_water_mark_cluster.js
index 38c061facfc..24602d8a250 100644
--- a/jstests/multiVersion/change_streams_high_water_mark_cluster.js
+++ b/jstests/multiVersion/change_streams_high_water_mark_cluster.js
@@ -3,244 +3,241 @@
* and downgrade to both pre- and post-backport versions of 4.0 on a sharded cluster.
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // assertCreateCollection
- load("jstests/libs/fixture_helpers.js"); // runCommandOnEachPrimary
- load("jstests/multiVersion/libs/causal_consistency_helpers.js"); // supportsMajorityReadConcern
- load("jstests/multiVersion/libs/change_stream_hwm_helpers.js"); // ChangeStreamHWMHelpers
- load("jstests/multiVersion/libs/index_format_downgrade.js"); // downgradeUniqueIndexes
- load("jstests/multiVersion/libs/multi_cluster.js"); // upgradeCluster
- load("jstests/multiVersion/libs/multi_rs.js"); // upgradeSet
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // assertCreateCollection
+load("jstests/libs/fixture_helpers.js"); // runCommandOnEachPrimary
+load("jstests/multiVersion/libs/causal_consistency_helpers.js"); // supportsMajorityReadConcern
+load("jstests/multiVersion/libs/change_stream_hwm_helpers.js"); // ChangeStreamHWMHelpers
+load("jstests/multiVersion/libs/index_format_downgrade.js"); // downgradeUniqueIndexes
+load("jstests/multiVersion/libs/multi_cluster.js"); // upgradeCluster
+load("jstests/multiVersion/libs/multi_rs.js"); // upgradeSet
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const preBackport40Version = ChangeStreamHWMHelpers.preBackport40Version;
+const postBackport40Version = ChangeStreamHWMHelpers.postBackport40Version;
+const latest42Version = ChangeStreamHWMHelpers.latest42Version;
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ rs: {nodes: 3},
+ other: {
+ mongosOptions: {binVersion: preBackport40Version},
+ configOptions: {binVersion: preBackport40Version},
+ rsOptions: {
+ binVersion: preBackport40Version,
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ },
+ }
+});
+
+// Obtain references to the test database via mongoS and directly on shard0.
+let mongosDB = st.s.getDB(jsTestName());
+let primaryShard = st.rs0.getPrimary();
+let primaryShardDB = primaryShard.getDB(jsTestName());
+
+// Names of each of the collections used in the course of this test.
+const shardedCollName = "sharded_coll";
+const unshardedCollName = "unsharded_coll";
+
+// Updates the specified cluster components and then refreshes our references to each of them.
+function refreshCluster(version, components, singleShard) {
+ if (singleShard) {
+ singleShard.upgradeSet({binVersion: version});
+ } else {
+ st.upgradeCluster(version, components);
+ }
+
+ // Wait for the config server and shards to become available, and restart mongoS.
+ st.configRS.awaitReplication();
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+ st.restartMongoses();
+
+ // Having upgraded the cluster, reacquire references to each component.
+ mongosDB = st.s.getDB(jsTestName());
+ primaryShard = st.rs0.getPrimary();
+ primaryShardDB = primaryShard.getDB(jsTestName());
+
+ // Re-apply the 'writePeriodicNoops' parameter to the up/downgraded shards.
+ const mongosAdminDB = mongosDB.getSiblingDB("admin");
+ FixtureHelpers.runCommandOnEachPrimary(
+ {db: mongosAdminDB, cmdObj: {setParameter: 1, writePeriodicNoops: true}});
+}
+
+// Enable sharding on the test database and ensure that the primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), primaryShard.name);
+
+// Create an unsharded collection on the primary shard via mongoS.
+assertCreateCollection(mongosDB, unshardedCollName);
+
+// Create a sharded collection on {_id: 1}, split across the shards at {_id: 0}.
+const collToShard = assertCreateCollection(mongosDB, shardedCollName);
+st.shardColl(collToShard, {_id: 1}, {_id: 0}, {_id: 1});
+
+// We perform these tests once for pre-backport 4.0, and once for post-backport 4.0.
+for (let oldVersion of [preBackport40Version, postBackport40Version]) {
+ // Maps used to associate collection names with collection objects and HWM tokens.
+ const collMap = {
+ [unshardedCollName]: () => st.s.getDB(jsTestName()).mongosUnshardedColl,
+ [shardedCollName]: () => st.s.getDB(jsTestName()).mongosShardedColl
+ };
+ const hwmTokenMap = {};
+
+ // Determine whether we are running a pre- or post-backport version of 4.0.
+ const isPostBackport = (oldVersion === postBackport40Version);
+
+ // We start with the cluster running on 'oldVersion'. We should only produce PBRTs if we are
+ // running a post-backport version of 4.0.
+ jsTestLog(`Testing binary ${oldVersion} mongoS and shards`);
+ refreshCluster(oldVersion);
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
+ {coll: collMap[collName](), expectPBRT: isPostBackport});
+ assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
+ }
+
+ // Upgrade a single shard to 4.2 but leave the mongoS on 'oldVersion'. We should produce
+ // PBRTs only if running a post-backport version of 4.0. Regardless of the exact version of
+ // 4.0, the new shard should continue to produce resumable tokens and use the appropriate
+ // $sortKey format while the cluster is mid-upgrade.
+ jsTestLog("Upgrading shard1 to binary 4.2 FCV 4.0");
+ refreshCluster(latest42Version, null, st.rs1);
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: isPostBackport,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: isPostBackport
+ });
+ assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
+ }
+
+ // Upgrade the remaining shard to 4.2 but leave the mongoS on 'oldVersion'.
+ jsTestLog(`Upgrading to binary ${oldVersion} mongoS and binary 4.2 shards with FCV 4.0`);
+ refreshCluster(latest42Version,
+ {upgradeMongos: false, upgradeShards: true, upgradeConfigs: true});
+
+ // The shards have been upgraded to 4.2 but the mongoS is running 4.0. The mongoS should be
+ // able to merge the output from the shards, but the mongoS streams will only generate a
+ // PBRT if we are upgrading from a post-backport version of 4.0.
+ jsTestLog(`Testing binary ${oldVersion} mongoS and binary 4.2 shards with FCV 4.0`);
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: isPostBackport,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: isPostBackport
+ });
+ assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
+ }
+
+ // Upgrade the mongoS to 4.2 but leave the cluster in FCV 4.0
+ jsTestLog("Upgrading to binary 4.2 mongoS and shards with FCV 4.0");
+ refreshCluster(latest42Version,
+ {upgradeMongos: true, upgradeShards: false, upgradeConfigs: false});
+
+ // All streams should now return PBRTs, and we should obtain a valid HWM from the test.
+ jsTestLog("Testing binary 4.2 mongoS and shards with FCV 4.0");
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: true,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: true
+ });
+ assert.neq(hwmTokenMap[collName], undefined);
+ }
+
+ // Set the cluster's FCV to 4.2.
+ assert.commandWorked(mongosDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+ // Streams should return PBRTs; we can resume from all HWMs tokens in the previous test.
+ jsTestLog("Testing binary 4.2 mongoS and shards with FCV 4.2");
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: true,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: true
+ });
+ assert.neq(hwmTokenMap[collName], undefined);
+ }
+
+ // Downgrade the cluster to FCV 4.0. We should continue to produce PBRTs and can resume from
+ // the tokens that we generated previously.
+ jsTestLog("Downgrading to FCV 4.0 shards");
+ assert.commandWorked(mongosDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+ jsTestLog("Testing binary 4.2 mongoS and shards with downgraded FCV 4.0");
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: true,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: true
+ });
+ assert.neq(hwmTokenMap[collName], undefined);
+ }
+
+ // Downgrade the mongoS to 'oldVersion'. We should be able to create new streams and resume
+ // from their tokens, but can only resume from the previously-generated v1 tokens if we are
+ // running a post-backport version of 4.0.
+ jsTestLog(`Downgrading to binary ${oldVersion} mongoS with FCV 4.0 shards`);
+ refreshCluster(oldVersion, {upgradeMongos: true, upgradeShards: false, upgradeConfigs: false});
+
+ // Should only receive PBRTs and be able to resume via mongoS if running post-backport 4.0.
+ jsTestLog(`Testing downgraded binary ${oldVersion} mongoS with binary 4.2 FCV 4.0 shards`);
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: isPostBackport,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: isPostBackport
+ });
+ assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
}
- const preBackport40Version = ChangeStreamHWMHelpers.preBackport40Version;
- const postBackport40Version = ChangeStreamHWMHelpers.postBackport40Version;
- const latest42Version = ChangeStreamHWMHelpers.latest42Version;
-
- const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- rs: {nodes: 3},
- other: {
- mongosOptions: {binVersion: preBackport40Version},
- configOptions: {binVersion: preBackport40Version},
- rsOptions: {
- binVersion: preBackport40Version,
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- },
- }
- });
-
- // Obtain references to the test database via mongoS and directly on shard0.
- let mongosDB = st.s.getDB(jsTestName());
- let primaryShard = st.rs0.getPrimary();
- let primaryShardDB = primaryShard.getDB(jsTestName());
-
- // Names of each of the collections used in the course of this test.
- const shardedCollName = "sharded_coll";
- const unshardedCollName = "unsharded_coll";
-
- // Updates the specified cluster components and then refreshes our references to each of them.
- function refreshCluster(version, components, singleShard) {
- if (singleShard) {
- singleShard.upgradeSet({binVersion: version});
- } else {
- st.upgradeCluster(version, components);
- }
-
- // Wait for the config server and shards to become available, and restart mongoS.
- st.configRS.awaitReplication();
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- st.restartMongoses();
-
- // Having upgraded the cluster, reacquire references to each component.
- mongosDB = st.s.getDB(jsTestName());
- primaryShard = st.rs0.getPrimary();
- primaryShardDB = primaryShard.getDB(jsTestName());
-
- // Re-apply the 'writePeriodicNoops' parameter to the up/downgraded shards.
- const mongosAdminDB = mongosDB.getSiblingDB("admin");
- FixtureHelpers.runCommandOnEachPrimary(
- {db: mongosAdminDB, cmdObj: {setParameter: 1, writePeriodicNoops: true}});
+ // Downgrade a single shard to 'oldVersion', after rebuilding all unique indexes so that
+ // their format is compatible. We should continue to observe the same behaviour as we did in
+ // the previous test.
+ jsTestLog(`Downgrading shard1 to binary ${oldVersion}`);
+ downgradeUniqueIndexes(mongosDB);
+ refreshCluster(oldVersion, null, st.rs1);
+ jsTestLog(`Testing binary ${oldVersion} shard1 and mongoS with binary 4.2 FCV 4.0 shard0`);
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: isPostBackport,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: isPostBackport
+ });
+ assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
}
- // Enable sharding on the test database and ensure that the primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), primaryShard.name);
-
- // Create an unsharded collection on the primary shard via mongoS.
- assertCreateCollection(mongosDB, unshardedCollName);
-
- // Create a sharded collection on {_id: 1}, split across the shards at {_id: 0}.
- const collToShard = assertCreateCollection(mongosDB, shardedCollName);
- st.shardColl(collToShard, {_id: 1}, {_id: 0}, {_id: 1});
-
- // We perform these tests once for pre-backport 4.0, and once for post-backport 4.0.
- for (let oldVersion of[preBackport40Version, postBackport40Version]) {
- // Maps used to associate collection names with collection objects and HWM tokens.
- const collMap = {
- [unshardedCollName]: () => st.s.getDB(jsTestName()).mongosUnshardedColl,
- [shardedCollName]: () => st.s.getDB(jsTestName()).mongosShardedColl
- };
- const hwmTokenMap = {};
-
- // Determine whether we are running a pre- or post-backport version of 4.0.
- const isPostBackport = (oldVersion === postBackport40Version);
-
- // We start with the cluster running on 'oldVersion'. We should only produce PBRTs if we are
- // running a post-backport version of 4.0.
- jsTestLog(`Testing binary ${oldVersion} mongoS and shards`);
- refreshCluster(oldVersion);
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
- {coll: collMap[collName](), expectPBRT: isPostBackport});
- assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
- }
-
- // Upgrade a single shard to 4.2 but leave the mongoS on 'oldVersion'. We should produce
- // PBRTs only if running a post-backport version of 4.0. Regardless of the exact version of
- // 4.0, the new shard should continue to produce resumable tokens and use the appropriate
- // $sortKey format while the cluster is mid-upgrade.
- jsTestLog("Upgrading shard1 to binary 4.2 FCV 4.0");
- refreshCluster(latest42Version, null, st.rs1);
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: isPostBackport,
- hwmToResume: hwmTokenMap[collName],
- expectResume: isPostBackport
- });
- assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
- }
-
- // Upgrade the remaining shard to 4.2 but leave the mongoS on 'oldVersion'.
- jsTestLog(`Upgrading to binary ${oldVersion} mongoS and binary 4.2 shards with FCV 4.0`);
- refreshCluster(latest42Version,
- {upgradeMongos: false, upgradeShards: true, upgradeConfigs: true});
-
- // The shards have been upgraded to 4.2 but the mongoS is running 4.0. The mongoS should be
- // able to merge the output from the shards, but the mongoS streams will only generate a
- // PBRT if we are upgrading from a post-backport version of 4.0.
- jsTestLog(`Testing binary ${oldVersion} mongoS and binary 4.2 shards with FCV 4.0`);
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: isPostBackport,
- hwmToResume: hwmTokenMap[collName],
- expectResume: isPostBackport
- });
- assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
- }
-
- // Upgrade the mongoS to 4.2 but leave the cluster in FCV 4.0.
- jsTestLog("Upgrading to binary 4.2 mongoS and shards with FCV 4.0");
- refreshCluster(latest42Version,
- {upgradeMongos: true, upgradeShards: false, upgradeConfigs: false});
-
- // All streams should now return PBRTs, and we should obtain a valid HWM from the test.
- jsTestLog("Testing binary 4.2 mongoS and shards with FCV 4.0");
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: true,
- hwmToResume: hwmTokenMap[collName],
- expectResume: true
- });
- assert.neq(hwmTokenMap[collName], undefined);
- }
-
- // Set the cluster's FCV to 4.2.
- assert.commandWorked(mongosDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- // Streams should return PBRTs; we can resume from all HWM tokens in the previous test.
- jsTestLog("Testing binary 4.2 mongoS and shards with FCV 4.2");
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: true,
- hwmToResume: hwmTokenMap[collName],
- expectResume: true
- });
- assert.neq(hwmTokenMap[collName], undefined);
- }
-
- // Downgrade the cluster to FCV 4.0. We should continue to produce PBRTs and can resume from
- // the tokens that we generated previously.
- jsTestLog("Downgrading to FCV 4.0 shards");
- assert.commandWorked(mongosDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- jsTestLog("Testing binary 4.2 mongoS and shards with downgraded FCV 4.0");
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: true,
- hwmToResume: hwmTokenMap[collName],
- expectResume: true
- });
- assert.neq(hwmTokenMap[collName], undefined);
- }
-
- // Downgrade the mongoS to 'oldVersion'. We should be able to create new streams and resume
- // from their tokens, but can only resume from the previously-generated v1 tokens if we are
- // running a post-backport version of 4.0.
- jsTestLog(`Downgrading to binary ${oldVersion} mongoS with FCV 4.0 shards`);
- refreshCluster(oldVersion,
- {upgradeMongos: true, upgradeShards: false, upgradeConfigs: false});
-
- // Should only receive PBRTs and be able to resume via mongoS if running post-backport 4.0.
- jsTestLog(`Testing downgraded binary ${oldVersion} mongoS with binary 4.2 FCV 4.0 shards`);
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: isPostBackport,
- hwmToResume: hwmTokenMap[collName],
- expectResume: isPostBackport
- });
- assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
- }
-
- // Downgrade a single shard to 'oldVersion', after rebuilding all unique indexes so that
- // their format is compatible. We should continue to observe the same behaviour as we did in
- // the previous test.
- jsTestLog(`Downgrading shard1 to binary ${oldVersion}`);
- downgradeUniqueIndexes(mongosDB);
- refreshCluster(oldVersion, null, st.rs1);
- jsTestLog(`Testing binary ${oldVersion} shard1 and mongoS with binary 4.2 FCV 4.0 shard0`);
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: isPostBackport,
- hwmToResume: hwmTokenMap[collName],
- expectResume: isPostBackport
- });
- assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
- }
-
- // Downgrade the remainder of the cluster to 'oldVersion'.
- jsTestLog(`Downgrading to binary ${oldVersion} shards`);
- refreshCluster(oldVersion,
- {upgradeShards: true, upgradeConfigs: false, upgradeMongos: false});
- refreshCluster(oldVersion,
- {upgradeConfigs: true, upgradeShards: false, upgradeMongos: false});
-
- // We should only receive PBRTs and be able to resume if running a post-backport 4.0.
- jsTestLog(`Testing downgraded binary ${oldVersion} mongoS and shards`);
- for (let collName in collMap) {
- hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: collMap[collName](),
- expectPBRT: isPostBackport,
- hwmToResume: hwmTokenMap[collName],
- expectResume: isPostBackport
- });
- assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
- }
+ // Downgrade the remainder of the cluster to 'oldVersion'.
+ jsTestLog(`Downgrading to binary ${oldVersion} shards`);
+ refreshCluster(oldVersion, {upgradeShards: true, upgradeConfigs: false, upgradeMongos: false});
+ refreshCluster(oldVersion, {upgradeConfigs: true, upgradeShards: false, upgradeMongos: false});
+
+ // We should only receive PBRTs and be able to resume if running a post-backport 4.0.
+ jsTestLog(`Testing downgraded binary ${oldVersion} mongoS and shards`);
+ for (let collName in collMap) {
+ hwmTokenMap[collName] = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: collMap[collName](),
+ expectPBRT: isPostBackport,
+ hwmToResume: hwmTokenMap[collName],
+ expectResume: isPostBackport
+ });
+ assert.eq(hwmTokenMap[collName] != undefined, isPostBackport);
}
+}
- st.stop();
+st.stop();
})(); \ No newline at end of file
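Context for the sharded test above: it revolves around the postBatchResumeToken (PBRT) that post-backport 4.0 and 4.2 servers attach to every change stream batch. A minimal sketch of where the token surfaces, illustrative only and not part of the patch; the collection name is hypothetical:

    const res = assert.commandWorked(db.runCommand({
        aggregate: "someColl",  // hypothetical collection name
        pipeline: [{$changeStream: {}}],
        cursor: {}
    }));
    // Pre-backport 4.0 binaries omit this field; later binaries return it with
    // every batch, and on an idle stream it acts as the high-water-mark token.
    const pbrt = res.cursor.postBatchResumeToken;
    // The token can then seed a new stream, which is what the helper's
    // 'hwmToResume' argument exercises.
    const resumed = db.someColl.watch([], {startAfter: pbrt});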
diff --git a/jstests/multiVersion/change_streams_high_water_mark_replset.js b/jstests/multiVersion/change_streams_high_water_mark_replset.js
index a87a9193135..16ffffc8ee4 100644
--- a/jstests/multiVersion/change_streams_high_water_mark_replset.js
+++ b/jstests/multiVersion/change_streams_high_water_mark_replset.js
@@ -3,106 +3,106 @@
* and downgrade to a pre-backport version of 4.0 on a single replica set.
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assertCreateCollection.
- load("jstests/multiVersion/libs/change_stream_hwm_helpers.js"); // For ChangeStreamHWMHelpers.
- load("jstests/multiVersion/libs/index_format_downgrade.js"); // For downgradeUniqueIndexes.
- load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- const preBackport40Version = ChangeStreamHWMHelpers.preBackport40Version;
- const postBackport40Version = ChangeStreamHWMHelpers.postBackport40Version;
- const latest42Version = ChangeStreamHWMHelpers.latest42Version;
-
- const rst = new ReplSetTest({
- nodes: 3,
- nodeOptions: {binVersion: preBackport40Version},
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assertCreateCollection.
+load("jstests/multiVersion/libs/change_stream_hwm_helpers.js"); // For ChangeStreamHWMHelpers.
+load("jstests/multiVersion/libs/index_format_downgrade.js"); // For downgradeUniqueIndexes.
+load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+const preBackport40Version = ChangeStreamHWMHelpers.preBackport40Version;
+const postBackport40Version = ChangeStreamHWMHelpers.postBackport40Version;
+const latest42Version = ChangeStreamHWMHelpers.latest42Version;
+
+const rst = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {binVersion: preBackport40Version},
+});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+
+// Obtain references to the test database and create the test collection.
+let testDB = rst.getPrimary().getDB(jsTestName());
+let testColl = testDB.test;
+
+// Up- or downgrades the replset and then refreshes our references to the test collection.
+function refreshReplSet(version) {
+ // Upgrade the set and wait for it to become available again.
+ rst.upgradeSet({binVersion: version});
+ rst.awaitReplication();
+
+ // Having upgraded the cluster, reacquire references to the db and collection.
+ testDB = rst.getPrimary().getDB(jsTestName());
+ testColl = testDB.test;
+}
+
+// We perform these tests once for pre-backport 4.0, and once for post-backport 4.0.
+for (let oldVersion of [preBackport40Version, postBackport40Version]) {
+ // Stores a high water mark generated by the most recent test and used in subsequent tests.
+ let hwmToken = null;
+
+ // Determine whether we are running a pre- or post-backport version of 4.0.
+ const isPostBackport = (oldVersion === postBackport40Version);
+
+ // We start with the replset running on 'oldVersion'. Streams should only produce PBRTs if
+ // we are on a post-backport version of 4.0.
+ jsTestLog(`Testing binary ${oldVersion}`);
+ refreshReplSet(oldVersion);
+ hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
+ {coll: testColl, expectPBRT: isPostBackport});
+ assert.eq(hwmToken != undefined, isPostBackport);
+
+ // Upgrade the replset to 4.2 but leave it in FCV 4.0.
+ jsTestLog("Upgrading to binary 4.2 with FCV 4.0");
+ refreshReplSet(latest42Version);
+
+ // All streams should now return PBRTs, including high water marks.
+ jsTestLog("Testing binary 4.2 with FCV 4.0");
+ hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
+ {coll: testColl, expectPBRT: true, hwmToResume: hwmToken, expectResume: true});
+ assert.neq(hwmToken, undefined);
+
+ // Set the replset's FCV to 4.2.
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+ // All streams should return PBRTs. We can resume with the HWM token from the previous test.
+ jsTestLog("Testing binary 4.2 with FCV 4.2");
+ hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
+ {coll: testColl, expectPBRT: true, hwmToResume: hwmToken, expectResume: true});
+ assert.neq(hwmToken, undefined);
+
+ // Downgrade the cluster to FCV 4.0.
+ jsTestLog("Downgrading to FCV 4.0");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+ // All streams should return PBRTs and we can still resume from the last HWM token.
+ jsTestLog("Testing binary 4.2 with downgraded FCV 4.0");
+ hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
+ {coll: testColl, expectPBRT: true, hwmToResume: hwmToken, expectResume: true});
+ assert.neq(hwmToken, undefined);
+
+ // Downgrade the cluster to 'oldVersion' after rebuilding all unique indexes so that their
+ // format is compatible with binary 4.0.
+ jsTestLog(`Downgrading to binary ${oldVersion}`);
+ downgradeUniqueIndexes(testDB);
+ refreshReplSet(oldVersion);
+
+ // We should receive PBRTs and be able to resume from the earlier HWM tokens only if we have
+ // downgraded to a post-backport version of 4.0.
+ jsTestLog(`Testing downgraded binary ${oldVersion}`);
+ hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
+ coll: testColl,
+ expectPBRT: isPostBackport,
+ hwmToResume: hwmToken,
+ expectResume: isPostBackport
});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
-
- // Obtain references to the test database and create the test collection.
- let testDB = rst.getPrimary().getDB(jsTestName());
- let testColl = testDB.test;
-
- // Up- or downgrades the replset and then refreshes our references to the test collection.
- function refreshReplSet(version) {
- // Upgrade the set and wait for it to become available again.
- rst.upgradeSet({binVersion: version});
- rst.awaitReplication();
-
- // Having upgraded the cluster, reacquire references to the db and collection.
- testDB = rst.getPrimary().getDB(jsTestName());
- testColl = testDB.test;
- }
-
- // We perform these tests once for pre-backport 4.0, and once for post-backport 4.0.
- for (let oldVersion of[preBackport40Version, postBackport40Version]) {
- // Stores a high water mark generated by the most recent test and used in subsequent tests.
- let hwmToken = null;
-
- // Determine whether we are running a pre- or post-backport version of 4.0.
- const isPostBackport = (oldVersion === postBackport40Version);
-
- // We start with the replset running on 'oldVersion'. Streams should only produce PBRTs if
- // we are on a post-backport version of 4.0.
- jsTestLog(`Testing binary ${oldVersion}`);
- refreshReplSet(oldVersion);
- hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
- {coll: testColl, expectPBRT: isPostBackport});
- assert.eq(hwmToken != undefined, isPostBackport);
-
- // Upgrade the replset to 4.2 but leave it in FCV 4.0.
- jsTestLog("Upgrading to binary 4.2 with FCV 4.0");
- refreshReplSet(latest42Version);
-
- // All streams should now return PBRTs, including high water marks.
- jsTestLog("Testing binary 4.2 with FCV 4.0");
- hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
- {coll: testColl, expectPBRT: true, hwmToResume: hwmToken, expectResume: true});
- assert.neq(hwmToken, undefined);
-
- // Set the replset's FCV to 4.2.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- // All streams should return PBRTs. We can resume with the HWM token from the previous test.
- jsTestLog("Testing binary 4.2 with FCV 4.2");
- hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
- {coll: testColl, expectPBRT: true, hwmToResume: hwmToken, expectResume: true});
- assert.neq(hwmToken, undefined);
-
- // Downgrade the cluster to FCV 4.0.
- jsTestLog("Downgrading to FCV 4.0");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- // All streams should return PBRTs and we can still resume from the last HWM token.
- jsTestLog("Testing binary 4.2 with downgraded FCV 4.0");
- hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens(
- {coll: testColl, expectPBRT: true, hwmToResume: hwmToken, expectResume: true});
- assert.neq(hwmToken, undefined);
-
- // Downgrade the cluster to 'oldVersion' after rebuilding all unique indexes so that their
- // format is compatible with binary 4.0.
- jsTestLog(`Downgrading to binary ${oldVersion}`);
- downgradeUniqueIndexes(testDB);
- refreshReplSet(oldVersion);
-
- // We should receive PBRTs and be able to resume from the earlier HWM tokens only if we have
- // downgraded to a post-backport version of 4.0.
- jsTestLog(`Testing downgraded binary ${oldVersion}`);
- hwmToken = ChangeStreamHWMHelpers.testPostBatchAndHighWaterMarkTokens({
- coll: testColl,
- expectPBRT: isPostBackport,
- hwmToResume: hwmToken,
- expectResume: isPostBackport
- });
- assert.eq(hwmToken != undefined, isPostBackport);
- }
+ assert.eq(hwmToken != undefined, isPostBackport);
+}
- rst.stopSet();
+rst.stopSet();
})(); \ No newline at end of file
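The replica-set variant drives the same transitions purely through binary swaps and setFeatureCompatibilityVersion. A short sketch, not part of the patch, of inspecting the FCV value that gates the token format, using the documented getParameter form:

    assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
    const fcvDoc = assert.commandWorked(
        db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
    // On 4.0+ binaries the value is a subdocument, e.g. {version: "4.2"} or,
    // mid-transition, {version: "4.0", targetVersion: "4.2"}.
    jsTestLog(`FCV: ${tojson(fcvDoc.featureCompatibilityVersion)}`);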
diff --git a/jstests/multiVersion/change_streams_resume_token_version.js b/jstests/multiVersion/change_streams_resume_token_version.js
index 25575683248..c4abf72f8cf 100644
--- a/jstests/multiVersion/change_streams_resume_token_version.js
+++ b/jstests/multiVersion/change_streams_resume_token_version.js
@@ -2,136 +2,135 @@
// the old version of the resume token doesn't contain enough information to distinguish an
// invalidate event from the event which generated the invalidate.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- load("jstests/multiVersion/libs/change_stream_hwm_helpers.js"); // For ChangeStreamHWMHelpers.
- load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load("jstests/multiVersion/libs/change_stream_hwm_helpers.js"); // For ChangeStreamHWMHelpers.
+load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const preBackport40Version = ChangeStreamHWMHelpers.preBackport40Version;
- const latest42Version = ChangeStreamHWMHelpers.latest42Version;
+const preBackport40Version = ChangeStreamHWMHelpers.preBackport40Version;
+const latest42Version = ChangeStreamHWMHelpers.latest42Version;
- const rst = new ReplSetTest({nodes: 2, nodeOptions: {binVersion: preBackport40Version}});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
-
- let testDB = rst.getPrimary().getDB(jsTestName());
- let coll = testDB.change_stream_upgrade;
-
- // Up- or downgrades the replset and then refreshes our references to the test collection.
- function refreshReplSet(version) {
- // Upgrade the set and wait for it to become available again.
- rst.upgradeSet({binVersion: version});
- rst.awaitReplication();
-
- // Having upgraded the cluster, reacquire references to the db and collection.
- testDB = rst.getPrimary().getDB(jsTestName());
- coll = testDB.change_stream_upgrade;
- }
-
- // Creates a collection, drops it, and returns the resulting 'drop' and 'invalidate' tokens.
- function generateDropAndInvalidateTokens() {
- assertDropAndRecreateCollection(testDB, coll.getName());
- const streamStartedOnOldFCV = coll.watch();
- coll.drop();
-
- assert.soon(() => streamStartedOnOldFCV.hasNext());
- let change = streamStartedOnOldFCV.next();
- assert.eq(change.operationType, "drop", tojson(change));
- const resumeTokenFromDrop = change._id;
-
- assert.soon(() => streamStartedOnOldFCV.hasNext());
- change = streamStartedOnOldFCV.next();
- assert.eq(change.operationType, "invalidate", tojson(change));
- const resumeTokenFromInvalidate = change._id;
-
- return [resumeTokenFromDrop, resumeTokenFromInvalidate];
- }
-
- function testInvalidateV0(resumeTokenFromDrop, resumeTokenFromInvalidate) {
- // These two resume tokens should be the same. Because they cannot be distinguished, any
- // attempt to resume or start a new stream should immediately return invalidate.
- assert.eq(resumeTokenFromDrop, resumeTokenFromInvalidate);
- for (let token of[resumeTokenFromDrop, resumeTokenFromInvalidate]) {
- let newStream = coll.watch([], {startAfter: token, collation: {locale: "simple"}});
- assert.soon(() => newStream.hasNext());
- assert.eq(newStream.next().operationType, "invalidate");
-
- // Test the same thing but with 'resumeAfter' instead of 'startAfter'.
- newStream = coll.watch([], {resumeAfter: token, collation: {locale: "simple"}});
- assert.soon(() => newStream.hasNext());
- assert.eq(newStream.next().operationType, "invalidate");
- }
- }
-
- function testInvalidateV1(resumeTokenFromDrop, resumeTokenFromInvalidate) {
- // This stream should be using the new version of resume tokens which *can* distinguish a
- // drop from the invalidate that follows it. Recreate the collection with the same name and
- // insert a document.
- assert.commandWorked(testDB.runCommand({create: coll.getName()}));
- assert.commandWorked(coll.insert({_id: "insert after drop"}));
-
- assert.neq(resumeTokenFromDrop,
- resumeTokenFromInvalidate,
- () => tojson(resumeTokenFromDrop) + " should not equal " +
- tojson(resumeTokenFromInvalidate));
- let newStream =
- coll.watch([], {startAfter: resumeTokenFromDrop, collation: {locale: "simple"}});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: {binVersion: preBackport40Version}});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(jsTestName());
+let coll = testDB.change_stream_upgrade;
+
+// Up- or downgrades the replset and then refreshes our references to the test collection.
+function refreshReplSet(version) {
+ // Upgrade the set and wait for it to become available again.
+ rst.upgradeSet({binVersion: version});
+ rst.awaitReplication();
+
+ // Having upgraded the cluster, reacquire references to the db and collection.
+ testDB = rst.getPrimary().getDB(jsTestName());
+ coll = testDB.change_stream_upgrade;
+}
+
+// Creates a collection, drops it, and returns the resulting 'drop' and 'invalidate' tokens.
+function generateDropAndInvalidateTokens() {
+ assertDropAndRecreateCollection(testDB, coll.getName());
+ const streamStartedOnOldFCV = coll.watch();
+ coll.drop();
+
+ assert.soon(() => streamStartedOnOldFCV.hasNext());
+ let change = streamStartedOnOldFCV.next();
+ assert.eq(change.operationType, "drop", tojson(change));
+ const resumeTokenFromDrop = change._id;
+
+ assert.soon(() => streamStartedOnOldFCV.hasNext());
+ change = streamStartedOnOldFCV.next();
+ assert.eq(change.operationType, "invalidate", tojson(change));
+ const resumeTokenFromInvalidate = change._id;
+
+ return [resumeTokenFromDrop, resumeTokenFromInvalidate];
+}
+
+function testInvalidateV0(resumeTokenFromDrop, resumeTokenFromInvalidate) {
+ // These two resume tokens should be the same. Because they cannot be distinguished, any
+ // attempt to resume or start a new stream should immediately return invalidate.
+ assert.eq(resumeTokenFromDrop, resumeTokenFromInvalidate);
+ for (let token of [resumeTokenFromDrop, resumeTokenFromInvalidate]) {
+ let newStream = coll.watch([], {startAfter: token, collation: {locale: "simple"}});
assert.soon(() => newStream.hasNext());
assert.eq(newStream.next().operationType, "invalidate");
- newStream =
- coll.watch([], {startAfter: resumeTokenFromInvalidate, collation: {locale: "simple"}});
- assert.soon(() => newStream.hasNext());
- const change = newStream.next();
- assert.eq(change.operationType, "insert");
- assert.eq(change.documentKey._id, "insert after drop");
-
- // Test the same thing but with 'resumeAfter' instead of 'startAfter'. This should see an
- // invalidate on the first, and reject the second.
- newStream =
- coll.watch([], {resumeAfter: resumeTokenFromDrop, collation: {locale: "simple"}});
+ // Test the same thing but with 'resumeAfter' instead of 'startAfter'.
+ newStream = coll.watch([], {resumeAfter: token, collation: {locale: "simple"}});
assert.soon(() => newStream.hasNext());
assert.eq(newStream.next().operationType, "invalidate");
- const error = assert.throws(
- () => coll.watch(
- [], {resumeAfter: resumeTokenFromInvalidate, collation: {locale: "simple"}}));
- assert.eq(error.code, ErrorCodes.InvalidResumeToken);
}
-
- // We will test 'drop' and 'invalidate' tokens for resume token formats v0 and v1.
- let resumeTokenFromDropV0, resumeTokenFromInvalidateV0;
- let resumeTokenFromDropV1, resumeTokenFromInvalidateV1;
-
- // We start on 'preBackport40Version'. Generate v0 'drop' and 'invalidate' resume tokens.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- [resumeTokenFromDropV0, resumeTokenFromInvalidateV0] = generateDropAndInvalidateTokens();
-
- // Now upgrade the set to 'latest42Version' with FCV 4.0.
- refreshReplSet(latest42Version);
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- // Confirm that the v0 tokens behave as expected for binary 4.2 FCV 4.0.
- testInvalidateV0(resumeTokenFromDropV0, resumeTokenFromInvalidateV0);
-
- // Confirm that new tokens generated by binary 4.2 in FCV 4.0 are v1 rather than v0.
- [resumeTokenFromDropV1, resumeTokenFromInvalidateV1] = generateDropAndInvalidateTokens();
- testInvalidateV1(resumeTokenFromDropV1, resumeTokenFromInvalidateV1);
-
- // Now upgrade the set to 'latest42Version' with FCV 4.2.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- // Confirm that the v0 tokens behave as expected for binary 4.2 FCV 4.2.
- testInvalidateV0(resumeTokenFromDropV0, resumeTokenFromInvalidateV0);
-
- // Confirm that new tokens generated by binary 4.2 in FCV 4.2 are v1 rather than v0.
- [resumeTokenFromDropV1, resumeTokenFromInvalidateV1] = generateDropAndInvalidateTokens();
- testInvalidateV1(resumeTokenFromDropV1, resumeTokenFromInvalidateV1);
-
- rst.stopSet();
+}
+
+function testInvalidateV1(resumeTokenFromDrop, resumeTokenFromInvalidate) {
+ // This stream should be using the new version of resume tokens which *can* distinguish a
+ // drop from the invalidate that follows it. Recreate the collection with the same name and
+ // insert a document.
+ assert.commandWorked(testDB.runCommand({create: coll.getName()}));
+ assert.commandWorked(coll.insert({_id: "insert after drop"}));
+
+ assert.neq(resumeTokenFromDrop,
+ resumeTokenFromInvalidate,
+ () => tojson(resumeTokenFromDrop) + " should not equal " +
+ tojson(resumeTokenFromInvalidate));
+ let newStream =
+ coll.watch([], {startAfter: resumeTokenFromDrop, collation: {locale: "simple"}});
+ assert.soon(() => newStream.hasNext());
+ assert.eq(newStream.next().operationType, "invalidate");
+
+ newStream =
+ coll.watch([], {startAfter: resumeTokenFromInvalidate, collation: {locale: "simple"}});
+ assert.soon(() => newStream.hasNext());
+ const change = newStream.next();
+ assert.eq(change.operationType, "insert");
+ assert.eq(change.documentKey._id, "insert after drop");
+
+ // Test the same thing but with 'resumeAfter' instead of 'startAfter'. This should see an
+ // invalidate on the first, and reject the second.
+ newStream = coll.watch([], {resumeAfter: resumeTokenFromDrop, collation: {locale: "simple"}});
+ assert.soon(() => newStream.hasNext());
+ assert.eq(newStream.next().operationType, "invalidate");
+ const error = assert.throws(
+ () => coll.watch([],
+ {resumeAfter: resumeTokenFromInvalidate, collation: {locale: "simple"}}));
+ assert.eq(error.code, ErrorCodes.InvalidResumeToken);
+}
+
+// We will test 'drop' and 'invalidate' tokens for resume token formats v0 and v1.
+let resumeTokenFromDropV0, resumeTokenFromInvalidateV0;
+let resumeTokenFromDropV1, resumeTokenFromInvalidateV1;
+
+// We start on 'preBackport40Version'. Generate v0 'drop' and 'invalidate' resume tokens.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+[resumeTokenFromDropV0, resumeTokenFromInvalidateV0] = generateDropAndInvalidateTokens();
+
+// Now upgrade the set to 'latest42Version' with FCV 4.0.
+refreshReplSet(latest42Version);
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+// Confirm that the v0 tokens behave as expected for binary 4.2 FCV 4.0.
+testInvalidateV0(resumeTokenFromDropV0, resumeTokenFromInvalidateV0);
+
+// Confirm that new tokens generated by binary 4.2 in FCV 4.0 are v1 rather than v0.
+[resumeTokenFromDropV1, resumeTokenFromInvalidateV1] = generateDropAndInvalidateTokens();
+testInvalidateV1(resumeTokenFromDropV1, resumeTokenFromInvalidateV1);
+
+// Now upgrade the set to 'latest42Version' with FCV 4.2.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+// Confirm that the v0 tokens behave as expected for binary 4.2 FCV 4.2.
+testInvalidateV0(resumeTokenFromDropV0, resumeTokenFromInvalidateV0);
+
+// Confirm that new tokens generated by binary 4.2 in FCV 4.2 are v1 rather than v0.
+[resumeTokenFromDropV1, resumeTokenFromInvalidateV1] = generateDropAndInvalidateTokens();
+testInvalidateV1(resumeTokenFromDropV1, resumeTokenFromInvalidateV1);
+
+rst.stopSet();
}());
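The property this test depends on, stated once: v0 tokens give a drop and its trailing invalidate identical resume tokens, while v1 tokens order them distinctly, so only under v1 can startAfter step past the invalidate. A minimal sketch of observing the distinction, with a hypothetical collection name and outside the patch:

    const cs = db.someColl.watch();  // hypothetical collection, assumed to exist
    db.someColl.drop();
    assert.soon(() => cs.hasNext());
    const dropToken = cs.next()._id;        // 'drop' event
    assert.soon(() => cs.hasNext());
    const invalidateToken = cs.next()._id;  // 'invalidate' event
    // v0 tokens: the two are byte-identical and cannot be told apart.
    // v1 tokens: they differ, so startAfter can resume past the invalidate while
    // resumeAfter with the invalidate token fails with InvalidResumeToken.
    assert.neq(dropToken, invalidateToken);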
diff --git a/jstests/multiVersion/clone_helper.js b/jstests/multiVersion/clone_helper.js
index 0c230189e34..9253c0ffc31 100644
--- a/jstests/multiVersion/clone_helper.js
+++ b/jstests/multiVersion/clone_helper.js
@@ -1,67 +1,66 @@
// SERVER-36438 Ensure the 4.2 cloneDatabase() shell helper still successfully executes the clone
// command on a 4.0 server, now that the clone command has been removed as of 4.2.
(function() {
- "use strict";
- const oldVersion = "4.0";
+"use strict";
+const oldVersion = "4.0";
- let numDocs = 2000;
+let numDocs = 2000;
- // 1kb string
- let str = new Array(1000).toString();
+// 1kb string
+let str = new Array(1000).toString();
- let replsetDBName = "cloneDBreplset";
- let standaloneDBName = "cloneDBstandalone";
- let testColName = "foo";
- let testViewName = "view";
+let replsetDBName = "cloneDBreplset";
+let standaloneDBName = "cloneDBstandalone";
+let testColName = "foo";
+let testViewName = "view";
- jsTest.log("Create replica set");
- let replTest =
- new ReplSetTest({name: "testSet", nodes: 3, nodeOptions: {binVersion: oldVersion}});
- replTest.startSet();
- replTest.initiate();
- let master = replTest.getPrimary();
- let masterDB = master.getDB(replsetDBName);
- masterDB.dropDatabase();
+jsTest.log("Create replica set");
+let replTest = new ReplSetTest({name: "testSet", nodes: 3, nodeOptions: {binVersion: oldVersion}});
+replTest.startSet();
+replTest.initiate();
+let master = replTest.getPrimary();
+let masterDB = master.getDB(replsetDBName);
+masterDB.dropDatabase();
- jsTest.log("Create standalone server");
- let standalone = MongoRunner.runMongod({binVersion: oldVersion});
- let standaloneDB = standalone.getDB(replsetDBName);
- standaloneDB.dropDatabase();
+jsTest.log("Create standalone server");
+let standalone = MongoRunner.runMongod({binVersion: oldVersion});
+let standaloneDB = standalone.getDB(replsetDBName);
+standaloneDB.dropDatabase();
- jsTest.log("Insert data into replica set");
- let bulk = masterDB[testColName].initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
- }
- assert.writeOK(bulk.execute({w: 3}));
+jsTest.log("Insert data into replica set");
+let bulk = masterDB[testColName].initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; i++) {
+ bulk.insert({x: i, text: str});
+}
+assert.writeOK(bulk.execute({w: 3}));
- jsTest.log("Create view on replica set");
- assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
+jsTest.log("Create view on replica set");
+assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
- // Make sure all writes have replicated to secondary.
- replTest.awaitReplication();
+// Make sure all writes have replicated to secondary.
+replTest.awaitReplication();
- jsTest.log("Clone db from replica set to standalone server");
- standaloneDB.cloneDatabase(replTest.getURL());
- assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- "cloneDatabase from replset to standalone failed (document counts do not match)");
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- "cloneDatabase from replset to standalone failed (count on view incorrect)");
+jsTest.log("Clone db from replica set to standalone server");
+standaloneDB.cloneDatabase(replTest.getURL());
+assert.eq(numDocs,
+ standaloneDB[testColName].find().itcount(),
+ "cloneDatabase from replset to standalone failed (document counts do not match)");
+assert.eq(numDocs,
+ standaloneDB[testViewName].find().itcount(),
+ "cloneDatabase from replset to standalone failed (count on view incorrect)");
- jsTest.log("Clone db from replica set PRIMARY to standalone server");
- standaloneDB.dropDatabase();
- standaloneDB.cloneDatabase(master.host);
- assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- "cloneDatabase from PRIMARY to standalone failed (document counts do not match)");
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- "cloneDatabase from PRIMARY to standalone failed (count on view incorrect)");
+jsTest.log("Clone db from replica set PRIMARY to standalone server");
+standaloneDB.dropDatabase();
+standaloneDB.cloneDatabase(master.host);
+assert.eq(numDocs,
+ standaloneDB[testColName].find().itcount(),
+ "cloneDatabase from PRIMARY to standalone failed (document counts do not match)");
+assert.eq(numDocs,
+ standaloneDB[testViewName].find().itcount(),
+ "cloneDatabase from PRIMARY to standalone failed (count on view incorrect)");
- jsTest.log("Shut down replica set and standalone server");
- MongoRunner.stopMongod(standalone);
+jsTest.log("Shut down replica set and standalone server");
+MongoRunner.stopMongod(standalone);
- replTest.stopSet();
+replTest.stopSet();
})();
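As the test's header notes, cloneDatabase() is a shell-side wrapper that issues the clone command, which 4.2 servers no longer implement; the helper therefore only succeeds against a 4.0-or-earlier destination. A hedged sketch of the equivalent raw command, with hypothetical hosts:

    // Run against the destination database on a 4.0 (or earlier) server; the
    // server pulls the same-named database from the source host.
    const dst = new Mongo("localhost:27017");  // hypothetical 4.0 destination
    const dstDB = dst.getDB("cloneDBreplset");
    assert.commandWorked(dstDB.runCommand({clone: "sourceHost:27017"}));  // hypothetical source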
diff --git a/jstests/multiVersion/collection_autoIndexId_false.js b/jstests/multiVersion/collection_autoIndexId_false.js
index 74fe0698435..ecf7b4912e0 100644
--- a/jstests/multiVersion/collection_autoIndexId_false.js
+++ b/jstests/multiVersion/collection_autoIndexId_false.js
@@ -6,106 +6,105 @@
* documents to be deleted in the previous version.
*/
(function() {
- 'use strict';
- load('jstests/libs/get_index_helpers.js');
-
- const dbName = 'test';
- const collName = 'collection_autoIndexId_false';
-
- // Given a dbpath to a 3.6 server, attempt to upgrade to 4.0.
- function upgrade36To40(dbpath) {
- let conn = MongoRunner.runMongod({binVersion: '4.0', dbpath: dbpath, noCleanData: true});
- assert.neq(null, conn, 'mongod was unable to start with version 4.0');
- let adminDb = conn.getDB('admin');
- assert.commandWorked(adminDb.runCommand({'setFeatureCompatibilityVersion': '4.0'}));
+'use strict';
+load('jstests/libs/get_index_helpers.js');
+
+const dbName = 'test';
+const collName = 'collection_autoIndexId_false';
+
+// Given a dbpath to a 3.6 server, attempt to upgrade to 4.0.
+function upgrade36To40(dbpath) {
+ let conn = MongoRunner.runMongod({binVersion: '4.0', dbpath: dbpath, noCleanData: true});
+ assert.neq(null, conn, 'mongod was unable to start with version 4.0');
+ let adminDb = conn.getDB('admin');
+ assert.commandWorked(adminDb.runCommand({'setFeatureCompatibilityVersion': '4.0'}));
+
+ MongoRunner.stopMongod(conn);
+}
+
+// Given a dbpath to a 3.6 server, attempt to upgrade to latest.
+// shouldPass determines whether or not the upgrade should be successful.
+function upgrade40ToLatest(dbpath, shouldPass) {
+ if (shouldPass) {
+ let conn = MongoRunner.runMongod({binVersion: 'latest', dbpath: dbpath, noCleanData: true});
+ assert.neq(null, conn, 'mongod failed to start with latest version');
+
+ // Ensure the _id index exists.
+ let testDb = conn.getDB(dbName);
+ let coll = testDb.getCollection(collName);
+ let spec = GetIndexHelpers.findByKeyPattern(coll.getIndexes(), {_id: 1});
+ assert.neq(null, spec);
MongoRunner.stopMongod(conn);
- }
+ } else {
+ let conn = MongoRunner.runMongod(
+ {binVersion: 'latest', dbpath: dbpath, noCleanData: true, waitForConnect: false});
- // Given a dbpath to a 3.6 server, attempt to upgrade to latest.
- // shouldPass determines whether or not the upgrade should be successful.
- function upgrade40ToLatest(dbpath, shouldPass) {
- if (shouldPass) {
- let conn =
- MongoRunner.runMongod({binVersion: 'latest', dbpath: dbpath, noCleanData: true});
- assert.neq(null, conn, 'mongod failed to start with latest version');
-
- // Ensure the _id index exists.
- let testDb = conn.getDB(dbName);
- let coll = testDb.getCollection(collName);
- let spec = GetIndexHelpers.findByKeyPattern(coll.getIndexes(), {_id: 1});
- assert.neq(null, spec);
-
- MongoRunner.stopMongod(conn);
- } else {
- let conn = MongoRunner.runMongod(
- {binVersion: 'latest', dbpath: dbpath, noCleanData: true, waitForConnect: false});
-
- // This tests that the server shuts down cleanly despite the inability to build the _id
- // index.
- assert.eq(MongoRunner.EXIT_NEED_DOWNGRADE, waitProgram(conn.pid));
- }
+ // This tests that the server shuts down cleanly despite the inability to build the _id
+ // index.
+ assert.eq(MongoRunner.EXIT_NEED_DOWNGRADE, waitProgram(conn.pid));
}
+}
- // Create a collection with autoIndexId: false on a 3.6 server and assert that an upgrade to
- // latest fails because there are duplicate values for the _id index.
- function cannotUpgradeWithDuplicateIds() {
- let conn = MongoRunner.runMongod({binVersion: '3.6'});
- assert.neq(null, conn, 'mongod was unable to start with version 3.6');
+// Create a collection with autoIndexId: false on a 3.6 server and assert that an upgrade to
+// latest fails because there are duplicate values for the _id index.
+function cannotUpgradeWithDuplicateIds() {
+ let conn = MongoRunner.runMongod({binVersion: '3.6'});
+ assert.neq(null, conn, 'mongod was unable to start with version 3.6');
- const dbpath = conn.dbpath;
+ const dbpath = conn.dbpath;
- // Create a collection with autoIndexId: false.
- let testDb = conn.getDB(dbName);
- let coll = testDb.getCollection(collName);
- assert.commandWorked(coll.runCommand('create', {autoIndexId: false}));
- assert.commandWorked(coll.insert({_id: 0, a: 1}));
- assert.commandWorked(coll.insert({_id: 0, a: 2}));
- MongoRunner.stopMongod(conn);
+ // Create a collection with autoIndexId: false.
+ let testDb = conn.getDB(dbName);
+ let coll = testDb.getCollection(collName);
+ assert.commandWorked(coll.runCommand('create', {autoIndexId: false}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ assert.commandWorked(coll.insert({_id: 0, a: 2}));
+ MongoRunner.stopMongod(conn);
- // The upgrade to 4.0 should always succeed because it does not care about the _id index.
- upgrade36To40(dbpath);
+ // The upgrade to 4.0 should always succeed because it does not care about the _id index.
+ upgrade36To40(dbpath);
- let shouldPass = false;
- upgrade40ToLatest(dbpath, shouldPass);
+ let shouldPass = false;
+ upgrade40ToLatest(dbpath, shouldPass);
- // Remove the duplicate (now in 4.0) and retry the upgrade to 4.2.
- conn = MongoRunner.runMongod({binVersion: '4.0', dbpath: dbpath, noCleanData: true});
- testDb = conn.getDB(dbName);
- coll = testDb.getCollection(collName);
- assert.commandWorked(coll.remove({_id: 0, a: 2}));
- MongoRunner.stopMongod(conn);
+ // Remove the duplicate (now in 4.0) and retry the upgrade to 4.2.
+ conn = MongoRunner.runMongod({binVersion: '4.0', dbpath: dbpath, noCleanData: true});
+ testDb = conn.getDB(dbName);
+ coll = testDb.getCollection(collName);
+ assert.commandWorked(coll.remove({_id: 0, a: 2}));
+ MongoRunner.stopMongod(conn);
- shouldPass = true;
- upgrade40ToLatest(dbpath, shouldPass);
+ shouldPass = true;
+ upgrade40ToLatest(dbpath, shouldPass);
- resetDbpath(dbpath);
- }
+ resetDbpath(dbpath);
+}
- // Create a collection with autoIndexId: false on a 3.6 server and assert that an upgrade to
- // latest succeeds because the missing _id index is built.
- function canUpgradeWithoutIndex() {
- let conn = MongoRunner.runMongod({binVersion: '3.6'});
- assert.neq(null, conn, 'mongod was unable to start with version 3.6');
+// Create a collection with autoIndexId: false on a 3.6 server and assert that an upgrade to
+// latest succeeds because the missing _id index is built.
+function canUpgradeWithoutIndex() {
+ let conn = MongoRunner.runMongod({binVersion: '3.6'});
+ assert.neq(null, conn, 'mongod was unable to start with version 3.6');
- const dbpath = conn.dbpath;
+ const dbpath = conn.dbpath;
- // Create a collection with autoIndexId: false.
- let testDb = conn.getDB(dbName);
- let coll = testDb.getCollection(collName);
- assert.commandWorked(coll.runCommand('create', {autoIndexId: false}));
- assert.commandWorked(coll.insert({_id: 0, a: 1}));
- MongoRunner.stopMongod(conn);
+ // Create a collection with autoIndexId: false.
+ let testDb = conn.getDB(dbName);
+ let coll = testDb.getCollection(collName);
+ assert.commandWorked(coll.runCommand('create', {autoIndexId: false}));
+ assert.commandWorked(coll.insert({_id: 0, a: 1}));
+ MongoRunner.stopMongod(conn);
- // The upgrade to 4.0 should always succeed because it does not care about the _id index.
- upgrade36To40(dbpath);
+ // The upgrade to 4.0 should always succeed because it does not care about the _id index.
+ upgrade36To40(dbpath);
- const shouldPass = true;
- upgrade40ToLatest(dbpath, shouldPass);
+ const shouldPass = true;
+ upgrade40ToLatest(dbpath, shouldPass);
- resetDbpath(dbpath);
- }
+ resetDbpath(dbpath);
+}
- cannotUpgradeWithDuplicateIds();
- canUpgradeWithoutIndex();
+cannotUpgradeWithDuplicateIds();
+canUpgradeWithoutIndex();
})();
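The mechanism under test, in brief: autoIndexId: false leaves the collection without an _id index, which 4.0 tolerates but 4.2 must build at startup, exiting with EXIT_NEED_DOWNGRADE when duplicate _id values make the build impossible. A small sketch of producing that state, assuming a server old enough to accept the option; not part of the patch:

    const coll = db.getCollection("collection_autoIndexId_false");
    assert.commandWorked(coll.runCommand('create', {autoIndexId: false}));
    // Without an _id index nothing enforces uniqueness, so duplicates can land...
    assert.commandWorked(coll.insert({_id: 0, a: 1}));
    assert.commandWorked(coll.insert({_id: 0, a: 2}));
    // ...and no index named '_id_' is present to be found.
    assert.eq(0, coll.getIndexes().filter(ix => ix.name === "_id_").length);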
diff --git a/jstests/multiVersion/collection_validator_feature_compatibility_version.js b/jstests/multiVersion/collection_validator_feature_compatibility_version.js
index dbb25b1127a..28995d4adfa 100644
--- a/jstests/multiVersion/collection_validator_feature_compatibility_version.js
+++ b/jstests/multiVersion/collection_validator_feature_compatibility_version.js
@@ -9,197 +9,193 @@
*/
(function() {
- "use strict";
-
- const testName = "collection_validator_feature_compatibility_version";
- const dbpath = MongoRunner.dataPath + testName;
-
- // In order to avoid restarting the server for each test case, we declare all the test cases up
- // front, and test them all at once.
- const testCases = [
- {validator: {$expr: {$eq: [{$round: "$a"}, 4]}}, nonMatchingDocument: {a: 5.2}},
- {validator: {$expr: {$eq: [{$trunc: ["$a", 2]}, 4.1]}}, nonMatchingDocument: {a: 4.23}},
- {
- validator: {$expr: {$regexMatch: {input: "$a", regex: /sentinel/}}},
- nonMatchingDocument: {a: "no dice"}
- },
- ];
-
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
- assert.neq(null, conn, "mongod was unable to start up");
-
- let testDB = conn.getDB(testName);
-
- let adminDB = conn.getDB("admin");
-
- // Explicitly set feature compatibility version 4.2.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- testCases.forEach(function(test, i) {
- // Create a collection with a validator using 4.2 query features.
- const coll = testDB["coll" + i];
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // The validator should cause this insert to fail.
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Set a validator using 4.2 query features on an existing collection.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
-
- // Another failing update.
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
- });
-
- // Set the feature compatibility version to 4.0.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- testCases.forEach(function(test, i) {
- // The validator is already in place, so it should still cause this insert to fail.
- const coll = testDB["coll" + i];
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Trying to create a new collection with a validator using 4.2 query features should fail
- // while feature compatibility version is 4.0.
- let res = testDB.createCollection("other", {validator: test.validator});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.QueryFeatureNotAllowed,
- 'Expected *not* to be able to create collection with validator ' +
- tojson(test.validator));
- assert(
- res.errmsg.match(/feature compatibility version/),
- `Expected error message from createCollection with validator ` +
- `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
- res.errmsg);
-
- // Trying to update a collection with a validator using 4.2 query features should also fail.
- res = testDB.runCommand({collMod: coll.getName(), validator: test.validator});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to modify collection validator to be ${tojson(test.validator)}`);
- assert(
- res.errmsg.match(/feature compatibility version/),
- `Expected error message from createCollection with validator ` +
- `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
- res.errmsg);
- });
-
- MongoRunner.stopMongod(conn);
-
- // If we try to start up a 4.0 mongod, it will fail, because it will not be able to parse
- // the validator using 4.2 query features.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
- assert.eq(
- null, conn, "mongod 4.0 started, even with a validator using 4.2 query features in place.");
-
- // Starting up a 4.2 mongod, however, should succeed, even though the feature compatibility
- // version is still set to 4.0.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
-
- adminDB = conn.getDB("admin");
- testDB = conn.getDB(testName);
-
- // And the validator should still work.
- testCases.forEach(function(test, i) {
- const coll = testDB["coll" + i];
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Remove the validator.
- assert.commandWorked(testDB.runCommand({collMod: coll.getName(), validator: {}}));
- });
-
- MongoRunner.stopMongod(conn);
-
- // Now, we should be able to start up a 4.0 mongod.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
- assert.neq(
- null,
- conn,
- "mongod 4.0 failed to start, even after we removed the validator using 4.2 query features");
-
- MongoRunner.stopMongod(conn);
-
- // The rest of the test uses mongod 4.2.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
-
- adminDB = conn.getDB("admin");
- testDB = conn.getDB(testName);
-
- // Set the feature compatibility version back to 4.2.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- testCases.forEach(function(test, i) {
- const coll = testDB["coll2" + i];
-
- // Now we should be able to create a collection with a validator using 4.2 query features
- // again.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // And we should be able to modify a collection to have a validator using 4.2 query
- // features.
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
- });
-
- // Set the feature compatibility version to 4.0 and then restart with
- // internalValidateFeaturesAsMaster=false.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
- });
- assert.neq(null, conn, "mongod was unable to start up");
-
- testDB = conn.getDB(testName);
-
- testCases.forEach(function(test, i) {
- const coll = testDB["coll3" + i];
- // Even though the feature compatibility version is 4.0, we should still be able to add a
- // validator using 4.2 query features, because internalValidateFeaturesAsMaster is false.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // We should also be able to modify a collection to have a validator using 4.2 query
- // features.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
- });
-
- MongoRunner.stopMongod(conn);
-
+"use strict";
+
+const testName = "collection_validator_feature_compatibility_version";
+const dbpath = MongoRunner.dataPath + testName;
+
+// In order to avoid restarting the server for each test case, we declare all the test cases up
+// front, and test them all at once.
+const testCases = [
+ {validator: {$expr: {$eq: [{$round: "$a"}, 4]}}, nonMatchingDocument: {a: 5.2}},
+ {validator: {$expr: {$eq: [{$trunc: ["$a", 2]}, 4.1]}}, nonMatchingDocument: {a: 4.23}},
+ {
+ validator: {$expr: {$regexMatch: {input: "$a", regex: /sentinel/}}},
+ nonMatchingDocument: {a: "no dice"}
+ },
+];
+
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
+assert.neq(null, conn, "mongod was unable to start up");
+
+let testDB = conn.getDB(testName);
+
+let adminDB = conn.getDB("admin");
+
+// Explicitly set feature compatibility version 4.2.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+testCases.forEach(function(test, i) {
+ // Create a collection with a validator using 4.2 query features.
+ const coll = testDB["coll" + i];
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // The validator should cause this insert to fail.
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+
+ // Set a validator using 4.2 query features on an existing collection.
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(coll.getName()));
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
+
+    // The validator should cause this insert to fail as well.
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+});
+
+// Set the feature compatibility version to 4.0.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+testCases.forEach(function(test, i) {
+ // The validator is already in place, so it should still cause this insert to fail.
+ const coll = testDB["coll" + i];
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+
+ // Trying to create a new collection with a validator using 4.2 query features should fail
+ // while feature compatibility version is 4.0.
+ let res = testDB.createCollection("other", {validator: test.validator});
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.QueryFeatureNotAllowed,
+ 'Expected *not* to be able to create collection with validator ' + tojson(test.validator));
+ assert(res.errmsg.match(/feature compatibility version/),
+ `Expected error message from createCollection with validator ` +
+ `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
+ res.errmsg);
+
+    // Trying to modify a collection to have a validator using 4.2 query features should also
+    // fail.
+ res = testDB.runCommand({collMod: coll.getName(), validator: test.validator});
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.QueryFeatureNotAllowed,
+        `Expected *not* to be able to modify collection validator to be ${tojson(test.validator)}`);
+ assert(res.errmsg.match(/feature compatibility version/),
+           `Expected error message from collMod with validator ` +
+ `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
+ res.errmsg);
+});
+
+MongoRunner.stopMongod(conn);
+
+// If we try to start up a 4.0 mongod, it will fail because it will not be able to parse the
+// validator using 4.2 query features.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
+assert.eq(
+ null, conn, "mongod 4.0 started, even with a validator using 4.2 query features in place.");
+
+// Starting up a 4.2 mongod, however, should succeed, even though the feature compatibility
+// version is still set to 4.0.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+
+adminDB = conn.getDB("admin");
+testDB = conn.getDB(testName);
+
+// And the validator should still work.
+testCases.forEach(function(test, i) {
+ const coll = testDB["coll" + i];
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+
+    // Remove the validator. (collMod with an empty validator object clears validation.)
+ assert.commandWorked(testDB.runCommand({collMod: coll.getName(), validator: {}}));
+});
+
+MongoRunner.stopMongod(conn);
+
+// Now, we should be able to start up a 4.0 mongod.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
+assert.neq(
+ null,
+ conn,
+ "mongod 4.0 failed to start, even after we removed the validator using 4.2 query features");
+
+MongoRunner.stopMongod(conn);
+
+// The rest of the test uses mongod 4.2.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+
+adminDB = conn.getDB("admin");
+testDB = conn.getDB(testName);
+
+// Set the feature compatibility version back to 4.2.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+testCases.forEach(function(test, i) {
+ const coll = testDB["coll2" + i];
+
+ // Now we should be able to create a collection with a validator using 4.2 query features
+ // again.
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // And we should be able to modify a collection to have a validator using 4.2 query
+ // features.
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
+});
+
+// Set the feature compatibility version to 4.0 and then restart with
+// internalValidateFeaturesAsMaster=false.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ binVersion: "latest",
+ noCleanData: true,
+ setParameter: "internalValidateFeaturesAsMaster=false"
+});
+assert.neq(null, conn, "mongod was unable to start up");
+
+testDB = conn.getDB(testName);
+
+testCases.forEach(function(test, i) {
+ const coll = testDB["coll3" + i];
+ // Even though the feature compatibility version is 4.0, we should still be able to add a
+ // validator using 4.2 query features, because internalValidateFeaturesAsMaster is false.
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // We should also be able to modify a collection to have a validator using 4.2 query
+ // features.
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(coll.getName()));
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
+});
+
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/multiVersion/config_transactions_set_fcv.js b/jstests/multiVersion/config_transactions_set_fcv.js
index c5dd323c580..9f2a059a4d2 100644
--- a/jstests/multiVersion/config_transactions_set_fcv.js
+++ b/jstests/multiVersion/config_transactions_set_fcv.js
@@ -6,426 +6,431 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "config_transactions_set_fcv";
-
- // Define autocommit as a variable so it can be used in object literals w/o an explicit value.
- const autocommit = false;
-
- // Start a replica set with an odd number of members to verify nodes outside the majority behave
- // correctly around setFeatureCompatibilityVersion, which uses majority writes to update the FCV
- // document. The primary isn't expected to change, so each secondary is given priority 0.
- const rst =
- new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- let testDB = rst.getPrimary().getDB(dbName);
- let adminDB = rst.getPrimary().getDB("admin");
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "config_transactions_set_fcv";
+
+// Define autocommit as a variable so it can be used in object literals w/o an explicit value.
+const autocommit = false;
+
+// Start a replica set with an odd number of members to verify nodes outside the majority behave
+// correctly around setFeatureCompatibilityVersion, which uses majority writes to update the FCV
+// document. The primary isn't expected to change, so each secondary is given priority 0.
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(dbName);
+let adminDB = rst.getPrimary().getDB("admin");
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Starts a dummy transaction and commits or aborts it, with or without prepare, returning the
+// commit or abort response. Returns the prepare response if prepare fails, and the initial
+// insert's response if it fails or 'leaveOpen' is set.
+function runTxn({lsid, txnNumber}, {commit, prepare, leaveOpen}) {
+ const startTransactionRes = testDB.runCommand({
+ insert: collName,
+ documents: [{x: "dummy_txn"}],
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ lsid,
+ autocommit,
+ });
+ if (!startTransactionRes.ok || leaveOpen) {
+ return startTransactionRes;
+ }
- // Starts a dummy transaction, commits or aborts it with or without prepare, then returns the
- // commit or abort response. Returns the response from prepare if it fails.
- function runTxn({lsid, txnNumber}, {commit, prepare, leaveOpen}) {
- const startTransactionRes = testDB.runCommand({
- insert: collName,
- documents: [{x: "dummy_txn"}],
+ if (prepare) {
+ const prepareRes = testDB.adminCommand({
+ prepareTransaction: 1,
txnNumber: NumberLong(txnNumber),
- startTransaction: true, lsid, autocommit,
+ lsid,
+ autocommit,
+ writeConcern: {w: "majority"}
});
- if (!startTransactionRes.ok || leaveOpen) {
- return startTransactionRes;
- }
-
- if (prepare) {
- const prepareRes = testDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(txnNumber), lsid, autocommit,
- writeConcern: {w: "majority"}
- });
- if (!prepareRes.ok) {
- return prepareRes;
- }
-
- if (commit) {
- // Add 1 to the increment so that the commitTimestamp is after the prepareTimestamp.
- const commitTimestamp = Timestamp(prepareRes.prepareTimestamp.getTime(),
- prepareRes.prepareTimestamp.getInc() + 1);
- return testDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp,
- txnNumber: NumberLong(txnNumber), lsid, autocommit
- });
- } else {
- return testDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
- }
+ if (!prepareRes.ok) {
+ return prepareRes;
}
if (commit) {
- return testDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
+ // Add 1 to the increment so that the commitTimestamp is after the prepareTimestamp.
+ const commitTimestamp = Timestamp(prepareRes.prepareTimestamp.getTime(),
+ prepareRes.prepareTimestamp.getInc() + 1);
+ return testDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ lsid,
+ autocommit
+ });
} else {
return testDB.adminCommand(
{abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
}
}
- // Retries commitTransaction for the given txnId, returning the response.
- function retryCommit({lsid, txnNumber}) {
+ if (commit) {
return testDB.adminCommand(
{commitTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
+ } else {
+ return testDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
}
-
- // Asserts aborting the given txnId returns NoSuchTransaction.
- function assertTransactionAborted({lsid, txnNumber}) {
- assert.commandFailedWithCode(
- testDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit}),
- ErrorCodes.NoSuchTransaction);
- }
-
- // Global counter for the number of retryable writes completed. Used to verify retried retryable
- // writes aren't double applied.
- let numRetryableWrites = 0;
-
- // Runs a dummy retryable write and increments the retryable write counter.
- function assertRetryableWriteWorked({lsid, txnNumber}) {
- numRetryableWrites += 1;
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
- }));
- }
-
- // Verifies a txnId has already been used for a retryable write by running a dummy retryable
- // write and asserting the write isn't applied.
- function assertRetryableWriteCanBeRetried({lsid, txnNumber}) {
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
- }));
- assert.eq(numRetryableWrites, testDB[collName].find({fromRetryableWrite: true}).itcount());
- }
-
- // Searches config.transactions for an entry for the given txnId on each node in the replica
- // set, verifying the entry does / does not exist and has the expected state, if specified.
- function checkConfigTransactionEntry(rst, {lsid, txnNumber}, {hasEntry, expectedState}) {
- rst.awaitReplication();
- rst.nodes.forEach((node) => {
- // Search for id since we don't know the uid, which is generated by the server.
- const entry = node.getDB("config").transactions.findOne({"_id.id": lsid.id});
-
- if (!hasEntry) {
- // There should be no entry for this session or it should be for an earlier
- // operation.
- if (entry) {
- assert.gt(txnNumber,
- entry.txnNum,
- "expected entry to have lower txnNumber, entry: " + tojson(entry) +
- ", node: " + tojson(node));
- } else {
- assert.isnull(entry,
- "expected entry to be null, entry: " + tojson(entry) +
- ", node: " + tojson(node));
- }
- return;
- }
-
- assert.eq(txnNumber,
- entry.txnNum,
- "expected entry to have the same txnNumber, entry: " + tojson(entry) +
- ", node: " + tojson(node));
-
- if (expectedState) {
- assert.eq(expectedState,
- entry.state,
- "entry: " + tojson(entry) + ", node: " + tojson(node));
+}
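+// Illustrative use of the helper above (values are examples only, not part of the test):
+//     runTxn({lsid: {id: UUID()}, txnNumber: 0}, {commit: true, prepare: true});
+// If prepare were to return prepareTimestamp Timestamp(1565000000, 3), the commit above
+// would be issued with commitTimestamp Timestamp(1565000000, 4), which sorts strictly
+// after the prepare point.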
+
+// Retries commitTransaction for the given txnId, returning the response.
+function retryCommit({lsid, txnNumber}) {
+ return testDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
+}
+
+// Asserts aborting the given txnId returns NoSuchTransaction.
+function assertTransactionAborted({lsid, txnNumber}) {
+ assert.commandFailedWithCode(
+ testDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit}),
+ ErrorCodes.NoSuchTransaction);
+}
+
+// Global counter for the number of retryable writes completed. Used to verify retried retryable
+// writes aren't double applied.
+let numRetryableWrites = 0;
+
+// Runs a dummy retryable write and increments the retryable write counter.
+function assertRetryableWriteWorked({lsid, txnNumber}) {
+ numRetryableWrites += 1;
+ assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{fromRetryableWrite: true}],
+ txnNumber: NumberLong(txnNumber),
+ lsid
+ }));
+}
+
+// Verifies a txnId has already been used for a retryable write by running a dummy retryable
+// write and asserting the write isn't applied.
+function assertRetryableWriteCanBeRetried({lsid, txnNumber}) {
+ assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{fromRetryableWrite: true}],
+ txnNumber: NumberLong(txnNumber),
+ lsid
+ }));
+ assert.eq(numRetryableWrites, testDB[collName].find({fromRetryableWrite: true}).itcount());
+}
+
+// Searches config.transactions for an entry for the given txnId on each node in the replica
+// set, verifying the entry does / does not exist and has the expected state, if specified.
+function checkConfigTransactionEntry(rst, {lsid, txnNumber}, {hasEntry, expectedState}) {
+ rst.awaitReplication();
+ rst.nodes.forEach((node) => {
+ // Search for id since we don't know the uid, which is generated by the server.
+ const entry = node.getDB("config").transactions.findOne({"_id.id": lsid.id});
+
+ if (!hasEntry) {
+ // There should be no entry for this session or it should be for an earlier
+ // operation.
+ if (entry) {
+ assert.gt(txnNumber,
+ entry.txnNum,
+ "expected entry to have lower txnNumber, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
} else {
- assert(!entry.hasOwnProperty("state"),
- "expected entry to not have state, entry: " + tojson(entry) + ", node: " +
- tojson(node));
- }
- });
- }
-
- function runTest({shouldRestart}) {
- // The test waits for failpoints to log a message when hit, so clear the program output
- // before starting so messages from previous iterations aren't in it.
- clearRawMongoProgramOutput();
-
- const txnIds = {
- write: {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write.
- commit: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/o prepare.
- commitPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/ prepare.
- abort: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction w/o prepare.
- abortPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction after prepare.
- concurrentTxn: {lsid: {id: UUID()}, txnNumber: 0}, // Transaction concurrent w/ setFCV.
- concurrentWrite:
- {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write concurrent w/ setFCV.
- upgradingTxn:
- {lsid: {id: UUID()}, txnNumber: 0}, // Transaction started during FCV upgrade.
- };
-
- //
- // In the latest FCV, verify the expected updates are made to config.transactions for each
- // case and the successful operations are retryable.
- //
- checkFCV(adminDB, latestFCV);
-
- assertRetryableWriteWorked(txnIds.write);
- assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
- assert.commandWorked(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}));
- assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
- assert.commandWorked(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}));
-
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(
- rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
- checkConfigTransactionEntry(
- rst, txnIds.commitPrepare, {hasEntry: true, expectedState: "committed"});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(
- rst, txnIds.abortPrepare, {hasEntry: true, expectedState: "aborted"});
-
- // The retryable write and the commit of both committed transactions should be retryable.
- // The aborted transactions should still be aborted.
- assertRetryableWriteCanBeRetried(txnIds.write);
- assert.commandWorked(retryCommit(txnIds.commit));
- assert.commandWorked(retryCommit(txnIds.commitPrepare));
- assertTransactionAborted(txnIds.abort);
- assertTransactionAborted(txnIds.abortPrepare);
-
- //
- // Downgrade to the last-stable FCV and verify config.transactions was updated as expected
- // for previously completed operations and operations concurrent with the downgrade.
- //
-
- if (shouldRestart) {
- // Restart to verify config.transactions entries for sessions not in-memory at the
- // beginning of FCV downgrade are updated correctly.
- jsTestLog("Restarting replica set before downgrading the featureCompatibilityVersion.");
- for (let i = 0; i < rst.nodes.length; i++) {
- rst.restart(i);
+ assert.isnull(entry,
+ "expected entry to be null, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
}
- testDB = rst.getPrimary().getDB(dbName);
- adminDB = rst.getPrimary().getDB("admin");
+ return;
}
- // Make setFCV pause in the downgrading state after getting the list of sessions to
- // potentially modify.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "alwaysOn"}));
-
- // Downgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
- const awaitDowngradeFCV = startParallelShell(() => {
- load("jstests/libs/feature_compatibility_version.js");
- jsTestLog("Downgrade the featureCompatibilityVersion in a parallel shell.");
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- }, rst.getPrimary().port);
- waitForFailpoint("Hit pauseBeforeDowngradingSessions failpoint", 1 /*numTimes*/);
-
- // Concurrent transactions that use prepare will fail.
- assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
- ErrorCodes.CommandNotSupported);
- txnIds.concurrentTxn.txnNumber += 1;
-
- // Concurrent transactions that do not use prepare and retryable writes succeed.
- assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
- assertRetryableWriteWorked(txnIds.concurrentWrite);
-
- // Unset the failpoint and wait for the downgrade to finish.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "off"}));
-
- awaitDowngradeFCV();
- checkFCV(adminDB, lastStableFCV);
-
- // The successful concurrent operations should have entries without state and be retryable.
- checkConfigTransactionEntry(rst, txnIds.concurrentTxn, {hasEntry: true});
- assert.commandWorked(retryCommit(txnIds.concurrentTxn));
- checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
- assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
-
- // Only the retryable write entry should remain.
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
-
- // The retryable write can be retried.
- assertRetryableWriteCanBeRetried(txnIds.write);
-
- // Neither of the commits can be retried.
- assert.commandFailedWithCode(retryCommit(txnIds.commit), ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(retryCommit(txnIds.commitPrepare),
- ErrorCodes.NoSuchTransaction);
-
- //
- // In the last-stable FCV, verify the expected updates are made to config.transactions for
- // each case and the successful operations are retryable.
- //
-
- // Reset each txnId to test upgrade with a clean slate.
- Object.keys(txnIds).forEach((txnIdKey) => {
- txnIds[txnIdKey].lsid = {id: UUID()};
- txnIds[txnIdKey].txnNumber = 0;
- });
+ assert.eq(txnNumber,
+ entry.txnNum,
+ "expected entry to have the same txnNumber, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
- // Prepare can't be used in FCV 4.0, so only commit, abort, and retryable write should
- // succeed.
- assertRetryableWriteWorked(txnIds.write);
- assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
- assert.commandFailedWithCode(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}),
- ErrorCodes.CommandNotSupported);
- assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
- assert.commandFailedWithCode(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}),
- ErrorCodes.CommandNotSupported);
-
- // Only the retryable write and transaction that committed without prepare should have an
- // entry. Neither should have state.
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true});
- checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
-
- // The retryable write and successful commit can be retried.
- assertRetryableWriteCanBeRetried(txnIds.write);
- assert.commandWorked(retryCommit(txnIds.commit));
-
- if (shouldRestart) {
- // Restart to verify config.transactions entries for sessions not in-memory at the
- // beginning of FCV upgrade are updated correctly.
- jsTestLog("Restarting replica set before upgrading the featureCompatibilityVersion.");
- for (let i = 0; i < rst.nodes.length; i++) {
- rst.restart(i);
- }
- testDB = rst.getPrimary().getDB(dbName);
- adminDB = rst.getPrimary().getDB("admin");
+ if (expectedState) {
+ assert.eq(
+ expectedState, entry.state, "entry: " + tojson(entry) + ", node: " + tojson(node));
+ } else {
+ assert(!entry.hasOwnProperty("state"),
+ "expected entry to not have state, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
}
+ });
+}
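+// For reference, a committed transaction's config.transactions document is expected to
+// resemble the following (field values are illustrative assumptions):
+//     {_id: {id: <lsid UUID>, uid: <server-generated digest>}, txnNum: NumberLong(0),
+//      lastWriteOpTime: {ts: ..., t: ...}, state: "committed"}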
- //
- // Upgrade to the latest FCV and verify config.transactions was updated as expected for
- // previously completed operations and operations concurrent with the upgrade.
- //
-
- // Run a retryable write on the session that will be used during upgrade so it has a
- // transaction table entry and will be checked out by the upgrade.
- assertRetryableWriteWorked(txnIds.upgradingTxn);
- txnIds.upgradingTxn.txnNumber += 1;
-
- // Make setFCV pause in the upgrading state after getting the list of sessions to
- // potentially modify.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "alwaysOn"}));
-
- // Upgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
- const awaitUpgradeFCV = startParallelShell(() => {
- load("jstests/libs/feature_compatibility_version.js");
- jsTestLog("Upgrade the featureCompatibilityVersion in a parallel shell.");
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- }, rst.getPrimary().port);
- waitForFailpoint("Hit pauseBeforeUpgradingSessions failpoint", 1 /*numTimes*/);
-
- // Concurrent transactions that use prepare will fail.
- assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
- ErrorCodes.CommandNotSupported);
- txnIds.concurrentTxn.txnNumber += 1;
-
- // Concurrent transactions that do not use prepare and retryable writes succeed.
- assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
- assertRetryableWriteWorked(txnIds.concurrentWrite);
-
- // Start a transaction in the upgrading state and verify that it doesn't get aborted by the
- // rest of the upgrade. Note that all sessions are killed and their transactions aborted for
- // writes to the FCV document except when it is set to the fully upgraded state, so this
- // can't be tested for downgrade.
- assert.commandWorked(runTxn(txnIds.upgradingTxn, {leaveOpen: true}));
-
- // Unset the failpoint and wait for the upgrade to finish.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "off"}));
-
- awaitUpgradeFCV();
- checkFCV(adminDB, latestFCV);
-
- // The transaction started while upgrading shouldn't have been killed and can be committed.
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- lsid: txnIds.upgradingTxn.lsid,
- txnNumber: NumberLong(txnIds.upgradingTxn.txnNumber), autocommit
- }));
-
- // The successful concurrent transaction should have "committed" state and be retryable, and
- // the concurrent retryable write should not have state and also be retryable.
- checkConfigTransactionEntry(
- rst, txnIds.concurrentTxn, {hasEntry: true, expectedState: "committed"});
- assert.commandWorked(retryCommit(txnIds.concurrentTxn));
- checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
- assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
-
- // There should still only be entries for the committed transaction and retryable write. The
- // committed transaction should now have a "state" field.
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(
- rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
- checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
-
- // The retryable write and successful commit can be retried.
- assertRetryableWriteCanBeRetried(txnIds.write);
- assert.commandWorked(retryCommit(txnIds.commit));
- }
+function runTest({shouldRestart}) {
+    // The test waits for failpoints to log a message when hit, so clear the program output
+    // before starting to ensure messages from previous iterations aren't present.
+ clearRawMongoProgramOutput();
- runTest({shouldRestart: false});
- runTest({shouldRestart: true});
+ const txnIds = {
+ write: {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write.
+ commit: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/o prepare.
+ commitPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/ prepare.
+ abort: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction w/o prepare.
+ abortPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction after prepare.
+ concurrentTxn: {lsid: {id: UUID()}, txnNumber: 0}, // Transaction concurrent w/ setFCV.
+ concurrentWrite:
+ {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write concurrent w/ setFCV.
+ upgradingTxn:
+ {lsid: {id: UUID()}, txnNumber: 0}, // Transaction started during FCV upgrade.
+ };
//
- // Verify setFCV is interruptible between modifying sessions.
+ // In the latest FCV, verify the expected updates are made to config.transactions for each
+ // case and the successful operations are retryable.
//
- clearRawMongoProgramOutput();
checkFCV(adminDB, latestFCV);
- // Construct a config.transactions entry that would be modified by downgrade.
- const txnIds = {interrupt: {lsid: {id: UUID()}, txnNumber: 0}};
- assert.commandWorked(runTxn(txnIds.interrupt, {commit: true, prepare: true}));
+ assertRetryableWriteWorked(txnIds.write);
+ assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
+ assert.commandWorked(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}));
+ assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
+ assert.commandWorked(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}));
+
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
checkConfigTransactionEntry(
- rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
+ rst, txnIds.commitPrepare, {hasEntry: true, expectedState: "committed"});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(
+ rst, txnIds.abortPrepare, {hasEntry: true, expectedState: "aborted"});
+
+ // The retryable write and the commit of both committed transactions should be retryable.
+ // The aborted transactions should still be aborted.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+ assert.commandWorked(retryCommit(txnIds.commit));
+ assert.commandWorked(retryCommit(txnIds.commitPrepare));
+ assertTransactionAborted(txnIds.abort);
+ assertTransactionAborted(txnIds.abortPrepare);
- // Pause setFCV before it would modify the entry.
+ //
+ // Downgrade to the last-stable FCV and verify config.transactions was updated as expected
+ // for previously completed operations and operations concurrent with the downgrade.
+ //
+
+ if (shouldRestart) {
+ // Restart to verify config.transactions entries for sessions not in-memory at the
+ // beginning of FCV downgrade are updated correctly.
+ jsTestLog("Restarting replica set before downgrading the featureCompatibilityVersion.");
+ for (let i = 0; i < rst.nodes.length; i++) {
+ rst.restart(i);
+ }
+ testDB = rst.getPrimary().getDB(dbName);
+ adminDB = rst.getPrimary().getDB("admin");
+ }
+
+ // Make setFCV pause in the downgrading state after getting the list of sessions to
+ // potentially modify.
assert.commandWorked(rst.getPrimary().adminCommand(
{configureFailPoint: "pauseBeforeDowngradingSessions", mode: "alwaysOn"}));
- TestData.setFCVLsid = {id: UUID()};
- const awaitUpgradeFCV = startParallelShell(() => {
+ // Downgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
+ const awaitDowngradeFCV = startParallelShell(() => {
load("jstests/libs/feature_compatibility_version.js");
- assert.commandFailedWithCode(
- db.adminCommand(
- {setFeatureCompatibilityVersion: lastStableFCV, lsid: TestData.setFCVLsid}),
- ErrorCodes.Interrupted);
+ jsTestLog("Downgrade the featureCompatibilityVersion in a parallel shell.");
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
}, rst.getPrimary().port);
waitForFailpoint("Hit pauseBeforeDowngradingSessions failpoint", 1 /*numTimes*/);
- // Kill the session running setFCV.
- assert.commandWorked(rst.getPrimary().adminCommand({killSessions: [TestData.setFCVLsid]}));
+ // Concurrent transactions that use prepare will fail.
+ assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+ txnIds.concurrentTxn.txnNumber += 1;
+
+ // Concurrent transactions that do not use prepare and retryable writes succeed.
+ assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
+ assertRetryableWriteWorked(txnIds.concurrentWrite);
- // Unpause the failpoint and verify setFCV returns without modifying config.transactions.
+ // Unset the failpoint and wait for the downgrade to finish.
assert.commandWorked(rst.getPrimary().adminCommand(
{configureFailPoint: "pauseBeforeDowngradingSessions", mode: "off"}));
+ awaitDowngradeFCV();
+ checkFCV(adminDB, lastStableFCV);
+
+ // The successful concurrent operations should have entries without state and be retryable.
+ checkConfigTransactionEntry(rst, txnIds.concurrentTxn, {hasEntry: true});
+ assert.commandWorked(retryCommit(txnIds.concurrentTxn));
+ checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
+ assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
+
+ // Only the retryable write entry should remain.
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
+
+ // The retryable write can be retried.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+
+ // Neither of the commits can be retried.
+ assert.commandFailedWithCode(retryCommit(txnIds.commit), ErrorCodes.NoSuchTransaction);
+ assert.commandFailedWithCode(retryCommit(txnIds.commitPrepare), ErrorCodes.NoSuchTransaction);
+
+ //
+ // In the last-stable FCV, verify the expected updates are made to config.transactions for
+ // each case and the successful operations are retryable.
+ //
+
+ // Reset each txnId to test upgrade with a clean slate.
+ Object.keys(txnIds).forEach((txnIdKey) => {
+ txnIds[txnIdKey].lsid = {id: UUID()};
+ txnIds[txnIdKey].txnNumber = 0;
+ });
+
+ // Prepare can't be used in FCV 4.0, so only commit, abort, and retryable write should
+ // succeed.
+ assertRetryableWriteWorked(txnIds.write);
+ assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
+ assert.commandFailedWithCode(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+ assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
+ assert.commandFailedWithCode(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+
+ // Only the retryable write and transaction that committed without prepare should have an
+ // entry. Neither should have state.
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
+
+ // The retryable write and successful commit can be retried.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+ assert.commandWorked(retryCommit(txnIds.commit));
+
+ if (shouldRestart) {
+ // Restart to verify config.transactions entries for sessions not in-memory at the
+ // beginning of FCV upgrade are updated correctly.
+ jsTestLog("Restarting replica set before upgrading the featureCompatibilityVersion.");
+ for (let i = 0; i < rst.nodes.length; i++) {
+ rst.restart(i);
+ }
+ testDB = rst.getPrimary().getDB(dbName);
+ adminDB = rst.getPrimary().getDB("admin");
+ }
+
+ //
+ // Upgrade to the latest FCV and verify config.transactions was updated as expected for
+ // previously completed operations and operations concurrent with the upgrade.
+ //
+
+ // Run a retryable write on the session that will be used during upgrade so it has a
+ // transaction table entry and will be checked out by the upgrade.
+ assertRetryableWriteWorked(txnIds.upgradingTxn);
+ txnIds.upgradingTxn.txnNumber += 1;
+
+ // Make setFCV pause in the upgrading state after getting the list of sessions to
+ // potentially modify.
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "alwaysOn"}));
+
+ // Upgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
+ const awaitUpgradeFCV = startParallelShell(() => {
+ load("jstests/libs/feature_compatibility_version.js");
+ jsTestLog("Upgrade the featureCompatibilityVersion in a parallel shell.");
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ }, rst.getPrimary().port);
+ waitForFailpoint("Hit pauseBeforeUpgradingSessions failpoint", 1 /*numTimes*/);
+
+ // Concurrent transactions that use prepare will fail.
+ assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+ txnIds.concurrentTxn.txnNumber += 1;
+
+ // Concurrent transactions that do not use prepare and retryable writes succeed.
+ assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
+ assertRetryableWriteWorked(txnIds.concurrentWrite);
+
+    // Start a transaction in the upgrading state and verify that it doesn't get aborted by the
+    // rest of the upgrade. Note that setFCV kills all sessions and aborts their transactions on
+    // every write to the FCV document except the one that moves to the fully upgraded state, so
+    // this can't be tested for downgrade.
+ assert.commandWorked(runTxn(txnIds.upgradingTxn, {leaveOpen: true}));
+
+ // Unset the failpoint and wait for the upgrade to finish.
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "off"}));
+
awaitUpgradeFCV();
+ checkFCV(adminDB, latestFCV);
+
+ // The transaction started while upgrading shouldn't have been killed and can be committed.
+ assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ lsid: txnIds.upgradingTxn.lsid,
+ txnNumber: NumberLong(txnIds.upgradingTxn.txnNumber),
+ autocommit
+ }));
+
+ // The successful concurrent transaction should have "committed" state and be retryable, and
+ // the concurrent retryable write should not have state and also be retryable.
checkConfigTransactionEntry(
- rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
+ rst, txnIds.concurrentTxn, {hasEntry: true, expectedState: "committed"});
+ assert.commandWorked(retryCommit(txnIds.concurrentTxn));
+ checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
+ assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
+
+ // There should still only be entries for the committed transaction and retryable write. The
+ // committed transaction should now have a "state" field.
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
+ checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
+
+ // The retryable write and successful commit can be retried.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+ assert.commandWorked(retryCommit(txnIds.commit));
+}
+
+runTest({shouldRestart: false});
+runTest({shouldRestart: true});
+
+//
+// Verify setFCV is interruptible between modifying sessions.
+//
+clearRawMongoProgramOutput();
+checkFCV(adminDB, latestFCV);
+
+// Construct a config.transactions entry that would be modified by downgrade.
+const txnIds = {
+ interrupt: {lsid: {id: UUID()}, txnNumber: 0}
+};
+assert.commandWorked(runTxn(txnIds.interrupt, {commit: true, prepare: true}));
+checkConfigTransactionEntry(rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
+
+// Pause setFCV before it would modify the entry.
+assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "alwaysOn"}));
+
+TestData.setFCVLsid = {
+ id: UUID()
+};
+const awaitInterruptedSetFCV = startParallelShell(() => {
+ load("jstests/libs/feature_compatibility_version.js");
+ assert.commandFailedWithCode(
+ db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV, lsid: TestData.setFCVLsid}),
+ ErrorCodes.Interrupted);
+}, rst.getPrimary().port);
+waitForFailpoint("Hit pauseBeforeDowngradingSessions failpoint", 1 /*numTimes*/);
+
+// Kill the session running setFCV.
+assert.commandWorked(rst.getPrimary().adminCommand({killSessions: [TestData.setFCVLsid]}));
+
+// Unpause the failpoint and verify setFCV returns without modifying config.transactions.
+assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "off"}));
+
+awaitInterruptedSetFCV();
+checkConfigTransactionEntry(rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/multiVersion/copydb_helper.js b/jstests/multiVersion/copydb_helper.js
index aff7dd83fa4..25c60f2552a 100644
--- a/jstests/multiVersion/copydb_helper.js
+++ b/jstests/multiVersion/copydb_helper.js
@@ -1,53 +1,50 @@
// SERVER-36438 Ensure the 4.2 copyDatabase() shell helper still successfully executes the copyDB
// command on a 4.0 server, now that the copyDB command has been removed as of 4.2.
(function() {
- "use strict";
- const oldVersion = "4.0";
-
- let runTest = function(useAuth) {
- let conn;
- if (useAuth) {
- conn = MongoRunner.runMongod({auth: "", binVersion: oldVersion});
- } else {
- conn = MongoRunner.runMongod({binVersion: oldVersion});
- }
-
- let fromDB = conn.getDB("copydb2-test-a");
- let toDB = conn.getDB("copydb2-test-b");
- let adminDB = conn.getDB("admin");
-
- if (useAuth) {
- adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
- adminDB.auth("root", "root");
- fromDB.createUser({
- user: "chevy",
- pwd: "chase",
- roles: ["read", {role: "readWrite", db: toDB._name}]
- });
- }
-
- assert.commandWorked(fromDB.foo.insert({a: 1}));
- assert.commandWorked(fromDB.foo.createIndex({a: 1}));
-
- if (useAuth) {
- assert.commandWorked(toDB.getSiblingDB("admin").logout());
- fromDB.auth("chevy", "chase");
- }
-
- assert.eq(1, fromDB.foo.count());
- assert.eq(0, toDB.foo.count());
-
- assert.commandWorked(fromDB.copyDatabase(fromDB._name, toDB._name));
- assert.eq(1, fromDB.foo.count());
- assert.eq(1, toDB.foo.count());
- assert.eq(fromDB.foo.getIndexes().length, toDB.foo.getIndexes().length);
- MongoRunner.stopMongod(conn);
- };
-
- runTest(/*useAuth*/ false);
-
- // Authenticating as multiple users on multiple databases results in an error.
- if (!jsTest.options().auth) {
- runTest(/*useAuth*/ true);
+"use strict";
+const oldVersion = "4.0";
+
+let runTest = function(useAuth) {
+ let conn;
+ if (useAuth) {
+ conn = MongoRunner.runMongod({auth: "", binVersion: oldVersion});
+ } else {
+ conn = MongoRunner.runMongod({binVersion: oldVersion});
}
+
+ let fromDB = conn.getDB("copydb2-test-a");
+ let toDB = conn.getDB("copydb2-test-b");
+ let adminDB = conn.getDB("admin");
+
+ if (useAuth) {
+ adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
+ adminDB.auth("root", "root");
+ fromDB.createUser(
+ {user: "chevy", pwd: "chase", roles: ["read", {role: "readWrite", db: toDB._name}]});
+ }
+
+ assert.commandWorked(fromDB.foo.insert({a: 1}));
+ assert.commandWorked(fromDB.foo.createIndex({a: 1}));
+
+ if (useAuth) {
+ assert.commandWorked(toDB.getSiblingDB("admin").logout());
+ fromDB.auth("chevy", "chase");
+ }
+
+ assert.eq(1, fromDB.foo.count());
+ assert.eq(0, toDB.foo.count());
+
+ assert.commandWorked(fromDB.copyDatabase(fromDB._name, toDB._name));
+ assert.eq(1, fromDB.foo.count());
+ assert.eq(1, toDB.foo.count());
+ assert.eq(fromDB.foo.getIndexes().length, toDB.foo.getIndexes().length);
+ MongoRunner.stopMongod(conn);
+};
+
+runTest(/*useAuth*/ false);
+
+// Authenticating as multiple users on multiple databases results in an error.
+if (!jsTest.options().auth) {
+ runTest(/*useAuth*/ true);
+}
})();
diff --git a/jstests/multiVersion/downgrade_after_rollback_via_refetch.js b/jstests/multiVersion/downgrade_after_rollback_via_refetch.js
index 175ddcdf2df..83de5b47a0e 100644
--- a/jstests/multiVersion/downgrade_after_rollback_via_refetch.js
+++ b/jstests/multiVersion/downgrade_after_rollback_via_refetch.js
@@ -3,63 +3,63 @@
// and restarts with 4.0 before its next stable checkpoint, then oplog entries after the common
// point are replayed.
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- TestData.allowUncleanShutdowns = true;
- let name = "downgrade_after_rollback_via_refetch";
- let dbName = "test";
- let sourceCollName = "coll";
+TestData.rollbackShutdowns = true;
+TestData.allowUncleanShutdowns = true;
+let name = "downgrade_after_rollback_via_refetch";
+let dbName = "test";
+let sourceCollName = "coll";
- function testDowngrade(enableMajorityReadConcern) {
- jsTest.log("Test downgrade with enableMajorityReadConcern=" + enableMajorityReadConcern);
+function testDowngrade(enableMajorityReadConcern) {
+ jsTest.log("Test downgrade with enableMajorityReadConcern=" + enableMajorityReadConcern);
- // Set up Rollback Test.
- let replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiate(config);
- let rollbackTest = new RollbackTest(name, replTest);
+ // Set up Rollback Test.
+ let replTest = new ReplSetTest(
+ {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
+ replTest.startSet();
+ let config = replTest.getReplSetConfig();
+ config.members[2].priority = 0;
+ config.settings = {chainingAllowed: false};
+ replTest.initiate(config);
+ let rollbackTest = new RollbackTest(name, replTest);
- // Set the featureCompatibilityVersion to 4.0, so that we can downgrade the rollback node.
- assert.commandWorked(
- rollbackTest.getPrimary().adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+ // Set the featureCompatibilityVersion to 4.0, so that we can downgrade the rollback node.
+ assert.commandWorked(
+ rollbackTest.getPrimary().adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
+ let rollbackNode = rollbackTest.transitionToRollbackOperations();
- // Turn off stable checkpoints on the rollback node.
- assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+ // Turn off stable checkpoints on the rollback node.
+ assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
- // Wait for a rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+ // Wait for a rollback to finish.
+ rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+ rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+ rollbackTest.transitionToSteadyStateOperations();
- // Replicate a new operation to the rollback node. Replication is disabled on the tiebreaker
- // node, so a successful majority write guarantees the write has replicated to the rollback
- // node.
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[sourceCollName].insert(
- {_id: 0}, {writeConcern: {w: "majority"}}));
- assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
+ // Replicate a new operation to the rollback node. Replication is disabled on the tiebreaker
+ // node, so a successful majority write guarantees the write has replicated to the rollback
+ // node.
+ assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[sourceCollName].insert(
+ {_id: 0}, {writeConcern: {w: "majority"}}));
+ assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
- // Kill and restart the rollback node on 4.0.
- rollbackTest.restartNode(
- 0, 9, {binVersion: "4.0", enableMajorityReadConcern: enableMajorityReadConcern});
- replTest.awaitSecondaryNodes();
+ // Kill and restart the rollback node on 4.0.
+ rollbackTest.restartNode(
+ 0, 9, {binVersion: "4.0", enableMajorityReadConcern: enableMajorityReadConcern});
+ replTest.awaitSecondaryNodes();
- // The rollback node should replay the new operation.
- rollbackNode = rollbackTest.getSecondary();
- assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
+ // The rollback node should replay the new operation.
+ rollbackNode = rollbackTest.getSecondary();
+ assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
- rollbackTest.stop();
- }
+ rollbackTest.stop();
+}
- testDowngrade("true");
- testDowngrade("false");
+testDowngrade("true");
+testDowngrade("false");
})();
diff --git a/jstests/multiVersion/drop_collection_downgrade_path.js b/jstests/multiVersion/drop_collection_downgrade_path.js
index 5cd829973db..62846ed4ffc 100644
--- a/jstests/multiVersion/drop_collection_downgrade_path.js
+++ b/jstests/multiVersion/drop_collection_downgrade_path.js
@@ -5,52 +5,52 @@
* two phase drop.
*/
(function() {
- "use strict";
-
- TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "a";
- assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- rst.stopSet(null, true /* forRestart */);
- rst.startSet({restart: true});
-
- // Make sure collection creation is checkpointed.
- assert.commandWorked(rst.getPrimary().getDB(dbName).runCommand(
- {insert: collName, documents: [{x: 0}], writeConcern: {w: 2}}));
- assert.commandWorked(rst.getPrimary().getDB("admin").runCommand({fsync: 1}));
-
- // Stop secondary's oplog application so the dropCollection can never be committed.
- assert.commandWorked(
- rst.getSecondary().adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- assert.commandWorked(rst.getPrimary().getDB(dbName).runCommand({drop: collName}));
-
- // Wait until the first phase (renaming) is done on the primary.
- assert.soon(function() {
- let res = rst.getPrimary().getDB("local").oplog.rs.find({o: {drop: collName}}).toArray();
- jsTestLog("dropCollection oplog: " + tojson(res));
- return res.length === 1;
- });
-
- // Kill the 4.2 replica set.
- rst.stopSet(9 /* signal */,
- true /* forRestart */,
- {skipValidation: true, allowedExitCode: MongoRunner.EXIT_SIGKILL});
-
- // Restart the replica set with 4.0 binaries.
- rst.startSet({restart: true, binVersion: "4.0"});
-
- assert.soon(function() {
- let res = TwoPhaseDropCollectionTest.listCollections(rst.getPrimary().getDB(dbName));
- jsTestLog("Collections in \'" + dbName + "\': " + tojson(res));
- return res.length === 0;
- });
-
- rst.stopSet();
+"use strict";
+
+TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "a";
+assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+rst.stopSet(null, true /* forRestart */);
+rst.startSet({restart: true});
+
+// Make sure collection creation is checkpointed.
+assert.commandWorked(rst.getPrimary().getDB(dbName).runCommand(
+ {insert: collName, documents: [{x: 0}], writeConcern: {w: 2}}));
+assert.commandWorked(rst.getPrimary().getDB("admin").runCommand({fsync: 1}));
+
+// Stop secondary's oplog application so the dropCollection can never be committed.
+assert.commandWorked(
+ rst.getSecondary().adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+assert.commandWorked(rst.getPrimary().getDB(dbName).runCommand({drop: collName}));
+
+// Wait until the first phase (renaming) is done on the primary.
+assert.soon(function() {
+ let res = rst.getPrimary().getDB("local").oplog.rs.find({o: {drop: collName}}).toArray();
+ jsTestLog("dropCollection oplog: " + tojson(res));
+ return res.length === 1;
+});
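+// The matched oplog entry for the drop command is expected to look roughly like
+// (illustrative): {op: "c", ns: "test.$cmd", ts: ..., o: {drop: "a"}, ...}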
+
+// Kill the 4.2 replica set.
+rst.stopSet(9 /* signal */,
+ true /* forRestart */,
+ {skipValidation: true, allowedExitCode: MongoRunner.EXIT_SIGKILL});
+
+// Restart the replica set with 4.0 binaries.
+rst.startSet({restart: true, binVersion: "4.0"});
+
+assert.soon(function() {
+ let res = TwoPhaseDropCollectionTest.listCollections(rst.getPrimary().getDB(dbName));
+ jsTestLog("Collections in \'" + dbName + "\': " + tojson(res));
+ return res.length === 0;
+});
+
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/drop_collection_upgrade_path.js b/jstests/multiVersion/drop_collection_upgrade_path.js
index 55d590bc39c..18c7d34a21d 100644
--- a/jstests/multiVersion/drop_collection_upgrade_path.js
+++ b/jstests/multiVersion/drop_collection_upgrade_path.js
@@ -5,63 +5,63 @@
* two phase drop.
*/
(function() {
- "use strict";
+"use strict";
- TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
+TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet({binVersion: "4.0"});
- rst.initiate();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet({binVersion: "4.0"});
+rst.initiate();
- const dbName = "test";
- const collName = "a";
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const testDB = primary.getDB(dbName);
+const dbName = "test";
+const collName = "a";
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const testDB = primary.getDB(dbName);
- // Make sure collection creation is checkpointed.
- assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 0}]}));
- assert.commandWorked(primary.getDB("admin").runCommand({fsync: 1}));
+// Make sure collection creation is checkpointed.
+assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 0}]}));
+assert.commandWorked(primary.getDB("admin").runCommand({fsync: 1}));
- // Stop secondary's oplog application so the dropCollection can never be committed.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- assert.commandWorked(testDB.runCommand({drop: collName}));
+// Stop secondary's oplog application so the dropCollection can never be committed.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+assert.commandWorked(testDB.runCommand({drop: collName}));
- // Wait until the first phase (renaming) is done on the primary.
- assert.soon(function() {
- let res = primary.getDB("local").oplog.rs.find({o: {drop: collName}}).toArray();
- jsTestLog("dropCollection oplog: " + tojson(res));
- return res.length === 1;
- });
- // This will print out 'test.system.drop.xxxxxx.a' collection if there is one.
- assert(TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB, collName));
+// Wait until the first phase (renaming) is done on the primary.
+assert.soon(function() {
+ let res = primary.getDB("local").oplog.rs.find({o: {drop: collName}}).toArray();
+ jsTestLog("dropCollection oplog: " + tojson(res));
+ return res.length === 1;
+});
+// This will print out the 'test.system.drop.xxxxxx.a' collection if there is one.
+assert(TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB, collName));
- // Kill the 4.0 replica set.
- rst.stopSet(9 /* signal */,
- true /* forRestart */,
- {skipValidation: true, allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Kill the 4.0 replica set.
+rst.stopSet(9 /* signal */,
+ true /* forRestart */,
+ {skipValidation: true, allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restart the replica set with 4.2 binaries.
- rst.startSet({restart: true, binVersion: "latest"});
+// Restart the replica set with 4.2 binaries.
+rst.startSet({restart: true, binVersion: "latest"});
- assert.soon(function() {
- if (!TestData.hasOwnProperty("enableMajorityReadConcern") ||
- TestData.enableMajorityReadConcern === true) {
- // If enableMajorityReadConcern is true, that means the binary will use the new
- // 4.2-style two phase drop. Then there should never be 'test.system.drop.xxxxx.a' in
- // 'test' database because the first phase of 4.0-style drop (rename) was not
- // checkpointed and that drop is currently being replayed via 4.2-style two phase drop
- // mechanism.
- assert(!TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(
- rst.getPrimary().getDB(dbName), collName));
- }
- let res = TwoPhaseDropCollectionTest.listCollections(rst.getPrimary().getDB(dbName));
- jsTestLog("Collections in \'" + dbName + "\': " + tojson(res));
- return res.length === 0;
- });
+assert.soon(function() {
+ if (!TestData.hasOwnProperty("enableMajorityReadConcern") ||
+ TestData.enableMajorityReadConcern === true) {
+ // If enableMajorityReadConcern is true, the binary will use the new
+ // 4.2-style two phase drop, so there should never be a 'test.system.drop.xxxxx.a'
+ // collection in the 'test' database: the first phase of the 4.0-style drop (rename)
+ // was not checkpointed, and that drop is currently being replayed via the 4.2-style
+ // two phase drop mechanism.
+ assert(!TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(
+ rst.getPrimary().getDB(dbName), collName));
+ }
+ let res = TwoPhaseDropCollectionTest.listCollections(rst.getPrimary().getDB(dbName));
+ jsTestLog("Collections in \'" + dbName + "\': " + tojson(res));
+ return res.length === 0;
+});
- rst.stopSet();
+rst.stopSet();
})();
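Note: the key mechanism in drop_collection_upgrade_path.js is the rsSyncApplyStop failpoint, which freezes oplog application on the secondary so the dropCollection can never commit. A minimal sketch of the enable/release pattern (assumes 'secondary' is a connection to that node; mode "off" is the standard way to clear a failpoint):

// Freeze oplog application so the two-phase drop stays pending.
assert.commandWorked(
    secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
// ... run the drop and inspect the pending 'system.drop' namespace ...
// Release the failpoint so the node can catch up again.
assert.commandWorked(
    secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));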
diff --git a/jstests/multiVersion/failIndexKeyTooLong_FCV40.js b/jstests/multiVersion/failIndexKeyTooLong_FCV40.js
index b3e6481b87e..d324a01bd24 100644
--- a/jstests/multiVersion/failIndexKeyTooLong_FCV40.js
+++ b/jstests/multiVersion/failIndexKeyTooLong_FCV40.js
@@ -2,63 +2,63 @@
// TODO SERVER-36386: Remove this test
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- // Start the node with FCV 4.0
- let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true});
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+// Start the node with FCV 4.0
+let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true});
+assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- var db = conn.getDB('test');
- var t = db.index_bigkeys_nofail;
- t.drop();
- var res = db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: true});
- var was = res.was;
- assert.commandWorked(res);
+var db = conn.getDB('test');
+var t = db.index_bigkeys_nofail;
+t.drop();
+var res = db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: true});
+var was = res.was;
+assert.commandWorked(res);
- var x = new Array(1025).join('x');
- assert.commandWorked(t.ensureIndex({name: 1}));
- assert.writeError(t.insert({name: x}));
- assert.commandWorked(t.dropIndex({name: 1}));
- assert.writeOK(t.insert({name: x}));
- assert.commandFailed(t.ensureIndex({name: 1}));
+var x = new Array(1025).join('x');
+assert.commandWorked(t.ensureIndex({name: 1}));
+assert.writeError(t.insert({name: x}));
+assert.commandWorked(t.dropIndex({name: 1}));
+assert.writeOK(t.insert({name: x}));
+assert.commandFailed(t.ensureIndex({name: 1}));
- t.drop();
- db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: false});
+t.drop();
+db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: false});
- // inserts
- assert.writeOK(t.insert({_id: 1, name: x}));
- assert.commandWorked(t.ensureIndex({name: 1}));
- assert.writeOK(t.insert({_id: 2, name: x}));
- assert.writeOK(t.insert({_id: 3, name: x}));
- assert.eq(t.count(), 3);
+// inserts
+assert.writeOK(t.insert({_id: 1, name: x}));
+assert.commandWorked(t.ensureIndex({name: 1}));
+assert.writeOK(t.insert({_id: 2, name: x}));
+assert.writeOK(t.insert({_id: 3, name: x}));
+assert.eq(t.count(), 3);
- // updates (smaller and larger)
- assert.writeOK(t.update({_id: 1}, {$set: {name: 'short'}}));
- assert.writeOK(t.update({_id: 1}, {$set: {name: x}}));
- assert.writeOK(t.update({_id: 1}, {$set: {name: x + 'even longer'}}));
+// updates (smaller and larger)
+assert.writeOK(t.update({_id: 1}, {$set: {name: 'short'}}));
+assert.writeOK(t.update({_id: 1}, {$set: {name: x}}));
+assert.writeOK(t.update({_id: 1}, {$set: {name: x + 'even longer'}}));
- // remove
- assert.writeOK(t.remove({_id: 1}));
- assert.eq(t.count(), 2);
+// remove
+assert.writeOK(t.remove({_id: 1}));
+assert.eq(t.count(), 2);
- db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: true});
+db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: true});
- // can still delete even if key is oversized
- assert.writeOK(t.remove({_id: 2}));
- assert.eq(t.count(), 1);
+// can still delete even if key is oversized
+assert.writeOK(t.remove({_id: 2}));
+assert.eq(t.count(), 1);
- // can still update to shorter, but not longer name.
- assert.writeError(t.update({_id: 3}, {$set: {name: x + 'even longer'}}));
- assert.writeOK(t.update({_id: 3}, {$set: {name: 'short'}}));
- assert.writeError(t.update({_id: 3}, {$set: {name: x}}));
+// can still update to a shorter, but not longer, name.
+assert.writeError(t.update({_id: 3}, {$set: {name: x + 'even longer'}}));
+assert.writeOK(t.update({_id: 3}, {$set: {name: 'short'}}));
+assert.writeError(t.update({_id: 3}, {$set: {name: x}}));
- db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: was});
+db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: was});
- // Explicitly drop the collection to avoid failures in post-test hooks that run dbHash and
- // validate commands.
- t.drop();
+// Explicitly drop the collection to avoid failures in post-test hooks that run dbHash and
+// validate commands.
+t.drop();
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
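Note: failIndexKeyTooLong_FCV40.js relies on setParameter returning the previous value in its 'was' field, which lets the test restore the node's original setting before shutting down. A minimal sketch of that save-and-restore pattern (assumes 'conn' is a mongod connection; assert.commandWorked returns the command result in the shell):

let admin = conn.getDB('admin');
// setParameter reports the prior value in 'was'.
let prev = assert.commandWorked(
    admin.runCommand({setParameter: 1, failIndexKeyTooLong: true})).was;
// ... exercise inserts/updates with oversized (>1024-byte) index keys ...
// Put the parameter back the way we found it.
assert.commandWorked(
    admin.runCommand({setParameter: 1, failIndexKeyTooLong: prev}));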
diff --git a/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js b/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
index 2fb02f419ce..c49abeafa8f 100644
--- a/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
@@ -8,48 +8,47 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- const lastStable = "last-stable";
-
- let st = new ShardingTest({mongos: 1, shards: 1});
- const ns = "testDB.testColl";
- let mongosAdminDB = st.s.getDB("admin");
-
- // Assert that a mongos using the 'last-stable' binary version will crash when connecting to a
- // cluster running on the 'latest' binary version with the 'latest' FCV.
- let lastStableMongos =
- MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
-
- assert(!lastStableMongos);
-
- // Assert that a mongos using the 'last-stable' binary version will successfully connect to a
- // cluster running on the 'latest' binary version with the 'last-stable' FCV.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- lastStableMongos =
- MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
- assert.neq(null,
- lastStableMongos,
- "mongos was unable to start up with binary version=" + lastStable +
- " and connect to FCV=" + lastStableFCV + " cluster");
-
- // Ensure that the 'lastStable' binary mongos can perform reads and writes to the shards in the
- // cluster.
- assert.writeOK(lastStableMongos.getDB("test").foo.insert({x: 1}));
- let foundDoc = lastStableMongos.getDB("test").foo.findOne({x: 1});
- assert.neq(null, foundDoc);
- assert.eq(1, foundDoc.x, tojson(foundDoc));
-
- // Assert that the 'lastStable' binary mongos will crash after the cluster is upgraded to
- // 'latestFCV'.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- let error = assert.throws(function() {
- lastStableMongos.getDB("test").foo.insert({x: 1});
- });
- assert(isNetworkError(error));
- assert(!lastStableMongos.conn);
-
- st.stop();
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+const lastStable = "last-stable";
+
+let st = new ShardingTest({mongos: 1, shards: 1});
+const ns = "testDB.testColl";
+let mongosAdminDB = st.s.getDB("admin");
+
+// Assert that a mongos using the 'last-stable' binary version will crash when connecting to a
+// cluster running on the 'latest' binary version with the 'latest' FCV.
+let lastStableMongos =
+ MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
+
+assert(!lastStableMongos);
+
+// Assert that a mongos using the 'last-stable' binary version will successfully connect to a
+// cluster running on the 'latest' binary version with the 'last-stable' FCV.
+assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+lastStableMongos = MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
+assert.neq(null,
+ lastStableMongos,
+ "mongos was unable to start up with binary version=" + lastStable +
+ " and connect to FCV=" + lastStableFCV + " cluster");
+
+// Ensure that the 'lastStable' binary mongos can perform reads and writes to the shards in the
+// cluster.
+assert.writeOK(lastStableMongos.getDB("test").foo.insert({x: 1}));
+let foundDoc = lastStableMongos.getDB("test").foo.findOne({x: 1});
+assert.neq(null, foundDoc);
+assert.eq(1, foundDoc.x, tojson(foundDoc));
+
+// Assert that the 'lastStable' binary mongos will crash after the cluster is upgraded to
+// 'latestFCV'.
+assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+let error = assert.throws(function() {
+ lastStableMongos.getDB("test").foo.insert({x: 1});
+});
+assert(isNetworkError(error));
+assert(!lastStableMongos.conn);
+
+st.stop();
})();
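Note: the final assertion in crash_mongos_against_upgraded_cluster.js hinges on the old-binary mongos dropping its connections once the cluster moves to latestFCV. The detection idiom, in isolation (assumes 'lastStableMongos' is a connection to the stale mongos and the helpers from feature_compatibility_version.js are loaded):

// After the FCV bump, an operation through the stale mongos should
// surface as a network error rather than an ordinary command error.
let err = assert.throws(function() {
    lastStableMongos.getDB("test").foo.insert({x: 1});
});
assert(isNetworkError(err));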
diff --git a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
index 6a858400ae4..e280a82451e 100644
--- a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
@@ -1,304 +1,224 @@
// Perform the upgrade/downgrade procedure by first setting the featureCompatibilityVersion and
// then switching the binary.
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/get_index_helpers.js");
- load("jstests/libs/check_uuids.js");
- load("jstests/libs/check_unique_indexes.js");
-
- const latestBinary = "latest";
- const lastStableBinary = "last-stable";
-
- let setFCV = function(adminDB, version) {
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: version}));
- checkFCV(adminDB, version);
- };
-
- let insertDataForConn = function(conn, dbs, nodeOptions) {
- for (let i = 0; i < 20; i++) {
- let doc = {id: i, sno: i, a: "foo", conn: conn.name};
- for (let j in dbs) {
- if (nodeOptions.hasOwnProperty("configsvr")) {
- if (j !== "admin" && j !== "local") {
- // We can't create user databases on a --configsvr instance.
- continue;
- }
- // Config servers have a majority write concern.
- assert.writeOK(
- conn.getDB(dbs[j]).foo.insert(doc, {writeConcern: {w: "majority"}}));
- } else {
- assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc));
- }
- }
- }
+"use strict";
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/check_uuids.js");
+load("jstests/libs/check_unique_indexes.js");
- // Create unique indexes on collection "foo" with two index formatVersions.
- // Providing index version explicitly allows index creation with corresponding
- // formatVersion.
+const latestBinary = "latest";
+const lastStableBinary = "last-stable";
+
+let setFCV = function(adminDB, version) {
+ assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: version}));
+ checkFCV(adminDB, version);
+};
+
+let insertDataForConn = function(conn, dbs, nodeOptions) {
+ for (let i = 0; i < 20; i++) {
+ let doc = {id: i, sno: i, a: "foo", conn: conn.name};
for (let j in dbs) {
- let testDB = conn.getDB(dbs[j]);
- testDB.getCollectionInfos().forEach(function(c) {
- if (c.name === "foo") {
- let foo = testDB.getCollection(c.name);
- assert.commandWorked(foo.createIndex({id: 1}, {unique: true}));
- assert.commandWorked(foo.createIndex({sno: 1}, {unique: true, v: 1}));
+ if (nodeOptions.hasOwnProperty("configsvr")) {
+ if (j !== "admin" && j !== "local") {
+ // We can't create user databases on a --configsvr instance.
+ continue;
}
- });
+ // Config servers have a majority write concern.
+ assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc, {writeConcern: {w: "majority"}}));
+ } else {
+ assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc));
+ }
}
- };
-
- let recreateUniqueIndexes = function(db, secondary) {
- // Obtain list of all v1 and v2 unique indexes
- var unique_idx = [];
- var unique_idx_v1 = [];
- db.adminCommand("listDatabases").databases.forEach(function(d) {
- if (secondary && !(d.name === "local")) {
- // All replicated indexes will be dropped on the primary, and have that
- // drop propogated. Secondary nodes need to recreate unique indexes
- // associated with local collections.
- return;
+ }
+
+ // Create unique indexes on collection "foo" with two index formatVersions.
+ // Providing the index version explicitly allows index creation with the
+ // corresponding formatVersion.
+ for (let j in dbs) {
+ let testDB = conn.getDB(dbs[j]);
+ testDB.getCollectionInfos().forEach(function(c) {
+ if (c.name === "foo") {
+ let foo = testDB.getCollection(c.name);
+ assert.commandWorked(foo.createIndex({id: 1}, {unique: true}));
+ assert.commandWorked(foo.createIndex({sno: 1}, {unique: true, v: 1}));
}
- let mdb = db.getSiblingDB(d.name);
- mdb.getCollectionInfos().forEach(function(c) {
- let currentCollection = mdb.getCollection(c.name);
- currentCollection.getIndexes().forEach(function(i) {
- if (i.unique) {
- if (i.v === 1) {
- unique_idx_v1.push(i);
- return;
- }
- unique_idx.push(i);
- }
- });
- });
});
-
- // Drop and create all v:2 indexes
- for (let idx of unique_idx) {
- let [dbName, collName] = idx.ns.split(".");
- let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
- assert.commandWorked(res);
- res = db.getSiblingDB(dbName).runCommand({
- createIndexes: collName,
- indexes: [{"key": idx.key, "name": idx.name, "unique": true}]
- });
- assert.commandWorked(res);
+ }
+};
+
+let recreateUniqueIndexes = function(db, secondary) {
+ // Obtain list of all v1 and v2 unique indexes
+ var unique_idx = [];
+ var unique_idx_v1 = [];
+ db.adminCommand("listDatabases").databases.forEach(function(d) {
+ if (secondary && !(d.name === "local")) {
+ // All replicated indexes will be dropped on the primary, and have that
+ // drop propagated. Secondary nodes need to recreate unique indexes
+ // associated with local collections.
+ return;
}
-
- // Drop and create all v:1 indexes
- for (let idx of unique_idx_v1) {
- let [dbName, collName] = idx.ns.split(".");
- let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
- assert.commandWorked(res);
- res = db.getSiblingDB(dbName).runCommand({
- createIndexes: collName,
- indexes: [{"key": idx.key, "name": idx.name, "unique": true, "v": 1}]
+ let mdb = db.getSiblingDB(d.name);
+ mdb.getCollectionInfos().forEach(function(c) {
+ let currentCollection = mdb.getCollection(c.name);
+ currentCollection.getIndexes().forEach(function(i) {
+ if (i.unique) {
+ if (i.v === 1) {
+ unique_idx_v1.push(i);
+ return;
+ }
+ unique_idx.push(i);
+ }
});
- assert.commandWorked(res);
- }
- };
-
- // Create and clear dbpath
- let sharedDbPath = MongoRunner.dataPath + "do_upgrade_downgrade";
- resetDbpath(sharedDbPath);
-
- // Return a mongodb connection with startup options, version and dbpath options
- let startMongodWithVersion = function(nodeOptions, ver, path) {
- let version = ver || latestBinary;
- let dbpath = path || sharedDbPath;
- let conn = MongoRunner.runMongod(
- Object.assign({}, nodeOptions, {dbpath: dbpath, binVersion: version}));
- assert.neq(null,
- conn,
- "mongod was unable to start up with version=" + version + " and path=" + dbpath);
- return conn;
- };
-
- //
- // Standalone tests.
- //
- let standaloneTest = function(nodeOptions) {
- let noCleanDataOptions = Object.assign({noCleanData: true}, nodeOptions);
-
- // New latest binary version standalone.
- jsTest.log("Starting a latest binVersion standalone");
- let conn = startMongodWithVersion(nodeOptions, latestBinary);
- let adminDB = conn.getDB("admin");
-
- // Insert some data.
- insertDataForConn(conn, ["admin", "local", "test"], nodeOptions);
-
- if (!nodeOptions.hasOwnProperty("shardsvr")) {
- // Initially featureCompatibilityVersion is latest except for when we run with shardsvr.
- // We expect featureCompatibilityVersion to be last-stable for shardsvr.
- checkFCV(adminDB, latestFCV);
-
- // Ensure all collections have UUIDs and all unique indexes have new version in latest
- // featureCompatibilityVersion mode.
- checkCollectionUUIDs(adminDB);
- checkUniqueIndexFormatVersion(adminDB);
-
- // Set featureCompatibilityVersion to last-stable.
- setFCV(adminDB, lastStableFCV);
- }
-
- // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
- checkFCV(adminDB, lastStableFCV);
- checkCollectionUUIDs(adminDB);
-
- // Drop and recreate unique indexes with the older FCV
- recreateUniqueIndexes(adminDB, false);
-
- // Stop latest binary version mongod.
- MongoRunner.stopMongod(conn);
-
- // Start last-stable binary version mongod with same dbpath
- jsTest.log("Starting a last-stable binVersion standalone to test downgrade");
- let lastStableConn = startMongodWithVersion(noCleanDataOptions, lastStableBinary);
- let lastStableAdminDB = lastStableConn.getDB("admin");
-
- // Check FCV document.
- checkFCV(lastStableAdminDB, lastStableFCV);
-
- // Ensure all collections still have UUIDs on a last-stable mongod.
- checkCollectionUUIDs(lastStableAdminDB);
-
- // Stop last-stable binary version mongod.
- MongoRunner.stopMongod(lastStableConn);
-
- // Start latest binary version mongod again.
- jsTest.log("Starting a latest binVersion standalone to test upgrade");
- conn = startMongodWithVersion(noCleanDataOptions, latestBinary);
- adminDB = conn.getDB("admin");
-
- // Ensure setFeatureCompatibilityVersion to latest succeeds, all collections have UUIDs
- // and all unique indexes are in new version.
- setFCV(adminDB, latestFCV);
+ });
+ });
+
+ // Drop and create all v:2 indexes
+ for (let idx of unique_idx) {
+ let [dbName, collName] = idx.ns.split(".");
+ let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
+ assert.commandWorked(res);
+ res = db.getSiblingDB(dbName).runCommand({
+ createIndexes: collName,
+ indexes: [{"key": idx.key, "name": idx.name, "unique": true}]
+ });
+ assert.commandWorked(res);
+ }
+
+ // Drop and create all v:1 indexes
+ for (let idx of unique_idx_v1) {
+ let [dbName, collName] = idx.ns.split(".");
+ let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
+ assert.commandWorked(res);
+ res = db.getSiblingDB(dbName).runCommand({
+ createIndexes: collName,
+ indexes: [{"key": idx.key, "name": idx.name, "unique": true, "v": 1}]
+ });
+ assert.commandWorked(res);
+ }
+};
+
+// Create and clear dbpath
+let sharedDbPath = MongoRunner.dataPath + "do_upgrade_downgrade";
+resetDbpath(sharedDbPath);
+
+// Return a mongod connection started with the given node options, binary version and dbpath.
+let startMongodWithVersion = function(nodeOptions, ver, path) {
+ let version = ver || latestBinary;
+ let dbpath = path || sharedDbPath;
+ let conn = MongoRunner.runMongod(
+ Object.assign({}, nodeOptions, {dbpath: dbpath, binVersion: version}));
+ assert.neq(null,
+ conn,
+ "mongod was unable to start up with version=" + version + " and path=" + dbpath);
+ return conn;
+};
+
+//
+// Standalone tests.
+//
+let standaloneTest = function(nodeOptions) {
+ let noCleanDataOptions = Object.assign({noCleanData: true}, nodeOptions);
+
+ // New latest binary version standalone.
+ jsTest.log("Starting a latest binVersion standalone");
+ let conn = startMongodWithVersion(nodeOptions, latestBinary);
+ let adminDB = conn.getDB("admin");
+
+ // Insert some data.
+ insertDataForConn(conn, ["admin", "local", "test"], nodeOptions);
+
+ if (!nodeOptions.hasOwnProperty("shardsvr")) {
+ // Initially featureCompatibilityVersion is latest except for when we run with shardsvr.
+ // We expect featureCompatibilityVersion to be last-stable for shardsvr.
checkFCV(adminDB, latestFCV);
+
+ // Ensure all collections have UUIDs and all unique indexes have new version in latest
+ // featureCompatibilityVersion mode.
checkCollectionUUIDs(adminDB);
checkUniqueIndexFormatVersion(adminDB);
- // Stop latest binary version mongod for the last time
- MongoRunner.stopMongod(conn);
- };
-
- //
- // Replica set tests.
- //
- let replicaSetTest = function(nodeOptions) {
-
- // New latest binary version replica set.
- jsTest.log("Starting a latest binVersion ReplSetTest");
- let rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions});
- rst.startSet();
- rst.initiate();
- let primaryAdminDB = rst.getPrimary().getDB("admin");
- let secondaries = rst.getSecondaries();
-
- // Insert some data.
- insertDataForConn(rst.getPrimary(), ["admin", "local", "test"], nodeOptions);
- rst.awaitReplication();
-
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- // Insert some data into the local DB.
- insertDataForConn(secondaries[j], ["local"], nodeOptions);
- }
-
- if (!nodeOptions.hasOwnProperty("shardsvr")) {
- // Initially featureCompatibilityVersion is latest on primary and secondaries except for
- // when we run with shardsvr. We expect featureCompatibilityVersion to be last-stable
- // for shardsvr.
- checkFCV(primaryAdminDB, latestFCV);
-
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkFCV(secondaryAdminDB, latestFCV);
- }
-
- // Ensure all collections have UUIDs and unique indexes are in new version in latest
- // featureCompatibilityVersion mode on both primary and secondaries.
- checkCollectionUUIDs(primaryAdminDB);
- checkUniqueIndexFormatVersion(primaryAdminDB);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkCollectionUUIDs(secondaryAdminDB);
- checkUniqueIndexFormatVersion(secondaryAdminDB);
- }
-
- // Change featureCompatibilityVersion to last-stable.
- setFCV(primaryAdminDB, lastStableFCV);
- rst.awaitReplication();
- }
-
- // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
- checkFCV(primaryAdminDB, lastStableFCV);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkFCV(secondaryAdminDB, lastStableFCV);
- }
-
- checkCollectionUUIDs(primaryAdminDB);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkCollectionUUIDs(secondaryAdminDB);
- }
-
- // Drop and recreate unique indexes with the older FCV
- recreateUniqueIndexes(primaryAdminDB, false);
-
- // Now drop and recreate unique indexes on secondaries' "local" database
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- recreateUniqueIndexes(secondaryAdminDB, true);
- }
-
- // Stop latest binary version replica set.
- rst.stopSet(null /* signal */, true /* forRestart */);
-
- // Downgrade the ReplSetTest binaries and make sure everything is okay.
- jsTest.log("Starting a last-stable binVersion ReplSetTest to test downgrade");
- rst.startSet({restart: true, binVersion: lastStableBinary});
-
- // Check that the featureCompatiblityVersion is set to last-stable and all
- // collections still have UUIDs.
- let lastStablePrimaryAdminDB = rst.getPrimary().getDB("admin");
- let lastStableSecondaries = rst.getSecondaries();
-
- checkFCV(lastStablePrimaryAdminDB, lastStableFCV);
- for (let j = 0; j < lastStableSecondaries.length; j++) {
- let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
- checkFCV(secondaryAdminDB, lastStableFCV);
- }
-
- checkCollectionUUIDs(lastStablePrimaryAdminDB);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
- checkCollectionUUIDs(secondaryAdminDB);
- }
-
- rst.stopSet(null /* signal */, true /* forRestart */);
-
- // Start latest binary version replica set again.
- jsTest.log("Starting a latest binVersion ReplSetTest to test upgrade");
- rst.startSet({restart: true, binVersion: latestBinary});
- primaryAdminDB = rst.getPrimary().getDB("admin");
- secondaries = rst.getSecondaries();
-
- // Ensure all collections have UUIDs and unique indexes are in new version after switching
- // back to latest featureCompatibilityVersion on both primary and secondaries.
- setFCV(primaryAdminDB, latestFCV);
- rst.awaitReplication();
-
+ // Set featureCompatibilityVersion to last-stable.
+ setFCV(adminDB, lastStableFCV);
+ }
+
+ // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
+ checkFCV(adminDB, lastStableFCV);
+ checkCollectionUUIDs(adminDB);
+
+ // Drop and recreate unique indexes with the older FCV
+ recreateUniqueIndexes(adminDB, false);
+
+ // Stop latest binary version mongod.
+ MongoRunner.stopMongod(conn);
+
+ // Start last-stable binary version mongod with same dbpath
+ jsTest.log("Starting a last-stable binVersion standalone to test downgrade");
+ let lastStableConn = startMongodWithVersion(noCleanDataOptions, lastStableBinary);
+ let lastStableAdminDB = lastStableConn.getDB("admin");
+
+ // Check FCV document.
+ checkFCV(lastStableAdminDB, lastStableFCV);
+
+ // Ensure all collections still have UUIDs on a last-stable mongod.
+ checkCollectionUUIDs(lastStableAdminDB);
+
+ // Stop last-stable binary version mongod.
+ MongoRunner.stopMongod(lastStableConn);
+
+ // Start latest binary version mongod again.
+ jsTest.log("Starting a latest binVersion standalone to test upgrade");
+ conn = startMongodWithVersion(noCleanDataOptions, latestBinary);
+ adminDB = conn.getDB("admin");
+
+ // Ensure setFeatureCompatibilityVersion to latest succeeds, all collections have UUIDs
+ // and all unique indexes are in new version.
+ setFCV(adminDB, latestFCV);
+ checkFCV(adminDB, latestFCV);
+ checkCollectionUUIDs(adminDB);
+ checkUniqueIndexFormatVersion(adminDB);
+
+ // Stop latest binary version mongod for the last time
+ MongoRunner.stopMongod(conn);
+};
+
+//
+// Replica set tests.
+//
+let replicaSetTest = function(nodeOptions) {
+ // New latest binary version replica set.
+ jsTest.log("Starting a latest binVersion ReplSetTest");
+ let rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions});
+ rst.startSet();
+ rst.initiate();
+ let primaryAdminDB = rst.getPrimary().getDB("admin");
+ let secondaries = rst.getSecondaries();
+
+ // Insert some data.
+ insertDataForConn(rst.getPrimary(), ["admin", "local", "test"], nodeOptions);
+ rst.awaitReplication();
+
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ // Insert some data into the local DB.
+ insertDataForConn(secondaries[j], ["local"], nodeOptions);
+ }
+
+ if (!nodeOptions.hasOwnProperty("shardsvr")) {
+ // Initially featureCompatibilityVersion is latest on primary and secondaries except for
+ // when we run with shardsvr. We expect featureCompatibilityVersion to be last-stable
+ // for shardsvr.
checkFCV(primaryAdminDB, latestFCV);
+
for (let j = 0; j < secondaries.length; j++) {
let secondaryAdminDB = secondaries[j].getDB("admin");
checkFCV(secondaryAdminDB, latestFCV);
}
+ // Ensure all collections have UUIDs and unique indexes are in new version in latest
+ // featureCompatibilityVersion mode on both primary and secondaries.
checkCollectionUUIDs(primaryAdminDB);
checkUniqueIndexFormatVersion(primaryAdminDB);
for (let j = 0; j < secondaries.length; j++) {
@@ -307,18 +227,96 @@
checkUniqueIndexFormatVersion(secondaryAdminDB);
}
- rst.stopSet();
- };
-
- // Do tests for regular standalones and replica sets.
- standaloneTest({});
- replicaSetTest({});
-
- // Do tests for standalones and replica sets started with --shardsvr.
- standaloneTest({shardsvr: ""});
- replicaSetTest({shardsvr: ""});
-
- // Do tests for standalones and replica sets started with --configsvr.
- standaloneTest({configsvr: ""});
- replicaSetTest({configsvr: ""});
+ // Change featureCompatibilityVersion to last-stable.
+ setFCV(primaryAdminDB, lastStableFCV);
+ rst.awaitReplication();
+ }
+
+ // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
+ checkFCV(primaryAdminDB, lastStableFCV);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkFCV(secondaryAdminDB, lastStableFCV);
+ }
+
+ checkCollectionUUIDs(primaryAdminDB);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkCollectionUUIDs(secondaryAdminDB);
+ }
+
+ // Drop and recreate unique indexes with the older FCV
+ recreateUniqueIndexes(primaryAdminDB, false);
+
+ // Now drop and recreate unique indexes on secondaries' "local" database
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ recreateUniqueIndexes(secondaryAdminDB, true);
+ }
+
+ // Stop latest binary version replica set.
+ rst.stopSet(null /* signal */, true /* forRestart */);
+
+ // Downgrade the ReplSetTest binaries and make sure everything is okay.
+ jsTest.log("Starting a last-stable binVersion ReplSetTest to test downgrade");
+ rst.startSet({restart: true, binVersion: lastStableBinary});
+
+ // Check that the featureCompatibilityVersion is set to last-stable and all
+ // collections still have UUIDs.
+ let lastStablePrimaryAdminDB = rst.getPrimary().getDB("admin");
+ let lastStableSecondaries = rst.getSecondaries();
+
+ checkFCV(lastStablePrimaryAdminDB, lastStableFCV);
+ for (let j = 0; j < lastStableSecondaries.length; j++) {
+ let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
+ checkFCV(secondaryAdminDB, lastStableFCV);
+ }
+
+ checkCollectionUUIDs(lastStablePrimaryAdminDB);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
+ checkCollectionUUIDs(secondaryAdminDB);
+ }
+
+ rst.stopSet(null /* signal */, true /* forRestart */);
+
+ // Start latest binary version replica set again.
+ jsTest.log("Starting a latest binVersion ReplSetTest to test upgrade");
+ rst.startSet({restart: true, binVersion: latestBinary});
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaries = rst.getSecondaries();
+
+ // Ensure all collections have UUIDs and unique indexes are in new version after switching
+ // back to latest featureCompatibilityVersion on both primary and secondaries.
+ setFCV(primaryAdminDB, latestFCV);
+ rst.awaitReplication();
+
+ checkFCV(primaryAdminDB, latestFCV);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkFCV(secondaryAdminDB, latestFCV);
+ }
+
+ checkCollectionUUIDs(primaryAdminDB);
+ checkUniqueIndexFormatVersion(primaryAdminDB);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkCollectionUUIDs(secondaryAdminDB);
+ checkUniqueIndexFormatVersion(secondaryAdminDB);
+ }
+
+ rst.stopSet();
+};
+
+// Do tests for regular standalones and replica sets.
+standaloneTest({});
+replicaSetTest({});
+
+// Do tests for standalones and replica sets started with --shardsvr.
+standaloneTest({shardsvr: ""});
+replicaSetTest({shardsvr: ""});
+
+// Do tests for standalones and replica sets started with --configsvr.
+standaloneTest({configsvr: ""});
+replicaSetTest({configsvr: ""});
})();
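Note: the checkFCV/setFCV helpers used throughout do_upgrade_downgrade.js operate on a persisted document. A hedged sketch of what they inspect (field names per the 4.0/4.2-era servers these tests target; 'adminDB' is the admin database of any node):

// The featureCompatibilityVersion is stored as a document in
// admin.system.version; 'version' holds the effective FCV, and a
// 'targetVersion' field is present only while an upgrade or downgrade
// is in flight.
let fcvDoc = adminDB.system.version.findOne({_id: "featureCompatibilityVersion"});
jsTestLog("FCV document: " + tojson(fcvDoc));
assert.eq(lastStableFCV, fcvDoc.version, tojson(fcvDoc));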
diff --git a/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js b/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
index 008164a5a80..2148a26111e 100644
--- a/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
@@ -1,53 +1,52 @@
// Tests that a primary with upgrade featureCompatibilityVersion cannot connect with a secondary
// with a lower binary version.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/write_concern_util.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/write_concern_util.js");
- const latest = "latest";
- const downgrade = "last-stable";
+const latest = "latest";
+const downgrade = "last-stable";
- // Start a new replica set with two latest version nodes.
- let rst = new ReplSetTest({
- nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- rst.startSet();
- rst.initiate();
+// Start a new replica set with two latest version nodes.
+let rst = new ReplSetTest({
+ nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- let latestSecondary = rst.getSecondary();
+let primary = rst.getPrimary();
+let latestSecondary = rst.getSecondary();
- // Set the featureCompatibilityVersion to the downgrade version so that a downgrade node can
- // join the set.
- assert.commandWorked(
- primary.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+// Set the featureCompatibilityVersion to the downgrade version so that a downgrade node can
+// join the set.
+assert.commandWorked(
+ primary.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- // Add a downgrade node to the set.
- let downgradeSecondary = rst.add({binVersion: downgrade, rsConfig: {priority: 0}});
- rst.reInitiate();
+// Add a downgrade node to the set.
+let downgradeSecondary = rst.add({binVersion: downgrade, rsConfig: {priority: 0}});
+rst.reInitiate();
- // Wait for the downgrade secondary to finish initial sync.
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
+// Wait for the downgrade secondary to finish initial sync.
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
- // Stop replication on the downgrade secondary.
- stopServerReplication(downgradeSecondary);
+// Stop replication on the downgrade secondary.
+stopServerReplication(downgradeSecondary);
- // Set the featureCompatibilityVersion to the upgrade version. This will not replicate to
- // the downgrade secondary, but the downgrade secondary will no longer be able to
- // communicate with the rest of the set.
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+// Set the featureCompatibilityVersion to the upgrade version. This will not replicate to
+// the downgrade secondary, but the downgrade secondary will no longer be able to
+// communicate with the rest of the set.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- // Shut down the latest version secondary.
- rst.stop(latestSecondary);
+// Shut down the latest version secondary.
+rst.stop(latestSecondary);
- // The primary should step down, since it can no longer see a majority of the replica set.
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- restartServerReplication(downgradeSecondary);
- rst.stopSet();
+// The primary should step down, since it can no longer see a majority of the replica set.
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+restartServerReplication(downgradeSecondary);
+rst.stopSet();
})();
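Note: stopServerReplication/restartServerReplication (from write_concern_util.js) are what keep the downgrade secondary from ever seeing the FCV change. A hedged sketch of the underlying mechanism (in this era the helpers drive a failpoint that halts the node's oplog-fetching producer; 'downgradeSecondary' is the lagging node):

// Halt the producer so no new oplog entries are fetched or applied.
assert.commandWorked(downgradeSecondary.adminCommand(
    {configureFailPoint: "stopReplProducer", mode: "alwaysOn"}));
// ... change the FCV on the primary; this node stays at the old FCV ...
// Resume replication before shutting the set down.
assert.commandWorked(downgradeSecondary.adminCommand(
    {configureFailPoint: "stopReplProducer", mode: "off"}));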
diff --git a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
index 6e54a1f0642..273695dbc05 100644
--- a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
@@ -10,299 +10,291 @@
*/
(function() {
- 'use strict';
-
- load('jstests/libs/get_index_helpers.js');
- load('jstests/multiVersion/libs/multi_rs.js');
- load('jstests/multiVersion/libs/verify_versions.js');
-
- // Setup the dbpath for this test.
- const dbpath = MongoRunner.dataPath + 'major_version_upgrade';
- resetDbpath(dbpath);
-
- // We set noCleanData to true in order to preserve the data files between iterations.
- const defaultOptions = {
- dbpath: dbpath,
- noCleanData: true,
- };
-
- // This lists all supported releases and needs to be kept up to date as versions are added and
- // dropped.
- // TODO SERVER-26792: In the future, we should have a common place from which both the
- // multiversion setup procedure and this test get information about supported major releases.
- const versions = [
- {binVersion: '3.2', testCollection: 'three_two'},
- {binVersion: '3.4', featureCompatibilityVersion: '3.4', testCollection: 'three_four'},
- {binVersion: '3.6', featureCompatibilityVersion: '3.6', testCollection: 'three_six'},
- {binVersion: '4.0', featureCompatibilityVersion: '4.0', testCollection: 'four_zero'},
- {binVersion: 'last-stable', testCollection: 'last_stable'},
- {binVersion: 'latest', featureCompatibilityVersion: '4.2', testCollection: 'latest'},
- ];
-
- // These key patterns are considered valid for existing v:0 and v:1 indexes, but are considered
- // invalid for v:2 indexes or new index builds.
- var invalidIndexSpecs = [
- {a: 0},
- {a: NaN},
- {a: true},
- ];
-
- // When running the oldest supported version, insert indexes with bad key patterns.
- function insertBadIndexes(testDB) {
- invalidIndexSpecs.forEach((spec) => {
- // Generate a unique and identifiable collection name.
- let collName = 'bad_index_' + tojson(spec.a);
- assert.commandWorked(testDB[collName].createIndex(spec, {name: 'badkp'}),
- 'failed to create index with key pattern' + tojson(spec));
-
- });
- }
+'use strict';
+
+load('jstests/libs/get_index_helpers.js');
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/multiVersion/libs/verify_versions.js');
+
+// Set up the dbpath for this test.
+const dbpath = MongoRunner.dataPath + 'major_version_upgrade';
+resetDbpath(dbpath);
+
+// We set noCleanData to true in order to preserve the data files between iterations.
+const defaultOptions = {
+ dbpath: dbpath,
+ noCleanData: true,
+};
+
+// This lists all supported releases and needs to be kept up to date as versions are added and
+// dropped.
+// TODO SERVER-26792: In the future, we should have a common place from which both the
+// multiversion setup procedure and this test get information about supported major releases.
+const versions = [
+ {binVersion: '3.2', testCollection: 'three_two'},
+ {binVersion: '3.4', featureCompatibilityVersion: '3.4', testCollection: 'three_four'},
+ {binVersion: '3.6', featureCompatibilityVersion: '3.6', testCollection: 'three_six'},
+ {binVersion: '4.0', featureCompatibilityVersion: '4.0', testCollection: 'four_zero'},
+ {binVersion: 'last-stable', testCollection: 'last_stable'},
+ {binVersion: 'latest', featureCompatibilityVersion: '4.2', testCollection: 'latest'},
+];
+
+// These key patterns are considered valid for existing v:0 and v:1 indexes, but are considered
+// invalid for v:2 indexes or new index builds.
+var invalidIndexSpecs = [
+ {a: 0},
+ {a: NaN},
+ {a: true},
+];
+
+// When running the oldest supported version, insert indexes with bad key patterns.
+function insertBadIndexes(testDB) {
+ invalidIndexSpecs.forEach((spec) => {
+ // Generate a unique and identifiable collection name.
+ let collName = 'bad_index_' + tojson(spec.a);
+ assert.commandWorked(testDB[collName].createIndex(spec, {name: 'badkp'}),
+ 'failed to create index with key pattern ' + tojson(spec));
+ });
+}
+
+// When running the newest version, check that the indexes with bad key patterns are readable.
+function validateBadIndexesStandalone(testDB) {
+ invalidIndexSpecs.forEach((spec) => {
+ // Generate a unique and identifiable collection name.
+ let collName = 'bad_index_' + tojson(spec.a);
+ let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
+ assert.neq(null, indexSpec, 'could not find index "badkp"');
+ assert.eq(1, indexSpec.v, tojson(indexSpec));
+
+ // Collection compact command should succeed, despite the presence of the v:1 index
+ // which would fail v:2 validation rules.
+ assert.commandWorked(testDB.runCommand({compact: collName}));
+
+ // reIndex will fail because when featureCompatibilityVersion>=3.4, reIndex
+ // automatically upgrades v=1 indexes to v=2.
+ assert.commandFailed(testDB[collName].reIndex());
+
+ // reIndex should not drop the index.
+ indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
+ assert.neq(null, indexSpec, 'could not find index "badkp" after reIndex');
+ assert.eq(1, indexSpec.v, tojson(indexSpec));
+
+ // A query that hints the index should succeed.
+ assert.commandWorked(testDB.runCommand({find: collName, hint: "badkp"}));
+
+ // Newly created indexes will do stricter validation and should fail if the
+ // key pattern is invalid.
+ assert.commandWorked(testDB[collName].dropIndexes());
+ assert.commandFailedWithCode(
+ testDB[collName].createIndex(spec),
+ ErrorCodes.CannotCreateIndex,
+ 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
+ // Index build should also fail if v:1 or v:2 is explicitly requested.
+ assert.commandFailedWithCode(
+ testDB[collName].createIndex(spec, {v: 1}),
+ ErrorCodes.CannotCreateIndex,
+ 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
+ assert.commandFailedWithCode(
+ testDB[collName].createIndex(spec, {v: 2}),
+ ErrorCodes.CannotCreateIndex,
+ 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
+ });
+}
+
+// Check that secondary nodes have the v:1 indexes.
+function validateBadIndexesSecondary(testDB) {
+ invalidIndexSpecs.forEach((spec) => {
+ // Generate a unique and identifiable collection name.
+ let collName = 'bad_index_' + tojson(spec.a);
+ // Verify that the secondary has the v:1 index.
+ let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
+ assert.neq(null, indexSpec, 'could not find index "badkp"');
+ assert.eq(1, indexSpec.v, tojson(indexSpec));
+ });
+}
+
+// Standalone
+// Iterate from earliest to latest versions specified in the versions list, and follow the steps
+// outlined at the top of this test file.
+let authSchemaUpgraded = false;
+for (let i = 0; i < versions.length; i++) {
+ let version = versions[i];
+ let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
+
+ // Start a mongod with specified version.
+ let conn = MongoRunner.runMongod(mongodOptions);
+
+ if ((conn === null) && (i > 0) && !authSchemaUpgraded) {
+ // As of 4.0, mongod will refuse to start up with authSchema 3
+ // until the schema has been upgraded.
+ // Step back a version (to 3.6) in order to perform the upgrade,
+ // then try starting 4.0 again.
+ print(
+ "Failed starting mongod, going to try upgrading the auth schema on the prior version");
+ conn = MongoRunner.runMongod(
+ Object.extend({binVersion: versions[i - 1].binVersion}, defaultOptions));
+ assert.neq(null,
+ conn,
+ 'mongod was previously able to start with version ' +
+ tojson(version.binVersion) + " but now can't");
+ assert.commandWorked(conn.getDB('admin').runCommand({authSchemaUpgrade: 1}));
+ MongoRunner.stopMongod(conn);
- // When running the newest version, check that the indexes with bad key patterns are readable.
- function validateBadIndexesStandalone(testDB) {
- invalidIndexSpecs.forEach((spec) => {
- // Generate a unique and identifiable collection name.
- let collName = 'bad_index_' + tojson(spec.a);
- let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
- assert.neq(null, indexSpec, 'could not find index "badkp"');
- assert.eq(1, indexSpec.v, tojson(indexSpec));
-
- // Collection compact command should succeed, despite the presence of the v:1 index
- // which would fail v:2 validation rules.
- assert.commandWorked(testDB.runCommand({compact: collName}));
-
- // reIndex will fail because when featureCompatibilityVersion>=3.4, reIndex
- // automatically upgrades v=1 indexes to v=2.
- assert.commandFailed(testDB[collName].reIndex());
-
- // reIndex should not drop the index.
- indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
- assert.neq(null, indexSpec, 'could not find index "badkp" after reIndex');
- assert.eq(1, indexSpec.v, tojson(indexSpec));
-
- // A query that hints the index should succeed.
- assert.commandWorked(testDB.runCommand({find: collName, hint: "badkp"}));
-
- // Newly created indexes will do stricter validation and should fail if the
- // key pattern is invalid.
- assert.commandWorked(testDB[collName].dropIndexes());
- assert.commandFailedWithCode(
- testDB[collName].createIndex(spec),
- ErrorCodes.CannotCreateIndex,
- 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
- // Index build should also fail if v:1 or v:2 is explicitly requested.
- assert.commandFailedWithCode(
- testDB[collName].createIndex(spec, {v: 1}),
- ErrorCodes.CannotCreateIndex,
- 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
- assert.commandFailedWithCode(
- testDB[collName].createIndex(spec, {v: 2}),
- ErrorCodes.CannotCreateIndex,
- 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
-
- });
+ authSchemaUpgraded = true;
+ conn = MongoRunner.runMongod(mongodOptions);
}
- // Check that secondary nodes have the v:1 indexes.
- function validateBadIndexesSecondary(testDB) {
- invalidIndexSpecs.forEach((spec) => {
- // Generate a unique and identifiable collection name.
- let collName = 'bad_index_' + tojson(spec.a);
- // Verify that the secondary has the v:1 index.
- let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
- assert.neq(null, indexSpec, 'could not find index "badkp"');
- assert.eq(1, indexSpec.v, tojson(indexSpec));
- });
+ assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(mongodOptions));
+ assert.binVersion(conn, version.binVersion);
+
+ if ((i === 0) && (version.binVersion <= 3.6)) {
+ // Simulate coming from a <= 2.6 installation where MONGODB-CR was the default/only
+ // authentication mechanism. Eventually, the upgrade process will fail (above) when
+ // running on 4.0 where support for MONGODB-CR has been removed.
+ conn.getDB('admin').system.version.save({"_id": "authSchema", "currentVersion": 3});
}
- // Standalone
- // Iterate from earliest to latest versions specified in the versions list, and follow the steps
- // outlined at the top of this test file.
- let authSchemaUpgraded = false;
- for (let i = 0; i < versions.length; i++) {
- let version = versions[i];
- let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
-
- // Start a mongod with specified version.
- let conn = MongoRunner.runMongod(mongodOptions);
-
- if ((conn === null) && (i > 0) && !authSchemaUpgraded) {
- // As of 4.0, mongod will refuse to start up with authSchema 3
- // until the schema has been upgraded.
- // Step back a version (to 3.6) in order to perform the upgrade,
- // Then try startuing 4.0 again.
- print(
- "Failed starting mongod, going to try upgrading the auth schema on the prior version");
- conn = MongoRunner.runMongod(
- Object.extend({binVersion: versions[i - 1].binVersion}, defaultOptions));
- assert.neq(null,
- conn,
- 'mongod was previously able to start with version ' +
- tojson(version.binVersion) + " but now can't");
- assert.commandWorked(conn.getDB('admin').runCommand({authSchemaUpgrade: 1}));
- MongoRunner.stopMongod(conn);
-
- authSchemaUpgraded = true;
- conn = MongoRunner.runMongod(mongodOptions);
- }
+ // Connect to the 'test' database.
+ let testDB = conn.getDB('test');
+ // Verify that the data and indices from previous iterations are still accessible.
+ for (let j = 0; j < i; j++) {
+ let oldVersionCollection = versions[j].testCollection;
+ assert.eq(1,
+ testDB[oldVersionCollection].count(),
+ `data from ${oldVersionCollection} should be available; options: ` +
+ tojson(mongodOptions));
assert.neq(
- null, conn, 'mongod was unable to start up with options: ' + tojson(mongodOptions));
- assert.binVersion(conn, version.binVersion);
-
- if ((i === 0) && (version.binVersion <= 3.6)) {
- // Simulate coming from a <= 2.6 installation where MONGODB-CR was the default/only
- // authentication mechanism. Eventually, the upgrade process will fail (above) when
- // running on 4.0 where support for MONGODB-CR has been removed.
- conn.getDB('admin').system.version.save({"_id": "authSchema", "currentVersion": 3});
- }
+ null,
+ GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
+ `index from ${oldVersionCollection} should be available; options: ` +
+ tojson(mongodOptions));
+ }
- // Connect to the 'test' database.
- let testDB = conn.getDB('test');
-
- // Verify that the data and indices from previous iterations are still accessible.
- for (let j = 0; j < i; j++) {
- let oldVersionCollection = versions[j].testCollection;
- assert.eq(1,
- testDB[oldVersionCollection].count(),
- `data from ${oldVersionCollection} should be available; options: ` +
- tojson(mongodOptions));
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
- `index from ${oldVersionCollection} should be available; options: ` +
- tojson(mongodOptions));
- }
+ // Create a new collection.
+ assert.commandWorked(testDB.createCollection(version.testCollection));
+
+ // Insert a document into the new collection.
+ assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.eq(1,
+ testDB[version.testCollection].count(),
+ `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
+ 'options: ' + tojson(mongodOptions));
+
+ // Create an index on the new collection.
+ assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
+
+ if (i === 0) {
+ // We're on the earliest version, insert indexes with bad key patterns.
+ insertBadIndexes(testDB);
+ } else if (i === versions.length - 1) {
+ // We're on the latest version, check bad indexes are still readable.
+ validateBadIndexesStandalone(testDB);
+ }
- // Create a new collection.
- assert.commandWorked(testDB.createCollection(version.testCollection));
-
- // Insert a document into the new collection.
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
- assert.eq(
- 1,
- testDB[version.testCollection].count(),
- `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
- 'options: ' + tojson(mongodOptions));
-
- // Create an index on the new collection.
- assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
-
- if (i === 0) {
- // We're on the earliest version, insert indexes with bad key patterns.
- insertBadIndexes(testDB);
- } else if (i === versions.length - 1) {
- // We're on the latest version, check bad indexes are still readable.
- validateBadIndexesStandalone(testDB);
- }
+ // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
+ if (version.hasOwnProperty('featureCompatibilityVersion')) {
+ let adminDB = conn.getDB("admin");
+ assert.commandWorked(adminDB.runCommand(
+ {"setFeatureCompatibilityVersion": version.featureCompatibilityVersion}));
+ }
- // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
- if (version.hasOwnProperty('featureCompatibilityVersion')) {
- let adminDB = conn.getDB("admin");
- assert.commandWorked(adminDB.runCommand(
- {"setFeatureCompatibilityVersion": version.featureCompatibilityVersion}));
+ // Shut down the current mongod.
+ MongoRunner.stopMongod(conn);
+}
+
+// Replica Sets
+// Set up the ReplSetTest object.
+let nodes = {
+ n1: {binVersion: versions[0].binVersion},
+ n2: {binVersion: versions[0].binVersion},
+ n3: {binVersion: versions[0].binVersion},
+};
+let rst = new ReplSetTest({nodes});
+
+// Start up and initiate the replica set.
+rst.startSet();
+rst.initiate();
+
+// Iterate from earliest to latest versions specified in the versions list, and follow the steps
+// outlined at the top of this test file.
+for (let i = 0; i < versions.length; i++) {
+ let version = versions[i];
+
+ // Connect to the primary running the old version to ensure that the test can insert and
+ // create indices.
+ let primary = rst.getPrimary();
+
+ // Upgrade the secondary nodes first.
+ rst.upgradeSecondaries(primary, {binVersion: version.binVersion});
+
+ assert.neq(null,
+ primary,
+ `replica set was unable to start up after upgrading secondaries to version: ${
+ version.binVersion}`);
+
+ // Connect to the 'test' database.
+ let testDB = primary.getDB('test');
+ assert.commandWorked(testDB.createCollection(version.testCollection));
+ assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.eq(1,
+ testDB[version.testCollection].count(),
+ `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
+ 'nodes: ' + tojson(nodes));
+
+ // Create an index on the new collection.
+ assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
+
+ if (i === 0) {
+ // We're on the earliest version, insert indexes with bad key patterns.
+ insertBadIndexes(testDB);
+ } else if (i === versions.length - 1) {
+ // We're on the latest version, check bad indexes are still readable.
+ for (let secondary of rst.getSecondaries()) {
+ validateBadIndexesSecondary(secondary.getDB('test'));
}
-
- // Shutdown the current mongod.
- MongoRunner.stopMongod(conn);
}
- // Replica Sets
- // Setup the ReplSetTest object.
- let nodes = {
- n1: {binVersion: versions[0].binVersion},
- n2: {binVersion: versions[0].binVersion},
- n3: {binVersion: versions[0].binVersion},
- };
- let rst = new ReplSetTest({nodes});
-
- // Start up and initiate the replica set.
- rst.startSet();
- rst.initiate();
-
- // Iterate from earliest to latest versions specified in the versions list, and follow the steps
- // outlined at the top of this test file.
- for (let i = 0; i < versions.length; i++) {
- let version = versions[i];
-
- // Connect to the primary running the old version to ensure that the test can insert and
- // create indices.
- let primary = rst.getPrimary();
-
- // Upgrade the secondary nodes first.
- rst.upgradeSecondaries(primary, {binVersion: version.binVersion});
-
+ // Do the index creation and insertion again after upgrading the primary node.
+ primary = rst.upgradePrimary(primary, {binVersion: version.binVersion});
+ assert.neq(
+ null, primary, `replica set was unable to start up with version: ${version.binVersion}`);
+ assert.binVersion(primary, version.binVersion);
+ testDB = primary.getDB('test');
+
+ assert.writeOK(testDB[version.testCollection].insert({b: 1}));
+ assert.eq(2,
+ testDB[version.testCollection].count(),
+ `mongo should have inserted 2 documents into collection ${version.testCollection}; ` +
+ 'nodes: ' + tojson(nodes));
+
+ assert.commandWorked(testDB[version.testCollection].createIndex({b: 1}));
+
+ // Verify that all previously inserted data and indices are accessible.
+ for (let j = 0; j <= i; j++) {
+ let oldVersionCollection = versions[j].testCollection;
+ assert.eq(2,
+ testDB[oldVersionCollection].count(),
+ `data from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
assert.neq(
null,
- primary,
- `replica set was unable to start up after upgrading secondaries to version: ${version.binVersion}`);
-
- // Connect to the 'test' database.
- let testDB = primary.getDB('test');
- assert.commandWorked(testDB.createCollection(version.testCollection));
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
- assert.eq(
- 1,
- testDB[version.testCollection].count(),
- `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
- 'nodes: ' + tojson(nodes));
-
- // Create an index on the new collection.
- assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
-
- if (i === 0) {
- // We're on the earliest version; insert indexes with bad key patterns.
- insertBadIndexes(testDB);
- } else if (i === versions.length - 1) {
- // We're on the latest version; check that bad indexes are still readable.
- for (let secondary of rst.getSecondaries()) {
- validateBadIndexesSecondary(secondary.getDB('test'));
- }
- }
-
- // Do the index creation and insertion again after upgrading the primary node.
- primary = rst.upgradePrimary(primary, {binVersion: version.binVersion});
- assert.neq(null,
- primary,
- `replica set was unable to start up with version: ${version.binVersion}`);
- assert.binVersion(primary, version.binVersion);
- testDB = primary.getDB('test');
-
- assert.writeOK(testDB[version.testCollection].insert({b: 1}));
- assert.eq(
- 2,
- testDB[version.testCollection].count(),
- `mongo should have inserted 2 documents into collection ${version.testCollection}; ` +
- 'nodes: ' + tojson(nodes));
-
- assert.commandWorked(testDB[version.testCollection].createIndex({b: 1}));
-
- // Verify that all previously inserted data and indices are accessible.
- for (let j = 0; j <= i; j++) {
- let oldVersionCollection = versions[j].testCollection;
- assert.eq(
- 2,
- testDB[oldVersionCollection].count(),
- `data from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
- `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {b: 1}),
- `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
- }
+ GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
+ `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
+ assert.neq(
+ null,
+ GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {b: 1}),
+ `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
+ }
- // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
- if (version.hasOwnProperty('featureCompatibilityVersion')) {
- let primaryAdminDB = primary.getDB("admin");
- assert.commandWorked(primaryAdminDB.runCommand(
- {setFeatureCompatibilityVersion: version.featureCompatibilityVersion}));
- rst.awaitReplication();
- }
+ // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
+ if (version.hasOwnProperty('featureCompatibilityVersion')) {
+ let primaryAdminDB = primary.getDB("admin");
+ assert.commandWorked(primaryAdminDB.runCommand(
+ {setFeatureCompatibilityVersion: version.featureCompatibilityVersion}));
+ rst.awaitReplication();
}
+}
- // Stop the replica set.
- rst.stopSet();
+// Stop the replica set.
+rst.stopSet();
})();
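The hunk above keeps the test's rolling-upgrade shape intact: secondaries are upgraded before the primary, and setFeatureCompatibilityVersion runs only after the whole set is on the new binary. A minimal sketch of that pattern, assuming the ReplSetTest helpers used in this file (upgradeSecondaries, upgradePrimary), a loaded jstests/libs/feature_compatibility_version.js for latestFCV, and a hypothetical two-entry versions list:

    // Sketch only: rolling binary upgrade of a three-node replica set, oldest binary first.
    const versions =
        [{binVersion: "last-stable"}, {binVersion: "latest", featureCompatibilityVersion: latestFCV}];
    let rst = new ReplSetTest({nodes: 3, nodeOptions: {binVersion: versions[0].binVersion}});
    rst.startSet();
    rst.initiate();
    for (let version of versions) {
        let primary = rst.getPrimary();
        // Upgrade secondaries first so the set keeps a writable primary throughout.
        rst.upgradeSecondaries(primary, {binVersion: version.binVersion});
        primary = rst.upgradePrimary(primary, {binVersion: version.binVersion});
        // Only bump the FCV once every node runs the new binary.
        if (version.hasOwnProperty("featureCompatibilityVersion")) {
            assert.commandWorked(primary.getDB("admin").runCommand(
                {setFeatureCompatibilityVersion: version.featureCompatibilityVersion}));
        }
    }
    rst.stopSet();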
diff --git a/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js b/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
index fe78152e548..b2b2aee05ad 100644
--- a/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
+++ b/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
@@ -4,38 +4,37 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- let st = new ShardingTest({
- shards: [{binVersion: "latest"}, {binVersion: "last-stable"}],
- mongos: {binVersion: "latest"},
- other: {shardAsReplicaSet: false},
- });
-
- let testDB = st.s.getDB("test");
-
- // Create a sharded collection with primary shard 0.
- assert.commandWorked(st.s.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- assert.commandWorked(
- st.s.adminCommand({shardCollection: testDB.coll.getFullName(), key: {a: 1}}));
-
- // Set the featureCompatibilityVersion to latestFCV. This will fail because the
- // featureCompatibilityVersion cannot be set to latestFCV on shard 1, but it will set the
- // featureCompatibilityVersion to latestFCV on shard 0.
- assert.commandFailed(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV, latestFCV);
- checkFCV(st.shard0.getDB("admin"), latestFCV);
- checkFCV(st.shard1.getDB("admin"), lastStableFCV);
-
- // It is not possible to move a chunk from a latestFCV shard to a last-stable binary version
- // shard.
- assert.commandFailedWithCode(
- st.s.adminCommand(
- {moveChunk: testDB.coll.getFullName(), find: {a: 1}, to: st.shard1.shardName}),
- ErrorCodes.IncompatibleServerVersion);
-
- st.stop();
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+let st = new ShardingTest({
+ shards: [{binVersion: "latest"}, {binVersion: "last-stable"}],
+ mongos: {binVersion: "latest"},
+ other: {shardAsReplicaSet: false},
+});
+
+let testDB = st.s.getDB("test");
+
+// Create a sharded collection with primary shard 0.
+assert.commandWorked(st.s.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: testDB.coll.getFullName(), key: {a: 1}}));
+
+// Set the featureCompatibilityVersion to latestFCV. This will fail because the
+// featureCompatibilityVersion cannot be set to latestFCV on shard 1, but it will set the
+// featureCompatibilityVersion to latestFCV on shard 0.
+assert.commandFailed(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV, latestFCV);
+checkFCV(st.shard0.getDB("admin"), latestFCV);
+checkFCV(st.shard1.getDB("admin"), lastStableFCV);
+
+// It is not possible to move a chunk from a latestFCV shard to a last-stable binary version
+// shard.
+assert.commandFailedWithCode(
+ st.s.adminCommand(
+ {moveChunk: testDB.coll.getFullName(), find: {a: 1}, to: st.shard1.shardName}),
+ ErrorCodes.IncompatibleServerVersion);
+
+st.stop();
})();
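checkFCV and the lastStableFCV/latestFCV constants come from jstests/libs/feature_compatibility_version.js, loaded at the top of this test. Judging from the admin.system.version fields read elsewhere in this diff, the helper asserts roughly the following; checkFCVSketch is a hypothetical name for illustration, and the three-argument form used above expresses a transition still in flight:

    // Hypothetical re-implementation of the assertion, for illustration only.
    function checkFCVSketch(adminDB, version, targetVersion) {
        let doc = adminDB.system.version.findOne({_id: "featureCompatibilityVersion"});
        assert.eq(version, doc.version, tojson(doc));
        // targetVersion exists only while an upgrade or downgrade is in progress, which
        // is why the test passes (lastStableFCV, latestFCV) for the config server above.
        assert.eq(targetVersion, doc.targetVersion, tojson(doc));
    }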
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js
index 54e654c4dcb..48e710d330d 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js
@@ -1,14 +1,14 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- };
+const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js
index 4cdab3f5a61..a1e5869800d 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js
@@ -1,15 +1,15 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- dropTarget: true,
- };
+const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+ dropTarget: true,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js
index 3fd541a0ffc..2fb9c126ca6 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js
@@ -1,14 +1,14 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- };
+const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js
index 90ca1312db3..814fa096f47 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js
@@ -1,15 +1,15 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- dropTarget: true,
- };
+const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+ dropTarget: true,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js
index 8eeea0ed427..d37e40c5816 100644
--- a/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js
@@ -4,87 +4,82 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
- resetDbpath(dbpath);
- let connection;
- let adminDB;
+let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
+resetDbpath(dbpath);
+let connection;
+let adminDB;
- const latest = "latest";
+const latest = "latest";
- /**
- * Ensure that a mongod (without using --repair) fails to start up if there are non-local
- * collections and the FCV document in the admin database has been removed.
- *
- * The mongod has the 'version' binary and is started up on 'dbpath'.
- */
- let doStartupFailTests = function(version, dbpath) {
- // Set up a mongod with an admin database but without a FCV document in the admin database.
- setupMissingFCVDoc(version, dbpath);
-
- // Now attempt to start up a new mongod without clearing the data files from 'dbpath', which
- // contain the admin database but are missing the FCV document. The mongod should fail to
- // start up if there is a non-local collection and the FCV document is missing.
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version, noCleanData: true});
- assert.eq(
- null,
- conn,
- "expected mongod to fail when data files are present but no FCV document is found.");
- };
+/**
+ * Ensure that a mongod (without using --repair) fails to start up if there are non-local
+ * collections and the FCV document in the admin database has been removed.
+ *
+ * The mongod has the 'version' binary and is started up on 'dbpath'.
+ */
+let doStartupFailTests = function(version, dbpath) {
+ // Set up a mongod with an admin database but without a FCV document in the admin database.
+ setupMissingFCVDoc(version, dbpath);
- /**
- * Starts up a mongod with binary 'version' on 'dbpath', then removes the FCV document from the
- * admin database and returns the mongod.
- */
- let setupMissingFCVDoc = function(version, dbpath) {
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version});
- assert.neq(null,
- conn,
- "mongod was unable to start up with version=" + version + " and no data files");
- adminDB = conn.getDB("admin");
- removeFCVDocument(adminDB);
- MongoRunner.stopMongod(conn);
- return conn;
- };
+ // Now attempt to start up a new mongod without clearing the data files from 'dbpath', which
+ // contain the admin database but are missing the FCV document. The mongod should fail to
+ // start up if there is a non-local collection and the FCV document is missing.
+ let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version, noCleanData: true});
+ assert.eq(null,
+ conn,
+ "expected mongod to fail when data files are present but no FCV document is found.");
+};
- // Check that start up without --repair fails if there is non-local DB data and the FCV doc was
- // deleted.
- doStartupFailTests(latest, dbpath);
+/**
+ * Starts up a mongod with binary 'version' on 'dbpath', then removes the FCV document from the
+ * admin database and returns the mongod.
+ */
+let setupMissingFCVDoc = function(version, dbpath) {
+ let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version});
+ assert.neq(
+ null, conn, "mongod was unable to start up with version=" + version + " and no data files");
+ adminDB = conn.getDB("admin");
+ removeFCVDocument(adminDB);
+ MongoRunner.stopMongod(conn);
+ return conn;
+};
- // --repair can be used to restore a missing featureCompatibilityVersion document to an existing
- // admin database, as long as all collections have UUIDs. The FCV should be initialized to
- // lastStableFCV / downgraded FCV.
- connection = setupMissingFCVDoc(latest, dbpath);
- let returnCode =
- runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
- assert.eq(
- returnCode,
- 0,
- "expected mongod --repair to execute successfully when restoring a missing FCV document.");
+// Check that start up without --repair fails if there is non-local DB data and the FCV doc was
+// deleted.
+doStartupFailTests(latest, dbpath);
- connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
- assert.neq(null,
- connection,
- "mongod was unable to start up with version=" + latest + " and existing data files");
- adminDB = connection.getDB("admin");
- assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).version,
- lastStableFCV);
- assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).targetVersion,
- null);
- MongoRunner.stopMongod(connection);
+// --repair can be used to restore a missing featureCompatibilityVersion document to an existing
+// admin database, as long as all collections have UUIDs. The FCV should be initialized to
+// lastStableFCV / downgraded FCV.
+connection = setupMissingFCVDoc(latest, dbpath);
+let returnCode =
+ runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
+assert.eq(
+ returnCode,
+ 0,
+ "expected mongod --repair to execute successfully when restoring a missing FCV document.");
- // If the featureCompatibilityVersion document is present, --repair should just return success.
- connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(null,
- connection,
- "mongod was unable to start up with version=" + latest + " and no data files");
- MongoRunner.stopMongod(connection);
+connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
+assert.neq(null,
+ connection,
+ "mongod was unable to start up with version=" + latest + " and existing data files");
+adminDB = connection.getDB("admin");
+assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).version,
+ lastStableFCV);
+assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).targetVersion, null);
+MongoRunner.stopMongod(connection);
- returnCode =
- runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
- assert.eq(returnCode, 0);
+// If the featureCompatibilityVersion document is present, --repair should just return success.
+connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(null,
+ connection,
+ "mongod was unable to start up with version=" + latest + " and no data files");
+MongoRunner.stopMongod(connection);
+returnCode = runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
+assert.eq(returnCode, 0);
})();
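Condensing the assertions above: after --repair restores the missing featureCompatibilityVersion document, the test expects the downgraded version and no in-flight target. A sketch of that final check, assuming feature_compatibility_version.js is loaded and adminDB points at the repaired node:

    // Sketch: the state --repair is expected to leave behind.
    let fcvDoc = adminDB.system.version.findOne({_id: "featureCompatibilityVersion"});
    assert.eq(lastStableFCV, fcvDoc.version);  // initialized to the downgraded FCV
    assert.eq(null, fcvDoc.targetVersion);     // no upgrade/downgrade in flight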
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js b/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
index d88d7452ed5..9d3c1a60172 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
@@ -4,9 +4,9 @@
*/
(function() {
- "use strict";
- load("jstests/multiVersion/libs/multiversion_rollback.js");
+"use strict";
+load("jstests/multiVersion/libs/multiversion_rollback.js");
- var testName = "multiversion_rollback_last_stable_to_latest";
- testMultiversionRollback(testName, "last-stable", "latest");
+var testName = "multiversion_rollback_last_stable_to_latest";
+testMultiversionRollback(testName, "last-stable", "latest");
})(); \ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
index 951b6c91ae9..546065ecc5f 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
@@ -4,9 +4,9 @@
*/
(function() {
- "use strict";
- load("jstests/multiVersion/libs/multiversion_rollback.js");
+"use strict";
+load("jstests/multiVersion/libs/multiversion_rollback.js");
- var testName = "multiversion_rollback_latest_to_last_stable";
- testMultiversionRollback(testName, "latest", "last-stable");
+var testName = "multiversion_rollback_latest_to_last_stable";
+testMultiversionRollback(testName, "latest", "last-stable");
})(); \ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js b/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js
index 985e3a62c78..0e8573cef56 100644
--- a/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js
+++ b/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js
@@ -3,50 +3,50 @@
* collMod command.
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- let dbpath = MongoRunner.dataPath + "setFCV_collmod_transaction_rollback";
- resetDbpath(dbpath);
+let dbpath = MongoRunner.dataPath + "setFCV_collmod_transaction_rollback";
+resetDbpath(dbpath);
- const latest = "latest";
+const latest = "latest";
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- let adminDB = conn.getDB("admin");
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+let adminDB = conn.getDB("admin");
- var collName = "collModTest";
- var coll = adminDB.getCollection(collName);
- var ttlBeforeRollback = 50;
+var collName = "collModTest";
+var coll = adminDB.getCollection(collName);
+var ttlBeforeRollback = 50;
- assert.commandWorked(
- coll.createIndex({b: 1}, {"name": "index1", "expireAfterSeconds": ttlBeforeRollback}));
+assert.commandWorked(
+ coll.createIndex({b: 1}, {"name": "index1", "expireAfterSeconds": ttlBeforeRollback}));
- // The failpoint causes an interrupt in the collMod's WriteUnitOfWork, thus triggering a rollback.
- assert.commandWorked(
- adminDB.adminCommand({configureFailPoint: "assertAfterIndexUpdate", mode: "alwaysOn"}));
+// The failpoint causes an interrupt in the collMod's WriteUnitOfWork, thus triggering a rollback.
+assert.commandWorked(
+ adminDB.adminCommand({configureFailPoint: "assertAfterIndexUpdate", mode: "alwaysOn"}));
- // Test transaction rollback after index ttl update collMod.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {"collMod": collName, "index": {"name": "index1", "expireAfterSeconds": 100}}),
- 50970);
+// Test transaction rollback after index ttl update collMod.
+assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {"collMod": collName, "index": {"name": "index1", "expireAfterSeconds": 100}}),
+ 50970);
- const index = coll.getIndexes();
- var ttlAfterRollback = index[1].expireAfterSeconds;
- assert.eq(ttlAfterRollback, ttlBeforeRollback);
+const index = coll.getIndexes();
+var ttlAfterRollback = index[1].expireAfterSeconds;
+assert.eq(ttlAfterRollback, ttlBeforeRollback);
- // SERVER-37634 should remove this test post 4.2.
- // Test transaction rollback after unique index upgrade collMod.
- assert.commandWorked(adminDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+// SERVER-37634 should remove this test post 4.2.
+// Test transaction rollback after unique index upgrade collMod.
+assert.commandWorked(adminDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- assert.commandWorked(coll.createIndex({a: 1}, {unique: true}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}, {unique: true}));
+assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.commandFailedWithCode(adminDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}),
- 50971);
+assert.commandFailedWithCode(adminDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}),
+ 50971);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
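The enable/exercise/disable failpoint dance above recurs across these tests ('assertAfterIndexUpdate' here). A small hypothetical helper that captures the pattern, built only on the configureFailPoint admin command already used in this diff (withFailPoint is not an existing jstests library function):

    // Hypothetical convenience wrapper, for illustration only.
    function withFailPoint(adminDB, name, fn) {
        assert.commandWorked(adminDB.adminCommand({configureFailPoint: name, mode: "alwaysOn"}));
        try {
            fn();  // run the operation the failpoint is meant to interrupt
        } finally {
            assert.commandWorked(adminDB.adminCommand({configureFailPoint: name, mode: "off"}));
        }
    }

With it, the collMod step above would be wrapped as withFailPoint(adminDB, "assertAfterIndexUpdate", runCollMod), and the failpoint would be cleared even if the wrapped assertion throws.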
diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
index a404d8d01e3..9c62bdb5ee1 100644
--- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
@@ -10,382 +10,373 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/get_index_helpers.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
- resetDbpath(dbpath);
- let res;
-
- const latest = "latest";
- const lastStable = "last-stable";
-
- //
- // Standalone tests.
- //
-
- let conn;
- let adminDB;
-
- // A 'latest' binary standalone should default to 'latestFCV'.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, latestFCV);
-
- jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to an invalid value");
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: 5}));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.4"}));
-
- jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields.");
- assert.commandFailed(
- adminDB.runCommand({setFeatureCompatibilityVersion: lastStable, unknown: 1}));
-
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database");
- assert.commandFailed(
- conn.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStable}));
-
- jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter");
- assert.commandFailed(
- adminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStable}));
-
- // setFeatureCompatibilityVersion fails to downgrade FCV if the write fails.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "alwaysOn"
- }));
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to downgrade FCV if the write fails");
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, latestFCV);
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "off"
- }));
-
- // featureCompatibilityVersion can be downgraded to 'lastStableFCV'.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
-
- // setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "alwaysOn"
- }));
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails");
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, lastStableFCV);
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "off"
- }));
-
- // featureCompatibilityVersion can be upgraded to 'latestFCV'.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
-
- MongoRunner.stopMongod(conn);
-
- // featureCompatibilityVersion is durable.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, latestFCV);
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
- assert.neq(null,
- conn,
- "mongod was unable to start up with binary version=" + latest +
- " and last-stable featureCompatibilityVersion");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- // If you upgrade from 'lastStable' binary to 'latest' binary and have non-local databases, FCV
- // remains 'lastStableFCV'.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastStable});
- assert.neq(null,
- conn,
- "mongod was unable to start up with version=" + lastStable + " and no data files");
- assert.writeOK(conn.getDB("test").coll.insert({a: 5}));
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
- assert.neq(null,
- conn,
- "mongod was unable to start up with binary version=" + latest +
- " and featureCompatibilityVersion=" + lastStableFCV);
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- // A 'latest' binary mongod started with --shardsvr and clean data files defaults to
- // 'lastStableFCV'.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, shardsvr: ""});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- //
- // Replica set tests.
- //
-
- let rst;
- let rstConns;
- let replSetConfig;
- let primaryAdminDB;
- let secondaryAdminDB;
-
- // 'latest' binary replica set.
- rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
- primaryAdminDB = rst.getPrimary().getDB("admin");
- secondaryAdminDB = rst.getSecondary().getDB("admin");
-
- // FCV should default to 'latestFCV' on primary and secondary in a 'latest' binary replica set.
- checkFCV(primaryAdminDB, latestFCV);
- rst.awaitReplication();
- checkFCV(secondaryAdminDB, latestFCV);
-
- // featureCompatibilityVersion propagates to secondary.
- assert.commandWorked(
- primaryAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(primaryAdminDB, lastStableFCV);
- rst.awaitReplication();
- checkFCV(secondaryAdminDB, lastStableFCV);
-
- jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be run on secondary");
- assert.commandFailed(secondaryAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- rst.stopSet();
-
- // A 'latest' binary secondary with a 'lastStable' binary primary will have 'lastStableFCV'.
- rst = new ReplSetTest({nodes: [{binVersion: lastStable}, {binVersion: latest}]});
- rstConns = rst.startSet();
- replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
- rst.waitForState(rstConns[0], ReplSetTest.State.PRIMARY);
- secondaryAdminDB = rst.getSecondary().getDB("admin");
- checkFCV(secondaryAdminDB, lastStableFCV);
- rst.stopSet();
-
- // Test that a 'lastStable' secondary can successfully perform initial sync from a 'latest'
- // primary with 'lastStableFCV'.
- rst = new ReplSetTest({
- nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
- primaryAdminDB = primary.getDB("admin");
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- let secondary = rst.getSecondary();
-
- // The command should fail because wtimeout expires before a majority responds.
- stopServerReplication(secondary);
- res = primary.adminCommand(
- {setFeatureCompatibilityVersion: latestFCV, writeConcern: {wtimeout: 1000}});
- assert.eq(0, res.ok);
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- restartServerReplication(secondary);
-
- // Because the failed setFCV command left the primary in an intermediate state, complete the
- // upgrade, then reset back to the lastStable version.
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- secondary = rst.add({binVersion: lastStable});
- secondaryAdminDB = secondary.getDB("admin");
-
- // Rig the election so that the first node running the latest version remains the primary after
- // the 'lastStable' secondary is added to the replica set.
- replSetConfig = rst.getReplSetConfig();
- replSetConfig.version = 4;
- replSetConfig.members[2].priority = 0;
- reconfig(rst, replSetConfig);
-
- // Verify that the 'lastStable' secondary successfully performed its initial sync.
- assert.writeOK(
- primaryAdminDB.getSiblingDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
-
- // Test that a 'lastStable' secondary can no longer replicate from the primary after the FCV is
- // upgraded to 'latestFCV'.
- // Note: the 'lastStable' secondary must stop replicating during the upgrade to ensure it has no
- // chance of seeing the 'upgrading to latest' message in the oplog, whereupon it would crash.
- stopServerReplication(secondary);
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- restartServerReplication(secondary);
- checkFCV(secondaryAdminDB, lastStableFCV);
- assert.writeOK(primaryAdminDB.getSiblingDB("test").coll.insert({shouldReplicate: false}));
- assert.eq(secondaryAdminDB.getSiblingDB("test").coll.find({shouldReplicate: false}).itcount(),
- 0);
- rst.stopSet();
-
- // Test idempotency for setFeatureCompatibilityVersion.
- rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
-
- // Set FCV to 'lastStableFCV' so that a 'lastStable' binary node can join the set.
- primary = rst.getPrimary();
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- rst.awaitReplication();
-
- // Add a 'lastStable' binary node to the set.
- secondary = rst.add({binVersion: lastStable});
- rst.reInitiate();
-
- // Ensure the 'lastStable' binary node succeeded its initial sync.
- assert.writeOK(primary.getDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
-
- // Run {setFCV: lastStableFCV}. This should be idempotent.
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- rst.awaitReplication();
-
- // Ensure the secondary is still running.
- rst.stopSet();
-
- //
- // Sharding tests.
- //
-
- let st;
- let mongosAdminDB;
- let configPrimaryAdminDB;
- let shardPrimaryAdminDB;
-
- // A 'latest' binary cluster started with clean data files will set FCV to 'latestFCV'.
- st = new ShardingTest({
- shards: {rs0: {nodes: [{binVersion: latest}, {binVersion: latest}]}},
- other: {useBridge: true}
- });
- mongosAdminDB = st.s.getDB("admin");
- configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
- shardPrimaryAdminDB = st.rs0.getPrimary().getDB("admin");
-
- checkFCV(configPrimaryAdminDB, latestFCV);
- checkFCV(shardPrimaryAdminDB, latestFCV);
-
- jsTestLog(
- "EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to invalid value on mongos");
- assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: 5}));
- assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
- assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
-
- jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields on mongos");
- assert.commandFailed(
- mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, unknown: 1}));
-
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database on mongos");
- assert.commandFailed(
- st.s.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- jsTestLog(
- "EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter on mongos");
- assert.commandFailed(
- mongosAdminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStableFCV}));
-
- // Prevent the shard primary from receiving messages from the config server primary. When we try
- // to set FCV to 'lastStableFCV', the command should fail because the shard cannot be contacted.
- st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 1.0);
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be set because the shard primary is not reachable");
- assert.commandFailed(
- mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000}));
- checkFCV(
- configPrimaryAdminDB, lastStableFCV, lastStableFCV /* indicates downgrade in progress */);
- st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 0.0);
-
- // FCV can be set to 'lastStableFCV' on mongos.
- // This is run through assert.soon() because we've just caused a network interruption
- // by discarding messages in the bridge.
- assert.soon(function() {
- res = mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV});
- if (res.ok == 0) {
- print("Failed to set feature compatibility version: " + tojson(res));
- return false;
- }
- return true;
- });
-
- // featureCompatibilityVersion propagates to config and shard.
- checkFCV(configPrimaryAdminDB, lastStableFCV);
- checkFCV(shardPrimaryAdminDB, lastStableFCV);
-
- // A 'latest' binary replica set started as a shard server defaults to 'lastStableFCV'.
- let latestShard = new ReplSetTest({
- name: "latestShard",
- nodes: [{binVersion: latest}, {binVersion: latest}],
- nodeOptions: {shardsvr: ""},
- useHostName: true
- });
- latestShard.startSet();
- latestShard.initiate();
- let latestShardPrimaryAdminDB = latestShard.getPrimary().getDB("admin");
- checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
- assert.commandWorked(mongosAdminDB.runCommand({addShard: latestShard.getURL()}));
- checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
-
- // FCV can be set to 'latestFCV' on mongos.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
- checkFCV(shardPrimaryAdminDB, latestFCV);
- checkFCV(latestShardPrimaryAdminDB, latestFCV);
-
- // Call ShardingTest.stop before shutting down latestShard, so that the UUID check in
- // ShardingTest.stop can talk to latestShard.
- st.stop();
- latestShard.stopSet();
-
- // Create a cluster with a 'lastStable' binary mongos so that we can add 'lastStable' binary
- // shards.
- st = new ShardingTest({shards: 0, other: {mongosOptions: {binVersion: lastStable}}});
- mongosAdminDB = st.s.getDB("admin");
- configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
- checkFCV(configPrimaryAdminDB, lastStableFCV);
-
- // Adding a 'lastStable' binary shard to a cluster with 'lastStableFCV' succeeds.
- let lastStableShard = new ReplSetTest({
- name: "lastStableShard",
- nodes: [{binVersion: lastStable}, {binVersion: lastStable}],
- nodeOptions: {shardsvr: ""},
- useHostName: true
- });
- lastStableShard.startSet();
- lastStableShard.initiate();
- assert.commandWorked(mongosAdminDB.runCommand({addShard: lastStableShard.getURL()}));
- checkFCV(lastStableShard.getPrimary().getDB("admin"), lastStableFCV);
-
- // Call ShardingTest.stop before shutting down lastStableShard, so that the UUID check in
- // ShardingTest.stop can talk to lastStableShard.
- st.stop();
- lastStableShard.stopSet();
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
+resetDbpath(dbpath);
+let res;
+
+const latest = "latest";
+const lastStable = "last-stable";
+
+//
+// Standalone tests.
+//
+
+let conn;
+let adminDB;
+
+// A 'latest' binary standalone should default to 'latestFCV'.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, latestFCV);
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to an invalid value");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: 5}));
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.4"}));
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields.");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastStable, unknown: 1}));
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database");
+assert.commandFailed(conn.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStable}));
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter");
+assert.commandFailed(
+ adminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStable}));
+
+// setFeatureCompatibilityVersion fails to downgrade FCV if the write fails.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "alwaysOn"
+}));
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to downgrade FCV if the write fails");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(adminDB, latestFCV);
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "off"
+}));
+
+// featureCompatibilityVersion can be downgraded to 'lastStableFCV'.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(adminDB, lastStableFCV);
+
+// setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "alwaysOn"
+}));
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(adminDB, lastStableFCV);
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "off"
+}));
+
+// featureCompatibilityVersion can be upgraded to 'latestFCV'.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(adminDB, latestFCV);
+
+MongoRunner.stopMongod(conn);
+
+// featureCompatibilityVersion is durable.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, latestFCV);
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
+assert.neq(null,
+ conn,
+ "mongod was unable to start up with binary version=" + latest +
+ " and last-stable featureCompatibilityVersion");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+// If you upgrade from 'lastStable' binary to 'latest' binary and have non-local databases, FCV
+// remains 'lastStableFCV'.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastStable});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + lastStable + " and no data files");
+assert.writeOK(conn.getDB("test").coll.insert({a: 5}));
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
+assert.neq(null,
+ conn,
+ "mongod was unable to start up with binary version=" + latest +
+ " and featureCompatibilityVersion=" + lastStableFCV);
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+// A 'latest' binary mongod started with --shardsvr and clean data files defaults to
+// 'lastStableFCV'.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, shardsvr: ""});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+//
+// Replica set tests.
+//
+
+let rst;
+let rstConns;
+let replSetConfig;
+let primaryAdminDB;
+let secondaryAdminDB;
+
+// 'latest' binary replica set.
+rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
+rst.startSet();
+rst.initiate();
+primaryAdminDB = rst.getPrimary().getDB("admin");
+secondaryAdminDB = rst.getSecondary().getDB("admin");
+
+// FCV should default to 'latestFCV' on primary and secondary in a 'latest' binary replica set.
+checkFCV(primaryAdminDB, latestFCV);
+rst.awaitReplication();
+checkFCV(secondaryAdminDB, latestFCV);
+
+// featureCompatibilityVersion propagates to secondary.
+assert.commandWorked(primaryAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(primaryAdminDB, lastStableFCV);
+rst.awaitReplication();
+checkFCV(secondaryAdminDB, lastStableFCV);
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be run on secondary");
+assert.commandFailed(secondaryAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+rst.stopSet();
+
+// A 'latest' binary secondary with a 'lastStable' binary primary will have 'lastStableFCV'.
+rst = new ReplSetTest({nodes: [{binVersion: lastStable}, {binVersion: latest}]});
+rstConns = rst.startSet();
+replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
+rst.initiate(replSetConfig);
+rst.waitForState(rstConns[0], ReplSetTest.State.PRIMARY);
+secondaryAdminDB = rst.getSecondary().getDB("admin");
+checkFCV(secondaryAdminDB, lastStableFCV);
+rst.stopSet();
+
+// Test that a 'lastStable' secondary can successfully perform initial sync from a 'latest'
+// primary with 'lastStableFCV'.
+rst = new ReplSetTest({
+ nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+rst.startSet();
+rst.initiate();
+
+let primary = rst.getPrimary();
+primaryAdminDB = primary.getDB("admin");
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+let secondary = rst.getSecondary();
+
+// The command should fail because wtimeout expires before a majority responds.
+stopServerReplication(secondary);
+res = primary.adminCommand(
+ {setFeatureCompatibilityVersion: latestFCV, writeConcern: {wtimeout: 1000}});
+assert.eq(0, res.ok);
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+restartServerReplication(secondary);
+
+// Because the failed setFCV command left the primary in an intermediate state, complete the
+// upgrade, then reset back to the lastStable version.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+secondary = rst.add({binVersion: lastStable});
+secondaryAdminDB = secondary.getDB("admin");
+
+// Rig the election so that the first node running the latest version remains the primary after
+// the 'lastStable' secondary is added to the replica set.
+replSetConfig = rst.getReplSetConfig();
+replSetConfig.version = 4;
+replSetConfig.members[2].priority = 0;
+reconfig(rst, replSetConfig);
+
+// Verify that the 'lastStable' secondary successfully performed its initial sync.
+assert.writeOK(
+ primaryAdminDB.getSiblingDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
+
+// Test that a 'lastStable' secondary can no longer replicate from the primary after the FCV is
+// upgraded to 'latestFCV'.
+// Note: the 'lastStable' secondary must stop replicating during the upgrade to ensure it has no
+// chance of seeing the 'upgrading to latest' message in the oplog, whereupon it would crash.
+stopServerReplication(secondary);
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+restartServerReplication(secondary);
+checkFCV(secondaryAdminDB, lastStableFCV);
+assert.writeOK(primaryAdminDB.getSiblingDB("test").coll.insert({shouldReplicate: false}));
+assert.eq(secondaryAdminDB.getSiblingDB("test").coll.find({shouldReplicate: false}).itcount(), 0);
+rst.stopSet();
+
+// Test idempotency for setFeatureCompatibilityVersion.
+rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
+rst.startSet();
+rst.initiate();
+
+// Set FCV to 'lastStableFCV' so that a 'lastStable' binary node can join the set.
+primary = rst.getPrimary();
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+rst.awaitReplication();
+
+// Add a 'lastStable' binary node to the set.
+secondary = rst.add({binVersion: lastStable});
+rst.reInitiate();
+
+// Ensure the 'lastStable' binary node succeeded its initial sync.
+assert.writeOK(primary.getDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
+
+// Run {setFCV: lastStableFCV}. This should be idempotent.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+rst.awaitReplication();
+
+// Ensure the secondary is still running.
+rst.stopSet();
+
+//
+// Sharding tests.
+//
+
+let st;
+let mongosAdminDB;
+let configPrimaryAdminDB;
+let shardPrimaryAdminDB;
+
+// A 'latest' binary cluster started with clean data files will set FCV to 'latestFCV'.
+st = new ShardingTest({
+ shards: {rs0: {nodes: [{binVersion: latest}, {binVersion: latest}]}},
+ other: {useBridge: true}
+});
+mongosAdminDB = st.s.getDB("admin");
+configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
+shardPrimaryAdminDB = st.rs0.getPrimary().getDB("admin");
+
+checkFCV(configPrimaryAdminDB, latestFCV);
+checkFCV(shardPrimaryAdminDB, latestFCV);
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to invalid value on mongos");
+assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: 5}));
+assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
+assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields on mongos");
+assert.commandFailed(
+ mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, unknown: 1}));
+
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database on mongos");
+assert.commandFailed(
+ st.s.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter on mongos");
+assert.commandFailed(
+ mongosAdminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStableFCV}));
+
+// Prevent the shard primary from receiving messages from the config server primary. When we try
+// to set FCV to 'lastStableFCV', the command should fail because the shard cannot be contacted.
+st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 1.0);
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be set because the shard primary is not reachable");
+assert.commandFailed(
+ mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000}));
+checkFCV(configPrimaryAdminDB, lastStableFCV, lastStableFCV /* indicates downgrade in progress */);
+st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 0.0);
+
+// FCV can be set to 'lastStableFCV' on mongos.
+// This is run through assert.soon() because we've just caused a network interruption
+// by discarding messages in the bridge.
+assert.soon(function() {
+ res = mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV});
+ if (res.ok == 0) {
+ print("Failed to set feature compatibility version: " + tojson(res));
+ return false;
+ }
+ return true;
+});
+
+// featureCompatibilityVersion propagates to config and shard.
+checkFCV(configPrimaryAdminDB, lastStableFCV);
+checkFCV(shardPrimaryAdminDB, lastStableFCV);
+
+// A 'latest' binary replica set started as a shard server defaults to 'lastStableFCV'.
+let latestShard = new ReplSetTest({
+ name: "latestShard",
+ nodes: [{binVersion: latest}, {binVersion: latest}],
+ nodeOptions: {shardsvr: ""},
+ useHostName: true
+});
+latestShard.startSet();
+latestShard.initiate();
+let latestShardPrimaryAdminDB = latestShard.getPrimary().getDB("admin");
+checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
+assert.commandWorked(mongosAdminDB.runCommand({addShard: latestShard.getURL()}));
+checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
+
+// FCV can be set to 'latestFCV' on mongos.
+assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
+checkFCV(shardPrimaryAdminDB, latestFCV);
+checkFCV(latestShardPrimaryAdminDB, latestFCV);
+
+// Call ShardingTest.stop before shutting down latestShard, so that the UUID check in
+// ShardingTest.stop can talk to latestShard.
+st.stop();
+latestShard.stopSet();
+
+// Create a cluster with a 'lastStable' binary mongos so that we can add 'lastStable' binary
+// shards.
+st = new ShardingTest({shards: 0, other: {mongosOptions: {binVersion: lastStable}}});
+mongosAdminDB = st.s.getDB("admin");
+configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
+checkFCV(configPrimaryAdminDB, lastStableFCV);
+
+// Adding a 'lastStable' binary shard to a cluster with 'lastStableFCV' succeeds.
+let lastStableShard = new ReplSetTest({
+ name: "lastStableShard",
+ nodes: [{binVersion: lastStable}, {binVersion: lastStable}],
+ nodeOptions: {shardsvr: ""},
+ useHostName: true
+});
+lastStableShard.startSet();
+lastStableShard.initiate();
+assert.commandWorked(mongosAdminDB.runCommand({addShard: lastStableShard.getURL()}));
+checkFCV(lastStableShard.getPrimary().getDB("admin"), lastStableFCV);
+
+// Call ShardingTest.stop before shutting down lastStableShard, so that the UUID check in
+// ShardingTest.stop can talk to lastStableShard.
+st.stop();
+lastStableShard.stopSet();
})();
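One pattern in the hunk above worth calling out: the test makes the write-concern timeout deterministic by pausing replication on the secondary with stopServerReplication (from jstests/libs/write_concern_util.js, loaded at the top). Distilled, the provoked failure looks like this, assuming the same two-node setup:

    // With the secondary's replication paused, a majority cannot acknowledge the FCV
    // write, so the command fails once wtimeout (one second here) expires.
    stopServerReplication(secondary);
    let res = primary.adminCommand(
        {setFeatureCompatibilityVersion: latestFCV, writeConcern: {wtimeout: 1000}});
    assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
    restartServerReplication(secondary);

Note the test then completes the interrupted upgrade before downgrading again, since the failed setFCV leaves the primary in the intermediate targetVersion state.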
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
index ba675fbae67..e80e36eb624 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
@@ -2,75 +2,72 @@
* Tests that upgrade/downgrade works correctly even while creating a new collection.
*/
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/parallel_shell_helpers.js");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/parallel_shell_helpers.js");
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- // Rig the election so that the first node is always primary and that modifying the
- // featureCompatibilityVersion document doesn't need to wait for data to replicate.
- var replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
+// Rig the election so that the first node is always primary and that modifying the
+// featureCompatibilityVersion document doesn't need to wait for data to replicate.
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
+rst.initiate(replSetConfig);
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB("test");
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB("test");
- for (let versions
- of[{from: lastStableFCV, to: latestFCV}, {from: latestFCV, to: lastStableFCV}]) {
- jsTestLog("Changing FeatureCompatibilityVersion from " + versions.from + " to " +
- versions.to + " while creating a collection");
- assert.commandWorked(
- primaryDB.adminCommand({setFeatureCompatibilityVersion: versions.from}));
+for (let versions of [{from: lastStableFCV, to: latestFCV}, {from: latestFCV, to: lastStableFCV}]) {
+ jsTestLog("Changing FeatureCompatibilityVersion from " + versions.from + " to " + versions.to +
+ " while creating a collection");
+ assert.commandWorked(primaryDB.adminCommand({setFeatureCompatibilityVersion: versions.from}));
- assert.commandWorked(primaryDB.adminCommand(
- {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "alwaysOn"}));
- primaryDB.mycoll.drop();
+ assert.commandWorked(primaryDB.adminCommand(
+ {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "alwaysOn"}));
+ primaryDB.mycoll.drop();
- let awaitCreateCollection;
- let awaitUpgradeFCV;
+ let awaitCreateCollection;
+ let awaitUpgradeFCV;
- try {
- awaitCreateCollection = startParallelShell(function() {
- assert.commandWorked(db.runCommand({create: "mycoll"}));
- }, primary.port);
+ try {
+ awaitCreateCollection = startParallelShell(function() {
+ assert.commandWorked(db.runCommand({create: "mycoll"}));
+ }, primary.port);
- assert.soon(function() {
- return rawMongoProgramOutput().match("createCollection: test.mycoll");
- });
+ assert.soon(function() {
+ return rawMongoProgramOutput().match("createCollection: test.mycoll");
+ });
- awaitUpgradeFCV = startParallelShell(
- funWithArgs(function(version) {
- assert.commandWorked(
- db.adminCommand({setFeatureCompatibilityVersion: version}));
- }, versions.to), primary.port);
+ awaitUpgradeFCV = startParallelShell(
+ funWithArgs(function(version) {
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: version}));
+ }, versions.to), primary.port);
- {
- let res;
- assert.soon(
- function() {
- res = assert.commandWorked(primaryDB.adminCommand(
- {getParameter: 1, featureCompatibilityVersion: 1}));
- return res.featureCompatibilityVersion.version === versions.from &&
-                        res.featureCompatibilityVersion.targetVersion === versions.to;
- },
- function() {
- return "targetVersion of featureCompatibilityVersion document wasn't " +
- "updated on primary: " + tojson(res);
- });
- }
- } finally {
- assert.commandWorked(primaryDB.adminCommand(
- {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "off"}));
+ {
+ let res;
+ assert.soon(
+ function() {
+ res = assert.commandWorked(
+ primaryDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
+ return res.featureCompatibilityVersion.version === versions.from &&
+                    res.featureCompatibilityVersion.targetVersion === versions.to;
+ },
+ function() {
+ return "targetVersion of featureCompatibilityVersion document wasn't " +
+ "updated on primary: " + tojson(res);
+ });
}
-
- awaitCreateCollection();
- awaitUpgradeFCV();
- rst.checkReplicatedDataHashes();
+ } finally {
+ assert.commandWorked(primaryDB.adminCommand(
+ {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "off"}));
}
- rst.stopSet();
+
+ awaitCreateCollection();
+ awaitUpgradeFCV();
+ rst.checkReplicatedDataHashes();
+}
+rst.stopSet();
})();
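
`startParallelShell` serializes the function it is given into a new shell, so a closure cannot capture local variables directly; that is why the FCV change above is wrapped in `funWithArgs` from `jstests/libs/parallel_shell_helpers.js`. A hedged sketch of the idea behind that helper:

```javascript
// Hypothetical sketch of funWithArgs: build a function whose source applies
// the original function to tojson-serialized argument values, so the parallel
// shell re-creates the call with the captured values baked in.
const funWithArgsSketch = (fn, ...args) =>
    Function(`return (${fn.toString()})(${args.map(x => tojson(x)).join(",")})`);
```
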
diff --git a/jstests/multiVersion/hybrid_indexes.js b/jstests/multiVersion/hybrid_indexes.js
index 0dac45fad00..e21b15fc7ef 100644
--- a/jstests/multiVersion/hybrid_indexes.js
+++ b/jstests/multiVersion/hybrid_indexes.js
@@ -2,106 +2,104 @@
* Tests that hybrid index builds are only enabled in FCV 4.2.
*/
(function() {
- 'use strict';
-
- const dbName = "test";
- const collName = "hybrid_indexes";
- const dbpath = MongoRunner.dataPath + "hybrid_indexes";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true, dbpath: dbpath});
- let testDB = conn.getDB(dbName);
- let testColl = testDB[collName];
- testColl.insert({i: 0});
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- let buildIndex = function(config) {
- const background = config.background;
- const expected = config.expected;
-
- let res = testDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1});
- assert.commandWorked(res);
- let fcv = res.version;
-
- clearRawMongoProgramOutput();
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangBeforeIndexBuildOf', mode: "alwaysOn", data: {"i": 0}}));
-
- let awaitBuild;
- if (background) {
- awaitBuild = startParallelShell(function() {
- assert.commandWorked(db.hybrid_indexes.createIndex({i: 1}, {background: true}));
- }, conn.port);
- } else {
- awaitBuild = startParallelShell(function() {
- assert.commandWorked(db.hybrid_indexes.createIndex({i: 1}, {background: false}));
- }, conn.port);
- }
-
- let msg =
- "starting on test.hybrid_indexes properties: { v: 2, key: { i: 1.0 }, name: \"i_1\"" +
- ", ns: \"test.hybrid_indexes\", background: " + background + " } using method: " +
- expected;
- print(msg);
- assert.soon(() => rawMongoProgramOutput().indexOf(msg) >= 0, "Index build not started");
- assert.soon(() => rawMongoProgramOutput().indexOf("Hanging before index build of i=0") >= 0,
- "Index build not hanging");
-
- if (expected === "Background" || expected === "Hybrid") {
- assert.commandWorked(testColl.insert({i: 1}));
- } else {
- assert.commandFailedWithCode(
- testDB.runCommand({insert: collName, documents: [{i: 2}], maxTimeMS: 100}),
- ErrorCodes.MaxTimeMSExpired);
- }
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangBeforeIndexBuildOf', mode: "off"}));
- awaitBuild();
- assert.commandWorked(testColl.dropIndex("i_1"));
- };
-
- // Test: Background indexes behave as background indexes on FCV 4.0.
-
- buildIndex({background: true, expected: "Background"});
-
-    // Test: Foreground indexes behave as foreground indexes on FCV 4.0.
-
- buildIndex({background: false, expected: "Foreground"});
-
- // Test: Upgrade to FCV 4.2 while a background index build is in progress fails. This is subject
- // to change, but characterizes the current behavior.
+'use strict';
- clearRawMongoProgramOutput();
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: "alwaysOn"}));
+const dbName = "test";
+const collName = "hybrid_indexes";
+const dbpath = MongoRunner.dataPath + "hybrid_indexes";
- let awaitBuild = startParallelShell(function() {
- // This fails because of the unlock failpoint.
- assert.commandFailedWithCode(db.hybrid_indexes.createIndex({i: 1}, {background: true}),
- ErrorCodes.OperationFailed);
- }, conn.port);
+load("jstests/libs/feature_compatibility_version.js");
- assert.soon(() => rawMongoProgramOutput().indexOf("Hanging index build with no locks") >= 0,
- "Index build not hanging");
+let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true, dbpath: dbpath});
+let testDB = conn.getDB(dbName);
+let testColl = testDB[collName];
+testColl.insert({i: 0});
+assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- assert.commandFailedWithCode(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}),
- ErrorCodes.BackgroundOperationInProgressForNamespace);
+let buildIndex = function(config) {
+ const background = config.background;
+ const expected = config.expected;
+ let res = testDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ assert.commandWorked(res);
+ let fcv = res.version;
+
+ clearRawMongoProgramOutput();
assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: "off"}));
+ {configureFailPoint: 'hangBeforeIndexBuildOf', mode: "alwaysOn", data: {"i": 0}}));
+
+ let awaitBuild;
+ if (background) {
+ awaitBuild = startParallelShell(function() {
+ assert.commandWorked(db.hybrid_indexes.createIndex({i: 1}, {background: true}));
+ }, conn.port);
+ } else {
+ awaitBuild = startParallelShell(function() {
+ assert.commandWorked(db.hybrid_indexes.createIndex({i: 1}, {background: false}));
+ }, conn.port);
+ }
+
+ let msg = "starting on test.hybrid_indexes properties: { v: 2, key: { i: 1.0 }, name: \"i_1\"" +
+ ", ns: \"test.hybrid_indexes\", background: " + background + " } using method: " + expected;
+ print(msg);
+ assert.soon(() => rawMongoProgramOutput().indexOf(msg) >= 0, "Index build not started");
+ assert.soon(() => rawMongoProgramOutput().indexOf("Hanging before index build of i=0") >= 0,
+ "Index build not hanging");
+
+ if (expected === "Background" || expected === "Hybrid") {
+ assert.commandWorked(testColl.insert({i: 1}));
+ } else {
+ assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [{i: 2}], maxTimeMS: 100}),
+ ErrorCodes.MaxTimeMSExpired);
+ }
+
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangBeforeIndexBuildOf', mode: "off"}));
awaitBuild();
+ assert.commandWorked(testColl.dropIndex("i_1"));
+};
+
+// Test: Background indexes behave as background indexes on FCV 4.0.
+
+buildIndex({background: true, expected: "Background"});
+
+// Test: Foreground indexes behave as foreground indexes on FCV 4.0.
+
+buildIndex({background: false, expected: "Foreground"});
+
+// Test: Upgrade to FCV 4.2 while a background index build is in progress fails. This is subject
+// to change, but characterizes the current behavior.
+
+clearRawMongoProgramOutput();
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: "alwaysOn"}));
+
+let awaitBuild = startParallelShell(function() {
+ // This fails because of the unlock failpoint.
+ assert.commandFailedWithCode(db.hybrid_indexes.createIndex({i: 1}, {background: true}),
+ ErrorCodes.OperationFailed);
+}, conn.port);
+
+assert.soon(() => rawMongoProgramOutput().indexOf("Hanging index build with no locks") >= 0,
+ "Index build not hanging");
+
+assert.commandFailedWithCode(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}),
+ ErrorCodes.BackgroundOperationInProgressForNamespace);
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: "off"}));
+awaitBuild();
- // Test: Background indexes behave as hybrid indexes on FCV 4.2.
+// Test: Background indexes behave as hybrid indexes on FCV 4.2.
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
- buildIndex({background: true, expected: "Hybrid"});
+buildIndex({background: true, expected: "Hybrid"});
- // Test: Foreground indexes behave as hybrid indexes on FCV 4.2.
+// Test: Foreground indexes behave as hybrid indexes on FCV 4.2.
- buildIndex({background: false, expected: "Hybrid"});
+buildIndex({background: false, expected: "Hybrid"});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
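
Both halves of this test rely on the same failpoint pattern: enable a named failpoint, wait for the server log to confirm the operation is hanging, run the assertions of interest, then disable the failpoint. Reduced to its skeleton, with the names taken from the test above:

```javascript
// Skeleton of the failpoint dance used in buildIndex() above.
assert.commandWorked(testDB.adminCommand(
    {configureFailPoint: 'hangBeforeIndexBuildOf', mode: "alwaysOn", data: {"i": 0}}));
assert.soon(() => rawMongoProgramOutput().indexOf("Hanging before index build of i=0") >= 0,
            "Index build not hanging");
// ... assertions while the index build is paused ...
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: 'hangBeforeIndexBuildOf', mode: "off"}));
```
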
diff --git a/jstests/multiVersion/index_bigkeys.js b/jstests/multiVersion/index_bigkeys.js
index 3e8955e1ea3..d4bfcc70640 100644
--- a/jstests/multiVersion/index_bigkeys.js
+++ b/jstests/multiVersion/index_bigkeys.js
@@ -102,89 +102,88 @@ function downgradeAndVerifyBehavior(testDowngradeBehaviorFunc) {
}
(function() {
- load("jstests/libs/feature_compatibility_version.js");
-
- // Test the behavior of inserting big index keys of each version.
- // 4.2 binary (with FCV 4.2)
- let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true});
- testInsertDocumentWithLargeKey(conn, false);
-
- // 4.2 binary (with FCV 4.0)
+load("jstests/libs/feature_compatibility_version.js");
+
+// Test the behavior of inserting big index keys under each binary/FCV combination.
+// 4.2 binary (with FCV 4.2)
+let conn = MongoRunner.runMongod({binVersion: "latest", cleanData: true});
+testInsertDocumentWithLargeKey(conn, false);
+
+// 4.2 binary (with FCV 4.0)
+assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+testInsertDocumentWithLargeKey(conn, true);
+MongoRunner.stopMongod(conn);
+
+// 4.0 binary
+conn = MongoRunner.runMongod({binVersion: "4.0", cleanData: true});
+testInsertDocumentWithLargeKey(conn, true);
+MongoRunner.stopMongod(conn);
+
+// Downgrade path
+// 1. Test that the 4.0 binary can read and delete big index keys that were
+//    inserted by the 4.2 binary.
+downgradeAndVerifyBehavior(testColl => {
+ assert.commandWorked(testColl.remove({x: largeKey}));
+ assert(testColl.validate().valid);
+});
+
+// 2. Test that the 4.0 binary can replace big keys with small keys via an update.
+downgradeAndVerifyBehavior(testColl => {
+ assert.commandWorked(testColl.update({x: largeKey}, {$set: {x: "sss"}}));
+ assert.eq("sss", testColl.find({x: "sss"}).toArray()[0].x);
+});
+
+// 3. Test that the 4.0 binary can drop an index containing big keys and that
+//    validation succeeds afterward.
+downgradeAndVerifyBehavior(testColl => {
+ assert.eq(2, testColl.getIndexes().length);
+ assert(!testColl.validate().valid);
+ assert.commandWorked(testColl.dropIndex({x: 1}));
+ assert.eq(1, testColl.getIndexes().length);
+ assert(testColl.validate().valid);
+});
+
+// Upgrade path
+// 1. Test the normal upgrade path.
+[true, false].forEach(function(uniqueIndex) {
+ // Upgrade all the way to 4.2 binary with FCV 4.2.
+ let conn = MongoRunner.runMongod({binVersion: "4.0", cleanData: true, dbpath: dbpath});
+ assert.commandWorked(
+ conn.getDB(dbName)[collName].createIndex({x: 1}, {name: "x_1", unique: uniqueIndex}));
assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- testInsertDocumentWithLargeKey(conn, true);
- MongoRunner.stopMongod(conn);
-
- // 4.0 binary
- conn = MongoRunner.runMongod({binVersion: "4.0", cleanData: true});
- testInsertDocumentWithLargeKey(conn, true);
MongoRunner.stopMongod(conn);
-
- // Downgrade path
- // 1. Test that 4.0 binary could read and delete big index keys which got
- // inserted by 4.2 binary.
- downgradeAndVerifyBehavior(testColl => {
- assert.commandWorked(testColl.remove({x: largeKey}));
- assert(testColl.validate().valid);
- });
-
- // 2. Test that 4.0 binary could update big keys with small keys.
- downgradeAndVerifyBehavior(testColl => {
- assert.commandWorked(testColl.update({x: largeKey}, {$set: {x: "sss"}}));
- assert.eq("sss", testColl.find({x: "sss"}).toArray()[0].x);
- });
-
- // 3. Test that 4.0 binary could drop the index which has big keys and the
- // validate will succeed after that.
- downgradeAndVerifyBehavior(testColl => {
- assert.eq(2, testColl.getIndexes().length);
- assert(!testColl.validate().valid);
- assert.commandWorked(testColl.dropIndex({x: 1}));
- assert.eq(1, testColl.getIndexes().length);
- assert(testColl.validate().valid);
- });
-
- // Upgrade path
- // 1. Test the normal upgrade path.
- [true, false].forEach(function(uniqueIndex) {
- // Upgrade all the way to 4.2 binary with FCV 4.2.
- let conn = MongoRunner.runMongod({binVersion: "4.0", cleanData: true, dbpath: dbpath});
- assert.commandWorked(
- conn.getDB(dbName)[collName].createIndex({x: 1}, {name: "x_1", unique: uniqueIndex}));
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({binVersion: "latest", noCleanData: true, dbpath: dbpath});
- // Setting the FCV to 4.2
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- assert.commandWorked(
- conn.getDB(dbName).runCommand({insert: collName, documents: [documentWithLargeKey]}));
- MongoRunner.stopMongod(conn, null, {skipValidation: false});
+ conn = MongoRunner.runMongod({binVersion: "latest", noCleanData: true, dbpath: dbpath});
+ // Setting the FCV to 4.2
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ assert.commandWorked(
+ conn.getDB(dbName).runCommand({insert: collName, documents: [documentWithLargeKey]}));
+ MongoRunner.stopMongod(conn, null, {skipValidation: false});
+});
+
+// 2. If the 4.0 binary has already inserted documents with large keys with
+// 'failIndexKeyTooLong' set to false (which skips inserting the over-limit index key), the
+// 4.2 binary cannot validate index consistency because some index keys are missing.
+// Reindexing resolves the problem.
+[true, false].forEach(function(uniqueIndex) {
+ let conn = MongoRunner.runMongod({
+ binVersion: "4.0",
+ cleanData: true,
+ setParameter: "failIndexKeyTooLong=false",
+ dbpath: dbpath
});
+ assert.commandWorked(
+ conn.getDB(dbName)[collName].createIndex({x: 1}, {name: "x_1", unique: uniqueIndex}));
+ assert.commandWorked(conn.getDB(dbName)[collName].insert(documentWithLargeKey));
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+ MongoRunner.stopMongod(conn);
+ conn = MongoRunner.runMongod({binVersion: "latest", noCleanData: true, dbpath: dbpath});
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- // 2. If 4.0 binary has already inserted documents with large keys by setting
- // 'failIndexKeyTooLong' to be false (which bypasses inserting the index key), 4.2 binary cannot
- // successfully validate the index consistency because some index keys are missing. But reindex
- // should solve this problem.
- [true, false].forEach(function(uniqueIndex) {
- let conn = MongoRunner.runMongod({
- binVersion: "4.0",
- cleanData: true,
- setParameter: "failIndexKeyTooLong=false",
- dbpath: dbpath
- });
- assert.commandWorked(
- conn.getDB(dbName)[collName].createIndex({x: 1}, {name: "x_1", unique: uniqueIndex}));
- assert.commandWorked(conn.getDB(dbName)[collName].insert(documentWithLargeKey));
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({binVersion: "latest", noCleanData: true, dbpath: dbpath});
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- let testColl = conn.getDB(dbName)[collName];
- assert(!testColl.validate().valid);
- testColl.reIndex();
- assert(testColl.validate().valid);
-
- MongoRunner.stopMongod(conn, null, {skipValidation: false});
- });
+ let testColl = conn.getDB(dbName)[collName];
+ assert(!testColl.validate().valid);
+ testColl.reIndex();
+ assert(testColl.validate().valid);
+ MongoRunner.stopMongod(conn, null, {skipValidation: false});
+});
}());
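
The `largeKey` and `documentWithLargeKey` fixtures referenced throughout this file are defined above this hunk. Their assumed shape, for reference: a string of a few thousand bytes comfortably exceeds the 1024-byte index key limit that applies in FCV 4.0 (and to the 4.0 binary), which is what produces the `KeyTooLong` failures and failed-validation states these cases exercise.

```javascript
// Assumed fixture shape (defined earlier in the file, outside this hunk).
const largeKey = 's'.repeat(12345);          // far beyond the 1024-byte FCV 4.0 limit
const documentWithLargeKey = {x: largeKey};
```
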
diff --git a/jstests/multiVersion/index_bigkeys_feature_tracker.js b/jstests/multiVersion/index_bigkeys_feature_tracker.js
index f784aea572e..1efed409911 100644
--- a/jstests/multiVersion/index_bigkeys_feature_tracker.js
+++ b/jstests/multiVersion/index_bigkeys_feature_tracker.js
@@ -6,176 +6,165 @@
* TODO SERVER-36385: Remove this test in the master branch once we have created a 4.2 branch.
*/
(function() {
- "use strict";
-
- const collName = "index_bigkeys";
-
- function insertIndexKey(
- createIndexFirst, db, collName, docToInsert, backgroundIndexBuild, update) {
- if (createIndexFirst) {
- assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: "x_1", background: backgroundIndexBuild}]
- }));
- if (update) {
- assert.commandWorked(
- db.runCommand({insert: collName, documents: [{_id: docToInsert._id, x: 1}]}));
- assert.commandWorked(db.runCommand({
- update: collName,
- updates: [{q: {_id: docToInsert._id}, u: {x: docToInsert}}]
- }));
- } else {
- // This will insert a feature tracker bit on disk.
- assert.commandWorked(db.runCommand({insert: collName, documents: [docToInsert]}));
- }
+"use strict";
+
+const collName = "index_bigkeys";
+
+function insertIndexKey(createIndexFirst, db, collName, docToInsert, backgroundIndexBuild, update) {
+ if (createIndexFirst) {
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: "x_1", background: backgroundIndexBuild}]
+ }));
+ if (update) {
+ assert.commandWorked(
+ db.runCommand({insert: collName, documents: [{_id: docToInsert._id, x: 1}]}));
+ assert.commandWorked(db.runCommand(
+ {update: collName, updates: [{q: {_id: docToInsert._id}, u: {x: docToInsert}}]}));
} else {
- if (update) {
- assert.commandWorked(
- db.runCommand({insert: collName, documents: [{_id: docToInsert._id, x: 1}]}));
- assert.commandWorked(db.runCommand({
- update: collName,
- updates: [{q: {_id: docToInsert._id}, u: {x: docToInsert}}]
- }));
- } else {
- // This will insert a feature tracker bit on disk.
- assert.commandWorked(db.runCommand({insert: collName, documents: [docToInsert]}));
- }
// This will insert a feature tracker bit on disk.
- assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: "x_1", background: backgroundIndexBuild}]
- }));
+ assert.commandWorked(db.runCommand({insert: collName, documents: [docToInsert]}));
}
- }
-
- function logTestParameters(
- docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update) {
- let output = {
- "docToInsert._id": docToInsert._id,
- shouldFailOnStartup: shouldFailOnStartup,
- createIndexFirst: createIndexFirst,
- backgroundIndexBuild: backgroundIndexBuild,
- update: update
- };
- jsTestLog("Testing with parameters: " + tojson(output));
- }
-
- const dbpath = MongoRunner.dataPath + "index_bigkeys_feature_tracker";
-
- function testInsertIndexKeyAndDowngradeStandalone(
- docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update) {
- logTestParameters(
- docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update);
- const conn = MongoRunner.runMongod({binVersion: "latest", dbpath: dbpath});
-
- insertIndexKey(createIndexFirst,
- conn.getDB("test"),
- collName,
- docToInsert,
- backgroundIndexBuild,
- update);
-
- // Downgrade the FCV to 4.0
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- // Index validation would fail because validation code assumes big index keys are not
- // indexed in FCV 4.0.
- MongoRunner.stopMongod(conn, null, {skipValidation: true});
-
- if (shouldFailOnStartup) {
- // 4.0 binary should fail on start up due to the new feature tracker bit.
- assert.eq(
- null,
- MongoRunner.runMongod({binVersion: "4.0", noCleanData: true, dbpath: dbpath}));
+ } else {
+ if (update) {
+ assert.commandWorked(
+ db.runCommand({insert: collName, documents: [{_id: docToInsert._id, x: 1}]}));
+ assert.commandWorked(db.runCommand(
+ {update: collName, updates: [{q: {_id: docToInsert._id}, u: {x: docToInsert}}]}));
} else {
- const conn =
- MongoRunner.runMongod({binVersion: "4.0", noCleanData: true, dbpath: dbpath});
- assert.neq(null, conn);
- MongoRunner.stopMongod(conn, null, {skipValidation: true});
+ // This will insert a feature tracker bit on disk.
+ assert.commandWorked(db.runCommand({insert: collName, documents: [docToInsert]}));
}
+ // This will insert a feature tracker bit on disk.
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: "x_1", background: backgroundIndexBuild}]
+ }));
}
-
- function testInsertIndexKeyAndDowngradeReplset(
- docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update) {
- logTestParameters(
- docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update);
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
-
- insertIndexKey(createIndexFirst,
- primary.getDB("test"),
- collName,
- docToInsert,
- backgroundIndexBuild,
- update);
-
- // Downgrade the FCV to 4.0
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- // Index validation would fail because validation code assumes big index keys are not
- // indexed in FCV 4.0.
- rst.stopSet(undefined, undefined, {noCleanData: true, skipValidation: true});
-
- if (shouldFailOnStartup) {
- // 4.0 binary should fail on start up due to the new feature tracker bit.
- assert.throws(function() {
- rst.start(0, {binVersion: "4.0", noCleanData: true}, true);
- });
- } else {
- const conn = rst.start(0, {binVersion: "4.0", noCleanData: true}, true);
- assert.neq(null, conn);
- rst.stopSet(undefined, undefined, {skipValidation: true});
- }
+}
+
+function logTestParameters(
+ docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update) {
+ let output = {
+ "docToInsert._id": docToInsert._id,
+ shouldFailOnStartup: shouldFailOnStartup,
+ createIndexFirst: createIndexFirst,
+ backgroundIndexBuild: backgroundIndexBuild,
+ update: update
+ };
+ jsTestLog("Testing with parameters: " + tojson(output));
+}
+
+const dbpath = MongoRunner.dataPath + "index_bigkeys_feature_tracker";
+
+function testInsertIndexKeyAndDowngradeStandalone(
+ docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update) {
+ logTestParameters(
+ docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update);
+ const conn = MongoRunner.runMongod({binVersion: "latest", dbpath: dbpath});
+
+ insertIndexKey(
+ createIndexFirst, conn.getDB("test"), collName, docToInsert, backgroundIndexBuild, update);
+
+ // Downgrade the FCV to 4.0
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+ // Index validation would fail because validation code assumes big index keys are not
+ // indexed in FCV 4.0.
+ MongoRunner.stopMongod(conn, null, {skipValidation: true});
+
+ if (shouldFailOnStartup) {
+ // 4.0 binary should fail on start up due to the new feature tracker bit.
+ assert.eq(null,
+ MongoRunner.runMongod({binVersion: "4.0", noCleanData: true, dbpath: dbpath}));
+ } else {
+ const conn = MongoRunner.runMongod({binVersion: "4.0", noCleanData: true, dbpath: dbpath});
+ assert.neq(null, conn);
+ MongoRunner.stopMongod(conn, null, {skipValidation: true});
}
-
- const largeKeyWithShortTypeBits = 's'.repeat(12345);
- const largeKeyWithLongTypeBits = (() => {
- // {a : [0,1,2, ... ,9999] }
- return {a: Array.from({length: 10000}, (value, i) => i)};
- })();
-
- // Tests for standalone
- jsTestLog("Test for standalone");
- [true, false].forEach(function(backgroundIndexBuild) {
- [true, false].forEach(function(createIndexFirst) {
- [true, false].forEach(function(update) {
- testInsertIndexKeyAndDowngradeStandalone(
- {_id: "shortTypeBits", x: largeKeyWithShortTypeBits},
- false,
- createIndexFirst,
- backgroundIndexBuild,
- update);
- testInsertIndexKeyAndDowngradeStandalone(
- {_id: "longTypeBits", x: largeKeyWithLongTypeBits},
- true,
- createIndexFirst,
- backgroundIndexBuild,
- update);
- });
+}
+
+function testInsertIndexKeyAndDowngradeReplset(
+ docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update) {
+ logTestParameters(
+ docToInsert, shouldFailOnStartup, createIndexFirst, backgroundIndexBuild, update);
+ const rst = new ReplSetTest({nodes: 1});
+ rst.startSet();
+ rst.initiate();
+
+ const primary = rst.getPrimary();
+
+ insertIndexKey(createIndexFirst,
+ primary.getDB("test"),
+ collName,
+ docToInsert,
+ backgroundIndexBuild,
+ update);
+
+ // Downgrade the FCV to 4.0
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+ // Index validation would fail because validation code assumes big index keys are not
+ // indexed in FCV 4.0.
+ rst.stopSet(undefined, undefined, {noCleanData: true, skipValidation: true});
+
+ if (shouldFailOnStartup) {
+ // 4.0 binary should fail on start up due to the new feature tracker bit.
+ assert.throws(function() {
+ rst.start(0, {binVersion: "4.0", noCleanData: true}, true);
+ });
+ } else {
+ const conn = rst.start(0, {binVersion: "4.0", noCleanData: true}, true);
+ assert.neq(null, conn);
+ rst.stopSet(undefined, undefined, {skipValidation: true});
+ }
+}
+
+const largeKeyWithShortTypeBits = 's'.repeat(12345);
+const largeKeyWithLongTypeBits = (() => {
+ // {a : [0,1,2, ... ,9999] }
+ return {a: Array.from({length: 10000}, (value, i) => i)};
+})();
+
+// Tests for standalone
+jsTestLog("Test for standalone");
+[true, false].forEach(function(backgroundIndexBuild) {
+ [true, false].forEach(function(createIndexFirst) {
+ [true, false].forEach(function(update) {
+ testInsertIndexKeyAndDowngradeStandalone(
+ {_id: "shortTypeBits", x: largeKeyWithShortTypeBits},
+ false,
+ createIndexFirst,
+ backgroundIndexBuild,
+ update);
+ testInsertIndexKeyAndDowngradeStandalone(
+ {_id: "longTypeBits", x: largeKeyWithLongTypeBits},
+ true,
+ createIndexFirst,
+ backgroundIndexBuild,
+ update);
});
});
-
- // Tests for replset
- jsTestLog("Test for replset");
- [true, false].forEach(function(backgroundIndexBuild) {
- [true, false].forEach(function(createIndexFirst) {
- [true, false].forEach(function(update) {
- testInsertIndexKeyAndDowngradeReplset(
- {_id: "shortTypeBits", x: largeKeyWithShortTypeBits},
- false,
- createIndexFirst,
- backgroundIndexBuild,
- update);
- testInsertIndexKeyAndDowngradeReplset(
- {_id: "longTypeBits", x: largeKeyWithLongTypeBits},
- true,
- createIndexFirst,
- backgroundIndexBuild,
- update);
- });
+});
+
+// Tests for replset
+jsTestLog("Test for replset");
+[true, false].forEach(function(backgroundIndexBuild) {
+ [true, false].forEach(function(createIndexFirst) {
+ [true, false].forEach(function(update) {
+ testInsertIndexKeyAndDowngradeReplset(
+ {_id: "shortTypeBits", x: largeKeyWithShortTypeBits},
+ false,
+ createIndexFirst,
+ backgroundIndexBuild,
+ update);
+ testInsertIndexKeyAndDowngradeReplset(
+ {_id: "longTypeBits", x: largeKeyWithLongTypeBits},
+ true,
+ createIndexFirst,
+ backgroundIndexBuild,
+ update);
});
});
+});
}());
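
The triply nested `[true, false].forEach` loops enumerate every combination of `backgroundIndexBuild`, `createIndexFirst`, and `update`: a 2x2x2 grid, so each document shape is exercised under eight parameter sets. An equivalent flat enumeration, shown only for reference:

```javascript
// Sketch: the eight boolean combinations driven by the nested loops above.
for (let mask = 0; mask < 8; mask++) {
    const backgroundIndexBuild = Boolean(mask & 4);
    const createIndexFirst = Boolean(mask & 2);
    const update = Boolean(mask & 1);
    // ...run the standalone and replset cases with these parameters...
}
```
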
diff --git a/jstests/multiVersion/index_bigkeys_mixed_version_replset.js b/jstests/multiVersion/index_bigkeys_mixed_version_replset.js
index 09b971e1f24..4ebef10a3c7 100644
--- a/jstests/multiVersion/index_bigkeys_mixed_version_replset.js
+++ b/jstests/multiVersion/index_bigkeys_mixed_version_replset.js
@@ -4,39 +4,41 @@
* TODO SERVER-36385: remove this test in 4.4.
*/
(function() {
- 'use strict';
-
- load("jstests/libs/feature_compatibility_version.js");
-
- TestData.replSetFeatureCompatibilityVersion = "4.0";
- const rst = new ReplSetTest({
- nodes: [
- {binVersion: 'latest'},
- {rsConfig: {priority: 0, votes: 0}},
- ]
- });
- rst.startSet();
- rst.initiate();
- rst.restart(1, {binVersion: '4.0'});
-
- const dbName = "test";
- const collName = "index_bigkeys";
-
- const largeKey = 's'.repeat(12345);
- const documentWithLargeKey = {x: largeKey};
-
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
-
- testColl.drop();
- assert.commandWorked(
- testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
-
- assert.commandFailedWithCode(
- testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}),
- ErrorCodes.KeyTooLong);
- assert.eq(0, testColl.count());
-
- rst.stopSet();
+'use strict';
+
+load("jstests/libs/feature_compatibility_version.js");
+
+TestData.replSetFeatureCompatibilityVersion = "4.0";
+const rst = new ReplSetTest({
+ nodes: [
+ {binVersion: 'latest'},
+ {rsConfig: {priority: 0, votes: 0}},
+ ]
+});
+rst.startSet();
+rst.initiate();
+rst.restart(1, {binVersion: '4.0'});
+
+const dbName = "test";
+const collName = "index_bigkeys";
+
+const largeKey = 's'.repeat(12345);
+const documentWithLargeKey = {
+ x: largeKey
+};
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+
+testColl.drop();
+assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}));
+
+assert.commandFailedWithCode(
+ testDB.runCommand({insert: collName, documents: [documentWithLargeKey]}),
+ ErrorCodes.KeyTooLong);
+assert.eq(0, testColl.count());
+
+rst.stopSet();
}());
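
`TestData.replSetFeatureCompatibilityVersion` is honored by `ReplSetTest` during initiation; conceptually it amounts to running `setFeatureCompatibilityVersion` once a primary is available, which is why the 'latest' node above still enforces the FCV 4.0 key size limit. A hedged approximation of that step (the real handling lives inside `ReplSetTest`):

```javascript
// Approximate sketch; the real logic is inside ReplSetTest's initiate path.
if (TestData.replSetFeatureCompatibilityVersion) {
    assert.commandWorked(rst.getPrimary().adminCommand(
        {setFeatureCompatibilityVersion: TestData.replSetFeatureCompatibilityVersion}));
}
```
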
diff --git a/jstests/multiVersion/index_bigkeys_secondary_downgrade_during_index_build_background.js b/jstests/multiVersion/index_bigkeys_secondary_downgrade_during_index_build_background.js
index 073c2ae510f..06e1a5be99c 100644
--- a/jstests/multiVersion/index_bigkeys_secondary_downgrade_during_index_build_background.js
+++ b/jstests/multiVersion/index_bigkeys_secondary_downgrade_during_index_build_background.js
@@ -4,69 +4,71 @@
* TODO SERVER-36385: remove this test in 4.4.
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/feature_compatibility_version.js");
+load('jstests/noPassthrough/libs/index_build.js');
- TestData.replSetFeatureCompatibilityVersion = "4.2";
- const rst = new ReplSetTest({nodes: [{binVersion: 'latest'}, {binVersion: 'latest'}]});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+TestData.replSetFeatureCompatibilityVersion = "4.2";
+const rst = new ReplSetTest({nodes: [{binVersion: 'latest'}, {binVersion: 'latest'}]});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- const dbName = "test";
- const collName = "index_bigkeys_downgrade_during_index_build";
+const dbName = "test";
+const collName = "index_bigkeys_downgrade_during_index_build";
- const largeKey = 's'.repeat(12345);
- const documentWithLargeKey = {x: largeKey};
+const largeKey = 's'.repeat(12345);
+const documentWithLargeKey = {
+ x: largeKey
+};
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const primaryDB = primary.getDB(dbName);
- const secondaryDB = secondary.getDB(dbName);
- const testColl = primaryDB[collName];
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const primaryDB = primary.getDB(dbName);
+const secondaryDB = secondary.getDB(dbName);
+const testColl = primaryDB[collName];
- testColl.drop({writeConcern: {w: 2}});
+testColl.drop({writeConcern: {w: 2}});
- // Both primary and secondary have documents with large keys.
- let documents = [];
- for (let i = 0; i < 10; i++) {
- documents.push(documentWithLargeKey);
- }
- assert.commandWorked(
- primaryDB.runCommand({insert: collName, documents: documents, writeConcern: {w: 2}}));
+// Both primary and secondary have documents with large keys.
+let documents = [];
+for (let i = 0; i < 10; i++) {
+ documents.push(documentWithLargeKey);
+}
+assert.commandWorked(
+ primaryDB.runCommand({insert: collName, documents: documents, writeConcern: {w: 2}}));
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
- // Start the index build on the primary.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", background: true}]}));
+// Start the index build on the primary.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", background: true}]}));
- // Make sure index build starts on the secondary.
- IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
+// Make sure index build starts on the secondary.
+IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
- // Downgrade the FCV to 4.0
- assert.commandWorked(primaryDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+// Downgrade the FCV to 4.0
+assert.commandWorked(primaryDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- // Make sure the secondary has FCV 4.0
- assert.soon(() => {
- let res = secondaryDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1});
- assert.commandWorked(res);
- return res.featureCompatibilityVersion.version == "4.0";
- });
+// Make sure the secondary has FCV 4.0
+assert.soon(() => {
+ let res = secondaryDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ assert.commandWorked(res);
+ return res.featureCompatibilityVersion.version == "4.0";
+});
- // Continue index build on the secondary. There should be no KeyTooLong error.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+// Continue index build on the secondary. There should be no KeyTooLong error.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- // Make sure the index is successfully created.
- assert.soon(() => {
- return secondaryDB[collName].getIndexes().length == 2;
- });
+// Make sure the index is successfully created.
+assert.soon(() => {
+ return secondaryDB[collName].getIndexes().length == 2;
+});
- const signal = true; // Use default kill signal.
- const forRestart = false;
- rst.stopSet(signal, forRestart, {skipValidation: true});
+const signal = true; // Use default kill signal.
+const forRestart = false;
+rst.stopSet(signal, forRestart, {skipValidation: true});
}());
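
`IndexBuildTest.waitForIndexBuildToStart` comes from `jstests/noPassthrough/libs/index_build.js`, loaded at the top of this test. Conceptually it polls `currentOp` until a `createIndexes` operation is reported; a hedged approximation (the real helper's filter and messages differ):

```javascript
// Approximate sketch; 'command.createIndexes' as the filter is an assumption.
assert.soon(
    () => secondaryDB.currentOp({'command.createIndexes': {$exists: true}}).inprog.length > 0,
    "index build did not start on the secondary");
```
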
diff --git a/jstests/multiVersion/initialize_from_old_node.js b/jstests/multiVersion/initialize_from_old_node.js
index 0335ec9bf56..ea89a9c0adc 100644
--- a/jstests/multiVersion/initialize_from_old_node.js
+++ b/jstests/multiVersion/initialize_from_old_node.js
@@ -4,22 +4,22 @@
*/
(function() {
- "use strict";
- var name = "initialize_from_old";
- var oldVersion = 'last-stable';
- var newVersion = 'latest';
- var nodes = {
- n0: {binVersion: oldVersion},
- n1: {binVersion: newVersion},
- n2: {binVersion: newVersion}
- };
- var rst = new ReplSetTest({nodes: nodes, name: name});
- var conns = rst.startSet();
- var oldNode = conns[0];
- var config = rst.getReplSetConfig();
- var response = oldNode.getDB("admin").runCommand({replSetInitiate: config});
- assert.commandWorked(response);
- // Wait for secondaries to finish their initial sync before shutting down the cluster.
- rst.awaitSecondaryNodes();
- rst.stopSet();
+"use strict";
+var name = "initialize_from_old";
+var oldVersion = 'last-stable';
+var newVersion = 'latest';
+var nodes = {
+ n0: {binVersion: oldVersion},
+ n1: {binVersion: newVersion},
+ n2: {binVersion: newVersion}
+};
+var rst = new ReplSetTest({nodes: nodes, name: name});
+var conns = rst.startSet();
+var oldNode = conns[0];
+var config = rst.getReplSetConfig();
+var response = oldNode.getDB("admin").runCommand({replSetInitiate: config});
+assert.commandWorked(response);
+// Wait for secondaries to finish their initial sync before shutting down the cluster.
+rst.awaitSecondaryNodes();
+rst.stopSet();
})();
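
For reference, `rst.getReplSetConfig()` returns a plain config document that `replSetInitiate` accepts as-is; it is shaped roughly like the following, with per-run hostnames and ports (the values here are placeholders):

```javascript
// Hypothetical example of the config document passed to replSetInitiate.
var exampleConfig = {
    _id: "initialize_from_old",
    members: [
        {_id: 0, host: "localhost:20000"},
        {_id: 1, host: "localhost:20001"},
        {_id: 2, host: "localhost:20002"}
    ]
};
```
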
diff --git a/jstests/multiVersion/json_schema_encrypt_fcv.js b/jstests/multiVersion/json_schema_encrypt_fcv.js
index e5fc30815ca..79928c68452 100644
--- a/jstests/multiVersion/json_schema_encrypt_fcv.js
+++ b/jstests/multiVersion/json_schema_encrypt_fcv.js
@@ -1,210 +1,193 @@
// Test that mongod will not allow creating a validator or view containing JSON Schema with
// encryption keywords when the feature compatibility version is older than 4.2.
(function() {
- "use strict";
+"use strict";
- const testName = "json_schema_encrypt_fcv";
- let dbpath = MongoRunner.dataPath + testName;
- resetDbpath(dbpath);
+const testName = "json_schema_encrypt_fcv";
+let dbpath = MongoRunner.dataPath + testName;
+resetDbpath(dbpath);
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
- assert.neq(null, conn, "mongod was unable to start up");
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
+assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB(testName);
- assert.commandWorked(testDB.dropDatabase());
+let testDB = conn.getDB(testName);
+assert.commandWorked(testDB.dropDatabase());
- let adminDB = conn.getDB("admin");
+let adminDB = conn.getDB("admin");
- // Explicitly set feature compatibility version 4.2.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
+// Explicitly set feature compatibility version 4.2.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
- // Create a collection with a validator containing JSON Schema with 'encrypt'.
- const jsonSchemaWithEncrypt = {
+// Create a collection with a validator containing JSON Schema with 'encrypt'.
+const jsonSchemaWithEncrypt = {
+ $jsonSchema: {
+ type: "object",
+ properties:
+ {foo: {encrypt: {algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Random", keyId: [UUID()]}}}
+ }
+};
+
+assert.commandWorked(testDB.createCollection("coll", {validator: jsonSchemaWithEncrypt}));
+let coll = testDB.coll;
+
+// Create a view with a pipeline which contains a JSON Schema with 'encrypt' in a match stage.
+assert.commandWorked(testDB.runCommand(
+ {create: "collView", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}));
+
+// The validator should cause this insert to fail.
+assert.writeError(coll.insert({foo: "not encrypted"}), ErrorCodes.DocumentValidationFailure);
+
+// Set a validator with 'encrypt' on an existing collection.
+assert.commandWorked(testDB.runCommand({
+ collMod: "coll",
+ validator: {
$jsonSchema: {
type: "object",
properties: {
- foo: {
- encrypt:
- {algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Random", keyId: [UUID()]}
- }
+ bar: {encrypt: {algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Random", keyId: [UUID()]}}
}
}
- };
-
- assert.commandWorked(testDB.createCollection("coll", {validator: jsonSchemaWithEncrypt}));
- let coll = testDB.coll;
-
- // Create a view with a pipeline which contains a JSON Schema with 'encrypt' in a match stage.
- assert.commandWorked(testDB.runCommand(
- {create: "collView", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}));
-
- // The validator should cause this insert to fail.
- assert.writeError(coll.insert({foo: "not encrypted"}), ErrorCodes.DocumentValidationFailure);
-
- // Set a validator with 'encrypt' on an existing collection.
- assert.commandWorked(testDB.runCommand({
- collMod: "coll",
- validator: {
- $jsonSchema: {
- type: "object",
- properties: {
- bar: {
- encrypt: {
- algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Random",
- keyId: [UUID()]
- }
- }
- }
- }
- }
- }));
+ }
+}));
+
+// Another failing insert.
+assert.writeError(coll.insert({bar: 1.0}), ErrorCodes.DocumentValidationFailure);
+
+// Querying the view while in FCV 4.2 should work.
+assert.eq([], testDB.collView.find().toArray());
+
+// Set the feature compatibility version to 4.0.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+// The validator is already in place, so it should still cause this insert to fail.
+assert.writeError(coll.insert({bar: 1.0}), ErrorCodes.DocumentValidationFailure);
+
+// Trying to create a new collection with a validator that contains 'encrypt' should fail while
+// feature compatibility version is 4.0.
+assert.commandFailedWithCode(testDB.createCollection("coll2", {validator: jsonSchemaWithEncrypt}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+// Trying to collMod a collection with 'encrypt' in the validator should also fail.
+assert.commandFailedWithCode(testDB.runCommand({collMod: "coll", validator: jsonSchemaWithEncrypt}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+// Querying the view while in FCV 4.0 should continue to work.
+assert.eq([], testDB.collView.find().toArray());
+
+// Attempting to create a new view containing JSON Schema with 'encrypt' should fail while in
+// FCV 4.0.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {create: "collView2", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+MongoRunner.stopMongod(conn);
+
+// If we try to start up a 4.0 mongod, it will fail, because it will not be able to parse the
+// $jsonSchema validator.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
+assert.eq(
+ null, conn, "mongod 4.0 started, even with a $jsonSchema validator with 'encrypt' in place.");
- // Another failing insert.
- assert.writeError(coll.insert({bar: 1.0}), ErrorCodes.DocumentValidationFailure);
+// Starting up a 4.2 mongod, however, should succeed, even though the feature compatibility
+// version is still set to 4.0.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
- // Querying the view while in FCV 4.2 should work.
- assert.eq([], testDB.collView.find().toArray());
+adminDB = conn.getDB("admin");
+testDB = conn.getDB(testName);
+coll = testDB.coll;
- // Set the feature compatibility version to 4.0.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
+// And the validator should still work.
+assert.writeError(coll.insert({bar: 1.0}), ErrorCodes.DocumentValidationFailure);
- // The validator is already in place, so it should still cause this insert to fail.
- assert.writeError(coll.insert({bar: 1.0}), ErrorCodes.DocumentValidationFailure);
+// Remove the validator.
+assert.commandWorked(testDB.runCommand({collMod: "coll", validator: {}}));
- // Trying to create a new collection with a validator that contains 'encrypt' should fail while
- // feature compatibility version is 4.0.
- assert.commandFailedWithCode(
- testDB.createCollection("coll2", {validator: jsonSchemaWithEncrypt}),
- ErrorCodes.QueryFeatureNotAllowed);
+// Querying on the view should also still work.
+assert.eq([], testDB.collView.find().toArray());
- // Trying to collMod a collection with 'encrypt' in the validator should also fail.
- assert.commandFailedWithCode(
- testDB.runCommand({collMod: "coll", validator: jsonSchemaWithEncrypt}),
- ErrorCodes.QueryFeatureNotAllowed);
+MongoRunner.stopMongod(conn);
- // Querying the view while in FCV 4.0 should continue to work.
- assert.eq([], testDB.collView.find().toArray());
+// Now, we should be able to start up a 4.0 mongod.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
+assert.neq(
+ null, conn, "mongod 4.0 failed to start, even after we removed the $jsonSchema validator");
- // Attempting to create a new view containing JSON Schema with 'encrypt' should fail while in
- // FCV 4.0.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {create: "collView2", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}),
- ErrorCodes.QueryFeatureNotAllowed);
-
- MongoRunner.stopMongod(conn);
-
- // If we try to start up a 4.0 mongod, it will fail, because it will not be able to parse the
- // $jsonSchema validator.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
- assert.eq(null,
- conn,
- "mongod 4.0 started, even with a $jsonSchema validator with 'encrypt' in place.");
-
- // Starting up a 4.2 mongod, however, should succeed, even though the feature compatibility
- // version is still set to 4.0.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
-
- adminDB = conn.getDB("admin");
- testDB = conn.getDB(testName);
- coll = testDB.coll;
-
- // And the validator should still work.
- assert.writeError(coll.insert({bar: 1.0}), ErrorCodes.DocumentValidationFailure);
-
- // Remove the validator.
- assert.commandWorked(testDB.runCommand({collMod: "coll", validator: {}}));
-
- // Querying on the view should also still work.
- assert.eq([], testDB.collView.find().toArray());
-
- MongoRunner.stopMongod(conn);
-
- // Now, we should be able to start up a 4.0 mongod.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
- assert.neq(
- null, conn, "mongod 4.0 failed to start, even after we removed the $jsonSchema validator");
-
- testDB = conn.getDB(testName);
-
- // The view containing the JSON Schema 'encrypt' keyword should still exist.
- assert.eq(
- "collView",
- testDB.runCommand({listCollections: 1, filter: {type: "view"}}).cursor.firstBatch[0].name);
-
- // However, querying on the view with the invalid view pipeline should fail on binary version
- // 4.0.
- assert.commandFailedWithCode(testDB.runCommand({find: "collView", filter: {}}),
- ErrorCodes.FailedToParse);
-
- // Dropping the invalid view should be allowed.
- assert.commandWorked(testDB.runCommand({drop: "collView"}));
-
- MongoRunner.stopMongod(conn);
-
- // The rest of the test uses mongod 4.2.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
-
- adminDB = conn.getDB("admin");
- testDB = conn.getDB(testName);
- coll = testDB.coll;
-
- // Set the feature compatibility version back to 4.2.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- // Now we should be able to create a collection with a validator containing 'encrypt' again.
- assert.commandWorked(testDB.createCollection("coll2", {validator: jsonSchemaWithEncrypt}));
-
- // And we should be able to modify a collection to have a validator containing 'encrypt'.
- assert.commandWorked(testDB.runCommand({collMod: "coll", validator: jsonSchemaWithEncrypt}));
-
- // And we should be able to create a view with a pipeline containing 'encrypt'.
- assert.commandWorked(testDB.runCommand(
- {create: "collView2", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}));
-
- // Set the feature compatibility version to 4.0 and then restart with
- // internalValidateFeaturesAsMaster=false.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
- });
- assert.neq(null, conn, "mongod was unable to start up");
-
- testDB = conn.getDB(testName);
-
- // Even though the feature compatibility version is 4.0, we should still be able to add a
- // JSON Schema validator containing 'encrypt', because internalValidateFeaturesAsMaster is
- // false.
- assert.commandWorked(testDB.createCollection("coll3", {validator: jsonSchemaWithEncrypt}));
-
- // We should also be able to modify a collection to have a JSON Schema validator containing
- // 'encrypt'.
- assert.commandWorked(testDB.runCommand({
- collMod: "coll3",
- validator: {
- $jsonSchema: {
- type: "object",
- properties: {
- bar: {
- encrypt: {
- algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Random",
- keyId: [UUID()]
- }
- }
- }
+testDB = conn.getDB(testName);
+
+// The view containing the JSON Schema 'encrypt' keyword should still exist.
+assert.eq(
+ "collView",
+ testDB.runCommand({listCollections: 1, filter: {type: "view"}}).cursor.firstBatch[0].name);
+
+// However, querying on the view with the invalid view pipeline should fail on binary version
+// 4.0.
+assert.commandFailedWithCode(testDB.runCommand({find: "collView", filter: {}}),
+ ErrorCodes.FailedToParse);
+
+// Dropping the invalid view should be allowed.
+assert.commandWorked(testDB.runCommand({drop: "collView"}));
+
+MongoRunner.stopMongod(conn);
+
+// The rest of the test uses mongod 4.2.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+
+adminDB = conn.getDB("admin");
+testDB = conn.getDB(testName);
+coll = testDB.coll;
+
+// Set the feature compatibility version back to 4.2.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+// Now we should be able to create a collection with a validator containing 'encrypt' again.
+assert.commandWorked(testDB.createCollection("coll2", {validator: jsonSchemaWithEncrypt}));
+
+// And we should be able to modify a collection to have a validator containing 'encrypt'.
+assert.commandWorked(testDB.runCommand({collMod: "coll", validator: jsonSchemaWithEncrypt}));
+
+// And we should be able to create a view with a pipeline containing 'encrypt'.
+assert.commandWorked(testDB.runCommand(
+ {create: "collView2", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}));
+
+// Set the feature compatibility version to 4.0 and then restart with
+// internalValidateFeaturesAsMaster=false.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: "4.0"}));
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ binVersion: "latest",
+ noCleanData: true,
+ setParameter: "internalValidateFeaturesAsMaster=false"
+});
+assert.neq(null, conn, "mongod was unable to start up");
+
+testDB = conn.getDB(testName);
+
+// Even though the feature compatibility version is 4.0, we should still be able to add a
+// JSON Schema validator containing 'encrypt', because internalValidateFeaturesAsMaster is
+// false.
+assert.commandWorked(testDB.createCollection("coll3", {validator: jsonSchemaWithEncrypt}));
+
+// We should also be able to modify a collection to have a JSON Schema validator containing
+// 'encrypt'.
+assert.commandWorked(testDB.runCommand({
+ collMod: "coll3",
+ validator: {
+ $jsonSchema: {
+ type: "object",
+ properties: {
+ bar: {encrypt: {algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Random", keyId: [UUID()]}}
}
}
- }));
+ }
+}));
- // We should also be able to create a view containing a JSON Schema with the 'encrypt' keyword.
- assert.commandWorked(testDB.runCommand(
- {create: "collView3", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}));
+// We should also be able to create a view containing a JSON Schema with the 'encrypt' keyword.
+assert.commandWorked(testDB.runCommand(
+ {create: "collView3", viewOn: "coll", pipeline: [{$match: jsonSchemaWithEncrypt}]}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
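
The reason the plaintext inserts above trip the validator is that the 'encrypt' keyword matches only values that are already ciphertext, i.e. BinData of the encrypted subtype (6); any other type fails document validation. In shell terms, mirroring the assertions in this test:

```javascript
// Illustration, assuming the jsonSchemaWithEncrypt validator above is in place.
assert.writeError(coll.insert({foo: "plaintext"}), ErrorCodes.DocumentValidationFailure);
// A conforming value would be an already-encrypted payload: BinData subtype 6.
```
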
diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js
index edf358b8319..5f3ccf20e10 100644
--- a/jstests/multiVersion/libs/data_generators.js
+++ b/jstests/multiVersion/libs/data_generators.js
@@ -612,7 +612,7 @@ function CollectionMetadataGenerator(options) {
for (var option in options) {
if (options.hasOwnProperty(option)) {
if (option === 'capped') {
- if (typeof(options['capped']) !== 'boolean') {
+ if (typeof (options['capped']) !== 'boolean') {
throw Error(
"\"capped\" options must be boolean in CollectionMetadataGenerator");
}
diff --git a/jstests/multiVersion/libs/dumprestore_helpers.js b/jstests/multiVersion/libs/dumprestore_helpers.js
index c62c817332b..5ee3bac4306 100644
--- a/jstests/multiVersion/libs/dumprestore_helpers.js
+++ b/jstests/multiVersion/libs/dumprestore_helpers.js
@@ -55,10 +55,9 @@ function multiVersionDumpRestoreTest(configObj) {
var shardingTestConfig = {
name: testBaseName + "_sharded_source",
mongos: [{binVersion: configObj.serverSourceVersion}],
- shards: [{
- binVersion: configObj.serverSourceVersion,
- storageEngine: configObj.storageEngine
- }],
+ shards: [
+ {binVersion: configObj.serverSourceVersion, storageEngine: configObj.storageEngine}
+ ],
config: [{binVersion: configObj.serverSourceVersion}]
};
var shardingTest = new ShardingTest(shardingTestConfig);
diff --git a/jstests/multiVersion/libs/global_snapshot_reads_helpers.js b/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
index be7730fdc99..407da4bbc7a 100644
--- a/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
+++ b/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
@@ -41,11 +41,11 @@ function runCommandAndVerifyResponse(sessionDb, txnNumber, cmdObj, expectSuccess
return true;
});
} else {
- assert.commandFailedWithCode(sessionDb.runCommand(cmdObj),
- expectedCode,
- "command did not fail with expected error code, cmd: " +
- tojson(cmdObj) + ", expectedCode: " +
- tojson(expectedCode));
+ assert.commandFailedWithCode(
+ sessionDb.runCommand(cmdObj),
+ expectedCode,
+ "command did not fail with expected error code, cmd: " + tojson(cmdObj) +
+ ", expectedCode: " + tojson(expectedCode));
}
return txnNumber;
}
@@ -73,10 +73,10 @@ function verifyGlobalSnapshotReads(conn, expectSuccess, expectedCode) {
txnNumber = runCommandAndVerifyResponse(shardedDb,
txnNumber,
{
- find: "sharded",
- filter: {x: 1},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber)
+ find: "sharded",
+ filter: {x: 1},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber)
},
expectSuccess,
expectedCode);
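
For reference, the command shape these helpers issue: each snapshot-read attempt carries readConcern level "snapshot" together with a NumberLong transaction number, and is run against `sessionDb`, a database handle obtained from a server session.

```javascript
// Shape of one snapshot-read attempt, as assembled by the helper above.
const snapshotFind = {
    find: "sharded",
    filter: {x: 1},
    readConcern: {level: "snapshot"},
    txnNumber: NumberLong(0),  // per-attempt transaction number
};
// Issued as: sessionDb.runCommand(snapshotFind)
```
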
diff --git a/jstests/multiVersion/libs/initial_sync.js b/jstests/multiVersion/libs/initial_sync.js
index 4999a6a2405..329602f0c4b 100644
--- a/jstests/multiVersion/libs/initial_sync.js
+++ b/jstests/multiVersion/libs/initial_sync.js
@@ -12,7 +12,6 @@ load("./jstests/replsets/rslib.js");
*/
var multversionInitialSyncTest = function(
name, replSetVersion, newNodeVersion, configSettings, fcv) {
-
var nodes = {n1: {binVersion: replSetVersion}, n2: {binVersion: replSetVersion}};
jsTestLog("Starting up a two-node '" + replSetVersion + "' version replica set.");
diff --git a/jstests/multiVersion/libs/multi_cluster.js b/jstests/multiVersion/libs/multi_cluster.js
index 2937d0d4d6d..e611e541c3f 100644
--- a/jstests/multiVersion/libs/multi_cluster.js
+++ b/jstests/multiVersion/libs/multi_cluster.js
@@ -89,7 +89,6 @@ ShardingTest.prototype.upgradeCluster = function(binVersion, options) {
};
ShardingTest.prototype.restartMongoses = function() {
-
var numMongoses = this._mongos.length;
for (var i = 0; i < numMongoses; i++) {
diff --git a/jstests/multiVersion/libs/multi_rs.js b/jstests/multiVersion/libs/multi_rs.js
index 60bc253d877..ce67a5ed157 100644
--- a/jstests/multiVersion/libs/multi_rs.js
+++ b/jstests/multiVersion/libs/multi_rs.js
@@ -15,7 +15,6 @@ ReplSetTest.prototype.upgradeSet = function(options, user, pwd) {
// Then upgrade the primary after stepping down.
this.upgradePrimary(primary, options, user, pwd);
-
};
ReplSetTest.prototype.upgradeSecondaries = function(primary, options, user, pwd) {
@@ -125,7 +124,7 @@ ReplSetTest.prototype.reconnect = function(node) {
this.nodes[nodeId] = new Mongo(node.host);
var except = {};
for (var i in node) {
- if (typeof(node[i]) == "function")
+ if (typeof (node[i]) == "function")
continue;
this.nodes[nodeId][i] = node[i];
}
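The hunks above and in the neighboring library files also show blank lines being dropped when they sit immediately inside a function body, at its opening or just before its closing brace. A hedged sketch of the resulting shape; the helper name is hypothetical:

// Hypothetical helper, shown only for its layout: no blank line after the
// opening line and none before the closing brace.
ReplSetTest.prototype.exampleNodeCount = function() {
    return this.nodes.length;
};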
diff --git a/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js b/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js
index 0f77983adbf..2c74d3c632e 100644
--- a/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js
+++ b/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js
@@ -46,7 +46,9 @@ function runTxn(testDB, collName, {lsid, txnNumber}, {multiShard}) {
insert: collName,
documents: docs,
txnNumber: NumberLong(txnNumber),
- startTransaction: true, lsid, autocommit,
+ startTransaction: true,
+ lsid,
+ autocommit,
});
if (!startTransactionRes.ok) {
return startTransactionRes;
@@ -55,7 +57,9 @@ function runTxn(testDB, collName, {lsid, txnNumber}, {multiShard}) {
const secondStatementRes = testDB.runCommand({
insert: collName,
documents: docs,
- txnNumber: NumberLong(txnNumber), lsid, autocommit,
+ txnNumber: NumberLong(txnNumber),
+ lsid,
+ autocommit,
});
if (!secondStatementRes.ok) {
return secondStatementRes;
@@ -82,7 +86,8 @@ function assertMultiShardRetryableWriteWorked(testDB, collName, {lsid, txnNumber
assert.commandWorked(testDB.runCommand({
insert: collName,
documents: [{skey: -1, fromRetryableWrite: true}, {skey: 1, fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
+ txnNumber: NumberLong(txnNumber),
+ lsid
}));
}
@@ -93,7 +98,8 @@ function assertMultiShardRetryableWriteCanBeRetried(testDB, collName, {lsid, txn
assert.commandWorked(testDB.runCommand({
insert: collName,
documents: [{skey: -1, fromRetryableWrite: true}, {skey: 1, fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
+ txnNumber: NumberLong(txnNumber),
+ lsid
}));
assert.eq(numMultiShardRetryableWrites * 2, // Each write inserts 2 documents.
testDB[collName].find({fromRetryableWrite: true}).itcount());
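The transaction-helper hunks above illustrate another recurring change: once an object literal spans multiple lines, every property gets its own line, including shorthand properties such as lsid and autocommit that previously shared one. A sketch in that style; the builder function is hypothetical:

// Hypothetical builder, showing one property per line in a multi-line literal.
function buildInsertCommand(collName, docs, txnNumber, lsid, autocommit) {
    return {
        insert: collName,
        documents: docs,
        txnNumber: NumberLong(txnNumber),
        lsid,
        autocommit,
    };
}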
diff --git a/jstests/multiVersion/libs/verify_collection_data.js b/jstests/multiVersion/libs/verify_collection_data.js
index b8eeca557f7..10b26534ccc 100644
--- a/jstests/multiVersion/libs/verify_collection_data.js
+++ b/jstests/multiVersion/libs/verify_collection_data.js
@@ -22,7 +22,6 @@ load('./jstests/multiVersion/libs/data_generators.js');
// Function to actually add the data generated by the given dataGenerator to a collection
createCollectionWithData = function(db, collectionName, dataGenerator) {
-
// Drop collection if exists
// TODO: add ability to control this
db.getCollection(collectionName).drop();
@@ -104,7 +103,6 @@ function CollectionDataValidator() {
this.validateCollectionData = function(
collection, dbVersionForCollection, options = {indexSpecFieldsToSkip: []}) {
-
if (!_initialized) {
throw Error("validateCollectionWithAllData called, but data is not initialized");
}
diff --git a/jstests/multiVersion/libs/verify_versions.js b/jstests/multiVersion/libs/verify_versions.js
index f20da90de80..fcc9345e276 100644
--- a/jstests/multiVersion/libs/verify_versions.js
+++ b/jstests/multiVersion/libs/verify_versions.js
@@ -4,39 +4,38 @@
var Mongo, assert;
(function() {
- "use strict";
- Mongo.prototype.getBinVersion = function() {
- var result = this.getDB("admin").runCommand({serverStatus: 1});
- return result.version;
- };
+"use strict";
+Mongo.prototype.getBinVersion = function() {
+ var result = this.getDB("admin").runCommand({serverStatus: 1});
+ return result.version;
+};
- // Checks that our mongodb process is of a certain version
- assert.binVersion = function(mongo, version) {
- var currVersion = mongo.getBinVersion();
- assert(MongoRunner.areBinVersionsTheSame(MongoRunner.getBinVersionFor(currVersion),
- MongoRunner.getBinVersionFor(version)),
- "version " + version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
- " is not the same as " + MongoRunner.getBinVersionFor(currVersion));
- };
+// Checks that our mongodb process is of a certain version
+assert.binVersion = function(mongo, version) {
+ var currVersion = mongo.getBinVersion();
+ assert(MongoRunner.areBinVersionsTheSame(MongoRunner.getBinVersionFor(currVersion),
+ MongoRunner.getBinVersionFor(version)),
+ "version " + version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
+ " is not the same as " + MongoRunner.getBinVersionFor(currVersion));
+};
- // Compares an array of desired versions and an array of found versions,
- // looking for versions not found
- assert.allBinVersions = function(versionsWanted, versionsFound) {
-
- for (var i = 0; i < versionsWanted.length; i++) {
- var version = versionsWanted[i];
- var found = false;
- for (var j = 0; j < versionsFound.length; j++) {
- if (MongoRunner.areBinVersionsTheSame(version, versionsFound[j])) {
- found = true;
- break;
- }
+// Compares an array of desired versions and an array of found versions,
+// looking for versions not found
+assert.allBinVersions = function(versionsWanted, versionsFound) {
+ for (var i = 0; i < versionsWanted.length; i++) {
+ var version = versionsWanted[i];
+ var found = false;
+ for (var j = 0; j < versionsFound.length; j++) {
+ if (MongoRunner.areBinVersionsTheSame(version, versionsFound[j])) {
+ found = true;
+ break;
}
-
- assert(found,
- "could not find version " + version + " (" +
- MongoRunner.getBinVersionFor(version) + ")" + " in " + versionsFound);
}
- };
+ assert(found,
+ "could not find version " + version + " (" + MongoRunner.getBinVersionFor(version) +
+ ")" +
+ " in " + versionsFound);
+ }
+};
}());
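The whole-file reflow above is the most visible change in this commit: the body of the (function() { ... }()) wrapper used by these tests is no longer indented, so every statement shifts one level left. A minimal runnable sketch of the new layout (illustrative only):

(function() {
'use strict';
// Body statements now sit at column 0 inside the test wrapper.
var message = 'flush-left under clang-format 7.0.1';
print(message);
}());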
diff --git a/jstests/multiVersion/long_index_mixed_version_replset.js b/jstests/multiVersion/long_index_mixed_version_replset.js
index b28ce5966c5..78dd4bdce4f 100644
--- a/jstests/multiVersion/long_index_mixed_version_replset.js
+++ b/jstests/multiVersion/long_index_mixed_version_replset.js
@@ -4,44 +4,42 @@
* TODO: remove this test in 4.4.
*/
(function() {
- 'use strict';
+'use strict';
- TestData.replSetFeatureCompatibilityVersion = '4.0';
- const rst = new ReplSetTest({
- nodes: [
- {binVersion: 'latest'},
- {rsConfig: {priority: 0, votes: 0}},
- ]
- });
- rst.startSet();
- rst.initiate();
- rst.restart(1, {binVersion: '4.0'});
+TestData.replSetFeatureCompatibilityVersion = '4.0';
+const rst = new ReplSetTest({
+ nodes: [
+ {binVersion: 'latest'},
+ {rsConfig: {priority: 0, votes: 0}},
+ ]
+});
+rst.startSet();
+rst.initiate();
+rst.restart(1, {binVersion: '4.0'});
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('long_index_name');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('long_index_name');
- // Compute maximum index name length for this collection under FCV 4.0.
- const maxNsLength = 127;
- const maxIndexNameLength = maxNsLength - (coll.getFullName() + ".$").length;
- jsTestLog('Max index name length under FCV 4.0 = ' + maxIndexNameLength);
+// Compute maximum index name length for this collection under FCV 4.0.
+const maxNsLength = 127;
+const maxIndexNameLength = maxNsLength - (coll.getFullName() + ".$").length;
+jsTestLog('Max index name length under FCV 4.0 = ' + maxIndexNameLength);
- // Create an index with the longest name allowed for this collection.
- assert.commandWorked(coll.createIndex({a: 1}, {name: 'a'.repeat(maxIndexNameLength)}));
+// Create an index with the longest name allowed for this collection.
+assert.commandWorked(coll.createIndex({a: 1}, {name: 'a'.repeat(maxIndexNameLength)}));
- // If this command succeeds unexpectedly, it will cause an fassert on the 4.0 secondary which
- // cannot handle long index namespaces, with a "CannotCreateIndex: ... index name ... too long"
- // error message.
- assert.commandFailedWithCode(
- coll.createIndex({b: 1}, {name: 'b'.repeat(maxIndexNameLength + 1)}),
- ErrorCodes.CannotCreateIndex);
+// If this command succeeds unexpectedly, it will cause an fassert on the 4.0 secondary which
+// cannot handle long index namespaces, with a "CannotCreateIndex: ... index name ... too long"
+// error message.
+assert.commandFailedWithCode(coll.createIndex({b: 1}, {name: 'b'.repeat(maxIndexNameLength + 1)}),
+ ErrorCodes.CannotCreateIndex);
- // The existing index on {x: 1} has an index name that is the longest supported under FCV 4.0
- // for the current collection name.
- // Any attempt to rename this collection with a longer name must fail. Otherwise, the invalid
- // index namespace will cause the 4.0 secondary to fassert.
- assert.commandFailedWithCode(coll.renameCollection(coll.getName() + 'z'),
- ErrorCodes.InvalidLength);
+// The existing index on {a: 1} has an index name that is the longest supported under FCV 4.0
+// for the current collection name.
+// Any attempt to rename this collection with a longer name must fail. Otherwise, the invalid
+// index namespace will cause the 4.0 secondary to fassert.
+assert.commandFailedWithCode(coll.renameCollection(coll.getName() + 'z'), ErrorCodes.InvalidLength);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/migration_between_mixed_version_mongods.js
index a2539d87ddd..bd6b41b0e16 100644
--- a/jstests/multiVersion/migration_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/migration_between_mixed_version_mongods.js
@@ -10,101 +10,99 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
load("./jstests/multiVersion/libs/verify_versions.js");
(function() {
- "use strict";
-
- var options = {
- shards: [
- {binVersion: "last-stable"},
- {binVersion: "last-stable"},
- {binVersion: "latest"},
- {binVersion: "latest"}
- ],
- mongos: 1,
- other: {mongosOptions: {binVersion: "last-stable"}, shardAsReplicaSet: false}
- };
-
- var st = new ShardingTest(options);
- st.stopBalancer();
-
- assert.binVersion(st.shard0, "last-stable");
- assert.binVersion(st.shard1, "last-stable");
- assert.binVersion(st.shard2, "latest");
- assert.binVersion(st.shard3, "latest");
- assert.binVersion(st.s0, "last-stable");
-
- var mongos = st.s0, admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
-
- fooDB = "fooTest", fooNS = fooDB + ".foo", fooColl = mongos.getCollection(fooNS),
- fooDonor = st.shard0, fooRecipient = st.shard2,
- fooDonorColl = fooDonor.getCollection(fooNS),
- fooRecipientColl = fooRecipient.getCollection(fooNS),
-
- barDB = "barTest", barNS = barDB + ".foo", barColl = mongos.getCollection(barNS),
- barDonor = st.shard3, barRecipient = st.shard1,
- barDonorColl = barDonor.getCollection(barNS),
- barRecipientColl = barRecipient.getCollection(barNS);
-
- assert.commandWorked(admin.runCommand({enableSharding: fooDB}));
- assert.commandWorked(admin.runCommand({enableSharding: barDB}));
- st.ensurePrimaryShard(fooDB, shards[0]._id);
- st.ensurePrimaryShard(barDB, shards[3]._id);
-
- assert.commandWorked(admin.runCommand({shardCollection: fooNS, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: fooNS, middle: {a: 10}}));
- assert.commandWorked(admin.runCommand({shardCollection: barNS, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: barNS, middle: {a: 10}}));
-
- fooColl.insert({a: 0});
- assert.eq(null, fooColl.getDB().getLastError());
- fooColl.insert({a: 10});
- assert.eq(null, fooColl.getDB().getLastError());
- assert.eq(0, fooRecipientColl.count());
- assert.eq(2, fooDonorColl.count());
- assert.eq(2, fooColl.count());
-
- barColl.insert({a: 0});
- assert.eq(null, barColl.getDB().getLastError());
- barColl.insert({a: 10});
- assert.eq(null, barColl.getDB().getLastError());
- assert.eq(0, barRecipientColl.count());
- assert.eq(2, barDonorColl.count());
- assert.eq(2, barColl.count());
-
- /**
- * Perform two migrations:
- * shard0 (last-stable) -> foo chunk -> shard2 (latest)
- * shard3 (latest) -> bar chunk -> shard1 (last-stable)
- */
-
- assert.commandWorked(admin.runCommand(
- {moveChunk: fooNS, find: {a: 10}, to: shards[2]._id, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: barNS, find: {a: 10}, to: shards[1]._id, _waitForDelete: true}));
- assert.eq(1,
- fooRecipientColl.count(),
- "Foo collection migration failed. " +
- "Last-stable -> latest mongod version migration failure.");
- assert.eq(1,
- fooDonorColl.count(),
- "Foo donor lost its document. " +
- "Last-stable -> latest mongod version migration failure.");
- assert.eq(2,
- fooColl.count(),
- "Incorrect number of documents in foo collection. " +
- "Last-stable -> latest mongod version migration failure.");
- assert.eq(1,
- barRecipientColl.count(),
- "Bar collection migration failed. " +
- "Latest -> last-stable mongod version migration failure.");
- assert.eq(1,
- barDonorColl.count(),
- "Bar donor lost its document. " +
- "Latest -> last-stable mongod version migration failure.");
- assert.eq(2,
- barColl.count(),
- "Incorrect number of documents in bar collection. " +
- "Latest -> last-stable mongod version migration failure.");
-
- st.stop();
+"use strict";
+
+var options = {
+ shards: [
+ {binVersion: "last-stable"},
+ {binVersion: "last-stable"},
+ {binVersion: "latest"},
+ {binVersion: "latest"}
+ ],
+ mongos: 1,
+ other: {mongosOptions: {binVersion: "last-stable"}, shardAsReplicaSet: false}
+};
+
+var st = new ShardingTest(options);
+st.stopBalancer();
+
+assert.binVersion(st.shard0, "last-stable");
+assert.binVersion(st.shard1, "last-stable");
+assert.binVersion(st.shard2, "latest");
+assert.binVersion(st.shard3, "latest");
+assert.binVersion(st.s0, "last-stable");
+
+var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(),
+
+ fooDB = "fooTest", fooNS = fooDB + ".foo", fooColl = mongos.getCollection(fooNS),
+ fooDonor = st.shard0, fooRecipient = st.shard2, fooDonorColl = fooDonor.getCollection(fooNS),
+ fooRecipientColl = fooRecipient.getCollection(fooNS),
+
+ barDB = "barTest", barNS = barDB + ".foo", barColl = mongos.getCollection(barNS),
+ barDonor = st.shard3, barRecipient = st.shard1, barDonorColl = barDonor.getCollection(barNS),
+ barRecipientColl = barRecipient.getCollection(barNS);
+
+assert.commandWorked(admin.runCommand({enableSharding: fooDB}));
+assert.commandWorked(admin.runCommand({enableSharding: barDB}));
+st.ensurePrimaryShard(fooDB, shards[0]._id);
+st.ensurePrimaryShard(barDB, shards[3]._id);
+
+assert.commandWorked(admin.runCommand({shardCollection: fooNS, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: fooNS, middle: {a: 10}}));
+assert.commandWorked(admin.runCommand({shardCollection: barNS, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: barNS, middle: {a: 10}}));
+
+fooColl.insert({a: 0});
+assert.eq(null, fooColl.getDB().getLastError());
+fooColl.insert({a: 10});
+assert.eq(null, fooColl.getDB().getLastError());
+assert.eq(0, fooRecipientColl.count());
+assert.eq(2, fooDonorColl.count());
+assert.eq(2, fooColl.count());
+
+barColl.insert({a: 0});
+assert.eq(null, barColl.getDB().getLastError());
+barColl.insert({a: 10});
+assert.eq(null, barColl.getDB().getLastError());
+assert.eq(0, barRecipientColl.count());
+assert.eq(2, barDonorColl.count());
+assert.eq(2, barColl.count());
+
+/**
+ * Perform two migrations:
+ * shard0 (last-stable) -> foo chunk -> shard2 (latest)
+ * shard3 (latest) -> bar chunk -> shard1 (last-stable)
+ */
+
+assert.commandWorked(
+ admin.runCommand({moveChunk: fooNS, find: {a: 10}, to: shards[2]._id, _waitForDelete: true}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: barNS, find: {a: 10}, to: shards[1]._id, _waitForDelete: true}));
+assert.eq(1,
+ fooRecipientColl.count(),
+ "Foo collection migration failed. " +
+ "Last-stable -> latest mongod version migration failure.");
+assert.eq(1,
+ fooDonorColl.count(),
+ "Foo donor lost its document. " +
+ "Last-stable -> latest mongod version migration failure.");
+assert.eq(2,
+ fooColl.count(),
+ "Incorrect number of documents in foo collection. " +
+ "Last-stable -> latest mongod version migration failure.");
+assert.eq(1,
+ barRecipientColl.count(),
+ "Bar collection migration failed. " +
+ "Latest -> last-stable mongod version migration failure.");
+assert.eq(1,
+ barDonorColl.count(),
+ "Bar donor lost its document. " +
+ "Latest -> last-stable mongod version migration failure.");
+assert.eq(2,
+ barColl.count(),
+ "Incorrect number of documents in bar collection. " +
+ "Latest -> last-stable mongod version migration failure.");
+
+st.stop();
})();
diff --git a/jstests/multiVersion/minor_version_tags_new_old_new.js b/jstests/multiVersion/minor_version_tags_new_old_new.js
index 29daf24e8c8..eaae74c8810 100644
--- a/jstests/multiVersion/minor_version_tags_new_old_new.js
+++ b/jstests/multiVersion/minor_version_tags_new_old_new.js
@@ -1,16 +1,16 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/tags.js");
+load("jstests/replsets/libs/tags.js");
- var oldVersion = "last-stable";
- var newVersion = "latest";
- let nodes = [
- {binVersion: newVersion},
- {binVersion: oldVersion},
- {binVersion: newVersion},
- {binVersion: oldVersion},
- {binVersion: newVersion}
- ];
- new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
+var oldVersion = "last-stable";
+var newVersion = "latest";
+let nodes = [
+ {binVersion: newVersion},
+ {binVersion: oldVersion},
+ {binVersion: newVersion},
+ {binVersion: oldVersion},
+ {binVersion: newVersion}
+];
+new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
}());
diff --git a/jstests/multiVersion/minor_version_tags_old_new_old.js b/jstests/multiVersion/minor_version_tags_old_new_old.js
index ffbf838e2d7..22ce71964f9 100644
--- a/jstests/multiVersion/minor_version_tags_old_new_old.js
+++ b/jstests/multiVersion/minor_version_tags_old_new_old.js
@@ -1,16 +1,16 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/tags.js");
+load("jstests/replsets/libs/tags.js");
- var oldVersion = "last-stable";
- var newVersion = "latest";
- let nodes = [
- {binVersion: oldVersion},
- {binVersion: newVersion},
- {binVersion: oldVersion},
- {binVersion: newVersion},
- {binVersion: oldVersion}
- ];
- new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
+var oldVersion = "last-stable";
+var newVersion = "latest";
+let nodes = [
+ {binVersion: oldVersion},
+ {binVersion: newVersion},
+ {binVersion: oldVersion},
+ {binVersion: newVersion},
+ {binVersion: oldVersion}
+];
+new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
}());
diff --git a/jstests/multiVersion/mixed_version_transactions_during_rollback_via_refetch.js b/jstests/multiVersion/mixed_version_transactions_during_rollback_via_refetch.js
index 3b291a6d4c2..db3bcada64f 100644
--- a/jstests/multiVersion/mixed_version_transactions_during_rollback_via_refetch.js
+++ b/jstests/multiVersion/mixed_version_transactions_during_rollback_via_refetch.js
@@ -4,65 +4,68 @@
*/
(function() {
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- jsTest.log("Starting a mixed version replica set.");
+jsTest.log("Starting a mixed version replica set.");
- TestData.replSetFeatureCompatibilityVersion = '4.0';
- const rst = new ReplSetTest({
- nodes: [
- {binVersion: 'latest'},
- {binVersion: 'latest'},
- {binVersion: 'latest', rsConfig: {priority: 0}},
- ],
- useBridge: true,
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
- rst.startSet();
- const config = rst.getReplSetConfig();
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
- // A 4.2 binVersion primary with empty data files will set FCV to 4.2 when elected. This will
- // cause an IncompatibleServerVersion error when connecting with a 4.0 binVersion node.
- // Therefore, we wait until the replica set is initiated with FCV4.0 before switching the
- // binVersion to 4.0.
- rst.restart(1, {binVersion: '4.0'});
+TestData.replSetFeatureCompatibilityVersion = '4.0';
+const rst = new ReplSetTest({
+ nodes: [
+ {binVersion: 'latest'},
+ {binVersion: 'latest'},
+ {binVersion: 'latest', rsConfig: {priority: 0}},
+ ],
+ useBridge: true,
+ nodeOptions: {enableMajorityReadConcern: "false"}
+});
+rst.startSet();
+const config = rst.getReplSetConfig();
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
+// A 4.2 binVersion primary with empty data files will set FCV to 4.2 when elected. This will
+// cause an IncompatibleServerVersion error when connecting with a 4.0 binVersion node.
+// Therefore, we wait until the replica set is initiated with FCV 4.0 before switching the
+// binVersion to 4.0.
+rst.restart(1, {binVersion: '4.0'});
- const collName = 'mixed_version_transactions_during_rollback_via_refetch';
- rst.getPrimary().getDB('test').getCollection(collName).drop({writeConcern: {w: "majority"}});
- assert.commandWorked(
- rst.getPrimary().getDB('test').createCollection(collName, {writeConcern: {w: "majority"}}));
+const collName = 'mixed_version_transactions_during_rollback_via_refetch';
+rst.getPrimary().getDB('test').getCollection(collName).drop({writeConcern: {w: "majority"}});
+assert.commandWorked(
+ rst.getPrimary().getDB('test').createCollection(collName, {writeConcern: {w: "majority"}}));
- rst.awaitReplication();
- const rollbackTest = new RollbackTest(collName, rst);
+rst.awaitReplication();
+const rollbackTest = new RollbackTest(collName, rst);
- const primary = rollbackTest.getPrimary();
+const primary = rollbackTest.getPrimary();
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase('test');
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase('test');
+const sessionColl = sessionDB.getCollection(collName);
- jsTestLog("Start a transaction and insert a document.");
- session.startTransaction();
- const doc1 = {_id: 1};
- assert.commandWorked(sessionColl.insert(doc1));
- assert.eq(doc1, sessionColl.findOne(doc1));
+jsTestLog("Start a transaction and insert a document.");
+session.startTransaction();
+const doc1 = {
+ _id: 1
+};
+assert.commandWorked(sessionColl.insert(doc1));
+assert.eq(doc1, sessionColl.findOne(doc1));
- // Stop replication from the current primary.
- rollbackTest.transitionToRollbackOperations();
- jsTestLog("Commit the transaction. This transaction is expected to be rolled back.");
- assert.commandWorked(session.commitTransaction_forTesting());
+// Stop replication from the current primary.
+rollbackTest.transitionToRollbackOperations();
+jsTestLog("Commit the transaction. This transaction is expected to be rolled back.");
+assert.commandWorked(session.commitTransaction_forTesting());
- // Step down current primary and elect a node that lacks the transaction oplog entry.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+// Step down current primary and elect a node that lacks the transaction oplog entry.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSteadyStateOperations();
- // Assert that the document has been rolled back.
- assert.eq(null, sessionColl.findOne(doc1));
-
- rollbackTest.stop();
+// Assert that the document has been rolled back.
+assert.eq(null, sessionColl.findOne(doc1));
+rollbackTest.stop();
})();
\ No newline at end of file
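One more pattern from this file: a one-line assignment whose right-hand side is a braced literal, such as config.settings = {chainingAllowed: false}; or const doc1 = {_id: 1};, is now broken so the braces open and close on their own lines. A sketch of the resulting shape (illustrative only):

// Illustrative only: the braces of the initializer now get their own lines.
const settings = {
    chainingAllowed: false
};
printjson(settings);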
diff --git a/jstests/multiVersion/mixed_version_unprepared_transactions.js b/jstests/multiVersion/mixed_version_unprepared_transactions.js
index 9b3d8016c7d..7280d7d36ce 100644
--- a/jstests/multiVersion/mixed_version_unprepared_transactions.js
+++ b/jstests/multiVersion/mixed_version_unprepared_transactions.js
@@ -3,71 +3,76 @@
*/
(function() {
- jsTest.log("Starting a mixed version replica set.");
+jsTest.log("Starting a mixed version replica set.");
- TestData.replSetFeatureCompatibilityVersion = '4.0';
- const rst = new ReplSetTest({
- nodes: [
- {binVersion: 'latest'},
- {binVersion: 'latest'},
- {binVersion: 'latest', rsConfig: {priority: 0, votes: 0}},
- ]
- });
- rst.startSet();
- rst.initiate();
- // A 4.2 binVersion primary with empty data files will set FCV to 4.2 when elected. This will
- // cause an IncompatibleServerVersion error when connecting with a 4.0 binVersion node.
- // Therefore, we wait until the replica set is initiated with FCV4.0 before switching the
- // binVersion to 4.0.
- rst.restart(1, {binVersion: '4.0'});
+TestData.replSetFeatureCompatibilityVersion = '4.0';
+const rst = new ReplSetTest({
+ nodes: [
+ {binVersion: 'latest'},
+ {binVersion: 'latest'},
+ {binVersion: 'latest', rsConfig: {priority: 0, votes: 0}},
+ ]
+});
+rst.startSet();
+rst.initiate();
+// A 4.2 binVersion primary with empty data files will set FCV to 4.2 when elected. This will
+// cause an IncompatibleServerVersion error when connecting with a 4.0 binVersion node.
+// Therefore, we wait until the replica set is initiated with FCV 4.0 before switching the
+// binVersion to 4.0.
+rst.restart(1, {binVersion: '4.0'});
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const collName = 'mixed_version_transactions';
- const testColl = testDB.getCollection(collName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const collName = 'mixed_version_transactions';
+const testColl = testDB.getCollection(collName);
- testColl.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+testColl.drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase('test');
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase('test');
+const sessionColl = sessionDB.getCollection(collName);
- jsTestLog("Start a transaction and insert a document and then commit.");
- session.startTransaction();
- const doc1 = {_id: 1};
- assert.commandWorked(sessionColl.insert(doc1));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(doc1, sessionColl.findOne(doc1));
+jsTestLog("Start a transaction and insert a document and then commit.");
+session.startTransaction();
+const doc1 = {
+ _id: 1
+};
+assert.commandWorked(sessionColl.insert(doc1));
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(doc1, sessionColl.findOne(doc1));
- jsTestLog("Start a transaction and insert a document and then abort.");
- session.startTransaction();
- const doc2 = {_id: 2};
- assert.commandWorked(sessionColl.insert(doc2));
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.eq(null, sessionColl.findOne(doc2));
+jsTestLog("Start a transaction and insert a document and then abort.");
+session.startTransaction();
+const doc2 = {
+ _id: 2
+};
+assert.commandWorked(sessionColl.insert(doc2));
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.eq(null, sessionColl.findOne(doc2));
- jsTestLog("Have the node on 4.0 binVersion become the new primary.");
- rst.stepUp(rst.nodes[1]);
- const newPrimary = rst.getPrimary();
+jsTestLog("Have the node on 4.0 binVersion become the new primary.");
+rst.stepUp(rst.nodes[1]);
+const newPrimary = rst.getPrimary();
- const newSession = newPrimary.startSession({causalConsistency: false});
- const newSessionDB = newSession.getDatabase('test');
- const newSessionColl = newSessionDB.getCollection(collName);
+const newSession = newPrimary.startSession({causalConsistency: false});
+const newSessionDB = newSession.getDatabase('test');
+const newSessionColl = newSessionDB.getCollection(collName);
- jsTestLog("Start a transaction and insert a document and then commit.");
- newSession.startTransaction();
- assert.commandWorked(newSessionColl.insert(doc2));
- assert.commandWorked(newSession.commitTransaction_forTesting());
- assert.eq(doc2, newSessionColl.findOne(doc2));
+jsTestLog("Start a transaction and insert a document and then commit.");
+newSession.startTransaction();
+assert.commandWorked(newSessionColl.insert(doc2));
+assert.commandWorked(newSession.commitTransaction_forTesting());
+assert.eq(doc2, newSessionColl.findOne(doc2));
- jsTestLog("Start a transaction and insert a document and then abort.");
- newSession.startTransaction();
- const doc3 = {_id: 3};
- assert.commandWorked(newSessionColl.insert(doc3));
- assert.commandWorked(newSession.abortTransaction_forTesting());
- assert.eq(null, newSessionColl.findOne(doc3));
-
- rst.stopSet();
+jsTestLog("Start a transaction and insert a document and then abort.");
+newSession.startTransaction();
+const doc3 = {
+ _id: 3
+};
+assert.commandWorked(newSessionColl.insert(doc3));
+assert.commandWorked(newSession.abortTransaction_forTesting());
+assert.eq(null, newSessionColl.findOne(doc3));
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
index 9bd702323f4..ee9b5e65032 100644
--- a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
+++ b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
@@ -15,51 +15,50 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- /* Start a ShardingTest with a 'last-stable' mongos so that a 'last-stable'
- * shard can be added. (A 'last-stable' shard cannot be added from a
- * current mongos because the wire protocol must be presumed different.)
- */
- var st = new ShardingTest({
- shards: 1,
- other: {
- mongosOptions: {binVersion: 'last-stable'},
- shardOptions: {binVersion: 'last-stable'},
- shardAsReplicaSet: false
- }
- });
-
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
+/* Start a ShardingTest with a 'last-stable' mongos so that a 'last-stable'
+ * shard can be added. (A 'last-stable' shard cannot be added from a
+ * current mongos because the wire protocol must be presumed different.)
+ */
+var st = new ShardingTest({
+ shards: 1,
+ other: {
+ mongosOptions: {binVersion: 'last-stable'},
+ shardOptions: {binVersion: 'last-stable'},
+ shardAsReplicaSet: false
+ }
+});
- // Start a current-version mongos.
- var newMongos = MongoRunner.runMongos({configdb: st._configDB});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
- // Write commands report failure by returning writeError:
+// Start a current-version mongos.
+var newMongos = MongoRunner.runMongos({configdb: st._configDB});
- assert.writeErrorWithCode(newMongos.getDB('test').foo.insert({x: 1}),
- ErrorCodes.IncompatibleServerVersion);
+// Write commands report failure by returning writeError:
- assert.writeErrorWithCode(newMongos.getDB('test').foo.update({x: 1}, {x: 1, y: 2}),
- ErrorCodes.IncompatibleServerVersion);
+assert.writeErrorWithCode(newMongos.getDB('test').foo.insert({x: 1}),
+ ErrorCodes.IncompatibleServerVersion);
- assert.writeErrorWithCode(newMongos.getDB('test').foo.remove({x: 1}),
- ErrorCodes.IncompatibleServerVersion);
+assert.writeErrorWithCode(newMongos.getDB('test').foo.update({x: 1}, {x: 1, y: 2}),
+ ErrorCodes.IncompatibleServerVersion);
- // Query commands, on failure, throw instead:
+assert.writeErrorWithCode(newMongos.getDB('test').foo.remove({x: 1}),
+ ErrorCodes.IncompatibleServerVersion);
- let res;
- res = newMongos.getDB('test').runCommand({find: 'foo'});
- assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+// Query commands, on failure, throw instead:
- res = newMongos.getDB('test').runCommand({find: 'foo', filter: {x: 1}});
- assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+let res;
+res = newMongos.getDB('test').runCommand({find: 'foo'});
+assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
- res = newMongos.getDB('test').runCommand({aggregate: 'foo', pipeline: [], cursor: {}});
- assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+res = newMongos.getDB('test').runCommand({find: 'foo', filter: {x: 1}});
+assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
- MongoRunner.stopMongos(newMongos);
- st.stop();
+res = newMongos.getDB('test').runCommand({aggregate: 'foo', pipeline: [], cursor: {}});
+assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+MongoRunner.stopMongos(newMongos);
+st.stop();
})();
diff --git a/jstests/multiVersion/now_variable_fcv.js b/jstests/multiVersion/now_variable_fcv.js
index b766809056e..a8a284f8f04 100644
--- a/jstests/multiVersion/now_variable_fcv.js
+++ b/jstests/multiVersion/now_variable_fcv.js
@@ -1,41 +1,39 @@
/**
*/
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod({binVersion: "latest"});
- const db = conn.getDB(jsTest.name());
+const conn = MongoRunner.runMongod({binVersion: "latest"});
+const db = conn.getDB(jsTest.name());
- const coll = db[jsTest.name()];
- const view42 = "viewWithNow42";
- coll.drop();
- assert.commandWorkedOrFailedWithCode(db.runCommand({drop: view42}),
- ErrorCodes.NamespaceNotFound);
+const coll = db[jsTest.name()];
+const view42 = "viewWithNow42";
+coll.drop();
+assert.commandWorkedOrFailedWithCode(db.runCommand({drop: view42}), ErrorCodes.NamespaceNotFound);
- // Just insert a single document so we have something to work with.
- assert.writeOK(coll.insert({a: 1}));
+// Just insert a single document so we have something to work with.
+assert.writeOK(coll.insert({a: 1}));
- assert.commandWorked(
- db.createView(view42, coll.getName(), [{$addFields: {timeField: "$$NOW"}}]),
- 'Expected a view with $$NOW to succeed');
+assert.commandWorked(db.createView(view42, coll.getName(), [{$addFields: {timeField: "$$NOW"}}]),
+ 'Expected a view with $$NOW to succeed');
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- // It should not be possble to create a view with $$NOW in the 4.0 mode.
- assert.commandFailedWithCode(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]),
- ErrorCodes.QueryFeatureNotAllowed,
- 'Expected a view with $$NOW to fail');
+// It should not be possible to create a view with $$NOW while the FCV is 4.0.
+assert.commandFailedWithCode(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]),
+ ErrorCodes.QueryFeatureNotAllowed,
+ 'Expected a view with $$NOW to fail');
- // It should not be possble to create a view with $$CLUSTER_TIME in the 4.0 mode.
- assert.commandFailedWithCode(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]),
- ErrorCodes.QueryFeatureNotAllowed,
- 'Expected a view with $$CLUSTER_TIME to fail');
+// It should not be possible to create a view with $$CLUSTER_TIME while the FCV is 4.0.
+assert.commandFailedWithCode(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]),
+ ErrorCodes.QueryFeatureNotAllowed,
+ 'Expected a view with $$CLUSTER_TIME to fail');
- // But querying the existing views continue to work.
- assert.commandWorked(db.runCommand({aggregate: view42, pipeline: [], cursor: {}}),
- 'Expected an aggregate with view to work');
+// But querying the existing views continues to work.
+assert.commandWorked(db.runCommand({aggregate: view42, pipeline: [], cursor: {}}),
+ 'Expected an aggregate with view to work');
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/multiVersion/remove_feature_compatibility_version.js b/jstests/multiVersion/remove_feature_compatibility_version.js
index 7c73d2847a1..b4c67b77a0f 100644
--- a/jstests/multiVersion/remove_feature_compatibility_version.js
+++ b/jstests/multiVersion/remove_feature_compatibility_version.js
@@ -3,17 +3,16 @@
* or removing the FCV document should not be allowed.
*/
(function() {
- 'use strict';
+'use strict';
- let standalone = MongoRunner.runMongod();
- assert.neq(null, standalone, 'mongod was unable to start up');
- let adminDB = standalone.getDB('admin');
+let standalone = MongoRunner.runMongod();
+assert.neq(null, standalone, 'mongod was unable to start up');
+let adminDB = standalone.getDB('admin');
- // Renaming the collection or deleting the document should fail.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {renameCollection: 'admin.system.version', to: 'admin.dummy.collection'}),
- ErrorCodes.IllegalOperation);
- assert.writeErrorWithCode(adminDB.system.version.remove({}), 40670);
- MongoRunner.stopMongod(standalone);
+// Renaming the collection or deleting the document should fail.
+assert.commandFailedWithCode(
+ adminDB.runCommand({renameCollection: 'admin.system.version', to: 'admin.dummy.collection'}),
+ ErrorCodes.IllegalOperation);
+assert.writeErrorWithCode(adminDB.system.version.remove({}), 40670);
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/multiVersion/shard_collection_between_mixed_version_mongods.js b/jstests/multiVersion/shard_collection_between_mixed_version_mongods.js
index 9cae2839266..c8a6ab82351 100644
--- a/jstests/multiVersion/shard_collection_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/shard_collection_between_mixed_version_mongods.js
@@ -6,57 +6,57 @@
load("./jstests/multiVersion/libs/verify_versions.js");
(function() {
- "use strict";
-
- var options = {
- shards: [{binVersion: "latest"}, {binVersion: "4.0.1"}, {binVersion: "4.0.1"}],
- mongos: 1,
- other: {
- mongosOptions: {binVersion: "latest"},
- configOptions: {binVersion: "latest"},
- shardAsReplicaSet: true
- }
- };
-
- var st = new ShardingTest(options);
- assert.binVersion(st.shard0, "latest");
- assert.binVersion(st.shard1, "4.0.1");
- assert.binVersion(st.shard2, "4.0.1");
- assert.binVersion(st.s0, "latest");
-
- var mongos = st.s0;
- var admin = mongos.getDB('admin');
-
- const kDBOnShardWithLatestBinary = "DBWithPrimaryOnLatestBinary";
- const kNSOnLatestShard = kDBOnShardWithLatestBinary + ".Coll";
- const kDBOnShardWithOldBinary = "DBWithPrimaryOnOldBinary";
- const kNSOnOldShard = kDBOnShardWithOldBinary + ".Coll";
-
- assert.commandWorked(admin.runCommand({enableSharding: kDBOnShardWithLatestBinary}));
- assert.commandWorked(admin.runCommand({enableSharding: kDBOnShardWithOldBinary}));
- st.ensurePrimaryShard(kDBOnShardWithLatestBinary, st.shard0.shardName);
- st.ensurePrimaryShard(kDBOnShardWithOldBinary, st.shard1.shardName);
-
- // Test that shardCollection succeeds when both the config server and primary shard are
- // running with latest binVersion, but other shards are running with 4.0.1 which does not
- // have the new shardCollection protocol.
- assert.commandWorked(admin.runCommand({shardCollection: kNSOnLatestShard, key: {a: 1}}));
-
- // Test that shardCollection succeeds when the config server is running with the latest
- // binVersion, but the primary is running with 4.0.1.
- assert.commandWorked(admin.runCommand({shardCollection: kNSOnOldShard, key: {a: 1}}));
-
- mongos.getDB(kDBOnShardWithLatestBinary).Coll.drop();
- mongos.getDB(kDBOnShardWithOldBinary).Coll.drop();
-
- // Test that shardCollection with a hashed shard key succeeds when both the config server and
- // primary shard are running with latest binVersion, but other shards are running with 4.0.1
- // which does not have the new shardCollection protocol.
- assert.commandWorked(admin.runCommand({shardCollection: kNSOnLatestShard, key: {a: "hashed"}}));
-
- // Test that shardCollection with a hashed shard key succeeds when the config server is running
- // with the latest binVersion, but the primary is running with 4.0.1.
- assert.commandWorked(admin.runCommand({shardCollection: kNSOnOldShard, key: {a: "hashed"}}));
-
- st.stop();
+"use strict";
+
+var options = {
+ shards: [{binVersion: "latest"}, {binVersion: "4.0.1"}, {binVersion: "4.0.1"}],
+ mongos: 1,
+ other: {
+ mongosOptions: {binVersion: "latest"},
+ configOptions: {binVersion: "latest"},
+ shardAsReplicaSet: true
+ }
+};
+
+var st = new ShardingTest(options);
+assert.binVersion(st.shard0, "latest");
+assert.binVersion(st.shard1, "4.0.1");
+assert.binVersion(st.shard2, "4.0.1");
+assert.binVersion(st.s0, "latest");
+
+var mongos = st.s0;
+var admin = mongos.getDB('admin');
+
+const kDBOnShardWithLatestBinary = "DBWithPrimaryOnLatestBinary";
+const kNSOnLatestShard = kDBOnShardWithLatestBinary + ".Coll";
+const kDBOnShardWithOldBinary = "DBWithPrimaryOnOldBinary";
+const kNSOnOldShard = kDBOnShardWithOldBinary + ".Coll";
+
+assert.commandWorked(admin.runCommand({enableSharding: kDBOnShardWithLatestBinary}));
+assert.commandWorked(admin.runCommand({enableSharding: kDBOnShardWithOldBinary}));
+st.ensurePrimaryShard(kDBOnShardWithLatestBinary, st.shard0.shardName);
+st.ensurePrimaryShard(kDBOnShardWithOldBinary, st.shard1.shardName);
+
+// Test that shardCollection succeeds when both the config server and primary shard are
+// running with latest binVersion, but other shards are running with 4.0.1 which does not
+// have the new shardCollection protocol.
+assert.commandWorked(admin.runCommand({shardCollection: kNSOnLatestShard, key: {a: 1}}));
+
+// Test that shardCollection succeeds when the config server is running with the latest
+// binVersion, but the primary is running with 4.0.1.
+assert.commandWorked(admin.runCommand({shardCollection: kNSOnOldShard, key: {a: 1}}));
+
+mongos.getDB(kDBOnShardWithLatestBinary).Coll.drop();
+mongos.getDB(kDBOnShardWithOldBinary).Coll.drop();
+
+// Test that shardCollection with a hashed shard key succeeds when both the config server and
+// primary shard are running with latest binVersion, but other shards are running with 4.0.1
+// which does not have the new shardCollection protocol.
+assert.commandWorked(admin.runCommand({shardCollection: kNSOnLatestShard, key: {a: "hashed"}}));
+
+// Test that shardCollection with a hashed shard key succeeds when the config server is running
+// with the latest binVersion, but the primary is running with 4.0.1.
+assert.commandWorked(admin.runCommand({shardCollection: kNSOnOldShard, key: {a: "hashed"}}));
+
+st.stop();
})();
diff --git a/jstests/multiVersion/sharded_txn_downgrade_cluster.js b/jstests/multiVersion/sharded_txn_downgrade_cluster.js
index 394cb89903f..892cfb3ded4 100644
--- a/jstests/multiVersion/sharded_txn_downgrade_cluster.js
+++ b/jstests/multiVersion/sharded_txn_downgrade_cluster.js
@@ -10,87 +10,87 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js");
- load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/multiVersion/libs/multi_cluster.js");
+load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
- const dbName = "test";
- const collName = "sharded_txn_downgrade_cluster";
+const dbName = "test";
+const collName = "sharded_txn_downgrade_cluster";
- // Start a cluster with two shards at the latest version.
- const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "latest");
+// Start a cluster with two shards at the latest version.
+const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "latest");
- const txnIds = {
- commit: {lsid: {id: UUID()}, txnNumber: 0},
- commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
- write: {lsid: {id: UUID()}, txnNumber: 0},
- };
+const txnIds = {
+ commit: {lsid: {id: UUID()}, txnNumber: 0},
+ commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
+ write: {lsid: {id: UUID()}, txnNumber: 0},
+};
- let testDB = st.s.getDB(dbName);
+let testDB = st.s.getDB(dbName);
- // Retryable writes and transactions with and without prepare should work.
- assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
- assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+// Retryable writes and transactions with and without prepare should work.
+assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
+assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- // commitTransaction for both transactions and the retryable write should be retryable.
- assert.commandWorked(retryCommit(testDB, txnIds.commit));
- assert.commandWorked(retryCommit(testDB, txnIds.commitMulti));
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// commitTransaction for both transactions and the retryable write should be retryable.
+assert.commandWorked(retryCommit(testDB, txnIds.commit));
+assert.commandWorked(retryCommit(testDB, txnIds.commitMulti));
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // Downgrade featureCompatibilityVersion.
- assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+// Downgrade featureCompatibilityVersion.
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
- // Only the retryable write can be retried. Can't retry the multi shard transaction because it
- // uses coordinateCommit, which is not allowed in FCV 4.0.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- assert.commandFailedWithCode(retryCommit(testDB, txnIds.commit), ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(retryCommit(testDB, txnIds.commitMulti),
- ErrorCodes.CommandNotSupported);
+// Only the retryable write can be retried. Can't retry the multi shard transaction because it
+// uses coordinateCommit, which is not allowed in FCV 4.0.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+assert.commandFailedWithCode(retryCommit(testDB, txnIds.commit), ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(retryCommit(testDB, txnIds.commitMulti),
+ ErrorCodes.CommandNotSupported);
- downgradeUniqueIndexesScript(st.s.getDB("test"));
+downgradeUniqueIndexesScript(st.s.getDB("test"));
- // Downgrade the mongos servers first.
- jsTestLog("Downgrading mongos servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+// Downgrade the mongos servers first.
+jsTestLog("Downgrading mongos servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
- // Then downgrade the shard servers.
- jsTestLog("Downgrading shard servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+// Then downgrade the shard servers.
+jsTestLog("Downgrading shard servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
- // Then downgrade the config servers.
- jsTestLog("Downgrading config servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+// Then downgrade the config servers.
+jsTestLog("Downgrading config servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
- testDB = st.s.getDB(dbName);
+testDB = st.s.getDB(dbName);
- // Can still retry the retryable write.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// Can still retry the retryable write.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // The txnIds used for the earlier commits should be re-usable because their history was
- // removed.
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commit);
+// The txnIds used for the earlier commits should be re-usable because their history was
+// removed.
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commit);
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commitMulti);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commitMulti);
- // Can perform a new operation on each session.
- Object.keys(txnIds).forEach((txnIdKey) => {
- txnIds[txnIdKey].txnNumber += 1;
- });
+// Can perform a new operation on each session.
+Object.keys(txnIds).forEach((txnIdKey) => {
+ txnIds[txnIdKey].txnNumber += 1;
+});
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- st.stop();
+st.stop();
})();
diff --git a/jstests/multiVersion/sharded_txn_upgrade_cluster.js b/jstests/multiVersion/sharded_txn_upgrade_cluster.js
index 04c3bddfde7..6b89679aaab 100644
--- a/jstests/multiVersion/sharded_txn_upgrade_cluster.js
+++ b/jstests/multiVersion/sharded_txn_upgrade_cluster.js
@@ -10,73 +10,72 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js");
- load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/multiVersion/libs/multi_cluster.js");
+load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
- const dbName = "test";
- const collName = "sharded_txn_upgrade_cluster";
+const dbName = "test";
+const collName = "sharded_txn_upgrade_cluster";
- // Start a cluster with two shards at the last stable version.
- const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "last-stable");
+// Start a cluster with two shards at the last stable version.
+const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "last-stable");
- const txnIds = {
- commit: {lsid: {id: UUID()}, txnNumber: 0},
- commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
- write: {lsid: {id: UUID()}, txnNumber: 0},
- };
+const txnIds = {
+ commit: {lsid: {id: UUID()}, txnNumber: 0},
+ commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
+ write: {lsid: {id: UUID()}, txnNumber: 0},
+};
- let testDB = st.s.getDB(dbName);
+let testDB = st.s.getDB(dbName);
- // Only retryable writes work and they are retryable.
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// Only retryable writes work and they are retryable.
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // Upgrade the config servers.
- jsTestLog("Upgrading config servers.");
- st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+// Upgrade the config servers.
+jsTestLog("Upgrading config servers.");
+st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
- // Then upgrade the shard servers.
- jsTestLog("Upgrading shard servers.");
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+// Then upgrade the shard servers.
+jsTestLog("Upgrading shard servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
- // Then upgrade mongos servers.
- jsTestLog("Upgrading mongos servers.");
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+// Then upgrade mongos servers.
+jsTestLog("Upgrading mongos servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
- testDB = st.s.getDB(dbName);
+testDB = st.s.getDB(dbName);
- // Can still retry the retryable write.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// Can still retry the retryable write.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // Transactions that don't use prepare are allowed in FCV 4.0 with a 4.2 binary mongos.
- assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
+// Transactions that don't use prepare are allowed in FCV 4.0 with a 4.2 binary mongos.
+assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
- // Multi shard transactions will fail because coordinateCommit is not allowed in FCV 4.0.
- assert.commandFailedWithCode(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}),
- ErrorCodes.CommandNotSupported);
+// Multi shard transactions will fail because coordinateCommit is not allowed in FCV 4.0.
+assert.commandFailedWithCode(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}),
+ ErrorCodes.CommandNotSupported);
- // Upgrade the cluster's feature compatibility version to the latest.
- assert.commandWorked(
- st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
+// Upgrade the cluster's feature compatibility version to the latest.
+assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
- // Can still retry the retryable write and the committed transaction.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- assert.commandWorked(retryCommit(testDB, txnIds.commit));
+// Can still retry the retryable write and the committed transaction.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+assert.commandWorked(retryCommit(testDB, txnIds.commit));
- // Can perform a new operation on each session.
- Object.keys(txnIds).forEach((txnIdKey) => {
- txnIds[txnIdKey].txnNumber += 1;
- });
+// Can perform a new operation on each session.
+Object.keys(txnIds).forEach((txnIdKey) => {
+ txnIds[txnIdKey].txnNumber += 1;
+});
- assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
- assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
+assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- st.stop();
+st.stop();
})();
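The rolling-upgrade sequence exercised above reduces to a short sketch. This assumes a ShardingTest `st` started on "last-stable" binaries and the `latestFCV` constant from jstests/libs/feature_compatibility_version.js; `rollingUpgradeToLatest` is a hypothetical name, not a helper in the tree:

    function rollingUpgradeToLatest(st) {
        // Config servers first, so they can serve metadata to newer shards.
        st.upgradeCluster("latest",
                          {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
        // Shards next.
        st.upgradeCluster("latest",
                          {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
        // Mongos last, so a router is never newer than the servers it routes to.
        st.upgradeCluster("latest",
                          {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
        // Only once every binary is upgraded is the feature compatibility version bumped.
        assert.commandWorked(
            st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
    }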
diff --git a/jstests/multiVersion/skip_level_upgrade.js b/jstests/multiVersion/skip_level_upgrade.js
index 61c4904b0c7..6f268be451a 100644
--- a/jstests/multiVersion/skip_level_upgrade.js
+++ b/jstests/multiVersion/skip_level_upgrade.js
@@ -14,79 +14,78 @@
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/get_index_helpers.js');
+load('jstests/libs/get_index_helpers.js');
- const dbpath = MongoRunner.dataPath + 'skip_level_upgrade';
- resetDbpath(dbpath);
+const dbpath = MongoRunner.dataPath + 'skip_level_upgrade';
+resetDbpath(dbpath);
- // We set noCleanData to true in order to preserve the data files within an iteration.
- const defaultOptions = {
- dbpath: dbpath,
- noCleanData: true,
- };
+// We set noCleanData to true in order to preserve the data files within an iteration.
+const defaultOptions = {
+ dbpath: dbpath,
+ noCleanData: true,
+};
- // This lists all binary versions older than the last-stable version.
- // TODO SERVER-26792: In the future, we should have a common place from which both the
- // multiversion setup procedure and this test get information about supported major releases.
- const versions = [
- {binVersion: '3.2', testCollection: 'three_two'},
- {binVersion: '3.4', testCollection: 'three_four'},
- {binVersion: '3.6', testCollection: 'three_six'}
- ];
+// This lists all binary versions older than the last-stable version.
+// TODO SERVER-26792: In the future, we should have a common place from which both the
+// multiversion setup procedure and this test get information about supported major releases.
+const versions = [
+ {binVersion: '3.2', testCollection: 'three_two'},
+ {binVersion: '3.4', testCollection: 'three_four'},
+ {binVersion: '3.6', testCollection: 'three_six'}
+];
- // Iterate through versions specified in the versions list, and follow the steps outlined at
- // the top of this test file.
- for (let i = 0; i < versions.length; i++) {
- let version = versions[i];
- let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
+// Iterate through versions specified in the versions list, and follow the steps outlined at
+// the top of this test file.
+for (let i = 0; i < versions.length; i++) {
+ let version = versions[i];
+ let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
- // Start up an old binary version mongod.
- let conn = MongoRunner.runMongod(mongodOptions);
- let port = conn.port;
+ // Start up an old binary version mongod.
+ let conn = MongoRunner.runMongod(mongodOptions);
+ let port = conn.port;
- assert.neq(null,
- conn,
- 'mongod was unable able to start with version ' + tojson(version.binVersion));
+ assert.neq(
+        null, conn, 'mongod was unable to start with version ' + tojson(version.binVersion));
- // Set up a collection on an old binary version node with one document and an index, and
- // then shut it down.
- let testDB = conn.getDB('test');
- assert.commandWorked(testDB.createCollection(version.testCollection));
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
- assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
- MongoRunner.stopMongod(conn);
+ // Set up a collection on an old binary version node with one document and an index, and
+ // then shut it down.
+ let testDB = conn.getDB('test');
+ assert.commandWorked(testDB.createCollection(version.testCollection));
+ assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
+ MongoRunner.stopMongod(conn);
- // Restart the mongod with the latest binary version on the old version's data files.
- // Should fail due to being a skip level upgrade.
- mongodOptions = Object.extend({binVersion: 'latest'}, defaultOptions);
- conn = MongoRunner.runMongod(mongodOptions);
- assert.eq(null, conn);
+ // Restart the mongod with the latest binary version on the old version's data files.
+ // Should fail due to being a skip level upgrade.
+ mongodOptions = Object.extend({binVersion: 'latest'}, defaultOptions);
+ conn = MongoRunner.runMongod(mongodOptions);
+ assert.eq(null, conn);
- // Restart the mongod with the latest version with --repair. Should fail due to being a
- // skip level upgrade.
- let returnCode = runMongoProgram("mongod", "--port", port, "--repair", "--dbpath", dbpath);
- assert.neq(returnCode, 0, "expected mongod --repair to fail with a skip level upgrade");
+ // Restart the mongod with the latest version with --repair. Should fail due to being a
+ // skip level upgrade.
+ let returnCode = runMongoProgram("mongod", "--port", port, "--repair", "--dbpath", dbpath);
+ assert.neq(returnCode, 0, "expected mongod --repair to fail with a skip level upgrade");
- // Restart the mongod in the originally specified version. Should succeed.
- mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
- conn = MongoRunner.runMongod(mongodOptions);
+ // Restart the mongod in the originally specified version. Should succeed.
+ mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
+ conn = MongoRunner.runMongod(mongodOptions);
- // Verify that the data and indices from previous iterations are still accessible.
- testDB = conn.getDB('test');
- assert.eq(1,
- testDB[version.testCollection].count(),
- `data from ${version.testCollection} should be available; options: ` +
- tojson(mongodOptions));
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[version.testCollection].getIndexes(), {a: 1}),
- `index from ${version.testCollection} should be available; options: ` +
- tojson(mongodOptions));
+ // Verify that the data and indices from previous iterations are still accessible.
+ testDB = conn.getDB('test');
+ assert.eq(1,
+ testDB[version.testCollection].count(),
+ `data from ${version.testCollection} should be available; options: ` +
+ tojson(mongodOptions));
+ assert.neq(
+ null,
+ GetIndexHelpers.findByKeyPattern(testDB[version.testCollection].getIndexes(), {a: 1}),
+ `index from ${version.testCollection} should be available; options: ` +
+ tojson(mongodOptions));
- MongoRunner.stopMongod(conn);
+ MongoRunner.stopMongod(conn);
- resetDbpath(dbpath);
- }
+ resetDbpath(dbpath);
+}
})();
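The rule this test drives can be stated as a predicate. A hypothetical sketch, with the release list hard-coded for illustration rather than read from any MongoDB API:

    function isDirectUpgrade(fromVersion, toVersion) {
        // Data files written by one release can only be opened by the next one.
        const releaseOrder = ['3.2', '3.4', '3.6', '4.0', '4.2'];
        const from = releaseOrder.indexOf(fromVersion);
        const to = releaseOrder.indexOf(toVersion);
        return from !== -1 && to !== -1 && to - from === 1;
    }
    assert(isDirectUpgrade('3.6', '4.0'));
    assert(!isDirectUpgrade('3.4', '4.0'));  // Skip-level: must stop at 3.6 first.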
diff --git a/jstests/multiVersion/text_index_limits.js b/jstests/multiVersion/text_index_limits.js
index 5860d5b0726..bc52678ed9c 100644
--- a/jstests/multiVersion/text_index_limits.js
+++ b/jstests/multiVersion/text_index_limits.js
@@ -1,53 +1,53 @@
// TODO SERVER-36440: Remove this test
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- // Start the node with FCV 4.0
- let conn = MongoRunner.runMongod({binVersion: "latest"});
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- var db = conn.getDB('test');
- var t = db.text_index_limits;
- t.drop();
+// Start the node with FCV 4.0
+let conn = MongoRunner.runMongod({binVersion: "latest"});
+assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+var db = conn.getDB('test');
+var t = db.text_index_limits;
+t.drop();
- assert.commandWorked(t.createIndex({comments: "text"}));
+assert.commandWorked(t.createIndex({comments: "text"}));
- // 1. Test number of unique terms exceeds 400,000
- let commentsWithALotOfUniqueWords = "";
- // 26^4 = 456,976 > 400,000
- for (let ch1 = 97; ch1 < 123; ch1++) {
- for (let ch2 = 97; ch2 < 123; ch2++) {
- for (let ch3 = 97; ch3 < 123; ch3++) {
- for (let ch4 = 97; ch4 < 123; ch4++) {
- let word = String.fromCharCode(ch1, ch2, ch3, ch4);
- commentsWithALotOfUniqueWords += word + " ";
- }
+// 1. Test number of unique terms exceeds 400,000
+let commentsWithALotOfUniqueWords = "";
+// 26^4 = 456,976 > 400,000
+for (let ch1 = 97; ch1 < 123; ch1++) {
+ for (let ch2 = 97; ch2 < 123; ch2++) {
+ for (let ch3 = 97; ch3 < 123; ch3++) {
+ for (let ch4 = 97; ch4 < 123; ch4++) {
+ let word = String.fromCharCode(ch1, ch2, ch3, ch4);
+ commentsWithALotOfUniqueWords += word + " ";
}
}
}
- assert.commandFailedWithCode(
- db.runCommand(
- {insert: t.getName(), documents: [{_id: 1, comments: commentsWithALotOfUniqueWords}]}),
- 16732);
+}
+assert.commandFailedWithCode(
+ db.runCommand(
+ {insert: t.getName(), documents: [{_id: 1, comments: commentsWithALotOfUniqueWords}]}),
+ 16732);
- // 2. Test total size of index keys for unique terms exceeds 4MB
+// 2. Test total size of index keys for unique terms exceeds 4MB
- // 26^3 = 17576 < 400,000
- let prefix = "a".repeat(400);
- let commentsWithWordsOfLargeSize = "";
- for (let ch1 = 97; ch1 < 123; ch1++) {
- for (let ch2 = 97; ch2 < 123; ch2++) {
- for (let ch3 = 97; ch3 < 123; ch3++) {
- let word = String.fromCharCode(ch1, ch2, ch3);
- commentsWithWordsOfLargeSize += prefix + word + " ";
- }
+// 26^3 = 17576 < 400,000
+let prefix = "a".repeat(400);
+let commentsWithWordsOfLargeSize = "";
+for (let ch1 = 97; ch1 < 123; ch1++) {
+ for (let ch2 = 97; ch2 < 123; ch2++) {
+ for (let ch3 = 97; ch3 < 123; ch3++) {
+ let word = String.fromCharCode(ch1, ch2, ch3);
+ commentsWithWordsOfLargeSize += prefix + word + " ";
}
}
- assert.commandFailedWithCode(
- db.runCommand(
- {insert: t.getName(), documents: [{_id: 2, comments: commentsWithWordsOfLargeSize}]}),
- 16733);
+}
+assert.commandFailedWithCode(
+ db.runCommand(
+ {insert: t.getName(), documents: [{_id: 2, comments: commentsWithWordsOfLargeSize}]}),
+ 16733);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
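Both failure cases above follow from simple arithmetic. A sketch that checks the numbers, where the 400,000-term and 4MB figures come from the test's own comments and error codes rather than from a server API:

    const uniqueTerms = Math.pow(26, 4);  // 456,976 distinct four-letter words.
    assert.gt(uniqueTerms, 400 * 1000);   // Exceeds the unique-term limit.
    const totalKeyBytes = Math.pow(26, 3) * (400 + 3);  // 17,576 words of ~403 bytes each.
    assert.gt(totalKeyBytes, 4 * 1024 * 1024);          // ~7.1MB of keys exceeds the 4MB cap.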
diff --git a/jstests/multiVersion/unique_index_empty_collmod.js b/jstests/multiVersion/unique_index_empty_collmod.js
index 732c6e1ef63..cd303aefde8 100644
--- a/jstests/multiVersion/unique_index_empty_collmod.js
+++ b/jstests/multiVersion/unique_index_empty_collmod.js
@@ -4,40 +4,40 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const newIndexFormatVersion = 12;
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
- const nodes = rst.startSet();
- rst.initiate();
+const newIndexFormatVersion = 12;
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
+const nodes = rst.startSet();
+rst.initiate();
- let dbName = 'test';
- let collName = 't';
- const primary = rst.getPrimary();
- const primaryDb = primary.getDB(dbName);
- const secondary = rst.getSecondary();
- const coll = primaryDb.getCollection(collName);
+let dbName = 'test';
+let collName = 't';
+const primary = rst.getPrimary();
+const primaryDb = primary.getDB(dbName);
+const secondary = rst.getSecondary();
+const coll = primaryDb.getCollection(collName);
- assert.commandWorked(coll.createIndex({a: 1}, {unique: true}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.commandWorked(primaryDb.adminCommand({setFeatureCompatibilityVersion: '4.0'}));
- assert.commandWorked(primaryDb.runCommand({collMod: coll.getName()}));
+assert.commandWorked(coll.createIndex({a: 1}, {unique: true}));
+assert.writeOK(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(primaryDb.adminCommand({setFeatureCompatibilityVersion: '4.0'}));
+assert.commandWorked(primaryDb.runCommand({collMod: coll.getName()}));
- // Wait for replication of the index creation.
- rst.awaitReplication();
- const secondaryDb = secondary.getDB(dbName);
- const coll_secondary = secondaryDb.getCollection(collName);
- const index = coll_secondary.getIndexes();
- assert.eq(index[1].unique, true, "Expected a unique index: " + tojson(index[1]));
- // Validate that the unique index is not updated on the secondary after an empty collMod
- // command.
- const indexFormatVersion = coll_secondary.aggregate({$collStats: {storageStats: {}}})
- .next()
- .storageStats.indexDetails[index[1].name]
- .metadata.formatVersion;
- assert.eq(indexFormatVersion,
- newIndexFormatVersion,
- "Expected index format version 12 for the unique index: " + tojson(index[1]));
+// Wait for replication of the index creation.
+rst.awaitReplication();
+const secondaryDb = secondary.getDB(dbName);
+const coll_secondary = secondaryDb.getCollection(collName);
+const index = coll_secondary.getIndexes();
+assert.eq(index[1].unique, true, "Expected a unique index: " + tojson(index[1]));
+// Validate that the unique index is not updated on the secondary after an empty collMod
+// command.
+const indexFormatVersion = coll_secondary.aggregate({$collStats: {storageStats: {}}})
+ .next()
+ .storageStats.indexDetails[index[1].name]
+ .metadata.formatVersion;
+assert.eq(indexFormatVersion,
+ newIndexFormatVersion,
+ "Expected index format version 12 for the unique index: " + tojson(index[1]));
- rst.stopSet();
+rst.stopSet();
})();
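The $collStats lookup above generalizes into a small helper. A sketch, where `coll` and `indexName` stand in for any collection object and index name:

    function getIndexFormatVersion(coll, indexName) {
        // Mirrors the aggregation used above to read an index's on-disk format version.
        const stats = coll.aggregate([{$collStats: {storageStats: {}}}]).next();
        return stats.storageStats.indexDetails[indexName].metadata.formatVersion;
    }
    // e.g. getIndexFormatVersion(secondaryDb.t, 'a_1') would return 12 here.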
diff --git a/jstests/multiVersion/update_shard_key_disallowed_fcv40.js b/jstests/multiVersion/update_shard_key_disallowed_fcv40.js
index ee7648679ab..492924c939e 100644
--- a/jstests/multiVersion/update_shard_key_disallowed_fcv40.js
+++ b/jstests/multiVersion/update_shard_key_disallowed_fcv40.js
@@ -2,83 +2,197 @@
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+let st = new ShardingTest({
+ shards: {rs0: {nodes: 3, binVersion: "latest"}, rs1: {nodes: 3, binVersion: "latest"}},
+ mongos: 1,
+ other: {mongosOptions: {binVersion: "latest"}, configOptions: {binVersion: "latest"}}
+});
+let mongos = st.s0;
+let kDbName = "test";
+let collName = "foo";
+let ns = kDbName + "." + collName;
+
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+
+function shardCollectionAndMoveChunks(docsToInsert, shardKey, splitDoc, moveDoc) {
+ for (let i = 0; i < docsToInsert.length; i++) {
+ assert.commandWorked(mongos.getDB(kDbName).foo.insert(docsToInsert[i]));
+ }
+
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(shardKey));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
+ assert.commandWorked(mongos.adminCommand({split: ns, find: splitDoc}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: moveDoc, to: st.shard1.shardName}));
+
+ assert.commandWorked(mongos.adminCommand({flushRouterConfig: 1}));
+ st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns});
+ st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns});
+ st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: kDbName});
+ st.rs1.getPrimary().adminCommand({_flushDatabaseCacheUpdates: kDbName});
+}
+
+function assertCannotUpdateShardKey(isMixedCluster) {
+ // ------------------------------------------------
+ // Test changes to shard key run as retryable write
+ // ------------------------------------------------
+ let session = st.s.startSession({retryWrites: true});
+ let sessionDB = session.getDatabase(kDbName);
+
+ // Updates to full shard key
+ shardCollectionMoveChunks(
+ st, kDbName, ns, {x: 1}, [{x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
+ cleanupOrphanedDocs(st, ns);
+
+ // Assert that updating the shard key when the doc would remain on the same shard fails for
+ // both modify and replacement updates
+ assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 100}}));
+ assert.writeError(sessionDB.foo.update({x: 80}, {x: 100}));
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 100}}});
+ });
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 100}});
+ });
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/sharding/libs/update_shard_key_helpers.js");
+ // Assert that updating the shard key when the doc would move shards fails for both modify
+ // and replacement updates.
+ assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 3}}));
+ assert.commandFailedWithCode(sessionDB.foo.update({x: 80}, {x: 3}),
+ [ErrorCodes.ImmutableField, ErrorCodes.InvalidOptions]);
+ assert.eq(1, mongos.getDB(kDbName).foo.find({x: 80}).itcount());
+ assert.eq(0, mongos.getDB(kDbName).foo.find({x: 3}).itcount());
- let st = new ShardingTest({
- shards: {rs0: {nodes: 3, binVersion: "latest"}, rs1: {nodes: 3, binVersion: "latest"}},
- mongos: 1,
- other: {mongosOptions: {binVersion: "latest"}, configOptions: {binVersion: "latest"}}
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 3}}});
});
- let mongos = st.s0;
- let kDbName = "test";
- let collName = "foo";
- let ns = kDbName + "." + collName;
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, st.shard0.shardName);
-
- function shardCollectionAndMoveChunks(docsToInsert, shardKey, splitDoc, moveDoc) {
- for (let i = 0; i < docsToInsert.length; i++) {
- assert.commandWorked(mongos.getDB(kDbName).foo.insert(docsToInsert[i]));
- }
-
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(shardKey));
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
- assert.commandWorked(mongos.adminCommand({split: ns, find: splitDoc}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: moveDoc, to: st.shard1.shardName}));
-
- assert.commandWorked(mongos.adminCommand({flushRouterConfig: 1}));
- st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns});
- st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns});
- st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: kDbName});
- st.rs1.getPrimary().adminCommand({_flushDatabaseCacheUpdates: kDbName});
- }
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 3}});
+ });
+
+ mongos.getDB(kDbName).foo.drop();
+
+ // Updates to partial shard key
+ shardCollectionMoveChunks(st,
+ kDbName,
+ ns,
+ {x: 1, y: 1},
+ [{x: 30, y: 4}, {x: 50, y: 50}, {x: 80, y: 100}],
+ {x: 50, y: 50},
+ {x: 80, y: 100});
+ cleanupOrphanedDocs(st, ns);
+
+ // Assert that updating the shard key when the doc would remain on the same shard fails for
+ // both modify and replacement updates
+ assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 100}}));
+ assert.writeError(sessionDB.foo.update({x: 80}, {x: 100}));
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 100}}});
+ });
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 100}});
+ });
+
+ // Assert that updating the shard key when the doc would move shards fails for both modify
+ // and replacement updates
+ assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 3}}));
+ assert.writeError(sessionDB.foo.update({x: 80}, {x: 3}));
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 3}}});
+ });
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 3}});
+ });
+
+ mongos.getDB(kDbName).foo.drop();
+
+ // -----------------------------------------------
+ // Test changes to shard key run in a transaction
+ // -----------------------------------------------
+ session = st.s.startSession();
+ sessionDB = session.getDatabase(kDbName);
+
+ // Updates to full shard key
+ shardCollectionMoveChunks(
+ st, kDbName, ns, {x: 1}, [{x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
+ cleanupOrphanedDocs(st, ns);
+
+ // Assert that updating the shard key when the doc would remain on the same shard fails for
+ // both modify and replacement updates
+ session.startTransaction();
+ assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 100}}));
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ session.startTransaction();
+ assert.throws(function() {
+ sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 100}});
+ });
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ mongos.getDB(kDbName).foo.drop();
- function assertCannotUpdateShardKey(isMixedCluster) {
- // ------------------------------------------------
- // Test changes to shard key run as retryable write
- // ------------------------------------------------
- let session = st.s.startSession({retryWrites: true});
- let sessionDB = session.getDatabase(kDbName);
+ if (isMixedCluster) {
+ // Assert that updating the shard key when the doc would move shards fails on commit
+ // because one of the participants is not in FCV 4.2. If the original write is a
+ // retryable write, the write will fail when mongos attempts to run commitTransaction.
+ // If the original write is part of a transaction, the write itself will complete
+ // successfully, but the transaction will fail to commit.
+
+ // Retryable write - updates to full shard key
+ session = st.s.startSession({retryWrites: true});
+ sessionDB = session.getDatabase(kDbName);
- // Updates to full shard key
shardCollectionMoveChunks(
st, kDbName, ns, {x: 1}, [{x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
cleanupOrphanedDocs(st, ns);
- // Assert that updating the shard key when the doc would remain on the same shard fails for
- // both modify and replacement updates
- assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 100}}));
- assert.writeError(sessionDB.foo.update({x: 80}, {x: 100}));
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 100}}});
- });
+ // Doc will move shards
+ assert.writeError(sessionDB.foo.update({x: 30}, {$set: {x: 100}}));
assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 100}});
+ sessionDB.foo.findAndModify({query: {x: 30}, update: {$set: {x: 100}}});
});
- // Assert that updating the shard key when the doc would move shards fails for both modify
- // and replacement updates.
- assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 3}}));
- assert.commandFailedWithCode(sessionDB.foo.update({x: 80}, {x: 3}),
- [ErrorCodes.ImmutableField, ErrorCodes.InvalidOptions]);
- assert.eq(1, mongos.getDB(kDbName).foo.find({x: 80}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({x: 3}).itcount());
-
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 3}}});
- });
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 3}});
- });
+ // Doc will remain on the same shard. Because shard 0 is on FCV 4.2, these should
+ // complete successfully.
+ sessionDB.foo.findAndModify({query: {x: 30}, update: {$set: {x: 5}}});
+ st.rs0.awaitReplication();
+ assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 5}).itcount());
+ assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 5}).itcount());
+ assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 30}).itcount());
+ assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 30}).itcount());
+
+ sessionDB.foo.findAndModify({query: {x: 5}, update: {x: 10}});
+ st.rs0.awaitReplication();
+ assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 5}).itcount());
+ assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 5}).itcount());
+ assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 10}).itcount());
+ assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 10}).itcount());
+
+ assert.commandWorked(sessionDB.foo.update({x: 10}, {$set: {x: 25}}));
+ st.rs0.awaitReplication();
+ assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 25}).itcount());
+ assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 25}).itcount());
+ assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 10}).itcount());
+ assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 10}).itcount());
+
+ assert.commandWorked(sessionDB.foo.update({x: 25}, {x: 3}));
+ st.rs0.awaitReplication();
+ assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 25}).itcount());
+ assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 25}).itcount());
+ assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 3}).itcount());
+ assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 3}).itcount());
mongos.getDB(kDbName).foo.drop();
- // Updates to partial shard key
+ // Retryable write - updates to partial shard key
shardCollectionMoveChunks(st,
kDbName,
ns,
@@ -88,186 +202,72 @@
{x: 80, y: 100});
cleanupOrphanedDocs(st, ns);
- // Assert that updating the shard key when the doc would remain on the same shard fails for
- // both modify and replacement updates
- assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 100}}));
- assert.writeError(sessionDB.foo.update({x: 80}, {x: 100}));
+ assert.writeError(sessionDB.foo.update({x: 30}, {$set: {x: 100}}));
assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 100}}});
- });
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 100}});
- });
-
- // Assert that updating the shard key when the doc would move shards fails for both modify
- // and replacement updates
- assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 3}}));
- assert.writeError(sessionDB.foo.update({x: 80}, {x: 3}));
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {$set: {x: 3}}});
- });
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 3}});
+ sessionDB.foo.findAndModify({query: {x: 30}, update: {x: 100}});
});
mongos.getDB(kDbName).foo.drop();
- // -----------------------------------------------
- // Test changes to shard key run in a transaction
- // -----------------------------------------------
+ // Transactional writes
session = st.s.startSession();
sessionDB = session.getDatabase(kDbName);
- // Updates to full shard key
shardCollectionMoveChunks(
- st, kDbName, ns, {x: 1}, [{x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
+ st, kDbName, ns, {x: 1}, [{x: 10}, {x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
cleanupOrphanedDocs(st, ns);
- // Assert that updating the shard key when the doc would remain on the same shard fails for
- // both modify and replacement updates
session.startTransaction();
- assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 100}}));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ assert.commandWorked(sessionDB.foo.update({x: 30}, {$set: {x: 100}}));
+ assert.commandFailed(session.commitTransaction_forTesting());
+
+ assert.eq(1, mongos.getDB(kDbName).foo.find({x: 30}).itcount());
+ assert.eq(0, mongos.getDB(kDbName).foo.find({x: 100}).itcount());
session.startTransaction();
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 100}});
- });
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ sessionDB.foo.findAndModify({query: {x: 30}, update: {x: 100}});
+ assert.commandFailed(session.commitTransaction_forTesting());
- mongos.getDB(kDbName).foo.drop();
+ assert.eq(1, mongos.getDB(kDbName).foo.find({x: 30}).itcount());
+ assert.eq(0, mongos.getDB(kDbName).foo.find({x: 100}).itcount());
- if (isMixedCluster) {
- // Assert that updating the shard key when the doc would move shards fails on commit
- // because one of the participants is not in FCV 4.2. If the original write is a
- // retryable write, the write will fail when mongos attempts to run commitTransaction.
- // If the original write is part of a transaction, the write itself will complete
- // successfully, but the transaction will fail to commit.
-
- // Retryable write - updates to full shard key
- session = st.s.startSession({retryWrites: true});
- sessionDB = session.getDatabase(kDbName);
-
- shardCollectionMoveChunks(
- st, kDbName, ns, {x: 1}, [{x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
- cleanupOrphanedDocs(st, ns);
-
- // Doc will move shards
- assert.writeError(sessionDB.foo.update({x: 30}, {$set: {x: 100}}));
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 30}, update: {$set: {x: 100}}});
- });
-
- // Doc will remain on the same shard. Because shard 0 is on FCV 4.2, these should
- // complete successfully.
- sessionDB.foo.findAndModify({query: {x: 30}, update: {$set: {x: 5}}});
- st.rs0.awaitReplication();
- assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 5}).itcount());
- assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 5}).itcount());
- assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 30}).itcount());
- assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 30}).itcount());
-
- sessionDB.foo.findAndModify({query: {x: 5}, update: {x: 10}});
- st.rs0.awaitReplication();
- assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 5}).itcount());
- assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 5}).itcount());
- assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 10}).itcount());
- assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 10}).itcount());
-
- assert.commandWorked(sessionDB.foo.update({x: 10}, {$set: {x: 25}}));
- st.rs0.awaitReplication();
- assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 25}).itcount());
- assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 25}).itcount());
- assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 10}).itcount());
- assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 10}).itcount());
-
- assert.commandWorked(sessionDB.foo.update({x: 25}, {x: 3}));
- st.rs0.awaitReplication();
- assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 25}).itcount());
- assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 25}).itcount());
- assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 3}).itcount());
- assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 3}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // Retryable write - updates to partial shard key
- shardCollectionMoveChunks(st,
- kDbName,
- ns,
- {x: 1, y: 1},
- [{x: 30, y: 4}, {x: 50, y: 50}, {x: 80, y: 100}],
- {x: 50, y: 50},
- {x: 80, y: 100});
- cleanupOrphanedDocs(st, ns);
-
- assert.writeError(sessionDB.foo.update({x: 30}, {$set: {x: 100}}));
- assert.throws(function() {
- sessionDB.foo.findAndModify({query: {x: 30}, update: {x: 100}});
- });
-
- mongos.getDB(kDbName).foo.drop();
-
- // Transactional writes
- session = st.s.startSession();
- sessionDB = session.getDatabase(kDbName);
-
- shardCollectionMoveChunks(
- st, kDbName, ns, {x: 1}, [{x: 10}, {x: 30}, {x: 50}, {x: 80}], {x: 50}, {x: 80});
- cleanupOrphanedDocs(st, ns);
-
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({x: 30}, {$set: {x: 100}}));
- assert.commandFailed(session.commitTransaction_forTesting());
-
- assert.eq(1, mongos.getDB(kDbName).foo.find({x: 30}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({x: 100}).itcount());
-
- session.startTransaction();
- sessionDB.foo.findAndModify({query: {x: 30}, update: {x: 100}});
- assert.commandFailed(session.commitTransaction_forTesting());
-
- assert.eq(1, mongos.getDB(kDbName).foo.find({x: 30}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({x: 100}).itcount());
-
- // Doc will remain on the same shard. Because shard 0 is on FCV 4.2, this should
- // complete successfully.
- session.startTransaction();
- sessionDB.foo.findAndModify({query: {x: 10}, update: {x: 1}});
- assert.commandWorked(sessionDB.foo.update({x: 30}, {$set: {x: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- st.rs0.awaitReplication();
-
- assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 30}).itcount());
- assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 30}).itcount());
- assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 10}).itcount());
- assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 10}).itcount());
- assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 5}).itcount());
- assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 5}).itcount());
- assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 1}).itcount());
- assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 1}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
- }
+ // Doc will remain on the same shard. Because shard 0 is on FCV 4.2, this should
+ // complete successfully.
+ session.startTransaction();
+ sessionDB.foo.findAndModify({query: {x: 10}, update: {x: 1}});
+ assert.commandWorked(sessionDB.foo.update({x: 30}, {$set: {x: 5}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ st.rs0.awaitReplication();
+
+ assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 30}).itcount());
+ assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 30}).itcount());
+ assert.eq(0, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 10}).itcount());
+ assert.eq(0, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 10}).itcount());
+ assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 5}).itcount());
+ assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 5}).itcount());
+ assert.eq(1, st.rs0.getPrimary().getDB(kDbName).foo.find({x: 1}).itcount());
+ assert.eq(1, st.rs0.getSecondaries()[0].getDB(kDbName).foo.find({x: 1}).itcount());
+
+ mongos.getDB(kDbName).foo.drop();
}
+}
- // Check that updating the shard key fails when all shards are in FCV 4.0
- assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: "4.0"}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), "4.0");
- checkFCV(st.rs0.getPrimary().getDB("admin"), "4.0");
- checkFCV(st.rs1.getPrimary().getDB("admin"), "4.0");
+// Check that updating the shard key fails when all shards are in FCV 4.0
+assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: "4.0"}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), "4.0");
+checkFCV(st.rs0.getPrimary().getDB("admin"), "4.0");
+checkFCV(st.rs1.getPrimary().getDB("admin"), "4.0");
- assertCannotUpdateShardKey(false);
+assertCannotUpdateShardKey(false);
- // Check that updating the shard key fails when shard0 is in FCV 4.2 but shard 1 is in FCV 4.0
- assert.commandWorked(
- st.rs0.getPrimary().getDB("admin").runCommand({setFeatureCompatibilityVersion: "4.2"}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), "4.0");
- checkFCV(st.rs0.getPrimary().getDB("admin"), "4.2");
- checkFCV(st.rs1.getPrimary().getDB("admin"), "4.0");
+// Check that updating the shard key fails when shard0 is in FCV 4.2 but shard 1 is in FCV 4.0
+assert.commandWorked(
+ st.rs0.getPrimary().getDB("admin").runCommand({setFeatureCompatibilityVersion: "4.2"}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), "4.0");
+checkFCV(st.rs0.getPrimary().getDB("admin"), "4.2");
+checkFCV(st.rs1.getPrimary().getDB("admin"), "4.0");
- assertCannotUpdateShardKey(true);
+assertCannotUpdateShardKey(true);
- st.stop();
+st.stop();
})();
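The behavior under test reduces to a short repro. A sketch assuming a mongos connection `mongos` and a sharded collection test.foo keyed on {x: 1} in a cluster whose FCV is 4.0:

    // In FCV 4.0, any update that would change a document's shard key value is rejected,
    // whether or not the document would end up on a different shard.
    const session = mongos.startSession({retryWrites: true});
    const sessionDB = session.getDatabase("test");
    assert.writeError(sessionDB.foo.update({x: 80}, {$set: {x: 3}}));  // Modify-style update.
    assert.throws(function() {
        sessionDB.foo.findAndModify({query: {x: 80}, update: {x: 3}});  // Replacement update.
    });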
diff --git a/jstests/multiVersion/upgrade_downgrade_cluster.js b/jstests/multiVersion/upgrade_downgrade_cluster.js
index 0801ae57986..7e450c87b71 100644
--- a/jstests/multiVersion/upgrade_downgrade_cluster.js
+++ b/jstests/multiVersion/upgrade_downgrade_cluster.js
@@ -18,173 +18,173 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- /**
- * @param isRSCluster {bool} use replica set shards.
- */
- var runTest = function(isRSCluster) {
- "use strict";
-
- const kMinVersion = 5;
- const kCurrentVerion = 6;
-
- jsTest.log("Starting" + (isRSCluster ? " (replica set)" : "") + " cluster" + "...");
-
- var testCRUDAndAgg = function(db) {
- assert.writeOK(db.foo.insert({x: 1}));
- assert.writeOK(db.foo.insert({x: -1}));
- assert.writeOK(db.foo.update({x: 1}, {$set: {y: 1}}));
- assert.writeOK(db.foo.update({x: -1}, {$set: {y: 1}}));
- var doc1 = db.foo.findOne({x: 1});
- assert.eq(1, doc1.y);
- var doc2 = db.foo.findOne({x: -1});
- assert.eq(1, doc2.y);
-
- // Make sure a user can always do an aggregation with an $out using the 4.0-style
- // syntax.
- // TODO SERVER-36930 This immediately invoked function can be removed when we are sure
- // all nodes in the cluster understand both the new and the old $out syntax.
- (function testAggOut() {
- db.sanity_check.drop();
- assert.eq(0, db.foo.aggregate([{$out: "sanity_check"}]).itcount());
- assert.eq(2, db.sanity_check.find().itcount());
- }());
-
- assert.writeOK(db.foo.remove({x: 1}, true));
- assert.writeOK(db.foo.remove({x: -1}, true));
- assert.eq(null, db.foo.findOne());
- };
-
- var st = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: {
- mongosOptions: {binVersion: "last-stable"},
- configOptions: {binVersion: "last-stable"},
- shardOptions: {binVersion: "last-stable"},
-
- rsOptions: {binVersion: "last-stable"},
- rs: isRSCluster,
- shardAsReplicaSet: false
- }
- });
- st.configRS.awaitReplication();
-
- // check that config.version document gets initialized properly
- var version = st.s.getCollection('config.version').findOne();
- assert.eq(version.minCompatibleVersion, kMinVersion);
- assert.eq(version.currentVersion, kCurrentVerion);
- var clusterID = version.clusterId;
- assert.neq(null, clusterID);
- assert.eq(version.excluding, undefined);
-
- // Setup sharded collection
- assert.commandWorked(st.s.adminCommand({enableSharding: 'sharded'}));
- st.ensurePrimaryShard('sharded', st.shard0.shardName);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: 'sharded.foo', key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: 'sharded.foo', middle: {x: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'sharded.foo', find: {x: 1}, to: st.shard1.shardName}));
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // upgrade the config servers first
- jsTest.log('upgrading config servers');
- st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false});
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Then upgrade the shards.
- jsTest.log('upgrading shard servers');
- st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Finally, upgrade mongos
- jsTest.log('upgrading mongos servers');
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Check that version document is unmodified.
- version = st.s.getCollection('config.version').findOne();
- assert.eq(version.minCompatibleVersion, kMinVersion);
- assert.eq(version.currentVersion, kCurrentVerion);
- assert.eq(clusterID, version.clusterId);
- assert.eq(version.excluding, undefined);
-
- ///////////////////////////////////////////////////////////////////////////////////////////
- // Downgrade back
+/**
+ * @param isRSCluster {bool} use replica set shards.
+ */
+var runTest = function(isRSCluster) {
+ "use strict";
+
+ const kMinVersion = 5;
+    const kCurrentVersion = 6;
+
+ jsTest.log("Starting" + (isRSCluster ? " (replica set)" : "") + " cluster" +
+ "...");
+
+ var testCRUDAndAgg = function(db) {
+ assert.writeOK(db.foo.insert({x: 1}));
+ assert.writeOK(db.foo.insert({x: -1}));
+ assert.writeOK(db.foo.update({x: 1}, {$set: {y: 1}}));
+ assert.writeOK(db.foo.update({x: -1}, {$set: {y: 1}}));
+ var doc1 = db.foo.findOne({x: 1});
+ assert.eq(1, doc1.y);
+ var doc2 = db.foo.findOne({x: -1});
+ assert.eq(1, doc2.y);
+
+ // Make sure a user can always do an aggregation with an $out using the 4.0-style
+ // syntax.
+ // TODO SERVER-36930 This immediately invoked function can be removed when we are sure
+ // all nodes in the cluster understand both the new and the old $out syntax.
+ (function testAggOut() {
+ db.sanity_check.drop();
+ assert.eq(0, db.foo.aggregate([{$out: "sanity_check"}]).itcount());
+ assert.eq(2, db.sanity_check.find().itcount());
+ }());
+
+ assert.writeOK(db.foo.remove({x: 1}, true));
+ assert.writeOK(db.foo.remove({x: -1}, true));
+ assert.eq(null, db.foo.findOne());
+ };
- jsTest.log('downgrading mongos servers');
- st.upgradeCluster("last-stable", {upgradeConfigs: false, upgradeShards: false});
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ mongosOptions: {binVersion: "last-stable"},
+ configOptions: {binVersion: "last-stable"},
+ shardOptions: {binVersion: "last-stable"},
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ rsOptions: {binVersion: "last-stable"},
+ rs: isRSCluster,
+ shardAsReplicaSet: false
+ }
+ });
+ st.configRS.awaitReplication();
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
+ // check that config.version document gets initialized properly
+ var version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, kMinVersion);
+    assert.eq(version.currentVersion, kCurrentVersion);
+ var clusterID = version.clusterId;
+ assert.neq(null, clusterID);
+ assert.eq(version.excluding, undefined);
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+    // Set up the sharded collection.
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'sharded'}));
+ st.ensurePrimaryShard('sharded', st.shard0.shardName);
- jsTest.log('downgrading shard servers');
- st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeConfigs: false});
+ assert.commandWorked(st.s.adminCommand({shardCollection: 'sharded.foo', key: {x: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: 'sharded.foo', middle: {x: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'sharded.foo', find: {x: 1}, to: st.shard1.shardName}));
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
+ // upgrade the config servers first
+ jsTest.log('upgrading config servers');
+ st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false});
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- jsTest.log('downgrading config servers');
- st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeShards: false});
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
+ // Then upgrade the shards.
+ jsTest.log('upgrading shard servers');
+ st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- // Check that version document is unmodified.
- version = st.s.getCollection('config.version').findOne();
- assert.eq(version.minCompatibleVersion, kMinVersion);
- assert.eq(version.currentVersion, kCurrentVerion);
- assert.eq(clusterID, version.clusterId);
- assert.eq(version.excluding, undefined);
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
- st.stop();
- };
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Finally, upgrade mongos
+ jsTest.log('upgrading mongos servers');
+ st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, kMinVersion);
+    assert.eq(version.currentVersion, kCurrentVersion);
+ assert.eq(clusterID, version.clusterId);
+ assert.eq(version.excluding, undefined);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////
+ // Downgrade back
+
+ jsTest.log('downgrading mongos servers');
+ st.upgradeCluster("last-stable", {upgradeConfigs: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ jsTest.log('downgrading shard servers');
+ st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeConfigs: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ jsTest.log('downgrading config servers');
+ st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, kMinVersion);
+    assert.eq(version.currentVersion, kCurrentVersion);
+ assert.eq(clusterID, version.clusterId);
+ assert.eq(version.excluding, undefined);
- runTest(false);
- runTest(true);
+ st.stop();
+};
+runTest(false);
+runTest(true);
})();
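The downgrade half of the test mirrors the upgrade in reverse. Condensed, assuming the same ShardingTest `st`:

    // Mongos first, then shards, then config servers, so a router never talks to
    // servers older than itself.
    st.upgradeCluster("last-stable", {upgradeConfigs: false, upgradeShards: false});  // mongos
    st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeConfigs: false});  // shards
    st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeShards: false});   // configs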
diff --git a/jstests/multiVersion/verify_versions_test.js b/jstests/multiVersion/verify_versions_test.js
index 8041cd5bff7..c6dbfa64e2c 100644
--- a/jstests/multiVersion/verify_versions_test.js
+++ b/jstests/multiVersion/verify_versions_test.js
@@ -10,81 +10,81 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- function assertBinVersionsEqual(v1, v2) {
- assert(MongoRunner.areBinVersionsTheSame(v1, v2),
- "Expected \"" + v1 + "\" to equal \"" + v2 + "\"");
- }
-
- function assertBinVersionsNotEqual(v1, v2) {
- assert(!MongoRunner.areBinVersionsTheSame(v1, v2),
- "Expected \"" + v1 + "\" not to equal \"" + v2 + "\"");
- }
-
- function assertBinVersionComparesHigher(v1, v2) {
- assert.eq(1,
- MongoRunner.compareBinVersions(v1, v2),
- "Expected \"" + v1 + "\" to compare higher than \"" + v2 + "\"");
- }
-
- function assertBinVersionComparesLower(v1, v2) {
- assert.eq(-1,
- MongoRunner.compareBinVersions(v1, v2),
- "Expected \"" + v1 + "\" to compare lower than \"" + v2 + "\"");
- }
-
- function assertBinVersionComparesEqual(v1, v2) {
- assert.eq(0,
- MongoRunner.compareBinVersions(v1, v2),
- "Expected \"" + v1 + "\" to compare equal to \"" + v2 + "\"");
- }
-
- // The current version is in the 4.2 series. This has to be changed very time we bump
- // the major version pair, but it provides a useful test of assumptions.
- assertBinVersionsEqual("4.2", version());
- assertBinVersionComparesEqual("4.2", version());
-
- // "latest" is the same version as the shell, "last-stable" is not.
- assertBinVersionsEqual("latest", version());
- assertBinVersionsEqual("", "latest");
- assertBinVersionsEqual("", version());
-
- assertBinVersionComparesEqual("latest", version());
- assertBinVersionComparesEqual("", "latest");
- assertBinVersionComparesEqual("", version());
-
- assertBinVersionsNotEqual("latest", "last-stable");
- assertBinVersionsNotEqual("last-stable", version());
-
- assertBinVersionComparesHigher("latest", "last-stable");
- assertBinVersionComparesLower("last-stable", version());
-
- // 3.2 means 3.2.z for any value of z. It does not mean 3.0 or 3.0.w.
- assertBinVersionsEqual("3.2", "3.2.4");
- assertBinVersionsEqual("3.2.4", "3.2");
- assertBinVersionsNotEqual("3.2", "3.0");
- assertBinVersionsNotEqual("3.0.9", "3.2.9");
-
- assertBinVersionComparesEqual("3.2", "3.2.4");
- assertBinVersionComparesEqual("3.2.4", "3.2");
- assertBinVersionComparesHigher("3.2", "3.0");
- assertBinVersionComparesLower("3.0.9", "3.2.9");
-
- assertBinVersionsEqual("3.4", "3.4.0-abcd");
- assertBinVersionsEqual("3.4.0", "3.4.0-abcd");
-
- assertBinVersionComparesEqual("3.4", "3.4.0-abcd");
- assertBinVersionComparesEqual("3.4.0", "3.4.0-abcd");
- assertBinVersionComparesHigher("3.6.0", "3.4.0-abcd");
- assertBinVersionComparesHigher("4.0.0", "3.6.99-abcd");
- assertBinVersionComparesHigher("3.4.1", "3.4.0-abcd");
- assertBinVersionComparesLower("3.4.0-abc", "3.4.1-xyz");
-
- // Prohibit versions that don't have at least two components (3 is no good, 3.2 is).
- assert.throws(MongoRunner.areBinVersionsTheSame, ["3", "3.2"]);
- assert.throws(MongoRunner.areBinVersionsTheSame, ["3.2", "3"]);
-
- // Throw an error when versions differ only by githash.
- assert.throws(MongoRunner.compareBinVersions, ["3.4.1-abc", "3.4.1-xyz"]);
+"use strict";
+
+function assertBinVersionsEqual(v1, v2) {
+ assert(MongoRunner.areBinVersionsTheSame(v1, v2),
+ "Expected \"" + v1 + "\" to equal \"" + v2 + "\"");
+}
+
+function assertBinVersionsNotEqual(v1, v2) {
+ assert(!MongoRunner.areBinVersionsTheSame(v1, v2),
+ "Expected \"" + v1 + "\" not to equal \"" + v2 + "\"");
+}
+
+function assertBinVersionComparesHigher(v1, v2) {
+ assert.eq(1,
+ MongoRunner.compareBinVersions(v1, v2),
+ "Expected \"" + v1 + "\" to compare higher than \"" + v2 + "\"");
+}
+
+function assertBinVersionComparesLower(v1, v2) {
+ assert.eq(-1,
+ MongoRunner.compareBinVersions(v1, v2),
+ "Expected \"" + v1 + "\" to compare lower than \"" + v2 + "\"");
+}
+
+function assertBinVersionComparesEqual(v1, v2) {
+ assert.eq(0,
+ MongoRunner.compareBinVersions(v1, v2),
+ "Expected \"" + v1 + "\" to compare equal to \"" + v2 + "\"");
+}
+
+// The current version is in the 4.2 series. This has to be changed every time we bump
+// the major version pair, but it provides a useful test of assumptions.
+assertBinVersionsEqual("4.2", version());
+assertBinVersionComparesEqual("4.2", version());
+
+// "latest" is the same version as the shell, "last-stable" is not.
+assertBinVersionsEqual("latest", version());
+assertBinVersionsEqual("", "latest");
+assertBinVersionsEqual("", version());
+
+assertBinVersionComparesEqual("latest", version());
+assertBinVersionComparesEqual("", "latest");
+assertBinVersionComparesEqual("", version());
+
+assertBinVersionsNotEqual("latest", "last-stable");
+assertBinVersionsNotEqual("last-stable", version());
+
+assertBinVersionComparesHigher("latest", "last-stable");
+assertBinVersionComparesLower("last-stable", version());
+
+// 3.2 means 3.2.z for any value of z. It does not mean 3.0 or 3.0.w.
+assertBinVersionsEqual("3.2", "3.2.4");
+assertBinVersionsEqual("3.2.4", "3.2");
+assertBinVersionsNotEqual("3.2", "3.0");
+assertBinVersionsNotEqual("3.0.9", "3.2.9");
+
+assertBinVersionComparesEqual("3.2", "3.2.4");
+assertBinVersionComparesEqual("3.2.4", "3.2");
+assertBinVersionComparesHigher("3.2", "3.0");
+assertBinVersionComparesLower("3.0.9", "3.2.9");
+
+assertBinVersionsEqual("3.4", "3.4.0-abcd");
+assertBinVersionsEqual("3.4.0", "3.4.0-abcd");
+
+assertBinVersionComparesEqual("3.4", "3.4.0-abcd");
+assertBinVersionComparesEqual("3.4.0", "3.4.0-abcd");
+assertBinVersionComparesHigher("3.6.0", "3.4.0-abcd");
+assertBinVersionComparesHigher("4.0.0", "3.6.99-abcd");
+assertBinVersionComparesHigher("3.4.1", "3.4.0-abcd");
+assertBinVersionComparesLower("3.4.0-abc", "3.4.1-xyz");
+
+// Prohibit versions that don't have at least two components (3 is no good, 3.2 is).
+assert.throws(MongoRunner.areBinVersionsTheSame, ["3", "3.2"]);
+assert.throws(MongoRunner.areBinVersionsTheSame, ["3.2", "3"]);
+
+// Throw an error when versions differ only by githash.
+assert.throws(MongoRunner.compareBinVersions, ["3.4.1-abc", "3.4.1-xyz"]);
}());
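For quick reference, the return-value convention that the assertion helpers above encode, using calls that appear in the test itself:

    assert.eq(1, MongoRunner.compareBinVersions("latest", "last-stable"));  // Higher.
    assert.eq(0, MongoRunner.compareBinVersions("3.2", "3.2.4"));           // "3.2" matches any 3.2.z.
    assert.eq(-1, MongoRunner.compareBinVersions("3.0.9", "3.2.9"));        // Lower.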
diff --git a/jstests/multiVersion/view_definition_feature_compatibility_version.js b/jstests/multiVersion/view_definition_feature_compatibility_version.js
index c8d9eb54d12..221dfd6d77d 100644
--- a/jstests/multiVersion/view_definition_feature_compatibility_version.js
+++ b/jstests/multiVersion/view_definition_feature_compatibility_version.js
@@ -9,14 +9,14 @@
*/
(function() {
- "use strict";
+"use strict";
- const testName = "view_definition_feature_compatibility_version_multiversion";
- const dbpath = MongoRunner.dataPath + testName;
+const testName = "view_definition_feature_compatibility_version_multiversion";
+const dbpath = MongoRunner.dataPath + testName;
- // In order to avoid restarting the server for each test case, we declare all the test cases up
- // front, and test them all at once.
- const pipelinesWithNewFeatures = [
+// In order to avoid restarting the server for each test case, we declare all the test cases up
+// front, and test them all at once.
+const pipelinesWithNewFeatures = [
[{$addFields: {x: {$round: 4.57}}}],
[{$addFields: {x: {$trunc: [4.57, 1]}}}],
[{$addFields: {x: {$regexFind: {input: "string", regex: /st/}}}}],
@@ -49,148 +49,147 @@
}],
];
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
- assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB(testName);
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
+assert.neq(null, conn, "mongod was unable to start up");
+let testDB = conn.getDB(testName);
+
+// Explicitly set feature compatibility version 4.2.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+// Test that we are able to create a new view with any of the new features.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandWorked(
+ testDB.createView("firstView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.2`));
+
+// Test that we are able to modify an existing view to use any of the new features.
+pipelinesWithNewFeatures.forEach(function(pipe, i) {
+ assert(testDB["firstView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("firstView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "firstView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV 4.2`);
+});
+
+// Create an empty view which we will attempt to update to use 4.2 query features under
+// feature compatibility mode 4.0.
+assert.commandWorked(testDB.createView("emptyView", "coll", []));
+
+// Set the feature compatibility version to 4.0.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+// Reading against an existing view that uses 4.2 query features should not fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`));
+
+// Trying to create a new view using 4.2 query features should fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandFailedWithCode(
+ testDB.createView("view_fail" + i, "coll", pipe),
+ ErrorCodes.QueryFeatureNotAllowed,
+ `Expected *not* to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.0`));
+
+// Trying to update existing view to use 4.2 query features should also fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandFailedWithCode(
+ testDB.runCommand({collMod: "emptyView", viewOn: "coll", pipeline: pipe}),
+ ErrorCodes.QueryFeatureNotAllowed,
+ `Expected *not* to be able to modify view to use pipeline ${tojson(pipe)} while in FCV
+ 4.0`));
- // Explicitly set feature compatibility version 4.2.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
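+// Shut down the 4.2 binary so the same dbpath can be restarted on the 4.0 binary below.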
+MongoRunner.stopMongod(conn);
- // Test that we are able to create a new view with any of the new features.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(
- testDB.createView("firstView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.2`));
+// Starting up a 4.0 mongod with 4.2 query features will succeed.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
+assert.neq(null, conn, "mongod 4.0 was unable to start up");
+testDB = conn.getDB(testName);
- // Test that we are able to modify an existing view to use any of the new features.
- pipelinesWithNewFeatures.forEach(function(pipe, i) {
- assert(testDB["firstView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("firstView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "firstView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV 4.2`);
- });
-
- // Create an empty view which we will attempt to update to use 4.2 query features under
- // feature compatibility mode 4.0.
- assert.commandWorked(testDB.createView("emptyView", "coll", []));
-
- // Set the feature compatibility version to 4.0.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- // Read against an existing view using 4.2 query features should not fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`));
-
- // Trying to create a new view using 4.2 query features should fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailedWithCode(
- testDB.createView("view_fail" + i, "coll", pipe),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.0`));
-
- // Trying to update existing view to use 4.2 query features should also fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailedWithCode(
- testDB.runCommand({collMod: "emptyView", viewOn: "coll", pipeline: pipe}),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to modify view to use pipeline ${tojson(pipe)} while in FCV
- 4.0`));
+// Reads will fail against views with 4.2 query features when running a 4.0 binary.
+// Not checking the code returned on failure as it is not uniform across the various
+// 'pipeline' arguments tested.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandFailed(
+ testDB.runCommand({find: "firstView" + i}),
+ `Expected read against view with pipeline ${tojson(pipe)} to fail on 4.0 binary`));
+
+// Test that a read against a view that does not contain 4.2 query features succeeds.
+assert.commandWorked(testDB.runCommand({find: "emptyView"}));
- MongoRunner.stopMongod(conn);
-
- // Starting up a 4.0 mongod with 4.2 query features will succeed.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
- assert.neq(null, conn, "mongod 4.0 was unable to start up");
- testDB = conn.getDB(testName);
-
- // Reads will fail against views with 4.2 query features when running a 4.0 binary.
- // Not checking the code returned on failure as it is not uniform across the various
- // 'pipeline' arguments tested.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailed(
- testDB.runCommand({find: "firstView" + i}),
- `Expected read against view with pipeline ${tojson(pipe)} to fail on 4.0 binary`));
-
- // Test that a read against a view that does not contain 4.2 query features succeeds.
- assert.commandWorked(testDB.runCommand({find: "emptyView"}));
-
- MongoRunner.stopMongod(conn);
-
- // Starting up a 4.2 mongod should succeed, even though the feature compatibility version is
- // still set to 4.0.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
- testDB = conn.getDB(testName);
-
- // Read against an existing view using 4.2 query features should not fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`));
-
- // Set the feature compatibility version back to 4.2.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- pipelinesWithNewFeatures.forEach(function(pipe, i) {
- assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`);
- // Test that we are able to create a new view with any of the new features.
- assert.commandWorked(
- testDB.createView("secondView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.2`);
-
- // Test that we are able to update an existing view to use any of the new features.
- assert(testDB["secondView" + i].drop(),
- `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("secondView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "secondView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV 4.2`);
- });
-
- // Set the feature compatibility version to 4.0 and then restart with
- // internalValidateFeaturesAsMaster=false.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
- });
- assert.neq(null, conn, "mongod was unable to start up");
- testDB = conn.getDB(testName);
-
- pipelinesWithNewFeatures.forEach(function(pipe, i) {
- // Even though the feature compatibility version is 4.0, we should still be able to create a
- // view using 4.2 query features, because internalValidateFeaturesAsMaster is false.
- assert.commandWorked(
- testDB.createView("thirdView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.0 ` +
- `with internalValidateFeaturesAsMaster=false`);
-
- // We should also be able to modify a view to use 4.2 query features.
- assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV 4.0 ` +
- `with internalValidateFeaturesAsMaster=false`);
- });
-
- MongoRunner.stopMongod(conn);
-
- // Starting up a 4.0 mongod with 4.2 query features.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
- assert.neq(null, conn, "mongod 4.0 was unable to start up");
- testDB = conn.getDB(testName);
-
- // Existing views with 4.2 query features can be dropped.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert(testDB["firstView" + i].drop(),
- `Drop of view with pipeline ${tojson(pipe)} failed`));
- assert(testDB.system.views.drop(), "Drop of system.views collection failed");
-
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
+
+// Starting up a 4.2 mongod should succeed, even though the feature compatibility version is
+// still set to 4.0.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+testDB = conn.getDB(testName);
+
+// Read against an existing view using 4.2 query features should not fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`));
+
+// Set the feature compatibility version back to 4.2.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+pipelinesWithNewFeatures.forEach(function(pipe, i) {
+ assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`);
+ // Test that we are able to create a new view with any of the new features.
+ assert.commandWorked(
+ testDB.createView("secondView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.2`);
+
+ // Test that we are able to update an existing view to use any of the new features.
+ assert(testDB["secondView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("secondView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "secondView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV 4.2`);
+});
+
+// Set the feature compatibility version to 4.0 and then restart with
+// internalValidateFeaturesAsMaster=false.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ binVersion: "latest",
+ noCleanData: true,
+ setParameter: "internalValidateFeaturesAsMaster=false"
+});
+assert.neq(null, conn, "mongod was unable to start up");
+testDB = conn.getDB(testName);
+
+pipelinesWithNewFeatures.forEach(function(pipe, i) {
+ // Even though the feature compatibility version is 4.0, we should still be able to create a
+ // view using 4.2 query features, because internalValidateFeaturesAsMaster is false.
+ assert.commandWorked(
+ testDB.createView("thirdView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV 4.0 ` +
+ `with internalValidateFeaturesAsMaster=false`);
+
+ // We should also be able to modify a view to use 4.2 query features.
+ assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV 4.0 ` +
+ `with internalValidateFeaturesAsMaster=false`);
+});
+
+MongoRunner.stopMongod(conn);
+
+// Starting up a 4.0 mongod with 4.2 query features.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "4.0", noCleanData: true});
+assert.neq(null, conn, "mongod 4.0 was unable to start up");
+testDB = conn.getDB(testName);
+
+// Existing views with 4.2 query features can be dropped.
+pipelinesWithNewFeatures.forEach((pipe, i) =>
+ assert(testDB["firstView" + i].drop(),
+ `Drop of view with pipeline ${tojson(pipe)} failed`));
+assert(testDB.system.views.drop(), "Drop of system.views collection failed");
+
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/multiVersion/wildcard_index_feature_compatability_version.js b/jstests/multiVersion/wildcard_index_feature_compatability_version.js
index bb4516d54c2..4d0698b862e 100644
--- a/jstests/multiVersion/wildcard_index_feature_compatability_version.js
+++ b/jstests/multiVersion/wildcard_index_feature_compatability_version.js
@@ -8,150 +8,154 @@
* - A downgraded 4.0 node with a $** index fails to start due to Fatal Assertion 28782.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For isCollscan.
- load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/analyze_plan.js"); // For isCollscan.
+load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
+TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
- const nodeOptions40 = {binVersion: "last-stable"};
- const nodeOptions42 = {binVersion: "latest"};
+const nodeOptions40 = {
+ binVersion: "last-stable"
+};
+const nodeOptions42 = {
+ binVersion: "latest"
+};
- // Set up a new replSet consisting of 3 nodes, initially running on 4.0 binaries.
- const rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions40});
+// Set up a new replSet consisting of 3 nodes, initially running on 4.0 binaries.
+const rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions40});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
-
- rst.initiate();
-
- let testDB = rst.getPrimary().getDB(jsTestName());
- let coll = testDB.wildcard_index_fcv;
- coll.drop();
-
- // Verifies that the instance is running with the specified binary version and FCV.
- function assertVersionAndFCV(db, versions, fcv) {
- const majorMinorVersion = db.version().substring(0, 3);
- versions = (Array.isArray(versions) ? versions : [versions]);
- assert(versions.includes(majorMinorVersion));
- assert.eq(
- assert.commandWorked(db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}))
- .featureCompatibilityVersion.version,
- fcv);
- }
-
- // Restarts the given replset nodes, or the entire replset if no nodes are specified.
- function restartReplSetNodes(replSet, nodes, options) {
- const defaultOpts = {remember: true, appendOptions: true, startClean: false};
- options = Object.assign(defaultOpts, (options || {}));
- nodes = (nodes || replSet.nodes);
- assert(Array.isArray(nodes));
- for (let node of nodes) {
- // Merge the new options into the existing options for the given nodes.
- Object.assign(replSet.nodeOptions[`n${replSet.getNodeId(node)}`], options);
- }
- replSet.restart(nodes, options);
- }
-
- // Verify that the replset is on binary version 4.0 and FCV 4.0.
- assertVersionAndFCV(testDB, "4.0", "4.0");
-
- jsTestLog("Cannot create a $** index on a replset running binary 4.0.");
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
-
- // Upgrade the set to the new binary version, but keep the feature compatibility version at 4.0.
- rst.upgradeSet(nodeOptions42);
- testDB = rst.getPrimary().getDB(jsTestName());
- coll = testDB.wildcard_index_fcv;
- assertVersionAndFCV(testDB, ["4.1", "4.2"], "4.0");
-
- jsTestLog("Cannot create a $** index on binary 4.2 with FCV 4.0.");
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
-
- jsTestLog("Can create a $** index on binary 4.2 with FCV 4.2.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
- assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(coll.insert([{a: 1, b: 1}, {a: 2, b: 2}]));
- rst.awaitReplication();
-
- // Confirm that the index can be used to answer queries.
- let explainOutput = assert.commandWorked(coll.find({a: {$gt: 1}}).explain()).queryPlanner;
- assert(!isCollscan(testDB, explainOutput.winningPlan), () => tojson(explainOutput));
-
- jsTestLog("Can use an existing $** after downgrading FCV to 4.0.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- explainOutput = assert.commandWorked(coll.find({b: {$gt: 1}}).explain()).queryPlanner;
- assert(!isCollscan(testDB, explainOutput.winningPlan), () => tojson(explainOutput));
-
- jsTestLog("Cannot create a new $** after downgrading FCV to 4.0.");
- let coll_other = testDB.wildcard_index_fcv_other;
- assert.commandFailedWithCode(coll_other.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
-
- jsTestLog("Can restart the replset in FCV 4.0 with a $** index present.");
- restartReplSetNodes(rst);
- testDB = rst.getPrimary().getDB(jsTestName());
- coll = testDB.wildcard_index_fcv;
- assertVersionAndFCV(testDB, ["4.1", "4.2"], "4.0");
-
- // Verify that we can still successfully run queries on the $** index.
- explainOutput = assert.commandWorked(coll.find({a: {$gt: 1}}).explain()).queryPlanner;
- assert(!isCollscan(testDB, explainOutput.winningPlan), () => tojson(explainOutput));
-
- jsTestLog("Can restart the Secondaries in FCV 4.0 and resync the $** index from the Primary.");
- restartReplSetNodes(rst, rst.getSecondaries(), {startClean: true});
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
- // Verify that the Secondaries have both recreated the $** index.
- let secondaries = rst.getSecondaries();
- assert.eq(secondaries.length, 2);
- for (let sec of secondaries) {
- assert.eq(sec.getCollection(coll.getFullName())
- .aggregate([{$indexStats: {}}, {$match: {"key.$**": 1}}])
- .toArray()
- .length,
- 1);
- }
-
- jsTestLog("Can drop an existing $** index in FCV 4.0.");
- assert.commandWorked(coll.dropIndex({"$**": 1}));
-
- jsTestLog("Cannot recreate the dropped $** index in FCV 4.0.");
- assert.commandFailedWithCode(coll.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
-
- // Set the FCV to 4.2 and re-create the $** index. We need to test that a 4.0 binary fails to
- // start when a wildcard index that was built on 4.2 is still present in the catalog.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
- assert.commandWorked(coll.createIndex({"$**": 1}));
-
- jsTestLog("Cannot start 4.0 binary with $** index present.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
- assertVersionAndFCV(testDB, ["4.1", "4.2"], "4.0");
- secondaries = rst.getSecondaries();
- assert.eq(secondaries.length, 2);
- rst.awaitReplication();
- try {
- restartReplSetNodes(rst, [secondaries[0]], nodeOptions40);
- assert(false, "Expected 'restartReplSetNodes' to throw");
- } catch (err) {
- assert.eq(err.message, `Failed to start node ${rst.getNodeId(secondaries[0])}`);
- // In most cases we expect the node to fail with 28782 because it sees the wildcard index in
- // its catalog on startup and doesn't recognize the format. However in some cases the node
- // will start up having not completely persisted the index build before shutting down. In
- // these cases the node will attempt to re-build the index on startup and encounter a
- // different error (40590) upon trying to rebuild the wildcard index.
- assert(rawMongoProgramOutput().match("Fatal Assertion 28782") ||
- rawMongoProgramOutput().match("Fatal Assertion 40590"));
- }
-
- jsTestLog("Restart the failed node on binary 4.2 and gracefully shut down the replset.");
- Object.assign(rst.nodeOptions[`n${rst.getNodeId(secondaries[0])}`], nodeOptions42);
- rst.start(secondaries[0], nodeOptions42);
-
- rst.awaitReplication();
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
rst.stopSet();
+ return;
+}
+
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(jsTestName());
+let coll = testDB.wildcard_index_fcv;
+coll.drop();
+
+// Verifies that the instance is running with the specified binary version and FCV.
+function assertVersionAndFCV(db, versions, fcv) {
+ const majorMinorVersion = db.version().substring(0, 3);
+ versions = (Array.isArray(versions) ? versions : [versions]);
+ assert(versions.includes(majorMinorVersion));
+ assert.eq(
+ assert.commandWorked(db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}))
+ .featureCompatibilityVersion.version,
+ fcv);
+}
+
+// Restarts the given replset nodes, or the entire replset if no nodes are specified.
+function restartReplSetNodes(replSet, nodes, options) {
+ const defaultOpts = {remember: true, appendOptions: true, startClean: false};
+ options = Object.assign(defaultOpts, (options || {}));
+ nodes = (nodes || replSet.nodes);
+ assert(Array.isArray(nodes));
+ for (let node of nodes) {
+ // Merge the new options into the existing options for the given nodes.
+ Object.assign(replSet.nodeOptions[`n${replSet.getNodeId(node)}`], options);
+ }
+ replSet.restart(nodes, options);
+}
+
+// Verify that the replset is on binary version 4.0 and FCV 4.0.
+assertVersionAndFCV(testDB, "4.0", "4.0");
+
+jsTestLog("Cannot create a $** index on a replset running binary 4.0.");
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
+
+// Upgrade the set to the new binary version, but keep the feature compatibility version at 4.0.
+rst.upgradeSet(nodeOptions42);
+testDB = rst.getPrimary().getDB(jsTestName());
+coll = testDB.wildcard_index_fcv;
+assertVersionAndFCV(testDB, ["4.1", "4.2"], "4.0");
+
+jsTestLog("Cannot create a $** index on binary 4.2 with FCV 4.0.");
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
+
+jsTestLog("Can create a $** index on binary 4.2 with FCV 4.2.");
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.insert([{a: 1, b: 1}, {a: 2, b: 2}]));
+rst.awaitReplication();
+
+// Confirm that the index can be used to answer queries.
+let explainOutput = assert.commandWorked(coll.find({a: {$gt: 1}}).explain()).queryPlanner;
+assert(!isCollscan(testDB, explainOutput.winningPlan), () => tojson(explainOutput));
+
+jsTestLog("Can use an existing $** after downgrading FCV to 4.0.");
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+explainOutput = assert.commandWorked(coll.find({b: {$gt: 1}}).explain()).queryPlanner;
+assert(!isCollscan(testDB, explainOutput.winningPlan), () => tojson(explainOutput));
+
+jsTestLog("Cannot create a new $** after downgrading FCV to 4.0.");
+let coll_other = testDB.wildcard_index_fcv_other;
+assert.commandFailedWithCode(coll_other.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
+
+jsTestLog("Can restart the replset in FCV 4.0 with a $** index present.");
+restartReplSetNodes(rst);
+testDB = rst.getPrimary().getDB(jsTestName());
+coll = testDB.wildcard_index_fcv;
+assertVersionAndFCV(testDB, ["4.1", "4.2"], "4.0");
+
+// Verify that we can still successfully run queries on the $** index.
+explainOutput = assert.commandWorked(coll.find({a: {$gt: 1}}).explain()).queryPlanner;
+assert(!isCollscan(testDB, explainOutput.winningPlan), () => tojson(explainOutput));
+
+jsTestLog("Can restart the Secondaries in FCV 4.0 and resync the $** index from the Primary.");
+restartReplSetNodes(rst, rst.getSecondaries(), {startClean: true});
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
+// Verify that the Secondaries have both recreated the $** index.
+let secondaries = rst.getSecondaries();
+assert.eq(secondaries.length, 2);
+for (let sec of secondaries) {
+ assert.eq(sec.getCollection(coll.getFullName())
+ .aggregate([{$indexStats: {}}, {$match: {"key.$**": 1}}])
+ .toArray()
+ .length,
+ 1);
+}
+
+jsTestLog("Can drop an existing $** index in FCV 4.0.");
+assert.commandWorked(coll.dropIndex({"$**": 1}));
+
+jsTestLog("Cannot recreate the dropped $** index in FCV 4.0.");
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}), ErrorCodes.CannotCreateIndex);
+
+// Set the FCV to 4.2 and re-create the $** index. We need to test that a 4.0 binary fails to
+// start when a wildcard index that was built on 4.2 is still present in the catalog.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
+
+jsTestLog("Cannot start 4.0 binary with $** index present.");
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+assertVersionAndFCV(testDB, ["4.1", "4.2"], "4.0");
+secondaries = rst.getSecondaries();
+assert.eq(secondaries.length, 2);
+rst.awaitReplication();
+try {
+ restartReplSetNodes(rst, [secondaries[0]], nodeOptions40);
+ assert(false, "Expected 'restartReplSetNodes' to throw");
+} catch (err) {
+ assert.eq(err.message, `Failed to start node ${rst.getNodeId(secondaries[0])}`);
+ // In most cases we expect the node to fail with 28782 because it sees the wildcard index in
+ // its catalog on startup and doesn't recognize the format. However in some cases the node
+ // will start up having not completely persisted the index build before shutting down. In
+ // these cases the node will attempt to re-build the index on startup and encounter a
+ // different error (40590) upon trying to rebuild the wildcard index.
+ assert(rawMongoProgramOutput().match("Fatal Assertion 28782") ||
+ rawMongoProgramOutput().match("Fatal Assertion 40590"));
+}
+
+jsTestLog("Restart the failed node on binary 4.2 and gracefully shut down the replset.");
+Object.assign(rst.nodeOptions[`n${rst.getNodeId(secondaries[0])}`], nodeOptions42);
+rst.start(secondaries[0], nodeOptions42);
+
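+// Wait for the restarted secondary to catch up before gracefully stopping the set.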
+rst.awaitReplication();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js b/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js
index 432ee93eaa7..55aeff8b8bf 100644
--- a/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js
+++ b/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js
@@ -9,40 +9,39 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- assert.commandWorked(db.createCollection(collName));
+const db = rst.getPrimary().getDB(dbName);
+assert.commandWorked(db.createCollection(collName));
- const failpoint = 'hangBeforeGettingNextCollection';
+const failpoint = 'hangBeforeGettingNextCollection';
- // Hang 'forEachCollectionFromDb' after iterating through the first collection.
- assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+// Hang 'forEachCollectionFromDb' after iterating through the first collection.
+assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- TestData.failpoint = failpoint;
- const awaitCreateCollections = startParallelShell(() => {
- // The 'forEachCollectionFromDb' helper doesn't iterate in collection name order, so we need
- // to create multiple collections to have at least one next collection when the
- // CollectionCatalog iterator is incremented.
- for (let i = 0; i < 25; i++) {
- const collName = "a".repeat(i + 1);
- assert.commandWorked(db.createCollection(collName));
- }
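+// Stash the failpoint name in TestData so the parallel shell below can reference it.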
+TestData.failpoint = failpoint;
+const awaitCreateCollections = startParallelShell(() => {
+ // The 'forEachCollectionFromDb' helper doesn't iterate in collection name order, so we need
+ // to create multiple collections to have at least one next collection when the
+ // CollectionCatalog iterator is incremented.
+ for (let i = 0; i < 25; i++) {
+ const collName = "a".repeat(i + 1);
+ assert.commandWorked(db.createCollection(collName));
+ }
- // Let 'forEachCollectionFromDb' iterate to the next collection.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: TestData.failpoint, mode: "off"}));
- }, rst.getPrimary().port);
+ // Let 'forEachCollectionFromDb' iterate to the next collection.
+ assert.commandWorked(db.adminCommand({configureFailPoint: TestData.failpoint, mode: "off"}));
+}, rst.getPrimary().port);
- assert.commandWorked(db.stats());
- awaitCreateCollections();
+assert.commandWorked(db.stats());
+awaitCreateCollections();
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/absent_ns_field_in_index_specs.js b/jstests/noPassthrough/absent_ns_field_in_index_specs.js
index 07477fdd1e4..4428415e1b3 100644
--- a/jstests/noPassthrough/absent_ns_field_in_index_specs.js
+++ b/jstests/noPassthrough/absent_ns_field_in_index_specs.js
@@ -9,67 +9,70 @@
* @tags: [requires_replication, requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- const dbName = 'test';
- const collName = 'absent_ns';
+const dbName = 'test';
+const collName = 'absent_ns';
- let replSet = new ReplSetTest({name: 'absentNsField', nodes: 2});
- replSet.startSet();
- replSet.initiate();
+let replSet = new ReplSetTest({name: 'absentNsField', nodes: 2});
+replSet.startSet();
+replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB.getCollection(collName);
+const primary = replSet.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB.getCollection(collName);
- const secondary = replSet.getSecondary();
- const secondaryDB = secondary.getDB(dbName);
+const secondary = replSet.getSecondary();
+const secondaryDB = secondary.getDB(dbName);
- // The primary will not generate the 'ns' field for index specs, but the secondary will.
- assert.commandWorked(primary.getDB('admin').runCommand(
- {setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
+// The primary will not generate the 'ns' field for index specs, but the secondary will.
+assert.commandWorked(
+ primary.getDB('admin').runCommand({setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
- assert.commandWorked(primaryColl.insert({x: 100}));
- assert.commandWorked(primaryColl.createIndex({x: 1}));
+assert.commandWorked(primaryColl.insert({x: 100}));
+assert.commandWorked(primaryColl.createIndex({x: 1}));
- replSet.awaitReplication();
+replSet.awaitReplication();
- let specPrimary =
- assert.commandWorked(primaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- let specSecondary =
- assert.commandWorked(secondaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
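+// firstBatch[1] is expected to be the {x: 1} index created above; firstBatch[0] is the _id index.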
+let specPrimary =
+ assert.commandWorked(primaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+let specSecondary =
+ assert.commandWorked(secondaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- assert.eq(false, specPrimary.hasOwnProperty('ns'));
- assert.eq(true, specSecondary.hasOwnProperty('ns'));
- assert.eq(dbName + '.' + collName, specSecondary.ns);
+assert.eq(false, specPrimary.hasOwnProperty('ns'));
+assert.eq(true, specSecondary.hasOwnProperty('ns'));
+assert.eq(dbName + '.' + collName, specSecondary.ns);
- replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
+replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
- // The primary's index spec has no 'ns' field and the secondary's index spec does have the 'ns'
- // field. Restart the nodes as standalones and ensure that the primary's index spec gets updated
- // with the 'ns' field. No changes should be necessary to the secondary's index spec, but
- // verify that it still has the 'ns' field.
- const options = {dbpath: primary.dbpath, noCleanData: true};
- let conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+// The primary's index spec has no 'ns' field and the secondary's index spec does have the 'ns'
+// field. Restart the nodes as standalones and ensure that the primary's index spec gets updated
+// with the 'ns' field. No changes should be necessary to the secondary's index spec, but
+// verify that it still has the 'ns' field.
+const options = {
+ dbpath: primary.dbpath,
+ noCleanData: true
+};
+let conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- let db = conn.getDB(dbName);
- let spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+let db = conn.getDB(dbName);
+let spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- assert.eq(true, spec.hasOwnProperty('ns'));
- assert.eq(dbName + '.' + collName, spec.ns);
+assert.eq(true, spec.hasOwnProperty('ns'));
+assert.eq(dbName + '.' + collName, spec.ns);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- options.dbpath = secondary.dbpath;
- conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+options.dbpath = secondary.dbpath;
+conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- db = conn.getDB(dbName);
- spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+db = conn.getDB(dbName);
+spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- assert.eq(true, spec.hasOwnProperty('ns'));
- assert.eq(dbName + '.' + collName, spec.ns);
+assert.eq(true, spec.hasOwnProperty('ns'));
+assert.eq(dbName + '.' + collName, spec.ns);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/afterClusterTime_committed_reads.js b/jstests/noPassthrough/afterClusterTime_committed_reads.js
index 5c488ca69f4..5212c59f6f5 100644
--- a/jstests/noPassthrough/afterClusterTime_committed_reads.js
+++ b/jstests/noPassthrough/afterClusterTime_committed_reads.js
@@ -2,73 +2,72 @@
// majority commit point to move past 'afterClusterTime' before they can commit.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
- const session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- const primaryDB = session.getDatabase(dbName);
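+// With causalConsistency: false the shell does not inject afterClusterTime; this test supplies it explicitly.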
+const session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+const primaryDB = session.getDatabase(dbName);
- let txnNumber = 0;
+let txnNumber = 0;
- function testReadConcernLevel(level) {
- // Stop replication.
- stopReplicationOnSecondaries(rst);
+function testReadConcernLevel(level) {
+ // Stop replication.
+ stopReplicationOnSecondaries(rst);
- // Perform a write and get its op time.
- const res = assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]}));
- assert(res.hasOwnProperty("opTime"), tojson(res));
- assert(res.opTime.hasOwnProperty("ts"), tojson(res));
- const clusterTime = res.opTime.ts;
+ // Perform a write and get its op time.
+ const res = assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]}));
+ assert(res.hasOwnProperty("opTime"), tojson(res));
+ assert(res.opTime.hasOwnProperty("ts"), tojson(res));
+ const clusterTime = res.opTime.ts;
- // A majority-committed read-only transaction on the primary after the new cluster time
- // should time out at commit time waiting for the cluster time to be majority committed.
- assert.commandWorked(primaryDB.runCommand({
- find: collName,
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false,
- readConcern: {level: level, afterClusterTime: clusterTime}
- }));
- assert.commandFailedWithCode(primaryDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"},
- maxTimeMS: 1000
- }),
- ErrorCodes.MaxTimeMSExpired);
+ // A majority-committed read-only transaction on the primary after the new cluster time
+ // should time out at commit time waiting for the cluster time to be majority committed.
+ assert.commandWorked(primaryDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ readConcern: {level: level, afterClusterTime: clusterTime}
+ }));
+ assert.commandFailedWithCode(primaryDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"},
+ maxTimeMS: 1000
+ }),
+ ErrorCodes.MaxTimeMSExpired);
- // Restart replication.
- restartReplicationOnSecondaries(rst);
+ // Restart replication.
+ restartReplicationOnSecondaries(rst);
- // A majority-committed read-only transaction on the primary after the new cluster time now
- // succeeds.
- assert.commandWorked(primaryDB.runCommand({
- find: collName,
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false,
- readConcern: {level: level, afterClusterTime: clusterTime}
- }));
- assert.commandWorked(primaryDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
- }
+ // A majority-committed read-only transaction on the primary after the new cluster time now
+ // succeeds.
+ assert.commandWorked(primaryDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ readConcern: {level: level, afterClusterTime: clusterTime}
+ }));
+ assert.commandWorked(primaryDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+ }));
+}
- testReadConcernLevel("majority");
- testReadConcernLevel("snapshot");
+testReadConcernLevel("majority");
+testReadConcernLevel("snapshot");
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/after_cluster_time.js b/jstests/noPassthrough/after_cluster_time.js
index 1137e8495f2..78485d10a31 100644
--- a/jstests/noPassthrough/after_cluster_time.js
+++ b/jstests/noPassthrough/after_cluster_time.js
@@ -1,71 +1,71 @@
// This test verifies readConcern:afterClusterTime behavior on a standalone mongod.
// @tags: [requires_replication, requires_majority_read_concern]
(function() {
- "use strict";
- var standalone =
- MongoRunner.runMongod({enableMajorityReadConcern: "", storageEngine: "wiredTiger"});
+"use strict";
+var standalone =
+ MongoRunner.runMongod({enableMajorityReadConcern: "", storageEngine: "wiredTiger"});
- var testDB = standalone.getDB("test");
+var testDB = standalone.getDB("test");
- assert.commandWorked(testDB.runCommand({insert: "after_cluster_time", documents: [{x: 1}]}));
+assert.commandWorked(testDB.runCommand({insert: "after_cluster_time", documents: [{x: 1}]}));
- // Majority reads without afterClusterTime succeed.
- assert.commandWorked(
- testDB.runCommand({find: "after_cluster_time", readConcern: {level: "majority"}}),
- "expected majority read without afterClusterTime to succeed on standalone mongod");
+// Majority reads without afterClusterTime succeed.
+assert.commandWorked(
+ testDB.runCommand({find: "after_cluster_time", readConcern: {level: "majority"}}),
+ "expected majority read without afterClusterTime to succeed on standalone mongod");
- // afterClusterTime reads without a level fail.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {find: "after_cluster_time", readConcern: {afterClusterTime: Timestamp(0, 0)}}),
- ErrorCodes.InvalidOptions,
- "expected non-majority afterClusterTime read to fail on standalone mongod");
+// afterClusterTime reads without a level fail.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {find: "after_cluster_time", readConcern: {afterClusterTime: Timestamp(0, 0)}}),
+ ErrorCodes.InvalidOptions,
+ "expected non-majority afterClusterTime read to fail on standalone mongod");
- // afterClusterTime reads with null timestamps are rejected.
- assert.commandFailedWithCode(
- testDB.runCommand({
- find: "after_cluster_time",
- readConcern: {level: "majority", afterClusterTime: Timestamp(0, 0)}
- }),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime read with null timestamp to fail on standalone mongod");
-
- // Standalones don't support any operations with clusterTime.
- assert.commandFailedWithCode(testDB.runCommand({
+// afterClusterTime reads with null timestamps are rejected.
+assert.commandFailedWithCode(
+ testDB.runCommand({
find: "after_cluster_time",
- readConcern: {level: "majority", afterClusterTime: Timestamp(0, 1)}
+ readConcern: {level: "majority", afterClusterTime: Timestamp(0, 0)}
}),
- ErrorCodes.IllegalOperation,
- "expected afterClusterTime read to fail on standalone mongod");
- MongoRunner.stopMongod(standalone);
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime read with null timestamp to fail on standalone mongod");
+
+// Standalones don't support any operations with clusterTime.
+assert.commandFailedWithCode(testDB.runCommand({
+ find: "after_cluster_time",
+ readConcern: {level: "majority", afterClusterTime: Timestamp(0, 1)}
+}),
+ ErrorCodes.IllegalOperation,
+ "expected afterClusterTime read to fail on standalone mongod");
+MongoRunner.stopMongod(standalone);
- var rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- var adminDBRS = rst.getPrimary().getDB("admin");
+var rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+var adminDBRS = rst.getPrimary().getDB("admin");
- var res = adminDBRS.runCommand({ping: 1});
- assert.commandWorked(res);
- assert(res.hasOwnProperty("$clusterTime"), tojson(res));
- assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res));
- var clusterTime = res.$clusterTime.clusterTime;
- // afterClusterTime is not allowed in ping command.
- assert.commandFailedWithCode(
- adminDBRS.runCommand({ping: 1, readConcern: {afterClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime fail in ping");
+var res = adminDBRS.runCommand({ping: 1});
+assert.commandWorked(res);
+assert(res.hasOwnProperty("$clusterTime"), tojson(res));
+assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res));
+var clusterTime = res.$clusterTime.clusterTime;
+// afterClusterTime is not allowed in ping command.
+assert.commandFailedWithCode(
+ adminDBRS.runCommand({ping: 1, readConcern: {afterClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime fail in ping");
- // afterClusterTime is not allowed in serverStatus command.
- assert.commandFailedWithCode(
- adminDBRS.runCommand({serverStatus: 1, readConcern: {afterClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime fail in serverStatus");
+// afterClusterTime is not allowed in serverStatus command.
+assert.commandFailedWithCode(
+ adminDBRS.runCommand({serverStatus: 1, readConcern: {afterClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime fail in serverStatus");
- // afterClusterTime is not allowed in currentOp command.
- assert.commandFailedWithCode(
- adminDBRS.runCommand({currentOp: 1, readConcern: {afterClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime fail in serverStatus");
+// afterClusterTime is not allowed in currentOp command.
+assert.commandFailedWithCode(
+ adminDBRS.runCommand({currentOp: 1, readConcern: {afterClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime fail in serverStatus");
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/agg_explain_read_concern.js b/jstests/noPassthrough/agg_explain_read_concern.js
index e3f0d7b8d94..9d386973450 100644
--- a/jstests/noPassthrough/agg_explain_read_concern.js
+++ b/jstests/noPassthrough/agg_explain_read_concern.js
@@ -3,69 +3,69 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const rst = new ReplSetTest(
- {name: "aggExplainReadConcernSet", nodes: 1, nodeOptions: {enableMajorityReadConcern: ""}});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest(
+ {name: "aggExplainReadConcernSet", nodes: 1, nodeOptions: {enableMajorityReadConcern: ""}});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const session = primary.getDB("test").getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("test");
- const coll = sessionDB.agg_explain_read_concern;
+const primary = rst.getPrimary();
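+// Use a session without causal consistency so the shell does not attach afterClusterTime to reads.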
+const session = primary.getDB("test").getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase("test");
+const coll = sessionDB.agg_explain_read_concern;
- // Test that explain is legal with readConcern "local".
- assert.commandWorked(coll.explain().aggregate([], {readConcern: {level: "local"}}));
- assert.commandWorked(sessionDB.runCommand(
- {aggregate: coll.getName(), pipeline: [], explain: true, readConcern: {level: "local"}}));
- assert.commandWorked(sessionDB.runCommand({
- explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
- readConcern: {level: "local"}
- }));
+// Test that explain is legal with readConcern "local".
+assert.commandWorked(coll.explain().aggregate([], {readConcern: {level: "local"}}));
+assert.commandWorked(sessionDB.runCommand(
+ {aggregate: coll.getName(), pipeline: [], explain: true, readConcern: {level: "local"}}));
+assert.commandWorked(sessionDB.runCommand({
+ explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
+ readConcern: {level: "local"}
+}));
- // Test that explain is illegal with other readConcern levels.
- const nonLocalReadConcerns = ["majority", "available", "linearizable"];
- nonLocalReadConcerns.forEach(function(readConcernLevel) {
- let aggCmd = {
- aggregate: coll.getName(),
- pipeline: [],
- explain: true,
- readConcern: {level: readConcernLevel}
- };
- let explainCmd = {
- explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
- readConcern: {level: readConcernLevel}
- };
+// Test that explain is illegal with other readConcern levels.
+const nonLocalReadConcerns = ["majority", "available", "linearizable"];
+nonLocalReadConcerns.forEach(function(readConcernLevel) {
+ let aggCmd = {
+ aggregate: coll.getName(),
+ pipeline: [],
+ explain: true,
+ readConcern: {level: readConcernLevel}
+ };
+ let explainCmd = {
+ explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
+ readConcern: {level: readConcernLevel}
+ };
- assert.throws(() => coll.explain().aggregate([], {readConcern: {level: readConcernLevel}}));
+ assert.throws(() => coll.explain().aggregate([], {readConcern: {level: readConcernLevel}}));
- let cmdRes = sessionDB.runCommand(aggCmd);
- assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
- let expectedErrStr = "aggregate command cannot run with a readConcern other than 'local'";
- assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
+ let cmdRes = sessionDB.runCommand(aggCmd);
+ assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
+ let expectedErrStr = "aggregate command cannot run with a readConcern other than 'local'";
+ assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
- cmdRes = sessionDB.runCommand(explainCmd);
- assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
- expectedErrStr = "Command does not support read concern";
- assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
- });
+ cmdRes = sessionDB.runCommand(explainCmd);
+ assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
+ expectedErrStr = "Command does not support read concern";
+ assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
+});
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/aggregation_cursor_invalidations.js b/jstests/noPassthrough/aggregation_cursor_invalidations.js
index 387fbe56952..7192e9595bc 100644
--- a/jstests/noPassthrough/aggregation_cursor_invalidations.js
+++ b/jstests/noPassthrough/aggregation_cursor_invalidations.js
@@ -10,114 +10,115 @@
* @tags: [do_not_wrap_aggregations_in_facets, requires_capped]
*/
(function() {
- 'use strict';
-
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- // The DocumentSourceCursor which wraps PlanExecutors will batch results internally. We use the
- // 'internalDocumentSourceCursorBatchSizeBytes' parameter to disable this behavior so that we
- // can easily pause a pipeline in a state where it will need to request more results from the
- // PlanExecutor.
- const options = {setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
-
- const testDB = conn.getDB('test');
-
- // Make sure the number of results is greater than the batchSize to ensure the results
- // cannot all fit in one batch.
- const batchSize = 2;
- const numMatches = batchSize + 1;
- const sourceCollection = testDB.source;
- const foreignCollection = testDB.foreign;
-
- /**
- * Populates both 'sourceCollection' and 'foreignCollection' with values of 'local' and
- * 'foreign' in the range [0, 'numMatches').
- */
- function setup() {
- sourceCollection.drop();
- foreignCollection.drop();
- for (let i = 0; i < numMatches; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i, local: i}));
-
- // We want to be able to pause a $lookup stage in a state where it has returned some but
- // not all of the results for a single lookup, so we need to insert at least
- // 'numMatches' matches for each source document.
- for (let j = 0; j < numMatches; ++j) {
- assert.writeOK(foreignCollection.insert({_id: numMatches * i + j, foreign: i}));
- }
- }
- }
-
- // Check that there are no cursors still open on the source collection. If any are found, the
- // test will fail and print a list of idle cursors. This should be called each time we
- // expect a cursor to have been destroyed.
- function assertNoOpenCursorsOnSourceCollection() {
- const cursors =
- testDB.getSiblingDB("admin")
- .aggregate([
- {"$currentOp": {"idleCursors": true}},
- {
- "$match": {ns: sourceCollection.getFullName(), "type": "idleCursor"}
-
- }
- ])
- .toArray();
- assert.eq(
- cursors.length, 0, "Did not expect to find any cursors, but found " + tojson(cursors));
- }
-
- const defaultAggregateCmdSmallBatch = {
- aggregate: sourceCollection.getName(),
- pipeline: [],
- cursor: {
- batchSize: batchSize,
- },
- };
-
- // Test that dropping the source collection between an aggregate and a getMore will cause an
- // aggregation pipeline to fail during the getMore if it needs to fetch more results from the
- // collection.
- setup();
- let res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- sourceCollection.drop();
-
- let getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
- 'expected getMore to fail because the source collection was dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that dropping the source collection between an aggregate and a getMore will *not* cause
- // an aggregation pipeline to fail during the getMore if it *does not need* to fetch more
- // results from the collection.
- setup();
- res = assert.commandWorked(testDB.runCommand({
- aggregate: sourceCollection.getName(),
- pipeline: [{$sort: {x: 1}}],
- cursor: {
- batchSize: batchSize,
- },
- }));
+'use strict';
+
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+// The DocumentSourceCursor which wraps PlanExecutors will batch results internally. We use the
+// 'internalDocumentSourceCursorBatchSizeBytes' parameter to disable this behavior so that we
+// can easily pause a pipeline in a state where it will need to request more results from the
+// PlanExecutor.
+const options = {
+ setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+
+const testDB = conn.getDB('test');
+
+// Make sure the number of results is greater than the batchSize to ensure the results
+// cannot all fit in one batch.
+const batchSize = 2;
+const numMatches = batchSize + 1;
+const sourceCollection = testDB.source;
+const foreignCollection = testDB.foreign;
+/**
+ * Populates both 'sourceCollection' and 'foreignCollection' with values of 'local' and
+ * 'foreign' in the range [0, 'numMatches').
+ */
+function setup() {
sourceCollection.drop();
-
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}));
-
- // Test that dropping a $lookup stage's foreign collection between an aggregate and a getMore
- // will *not* cause an aggregation pipeline to fail during the getMore if it needs to fetch more
- // results from the foreign collection. It will instead return no matches for subsequent
- // lookups, as if the foreign collection was empty.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+ foreignCollection.drop();
+ for (let i = 0; i < numMatches; ++i) {
+ assert.writeOK(sourceCollection.insert({_id: i, local: i}));
+
+ // We want to be able to pause a $lookup stage in a state where it has returned some but
+ // not all of the results for a single lookup, so we need to insert at least
+ // 'numMatches' matches for each source document.
+ for (let j = 0; j < numMatches; ++j) {
+ assert.writeOK(foreignCollection.insert({_id: numMatches * i + j, foreign: i}));
+ }
+ }
+}
+
+// Check that there are no cursors still open on the source collection. If any are found, the
+// test will fail and print a list of idle cursors. This should be called each time we
+// expect a cursor to have been destroyed.
+function assertNoOpenCursorsOnSourceCollection() {
+ const cursors = testDB.getSiblingDB("admin")
+ .aggregate([
+ {"$currentOp": {"idleCursors": true}},
+ {
+ "$match": {ns: sourceCollection.getFullName(), "type": "idleCursor"}
+
+ }
+ ])
+ .toArray();
+ assert.eq(
+ cursors.length, 0, "Did not expect to find any cursors, but found " + tojson(cursors));
+}
+
+const defaultAggregateCmdSmallBatch = {
+ aggregate: sourceCollection.getName(),
+ pipeline: [],
+ cursor: {
+ batchSize: batchSize,
+ },
+};
+
+// Test that dropping the source collection between an aggregate and a getMore will cause an
+// aggregation pipeline to fail during the getMore if it needs to fetch more results from the
+// collection.
+setup();
+let res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+sourceCollection.drop();
+
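+// res.cursor.ns is "<db>.<collection>"; strip the database prefix to get the name for getMore.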
+let getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.QueryPlanKilled,
+ 'expected getMore to fail because the source collection was dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that dropping the source collection between an aggregate and a getMore will *not* cause
+// an aggregation pipeline to fail during the getMore if it *does not need* to fetch more
+// results from the collection.
+setup();
+res = assert.commandWorked(testDB.runCommand({
+ aggregate: sourceCollection.getName(),
+ pipeline: [{$sort: {x: 1}}],
+ cursor: {
+ batchSize: batchSize,
+ },
+}));
+
+sourceCollection.drop();
+
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}));
+
+// Test that dropping a $lookup stage's foreign collection between an aggregate and a getMore
+// will *not* cause an aggregation pipeline to fail during the getMore if it needs to fetch more
+// results from the foreign collection. It will instead return no matches for subsequent
+// lookups, as if the foreign collection was empty.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -134,25 +135,25 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
- assert.commandWorked(
- res, 'expected getMore to succeed despite the foreign collection being dropped');
- res.cursor.nextBatch.forEach(function(aggResult) {
- assert.eq(aggResult.results,
- [],
- 'expected results of $lookup into non-existent collection to be empty');
- });
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that a $lookup stage will properly clean up its cursor if it becomes invalidated between
- // batches of a single lookup. This is the same scenario as above, but with the $lookup stage
- // left in a state where it has returned some but not all of the matches for a single lookup.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
+assert.commandWorked(res,
+ 'expected getMore to succeed despite the foreign collection being dropped');
+res.cursor.nextBatch.forEach(function(aggResult) {
+ assert.eq(aggResult.results,
+ [],
+ 'expected results of $lookup into non-existent collection to be empty');
+});
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that a $lookup stage will properly clean up its cursor if it becomes invalidated between
+// batches of a single lookup. This is the same scenario as above, but with the $lookup stage
+// left in a state where it has returned some but not all of the matches for a single lookup.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -172,22 +173,22 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
- 'expected getMore to fail because the foreign collection was dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that dropping a $graphLookup stage's foreign collection between an aggregate and a
- // getMore will *not* cause an aggregation pipeline to fail during the getMore if it needs to
- // fetch more results from the foreign collection. It will instead return no matches for
- // subsequent lookups, as if the foreign collection was empty.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.QueryPlanKilled,
+ 'expected getMore to fail because the foreign collection was dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that dropping a $graphLookup stage's foreign collection between an aggregate and a
+// getMore will *not* cause an aggregation pipeline to fail during the getMore if it needs to
+// fetch more results from the foreign collection. It will instead return no matches for
+// subsequent lookups, as if the foreign collection was empty.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -205,19 +206,19 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
- assert.commandWorked(
- res, 'expected getMore to succeed despite the foreign collection being dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that the getMore still succeeds if the $graphLookup is followed by an $unwind on the
- // 'as' field and the collection is dropped between the initial request and a getMore.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
+assert.commandWorked(res,
+ 'expected getMore to succeed despite the foreign collection being dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that the getMore still succeeds if the $graphLookup is followed by an $unwind on the
+// 'as' field and the collection is dropped between the initial request and a getMore.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -236,149 +237,146 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
- assert.commandWorked(
- res, 'expected getMore to succeed despite the foreign collection being dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that dropping the database will kill an aggregation's cursor, causing a subsequent
- // getMore to fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- assert.commandWorked(sourceCollection.getDB().dropDatabase());
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
-
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
- 'expected getMore to fail because the database was dropped');
-
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that killing an aggregation's cursor by inserting enough documents to force a truncation
- // of a capped collection will cause a subsequent getMore to fail.
- sourceCollection.drop();
- foreignCollection.drop();
- const maxCappedSizeBytes = 64 * 1024;
- const maxNumDocs = 10;
- assert.commandWorked(testDB.runCommand({
- create: sourceCollection.getName(),
- capped: true,
- size: maxCappedSizeBytes,
- max: maxNumDocs
- }));
- // Fill up about half of the collection.
- for (let i = 0; i < maxNumDocs / 2; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i}));
- }
- // Start an aggregation.
- assert.gt(maxNumDocs / 2, batchSize);
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
- // Insert enough to force a truncation.
- for (let i = maxNumDocs / 2; i < 2 * maxNumDocs; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i}));
- }
- assert.eq(maxNumDocs, sourceCollection.count());
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CappedPositionLost,
- 'expected getMore to fail because the capped collection was truncated');
-
- // Test that killing an aggregation's cursor via the killCursors command will cause a subsequent
- // getMore to fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- const killCursorsNamespace = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandWorked(
- testDB.runCommand({killCursors: killCursorsNamespace, cursors: [res.cursor.id]}));
-
- assertNoOpenCursorsOnSourceCollection();
-
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
-
- // Test that killing an aggregation's operation via the killOp command will cause a getMore to
- // fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- // Use a failpoint to cause a getMore to hang indefinitely.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'alwaysOn'}));
- const curOpFilter = {'command.getMore': res.cursor.id};
- assert.eq(0, testDB.currentOp(curOpFilter).inprog.length);
-
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- const parallelShellCode = 'assert.commandFailedWithCode(db.getSiblingDB(\'' + testDB.getName() +
- '\').runCommand({getMore: ' + res.cursor.id.toString() + ', collection: \'' +
- getMoreCollName +
- '\'}), ErrorCodes.Interrupted, \'expected getMore command to be interrupted by killOp\');';
-
- // Start a getMore and wait for it to hang.
- const awaitParallelShell = startParallelShell(parallelShellCode, conn.port);
- assert.soon(function() {
- return assert.commandWorked(testDB.currentOp(curOpFilter)).inprog.length === 1;
- }, 'expected getMore operation to remain active');
-
- // Wait until we know the failpoint has been reached.
- assert.soon(function() {
- const filter = {"msg": "waitAfterPinningCursorBeforeGetMoreBatch"};
- return assert.commandWorked(testDB.currentOp(filter)).inprog.length === 1;
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
+assert.commandWorked(res,
+ 'expected getMore to succeed despite the foreign collection being dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that dropping the database will kill an aggregation's cursor, causing a subsequent
+// getMore to fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+assert.commandWorked(sourceCollection.getDB().dropDatabase());
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.QueryPlanKilled,
+ 'expected getMore to fail because the database was dropped');
+
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that killing an aggregation's cursor by inserting enough documents to force a truncation
+// of a capped collection will cause a subsequent getMore to fail.
+sourceCollection.drop();
+foreignCollection.drop();
+const maxCappedSizeBytes = 64 * 1024;
+const maxNumDocs = 10;
+assert.commandWorked(testDB.runCommand(
+ {create: sourceCollection.getName(), capped: true, size: maxCappedSizeBytes, max: maxNumDocs}));
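+// A capped collection reclaims its oldest documents once the size or 'max' document
+// bound is reached, so inserting past 'maxNumDocs' below truncates the documents the
+// open cursor was positioned on.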
+// Fill up about half of the collection.
+for (let i = 0; i < maxNumDocs / 2; ++i) {
+ assert.writeOK(sourceCollection.insert({_id: i}));
+}
+// Start an aggregation.
+assert.gt(maxNumDocs / 2, batchSize);
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+// Insert enough to force a truncation.
+for (let i = maxNumDocs / 2; i < 2 * maxNumDocs; ++i) {
+ assert.writeOK(sourceCollection.insert({_id: i}));
+}
+assert.eq(maxNumDocs, sourceCollection.count());
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CappedPositionLost,
+ 'expected getMore to fail because the capped collection was truncated');
+
+// Test that killing an aggregation's cursor via the killCursors command will cause a subsequent
+// getMore to fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+const killCursorsNamespace = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandWorked(
+ testDB.runCommand({killCursors: killCursorsNamespace, cursors: [res.cursor.id]}));
+
+assertNoOpenCursorsOnSourceCollection();
+
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+
+// Test that killing an aggregation's operation via the killOp command will cause a getMore to
+// fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+// Use a failpoint to cause a getMore to hang indefinitely.
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'alwaysOn'}));
+const curOpFilter = {
+ 'command.getMore': res.cursor.id
+};
+assert.eq(0, testDB.currentOp(curOpFilter).inprog.length);
+
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
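+// Build the code for a parallel shell: it issues the getMore that will hang on the
+// failpoint and asserts that killOp interrupts it with ErrorCodes.Interrupted.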
+const parallelShellCode = 'assert.commandFailedWithCode(db.getSiblingDB(\'' + testDB.getName() +
+ '\').runCommand({getMore: ' + res.cursor.id.toString() + ', collection: \'' + getMoreCollName +
+ '\'}), ErrorCodes.Interrupted, \'expected getMore command to be interrupted by killOp\');';
+
+// Start a getMore and wait for it to hang.
+const awaitParallelShell = startParallelShell(parallelShellCode, conn.port);
+assert.soon(function() {
+ return assert.commandWorked(testDB.currentOp(curOpFilter)).inprog.length === 1;
+}, 'expected getMore operation to remain active');
+
+// Wait until we know the failpoint has been reached.
+assert.soon(function() {
+ const filter = {"msg": "waitAfterPinningCursorBeforeGetMoreBatch"};
+ return assert.commandWorked(testDB.currentOp(filter)).inprog.length === 1;
+});
+
+// Kill the operation.
+const opId = assert.commandWorked(testDB.currentOp(curOpFilter)).inprog[0].opid;
+assert.commandWorked(testDB.killOp(opId));
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'off'}));
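+// startParallelShell returns a join function whose return value is the shell's exit
+// code, so 0 here means the parallel assertions all passed.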
+assert.eq(0, awaitParallelShell());
+
+assertNoOpenCursorsOnSourceCollection();
+
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+
+// Test that the timeout of an aggregation's cursor will cause a subsequent getMore to
+// fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+let serverStatus = assert.commandWorked(testDB.serverStatus());
+const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
+
+// Wait until the idle cursor background job has killed the aggregation cursor.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, cursorTimeoutMillis: 10}));
+const cursorTimeoutFrequencySeconds = 1;
+assert.commandWorked(testDB.adminCommand(
+ {setParameter: 1, clientCursorMonitorFrequencySecs: cursorTimeoutFrequencySeconds}));
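+// With a 10ms timeout and a one-second monitor pass, the idle aggregation cursor
+// should be reaped on the next sweep; poll serverStatus until the 'timedOut' counter
+// reflects that.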
+assert.soon(
+ function() {
+ serverStatus = assert.commandWorked(testDB.serverStatus());
+ return serverStatus.metrics.cursor.timedOut == expectedNumTimedOutCursors;
+ },
+ function() {
+ return 'aggregation cursor failed to time out, expected ' + expectedNumTimedOutCursors +
+ ' timed out cursors: ' + tojson(serverStatus.metrics.cursor);
});
- // Kill the operation.
- const opId = assert.commandWorked(testDB.currentOp(curOpFilter)).inprog[0].opid;
- assert.commandWorked(testDB.killOp(opId));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'off'}));
- assert.eq(0, awaitParallelShell());
-
- assertNoOpenCursorsOnSourceCollection();
-
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
-
- // Test that a cursor timeout of an aggregation's cursor will cause a subsequent getMore to
- // fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- let serverStatus = assert.commandWorked(testDB.serverStatus());
- const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
-
- // Wait until the idle cursor background job has killed the aggregation cursor.
- assert.commandWorked(testDB.adminCommand({setParameter: 1, cursorTimeoutMillis: 10}));
- const cursorTimeoutFrequencySeconds = 1;
- assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, clientCursorMonitorFrequencySecs: cursorTimeoutFrequencySeconds}));
- assert.soon(
- function() {
- serverStatus = assert.commandWorked(testDB.serverStatus());
- return serverStatus.metrics.cursor.timedOut == expectedNumTimedOutCursors;
- },
- function() {
- return 'aggregation cursor failed to time out, expected ' + expectedNumTimedOutCursors +
- ' timed out cursors: ' + tojson(serverStatus.metrics.cursor);
- });
-
- assertNoOpenCursorsOnSourceCollection();
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
-
- // Test that a cursor will properly be cleaned up on server shutdown.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
- assert.eq(0, MongoRunner.stopMongod(conn), 'expected mongod to shutdown cleanly');
+assertNoOpenCursorsOnSourceCollection();
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+
+// Test that a cursor will properly be cleaned up on server shutdown.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+assert.eq(0, MongoRunner.stopMongod(conn), 'expected mongod to shut down cleanly');
})();
diff --git a/jstests/noPassthrough/aggregation_log_namespace.js b/jstests/noPassthrough/aggregation_log_namespace.js
index ad9f6b6d7b1..a45a3a96597 100644
--- a/jstests/noPassthrough/aggregation_log_namespace.js
+++ b/jstests/noPassthrough/aggregation_log_namespace.js
@@ -2,53 +2,52 @@
// command when a pipeline contains a stage that can write into an output collection.
// @tags: [requires_profiling]
(function() {
- 'use strict';
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
- load("jstests/libs/check_log.js"); // For checkLogs.containsWithCount.
-
- // Runs the given 'pipeline' and verifies that the namespace is correctly logged in the global
- // log for the aggregate command. The 'comment' parameter is used to match a log entry against
- // the aggregate command.
- function verifyLoggedNamespace({pipeline, comment}) {
- assert.commandWorked(db.runCommand(
- {aggregate: source.getName(), comment: comment, pipeline: pipeline, cursor: {}}));
- checkLog.containsWithCount(
- conn,
- `command ${source.getFullName()} appName: "MongoDB Shell" ` +
- `command: aggregate { aggregate: "${source.getName()}", comment: "${comment}"`,
- 1);
- }
-
- const mongodOptions = {};
- const conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn, `mongod failed to start with options ${tojson(mongodOptions)}`);
-
- const db = conn.getDB(`${jsTest.name()}_db`);
- const source = db.getCollection(`${jsTest.name()}_source`);
- source.drop();
- const target = db.getCollection(`${jsTest.name()}_target`);
- target.drop();
-
- // Make sure each command gets logged.
- assert.commandWorked(db.setProfilingLevel(1, {slowms: 0}));
-
- // Test stages that can write into an output collection.
- withEachKindOfWriteStage(
- target,
- (stage) => verifyLoggedNamespace({pipeline: [stage], comment: Object.keys(stage)[0]}));
-
- // Test each $merge mode.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => verifyLoggedNamespace({
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- comment: `merge_${whenMatchedMode}_${whenNotMatchedMode}`
- }));
-
- MongoRunner.stopMongod(conn);
+'use strict';
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
+load("jstests/libs/check_log.js"); // For checkLogs.containsWithCount.
+
+// Runs the given 'pipeline' and verifies that the namespace is correctly logged in the global
+// log for the aggregate command. The 'comment' parameter is used to match a log entry against
+// the aggregate command.
+function verifyLoggedNamespace({pipeline, comment}) {
+ assert.commandWorked(db.runCommand(
+ {aggregate: source.getName(), comment: comment, pipeline: pipeline, cursor: {}}));
+ checkLog.containsWithCount(
+ conn,
+ `command ${source.getFullName()} appName: "MongoDB Shell" ` +
+ `command: aggregate { aggregate: "${source.getName()}", comment: "${comment}"`,
+ 1);
+}
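+// A hypothetical direct invocation, shown for illustration only (the test below
+// drives this helper through the merge_helpers.js utilities instead):
+//     verifyLoggedNamespace({pipeline: [{$out: target.getName()}], comment: "$out"});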
+
+const mongodOptions = {};
+const conn = MongoRunner.runMongod(mongodOptions);
+assert.neq(null, conn, `mongod failed to start with options ${tojson(mongodOptions)}`);
+
+const db = conn.getDB(`${jsTest.name()}_db`);
+const source = db.getCollection(`${jsTest.name()}_source`);
+source.drop();
+const target = db.getCollection(`${jsTest.name()}_target`);
+target.drop();
+
+// Make sure each command gets logged.
+assert.commandWorked(db.setProfilingLevel(1, {slowms: 0}));
+
+// Test stages that can write into an output collection.
+withEachKindOfWriteStage(
+ target, (stage) => verifyLoggedNamespace({pipeline: [stage], comment: Object.keys(stage)[0]}));
+
+// Test each $merge mode.
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => verifyLoggedNamespace({
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ comment: `merge_${whenMatchedMode}_${whenNotMatchedMode}`
+ }));
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/aggregation_zero_batchsize.js b/jstests/noPassthrough/aggregation_zero_batchsize.js
index 3360c6d0856..d143c75ede6 100644
--- a/jstests/noPassthrough/aggregation_zero_batchsize.js
+++ b/jstests/noPassthrough/aggregation_zero_batchsize.js
@@ -3,86 +3,86 @@
* retrieved via getMores.
*/
(function() {
- "use strict";
+"use strict";
- const mongodOptions = {};
- const conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn, "mongod failed to start with options " + tojson(mongodOptions));
+const mongodOptions = {};
+const conn = MongoRunner.runMongod(mongodOptions);
+assert.neq(null, conn, "mongod failed to start with options " + tojson(mongodOptions));
- const testDB = conn.getDB("test");
- const coll = testDB[jsTest.name];
- coll.drop();
+const testDB = conn.getDB("test");
+const coll = testDB[jsTest.name()];
+coll.drop();
- // Test that an aggregate is successful on a non-existent collection.
- assert.eq(0,
- coll.aggregate([]).toArray().length,
- "expected no results from an aggregation on an empty collection");
+// Test that an aggregate is successful on a non-existent collection.
+assert.eq(0,
+ coll.aggregate([]).toArray().length,
+ "expected no results from an aggregation on an empty collection");
- // Test that an aggregate is successful on a non-existent collection with a batchSize of 0, and
- // that a getMore will succeed with an empty result set.
- let res = assert.commandWorked(
- testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
+// Test that an aggregate is successful on a non-existent collection with a batchSize of 0, and
+// that a getMore will succeed with an empty result set.
+let res = assert.commandWorked(
+ testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
- let cursor = new DBCommandCursor(testDB, res);
- assert.eq(
- 0, cursor.itcount(), "expected no results from getMore of aggregation on empty collection");
+let cursor = new DBCommandCursor(testDB, res);
+assert.eq(
+ 0, cursor.itcount(), "expected no results from getMore of aggregation on empty collection");
- // Test that an aggregation can return *all* matching data via getMores if the initial aggregate
- // used a batchSize of 0.
- const nDocs = 1000;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i, stringField: "string"});
- }
- assert.writeOK(bulk.execute());
+// Test that an aggregation can return *all* matching data via getMores if the initial aggregate
+// used a batchSize of 0.
+const nDocs = 1000;
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i, stringField: "string"});
+}
+assert.writeOK(bulk.execute());
- res = assert.commandWorked(
- testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
- cursor = new DBCommandCursor(testDB, res);
- assert.eq(nDocs, cursor.itcount(), "expected all results to be returned via getMores");
+res = assert.commandWorked(
+ testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
+cursor = new DBCommandCursor(testDB, res);
+assert.eq(nDocs, cursor.itcount(), "expected all results to be returned via getMores");
- // Test that an error in a getMore will destroy the cursor.
- function assertNumOpenCursors(nExpectedOpen) {
- let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- assert.eq(nExpectedOpen,
- serverStatus.metrics.cursor.open.total,
- "expected to find " + nExpectedOpen + " open cursor(s): " +
- tojson(serverStatus.metrics.cursor));
- }
+// Test that an error in a getMore will destroy the cursor.
+function assertNumOpenCursors(nExpectedOpen) {
+ let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ assert.eq(nExpectedOpen,
+ serverStatus.metrics.cursor.open.total,
+ "expected to find " + nExpectedOpen +
+ " open cursor(s): " + tojson(serverStatus.metrics.cursor));
+}
- // Issue an aggregate command that will fail *at runtime*, so the error will happen in a
- // getMore.
- assertNumOpenCursors(0);
- res = assert.commandWorked(testDB.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$project: {invalidComputation: {$add: [1, "$stringField"]}}}],
- cursor: {batchSize: 0}
- }));
- cursor = new DBCommandCursor(testDB, res);
- assertNumOpenCursors(1);
+// Issue an aggregate command that will fail *at runtime*, so the error will happen in a
+// getMore.
+assertNumOpenCursors(0);
+res = assert.commandWorked(testDB.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$project: {invalidComputation: {$add: [1, "$stringField"]}}}],
+ cursor: {batchSize: 0}
+}));
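+// The $add of a number and a string only fails once documents flow through the
+// pipeline; with batchSize: 0 the initial reply carries no documents, so the error is
+// deferred to the getMore.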
+cursor = new DBCommandCursor(testDB, res);
+assertNumOpenCursors(1);
- assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
- assertNumOpenCursors(0);
+assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
+assertNumOpenCursors(0);
- // Test that an error in a getMore using a $out stage will destroy the cursor. This test is
- // intended to reproduce SERVER-26608.
+// Test that an error in a getMore using a $out stage will destroy the cursor. This test is
+// intended to reproduce SERVER-26608.
- // Issue an aggregate command that will fail *at runtime*, so the error will happen in a
- // getMore.
- res = assert.commandWorked(testDB.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$out: "validated_collection"}],
- cursor: {batchSize: 0}
- }));
- cursor = new DBCommandCursor(testDB, res);
- assertNumOpenCursors(1);
+// Issue an aggregate command that will fail *at runtime*, so the error will happen in a
+// getMore.
+res = assert.commandWorked(testDB.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: "validated_collection"}],
+ cursor: {batchSize: 0}
+}));
+cursor = new DBCommandCursor(testDB, res);
+assertNumOpenCursors(1);
- // Add a document validation rule to the $out collection so that insertion will fail.
- assert.commandWorked(testDB.runCommand(
- {create: "validated_collection", validator: {stringField: {$type: "int"}}}));
+// Add a document validation rule to the $out collection so that insertion will fail.
+assert.commandWorked(
+ testDB.runCommand({create: "validated_collection", validator: {stringField: {$type: "int"}}}));
- assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
- assertNumOpenCursors(0);
+assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
+assertNumOpenCursors(0);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js b/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js
index d8cd49f7995..3e855455985 100644
--- a/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js
+++ b/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js
@@ -5,75 +5,72 @@
*/
(function() {
- 'use strict';
+'use strict';
+
+const testDBName = 'test';
+const readDBName = 'read';
+const readCollName = 'readColl';
+const testCollName = 'testColl';
+const renameCollName = 'renameColl';
+
+const rst = new ReplSetTest({name: jsTestName(), nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+
+assert.commandWorked(
+ primary.getDB(readDBName)
+ .runCommand({insert: readCollName, documents: [{x: 1}], writeConcern: {w: 2}}));
+
+// The find will hang while holding a global IS lock.
+assert.commandWorked(secondary.getDB("admin").runCommand(
+ {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
+
+const findWait = startParallelShell(function() {
+ db.getMongo().setSlaveOk();
+ assert.eq(
+ db.getSiblingDB('read').getCollection('readColl').find().comment('read hangs').itcount(),
+ 1);
+}, secondary.port);
+
+assert.soon(function() {
+ let findOp = secondary.getDB('admin')
+ .aggregate([{$currentOp: {}}, {$match: {'command.comment': 'read hangs'}}])
+ .toArray();
+ return findOp.length == 1;
+});
+
+{
+ // Run a series of DDL commands, none of which should take the global X lock.
+ const testDB = primary.getDB(testDBName);
+ assert.commandWorked(testDB.runCommand({create: testCollName, writeConcern: {w: 2}}));
- const testDBName = 'test';
- const readDBName = 'read';
- const readCollName = 'readColl';
- const testCollName = 'testColl';
- const renameCollName = 'renameColl';
-
- const rst = new ReplSetTest({name: jsTestName(), nodes: 2});
- rst.startSet();
- rst.initiate();
+ assert.commandWorked(
+ testDB.runCommand({collMod: testCollName, validator: {v: 1}, writeConcern: {w: 2}}));
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: testCollName,
+ indexes: [{key: {x: 1}, name: 'x_1'}],
+ writeConcern: {w: 2}
+ }));
assert.commandWorked(
- primary.getDB(readDBName)
- .runCommand({insert: readCollName, documents: [{x: 1}], writeConcern: {w: 2}}));
-
- // The find will hang and holds a global IS lock.
- assert.commandWorked(secondary.getDB("admin").runCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
-
- const findWait = startParallelShell(function() {
- db.getMongo().setSlaveOk();
- assert.eq(db.getSiblingDB('read')
- .getCollection('readColl')
- .find()
- .comment('read hangs')
- .itcount(),
- 1);
- }, secondary.port);
-
- assert.soon(function() {
- let findOp = secondary.getDB('admin')
- .aggregate([{$currentOp: {}}, {$match: {'command.comment': 'read hangs'}}])
- .toArray();
- return findOp.length == 1;
- });
-
- {
- // Run a series of DDL commands, none of which should take the global X lock.
- const testDB = primary.getDB(testDBName);
- assert.commandWorked(testDB.runCommand({create: testCollName, writeConcern: {w: 2}}));
-
- assert.commandWorked(
- testDB.runCommand({collMod: testCollName, validator: {v: 1}, writeConcern: {w: 2}}));
-
- assert.commandWorked(testDB.runCommand({
- createIndexes: testCollName,
- indexes: [{key: {x: 1}, name: 'x_1'}],
- writeConcern: {w: 2}
- }));
-
- assert.commandWorked(
- testDB.runCommand({dropIndexes: testCollName, index: 'x_1', writeConcern: {w: 2}}));
-
- assert.commandWorked(primary.getDB('admin').runCommand({
- renameCollection: testDBName + '.' + testCollName,
- to: testDBName + '.' + renameCollName,
- writeConcern: {w: 2}
- }));
-
- assert.commandWorked(testDB.runCommand({drop: renameCollName, writeConcern: {w: 2}}));
- }
-
- assert.commandWorked(secondary.getDB("admin").runCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- findWait();
-
- rst.stopSet();
+ testDB.runCommand({dropIndexes: testCollName, index: 'x_1', writeConcern: {w: 2}}));
+
+ assert.commandWorked(primary.getDB('admin').runCommand({
+ renameCollection: testDBName + '.' + testCollName,
+ to: testDBName + '.' + renameCollName,
+ writeConcern: {w: 2}
+ }));
+
+ assert.commandWorked(testDB.runCommand({drop: renameCollName, writeConcern: {w: 2}}));
+}
+
+assert.commandWorked(secondary.getDB("admin").runCommand(
+ {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+findWait();
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/apply_ops_mode.js b/jstests/noPassthrough/apply_ops_mode.js
index 3515f4d8040..385cf0d532b 100644
--- a/jstests/noPassthrough/apply_ops_mode.js
+++ b/jstests/noPassthrough/apply_ops_mode.js
@@ -5,89 +5,85 @@
*/
(function() {
- 'use strict';
- load('jstests/libs/feature_compatibility_version.js');
-
- var standalone = MongoRunner.runMongod();
- var db = standalone.getDB("test");
-
- var coll = db.getCollection("apply_ops_mode1");
- coll.drop();
- assert.writeOK(coll.insert({_id: 1}));
-
- // ------------ Testing normal updates ---------------
-
- var id = ObjectId();
- var updateOp = {op: 'u', ns: coll.getFullName(), o: {_id: id, x: 1}, o2: {_id: id}};
- assert.commandFailed(db.adminCommand({applyOps: [updateOp], alwaysUpsert: false}));
- assert.eq(coll.count({x: 1}), 0);
-
- // Test that 'InitialSync' does not override 'alwaysUpsert: false'.
- assert.commandFailed(db.adminCommand(
- {applyOps: [updateOp], alwaysUpsert: false, oplogApplicationMode: "InitialSync"}));
- assert.eq(coll.count({x: 1}), 0);
-
- // Test parsing failure.
- assert.commandFailedWithCode(
- db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "BadMode"}),
- ErrorCodes.FailedToParse);
- assert.commandFailedWithCode(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: 5}),
- ErrorCodes.TypeMismatch);
-
- // Test default succeeds.
- assert.commandWorked(db.adminCommand({applyOps: [updateOp]}));
- assert.eq(coll.count({x: 1}), 1);
-
- // Use new collection to make logs cleaner.
- coll = db.getCollection("apply_ops_mode2");
- coll.drop();
- updateOp.ns = coll.getFullName();
- assert.writeOK(coll.insert({_id: 1}));
-
- // Test default succeeds in 'InitialSync' mode.
- assert.commandWorked(
- db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
- assert.eq(coll.count({x: 1}), 1);
-
- // ------------ Testing fCV updates ---------------
-
- var adminDB = db.getSiblingDB("admin");
- const systemVersionColl = adminDB.getCollection("system.version");
-
- updateOp = {
- op: 'u',
- ns: systemVersionColl.getFullName(),
- o: {_id: "featureCompatibilityVersion", version: lastStableFCV},
- o2: {_id: "featureCompatibilityVersion"}
- };
- assert.commandFailed(
- db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
-
- assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "ApplyOps"}));
-
- // Test default succeeds.
- updateOp.o.targetVersion = latestFCV;
- assert.commandWorked(db.adminCommand({
- applyOps: [updateOp],
- }));
-
- // ------------ Testing commands on the fCV collection ---------------
-
- var collModOp = {
- op: 'c',
- ns: systemVersionColl.getDB() + ".$cmd",
- o: {collMod: systemVersionColl.getName(), validationLevel: "off"},
- };
- assert.commandFailed(
- db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "InitialSync"}));
-
- assert.commandWorked(
- db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "ApplyOps"}));
-
- // Test default succeeds.
- assert.commandWorked(db.adminCommand({
- applyOps: [collModOp],
- }));
-
- MongoRunner.stopMongod(standalone);
+'use strict';
+load('jstests/libs/feature_compatibility_version.js');
+
+var standalone = MongoRunner.runMongod();
+var db = standalone.getDB("test");
+
+var coll = db.getCollection("apply_ops_mode1");
+coll.drop();
+assert.writeOK(coll.insert({_id: 1}));
+
+// ------------ Testing normal updates ---------------
+
+var id = ObjectId();
+var updateOp = {op: 'u', ns: coll.getFullName(), o: {_id: id, x: 1}, o2: {_id: id}};
+assert.commandFailed(db.adminCommand({applyOps: [updateOp], alwaysUpsert: false}));
+assert.eq(coll.count({x: 1}), 0);
+
+// Test that 'InitialSync' does not override 'alwaysUpsert: false'.
+assert.commandFailed(db.adminCommand(
+ {applyOps: [updateOp], alwaysUpsert: false, oplogApplicationMode: "InitialSync"}));
+assert.eq(coll.count({x: 1}), 0);
+
+// Test parsing failure.
+assert.commandFailedWithCode(
+ db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "BadMode"}),
+ ErrorCodes.FailedToParse);
+assert.commandFailedWithCode(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: 5}),
+ ErrorCodes.TypeMismatch);
+
+// Test default succeeds.
+assert.commandWorked(db.adminCommand({applyOps: [updateOp]}));
+assert.eq(coll.count({x: 1}), 1);
+
+// Use a new collection to make the logs cleaner.
+coll = db.getCollection("apply_ops_mode2");
+coll.drop();
+updateOp.ns = coll.getFullName();
+assert.writeOK(coll.insert({_id: 1}));
+
+// Test default succeeds in 'InitialSync' mode.
+assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
+assert.eq(coll.count({x: 1}), 1);
+
+// ------------ Testing fCV updates ---------------
+
+var adminDB = db.getSiblingDB("admin");
+const systemVersionColl = adminDB.getCollection("system.version");
+
+updateOp = {
+ op: 'u',
+ ns: systemVersionColl.getFullName(),
+ o: {_id: "featureCompatibilityVersion", version: lastStableFCV},
+ o2: {_id: "featureCompatibilityVersion"}
+};
+assert.commandFailed(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
+
+assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "ApplyOps"}));
+
+// Test default succeeds.
+updateOp.o.targetVersion = latestFCV;
+assert.commandWorked(db.adminCommand({
+ applyOps: [updateOp],
+}));
+
+// ------------ Testing commands on the fCV collection ---------------
+
+var collModOp = {
+ op: 'c',
+ ns: systemVersionColl.getDB() + ".$cmd",
+ o: {collMod: systemVersionColl.getName(), validationLevel: "off"},
+};
+assert.commandFailed(db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "InitialSync"}));
+
+assert.commandWorked(db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "ApplyOps"}));
+
+// Test default succeeds.
+assert.commandWorked(db.adminCommand({
+ applyOps: [collModOp],
+}));
+
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js b/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js
index f02fad241d3..ca679919ab9 100644
--- a/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js
+++ b/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js
@@ -1,41 +1,41 @@
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- var standalone = MongoRunner.runMongod();
- var adminDB = standalone.getDB("admin");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+var standalone = MongoRunner.runMongod();
+var adminDB = standalone.getDB("admin");
- // Get the uuid of the original admin.system.version.
- var res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
- assert.commandWorked(res, "failed to list collections");
- assert.eq(1, res.cursor.firstBatch.length);
- var originalUUID = res.cursor.firstBatch[0].info.uuid;
- var newUUID = UUID();
+// Get the uuid of the original admin.system.version.
+var res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
+assert.commandWorked(res, "failed to list collections");
+assert.eq(1, res.cursor.firstBatch.length);
+var originalUUID = res.cursor.firstBatch[0].info.uuid;
+var newUUID = UUID();
- // Create new collection, insert new FCV document and then delete the
- // original collection.
- var createNewAdminSystemVersionCollection =
- {op: "c", ns: "admin.$cmd", ui: newUUID, o: {create: "system.version"}};
- var insertFCVDocument = {
- op: "i",
- ns: "admin.system.version",
- o: {_id: "featureCompatibilityVersion", version: latestFCV}
- };
- var dropOriginalAdminSystemVersionCollection =
- {op: "c", ns: "admin.$cmd", ui: originalUUID, o: {drop: "admin.tmp_system_version"}};
- var cmd = {
- applyOps: [
- createNewAdminSystemVersionCollection,
- insertFCVDocument,
- dropOriginalAdminSystemVersionCollection
- ]
- };
- assert.commandWorked(adminDB.runCommand(cmd), "failed command " + tojson(cmd));
+// Create a new collection, insert the new FCV document, and then drop the
+// original collection.
+var createNewAdminSystemVersionCollection =
+ {op: "c", ns: "admin.$cmd", ui: newUUID, o: {create: "system.version"}};
+var insertFCVDocument = {
+ op: "i",
+ ns: "admin.system.version",
+ o: {_id: "featureCompatibilityVersion", version: latestFCV}
+};
+var dropOriginalAdminSystemVersionCollection =
+ {op: "c", ns: "admin.$cmd", ui: originalUUID, o: {drop: "admin.tmp_system_version"}};
+var cmd = {
+ applyOps: [
+ createNewAdminSystemVersionCollection,
+ insertFCVDocument,
+ dropOriginalAdminSystemVersionCollection
+ ]
+};
+assert.commandWorked(adminDB.runCommand(cmd), "failed command " + tojson(cmd));
- // Now admin.system.version is overwritten with the new entry.
- res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
- assert.commandWorked(res, "failed to list collections");
- assert.eq(1, res.cursor.firstBatch.length);
- assert.eq(newUUID, res.cursor.firstBatch[0].info.uuid);
+// Now admin.system.version is overwritten with the new entry.
+res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
+assert.commandWorked(res, "failed to list collections");
+assert.eq(1, res.cursor.firstBatch.length);
+assert.eq(newUUID, res.cursor.firstBatch[0].info.uuid);
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/noPassthrough/atomic_rename_collection.js b/jstests/noPassthrough/atomic_rename_collection.js
index c8e24f1c591..a6f39c1c40f 100644
--- a/jstests/noPassthrough/atomic_rename_collection.js
+++ b/jstests/noPassthrough/atomic_rename_collection.js
@@ -1,47 +1,47 @@
// @tags: [requires_replication]
(function() {
- // SERVER-28285 When renameCollection drops the target collection, it should just generate
- // a single oplog entry, so we cannot end up in a state where the drop has succeeded, but
- // the rename didn't.
- let rs = new ReplSetTest({nodes: 1});
- rs.startSet();
- rs.initiate();
+// SERVER-28285 When renameCollection drops the target collection, it should generate only
+// a single oplog entry, so we cannot end up in a state where the drop has succeeded but
+// the rename has not.
+let rs = new ReplSetTest({nodes: 1});
+rs.startSet();
+rs.initiate();
- let prim = rs.getPrimary();
- let first = prim.getDB("first");
- let second = prim.getDB("second");
- let local = prim.getDB("local");
+let prim = rs.getPrimary();
+let first = prim.getDB("first");
+let second = prim.getDB("second");
+let local = prim.getDB("local");
- // Test both for rename within a database as across databases.
- const tests = [
- {
- source: first.x,
- target: first.y,
- expectedOplogEntries: 1,
- },
- {
- source: first.x,
- target: second.x,
- expectedOplogEntries: 4,
- }
- ];
- tests.forEach((test) => {
- test.source.drop();
- assert.writeOK(test.source.insert({}));
- assert.writeOK(test.target.insert({}));
+// Test renames both within a database and across databases.
+const tests = [
+ {
+ source: first.x,
+ target: first.y,
+ expectedOplogEntries: 1,
+ },
+ {
+ source: first.x,
+ target: second.x,
+ expectedOplogEntries: 4,
+ }
+];
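+// The expected counts reflect that an in-database rename is a single oplog entry,
+// while a cross-database rename is presumably implemented as a copy (create, insert,
+// rename, and drop steps) and therefore generates several entries.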
+tests.forEach((test) => {
+ test.source.drop();
+ assert.writeOK(test.source.insert({}));
+ assert.writeOK(test.target.insert({}));
- let ts = local.oplog.rs.find().sort({$natural: -1}).limit(1).next().ts;
- let cmd = {
- renameCollection: test.source.toString(),
- to: test.target.toString(),
- dropTarget: true
- };
- assert.commandWorked(local.adminCommand(cmd), tojson(cmd));
- ops = local.oplog.rs.find({ts: {$gt: ts}}).sort({$natural: 1}).toArray();
- assert.eq(ops.length,
- test.expectedOplogEntries,
- "renameCollection was supposed to only generate " + test.expectedOplogEntries +
- " oplog entries: " + tojson(ops));
- });
- rs.stopSet();
+ let ts = local.oplog.rs.find().sort({$natural: -1}).limit(1).next().ts;
+ let cmd = {
+ renameCollection: test.source.toString(),
+ to: test.target.toString(),
+ dropTarget: true
+ };
+ assert.commandWorked(local.adminCommand(cmd), tojson(cmd));
+ let ops = local.oplog.rs.find({ts: {$gt: ts}}).sort({$natural: 1}).toArray();
+ assert.eq(ops.length,
+ test.expectedOplogEntries,
+ "renameCollection was supposed to only generate " + test.expectedOplogEntries +
+ " oplog entries: " + tojson(ops));
+});
+rs.stopSet();
})();
diff --git a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
index c67482ae5b9..ca4e3da965c 100644
--- a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
+++ b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
@@ -4,73 +4,72 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- // Given a valid cluster time object, returns one with the same signature, but a mismatching
- // cluster time.
- function mismatchingLogicalTime(lt) {
- return Object.merge(lt, {clusterTime: Timestamp(lt.clusterTime.getTime() + 100, 0)});
- }
+// Given a valid cluster time object, returns one with the same signature, but a mismatching
+// cluster time.
+function mismatchingLogicalTime(lt) {
+ return Object.merge(lt, {clusterTime: Timestamp(lt.clusterTime.getTime() + 100, 0)});
+}
- function assertRejectsMismatchingLogicalTime(db) {
- let validTime = db.runCommand({isMaster: 1}).$clusterTime;
- let mismatchingTime = mismatchingLogicalTime(validTime);
+function assertRejectsMismatchingLogicalTime(db) {
+ let validTime = db.runCommand({isMaster: 1}).$clusterTime;
+ let mismatchingTime = mismatchingLogicalTime(validTime);
- assert.commandFailedWithCode(
- db.runCommand({isMaster: 1, $clusterTime: mismatchingTime}),
- ErrorCodes.TimeProofMismatch,
- "expected command with mismatching cluster time and signature to be rejected");
- }
+ assert.commandFailedWithCode(
+ db.runCommand({isMaster: 1, $clusterTime: mismatchingTime}),
+ ErrorCodes.TimeProofMismatch,
+ "expected command with mismatching cluster time and signature to be rejected");
+}
- function assertAcceptsValidLogicalTime(db) {
- let validTime = db.runCommand({isMaster: 1}).$clusterTime;
- assert.commandWorked(
- testDB.runCommand({isMaster: 1, $clusterTime: validTime}),
- "expected command with valid cluster time and signature to be accepted");
- }
+function assertAcceptsValidLogicalTime(db) {
+ let validTime = db.runCommand({isMaster: 1}).$clusterTime;
+ assert.commandWorked(db.runCommand({isMaster: 1, $clusterTime: validTime}),
+ "expected command with valid cluster time and signature to be accepted");
+}
- // Start the sharding test with auth on.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- mongos: 1,
- manualAddShard: true,
- other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
- });
+// Start the sharding test with auth on.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ mongos: 1,
+ manualAddShard: true,
+ other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
+});
- // Create admin user and authenticate as them.
- st.s.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
- st.s.getDB("admin").auth("foo", "bar");
+// Create an admin user and authenticate as that user.
+st.s.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+st.s.getDB("admin").auth("foo", "bar");
- // Add shard with auth enabled.
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
+// Add shard with auth enabled.
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
- // TODO: Wait for stable recovery timestamp when SERVER-32672 is fixed.
- rst.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+// TODO: Wait for stable recovery timestamp when SERVER-32672 is fixed.
+rst.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
- const testDB = st.s.getDB("test");
+const testDB = st.s.getDB("test");
- // Unsharded collections reject mismatching cluster times and accept valid ones.
- assertRejectsMismatchingLogicalTime(testDB);
- assertAcceptsValidLogicalTime(testDB);
+// Unsharded collections reject mismatching cluster times and accept valid ones.
+assertRejectsMismatchingLogicalTime(testDB);
+assertAcceptsValidLogicalTime(testDB);
- // Initialize sharding.
- assert.commandWorked(testDB.adminCommand({enableSharding: "test"}));
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testDB.foo.getFullName(), key: {_id: 1}}));
+// Initialize sharding.
+assert.commandWorked(testDB.adminCommand({enableSharding: "test"}));
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.foo.getFullName(), key: {_id: 1}}));
- // Sharded collections reject mismatching cluster times and accept valid ones.
- assertRejectsMismatchingLogicalTime(testDB);
- assertAcceptsValidLogicalTime(testDB);
+// Sharded collections reject mismatching cluster times and accept valid ones.
+assertRejectsMismatchingLogicalTime(testDB);
+assertAcceptsValidLogicalTime(testDB);
- // Shards and config servers also reject mismatching times and accept valid ones.
- assertRejectsMismatchingLogicalTime(rst.getPrimary().getDB("test"));
- assertAcceptsValidLogicalTime(rst.getPrimary().getDB("test"));
- assertRejectsMismatchingLogicalTime(st.configRS.getPrimary().getDB("admin"));
- assertAcceptsValidLogicalTime(st.configRS.getPrimary().getDB("admin"));
+// Shards and config servers also reject mismatching times and accept valid ones.
+assertRejectsMismatchingLogicalTime(rst.getPrimary().getDB("test"));
+assertAcceptsValidLogicalTime(rst.getPrimary().getDB("test"));
+assertRejectsMismatchingLogicalTime(st.configRS.getPrimary().getDB("admin"));
+assertAcceptsValidLogicalTime(st.configRS.getPrimary().getDB("admin"));
- st.stop();
- rst.stopSet();
+st.stop();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/auto_retry_on_network_error.js b/jstests/noPassthrough/auto_retry_on_network_error.js
index 03e486a5a05..1c5f8465ebb 100644
--- a/jstests/noPassthrough/auto_retry_on_network_error.js
+++ b/jstests/noPassthrough/auto_retry_on_network_error.js
@@ -4,110 +4,111 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- TestData.networkErrorAndTxnOverrideConfig = {retryOnNetworkErrors: true};
- load('jstests/libs/override_methods/network_error_and_txn_override.js');
- load("jstests/replsets/rslib.js");
-
- function getThreadName(db) {
- let myUri = db.adminCommand({whatsmyuri: 1}).you;
- return db.getSiblingDB("admin")
- .aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}])
- .toArray()[0]
- .desc;
- }
-
- function failNextCommand(db, command) {
- let threadName = getThreadName(db);
-
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- closeConnection: true,
- failCommands: [command],
- threadName: threadName,
- }
- }));
- }
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
-
- // awaitLastStableRecoveryTimestamp runs an 'appendOplogNote' command which is not retryable.
- rst.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- const dbName = "test";
- const collName = "auto_retry";
-
- // The override requires the connection to be run under a session. Use the replica set URL to
- // allow automatic re-targeting of the primary on NotMaster errors.
- const db = new Mongo(rst.getURL()).startSession({retryWrites: true}).getDatabase(dbName);
-
- // Commands with no disconnections should work as normal.
- assert.commandWorked(db.runCommand({ping: 1}));
- assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
-
- // Read commands are automatically retried on network errors.
- failNextCommand(db, "find");
- assert.commandWorked(db.runCommand({find: collName}));
-
- failNextCommand(db, "find");
- assert.commandWorked(db.runCommandWithMetadata({find: collName}, {}).commandReply);
-
- // Retryable write commands that can be retried succeed.
- failNextCommand(db, "insert");
- assert.writeOK(db[collName].insert({x: 1}));
-
- failNextCommand(db, "insert");
- assert.commandWorked(db.runCommandWithMetadata({
- insert: collName,
- documents: [{x: 2}, {x: 3}],
- txnNumber: NumberLong(10),
- lsid: {id: UUID()}
- },
- {})
- .commandReply);
-
- // Retryable write commands that cannot be retried (i.e. no transaction number, no session id,
- // or are unordered) throw.
- failNextCommand(db, "insert");
- assert.throws(function() {
- db.runCommand({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false});
- });
-
- // The previous command shouldn't have been retried, so run a command to successfully re-target
- // the primary, so the connection to it can be closed.
- assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
-
- failNextCommand(db, "insert");
- assert.throws(function() {
- db.runCommandWithMetadata({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false},
- {});
- });
-
- // getMore commands can't be retried because we won't know whether the cursor was advanced or
- // not.
- let cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
- failNextCommand(db, "getMore");
- assert.throws(function() {
- db.runCommand({getMore: cursorId, collection: collName});
- });
-
- cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
- failNextCommand(db, "getMore");
- assert.throws(function() {
- db.runCommandWithMetadata({getMore: cursorId, collection: collName}, {});
- });
-
- rst.stopSet();
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+TestData.networkErrorAndTxnOverrideConfig = {
+ retryOnNetworkErrors: true
+};
+load('jstests/libs/override_methods/network_error_and_txn_override.js');
+load("jstests/replsets/rslib.js");
+
+function getThreadName(db) {
+ let myUri = db.adminCommand({whatsmyuri: 1}).you;
+ return db.getSiblingDB("admin")
+ .aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}])
+ .toArray()[0]
+ .desc;
+}
+
+function failNextCommand(db, command) {
+ let threadName = getThreadName(db);
+
+ assert.commandWorked(db.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ closeConnection: true,
+ failCommands: [command],
+ threadName: threadName,
+ }
+ }));
+}
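+// The 'failCommand' failpoint with mode {times: 1} closes the connection on exactly
+// the next matching command issued by this thread, simulating a transient network
+// error.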
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+
+// awaitLastStableRecoveryTimestamp runs an 'appendOplogNote' command which is not retryable.
+rst.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+const dbName = "test";
+const collName = "auto_retry";
+
+// The override requires the connection to be run under a session. Use the replica set URL to
+// allow automatic re-targeting of the primary on NotMaster errors.
+const db = new Mongo(rst.getURL()).startSession({retryWrites: true}).getDatabase(dbName);
+
+// Commands with no disconnections should work as normal.
+assert.commandWorked(db.runCommand({ping: 1}));
+assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
+
+// Read commands are automatically retried on network errors.
+failNextCommand(db, "find");
+assert.commandWorked(db.runCommand({find: collName}));
+
+failNextCommand(db, "find");
+assert.commandWorked(db.runCommandWithMetadata({find: collName}, {}).commandReply);
+
+// Retryable write commands that can be retried succeed.
+failNextCommand(db, "insert");
+assert.writeOK(db[collName].insert({x: 1}));
+
+failNextCommand(db, "insert");
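+// Supplying an explicit lsid and txnNumber makes this raw command a retryable write.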
+assert.commandWorked(db.runCommandWithMetadata({
+ insert: collName,
+ documents: [{x: 2}, {x: 3}],
+ txnNumber: NumberLong(10),
+ lsid: {id: UUID()}
+ },
+ {})
+ .commandReply);
+
+// Retryable write commands that cannot be retried (i.e. they lack a transaction number or
+// session id, or are unordered) throw.
+failNextCommand(db, "insert");
+assert.throws(function() {
+ db.runCommand({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false});
+});
+
+// The previous command shouldn't have been retried, so run a command to re-target the
+// primary so that the connection to it can be closed.
+assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
+
+failNextCommand(db, "insert");
+assert.throws(function() {
+ db.runCommandWithMetadata({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false}, {});
+});
+
+// getMore commands can't be retried because we won't know whether the cursor was advanced or
+// not.
+let cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
+failNextCommand(db, "getMore");
+assert.throws(function() {
+ db.runCommand({getMore: cursorId, collection: collName});
+});
+
+cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
+failNextCommand(db, "getMore");
+assert.throws(function() {
+ db.runCommandWithMetadata({getMore: cursorId, collection: collName}, {});
+});
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/backup_restore_fsync_lock.js b/jstests/noPassthrough/backup_restore_fsync_lock.js
index 86f45dace3f..a6728a2af10 100644
--- a/jstests/noPassthrough/backup_restore_fsync_lock.js
+++ b/jstests/noPassthrough/backup_restore_fsync_lock.js
@@ -17,9 +17,9 @@
load("jstests/noPassthrough/libs/backup_restore.js");
(function() {
- "use strict";
+"use strict";
- // Run the fsyncLock test. Will return before testing for any engine that doesn't
- // support fsyncLock
- new BackupRestoreTest({backup: 'fsyncLock'}).run();
+// Run the fsyncLock test. Will return before testing for any engine that doesn't
+// support fsyncLock.
+new BackupRestoreTest({backup: 'fsyncLock'}).run();
}());
diff --git a/jstests/noPassthrough/backup_restore_rolling.js b/jstests/noPassthrough/backup_restore_rolling.js
index ddc995e4f5a..8196409c7b5 100644
--- a/jstests/noPassthrough/backup_restore_rolling.js
+++ b/jstests/noPassthrough/backup_restore_rolling.js
@@ -17,28 +17,28 @@
load("jstests/noPassthrough/libs/backup_restore.js");
(function() {
- "use strict";
+"use strict";
- // Grab the storage engine, default is wiredTiger
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+// Grab the storage engine, default is wiredTiger
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- // if rsync is not available on the host, then this test is skipped
- if (!runProgram('bash', '-c', 'which rsync')) {
- new BackupRestoreTest({backup: 'rolling', clientTime: 30000}).run();
- } else {
- jsTestLog("Skipping test for " + storageEngine + ' rolling');
- }
+// If rsync is not available on the host, this test is skipped.
+if (!runProgram('bash', '-c', 'which rsync')) {
+ new BackupRestoreTest({backup: 'rolling', clientTime: 30000}).run();
+} else {
+ jsTestLog("Skipping test for " + storageEngine + ' rolling');
+}
}());
diff --git a/jstests/noPassthrough/backup_restore_stop_start.js b/jstests/noPassthrough/backup_restore_stop_start.js
index a96c7c535bd..3aea0d4cb4f 100644
--- a/jstests/noPassthrough/backup_restore_stop_start.js
+++ b/jstests/noPassthrough/backup_restore_stop_start.js
@@ -17,7 +17,7 @@
load("jstests/noPassthrough/libs/backup_restore.js");
(function() {
- "use strict";
+"use strict";
- new BackupRestoreTest({backup: 'stopStart', clientTime: 30000}).run();
+new BackupRestoreTest({backup: 'stopStart', clientTime: 30000}).run();
}());
diff --git a/jstests/noPassthrough/bind_all_ipv6.js b/jstests/noPassthrough/bind_all_ipv6.js
index 9e47ccd5796..9663f964118 100644
--- a/jstests/noPassthrough/bind_all_ipv6.js
+++ b/jstests/noPassthrough/bind_all_ipv6.js
@@ -1,10 +1,10 @@
// Startup with --bind_ip_all and --ipv6 should not fail with address already in use.
(function() {
- 'use strict';
+'use strict';
- const mongo = MongoRunner.runMongod({ipv6: "", bind_ip_all: ""});
- assert(mongo !== null, "Database is not running");
- assert.commandWorked(mongo.getDB("test").isMaster(), "isMaster failed");
- MongoRunner.stopMongod(mongo);
+const mongo = MongoRunner.runMongod({ipv6: "", bind_ip_all: ""});
+assert(mongo !== null, "Database is not running");
+assert.commandWorked(mongo.getDB("test").isMaster(), "isMaster failed");
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/bind_ip_all.js b/jstests/noPassthrough/bind_ip_all.js
index e840cb2e404..216b41b2ca8 100644
--- a/jstests/noPassthrough/bind_ip_all.js
+++ b/jstests/noPassthrough/bind_ip_all.js
@@ -1,23 +1,23 @@
// Startup with --bind_ip_all should override net.bindIp and vice versa.
(function() {
- 'use strict';
+'use strict';
- const port = allocatePort();
- const BINDIP = 'jstests/noPassthrough/libs/net.bindIp_localhost.yaml';
- const BINDIPALL = 'jstests/noPassthrough/libs/net.bindIpAll.yaml';
+const port = allocatePort();
+const BINDIP = 'jstests/noPassthrough/libs/net.bindIp_localhost.yaml';
+const BINDIPALL = 'jstests/noPassthrough/libs/net.bindIpAll.yaml';
- function runTest(config, opt, expectStar, expectLocalhost) {
- clearRawMongoProgramOutput();
- const mongod =
- runMongoProgram('./mongod', '--port', port, '--config', config, opt, '--outputConfig');
- assert.eq(mongod, 0);
- const output = rawMongoProgramOutput();
- assert.eq(output.search(/bindIp: "\*"/) >= 0, expectStar, output);
- assert.eq(output.search(/bindIp: localhost/) >= 0, expectLocalhost, output);
- assert.eq(output.search(/bindIpAll:/) >= 0, false, output);
- }
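+// Starts mongod with the given config file, one extra command-line option, and
+// '--outputConfig', then asserts which bindIp forms appear in the emitted configuration.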
+function runTest(config, opt, expectStar, expectLocalhost) {
+ clearRawMongoProgramOutput();
+ const mongod =
+ runMongoProgram('./mongod', '--port', port, '--config', config, opt, '--outputConfig');
+ assert.eq(mongod, 0);
+ const output = rawMongoProgramOutput();
+ assert.eq(output.search(/bindIp: "\*"/) >= 0, expectStar, output);
+ assert.eq(output.search(/bindIp: localhost/) >= 0, expectLocalhost, output);
+ assert.eq(output.search(/bindIpAll:/) >= 0, false, output);
+}
- runTest(BINDIP, '--bind_ip_all', true, false);
- runTest(BINDIPALL, '--bind_ip=localhost', false, true);
+runTest(BINDIP, '--bind_ip_all', true, false);
+runTest(BINDIPALL, '--bind_ip=localhost', false, true);
}());
diff --git a/jstests/noPassthrough/bind_localhost.js b/jstests/noPassthrough/bind_localhost.js
index 959c4b70541..242b559831d 100644
--- a/jstests/noPassthrough/bind_localhost.js
+++ b/jstests/noPassthrough/bind_localhost.js
@@ -1,15 +1,15 @@
// Log bound addresses at startup.
(function() {
- 'use strict';
+'use strict';
- const mongo = MongoRunner.runMongod({ipv6: '', bind_ip: 'localhost', useLogFiles: true});
- assert.neq(mongo, null, "Database is not running");
- const log = cat(mongo.fullOptions.logFile);
- print(log);
- assert(log.includes('Listening on 127.0.0.1'), "Not listening on AF_INET");
- if (!_isWindows()) {
- assert(log.match(/Listening on .*\.sock/), "Not listening on AF_UNIX");
- }
- MongoRunner.stopMongod(mongo);
+const mongo = MongoRunner.runMongod({ipv6: '', bind_ip: 'localhost', useLogFiles: true});
+assert.neq(mongo, null, "Database is not running");
+const log = cat(mongo.fullOptions.logFile);
+print(log);
+assert(log.includes('Listening on 127.0.0.1'), "Not listening on AF_INET");
+if (!_isWindows()) {
+ assert(log.match(/Listening on .*\.sock/), "Not listening on AF_UNIX");
+}
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/block_compressor_options.js b/jstests/noPassthrough/block_compressor_options.js
index ebc21f41ceb..129a2a567df 100644
--- a/jstests/noPassthrough/block_compressor_options.js
+++ b/jstests/noPassthrough/block_compressor_options.js
@@ -15,38 +15,38 @@
* @tags: [requires_persistence,requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- // On the first iteration, start a mongod. Subsequent iterations will close and restart on the
- // same dbpath.
- let firstIteration = true;
- let compressors = ['none', 'snappy', 'zlib', 'zstd'];
- let mongo;
- for (let compressor of compressors) {
- jsTestLog({"Starting with compressor": compressor});
- if (firstIteration) {
- mongo = MongoRunner.runMongod({
- wiredTigerCollectionBlockCompressor: compressor,
- wiredTigerJournalCompressor: compressor
- });
- firstIteration = false;
- } else {
- MongoRunner.stopMongod(mongo);
- mongo = MongoRunner.runMongod({
- restart: true,
- dbpath: mongo.dbpath,
- cleanData: false,
- wiredTigerCollectionBlockCompressor: compressor
- });
- }
- mongo.getDB('db')[compressor].insert({});
+// On the first iteration, start a mongod. Subsequent iterations will close and restart on the
+// same dbpath.
+let firstIteration = true;
+let compressors = ['none', 'snappy', 'zlib', 'zstd'];
+let mongo;
+for (let compressor of compressors) {
+ jsTestLog({"Starting with compressor": compressor});
+ if (firstIteration) {
+ mongo = MongoRunner.runMongod({
+ wiredTigerCollectionBlockCompressor: compressor,
+ wiredTigerJournalCompressor: compressor
+ });
+ firstIteration = false;
+ } else {
+ MongoRunner.stopMongod(mongo);
+ mongo = MongoRunner.runMongod({
+ restart: true,
+ dbpath: mongo.dbpath,
+ cleanData: false,
+ wiredTigerCollectionBlockCompressor: compressor
+ });
}
+ mongo.getDB('db')[compressor].insert({});
+}
- for (let compressor of compressors) {
- jsTestLog({"Asserting collection compressor": compressor});
- let stats = mongo.getDB('db')[compressor].stats();
- assert(stats['wiredTiger']['creationString'].search('block_compressor=' + compressor) > -1);
- }
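+// A collection's WiredTiger 'creationString' records the configuration options it was
+// created with, including the 'block_compressor' setting asserted here.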
+for (let compressor of compressors) {
+ jsTestLog({"Asserting collection compressor": compressor});
+ let stats = mongo.getDB('db')[compressor].stats();
+ assert(stats['wiredTiger']['creationString'].search('block_compressor=' + compressor) > -1);
+}
- MongoRunner.stopMongod(mongo);
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js b/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
index a9fd668f304..ff211b2424f 100644
--- a/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
+++ b/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
@@ -4,50 +4,48 @@
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [requires_replication,requires_journaling]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+"use strict";
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
- const db = rst.getPrimary().getDB("test");
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+const db = rst.getPrimary().getDB("test");
- let unique_dbName = jsTestName();
- const sleepShell = startParallelShell(() => {
- assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", seconds: 600}),
- ErrorCodes.Interrupted);
- }, rst.getPrimary().port);
- assert.soon(
- () =>
- db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog.length ===
- 1);
- const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
- assert.eq(sleepOps.length, 1);
- const sleepOpId = sleepOps[0].opid;
+let unique_dbName = jsTestName();
+const sleepShell = startParallelShell(() => {
+ assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", seconds: 600}),
+ ErrorCodes.Interrupted);
+}, rst.getPrimary().port);
+assert.soon(
+ () =>
+ db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog.length === 1);
+const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
+assert.eq(sleepOps.length, 1);
+const sleepOpId = sleepOps[0].opid;
- // Start two concurrent shells which will both attempt to create the database which does not yet
- // exist.
- const openChangeStreamCode = `const cursor = db.getSiblingDB("${unique_dbName}").test.watch();`;
- const changeStreamShell1 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
- const changeStreamShell2 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
+// Start two concurrent shells which will both attempt to create the database which does not yet
+// exist.
+const openChangeStreamCode = `const cursor = db.getSiblingDB("${unique_dbName}").test.watch();`;
+const changeStreamShell1 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
+const changeStreamShell2 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
- // Wait until we can see both change streams have started and are waiting to acquire the lock
- // held by the sleep command.
- assert.soon(
- () =>
- db.currentOp({"command.aggregate": "test", waitingForLock: true}).inprog.length === 2);
- assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
+// Wait until we can see both change streams have started and are waiting to acquire the lock
+// held by the sleep command.
+assert.soon(
+ () => db.currentOp({"command.aggregate": "test", waitingForLock: true}).inprog.length === 2);
+assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
- sleepShell();
+sleepShell();
- // Before the fix for SERVER-34333, the operations in these shells would be deadlocked with each
- // other and never complete.
- changeStreamShell1();
- changeStreamShell2();
+// Before the fix for SERVER-34333, the operations in these shells would be deadlocked with each
+// other and never complete.
+changeStreamShell1();
+changeStreamShell2();
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/change_stream_failover.js b/jstests/noPassthrough/change_stream_failover.js
index 8168c7722de..b8ec132fdd8 100644
--- a/jstests/noPassthrough/change_stream_failover.js
+++ b/jstests/noPassthrough/change_stream_failover.js
@@ -3,90 +3,89 @@
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [requires_replication,requires_journaling]
(function() {
- "use strict";
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- const rst = new ReplSetTest({nodes: 3});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
-
- rst.initiate();
-
- for (let key of Object.keys(ChangeStreamWatchMode)) {
- const watchMode = ChangeStreamWatchMode[key];
- jsTestLog("Running test for mode " + watchMode);
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB("test");
- const coll = assertDropAndRecreateCollection(primaryDB, "change_stream_failover");
-
- // Be sure we'll only read from the primary.
- primary.setReadPref("primary");
-
- // Open a changeStream on the primary.
- const cst =
- new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, primaryDB));
-
- let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
-
- // Be sure we can read from the change stream. Use {w: "majority"} so that we're still
- // guaranteed to be able to read after the failover.
- assert.writeOK(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-
- const firstChange = cst.getOneChange(changeStream);
- assert.docEq(firstChange.fullDocument, {_id: 0});
-
- // Make the primary step down
- assert.commandWorked(primaryDB.adminCommand({replSetStepDown: 30}));
-
- // Now wait for another primary to be elected.
- const newPrimary = rst.getPrimary();
- // Be sure we got a different node that the previous primary.
- assert.neq(newPrimary.port, primary.port);
-
- cst.assertNextChangesEqual({
- cursor: changeStream,
- expectedChanges: [{
+"use strict";
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+const rst = new ReplSetTest({nodes: 3});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+
+rst.initiate();
+
+for (let key of Object.keys(ChangeStreamWatchMode)) {
+ const watchMode = ChangeStreamWatchMode[key];
+ jsTestLog("Running test for mode " + watchMode);
+
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB("test");
+ const coll = assertDropAndRecreateCollection(primaryDB, "change_stream_failover");
+
+ // Be sure we'll only read from the primary.
+ primary.setReadPref("primary");
+
+ // Open a changeStream on the primary.
+ const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, primaryDB));
+
+ let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
+
+ // Be sure we can read from the change stream. Use {w: "majority"} so that we're still
+ // guaranteed to be able to read after the failover.
+ assert.writeOK(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+
+ const firstChange = cst.getOneChange(changeStream);
+ assert.docEq(firstChange.fullDocument, {_id: 0});
+
+    // Make the primary step down for 30 seconds.
+ assert.commandWorked(primaryDB.adminCommand({replSetStepDown: 30}));
+
+ // Now wait for another primary to be elected.
+ const newPrimary = rst.getPrimary();
+    // Be sure we got a different node than the previous primary.
+ assert.neq(newPrimary.port, primary.port);
+
+ cst.assertNextChangesEqual({
+ cursor: changeStream,
+ expectedChanges: [{
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1},
+ ns: {db: primaryDB.getName(), coll: coll.getName()},
+ operationType: "insert",
+ }]
+ });
+
+ // Now resume using the resume token from the first change (before the failover).
+ const resumeCursor =
+ cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
+
+ // Be sure we can read the 2nd and 3rd changes.
+ cst.assertNextChangesEqual({
+ cursor: resumeCursor,
+ expectedChanges: [
+ {
documentKey: {_id: 1},
fullDocument: {_id: 1},
ns: {db: primaryDB.getName(), coll: coll.getName()},
operationType: "insert",
- }]
- });
-
- // Now resume using the resume token from the first change (before the failover).
- const resumeCursor =
- cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
-
- // Be sure we can read the 2nd and 3rd changes.
- cst.assertNextChangesEqual({
- cursor: resumeCursor,
- expectedChanges: [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1},
- ns: {db: primaryDB.getName(), coll: coll.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2},
- ns: {db: primaryDB.getName(), coll: coll.getName()},
- operationType: "insert",
- }
- ]
- });
-
- // Unfreeze the original primary so that it can stand for election again.
- assert.commandWorked(primaryDB.adminCommand({replSetFreeze: 0}));
- }
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2},
+ ns: {db: primaryDB.getName(), coll: coll.getName()},
+ operationType: "insert",
+ }
+ ]
+ });
- rst.stopSet();
+ // Unfreeze the original primary so that it can stand for election again.
+ assert.commandWorked(primaryDB.adminCommand({replSetFreeze: 0}));
+}
+
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/change_stream_resume_before_add_shard.js b/jstests/noPassthrough/change_stream_resume_before_add_shard.js
index d987b55bea5..c3b46e9b79c 100644
--- a/jstests/noPassthrough/change_stream_resume_before_add_shard.js
+++ b/jstests/noPassthrough/change_stream_resume_before_add_shard.js
@@ -4,113 +4,115 @@
* @tags: [uses_change_streams, requires_sharding]
*/
(function() {
- "use strict";
-
- const rsNodeOptions = {setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}};
- const st =
- new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
-
- const mongosDB = st.s.getDB(jsTestName());
- const coll = mongosDB.test;
-
- // Helper function to confirm that a stream sees an expected sequence of documents. This
- // function also pushes all observed changes into the supplied 'eventList' array.
- function assertAllEventsObserved(changeStream, expectedDocs, eventList) {
- for (let expectedDoc of expectedDocs) {
- assert.soon(() => changeStream.hasNext());
- const nextEvent = changeStream.next();
- assert.eq(nextEvent.fullDocument, expectedDoc);
- if (eventList) {
- eventList.push(nextEvent);
- }
+"use strict";
+
+const rsNodeOptions = {
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+};
+const st =
+ new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+
+const mongosDB = st.s.getDB(jsTestName());
+const coll = mongosDB.test;
+
+// Helper function to confirm that a stream sees an expected sequence of documents. This
+// function also pushes all observed changes into the supplied 'eventList' array.
+function assertAllEventsObserved(changeStream, expectedDocs, eventList) {
+ for (let expectedDoc of expectedDocs) {
+ assert.soon(() => changeStream.hasNext());
+ const nextEvent = changeStream.next();
+ assert.eq(nextEvent.fullDocument, expectedDoc);
+ if (eventList) {
+ eventList.push(nextEvent);
}
}
-
- // Helper function to add a new ReplSetTest shard into the cluster. Using single-node shards
- // ensures that the "initiating set" entry cannot be rolled back.
- function addShardToCluster(shardName) {
- const replTest = new ReplSetTest({name: shardName, nodes: 1, nodeOptions: rsNodeOptions});
- replTest.startSet({shardsvr: ""});
- replTest.initiate();
- assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: shardName}));
-
- // Verify that the new shard's first oplog entry contains the string "initiating set". This
- // is used by change streams as a sentinel to indicate that no writes have occurred on the
- // replica set before this point.
- const firstOplogEntry = replTest.getPrimary().getCollection("local.oplog.rs").findOne();
- assert.docEq(firstOplogEntry.o, {msg: "initiating set"});
- assert.eq(firstOplogEntry.op, "n");
-
- return replTest;
- }
-
- // Helper function to resume from each event in a given list and confirm that the resumed stream
- // sees the subsequent events in the correct expected order.
- function assertCanResumeFromEachEvent(eventList) {
- for (let i = 0; i < eventList.length; ++i) {
- const resumedStream = coll.watch([], {resumeAfter: eventList[i]._id});
- for (let j = i + 1; j < eventList.length; ++j) {
- assert.soon(() => resumedStream.hasNext());
- assert.docEq(resumedStream.next(), eventList[j]);
- }
- resumedStream.close();
+}
+
+// Helper function to add a new ReplSetTest shard into the cluster. Using single-node shards
+// ensures that the "initiating set" entry cannot be rolled back.
+function addShardToCluster(shardName) {
+ const replTest = new ReplSetTest({name: shardName, nodes: 1, nodeOptions: rsNodeOptions});
+ replTest.startSet({shardsvr: ""});
+ replTest.initiate();
+ assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: shardName}));
+
+ // Verify that the new shard's first oplog entry contains the string "initiating set". This
+ // is used by change streams as a sentinel to indicate that no writes have occurred on the
+ // replica set before this point.
+ const firstOplogEntry = replTest.getPrimary().getCollection("local.oplog.rs").findOne();
+ assert.docEq(firstOplogEntry.o, {msg: "initiating set"});
+ assert.eq(firstOplogEntry.op, "n");
+
+ return replTest;
+}
+
+// Helper function to resume from each event in a given list and confirm that the resumed stream
+// sees the subsequent events in the correct expected order.
+function assertCanResumeFromEachEvent(eventList) {
+ for (let i = 0; i < eventList.length; ++i) {
+ const resumedStream = coll.watch([], {resumeAfter: eventList[i]._id});
+ for (let j = i + 1; j < eventList.length; ++j) {
+ assert.soon(() => resumedStream.hasNext());
+ assert.docEq(resumedStream.next(), eventList[j]);
}
+ resumedStream.close();
}
-
- // Open a change stream on the unsharded test collection.
- const csCursor = coll.watch();
- assert(!csCursor.hasNext());
- const changeList = [];
-
- // Insert some docs into the unsharded collection, and obtain a change stream event for each.
- const insertedDocs = [{_id: 1}, {_id: 2}, {_id: 3}];
- assert.commandWorked(coll.insert(insertedDocs));
- assertAllEventsObserved(csCursor, insertedDocs, changeList);
-
- // Verify that, for a brand new shard, we can start at an operation time before the set existed.
- let startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
- assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs);
- startAtDawnOfTimeCursor.close();
-
- // Add a new shard into the cluster. Wait three seconds so that its initiation time is
- // guaranteed to be later than any of the events in the existing shard's oplog.
- const newShard1 = sleep(3000) || addShardToCluster("newShard1");
-
- // .. and confirm that we can resume from any point before the shard was added.
- assertCanResumeFromEachEvent(changeList);
-
- // Now shard the collection on _id and move one chunk to the new shard.
- st.shardColl(coll, {_id: 1}, {_id: 3}, false);
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 3}, to: "newShard1", _waitForDelete: true}));
-
- // Insert some new documents into the new shard and verify that the original stream sees them.
- const newInsertedDocs = [{_id: 4}, {_id: 5}];
- assert.commandWorked(coll.insert(newInsertedDocs));
- assertAllEventsObserved(csCursor, newInsertedDocs, changeList);
-
- // Add a third shard into the cluster...
- const newShard2 = sleep(3000) || addShardToCluster("newShard2");
-
- // ... and verify that we can resume the stream from any of the preceding events.
- assertCanResumeFromEachEvent(changeList);
-
- // Now drop the collection, and verify that we can still resume from any point.
- assert(coll.drop());
- for (let expectedEvent of["drop", "invalidate"]) {
- assert.soon(() => csCursor.hasNext());
- assert.eq(csCursor.next().operationType, expectedEvent);
- }
- assertCanResumeFromEachEvent(changeList);
-
- // Verify that we can start at an operation time before the cluster existed and see all events.
- startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
- assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs.concat(newInsertedDocs));
- startAtDawnOfTimeCursor.close();
-
- st.stop();
-
- // Stop the new shards manually since the ShardingTest doesn't know anything about them.
- newShard1.stopSet();
- newShard2.stopSet();
+}
+
+// Open a change stream on the unsharded test collection.
+const csCursor = coll.watch();
+assert(!csCursor.hasNext());
+const changeList = [];
+
+// Insert some docs into the unsharded collection, and obtain a change stream event for each.
+const insertedDocs = [{_id: 1}, {_id: 2}, {_id: 3}];
+assert.commandWorked(coll.insert(insertedDocs));
+assertAllEventsObserved(csCursor, insertedDocs, changeList);
+
+// Verify that, for a brand new shard, we can start at an operation time before the set existed.
+let startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
+assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs);
+startAtDawnOfTimeCursor.close();
+
+// Add a new shard into the cluster. Wait three seconds so that its initiation time is
+// guaranteed to be later than any of the events in the existing shard's oplog.
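+// (sleep() returns a falsy value, so the '||' goes on to evaluate addShardToCluster.)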
+const newShard1 = sleep(3000) || addShardToCluster("newShard1");
+
+// ... and confirm that we can resume from any point before the shard was added.
+assertCanResumeFromEachEvent(changeList);
+
+// Now shard the collection on _id and move one chunk to the new shard.
+st.shardColl(coll, {_id: 1}, {_id: 3}, false);
+assert.commandWorked(st.s.adminCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 3}, to: "newShard1", _waitForDelete: true}));
+
+// Insert some new documents into the new shard and verify that the original stream sees them.
+const newInsertedDocs = [{_id: 4}, {_id: 5}];
+assert.commandWorked(coll.insert(newInsertedDocs));
+assertAllEventsObserved(csCursor, newInsertedDocs, changeList);
+
+// Add a third shard into the cluster...
+const newShard2 = sleep(3000) || addShardToCluster("newShard2");
+
+// ... and verify that we can resume the stream from any of the preceding events.
+assertCanResumeFromEachEvent(changeList);
+
+// Now drop the collection, and verify that we can still resume from any point.
+assert(coll.drop());
+for (let expectedEvent of ["drop", "invalidate"]) {
+ assert.soon(() => csCursor.hasNext());
+ assert.eq(csCursor.next().operationType, expectedEvent);
+}
+assertCanResumeFromEachEvent(changeList);
+
+// Verify that we can start at an operation time before the cluster existed and see all events.
+startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
+assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs.concat(newInsertedDocs));
+startAtDawnOfTimeCursor.close();
+
+st.stop();
+
+// Stop the new shards manually since the ShardingTest doesn't know anything about them.
+newShard1.stopSet();
+newShard2.stopSet();
})();
diff --git a/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js b/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js
index 6dca178f6a3..4cffce18e1a 100644
--- a/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js
+++ b/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js
@@ -4,41 +4,41 @@
// bug described in SERVER-41196.
// @tags: [requires_sharding, uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // The edge case we are testing occurs on an unsharded collection in a sharded cluster. We
- // create a cluster with just one shard to ensure the test never blocks for another shard.
- const st = new ShardingTest(
- {shards: 1, mongos: 1, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
+// The edge case we are testing occurs on an unsharded collection in a sharded cluster. We
+// create a cluster with just one shard to ensure the test never blocks for another shard.
+const st = new ShardingTest(
+ {shards: 1, mongos: 1, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- // Start a change stream that matches on the invalidate event.
- const changeStream = mongosColl.watch([{'$match': {'operationType': 'invalidate'}}]);
+// Start a change stream that matches on the invalidate event.
+const changeStream = mongosColl.watch([{'$match': {'operationType': 'invalidate'}}]);
- // Create the collection by inserting into it and then drop the collection, thereby generating
- // an invalidate event.
- assert.commandWorked(mongosColl.insert({_id: 1}));
- assert(mongosColl.drop());
- assert.soon(() => changeStream.hasNext());
- const invalidateEvent = changeStream.next();
+// Create the collection by inserting into it and then drop the collection, thereby generating
+// an invalidate event.
+assert.commandWorked(mongosColl.insert({_id: 1}));
+assert(mongosColl.drop());
+assert.soon(() => changeStream.hasNext());
+const invalidateEvent = changeStream.next();
- // Resuming the change stream using the invalidate event allows us to see events after the drop.
- const resumeStream = mongosColl.watch([], {startAfter: invalidateEvent["_id"]});
+// Resuming the change stream using the invalidate event allows us to see events after the drop.
+const resumeStream = mongosColl.watch([], {startAfter: invalidateEvent["_id"]});
- // The PBRT returned with the first (empty) batch should match the resume token we supplied.
- assert.eq(bsonWoCompare(resumeStream.getResumeToken(), invalidateEvent["_id"]), 0);
+// The postBatchResumeToken (PBRT) returned with the first (empty) batch should match the
+// resume token we supplied.
+assert.eq(bsonWoCompare(resumeStream.getResumeToken(), invalidateEvent["_id"]), 0);
- // Initially, there should be no events visible after the drop.
- assert(!resumeStream.hasNext());
+// Initially, there should be no events visible after the drop.
+assert(!resumeStream.hasNext());
- // Add one last event and make sure the change stream sees it.
- assert.commandWorked(mongosColl.insert({_id: 2}));
- assert.soon(() => resumeStream.hasNext());
- const afterDrop = resumeStream.next();
- assert.eq(afterDrop.operationType, "insert");
- assert.eq(afterDrop.fullDocument, {_id: 2});
+// Add one last event and make sure the change stream sees it.
+assert.commandWorked(mongosColl.insert({_id: 2}));
+assert.soon(() => resumeStream.hasNext());
+const afterDrop = resumeStream.next();
+assert.eq(afterDrop.operationType, "insert");
+assert.eq(afterDrop.fullDocument, {_id: 2});
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/change_stream_transaction.js b/jstests/noPassthrough/change_stream_transaction.js
index fb244c18366..8de51656cfa 100644
--- a/jstests/noPassthrough/change_stream_transaction.js
+++ b/jstests/noPassthrough/change_stream_transaction.js
@@ -8,277 +8,268 @@
* ]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js"); // For PrepareHelpers.
-
- const dbName = "test";
- const collName = "change_stream_transaction";
-
- /**
- * This test sets an internal parameter in order to force transactions with more than 4
- * operations to span multiple oplog entries, making it easier to test that scenario.
- */
- const maxOpsInOplogEntry = 4;
-
- /**
- * Asserts that the expected operation type and documentKey are found on the change stream
- * cursor. Returns the change stream document.
- */
- function assertWriteVisible(cursor, operationType, documentKey) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
- assert.eq(operationType, changeDoc.operationType, changeDoc);
- assert.eq(documentKey, changeDoc.documentKey, changeDoc);
- return changeDoc;
- }
-
- /**
- * Asserts that the expected operation type and documentKey are found on the change stream
- * cursor. Pushes the corresponding resume token and change stream document to an array.
- */
- function assertWriteVisibleWithCapture(cursor, operationType, documentKey, changeList) {
- const changeDoc = assertWriteVisible(cursor, operationType, documentKey);
- changeList.push(changeDoc);
- }
-
- /**
- * Asserts that there are no changes waiting on the change stream cursor.
- */
- function assertNoChanges(cursor) {
- assert(!cursor.hasNext(), () => {
- return "Unexpected change set: " + tojson(cursor.toArray());
- });
- }
-
- function runTest(conn) {
- const db = conn.getDB(dbName);
- const coll = db.getCollection(collName);
- const unwatchedColl = db.getCollection(collName + "_unwatched");
- let changeList = [];
-
- // Collections must be created outside of any transaction.
- assert.commandWorked(db.createCollection(coll.getName()));
- assert.commandWorked(db.createCollection(unwatchedColl.getName()));
-
- //
- // Start transaction 1.
- //
- const session1 = db.getMongo().startSession();
- const sessionDb1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDb1[collName];
- session1.startTransaction({readConcern: {level: "majority"}});
-
- //
- // Start transaction 2.
- //
- const session2 = db.getMongo().startSession();
- const sessionDb2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDb2[collName];
- session2.startTransaction({readConcern: {level: "majority"}});
-
- //
- // Start transaction 3.
- //
- const session3 = db.getMongo().startSession();
- const sessionDb3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDb3[collName];
- session3.startTransaction({readConcern: {level: "majority"}});
-
- // Open a change stream on the test collection.
- const changeStreamCursor = coll.watch();
-
- // Insert a document and confirm that the change stream has it.
- assert.commandWorked(coll.insert({_id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-1"}, changeList);
-
- // Insert two documents under each transaction and confirm no change stream updates.
- assert.commandWorked(sessionColl1.insert([{_id: "txn1-doc-1"}, {_id: "txn1-doc-2"}]));
- assert.commandWorked(sessionColl2.insert([{_id: "txn2-doc-1"}, {_id: "txn2-doc-2"}]));
- assertNoChanges(changeStreamCursor);
-
- // Update one document under each transaction and confirm no change stream updates.
- assert.commandWorked(sessionColl1.update({_id: "txn1-doc-1"}, {$set: {"updated": 1}}));
- assert.commandWorked(sessionColl2.update({_id: "txn2-doc-1"}, {$set: {"updated": 1}}));
- assertNoChanges(changeStreamCursor);
+"use strict";
- // Update and then remove the second doc under each transaction and confirm no change stream
- // events are seen.
- assert.commandWorked(
- sessionColl1.update({_id: "txn1-doc-2"}, {$set: {"update-before-delete": 1}}));
- assert.commandWorked(
- sessionColl2.update({_id: "txn2-doc-2"}, {$set: {"update-before-delete": 1}}));
- assert.commandWorked(sessionColl1.remove({_id: "txn1-doc-2"}));
- assert.commandWorked(sessionColl2.remove({_id: "txn2-doc-2"}));
- assertNoChanges(changeStreamCursor);
+load("jstests/core/txns/libs/prepare_helpers.js"); // For PrepareHelpers.
- // Perform a write to the 'session1' transaction in a collection that is not being watched
- // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
- // now or on commit.
- assert.commandWorked(
- sessionDb1[unwatchedColl.getName()].insert({_id: "txn1-doc-unwatched-collection"}));
- assertNoChanges(changeStreamCursor);
+const dbName = "test";
+const collName = "change_stream_transaction";
- // Perform a write to the 'session3' transaction in a collection that is not being watched
- // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
- // now or on commit.
- assert.commandWorked(
- sessionDb3[unwatchedColl.getName()].insert({_id: "txn3-doc-unwatched-collection"}));
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of a transaction and confirm that the change stream sees only
- // this write.
- assert.commandWorked(coll.insert({_id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-2"}, changeList);
- assertNoChanges(changeStreamCursor);
-
- let prepareTimestampTxn1;
- prepareTimestampTxn1 = PrepareHelpers.prepareTransaction(session1);
- assertNoChanges(changeStreamCursor);
-
- assert.commandWorked(coll.insert({_id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-3"}, changeList);
-
- //
- // Commit first transaction and confirm expected changes.
- //
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestampTxn1));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "txn1-doc-1"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "txn1-doc-2"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "update", {_id: "txn1-doc-1"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "update", {_id: "txn1-doc-2"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "delete", {_id: "txn1-doc-2"}, changeList);
- assertNoChanges(changeStreamCursor);
-
- // Transition the second transaction to prepared. We skip capturing the prepare
- // timestamp it is not required for abortTransaction_forTesting().
- PrepareHelpers.prepareTransaction(session2);
- assertNoChanges(changeStreamCursor);
+/**
+ * This test sets an internal parameter in order to force transactions with more than 4
+ * operations to span multiple oplog entries, making it easier to test that scenario.
+ */
+const maxOpsInOplogEntry = 4;
- assert.commandWorked(coll.insert({_id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-4"}, changeList);
+/**
+ * Asserts that the expected operation type and documentKey are found on the change stream
+ * cursor. Returns the change stream document.
+ */
+function assertWriteVisible(cursor, operationType, documentKey) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+ assert.eq(operationType, changeDoc.operationType, changeDoc);
+ assert.eq(documentKey, changeDoc.documentKey, changeDoc);
+ return changeDoc;
+}
+
+/**
+ * Asserts that the expected operation type and documentKey are found on the change stream
+ * cursor. Pushes the corresponding resume token and change stream document to an array.
+ */
+function assertWriteVisibleWithCapture(cursor, operationType, documentKey, changeList) {
+ const changeDoc = assertWriteVisible(cursor, operationType, documentKey);
+ changeList.push(changeDoc);
+}
- //
- // Abort second transaction.
- //
- session2.abortTransaction_forTesting();
+/**
+ * Asserts that there are no changes waiting on the change stream cursor.
+ */
+function assertNoChanges(cursor) {
+ assert(!cursor.hasNext(), () => {
+ return "Unexpected change set: " + tojson(cursor.toArray());
+ });
+}
+
+function runTest(conn) {
+ const db = conn.getDB(dbName);
+ const coll = db.getCollection(collName);
+ const unwatchedColl = db.getCollection(collName + "_unwatched");
+ let changeList = [];
+
+ // Collections must be created outside of any transaction.
+ assert.commandWorked(db.createCollection(coll.getName()));
+ assert.commandWorked(db.createCollection(unwatchedColl.getName()));
+
+ //
+ // Start transaction 1.
+ //
+ const session1 = db.getMongo().startSession();
+ const sessionDb1 = session1.getDatabase(dbName);
+ const sessionColl1 = sessionDb1[collName];
+ session1.startTransaction({readConcern: {level: "majority"}});
+
+ //
+ // Start transaction 2.
+ //
+ const session2 = db.getMongo().startSession();
+ const sessionDb2 = session2.getDatabase(dbName);
+ const sessionColl2 = sessionDb2[collName];
+ session2.startTransaction({readConcern: {level: "majority"}});
+
+ //
+ // Start transaction 3.
+ //
+ const session3 = db.getMongo().startSession();
+ const sessionDb3 = session3.getDatabase(dbName);
+ const sessionColl3 = sessionDb3[collName];
+ session3.startTransaction({readConcern: {level: "majority"}});
+
+ // Open a change stream on the test collection.
+ const changeStreamCursor = coll.watch();
+
+ // Insert a document and confirm that the change stream has it.
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-1"}, changeList);
+
+ // Insert two documents under each transaction and confirm no change stream updates.
+ assert.commandWorked(sessionColl1.insert([{_id: "txn1-doc-1"}, {_id: "txn1-doc-2"}]));
+ assert.commandWorked(sessionColl2.insert([{_id: "txn2-doc-1"}, {_id: "txn2-doc-2"}]));
+ assertNoChanges(changeStreamCursor);
+
+ // Update one document under each transaction and confirm no change stream updates.
+ assert.commandWorked(sessionColl1.update({_id: "txn1-doc-1"}, {$set: {"updated": 1}}));
+ assert.commandWorked(sessionColl2.update({_id: "txn2-doc-1"}, {$set: {"updated": 1}}));
+ assertNoChanges(changeStreamCursor);
+
+ // Update and then remove the second doc under each transaction and confirm no change stream
+ // events are seen.
+ assert.commandWorked(
+ sessionColl1.update({_id: "txn1-doc-2"}, {$set: {"update-before-delete": 1}}));
+ assert.commandWorked(
+ sessionColl2.update({_id: "txn2-doc-2"}, {$set: {"update-before-delete": 1}}));
+ assert.commandWorked(sessionColl1.remove({_id: "txn1-doc-2"}));
+ assert.commandWorked(sessionColl2.remove({_id: "txn2-doc-2"}));
+ assertNoChanges(changeStreamCursor);
+
+ // Perform a write to the 'session1' transaction in a collection that is not being watched
+ // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
+ // now or on commit.
+ assert.commandWorked(
+ sessionDb1[unwatchedColl.getName()].insert({_id: "txn1-doc-unwatched-collection"}));
+ assertNoChanges(changeStreamCursor);
+
+ // Perform a write to the 'session3' transaction in a collection that is not being watched
+ // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
+ // now or on commit.
+ assert.commandWorked(
+ sessionDb3[unwatchedColl.getName()].insert({_id: "txn3-doc-unwatched-collection"}));
+ assertNoChanges(changeStreamCursor);
+
+ // Perform a write outside of a transaction and confirm that the change stream sees only
+ // this write.
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-2"}, changeList);
+ assertNoChanges(changeStreamCursor);
+
+ let prepareTimestampTxn1;
+ prepareTimestampTxn1 = PrepareHelpers.prepareTransaction(session1);
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-3"}, changeList);
+
+ //
+ // Commit first transaction and confirm expected changes.
+ //
+ assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestampTxn1));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "txn1-doc-1"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "txn1-doc-2"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "update", {_id: "txn1-doc-1"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "update", {_id: "txn1-doc-2"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "delete", {_id: "txn1-doc-2"}, changeList);
+ assertNoChanges(changeStreamCursor);
+
+ // Transition the second transaction to prepared. We skip capturing the prepare
+    // timestamp, as it is not required for abortTransaction_forTesting().
+ PrepareHelpers.prepareTransaction(session2);
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-4"}, changeList);
+
+ //
+ // Abort second transaction.
+ //
+ session2.abortTransaction_forTesting();
+ assertNoChanges(changeStreamCursor);
+
+ //
+ // Start transaction 4.
+ //
+ const session4 = db.getMongo().startSession();
+ const sessionDb4 = session4.getDatabase(dbName);
+ const sessionColl4 = sessionDb4[collName];
+ session4.startTransaction({readConcern: {level: "majority"}});
+
+ // Perform enough writes to fill up one applyOps.
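+    // (The replica set below is configured so that an applyOps oplog entry holds at most
+    // maxOpsInOplogEntry operations.)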
+ const txn4Inserts = Array.from({length: maxOpsInOplogEntry},
+ (_, index) => ({_id: {name: "txn4-doc", index: index}}));
+ txn4Inserts.forEach(function(doc) {
+ sessionColl4.insert(doc);
assertNoChanges(changeStreamCursor);
+ });
- //
- // Start transaction 4.
- //
- const session4 = db.getMongo().startSession();
- const sessionDb4 = session4.getDatabase(dbName);
- const sessionColl4 = sessionDb4[collName];
- session4.startTransaction({readConcern: {level: "majority"}});
-
- // Perform enough writes to fill up one applyOps.
- const txn4Inserts = Array.from({length: maxOpsInOplogEntry},
- (_, index) => ({_id: {name: "txn4-doc", index: index}}));
- txn4Inserts.forEach(function(doc) {
- sessionColl4.insert(doc);
- assertNoChanges(changeStreamCursor);
- });
-
- // Perform enough writes to an unwatched collection to fill up a second applyOps. We
- // specifically want to test the case where a multi-applyOps transaction has no relevant
- // updates in its final applyOps.
- txn4Inserts.forEach(function(doc) {
- assert.commandWorked(sessionDb4[unwatchedColl.getName()].insert(doc));
- assertNoChanges(changeStreamCursor);
- });
-
- //
- // Start transaction 5.
- //
- const session5 = db.getMongo().startSession();
- const sessionDb5 = session5.getDatabase(dbName);
- const sessionColl5 = sessionDb5[collName];
- session5.startTransaction({readConcern: {level: "majority"}});
-
- // Perform enough writes to span 3 applyOps entries.
- const txn5Inserts = Array.from({length: 3 * maxOpsInOplogEntry},
- (_, index) => ({_id: {name: "txn5-doc", index: index}}));
- txn5Inserts.forEach(function(doc) {
- assert.commandWorked(sessionColl5.insert(doc));
- assertNoChanges(changeStreamCursor);
- });
-
- //
- // Prepare and commit transaction 5.
- //
- const prepareTimestampTxn5 = PrepareHelpers.prepareTransaction(session5);
+ // Perform enough writes to an unwatched collection to fill up a second applyOps. We
+ // specifically want to test the case where a multi-applyOps transaction has no relevant
+ // updates in its final applyOps.
+ txn4Inserts.forEach(function(doc) {
+ assert.commandWorked(sessionDb4[unwatchedColl.getName()].insert(doc));
assertNoChanges(changeStreamCursor);
- assert.commandWorked(PrepareHelpers.commitTransaction(session5, prepareTimestampTxn5));
- txn5Inserts.forEach(function(doc) {
- assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
- });
-
- //
- // Commit transaction 4 without preparing.
- //
- session4.commitTransaction();
- txn4Inserts.forEach(function(doc) {
- assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
- });
+ });
+
+ //
+ // Start transaction 5.
+ //
+ const session5 = db.getMongo().startSession();
+ const sessionDb5 = session5.getDatabase(dbName);
+ const sessionColl5 = sessionDb5[collName];
+ session5.startTransaction({readConcern: {level: "majority"}});
+
+ // Perform enough writes to span 3 applyOps entries.
+ const txn5Inserts = Array.from({length: 3 * maxOpsInOplogEntry},
+ (_, index) => ({_id: {name: "txn5-doc", index: index}}));
+ txn5Inserts.forEach(function(doc) {
+ assert.commandWorked(sessionColl5.insert(doc));
assertNoChanges(changeStreamCursor);
-
- changeStreamCursor.close();
-
- // Test that change stream resume returns the expected set of documents at each point
- // captured by this test.
- for (let i = 0; i < changeList.length; ++i) {
- const resumeCursor = coll.watch([], {startAfter: changeList[i]._id});
-
- for (let x = (i + 1); x < changeList.length; ++x) {
- const expectedChangeDoc = changeList[x];
- assertWriteVisible(
- resumeCursor, expectedChangeDoc.operationType, expectedChangeDoc.documentKey);
- }
-
- assertNoChanges(resumeCursor);
- resumeCursor.close();
+ });
+
+ //
+ // Prepare and commit transaction 5.
+ //
+ const prepareTimestampTxn5 = PrepareHelpers.prepareTransaction(session5);
+ assertNoChanges(changeStreamCursor);
+ assert.commandWorked(PrepareHelpers.commitTransaction(session5, prepareTimestampTxn5));
+ txn5Inserts.forEach(function(doc) {
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
+ });
+
+ //
+ // Commit transaction 4 without preparing.
+ //
+ session4.commitTransaction();
+ txn4Inserts.forEach(function(doc) {
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
+ });
+ assertNoChanges(changeStreamCursor);
+
+ changeStreamCursor.close();
+
+ // Test that change stream resume returns the expected set of documents at each point
+ // captured by this test.
+ for (let i = 0; i < changeList.length; ++i) {
+ const resumeCursor = coll.watch([], {startAfter: changeList[i]._id});
+
+ for (let x = (i + 1); x < changeList.length; ++x) {
+ const expectedChangeDoc = changeList[x];
+ assertWriteVisible(
+ resumeCursor, expectedChangeDoc.operationType, expectedChangeDoc.documentKey);
}
- //
- // Prepare and commit the third transaction and confirm that there are no visible changes.
- //
- let prepareTimestampTxn3;
- prepareTimestampTxn3 = PrepareHelpers.prepareTransaction(session3);
- assertNoChanges(changeStreamCursor);
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session3, prepareTimestampTxn3));
- assertNoChanges(changeStreamCursor);
-
- assert.commandWorked(db.dropDatabase());
- }
-
- let replSetTestDescription = {nodes: 1};
- if (!jsTest.options().setParameters.hasOwnProperty(
- "maxNumberOfTransactionOperationsInSingleOplogEntry")) {
- // Configure the replica set to use our value for maxOpsInOplogEntry.
- replSetTestDescription.nodeOptions = {
- setParameter: {maxNumberOfTransactionOperationsInSingleOplogEntry: maxOpsInOplogEntry}
- };
- } else {
- // The test is executing in a build variant that already defines its own override value for
- // maxNumberOfTransactionOperationsInSingleOplogEntry. Even though the build variant's
- // choice for this override won't test the same edge cases, the test should still succeed.
+ assertNoChanges(resumeCursor);
+ resumeCursor.close();
}
- const rst = new ReplSetTest(replSetTestDescription);
- rst.startSet();
- rst.initiate();
-
- runTest(rst.getPrimary());
- rst.stopSet();
+ //
+ // Prepare and commit the third transaction and confirm that there are no visible changes.
+ //
+ let prepareTimestampTxn3;
+ prepareTimestampTxn3 = PrepareHelpers.prepareTransaction(session3);
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(PrepareHelpers.commitTransaction(session3, prepareTimestampTxn3));
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(db.dropDatabase());
+}
+
+let replSetTestDescription = {nodes: 1};
+if (!jsTest.options().setParameters.hasOwnProperty(
+ "maxNumberOfTransactionOperationsInSingleOplogEntry")) {
+ // Configure the replica set to use our value for maxOpsInOplogEntry.
+ replSetTestDescription.nodeOptions = {
+ setParameter: {maxNumberOfTransactionOperationsInSingleOplogEntry: maxOpsInOplogEntry}
+ };
+} else {
+ // The test is executing in a build variant that already defines its own override value for
+ // maxNumberOfTransactionOperationsInSingleOplogEntry. Even though the build variant's
+ // choice for this override won't test the same edge cases, the test should still succeed.
+}
+const rst = new ReplSetTest(replSetTestDescription);
+rst.startSet();
+rst.initiate();
+
+runTest(rst.getPrimary());
+
+rst.stopSet();
})();
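The resume loop above relies on an assertWriteVisible() helper defined earlier in this test, outside the excerpt. A minimal inlined sketch of the same verification pattern, assuming a shell collection 'coll' and a 'changeList' of captured events (names mirror the test; the inlined assertions are illustrative, not the test's exact helper):

    for (let i = 0; i < changeList.length; ++i) {
        // Re-open the stream immediately after the i-th captured event.
        const resumeCursor = coll.watch([], {startAfter: changeList[i]._id});
        for (let x = i + 1; x < changeList.length; ++x) {
            assert.soon(() => resumeCursor.hasNext());
            const changeDoc = resumeCursor.next();
            // Every subsequent event must replay in its original order.
            assert.eq(changeList[x].operationType, changeDoc.operationType, changeDoc);
            assert.eq(changeList[x].documentKey, changeDoc.documentKey, changeDoc);
        }
        resumeCursor.close();
    }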
diff --git a/jstests/noPassthrough/change_streams_collation_chunk_migration.js b/jstests/noPassthrough/change_streams_collation_chunk_migration.js
index 4be1044d2d9..51d0536900d 100644
--- a/jstests/noPassthrough/change_streams_collation_chunk_migration.js
+++ b/jstests/noPassthrough/change_streams_collation_chunk_migration.js
@@ -4,61 +4,64 @@
* @tags: [requires_replication, requires_journaling]
*/
(function() {
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
- const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- rs: {
- nodes: 1,
- },
- });
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ rs: {
+ nodes: 1,
+ },
+});
- const testDB = st.s.getDB(jsTestName());
+const testDB = st.s.getDB(jsTestName());
- // Enable sharding on the test database and ensure that the primary is shard0.
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+// Enable sharding on the test database and ensure that the primary is shard0.
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- const caseInsensitiveCollectionName = "change_stream_case_insensitive";
- const caseInsensitive = {locale: "en_US", strength: 2};
+const caseInsensitiveCollectionName = "change_stream_case_insensitive";
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
- // Create the collection with a case-insensitive collation, then shard it on {shardKey: 1}.
- const caseInsensitiveCollection = assertDropAndRecreateCollection(
- testDB, caseInsensitiveCollectionName, {collation: caseInsensitive});
- assert.commandWorked(
- caseInsensitiveCollection.createIndex({shardKey: 1}, {collation: {locale: "simple"}}));
- assert.commandWorked(testDB.adminCommand({
- shardCollection: caseInsensitiveCollection.getFullName(),
- key: {shardKey: 1},
- collation: {locale: "simple"}
- }));
+// Create the collection with a case-insensitive collation, then shard it on {shardKey: 1}.
+const caseInsensitiveCollection = assertDropAndRecreateCollection(
+ testDB, caseInsensitiveCollectionName, {collation: caseInsensitive});
+assert.commandWorked(
+ caseInsensitiveCollection.createIndex({shardKey: 1}, {collation: {locale: "simple"}}));
+assert.commandWorked(testDB.adminCommand({
+ shardCollection: caseInsensitiveCollection.getFullName(),
+ key: {shardKey: 1},
+ collation: {locale: "simple"}
+}));
- // Verify that the collection does not exist on shard1.
- assert(!st.shard1.getCollection(caseInsensitiveCollection.getFullName()).exists());
+// Verify that the collection does not exist on shard1.
+assert(!st.shard1.getCollection(caseInsensitiveCollection.getFullName()).exists());
- // Now open a change stream on the collection.
- const cst = new ChangeStreamTest(testDB);
- const csCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey.shardKey"}}],
- collection: caseInsensitiveCollection
- });
+// Now open a change stream on the collection.
+const cst = new ChangeStreamTest(testDB);
+const csCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey.shardKey"}}],
+ collection: caseInsensitiveCollection
+});
- // Insert some documents into the collection.
- assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 0, text: "aBc"}));
- assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 1, text: "abc"}));
+// Insert some documents into the collection.
+assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 0, text: "aBc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 1, text: "abc"}));
- // Move a chunk from shard0 to shard1. This will create the collection on shard1.
- assert.commandWorked(testDB.adminCommand({
- moveChunk: caseInsensitiveCollection.getFullName(),
- find: {shardKey: 1},
- to: st.rs1.getURL(),
- _waitForDelete: false
- }));
+// Move a chunk from shard0 to shard1. This will create the collection on shard1.
+assert.commandWorked(testDB.adminCommand({
+ moveChunk: caseInsensitiveCollection.getFullName(),
+ find: {shardKey: 1},
+ to: st.rs1.getURL(),
+ _waitForDelete: false
+}));
- // Attempt to read from the change stream. We should see both inserts, without an invalidation.
- cst.assertNextChangesEqual({cursor: csCursor, expectedChanges: [{docId: 0}, {docId: 1}]});
+// Attempt to read from the change stream. We should see both inserts, without an invalidation.
+cst.assertNextChangesEqual({cursor: csCursor, expectedChanges: [{docId: 0}, {docId: 1}]});
- st.stop();
+st.stop();
})();
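The hunk above exercises a pattern worth noting: a collection whose default collation is case-insensitive can only be sharded through an index built with the simple collation, because chunk ranges are compared with simple binary ordering. A condensed sketch, assuming a mongos connection 'db' (collection and key names are illustrative):

    const collName = "caseInsensitiveExample";  // hypothetical name
    assert.commandWorked(
        db.createCollection(collName, {collation: {locale: "en_US", strength: 2}}));
    // Both the supporting index and shardCollection must specify the simple collation.
    assert.commandWorked(
        db[collName].createIndex({shardKey: 1}, {collation: {locale: "simple"}}));
    assert.commandWorked(db.adminCommand({
        shardCollection: db[collName].getFullName(),
        key: {shardKey: 1},
        collation: {locale: "simple"}
    }));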
diff --git a/jstests/noPassthrough/change_streams_require_majority_read_concern.js b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
index 8481ba586f1..6fdc4c2ee37 100644
--- a/jstests/noPassthrough/change_streams_require_majority_read_concern.js
+++ b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
@@ -1,97 +1,96 @@
// Tests that the $changeStream requires read concern majority.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
- const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
+rst.initiate();
- const name = "change_stream_require_majority_read_concern";
- const db = rst.getPrimary().getDB(name);
+const name = "change_stream_require_majority_read_concern";
+const db = rst.getPrimary().getDB(name);
- // Use ChangeStreamTest to verify that the pipeline returns expected results.
- const cst = new ChangeStreamTest(db);
+// Use ChangeStreamTest to verify that the pipeline returns expected results.
+const cst = new ChangeStreamTest(db);
- // Attempts to get a document from the cursor with awaitData disabled, and asserts if a
- // document is present.
- function assertNextBatchIsEmpty(cursor) {
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "alwaysOn"}));
- let res = assert.commandWorked(db.runCommand({
- getMore: cursor.id,
- collection: getCollectionNameFromFullNamespace(cursor.ns),
- batchSize: 1
- }));
- assert.eq(res.cursor.nextBatch.length, 0);
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "off"}));
- }
+// Attempts to get a document from the cursor with awaitData disabled, and asserts if a
+// document is present.
+function assertNextBatchIsEmpty(cursor) {
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "alwaysOn"}));
+ let res = assert.commandWorked(db.runCommand({
+ getMore: cursor.id,
+ collection: getCollectionNameFromFullNamespace(cursor.ns),
+ batchSize: 1
+ }));
+ assert.eq(res.cursor.nextBatch.length, 0);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "off"}));
+}
- // Test read concerns other than "majority" are not supported.
- const primaryColl = db.foo;
- assert.writeOK(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- let res = primaryColl.runCommand({
- aggregate: primaryColl.getName(),
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "local"},
- });
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- res = primaryColl.runCommand({
- aggregate: primaryColl.getName(),
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "linearizable"},
- });
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+// Test read concerns other than "majority" are not supported.
+const primaryColl = db.foo;
+assert.writeOK(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+let res = primaryColl.runCommand({
+ aggregate: primaryColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "local"},
+});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+res = primaryColl.runCommand({
+ aggregate: primaryColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "linearizable"},
+});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- // Test that explicit read concern "majority" works.
- res = primaryColl.runCommand({
- aggregate: primaryColl.getName(),
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "majority"},
- });
- assert.commandWorked(res);
+// Test that explicit read concern "majority" works.
+res = primaryColl.runCommand({
+ aggregate: primaryColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "majority"},
+});
+assert.commandWorked(res);
- // Test not specifying readConcern defaults to "majority" read concern.
- stopReplicationOnSecondaries(rst);
- // Verify that the document just inserted cannot be returned.
- let cursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: primaryColl});
- assert.eq(cursor.firstBatch.length, 0);
+// Test not specifying readConcern defaults to "majority" read concern.
+stopReplicationOnSecondaries(rst);
+// Verify that the document just inserted cannot be returned.
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: primaryColl});
+assert.eq(cursor.firstBatch.length, 0);
- // Insert a document on the primary only.
- assert.writeOK(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}}));
- assertNextBatchIsEmpty(cursor);
+// Insert a document on the primary only.
+assert.writeOK(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}}));
+assertNextBatchIsEmpty(cursor);
- // Restart data replicaiton and wait until the new write becomes visible.
- restartReplicationOnSecondaries(rst);
- rst.awaitLastOpCommitted();
+// Restart data replication and wait until the new write becomes visible.
+restartReplicationOnSecondaries(rst);
+rst.awaitLastOpCommitted();
- // Verify that the expected doc is returned because it has been committed.
- let doc = cst.getOneChange(cursor);
- assert.docEq(doc.operationType, "insert");
- assert.docEq(doc.fullDocument, {_id: 2});
- rst.stopSet();
+// Verify that the expected doc is returned because it has been committed.
+let doc = cst.getOneChange(cursor);
+assert.docEq(doc.operationType, "insert");
+assert.docEq(doc.fullDocument, {_id: 2});
+rst.stopSet();
}());
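The assertNextBatchIsEmpty() helper reformatted above captures a reusable failpoint idiom: switch the failpoint on, run the operation whose behavior it alters, then switch it off again. A minimal sketch of that idiom, assuming a shell connection 'db' plus a hypothetical cursorId and collName (the failpoint name comes from the hunk itself):

    function assertEmptyBatchSketch(db, cursorId, collName) {  // hypothetical helper
        assert.commandWorked(db.adminCommand(
            {configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "alwaysOn"}));
        // With awaitData disabled, the getMore returns immediately instead of blocking,
        // so an empty batch means no new majority-committed events are visible yet.
        const res = assert.commandWorked(
            db.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
        assert.eq(res.cursor.nextBatch.length, 0);
        assert.commandWorked(db.adminCommand(
            {configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "off"}));
    }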
diff --git a/jstests/noPassthrough/change_streams_required_privileges.js b/jstests/noPassthrough/change_streams_required_privileges.js
index 71ccd81758e..137896a3f8f 100644
--- a/jstests/noPassthrough/change_streams_required_privileges.js
+++ b/jstests/noPassthrough/change_streams_required_privileges.js
@@ -2,341 +2,331 @@
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [requires_replication,requires_journaling]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
- const password = "test_password";
- rst.getPrimary().getDB("admin").createUser(
- {user: "userAdmin", pwd: password, roles: [{db: "admin", role: "userAdminAnyDatabase"}]});
- rst.restart(0, {auth: ''});
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+const password = "test_password";
+rst.getPrimary().getDB("admin").createUser(
+ {user: "userAdmin", pwd: password, roles: [{db: "admin", role: "userAdminAnyDatabase"}]});
+rst.restart(0, {auth: ''});
- const db = rst.getPrimary().getDB("test");
- const coll = db.coll;
- const adminDB = db.getSiblingDB("admin");
+const db = rst.getPrimary().getDB("test");
+const coll = db.coll;
+const adminDB = db.getSiblingDB("admin");
- // Wrap different sections of the test in separate functions to make the scoping clear.
- (function createRoles() {
- assert(adminDB.auth("userAdmin", password));
- // Create some collection-level roles.
- db.createRole({
- role: "write",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: coll.getName()},
- actions: ["insert", "update", "remove"]
- }]
- });
- db.createRole({
- role: "find_only",
- roles: [],
- privileges:
- [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["find"]}]
- });
- db.createRole({
- role: "find_and_change_stream",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: coll.getName()},
- actions: ["find", "changeStream"]
- }]
- });
- db.createRole({
- role: "change_stream_only",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: coll.getName()},
- actions: ["changeStream"]
- }]
- });
+// Wrap different sections of the test in separate functions to make the scoping clear.
+(function createRoles() {
+ assert(adminDB.auth("userAdmin", password));
+ // Create some collection-level roles.
+ db.createRole({
+ role: "write",
+ roles: [],
+ privileges: [{
+ resource: {db: db.getName(), collection: coll.getName()},
+ actions: ["insert", "update", "remove"]
+ }]
+ });
+ db.createRole({
+ role: "find_only",
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["find"]}]
+ });
+ db.createRole({
+ role: "find_and_change_stream",
+ roles: [],
+ privileges: [{
+ resource: {db: db.getName(), collection: coll.getName()},
+ actions: ["find", "changeStream"]
+ }]
+ });
+ db.createRole({
+ role: "change_stream_only",
+ roles: [],
+ privileges:
+ [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["changeStream"]}]
+ });
- // Create some privileges at the database level.
- db.createRole({
- role: "db_write",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: ""},
- actions: ["insert", "update", "remove"]
- }]
- });
- db.createRole({
- role: "db_find_only",
- roles: [],
- privileges: [{resource: {db: db.getName(), collection: ""}, actions: ["find"]}]
- });
- db.createRole({
- role: "db_find_and_change_stream",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: ""},
- actions: ["find", "changeStream"]
- }]
- });
- db.createRole({
- role: "db_change_stream_only",
- roles: [],
- privileges:
- [{resource: {db: db.getName(), collection: ""}, actions: ["changeStream"]}]
- });
+ // Create some privileges at the database level.
+ db.createRole({
+ role: "db_write",
+ roles: [],
+ privileges: [
+ {resource: {db: db.getName(), collection: ""},
+ actions: ["insert", "update", "remove"]}
+ ]
+ });
+ db.createRole({
+ role: "db_find_only",
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: ""}, actions: ["find"]}]
+ });
+ db.createRole({
+ role: "db_find_and_change_stream",
+ roles: [],
+ privileges:
+ [{resource: {db: db.getName(), collection: ""}, actions: ["find", "changeStream"]}]
+ });
+ db.createRole({
+ role: "db_change_stream_only",
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: ""}, actions: ["changeStream"]}]
+ });
- // Create some privileges at the admin database level.
- adminDB.createRole({
- role: "admin_db_write",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: ""},
- actions: ["insert", "update", "remove"]
- }]
- });
- adminDB.createRole({
- role: "admin_db_find_only",
- roles: [],
- privileges: [{resource: {db: "admin", collection: ""}, actions: ["find"]}]
- });
- adminDB.createRole({
- role: "admin_db_find_and_change_stream",
- roles: [],
- privileges:
- [{resource: {db: "admin", collection: ""}, actions: ["find", "changeStream"]}]
- });
- adminDB.createRole({
- role: "admin_db_change_stream_only",
- roles: [],
- privileges: [{resource: {db: "admin", collection: ""}, actions: ["changeStream"]}]
- });
+ // Create some privileges at the admin database level.
+ adminDB.createRole({
+ role: "admin_db_write",
+ roles: [],
+ privileges: [
+ {resource: {db: db.getName(), collection: ""},
+ actions: ["insert", "update", "remove"]}
+ ]
+ });
+ adminDB.createRole({
+ role: "admin_db_find_only",
+ roles: [],
+ privileges: [{resource: {db: "admin", collection: ""}, actions: ["find"]}]
+ });
+ adminDB.createRole({
+ role: "admin_db_find_and_change_stream",
+ roles: [],
+ privileges: [{resource: {db: "admin", collection: ""}, actions: ["find", "changeStream"]}]
+ });
+ adminDB.createRole({
+ role: "admin_db_change_stream_only",
+ roles: [],
+ privileges: [{resource: {db: "admin", collection: ""}, actions: ["changeStream"]}]
+ });
- // Create some roles at the any-db, any-collection level.
- adminDB.createRole({
- role: "any_db_find_only",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["find"]}]
- });
- adminDB.createRole({
- role: "any_db_find_and_change_stream",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["find", "changeStream"]}]
- });
- adminDB.createRole({
- role: "any_db_change_stream_only",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["changeStream"]}]
- });
+ // Create some roles at the any-db, any-collection level.
+ adminDB.createRole({
+ role: "any_db_find_only",
+ roles: [],
+ privileges: [{resource: {db: "", collection: ""}, actions: ["find"]}]
+ });
+ adminDB.createRole({
+ role: "any_db_find_and_change_stream",
+ roles: [],
+ privileges: [{resource: {db: "", collection: ""}, actions: ["find", "changeStream"]}]
+ });
+ adminDB.createRole({
+ role: "any_db_change_stream_only",
+ roles: [],
+ privileges: [{resource: {db: "", collection: ""}, actions: ["changeStream"]}]
+ });
- // Create some roles at the cluster level.
- adminDB.createRole({
- role: "cluster_find_only",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["find"]}]
- });
- adminDB.createRole({
- role: "cluster_find_and_change_stream",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["find", "changeStream"]}]
- });
- adminDB.createRole({
- role: "cluster_change_stream_only",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["changeStream"]}]
- });
- }());
+ // Create some roles at the cluster level.
+ adminDB.createRole({
+ role: "cluster_find_only",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["find"]}]
+ });
+ adminDB.createRole({
+ role: "cluster_find_and_change_stream",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["find", "changeStream"]}]
+ });
+ adminDB.createRole({
+ role: "cluster_change_stream_only",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["changeStream"]}]
+ });
+}());
- (function createUsers() {
- // Create some users for a specific collection. Use the name of the role as the name of the
- // user.
- for (let role of["write", "find_only", "find_and_change_stream", "change_stream_only"]) {
- db.createUser({user: role, pwd: password, roles: [role]});
- }
+(function createUsers() {
+ // Create some users for a specific collection. Use the name of the role as the name of the
+ // user.
+ for (let role of ["write", "find_only", "find_and_change_stream", "change_stream_only"]) {
+ db.createUser({user: role, pwd: password, roles: [role]});
+ }
- // Create some users at the database level. Use the name of the role as the name of the
- // user, except for the built-in roles.
- for (let role of["db_write",
- "db_find_only",
- "db_find_and_change_stream",
- "db_change_stream_only"]) {
- db.createUser({user: role, pwd: password, roles: [role]});
- }
- db.createUser({user: "db_read", pwd: password, roles: ["read"]});
+ // Create some users at the database level. Use the name of the role as the name of the
+ // user, except for the built-in roles.
+ for (let role of
+ ["db_write", "db_find_only", "db_find_and_change_stream", "db_change_stream_only"]) {
+ db.createUser({user: role, pwd: password, roles: [role]});
+ }
+ db.createUser({user: "db_read", pwd: password, roles: ["read"]});
- // Create some users on the admin database. Use the name of the role as the name of the
- // user, except for the built-in roles.
- for (let role of["admin_db_write",
- "admin_db_find_only",
- "admin_db_find_and_change_stream",
- "admin_db_change_stream_only"]) {
- adminDB.createUser({user: role, pwd: password, roles: [role]});
- }
- adminDB.createUser({user: "admin_db_read", pwd: password, roles: ["read"]});
+ // Create some users on the admin database. Use the name of the role as the name of the
+ // user, except for the built-in roles.
+ for (let role of ["admin_db_write",
+ "admin_db_find_only",
+ "admin_db_find_and_change_stream",
+ "admin_db_change_stream_only"]) {
+ adminDB.createUser({user: role, pwd: password, roles: [role]});
+ }
+ adminDB.createUser({user: "admin_db_read", pwd: password, roles: ["read"]});
- // Create some users with privileges on all databases. Use the name of the role as the name
- // of the user, except for the built-in roles.
- for (let role of["any_db_find_only",
- "any_db_find_and_change_stream",
- "any_db_change_stream_only"]) {
- adminDB.createUser({user: role, pwd: password, roles: [role]});
- }
+ // Create some users with privileges on all databases. Use the name of the role as the name
+ // of the user, except for the built-in roles.
+ for (let role of ["any_db_find_only",
+ "any_db_find_and_change_stream",
+ "any_db_change_stream_only"]) {
+ adminDB.createUser({user: role, pwd: password, roles: [role]});
+ }
- // Create some users on the whole cluster. Use the name of the role as the name of the user.
- for (let role of["cluster_find_only",
- "cluster_find_and_change_stream",
- "cluster_change_stream_only"]) {
- adminDB.createUser({user: role, pwd: password, roles: [role]});
- }
- }());
+ // Create some users on the whole cluster. Use the name of the role as the name of the user.
+ for (let role of ["cluster_find_only",
+ "cluster_find_and_change_stream",
+ "cluster_change_stream_only"]) {
+ adminDB.createUser({user: role, pwd: password, roles: [role]});
+ }
+}());
- (function testPrivilegesForSingleCollection() {
- // Test that users without the required privileges cannot open a change stream. A user
- // needs both the 'find' and 'changeStream' action on the collection. Note in particular
- // that the whole-cluster privileges (specified with {cluster: true}) is not enough to open
- // a change stream on any particular collection.
- for (let userWithoutPrivileges of[{db: db, name: "find_only"},
- {db: db, name: "change_stream_only"},
- {db: db, name: "write"},
- {db: db, name: "db_find_only"},
- {db: db, name: "db_change_stream_only"},
- {db: db, name: "db_write"},
- {db: adminDB, name: "admin_db_find_only"},
- {db: adminDB, name: "admin_db_find_and_change_stream"},
- {db: adminDB, name: "admin_db_change_stream_only"},
- {db: adminDB, name: "admin_db_read"},
- {db: adminDB, name: "any_db_find_only"},
- {db: adminDB, name: "any_db_change_stream_only"},
- {db: adminDB, name: "cluster_find_only"},
- {db: adminDB, name: "cluster_find_and_change_stream"},
- {db: adminDB, name: "cluster_change_stream_only"}]) {
- jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream ` +
- `on a collection`);
- const db = userWithoutPrivileges.db;
- assert(db.auth(userWithoutPrivileges.name, password));
+(function testPrivilegesForSingleCollection() {
+ // Test that users without the required privileges cannot open a change stream. A user
+ // needs both the 'find' and 'changeStream' action on the collection. Note in particular
+    // that the whole-cluster privileges (specified with {cluster: true}) are not enough to open
+ // a change stream on any particular collection.
+ for (let userWithoutPrivileges of [{db: db, name: "find_only"},
+ {db: db, name: "change_stream_only"},
+ {db: db, name: "write"},
+ {db: db, name: "db_find_only"},
+ {db: db, name: "db_change_stream_only"},
+ {db: db, name: "db_write"},
+ {db: adminDB, name: "admin_db_find_only"},
+ {db: adminDB, name: "admin_db_find_and_change_stream"},
+ {db: adminDB, name: "admin_db_change_stream_only"},
+ {db: adminDB, name: "admin_db_read"},
+ {db: adminDB, name: "any_db_find_only"},
+ {db: adminDB, name: "any_db_change_stream_only"},
+ {db: adminDB, name: "cluster_find_only"},
+ {db: adminDB, name: "cluster_find_and_change_stream"},
+ {db: adminDB, name: "cluster_change_stream_only"}]) {
+ jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream ` +
+ `on a collection`);
+ const db = userWithoutPrivileges.db;
+ assert(db.auth(userWithoutPrivileges.name, password));
- assert.commandFailedWithCode(
- coll.getDB().runCommand(
- {aggregate: coll.getName(), pipeline: [{$changeStream: {}}], cursor: {}}),
- ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(
+ coll.getDB().runCommand(
+ {aggregate: coll.getName(), pipeline: [{$changeStream: {}}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- db.logout();
- }
+ db.logout();
+ }
- // Test that a user with the required privileges can open a change stream.
- for (let userWithPrivileges of[{db: db, name: "find_and_change_stream"},
- {db: db, name: "db_find_and_change_stream"},
- {db: db, name: "db_read"},
- {db: adminDB, name: "any_db_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on a` +
- ` collection`);
- const db = userWithPrivileges.db;
- assert(db.auth(userWithPrivileges.name, password));
+ // Test that a user with the required privileges can open a change stream.
+ for (let userWithPrivileges of [{db: db, name: "find_and_change_stream"},
+ {db: db, name: "db_find_and_change_stream"},
+ {db: db, name: "db_read"},
+ {db: adminDB, name: "any_db_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on a` +
+ ` collection`);
+ const db = userWithPrivileges.db;
+ assert(db.auth(userWithPrivileges.name, password));
- assert.doesNotThrow(() => coll.watch());
+ assert.doesNotThrow(() => coll.watch());
- db.logout();
- }
- }());
+ db.logout();
+ }
+}());
- (function testPrivilegesForWholeDB() {
- // Test that users without the required privileges cannot open a change stream. A user needs
- // both the 'find' and 'changeStream' action on the database. Note in particular that the
- // whole-cluster privileges (specified with {cluster: true}) is not enough to open a change
- // stream on the whole database.
- for (let userWithoutPrivileges of[{db: db, name: "find_only"},
- {db: db, name: "change_stream_only"},
- {db: db, name: "find_and_change_stream"},
- {db: db, name: "write"},
- {db: db, name: "db_find_only"},
- {db: db, name: "db_change_stream_only"},
- {db: db, name: "db_write"},
- {db: adminDB, name: "admin_db_find_only"},
- {db: adminDB, name: "admin_db_find_and_change_stream"},
- {db: adminDB, name: "admin_db_change_stream_only"},
- {db: adminDB, name: "admin_db_read"},
- {db: adminDB, name: "any_db_find_only"},
- {db: adminDB, name: "any_db_change_stream_only"},
- {db: adminDB, name: "cluster_find_only"},
- {db: adminDB, name: "cluster_find_and_change_stream"},
- {db: adminDB, name: "cluster_change_stream_only"}]) {
- jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
- ` on the whole database`);
- const db = userWithoutPrivileges.db;
- assert(db.auth(userWithoutPrivileges.name, password));
+(function testPrivilegesForWholeDB() {
+ // Test that users without the required privileges cannot open a change stream. A user needs
+ // both the 'find' and 'changeStream' action on the database. Note in particular that the
+    // whole-cluster privileges (specified with {cluster: true}) are not enough to open a change
+ // stream on the whole database.
+ for (let userWithoutPrivileges of [{db: db, name: "find_only"},
+ {db: db, name: "change_stream_only"},
+ {db: db, name: "find_and_change_stream"},
+ {db: db, name: "write"},
+ {db: db, name: "db_find_only"},
+ {db: db, name: "db_change_stream_only"},
+ {db: db, name: "db_write"},
+ {db: adminDB, name: "admin_db_find_only"},
+ {db: adminDB, name: "admin_db_find_and_change_stream"},
+ {db: adminDB, name: "admin_db_change_stream_only"},
+ {db: adminDB, name: "admin_db_read"},
+ {db: adminDB, name: "any_db_find_only"},
+ {db: adminDB, name: "any_db_change_stream_only"},
+ {db: adminDB, name: "cluster_find_only"},
+ {db: adminDB, name: "cluster_find_and_change_stream"},
+ {db: adminDB, name: "cluster_change_stream_only"}]) {
+ jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
+ ` on the whole database`);
+ const db = userWithoutPrivileges.db;
+ assert(db.auth(userWithoutPrivileges.name, password));
- assert.commandFailedWithCode(
- coll.getDB().runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}),
- ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(
+ coll.getDB().runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- db.logout();
- }
+ db.logout();
+ }
- // Test that a user with the required privileges can open a change stream.
- for (let userWithPrivileges of[{db: db, name: "db_find_and_change_stream"},
- {db: db, name: "db_read"},
- {db: adminDB, name: "any_db_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on` +
- ` the whole database`);
- const db = userWithPrivileges.db;
- assert(db.auth(userWithPrivileges.name, password));
+ // Test that a user with the required privileges can open a change stream.
+ for (let userWithPrivileges of [{db: db, name: "db_find_and_change_stream"},
+ {db: db, name: "db_read"},
+ {db: adminDB, name: "any_db_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on` +
+ ` the whole database`);
+ const db = userWithPrivileges.db;
+ assert(db.auth(userWithPrivileges.name, password));
- assert.doesNotThrow(() => coll.getDB().watch());
+ assert.doesNotThrow(() => coll.getDB().watch());
- db.logout();
- }
- }());
+ db.logout();
+ }
+}());
- (function testPrivilegesForWholeCluster() {
- // Test that users without the required privileges cannot open a change stream. A user needs
- // both the 'find' and 'changeStream' action on _any_ resource. Note in particular that the
- // whole-cluster privileges (specified with {cluster: true}) is not enough to open a change
- // stream on the whole cluster.
- for (let userWithoutPrivileges of[{db: db, name: "find_only"},
- {db: db, name: "change_stream_only"},
- {db: db, name: "find_and_change_stream"},
- {db: db, name: "write"},
- {db: db, name: "db_find_only"},
- {db: db, name: "db_find_and_change_stream"},
- {db: db, name: "db_change_stream_only"},
- {db: db, name: "db_read"},
- {db: db, name: "db_write"},
- {db: adminDB, name: "admin_db_find_only"},
- {db: adminDB, name: "admin_db_find_and_change_stream"},
- {db: adminDB, name: "admin_db_change_stream_only"},
- {db: adminDB, name: "admin_db_read"},
- {db: adminDB, name: "any_db_find_only"},
- {db: adminDB, name: "any_db_change_stream_only"},
- {db: adminDB, name: "cluster_find_only"},
- {db: adminDB, name: "cluster_change_stream_only"},
- {db: adminDB, name: "cluster_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
- ` on the whole cluster`);
- const db = userWithoutPrivileges.db;
- assert(db.auth(userWithoutPrivileges.name, password));
+(function testPrivilegesForWholeCluster() {
+ // Test that users without the required privileges cannot open a change stream. A user needs
+ // both the 'find' and 'changeStream' action on _any_ resource. Note in particular that the
+    // whole-cluster privileges (specified with {cluster: true}) are not enough to open a change
+ // stream on the whole cluster.
+ for (let userWithoutPrivileges of [{db: db, name: "find_only"},
+ {db: db, name: "change_stream_only"},
+ {db: db, name: "find_and_change_stream"},
+ {db: db, name: "write"},
+ {db: db, name: "db_find_only"},
+ {db: db, name: "db_find_and_change_stream"},
+ {db: db, name: "db_change_stream_only"},
+ {db: db, name: "db_read"},
+ {db: db, name: "db_write"},
+ {db: adminDB, name: "admin_db_find_only"},
+ {db: adminDB, name: "admin_db_find_and_change_stream"},
+ {db: adminDB, name: "admin_db_change_stream_only"},
+ {db: adminDB, name: "admin_db_read"},
+ {db: adminDB, name: "any_db_find_only"},
+ {db: adminDB, name: "any_db_change_stream_only"},
+ {db: adminDB, name: "cluster_find_only"},
+ {db: adminDB, name: "cluster_change_stream_only"},
+ {db: adminDB, name: "cluster_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
+ ` on the whole cluster`);
+ const db = userWithoutPrivileges.db;
+ assert(db.auth(userWithoutPrivileges.name, password));
- assert.commandFailedWithCode(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$changeStream: {allChangesForCluster: true}}],
- cursor: {}
- }),
- ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$changeStream: {allChangesForCluster: true}}],
+ cursor: {}
+ }),
+ ErrorCodes.Unauthorized);
- db.logout();
- }
+ db.logout();
+ }
- // Test that a user with the required privileges can open a change stream.
- for (let userWithPrivileges of[{db: adminDB, name: "any_db_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream` +
- ` on the whole cluster`);
- const db = userWithPrivileges.db;
- assert(db.auth(userWithPrivileges.name, password));
+ // Test that a user with the required privileges can open a change stream.
+ for (let userWithPrivileges of [{db: adminDB, name: "any_db_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream` +
+ ` on the whole cluster`);
+ const db = userWithPrivileges.db;
+ assert(db.auth(userWithPrivileges.name, password));
- assert.doesNotThrow(() => db.getMongo().watch());
+ assert.doesNotThrow(() => db.getMongo().watch());
- db.logout();
- }
- }());
- rst.stopSet();
+ db.logout();
+ }
+}());
+rst.stopSet();
}());
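The privilege matrix above reduces to one rule: opening a change stream on a namespace requires both the find and changeStream actions on that namespace. A condensed sketch of granting exactly that pair, assuming an authenticated shell 'db' (role, user, and collection names are illustrative):

    db.createRole({
        role: "watch_coll",  // hypothetical role name
        roles: [],
        privileges: [{
            resource: {db: db.getName(), collection: "coll"},
            actions: ["find", "changeStream"]
        }]
    });
    db.createUser({user: "watcher", pwd: "test_password", roles: ["watch_coll"]});
    // Authenticated as "watcher", db.coll.watch() succeeds; a user granted only one
    // of the two actions is rejected with ErrorCodes.Unauthorized, as tested above.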
diff --git a/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js b/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
index 73cb523ce49..1dd5bdd83ed 100644
--- a/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
+++ b/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
@@ -5,65 +5,64 @@
* @tags: [requires_replication, requires_journaling, requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- const st =
- new ShardingTest({shards: 2, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
- const mongosDB = st.s.startSession({causalConsistency: true}).getDatabase(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s.startSession({causalConsistency: true}).getDatabase(jsTestName());
+const mongosColl = mongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard on {_id:1}, split at {_id:0}, and move the upper chunk to shard1.
- st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
+// Shard on {_id:1}, split at {_id:0}, and move the upper chunk to shard1.
+st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
- // Write one document to each shard.
- assert.commandWorked(mongosColl.insert({_id: -10}));
- assert.commandWorked(mongosColl.insert({_id: 10}));
+// Write one document to each shard.
+assert.commandWorked(mongosColl.insert({_id: -10}));
+assert.commandWorked(mongosColl.insert({_id: 10}));
- // Open a change stream cursor to listen for subsequent events.
- let csCursor = mongosColl.watch([], {cursor: {batchSize: 1}});
+// Open a change stream cursor to listen for subsequent events.
+let csCursor = mongosColl.watch([], {cursor: {batchSize: 1}});
- // Update both documents in the collection, such that the events are likely to have the same
- // clusterTime. We update twice to ensure that the PBRT for both shards moves past the first two
- // updates.
- assert.commandWorked(mongosColl.update({}, {$set: {updated: 1}}, {multi: true}));
- assert.commandWorked(mongosColl.update({}, {$set: {updatedAgain: 1}}, {multi: true}));
+// Update both documents in the collection, such that the events are likely to have the same
+// clusterTime. We update twice to ensure that the PBRT for both shards moves past the first two
+// updates.
+assert.commandWorked(mongosColl.update({}, {$set: {updated: 1}}, {multi: true}));
+assert.commandWorked(mongosColl.update({}, {$set: {updatedAgain: 1}}, {multi: true}));
- // Retrieve the first two events and confirm that they are in order with non-descending
- // clusterTime. Unfortunately we cannot guarantee that clusterTime will be identical, since it
- // is based on each shard's local value and there are operations beyond noop write that can
- // bump the oplog timestamp. We expect however that they will be identical for most test runs,
- // so there is value in testing.
- let clusterTime = null, updateEvent = null;
- for (let x = 0; x < 2; ++x) {
- assert.soon(() => csCursor.hasNext());
- updateEvent = csCursor.next();
- clusterTime = (clusterTime || updateEvent.clusterTime);
- assert.gte(updateEvent.clusterTime, clusterTime);
- assert.eq(updateEvent.updateDescription.updatedFields.updated, 1);
- }
+// Retrieve the first two events and confirm that they are in order with non-descending
+// clusterTime. Unfortunately we cannot guarantee that clusterTime will be identical, since it
+// is based on each shard's local value and there are operations besides the no-op write that can
+// bump the oplog timestamp. We expect, however, that they will be identical for most test runs,
+// so there is value in testing.
+let clusterTime = null, updateEvent = null;
+for (let x = 0; x < 2; ++x) {
assert.soon(() => csCursor.hasNext());
+ updateEvent = csCursor.next();
+ clusterTime = (clusterTime || updateEvent.clusterTime);
+ assert.gte(updateEvent.clusterTime, clusterTime);
+ assert.eq(updateEvent.updateDescription.updatedFields.updated, 1);
+}
+assert.soon(() => csCursor.hasNext());
- // Update both documents again, so that we will have something to observe after resuming.
- assert.commandWorked(mongosColl.update({}, {$set: {updatedYetAgain: 1}}, {multi: true}));
+// Update both documents again, so that we will have something to observe after resuming.
+assert.commandWorked(mongosColl.update({}, {$set: {updatedYetAgain: 1}}, {multi: true}));
- // Resume from the second update, and confirm that we only see events starting with the third
- // and fourth updates. We use batchSize:1 to induce mongoD to send each individual event to the
- // mongoS when resuming, rather than scanning all the way to the most recent point in its oplog.
- csCursor = mongosColl.watch([], {resumeAfter: updateEvent._id, cursor: {batchSize: 1}});
- clusterTime = updateEvent = null;
- for (let x = 0; x < 2; ++x) {
- assert.soon(() => csCursor.hasNext());
- updateEvent = csCursor.next();
- clusterTime = (clusterTime || updateEvent.clusterTime);
- assert.gte(updateEvent.clusterTime, clusterTime);
- assert.eq(updateEvent.updateDescription.updatedFields.updatedAgain, 1);
- }
+// Resume from the second update, and confirm that we only see events starting with the third
+// and fourth updates. We use batchSize:1 to induce mongoD to send each individual event to the
+// mongoS when resuming, rather than scanning all the way to the most recent point in its oplog.
+csCursor = mongosColl.watch([], {resumeAfter: updateEvent._id, cursor: {batchSize: 1}});
+clusterTime = updateEvent = null;
+for (let x = 0; x < 2; ++x) {
assert.soon(() => csCursor.hasNext());
+ updateEvent = csCursor.next();
+ clusterTime = (clusterTime || updateEvent.clusterTime);
+ assert.gte(updateEvent.clusterTime, clusterTime);
+ assert.eq(updateEvent.updateDescription.updatedFields.updatedAgain, 1);
+}
+assert.soon(() => csCursor.hasNext());
- st.stop();
+st.stop();
})();
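As the comments above note, resuming with cursor: {batchSize: 1} forces each shard to trickle events to mongoS one at a time rather than scanning ahead in its oplog. A minimal sketch of the resume call, assuming a shell collection 'coll' and a previously captured event _id in 'resumeToken' (both illustrative):

    const resumeCursor = coll.watch([], {resumeAfter: resumeToken, cursor: {batchSize: 1}});
    assert.soon(() => resumeCursor.hasNext());
    const nextEvent = resumeCursor.next();  // first event strictly after the token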
diff --git a/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js b/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js
index 6ac410870a9..e8cdf1dc722 100644
--- a/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js
+++ b/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js
@@ -5,94 +5,94 @@
* @tags: [requires_sharding, uses_change_streams]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For runCommandOnEachPrimary.
+load("jstests/libs/fixture_helpers.js"); // For runCommandOnEachPrimary.
- // Asserts that the expected operation type and documentKey are found on the change stream
- // cursor. Returns the change stream document.
- function assertWriteVisible({cursor, opType, docKey}) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
- assert.eq(opType, changeDoc.operationType, changeDoc);
- assert.eq(docKey, changeDoc.documentKey, changeDoc);
- return changeDoc;
- }
+// Asserts that the expected operation type and documentKey are found on the change stream
+// cursor. Returns the change stream document.
+function assertWriteVisible({cursor, opType, docKey}) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+ assert.eq(opType, changeDoc.operationType, changeDoc);
+ assert.eq(docKey, changeDoc.documentKey, changeDoc);
+ return changeDoc;
+}
- // Create a new cluster with 2 shards. Disable periodic no-ops to ensure that we have control
- // over the ordering of events across the cluster.
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: false, periodicNoopIntervalSecs: 1}}
- });
+// Create a new cluster with 2 shards. Disable periodic no-ops to ensure that we have control
+// over the ordering of events across the cluster.
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: false, periodicNoopIntervalSecs: 1}}
+});
- // Create two databases. We will place one of these on each shard.
- const mongosDB0 = st.s.getDB(`${jsTestName()}_0`);
- const mongosDB1 = st.s.getDB(`${jsTestName()}_1`);
- const adminDB = st.s.getDB("admin");
+// Create two databases. We will place one of these on each shard.
+const mongosDB0 = st.s.getDB(`${jsTestName()}_0`);
+const mongosDB1 = st.s.getDB(`${jsTestName()}_1`);
+const adminDB = st.s.getDB("admin");
- // Enable sharding on mongosDB0 and ensure its primary is shard0.
- assert.commandWorked(mongosDB0.adminCommand({enableSharding: mongosDB0.getName()}));
- st.ensurePrimaryShard(mongosDB0.getName(), st.rs0.getURL());
+// Enable sharding on mongosDB0 and ensure its primary is shard0.
+assert.commandWorked(mongosDB0.adminCommand({enableSharding: mongosDB0.getName()}));
+st.ensurePrimaryShard(mongosDB0.getName(), st.rs0.getURL());
- // Enable sharding on mongosDB1 and ensure its primary is shard1.
- assert.commandWorked(mongosDB1.adminCommand({enableSharding: mongosDB1.getName()}));
- st.ensurePrimaryShard(mongosDB1.getName(), st.rs1.getURL());
+// Enable sharding on mongosDB1 and ensure its primary is shard1.
+assert.commandWorked(mongosDB1.adminCommand({enableSharding: mongosDB1.getName()}));
+st.ensurePrimaryShard(mongosDB1.getName(), st.rs1.getURL());
- // Open a connection to a different collection on each shard. We use direct connections to
- // ensure that the oplog timestamps across the shards overlap.
- const coll0 = st.rs0.getPrimary().getCollection(`${mongosDB0.getName()}.test`);
- const coll1 = st.rs1.getPrimary().getCollection(`${mongosDB1.getName()}.test`);
+// Open a connection to a different collection on each shard. We use direct connections to
+// ensure that the oplog timestamps across the shards overlap.
+const coll0 = st.rs0.getPrimary().getCollection(`${mongosDB0.getName()}.test`);
+const coll1 = st.rs1.getPrimary().getCollection(`${mongosDB1.getName()}.test`);
- // Open a change stream on the test cluster. We will capture events in 'changeList'.
- const changeStreamCursor = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);
- const changeList = [];
+// Open a change stream on the test cluster. We will capture events in 'changeList'.
+const changeStreamCursor = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);
+const changeList = [];
- // Insert ten documents on each shard, alternating between the two collections.
- for (let i = 0; i < 20; ++i) {
- const coll = (i % 2 ? coll1 : coll0);
- assert.commandWorked(coll.insert({shard: (i % 2)}));
- }
+// Insert ten documents on each shard, alternating between the two collections.
+for (let i = 0; i < 20; ++i) {
+ const coll = (i % 2 ? coll1 : coll0);
+ assert.commandWorked(coll.insert({shard: (i % 2)}));
+}
- // Verify that each shard now has ten total documents present in the associated collection.
- assert.eq(st.rs0.getPrimary().getCollection(coll0.getFullName()).count(), 10);
- assert.eq(st.rs1.getPrimary().getCollection(coll1.getFullName()).count(), 10);
+// Verify that each shard now has ten total documents present in the associated collection.
+assert.eq(st.rs0.getPrimary().getCollection(coll0.getFullName()).count(), 10);
+assert.eq(st.rs1.getPrimary().getCollection(coll1.getFullName()).count(), 10);
- // Re-enable 'writePeriodicNoops' to ensure that all change stream events are returned.
- FixtureHelpers.runCommandOnEachPrimary(
- {db: adminDB, cmdObj: {setParameter: 1, writePeriodicNoops: true}});
+// Re-enable 'writePeriodicNoops' to ensure that all change stream events are returned.
+FixtureHelpers.runCommandOnEachPrimary(
+ {db: adminDB, cmdObj: {setParameter: 1, writePeriodicNoops: true}});
- // Read the stream of events, capture them in 'changeList', and confirm that all events occurred
- // at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
- // corresponding events occurred at the same clusterTime on both shards; we expect, however,
- // that this will be true in the vast majority of runs, and so there is value in testing.
- for (let i = 0; i < 20; ++i) {
- assert.soon(() => changeStreamCursor.hasNext());
- changeList.push(changeStreamCursor.next());
- }
- const clusterTime = changeList[0].clusterTime;
- for (let event of changeList) {
- assert.gte(event.clusterTime, clusterTime);
- }
+// Read the stream of events, capture them in 'changeList', and confirm that all events occurred
+// at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
+// corresponding events occurred at the same clusterTime on both shards; we expect, however,
+// that this will be true in the vast majority of runs, and so there is value in testing.
+for (let i = 0; i < 20; ++i) {
+ assert.soon(() => changeStreamCursor.hasNext());
+ changeList.push(changeStreamCursor.next());
+}
+const clusterTime = changeList[0].clusterTime;
+for (let event of changeList) {
+ assert.gte(event.clusterTime, clusterTime);
+}
- // Test that resuming from each event returns the expected set of subsequent documents.
- for (let i = 0; i < changeList.length; ++i) {
- const resumeCursor = adminDB.aggregate(
- [{$changeStream: {allChangesForCluster: true, resumeAfter: changeList[i]._id}}]);
+// Test that resuming from each event returns the expected set of subsequent documents.
+for (let i = 0; i < changeList.length; ++i) {
+ const resumeCursor = adminDB.aggregate(
+ [{$changeStream: {allChangesForCluster: true, resumeAfter: changeList[i]._id}}]);
- // Confirm that the first event in the resumed stream matches the next event recorded in
- // 'changeList' from the original stream. The order of the events should be stable across
- // resumes from any point.
- for (let x = (i + 1); x < changeList.length; ++x) {
- const expectedChangeDoc = changeList[x];
- assertWriteVisible({
- cursor: resumeCursor,
- opType: expectedChangeDoc.operationType,
- docKey: expectedChangeDoc.documentKey
- });
- }
- resumeCursor.close();
+ // Confirm that the first event in the resumed stream matches the next event recorded in
+ // 'changeList' from the original stream. The order of the events should be stable across
+ // resumes from any point.
+ for (let x = (i + 1); x < changeList.length; ++x) {
+ const expectedChangeDoc = changeList[x];
+ assertWriteVisible({
+ cursor: resumeCursor,
+ opType: expectedChangeDoc.operationType,
+ docKey: expectedChangeDoc.documentKey
+ });
}
+ resumeCursor.close();
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js b/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js
index c5a27b57e63..0509ff2b3cd 100644
--- a/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js
+++ b/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js
@@ -5,94 +5,94 @@
* @tags: [requires_sharding, uses_multi_shard_transaction, uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- // Asserts that the expected operation type and documentKey are found on the change stream
- // cursor. Returns the change stream document.
- function assertWriteVisible({cursor, opType, docKey}) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
- assert.eq(opType, changeDoc.operationType, changeDoc);
- assert.eq(docKey, changeDoc.documentKey, changeDoc);
- return changeDoc;
- }
+// Asserts that the expected operation type and documentKey are found on the change stream
+// cursor. Returns the change stream document.
+function assertWriteVisible({cursor, opType, docKey}) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+ assert.eq(opType, changeDoc.operationType, changeDoc);
+ assert.eq(docKey, changeDoc.documentKey, changeDoc);
+ return changeDoc;
+}
- // Create a new cluster with 2 shards. Enable 1-second period no-ops to ensure that all relevant
- // events eventually become available.
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
- });
+// Create a new cluster with 2 shards. Enable 1-second period no-ops to ensure that all relevant
+// events eventually become available.
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
- const mongosDB = st.s.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard on {shard:1}, split at {shard:1}, and move the upper chunk to shard1.
- st.shardColl(mongosColl, {shard: 1}, {shard: 1}, {shard: 1}, mongosDB.getName(), true);
+// Shard on {shard:1}, split at {shard:1}, and move the upper chunk to shard1.
+st.shardColl(mongosColl, {shard: 1}, {shard: 1}, {shard: 1}, mongosDB.getName(), true);
- // Seed each shard with one document.
- assert.commandWorked(
- mongosColl.insert([{shard: 0, _id: "initial_doc"}, {shard: 1, _id: "initial doc"}]));
+// Seed each shard with one document.
+assert.commandWorked(
+ mongosColl.insert([{shard: 0, _id: "initial_doc"}, {shard: 1, _id: "initial doc"}]));
- // Start a transaction which will be used to write documents across both shards.
- const session = mongosDB.getMongo().startSession();
- const sessionDB = session.getDatabase(mongosDB.getName());
- const sessionColl = sessionDB[mongosColl.getName()];
- session.startTransaction({readConcern: {level: "majority"}});
+// Start a transaction which will be used to write documents across both shards.
+const session = mongosDB.getMongo().startSession();
+const sessionDB = session.getDatabase(mongosDB.getName());
+const sessionColl = sessionDB[mongosColl.getName()];
+session.startTransaction({readConcern: {level: "majority"}});
- // Open a change stream on the test collection. We will capture events in 'changeList'.
- const changeStreamCursor = mongosColl.watch();
- const changeList = [];
+// Open a change stream on the test collection. We will capture events in 'changeList'.
+const changeStreamCursor = mongosColl.watch();
+const changeList = [];
- // Insert four documents on each shard under the transaction.
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-0"}, {shard: 1, _id: "txn1-doc-1"}]));
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-2"}, {shard: 1, _id: "txn1-doc-3"}]));
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-4"}, {shard: 1, _id: "txn1-doc-5"}]));
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-6"}, {shard: 1, _id: "txn1-doc-7"}]));
+// Insert four documents on each shard under the transaction.
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-0"}, {shard: 1, _id: "txn1-doc-1"}]));
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-2"}, {shard: 1, _id: "txn1-doc-3"}]));
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-4"}, {shard: 1, _id: "txn1-doc-5"}]));
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-6"}, {shard: 1, _id: "txn1-doc-7"}]));
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read the stream of events, capture them in 'changeList', and confirm that all events occurred
- // at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
- // all events occurred at the same clusterTime on both shards, even in the case where all events
- // occur within a single transaction. We expect, however, that this will be true in the vast
- // majority of test runs, and so there is value in retaining this test.
- for (let i = 0; i < 8; ++i) {
- assert.soon(() => changeStreamCursor.hasNext());
- changeList.push(changeStreamCursor.next());
- }
- const clusterTime = changeList[0].clusterTime;
- for (let event of changeList) {
- assert.gte(event.clusterTime, clusterTime);
- }
+// Read the stream of events, capture them in 'changeList', and confirm that all events occurred
+// at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
+// all events occurred at the same clusterTime on both shards, even in the case where all events
+// occur within a single transaction. We expect, however, that this will be true in the vast
+// majority of test runs, and so there is value in retaining this test.
+for (let i = 0; i < 8; ++i) {
+ assert.soon(() => changeStreamCursor.hasNext());
+ changeList.push(changeStreamCursor.next());
+}
+const clusterTime = changeList[0].clusterTime;
+for (let event of changeList) {
+ assert.gte(event.clusterTime, clusterTime);
+}
- // Test that resuming from each event returns the expected set of subsequent documents.
- for (let i = 0; i < changeList.length; ++i) {
- const resumeCursor = mongosColl.watch([], {startAfter: changeList[i]._id});
+// Test that resuming from each event returns the expected set of subsequent documents.
+for (let i = 0; i < changeList.length; ++i) {
+ const resumeCursor = mongosColl.watch([], {startAfter: changeList[i]._id});
- // Confirm that the first event in the resumed stream matches the next event recorded in
- // 'changeList' from the original stream. The order of the events should be stable across
- // resumes from any point.
- for (let x = (i + 1); x < changeList.length; ++x) {
- const expectedChangeDoc = changeList[x];
- assertWriteVisible({
- cursor: resumeCursor,
- opType: expectedChangeDoc.operationType,
- docKey: expectedChangeDoc.documentKey
- });
- }
- assert(!resumeCursor.hasNext(), () => `Unexpected event: ${tojson(resumeCursor.next())}`);
- resumeCursor.close();
+    // Confirm that the sequence of events returned by the resumed stream matches the
+    // remaining events recorded in 'changeList' from the original stream. The order of
+    // the events should be stable across resumes from any point.
+ for (let x = (i + 1); x < changeList.length; ++x) {
+ const expectedChangeDoc = changeList[x];
+ assertWriteVisible({
+ cursor: resumeCursor,
+ opType: expectedChangeDoc.operationType,
+ docKey: expectedChangeDoc.documentKey
+ });
}
+ assert(!resumeCursor.hasNext(), () => `Unexpected event: ${tojson(resumeCursor.next())}`);
+ resumeCursor.close();
+}
- st.stop();
+st.stop();
})();
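
For context, the resume pattern exercised by the test above reduces to a few lines. A minimal sketch, assuming a replica-set connection (change streams require one); the collection and _id are illustrative:

    // Open a stream, capture one event, then resume strictly after it.
    const cursor = db.coll.watch();
    assert.commandWorked(db.coll.insert({_id: "a"}));
    assert.soon(() => cursor.hasNext());
    const event = cursor.next();
    // An event's _id is its resume token; 'startAfter' reopens the stream at
    // the position immediately following that event.
    const resumed = db.coll.watch([], {startAfter: event._id});

Unlike 'resumeAfter', 'startAfter' can also resume past an invalidate event, which is why the test uses it when replaying from every recorded token.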
diff --git a/jstests/noPassthrough/change_streams_shell_helper_resume_token.js b/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
index 4e6e42c6406..0e62c649d00 100644
--- a/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
+++ b/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
@@ -5,95 +5,95 @@
* @tags: [requires_journaling]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- // Create a new single-node replica set, and ensure that it can support $changeStream.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
+// Create a new single-node replica set, and ensure that it can support $changeStream.
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
- const db = rst.getPrimary().getDB(jsTestName());
- const collName = "change_stream_shell_helper_resume_token";
- const csCollection = assertDropAndRecreateCollection(db, collName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
+const db = rst.getPrimary().getDB(jsTestName());
+const collName = "change_stream_shell_helper_resume_token";
+const csCollection = assertDropAndRecreateCollection(db, collName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
- const batchSize = 5;
- let docId = 0;
+const batchSize = 5;
+let docId = 0;
- // Test that getResumeToken() returns the postBatchResumeToken when an empty batch is received.
- const csCursor = csCollection.watch([], {cursor: {batchSize: batchSize}});
- assert(!csCursor.hasNext());
- let curResumeToken = csCursor.getResumeToken();
- assert.neq(undefined, curResumeToken);
+// Test that getResumeToken() returns the postBatchResumeToken when an empty batch is received.
+const csCursor = csCollection.watch([], {cursor: {batchSize: batchSize}});
+assert(!csCursor.hasNext());
+let curResumeToken = csCursor.getResumeToken();
+assert.neq(undefined, curResumeToken);
- // Test that advancing the oplog time updates the postBatchResumeToken, even with no results.
- assert.commandWorked(otherCollection.insert({}));
- let prevResumeToken = curResumeToken;
- assert.soon(() => {
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- prevResumeToken = curResumeToken;
- curResumeToken = csCursor.getResumeToken();
- assert.neq(undefined, curResumeToken);
- return bsonWoCompare(curResumeToken, prevResumeToken) > 0;
- });
+// Test that advancing the oplog time updates the postBatchResumeToken, even with no results.
+assert.commandWorked(otherCollection.insert({}));
+let prevResumeToken = curResumeToken;
+assert.soon(() => {
+ assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
+ prevResumeToken = curResumeToken;
+ curResumeToken = csCursor.getResumeToken();
+ assert.neq(undefined, curResumeToken);
+ return bsonWoCompare(curResumeToken, prevResumeToken) > 0;
+});
- // Insert 9 documents into the collection, followed by a write to the unrelated collection.
- for (let i = 0; i < 9; ++i) {
- assert.commandWorked(csCollection.insert({_id: ++docId}));
- }
- assert.commandWorked(otherCollection.insert({}));
+// Insert 9 documents into the collection, followed by a write to the unrelated collection.
+for (let i = 0; i < 9; ++i) {
+ assert.commandWorked(csCollection.insert({_id: ++docId}));
+}
+assert.commandWorked(otherCollection.insert({}));
- // Retrieve the first batch of events from the cursor.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+// Retrieve the first batch of events from the cursor.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- // We have not yet iterated any of the events. Verify that the resume token is unchanged.
- assert.docEq(curResumeToken, csCursor.getResumeToken());
+// We have not yet iterated any of the events. Verify that the resume token is unchanged.
+assert.docEq(curResumeToken, csCursor.getResumeToken());
- // For each event in the first batch, the resume token should match the document's _id.
- let currentDoc = null;
- while (csCursor.objsLeftInBatch()) {
- currentDoc = csCursor.next();
- prevResumeToken = curResumeToken;
- curResumeToken = csCursor.getResumeToken();
- assert.docEq(curResumeToken, currentDoc._id);
- assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
- }
+// For each event in the first batch, the resume token should match the document's _id.
+let currentDoc = null;
+while (csCursor.objsLeftInBatch()) {
+ currentDoc = csCursor.next();
+ prevResumeToken = curResumeToken;
+ curResumeToken = csCursor.getResumeToken();
+ assert.docEq(curResumeToken, currentDoc._id);
+ assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
+}
- // Retrieve the second batch of events from the cursor.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+// Retrieve the second batch of events from the cursor.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- // We haven't pulled any events out of the cursor yet, so the resumeToken should be unchanged.
- assert.docEq(curResumeToken, csCursor.getResumeToken());
+// We haven't pulled any events out of the cursor yet, so the resumeToken should be unchanged.
+assert.docEq(curResumeToken, csCursor.getResumeToken());
- // For all but the final event, the resume token should match the document's _id.
- while ((currentDoc = csCursor.next()).fullDocument._id < docId) {
- assert.soon(() => csCursor.hasNext());
- prevResumeToken = curResumeToken;
- curResumeToken = csCursor.getResumeToken();
- assert.docEq(curResumeToken, currentDoc._id);
- assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
- }
- // When we reach here, 'currentDoc' is the final document in the batch, but we have not yet
- // updated the resume token. Assert that this resume token sorts before currentDoc's.
+// For all but the final event, the resume token should match the document's _id.
+while ((currentDoc = csCursor.next()).fullDocument._id < docId) {
+ assert.soon(() => csCursor.hasNext());
prevResumeToken = curResumeToken;
- assert.gt(bsonWoCompare(currentDoc._id, prevResumeToken), 0);
+ curResumeToken = csCursor.getResumeToken();
+ assert.docEq(curResumeToken, currentDoc._id);
+ assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
+}
+// When we reach here, 'currentDoc' is the final document in the batch, but we have not yet
+// updated the resume token. Assert that this resume token sorts before currentDoc's.
+prevResumeToken = curResumeToken;
+assert.gt(bsonWoCompare(currentDoc._id, prevResumeToken), 0);
- // After we have pulled the final document out of the cursor, the resume token should be the
- // postBatchResumeToken rather than the document's _id. Because we inserted an item into the
- // unrelated collection to push the oplog past the final event returned by the change stream,
- // this will be strictly greater than the final document's _id.
- assert.soon(() => {
- curResumeToken = csCursor.getResumeToken();
- assert(!csCursor.hasNext(), () => tojson(csCursor.next()));
- return bsonWoCompare(curResumeToken, currentDoc._id) > 0;
- });
+// After we have pulled the final document out of the cursor, the resume token should be the
+// postBatchResumeToken rather than the document's _id. Because we inserted an item into the
+// unrelated collection to push the oplog past the final event returned by the change stream,
+// this will be strictly greater than the final document's _id.
+assert.soon(() => {
+ curResumeToken = csCursor.getResumeToken();
+ assert(!csCursor.hasNext(), () => tojson(csCursor.next()));
+ return bsonWoCompare(curResumeToken, currentDoc._id) > 0;
+});
- rst.stopSet();
+rst.stopSet();
}());
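
The token semantics verified above can be condensed into a short sketch, again assuming a replica-set connection and an illustrative collection:

    const cs = db.coll.watch([], {cursor: {batchSize: 2}});
    // Before any events are returned, getResumeToken() reflects the
    // postBatchResumeToken supplied by the server.
    let token = cs.getResumeToken();
    assert.neq(undefined, token);
    assert.commandWorked(db.coll.insert([{_id: 1}, {_id: 2}]));
    assert.soon(() => cs.hasNext());
    // While buffered events remain in the batch, the token follows each
    // iterated event's _id.
    while (cs.objsLeftInBatch()) {
        const doc = cs.next();
        assert.docEq(cs.getResumeToken(), doc._id);
    }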
diff --git a/jstests/noPassthrough/change_streams_update_lookup_collation.js b/jstests/noPassthrough/change_streams_update_lookup_collation.js
index 97c7e4013a5..996ce0e2c98 100644
--- a/jstests/noPassthrough/change_streams_update_lookup_collation.js
+++ b/jstests/noPassthrough/change_streams_update_lookup_collation.js
@@ -4,98 +4,99 @@
// Collation is only supported with the find command, not with the legacy OP_QUERY protocol.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const db = rst.getPrimary().getDB("test");
- const coll = db[jsTestName()];
- const caseInsensitive = {locale: "en_US", strength: 2};
- assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
-
- // Insert some documents that have similar _ids, but differ by case and diacritics. These _ids
- // would all match the collation on the strengthOneChangeStream, but should not be confused
- // during the update lookup using the strength 2 collection default collation.
- assert.writeOK(coll.insert({_id: "abc", x: "abc"}));
- assert.writeOK(coll.insert({_id: "abç", x: "ABC"}));
- assert.writeOK(coll.insert({_id: "åbC", x: "AbÇ"}));
-
- const changeStreamDefaultCollation = coll.aggregate(
- [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
- {collation: caseInsensitive});
-
- // Strength one will consider "ç" equal to "c" and "C".
- const strengthOneCollation = {locale: "en_US", strength: 1};
- const strengthOneChangeStream = coll.aggregate(
- [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
- {collation: strengthOneCollation});
-
- assert.writeOK(coll.update({_id: "abc"}, {$set: {updated: true}}));
-
- // Track the number of _id index usages to prove that the update lookup uses the _id index (and
- // therefore is using the correct collation for the lookup).
- function numIdIndexUsages() {
- return coll.aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}])
- .toArray()[0]
- .accesses.ops;
- }
- const idIndexUsagesBeforeIteration = numIdIndexUsages();
-
- // Both cursors should produce a document describing this update, since the "x" value of the
- // first document will match both filters.
- assert.soon(() => changeStreamDefaultCollation.hasNext());
- assert.docEq(changeStreamDefaultCollation.next().fullDocument,
- {_id: "abc", x: "abc", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1);
- assert.docEq(strengthOneChangeStream.next().fullDocument,
- {_id: "abc", x: "abc", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2);
-
- assert.writeOK(coll.update({_id: "abç"}, {$set: {updated: true}}));
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3);
-
- // Again, both cursors should produce a document describing this update.
- assert.soon(() => changeStreamDefaultCollation.hasNext());
- assert.docEq(changeStreamDefaultCollation.next().fullDocument,
- {_id: "abç", x: "ABC", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4);
- assert.docEq(strengthOneChangeStream.next().fullDocument,
- {_id: "abç", x: "ABC", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5);
-
- assert.writeOK(coll.update({_id: "åbC"}, {$set: {updated: true}}));
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6);
-
- // Both $changeStream stages will see this update and both will look up the full document using
- // the foreign collection's default collation. However, the changeStreamDefaultCollation's
- // subsequent $match stage will reject the document because it does not consider "AbÇ" equal to
- // "abc". Only the strengthOneChangeStream will output the final document.
- assert.soon(() => strengthOneChangeStream.hasNext());
- assert.docEq(strengthOneChangeStream.next().fullDocument,
- {_id: "åbC", x: "AbÇ", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 7);
- assert(!changeStreamDefaultCollation.hasNext());
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 8);
-
- changeStreamDefaultCollation.close();
- strengthOneChangeStream.close();
- rst.stopSet();
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const db = rst.getPrimary().getDB("test");
+const coll = db[jsTestName()];
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
+
+// Insert some documents whose _ids differ only by case and diacritics. These _ids all
+// compare equal under the strength 1 collation, but must not be confused during the
+// update lookup, which uses the collection's default strength 2 collation.
+assert.writeOK(coll.insert({_id: "abc", x: "abc"}));
+assert.writeOK(coll.insert({_id: "abç", x: "ABC"}));
+assert.writeOK(coll.insert({_id: "åbC", x: "AbÇ"}));
+
+const changeStreamDefaultCollation = coll.aggregate(
+ [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
+ {collation: caseInsensitive});
+
+// Strength one will consider "ç" equal to "c" and "C".
+const strengthOneCollation = {
+ locale: "en_US",
+ strength: 1
+};
+const strengthOneChangeStream = coll.aggregate(
+ [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
+ {collation: strengthOneCollation});
+
+assert.writeOK(coll.update({_id: "abc"}, {$set: {updated: true}}));
+
+// Track the number of _id index usages to prove that the update lookup uses the _id index (and
+// therefore is using the correct collation for the lookup).
+function numIdIndexUsages() {
+ return coll.aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}]).toArray()[0].accesses.ops;
+}
+const idIndexUsagesBeforeIteration = numIdIndexUsages();
+
+// Both cursors should produce a document describing this update, since the "x" value of the
+// first document will match both filters.
+assert.soon(() => changeStreamDefaultCollation.hasNext());
+assert.docEq(changeStreamDefaultCollation.next().fullDocument,
+ {_id: "abc", x: "abc", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1);
+assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abc", x: "abc", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2);
+
+assert.writeOK(coll.update({_id: "abç"}, {$set: {updated: true}}));
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3);
+
+// Again, both cursors should produce a document describing this update.
+assert.soon(() => changeStreamDefaultCollation.hasNext());
+assert.docEq(changeStreamDefaultCollation.next().fullDocument,
+ {_id: "abç", x: "ABC", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4);
+assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abç", x: "ABC", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5);
+
+assert.writeOK(coll.update({_id: "åbC"}, {$set: {updated: true}}));
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6);
+
+// Both $changeStream stages will see this update and both will look up the full document using
+// the foreign collection's default collation. However, the changeStreamDefaultCollation's
+// subsequent $match stage will reject the document because it does not consider "AbÇ" equal to
+// "abc". Only the strengthOneChangeStream will output the final document.
+assert.soon(() => strengthOneChangeStream.hasNext());
+assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "åbC", x: "AbÇ", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 7);
+assert(!changeStreamDefaultCollation.hasNext());
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 8);
+
+changeStreamDefaultCollation.close();
+strengthOneChangeStream.close();
+rst.stopSet();
}());
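
The collation behavior the test leans on is easy to see in isolation: strength 1 ignores case and diacritics, while strength 2 ignores case but treats diacritics as significant. A minimal sketch with illustrative data:

    assert.commandWorked(db.c.insert([{x: "ABC"}, {x: "AbÇ"}]));
    // Strength 1: "abc" matches both documents ("ç" compares equal to "c").
    assert.eq(2, db.c.find({x: "abc"}).collation({locale: "en_US", strength: 1}).itcount());
    // Strength 2: "abc" matches only {x: "ABC"}; the cedilla is significant.
    assert.eq(1, db.c.find({x: "abc"}).collation({locale: "en_US", strength: 2}).itcount());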
diff --git a/jstests/noPassthrough/characterize_index_builds_on_restart.js b/jstests/noPassthrough/characterize_index_builds_on_restart.js
index 37cffa27ae9..ed055059bbf 100644
--- a/jstests/noPassthrough/characterize_index_builds_on_restart.js
+++ b/jstests/noPassthrough/characterize_index_builds_on_restart.js
@@ -8,239 +8,237 @@
* @tags: [requires_replication, requires_persistence, requires_majority_read_concern]
*/
(function() {
- 'use strict';
-
- load("jstests/libs/check_log.js");
- load("jstests/replsets/rslib.js");
-
- const dbName = "testDb";
- const collName = "testColl";
-
- const firstIndex = "firstIndex";
- const secondIndex = "secondIndex";
- const thirdIndex = "thirdIndex";
- const fourthIndex = "fourthIndex";
-
- const indexesToBuild = [
- {key: {i: 1}, name: firstIndex, background: true},
- {key: {j: 1}, name: secondIndex, background: true},
- {key: {i: 1, j: 1}, name: thirdIndex, background: true},
- {key: {i: -1, j: 1, k: -1}, name: fourthIndex, background: true},
- ];
-
- function startStandalone() {
- let mongod = MongoRunner.runMongod({cleanData: true});
- let db = mongod.getDB(dbName);
- db.dropDatabase();
- return mongod;
+'use strict';
+
+load("jstests/libs/check_log.js");
+load("jstests/replsets/rslib.js");
+
+const dbName = "testDb";
+const collName = "testColl";
+
+const firstIndex = "firstIndex";
+const secondIndex = "secondIndex";
+const thirdIndex = "thirdIndex";
+const fourthIndex = "fourthIndex";
+
+const indexesToBuild = [
+ {key: {i: 1}, name: firstIndex, background: true},
+ {key: {j: 1}, name: secondIndex, background: true},
+ {key: {i: 1, j: 1}, name: thirdIndex, background: true},
+ {key: {i: -1, j: 1, k: -1}, name: fourthIndex, background: true},
+];
+
+function startStandalone() {
+ let mongod = MongoRunner.runMongod({cleanData: true});
+ let db = mongod.getDB(dbName);
+ db.dropDatabase();
+ return mongod;
+}
+
+function restartStandalone(old) {
+ jsTest.log("Restarting mongod");
+ MongoRunner.stopMongod(old);
+ return MongoRunner.runMongod({restart: true, dbpath: old.dbpath, cleanData: false});
+}
+
+function shutdownStandalone(mongod) {
+ MongoRunner.stopMongod(mongod);
+}
+
+function startReplSet() {
+ let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, nodeOptions: {syncdelay: 1}});
+ let nodes = replSet.nodeList();
+
+    // We need an arbiter to ensure that the primary doesn't step down when we restart the
+    // secondary.
+ replSet.startSet({startClean: true});
+ replSet.initiate(
+ {_id: "indexBuilds", members: [{_id: 0, host: nodes[0]}, {_id: 1, host: nodes[1]}]});
+
+ replSet.getPrimary().getDB(dbName).dropDatabase();
+ return replSet;
+}
+
+function stopReplSet(replSet) {
+ replSet.stopSet();
+}
+
+function addTestDocuments(db) {
+ let size = 100;
+ jsTest.log("Creating " + size + " test documents.");
+ var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i, j: i * i, k: 1});
}
-
- function restartStandalone(old) {
- jsTest.log("Restarting mongod");
- MongoRunner.stopMongod(old);
- return MongoRunner.runMongod({restart: true, dbpath: old.dbpath, cleanData: false});
+ assert.writeOK(bulk.execute());
+}
+
+function startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, writeConcern, secondaryDB) {
+ jsTest.log("Starting an index build on the secondary and leaving it unfinished.");
+
+ assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
+
+ // Do not generate the 'ns' field for index specs on the primary to test the absence of the
+ // field on restart.
+ assert.commandWorked(
+ primaryDB.adminCommand({setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
+
+ try {
+ let res = assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: indexesToBuild, writeConcern: {w: writeConcern}}));
+
+ // Wait till all four index builds hang.
+ checkLog.containsWithCount(
+ secondaryDB,
+ "Index build interrupted due to \'leaveIndexBuildUnfinishedForShutdown\' " +
+ "failpoint. Mimicing shutdown error code.",
+ 4);
+
+ // Wait until the secondary has a checkpoint timestamp beyond the index oplog entry. On
+ // restart, replication recovery will not replay the createIndex oplog entries.
+ jsTest.log("Waiting for unfinished index build to be in checkpoint.");
+ assert.soon(() => {
+ let replSetStatus = assert.commandWorked(
+ secondaryDB.getSiblingDB("admin").runCommand({replSetGetStatus: 1}));
+ if (replSetStatus.lastStableCheckpointTimestamp >= res.operationTime)
+ return true;
+ });
+ } finally {
+ assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
}
-
- function shutdownStandalone(mongod) {
- MongoRunner.stopMongod(mongod);
+}
+
+function checkForIndexRebuild(mongod, indexName, shouldExist) {
+ let adminDB = mongod.getDB("admin");
+ let collDB = mongod.getDB(dbName);
+ let logs = adminDB.runCommand({getLog: "global"});
+
+ let rebuildIndexLogEntry = false;
+ let dropIndexLogEntry = false;
+
+ /**
+ * The log should contain the following lines if it rebuilds or drops the index:
+ * Rebuilding index. Collection: `collNss` Index: `indexName`
+ * Dropping unfinished index. Collection: `collNss` Index: `indexName`
+ */
+ let rebuildIndexLine =
+ "Rebuilding index. Collection: " + dbName + "." + collName + " Index: " + indexName;
+ let dropIndexLine = "Dropping unfinished index. Collection: " + dbName + "." + collName +
+ " Index: " + indexName;
+ for (let line = 0; line < logs.log.length; line++) {
+ if (logs.log[line].includes(rebuildIndexLine))
+ rebuildIndexLogEntry = true;
+ else if (logs.log[line].includes(dropIndexLine))
+ dropIndexLogEntry = true;
}
- function startReplSet() {
- let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, nodeOptions: {syncdelay: 1}});
- let nodes = replSet.nodeList();
-
- // We need an arbiter to ensure that the primary doesn't step down when we restart the
- // secondary
- replSet.startSet({startClean: true});
- replSet.initiate(
- {_id: "indexBuilds", members: [{_id: 0, host: nodes[0]}, {_id: 1, host: nodes[1]}]});
+    // The log must contain exactly one of the two entries for the given index name.
+ assert.neq(rebuildIndexLogEntry, dropIndexLogEntry);
- replSet.getPrimary().getDB(dbName).dropDatabase();
- return replSet;
- }
-
- function stopReplSet(replSet) {
- replSet.stopSet();
- }
+ // Ensure the index either exists or doesn't exist in the collection depending on the result
+ // of the log.
+ let collIndexes = collDB.getCollection(collName).getIndexes();
- function addTestDocuments(db) {
- let size = 100;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i, j: i * i, k: 1});
+ let foundIndexEntry = false;
+ for (let index = 0; index < collIndexes.length; index++) {
+ assert.eq(true, collIndexes[index].hasOwnProperty('ns'));
+ if (collIndexes[index].name == indexName) {
+ foundIndexEntry = true;
+ break;
}
- assert.writeOK(bulk.execute());
}
- function startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, writeConcern, secondaryDB) {
- jsTest.log("Starting an index build on the secondary and leaving it unfinished.");
-
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
-
- // Do not generate the 'ns' field for index specs on the primary to test the absence of the
- // field on restart.
- assert.commandWorked(
- primaryDB.adminCommand({setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
-
- try {
- let res = assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: indexesToBuild,
- writeConcern: {w: writeConcern}
- }));
-
- // Wait till all four index builds hang.
- checkLog.containsWithCount(
- secondaryDB,
- "Index build interrupted due to \'leaveIndexBuildUnfinishedForShutdown\' " +
- "failpoint. Mimicing shutdown error code.",
- 4);
-
- // Wait until the secondary has a checkpoint timestamp beyond the index oplog entry. On
- // restart, replication recovery will not replay the createIndex oplog entries.
- jsTest.log("Waiting for unfinished index build to be in checkpoint.");
- assert.soon(() => {
- let replSetStatus = assert.commandWorked(
- secondaryDB.getSiblingDB("admin").runCommand({replSetGetStatus: 1}));
- if (replSetStatus.lastStableCheckpointTimestamp >= res.operationTime)
- return true;
- });
- } finally {
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
- }
+ // If the log claims it rebuilt an unfinished index, the index must exist.
+ assert.eq(rebuildIndexLogEntry, foundIndexEntry);
+
+ // If the log claims it dropped an unfinished index, the index must not exist.
+ assert.eq(dropIndexLogEntry, !foundIndexEntry);
+
+ // Ensure our characterization matches the outcome of the index build.
+    assert.eq(foundIndexEntry, shouldExist);
+
+ if (foundIndexEntry)
+ jsTest.log("Rebuilt unfinished index. Collection: " + dbName + "." + collName +
+ " Index: " + indexName);
+ else
+ jsTest.log("Dropped unfinished index. Collection: " + dbName + "." + collName +
+ " Index: " + indexName);
+}
+
+function standaloneToStandaloneTest() {
+ let mongod = startStandalone();
+ let collDB = mongod.getDB(dbName);
+
+ addTestDocuments(collDB);
+
+ jsTest.log("Starting an index build on a standalone and leaving it unfinished.");
+ assert.commandWorked(collDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
+ try {
+ assert.commandFailedWithCode(
+ collDB.runCommand({createIndexes: collName, indexes: indexesToBuild}),
+ ErrorCodes.InterruptedAtShutdown);
+ } finally {
+ assert.commandWorked(collDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
}
- function checkForIndexRebuild(mongod, indexName, shouldExist) {
- let adminDB = mongod.getDB("admin");
- let collDB = mongod.getDB(dbName);
- let logs = adminDB.runCommand({getLog: "global"});
-
- let rebuildIndexLogEntry = false;
- let dropIndexLogEntry = false;
-
- /** The log should contain the following lines if it rebuilds or drops the index:
- * Rebuilding index. Collection: `collNss` Index: `indexName`
- * Dropping unfinished index. Collection: `collNss` Index: `indexName`
- */
- let rebuildIndexLine =
- "Rebuilding index. Collection: " + dbName + "." + collName + " Index: " + indexName;
- let dropIndexLine = "Dropping unfinished index. Collection: " + dbName + "." + collName +
- " Index: " + indexName;
- for (let line = 0; line < logs.log.length; line++) {
- if (logs.log[line].includes(rebuildIndexLine))
- rebuildIndexLogEntry = true;
- else if (logs.log[line].includes(dropIndexLine))
- dropIndexLogEntry = true;
- }
+ mongod = restartStandalone(mongod);
- // Can't be either missing both entries or have both entries for the given index name.
- assert.neq(rebuildIndexLogEntry, dropIndexLogEntry);
+ checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/false);
+ checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/false);
+ checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/false);
+ checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/false);
- // Ensure the index either exists or doesn't exist in the collection depending on the result
- // of the log.
- let collIndexes = collDB.getCollection(collName).getIndexes();
+ shutdownStandalone(mongod);
+}
- let foundIndexEntry = false;
- for (let index = 0; index < collIndexes.length; index++) {
- assert.eq(true, collIndexes[index].hasOwnProperty('ns'));
- if (collIndexes[index].name == indexName) {
- foundIndexEntry = true;
- break;
- }
- }
+function secondaryToStandaloneTest() {
+ let replSet = startReplSet();
+ let primary = replSet.getPrimary();
+ let secondary = replSet.getSecondary();
- // If the log claims it rebuilt an unfinished index, the index must exist.
- assert.eq(rebuildIndexLogEntry, foundIndexEntry);
+ let primaryDB = primary.getDB(dbName);
+ let secondaryDB = secondary.getDB(dbName);
- // If the log claims it dropped an unfinished index, the index must not exist.
- assert.eq(dropIndexLogEntry, !foundIndexEntry);
+ addTestDocuments(primaryDB);
- // Ensure our characterization matches the outcome of the index build.
- assert.eq(foundIndexEntry, (shouldExist ? true : false));
+ // Make sure the documents get replicated on the secondary.
+ replSet.awaitReplication();
- if (foundIndexEntry)
- jsTest.log("Rebuilt unfinished index. Collection: " + dbName + "." + collName +
- " Index: " + indexName);
- else
- jsTest.log("Dropped unfinished index. Collection: " + dbName + "." + collName +
- " Index: " + indexName);
- }
+ startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, /*writeConcern=*/2, secondaryDB);
- function standaloneToStandaloneTest() {
- let mongod = startStandalone();
- let collDB = mongod.getDB(dbName);
+ replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
- addTestDocuments(collDB);
+ let mongod = restartStandalone(secondary);
- jsTest.log("Starting an index build on a standalone and leaving it unfinished.");
- assert.commandWorked(collDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
- try {
- assert.commandFailedWithCode(
- collDB.runCommand({createIndexes: collName, indexes: indexesToBuild}),
- ErrorCodes.InterruptedAtShutdown);
- } finally {
- assert.commandWorked(collDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
- }
-
- mongod = restartStandalone(mongod);
+ checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/true);
+ checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/true);
+ checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/true);
+ checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/false);
- checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/false);
- checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/false);
- checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/false);
+ shutdownStandalone(mongod);
- shutdownStandalone(mongod);
+ mongod = restartStandalone(primary);
+ let specs = mongod.getDB(dbName).getCollection(collName).getIndexes();
+ assert.eq(specs.length, 5);
+ for (let index = 0; index < specs.length; index++) {
+ assert.eq(true, specs[index].hasOwnProperty('ns'));
}
- function secondaryToStandaloneTest() {
- let replSet = startReplSet();
- let primary = replSet.getPrimary();
- let secondary = replSet.getSecondary();
-
- let primaryDB = primary.getDB(dbName);
- let secondaryDB = secondary.getDB(dbName);
-
- addTestDocuments(primaryDB);
-
- // Make sure the documents get replicated on the secondary.
- replSet.awaitReplication();
-
- startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, /*writeConcern=*/2, secondaryDB);
-
- replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
-
- let mongod = restartStandalone(secondary);
-
- checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/true);
-
- shutdownStandalone(mongod);
-
- mongod = restartStandalone(primary);
- let specs = mongod.getDB(dbName).getCollection(collName).getIndexes();
- assert.eq(specs.length, 5);
- for (let index = 0; index < specs.length; index++) {
- assert.eq(true, specs[index].hasOwnProperty('ns'));
- }
-
- shutdownStandalone(mongod);
- }
+ shutdownStandalone(mongod);
+}
- /* Begin tests */
- jsTest.log("Restarting nodes as standalone with unfinished indexes.");
+/* Begin tests */
+jsTest.log("Restarting nodes as standalone with unfinished indexes.");
- // Standalone restarts as standalone
- jsTest.log("Restarting standalone mongod.");
- standaloneToStandaloneTest();
+// Standalone restarts as standalone
+jsTest.log("Restarting standalone mongod.");
+standaloneToStandaloneTest();
- // Replica set node restarts as standalone
- jsTest.log("Restarting replica set node mongod.");
- secondaryToStandaloneTest();
+// Replica set node restarts as standalone
+jsTest.log("Restarting replica set node mongod.");
+secondaryToStandaloneTest();
})();
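
One pattern in the test above deserves a note: failpoints are enabled and disabled in a try/finally so a failed assertion cannot leave the server wedged. A minimal sketch (the failpoint name is the one the test uses; configureFailPoint requires a server started with test commands enabled):

    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
    try {
        // Trigger the work expected to hit the failpoint, e.g. createIndexes.
    } finally {
        // Always turn the failpoint off, even if an assertion above throws.
        assert.commandWorked(db.adminCommand(
            {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
    }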
diff --git a/jstests/noPassthrough/child_op_numyields.js b/jstests/noPassthrough/child_op_numyields.js
index 04c79d308ba..fbc5dc773dc 100644
--- a/jstests/noPassthrough/child_op_numyields.js
+++ b/jstests/noPassthrough/child_op_numyields.js
@@ -3,42 +3,42 @@
* as the latter are popped off the CurOp stack.
*/
(function() {
- "use strict";
-
- // Start a single mongoD using MongoRunner.
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
-
- // Create the test DB and collection.
- const testDB = conn.getDB("currentop_yield");
- const adminDB = conn.getDB("admin");
- const testColl = testDB.test;
-
- // Queries current operations until a single matching operation is found.
- function awaitMatchingCurrentOp(match) {
- let currentOp = null;
- assert.soon(() => {
- currentOp = adminDB.aggregate([{$currentOp: {}}, match]).toArray();
- return (currentOp.length === 1);
- });
- return currentOp[0];
- }
-
- // Executes a bulk remove using the specified 'docsToRemove' array, captures the 'numYields'
- // metrics from each child op, and confirms that the parent op's 'numYields' total is equivalent
- // to the sum of the child ops.
- function runYieldTest(docsToRemove) {
- // Sets parameters such that all operations will yield & the operation hangs on the server
- // when we need to test.
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "alwaysOn"}));
-
- // Starts parallel shell to run the command that will hang.
- const awaitShell = startParallelShell(`{
+"use strict";
+
+// Start a single mongoD using MongoRunner.
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+
+// Create the test DB and collection.
+const testDB = conn.getDB("currentop_yield");
+const adminDB = conn.getDB("admin");
+const testColl = testDB.test;
+
+// Queries current operations until a single matching operation is found.
+function awaitMatchingCurrentOp(match) {
+ let currentOp = null;
+ assert.soon(() => {
+ currentOp = adminDB.aggregate([{$currentOp: {}}, match]).toArray();
+ return (currentOp.length === 1);
+ });
+ return currentOp[0];
+}
+
+// Executes a bulk remove using the specified 'docsToRemove' array, captures the 'numYields'
+// metrics from each child op, and confirms that the parent op's 'numYields' total is equivalent
+// to the sum of the child ops.
+function runYieldTest(docsToRemove) {
+    // Set parameters so that every operation yields, and enable the failpoints that
+    // make the server hang at the points the test needs to inspect.
+ assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "alwaysOn"}));
+
+ // Starts parallel shell to run the command that will hang.
+ const awaitShell = startParallelShell(`{
const testDB = db.getSiblingDB("currentop_yield");
const bulkRemove = testDB.test.initializeOrderedBulkOp();
for(let doc of ${tojsononeline(docsToRemove)}) {
@@ -48,73 +48,73 @@
}`,
testDB.getMongo().port);
- let childOpId = null;
- let childYields = 0;
-
- // Get child operations and sum yields. Each child op encounters two failpoints while
- // running: 'hangBeforeChildRemoveOpFinishes' followed by 'hangBeforeChildRemoveOpIsPopped'.
- // We use these two failpoints as an 'airlock', hanging at the first while we enable the
- // second, then hanging at the second while we enable the first, to ensure that each child
- // op is caught and their individual 'numYields' recorded.
- for (let childCount = 0; childCount < docsToRemove.length; childCount++) {
- // Wait for the child op to hit the first of two failpoints.
- let childCurOp = awaitMatchingCurrentOp(
- {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpFinishes"}});
-
- // Add the child's yield count to the running total, and record the opid.
- assert(childOpId === null || childOpId === childCurOp.opid);
- assert.gt(childCurOp.numYields, 0);
- childYields += childCurOp.numYields;
- childOpId = childCurOp.opid;
-
- // Enable the subsequent 'hangBeforeChildRemoveOpIsPopped' failpoint, just after the
- // child op finishes but before it is popped from the stack.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "alwaysOn"}));
+ let childOpId = null;
+ let childYields = 0;
+
+ // Get child operations and sum yields. Each child op encounters two failpoints while
+ // running: 'hangBeforeChildRemoveOpFinishes' followed by 'hangBeforeChildRemoveOpIsPopped'.
+ // We use these two failpoints as an 'airlock', hanging at the first while we enable the
+ // second, then hanging at the second while we enable the first, to ensure that each child
+ // op is caught and their individual 'numYields' recorded.
+ for (let childCount = 0; childCount < docsToRemove.length; childCount++) {
+ // Wait for the child op to hit the first of two failpoints.
+ let childCurOp = awaitMatchingCurrentOp(
+ {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpFinishes"}});
+
+ // Add the child's yield count to the running total, and record the opid.
+ assert(childOpId === null || childOpId === childCurOp.opid);
+ assert.gt(childCurOp.numYields, 0);
+ childYields += childCurOp.numYields;
+ childOpId = childCurOp.opid;
+
+ // Enable the subsequent 'hangBeforeChildRemoveOpIsPopped' failpoint, just after the
+ // child op finishes but before it is popped from the stack.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "alwaysOn"}));
- // Let the operation proceed to the 'hangBeforeChildRemoveOpIsPopped' failpoint.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "off"}));
- awaitMatchingCurrentOp(
- {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpIsPopped"}});
-
- // If this is not the final child op, re-enable the 'hangBeforeChildRemoveOpFinishes'
- // failpoint from earlier so that we don't miss the next child.
- if (childCount + 1 < docsToRemove.length) {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
- }
+ // Let the operation proceed to the 'hangBeforeChildRemoveOpIsPopped' failpoint.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "off"}));
+ awaitMatchingCurrentOp(
+ {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpIsPopped"}});
- // Finally, allow the operation to continue.
+ // If this is not the final child op, re-enable the 'hangBeforeChildRemoveOpFinishes'
+ // failpoint from earlier so that we don't miss the next child.
+ if (childCount + 1 < docsToRemove.length) {
assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "off"}));
+ {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
}
- // Wait for the operation to hit the 'hangAfterAllChildRemoveOpsArePopped' failpoint, then
- // take the total number of yields recorded by the parent op.
- const parentCurOp = awaitMatchingCurrentOp(
- {$match: {opid: childOpId, op: "command", msg: "hangAfterAllChildRemoveOpsArePopped"}});
+ // Finally, allow the operation to continue.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "off"}));
+ }
- // Verify that the parent's yield count equals the sum of the child ops' yields.
- assert.eq(parentCurOp.numYields, childYields);
- assert.eq(parentCurOp.opid, childOpId);
+ // Wait for the operation to hit the 'hangAfterAllChildRemoveOpsArePopped' failpoint, then
+ // take the total number of yields recorded by the parent op.
+ const parentCurOp = awaitMatchingCurrentOp(
+ {$match: {opid: childOpId, op: "command", msg: "hangAfterAllChildRemoveOpsArePopped"}});
- // Allow the parent operation to complete.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "off"}));
+ // Verify that the parent's yield count equals the sum of the child ops' yields.
+ assert.eq(parentCurOp.numYields, childYields);
+ assert.eq(parentCurOp.opid, childOpId);
- // Wait for the parallel shell to complete.
- awaitShell();
- }
+ // Allow the parent operation to complete.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "off"}));
+
+ // Wait for the parallel shell to complete.
+ awaitShell();
+}
- // Test that a parent remove op inherits the sum of its children's yields for a single remove.
- assert.commandWorked(testDB.test.insert({a: 2}));
- runYieldTest([{a: 2}]);
+// Test that a parent remove op inherits the sum of its children's yields for a single remove.
+assert.commandWorked(testDB.test.insert({a: 2}));
+runYieldTest([{a: 2}]);
- // Test that a parent remove op inherits the sum of its children's yields for multiple removes.
- const docsToTest = [{a: 1}, {a: 2}, {a: 3}, {a: 4}, {a: 5}];
- assert.commandWorked(testDB.test.insert(docsToTest));
- runYieldTest(docsToTest);
+// Test that a parent remove op inherits the sum of its children's yields for multiple removes.
+const docsToTest = [{a: 1}, {a: 2}, {a: 3}, {a: 4}, {a: 5}];
+assert.commandWorked(testDB.test.insert(docsToTest));
+runYieldTest(docsToTest);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
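
The awaitMatchingCurrentOp() helper above wraps a $currentOp aggregation; a minimal sketch of the underlying query (namespace illustrative):

    // $currentOp must run as the first stage of a pipeline on the admin database.
    const ops = db.getSiblingDB("admin")
                    .aggregate([{$currentOp: {}}, {$match: {ns: "currentop_yield.test"}}])
                    .toArray();
    ops.forEach((op) => print("opid " + op.opid + " numYields " + op.numYields));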
diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js
index f1b90492ce6..419a19a9ebb 100644
--- a/jstests/noPassthrough/client_metadata_log.js
+++ b/jstests/noPassthrough/client_metadata_log.js
@@ -3,64 +3,64 @@
* @tags: [requires_sharding]
*/
(function() {
- 'use strict';
+'use strict';
- let checkLog = function(conn) {
- let coll = conn.getCollection("test.foo");
- assert.writeOK(coll.insert({_id: 1}));
+let checkLog = function(conn) {
+ let coll = conn.getCollection("test.foo");
+ assert.writeOK(coll.insert({_id: 1}));
- print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
- let log = cat(conn.fullOptions.logFile);
+ print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
+ let log = cat(conn.fullOptions.logFile);
- assert(
- /received client metadata from .*: { application: { name: ".*" }, driver: { name: ".*", version: ".*" }, os: { type: ".*", name: ".*", architecture: ".*", version: ".*" } }/
- .test(log),
- "'received client metadata' log line missing in log file!\n" + "Log file contents: " +
- conn.fullOptions.logFile +
- "\n************************************************************\n" + log +
- "\n************************************************************");
- };
+ assert(
+ /received client metadata from .*: { application: { name: ".*" }, driver: { name: ".*", version: ".*" }, os: { type: ".*", name: ".*", architecture: ".*", version: ".*" } }/
+ .test(log),
+ "'received client metadata' log line missing in log file!\n" +
+ "Log file contents: " + conn.fullOptions.logFile +
+ "\n************************************************************\n" + log +
+ "\n************************************************************");
+};
- // Test MongoD
- let testMongoD = function() {
- let conn = MongoRunner.runMongod({useLogFiles: true});
- assert.neq(null, conn, 'mongod was unable to start up');
+// Test MongoD
+let testMongoD = function() {
+ let conn = MongoRunner.runMongod({useLogFiles: true});
+ assert.neq(null, conn, 'mongod was unable to start up');
- checkLog(conn);
+ checkLog(conn);
- MongoRunner.stopMongod(conn);
- };
+ MongoRunner.stopMongod(conn);
+};
- // Test MongoS
- let testMongoS = function() {
- let options = {
- mongosOptions: {useLogFiles: true},
- };
+// Test MongoS
+let testMongoS = function() {
+ let options = {
+ mongosOptions: {useLogFiles: true},
+ };
- let st = new ShardingTest({shards: 1, mongos: 1, other: options});
+ let st = new ShardingTest({shards: 1, mongos: 1, other: options});
- checkLog(st.s0);
+ checkLog(st.s0);
- // Validate db.currentOp() contains mongos information
- let curOp = st.s0.adminCommand({currentOp: 1});
- print(tojson(curOp));
+ // Validate db.currentOp() contains mongos information
+ let curOp = st.s0.adminCommand({currentOp: 1});
+ print(tojson(curOp));
- var inprogSample = null;
- for (let inprog of curOp.inprog) {
- if (inprog.hasOwnProperty("clientMetadata") &&
- inprog.clientMetadata.hasOwnProperty("mongos")) {
- inprogSample = inprog;
- break;
- }
+ var inprogSample = null;
+ for (let inprog of curOp.inprog) {
+ if (inprog.hasOwnProperty("clientMetadata") &&
+ inprog.clientMetadata.hasOwnProperty("mongos")) {
+ inprogSample = inprog;
+ break;
}
+ }
- assert.neq(inprogSample.clientMetadata.mongos.host, "unknown");
- assert.neq(inprogSample.clientMetadata.mongos.client, "unknown");
- assert.neq(inprogSample.clientMetadata.mongos.version, "unknown");
+ assert.neq(inprogSample.clientMetadata.mongos.host, "unknown");
+ assert.neq(inprogSample.clientMetadata.mongos.client, "unknown");
+ assert.neq(inprogSample.clientMetadata.mongos.version, "unknown");
- st.stop();
- };
+ st.stop();
+};
- testMongoD();
- testMongoS();
+testMongoD();
+testMongoS();
})();
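
Both helpers share one log-scraping recipe; a minimal sketch, assuming 'conn' was returned by MongoRunner.runMongod({useLogFiles: true}) so that fullOptions.logFile is populated:

    const log = cat(conn.fullOptions.logFile);  // Read the entire log file.
    assert(/received client metadata from/.test(log),
           "expected a 'received client metadata' line in " + conn.fullOptions.logFile);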
diff --git a/jstests/noPassthrough/client_metadata_slowlog.js b/jstests/noPassthrough/client_metadata_slowlog.js
index 993d7c47914..aab419023fe 100644
--- a/jstests/noPassthrough/client_metadata_slowlog.js
+++ b/jstests/noPassthrough/client_metadata_slowlog.js
@@ -2,32 +2,32 @@
* Test that verifies client metadata is logged as part of slow query logging in MongoD.
*/
(function() {
- 'use strict';
+'use strict';
- let conn = MongoRunner.runMongod({useLogFiles: true});
- assert.neq(null, conn, 'mongod was unable to start up');
+let conn = MongoRunner.runMongod({useLogFiles: true});
+assert.neq(null, conn, 'mongod was unable to start up');
- let coll = conn.getCollection("test.foo");
- assert.writeOK(coll.insert({_id: 1}));
+let coll = conn.getCollection("test.foo");
+assert.writeOK(coll.insert({_id: 1}));
- // Do a really slow query beyond the 100ms threshold
- let count = coll.count({
- $where: function() {
- sleep(1000);
- return true;
- }
- });
- assert.eq(count, 1, "expected 1 document");
+// Do a really slow query beyond the 100ms threshold
+let count = coll.count({
+ $where: function() {
+ sleep(1000);
+ return true;
+ }
+});
+assert.eq(count, 1, "expected 1 document");
- print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
- let log = cat(conn.fullOptions.logFile);
- assert(
- /COMMAND .* command test.foo appName: "MongoDB Shell" command: count { count: "foo", query: { \$where: function\(\)/
- .test(log),
- "'slow query' log line missing in mongod log file!\n" + "Log file contents: " +
- conn.fullOptions.logFile +
- "\n************************************************************\n" + log +
- "\n************************************************************");
+print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
+let log = cat(conn.fullOptions.logFile);
+assert(
+ /COMMAND .* command test.foo appName: "MongoDB Shell" command: count { count: "foo", query: { \$where: function\(\)/
+ .test(log),
+ "'slow query' log line missing in mongod log file!\n" +
+ "Log file contents: " + conn.fullOptions.logFile +
+ "\n************************************************************\n" + log +
+ "\n************************************************************");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
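
The slow query above is manufactured rather than waited for: a $where predicate that sleeps guarantees the operation exceeds the default 100ms slowms threshold. The same trick in isolation (collection illustrative):

    // Every document evaluated against this predicate sleeps for one second,
    // so the count is certain to be logged as a slow query.
    const n = db.foo.count({
        $where: function() {
            sleep(1000);
            return true;
        }
    });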
diff --git a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js b/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
index 9e138eda22a..ebfd9456121 100644
--- a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
+++ b/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
@@ -6,40 +6,40 @@
*/
(function() {
- "use strict";
- let replSet = new ReplSetTest({name: "server35671", nodes: 1});
- let setFailpointBool = (failpointName, alwaysOn, times) => {
- if (times) {
- return db.adminCommand({configureFailPoint: failpointName, mode: {"times": times}});
- } else if (alwaysOn) {
- return db.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"});
- } else {
- return db.adminCommand({configureFailPoint: failpointName, mode: "off"});
- }
- };
- replSet.startSet();
- replSet.initiate();
- var db = replSet.getPrimary();
- setFailpointBool("hangAfterStartingIndexBuildUnlocked", true);
+"use strict";
+let replSet = new ReplSetTest({name: "server35671", nodes: 1});
+let setFailpointBool = (failpointName, alwaysOn, times) => {
+ if (times) {
+ return db.adminCommand({configureFailPoint: failpointName, mode: {"times": times}});
+ } else if (alwaysOn) {
+ return db.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"});
+ } else {
+ return db.adminCommand({configureFailPoint: failpointName, mode: "off"});
+ }
+};
+replSet.startSet();
+replSet.initiate();
+var db = replSet.getPrimary();
+setFailpointBool("hangAfterStartingIndexBuildUnlocked", true);
- // Blocks because of failpoint
- var join = startParallelShell("db.coll.createIndex({a: 1, b: 1}, {background: true})",
- replSet.ports[0]);
+// Blocks on the 'hangAfterStartingIndexBuildUnlocked' failpoint enabled above.
+var join =
+ startParallelShell("db.coll.createIndex({a: 1, b: 1}, {background: true})", replSet.ports[0]);
- // Let the createIndex start to run.
- assert.soon(function() {
- // Need to do getDB because getPrimary returns something slightly different.
- let res = db.getDB("test").currentOp({"command.createIndexes": "coll"});
- return res['ok'] === 1 && res["inprog"].length > 0;
- });
+// Let the createIndex start to run.
+assert.soon(function() {
+    // 'db' is a connection returned by getPrimary(), not a DB handle, so call getDB().
+ let res = db.getDB("test").currentOp({"command.createIndexes": "coll"});
+ return res['ok'] === 1 && res["inprog"].length > 0;
+});
- // Repeated calls should continue to fail without crashing.
- assert.commandFailed(db.adminCommand({restartCatalog: 1}));
- assert.commandFailed(db.adminCommand({restartCatalog: 1}));
- assert.commandFailed(db.adminCommand({restartCatalog: 1}));
+// Repeated calls should continue to fail without crashing.
+assert.commandFailed(db.adminCommand({restartCatalog: 1}));
+assert.commandFailed(db.adminCommand({restartCatalog: 1}));
+assert.commandFailed(db.adminCommand({restartCatalog: 1}));
- // Unset failpoint so we can join the parallel shell.
- setFailpointBool("hangAfterStartingIndexBuildUnlocked", false);
- join();
- replSet.stopSet();
+// Unset failpoint so we can join the parallel shell.
+setFailpointBool("hangAfterStartingIndexBuildUnlocked", false);
+join();
+replSet.stopSet();
})();
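
The blocked-shell pattern above recurs throughout these tests; a minimal sketch (the command string and port are those the test uses):

    // Run the blocking command in a second shell so this one remains free to
    // inspect server state and toggle failpoints.
    const join = startParallelShell(
        "db.coll.createIndex({a: 1, b: 1}, {background: true})", replSet.ports[0]);
    // ... assertions while the command is blocked ...
    join();  // Waits for the parallel shell to exit; throws on nonzero exit.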
diff --git a/jstests/noPassthrough/coll_mod_apply_ops.js b/jstests/noPassthrough/coll_mod_apply_ops.js
index d5e1cc7e4e7..27ced6b1069 100644
--- a/jstests/noPassthrough/coll_mod_apply_ops.js
+++ b/jstests/noPassthrough/coll_mod_apply_ops.js
@@ -2,43 +2,43 @@
// in applyOps.
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up with empty options");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up with empty options");
- let dbCollModName = "db_coll_mod";
- const dbCollMod = conn.getDB(dbCollModName);
- dbCollMod.dropDatabase();
- let collName = "collModTest";
- let coll = dbCollMod[collName];
+let dbCollModName = "db_coll_mod";
+const dbCollMod = conn.getDB(dbCollModName);
+dbCollMod.dropDatabase();
+let collName = "collModTest";
+let coll = dbCollMod[collName];
- // Generate a random UUID that is distinct from collModTest's UUID.
- const randomUUID = UUID();
- assert.neq(randomUUID, coll.uuid);
+// Generate a random UUID that is distinct from collModTest's UUID.
+const randomUUID = UUID();
+assert.neq(randomUUID, coll.uuid);
- // Perform a collMod to initialize validationLevel to "off".
- assert.commandWorked(dbCollMod.createCollection(collName));
- let cmd = {"collMod": collName, "validationLevel": "off"};
- let res = dbCollMod.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- let collectionInfosOriginal = dbCollMod.getCollectionInfos()[0];
- assert.eq(collectionInfosOriginal.options.validationLevel, "off");
+// Perform a collMod to initialize validationLevel to "off".
+assert.commandWorked(dbCollMod.createCollection(collName));
+let cmd = {"collMod": collName, "validationLevel": "off"};
+let res = dbCollMod.runCommand(cmd);
+assert.commandWorked(res, 'could not run ' + tojson(cmd));
+let collectionInfosOriginal = dbCollMod.getCollectionInfos()[0];
+assert.eq(collectionInfosOriginal.options.validationLevel, "off");
- // Perform an applyOps command with a nonexistent UUID and the same name as an existing
- // collection. applyOps should succeed because of idempotency but a NamespaceNotFound
- // uassert should be thrown during collMod application.
- let collModApplyOpsEntry = {
- "v": 2,
- "op": "c",
- "ns": dbCollModName + ".$cmd",
- "ui": randomUUID,
- "o2": {"collectionOptions_old": {"uuid": randomUUID}},
- "o": {"collMod": collName, "validationLevel": "moderate"}
- };
- assert.commandWorked(dbCollMod.adminCommand({"applyOps": [collModApplyOpsEntry]}));
+// Perform an applyOps command with a nonexistent UUID and the same name as an existing
+// collection. applyOps should succeed because of idempotency but a NamespaceNotFound
+// uassert should be thrown during collMod application.
+let collModApplyOpsEntry = {
+ "v": 2,
+ "op": "c",
+ "ns": dbCollModName + ".$cmd",
+ "ui": randomUUID,
+ "o2": {"collectionOptions_old": {"uuid": randomUUID}},
+ "o": {"collMod": collName, "validationLevel": "moderate"}
+};
+assert.commandWorked(dbCollMod.adminCommand({"applyOps": [collModApplyOpsEntry]}));
- // Ensure the collection options of the existing collection were not affected.
- assert.eq(dbCollMod.getCollectionInfos()[0].name, collName);
- assert.eq(dbCollMod.getCollectionInfos()[0].options.validationLevel, "off");
- MongoRunner.stopMongod(conn);
+// Ensure the collection options of the existing collection were not affected.
+assert.eq(dbCollMod.getCollectionInfos()[0].name, collName);
+assert.eq(dbCollMod.getCollectionInfos()[0].options.validationLevel, "off");
+MongoRunner.stopMongod(conn);
}());
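For reference, the idempotency behavior exercised above reduces to a few lines. This is a sketch rather than the test itself: the database and collection names are illustrative, and the "o2" rollback metadata carried by the real oplog entry is omitted:

const conn = MongoRunner.runMongod({});
const testDB = conn.getDB("apply_ops_sketch");
assert.commandWorked(testDB.createCollection("c"));
const entry = {
    v: 2,
    op: "c",
    ns: testDB.getName() + ".$cmd",
    ui: UUID(),  // random UUID, almost certainly not the collection's UUID
    o: {collMod: "c", validationLevel: "moderate"}
};
// applyOps reports success for idempotency, but the collMod targets a UUID that
// resolves to no collection, so the existing collection is left untouched.
assert.commandWorked(testDB.adminCommand({applyOps: [entry]}));
assert.neq("moderate", testDB.getCollectionInfos({name: "c"})[0].options.validationLevel);
MongoRunner.stopMongod(conn);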
diff --git a/jstests/noPassthrough/collation_clone_collection.js b/jstests/noPassthrough/collation_clone_collection.js
index 538b49d2077..14a729eb518 100644
--- a/jstests/noPassthrough/collation_clone_collection.js
+++ b/jstests/noPassthrough/collation_clone_collection.js
@@ -3,74 +3,74 @@
* used when filtering the source collection.
*/
(function() {
- "use strict";
+"use strict";
- var source = MongoRunner.runMongod({});
- assert.neq(null, source, "mongod was unable to start up");
+var source = MongoRunner.runMongod({});
+assert.neq(null, source, "mongod was unable to start up");
- var dest = MongoRunner.runMongod({});
- assert.neq(null, dest, "mongod was unable to start up");
+var dest = MongoRunner.runMongod({});
+assert.neq(null, dest, "mongod was unable to start up");
- var sourceColl = source.getDB("test").collation;
- var destColl = dest.getDB("test").collation;
+var sourceColl = source.getDB("test").collation;
+var destColl = dest.getDB("test").collation;
- assert.commandWorked(sourceColl.getDB().runCommand(
- {create: sourceColl.getName(), collation: {locale: "en", strength: 2}}));
- // We remove UUIDs before comparing as collection cloning results in a new UUID.
- var sourceCollectionInfos =
- sourceColl.getDB().getCollectionInfos({name: sourceColl.getName()}).map((collInfo) => {
- delete collInfo.info.uuid;
- return collInfo;
- });
-
- assert.writeOK(sourceColl.insert({_id: "FOO"}));
- assert.writeOK(sourceColl.insert({_id: "bar"}));
- assert.eq([{_id: "FOO"}],
- sourceColl.find({_id: "foo"}).toArray(),
- "query should have performed a case-insensitive match");
-
- assert.commandWorked(
- sourceColl.createIndex({withSimpleCollation: 1}, {collation: {locale: "simple"}}));
- assert.commandWorked(sourceColl.createIndex({withDefaultCollation: 1}));
- assert.commandWorked(
- sourceColl.createIndex({withNonDefaultCollation: 1}, {collation: {locale: "fr"}}));
- var sourceIndexInfos = sourceColl.getIndexes().map(function(indexInfo) {
- // We remove the "ns" field from the index specification when comparing whether the indexes
- // that were cloned are equivalent because they were built on a different namespace.
- delete indexInfo.ns;
- return indexInfo;
+assert.commandWorked(sourceColl.getDB().runCommand(
+ {create: sourceColl.getName(), collation: {locale: "en", strength: 2}}));
+// We remove UUIDs before comparing as collection cloning results in a new UUID.
+var sourceCollectionInfos =
+ sourceColl.getDB().getCollectionInfos({name: sourceColl.getName()}).map((collInfo) => {
+ delete collInfo.info.uuid;
+ return collInfo;
});
- // Test that the "cloneCollection" command respects the collection-default collation.
- destColl.drop();
- assert.commandWorked(destColl.getDB().runCommand({
- cloneCollection: sourceColl.getFullName(),
- from: sourceColl.getMongo().host,
- query: {_id: "foo"}
- }));
+assert.writeOK(sourceColl.insert({_id: "FOO"}));
+assert.writeOK(sourceColl.insert({_id: "bar"}));
+assert.eq([{_id: "FOO"}],
+ sourceColl.find({_id: "foo"}).toArray(),
+ "query should have performed a case-insensitive match");
- var destCollectionInfos =
- destColl.getDB().getCollectionInfos({name: destColl.getName()}).map((collInfo) => {
- delete collInfo.info.uuid;
- return collInfo;
- });
- assert.eq(sourceCollectionInfos, destCollectionInfos);
- assert.eq([{_id: "FOO"}], destColl.find({}).toArray());
+assert.commandWorked(
+ sourceColl.createIndex({withSimpleCollation: 1}, {collation: {locale: "simple"}}));
+assert.commandWorked(sourceColl.createIndex({withDefaultCollation: 1}));
+assert.commandWorked(
+ sourceColl.createIndex({withNonDefaultCollation: 1}, {collation: {locale: "fr"}}));
+var sourceIndexInfos = sourceColl.getIndexes().map(function(indexInfo) {
+ // We remove the "ns" field from the index specification when comparing whether the indexes
+ // that were cloned are equivalent because they were built on a different namespace.
+ delete indexInfo.ns;
+ return indexInfo;
+});
- var destIndexInfos = destColl.getIndexes().map(function(indexInfo) {
- // We remove the "ns" field from the index specification when comparing whether the indexes
- // that were cloned are equivalent because they were built on a different namespace.
- delete indexInfo.ns;
- return indexInfo;
+// Test that the "cloneCollection" command respects the collection-default collation.
+destColl.drop();
+assert.commandWorked(destColl.getDB().runCommand({
+ cloneCollection: sourceColl.getFullName(),
+ from: sourceColl.getMongo().host,
+ query: {_id: "foo"}
+}));
+
+var destCollectionInfos =
+ destColl.getDB().getCollectionInfos({name: destColl.getName()}).map((collInfo) => {
+ delete collInfo.info.uuid;
+ return collInfo;
});
+assert.eq(sourceCollectionInfos, destCollectionInfos);
+assert.eq([{_id: "FOO"}], destColl.find({}).toArray());
+
+var destIndexInfos = destColl.getIndexes().map(function(indexInfo) {
+ // We remove the "ns" field from the index specification when comparing whether the indexes
+ // that were cloned are equivalent because they were built on a different namespace.
+ delete indexInfo.ns;
+ return indexInfo;
+});
- assert.eq(sourceIndexInfos.length,
- destIndexInfos.length,
-              "Number of indexes doesn't match; source: " + tojson(sourceIndexInfos) + ", dest: " +

- tojson(destIndexInfos));
- for (var i = 0; i < sourceIndexInfos.length; ++i) {
- assert.contains(sourceIndexInfos[i], destIndexInfos);
- }
- MongoRunner.stopMongod(source);
- MongoRunner.stopMongod(dest);
+assert.eq(sourceIndexInfos.length,
+ destIndexInfos.length,
+          "Number of indexes doesn't match; source: " + tojson(sourceIndexInfos) +
+ ", dest: " + tojson(destIndexInfos));
+for (var i = 0; i < sourceIndexInfos.length; ++i) {
+ assert.contains(sourceIndexInfos[i], destIndexInfos);
+}
+MongoRunner.stopMongod(source);
+MongoRunner.stopMongod(dest);
})();
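The case-insensitive match asserted above follows from the collection-default collation alone; a minimal sketch (illustrative database and collection names; strength 2 means case- and diacritic-insensitive comparison):

const conn = MongoRunner.runMongod({});
const testDB = conn.getDB("collation_sketch");
assert.commandWorked(
    testDB.createCollection("c", {collation: {locale: "en", strength: 2}}));
assert.writeOK(testDB.c.insert({_id: "FOO"}));
// The collection default applies to queries that don't name a collation...
assert.eq(1, testDB.c.find({_id: "foo"}).itcount());
// ...and can be overridden per query with the simple binary collation.
assert.eq(0, testDB.c.find({_id: "foo"}).collation({locale: "simple"}).itcount());
MongoRunner.stopMongod(conn);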
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index 3838c90425c..6811bf77ec2 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -1,117 +1,115 @@
// Tests that commands properly handle their underlying plan executor failing or being killed.
(function() {
- 'use strict';
- const dbpath = MongoRunner.dataPath + jsTest.name();
- resetDbpath(dbpath);
- const mongod = MongoRunner.runMongod({dbpath: dbpath});
- const db = mongod.getDB("test");
- const collName = jsTest.name();
- const coll = db.getCollection(collName);
-
-    // How many units of work it takes to yield.
- const yieldIterations = 2;
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: yieldIterations}));
- const nDocs = yieldIterations + 2;
-
- /**
- * Asserts that 'commandResult' indicates a command failure, and returns the error message.
- */
- function assertContainsErrorMessage(commandResult) {
- assert(commandResult.ok === 0 ||
- (commandResult.ok === 1 && commandResult.writeErrors !== undefined),
- 'expected command to fail: ' + tojson(commandResult));
- if (commandResult.ok === 0) {
- return commandResult.errmsg;
- } else {
- return commandResult.writeErrors[0].errmsg;
- }
+'use strict';
+const dbpath = MongoRunner.dataPath + jsTest.name();
+resetDbpath(dbpath);
+const mongod = MongoRunner.runMongod({dbpath: dbpath});
+const db = mongod.getDB("test");
+const collName = jsTest.name();
+const coll = db.getCollection(collName);
+
+// How many units of work it takes to yield.
+const yieldIterations = 2;
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: yieldIterations}));
+const nDocs = yieldIterations + 2;
+
+/**
+ * Asserts that 'commandResult' indicates a command failure, and returns the error message.
+ */
+function assertContainsErrorMessage(commandResult) {
+ assert(commandResult.ok === 0 ||
+ (commandResult.ok === 1 && commandResult.writeErrors !== undefined),
+ 'expected command to fail: ' + tojson(commandResult));
+ if (commandResult.ok === 0) {
+ return commandResult.errmsg;
+ } else {
+ return commandResult.writeErrors[0].errmsg;
}
+}
- function setupCollection() {
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i, a: i});
- }
- assert.writeOK(bulk.execute());
- assert.commandWorked(coll.createIndex({a: 1}));
+function setupCollection() {
+ coll.drop();
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i, a: i});
}
+ assert.writeOK(bulk.execute());
+ assert.commandWorked(coll.createIndex({a: 1}));
+}
- /**
- * Asserts that the command given by 'cmdObj' will propagate a message from a PlanExecutor
- * failure back to the user.
- */
- function assertCommandPropogatesPlanExecutorFailure(cmdObj) {
- // Make sure the command propagates failure messages.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
- let res = db.runCommand(cmdObj);
- let errorMessage = assertContainsErrorMessage(res);
- assert.neq(errorMessage.indexOf("planExecutorAlwaysFails"),
- -1,
- "Expected error message to include 'planExecutorAlwaysFails', instead found: " +
- errorMessage);
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
- }
+/**
+ * Asserts that the command given by 'cmdObj' will propagate a message from a PlanExecutor
+ * failure back to the user.
+ */
+function assertCommandPropogatesPlanExecutorFailure(cmdObj) {
+ // Make sure the command propagates failure messages.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
+ let res = db.runCommand(cmdObj);
+ let errorMessage = assertContainsErrorMessage(res);
+ assert.neq(errorMessage.indexOf("planExecutorAlwaysFails"),
+ -1,
+ "Expected error message to include 'planExecutorAlwaysFails', instead found: " +
+ errorMessage);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
+}
- /**
- * Asserts that the command properly handles failure scenarios while using its PlanExecutor.
-     * Asserts that the appropriate error message is propagated if there is a failure during
- * execution, or if the plan was killed during execution. If 'options.commandYields' is false,
- * asserts that the PlanExecutor cannot be killed, and succeeds when run concurrently with any
- * of 'invalidatingCommands'.
- *
- * @param {Object} cmdObj - The command to run.
- * @param {Boolean} [options.commandYields=true] - Whether or not this command can yield during
- * execution.
- * @param {Object} [options.curOpFilter] - The query to use to find this operation in the
- * currentOp output. The default checks that all fields of cmdObj are in the curOp command.
- * @param {Function} [options.customSetup=undefined] - A callback to do any necessary setup
- * before the command can be run, like adding a geospatial index before a geoNear command.
- * @param {Boolean} [options.usesIndex] - True if this command should scan index {a: 1}, and
- * therefore should be killed if this index is dropped.
- */
- function assertCommandPropogatesPlanExecutorKillReason(cmdObj, options) {
- options = options || {};
-
- var curOpFilter = options.curOpFilter;
- if (!curOpFilter) {
- curOpFilter = {};
- for (var arg in cmdObj) {
- curOpFilter['command.' + arg] = {$eq: cmdObj[arg]};
- }
+/**
+ * Asserts that the command properly handles failure scenarios while using its PlanExecutor.
+ * Asserts that the appropriate error message is propagated if there is a failure during
+ * execution, or if the plan was killed during execution. If 'options.commandYields' is false,
+ * asserts that the PlanExecutor cannot be killed, and succeeds when run concurrently with any
+ * of 'invalidatingCommands'.
+ *
+ * @param {Object} cmdObj - The command to run.
+ * @param {Boolean} [options.commandYields=true] - Whether or not this command can yield during
+ * execution.
+ * @param {Object} [options.curOpFilter] - The query to use to find this operation in the
+ * currentOp output. The default checks that all fields of cmdObj are in the curOp command.
+ * @param {Function} [options.customSetup=undefined] - A callback to do any necessary setup
+ * before the command can be run, like adding a geospatial index before a geoNear command.
+ * @param {Boolean} [options.usesIndex] - True if this command should scan index {a: 1}, and
+ * therefore should be killed if this index is dropped.
+ */
+function assertCommandPropogatesPlanExecutorKillReason(cmdObj, options) {
+ options = options || {};
+
+ var curOpFilter = options.curOpFilter;
+ if (!curOpFilter) {
+ curOpFilter = {};
+ for (var arg in cmdObj) {
+ curOpFilter['command.' + arg] = {$eq: cmdObj[arg]};
}
+ }
- // These are commands that will cause all running PlanExecutors to be invalidated, and the
- // error messages that should be propagated when that happens.
- const invalidatingCommands = [
- {command: {dropDatabase: 1}, message: 'collection dropped'},
- {command: {drop: collName}, message: 'collection dropped'},
- ];
-
- if (options.usesIndex) {
- invalidatingCommands.push({
- command: {dropIndexes: collName, index: {a: 1}},
- message: 'index \'a_1\' dropped'
- });
- }
+ // These are commands that will cause all running PlanExecutors to be invalidated, and the
+ // error messages that should be propagated when that happens.
+ const invalidatingCommands = [
+ {command: {dropDatabase: 1}, message: 'collection dropped'},
+ {command: {drop: collName}, message: 'collection dropped'},
+ ];
+
+ if (options.usesIndex) {
+ invalidatingCommands.push(
+ {command: {dropIndexes: collName, index: {a: 1}}, message: 'index \'a_1\' dropped'});
+ }
- for (let invalidatingCommand of invalidatingCommands) {
- setupCollection();
- if (options.customSetup !== undefined) {
- options.customSetup();
- }
+ for (let invalidatingCommand of invalidatingCommands) {
+ setupCollection();
+ if (options.customSetup !== undefined) {
+ options.customSetup();
+ }
- // Enable a failpoint that causes PlanExecutors to hang during execution.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}));
+ // Enable a failpoint that causes PlanExecutors to hang during execution.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}));
- const canYield = options.commandYields === undefined || options.commandYields;
- // Start a parallel shell to run the command. This should hang until we unset the
- // failpoint.
- let awaitCmdFailure = startParallelShell(`
+ const canYield = options.commandYields === undefined || options.commandYields;
+ // Start a parallel shell to run the command. This should hang until we unset the
+ // failpoint.
+ let awaitCmdFailure = startParallelShell(`
let assertContainsErrorMessage = ${ assertContainsErrorMessage.toString() };
let res = db.runCommand(${ tojson(cmdObj) });
if (${ canYield }) {
@@ -130,94 +128,91 @@ if (${ canYield }) {
`,
mongod.port);
- // Wait until we can see the command running.
- assert.soon(
- function() {
- if (!canYield) {
- // The command won't yield, so we won't necessarily see it in currentOp.
- return true;
- }
- return db.currentOp({
- $and: [
- {
- ns: coll.getFullName(),
- numYields: {$gt: 0},
- },
- curOpFilter,
- ]
- }).inprog.length > 0;
- },
- function() {
- return 'expected to see command yielded in currentOp output. Command: ' +
- tojson(cmdObj) + '\n, currentOp output: ' + tojson(db.currentOp().inprog);
- });
-
- // Run the command that invalidates the PlanExecutor, then allow the PlanExecutor to
- // proceed.
- jsTestLog("Running invalidating command: " + tojson(invalidatingCommand.command));
- assert.commandWorked(db.runCommand(invalidatingCommand.command));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
- awaitCmdFailure();
- }
+ // Wait until we can see the command running.
+ assert.soon(
+ function() {
+ if (!canYield) {
+ // The command won't yield, so we won't necessarily see it in currentOp.
+ return true;
+ }
+ return db.currentOp({
+ $and: [
+ {
+ ns: coll.getFullName(),
+ numYields: {$gt: 0},
+ },
+ curOpFilter,
+ ]
+ }).inprog.length > 0;
+ },
+ function() {
+ return 'expected to see command yielded in currentOp output. Command: ' +
+ tojson(cmdObj) + '\n, currentOp output: ' + tojson(db.currentOp().inprog);
+ });
- setupCollection();
- if (options.customSetup !== undefined) {
- options.customSetup();
- }
- assertCommandPropogatesPlanExecutorFailure(cmdObj);
+ // Run the command that invalidates the PlanExecutor, then allow the PlanExecutor to
+ // proceed.
+ jsTestLog("Running invalidating command: " + tojson(invalidatingCommand.command));
+ assert.commandWorked(db.runCommand(invalidatingCommand.command));
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
+ awaitCmdFailure();
}
- // Disable aggregation's batching behavior, since that can prevent the PlanExecutor from being
- // active during the command that would have caused it to be killed.
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
- assertCommandPropogatesPlanExecutorKillReason({aggregate: collName, pipeline: [], cursor: {}});
- assertCommandPropogatesPlanExecutorKillReason(
- {aggregate: collName, pipeline: [{$match: {a: {$gte: 0}}}], cursor: {}}, {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason({dataSize: coll.getFullName()},
- {commandYields: false});
-
- assertCommandPropogatesPlanExecutorKillReason("dbHash", {commandYields: false});
-
- assertCommandPropogatesPlanExecutorKillReason({count: collName, query: {a: {$gte: 0}}},
- {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {distinct: collName, key: "_id", query: {a: {$gte: 0}}}, {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {findAndModify: collName, query: {fakeField: {$gt: 0}}, update: {$inc: {a: 1}}});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {
- aggregate: collName,
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- distanceField: "dis"
- }
- }]
- },
- {
- customSetup: function() {
- assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
- }
- });
-
- assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {}});
- assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {a: {$gte: 0}}},
- {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}}]},
- {curOpFilter: {op: 'update'}, usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {delete: coll.getName(), deletes: [{q: {a: {$gte: 0}}, limit: 0}]},
- {curOpFilter: {op: 'remove'}, usesIndex: true});
- MongoRunner.stopMongod(mongod);
+ setupCollection();
+ if (options.customSetup !== undefined) {
+ options.customSetup();
+ }
+ assertCommandPropogatesPlanExecutorFailure(cmdObj);
+}
+
+// Disable aggregation's batching behavior, since that can prevent the PlanExecutor from being
+// active during the command that would have caused it to be killed.
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
+assertCommandPropogatesPlanExecutorKillReason({aggregate: collName, pipeline: [], cursor: {}});
+assertCommandPropogatesPlanExecutorKillReason(
+ {aggregate: collName, pipeline: [{$match: {a: {$gte: 0}}}], cursor: {}}, {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason({dataSize: coll.getFullName()},
+ {commandYields: false});
+
+assertCommandPropogatesPlanExecutorKillReason("dbHash", {commandYields: false});
+
+assertCommandPropogatesPlanExecutorKillReason({count: collName, query: {a: {$gte: 0}}},
+ {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {distinct: collName, key: "_id", query: {a: {$gte: 0}}}, {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {findAndModify: collName, query: {fakeField: {$gt: 0}}, update: {$inc: {a: 1}}});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {
+ aggregate: collName,
+ cursor: {},
+ pipeline: [{
+ $geoNear:
+ {near: {type: "Point", coordinates: [0, 0]}, spherical: true, distanceField: "dis"}
+ }]
+ },
+ {
+ customSetup: function() {
+ assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
+ }
+ });
+
+assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {}});
+assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {a: {$gte: 0}}},
+ {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}}]},
+ {curOpFilter: {op: 'update'}, usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {delete: coll.getName(), deletes: [{q: {a: {$gte: 0}}, limit: 0}]},
+ {curOpFilter: {op: 'remove'}, usesIndex: true});
+MongoRunner.stopMongod(mongod);
})();
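The kill path above compresses to: make the executor yield early, hang it at the yield point, invalidate it, then release it. A sketch under the same failpoint and server parameter names used by the test (collection name and document count are illustrative):

const conn = MongoRunner.runMongod({});
const testDB = conn.getDB("test");
// Yield after every couple of work cycles so the plan reaches a yield point quickly.
assert.commandWorked(
    testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
assert.writeOK(testDB.c.insert([{a: 1}, {a: 2}, {a: 3}, {a: 4}]));
// Hang every PlanExecutor while its locks are relinquished at a yield.
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}));
const awaitShell = startParallelShell(
    "assert.commandFailed(db.runCommand({find: 'c', filter: {}}));", conn.port);
// Wait for the find to be hung mid-yield, then invalidate its plan.
assert.soon(() => testDB.currentOp({ns: "test.c", numYields: {$gt: 0}}).inprog.length > 0);
assert.commandWorked(testDB.runCommand({drop: "c"}));
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
awaitShell();
MongoRunner.stopMongod(conn);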
diff --git a/jstests/noPassthrough/commands_preserve_exec_error_code.js b/jstests/noPassthrough/commands_preserve_exec_error_code.js
index fadb4d55116..3925b74b551 100644
--- a/jstests/noPassthrough/commands_preserve_exec_error_code.js
+++ b/jstests/noPassthrough/commands_preserve_exec_error_code.js
@@ -3,47 +3,46 @@
// 'InterruptedDueToReplStateChange',
// and also to ensure that the error is not swallowed and the diagnostic info is not lost.
(function() {
- "use strict";
+"use strict";
- const mongod = MongoRunner.runMongod({});
- assert.neq(mongod, null, "mongod failed to start up");
- const db = mongod.getDB("test");
- const coll = db.commands_preserve_exec_error_code;
- coll.drop();
+const mongod = MongoRunner.runMongod({});
+assert.neq(mongod, null, "mongod failed to start up");
+const db = mongod.getDB("test");
+const coll = db.commands_preserve_exec_error_code;
+coll.drop();
- assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- assert.commandWorked(coll.createIndex({geo: "2d"}));
+assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+assert.commandWorked(coll.createIndex({geo: "2d"}));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
- function assertFailsWithInternalError(fn) {
- const error = assert.throws(fn);
- assert.eq(error.code, ErrorCodes.InternalError, tojson(error));
- assert.neq(-1,
- error.message.indexOf("planExecutorAlwaysFails"),
- "Expected error message to be preserved");
- }
- function assertCmdFailsWithInternalError(cmd) {
- const res =
- assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.InternalError, tojson(cmd));
- assert.neq(-1,
- res.errmsg.indexOf("planExecutorAlwaysFails"),
- "Expected error message to be preserved");
- }
+function assertFailsWithInternalError(fn) {
+ const error = assert.throws(fn);
+ assert.eq(error.code, ErrorCodes.InternalError, tojson(error));
+ assert.neq(-1,
+ error.message.indexOf("planExecutorAlwaysFails"),
+ "Expected error message to be preserved");
+}
+function assertCmdFailsWithInternalError(cmd) {
+ const res =
+ assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.InternalError, tojson(cmd));
+ assert.neq(-1,
+ res.errmsg.indexOf("planExecutorAlwaysFails"),
+ "Expected error message to be preserved");
+}
- assertFailsWithInternalError(() => coll.find().itcount());
- assertFailsWithInternalError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
- assertFailsWithInternalError(() => coll.deleteOne({_id: 1}));
- assertFailsWithInternalError(() => coll.count({_id: 1}));
- assertFailsWithInternalError(() => coll.aggregate([]).itcount());
- assertFailsWithInternalError(
- () => coll.aggregate([{$geoNear: {near: [0, 0], distanceField: "d"}}]).itcount());
- assertCmdFailsWithInternalError({distinct: coll.getName(), key: "_id"});
- assertCmdFailsWithInternalError(
- {findAndModify: coll.getName(), query: {_id: 1}, update: {$set: {x: 2}}});
+assertFailsWithInternalError(() => coll.find().itcount());
+assertFailsWithInternalError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
+assertFailsWithInternalError(() => coll.deleteOne({_id: 1}));
+assertFailsWithInternalError(() => coll.count({_id: 1}));
+assertFailsWithInternalError(() => coll.aggregate([]).itcount());
+assertFailsWithInternalError(
+ () => coll.aggregate([{$geoNear: {near: [0, 0], distanceField: "d"}}]).itcount());
+assertCmdFailsWithInternalError({distinct: coll.getName(), key: "_id"});
+assertCmdFailsWithInternalError(
+ {findAndModify: coll.getName(), query: {_id: 1}, update: {$set: {x: 2}}});
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
- MongoRunner.stopMongod(mongod);
+assert.commandWorked(db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
+MongoRunner.stopMongod(mongod);
}());
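The same preservation check in isolation, as a sketch; the collection name is illustrative and the failpoint is the one the test enables:

const conn = MongoRunner.runMongod({});
const testDB = conn.getDB("test");
assert.writeOK(testDB.c.insert({_id: 0}));
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
// The command must fail with InternalError and keep the failpoint's name in errmsg.
const res = assert.commandFailedWithCode(
    testDB.runCommand({find: "c", filter: {}}), ErrorCodes.InternalError);
assert.neq(-1, res.errmsg.indexOf("planExecutorAlwaysFails"));
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
MongoRunner.stopMongod(conn);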
diff --git a/jstests/noPassthrough/commit_quorum.js b/jstests/noPassthrough/commit_quorum.js
index 7d4366fc798..58183f1ab1c 100644
--- a/jstests/noPassthrough/commit_quorum.js
+++ b/jstests/noPassthrough/commit_quorum.js
@@ -4,97 +4,97 @@
* @tags: [requires_replication]
*/
(function() {
- load("jstests/noPassthrough/libs/index_build.js");
- load("jstests/libs/check_log.js");
-
- const replSet = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+load("jstests/noPassthrough/libs/index_build.js");
+load("jstests/libs/check_log.js");
+
+const replSet = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
-
- // Allow the createIndexes command to use the index builds coordinator in single-phase mode.
- replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
- replSet.initiate();
-
- const testDB = replSet.getPrimary().getDB('test');
- const coll = testDB.twoPhaseIndexBuild;
-
- const bulk = coll.initializeUnorderedBulkOp();
- const numDocs = 1000;
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({a: i, b: i});
- }
- assert.commandWorked(bulk.execute());
-
- const collName = "createIndexes";
-
- // Use createIndex(es) to build indexes and check the commit quorum.
- let res = assert.commandWorked(testDB[collName].createIndex({x: 1}));
- assert.eq(2, res.commitQuorum);
-
- res = assert.commandWorked(testDB[collName].createIndex({y: 1}, {}, 1));
- assert.eq(1, res.commitQuorum);
-
- res = assert.commandWorked(testDB[collName].createIndexes([{i: 1}]));
- assert.eq(2, res.commitQuorum);
-
- res = assert.commandWorked(testDB[collName].createIndexes([{j: 1}], {}, 1));
- assert.eq(1, res.commitQuorum);
-
- replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
-
- let awaitShell;
- try {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "alwaysOn"}));
-
- // Starts parallel shell to run the command that will hang.
- awaitShell = startParallelShell(function() {
- // Use the index builds coordinator for a two-phase index build.
- assert.commandWorked(db.runCommand({
- twoPhaseCreateIndexes: 'twoPhaseIndexBuild',
- indexes: [{key: {a: 1}, name: 'a_1'}],
- commitQuorum: "majority"
- }));
- }, testDB.getMongo().port);
-
- checkLog.containsWithCount(replSet.getPrimary(), "Waiting for index build to complete", 5);
-
- // Test setting various commit quorums on the index build in our two node replica set.
- assert.commandFailed(testDB.runCommand(
- {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 3}));
- assert.commandFailed(testDB.runCommand({
- setIndexCommitQuorum: 'twoPhaseIndexBuild',
- indexNames: ['a_1'],
- commitQuorum: "someTag"
- }));
+ },
+ ]
+});
- assert.commandWorked(testDB.runCommand(
- {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 0}));
- assert.commandWorked(testDB.runCommand(
- {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 2}));
- assert.commandWorked(testDB.runCommand({
- setIndexCommitQuorum: 'twoPhaseIndexBuild',
- indexNames: ['a_1'],
- commitQuorum: "majority"
- }));
- } finally {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "off"}));
- }
+// Allow the createIndexes command to use the index builds coordinator in single-phase mode.
+replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
+replSet.initiate();
+
+const testDB = replSet.getPrimary().getDB('test');
+const coll = testDB.twoPhaseIndexBuild;
+
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 1000;
+for (let i = 0; i < numDocs; i++) {
+ bulk.insert({a: i, b: i});
+}
+assert.commandWorked(bulk.execute());
+
+const collName = "createIndexes";
- // Wait for the parallel shell to complete.
- awaitShell();
+// Use createIndex(es) to build indexes and check the commit quorum.
+let res = assert.commandWorked(testDB[collName].createIndex({x: 1}));
+assert.eq(2, res.commitQuorum);
- IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+res = assert.commandWorked(testDB[collName].createIndex({y: 1}, {}, 1));
+assert.eq(1, res.commitQuorum);
- replSet.stopSet();
+res = assert.commandWorked(testDB[collName].createIndexes([{i: 1}]));
+assert.eq(2, res.commitQuorum);
+
+res = assert.commandWorked(testDB[collName].createIndexes([{j: 1}], {}, 1));
+assert.eq(1, res.commitQuorum);
+
+replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
+
+let awaitShell;
+try {
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "alwaysOn"}));
+
+ // Starts parallel shell to run the command that will hang.
+ awaitShell = startParallelShell(function() {
+ // Use the index builds coordinator for a two-phase index build.
+ assert.commandWorked(db.runCommand({
+ twoPhaseCreateIndexes: 'twoPhaseIndexBuild',
+ indexes: [{key: {a: 1}, name: 'a_1'}],
+ commitQuorum: "majority"
+ }));
+ }, testDB.getMongo().port);
+
+ checkLog.containsWithCount(replSet.getPrimary(), "Waiting for index build to complete", 5);
+
+ // Test setting various commit quorums on the index build in our two node replica set.
+ assert.commandFailed(testDB.runCommand(
+ {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 3}));
+ assert.commandFailed(testDB.runCommand({
+ setIndexCommitQuorum: 'twoPhaseIndexBuild',
+ indexNames: ['a_1'],
+ commitQuorum: "someTag"
+ }));
+
+ assert.commandWorked(testDB.runCommand(
+ {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 0}));
+ assert.commandWorked(testDB.runCommand(
+ {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 2}));
+ assert.commandWorked(testDB.runCommand({
+ setIndexCommitQuorum: 'twoPhaseIndexBuild',
+ indexNames: ['a_1'],
+ commitQuorum: "majority"
+ }));
+} finally {
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "off"}));
+}
+
+// Wait for the parallel shell to complete.
+awaitShell();
+
+IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+
+replSet.stopSet();
})();
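The commit-quorum defaults asserted above depend on the topology: with one voting and one non-voting data-bearing node, the default quorum is still 2, i.e. all data-bearing nodes. A sketch reusing the test's topology and the enableIndexBuildsCoordinatorForCreateIndexesCommand parameter (index keys are illustrative):

const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
rst.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
rst.initiate();
const testDB = rst.getPrimary().getDB("test");
assert.writeOK(testDB.c.insert({a: 1, b: 1}));
// Default commit quorum: every data-bearing node, voting or not.
let res = assert.commandWorked(testDB.c.createIndex({a: 1}));
assert.eq(2, res.commitQuorum);
// createIndex takes an explicit commitQuorum as its third argument.
res = assert.commandWorked(testDB.c.createIndex({b: 1}, {}, 1));
assert.eq(1, res.commitQuorum);
rst.stopSet();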
diff --git a/jstests/noPassthrough/compression_options.js b/jstests/noPassthrough/compression_options.js
index c6f4ccadc68..db4b9b4d050 100644
--- a/jstests/noPassthrough/compression_options.js
+++ b/jstests/noPassthrough/compression_options.js
@@ -1,43 +1,42 @@
// Tests --networkMessageCompressors options.
(function() {
- 'use strict';
-
- var runTest = function(optionValue, expected) {
- jsTest.log("Testing with --networkMessageCompressors=\"" + optionValue + "\" expecting: " +
- expected);
- var mongo = MongoRunner.runMongod({networkMessageCompressors: optionValue});
- assert.commandWorked(mongo.adminCommand({isMaster: 1}));
- clearRawMongoProgramOutput();
- assert.eq(runMongoProgram("mongo",
- "--eval",
- "tostrictjson(db.isMaster());",
- "--port",
- mongo.port,
- "--networkMessageCompressors=snappy"),
- 0);
-
- var output = rawMongoProgramOutput()
- .split("\n")
- .map(function(str) {
- str = str.replace(/^sh[0-9]+\| /, "");
- if (!/^{/.test(str)) {
- return "";
- }
- return str;
- })
- .join("\n")
- .trim();
-
- output = JSON.parse(output);
-
- assert.eq(output.compression, expected);
- MongoRunner.stopMongod(mongo);
- };
-
- assert.isnull(MongoRunner.runMongod({networkMessageCompressors: "snappy,disabled"}));
-
- runTest("snappy", ["snappy"]);
- runTest("disabled", undefined);
-
+'use strict';
+
+var runTest = function(optionValue, expected) {
+ jsTest.log("Testing with --networkMessageCompressors=\"" + optionValue +
+ "\" expecting: " + expected);
+ var mongo = MongoRunner.runMongod({networkMessageCompressors: optionValue});
+ assert.commandWorked(mongo.adminCommand({isMaster: 1}));
+ clearRawMongoProgramOutput();
+ assert.eq(runMongoProgram("mongo",
+ "--eval",
+ "tostrictjson(db.isMaster());",
+ "--port",
+ mongo.port,
+ "--networkMessageCompressors=snappy"),
+ 0);
+
+ var output = rawMongoProgramOutput()
+ .split("\n")
+ .map(function(str) {
+ str = str.replace(/^sh[0-9]+\| /, "");
+ if (!/^{/.test(str)) {
+ return "";
+ }
+ return str;
+ })
+ .join("\n")
+ .trim();
+
+ output = JSON.parse(output);
+
+ assert.eq(output.compression, expected);
+ MongoRunner.stopMongod(mongo);
+};
+
+assert.isnull(MongoRunner.runMongod({networkMessageCompressors: "snappy,disabled"}));
+
+runTest("snappy", ["snappy"]);
+runTest("disabled", undefined);
}());
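Two behaviors above are worth calling out: mixing "disabled" with a concrete compressor is rejected at startup, and the compression field of the isMaster reply reports what was actually negotiated. A sketch (the --eval body is illustrative):

const server = MongoRunner.runMongod({networkMessageCompressors: "snappy"});
assert.commandWorked(server.adminCommand({isMaster: 1}));
// A shell that asks for snappy against a snappy server negotiates it.
assert.eq(0, runMongoProgram("mongo",
                             "--port", server.port,
                             "--networkMessageCompressors=snappy",
                             "--eval",
                             "assert(db.isMaster().compression.includes('snappy'));"));
MongoRunner.stopMongod(server);
// "disabled" cannot be combined with a real compressor.
assert.isnull(MongoRunner.runMongod({networkMessageCompressors: "snappy,disabled"}));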
diff --git a/jstests/noPassthrough/configExpand_exec_digest.js b/jstests/noPassthrough/configExpand_exec_digest.js
index 90457f70dc8..e01c1fcd1d6 100644
--- a/jstests/noPassthrough/configExpand_exec_digest.js
+++ b/jstests/noPassthrough/configExpand_exec_digest.js
@@ -1,60 +1,57 @@
// Test config file expansion using EXEC with digests.
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/configExpand/lib.js');
-
- // hash === SHA256HMAC('12345', 'secret')
- const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
-
- // Simple positive case.
- configExpandSuccess({
- setParameter: {
- scramIterationCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726574'}
- }
- });
-
- // Invalid digest length.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: '123', digest_key: '736563726574'}
- }
- },
- /digest: Not a valid, even length hex string/);
-
- // Invalid characters.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563X26574'}
- }
- },
- /digest_key: Not a valid, even length hex string/);
-
- // Digest without key.
- configExpandFailure(
- {setParameter: {scramIteratorCount: {__exec: makeReflectionCmd('12345'), digest: hash}}},
- /digest requires digest_key/);
-
- // Empty digest_key.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: ''}
- }
- },
- /digest_key must not be empty/);
-
- // Mismatched digests.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726575'}
- }
- },
- /does not match expected digest/);
-
+'use strict';
+
+load('jstests/noPassthrough/libs/configExpand/lib.js');
+
+// hash === SHA256HMAC('12345', 'secret')
+const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
+
+// Simple positive case.
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount:
+ {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726574'}
+ }
+});
+
+// Invalid digest length.
+configExpandFailure({
+ setParameter: {
+ scramIteratorCount:
+ {__exec: makeReflectionCmd('12345'), digest: '123', digest_key: '736563726574'}
+ }
+},
+ /digest: Not a valid, even length hex string/);
+
+// Invalid characters.
+configExpandFailure({
+ setParameter: {
+ scramIteratorCount:
+ {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563X26574'}
+ }
+},
+ /digest_key: Not a valid, even length hex string/);
+
+// Digest without key.
+configExpandFailure(
+ {setParameter: {scramIteratorCount: {__exec: makeReflectionCmd('12345'), digest: hash}}},
+ /digest requires digest_key/);
+
+// Empty digest_key.
+configExpandFailure({
+ setParameter:
+ {scramIteratorCount: {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: ''}}
+},
+ /digest_key must not be empty/);
+
+// Mismatched digests.
+configExpandFailure({
+ setParameter: {
+ scramIteratorCount:
+ {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726575'}
+ }
+},
+ /does not match expected digest/);
})();
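Mechanically, the digest check works like this: mongod runs the __exec command, computes SHA256HMAC(output, key) with the key decoded from the hex digest_key, and refuses to start unless the result equals digest, as the failure regexes above indicate. A positive sketch built from the test's own helpers and hash (the key '736563726574' is hex for 'secret'):

load('jstests/noPassthrough/libs/configExpand/lib.js');
// SHA256HMAC('12345', 'secret'), as given in the test above.
const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
configExpandSuccess({
    setParameter: {
        scramIterationCount: {
            __exec: makeReflectionCmd('12345'),  // a command whose stdout is exactly '12345'
            digest: hash,
            digest_key: '736563726574'  // hex('secret')
        }
    }
});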
diff --git a/jstests/noPassthrough/configExpand_exec_noexpand.js b/jstests/noPassthrough/configExpand_exec_noexpand.js
index 03e147f036a..4b07036b9c6 100644
--- a/jstests/noPassthrough/configExpand_exec_noexpand.js
+++ b/jstests/noPassthrough/configExpand_exec_noexpand.js
@@ -1,27 +1,29 @@
// Test config file expansion using EXEC.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- // Unexpected elements.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345'), foo: 'bar'},
- }
- },
- /expansion block must contain only '__exec'/);
+// Unexpected elements.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345'), foo: 'bar'},
+ }
+},
+ /expansion block must contain only '__exec'/);
- const sicReflect = {setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}};
+const sicReflect = {
+ setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect);
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect);
- // Expansion not enabled.
- configExpandFailure(sicReflect, /__exec support has not been enabled/, {configExpand: 'none'});
+// Expansion not enabled.
+configExpandFailure(sicReflect, /__exec support has not been enabled/, {configExpand: 'none'});
- // Expansion enabled, but not recursively.
- configExpandFailure({__exec: makeReflectionCmd(jsToYaml(sicReflect)), type: 'yaml'},
- /__exec support has not been enabled/);
+// Expansion enabled, but not recursively.
+configExpandFailure({__exec: makeReflectionCmd(jsToYaml(sicReflect)), type: 'yaml'},
+ /__exec support has not been enabled/);
})();
diff --git a/jstests/noPassthrough/configExpand_exec_permissions.js b/jstests/noPassthrough/configExpand_exec_permissions.js
index 2aed009eda9..4563d5d20f1 100644
--- a/jstests/noPassthrough/configExpand_exec_permissions.js
+++ b/jstests/noPassthrough/configExpand_exec_permissions.js
@@ -3,30 +3,32 @@
// but that's impractical in a test suite where we're not running as root.
(function() {
- 'use strict';
+'use strict';
- if (_isWindows()) {
- print("Skipping test on windows");
- return;
- }
+if (_isWindows()) {
+ print("Skipping test on windows");
+ return;
+}
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const sicReflect = {setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}};
+const sicReflect = {
+ setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o600});
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o600});
- // Still successful if readable by others, but not writable.
- configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o644});
+// Still successful if readable by others, but not writable.
+configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o644});
- // Fail if writable by others.
- const expect = /is writable by non-owner users/;
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o666});
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o622});
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o660});
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o606});
+// Fail if writable by others.
+const expect = /is writable by non-owner users/;
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o666});
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o622});
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o660});
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o606});
- // Explicitly world-readable/writable config file without expansions should be fine.
- configExpandSuccess({}, null, {configExpand: 'none', chmod: 0o666});
+// Explicitly world-readable/writable config file without expansions should be fine.
+configExpandSuccess({}, null, {configExpand: 'none', chmod: 0o666});
})();
diff --git a/jstests/noPassthrough/configExpand_exec_timeeout.js b/jstests/noPassthrough/configExpand_exec_timeeout.js
index 7434790fc3f..72108855d68 100644
--- a/jstests/noPassthrough/configExpand_exec_timeeout.js
+++ b/jstests/noPassthrough/configExpand_exec_timeeout.js
@@ -1,31 +1,31 @@
// Test config file expansion using EXEC.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- // Sleep 10 seconds during request.
- configExpandSuccess({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
- }
- });
+// Sleep 10 seconds during request.
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
+ }
+});
- // Sleep 40 seconds during request, with default 30 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 40})},
- }
- },
- /Timeout expired/);
+// Sleep 40 seconds during request, with default 30 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 40})},
+ }
+},
+ /Timeout expired/);
- // Sleep 10 seconds during request, with custom 5 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
- }
- },
- /Timeout expired/,
- {configExpandTimeoutSecs: 5});
+// Sleep 10 seconds during request, with custom 5 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
+ }
+},
+ /Timeout expired/,
+ {configExpandTimeoutSecs: 5});
})();
diff --git a/jstests/noPassthrough/configExpand_exec_values.js b/jstests/noPassthrough/configExpand_exec_values.js
index 21b9e493ea1..f4c85b3713d 100644
--- a/jstests/noPassthrough/configExpand_exec_values.js
+++ b/jstests/noPassthrough/configExpand_exec_values.js
@@ -1,28 +1,27 @@
// Test config file expansion using EXEC.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
-
- // Basic success case
- configExpandSuccess(
- {
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345')},
- scramSHA256IterationCount:
- {__exec: makeReflectionCmd("23456\n"), type: 'string', trim: 'whitespace'}
- }
- },
- function(admin) {
- const response = assert.commandWorked(admin.runCommand(
- {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
- assert.eq(response.scramIterationCount,
- 12345,
- "Incorrect derived config value for scramIterationCount");
- assert.eq(response.scramSHA256IterationCount,
- 23456,
- "Incorrect derived config value scramSHA256IterationCount");
- });
+load('jstests/noPassthrough/libs/configExpand/lib.js');
+// Basic success case
+configExpandSuccess(
+ {
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345')},
+ scramSHA256IterationCount:
+ {__exec: makeReflectionCmd("23456\n"), type: 'string', trim: 'whitespace'}
+ }
+ },
+ function(admin) {
+ const response = assert.commandWorked(admin.runCommand(
+ {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
+ assert.eq(response.scramIterationCount,
+ 12345,
+ "Incorrect derived config value for scramIterationCount");
+ assert.eq(response.scramSHA256IterationCount,
+ 23456,
+ "Incorrect derived config value scramSHA256IterationCount");
+ });
})();
diff --git a/jstests/noPassthrough/configExpand_exec_wholeconfig.js b/jstests/noPassthrough/configExpand_exec_wholeconfig.js
index 9fac3848271..f4c0cf5dd78 100644
--- a/jstests/noPassthrough/configExpand_exec_wholeconfig.js
+++ b/jstests/noPassthrough/configExpand_exec_wholeconfig.js
@@ -1,14 +1,14 @@
// Test config file expansion using EXEC at top level.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
- configExpandSuccess({__exec: makeReflectionCmd(yamlConfig), type: 'yaml'}, function(admin) {
- const response =
- assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
- assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
- });
+const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
+configExpandSuccess({__exec: makeReflectionCmd(yamlConfig), type: 'yaml'}, function(admin) {
+ const response =
+ assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
+ assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
+});
})();
diff --git a/jstests/noPassthrough/configExpand_rest_noexpand.js b/jstests/noPassthrough/configExpand_rest_noexpand.js
index d80f4c33ae6..28200e032dd 100644
--- a/jstests/noPassthrough/configExpand_rest_noexpand.js
+++ b/jstests/noPassthrough/configExpand_rest_noexpand.js
@@ -2,37 +2,35 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- // Unexpected elements.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345'), foo: 'bar'},
- }
- },
- /expansion block must contain only '__rest'/);
+// Unexpected elements.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345'), foo: 'bar'},
+ }
+},
+ /expansion block must contain only '__rest'/);
- const sicReflect = {
- setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
- };
+const sicReflect = {
+ setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect);
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect);
- // Expansion not enabled.
- configExpandFailure(sicReflect, /__rest support has not been enabled/, {configExpand: 'none'});
+// Expansion not enabled.
+configExpandFailure(sicReflect, /__rest support has not been enabled/, {configExpand: 'none'});
- // Expansion enabled, but not recursively.
- configExpandFailure({
- __rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(jsToYaml(sicReflect)),
- type: 'yaml'
- },
- /__rest support has not been enabled/);
+// Expansion enabled, but not recursively.
+configExpandFailure(
+ {__rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(jsToYaml(sicReflect)), type: 'yaml'},
+ /__rest support has not been enabled/);
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_permissions.js b/jstests/noPassthrough/configExpand_rest_permissions.js
index 318dd083bab..49749dddb9e 100644
--- a/jstests/noPassthrough/configExpand_rest_permissions.js
+++ b/jstests/noPassthrough/configExpand_rest_permissions.js
@@ -2,34 +2,34 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- if (_isWindows()) {
- print("Skipping test on windows");
- return;
- }
+if (_isWindows()) {
+ print("Skipping test on windows");
+ return;
+}
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- const sicReflect = {
- setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
- };
+const sicReflect = {
+ setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o600});
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o600});
- // Still successful if writable by others, but not readable.
- configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o622});
+// Still successful if writable by others, but not readable.
+configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o622});
- // Fail if readable by others.
- const expect = /is readable by non-owner users/;
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o666});
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o644});
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o660});
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o606});
+// Fail if readable by others.
+const expect = /is readable by non-owner users/;
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o666});
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o644});
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o660});
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o606});
- web.stop();
+web.stop();
})();
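The permission rules for the two expansion modes are deliberately opposite, and the exec tests earlier in this diff show the mirror image: an exec config must not be writable by non-owners (someone could swap in a different command), while a rest config must not be readable by non-owners (the URL may carry credentials). A sketch contrasting the accepted and rejected modes on the rest side (skipped on Windows in the real suite):

load('jstests/noPassthrough/libs/configExpand/lib.js');
const web = new ConfigExpandRestServer();
web.start();
const restReflect =
    {setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}};
// Others may write (0o622) but must not read (0o644).
configExpandSuccess(restReflect, null, {configExpand: 'rest', chmod: 0o622});
configExpandFailure(
    restReflect, /is readable by non-owner users/, {configExpand: 'rest', chmod: 0o644});
web.stop();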
diff --git a/jstests/noPassthrough/configExpand_rest_timeout.js b/jstests/noPassthrough/configExpand_rest_timeout.js
index 532ce4e6283..5c193c94de1 100644
--- a/jstests/noPassthrough/configExpand_rest_timeout.js
+++ b/jstests/noPassthrough/configExpand_rest_timeout.js
@@ -2,36 +2,36 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- // Sleep 10 seconds during request.
- configExpandSuccess({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
- }
- });
+// Sleep 10 seconds during request.
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
+ }
+});
- // Sleep 40 seconds during request, with default 30 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 40})},
- }
- },
- /Timeout was reached/);
+// Sleep 40 seconds during request, with default 30 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 40})},
+ }
+},
+ /Timeout was reached/);
- // Sleep 10 seconds during request, with custom 5 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
- }
- },
- /Timeout was reached/,
- {configExpandTimeoutSecs: 5});
+// Sleep 10 seconds during request, with custom 5 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
+ }
+},
+ /Timeout was reached/,
+ {configExpandTimeoutSecs: 5});
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_values.js b/jstests/noPassthrough/configExpand_rest_values.js
index 7aa56dbfb77..6ffebe592f7 100644
--- a/jstests/noPassthrough/configExpand_rest_values.js
+++ b/jstests/noPassthrough/configExpand_rest_values.js
@@ -2,47 +2,42 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- // Basic success case
- configExpandSuccess({
+// Basic success case
+configExpandSuccess(
+ {
setParameter: {
scramIterationCount: {__rest: web.getStringReflectionURL('12345')},
scramSHA256IterationCount:
{__rest: web.getStringReflectionURL('23456'), type: 'string', trim: 'whitespace'}
}
},
- function(admin) {
- const response = assert.commandWorked(admin.runCommand({
- getParameter: 1,
- scramIterationCount: 1,
- scramSHA256IterationCount: 1
- }));
- assert.eq(response.scramIterationCount,
- 12345,
- "Incorrect derived config value for scramIterationCount");
- assert.eq(response.scramSHA256IterationCount,
- 23456,
- "Incorrect derived config value scramSHA256IterationCount");
- });
-
- // With digest
- // SHA256HMAC('12345', 'secret')
- const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
- configExpandSuccess({
- setParameter: {
- scramIterationCount: {
- __rest: web.getStringReflectionURL('12345'),
- digest: hash,
- digest_key: '736563726574'
- }
- }
+ function(admin) {
+ const response = assert.commandWorked(admin.runCommand(
+ {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
+ assert.eq(response.scramIterationCount,
+ 12345,
+ "Incorrect derived config value for scramIterationCount");
+ assert.eq(response.scramSHA256IterationCount,
+ 23456,
+ "Incorrect derived config value scramSHA256IterationCount");
});
- web.stop();
+// With digest
+// SHA256HMAC('12345', 'secret')
+const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
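+// ('736563726574' below is simply the hex encoding of the HMAC key 'secret'.)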
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount:
+ {__rest: web.getStringReflectionURL('12345'), digest: hash, digest_key: '736563726574'}
+ }
+});
+
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_wholeconfig.js b/jstests/noPassthrough/configExpand_rest_wholeconfig.js
index 9be592e5eff..e4d6b87cfdc 100644
--- a/jstests/noPassthrough/configExpand_rest_wholeconfig.js
+++ b/jstests/noPassthrough/configExpand_rest_wholeconfig.js
@@ -2,21 +2,21 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
- configExpandSuccess(
- {__rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(yamlConfig), type: 'yaml'},
- function(admin) {
- const response =
- assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
- assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
- });
+const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
+configExpandSuccess(
+ {__rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(yamlConfig), type: 'yaml'},
+ function(admin) {
+ const response =
+ assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
+ assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
+ });
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/count_helper_read_preference.js b/jstests/noPassthrough/count_helper_read_preference.js
index 25aa019462f..28762ca26ee 100644
--- a/jstests/noPassthrough/count_helper_read_preference.js
+++ b/jstests/noPassthrough/count_helper_read_preference.js
@@ -1,50 +1,49 @@
// Tests that the read preference set on the connection is used when we call the count helper.
(function() {
- "use strict";
-
- var commandsRan = [];
-
- // Create a new DB object backed by a mock connection.
- function MockMongo() {
- this.getMinWireVersion = function getMinWireVersion() {
- return 0;
- };
-
- this.getMaxWireVersion = function getMaxWireVersion() {
- return 0;
- };
- }
- MockMongo.prototype = Mongo.prototype;
- MockMongo.prototype.runCommand = function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1, n: 100};
+"use strict";
+
+var commandsRan = [];
+
+// Create a new DB object backed by a mock connection.
+function MockMongo() {
+ this.getMinWireVersion = function getMinWireVersion() {
+ return 0;
};
- const mockMongo = new MockMongo();
- var db = new DB(mockMongo, "test");
+ this.getMaxWireVersion = function getMaxWireVersion() {
+ return 0;
+ };
+}
+MockMongo.prototype = Mongo.prototype;
+MockMongo.prototype.runCommand = function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1, n: 100};
+};
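+// The mock records every command issued through it in 'commandsRan' and answers each with a
+// canned {ok: 1, n: 100} response, so the test can inspect exactly what the count helper sent.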
- // Attach a dummy implicit session because the mock connection cannot create sessions.
- db._session = new _DummyDriverSession(mockMongo);
+const mockMongo = new MockMongo();
+var db = new DB(mockMongo, "test");
- assert.eq(commandsRan.length, 0);
+// Attach a dummy implicit session because the mock connection cannot create sessions.
+db._session = new _DummyDriverSession(mockMongo);
- // Run a count with no readPref.
- db.getMongo().setReadPref(null);
- db.foo.count();
+assert.eq(commandsRan.length, 0);
- // Check that there is no readPref on the command document.
- assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd, {count: "foo", query: {}});
+// Run a count with no readPref.
+db.getMongo().setReadPref(null);
+db.foo.count();
- commandsRan = [];
+// Check that there is no readPref on the command document.
+assert.eq(commandsRan.length, 1);
+assert.docEq(commandsRan[0].cmd, {count: "foo", query: {}});
- // Run with readPref secondary.
- db.getMongo().setReadPref("secondary");
- db.foo.count();
+commandsRan = [];
- // Check that we have wrapped the command and attached the read preference.
- assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd,
- {query: {count: "foo", query: {}}, $readPreference: {mode: "secondary"}});
+// Run with readPref secondary.
+db.getMongo().setReadPref("secondary");
+db.foo.count();
+// Check that we have wrapped the command and attached the read preference.
+assert.eq(commandsRan.length, 1);
+assert.docEq(commandsRan[0].cmd,
+ {query: {count: "foo", query: {}}, $readPreference: {mode: "secondary"}});
})();
diff --git a/jstests/noPassthrough/create_view_does_not_take_database_X.js b/jstests/noPassthrough/create_view_does_not_take_database_X.js
index e7615152d5a..e35cae01e10 100644
--- a/jstests/noPassthrough/create_view_does_not_take_database_X.js
+++ b/jstests/noPassthrough/create_view_does_not_take_database_X.js
@@ -5,29 +5,29 @@
*/
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let db = rst.getPrimary().getDB("test");
+let db = rst.getPrimary().getDB("test");
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("test");
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase("test");
- session.startTransaction();
- // This holds a database IX lock and a collection IX lock on "a".
- sessionDb.a.insert({y: 1});
+session.startTransaction();
+// This holds a database IX lock and a collection IX lock on "a".
+sessionDb.a.insert({y: 1});
- // This only requires database IX lock.
- assert.commandWorked(db.createView("view", "a", []));
+// This only requires database IX lock.
+assert.commandWorked(db.createView("view", "a", []));
- assert.eq(db.view.find().toArray().length, 1);
+assert.eq(db.view.find().toArray().length, 1);
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
index bed13b06bec..2183e6da600 100644
--- a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
+++ b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
@@ -4,64 +4,76 @@
// @tags: [requires_sharding]
(function() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest(
- {shards: 2, config: 1, other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}});
- const kDBName = "test";
- const adminDB = st.s.getDB('admin');
- const testDB = st.s.getDB(kDBName);
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
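+// The keyFile option starts the cluster with authentication enabled, which this test depends on.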
+const st = new ShardingTest(
+ {shards: 2, config: 1, other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}});
+const kDBName = "test";
+const adminDB = st.s.getDB('admin');
+const testDB = st.s.getDB(kDBName);
- jsTest.authenticate(st.shard0);
+jsTest.authenticate(st.shard0);
- const adminUser = {db: "admin", username: "foo", password: "bar"};
- const userA = {db: "test", username: "a", password: "pwd"};
- const userB = {db: "test", username: "b", password: "pwd"};
+const adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+};
+const userA = {
+ db: "test",
+ username: "a",
+ password: "pwd"
+};
+const userB = {
+ db: "test",
+ username: "b",
+ password: "pwd"
+};
- function login(userObj) {
- st.s.getDB(userObj.db).auth(userObj.username, userObj.password);
- }
+function login(userObj) {
+ st.s.getDB(userObj.db).auth(userObj.username, userObj.password);
+}
- function logout(userObj) {
- st.s.getDB(userObj.db).runCommand({logout: 1});
- }
+function logout(userObj) {
+ st.s.getDB(userObj.db).runCommand({logout: 1});
+}
- adminDB.createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+adminDB.createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
- login(adminUser);
+login(adminUser);
- let coll = testDB.security_501;
- coll.drop();
+let coll = testDB.security_501;
+coll.drop();
- for (let i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
+for (let i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- // Create our two users.
- for (let user of[userA, userB]) {
- testDB.createUser({
- user: user.username,
- pwd: user.password,
- roles: [{role: "readWriteAnyDatabase", db: "admin"}]
- });
- }
- logout(adminUser);
+// Create our two users.
+for (let user of [userA, userB]) {
+ testDB.createUser({
+ user: user.username,
+ pwd: user.password,
+ roles: [{role: "readWriteAnyDatabase", db: "admin"}]
+ });
+}
+logout(adminUser);
- // As userA, run a find and get a cursor.
- login(userA);
- const cursorID =
- assert.commandWorked(testDB.runCommand({find: coll.getName(), batchSize: 2})).cursor.id;
- logout(userA);
+// As userA, run a find and get a cursor.
+login(userA);
+const cursorID =
+ assert.commandWorked(testDB.runCommand({find: coll.getName(), batchSize: 2})).cursor.id;
+logout(userA);
- // As userB, attempt to getMore the cursor ID.
- login(userB);
- assert.commandFailed(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
- logout(userB);
+// As userB, attempt to getMore the cursor ID.
+login(userB);
+assert.commandFailed(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
+logout(userB);
- // As user A again, try to getMore the cursor.
- login(userA);
- assert.commandWorked(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
- logout(userA);
+// As user A again, try to getMore the cursor.
+login(userA);
+assert.commandWorked(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
+logout(userA);
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/crud_timestamps.js b/jstests/noPassthrough/crud_timestamps.js
index a89e112ac29..07718be5bbc 100644
--- a/jstests/noPassthrough/crud_timestamps.js
+++ b/jstests/noPassthrough/crud_timestamps.js
@@ -5,107 +5,110 @@
//
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const testDB = rst.getPrimary().getDB(dbName);
- const coll = testDB.getCollection(collName);
-
- if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- rst.stopSet();
- return;
- }
-
- // Turn off timestamp reaping.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
- }));
-
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
- const response = assert.commandWorked(testDB.createCollection("coll"));
- const startTime = response.operationTime;
-
- function check(atClusterTime, expected) {
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: atClusterTime}});
- // Check both a collection scan and scanning the _id index.
- [{$natural: 1}, {_id: 1}].forEach(sort => {
- let response = assert.commandWorked(
- sessionDb.runCommand({find: collName, sort: sort, singleBatch: true}));
- assert.eq(expected, response.cursor.firstBatch);
- });
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // insert
-
- let request = {insert: coll.getName(), documents: [{_id: 1}, {_id: 2}], ordered: false};
- assert.commandWorked(coll.runCommand(request));
-
- const oplog = rst.getPrimary().getDB("local").getCollection("oplog.rs");
- let ts1 = oplog.findOne({o: {_id: 1}}).ts;
- let ts2 = oplog.findOne({o: {_id: 2}}).ts;
-
- check(startTime, []);
- check(ts1, [{_id: 1}]);
- check(ts2, [{_id: 1}, {_id: 2}]);
-
- // upsert
-
- request = {
- update: coll.getName(),
- updates: [
- {q: {_id: 3, a: 1}, u: {$set: {a: 2}}, upsert: true},
- {q: {_id: 4, a: 1}, u: {$set: {a: 3}}, upsert: true}
- ],
- ordered: true
- };
- assert.commandWorked(coll.runCommand(request));
-
- ts1 = oplog.findOne({o: {_id: 3, a: 2}}).ts;
- ts2 = oplog.findOne({o: {_id: 4, a: 3}}).ts;
-
- check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}]);
- check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}, {_id: 4, a: 3}]);
-
- // update
-
- request = {
- update: coll.getName(),
- updates: [{q: {_id: 3, a: 2}, u: {$set: {a: 4}}}, {q: {_id: 4, a: 3}, u: {$set: {a: 5}}}],
- ordered: true
- };
- assert.commandWorked(coll.runCommand(request));
-
- ts1 = oplog.findOne({op: 'u', o2: {_id: 3}}).ts;
- ts2 = oplog.findOne({op: 'u', o2: {_id: 4}}).ts;
-
- check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 3}]);
- check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
-
- // delete
-
- request = {delete: coll.getName(), deletes: [{q: {}, limit: 0}], ordered: false};
-
- assert.commandWorked(coll.runCommand(request));
-
- ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
- ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
- let ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
- let ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
-
- check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
- check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
- check(ts3, [{_id: 4, a: 5}]);
- check(ts4, []);
-
- session.endSession();
- rst.stopSet();
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB(dbName);
+const coll = testDB.getCollection(collName);
+
+if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ rst.stopSet();
+ return;
+}
+
+// Turn off timestamp reaping.
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+}));
+
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const response = assert.commandWorked(testDB.createCollection("coll"));
+const startTime = response.operationTime;
+
+function check(atClusterTime, expected) {
+ session.startTransaction({readConcern: {level: "snapshot", atClusterTime: atClusterTime}});
+ // Check both a collection scan and scanning the _id index.
+ [{$natural: 1}, {_id: 1}].forEach(sort => {
+ let response = assert.commandWorked(
+ sessionDb.runCommand({find: collName, sort: sort, singleBatch: true}));
+ assert.eq(expected, response.cursor.firstBatch);
+ });
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
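+// For example, check(ts1, [{_id: 1}]) asserts that a snapshot read at cluster time ts1 sees
+// exactly the document {_id: 1}.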
+
+// insert
+
+let request = {insert: coll.getName(), documents: [{_id: 1}, {_id: 2}], ordered: false};
+assert.commandWorked(coll.runCommand(request));
+
+const oplog = rst.getPrimary().getDB("local").getCollection("oplog.rs");
+let ts1 = oplog.findOne({o: {_id: 1}}).ts;
+let ts2 = oplog.findOne({o: {_id: 2}}).ts;
+
+check(startTime, []);
+check(ts1, [{_id: 1}]);
+check(ts2, [{_id: 1}, {_id: 2}]);
+
+// upsert
+
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {_id: 3, a: 1}, u: {$set: {a: 2}}, upsert: true},
+ {q: {_id: 4, a: 1}, u: {$set: {a: 3}}, upsert: true}
+ ],
+ ordered: true
+};
+assert.commandWorked(coll.runCommand(request));
+
+ts1 = oplog.findOne({o: {_id: 3, a: 2}}).ts;
+ts2 = oplog.findOne({o: {_id: 4, a: 3}}).ts;
+
+check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}]);
+check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}, {_id: 4, a: 3}]);
+
+// update
+
+request = {
+ update: coll.getName(),
+ updates: [{q: {_id: 3, a: 2}, u: {$set: {a: 4}}}, {q: {_id: 4, a: 3}, u: {$set: {a: 5}}}],
+ ordered: true
+};
+assert.commandWorked(coll.runCommand(request));
+
+ts1 = oplog.findOne({op: 'u', o2: {_id: 3}}).ts;
+ts2 = oplog.findOne({op: 'u', o2: {_id: 4}}).ts;
+
+check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 3}]);
+check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
+
+// delete
+
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {}, limit: 0}],
+ ordered: false
+};
+
+assert.commandWorked(coll.runCommand(request));
+
+ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
+ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
+let ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
+let ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
+
+check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
+check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
+check(ts3, [{_id: 4, a: 5}]);
+check(ts4, []);
+
+session.endSession();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/currentop_active_cursor.js b/jstests/noPassthrough/currentop_active_cursor.js
index c0a37322050..9bfb27e2564 100644
--- a/jstests/noPassthrough/currentop_active_cursor.js
+++ b/jstests/noPassthrough/currentop_active_cursor.js
@@ -2,109 +2,107 @@
// Then test and make sure a pinned cursor shows up in the operation object.
// @tags: [requires_sharding]
(function() {
- "use strict";
- load("jstests/libs/pin_getmore_cursor.js"); // for "withPinnedCursor"
+"use strict";
+load("jstests/libs/pin_getmore_cursor.js"); // for "withPinnedCursor"
- function runTest(cursorId, coll) {
- const db = coll.getDB();
- const adminDB = db.getSiblingDB("admin");
- // Test that active cursors do not show up as idle cursors.
- const idleCursors =
- adminDB
- .aggregate([
- {"$currentOp": {"localOps": true, "idleCursors": true, "allUsers": false}},
- {"$match": {"type": "idleCursor"}}
- ])
- .toArray();
- assert.eq(idleCursors.length, 0, tojson(idleCursors));
- // Test that an active cursor shows up in currentOp.
- const activeCursors =
- adminDB
- .aggregate([
- {"$currentOp": {"localOps": true, "idleCursors": false, "allUsers": false}},
- {"$match": {"cursor": {"$exists": true}}}
- ])
- .toArray();
- assert.eq(activeCursors.length, 1, tojson(activeCursors));
- const cursorObject = activeCursors[0].cursor;
- assert.eq(cursorObject.originatingCommand.find, coll.getName(), tojson(activeCursors));
- assert.eq(cursorObject.nDocsReturned, 2, tojson(activeCursors));
- assert.eq(cursorObject.tailable, false, tojson(activeCursors));
- assert.eq(cursorObject.awaitData, false, tojson(activeCursors));
- }
- const conn = MongoRunner.runMongod({});
- let failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
- withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: conn.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- const response =
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
+function runTest(cursorId, coll) {
+ const db = coll.getDB();
+ const adminDB = db.getSiblingDB("admin");
+ // Test that active cursors do not show up as idle cursors.
+ const idleCursors =
+ adminDB
+ .aggregate([
+ {"$currentOp": {"localOps": true, "idleCursors": true, "allUsers": false}},
+ {"$match": {"type": "idleCursor"}}
+ ])
+ .toArray();
+ assert.eq(idleCursors.length, 0, tojson(idleCursors));
+ // Test that an active cursor shows up in currentOp.
+ const activeCursors =
+ adminDB
+ .aggregate([
+ {"$currentOp": {"localOps": true, "idleCursors": false, "allUsers": false}},
+ {"$match": {"cursor": {"$exists": true}}}
+ ])
+ .toArray();
+ assert.eq(activeCursors.length, 1, tojson(activeCursors));
+ const cursorObject = activeCursors[0].cursor;
+ assert.eq(cursorObject.originatingCommand.find, coll.getName(), tojson(activeCursors));
+ assert.eq(cursorObject.nDocsReturned, 2, tojson(activeCursors));
+ assert.eq(cursorObject.tailable, false, tojson(activeCursors));
+ assert.eq(cursorObject.awaitData, false, tojson(activeCursors));
+}
+const conn = MongoRunner.runMongod({});
+let failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
+withPinnedCursor({
+ conn: conn,
+ sessionId: null,
+ db: conn.getDB("test"),
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ const response =
+ assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
- // Test OP_GET_MORE (legacy read mode) against a mongod.
- failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
- const db = conn.getDB("test");
- db.getMongo().forceReadMode("legacy");
- withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: db,
- assertFunction: runTest,
- runGetMoreFunc: function() {
- db.getMongo().forceReadMode("legacy");
- let cmdRes = {
- "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
- "ok": 1
- };
- let cursor = new DBCommandCursor(db, cmdRes, 2);
- cursor.itcount();
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
- MongoRunner.stopMongod(conn);
+// Test OP_GET_MORE (legacy read mode) against a mongod.
+failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
+const db = conn.getDB("test");
+db.getMongo().forceReadMode("legacy");
+withPinnedCursor({
+ conn: conn,
+ sessionId: null,
+ db: db,
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ db.getMongo().forceReadMode("legacy");
+ let cmdRes = {
+ "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
+ "ok": 1
+ };
+ let cursor = new DBCommandCursor(db, cmdRes, 2);
+ cursor.itcount();
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
+MongoRunner.stopMongod(conn);
- // Sharded test
- failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- let st = new ShardingTest({shards: 2, mongos: 1});
- withPinnedCursor({
- conn: st.s,
- sessionId: null,
- db: st.s.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- const response =
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
-
- // Test OP_GET_MORE (legacy reead mode) against a mongos.
- withPinnedCursor({
- conn: st.s,
- sessionId: null,
- db: st.s.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- db.getMongo().forceReadMode("legacy");
- let cmdRes = {
- "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
- "ok": 1
- };
- let cursor = new DBCommandCursor(db, cmdRes, 2);
- cursor.itcount();
-
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
- st.stop();
+// Sharded test
+failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+let st = new ShardingTest({shards: 2, mongos: 1});
+withPinnedCursor({
+ conn: st.s,
+ sessionId: null,
+ db: st.s.getDB("test"),
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ const response =
+ assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
+// Test OP_GET_MORE (legacy read mode) against a mongos.
+withPinnedCursor({
+ conn: st.s,
+ sessionId: null,
+ db: st.s.getDB("test"),
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ db.getMongo().forceReadMode("legacy");
+ let cmdRes = {
+ "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
+ "ok": 1
+ };
+ let cursor = new DBCommandCursor(db, cmdRes, 2);
+ cursor.itcount();
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
+st.stop();
})();
diff --git a/jstests/noPassthrough/currentop_active_transaction.js b/jstests/noPassthrough/currentop_active_transaction.js
index 01d11a367b8..f7e1d5bee78 100644
--- a/jstests/noPassthrough/currentop_active_transaction.js
+++ b/jstests/noPassthrough/currentop_active_transaction.js
@@ -5,188 +5,183 @@
*/
(function() {
- 'use strict';
- load("jstests/libs/parallel_shell_helpers.js");
-
- function transactionFn(isPrepared) {
- const collName = 'currentop_active_transaction';
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase('test');
-
- session.startTransaction({readConcern: {level: 'snapshot'}});
- sessionDB[collName].update({}, {x: 2});
- if (isPrepared) {
- // Load the prepare helpers to be called in the parallel shell.
- load('jstests/core/txns/libs/prepare_helpers.js');
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else {
- assert.commandWorked(session.commitTransaction_forTesting());
- }
- }
+'use strict';
+load("jstests/libs/parallel_shell_helpers.js");
- function checkCurrentOpFields(currentOp,
- isPrepared,
- operationTime,
- timeBeforeTransactionStarts,
- timeAfterTransactionStarts,
- timeBeforeCurrentOp) {
- const transactionDocument = currentOp[0].transaction;
- assert.eq(transactionDocument.parameters.autocommit,
- false,
- "Expected 'autocommit' to be false but got " +
- transactionDocument.parameters.autocommit + " instead: " +
- tojson(transactionDocument));
- assert.docEq(transactionDocument.parameters.readConcern,
- {level: 'snapshot'},
- "Expected 'readConcern' to be level: snapshot but got " +
- tojson(transactionDocument.parameters.readConcern) + " instead: " +
- tojson(transactionDocument));
- assert.gte(transactionDocument.readTimestamp,
- operationTime,
- "Expected 'readTimestamp' to be at least " + operationTime + " but got " +
- transactionDocument.readTimestamp + " instead: " +
- tojson(transactionDocument));
- assert.gte(ISODate(transactionDocument.startWallClockTime),
- timeBeforeTransactionStarts,
- "Expected 'startWallClockTime' to be at least" + timeBeforeTransactionStarts +
- " but got " + transactionDocument.startWallClockTime + " instead: " +
- tojson(transactionDocument));
- const expectedTimeOpen = (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000;
- assert.gt(transactionDocument.timeOpenMicros,
- expectedTimeOpen,
- "Expected 'timeOpenMicros' to be at least" + expectedTimeOpen + " but got " +
- transactionDocument.timeOpenMicros + " instead: " +
- tojson(transactionDocument));
- assert.gte(transactionDocument.timeActiveMicros,
- 0,
- "Expected 'timeActiveMicros' to be at least 0: " + tojson(transactionDocument));
+function transactionFn(isPrepared) {
+ const collName = 'currentop_active_transaction';
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase('test');
+
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ sessionDB[collName].update({}, {x: 2});
+ if (isPrepared) {
+ // Load the prepare helpers to be called in the parallel shell.
+ load('jstests/core/txns/libs/prepare_helpers.js');
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else {
+ assert.commandWorked(session.commitTransaction_forTesting());
+ }
+}
+
+function checkCurrentOpFields(currentOp,
+ isPrepared,
+ operationTime,
+ timeBeforeTransactionStarts,
+ timeAfterTransactionStarts,
+ timeBeforeCurrentOp) {
+ const transactionDocument = currentOp[0].transaction;
+ assert.eq(transactionDocument.parameters.autocommit,
+ false,
+ "Expected 'autocommit' to be false but got " +
+ transactionDocument.parameters.autocommit +
+ " instead: " + tojson(transactionDocument));
+ assert.docEq(transactionDocument.parameters.readConcern,
+ {level: 'snapshot'},
+ "Expected 'readConcern' to be level: snapshot but got " +
+ tojson(transactionDocument.parameters.readConcern) +
+ " instead: " + tojson(transactionDocument));
+ assert.gte(transactionDocument.readTimestamp,
+ operationTime,
+ "Expected 'readTimestamp' to be at least " + operationTime + " but got " +
+ transactionDocument.readTimestamp + " instead: " + tojson(transactionDocument));
+ assert.gte(ISODate(transactionDocument.startWallClockTime),
+ timeBeforeTransactionStarts,
+ "Expected 'startWallClockTime' to be at least" + timeBeforeTransactionStarts +
+ " but got " + transactionDocument.startWallClockTime +
+ " instead: " + tojson(transactionDocument));
+ const expectedTimeOpen = (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000;
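+    // Date subtraction above yields milliseconds; multiplying by 1000 converts the interval to
+    // microseconds for comparison with 'timeOpenMicros'.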
+ assert.gt(transactionDocument.timeOpenMicros,
+ expectedTimeOpen,
+ "Expected 'timeOpenMicros' to be at least" + expectedTimeOpen + " but got " +
+ transactionDocument.timeOpenMicros + " instead: " + tojson(transactionDocument));
+ assert.gte(transactionDocument.timeActiveMicros,
+ 0,
+ "Expected 'timeActiveMicros' to be at least 0: " + tojson(transactionDocument));
+ assert.gte(transactionDocument.timeInactiveMicros,
+ 0,
+ "Expected 'timeInactiveMicros' to be at least 0: " + tojson(transactionDocument));
+ const actualExpiryTime = ISODate(transactionDocument.expiryTime).getTime();
+ const expectedExpiryTime =
+ ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000;
+ assert.eq(expectedExpiryTime,
+ actualExpiryTime,
+ "Expected 'expiryTime' to be " + expectedExpiryTime + " but got " + actualExpiryTime +
+ " instead: " + tojson(transactionDocument));
+ if (isPrepared) {
assert.gte(
- transactionDocument.timeInactiveMicros,
+ transactionDocument.timePreparedMicros,
0,
- "Expected 'timeInactiveMicros' to be at least 0: " + tojson(transactionDocument));
- const actualExpiryTime = ISODate(transactionDocument.expiryTime).getTime();
- const expectedExpiryTime =
- ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000;
- assert.eq(expectedExpiryTime,
- actualExpiryTime,
- "Expected 'expiryTime' to be " + expectedExpiryTime + " but got " +
- actualExpiryTime + " instead: " + tojson(transactionDocument));
- if (isPrepared) {
- assert.gte(
- transactionDocument.timePreparedMicros,
- 0,
- "Expected 'timePreparedMicros' to be at least 0: " + tojson(transactionDocument));
- }
+ "Expected 'timePreparedMicros' to be at least 0: " + tojson(transactionDocument));
}
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const collName = 'currentop_active_transaction';
- const testDB = rst.getPrimary().getDB('test');
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
-
- // Run an operation prior to starting the transaction and save its operation time. We will use
- // this later to assert that our subsequent transaction's readTimestamp is greater than or equal
- // to this operation time.
- let res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
-
- // Set and save the transaction's lifetime. We will use this later to assert that our
- // transaction's expiry time is equal to its start time + lifetime.
- const transactionLifeTime = 10;
- assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
-
- // This will make the transaction hang.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'alwaysOn'}));
-
- let timeBeforeTransactionStarts = new ISODate();
- let isPrepared = true;
- const joinPreparedTransaction =
- startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
-
- const prepareTransactionFilter = {
- active: true,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: true}
- };
-
- // Keep running currentOp() until we see the transaction subdocument.
- assert.soon(function() {
- return 1 ===
- adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).itcount();
- });
-
- let timeAfterTransactionStarts = new ISODate();
- // Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
- // elapse.
- sleep(100);
- let timeBeforeCurrentOp = new ISODate();
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- let currentOp =
- adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
- checkCurrentOpFields(currentOp,
- isPrepared,
- res.operationTime,
- timeBeforeTransactionStarts,
- timeAfterTransactionStarts,
- timeBeforeCurrentOp);
-
- // Now the transaction can proceed.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'off'}));
- joinPreparedTransaction();
-
- // Conduct the same test but with a non-prepared transaction.
- res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
-
- // This will make the transaction hang.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
-
- timeBeforeTransactionStarts = new ISODate();
- isPrepared = false;
- const joinTransaction =
- startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
-
- const transactionFilter = {
- active: true,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: false}
- };
-
- // Keep running currentOp() until we see the transaction subdocument.
- assert.soon(function() {
- return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
- });
-
- timeAfterTransactionStarts = new ISODate();
- // Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
- // elapse.
- sleep(100);
- timeBeforeCurrentOp = new ISODate();
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
- checkCurrentOpFields(currentOp,
- isPrepared,
- res.operationTime,
- timeBeforeTransactionStarts,
- timeAfterTransactionStarts,
- timeBeforeCurrentOp);
-
- // Now the transaction can proceed.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
- joinTransaction();
-
- rst.stopSet();
+}
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const collName = 'currentop_active_transaction';
+const testDB = rst.getPrimary().getDB('test');
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
+
+// Run an operation prior to starting the transaction and save its operation time. We will use
+// this later to assert that our subsequent transaction's readTimestamp is greater than or equal
+// to this operation time.
+let res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
+
+// Set and save the transaction's lifetime. We will use this later to assert that our
+// transaction's expiry time is equal to its start time + lifetime.
+const transactionLifeTime = 10;
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
+
+// This will make the transaction hang.
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'alwaysOn'}));
+
+let timeBeforeTransactionStarts = new ISODate();
+let isPrepared = true;
+const joinPreparedTransaction =
+ startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
+
+const prepareTransactionFilter = {
+ active: true,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: true}
+};
+
+// Keep running currentOp() until we see the transaction subdocument.
+assert.soon(function() {
+ return 1 ===
+ adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).itcount();
+});
+
+let timeAfterTransactionStarts = new ISODate();
+// Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
+// elapse.
+sleep(100);
+let timeBeforeCurrentOp = new ISODate();
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
+checkCurrentOpFields(currentOp,
+ isPrepared,
+ res.operationTime,
+ timeBeforeTransactionStarts,
+ timeAfterTransactionStarts,
+ timeBeforeCurrentOp);
+
+// Now the transaction can proceed.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'off'}));
+joinPreparedTransaction();
+
+// Conduct the same test but with a non-prepared transaction.
+res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
+
+// This will make the transaction hang.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
+
+timeBeforeTransactionStarts = new ISODate();
+isPrepared = false;
+const joinTransaction = startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
+
+const transactionFilter = {
+ active: true,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: false}
+};
+
+// Keep running currentOp() until we see the transaction subdocument.
+assert.soon(function() {
+ return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
+});
+
+timeAfterTransactionStarts = new ISODate();
+// Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
+// elapse.
+sleep(100);
+timeBeforeCurrentOp = new ISODate();
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
+checkCurrentOpFields(currentOp,
+ isPrepared,
+ res.operationTime,
+ timeBeforeTransactionStarts,
+ timeAfterTransactionStarts,
+ timeBeforeCurrentOp);
+
+// Now the transaction can proceed.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
+joinTransaction();
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js b/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js
index e02beac5366..25a5857d851 100644
--- a/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js
+++ b/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js
@@ -6,62 +6,66 @@
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const collName = 'currentop_last_client_info';
- const dbName = 'test';
- const testDB = rst.getPrimary().getDB(dbName);
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
+const collName = 'currentop_last_client_info';
+const dbName = 'test';
+const testDB = rst.getPrimary().getDB(dbName);
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
- // Start a new Session.
- const lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
- const txnNumber = NumberLong(0);
- assert.commandWorked(testDB.runCommand({
- find: collName,
- lsid: lsid,
- txnNumber: txnNumber,
- readConcern: {level: "snapshot"},
- startTransaction: true,
- autocommit: false
- }));
+// Start a new Session.
+const lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
+const txnNumber = NumberLong(0);
+assert.commandWorked(testDB.runCommand({
+ find: collName,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ readConcern: {level: "snapshot"},
+ startTransaction: true,
+ autocommit: false
+}));
- const currentOpFilter = {active: false, 'lsid.id': {$eq: lsid.id}, 'client': {$exists: true}};
+const currentOpFilter = {
+ active: false,
+ 'lsid.id': {$eq: lsid.id},
+ 'client': {$exists: true}
+};
- let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
- assert.eq(currentOp.length, 1);
+let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
+assert.eq(currentOp.length, 1);
- let currentOpEntry = currentOp[0];
- const connectionId = currentOpEntry.connectionId;
- // Check that the currentOp object contains information about the last client that has run an
- // operation and that its values align with our expectations.
- assert.eq(currentOpEntry.appName, "MongoDB Shell");
- assert.eq(currentOpEntry.clientMetadata.application.name, "MongoDB Shell");
- assert.eq(currentOpEntry.clientMetadata.driver.name, "MongoDB Internal Client");
+let currentOpEntry = currentOp[0];
+const connectionId = currentOpEntry.connectionId;
+// Check that the currentOp object contains information about the last client that has run an
+// operation and that its values align with our expectations.
+assert.eq(currentOpEntry.appName, "MongoDB Shell");
+assert.eq(currentOpEntry.clientMetadata.application.name, "MongoDB Shell");
+assert.eq(currentOpEntry.clientMetadata.driver.name, "MongoDB Internal Client");
- // Create a new Client and run another operation on the same session.
- const otherClient = new Mongo(rst.getPrimary().host);
- assert.commandWorked(otherClient.getDB(dbName).runCommand(
- {find: collName, lsid: lsid, txnNumber: txnNumber, autocommit: false}));
+// Create a new Client and run another operation on the same session.
+const otherClient = new Mongo(rst.getPrimary().host);
+assert.commandWorked(otherClient.getDB(dbName).runCommand(
+ {find: collName, lsid: lsid, txnNumber: txnNumber, autocommit: false}));
- currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
- currentOpEntry = currentOp[0];
- // Check that the last client that has ran an operation against this session has a different
- // connectionId than the previous client.
- assert.neq(currentOpEntry.connectionId, connectionId);
+currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
+currentOpEntry = currentOp[0];
+// Check that the last client that has run an operation against this session has a different
+// connectionId than the previous client.
+assert.neq(currentOpEntry.connectionId, connectionId);
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: txnNumber,
- autocommit: false,
- writeConcern: {w: 'majority'}
- }));
+assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ autocommit: false,
+ writeConcern: {w: 'majority'}
+}));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/currentop_includes_await_time.js b/jstests/noPassthrough/currentop_includes_await_time.js
index 3a5ad0bca4f..5a5dee2f5ce 100644
--- a/jstests/noPassthrough/currentop_includes_await_time.js
+++ b/jstests/noPassthrough/currentop_includes_await_time.js
@@ -4,50 +4,50 @@
* @tags: [requires_capped]
*/
(function() {
- "use test";
-
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- const coll = testDB.currentop_includes_await_time;
-
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 1024}));
- assert.writeOK(coll.insert({_id: 1}));
-
- let cmdRes = assert.commandWorked(
- testDB.runCommand({find: coll.getName(), tailable: true, awaitData: true}));
-
- TestData.commandResult = cmdRes;
- let cleanupShell = startParallelShell(function() {
- db.getSiblingDB("test").runCommand({
- getMore: TestData.commandResult.cursor.id,
- collection: "currentop_includes_await_time",
- maxTimeMS: 5 * 60 * 1000,
- });
- }, conn.port);
-
- assert.soon(function() {
- // This filter ensures that the getMore 'secs_running' and 'microsecs_running' fields are
- // sufficiently large that they appear to include time spent blocking waiting for capped
- // inserts.
- let ops = testDB.currentOp({
- "command.getMore": {$exists: true},
- "ns": coll.getFullName(),
- secs_running: {$gte: 2},
- microsecs_running: {$gte: 2 * 1000 * 1000}
- });
- return ops.inprog.length === 1;
- }, printjson(testDB.currentOp()));
-
- // A capped insertion should unblock the getMore, allowing the test to complete before the
- // getMore's awaitData time expires.
- assert.writeOK(coll.insert({_id: 2}));
-
- cleanupShell();
- MongoRunner.stopMongod(conn);
+"use test";
+
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("test");
+const coll = testDB.currentop_includes_await_time;
+
+coll.drop();
+assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 1024}));
+assert.writeOK(coll.insert({_id: 1}));
+
+let cmdRes = assert.commandWorked(
+ testDB.runCommand({find: coll.getName(), tailable: true, awaitData: true}));
+
+TestData.commandResult = cmdRes;
+let cleanupShell = startParallelShell(function() {
+ db.getSiblingDB("test").runCommand({
+ getMore: TestData.commandResult.cursor.id,
+ collection: "currentop_includes_await_time",
+ maxTimeMS: 5 * 60 * 1000,
+ });
+}, conn.port);
+
+assert.soon(function() {
+ // This filter ensures that the getMore 'secs_running' and 'microsecs_running' fields are
+ // sufficiently large that they appear to include time spent blocking waiting for capped
+ // inserts.
+ let ops = testDB.currentOp({
+ "command.getMore": {$exists: true},
+ "ns": coll.getFullName(),
+ secs_running: {$gte: 2},
+ microsecs_running: {$gte: 2 * 1000 * 1000}
+ });
+ return ops.inprog.length === 1;
+}, printjson(testDB.currentOp()));
+
+// A capped insertion should unblock the getMore, allowing the test to complete before the
+// getMore's awaitData time expires.
+assert.writeOK(coll.insert({_id: 2}));
+
+cleanupShell();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index b2aa9b2284f..15e655d568a 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -4,648 +4,632 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
-
- // This test runs manual getMores using different connections, which will not inherit the
- // implicit session of the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
- // that operations will yield on each PlanExecuter iteration.
- const st = new ShardingTest({
- name: jsTestName(),
- shards: 2,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
- });
-
- // Obtain one mongoS connection and a second direct to the shard.
- const rsConn = st.rs0.getPrimary();
- const mongosConn = st.s;
-
- const mongosDB = mongosConn.getDB("currentop_query");
- const mongosColl = mongosDB.currentop_query;
-
- // Enable sharding on the the test database and ensure that the primary is on shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), rsConn.name);
-
- // On a sharded cluster, aggregations which are dispatched to multiple shards first establish
- // zero-batch cursors and only hit the failpoints on the following getMore. This helper takes a
- // generic command object and creates an appropriate filter given the use-case.
- function commandOrOriginatingCommand(cmdObj, isRemoteShardCurOp) {
- const cmdFieldName = (isRemoteShardCurOp ? "cursor.originatingCommand" : "command");
- const cmdFilter = {};
- for (let subFieldName in cmdObj) {
- cmdFilter[`${cmdFieldName}.${subFieldName}`] = cmdObj[subFieldName];
- }
- return cmdFilter;
+"use strict";
+
+// This test runs manual getMores using different connections, which will not inherit the
+// implicit session of the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
+// that operations will yield on each PlanExecutor iteration.
+const st = new ShardingTest({
+ name: jsTestName(),
+ shards: 2,
+ rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
+});
+
+// Obtain one mongoS connection and a second direct to the shard.
+const rsConn = st.rs0.getPrimary();
+const mongosConn = st.s;
+
+const mongosDB = mongosConn.getDB("currentop_query");
+const mongosColl = mongosDB.currentop_query;
+
+// Enable sharding on the test database and ensure that the primary is on shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), rsConn.name);
+
+// On a sharded cluster, aggregations which are dispatched to multiple shards first establish
+// zero-batch cursors and only hit the failpoints on the following getMore. This helper takes a
+// generic command object and creates an appropriate filter given the use-case.
+function commandOrOriginatingCommand(cmdObj, isRemoteShardCurOp) {
+ const cmdFieldName = (isRemoteShardCurOp ? "cursor.originatingCommand" : "command");
+ const cmdFilter = {};
+ for (let subFieldName in cmdObj) {
+ cmdFilter[`${cmdFieldName}.${subFieldName}`] = cmdObj[subFieldName];
+ }
+ return cmdFilter;
+}
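+// For example, with isRemoteShardCurOp=true the command {find: "coll"} yields the filter
+// {"cursor.originatingCommand.find": "coll"}.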
+
+// Drops and re-creates the sharded test collection.
+function dropAndRecreateTestCollection() {
+ assert(mongosColl.drop());
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}}));
+}
+
+/**
+ * @param {connection} conn - The connection through which to run the test suite.
+ * @param {string} readMode - The read mode to use for the parallel shell. This allows
+ * testing currentOp() output for both OP_QUERY and OP_GET_MORE queries, as well as "find" and
+ * "getMore" commands.
+ * @param {function} currentOp - Function which takes a database object and a filter, and
+ * returns an array of matching current operations. This allows us to test output for both the
+ * currentOp command and the $currentOp aggregation stage.
+ * @param {boolean} truncatedOps - If true, we expect operations that exceed the maximum
+ * currentOp size to be truncated in the output 'command' field, and we run only a subset of
+ * tests designed to exercise that scenario. If false, we expect the entire operation to be
+ * returned.
+ * @param {boolean} localOps - If true, we expect currentOp to return operations running on a
+ * mongoS itself rather than on the shards.
+ */
+function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
+ const testDB = conn.getDB("currentop_query");
+ const coll = testDB.currentop_query;
+ dropAndRecreateTestCollection();
+
+ for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
}
- // Drops and re-creates the sharded test collection.
- function dropAndRecreateTestCollection() {
- assert(mongosColl.drop());
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}}));
+ const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps);
+ const isRemoteShardCurOp = (FixtureHelpers.isMongos(testDB) && !localOps);
+
+ // If 'truncatedOps' is true, run only the subset of tests designed to validate the
+ // truncation behaviour. Otherwise, run the standard set of tests which assume that
+ // truncation will not occur.
+ if (truncatedOps) {
+ runTruncationTests();
+ } else {
+ runStandardTests();
}
/**
- * @param {connection} conn - The connection through which to run the test suite.
- * @param {string} readMode - The read mode to use for the parallel shell. This allows
- * testing currentOp() output for both OP_QUERY and OP_GET_MORE queries, as well as "find" and
- * "getMore" commands.
- * @params {function} currentOp - Function which takes a database object and a filter, and
- * returns an array of matching current operations. This allows us to test output for both the
- * currentOp command and the $currentOp aggregation stage.
- * @params {boolean} truncatedOps - if true, we expect operations that exceed the maximum
- * currentOp size to be truncated in the output 'command' field, and we run only a subset of
- * tests designed to exercise that scenario. If false, we expect the entire operation to be
- * returned.
- * @params {boolean} localOps - if true, we expect currentOp to return operations running on a
- * mongoS itself rather than on the shards.
+ * Captures currentOp() for a given test command/operation and confirms that namespace,
+ * operation type and planSummary are correct.
+ *
+ * @param {Object} testObj - Contains test arguments.
+ * @param {function} testObj.test - A function that runs the desired test op/cmd.
+ * @param {string} testObj.planSummary - A string containing the expected planSummary.
+ * @param {Object} testObj.currentOpFilter - A filter to be used to narrow currentOp()
+ * output to only the relevant operation or command.
+ * @param {string} [testObj.command] - The command to test against. Will look for this to
+ * be a key in the currentOp().query object.
+ * @param {string} [testObj.operation] - The operation to test against. Will look for this
+ * to be the value of the currentOp().op field.
*/
- function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
- const testDB = conn.getDB("currentop_query");
- const coll = testDB.currentop_query;
- dropAndRecreateTestCollection();
-
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ function confirmCurrentOpContents(testObj) {
+ // Force queries to hang on yield to allow for currentOp capture.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: conn.getDB("admin"),
+ cmdObj: {
+ configureFailPoint: "setYieldAllLocksHang",
+ mode: "alwaysOn",
+ data: {namespace: mongosColl.getFullName()}
+ }
+ });
+
+ // Set the test configuration in TestData for the parallel shell test.
+ TestData.shellReadMode = readMode;
+ TestData.currentOpTest = testObj.test;
+ TestData.currentOpCollName = "currentop_query";
+
+ // Wrapper function which sets the readMode and DB before running the test function
+ // found at TestData.currentOpTest.
+ function doTest() {
+ const testDB = db.getSiblingDB(TestData.currentOpCollName);
+ testDB.getMongo().forceReadMode(TestData.shellReadMode);
+ TestData.currentOpTest(testDB);
}
- const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps);
- const isRemoteShardCurOp = (FixtureHelpers.isMongos(testDB) && !localOps);
+ // Run the operation in the background.
+ var awaitShell = startParallelShell(doTest, testDB.getMongo().port);
- // If 'truncatedOps' is true, run only the subset of tests designed to validate the
- // truncation behaviour. Otherwise, run the standard set of tests which assume that
- // truncation will not occur.
- if (truncatedOps) {
- runTruncationTests();
- } else {
- runStandardTests();
+ // Augment the currentOpFilter with additional known predicates.
+ if (!testObj.currentOpFilter.ns) {
+ testObj.currentOpFilter.ns = coll.getFullName();
+ }
+ if (!isLocalMongosCurOp) {
+ testObj.currentOpFilter.planSummary = testObj.planSummary;
+ }
+ if (testObj.hasOwnProperty("command")) {
+ testObj.currentOpFilter["command." + testObj.command] = {$exists: true};
+ } else if (testObj.hasOwnProperty("operation")) {
+ testObj.currentOpFilter.op = testObj.operation;
}
- /**
- * Captures currentOp() for a given test command/operation and confirms that namespace,
- * operation type and planSummary are correct.
- *
- * @param {Object} testObj - Contains test arguments.
- * @param {function} testObj.test - A function that runs the desired test op/cmd.
- * @param {string} testObj.planSummary - A string containing the expected planSummary.
- * @param {Object} testObj.currentOpFilter - A filter to be used to narrow currentOp()
- * output to only the relevant operation or command.
- * @param {string} [testObj.command] - The command to test against. Will look for this to
- * be a key in the currentOp().query object.
- * @param {string} [testObj.operation] - The operation to test against. Will look for this
- * to be the value of the currentOp().op field.
- */
- function confirmCurrentOpContents(testObj) {
- // Force queries to hang on yield to allow for currentOp capture.
- FixtureHelpers.runCommandOnEachPrimary({
- db: conn.getDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: mongosColl.getFullName()}
+ // Capture currentOp record for the query and confirm that the 'query' and 'planSummary'
+ // fields contain the content expected. We are indirectly testing the 'ns' field as well
+ // with the currentOp query argument.
+ assert.soon(
+ function() {
+ var result = currentOp(testDB, testObj.currentOpFilter, truncatedOps, localOps);
+ assert.commandWorked(result);
+
+ if (result.inprog.length > 0) {
+ result.inprog.forEach((op) => {
+ assert.eq(op.appName, "MongoDB Shell", tojson(result));
+ assert.eq(
+ op.clientMetadata.application.name, "MongoDB Shell", tojson(result));
+ });
+ return true;
}
- });
-
- // Set the test configuration in TestData for the parallel shell test.
- TestData.shellReadMode = readMode;
- TestData.currentOpTest = testObj.test;
- TestData.currentOpCollName = "currentop_query";
-
- // Wrapper function which sets the readMode and DB before running the test function
- // found at TestData.currentOpTest.
- function doTest() {
- const testDB = db.getSiblingDB(TestData.currentOpCollName);
- testDB.getMongo().forceReadMode(TestData.shellReadMode);
- TestData.currentOpTest(testDB);
- }
-
- // Run the operation in the background.
- var awaitShell = startParallelShell(doTest, testDB.getMongo().port);
- // Augment the currentOpFilter with additional known predicates.
- if (!testObj.currentOpFilter.ns) {
- testObj.currentOpFilter.ns = coll.getFullName();
- }
- if (!isLocalMongosCurOp) {
- testObj.currentOpFilter.planSummary = testObj.planSummary;
- }
- if (testObj.hasOwnProperty("command")) {
- testObj.currentOpFilter["command." + testObj.command] = {$exists: true};
- } else if (testObj.hasOwnProperty("operation")) {
- testObj.currentOpFilter.op = testObj.operation;
- }
-
- // Capture currentOp record for the query and confirm that the 'query' and 'planSummary'
- // fields contain the content expected. We are indirectly testing the 'ns' field as well
- // with the currentOp query argument.
- assert.soon(
- function() {
- var result = currentOp(testDB, testObj.currentOpFilter, truncatedOps, localOps);
- assert.commandWorked(result);
-
- if (result.inprog.length > 0) {
- result.inprog.forEach((op) => {
- assert.eq(op.appName, "MongoDB Shell", tojson(result));
- assert.eq(op.clientMetadata.application.name,
- "MongoDB Shell",
- tojson(result));
- });
- return true;
- }
-
- return false;
- },
- function() {
- return "Failed to find operation from " + tojson(testObj.currentOpFilter) +
- " in currentOp() output: " +
- tojson(currentOp(testDB, {}, truncatedOps, localOps)) +
- (isLocalMongosCurOp
- ? ", with localOps=false: " +
- tojson(currentOp(testDB, {}, truncatedOps, false))
- : "");
- });
-
- // Allow the query to complete.
- FixtureHelpers.runCommandOnEachPrimary({
- db: conn.getDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ return false;
+ },
+ function() {
+ return "Failed to find operation from " + tojson(testObj.currentOpFilter) +
+ " in currentOp() output: " +
+ tojson(currentOp(testDB, {}, truncatedOps, localOps)) +
+ (isLocalMongosCurOp ? ", with localOps=false: " +
+ tojson(currentOp(testDB, {}, truncatedOps, false))
+ : "");
});
- awaitShell();
- delete TestData.currentOpCollName;
- delete TestData.currentOpTest;
- delete TestData.shellReadMode;
- }
+ // Allow the query to complete.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: conn.getDB("admin"),
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
- /**
- * Runs a set of tests to verify that the currentOp output appears as expected. These tests
- * assume that the 'truncateOps' parameter is false, so no command objects in the currentOp
- * output will be truncated to string.
- */
- function runStandardTests() {
- //
- // Confirm currentOp content for commands defined in 'testList'.
- //
- var testList = [
- {
- test: function(db) {
- assert.eq(db.currentop_query
- .aggregate([{$match: {a: 1, $comment: "currentop_query"}}], {
- collation: {locale: "fr"},
- hint: {_id: 1},
- comment: "currentop_query_2"
- })
- .itcount(),
- 1);
- },
- planSummary: "IXSCAN { _id: 1 }",
- currentOpFilter: commandOrOriginatingCommand({
- "aggregate": {$exists: true},
- "pipeline.0.$match.$comment": "currentop_query",
- "comment": "currentop_query_2",
- "collation": {locale: "fr"},
- "hint": {_id: 1}
- },
- isRemoteShardCurOp)
- },
- {
- test: function(db) {
- assert.eq(db.currentop_query.find({a: 1, $comment: "currentop_query"})
- .collation({locale: "fr"})
- .count(),
- 1);
- },
- command: "count",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
+ awaitShell();
+ delete TestData.currentOpCollName;
+ delete TestData.currentOpTest;
+ delete TestData.shellReadMode;
+ }
+
+ /**
+ * Runs a set of tests to verify that the currentOp output appears as expected. These tests
+ * assume that the 'truncateOps' parameter is false, so no command objects in the currentOp
+ * output will be truncated to string.
+ */
+ function runStandardTests() {
+ //
+ // Confirm currentOp content for commands defined in 'testList'.
+ //
+ var testList = [
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query
+ .aggregate([{$match: {a: 1, $comment: "currentop_query"}}], {
+ collation: {locale: "fr"},
+ hint: {_id: 1},
+ comment: "currentop_query_2"
+ })
+ .itcount(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(db.currentop_query.distinct("a",
- {a: 1, $comment: "currentop_query"},
- {collation: {locale: "fr"}}),
- [1]);
- },
- command: "distinct",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
+ planSummary: "IXSCAN { _id: 1 }",
+ currentOpFilter: commandOrOriginatingCommand({
+ "aggregate": {$exists: true},
+ "pipeline.0.$match.$comment": "currentop_query",
+ "comment": "currentop_query_2",
+ "collation": {locale: "fr"},
+ "hint": {_id: 1}
},
- {
- test: function(db) {
- assert.eq(
- db.currentop_query.find({a: 1}).comment("currentop_query").itcount(), 1);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter: {"command.comment": "currentop_query"}
+ isRemoteShardCurOp)
+ },
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query.find({a: 1, $comment: "currentop_query"})
+ .collation({locale: "fr"})
+ .count(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(db.currentop_query.findAndModify({
- query: {_id: 1, a: 1, $comment: "currentop_query"},
- update: {$inc: {b: 1}},
- collation: {locale: "fr"}
- }),
- {"_id": 1, "a": 1});
- },
- command: "findandmodify",
- planSummary: "IXSCAN { _id: 1 }",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
+ command: "count",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(
+ db.currentop_query.distinct(
+ "a", {a: 1, $comment: "currentop_query"}, {collation: {locale: "fr"}}),
+ [1]);
},
- {
- test: function(db) {
- assert.commandWorked(
- db.currentop_query.mapReduce(() => {},
- (a, b) => {},
- {
- query: {$comment: "currentop_query"},
- out: {inline: 1},
- }));
- },
- command: "mapreduce",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "ns": /^currentop_query.*currentop_query/
- }
+ command: "distinct",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query.find({a: 1}).comment("currentop_query").itcount(),
+ 1);
},
- {
- test: function(db) {
- assert.writeOK(db.currentop_query.remove({a: 2, $comment: "currentop_query"},
- {collation: {locale: "fr"}}));
- },
- operation: "remove",
- planSummary: "COLLSCAN",
- currentOpFilter:
- (isLocalMongosCurOp
- ? {"command.delete": coll.getName(), "command.ordered": true}
- : {
- "command.q.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- })
+ command: "find",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {"command.comment": "currentop_query"}
+ },
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query.findAndModify({
+ query: {_id: 1, a: 1, $comment: "currentop_query"},
+ update: {$inc: {b: 1}},
+ collation: {locale: "fr"}
+ }),
+ {"_id": 1, "a": 1});
},
- {
- test: function(db) {
- assert.writeOK(
- db.currentop_query.update({a: 1, $comment: "currentop_query"},
- {$inc: {b: 1}},
- {collation: {locale: "fr"}, multi: true}));
- },
- operation: "update",
- planSummary: "COLLSCAN",
- currentOpFilter:
- (isLocalMongosCurOp
- ? {"command.update": coll.getName(), "command.ordered": true}
- : {
- "command.q.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- })
+ command: "findandmodify",
+ planSummary: "IXSCAN { _id: 1 }",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
}
- ];
-
- testList.forEach(confirmCurrentOpContents);
-
- //
- // Confirm currentOp contains collation for find command.
- //
- if (readMode === "commands") {
- confirmCurrentOpContents({
- test: function(db) {
- assert.eq(db.currentop_query.find({a: 1})
- .comment("currentop_query")
- .collation({locale: "fr"})
- .itcount(),
- 1);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
- });
- }
-
- //
- // Confirm currentOp content for the $geoNear aggregation stage.
- //
- dropAndRecreateTestCollection();
- for (let i = 0; i < 10; ++i) {
- assert.commandWorked(
- coll.insert({a: i, loc: {type: "Point", coordinates: [i, i]}}));
- }
- assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
- confirmCurrentOpContents({
+ },
+ {
test: function(db) {
- assert.commandWorked(db.runCommand({
- aggregate: "currentop_query",
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [1, 1]},
- distanceField: "dist",
- spherical: true,
- query: {$comment: "currentop_query"},
- }
- }],
- collation: {locale: "fr"},
- comment: "currentop_query",
+ assert.commandWorked(db.currentop_query.mapReduce(() => {}, (a, b) => {}, {
+ query: {$comment: "currentop_query"},
+ out: {inline: 1},
}));
},
- planSummary: "GEO_NEAR_2DSPHERE { loc: \"2dsphere\" }",
- currentOpFilter: commandOrOriginatingCommand({
- "aggregate": {$exists: true},
- "pipeline.0.$geoNear.query.$comment": "currentop_query",
- "collation": {locale: "fr"},
- "comment": "currentop_query",
- },
- isRemoteShardCurOp)
- });
-
- //
- // Confirm currentOp content for getMore. This case tests command and legacy getMore
- // with originating find and aggregate commands.
- //
- dropAndRecreateTestCollection();
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- const originatingCommands = {
- find:
- {find: "currentop_query", filter: {}, comment: "currentop_query", batchSize: 0},
- aggregate: {
- aggregate: "currentop_query",
- pipeline: [{$match: {}}],
- comment: "currentop_query",
- cursor: {batchSize: 0}
+ command: "mapreduce",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "ns": /^currentop_query.*currentop_query/
}
- };
-
- for (let cmdName in originatingCommands) {
- const cmdObj = originatingCommands[cmdName];
- const cmdRes = testDB.runCommand(cmdObj);
- assert.commandWorked(cmdRes);
-
- TestData.commandResult = cmdRes;
-
- // If this is a non-localOps test running via mongoS, then the cursorID we obtained
- // above is the ID of the mongoS cursor, and will not match the IDs of any of the
- // individual shard cursors in the currentOp output. We therefore don't perform an
- // exact match on 'command.getMore', but only verify that the cursor ID is non-zero.
- const filter = {
- "command.getMore":
- (isRemoteShardCurOp ? {$gt: 0} : TestData.commandResult.cursor.id),
- [`cursor.originatingCommand.${cmdName}`]:
- {$exists: true}, "cursor.originatingCommand.comment": "currentop_query"
- };
-
- confirmCurrentOpContents({
- test: function(db) {
- const cursor = new DBCommandCursor(db, TestData.commandResult, 5);
- assert.eq(cursor.itcount(), 10);
- },
- command: "getMore",
- planSummary: "COLLSCAN",
- currentOpFilter: filter
- });
-
- delete TestData.commandResult;
- }
-
- //
- // Confirm that currentOp displays upconverted getMore and originatingCommand in the
- // case of a legacy query.
- //
- if (readMode === "legacy") {
- let filter = {
- "command.getMore": {$gt: 0},
- "command.collection": "currentop_query",
- "command.batchSize": 2,
- "cursor.originatingCommand.find": "currentop_query",
- "cursor.originatingCommand.ntoreturn": 2,
- "cursor.originatingCommand.comment": "currentop_query"
- };
-
- confirmCurrentOpContents({
- test: function(db) {
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Temporarily disable hanging yields so that we can iterate the first
- // batch.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
-
- let cursor =
- db.currentop_query.find({}).comment("currentop_query").batchSize(2);
-
- // Exhaust the current batch so that the next request will force a getMore.
- while (cursor.objsLeftInBatch() > 0) {
- cursor.next();
- }
-
- // Set yields to hang so that we can check currentOp output.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: db.currentop_query.getFullName()}
- }
- });
-
- assert.eq(cursor.itcount(), 8);
- },
- operation: "getmore",
- planSummary: "COLLSCAN",
- currentOpFilter: filter
- });
+ },
+ {
+ test: function(db) {
+ assert.writeOK(db.currentop_query.remove({a: 2, $comment: "currentop_query"},
+ {collation: {locale: "fr"}}));
+ },
+ operation: "remove",
+ planSummary: "COLLSCAN",
+ currentOpFilter: (isLocalMongosCurOp
+ ? {"command.delete": coll.getName(), "command.ordered": true}
+ : {
+ "command.q.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ })
+ },
+ {
+ test: function(db) {
+ assert.writeOK(
+ db.currentop_query.update({a: 1, $comment: "currentop_query"},
+ {$inc: {b: 1}},
+ {collation: {locale: "fr"}, multi: true}));
+ },
+ operation: "update",
+ planSummary: "COLLSCAN",
+ currentOpFilter: (isLocalMongosCurOp
+ ? {"command.update": coll.getName(), "command.ordered": true}
+ : {
+ "command.q.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ })
}
+ ];
- //
- // Confirm that a legacy query whose filter contains a field named 'query' appears as
- // expected in currentOp. This test ensures that upconverting a legacy query correctly
- // identifies this as a user field rather than a wrapped filter spec.
- //
- if (readMode === "legacy") {
- confirmCurrentOpContents({
- test: function(db) {
- assert.eq(
- db.currentop_query.find({query: "foo", $comment: "currentop_query"})
- .itcount(),
- 0);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.filter.$comment": "currentop_query",
- "command.filter.query": "foo"
- }
- });
- }
- }
-
- /**
- * Runs a set of tests to verify that currentOp will serialize objects exceeding ~1000 bytes
- * to string when the 'truncateOps' parameter is set.
- */
- function runTruncationTests() {
- dropAndRecreateTestCollection();
- assert.writeOK(coll.insert({a: 1}));
-
- // When the currentOp command serializes the query object as a string, individual string
- // values inside it are truncated at 150 characters. To test "total length" truncation
- // we need to pass multiple values, each smaller than 150 bytes.
- TestData.queryFilter = {
- "1": "1".repeat(149),
- "2": "2".repeat(149),
- "3": "3".repeat(149),
- "4": "4".repeat(149),
- "5": "5".repeat(149),
- "6": "6".repeat(149),
- "7": "7".repeat(149),
- };
-
- var truncatedQueryString = "^\\{ find: \"currentop_query\", filter: \\{ " +
- "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
- "6: \"6{149}\", 7: \"7+\\.\\.\\.";
-
- let currentOpFilter;
-
- currentOpFilter = {
- "command.$truncated": {$regex: truncatedQueryString},
- "command.comment": "currentop_query"
- };
+ testList.forEach(confirmCurrentOpContents);
+ //
+ // Confirm currentOp contains collation for find command.
+ //
+ if (readMode === "commands") {
confirmCurrentOpContents({
test: function(db) {
- assert.eq(db.currentop_query.find(TestData.queryFilter)
+ assert.eq(db.currentop_query.find({a: 1})
.comment("currentop_query")
+ .collation({locale: "fr"})
.itcount(),
- 0);
+ 1);
},
+ command: "find",
planSummary: "COLLSCAN",
- currentOpFilter: currentOpFilter
+ currentOpFilter:
+ {"command.comment": "currentop_query", "command.collation": {locale: "fr"}}
});
+ }
+
+ //
+ // Confirm currentOp content for the $geoNear aggregation stage.
+ //
+ dropAndRecreateTestCollection();
+ for (let i = 0; i < 10; ++i) {
+ assert.commandWorked(coll.insert({a: i, loc: {type: "Point", coordinates: [i, i]}}));
+ }
+ assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.commandWorked(db.runCommand({
+ aggregate: "currentop_query",
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: {type: "Point", coordinates: [1, 1]},
+ distanceField: "dist",
+ spherical: true,
+ query: {$comment: "currentop_query"},
+ }
+ }],
+ collation: {locale: "fr"},
+ comment: "currentop_query",
+ }));
+ },
+ planSummary: "GEO_NEAR_2DSPHERE { loc: \"2dsphere\" }",
+ currentOpFilter: commandOrOriginatingCommand({
+ "aggregate": {$exists: true},
+ "pipeline.0.$geoNear.query.$comment": "currentop_query",
+ "collation": {locale: "fr"},
+ "comment": "currentop_query",
+ },
+ isRemoteShardCurOp)
+ });
+
+ //
+ // Confirm currentOp content for getMore. This case tests command and legacy getMore
+ // with originating find and aggregate commands.
+ //
+ dropAndRecreateTestCollection();
+ for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+ }
- // Verify that an originatingCommand truncated by currentOp appears as { $truncated:
- // <string>, comment: <string> }.
- const cmdRes = testDB.runCommand({
- find: "currentop_query",
- filter: TestData.queryFilter,
+ const originatingCommands = {
+ find: {find: "currentop_query", filter: {}, comment: "currentop_query", batchSize: 0},
+ aggregate: {
+ aggregate: "currentop_query",
+ pipeline: [{$match: {}}],
comment: "currentop_query",
- batchSize: 0
- });
+ cursor: {batchSize: 0}
+ }
+ };
+
+ for (let cmdName in originatingCommands) {
+ const cmdObj = originatingCommands[cmdName];
+ const cmdRes = testDB.runCommand(cmdObj);
assert.commandWorked(cmdRes);
TestData.commandResult = cmdRes;
- currentOpFilter = {
+ // If this is a non-localOps test running via mongoS, then the cursorID we obtained
+ // above is the ID of the mongoS cursor, and will not match the IDs of any of the
+ // individual shard cursors in the currentOp output. We therefore don't perform an
+ // exact match on 'command.getMore', but only verify that the cursor ID is non-zero.
+ const filter = {
"command.getMore":
(isRemoteShardCurOp ? {$gt: 0} : TestData.commandResult.cursor.id),
- "cursor.originatingCommand.$truncated": {$regex: truncatedQueryString},
+ [`cursor.originatingCommand.${cmdName}`]: {$exists: true},
"cursor.originatingCommand.comment": "currentop_query"
};
confirmCurrentOpContents({
test: function(db) {
- var cursor = new DBCommandCursor(db, TestData.commandResult, 5);
- assert.eq(cursor.itcount(), 0);
+ const cursor = new DBCommandCursor(db, TestData.commandResult, 5);
+ assert.eq(cursor.itcount(), 10);
},
+ command: "getMore",
planSummary: "COLLSCAN",
- currentOpFilter: currentOpFilter
+ currentOpFilter: filter
});
delete TestData.commandResult;
+ }
+
+ //
+ // Confirm that currentOp displays upconverted getMore and originatingCommand in the
+ // case of a legacy query.
+ //
+ if (readMode === "legacy") {
+ let filter = {
+ "command.getMore": {$gt: 0},
+ "command.collection": "currentop_query",
+ "command.batchSize": 2,
+ "cursor.originatingCommand.find": "currentop_query",
+ "cursor.originatingCommand.ntoreturn": 2,
+ "cursor.originatingCommand.comment": "currentop_query"
+ };
+
+ confirmCurrentOpContents({
+ test: function(db) {
+ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+ // Temporarily disable hanging yields so that we can iterate the first
+ // batch.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: db.getSiblingDB("admin"),
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
+
+ let cursor =
+ db.currentop_query.find({}).comment("currentop_query").batchSize(2);
- // Verify that an aggregation truncated by currentOp appears as { $truncated: <string>,
- // comment: <string> } when a comment parameter is present.
- truncatedQueryString =
- "^\\{ aggregate: \"currentop_query\", pipeline: \\[ \\{ \\$match: \\{ " +
- "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
- "6: \"6{149}\", 7: \"7+\\.\\.\\.";
+ // Exhaust the current batch so that the next request will force a getMore.
+ while (cursor.objsLeftInBatch() > 0) {
+ cursor.next();
+ }
- currentOpFilter = commandOrOriginatingCommand(
- {"$truncated": {$regex: truncatedQueryString}, "comment": "currentop_query"},
- isRemoteShardCurOp);
+ // Set yields to hang so that we can check currentOp output.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: db.getSiblingDB("admin"),
+ cmdObj: {
+ configureFailPoint: "setYieldAllLocksHang",
+ mode: "alwaysOn",
+ data: {namespace: db.currentop_query.getFullName()}
+ }
+ });
+
+ assert.eq(cursor.itcount(), 8);
+ },
+ operation: "getmore",
+ planSummary: "COLLSCAN",
+ currentOpFilter: filter
+ });
+ }
+ //
+ // Confirm that a legacy query whose filter contains a field named 'query' appears as
+ // expected in currentOp. This test ensures that upconverting a legacy query correctly
+ // identifies this as a user field rather than a wrapped filter spec.
+ //
+ if (readMode === "legacy") {
confirmCurrentOpContents({
test: function(db) {
- assert.eq(db.currentop_query
- .aggregate([{$match: TestData.queryFilter}],
- {comment: "currentop_query"})
+ assert.eq(db.currentop_query.find({query: "foo", $comment: "currentop_query"})
.itcount(),
0);
},
+ command: "find",
planSummary: "COLLSCAN",
- currentOpFilter: currentOpFilter
+ currentOpFilter:
+ {"command.filter.$comment": "currentop_query", "command.filter.query": "foo"}
});
-
- delete TestData.queryFilter;
}
}
- function currentOpCommand(inputDB, filter, truncatedOps, localOps) {
- return inputDB.currentOp(Object.assign(filter, {$truncateOps: truncatedOps}));
- }
+ /**
+ * Runs a set of tests to verify that currentOp will serialize objects exceeding ~1000 bytes
+ * to string when the 'truncateOps' parameter is set.
+ */
+ function runTruncationTests() {
+ dropAndRecreateTestCollection();
+ assert.writeOK(coll.insert({a: 1}));
+
+ // When the currentOp command serializes the query object as a string, individual string
+ // values inside it are truncated at 150 characters. To test "total length" truncation
+ // we need to pass multiple values, each smaller than 150 bytes.
+ TestData.queryFilter = {
+ "1": "1".repeat(149),
+ "2": "2".repeat(149),
+ "3": "3".repeat(149),
+ "4": "4".repeat(149),
+ "5": "5".repeat(149),
+ "6": "6".repeat(149),
+ "7": "7".repeat(149),
+ };
+
+ var truncatedQueryString = "^\\{ find: \"currentop_query\", filter: \\{ " +
+ "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
+ "6: \"6{149}\", 7: \"7+\\.\\.\\.";
- function currentOpAgg(inputDB, filter, truncatedOps, localOps) {
- return {
- inprog: inputDB.getSiblingDB("admin")
- .aggregate([
- {
- $currentOp: {
- localOps: (localOps || false),
- truncateOps: (truncatedOps || false)
- }
- },
- {$match: filter}
- ])
- .toArray(),
- ok: 1
+ let currentOpFilter;
+
+ currentOpFilter = {
+ "command.$truncated": {$regex: truncatedQueryString},
+ "command.comment": "currentop_query"
};
- }
- for (let connType of[rsConn, mongosConn]) {
- for (let readMode of["commands", "legacy"]) {
- for (let truncatedOps of[false, true]) {
- for (let localOps of[false, true]) {
- // Run all tests using the $currentOp aggregation stage.
- runTests({
- conn: connType,
- readMode: readMode,
- currentOp: currentOpAgg,
- localOps: localOps,
- truncatedOps: truncatedOps
- });
- }
- // Run tests using the currentOp command. The 'localOps' parameter is not supported.
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.eq(db.currentop_query.find(TestData.queryFilter)
+ .comment("currentop_query")
+ .itcount(),
+ 0);
+ },
+ planSummary: "COLLSCAN",
+ currentOpFilter: currentOpFilter
+ });
+
+ // Verify that an originatingCommand truncated by currentOp appears as { $truncated:
+ // <string>, comment: <string> }.
+ const cmdRes = testDB.runCommand({
+ find: "currentop_query",
+ filter: TestData.queryFilter,
+ comment: "currentop_query",
+ batchSize: 0
+ });
+ assert.commandWorked(cmdRes);
+
+ TestData.commandResult = cmdRes;
+
+ currentOpFilter = {
+ "command.getMore": (isRemoteShardCurOp ? {$gt: 0} : TestData.commandResult.cursor.id),
+ "cursor.originatingCommand.$truncated": {$regex: truncatedQueryString},
+ "cursor.originatingCommand.comment": "currentop_query"
+ };
+
+ confirmCurrentOpContents({
+ test: function(db) {
+ var cursor = new DBCommandCursor(db, TestData.commandResult, 5);
+ assert.eq(cursor.itcount(), 0);
+ },
+ planSummary: "COLLSCAN",
+ currentOpFilter: currentOpFilter
+ });
+
+ delete TestData.commandResult;
+
+ // Verify that an aggregation truncated by currentOp appears as { $truncated: <string>,
+ // comment: <string> } when a comment parameter is present.
+ truncatedQueryString =
+ "^\\{ aggregate: \"currentop_query\", pipeline: \\[ \\{ \\$match: \\{ " +
+ "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
+ "6: \"6{149}\", 7: \"7+\\.\\.\\.";
+
+ currentOpFilter = commandOrOriginatingCommand(
+ {"$truncated": {$regex: truncatedQueryString}, "comment": "currentop_query"},
+ isRemoteShardCurOp);
+
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.eq(
+ db.currentop_query
+ .aggregate([{$match: TestData.queryFilter}], {comment: "currentop_query"})
+ .itcount(),
+ 0);
+ },
+ planSummary: "COLLSCAN",
+ currentOpFilter: currentOpFilter
+ });
+
+ delete TestData.queryFilter;
+ }
+}
+
+function currentOpCommand(inputDB, filter, truncatedOps, localOps) {
+ return inputDB.currentOp(Object.assign(filter, {$truncateOps: truncatedOps}));
+}
+
+function currentOpAgg(inputDB, filter, truncatedOps, localOps) {
+ return {
+ inprog:
+ inputDB.getSiblingDB("admin")
+ .aggregate([
+ {
+ $currentOp:
+ {localOps: (localOps || false), truncateOps: (truncatedOps || false)}
+ },
+ {$match: filter}
+ ])
+ .toArray(),
+ ok: 1
+ };
+}
+
+for (let connType of [rsConn, mongosConn]) {
+ for (let readMode of ["commands", "legacy"]) {
+ for (let truncatedOps of [false, true]) {
+ for (let localOps of [false, true]) {
+ // Run all tests using the $currentOp aggregation stage.
runTests({
conn: connType,
readMode: readMode,
- currentOp: currentOpCommand,
- localOps: false,
+ currentOp: currentOpAgg,
+ localOps: localOps,
truncatedOps: truncatedOps
});
}
+ // Run tests using the currentOp command. The 'localOps' parameter is not supported.
+ runTests({
+ conn: connType,
+ readMode: readMode,
+ currentOp: currentOpCommand,
+ localOps: false,
+ truncatedOps: truncatedOps
+ });
}
}
+}
- st.stop();
+st.stop();
})();
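
The reformatted test above is built around a single capture pattern: force queries to hang at a yield point with the 'setYieldAllLocksHang' failpoint, run the operation from a parallel shell, poll currentOp() until the operation becomes visible, then lift the failpoint and join the shell. A minimal standalone sketch of that pattern follows. It is illustrative only: it assumes a mongod started with internalQueryExecYieldIterations=1 (as the full test's fixture arranges) so that even a one-document scan reaches a yield point, and the collection name 'currentop_sketch' is hypothetical.

    // Minimal sketch of the hang-on-yield capture pattern (assumes the server
    // was started with internalQueryExecYieldIterations=1 so the find yields).
    const sketchColl = db.getSiblingDB("test").currentop_sketch;
    assert.commandWorked(sketchColl.insert({a: 1}));

    // Hang operations on this namespace at their next yield point.
    assert.commandWorked(db.adminCommand({
        configureFailPoint: "setYieldAllLocksHang",
        mode: "alwaysOn",
        data: {namespace: sketchColl.getFullName()}
    }));

    // Run the operation to be observed in the background.
    const awaitShell = startParallelShell(function() {
        db.getSiblingDB("test").currentop_sketch.find({a: 1}).comment("sketch").itcount();
    });

    // Poll until the hung operation appears, identified by its comment.
    assert.soon(function() {
        return db.currentOp({"command.comment": "sketch"}).inprog.length === 1;
    });

    // Release the operation and wait for the parallel shell to finish.
    assert.commandWorked(
        db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
    awaitShell();

The full test layers the readMode, truncatedOps, and localOps variants on top of this skeleton, but the control flow is the same throughout.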
diff --git a/jstests/noPassthrough/currentop_transaction_metrics.js b/jstests/noPassthrough/currentop_transaction_metrics.js
index d676167c2c2..b65c39963f7 100644
--- a/jstests/noPassthrough/currentop_transaction_metrics.js
+++ b/jstests/noPassthrough/currentop_transaction_metrics.js
@@ -5,68 +5,68 @@
*/
(function() {
- 'use strict';
- load("jstests/core/txns/libs/prepare_helpers.js");
+'use strict';
+load("jstests/core/txns/libs/prepare_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const collName = 'currentop_transaction_metrics';
- const testDB = rst.getPrimary().getDB('test');
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
+const collName = 'currentop_transaction_metrics';
+const testDB = rst.getPrimary().getDB('test');
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
- const session = adminDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase('test');
+const session = adminDB.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase('test');
- session.startTransaction();
- // Run a few operations so that the transaction goes through several active/inactive periods.
- assert.commandWorked(sessionDB[collName].update({}, {a: 1}));
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"}));
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-2"}));
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-3"}));
+session.startTransaction();
+// Run a few operations so that the transaction goes through several active/inactive periods.
+assert.commandWorked(sessionDB[collName].update({}, {a: 1}));
+assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"}));
+assert.commandWorked(sessionDB[collName].insert({_id: "insert-2"}));
+assert.commandWorked(sessionDB[collName].insert({_id: "insert-3"}));
- const transactionFilter = {
- active: false,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: false}
- };
+const transactionFilter = {
+ active: false,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: false}
+};
- let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
- assert.eq(currentOp.length, 1);
+let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
+assert.eq(currentOp.length, 1);
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- let transactionDocument = currentOp[0].transaction;
- assert.gte(transactionDocument.timeOpenMicros,
- transactionDocument.timeActiveMicros + transactionDocument.timeInactiveMicros);
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+let transactionDocument = currentOp[0].transaction;
+assert.gte(transactionDocument.timeOpenMicros,
+ transactionDocument.timeActiveMicros + transactionDocument.timeInactiveMicros);
- // Check that preparing the transaction enables the 'timePreparedMicros' field in currentOp.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// Check that preparing the transaction enables the 'timePreparedMicros' field in currentOp.
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const prepareTransactionFilter = {
- active: false,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: true}
- };
+const prepareTransactionFilter = {
+ active: false,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: true}
+};
- currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
- assert.eq(currentOp.length, 1);
+currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
+assert.eq(currentOp.length, 1);
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- const prepareTransactionDocument = currentOp[0].transaction;
- assert.gte(prepareTransactionDocument.timeOpenMicros,
- prepareTransactionDocument.timeActiveMicros +
- prepareTransactionDocument.timeInactiveMicros);
- assert.gte(prepareTransactionDocument.timePreparedMicros, 0);
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+const prepareTransactionDocument = currentOp[0].transaction;
+assert.gte(
+ prepareTransactionDocument.timeOpenMicros,
+ prepareTransactionDocument.timeActiveMicros + prepareTransactionDocument.timeInactiveMicros);
+assert.gte(prepareTransactionDocument.timePreparedMicros, 0);
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- session.endSession();
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
+session.endSession();
- rst.stopSet();
+rst.stopSet();
})();
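
The metric assertions in this test reduce to one invariant: a transaction's total open time must cover its active plus inactive time, and 'transaction.timePreparedMicros' appears only once the transaction has been prepared. As a hedged sketch, the same invariant can be checked directly against the $currentOp stage; this assumes a replica set primary with an open transaction, with field names as asserted by the test above:

    // Sketch: read idle-transaction metrics via $currentOp and check the
    // time-accounting invariant asserted above.
    const txnOps = db.getSiblingDB("admin")
                       .aggregate([
                           {$currentOp: {allUsers: true, idleSessions: true}},
                           {$match: {"transaction.parameters.txnNumber": {$exists: true}}}
                       ])
                       .toArray();

    txnOps.forEach(function(op) {
        const txn = op.transaction;
        // Open time must account for both active and inactive periods.
        assert.gte(txn.timeOpenMicros, txn.timeActiveMicros + txn.timeInactiveMicros);
    });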
diff --git a/jstests/noPassthrough/cycle_detection_test.js b/jstests/noPassthrough/cycle_detection_test.js
index f708decae79..c4fa17b59b0 100644
--- a/jstests/noPassthrough/cycle_detection_test.js
+++ b/jstests/noPassthrough/cycle_detection_test.js
@@ -2,89 +2,89 @@
* Tests for the Graph#findCycle() method.
*/
(function() {
- 'use strict';
-
- load('jstests/libs/cycle_detection.js'); // for Graph
-
- (function testLinearChainHasNoCycle() {
- const graph = new Graph();
- graph.addEdge('A', 'B');
- graph.addEdge('B', 'C');
- graph.addEdge('C', 'D');
-
- assert.eq([], graph.findCycle());
- })();
-
- (function testGraphWithoutCycleButCommonAncestor() {
- const graph = new Graph();
- graph.addEdge('A', 'B');
- graph.addEdge('A', 'C');
- graph.addEdge('B', 'D');
- graph.addEdge('C', 'D');
-
- assert.eq([], graph.findCycle());
- })();
-
- (function testEmptyGraphHasNoCycle() {
- const graph = new Graph();
- assert.eq([], graph.findCycle());
- })();
-
- (function testGraphWithAllNodesInCycle() {
- const graph = new Graph();
- graph.addEdge(1, 2);
- graph.addEdge(2, 3);
- graph.addEdge(3, 4);
- graph.addEdge(4, 5);
- graph.addEdge(5, 1);
-
- assert.eq([1, 2, 3, 4, 5, 1], graph.findCycle());
- })();
-
- (function testGraphWithSomeNodesNotInCycle() {
- const graph = new Graph();
- graph.addEdge(1, 2);
- graph.addEdge(2, 3);
- graph.addEdge(3, 4);
- graph.addEdge(4, 5);
- graph.addEdge(5, 3);
-
- assert.eq([3, 4, 5, 3], graph.findCycle());
- })();
-
- (function testGraphWithSelfLoopConsideredCycle() {
- const graph = new Graph();
- graph.addEdge(0, 0);
- assert.eq([0, 0], graph.findCycle());
- })();
-
- (function testGraphUsesNonReferentialEquality() {
- const w = {a: new NumberInt(1)};
- const x = {a: new NumberInt(1)};
- const y = {a: new NumberLong(1)};
- const z = {a: 1};
-
- let graph = new Graph();
- graph.addEdge(w, x);
- assert.eq([w, x], graph.findCycle());
-
- graph = new Graph();
- graph.addEdge(w, y);
- assert.eq([], graph.findCycle());
-
- graph = new Graph();
- graph.addEdge(w, z);
- assert.eq([w, z], graph.findCycle());
- })();
-
- (function testGraphMinimizesCycleUsingNonReferentialEquality() {
- const graph = new Graph();
- graph.addEdge({a: 1}, {a: 2});
- graph.addEdge({a: 2}, {a: 3});
- graph.addEdge({a: 3}, {a: 4});
- graph.addEdge({a: 4}, {a: 5});
- graph.addEdge({a: 5}, {a: 3});
-
- assert.eq([{a: 3}, {a: 4}, {a: 5}, {a: 3}], graph.findCycle());
- })();
+'use strict';
+
+load('jstests/libs/cycle_detection.js'); // for Graph
+
+(function testLinearChainHasNoCycle() {
+ const graph = new Graph();
+ graph.addEdge('A', 'B');
+ graph.addEdge('B', 'C');
+ graph.addEdge('C', 'D');
+
+ assert.eq([], graph.findCycle());
+})();
+
+(function testGraphWithoutCycleButCommonAncestor() {
+ const graph = new Graph();
+ graph.addEdge('A', 'B');
+ graph.addEdge('A', 'C');
+ graph.addEdge('B', 'D');
+ graph.addEdge('C', 'D');
+
+ assert.eq([], graph.findCycle());
+})();
+
+(function testEmptyGraphHasNoCycle() {
+ const graph = new Graph();
+ assert.eq([], graph.findCycle());
+})();
+
+(function testGraphWithAllNodesInCycle() {
+ const graph = new Graph();
+ graph.addEdge(1, 2);
+ graph.addEdge(2, 3);
+ graph.addEdge(3, 4);
+ graph.addEdge(4, 5);
+ graph.addEdge(5, 1);
+
+ assert.eq([1, 2, 3, 4, 5, 1], graph.findCycle());
+})();
+
+(function testGraphWithSomeNodesNotInCycle() {
+ const graph = new Graph();
+ graph.addEdge(1, 2);
+ graph.addEdge(2, 3);
+ graph.addEdge(3, 4);
+ graph.addEdge(4, 5);
+ graph.addEdge(5, 3);
+
+ assert.eq([3, 4, 5, 3], graph.findCycle());
+})();
+
+(function testGraphWithSelfLoopConsideredCycle() {
+ const graph = new Graph();
+ graph.addEdge(0, 0);
+ assert.eq([0, 0], graph.findCycle());
+})();
+
+(function testGraphUsesNonReferentialEquality() {
+ const w = {a: new NumberInt(1)};
+ const x = {a: new NumberInt(1)};
+ const y = {a: new NumberLong(1)};
+ const z = {a: 1};
+
+ let graph = new Graph();
+ graph.addEdge(w, x);
+ assert.eq([w, x], graph.findCycle());
+
+ graph = new Graph();
+ graph.addEdge(w, y);
+ assert.eq([], graph.findCycle());
+
+ graph = new Graph();
+ graph.addEdge(w, z);
+ assert.eq([w, z], graph.findCycle());
+})();
+
+(function testGraphMinimizesCycleUsingNonReferentialEquality() {
+ const graph = new Graph();
+ graph.addEdge({a: 1}, {a: 2});
+ graph.addEdge({a: 2}, {a: 3});
+ graph.addEdge({a: 3}, {a: 4});
+ graph.addEdge({a: 4}, {a: 5});
+ graph.addEdge({a: 5}, {a: 3});
+
+ assert.eq([{a: 3}, {a: 4}, {a: 5}, {a: 3}], graph.findCycle());
+})();
})();
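
The Graph helper loaded from jstests/libs/cycle_detection.js is exercised but not shown here. For orientation, a cycle in a directed graph can be recovered with a standard depth-first search that tracks the in-progress path; the sketch below is illustrative and is not the library's implementation (which, as the tests above demonstrate, also compares vertices by value rather than by reference):

    // Illustrative DFS cycle finder over an adjacency map; returns the cycle
    // with its first vertex repeated at the end, or [] when the graph is acyclic.
    function findCycleSketch(adj) {
        const GRAY = 1, BLACK = 2;  // undefined means unvisited
        const color = {};
        const path = [];
        let cycle = [];

        function visit(node) {
            color[node] = GRAY;
            path.push(node);
            for (const next of (adj[node] || [])) {
                if (color[next] === GRAY) {
                    // Back edge: the cycle is the path tail starting at 'next'.
                    cycle = path.slice(path.indexOf(next)).concat([next]);
                    return true;
                }
                if (color[next] === undefined && visit(next)) {
                    return true;
                }
            }
            path.pop();
            color[node] = BLACK;
            return false;
        }

        for (const node of Object.keys(adj)) {
            if (color[node] === undefined && visit(node)) {
                break;
            }
        }
        return cycle;
    }

    assert.eq([3, 4, 5, 3], findCycleSketch({1: [2], 2: [3], 3: [4], 4: [5], 5: [3]}));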
diff --git a/jstests/noPassthrough/data_consistency_checks.js b/jstests/noPassthrough/data_consistency_checks.js
index dcddefaf882..94c44f3e49b 100644
--- a/jstests/noPassthrough/data_consistency_checks.js
+++ b/jstests/noPassthrough/data_consistency_checks.js
@@ -9,193 +9,190 @@
var db;
(function() {
- "use strict";
-
- // We skip doing the data consistency checks while terminating the cluster because they conflict
- // with the counts of the number of times the "dbhash" and "validate" commands are run.
- TestData.skipCollectionAndIndexValidation = true;
- TestData.skipCheckDBHashes = true;
-
- function makePatternForDBHash(dbName) {
- return new RegExp("COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
- "g");
+"use strict";
+
+// We skip doing the data consistency checks while terminating the cluster because they conflict
+// with the counts of the number of times the "dbhash" and "validate" commands are run.
+TestData.skipCollectionAndIndexValidation = true;
+TestData.skipCheckDBHashes = true;
+
+function makePatternForDBHash(dbName) {
+ return new RegExp(
+ "COMMAND.*command " + dbName + "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
+ "g");
+}
+
+function makePatternForValidate(dbName, collName) {
+ return new RegExp("COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
+ collName + "\"",
+ "g");
+}
+
+function countMatches(pattern, output) {
+ assert(pattern.global, "the 'g' flag must be used to find all matches");
+
+ let numMatches = 0;
+ while (pattern.exec(output) !== null) {
+ ++numMatches;
}
-
- function makePatternForValidate(dbName, collName) {
- return new RegExp(
- "COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
- "\"",
- "g");
+ return numMatches;
+}
+
+function runDataConsistencyChecks(testCase) {
+ db = testCase.conn.getDB("test");
+ try {
+ clearRawMongoProgramOutput();
+
+ load("jstests/hooks/run_check_repl_dbhash.js");
+ load("jstests/hooks/run_validate_collections.js");
+
+ // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
+ // will return all of their output.
+ testCase.teardown();
+ return rawMongoProgramOutput();
+ } finally {
+ db = undefined;
}
+}
+
+(function testReplicaSetWithVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ }
+ });
+ rst.startSet();
+ rst.initiateWithNodeZeroAsPrimary();
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
+
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+})();
- function countMatches(pattern, output) {
- assert(pattern.global, "the 'g' flag must be used to find all matches");
-
- let numMatches = 0;
- while (pattern.exec(output) !== null) {
- ++numMatches;
+(function testReplicaSetWithNonVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
}
- return numMatches;
+ });
+ rst.startSet();
+
+ const replSetConfig = rst.getReplSetConfig();
+ for (let i = 1; i < numNodes; ++i) {
+ replSetConfig.members[i].priority = 0;
+ replSetConfig.members[i].votes = 0;
}
+ rst.initiate(replSetConfig);
- function runDataConsistencyChecks(testCase) {
- db = testCase.conn.getDB("test");
- try {
- clearRawMongoProgramOutput();
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
- load("jstests/hooks/run_check_repl_dbhash.js");
- load("jstests/hooks/run_validate_collections.js");
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
- // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
- // will return all of their output.
- testCase.teardown();
- return rawMongoProgramOutput();
- } finally {
- db = undefined;
- }
- }
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+})();
- (function testReplicaSetWithVotingSecondaries() {
- const numNodes = 2;
- const rst = new ReplSetTest({
- nodes: numNodes,
- nodeOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
- rst.startSet();
- rst.initiateWithNodeZeroAsPrimary();
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
- assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
- const output =
- runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
-
- let pattern = makePatternForDBHash("test");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
- })();
-
- (function testReplicaSetWithNonVotingSecondaries() {
- const numNodes = 2;
- const rst = new ReplSetTest({
- nodes: numNodes,
- nodeOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
- rst.startSet();
-
- const replSetConfig = rst.getReplSetConfig();
- for (let i = 1; i < numNodes; ++i) {
- replSetConfig.members[i].priority = 0;
- replSetConfig.members[i].votes = 0;
+(function testShardedClusterWithOneNodeCSRS() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1
+ });
+
+ // We shard a collection in order to guarantee that at least one collection on the "config"
+ // database exists for when we go to run the data consistency checks against the CSRS.
+ st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
+
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ let pattern = makePatternForDBHash("config");
+ assert.eq(0,
+ countMatches(pattern, output),
+ "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+
+ // The choice of using the "config.collections" collection here is mostly arbitrary as the
+ // "config.databases" and "config.chunks" collections are also implicitly created as part of
+ // sharding a collection.
+ pattern = makePatternForValidate("config", "collections");
+ assert.eq(1,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+})();
+
+(function testShardedCluster() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 3,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1,
+ rs: {nodes: 2},
+ rsOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
}
- rst.initiate(replSetConfig);
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
- assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
- const output =
- runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
-
- let pattern = makePatternForDBHash("test");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
- })();
-
- (function testShardedClusterWithOneNodeCSRS() {
- const st = new ShardingTest({
- mongos: 1,
- config: 1,
- configOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- },
- shards: 1
- });
-
- // We shard a collection in order to guarantee that at least one collection on the "config"
- // database exists for when we go to run the data consistency checks against the CSRS.
- st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
-
- const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
-
- let pattern = makePatternForDBHash("config");
- assert.eq(0,
- countMatches(pattern, output),
- "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
-
- // The choice of using the "config.collections" collection here is mostly arbitrary as the
- // "config.databases" and "config.chunks" collections are also implicitly created as part of
- // sharding a collection.
- pattern = makePatternForValidate("config", "collections");
- assert.eq(1,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
- })();
-
- (function testShardedCluster() {
- const st = new ShardingTest({
- mongos: 1,
- config: 3,
- configOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- },
- shards: 1,
- rs: {nodes: 2},
- rsOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
-
- // We shard a collection in order to guarantee that at least one collection on the "config"
- // database exists for when we go to run the data consistency checks against the CSRS.
- st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
- // the replica set shard.
- assert.commandWorked(st.s.getDB("test").mycoll.insert({_id: 0}));
- const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
-
- // The "config" database exists on both the CSRS and the replica set shards due to the
- // "config.transactions" collection.
- let pattern = makePatternForDBHash("config");
- assert.eq(5,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each CSRS node and each replica set shard node in the log output");
-
- // The choice of using the "config.collections" collection here is mostly arbitrary as the
- // "config.databases" and "config.chunks" collections are also implicitly created as part of
- // sharding a collection.
- pattern = makePatternForValidate("config", "collections");
- assert.eq(3,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
-
- pattern = makePatternForDBHash("test");
- assert.eq(2,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each replica set shard node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(2,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each replica set shard node in the log output");
- })();
+ });
+
+ // We shard a collection in order to guarantee that at least one collection on the "config"
+ // database exists for when we go to run the data consistency checks against the CSRS.
+ st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
+ // the replica set shard.
+ assert.commandWorked(st.s.getDB("test").mycoll.insert({_id: 0}));
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ // The "config" database exists on both the CSRS and the replica set shards due to the
+ // "config.transactions" collection.
+ let pattern = makePatternForDBHash("config");
+ assert.eq(5,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each CSRS node and each replica set shard node in the log output");
+
+ // The choice of using the "config.collections" collection here is mostly arbitrary as the
+ // "config.databases" and "config.chunks" collections are also implicitly created as part of
+ // sharding a collection.
+ pattern = makePatternForValidate("config", "collections");
+ assert.eq(3,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
+
+ pattern = makePatternForDBHash("test");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+})();
})();
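
One detail worth noting in countMatches() above: it insists on the 'g' flag because RegExp#exec() only advances lastIndex on a global pattern; without it the loop would rematch the same position forever. A short self-contained illustration:

    // exec() on a global regex walks successive matches via lastIndex.
    const output = "dbhash ... dbhash ... dbHash";
    const pattern = /db[Hh]ash/g;
    let numMatches = 0;
    while (pattern.exec(output) !== null) {
        ++numMatches;
    }
    assert.eq(3, numMatches);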
diff --git a/jstests/noPassthrough/dbhash_capped_collection.js b/jstests/noPassthrough/dbhash_capped_collection.js
index 195f003bea6..adf288bf1e9 100644
--- a/jstests/noPassthrough/dbhash_capped_collection.js
+++ b/jstests/noPassthrough/dbhash_capped_collection.js
@@ -4,52 +4,52 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const db = rst.getPrimary().getDB("test");
-
- // We create a capped collection as well as a non-capped collection and verify that the "capped"
- // field in the dbHash command response only lists the capped one.
- assert.commandWorked(db.runCommand({create: "noncapped"}));
- assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 4096}));
- let res = assert.commandWorked(db.runCommand({dbHash: 1}));
- assert.eq(["capped"], res.capped);
-
- // If the capped collection is excluded from the list of collections to md5sum, then it won't
- // appear in the "capped" field either.
- res = assert.commandWorked(db.runCommand({dbHash: 1, collections: ["noncapped"]}));
- assert.eq([], res.capped);
-
- {
- const session = db.getMongo().startSession();
-
- const hashesDefault = rst.getHashesUsingSessions([session], db.getName());
- const hashesFilterCapped =
- rst.getHashesUsingSessions([session], db.getName(), {filterCapped: true});
- const hashesNoFilterCapped =
- rst.getHashesUsingSessions([session], db.getName(), {filterCapped: false});
-
- assert.eq(["noncapped"],
- Object.keys(hashesFilterCapped[0].collections),
- "capped collection should have been filtered out");
- assert.eq(["capped", "noncapped"],
- Object.keys(hashesNoFilterCapped[0].collections).sort(),
- "capped collection should not have been filtered out");
- assert.eq(hashesDefault[0].collections,
- hashesFilterCapped[0].collections,
- "getHashesUsingSessions() should default to filter out capped collections");
-
- const hashes = rst.getHashes(db.getName());
- assert.eq(hashesNoFilterCapped[0].collections,
- hashes.master.collections,
- "getHashes() should default to not filter out capped collections");
-
- session.endSession();
- }
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const db = rst.getPrimary().getDB("test");
+
+// We create a capped collection as well as a non-capped collection and verify that the "capped"
+// field in the dbHash command response only lists the capped one.
+assert.commandWorked(db.runCommand({create: "noncapped"}));
+assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 4096}));
+let res = assert.commandWorked(db.runCommand({dbHash: 1}));
+assert.eq(["capped"], res.capped);
+
+// If the capped collection is excluded from the list of collections to md5sum, then it won't
+// appear in the "capped" field either.
+res = assert.commandWorked(db.runCommand({dbHash: 1, collections: ["noncapped"]}));
+assert.eq([], res.capped);
+
+{
+ const session = db.getMongo().startSession();
+
+ const hashesDefault = rst.getHashesUsingSessions([session], db.getName());
+ const hashesFilterCapped =
+ rst.getHashesUsingSessions([session], db.getName(), {filterCapped: true});
+ const hashesNoFilterCapped =
+ rst.getHashesUsingSessions([session], db.getName(), {filterCapped: false});
+
+ assert.eq(["noncapped"],
+ Object.keys(hashesFilterCapped[0].collections),
+ "capped collection should have been filtered out");
+ assert.eq(["capped", "noncapped"],
+ Object.keys(hashesNoFilterCapped[0].collections).sort(),
+ "capped collection should not have been filtered out");
+ assert.eq(hashesDefault[0].collections,
+ hashesFilterCapped[0].collections,
+ "getHashesUsingSessions() should default to filter out capped collections");
+
+ const hashes = rst.getHashes(db.getName());
+ assert.eq(hashesNoFilterCapped[0].collections,
+ hashes.master.collections,
+ "getHashes() should default to not filter out capped collections");
+
+ session.endSession();
+}
+
+rst.stopSet();
})();
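
For reference, the dbHash response shapes this test depends on look roughly as follows. This is a hedged sketch against the 'test' database as populated above; auxiliary fields in the response vary by server version:

    // Full-database hash: per-collection md5s plus the "capped" listing.
    let res = assert.commandWorked(db.runCommand({dbHash: 1}));
    printjson(res.collections);  // {capped: "<md5>", noncapped: "<md5>"}
    printjson(res.capped);       // ["capped"]

    // Restricting the hashed collections also restricts the "capped" listing.
    res = assert.commandWorked(db.runCommand({dbHash: 1, collections: ["noncapped"]}));
    assert.eq([], res.capped);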
diff --git a/jstests/noPassthrough/devnull.js b/jstests/noPassthrough/devnull.js
index c2e09279b5e..2244875efc9 100644
--- a/jstests/noPassthrough/devnull.js
+++ b/jstests/noPassthrough/devnull.js
@@ -1,13 +1,13 @@
(function() {
- var mongo = MongoRunner.runMongod({storageEngine: "devnull"});
+var mongo = MongoRunner.runMongod({storageEngine: "devnull"});
- db = mongo.getDB("test");
+db = mongo.getDB("test");
- res = db.foo.insert({x: 1});
- assert.eq(1, res.nInserted, tojson(res));
+res = db.foo.insert({x: 1});
+assert.eq(1, res.nInserted, tojson(res));
- // Skip collection validation during stopMongod if invalid storage engine.
- TestData.skipCollectionAndIndexValidation = true;
+// Skip collection validation during stopMongod if invalid storage engine.
+TestData.skipCollectionAndIndexValidation = true;
- MongoRunner.stopMongod(mongo);
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/directoryperdb.js b/jstests/noPassthrough/directoryperdb.js
index ce123ae08fb..56fe3c1f645 100644
--- a/jstests/noPassthrough/directoryperdb.js
+++ b/jstests/noPassthrough/directoryperdb.js
@@ -8,39 +8,39 @@
*/
(function() {
- 'use strict';
+'use strict';
- var baseDir = "jstests_directoryperdb";
- var dbpath = MongoRunner.dataPath + baseDir + "/";
+var baseDir = "jstests_directoryperdb";
+var dbpath = MongoRunner.dataPath + baseDir + "/";
- var isDirectoryPerDBSupported =
- jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine;
+var isDirectoryPerDBSupported =
+ jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine;
- var m = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: ''});
+var m = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: ''});
- if (!isDirectoryPerDBSupported) {
- assert.isnull(m, 'storage engine without directoryperdb support should fail to start up');
- return;
- } else {
- assert(m, 'storage engine with directoryperdb support failed to start up');
- }
+if (!isDirectoryPerDBSupported) {
+ assert.isnull(m, 'storage engine without directoryperdb support should fail to start up');
+ return;
+} else {
+ assert(m, 'storage engine with directoryperdb support failed to start up');
+}
- var db = m.getDB("foo");
- db.bar.insert({x: 1});
- assert.eq(1, db.bar.count());
+var db = m.getDB("foo");
+db.bar.insert({x: 1});
+assert.eq(1, db.bar.count());
- db.adminCommand({fsync: 1});
- var dbpathFiles = listFiles(dbpath);
- var files = dbpathFiles.filter(function(z) {
- return z.name.endsWith("/foo");
- });
- assert.eq(1, files.length, 'dbpath does not contain "foo" directory: ' + tojson(dbpathFiles));
+db.adminCommand({fsync: 1});
+var dbpathFiles = listFiles(dbpath);
+var files = dbpathFiles.filter(function(z) {
+ return z.name.endsWith("/foo");
+});
+assert.eq(1, files.length, 'dbpath does not contain "foo" directory: ' + tojson(dbpathFiles));
- files = listFiles(files[0].name);
- assert(files.length > 0);
+files = listFiles(files[0].name);
+assert(files.length > 0);
- MongoRunner.stopMongod(m);
+MongoRunner.stopMongod(m);
- // Subsequent attempt to start server using same dbpath without directoryperdb should fail.
- assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true}));
+// Subsequent attempt to start server using same dbpath without directoryperdb should fail.
+assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true}));
}());
diff --git a/jstests/noPassthrough/disable_majority_reads_restart.js b/jstests/noPassthrough/disable_majority_reads_restart.js
index 596eabad052..0d21f0d07f3 100644
--- a/jstests/noPassthrough/disable_majority_reads_restart.js
+++ b/jstests/noPassthrough/disable_majority_reads_restart.js
@@ -5,78 +5,78 @@
* requires_wiredtiger]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- // Insert a document and ensure it is in the stable checkpoint by restarting.
- let coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- rst.stopSet(undefined, true);
- rst.startSet(undefined, true);
+// Insert a document and ensure it is in the stable checkpoint by restarting.
+let coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+rst.stopSet(undefined, true);
+rst.startSet(undefined, true);
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+assert.commandWorked(
+ rst.getPrimary().adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
- // Insert a document that will not be in a stable checkpoint.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.insert({_id: 1}));
+// Insert a document that will not be in a stable checkpoint.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.insert({_id: 1}));
- // Restart the node with enableMajorityReadConcern:false.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:false.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // Both inserts should be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
- let oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+// Both inserts should be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
+let oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- // Restart the node with enableMajorityReadConcern:false without adding any documents.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:false without adding any documents.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // Both inserts should still be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
- oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+// Both inserts should still be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
+oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- // Insert another document.
- assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+// Insert another document.
+assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- // Restart the node with enableMajorityReadConcern:false.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:false.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // All three inserts should be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
- oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
+// All three inserts should be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
+oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
- // Restart the node with enableMajorityReadConcern:true.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node once more with enableMajorityReadConcern:false.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // All three inserts should still be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
- oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
+// All three inserts should still be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
+oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
- rst.stopSet();
+rst.stopSet();
})();
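
The failpoint bracket used here follows a standard pattern; a sketch, assuming conn is an open connection and reusing the failpoint name from the test above:

    // Enable, run the operations the failpoint should affect, then disable.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
    // ... writes made here stay out of the majority snapshot ...
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "disableSnapshotting", mode: "off"}));
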
diff --git a/jstests/noPassthrough/disabled_test_parameters.js b/jstests/noPassthrough/disabled_test_parameters.js
index 948883278ad..38ebed92310 100644
--- a/jstests/noPassthrough/disabled_test_parameters.js
+++ b/jstests/noPassthrough/disabled_test_parameters.js
@@ -1,44 +1,44 @@
// Test that test-only set parameters are disabled.
(function() {
- 'use strict';
+'use strict';
- function assertFails(opts) {
- assert.eq(null, MongoRunner.runMongod(opts), "Mongod startup up");
- }
+function assertFails(opts) {
+    assert.eq(null, MongoRunner.runMongod(opts), "Mongod started up unexpectedly");
+}
- function assertStarts(opts) {
- const mongod = MongoRunner.runMongod(opts);
- assert(mongod, "Mongod startup up");
- MongoRunner.stopMongod(mongod);
- }
+function assertStarts(opts) {
+ const mongod = MongoRunner.runMongod(opts);
+    assert(mongod, "Mongod failed to start up");
+ MongoRunner.stopMongod(mongod);
+}
- setJsTestOption('enableTestCommands', false);
+setJsTestOption('enableTestCommands', false);
- // enableTestCommands not specified.
- assertFails({
+// enableTestCommands not specified.
+assertFails({
+ 'setParameter': {
+ enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
+ },
+});
+
+// enableTestCommands specified as truthy.
+['1', 'true'].forEach(v => {
+ assertStarts({
'setParameter': {
+ enableTestCommands: v,
enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
},
});
+});
- // enableTestCommands specified as truthy.
- ['1', 'true'].forEach(v => {
- assertStarts({
- 'setParameter': {
- enableTestCommands: v,
- enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
- },
- });
- });
-
- // enableTestCommands specified as falsy.
- ['0', 'false'].forEach(v => {
- assertFails({
- 'setParameter': {
- enableTestCommands: v,
- enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
- },
- });
+// enableTestCommands specified as falsy.
+['0', 'false'].forEach(v => {
+ assertFails({
+ 'setParameter': {
+ enableTestCommands: v,
+ enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
+ },
});
+});
}());
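
A condensed sketch of the startup behavior under test; the parameter name is the test-only parameter used above, everything else is illustrative:

    setJsTestOption('enableTestCommands', false);
    // With test commands disabled, a test-only setParameter aborts mongod startup.
    assert.eq(null, MongoRunner.runMongod({
        setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false'}
    }));
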
diff --git a/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js b/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js
index 0e6d4cdbfb2..98f9bd41dfd 100644
--- a/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js
+++ b/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js
@@ -1,28 +1,28 @@
// Confirms that there's no attempt to drop a temp collection after $out is performed.
(function() {
- "use strict";
+"use strict";
- // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
- // of data logged for each op.
- TestData.skipGossipingClusterTime = true;
+// Prevent the mongo shell from gossiping its cluster time, since gossiping increases the
+// amount of data logged for each op.
+TestData.skipGossipingClusterTime = true;
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- const coll = testDB.do_not_drop_coll_after_succesful_out;
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("test");
+const coll = testDB.do_not_drop_coll_after_succesful_out;
- assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
- assert.commandWorked(testDB.setLogLevel(2, "command"));
- assert.commandWorked(testDB.adminCommand({clearLog: "global"}));
+assert.commandWorked(testDB.setLogLevel(2, "command"));
+assert.commandWorked(testDB.adminCommand({clearLog: "global"}));
- coll.aggregate([{$out: coll.getName() + "_out"}]);
- const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
+coll.aggregate([{$out: coll.getName() + "_out"}]);
+const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
- for (let i = 0; i < log.length; ++i) {
- const line = log[i];
- assert.eq(line.indexOf("drop test.tmp.agg_out"), -1, line);
- }
+for (let i = 0; i < log.length; ++i) {
+ const line = log[i];
+ assert.eq(line.indexOf("drop test.tmp.agg_out"), -1, line);
+}
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
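
The scan over getLog output above can be expressed more compactly; an equivalent sketch using the same interface:

    const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
    assert(!log.some(line => line.includes("drop test.tmp.agg_out")),
           "unexpected drop of the $out temp collection");
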
diff --git a/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js b/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js
index 12fd52a09f2..cfb1d102019 100644
--- a/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js
+++ b/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js
@@ -6,61 +6,60 @@
* @tags: [requires_persistence, requires_replication, requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "indexRebuild";
- const collName = "coll";
+const dbName = "indexRebuild";
+const collName = "coll";
- const rst = new ReplSetTest({
- name: "doNotRebuildIndexesBeforeRepair",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "doNotRebuildIndexesBeforeRepair",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
- // Only snapshotting storage engines can pause advancing the stable timestamp allowing us
- // to get into a state where indexes exist, but the underlying tables were dropped.
- rst.stopSet();
- return;
- }
+if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+ // Only snapshotting storage engines can pause advancing the stable timestamp allowing us
+ // to get into a state where indexes exist, but the underlying tables were dropped.
+ rst.stopSet();
+ return;
+}
- let coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
- assert.eq(3, coll.getIndexes().length);
- rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
+let coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
+assert.eq(3, coll.getIndexes().length);
+rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
- // Lock the index entries into a stable checkpoint by shutting down.
- rst.stopSet(undefined, true);
- rst.startSet(undefined, true);
+// Lock the index entries into a stable checkpoint by shutting down.
+rst.stopSet(undefined, true);
+rst.startSet(undefined, true);
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- // Dropping the index would normally modify the collection metadata and drop the
- // table. Because we're not advancing the stable timestamp and we're going to crash the
- // server, the catalog change won't take effect, but ident being dropped will.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.dropIndexes());
- rst.awaitReplication();
+// Dropping the index would normally modify the collection metadata and drop the
+// table. Because we're not advancing the stable timestamp and we're going to crash the
+// server, the catalog change won't take effect, but the ident drop will.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.dropIndexes());
+rst.awaitReplication();
- let primaryDbpath = rst.getPrimary().dbpath;
- let primaryPort = rst.getPrimary().port;
- rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+let primaryDbpath = rst.getPrimary().dbpath;
+let primaryPort = rst.getPrimary().port;
+rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // This should succeed in rebuilding the indexes, but only after the databases have been
- // repaired.
- assert.eq(
- 0, runMongoProgram("mongod", "--repair", "--port", primaryPort, "--dbpath", primaryDbpath));
+// This should succeed in rebuilding the indexes, but only after the databases have been
+// repaired.
+assert.eq(0,
+ runMongoProgram("mongod", "--repair", "--port", primaryPort, "--dbpath", primaryDbpath));
- // Restarting the replica set would roll back the index drop. Instead we want to start a
- // standalone and verify that repair rebuilt the indexes.
- let mongod = MongoRunner.runMongod({dbpath: primaryDbpath, noCleanData: true});
- assert.eq(3, mongod.getDB(dbName)[collName].getIndexes().length);
+// Restarting the replica set would roll back the index drop. Instead, we want to start a
+// standalone and verify that repair rebuilt the indexes.
+let mongod = MongoRunner.runMongod({dbpath: primaryDbpath, noCleanData: true});
+assert.eq(3, mongod.getDB(dbName)[collName].getIndexes().length);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/noPassthrough/document_count_functions.js b/jstests/noPassthrough/document_count_functions.js
index c8accb8c5c3..c1dd15ee591 100644
--- a/jstests/noPassthrough/document_count_functions.js
+++ b/jstests/noPassthrough/document_count_functions.js
@@ -3,58 +3,56 @@
* Tests the countDocuments and estimatedDocumentCount commands.
*/
(function() {
- "use strict";
+"use strict";
- const standalone = MongoRunner.runMongod();
- const dbName = "test";
- const db = standalone.getDB(dbName);
- const collName = "document_count_functions";
- const coll = db.getCollection(collName);
+const standalone = MongoRunner.runMongod();
+const dbName = "test";
+const db = standalone.getDB(dbName);
+const collName = "document_count_functions";
+const coll = db.getCollection(collName);
- coll.drop();
+coll.drop();
- assert.commandWorked(coll.insert({i: 1, j: 1}));
- assert.commandWorked(coll.insert({i: 2, j: 1}));
- assert.commandWorked(coll.insert({i: 2, j: 2}));
+assert.commandWorked(coll.insert({i: 1, j: 1}));
+assert.commandWorked(coll.insert({i: 2, j: 1}));
+assert.commandWorked(coll.insert({i: 2, j: 2}));
- // Base case: Pass a valid query into countDocuments without any extra options.
- assert.eq(1, coll.countDocuments({i: 1}));
- assert.eq(2, coll.countDocuments({i: 2}));
+// Base case: Pass a valid query into countDocuments without any extra options.
+assert.eq(1, coll.countDocuments({i: 1}));
+assert.eq(2, coll.countDocuments({i: 2}));
- // Base case: Call estimatedDocumentCount without any extra options.
- assert.eq(3, coll.estimatedDocumentCount());
+// Base case: Call estimatedDocumentCount without any extra options.
+assert.eq(3, coll.estimatedDocumentCount());
- assert.commandWorked(coll.insert({i: 1, j: 2}));
- assert.commandWorked(coll.insert({i: 1, j: 3}));
- assert.commandWorked(coll.insert({i: 1, j: 4}));
+assert.commandWorked(coll.insert({i: 1, j: 2}));
+assert.commandWorked(coll.insert({i: 1, j: 3}));
+assert.commandWorked(coll.insert({i: 1, j: 4}));
- // Limit case: Limit the number of documents to count. There are 4 {i: 1} documents,
- // but we will set the limit to 3.
- assert.eq(3, coll.countDocuments({i: 1}, {limit: 3}));
+// Limit case: Limit the number of documents to count. There are 4 {i: 1} documents,
+// but we will set the limit to 3.
+assert.eq(3, coll.countDocuments({i: 1}, {limit: 3}));
- // Skip case: Skip a certain number of documents for the count. We will skip 2, meaning
- // that we will have 2 left.
- assert.eq(2, coll.countDocuments({i: 1}, {skip: 2}));
+// Skip case: Skip a certain number of documents for the count. We will skip 2, meaning
+// that we will have 2 left.
+assert.eq(2, coll.countDocuments({i: 1}, {skip: 2}));
- assert.commandWorked(coll.ensureIndex({i: 1}));
+assert.commandWorked(coll.ensureIndex({i: 1}));
- // Aggregate stage case: Add an option that gets added as an aggregation argument.
- assert.eq(4, coll.countDocuments({i: 1}, {hint: {i: 1}}));
+// Aggregate stage case: Add an option that gets added as an aggregation argument.
+assert.eq(4, coll.countDocuments({i: 1}, {hint: {i: 1}}));
- // Set fail point to make sure estimatedDocumentCount times out.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}));
+// Set fail point to make sure estimatedDocumentCount times out.
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}));
- // maxTimeMS case: Expect an error if an operation times out.
- assert.commandFailedWithCode(assert.throws(function() {
- coll.estimatedDocumentCount({maxTimeMS: 100});
- }),
- ErrorCodes.MaxTimeMSExpired);
+// maxTimeMS case: Expect an error if an operation times out.
+assert.commandFailedWithCode(assert.throws(function() {
+ coll.estimatedDocumentCount({maxTimeMS: 100});
+ }),
+ ErrorCodes.MaxTimeMSExpired);
- // Disable fail point.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'off'}));
-
- MongoRunner.stopMongod(standalone);
+// Disable fail point.
+assert.commandWorked(db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'off'}));
+MongoRunner.stopMongod(standalone);
})();
\ No newline at end of file
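
For context, a sketch of how the two shell helpers under test map onto server operations (documented behavior, not the helper source):

    // Exact, filter-aware, aggregation-based:
    //   [{$match: filter}, {$skip: ...}, {$limit: ...}, {$group: {_id: null, n: {$sum: 1}}}]
    const n = coll.countDocuments({i: 1}, {skip: 2, limit: 3});
    // Approximate, filter-less, metadata-based -- which is why the
    // maxTimeAlwaysTimeOut failpoint above is used to exercise its maxTimeMS handling.
    const est = coll.estimatedDocumentCount();
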
diff --git a/jstests/noPassthrough/drop_connections_replSet.js b/jstests/noPassthrough/drop_connections_replSet.js
index d8e07397afe..b9ec6c93368 100644
--- a/jstests/noPassthrough/drop_connections_replSet.js
+++ b/jstests/noPassthrough/drop_connections_replSet.js
@@ -4,51 +4,51 @@
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 3});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- rst.awaitSecondaryNodes();
-
- function getConnPoolHosts() {
- const ret = primary.adminCommand({connPoolStats: 1});
- assert.commandWorked(ret);
- jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
- return ret.hosts;
- }
-
- // To test the dropConnections command, first remove the secondary. This should have no effect
- // on the existing connection pool, but it'll prevent the primary from reconnecting to it after
- // dropConnections. Then, execute dropConnections and check that the primary has 0 connections
- // to the secondary.
- const cfg = primary.getDB('local').system.replset.findOne();
- const memberHost = cfg.members[2].host;
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- const removedMember = cfg.members.splice(2, 1);
- assert.eq(removedMember[0].host, memberHost);
- cfg.version++;
-
- jsTestLog("Reconfiguring to omit " + memberHost);
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
-
- // Reconfig did not affect the connection pool
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- // Test dropConnections
- jsTestLog("Dropping connections to " + memberHost);
- assert.commandWorked(primary.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
- assert.soon(() => {
- return !(memberHost in getConnPoolHosts());
- });
-
- // Need to re-add removed node, or the test complains about the replset config
- cfg.members.push(removedMember[0]);
- cfg.version++;
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+rst.awaitSecondaryNodes();
+
+function getConnPoolHosts() {
+ const ret = primary.adminCommand({connPoolStats: 1});
+ assert.commandWorked(ret);
+ jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
+ return ret.hosts;
+}
+
+// To test the dropConnections command, first remove the secondary. This should have no effect
+// on the existing connection pool, but it'll prevent the primary from reconnecting to it after
+// dropConnections. Then, execute dropConnections and check that the primary has 0 connections
+// to the secondary.
+const cfg = primary.getDB('local').system.replset.findOne();
+const memberHost = cfg.members[2].host;
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+const removedMember = cfg.members.splice(2, 1);
+assert.eq(removedMember[0].host, memberHost);
+cfg.version++;
+
+jsTestLog("Reconfiguring to omit " + memberHost);
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+
+// Reconfig did not affect the connection pool
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+// Test dropConnections
+jsTestLog("Dropping connections to " + memberHost);
+assert.commandWorked(primary.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
+assert.soon(() => {
+ return !(memberHost in getConnPoolHosts());
+});
+
+// Need to re-add the removed node, or the test complains about the replset config
+cfg.members.push(removedMember[0]);
+cfg.version++;
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+
+rst.stopSet();
})();
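
The command shape being exercised, as a sketch; the host string below is a placeholder, and hostAndPort takes "host:port" strings:

    // Drop pooled connections to one member, then wait for its pool entry to vanish.
    assert.commandWorked(primary.adminCommand(
        {dropConnections: 1, hostAndPort: ["node2.example.net:27017"]}));
    assert.soon(() => {
        const hosts = assert.commandWorked(primary.adminCommand({connPoolStats: 1})).hosts;
        return !("node2.example.net:27017" in hosts);
    });
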
diff --git a/jstests/noPassthrough/drop_connections_sharded.js b/jstests/noPassthrough/drop_connections_sharded.js
index 7d2f605946e..205992b2dfc 100644
--- a/jstests/noPassthrough/drop_connections_sharded.js
+++ b/jstests/noPassthrough/drop_connections_sharded.js
@@ -4,49 +4,49 @@
*/
(function() {
- "use strict";
-
- const st = new ShardingTest({
- config: {nodes: 1},
- shards: 1,
- rs0: {nodes: 3},
- mongos: 1,
- });
- const mongos = st.s0;
- const rst = st.rs0;
- const primary = rst.getPrimary();
-
- mongos.adminCommand({multicast: {ping: 0}});
-
- function getConnPoolHosts() {
- const ret = mongos.adminCommand({connPoolStats: 1});
- assert.commandWorked(ret);
- jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
- return ret.hosts;
- }
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const memberHost = cfg.members[2].host;
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- const removedMember = cfg.members.splice(2, 1);
- assert.eq(removedMember[0].host, memberHost);
- cfg.version++;
-
- jsTestLog("Reconfiguring to omit " + memberHost);
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- jsTestLog("Dropping connections to " + memberHost);
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
- assert.soon(() => {
- return !(memberHost in getConnPoolHosts());
- });
-
- // need to re-add removed node or test complain about the replset config
- cfg.members.push(removedMember[0]);
- cfg.version++;
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({
+ config: {nodes: 1},
+ shards: 1,
+ rs0: {nodes: 3},
+ mongos: 1,
+});
+const mongos = st.s0;
+const rst = st.rs0;
+const primary = rst.getPrimary();
+
+mongos.adminCommand({multicast: {ping: 0}});
+
+function getConnPoolHosts() {
+ const ret = mongos.adminCommand({connPoolStats: 1});
+ assert.commandWorked(ret);
+ jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
+ return ret.hosts;
+}
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const memberHost = cfg.members[2].host;
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+const removedMember = cfg.members.splice(2, 1);
+assert.eq(removedMember[0].host, memberHost);
+cfg.version++;
+
+jsTestLog("Reconfiguring to omit " + memberHost);
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+jsTestLog("Dropping connections to " + memberHost);
+assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
+assert.soon(() => {
+ return !(memberHost in getConnPoolHosts());
+});
+
+// Need to re-add the removed node, or the test complains about the replset config
+cfg.members.push(removedMember[0]);
+cfg.version++;
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+
+st.stop();
})();
diff --git a/jstests/noPassthrough/drop_view_does_not_take_database_X.js b/jstests/noPassthrough/drop_view_does_not_take_database_X.js
index 02efa840085..69cafb65f58 100644
--- a/jstests/noPassthrough/drop_view_does_not_take_database_X.js
+++ b/jstests/noPassthrough/drop_view_does_not_take_database_X.js
@@ -5,29 +5,29 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- const conn = MongoRunner.runMongod({});
- const db = conn.getDB("test");
+const conn = MongoRunner.runMongod({});
+const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- assert.commandWorked(db.createView("view", "a", []));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.createView("view", "a", []));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "alwaysOn"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "alwaysOn"}));
- // This only holds a database IX lock.
- const awaitDrop =
- startParallelShell(() => assert(db.getSiblingDB("test")["view"].drop()), conn.port);
- checkLog.contains(conn, "hangDuringDropCollection fail point enabled");
+// This only holds a database IX lock.
+const awaitDrop =
+ startParallelShell(() => assert(db.getSiblingDB("test")["view"].drop()), conn.port);
+checkLog.contains(conn, "hangDuringDropCollection fail point enabled");
- // This takes a database IX lock and should not be blocked.
- assert.commandWorked(db.runCommand({insert: "a", documents: [{y: 1}]}));
+// This takes a database IX lock and should not be blocked.
+assert.commandWorked(db.runCommand({insert: "a", documents: [{y: 1}]}));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "off"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "off"}));
- awaitDrop();
- MongoRunner.stopMongod(conn);
+awaitDrop();
+MongoRunner.stopMongod(conn);
})();
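
The hang-on-failpoint idiom above generalizes; a sketch assuming the failpoint is already enabled and jstests/libs/check_log.js is loaded:

    // Park an operation on a failpoint, prove concurrent work is unaffected, resume.
    const awaitOp = startParallelShell(
        () => assert(db.getSiblingDB("test")["view"].drop()), conn.port);
    checkLog.contains(conn, "hangDuringDropCollection fail point enabled");
    // ... assertions that other operations are not blocked go here ...
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "hangDuringDropCollection", mode: "off"}));
    awaitOp();
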
diff --git a/jstests/noPassthrough/dropcollection_duplicate_fields.js b/jstests/noPassthrough/dropcollection_duplicate_fields.js
index a2a1c1c8839..22e7c3fdeeb 100644
--- a/jstests/noPassthrough/dropcollection_duplicate_fields.js
+++ b/jstests/noPassthrough/dropcollection_duplicate_fields.js
@@ -4,26 +4,25 @@
*/
(function() {
- "use strict";
- var conn = MongoRunner.runMongod();
- var db = conn.getDB('test');
+"use strict";
+var conn = MongoRunner.runMongod();
+var db = conn.getDB('test');
- let coll = db.dropcollection_duplicate_fields;
- // Repeat 100 times for the sake of probabilities
- for (let i = 0; i < 100; i++) {
- coll.drop();
- coll.insert({x: 1});
+let coll = db.dropcollection_duplicate_fields;
+// Repeat 100 times for the sake of probabilities
+for (let i = 0; i < 100; i++) {
+ coll.drop();
+ coll.insert({x: 1});
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.1}}));
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.1}}));
- // will blow up if res is not valid
- let res = db.runCommand({drop: 'dropcollection_duplicate_fields'});
+ // will blow up if res is not valid
+ let res = db.runCommand({drop: 'dropcollection_duplicate_fields'});
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
- }
-
- MongoRunner.stopMongod(conn);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
+}
+MongoRunner.stopMongod(conn);
})();
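
Besides "alwaysOn" and "off", a failpoint mode can be probabilistic, which is what makes the 100-iteration loop above meaningful; a sketch:

    // Fire on roughly 10% of the code paths that check this failpoint.
    assert.commandWorked(db.adminCommand({
        configureFailPoint: 'WTWriteConflictException',
        mode: {activationProbability: 0.1}
    }));
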
diff --git a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
index 972d21c6e24..db93575c993 100644
--- a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
+++ b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
@@ -3,62 +3,62 @@
* @tags: [requires_replication, uses_transactions]
*/
(function() {
- const rst = ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const adminDB = rst.getPrimary().getDB("admin");
- const txnDB = rst.getPrimary().getDB("txn");
- const dropDB = rst.getPrimary().getDB("drop");
+const adminDB = rst.getPrimary().getDB("admin");
+const txnDB = rst.getPrimary().getDB("txn");
+const dropDB = rst.getPrimary().getDB("drop");
- (function assertColletionDropCanBeInterrupted() {
- assert.commandWorked(txnDB.foo.insert({}));
- assert.commandWorked(dropDB.bar.insert({}));
- const session = txnDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("txn");
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.insert({}));
- assert.commandFailedWithCode(dropDB.runCommand({dropDatabase: 1, maxTimeMS: 100}),
- ErrorCodes.MaxTimeMSExpired);
+(function assertCollectionDropCanBeInterrupted() {
+ assert.commandWorked(txnDB.foo.insert({}));
+ assert.commandWorked(dropDB.bar.insert({}));
+ const session = txnDB.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase("txn");
+ session.startTransaction();
+ assert.commandWorked(sessionDB.foo.insert({}));
+ assert.commandFailedWithCode(dropDB.runCommand({dropDatabase: 1, maxTimeMS: 100}),
+ ErrorCodes.MaxTimeMSExpired);
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- })();
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+})();
- (function assertDatabaseDropCanBeInterrupted() {
- assert.commandWorked(txnDB.foo.insert({}));
- assert.commandWorked(dropDB.bar.insert({}));
+(function assertDatabaseDropCanBeInterrupted() {
+ assert.commandWorked(txnDB.foo.insert({}));
+ assert.commandWorked(dropDB.bar.insert({}));
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "alwaysOn"}));
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "alwaysOn"}));
- // This will get blocked by the failpoint when collection drop phase finishes.
- let dropDatabaseShell = startParallelShell(
- "assert.commandFailedWithCode(db.getSiblingDB(\"drop\").runCommand({dropDatabase: 1, maxTimeMS: 5000}), ErrorCodes.MaxTimeMSExpired);",
- rst.getPrimary().port);
+ // This will get blocked by the failpoint when collection drop phase finishes.
+ let dropDatabaseShell = startParallelShell(
+ "assert.commandFailedWithCode(db.getSiblingDB(\"drop\").runCommand({dropDatabase: 1, maxTimeMS: 5000}), ErrorCodes.MaxTimeMSExpired);",
+ rst.getPrimary().port);
- assert.soon(function() {
- const sessionFilter = {active: true, "command.dropDatabase": 1};
- const res = adminDB.aggregate([{$currentOp: {}}, {$match: sessionFilter}]);
- return res.hasNext();
- }, "Timeout waiting for dropDatabase to start");
+ assert.soon(function() {
+ const sessionFilter = {active: true, "command.dropDatabase": 1};
+ const res = adminDB.aggregate([{$currentOp: {}}, {$match: sessionFilter}]);
+ return res.hasNext();
+ }, "Timeout waiting for dropDatabase to start");
- const session = txnDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("txn");
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.insert({}));
+ const session = txnDB.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase("txn");
+ session.startTransaction();
+ assert.commandWorked(sessionDB.foo.insert({}));
- // dropDatabase now gets unblocked by the failpoint but will immediately
- // get blocked by acquiring the GlobalWrite lock for dropping the database.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "off"}));
+ // dropDatabase now gets unblocked by the failpoint but will immediately
+ // get blocked by acquiring the GlobalWrite lock for dropping the database.
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "off"}));
- // This should timeout.
- dropDatabaseShell();
+    // This should time out.
+ dropDatabaseShell();
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- })();
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+})();
- rst.stopSet();
+rst.stopSet();
})();
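
The wait-for-operation idiom above is worth calling out; a sketch using the same $currentOp filter, with field names as in the test:

    // Wait until a dropDatabase is actually running before unblocking it.
    assert.soon(() => adminDB
        .aggregate([{$currentOp: {}}, {$match: {active: true, "command.dropDatabase": 1}}])
        .hasNext(),
        "timeout waiting for dropDatabase to start");
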
diff --git a/jstests/noPassthrough/durable_view_catalog.js b/jstests/noPassthrough/durable_view_catalog.js
index 06702fa5acd..23de01b4b30 100644
--- a/jstests/noPassthrough/durable_view_catalog.js
+++ b/jstests/noPassthrough/durable_view_catalog.js
@@ -5,93 +5,84 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- // The following test verifies that writeConcern: {j: true} ensures that the view catalog is
- // durable.
- let dbpath = MongoRunner.dataPath + '_durable_view_catalog';
- resetDbpath(dbpath);
+// The following test verifies that writeConcern: {j: true} ensures that the view catalog is
+// durable.
+let dbpath = MongoRunner.dataPath + '_durable_view_catalog';
+resetDbpath(dbpath);
- let mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+let mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
- // Start a mongod.
- let conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to start up');
+// Start a mongod.
+let conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to start up');
- // Now connect to the mongod, create, remove and modify views and then abruptly stop the server.
- let viewsDB = conn.getDB('test');
- let pipe = [{$match: {}}];
- assert.commandWorked(
- viewsDB.runCommand({create: "view1", viewOn: "collection", pipeline: pipe}));
- assert.commandWorked(
- viewsDB.runCommand({create: "view2", viewOn: "collection", pipeline: pipe}));
- assert.commandWorked(
- viewsDB.runCommand({create: "view3", viewOn: "collection", pipeline: pipe}));
- assert.commandWorked(viewsDB.runCommand({collMod: "view3", viewOn: "view2"}));
- // On the final modification, require a sync to ensure durability.
- assert.commandWorked(viewsDB.runCommand({drop: "view1", writeConcern: {j: 1}}));
+// Now connect to the mongod, create, remove and modify views and then abruptly stop the server.
+let viewsDB = conn.getDB('test');
+let pipe = [{$match: {}}];
+assert.commandWorked(viewsDB.runCommand({create: "view1", viewOn: "collection", pipeline: pipe}));
+assert.commandWorked(viewsDB.runCommand({create: "view2", viewOn: "collection", pipeline: pipe}));
+assert.commandWorked(viewsDB.runCommand({create: "view3", viewOn: "collection", pipeline: pipe}));
+assert.commandWorked(viewsDB.runCommand({collMod: "view3", viewOn: "view2"}));
+// On the final modification, require a sync to ensure durability.
+assert.commandWorked(viewsDB.runCommand({drop: "view1", writeConcern: {j: 1}}));
- // Hard kill the mongod to ensure the data was indeed synced to durable storage.
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Hard kill the mongod to ensure the data was indeed synced to durable storage.
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restart the mongod.
- conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
+// Restart the mongod.
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
- // Check that our journaled write still is present.
- viewsDB = conn.getDB('test');
- let actualViews = viewsDB.system.views.find().toArray();
- let expectedViews = [
- {"_id": "test.view2", "viewOn": "collection", "pipeline": pipe},
- {"_id": "test.view3", "viewOn": "view2", "pipeline": pipe}
- ];
- assert.eq(actualViews, expectedViews, "view definitions not correctly persisted");
- let listedViews = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}})
- .cursor.firstBatch.map((function(x) {
- return {
- _id: "test." + x.name,
- viewOn: x.options.viewOn,
- pipeline: x.options.pipeline
- };
- }));
- assert.sameMembers(
- listedViews, expectedViews, "persisted view definitions not correctly loaded");
+// Check that our journaled write still is present.
+viewsDB = conn.getDB('test');
+let actualViews = viewsDB.system.views.find().toArray();
+let expectedViews = [
+ {"_id": "test.view2", "viewOn": "collection", "pipeline": pipe},
+ {"_id": "test.view3", "viewOn": "view2", "pipeline": pipe}
+];
+assert.eq(actualViews, expectedViews, "view definitions not correctly persisted");
+let listedViews =
+ viewsDB.runCommand({listCollections: 1, filter: {type: "view"}})
+ .cursor.firstBatch.map((function(x) {
+ return {_id: "test." + x.name, viewOn: x.options.viewOn, pipeline: x.options.pipeline};
+ }));
+assert.sameMembers(listedViews, expectedViews, "persisted view definitions not correctly loaded");
- // Insert an invalid view definition directly into system.views to bypass normal validation.
- assert.writeOK(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"}));
+// Insert an invalid view definition directly into system.views to bypass normal validation.
+assert.writeOK(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"}));
- // Skip collection validation during stopMongod if invalid views exists.
- TestData.skipValidationOnInvalidViewDefinitions = true;
+// Skip collection validation during stopMongod if invalid views exist.
+TestData.skipValidationOnInvalidViewDefinitions = true;
- // Restarting the mongod should succeed despite the presence of invalid view definitions.
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(
- null,
- conn,
- "after inserting bad views, failed to restart mongod with options: " + tojson(mongodArgs));
+// Restarting the mongod should succeed despite the presence of invalid view definitions.
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(
+ null,
+ conn,
+ "after inserting bad views, failed to restart mongod with options: " + tojson(mongodArgs));
- // Now that the database's view catalog has been marked as invalid, all view operations in that
- // database should fail.
- viewsDB = conn.getDB("test");
- assert.commandFailedWithCode(viewsDB.runCommand({find: "view2"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({create: "view4", viewOn: "collection"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({drop: "view4"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({listCollections: 1}),
- ErrorCodes.InvalidViewDefinition);
+// Now that the database's view catalog has been marked as invalid, all view operations in that
+// database should fail.
+viewsDB = conn.getDB("test");
+assert.commandFailedWithCode(viewsDB.runCommand({find: "view2"}), ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({create: "view4", viewOn: "collection"}),
+ ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}),
+ ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({drop: "view4"}), ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({listCollections: 1}),
+ ErrorCodes.InvalidViewDefinition);
- // Manually remove the invalid view definition from system.views, and then verify that view
- // operations work successfully without requiring a server restart.
- assert.writeOK(viewsDB.system.views.remove({_id: "badView"}));
- assert.commandWorked(viewsDB.runCommand({find: "view2"}));
- assert.commandWorked(viewsDB.runCommand({create: "view4", viewOn: "collection"}));
- assert.commandWorked(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}));
- assert.commandWorked(viewsDB.runCommand({drop: "view4"}));
- assert.commandWorked(viewsDB.runCommand({listCollections: 1}));
- MongoRunner.stopMongod(conn);
+// Manually remove the invalid view definition from system.views, and then verify that view
+// operations work successfully without requiring a server restart.
+assert.writeOK(viewsDB.system.views.remove({_id: "badView"}));
+assert.commandWorked(viewsDB.runCommand({find: "view2"}));
+assert.commandWorked(viewsDB.runCommand({create: "view4", viewOn: "collection"}));
+assert.commandWorked(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}));
+assert.commandWorked(viewsDB.runCommand({drop: "view4"}));
+assert.commandWorked(viewsDB.runCommand({listCollections: 1}));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/end_sessions_command.js b/jstests/noPassthrough/end_sessions_command.js
index 4999410e953..3f32d95c42f 100644
--- a/jstests/noPassthrough/end_sessions_command.js
+++ b/jstests/noPassthrough/end_sessions_command.js
@@ -1,93 +1,92 @@
(function() {
- "use script";
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- var res;
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
-
- // Start up a standalone server.
- var conn = MongoRunner.runMongod();
- var admin = conn.getDB("admin");
- var config = conn.getDB("config");
-
- // Trigger an initial refresh, as a sanity check.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- var sessions = [];
- for (var i = 0; i < 20; i++) {
- res = admin.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- sessions.push(res);
- }
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- assert.eq(config.system.sessions.count(), 20, "refresh should have written 20 session records");
-
- var endSessionsIds = [];
- for (var i = 0; i < 10; i++) {
- endSessionsIds.push(sessions[i].id);
- }
- res = admin.runCommand({endSessions: endSessionsIds});
- assert.commandWorked(res, "failed to end sessions");
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- assert.eq(config.system.sessions.count(),
- 10,
- "endSessions and refresh should result in 10 remaining sessions");
-
- // double delete the remaining 10
- endSessionsIds = [];
- for (var i = 10; i < 20; i++) {
- endSessionsIds.push(sessions[i].id);
- endSessionsIds.push(sessions[i].id);
- }
-
- res = admin.runCommand({endSessions: endSessionsIds});
- assert.commandWorked(res, "failed to end sessions");
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
+"use script";
+
+// This test makes assertions about the number of sessions, which is not compatible
+// with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+var res;
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
+
+// Start up a standalone server.
+var conn = MongoRunner.runMongod();
+var admin = conn.getDB("admin");
+var config = conn.getDB("config");
+
+// Trigger an initial refresh, as a sanity check.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+var sessions = [];
+for (var i = 0; i < 20; i++) {
+ res = admin.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+ sessions.push(res);
+}
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+assert.eq(config.system.sessions.count(), 20, "refresh should have written 20 session records");
+
+var endSessionsIds = [];
+for (var i = 0; i < 10; i++) {
+ endSessionsIds.push(sessions[i].id);
+}
+res = admin.runCommand({endSessions: endSessionsIds});
+assert.commandWorked(res, "failed to end sessions");
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+assert.eq(config.system.sessions.count(),
+ 10,
+ "endSessions and refresh should result in 10 remaining sessions");
+
+// double delete the remaining 10
+endSessionsIds = [];
+for (var i = 10; i < 20; i++) {
+ endSessionsIds.push(sessions[i].id);
+ endSessionsIds.push(sessions[i].id);
+}
+
+res = admin.runCommand({endSessions: endSessionsIds});
+assert.commandWorked(res, "failed to end sessions");
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+assert.eq(config.system.sessions.count(),
+ 0,
+ "endSessions and refresh should result in 0 remaining sessions");
+
+// delete some sessions that were never created
+res = admin.runCommand({
+ endSessions: [
+ {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3bae")},
+ {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3baf")}
+ ]
+});
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// verify that end on the session handle actually ends sessions
+{
+ var session = conn.startSession();
+
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "do something to tickle the session");
+ assert.commandWorked(session.getDatabase("admin").runCommand(refresh), "failed to refresh");
+ assert.eq(config.system.sessions.count(), 1, "usersInfo should have written 1 session record");
+
+ session.endSession();
+ assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
assert.eq(config.system.sessions.count(),
0,
"endSessions and refresh should result in 0 remaining sessions");
+}
- // delete some sessions that were never created
- res = admin.runCommand({
- endSessions: [
- {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3bae")},
- {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3baf")}
- ]
- });
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // verify that end on the session handle actually ends sessions
- {
- var session = conn.startSession();
-
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "do something to tickle the session");
- assert.commandWorked(session.getDatabase("admin").runCommand(refresh), "failed to refresh");
- assert.eq(
- config.system.sessions.count(), 1, "usersInfo should have written 1 session record");
-
- session.endSession();
- assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
- assert.eq(config.system.sessions.count(),
- 0,
- "endSessions and refresh should result in 0 remaining sessions");
- }
-
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
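
A sketch of the logical-session lifecycle these counts depend on, assuming conn and admin as in the test above:

    const session = conn.startSession();
    // Any command run on the session materializes it in the server's cache ...
    assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}));
    // ... and a forced refresh flushes the cache into config.system.sessions.
    assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
    session.endSession();  // marked for removal on the next refresh
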
diff --git a/jstests/noPassthrough/exchange_in_session.js b/jstests/noPassthrough/exchange_in_session.js
index 20261c0c081..b78d45d27be 100644
--- a/jstests/noPassthrough/exchange_in_session.js
+++ b/jstests/noPassthrough/exchange_in_session.js
@@ -5,81 +5,81 @@
* @tags: [requires_sharding, uses_transactions]
*/
(function() {
- // This test manually simulates a session, which is not compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test manually simulates a session, which is not compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
- // Start a sharded cluster. For this test, we'll just need to talk to the shard directly.
- const st = new ShardingTest({shards: 1, mongos: 1});
+// Start a sharded cluster. For this test, we'll just need to talk to the shard directly.
+const st = new ShardingTest({shards: 1, mongos: 1});
- const adminDB = st.shard0.getDB("admin");
- const session = st.shard0.getDB("test").getMongo().startSession();
- const shardDB = session.getDatabase("test");
- const coll = shardDB.exchange_in_session;
+const adminDB = st.shard0.getDB("admin");
+const session = st.shard0.getDB("test").getMongo().startSession();
+const shardDB = session.getDatabase("test");
+const coll = shardDB.exchange_in_session;
- let bigString = '';
- for (let i = 0; i < 20; i++) {
- bigString += 's';
- }
+let bigString = '';
+for (let i = 0; i < 20; i++) {
+ bigString += 's';
+}
- // Insert some documents.
- const nDocs = 50;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i, bigString: bigString}));
- }
+// Insert some documents.
+const nDocs = 50;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i, bigString: bigString}));
+}
- session.startTransaction();
+session.startTransaction();
- // Set up an Exchange with two cursors.
- let res = assert.commandWorked(shardDB.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: 'keyRange',
- consumers: NumberInt(2),
- key: {_id: 1},
- boundaries: [{a: MinKey}, {a: nDocs / 2}, {a: MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1)],
- bufferSize: NumberInt(128)
- },
- cursor: {batchSize: 0},
- }));
+// Set up an Exchange with two cursors.
+let res = assert.commandWorked(shardDB.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: 'keyRange',
+ consumers: NumberInt(2),
+ key: {_id: 1},
+ boundaries: [{a: MinKey}, {a: nDocs / 2}, {a: MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1)],
+ bufferSize: NumberInt(128)
+ },
+ cursor: {batchSize: 0},
+}));
- function spawnShellToIterateCursor(cursorId) {
- let code = `const cursor = ${tojson(cursorId)};`;
- code += `const sessionId = ${tojson(session.getSessionId())};`;
- code += `const collName = "${coll.getName()}";`;
- function iterateCursorWithNoDocs() {
- const getMoreCmd = {
- getMore: cursor.id,
- collection: collName,
- batchSize: 4,
- lsid: sessionId,
- txnNumber: NumberLong(0),
- autocommit: false
- };
+function spawnShellToIterateCursor(cursorId) {
+ let code = `const cursor = ${tojson(cursorId)};`;
+ code += `const sessionId = ${tojson(session.getSessionId())};`;
+ code += `const collName = "${coll.getName()}";`;
+ function iterateCursorWithNoDocs() {
+ const getMoreCmd = {
+ getMore: cursor.id,
+ collection: collName,
+ batchSize: 4,
+ lsid: sessionId,
+ txnNumber: NumberLong(0),
+ autocommit: false
+ };
- let resp = null;
- while (!resp || resp.cursor.id != 0) {
- resp = assert.commandWorked(db.runCommand(getMoreCmd));
- }
+ let resp = null;
+ while (!resp || resp.cursor.id != 0) {
+ resp = assert.commandWorked(db.runCommand(getMoreCmd));
}
- code += `(${iterateCursorWithNoDocs.toString()})();`;
- return startParallelShell(code, st.rs0.getPrimary().port);
}
+ code += `(${iterateCursorWithNoDocs.toString()})();`;
+ return startParallelShell(code, st.rs0.getPrimary().port);
+}
- let parallelShells = [];
- for (let curs of res.cursors) {
- parallelShells.push(spawnShellToIterateCursor(curs.cursor));
- }
+let parallelShells = [];
+for (let curs of res.cursors) {
+ parallelShells.push(spawnShellToIterateCursor(curs.cursor));
+}
- assert.soon(function() {
- for (let waitFn of parallelShells) {
- waitFn();
- }
- return true;
- });
+assert.soon(function() {
+ for (let waitFn of parallelShells) {
+ waitFn();
+ }
+ return true;
+});
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/exhaust_option_disallowed_in_session.js b/jstests/noPassthrough/exhaust_option_disallowed_in_session.js
index 983a0f90682..1ba1014dc2f 100644
--- a/jstests/noPassthrough/exhaust_option_disallowed_in_session.js
+++ b/jstests/noPassthrough/exhaust_option_disallowed_in_session.js
@@ -2,31 +2,31 @@
* Make sure the 'exhaust' query option is not able to be used in a session.
*/
(function() {
- "use strict";
+"use strict";
- let conn = MongoRunner.runMongod();
+let conn = MongoRunner.runMongod();
- const dbName = 'test';
- const collName = 'coll';
+const dbName = 'test';
+const collName = 'coll';
- const session = conn.startSession();
- const sessionColl = session.getDatabase(dbName).getCollection(collName);
- const testColl = conn.getDB(dbName).getCollection(collName);
+const session = conn.startSession();
+const sessionColl = session.getDatabase(dbName).getCollection(collName);
+const testColl = conn.getDB(dbName).getCollection(collName);
- testColl.drop();
+testColl.drop();
- // Create a collection to query.
- assert.commandWorked(testColl.insert({_id: 1}));
+// Create a collection to query.
+assert.commandWorked(testColl.insert({_id: 1}));
- // Exhaust outside of session should work.
- let docs = testColl.find().addOption(DBQuery.Option.exhaust).toArray();
- assert.docEq([{_id: 1}], docs);
+// Exhaust outside of session should work.
+let docs = testColl.find().addOption(DBQuery.Option.exhaust).toArray();
+assert.docEq([{_id: 1}], docs);
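+// The exhaust option asks the server to stream follow-up batches without
+// explicit getMore requests; the assertion below shows the server refuses to
+// combine that with a session.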
- // Exhaust in session should fail.
- assert.throws(() => {
- sessionColl.find().addOption(DBQuery.Option.exhaust).toArray();
- });
+// Exhaust in session should fail.
+assert.throws(() => {
+ sessionColl.find().addOption(DBQuery.Option.exhaust).toArray();
+});
- session.endSession();
- MongoRunner.stopMongod(conn);
+session.endSession();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/exit_logging.js b/jstests/noPassthrough/exit_logging.js
index 4e6be5c811f..168f63a4579 100644
--- a/jstests/noPassthrough/exit_logging.js
+++ b/jstests/noPassthrough/exit_logging.js
@@ -5,105 +5,104 @@
(function() {
- function makeShutdownByCrashFn(crashHow) {
- return function(conn) {
- var admin = conn.getDB("admin");
- assert.commandWorked(admin.runCommand(
- {configureFailPoint: "crashOnShutdown", mode: "alwaysOn", data: {how: crashHow}}));
- admin.shutdownServer();
- };
- }
-
- function makeRegExMatchFn(pattern) {
- return function(text) {
- return pattern.test(text);
- };
- }
-
- function testShutdownLogging(launcher, crashFn, matchFn, expectedExitCode) {
- clearRawMongoProgramOutput();
- var conn = launcher.start({});
-
- function checkOutput() {
- var logContents = rawMongoProgramOutput();
- function printLog() {
- // We can't just return a string because it will be well over the max
- // line length.
- // So we just print manually.
- print("================ BEGIN LOG CONTENTS ==================");
- logContents.split(/\n/).forEach((line) => {
- print(line);
- });
- print("================ END LOG CONTENTS =====================");
- return "";
- }
-
- assert(matchFn(logContents), printLog);
+function makeShutdownByCrashFn(crashHow) {
+ return function(conn) {
+ var admin = conn.getDB("admin");
+ assert.commandWorked(admin.runCommand(
+ {configureFailPoint: "crashOnShutdown", mode: "alwaysOn", data: {how: crashHow}}));
+ admin.shutdownServer();
+ };
+}
+
+function makeRegExMatchFn(pattern) {
+ return function(text) {
+ return pattern.test(text);
+ };
+}
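+// e.g. makeRegExMatchFn(/shutdown command received/) yields a predicate
+// suitable as the matchFn argument of testShutdownLogging() below.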
+
+function testShutdownLogging(launcher, crashFn, matchFn, expectedExitCode) {
+ clearRawMongoProgramOutput();
+ var conn = launcher.start({});
+
+ function checkOutput() {
+ var logContents = rawMongoProgramOutput();
+ function printLog() {
+ // We can't just return a string because it will be well over the max
+ // line length.
+ // So we just print manually.
+ print("================ BEGIN LOG CONTENTS ==================");
+ logContents.split(/\n/).forEach((line) => {
+ print(line);
+ });
+ print("================ END LOG CONTENTS =====================");
+ return "";
}
- crashFn(conn);
- launcher.stop(conn, undefined, {allowedExitCode: expectedExitCode});
- checkOutput();
- }
-
- function runAllTests(launcher) {
- const SIGSEGV = 11;
- const SIGABRT = 6;
- testShutdownLogging(launcher, function(conn) {
- conn.getDB('admin').shutdownServer();
- }, makeRegExMatchFn(/shutdown command received/), MongoRunner.EXIT_CLEAN);
-
- testShutdownLogging(launcher,
- makeShutdownByCrashFn('fault'),
- makeRegExMatchFn(/Invalid access at address[\s\S]*printStackTrace/),
- -SIGSEGV);
-
- testShutdownLogging(launcher,
- makeShutdownByCrashFn('abort'),
- makeRegExMatchFn(/Got signal[\s\S]*printStackTrace/),
- -SIGABRT);
- }
-
- if (_isWindows()) {
- print("SKIPPING TEST ON WINDOWS");
- return;
+ assert(matchFn(logContents), printLog);
}
- if (_isAddressSanitizerActive()) {
- print("SKIPPING TEST ON ADDRESS SANITIZER BUILD");
- return;
- }
-
- (function testMongod() {
- print("********************\nTesting exit logging in mongod\n********************");
-
- runAllTests({
- start: function(opts) {
- var actualOpts = {nojournal: ""};
- Object.extend(actualOpts, opts);
- return MongoRunner.runMongod(actualOpts);
- },
-
- stop: MongoRunner.stopMongod
- });
- }());
-
- (function testMongos() {
- print("********************\nTesting exit logging in mongos\n********************");
+ crashFn(conn);
+ launcher.stop(conn, undefined, {allowedExitCode: expectedExitCode});
+ checkOutput();
+}
+
+function runAllTests(launcher) {
+ const SIGSEGV = 11;
+ const SIGABRT = 6;
+ testShutdownLogging(launcher, function(conn) {
+ conn.getDB('admin').shutdownServer();
+ }, makeRegExMatchFn(/shutdown command received/), MongoRunner.EXIT_CLEAN);
+
+ testShutdownLogging(launcher,
+ makeShutdownByCrashFn('fault'),
+ makeRegExMatchFn(/Invalid access at address[\s\S]*printStackTrace/),
+ -SIGSEGV);
+
+ testShutdownLogging(launcher,
+ makeShutdownByCrashFn('abort'),
+ makeRegExMatchFn(/Got signal[\s\S]*printStackTrace/),
+ -SIGABRT);
+}
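+// A negative expected exit code means "terminated by that signal":
+// -SIGSEGV (-11) for the 'fault' crash and -SIGABRT (-6) for 'abort', which
+// launcher.stop() accepts via allowedExitCode.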
+
+if (_isWindows()) {
+ print("SKIPPING TEST ON WINDOWS");
+ return;
+}
+
+if (_isAddressSanitizerActive()) {
+ print("SKIPPING TEST ON ADDRESS SANITIZER BUILD");
+ return;
+}
+
+(function testMongod() {
+ print("********************\nTesting exit logging in mongod\n********************");
+
+ runAllTests({
+ start: function(opts) {
+ var actualOpts = {nojournal: ""};
+ Object.extend(actualOpts, opts);
+ return MongoRunner.runMongod(actualOpts);
+ },
+
+ stop: MongoRunner.stopMongod
+ });
+}());
- var st = new ShardingTest({shards: 1});
- var mongosLauncher = {
- start: function(opts) {
- var actualOpts = {configdb: st._configDB};
- Object.extend(actualOpts, opts);
- return MongoRunner.runMongos(actualOpts);
- },
+(function testMongos() {
+ print("********************\nTesting exit logging in mongos\n********************");
- stop: MongoRunner.stopMongos
- };
+ var st = new ShardingTest({shards: 1});
+ var mongosLauncher = {
+ start: function(opts) {
+ var actualOpts = {configdb: st._configDB};
+ Object.extend(actualOpts, opts);
+ return MongoRunner.runMongos(actualOpts);
+ },
- runAllTests(mongosLauncher);
- st.stop();
- }());
+ stop: MongoRunner.stopMongos
+ };
+ runAllTests(mongosLauncher);
+ st.stop();
+}());
}());
diff --git a/jstests/noPassthrough/failcommand_failpoint_not_parallel.js b/jstests/noPassthrough/failcommand_failpoint_not_parallel.js
index 2241dbe5d89..a9d776d0079 100644
--- a/jstests/noPassthrough/failcommand_failpoint_not_parallel.js
+++ b/jstests/noPassthrough/failcommand_failpoint_not_parallel.js
@@ -1,24 +1,23 @@
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn);
- const db = conn.getDB("test_failcommand_noparallel");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn);
+const db = conn.getDB("test_failcommand_noparallel");
- // Test times when closing connection.
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {
- closeConnection: true,
- failCommands: ["find"],
- }
- }));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.commandWorked(db.runCommand({find: "c"}));
- assert.commandWorked(db.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- MongoRunner.stopMongod(conn);
+// Test times when closing connection.
+assert.commandWorked(db.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {
+ closeConnection: true,
+ failCommands: ["find"],
+ }
+}));
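+// With mode: {times: 2}, the failpoint fires for exactly the next two matching
+// commands: the first two finds have their connections closed (surfacing as
+// thrown network errors in the shell) and the third succeeds.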
+assert.throws(() => db.runCommand({find: "c"}));
+assert.throws(() => db.runCommand({find: "c"}));
+assert.commandWorked(db.runCommand({find: "c"}));
+assert.commandWorked(db.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/feature_compatibility_version.js b/jstests/noPassthrough/feature_compatibility_version.js
index 64cd6a3a5f7..4f7cd42f450 100644
--- a/jstests/noPassthrough/feature_compatibility_version.js
+++ b/jstests/noPassthrough/feature_compatibility_version.js
@@ -2,55 +2,55 @@
// the value of the featureCompatibilityVersion server parameter.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
- let adminDB = conn.getDB("admin");
+let adminDB = conn.getDB("admin");
- // Initially the featureCompatibilityVersion is latestFCV.
- checkFCV(adminDB, latestFCV);
+// Initially the featureCompatibilityVersion is latestFCV.
+checkFCV(adminDB, latestFCV);
- // Updating the featureCompatibilityVersion document changes the featureCompatibilityVersion
- // server parameter.
- assert.writeOK(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV}}));
- checkFCV(adminDB, lastStableFCV);
+// Updating the featureCompatibilityVersion document changes the featureCompatibilityVersion
+// server parameter.
+assert.writeOK(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV}}));
+checkFCV(adminDB, lastStableFCV);
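+// While an upgrade or downgrade is in flight, the document carries both fields:
+// 'version' is the currently effective FCV and 'targetVersion' is where the
+// transition is heading, hence the extra arguments to checkFCV() below.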
- assert.writeOK(
- adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
- checkFCV(adminDB, lastStableFCV, latestFCV);
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
+checkFCV(adminDB, lastStableFCV, latestFCV);
- assert.writeOK(adminDB.system.version.update(
- {_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
- checkFCV(adminDB, lastStableFCV, lastStableFCV);
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
+checkFCV(adminDB, lastStableFCV, lastStableFCV);
- assert.writeOK(
- adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: latestFCV}, $unset: {targetVersion: true}}));
- checkFCV(adminDB, latestFCV);
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: latestFCV}, $unset: {targetVersion: true}}));
+checkFCV(adminDB, latestFCV);
- // Updating the featureCompatibilityVersion document with an invalid version fails.
- assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: "3.2"}}),
- ErrorCodes.BadValue);
- checkFCV(adminDB, latestFCV);
+// Updating the featureCompatibilityVersion document with an invalid version fails.
+assert.writeErrorWithCode(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"}, {$set: {version: "3.2"}}),
+ ErrorCodes.BadValue);
+checkFCV(adminDB, latestFCV);
- // Updating the featureCompatibilityVersion document with an invalid targetVersion fails.
- assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {targetVersion: lastStableFCV}}),
- ErrorCodes.BadValue);
- checkFCV(adminDB, latestFCV);
+// Updating the featureCompatibilityVersion document with an invalid targetVersion fails.
+assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {targetVersion: lastStableFCV}}),
+ ErrorCodes.BadValue);
+checkFCV(adminDB, latestFCV);
- assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {targetVersion: latestFCV}}),
- ErrorCodes.BadValue);
- checkFCV(adminDB, latestFCV);
+assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {targetVersion: latestFCV}}),
+ ErrorCodes.BadValue);
+checkFCV(adminDB, latestFCV);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/filemd5_kill_during_yield.js b/jstests/noPassthrough/filemd5_kill_during_yield.js
index 250b6f23696..e2f74bcb1ce 100644
--- a/jstests/noPassthrough/filemd5_kill_during_yield.js
+++ b/jstests/noPassthrough/filemd5_kill_during_yield.js
@@ -2,47 +2,47 @@
// up the PlanExecutor without crashing the server. This test was designed to reproduce
// SERVER-35361.
(function() {
- "use strict";
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn);
- const db = conn.getDB("test");
- db.fs.chunks.drop();
- assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")}));
- assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")}));
- db.fs.chunks.ensureIndex({files_id: 1, n: 1});
-
- const kFailPointName = "waitInFilemd5DuringManualYield";
- assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
-
- const failingMD5Shell =
- startParallelShell(() => assert.commandFailedWithCode(
- db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.Interrupted),
- conn.port);
-
- // Wait for filemd5 to manually yield and hang.
- let opId;
- assert.soon(
- () => {
- const filter = {ns: "test.fs.chunks", "command.filemd5": 1, msg: kFailPointName};
- const result =
- db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: filter}]).toArray();
-
- if (result.length === 1) {
- opId = result[0].opid;
-
- return true;
- }
-
- return false;
- },
-        () => "Failed to find operation in currentOp() output: " +
-            tojson(db.currentOp({"ns": db.fs.chunks.getFullName()})));
-
- // Kill the operation, then disable the failpoint so the command recognizes it's been killed.
- assert.commandWorked(db.killOp(opId));
- assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
-
- failingMD5Shell();
- MongoRunner.stopMongod(conn);
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn);
+const db = conn.getDB("test");
+db.fs.chunks.drop();
+assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")}));
+assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")}));
+db.fs.chunks.ensureIndex({files_id: 1, n: 1});
+
+const kFailPointName = "waitInFilemd5DuringManualYield";
+assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
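+// The failpoint parks filemd5 at its manual yield point, giving the test a
+// stable window in which to locate the operation via $currentOp and kill it.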
+
+const failingMD5Shell =
+ startParallelShell(() => assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}),
+ ErrorCodes.Interrupted),
+ conn.port);
+
+// Wait for filemd5 to manually yield and hang.
+let opId;
+assert.soon(
+ () => {
+ const filter = {ns: "test.fs.chunks", "command.filemd5": 1, msg: kFailPointName};
+ const result =
+ db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: filter}]).toArray();
+
+ if (result.length === 1) {
+ opId = result[0].opid;
+
+ return true;
+ }
+
+ return false;
+ },
+ () => "Failed to find operation in currentOp() output: " +
+ tojson(db.currentOp({"ns": coll.getFullName()})));
+
+// Kill the operation, then disable the failpoint so the command recognizes it's been killed.
+assert.commandWorked(db.killOp(opId));
+assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+
+failingMD5Shell();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/find_by_uuid_and_rename.js b/jstests/noPassthrough/find_by_uuid_and_rename.js
index 206d9a0cbbd..6bdde57bd80 100644
--- a/jstests/noPassthrough/find_by_uuid_and_rename.js
+++ b/jstests/noPassthrough/find_by_uuid_and_rename.js
@@ -3,60 +3,59 @@
//
(function() {
- "use strict";
- const dbName = "do_concurrent_rename";
- const collName = "collA";
- const otherName = "collB";
- const repeatFind = 100;
- load("jstests/noPassthrough/libs/concurrent_rename.js");
- load("jstests/libs/parallel_shell_helpers.js");
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- jsTestLog("Create collection.");
- let findRenameDB = conn.getDB(dbName);
- findRenameDB.dropDatabase();
- assert.commandWorked(findRenameDB.runCommand({"create": collName}));
- assert.commandWorked(
- findRenameDB.runCommand({insert: collName, documents: [{fooField: 'FOO'}]}));
-
- let infos = findRenameDB.getCollectionInfos();
- let uuid = infos[0].info.uuid;
- const findCmd = {"find": uuid};
-
- // Assert 'find' command by UUID works.
- assert.commandWorked(findRenameDB.runCommand(findCmd));
-
- jsTestLog("Start parallel shell for renames.");
- let renameShell =
- startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
-
- // Wait until we receive confirmation that the parallel shell has started.
- assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null,
- "Expected parallel shell to insert a document.");
-
- jsTestLog("Start 'find' commands.");
- while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
- for (let i = 0; i < repeatFind; i++) {
- let res = findRenameDB.runCommand(findCmd);
-
- // This is an acceptable transient error until SERVER-31695 has been completed.
- if (res.code === ErrorCodes.QueryPlanKilled) {
- print("Ignoring transient QueryPlanKilled error: " + res.errmsg);
- continue;
- }
- assert.commandWorked(res, "could not run " + tojson(findCmd));
- let cursor = new DBCommandCursor(findRenameDB, res);
- let errMsg = "expected more data from command " + tojson(findCmd) + ", with result " +
- tojson(res);
- assert(cursor.hasNext(), errMsg);
- let doc = cursor.next();
- assert.eq(doc.fooField, "FOO");
- assert(!cursor.hasNext(),
- "expected to have exhausted cursor for results " + tojson(res));
+"use strict";
+const dbName = "do_concurrent_rename";
+const collName = "collA";
+const otherName = "collB";
+const repeatFind = 100;
+load("jstests/noPassthrough/libs/concurrent_rename.js");
+load("jstests/libs/parallel_shell_helpers.js");
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+jsTestLog("Create collection.");
+let findRenameDB = conn.getDB(dbName);
+findRenameDB.dropDatabase();
+assert.commandWorked(findRenameDB.runCommand({"create": collName}));
+assert.commandWorked(findRenameDB.runCommand({insert: collName, documents: [{fooField: 'FOO'}]}));
+
+let infos = findRenameDB.getCollectionInfos();
+let uuid = infos[0].info.uuid;
+const findCmd = {
+ "find": uuid
+};
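+// The find command accepts a collection UUID in place of a name, so this
+// command keeps resolving the same collection even while it is being renamed.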
+
+// Assert 'find' command by UUID works.
+assert.commandWorked(findRenameDB.runCommand(findCmd));
+
+jsTestLog("Start parallel shell for renames.");
+let renameShell =
+ startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
+
+// Wait until we receive confirmation that the parallel shell has started.
+assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null,
+ "Expected parallel shell to insert a document.");
+
+jsTestLog("Start 'find' commands.");
+while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
+ for (let i = 0; i < repeatFind; i++) {
+ let res = findRenameDB.runCommand(findCmd);
+
+ // This is an acceptable transient error until SERVER-31695 has been completed.
+ if (res.code === ErrorCodes.QueryPlanKilled) {
+ print("Ignoring transient QueryPlanKilled error: " + res.errmsg);
+ continue;
}
+ assert.commandWorked(res, "could not run " + tojson(findCmd));
+ let cursor = new DBCommandCursor(findRenameDB, res);
+ let errMsg =
+ "expected more data from command " + tojson(findCmd) + ", with result " + tojson(res);
+ assert(cursor.hasNext(), errMsg);
+ let doc = cursor.next();
+ assert.eq(doc.fooField, "FOO");
+ assert(!cursor.hasNext(), "expected to have exhausted cursor for results " + tojson(res));
}
- renameShell();
- MongoRunner.stopMongod(conn);
-
+}
+renameShell();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/flow_control_logging.js b/jstests/noPassthrough/flow_control_logging.js
index 0d4744b4559..bd3478aa7dd 100644
--- a/jstests/noPassthrough/flow_control_logging.js
+++ b/jstests/noPassthrough/flow_control_logging.js
@@ -8,51 +8,47 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const replSet = new ReplSetTest({name: "flow_control_logging", nodes: 3});
- replSet.startSet({
- setParameter: {
- flowControlSamplePeriod:
- 1, // Increase resolution to detect lag in a light write workload.
- flowControlWarnThresholdSeconds: 1,
- // Configure flow control to engage after one second of lag.
- flowControlTargetLagSeconds: 1,
- flowControlThresholdLagPercentage: 1,
- // Use a speedy no-op writer to avoid needing a robust background writer.
- writePeriodicNoops: true,
- periodicNoopIntervalSecs:
- 2 // replSet.initiate() can hang with a one second interval for reasons.
- }
- });
- replSet.initiate();
-
- // Stop replication which will pin the commit point.
- for (let sec of replSet.getSecondaries()) {
- assert.commandWorked(sec.adminCommand({
- configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries",
- mode: "alwaysOn"
- }));
+const replSet = new ReplSetTest({name: "flow_control_logging", nodes: 3});
+replSet.startSet({
+ setParameter: {
+ flowControlSamplePeriod: 1, // Increase resolution to detect lag in a light write workload.
+ flowControlWarnThresholdSeconds: 1,
+ // Configure flow control to engage after one second of lag.
+ flowControlTargetLagSeconds: 1,
+ flowControlThresholdLagPercentage: 1,
+ // Use a speedy no-op writer to avoid needing a robust background writer.
+ writePeriodicNoops: true,
+ periodicNoopIntervalSecs:
+ 2 // replSet.initiate() can hang with a one second interval for reasons.
}
+});
+replSet.initiate();
- const timeoutMilliseconds = 30 * 1000;
- // The test has stopped replication and the primary's no-op writer is configured to create an
- // oplog entry every other second. Once the primary notices the sustainer rate is not moving, it
- // should start logging a warning once per second. This check waits for two log messages to make
- // sure the appropriate state variables are being reset.
- checkLog.containsWithAtLeastCount(
- replSet.getPrimary(),
- "Flow control is engaged and the sustainer point is not moving.",
- 2,
- timeoutMilliseconds);
+// Stop replication which will pin the commit point.
+for (let sec of replSet.getSecondaries()) {
+ assert.commandWorked(sec.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
+}
- // Restart replication so the replica set will shut down.
- for (let sec of replSet.getSecondaries()) {
- assert.commandWorked(sec.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
- }
+const timeoutMilliseconds = 30 * 1000;
+// The test has stopped replication and the primary's no-op writer is configured to create an
+// oplog entry every other second. Once the primary notices the sustainer rate is not moving, it
+// should start logging a warning once per second. This check waits for two log messages to make
+// sure the appropriate state variables are being reset.
+checkLog.containsWithAtLeastCount(replSet.getPrimary(),
+ "Flow control is engaged and the sustainer point is not moving.",
+ 2,
+ timeoutMilliseconds);
+
+// Restart replication so the replica set will shut down.
+for (let sec of replSet.getSecondaries()) {
+ assert.commandWorked(sec.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
+}
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/noPassthrough/flow_control_replica_set.js b/jstests/noPassthrough/flow_control_replica_set.js
index 43fa022b284..025c04e4e5b 100644
--- a/jstests/noPassthrough/flow_control_replica_set.js
+++ b/jstests/noPassthrough/flow_control_replica_set.js
@@ -12,55 +12,55 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: "flowControlTicketOverride",
- mode: "alwaysOn",
- data: {"numTickets": 1000 * 1000 * 1000}
- }));
- // Sleep 2 seconds for the failpoint to take effect.
- sleep(2000);
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: "flowControlTicketOverride",
+ mode: "alwaysOn",
+ data: {"numTickets": 1000 * 1000 * 1000}
+}));
+// Sleep 2 seconds for the failpoint to take effect.
+sleep(2000);
- let result = benchRun({
- host: primary.host,
- seconds: 5,
- parallel: 5,
- ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
- });
- jsTestLog({CalibratingRun: result});
+let result = benchRun({
+ host: primary.host,
+ seconds: 5,
+ parallel: 5,
+ ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
+});
+jsTestLog({CalibratingRun: result});
- let insertRate = result["insert"];
- let throttledRate = insertRate / 2;
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: "flowControlTicketOverride",
- mode: "alwaysOn",
- data: {"numTickets": NumberInt(throttledRate)}
- }));
- // Sleep 2 seconds for the failpoint to take effect.
- sleep(2000);
+let insertRate = result["insert"];
+let throttledRate = insertRate / 2;
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: "flowControlTicketOverride",
+ mode: "alwaysOn",
+ data: {"numTickets": NumberInt(throttledRate)}
+}));
+// Sleep 2 seconds for the failpoint to take effect.
+sleep(2000);
- result = benchRun({
- host: primary.host,
- seconds: 5,
- parallel: 5,
- ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
- });
-    jsTestLog({ThrottledRun: result, ThrottledRate: throttledRate});
- let maxAllowedRate = 1.5 * throttledRate;
- let minAllowedRate = 0.5 * throttledRate;
- assert.gt(result["insert"], minAllowedRate);
- assert.lt(result["insert"], maxAllowedRate);
+result = benchRun({
+ host: primary.host,
+ seconds: 5,
+ parallel: 5,
+ ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
+});
+jsTestLog({ThrottledRun: result, ThrottledRate: throttledRate});
+let maxAllowedRate = 1.5 * throttledRate;
+let minAllowedRate = 0.5 * throttledRate;
+assert.gt(result["insert"], minAllowedRate);
+assert.lt(result["insert"], maxAllowedRate);
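+// For example, if the calibrating run measured ~10,000 inserts/sec, the
+// override grants ~5,000 tickets/sec and the throttled run must land between
+// 2,500 and 7,500 inserts/sec for the assertions above to pass.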
- // Cautiously unset to avoid any interaction with shutdown.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "flowControlTicketOverride", mode: "off"}));
+// Cautiously unset to avoid any interaction with shutdown.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "flowControlTicketOverride", mode: "off"}));
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/noPassthrough/ftdc_connection_pool.js b/jstests/noPassthrough/ftdc_connection_pool.js
index f1fb7336aa0..cd284fed4a5 100644
--- a/jstests/noPassthrough/ftdc_connection_pool.js
+++ b/jstests/noPassthrough/ftdc_connection_pool.js
@@ -7,30 +7,30 @@
load('jstests/libs/ftdc.js');
(function() {
- 'use strict';
- const testPath = MongoRunner.toRealPath('ftdc_dir');
- const st = new ShardingTest({
- shards: 2,
- mongos: {
- s0: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath}},
- }
- });
+'use strict';
+const testPath = MongoRunner.toRealPath('ftdc_dir');
+const st = new ShardingTest({
+ shards: 2,
+ mongos: {
+ s0: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath}},
+ }
+});
- const admin = st.s0.getDB('admin');
- const stats = verifyGetDiagnosticData(admin).connPoolStats;
- jsTestLog(`Diagnostic connection pool stats: ${tojson(stats)}`);
+const admin = st.s0.getDB('admin');
+const stats = verifyGetDiagnosticData(admin).connPoolStats;
+jsTestLog(`Diagnostic connection pool stats: ${tojson(stats)}`);
- assert(stats.hasOwnProperty('totalInUse'));
- assert(stats.hasOwnProperty('totalAvailable'));
- assert(stats.hasOwnProperty('totalCreated'));
- assert(stats.hasOwnProperty('totalRefreshing'));
+assert(stats.hasOwnProperty('totalInUse'));
+assert(stats.hasOwnProperty('totalAvailable'));
+assert(stats.hasOwnProperty('totalCreated'));
+assert(stats.hasOwnProperty('totalRefreshing'));
- // The connPoolStats command reply has "hosts", but FTDC's stats do not.
- assert(!stats.hasOwnProperty('hosts'));
+// The connPoolStats command reply has "hosts", but FTDC's stats do not.
+assert(!stats.hasOwnProperty('hosts'));
- // Check a few properties, without attempting to be thorough.
- assert(stats.connectionsInUsePerPool.hasOwnProperty('NetworkInterfaceTL-ShardRegistry'));
- assert(stats.replicaSetPingTimesMillis.hasOwnProperty(st.configRS.name));
+// Check a few properties, without attempting to be thorough.
+assert(stats.connectionsInUsePerPool.hasOwnProperty('NetworkInterfaceTL-ShardRegistry'));
+assert(stats.replicaSetPingTimesMillis.hasOwnProperty(st.configRS.name));
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/ftdc_setdirectory.js b/jstests/noPassthrough/ftdc_setdirectory.js
index 33877883fa3..caace9a9343 100644
--- a/jstests/noPassthrough/ftdc_setdirectory.js
+++ b/jstests/noPassthrough/ftdc_setdirectory.js
@@ -5,140 +5,136 @@
load('jstests/libs/ftdc.js');
(function() {
- 'use strict';
- let testPath1 = MongoRunner.toRealPath('ftdc_setdir1');
- let testPath2 = MongoRunner.toRealPath('ftdc_setdir2');
- let testPath3 = MongoRunner.toRealPath('ftdc_setdir3');
- // SERVER-30394: Use a directory relative to the current working directory.
- let testPath4 = 'ftdc_setdir4/';
- let testLog3 = testPath3 + "mongos_ftdc.log";
- let testLog4 = testPath4 + "mongos_ftdc.log";
-
- // Make the log file directory for mongos.
- mkdir(testPath3);
- mkdir(testPath4);
-
-    // Start up 4 mongos:
-    // 1. Normal MongoS with no log file to verify FTDC can be started at runtime with a path.
-    // 2. MongoS with an explicit diagnosticDataCollectionDirectoryPath setParameter at startup.
-    // 3. MongoS with log file to verify automatic FTDC path computation works.
-    // 4. MongoS with a relative log file path to verify path computation works (SERVER-30394).
- let st = new ShardingTest({
- shards: 1,
- mongos: {
- s0: {verbose: 0},
- s1: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath2}},
- s2: {logpath: testLog3},
- s3: {logpath: testLog4}
- }
- });
-
- let admin1 = st.s0.getDB('admin');
- let admin2 = st.s1.getDB('admin');
- let admin3 = st.s2.getDB('admin');
- let admin4 = st.s3.getDB('admin');
-
- function setParam(admin, obj) {
- var ret = admin.runCommand(Object.extend({setParameter: 1}, obj));
- return ret;
+'use strict';
+let testPath1 = MongoRunner.toRealPath('ftdc_setdir1');
+let testPath2 = MongoRunner.toRealPath('ftdc_setdir2');
+let testPath3 = MongoRunner.toRealPath('ftdc_setdir3');
+// SERVER-30394: Use a directory relative to the current working directory.
+let testPath4 = 'ftdc_setdir4/';
+let testLog3 = testPath3 + "mongos_ftdc.log";
+let testLog4 = testPath4 + "mongos_ftdc.log";
+
+// Make the log file directory for mongos.
+mkdir(testPath3);
+mkdir(testPath4);
+
+// Start up 4 mongos:
+// 1. Normal MongoS with no log file to verify FTDC can be started at runtime with a path.
+// 2. MongoS with an explicit diagnosticDataCollectionDirectoryPath setParameter at startup.
+// 3. MongoS with log file to verify automatic FTDC path computation works.
+// 4. MongoS with a relative log file path to verify path computation works (SERVER-30394).
+let st = new ShardingTest({
+ shards: 1,
+ mongos: {
+ s0: {verbose: 0},
+ s1: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath2}},
+ s2: {logpath: testLog3},
+ s3: {logpath: testLog4}
}
+});
- function getParam(admin, field) {
- var q = {getParameter: 1};
- q[field] = 1;
+let admin1 = st.s0.getDB('admin');
+let admin2 = st.s1.getDB('admin');
+let admin3 = st.s2.getDB('admin');
+let admin4 = st.s3.getDB('admin');
- var ret = admin.runCommand(q);
- assert.commandWorked(ret);
- return ret[field];
- }
+function setParam(admin, obj) {
+ var ret = admin.runCommand(Object.extend({setParameter: 1}, obj));
+ return ret;
+}
- // Verify FTDC can be started at runtime.
- function verifyFTDCDisabledOnStartup() {
- jsTestLog("Running verifyFTDCDisabledOnStartup");
- verifyCommonFTDCParameters(admin1, false);
+function getParam(admin, field) {
+ var q = {getParameter: 1};
+ q[field] = 1;
- // 1. Try to enable and fail
- assert.commandFailed(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
+ var ret = admin.runCommand(q);
+ assert.commandWorked(ret);
+ return ret[field];
+}
- // 2. Set path and succeed
- assert.commandWorked(
- setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
+// Verify FTDC can be started at runtime.
+function verifyFTDCDisabledOnStartup() {
+ jsTestLog("Running verifyFTDCDisabledOnStartup");
+ verifyCommonFTDCParameters(admin1, false);
- // 3. Set path again and fail
- assert.commandFailed(
- setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
+ // 1. Try to enable and fail
+ assert.commandFailed(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
- // 4. Enable successfully
- assert.commandWorked(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
+ // 2. Set path and succeed
+ assert.commandWorked(setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
- // 5. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin1);
- }
+ // 3. Set path again and fail
+ assert.commandFailed(setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
- // Verify FTDC is already running if there was a path set at startup.
- function verifyFTDCStartsWithPath() {
- jsTestLog("Running verifyFTDCStartsWithPath");
- verifyCommonFTDCParameters(admin2, true);
+ // 4. Enable successfully
+ assert.commandWorked(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
- // 1. Set path fail
- assert.commandFailed(
- setParam(admin2, {"diagnosticDataCollectionDirectoryPath": testPath2}));
+ // 5. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin1);
+}
- // 2. Enable successfully
- assert.commandWorked(setParam(admin2, {"diagnosticDataCollectionEnabled": 1}));
+// Verify FTDC is already running if there was a path set at startup.
+function verifyFTDCStartsWithPath() {
+ jsTestLog("Running verifyFTDCStartsWithPath");
+ verifyCommonFTDCParameters(admin2, true);
- // 3. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin2);
- }
+ // 1. Set path fail
+ assert.commandFailed(setParam(admin2, {"diagnosticDataCollectionDirectoryPath": testPath2}));
- function normpath(path) {
- // On Windows, strip the drive path because MongoRunner.toRealPath() returns a Unix Path
- // while FTDC returns a Windows path.
- return path.replace(/\\/g, "/").replace(/\w:/, "");
- }
+ // 2. Enable successfully
+ assert.commandWorked(setParam(admin2, {"diagnosticDataCollectionEnabled": 1}));
- // Verify FTDC is already running if there was a path set at startup.
- function verifyFTDCStartsWithLogFile() {
- jsTestLog("Running verifyFTDCStartsWithLogFile");
- verifyCommonFTDCParameters(admin3, true);
+ // 3. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin2);
+}
- // 1. Verify that path is computed correctly.
- let computedPath = getParam(admin3, "diagnosticDataCollectionDirectoryPath");
- assert.eq(normpath(computedPath), normpath(testPath3 + "mongos_ftdc.diagnostic.data"));
+function normpath(path) {
+ // On Windows, strip the drive path because MongoRunner.toRealPath() returns a Unix Path
+ // while FTDC returns a Windows path.
+ return path.replace(/\\/g, "/").replace(/\w:/, "");
+}
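+// e.g. normpath("C:\\data\\ftdc") and normpath("/data/ftdc") both yield
+// "/data/ftdc", which keeps the path comparisons below platform-independent.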
- // 2. Set path fail
- assert.commandFailed(
- setParam(admin3, {"diagnosticDataCollectionDirectoryPath": testPath3}));
+// Verify FTDC is already running if there was a path set at startup.
+function verifyFTDCStartsWithLogFile() {
+ jsTestLog("Running verifyFTDCStartsWithLogFile");
+ verifyCommonFTDCParameters(admin3, true);
- // 3. Enable successfully
- assert.commandWorked(setParam(admin3, {"diagnosticDataCollectionEnabled": 1}));
+ // 1. Verify that path is computed correctly.
+ let computedPath = getParam(admin3, "diagnosticDataCollectionDirectoryPath");
+ assert.eq(normpath(computedPath), normpath(testPath3 + "mongos_ftdc.diagnostic.data"));
- // 4. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin3);
- }
+ // 2. Set path fail
+ assert.commandFailed(setParam(admin3, {"diagnosticDataCollectionDirectoryPath": testPath3}));
- // Verify FTDC is already running if there is a relative log file path.
- function verifyFTDCStartsWithRelativeLogFile() {
- jsTestLog("Running verifyFTDCStartsWithRelativeLogFile");
- verifyCommonFTDCParameters(admin4, true);
+ // 3. Enable successfully
+ assert.commandWorked(setParam(admin3, {"diagnosticDataCollectionEnabled": 1}));
- // Skip verification of diagnosticDataCollectionDirectoryPath because it relies on comparing
- // cwd vs dbPath.
+ // 4. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin3);
+}
- // 1. Enable successfully
- assert.commandWorked(setParam(admin4, {"diagnosticDataCollectionEnabled": 1}));
+// Verify FTDC is already running if there is a relative log file path.
+function verifyFTDCStartsWithRelativeLogFile() {
+ jsTestLog("Running verifyFTDCStartsWithRelativeLogFile");
+ verifyCommonFTDCParameters(admin4, true);
- // 2. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin4);
- }
+ // Skip verification of diagnosticDataCollectionDirectoryPath because it relies on comparing
+ // cwd vs dbPath.
+
+ // 1. Enable successfully
+ assert.commandWorked(setParam(admin4, {"diagnosticDataCollectionEnabled": 1}));
+
+ // 2. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin4);
+}
- verifyFTDCDisabledOnStartup();
- verifyFTDCStartsWithPath();
- verifyFTDCStartsWithLogFile();
- verifyFTDCStartsWithRelativeLogFile();
+verifyFTDCDisabledOnStartup();
+verifyFTDCStartsWithPath();
+verifyFTDCStartsWithLogFile();
+verifyFTDCStartsWithRelativeLogFile();
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/ftdc_setparam.js b/jstests/noPassthrough/ftdc_setparam.js
index 4e9b2459311..8a810807541 100644
--- a/jstests/noPassthrough/ftdc_setparam.js
+++ b/jstests/noPassthrough/ftdc_setparam.js
@@ -1,19 +1,19 @@
// validate command line ftdc parameter parsing
(function() {
- 'use strict';
- var m = MongoRunner.runMongod({setParameter: "diagnosticDataCollectionPeriodMillis=101"});
+'use strict';
+var m = MongoRunner.runMongod({setParameter: "diagnosticDataCollectionPeriodMillis=101"});
- // Check the defaults are correct
- //
- function getparam(field) {
- var q = {getParameter: 1};
- q[field] = 1;
+// Check the defaults are correct
+//
+function getparam(field) {
+ var q = {getParameter: 1};
+ q[field] = 1;
- var ret = m.getDB("admin").runCommand(q);
- return ret[field];
- }
+ var ret = m.getDB("admin").runCommand(q);
+ return ret[field];
+}
- assert.eq(getparam("diagnosticDataCollectionPeriodMillis"), 101);
- MongoRunner.stopMongod(m);
+assert.eq(getparam("diagnosticDataCollectionPeriodMillis"), 101);
+MongoRunner.stopMongod(m);
})();
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index 7bebdce9dba..7ffd8e90c50 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -19,549 +19,533 @@
//
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/geo_math.js");
+load("jstests/libs/geo_math.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- const db = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+const db = conn.getDB("test");
- var randEnvironment = function() {
-
- // Normal earth environment
- if (Random.rand() < 0.5) {
- return {
- max: 180,
- min: -180,
- bits: Math.floor(Random.rand() * 32) + 1,
- earth: true,
- bucketSize: 360 / (4 * 1024 * 1024 * 1024)
- };
- }
-
- var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
- var scale = scales[Math.floor(Random.rand() * scales.length)];
- var offset = Random.rand() * scale;
-
- var max = Random.rand() * scale + offset;
- var min = -Random.rand() * scale + offset;
-        var bits = Math.floor(Random.rand() * 32) + 1;
- var range = max - min;
- var bucketSize = range / (4 * 1024 * 1024 * 1024);
-
- return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
- };
+var randEnvironment = function() {
+ // Normal earth environment
+ if (Random.rand() < 0.5) {
+ return {
+ max: 180,
+ min: -180,
+ bits: Math.floor(Random.rand() * 32) + 1,
+ earth: true,
+ bucketSize: 360 / (4 * 1024 * 1024 * 1024)
+ };
+ }
- var randPoint = function(env, query) {
+ var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
+ var scale = scales[Math.floor(Random.rand() * scales.length)];
+ var offset = Random.rand() * scale;
- if (query && Random.rand() > 0.5)
- return query.exact;
+ var max = Random.rand() * scale + offset;
+ var min = -Random.rand() * scale + offset;
+    var bits = Math.floor(Random.rand() * 32) + 1;
+ var range = max - min;
+ var bucketSize = range / (4 * 1024 * 1024 * 1024);
- if (env.earth)
- return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
+ return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
+};
- var range = env.max - env.min;
- return [Random.rand() * range + env.min, Random.rand() * range + env.min];
- };
+var randPoint = function(env, query) {
+ if (query && Random.rand() > 0.5)
+ return query.exact;
- var randLocType = function(loc, wrapIn) {
- return randLocTypes([loc], wrapIn)[0];
- };
+ if (env.earth)
+ return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
- var randLocTypes = function(locs, wrapIn) {
+ var range = env.max - env.min;
+ return [Random.rand() * range + env.min, Random.rand() * range + env.min];
+};
- var rLocs = [];
+var randLocType = function(loc, wrapIn) {
+ return randLocTypes([loc], wrapIn)[0];
+};
- for (var i = 0; i < locs.length; i++) {
- rLocs.push(locs[i]);
- }
+var randLocTypes = function(locs, wrapIn) {
+ var rLocs = [];
- if (wrapIn) {
- var wrappedLocs = [];
- for (var i = 0; i < rLocs.length; i++) {
- var wrapper = {};
- wrapper[wrapIn] = rLocs[i];
- wrappedLocs.push(wrapper);
- }
+ for (var i = 0; i < locs.length; i++) {
+ rLocs.push(locs[i]);
+ }
- return wrappedLocs;
+ if (wrapIn) {
+ var wrappedLocs = [];
+ for (var i = 0; i < rLocs.length; i++) {
+ var wrapper = {};
+ wrapper[wrapIn] = rLocs[i];
+ wrappedLocs.push(wrapper);
}
- return rLocs;
- };
-
- var randDataType = function() {
-
- var scales = [1, 10, 100, 1000, 10000];
- var docScale = scales[Math.floor(Random.rand() * scales.length)];
- var locScale = scales[Math.floor(Random.rand() * scales.length)];
+ return wrappedLocs;
+ }
- var numDocs = 40000;
- var maxLocs = 40000;
- // Make sure we don't blow past our test resources
- while (numDocs * maxLocs > 40000) {
- numDocs = Math.floor(Random.rand() * docScale) + 1;
- maxLocs = Math.floor(Random.rand() * locScale) + 1;
- }
+ return rLocs;
+};
- return {numDocs: numDocs, maxLocs: maxLocs};
- };
+var randDataType = function() {
+ var scales = [1, 10, 100, 1000, 10000];
+ var docScale = scales[Math.floor(Random.rand() * scales.length)];
+ var locScale = scales[Math.floor(Random.rand() * scales.length)];
- function computexscandist(latDegrees, maxDistDegrees) {
- // See s2cap.cc
- //
- // Compute the range of longitudes covered by the cap. We use the law
- // of sines for spherical triangles. Consider the triangle ABC where
- // A is the north pole, B is the center of the cap, and C is the point
- // of tangency between the cap boundary and a line of longitude. Then
- // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
- // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
- // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
- // minus the latitude). This formula also works for negative latitudes.
- //
- // Angle A is the difference of longitudes of B and C.
- var sin_c = Math.cos(deg2rad(latDegrees));
- var sin_a = Math.sin(deg2rad(maxDistDegrees));
- if (sin_a > sin_c) {
-            // Double-precision floating point rounding error; return an invalid distance.
- return 180;
- }
- var angleA = Math.asin(sin_a / sin_c);
- return rad2deg(angleA);
+ var numDocs = 40000;
+ var maxLocs = 40000;
+ // Make sure we don't blow past our test resources
+ while (numDocs * maxLocs > 40000) {
+ numDocs = Math.floor(Random.rand() * docScale) + 1;
+ maxLocs = Math.floor(Random.rand() * locScale) + 1;
}
- function errorMarginForPoint(env) {
- if (!env.bits) {
- return 0.01;
- }
- var scalingFactor = Math.pow(2, env.bits);
- return ((env.max - env.min) / scalingFactor) * Math.sqrt(2);
+ return {numDocs: numDocs, maxLocs: maxLocs};
+};
+
+function computexscandist(latDegrees, maxDistDegrees) {
+ // See s2cap.cc
+ //
+ // Compute the range of longitudes covered by the cap. We use the law
+ // of sines for spherical triangles. Consider the triangle ABC where
+ // A is the north pole, B is the center of the cap, and C is the point
+ // of tangency between the cap boundary and a line of longitude. Then
+ // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
+ // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
+ // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
+ // minus the latitude). This formula also works for negative latitudes.
+ //
+ // Angle A is the difference of longitudes of B and C.
+ var sin_c = Math.cos(deg2rad(latDegrees));
+ var sin_a = Math.sin(deg2rad(maxDistDegrees));
+ if (sin_a > sin_c) {
+        // Double-precision floating point rounding error; return an invalid distance.
+ return 180;
}
+ var angleA = Math.asin(sin_a / sin_c);
+ return rad2deg(angleA);
+}
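+// Sanity check of the bound above: for a 10-degree cap centered at latitude 60,
+// sin_a = sin(10 deg) ~= 0.174 and sin_c = cos(60 deg) = 0.5, so the longitude
+// half-width is asin(0.174 / 0.5) ~= 20.3 degrees, versus exactly 10 degrees
+// for the same cap at the equator.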
- function pointIsOK(startPoint, radius, env) {
- var error = errorMarginForPoint(env);
- var distDegrees = rad2deg(radius) + error;
- // TODO SERVER-24440: Points close to the north and south poles may fail to be returned by
- // $nearSphere queries answered using a "2d" index. We have empirically found that points
- // with latitudes between 89 and 90 degrees are potentially affected by this issue, so we
- // additionally reject any coordinates with a latitude that falls within that range.
- if ((startPoint[1] + distDegrees > 89) || (startPoint[1] - distDegrees < -89)) {
- return false;
- }
- var xscandist = computexscandist(startPoint[1], distDegrees);
- return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
+function errorMarginForPoint(env) {
+ if (!env.bits) {
+ return 0.01;
}
-
- var randQuery = function(env) {
- var center = randPoint(env);
-
- var sphereRadius = -1;
- var sphereCenter = null;
- if (env.earth) {
- // Get a start point that doesn't require wrapping
- // TODO: Are we a bit too aggressive with wrapping issues?
- var i;
- for (i = 0; i < 5; i++) {
- sphereRadius = Random.rand() * 45 * Math.PI / 180;
- sphereCenter = randPoint(env);
- if (pointIsOK(sphereCenter, sphereRadius, env)) {
- break;
- }
+ var scalingFactor = Math.pow(2, env.bits);
+ return ((env.max - env.min) / scalingFactor) * Math.sqrt(2);
+}
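+// e.g. with env = {min: -180, max: 180, bits: 11}, a grid cell spans
+// 360 / 2^11 ~= 0.176 degrees, giving a margin of 0.176 * sqrt(2) ~= 0.25
+// degrees.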
+
+function pointIsOK(startPoint, radius, env) {
+ var error = errorMarginForPoint(env);
+ var distDegrees = rad2deg(radius) + error;
+ // TODO SERVER-24440: Points close to the north and south poles may fail to be returned by
+ // $nearSphere queries answered using a "2d" index. We have empirically found that points
+ // with latitudes between 89 and 90 degrees are potentially affected by this issue, so we
+ // additionally reject any coordinates with a latitude that falls within that range.
+ if ((startPoint[1] + distDegrees > 89) || (startPoint[1] - distDegrees < -89)) {
+ return false;
+ }
+ var xscandist = computexscandist(startPoint[1], distDegrees);
+ return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
+}
+
+var randQuery = function(env) {
+ var center = randPoint(env);
+
+ var sphereRadius = -1;
+ var sphereCenter = null;
+ if (env.earth) {
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var i;
+ for (i = 0; i < 5; i++) {
+ sphereRadius = Random.rand() * 45 * Math.PI / 180;
+ sphereCenter = randPoint(env);
+ if (pointIsOK(sphereCenter, sphereRadius, env)) {
+ break;
}
- if (i == 5)
- sphereRadius = -1;
}
+ if (i == 5)
+ sphereRadius = -1;
+ }
- var box = [randPoint(env), randPoint(env)];
-
- var boxPoly = [
- [box[0][0], box[0][1]],
- [box[0][0], box[1][1]],
- [box[1][0], box[1][1]],
- [box[1][0], box[0][1]]
- ];
-
- if (box[0][0] > box[1][0]) {
- var swap = box[0][0];
- box[0][0] = box[1][0];
- box[1][0] = swap;
- }
-
- if (box[0][1] > box[1][1]) {
- var swap = box[0][1];
- box[0][1] = box[1][1];
- box[1][1] = swap;
- }
-
- return {
- center: center,
- radius: box[1][0] - box[0][0],
- exact: randPoint(env),
- sphereCenter: sphereCenter,
- sphereRadius: sphereRadius,
- box: box,
- boxPoly: boxPoly
- };
- };
-
- var resultTypes = {
- "exact": function(loc) {
- return query.exact[0] == loc[0] && query.exact[1] == loc[1];
- },
- "center": function(loc) {
- return Geo.distance(query.center, loc) <= query.radius;
- },
- "box": function(loc) {
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
-
- },
- "sphere": function(loc) {
- return (query.sphereRadius >= 0
- ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
- : false);
- },
- "poly": function(loc) {
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
- }
- };
-
- var queryResults = function(locs, query, results) {
+ var box = [randPoint(env), randPoint(env)];
- if (!results["center"]) {
- for (var type in resultTypes) {
- results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
- }
- }
+ var boxPoly = [
+ [box[0][0], box[0][1]],
+ [box[0][0], box[1][1]],
+ [box[1][0], box[1][1]],
+ [box[1][0], box[0][1]]
+ ];
- var indResults = {};
- for (var type in resultTypes) {
- indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
- }
-
- for (var type in resultTypes) {
- var docIn = false;
- for (var i = 0; i < locs.length; i++) {
- if (resultTypes[type](locs[i])) {
- results[type].locsIn++;
- indResults[type].locsIn++;
- indResults[type].docIn = true;
- } else {
- results[type].locsOut++;
- indResults[type].locsOut++;
- }
- }
- if (indResults[type].docIn)
- results[type].docsIn++;
- else
- results[type].docsOut++;
- }
-
- return indResults;
- };
+ if (box[0][0] > box[1][0]) {
+ var swap = box[0][0];
+ box[0][0] = box[1][0];
+ box[1][0] = swap;
+ }
- var randQueryAdditions = function(doc, indResults) {
+ if (box[0][1] > box[1][1]) {
+ var swap = box[0][1];
+ box[0][1] = box[1][1];
+ box[1][1] = swap;
+ }
- for (var type in resultTypes) {
- var choice = Random.rand();
- if (Random.rand() < 0.25)
- doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
- else if (Random.rand() < 0.5)
- doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
- else if (Random.rand() < 0.75)
- doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
- else
- doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
- }
+ return {
+ center: center,
+ radius: box[1][0] - box[0][0],
+ exact: randPoint(env),
+ sphereCenter: sphereCenter,
+ sphereRadius: sphereRadius,
+ box: box,
+ boxPoly: boxPoly
};
+};
+
+var resultTypes = {
+ "exact": function(loc) {
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1];
+ },
+ "center": function(loc) {
+ return Geo.distance(query.center, loc) <= query.radius;
+ },
+ "box": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
+ },
+ "sphere": function(loc) {
+ return (query.sphereRadius >= 0
+ ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
+ : false);
+ },
+ "poly": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
+ }
+};
- var randIndexAdditions = function(indexDoc) {
-
+var queryResults = function(locs, query, results) {
+ if (!results["center"]) {
for (var type in resultTypes) {
- if (Random.rand() < 0.5)
- continue;
-
- var choice = Random.rand();
- if (Random.rand() < 0.5)
- indexDoc[type] = 1;
- else
- indexDoc[type + ".docIn"] = 1;
- }
- };
-
- var randYesQuery = function() {
-
- var choice = Math.floor(Random.rand() * 7);
- if (choice == 0)
- return {$ne: "no"};
- else if (choice == 1)
- return "yes";
- else if (choice == 2)
- return /^yes/;
- else if (choice == 3)
- return {$in: ["good", "yes", "ok"]};
- else if (choice == 4)
- return {$exists: true};
- else if (choice == 5)
- return {$nin: ["bad", "no", "not ok"]};
- else if (choice == 6)
- return {$not: /^no/};
- };
-
- var locArray = function(loc) {
- if (loc.x)
- return [loc.x, loc.y];
- if (!loc.length)
- return [loc[0], loc[1]];
- return loc;
- };
-
- var locsArray = function(locs) {
- if (locs.loc) {
- const arr = [];
- for (var i = 0; i < locs.loc.length; i++)
- arr.push(locArray(locs.loc[i]));
- return arr;
- } else {
- const arr = [];
- for (var i = 0; i < locs.length; i++)
- arr.push(locArray(locs[i].loc));
- return arr;
+ results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
}
- };
-
- var minBoxSize = function(env, box) {
- return env.bucketSize * Math.pow(2, minBucketScale(env, box));
- };
-
- var minBucketScale = function(env, box) {
-
- if (box.length && box[0].length)
- box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
-
- if (box.length)
- box = Math.max(box[0], box[1]);
-
- print(box);
- print(env.bucketSize);
-
- return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
+ }
- };
+ var indResults = {};
+ for (var type in resultTypes) {
+ indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
+ }
- // TODO: Add spherical $uniqueDocs tests
- var numTests = 100;
-
- // Our seed will change every time this is run, but
- // each individual test will be reproducible given
- // that seed and test number
- var seed = new Date().getTime();
- // seed = 175 + 288 + 12
-
- for (var test = 0; test < numTests; test++) {
- Random.srand(seed + test);
- // Random.srand( 42240 )
- // Random.srand( 7344 )
- var t = db.testAllGeo;
- t.drop();
-
- print("Generating test environment #" + test);
- var env = randEnvironment();
- // env.bits = 11
- var query = randQuery(env);
- var data = randDataType();
- // data.numDocs = 5; data.maxLocs = 1;
- var paddingSize = Math.floor(Random.rand() * 10 + 1);
- var results = {};
- var totalPoints = 0;
- print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
- " locs ");
-
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < data.numDocs; i++) {
- var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
- totalPoints += numLocs;
-
- var multiPoint = [];
- for (var p = 0; p < numLocs; p++) {
- var point = randPoint(env, query);
- multiPoint.push(point);
+ for (var type in resultTypes) {
+ var docIn = false;
+ for (var i = 0; i < locs.length; i++) {
+ if (resultTypes[type](locs[i])) {
+ results[type].locsIn++;
+ indResults[type].locsIn++;
+ indResults[type].docIn = true;
+ } else {
+ results[type].locsOut++;
+ indResults[type].locsOut++;
}
-
- var indResults = queryResults(multiPoint, query, results);
-
- var doc;
- // Nest the keys differently
- if (Random.rand() < 0.5)
- doc = {locs: {loc: randLocTypes(multiPoint)}};
- else
- doc = {locs: randLocTypes(multiPoint, "loc")};
-
- randQueryAdditions(doc, indResults);
-
- doc._id = i;
- bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
-
- var indexDoc = {"locs.loc": "2d"};
- randIndexAdditions(indexDoc);
-
- // "earth" is used to drive test setup and not a valid createIndexes option or required at
- // this point. It must be removed before calling ensureIndexes().
- delete env.earth;
-
- assert.commandWorked(t.ensureIndex(indexDoc, env));
- assert.isnull(db.getLastError());
+ if (indResults[type].docIn)
+ results[type].docsIn++;
+ else
+ results[type].docsOut++;
+ }
- var padding = "x";
- for (var i = 0; i < paddingSize; i++)
- padding = padding + padding;
+ return indResults;
+};
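+// queryResults() tallies, per result type, how many docs and how many individual
+// locations fall inside the generated region (accumulated into `results`), and
+// returns the per-document breakdown used to tag each doc before insertion.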
+
+var randQueryAdditions = function(doc, indResults) {
+ for (var type in resultTypes) {
+        var choice = Random.rand();
+        if (choice < 0.25)
+            doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
+        else if (choice < 0.5)
+            doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
+        else if (choice < 0.75)
+            doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
+        else
+            doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
+ }
+};
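+// The four branches above store the docIn marker as a subdocument, a subdocument
+// holding an array, an array of subdocuments, or both, so the predicates from
+// randYesQuery() are exercised against scalar and multikey shapes alike.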
+
+var randIndexAdditions = function(indexDoc) {
+ for (var type in resultTypes) {
+ if (Random.rand() < 0.5)
+ continue;
+
+        var choice = Random.rand();
+        if (choice < 0.5)
+            indexDoc[type] = 1;
+        else
+            indexDoc[type + ".docIn"] = 1;
+ }
+};
+
+var randYesQuery = function() {
+ var choice = Math.floor(Random.rand() * 7);
+ if (choice == 0)
+ return {$ne: "no"};
+ else if (choice == 1)
+ return "yes";
+ else if (choice == 2)
+ return /^yes/;
+ else if (choice == 3)
+ return {$in: ["good", "yes", "ok"]};
+ else if (choice == 4)
+ return {$exists: true};
+ else if (choice == 5)
+ return {$nin: ["bad", "no", "not ok"]};
+ else if (choice == 6)
+ return {$not: /^no/};
+};
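+// Every branch above matches the "yes" / ["yes"] values written by
+// randQueryAdditions, so the assertions below count the same documents whichever
+// random predicate form (equality, regex, $in, $exists, negation) is drawn.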
+
+var locArray = function(loc) {
+ if (loc.x)
+ return [loc.x, loc.y];
+ if (!loc.length)
+ return [loc[0], loc[1]];
+ return loc;
+};
+
+var locsArray = function(locs) {
+ if (locs.loc) {
+ const arr = [];
+ for (var i = 0; i < locs.loc.length; i++)
+ arr.push(locArray(locs.loc[i]));
+ return arr;
+ } else {
+ const arr = [];
+ for (var i = 0; i < locs.length; i++)
+ arr.push(locArray(locs[i].loc));
+ return arr;
+ }
+};
+
+var minBoxSize = function(env, box) {
+ return env.bucketSize * Math.pow(2, minBucketScale(env, box));
+};
+
+var minBucketScale = function(env, box) {
+ if (box.length && box[0].length)
+ box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
+
+ if (box.length)
+ box = Math.max(box[0], box[1]);
+
+ print(box);
+ print(env.bucketSize);
+
+ return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
+};
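+// minBoxSize()/minBucketScale() derive the smallest power-of-two multiple of the
+// index bucketSize that covers the query box; they feed only the "Min box"
+// diagnostic print ahead of the $center query below.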
+
+// TODO: Add spherical $uniqueDocs tests
+var numTests = 100;
+
+// Our seed will change every time this is run, but
+// each individual test will be reproducible given
+// that seed and test number
+var seed = new Date().getTime();
+// seed = 175 + 288 + 12
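+// To reproduce a failing iteration, one could (hypothetically) pin the seed
+// printed by that run instead of using the clock, e.g.:
+//   var seed = 1234567890123;  // "seed" value from the failing run's printjson
+//   Random.srand(seed + 42);   // re-derives the environment for test #42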
+
+for (var test = 0; test < numTests; test++) {
+ Random.srand(seed + test);
+ // Random.srand( 42240 )
+ // Random.srand( 7344 )
+ var t = db.testAllGeo;
+ t.drop();
+
+ print("Generating test environment #" + test);
+ var env = randEnvironment();
+ // env.bits = 11
+ var query = randQuery(env);
+ var data = randDataType();
+ // data.numDocs = 5; data.maxLocs = 1;
+ var paddingSize = Math.floor(Random.rand() * 10 + 1);
+ var results = {};
+ var totalPoints = 0;
+ print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
+ " locs ");
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < data.numDocs; i++) {
+ var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
+ totalPoints += numLocs;
+
+ var multiPoint = [];
+ for (var p = 0; p < numLocs; p++) {
+ var point = randPoint(env, query);
+ multiPoint.push(point);
+ }
- print(padding);
+ var indResults = queryResults(multiPoint, query, results);
- printjson({
- seed: seed,
- test: test,
- env: env,
- query: query,
- data: data,
- results: results,
- paddingSize: paddingSize
- });
+ var doc;
+ // Nest the keys differently
+ if (Random.rand() < 0.5)
+ doc = {locs: {loc: randLocTypes(multiPoint)}};
+ else
+ doc = {locs: randLocTypes(multiPoint, "loc")};
- // exact
- print("Exact query...");
- assert.eq(
- results.exact.docsIn,
- t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
+ randQueryAdditions(doc, indResults);
- // $center
- print("Center query...");
- print("Min box : " + minBoxSize(env, query.radius));
+ doc._id = i;
+ bulk.insert(doc);
+ }
+ assert.writeOK(bulk.execute());
+
+ var indexDoc = {"locs.loc": "2d"};
+ randIndexAdditions(indexDoc);
+
+    // "earth" is used to drive test setup; it is not a valid createIndexes option, nor is it
+    // required at this point. It must be removed before calling ensureIndex().
+ delete env.earth;
+
+ assert.commandWorked(t.ensureIndex(indexDoc, env));
+ assert.isnull(db.getLastError());
+
+ var padding = "x";
+ for (var i = 0; i < paddingSize; i++)
+ padding = padding + padding;
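+    // padding doubles each pass, so its final length is 2^paddingSize characters
+    // (2 to 1024 for paddingSize in [1, 10]), varying the document growth the
+    // $set updates below will trigger.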
+
+ print(padding);
+
+ printjson({
+ seed: seed,
+ test: test,
+ env: env,
+ query: query,
+ data: data,
+ results: results,
+ paddingSize: paddingSize
+ });
+
+ // exact
+ print("Exact query...");
+ assert.eq(
+ results.exact.docsIn,
+ t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
+
+ // $center
+ print("Center query...");
+ print("Min box : " + minBoxSize(env, query.radius));
+ assert.eq(results.center.docsIn,
+ t.find({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
+ "center.docIn": randYesQuery()
+ }).count());
+
+ print("Center query update...");
+ var res = t.update({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
+ "center.docIn": randYesQuery()
+ },
+ {$set: {centerPaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.center.docsIn, res.nModified);
+
+ if (query.sphereRadius >= 0) {
+ print("Center sphere query...");
+ // $centerSphere
assert.eq(
- results.center.docsIn,
+ results.sphere.docsIn,
t.find({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
- "center.docIn": randYesQuery()
+ "locs.loc": {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
+ "sphere.docIn": randYesQuery()
}).count());
- print("Center query update...");
- var res = t.update({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
- "center.docIn": randYesQuery()
- },
- {$set: {centerPaddingA: padding}},
- false,
- true);
- assert.eq(results.center.docsIn, res.nModified);
-
- if (query.sphereRadius >= 0) {
- print("Center sphere query...");
- // $centerSphere
- assert.eq(results.sphere.docsIn,
- t.find({
- "locs.loc":
- {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
- "sphere.docIn": randYesQuery()
- }).count());
-
- print("Center sphere query update...");
- res = t.update({
- "locs.loc": {
- $within: {
- $centerSphere: [query.sphereCenter, query.sphereRadius],
- $uniqueDocs: true
- }
- },
- "sphere.docIn": randYesQuery()
+ print("Center sphere query update...");
+ res = t.update({
+ "locs.loc": {
+ $within:
+ {$centerSphere: [query.sphereCenter, query.sphereRadius], $uniqueDocs: true}
},
- {$set: {spherePaddingA: padding}},
- false,
- true);
- assert.eq(results.sphere.docsIn, res.nModified);
- }
+ "sphere.docIn": randYesQuery()
+ },
+ {$set: {spherePaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.sphere.docsIn, res.nModified);
+ }
- // $box
- print("Box query...");
- assert.eq(results.box.docsIn,
- t.find({
- "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
- "box.docIn": randYesQuery()
- }).count());
-
- // $polygon
- print("Polygon query...");
- assert.eq(results.poly.docsIn, t.find({
- "locs.loc": {$within: {$polygon: query.boxPoly}},
- "poly.docIn": randYesQuery()
- }).count());
-
- // $near
- print("Near query...");
+ // $box
+ print("Box query...");
+ assert.eq(results.box.docsIn, t.find({
+ "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
+ "box.docIn": randYesQuery()
+ }).count());
+
+ // $polygon
+ print("Polygon query...");
+ assert.eq(results.poly.docsIn, t.find({
+ "locs.loc": {$within: {$polygon: query.boxPoly}},
+ "poly.docIn": randYesQuery()
+ }).count());
+
+ // $near
+ print("Near query...");
+ assert.eq(results.center.docsIn,
+ t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
+ "Near query: center: " + query.center + "; radius: " + query.radius +
+ "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn);
+
+ if (query.sphereRadius >= 0) {
+ print("Near sphere query...");
+ // $centerSphere
assert.eq(
- results.center.docsIn,
- t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
- "Near query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
- results.center.docsIn + "; locs: " + results.center.locsIn);
-
- if (query.sphereRadius >= 0) {
- print("Near sphere query...");
- // $centerSphere
- assert.eq(results.sphere.docsIn,
- t.find({
- "locs.loc":
- {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
- }).count(true),
- "Near sphere query: sphere center: " + query.sphereCenter + "; radius: " +
- query.sphereRadius + "; docs: " + results.sphere.docsIn + "; locs: " +
- results.sphere.locsIn);
- }
+ results.sphere.docsIn,
+ t.find({
+ "locs.loc": {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
+ }).count(true),
+ "Near sphere query: sphere center: " + query.sphereCenter +
+ "; radius: " + query.sphereRadius + "; docs: " + results.sphere.docsIn +
+ "; locs: " + results.sphere.locsIn);
+ }
- // $geoNear aggregation stage.
- const aggregationLimit = 2 * results.center.docsIn;
- if (aggregationLimit > 0) {
- var output = t.aggregate([
- {
- $geoNear: {
- near: query.center,
- maxDistance: query.radius,
- includeLocs: "pt",
- distanceField: "dis",
- }
- },
- {$limit: aggregationLimit}
- ]).toArray();
-
- const errmsg = {
- limit: aggregationLimit,
- center: query.center,
- radius: query.radius,
- docs: results.center.docsIn,
- locs: results.center.locsIn,
- actualResult: output
- };
- assert.eq(results.center.docsIn, output.length, tojson(errmsg));
-
- let lastDistance = 0;
- for (var i = 0; i < output.length; i++) {
- var retDistance = output[i].dis;
- assert.close(retDistance, Geo.distance(locArray(query.center), output[i].pt));
- assert.lte(retDistance, query.radius);
- assert.gte(retDistance, lastDistance);
- lastDistance = retDistance;
- }
+ // $geoNear aggregation stage.
+ const aggregationLimit = 2 * results.center.docsIn;
+ if (aggregationLimit > 0) {
+ var output = t.aggregate([
+ {
+ $geoNear: {
+ near: query.center,
+ maxDistance: query.radius,
+ includeLocs: "pt",
+ distanceField: "dis",
+ }
+ },
+ {$limit: aggregationLimit}
+ ]).toArray();
+
+ const errmsg = {
+ limit: aggregationLimit,
+ center: query.center,
+ radius: query.radius,
+ docs: results.center.docsIn,
+ locs: results.center.locsIn,
+ actualResult: output
+ };
+ assert.eq(results.center.docsIn, output.length, tojson(errmsg));
+
+ let lastDistance = 0;
+ for (var i = 0; i < output.length; i++) {
+ var retDistance = output[i].dis;
+ assert.close(retDistance, Geo.distance(locArray(query.center), output[i].pt));
+ assert.lte(retDistance, query.radius);
+ assert.gte(retDistance, lastDistance);
+ lastDistance = retDistance;
}
-
- // $polygon
- print("Polygon remove...");
- res = t.remove(
- {"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
- assert.eq(results.poly.docsIn, res.nRemoved);
}
- MongoRunner.stopMongod(conn);
+ // $polygon
+ print("Polygon remove...");
+ res =
+ t.remove({"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
+ assert.eq(results.poly.docsIn, res.nRemoved);
+}
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 6eb52933161..9f402db0d16 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -1,107 +1,103 @@
// Test sanity of geo queries with a lot of points
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- const db = conn.getDB("test");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+const db = conn.getDB("test");
- var maxFields = 3;
+var maxFields = 3;
- for (var fields = 1; fields < maxFields; fields++) {
- var coll = db.testMnyPts;
- coll.drop();
+for (var fields = 1; fields < maxFields; fields++) {
+ var coll = db.testMnyPts;
+ coll.drop();
- var totalPts = 500 * 1000;
+ var totalPts = 500 * 1000;
- var bulk = coll.initializeUnorderedBulkOp();
- // Add points in a 100x100 grid
- for (var i = 0; i < totalPts; i++) {
- var ii = i % 10000;
+ var bulk = coll.initializeUnorderedBulkOp();
+ // Add points in a 100x100 grid
+ for (var i = 0; i < totalPts; i++) {
+ var ii = i % 10000;
- var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
+ var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
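+        // e.g. i = 12345 -> ii = 2345 -> loc = [45, 23]: x cycles 0..99, y steps
+        // every 100 points, and the 10,000-point grid pattern repeats 50 times.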
- // Add fields with different kinds of data
- for (var j = 0; j < fields; j++) {
- var field = null;
-
- if (j % 3 == 0) {
- // Make half the points not searchable
- field = "abcdefg" + (i % 2 == 0 ? "h" : "");
- } else if (j % 3 == 1) {
- field = new Date();
- } else {
- field = true;
- }
-
- doc["field" + j] = field;
- }
-
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- // Create the query for the additional fields
- const queryFields = {};
+ // Add fields with different kinds of data
for (var j = 0; j < fields; j++) {
var field = null;
if (j % 3 == 0) {
- field = "abcdefg";
+ // Make half the points not searchable
+ field = "abcdefg" + (i % 2 == 0 ? "h" : "");
} else if (j % 3 == 1) {
- field = {$lte: new Date()};
+ field = new Date();
} else {
field = true;
}
- queryFields["field" + j] = field;
+ doc["field" + j] = field;
}
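+        // For j % 3 == 0, even-numbered docs get "abcdefgh", which fails the later
+        // exact-match query on "abcdefg"; only the odd half stays searchable.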
- coll.ensureIndex({loc: "2d"});
-
- // Check that quarter of points in each quadrant
- for (var i = 0; i < 4; i++) {
- var x = i % 2;
- var y = Math.floor(i / 2);
-
- var box = [[0, 0], [49, 49]];
- box[0][0] += (x == 1 ? 50 : 0);
- box[1][0] += (x == 1 ? 50 : 0);
- box[0][1] += (y == 1 ? 50 : 0);
- box[1][1] += (y == 1 ? 50 : 0);
-
- // Now only half of each result comes back
- assert.eq(totalPts / (4 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
- assert.eq(
- totalPts / (4 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
+ bulk.insert(doc);
+ }
+ assert.writeOK(bulk.execute());
+
+ // Create the query for the additional fields
+ const queryFields = {};
+ for (var j = 0; j < fields; j++) {
+ var field = null;
+
+ if (j % 3 == 0) {
+ field = "abcdefg";
+ } else if (j % 3 == 1) {
+ field = {$lte: new Date()};
+ } else {
+ field = true;
}
- // Check that half of points in each half
- for (var i = 0; i < 2; i++) {
- var box = [[0, 0], [49, 99]];
- box[0][0] += (i == 1 ? 50 : 0);
- box[1][0] += (i == 1 ? 50 : 0);
-
- assert.eq(totalPts / (2 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
- assert.eq(
- totalPts / (2 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
- }
+ queryFields["field" + j] = field;
+ }
+
+ coll.ensureIndex({loc: "2d"});
+
+    // Check that a quarter of the points fall in each quadrant
+ for (var i = 0; i < 4; i++) {
+ var x = i % 2;
+ var y = Math.floor(i / 2);
+
+ var box = [[0, 0], [49, 49]];
+ box[0][0] += (x == 1 ? 50 : 0);
+ box[1][0] += (x == 1 ? 50 : 0);
+ box[0][1] += (y == 1 ? 50 : 0);
+ box[1][1] += (y == 1 ? 50 : 0);
- // Check that all but corner set of points in radius
- var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
+ // Now only half of each result comes back
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
+ }
+
+    // Check that half of the points fall in each half
+ for (var i = 0; i < 2; i++) {
+ var box = [[0, 0], [49, 99]];
+ box[0][0] += (i == 1 ? 50 : 0);
+ box[1][0] += (i == 1 ? 50 : 0);
- // All [99,x] pts are field0 : "abcdefg"
- assert.eq(
- totalPts / 2 - totalPts / (100 * 100),
- coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
- assert.eq(
- totalPts / 2 - totalPts / (100 * 100),
- coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
}
- MongoRunner.stopMongod(conn);
+    // Check that all but the corner set of points fall within the radius
+ var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
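+    // The far corner [99, 99] sits 99 * sqrt(2) ~= 140.01 from the origin, so this
+    // radius covers every grid point except that corner; each grid cell holds
+    // totalPts / (100 * 100) docs, hence the subtraction below.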
+
+ // All [99,x] pts are field0 : "abcdefg"
+ assert.eq(totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
+ assert.eq(totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
+}
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/geo_near_random1.js b/jstests/noPassthrough/geo_near_random1.js
index 9c59e21c9a0..06dcf86c819 100644
--- a/jstests/noPassthrough/geo_near_random1.js
+++ b/jstests/noPassthrough/geo_near_random1.js
@@ -1,22 +1,22 @@
// this tests all points using $near
var db;
(function() {
- "use strict";
- load("jstests/libs/geo_near_random.js");
+"use strict";
+load("jstests/libs/geo_near_random.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+db = conn.getDB("test");
- var test = new GeoNearRandomTest("weekly.geo_near_random1");
+var test = new GeoNearRandomTest("weekly.geo_near_random1");
- test.insertPts(1000);
+test.insertPts(1000);
- test.testPt([0, 0]);
- test.testPt(test.mkPt());
- test.testPt(test.mkPt());
- test.testPt(test.mkPt());
- test.testPt(test.mkPt());
+test.testPt([0, 0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/geo_near_random2.js b/jstests/noPassthrough/geo_near_random2.js
index aa09ebac6ff..b5ec59af112 100644
--- a/jstests/noPassthrough/geo_near_random2.js
+++ b/jstests/noPassthrough/geo_near_random2.js
@@ -1,30 +1,33 @@
// this tests 1% of all points using $near and $nearSphere
var db;
(function() {
- "use strict";
- load("jstests/libs/geo_near_random.js");
+"use strict";
+load("jstests/libs/geo_near_random.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+db = conn.getDB("test");
- var test = new GeoNearRandomTest("weekly.geo_near_random2");
+var test = new GeoNearRandomTest("weekly.geo_near_random2");
- test.insertPts(50000);
+test.insertPts(50000);
- const opts = {sphere: 0, nToTest: test.nPts * 0.01};
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
+const opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01
+};
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
- opts.sphere = 1;
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
+opts.sphere = 1;
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/global_operation_latency_histogram.js b/jstests/noPassthrough/global_operation_latency_histogram.js
index 90d24903ef6..2f103e70a96 100644
--- a/jstests/noPassthrough/global_operation_latency_histogram.js
+++ b/jstests/noPassthrough/global_operation_latency_histogram.js
@@ -2,167 +2,166 @@
// @tags: [requires_replication]
(function() {
- "use strict";
- var name = "operationalLatencyHistogramTest";
-
- var mongo = MongoRunner.runMongod();
- var testDB = mongo.getDB("test");
- var testColl = testDB[name + "coll"];
-
- testColl.drop();
-
- function getHistogramStats() {
- return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
- }
-
- var lastHistogram = getHistogramStats();
-
- // Checks that the difference in the histogram is what we expect, and also
- // accounts for the serverStatus command itself.
- function checkHistogramDiff(reads, writes, commands) {
- var thisHistogram = getHistogramStats();
- assert.eq(thisHistogram.reads.ops - lastHistogram.reads.ops, reads);
- assert.eq(thisHistogram.writes.ops - lastHistogram.writes.ops, writes);
- // Running the server status itself will increment command stats by one.
- assert.eq(thisHistogram.commands.ops - lastHistogram.commands.ops, commands + 1);
- return thisHistogram;
- }
-
- // Insert
- var numRecords = 100;
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Update
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Find
- var cursors = [];
- for (var i = 0; i < numRecords; i++) {
- cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2);
- assert.eq(cursors[i].next()._id, i);
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // GetMore
- for (var i = 0; i < numRecords / 2; i++) {
- // Trigger two getmore commands.
- assert.eq(cursors[i].next()._id, i + 1);
- assert.eq(cursors[i].next()._id, i + 2);
- assert.eq(cursors[i].next()._id, i + 3);
- assert.eq(cursors[i].next()._id, i + 4);
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // KillCursors
- // The last cursor has no additional results, hence does not need to be closed.
- for (var i = 0; i < numRecords - 1; i++) {
- cursors[i].close();
- }
- lastHistogram = checkHistogramDiff(0, 0, numRecords - 1);
-
- // Remove
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: i}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Upsert
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Aggregate
- for (var i = 0; i < numRecords; i++) {
- testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // Count
- for (var i = 0; i < numRecords; i++) {
- testColl.count({x: i});
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // FindAndModify
- testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}});
- lastHistogram = checkHistogramDiff(0, 1, 0);
-
- // CreateIndex
- assert.commandWorked(testColl.createIndex({pt: "2dsphere"}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // $geoNear aggregation stage
- assert.commandWorked(testDB.runCommand({
- aggregate: testColl.getName(),
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- distanceField: "dist",
- }
- }],
- cursor: {},
- }));
- lastHistogram = checkHistogramDiff(1, 0, 0);
-
- // GetIndexes
- testColl.getIndexes();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Reindex
- assert.commandWorked(testColl.reIndex());
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // DropIndex
- assert.commandWorked(testColl.dropIndex({pt: "2dsphere"}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Explain
- testColl.explain().find().next();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // CollStats
- assert.commandWorked(testDB.runCommand({collStats: testColl.getName()}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // CollMod
- assert.commandWorked(
- testDB.runCommand({collStats: testColl.getName(), validationLevel: "off"}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Compact
- var commandResult = testDB.runCommand({compact: testColl.getName()});
- // If storage engine supports compact, it should count as a command.
- if (!commandResult.ok) {
- assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported);
- }
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // DataSize
- testColl.dataSize();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // PlanCache
- testColl.getPlanCache().listQueryShapes();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // ServerStatus
- assert.commandWorked(testDB.serverStatus());
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // WhatsMyURI
- assert.commandWorked(testColl.runCommand("whatsmyuri"));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Test non-command.
- assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
- lastHistogram = checkHistogramDiff(0, 0, 1);
- MongoRunner.stopMongod(mongo);
+"use strict";
+var name = "operationalLatencyHistogramTest";
+
+var mongo = MongoRunner.runMongod();
+var testDB = mongo.getDB("test");
+var testColl = testDB[name + "coll"];
+
+testColl.drop();
+
+function getHistogramStats() {
+ return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
+}
+
+var lastHistogram = getHistogramStats();
+
+// Checks that the difference in the histogram is what we expect, and also
+// accounts for the serverStatus command itself.
+function checkHistogramDiff(reads, writes, commands) {
+ var thisHistogram = getHistogramStats();
+ assert.eq(thisHistogram.reads.ops - lastHistogram.reads.ops, reads);
+ assert.eq(thisHistogram.writes.ops - lastHistogram.writes.ops, writes);
+ // Running the server status itself will increment command stats by one.
+ assert.eq(thisHistogram.commands.ops - lastHistogram.commands.ops, commands + 1);
+ return thisHistogram;
+}
+
+// Insert
+var numRecords = 100;
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.insert({_id: i}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Update
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Find
+var cursors = [];
+for (var i = 0; i < numRecords; i++) {
+ cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2);
+ assert.eq(cursors[i].next()._id, i);
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// GetMore
+for (var i = 0; i < numRecords / 2; i++) {
+ // Trigger two getmore commands.
+ assert.eq(cursors[i].next()._id, i + 1);
+ assert.eq(cursors[i].next()._id, i + 2);
+ assert.eq(cursors[i].next()._id, i + 3);
+ assert.eq(cursors[i].next()._id, i + 4);
+}
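+// With batchSize(2), the find above consumed doc i and left i+1 buffered, so
+// reading through i+4 pulls two more batches: two getMore (read) ops for each of
+// the 50 cursors, i.e. numRecords reads in the check below.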
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// KillCursors
+// The last cursor has no additional results, hence does not need to be closed.
+for (var i = 0; i < numRecords - 1; i++) {
+ cursors[i].close();
+}
+lastHistogram = checkHistogramDiff(0, 0, numRecords - 1);
+
+// Remove
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.remove({_id: i}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Upsert
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Aggregate
+for (var i = 0; i < numRecords; i++) {
+ testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// Count
+for (var i = 0; i < numRecords; i++) {
+ testColl.count({x: i});
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// FindAndModify
+testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}});
+lastHistogram = checkHistogramDiff(0, 1, 0);
+
+// CreateIndex
+assert.commandWorked(testColl.createIndex({pt: "2dsphere"}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// $geoNear aggregation stage
+assert.commandWorked(testDB.runCommand({
+ aggregate: testColl.getName(),
+ pipeline: [{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ distanceField: "dist",
+ }
+ }],
+ cursor: {},
+}));
+lastHistogram = checkHistogramDiff(1, 0, 0);
+
+// GetIndexes
+testColl.getIndexes();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Reindex
+assert.commandWorked(testColl.reIndex());
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// DropIndex
+assert.commandWorked(testColl.dropIndex({pt: "2dsphere"}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Explain
+testColl.explain().find().next();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// CollStats
+assert.commandWorked(testDB.runCommand({collStats: testColl.getName()}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// CollMod
+assert.commandWorked(testDB.runCommand({collMod: testColl.getName(), validationLevel: "off"}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Compact
+var commandResult = testDB.runCommand({compact: testColl.getName()});
+// If the storage engine supports compact, it should count as a command.
+if (!commandResult.ok) {
+ assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported);
+}
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// DataSize
+testColl.dataSize();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// PlanCache
+testColl.getPlanCache().listQueryShapes();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// ServerStatus
+assert.commandWorked(testDB.serverStatus());
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// WhatsMyURI
+assert.commandWorked(testColl.runCommand("whatsmyuri"));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Test non-command.
+assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/global_transaction_latency_histogram.js b/jstests/noPassthrough/global_transaction_latency_histogram.js
index 16bba6fb313..56e8a2ca4c9 100644
--- a/jstests/noPassthrough/global_transaction_latency_histogram.js
+++ b/jstests/noPassthrough/global_transaction_latency_histogram.js
@@ -1,120 +1,121 @@
// Checks that the global histogram counters for transactions are updated as we expect.
// @tags: [requires_replication, uses_transactions]
(function() {
- "use strict";
-
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
-
- // Set up the test database.
- const dbName = "test";
- const collName = "global_transaction_latency_histogram";
-
- const testDB = primary.getDB(dbName);
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Start the session.
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- function getHistogramStats() {
- return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
- }
-
- // Checks that the actual value is within a minimum on the bound of the expected value. All
- // arguments must be in the same units.
- function assertLowerBound(expected, actual, bound) {
- assert.gte(actual, expected - bound);
- }
-
- // This function checks the diff between the last histogram and the current histogram, not the
- // absolute values.
- function checkHistogramDiff(lastHistogram, thisHistogram, fields) {
- for (let key in fields) {
- if (fields.hasOwnProperty(key)) {
- assert.eq(thisHistogram[key].ops - lastHistogram[key].ops, fields[key]);
- }
+"use strict";
+
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+
+// Set up the test database.
+const dbName = "test";
+const collName = "global_transaction_latency_histogram";
+
+const testDB = primary.getDB(dbName);
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Start the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+function getHistogramStats() {
+ return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
+}
+
+// Checks that the actual value is at least the expected value minus the given bound. All
+// arguments must be in the same units.
+function assertLowerBound(expected, actual, bound) {
+ assert.gte(actual, expected - bound);
+}
+
+// This function checks the diff between the last histogram and the current histogram, not the
+// absolute values.
+function checkHistogramDiff(lastHistogram, thisHistogram, fields) {
+ for (let key in fields) {
+ if (fields.hasOwnProperty(key)) {
+ assert.eq(thisHistogram[key].ops - lastHistogram[key].ops, fields[key]);
}
- return thisHistogram;
}
-
- // This function checks the diff between the last histogram's accumulated transactions latency
- // and this histogram's accumulated transactions latency is within a reasonable bound of what
- // we expect.
- function checkHistogramLatencyDiff(lastHistogram, thisHistogram, sleepTime) {
- let latencyDiff = thisHistogram.transactions.latency - lastHistogram.transactions.latency;
- // Check the bound in microseconds, which is the unit the latency is in. We do not check
- // upper bound because of unknown extra server latency.
- assertLowerBound(sleepTime * 1000, latencyDiff, 50000);
- return thisHistogram;
- }
-
- let lastHistogram = getHistogramStats();
-
- // Verify the base stats are correct.
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 0, "commands": 1, "transactions": 0});
-
- // Test histogram increments on a successful transaction. "commitTransaction" and "serverStatus"
- // commands are counted towards the "commands" counter.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
-
- // Test histogram increments on aborted transaction due to error (duplicate insert).
+ return thisHistogram;
+}
+
+// This function checks the diff between the last histogram's accumulated transactions latency
+// and this histogram's accumulated transactions latency is within a reasonable bound of what
+// we expect.
+function checkHistogramLatencyDiff(lastHistogram, thisHistogram, sleepTime) {
+ let latencyDiff = thisHistogram.transactions.latency - lastHistogram.transactions.latency;
+ // Check the bound in microseconds, which is the unit the latency is in. We do not check
+ // upper bound because of unknown extra server latency.
+ assertLowerBound(sleepTime * 1000, latencyDiff, 50000);
+ return thisHistogram;
+}
+
+let lastHistogram = getHistogramStats();
+
+// Verify the base stats are correct.
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 0, "commands": 1, "transactions": 0});
+
+// Test histogram increments on a successful transaction. "commitTransaction" and "serverStatus"
+// commands are counted towards the "commands" counter.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+assert.commandWorked(session.commitTransaction_forTesting());
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
+
+// Test histogram increments on a transaction aborted due to an error (duplicate insert).
+session.startTransaction();
+assert.commandFailedWithCode(sessionColl.insert({_id: "insert-1"}), ErrorCodes.DuplicateKey);
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 1, "commands": 1, "transactions": 1});
+
+// Ensure that the transaction was aborted on failure.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 0, "commands": 2, "transactions": 0});
+
+// Test histogram increments on an aborted transaction. "abortTransaction" command is counted
+// towards the "commands" counter.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+assert.commandWorked(session.abortTransaction_forTesting());
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
+
+// Test histogram increments on a multi-statement committed transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
+assert.commandWorked(sessionColl.insert({_id: "insert-4"}));
+assert.eq(sessionColl.find({_id: "insert-1"}).itcount(), 1);
+assert.commandWorked(session.commitTransaction_forTesting());
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 1, "writes": 2, "commands": 2, "transactions": 1});
+
+// Test that the cumulative transaction latency counter is updated appropriately after a
+// sequence of back-to-back 200 ms transactions.
+const sleepTime = 200;
+for (let i = 0; i < 3; i++) {
session.startTransaction();
- assert.commandFailedWithCode(sessionColl.insert({_id: "insert-1"}), ErrorCodes.DuplicateKey);
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 1, "commands": 1, "transactions": 1});
-
- // Ensure that the transaction was aborted on failure.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 0, "commands": 2, "transactions": 0});
-
- // Test histogram increments on an aborted transaction. "abortTransaction" command is counted
- // towards the "commands" counter.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
- assert.commandWorked(session.abortTransaction_forTesting());
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
-
- // Test histogram increments on a multi-statement committed transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
- assert.commandWorked(sessionColl.insert({_id: "insert-4"}));
assert.eq(sessionColl.find({_id: "insert-1"}).itcount(), 1);
+ sleep(sleepTime);
assert.commandWorked(session.commitTransaction_forTesting());
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 1, "writes": 2, "commands": 2, "transactions": 1});
-
- // Test that the cumulative transaction latency counter is updated appropriately after a
- // sequence of back-to-back 200 ms transactions.
- const sleepTime = 200;
- for (let i = 0; i < 3; i++) {
- session.startTransaction();
- assert.eq(sessionColl.find({_id: "insert-1"}).itcount(), 1);
- sleep(sleepTime);
- assert.commandWorked(session.commitTransaction_forTesting());
- lastHistogram = checkHistogramLatencyDiff(lastHistogram, getHistogramStats(), sleepTime);
- }
+ lastHistogram = checkHistogramLatencyDiff(lastHistogram, getHistogramStats(), sleepTime);
+}
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/hostname_bind_ips.js b/jstests/noPassthrough/hostname_bind_ips.js
index d7d87e99ec5..1de16bde73e 100644
--- a/jstests/noPassthrough/hostname_bind_ips.js
+++ b/jstests/noPassthrough/hostname_bind_ips.js
@@ -2,20 +2,20 @@
// binding to localhost and enabling IPv6.
(function() {
- 'use strict';
+'use strict';
- const proc = MongoRunner.runMongod({bind_ip: "localhost", "ipv6": ""});
- assert.neq(proc, null);
+const proc = MongoRunner.runMongod({bind_ip: "localhost", "ipv6": ""});
+assert.neq(proc, null);
- assert.soon(function() {
- try {
- const uri = 'mongodb://127.0.0.1:' + proc.port + '/test';
- const conn = new Mongo(uri);
- assert.commandWorked(conn.adminCommand({ping: 1}));
- return true;
- } catch (e) {
- return false;
- }
- }, "Cannot connect to 127.0.0.1 when bound to localhost", 30 * 1000);
- MongoRunner.stopMongod(proc);
+assert.soon(function() {
+ try {
+ const uri = 'mongodb://127.0.0.1:' + proc.port + '/test';
+ const conn = new Mongo(uri);
+ assert.commandWorked(conn.adminCommand({ping: 1}));
+ return true;
+ } catch (e) {
+ return false;
+ }
+}, "Cannot connect to 127.0.0.1 when bound to localhost", 30 * 1000);
+MongoRunner.stopMongod(proc);
})();
diff --git a/jstests/noPassthrough/http_client_keep_alive.js b/jstests/noPassthrough/http_client_keep_alive.js
index a8d802d929e..689231dbe03 100644
--- a/jstests/noPassthrough/http_client_keep_alive.js
+++ b/jstests/noPassthrough/http_client_keep_alive.js
@@ -2,61 +2,59 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/configExpand/lib.js');
-
- function runTest(mongod, web) {
- assert(mongod);
- const admin = mongod.getDB('admin');
-
- // Only bother with this test when using curl >= 7.57.0.
- const http_status = admin.adminCommand({serverStatus: 1, http_client: 1});
- const http_client = assert.commandWorked(http_status).http_client;
- if (http_client.type !== 'curl') {
- print("*** Skipping test, not using curl");
- return;
- }
-
- printjson(http_client);
- if (http_client.running.version_num < 0x73900) {
- // 39 hex == 57 dec, so 0x73900 == 7.57.0
- print(
- "*** Skipping test, curl < 7.57.0 does not support connection pooling via share interface");
- return;
- }
-
- // Issue a series of requests to the mock server.
- for (let i = 0; i < 10; ++i) {
- const cmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getStringReflectionURL(i)});
- const reflect = assert.commandWorked(cmd).body;
- assert.eq(reflect, i, "Mock server reflected something unexpected.");
- }
-
- // Check connect count.
- const countCmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
- const count = assert.commandWorked(countCmd).body;
- assert.eq(count, 1, "Connections were not kept alive.");
-
- // Force the open connection to close.
- const closeCmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connection_close'});
- const close = assert.commandWorked(closeCmd).body;
- assert.eq(close, 'closed');
-
- // Check count with new connection.
- const connectsCmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
- const connects = assert.commandWorked(connectsCmd).body;
- assert.eq(connects, 2, "Connection count incorrect.");
+'use strict';
+
+load('jstests/noPassthrough/libs/configExpand/lib.js');
+
+function runTest(mongod, web) {
+ assert(mongod);
+ const admin = mongod.getDB('admin');
+
+ // Only bother with this test when using curl >= 7.57.0.
+ const http_status = admin.adminCommand({serverStatus: 1, http_client: 1});
+ const http_client = assert.commandWorked(http_status).http_client;
+ if (http_client.type !== 'curl') {
+ print("*** Skipping test, not using curl");
+ return;
+ }
+
+ printjson(http_client);
+ if (http_client.running.version_num < 0x73900) {
+ // 39 hex == 57 dec, so 0x73900 == 7.57.0
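+        // (libcurl packs version_num as 0xXXYYZZ -- major, minor, patch -- so
+        // 7.57.0 encodes as 0x073900.)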
+ print(
+ "*** Skipping test, curl < 7.57.0 does not support connection pooling via share interface");
+ return;
+ }
+
+ // Issue a series of requests to the mock server.
+ for (let i = 0; i < 10; ++i) {
+ const cmd = admin.runCommand({httpClientRequest: 1, uri: web.getStringReflectionURL(i)});
+ const reflect = assert.commandWorked(cmd).body;
+ assert.eq(reflect, i, "Mock server reflected something unexpected.");
}
- const web = new ConfigExpandRestServer();
- web.start();
- const mongod = MongoRunner.runMongod({setParameter: 'enableTestCommands=1'});
- runTest(mongod, web);
- MongoRunner.stopMongod(mongod);
- web.stop();
+ // Check connect count.
+ const countCmd = admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
+ const count = assert.commandWorked(countCmd).body;
+ assert.eq(count, 1, "Connections were not kept alive.");
+
+ // Force the open connection to close.
+ const closeCmd =
+ admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connection_close'});
+ const close = assert.commandWorked(closeCmd).body;
+ assert.eq(close, 'closed');
+
+ // Check count with new connection.
+ const connectsCmd =
+ admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
+ const connects = assert.commandWorked(connectsCmd).body;
+ assert.eq(connects, 2, "Connection count incorrect.");
+}
+
+const web = new ConfigExpandRestServer();
+web.start();
+const mongod = MongoRunner.runMongod({setParameter: 'enableTestCommands=1'});
+runTest(mongod, web);
+MongoRunner.stopMongod(mongod);
+web.stop();
})();
diff --git a/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js b/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js
index a99533a8bbf..728f5566a5e 100644
--- a/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js
+++ b/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js
@@ -5,67 +5,67 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- // Insert an invalid geo document that will be removed before the indexer starts a collecton
- // scan.
- assert.commandWorked(coll.insert({
- _id: 0,
- b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
- }));
+// Insert an invalid geo document that will be removed before the indexer starts a collection
+// scan.
+assert.commandWorked(coll.insert({
+ _id: 0,
+ b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
+}));
- // We are using this fail point to pause the index build before it starts the collection scan.
- // This is important for this test because we are mutating the collection state before the index
- // builder is able to observe the invalid geo document.
- // By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
- // collection scan.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
+// We are using this fail point to pause the index build before it starts the collection scan.
+// This is important for this test because we are mutating the collection state before the index
+// builder is able to observe the invalid geo document.
+// By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
+// collection scan.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
- const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // Insert a valid geo document to initialize the hybrid index builder's side table state.
- assert.commandWorked(coll.insert({
- b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
- }));
+// Insert a valid geo document to initialize the hybrid index builder's side table state.
+assert.commandWorked(coll.insert({
+ b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
+}));
- // Removing the invalid geo document should not cause any issues for the side table accounting.
- assert.commandWorked(coll.remove({_id: 0}));
+// Removing the invalid geo document should not cause any issues for the side table accounting.
+assert.commandWorked(coll.remove({_id: 0}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
- // Wait for the index build to finish. Since the invalid geo document is removed before the
- // index build scans the collection, the index should be built successfully.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
+// Wait for the index build to finish. Since the invalid geo document is removed before the
+// index build scans the collection, the index should be built successfully.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js b/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js
index 3492726334d..d01ad9f4e82 100644
--- a/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js
+++ b/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js
@@ -5,63 +5,63 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- // Insert an invalid geo document that will be removed before the indexer starts a collecton
- // scan.
- assert.commandWorked(coll.insert({
- _id: 0,
- b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
- }));
+// Insert an invalid geo document that will be fixed before the indexer starts a collection
+// scan.
+assert.commandWorked(coll.insert({
+ _id: 0,
+ b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
+}));
- // We are using this fail point to pause the index build before it starts the collection scan.
- // This is important for this test because we are mutating the collection state before the index
- // builder is able to observe the invalid geo document.
- // By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
- // collection scan.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
+// We are using this fail point to pause the index build before it starts the collection scan.
+// This is important for this test because we are mutating the collection state before the index
+// builder is able to observe the invalid geo document.
+// By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
+// collection scan.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
- const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // Fixing the invalid geo document should not cause any issues for the side table accounting.
- assert.commandWorked(coll.update(
- {_id: 0}, {b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}}));
+// Fixing the invalid geo document should not cause any issues for the side table accounting.
+assert.commandWorked(coll.update(
+ {_id: 0}, {b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
- // Wait for the index build to finish. Since the invalid geo document is removed before the
- // index build scans the collection, the index should be built successfully.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
+// Wait for the index build to finish. Since the invalid geo document is fixed before the
+// index build scans the collection, the index should be built successfully.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_index_with_updates.js b/jstests/noPassthrough/hybrid_index_with_updates.js
index 869dce5b26c..3b9c2d89f7a 100644
--- a/jstests/noPassthrough/hybrid_index_with_updates.js
+++ b/jstests/noPassthrough/hybrid_index_with_updates.js
@@ -5,130 +5,130 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- let conn = MongoRunner.runMongod();
- let testDB = conn.getDB('test');
-
- let turnFailPointOn = function(failPointName, data) {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: failPointName, mode: "alwaysOn", data: data || {}}));
- };
-
- let turnFailPointOff = function(failPointName) {
- assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- };
-
- let totalDocs = 0;
- let crudOpsForPhase = function(coll, phase) {
- let bulk = coll.initializeUnorderedBulkOp();
-
- // Create 1000 documents in a specific range for this phase.
- for (let i = 0; i < 1000; i++) {
- bulk.insert({i: (phase * 1000) + i});
- }
- totalDocs += 1000;
-
- if (phase <= 0) {
- assert.commandWorked(bulk.execute());
- return;
- }
-
- // Update 50 documents.
- // For example, if phase is 2, documents [100, 150) will be updated to [-100, -150).
- let start = (phase - 1) * 100;
- for (let j = start; j < (100 * phase) - 50; j++) {
- bulk.find({i: j}).update({$set: {i: -j}});
- }
- // Delete 25 documents.
- // Similarly, if phase is 2, documents [150, 200) will be removed.
- for (let j = start + 50; j < 100 * phase; j++) {
- bulk.find({i: j}).remove();
- }
- totalDocs -= 50;
+"use strict";
+load("jstests/libs/check_log.js");
+
+let conn = MongoRunner.runMongod();
+let testDB = conn.getDB('test');
+
+let turnFailPointOn = function(failPointName, data) {
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: failPointName, mode: "alwaysOn", data: data || {}}));
+};
+
+let turnFailPointOff = function(failPointName) {
+ assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+};
+
+let totalDocs = 0;
+let crudOpsForPhase = function(coll, phase) {
+ let bulk = coll.initializeUnorderedBulkOp();
+
+ // Create 1000 documents in a specific range for this phase.
+ for (let i = 0; i < 1000; i++) {
+ bulk.insert({i: (phase * 1000) + i});
+ }
+ totalDocs += 1000;
+
+ if (phase <= 0) {
assert.commandWorked(bulk.execute());
- };
+ return;
+ }
+
+ // Update 50 documents.
+ // For example, if phase is 2, documents [100, 150) will be updated to [-100, -150).
+ let start = (phase - 1) * 100;
+ for (let j = start; j < (100 * phase) - 50; j++) {
+ bulk.find({i: j}).update({$set: {i: -j}});
+ }
+ // Delete 50 documents.
+ // Similarly, if phase is 2, documents [150, 200) will be removed.
+ for (let j = start + 50; j < 100 * phase; j++) {
+ bulk.find({i: j}).remove();
+ }
+ totalDocs -= 50;
+
+ assert.commandWorked(bulk.execute());
+};
- crudOpsForPhase(testDB.hybrid, 0);
- assert.eq(totalDocs, testDB.hybrid.count());
+crudOpsForPhase(testDB.hybrid, 0);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Hang the build after the first document.
- let stopKey = {'i': 1};
- turnFailPointOn("hangBeforeIndexBuildOf", stopKey);
+// Hang the build after the first document.
+let stopKey = {'i': 1};
+turnFailPointOn("hangBeforeIndexBuildOf", stopKey);
- // Start the background build.
- let bgBuild = startParallelShell(function() {
- assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true}));
- }, conn.port);
+// Start the background build.
+let bgBuild = startParallelShell(function() {
+ assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true}));
+}, conn.port);
- checkLog.contains(conn, "Hanging before index build of i=1");
+checkLog.contains(conn, "Hanging before index build of i=1");
- // Phase 1: Collection scan and external sort
- // Insert documents while doing the bulk build.
- crudOpsForPhase(testDB.hybrid, 1);
- assert.eq(totalDocs, testDB.hybrid.count());
+// Phase 1: Collection scan and external sort
+// Insert documents while doing the bulk build.
+crudOpsForPhase(testDB.hybrid, 1);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Enable pause after bulk dump into index.
- turnFailPointOn("hangAfterIndexBuildDumpsInsertsFromBulk");
+// Enable pause after bulk dump into index.
+turnFailPointOn("hangAfterIndexBuildDumpsInsertsFromBulk");
- // Wait for the bulk insert to complete.
- turnFailPointOff("hangBeforeIndexBuildOf");
- checkLog.contains(conn, "Hanging after dumping inserts from bulk builder");
+// Wait for the bulk insert to complete.
+turnFailPointOff("hangBeforeIndexBuildOf");
+checkLog.contains(conn, "Hanging after dumping inserts from bulk builder");
- // Phase 2: First drain
- // Do some updates, inserts and deletes after the bulk builder has finished.
+// Phase 2: First drain
+// Do some updates, inserts and deletes after the bulk builder has finished.
- // Hang after yielding
- turnFailPointOn("hangDuringIndexBuildDrainYield", {namespace: testDB.hybrid.getFullName()});
+// Hang after yielding
+turnFailPointOn("hangDuringIndexBuildDrainYield", {namespace: testDB.hybrid.getFullName()});
- // Enable pause after first drain.
- turnFailPointOn("hangAfterIndexBuildFirstDrain");
+// Enable pause after first drain.
+turnFailPointOn("hangAfterIndexBuildFirstDrain");
- crudOpsForPhase(testDB.hybrid, 2);
- assert.eq(totalDocs, testDB.hybrid.count());
+crudOpsForPhase(testDB.hybrid, 2);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Allow first drain to start.
- turnFailPointOff("hangAfterIndexBuildDumpsInsertsFromBulk");
+// Allow first drain to start.
+turnFailPointOff("hangAfterIndexBuildDumpsInsertsFromBulk");
- // Ensure the operation yields during the drain, then attempt some operations.
- checkLog.contains(conn, "Hanging index build during drain yield");
- assert.commandWorked(testDB.hybrid.insert({i: "during yield"}));
- assert.commandWorked(testDB.hybrid.remove({i: "during yield"}));
- turnFailPointOff("hangDuringIndexBuildDrainYield");
+// Ensure the operation yields during the drain, then attempt some operations.
+checkLog.contains(conn, "Hanging index build during drain yield");
+assert.commandWorked(testDB.hybrid.insert({i: "during yield"}));
+assert.commandWorked(testDB.hybrid.remove({i: "during yield"}));
+turnFailPointOff("hangDuringIndexBuildDrainYield");
- // Wait for first drain to finish.
- checkLog.contains(conn, "Hanging after index build first drain");
+// Wait for first drain to finish.
+checkLog.contains(conn, "Hanging after index build first drain");
- // Phase 3: Second drain
- // Enable pause after second drain.
- turnFailPointOn("hangAfterIndexBuildSecondDrain");
+// Phase 3: Second drain
+// Enable pause after second drain.
+turnFailPointOn("hangAfterIndexBuildSecondDrain");
- // Add inserts that must be consumed in the second drain.
- crudOpsForPhase(testDB.hybrid, 3);
- assert.eq(totalDocs, testDB.hybrid.count());
+// Add inserts that must be consumed in the second drain.
+crudOpsForPhase(testDB.hybrid, 3);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Allow second drain to start.
- turnFailPointOff("hangAfterIndexBuildFirstDrain");
+// Allow second drain to start.
+turnFailPointOff("hangAfterIndexBuildFirstDrain");
- // Wait for second drain to finish.
- checkLog.contains(conn, "Hanging after index build second drain");
+// Wait for second drain to finish.
+checkLog.contains(conn, "Hanging after index build second drain");
- // Phase 4: Final drain and commit.
- // Add inserts that must be consumed in the final drain.
- crudOpsForPhase(testDB.hybrid, 4);
- assert.eq(totalDocs, testDB.hybrid.count());
+// Phase 4: Final drain and commit.
+// Add inserts that must be consumed in the final drain.
+crudOpsForPhase(testDB.hybrid, 4);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Allow final drain to start.
- turnFailPointOff("hangAfterIndexBuildSecondDrain");
+// Allow final drain to start.
+turnFailPointOff("hangAfterIndexBuildSecondDrain");
- // Wait for build to complete.
- bgBuild();
+// Wait for build to complete.
+bgBuild();
- assert.eq(totalDocs, testDB.hybrid.count());
- assert.commandWorked(testDB.hybrid.validate({full: true}));
+assert.eq(totalDocs, testDB.hybrid.count());
+assert.commandWorked(testDB.hybrid.validate({full: true}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
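
The phase hand-offs above all share one shape: arm the next failpoint, release the current one, then block on the log line that proves the build reached the new failpoint. Condensed into a sketch — advanceBuild is a hypothetical name, and conn/testDB are assumed to be the connection and database from a test like this one:

    // Sketch: move a paused hybrid index build from failpoint 'from' to 'to'.
    // Assumes jstests/libs/check_log.js has been loaded for checkLog.
    function advanceBuild(from, to, logLine) {
        assert.commandWorked(testDB.adminCommand({configureFailPoint: to, mode: 'alwaysOn'}));
        assert.commandWorked(testDB.adminCommand({configureFailPoint: from, mode: 'off'}));
        checkLog.contains(conn, logLine);  // build is now parked at 'to'
    }
    // Example hand-off between the bulk-load and first-drain phases:
    // advanceBuild('hangAfterIndexBuildDumpsInsertsFromBulk',
    //              'hangAfterIndexBuildFirstDrain',
    //              'Hanging after index build first drain');
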
diff --git a/jstests/noPassthrough/hybrid_partial_geo_index.js b/jstests/noPassthrough/hybrid_partial_geo_index.js
index 8e204647cdf..7418c489eea 100644
--- a/jstests/noPassthrough/hybrid_partial_geo_index.js
+++ b/jstests/noPassthrough/hybrid_partial_geo_index.js
@@ -4,68 +4,70 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- // Create a 2dsphere partial index for documents where 'a', the field in the filter expression,
- // is greater than 0.
- const partialIndex = {b: '2dsphere'};
- const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: {$gt: 0}}});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+// Create a 2dsphere partial index for documents where 'a', the field in the filter expression,
+// is greater than 0.
+const partialIndex = {
+ b: '2dsphere'
+};
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: {$gt: 0}}});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // This document has an invalid GeoJSON format (duplicated points), but will not be indexed.
- const unindexedDoc = {
- _id: 0,
- a: -1,
- b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]},
- };
+// This document has an invalid GeoJSON format (duplicated points), but will not be indexed.
+const unindexedDoc = {
+ _id: 0,
+ a: -1,
+ b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]},
+};
- // This document has valid GeoJSON and will be indexed.
- const indexedDoc = {
- _id: 1,
- a: 1,
- b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
- };
+// This document has valid GeoJSON and will be indexed.
+const indexedDoc = {
+ _id: 1,
+ a: 1,
+ b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
+};
- assert.commandWorked(coll.insert(unindexedDoc));
- assert.commandWorked(coll.insert(indexedDoc));
+assert.commandWorked(coll.insert(unindexedDoc));
+assert.commandWorked(coll.insert(indexedDoc));
- // Removing unindexed document should succeed without error.
- assert.commandWorked(coll.remove({_id: 0}));
+// Removing unindexed document should succeed without error.
+assert.commandWorked(coll.remove({_id: 0}));
- IndexBuildTest.resumeIndexBuilds(primary);
+IndexBuildTest.resumeIndexBuilds(primary);
- // Wait for the index build to finish.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
+// Wait for the index build to finish.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
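
Besides removing one level of indentation from the IIFE body, this hunk shows the main shape change clang-format-7 makes in these files: a short object-literal initializer at file scope gets split across lines. The pattern in isolation:

    // Before (indented inside the IIFE):
    //     const partialIndex = {b: '2dsphere'};
    // After clang-format-7 moves it to file scope:
    const partialIndex = {
        b: '2dsphere'
    };
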
diff --git a/jstests/noPassthrough/hybrid_partial_index_update.js b/jstests/noPassthrough/hybrid_partial_index_update.js
index 79d9f9cb48e..878cd334ce4 100644
--- a/jstests/noPassthrough/hybrid_partial_index_update.js
+++ b/jstests/noPassthrough/hybrid_partial_index_update.js
@@ -4,53 +4,55 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+'use strict';
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- // Create a partial index for documents where 'a', the field in the filter expression,
- // is equal to 1.
- const partialIndex = {a: 1};
- const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: 1}});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+// Create a partial index for documents where 'a', the field in the filter expression,
+// is equal to 1.
+const partialIndex = {
+ a: 1
+};
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: 1}});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
- // Update the document so that it no longer meets the partial index criteria.
- assert.commandWorked(coll.update({_id: 0}, {$set: {a: 0}}));
+// Update the document so that it no longer meets the partial index criteria.
+assert.commandWorked(coll.update({_id: 0}, {$set: {a: 0}}));
- IndexBuildTest.resumeIndexBuilds(primary);
+IndexBuildTest.resumeIndexBuilds(primary);
- // Wait for the index build to finish.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
+// Wait for the index build to finish.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
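
The update in this test moves the only document out of the partialFilterExpression while the build is paused, so the finished index must not contain it. The same semantics can be observed on a plain standalone; a minimal sketch, with all names illustrative:

    // Sketch: a document that stops matching the partial filter drops out
    // of the partial index.
    const conn = MongoRunner.runMongod();
    const c = conn.getDB('test').partial;
    assert.commandWorked(c.createIndex({a: 1}, {partialFilterExpression: {a: 1}}));
    assert.commandWorked(c.insert({_id: 0, a: 1}));
    assert.eq(1, c.find({a: 1}).hint({a: 1}).itcount());  // indexed while matching
    assert.commandWorked(c.update({_id: 0}, {$set: {a: 0}}));
    assert.eq(0, c.find({a: 1}).hint({a: 1}).itcount());  // gone from the index
    MongoRunner.stopMongod(conn);
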
diff --git a/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js b/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js
index a6f50f70151..e9a74de9982 100644
--- a/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js
+++ b/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js
@@ -5,47 +5,47 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+'use strict';
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), {a: 1, b: '2dsphere'}, {sparse: true});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), {a: 1, b: '2dsphere'}, {sparse: true});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- assert.commandWorked(coll.insert({a: [1, 2]}));
+assert.commandWorked(coll.insert({a: [1, 2]}));
- IndexBuildTest.resumeIndexBuilds(primary);
+IndexBuildTest.resumeIndexBuilds(primary);
- // Wait for the index build to finish.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1_b_2dsphere']);
+// Wait for the index build to finish.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1_b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_unique_index_with_updates.js b/jstests/noPassthrough/hybrid_unique_index_with_updates.js
index 849e155b7e7..38a83b30a37 100644
--- a/jstests/noPassthrough/hybrid_unique_index_with_updates.js
+++ b/jstests/noPassthrough/hybrid_unique_index_with_updates.js
@@ -6,178 +6,177 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- let replSetTest = new ReplSetTest({name: "hybrid_updates", nodes: 2});
- replSetTest.startSet();
- replSetTest.initiate();
+let replSetTest = new ReplSetTest({name: "hybrid_updates", nodes: 2});
+replSetTest.startSet();
+replSetTest.initiate();
- let conn = replSetTest.getPrimary();
- let testDB = conn.getDB('test');
+let conn = replSetTest.getPrimary();
+let testDB = conn.getDB('test');
- // Enables a failpoint, runs 'hitFailpointFunc' to hit the failpoint, then runs
- // 'duringFailpointFunc' while the failpoint is active.
- let doDuringFailpoint = function(
- failPointName, logMessage, hitFailpointFunc, duringFailpointFunc, i) {
- clearRawMongoProgramOutput();
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
+// Enables a failpoint, runs 'hitFailpointFunc' to hit the failpoint, then runs
+// 'duringFailpointFunc' while the failpoint is active.
+let doDuringFailpoint = function(
+ failPointName, logMessage, hitFailpointFunc, duringFailpointFunc, i) {
+ clearRawMongoProgramOutput();
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
- hitFailpointFunc();
+ hitFailpointFunc();
- assert.soon(() => rawMongoProgramOutput().indexOf(logMessage) >= 0);
+ assert.soon(() => rawMongoProgramOutput().indexOf(logMessage) >= 0);
- duringFailpointFunc();
+ duringFailpointFunc();
- assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- };
+ assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+};
- const docsToInsert = 1000;
- let setUp = function(coll) {
- coll.drop();
+const docsToInsert = 1000;
+let setUp = function(coll) {
+ coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < docsToInsert; i++) {
- bulk.insert({i: i});
- }
- assert.commandWorked(bulk.execute());
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < docsToInsert; i++) {
+ bulk.insert({i: i});
+ }
+ assert.commandWorked(bulk.execute());
+};
+
+let buildIndexInBackground = function(coll, expectDuplicateKeyError) {
+ const createIndexFunction = function(collFullName) {
+ const coll = db.getMongo().getCollection(collFullName);
+ return coll.createIndex({i: 1}, {background: true, unique: true});
};
+ const assertFunction = expectDuplicateKeyError ? function(collFullName) {
+ assert.commandFailedWithCode(createIndexFunction(collFullName), ErrorCodes.DuplicateKey);
+ } : function(collFullName) {
+ assert.commandWorked(createIndexFunction(collFullName));
+ };
+ return startParallelShell('const createIndexFunction = ' + createIndexFunction + ';\n' +
+ 'const assertFunction = ' + assertFunction + ';\n' +
+ 'assertFunction("' + coll.getFullName() + '")',
+ conn.port);
+};
- let buildIndexInBackground = function(coll, expectDuplicateKeyError) {
- const createIndexFunction = function(collFullName) {
- const coll = db.getMongo().getCollection(collFullName);
- return coll.createIndex({i: 1}, {background: true, unique: true});
- };
- const assertFunction = expectDuplicateKeyError ? function(collFullName) {
- assert.commandFailedWithCode(createIndexFunction(collFullName),
- ErrorCodes.DuplicateKey);
- } : function(collFullName) {
- assert.commandWorked(createIndexFunction(collFullName));
- };
- return startParallelShell('const createIndexFunction = ' + createIndexFunction + ';\n' +
- 'const assertFunction = ' + assertFunction + ';\n' +
- 'assertFunction("' + coll.getFullName() + '")',
- conn.port);
+/**
+ * Run a background index build on a unique index under different configurations. Introduce
+ * duplicate keys on the index that may cause it to fail or succeed, depending on the following
+ * optional parameters:
+ * {
+ * // Which operation is used to introduce a duplicate key.
+ * operation {string}: "insert", "update"
+ *
+ * // Whether or not to resolve the duplicate key before completing the build.
+ * resolve {bool}
+ *
+ * // The phase of the index build in which to introduce the duplicate key.
+ * phase {number}: 0-4
+ * }
+ */
+let runTest = function(config) {
+ jsTestLog("running test with config: " + tojson(config));
+
+ const collName = Object.keys(config).length
+ ? 'hybrid_' + config.operation[0] + '_r' + Number(config.resolve) + '_p' + config.phase
+ : 'hybrid';
+ const coll = testDB.getCollection(collName);
+ setUp(coll);
+
+ // Expect the build to fail with a duplicate key error if we insert a duplicate key and
+ // don't resolve it.
+ let expectDuplicate = config.resolve === false;
+
+ let awaitBuild;
+ let buildIndex = function() {
+ awaitBuild = buildIndexInBackground(coll, expectDuplicate);
};
- /**
- * Run a background index build on a unique index under different configurations. Introduce
- * duplicate keys on the index that may cause it to fail or succeed, depending on the following
- * optional parameters:
- * {
- * // Which operation is used to introduce a duplicate key.
- * operation {string}: "insert", "update"
- *
- * // Whether or not to resolve the duplicate key before completing the build.
- * resolve {bool}
- *
- * // The phase of the index build in which to introduce the duplicate key.
- * phase {number}: 0-4
- * }
- */
- let runTest = function(config) {
- jsTestLog("running test with config: " + tojson(config));
-
- const collName = Object.keys(config).length
- ? 'hybrid_' + config.operation[0] + '_r' + Number(config.resolve) + '_p' + config.phase
- : 'hybrid';
- const coll = testDB.getCollection(collName);
- setUp(coll);
-
- // Expect the build to fail with a duplicate key error if we insert a duplicate key and
- // don't resolve it.
- let expectDuplicate = config.resolve === false;
-
- let awaitBuild;
- let buildIndex = function() {
- awaitBuild = buildIndexInBackground(coll, expectDuplicate);
- };
-
- // Introduce a duplicate key, either from an insert or update. Optionally, follow up with an
- // operation that will resolve the duplicate by removing it or updating it.
- const dup = {i: 0};
- let doOperation = function() {
- if ("insert" == config.operation) {
- assert.commandWorked(coll.insert(dup));
- if (config.resolve) {
- assert.commandWorked(coll.deleteOne(dup));
- }
- } else if ("update" == config.operation) {
- assert.commandWorked(coll.update(dup, {i: 1}));
- if (config.resolve) {
- assert.commandWorked(coll.update({i: 1}, dup));
- }
+ // Introduce a duplicate key, either from an insert or update. Optionally, follow up with an
+ // operation that will resolve the duplicate by removing it or updating it.
+ const dup = {i: 0};
+ let doOperation = function() {
+ if ("insert" == config.operation) {
+ assert.commandWorked(coll.insert(dup));
+ if (config.resolve) {
+ assert.commandWorked(coll.deleteOne(dup));
+ }
+ } else if ("update" == config.operation) {
+ assert.commandWorked(coll.update(dup, {i: 1}));
+ if (config.resolve) {
+ assert.commandWorked(coll.update({i: 1}, dup));
}
- };
-
- const stopKey = 0;
- switch (config.phase) {
- // Just build the index without any failpoints.
- case undefined:
- buildIndex();
- break;
- // Hang before scanning the first document.
- case 0:
- doDuringFailpoint("hangBeforeIndexBuildOf",
- "Hanging before index build of i=" + stopKey,
- buildIndex,
- doOperation,
- stopKey);
- break;
- // Hang after scanning the first document.
- case 1:
- doDuringFailpoint("hangAfterIndexBuildOf",
- "Hanging after index build of i=" + stopKey,
- buildIndex,
- doOperation,
- stopKey);
- break;
- // Hang before the first drain and after dumping the keys from the external sorter into
- // the index.
- case 2:
- doDuringFailpoint("hangAfterIndexBuildDumpsInsertsFromBulk",
- "Hanging after dumping inserts from bulk builder",
- buildIndex,
- doOperation);
- break;
- // Hang before the second drain.
- case 3:
- doDuringFailpoint("hangAfterIndexBuildFirstDrain",
- "Hanging after index build first drain",
- buildIndex,
- doOperation);
- break;
- // Hang before the final drain and commit.
- case 4:
- doDuringFailpoint("hangAfterIndexBuildSecondDrain",
- "Hanging after index build second drain",
- buildIndex,
- doOperation);
- break;
- default:
- assert(false, "Invalid phase: " + config.phase);
}
+ };
- awaitBuild();
+ const stopKey = 0;
+ switch (config.phase) {
+ // Just build the index without any failpoints.
+ case undefined:
+ buildIndex();
+ break;
+ // Hang before scanning the first document.
+ case 0:
+ doDuringFailpoint("hangBeforeIndexBuildOf",
+ "Hanging before index build of i=" + stopKey,
+ buildIndex,
+ doOperation,
+ stopKey);
+ break;
+ // Hang after scanning the first document.
+ case 1:
+ doDuringFailpoint("hangAfterIndexBuildOf",
+ "Hanging after index build of i=" + stopKey,
+ buildIndex,
+ doOperation,
+ stopKey);
+ break;
+ // Hang before the first drain and after dumping the keys from the external sorter into
+ // the index.
+ case 2:
+ doDuringFailpoint("hangAfterIndexBuildDumpsInsertsFromBulk",
+ "Hanging after dumping inserts from bulk builder",
+ buildIndex,
+ doOperation);
+ break;
+ // Hang before the second drain.
+ case 3:
+ doDuringFailpoint("hangAfterIndexBuildFirstDrain",
+ "Hanging after index build first drain",
+ buildIndex,
+ doOperation);
+ break;
+ // Hang before the final drain and commit.
+ case 4:
+ doDuringFailpoint("hangAfterIndexBuildSecondDrain",
+ "Hanging after index build second drain",
+ buildIndex,
+ doOperation);
+ break;
+ default:
+ assert(false, "Invalid phase: " + config.phase);
+ }
- let expectedDocs = docsToInsert;
- expectedDocs += (config.operation == "insert" && config.resolve === false) ? 1 : 0;
+ awaitBuild();
- assert.eq(expectedDocs, coll.count());
- assert.eq(expectedDocs, coll.find().itcount());
- assert.commandWorked(coll.validate({full: true}));
- };
+ let expectedDocs = docsToInsert;
+ expectedDocs += (config.operation == "insert" && config.resolve === false) ? 1 : 0;
- runTest({});
+ assert.eq(expectedDocs, coll.count());
+ assert.eq(expectedDocs, coll.find().itcount());
+ assert.commandWorked(coll.validate({full: true}));
+};
- for (let i = 0; i <= 4; i++) {
- runTest({operation: "insert", resolve: true, phase: i});
- runTest({operation: "insert", resolve: false, phase: i});
- runTest({operation: "update", resolve: true, phase: i});
- runTest({operation: "update", resolve: false, phase: i});
- }
+runTest({});
+
+for (let i = 0; i <= 4; i++) {
+ runTest({operation: "insert", resolve: true, phase: i});
+ runTest({operation: "insert", resolve: false, phase: i});
+ runTest({operation: "update", resolve: true, phase: i});
+ runTest({operation: "update", resolve: false, phase: i});
+}
- replSetTest.stopSet();
+replSetTest.stopSet();
})();
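
buildIndexInBackground() above ships its logic to a second shell by concatenating function source code into a script string, relying on Function.prototype.toString(). The trick in isolation — a sketch, assuming conn is a live connection:

    // Sketch: serialize a local function into a parallel shell.
    const makeGreeting = function(name) {
        return 'hello, ' + name;
    };
    const awaitShell = startParallelShell(
        'const makeGreeting = ' + makeGreeting + ';\n' +  // pastes the function source
        'assert.eq("hello, parallel", makeGreeting("parallel"));',
        conn.port);
    awaitShell();  // joins the shell and asserts it exited cleanly
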
diff --git a/jstests/noPassthrough/hyphenated_database_name.js b/jstests/noPassthrough/hyphenated_database_name.js
index 6387e8f167f..0290e4444d9 100644
--- a/jstests/noPassthrough/hyphenated_database_name.js
+++ b/jstests/noPassthrough/hyphenated_database_name.js
@@ -4,21 +4,21 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
- var isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" ||
- jsTest.options().storageEngine == "mmapv1" || !jsTest.options().storageEngine;
- if (!isDirectoryPerDBSupported)
- return;
+"use strict";
+var isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" ||
+ jsTest.options().storageEngine == "mmapv1" || !jsTest.options().storageEngine;
+if (!isDirectoryPerDBSupported)
+ return;
- const dbName = "test-hyphen";
- let conn = MongoRunner.runMongod({directoryperdb: ''});
+const dbName = "test-hyphen";
+let conn = MongoRunner.runMongod({directoryperdb: ''});
- conn.getDB(dbName).a.insert({x: 1});
- let res = conn.getDB(dbName).runCommand({dbStats: 1, scale: 1});
- jsTestLog("dbStats: " + tojson(res));
- assert(res.db == "test-hyphen");
- assert(res.fsUsedSize > 0);
- assert(res.fsTotalSize > 0);
+conn.getDB(dbName).a.insert({x: 1});
+let res = conn.getDB(dbName).runCommand({dbStats: 1, scale: 1});
+jsTestLog("dbStats: " + tojson(res));
+assert(res.db == "test-hyphen");
+assert(res.fsUsedSize > 0);
+assert(res.fsTotalSize > 0);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/ignore_notablescan.js b/jstests/noPassthrough/ignore_notablescan.js
index ccdfa9ebfce..255b646f757 100644
--- a/jstests/noPassthrough/ignore_notablescan.js
+++ b/jstests/noPassthrough/ignore_notablescan.js
@@ -1,73 +1,71 @@
// Test that the 'notablescan' parameter does not affect queries on internal namespaces.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- function runTests(ServerType) {
- const s = new ServerType();
+function runTests(ServerType) {
+ const s = new ServerType();
- const configDB = s.getConn().getDB("config");
- const session = s.getConn().getDB(dbName).getMongo().startSession();
- const primaryDB = session.getDatabase(dbName);
+ const configDB = s.getConn().getDB("config");
+ const session = s.getConn().getDB(dbName).getMongo().startSession();
+ const primaryDB = session.getDatabase(dbName);
- // Implicitly create the collection outside of the transaction.
- assert.writeOK(primaryDB.getCollection(collName).insert({x: 1}));
+ // Implicitly create the collection outside of the transaction.
+ assert.writeOK(primaryDB.getCollection(collName).insert({x: 1}));
- // Run a transaction so the 'config.transactions' collection is implicitly created.
- session.startTransaction();
- assert.writeOK(primaryDB.getCollection(collName).insert({x: 2}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ // Run a transaction so the 'config.transactions' collection is implicitly created.
+ session.startTransaction();
+ assert.writeOK(primaryDB.getCollection(collName).insert({x: 2}));
+ assert.commandWorked(session.commitTransaction_forTesting());
- // Run a predicate query that would fail if we did not ignore the 'notablescan' flag.
- assert.eq(configDB.transactions.find({any_nonexistent_field: {$exists: true}}).itcount(),
- 0);
+ // Run a predicate query that would fail if we did not ignore the 'notablescan' flag.
+ assert.eq(configDB.transactions.find({any_nonexistent_field: {$exists: true}}).itcount(), 0);
- // Run the same query against the user-created collection honoring the 'notablescan' flag.
- // This will cause the query to fail as there is no viable query plan. Unfortunately,
- // the reported query error code is the cryptic 'BadValue'.
- assert.commandFailedWithCode(
- primaryDB.runCommand(
- {find: collName, filter: {any_nonexistent_field: {$exists: true}}}),
- ErrorCodes.BadValue);
+ // Run the same query against the user-created collection honoring the 'notablescan' flag.
+ // This will cause the query to fail as there is no viable query plan. Unfortunately,
+ // the reported query error code is the cryptic 'BadValue'.
+ assert.commandFailedWithCode(
+ primaryDB.runCommand({find: collName, filter: {any_nonexistent_field: {$exists: true}}}),
+ ErrorCodes.BadValue);
- s.stop();
- }
+ s.stop();
+}
- function Sharding() {
- this.st = new ShardingTest({
- shards: 2,
- config: 1,
- other: {
- shardOptions: {setParameter: {notablescan: true}},
- configOptions: {setParameter: {notablescan: true}}
- }
- });
- }
+function Sharding() {
+ this.st = new ShardingTest({
+ shards: 2,
+ config: 1,
+ other: {
+ shardOptions: {setParameter: {notablescan: true}},
+ configOptions: {setParameter: {notablescan: true}}
+ }
+ });
+}
- Sharding.prototype.stop = function() {
- this.st.stop();
- };
+Sharding.prototype.stop = function() {
+ this.st.stop();
+};
- Sharding.prototype.getConn = function() {
- return this.st.s0;
- };
+Sharding.prototype.getConn = function() {
+ return this.st.s0;
+};
- function ReplSet() {
- this.rst = new ReplSetTest({nodes: 1, nodeOptions: {setParameter: {notablescan: true}}});
- this.rst.startSet();
- this.rst.initiate();
- }
+function ReplSet() {
+ this.rst = new ReplSetTest({nodes: 1, nodeOptions: {setParameter: {notablescan: true}}});
+ this.rst.startSet();
+ this.rst.initiate();
+}
- ReplSet.prototype.stop = function() {
- this.rst.stopSet();
- };
+ReplSet.prototype.stop = function() {
+ this.rst.stopSet();
+};
- ReplSet.prototype.getConn = function() {
- return this.rst.getPrimary();
- };
+ReplSet.prototype.getConn = function() {
+ return this.rst.getPrimary();
+};
- [ReplSet, Sharding].forEach(runTests);
+[ReplSet, Sharding].forEach(runTests);
}());
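
The class-based harness above exists only to run the same assertions on a replica set and a sharded cluster. The notablescan behavior itself reproduces on a bare standalone; a minimal sketch (collection and field names are illustrative):

    // Sketch: with notablescan set, an unindexed predicate on a user
    // collection has no viable plan and the find fails with BadValue.
    const conn = MongoRunner.runMongod({setParameter: {notablescan: true}});
    const testDB = conn.getDB('test');
    assert.writeOK(testDB.coll.insert({x: 1}));
    assert.commandFailedWithCode(
        testDB.runCommand({find: 'coll', filter: {y: {$exists: true}}}),
        ErrorCodes.BadValue);
    MongoRunner.stopMongod(conn);
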
diff --git a/jstests/noPassthrough/implicit_sessions.js b/jstests/noPassthrough/implicit_sessions.js
index 77204e098e9..f0bb9d972f9 100644
--- a/jstests/noPassthrough/implicit_sessions.js
+++ b/jstests/noPassthrough/implicit_sessions.js
@@ -2,245 +2,244 @@
* Verifies behavior around implicit sessions in the mongo shell.
*/
(function() {
- "use strict";
-
- /**
- * Runs the given function, inspecting the outgoing command object and making assertions about
- * its logical session id.
- */
- function inspectCommandForSessionId(func, {shouldIncludeId, expectedId, differentFromId}) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
-
- const sentinel = {};
- let cmdObjSeen = sentinel;
-
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+"use strict";
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
- }
+/**
+ * Runs the given function, inspecting the outgoing command object and making assertions about
+ * its logical session id.
+ */
+function inspectCommandForSessionId(func, {shouldIncludeId, expectedId, differentFromId}) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let cmdName = Object.keys(cmdObjSeen)[0];
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- if (shouldIncludeId) {
- assert(cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) + " to have a logical session id.");
-
- if (expectedId) {
- assert(bsonBinaryEqual(expectedId, cmdObjSeen.lsid),
- "The sent session id did not match the expected, sent: " +
- tojson(cmdObjSeen.lsid) + ", expected: " + tojson(expectedId));
- }
-
- if (differentFromId) {
- assert(!bsonBinaryEqual(differentFromId, cmdObjSeen.lsid),
- "The sent session id was not different from the expected, sent: " +
- tojson(cmdObjSeen.lsid) + ", expected: " + tojson(differentFromId));
- }
-
- } else {
- assert(
- !cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) + " to not have a logical session id.");
- }
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- return cmdObjSeen.lsid;
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- // Tests regular behavior of implicit sessions.
- function runTest() {
- const conn = MongoRunner.runMongod();
-
- // Commands run on a database without an explicit session should use an implicit one.
- const testDB = conn.getDB("test");
- const coll = testDB.getCollection("foo");
- const implicitId = inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true});
-
- // Unacknowledged writes have no session id.
- inspectCommandForSessionId(function() {
- coll.insert({x: 1}, {writeConcern: {w: 0}});
- }, {shouldIncludeId: false});
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
+ }
- assert(bsonBinaryEqual(testDB.getSession().getSessionId(), implicitId),
- "Expected the id of the database's implicit session to match the one sent, sent: " +
- tojson(implicitId) + " db session id: " +
- tojson(testDB.getSession().getSessionId()));
-
- // Implicit sessions are not causally consistent.
- assert(!testDB.getSession().getOptions().isCausalConsistency(),
- "Expected the database's implicit session to not be causally consistent");
-
- // Further commands run on the same database should reuse the implicit session.
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // New collections from the same database should inherit the implicit session.
- const collTwo = testDB.getCollection("bar");
- inspectCommandForSessionId(function() {
- assert.writeOK(collTwo.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // Sibling databases should inherit the implicit session.
- let siblingColl = testDB.getSiblingDB("foo").getCollection("bar");
- inspectCommandForSessionId(function() {
- assert.writeOK(siblingColl.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // A new database from the same connection should inherit the implicit session.
- const newCollSameConn = conn.getDB("testTwo").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollSameConn.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // A new database from a new connection should use a different implicit session.
- const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollNewConn.insert({x: 1}));
- }, {shouldIncludeId: true, differentFromId: implicitId});
-
- // The original implicit session should still live on the first database.
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // Databases created from an explicit session should override any implicit sessions.
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- const explicitId = inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true, differentFromId: implicitId});
-
- assert(bsonBinaryEqual(session.getSessionId(), explicitId),
- "Expected the id of the explicit session to match the one sent, sent: " +
- tojson(explicitId) + " explicit session id: " + tojson(session.getSessionId()));
- assert(bsonBinaryEqual(sessionColl.getDB().getSession().getSessionId(), explicitId),
- "Expected id of the database's session to match the explicit session's id, sent: " +
- tojson(sessionColl.getDB().getSession().getSessionId()) +
- ", explicit session id: " + tojson(session.getSessionId()));
-
- // The original implicit session should still live on the first database.
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // New databases on the same connection as the explicit session should still inherit the
- // original implicit session.
- const newCollSameConnAfter = conn.getDB("testThree").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollSameConnAfter.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- session.endSession();
- MongoRunner.stopMongod(conn);
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let cmdName = Object.keys(cmdObjSeen)[0];
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
}
- // Tests behavior when the test flag to disable implicit sessions is changed.
- function runTestTransitionToDisabled() {
- const conn = MongoRunner.runMongod();
+ if (shouldIncludeId) {
+ assert(cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) + " to have a logical session id.");
- // Existing implicit sessions should be erased when the disable flag is set.
- const coll = conn.getDB("test").getCollection("foo");
- const implicitId = inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true});
+ if (expectedId) {
+ assert(bsonBinaryEqual(expectedId, cmdObjSeen.lsid),
+ "The sent session id did not match the expected, sent: " +
+ tojson(cmdObjSeen.lsid) + ", expected: " + tojson(expectedId));
+ }
- TestData.disableImplicitSessions = true;
+ if (differentFromId) {
+ assert(!bsonBinaryEqual(differentFromId, cmdObjSeen.lsid),
+ "The sent session id was not different from the expected, sent: " +
+ tojson(cmdObjSeen.lsid) + ", expected: " + tojson(differentFromId));
+ }
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: false});
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) + " to not have a logical session id.");
+ }
- // After the flag is unset, databases using existing connections with implicit sessions will
- // use the original implicit sessions again and new connections will create and use new
- // implicit sessions.
- TestData.disableImplicitSessions = false;
+ return cmdObjSeen.lsid;
+}
+
+// Tests regular behavior of implicit sessions.
+function runTest() {
+ const conn = MongoRunner.runMongod();
+
+ // Commands run on a database without an explicit session should use an implicit one.
+ const testDB = conn.getDB("test");
+ const coll = testDB.getCollection("foo");
+ const implicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true});
+
+ // Unacknowledged writes have no session id.
+ inspectCommandForSessionId(function() {
+ coll.insert({x: 1}, {writeConcern: {w: 0}});
+ }, {shouldIncludeId: false});
+
+ assert(bsonBinaryEqual(testDB.getSession().getSessionId(), implicitId),
+ "Expected the id of the database's implicit session to match the one sent, sent: " +
+ tojson(implicitId) +
+ " db session id: " + tojson(testDB.getSession().getSessionId()));
+
+ // Implicit sessions are not causally consistent.
+ assert(!testDB.getSession().getOptions().isCausalConsistency(),
+ "Expected the database's implicit session to not be causally consistent");
+
+ // Further commands run on the same database should reuse the implicit session.
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // New collections from the same database should inherit the implicit session.
+ const collTwo = testDB.getCollection("bar");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(collTwo.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // Sibling databases should inherit the implicit session.
+ let siblingColl = testDB.getSiblingDB("foo").getCollection("bar");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(siblingColl.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // A new database from the same connection should inherit the implicit session.
+ const newCollSameConn = conn.getDB("testTwo").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollSameConn.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // A new database from a new connection should use a different implicit session.
+ const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollNewConn.insert({x: 1}));
+ }, {shouldIncludeId: true, differentFromId: implicitId});
+
+ // The original implicit session should still live on the first database.
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // Databases created from an explicit session should override any implicit sessions.
+ const session = conn.startSession();
+ const sessionColl = session.getDatabase("test").getCollection("foo");
+ const explicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true, differentFromId: implicitId});
+
+ assert(bsonBinaryEqual(session.getSessionId(), explicitId),
+ "Expected the id of the explicit session to match the one sent, sent: " +
+ tojson(explicitId) + " explicit session id: " + tojson(session.getSessionId()));
+ assert(bsonBinaryEqual(sessionColl.getDB().getSession().getSessionId(), explicitId),
+ "Expected id of the database's session to match the explicit session's id, sent: " +
+ tojson(sessionColl.getDB().getSession().getSessionId()) +
+ ", explicit session id: " + tojson(session.getSessionId()));
+
+ // The original implicit session should still live on the first database.
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // New databases on the same connection as the explicit session should still inherit the
+ // original implicit session.
+ const newCollSameConnAfter = conn.getDB("testThree").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollSameConnAfter.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ session.endSession();
+ MongoRunner.stopMongod(conn);
+}
+
+// Tests behavior when the test flag to disable implicit sessions is changed.
+function runTestTransitionToDisabled() {
+ const conn = MongoRunner.runMongod();
+
+ // Existing implicit sessions should be erased when the disable flag is set.
+ const coll = conn.getDB("test").getCollection("foo");
+ const implicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true});
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
+ TestData.disableImplicitSessions = true;
- const newColl = conn.getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newColl.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: false});
- const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollNewConn.insert({x: 1}));
- }, {shouldIncludeId: true, differentFromId: implicitId});
+ // After the flag is unset, databases using existing connections with implicit sessions will
+ // use the original implicit sessions again and new connections will create and use new
+ // implicit sessions.
+ TestData.disableImplicitSessions = false;
- // Explicit sessions should not be affected by the disable flag being set.
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- const explicitId = inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true});
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
- TestData.disableImplicitSessions = true;
+ const newColl = conn.getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newColl.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
- inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: explicitId});
+ const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollNewConn.insert({x: 1}));
+ }, {shouldIncludeId: true, differentFromId: implicitId});
- session.endSession();
- MongoRunner.stopMongod(conn);
- }
+ // Explicit sessions should not be affected by the disable flag being set.
+ const session = conn.startSession();
+ const sessionColl = session.getDatabase("test").getCollection("foo");
+ const explicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true});
- // Tests behavior of implicit sessions when they are disabled via a test flag.
- function runTestDisabled() {
- const conn = MongoRunner.runMongod();
+ TestData.disableImplicitSessions = true;
- // Commands run without an explicit session should not use an implicit one.
- const coll = conn.getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: explicitId});
+
+ session.endSession();
+ MongoRunner.stopMongod(conn);
+}
+
+// Tests behavior of implicit sessions when they are disabled via a test flag.
+function runTestDisabled() {
+ const conn = MongoRunner.runMongod();
+
+ // Commands run without an explicit session should not use an implicit one.
+ const coll = conn.getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: false});
+
+ // Explicit sessions should still include session ids.
+ const session = conn.startSession();
+ const sessionColl = session.getDatabase("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true});
+
+ // Commands run in a parallel shell inherit the disable flag.
+ TestData.inspectCommandForSessionId = inspectCommandForSessionId;
+ const awaitShell = startParallelShell(function() {
+ const parallelColl = db.getCollection("foo");
+ TestData.inspectCommandForSessionId(function() {
+ assert.writeOK(parallelColl.insert({x: 1}));
}, {shouldIncludeId: false});
+ }, conn.port);
+ awaitShell();
- // Explicit sessions should still include session ids.
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true});
-
- // Commands run in a parallel shell inherit the disable flag.
- TestData.inspectCommandForSessionId = inspectCommandForSessionId;
- const awaitShell = startParallelShell(function() {
- const parallelColl = db.getCollection("foo");
- TestData.inspectCommandForSessionId(function() {
- assert.writeOK(parallelColl.insert({x: 1}));
- }, {shouldIncludeId: false});
- }, conn.port);
- awaitShell();
-
- session.endSession();
- MongoRunner.stopMongod(conn);
- }
+ session.endSession();
+ MongoRunner.stopMongod(conn);
+}
- runTest();
+runTest();
- runTestTransitionToDisabled();
+runTestTransitionToDisabled();
- assert(_shouldUseImplicitSessions());
+assert(_shouldUseImplicitSessions());
- TestData.disableImplicitSessions = true;
- runTestDisabled();
+TestData.disableImplicitSessions = true;
+runTestDisabled();
})();
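
inspectCommandForSessionId() works by temporarily monkey-patching Mongo.prototype.runCommand and restoring it in a finally block. The spy pattern reduced to its essentials — a sketch assuming a connected shell global db:

    // Sketch: capture the last command object the shell sends on the wire.
    const original = Mongo.prototype.runCommand;
    let lastCmd = null;
    Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
        lastCmd = cmdObj;
        return original.apply(this, arguments);
    };
    try {
        db.foo.insert({x: 1});    // any shell operation
        printjson(lastCmd.lsid);  // the implicit logical session id, if one was attached
    } finally {
        Mongo.prototype.runCommand = original;  // always restore the real method
    }
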
diff --git a/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js b/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js
index 0875b4cec97..352541b890d 100644
--- a/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js
+++ b/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js
@@ -10,105 +10,104 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js"); // for PrepareHelpers
+load("jstests/noPassthrough/libs/index_build.js"); // for IndexBuildTest
+
+const replSetTest = new ReplSetTest({
+ name: "index_builds_ignore_prepare_conflicts",
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+});
+replSetTest.startSet();
+replSetTest.initiate();
+
+const primary = replSetTest.getPrimary();
+const primaryDB = primary.getDB('test');
+
+let numDocs = 10;
+let setUp = function(coll) {
+ coll.drop();
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; i++) {
+ bulk.insert({i: i});
+ }
+ assert.commandWorked(bulk.execute());
+};
- load("jstests/core/txns/libs/prepare_helpers.js"); // for PrepareHelpers
- load("jstests/noPassthrough/libs/index_build.js"); // for IndexBuildTest
-
- const replSetTest = new ReplSetTest({
- name: "index_builds_ignore_prepare_conflicts",
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- ],
+/**
+ * Run a background index build, and depending on the provided node, 'conn', ensure that a
+ * prepared update does not introduce prepare conflicts on the index builder.
+ */
+let runTest = function(conn) {
+ const testDB = conn.getDB('test');
+
+ const collName = 'index_builds_ignore_prepare_conflicts';
+ const coll = primaryDB.getCollection(collName);
+ setUp(coll);
+
+ // Start and pause an index build.
+ IndexBuildTest.pauseIndexBuilds(conn);
+ const awaitBuild = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {i: 1});
+ const opId = IndexBuildTest.waitForIndexBuildToStart(testDB, collName, "i_1");
+
+ // This insert will block until the index build pauses and releases its exclusive lock.
+ // This guarantees that the subsequent transaction can immediately acquire a lock and not
+ // fail with a LockTimeout error.
+ assert.commandWorked(coll.insert({i: numDocs++}));
+
+ // Start a session and introduce a document that is in a prepared state, but should be
+ // ignored by the index build, at least until the transaction commits.
+ const session = primaryDB.getMongo().startSession();
+ const sessionDB = session.getDatabase('test');
+ const sessionColl = sessionDB.getCollection(collName);
+ session.startTransaction();
+ assert.commandWorked(sessionColl.update({i: 0}, {i: "prepared"}));
+ // Use w:1 because the secondary will be unable to replicate the prepare while an index
+ // build is running.
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+
+ // Let the index build continue until just before it completes. Set the failpoint just
+ // before the second drain, which would take a lock that conflicts with the prepared
+ // transaction and prevent the index build from completing entirely.
+ const failPointName = "hangAfterIndexBuildFirstDrain";
+ clearRawMongoProgramOutput();
+ assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
+
+ // Unpause the index build from the first failpoint so that it can resume and pause at the
+ // next failpoint.
+ IndexBuildTest.resumeIndexBuilds(conn);
+ assert.soon(() =>
+ rawMongoProgramOutput().indexOf("Hanging after index build first drain") >= 0);
+
+ // Right before the index build completes, ensure no prepare conflicts were hit.
+ IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => {
+ printjson(op);
+ assert.eq(undefined, op.prepareReadConflicts);
});
- replSetTest.startSet();
- replSetTest.initiate();
-
- const primary = replSetTest.getPrimary();
- const primaryDB = primary.getDB('test');
-
- let numDocs = 10;
- let setUp = function(coll) {
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({i: i});
- }
- assert.commandWorked(bulk.execute());
- };
-
- /**
- * Run a background index build, and depending on the provided node, 'conn', ensure that a
- * prepared update does not introduce prepare conflicts on the index builder.
- */
- let runTest = function(conn) {
- const testDB = conn.getDB('test');
-
- const collName = 'index_builds_ignore_prepare_conflicts';
- const coll = primaryDB.getCollection(collName);
- setUp(coll);
-
- // Start and pause an index build.
- IndexBuildTest.pauseIndexBuilds(conn);
- const awaitBuild = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {i: 1});
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB, collName, "i_1");
-
- // This insert will block until the index build pauses and releases its exclusive lock.
- // This guarantees that the subsequent transaction can immediately acquire a lock and not
- // fail with a LockTimeout error.
- assert.commandWorked(coll.insert({i: numDocs++}));
-
- // Start a session and introduce a document that is in a prepared state, but should be
- // ignored by the index build, at least until the transaction commits.
- const session = primaryDB.getMongo().startSession();
- const sessionDB = session.getDatabase('test');
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.update({i: 0}, {i: "prepared"}));
- // Use w:1 because the secondary will be unable to replicate the prepare while an index
- // build is running.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
-
- // Let the index build continue until just before it completes. Set the failpoint just
- // before the second drain, which would take a lock that conflicts with the prepared
- // transaction and prevent the index build from completing entirely.
- const failPointName = "hangAfterIndexBuildFirstDrain";
- clearRawMongoProgramOutput();
- assert.commandWorked(
- conn.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
-
- // Unpause the index build from the first failpoint so that it can resume and pause at the
- // next failpoint.
- IndexBuildTest.resumeIndexBuilds(conn);
- assert.soon(
- () => rawMongoProgramOutput().indexOf("Hanging after index build first drain") >= 0);
-
- // Right before the index build completes, ensure no prepare conflicts were hit.
- IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => {
- printjson(op);
- assert.eq(undefined, op.prepareReadConflicts);
- });
-
- // Because prepare uses w:1, ensure it is majority committed before committing the
- // transaction.
- PrepareHelpers.awaitMajorityCommitted(replSetTest, prepareTimestamp);
-
- // Commit the transaction before completing the index build, releasing locks which will
- // allow the index build to complete.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- // Allow the index build to complete.
- assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
-
- awaitBuild();
- IndexBuildTest.waitForIndexBuildToStop(testDB, collName, "i_1");
-
- assert.eq(numDocs, coll.count());
- assert.eq(numDocs, coll.find().itcount());
- };
-
- runTest(replSetTest.getPrimary());
-
- replSetTest.stopSet();
+
+ // Because prepare uses w:1, ensure it is majority committed before committing the
+ // transaction.
+ PrepareHelpers.awaitMajorityCommitted(replSetTest, prepareTimestamp);
+
+ // Commit the transaction before completing the index build, releasing locks which will
+ // allow the index build to complete.
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+ // Allow the index build to complete.
+ assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ awaitBuild();
+ IndexBuildTest.waitForIndexBuildToStop(testDB, collName, "i_1");
+
+ assert.eq(numDocs, coll.count());
+ assert.eq(numDocs, coll.find().itcount());
+};
+
+runTest(replSetTest.getPrimary());
+
+replSetTest.stopSet();
})();
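
For context on the choreography above: pausing and resuming an index build reduces to toggling failpoints with configureFailPoint. A minimal sketch, assuming a direct connection `conn` to a mongod that implements the failpoints named in this test, and assuming pauseIndexBuilds/resumeIndexBuilds wrap the first of them:

    // Hang index builds right after they start.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));

    // ... start a build in a parallel shell, run concurrent writes ...

    // Arm the next hang point, then release the first so the build advances
    // to it; finally turn it off so the build can complete.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "hangAfterIndexBuildFirstDrain", mode: "alwaysOn"}));
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "hangAfterIndexBuildFirstDrain", mode: "off"}));
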
diff --git a/jstests/noPassthrough/index_killop_standalone.js b/jstests/noPassthrough/index_killop_standalone.js
index fd1e4662859..2bb2376237b 100644
--- a/jstests/noPassthrough/index_killop_standalone.js
+++ b/jstests/noPassthrough/index_killop_standalone.js
@@ -2,47 +2,47 @@
* Confirms that both foreground and background index builds can be aborted using killop.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- assert.commandWorked(testDB.dropDatabase());
- assert.writeOK(testDB.test.insert({a: 1}));
- const coll = testDB.test;
+const testDB = conn.getDB("test");
+assert.commandWorked(testDB.dropDatabase());
+assert.writeOK(testDB.test.insert({a: 1}));
+const coll = testDB.test;
- // Test that building an index with 'options' can be aborted using killop.
- function testAbortIndexBuild(options) {
- IndexBuildTest.pauseIndexBuilds(conn);
+// Test that building an index with 'options' can be aborted using killop.
+function testAbortIndexBuild(options) {
+ IndexBuildTest.pauseIndexBuilds(conn);
- const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}, options);
+ const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}, options);
- // When the index build starts, find its op id.
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
+ // When the index build starts, find its op id.
+ const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
- // Kill the index build.
- assert.commandWorked(testDB.killOp(opId));
+ // Kill the index build.
+ assert.commandWorked(testDB.killOp(opId));
- // Wait for the index build to stop.
- try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
- } finally {
- IndexBuildTest.resumeIndexBuilds(conn);
- }
+ // Wait for the index build to stop.
+ try {
+ IndexBuildTest.waitForIndexBuildToStop(testDB);
+ } finally {
+ IndexBuildTest.resumeIndexBuilds(conn);
+ }
- const exitCode = createIdx({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+ const exitCode = createIdx({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
- // Check that no new index has been created. This verifies that the index build was aborted
- // rather than successfully completed.
- IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
- }
+ // Check that no new index has been created. This verifies that the index build was aborted
+ // rather than successfully completed.
+ IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+}
- testAbortIndexBuild({background: true});
- testAbortIndexBuild({background: false});
- MongoRunner.stopMongod(conn);
+testAbortIndexBuild({background: true});
+testAbortIndexBuild({background: false});
+MongoRunner.stopMongod(conn);
})();
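
As a reference for the killop pattern above, the opid can also be located by hand via the $currentOp aggregation stage. A hedged sketch, assuming a shell handle `db` on the node running the build; the $match shape is an assumption, since currentOp output varies by version:

    const adminDB = db.getSiblingDB("admin");
    adminDB
        .aggregate([
            {$currentOp: {}},
            {$match: {"command.createIndexes": {$exists: true}}},
        ])
        .forEach(op => assert.commandWorked(adminDB.killOp(op.opid)));
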
diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js
index 5083ab2881e..f1295e5531c 100644
--- a/jstests/noPassthrough/index_partial_no_explain_cmds.js
+++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js
@@ -1,57 +1,57 @@
// Test partial indexes with commands that don't use explain. These commands are tested against
// mongod with the --notablescan flag set, so that they fail if the index is not used.
(function() {
- "use strict";
- var runner = MongoRunner.runMongod({setParameter: "notablescan=1"});
- var coll = runner.getDB("test").index_partial_no_explain_cmds;
- var ret;
-
- coll.drop();
-
- assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
-
- assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
- assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
-
- // Verify we will throw if the partial index can't be used.
- assert.throws(function() {
- coll.find({x: {$gt: 1}, a: 2}).itcount();
- });
-
- //
- // Test mapReduce.
- //
-
- var mapFunc = function() {
- emit(this._id, 1);
- };
- var reduceFunc = function(keyId, countArray) {
- return Array.sum(countArray);
- };
-
- ret = coll.mapReduce(mapFunc, reduceFunc, {out: "inline", query: {x: {$gt: 1}, a: 1}});
- assert.eq(1, ret.counts.input);
-
- //
- // Test distinct.
- //
-
- ret = coll.distinct("a", {x: {$gt: 1}, a: 1});
- assert.eq(1, ret.length);
- ret = coll.distinct("x", {x: {$gt: 1}, a: 1});
- assert.eq(1, ret.length);
- assert.throws(function() {
- printjson(coll.distinct("a", {a: 0}));
- });
- assert.throws(function() {
- printjson(coll.distinct("x", {a: 0}));
- });
-
- // SERVER-19511 regression test: distinct with no query predicate should return the correct
- // number of results. This query should not be allowed to use the partial index, so it should
- // use a collection scan instead. Although this test enables --notablescan, this does not cause
- // operations to fail if they have no query predicate.
- ret = coll.distinct("x");
- assert.eq(2, ret.length);
- MongoRunner.stopMongod(runner);
+"use strict";
+var runner = MongoRunner.runMongod({setParameter: "notablescan=1"});
+var coll = runner.getDB("test").index_partial_no_explain_cmds;
+var ret;
+
+coll.drop();
+
+assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
+
+assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
+assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
+
+// Verify we will throw if the partial index can't be used.
+assert.throws(function() {
+ coll.find({x: {$gt: 1}, a: 2}).itcount();
+});
+
+//
+// Test mapReduce.
+//
+
+var mapFunc = function() {
+ emit(this._id, 1);
+};
+var reduceFunc = function(keyId, countArray) {
+ return Array.sum(countArray);
+};
+
+ret = coll.mapReduce(mapFunc, reduceFunc, {out: "inline", query: {x: {$gt: 1}, a: 1}});
+assert.eq(1, ret.counts.input);
+
+//
+// Test distinct.
+//
+
+ret = coll.distinct("a", {x: {$gt: 1}, a: 1});
+assert.eq(1, ret.length);
+ret = coll.distinct("x", {x: {$gt: 1}, a: 1});
+assert.eq(1, ret.length);
+assert.throws(function() {
+ printjson(coll.distinct("a", {a: 0}));
+});
+assert.throws(function() {
+ printjson(coll.distinct("x", {a: 0}));
+});
+
+// SERVER-19511 regression test: distinct with no query predicate should return the correct
+// number of results. This query should not be allowed to use the partial index, so it should
+// use a collection scan instead. Although this test enables --notablescan, this does not cause
+// operations to fail if they have no query predicate.
+ret = coll.distinct("x");
+assert.eq(2, ret.length);
+MongoRunner.stopMongod(runner);
})();
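
The rule exercised above is that a partial index is only eligible when the query predicate implies the partialFilterExpression. A minimal sketch; the collection name is illustrative:

    const demo = db.partial_index_demo;  // hypothetical collection
    demo.drop();
    assert.commandWorked(demo.createIndex({x: 1}, {partialFilterExpression: {a: 1}}));
    assert.commandWorked(demo.insert([{x: 5, a: 2}, {x: 6, a: 1}]));

    demo.find({x: {$gt: 1}, a: 1}).itcount();  // implies a:1, index is eligible
    demo.find({x: {$gt: 1}}).itcount();        // does not, needs a collection scan

Under --notablescan, the second query is exactly the kind that throws, which is what the test relies on.
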
diff --git a/jstests/noPassthrough/index_version_autoupgrade.js b/jstests/noPassthrough/index_version_autoupgrade.js
index fef289ddca5..9a8769da7e4 100644
--- a/jstests/noPassthrough/index_version_autoupgrade.js
+++ b/jstests/noPassthrough/index_version_autoupgrade.js
@@ -3,138 +3,135 @@
* indexes when they are rebuilt on a collection.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
+var conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
- var testDB = conn.getDB("test");
- assert.commandWorked(testDB.runCommand({create: "index_version_autoupgrade"}));
- var allIndexes = testDB.index_version_autoupgrade.getIndexes();
+var testDB = conn.getDB("test");
+assert.commandWorked(testDB.runCommand({create: "index_version_autoupgrade"}));
+var allIndexes = testDB.index_version_autoupgrade.getIndexes();
+var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "Index with key pattern {_id: 1} not found: " + tojson(allIndexes));
+var defaultIndexVersion = spec.v;
+assert.lte(2, defaultIndexVersion, "Expected the defaultIndexVersion to be at least v=2");
+
+/**
+ * Tests whether the execution of the 'commandFn' function automatically upgrades the index
+ * version of existing indexes.
+ *
+ * The 'commandFn' function takes a single argument of the collection to act on and returns a
+ * collection to validate the index versions of. Most often the 'commandFn' function returns
+ * its input collection, but is able to return a reference to a different collection to support
+ * testing the effects of cloning commands.
+ *
+ * If 'doesAutoUpgrade' is true, then this function verifies that the indexes on the returned
+ * collection have been upgraded to the 'defaultIndexVersion'. If 'doesAutoUpgrade' is false,
+ * then this function verifies that the indexes on the returned collection are unchanged.
+ */
+function testIndexVersionAutoUpgrades(commandFn, doesAutoUpgrade) {
+ testDB.dropDatabase();
+ var coll = testDB.index_version_autoupgrade;
+
+ // Create a v=1 _id index.
+ assert.commandWorked(testDB.createCollection("index_version_autoupgrade",
+ {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+ var allIndexes = coll.getIndexes();
var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
assert.neq(null, spec, "Index with key pattern {_id: 1} not found: " + tojson(allIndexes));
- var defaultIndexVersion = spec.v;
- assert.lte(2, defaultIndexVersion, "Expected the defaultIndexVersion to be at least v=2");
-
- /**
- * Tests whether the execution of the 'commandFn' function automatically upgrades the index
- * version of existing indexes.
- *
- * The 'commandFn' function takes a single argument of the collection to act on and returns a
- * collection to validate the index versions of. Most often the 'commandFn' function returns
- * its input collection, but is able to return a reference to a different collection to support
- * testing the effects of cloning commands.
- *
- * If 'doesAutoUpgrade' is true, then this function verifies that the indexes on the returned
- * collection have been upgraded to the 'defaultIndexVersion'. If 'doesAutoUpgrade' is false,
- * then this function verifies that the indexes on the returned collection are unchanged.
- */
- function testIndexVersionAutoUpgrades(commandFn, doesAutoUpgrade) {
- testDB.dropDatabase();
- var coll = testDB.index_version_autoupgrade;
-
- // Create a v=1 _id index.
- assert.commandWorked(testDB.createCollection(
- "index_version_autoupgrade", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- var allIndexes = coll.getIndexes();
- var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "Index with key pattern {_id: 1} not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
-
- assert.commandWorked(coll.createIndex({withoutAnyOptions: 1}));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withoutAnyOptions: 1});
- assert.neq(
- null,
- spec,
- "Index with key pattern {withoutAnyOptions: 1} not found: " + tojson(allIndexes));
- assert.eq(defaultIndexVersion,
- spec.v,
- "Expected an index with the default version to be built: " + tojson(spec));
-
- assert.commandWorked(coll.createIndex({withV1: 1}, {v: 1}));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV1: 1});
- assert.neq(
- null, spec, "Index with key pattern {withV1: 1} not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
-
- assert.commandWorked(coll.createIndex({withV2: 1}, {v: 2}));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV2: 1});
- assert.neq(
- null, spec, "Index with key pattern {withV2: 1} not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, "Expected a v=2 index to be built: " + tojson(spec));
-
- var collToVerify = commandFn(coll);
- var expectedResults;
-
- if (doesAutoUpgrade) {
- expectedResults = [
- {keyPattern: {_id: 1}, version: defaultIndexVersion},
- {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
- {keyPattern: {withV1: 1}, version: defaultIndexVersion},
- {keyPattern: {withV2: 1}, version: defaultIndexVersion},
- ];
-
- } else {
- expectedResults = [
- {keyPattern: {_id: 1}, version: 1},
- {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
- {keyPattern: {withV1: 1}, version: 1},
- {keyPattern: {withV2: 1}, version: 2},
- ];
- }
-
- expectedResults.forEach(function(expected) {
- var allIndexes = collToVerify.getIndexes();
- var spec = GetIndexHelpers.findByKeyPattern(allIndexes, expected.keyPattern);
- assert.neq(null,
- spec,
- "Index with key pattern " + tojson(expected.keyPattern) + " not found: " +
- tojson(allIndexes));
- assert.eq(expected.version,
- spec.v,
- "Expected index to be rebuilt with " +
- (doesAutoUpgrade ? "the default" : "its original") + " version: " +
- tojson(spec));
- });
+ assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
+
+ assert.commandWorked(coll.createIndex({withoutAnyOptions: 1}));
+ allIndexes = coll.getIndexes();
+ spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withoutAnyOptions: 1});
+ assert.neq(null,
+ spec,
+ "Index with key pattern {withoutAnyOptions: 1} not found: " + tojson(allIndexes));
+ assert.eq(defaultIndexVersion,
+ spec.v,
+ "Expected an index with the default version to be built: " + tojson(spec));
+
+ assert.commandWorked(coll.createIndex({withV1: 1}, {v: 1}));
+ allIndexes = coll.getIndexes();
+ spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV1: 1});
+ assert.neq(null, spec, "Index with key pattern {withV1: 1} not found: " + tojson(allIndexes));
+ assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
+
+ assert.commandWorked(coll.createIndex({withV2: 1}, {v: 2}));
+ allIndexes = coll.getIndexes();
+ spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV2: 1});
+ assert.neq(null, spec, "Index with key pattern {withV2: 1} not found: " + tojson(allIndexes));
+ assert.eq(2, spec.v, "Expected a v=2 index to be built: " + tojson(spec));
+
+ var collToVerify = commandFn(coll);
+ var expectedResults;
+
+ if (doesAutoUpgrade) {
+ expectedResults = [
+ {keyPattern: {_id: 1}, version: defaultIndexVersion},
+ {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
+ {keyPattern: {withV1: 1}, version: defaultIndexVersion},
+ {keyPattern: {withV2: 1}, version: defaultIndexVersion},
+ ];
+
+ } else {
+ expectedResults = [
+ {keyPattern: {_id: 1}, version: 1},
+ {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
+ {keyPattern: {withV1: 1}, version: 1},
+ {keyPattern: {withV2: 1}, version: 2},
+ ];
}
- // Test that the "reIndex" command upgrades all existing indexes to the latest version.
- testIndexVersionAutoUpgrades(function(coll) {
- assert.commandWorked(coll.getDB().runCommand({reIndex: coll.getName()}));
- return coll;
- }, true);
-
- // Test that the "compact" command doesn't upgrade existing indexes to the latest version.
- testIndexVersionAutoUpgrades(function(coll) {
- var res = coll.getDB().runCommand({compact: coll.getName()});
- if (res.ok === 0) {
- // Ephemeral storage engines don't support the "compact" command. The existing indexes
- // should remain unchanged.
- assert.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
- } else {
- assert.commandWorked(res);
- }
- return coll;
- }, false);
-
- // Test that the "cloneCollection" command doesn't upgrade existing indexes to the latest
- // version.
- var cloneConn = MongoRunner.runMongod({});
- assert.neq(null, cloneConn, "mongod was unable to start up");
- testIndexVersionAutoUpgrades(function(coll) {
- var cloneDB = cloneConn.getDB(coll.getDB().getName());
- assert.commandWorked(cloneDB.runCommand({
- cloneCollection: coll.getFullName(),
- from: conn.host,
- }));
- return cloneDB[coll.getName()];
- }, false);
- MongoRunner.stopMongod(cloneConn);
-
- MongoRunner.stopMongod(conn);
+ expectedResults.forEach(function(expected) {
+ var allIndexes = collToVerify.getIndexes();
+ var spec = GetIndexHelpers.findByKeyPattern(allIndexes, expected.keyPattern);
+ assert.neq(null,
+ spec,
+ "Index with key pattern " + tojson(expected.keyPattern) +
+ " not found: " + tojson(allIndexes));
+ assert.eq(expected.version,
+ spec.v,
+ "Expected index to be rebuilt with " +
+ (doesAutoUpgrade ? "the default" : "its original") +
+ " version: " + tojson(spec));
+ });
+}
+
+// Test that the "reIndex" command upgrades all existing indexes to the latest version.
+testIndexVersionAutoUpgrades(function(coll) {
+ assert.commandWorked(coll.getDB().runCommand({reIndex: coll.getName()}));
+ return coll;
+}, true);
+
+// Test that the "compact" command doesn't upgrade existing indexes to the latest version.
+testIndexVersionAutoUpgrades(function(coll) {
+ var res = coll.getDB().runCommand({compact: coll.getName()});
+ if (res.ok === 0) {
+ // Ephemeral storage engines don't support the "compact" command. The existing indexes
+ // should remain unchanged.
+ assert.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
+ } else {
+ assert.commandWorked(res);
+ }
+ return coll;
+}, false);
+
+// Test that the "cloneCollection" command doesn't upgrade existing indexes to the latest
+// version.
+var cloneConn = MongoRunner.runMongod({});
+assert.neq(null, cloneConn, "mongod was unable to start up");
+testIndexVersionAutoUpgrades(function(coll) {
+ var cloneDB = cloneConn.getDB(coll.getDB().getName());
+ assert.commandWorked(cloneDB.runCommand({
+ cloneCollection: coll.getFullName(),
+ from: conn.host,
+ }));
+ return cloneDB[coll.getName()];
+}, false);
+MongoRunner.stopMongod(cloneConn);
+
+MongoRunner.stopMongod(conn);
})();
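
The idIndex create option used above is how the test pins the _id index to v=1, and reIndex is what lifts it back to the default. A condensed sketch, assuming a test database `db` whose default index version is 2:

    assert.commandWorked(db.createCollection(
        "idx_version_demo", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
    let spec = db.idx_version_demo.getIndexes()[0];  // only _id_ exists yet
    assert.eq(1, spec.v);

    assert.commandWorked(db.runCommand({reIndex: "idx_version_demo"}));
    spec = db.idx_version_demo.getIndexes()[0];
    assert.eq(2, spec.v);  // rebuilt at the assumed server default
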
diff --git a/jstests/noPassthrough/index_version_v2.js b/jstests/noPassthrough/index_version_v2.js
index b3fe65841d3..886c7c39590 100644
--- a/jstests/noPassthrough/index_version_v2.js
+++ b/jstests/noPassthrough/index_version_v2.js
@@ -6,119 +6,116 @@
* the KeyString format.
*/
(function() {
- "use strict";
-
- const storageEnginesUsingKeyString = new Set(["wiredTiger", "inMemory", "rocksdb"]);
-
- function getIndexSpecByName(coll, indexName) {
- const indexes = coll.getIndexes();
- const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
- assert.eq(1,
- indexesFilteredByName.length,
- "index '" + indexName + "' not found: " + tojson(indexes));
- return indexesFilteredByName[0];
- }
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
-
- const testDB = conn.getDB("test");
- const storageEngine = testDB.serverStatus().storageEngine.name;
-
- //
- // Index version v=2
- //
-
- testDB.dropDatabase();
-
- // Test that the _id index of a collection is created with v=2 by default.
- assert.commandWorked(testDB.runCommand({create: "index_version"}));
- let indexSpec = getIndexSpecByName(testDB.index_version, "_id_");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that an index created on an existing collection is created with v=2 by default.
- assert.commandWorked(testDB.index_version.createIndex({defaultToV2: 1}, {name: "defaultToV2"}));
- indexSpec = getIndexSpecByName(testDB.index_version, "defaultToV2");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that creating an index with v=2 succeeds.
- assert.commandWorked(testDB.index_version.createIndex({withV2: 1}, {v: 2, name: "withV2"}));
- indexSpec = getIndexSpecByName(testDB.index_version, "withV2");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that creating a collection with a non-simple default collation succeeds.
- assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
- indexSpec = getIndexSpecByName(testDB.collation, "_id_");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that creating an index with a non-simple collation succeeds.
- assert.commandWorked(
- testDB.collation.createIndex({str: 1}, {name: "withCollation", collation: {locale: "fr"}}));
- indexSpec = getIndexSpecByName(testDB.collation, "withCollation");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that indexing decimal data succeeds.
- assert.writeOK(testDB.decimal.insert({_id: new NumberDecimal("42")}));
-
- //
- // Index version v=1
- //
-
- testDB.dropDatabase();
-
- // Test that creating an index with v=1 succeeds.
- assert.commandWorked(testDB.index_version.createIndex({withV1: 1}, {v: 1, name: "withV1"}));
- indexSpec = getIndexSpecByName(testDB.index_version, "withV1");
- assert.eq(1, indexSpec.v, tojson(indexSpec));
-
- // Test that creating an index with v=1 and a simple collation returns an error.
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
-
- // Test that creating an index with v=1 and a non-simple collation returns an error.
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
-
- // Test that creating an index with v=1 and a simple collation on a collection with a non-simple
- // default collation returns an error.
- testDB.collation.drop();
- assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
-
- // Test that creating an index with v=1 and a non-simple collation on a collection with a
- // non-simple default collation returns an error.
- testDB.collation.drop();
- assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
-
- // Test that indexing decimal data with a v=1 index returns an error on storage engines using
- // the KeyString format.
- assert.commandWorked(testDB.decimal.createIndex({num: 1}, {v: 1}));
- if (storageEnginesUsingKeyString.has(storageEngine)) {
- assert.writeErrorWithCode(testDB.decimal.insert({num: new NumberDecimal("42")}),
- ErrorCodes.UnsupportedFormat);
- } else {
- assert.writeOK(testDB.decimal.insert({num: new NumberDecimal("42")}));
- }
-
- //
- // Index version v=0
- //
-
- testDB.dropDatabase();
-
- // Test that attempting to create an index with v=0 returns an error.
- assert.commandFailed(testDB.index_version.createIndex({withV0: 1}, {v: 0}));
-
- //
- // Index version v=3
- //
-
- testDB.dropDatabase();
-
- // Test that attempting to create an index with v=3 returns an error.
- assert.commandFailed(testDB.index_version.createIndex({withV3: 1}, {v: 3}));
- MongoRunner.stopMongod(conn);
+"use strict";
+
+const storageEnginesUsingKeyString = new Set(["wiredTiger", "inMemory", "rocksdb"]);
+
+function getIndexSpecByName(coll, indexName) {
+ const indexes = coll.getIndexes();
+ const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
+ assert.eq(
+ 1, indexesFilteredByName.length, "index '" + indexName + "' not found: " + tojson(indexes));
+ return indexesFilteredByName[0];
+}
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+
+const testDB = conn.getDB("test");
+const storageEngine = testDB.serverStatus().storageEngine.name;
+
+//
+// Index version v=2
+//
+
+testDB.dropDatabase();
+
+// Test that the _id index of a collection is created with v=2 by default.
+assert.commandWorked(testDB.runCommand({create: "index_version"}));
+let indexSpec = getIndexSpecByName(testDB.index_version, "_id_");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that an index created on an existing collection is created with v=2 by default.
+assert.commandWorked(testDB.index_version.createIndex({defaultToV2: 1}, {name: "defaultToV2"}));
+indexSpec = getIndexSpecByName(testDB.index_version, "defaultToV2");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that creating an index with v=2 succeeds.
+assert.commandWorked(testDB.index_version.createIndex({withV2: 1}, {v: 2, name: "withV2"}));
+indexSpec = getIndexSpecByName(testDB.index_version, "withV2");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that creating a collection with a non-simple default collation succeeds.
+assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
+indexSpec = getIndexSpecByName(testDB.collation, "_id_");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that creating an index with a non-simple collation succeeds.
+assert.commandWorked(
+ testDB.collation.createIndex({str: 1}, {name: "withCollation", collation: {locale: "fr"}}));
+indexSpec = getIndexSpecByName(testDB.collation, "withCollation");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that indexing decimal data succeeds.
+assert.writeOK(testDB.decimal.insert({_id: new NumberDecimal("42")}));
+
+//
+// Index version v=1
+//
+
+testDB.dropDatabase();
+
+// Test that creating an index with v=1 succeeds.
+assert.commandWorked(testDB.index_version.createIndex({withV1: 1}, {v: 1, name: "withV1"}));
+indexSpec = getIndexSpecByName(testDB.index_version, "withV1");
+assert.eq(1, indexSpec.v, tojson(indexSpec));
+
+// Test that creating an index with v=1 and a simple collation returns an error.
+assert.commandFailed(testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
+
+// Test that creating an index with v=1 and a non-simple collation returns an error.
+assert.commandFailed(
+ testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
+
+// Test that creating an index with v=1 and a simple collation on a collection with a non-simple
+// default collation returns an error.
+testDB.collation.drop();
+assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
+assert.commandFailed(testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
+
+// Test that creating an index with v=1 and a non-simple collation on a collection with a
+// non-simple default collation returns an error.
+testDB.collation.drop();
+assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
+assert.commandFailed(
+ testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
+
+// Test that indexing decimal data with a v=1 index returns an error on storage engines using
+// the KeyString format.
+assert.commandWorked(testDB.decimal.createIndex({num: 1}, {v: 1}));
+if (storageEnginesUsingKeyString.has(storageEngine)) {
+ assert.writeErrorWithCode(testDB.decimal.insert({num: new NumberDecimal("42")}),
+ ErrorCodes.UnsupportedFormat);
+} else {
+ assert.writeOK(testDB.decimal.insert({num: new NumberDecimal("42")}));
+}
+
+//
+// Index version v=0
+//
+
+testDB.dropDatabase();
+
+// Test that attempting to create an index with v=0 returns an error.
+assert.commandFailed(testDB.index_version.createIndex({withV0: 1}, {v: 0}));
+
+//
+// Index version v=3
+//
+
+testDB.dropDatabase();
+
+// Test that attempting to create an index with v=3 returns an error.
+assert.commandFailed(testDB.index_version.createIndex({withV3: 1}, {v: 3}));
+MongoRunner.stopMongod(conn);
})();
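
Both restrictions tested above follow from the v=1 key format predating collations and the decimal type. A condensed sketch, assuming `db` on a KeyString-based engine such as wiredTiger:

    // An explicit collation, simple or not, is rejected for a v=1 index.
    assert.commandFailed(
        db.c.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));

    // The v=1 index itself builds, but decimal keys cannot be encoded in it.
    assert.commandWorked(db.c.createIndex({num: 1}, {v: 1}));
    assert.writeErrorWithCode(db.c.insert({num: new NumberDecimal("42")}),
                              ErrorCodes.UnsupportedFormat);
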
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 7b280ed9d07..1b06b881e30 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -2,131 +2,131 @@
// @tags: [SERVER-40561]
(function() {
- "use strict";
-
- load("jstests/noPassthrough/libs/index_build.js");
-
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
- var db = conn.getDB("test");
- var baseName = "jstests_indexbg1";
-
- var parallel = function() {
- return db[baseName + "_parallelStatus"];
- };
-
- var resetParallel = function() {
- parallel().drop();
- };
-
- // Return the PID to call `waitpid` on for clean shutdown.
- var doParallel = function(work) {
- resetParallel();
- print("doParallel: " + work);
- return startMongoProgramNoConnect(
- "mongo",
- "--eval",
- work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
- db.getMongo().host);
- };
-
- var doneParallel = function() {
- return !!parallel().findOne();
- };
-
- var waitParallel = function() {
- assert.soon(function() {
- return doneParallel();
- }, "parallel did not finish in time", 300000, 1000);
- };
-
- var size = 400 * 1000;
- var bgIndexBuildPid;
- while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
- print("size: " + size);
-
- var fullName = "db." + baseName;
- var t = db[baseName];
- t.drop();
-
- var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
+"use strict";
+
+load("jstests/noPassthrough/libs/index_build.js");
+
+const conn = MongoRunner.runMongod({nojournal: ""});
+assert.neq(null, conn, "mongod failed to start.");
+var db = conn.getDB("test");
+var baseName = "jstests_indexbg1";
+
+var parallel = function() {
+ return db[baseName + "_parallelStatus"];
+};
+
+var resetParallel = function() {
+ parallel().drop();
+};
+
+// Return the PID to call `waitpid` on for clean shutdown.
+var doParallel = function(work) {
+ resetParallel();
+ print("doParallel: " + work);
+ return startMongoProgramNoConnect(
+ "mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
+};
+
+var doneParallel = function() {
+ return !!parallel().findOne();
+};
+
+var waitParallel = function() {
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
+};
+
+var size = 400 * 1000;
+var bgIndexBuildPid;
+while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
+
+ var fullName = "db." + baseName;
+ var t = db[baseName];
+ t.drop();
+
+ var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.eq(size, t.count());
+
+ bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
+ try {
+ // wait for indexing to start
+ print("wait for indexing to start");
+ IndexBuildTest.waitForIndexBuildToStart(db);
+ print("started.");
+ sleep(1000); // there is a race between when the index build shows up in curop and
+ // when it first attempts to grab a write lock.
assert.eq(size, t.count());
-
- bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
- try {
- // wait for indexing to start
- print("wait for indexing to start");
- IndexBuildTest.waitForIndexBuildToStart(db);
- print("started.");
- sleep(1000); // there is a race between when the index build shows up in curop and
- // when it first attempts to grab a write lock.
- assert.eq(size, t.count());
- assert.eq(100, t.findOne({i: 100}).i);
- var q = t.find();
- for (i = 0; i < 120; ++i) { // getmore
- q.next();
- assert(q.hasNext(), "no next");
- }
- var ex = t.find({i: 100}).limit(-1).explain("executionStats");
- printjson(ex);
- assert(ex.executionStats.totalKeysExamined < 1000,
- "took too long to find 100: " + tojson(ex));
-
- assert.writeOK(t.remove({i: 40}, true)); // table scan
- assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
-
- var id = t.find().hint({$natural: -1}).next()._id;
-
- assert.writeOK(t.update({_id: id}, {i: -2}));
- assert.writeOK(t.save({i: -50}));
- assert.writeOK(t.save({i: size + 2}));
-
- assert.eq(size + 1, t.count());
-
- print("finished with checks");
- } catch (e) {
- // only a failure if we're still indexing
- // wait for parallel status to update to reflect indexing status
- print("caught exception: " + e);
- sleep(1000);
- if (!doneParallel()) {
- throw e;
- }
- print("but that's OK");
+ assert.eq(100, t.findOne({i: 100}).i);
+ var q = t.find();
+ for (i = 0; i < 120; ++i) { // getmore
+ q.next();
+ assert(q.hasNext(), "no next");
}
+ var ex = t.find({i: 100}).limit(-1).explain("executionStats");
+ printjson(ex);
+ assert(ex.executionStats.totalKeysExamined < 1000,
+ "took too long to find 100: " + tojson(ex));
+
+ assert.writeOK(t.remove({i: 40}, true)); // table scan
+ assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
- print("going to check if index is done");
+ var id = t.find().hint({$natural: -1}).next()._id;
+
+ assert.writeOK(t.update({_id: id}, {i: -2}));
+ assert.writeOK(t.save({i: -50}));
+ assert.writeOK(t.save({i: size + 2}));
+
+ assert.eq(size + 1, t.count());
+
+ print("finished with checks");
+ } catch (e) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ print("caught exception: " + e);
+ sleep(1000);
if (!doneParallel()) {
- break;
+ throw e;
}
- print("indexing finished too soon, retrying...");
- // Although the index build finished, ensure the shell has exited.
- waitProgram(bgIndexBuildPid);
- size *= 2;
- assert(size < 200000000, "unable to run checks in parallel with index creation");
+ print("but that's OK");
}
- print("our tests done, waiting for parallel to finish");
- waitParallel();
- // Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM, which can
- // lead to a false test failure.
+ print("going to check if index is done");
+ if (!doneParallel()) {
+ break;
+ }
+ print("indexing finished too soon, retrying...");
+ // Although the index build finished, ensure the shell has exited.
waitProgram(bgIndexBuildPid);
- print("finished");
-
- assert.eq(1, t.count({i: -10}));
- assert.eq(1, t.count({i: -2}));
- assert.eq(1, t.count({i: -50}));
- assert.eq(1, t.count({i: size + 2}));
- assert.eq(0, t.count({i: 40}));
- print("about to drop index");
- t.dropIndex({i: 1});
- var gle = db.getLastError();
- printjson(gle);
- assert(!gle);
-
- MongoRunner.stopMongod(conn);
+ size *= 2;
+ assert(size < 200000000, "unable to run checks in parallel with index creation");
+}
+
+print("our tests done, waiting for parallel to finish");
+waitParallel();
+// Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM, which can
+// lead to a false test failure.
+waitProgram(bgIndexBuildPid);
+print("finished");
+
+assert.eq(1, t.count({i: -10}));
+assert.eq(1, t.count({i: -2}));
+assert.eq(1, t.count({i: -50}));
+assert.eq(1, t.count({i: size + 2}));
+assert.eq(0, t.count({i: 40}));
+print("about to drop index");
+t.dropIndex({i: 1});
+var gle = db.getLastError();
+printjson(gle);
+assert(!gle);
+
+MongoRunner.stopMongod(conn);
})();
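
The _parallelStatus collection above is a simple completion handshake between the main shell and the background shell. A trimmed sketch of just that mechanism, relying on the same shell helpers this harness provides; the collection name is illustrative:

    const status = db.bg_demo_parallelStatus;  // hypothetical sentinel collection
    status.drop();
    const pid = startMongoProgramNoConnect(
        "mongo",
        "--eval",
        "db.bg_demo_parallelStatus.save({done: 1});",
        db.getMongo().host);
    assert.soon(() => !!status.findOne(), "parallel shell did not finish", 300000, 1000);
    waitProgram(pid);  // reap the shell so shutdown stays clean
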
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index a037dc97dd3..c7a119048ec 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -2,156 +2,156 @@
// @tags: [SERVER-40561, requires_document_locking]
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+const conn = MongoRunner.runMongod({nojournal: ""});
+assert.neq(null, conn, "mongod failed to start.");
+
+let db = conn.getDB("test");
+let baseName = "jstests_index12";
+
+let parallel = function() {
+ return db[baseName + "_parallelStatus"];
+};
+
+let resetParallel = function() {
+ parallel().drop();
+};
+
+// Return the PID to call `waitpid` on for clean shutdown.
+let doParallel = function(work) {
+ resetParallel();
+ return startMongoProgramNoConnect(
+ "mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
+};
+
+let indexBuild = function() {
+ let fullName = "db." + baseName;
+ return doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
+};
+
+let doneParallel = function() {
+ return !!parallel().findOne();
+};
+
+let waitParallel = function() {
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
+};
+
+let turnFailPointOn = function(failPointName, i) {
+ assert.commandWorked(
+ conn.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
+};
+
+let turnFailPointOff = function(failPointName) {
+ assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+};
+
+// Unique background index build fails when there exist duplicate indexed values
+// for the duration of the build.
+let failOnExistingDuplicateValue = function(coll) {
+ let duplicateKey = 0;
+ assert.writeOK(coll.save({i: duplicateKey}));
+
+ let bgIndexBuildPid = indexBuild();
+ waitProgram(bgIndexBuildPid);
+ assert.eq(1, coll.getIndexes().length, "Index should fail. There exist duplicate values.");
+
+ // Revert to unique key set
+ coll.deleteOne({i: duplicateKey});
+};
+
+// Unique background index build fails when started with a unique key set,
+// but a document with a duplicate key is inserted prior to that key being indexed.
+let failOnInsertedDuplicateValue = function(coll) {
+ let duplicateKey = 7;
+
+ turnFailPointOn("hangBeforeIndexBuildOf", duplicateKey);
+
+ let bgIndexBuildPid;
+ try {
+ bgIndexBuildPid = indexBuild();
+ jsTestLog("Waiting to hang before index build of i=" + duplicateKey);
+ checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey);
- load("jstests/libs/check_log.js");
-
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
-
- let db = conn.getDB("test");
- let baseName = "jstests_index12";
-
- let parallel = function() {
- return db[baseName + "_parallelStatus"];
- };
-
- let resetParallel = function() {
- parallel().drop();
- };
-
- // Return the PID to call `waitpid` on for clean shutdown.
- let doParallel = function(work) {
- resetParallel();
- return startMongoProgramNoConnect(
- "mongo",
- "--eval",
- work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
- db.getMongo().host);
- };
-
- let indexBuild = function() {
- let fullName = "db." + baseName;
- return doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
- };
-
- let doneParallel = function() {
- return !!parallel().findOne();
- };
-
- let waitParallel = function() {
- assert.soon(function() {
- return doneParallel();
- }, "parallel did not finish in time", 300000, 1000);
- };
-
- let turnFailPointOn = function(failPointName, i) {
- assert.commandWorked(conn.adminCommand(
- {configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
- };
-
- let turnFailPointOff = function(failPointName) {
- assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- };
-
- // Unique background index build fails when there exist duplicate indexed values
- // for the duration of the build.
- let failOnExistingDuplicateValue = function(coll) {
- let duplicateKey = 0;
assert.writeOK(coll.save({i: duplicateKey}));
+ } finally {
+ turnFailPointOff("hangBeforeIndexBuildOf");
+ }
- let bgIndexBuildPid = indexBuild();
- waitProgram(bgIndexBuildPid);
- assert.eq(1, coll.getIndexes().length, "Index should fail. There exist duplicate values.");
-
- // Revert to unique key set
- coll.deleteOne({i: duplicateKey});
- };
-
- // Unique background index build fails when started with a unique key set,
- // but a document with a duplicate key is inserted prior to that key being indexed.
- let failOnInsertedDuplicateValue = function(coll) {
- let duplicateKey = 7;
-
- turnFailPointOn("hangBeforeIndexBuildOf", duplicateKey);
+ waitProgram(bgIndexBuildPid);
+ assert.eq(1,
+ coll.getIndexes().length,
+ "Index should fail. Duplicate key is inserted prior to that key being indexed.");
- let bgIndexBuildPid;
- try {
- bgIndexBuildPid = indexBuild();
- jsTestLog("Waiting to hang before index build of i=" + duplicateKey);
- checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey);
+ // Revert to unique key set
+ coll.deleteOne({i: duplicateKey});
+};
- assert.writeOK(coll.save({i: duplicateKey}));
- } finally {
- turnFailPointOff("hangBeforeIndexBuildOf");
- }
+// Unique background index build succeeds:
+// 1) when a document is inserted and removed with a key that has already been indexed
+// 2) when a document with a key not present in the initial set is inserted and removed
+let succeedWithoutWriteErrors = function(coll, newKey) {
+ let duplicateKey = 3;
- waitProgram(bgIndexBuildPid);
- assert.eq(1,
- coll.getIndexes().length,
- "Index should fail. Duplicate key is inserted prior to that key being indexed.");
+ turnFailPointOn("hangAfterIndexBuildOf", duplicateKey);
- // Revert to unique key set
- coll.deleteOne({i: duplicateKey});
- };
+ let bgIndexBuildPid;
+ try {
+ bgIndexBuildPid = indexBuild();
- // Unique background index build succeeds:
- // 1) when a document is inserted and removed with a key that has already been indexed
- // 2) when a document with a key not present in the initial set is inserted and removed
- let succeedWithoutWriteErrors = function(coll, newKey) {
- let duplicateKey = 3;
+ jsTestLog("Waiting to hang after index build of i=" + duplicateKey);
+ checkLog.contains(conn, "Hanging after index build of i=" + duplicateKey);
- turnFailPointOn("hangAfterIndexBuildOf", duplicateKey);
+ assert.commandWorked(coll.insert({i: duplicateKey, n: true}));
- let bgIndexBuildPid;
- try {
- bgIndexBuildPid = indexBuild();
+ // First insert on key not present in initial set.
+ assert.commandWorked(coll.insert({i: newKey, n: true}));
- jsTestLog("Waiting to hang after index build of i=" + duplicateKey);
- checkLog.contains(conn, "Hanging after index build of i=" + duplicateKey);
+ // Remove duplicates before completing the index build.
+ assert.commandWorked(coll.deleteOne({i: duplicateKey, n: true}));
+ assert.commandWorked(coll.deleteOne({i: newKey, n: true}));
- assert.commandWorked(coll.insert({i: duplicateKey, n: true}));
+ } finally {
+ turnFailPointOff("hangAfterIndexBuildOf");
+ }
- // First insert on key not present in initial set.
- assert.commandWorked(coll.insert({i: newKey, n: true}));
+ waitProgram(bgIndexBuildPid);
+ assert.eq(2, coll.getIndexes().length, "Index build should succeed");
+};
- // Remove duplicates before completing the index build.
- assert.commandWorked(coll.deleteOne({i: duplicateKey, n: true}));
- assert.commandWorked(coll.deleteOne({i: newKey, n: true}));
-
- } finally {
- turnFailPointOff("hangAfterIndexBuildOf");
- }
-
- waitProgram(bgIndexBuildPid);
- assert.eq(2, coll.getIndexes().length, "Index build should succeed");
- };
-
- let doTest = function() {
- "use strict";
- const size = 10;
+let doTest = function() {
+ "use strict";
+ const size = 10;
- let coll = db[baseName];
- coll.drop();
+ let coll = db[baseName];
+ coll.drop();
- for (let i = 0; i < size; ++i) {
- assert.writeOK(coll.save({i: i}));
- }
- assert.eq(size, coll.count());
- assert.eq(1, coll.getIndexes().length, "_id index should already exist");
+ for (let i = 0; i < size; ++i) {
+ assert.writeOK(coll.save({i: i}));
+ }
+ assert.eq(size, coll.count());
+ assert.eq(1, coll.getIndexes().length, "_id index should already exist");
- failOnExistingDuplicateValue(coll);
- assert.eq(size, coll.count());
+ failOnExistingDuplicateValue(coll);
+ assert.eq(size, coll.count());
- failOnInsertedDuplicateValue(coll);
- assert.eq(size, coll.count());
+ failOnInsertedDuplicateValue(coll);
+ assert.eq(size, coll.count());
- succeedWithoutWriteErrors(coll, size);
+ succeedWithoutWriteErrors(coll, size);
- waitParallel();
- };
+ waitParallel();
+};
- doTest();
+doTest();
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
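
The two failpoints driven above accept a data document, so the hang fires only for one specific key rather than for every key the builder touches. A minimal sketch with the names from this test, assuming a connection `conn`:

    // Hang only when the index builder reaches the key i=7.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "hangBeforeIndexBuildOf", mode: "alwaysOn", data: {i: 7}}));

    // ... provoke the race here, e.g. insert a second document with i=7 ...

    assert.commandWorked(
        conn.adminCommand({configureFailPoint: "hangBeforeIndexBuildOf", mode: "off"}));
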
diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js
index 78bfb6f3e44..6ee8e47a54d 100644
--- a/jstests/noPassthrough/indexbg_drop.js
+++ b/jstests/noPassthrough/indexbg_drop.js
@@ -4,82 +4,82 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- var dbname = 'dropbgindex';
- var collection = 'jstests_feh';
- var size = 100;
-
- // Set up the replica set.
- var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
- var nodes = replTest.nodeList();
- printjson(nodes);
-
- // We need an arbiter to ensure that the primary doesn't step down when we restart the
- // secondary.
- replTest.startSet();
- replTest.initiate({
- "_id": "bgIndex",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
-
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
-
- var masterDB = master.getDB(dbname);
- var secondDB = second.getDB(dbname);
-
- var dc = {dropIndexes: collection, index: "i_1"};
-
- // Set up collections.
- masterDB.dropDatabase();
- jsTest.log("Creating test data " + size + " documents");
- Random.setRandomSeed();
- var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
- for (i = 0; i < size; ++i) {
- bulk.insert({i: Random.rand()});
- }
- assert.writeOK(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS}));
-
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
-
- jsTest.log("Starting background indexing for test of: " + tojson(dc));
-
- // Add another index to be sure the drop command works.
- masterDB.getCollection(collection).ensureIndex({b: 1});
- masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
-
- // Make sure the index build has started on the secondary.
- IndexBuildTest.waitForIndexBuildToStart(secondDB);
-
- jsTest.log("Dropping indexes");
- masterDB.runCommand({dropIndexes: collection, index: "*"});
-
- jsTest.log("Waiting on replication");
- assert.commandWorked(
- secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- replTest.awaitReplication();
-
- print("Index list on master:");
- masterDB.getCollection(collection).getIndexes().forEach(printjson);
-
- // Need to assert.soon because the drop only marks the index for removal;
- // the removal itself is asynchronous and may take another moment before it happens.
- var i = 0;
- assert.soon(function() {
- print("Index list on secondary (run " + i + "):");
- secondDB.getCollection(collection).getIndexes().forEach(printjson);
-
- i++;
- return 1 === secondDB.getCollection(collection).getIndexes().length;
- }, "secondary did not drop index");
-
- replTest.stopSet();
+'use strict';
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+var dbname = 'dropbgindex';
+var collection = 'jstests_feh';
+var size = 100;
+
+// Set up the replica set.
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
+var nodes = replTest.nodeList();
+printjson(nodes);
+
+// We need an arbiter to ensure that the primary doesn't step down when we restart the
+// secondary.
+replTest.startSet();
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
+
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
+
+var masterDB = master.getDB(dbname);
+var secondDB = second.getDB(dbname);
+
+var dc = {dropIndexes: collection, index: "i_1"};
+
+// Set up collections.
+masterDB.dropDatabase();
+jsTest.log("Creating test data " + size + " documents");
+Random.setRandomSeed();
+var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
+for (i = 0; i < size; ++i) {
+ bulk.insert({i: Random.rand()});
+}
+assert.writeOK(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS}));
+
+assert.commandWorked(
+ secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+
+jsTest.log("Starting background indexing for test of: " + tojson(dc));
+
+// Add another index to be sure the drop command works.
+masterDB.getCollection(collection).ensureIndex({b: 1});
+masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+
+// Make sure the index build has started on the secondary.
+IndexBuildTest.waitForIndexBuildToStart(secondDB);
+
+jsTest.log("Dropping indexes");
+masterDB.runCommand({dropIndexes: collection, index: "*"});
+
+jsTest.log("Waiting on replication");
+assert.commandWorked(
+ secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+replTest.awaitReplication();
+
+print("Index list on master:");
+masterDB.getCollection(collection).getIndexes().forEach(printjson);
+
+// Need to assert.soon because the drop only marks the index for removal;
+// the removal itself is asynchronous and may take another moment before it happens.
+var i = 0;
+assert.soon(function() {
+ print("Index list on secondary (run " + i + "):");
+ secondDB.getCollection(collection).getIndexes().forEach(printjson);
+
+ i++;
+ return 1 === secondDB.getCollection(collection).getIndexes().length;
+}, "secondary did not drop index");
+
+replTest.stopSet();
}());
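
The wildcard form of dropIndexes used above removes every index except _id_, which is why the assert.soon waits for the secondary's index count to reach exactly one. The equivalent stand-alone call, assuming a database handle `db` holding the test collection:

    assert.commandWorked(db.runCommand({dropIndexes: "jstests_feh", index: "*"}));
    // Eventually only the _id index survives on each node.
    assert.soon(() => db.jstests_feh.getIndexes().length === 1);
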
diff --git a/jstests/noPassthrough/indexbg_killop_apply_ops.js b/jstests/noPassthrough/indexbg_killop_apply_ops.js
index 526900232a3..6929395bc87 100644
--- a/jstests/noPassthrough/indexbg_killop_apply_ops.js
+++ b/jstests/noPassthrough/indexbg_killop_apply_ops.js
@@ -6,72 +6,71 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- const applyOpsCmd = {
- applyOps: [
- {
- op: 'c',
- ns: testDB.getCollection('$cmd').getFullName(),
- o: {
- createIndexes: coll.getName(),
- v: 2,
- name: 'a_1',
- key: {a: 1},
- background: true,
- },
+const applyOpsCmd = {
+ applyOps: [
+ {
+ op: 'c',
+ ns: testDB.getCollection('$cmd').getFullName(),
+ o: {
+ createIndexes: coll.getName(),
+ v: 2,
+ name: 'a_1',
+ key: {a: 1},
+ background: true,
},
- ]
- };
- const createIdx = startParallelShell(
- 'assert.commandWorked(db.adminCommand(' + tojson(applyOpsCmd) + '))', primary.port);
+ },
+ ]
+};
+const createIdx = startParallelShell(
+ 'assert.commandWorked(db.adminCommand(' + tojson(applyOpsCmd) + '))', primary.port);
- // When the index build starts, find its op id.
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
+// When the index build starts, find its op id.
+const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
- IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
+IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
- // Kill the index build. This should have no effect.
- assert.commandWorked(testDB.killOp(opId));
+// Kill the index build. This should have no effect.
+assert.commandWorked(testDB.killOp(opId));
- // Wait for the index build to stop.
- try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
- } finally {
- IndexBuildTest.resumeIndexBuilds(primary);
- }
+// Wait for the index build to stop.
+try {
+ IndexBuildTest.waitForIndexBuildToStop(testDB);
+} finally {
+ IndexBuildTest.resumeIndexBuilds(primary);
+}
- const exitCode = createIdx({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+const exitCode = createIdx({checkExitSuccess: false});
+assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
- // Check that index was created on the primary despite the attempted killOp().
- IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+// Check that index was created on the primary despite the attempted killOp().
+IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
- rst.stopSet();
+rst.stopSet();
})();
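
For comparison, the applyOps entry above is the oplog form of an ordinary createIndexes command. The direct form below can, unlike the applyOps form this test exercises, be aborted by killOp; assumes a database handle `db`:

    assert.commandWorked(db.runCommand({
        createIndexes: "test",
        indexes: [{key: {a: 1}, name: "a_1", background: true}],
    }));
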
diff --git a/jstests/noPassthrough/indexbg_killop_primary.js b/jstests/noPassthrough/indexbg_killop_primary.js
index b4074408840..cc2e36eac8f 100644
--- a/jstests/noPassthrough/indexbg_killop_primary.js
+++ b/jstests/noPassthrough/indexbg_killop_primary.js
@@ -3,58 +3,57 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- const createIdx =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
+const createIdx =
+ IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
- // When the index build starts, find its op id.
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
+// When the index build starts, find its op id.
+const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
- IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
+IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
- // Kill the index build.
- assert.commandWorked(testDB.killOp(opId));
+// Kill the index build.
+assert.commandWorked(testDB.killOp(opId));
- // Wait for the index build to stop.
- try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
- } finally {
- IndexBuildTest.resumeIndexBuilds(primary);
- }
+// Wait for the index build to stop.
+try {
+ IndexBuildTest.waitForIndexBuildToStop(testDB);
+} finally {
+ IndexBuildTest.resumeIndexBuilds(primary);
+}
- const exitCode = createIdx({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+const exitCode = createIdx({checkExitSuccess: false});
+assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
- // Check that no new index has been created. This verifies that the index build was aborted
- // rather than successfully completed.
- IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+// Check that no new index has been created. This verifies that the index build was aborted
+// rather than successfully completed.
+IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
- rst.stopSet();
+rst.stopSet();
})();
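Aside: the file above verifies that killOp() aborts an ordinary background index build on the primary. A condensed sketch of the kill sequence, reusing the helpers the test loads; 'primary' is assumed to be a replica-set primary connection with a 'test.test' collection:

    load('jstests/noPassthrough/libs/index_build.js');
    IndexBuildTest.pauseIndexBuilds(primary);
    const awaitBuild =
        IndexBuildTest.startIndexBuild(primary, 'test.test', {a: 1}, {background: true});
    const opId = IndexBuildTest.waitForIndexBuildToStart(primary.getDB('test'));
    assert.commandWorked(primary.getDB('test').killOp(opId));  // aborts this build
    try {
        IndexBuildTest.waitForIndexBuildToStop(primary.getDB('test'));
    } finally {
        IndexBuildTest.resumeIndexBuilds(primary);
    }
    awaitBuild({checkExitSuccess: false});  // the parallel shell exits abnormally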
diff --git a/jstests/noPassthrough/indexbg_killop_secondary.js b/jstests/noPassthrough/indexbg_killop_secondary.js
index 272fbfa108d..261d65788de 100644
--- a/jstests/noPassthrough/indexbg_killop_secondary.js
+++ b/jstests/noPassthrough/indexbg_killop_secondary.js
@@ -3,58 +3,58 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
- const secondary = rst.getSecondary();
- IndexBuildTest.pauseIndexBuilds(secondary);
+const secondary = rst.getSecondary();
+IndexBuildTest.pauseIndexBuilds(secondary);
- const createIdx =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
+const createIdx =
+ IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
- // When the index build starts, find its op id.
- const secondaryDB = secondary.getDB(testDB.getName());
- const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
+// When the index build starts, find its op id.
+const secondaryDB = secondary.getDB(testDB.getName());
+const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
- IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId);
+IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId);
- // Kill the index build. This should have no effect.
- assert.commandWorked(secondaryDB.killOp(opId));
+// Kill the index build. This should have no effect.
+assert.commandWorked(secondaryDB.killOp(opId));
- // Wait for the index build to stop.
- IndexBuildTest.resumeIndexBuilds(secondary);
- IndexBuildTest.waitForIndexBuildToStop(secondaryDB);
+// Wait for the index build to stop.
+IndexBuildTest.resumeIndexBuilds(secondary);
+IndexBuildTest.waitForIndexBuildToStop(secondaryDB);
- // Expect successful createIndex command invocation in parallel shell. A new index should be
- // present on the primary.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
+// Expect successful createIndex command invocation in parallel shell. A new index should be
+// present on the primary.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
- // Check that index was created on the secondary despite the attempted killOp().
- const secondaryColl = secondaryDB.getCollection(coll.getName());
- IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
+// Check that index was created on the secondary despite the attempted killOp().
+const secondaryColl = secondaryDB.getCollection(coll.getName());
+IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
- rst.stopSet();
+rst.stopSet();
})();
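Aside: the secondary variant flips the expectation: killOp() against the replicated build is a no-op, so once builds are resumed the index must exist on both nodes. The closing check, for reference ('coll' and 'secondaryColl' are the same collection seen through each node, as in the test):

    // killOp() on the secondary's replicated build has no effect; after
    // resuming, the index exists on primary and secondary alike.
    IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
    IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);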
diff --git a/jstests/noPassthrough/indexbg_shutdown.js b/jstests/noPassthrough/indexbg_shutdown.js
index 9e21209ff1f..88007a29e1a 100644
--- a/jstests/noPassthrough/indexbg_shutdown.js
+++ b/jstests/noPassthrough/indexbg_shutdown.js
@@ -7,96 +7,96 @@
*/
(function() {
- "use strict";
-
- load('jstests/libs/check_log.js');
- load('jstests/noPassthrough/libs/index_build.js');
-
- var dbname = 'bgIndexSec';
- var collection = 'bgIndexShutdown';
- var size = 100;
-
- // Set up replica set
- const replTest = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+"use strict";
+
+load('jstests/libs/check_log.js');
+load('jstests/noPassthrough/libs/index_build.js');
+
+var dbname = 'bgIndexSec';
+var collection = 'bgIndexShutdown';
+var size = 100;
+
+// Set up replica set
+const replTest = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
-
- var secondaryId = replTest.getNodeId(second);
-
- var masterDB = master.getDB(dbname);
- var secondDB = second.getDB(dbname);
-
- masterDB.dropDatabase();
- jsTest.log("creating test data " + size + " documents");
- const masterColl = masterDB.getCollection(collection);
- var bulk = masterColl.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i, j: i * i});
- }
- assert.writeOK(bulk.execute());
-
- IndexBuildTest.pauseIndexBuilds(second);
-
- jsTest.log("Starting background indexing");
- // Using a write concern to wait for the background index build to finish on the primary node
- // and be started on the secondary node (but not completed, as the oplog entry is written before
- // the background index build finishes).
- const indexSpecs = [
- {key: {i: -1, j: -1}, name: 'ij1', background: true},
- {key: {i: -1, j: 1}, name: 'ij2', background: true},
- {key: {i: 1, j: -1}, name: 'ij3', background: true},
- {key: {i: 1, j: 1}, name: 'ij4', background: true}
- ];
-
- assert.commandWorked(masterDB.runCommand({
- createIndexes: collection,
- indexes: indexSpecs,
- writeConcern: {w: 2},
- }));
- const indexes = masterColl.getIndexes();
- // Number of indexes passed to createIndexes plus one for the _id index.
- assert.eq(indexSpecs.length + 1, indexes.length, tojson(indexes));
-
- // Wait for index builds to start on the secondary.
- const opId = IndexBuildTest.waitForIndexBuildToStart(secondDB);
- jsTestLog('Index builds started on secondary. Op ID of one of the builds: ' + opId);
-
- // Kill the index build. This should have no effect.
- assert.commandWorked(secondDB.killOp(opId));
-
- // There should be a message for each index we tried to create.
- checkLog.containsWithCount(
- replTest.getSecondary(),
- 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
- indexSpecs.length);
-
- jsTest.log("Restarting secondary to retry replication");
-
- // Secondary should restart cleanly.
- replTest.restart(secondaryId, {}, /*wait=*/true);
-
- // There should again be a message for each index we tried to create, because the server
- // restarts the interrupted index build upon process startup. Note, the RAMLog is reset on
- // restart, so there should just be one set of messages in the RAMLog after restart, even though
- // the message was logged twice in total.
- checkLog.containsWithCount(
- replTest.getSecondary(),
- 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
- indexSpecs.length);
-
- replTest.stopSet();
+ },
+ ]
+});
+const nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
+
+var secondaryId = replTest.getNodeId(second);
+
+var masterDB = master.getDB(dbname);
+var secondDB = second.getDB(dbname);
+
+masterDB.dropDatabase();
+jsTest.log("creating test data " + size + " documents");
+const masterColl = masterDB.getCollection(collection);
+var bulk = masterColl.initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i, j: i * i});
+}
+assert.writeOK(bulk.execute());
+
+IndexBuildTest.pauseIndexBuilds(second);
+
+jsTest.log("Starting background indexing");
+// Using a write concern to wait for the background index build to finish on the primary node
+// and be started on the secondary node (but not completed, as the oplog entry is written before
+// the background index build finishes).
+const indexSpecs = [
+ {key: {i: -1, j: -1}, name: 'ij1', background: true},
+ {key: {i: -1, j: 1}, name: 'ij2', background: true},
+ {key: {i: 1, j: -1}, name: 'ij3', background: true},
+ {key: {i: 1, j: 1}, name: 'ij4', background: true}
+];
+
+assert.commandWorked(masterDB.runCommand({
+ createIndexes: collection,
+ indexes: indexSpecs,
+ writeConcern: {w: 2},
+}));
+const indexes = masterColl.getIndexes();
+// Number of indexes passed to createIndexes plus one for the _id index.
+assert.eq(indexSpecs.length + 1, indexes.length, tojson(indexes));
+
+// Wait for index builds to start on the secondary.
+const opId = IndexBuildTest.waitForIndexBuildToStart(secondDB);
+jsTestLog('Index builds started on secondary. Op ID of one of the builds: ' + opId);
+
+// Kill the index build. This should have no effect.
+assert.commandWorked(secondDB.killOp(opId));
+
+// There should be a message for each index we tried to create.
+checkLog.containsWithCount(
+ replTest.getSecondary(),
+ 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
+ indexSpecs.length);
+
+jsTest.log("Restarting secondary to retry replication");
+
+// Secondary should restart cleanly.
+replTest.restart(secondaryId, {}, /*wait=*/true);
+
+// There should again be a message for each index we tried to create, because the server
+// restarts the interrupted index build upon process startup. Note, the RAMLog is reset on
+// restart, so there should just be one set of messages in the RAMLog after restart, even though
+// the message was logged twice in total.
+checkLog.containsWithCount(
+ replTest.getSecondary(),
+ 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
+ indexSpecs.length);
+
+replTest.stopSet();
}());
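Aside: indexbg_shutdown.js leans on checkLog.containsWithCount to prove each interrupted build logs its start line again after the restart. A minimal usage sketch; 'conn' and the message fragment are illustrative:

    load('jstests/libs/check_log.js');
    // Assert that the node's in-memory log (getLog: 'global') contains the
    // given fragment exactly 'expected' times.
    const expected = 4;
    checkLog.containsWithCount(conn, 'index build: starting on test.coll', expected);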
diff --git a/jstests/noPassthrough/initial_sync_wt_cache_full.js b/jstests/noPassthrough/initial_sync_wt_cache_full.js
index c1a6638ea50..90d19a172ab 100644
--- a/jstests/noPassthrough/initial_sync_wt_cache_full.js
+++ b/jstests/noPassthrough/initial_sync_wt_cache_full.js
@@ -3,69 +3,68 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/libs/check_log.js');
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
- },
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const secondary = rst.restart(1, {
- startClean: true,
- setParameter:
- 'failpoint.initialSyncHangBeforeCopyingDatabases=' + tojson({mode: 'alwaysOn'})
- });
+const secondary = rst.restart(1, {
+ startClean: true,
+ setParameter: 'failpoint.initialSyncHangBeforeCopyingDatabases=' + tojson({mode: 'alwaysOn'})
+});
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- const numUpdates = 400;
- jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+const numUpdates = 400;
+jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
+}
- jsTestLog('Applying updates on secondary ' + secondary.host);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- rst.awaitReplication();
+jsTestLog('Applying updates on secondary ' + secondary.host);
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
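Aside: the initial-sync test shows both ways of driving a failpoint: armed at startup through a setParameter string, and toggled at runtime with configureFailPoint. A condensed sketch, assuming a connection 'conn':

    // Startup form (the node comes up with the failpoint already enabled):
    //   mongod --setParameter failpoint.initialSyncHangBeforeCopyingDatabases='{"mode":"alwaysOn"}'
    // Runtime form:
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
    // ... buffer the workload while the node hangs ...
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));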
diff --git a/jstests/noPassthrough/inmem_config_str.js b/jstests/noPassthrough/inmem_config_str.js
index 8e330f57b75..ecc34ab0634 100644
--- a/jstests/noPassthrough/inmem_config_str.js
+++ b/jstests/noPassthrough/inmem_config_str.js
@@ -1,17 +1,17 @@
// SERVER-28179 Test the startup of in-memory storage engine using --inMemoryEngineConfigString
(function() {
- 'use strict';
+'use strict';
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog("Skipping test because storageEngine is not inMemory");
- return;
- }
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog("Skipping test because storageEngine is not inMemory");
+ return;
+}
- var mongod = MongoRunner.runMongod({
- storageEngine: 'inMemory',
- inMemoryEngineConfigString: 'eviction=(threads_min=1)',
- });
- assert.neq(null, mongod, "mongod failed to started up with --inMemoryEngineConfigString");
+var mongod = MongoRunner.runMongod({
+ storageEngine: 'inMemory',
+ inMemoryEngineConfigString: 'eviction=(threads_min=1)',
+});
+assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
}());
diff --git a/jstests/noPassthrough/inmem_full.js b/jstests/noPassthrough/inmem_full.js
index a73a7f0ad69..84e85f31955 100644
--- a/jstests/noPassthrough/inmem_full.js
+++ b/jstests/noPassthrough/inmem_full.js
@@ -1,87 +1,87 @@
// SERVER-22599 Test behavior of in-memory storage engine with full cache.
(function() {
- 'use strict';
+'use strict';
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog("Skipping test because storageEngine is not inMemory");
- return;
- }
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog("Skipping test because storageEngine is not inMemory");
+ return;
+}
- Random.setRandomSeed();
+Random.setRandomSeed();
- // Return array of approximately 1kB worth of random numbers.
- function randomArray() {
- var arr = [];
- for (var j = 0; j < 85; j++)
- arr[j] = Random.rand();
- return arr;
- }
+// Return an array of approximately 1 kB worth of random numbers.
+function randomArray() {
+ var arr = [];
+ for (var j = 0; j < 85; j++)
+ arr[j] = Random.rand();
+ return arr;
+}
- // Return a document of approximately 10kB in size with arrays of random numbers.
- function randomDoc() {
- var doc = {};
- for (var c of "abcdefghij")
- doc[c] = randomArray();
- return doc;
- }
+// Return a document of approximately 10kB in size with arrays of random numbers.
+function randomDoc() {
+ var doc = {};
+ for (var c of "abcdefghij")
+ doc[c] = randomArray();
+ return doc;
+}
- // Return an array with random documents totalling about 1Mb.
- function randomBatch(batchSize) {
- var batch = [];
- for (var j = 0; j < batchSize; j++)
- batch[j] = randomDoc();
- return batch;
- }
+// Return an array of random documents totalling about 1 MB.
+function randomBatch(batchSize) {
+ var batch = [];
+ for (var j = 0; j < batchSize; j++)
+ batch[j] = randomDoc();
+ return batch;
+}
- const cacheMB = 128;
- const cacheKB = 1024 * cacheMB;
- const docSizeKB = Object.bsonsize(randomDoc()) / 1024;
- const batchSize = 100;
- const batch = randomBatch(batchSize);
+const cacheMB = 128;
+const cacheKB = 1024 * cacheMB;
+const docSizeKB = Object.bsonsize(randomDoc()) / 1024;
+const batchSize = 100;
+const batch = randomBatch(batchSize);
- var mongod = MongoRunner.runMongod({
- storageEngine: 'inMemory',
- inMemoryEngineConfigString: 'cache_size=' + cacheMB + "M,",
- });
- assert.neq(null, mongod, "mongod failed to started up with --inMemoryEngineConfigString");
- var db = mongod.getDB("test");
- var t = db.large;
+var mongod = MongoRunner.runMongod({
+ storageEngine: 'inMemory',
+ inMemoryEngineConfigString: 'cache_size=' + cacheMB + "M,",
+});
+assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
+var db = mongod.getDB("test");
+var t = db.large;
- // Insert documents until full.
- var res;
- var count = 0;
- for (var j = 0; j < 1000; j++) {
- res = t.insert(batch);
- assert.gte(res.nInserted, 0, tojson(res));
- count += res.nInserted;
- if (res.hasErrors())
- break;
- assert.eq(res.nInserted, batchSize, tojson(res));
- print("Inserted " + count + " documents");
- }
- assert.writeError(res, "didn't get ExceededMemoryLimit but should have");
+// Insert documents until full.
+var res;
+var count = 0;
+for (var j = 0; j < 1000; j++) {
+ res = t.insert(batch);
+ assert.gte(res.nInserted, 0, tojson(res));
+ count += res.nInserted;
+ if (res.hasErrors())
+ break;
+ assert.eq(res.nInserted, batchSize, tojson(res));
print("Inserted " + count + " documents");
+}
+assert.writeError(res, "didn't get ExceededMemoryLimit but should have");
+print("Inserted " + count + " documents");
- // Should have encountered exactly one memory full error.
- assert.eq(res.getWriteErrorCount(), 1, tojson(res));
- assert.eq(res.getWriteErrorAt(0).code, ErrorCodes.ExceededMemoryLimit, tojson(res));
+// Should have encountered exactly one memory full error.
+assert.eq(res.getWriteErrorCount(), 1, tojson(res));
+assert.eq(res.getWriteErrorAt(0).code, ErrorCodes.ExceededMemoryLimit, tojson(res));
- // Should encounter memory full at between 75% and 150% of total capacity.
- assert.gt(count * docSizeKB, cacheKB * 0.75, "inserted data size is at least 75% of capacity");
- assert.lt(count * docSizeKB, cacheKB * 1.50, "inserted data size is at most 150% of capacity");
+// Should encounter memory full at between 75% and 150% of total capacity.
+assert.gt(count * docSizeKB, cacheKB * 0.75, "inserted data size is at least 75% of capacity");
+assert.lt(count * docSizeKB, cacheKB * 1.50, "inserted data size is at most 150% of capacity");
- // Indexes are sufficiently large that it should be impossible to add a new one.
- assert.commandFailedWithCode(t.createIndex({a: 1}), ErrorCodes.ExceededMemoryLimit);
+// Indexes are sufficiently large that it should be impossible to add a new one.
+assert.commandFailedWithCode(t.createIndex({a: 1}), ErrorCodes.ExceededMemoryLimit);
- // An aggregate copying all 'a' and 'b' fields should run out of memory.
- // Can't test the specific error code, because it depends on whether the collection
- // creation already fails, or just the writing. Agg wraps the original error code.
- assert.commandFailed(
- t.runCommand("aggregate", {pipeline: [{$project: {a: 1, b: 1}}, {$out: "test.out"}]}));
+// An aggregate copying all 'a' and 'b' fields should run out of memory.
+// We can't check for a specific error code because it depends on whether the collection
+// creation itself fails or only the write does; aggregation wraps the original error code.
+assert.commandFailed(
+ t.runCommand("aggregate", {pipeline: [{$project: {a: 1, b: 1}}, {$out: "test.out"}]}));
- // Should still be able to query.
- assert.eq(t.find({}).itcount(), count, "cannot find expected number of documents");
- assert.eq(t.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).next().count,
- count,
- "cannot aggregate expected number of documents");
+// Should still be able to query.
+assert.eq(t.find({}).itcount(), count, "cannot find expected number of documents");
+assert.eq(t.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).next().count,
+ count,
+ "cannot aggregate expected number of documents");
}());
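Aside: inmem_full.js sizes its workload with Object.bsonsize, which returns a document's BSON size in bytes. A quick sketch of the arithmetic behind the test's 75%-150% capacity bounds (the numbers mirror its 128 MB cache):

    const doc = {x: 'x'.repeat(10 * 1024)};    // ~10 kB payload
    const docKB = Object.bsonsize(doc) / 1024;  // payload plus BSON overhead
    const cacheKB = 128 * 1024;
    print('documents to fill the cache: ~' + Math.floor(cacheKB / docKB));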
diff --git a/jstests/noPassthrough/internal_validate_features_as_master.js b/jstests/noPassthrough/internal_validate_features_as_master.js
index d60ee184b7c..710174cf8b3 100644
--- a/jstests/noPassthrough/internal_validate_features_as_master.js
+++ b/jstests/noPassthrough/internal_validate_features_as_master.js
@@ -1,32 +1,30 @@
// Tests the internalValidateFeaturesAsMaster server parameter.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- // internalValidateFeaturesAsMaster can be set via startup parameter.
- let conn = MongoRunner.runMongod({setParameter: "internalValidateFeaturesAsMaster=1"});
- assert.neq(null, conn, "mongod was unable to start up");
- let res = conn.adminCommand({getParameter: 1, internalValidateFeaturesAsMaster: 1});
- assert.commandWorked(res);
- assert.eq(res.internalValidateFeaturesAsMaster, true);
- MongoRunner.stopMongod(conn);
+// internalValidateFeaturesAsMaster can be set via startup parameter.
+let conn = MongoRunner.runMongod({setParameter: "internalValidateFeaturesAsMaster=1"});
+assert.neq(null, conn, "mongod was unable to start up");
+let res = conn.adminCommand({getParameter: 1, internalValidateFeaturesAsMaster: 1});
+assert.commandWorked(res);
+assert.eq(res.internalValidateFeaturesAsMaster, true);
+MongoRunner.stopMongod(conn);
- // internalValidateFeaturesAsMaster cannot be set with --replSet.
- conn = MongoRunner.runMongod(
- {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=0"});
- assert.eq(null, conn, "mongod was unexpectedly able to start up");
+// internalValidateFeaturesAsMaster cannot be set with --replSet.
+conn = MongoRunner.runMongod(
+ {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=0"});
+assert.eq(null, conn, "mongod was unexpectedly able to start up");
- conn = MongoRunner.runMongod(
- {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=1"});
- assert.eq(null, conn, "mongod was unexpectedly able to start up");
+conn = MongoRunner.runMongod(
+ {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=1"});
+assert.eq(null, conn, "mongod was unexpectedly able to start up");
- // internalValidateFeaturesAsMaster cannot be set via runtime parameter.
- conn = MongoRunner.runMongod({});
- assert.commandFailed(
- conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: true}));
- assert.commandFailed(
- conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: false}));
- MongoRunner.stopMongod(conn);
+// internalValidateFeaturesAsMaster cannot be set via runtime parameter.
+conn = MongoRunner.runMongod({});
+assert.commandFailed(conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: true}));
+assert.commandFailed(conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: false}));
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/jsHeapLimit.js b/jstests/noPassthrough/jsHeapLimit.js
index 512a79332ad..0089955c4a5 100644
--- a/jstests/noPassthrough/jsHeapLimit.js
+++ b/jstests/noPassthrough/jsHeapLimit.js
@@ -1,26 +1,24 @@
(function() {
- "use strict";
+"use strict";
- const options = {setParameter: "jsHeapLimitMB=1000"};
- const conn = MongoRunner.runMongod(options);
+const options = {
+ setParameter: "jsHeapLimitMB=1000"
+};
+const conn = MongoRunner.runMongod(options);
- // verify JSHeapLimitMB set from the shell
- var assertLimit = function() {
- assert.eq(999, getJSHeapLimitMB());
- };
- var exitCode = runMongoProgram("mongo",
- conn.host,
- "--jsHeapLimitMB",
- 999,
- "--eval",
- "(" + assertLimit.toString() + ")();");
- assert.eq(0, exitCode);
+// verify JSHeapLimitMB set from the shell
+var assertLimit = function() {
+ assert.eq(999, getJSHeapLimitMB());
+};
+var exitCode = runMongoProgram(
+ "mongo", conn.host, "--jsHeapLimitMB", 999, "--eval", "(" + assertLimit.toString() + ")();");
+assert.eq(0, exitCode);
- // verify the JSHeapLimitMB set from Mongod
- const db = conn.getDB('test');
- const res = db.adminCommand({getParameter: 1, jsHeapLimitMB: 1});
- assert.commandWorked(res);
- assert.eq(1000, res.jsHeapLimitMB);
+// verify the JSHeapLimitMB set from Mongod
+const db = conn.getDB('test');
+const res = db.adminCommand({getParameter: 1, jsHeapLimitMB: 1});
+assert.commandWorked(res);
+assert.eq(1000, res.jsHeapLimitMB);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
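Aside: the shell-side limit (getJSHeapLimitMB, set by --jsHeapLimitMB) and the server parameter jsHeapLimitMB are independent knobs, which is the point of the test. Reading each back:

    // Shell side, e.g. after starting mongo with --jsHeapLimitMB 999:
    print(getJSHeapLimitMB());
    // Server side:
    const res = db.adminCommand({getParameter: 1, jsHeapLimitMB: 1});
    assert.commandWorked(res);
    print(res.jsHeapLimitMB);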
diff --git a/jstests/noPassthrough/js_exceptions.js b/jstests/noPassthrough/js_exceptions.js
index fe7cb4aa48f..27c7f6c2a41 100644
--- a/jstests/noPassthrough/js_exceptions.js
+++ b/jstests/noPassthrough/js_exceptions.js
@@ -3,120 +3,120 @@
*
*/
(function() {
- 'use strict';
- let tests = [
- {
- callback: function() {
- UUID("asdf");
- },
- match: "Error: Invalid UUID string: asdf :",
- stack: true,
+'use strict';
+let tests = [
+ {
+ callback: function() {
+ UUID("asdf");
},
- {
- callback: function() {
- throw {};
- },
- match: "uncaught exception: \\\[object Object\\\] :",
- stack: undefined,
+ match: "Error: Invalid UUID string: asdf :",
+ stack: true,
+ },
+ {
+ callback: function() {
+ throw {};
},
- {
- callback: function() {
- throw "asdf";
- },
- match: "uncaught exception: asdf",
- stack: false,
+ match: "uncaught exception: \\\[object Object\\\] :",
+ stack: undefined,
+ },
+ {
+ callback: function() {
+ throw "asdf";
},
- {
- callback: function() {
- throw 1;
- },
- match: "uncaught exception: 1",
- stack: false,
+ match: "uncaught exception: asdf",
+ stack: false,
+ },
+ {
+ callback: function() {
+ throw 1;
},
- {
- callback: function() {
- foo.bar();
- },
- match: "uncaught exception: ReferenceError: foo is not defined :",
- stack: true,
+ match: "uncaught exception: 1",
+ stack: false,
+ },
+ {
+ callback: function() {
+ foo.bar();
},
- {
- callback: function() {
- throw function() {};
- },
- match: "function\\\(\\\) {} :",
- stack: undefined,
+ match: "uncaught exception: ReferenceError: foo is not defined :",
+ stack: true,
+ },
+ {
+ callback: function() {
+ throw function() {};
},
- {
- callback: function() {
- try {
- UUID("asdf");
- } catch (e) {
- throw(e.constructor());
- }
- },
- match: "uncaught exception: Error :",
- stack: true,
+ match: "function\\\(\\\) {} :",
+ stack: undefined,
+ },
+ {
+ callback: function() {
+ try {
+ UUID("asdf");
+ } catch (e) {
+ throw (e.constructor());
+ }
},
- {
- callback: function() {
- try {
- UUID("asdf");
- } catch (e) {
- throw(e.prototype);
- }
- },
- match: "uncaught exception: undefined",
- stack: false,
+ match: "uncaught exception: Error :",
+ stack: true,
+ },
+ {
+ callback: function() {
+ try {
+ UUID("asdf");
+ } catch (e) {
+ throw (e.prototype);
+ }
},
- ];
- function recurser(depth, limit, callback) {
- if (++depth >= limit) {
- callback();
- } else {
- recurser(depth, limit, callback);
- }
- }
- function assertMatch(m, l) {
- assert(m.test(l), m + " didn't match \"" + l + "\"");
+ match: "uncaught exception: undefined",
+ stack: false,
+ },
+];
+function recurser(depth, limit, callback) {
+ if (++depth >= limit) {
+ callback();
+ } else {
+ recurser(depth, limit, callback);
}
- tests.forEach(function(t) {
- let code = tojson(recurser);
- [1, 2, 10].forEach(function(depth) {
- clearRawMongoProgramOutput();
- assert.throws(startParallelShell(
- code + ";\nrecurser(0," + depth + "," + tojson(t.callback) + ");", false, true));
- let output = rawMongoProgramOutput();
- let lines = output.split(/\s*\n/);
- let matchShellExp = false;
- while (lines.length > 0 & matchShellExp !== true) {
- let line = lines.shift();
- if (line.match(/MongoDB shell version/)) {
- matchShellExp = true;
- }
+}
+function assertMatch(m, l) {
+ assert(m.test(l), m + " didn't match \"" + l + "\"");
+}
+tests.forEach(function(t) {
+ let code = tojson(recurser);
+ [1, 2, 10].forEach(function(depth) {
+ clearRawMongoProgramOutput();
+ assert.throws(startParallelShell(
+ code + ";\nrecurser(0," + depth + "," + tojson(t.callback) + ");", false, true));
+ let output = rawMongoProgramOutput();
+ let lines = output.split(/\s*\n/);
+ let matchShellExp = false;
+    while (lines.length > 0 && matchShellExp !== true) {
+ let line = lines.shift();
+ if (line.match(/MongoDB shell version/)) {
+ matchShellExp = true;
}
- assert(matchShellExp);
- assertMatch(/^\s*$/, lines.pop());
- assertMatch(/exiting with code/, lines.pop());
- assertMatch(new RegExp("\\\[js\\\] " + t.match + "$"), lines.shift());
+ }
+ assert(matchShellExp);
+ assertMatch(/^\s*$/, lines.pop());
+ assertMatch(/exiting with code/, lines.pop());
+ assertMatch(new RegExp("\\\[js\\\] " + t.match + "$"), lines.shift());
- if (t.stack == true) {
- assert.eq(lines.length,
- depth + 2); // plus one for the shell and one for the callback
- lines.forEach(function(l) {
- assertMatch(/\@\(shell eval\):\d+:\d+/, l);
- });
- lines.pop();
- lines.shift();
- lines.forEach(function(l) {
- assertMatch(/recurser\@/, l);
- });
- } else if (t.stack == false) {
- assert.eq(lines.length, 0);
- } else if (t.stack == undefined) {
- assert.eq(lines.length, 1);
- assertMatch(/undefined/, lines.pop());
- }
- });
+ if (t.stack == true) {
+ assert.eq(lines.length,
+ depth + 2); // plus one for the shell and one for the callback
+ lines.forEach(function(l) {
+ assertMatch(/\@\(shell eval\):\d+:\d+/, l);
+ });
+ lines.pop();
+ lines.shift();
+ lines.forEach(function(l) {
+ assertMatch(/recurser\@/, l);
+ });
+ } else if (t.stack == false) {
+ assert.eq(lines.length, 0);
+ } else if (t.stack == undefined) {
+ assert.eq(lines.length, 1);
+ assertMatch(/undefined/, lines.pop());
+ }
});
+});
})();
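Aside: js_exceptions.js scrapes a throwaway shell's output with the clearRawMongoProgramOutput/rawMongoProgramOutput pair. A minimal sketch of that pattern; 'conn' is an assumed connection:

    clearRawMongoProgramOutput();
    const join = startParallelShell('print("hello from child");', conn.port);
    join();  // waits for the child shell and asserts it exited cleanly
    assert(/hello from child/.test(rawMongoProgramOutput()));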
diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js
index eda42395cd9..7783488a663 100644
--- a/jstests/noPassthrough/js_protection.js
+++ b/jstests/noPassthrough/js_protection.js
@@ -11,85 +11,84 @@
*/
(function() {
- "use strict";
-
- var testServer = MongoRunner.runMongod({setParameter: "javascriptProtection=true"});
- assert.neq(
- null, testServer, "failed to start mongod with --setParameter=javascriptProtection=true");
-
- var db = testServer.getDB("test");
- var t = db.js_protection;
-
- function assertMongoClientCorrect() {
- var functionToEval = function() {
- var doc = db.js_protection.findOne({_id: 0});
- assert.neq(null, doc);
- assert(doc.hasOwnProperty("myFunc"));
- assert.neq("function",
- typeof doc.myFunc,
- "value of BSON type Code shouldn't have been eval()ed automatically");
-
- assert.eq("undefined", typeof addOne, "addOne function has already been defined");
- db.loadServerScripts();
- assert.neq(
- "undefined", typeof addOne, "addOne function should have been eval()ed locally");
- assert.eq(5, addOne(4));
- };
-
- var exitCode = runMongoProgram("mongo",
- "--port",
- testServer.port,
- "--enableJavaScriptProtection",
- "--eval",
- "(" + functionToEval.toString() + ")();");
- assert.eq(0, exitCode);
- }
+"use strict";
- function assertNoStoredWhere() {
- t.insertOne({name: "testdoc", val: 0, y: 0});
+var testServer = MongoRunner.runMongod({setParameter: "javascriptProtection=true"});
+assert.neq(
+ null, testServer, "failed to start mongod with --setParameter=javascriptProtection=true");
- var res = t.update({$where: "addOne(this.val) === 1"}, {$set: {y: 100}}, false, true);
- assert.writeError(res);
+var db = testServer.getDB("test");
+var t = db.js_protection;
- var doc = t.findOne({name: "testdoc"});
- assert.neq(null, doc);
- assert.eq(0, doc.y, tojson(doc));
-
- res = t.update({
- $where: function() {
- return this.val === 0;
- }
- },
- {$set: {y: 100}},
- false,
- true);
- assert.writeOK(res);
-
- doc = t.findOne({name: "testdoc"});
+function assertMongoClientCorrect() {
+ var functionToEval = function() {
+ var doc = db.js_protection.findOne({_id: 0});
assert.neq(null, doc);
- assert.eq(100, doc.y, tojson(doc));
- }
+ assert(doc.hasOwnProperty("myFunc"));
+ assert.neq("function",
+ typeof doc.myFunc,
+ "value of BSON type Code shouldn't have been eval()ed automatically");
+
+ assert.eq("undefined", typeof addOne, "addOne function has already been defined");
+ db.loadServerScripts();
+ assert.neq("undefined", typeof addOne, "addOne function should have been eval()ed locally");
+ assert.eq(5, addOne(4));
+ };
+
+ var exitCode = runMongoProgram("mongo",
+ "--port",
+ testServer.port,
+ "--enableJavaScriptProtection",
+ "--eval",
+ "(" + functionToEval.toString() + ")();");
+ assert.eq(0, exitCode);
+}
+
+function assertNoStoredWhere() {
+ t.insertOne({name: "testdoc", val: 0, y: 0});
+
+ var res = t.update({$where: "addOne(this.val) === 1"}, {$set: {y: 100}}, false, true);
+ assert.writeError(res);
+
+ var doc = t.findOne({name: "testdoc"});
+ assert.neq(null, doc);
+ assert.eq(0, doc.y, tojson(doc));
+
+ res = t.update({
+ $where: function() {
+ return this.val === 0;
+ }
+ },
+ {$set: {y: 100}},
+ false,
+ true);
+ assert.writeOK(res);
- /**
- * ACTUAL TEST
- */
+ doc = t.findOne({name: "testdoc"});
+ assert.neq(null, doc);
+ assert.eq(100, doc.y, tojson(doc));
+}
- db.system.js.insertOne({
- _id: "addOne",
- value: function(x) {
- return x + 1;
- }
- });
+/**
+ * ACTUAL TEST
+ */
- t.insertOne({
- _id: 0,
- myFunc: function() {
- return "testval";
- }
- });
+db.system.js.insertOne({
+ _id: "addOne",
+ value: function(x) {
+ return x + 1;
+ }
+});
+
+t.insertOne({
+ _id: 0,
+ myFunc: function() {
+ return "testval";
+ }
+});
- assertMongoClientCorrect();
- assertNoStoredWhere();
+assertMongoClientCorrect();
+assertNoStoredWhere();
- MongoRunner.stopMongod(testServer);
+MongoRunner.stopMongod(testServer);
})();
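Aside: with --enableJavaScriptProtection the shell returns BSON Code values as Code objects instead of auto-eval()ed functions, so evaluation becomes an explicit opt-in. A sketch against the document shape used above:

    // In a shell started with --enableJavaScriptProtection:
    const doc = db.js_protection.findOne({_id: 0});
    assert(doc.myFunc instanceof Code);            // not a callable function
    const fn = eval('(' + doc.myFunc.code + ')');  // explicit evaluation
    fn();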
diff --git a/jstests/noPassthrough/js_protection_roundtrip.js b/jstests/noPassthrough/js_protection_roundtrip.js
index 59a1623419b..5c0c0b4da10 100644
--- a/jstests/noPassthrough/js_protection_roundtrip.js
+++ b/jstests/noPassthrough/js_protection_roundtrip.js
@@ -8,50 +8,50 @@
* 'CodeWScope'.
*/
(function() {
- "use strict";
+"use strict";
- var testServer = MongoRunner.runMongod();
- assert.neq(null, testServer, "failed to start mongod");
- var db = testServer.getDB("test");
- var t = db.js_protection_roundtrip;
+var testServer = MongoRunner.runMongod();
+assert.neq(null, testServer, "failed to start mongod");
+var db = testServer.getDB("test");
+var t = db.js_protection_roundtrip;
- function withoutJavaScriptProtection() {
- var doc = db.js_protection_roundtrip.findOne({_id: 0});
- assert.neq(doc, null);
- assert.eq(typeof doc.myFunc, "function", "myFunc should have been presented as a function");
- assert.eq(doc.myFunc(), "yes");
- }
+function withoutJavaScriptProtection() {
+ var doc = db.js_protection_roundtrip.findOne({_id: 0});
+ assert.neq(doc, null);
+ assert.eq(typeof doc.myFunc, "function", "myFunc should have been presented as a function");
+ assert.eq(doc.myFunc(), "yes");
+}
- function withJavaScriptProtection() {
- var doc = db.js_protection_roundtrip.findOne({_id: 0});
- assert.neq(doc, null);
- assert(doc.myFunc instanceof Code, "myFunc should have been a Code object");
- doc.myFunc = eval("(" + doc.myFunc.code + ")");
- assert.eq(doc.myFunc(), "yes");
- }
+function withJavaScriptProtection() {
+ var doc = db.js_protection_roundtrip.findOne({_id: 0});
+ assert.neq(doc, null);
+ assert(doc.myFunc instanceof Code, "myFunc should have been a Code object");
+ doc.myFunc = eval("(" + doc.myFunc.code + ")");
+ assert.eq(doc.myFunc(), "yes");
+}
- function testFunctionUnmarshall(jsProtection, evalFunc) {
- var evalString = "(" + tojson(evalFunc) + ")();";
- var protectionFlag =
- jsProtection ? "--enableJavaScriptProtection" : "--disableJavaScriptProtection";
- var exitCode = runMongoProgram(
- "mongo", "--port", testServer.port, protectionFlag, "--eval", evalString);
- assert.eq(exitCode, 0);
- }
+function testFunctionUnmarshall(jsProtection, evalFunc) {
+ var evalString = "(" + tojson(evalFunc) + ")();";
+ var protectionFlag =
+ jsProtection ? "--enableJavaScriptProtection" : "--disableJavaScriptProtection";
+ var exitCode =
+ runMongoProgram("mongo", "--port", testServer.port, protectionFlag, "--eval", evalString);
+ assert.eq(exitCode, 0);
+}
- /**
- * ACTUAL TEST
- */
- var result = t.insert({
- _id: 0,
- myFunc: function() {
- return "yes";
- }
- });
- assert.writeOK(result);
+/**
+ * ACTUAL TEST
+ */
+var result = t.insert({
+ _id: 0,
+ myFunc: function() {
+ return "yes";
+ }
+});
+assert.writeOK(result);
- testFunctionUnmarshall(true, withJavaScriptProtection);
- testFunctionUnmarshall(false, withoutJavaScriptProtection);
+testFunctionUnmarshall(true, withJavaScriptProtection);
+testFunctionUnmarshall(false, withoutJavaScriptProtection);
- MongoRunner.stopMongod(testServer);
+MongoRunner.stopMongod(testServer);
})();
diff --git a/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js b/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js
index f16a757c3f5..5ed0be8f101 100644
--- a/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js
+++ b/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js
@@ -3,57 +3,56 @@
* ignores unknown keywords within $jsonSchema.
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- const options = {setParameter: "internalQueryIgnoreUnknownJSONSchemaKeywords=1"};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
-
- const testDB = conn.getDB("test");
- const coll = testDB.getCollection("jstests_json_schema_ignore_unsupported");
-
- assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0}, false);
- assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0, a: 1}, true);
- assertSchemaMatch(
- coll, {properties: {a: {my_keyword: "ignored", minProperties: 1}}}, {a: {b: 1}}, true);
-
- // Test that the same query knob does not change the behavior for unsupported keywords.
- {
- let res =
- coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res =
- coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand(
- {find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
- }
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+const options = {
+ setParameter: "internalQueryIgnoreUnknownJSONSchemaKeywords=1"
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
+
+const testDB = conn.getDB("test");
+const coll = testDB.getCollection("jstests_json_schema_ignore_unsupported");
+
+assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0}, false);
+assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0, a: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {a: {my_keyword: "ignored", minProperties: 1}}}, {a: {b: 1}}, true);
+
+// Test that the same query knob does not change the behavior for unsupported keywords.
+{
+ let res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}
+ });
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}}
+ });
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}}
+ });
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+}
+
+MongoRunner.stopMongod(conn);
}());
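Aside: with internalQueryIgnoreUnknownJSONSchemaKeywords=1, unrecognized keywords are skipped while the known-but-unsupported ones exercised above still fail to parse. An illustrative pair of queries against a hypothetical collection 'c':

    // The unknown keyword 'my_keyword' is ignored; only minProperties applies.
    assert.commandWorked(db.runCommand(
        {find: 'c', filter: {$jsonSchema: {my_keyword: 'ignored', minProperties: 2}}}));
    // 'format' is recognized but unsupported, so it is still rejected.
    assert.commandFailedWithCode(
        db.runCommand({find: 'c', filter: {$jsonSchema: {format: 'email'}}}),
        ErrorCodes.FailedToParse);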
diff --git a/jstests/noPassthrough/kill_pinned_cursor.js b/jstests/noPassthrough/kill_pinned_cursor.js
index f7233bc0d66..86786fd3084 100644
--- a/jstests/noPassthrough/kill_pinned_cursor.js
+++ b/jstests/noPassthrough/kill_pinned_cursor.js
@@ -11,107 +11,104 @@
// batches are generated, this requires some special machinery to keep a cursor permanently pinned.
(function() {
- "use strict";
+"use strict";
- // This test runs manual getMores using different connections, which will not inherit the
- // implicit session of the cursor establishing command.
- TestData.disableImplicitSessions = true;
+// This test runs manual getMores using different connections, which will not inherit the
+// implicit session of the cursor establishing command.
+TestData.disableImplicitSessions = true;
- load("jstests/libs/fixture_helpers.js"); // For "isMongos".
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
- const st = new ShardingTest({shards: 2});
+load("jstests/libs/fixture_helpers.js"); // For "isMongos".
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+const st = new ShardingTest({shards: 2});
- // Enables the specified 'failPointName', executes 'runGetMoreFunc' function in a parallel
- // shell, waits for the the failpoint to be hit, then kills the cursor and confirms that the
- // kill was successful.
- function runPinnedCursorKillTest({conn, failPointName, runGetMoreFunc}) {
- function assertFunction(cursorId, coll) {
- const db = coll.getDB();
- // Kill the cursor associated with the command and assert that the kill succeeded.
- let cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [cursorId]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsNotFound, []);
- assert.eq(cmdRes.cursorsUnknown, []);
- }
- withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: conn.getDB("test"),
- assertFunction: assertFunction,
- runGetMoreFunc: runGetMoreFunc,
- failPointName: failPointName,
- assertEndCounts: true
- });
+// Enables the specified 'failPointName', executes the 'runGetMoreFunc' function in a parallel
+// shell, waits for the failpoint to be hit, then kills the cursor and confirms that the
+// kill was successful.
+function runPinnedCursorKillTest({conn, failPointName, runGetMoreFunc}) {
+ function assertFunction(cursorId, coll) {
+ const db = coll.getDB();
+ // Kill the cursor associated with the command and assert that the kill succeeded.
+ let cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
+ assert.commandWorked(cmdRes);
+ assert.eq(cmdRes.cursorsKilled, [cursorId]);
+ assert.eq(cmdRes.cursorsAlive, []);
+ assert.eq(cmdRes.cursorsNotFound, []);
+ assert.eq(cmdRes.cursorsUnknown, []);
}
+ withPinnedCursor({
+ conn: conn,
+ sessionId: null,
+ db: conn.getDB("test"),
+ assertFunction: assertFunction,
+ runGetMoreFunc: runGetMoreFunc,
+ failPointName: failPointName,
+ assertEndCounts: true
+ });
+}
- // Test that killing the pinned cursor before it starts building the batch results in a
- // CursorKilled exception on a replica set.
- const rs0Conn = st.rs0.getPrimary();
- const testParameters = {
- conn: rs0Conn,
- failPointName: "waitAfterPinningCursorBeforeGetMoreBatch",
- runGetMoreFunc: function() {
- const response = db.runCommand({getMore: cursorId, collection: collName});
- // We expect that the operation will get interrupted and fail.
- assert.commandFailedWithCode(response, ErrorCodes.CursorKilled);
- }
- };
- runPinnedCursorKillTest(testParameters);
+// Test that killing the pinned cursor before it starts building the batch results in a
+// CursorKilled exception on a replica set.
+const rs0Conn = st.rs0.getPrimary();
+const testParameters = {
+ conn: rs0Conn,
+ failPointName: "waitAfterPinningCursorBeforeGetMoreBatch",
+ runGetMoreFunc: function() {
+ const response = db.runCommand({getMore: cursorId, collection: collName});
+ // We expect that the operation will get interrupted and fail.
+ assert.commandFailedWithCode(response, ErrorCodes.CursorKilled);
+ }
+};
+runPinnedCursorKillTest(testParameters);
- // Check the case where a killCursor is run as we're building a getMore batch on mongod.
- (function() {
- testParameters.conn = rs0Conn;
- testParameters.failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
+// Check the case where a killCursors is run as we're building a getMore batch on mongod.
+(function() {
+testParameters.conn = rs0Conn;
+testParameters.failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
- // Force yield to occur on every PlanExecutor iteration, so that the getMore is guaranteed
- // to check for interrupts.
- assert.commandWorked(testParameters.conn.getDB("admin").runCommand(
- {setParameter: 1, internalQueryExecYieldIterations: 1}));
- runPinnedCursorKillTest(testParameters);
- })();
+// Force yield to occur on every PlanExecutor iteration, so that the getMore is guaranteed
+// to check for interrupts.
+assert.commandWorked(testParameters.conn.getDB("admin").runCommand(
+ {setParameter: 1, internalQueryExecYieldIterations: 1}));
+runPinnedCursorKillTest(testParameters);
+})();
- (function() {
- // Run the equivalent test on the mongos. This time, we will force the shards to hang as
- // well. This is so that we can guarantee that the mongos is checking for interruption at
- // the appropriate time, and not just propagating an error it receives from the mongods.
- testParameters.failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- FixtureHelpers.runCommandOnEachPrimary({
- db: st.s.getDB("admin"),
- cmdObj: {
- configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
- mode: "alwaysOn"
- }
- });
- testParameters.conn = st.s;
- runPinnedCursorKillTest(testParameters);
- FixtureHelpers.runCommandOnEachPrimary({
- db: st.s.getDB("admin"),
- cmdObj: {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}
- });
- })();
+(function() {
+// Run the equivalent test on the mongos. This time, we will force the shards to hang as
+// well. This is so that we can guarantee that the mongos is checking for interruption at
+// the appropriate time, and not just propagating an error it receives from the mongods.
+testParameters.failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+FixtureHelpers.runCommandOnEachPrimary({
+ db: st.s.getDB("admin"),
+ cmdObj: {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "alwaysOn"}
+});
+testParameters.conn = st.s;
+runPinnedCursorKillTest(testParameters);
+FixtureHelpers.runCommandOnEachPrimary({
+ db: st.s.getDB("admin"),
+ cmdObj: {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}
+});
+})();
- // Check this case where the interrupt comes in after the batch has been built, and is about to
- // be returned. This is relevant for both mongod and mongos.
- const connsToRunOn = [st.s, rs0Conn];
- for (let conn of connsToRunOn) {
- jsTestLog("Running on conn: " + tojson(conn));
+// Check the case where the interrupt comes in after the batch has been built and is about to
+// be returned. This is relevant for both mongod and mongos.
+const connsToRunOn = [st.s, rs0Conn];
+for (let conn of connsToRunOn) {
+ jsTestLog("Running on conn: " + tojson(conn));
- // Test that, if the pinned cursor is killed after it has finished building a batch, that
- // batch is returned to the client but a subsequent getMore will fail with a
- // 'CursorNotFound' error.
- testParameters.failPointName = "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch";
- testParameters.runGetMoreFunc = function() {
- const getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 2};
- // We expect that the first getMore will succeed, while the second fails because the
- // cursor has been killed.
- assert.commandWorked(db.runCommand(getMoreCmd));
- assert.commandFailedWithCode(db.runCommand(getMoreCmd), ErrorCodes.CursorNotFound);
- };
+ // Test that, if the pinned cursor is killed after it has finished building a batch, that
+ // batch is returned to the client but a subsequent getMore will fail with a
+ // 'CursorNotFound' error.
+ testParameters.failPointName = "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch";
+ testParameters.runGetMoreFunc = function() {
+ const getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 2};
+ // We expect that the first getMore will succeed, while the second fails because the
+ // cursor has been killed.
+ assert.commandWorked(db.runCommand(getMoreCmd));
+ assert.commandFailedWithCode(db.runCommand(getMoreCmd), ErrorCodes.CursorNotFound);
+ };
- runPinnedCursorKillTest(testParameters);
- }
+ runPinnedCursorKillTest(testParameters);
+}
- st.stop();
+st.stop();
})();
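Aside: stripped of the failpoint machinery, the assertion at the heart of the test is a plain killCursors command and its four result arrays. A minimal sketch against a hypothetical, non-empty collection 'coll':

    const res = db.runCommand({find: 'coll', batchSize: 0});  // open a cursor, fetch nothing
    const cursorId = res.cursor.id;
    const kill = db.runCommand({killCursors: 'coll', cursors: [cursorId]});
    assert.commandWorked(kill);
    assert.eq(kill.cursorsKilled, [cursorId]);
    assert.eq(kill.cursorsAlive, []);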
diff --git a/jstests/noPassthrough/kill_sessions.js b/jstests/noPassthrough/kill_sessions.js
index 0211dba95ef..f0e7a05a4ba 100644
--- a/jstests/noPassthrough/kill_sessions.js
+++ b/jstests/noPassthrough/kill_sessions.js
@@ -1,13 +1,13 @@
load("jstests/libs/kill_sessions.js");
(function() {
- 'use strict';
+'use strict';
- // TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
- // if the kill command is sent with an implicit session.
- TestData.disableImplicitSessions = true;
+// TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
+// if the kill command is sent with an implicit session.
+TestData.disableImplicitSessions = true;
- var conn = MongoRunner.runMongod();
- KillSessionsTestHelper.runNoAuth(conn, conn, [conn]);
- MongoRunner.stopMongod(conn);
+var conn = MongoRunner.runMongod();
+KillSessionsTestHelper.runNoAuth(conn, conn, [conn]);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/killop.js b/jstests/noPassthrough/killop.js
index 69305f25747..53f14b1f838 100644
--- a/jstests/noPassthrough/killop.js
+++ b/jstests/noPassthrough/killop.js
@@ -2,72 +2,71 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- "use strict";
-
- const dbName = "killop";
- const collName = "test";
-
- // 'conn' is a connection to either a mongod when testing a replicaset or a mongos when testing
- // a sharded cluster. 'shardConn' is a connection to the mongod we enable failpoints on.
- function runTest(conn, shardConn) {
- const db = conn.getDB(dbName);
- assert.commandWorked(db.dropDatabase());
- assert.writeOK(db.getCollection(collName).insert({x: 1}));
-
- assert.commandWorked(
- shardConn.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- assert.commandWorked(shardConn.adminCommand(
- {"configureFailPoint": "setYieldAllLocksHang", "mode": "alwaysOn"}));
-
- const queryToKill = "assert.commandWorked(db.getSiblingDB('" + dbName +
- "').runCommand({find: '" + collName + "', filter: {x: 1}}));";
- const awaitShell = startParallelShell(queryToKill, conn.port);
- let opId;
-
- assert.soon(
- function() {
- const result =
- db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
- assert.commandWorked(result);
- if (result.inprog.length === 1 && result.inprog[0].numYields > 0) {
- opId = result.inprog[0].opid;
- return true;
- }
-
- return false;
- },
- function() {
- return "Failed to find operation in currentOp() output: " +
- tojson(db.currentOp({"ns": dbName + "." + collName}));
- });
-
- assert.commandWorked(db.killOp(opId));
-
- let result = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
- assert.commandWorked(result);
- assert(result.inprog.length === 1, tojson(db.currentOp()));
- assert(result.inprog[0].hasOwnProperty("killPending"));
- assert.eq(true, result.inprog[0].killPending);
-
- assert.commandWorked(
- shardConn.adminCommand({"configureFailPoint": "setYieldAllLocksHang", "mode": "off"}));
-
- const exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
-
- result = db.currentOp({"ns": dbName + "." + collName, "query.filter": {x: 1}});
- assert.commandWorked(result);
- assert(result.inprog.length === 0, tojson(db.currentOp()));
- }
-
- const st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- const shardConn = st.rs0.getPrimary();
-
- // Test killOp against mongod.
- runTest(shardConn, shardConn);
-
- // Test killOp against mongos.
- runTest(st.s, shardConn);
-
- st.stop();
+"use strict";
+
+const dbName = "killop";
+const collName = "test";
+
+// 'conn' is a connection to either a mongod when testing a replica set or a mongos when testing
+// a sharded cluster. 'shardConn' is a connection to the mongod we enable failpoints on.
+function runTest(conn, shardConn) {
+ const db = conn.getDB(dbName);
+ assert.commandWorked(db.dropDatabase());
+ assert.writeOK(db.getCollection(collName).insert({x: 1}));
+
+ assert.commandWorked(
+ shardConn.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+ assert.commandWorked(
+ shardConn.adminCommand({"configureFailPoint": "setYieldAllLocksHang", "mode": "alwaysOn"}));
+
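+    // With yields forced after every document and the 'setYieldAllLocksHang' failpoint enabled,
+    // the find below hangs whenever it yields, keeping it visible in currentOp() until killed.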
+ const queryToKill = "assert.commandWorked(db.getSiblingDB('" + dbName +
+ "').runCommand({find: '" + collName + "', filter: {x: 1}}));";
+ const awaitShell = startParallelShell(queryToKill, conn.port);
+ let opId;
+
+ assert.soon(
+ function() {
+ const result = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
+ assert.commandWorked(result);
+ if (result.inprog.length === 1 && result.inprog[0].numYields > 0) {
+ opId = result.inprog[0].opid;
+ return true;
+ }
+
+ return false;
+ },
+ function() {
+ return "Failed to find operation in currentOp() output: " +
+ tojson(db.currentOp({"ns": dbName + "." + collName}));
+ });
+
+ assert.commandWorked(db.killOp(opId));
+
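+    // The killed operation is still hung on the failpoint, so it should still appear in
+    // currentOp() with its kill marked as pending.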
+ let result = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
+ assert.commandWorked(result);
+ assert(result.inprog.length === 1, tojson(db.currentOp()));
+ assert(result.inprog[0].hasOwnProperty("killPending"));
+ assert.eq(true, result.inprog[0].killPending);
+
+ assert.commandWorked(
+ shardConn.adminCommand({"configureFailPoint": "setYieldAllLocksHang", "mode": "off"}));
+
+ const exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
+
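+    // Once the parallel shell has exited, the killed operation should no longer appear in
+    // currentOp().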
+ result = db.currentOp({"ns": dbName + "." + collName, "query.filter": {x: 1}});
+ assert.commandWorked(result);
+ assert(result.inprog.length === 0, tojson(db.currentOp()));
+}
+
+const st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+const shardConn = st.rs0.getPrimary();
+
+// Test killOp against mongod.
+runTest(shardConn, shardConn);
+
+// Test killOp against mongos.
+runTest(st.s, shardConn);
+
+st.stop();
})();
diff --git a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
index e3f10dade92..5b1757188e7 100644
--- a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
+++ b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
@@ -4,152 +4,151 @@
* @tags: [requires_profiling]
*/
(function() {
- "use strict";
+"use strict";
- /**
- * Configures the server to wait for 'millis' while acquiring locks in the CRUD path, then
- * invokes the no-arguments function 'func', then disables the aforementioned lock wait
- * behavior.
- */
- function runWithWait(millis, func) {
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "setAutoGetCollectionWait",
- mode: "alwaysOn",
- data: {waitForMillis: millis}
- }));
- func();
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "setAutoGetCollectionWait",
- mode: "off",
- }));
- }
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/profiler.js");
+/**
+ * Configures the server to wait for 'millis' while acquiring locks in the CRUD path, then
+ * invokes the no-arguments function 'func', then disables the aforementioned lock wait
+ * behavior.
+ */
+function runWithWait(millis, func) {
+ assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "setAutoGetCollectionWait",
+ mode: "alwaysOn",
+ data: {waitForMillis: millis}
+ }));
+ func();
+ assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "setAutoGetCollectionWait",
+ mode: "off",
+ }));
+}
- let hangMillis = 200;
- let padding = hangMillis / 10;
+load("jstests/libs/check_log.js");
+load("jstests/libs/profiler.js");
- let conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB("test");
- let testColl = testDB.lock_acquisition_time;
+let hangMillis = 200;
+let padding = hangMillis / 10;
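+// Allow 10% slack when comparing measured latencies against 'hangMillis', since timer and
+// profiler granularity can make the reported times come in slightly low.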
- function runTests() {
- // Profile all operations.
- assert.commandWorked(testDB.setProfilingLevel(0));
- testDB.system.profile.drop();
- assert.commandWorked(testDB.setProfilingLevel(2));
+let conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+let testDB = conn.getDB("test");
+let testColl = testDB.lock_acquisition_time;
- // Test that insert profiler/logs include lock acquisition time. Rather than parsing the log
- // lines, we are just verifying that the log line appears, which implies that the recorded
- // latency exceeds slowms.
- runWithWait(hangMillis, function() {
- assert.writeOK(testColl.insert({a: 1}));
- });
- let profileEntry;
- if (conn.writeMode() === "commands") {
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.insert": testColl.getName(),
- });
- } else {
- profileEntry = getLatestProfilerEntry(testDB, {
- op: "insert",
- ns: testColl.getFullName(),
- });
- }
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- conn.writeMode() === "commands"
- ? "insert { insert: \"lock_acquisition_time\""
- : "insert test.lock_acquisition_time");
+function runTests() {
+ // Profile all operations.
+ assert.commandWorked(testDB.setProfilingLevel(0));
+ testDB.system.profile.drop();
+ assert.commandWorked(testDB.setProfilingLevel(2));
- // Test that update profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.writeOK(testColl.update({}, {$set: {b: 1}}));
- });
+ // Test that insert profiler/logs include lock acquisition time. Rather than parsing the log
+ // lines, we are just verifying that the log line appears, which implies that the recorded
+ // latency exceeds slowms.
+ runWithWait(hangMillis, function() {
+ assert.writeOK(testColl.insert({a: 1}));
+ });
+ let profileEntry;
+ if (conn.writeMode() === "commands") {
profileEntry = getLatestProfilerEntry(testDB, {
ns: testColl.getFullName(),
- "command.u": {$eq: {$set: {b: 1}}},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "update { update: \"lock_acquisition_time\"");
-
- // Test that find profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.find({b: 1}).itcount());
+ "command.insert": testColl.getName(),
});
+ } else {
profileEntry = getLatestProfilerEntry(testDB, {
+ op: "insert",
ns: testColl.getFullName(),
- "command.find": testColl.getName(),
});
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "find { find: \"lock_acquisition_time\"");
+ }
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn,
+ conn.writeMode() === "commands" ? "insert { insert: \"lock_acquisition_time\""
+ : "insert test.lock_acquisition_time");
- // Test that getMore profiler/logs include lock acquisition time.
- assert.writeOK(testColl.insert([{a: 2}, {a: 3}]));
- runWithWait(hangMillis, function() {
- // Include a batchSize in order to ensure that a getMore is issued.
- assert.eq(3, testColl.find().batchSize(2).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.getMore": {$exists: true},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "originatingCommand: { find: \"lock_acquisition_time\"");
- assert.writeOK(testColl.remove({a: {$gt: 1}}));
+ // Test that update profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.writeOK(testColl.update({}, {$set: {b: 1}}));
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.u": {$eq: {$set: {b: 1}}},
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "update { update: \"lock_acquisition_time\"");
- // Test that aggregate profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.aggregate([{$match: {b: 1}}]).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.aggregate": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "aggregate { aggregate: \"lock_acquisition_time\"");
+ // Test that find profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.find({b: 1}).itcount());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.find": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "find { find: \"lock_acquisition_time\"");
- // Test that count profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.count());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.count": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "count { count: \"lock_acquisition_time\"");
+ // Test that getMore profiler/logs include lock acquisition time.
+ assert.writeOK(testColl.insert([{a: 2}, {a: 3}]));
+ runWithWait(hangMillis, function() {
+ // Include a batchSize in order to ensure that a getMore is issued.
+ assert.eq(3, testColl.find().batchSize(2).itcount());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.getMore": {$exists: true},
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "originatingCommand: { find: \"lock_acquisition_time\"");
+ assert.writeOK(testColl.remove({a: {$gt: 1}}));
- // Test that distinct profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq([1], testColl.distinct("a"));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.distinct": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "distinct { distinct: \"lock_acquisition_time\"");
+ // Test that aggregate profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.aggregate([{$match: {b: 1}}]).itcount());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.aggregate": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "aggregate { aggregate: \"lock_acquisition_time\"");
- // Test that delete profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.writeOK(testColl.remove({b: 1}));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.q": {b: 1},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "delete { delete: \"lock_acquisition_time\"");
- }
+ // Test that count profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.count());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.count": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "count { count: \"lock_acquisition_time\"");
+
+ // Test that distinct profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq([1], testColl.distinct("a"));
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.distinct": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "distinct { distinct: \"lock_acquisition_time\"");
+
+ // Test that delete profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.writeOK(testColl.remove({b: 1}));
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.q": {b: 1},
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "delete { delete: \"lock_acquisition_time\"");
+}
- // Run the tests once with read and write commands and once with legacy ops.
- runTests();
- conn.forceWriteMode("compatibility");
- conn.forceReadMode("legacy");
- runTests();
- MongoRunner.stopMongod(conn);
+// Run the tests once with read and write commands and once with legacy ops.
+runTests();
+conn.forceWriteMode("compatibility");
+conn.forceReadMode("legacy");
+runTests();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/launcher_test.js b/jstests/noPassthrough/launcher_test.js
index a4d00ae19f7..a9fc9384c4b 100644
--- a/jstests/noPassthrough/launcher_test.js
+++ b/jstests/noPassthrough/launcher_test.js
@@ -1,32 +1,31 @@
// Note: This test cannot be run in parallel because all output from child processes of the same
// shell is multiplexed to the same buffer.
(function() {
- "use strict";
+"use strict";
- // Note: the windows command line length limit is 8191 characters, so keep this string length
- // under that.
- const numLines = 300;
- const lineContents = "lots of super fun text\n".repeat(numLines).trim();
+// Note: the Windows command line length limit is 8191 characters, so keep this string length
+// under that.
+const numLines = 300;
+const lineContents = "lots of super fun text\n".repeat(numLines).trim();
- var echoTest = function() {
- clearRawMongoProgramOutput();
+var echoTest = function() {
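+    // Clear the shared output buffer so the line count below reflects only this echo run.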
+ clearRawMongoProgramOutput();
- // This will produce `numLines` + 1 lines of output because echo isn't being called with
- // `-n`. This will block until the program exits.
- var exitCode = runProgram("echo", lineContents);
- var output = rawMongoProgramOutput();
+ // This will produce `numLines` + 1 lines of output because echo isn't being called with
+ // `-n`. This will block until the program exits.
+ var exitCode = runProgram("echo", lineContents);
+ var output = rawMongoProgramOutput();
- assert.eq(0, exitCode);
+ assert.eq(0, exitCode);
- assert.eq(numLines,
- output.split('\n').length - 1,
- "didn't wait for program's output buffer to finish being consumed");
- };
-
- // The motivating failure for the test was a race in runProgram. Empirically, 10 runs has always
- // been sufficient for this to fail. 16 gives the test some leeway.
- for (var i = 0; i < 16; i++) {
- echoTest();
- }
+ assert.eq(numLines,
+ output.split('\n').length - 1,
+ "didn't wait for program's output buffer to finish being consumed");
+};
+
+// The motivating failure for this test was a race in runProgram. Empirically, 10 runs have
+// always been sufficient for it to fail; 16 gives the test some leeway.
+for (var i = 0; i < 16; i++) {
+ echoTest();
+}
})();
diff --git a/jstests/noPassthrough/libs/backup_restore.js b/jstests/noPassthrough/libs/backup_restore.js
index b18fb9e25d6..37411b9d061 100644
--- a/jstests/noPassthrough/libs/backup_restore.js
+++ b/jstests/noPassthrough/libs/backup_restore.js
@@ -152,8 +152,8 @@ var BackupRestoreTest = function(options) {
assert(options.backup, "Backup option not supplied");
assert.contains(options.backup,
allowedBackupKeys,
- 'invalid option: ' + tojson(options.backup) + '; valid options are: ' +
- tojson(allowedBackupKeys));
+ 'invalid option: ' + tojson(options.backup) +
+ '; valid options are: ' + tojson(allowedBackupKeys));
// Number of nodes in initial replica set (default 3)
var numNodes = options.nodes || 3;
diff --git a/jstests/noPassthrough/libs/configExpand/lib.js b/jstests/noPassthrough/libs/configExpand/lib.js
index c1ba975565b..c3125d99a2f 100644
--- a/jstests/noPassthrough/libs/configExpand/lib.js
+++ b/jstests/noPassthrough/libs/configExpand/lib.js
@@ -4,8 +4,8 @@
class ConfigExpandRestServer {
/**
- * Create a new webserver.
- */
+ * Create a new webserver.
+ */
constructor() {
load('jstests/libs/python.js');
this.python = getPython3Binary();
diff --git a/jstests/noPassthrough/libs/index_build.js b/jstests/noPassthrough/libs/index_build.js
index 1d3171e13f5..8de49ceb06e 100644
--- a/jstests/noPassthrough/libs/index_build.js
+++ b/jstests/noPassthrough/libs/index_build.js
@@ -71,8 +71,8 @@ class IndexBuildTest {
const inprog = database.currentOp({opid: opId}).inprog;
assert.eq(1,
inprog.length,
- 'unable to find opid ' + opId + ' in currentOp() result: ' +
- tojson(database.currentOp()));
+ 'unable to find opid ' + opId +
+ ' in currentOp() result: ' + tojson(database.currentOp()));
const op = inprog[0];
assert.eq(opId, op.opid, 'db.currentOp() returned wrong index build info: ' + tojson(op));
if (onOperationFn) {
@@ -98,16 +98,14 @@ class IndexBuildTest {
assert.eq(0, res.cursor.id);
// A map of index specs keyed by index name.
- const indexMap = res.cursor.firstBatch.reduce(
- (m, spec) => {
- if (spec.hasOwnProperty('buildUUID')) {
- m[spec.spec.name] = spec;
- } else {
- m[spec.name] = spec;
- }
- return m;
- },
- {});
+ const indexMap = res.cursor.firstBatch.reduce((m, spec) => {
+ if (spec.hasOwnProperty('buildUUID')) {
+ m[spec.spec.name] = spec;
+ } else {
+ m[spec.name] = spec;
+ }
+ return m;
+ }, {});
// Check ready indexes.
for (let name of readyIndexes) {
diff --git a/jstests/noPassthrough/list_databases_and_rename_collection.js b/jstests/noPassthrough/list_databases_and_rename_collection.js
index 9faebcb7dc8..d5504d2582d 100644
--- a/jstests/noPassthrough/list_databases_and_rename_collection.js
+++ b/jstests/noPassthrough/list_databases_and_rename_collection.js
@@ -3,57 +3,57 @@
//
(function() {
- "use strict";
- const dbName = "do_concurrent_rename";
- const collName = "collA";
- const otherName = "collB";
- const repeatListDatabases = 20;
- const listDatabasesCmd = {"listDatabases": 1};
- load("jstests/noPassthrough/libs/concurrent_rename.js");
- load("jstests/libs/parallel_shell_helpers.js");
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- jsTestLog("Create collection.");
- let listRenameDB = conn.getDB(dbName);
- listRenameDB.dropDatabase();
- assert.commandWorked(listRenameDB.runCommand({"create": collName}));
-
- let testDB = conn.getDB("test");
- testDB.dropDatabase();
-
- jsTestLog("Verify database exists.");
- let cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
- assert.commandWorked(cmdRes, "expected " + tojson(listDatabasesCmd) + " to be successful.");
- assert(cmdRes.hasOwnProperty("databases"),
- "expected " + tojson(cmdRes) + " to have a databases property.");
- assert(cmdRes.databases.map(d => d.name).includes(dbName),
- "expected " + tojson(cmdRes) + " to include " + dbName);
-
- jsTestLog("Start parallel shell");
- let renameShell =
- startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
-
- // Wait until we receive confirmation that the parallel shell has started.
- assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null);
-
- jsTestLog("Start listDatabases.");
- while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
- for (let i = 0; i < repeatListDatabases; i++) {
- cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
- assert.commandWorked(cmdRes,
- "expected " + tojson(listDatabasesCmd) + " to be successful.");
- // Database should always exist.
- assert(cmdRes.hasOwnProperty("databases"),
- "expected " + tojson(cmdRes) + " to have a databases property.");
- assert(cmdRes.databases.map(d => d.name).includes(dbName),
- "expected " + tojson(cmdRes) + " to include " + dbName);
- }
+"use strict";
+const dbName = "do_concurrent_rename";
+const collName = "collA";
+const otherName = "collB";
+const repeatListDatabases = 20;
+const listDatabasesCmd = {
+ "listDatabases": 1
+};
+load("jstests/noPassthrough/libs/concurrent_rename.js");
+load("jstests/libs/parallel_shell_helpers.js");
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+jsTestLog("Create collection.");
+let listRenameDB = conn.getDB(dbName);
+listRenameDB.dropDatabase();
+assert.commandWorked(listRenameDB.runCommand({"create": collName}));
+
+let testDB = conn.getDB("test");
+testDB.dropDatabase();
+
+jsTestLog("Verify database exists.");
+let cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
+assert.commandWorked(cmdRes, "expected " + tojson(listDatabasesCmd) + " to be successful.");
+assert(cmdRes.hasOwnProperty("databases"),
+ "expected " + tojson(cmdRes) + " to have a databases property.");
+assert(cmdRes.databases.map(d => d.name).includes(dbName),
+ "expected " + tojson(cmdRes) + " to include " + dbName);
+
+jsTestLog("Start parallel shell");
+let renameShell =
+ startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
+
+// Wait until we receive confirmation that the parallel shell has started.
+assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null);
+
+jsTestLog("Start listDatabases.");
+while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
+ for (let i = 0; i < repeatListDatabases; i++) {
+ cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
+ assert.commandWorked(cmdRes, "expected " + tojson(listDatabasesCmd) + " to be successful.");
+ // Database should always exist.
+ assert(cmdRes.hasOwnProperty("databases"),
+ "expected " + tojson(cmdRes) + " to have a databases property.");
+ assert(cmdRes.databases.map(d => d.name).includes(dbName),
+ "expected " + tojson(cmdRes) + " to include " + dbName);
}
+}
- jsTestLog("Finished running listDatabases.");
-
- renameShell();
- MongoRunner.stopMongod(conn);
+jsTestLog("Finished running listDatabases.");
+renameShell();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
index 0ec11308b8a..a1970beea83 100644
--- a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
+++ b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
@@ -2,39 +2,39 @@
* Tests that the listIndexes command shows ready and in-progress indexes.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/noPassthrough/libs/index_build.js");
+load("jstests/noPassthrough/libs/index_build.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- assert.commandWorked(testDB.dropDatabase());
+const testDB = conn.getDB("test");
+assert.commandWorked(testDB.dropDatabase());
- let coll = testDB.list_indexes_ready_and_in_progress;
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.assertIndexes(coll, 1, ["_id_"]);
- assert.commandWorked(coll.createIndex({a: 1}));
- IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+let coll = testDB.list_indexes_ready_and_in_progress;
+coll.drop();
+assert.commandWorked(testDB.createCollection(coll.getName()));
+IndexBuildTest.assertIndexes(coll, 1, ["_id_"]);
+assert.commandWorked(coll.createIndex({a: 1}));
+IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
- IndexBuildTest.pauseIndexBuilds(conn);
- const createIdx =
- IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}, {background: true});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+IndexBuildTest.pauseIndexBuilds(conn);
+const createIdx =
+ IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}, {background: true});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // The listIndexes command supports returning all indexes, including ones that are not ready.
- IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1"], ["b_1"], {includeBuildUUIDs: true});
+// The listIndexes command supports returning all indexes, including ones that are not ready.
+IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1"], ["b_1"], {includeBuildUUIDs: true});
- IndexBuildTest.resumeIndexBuilds(conn);
+IndexBuildTest.resumeIndexBuilds(conn);
- // Wait for the index build to stop.
- IndexBuildTest.waitForIndexBuildToStop(testDB);
+// Wait for the index build to stop.
+IndexBuildTest.waitForIndexBuildToStop(testDB);
- const exitCode = createIdx();
- assert.eq(0, exitCode, 'expected shell to exit cleanly');
+const exitCode = createIdx();
+assert.eq(0, exitCode, 'expected shell to exit cleanly');
- IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
- MongoRunner.stopMongod(conn);
+IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/list_indexes_with_build_uuids.js b/jstests/noPassthrough/list_indexes_with_build_uuids.js
index 76bea4b5a36..a52b58578a5 100644
--- a/jstests/noPassthrough/list_indexes_with_build_uuids.js
+++ b/jstests/noPassthrough/list_indexes_with_build_uuids.js
@@ -4,80 +4,78 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
-
- const dbName = "test";
- const collName = "coll";
-
- const firstIndexName = "first";
- const secondIndexName = "second";
-
- function addTestDocuments(db) {
- let size = 100;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i, j: i * i});
- }
- assert.writeOK(bulk.execute());
+'use strict';
+
+const dbName = "test";
+const collName = "coll";
+
+const firstIndexName = "first";
+const secondIndexName = "second";
+
+function addTestDocuments(db) {
+ let size = 100;
+ jsTest.log("Creating " + size + " test documents.");
+ var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i, j: i * i});
}
+ assert.writeOK(bulk.execute());
+}
- let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2});
- let nodes = replSet.nodeList();
+let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2});
+let nodes = replSet.nodeList();
- replSet.startSet({startClean: true});
- replSet.initiate({
- _id: "indexBuilds",
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], votes: 0, priority: 0},
- ]
- });
+replSet.startSet({startClean: true});
+replSet.initiate({
+ _id: "indexBuilds",
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], votes: 0, priority: 0},
+ ]
+});
- let primary = replSet.getPrimary();
- let primaryDB = primary.getDB(dbName);
+let primary = replSet.getPrimary();
+let primaryDB = primary.getDB(dbName);
- let secondary = replSet.getSecondary();
- let secondaryDB = secondary.getDB(dbName);
+let secondary = replSet.getSecondary();
+let secondaryDB = secondary.getDB(dbName);
- addTestDocuments(primaryDB);
- replSet.awaitReplication();
+addTestDocuments(primaryDB);
+replSet.awaitReplication();
- // Build and finish the first index.
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {i: 1}, name: firstIndexName, background: true}]
- }));
- replSet.waitForAllIndexBuildsToFinish(dbName, collName);
+// Build and finish the first index.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {i: 1}, name: firstIndexName, background: true}]}));
+replSet.waitForAllIndexBuildsToFinish(dbName, collName);
- // Start hanging index builds on the secondary.
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+// Start hanging index builds on the secondary.
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
- // Build and hang on the second index.
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {j: 1}, name: secondIndexName, background: true}],
- writeConcern: {w: 2}
- }));
+// Build and hang on the second index.
+assert.commandWorked(primaryDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {j: 1}, name: secondIndexName, background: true}],
+ writeConcern: {w: 2}
+}));
- // Check the listIndexes() output.
- let res = secondaryDB.runCommand({listIndexes: collName, includeBuildUUIDs: true});
+// Check the listIndexes() output.
+let res = secondaryDB.runCommand({listIndexes: collName, includeBuildUUIDs: true});
- assert.commandWorked(res);
- let indexes = res.cursor.firstBatch;
- assert.eq(3, indexes.length);
+assert.commandWorked(res);
+let indexes = res.cursor.firstBatch;
+assert.eq(3, indexes.length);
- jsTest.log(indexes);
+jsTest.log(indexes);
- assert.eq(indexes[0].name, "_id_");
- assert.eq(indexes[1].name, "first");
- assert.eq(indexes[2].spec.name, "second");
- assert(indexes[2].hasOwnProperty("buildUUID"));
+assert.eq(indexes[0].name, "_id_");
+assert.eq(indexes[1].name, "first");
+assert.eq(indexes[2].spec.name, "second");
+assert(indexes[2].hasOwnProperty("buildUUID"));
- // Allow the secondary to finish the index build.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+// Allow the secondary to finish the index build.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- replSet.stopSet();
+replSet.stopSet();
}());
diff --git a/jstests/noPassthrough/lock_file.js b/jstests/noPassthrough/lock_file.js
index 5ff9a897bf9..63311e22bfb 100644
--- a/jstests/noPassthrough/lock_file.js
+++ b/jstests/noPassthrough/lock_file.js
@@ -2,30 +2,28 @@
// containing the process ID regardless of the storage engine requested.
(function() {
- // Ensures that mongod.lock exists and returns size of file.
- function getMongodLockFileSize(dir) {
- var files = listFiles(dir);
- for (var i in files) {
- var file = files[i];
- if (!file.isDirectory && file.baseName == 'mongod.lock') {
- return file.size;
- }
+// Ensures that mongod.lock exists and returns size of file.
+function getMongodLockFileSize(dir) {
+ var files = listFiles(dir);
+ for (var i in files) {
+ var file = files[i];
+ if (!file.isDirectory && file.baseName == 'mongod.lock') {
+ return file.size;
}
- assert(false, 'mongod.lock not found in data directory ' + dir);
}
+ assert(false, 'mongod.lock not found in data directory ' + dir);
+}
- var baseName = "jstests_lock_file";
- var dbpath = MongoRunner.dataPath + baseName + '/';
+var baseName = "jstests_lock_file";
+var dbpath = MongoRunner.dataPath + baseName + '/';
- // Test framework will append --storageEngine command line option.
- var mongod = MongoRunner.runMongod({dbpath: dbpath});
- assert.neq(0,
- getMongodLockFileSize(dbpath),
- 'mongod.lock should not be empty while server is running');
+// Test framework will append --storageEngine command line option.
+var mongod = MongoRunner.runMongod({dbpath: dbpath});
+assert.neq(
+ 0, getMongodLockFileSize(dbpath), 'mongod.lock should not be empty while server is running');
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- // mongod.lock must be empty after shutting server down.
- assert.eq(
- 0, getMongodLockFileSize(dbpath), 'mongod.lock not truncated after shutting server down');
+// mongod.lock must be empty after shutting server down.
+assert.eq(0, getMongodLockFileSize(dbpath), 'mongod.lock not truncated after shutting server down');
}());
diff --git a/jstests/noPassthrough/lock_file_fail_to_open.js b/jstests/noPassthrough/lock_file_fail_to_open.js
index 59d5fadbb5f..a53c6688b9f 100644
--- a/jstests/noPassthrough/lock_file_fail_to_open.js
+++ b/jstests/noPassthrough/lock_file_fail_to_open.js
@@ -1,27 +1,27 @@
// Tests that MongoD fails to start with the correct error message if mongod.lock exists in the
// dbpath.
(function() {
- "use strict";
+"use strict";
- var baseName = "jstests_lock_file_fail_to_open";
+var baseName = "jstests_lock_file_fail_to_open";
- var dbPath = MongoRunner.dataPath + baseName + "/";
+var dbPath = MongoRunner.dataPath + baseName + "/";
- // Start a MongoD just to get a lockfile in place.
- var mongo1 = MongoRunner.runMongod({dbpath: dbPath, waitForConnect: true});
+// Start a MongoD just to get a lockfile in place.
+var mongo1 = MongoRunner.runMongod({dbpath: dbPath, waitForConnect: true});
- clearRawMongoProgramOutput();
- // Start another one which should fail to start as there is already a lockfile in its
- // dbpath.
- var mongo2 = null;
- mongo2 = MongoRunner.runMongod({dbpath: dbPath, noCleanData: true});
- // We should have failed to start.
- assert(mongo2 === null);
+clearRawMongoProgramOutput();
+// Start another mongod, which should fail to start as there is already a lock file in its
+// dbpath.
+var mongo2 = null;
+mongo2 = MongoRunner.runMongod({dbpath: dbPath, noCleanData: true});
+// We should have failed to start.
+assert(mongo2 === null);
- var logContents = rawMongoProgramOutput();
- assert(logContents.indexOf("Unable to lock the lock file") > 0 ||
- // Windows error message is different.
- logContents.indexOf("Unable to create/open the lock file") > 0);
+var logContents = rawMongoProgramOutput();
+assert(logContents.indexOf("Unable to lock the lock file") > 0 ||
+ // Windows error message is different.
+ logContents.indexOf("Unable to create/open the lock file") > 0);
- MongoRunner.stopMongod(mongo1);
+MongoRunner.stopMongod(mongo1);
})();
diff --git a/jstests/noPassthrough/lock_stats.js b/jstests/noPassthrough/lock_stats.js
index 85e6350ab0c..1274dd326c4 100644
--- a/jstests/noPassthrough/lock_stats.js
+++ b/jstests/noPassthrough/lock_stats.js
@@ -3,66 +3,66 @@
// This test uses the fsync command to induce locking.
// @tags: [requires_fsync]
(function() {
- 'use strict';
+'use strict';
- function testBlockTime(blockTimeMillis) {
- // Lock the database, and in parallel start an operation that needs the lock, so it blocks.
- assert.commandWorked(db.fsyncLock());
- var startStats = db.serverStatus().locks.Global;
- var startTime = new Date();
- var minBlockedMillis = blockTimeMillis;
- // This is just some command that requires a MODE_X global lock that conflicts.
- var s = startParallelShell(
- 'assert.commandWorked(db.getSiblingDB(\'nonexisting\').dropDatabase());', conn.port);
+function testBlockTime(blockTimeMillis) {
+ // Lock the database, and in parallel start an operation that needs the lock, so it blocks.
+ assert.commandWorked(db.fsyncLock());
+ var startStats = db.serverStatus().locks.Global;
+ var startTime = new Date();
+ var minBlockedMillis = blockTimeMillis;
+ // This is just some command that requires a MODE_X global lock that conflicts.
+ var s = startParallelShell(
+ 'assert.commandWorked(db.getSiblingDB(\'nonexisting\').dropDatabase());', conn.port);
- // Wait until we see somebody waiting to acquire the lock, defend against unset stats.
- assert.soon((function() {
- var stats = db.serverStatus().locks.Global;
- if (!stats.acquireWaitCount || !stats.acquireWaitCount.W)
- return false;
- if (!stats.timeAcquiringMicros || !stats.timeAcquiringMicros.W)
- return false;
- if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W)
- return true;
- return stats.acquireWaitCount.W > startStats.acquireWaitCount.W;
- }));
+    // Wait until we see somebody waiting to acquire the lock; defend against unset stats.
+ assert.soon((function() {
+ var stats = db.serverStatus().locks.Global;
+ if (!stats.acquireWaitCount || !stats.acquireWaitCount.W)
+ return false;
+ if (!stats.timeAcquiringMicros || !stats.timeAcquiringMicros.W)
+ return false;
+ if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W)
+ return true;
+ return stats.acquireWaitCount.W > startStats.acquireWaitCount.W;
+ }));
- // Sleep for minBlockedMillis, so the acquirer would have to wait at least that long.
- sleep(minBlockedMillis);
- db.fsyncUnlock();
+ // Sleep for minBlockedMillis, so the acquirer would have to wait at least that long.
+ sleep(minBlockedMillis);
+ db.fsyncUnlock();
- // Wait for the parallel shell to finish, so its stats will have been recorded.
- s();
+ // Wait for the parallel shell to finish, so its stats will have been recorded.
+ s();
- // The fsync command from the shell cannot have possibly been blocked longer than this.
- var maxBlockedMillis = new Date() - startTime;
- var endStats = db.serverStatus().locks.Global;
+    // The dropDatabase command from the parallel shell cannot possibly have been blocked
+    // longer than this.
+ var maxBlockedMillis = new Date() - startTime;
+ var endStats = db.serverStatus().locks.Global;
- // The server was just started, so initial stats may be missing.
- if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) {
- startStats.acquireWaitCount = {W: 0};
- }
- if (!startStats.timeAcquiringMicros || !startStats.timeAcquiringMicros.W) {
- startStats.timeAcquiringMicros = {W: 0};
- }
+ // The server was just started, so initial stats may be missing.
+ if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) {
+ startStats.acquireWaitCount = {W: 0};
+ }
+ if (!startStats.timeAcquiringMicros || !startStats.timeAcquiringMicros.W) {
+ startStats.timeAcquiringMicros = {W: 0};
+ }
- var acquireWaitCount = endStats.acquireWaitCount.W - startStats.acquireWaitCount.W;
- var blockedMillis =
- Math.floor((endStats.timeAcquiringMicros.W - startStats.timeAcquiringMicros.W) / 1000);
+ var acquireWaitCount = endStats.acquireWaitCount.W - startStats.acquireWaitCount.W;
+ var blockedMillis =
+ Math.floor((endStats.timeAcquiringMicros.W - startStats.timeAcquiringMicros.W) / 1000);
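+
+    // The reported wait must lie between the forced sleep (minBlockedMillis) and the wall-clock
+    // time elapsed since the database was locked (maxBlockedMillis).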
- // Require that no other commands run (and maybe acquire locks) in parallel.
- assert.eq(acquireWaitCount, 1, "other commands ran in parallel, can't check timing");
- assert.gte(blockedMillis, minBlockedMillis, "reported time acquiring lock is too low");
- assert.lte(blockedMillis, maxBlockedMillis, "reported time acquiring lock is too high");
- return ({
- blockedMillis: blockedMillis,
- minBlockedMillis: minBlockedMillis,
- maxBlockedMillis: maxBlockedMillis
- });
- }
+ // Require that no other commands run (and maybe acquire locks) in parallel.
+ assert.eq(acquireWaitCount, 1, "other commands ran in parallel, can't check timing");
+ assert.gte(blockedMillis, minBlockedMillis, "reported time acquiring lock is too low");
+ assert.lte(blockedMillis, maxBlockedMillis, "reported time acquiring lock is too high");
+ return ({
+ blockedMillis: blockedMillis,
+ minBlockedMillis: minBlockedMillis,
+ maxBlockedMillis: maxBlockedMillis
+ });
+}
- var conn = MongoRunner.runMongod();
- var db = conn.getDB('test');
- printjson([1, 10, 100, 500, 1000, 1500].map(testBlockTime));
- MongoRunner.stopMongod(conn);
+var conn = MongoRunner.runMongod();
+var db = conn.getDB('test');
+printjson([1, 10, 100, 500, 1000, 1500].map(testBlockTime));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/lock_stats_suboperation_curop.js b/jstests/noPassthrough/lock_stats_suboperation_curop.js
index 5d1b804d917..84e67ae6699 100644
--- a/jstests/noPassthrough/lock_stats_suboperation_curop.js
+++ b/jstests/noPassthrough/lock_stats_suboperation_curop.js
@@ -19,65 +19,65 @@
* @tags: [requires_fsync, requires_document_locking]
*/
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const db = conn.getDB('test');
- const coll = db.books;
- const blockedMillis = 2000;
- assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
- assert.commandWorked(coll.insert({title: '1984'}));
- assert.commandWorked(coll.insert({title: 'Animal Farm'}));
- // Create the output collection beforehand so that $out will execute a code path which triggers
- // the index creation sub-operation.
- db['favorite'].createIndex({foo: 1});
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
+const coll = db.books;
+const blockedMillis = 2000;
+assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
+assert.commandWorked(coll.insert({title: '1984'}));
+assert.commandWorked(coll.insert({title: 'Animal Farm'}));
+// Create the output collection beforehand so that $out will execute a code path which triggers
+// the index creation sub-operation.
+db['favorite'].createIndex({foo: 1});
- db.setProfilingLevel(0, -1);
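+// Profiling stays off, but a slowms of -1 makes the server log every operation.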
+db.setProfilingLevel(0, -1);
- // Lock the database, and then start an operation that needs the lock, so it blocks.
- assert.commandWorked(db.fsyncLock());
+// Lock the database, and then start an operation that needs the lock, so it blocks.
+assert.commandWorked(db.fsyncLock());
- // Turn 'hangAfterStartingIndexBuildUnlocked' failpoint on, which blocks any index builds.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
+// Turn the 'hangAfterStartingIndexBuild' failpoint on, which blocks any index builds.
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
- // Aggregation with $out which will block on creating the temporary collection due to the
- // FsyncLock.
- const dollarOutAggregationShell = startParallelShell(function() {
- // Simple aggregation which copies a document to the output collection.
- assert.commandWorked(db.runCommand({
- aggregate: 'books',
- pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
- cursor: {}
- }));
- }, conn.port);
+// Aggregation with $out which will block on creating the temporary collection due to the
+// FsyncLock.
+const dollarOutAggregationShell = startParallelShell(function() {
+ // Simple aggregation which copies a document to the output collection.
+ assert.commandWorked(db.runCommand({
+ aggregate: 'books',
+ pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
+ cursor: {}
+ }));
+}, conn.port);
- // Wait for sub-operation createCollection to get blocked.
- assert.soon(function() {
- let res = db.currentOp({"command.create": {$exists: true}, waitingForLock: true});
- return res.inprog.length == 1;
- });
+// Wait for sub-operation createCollection to get blocked.
+assert.soon(function() {
+ let res = db.currentOp({"command.create": {$exists: true}, waitingForLock: true});
+ return res.inprog.length == 1;
+});
- sleep(blockedMillis);
+sleep(blockedMillis);
- // Unlock the database. Sub-operation createCollection can proceed.
- db.fsyncUnlock();
+// Unlock the database. Sub-operation createCollection can proceed.
+db.fsyncUnlock();
- // Wait for sub-operation createIndex to get blocked after acquiring all the locks.
- let res;
- assert.soon(function() {
- res = db.currentOp(
- {"command.createIndexes": {$exists: true}, "lockStats.Global": {$exists: true}});
- return res.inprog.length == 1;
- });
- jsTestLog(tojson(res.inprog[0]));
- // Assert that sub-operation 'createIndex' has 0 lock wait time. Before SERVER-26854, it
- // erroneously reported `blockedMillis` as it counted the lock wait time for the previous
- // sub-operation.
- assert(!('timeAcquiringMicros' in res.inprog[0].lockStats.Global));
+// Wait for sub-operation createIndex to get blocked after acquiring all the locks.
+let res;
+assert.soon(function() {
+ res = db.currentOp(
+ {"command.createIndexes": {$exists: true}, "lockStats.Global": {$exists: true}});
+ return res.inprog.length == 1;
+});
+jsTestLog(tojson(res.inprog[0]));
+// Assert that sub-operation 'createIndex' has 0 lock wait time. Before SERVER-26854, it
+// erroneously reported `blockedMillis` as it counted the lock wait time for the previous
+// sub-operation.
+assert(!('timeAcquiringMicros' in res.inprog[0].lockStats.Global));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
- dollarOutAggregationShell();
- MongoRunner.stopMongod(conn);
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
+dollarOutAggregationShell();
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/lock_stats_suboperation_logs.js b/jstests/noPassthrough/lock_stats_suboperation_logs.js
index 3d533f1363d..cb56bfdc262 100644
--- a/jstests/noPassthrough/lock_stats_suboperation_logs.js
+++ b/jstests/noPassthrough/lock_stats_suboperation_logs.js
@@ -18,84 +18,84 @@
* @tags: [requires_fsync]
*/
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const db = conn.getDB('test');
- const coll = db.books;
- const blockedMillis = 2000;
- assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
- assert.commandWorked(coll.insert({title: '1984'}));
- assert.commandWorked(coll.insert({title: 'Animal Farm'}));
- // The server will log every operation.
- db.setProfilingLevel(0, -1);
- // Create the output collection beforehand so that $out will execute a code path which triggers
- // the index creation sub-operation.
- db['favorite'].insert({foo: 1});
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
+const coll = db.books;
+const blockedMillis = 2000;
+assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
+assert.commandWorked(coll.insert({title: '1984'}));
+assert.commandWorked(coll.insert({title: 'Animal Farm'}));
+// The server will log every operation.
+db.setProfilingLevel(0, -1);
+// Create the output collection beforehand so that $out will execute a code path which triggers
+// the index creation sub-operation.
+db['favorite'].insert({foo: 1});
- // Lock the database, and then start an operation that needs the lock, so it blocks.
- assert.commandWorked(db.fsyncLock());
+// Lock the database, and then start an operation that needs the lock, so it blocks.
+assert.commandWorked(db.fsyncLock());
- // Aggregation with $out which will block on creating the temporary collection due to the
- // FsyncLock.
- const dollarOutAggregationShell = startParallelShell(function() {
- // Simple aggregation which copies a document to the output collection.
- assert.commandWorked(db.runCommand({
- aggregate: 'books',
- pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
- cursor: {}
- }));
- }, conn.port);
+// Aggregation with $out which will block on creating the temporary collection due to the
+// FsyncLock.
+const dollarOutAggregationShell = startParallelShell(function() {
+ // Simple aggregation which copies a document to the output collection.
+ assert.commandWorked(db.runCommand({
+ aggregate: 'books',
+ pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
+ cursor: {}
+ }));
+}, conn.port);
- // Sub-operation createCollection starts to get blocked.
- assert.soon(function() {
- let res = db.currentOp({waitingForLock: true});
- return res.inprog.length == 1;
- });
+// Sub-operation createCollection starts to get blocked.
+assert.soon(function() {
+ let res = db.currentOp({waitingForLock: true});
+ return res.inprog.length == 1;
+});
- sleep(blockedMillis);
+sleep(blockedMillis);
- clearRawMongoProgramOutput();
- // Unlock the database. Sub-operation createCollection can proceed
- // and so do all the following sub-operations.
- db.fsyncUnlock();
+clearRawMongoProgramOutput();
+// Unlock the database. Sub-operation createCollection can proceed,
+// as can all the following sub-operations.
+db.fsyncUnlock();
- dollarOutAggregationShell();
- assert.eq(db['favorite'].count(), 1);
+dollarOutAggregationShell();
+assert.eq(db['favorite'].count(), 1);
- // Stopping the mongod also waits until all of its logs have been read by the mongo shell.
- MongoRunner.stopMongod(conn);
+// Stopping the mongod also waits until all of its logs have been read by the mongo shell.
+MongoRunner.stopMongod(conn);
- let mongodLogs = rawMongoProgramOutput();
- let lines = mongodLogs.split('\n');
- const lockWaitTimeRegex = /timeAcquiringMicros: { [wW]: ([0-9]+)/;
- let match;
- let firstOpWaitTime;
- let parentOpWaitTime;
- let numWaitedForLocks = 0;
+let mongodLogs = rawMongoProgramOutput();
+let lines = mongodLogs.split('\n');
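+// Matches the reported global lock wait, in microseconds, in a slow-op log line.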
+const lockWaitTimeRegex = /timeAcquiringMicros: { [wW]: ([0-9]+)/;
+let match;
+let firstOpWaitTime;
+let parentOpWaitTime;
+let numWaitedForLocks = 0;
- for (let line of lines) {
- if ((match = lockWaitTimeRegex.exec(line)) !== null) {
- let lockWaitTime = match[1];
- // Ignoring 'noise' lock stats from other operations such as locks taken during
- // validation stage.
- if (lockWaitTime < blockedMillis * 1000)
- continue;
- if (firstOpWaitTime === undefined)
- firstOpWaitTime = lockWaitTime;
- else
- parentOpWaitTime = lockWaitTime;
- numWaitedForLocks++;
- jsTestLog('Operation/Sub-operation log: ');
- jsTestLog(line);
- }
+for (let line of lines) {
+ if ((match = lockWaitTimeRegex.exec(line)) !== null) {
+        // Convert the captured digits to a number so the comparisons below are numeric rather
+        // than lexicographic string comparisons.
+        let lockWaitTime = Number(match[1]);
+        // Ignore 'noise' lock stats from other operations, such as locks taken during the
+        // validation stage.
+ if (lockWaitTime < blockedMillis * 1000)
+ continue;
+ if (firstOpWaitTime === undefined)
+ firstOpWaitTime = lockWaitTime;
+ else
+ parentOpWaitTime = lockWaitTime;
+ numWaitedForLocks++;
+ jsTestLog('Operation/Sub-operation log: ');
+ jsTestLog(line);
}
+}
- // Only the logs of 'parent command' (aggregation with $out) and the first
- // sub-operation(createCollection) have the information about the long wait for the lock.
- assert.eq(numWaitedForLocks, 2);
+// Only the logs of 'parent command' (aggregation with $out) and the first
+// sub-operation(createCollection) have the information about the long wait for the lock.
+assert.eq(numWaitedForLocks, 2);
- // Total waiting time should be greater than or equal to the waiting time of the
- // first sub-operation.
- assert(parentOpWaitTime >= firstOpWaitTime);
+// Total waiting time should be greater than or equal to the waiting time of the
+// first sub-operation.
+assert(parentOpWaitTime >= firstOpWaitTime);
})();
diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js
index 2a0757689a6..50395061e54 100644
--- a/jstests/noPassthrough/log_and_profile_query_hash.js
+++ b/jstests/noPassthrough/log_and_profile_query_hash.js
@@ -2,155 +2,161 @@
//
// Confirms that profiled find queries and corresponding logs have matching queryHashes.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry().
- load("jstests/libs/profiler.js");
-
- // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
- // of data logged for each op. For some of the testcases below, including the cluster time would
- // cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
- // check would be lost.
- TestData.skipGossipingClusterTime = true;
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("jstests_query_shape_hash");
- const coll = testDB.test;
-
- const profileEntryFilter = {op: "query"};
-
- assert.commandWorked(testDB.setProfilingLevel(2, {"slowms": 0}));
- assert.commandWorked(testDB.setLogLevel(0, "query"));
-
- // Parses the logLine and profileEntry into similar string representations with no white spaces.
- // Returns true if the logLine command components correspond to the profile entry. This is
- // sufficient for the purpose of testing query hashes.
- function logMatchesEntry(logLine, profileEntry) {
- if (logLine.indexOf("command: find { find: \"test\"") >= 0 &&
- logLine.indexOf(profileEntry["command"]["comment"]) >= 0) {
- return true;
- }
- return false;
- }
-
- // Fetch the log line that corresponds to the profile entry. If there is no such line, return
- // null.
- function retrieveLogLine(log, profileEntry) {
- const logLine = log.reduce((acc, line) => {
- if (logMatchesEntry(line, profileEntry)) {
- // Assert that the matching does not pick up more than one line corresponding to
- // the entry.
- assert.eq(acc, null);
- return line;
- }
- return acc;
- }, null);
- return logLine;
+"use strict";
+
+// For getLatestProfilerEntry().
+load("jstests/libs/profiler.js");
+
+// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
+// of data logged for each op. For some of the testcases below, including the cluster time would
+// cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
+// check would be lost.
+TestData.skipGossipingClusterTime = true;
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("jstests_query_shape_hash");
+const coll = testDB.test;
+
+const profileEntryFilter = {
+ op: "query"
+};
+
+assert.commandWorked(testDB.setProfilingLevel(2, {"slowms": 0}));
+assert.commandWorked(testDB.setLogLevel(0, "query"));
+
+// Returns true if the logLine's command components correspond to the profile entry, i.e. the
+// line contains the find command on the test collection and the entry's comment. This is
+// sufficient for the purpose of testing query hashes.
+function logMatchesEntry(logLine, profileEntry) {
+ if (logLine.indexOf("command: find { find: \"test\"") >= 0 &&
+ logLine.indexOf(profileEntry["command"]["comment"]) >= 0) {
+ return true;
}
-
- // Run the find command, retrieve the corresponding profile object and log line, then ensure
- // that both the profile object and log line have matching stable query hashes (if any).
- function runTestsAndGetHashes(db, {comment, test, hasQueryHash}) {
- assert.commandWorked(db.adminCommand({clearLog: "global"}));
- assert.doesNotThrow(() => test(db, comment));
- const log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
- const profileEntry =
- getLatestProfilerEntry(testDB, {op: "query", "command.comment": comment});
- // Parse the profile entry to retrieve the corresponding log entry.
- const logLine = retrieveLogLine(log, profileEntry);
- assert.neq(logLine, null);
-
- // Confirm that the query hashes either exist or don't exist in both log and profile
- // entries. If the queryHash and planCacheKey exist, ensure that the hashes from the
- // profile entry match the log line.
- assert.eq(hasQueryHash, profileEntry.hasOwnProperty("queryHash"));
- assert.eq(hasQueryHash, profileEntry.hasOwnProperty("planCacheKey"));
- assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["queryHash"]) >= 0));
- assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["planCacheKey"]) >= 0));
- if (hasQueryHash) {
- return {
- queryHash: profileEntry["queryHash"],
- planCacheKey: profileEntry["planCacheKey"]
- };
+ return false;
+}
+
+// Fetch the log line that corresponds to the profile entry. If there is no such line, return
+// null.
+function retrieveLogLine(log, profileEntry) {
+ const logLine = log.reduce((acc, line) => {
+ if (logMatchesEntry(line, profileEntry)) {
+ // Assert that the matching does not pick up more than one line corresponding to
+ // the entry.
+ assert.eq(acc, null);
+ return line;
}
- return null;
+ return acc;
+ }, null);
+ return logLine;
+}
+
+// Run the find command, retrieve the corresponding profile object and log line, then ensure
+// that both the profile object and log line have matching stable query hashes (if any).
+function runTestsAndGetHashes(db, {comment, test, hasQueryHash}) {
+ assert.commandWorked(db.adminCommand({clearLog: "global"}));
+ assert.doesNotThrow(() => test(db, comment));
+ const log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
+ const profileEntry = getLatestProfilerEntry(testDB, {op: "query", "command.comment": comment});
+ // Parse the profile entry to retrieve the corresponding log entry.
+ const logLine = retrieveLogLine(log, profileEntry);
+ assert.neq(logLine, null);
+
+ // Confirm that the query hashes either exist or don't exist in both log and profile
+ // entries. If the queryHash and planCacheKey exist, ensure that the hashes from the
+ // profile entry match the log line.
+ assert.eq(hasQueryHash, profileEntry.hasOwnProperty("queryHash"));
+ assert.eq(hasQueryHash, profileEntry.hasOwnProperty("planCacheKey"));
+ assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["queryHash"]) >= 0));
+ assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["planCacheKey"]) >= 0));
+ if (hasQueryHash) {
+ return {queryHash: profileEntry["queryHash"], planCacheKey: profileEntry["planCacheKey"]};
}
-
- // Add data and indices.
- const nDocs = 200;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({a: i, b: -1, c: 1}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
-
- const queryA = {a: {$gte: 3}, b: 32};
- const queryB = {a: {$gte: 199}, b: -1};
- const projectionB = {_id: 0, b: 1};
- const sortC = {c: -1};
-
- const testList = [
- {
- comment: "Test0 find query",
- test: function(db, comment) {
- assert.eq(200, db.test.find().comment(comment).itcount());
- },
- hasQueryHash: false
+ return null;
+}
+
+// Add data and indices.
+const nDocs = 200;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({a: i, b: -1, c: 1}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+
+const queryA = {
+ a: {$gte: 3},
+ b: 32
+};
+const queryB = {
+ a: {$gte: 199},
+ b: -1
+};
+const projectionB = {
+ _id: 0,
+ b: 1
+};
+const sortC = {
+ c: -1
+};
+
+const testList = [
+ {
+ comment: "Test0 find query",
+ test: function(db, comment) {
+ assert.eq(200, db.test.find().comment(comment).itcount());
},
- {
- comment: "Test1 find query",
- test: function(db, comment) {
- assert.eq(1,
- db.test.find(queryB, projectionB).sort(sortC).comment(comment).itcount(),
- 'unexpected document count');
- },
- hasQueryHash: true
+ hasQueryHash: false
+ },
+ {
+ comment: "Test1 find query",
+ test: function(db, comment) {
+ assert.eq(1,
+ db.test.find(queryB, projectionB).sort(sortC).comment(comment).itcount(),
+ 'unexpected document count');
},
- {
- comment: "Test2 find query",
- test: function(db, comment) {
- assert.eq(0,
- db.test.find(queryA, projectionB).sort(sortC).comment(comment).itcount(),
- 'unexpected document count');
- },
- hasQueryHash: true
- }
- ];
-
- const hashValues = testList.map((testCase) => runTestsAndGetHashes(testDB, testCase));
-
- // Confirm that the same shape of query has the same hashes.
- assert.neq(hashValues[0], hashValues[1]);
- assert.eq(hashValues[1], hashValues[2]);
-
- // Test that the expected 'planCacheKey' and 'queryHash' are included in the transitional
- // log lines when an inactive cache entry is created.
- assert.commandWorked(testDB.setLogLevel(1, "query"));
- const testInactiveCreationLog = {
- comment: "Test Creating inactive entry.",
+ hasQueryHash: true
+ },
+ {
+ comment: "Test2 find query",
test: function(db, comment) {
assert.eq(0,
- db.test.find({b: {$lt: 12}, a: {$eq: 500}})
- .sort({a: -1})
- .comment(comment)
- .itcount(),
+ db.test.find(queryA, projectionB).sort(sortC).comment(comment).itcount(),
'unexpected document count');
},
hasQueryHash: true
-
- };
- const onCreationHashes = runTestsAndGetHashes(testDB, testInactiveCreationLog);
- const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
-
- // Fetch the line that logs when an inactive cache entry is created for the query with
- // 'planCacheKey' and 'queryHash'. Confirm only one line does this.
- const creationLogList = log.filter(
- logLine =>
- (logLine.indexOf("Creating inactive cache entry for query shape query") != -1 &&
- logLine.indexOf("planCacheKey " + String(onCreationHashes.planCacheKey)) != -1 &&
- logLine.indexOf("queryHash " + String(onCreationHashes.queryHash)) != -1));
- assert.eq(1, creationLogList.length);
-
- MongoRunner.stopMongod(conn);
+ }
+];
+
+const hashValues = testList.map((testCase) => runTestsAndGetHashes(testDB, testCase));
+
+// Confirm that queries of the same shape (Test1 and Test2) have matching hashes, and
+// that they differ from Test0, which has none.
+assert.neq(hashValues[0], hashValues[1]);
+assert.eq(hashValues[1], hashValues[2]);
+
+// Test that the expected 'planCacheKey' and 'queryHash' are included in the transitional
+// log lines when an inactive cache entry is created.
+assert.commandWorked(testDB.setLogLevel(1, "query"));
+const testInactiveCreationLog = {
+ comment: "Test Creating inactive entry.",
+ test: function(db, comment) {
+ assert.eq(
+ 0,
+ db.test.find({b: {$lt: 12}, a: {$eq: 500}}).sort({a: -1}).comment(comment).itcount(),
+ 'unexpected document count');
+ },
+ hasQueryHash: true
+};
+const onCreationHashes = runTestsAndGetHashes(testDB, testInactiveCreationLog);
+const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
+
+// Fetch the log line recording the creation of an inactive cache entry for the query,
+// identified by its 'planCacheKey' and 'queryHash'. Confirm that exactly one such line
+// exists.
+const creationLogList = log.filter(
+ logLine => (logLine.indexOf("Creating inactive cache entry for query shape query") != -1 &&
+ logLine.indexOf("planCacheKey " + String(onCreationHashes.planCacheKey)) != -1 &&
+ logLine.indexOf("queryHash " + String(onCreationHashes.queryHash)) != -1));
+assert.eq(1, creationLogList.length);
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/log_find_getmore.js b/jstests/noPassthrough/log_find_getmore.js
index 94447948632..dc7f6d83c91 100644
--- a/jstests/noPassthrough/log_find_getmore.js
+++ b/jstests/noPassthrough/log_find_getmore.js
@@ -4,168 +4,166 @@
* @tags: [requires_profiling]
*/
(function() {
- "use strict";
+"use strict";
- // For checkLog and getLatestProfilerEntry.
- load("jstests/libs/check_log.js");
- load("jstests/libs/profiler.js");
+// For checkLog and getLatestProfilerEntry.
+load("jstests/libs/check_log.js");
+load("jstests/libs/profiler.js");
- function assertLogLineContains(conn, parts) {
- if (typeof(parts) == 'string') {
- return assertLogLineContains(conn, [parts]);
- }
- assert.soon(function() {
- const logLines = checkLog.getGlobalLog(conn);
- let foundAll = false;
- for (let l = 0; l < logLines.length && !foundAll; l++) {
- for (let p = 0; p < parts.length; p++) {
- if (logLines[l].indexOf(parts[p]) == -1) {
- break;
- }
- foundAll = (p == parts.length - 1);
+function assertLogLineContains(conn, parts) {
+ if (typeof (parts) == 'string') {
+ return assertLogLineContains(conn, [parts]);
+ }
+ assert.soon(function() {
+ const logLines = checkLog.getGlobalLog(conn);
+ let foundAll = false;
+ for (let l = 0; l < logLines.length && !foundAll; l++) {
+ for (let p = 0; p < parts.length; p++) {
+ if (logLines[l].indexOf(parts[p]) == -1) {
+ break;
}
+ foundAll = (p == parts.length - 1);
}
- return foundAll;
- }, "failed to find log line containing all of " + tojson(parts));
- print("FOUND: " + tojsononeline(parts));
- }
+ }
+ return foundAll;
+ }, "failed to find log line containing all of " + tojson(parts));
+ print("FOUND: " + tojsononeline(parts));
+}
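+// Usage sketch (the parts shown are hypothetical): assertLogLineContains(conn,
+// ["command: find", "nreturned:1"]) polls via assert.soon until a single log line
+// contains every listed part.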
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("log_getmore");
- const coll = testDB.test;
+const testDB = conn.getDB("log_getmore");
+const coll = testDB.test;
- assert.commandWorked(testDB.dropDatabase());
+assert.commandWorked(testDB.dropDatabase());
- for (let i = 1; i <= 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+for (let i = 1; i <= 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
- // Set the diagnostic logging threshold to capture all operations, and enable profiling so that
- // we can easily retrieve cursor IDs in all cases.
- assert.commandWorked(testDB.setProfilingLevel(2, -1));
+// Set the diagnostic logging threshold to capture all operations, and enable profiling so that
+// we can easily retrieve cursor IDs in all cases.
+assert.commandWorked(testDB.setProfilingLevel(2, -1));
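+// Profiling level 2 records every operation in system.profile, and the -1 threshold
+// means every operation is also treated as "slow" for diagnostic logging purposes.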
- //
- // Command tests.
- //
- testDB.getMongo().forceReadMode("commands");
+//
+// Command tests.
+//
+testDB.getMongo().forceReadMode("commands");
- // TEST: Verify the log format of the find command.
- let cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
- cursor.next(); // Perform initial query and retrieve first document in batch.
+// TEST: Verify the log format of the find command.
+let cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
+cursor.next(); // Perform initial query and retrieve first document in batch.
- let cursorid = getLatestProfilerEntry(testDB).cursorid;
+let cursorid = getLatestProfilerEntry(testDB).cursorid;
- let logLine =
- 'command log_getmore.test appName: "MongoDB Shell" command: find { find: "test", filter:' +
- ' { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: false, sort:' +
- ' { a: 1.0 }, hint: { a: 1.0 }';
+let logLine =
+ 'command log_getmore.test appName: "MongoDB Shell" command: find { find: "test", filter:' +
+ ' { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: false, sort:' +
+ ' { a: 1.0 }, hint: { a: 1.0 }';
- // Check the logs to verify that find appears as above.
- assertLogLineContains(conn, logLine);
+// Check the logs to verify that find appears as above.
+assertLogLineContains(conn, logLine);
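+// Numeric arguments appear as doubles above (e.g. "skip: 1.0") because the shell
+// serializes JavaScript numbers as BSON doubles.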
- // TEST: Verify the log format of a getMore command following a find command.
+// TEST: Verify the log format of a getMore command following a find command.
- assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
+assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
- /**
- * Be sure to avoid rounding errors when converting a cursor ID to a string, since converting a
- * NumberLong to a string may not preserve all digits.
- */
- function cursorIdToString(cursorId) {
- let cursorIdString = cursorId.toString();
- if (cursorIdString.indexOf("NumberLong") === -1) {
- return cursorIdString;
- }
- return cursorIdString.substring("NumberLong(\"".length,
- cursorIdString.length - "\")".length);
+/**
+ * Be sure to avoid rounding errors when converting a cursor ID to a string, since converting a
+ * NumberLong to a string may not preserve all digits.
+ */
+function cursorIdToString(cursorId) {
+ let cursorIdString = cursorId.toString();
+ if (cursorIdString.indexOf("NumberLong") === -1) {
+ return cursorIdString;
}
+ return cursorIdString.substring("NumberLong(\"".length, cursorIdString.length - "\")".length);
+}
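+// For example (illustrative value): NumberLong("7217219585558693033").toString() yields
+// 'NumberLong("7217219585558693033")', and the substring above recovers the digit string
+// exactly, avoiding a lossy round-trip through a JavaScript double.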
- logLine = [
- 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test", batchSize: 5.0',
- 'originatingCommand: { find: "test", ' +
- 'filter: { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: ' +
- 'false, sort: { a: 1.0 }, hint: { a: 1.0 }'
- ];
+logLine = [
+ 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
+ cursorIdToString(cursorid) + ', collection: "test", batchSize: 5.0',
+ 'originatingCommand: { find: "test", ' +
+ 'filter: { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: ' +
+ 'false, sort: { a: 1.0 }, hint: { a: 1.0 }'
+];
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify the log format of a getMore command following an aggregation.
- cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
- cursorid = getLatestProfilerEntry(testDB).cursorid;
+// TEST: Verify the log format of a getMore command following an aggregation.
+cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
+cursorid = getLatestProfilerEntry(testDB).cursorid;
- assert.eq(cursor.itcount(), 10);
+assert.eq(cursor.itcount(), 10);
- logLine = [
- 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test"',
- 'originatingCommand: { aggregate: "test", pipeline: ' +
- '[ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
- ];
+logLine = [
+ 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
+ cursorIdToString(cursorid) + ', collection: "test"',
+ 'originatingCommand: { aggregate: "test", pipeline: ' +
+ '[ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
+];
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- //
- // Legacy tests.
- //
- testDB.getMongo().forceReadMode("legacy");
+//
+// Legacy tests.
+//
+testDB.getMongo().forceReadMode("legacy");
- // TEST: Verify the log format of a legacy find. This should be upconverted to resemble a find
- // command.
- cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
- cursor.next();
+// TEST: Verify the log format of a legacy find. This should be upconverted to resemble a find
+// command.
+cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
+cursor.next();
- cursorid = getLatestProfilerEntry(testDB).cursorid;
+cursorid = getLatestProfilerEntry(testDB).cursorid;
- logLine =
- 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { a: ' +
- '{ $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
+logLine = 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { a: ' +
+ '{ $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify that a query whose filter contains a field named 'query' appears as expected in
- // the logs. This test ensures that upconverting a legacy query correctly identifies this as a
- // user field rather than a wrapped filter spec.
- coll.find({query: "foo"}).itcount();
+// TEST: Verify that a query whose filter contains a field named 'query' appears as expected in
+// the logs. This test ensures that upconverting a legacy query correctly identifies this as a
+// user field rather than a wrapped filter spec.
+coll.find({query: "foo"}).itcount();
- logLine =
- 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { query:' +
- ' "foo" } }';
+logLine =
+ 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { query:' +
+ ' "foo" } }';
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify that a legacy getMore following a find is logged in the expected format. This
- // should be upconverted to resemble a getMore command, with the preceding upconverted legacy
- // find in the originatingCommand field.
+// TEST: Verify that a legacy getMore following a find is logged in the expected format. This
+// should be upconverted to resemble a getMore command, with the preceding upconverted legacy
+// find in the originatingCommand field.
- assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
+assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
- logLine = 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
- cursorIdToString(cursorid) +
- ', collection: "test", batchSize: 5 } originatingCommand: { find: "test", filter: { a: {' +
- ' $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
+logLine = 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
+ cursorIdToString(cursorid) +
+ ', collection: "test", batchSize: 5 } originatingCommand: { find: "test", filter: { a: {' +
+ ' $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify that a legacy getMore following an aggregation is logged in the expected format.
- // This should be upconverted to resemble a getMore command, with the preceding aggregation in
- // the originatingCommand field.
- cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
- cursorid = getLatestProfilerEntry(testDB).cursorid;
+// TEST: Verify that a legacy getMore following an aggregation is logged in the expected format.
+// This should be upconverted to resemble a getMore command, with the preceding aggregation in
+// the originatingCommand field.
+cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
+cursorid = getLatestProfilerEntry(testDB).cursorid;
- assert.eq(cursor.itcount(), 10);
+assert.eq(cursor.itcount(), 10);
- logLine = [
- 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test", batchSize: 0',
- 'originatingCommand: { aggregate: "test", pipeline:' +
- ' [ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
- ];
+logLine = [
+ 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
+ cursorIdToString(cursorid) + ', collection: "test", batchSize: 0',
+ 'originatingCommand: { aggregate: "test", pipeline:' +
+ ' [ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
+];
- assertLogLineContains(conn, logLine);
- MongoRunner.stopMongod(conn);
+assertLogLineContains(conn, logLine);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
index 27a06a1ecec..0b4cda5794e 100644
--- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
+++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
@@ -5,252 +5,253 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
-
- // This test looks for exact matches in log output, which does not account for implicit
- // sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/check_log.js"); // For formatAsLogLine.
-
- // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
- // of data logged for each op. For some of the testcases below, including the cluster time would
- // cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
- // check would be lost.
- TestData.skipGossipingClusterTime = true;
-
- // Set up a 2-shard single-node replicaset cluster.
- const stParams = {name: jsTestName(), shards: 2, rs: {nodes: 1}};
- const st = new ShardingTest(stParams);
-
- // Obtain one mongoS connection and a second direct to the shard.
- const shardConn = st.rs0.getPrimary();
- const mongosConn = st.s;
-
- const dbName = "logtest";
-
- const mongosDB = mongosConn.getDB(dbName);
- const shardDB = shardConn.getDB(dbName);
-
- // Enable sharding on the the test database and ensure that the primary is on shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), shardConn.name);
-
- // Drops and re-shards the test collection, then splits at {_id: 0} and moves the upper chunk to
- // the second shard.
- function dropAndRecreateTestCollection() {
- assert(mongosDB.test.drop());
- st.shardColl(mongosDB.test, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
- }
-
- // Configures logging parameters on the target environment, constructs a list of test operations
- // depending on the deployment type, runs each of these in turn, and searches the logs for the
- // corresponding output. Returns a pair of arrays [testsRun, logLines]; the former is the set of
- // test cases that were run, while the latter contains the logline for each test, or null if no
- // such logline was found.
- function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
- dropAndRecreateTestCollection();
-
- const coll = db.test;
-
- // Transparently handles assert.writeOK for legacy writes.
- function assertWriteOK(writeResponse) {
- if (!writeResponse) {
- assert(db.getMongo().writeMode !== "commands");
- assert(db.runCommand({getLastError: 1}).err == null);
- } else {
- assert.commandWorked(writeResponse);
- }
+"use strict";
+
+// This test looks for exact matches in log output, which does not account for implicit
+// sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/check_log.js"); // For formatAsLogLine.
+
+// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
+// of data logged for each op. For some of the testcases below, including the cluster time would
+// cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
+// check would be lost.
+TestData.skipGossipingClusterTime = true;
+
+// Set up a 2-shard single-node replicaset cluster.
+const stParams = {
+ name: jsTestName(),
+ shards: 2,
+ rs: {nodes: 1}
+};
+const st = new ShardingTest(stParams);
+
+// Obtain one mongoS connection and a second direct to the shard.
+const shardConn = st.rs0.getPrimary();
+const mongosConn = st.s;
+
+const dbName = "logtest";
+
+const mongosDB = mongosConn.getDB(dbName);
+const shardDB = shardConn.getDB(dbName);
+
+// Enable sharding on the test database and ensure that the primary is on shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), shardConn.name);
+
+// Drops and re-shards the test collection, then splits at {_id: 0} and moves the upper chunk to
+// the second shard.
+function dropAndRecreateTestCollection() {
+ assert(mongosDB.test.drop());
+ st.shardColl(mongosDB.test, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
+}
+
+// Configures logging parameters on the target environment, constructs a list of test operations
+// depending on the deployment type, runs each of these in turn, and searches the logs for the
+// corresponding output. Returns a pair of arrays [testsRun, logLines]; the former is the set of
+// test cases that were run, while the latter contains the logline for each test, or null if no
+// such logline was found.
+function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
+ dropAndRecreateTestCollection();
+
+ const coll = db.test;
+
+ // Transparently handles assert.writeOK for legacy writes.
+ function assertWriteOK(writeResponse) {
+ if (!writeResponse) {
+ assert(db.getMongo().writeMode !== "commands");
+ assert(db.runCommand({getLastError: 1}).err == null);
+ } else {
+ assert.commandWorked(writeResponse);
}
+ }
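+    // Legacy-mode write helpers return nothing, so success is confirmed via
+    // getLastError; command-mode writes return a response object that
+    // assert.commandWorked can validate directly.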
- for (let i = 1; i <= 5; ++i) {
- assertWriteOK(coll.insert({_id: i, a: i, loc: {type: "Point", coordinates: [i, i]}}));
- assertWriteOK(
- coll.insert({_id: -i, a: -i, loc: {type: "Point", coordinates: [-i, -i]}}));
- }
- assertWriteOK(coll.createIndex({loc: "2dsphere"}));
-
- const isMongos = FixtureHelpers.isMongos(db);
-
- // Set the shell read/write mode.
- db.getMongo().forceWriteMode(readWriteMode);
- db.getMongo().forceReadMode(readWriteMode);
-
- // Build a string that identifies the parameters of this test run. Individual ops will
- // use this string as their comment where applicable, and we also print it to the logs.
- const logFormatTestComment = (isMongos ? 'mongos' : 'mongod') + "_" + readWriteMode +
- "_slowms:" + slowMs + "_logLevel:" + logLevel + "_sampleRate:" + sampleRate;
- jsTestLog(logFormatTestComment);
-
- // Set all logging parameters. If slowMs is null, we set a high threshold here so that
- // logLevel can be tested in cases where operations should not otherwise be logged.
- assert.commandWorked(db.adminCommand(
- {profile: 0, slowms: (slowMs == null) ? 1000000 : slowMs, sampleRate: sampleRate}));
- assert.commandWorked(db.setLogLevel(logLevel, "command"));
- assert.commandWorked(db.setLogLevel(logLevel, "write"));
-
- // Certain fields in the log lines on mongoD are not applicable in their counterparts on
- // mongoS, and vice-versa. Ignore these fields when examining the logs of an instance on
- // which we do not expect them to appear.
- const ignoreFields =
+ for (let i = 1; i <= 5; ++i) {
+ assertWriteOK(coll.insert({_id: i, a: i, loc: {type: "Point", coordinates: [i, i]}}));
+ assertWriteOK(coll.insert({_id: -i, a: -i, loc: {type: "Point", coordinates: [-i, -i]}}));
+ }
+ assertWriteOK(coll.createIndex({loc: "2dsphere"}));
+
+ const isMongos = FixtureHelpers.isMongos(db);
+
+ // Set the shell read/write mode.
+ db.getMongo().forceWriteMode(readWriteMode);
+ db.getMongo().forceReadMode(readWriteMode);
+
+ // Build a string that identifies the parameters of this test run. Individual ops will
+ // use this string as their comment where applicable, and we also print it to the logs.
+ const logFormatTestComment = (isMongos ? 'mongos' : 'mongod') + "_" + readWriteMode +
+ "_slowms:" + slowMs + "_logLevel:" + logLevel + "_sampleRate:" + sampleRate;
+ jsTestLog(logFormatTestComment);
+
+ // Set all logging parameters. If slowMs is null, we set a high threshold here so that
+ // logLevel can be tested in cases where operations should not otherwise be logged.
+ assert.commandWorked(db.adminCommand(
+ {profile: 0, slowms: (slowMs == null) ? 1000000 : slowMs, sampleRate: sampleRate}));
+ assert.commandWorked(db.setLogLevel(logLevel, "command"));
+ assert.commandWorked(db.setLogLevel(logLevel, "write"));
+
+ // Certain fields in the log lines on mongoD are not applicable in their counterparts on
+ // mongoS, and vice-versa. Ignore these fields when examining the logs of an instance on
+ // which we do not expect them to appear.
+ const ignoreFields =
(isMongos
? ["docsExamined", "keysExamined", "keysInserted", "keysDeleted", "planSummary",
"usedDisk", "hasSortStage"]
: ["nShards"]);
- // Legacy operations do not produce a 'command: <name>' field in the log.
- if (readWriteMode === "legacy") {
- ignoreFields.push("command");
- }
+ // Legacy operations do not produce a 'command: <name>' field in the log.
+ if (readWriteMode === "legacy") {
+ ignoreFields.push("command");
+ }
- function confirmLogContents(db, {test, logFields}, testIndex) {
- // Clear the log before running the test, to guarantee that we do not match against any
- // similar tests which may have run previously.
- assert.commandWorked(db.adminCommand({clearLog: "global"}));
+ function confirmLogContents(db, {test, logFields}, testIndex) {
+ // Clear the log before running the test, to guarantee that we do not match against any
+ // similar tests which may have run previously.
+ assert.commandWorked(db.adminCommand({clearLog: "global"}));
- // Run the given command in order to generate a log line. If slowMs is non-null and
- // greater than 0, apply that slowMs to every second test.
- if (slowMs != null && slowMs > 0) {
- db.adminCommand({profile: 0, slowms: (testIndex % 2 ? slowMs : -1)});
- }
- assert.doesNotThrow(() => test(db));
-
- // Confirm whether the operation was logged or not.
- const globalLog = assert.commandWorked(db.adminCommand({getLog: "global"}));
- return findMatchingLogLine(globalLog.log, logFields, ignoreFields);
+ // Run the given command in order to generate a log line. If slowMs is non-null and
+ // greater than 0, apply that slowMs to every second test.
+ if (slowMs != null && slowMs > 0) {
+ db.adminCommand({profile: 0, slowms: (testIndex % 2 ? slowMs : -1)});
}
+ assert.doesNotThrow(() => test(db));
+
+ // Confirm whether the operation was logged or not.
+ const globalLog = assert.commandWorked(db.adminCommand({getLog: "global"}));
+ return findMatchingLogLine(globalLog.log, logFields, ignoreFields);
+ }
- //
- // Defines the set of test operations and associated log output fields.
- //
- const testList = [
- {
- test: function(db) {
- assert.eq(db.test
- .aggregate([{$match: {a: 1}}], {
- comment: logFormatTestComment,
- collation: {locale: "fr"},
- hint: {_id: 1},
- })
- .itcount(),
- 1);
- },
- logFields: {
- command: "aggregate",
- aggregate: coll.getName(),
- pipeline: [{$match: {a: 1}}],
- comment: logFormatTestComment,
- collation: {locale: "fr"},
- hint: {_id: 1},
- planSummary: "IXSCAN { _id: 1 }",
- cursorExhausted: 1,
- docsExamined: 10,
- keysExamined: 10,
- nreturned: 1,
- nShards: stParams.shards
- }
+ //
+ // Defines the set of test operations and associated log output fields.
+ //
+ const testList = [
+ {
+ test: function(db) {
+ assert.eq(db.test
+ .aggregate([{$match: {a: 1}}], {
+ comment: logFormatTestComment,
+ collation: {locale: "fr"},
+ hint: {_id: 1},
+ })
+ .itcount(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(db.test.find({a: 1, $comment: logFormatTestComment})
- .collation({locale: "fr"})
- .count(),
- 1);
- },
- logFields: {
- command: "count",
- count: coll.getName(),
- query: {a: 1, $comment: logFormatTestComment},
- collation: {locale: "fr"},
- planSummary: "COLLSCAN"
- }
+ logFields: {
+ command: "aggregate",
+ aggregate: coll.getName(),
+ pipeline: [{$match: {a: 1}}],
+ comment: logFormatTestComment,
+ collation: {locale: "fr"},
+ hint: {_id: 1},
+ planSummary: "IXSCAN { _id: 1 }",
+ cursorExhausted: 1,
+ docsExamined: 10,
+ keysExamined: 10,
+ nreturned: 1,
+ nShards: stParams.shards
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.test.find({a: 1, $comment: logFormatTestComment})
+ .collation({locale: "fr"})
+ .count(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(
- db.test.distinct(
- "a", {a: 1, $comment: logFormatTestComment}, {collation: {locale: "fr"}}),
- [1]);
- },
- logFields: {
- command: "distinct",
- distinct: coll.getName(),
- query: {a: 1, $comment: logFormatTestComment},
- planSummary: "COLLSCAN",
- $comment: logFormatTestComment,
- collation: {locale: "fr"}
- }
+ logFields: {
+ command: "count",
+ count: coll.getName(),
+ query: {a: 1, $comment: logFormatTestComment},
+ collation: {locale: "fr"},
+ planSummary: "COLLSCAN"
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(
+ db.test.distinct(
+ "a", {a: 1, $comment: logFormatTestComment}, {collation: {locale: "fr"}}),
+ [1]);
},
- {
- test: function(db) {
- assert.eq(db.test.find({_id: 1}).comment(logFormatTestComment).itcount(), 1);
- },
- logFields: {
- command: "find",
- find: coll.getName(),
- comment: logFormatTestComment,
- planSummary: "IDHACK",
- cursorExhausted: 1,
- keysExamined: 1,
- docsExamined: 1,
- nreturned: 1,
- nShards: 1
- }
+ logFields: {
+ command: "distinct",
+ distinct: coll.getName(),
+ query: {a: 1, $comment: logFormatTestComment},
+ planSummary: "COLLSCAN",
+ $comment: logFormatTestComment,
+ collation: {locale: "fr"}
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.test.find({_id: 1}).comment(logFormatTestComment).itcount(), 1);
},
- {
- test: function(db) {
- assert.eq(db.test.findAndModify({
- query: {_id: 1, a: 1, $comment: logFormatTestComment},
- update: {$inc: {b: 1}},
- collation: {locale: "fr"}
- }),
- {_id: 1, a: 1, loc: {type: "Point", coordinates: [1, 1]}});
- },
- // TODO SERVER-34208: display FAM update metrics in mongoS logs.
- logFields: Object.assign((isMongos ? {} : {nMatched: 1, nModified: 1}), {
- command: "findAndModify",
- findandmodify: coll.getName(),
- planSummary: "IXSCAN { _id: 1 }",
- keysExamined: 1,
- docsExamined: 1,
- $comment: logFormatTestComment,
- collation: {locale: "fr"}
- })
+ logFields: {
+ command: "find",
+ find: coll.getName(),
+ comment: logFormatTestComment,
+ planSummary: "IDHACK",
+ cursorExhausted: 1,
+ keysExamined: 1,
+ docsExamined: 1,
+ nreturned: 1,
+ nShards: 1
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.test.findAndModify({
+ query: {_id: 1, a: 1, $comment: logFormatTestComment},
+ update: {$inc: {b: 1}},
+ collation: {locale: "fr"}
+ }),
+ {_id: 1, a: 1, loc: {type: "Point", coordinates: [1, 1]}});
},
- {
- test: function(db) {
- assert.commandWorked(db.test.mapReduce(() => {},
- (a, b) => {},
- {
- query: {$comment: logFormatTestComment},
- out: {inline: 1},
- }));
- },
- logFields: {
- command: "mapReduce",
- mapreduce: coll.getName(),
- planSummary: "COLLSCAN",
- keysExamined: 0,
- docsExamined: 10,
- $comment: logFormatTestComment,
- out: {inline: 1}
- }
+ // TODO SERVER-34208: display FAM update metrics in mongoS logs.
+ logFields: Object.assign((isMongos ? {} : {nMatched: 1, nModified: 1}), {
+ command: "findAndModify",
+ findandmodify: coll.getName(),
+ planSummary: "IXSCAN { _id: 1 }",
+ keysExamined: 1,
+ docsExamined: 1,
+ $comment: logFormatTestComment,
+ collation: {locale: "fr"}
+ })
+ },
+ {
+ test: function(db) {
+ assert.commandWorked(db.test.mapReduce(() => {}, (a, b) => {}, {
+ query: {$comment: logFormatTestComment},
+ out: {inline: 1},
+ }));
},
- {
- test: function(db) {
- assertWriteOK(db.test.update(
- {a: 1, $comment: logFormatTestComment}, {$inc: {b: 1}}, {multi: true}));
- },
- logFields: (isMongos ? {
- command: "update",
- update: coll.getName(),
- ordered: true,
- nMatched: 1,
- nModified: 1,
- nShards: stParams.shards
- }
- : {
+ logFields: {
+ command: "mapReduce",
+ mapreduce: coll.getName(),
+ planSummary: "COLLSCAN",
+ keysExamined: 0,
+ docsExamined: 10,
+ $comment: logFormatTestComment,
+ out: {inline: 1}
+ }
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.update(
+ {a: 1, $comment: logFormatTestComment}, {$inc: {b: 1}}, {multi: true}));
+ },
+ logFields: (isMongos ? {
+ command: "update",
+ update: coll.getName(),
+ ordered: true,
+ nMatched: 1,
+ nModified: 1,
+ nShards: stParams.shards
+ }
+ : {
q: {a: 1, $comment: logFormatTestComment},
u: {$inc: {b: 1}},
multi: true,
@@ -259,24 +260,24 @@
docsExamined: 10,
nMatched: 1,
nModified: 1
- })
+ })
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.update({_id: 100, $comment: logFormatTestComment},
+ {$inc: {b: 1}},
+ {multi: true, upsert: true}));
},
- {
- test: function(db) {
- assertWriteOK(db.test.update({_id: 100, $comment: logFormatTestComment},
- {$inc: {b: 1}},
- {multi: true, upsert: true}));
- },
- logFields: (isMongos ? {
- command: "update",
- update: coll.getName(),
- ordered: true,
- nMatched: 0,
- nModified: 0,
- upsert: 1,
- nShards: 1
- }
- : {
+ logFields: (isMongos ? {
+ command: "update",
+ update: coll.getName(),
+ ordered: true,
+ nMatched: 0,
+ nModified: 0,
+ upsert: 1,
+ nShards: 1
+ }
+ : {
q: {_id: 100, $comment: logFormatTestComment},
u: {$inc: {b: 1}},
multi: true,
@@ -286,32 +287,32 @@
nMatched: 0,
nModified: 0,
upsert: 1
- })
+ })
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.insert({z: 1, comment: logFormatTestComment}));
},
- {
- test: function(db) {
- assertWriteOK(db.test.insert({z: 1, comment: logFormatTestComment}));
- },
- logFields: {
- command: "insert",
- insert: `${coll.getName()}|${coll.getFullName()}`,
- keysInserted: 1,
- ninserted: 1,
- nShards: 1
- }
+ logFields: {
+ command: "insert",
+ insert: `${coll.getName()}|${coll.getFullName()}`,
+ keysInserted: 1,
+ ninserted: 1,
+ nShards: 1
+ }
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.remove({z: 1, $comment: logFormatTestComment}));
},
- {
- test: function(db) {
- assertWriteOK(db.test.remove({z: 1, $comment: logFormatTestComment}));
- },
- logFields: (isMongos ? {
- command: "delete",
- delete: coll.getName(),
- ordered: true,
- ndeleted: 1,
- nShards: stParams.shards
- }
- : {
+ logFields: (isMongos ? {
+ command: "delete",
+ delete: coll.getName(),
+ ordered: true,
+ ndeleted: 1,
+ nShards: stParams.shards
+ }
+ : {
q: {z: 1, $comment: logFormatTestComment},
limit: 0,
planSummary: "COLLSCAN",
@@ -319,195 +320,183 @@
docsExamined: 12,
ndeleted: 1,
keysDeleted: 1
- })
+ })
+ },
+ {
+ test: function(db) {
+ const originalSortBytes = db.adminCommand(
+ {getParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 1});
+ assert.commandWorked(originalSortBytes);
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
+ assert.eq(coll.aggregate([{$match: {a: 1}}, {$sort: {a: 1}}], {allowDiskUse: true})
+ .itcount(),
+ 1);
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalDocumentSourceSortMaxBlockingSortBytes:
+ originalSortBytes.internalDocumentSourceSortMaxBlockingSortBytes
+ }));
},
- {
- test: function(db) {
- const originalSortBytes = db.adminCommand(
- {getParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 1});
- assert.commandWorked(originalSortBytes);
- assert.commandWorked(db.adminCommand(
- {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
- assert.eq(
- coll.aggregate([{$match: {a: 1}}, {$sort: {a: 1}}], {allowDiskUse: true})
- .itcount(),
- 1);
- assert.commandWorked(db.adminCommand({
- setParameter: 1,
- internalDocumentSourceSortMaxBlockingSortBytes:
- originalSortBytes.internalDocumentSourceSortMaxBlockingSortBytes
- }));
- },
- logFields:
- {command: "aggregate", aggregate: coll.getName(), hasSortStage: 1, usedDisk: 1}
- }
- ];
-
- // Confirm log contains collation for find command.
- if (readWriteMode === "commands") {
- testList.push({
- test: function(db) {
- assert.eq(db.test.find({_id: {$in: [1, 5]}})
- .comment(logFormatTestComment)
- .collation({locale: "fr"})
- .itcount(),
- 2);
- },
- logFields: {
- command: "find",
- find: coll.getName(),
- planSummary: "IXSCAN { _id: 1 }",
- comment: logFormatTestComment,
- collation: {locale: "fr"},
- cursorExhausted: 1,
- keysExamined: 4,
- docsExamined: 2,
- nreturned: 2,
- nShards: 1
- }
- });
+ logFields:
+ {command: "aggregate", aggregate: coll.getName(), hasSortStage: 1, usedDisk: 1}
}
-
- // Confirm log content for getMore on both find and aggregate cursors.
- const originatingCommands = {
- find: {find: coll.getName(), batchSize: 0},
- aggregate: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}
- };
-
- for (let cmdName in originatingCommands) {
- const cmdObj = originatingCommands[cmdName];
- const cmdRes = assert.commandWorked(db.runCommand(cmdObj));
-
- testList.push({
- test: function(db) {
- const cursor = new DBCommandCursor(db, cmdRes);
- assert.eq(cursor.itcount(), 11);
- },
- logFields: Object.assign({getMore: cmdRes.cursor.id}, cmdObj, {
- cursorid: cmdRes.cursor.id,
- planSummary: "COLLSCAN",
- cursorExhausted: 1,
- docsExamined: 11,
- keysExamined: 0,
- nreturned: 11,
- nShards: stParams.shards
- })
- });
- }
-
- // Run each of the test in the array, recording the log line found for each.
- const logLines =
- testList.map((testCase, arrIndex) => confirmLogContents(db, testCase, arrIndex));
-
- return [testList, logLines];
+ ];
+
+ // Confirm log contains collation for find command.
+ if (readWriteMode === "commands") {
+ testList.push({
+ test: function(db) {
+ assert.eq(db.test.find({_id: {$in: [1, 5]}})
+ .comment(logFormatTestComment)
+ .collation({locale: "fr"})
+ .itcount(),
+ 2);
+ },
+ logFields: {
+ command: "find",
+ find: coll.getName(),
+ planSummary: "IXSCAN { _id: 1 }",
+ comment: logFormatTestComment,
+ collation: {locale: "fr"},
+ cursorExhausted: 1,
+ keysExamined: 4,
+ docsExamined: 2,
+ nreturned: 2,
+ nShards: 1
+ }
+ });
}
- //
- // Helper functions.
- //
+ // Confirm log content for getMore on both find and aggregate cursors.
+ const originatingCommands = {
+ find: {find: coll.getName(), batchSize: 0},
+ aggregate: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}
+ };
- // Finds and returns a logline containing all the specified fields, or null if no such logline
- // was found. The regex escape function used here is drawn from the following:
- // https://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
- // https://github.com/ljharb/regexp.escape
- function findMatchingLogLine(logLines, fields, ignoreFields) {
- function escapeRegex(input) {
- return (typeof input === "string"
- ? input.replace(/[\^\$\\\.\*\+\?\(\)\[\]\{\}]/g, '\\$&')
- : input);
- }
- function lineMatches(line, fields, ignoreFields) {
- const fieldNames =
- Object.keys(fields).filter((fieldName) => !ignoreFields.includes(fieldName));
- return fieldNames.every((fieldName) => {
- const fieldValue = fields[fieldName];
- let regex = escapeRegex(fieldName) + ":? ?(" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
- const match = line.match(regex);
- return match && match[0];
- });
- }
+ for (let cmdName in originatingCommands) {
+ const cmdObj = originatingCommands[cmdName];
+ const cmdRes = assert.commandWorked(db.runCommand(cmdObj));
- for (let line of logLines) {
- if (lineMatches(line, fields, ignoreFields)) {
- return line;
- }
- }
- return null;
+ testList.push({
+ test: function(db) {
+ const cursor = new DBCommandCursor(db, cmdRes);
+ assert.eq(cursor.itcount(), 11);
+ },
+ logFields: Object.assign({getMore: cmdRes.cursor.id}, cmdObj, {
+ cursorid: cmdRes.cursor.id,
+ planSummary: "COLLSCAN",
+ cursorExhausted: 1,
+ docsExamined: 11,
+ keysExamined: 0,
+ nreturned: 11,
+ nShards: stParams.shards
+ })
+ });
}
- // In cases where some tests were not logged, this helper will identify and return them.
- function getUnloggedTests(testsRun, logLines) {
- return testsRun.filter((testCase, arrIndex) => !logLines[arrIndex]);
+    // Run each of the tests in the array, recording the log line found for each.
+ const logLines =
+ testList.map((testCase, arrIndex) => confirmLogContents(db, testCase, arrIndex));
+
+ return [testList, logLines];
+}
+
+//
+// Helper functions.
+//
+
+// Finds and returns a logline containing all the specified fields, or null if no such logline
+// was found. The regex escape function used here is drawn from the following:
+// https://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
+// https://github.com/ljharb/regexp.escape
+function findMatchingLogLine(logLines, fields, ignoreFields) {
+ function escapeRegex(input) {
+ return (typeof input === "string" ? input.replace(/[\^\$\\\.\*\+\?\(\)\[\]\{\}]/g, '\\$&')
+ : input);
+ }
+ function lineMatches(line, fields, ignoreFields) {
+ const fieldNames =
+ Object.keys(fields).filter((fieldName) => !ignoreFields.includes(fieldName));
+ return fieldNames.every((fieldName) => {
+ const fieldValue = fields[fieldName];
+ let regex = escapeRegex(fieldName) + ":? ?(" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
+ const match = line.match(regex);
+ return match && match[0];
+ });
}
- //
- // Test cases for varying values of logLevel, slowms, and sampleRate.
- //
-
- for (let testDB of[shardDB, mongosDB]) {
- for (let readWriteMode of["commands", "legacy"]) {
- // Test that all operations are logged when slowMs is < 0 and sampleRate is 1 at the
- // default logLevel.
- let [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: -1,
- logLevel: 0,
- sampleRate: 1.0
- });
- let unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, 0, () => tojson(unlogged));
-
- // Test that only some operations are logged when sampleRate is < 1 at the default
- // logLevel, even when slowMs is < 0. The actual sample rate is probabilistic, and may
- // therefore vary quite significantly from 0.5. However, we have already established
- // that with sampleRate 1 *all* ops are logged, so here it is sufficient to confirm that
- // some ops are not. We repeat the test 5 times to minimize the odds of failure.
- let sampleRateTestsRun = 0, sampleRateTestsLogged = 0;
- for (let i = 0; i < 5; i++) {
- [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: -1,
- logLevel: 0,
- sampleRate: 0.5
- });
- unlogged = getUnloggedTests(testsRun, logLines);
- sampleRateTestsLogged += (testsRun.length - unlogged.length);
- sampleRateTestsRun += testsRun.length;
- }
- assert.betweenEx(0, sampleRateTestsLogged, sampleRateTestsRun);
-
- // Test that only operations which exceed slowMs are logged when slowMs > 0 and
- // sampleRate is 1, at the default logLevel. The given value of slowMs will be applied
- // to every second op in the test, so only half of the ops should be logged.
+ for (let line of logLines) {
+ if (lineMatches(line, fields, ignoreFields)) {
+ return line;
+ }
+ }
+ return null;
+}
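+// Usage sketch (the fields shown are hypothetical):
+//   findMatchingLogLine(globalLog.log, {command: "find", nreturned: 1}, ["nShards"])
+// returns the first line containing every field not named in ignoreFields, or null.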
+
+// In cases where some tests were not logged, this helper will identify and return them.
+function getUnloggedTests(testsRun, logLines) {
+ return testsRun.filter((testCase, arrIndex) => !logLines[arrIndex]);
+}
+
+//
+// Test cases for varying values of logLevel, slowms, and sampleRate.
+//
+
+for (let testDB of [shardDB, mongosDB]) {
+ for (let readWriteMode of ["commands", "legacy"]) {
+ // Test that all operations are logged when slowMs is < 0 and sampleRate is 1 at the
+ // default logLevel.
+ let [testsRun, logLines] = runLoggingTests(
+ {db: testDB, readWriteMode: readWriteMode, slowMs: -1, logLevel: 0, sampleRate: 1.0});
+ let unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, 0, () => tojson(unlogged));
+
+ // Test that only some operations are logged when sampleRate is < 1 at the default
+ // logLevel, even when slowMs is < 0. The actual sample rate is probabilistic, and may
+ // therefore vary quite significantly from 0.5. However, we have already established
+ // that with sampleRate 1 *all* ops are logged, so here it is sufficient to confirm that
+ // some ops are not. We repeat the test 5 times to minimize the odds of failure.
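+        // With sampleRate 0.5 and roughly a dozen ops per run, five runs amount to
+        // dozens of independent coin flips, so the odds that every op is logged (or that
+        // none is) are vanishingly small and the betweenEx assertion is all but certain.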
+ let sampleRateTestsRun = 0, sampleRateTestsLogged = 0;
+ for (let i = 0; i < 5; i++) {
[testsRun, logLines] = runLoggingTests({
db: testDB,
readWriteMode: readWriteMode,
- slowMs: 1000000,
+ slowMs: -1,
logLevel: 0,
- sampleRate: 1.0
- });
- unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, Math.floor(testsRun.length / 2), () => tojson(unlogged));
-
- // Test that all operations are logged when logLevel is 1, regardless of sampleRate and
- // slowMs. We pass 'null' for slowMs to signify that a high threshold should be set
- // (such that, at logLevel 0, no operations would be logged) and that this value should
- // be applied for all operations, rather than for every second op as in the case of the
- // slowMs test.
- [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: null,
- logLevel: 1,
sampleRate: 0.5
});
unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, 0, () => tojson(unlogged));
+ sampleRateTestsLogged += (testsRun.length - unlogged.length);
+ sampleRateTestsRun += testsRun.length;
}
+ assert.betweenEx(0, sampleRateTestsLogged, sampleRateTestsRun);
+
+ // Test that only operations which exceed slowMs are logged when slowMs > 0 and
+ // sampleRate is 1, at the default logLevel. The given value of slowMs will be applied
+ // to every second op in the test, so only half of the ops should be logged.
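+        // Illustrative arithmetic: the odd-indexed half of the tests run under the huge
+        // slowMs value and go unlogged, so with N tests we expect exactly
+        // Math.floor(N / 2) unlogged entries.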
+ [testsRun, logLines] = runLoggingTests({
+ db: testDB,
+ readWriteMode: readWriteMode,
+ slowMs: 1000000,
+ logLevel: 0,
+ sampleRate: 1.0
+ });
+ unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, Math.floor(testsRun.length / 2), () => tojson(unlogged));
+
+ // Test that all operations are logged when logLevel is 1, regardless of sampleRate and
+ // slowMs. We pass 'null' for slowMs to signify that a high threshold should be set
+ // (such that, at logLevel 0, no operations would be logged) and that this value should
+ // be applied for all operations, rather than for every second op as in the case of the
+ // slowMs test.
+ [testsRun, logLines] = runLoggingTests(
+ {db: testDB, readWriteMode: readWriteMode, slowMs: null, logLevel: 1, sampleRate: 0.5});
+ unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, 0, () => tojson(unlogged));
}
- st.stop();
+}
+st.stop();
})();
diff --git a/jstests/noPassthrough/logical_session_cache_find_getmore.js b/jstests/noPassthrough/logical_session_cache_find_getmore.js
index a005b1c0ef5..4857443d032 100644
--- a/jstests/noPassthrough/logical_session_cache_find_getmore.js
+++ b/jstests/noPassthrough/logical_session_cache_find_getmore.js
@@ -1,28 +1,28 @@
(function() {
- 'use strict';
+'use strict';
- TestData.disableImplicitSessions = true;
+TestData.disableImplicitSessions = true;
- var conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
- var testDB = conn.getDB("test");
+var conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
+var testDB = conn.getDB("test");
- assert.writeOK(testDB.foo.insert({data: 1}));
- assert.writeOK(testDB.foo.insert({data: 2}));
+assert.writeOK(testDB.foo.insert({data: 1}));
+assert.writeOK(testDB.foo.insert({data: 2}));
- for (var i = 0; i < 2; i++) {
- var session = conn.startSession();
- var db = session.getDatabase("test");
- var res = assert.commandWorked(db.runCommand({find: "foo", batchSize: 1}),
- "unable to run find when the cache is not full");
- var cursorId = res.cursor.id;
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: "foo"}),
- "unable to run getMore when the cache is not full");
- }
+for (var i = 0; i < 2; i++) {
+ var session = conn.startSession();
+ var db = session.getDatabase("test");
+ var res = assert.commandWorked(db.runCommand({find: "foo", batchSize: 1}),
+ "unable to run find when the cache is not full");
+ var cursorId = res.cursor.id;
+ assert.commandWorked(db.runCommand({getMore: cursorId, collection: "foo"}),
+ "unable to run getMore when the cache is not full");
+}
- var session3 = conn.startSession();
- var db = session3.getDatabase("test");
- assert.commandFailed(db.runCommand({find: "foo", batchSize: 1}),
- "able to run find when the cache is full");
+var session3 = conn.startSession();
+var db = session3.getDatabase("test");
+assert.commandFailed(db.runCommand({find: "foo", batchSize: 1}),
+ "able to run find when the cache is full");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/logical_session_cursor_checks.js b/jstests/noPassthrough/logical_session_cursor_checks.js
index a12f46fc583..5664fb1ef28 100644
--- a/jstests/noPassthrough/logical_session_cursor_checks.js
+++ b/jstests/noPassthrough/logical_session_cursor_checks.js
@@ -1,97 +1,97 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
-
- function runFixture(Fixture) {
- var fixture = new Fixture();
- var conn = fixture.getConn();
- var admin = conn.getDB("admin");
- var data = conn.getDB("data_storage");
-
- admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
- admin.auth("admin", "admin");
- data.createUser({user: 'admin', pwd: 'admin', roles: jsTest.basicUserRoles});
- data.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
- admin.logout();
-
- data.auth("user0", "password");
- assert.writeOK(data.test.insert({name: "first", data: 1}));
- assert.writeOK(data.test.insert({name: "second", data: 2}));
-
- // Test that getMore works correctly on the same session.
- {
- var session1 = conn.startSession();
- var session2 = conn.startSession();
- var res = assert.commandWorked(
- session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
- var cursorId = res.cursor.id;
- assert.commandWorked(session1.getDatabase("data_storage")
- .runCommand({getMore: cursorId, collection: "test"}));
-
- session2.endSession();
- session1.endSession();
- }
-
- // Test that getMore correctly gives an error, when using a cursor on a different session.
- {
- var session1 = conn.startSession();
- var session2 = conn.startSession();
- var res = assert.commandWorked(
- session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
- var cursorId = res.cursor.id;
- assert.commandFailed(session2.getDatabase("data_storage")
- .runCommand({getMore: cursorId, collection: "test"}));
-
- session2.endSession();
- session1.endSession();
- }
-
- // Test that query.js driven getMore works correctly on the same session.
- {
- var session1 = conn.startSession();
- var session2 = conn.startSession();
- var cursor = session1.getDatabase("data_storage").test.find({}).batchSize(1);
- cursor.next();
- cursor.next();
- cursor.close();
-
- session2.endSession();
- session1.endSession();
- }
-
- fixture.stop();
+'use strict';
+
+function runFixture(Fixture) {
+ var fixture = new Fixture();
+ var conn = fixture.getConn();
+ var admin = conn.getDB("admin");
+ var data = conn.getDB("data_storage");
+
+ admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
+ admin.auth("admin", "admin");
+ data.createUser({user: 'admin', pwd: 'admin', roles: jsTest.basicUserRoles});
+ data.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
+ admin.logout();
+
+ data.auth("user0", "password");
+ assert.writeOK(data.test.insert({name: "first", data: 1}));
+ assert.writeOK(data.test.insert({name: "second", data: 2}));
+
+ // Test that getMore works correctly on the same session.
+ {
+ var session1 = conn.startSession();
+ var session2 = conn.startSession();
+ var res = assert.commandWorked(
+ session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
+ var cursorId = res.cursor.id;
+ assert.commandWorked(session1.getDatabase("data_storage")
+ .runCommand({getMore: cursorId, collection: "test"}));
+
+ session2.endSession();
+ session1.endSession();
}
- function Standalone() {
- this.standalone = MongoRunner.runMongod({auth: "", nojournal: ""});
+    // Test that getMore correctly gives an error when using a cursor on a different session.
+ {
+ var session1 = conn.startSession();
+ var session2 = conn.startSession();
+ var res = assert.commandWorked(
+ session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
+ var cursorId = res.cursor.id;
+ assert.commandFailed(session2.getDatabase("data_storage")
+ .runCommand({getMore: cursorId, collection: "test"}));
+
+ session2.endSession();
+ session1.endSession();
}
- Standalone.prototype.stop = function() {
- MongoRunner.stopMongod(this.standalone);
- };
-
- Standalone.prototype.getConn = function() {
- return this.standalone;
- };
-
- function Sharding() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- this.st = new ShardingTest({
- shards: 1,
- config: 1,
- mongos: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
+    // Test that query.js-driven getMore works correctly on the same session.
+ {
+ var session1 = conn.startSession();
+ var session2 = conn.startSession();
+ var cursor = session1.getDatabase("data_storage").test.find({}).batchSize(1);
+ cursor.next();
+ cursor.next();
+ cursor.close();
+
+ session2.endSession();
+ session1.endSession();
}
- Sharding.prototype.stop = function() {
- this.st.stop();
- };
+ fixture.stop();
+}
- Sharding.prototype.getConn = function() {
- return this.st.s0;
- };
+function Standalone() {
+ this.standalone = MongoRunner.runMongod({auth: "", nojournal: ""});
+}
- [Standalone, Sharding].forEach(runFixture);
+Standalone.prototype.stop = function() {
+ MongoRunner.stopMongod(this.standalone);
+};
+
+Standalone.prototype.getConn = function() {
+ return this.standalone;
+};
+
+function Sharding() {
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ this.st = new ShardingTest({
+ shards: 1,
+ config: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
+}
+
+Sharding.prototype.stop = function() {
+ this.st.stop();
+};
+
+Sharding.prototype.getConn = function() {
+ return this.st.s0;
+};
+
+[Standalone, Sharding].forEach(runFixture);
})();
diff --git a/jstests/noPassthrough/loglong.js b/jstests/noPassthrough/loglong.js
index 9e3aa577f3b..db60b5f1745 100644
--- a/jstests/noPassthrough/loglong.js
+++ b/jstests/noPassthrough/loglong.js
@@ -2,53 +2,55 @@
// make sure very long log lines get truncated
(function() {
- "use strict";
+"use strict";
- const options = {setParameter: "maxLogSizeKB=9"};
- const conn = MongoRunner.runMongod(options);
+const options = {
+ setParameter: "maxLogSizeKB=9"
+};
+const conn = MongoRunner.runMongod(options);
- var db = conn.getDB('db');
- var res = db.adminCommand({getParameter: 1, maxLogSizeKB: 1});
- assert.eq(9, res.maxLogSizeKB);
+var db = conn.getDB('db');
+var res = db.adminCommand({getParameter: 1, maxLogSizeKB: 1});
+assert.eq(9, res.maxLogSizeKB);
- var t = db.loglong;
- t.drop();
+var t = db.loglong;
+t.drop();
- t.insert({x: 1});
+t.insert({x: 1});
- var n = 0;
- var query = {x: []};
- while (Object.bsonsize(query) < 30000) {
- query.x.push(n++);
- }
+var n = 0;
+var query = {x: []};
+while (Object.bsonsize(query) < 30000) {
+ query.x.push(n++);
+}
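+// At this point the query document is just over 30000 bytes (roughly 30kB), well above
+// both maxLogSizeKB values exercised below (9kB, then 8kB), so logging the find should
+// trigger truncation in each case.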
- assertLogTruncated(db, t, 9);
+assertLogTruncated(db, t, 9);
- var res = db.adminCommand({setParameter: 1, maxLogSizeKB: 8});
- assert.eq(res.ok, 1);
+var res = db.adminCommand({setParameter: 1, maxLogSizeKB: 8});
+assert.eq(res.ok, 1);
- assertLogTruncated(db, t, 8);
+assertLogTruncated(db, t, 8);
- function assertLogTruncated(db, t, maxLogSize) {
- var before = db.adminCommand({setParameter: 1, logLevel: 1});
+function assertLogTruncated(db, t, maxLogSize) {
+ var before = db.adminCommand({setParameter: 1, logLevel: 1});
- t.findOne(query);
+ t.findOne(query);
- var x = db.adminCommand({setParameter: 1, logLevel: before.was});
- assert.eq(1, x.was, tojson(x));
+ var x = db.adminCommand({setParameter: 1, logLevel: before.was});
+ assert.eq(1, x.was, tojson(x));
- var log = db.adminCommand({getLog: "global"}).log;
+ var log = db.adminCommand({getLog: "global"}).log;
- var found = false;
- var toFind = "warning: log line attempted (16kB) over max size (" + maxLogSize + "kB)";
- for (var i = log.length - 1; i >= 0; i--) {
- if (log[i].indexOf(toFind) >= 0) {
- found = true;
- break;
- }
+ var found = false;
+ var toFind = "warning: log line attempted (16kB) over max size (" + maxLogSize + "kB)";
+ for (var i = log.length - 1; i >= 0; i--) {
+ if (log[i].indexOf(toFind) >= 0) {
+ found = true;
+ break;
}
-
- assert(found, tojson(log));
}
- MongoRunner.stopMongod(conn);
+
+ assert(found, tojson(log));
+}
+MongoRunner.stopMongod(conn);
})();
\ No newline at end of file
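Aside: the hunk above leans on two behaviors worth seeing in isolation: maxLogSizeKB is settable both at startup and at runtime, and an oversized log line is replaced by a truncation warning in the global log. A minimal sketch of that mechanism, separate from the test (the 'demo' names are hypothetical):

const demoConn = MongoRunner.runMongod({setParameter: "maxLogSizeKB=9"});
const demoDB = demoConn.getDB("test");
assert.commandWorked(demoDB.adminCommand({setParameter: 1, logLevel: 1}));
// A query whose logged representation comfortably exceeds 9 kB.
demoDB.demo.findOne({x: new Array(5000).fill(7)});
const logLines = demoDB.adminCommand({getLog: "global"}).log;
assert(logLines.some((l) => l.indexOf("over max size") >= 0), tojson(logLines));
MongoRunner.stopMongod(demoConn);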
diff --git a/jstests/noPassthrough/lookup_max_intermediate_size.js b/jstests/noPassthrough/lookup_max_intermediate_size.js
index 33f9976c058..378a4498afb 100644
--- a/jstests/noPassthrough/lookup_max_intermediate_size.js
+++ b/jstests/noPassthrough/lookup_max_intermediate_size.js
@@ -5,107 +5,106 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
-
- // Used by testPipeline to sort result documents. All _ids must be primitives.
- function compareId(a, b) {
- if (a._id < b._id) {
- return -1;
- }
- if (a._id > b._id) {
- return 1;
- }
- return 0;
- }
+"use strict";
- // Helper for testing that pipeline returns correct set of results.
- function testPipeline(pipeline, expectedResult, collection) {
- assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
- expectedResult.sort(compareId));
+// Used by testPipeline to sort result documents. All _ids must be primitives.
+function compareId(a, b) {
+ if (a._id < b._id) {
+ return -1;
+ }
+ if (a._id > b._id) {
+ return 1;
+ }
+ return 0;
+}
+
+// Helper for testing that pipeline returns correct set of results.
+function testPipeline(pipeline, expectedResult, collection) {
+ assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
+ expectedResult.sort(compareId));
+}
+
+function runTest(coll, from) {
+ const db = null; // Using the db variable is banned in this function.
+
+ //
+ // Confirm aggregation will not fail if intermediate $lookup stage exceeds 16 MB.
+ //
+ assert.commandWorked(coll.insert([
+ {"_id": 3, "same": 1},
+ ]));
+
+ const bigString = new Array(1025).toString();
+ const doc = {_id: new ObjectId(), x: bigString, same: 1};
+ const docSize = Object.bsonsize(doc);
+
+ // Number of documents needed for the $lookup result to exceed the maximum BSON document size.
+ // Using 20 MB rather than 16 MB to be safe.
+ let numDocs = Math.floor(20 * 1024 * 1024 / docSize);
+
+ let bulk = from.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({x: bigString, same: 1});
}
+ assert.commandWorked(bulk.execute());
- function runTest(coll, from) {
- const db = null; // Using the db variable is banned in this function.
+ let pipeline = [
+ {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr20mb"}},
+ {$project: {_id: 1}}
+ ];
- //
- // Confirm aggregation will not fail if intermediate $lookup stage exceeds 16 MB.
- //
- assert.commandWorked(coll.insert([
- {"_id": 3, "same": 1},
- ]));
+ let expectedResults = [{_id: 3}];
- const bigString = new Array(1025).toString();
- const doc = {_id: new ObjectId(), x: bigString, same: 1};
- const docSize = Object.bsonsize(doc);
+ testPipeline(pipeline, expectedResults, coll);
- // Number of documents needed for the $lookup result to exceed the maximum BSON document size.
- // Using 20 MB rather than 16 MB to be safe.
- let numDocs = Math.floor(20 * 1024 * 1024 / docSize);
+ //
+ // Confirm aggregation will fail if intermediate $lookup stage exceeds
+ // internalLookupStageIntermediateDocumentMaxSizeBytes, set to 30 MB.
+ //
- let bulk = from.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({x: bigString, same: 1});
- }
- assert.commandWorked(bulk.execute());
+ // Number of documents to exceed maximum intermediate $lookup stage document size.
+ // Using 35 MB total to be safe (20 MB from previous test + 15 MB).
+ numDocs = Math.floor(15 * 1024 * 1024 / docSize);
- let pipeline = [
- {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr20mb"}},
- {$project: {_id: 1}}
- ];
+ bulk = from.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({x: bigString, same: 1});
+ }
+ assert.commandWorked(bulk.execute());
- let expectedResults = [{_id: 3}];
+ pipeline = [
+ {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr35mb"}},
+ {$project: {_id: 1}}
+ ];
- testPipeline(pipeline, expectedResults, coll);
+ assertErrorCode(coll, pipeline, 4568);
+}
- //
- // Confirm aggregation will fail if intermediate $lookup stage exceeds
- // internalLookupStageIntermediateDocumentMaxSizeBytes, set to 30 MB.
- //
+// Run tests on single node.
+const standalone = MongoRunner.runMongod();
+const db = standalone.getDB("test");
- // Number of documents to exceed maximum intermediate $lookup stage document size.
- // Using 35 MB total to be safe (20 MB from previous test + 15 MB).
- numDocs = Math.floor(15 * 1024 * 1024 / docSize);
+assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}));
- bulk = from.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({x: bigString, same: 1});
- }
- assert.commandWorked(bulk.execute());
+runTest(db.lookUp, db.from);
- pipeline = [
- {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr35mb"}},
- {$project: {_id: 1}}
- ];
+MongoRunner.stopMongod(standalone);
- assertErrorCode(coll, pipeline, 4568);
+// Run tests in a sharded environment.
+const sharded = new ShardingTest({
+ mongos: 1,
+ shards: 2,
+ rs: {
+ nodes: 1,
+ setParameter: {internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}
}
+});
- // Run tests on single node.
- const standalone = MongoRunner.runMongod();
- const db = standalone.getDB("test");
-
- assert.commandWorked(db.adminCommand(
- {setParameter: 1, internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}));
-
- runTest(db.lookUp, db.from);
-
- MongoRunner.stopMongod(standalone);
-
- // Run tests in a sharded environment.
- const sharded = new ShardingTest({
- mongos: 1,
- shards: 2,
- rs: {
- nodes: 1,
- setParameter:
- {internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}
- }
- });
-
- assert(sharded.adminCommand({enableSharding: "test"}));
+assert(sharded.adminCommand({enableSharding: "test"}));
- assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed'}}));
- runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from);
+assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed'}}));
+runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from);
- sharded.stop();
+sharded.stop();
}());
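The limit exercised above is an ordinary runtime server parameter; a hedged sketch of raising it on any admin-capable connection, with the same 30 MB value the test uses:

const kLimit = 30 * 1024 * 1024;  // 30 MB, matching the test above.
assert.commandWorked(db.adminCommand(
    {setParameter: 1, internalLookupStageIntermediateDocumentMaxSizeBytes: kLimit}));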
diff --git a/jstests/noPassthrough/low_js_heap_limit.js b/jstests/noPassthrough/low_js_heap_limit.js
index 7ef5d99d583..a50072b5bf4 100644
--- a/jstests/noPassthrough/low_js_heap_limit.js
+++ b/jstests/noPassthrough/low_js_heap_limit.js
@@ -1,18 +1,18 @@
// SERVER-26596 This tests that you can set a very low heap limit for javascript, and that it will
// fail to run any javascript, but won't crash the server.
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- var db = conn.getDB('db');
+const conn = MongoRunner.runMongod();
+var db = conn.getDB('db');
- assert.commandWorked(db.adminCommand({setParameter: 1, jsHeapLimitMB: 1}));
+assert.commandWorked(db.adminCommand({setParameter: 1, jsHeapLimitMB: 1}));
- db.foo.insert({x: 1});
- const e = assert.throws(() => db.foo.findOne({$where: 'sleep(10000);'}));
- assert.eq(e.code, ErrorCodes.ExceededMemoryLimit);
+db.foo.insert({x: 1});
+const e = assert.throws(() => db.foo.findOne({$where: 'sleep(10000);'}));
+assert.eq(e.code, ErrorCodes.ExceededMemoryLimit);
- var returnCode = runProgram("mongo", "--jsHeapLimitMB=1", "--nodb", "--eval='exit();'");
- assert.eq(returnCode, 1);
- MongoRunner.stopMongod(conn);
+var returnCode = runProgram("mongo", "--jsHeapLimitMB=1", "--nodb", "--eval='exit();'");
+assert.eq(returnCode, 1);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/match_expression_optimization_failpoint.js b/jstests/noPassthrough/match_expression_optimization_failpoint.js
index 9b30b41a767..590102ba8e8 100644
--- a/jstests/noPassthrough/match_expression_optimization_failpoint.js
+++ b/jstests/noPassthrough/match_expression_optimization_failpoint.js
@@ -1,42 +1,42 @@
// Tests that match expression optimization works properly when the failpoint isn't triggered, and
// is disabled properly when it is triggered.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
- Random.setRandomSeed();
+load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
+Random.setRandomSeed();
- const conn = MongoRunner.runMongod({});
- assert.neq(conn, null, "Mongod failed to start up.");
- const testDb = conn.getDB("test");
- const coll = testDb.agg_opt;
+const conn = MongoRunner.runMongod({});
+assert.neq(conn, null, "Mongod failed to start up.");
+const testDb = conn.getDB("test");
+const coll = testDb.agg_opt;
- const kTestZip = 44100;
- for (let i = 0; i < 25; ++i) {
- assert.commandWorked(coll.insert(
- {_id: kTestZip + i, city: "Cleveland", pop: Random.randInt(100000), state: "OH"}));
- }
+const kTestZip = 44100;
+for (let i = 0; i < 25; ++i) {
+ assert.commandWorked(coll.insert(
+ {_id: kTestZip + i, city: "Cleveland", pop: Random.randInt(100000), state: "OH"}));
+}
- const pipeline = [{$match: {_id: {$in: [kTestZip]}}}, {$sort: {_id: 1}}];
+const pipeline = [{$match: {_id: {$in: [kTestZip]}}}, {$sort: {_id: 1}}];
- const enabledPlan = coll.explain().aggregate(pipeline);
- // Test that a single equality condition $in was optimized to an $eq.
- assert.eq(enabledPlan.queryPlanner.parsedQuery._id.$eq, kTestZip);
+const enabledPlan = coll.explain().aggregate(pipeline);
+// Test that a single equality condition $in was optimized to an $eq.
+assert.eq(enabledPlan.queryPlanner.parsedQuery._id.$eq, kTestZip);
- const enabledResult = coll.aggregate(pipeline).toArray();
+const enabledResult = coll.aggregate(pipeline).toArray();
- // Enable a failpoint that will cause match expression optimizations to be skipped.
- assert.commandWorked(testDb.adminCommand(
- {configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
+// Enable a failpoint that will cause match expression optimizations to be skipped.
+assert.commandWorked(testDb.adminCommand(
+ {configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
- const disabledPlan = coll.explain().aggregate(pipeline);
- // Test that the $in query still exists and hasn't been optimized to an $eq.
- assert.eq(disabledPlan.queryPlanner.parsedQuery._id.$in, [kTestZip]);
+const disabledPlan = coll.explain().aggregate(pipeline);
+// Test that the $in query still exists and hasn't been optimized to an $eq.
+assert.eq(disabledPlan.queryPlanner.parsedQuery._id.$in, [kTestZip]);
- const disabledResult = coll.aggregate(pipeline).toArray();
+const disabledResult = coll.aggregate(pipeline).toArray();
- // Test that the result is the same with and without optimizations enabled (result is sorted).
- assert.eq(enabledResult, disabledResult);
+// Test that the result is the same with and without optimizations enabled (result is sorted).
+assert.eq(enabledResult, disabledResult);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
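The alwaysOn/off toggling above is the standard failpoint pattern; a small helper sketch (the helper is ours, not part of the test library):

function setFailPoint(adminDB, name, enabled) {
    // mode may also be {times: N} or {activationProbability: p}; "alwaysOn"/"off"
    // covers the common case used in the test above.
    assert.commandWorked(
        adminDB.runCommand({configureFailPoint: name, mode: enabled ? "alwaysOn" : "off"}));
}
// Usage: setFailPoint(testDb.getSiblingDB("admin"), "disableMatchExpressionOptimization", true);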
diff --git a/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js b/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js
index fe24cb47f4d..00428d7abe2 100644
--- a/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js
+++ b/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js
@@ -1,18 +1,18 @@
// Tests the maxTransactionLockRequestTimeoutMillis server parameter.
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // Valid parameter values are in the range (-infinity, infinity).
- testNumericServerParameter("maxTransactionLockRequestTimeoutMillis",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 5 /*defaultValue*/,
- 30 /*nonDefaultValidValue*/,
- false /*hasLowerBound*/,
- "unused" /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range (-infinity, infinity).
+testNumericServerParameter("maxTransactionLockRequestTimeoutMillis",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 5 /*defaultValue*/,
+ 30 /*nonDefaultValidValue*/,
+ false /*hasLowerBound*/,
+ "unused" /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
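Stripped of the testNumericServerParameter harness, the runtime half of the check above is a plain set/get round trip; a sketch assuming a standalone connection 'conn':

assert.commandWorked(
    conn.adminCommand({setParameter: 1, maxTransactionLockRequestTimeoutMillis: 30}));
const res = assert.commandWorked(
    conn.adminCommand({getParameter: 1, maxTransactionLockRequestTimeoutMillis: 1}));
assert.eq(30, res.maxTransactionLockRequestTimeoutMillis);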
diff --git a/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js b/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
index 3f833d3220b..9b513afafe2 100644
--- a/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
+++ b/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
@@ -5,55 +5,52 @@
* @tags: [requires_sharding]
*/
(function() {
- "use strict";
-
- // maxAcceptableLogicalClockDriftSecs cannot be negative, zero, or a non-number.
- let conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: -1}});
- assert.eq(null, conn, "expected server to reject negative maxAcceptableLogicalClockDriftSecs");
-
- conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 0}});
- assert.eq(null, conn, "expected server to reject zero maxAcceptableLogicalClockDriftSecs");
-
- conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: "value"}});
- assert.eq(
- null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
-
- conn = MongoRunner.runMongod(
- {setParameter: {maxAcceptableLogicalClockDriftSecs: new Timestamp(50, 0)}});
- assert.eq(
- null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
-
- // Any positive number is valid.
- conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 1}});
- assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod({
- setParameter: {maxAcceptableLogicalClockDriftSecs: 60 * 60 * 24 * 365 * 10}
- }); // 10 years.
- assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
- MongoRunner.stopMongod(conn);
-
- // Verify maxAcceptableLogicalClockDriftSecs works as expected in a sharded cluster.
- const maxDriftValue = 100;
- const st = new ShardingTest({
- shards: 1,
- shardOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}},
- mongosOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}}
- });
- let testDB = st.s.getDB("test");
-
- // Contact cluster to get initial cluster time.
- let res = assert.commandWorked(testDB.runCommand({isMaster: 1}));
- let lt = res.$clusterTime;
-
- // Try to advance cluster time by more than the max acceptable drift, which should fail the rate
- // limiter.
- let tooFarTime = Object.assign(
- {}, lt, {clusterTime: new Timestamp(lt.clusterTime.getTime() + (maxDriftValue * 2), 0)});
- assert.commandFailedWithCode(testDB.runCommand({isMaster: 1, $clusterTime: tooFarTime}),
- ErrorCodes.ClusterTimeFailsRateLimiter,
- "expected command to not pass the rate limiter");
-
- st.stop();
+"use strict";
+
+// maxAcceptableLogicalClockDriftSecs cannot be negative, zero, or a non-number.
+let conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: -1}});
+assert.eq(null, conn, "expected server to reject negative maxAcceptableLogicalClockDriftSecs");
+
+conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 0}});
+assert.eq(null, conn, "expected server to reject zero maxAcceptableLogicalClockDriftSecs");
+
+conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: "value"}});
+assert.eq(null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
+
+conn = MongoRunner.runMongod(
+ {setParameter: {maxAcceptableLogicalClockDriftSecs: new Timestamp(50, 0)}});
+assert.eq(null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
+
+// Any positive number is valid.
+conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 1}});
+assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod(
+ {setParameter: {maxAcceptableLogicalClockDriftSecs: 60 * 60 * 24 * 365 * 10}}); // 10 years.
+assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
+MongoRunner.stopMongod(conn);
+
+// Verify maxAcceptableLogicalClockDriftSecs works as expected in a sharded cluster.
+const maxDriftValue = 100;
+const st = new ShardingTest({
+ shards: 1,
+ shardOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}},
+ mongosOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}}
+});
+let testDB = st.s.getDB("test");
+
+// Contact cluster to get initial cluster time.
+let res = assert.commandWorked(testDB.runCommand({isMaster: 1}));
+let lt = res.$clusterTime;
+
+// Try to advance cluster time by more than the max acceptable drift, which should fail the rate
+// limiter.
+let tooFarTime = Object.assign(
+ {}, lt, {clusterTime: new Timestamp(lt.clusterTime.getTime() + (maxDriftValue * 2), 0)});
+assert.commandFailedWithCode(testDB.runCommand({isMaster: 1, $clusterTime: tooFarTime}),
+ ErrorCodes.ClusterTimeFailsRateLimiter,
+ "expected command to not pass the rate limiter");
+
+st.stop();
})();
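For contrast with the rejected tooFarTime above, an advance that stays within the drift bound should pass the rate limiter. A sketch reusing the test's 'testDB', 'lt', and 'maxDriftValue' (gossiped cluster times are not signature-checked here because the cluster runs without auth):

const okTime = Object.assign(
    {}, lt, {clusterTime: new Timestamp(lt.clusterTime.getTime() + maxDriftValue - 1, 0)});
assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: okTime}));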
diff --git a/jstests/noPassthrough/max_bson_depth_parameter.js b/jstests/noPassthrough/max_bson_depth_parameter.js
index ec71f659e6d..bd39676bb98 100644
--- a/jstests/noPassthrough/max_bson_depth_parameter.js
+++ b/jstests/noPassthrough/max_bson_depth_parameter.js
@@ -3,32 +3,32 @@
* given an invalid depth.
*/
(function() {
- "use strict";
+"use strict";
- const kTestName = "max_bson_depth_parameter";
+const kTestName = "max_bson_depth_parameter";
- // Start mongod with a valid BSON depth, then test that it accepts and rejects commands
- // appropriately based on the depth.
- let conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=5"});
- assert.neq(null, conn, "Failed to start mongod");
- let testDB = conn.getDB("test");
+// Start mongod with a valid BSON depth, then test that it accepts and rejects commands
+// appropriately based on the depth.
+let conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=5"});
+assert.neq(null, conn, "Failed to start mongod");
+let testDB = conn.getDB("test");
- assert.commandWorked(testDB.runCommand({ping: 1}), "Failed to run a command on the server");
- assert.commandFailedWithCode(
- testDB.runCommand({find: "coll", filter: {x: {x: {x: {x: {x: {x: 1}}}}}}}),
- ErrorCodes.Overflow,
- "Expected server to reject command for exceeding the nesting depth limit");
+assert.commandWorked(testDB.runCommand({ping: 1}), "Failed to run a command on the server");
+assert.commandFailedWithCode(
+ testDB.runCommand({find: "coll", filter: {x: {x: {x: {x: {x: {x: 1}}}}}}}),
+ ErrorCodes.Overflow,
+ "Expected server to reject command for exceeding the nesting depth limit");
- // Confirm depth limits for $lookup.
- assert.writeOK(testDB.coll1.insert({_id: 1}));
- assert.writeOK(testDB.coll2.insert({_id: 1}));
+// Confirm depth limits for $lookup.
+assert.writeOK(testDB.coll1.insert({_id: 1}));
+assert.writeOK(testDB.coll2.insert({_id: 1}));
- assert.commandWorked(testDB.runCommand({
- aggregate: "coll1",
- pipeline: [{$lookup: {from: "coll2", as: "as", pipeline: []}}],
- cursor: {}
- }));
- assert.commandFailedWithCode(
+assert.commandWorked(testDB.runCommand({
+ aggregate: "coll1",
+ pipeline: [{$lookup: {from: "coll2", as: "as", pipeline: []}}],
+ cursor: {}
+}));
+assert.commandFailedWithCode(
testDB.runCommand({
aggregate: "coll1",
pipeline: [{
@@ -43,11 +43,11 @@
ErrorCodes.Overflow,
"Expected server to reject command for exceeding the nesting depth limit");
- // Restart mongod with a negative maximum BSON depth and test that it fails to start.
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=-4"});
- assert.eq(null, conn, "Expected mongod to fail at startup because depth was negative");
+// Restart mongod with a negative maximum BSON depth and test that it fails to start.
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=-4"});
+assert.eq(null, conn, "Expected mongod to fail at startup because depth was negative");
- conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=1"});
- assert.eq(null, conn, "Expected mongod to fail at startup because depth was too low");
+conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=1"});
+assert.eq(null, conn, "Expected mongod to fail at startup because depth was too low");
}());
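The six-level filter hard-coded above generalizes easily; a hypothetical helper for probing maxBSONDepth at arbitrary depths:

function nestedFilter(depth) {
    let doc = 1;
    for (let i = 0; i < depth; i++) {
        doc = {x: doc};
    }
    return doc;
}
// With maxBSONDepth=5, a depth-6 filter should fail with ErrorCodes.Overflow:
// testDB.runCommand({find: "coll", filter: nestedFilter(6)});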
diff --git a/jstests/noPassthrough/max_conns_override.js b/jstests/noPassthrough/max_conns_override.js
index 5fa28804856..07d012d663f 100644
--- a/jstests/noPassthrough/max_conns_override.js
+++ b/jstests/noPassthrough/max_conns_override.js
@@ -1,45 +1,45 @@
(function() {
- 'use strict';
- const configuredMaxConns = 5;
- const configuredReadyAdminThreads = 3;
- let conn = MongoRunner.runMongod({
- config: "jstests/noPassthrough/libs/max_conns_override_config.yaml",
- // We check a specific field in this executor's serverStatus section
- serviceExecutor: "synchronous",
- });
+'use strict';
+const configuredMaxConns = 5;
+const configuredReadyAdminThreads = 3;
+let conn = MongoRunner.runMongod({
+ config: "jstests/noPassthrough/libs/max_conns_override_config.yaml",
+ // We check a specific field in this executor's serverStatus section
+ serviceExecutor: "synchronous",
+});
- // Use up all the maxConns with junk connections, all of these should succeed
- let maxConns = [];
- for (let i = 0; i < 5; i++) {
- maxConns.push(new Mongo(`127.0.0.1:${conn.port}`));
- let tmpDb = maxConns[maxConns.length - 1].getDB("admin");
- assert.commandWorked(tmpDb.runCommand({isMaster: 1}));
- }
+// Use up all the maxConns with junk connections, all of these should succeed
+let maxConns = [];
+for (let i = 0; i < 5; i++) {
+ maxConns.push(new Mongo(`127.0.0.1:${conn.port}`));
+ let tmpDb = maxConns[maxConns.length - 1].getDB("admin");
+ assert.commandWorked(tmpDb.runCommand({isMaster: 1}));
+}
- // Get serverStatus to check that we have the right number of threads in the right places
- let status = conn.getDB("admin").runCommand({serverStatus: 1});
- const connectionsStatus = status["connections"];
- const reservedExecutorStatus = connectionsStatus["adminConnections"];
- const normalExecutorStatus = status["network"]["serviceExecutorTaskStats"];
+// Get serverStatus to check that we have the right number of threads in the right places
+let status = conn.getDB("admin").runCommand({serverStatus: 1});
+const connectionsStatus = status["connections"];
+const reservedExecutorStatus = connectionsStatus["adminConnections"];
+const normalExecutorStatus = status["network"]["serviceExecutorTaskStats"];
- // Log these serverStatus sections so we can debug this easily
- print("connections status section: ", tojson(connectionsStatus));
- print("normal executor status section: ", tojson(normalExecutorStatus));
+// Log these serverStatus sections so we can debug this easily
+print("connections status section: ", tojson(connectionsStatus));
+print("normal executor status section: ", tojson(normalExecutorStatus));
- // The number of "available" connections should be less than zero, because we've used
- // all of maxConns. We're over the limit!
- assert.lt(connectionsStatus["available"], 0);
- // The number of "current" connections should be greater than maxConns
- assert.gt(connectionsStatus["current"], configuredMaxConns);
- // The number of ready threads should be the number of readyThreads we configured, since
- // every thread spawns a new thread on startup
- assert.eq(reservedExecutorStatus["readyThreads"] + reservedExecutorStatus["startingThreads"],
- configuredReadyAdminThreads);
- // The number of running admin threads should be greater than the readyThreads, because
- // one is being used right now
- assert.gt(reservedExecutorStatus["threadsRunning"], reservedExecutorStatus["readyThreads"]);
- // The normal serviceExecutor should only be running maxConns number of threads
- assert.eq(normalExecutorStatus["threadsRunning"], configuredMaxConns);
+// The number of "available" connections should be less than zero, because we've used
+// all of maxConns. We're over the limit!
+assert.lt(connectionsStatus["available"], 0);
+// The number of "current" connections should be greater than maxConns
+assert.gt(connectionsStatus["current"], configuredMaxConns);
+// The number of ready threads should be the number of readyThreads we configured, since
+// every thread spawns a new thread on startup
+assert.eq(reservedExecutorStatus["readyThreads"] + reservedExecutorStatus["startingThreads"],
+ configuredReadyAdminThreads);
+// The number of running admin threads should be greater than the readyThreads, because
+// one is being used right now
+assert.gt(reservedExecutorStatus["threadsRunning"], reservedExecutorStatus["readyThreads"]);
+// The normal serviceExecutor should only be running maxConns number of threads
+assert.eq(normalExecutorStatus["threadsRunning"], configuredMaxConns);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
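If the thread-count assertions above ever fail, the raw serverStatus sections are the first thing to inspect; a sketch of pulling them by hand over a live 'conn':

const ss = assert.commandWorked(conn.getDB("admin").runCommand({serverStatus: 1}));
printjson(ss.connections);                       // current, available, adminConnections
printjson(ss.network.serviceExecutorTaskStats);  // synchronous executor thread counts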
diff --git a/jstests/noPassthrough/max_time_ms_repl_targeting.js b/jstests/noPassthrough/max_time_ms_repl_targeting.js
index 792885ab0f3..de90a7a0d24 100644
--- a/jstests/noPassthrough/max_time_ms_repl_targeting.js
+++ b/jstests/noPassthrough/max_time_ms_repl_targeting.js
@@ -1,69 +1,69 @@
// SERVER-35132 Test that we still honor maxTimeMs during replica set targeting.
// @tags: [requires_replication]
(function() {
- 'use strict';
- var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2}});
- var kDbName = 'test';
- var ns = 'test.foo';
- var mongos = st.s0;
- var testColl = mongos.getCollection(ns);
+'use strict';
+var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2}});
+var kDbName = 'test';
+var ns = 'test.foo';
+var mongos = st.s0;
+var testColl = mongos.getCollection(ns);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- // Since this test is timing sensitive, retry on failures since they could be transient.
- // If this were truly broken, it would *always* fail, so if it ever passes, this build is fine
- // (or time went backwards).
- const tryFiveTimes = function(name, f) {
- jsTestLog(`Starting test ${name}`);
+// Since this test is timing sensitive, retry on failures since they could be transient.
+// If this were truly broken, it would *always* fail, so if it ever passes, this build is fine
+// (or time went backwards).
+const tryFiveTimes = function(name, f) {
+ jsTestLog(`Starting test ${name}`);
- for (var trial = 1; trial <= 5; trial++) {
- try {
- f();
- } catch (e) {
- if (trial < 5) {
- jsTestLog(`Ignoring error during trial ${trial} of test ${name}`);
- continue;
- }
-
- jsTestLog(`Failed 5 times in test ${name}. There is probably a bug here.`);
- throw e;
+ for (var trial = 1; trial <= 5; trial++) {
+ try {
+ f();
+ } catch (e) {
+ if (trial < 5) {
+ jsTestLog(`Ignoring error during trial ${trial} of test ${name}`);
+ continue;
}
+
+ jsTestLog(`Failed 5 times in test ${name}. There is probably a bug here.`);
+ throw e;
}
- };
+ }
+};
- const runTest = function() {
- // Sanity Check
- assert.eq(testColl.find({_id: 1}).next(), {_id: 1});
+const runTest = function() {
+ // Sanity Check
+ assert.eq(testColl.find({_id: 1}).next(), {_id: 1});
- // MaxTimeMS with satisfiable readPref
- assert.eq(testColl.find({_id: 1}).readPref("secondary").maxTimeMS(1000).next(), {_id: 1});
+ // MaxTimeMS with satisfiable readPref
+ assert.eq(testColl.find({_id: 1}).readPref("secondary").maxTimeMS(1000).next(), {_id: 1});
- let ex = null;
+ let ex = null;
- // MaxTimeMS with unsatisfiable readPref
- const time = Date.timeFunc(() => {
- ex = assert.throws(() => {
- testColl.find({_id: 1})
- .readPref("secondary", [{tag: "noSuchTag"}])
- .maxTimeMS(1000)
- .next();
- });
+ // MaxTimeMS with unsatisfiable readPref
+ const time = Date.timeFunc(() => {
+ ex = assert.throws(() => {
+ testColl.find({_id: 1})
+ .readPref("secondary", [{tag: "noSuchTag"}])
+ .maxTimeMS(1000)
+ .next();
});
+ });
- assert.gte(time, 1000); // Make sure we at least waited 1 second.
- assert.lt(time, 15 * 1000); // We used to wait 20 seconds before timing out.
+ assert.gte(time, 1000); // Make sure we at least waited 1 second.
+ assert.lt(time, 15 * 1000); // We used to wait 20 seconds before timing out.
- assert.eq(ex.code, ErrorCodes.MaxTimeMSExpired);
- };
+ assert.eq(ex.code, ErrorCodes.MaxTimeMSExpired);
+};
- testColl.insert({_id: 1}, {writeConcern: {w: 2}});
- tryFiveTimes("totally unsharded", runTest);
+testColl.insert({_id: 1}, {writeConcern: {w: 2}});
+tryFiveTimes("totally unsharded", runTest);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- tryFiveTimes("sharded db", runTest);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+tryFiveTimes("sharded db", runTest);
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
- tryFiveTimes("sharded collection", runTest);
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
+tryFiveTimes("sharded collection", runTest);
- st.stop();
+st.stop();
})();
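tryFiveTimes above is a reusable shape for timing-sensitive assertions; the same idea generalized to any attempt count (names are ours):

function retryOnFailure(attempts, name, fn) {
    for (let i = 1; i <= attempts; i++) {
        try {
            return fn();
        } catch (e) {
            if (i === attempts)
                throw e;
            jsTestLog(`Ignoring error during attempt ${i} of ${name}`);
        }
    }
}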
diff --git a/jstests/noPassthrough/member_id_too_large.js b/jstests/noPassthrough/member_id_too_large.js
index 9e514d49a8e..c265df315ca 100644
--- a/jstests/noPassthrough/member_id_too_large.js
+++ b/jstests/noPassthrough/member_id_too_large.js
@@ -2,36 +2,38 @@
// members in the set, followed by waiting for writeConcern with "w" values equal to the size of the set.
// @tags: [requires_replication]
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- jsTestLog("replSetInitiate with member _id greater than number of members");
+jsTestLog("replSetInitiate with member _id greater than number of members");
- let conf = rst.getReplSetConfig();
- conf.members[1]._id = 2;
+let conf = rst.getReplSetConfig();
+conf.members[1]._id = 2;
- rst.initiate(conf);
+rst.initiate(conf);
- const dbName = "test";
- const collName = "test";
- const primary = rst.getPrimary();
- const testColl = primary.getDB(dbName).getCollection(collName);
- const doc = {a: 1};
+const dbName = "test";
+const collName = "test";
+const primary = rst.getPrimary();
+const testColl = primary.getDB(dbName).getCollection(collName);
+const doc = {
+ a: 1
+};
- assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
+assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
- jsTestLog("replSetReconfig with member _id greater than number of members");
+jsTestLog("replSetReconfig with member _id greater than number of members");
- let secondary2 = MongoRunner.runMongod({replSet: rst.name});
- conf = rst.getReplSetConfigFromNode();
- conf.version++;
- conf.members.push({_id: 5, host: secondary2.host});
- assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: conf}));
- assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
- assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 3}}));
+let secondary2 = MongoRunner.runMongod({replSet: rst.name});
+conf = rst.getReplSetConfigFromNode();
+conf.version++;
+conf.members.push({_id: 5, host: secondary2.host});
+assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: conf}));
+assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
+assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 3}}));
- MongoRunner.stopMongod(secondary2);
- rst.stopSet();
+MongoRunner.stopMongod(secondary2);
+rst.stopSet();
})();
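The reconfig above follows the standard shape for any replica set config change; in miniature, assuming 'rst' and 'primary' as in the test:

let cfg = rst.getReplSetConfigFromNode();
cfg.version++;  // Every reconfig must bump the config version.
// ...edit cfg.members here, e.g. push a new member with an arbitrary _id...
assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));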
diff --git a/jstests/noPassthrough/merge_max_time_ms.js b/jstests/noPassthrough/merge_max_time_ms.js
index e7bebd8a2cb..fb55b13604b 100644
--- a/jstests/noPassthrough/merge_max_time_ms.js
+++ b/jstests/noPassthrough/merge_max_time_ms.js
@@ -3,262 +3,251 @@
* @tags: [requires_sharding, requires_replication]
*/
(function() {
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
- load("jstests/libs/fixture_helpers.js"); // For isMongos().
- load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
- const kDBName = "test";
- const kSourceCollName = "merge_max_time_ms_source";
- const kDestCollName = "merge_max_time_ms_dest";
- const nDocs = 10;
-
- /**
- * Helper for populating the collection.
- */
- function insertDocs(coll) {
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i}));
- }
- }
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
+load("jstests/libs/fixture_helpers.js"); // For isMongos().
+load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
- /**
- * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
- * hanging.
- */
- function waitUntilServerHangsOnFailPoint(conn, fpName) {
- // Be sure that the server is hanging on the failpoint.
- assert.soon(function() {
- const filter = {"msg": fpName};
- const ops = conn.getDB("admin")
- .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
- .toArray();
- return ops.length == 1;
- });
- }
+const kDBName = "test";
+const kSourceCollName = "merge_max_time_ms_source";
+const kDestCollName = "merge_max_time_ms_dest";
+const nDocs = 10;
- /**
- * Given $merge parameters and a mongod connection 'conn', run a $merge aggregation against
- * 'conn' that hangs on the given failpoint, and ensure that the aggregation's maxTimeMS expires.
- */
- function forceAggregationToHangAndCheckMaxTimeMsExpires(
- whenMatched, whenNotMatched, conn, failPointName) {
- // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
- // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not
- // prematurely time out.
- const maxTimeMS = 1000 * 2;
-
- // Enable a failPoint so that the write will hang.
- let failpointCommand = {
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- data: {nss: kDBName + "." + kDestCollName}
- };
-
- assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
-
- // Make sure we don't run out of time before the failpoint is hit.
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
-
- // Build the parallel shell function.
- let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
- shellStr += `const destColl = db['${kDestCollName}'];`;
- shellStr += `const maxTimeMS = ${maxTimeMS};`;
- shellStr += `const whenMatched = ${tojson(whenMatched)};`;
- shellStr += `const whenNotMatched = '${whenNotMatched}';`;
- const runAggregate = function() {
- const pipeline = [{
- $merge: {
- into: destColl.getName(),
- whenMatched: whenMatched,
- whenNotMatched: whenNotMatched
- }
- }];
- const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
- assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
- };
- shellStr += `(${runAggregate.toString()})();`;
- const awaitShell = startParallelShell(shellStr, conn.port);
-
- waitUntilServerHangsOnFailPoint(conn, failPointName);
-
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
-
- // The aggregation running in the parallel shell will hang on the failpoint, burning
- // its time. Wait until the maxTimeMS has definitely expired.
- sleep(maxTimeMS + 2000);
-
- // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
- // interrupt check and terminate immediately.
- assert.commandWorked(
- conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
-
- // Wait for the parallel shell to finish.
- assert.eq(awaitShell(), 0);
+/**
+ * Helper for populating the collection.
+ */
+function insertDocs(coll) {
+ for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i}));
}
+}
- function runUnshardedTest(whenMatched, whenNotMatched, conn) {
- jsTestLog("Running unsharded test in whenMatched: " + whenMatched + " whenNotMatched: " +
- whenNotMatched);
- // The target collection will always be empty, so we do not test whenNotMatched: 'fail',
- // which would always fail.
- if (whenNotMatched == "fail") {
- return;
- }
-
- const sourceColl = conn.getDB(kDBName)[kSourceCollName];
- const destColl = conn.getDB(kDBName)[kDestCollName];
- assert.commandWorked(destColl.remove({}));
-
- // Be sure we're able to read from a cursor with a maxTimeMS set on it.
- (function() {
- // Use a long maxTimeMS, since we expect the operation to finish.
- const maxTimeMS = 1000 * 600;
- const pipeline = [{
- $merge: {
- into: destColl.getName(),
- whenMatched: whenMatched,
- whenNotMatched: whenNotMatched
- }
- }];
- assert.doesNotThrow(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
- })();
-
- assert.commandWorked(destColl.remove({}));
-
- // Force the aggregation to hang while the batch is being written. The failpoint changes
- // depending on the mode. If 'whenMatched' is set to "fail" then the implementation will end
- // up issuing insert commands instead of updates.
- const kFailPointName =
- whenMatched == "fail" ? "hangDuringBatchInsert" : "hangDuringBatchUpdate";
- forceAggregationToHangAndCheckMaxTimeMsExpires(
- whenMatched, whenNotMatched, conn, kFailPointName);
-
- assert.commandWorked(destColl.remove({}));
-
- // Force the aggregation to hang while the batch is being built.
- forceAggregationToHangAndCheckMaxTimeMsExpires(
- whenMatched, whenNotMatched, conn, "hangWhileBuildingDocumentSourceMergeBatch");
+/**
+ * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
+ * hanging.
+ */
+function waitUntilServerHangsOnFailPoint(conn, fpName) {
+ // Be sure that the server is hanging on the failpoint.
+ assert.soon(function() {
+ const filter = {"msg": fpName};
+ const ops = conn.getDB("admin")
+ .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
+ .toArray();
+ return ops.length == 1;
+ });
+}
+
+/**
+ * Given $merge parameters and a mongod connection 'conn', run a $merge aggregation against
+ * 'conn' that hangs on the given failpoint, and ensure that the aggregation's maxTimeMS expires.
+ */
+function forceAggregationToHangAndCheckMaxTimeMsExpires(
+ whenMatched, whenNotMatched, conn, failPointName) {
+ // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
+ // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not
+ // prematurely time out.
+ const maxTimeMS = 1000 * 2;
+
+ // Enable a failPoint so that the write will hang.
+ let failpointCommand = {
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ data: {nss: kDBName + "." + kDestCollName}
+ };
+
+ assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
+
+ // Make sure we don't run out of time before the failpoint is hit.
+ assert.commandWorked(conn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
+
+ // Build the parallel shell function.
+ let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
+ shellStr += `const destColl = db['${kDestCollName}'];`;
+ shellStr += `const maxTimeMS = ${maxTimeMS};`;
+ shellStr += `const whenMatched = ${tojson(whenMatched)};`;
+ shellStr += `const whenNotMatched = '${whenNotMatched}';`;
+ const runAggregate = function() {
+ const pipeline = [{
+ $merge:
+ {into: destColl.getName(), whenMatched: whenMatched, whenNotMatched: whenNotMatched}
+ }];
+ const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
+ assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
+ };
+ shellStr += `(${runAggregate.toString()})();`;
+ const awaitShell = startParallelShell(shellStr, conn.port);
+
+ waitUntilServerHangsOnFailPoint(conn, failPointName);
+
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+
+ // The aggregation running in the parallel shell will hang on the failpoint, burning
+ // its time. Wait until the maxTimeMS has definitely expired.
+ sleep(maxTimeMS + 2000);
+
+ // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
+ // interrupt check and terminate immediately.
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ // Wait for the parallel shell to finish.
+ assert.eq(awaitShell(), 0);
+}
+
+function runUnshardedTest(whenMatched, whenNotMatched, conn) {
+ jsTestLog("Running unsharded test in whenMatched: " + whenMatched +
+ " whenNotMatched: " + whenNotMatched);
+ // The target collection will always be empty, so we do not test whenNotMatched: 'fail',
+ // which would always fail.
+ if (whenNotMatched == "fail") {
+ return;
}
- // Run on a standalone.
+ const sourceColl = conn.getDB(kDBName)[kSourceCollName];
+ const destColl = conn.getDB(kDBName)[kDestCollName];
+ assert.commandWorked(destColl.remove({}));
+
+ // Be sure we're able to read from a cursor with a maxTimeMS set on it.
(function() {
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, 'mongod was unable to start up');
- insertDocs(conn.getDB(kDBName)[kSourceCollName]);
- withEachMergeMode(
- (mode) => runUnshardedTest(mode.whenMatchedMode, mode.whenNotMatchedMode, conn));
- MongoRunner.stopMongod(conn);
+ // Use a long maxTimeMS, since we expect the operation to finish.
+ const maxTimeMS = 1000 * 600;
+ const pipeline = [{
+ $merge:
+ {into: destColl.getName(), whenMatched: whenMatched, whenNotMatched: whenNotMatched}
+ }];
+ assert.doesNotThrow(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
})();
- // Runs a $merge against 'mongosConn' and verifies that the maxTimeMS value is included in the
- // command sent to mongod. Since the actual timeout can fire in mongos before the command even
- // reaches the shard, and does so unreliably, we instead set a very large timeout and verify
- // that the command sent to mongod includes the maxTimeMS.
- function runShardedTest(whenMatched, whenNotMatched, mongosConn, mongodConn, comment) {
- jsTestLog("Running sharded test in whenMatched: " + whenMatched + " whenNotMatched: " +
- whenNotMatched);
- // The target collection will always be empty, so we do not test whenNotMatched: 'fail',
- // which would always fail.
- if (whenNotMatched == "fail") {
- return;
- }
-
- // Set a large timeout since we expect the command to finish.
- const maxTimeMS = 1000 * 20;
-
- const sourceColl = mongosConn.getDB(kDBName)[kSourceCollName];
- const destColl = mongosConn.getDB(kDBName)[kDestCollName];
- assert.commandWorked(destColl.remove({}));
-
- // Make sure we don't time out in mongos before even reaching the shards.
- assert.commandWorked(mongosConn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
-
- const cursor = sourceColl.aggregate([{
- $merge: {
- into: destColl.getName(),
- whenMatched: whenMatched,
- whenNotMatched: whenNotMatched
- }
- }],
- {maxTimeMS: maxTimeMS, comment: comment});
- assert(!cursor.hasNext());
-
- // Filter the profiler entries on the existence of $merge, since aggregations through mongos
- // will include an extra aggregation with an empty pipeline to establish cursors on the
- // shards.
- assert.soon(function() {
- return mongodConn.getDB(kDBName)
- .system.profile
- .find({
- "command.aggregate": kSourceCollName,
- "command.pipeline.$merge": {"$exists": true},
- "command.comment": comment,
- "command.maxTimeMS": maxTimeMS,
- })
- .itcount() == 1;
- });
-
- assert.commandWorked(mongosConn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+ assert.commandWorked(destColl.remove({}));
+
+ // Force the aggregation to hang while the batch is being written. The failpoint changes
+ // depending on the mode. If 'whenMatched' is set to "fail" then the implementation will end
+ // up issuing insert commands instead of updates.
+ const kFailPointName =
+ whenMatched == "fail" ? "hangDuringBatchInsert" : "hangDuringBatchUpdate";
+ forceAggregationToHangAndCheckMaxTimeMsExpires(
+ whenMatched, whenNotMatched, conn, kFailPointName);
+
+ assert.commandWorked(destColl.remove({}));
+
+ // Force the aggregation to hang while the batch is being built.
+ forceAggregationToHangAndCheckMaxTimeMsExpires(
+ whenMatched, whenNotMatched, conn, "hangWhileBuildingDocumentSourceMergeBatch");
+}
+
+// Run on a standalone.
+(function() {
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, 'mongod was unable to start up');
+insertDocs(conn.getDB(kDBName)[kSourceCollName]);
+withEachMergeMode((mode) => runUnshardedTest(mode.whenMatchedMode, mode.whenNotMatchedMode, conn));
+MongoRunner.stopMongod(conn);
+})();
+
+// Runs a $merge against 'mongosConn' and verifies that the maxTimeMS value is included in the
+// command sent to mongod. Since the actual timeout can fire in mongos before the command even
+// reaches the shard, and does so unreliably, we instead set a very large timeout and verify
+// that the command sent to mongod includes the maxTimeMS.
+function runShardedTest(whenMatched, whenNotMatched, mongosConn, mongodConn, comment) {
+ jsTestLog("Running sharded test in whenMatched: " + whenMatched +
+ " whenNotMatched: " + whenNotMatched);
+ // The target collection will always be empty, so we do not test whenNotMatched: 'fail',
+ // which would always fail.
+ if (whenNotMatched == "fail") {
+ return;
}
- // Run on a sharded cluster.
- (function() {
- const st = new ShardingTest({shards: 2});
-
- // Ensure shard 0 is the primary shard. This is so that the $merge stage is guaranteed to
- // run on it.
- assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: kDBName}));
- st.ensurePrimaryShard(kDBName, st.shard0.name);
-
- // Set up the source collection to be sharded in a way such that each node will have some
- // documents for the remainder of the test.
- // shard 0: [MinKey, 5]
- // shard 1: [5, MaxKey]
- st.shardColl(kSourceCollName,
- {_id: 1}, // key
- {_id: 5}, // split
- {_id: 6}, // move
- kDBName);
- insertDocs(st.s.getDB(kDBName)[kSourceCollName]);
-
- // Start the profiler on each shard so that we can examine the $merge's maxTimeMS.
- assert.commandWorked(st.shard0.getDB(kDBName).setProfilingLevel(2));
- assert.commandWorked(st.shard1.getDB(kDBName).setProfilingLevel(2));
-
- // Run the test with 'destColl' unsharded.
- withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
- mode.whenNotMatchedMode,
- st.s,
- st.shard0,
- mode + "_unshardedDest"));
-
- // Run the test with 'destColl' sharded. This means that writes will be sent to both
- // shards, and if either one hangs, the MaxTimeMS will expire.
- // Shard the destination collection.
- st.shardColl(kDestCollName,
- {_id: 1}, // key
- {_id: 5}, // split
- {_id: 6}, // move
- kDBName);
-
- jsTestLog("Running test forcing shard " + st.shard0.name + " to hang");
- withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
- mode.whenNotMatchedMode,
- st.s,
- st.shard0,
- mode + "_shardedDest_" + st.shard0.name));
-
- jsTestLog("Running test forcing shard " + st.shard1.name + " to hang");
- withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
- mode.whenNotMatchedMode,
- st.s,
- st.shard1,
- mode + "_shardedDest_" + st.shard1.name));
-
- st.stop();
- })();
+ // Set a large timeout since we expect the command to finish.
+ const maxTimeMS = 1000 * 20;
+
+ const sourceColl = mongosConn.getDB(kDBName)[kSourceCollName];
+ const destColl = mongosConn.getDB(kDBName)[kDestCollName];
+ assert.commandWorked(destColl.remove({}));
+
+ // Make sure we don't time out in mongos before even reaching the shards.
+ assert.commandWorked(mongosConn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
+
+ const cursor = sourceColl.aggregate(
+ [{
+ $merge:
+ {into: destColl.getName(), whenMatched: whenMatched, whenNotMatched: whenNotMatched}
+ }],
+ {maxTimeMS: maxTimeMS, comment: comment});
+ assert(!cursor.hasNext());
+
+ // Filter the profiler entries on the existence of $merge, since aggregations through mongos
+ // will include an extra aggregation with an empty pipeline to establish cursors on the
+ // shards.
+ assert.soon(function() {
+ return mongodConn.getDB(kDBName)
+ .system.profile
+ .find({
+ "command.aggregate": kSourceCollName,
+ "command.pipeline.$merge": {"$exists": true},
+ "command.comment": comment,
+ "command.maxTimeMS": maxTimeMS,
+ })
+ .itcount() == 1;
+ });
+
+ assert.commandWorked(mongosConn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+}
+
+// Run on a sharded cluster.
+(function() {
+const st = new ShardingTest({shards: 2});
+
+// Ensure shard 0 is the primary shard. This is so that the $merge stage is guaranteed to
+// run on it.
+assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: kDBName}));
+st.ensurePrimaryShard(kDBName, st.shard0.name);
+
+// Set up the source collection to be sharded in a way such that each node will have some
+// documents for the remainder of the test.
+// shard 0: [MinKey, 5]
+// shard 1: [5, MaxKey]
+st.shardColl(kSourceCollName,
+ {_id: 1}, // key
+ {_id: 5}, // split
+ {_id: 6}, // move
+ kDBName);
+insertDocs(st.s.getDB(kDBName)[kSourceCollName]);
+
+// Start the profiler on each shard so that we can examine the $merge's maxTimeMS.
+assert.commandWorked(st.shard0.getDB(kDBName).setProfilingLevel(2));
+assert.commandWorked(st.shard1.getDB(kDBName).setProfilingLevel(2));
+
+// Run the test with 'destColl' unsharded.
+withEachMergeMode(
+ (mode) => runShardedTest(
+ mode.whenMatchedMode, mode.whenNotMatchedMode, st.s, st.shard0, mode + "_unshardedDest"));
+
+// Run the test with 'destColl' sharded. This means that writes will be sent to both
+// shards, and if either one hangs, the MaxTimeMS will expire.
+// Shard the destination collection.
+st.shardColl(kDestCollName,
+ {_id: 1}, // key
+ {_id: 5}, // split
+ {_id: 6}, // move
+ kDBName);
+
+jsTestLog("Running test forcing shard " + st.shard0.name + " to hang");
+withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
+ mode.whenNotMatchedMode,
+ st.s,
+ st.shard0,
+ mode + "_shardedDest_" + st.shard0.name));
+
+jsTestLog("Running test forcing shard " + st.shard1.name + " to hang");
+withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
+ mode.whenNotMatchedMode,
+ st.s,
+ st.shard1,
+ mode + "_shardedDest_" + st.shard1.name));
+
+st.stop();
+})();
})();
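The parallel-shell construction above reduces to a small skeleton: interpolate values into a script string, write the worker as a real function so it gets syntax-checked, then stringify and invoke it. A sketch with a placeholder 'conn' and a trivial command:

let script = `const maxTimeMS = ${1000 * 2};`;
const worker = function() {
    assert.commandWorked(db.adminCommand({ping: 1, maxTimeMS: maxTimeMS}));
};
script += `(${worker.toString()})();`;
const awaitWorker = startParallelShell(script, conn.port);
assert.eq(awaitWorker(), 0);  // Calling the returned function joins the shell.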
diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js
index b9ce32fe153..b5f29a8a97c 100644
--- a/jstests/noPassthrough/minvalid2.js
+++ b/jstests/noPassthrough/minvalid2.js
@@ -65,8 +65,8 @@ printjson(lastOp);
// crash.
local.replset.minvalid.update({},
{
- ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1),
- t: NumberLong(-1),
+ ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1),
+ t: NumberLong(-1),
},
{upsert: true});
printjson(local.replset.minvalid.findOne());
diff --git a/jstests/noPassthrough/mongoebench_test.js b/jstests/noPassthrough/mongoebench_test.js
index 1ae0e6ba29f..cb531963e2b 100644
--- a/jstests/noPassthrough/mongoebench_test.js
+++ b/jstests/noPassthrough/mongoebench_test.js
@@ -2,50 +2,50 @@
* Tests for the mongoebench executable.
*/
(function() {
- "use strict";
-
- load("jstests/libs/mongoebench.js"); // for runMongoeBench
-
- if (jsTest.options().storageEngine !== "mobile") {
- print("Skipping test because storage engine isn't mobile");
- return;
- }
-
- const dbpath = MongoRunner.dataPath + "mongoebench_test";
- resetDbpath(dbpath);
-
- // Test that the operations in the "pre" section of the configuration are run exactly once.
- runMongoeBench( // Force clang-format to break this line.
- {
- pre: [{
- op: "insert",
- ns: "test.mongoebench_test",
- doc: {pre: {"#SEQ_INT": {seq_id: 0, start: 0, step: 1, unique: true}}}
- }],
- ops: [{
- op: "update",
- ns: "test.mongoebench_test",
- update: {$inc: {ops: 1}},
- multi: true,
- }]
- },
- {dbpath});
-
- const output = cat(dbpath + "/perf.json");
- const stats = assert.doesNotThrow(
- JSON.parse, [output], "failed to parse output file as strict JSON: " + output);
- assert.eq({$numberLong: "0"},
- stats.errCount,
- () => "stats file reports errors but exit code was zero: " + tojson(stats));
- assert(stats.hasOwnProperty("totalOps/s"),
- () => "stats file doesn't report ops per second: " + tojson(stats));
-
- const conn = MongoRunner.runMongod({dbpath, noCleanData: true});
- assert.neq(null, conn, "failed to start mongod after running mongoebench");
-
- const db = conn.getDB("test");
- const count = db.mongoebench_test.find().itcount();
- assert.eq(1, count, "ops in 'pre' section ran more than once or didn't run at all");
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+load("jstests/libs/mongoebench.js"); // for runMongoeBench
+
+if (jsTest.options().storageEngine !== "mobile") {
+ print("Skipping test because storage engine isn't mobile");
+ return;
+}
+
+const dbpath = MongoRunner.dataPath + "mongoebench_test";
+resetDbpath(dbpath);
+
+// Test that the operations in the "pre" section of the configuration are run exactly once.
+runMongoeBench( // Force clang-format to break this line.
+ {
+ pre: [{
+ op: "insert",
+ ns: "test.mongoebench_test",
+ doc: {pre: {"#SEQ_INT": {seq_id: 0, start: 0, step: 1, unique: true}}}
+ }],
+ ops: [{
+ op: "update",
+ ns: "test.mongoebench_test",
+ update: {$inc: {ops: 1}},
+ multi: true,
+ }]
+ },
+ {dbpath});
+
+const output = cat(dbpath + "/perf.json");
+const stats = assert.doesNotThrow(
+ JSON.parse, [output], "failed to parse output file as strict JSON: " + output);
+assert.eq({$numberLong: "0"},
+ stats.errCount,
+ () => "stats file reports errors but exit code was zero: " + tojson(stats));
+assert(stats.hasOwnProperty("totalOps/s"),
+ () => "stats file doesn't report ops per second: " + tojson(stats));
+
+const conn = MongoRunner.runMongod({dbpath, noCleanData: true});
+assert.neq(null, conn, "failed to start mongod after running mongoebench");
+
+const db = conn.getDB("test");
+const count = db.mongoebench_test.find().itcount();
+assert.eq(1, count, "ops in 'pre' section ran more than once or didn't run at all");
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js b/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
index e2418738995..843d6e2631a 100644
--- a/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
+++ b/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
@@ -3,61 +3,60 @@
//
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const st = new ShardingTest({shards: 2, config: 1});
- const testDB = st.s.getDB(dbName);
-
- // Only testing the command read and write modes.
- assert(testDB.getMongo().readMode() === "commands");
- assert(testDB.getMongo().writeMode() === "commands");
-
- // Shard a collection with the only chunk on shard0.
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-
- const sourcePrimary = st.rs0.getPrimary();
- const recipientPrimary = st.rs1.getPrimary();
-
- // Disable the best-effort recipient metadata refresh after migrations and move the chunk
- // between shards so the recipient shard, shard1, is stale.
- assert.commandWorked(sourcePrimary.adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
-
- // Disable metadata refreshes on the recipient shard so it will indefinitely return StaleConfig.
- assert.commandWorked(recipientPrimary.adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
-
- // Test various read and write commands that are sent with shard versions and thus can return
- // StaleConfig. Batch writes, i.e. insert/update/delete, return batch responses with ok:1 and
- // NoProgressMade write errors when retries are exhausted, so they are excluded.
- const kCommands = [
- {aggregate: collName, pipeline: [], cursor: {}},
- {count: collName},
- {distinct: collName, query: {}, key: "_id"},
- {find: collName},
- {findAndModify: collName, query: {_id: 0}, update: {$set: {x: 1}}},
- ];
-
- kCommands.forEach((cmd) => {
- // The recipient shard should return StaleConfig until mongos exhausts its retries and
- // returns the final StaleConfig to the client.
- assert.commandFailedWithCode(testDB.runCommand(cmd),
- ErrorCodes.StaleConfig,
- "expected to fail with StaleConfig, cmd: " + tojson(cmd));
- });
-
- assert.commandWorked(sourcePrimary.adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "off"}));
- assert.commandWorked(recipientPrimary.adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
-
- st.stop();
+"use strict";
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const st = new ShardingTest({shards: 2, config: 1});
+const testDB = st.s.getDB(dbName);
+
+// Only testing the command read and write modes.
+assert(testDB.getMongo().readMode() === "commands");
+assert(testDB.getMongo().writeMode() === "commands");
+
+// Shard a collection with the only chunk on shard0.
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+const sourcePrimary = st.rs0.getPrimary();
+const recipientPrimary = st.rs1.getPrimary();
+
+// Disable the best-effort recipient metadata refresh after migrations and move the chunk
+// between shards so the recipient shard, shard1, is stale.
+assert.commandWorked(sourcePrimary.adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+
+// Disable metadata refreshes on the recipient shard so it will indefinitely return StaleConfig.
+assert.commandWorked(recipientPrimary.adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
+
+// Test various read and write commands that are sent with shard versions and thus can return
+// StaleConfig. Batch writes, i.e. insert/update/delete, return batch responses with ok:1 and
+// NoProgressMade write errors when retries are exhausted, so they are excluded.
+const kCommands = [
+ {aggregate: collName, pipeline: [], cursor: {}},
+ {count: collName},
+ {distinct: collName, query: {}, key: "_id"},
+ {find: collName},
+ {findAndModify: collName, query: {_id: 0}, update: {$set: {x: 1}}},
+];
+
+kCommands.forEach((cmd) => {
+ // The recipient shard should return StaleConfig until mongos exhausts its retries and
+ // returns the final StaleConfig to the client.
+ assert.commandFailedWithCode(testDB.runCommand(cmd),
+ ErrorCodes.StaleConfig,
+ "expected to fail with StaleConfig, cmd: " + tojson(cmd));
+});
+
+assert.commandWorked(sourcePrimary.adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "off"}));
+assert.commandWorked(recipientPrimary.adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
+
+st.stop();
})();
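
The failpoint lifecycle used throughout this test is the standard one: enable with mode
"alwaysOn", exercise the scenario, then restore with mode "off" so later work is unaffected. A
minimal sketch, with an illustrative failpoint name:

    const node = st.rs1.getPrimary();
    // Enable the failpoint for the duration of the scenario under test.
    assert.commandWorked(
        node.adminCommand({configureFailPoint: "someFailPoint", mode: "alwaysOn"}));
    // ... run commands that should observe the induced behavior ...
    // Restore the default behavior so subsequent tests are unaffected.
    assert.commandWorked(node.adminCommand({configureFailPoint: "someFailPoint", mode: "off"}));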
diff --git a/jstests/noPassthrough/nested_tojson.js b/jstests/noPassthrough/nested_tojson.js
index c5137cd3cef..886e9cf3784 100644
--- a/jstests/noPassthrough/nested_tojson.js
+++ b/jstests/noPassthrough/nested_tojson.js
@@ -1,31 +1,30 @@
(function() {
- "use strict";
+"use strict";
- const tooMuchRecursion = (1 << 16);
+const tooMuchRecursion = (1 << 16);
- const nestobj = (depth) => {
- let doc = {};
- let cur = doc;
- for (let i = 0; i < depth; i++) {
- cur[i] = {};
- cur = cur[i];
- }
- cur['a'] = 'foo';
- return doc;
- };
+const nestobj = (depth) => {
+ let doc = {};
+ let cur = doc;
+ for (let i = 0; i < depth; i++) {
+ cur[i] = {};
+ cur = cur[i];
+ }
+ cur['a'] = 'foo';
+ return doc;
+};
- const nestarr = (depth) => {
- let doc = [0];
- let cur = doc;
- for (let i = 0; i < depth; i++) {
- cur[0] = [0];
- cur = cur[0];
- }
- cur[0] = 'foo';
- return doc;
- };
+const nestarr = (depth) => {
+ let doc = [0];
+ let cur = doc;
+ for (let i = 0; i < depth; i++) {
+ cur[0] = [0];
+ cur = cur[0];
+ }
+ cur[0] = 'foo';
+ return doc;
+};
- assert.doesNotThrow(
- tojson, [nestobj(tooMuchRecursion)], 'failed to print deeply nested object');
- assert.doesNotThrow(tojson, [nestarr(tooMuchRecursion)], 'failed to print deeply nested array');
+assert.doesNotThrow(tojson, [nestobj(tooMuchRecursion)], 'failed to print deeply nested object');
+assert.doesNotThrow(tojson, [nestarr(tooMuchRecursion)], 'failed to print deeply nested array');
})();
diff --git a/jstests/noPassthrough/non_atomic_apply_ops_logging.js b/jstests/noPassthrough/non_atomic_apply_ops_logging.js
index 77654a81bb9..64ff7159d3a 100644
--- a/jstests/noPassthrough/non_atomic_apply_ops_logging.js
+++ b/jstests/noPassthrough/non_atomic_apply_ops_logging.js
@@ -2,79 +2,79 @@
// and atomic ops are collectively logged in applyOps.
// @tags: [requires_replication]
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- let testDB = primary.getDB("test");
- let oplogColl = primary.getDB("local").oplog.rs;
- let testCollName = "testColl";
- let rerenamedCollName = "rerenamedColl";
+let primary = rst.getPrimary();
+let testDB = primary.getDB("test");
+let oplogColl = primary.getDB("local").oplog.rs;
+let testCollName = "testColl";
+let rerenamedCollName = "rerenamedColl";
- testDB.runCommand({drop: testCollName});
- testDB.runCommand({drop: rerenamedCollName});
- assert.commandWorked(testDB.runCommand({create: testCollName}));
- let testColl = testDB[testCollName];
+testDB.runCommand({drop: testCollName});
+testDB.runCommand({drop: rerenamedCollName});
+assert.commandWorked(testDB.runCommand({create: testCollName}));
+let testColl = testDB[testCollName];
- // Ensure atomic apply ops logging only produces one oplog entry
- // per call to apply ops and does not log individual operations
- // separately.
- assert.commandWorked(testDB.runCommand({
- applyOps: [
- {op: "i", ns: testColl.getFullName(), o: {_id: 1, a: "foo"}},
- {op: "i", ns: testColl.getFullName(), o: {_id: 2, a: "bar"}}
- ]
- }));
- assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
- assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 0);
- // Ensure non-atomic apply ops logging produces an oplog entry for
- // each operation in the apply ops call and no record of applyOps
- // appears for these operations.
- assert.commandWorked(testDB.runCommand({
- applyOps: [
- {
- op: "c",
- ns: "test.$cmd",
- o: {
- renameCollection: "test.testColl",
- to: "test.renamedColl",
- stayTemp: false,
- dropTarget: false
- }
- },
- {
- op: "c",
- ns: "test.$cmd",
- o: {
- renameCollection: "test.renamedColl",
- to: "test." + rerenamedCollName,
- stayTemp: false,
- dropTarget: false
- }
+// Ensure atomic apply ops logging only produces one oplog entry
+// per call to apply ops and does not log individual operations
+// separately.
+assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {op: "i", ns: testColl.getFullName(), o: {_id: 1, a: "foo"}},
+ {op: "i", ns: testColl.getFullName(), o: {_id: 2, a: "bar"}}
+ ]
+}));
+assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 0);
+// Ensure non-atomic apply ops logging produces an oplog entry for
+// each operation in the apply ops call and no record of applyOps
+// appears for these operations.
+assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {
+ op: "c",
+ ns: "test.$cmd",
+ o: {
+ renameCollection: "test.testColl",
+ to: "test.renamedColl",
+ stayTemp: false,
+ dropTarget: false
}
- ]
- }));
- assert.eq(oplogColl.find({"o.renameCollection": {"$exists": true}}).count(), 2);
- assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+ },
+ {
+ op: "c",
+ ns: "test.$cmd",
+ o: {
+ renameCollection: "test.renamedColl",
+ to: "test." + rerenamedCollName,
+ stayTemp: false,
+ dropTarget: false
+ }
+ }
+ ]
+}));
+assert.eq(oplogColl.find({"o.renameCollection": {"$exists": true}}).count(), 2);
+assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
- // Ensure that applyOps respects the 'allowAtomic' boolean flag on CRUD operations that it would
- // have applied atomically.
- assert.commandWorked(testDB.createCollection(testColl.getName()));
- assert.commandFailedWithCode(testDB.runCommand({applyOps: [], allowAtomic: 'must be boolean'}),
- ErrorCodes.TypeMismatch,
- 'allowAtomic flag must be a boolean.');
- assert.commandWorked(testDB.runCommand({
- applyOps: [
- {op: "i", ns: testColl.getFullName(), o: {_id: 3, a: "augh"}},
- {op: "i", ns: testColl.getFullName(), o: {_id: 4, a: "blah"}}
- ],
- allowAtomic: false,
- }));
- assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
- assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 2);
+// Ensure that applyOps respects the 'allowAtomic' boolean flag on CRUD operations that it would
+// have applied atomically.
+assert.commandWorked(testDB.createCollection(testColl.getName()));
+assert.commandFailedWithCode(testDB.runCommand({applyOps: [], allowAtomic: 'must be boolean'}),
+ ErrorCodes.TypeMismatch,
+ 'allowAtomic flag must be a boolean.');
+assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {op: "i", ns: testColl.getFullName(), o: {_id: 3, a: "augh"}},
+ {op: "i", ns: testColl.getFullName(), o: {_id: 4, a: "blah"}}
+ ],
+ allowAtomic: false,
+}));
+assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 2);
- rst.stopSet();
+rst.stopSet();
})();
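
The assertions above distinguish the two applyOps logging modes purely by querying the oplog. A
condensed sketch of that counting technique (the namespace is illustrative):

    const oplog = primary.getDB("local").oplog.rs;
    // Atomic applyOps yields one wrapper entry containing the inner operations.
    const wrapperEntries = oplog.find({"o.applyOps": {$exists: true}}).count();
    // Non-atomic applyOps (e.g. with allowAtomic: false) logs each inner op individually.
    const individualInserts = oplog.find({op: "i", ns: "test.example"}).count();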
diff --git a/jstests/noPassthrough/noncapped_oplog_creation.js b/jstests/noPassthrough/noncapped_oplog_creation.js
index 87dc37e6ed6..577074e1bb9 100644
--- a/jstests/noPassthrough/noncapped_oplog_creation.js
+++ b/jstests/noPassthrough/noncapped_oplog_creation.js
@@ -3,36 +3,36 @@
* oplog collection.
*/
(function() {
- 'use strict';
+'use strict';
- var dbpath = MongoRunner.dataPath + 'noncapped_oplog_creation';
- resetDbpath(dbpath);
+var dbpath = MongoRunner.dataPath + 'noncapped_oplog_creation';
+resetDbpath(dbpath);
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- });
- assert.neq(null, conn, 'mongod was unable to start up');
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+});
+assert.neq(null, conn, 'mongod was unable to start up');
- var localDB = conn.getDB('local');
+var localDB = conn.getDB('local');
- // Test that explicitly creating a non-capped oplog collection fails.
- assert.commandFailed(localDB.createCollection('oplog.fake', {capped: false}));
+// Test that explicitly creating a non-capped oplog collection fails.
+assert.commandFailed(localDB.createCollection('oplog.fake', {capped: false}));
- // Test that inserting into the replica set oplog fails when implicitly creating a non-capped
- // collection.
- assert.writeError(localDB.oplog.rs.insert({}));
+// Test that inserting into the replica set oplog fails when implicitly creating a non-capped
+// collection.
+assert.writeError(localDB.oplog.rs.insert({}));
- // Test that inserting into the master-slave oplog fails when implicitly creating a non-capped
- // collection.
- assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}}));
+// Test that inserting into the master-slave oplog fails when implicitly creating a non-capped
+// collection.
+assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}}));
- // Test that creating a non-capped oplog collection fails when using $out.
- assert.writeOK(localDB.input.insert({}));
- assert.commandFailed(localDB.runCommand({
- aggregate: 'input',
- pipeline: [{$out: 'oplog.aggregation'}],
- }));
+// Test that creating a non-capped oplog collection fails when using $out.
+assert.writeOK(localDB.input.insert({}));
+assert.commandFailed(localDB.runCommand({
+ aggregate: 'input',
+ pipeline: [{$out: 'oplog.aggregation'}],
+}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
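
The invariant under test is that collections in the oplog namespace of the local database must be
capped, whether created explicitly, implicitly by a write, or via $out. A minimal sketch of the
disallowed form (the collection name is illustrative; only the failure case is asserted here):

    const localDB = conn.getDB("local");
    // Explicitly requesting a non-capped oplog collection must be rejected.
    assert.commandFailed(localDB.createCollection("oplog.example", {capped: false}));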
diff --git a/jstests/noPassthrough/ns1.js b/jstests/noPassthrough/ns1.js
index 130ddf77db2..63c7baacb0f 100644
--- a/jstests/noPassthrough/ns1.js
+++ b/jstests/noPassthrough/ns1.js
@@ -1,51 +1,51 @@
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- let mydb = conn.getDB("test_ns1");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+let mydb = conn.getDB("test_ns1");
- const check = function(n, isNew) {
- var coll = mydb["x" + n];
- if (isNew) {
- assert.eq(0, coll.count(), "pop a: " + n);
- assert.writeOK(coll.insert({_id: n}));
- }
- assert.eq(1, coll.count(), "pop b: " + n);
- assert.eq(n, coll.findOne()._id, "pop c: " + n);
- return coll;
- };
+const check = function(n, isNew) {
+ var coll = mydb["x" + n];
+ if (isNew) {
+ assert.eq(0, coll.count(), "pop a: " + n);
+ assert.writeOK(coll.insert({_id: n}));
+ }
+ assert.eq(1, coll.count(), "pop b: " + n);
+ assert.eq(n, coll.findOne()._id, "pop c: " + n);
+ return coll;
+};
- let max = 0;
+let max = 0;
- for (; max < 1000; max++) {
- check(max, true);
- }
+for (; max < 1000; max++) {
+ check(max, true);
+}
- function checkall(removed) {
- for (var i = 0; i < max; i++) {
- if (removed == i) {
- assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
- } else {
- check(i, false);
- }
+function checkall(removed) {
+ for (var i = 0; i < max; i++) {
+ if (removed == i) {
+ assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
+ } else {
+ check(i, false);
}
}
+}
- checkall();
+checkall();
- Random.srand(123124);
- const its = max / 2;
- print("its: " + its);
- for (let i = 0; i < its; i++) {
- const x = Random.randInt(max);
- check(x, false).drop();
- checkall(x);
- check(x, true);
- if ((i + 1) % 20 == 0) {
- print(i + "/" + its);
- }
+Random.srand(123124);
+const its = max / 2;
+print("its: " + its);
+for (let i = 0; i < its; i++) {
+ const x = Random.randInt(max);
+ check(x, false).drop();
+ checkall(x);
+ check(x, true);
+ if ((i + 1) % 20 == 0) {
+ print(i + "/" + its);
}
- print("yay");
+}
+print("yay");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
index 50e1be2c262..22fe3b12276 100644
--- a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
+++ b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
@@ -5,72 +5,71 @@
* @tags: [requires_journaling, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For aggPlanHasStage().
+load('jstests/libs/analyze_plan.js'); // For aggPlanHasStage().
- // Set up a 2-shard cluster.
- const st = new ShardingTest({name: jsTestName(), shards: 2, rs: {nodes: 1}});
+// Set up a 2-shard cluster.
+const st = new ShardingTest({name: jsTestName(), shards: 2, rs: {nodes: 1}});
- // Obtain a connection to the mongoS and one direct connection to each shard.
- const shard0 = st.rs0.getPrimary();
- const shard1 = st.rs1.getPrimary();
- const mongos = st.s;
+// Obtain a connection to the mongoS and one direct connection to each shard.
+const shard0 = st.rs0.getPrimary();
+const shard1 = st.rs1.getPrimary();
+const mongos = st.s;
- const configDB = mongos.getDB("config");
+const configDB = mongos.getDB("config");
- const mongosDB = mongos.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = mongos.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- const shard0DB = shard0.getDB(jsTestName());
- const shard0Coll = shard0DB.test;
+const shard0DB = shard0.getDB(jsTestName());
+const shard0Coll = shard0DB.test;
- const shard1DB = shard1.getDB(jsTestName());
- const shard1Coll = shard1DB.test;
+const shard1DB = shard1.getDB(jsTestName());
+const shard1Coll = shard1DB.test;
- const shard1AdminDB = shard1.getDB("admin");
+const shard1AdminDB = shard1.getDB("admin");
- const shardNames = [st.rs0.name, st.rs1.name];
+const shardNames = [st.rs0.name, st.rs1.name];
- // Helper function that runs a $sample aggregation, confirms that the results are correct, and
- // verifies that the expected optimized or unoptimized $sample stage ran on each shard.
- function runSampleAndConfirmResults({sampleSize, comment, expectedPlanSummaries}) {
- // Run the aggregation via mongoS with the given 'comment' parameter.
- assert.eq(
- mongosColl.aggregate([{$sample: {size: sampleSize}}], {comment: comment}).itcount(),
- sampleSize);
+// Helper function that runs a $sample aggregation, confirms that the results are correct, and
+// verifies that the expected optimized or unoptimized $sample stage ran on each shard.
+function runSampleAndConfirmResults({sampleSize, comment, expectedPlanSummaries}) {
+ // Run the aggregation via mongoS with the given 'comment' parameter.
+ assert.eq(mongosColl.aggregate([{$sample: {size: sampleSize}}], {comment: comment}).itcount(),
+ sampleSize);
- // Obtain the explain output for the aggregation.
- const explainOut =
- assert.commandWorked(mongosColl.explain().aggregate([{$sample: {size: sampleSize}}]));
+ // Obtain the explain output for the aggregation.
+ const explainOut =
+ assert.commandWorked(mongosColl.explain().aggregate([{$sample: {size: sampleSize}}]));
- // Verify that the expected $sample stage, optimized or unoptimized, ran on each shard.
- for (let idx in expectedPlanSummaries) {
- const shardExplain = explainOut.shards[shardNames[idx]];
- for (let planSummary of expectedPlanSummaries[idx]) {
- assert(aggPlanHasStage(shardExplain, planSummary));
- }
+ // Verify that the expected $sample stage, optimized or unoptimized, ran on each shard.
+ for (let idx in expectedPlanSummaries) {
+ const shardExplain = explainOut.shards[shardNames[idx]];
+ for (let planSummary of expectedPlanSummaries[idx]) {
+ assert(aggPlanHasStage(shardExplain, planSummary));
}
}
+}
- // Enable sharding on the the test database and ensure that the primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), shard0.name);
+// Enable sharding on the test database and ensure that the primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), shard0.name);
- // Shard the collection on {_id: 1}, split at {_id: 0} and move the empty upper chunk to shard1.
- st.shardColl(mongosColl.getName(), {_id: 1}, {_id: 0}, {_id: 0}, mongosDB.getName());
+// Shard the collection on {_id: 1}, split at {_id: 0} and move the empty upper chunk to shard1.
+st.shardColl(mongosColl.getName(), {_id: 1}, {_id: 0}, {_id: 0}, mongosDB.getName());
- // Write some documents to the lower chunk on shard0.
- for (let i = (-200); i < 0; ++i) {
- assert.commandWorked(mongosColl.insert({_id: i}));
- }
+// Write some documents to the lower chunk on shard0.
+for (let i = (-200); i < 0; ++i) {
+ assert.commandWorked(mongosColl.insert({_id: i}));
+}
- // Set a failpoint to hang after cloning documents to shard1 but before committing.
- shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
- shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
+// Set a failpoint to hang after cloning documents to shard1 but before committing.
+shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
+shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
- // Spawn a parallel shell to move the lower chunk from shard0 to shard1.
- const awaitMoveChunkShell = startParallelShell(`
+// Spawn a parallel shell to move the lower chunk from shard0 to shard1.
+const awaitMoveChunkShell = startParallelShell(`
assert.commandWorked(db.adminCommand({
moveChunk: "${mongosColl.getFullName()}",
find: {_id: -1},
@@ -80,75 +79,70 @@
`,
mongosDB.getMongo().port);
- // Wait until we see that all documents have been cloned to shard1.
- assert.soon(() => {
- return shard0Coll.find().itcount() === shard1Coll.find().itcount();
- });
-
- // Confirm that shard0 still owns the chunk, according to the config DB metadata.
- assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
-
- // Run a $sample aggregation without committing the chunk migration. We expect to see that the
- // optimized $sample stage was used on shard0, which own the documents. Despite the fact that
- // there are 200 documents on shard1 and we should naively have used the random-cursor
- // optimization, confirm that we instead detected that the documents were orphans and used the
- // non-optimized $sample stage.
- runSampleAndConfirmResults({
- sampleSize: 1,
- comment: "sample_with_only_orphans_on_shard1",
- expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
- });
-
- // Confirm that shard0 still owns the chunk.
- assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
-
- // Release the failpoints and wait for the parallel moveChunk shell to complete.
- shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
- shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
- awaitMoveChunkShell();
-
- // Confirm that shard1 now owns the chunk.
- assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
-
- // Move the lower chunk back to shard0.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -1},
- to: shard0.name,
- waitForDelete: true
- }));
-
- // Write 1 legitimate document and 100 orphans directly to shard1, which owns the upper chunk.
- assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
- for (let i = -100; i < 1; ++i) {
- assert.commandWorked(shard1Coll.insert({_id: i}));
- }
-
- // Confirm that there are 101 documents on shard1 and mongoS can see the 1 non-orphan.
- assert.eq(mongosColl.find({_id: {$gte: 0}}).itcount(), 1);
- assert.eq(shard1Coll.count(), 101);
-
- // Re-run the $sample aggregation. On shard1 we should again use the non-optimized stage, since
- // despite the fact that there are 101 documents present, only 1 is owned by the shard.
- runSampleAndConfirmResults({
- sampleSize: 1,
- comment: "sample_with_1_doc_100_orphans_on_shard1",
- expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
- });
-
- // Write 199 additional documents to the upper chunk which still resides on shard1.
- assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
- for (let i = 1; i < 200; ++i) {
- assert.commandWorked(mongosColl.insert({_id: i}));
- }
-
- // Re-run the $sample aggregation and confirm that the optimized stage now runs on both shards.
- runSampleAndConfirmResults({
- sampleSize: 1,
- comment: "sample_with_200_docs_100_orphans_on_shard1",
- expectedPlanSummaries:
- [["QUEUED_DATA", "MULTI_ITERATOR"], ["QUEUED_DATA", "MULTI_ITERATOR"]]
- });
-
- st.stop();
+// Wait until we see that all documents have been cloned to shard1.
+assert.soon(() => {
+ return shard0Coll.find().itcount() === shard1Coll.find().itcount();
+});
+
+// Confirm that shard0 still owns the chunk, according to the config DB metadata.
+assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
+
+// Run a $sample aggregation without committing the chunk migration. We expect the optimized
+// $sample stage to run on shard0, which owns the documents. Although there are 200 documents on
+// shard1 and the random-cursor optimization would naively apply, confirm that we instead
+// detected that those documents are orphans and used the non-optimized $sample stage.
+runSampleAndConfirmResults({
+ sampleSize: 1,
+ comment: "sample_with_only_orphans_on_shard1",
+ expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
+});
+
+// Confirm that shard0 still owns the chunk.
+assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
+
+// Release the failpoints and wait for the parallel moveChunk shell to complete.
+shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
+shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
+awaitMoveChunkShell();
+
+// Confirm that shard1 now owns the chunk.
+assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
+
+// Move the lower chunk back to shard0.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: -1}, to: shard0.name, waitForDelete: true}));
+
+// Write 1 legitimate document and 100 orphans directly to shard1, which owns the upper chunk.
+assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
+for (let i = -100; i < 1; ++i) {
+ assert.commandWorked(shard1Coll.insert({_id: i}));
+}
+
+// Confirm that there are 101 documents on shard1 and mongoS can see the 1 non-orphan.
+assert.eq(mongosColl.find({_id: {$gte: 0}}).itcount(), 1);
+assert.eq(shard1Coll.count(), 101);
+
+// Re-run the $sample aggregation. On shard1 we should again use the non-optimized stage: of the
+// 101 documents present, only 1 is owned by the shard.
+runSampleAndConfirmResults({
+ sampleSize: 1,
+ comment: "sample_with_1_doc_100_orphans_on_shard1",
+ expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
+});
+
+// Write 199 additional documents to the upper chunk which still resides on shard1.
+assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
+for (let i = 1; i < 200; ++i) {
+ assert.commandWorked(mongosColl.insert({_id: i}));
+}
+
+// Re-run the $sample aggregation and confirm that the optimized stage now runs on both shards.
+runSampleAndConfirmResults({
+ sampleSize: 1,
+ comment: "sample_with_200_docs_100_orphans_on_shard1",
+ expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["QUEUED_DATA", "MULTI_ITERATOR"]]
+});
+
+st.stop();
})();
\ No newline at end of file
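
The plan verification in runSampleAndConfirmResults() relies on aggPlanHasStage() from
jstests/libs/analyze_plan.js applied to per-shard explain output. A condensed sketch of that
check:

    const explainOut =
        assert.commandWorked(mongosColl.explain().aggregate([{$sample: {size: 1}}]));
    // Each shard's plan is keyed by its replica-set name in the 'shards' object.
    const shardExplain = explainOut.shards[st.rs0.name];
    // The optimized $sample reads from a random cursor (QUEUED_DATA / MULTI_ITERATOR);
    // the unoptimized fallback is a collection scan.
    assert(aggPlanHasStage(shardExplain, "MULTI_ITERATOR") ||
           aggPlanHasStage(shardExplain, "COLLSCAN"));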
diff --git a/jstests/noPassthrough/out_majority_read_replset.js b/jstests/noPassthrough/out_majority_read_replset.js
index 6452a8c93f9..222bc3a0503 100644
--- a/jstests/noPassthrough/out_majority_read_replset.js
+++ b/jstests/noPassthrough/out_majority_read_replset.js
@@ -1,44 +1,44 @@
// Tests the $out and read concern majority.
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
- const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
+rst.initiate();
- const name = "out_majority_read";
- const db = rst.getPrimary().getDB(name);
+const name = "out_majority_read";
+const db = rst.getPrimary().getDB(name);
- const sourceColl = db.sourceColl;
+const sourceColl = db.sourceColl;
- assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
- rst.awaitLastOpCommitted();
+assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
+rst.awaitLastOpCommitted();
- stopReplicationOnSecondaries(rst);
+stopReplicationOnSecondaries(rst);
- // Create the index that is not majority commited
- assert.commandWorked(sourceColl.createIndex({state: 1}, {name: "secondIndex"}));
+// Create the index that is not majority committed.
+assert.commandWorked(sourceColl.createIndex({state: 1}, {name: "secondIndex"}));
- // Run the $out in the parallel shell as it will block in the metadata until the shapshot is
- // advanced.
- const awaitShell = startParallelShell(`{
+// Run the $out in a parallel shell, as it will block on the collection metadata until the
+// snapshot is advanced.
+const awaitShell = startParallelShell(`{
const testDB = db.getSiblingDB("${name}");
const sourceColl = testDB.sourceColl;
@@ -55,17 +55,17 @@
}`,
db.getMongo().port);
- // Wait for the $out before restarting the replication.
- assert.soon(function() {
- const filter = {"command.aggregate": "sourceColl"};
- return assert.commandWorked(db.currentOp(filter)).inprog.length === 1;
- });
+// Wait for the $out before restarting the replication.
+assert.soon(function() {
+ const filter = {"command.aggregate": "sourceColl"};
+ return assert.commandWorked(db.currentOp(filter)).inprog.length === 1;
+});
- // Restart data replicaiton and wait until the new write becomes visible.
- restartReplicationOnSecondaries(rst);
- rst.awaitLastOpCommitted();
+// Restart data replication and wait until the new write becomes visible.
+restartReplicationOnSecondaries(rst);
+rst.awaitLastOpCommitted();
- awaitShell();
+awaitShell();
- rst.stopSet();
+rst.stopSet();
}());
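
The synchronization idiom here, polling currentOp until the blocked aggregation appears before
restarting replication, is reusable whenever a parallel shell must reach a known state first. A
minimal sketch:

    assert.soon(function() {
        // Wait until exactly one matching $out aggregation is in progress.
        const filter = {"command.aggregate": "sourceColl"};
        return assert.commandWorked(db.currentOp(filter)).inprog.length === 1;
    });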
diff --git a/jstests/noPassthrough/out_max_time_ms.js b/jstests/noPassthrough/out_max_time_ms.js
index 578ab60a6e2..29c00b6834f 100644
--- a/jstests/noPassthrough/out_max_time_ms.js
+++ b/jstests/noPassthrough/out_max_time_ms.js
@@ -3,126 +3,125 @@
* @tags: [requires_sharding, requires_replication]
*/
(function() {
- load("jstests/libs/fixture_helpers.js"); // For isMongos().
- load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
- const kDBName = "test";
- const kSourceCollName = "out_max_time_ms_source";
- const kDestCollName = "out_max_time_ms_dest";
- const nDocs = 10;
-
- /**
- * Helper for populating the collection.
- */
- function insertDocs(coll) {
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i}));
- }
- }
+load("jstests/libs/fixture_helpers.js"); // For isMongos().
+load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
- /**
- * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
- * hanging.
- */
- function waitUntilServerHangsOnFailPoint(conn, fpName) {
- // Be sure that the server is hanging on the failpoint.
- assert.soon(function() {
- const filter = {"msg": fpName};
- const ops = conn.getDB("admin")
- .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
- .toArray();
- return ops.length == 1;
- });
- }
+const kDBName = "test";
+const kSourceCollName = "out_max_time_ms_source";
+const kDestCollName = "out_max_time_ms_dest";
+const nDocs = 10;
- /**
- * Given a mongod connection, run a $out aggregation against 'conn' which hangs on the given
- * failpoint and ensure that the $out maxTimeMS expires.
- */
- function forceAggregationToHangAndCheckMaxTimeMsExpires(conn, failPointName) {
- // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
- // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not prematurely
- // time out.
- const maxTimeMS = 1000 * 2;
-
- // Enable a failPoint so that the write will hang.
- let failpointCommand = {
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- };
-
- assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
-
- // Make sure we don't run out of time before the failpoint is hit.
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
-
- // Build the parallel shell function.
- let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
- shellStr += `const destColl = db['${kDestCollName}'];`;
- shellStr += `const maxTimeMS = ${maxTimeMS};`;
- const runAggregate = function() {
- const pipeline = [{$out: destColl.getName()}];
- const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
- assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
- };
- shellStr += `(${runAggregate.toString()})();`;
- const awaitShell = startParallelShell(shellStr, conn.port);
-
- waitUntilServerHangsOnFailPoint(conn, failPointName);
-
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
-
- // The aggregation running in the parallel shell will hang on the failpoint, burning
- // its time. Wait until the maxTimeMS has definitely expired.
- sleep(maxTimeMS + 2000);
-
- // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
- // interrupt check and terminate immediately.
- assert.commandWorked(
- conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
-
- // Wait for the parallel shell to finish.
- assert.eq(awaitShell(), 0);
+/**
+ * Helper for populating the collection.
+ */
+function insertDocs(coll) {
+ for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i}));
}
+}
- function runUnshardedTest(conn) {
- jsTestLog("Running unsharded test");
-
- const sourceColl = conn.getDB(kDBName)[kSourceCollName];
- const destColl = conn.getDB(kDBName)[kDestCollName];
- assert.commandWorked(destColl.remove({}));
+/**
+ * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
+ * hanging.
+ */
+function waitUntilServerHangsOnFailPoint(conn, fpName) {
+ // Be sure that the server is hanging on the failpoint.
+ assert.soon(function() {
+ const filter = {"msg": fpName};
+ const ops = conn.getDB("admin")
+ .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
+ .toArray();
+ return ops.length == 1;
+ });
+}
- // Be sure we're able to read from a cursor with a maxTimeMS set on it.
- (function() {
- // Use a long maxTimeMS, since we expect the operation to finish.
- const maxTimeMS = 1000 * 600;
- const pipeline = [{$out: destColl.getName()}];
- const cursor = sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS});
- assert(!cursor.hasNext());
- assert.eq(destColl.countDocuments({_id: {$exists: true}}), nDocs);
- })();
+/**
+ * Given a mongod connection, run a $out aggregation against 'conn' which hangs on the given
+ * failpoint and ensure that the $out maxTimeMS expires.
+ */
+function forceAggregationToHangAndCheckMaxTimeMsExpires(conn, failPointName) {
+ // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
+ // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not prematurely
+ // time out.
+ const maxTimeMS = 1000 * 2;
+
+ // Enable a failPoint so that the write will hang.
+ let failpointCommand = {
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ };
+
+ assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
+
+ // Make sure we don't run out of time before the failpoint is hit.
+ assert.commandWorked(conn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
+
+ // Build the parallel shell function.
+ let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
+ shellStr += `const destColl = db['${kDestCollName}'];`;
+ shellStr += `const maxTimeMS = ${maxTimeMS};`;
+ const runAggregate = function() {
+ const pipeline = [{$out: destColl.getName()}];
+ const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
+ assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
+ };
+ shellStr += `(${runAggregate.toString()})();`;
+ const awaitShell = startParallelShell(shellStr, conn.port);
+
+ waitUntilServerHangsOnFailPoint(conn, failPointName);
+
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+
+ // The aggregation running in the parallel shell will hang on the failpoint, burning
+ // its time. Wait until the maxTimeMS has definitely expired.
+ sleep(maxTimeMS + 2000);
+
+ // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
+ // interrupt check and terminate immediately.
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ // Wait for the parallel shell to finish.
+ assert.eq(awaitShell(), 0);
+}
+
+function runUnshardedTest(conn) {
+ jsTestLog("Running unsharded test");
+
+ const sourceColl = conn.getDB(kDBName)[kSourceCollName];
+ const destColl = conn.getDB(kDBName)[kDestCollName];
+ assert.commandWorked(destColl.remove({}));
+
+ // Be sure we're able to read from a cursor with a maxTimeMS set on it.
+ (function() {
+ // Use a long maxTimeMS, since we expect the operation to finish.
+ const maxTimeMS = 1000 * 600;
+ const pipeline = [{$out: destColl.getName()}];
+ const cursor = sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS});
+ assert(!cursor.hasNext());
+ assert.eq(destColl.countDocuments({_id: {$exists: true}}), nDocs);
+ })();
- assert.commandWorked(destColl.remove({}));
+ assert.commandWorked(destColl.remove({}));
- // Force the aggregation to hang while the batch is being written.
- const kFailPointName = "hangDuringBatchInsert";
- forceAggregationToHangAndCheckMaxTimeMsExpires(conn, kFailPointName);
+ // Force the aggregation to hang while the batch is being written.
+ const kFailPointName = "hangDuringBatchInsert";
+ forceAggregationToHangAndCheckMaxTimeMsExpires(conn, kFailPointName);
- assert.commandWorked(destColl.remove({}));
+ assert.commandWorked(destColl.remove({}));
- // Force the aggregation to hang while the batch is being built.
- forceAggregationToHangAndCheckMaxTimeMsExpires(conn,
- "hangWhileBuildingDocumentSourceOutBatch");
- }
+ // Force the aggregation to hang while the batch is being built.
+ forceAggregationToHangAndCheckMaxTimeMsExpires(conn, "hangWhileBuildingDocumentSourceOutBatch");
+}
- // Run on a standalone.
- (function() {
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, 'mongod was unable to start up');
- insertDocs(conn.getDB(kDBName)[kSourceCollName]);
- runUnshardedTest(conn);
- MongoRunner.stopMongod(conn);
- })();
+// Run on a standalone.
+(function() {
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, 'mongod was unable to start up');
+insertDocs(conn.getDB(kDBName)[kSourceCollName]);
+runUnshardedTest(conn);
+MongoRunner.stopMongod(conn);
+})();
})();
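
The timing recipe in forceAggregationToHangAndCheckMaxTimeMsExpires() deserves emphasis: the
'maxTimeNeverTimeOut' failpoint keeps the clock from expiring before the operation reaches its
hang point, after which the budget is allowed to lapse and the hang is released. A rough sketch:

    // Keep maxTimeMS from firing while the operation travels to its failpoint.
    assert.commandWorked(conn.getDB("admin").runCommand(
        {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
    // ... start the operation and wait until it hangs on its own failpoint ...
    // Re-arm normal timeout behavior and let the maxTimeMS budget expire.
    assert.commandWorked(conn.getDB("admin").runCommand(
        {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
    sleep(maxTimeMS + 2000);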
diff --git a/jstests/noPassthrough/out_merge_majority_read.js b/jstests/noPassthrough/out_merge_majority_read.js
index 125219841b3..a525a192a42 100644
--- a/jstests/noPassthrough/out_merge_majority_read.js
+++ b/jstests/noPassthrough/out_merge_majority_read.js
@@ -8,86 +8,86 @@
*/
(function() {
- 'use strict';
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- const testServer = MongoRunner.runMongod();
- const db = testServer.getDB("test");
- if (!db.serverStatus().storageEngine.supportsCommittedReads) {
- print("Skipping read_majority.js since storageEngine doesn't support it.");
- MongoRunner.stopMongod(testServer);
- return;
- }
+'use strict';
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const testServer = MongoRunner.runMongod();
+const db = testServer.getDB("test");
+if (!db.serverStatus().storageEngine.supportsCommittedReads) {
+ print("Skipping read_majority.js since storageEngine doesn't support it.");
MongoRunner.stopMongod(testServer);
+ return;
+}
+MongoRunner.stopMongod(testServer);
- function runTests(sourceColl, mongodConnection) {
- function makeSnapshot() {
- return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
- }
- function setCommittedSnapshot(snapshot) {
- assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
- }
- const db = sourceColl.getDB();
- const targetColl = db.targetColl;
- const targetReplaceDocsColl = db.targetReplaceDocsColl;
-
- assert.commandWorked(sourceColl.remove({}));
- assert.commandWorked(targetColl.remove({}));
- assert.commandWorked(targetReplaceDocsColl.remove({}));
- setCommittedSnapshot(makeSnapshot());
-
- // Insert a single document and make it visible by advancing the snapshot.
- assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
- assert.commandWorked(targetReplaceDocsColl.insert({_id: 1, state: 'before'}));
- setCommittedSnapshot(makeSnapshot());
-
- // This insert will not be visible to $merge.
- assert.commandWorked(sourceColl.insert({_id: 2, state: 'before'}));
- assert.commandWorked(targetReplaceDocsColl.insert({_id: 2, state: 'before'}));
- // Similarly this update will not be visible.
- assert.commandWorked(sourceColl.update({_id: 1}, {state: 'after'}));
- assert.commandWorked(targetReplaceDocsColl.update({_id: 1}, {state: 'after'}));
-
- // Make sure we see only the first document.
- let res = sourceColl.aggregate([], {readConcern: {level: 'majority'}});
- assert.eq(res.itcount(), 1);
-
- // Run $merge with whenNotMatched set to "insert". It will pick only the first document.
- // Also it will not see the update ('after').
- res = sourceColl.aggregate(
- [
- {$match: {state: 'before'}},
- {$project: {state: 'merge'}},
- {
+function runTests(sourceColl, mongodConnection) {
+ function makeSnapshot() {
+ return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
+ }
+ function setCommittedSnapshot(snapshot) {
+ assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
+ }
+ const db = sourceColl.getDB();
+ const targetColl = db.targetColl;
+ const targetReplaceDocsColl = db.targetReplaceDocsColl;
+
+ assert.commandWorked(sourceColl.remove({}));
+ assert.commandWorked(targetColl.remove({}));
+ assert.commandWorked(targetReplaceDocsColl.remove({}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Insert a single document and make it visible by advancing the snapshot.
+ assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
+ assert.commandWorked(targetReplaceDocsColl.insert({_id: 1, state: 'before'}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // This insert will not be visible to $merge.
+ assert.commandWorked(sourceColl.insert({_id: 2, state: 'before'}));
+ assert.commandWorked(targetReplaceDocsColl.insert({_id: 2, state: 'before'}));
+ // Similarly this update will not be visible.
+ assert.commandWorked(sourceColl.update({_id: 1}, {state: 'after'}));
+ assert.commandWorked(targetReplaceDocsColl.update({_id: 1}, {state: 'after'}));
+
+ // Make sure we see only the first document.
+ let res = sourceColl.aggregate([], {readConcern: {level: 'majority'}});
+ assert.eq(res.itcount(), 1);
+
+ // Run $merge with whenNotMatched set to "insert". It will pick only the first document.
+ // Also it will not see the update ('after').
+ res = sourceColl.aggregate(
+ [
+ {$match: {state: 'before'}},
+ {$project: {state: 'merge'}},
+ {
$merge: {
into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
whenMatched: "fail",
whenNotMatched: "insert"
}
- }
- ],
- {readConcern: {level: 'majority'}});
-
- assert.eq(res.itcount(), 0);
-
- res = targetColl.find().sort({_id: 1});
- // Only a single document is visible ($merge did not see the second insert).
- assert.docEq(res.next(), {_id: 1, state: 'merge'});
- assert(res.isExhausted());
-
- // The same $merge but with whenMatched set to "replace".
- res = sourceColl.aggregate(
- [
- {$match: {state: 'before'}},
- {$project: {state: 'merge'}},
- {
+ }
+ ],
+ {readConcern: {level: 'majority'}});
+
+ assert.eq(res.itcount(), 0);
+
+ res = targetColl.find().sort({_id: 1});
+ // Only a single document is visible ($merge did not see the second insert).
+ assert.docEq(res.next(), {_id: 1, state: 'merge'});
+ assert(res.isExhausted());
+
+ // The same $merge but with whenMatched set to "replace".
+ res = sourceColl.aggregate(
+ [
+ {$match: {state: 'before'}},
+ {$project: {state: 'merge'}},
+ {
$merge: {
into: {
db: targetReplaceDocsColl.getDB().getName(),
@@ -96,120 +96,120 @@
whenMatched: "replace",
whenNotMatched: "insert"
}
- }
- ],
- {readConcern: {level: 'majority'}});
- assert.eq(res.itcount(), 0);
-
- setCommittedSnapshot(makeSnapshot());
-
- res = targetReplaceDocsColl.find().sort({_id: 1});
- // The first document must overwrite the update that the read portion of $merge did not see.
- assert.docEq(res.next(), {_id: 1, state: 'merge'});
- // The second document is the result of the independent insert that $merge did not see.
- assert.docEq(res.next(), {_id: 2, state: 'before'});
- assert(res.isExhausted());
-
- assert.commandWorked(targetColl.remove({}));
- setCommittedSnapshot(makeSnapshot());
-
- // Insert a document that will collide with $merge insert. The insert is not majority
- // commited.
- assert.commandWorked(targetColl.insert({_id: 1, state: 'collision'}));
-
- res = db.runCommand({
- aggregate: sourceColl.getName(),
- pipeline: [
- {$project: {state: 'merge'}},
- {
- $merge: {
- into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
- whenMatched: "fail",
- whenNotMatched: "insert"
- }
- }
- ],
- cursor: {},
- readConcern: {level: 'majority'}
- });
-
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
-
- // Remove the documents (not majority).
- assert.commandWorked(targetColl.remove({_id: 1}));
- assert.commandWorked(targetColl.remove({_id: 2}));
-
- // $merge should successfuly 'overwrite' the collection as it is 'empty' (not majority).
- res = targetReplaceDocsColl.aggregate(
- [
- {$match: {state: 'before'}},
- {$project: {state: 'merge'}},
- {
+ }
+ ],
+ {readConcern: {level: 'majority'}});
+ assert.eq(res.itcount(), 0);
+
+ setCommittedSnapshot(makeSnapshot());
+
+ res = targetReplaceDocsColl.find().sort({_id: 1});
+ // The first document must overwrite the update that the read portion of $merge did not see.
+ assert.docEq(res.next(), {_id: 1, state: 'merge'});
+ // The second document is the result of the independent insert that $merge did not see.
+ assert.docEq(res.next(), {_id: 2, state: 'before'});
+ assert(res.isExhausted());
+
+ assert.commandWorked(targetColl.remove({}));
+ setCommittedSnapshot(makeSnapshot());
+
+    // Insert a document that will collide with the $merge insert. The insert is not majority
+    // committed.
+ assert.commandWorked(targetColl.insert({_id: 1, state: 'collision'}));
+
+ res = db.runCommand({
+ aggregate: sourceColl.getName(),
+ pipeline: [
+ {$project: {state: 'merge'}},
+ {
$merge: {
into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
whenMatched: "fail",
whenNotMatched: "insert"
}
- }
- ],
- {readConcern: {level: 'majority'}});
+ }
+ ],
+ cursor: {},
+ readConcern: {level: 'majority'}
+ });
- assert.eq(res.itcount(), 0);
+ assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- setCommittedSnapshot(makeSnapshot());
+ // Remove the documents (not majority).
+ assert.commandWorked(targetColl.remove({_id: 1}));
+ assert.commandWorked(targetColl.remove({_id: 2}));
- res = targetColl.find().sort({_id: 1});
- // Only a single document is visible ($merge did not see the second insert).
- assert.docEq(res.next(), {_id: 2, state: 'merge'});
- assert(res.isExhausted());
+    // $merge should successfully 'overwrite' the collection as it is 'empty' (not majority).
+ res = targetReplaceDocsColl.aggregate(
+ [
+ {$match: {state: 'before'}},
+ {$project: {state: 'merge'}},
+ {
+ $merge: {
+ into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
+ whenMatched: "fail",
+ whenNotMatched: "insert"
+ }
+ }
+ ],
+ {readConcern: {level: 'majority'}});
+
+ assert.eq(res.itcount(), 0);
+
+ setCommittedSnapshot(makeSnapshot());
+
+ res = targetColl.find().sort({_id: 1});
+ // Only a single document is visible ($merge did not see the second insert).
+ assert.docEq(res.next(), {_id: 2, state: 'merge'});
+ assert(res.isExhausted());
+}
+
+const replTest = new ReplSetTest({
+ nodes: 1,
+ oplogSize: 2,
+ nodeOptions: {
+ setParameter: 'testingSnapshotBehaviorInIsolation=true',
+ enableMajorityReadConcern: '',
+ shardsvr: ''
}
+});
+replTest.startSet();
+// Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
+replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- const replTest = new ReplSetTest({
- nodes: 1,
- oplogSize: 2,
- nodeOptions: {
- setParameter: 'testingSnapshotBehaviorInIsolation=true',
- enableMajorityReadConcern: '',
- shardsvr: ''
- }
- });
- replTest.startSet();
- // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+const mongod = replTest.getPrimary();
- const mongod = replTest.getPrimary();
+(function testSingleNode() {
+ const db = mongod.getDB("singleNode");
+ runTests(db.collection, mongod);
+})();
- (function testSingleNode() {
- const db = mongod.getDB("singleNode");
- runTests(db.collection, mongod);
- })();
+const shardingTest = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
- const shardingTest = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
- assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
-
- (function testUnshardedDBThroughMongos() {
- const db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedDB, mongod);
- })();
+(function testUnshardedDBThroughMongos() {
+ const db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedDB, mongod);
+})();
- shardingTest.adminCommand({enableSharding: 'throughMongos'});
+shardingTest.adminCommand({enableSharding: 'throughMongos'});
- (function testUnshardedCollectionThroughMongos() {
- const db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedCollection, mongod);
- })();
+(function testUnshardedCollectionThroughMongos() {
+ const db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedCollection, mongod);
+})();
- (function testShardedCollectionThroughMongos() {
- const db = shardingTest.getDB("throughMongos");
- const collection = db.shardedCollection;
- shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
- runTests(collection, mongod);
- })();
+(function testShardedCollectionThroughMongos() {
+ const db = shardingTest.getDB("throughMongos");
+ const collection = db.shardedCollection;
+ shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
+ runTests(collection, mongod);
+})();
- shardingTest.stop();
- replTest.stopSet();
+shardingTest.stop();
+replTest.stopSet();
})();
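
Snapshot visibility in runTests() is driven by hand via the test-only 'makeSnapshot' and
'setCommittedSnapshot' commands, which are available because the node runs with
testingSnapshotBehaviorInIsolation=true. A condensed sketch:

    // Capture the current point in time as a named snapshot.
    const snapshot = assert.commandWorked(mongod.adminCommand("makeSnapshot")).name;
    // Writes up to this snapshot become visible to majority reads; later writes stay hidden.
    assert.commandWorked(mongod.adminCommand({setCommittedSnapshot: snapshot}));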
diff --git a/jstests/noPassthrough/parse_zone_info.js b/jstests/noPassthrough/parse_zone_info.js
index e8336f121da..c254d9d966d 100644
--- a/jstests/noPassthrough/parse_zone_info.js
+++ b/jstests/noPassthrough/parse_zone_info.js
@@ -1,20 +1,20 @@
// Tests the parsing of the timeZoneInfo parameter.
(function() {
- // Test that a bad file causes startup to fail.
- let conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/bad_timezone_info"});
- assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
- assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
+// Test that a bad file causes startup to fail.
+let conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/bad_timezone_info"});
+assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
+assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
- // Test that a non-existent directory causes startup to fail.
- conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/missing_directory"});
- assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
+// Test that a non-existent directory causes startup to fail.
+conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/missing_directory"});
+assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
- // Look for either old or new error message
- assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
- rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
+// Look for either the old or the new error message.
+assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
+ rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
- // Test that startup can succeed with a good file.
- conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/good_timezone_info"});
- assert.neq(conn, null, "expected launching mongod with good timezone rules to succeed");
- MongoRunner.stopMongod(conn);
+// Test that startup can succeed with a good file.
+conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/good_timezone_info"});
+assert.neq(conn, null, "expected launching mongod with good timezone rules to succeed");
+MongoRunner.stopMongod(conn);
}());
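
Startup-failure checks in this test read the accumulated shell program log rather than a returned
error object. A minimal sketch of the idiom:

    const conn =
        MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/bad_timezone_info"});
    assert.eq(conn, null, "mongod should refuse to start with bad timezone rules");
    // rawMongoProgramOutput() returns everything spawned processes have printed so far.
    assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));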
diff --git a/jstests/noPassthrough/partial_unique_indexes.js b/jstests/noPassthrough/partial_unique_indexes.js
index 2928790c65f..ca41fa4bb66 100644
--- a/jstests/noPassthrough/partial_unique_indexes.js
+++ b/jstests/noPassthrough/partial_unique_indexes.js
@@ -3,46 +3,44 @@
* crud operations correctly handle WriteConflictExceptions.
*/
(function() {
- "strict";
-
- let conn = MongoRunner.runMongod();
- let testDB = conn.getDB("test");
-
- let t = testDB.jstests_parallel_allops;
- t.drop();
-
- t.createIndex({x: 1, _id: 1}, {partialFilterExpression: {_id: {$lt: 500}}, unique: true});
- t.createIndex({y: -1, _id: 1}, {unique: true});
- t.createIndex({x: -1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: false});
- t.createIndex({y: 1}, {unique: false});
-
- let _id = {"#RAND_INT": [0, 1000]};
- let ops = [
- {op: "remove", ns: t.getFullName(), query: {_id}},
- {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {x: 1}}, upsert: true},
- {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {y: 1}}, upsert: true},
- ];
-
- let seconds = 5;
- let parallel = 5;
- let host = testDB.getMongo().host;
-
- let benchArgs = {ops, seconds, parallel, host};
-
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: 'WTWriteConflictExceptionForReads',
- mode: {activationProbability: 0.01}
- }));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.01}}));
- res = benchRun(benchArgs);
- printjson({res});
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
- res = t.validate();
- assert(res.valid, tojson(res));
- MongoRunner.stopMongod(conn);
+"strict";
+
+let conn = MongoRunner.runMongod();
+let testDB = conn.getDB("test");
+
+let t = testDB.jstests_parallel_allops;
+t.drop();
+
+t.createIndex({x: 1, _id: 1}, {partialFilterExpression: {_id: {$lt: 500}}, unique: true});
+t.createIndex({y: -1, _id: 1}, {unique: true});
+t.createIndex({x: -1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: false});
+t.createIndex({y: 1}, {unique: false});
+
+let _id = {"#RAND_INT": [0, 1000]};
+let ops = [
+ {op: "remove", ns: t.getFullName(), query: {_id}},
+ {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {x: 1}}, upsert: true},
+ {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {y: 1}}, upsert: true},
+];
+
+let seconds = 5;
+let parallel = 5;
+let host = testDB.getMongo().host;
+
+let benchArgs = {ops, seconds, parallel, host};
+
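+// Make roughly 1% of storage reads and writes throw a WriteConflictException so that the
+// benchRun CRUD workload below exercises the server's write-conflict retry handling.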
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {activationProbability: 0.01}}));
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.01}}));
+let res = benchRun(benchArgs);
+printjson({res});
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
+res = t.validate();
+assert(res.valid, tojson(res));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/pipeline_optimization_failpoint.js b/jstests/noPassthrough/pipeline_optimization_failpoint.js
index af3e294bf8e..6181da559ad 100644
--- a/jstests/noPassthrough/pipeline_optimization_failpoint.js
+++ b/jstests/noPassthrough/pipeline_optimization_failpoint.js
@@ -1,51 +1,51 @@
// Tests that pipeline optimization works properly when the failpoint isn't triggered, and is
// disabled properly when it is triggered.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
- Random.setRandomSeed();
+load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
+Random.setRandomSeed();
- const conn = MongoRunner.runMongod({});
- assert.neq(conn, null, "Mongod failed to start up.");
- const testDb = conn.getDB("test");
- const coll = testDb.agg_opt;
+const conn = MongoRunner.runMongod({});
+assert.neq(conn, null, "Mongod failed to start up.");
+const testDb = conn.getDB("test");
+const coll = testDb.agg_opt;
- const pops = new Set();
- for (let i = 0; i < 25; ++i) {
- let pop;
- do {
- pop = Random.randInt(100000);
- } while (pops.has(pop));
- pops.add(pop);
+const pops = new Set();
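+// Generate 25 documents with distinct random 'pop' values so the $sort below has a
+// deterministic order, letting us compare results with and without optimizations.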
+for (let i = 0; i < 25; ++i) {
+ let pop;
+ do {
+ pop = Random.randInt(100000);
+ } while (pops.has(pop));
+ pops.add(pop);
- assert.commandWorked(coll.insert({_id: i, city: "Cleveland", pop: pop, state: "OH"}));
- }
+ assert.commandWorked(coll.insert({_id: i, city: "Cleveland", pop: pop, state: "OH"}));
+}
- const pipeline = [{$match: {state: "OH"}}, {$sort: {pop: -1}}, {$limit: 10}];
+const pipeline = [{$match: {state: "OH"}}, {$sort: {pop: -1}}, {$limit: 10}];
- const enabledPlan = coll.explain().aggregate(pipeline);
- // Test that sort and the limit were combined.
- assert.eq(aggPlanHasStage(enabledPlan, "$limit"), false);
- assert.eq(aggPlanHasStage(enabledPlan, "$sort"), true);
- assert.eq(enabledPlan.stages.length, 2);
+const enabledPlan = coll.explain().aggregate(pipeline);
+// Test that sort and the limit were combined.
+assert.eq(aggPlanHasStage(enabledPlan, "$limit"), false);
+assert.eq(aggPlanHasStage(enabledPlan, "$sort"), true);
+assert.eq(enabledPlan.stages.length, 2);
- const enabledResult = coll.aggregate(pipeline).toArray();
+const enabledResult = coll.aggregate(pipeline).toArray();
- // Enable a failpoint that will cause pipeline optimizations to be skipped.
- assert.commandWorked(
- testDb.adminCommand({configureFailPoint: "disablePipelineOptimization", mode: "alwaysOn"}));
+// Enable a failpoint that will cause pipeline optimizations to be skipped.
+assert.commandWorked(
+ testDb.adminCommand({configureFailPoint: "disablePipelineOptimization", mode: "alwaysOn"}));
- const disabledPlan = coll.explain().aggregate(pipeline);
- // Test that the $limit still exists and hasn't been optimized away.
- assert.eq(aggPlanHasStage(disabledPlan, "$limit"), true);
- assert.eq(aggPlanHasStage(disabledPlan, "$sort"), true);
- assert.eq(disabledPlan.stages.length, 3);
+const disabledPlan = coll.explain().aggregate(pipeline);
+// Test that the $limit still exists and hasn't been optimized away.
+assert.eq(aggPlanHasStage(disabledPlan, "$limit"), true);
+assert.eq(aggPlanHasStage(disabledPlan, "$sort"), true);
+assert.eq(disabledPlan.stages.length, 3);
- const disabledResult = coll.aggregate(pipeline).toArray();
+const disabledResult = coll.aggregate(pipeline).toArray();
- // Test that the result is the same with and without optimizations enabled.
- assert.eq(enabledResult, disabledResult);
+// Test that the result is the same with and without optimizations enabled.
+assert.eq(enabledResult, disabledResult);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js
index ee36f4d96eb..cc79a81bb25 100644
--- a/jstests/noPassthrough/plan_cache_index_create.js
+++ b/jstests/noPassthrough/plan_cache_index_create.js
@@ -4,159 +4,158 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- // Returns whether there is an active index build.
- function indexBuildIsRunning(testDB, indexName) {
- const indexBuildFilter = {
- "command.createIndexes": collName,
- "command.indexes.0.name": indexName,
- "msg": /^Index Build/
- };
- const curOp =
- testDB.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: indexBuildFilter}]);
- return curOp.hasNext();
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+// Returns whether there is an active index build.
+function indexBuildIsRunning(testDB, indexName) {
+ const indexBuildFilter = {
+ "command.createIndexes": collName,
+ "command.indexes.0.name": indexName,
+ "msg": /^Index Build/
+ };
+ const curOp =
+ testDB.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: indexBuildFilter}]);
+ return curOp.hasNext();
+}
+
+// Asserts that no cached plan exists for 'query'.
+function assertDoesNotHaveCachedPlan(coll, query) {
+ const key = {query: query};
+ const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
+ assert(cmdRes.hasOwnProperty('plans') && cmdRes.plans.length == 0, tojson(cmdRes));
+}
+
+// Returns the name of the index used by the cached plan for 'query'.
+function getIndexNameForCachedPlan(coll, query) {
+ const key = {query: query};
+ const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
+ assert(Array.isArray(cmdRes.plans) && cmdRes.plans.length > 0, tojson(cmdRes));
+ return cmdRes.plans[0].reason.stats.inputStage.indexName;
+}
+
+function runTest({rst, readDB, writeDB}) {
+ const readColl = readDB.getCollection(collName);
+ const writeColl = writeDB.getCollection(collName);
+
+ assert.commandWorked(writeDB.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}}));
+
+ const bulk = writeColl.initializeUnorderedBulkOp();
+ for (let i = 0; i < 100; ++i) {
+ bulk.insert({x: i, y: i % 10, z: 0});
}
+ assert.commandWorked(bulk.execute({w: "majority"}));
+    // We start with a baseline of 2 existing indexes, since the server does not cache plans
+    // when only a single candidate plan exists.
+ assert.commandWorked(writeDB.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {key: {y: 1}, name: "less_selective", background: false},
+ {key: {z: 1}, name: "least_selective", background: false}
+ ],
+ writeConcern: {w: "majority"}
+ }));
+
+ rst.waitForAllIndexBuildsToFinish(dbName, collName);
+
+ //
+ // Confirm that the plan cache is reset on start and completion of a background index build.
+ //
+
+ // Execute a find and confirm that a cached plan exists for an existing index.
+ const filter = {x: 50, y: 0, z: 0};
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
+
+ // Enable a failpoint that will cause an index build to block just after start. This will
+ // allow us to examine PlanCache contents while index creation is in flight.
+ assert.commandWorked(
+ readDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
+
+ // Build a "most selective" index in the background.
+ TestData.dbName = dbName;
+ TestData.collName = collName;
+ const createIdxShell = startParallelShell(function() {
+ const testDB = db.getSiblingDB(TestData.dbName);
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: TestData.collName,
+ indexes: [{key: {x: 1}, name: "most_selective", background: true}],
+ writeConcern: {w: "majority"}
+ }));
+ }, writeDB.getMongo().port);
- // Returns whether a cached plan exists for 'query'.
- function assertDoesNotHaveCachedPlan(coll, query) {
- const key = {query: query};
- const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- assert(cmdRes.hasOwnProperty('plans') && cmdRes.plans.length == 0, tojson(cmdRes));
- }
+ // Confirm that the index build has started.
+ assert.soon(() => indexBuildIsRunning(readDB, "most_selective"),
+ "Index build operation not found after starting via parallelShell");
- // Returns the cached plan for 'query'.
- function getIndexNameForCachedPlan(coll, query) {
- const key = {query: query};
- const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- assert(Array.isArray(cmdRes.plans) && cmdRes.plans.length > 0, tojson(cmdRes));
- return cmdRes.plans[0].reason.stats.inputStage.indexName;
- }
+ // Confirm that there are no cached plans post index build start.
+ assertDoesNotHaveCachedPlan(readColl, filter);
- function runTest({rst, readDB, writeDB}) {
- const readColl = readDB.getCollection(collName);
- const writeColl = writeDB.getCollection(collName);
-
- assert.commandWorked(writeDB.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}}));
-
- const bulk = writeColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 100; ++i) {
- bulk.insert({x: i, y: i % 10, z: 0});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
- // We start with a baseline of 2 existing indexes as we will not cache plans when only a
- // single plan exists.
- assert.commandWorked(writeDB.runCommand({
- createIndexes: collName,
- indexes: [
- {key: {y: 1}, name: "less_selective", background: false},
- {key: {z: 1}, name: "least_selective", background: false}
- ],
- writeConcern: {w: "majority"}
- }));
+ // Execute a find and confirm that a previously built index is the cached plan.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
-
- //
- // Confirm that the plan cache is reset on start and completion of a background index build.
- //
-
- // Execute a find and confirm that a cached plan exists for an existing index.
- const filter = {x: 50, y: 0, z: 0};
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Enable a failpoint that will cause an index build to block just after start. This will
- // allow us to examine PlanCache contents while index creation is in flight.
- assert.commandWorked(readDB.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
-
- // Build a "most selective" index in the background.
- TestData.dbName = dbName;
- TestData.collName = collName;
- const createIdxShell = startParallelShell(function() {
- const testDB = db.getSiblingDB(TestData.dbName);
- assert.commandWorked(testDB.runCommand({
- createIndexes: TestData.collName,
- indexes: [{key: {x: 1}, name: "most_selective", background: true}],
- writeConcern: {w: "majority"}
- }));
-
- }, writeDB.getMongo().port);
-
- // Confirm that the index build has started.
- assert.soon(() => indexBuildIsRunning(readDB, "most_selective"),
- "Index build operation not found after starting via parallelShell");
-
- // Confirm that there are no cached plans post index build start.
- assertDoesNotHaveCachedPlan(readColl, filter);
-
- // Execute a find and confirm that a previously built index is the cached plan.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Disable the hang and wait for the index build to complete.
- assert.commandWorked(
- readDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
- assert.soon(() => !indexBuildIsRunning(readDB, "most_selective"));
- createIdxShell({checkExitSuccess: true});
-
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
-
- // Confirm that there are no cached plans post index build.
- assertDoesNotHaveCachedPlan(readColl, filter);
-
- // Now that the index has been built, execute another find and confirm that the newly
- // created index is used.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Drop the newly created index and confirm that the plan cache has been cleared.
- assert.commandWorked(writeDB.runCommand(
- {dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
- assertDoesNotHaveCachedPlan(readColl, filter);
-
- //
- // Confirm that the plan cache is reset post foreground index build.
- //
-
- // Execute a find and confirm that an existing index is in the cache.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Build a "most selective" index in the foreground.
- assert.commandWorked(writeDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: "most_selective", background: false}],
- writeConcern: {w: "majority"}
- }));
+ // Disable the hang and wait for the index build to complete.
+ assert.commandWorked(
+ readDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
+ assert.soon(() => !indexBuildIsRunning(readDB, "most_selective"));
+ createIdxShell({checkExitSuccess: true});
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
+ rst.waitForAllIndexBuildsToFinish(dbName, collName);
- // Confirm that there are no cached plans post index build.
- assertDoesNotHaveCachedPlan(readColl, filter);
+ // Confirm that there are no cached plans post index build.
+ assertDoesNotHaveCachedPlan(readColl, filter);
- // Execute a find and confirm that the newly created index is used.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
+ // Now that the index has been built, execute another find and confirm that the newly
+ // created index is used.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
- // Drop the newly created index and confirm that the plan cache has been cleared.
- assert.commandWorked(writeDB.runCommand(
- {dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
- assertDoesNotHaveCachedPlan(readColl, filter);
- }
+ // Drop the newly created index and confirm that the plan cache has been cleared.
+ assert.commandWorked(
+ writeDB.runCommand({dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
+ assertDoesNotHaveCachedPlan(readColl, filter);
+
+ //
+ // Confirm that the plan cache is reset post foreground index build.
+ //
+
+ // Execute a find and confirm that an existing index is in the cache.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
+
+ // Build a "most selective" index in the foreground.
+ assert.commandWorked(writeDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: "most_selective", background: false}],
+ writeConcern: {w: "majority"}
+ }));
+
+ rst.waitForAllIndexBuildsToFinish(dbName, collName);
+
+ // Confirm that there are no cached plans post index build.
+ assertDoesNotHaveCachedPlan(readColl, filter);
+
+ // Execute a find and confirm that the newly created index is used.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
+
+ // Drop the newly created index and confirm that the plan cache has been cleared.
+ assert.commandWorked(
+ writeDB.runCommand({dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
+ assertDoesNotHaveCachedPlan(readColl, filter);
+}
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- const primaryDB = rst.getPrimary().getDB(dbName);
- const secondaryDB = rst.getSecondary().getDB(dbName);
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+const primaryDB = rst.getPrimary().getDB(dbName);
+const secondaryDB = rst.getSecondary().getDB(dbName);
- runTest({rst: rst, readDB: primaryDB, writeDB: primaryDB});
- runTest({rst: rst, readDB: secondaryDB, writeDB: primaryDB});
+runTest({rst: rst, readDB: primaryDB, writeDB: primaryDB});
+runTest({rst: rst, readDB: secondaryDB, writeDB: primaryDB});
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/plan_cache_list_plans_new_format.js b/jstests/noPassthrough/plan_cache_list_plans_new_format.js
index f8f96d56cbf..7c29a4b7cd4 100644
--- a/jstests/noPassthrough/plan_cache_list_plans_new_format.js
+++ b/jstests/noPassthrough/plan_cache_list_plans_new_format.js
@@ -1,59 +1,79 @@
// Confirms the planCacheListPlans output format.
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("jstests_plan_cache_list_plans_new_format");
- const coll = testDB.test;
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryCacheListPlansNewOutput: true}));
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("jstests_plan_cache_list_plans_new_format");
+const coll = testDB.test;
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalQueryCacheListPlansNewOutput: true}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
- const testQuery = {"a": {"$gte": 0}, "b": 32};
- const testSort = {"c": -1};
- const testProjection = {};
+const testQuery = {
+ "a": {"$gte": 0},
+ "b": 32
+};
+const testSort = {
+ "c": -1
+};
+const testProjection = {};
- // Validate planCacheListPlans result fields for a query shape with a corresponding cache entry.
- assert.eq(0, coll.find(testQuery).sort(testSort).itcount());
- let key = {query: testQuery, sort: testSort, projection: testProjection};
- let res = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
+// Validate planCacheListPlans result fields for a query shape with a corresponding cache entry.
+assert.eq(0, coll.find(testQuery).sort(testSort).itcount());
+let key = {query: testQuery, sort: testSort, projection: testProjection};
+let res = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- // Confirm both the existence and contents of "createdFromQuery".
- assert(res.hasOwnProperty("createdFromQuery"), `planCacheListPlans should return a result with
+// Confirm both the existence and contents of "createdFromQuery".
+assert(res.hasOwnProperty("createdFromQuery"),
+ `planCacheListPlans should return a result with
field "createdFromQuery"`);
- assert.eq(res.createdFromQuery.query, testQuery, `createdFromQuery should contain field "query"
+assert.eq(res.createdFromQuery.query,
+ testQuery,
+ `createdFromQuery should contain field "query"
with value ${testQuery}, instead got "createdFromQuery": ${res.createdFromQuery}`);
- assert.eq(res.createdFromQuery.sort, testSort, `createdFromQuery should contain field "sort"
+assert.eq(res.createdFromQuery.sort,
+ testSort,
+ `createdFromQuery should contain field "sort"
with value ${testSort}, instead got "createdFromQuery": ${res.createdFromQuery}`);
- assert.eq(res.createdFromQuery.projection, testProjection, `createdFromQuery should contain
+assert.eq(res.createdFromQuery.projection, testProjection, `createdFromQuery should contain
field "projection" with value ${testProjection}, instead got "createdFromQuery":
${res.createdFromQuery}`);
- // Confirm 'res' contains 'works' and a valid 'queryHash' field.
- assert(res.hasOwnProperty("works"), `planCacheListPlans result is missing "works" field`);
- assert.gt(res.works, 0, `planCacheListPlans result field "works" should be greater than 0`);
- assert(res.hasOwnProperty("queryHash"), `planCacheListPlans result is missing "queryHash"
+// Confirm 'res' contains 'works' and a valid 'queryHash' field.
+assert(res.hasOwnProperty("works"), `planCacheListPlans result is missing "works" field`);
+assert.gt(res.works, 0, `planCacheListPlans result field "works" should be greater than 0`);
+assert(res.hasOwnProperty("queryHash"),
+ `planCacheListPlans result is missing "queryHash"
field`);
- assert.eq(8, res.queryHash.length, `planCacheListPlans result field "queryHash" should be 8
+assert.eq(8,
+ res.queryHash.length,
+ `planCacheListPlans result field "queryHash" should be 8
characters long`);
- // Validate that 'cachedPlan' and 'creationExecStats' fields exist and both have consistent
- // information about the winning plan.
- assert(res.hasOwnProperty("cachedPlan"), `planCacheListPlans result is missing field
+// Validate that 'cachedPlan' and 'creationExecStats' fields exist and both have consistent
+// information about the winning plan.
+assert(res.hasOwnProperty("cachedPlan"),
+ `planCacheListPlans result is missing field
"cachedPlan" field`);
- assert(res.hasOwnProperty("creationExecStats"), `planCacheListPlans result is missing
+assert(res.hasOwnProperty("creationExecStats"),
+ `planCacheListPlans result is missing
"creationExecStats" field`);
- assert.gte(res.creationExecStats.length, 2, `creationExecStats should contain stats for both the
+assert.gte(res.creationExecStats.length,
+ 2,
+ `creationExecStats should contain stats for both the
winning plan and all rejected plans. Thus, should contain at least 2 elements but got:
${res.creationExecStats}`);
- let cachedStage = assert(res.cachedPlan.stage, `cachedPlan should have field "stage"`);
- let winningExecStage = assert(res.creationExecStats[0].executionStages, `creationExecStats[0]
+let cachedStage = assert(res.cachedPlan.stage, `cachedPlan should have field "stage"`);
+let winningExecStage = assert(res.creationExecStats[0].executionStages,
+ `creationExecStats[0]
should have field "executionStages"`);
- assert.eq(cachedStage, winningExecStage, `Information about the winning plan in "cachedPlan" is
+assert.eq(cachedStage,
+ winningExecStage,
+ `Information about the winning plan in "cachedPlan" is
inconsistent with the first element in "creationExecStats".`);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js
index cee1aa15907..bd90c0e4942 100644
--- a/jstests/noPassthrough/plan_cache_stats_agg_source.js
+++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js
@@ -2,175 +2,167 @@
* Tests for the $planCacheStats aggregation metadata source.
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start up");
-
- const testDb = conn.getDB("test");
- const coll = testDb.plan_cache_stats_agg_source;
-
- // Returns a BSON object representing the plan cache entry for the query shape {a: 1, b: 1}.
- function getSingleEntryStats() {
- const cursor = coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]);
- assert(cursor.hasNext());
- const entryStats = cursor.next();
- assert(!cursor.hasNext());
- return entryStats;
- }
-
- // Fails when the collection does not exist.
- assert.commandFailedWithCode(
- testDb.runCommand(
- {aggregate: coll.getName(), pipeline: [{$planCacheStats: {}}], cursor: {}}),
- 50933);
-
- // Create a collection with two indices.
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
-
- // Should return an empty result set when there are no cache entries yet.
- assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
-
- // Run three distinct query shapes and check that there are three cache entries.
- assert.eq(0, coll.find({a: 1, b: 1}).itcount());
- assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
- assert.eq(0, coll.find({a: 1, b: 1, d: 1}).itcount());
- assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
-
- // We should be able to find particular cache entries by maching on the query from which the
- // entry was created.
- assert.eq(
- 1,
- coll.aggregate([{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}])
- .itcount());
- assert.eq(
- 1,
- coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, c: 1}}}])
- .itcount());
- assert.eq(
- 1,
- coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, d: 1}}}])
- .itcount());
-
- // A similar match on a query filter that was never run should turn up nothing.
- assert.eq(
- 0,
- coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, e: 1}}}])
- .itcount());
-
- // Test $group over the plan cache metadata.
- assert.eq(1,
- coll.aggregate([{$planCacheStats: {}}, {$group: {_id: "$createdFromQuery.query.a"}}])
- .itcount());
-
- // Explain should show that a $match gets absorbed into the $planCacheStats stage.
- let explain = assert.commandWorked(coll.explain().aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]));
- assert.eq(explain.stages.length, 1);
- const planCacheStatsExplain = getAggPlanStage(explain, "$planCacheStats");
- assert.neq(planCacheStatsExplain, null);
- assert(planCacheStatsExplain.hasOwnProperty("$planCacheStats"));
- assert(planCacheStatsExplain.$planCacheStats.hasOwnProperty("match"));
- assert.eq(planCacheStatsExplain.$planCacheStats.match,
- {"createdFromQuery.query": {a: 1, b: 1}});
-
- // Get the plan cache metadata for a particular query.
- let entryStats = getSingleEntryStats();
-
- // Verify that the entry has the expected 'createdFromQuery' field.
- assert(entryStats.hasOwnProperty("createdFromQuery"));
- assert.eq(entryStats.createdFromQuery.query, {a: 1, b: 1});
- assert.eq(entryStats.createdFromQuery.sort, {});
- assert.eq(entryStats.createdFromQuery.projection, {});
- assert(!entryStats.createdFromQuery.hasOwnProperty("collation"));
-
- // Verify that $planCacheStats reports the same 'queryHash' and 'planCacheKey' as explain
- // for this query shape.
- explain = assert.commandWorked(coll.find({a: 1, b: 1}).explain());
- assert.eq(entryStats.queryHash, explain.queryPlanner.queryHash);
- assert.eq(entryStats.planCacheKey, explain.queryPlanner.planCacheKey);
-
- // Since the query shape was only run once, the plan cache entry should not be active.
- assert.eq(entryStats.isActive, false);
-
- // Sanity check 'works' value.
- assert(entryStats.hasOwnProperty("works"));
- assert.gt(entryStats.works, 0);
-
- // Check that the cached plan is an index scan either on {a: 1} or {b: 1}.
- assert(entryStats.hasOwnProperty("cachedPlan"));
- const ixscanStage = getPlanStage(entryStats.cachedPlan, "IXSCAN");
- assert.neq(ixscanStage, null);
- assert(bsonWoCompare(ixscanStage.keyPattern, {a: 1}) === 0 ||
- bsonWoCompare(ixscanStage.keyPattern, {b: 1}) === 0);
-
- // Verify that the 'timeOfCreation' for the entry is now +/- one day.
- const now = new Date();
- const yesterday = (new Date()).setDate(now.getDate() - 1);
- const tomorrow = (new Date()).setDate(now.getDate() + 1);
- assert(entryStats.hasOwnProperty("timeOfCreation"));
- assert.gt(entryStats.timeOfCreation, yesterday);
- assert.lt(entryStats.timeOfCreation, tomorrow);
-
- // There should be at least two plans in 'creationExecStats', and each should have at least one
- // index scan.
- assert(entryStats.hasOwnProperty("creationExecStats"));
- assert.gte(entryStats.creationExecStats.length, 2);
- for (let plan of entryStats.creationExecStats) {
- assert(plan.hasOwnProperty("executionStages"));
- const ixscanStages = getPlanStages(plan.executionStages, "IXSCAN");
- assert.gt(ixscanStages.length, 0);
- }
-
- // Assert that the entry has an array of at least two scores, and that all scores are greater
- // than 1.
- assert(entryStats.hasOwnProperty("candidatePlanScores"));
- assert.gte(entryStats.candidatePlanScores.length, 2);
- for (let score of entryStats.candidatePlanScores) {
- assert.gt(score, 1);
- }
-
- // Should report that no index filter is set.
- assert.eq(false, entryStats.indexFilterSet);
-
- // After creating an index filter on a different query shape, $planCacheStats should still
- // report that no index filter is set. Setting a filter clears the cache, so we rerun the query
- // associated with the cache entry.
- assert.commandWorked(testDb.runCommand({
- planCacheSetFilter: coll.getName(),
- query: {a: 1, b: 1, c: 1},
- indexes: [{a: 1}, {b: 1}]
- }));
- assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
- assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
- assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
- entryStats = getSingleEntryStats();
- assert.eq(false, entryStats.indexFilterSet);
-
- // Create an index filter on shape {a: 1, b: 1}, and verify that indexFilterSet is now true.
- assert.commandWorked(testDb.runCommand(
- {planCacheSetFilter: coll.getName(), query: {a: 1, b: 1}, indexes: [{a: 1}, {b: 1}]}));
- assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
- assert.eq(0, coll.find({a: 1, b: 1}).itcount());
- assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
- entryStats = getSingleEntryStats();
- assert.eq(true, entryStats.indexFilterSet);
-
- // Should throw an error if $planCacheStats is not first.
- assert.throws(
- () => coll.aggregate([{$match: {createdFromQuery: {a: 1, b: 1}}}, {$planCacheStats: {}}]));
-
- // If the plan cache is cleared, then there are no longer any results returned by
- // $planCacheStats.
- assert.commandWorked(testDb.runCommand({planCacheClear: coll.getName()}));
- assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start up");
+
+const testDb = conn.getDB("test");
+const coll = testDb.plan_cache_stats_agg_source;
+
+// Returns a BSON object representing the plan cache entry for the query shape {a: 1, b: 1}.
+function getSingleEntryStats() {
+ const cursor =
+ coll.aggregate([{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]);
+ assert(cursor.hasNext());
+ const entryStats = cursor.next();
+ assert(!cursor.hasNext());
+ return entryStats;
+}
+
+// Fails when the collection does not exist.
+assert.commandFailedWithCode(
+ testDb.runCommand({aggregate: coll.getName(), pipeline: [{$planCacheStats: {}}], cursor: {}}),
+ 50933);
+
+// Create a collection with two indices.
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+
+// Should return an empty result set when there are no cache entries yet.
+assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
+
+// Run three distinct query shapes and check that there are three cache entries.
+assert.eq(0, coll.find({a: 1, b: 1}).itcount());
+assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
+assert.eq(0, coll.find({a: 1, b: 1, d: 1}).itcount());
+assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
+
+// We should be able to find particular cache entries by matching on the query from which the
+// entry was created.
+assert.eq(
+ 1,
+ coll.aggregate([{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}])
+ .itcount());
+assert.eq(1,
+ coll.aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, c: 1}}}])
+ .itcount());
+assert.eq(1,
+ coll.aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, d: 1}}}])
+ .itcount());
+
+// A similar match on a query filter that was never run should turn up nothing.
+assert.eq(0,
+ coll.aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, e: 1}}}])
+ .itcount());
+
+// Test $group over the plan cache metadata.
+assert.eq(1,
+ coll.aggregate([{$planCacheStats: {}}, {$group: {_id: "$createdFromQuery.query.a"}}])
+ .itcount());
+
+// Explain should show that a $match gets absorbed into the $planCacheStats stage.
+let explain = assert.commandWorked(coll.explain().aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]));
+assert.eq(explain.stages.length, 1);
+const planCacheStatsExplain = getAggPlanStage(explain, "$planCacheStats");
+assert.neq(planCacheStatsExplain, null);
+assert(planCacheStatsExplain.hasOwnProperty("$planCacheStats"));
+assert(planCacheStatsExplain.$planCacheStats.hasOwnProperty("match"));
+assert.eq(planCacheStatsExplain.$planCacheStats.match, {"createdFromQuery.query": {a: 1, b: 1}});
+
+// Get the plan cache metadata for a particular query.
+let entryStats = getSingleEntryStats();
+
+// Verify that the entry has the expected 'createdFromQuery' field.
+assert(entryStats.hasOwnProperty("createdFromQuery"));
+assert.eq(entryStats.createdFromQuery.query, {a: 1, b: 1});
+assert.eq(entryStats.createdFromQuery.sort, {});
+assert.eq(entryStats.createdFromQuery.projection, {});
+assert(!entryStats.createdFromQuery.hasOwnProperty("collation"));
+
+// Verify that $planCacheStats reports the same 'queryHash' and 'planCacheKey' as explain
+// for this query shape.
+explain = assert.commandWorked(coll.find({a: 1, b: 1}).explain());
+assert.eq(entryStats.queryHash, explain.queryPlanner.queryHash);
+assert.eq(entryStats.planCacheKey, explain.queryPlanner.planCacheKey);
+
+// Since the query shape was only run once, the plan cache entry should not be active.
+assert.eq(entryStats.isActive, false);
+
+// Sanity check 'works' value.
+assert(entryStats.hasOwnProperty("works"));
+assert.gt(entryStats.works, 0);
+
+// Check that the cached plan is an index scan either on {a: 1} or {b: 1}.
+assert(entryStats.hasOwnProperty("cachedPlan"));
+const ixscanStage = getPlanStage(entryStats.cachedPlan, "IXSCAN");
+assert.neq(ixscanStage, null);
+assert(bsonWoCompare(ixscanStage.keyPattern, {a: 1}) === 0 ||
+ bsonWoCompare(ixscanStage.keyPattern, {b: 1}) === 0);
+
+// Verify that the 'timeOfCreation' for the entry is within one day of the current time.
+const now = new Date();
+const yesterday = (new Date()).setDate(now.getDate() - 1);
+const tomorrow = (new Date()).setDate(now.getDate() + 1);
+assert(entryStats.hasOwnProperty("timeOfCreation"));
+assert.gt(entryStats.timeOfCreation, yesterday);
+assert.lt(entryStats.timeOfCreation, tomorrow);
+
+// There should be at least two plans in 'creationExecStats', and each should have at least one
+// index scan.
+assert(entryStats.hasOwnProperty("creationExecStats"));
+assert.gte(entryStats.creationExecStats.length, 2);
+for (let plan of entryStats.creationExecStats) {
+ assert(plan.hasOwnProperty("executionStages"));
+ const ixscanStages = getPlanStages(plan.executionStages, "IXSCAN");
+ assert.gt(ixscanStages.length, 0);
+}
+
+// Assert that the entry has an array of at least two scores, and that all scores are greater
+// than 1.
+assert(entryStats.hasOwnProperty("candidatePlanScores"));
+assert.gte(entryStats.candidatePlanScores.length, 2);
+for (let score of entryStats.candidatePlanScores) {
+ assert.gt(score, 1);
+}
+
+// Should report that no index filter is set.
+assert.eq(false, entryStats.indexFilterSet);
+
+// After creating an index filter on a different query shape, $planCacheStats should still
+// report that no index filter is set. Setting a filter clears the cache, so we rerun the query
+// associated with the cache entry.
+assert.commandWorked(testDb.runCommand(
+ {planCacheSetFilter: coll.getName(), query: {a: 1, b: 1, c: 1}, indexes: [{a: 1}, {b: 1}]}));
+assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
+assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
+assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
+entryStats = getSingleEntryStats();
+assert.eq(false, entryStats.indexFilterSet);
+
+// Create an index filter on shape {a: 1, b: 1}, and verify that indexFilterSet is now true.
+assert.commandWorked(testDb.runCommand(
+ {planCacheSetFilter: coll.getName(), query: {a: 1, b: 1}, indexes: [{a: 1}, {b: 1}]}));
+assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
+assert.eq(0, coll.find({a: 1, b: 1}).itcount());
+assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
+entryStats = getSingleEntryStats();
+assert.eq(true, entryStats.indexFilterSet);
+
+// Should throw an error if $planCacheStats is not first.
+assert.throws(
+ () => coll.aggregate([{$match: {createdFromQuery: {a: 1, b: 1}}}, {$planCacheStats: {}}]));
+
+// If the plan cache is cleared, then there are no longer any results returned by
+// $planCacheStats.
+assert.commandWorked(testDb.runCommand({planCacheClear: coll.getName()}));
+assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
+
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/port_options.js b/jstests/noPassthrough/port_options.js
index 8f4d4becc3e..72fb5bf755d 100644
--- a/jstests/noPassthrough/port_options.js
+++ b/jstests/noPassthrough/port_options.js
@@ -1,55 +1,55 @@
// Check --port= edge behaviors.
(function() {
- 'use strict';
- jsTest.log("Setting port=0 is okay unless binding to multiple IP interfaces.");
-
- function runTest(bindIP, expectOk) {
- jsTest.log("".concat("Testing with bindIP=[", bindIP, "], expectOk=[", expectOk, "]"));
-
- clearRawMongoProgramOutput();
-
- let pid = startMongoProgramNoConnect(
- "mongod", "--ipv6", "--dbpath", MongoRunner.dataDir, "--bind_ip", bindIP, "--port", 0);
- jsTest.log("".concat("pid=[", pid, "]"));
-
- if (expectOk) {
- let port;
-
- // We use assert.soonNoExcept() here because the mongod may not be logging yet.
- assert.soonNoExcept(() => {
- const logContents = rawMongoProgramOutput();
- const found = logContents.match(/waiting for connections on port (\d+)/);
- if (found !== null) {
- print("Found message from mongod with port it is listening on: " + found[0]);
- port = found[1];
- return true;
- }
- });
-
- const connStr = `127.0.0.1:${port}`;
- print("Attempting to connect to " + connStr);
-
- let conn;
- assert.soonNoExcept(() => {
- conn = new Mongo(connStr);
+'use strict';
+jsTest.log("Setting port=0 is okay unless binding to multiple IP interfaces.");
+
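+// Starts a mongod bound to 'bindIP' with --port 0 and verifies that it either comes up and
+// accepts connections (expectOk) or exits with a network error.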
+function runTest(bindIP, expectOk) {
+ jsTest.log("".concat("Testing with bindIP=[", bindIP, "], expectOk=[", expectOk, "]"));
+
+ clearRawMongoProgramOutput();
+
+ let pid = startMongoProgramNoConnect(
+ "mongod", "--ipv6", "--dbpath", MongoRunner.dataDir, "--bind_ip", bindIP, "--port", 0);
+ jsTest.log("".concat("pid=[", pid, "]"));
+
+ if (expectOk) {
+ let port;
+
+ // We use assert.soonNoExcept() here because the mongod may not be logging yet.
+ assert.soonNoExcept(() => {
+ const logContents = rawMongoProgramOutput();
+ const found = logContents.match(/waiting for connections on port (\d+)/);
+ if (found !== null) {
+ print("Found message from mongod with port it is listening on: " + found[0]);
+ port = found[1];
return true;
- });
- assert.commandWorked(conn.adminCommand({ping: 1}));
-
- stopMongoProgramByPid(pid);
- } else {
- const ec = waitProgram(pid);
- assert.eq(ec, MongoRunner.EXIT_NET_ERROR);
- assert.soonNoExcept(() => {
- const logContents = rawMongoProgramOutput();
- const found = logContents.match(
- /Port 0 \(ephemeral port\) is not allowed when listening on multiple IP interfaces/);
- return (found !== null);
- }, "No warning issued for invalid port=0 usage");
- }
+ }
+ });
+
+ const connStr = `127.0.0.1:${port}`;
+ print("Attempting to connect to " + connStr);
+
+ let conn;
+ assert.soonNoExcept(() => {
+ conn = new Mongo(connStr);
+ return true;
+ });
+ assert.commandWorked(conn.adminCommand({ping: 1}));
+
+ stopMongoProgramByPid(pid);
+ } else {
+ const ec = waitProgram(pid);
+ assert.eq(ec, MongoRunner.EXIT_NET_ERROR);
+ assert.soonNoExcept(() => {
+ const logContents = rawMongoProgramOutput();
+ const found = logContents.match(
+ /Port 0 \(ephemeral port\) is not allowed when listening on multiple IP interfaces/);
+ return (found !== null);
+ }, "No warning issued for invalid port=0 usage");
}
+}
- runTest("127.0.0.1", true);
- runTest("127.0.0.1,::1", false);
+runTest("127.0.0.1", true);
+runTest("127.0.0.1,::1", false);
}());
diff --git a/jstests/noPassthrough/predictive_connpool.js b/jstests/noPassthrough/predictive_connpool.js
index c38d01601e2..d92d1ba9a2f 100644
--- a/jstests/noPassthrough/predictive_connpool.js
+++ b/jstests/noPassthrough/predictive_connpool.js
@@ -5,155 +5,155 @@ load("jstests/libs/parallelTester.js");
*/
(function() {
- "use strict";
-
- const st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2, protocolVersion: 1}});
- const kDbName = 'test';
- const mongosClient = st.s;
- const mongos = mongosClient.getDB(kDbName);
- const rst = st.rs0;
- const primary = rst.getPrimary();
- const secondary = rst.getSecondaries()[0];
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const allHosts = cfg.members.map(x => x.host);
- const primaryOnly = [primary.name];
- const secondaryOnly = [secondary.name];
-
- function configureReplSetFailpoint(name, modeValue) {
- st.rs0.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: name,
- mode: modeValue,
- data: {shouldCheckForInterrupt: true},
- }));
- });
+"use strict";
+
+const st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2, protocolVersion: 1}});
+const kDbName = 'test';
+const mongosClient = st.s;
+const mongos = mongosClient.getDB(kDbName);
+const rst = st.rs0;
+const primary = rst.getPrimary();
+const secondary = rst.getSecondaries()[0];
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const allHosts = cfg.members.map(x => x.host);
+const primaryOnly = [primary.name];
+const secondaryOnly = [secondary.name];
+
+function configureReplSetFailpoint(name, modeValue) {
+ st.rs0.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: name,
+ mode: modeValue,
+ data: {shouldCheckForInterrupt: true},
+ }));
+ });
+}
+
+var threads = [];
+
+function launchFinds({times, readPref, shouldFail}) {
+ jsTestLog("Starting " + times + " connections");
+ for (var i = 0; i < times; i++) {
+ var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
+ var client = new Mongo(connStr);
+ const ret = client.getDB(dbName).runCommand(
+ {find: "test", limit: 1, "$readPreference": {mode: readPref}});
+
+ if (shouldFail) {
+ assert.commandFailed(ret);
+ } else {
+ assert.commandWorked(ret);
+ }
+ }, st.s.host, readPref, kDbName, shouldFail);
+ thread.start();
+ threads.push(thread);
}
-
- var threads = [];
-
- function launchFinds({times, readPref, shouldFail}) {
- jsTestLog("Starting " + times + " connections");
- for (var i = 0; i < times; i++) {
- var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
- var client = new Mongo(connStr);
- const ret = client.getDB(dbName).runCommand(
- {find: "test", limit: 1, "$readPreference": {mode: readPref}});
-
- if (shouldFail) {
- assert.commandFailed(ret);
- } else {
- assert.commandWorked(ret);
- }
- }, st.s.host, readPref, kDbName, shouldFail);
- thread.start();
- threads.push(thread);
+}
+
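+// Merges 'params' into a setParameter command and applies it on the mongos.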
+function updateSetParameters(params) {
+ var cmd = Object.assign({"setParameter": 1}, params);
+ assert.commandWorked(mongos.adminCommand(cmd));
+}
+
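+// Drops all pooled connections from the mongos to every host in the replica set.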
+function dropConnections() {
+ assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+}
+
+var currentCheckNum = 0;
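+// Waits until the mongos connection pool reports the expected ready/pending/active counts for
+// each of the given hosts (all hosts by default).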
+function hasConnPoolStats(args) {
+ const checkNum = currentCheckNum++;
+ jsTestLog("Check #" + checkNum + ": " + tojson(args));
+ var {ready, pending, active, hosts, isAbsent} = args;
+
+ ready = ready ? ready : 0;
+ pending = pending ? pending : 0;
+ active = active ? active : 0;
+ hosts = hosts ? hosts : allHosts;
+
+ function checkStats(res, host) {
+ var stats = res.hosts[host];
+ if (!stats) {
+ jsTestLog("Connection stats for " + host + " are absent");
+ return isAbsent;
}
- }
- function updateSetParameters(params) {
- var cmd = Object.assign({"setParameter": 1}, params);
- assert.commandWorked(mongos.adminCommand(cmd));
+ jsTestLog("Connection stats for " + host + ": " + tojson(stats));
+ return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
}
- function dropConnections() {
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+ function checkAllStats() {
+ var res = mongos.adminCommand({connPoolStats: 1});
+ return hosts.map(host => checkStats(res, host)).every(x => x);
}
- var currentCheckNum = 0;
- function hasConnPoolStats(args) {
- const checkNum = currentCheckNum++;
- jsTestLog("Check #" + checkNum + ": " + tojson(args));
- var {ready, pending, active, hosts, isAbsent} = args;
-
- ready = ready ? ready : 0;
- pending = pending ? pending : 0;
- active = active ? active : 0;
- hosts = hosts ? hosts : allHosts;
-
- function checkStats(res, host) {
- var stats = res.hosts[host];
- if (!stats) {
- jsTestLog("Connection stats for " + host + " are absent");
- return isAbsent;
- }
-
- jsTestLog("Connection stats for " + host + ": " + tojson(stats));
- return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
- }
-
- function checkAllStats() {
- var res = mongos.adminCommand({connPoolStats: 1});
- return hosts.map(host => checkStats(res, host)).every(x => x);
- }
-
- assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
+ assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
- jsTestLog("Check #" + checkNum + " successful");
- }
+ jsTestLog("Check #" + checkNum + " successful");
+}
- function checkConnPoolStats() {
- const ret = mongos.runCommand({"connPoolStats": 1});
- const poolStats = ret["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"];
- jsTestLog(poolStats);
- }
+function checkConnPoolStats() {
+ const ret = mongos.runCommand({"connPoolStats": 1});
+ const poolStats = ret["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"];
+ jsTestLog(poolStats);
+}
- function walkThroughBehavior({primaryFollows, secondaryFollows}) {
- // Start pooling with a ping
- mongos.adminCommand({multicast: {ping: 0}});
- checkConnPoolStats();
+function walkThroughBehavior({primaryFollows, secondaryFollows}) {
+ // Start pooling with a ping
+ mongos.adminCommand({multicast: {ping: 0}});
+ checkConnPoolStats();
- // Block connections from finishing
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ // Block connections from finishing
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- // Launch a bunch of primary finds
- launchFinds({times: 10, readPref: "primary"});
+ // Launch a bunch of primary finds
+ launchFinds({times: 10, readPref: "primary"});
- // Confirm we follow
- hasConnPoolStats({active: 10, hosts: primaryOnly});
- if (secondaryFollows) {
- hasConnPoolStats({ready: 10, hosts: secondaryOnly});
- }
- checkConnPoolStats();
+ // Confirm we follow
+ hasConnPoolStats({active: 10, hosts: primaryOnly});
+ if (secondaryFollows) {
+ hasConnPoolStats({ready: 10, hosts: secondaryOnly});
+ }
+ checkConnPoolStats();
- // Launch a bunch of secondary finds
- launchFinds({times: 20, readPref: "secondary"});
+ // Launch a bunch of secondary finds
+ launchFinds({times: 20, readPref: "secondary"});
- // Confirm we follow
- hasConnPoolStats({active: 20, hosts: secondaryOnly});
- if (primaryFollows) {
- hasConnPoolStats({ready: 10, active: 10, hosts: primaryOnly});
- }
- checkConnPoolStats();
+ // Confirm we follow
+ hasConnPoolStats({active: 20, hosts: secondaryOnly});
+ if (primaryFollows) {
+ hasConnPoolStats({ready: 10, active: 10, hosts: primaryOnly});
+ }
+ checkConnPoolStats();
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- dropConnections();
- }
+ dropConnections();
+}
- assert.writeOK(mongos.test.insert({x: 1}));
- assert.writeOK(mongos.test.insert({x: 2}));
- assert.writeOK(mongos.test.insert({x: 3}));
- st.rs0.awaitReplication();
+assert.writeOK(mongos.test.insert({x: 1}));
+assert.writeOK(mongos.test.insert({x: 2}));
+assert.writeOK(mongos.test.insert({x: 3}));
+st.rs0.awaitReplication();
- jsTestLog("Following disabled");
- walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
+jsTestLog("Following disabled");
+walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
- jsTestLog("Following primary node");
- updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchPrimaryNode"});
- walkThroughBehavior({primaryFollows: false, secondaryFollows: true});
+jsTestLog("Following primary node");
+updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchPrimaryNode"});
+walkThroughBehavior({primaryFollows: false, secondaryFollows: true});
- // jsTestLog("Following busiest node");
- // updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchBusiestNode"});
- // walkThroughBehavior({primaryFollows: true, secondaryFollows: true});
+// jsTestLog("Following busiest node");
+// updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchBusiestNode"});
+// walkThroughBehavior({primaryFollows: true, secondaryFollows: true});
- jsTestLog("Reseting to disabled");
- updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "disabled"});
- walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
+jsTestLog("Reseting to disabled");
+updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "disabled"});
+walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
- threads.forEach(function(thread) {
- thread.join();
- });
+threads.forEach(function(thread) {
+ thread.join();
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/profile_agg_multiple_batches.js b/jstests/noPassthrough/profile_agg_multiple_batches.js
index 00fb738aca2..6d21e254bde 100644
--- a/jstests/noPassthrough/profile_agg_multiple_batches.js
+++ b/jstests/noPassthrough/profile_agg_multiple_batches.js
@@ -3,33 +3,35 @@
// @tags: [requires_profiling]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/profiler.js");
+load("jstests/libs/profiler.js");
- // Setting internalDocumentSourceCursorBatchSizeBytes=1 ensures that multiple batches pass
- // through DocumentSourceCursor.
- const options = {setParameter: "internalDocumentSourceCursorBatchSizeBytes=1"};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
+// Setting internalDocumentSourceCursorBatchSizeBytes=1 ensures that multiple batches pass
+// through DocumentSourceCursor.
+const options = {
+ setParameter: "internalDocumentSourceCursorBatchSizeBytes=1"
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
- const testDB = conn.getDB("test");
- const coll = testDB.getCollection("coll");
+const testDB = conn.getDB("test");
+const coll = testDB.getCollection("coll");
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
- assert.eq(8, coll.aggregate([{$match: {a: {$gte: 2}}}, {$group: {_id: "$b"}}]).itcount());
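+// Documents have a = 0..9, so the $match on {a: {$gte: 2}} selects exactly 8 of them.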
+assert.eq(8, coll.aggregate([{$match: {a: {$gte: 2}}}, {$group: {_id: "$b"}}]).itcount());
- const profileObj = getLatestProfilerEntry(testDB);
+const profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.keysExamined, 8, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 8, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 8, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 8, tojson(profileObj));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/profile_interrupted_op.js b/jstests/noPassthrough/profile_interrupted_op.js
index 3fa681cee71..f49a126731d 100644
--- a/jstests/noPassthrough/profile_interrupted_op.js
+++ b/jstests/noPassthrough/profile_interrupted_op.js
@@ -6,70 +6,69 @@
// @tags: [requires_persistence, requires_profiling]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js"); // For checkLog.
+load("jstests/libs/check_log.js"); // For checkLog.
- //
- // Start mongo with profiling disabled, create an empty database, and populate it with a
- // collection that has one document.
- //
- let standalone = MongoRunner.runMongod({profile: "0"});
+//
+// Start mongo with profiling disabled, create an empty database, and populate it with a
+// collection that has one document.
+//
+let standalone = MongoRunner.runMongod({profile: "0"});
- let db = standalone.getDB("profile_interrupted_op");
- assert.commandWorked(db.dropDatabase());
+let db = standalone.getDB("profile_interrupted_op");
+assert.commandWorked(db.dropDatabase());
- let coll = db.getCollection("test");
- assert.commandWorked(coll.insert({a: 1}));
+let coll = db.getCollection("test");
+assert.commandWorked(coll.insert({a: 1}));
- //
- // Stop the mongod and then restart it, this time with profiling enabled. Note that enabling
- // profiling on a running database would create the 'system.profile' collection, which we don't
- // yet want created for this test.
- //
- MongoRunner.stopMongod(standalone);
- standalone = MongoRunner.runMongod(
- {restart: true, cleanData: false, dbpath: standalone.dbpath, profile: "2"});
+//
+// Stop the mongod and then restart it, this time with profiling enabled. Note that enabling
+// profiling on a running database would create the 'system.profile' collection, which we don't
+// yet want created for this test.
+//
+MongoRunner.stopMongod(standalone);
+standalone = MongoRunner.runMongod(
+ {restart: true, cleanData: false, dbpath: standalone.dbpath, profile: "2"});
- //
- // Execute a query that will get interrupted for exceeding its 'maxTimeMS' value. The profiler
- // will attempt to create the 'system.profile' collection while the operation context is already
- // marked as interrupted.
- //
- db = standalone.getDB("profile_interrupted_op");
- coll = db.getCollection("test");
- const err = assert.throws(function() {
- coll.find({
- $where: function() {
- sleep(3600);
- return true;
- }
- })
- .maxTimeMS(1000)
- .count();
- });
- assert.contains(err.code,
- [ErrorCodes.MaxTimeMSExpired, ErrorCodes.Interrupted, ErrorCodes.InternalError],
- err);
+//
+// Execute a query that will get interrupted for exceeding its 'maxTimeMS' value. The profiler
+// will attempt to create the 'system.profile' collection while the operation context is already
+// marked as interrupted.
+//
+db = standalone.getDB("profile_interrupted_op");
+coll = db.getCollection("test");
+const err = assert.throws(function() {
+ coll.find({
+ $where: function() {
+ sleep(3600);
+ return true;
+ }
+ })
+ .maxTimeMS(1000)
+ .count();
+});
+assert.contains(
+ err.code, [ErrorCodes.MaxTimeMSExpired, ErrorCodes.Interrupted, ErrorCodes.InternalError], err);
- //
- // Profiling is not necessary for the rest of the test. We turn it off to make sure it doesn't
- // interfere with any remaining commands.
- //
- db.setProfilingLevel(0);
+//
+// Profiling is not necessary for the rest of the test. We turn it off to make sure it doesn't
+// interfere with any remaining commands.
+//
+db.setProfilingLevel(0);
- //
- // The mongod should print out a warning to indicate the potential need for a manually created
- // 'system.profile' collection.
- //
- checkLog.contains(standalone, "Manually create profile collection");
+//
+// The mongod should print out a warning to indicate the potential need for a manually created
+// 'system.profile' collection.
+//
+checkLog.contains(standalone, "Manually create profile collection");
- //
- // The mongod should not create the 'system.profile' collection automatically.
- //
- const res = db.runCommand({listCollections: 1, filter: {name: "system.profile"}});
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch, [], res);
+//
+// The mongod should not create the 'system.profile' collection automatically.
+//
+const res = db.runCommand({listCollections: 1, filter: {name: "system.profile"}});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch, [], res);
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
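
The restart idiom above (stop a mongod, then bring it back on the same dbpath with
different options) is worth isolating. A minimal sketch, assuming the mongo shell
test harness (MongoRunner, assert) is loaded; the database name and document are
hypothetical:

    // Start with profiling off so no 'system.profile' collection exists yet.
    let node = MongoRunner.runMongod({profile: "0"});
    assert.commandWorked(node.getDB("exampleDB").c.insert({x: 1}));
    MongoRunner.stopMongod(node);
    // 'restart: true' with 'cleanData: false' reuses the existing data files,
    // so collections created before the restart survive under the new options.
    node = MongoRunner.runMongod(
        {restart: true, cleanData: false, dbpath: node.dbpath, profile: "2"});
    assert.neq(null, node, "mongod failed to restart on the preserved dbpath");
    MongoRunner.stopMongod(node);
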
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index 536f4d6f995..9bec3018c1d 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -6,166 +6,165 @@
*/
(function() {
- "use strict";
-
- const conn = MongoRunner.runMongod();
- const testDB = conn.getDB("admin");
- const expectedParamDefaults = {
- internalQueryPlanEvaluationWorks: 10000,
- internalQueryPlanEvaluationCollFraction: 0.3,
- internalQueryPlanEvaluationMaxResults: 101,
- internalQueryCacheSize: 5000,
- internalQueryCacheFeedbacksStored: 20,
- internalQueryCacheEvictionRatio: 10.0,
- internalQueryCacheWorksGrowthCoefficient: 2.0,
- internalQueryCacheDisableInactiveEntries: false,
- internalQueryCacheListPlansNewOutput: false,
- internalQueryPlannerMaxIndexedSolutions: 64,
- internalQueryEnumerationMaxOrSolutions: 10,
- internalQueryEnumerationMaxIntersectPerAnd: 3,
- internalQueryForceIntersectionPlans: false,
- internalQueryPlannerEnableIndexIntersection: true,
- internalQueryPlannerEnableHashIntersection: false,
- internalQueryPlanOrChildrenIndependently: true,
- internalQueryMaxScansToExplode: 200,
- internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024,
- internalQueryExecYieldIterations: 128,
- internalQueryExecYieldPeriodMS: 10,
- internalQueryFacetBufferSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceCursorBatchSizeBytes: 4 * 1024 * 1024,
- internalDocumentSourceLookupCacheSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024,
- internalLookupStageIntermediateDocumentMaxSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceGroupMaxMemoryBytes: 100 * 1024 * 1024,
- // Should be half the value of 'internalQueryExecYieldIterations' parameter.
- internalInsertMaxBatchSize: 64,
- internalQueryPlannerGenerateCoveredWholeIndexScans: false,
- internalQueryIgnoreUnknownJSONSchemaKeywords: false,
- internalQueryProhibitBlockingMergeOnMongoS: false,
- };
-
- function assertDefaultParameterValues() {
- // For each parameter in 'expectedParamDefaults' verify that the value returned by
-        // 'getParameter' is the same as the expected value.
- for (let paramName in expectedParamDefaults) {
- const expectedParamValue = expectedParamDefaults[paramName];
- const getParamRes =
- assert.commandWorked(testDB.adminCommand({getParameter: 1, [paramName]: 1}));
- assert.eq(getParamRes[paramName], expectedParamValue);
- }
- }
-
- function assertSetParameterSucceeds(paramName, value) {
- assert.commandWorked(testDB.adminCommand({setParameter: 1, [paramName]: value}));
- // Verify that the set parameter actually worked by doing a get and verifying the value.
+"use strict";
+
+const conn = MongoRunner.runMongod();
+const testDB = conn.getDB("admin");
+const expectedParamDefaults = {
+ internalQueryPlanEvaluationWorks: 10000,
+ internalQueryPlanEvaluationCollFraction: 0.3,
+ internalQueryPlanEvaluationMaxResults: 101,
+ internalQueryCacheSize: 5000,
+ internalQueryCacheFeedbacksStored: 20,
+ internalQueryCacheEvictionRatio: 10.0,
+ internalQueryCacheWorksGrowthCoefficient: 2.0,
+ internalQueryCacheDisableInactiveEntries: false,
+ internalQueryCacheListPlansNewOutput: false,
+ internalQueryPlannerMaxIndexedSolutions: 64,
+ internalQueryEnumerationMaxOrSolutions: 10,
+ internalQueryEnumerationMaxIntersectPerAnd: 3,
+ internalQueryForceIntersectionPlans: false,
+ internalQueryPlannerEnableIndexIntersection: true,
+ internalQueryPlannerEnableHashIntersection: false,
+ internalQueryPlanOrChildrenIndependently: true,
+ internalQueryMaxScansToExplode: 200,
+ internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024,
+ internalQueryExecYieldIterations: 128,
+ internalQueryExecYieldPeriodMS: 10,
+ internalQueryFacetBufferSizeBytes: 100 * 1024 * 1024,
+ internalDocumentSourceCursorBatchSizeBytes: 4 * 1024 * 1024,
+ internalDocumentSourceLookupCacheSizeBytes: 100 * 1024 * 1024,
+ internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024,
+ internalLookupStageIntermediateDocumentMaxSizeBytes: 100 * 1024 * 1024,
+ internalDocumentSourceGroupMaxMemoryBytes: 100 * 1024 * 1024,
+ // Should be half the value of 'internalQueryExecYieldIterations' parameter.
+ internalInsertMaxBatchSize: 64,
+ internalQueryPlannerGenerateCoveredWholeIndexScans: false,
+ internalQueryIgnoreUnknownJSONSchemaKeywords: false,
+ internalQueryProhibitBlockingMergeOnMongoS: false,
+};
+
+function assertDefaultParameterValues() {
+ // For each parameter in 'expectedParamDefaults' verify that the value returned by
+    // 'getParameter' is the same as the expected value.
+ for (let paramName in expectedParamDefaults) {
+ const expectedParamValue = expectedParamDefaults[paramName];
const getParamRes =
assert.commandWorked(testDB.adminCommand({getParameter: 1, [paramName]: 1}));
- assert.eq(getParamRes[paramName], value);
- }
-
- function assertSetParameterFails(paramName, value) {
- assert.commandFailedWithCode(testDB.adminCommand({setParameter: 1, [paramName]: value}),
- ErrorCodes.BadValue);
+ assert.eq(getParamRes[paramName], expectedParamValue);
}
+}
+
+function assertSetParameterSucceeds(paramName, value) {
+ assert.commandWorked(testDB.adminCommand({setParameter: 1, [paramName]: value}));
+ // Verify that the set parameter actually worked by doing a get and verifying the value.
+ const getParamRes =
+ assert.commandWorked(testDB.adminCommand({getParameter: 1, [paramName]: 1}));
+ assert.eq(getParamRes[paramName], value);
+}
+
+function assertSetParameterFails(paramName, value) {
+ assert.commandFailedWithCode(testDB.adminCommand({setParameter: 1, [paramName]: value}),
+ ErrorCodes.BadValue);
+}
+
+// Verify that the default values are set as expected when the server starts up.
+assertDefaultParameterValues();
+
+assertSetParameterSucceeds("internalQueryPlanEvaluationWorks", 11);
+assertSetParameterFails("internalQueryPlanEvaluationWorks", 0);
+assertSetParameterFails("internalQueryPlanEvaluationWorks", -1);
+
+assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.0);
+assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.444);
+assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 1.0);
+assertSetParameterFails("internalQueryPlanEvaluationCollFraction", -0.1);
+assertSetParameterFails("internalQueryPlanEvaluationCollFraction", 1.0001);
+
+assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 11);
+assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 0);
+assertSetParameterFails("internalQueryPlanEvaluationMaxResults", -1);
+
+assertSetParameterSucceeds("internalQueryCacheSize", 1);
+assertSetParameterSucceeds("internalQueryCacheSize", 0);
+assertSetParameterFails("internalQueryCacheSize", -1);
+
+assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 1);
+assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 0);
+assertSetParameterFails("internalQueryCacheFeedbacksStored", -1);
+
+assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 1.0);
+assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 0.0);
+assertSetParameterFails("internalQueryCacheEvictionRatio", -0.1);
+
+assertSetParameterSucceeds("internalQueryCacheWorksGrowthCoefficient", 1.1);
+assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 1.0);
+assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 0.1);
+
+assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 11);
+assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 0);
+assertSetParameterFails("internalQueryPlannerMaxIndexedSolutions", -1);
+
+assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 11);
+assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 0);
+assertSetParameterFails("internalQueryEnumerationMaxOrSolutions", -1);
+
+assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 11);
+assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 0);
+assertSetParameterFails("internalQueryEnumerationMaxIntersectPerAnd", -1);
+
+assertSetParameterSucceeds("internalQueryMaxScansToExplode", 11);
+assertSetParameterSucceeds("internalQueryMaxScansToExplode", 0);
+assertSetParameterFails("internalQueryMaxScansToExplode", -1);
+
+assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 11);
+assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 0);
+assertSetParameterFails("internalQueryExecMaxBlockingSortBytes", -1);
+
+assertSetParameterSucceeds("internalQueryExecYieldIterations", 10);
+assertSetParameterSucceeds("internalQueryExecYieldIterations", 0);
+assertSetParameterSucceeds("internalQueryExecYieldIterations", -1);
+
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 1);
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
+assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
+
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 11);
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
+assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
+
+assertSetParameterSucceeds("internalQueryFacetBufferSizeBytes", 1);
+assertSetParameterFails("internalQueryFacetBufferSizeBytes", 0);
+assertSetParameterFails("internalQueryFacetBufferSizeBytes", -1);
+
+assertSetParameterSucceeds("internalDocumentSourceSortMaxBlockingSortBytes", 11);
+assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", 0);
+assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", -1);
+
+assertSetParameterSucceeds("internalDocumentSourceGroupMaxMemoryBytes", 11);
+assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", 0);
+assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", -1);
+
+// Internal BSON max object size is slightly larger than the max user object size, to
+// accommodate command metadata.
+const bsonUserSizeLimit = assert.commandWorked(testDB.isMaster()).maxBsonObjectSize;
+const bsonObjMaxInternalSize = bsonUserSizeLimit + 16 * 1024;
+
+assertSetParameterFails("internalLookupStageIntermediateDocumentMaxSizeBytes", 1);
+assertSetParameterSucceeds("internalLookupStageIntermediateDocumentMaxSizeBytes",
+ bsonObjMaxInternalSize);
+
+assertSetParameterSucceeds("internalInsertMaxBatchSize", 11);
+assertSetParameterFails("internalInsertMaxBatchSize", 0);
+assertSetParameterFails("internalInsertMaxBatchSize", -1);
+
+assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 11);
+assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 0);
+assertSetParameterFails("internalDocumentSourceCursorBatchSizeBytes", -1);
- // Verify that the default values are set as expected when the server starts up.
- assertDefaultParameterValues();
-
- assertSetParameterSucceeds("internalQueryPlanEvaluationWorks", 11);
- assertSetParameterFails("internalQueryPlanEvaluationWorks", 0);
- assertSetParameterFails("internalQueryPlanEvaluationWorks", -1);
-
- assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.0);
- assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.444);
- assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 1.0);
- assertSetParameterFails("internalQueryPlanEvaluationCollFraction", -0.1);
- assertSetParameterFails("internalQueryPlanEvaluationCollFraction", 1.0001);
-
- assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 11);
- assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 0);
- assertSetParameterFails("internalQueryPlanEvaluationMaxResults", -1);
-
- assertSetParameterSucceeds("internalQueryCacheSize", 1);
- assertSetParameterSucceeds("internalQueryCacheSize", 0);
- assertSetParameterFails("internalQueryCacheSize", -1);
-
- assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 1);
- assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 0);
- assertSetParameterFails("internalQueryCacheFeedbacksStored", -1);
-
- assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 1.0);
- assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 0.0);
- assertSetParameterFails("internalQueryCacheEvictionRatio", -0.1);
-
- assertSetParameterSucceeds("internalQueryCacheWorksGrowthCoefficient", 1.1);
- assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 1.0);
- assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 0.1);
-
- assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 11);
- assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 0);
- assertSetParameterFails("internalQueryPlannerMaxIndexedSolutions", -1);
-
- assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 11);
- assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 0);
- assertSetParameterFails("internalQueryEnumerationMaxOrSolutions", -1);
-
- assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 11);
- assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 0);
- assertSetParameterFails("internalQueryEnumerationMaxIntersectPerAnd", -1);
-
- assertSetParameterSucceeds("internalQueryMaxScansToExplode", 11);
- assertSetParameterSucceeds("internalQueryMaxScansToExplode", 0);
- assertSetParameterFails("internalQueryMaxScansToExplode", -1);
-
- assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 11);
- assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 0);
- assertSetParameterFails("internalQueryExecMaxBlockingSortBytes", -1);
-
- assertSetParameterSucceeds("internalQueryExecYieldIterations", 10);
- assertSetParameterSucceeds("internalQueryExecYieldIterations", 0);
- assertSetParameterSucceeds("internalQueryExecYieldIterations", -1);
-
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 1);
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
- assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
-
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 11);
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
- assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
-
- assertSetParameterSucceeds("internalQueryFacetBufferSizeBytes", 1);
- assertSetParameterFails("internalQueryFacetBufferSizeBytes", 0);
- assertSetParameterFails("internalQueryFacetBufferSizeBytes", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceSortMaxBlockingSortBytes", 11);
- assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", 0);
- assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceGroupMaxMemoryBytes", 11);
- assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", 0);
- assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", -1);
-
- // Internal BSON max object size is slightly larger than the max user object size, to
- // accommodate command metadata.
- const bsonUserSizeLimit = assert.commandWorked(testDB.isMaster()).maxBsonObjectSize;
- const bsonObjMaxInternalSize = bsonUserSizeLimit + 16 * 1024;
-
- assertSetParameterFails("internalLookupStageIntermediateDocumentMaxSizeBytes", 1);
- assertSetParameterSucceeds("internalLookupStageIntermediateDocumentMaxSizeBytes",
- bsonObjMaxInternalSize);
-
- assertSetParameterSucceeds("internalInsertMaxBatchSize", 11);
- assertSetParameterFails("internalInsertMaxBatchSize", 0);
- assertSetParameterFails("internalInsertMaxBatchSize", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 11);
- assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 0);
- assertSetParameterFails("internalDocumentSourceCursorBatchSizeBytes", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 11);
- assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 0);
- assertSetParameterFails("internalDocumentSourceLookupCacheSizeBytes", -1);
-
- MongoRunner.stopMongod(conn);
+assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 11);
+assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 0);
+assertSetParameterFails("internalDocumentSourceLookupCacheSizeBytes", -1);
+MongoRunner.stopMongod(conn);
})();
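
Every helper in the test above reduces to a setParameter/getParameter round trip
against the admin database. A standalone sketch of that round trip, using one knob
from the defaults table; the chosen values are illustrative:

    const knobConn = MongoRunner.runMongod();
    const admin = knobConn.getDB("admin");
    // Set the knob, then read it back to confirm the new value took effect.
    assert.commandWorked(admin.adminCommand({setParameter: 1, internalQueryCacheSize: 4000}));
    const got =
        assert.commandWorked(admin.adminCommand({getParameter: 1, internalQueryCacheSize: 1}));
    assert.eq(got.internalQueryCacheSize, 4000);
    // Out-of-range values are rejected with BadValue rather than clamped.
    assert.commandFailedWithCode(admin.adminCommand({setParameter: 1, internalQueryCacheSize: -1}),
                                 ErrorCodes.BadValue);
    MongoRunner.stopMongod(knobConn);
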
diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js
index 4effd6b370f..74485c45cb0 100644
--- a/jstests/noPassthrough/query_yield1.js
+++ b/jstests/noPassthrough/query_yield1.js
@@ -1,93 +1,93 @@
(function() {
- "use strict";
- if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
-
- t = db.query_yield1;
- t.drop();
-
- N = 20000;
- i = 0;
-
- q = function() {
- var x = this.n;
- for (var i = 0; i < 250; i++) {
- x = x * 2;
+"use strict";
+if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+ const conn = MongoRunner.runMongod({nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
+
+ t = db.query_yield1;
+ t.drop();
+
+ N = 20000;
+ i = 0;
+
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 250; i++) {
+ x = x * 2;
+ }
+ return false;
+ };
+
+ while (true) {
+ fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; i++) {
+ bulk.insert({_id: i, n: 1});
}
- return false;
+ assert.writeOK(bulk.execute());
};
- while (true) {
- fill = function() {
- var bulk = t.initializeUnorderedBulkOp();
- for (; i < N; i++) {
- bulk.insert({_id: i, n: 1});
- }
- assert.writeOK(bulk.execute());
- };
-
- timeQuery = function() {
- return Date.timeFunc(function() {
- assert.eq(0, t.find(q).itcount());
- });
- };
-
- fill();
- timeQuery();
- timeQuery();
- time = timeQuery();
- print(N + "\t" + time);
- if (time > 2000)
- break;
-
- N *= 2;
- }
+ timeQuery = function() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ };
- // --- test 1
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print(N + "\t" + time);
+ if (time > 2000)
+ break;
- assert.eq(0, db.currentOp().inprog.length, "setup broken");
+ N *= 2;
+ }
- join = startParallelShell(
- "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
+ // --- test 1
- assert.soon(function() {
- var x = db.currentOp().inprog;
- return x.length > 0;
- }, "never doing query", 2000, 1);
+ assert.eq(0, db.currentOp().inprog.length, "setup broken");
- print("start query");
+ join = startParallelShell(
+ "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
- num = 0;
- start = new Date();
- biggestMe = 0;
- while (((new Date()).getTime() - start) < (time * 2)) {
- var me = Date.timeFunc(function() {
- t.insert({x: 1});
- });
- var x = db.currentOp();
+ assert.soon(function() {
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ }, "never doing query", 2000, 1);
- if (num++ == 0) {
- assert.eq(1, x.inprog.length, "nothing in prog");
- }
+ print("start query");
- if (me > biggestMe) {
- biggestMe = me;
- print("biggestMe: " + biggestMe);
- }
-
- assert.gt(200, me, "took too long for me to run");
+ num = 0;
+ start = new Date();
+ biggestMe = 0;
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ var me = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ var x = db.currentOp();
- if (x.inprog.length == 0)
- break;
+ if (num++ == 0) {
+ assert.eq(1, x.inprog.length, "nothing in prog");
}
- join();
+ if (me > biggestMe) {
+ biggestMe = me;
+ print("biggestMe: " + biggestMe);
+ }
- var x = db.currentOp();
- assert.eq(0, x.inprog.length, "weird 2");
+ assert.gt(200, me, "took too long for me to run");
- MongoRunner.stopMongod(conn);
+ if (x.inprog.length == 0)
+ break;
}
+
+ join();
+
+ var x = db.currentOp();
+ assert.eq(0, x.inprog.length, "weird 2");
+
+ MongoRunner.stopMongod(conn);
+}
})();
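
The fill-until-slow loop in query_yield1.js is the core load-generation idiom:
keep doubling the collection size until a deliberately expensive $where scan
exceeds a time budget. A compact sketch, assuming the shell test harness; the
2000 ms budget mirrors the test and the collection name is hypothetical:

    const yconn = MongoRunner.runMongod();
    const c = yconn.getDB("test").yield_sketch;
    let n = 1000;
    let inserted = 0;
    while (true) {
        const bulk = c.initializeUnorderedBulkOp();
        for (; inserted < n; inserted++) {
            bulk.insert({_id: inserted, n: 1});
        }
        assert.writeOK(bulk.execute());
        // Passing a function to find() runs it as a $where predicate.
        const ms = Date.timeFunc(function() {
            assert.eq(0, c.find(function() {
                            var x = this.n;
                            for (var i = 0; i < 250; i++) {
                                x = x * 2;
                            }
                            return false;
                        }).itcount());
        });
        if (ms > 2000)
            break;
        n *= 2;  // not slow enough yet; double the fill point and retry
    }
    MongoRunner.stopMongod(yconn);
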
diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js
index e5257653fd1..46816da5aea 100644
--- a/jstests/noPassthrough/query_yield2.js
+++ b/jstests/noPassthrough/query_yield2.js
@@ -1,153 +1,153 @@
(function() {
- "use strict";
- if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+"use strict";
+if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
- var currentOp;
- var N;
- var i;
- var t;
- var q;
- var len;
- var num;
- var start;
- var insertTime;
+ var currentOp;
+ var N;
+ var i;
+ var t;
+ var q;
+ var len;
+ var num;
+ var start;
+ var insertTime;
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+ const conn = MongoRunner.runMongod({nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
- t = db.query_yield2;
- t.drop();
+ t = db.query_yield2;
+ t.drop();
- N = 200;
- i = 0;
+ N = 200;
+ i = 0;
- q = function() {
- var x = this.n;
- for (var i = 0; i < 25000; i++) {
- x = x * 2;
- }
- return false;
- };
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 25000; i++) {
+ x = x * 2;
+ }
+ return false;
+ };
- print("Shell ==== Creating test.query_yield2 collection ...");
- print(
- "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
- while (true) {
- fill = function() {
- var bulk = t.initializeUnorderedBulkOp();
- for (; i < N; ++i) {
- bulk.insert({_id: i, n: 1});
- }
- assert.writeOK(bulk.execute());
- };
- timeQuery = function() {
- return Date.timeFunc(function() {
- assert.eq(0, t.find(q).itcount());
- });
- };
- print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
- fill();
- print("Shell ==== Running warm-up query 1");
- timeQuery();
- print("Shell ==== Running warm-up query 2");
- timeQuery();
- print("Shell ==== Running timed query ...");
- time = timeQuery();
- print("Shell ==== Query across " + N + " documents took " + time + " ms");
- if (time > 2000) {
- print("Shell ==== Reached desired 2000 ms mark (at " + time +
-                      " ms), proceeding to next step");
- break;
+ print("Shell ==== Creating test.query_yield2 collection ...");
+ print(
+ "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
+ while (true) {
+ fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; ++i) {
+ bulk.insert({_id: i, n: 1});
}
- N *= 2;
- print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
+ assert.writeOK(bulk.execute());
+ };
+ timeQuery = function() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ };
+ print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
+ fill();
+ print("Shell ==== Running warm-up query 1");
+ timeQuery();
+ print("Shell ==== Running warm-up query 2");
+ timeQuery();
+ print("Shell ==== Running timed query ...");
+ time = timeQuery();
+ print("Shell ==== Query across " + N + " documents took " + time + " ms");
+ if (time > 2000) {
+ print("Shell ==== Reached desired 2000 ms mark (at " + time +
+                  " ms), proceeding to next step");
+ break;
}
+ N *= 2;
+ print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
+ }
- print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
- print("Shell ==== Dump of db.currentOp:");
+ print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
+ print("Shell ==== Dump of db.currentOp:");
+ currentOp = db.currentOp();
+ print(tojson(currentOp));
+ len = currentOp.inprog.length;
+ if (len) {
+ print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
+ throw Error("query_yield2.js test is broken");
+ }
+ print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
+
+ print("Shell ==== Starting parallel shell to test if slow query will yield to write");
+ join = startParallelShell(
+ "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
+
+ print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
+ assert.soon(function() {
currentOp = db.currentOp();
- print(tojson(currentOp));
len = currentOp.inprog.length;
if (len) {
- print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
- throw Error("query_yield2.js test is broken");
+ print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ print("Shell ==== Checking if this currentOp is the query we are waiting for");
+ if (currentOp.inprog[0].ns == "test.query_yield2" &&
+ currentOp.inprog[0].query["$where"]) {
+ print("Shell ==== Yes, we found the query we are waiting for");
+ return true;
+ }
+ if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
+ print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
+ return false;
+ }
+ print(
+ "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
+ return false;
}
- print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
+ return len > 0;
+ }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
- print("Shell ==== Starting parallel shell to test if slow query will yield to write");
- join = startParallelShell(
- "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
-
- print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
- assert.soon(function() {
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- if (len) {
- print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
+ print(
+ "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
+ num = 0;
+ start = new Date();
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ if (num == 0) {
+ print("Shell ==== Starting loop " + num + ", inserting 1 document");
+ }
+ insertTime = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ print("Shell ==== Time to insert document " + num + " was " + insertTime +
+ " ms, db.currentOp().inprog.length is " + len);
+ if (num++ == 0) {
+ if (len != 1) {
+ print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
print("Shell ==== Dump of db.currentOp:");
print(tojson(currentOp));
- print("Shell ==== Checking if this currentOp is the query we are waiting for");
- if (currentOp.inprog[0].ns == "test.query_yield2" &&
- currentOp.inprog[0].query["$where"]) {
- print("Shell ==== Yes, we found the query we are waiting for");
- return true;
- }
- if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
- print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
- return false;
- }
- print(
- "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
- return false;
- }
- return len > 0;
- }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
-
- print(
- "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
- num = 0;
- start = new Date();
- while (((new Date()).getTime() - start) < (time * 2)) {
- if (num == 0) {
- print("Shell ==== Starting loop " + num + ", inserting 1 document");
- }
- insertTime = Date.timeFunc(function() {
- t.insert({x: 1});
- });
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- print("Shell ==== Time to insert document " + num + " was " + insertTime +
- " ms, db.currentOp().inprog.length is " + len);
- if (num++ == 0) {
- if (len != 1) {
- print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
- print("Shell ==== Dump of db.currentOp:");
- print(tojson(currentOp));
- throw Error("TEST FAILED!");
- }
- }
- assert.gt(200,
- insertTime,
- "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
- if (currentOp.inprog.length == 0) {
- break;
+ throw Error("TEST FAILED!");
}
}
+ assert.gt(200,
+ insertTime,
+ "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
+ if (currentOp.inprog.length == 0) {
+ break;
+ }
+ }
- print("Shell ==== Finished inserting documents, reader also finished");
- print("Shell ==== Waiting for parallel shell to exit");
- join();
+ print("Shell ==== Finished inserting documents, reader also finished");
+ print("Shell ==== Waiting for parallel shell to exit");
+ join();
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- if (len != 0) {
- print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
- print("Shell ==== Dump of db.currentOp:");
- print(tojson(currentOp));
- throw Error("TEST FAILED!");
- }
- print("Shell ==== Test completed successfully, shutting down server");
- MongoRunner.stopMongod(conn);
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ if (len != 0) {
+ print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ throw Error("TEST FAILED!");
}
+ print("Shell ==== Test completed successfully, shutting down server");
+ MongoRunner.stopMongod(conn);
+}
})();
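
query_yield2.js gates its timed phase on the background query actually showing up
in db.currentOp(). The polling idiom, sketched on its own; it assumes 'db' is
connected as in the test and that a parallel shell has just started a $where query
against "test.query_yield2":

    // Poll currentOp until our query appears, ignoring unrelated internal
    // operations such as "whatsmyuri"; give up after 2 seconds.
    assert.soon(function() {
        return db.currentOp().inprog.some(function(op) {
            return op.ns === "test.query_yield2" && op.query && op.query["$where"];
        });
    }, "the $where query never appeared in currentOp", 2000, 1);
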
diff --git a/jstests/noPassthrough/query_yield_reset_timer.js b/jstests/noPassthrough/query_yield_reset_timer.js
index 3bdb81730f7..cd7d9cf7d16 100644
--- a/jstests/noPassthrough/query_yield_reset_timer.js
+++ b/jstests/noPassthrough/query_yield_reset_timer.js
@@ -1,45 +1,45 @@
// Tests the reset logic for the periodic query yield timer. Regression test for SERVER-21341.
(function() {
- 'use strict';
- var dbpath = MongoRunner.dataPath + jsTest.name();
- resetDbpath(dbpath);
- var mongod = MongoRunner.runMongod({dbpath: dbpath});
- var coll = mongod.getDB("test").getCollection(jsTest.name());
+'use strict';
+var dbpath = MongoRunner.dataPath + jsTest.name();
+resetDbpath(dbpath);
+var mongod = MongoRunner.runMongod({dbpath: dbpath});
+var coll = mongod.getDB("test").getCollection(jsTest.name());
- // Configure the server so that queries are expected to yield after every 10 work cycles, or
- // after every 500 milliseconds (whichever comes first). In addition, enable a failpoint that
- // introduces a sleep delay of 1 second during each yield.
- assert.commandWorked(
- coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldIterations: 10}));
- assert.commandWorked(
- coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 500}));
- assert.commandWorked(coll.getDB().adminCommand({
- configureFailPoint: "setYieldAllLocksWait",
- namespace: coll.getFullName(),
- mode: "alwaysOn",
- data: {waitForMillis: 1000}
- }));
+// Configure the server so that queries are expected to yield after every 10 work cycles, or
+// after every 500 milliseconds (whichever comes first). In addition, enable a failpoint that
+// introduces a sleep delay of 1 second during each yield.
+assert.commandWorked(
+ coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldIterations: 10}));
+assert.commandWorked(
+ coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 500}));
+assert.commandWorked(coll.getDB().adminCommand({
+ configureFailPoint: "setYieldAllLocksWait",
+ namespace: coll.getFullName(),
+ mode: "alwaysOn",
+ data: {waitForMillis: 1000}
+}));
- // Insert 40 documents in the collection, perform a collection scan, and verify that it yields
- // about 4 times. Since each group of 10 documents should always be processed in less than 500
- // milliseconds, we expect to hit only iteration-based yields for this query, and no
- // timing-based yields. 40 documents total divided by 10 documents per yield gives us an
- // estimated yield count of 4 yields.
- //
- // Note also that we have a 1-second sleep delay during each yield, and we expect this delay to
- // not change our expectation to hit zero timing-based yields. Timing-based yields only consider
- // time spent during query execution since the last yield; since our sleep delay of 1 second is
- // not during query execution, it should never count towards our 500 millisecond threshold for a
- // timing-based yield (incorrect accounting for timing-based yields was the cause for
- // SERVER-21341).
- for (var i = 0; i < 40; ++i) {
- assert.writeOK(coll.insert({}));
- }
- var explainRes = coll.find().explain("executionStats");
-    // We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We can also
- // use "saveState" calls as a proxy for "number of yields" here, because we expect our entire
- // result set to be returned in a single batch.
- assert.gt(explainRes.executionStats.executionStages.saveState, 4 / 2, tojson(explainRes));
- assert.lt(explainRes.executionStats.executionStages.saveState, 4 * 2, tojson(explainRes));
- MongoRunner.stopMongod(mongod);
+// Insert 40 documents in the collection, perform a collection scan, and verify that it yields
+// about 4 times. Since each group of 10 documents should always be processed in less than 500
+// milliseconds, we expect to hit only iteration-based yields for this query, and no
+// timing-based yields. 40 documents total divided by 10 documents per yield gives us an
+// estimated yield count of 4 yields.
+//
+// Note also that we have a 1-second sleep delay during each yield, and we expect this delay to
+// not change our expectation to hit zero timing-based yields. Timing-based yields only consider
+// time spent during query execution since the last yield; since our sleep delay of 1 second is
+// not during query execution, it should never count towards our 500 millisecond threshold for a
+// timing-based yield (incorrect accounting for timing-based yields was the cause for
+// SERVER-21341).
+for (var i = 0; i < 40; ++i) {
+ assert.writeOK(coll.insert({}));
+}
+var explainRes = coll.find().explain("executionStats");
+// We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We can also
+// use "saveState" calls as a proxy for "number of yields" here, because we expect our entire
+// result set to be returned in a single batch.
+assert.gt(explainRes.executionStats.executionStages.saveState, 4 / 2, tojson(explainRes));
+assert.lt(explainRes.executionStats.executionStages.saveState, 4 * 2, tojson(explainRes));
+MongoRunner.stopMongod(mongod);
})();
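
The arithmetic behind the saveState assertion is worth making explicit: a
collection scan does roughly one work cycle per document, and the executor yields
after every internalQueryExecYieldIterations cycles, so the expected yield count
is a simple quotient. A worked check (the helper name is hypothetical):

    function expectedYields(nDocs, yieldIterations) {
        // One work cycle per scanned document, one yield per 'yieldIterations'
        // cycles; timing-based yields are ruled out by the test's setup.
        return Math.floor(nDocs / yieldIterations);
    }
    assert.eq(4, expectedYields(40, 10));  // matches the 40-document test above
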
diff --git a/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js b/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js
index 3c9f09ba38c..d344d2648c2 100644
--- a/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js
+++ b/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js
@@ -9,58 +9,54 @@
// Check that starting mongod with both --queryableBackupMode and --replSet fails.
(function() {
- "use strict";
+"use strict";
- var name = "queryable_backup_mode_repl_set";
- var dbdir = MongoRunner.dataPath + name + "/";
+var name = "queryable_backup_mode_repl_set";
+var dbdir = MongoRunner.dataPath + name + "/";
- resetDbpath(dbdir);
+resetDbpath(dbdir);
- // Insert dummy document to ensure startup failure isn't due to lack of storage metadata file.
- var conn = MongoRunner.runMongod({dbpath: dbdir, noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
+// Insert dummy document to ensure startup failure isn't due to lack of storage metadata file.
+var conn = MongoRunner.runMongod({dbpath: dbdir, noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
- var coll = conn.getCollection('test.foo');
- coll.insertOne({a: 1});
- MongoRunner.stopMongod(conn);
+var coll = conn.getCollection('test.foo');
+coll.insertOne({a: 1});
+MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', replSet: 'bar'});
+conn = MongoRunner.runMongod(
+ {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', replSet: 'bar'});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --replSet are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --replSet are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', configsvr: ''});
+conn = MongoRunner.runMongod(
+ {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', configsvr: ''});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --configsvr are provided");
+assert.eq(
+ null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --configsvr are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', upgrade: ''});
+conn =
+ MongoRunner.runMongod({dbpath: dbdir, noCleanData: true, queryableBackupMode: '', upgrade: ''});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --upgrade are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --upgrade are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', repair: ''});
+conn =
+ MongoRunner.runMongod({dbpath: dbdir, noCleanData: true, queryableBackupMode: '', repair: ''});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --repair are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --repair are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', profile: 1});
+conn =
+ MongoRunner.runMongod({dbpath: dbdir, noCleanData: true, queryableBackupMode: '', profile: 1});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --profile are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --profile are provided");
})();
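
All of the cases above share one mechanism: MongoRunner.runMongod returns null
when the process fails to start, so each incompatible option pair is asserted by
comparing the return value to null, after first seeding the dbpath so the failure
cannot be blamed on missing storage metadata. A condensed sketch of one case; the
path name is hypothetical:

    const seedPath = MongoRunner.dataPath + "qbm_sketch/";
    resetDbpath(seedPath);
    // Seed the dbpath with real data files via a normal start/write/stop cycle.
    const seed = MongoRunner.runMongod({dbpath: seedPath, noCleanData: true});
    assert.commandWorked(seed.getDB("test").foo.insert({a: 1}));
    MongoRunner.stopMongod(seed);
    // Now the only plausible reason for this start to fail is the option conflict.
    assert.eq(null,
              MongoRunner.runMongod(
                  {dbpath: seedPath, noCleanData: true, queryableBackupMode: '', replSet: 'bar'}),
              "--queryableBackupMode and --replSet together should not start");
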
diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js
index 222d7b31ec8..926145d64e5 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime.js
@@ -12,159 +12,142 @@ function _getClusterTime(rst) {
}
(function() {
- "use strict";
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const dbName = "test";
+const collName = "coll";
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB(dbName);
- const dbName = "test";
- const collName = "coll";
+if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ rst.stopSet();
+ return;
+}
- const rst = new ReplSetTest({nodes: 1});
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+
+const clusterTime = _getClusterTime(rst);
+
+// 'atClusterTime' can be used with readConcern level 'snapshot'.
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// 'atClusterTime' cannot be greater than the current cluster time.
+const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: futureClusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' must have type Timestamp.
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: "bad"}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'majority'.
+session.startTransaction({readConcern: {level: "majority", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'local'.
+session.startTransaction({readConcern: {level: "local", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'available'.
+session.startTransaction({readConcern: {level: "available", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'linearizable'.
+session.startTransaction({readConcern: {level: "linearizable", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used without readConcern level (level is 'local' by default).
+session.startTransaction({readConcern: {atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with 'afterOpTime'.
+session.startTransaction({
+ readConcern:
+ {level: "snapshot", atClusterTime: clusterTime, afterOpTime: {ts: Timestamp(1, 2), t: 1}}
+});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used outside of a session.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {find: collName, readConcern: {level: "snapshot", atClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions);
+
+// 'atClusterTime' cannot be used with 'afterClusterTime'.
+session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTime, afterClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.endSession();
+rst.stopSet();
+
+// readConcern with 'atClusterTime' should succeed regardless of value of 'enableTestCommands'.
+{
+ jsTest.setOption('enableTestCommands', false);
+ let rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
- const testDB = rst.getPrimary().getDB(dbName);
-
- if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- rst.stopSet();
- return;
- }
-
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
-
- const clusterTime = _getClusterTime(rst);
-
- // 'atClusterTime' can be used with readConcern level 'snapshot'.
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ let session =
+ rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+ let sessionDb = session.getDatabase(dbName);
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
assert.commandWorked(sessionDb.runCommand({find: collName}));
assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+ rst.stopSet();
- // 'atClusterTime' cannot be greater than the current cluster time.
- const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: futureClusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' must have type Timestamp.
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: "bad"}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.TypeMismatch);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'majority'.
- session.startTransaction({readConcern: {level: "majority", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'local'.
- session.startTransaction({readConcern: {level: "local", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'available'.
- session.startTransaction({readConcern: {level: "available", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'linearizable'.
- session.startTransaction({readConcern: {level: "linearizable", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used without readConcern level (level is 'local' by default).
- session.startTransaction({readConcern: {atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with 'afterOpTime'.
- session.startTransaction({
- readConcern: {
- level: "snapshot",
- atClusterTime: clusterTime,
- afterOpTime: {ts: Timestamp(1, 2), t: 1}
- }
- });
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ jsTest.setOption('enableTestCommands', true);
+ rst = new ReplSetTest({nodes: 1});
+ rst.startSet();
+ rst.initiate();
+ session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+ sessionDb = session.getDatabase(dbName);
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
+ assert.commandWorked(sessionDb.runCommand({find: collName}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+ rst.stopSet();
+}
- // 'atClusterTime' cannot be used outside of a session.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {find: collName, readConcern: {level: "snapshot", atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
-
- // 'atClusterTime' cannot be used with 'afterClusterTime'.
- session.startTransaction({
- readConcern:
- {level: "snapshot", atClusterTime: clusterTime, afterClusterTime: clusterTime}
- });
+// readConcern with 'atClusterTime' is not allowed when enableMajorityReadConcern=false.
+{
+ let rst = new ReplSetTest({nodes: [{"enableMajorityReadConcern": "false"}]});
+ rst.startSet();
+ rst.initiate();
+ let session =
+ rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+ let sessionDb = session.getDatabase(dbName);
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
-
session.endSession();
rst.stopSet();
-
- // readConcern with 'atClusterTime' should succeed regardless of value of 'enableTestCommands'.
- {
- jsTest.setOption('enableTestCommands', false);
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- rst.stopSet();
-
- jsTest.setOption('enableTestCommands', true);
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- rst.stopSet();
- }
-
- // readConcern with 'atClusterTime' is not allowed when enableMajorityReadConcern=false.
- {
- let rst = new ReplSetTest({nodes: [{"enableMajorityReadConcern": "false"}]});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session.endSession();
- rst.stopSet();
- }
-
+}
}());
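
Stripped of the negative cases, the valid shape of a snapshot read pinned to a
cluster time is short. A minimal sketch, assuming a running single-node
ReplSetTest 'rst' and the _getClusterTime helper defined at the top of this file:

    const s = rst.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
    s.startTransaction(
        {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
    // Reads inside the transaction observe the snapshot as of that cluster time.
    assert.commandWorked(s.getDatabase("test").runCommand({find: "coll"}));
    assert.commandWorked(s.commitTransaction_forTesting());
    s.endSession();
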
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index 0a20621ed3e..c065ae258aa 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -3,107 +3,104 @@
// as an actual opTime on another shard.
// @tags: [requires_sharding, uses_transactions, uses_atclustertime]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+load("jstests/replsets/rslib.js");
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+if (!assert.commandWorked(conn.getDB("test").serverStatus())
+ .storageEngine.supportsSnapshotReadConcern) {
+ MongoRunner.stopMongod(conn);
+ return;
+}
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+// Create database "test0" on shard 0.
+const testDB0 = st.s.getDB("test0");
+assert.commandWorked(testDB0.adminCommand({enableSharding: testDB0.getName()}));
+st.ensurePrimaryShard(testDB0.getName(), st.shard0.shardName);
+assert.commandWorked(testDB0.createCollection("coll0"));
+
+// Create a database "test1" on shard 1.
+const testDB1 = st.s.getDB("test1");
+assert.commandWorked(testDB1.adminCommand({enableSharding: testDB1.getName()}));
+st.ensurePrimaryShard(testDB1.getName(), st.shard1.shardName);
+assert.commandWorked(testDB1.createCollection("coll1"));
+
+const PropagationPreferenceOptions = Object.freeze({kShard: 0, kConfig: 1});
+
+let testNoopWrite = (fromDbName, fromColl, toRS, toDbName, toColl, propagationPreference) => {
+ const fromDBFromMongos = st.s.getDB(fromDbName);
+ const toDBFromMongos = st.s.getDB(toDbName);
+ const configFromMongos = st.s.getDB("config");
+
+ const oplog = toRS.getPrimary().getCollection("local.oplog.rs");
+ let findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
+ assert(!findRes);
+
+ // Perform a write on the fromDB and get its op time.
+ let res = assert.commandWorked(
+ fromDBFromMongos.runCommand({insert: fromColl, documents: [{_id: 0}]}));
+ assert(res.hasOwnProperty("operationTime"), tojson(res));
+ let clusterTime = res.operationTime;
+
+ // Propagate 'clusterTime' to toRS or the config server. This ensures that its next
+ // write will be at time >= 'clusterTime'. We cannot use toDBFromMongos to propagate
+ // 'clusterTime' to the config server, because mongos only routes to the config server
+ // for the 'config' and 'admin' databases.
+ if (propagationPreference == PropagationPreferenceOptions.kConfig) {
+ configFromMongos.coll1.find().itcount();
+ } else {
+ toDBFromMongos.toColl.find().itcount();
}
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- if (!assert.commandWorked(conn.getDB("test").serverStatus())
- .storageEngine.supportsSnapshotReadConcern) {
- MongoRunner.stopMongod(conn);
- return;
+ // Attempt a snapshot read at 'clusterTime' on toRS. Test that it performs a noop write
+ // to advance its lastApplied optime past 'clusterTime'. The snapshot read itself may
+ // fail if the noop write advances the node's majority commit point past 'clusterTime'
+ // and it releases that snapshot.
+ const toRSSession =
+ toRS.getPrimary().getDB(toDBFromMongos).getMongo().startSession({causalConsistency: false});
+
+ toRSSession.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ res = toRSSession.getDatabase(toDBFromMongos).runCommand({find: toColl});
+ if (res.ok === 0) {
+ assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
+ assert.commandFailedWithCode(toRSSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ } else {
+ assert.commandWorked(toRSSession.commitTransaction_forTesting());
}
- MongoRunner.stopMongod(conn);
- const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
-
- // Create database "test0" on shard 0.
- const testDB0 = st.s.getDB("test0");
- assert.commandWorked(testDB0.adminCommand({enableSharding: testDB0.getName()}));
- st.ensurePrimaryShard(testDB0.getName(), st.shard0.shardName);
- assert.commandWorked(testDB0.createCollection("coll0"));
-
- // Create a database "test1" on shard 1.
- const testDB1 = st.s.getDB("test1");
- assert.commandWorked(testDB1.adminCommand({enableSharding: testDB1.getName()}));
- st.ensurePrimaryShard(testDB1.getName(), st.shard1.shardName);
- assert.commandWorked(testDB1.createCollection("coll1"));
-
- const PropagationPreferenceOptions = Object.freeze({kShard: 0, kConfig: 1});
-
- let testNoopWrite = (fromDbName, fromColl, toRS, toDbName, toColl, propagationPreference) => {
- const fromDBFromMongos = st.s.getDB(fromDbName);
- const toDBFromMongos = st.s.getDB(toDbName);
- const configFromMongos = st.s.getDB("config");
-
- const oplog = toRS.getPrimary().getCollection("local.oplog.rs");
- let findRes =
- oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
- assert(!findRes);
-
- // Perform a write on the fromDB and get its op time.
- let res = assert.commandWorked(
- fromDBFromMongos.runCommand({insert: fromColl, documents: [{_id: 0}]}));
- assert(res.hasOwnProperty("operationTime"), tojson(res));
- let clusterTime = res.operationTime;
-
- // Propagate 'clusterTime' to toRS or the config server. This ensures that its next
- // write will be at time >= 'clusterTime'. We cannot use toDBFromMongos to propagate
- // 'clusterTime' to the config server, because mongos only routes to the config server
- // for the 'config' and 'admin' databases.
- if (propagationPreference == PropagationPreferenceOptions.kConfig) {
- configFromMongos.coll1.find().itcount();
- } else {
- toDBFromMongos.toColl.find().itcount();
- }
-
- // Attempt a snapshot read at 'clusterTime' on toRS. Test that it performs a noop write
- // to advance its lastApplied optime past 'clusterTime'. The snapshot read itself may
- // fail if the noop write advances the node's majority commit point past 'clusterTime'
- // and it releases that snapshot.
- const toRSSession = toRS.getPrimary().getDB(toDBFromMongos).getMongo().startSession({
- causalConsistency: false
- });
-
- toRSSession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTime}});
- res = toRSSession.getDatabase(toDBFromMongos).runCommand({find: toColl});
- if (res.ok === 0) {
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
- assert.commandFailedWithCode(toRSSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } else {
- assert.commandWorked(toRSSession.commitTransaction_forTesting());
- }
-
- const toRSOpTime = getLastOpTime(toRS.getPrimary()).ts;
-
- assert.gte(toRSOpTime, clusterTime);
-
- findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
- assert(findRes);
- };
-
- //
- // Test noop write. Read from the destination shard.
- //
-
- testNoopWrite("test0", "coll0", st.rs1, "test1", "coll1", PropagationPreferenceOptions.kShard);
-
- //
- // Test noop write. Read from the config server's primary.
- //
-
- testNoopWrite(
- "test0", "coll2", st.configRS, "test1", "coll3", PropagationPreferenceOptions.kConfig);
-
- st.stop();
+ const toRSOpTime = getLastOpTime(toRS.getPrimary()).ts;
+
+ assert.gte(toRSOpTime, clusterTime);
+
+ findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
+ assert(findRes);
+};
+
+//
+// Test noop write. Read from the destination shard.
+//
+
+testNoopWrite("test0", "coll0", st.rs1, "test1", "coll1", PropagationPreferenceOptions.kShard);
+
+//
+// Test noop write. Read from the config server's primary.
+//
+
+testNoopWrite(
+ "test0", "coll2", st.configRS, "test1", "coll3", PropagationPreferenceOptions.kConfig);
+
+st.stop();
}());
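
The mechanism exercised above: a snapshot read at a cluster time ahead of the target
node's lastApplied optime makes that node perform an internal noop write before it can
serve the read. A minimal sketch of the same check, assuming a running ReplSetTest 'rs'
and a Timestamp 'clusterTime' taken from the operationTime of an earlier write elsewhere
(the database and collection names here are illustrative):

    const primary = rs.getPrimary();
    const oplog = primary.getCollection("local.oplog.rs");

    // No noop entry should exist before the read.
    assert.eq(null, oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}}));

    // A snapshot read at 'clusterTime' forces the node to advance its lastApplied
    // optime via a noop write before serving the read.
    const session = primary.startSession({causalConsistency: false});
    session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
    session.getDatabase("test").runCommand({find: "coll"});
    session.abortTransaction_forTesting();  // may return NoSuchTransaction if the snapshot aged out

    // The oplog now contains the noop entry.
    assert.neq(null, oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}}));
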
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js b/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js
index e9e92c88da1..49b3b16da66 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js
@@ -4,93 +4,91 @@
//
// @tags: [uses_transactions, requires_majority_read_concern]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // For stopServerReplication.
+load("jstests/libs/write_concern_util.js"); // For stopServerReplication.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 3, settings: {chainingAllowed: false}});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 3, settings: {chainingAllowed: false}});
+rst.startSet();
+rst.initiate();
- const primarySession =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- const primaryDB = primarySession.getDatabase(dbName);
+const primarySession =
+ rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+const primaryDB = primarySession.getDatabase(dbName);
- const secondaryConns = rst.getSecondaries();
- const secondaryConn0 = secondaryConns[0];
- const secondaryConn1 = secondaryConns[1];
- const secondarySession =
- secondaryConn0.getDB(dbName).getMongo().startSession({causalConsistency: false});
- const secondaryDB0 = secondarySession.getDatabase(dbName);
+const secondaryConns = rst.getSecondaries();
+const secondaryConn0 = secondaryConns[0];
+const secondaryConn1 = secondaryConns[1];
+const secondarySession =
+ secondaryConn0.getDB(dbName).getMongo().startSession({causalConsistency: false});
+const secondaryDB0 = secondarySession.getDatabase(dbName);
- // Create the collection and insert one document. Get the op time of the write.
- let res = assert.commandWorked(primaryDB.runCommand(
- {insert: collName, documents: [{_id: "before"}], writeConcern: {w: "majority"}}));
- const clusterTimePrimaryBefore = res.opTime.ts;
+// Create the collection and insert one document. Get the op time of the write.
+let res = assert.commandWorked(primaryDB.runCommand(
+ {insert: collName, documents: [{_id: "before"}], writeConcern: {w: "majority"}}));
+const clusterTimePrimaryBefore = res.opTime.ts;
- // Wait for the majority commit point on 'secondaryDB0' to include the {_id: "before"} write.
- assert.soonNoExcept(function() {
- return assert
- .commandWorked(secondaryDB0.runCommand(
- {find: collName, readConcern: {level: "majority"}, maxTimeMS: 10000}))
- .cursor.firstBatch.length === 1;
- });
+// Wait for the majority commit point on 'secondaryDB0' to include the {_id: "before"} write.
+assert.soonNoExcept(function() {
+ return assert
+ .commandWorked(secondaryDB0.runCommand(
+ {find: collName, readConcern: {level: "majority"}, maxTimeMS: 10000}))
+ .cursor.firstBatch.length === 1;
+});
- // Stop replication on both secondaries.
- stopServerReplication(secondaryConn0);
- stopServerReplication(secondaryConn1);
+// Stop replication on both secondaries.
+stopServerReplication(secondaryConn0);
+stopServerReplication(secondaryConn1);
- // Perform write and get the op time of the write.
- res =
- assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: "after"}]}));
- assert(res.hasOwnProperty("opTime"), tojson(res));
- assert(res.opTime.hasOwnProperty("ts"), tojson(res));
- let clusterTimeAfter = res.opTime.ts;
+// Perform write and get the op time of the write.
+res = assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: "after"}]}));
+assert(res.hasOwnProperty("opTime"), tojson(res));
+assert(res.opTime.hasOwnProperty("ts"), tojson(res));
+let clusterTimeAfter = res.opTime.ts;
- // A read on the primary at the old cluster time should not include the write.
- primarySession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTimePrimaryBefore}});
- res = assert.commandWorked(primaryDB.runCommand({find: collName}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
- assert.eq(res.cursor.firstBatch.length, 1, printjson(res));
- assert.eq(res.cursor.firstBatch[0]._id, "before", printjson(res));
+// A read on the primary at the old cluster time should not include the write.
+primarySession.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTimePrimaryBefore}});
+res = assert.commandWorked(primaryDB.runCommand({find: collName}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+assert.eq(res.cursor.firstBatch.length, 1, printjson(res));
+assert.eq(res.cursor.firstBatch[0]._id, "before", printjson(res));
- // A read on the primary at the new cluster time should succeed because transactions implement
- // speculative behavior, but the attempt to commit the transaction should time out waiting for
- // the transaction to be majority committed.
- primarySession.startTransaction({
- readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
- writeConcern: {w: "majority", wtimeout: 1000}
- });
- res = assert.commandWorked(primaryDB.runCommand({find: collName}));
- assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
- assert.commandFailedWithCode(primarySession.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
+// A read on the primary at the new cluster time should succeed because transactions implement
+// speculative behavior, but the attempt to commit the transaction should time out waiting for
+// the transaction to be majority committed.
+primarySession.startTransaction({
+ readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
+ writeConcern: {w: "majority", wtimeout: 1000}
+});
+res = assert.commandWorked(primaryDB.runCommand({find: collName}));
+assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
+assert.commandFailedWithCode(primarySession.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
- // A read on the primary at the new cluster time succeeds.
- primarySession.startTransaction({
- readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
- writeConcern: {w: "majority"}
- });
- res = assert.commandWorked(primaryDB.runCommand({find: collName}));
- assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
- // Restart replication on one of the secondaries.
- restartServerReplication(secondaryConn1);
- // This time the transaction should commit.
- assert.commandWorked(primarySession.commitTransaction_forTesting());
+// A read on the primary at the new cluster time succeeds.
+primarySession.startTransaction({
+ readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
+ writeConcern: {w: "majority"}
+});
+res = assert.commandWorked(primaryDB.runCommand({find: collName}));
+assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
+// Restart replication on one of the secondaries.
+restartServerReplication(secondaryConn1);
+// This time the transaction should commit.
+assert.commandWorked(primarySession.commitTransaction_forTesting());
- // Restart replication on the lagged secondary.
- restartServerReplication(secondaryConn0);
+// Restart replication on the lagged secondary.
+restartServerReplication(secondaryConn0);
- // A read at a time that is too old fails.
- primarySession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
- assert.commandFailedWithCode(primaryDB.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
- assert.commandFailedWithCode(primarySession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// A read at a time that is too old fails.
+primarySession.startTransaction({readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
+assert.commandFailedWithCode(primaryDB.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
+assert.commandFailedWithCode(primarySession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- rst.stopSet();
+rst.stopSet();
}());
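
The speculative behavior asserted above reduces to a simple pattern: a transaction read at
'atClusterTime' succeeds before that timestamp is majority committed, while the commit must
wait for majority replication. A sketch, assuming a ReplSetTest 'rst' whose secondaries have
replication stopped and a Timestamp 'clusterTimeAfter' from a not-yet-majority-committed write:

    const session =
        rst.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
    session.startTransaction({
        readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
        writeConcern: {w: "majority", wtimeout: 1000}
    });
    // The read succeeds speculatively, before the snapshot is majority committed...
    assert.commandWorked(session.getDatabase("test").runCommand({find: "coll"}));
    // ...but the commit has to wait for majority replication, so it times out.
    assert.commandFailedWithCode(session.commitTransaction_forTesting(),
                                 ErrorCodes.WriteConcernFailed);
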
diff --git a/jstests/noPassthrough/readConcern_snapshot.js b/jstests/noPassthrough/readConcern_snapshot.js
index 63413d16820..0416cd689ce 100644
--- a/jstests/noPassthrough/readConcern_snapshot.js
+++ b/jstests/noPassthrough/readConcern_snapshot.js
@@ -1,138 +1,134 @@
// Test parsing of readConcern level 'snapshot'.
// @tags: [requires_replication, uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- //
- // Configurations.
- //
-
- // Transactions should fail on storage engines that do not support them.
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- if (!sessionDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- // Transactions with readConcern snapshot fail.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
-
- // Transactions without readConcern snapshot fail.
- session.startTransaction();
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
-
- rst.stopSet();
- return;
- }
- session.endSession();
- rst.stopSet();
-
- // readConcern 'snapshot' is not allowed on a standalone.
- const conn = MongoRunner.runMongod();
- session = conn.startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- assert.neq(null, conn, "mongod was unable to start up");
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+//
+// Configurations.
+//
+
+// Transactions should fail on storage engines that do not support them.
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+let session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+let sessionDb = session.getDatabase(dbName);
+if (!sessionDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ // Transactions with readConcern snapshot fail.
session.startTransaction({readConcern: {level: "snapshot"}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.IllegalOperation);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.IllegalOperation);
- session.endSession();
- MongoRunner.stopMongod(conn);
-
- // readConcern 'snapshot' is allowed on a replica set primary.
- rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- assert.commandWorked(rst.getPrimary().getDB(dbName).runCommand(
- {create: collName, writeConcern: {w: "majority"}}));
- session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- session.startTransaction({writeConcern: {w: "majority"}, readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDb.coll.insert({}));
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // readConcern 'snapshot' is allowed with 'afterClusterTime'.
+ [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
+
+ // Transactions without readConcern snapshot fail.
session.startTransaction();
- let pingRes = assert.commandWorked(rst.getPrimary().adminCommand({ping: 1}));
- assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
- assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}
- }));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // readConcern 'snapshot' is not allowed with 'afterOpTime'.
- session.startTransaction(
- {readConcern: {level: "snapshot", afterOpTime: {ts: Timestamp(1, 2), t: 1}}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
+ ErrorCodes.IllegalOperation);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session.endSession();
-
- pingRes = assert.commandWorked(rst.getSecondary().adminCommand({ping: 1}));
- assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
- assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
-
- session.startTransaction(
- {readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- session.endSession();
- rst.stopSet();
+ [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
- //
- // Commands.
- //
-
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let testDB = rst.getPrimary().getDB(dbName);
- let coll = testDB.coll;
- assert.commandWorked(coll.createIndex({geo: "2d"}));
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}],
- writeConcern: {w: "majority"}
- }));
-
- session = testDB.getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
-
- // readConcern 'snapshot' is supported by find.
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
-
- // readConcern 'snapshot' is supported by aggregate.
- assert.commandWorked(sessionDb.runCommand({aggregate: collName, pipeline: [], cursor: {}}));
-
- // readConcern 'snapshot' is supported by distinct.
- assert.commandWorked(sessionDb.runCommand({distinct: collName, key: "x"}));
-
- // readConcern 'snapshot' is supported by geoSearch.
- assert.commandWorked(
- sessionDb.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}}));
-
- // readConcern 'snapshot' is not supported by non-CRUD commands.
- assert.commandFailedWithCode(
- sessionDb.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandWorked(session.abortTransaction_forTesting());
- session.endSession();
rst.stopSet();
+ return;
+}
+session.endSession();
+rst.stopSet();
+
+// readConcern 'snapshot' is not allowed on a standalone.
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+session = conn.startSession({causalConsistency: false});
+sessionDb = session.getDatabase(dbName);
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.IllegalOperation);
+session.endSession();
+MongoRunner.stopMongod(conn);
+
+// readConcern 'snapshot' is allowed on a replica set primary.
+rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+assert.commandWorked(
+ rst.getPrimary().getDB(dbName).runCommand({create: collName, writeConcern: {w: "majority"}}));
+session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+sessionDb = session.getDatabase(dbName);
+session.startTransaction({writeConcern: {w: "majority"}, readConcern: {level: "snapshot"}});
+assert.commandWorked(sessionDb.coll.insert({}));
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// readConcern 'snapshot' is allowed with 'afterClusterTime'.
+session.startTransaction();
+let pingRes = assert.commandWorked(rst.getPrimary().adminCommand({ping: 1}));
+assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
+assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}
+}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// readConcern 'snapshot' is not allowed with 'afterOpTime'.
+session.startTransaction(
+ {readConcern: {level: "snapshot", afterOpTime: {ts: Timestamp(1, 2), t: 1}}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+session.endSession();
+
+pingRes = assert.commandWorked(rst.getSecondary().adminCommand({ping: 1}));
+assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
+assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
+
+session.startTransaction(
+ {readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}});
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+session.endSession();
+rst.stopSet();
+
+//
+// Commands.
+//
+
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+let testDB = rst.getPrimary().getDB(dbName);
+let coll = testDB.coll;
+assert.commandWorked(coll.createIndex({geo: "2d"}));
+assert.commandWorked(testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}],
+ writeConcern: {w: "majority"}
+}));
+
+session = testDB.getMongo().startSession({causalConsistency: false});
+sessionDb = session.getDatabase(dbName);
+
+// readConcern 'snapshot' is supported by find.
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+
+// readConcern 'snapshot' is supported by aggregate.
+assert.commandWorked(sessionDb.runCommand({aggregate: collName, pipeline: [], cursor: {}}));
+
+// readConcern 'snapshot' is supported by distinct.
+assert.commandWorked(sessionDb.runCommand({distinct: collName, key: "x"}));
+
+// readConcern 'snapshot' is supported by geoSearch.
+assert.commandWorked(
+ sessionDb.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}}));
+
+// readConcern 'snapshot' is not supported by non-CRUD commands.
+assert.commandFailedWithCode(
+ sessionDb.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandWorked(session.abortTransaction_forTesting());
+session.endSession();
+rst.stopSet();
}());
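
The option-combination rules parsed above can be summarized in one sketch, reusing the
test's 'session' and 'sessionDb' and assuming 'clusterTime' is any Timestamp gossiped back
in a prior command's $clusterTime:

    session.startTransaction({readConcern: {level: "snapshot"}});  // allowed
    assert.commandWorked(sessionDb.runCommand({find: "coll"}));
    assert.commandWorked(session.commitTransaction_forTesting());

    session.startTransaction(
        {readConcern: {level: "snapshot", afterClusterTime: clusterTime}});  // allowed
    assert.commandWorked(sessionDb.runCommand({find: "coll"}));
    assert.commandWorked(session.commitTransaction_forTesting());

    session.startTransaction(
        {readConcern: {level: "snapshot", afterOpTime: {ts: Timestamp(1, 2), t: 1}}});
    assert.commandFailedWithCode(sessionDb.runCommand({find: "coll"}),
                                 ErrorCodes.InvalidOptions);  // 'afterOpTime' is rejected
    assert.commandFailedWithCode(session.abortTransaction_forTesting(),
                                 ErrorCodes.NoSuchTransaction);
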
diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos.js b/jstests/noPassthrough/readConcern_snapshot_mongos.js
index 74a2e6e0ffe..ab346a12937 100644
--- a/jstests/noPassthrough/readConcern_snapshot_mongos.js
+++ b/jstests/noPassthrough/readConcern_snapshot_mongos.js
@@ -1,131 +1,130 @@
// Test parsing of readConcern level 'snapshot' on mongos.
// @tags: [requires_replication, requires_sharding, uses_transactions, uses_atclustertime]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- // Runs the command as the first in a multi statement txn that is aborted right after, expecting
- // success.
- function expectSuccessInTxnThenAbort(session, sessionConn, cmdObj) {
- session.startTransaction();
- assert.commandWorked(sessionConn.runCommand(cmdObj));
- assert.commandWorked(session.abortTransaction_forTesting());
- }
-
- // Runs the command as the first in a multi statement txn that is aborted right after, expecting
- // failure with the given error code.
- function expectFailInTxnThenAbort(session, sessionConn, expectedErrorCode, cmdObj) {
- session.startTransaction();
- assert.commandFailedWithCode(sessionConn.runCommand(cmdObj), expectedErrorCode);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
-
- const dbName = "test";
- const collName = "coll";
-
- let st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2, mongos: 1});
- let testDB = st.getDB(dbName);
- let coll = testDB.coll;
-
- // Insert data to create the collection.
- assert.writeOK(testDB[collName].insert({x: 1}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns: dbName + "." + collName, dbNames: [dbName]});
-
- // noPassthrough tests
-
- // readConcern 'snapshot' is not allowed outside session context.
- assert.commandFailedWithCode(
- testDB.runCommand({find: collName, readConcern: {level: "snapshot"}}),
- ErrorCodes.InvalidOptions);
-
- let session = testDB.getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
-
- // readConcern 'snapshot' is not allowed outside transaction context.
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- }),
- ErrorCodes.InvalidOptions);
-
- // readConcern 'snapshot' is not allowed with 'atClusterTime'.
- let pingRes = assert.commandWorked(st.s0.adminCommand({ping: 1}));
- assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
- assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
- const clusterTime = pingRes.$clusterTime.clusterTime;
-
- expectFailInTxnThenAbort(session, sessionDb, ErrorCodes.InvalidOptions, {
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: clusterTime},
- });
-
- // Passthrough tests. There are parts not implemented on mongod and mongos, they are tracked by
- // separate jiras
-
- // readConcern 'snapshot' is supported by insert on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- insert: collName,
- documents: [{_id: "single-insert"}],
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by update on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- update: collName,
- updates: [{q: {_id: 0}, u: {$inc: {a: 1}}}],
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by delete on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- delete: collName,
- deletes: [{q: {}, limit: 1}],
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by findAndModify on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- findAndModify: collName,
- query: {},
- update: {$set: {a: 1}},
- readConcern: {level: "snapshot"},
- });
-
- expectSuccessInTxnThenAbort(session, sessionDb, {
- aggregate: collName,
- pipeline: [],
- cursor: {},
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by find on mongos.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- find: collName,
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by distinct on mongos.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- distinct: collName,
- key: "x",
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is allowed with 'afterClusterTime'.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- find: collName,
- readConcern: {level: "snapshot", afterClusterTime: clusterTime},
- });
-
- expectSuccessInTxnThenAbort(session, sessionDb, {
- aggregate: collName,
- pipeline: [],
- cursor: {},
- readConcern: {level: "snapshot", afterClusterTime: clusterTime},
- });
-
- st.stop();
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Runs the command as the first in a multi-statement txn that is aborted right after, expecting
+// success.
+function expectSuccessInTxnThenAbort(session, sessionConn, cmdObj) {
+ session.startTransaction();
+ assert.commandWorked(sessionConn.runCommand(cmdObj));
+ assert.commandWorked(session.abortTransaction_forTesting());
+}
+
+// Runs the command as the first in a multi-statement txn that is aborted right after, expecting
+// failure with the given error code.
+function expectFailInTxnThenAbort(session, sessionConn, expectedErrorCode, cmdObj) {
+ session.startTransaction();
+ assert.commandFailedWithCode(sessionConn.runCommand(cmdObj), expectedErrorCode);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+}
+
+const dbName = "test";
+const collName = "coll";
+
+let st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2, mongos: 1});
+let testDB = st.getDB(dbName);
+let coll = testDB.coll;
+
+// Insert data to create the collection.
+assert.writeOK(testDB[collName].insert({x: 1}));
+
+flushRoutersAndRefreshShardMetadata(st, {ns: dbName + "." + collName, dbNames: [dbName]});
+
+// noPassthrough tests
+
+// readConcern 'snapshot' is not allowed outside session context.
+assert.commandFailedWithCode(testDB.runCommand({find: collName, readConcern: {level: "snapshot"}}),
+ ErrorCodes.InvalidOptions);
+
+let session = testDB.getMongo().startSession({causalConsistency: false});
+let sessionDb = session.getDatabase(dbName);
+
+// readConcern 'snapshot' is not allowed outside transaction context.
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+}),
+ ErrorCodes.InvalidOptions);
+
+// readConcern 'snapshot' is not allowed with 'atClusterTime'.
+let pingRes = assert.commandWorked(st.s0.adminCommand({ping: 1}));
+assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
+assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
+const clusterTime = pingRes.$clusterTime.clusterTime;
+
+expectFailInTxnThenAbort(session, sessionDb, ErrorCodes.InvalidOptions, {
+ find: collName,
+ readConcern: {level: "snapshot", atClusterTime: clusterTime},
+});
+
+// Passthrough tests. Some parts are not yet implemented on mongod and mongos; they are tracked
+// by separate JIRA tickets.
+
+// readConcern 'snapshot' is supported by insert on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ insert: collName,
+ documents: [{_id: "single-insert"}],
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by update on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ update: collName,
+ updates: [{q: {_id: 0}, u: {$inc: {a: 1}}}],
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by delete on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ delete: collName,
+ deletes: [{q: {}, limit: 1}],
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by findAndModify on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ findAndModify: collName,
+ query: {},
+ update: {$set: {a: 1}},
+ readConcern: {level: "snapshot"},
+});
+
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ aggregate: collName,
+ pipeline: [],
+ cursor: {},
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by find on mongos.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ find: collName,
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by distinct on mongos.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ distinct: collName,
+ key: "x",
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is allowed with 'afterClusterTime'.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ find: collName,
+ readConcern: {level: "snapshot", afterClusterTime: clusterTime},
+});
+
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ aggregate: collName,
+ pipeline: [],
+ cursor: {},
+ readConcern: {level: "snapshot", afterClusterTime: clusterTime},
+});
+
+st.stop();
}());
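
For reference, each helper call above is shorthand for a three-step sequence. A call such as

    expectSuccessInTxnThenAbort(session, sessionDb, {find: collName, readConcern: {level: "snapshot"}});

expands to:

    session.startTransaction();
    assert.commandWorked(sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}));
    assert.commandWorked(session.abortTransaction_forTesting());
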
diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
index e92126186fb..22eaa2fbf89 100644
--- a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
+++ b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
@@ -2,46 +2,45 @@
//
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- // Runs multiple commands with read concern level "snapshot" in a session,
- // expecting success.
- function expectSnapshotReadConcernIsSupported() {
- const st = new ShardingTest({shards: 1, config: 1});
- const session = st.s.startSession({causalConsistency: false});
- let txnNumber = 0;
-
- assert.commandWorked(session.getDatabase(dbName).runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- startTransaction: true,
- autocommit: false
- }));
-
- assert.commandWorked(session.getDatabase(dbName).runCommand({
- aggregate: collName,
- pipeline: [],
- cursor: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- startTransaction: true,
- autocommit: false
- }));
-
- session.endSession();
- st.stop();
- }
-
- // Snapshot readConcern should succeed when 'enableTestCommands' is set to false.
- jsTest.setOption("enableTestCommands", false);
- expectSnapshotReadConcernIsSupported();
-
- // Snapshot readConcern should succeed when 'enableTestCommands' is set to true.
- jsTest.setOption("enableTestCommands", true);
- expectSnapshotReadConcernIsSupported();
-
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+// Runs multiple commands with read concern level "snapshot" in a session,
+// expecting success.
+function expectSnapshotReadConcernIsSupported() {
+ const st = new ShardingTest({shards: 1, config: 1});
+ const session = st.s.startSession({causalConsistency: false});
+ let txnNumber = 0;
+
+ assert.commandWorked(session.getDatabase(dbName).runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ startTransaction: true,
+ autocommit: false
+ }));
+
+ assert.commandWorked(session.getDatabase(dbName).runCommand({
+ aggregate: collName,
+ pipeline: [],
+ cursor: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ startTransaction: true,
+ autocommit: false
+ }));
+
+ session.endSession();
+ st.stop();
+}
+
+// Snapshot readConcern should succeed when 'enableTestCommands' is set to false.
+jsTest.setOption("enableTestCommands", false);
+expectSnapshotReadConcernIsSupported();
+
+// Snapshot readConcern should succeed when 'enableTestCommands' is set to true.
+jsTest.setOption("enableTestCommands", true);
+expectSnapshotReadConcernIsSupported();
}());
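
The test above drives transactions without the session transaction helpers: each statement
carries the transaction fields explicitly. A sketch of that raw pattern, assuming a
ShardingTest 'st' as in the test:

    const session = st.s.startSession({causalConsistency: false});
    let txnNumber = 0;
    assert.commandWorked(session.getDatabase("test").runCommand({
        find: "coll",
        readConcern: {level: "snapshot"},    // only legal on the first statement of a txn
        txnNumber: NumberLong(txnNumber++),  // one txnNumber per transaction on this session
        startTransaction: true,              // marks this as the first statement
        autocommit: false                    // required on every statement in the txn
    }));
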
diff --git a/jstests/noPassthrough/read_concern_helper.js b/jstests/noPassthrough/read_concern_helper.js
index b83b48bdf34..d8cb159b0c7 100644
--- a/jstests/noPassthrough/read_concern_helper.js
+++ b/jstests/noPassthrough/read_concern_helper.js
@@ -1,27 +1,27 @@
// This tests readConcern handling for the find/findOne shell helpers.
// @tags: [requires_majority_read_concern]
(function() {
- "use strict";
- var testServer = MongoRunner.runMongod();
- if (!testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- MongoRunner.stopMongod(testServer);
- return;
- }
- var coll = testServer.getDB("test").readMajority;
+"use strict";
+var testServer = MongoRunner.runMongod();
+if (!testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ MongoRunner.stopMongod(testServer);
+ return;
+}
+var coll = testServer.getDB("test").readMajority;
- assert.doesNotThrow(function() {
- coll.find({_id: "foo"}).readConcern("majority").itcount();
- });
- assert.doesNotThrow(function() {
- coll.findOne({_id: "foo"}, {}, {}, "majority");
- });
- assert.doesNotThrow(function() {
- coll.count({_id: "foo"}, {readConcern: "majority"});
- });
- assert.doesNotThrow(function() {
- coll.find({_id: "foo"}).readConcern("majority").count();
- });
+assert.doesNotThrow(function() {
+ coll.find({_id: "foo"}).readConcern("majority").itcount();
+});
+assert.doesNotThrow(function() {
+ coll.findOne({_id: "foo"}, {}, {}, "majority");
+});
+assert.doesNotThrow(function() {
+ coll.count({_id: "foo"}, {readConcern: "majority"});
+});
+assert.doesNotThrow(function() {
+ coll.find({_id: "foo"}).readConcern("majority").count();
+});
- MongoRunner.stopMongod(testServer);
+MongoRunner.stopMongod(testServer);
}());
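
Each shell helper above attaches a readConcern document to the underlying command; the
cursor form is roughly equivalent to this raw command (a sketch using the same server
and collection):

    assert.commandWorked(testServer.getDB("test").runCommand({
        find: "readMajority",
        filter: {_id: "foo"},
        readConcern: {level: "majority"}
    }));
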
diff --git a/jstests/noPassthrough/read_concern_snapshot_aggregation.js b/jstests/noPassthrough/read_concern_snapshot_aggregation.js
index 9c36b6ebf2e..2cb5fe26fd6 100644
--- a/jstests/noPassthrough/read_concern_snapshot_aggregation.js
+++ b/jstests/noPassthrough/read_concern_snapshot_aggregation.js
@@ -4,129 +4,129 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const kAdminDB = "admin";
- const kCollName = "coll";
- const kConfigDB = "config";
- const kDBName = "test";
- const kWCMajority = {writeConcern: {w: "majority"}};
+const kAdminDB = "admin";
+const kCollName = "coll";
+const kConfigDB = "config";
+const kDBName = "test";
+const kWCMajority = {
+ writeConcern: {w: "majority"}
+};
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(kDBName).getMongo().startSession({causalConsistency: false});
- let sessionDB = session.getDatabase(kDBName);
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+let session = rst.getPrimary().getDB(kDBName).getMongo().startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(kDBName);
- let txnNumber = NumberLong(0);
- assert.commandWorked(sessionDB.runCommand({create: kCollName, writeConcern: {w: "majority"}}));
+let txnNumber = NumberLong(0);
+assert.commandWorked(sessionDB.runCommand({create: kCollName, writeConcern: {w: "majority"}}));
- function testSnapshotAggFailsWithCode(coll, pipeline, code) {
- let cmd = {aggregate: coll, pipeline: pipeline, cursor: {}};
+function testSnapshotAggFailsWithCode(coll, pipeline, code) {
+ let cmd = {aggregate: coll, pipeline: pipeline, cursor: {}};
- let cmdAsSnapshotRead = Object.extend({}, cmd);
- cmdAsSnapshotRead.txnNumber = NumberLong(++txnNumber);
- cmdAsSnapshotRead.readConcern = {level: "snapshot"};
- cmdAsSnapshotRead.autocommit = false;
- cmdAsSnapshotRead.startTransaction = true;
- assert.commandFailedWithCode(sessionDB.runCommand(cmdAsSnapshotRead), code);
+ let cmdAsSnapshotRead = Object.extend({}, cmd);
+ cmdAsSnapshotRead.txnNumber = NumberLong(++txnNumber);
+ cmdAsSnapshotRead.readConcern = {level: "snapshot"};
+ cmdAsSnapshotRead.autocommit = false;
+ cmdAsSnapshotRead.startTransaction = true;
+ assert.commandFailedWithCode(sessionDB.runCommand(cmdAsSnapshotRead), code);
- // As a sanity check, also make sure that the command succeeds when run without a txn number
- // and without a readConcern.
- assert.commandWorked(sessionDB.runCommand(cmd));
- }
+ // As a sanity check, also make sure that the command succeeds when run without a txn number
+ // and without a readConcern.
+ assert.commandWorked(sessionDB.runCommand(cmd));
+}
- // Test that $changeStream is disallowed with transactions.
- // TODO SERVER-37221: Remove the check for 'supportsCommittedReads'.
- if (sessionDB.serverStatus().storageEngine.supportsCommittedReads) {
- testSnapshotAggFailsWithCode(
- kCollName, [{$changeStream: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- }
-
- // Test that $collStats is disallowed with transactions.
- testSnapshotAggFailsWithCode(
- kCollName, [{$collStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
-
- // Test that $indexStats is disallowed with transactions.
+// Test that $changeStream is disallowed with transactions.
+// TODO SERVER-37221: Remove the check for 'supportsCommittedReads'.
+if (sessionDB.serverStatus().storageEngine.supportsCommittedReads) {
testSnapshotAggFailsWithCode(
- kCollName, [{$indexStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+ kCollName, [{$changeStream: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+}
- // Test that $listLocalSessions is disallowed with transactions.
- testSnapshotAggFailsWithCode(
- 1, [{$listLocalSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+// Test that $collStats is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ kCollName, [{$collStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- // Test that $out is disallowed with transactions.
- testSnapshotAggFailsWithCode(
- kCollName, [{$out: "out"}], ErrorCodes.OperationNotSupportedInTransaction);
+// Test that $indexStats is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ kCollName, [{$indexStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- // Test that $listSessions is disallowed with transactions. This stage must be run against
- // 'system.sessions' in the config database, which cannot be queried in a transaction.
- sessionDB = session.getDatabase(kConfigDB);
- testSnapshotAggFailsWithCode(
- "system.sessions", [{$listSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+// Test that $listLocalSessions is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ 1, [{$listLocalSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- // Test that $currentOp is disallowed with transactions. We have to reassign 'sessionDB' to
- // refer to the admin database, because $currentOp pipelines are required to run against
- // 'admin'. Queries against 'admin' are not permitted in a transaction.
- sessionDB = session.getDatabase(kAdminDB);
- testSnapshotAggFailsWithCode(
- 1, [{$currentOp: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- sessionDB = session.getDatabase(kDBName);
+// Test that $out is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ kCollName, [{$out: "out"}], ErrorCodes.OperationNotSupportedInTransaction);
- // Helper for testing that aggregation stages which involve a local and foreign collection
- // ($lookup and $graphLookup) obey the expected readConcern "snapshot" isolation semantics.
- //
- // Inserts 'localDocsPre' into the 'local' collection and 'foreignDocsPre' into the 'foreign'
- // collection. Then runs the first batch of 'pipeline', before inserting 'localDocsPost' into
- // 'local' and 'foreignDocsPost' into 'foreign'. Iterates the remainder of the aggregation
- // cursor and verifies that the result set matches 'expectedResults'.
- function testLookupReadConcernSnapshotIsolation(
- {localDocsPre, foreignDocsPre, localDocsPost, foreignDocsPost, pipeline, expectedResults}) {
- sessionDB.runCommand({drop: "local", writeConcern: {w: "majority"}});
- sessionDB.runCommand({drop: "foreign", writeConcern: {w: "majority"}});
- let localColl = sessionDB.local;
- let foreignColl = sessionDB.foreign;
- assert.commandWorked(localColl.insert(localDocsPre, kWCMajority));
- assert.commandWorked(foreignColl.insert(foreignDocsPre, kWCMajority));
- let cmdRes = sessionDB.runCommand({
- aggregate: localColl.getName(),
- pipeline: pipeline,
- cursor: {batchSize: 0},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false
- });
- assert.commandWorked(cmdRes);
- assert.neq(0, cmdRes.cursor.id);
- assert.eq(0, cmdRes.cursor.firstBatch.length);
+// Test that $listSessions is disallowed with transactions. This stage must be run against
+// 'system.sessions' in the config database, which cannot be queried in a transaction.
+sessionDB = session.getDatabase(kConfigDB);
+testSnapshotAggFailsWithCode(
+ "system.sessions", [{$listSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandWorked(localColl.insert(localDocsPost, kWCMajority));
- assert.commandWorked(foreignColl.insert(foreignDocsPost, kWCMajority));
- let results =
- new DBCommandCursor(sessionDB, cmdRes, undefined, undefined, NumberLong(txnNumber))
- .toArray();
- assert.eq(results, expectedResults);
- assert.commandWorked(sessionDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- }
+// Test that $currentOp is disallowed with transactions. We have to reassign 'sessionDB' to
+// refer to the admin database, because $currentOp pipelines are required to run against
+// 'admin'. Queries against 'admin' are not permitted in a transaction.
+sessionDB = session.getDatabase(kAdminDB);
+testSnapshotAggFailsWithCode(1, [{$currentOp: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+sessionDB = session.getDatabase(kDBName);
- // Test that snapshot isolation works with $lookup using localField/foreignField syntax.
- testLookupReadConcernSnapshotIsolation({
- localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
- foreignDocsPre: [{_id: 1}],
- localDocsPost: [{_id: 3}],
- foreignDocsPost: [{_id: 2}, {_id: 3}],
- pipeline: [
- {$lookup: {from: "foreign", localField: "_id", foreignField: "_id", as: "as"}},
- {$sort: {_id: 1}}
- ],
- expectedResults: [{_id: 0, as: []}, {_id: 1, as: [{_id: 1}]}, {_id: 2, as: []}]
+// Helper for testing that aggregation stages which involve a local and foreign collection
+// ($lookup and $graphLookup) obey the expected readConcern "snapshot" isolation semantics.
+//
+// Inserts 'localDocsPre' into the 'local' collection and 'foreignDocsPre' into the 'foreign'
+// collection. Then runs the first batch of 'pipeline', before inserting 'localDocsPost' into
+// 'local' and 'foreignDocsPost' into 'foreign'. Iterates the remainder of the aggregation
+// cursor and verifies that the result set matches 'expectedResults'.
+function testLookupReadConcernSnapshotIsolation(
+ {localDocsPre, foreignDocsPre, localDocsPost, foreignDocsPost, pipeline, expectedResults}) {
+ sessionDB.runCommand({drop: "local", writeConcern: {w: "majority"}});
+ sessionDB.runCommand({drop: "foreign", writeConcern: {w: "majority"}});
+ let localColl = sessionDB.local;
+ let foreignColl = sessionDB.foreign;
+ assert.commandWorked(localColl.insert(localDocsPre, kWCMajority));
+ assert.commandWorked(foreignColl.insert(foreignDocsPre, kWCMajority));
+ let cmdRes = sessionDB.runCommand({
+ aggregate: localColl.getName(),
+ pipeline: pipeline,
+ cursor: {batchSize: 0},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false
});
+ assert.commandWorked(cmdRes);
+ assert.neq(0, cmdRes.cursor.id);
+ assert.eq(0, cmdRes.cursor.firstBatch.length);
+
+ assert.commandWorked(localColl.insert(localDocsPost, kWCMajority));
+ assert.commandWorked(foreignColl.insert(foreignDocsPost, kWCMajority));
+ let results =
+ new DBCommandCursor(sessionDB, cmdRes, undefined, undefined, NumberLong(txnNumber))
+ .toArray();
+ assert.eq(results, expectedResults);
+ assert.commandWorked(sessionDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+}
- // Test that snapshot isolation works with $lookup into a nested pipeline.
- testLookupReadConcernSnapshotIsolation({
+// Test that snapshot isolation works with $lookup using localField/foreignField syntax.
+testLookupReadConcernSnapshotIsolation({
+ localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
+ foreignDocsPre: [{_id: 1}],
+ localDocsPost: [{_id: 3}],
+ foreignDocsPost: [{_id: 2}, {_id: 3}],
+ pipeline: [
+ {$lookup: {from: "foreign", localField: "_id", foreignField: "_id", as: "as"}},
+ {$sort: {_id: 1}}
+ ],
+ expectedResults: [{_id: 0, as: []}, {_id: 1, as: [{_id: 1}]}, {_id: 2, as: []}]
+});
+
+// Test that snapshot isolation works with $lookup into a nested pipeline.
+testLookupReadConcernSnapshotIsolation({
localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
foreignDocsPre: [{_id: 1}],
localDocsPost: [{_id: 3}],
@@ -145,8 +145,8 @@
expectedResults: [{_id: 0, as: []}, {_id: 1, as: [{_id: 1}]}, {_id: 2, as: []}]
});
- // Test that snapshot isolation works with $graphLookup.
- testLookupReadConcernSnapshotIsolation({
+// Test that snapshot isolation works with $graphLookup.
+testLookupReadConcernSnapshotIsolation({
localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
foreignDocsPre: [{_id: 1, linkTo: 2}],
localDocsPost: [{_id: 3}],
@@ -167,97 +167,94 @@
[{_id: 0, as: []}, {_id: 1, as: [{_id: 1, linkTo: 2}]}, {_id: 2, as: []}]
});
- // Test that snapshot isolation works for $geoNear. Special care is taken to test snapshot
- // isolation across getMore for $geoNear as it is an initial document source.
- assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
- assert.commandWorked(sessionDB.runCommand({
- createIndexes: kCollName,
- indexes: [{key: {geo: "2dsphere"}, name: "geo_2dsphere"}],
- writeConcern: {w: "majority"}
- }));
+// Test that snapshot isolation works for $geoNear. Special care is taken to test snapshot
+// isolation across getMore for $geoNear as it is an initial document source.
+assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
+assert.commandWorked(sessionDB.runCommand({
+ createIndexes: kCollName,
+ indexes: [{key: {geo: "2dsphere"}, name: "geo_2dsphere"}],
+ writeConcern: {w: "majority"}
+}));
- const coll = sessionDB.getCollection(kCollName);
- let bulk = coll.initializeUnorderedBulkOp();
- const numInitialGeoInsert = 4;
- for (let i = 0; i < numInitialGeoInsert; ++i) {
- bulk.insert({_id: i, geo: {type: "Point", coordinates: [0, 0]}});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
+const coll = sessionDB.getCollection(kCollName);
+let bulk = coll.initializeUnorderedBulkOp();
+const numInitialGeoInsert = 4;
+for (let i = 0; i < numInitialGeoInsert; ++i) {
+ bulk.insert({_id: i, geo: {type: "Point", coordinates: [0, 0]}});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
- let cmdRes = assert.commandWorked(sessionDB.runCommand({
- aggregate: kCollName,
- pipeline: [{
- $geoNear: {
- spherical: true,
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distance"
- }
- }],
- txnNumber: NumberLong(++txnNumber),
- readConcern: {level: "snapshot"},
- autocommit: false,
- startTransaction: true,
- cursor: {batchSize: 0}
- }));
- assert(cmdRes.hasOwnProperty("cursor"));
- const cursorId = cmdRes.cursor.id;
- assert.neq(cursorId, 0);
+let cmdRes = assert.commandWorked(sessionDB.runCommand({
+ aggregate: kCollName,
+ pipeline: [{
+ $geoNear:
+ {spherical: true, near: {type: "Point", coordinates: [0, 0]}, distanceField: "distance"}
+ }],
+ txnNumber: NumberLong(++txnNumber),
+ readConcern: {level: "snapshot"},
+ autocommit: false,
+ startTransaction: true,
+ cursor: {batchSize: 0}
+}));
+assert(cmdRes.hasOwnProperty("cursor"));
+const cursorId = cmdRes.cursor.id;
+assert.neq(cursorId, 0);
- assert.commandWorked(
- coll.insert({_id: numInitialGeoInsert, geo: {type: "Point", coordinates: [0, 0]}},
- {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ coll.insert({_id: numInitialGeoInsert, geo: {type: "Point", coordinates: [0, 0]}},
+ {writeConcern: {w: "majority"}}));
- cmdRes = assert.commandWorked(sessionDB.runCommand({
- getMore: NumberLong(cursorId),
- collection: kCollName,
- autocommit: false,
- txnNumber: NumberLong(txnNumber)
- }));
- assert.commandWorked(sessionDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- assert(cmdRes.hasOwnProperty("cursor"));
- assert(cmdRes.cursor.hasOwnProperty("nextBatch"));
- assert.eq(cmdRes.cursor.nextBatch.length, numInitialGeoInsert);
+cmdRes = assert.commandWorked(sessionDB.runCommand({
+ getMore: NumberLong(cursorId),
+ collection: kCollName,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber)
+}));
+assert.commandWorked(sessionDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+assert(cmdRes.hasOwnProperty("cursor"));
+assert(cmdRes.cursor.hasOwnProperty("nextBatch"));
+assert.eq(cmdRes.cursor.nextBatch.length, numInitialGeoInsert);
- // Test that snapshot reads are legal for $facet.
- assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
- assert.commandWorked(coll.insert(
- [
- {group1: 1, group2: 1, val: 1},
- {group1: 1, group2: 2, val: 2},
- {group1: 2, group2: 2, val: 8}
- ],
- kWCMajority));
+// Test that snapshot reads are legal for $facet.
+assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
+assert.commandWorked(coll.insert(
+ [
+ {group1: 1, group2: 1, val: 1},
+ {group1: 1, group2: 2, val: 2},
+ {group1: 2, group2: 2, val: 8}
+ ],
+ kWCMajority));
- cmdRes = sessionDB.runCommand({
- aggregate: kCollName,
- pipeline: [
- {
- $facet: {
- g1: [{$group: {_id: "$group1", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}],
- g2: [{$group: {_id: "$group2", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}]
- }
- },
- {$unwind: "$g1"},
- {$unwind: "$g2"},
- {$sort: {"g1._id": 1, "g2._id": 1}}
- ],
- cursor: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false
- });
- assert.commandWorked(sessionDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- assert.commandWorked(cmdRes);
- assert.eq(0, cmdRes.cursor.id);
- assert.eq(cmdRes.cursor.firstBatch, [
- {g1: {_id: 1, sum: 3}, g2: {_id: 1, sum: 1}},
- {g1: {_id: 1, sum: 3}, g2: {_id: 2, sum: 10}},
- {g1: {_id: 2, sum: 8}, g2: {_id: 1, sum: 1}},
- {g1: {_id: 2, sum: 8}, g2: {_id: 2, sum: 10}}
- ]);
+cmdRes = sessionDB.runCommand({
+ aggregate: kCollName,
+ pipeline: [
+ {
+ $facet: {
+ g1: [{$group: {_id: "$group1", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}],
+ g2: [{$group: {_id: "$group2", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}]
+ }
+ },
+ {$unwind: "$g1"},
+ {$unwind: "$g2"},
+ {$sort: {"g1._id": 1, "g2._id": 1}}
+ ],
+ cursor: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false
+});
+assert.commandWorked(sessionDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+assert.commandWorked(cmdRes);
+assert.eq(0, cmdRes.cursor.id);
+assert.eq(cmdRes.cursor.firstBatch, [
+ {g1: {_id: 1, sum: 3}, g2: {_id: 1, sum: 1}},
+ {g1: {_id: 1, sum: 3}, g2: {_id: 2, sum: 10}},
+ {g1: {_id: 2, sum: 8}, g2: {_id: 1, sum: 1}},
+ {g1: {_id: 2, sum: 8}, g2: {_id: 2, sum: 10}}
+]);
- rst.stopSet();
+rst.stopSet();
}());
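
The 'testSnapshotAggFailsWithCode' helper above pairs every disallowed stage with a sanity
check outside a transaction. Expanded for one stage, the pattern looks roughly like this,
reusing the test's 'sessionDB' and 'txnNumber':

    let cmd = {aggregate: "coll", pipeline: [{$collStats: {}}], cursor: {}};
    let txnCmd = Object.extend({}, cmd);  // keep the command name as the first field
    txnCmd.readConcern = {level: "snapshot"};
    txnCmd.txnNumber = NumberLong(++txnNumber);
    txnCmd.startTransaction = true;
    txnCmd.autocommit = false;
    // As the first statement of a snapshot transaction, the stage is rejected...
    assert.commandFailedWithCode(sessionDB.runCommand(txnCmd),
                                 ErrorCodes.OperationNotSupportedInTransaction);
    // ...while the same aggregate outside a transaction succeeds.
    assert.commandWorked(sessionDB.runCommand(cmd));
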
diff --git a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
index eefbe613f84..3a65ab021b5 100644
--- a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
+++ b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
@@ -2,108 +2,107 @@
// for the snapshot's point in time.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const kDbName = "test";
- const kCollName = "coll";
+const kDbName = "test";
+const kCollName = "coll";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const testDB = rst.getPrimary().getDB(kDbName);
- const adminDB = testDB.getSiblingDB("admin");
- const coll = testDB.getCollection(kCollName);
+const testDB = rst.getPrimary().getDB(kDbName);
+const adminDB = testDB.getSiblingDB("admin");
+const coll = testDB.getCollection(kCollName);
- // Waits for the operation to reach the "hangAfterPreallocateSnapshot" failpoint.
- function waitForOp(curOpFilter) {
- assert.soon(
- function() {
- const res =
- adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {$and: [curOpFilter, {msg: "hangAfterPreallocateSnapshot"}]}}
- ])
- .toArray();
- if (res.length === 1) {
- return true;
- }
- return false;
- },
- function() {
- return "Failed to find operation in $currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
- });
- }
+// Waits for the operation to reach the "hangAfterPreallocateSnapshot" failpoint.
+function waitForOp(curOpFilter) {
+ assert.soon(
+ function() {
+ const res =
+ adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {$and: [curOpFilter, {msg: "hangAfterPreallocateSnapshot"}]}}
+ ])
+ .toArray();
+ if (res.length === 1) {
+ return true;
+ }
+ return false;
+ },
+ function() {
+ return "Failed to find operation in $currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
+ });
+}
- function testCommand(cmd, curOpFilter) {
- coll.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({
- createIndexes: kCollName,
- indexes:
- [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}]
- }));
- assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: "majority"}}));
+function testCommand(cmd, curOpFilter) {
+ coll.drop({writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: kCollName,
+ indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}]
+ }));
+ assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: "majority"}}));
- // Start a command with readConcern "snapshot" that hangs after establishing a storage
- // engine transaction.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterPreallocateSnapshot", mode: "alwaysOn"}));
+ // Start a command with readConcern "snapshot" that hangs after establishing a storage
+ // engine transaction.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterPreallocateSnapshot", mode: "alwaysOn"}));
- const awaitCommand = startParallelShell(
- "const session = db.getMongo().startSession();" +
- "const sessionDb = session.getDatabase('test');" +
- "session.startTransaction({readConcern: {level: 'snapshot'}});" +
- "const res = sessionDb.runCommand(" + tojson(cmd) + ");" +
- "assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);" +
- "assert.eq(res.errorLabels, ['TransientTransactionError']);" +
- "session.endSession();",
- rst.ports[0]);
+ const awaitCommand = startParallelShell(
+ "const session = db.getMongo().startSession();" +
+ "const sessionDb = session.getDatabase('test');" +
+ "session.startTransaction({readConcern: {level: 'snapshot'}});" +
+ "const res = sessionDb.runCommand(" + tojson(cmd) + ");" +
+ "assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);" +
+ "assert.eq(res.errorLabels, ['TransientTransactionError']);" +
+ "session.endSession();",
+ rst.ports[0]);
- waitForOp(curOpFilter);
+ waitForOp(curOpFilter);
- // Create an index on the collection the command was executed against. This will move the
- // collection's minimum visible timestamp to a point later than the point-in-time referenced
- // by the transaction snapshot.
- assert.commandWorked(testDB.runCommand({
- createIndexes: kCollName,
- indexes: [{key: {x: 1}, name: "x_1"}],
- writeConcern: {w: "majority"}
- }));
+ // Create an index on the collection the command was executed against. This will move the
+ // collection's minimum visible timestamp to a point later than the point-in-time referenced
+ // by the transaction snapshot.
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: kCollName,
+ indexes: [{key: {x: 1}, name: "x_1"}],
+ writeConcern: {w: "majority"}
+ }));
- // Disable the hang and check for parallel shell success. Success indicates that the command
- // failed due to collection metadata invalidation.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangAfterPreallocateSnapshot", mode: "off"}));
+ // Disable the hang and check for parallel shell success. Success indicates that the command
+ // failed due to collection metadata invalidation.
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangAfterPreallocateSnapshot", mode: "off"}));
- awaitCommand();
- }
+ awaitCommand();
+}
- testCommand({aggregate: kCollName, pipeline: [], cursor: {}},
- {"command.aggregate": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({delete: kCollName, deletes: [{q: {x: 1}, limit: 1}]},
- {"command.delete": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({distinct: kCollName, key: "x"},
- {"command.distinct": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({find: kCollName},
- {"command.find": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({findAndModify: kCollName, query: {x: 1}, remove: true}, {
- "command.findAndModify": kCollName,
- "command.remove": true,
- "command.readConcern.level": "snapshot"
- });
- testCommand({findAndModify: kCollName, query: {x: 1}, update: {$set: {x: 2}}}, {
- "command.findAndModify": kCollName,
- "command.update.$set": {x: 2},
- "command.readConcern.level": "snapshot"
- });
- testCommand({geoSearch: kCollName, near: [0, 0], maxDistance: 1, search: {a: 1}},
- {"command.geoSearch": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({insert: kCollName, documents: [{x: 1}]},
- {"command.insert": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({update: kCollName, updates: [{q: {x: 1}, u: {$set: {x: 2}}}]},
- {"command.update": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({aggregate: kCollName, pipeline: [], cursor: {}},
+ {"command.aggregate": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({delete: kCollName, deletes: [{q: {x: 1}, limit: 1}]},
+ {"command.delete": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({distinct: kCollName, key: "x"},
+ {"command.distinct": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({find: kCollName},
+ {"command.find": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({findAndModify: kCollName, query: {x: 1}, remove: true}, {
+ "command.findAndModify": kCollName,
+ "command.remove": true,
+ "command.readConcern.level": "snapshot"
+});
+testCommand({findAndModify: kCollName, query: {x: 1}, update: {$set: {x: 2}}}, {
+ "command.findAndModify": kCollName,
+ "command.update.$set": {x: 2},
+ "command.readConcern.level": "snapshot"
+});
+testCommand({geoSearch: kCollName, near: [0, 0], maxDistance: 1, search: {a: 1}},
+ {"command.geoSearch": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({insert: kCollName, documents: [{x: 1}]},
+ {"command.insert": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({update: kCollName, updates: [{q: {x: 1}, u: {$set: {x: 2}}}]},
+ {"command.update": kCollName, "command.readConcern.level": "snapshot"});
- rst.stopSet();
+rst.stopSet();
})();
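// A minimal sketch of how a caller is expected to react to the SnapshotUnavailable failure this
// test provokes: the "TransientTransactionError" label marks the whole transaction as safe to
// retry from the top. `runWithTxnRetry` is a hypothetical helper, not something the test
// defines.
function runWithTxnRetry(session, sessionDb, cmd) {
    while (true) {
        session.startTransaction({readConcern: {level: "snapshot"}});
        const res = sessionDb.runCommand(cmd);
        if (res.ok === 1) {
            assert.commandWorked(session.commitTransaction_forTesting());
            return res;
        }
        // Retry only when the server explicitly labels the failure as transient.
        assert(res.errorLabels && res.errorLabels.includes("TransientTransactionError"),
               tojson(res));
        session.abortTransaction_forTesting();
    }
}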
diff --git a/jstests/noPassthrough/read_concern_snapshot_yielding.js b/jstests/noPassthrough/read_concern_snapshot_yielding.js
index 063e175030a..a5a2605cbae 100644
--- a/jstests/noPassthrough/read_concern_snapshot_yielding.js
+++ b/jstests/noPassthrough/read_concern_snapshot_yielding.js
@@ -3,152 +3,180 @@
// storage engine resources.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const db = rst.getPrimary().getDB(dbName);
+const adminDB = db.getSiblingDB("admin");
+const coll = db.coll;
+TestData.numDocs = 4;
+
+// Set 'internalQueryExecYieldIterations' to 2 to ensure that commands yield on the second try
+// (i.e. after they have established a snapshot but before they have returned any documents).
+assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
+
+function waitForOpId(curOpFilter) {
+ let opId;
+ // Wait until we know the failpoint 'setInterruptOnlyPlansCheckForInterruptHang' has been
+ // reached.
+ assert.soon(
+ function() {
+ const res = adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {
+ $match: {
+ $and: [
+ {ns: coll.getFullName()},
+ curOpFilter,
+ {"msg": "setInterruptOnlyPlansCheckForInterruptHang"}
+ ]
+ }
+ }
+ ])
+ .toArray();
+
+ if (res.length === 1) {
+ opId = res[0].opid;
+ return true;
+ }
+ return false;
+ },
+ function() {
+ return "Failed to find operation in $currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
+ .toArray());
+ });
+ return opId;
+}
+
+function assertKillPending(opId) {
+ const res =
+ adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName(), opid: opId}}])
+ .toArray();
+ assert.eq(
+ res.length,
+ 1,
+ tojson(
+ adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}]).toArray()));
+ assert(res[0].hasOwnProperty("killPending"), tojson(res));
+ assert.eq(true, res[0].killPending, tojson(res));
+}
+
+function populateCollection() {
+ db.coll.drop({writeConcern: {w: "majority"}});
+ for (let i = 0; i < TestData.numDocs; i++) {
+ assert.commandWorked(
+ db.coll.insert({_id: i, x: 1, location: [0, 0]}, {writeConcern: {w: "majority"}}));
}
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- const adminDB = db.getSiblingDB("admin");
- const coll = db.coll;
- TestData.numDocs = 4;
-
- // Set 'internalQueryExecYieldIterations' to 2 to ensure that commands yield on the second try
- // (i.e. after they have established a snapshot but before they have returned any documents).
- assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
-
- function waitForOpId(curOpFilter) {
- let opId;
- // Wait until we know the failpoint 'setInterruptOnlyPlansCheckForInterruptHang' has been
- // reached.
- assert.soon(
- function() {
- const res =
- adminDB
- .aggregate([
- {$currentOp: {}},
- {
- $match: {
- $and: [
- {ns: coll.getFullName()},
- curOpFilter,
- {"msg": "setInterruptOnlyPlansCheckForInterruptHang"}
- ]
- }
- }
- ])
- .toArray();
-
- if (res.length === 1) {
- opId = res[0].opid;
- return true;
- }
- return false;
- },
- function() {
- return "Failed to find operation in $currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
- .toArray());
- });
- return opId;
- }
-
- function assertKillPending(opId) {
- const res =
- adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName(), opid: opId}}])
- .toArray();
- assert.eq(res.length,
- 1,
- tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
- .toArray()));
- assert(res[0].hasOwnProperty("killPending"), tojson(res));
- assert.eq(true, res[0].killPending, tojson(res));
- }
-
- function populateCollection() {
- db.coll.drop({writeConcern: {w: "majority"}});
- for (let i = 0; i < TestData.numDocs; i++) {
- assert.commandWorked(
- db.coll.insert({_id: i, x: 1, location: [0, 0]}, {writeConcern: {w: "majority"}}));
- }
-
- assert.commandWorked(db.runCommand({
- createIndexes: "coll",
- indexes: [{key: {location: "2d"}, name: "geo_2d"}],
- writeConcern: {w: "majority"}
- }));
- }
-
- function testCommand(awaitCommandFn, curOpFilter, testWriteConflict) {
- //
- // Test that the command can be killed.
- //
-
+ assert.commandWorked(db.runCommand({
+ createIndexes: "coll",
+ indexes: [{key: {location: "2d"}, name: "geo_2d"}],
+ writeConcern: {w: "majority"}
+ }));
+}
+
+function testCommand(awaitCommandFn, curOpFilter, testWriteConflict) {
+ //
+ // Test that the command can be killed.
+ //
+
+ TestData.txnNumber++;
+ populateCollection();
+
+ // Start a command that hangs before checking for interrupt.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ let awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
+
+ // Kill the command, and check that it is set to killPending.
+ let opId = waitForOpId(curOpFilter);
+ assert.commandWorked(db.killOp(opId));
+ assertKillPending(opId);
+
+ // Remove the hang, and check that the command is killed.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ let exitCode = awaitCommand({checkExitSuccess: false});
+ assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
+
+ //
+ // Test that the command does not yield locks.
+ //
+
+ TestData.txnNumber++;
+ populateCollection();
+
+ // Start a command that hangs before checking for interrupt.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
+ waitForOpId(curOpFilter);
+
+ // Start a drop. This should block behind the command, since the command does not yield
+ // locks.
+ let awaitDrop = startParallelShell(function() {
+ db.getSiblingDB("test").coll.drop({writeConcern: {w: "majority"}});
+ }, rst.ports[0]);
+
+ // Remove the hang. The command should complete successfully.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ awaitCommand();
+
+ // Now the drop can complete.
+ awaitDrop();
+
+ //
+ // Test that the command does not read data that is inserted during its execution.
+ // 'awaitCommandFn' should fail if it reads the following document:
+ // {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
+ //
+
+ TestData.txnNumber++;
+ populateCollection();
+
+ // Start a command that hangs before checking for interrupt.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
+ waitForOpId(curOpFilter);
+
+ // Insert data that should not be read by the command.
+ assert.commandWorked(db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
+ {writeConcern: {w: "majority"}}));
+
+ // Remove the hang. The command should complete successfully.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ awaitCommand();
+
+ //
+ // Test that the command fails if a write conflict occurs. 'awaitCommandFn' should write to
+ // the following document: {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
+ //
+
+ if (testWriteConflict) {
TestData.txnNumber++;
populateCollection();
- // Start a command that hangs before checking for interrupt.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- let awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
-
- // Kill the command, and check that it is set to killPending.
- let opId = waitForOpId(curOpFilter);
- assert.commandWorked(db.killOp(opId));
- assertKillPending(opId);
-
- // Remove the hang, and check that the command is killed.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- let exitCode = awaitCommand({checkExitSuccess: false});
- assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
-
- //
- // Test that the command does not yield locks.
- //
-
- TestData.txnNumber++;
- populateCollection();
-
- // Start a command that hangs before checking for interrupt.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
- waitForOpId(curOpFilter);
-
- // Start a drop. This should block behind the command, since the command does not yield
- // locks.
- let awaitDrop = startParallelShell(function() {
- db.getSiblingDB("test").coll.drop({writeConcern: {w: "majority"}});
- }, rst.ports[0]);
-
- // Remove the hang. The command should complete successfully.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- awaitCommand();
-
- // Now the drop can complete.
- awaitDrop();
-
- //
- // Test that the command does not read data that is inserted during its execution.
- // 'awaitCommandFn' should fail if it reads the following document:
- // {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
- //
-
- TestData.txnNumber++;
- populateCollection();
+ // Insert the document that the command will write to.
+ assert.commandWorked(db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
+ {writeConcern: {w: "majority"}}));
// Start a command that hangs before checking for interrupt.
assert.commandWorked(db.adminCommand(
@@ -156,177 +184,142 @@
awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
waitForOpId(curOpFilter);
- // Insert data that should not be read by the command.
- assert.commandWorked(db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
- {writeConcern: {w: "majority"}}));
+ // Update the document that the command will write to.
+ assert.commandWorked(db.coll.update(
+ {_id: TestData.numDocs}, {$set: {conflict: true}}, {writeConcern: {w: "majority"}}));
- // Remove the hang. The command should complete successfully.
+ // Remove the hang. The command should fail.
assert.commandWorked(db.adminCommand(
{configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- awaitCommand();
-
- //
- // Test that the command fails if a write conflict occurs. 'awaitCommandFn' should write to
- // the following document: {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
- //
-
- if (testWriteConflict) {
- TestData.txnNumber++;
- populateCollection();
-
- // Insert the document that the command will write to.
- assert.commandWorked(
- db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
- {writeConcern: {w: "majority"}}));
-
- // Start a command that hangs before checking for interrupt.
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang",
- mode: "alwaysOn"
- }));
- awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
- waitForOpId(curOpFilter);
-
- // Update the document that the command will write to.
- assert.commandWorked(db.coll.update({_id: TestData.numDocs},
- {$set: {conflict: true}},
- {writeConcern: {w: "majority"}}));
-
- // Remove the hang. The command should fail.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- exitCode = awaitCommand({checkExitSuccess: false});
- assert.neq(0, exitCode, "Expected shell to exit with failure due to WriteConflict");
- }
+ exitCode = awaitCommand({checkExitSuccess: false});
+ assert.neq(0, exitCode, "Expected shell to exit with failure due to WriteConflict");
}
-
- // Test find.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand({find: "coll", filter: {x: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
- }, {"command.filter": {x: 1}});
-
- // Test getMore on a find established cursor.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- const initialFindBatchSize = 2;
- const cursorId = assert
- .commandWorked(sessionDb.runCommand(
- {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
- .cursor.id;
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- const res = assert.commandWorked(sessionDb.runCommand(
- {getMore: NumberLong(cursorId), collection: "coll", batchSize: TestData.numDocs}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(
- res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
- }, {"cursor.originatingCommand.filter": {x: 1}});
-
- // Test aggregate.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(
- sessionDb.runCommand({aggregate: "coll", pipeline: [{$match: {x: 1}}], cursor: {}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
- }, {"command.pipeline": [{$match: {x: 1}}]});
-
- // Test getMore with an initial find batchSize of 0. Interrupt behavior of a getMore is not
- // expected to change with a change of batchSize in the originating command.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- const initialFindBatchSize = 0;
- const cursorId = assert
- .commandWorked(sessionDb.runCommand(
- {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
- .cursor.id;
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- const res = assert.commandWorked(
- sessionDb.runCommand({getMore: NumberLong(cursorId), collection: "coll"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(
- res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
- }, {"cursor.originatingCommand.filter": {x: 1}});
-
- // Test distinct.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand({distinct: "coll", key: "_id"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert(res.hasOwnProperty("values"));
- assert.eq(res.values.length, 4, tojson(res));
- }, {"command.distinct": "coll"});
-
- // Test update.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand({
- update: "coll",
- updates:
- [{q: {}, u: {$set: {updated: true}}}, {q: {new: 1}, u: {$set: {updated: true}}}]
- }));
- assert.commandWorked(session.commitTransaction_forTesting());
- // Only update one existing doc committed before the transaction.
- assert.eq(res.n, 1, tojson(res));
- assert.eq(res.nModified, 1, tojson(res));
- }, {op: "update"}, true);
-
- // Test delete.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand(
- {delete: "coll", deletes: [{q: {}, limit: 1}, {q: {new: 1}, limit: 1}]}));
- assert.commandWorked(session.commitTransaction_forTesting());
- // Only remove one existing doc committed before the transaction.
- assert.eq(res.n, 1, tojson(res));
- }, {op: "remove"}, true);
-
- // Test findAndModify.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand(
- {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert(res.hasOwnProperty("lastErrorObject"));
- assert.eq(res.lastErrorObject.n, 0, tojson(res));
- assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
- }, {"command.findAndModify": "coll"}, true);
-
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand(
- {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert(res.hasOwnProperty("lastErrorObject"));
- assert.eq(res.lastErrorObject.n, 0, tojson(res));
- assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
- }, {"command.findAndModify": "coll"}, true);
-
- rst.stopSet();
+}
+
+// Test find.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand({find: "coll", filter: {x: 1}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
+}, {"command.filter": {x: 1}});
+
+// Test getMore on a cursor established by a find.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ const initialFindBatchSize = 2;
+ const cursorId = assert
+ .commandWorked(sessionDb.runCommand(
+ {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
+ .cursor.id;
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {getMore: NumberLong(cursorId), collection: "coll", batchSize: TestData.numDocs}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
+}, {"cursor.originatingCommand.filter": {x: 1}});
+
+// Test aggregate.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(
+ sessionDb.runCommand({aggregate: "coll", pipeline: [{$match: {x: 1}}], cursor: {}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
+}, {"command.pipeline": [{$match: {x: 1}}]});
+
+// Test getMore with an initial find batchSize of 0. The interrupt behavior of a getMore is
+// not expected to depend on the batchSize of the originating command.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ const initialFindBatchSize = 0;
+ const cursorId = assert
+ .commandWorked(sessionDb.runCommand(
+ {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
+ .cursor.id;
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ const res = assert.commandWorked(
+ sessionDb.runCommand({getMore: NumberLong(cursorId), collection: "coll"}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
+}, {"cursor.originatingCommand.filter": {x: 1}});
+
+// Test distinct.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand({distinct: "coll", key: "_id"}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert(res.hasOwnProperty("values"));
+ assert.eq(res.values.length, 4, tojson(res));
+}, {"command.distinct": "coll"});
+
+// Test update.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand({
+ update: "coll",
+ updates: [{q: {}, u: {$set: {updated: true}}}, {q: {new: 1}, u: {$set: {updated: true}}}]
+ }));
+ assert.commandWorked(session.commitTransaction_forTesting());
+    // Only the one existing doc committed before the transaction is updated.
+ assert.eq(res.n, 1, tojson(res));
+ assert.eq(res.nModified, 1, tojson(res));
+}, {op: "update"}, true);
+
+// Test delete.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {delete: "coll", deletes: [{q: {}, limit: 1}, {q: {new: 1}, limit: 1}]}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+    // Only the one existing doc committed before the transaction is removed.
+ assert.eq(res.n, 1, tojson(res));
+}, {op: "remove"}, true);
+
+// Test findAndModify.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert(res.hasOwnProperty("lastErrorObject"));
+ assert.eq(res.lastErrorObject.n, 0, tojson(res));
+ assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
+}, {"command.findAndModify": "coll"}, true);
+
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert(res.hasOwnProperty("lastErrorObject"));
+ assert.eq(res.lastErrorObject.n, 0, tojson(res));
+ assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
+}, {"command.findAndModify": "coll"}, true);
+
+rst.stopSet();
}());
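// The yielding behavior exercised above hinges on internalQueryExecYieldIterations being 2, so
// that a command yields on its second work cycle. A minimal sketch of double-checking that the
// knob took effect, using getParameter (the standard counterpart of setParameter); it assumes
// the `db` handle from the test and is illustrative only.
const yieldIters =
    assert.commandWorked(db.adminCommand({getParameter: 1, internalQueryExecYieldIterations: 1}))
        .internalQueryExecYieldIterations;
assert.eq(yieldIters, 2, "expected commands to yield on their second work cycle");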
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index debdec99faf..3e03b8124ae 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -17,211 +17,209 @@
load("jstests/libs/analyze_plan.js");
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+// Tests the functionality for committed reads for the given read concern level.
+function testReadConcernLevel(level) {
+ var replTest = new ReplSetTest({
+ nodes: 1,
+ oplogSize: 2,
+ nodeOptions:
+ {setParameter: 'testingSnapshotBehaviorInIsolation=true', enableMajorityReadConcern: ''}
+ });
+ replTest.startSet();
+ // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation'
+ // set.
+ replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+ const session =
+ replTest.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
+ const db = session.getDatabase("test");
+ const t = db.coll;
+
+ function assertNoSnapshotAvailableForReadConcernLevel() {
+ var res =
+ t.runCommand('find', {batchSize: 2, readConcern: {level: level}, maxTimeMS: 1000});
+ assert.commandFailed(res);
+ assert.eq(res.code, ErrorCodes.MaxTimeMSExpired);
}
- // Tests the functionality for committed reads for the given read concern level.
- function testReadConcernLevel(level) {
- var replTest = new ReplSetTest({
- nodes: 1,
- oplogSize: 2,
- nodeOptions: {
- setParameter: 'testingSnapshotBehaviorInIsolation=true',
- enableMajorityReadConcern: ''
- }
- });
- replTest.startSet();
- // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation'
- // set.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- const session =
- replTest.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
- const db = session.getDatabase("test");
- const t = db.coll;
-
- function assertNoSnapshotAvailableForReadConcernLevel() {
- var res =
- t.runCommand('find', {batchSize: 2, readConcern: {level: level}, maxTimeMS: 1000});
- assert.commandFailed(res);
- assert.eq(res.code, ErrorCodes.MaxTimeMSExpired);
- }
-
- function getCursorForReadConcernLevel() {
- var res = t.runCommand('find', {batchSize: 2, readConcern: {level: level}});
- assert.commandWorked(res);
- return new DBCommandCursor(db, res, 2, undefined);
- }
+ function getCursorForReadConcernLevel() {
+ var res = t.runCommand('find', {batchSize: 2, readConcern: {level: level}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db, res, 2, undefined);
+ }
- function getAggCursorForReadConcernLevel() {
- var res = t.runCommand(
- 'aggregate', {pipeline: [], cursor: {batchSize: 2}, readConcern: {level: level}});
- assert.commandWorked(res);
- return new DBCommandCursor(db, res, 2, undefined);
- }
+ function getAggCursorForReadConcernLevel() {
+ var res = t.runCommand('aggregate',
+ {pipeline: [], cursor: {batchSize: 2}, readConcern: {level: level}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db, res, 2, undefined);
+ }
- function getExplainPlan(query) {
- var res = db.runCommand({explain: {find: t.getName(), filter: query}});
- return assert.commandWorked(res).queryPlanner.winningPlan;
- }
+ function getExplainPlan(query) {
+ var res = db.runCommand({explain: {find: t.getName(), filter: query}});
+ return assert.commandWorked(res).queryPlanner.winningPlan;
+ }
- //
- // Actual Test
- //
-
- // Ensure killOp will work on an op that is waiting for snapshots to be created
- var blockedReader = startParallelShell(
- "const session = db.getMongo().startSession({causalConsistency: false}); " +
- "const sessionDB = session.getDatabase(db.getName()); " +
- "sessionDB.coll.runCommand('find', {batchSize: 2, readConcern: {level: \"" + level +
- "\"}});",
- replTest.ports[0]);
-
- assert.soon(function() {
- var curOps = db.currentOp(true);
- jsTestLog("curOp output: " + tojson(curOps));
- for (var i in curOps.inprog) {
- var op = curOps.inprog[i];
- if (op.op === 'query' && op.ns === "test.$cmd" && op.command.find === 'coll') {
- db.killOp(op.opid);
- return true;
- }
+ //
+ // Actual Test
+ //
+
+ // Ensure killOp will work on an op that is waiting for snapshots to be created
+ var blockedReader = startParallelShell(
+ "const session = db.getMongo().startSession({causalConsistency: false}); " +
+ "const sessionDB = session.getDatabase(db.getName()); " +
+ "sessionDB.coll.runCommand('find', {batchSize: 2, readConcern: {level: \"" + level +
+ "\"}});",
+ replTest.ports[0]);
+
+ assert.soon(function() {
+ var curOps = db.currentOp(true);
+ jsTestLog("curOp output: " + tojson(curOps));
+ for (var i in curOps.inprog) {
+ var op = curOps.inprog[i];
+ if (op.op === 'query' && op.ns === "test.$cmd" && op.command.find === 'coll') {
+ db.killOp(op.opid);
+ return true;
}
- return false;
- }, "could not kill an op that was waiting for a snapshot", 60 * 1000);
- blockedReader();
-
- var snapshot1 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assert.commandWorked(db.runCommand({create: "coll"}));
- var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
- for (var i = 0; i < 10; i++) {
- assert.writeOK(t.insert({_id: i, version: 3}));
}
+ return false;
+ }, "could not kill an op that was waiting for a snapshot", 60 * 1000);
+ blockedReader();
- assertNoSnapshotAvailableForReadConcernLevel();
-
- var snapshot3 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
- assertNoSnapshotAvailableForReadConcernLevel();
-
- assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
- var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
- // Collection didn't exist in snapshot 1.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot1}));
- assertNoSnapshotAvailableForReadConcernLevel();
-
- // Collection existed but was empty in snapshot 2.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot2}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 0);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
-
- // In snapshot 3 the collection was filled with {version: 3} documents.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot3}));
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
- getAggCursorForReadConcernLevel().forEach(function(doc) {
- // Note: agg uses internal batching so can't reliably test flipping snapshot. However,
- // it uses the same mechanism as find, so if one works, both should.
- assert.eq(doc.version, 3);
- });
-
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
- var cursor = getCursorForReadConcernLevel(); // Note: uses batchsize=2.
- assert.eq(cursor.next().version, 3);
- assert.eq(cursor.next().version, 3);
- assert(!cursor.objsLeftInBatch());
-
- // In snapshot 4 the collection was filled with {version: 3} documents.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot4}));
-
- // This triggers a getMore which sees the new version.
- assert.eq(cursor.next().version, 4);
- assert.eq(cursor.next().version, 4);
-
- // Adding an index bumps the min snapshot for a collection as of SERVER-20260. This may
- // change to just filter that index out from query planning as part of SERVER-20439.
- t.ensureIndex({version: 1});
- assertNoSnapshotAvailableForReadConcernLevel();
-
- // To use the index, a snapshot created after the index was completed must be marked
- // committed.
- var newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
- assert(isIxscan(db, getExplainPlan({version: 1})));
-
- // Dropping an index does bump the min snapshot.
- t.dropIndex({version: 1});
- assertNoSnapshotAvailableForReadConcernLevel();
-
- // To use the collection again, a snapshot created after the dropIndex must be marked
- // committed.
- newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
-
- // Reindex bumps the min snapshot.
- assert.writeOK(t.bump.insert({a: 1})); // Bump timestamp.
- t.reIndex();
- assertNoSnapshotAvailableForReadConcernLevel();
- newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
-
- // Dropping the collection is visible in the committed snapshot, even though it hasn't been
- // marked committed yet. This is allowed by the current specification even though it
- // violates strict read-committed semantics since we don't guarantee them on metadata
- // operations.
- t.drop();
- assert.eq(getCursorForReadConcernLevel().itcount(), 0);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
-
- // Creating a new collection with the same name hides the collection until that operation is
- // in the committed view.
- t.insert({_id: 0, version: 8});
- assertNoSnapshotAvailableForReadConcernLevel();
- newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 1);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 1);
-
- // Commands that only support read concern 'local', (such as ping) must work when it is
- // explicitly specified and fail for majority-committed read concern levels.
- assert.commandWorked(db.adminCommand({ping: 1, readConcern: {level: 'local'}}));
- var res = assert.commandFailed(db.adminCommand({ping: 1, readConcern: {level: level}}));
- assert.eq(res.code, ErrorCodes.InvalidOptions);
-
- // Agg $out supports majority committed reads.
- assert.commandWorked(t.runCommand(
- 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: 'local'}}));
- assert.commandWorked(t.runCommand(
- 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: level}}));
-
- replTest.stopSet();
- }
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const db = conn.getDB("test");
- const supportsCommittedReads =
- assert.commandWorked(db.serverStatus()).storageEngine.supportsCommittedReads;
- MongoRunner.stopMongod(conn);
+ var snapshot1 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assert.commandWorked(db.runCommand({create: "coll"}));
+ var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- if (supportsCommittedReads) {
- testReadConcernLevel("majority");
+ for (var i = 0; i < 10; i++) {
+ assert.writeOK(t.insert({_id: i, version: 3}));
}
+
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ var snapshot3 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
+ var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ // Collection didn't exist in snapshot 1.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot1}));
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ // Collection existed but was empty in snapshot 2.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot2}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 0);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
+
+ // In snapshot 3 the collection was filled with {version: 3} documents.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot3}));
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
+ getAggCursorForReadConcernLevel().forEach(function(doc) {
+        // Note: agg uses internal batching, so we can't reliably test flipping the snapshot
+        // mid-iteration. However, it uses the same mechanism as find, so if one works, both
+        // should.
+ assert.eq(doc.version, 3);
+ });
+
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+ var cursor = getCursorForReadConcernLevel(); // Note: uses batchsize=2.
+ assert.eq(cursor.next().version, 3);
+ assert.eq(cursor.next().version, 3);
+ assert(!cursor.objsLeftInBatch());
+
+    // In snapshot 4 the collection was filled with {version: 4} documents.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot4}));
+
+ // This triggers a getMore which sees the new version.
+ assert.eq(cursor.next().version, 4);
+ assert.eq(cursor.next().version, 4);
+
+ // Adding an index bumps the min snapshot for a collection as of SERVER-20260. This may
+ // change to just filter that index out from query planning as part of SERVER-20439.
+ t.ensureIndex({version: 1});
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ // To use the index, a snapshot created after the index was completed must be marked
+ // committed.
+ var newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
+ assert(isIxscan(db, getExplainPlan({version: 1})));
+
+ // Dropping an index does bump the min snapshot.
+ t.dropIndex({version: 1});
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ // To use the collection again, a snapshot created after the dropIndex must be marked
+ // committed.
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+
+ // Reindex bumps the min snapshot.
+ assert.writeOK(t.bump.insert({a: 1})); // Bump timestamp.
+ t.reIndex();
+ assertNoSnapshotAvailableForReadConcernLevel();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+
+    // Dropping the collection is visible in the committed snapshot, even though the drop
+    // hasn't been marked committed yet. This is allowed by the current specification even
+    // though it violates strict read-committed semantics, since we don't guarantee them on
+    // metadata operations.
+ t.drop();
+ assert.eq(getCursorForReadConcernLevel().itcount(), 0);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
+
+ // Creating a new collection with the same name hides the collection until that operation is
+ // in the committed view.
+ t.insert({_id: 0, version: 8});
+ assertNoSnapshotAvailableForReadConcernLevel();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 1);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 1);
+
+    // Commands that only support read concern 'local' (such as ping) must work when 'local'
+    // is explicitly specified, and must fail for majority-committed read concern levels.
+ assert.commandWorked(db.adminCommand({ping: 1, readConcern: {level: 'local'}}));
+ var res = assert.commandFailed(db.adminCommand({ping: 1, readConcern: {level: level}}));
+ assert.eq(res.code, ErrorCodes.InvalidOptions);
+
+ // Agg $out supports majority committed reads.
+ assert.commandWorked(t.runCommand(
+ 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: 'local'}}));
+ assert.commandWorked(t.runCommand(
+ 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: level}}));
+
+ replTest.stopSet();
+}
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const db = conn.getDB("test");
+const supportsCommittedReads =
+ assert.commandWorked(db.serverStatus()).storageEngine.supportsCommittedReads;
+MongoRunner.stopMongod(conn);
+
+if (supportsCommittedReads) {
+ testReadConcernLevel("majority");
+}
}());
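// A minimal sketch of the primitive read_majority.js is built on: with
// 'testingSnapshotBehaviorInIsolation' set, a write stays invisible to "majority" reads until a
// snapshot taken after it is marked committed. It assumes a `db` handle on such a mongod and is
// illustrative only.
assert.writeOK(db.coll.insert({x: 1}));
const snapName = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
// Until setCommittedSnapshot runs, a majority read would still wait for (or time out waiting
// for) a committed snapshot that includes the write.
assert.commandWorked(db.adminCommand({setCommittedSnapshot: snapName}));
assert.eq(db.coll.find().readConcern("majority").itcount(), 1);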
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index 578f17d748f..f76363a0b28 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -15,233 +15,232 @@
*/
(function() {
- 'use strict';
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- var testServer = MongoRunner.runMongod();
- var db = testServer.getDB("test");
- if (!db.serverStatus().storageEngine.supportsCommittedReads) {
- print("Skipping read_majority.js since storageEngine doesn't support it.");
- MongoRunner.stopMongod(testServer);
- return;
- }
+'use strict';
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+var testServer = MongoRunner.runMongod();
+var db = testServer.getDB("test");
+if (!db.serverStatus().storageEngine.supportsCommittedReads) {
+ print("Skipping read_majority.js since storageEngine doesn't support it.");
MongoRunner.stopMongod(testServer);
-
- function makeCursor(db, result) {
- return new DBCommandCursor(db, result);
- }
-
- // These test cases are functions that return a cursor of the documents in collections without
- // fetching them yet.
- var cursorTestCases = {
- find: function(coll) {
- return makeCursor(coll.getDB(),
- assert.commandWorked(coll.runCommand(
- 'find', {readConcern: {level: 'majority'}, batchSize: 0})));
- },
- aggregate: function(coll) {
- return makeCursor(
- coll.getDB(),
- assert.commandWorked(coll.runCommand(
- 'aggregate',
- {readConcern: {level: 'majority'}, cursor: {batchSize: 0}, pipeline: []})));
- },
- aggregateGeoNear: function(coll) {
- return makeCursor(coll.getDB(), assert.commandWorked(coll.runCommand('aggregate', {
- readConcern: {level: 'majority'},
- cursor: {batchSize: 0},
- pipeline: [{$geoNear: {near: [0, 0], distanceField: "d", spherical: true}}]
- })));
- },
- };
-
- // These test cases have a run method that will be passed a collection with a single object with
- // _id: 1 and a state field that equals either "before" or "after". The collection will also
- // contain both a 2dsphere and a geoHaystack index to enable testing commands that depend on
- // them. The return value from the run method is expected to be the value of expectedBefore or
- // expectedAfter depending on the state of the state field.
- var nonCursorTestCases = {
- count_before: {
- run: function(coll) {
- var res = coll.runCommand(
- 'count', {readConcern: {level: 'majority'}, query: {state: 'before'}});
- assert.commandWorked(res);
- return res.n;
- },
- expectedBefore: 1,
- expectedAfter: 0,
+ return;
+}
+MongoRunner.stopMongod(testServer);
+
+function makeCursor(db, result) {
+ return new DBCommandCursor(db, result);
+}
+
+// These test cases are functions that return a cursor over the collection's documents without
+// fetching them yet.
+var cursorTestCases = {
+ find: function(coll) {
+ return makeCursor(coll.getDB(),
+ assert.commandWorked(coll.runCommand(
+ 'find', {readConcern: {level: 'majority'}, batchSize: 0})));
+ },
+ aggregate: function(coll) {
+ return makeCursor(
+ coll.getDB(),
+ assert.commandWorked(coll.runCommand(
+ 'aggregate',
+ {readConcern: {level: 'majority'}, cursor: {batchSize: 0}, pipeline: []})));
+ },
+ aggregateGeoNear: function(coll) {
+ return makeCursor(coll.getDB(), assert.commandWorked(coll.runCommand('aggregate', {
+ readConcern: {level: 'majority'},
+ cursor: {batchSize: 0},
+ pipeline: [{$geoNear: {near: [0, 0], distanceField: "d", spherical: true}}]
+ })));
+ },
+};
+
+// Each of these test cases has a run method that is passed a collection containing a single
+// document with _id: 1 and a state field equal to either "before" or "after". The collection
+// also contains both a 2dsphere and a geoHaystack index so that commands depending on them can
+// be tested. The run method is expected to return the value of expectedBefore or expectedAfter,
+// depending on the state field.
+var nonCursorTestCases = {
+ count_before: {
+ run: function(coll) {
+ var res = coll.runCommand('count',
+ {readConcern: {level: 'majority'}, query: {state: 'before'}});
+ assert.commandWorked(res);
+ return res.n;
},
- count_after: {
- run: function(coll) {
- var res = coll.runCommand(
- 'count', {readConcern: {level: 'majority'}, query: {state: 'after'}});
- assert.commandWorked(res);
- return res.n;
- },
- expectedBefore: 0,
- expectedAfter: 1,
+ expectedBefore: 1,
+ expectedAfter: 0,
+ },
+ count_after: {
+ run: function(coll) {
+ var res = coll.runCommand('count',
+ {readConcern: {level: 'majority'}, query: {state: 'after'}});
+ assert.commandWorked(res);
+ return res.n;
},
- distinct: {
- run: function(coll) {
- var res =
- coll.runCommand('distinct', {readConcern: {level: 'majority'}, key: 'state'});
- assert.commandWorked(res);
- assert.eq(res.values.length, 1, tojson(res));
- return res.values[0];
- },
- expectedBefore: 'before',
- expectedAfter: 'after',
+ expectedBefore: 0,
+ expectedAfter: 1,
+ },
+ distinct: {
+ run: function(coll) {
+ var res = coll.runCommand('distinct', {readConcern: {level: 'majority'}, key: 'state'});
+ assert.commandWorked(res);
+ assert.eq(res.values.length, 1, tojson(res));
+ return res.values[0];
},
- geoSearch: {
- run: function(coll) {
- var res = coll.runCommand('geoSearch', {
- readConcern: {level: 'majority'},
- near: [0, 0],
- search: {_id: 1}, // Needed due to SERVER-23158.
- maxDistance: 1,
- });
- assert.commandWorked(res);
- assert.eq(res.results.length, 1, tojson(res));
- return res.results[0].state;
- },
- expectedBefore: 'before',
- expectedAfter: 'after',
+ expectedBefore: 'before',
+ expectedAfter: 'after',
+ },
+ geoSearch: {
+ run: function(coll) {
+ var res = coll.runCommand('geoSearch', {
+ readConcern: {level: 'majority'},
+ near: [0, 0],
+ search: {_id: 1}, // Needed due to SERVER-23158.
+ maxDistance: 1,
+ });
+ assert.commandWorked(res);
+ assert.eq(res.results.length, 1, tojson(res));
+ return res.results[0].state;
},
- };
-
- function runTests(coll, mongodConnection) {
- function makeSnapshot() {
- return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
- }
- function setCommittedSnapshot(snapshot) {
- assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
- }
-
- assert.commandWorked(coll.createIndex({point: '2dsphere'}));
- for (var testName in cursorTestCases) {
- jsTestLog('Running ' + testName + ' against ' + coll.toString());
- var getCursor = cursorTestCases[testName];
-
- // Setup initial state.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
- setCommittedSnapshot(makeSnapshot());
-
- // Check initial conditions.
- assert.eq(getCursor(coll).next().state, 'before');
-
- // Change state without making it committed.
- assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
-
- // Cursor still sees old state.
- assert.eq(getCursor(coll).next().state, 'before');
-
- // Create a cursor before the update is visible.
- var oldCursor = getCursor(coll);
-
- // Making a snapshot doesn't make the update visible yet.
- var snapshot = makeSnapshot();
- assert.eq(getCursor(coll).next().state, 'before');
-
- // Setting it as committed does for both new and old cursors.
- setCommittedSnapshot(snapshot);
- assert.eq(getCursor(coll).next().state, 'after');
- assert.eq(oldCursor.next().state, 'after');
- }
-
- assert.commandWorked(coll.ensureIndex({point: 'geoHaystack', _id: 1}, {bucketSize: 1}));
- for (var testName in nonCursorTestCases) {
- jsTestLog('Running ' + testName + ' against ' + coll.toString());
- var getResult = nonCursorTestCases[testName].run;
- var expectedBefore = nonCursorTestCases[testName].expectedBefore;
- var expectedAfter = nonCursorTestCases[testName].expectedAfter;
-
- // Setup initial state.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
- setCommittedSnapshot(makeSnapshot());
-
- // Check initial conditions.
- assert.eq(getResult(coll), expectedBefore);
-
- // Change state without making it committed.
- assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
-
- // Cursor still sees old state.
- assert.eq(getResult(coll), expectedBefore);
-
- // Making a snapshot doesn't make the update visible yet.
- var snapshot = makeSnapshot();
- assert.eq(getResult(coll), expectedBefore);
-
- // Setting it as committed does.
- setCommittedSnapshot(snapshot);
- assert.eq(getResult(coll), expectedAfter);
- }
+ expectedBefore: 'before',
+ expectedAfter: 'after',
+ },
+};
+
+function runTests(coll, mongodConnection) {
+ function makeSnapshot() {
+ return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
+ }
+ function setCommittedSnapshot(snapshot) {
+ assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
+ }
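+    // (Illustrative note, not part of the original test: with the
+    // 'testingSnapshotBehaviorInIsolation' server parameter set, a write only becomes
+    // visible to majority reads after something like
+    //     setCommittedSnapshot(makeSnapshot());
+    // which is exactly the pattern the loops below exercise.)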
+
+ assert.commandWorked(coll.createIndex({point: '2dsphere'}));
+ for (var testName in cursorTestCases) {
+ jsTestLog('Running ' + testName + ' against ' + coll.toString());
+ var getCursor = cursorTestCases[testName];
+
+    // Set up initial state.
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Check initial conditions.
+ assert.eq(getCursor(coll).next().state, 'before');
+
+ // Change state without making it committed.
+ assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
+
+ // Cursor still sees old state.
+ assert.eq(getCursor(coll).next().state, 'before');
+
+ // Create a cursor before the update is visible.
+ var oldCursor = getCursor(coll);
+
+ // Making a snapshot doesn't make the update visible yet.
+ var snapshot = makeSnapshot();
+ assert.eq(getCursor(coll).next().state, 'before');
+
+ // Setting it as committed does for both new and old cursors.
+ setCommittedSnapshot(snapshot);
+ assert.eq(getCursor(coll).next().state, 'after');
+ assert.eq(oldCursor.next().state, 'after');
+ }
+
+ assert.commandWorked(coll.ensureIndex({point: 'geoHaystack', _id: 1}, {bucketSize: 1}));
+ for (var testName in nonCursorTestCases) {
+ jsTestLog('Running ' + testName + ' against ' + coll.toString());
+ var getResult = nonCursorTestCases[testName].run;
+ var expectedBefore = nonCursorTestCases[testName].expectedBefore;
+ var expectedAfter = nonCursorTestCases[testName].expectedAfter;
+
+    // Set up initial state.
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Check initial conditions.
+ assert.eq(getResult(coll), expectedBefore);
+
+ // Change state without making it committed.
+ assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
+
+ // Cursor still sees old state.
+ assert.eq(getResult(coll), expectedBefore);
+
+ // Making a snapshot doesn't make the update visible yet.
+ var snapshot = makeSnapshot();
+ assert.eq(getResult(coll), expectedBefore);
+
+ // Setting it as committed does.
+ setCommittedSnapshot(snapshot);
+ assert.eq(getResult(coll), expectedAfter);
+ }
+}
+
+var replTest = new ReplSetTest({
+ nodes: 1,
+ oplogSize: 2,
+ nodeOptions: {
+ setParameter: 'testingSnapshotBehaviorInIsolation=true',
+ enableMajorityReadConcern: '',
+ shardsvr: ''
}
+});
+replTest.startSet();
+// Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
+replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+var mongod = replTest.getPrimary();
+
+(function testSingleNode() {
+ var db = mongod.getDB("singleNode");
+ runTests(db.collection, mongod);
+})();
+
+var shardingTest = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
+
+// Remove tests of commands that aren't supported at all through mongos, even on unsharded
+// collections.
+['geoSearch'].forEach(function(cmd) {
+ // Make sure it really isn't supported.
+ assert.eq(shardingTest.getDB('test').coll.runCommand(cmd).code, ErrorCodes.CommandNotFound);
+ delete cursorTestCases[cmd];
+ delete nonCursorTestCases[cmd];
+});
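+// (Illustrative note, not part of the original test: after the deletions above, e.g.
+// assert(!('geoSearch' in cursorTestCases)) would hold, so the for-in loops inside
+// runTests never see the unsupported command.)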
+
+(function testUnshardedDBThroughMongos() {
+ var db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedDB, mongod);
+})();
+
+shardingTest.adminCommand({enableSharding: 'throughMongos'});
+
+(function testUnshardedCollectionThroughMongos() {
+ var db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedCollection, mongod);
+})();
+
+(function testShardedCollectionThroughMongos() {
+ var db = shardingTest.getDB("throughMongos");
+ var collection = db.shardedCollection;
+ shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
+ runTests(collection, mongod);
+})();
- var replTest = new ReplSetTest({
- nodes: 1,
- oplogSize: 2,
- nodeOptions: {
- setParameter: 'testingSnapshotBehaviorInIsolation=true',
- enableMajorityReadConcern: '',
- shardsvr: ''
- }
- });
- replTest.startSet();
- // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- var mongod = replTest.getPrimary();
-
- (function testSingleNode() {
- var db = mongod.getDB("singleNode");
- runTests(db.collection, mongod);
- })();
-
- var shardingTest = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
- assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
-
- // Remove tests of commands that aren't supported at all through mongos, even on unsharded
- // collections.
- ['geoSearch'].forEach(function(cmd) {
- // Make sure it really isn't supported.
- assert.eq(shardingTest.getDB('test').coll.runCommand(cmd).code, ErrorCodes.CommandNotFound);
- delete cursorTestCases[cmd];
- delete nonCursorTestCases[cmd];
- });
-
- (function testUnshardedDBThroughMongos() {
- var db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedDB, mongod);
- })();
-
- shardingTest.adminCommand({enableSharding: 'throughMongos'});
-
- (function testUnshardedCollectionThroughMongos() {
- var db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedCollection, mongod);
- })();
-
- (function testShardedCollectionThroughMongos() {
- var db = shardingTest.getDB("throughMongos");
- var collection = db.shardedCollection;
- shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
- runTests(collection, mongod);
- })();
-
- shardingTest.stop();
- replTest.stopSet();
+shardingTest.stop();
+replTest.stopSet();
})();
diff --git a/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js b/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js
index 108860cfd6a..7416b41bde8 100644
--- a/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js
+++ b/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js
@@ -5,50 +5,49 @@
* @tags: [requires_persistence, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({
- name: "rebuildMultipleIndexesAtStartup",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "rebuildMultipleIndexesAtStartup",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
- // Only snapshotting storage engines can pause advancing the stable timestamp allowing us
- // to get into a state where indexes exist, but the underlying tables were dropped.
- rst.stopSet();
- return;
- }
+if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+    // Only snapshotting storage engines can pause advancing the stable timestamp, allowing us
+ // to get into a state where indexes exist, but the underlying tables were dropped.
+ rst.stopSet();
+ return;
+}
- let coll = rst.getPrimary().getDB("indexRebuild")["coll"];
- assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
- assert.eq(3, coll.getIndexes().length);
- rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
+let coll = rst.getPrimary().getDB("indexRebuild")["coll"];
+assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
+assert.eq(3, coll.getIndexes().length);
+rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
- // Lock the index entries into a stable checkpoint by shutting down.
- rst.stopSet(undefined, true);
- rst.startSet(undefined, true);
+// Lock the index entries into a stable checkpoint by shutting down.
+rst.stopSet(undefined, true);
+rst.startSet(undefined, true);
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
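+// (Sketch using the same failpoint API as above, not part of the original test: the
+// failpoint could later be released per node with
+//     node.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"});
+// here that is unnecessary because the set is killed with SIGKILL below.)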
- // Dropping the index would normally modify the collection metadata and drop the
- // table. Because we're not advancing the stable timestamp and we're going to crash the
- // server, the catalog change won't take effect, but the WT table being dropped will.
- coll = rst.getPrimary().getDB("indexRebuild")["coll"];
- assert.commandWorked(coll.dropIndexes());
- rst.awaitReplication();
- rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Dropping the indexes would normally modify the collection metadata and drop the
+// tables. Because we're not advancing the stable timestamp and we're going to crash the
+// server, the catalog change won't take effect, but the drop of the WT tables will.
+coll = rst.getPrimary().getDB("indexRebuild")["coll"];
+assert.commandWorked(coll.dropIndexes());
+rst.awaitReplication();
+rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restarting the replica set should rebuild both indexes on both nodes. Just to be dropped
- // again by replication recovery. Starting up successfully is a passing test run.
- rst.startSet(undefined, true);
- coll = rst.getPrimary().getDB("indexRebuild")["coll"];
- assert.eq(1, coll.getIndexes().length);
- rst.stopSet();
+// Restarting the replica set should rebuild both indexes on both nodes, just to be
+// dropped again by replication recovery. Starting up successfully is a passing test run.
+rst.startSet(undefined, true);
+coll = rst.getPrimary().getDB("indexRebuild")["coll"];
+assert.eq(1, coll.getIndexes().length);
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/recovery_wt_cache_full.js b/jstests/noPassthrough/recovery_wt_cache_full.js
index dff22ad959a..7d7dc171296 100644
--- a/jstests/noPassthrough/recovery_wt_cache_full.js
+++ b/jstests/noPassthrough/recovery_wt_cache_full.js
@@ -4,94 +4,93 @@
* requires_majority_read_concern]
*/
(function() {
- 'use strict';
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/libs/check_log.js');
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
- },
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Do not specify storage engine in this node's options because this will
- // prevent us from overriding it on restart.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet({
- // Start with a larger storage engine cache size to allow the secondary to write
- // the oplog entries to disk. This setting will be adjusted downwards upon restart to
- // test recovery behavior as the cache fills up.
- // This overrides the --storageEngineCacheSideGB setting passed to resmoke.py but does not
- // affect the default cache size on restart.
- wiredTigerCacheSizeGB: 10,
- });
- rst.initiate();
+ // Do not specify storage engine in this node's options because this will
+ // prevent us from overriding it on restart.
+ },
+ ]
+});
+const nodes = rst.startSet({
+ // Start with a larger storage engine cache size to allow the secondary to write
+ // the oplog entries to disk. This setting will be adjusted downwards upon restart to
+ // test recovery behavior as the cache fills up.
+    // This overrides the --storageEngineCacheSizeGB setting passed to resmoke.py but does not
+ // affect the default cache size on restart.
+ wiredTigerCacheSizeGB: 10,
+});
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
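+// (Worked example of the sizing above: 'x'.repeat(10 * 1024 * 1024) yields a ~10 MB
+// string, so the two documents total roughly 20 MB before any updates are applied.)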
- let secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+let secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- // Disable snapshotting on secondary so that further operations do not enter the majority
- // snapshot.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+// Disable snapshotting on secondary so that further operations do not enter the majority
+// snapshot.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- const numUpdates = 500;
- jsTestLog('Writing ' + numUpdates + ' updates to ' + numDocs +
- ' documents on secondary after disabling snapshots.');
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+const numUpdates = 500;
+jsTestLog('Writing ' + numUpdates + ' updates to ' + numDocs +
+ ' documents on secondary after disabling snapshots.');
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
+}
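+// (Worked arithmetic for the loops above: numDocs * numUpdates = 2 * 500 = 1000 update
+// oplog entries, all of which must be reapplied during recovery on restart.)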
- jsTestLog('Waiting for updates on secondary ' + secondary.host +
- ' to be written to the oplog.');
- rst.awaitReplication();
+jsTestLog('Waiting for updates on secondary ' + secondary.host + ' to be written to the oplog.');
+rst.awaitReplication();
- secondary = rst.restart(1, {
- setParameter: {
- logComponentVerbosity: tojsononeline({storage: {recovery: 2}}),
- },
- // Constrain the storage engine cache size to make it easier to fill it up with unflushed
- // modification.
- wiredTigerCacheSizeGB: 1,
- });
+secondary = rst.restart(1, {
+ setParameter: {
+ logComponentVerbosity: tojsononeline({storage: {recovery: 2}}),
+ },
+ // Constrain the storage engine cache size to make it easier to fill it up with unflushed
+    // modifications.
+ wiredTigerCacheSizeGB: 1,
+});
- // Verify storage engine cache size in effect during recovery.
- const actualCacheSizeGB = assert.commandWorked(secondary.adminCommand({getCmdLineOpts: 1}))
- .parsed.storage.wiredTiger.engineConfig.cacheSizeGB;
- jsTestLog('Secondary was restarted with a storage cache size of ' + actualCacheSizeGB + ' GB.');
- assert.eq(1, actualCacheSizeGB);
+// Verify storage engine cache size in effect during recovery.
+const actualCacheSizeGB = assert.commandWorked(secondary.adminCommand({getCmdLineOpts: 1}))
+ .parsed.storage.wiredTiger.engineConfig.cacheSizeGB;
+jsTestLog('Secondary was restarted with a storage cache size of ' + actualCacheSizeGB + ' GB.');
+assert.eq(1, actualCacheSizeGB);
- checkLog.contains(secondary, 'Starting recovery oplog application');
- jsTestLog('Applying updates on secondary ' + secondary.host + ' during recovery.');
+checkLog.contains(secondary, 'Starting recovery oplog application');
+jsTestLog('Applying updates on secondary ' + secondary.host + ' during recovery.');
- // This ensures that the node is able to complete recovery and transition to SECONDARY.
- rst.awaitReplication();
+// This ensures that the node is able to complete recovery and transition to SECONDARY.
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/refresh_logical_session_cache_now.js b/jstests/noPassthrough/refresh_logical_session_cache_now.js
index f1c87d482ea..ac11c138c6f 100644
--- a/jstests/noPassthrough/refresh_logical_session_cache_now.js
+++ b/jstests/noPassthrough/refresh_logical_session_cache_now.js
@@ -1,49 +1,48 @@
(function() {
- "use script";
+"use script";
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var res;
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
+var res;
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
- // Start up a standalone server.
- var conn = MongoRunner.runMongod();
- var admin = conn.getDB("admin");
- var config = conn.getDB("config");
+// Start up a standalone server.
+var conn = MongoRunner.runMongod();
+var admin = conn.getDB("admin");
+var config = conn.getDB("config");
- // Trigger an initial refresh, as a sanity check.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
+// Trigger an initial refresh, as a sanity check.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
- // Start a session. Should not be in the collection yet.
- res = admin.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
+// Start a session. Should not be in the collection yet.
+res = admin.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
- assert.eq(config.system.sessions.count(), 0, "should not have session records yet");
+assert.eq(config.system.sessions.count(), 0, "should not have session records yet");
- // Trigger a refresh. Session should now be in the collection.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
+// Trigger a refresh. Session should now be in the collection.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
- assert.eq(config.system.sessions.count(), 1, "should have written session records");
+assert.eq(config.system.sessions.count(), 1, "should have written session records");
- // Start some new sessions. Should not be in the collection yet.
- var numSessions = 100;
- for (var i = 0; i < numSessions; i++) {
- res = admin.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
+// Start some new sessions. Should not be in the collection yet.
+var numSessions = 100;
+for (var i = 0; i < numSessions; i++) {
+ res = admin.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
- assert.eq(config.system.sessions.count(), 1, "should not have more session records yet");
+assert.eq(config.system.sessions.count(), 1, "should not have more session records yet");
- // Trigger another refresh. All sessions should now be in the collection.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
+// Trigger another refresh. All sessions should now be in the collection.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
- assert.eq(
- config.system.sessions.count(), numSessions + 1, "should have written session records");
- MongoRunner.stopMongod(conn);
+assert.eq(config.system.sessions.count(), numSessions + 1, "should have written session records");
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/refresh_sessions_command.js b/jstests/noPassthrough/refresh_sessions_command.js
index 4386b61429e..a0a65fb4695 100644
--- a/jstests/noPassthrough/refresh_sessions_command.js
+++ b/jstests/noPassthrough/refresh_sessions_command.js
@@ -1,96 +1,93 @@
(function() {
- "use strict";
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- var conn;
- var admin;
- var result;
- var startSession = {startSession: 1};
-
- // Run initial tests without auth.
- conn = MongoRunner.runMongod();
- admin = conn.getDB("admin");
-
- result = admin.runCommand(startSession);
- assert.commandWorked(result, "failed to startSession");
- var lsid = result.id;
-
- // Test that we can run refreshSessions unauthenticated if --auth is off.
- result = admin.runCommand({refreshSessions: [lsid]});
- assert.commandWorked(result, "could not run refreshSessions unauthenticated without --auth");
-
- // Test that we can run refreshSessions authenticated if --auth is off.
- admin.createUser(
- {user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
- admin.auth("admin", "admin");
- result = admin.runCommand(startSession);
- var lsid2 = result.id;
- result = admin.runCommand({refreshSessions: [lsid2]});
- assert.commandWorked(result, "could not run refreshSessions logged in with --auth off");
-
- // Turn on auth for further testing.
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({auth: "", nojournal: "", setParameter: {maxSessions: 3}});
- admin = conn.getDB("admin");
-
- admin.createUser(
- {user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
- admin.auth("admin", "admin");
-
- result = admin.runCommand({
- createRole: 'readSessionsCollection',
- privileges: [{resource: {db: 'config', collection: 'system.sessions'}, actions: ['find']}],
- roles: []
- });
- assert.commandWorked(result, "couldn't make readSessionsCollection role");
-
- admin.createUser(
- {user: 'readSessionsCollection', pwd: 'pwd', roles: ['readSessionsCollection']});
- admin.logout();
-
- // Test that we cannot run refreshSessions unauthenticated if --auth is on.
- result = admin.runCommand({refreshSessions: [lsid]});
- assert.commandFailed(result, "able to run refreshSessions without authenticating");
-
- // Test that we can run refreshSessions on our own sessions authenticated if --auth is on.
- admin.auth("admin", "admin");
- result = admin.runCommand(startSession);
- var lsid3 = result.id;
- result = admin.runCommand({refreshSessions: [lsid3]});
- assert.commandWorked(result, "unable to run refreshSessions while logged in");
-
- // Test that we can refresh "others'" sessions (new ones) when authenticated with --auth.
- result = admin.runCommand({refreshSessions: [lsid]});
- assert.commandWorked(result, "unable to refresh novel lsids");
-
- // Test that sending a mix of known and new sessions is fine
- result = admin.runCommand({refreshSessions: [lsid, lsid2, lsid3]});
- assert.commandWorked(result, "unable to refresh mix of known and unknown lsids");
-
- // Test that sending a set of sessions with duplicates is fine
- result = admin.runCommand({refreshSessions: [lsid, lsid, lsid, lsid]});
- assert.commandWorked(result, "unable to refresh with duplicate lsids in the set");
-
- // Test that we can run refreshSessions with an empty set of sessions.
- result = admin.runCommand({refreshSessions: []});
- assert.commandWorked(result, "unable to refresh empty set of lsids");
-
- // Test that we cannot run refreshSessions when the cache is full.
- var lsid4 = {"id": UUID()};
- result = admin.runCommand({refreshSessions: [lsid4]});
- assert.commandFailed(result, "able to run refreshSessions when the cache is full");
-
- // Test that once we force a refresh, all of these sessions are in the sessions collection.
- admin.logout();
- admin.auth("readSessionsCollection", "pwd");
- result = admin.runCommand({refreshLogicalSessionCacheNow: 1});
- assert.commandWorked(result, "could not force refresh");
-
- var config = conn.getDB("config");
- assert.eq(config.system.sessions.count(), 3, "should have refreshed all session records");
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+var conn;
+var admin;
+var result;
+var startSession = {startSession: 1};
+
+// Run initial tests without auth.
+conn = MongoRunner.runMongod();
+admin = conn.getDB("admin");
+
+result = admin.runCommand(startSession);
+assert.commandWorked(result, "failed to startSession");
+var lsid = result.id;
+
+// Test that we can run refreshSessions unauthenticated if --auth is off.
+result = admin.runCommand({refreshSessions: [lsid]});
+assert.commandWorked(result, "could not run refreshSessions unauthenticated without --auth");
+
+// Test that we can run refreshSessions authenticated if --auth is off.
+admin.createUser({user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
+admin.auth("admin", "admin");
+result = admin.runCommand(startSession);
+var lsid2 = result.id;
+result = admin.runCommand({refreshSessions: [lsid2]});
+assert.commandWorked(result, "could not run refreshSessions logged in with --auth off");
+
+// Turn on auth for further testing.
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({auth: "", nojournal: "", setParameter: {maxSessions: 3}});
+admin = conn.getDB("admin");
+
+admin.createUser({user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
+admin.auth("admin", "admin");
+
+result = admin.runCommand({
+ createRole: 'readSessionsCollection',
+ privileges: [{resource: {db: 'config', collection: 'system.sessions'}, actions: ['find']}],
+ roles: []
+});
+assert.commandWorked(result, "couldn't make readSessionsCollection role");
+
+admin.createUser({user: 'readSessionsCollection', pwd: 'pwd', roles: ['readSessionsCollection']});
+admin.logout();
+
+// Test that we cannot run refreshSessions unauthenticated if --auth is on.
+result = admin.runCommand({refreshSessions: [lsid]});
+assert.commandFailed(result, "able to run refreshSessions without authenticating");
+
+// Test that we can run refreshSessions on our own sessions authenticated if --auth is on.
+admin.auth("admin", "admin");
+result = admin.runCommand(startSession);
+var lsid3 = result.id;
+result = admin.runCommand({refreshSessions: [lsid3]});
+assert.commandWorked(result, "unable to run refreshSessions while logged in");
+
+// Test that we can refresh "others'" sessions (new ones) when authenticated with --auth.
+result = admin.runCommand({refreshSessions: [lsid]});
+assert.commandWorked(result, "unable to refresh novel lsids");
+
+// Test that sending a mix of known and new sessions is fine
+result = admin.runCommand({refreshSessions: [lsid, lsid2, lsid3]});
+assert.commandWorked(result, "unable to refresh mix of known and unknown lsids");
+
+// Test that sending a set of sessions with duplicates is fine
+result = admin.runCommand({refreshSessions: [lsid, lsid, lsid, lsid]});
+assert.commandWorked(result, "unable to refresh with duplicate lsids in the set");
+
+// Test that we can run refreshSessions with an empty set of sessions.
+result = admin.runCommand({refreshSessions: []});
+assert.commandWorked(result, "unable to refresh empty set of lsids");
+
+// Test that we cannot run refreshSessions when the cache is full.
+var lsid4 = {"id": UUID()};
+result = admin.runCommand({refreshSessions: [lsid4]});
+assert.commandFailed(result, "able to run refreshSessions when the cache is full");
+
+// Test that once we force a refresh, all of these sessions are in the sessions collection.
+admin.logout();
+admin.auth("readSessionsCollection", "pwd");
+result = admin.runCommand({refreshLogicalSessionCacheNow: 1});
+assert.commandWorked(result, "could not force refresh");
+
+var config = conn.getDB("config");
+assert.eq(config.system.sessions.count(), 3, "should have refreshed all session records");
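+// (The three records correspond to lsid, lsid2 and lsid3; lsid4 never entered the cache
+// because its refreshSessions call failed once the maxSessions=3 limit was reached.)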
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js b/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
index c0efd29ba42..b0fe7e7c4ac 100644
--- a/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
+++ b/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
@@ -9,44 +9,47 @@
*/
(function() {
- load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers.
-
- const baseName = 'reindex_crash_rebuilds_id_index';
- const collName = baseName;
- const dbpath = MongoRunner.dataPath + baseName + '/';
- resetDbpath(dbpath);
-
- const mongodOptions = {dbpath: dbpath, noCleanData: true};
- let conn = MongoRunner.runMongod(mongodOptions);
-
- let testDB = conn.getDB('test');
- let testColl = testDB.getCollection(collName);
-
- // Insert a single document and create the collection.
- testColl.insert({a: 1});
- let spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
- assert.neq(null, spec, "_id index not found");
- assert.eq("_id_", spec.name, tojson(spec));
-
- // Enable a failpoint that causes reIndex to crash after dropping the indexes but before
- // rebuilding them.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'reIndexCrashAfterDrop', mode: 'alwaysOn'}));
- assert.throws(() => testColl.runCommand({reIndex: collName}));
-
- // The server should have crashed from the failpoint.
- MongoRunner.stopMongod(conn, null, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
-
- // The server should start up successfully after rebuilding the _id index.
- conn = MongoRunner.runMongod(mongodOptions);
- testDB = conn.getDB('test');
- testColl = testDB.getCollection(collName);
- assert(testColl.exists());
-
- // The _id index should exist.
- spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
- assert.neq(null, spec, "_id index not found");
- assert.eq("_id_", spec.name, tojson(spec));
-
- MongoRunner.stopMongod(conn);
+load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers.
+
+const baseName = 'reindex_crash_rebuilds_id_index';
+const collName = baseName;
+const dbpath = MongoRunner.dataPath + baseName + '/';
+resetDbpath(dbpath);
+
+const mongodOptions = {
+ dbpath: dbpath,
+ noCleanData: true
+};
+let conn = MongoRunner.runMongod(mongodOptions);
+
+let testDB = conn.getDB('test');
+let testColl = testDB.getCollection(collName);
+
+// Insert a single document and create the collection.
+testColl.insert({a: 1});
+let spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
+assert.neq(null, spec, "_id index not found");
+assert.eq("_id_", spec.name, tojson(spec));
+
+// Enable a failpoint that causes reIndex to crash after dropping the indexes but before
+// rebuilding them.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'reIndexCrashAfterDrop', mode: 'alwaysOn'}));
+assert.throws(() => testColl.runCommand({reIndex: collName}));
+
+// The server should have crashed from the failpoint.
+MongoRunner.stopMongod(conn, null, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
+
+// The server should start up successfully after rebuilding the _id index.
+conn = MongoRunner.runMongod(mongodOptions);
+testDB = conn.getDB('test');
+testColl = testDB.getCollection(collName);
+assert(testColl.exists());
+
+// The _id index should exist.
+spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
+assert.neq(null, spec, "_id index not found");
+assert.eq("_id_", spec.name, tojson(spec));
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/repair_flag_transport_layer.js b/jstests/noPassthrough/repair_flag_transport_layer.js
index 987b07cd8d5..4898da084b0 100644
--- a/jstests/noPassthrough/repair_flag_transport_layer.js
+++ b/jstests/noPassthrough/repair_flag_transport_layer.js
@@ -3,20 +3,18 @@
*/
(function() {
- "use strict";
- let dbpath = MongoRunner.dataPath + "repair_flag_transport_layer";
- resetDbpath(dbpath);
+"use strict";
+let dbpath = MongoRunner.dataPath + "repair_flag_transport_layer";
+resetDbpath(dbpath);
- function runTest(conn) {
- let returnCode =
- runNonMongoProgram("mongod", "--port", conn.port, "--repair", "--dbpath", dbpath);
- assert.eq(
- returnCode, 0, "expected mongod --repair to execute successfully regardless of port");
- }
+function runTest(conn) {
+ let returnCode =
+ runNonMongoProgram("mongod", "--port", conn.port, "--repair", "--dbpath", dbpath);
+ assert.eq(returnCode, 0, "expected mongod --repair to execute successfully regardless of port");
+}
- let conn = MongoRunner.runMongod();
-
- runTest(conn);
- MongoRunner.stopMongod(conn);
+let conn = MongoRunner.runMongod();
+runTest(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/repl_set_resize_oplog.js b/jstests/noPassthrough/repl_set_resize_oplog.js
index 23682467f9d..0720746e732 100644
--- a/jstests/noPassthrough/repl_set_resize_oplog.js
+++ b/jstests/noPassthrough/repl_set_resize_oplog.js
@@ -4,44 +4,42 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- "use strict";
+"use strict";
- let replSet = new ReplSetTest({nodes: 2, oplogSize: 50});
- replSet.startSet();
- replSet.initiate();
+let replSet = new ReplSetTest({nodes: 2, oplogSize: 50});
+replSet.startSet();
+replSet.initiate();
- let primary = replSet.getPrimary();
+let primary = replSet.getPrimary();
- const MB = 1024 * 1024;
- const GB = 1024 * MB;
- const PB = 1024 * GB;
- const EB = 1024 * PB;
+const MB = 1024 * 1024;
+const GB = 1024 * MB;
+const PB = 1024 * GB;
+const EB = 1024 * PB;
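+// (Unit note for the calls below: replSetResizeOplog takes its size in megabytes, hence
+// the divisions by MB; for example, 1 * PB / MB === 1024 * 1024 * 1024.)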
- assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 50 * MB);
+assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 50 * MB);
- // Too small: 990MB
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 900}),
- ErrorCodes.InvalidOptions,
- "Expected replSetResizeOplog to fail because the size was too small");
+// Too small: 900MB
+assert.commandFailedWithCode(primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 900}),
+ ErrorCodes.InvalidOptions,
+ "Expected replSetResizeOplog to fail because the size was too small");
- // Way too small: -1GB
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: -1 * GB / MB}),
- ErrorCodes.InvalidOptions,
- "Expected replSetResizeOplog to fail because the size was too small");
+// Way too small: -1GB
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: -1 * GB / MB}),
+ ErrorCodes.InvalidOptions,
+ "Expected replSetResizeOplog to fail because the size was too small");
- // Too big: 8EB
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 8 * EB / MB}),
- ErrorCodes.InvalidOptions,
- "Expected replSetResizeOplog to fail because the size was too big");
+// Too big: 8EB
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 8 * EB / MB}),
+ ErrorCodes.InvalidOptions,
+ "Expected replSetResizeOplog to fail because the size was too big");
- // The maximum: 1PB
- assert.commandWorked(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 1 * PB / MB}));
+// The maximum: 1PB
+assert.commandWorked(primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 1 * PB / MB}));
- assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 1 * PB);
+assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 1 * PB);
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/noPassthrough/repl_write_threads_start_param.js b/jstests/noPassthrough/repl_write_threads_start_param.js
index e18d8de1259..f80f0f81655 100644
--- a/jstests/noPassthrough/repl_write_threads_start_param.js
+++ b/jstests/noPassthrough/repl_write_threads_start_param.js
@@ -5,37 +5,36 @@
// 4) cannot be altered at run time
(function() {
- "use strict";
+"use strict";
- // too low a count
- clearRawMongoProgramOutput();
- var mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=0'});
- assert.soon(function() {
- return rawMongoProgramOutput().match(
- "Invalid value for parameter replWriterThreadCount: 0 is not greater than or equal to 1");
- }, "mongod started with too low a value for replWriterThreadCount");
+// too low a count
+clearRawMongoProgramOutput();
+var mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=0'});
+assert.soon(function() {
+ return rawMongoProgramOutput().match(
+ "Invalid value for parameter replWriterThreadCount: 0 is not greater than or equal to 1");
+}, "mongod started with too low a value for replWriterThreadCount");
- // too high a count
- clearRawMongoProgramOutput();
- mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=257'});
- assert.soon(function() {
- return rawMongoProgramOutput().match(
- "Invalid value for parameter replWriterThreadCount: 257 is not less than or equal to 256");
- }, "mongod started with too high a value for replWriterThreadCount");
+// too high a count
+clearRawMongoProgramOutput();
+mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=257'});
+assert.soon(function() {
+ return rawMongoProgramOutput().match(
+ "Invalid value for parameter replWriterThreadCount: 257 is not less than or equal to 256");
+}, "mongod started with too high a value for replWriterThreadCount");
- // proper count
- clearRawMongoProgramOutput();
- mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=24'});
- assert.neq(null, mongo, "mongod failed to start with a suitable replWriterThreadCount value");
- assert(!rawMongoProgramOutput().match("Invalid value for parameter replWriterThreadCount"),
- "despite accepting the replWriterThreadCount value, mongod logged an error");
+// proper count
+clearRawMongoProgramOutput();
+mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=24'});
+assert.neq(null, mongo, "mongod failed to start with a suitable replWriterThreadCount value");
+assert(!rawMongoProgramOutput().match("Invalid value for parameter replWriterThreadCount"),
+ "despite accepting the replWriterThreadCount value, mongod logged an error");
- // getParameter to confirm the value was set
- var result = mongo.getDB("admin").runCommand({getParameter: 1, replWriterThreadCount: 1});
- assert.eq(24, result.replWriterThreadCount, "replWriterThreadCount was not set internally");
+// getParameter to confirm the value was set
+var result = mongo.getDB("admin").runCommand({getParameter: 1, replWriterThreadCount: 1});
+assert.eq(24, result.replWriterThreadCount, "replWriterThreadCount was not set internally");
- // setParameter to ensure it is not possible
- assert.commandFailed(
- mongo.getDB("admin").runCommand({setParameter: 1, replWriterThreadCount: 1}));
- MongoRunner.stopMongod(mongo);
+// setParameter to ensure it is not possible
+assert.commandFailed(mongo.getDB("admin").runCommand({setParameter: 1, replWriterThreadCount: 1}));
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/replica_set_connection_error_codes.js b/jstests/noPassthrough/replica_set_connection_error_codes.js
index 7deebdfcc27..d431415ee6d 100644
--- a/jstests/noPassthrough/replica_set_connection_error_codes.js
+++ b/jstests/noPassthrough/replica_set_connection_error_codes.js
@@ -4,81 +4,80 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- // Set the refresh period to 10 min to rule out races
- _setShellFailPoint({
- configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
- mode: "alwaysOn",
- data: {
- period: 10 * 60,
- },
- });
-
- const rst = new ReplSetTest({
- nodes: 3,
- nodeOptions: {
- setParameter:
- {"failpoint.respondWithNotPrimaryInCommandDispatch": tojson({mode: "alwaysOn"})}
- }
- });
- rst.startSet();
- rst.initiate();
-
- const directConn = rst.getPrimary();
- const rsConn = new Mongo(rst.getURL());
- assert(rsConn.isReplicaSetConnection(),
- "expected " + rsConn.host + " to be a replica set connection string");
-
- const[secondary1, secondary2] = rst.getSecondaries();
-
- function stepDownPrimary(rst) {
- const awaitShell = startParallelShell(
- () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
- directConn.port);
-
- // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
- // until after the parallel shell has started the replSetStepDown command and the server is
- // paused at the failpoint.Do not attempt to reconnect to the node, since the node will be
- // holding the global X lock at the failpoint.
- const reconnectNode = false;
- rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
-
- return awaitShell;
+"use strict";
+
+// Set the refresh period to 10 min to rule out races
+_setShellFailPoint({
+ configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
+ mode: "alwaysOn",
+ data: {
+ period: 10 * 60,
+ },
+});
+
+const rst = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {
+ setParameter:
+ {"failpoint.respondWithNotPrimaryInCommandDispatch": tojson({mode: "alwaysOn"})}
}
-
- const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
- assert.commandWorked(
- directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
-
- const awaitShell = stepDownPrimary(rst);
-
- // Wait for a new primary to be elected and agreed upon by nodes.
- rst.getPrimary();
- rst.awaitNodesAgreeOnPrimary();
-
- // DBClientRS will continue to send command requests to the node it believed to be primary even
- // after it stepped down so long as it hasn't closed its connection.
- assert.commandFailedWithCode(rsConn.getDB("test").runCommand({create: "mycoll"}),
- ErrorCodes.NotMaster);
-
- // However, once the server responds back with a ErrorCodes.NotMaster error, DBClientRS will
- // cause the ReplicaSetMonitor to attempt to discover the current primary.
- assert.commandWorked(rsConn.getDB("test").runCommand({create: "mycoll"}));
-
- try {
- assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- } catch (e) {
- if (!isNetworkError(e)) {
- throw e;
- }
-
- // We ignore network errors because it's possible that depending on how quickly the server
- // closes connections that the connection would get closed before the server has a chance to
- // respond to the configureFailPoint command with ok=1.
+});
+rst.startSet();
+rst.initiate();
+
+const directConn = rst.getPrimary();
+const rsConn = new Mongo(rst.getURL());
+assert(rsConn.isReplicaSetConnection(),
+ "expected " + rsConn.host + " to be a replica set connection string");
+
+const [secondary1, secondary2] = rst.getSecondaries();
+
+function stepDownPrimary(rst) {
+ const awaitShell = startParallelShell(
+ () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
+ directConn.port);
+
+ // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
+ // until after the parallel shell has started the replSetStepDown command and the server is
+    // paused at the failpoint. Do not attempt to reconnect to the node, since the node will be
+ // holding the global X lock at the failpoint.
+ const reconnectNode = false;
+ rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
+
+ return awaitShell;
+}
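+// (Note: stepDownPrimary returns the parallel shell's join handle; awaitShell() is only
+// called after the failpoint is disabled below, since replSetStepDown cannot complete
+// while the node is paused at that failpoint.)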
+
+const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
+assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+
+const awaitShell = stepDownPrimary(rst);
+
+// Wait for a new primary to be elected and agreed upon by nodes.
+rst.getPrimary();
+rst.awaitNodesAgreeOnPrimary();
+
+// DBClientRS will continue to send command requests to the node it believed to be primary even
+// after it stepped down so long as it hasn't closed its connection.
+assert.commandFailedWithCode(rsConn.getDB("test").runCommand({create: "mycoll"}),
+ ErrorCodes.NotMaster);
+
+// However, once the server responds with an ErrorCodes.NotMaster error, DBClientRS will
+// cause the ReplicaSetMonitor to attempt to discover the current primary.
+assert.commandWorked(rsConn.getDB("test").runCommand({create: "mycoll"}));
+
+try {
+ assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+} catch (e) {
+ if (!isNetworkError(e)) {
+ throw e;
}
- awaitShell();
+    // We ignore network errors because, depending on how quickly the server closes
+    // connections, the connection may get closed before the server has a chance to
+    // respond to the configureFailPoint command with ok=1.
+}
+
+awaitShell();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/replica_set_connection_getmore.js b/jstests/noPassthrough/replica_set_connection_getmore.js
index acc1d7e31c3..e7167fbd5eb 100644
--- a/jstests/noPassthrough/replica_set_connection_getmore.js
+++ b/jstests/noPassthrough/replica_set_connection_getmore.js
@@ -4,44 +4,44 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "getmore";
-
- // We create our own replica set connection because 'rst.nodes' is an array of direct
- // connections to each individual node.
- var conn = new Mongo(rst.getURL());
-
- // We force a read mode of "compatibility" so that we can test Mongo.prototype.readMode()
- // resolves to "commands" independently of the --readMode passed to the mongo shell running this
- // test.
- conn.forceReadMode("compatibility");
- assert.eq("commands",
- conn.readMode(),
- "replica set connections created by the mongo shell should use 'commands' read mode");
- var coll = conn.getDB(dbName)[collName];
- coll.drop();
-
- // Insert several document so that we can use a cursor to fetch them in multiple batches.
- var res = coll.insert([{}, {}, {}, {}, {}]);
- assert.writeOK(res);
- assert.eq(5, res.nInserted);
-
- // Wait for the secondary to catch up because we're going to try and do reads from it.
- rst.awaitReplication();
-
- // Establish a cursor on the secondary and verify that the getMore operations are routed to it.
- var cursor = coll.find().readPref("secondary").batchSize(2);
- assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
-
- // Verify that queries work when the read mode is forced to "legacy" reads.
- conn.forceReadMode("legacy");
- var cursor = coll.find().readPref("secondary").batchSize(2);
- assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
-
- rst.stopSet();
+"use strict";
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "getmore";
+
+// We create our own replica set connection because 'rst.nodes' is an array of direct
+// connections to each individual node.
+var conn = new Mongo(rst.getURL());
+
+// We force a read mode of "compatibility" so that we can test that Mongo.prototype.readMode()
+// resolves to "commands" independently of the --readMode passed to the mongo shell running this
+// test.
+conn.forceReadMode("compatibility");
+assert.eq("commands",
+ conn.readMode(),
+ "replica set connections created by the mongo shell should use 'commands' read mode");
+var coll = conn.getDB(dbName)[collName];
+coll.drop();
+
+// Insert several document so that we can use a cursor to fetch them in multiple batches.
+var res = coll.insert([{}, {}, {}, {}, {}]);
+assert.writeOK(res);
+assert.eq(5, res.nInserted);
+
+// Wait for the secondary to catch up because we're going to try and do reads from it.
+rst.awaitReplication();
+
+// Establish a cursor on the secondary and verify that the getMore operations are routed to it.
+var cursor = coll.find().readPref("secondary").batchSize(2);
+assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
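+// (Worked example of the batching above: with batchSize(2) and 5 documents, the initial
+// find returns only 2, so itcount() must issue getMore commands; routing those getMores
+// to the secondary is what this assertion exercises.)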
+
+// Verify that queries work when the read mode is forced to "legacy" reads.
+conn.forceReadMode("legacy");
+var cursor = coll.find().readPref("secondary").batchSize(2);
+assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/replica_set_connection_stepdown.js b/jstests/noPassthrough/replica_set_connection_stepdown.js
index ab11d72d465..15fee060876 100644
--- a/jstests/noPassthrough/replica_set_connection_stepdown.js
+++ b/jstests/noPassthrough/replica_set_connection_stepdown.js
@@ -4,67 +4,66 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const directConn = rst.getPrimary();
- const rsConn = new Mongo(rst.getURL());
- assert(rsConn.isReplicaSetConnection(),
- "expected " + rsConn.host + " to be a replica set connection string");
+const directConn = rst.getPrimary();
+const rsConn = new Mongo(rst.getURL());
+assert(rsConn.isReplicaSetConnection(),
+ "expected " + rsConn.host + " to be a replica set connection string");
- function stepDownPrimary(rst) {
- const awaitShell = startParallelShell(
- () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
- directConn.port);
+function stepDownPrimary(rst) {
+ const awaitShell = startParallelShell(
+ () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
+ directConn.port);
- // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
- // until after the parallel shell has started the replSetStepDown command and the server is
- // paused at the failpoint. Do not attempt to reconnect to the node, since the node will be
- // holding the global X lock at the failpoint.
- const reconnectNode = false;
- rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
+ // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
+ // until after the parallel shell has started the replSetStepDown command and the server is
+ // paused at the failpoint. Do not attempt to reconnect to the node, since the node will be
+ // holding the global X lock at the failpoint.
+ const reconnectNode = false;
+ rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
- return awaitShell;
- }
-
- const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
- assert.commandWorked(
- directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+ return awaitShell;
+}
- const awaitShell = stepDownPrimary(rst);
+const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
+assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- const error = assert.throws(function() {
- // DBClientRS will continue to send command requests to the node it believed to be primary
- // even after it stepped down so long as it hasn't closed its connection. But this may also
- // throw if the ReplicaSetMonitor's backgroud refresh has already noticed that this node is
- // no longer primary.
- assert.commandFailedWithCode(rsConn.getDB("test").runCommand({find: "mycoll"}),
- ErrorCodes.NotMasterNoSlaveOk);
+const awaitShell = stepDownPrimary(rst);
- // However, once the server responds back with a "not master" error, DBClientRS will cause
- // the ReplicaSetMonitor to attempt to discover the current primary, which will cause this
- // to definitely throw.
- rsConn.getDB("test").runCommand({find: "mycoll"});
- });
- assert(/Could not find host/.test(error.toString()),
- "find command failed for a reason other than being unable to discover a new primary: " +
- tojson(error));
+const error = assert.throws(function() {
+ // DBClientRS will continue to send command requests to the node it believed to be primary
+ // even after it stepped down so long as it hasn't closed its connection. But this may also
+    // throw if the ReplicaSetMonitor's background refresh has already noticed that this node is
+ // no longer primary.
+ assert.commandFailedWithCode(rsConn.getDB("test").runCommand({find: "mycoll"}),
+ ErrorCodes.NotMasterNoSlaveOk);
- try {
- assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- } catch (e) {
- if (!isNetworkError(e)) {
- throw e;
- }
+    // However, once the server responds with a "not master" error, DBClientRS will cause
+ // the ReplicaSetMonitor to attempt to discover the current primary, which will cause this
+ // to definitely throw.
+ rsConn.getDB("test").runCommand({find: "mycoll"});
+});
+assert(/Could not find host/.test(error.toString()),
+ "find command failed for a reason other than being unable to discover a new primary: " +
+ tojson(error));
- // We ignore network errors because it's possible that depending on how quickly the server
- // closes connections that the connection would get closed before the server has a chance to
- // respond to the configureFailPoint command with ok=1.
+try {
+ assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+} catch (e) {
+ if (!isNetworkError(e)) {
+ throw e;
}
- awaitShell();
- rst.stopSet();
+    // We ignore network errors because, depending on how quickly the server closes
+    // connections, the connection may get closed before the server has a chance to
+    // respond to the configureFailPoint command with ok=1.
+}
+
+awaitShell();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/report_post_batch_resume_token_mongod.js b/jstests/noPassthrough/report_post_batch_resume_token_mongod.js
index cf7dd55b1d0..389151169a6 100644
--- a/jstests/noPassthrough/report_post_batch_resume_token_mongod.js
+++ b/jstests/noPassthrough/report_post_batch_resume_token_mongod.js
@@ -3,112 +3,112 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- // Create a new single-node replica set, and ensure that it can support $changeStream.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
-
- const db = rst.getPrimary().getDB(jsTestName());
- const collName = "report_post_batch_resume_token";
- const testCollection = assertDropAndRecreateCollection(db, collName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
- const adminDB = db.getSiblingDB("admin");
-
- let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
- const batchSize = 2;
-
- // Start watching the test collection in order to capture a resume token.
- let csCursor = testCollection.watch();
-
- // Write some documents to the test collection and get the resume token from the first doc.
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
- const resumeTokenFromDoc = csCursor.next()._id;
- csCursor.close();
-
- // Test that postBatchResumeToken is present on a non-empty initial aggregate batch.
- assert.soon(() => {
- csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc});
- csCursor.close(); // We don't need any results after the initial batch.
- return csCursor.objsLeftInBatch();
- });
- while (csCursor.objsLeftInBatch()) {
- csCursor.next();
- }
- let initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Test that the PBRT is correctly updated when reading events from within a transaction.
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(db.getName());
-
- const sessionColl = sessionDB[testCollection.getName()];
- const sessionOtherColl = sessionDB[otherCollection.getName()];
- session.startTransaction();
-
- // Open a stream of batchSize:2 and grab the PBRT of the initial batch.
- csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
- initialAggPBRT = csCursor.getResumeToken();
- assert.eq(csCursor.objsLeftInBatch(), 0);
-
- // Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
- for (let i = 0; i < 3; ++i) {
- assert.commandWorked(sessionColl.insert({_id: docId++}));
- }
- assert.commandWorked(sessionOtherColl.insert({}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
-
- // Grab the next 2 events, which should be the first 2 events in the transaction.
- assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 2);
-
- // The clusterTime should be the same on each, but the resume token keeps advancing.
- const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
- const txnClusterTime = txnEvent1.clusterTime;
- assert.eq(txnEvent2.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent1._id, initialAggPBRT), 0);
- assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
-
- // The PBRT of the first transaction batch is equal to the last document's resumeToken.
- let getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
-
- // Save this PBRT so that we can test resuming from it later on.
- const resumePBRT = getMorePBRT;
-
- // Now get the next batch. This contains the third of the four transaction operations.
- let previousGetMorePBRT = getMorePBRT;
- assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // The clusterTime of this event is the same as the two events from the previous batch, but its
- // resume token is greater than the previous PBRT.
- const txnEvent3 = csCursor.next();
- assert.eq(txnEvent3.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
-
- // Because we wrote to the unrelated collection, the final event in the transaction does not
- // appear in the batch. But in this case it also does not allow our PBRT to advance beyond the
- // last event in the batch, because the unrelated event is within the same transaction and
- // therefore has the same clusterTime.
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
-
- // Confirm that resuming from the PBRT of the first batch gives us the third transaction write.
- csCursor = testCollection.watch([], {resumeAfter: resumePBRT});
- assert.docEq(csCursor.next(), txnEvent3);
- assert(!csCursor.hasNext());
+"use strict";
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+// Create a new single-node replica set, and ensure that it can support $changeStream.
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
rst.stopSet();
+ return;
+}
+rst.initiate();
+
+const db = rst.getPrimary().getDB(jsTestName());
+const collName = "report_post_batch_resume_token";
+const testCollection = assertDropAndRecreateCollection(db, collName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
+const adminDB = db.getSiblingDB("admin");
+
+let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
+const batchSize = 2;
+
+// Start watching the test collection in order to capture a resume token.
+let csCursor = testCollection.watch();
+
+// Write some documents to the test collection and get the resume token from the first doc.
+for (let i = 0; i < 5; ++i) {
+ assert.commandWorked(testCollection.insert({_id: docId++}));
+}
+const resumeTokenFromDoc = csCursor.next()._id;
+csCursor.close();
+
+// Test that postBatchResumeToken is present on a non-empty initial aggregate batch.
+assert.soon(() => {
+ csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc});
+ csCursor.close(); // We don't need any results after the initial batch.
+ return csCursor.objsLeftInBatch();
+});
+while (csCursor.objsLeftInBatch()) {
+ csCursor.next();
+}
+let initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Test that the PBRT is correctly updated when reading events from within a transaction.
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(db.getName());
+
+const sessionColl = sessionDB[testCollection.getName()];
+const sessionOtherColl = sessionDB[otherCollection.getName()];
+session.startTransaction();
+
+// Open a stream of batchSize:2 and grab the PBRT of the initial batch.
+csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
+initialAggPBRT = csCursor.getResumeToken();
+assert.eq(csCursor.objsLeftInBatch(), 0);
+
+// Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
+for (let i = 0; i < 3; ++i) {
+ assert.commandWorked(sessionColl.insert({_id: docId++}));
+}
+assert.commandWorked(sessionOtherColl.insert({}));
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
+
+// Grab the next 2 events, which should be the first 2 events in the transaction.
+assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 2);
+
+// The clusterTime should be the same on each, but the resume token keeps advancing.
+const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
+const txnClusterTime = txnEvent1.clusterTime;
+assert.eq(txnEvent2.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent1._id, initialAggPBRT), 0);
+assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
+
+// The PBRT of the first transaction batch is equal to the last document's resumeToken.
+let getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
+
+// Save this PBRT so that we can test resuming from it later on.
+const resumePBRT = getMorePBRT;
+
+// Now get the next batch. This contains the third of the four transaction operations.
+let previousGetMorePBRT = getMorePBRT;
+assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// The clusterTime of this event is the same as the two events from the previous batch, but its
+// resume token is greater than the previous PBRT.
+const txnEvent3 = csCursor.next();
+assert.eq(txnEvent3.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
+
+// Because we wrote to the unrelated collection, the final event in the transaction does not
+// appear in the batch. In this case it also cannot advance our PBRT beyond the last event in
+// the batch, because the unrelated event is within the same transaction and therefore has
+// the same clusterTime.
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
+
+// Confirm that resuming from the PBRT of the first batch gives us the third transaction write.
+csCursor = testCollection.watch([], {resumeAfter: resumePBRT});
+assert.docEq(csCursor.next(), txnEvent3);
+assert(!csCursor.hasNext());
+
+rst.stopSet();
})();
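
The assertions in the test above all reduce to one invariant: the postBatchResumeToken (PBRT) reported by getResumeToken() never moves backwards and never trails the resume token of the last event returned. A minimal sketch of that invariant as a reusable check, assuming an open change-stream cursor as created by watch() above; checkPBRTMonotonic is a hypothetical name:

// Sketch: drain 'numEvents' events, asserting the PBRT advances monotonically and
// stays at or ahead of the resume token of the most recent event.
function checkPBRTMonotonic(csCursor, numEvents) {
    let lastPBRT = csCursor.getResumeToken();
    for (let i = 0; i < numEvents; ++i) {
        assert.soon(() => csCursor.hasNext());  // May dispatch a getMore.
        const event = csCursor.next();
        const pbrt = csCursor.getResumeToken();
        assert.gte(bsonWoCompare(pbrt, lastPBRT), 0, "PBRT moved backwards");
        assert.gte(bsonWoCompare(pbrt, event._id), 0, "PBRT trails the last event");
        lastPBRT = pbrt;
    }
}
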
diff --git a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js b/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
index 4020873089d..18127ce27d1 100644
--- a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
+++ b/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
@@ -12,33 +12,34 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- let replSet = new ReplSetTest({name: "server35317", nodes: 1});
- replSet.startSet();
- replSet.initiate();
+let replSet = new ReplSetTest({name: "server35317", nodes: 1});
+replSet.startSet();
+replSet.initiate();
- let prim = replSet.getPrimary();
- let beforeIndexBuild = assert.commandWorked(prim.adminCommand(
- {configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn"}))["operationTime"];
- assert.commandWorked(prim.getDB("test").coll.insert({c: 1}));
- assert.commandWorked(prim.getDB("test").coll.createIndex({c: 1}));
- assert.commandWorked(prim.adminCommand({restartCatalog: 1}));
+let prim = replSet.getPrimary();
+let beforeIndexBuild = assert.commandWorked(prim.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn"
+}))["operationTime"];
+assert.commandWorked(prim.getDB("test").coll.insert({c: 1}));
+assert.commandWorked(prim.getDB("test").coll.createIndex({c: 1}));
+assert.commandWorked(prim.adminCommand({restartCatalog: 1}));
- let session = prim.startSession({causalConsistency: false});
- let sessionDb = session.getDatabase("test");
- // Prior to fixing SERVER-35317, this would crash a debug build, or return success on a
- // non-debug build. Now it should return an error. Specifically, this fails because we're
- // trying to read behind the minimum visible snapshot timestamp for the `test.coll`
- // collection.
- assert.commandFailed(sessionDb.runCommand({
- find: "coll",
- filter: {c: 1},
- readConcern: {level: "snapshot", atClusterTime: beforeIndexBuild},
- txnNumber: NumberLong(0)
- }));
+let session = prim.startSession({causalConsistency: false});
+let sessionDb = session.getDatabase("test");
+// Prior to fixing SERVER-35317, this would crash a debug build, or return success on a
+// non-debug build. Now it should return an error. Specifically, this fails because we're
+// trying to read behind the minimum visible snapshot timestamp for the `test.coll`
+// collection.
+assert.commandFailed(sessionDb.runCommand({
+ find: "coll",
+ filter: {c: 1},
+ readConcern: {level: "snapshot", atClusterTime: beforeIndexBuild},
+ txnNumber: NumberLong(0)
+}));
- session.endSession();
- replSet.stopSet();
+session.endSession();
+replSet.stopSet();
})();
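
For reference, the command exercised above is the general recipe for a point-in-time read: a snapshot read at an explicit cluster time runs on a session and carries a transaction number. A minimal sketch, assuming a connection conn and a previously captured operationTime value opTime (both hypothetical here):

// Sketch: snapshot read pinned to a specific cluster time. This succeeds only if
// the collection's minimum visible snapshot is at or before 'opTime'.
const session = conn.startSession({causalConsistency: false});
const res = session.getDatabase("test").runCommand({
    find: "coll",
    readConcern: {level: "snapshot", atClusterTime: opTime},
    txnNumber: NumberLong(0)
});
session.endSession();
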
diff --git a/jstests/noPassthrough/restart_catalog_sharded_cluster.js b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
index 696d62c2af8..782fa9aa913 100644
--- a/jstests/noPassthrough/restart_catalog_sharded_cluster.js
+++ b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
@@ -3,211 +3,213 @@
* @tags: [requires_replication, requires_sharding, requires_majority_read_concern]
*/
(function() {
- "use strict";
-
- // Only run this test if the storage engine is "wiredTiger" or "inMemory".
- const acceptedStorageEngines = ["wiredTiger", "inMemory"];
- const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (!acceptedStorageEngines.includes(currentStorageEngine)) {
- jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
- " storage engine");
- return;
+"use strict";
+
+// Only run this test if the storage engine is "wiredTiger" or "inMemory".
+const acceptedStorageEngines = ["wiredTiger", "inMemory"];
+const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (!acceptedStorageEngines.includes(currentStorageEngine)) {
+ jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
+ " storage engine");
+ return;
+}
+
+// Helper function for sorting documents in JavaScript.
+function sortOn(fieldName) {
+ return (doc1, doc2) => {
+ return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]});
+ };
+}
+
+const st = new ShardingTest({
+ name: "restart_catalog_sharded_cluster",
+ mongos: 1,
+ config: 1,
+ shards: {
+ rs: true,
+ rs0: {nodes: 1},
+ rs1: {nodes: 1},
+ },
+ other: {
+ enableBalancer: false,
+ configOptions: {setParameter: "enableTestCommands=1"},
+ shardOptions: {setParameter: "enableTestCommands=1"},
}
-
- // Helper function for sorting documents in JavaScript.
- function sortOn(fieldName) {
- return (doc1, doc2) => {
- return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]});
- };
- }
-
- const st = new ShardingTest({
- name: "restart_catalog_sharded_cluster",
- mongos: 1,
- config: 1,
- shards: {
- rs: true,
- rs0: {nodes: 1},
- rs1: {nodes: 1},
- },
- other: {
- enableBalancer: false,
- configOptions: {setParameter: "enableTestCommands=1"},
- shardOptions: {setParameter: "enableTestCommands=1"},
- }
- });
- const mongos = st.s0;
- const shard0 = st.shard0;
- const shard1 = st.shard1;
-
- const dbName = "drinks";
-
- // Create a sharded collection and distribute chunks amongst the shards.
- const coffees = [
- {_id: "americano", price: 1.5},
- {_id: "espresso", price: 2.0},
- {_id: "starbucks", price: 1000.0}
- ];
- const coffeeColl = mongos.getDB(dbName).getCollection("coffee");
- assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, shard0.shardName);
- assert.commandWorked(
- mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}}));
- const splitPoint = 50.0;
- assert.commandWorked(
- mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}}));
- for (let coffee of coffees) {
- assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}}));
- }
- assert.commandWorked(mongos.adminCommand({
- moveChunk: coffeeColl.getFullName(),
- find: {price: 1000.0},
- to: shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(mongos.adminCommand({
- moveChunk: coffeeColl.getFullName(),
- find: {price: 0.0},
- to: shard0.shardName,
- _waitForDelete: true
- }));
-
- // Create an unsharded collection and throw some data in.
- const teaColl = mongos.getDB(dbName).getCollection("tea");
- const teas = [
- {_id: "darjeeling", price: 2.0},
- {_id: "earl gray", price: 1.5},
- {_id: "sencha", price: 3.5}
- ];
- for (let tea of teas) {
- assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}}));
- }
-
- // Run queries on both the sharded and unsharded collection.
- function assertShardsHaveExpectedData() {
- const dbShard0 = shard0.getDB(dbName);
- const dbShard1 = shard1.getDB(dbName);
-
- // Assert that we can find all documents in the unsharded collection by either asking
- // mongos, or consulting the primary shard directly.
- assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(),
- teas.sort(sortOn("_id")),
- "couldn't find all unsharded data via mongos");
- assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(),
- teas.sort(sortOn("_id")),
- "couldn't find all unsharded data directly via primary shard");
- assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price")));
-
- // Assert that we can find all documents in the sharded collection via scatter-gather.
- assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(),
- coffees.sort(sortOn("_id")),
- "couldn't find all sharded data via mongos scatter-gather");
-
- // Assert that we can find all documents via a query that targets multiple shards.
- assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(),
- coffees.sort(sortOn("price")),
- "couldn't find all sharded data via mongos multi-shard targeted query");
-
- // Assert that we can find all sharded documents on shard0 by shard targeting via mongos,
- // and by consulting shard0 directly.
- const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id"));
- assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(),
- dataShard0,
- "couldn't find shard0 data via targeting through mongos");
- jsTest.log(tojson(dbShard0.getCollectionInfos()));
- assert.eq(dbShard0.coffee.find().toArray(),
- dataShard0,
- "couldn't find shard0 data by directly asking shard0");
-
- // Assert that we can find all sharded documents on shard1 by shard targeting via mongos,
- // and by consulting shard1 directly.
- const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id"));
- assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(),
- dataShard1,
- "couldn't find shard1 data via targeting through mongos");
- assert.eq(dbShard1.coffee.find().toArray(),
- dataShard1,
- "couldn't find shard1 data by directly asking shard1");
- }
- assertShardsHaveExpectedData();
-
- // Run queries on the metadata stored in the config servers.
- function assertConfigServersHaveExpectedData() {
- const configDBViaMongos = mongos.getDB("config");
- const configDBViaConfigSvr = st.config0.getDB("config");
- const projectOnlyShard = {_id: 0, shard: 1};
-
- // Assert that we can find documents for chunk metadata, both via mongos and by asking the
- // config server primary directly.
- const smallestChunk = {"max.price": splitPoint};
- const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"};
- assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(),
- [smallestChunkShard]);
- assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(),
- [smallestChunkShard]);
-
- const largestChunk = {"min.price": splitPoint};
- const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"};
- assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(),
- [largestChunkShard]);
- assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(),
- [largestChunkShard]);
- }
- assertConfigServersHaveExpectedData();
-
- // Restart the catalog on the config server primary, then assert that both collection data and
- // sharding metadata are as expected.
- assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1}));
- assertConfigServersHaveExpectedData();
- assertShardsHaveExpectedData();
-
- // Remember what indexes are present, then restart the catalog on all shards via mongos.
- const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("_id"));
- const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
- assert.commandWorked(mongos.adminCommand({restartCatalog: 1}));
-
- // Verify that the data in the collections and the metadata have not changed.
- assertConfigServersHaveExpectedData();
- assertShardsHaveExpectedData();
-
- // Verify that both the sharded and unsharded collection have the same indexes as prior to the
- // restart.
- const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("_id"));
- assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart);
- const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
- assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart);
-
- // Create new indexes on both collections and verify that queries return the same results.
- [teaColl, coffeeColl].forEach(coll => {
- assert.commandWorked(coll.createIndex({price: -1}));
- assert.commandWorked(coll.createIndex({price: 1, _id: 1}));
- });
- assertShardsHaveExpectedData();
-
- // Modify the existing collections.
- const validator = {price: {$gt: 0}};
- [teaColl, coffeeColl].forEach(coll => {
- assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
- assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure);
- });
-
- // Perform another write, implicitly creating a new collection and database.
- const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2");
- const foodColl = secondTestDB.getCollection("food");
- const doc = {_id: "apple", category: "fruit"};
- assert.commandWorked(foodColl.insert(doc));
- assert.commandWorked(foodColl.createIndex({category: 1}));
- assert.eq(foodColl.find().toArray(), [doc]);
-
- // Shard the new collection and verify we can find its data again.
- assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()}));
- assert.commandWorked(
- mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}}));
- assert.eq(foodColl.find().toArray(), [doc]);
-
- // Build a new index on the new collection.
- assert.commandWorked(foodColl.createIndex({category: -1}));
- assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
-
- st.stop();
+});
+const mongos = st.s0;
+const shard0 = st.shard0;
+const shard1 = st.shard1;
+
+const dbName = "drinks";
+
+// Create a sharded collection and distribute chunks amongst the shards.
+const coffees = [
+ {_id: "americano", price: 1.5},
+ {_id: "espresso", price: 2.0},
+ {_id: "starbucks", price: 1000.0}
+];
+const coffeeColl = mongos.getDB(dbName).getCollection("coffee");
+assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, shard0.shardName);
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}}));
+const splitPoint = 50.0;
+assert.commandWorked(
+ mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}}));
+for (let coffee of coffees) {
+ assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}}));
+}
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: coffeeColl.getFullName(),
+ find: {price: 1000.0},
+ to: shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: coffeeColl.getFullName(),
+ find: {price: 0.0},
+ to: shard0.shardName,
+ _waitForDelete: true
+}));
+
+// Create an unsharded collection and throw some data in.
+const teaColl = mongos.getDB(dbName).getCollection("tea");
+const teas =
+ [{_id: "darjeeling", price: 2.0}, {_id: "earl gray", price: 1.5}, {_id: "sencha", price: 3.5}];
+for (let tea of teas) {
+ assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}}));
+}
+
+// Run queries on both the sharded and unsharded collection.
+function assertShardsHaveExpectedData() {
+ const dbShard0 = shard0.getDB(dbName);
+ const dbShard1 = shard1.getDB(dbName);
+
+ // Assert that we can find all documents in the unsharded collection by either asking
+ // mongos, or consulting the primary shard directly.
+ assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(),
+ teas.sort(sortOn("_id")),
+ "couldn't find all unsharded data via mongos");
+ assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(),
+ teas.sort(sortOn("_id")),
+ "couldn't find all unsharded data directly via primary shard");
+ assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price")));
+
+ // Assert that we can find all documents in the sharded collection via scatter-gather.
+ assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(),
+ coffees.sort(sortOn("_id")),
+ "couldn't find all sharded data via mongos scatter-gather");
+
+ // Assert that we can find all documents via a query that targets multiple shards.
+ assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(),
+ coffees.sort(sortOn("price")),
+ "couldn't find all sharded data via mongos multi-shard targeted query");
+
+ // Assert that we can find all sharded documents on shard0 by shard targeting via mongos,
+ // and by consulting shard0 directly.
+ const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id"));
+ assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(),
+ dataShard0,
+ "couldn't find shard0 data via targeting through mongos");
+ jsTest.log(tojson(dbShard0.getCollectionInfos()));
+ assert.eq(dbShard0.coffee.find().toArray(),
+ dataShard0,
+ "couldn't find shard0 data by directly asking shard0");
+
+ // Assert that we can find all sharded documents on shard1 by shard targeting via mongos,
+ // and by consulting shard1 directly.
+ const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id"));
+ assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(),
+ dataShard1,
+ "couldn't find shard1 data via targeting through mongos");
+ assert.eq(dbShard1.coffee.find().toArray(),
+ dataShard1,
+ "couldn't find shard1 data by directly asking shard1");
+}
+assertShardsHaveExpectedData();
+
+// Run queries on the metadata stored in the config servers.
+function assertConfigServersHaveExpectedData() {
+ const configDBViaMongos = mongos.getDB("config");
+ const configDBViaConfigSvr = st.config0.getDB("config");
+ const projectOnlyShard = {_id: 0, shard: 1};
+
+ // Assert that we can find documents for chunk metadata, both via mongos and by asking the
+ // config server primary directly.
+ const smallestChunk = {"max.price": splitPoint};
+ const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"};
+ assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(),
+ [smallestChunkShard]);
+ assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(),
+ [smallestChunkShard]);
+
+ const largestChunk = {"min.price": splitPoint};
+ const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"};
+ assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(),
+ [largestChunkShard]);
+ assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(),
+ [largestChunkShard]);
+}
+assertConfigServersHaveExpectedData();
+
+// Restart the catalog on the config server primary, then assert that both collection data and
+// sharding metadata are as expected.
+assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1}));
+assertConfigServersHaveExpectedData();
+assertShardsHaveExpectedData();
+
+// Remember what indexes are present, then restart the catalog on all shards via mongos.
+const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("_id"));
+const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
+assert.commandWorked(mongos.adminCommand({restartCatalog: 1}));
+
+// Verify that the data in the collections and the metadata have not changed.
+assertConfigServersHaveExpectedData();
+assertShardsHaveExpectedData();
+
+// Verify that both the sharded and unsharded collection have the same indexes as prior to the
+// restart.
+const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("_id"));
+assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart);
+const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
+assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart);
+
+// Create new indexes on both collections and verify that queries return the same results.
+[teaColl, coffeeColl].forEach(coll => {
+ assert.commandWorked(coll.createIndex({price: -1}));
+ assert.commandWorked(coll.createIndex({price: 1, _id: 1}));
+});
+assertShardsHaveExpectedData();
+
+// Modify the existing collections.
+const validator = {
+ price: {$gt: 0}
+};
+[teaColl, coffeeColl].forEach(coll => {
+ assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
+ assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure);
+});
+
+// Perform another write, implicitly creating a new collection and database.
+const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2");
+const foodColl = secondTestDB.getCollection("food");
+const doc = {
+ _id: "apple",
+ category: "fruit"
+};
+assert.commandWorked(foodColl.insert(doc));
+assert.commandWorked(foodColl.createIndex({category: 1}));
+assert.eq(foodColl.find().toArray(), [doc]);
+
+// Shard the new collection and verify we can find its data again.
+assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()}));
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}}));
+assert.eq(foodColl.find().toArray(), [doc]);
+
+// Build a new index on the new collection.
+assert.commandWorked(foodColl.createIndex({category: -1}));
+assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
+
+st.stop();
}());
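
A note on the sortOn() helper defined above: wrapping each value as {_: value} makes bsonWoCompare() impose BSON ordering, whereas JavaScript's default comparator stringifies its operands (so 10 would sort before 2). A minimal usage sketch:

// Sketch: BSON-order sort on one field via the sortOn() helper above.
const drinks = [{_id: "b", price: 10}, {_id: "a", price: 2}];
drinks.sort(sortOn("price"));
assert.eq(["a", "b"], drinks.map(d => d._id));  // 2 sorts before 10, as numbers.
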
diff --git a/jstests/noPassthrough/restart_node_with_bridge.js b/jstests/noPassthrough/restart_node_with_bridge.js
index 004b595a208..e4398f7b13c 100644
--- a/jstests/noPassthrough/restart_node_with_bridge.js
+++ b/jstests/noPassthrough/restart_node_with_bridge.js
@@ -5,60 +5,59 @@
* @tags: [requires_persistence, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // for reconnect
+load("jstests/replsets/rslib.js"); // for reconnect
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}],
- useBridge: true,
- });
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}],
+ useBridge: true,
+});
- rst.startSet();
- rst.initiate();
- rst.awaitNodesAgreeOnPrimary();
+rst.startSet();
+rst.initiate();
+rst.awaitNodesAgreeOnPrimary();
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
- const primaryDB = primary.getDB("test");
- const primaryColl = primaryDB.getCollection("restart_node_with_bridge");
+const primaryDB = primary.getDB("test");
+const primaryColl = primaryDB.getCollection("restart_node_with_bridge");
- function assertWriteReplicates() {
- assert.commandWorked(primaryColl.update(
- {_id: 0}, {$inc: {counter: 1}}, {upsert: true, writeConcern: {w: 2}}));
- }
+function assertWriteReplicates() {
+ assert.commandWorked(
+ primaryColl.update({_id: 0}, {$inc: {counter: 1}}, {upsert: true, writeConcern: {w: 2}}));
+}
- function assertWriteFailsToReplicate() {
- assert.commandFailedWithCode(
- primaryColl.update(
- {_id: 0}, {$inc: {counter: 1}}, {writeConcern: {w: 2, wtimeout: 1000}}),
- ErrorCodes.WriteConcernFailed);
- }
+function assertWriteFailsToReplicate() {
+ assert.commandFailedWithCode(
+ primaryColl.update({_id: 0}, {$inc: {counter: 1}}, {writeConcern: {w: 2, wtimeout: 1000}}),
+ ErrorCodes.WriteConcernFailed);
+}
- // By default, the primary should be connected to the secondary. Replicating a write should
- // therefore succeed.
- assertWriteReplicates();
+// By default, the primary should be connected to the secondary. Replicating a write should
+// therefore succeed.
+assertWriteReplicates();
- // We disconnect the primary from the secondary and verify that replicating a write fails.
- primary.disconnect(secondary);
- assertWriteFailsToReplicate();
+// We disconnect the primary from the secondary and verify that replicating a write fails.
+primary.disconnect(secondary);
+assertWriteFailsToReplicate();
- // We restart the secondary and verify that replicating a write still fails.
- rst.restart(secondary);
- assertWriteFailsToReplicate();
+// We restart the secondary and verify that replicating a write still fails.
+rst.restart(secondary);
+assertWriteFailsToReplicate();
- // We restart the primary and verify that replicating a write still fails.
- rst.restart(primary);
- rst.getPrimary();
- // Note that we specify 'primaryDB' to avoid having reconnect() send a message directly to the
- // mongod process rather than going through the mongobridge process as well.
- reconnect(primaryDB);
- assertWriteFailsToReplicate();
+// We restart the primary and verify that replicating a write still fails.
+rst.restart(primary);
+rst.getPrimary();
+// Note that we specify 'primaryDB' to avoid having reconnect() send a message directly to the
+// mongod process rather than going through the mongobridge process as well.
+reconnect(primaryDB);
+assertWriteFailsToReplicate();
- // We reconnect the primary to the secondary and verify that replicating a write succeeds.
- primary.reconnect(secondary);
- assertWriteReplicates();
+// We reconnect the primary to the secondary and verify that replicating a write succeeds.
+primary.reconnect(secondary);
+assertWriteReplicates();
- rst.stopSet();
+rst.stopSet();
}());
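
The probe used above generalizes: an update with {w: N} and a short wtimeout distinguishes "replicating" from "partitioned" without inspecting replication internals. A minimal sketch with a hypothetical helper name expectReplication, assuming a collection handle on the primary:

// Sketch: probe whether writes currently replicate to 'numNodes' nodes.
function expectReplication(coll, numNodes, shouldReplicate) {
    const res = coll.update({_id: 0},
                            {$inc: {counter: 1}},
                            {upsert: true, writeConcern: {w: numNodes, wtimeout: 1000}});
    if (shouldReplicate) {
        assert.commandWorked(res);
    } else {
        assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
    }
}
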
diff --git a/jstests/noPassthrough/retry_network_error_test.js b/jstests/noPassthrough/retry_network_error_test.js
index df8da41a5d4..e8fe4a78047 100644
--- a/jstests/noPassthrough/retry_network_error_test.js
+++ b/jstests/noPassthrough/retry_network_error_test.js
@@ -4,44 +4,43 @@
*/
(function() {
- "use strict";
- let node = MongoRunner.runMongod();
- let hostname = node.host;
+"use strict";
+let node = MongoRunner.runMongod();
+let hostname = node.host;
- jsTestLog("Test connecting to a healthy node.");
- let numRetries = 5;
- let sleepMs = 50;
- let attempts = 0;
+jsTestLog("Test connecting to a healthy node.");
+let numRetries = 5;
+let sleepMs = 50;
+let attempts = 0;
+retryOnNetworkError(function() {
+ attempts++;
+ new Mongo(hostname);
+}, numRetries, sleepMs);
+assert.eq(attempts, 1);
+
+jsTestLog("Test connecting to a node that is down.");
+MongoRunner.stopMongod(node);
+attempts = 0;
+try {
retryOnNetworkError(function() {
attempts++;
new Mongo(hostname);
}, numRetries, sleepMs);
- assert.eq(attempts, 1);
-
- jsTestLog("Test connecting to a node that is down.");
- MongoRunner.stopMongod(node);
- attempts = 0;
- try {
- retryOnNetworkError(function() {
- attempts++;
- new Mongo(hostname);
- }, numRetries, sleepMs);
- } catch (e) {
- jsTestLog("Caught exception after exhausting retries: " + e);
- }
- assert.eq(attempts, numRetries + 1);
-
- jsTestLog("Test connecting to a node with an invalid hostname.");
- let invalidHostname = "very-invalid-host-name";
- attempts = 0;
- try {
- retryOnNetworkError(function() {
- attempts++;
- new Mongo(invalidHostname);
- }, numRetries, sleepMs);
- } catch (e) {
- jsTestLog("Caught exception after exhausting retries: " + e);
- }
- assert.eq(attempts, numRetries + 1);
+} catch (e) {
+ jsTestLog("Caught exception after exhausting retries: " + e);
+}
+assert.eq(attempts, numRetries + 1);
+jsTestLog("Test connecting to a node with an invalid hostname.");
+let invalidHostname = "very-invalid-host-name";
+attempts = 0;
+try {
+ retryOnNetworkError(function() {
+ attempts++;
+ new Mongo(invalidHostname);
+ }, numRetries, sleepMs);
+} catch (e) {
+ jsTestLog("Caught exception after exhausting retries: " + e);
+}
+assert.eq(attempts, numRetries + 1);
}());
\ No newline at end of file
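
The arithmetic asserted above (attempts === numRetries + 1) follows from the helper's contract: one initial attempt plus up to numRetries retries. A simplified sketch of that contract, not the shell's actual implementation:

// Sketch: invoke 'func', retrying on network errors up to 'numRetries' extra times.
function retryOnNetworkErrorSketch(func, numRetries, sleepMs) {
    while (true) {
        try {
            return func();
        } catch (e) {
            // 'numRetries-- === 0' tests the value before decrementing, so the
            // total attempt count is numRetries + 1.
            if (!isNetworkError(e) || numRetries-- === 0) {
                throw e;
            }
            sleep(sleepMs);
        }
    }
}
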
diff --git a/jstests/noPassthrough/retryable_writes_standalone_api.js b/jstests/noPassthrough/retryable_writes_standalone_api.js
index ff091624358..228a2c8ea99 100644
--- a/jstests/noPassthrough/retryable_writes_standalone_api.js
+++ b/jstests/noPassthrough/retryable_writes_standalone_api.js
@@ -2,24 +2,24 @@
* Verify behavior of retryable write commands on a standalone mongod.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const standalone = MongoRunner.runMongod();
- const testDB = standalone.getDB("test");
+const standalone = MongoRunner.runMongod();
+const testDB = standalone.getDB("test");
- // Commands sent to standalone nodes are not allowed to have transaction numbers.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {insert: "foo", documents: [{x: 1}], txnNumber: NumberLong(1), lsid: {id: UUID()}}),
- ErrorCodes.IllegalOperation,
- "expected command with transaction number to fail on standalone mongod");
+// Commands sent to standalone nodes are not allowed to have transaction numbers.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {insert: "foo", documents: [{x: 1}], txnNumber: NumberLong(1), lsid: {id: UUID()}}),
+ ErrorCodes.IllegalOperation,
+ "expected command with transaction number to fail on standalone mongod");
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
}());
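
For contrast with the standalone failure above, the same command shape (lsid plus txnNumber) is precisely what makes a write retryable on a replica set: resending an identical statement is deduplicated rather than applied twice. A minimal sketch, assuming a connection rsConn to a replica-set primary and an empty test.foo collection (both hypothetical here):

// Sketch: the same retryable write sent twice is executed once.
const lsid = {id: UUID()};
const retryableInsert =
    {insert: "foo", documents: [{x: 1}], lsid: lsid, txnNumber: NumberLong(1)};
assert.commandWorked(rsConn.getDB("test").runCommand(retryableInsert));
assert.commandWorked(rsConn.getDB("test").runCommand(retryableInsert));
assert.eq(1, rsConn.getDB("test").foo.find({x: 1}).itcount());  // Not 2.
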
diff --git a/jstests/noPassthrough/rollback_wt_cache_full.js b/jstests/noPassthrough/rollback_wt_cache_full.js
index c5d74431310..6ea271b1dba 100644
--- a/jstests/noPassthrough/rollback_wt_cache_full.js
+++ b/jstests/noPassthrough/rollback_wt_cache_full.js
@@ -4,87 +4,89 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/rollback_test.js');
+load('jstests/replsets/libs/rollback_test.js');
- // Use constrained cache size for both data bearing nodes so that it doesn't matter which node
- // RollbackTest selects as the rollback node.
- const nodeOptions = {
- // Don't log slow operations.
- slowms: 30000,
- // Constrain the storage engine cache size to make it easier to fill it up with unflushed
- // modifications.
- // This test uses a smaller cache size than the other wt_cache_full.js tests because it
- // has to work with the hard-coded 300 MB refetch limit in the pre-4.0 rollback
- // implementation.
- wiredTigerCacheSizeGB: 0.5,
- };
- const rst = new ReplSetTest({
- nodes: 3,
- nodeOptions: nodeOptions,
- useBridge: true,
- });
+// Use a constrained cache size for both data-bearing nodes so that it doesn't matter which
+// node RollbackTest selects as the rollback node.
+const nodeOptions = {
+ // Don't log slow operations.
+ slowms: 30000,
+ // Constrain the storage engine cache size to make it easier to fill it up with unflushed
+ // modifications.
+ // This test uses a smaller cache size than the other wt_cache_full.js tests because it
+ // has to work with the hard-coded 300 MB refetch limit in the pre-4.0 rollback
+ // implementation.
+ wiredTigerCacheSizeGB: 0.5,
+};
+const rst = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: nodeOptions,
+ useBridge: true,
+});
- rst.startSet();
- let config = rst.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
+rst.startSet();
+let config = rst.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
- // Prior to 4.0, rollback imposed a 300 MB limit on the total size of documents to refetch from
- // the sync source. Therefore, we select values for numDocs and minDocSizeMB, while accounting
- // for some small per-document overhead, such that we are able to stay under this 300 MB limit.
- // This test uses single updates, rather than the multiple updates in the other wt_cache_full.js
- // tests because the refetching logic in the pre-4.0 algorithm depends on which documents were
- // modified, not on the number of modifications to each document.
- // This test has been observed to hang under some non-standard build platforms so we are
- // giving ourselves a slightly larger allowance of 5 documents from the theoretical maximum
- // of documents calculated from the rollback size limit.
- // Using a numDocs value of (maxDocs - 5) is sufficiently large enough to reproduce the memory
- // pressure issue in 3.6.5 but small enough for this test to perform uniformly across most of
- // the platforms in our continuous integration system.
- const rollbackSizeLimitMB = 300;
- const minDocSizeMB = 10;
- const largeString = 'x'.repeat(minDocSizeMB * 1024 * 1024);
- // TODO(SERVER-39774): Increase numDocs to Math.floor(rollbackSizeLimitMB / minDocSizeMB).
- const numDocs = 1;
+// Prior to 4.0, rollback imposed a 300 MB limit on the total size of documents to refetch from
+// the sync source. Therefore, we select values for numDocs and minDocSizeMB, while accounting
+// for some small per-document overhead, such that we are able to stay under this 300 MB limit.
+// This test uses single updates, rather than the multiple updates in the other wt_cache_full.js
+// tests because the refetching logic in the pre-4.0 algorithm depends on which documents were
+// modified, not on the number of modifications to each document.
+// This test has been observed to hang on some non-standard build platforms, so we allow a
+// margin of 5 documents below the theoretical maximum calculated from the rollback size limit.
+// Using a numDocs value of (maxDocs - 5) is sufficiently large to reproduce the memory
+// pressure issue in 3.6.5 but small enough for this test to perform uniformly across most of
+// the platforms in our continuous integration system.
+const rollbackSizeLimitMB = 300;
+const minDocSizeMB = 10;
+const largeString = 'x'.repeat(minDocSizeMB * 1024 * 1024);
+// TODO(SERVER-39774): Increase numDocs to Math.floor(rollbackSizeLimitMB / minDocSizeMB).
+const numDocs = 1;
- // Operations that will be present on both nodes, before the common point.
- const collName = 'test.t';
- let CommonOps = (node) => {
- const coll = node.getCollection(collName);
- jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' +
- collName + '.');
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(coll.save(
- {_id: i, a: 0, x: largeString},
- {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
- };
+// Operations that will be present on both nodes, before the common point.
+const collName = 'test.t';
+let CommonOps = (node) => {
+ const coll = node.getCollection(collName);
+ jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' +
+ collName + '.');
+ for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, a: 0, x: largeString},
+ {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+ }
+ assert.eq(numDocs, coll.find().itcount());
+};
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- const coll = node.getCollection(collName);
- jsTestLog('Updating ' + numDocs +
- ' documents on the primary. These updates will be rolled back.');
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(coll.update({_id: i}, {$inc: {a: 1}}));
- }
- };
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ const coll = node.getCollection(collName);
+ jsTestLog('Updating ' + numDocs +
+ ' documents on the primary. These updates will be rolled back.');
+ for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {a: 1}}));
+ }
+};
- // Set up Rollback Test.
- const rollbackTest = new RollbackTest(rst.name, rst);
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+const rollbackTest = new RollbackTest(rst.name, rst);
+CommonOps(rollbackTest.getPrimary());
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+rollbackTest.stop();
})();
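
Spelling out the sizing arithmetic from the comments above (the TODO keeps numDocs pinned at 1 until SERVER-39774 is resolved; the variable name below is hypothetical):

// Sketch: how numDocs would be derived from the pre-4.0 refetch limit.
const maxDocs = Math.floor(rollbackSizeLimitMB / minDocSizeMB);  // 300 / 10 = 30.
const intendedNumDocs = maxDocs - 5;  // 25: margin for per-document overhead.
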
diff --git a/jstests/noPassthrough/rollback_wt_drop.js b/jstests/noPassthrough/rollback_wt_drop.js
index fcf9c2522d9..8c235695439 100644
--- a/jstests/noPassthrough/rollback_wt_drop.js
+++ b/jstests/noPassthrough/rollback_wt_drop.js
@@ -3,150 +3,148 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
-
- load('jstests/replsets/libs/rollback_test.js');
-
- // Returns list of collections in database, including pending drops.
- // Assumes all collections fit in first batch of results.
- function listCollections(database) {
- return assert
- .commandWorked(database.runCommand({listCollections: 1, includePendingDrops: true}))
- .cursor.firstBatch;
- }
-
- // Operations that will be present on both nodes, before the common point.
- const collName = 'test.t';
- const renameTargetCollName = 'test.x';
- const noOpsToRollbackCollName = 'test.k';
- let CommonOps = (node) => {
- const coll = node.getCollection(collName);
- const mydb = coll.getDB();
- assert.commandWorked(mydb.createCollection(coll.getName()));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.insert({_id: 0, a: 0}));
-
- // Replicate a drop.
- const replicatedDropCollName = 'w';
- const collToDrop = mydb.getCollection(replicatedDropCollName);
- assert.commandWorked(mydb.createCollection(collToDrop.getName()));
- assert(collToDrop.drop());
-
- // This collection will be dropped during a rename.
- const renameTargetColl = node.getCollection(renameTargetCollName);
- assert.commandWorked(mydb.createCollection(renameTargetColl.getName()));
- assert.commandWorked(renameTargetColl.createIndex({b: 1}));
- assert.commandWorked(renameTargetColl.insert({_id: 8, b: 8}));
- assert.commandWorked(renameTargetColl.insert({_id: 9, b: 9}));
-
- // This collection will be dropped without any CRUD ops to rollback.
- const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
- assert.commandWorked(mydb.createCollection(noOpsToRollbackColl.getName()));
- assert.commandWorked(noOpsToRollbackColl.createIndex({c: 1}));
- assert.commandWorked(noOpsToRollbackColl.insert({_id: 20, c: 20}));
- assert.commandWorked(noOpsToRollbackColl.insert({_id: 21, c: 21}));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- const coll = node.getCollection(collName);
-
- // Rollback algorithm may refer to dropped collection if it has to undo an insert.
- assert.commandWorked(coll.insert({_id: 1, a: 1}));
-
- const mydb = coll.getDB();
- const collectionsBeforeDrop = listCollections(mydb);
- assert(coll.drop());
- const collectionsAfterDrop = listCollections(mydb);
- const supportsPendingDrops = mydb.serverStatus().storageEngine.supportsPendingDrops;
- jsTestLog('supportsPendingDrops = ' + supportsPendingDrops);
- if (!supportsPendingDrops) {
- assert.eq(collectionsAfterDrop.length,
- collectionsBeforeDrop.length,
- 'listCollections did not report the same number of collections in database ' +
- mydb.getName() + ' after dropping collection ' + coll.getFullName() +
- '. Before: ' + tojson(collectionsBeforeDrop) + '. After: ' +
- tojson(collectionsAfterDrop));
- } else {
- assert.lt(collectionsAfterDrop.length,
- collectionsBeforeDrop.length,
- 'listCollections did not report fewer collections in database ' +
- mydb.getName() + ' after dropping collection ' + coll.getFullName() +
- '. Before: ' + tojson(collectionsBeforeDrop) + '. After: ' +
- tojson(collectionsAfterDrop));
- assert.gt(mydb.serverStatus().storageEngine.dropPendingIdents,
- 0,
- 'There is no drop pending ident in the storage engine.');
- }
-
- const renameTargetColl = node.getCollection(renameTargetCollName);
- assert.commandWorked(renameTargetColl.insert({_id: 10, b: 10}));
- assert.commandWorked(renameTargetColl.insert({_id: 11, b: 11}));
- const renameSourceColl = mydb.getCollection('z');
- assert.commandWorked(mydb.createCollection(renameSourceColl.getName()));
- assert.commandWorked(renameSourceColl.renameCollection(renameTargetColl.getName(), true));
-
- const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
- assert(noOpsToRollbackColl.drop());
-
- // This collection will not exist after rollback.
- const tempColl = node.getCollection('test.a');
- assert.commandWorked(mydb.createCollection(tempColl.getName()));
- assert.commandWorked(tempColl.insert({_id: 100, y: 100}));
- assert(tempColl.drop());
-
- // restartCatalog should not remove drop-pending idents.
- assert.commandWorked(mydb.adminCommand({restartCatalog: 1}));
- };
-
- // Set up Rollback Test.
- const rollbackTest = new RollbackTest();
- CommonOps(rollbackTest.getPrimary());
-
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- {
- // Check collection drop oplog entry.
- const replTest = rollbackTest.getTestFixture();
- const ops = replTest.dumpOplog(rollbackNode, {ns: 'test.$cmd', 'o.drop': 't'});
- assert.eq(1, ops.length);
- const op = ops[0];
- assert(op.hasOwnProperty('o2'), 'expected o2 field in drop oplog entry: ' + tojson(op));
- assert(op.o2.hasOwnProperty('numRecords'),
- 'expected count in drop oplog entry: ' + tojson(op));
- assert.eq(2, op.o2.numRecords, 'incorrect count in drop oplog entry: ' + tojson(op));
- }
-
- // Check collection rename oplog entry.
- {
- const replTest = rollbackTest.getTestFixture();
- const ops = replTest.dumpOplog(
- rollbackNode, {ns: 'test.$cmd', 'o.renameCollection': 'test.z', 'o.to': 'test.x'});
- assert.eq(1, ops.length);
- const op = ops[0];
- assert(op.hasOwnProperty('o2'), 'expected o2 field in rename oplog entry: ' + tojson(op));
- assert(op.o2.hasOwnProperty('numRecords'),
- 'expected count in rename oplog entry: ' + tojson(op));
- assert.eq(4, op.o2.numRecords, 'incorrect count in rename oplog entry: ' + tojson(op));
+'use strict';
+
+load('jstests/replsets/libs/rollback_test.js');
+
+// Returns list of collections in database, including pending drops.
+// Assumes all collections fit in first batch of results.
+function listCollections(database) {
+ return assert
+ .commandWorked(database.runCommand({listCollections: 1, includePendingDrops: true}))
+ .cursor.firstBatch;
+}
+
+// Operations that will be present on both nodes, before the common point.
+const collName = 'test.t';
+const renameTargetCollName = 'test.x';
+const noOpsToRollbackCollName = 'test.k';
+let CommonOps = (node) => {
+ const coll = node.getCollection(collName);
+ const mydb = coll.getDB();
+ assert.commandWorked(mydb.createCollection(coll.getName()));
+ assert.commandWorked(coll.createIndex({a: 1}));
+ assert.commandWorked(coll.insert({_id: 0, a: 0}));
+
+ // Replicate a drop.
+ const replicatedDropCollName = 'w';
+ const collToDrop = mydb.getCollection(replicatedDropCollName);
+ assert.commandWorked(mydb.createCollection(collToDrop.getName()));
+ assert(collToDrop.drop());
+
+ // This collection will be dropped during a rename.
+ const renameTargetColl = node.getCollection(renameTargetCollName);
+ assert.commandWorked(mydb.createCollection(renameTargetColl.getName()));
+ assert.commandWorked(renameTargetColl.createIndex({b: 1}));
+ assert.commandWorked(renameTargetColl.insert({_id: 8, b: 8}));
+ assert.commandWorked(renameTargetColl.insert({_id: 9, b: 9}));
+
+ // This collection will be dropped without any CRUD ops to rollback.
+ const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
+ assert.commandWorked(mydb.createCollection(noOpsToRollbackColl.getName()));
+ assert.commandWorked(noOpsToRollbackColl.createIndex({c: 1}));
+ assert.commandWorked(noOpsToRollbackColl.insert({_id: 20, c: 20}));
+ assert.commandWorked(noOpsToRollbackColl.insert({_id: 21, c: 21}));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ const coll = node.getCollection(collName);
+
+ // Rollback algorithm may refer to dropped collection if it has to undo an insert.
+ assert.commandWorked(coll.insert({_id: 1, a: 1}));
+
+ const mydb = coll.getDB();
+ const collectionsBeforeDrop = listCollections(mydb);
+ assert(coll.drop());
+ const collectionsAfterDrop = listCollections(mydb);
+ const supportsPendingDrops = mydb.serverStatus().storageEngine.supportsPendingDrops;
+ jsTestLog('supportsPendingDrops = ' + supportsPendingDrops);
+ if (!supportsPendingDrops) {
+ assert.eq(collectionsAfterDrop.length,
+ collectionsBeforeDrop.length,
+ 'listCollections did not report the same number of collections in database ' +
+ mydb.getName() + ' after dropping collection ' + coll.getFullName() +
+ '. Before: ' + tojson(collectionsBeforeDrop) +
+ '. After: ' + tojson(collectionsAfterDrop));
+ } else {
+ assert.lt(collectionsAfterDrop.length,
+ collectionsBeforeDrop.length,
+ 'listCollections did not report fewer collections in database ' + mydb.getName() +
+ ' after dropping collection ' + coll.getFullName() + '. Before: ' +
+ tojson(collectionsBeforeDrop) + '. After: ' + tojson(collectionsAfterDrop));
+ assert.gt(mydb.serverStatus().storageEngine.dropPendingIdents,
+ 0,
+ 'There is no drop pending ident in the storage engine.');
}
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check collection count.
- const primary = rollbackTest.getPrimary();
- const coll = primary.getCollection(collName);
- assert.eq(1, coll.find().itcount());
- assert.eq(1, coll.count());
- const renameTargetColl = primary.getCollection(renameTargetCollName);
- assert.eq(2, renameTargetColl.find().itcount());
- assert.eq(2, renameTargetColl.count());
- const noOpsToRollbackColl = primary.getCollection(noOpsToRollbackCollName);
- assert.eq(2, noOpsToRollbackColl.find().itcount());
- assert.eq(2, noOpsToRollbackColl.count());
-
- rollbackTest.stop();
+ const renameTargetColl = node.getCollection(renameTargetCollName);
+ assert.commandWorked(renameTargetColl.insert({_id: 10, b: 10}));
+ assert.commandWorked(renameTargetColl.insert({_id: 11, b: 11}));
+ const renameSourceColl = mydb.getCollection('z');
+ assert.commandWorked(mydb.createCollection(renameSourceColl.getName()));
+ assert.commandWorked(renameSourceColl.renameCollection(renameTargetColl.getName(), true));
+
+ const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
+ assert(noOpsToRollbackColl.drop());
+
+ // This collection will not exist after rollback.
+ const tempColl = node.getCollection('test.a');
+ assert.commandWorked(mydb.createCollection(tempColl.getName()));
+ assert.commandWorked(tempColl.insert({_id: 100, y: 100}));
+ assert(tempColl.drop());
+
+ // restartCatalog should not remove drop-pending idents.
+ assert.commandWorked(mydb.adminCommand({restartCatalog: 1}));
+};
+
+// Set up Rollback Test.
+const rollbackTest = new RollbackTest();
+CommonOps(rollbackTest.getPrimary());
+
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+{
+ // Check collection drop oplog entry.
+ const replTest = rollbackTest.getTestFixture();
+ const ops = replTest.dumpOplog(rollbackNode, {ns: 'test.$cmd', 'o.drop': 't'});
+ assert.eq(1, ops.length);
+ const op = ops[0];
+ assert(op.hasOwnProperty('o2'), 'expected o2 field in drop oplog entry: ' + tojson(op));
+ assert(op.o2.hasOwnProperty('numRecords'), 'expected count in drop oplog entry: ' + tojson(op));
+ assert.eq(2, op.o2.numRecords, 'incorrect count in drop oplog entry: ' + tojson(op));
+}
+
+// Check collection rename oplog entry.
+{
+ const replTest = rollbackTest.getTestFixture();
+ const ops = replTest.dumpOplog(
+ rollbackNode, {ns: 'test.$cmd', 'o.renameCollection': 'test.z', 'o.to': 'test.x'});
+ assert.eq(1, ops.length);
+ const op = ops[0];
+ assert(op.hasOwnProperty('o2'), 'expected o2 field in rename oplog entry: ' + tojson(op));
+ assert(op.o2.hasOwnProperty('numRecords'),
+ 'expected count in rename oplog entry: ' + tojson(op));
+ assert.eq(4, op.o2.numRecords, 'incorrect count in rename oplog entry: ' + tojson(op));
+}
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check collection count.
+const primary = rollbackTest.getPrimary();
+const coll = primary.getCollection(collName);
+assert.eq(1, coll.find().itcount());
+assert.eq(1, coll.count());
+const renameTargetColl = primary.getCollection(renameTargetCollName);
+assert.eq(2, renameTargetColl.find().itcount());
+assert.eq(2, renameTargetColl.count());
+const noOpsToRollbackColl = primary.getCollection(noOpsToRollbackCollName);
+assert.eq(2, noOpsToRollbackColl.find().itcount());
+assert.eq(2, noOpsToRollbackColl.count());
+
+rollbackTest.stop();
})();
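
The drop-pending bookkeeping checked above can be condensed into a single probe: the difference between listCollections with and without includePendingDrops is the number of collections awaiting their final drop. A minimal sketch with a hypothetical helper name countPendingDrops, assuming the storage engine supports pending drops:

// Sketch: count collections whose drop is deferred by the storage engine.
function countPendingDrops(mydb) {
    const withPending =
        assert.commandWorked(mydb.runCommand({listCollections: 1, includePendingDrops: true}))
            .cursor.firstBatch.length;
    const withoutPending =
        assert.commandWorked(mydb.runCommand({listCollections: 1})).cursor.firstBatch.length;
    return withPending - withoutPending;
}
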
diff --git a/jstests/noPassthrough/router_transactions_metrics.js b/jstests/noPassthrough/router_transactions_metrics.js
index 9fd6c5f83eb..778f2adebea 100644
--- a/jstests/noPassthrough/router_transactions_metrics.js
+++ b/jstests/noPassthrough/router_transactions_metrics.js
@@ -2,576 +2,575 @@
// basic cases.
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- // Verifies the transaction server status response has the fields that we expect.
- function verifyServerStatusFields(res) {
- const expectedFields = [
- "totalStarted",
- "totalAborted",
- "abortCause",
- "totalCommitted",
- "totalContactedParticipants",
- "totalParticipantsAtCommit",
- "totalRequestsTargeted",
- "commitTypes",
- ];
-
- assert(
- res.hasOwnProperty("transactions"),
- "Expected serverStatus response to have a 'transactions' field, res: " + tojson(res));
-
- assert.hasFields(res.transactions,
- expectedFields,
- "The 'transactions' field did not have all of the expected fields, res: " +
- tojson(res.transactions));
-
- assert.eq(expectedFields.length,
- Object.keys(res.transactions).length,
- "the 'transactions' field had an unexpected number of fields, res: " +
- tojson(res.transactions));
-
- // Verify the "commitTypes" sub-object has the expected fields.
- const commitTypes = [
- "noShards",
- "singleShard",
- "singleWriteShard",
- "readOnly",
- "twoPhaseCommit",
- "recoverWithToken",
- ];
- const commitTypeFields = ["initiated", "successful", "successfulDurationMicros"];
-
- assert.hasFields(res.transactions.commitTypes,
- commitTypes,
- "The 'transactions' field did not have each expected commit type, res: " +
- tojson(res.transactions));
-
- assert.eq(commitTypes.length,
- Object.keys(res.transactions.commitTypes).length,
- "the 'transactions' field had an unexpected number of commit types, res: " +
- tojson(res.transactions));
-
- commitTypes.forEach((type) => {
- assert.hasFields(res.transactions.commitTypes[type],
- commitTypeFields,
- "commit type " + type +
- " did not have all the expected fields, commit types: " +
- tojson(res.transactions.commitTypes));
-
- assert.eq(commitTypeFields.length,
- Object.keys(res.transactions.commitTypes[type]).length,
- "commit type " + type +
- " had an unexpected number of fields, commit types: " +
- tojson(res.transactions.commitTypes));
- });
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Verifies the transaction server status response has the fields that we expect.
+function verifyServerStatusFields(res) {
+ const expectedFields = [
+ "totalStarted",
+ "totalAborted",
+ "abortCause",
+ "totalCommitted",
+ "totalContactedParticipants",
+ "totalParticipantsAtCommit",
+ "totalRequestsTargeted",
+ "commitTypes",
+ ];
+
+ assert(res.hasOwnProperty("transactions"),
+ "Expected serverStatus response to have a 'transactions' field, res: " + tojson(res));
+
+ assert.hasFields(res.transactions,
+ expectedFields,
+ "The 'transactions' field did not have all of the expected fields, res: " +
+ tojson(res.transactions));
+
+ assert.eq(expectedFields.length,
+ Object.keys(res.transactions).length,
+ "the 'transactions' field had an unexpected number of fields, res: " +
+ tojson(res.transactions));
+
+ // Verify the "commitTypes" sub-object has the expected fields.
+ const commitTypes = [
+ "noShards",
+ "singleShard",
+ "singleWriteShard",
+ "readOnly",
+ "twoPhaseCommit",
+ "recoverWithToken",
+ ];
+ const commitTypeFields = ["initiated", "successful", "successfulDurationMicros"];
+
+ assert.hasFields(res.transactions.commitTypes,
+ commitTypes,
+ "The 'transactions' field did not have each expected commit type, res: " +
+ tojson(res.transactions));
+
+ assert.eq(commitTypes.length,
+ Object.keys(res.transactions.commitTypes).length,
+ "the 'transactions' field had an unexpected number of commit types, res: " +
+ tojson(res.transactions));
+
+ commitTypes.forEach((type) => {
+ assert.hasFields(res.transactions.commitTypes[type],
+ commitTypeFields,
+ "commit type " + type +
+ " did not have all the expected fields, commit types: " +
+ tojson(res.transactions.commitTypes));
+
+ assert.eq(commitTypeFields.length,
+ Object.keys(res.transactions.commitTypes[type]).length,
+ "commit type " + type + " had an unexpected number of fields, commit types: " +
+ tojson(res.transactions.commitTypes));
+ });
+}
+
+class ExpectedCommitType {
+ constructor() {
+ this.initiated = 0;
+ this.successful = 0;
+ this.successfulDurationMicros = 0;
}
+}
- class ExpectedCommitType {
- constructor() {
- this.initiated = 0;
- this.successful = 0;
- this.successfulDurationMicros = 0;
- }
+class ExpectedAbortCause {
+ constructor() {
}
-
- class ExpectedAbortCause {
- constructor() {
- }
+}
+
+class ExpectedTransactionServerStatus {
+ constructor() {
+ this.totalStarted = 0;
+ this.totalAborted = 0;
+ this.abortCause = new ExpectedAbortCause();
+ this.totalCommitted = 0;
+ this.totalContactedParticipants = 0;
+ this.totalParticipantsAtCommit = 0;
+ this.totalRequestsTargeted = 0;
+ this.commitTypes = {
+ noShards: new ExpectedCommitType(),
+ singleShard: new ExpectedCommitType(),
+ singleWriteShard: new ExpectedCommitType(),
+ readOnly: new ExpectedCommitType(),
+ twoPhaseCommit: new ExpectedCommitType(),
+ recoverWithToken: new ExpectedCommitType(),
+ };
}
-
- class ExpectedTransactionServerStatus {
- constructor() {
- this.totalStarted = 0;
- this.totalAborted = 0;
- this.abortCause = new ExpectedAbortCause();
- this.totalCommitted = 0;
- this.totalContactedParticipants = 0;
- this.totalParticipantsAtCommit = 0;
- this.totalRequestsTargeted = 0;
- this.commitTypes = {
- noShards: new ExpectedCommitType(),
- singleShard: new ExpectedCommitType(),
- singleWriteShard: new ExpectedCommitType(),
- readOnly: new ExpectedCommitType(),
- twoPhaseCommit: new ExpectedCommitType(),
- recoverWithToken: new ExpectedCommitType(),
- };
+}
+
+// Verifies the transaction values in the server status response match the provided values.
+function verifyServerStatusValues(st, expectedStats) {
+ const res = assert.commandWorked(st.s.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(res);
+
+ const stats = res.transactions;
+ assert.eq(expectedStats.totalStarted,
+ stats.totalStarted,
+ "unexpected totalStarted, res: " + tojson(stats));
+ assert.eq(expectedStats.totalAborted,
+ stats.totalAborted,
+ "unexpected totalAborted, res: " + tojson(stats));
+ assert.eq(expectedStats.totalCommitted,
+ stats.totalCommitted,
+ "unexpected totalCommitted, res: " + tojson(stats));
+ assert.eq(expectedStats.totalContactedParticipants,
+ stats.totalContactedParticipants,
+ "unexpected totalContactedParticipants, res: " + tojson(stats));
+ assert.eq(expectedStats.totalParticipantsAtCommit,
+ stats.totalParticipantsAtCommit,
+ "unexpected totalParticipantsAtCommit, res: " + tojson(stats));
+ assert.eq(expectedStats.totalRequestsTargeted,
+ stats.totalRequestsTargeted,
+ "unexpected totalRequestsTargeted, res: " + tojson(stats));
+
+ const commitTypes = res.transactions.commitTypes;
+ Object.keys(commitTypes).forEach((commitType) => {
+ assert.eq(
+ expectedStats.commitTypes[commitType].initiated,
+ commitTypes[commitType].initiated,
+ "unexpected initiated for " + commitType + ", commit types: " + tojson(commitTypes));
+ assert.eq(
+ expectedStats.commitTypes[commitType].successful,
+ commitTypes[commitType].successful,
+ "unexpected successful for " + commitType + ", commit types: " + tojson(commitTypes));
+
+ assert.lte(expectedStats.commitTypes[commitType].successfulDurationMicros,
+ commitTypes[commitType].successfulDurationMicros,
+ "unexpected successfulDurationMicros for " + commitType +
+ ", commit types: " + tojson(commitTypes));
+ expectedStats.commitTypes[commitType].successfulDurationMicros =
+ commitTypes[commitType].successfulDurationMicros;
+
+ if (commitTypes[commitType].successful != 0) {
+ assert.gt(commitTypes[commitType].successfulDurationMicros,
+ 0,
+ "unexpected successfulDurationMicros for " + commitType +
+ ", commit types: " + tojson(commitTypes));
}
- }
-
- // Verifies the transaction values in the server status response match the provided values.
- function verifyServerStatusValues(st, expectedStats) {
- const res = assert.commandWorked(st.s.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(res);
-
- const stats = res.transactions;
- assert.eq(expectedStats.totalStarted,
- stats.totalStarted,
- "unexpected totalStarted, res: " + tojson(stats));
- assert.eq(expectedStats.totalAborted,
- stats.totalAborted,
- "unexpected totalAborted, res: " + tojson(stats));
- assert.eq(expectedStats.totalCommitted,
- stats.totalCommitted,
- "unexpected totalCommitted, res: " + tojson(stats));
- assert.eq(expectedStats.totalContactedParticipants,
- stats.totalContactedParticipants,
- "unexpected totalContactedParticipants, res: " + tojson(stats));
- assert.eq(expectedStats.totalParticipantsAtCommit,
- stats.totalParticipantsAtCommit,
- "unexpected totalParticipantsAtCommit, res: " + tojson(stats));
- assert.eq(expectedStats.totalRequestsTargeted,
- stats.totalRequestsTargeted,
- "unexpected totalRequestsTargeted, res: " + tojson(stats));
-
- const commitTypes = res.transactions.commitTypes;
- Object.keys(commitTypes).forEach((commitType) => {
- assert.eq(expectedStats.commitTypes[commitType].initiated,
- commitTypes[commitType].initiated,
- "unexpected initiated for " + commitType + ", commit types: " +
- tojson(commitTypes));
- assert.eq(expectedStats.commitTypes[commitType].successful,
- commitTypes[commitType].successful,
- "unexpected successful for " + commitType + ", commit types: " +
- tojson(commitTypes));
-
- assert.lte(expectedStats.commitTypes[commitType].successfulDurationMicros,
- commitTypes[commitType].successfulDurationMicros,
- "unexpected successfulDurationMicros for " + commitType +
- ", commit types: " + tojson(commitTypes));
- expectedStats.commitTypes[commitType].successfulDurationMicros =
- commitTypes[commitType].successfulDurationMicros;
-
- if (commitTypes[commitType].successful != 0) {
- assert.gt(commitTypes[commitType].successfulDurationMicros,
- 0,
- "unexpected successfulDurationMicros for " + commitType +
- ", commit types: " + tojson(commitTypes));
- }
- });
-
- const abortCause = res.transactions.abortCause;
- Object.keys(abortCause).forEach((cause) => {
- assert.eq(expectedStats.abortCause[cause],
- abortCause[cause],
- "unexpected abortCause for " + cause + ", res: " + tojson(stats));
- });
-
- assert.eq(Object.keys(abortCause).length,
- Object.keys(expectedStats.abortCause).length,
- "the 'transactions' field had an unexpected number of abort causes, res: " +
- tojson(stats));
- }
-
- function abortFromUnderneath(st, session) {
- st._rs.forEach((rs) => {
- assert.commandWorkedOrFailedWithCode(rs.test.getPrimary().adminCommand({
- abortTransaction: 1,
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- });
- }
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
-
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+ });
+
+ const abortCause = res.transactions.abortCause;
+ Object.keys(abortCause).forEach((cause) => {
+ assert.eq(expectedStats.abortCause[cause],
+ abortCause[cause],
+ "unexpected abortCause for " + cause + ", res: " + tojson(stats));
+ });
+
+ assert.eq(
+ Object.keys(abortCause).length,
+ Object.keys(expectedStats.abortCause).length,
+ "the 'transactions' field had an unexpected number of abort causes, res: " + tojson(stats));
+}
+
+function abortFromUnderneath(st, session) {
+ st._rs.forEach((rs) => {
+ assert.commandWorkedOrFailedWithCode(rs.test.getPrimary().adminCommand({
+ abortTransaction: 1,
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ autocommit: false
+ }),
+ ErrorCodes.NoSuchTransaction);
+ });
+}
- const otherRouterSession = st.s1.startSession();
- const otherRouterSessionDB = otherRouterSession.getDatabase(dbName);
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- // Set up two chunks: [-inf, 0), [0, inf) one on each shard, with one document in each.
+const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
- assert.commandWorked(sessionDB[collName].insert({_id: -1}));
- assert.commandWorked(sessionDB[collName].insert({_id: 1}));
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns});
+const otherRouterSession = st.s1.startSession();
+const otherRouterSessionDB = otherRouterSession.getDatabase(dbName);
- let expectedStats = new ExpectedTransactionServerStatus();
+// Set up two chunks: [-inf, 0), [0, inf) one on each shard, with one document in each.
- //
- // Helpers for setting up transactions that will trigger the various commit paths.
- //
+assert.commandWorked(sessionDB[collName].insert({_id: -1}));
+assert.commandWorked(sessionDB[collName].insert({_id: 1}));
- function startNoShardsTransaction() {
- session.startTransaction();
- assert.commandWorked(session.getDatabase("doesntExist").runCommand({find: collName}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns});
- expectedStats.totalStarted += 1;
- verifyServerStatusValues(st, expectedStats);
- }
+let expectedStats = new ExpectedTransactionServerStatus();
- function startSingleShardTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({x: 1}));
+//
+// Helpers for setting up transactions that will trigger the various commit paths.
+//
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- }
+function startNoShardsTransaction() {
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("doesntExist").runCommand({find: collName}));
- function startSingleWriteShardTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({x: 1}));
+ expectedStats.totalStarted += 1;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
+function startSingleShardTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({x: 1}));
- assert.commandWorked(sessionDB.runCommand({find: collName}));
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- }
+function startSingleWriteShardTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({x: 1}));
- function startReadOnlyTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: collName}));
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 2;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- }
+ assert.commandWorked(sessionDB.runCommand({find: collName}));
- function startTwoPhaseCommitTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: -5}));
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
+function startReadOnlyTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: collName}));
- assert.commandWorked(sessionDB[collName].insert({_id: 5}));
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 2;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- }
+function startTwoPhaseCommitTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({_id: -5}));
- function setUpTransactionToRecoverCommit({shouldCommit}) {
- otherRouterSession.startTransaction();
- let resWithRecoveryToken = assert.commandWorked(
- otherRouterSessionDB.runCommand({insert: collName, documents: [{x: 5}]}));
- if (shouldCommit) {
- assert.commandWorked(otherRouterSession.commitTransaction_forTesting());
- } else {
- assert.commandWorked(otherRouterSession.abortTransaction_forTesting());
- }
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
- // The stats on the main mongos shouldn't have changed.
- verifyServerStatusValues(st, expectedStats);
+ assert.commandWorked(sessionDB[collName].insert({_id: 5}));
- return resWithRecoveryToken.recoveryToken;
- }
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+}
- //
- // Test cases for serverStatus output.
- //
-
- jsTest.log("Default values.");
- (() => {
- verifyServerStatusValues(st, expectedStats);
- })();
-
- // Note committing a no shards transaction can only succeed.
- jsTest.log("Committed no shards transaction.");
- (() => {
- startNoShardsTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.noShards.initiated += 1;
- expectedStats.commitTypes.noShards.successful += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful single shard transaction.");
- (() => {
- startSingleShardTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.singleShard.initiated += 1;
- expectedStats.commitTypes.singleShard.successful += 1;
- expectedStats.totalParticipantsAtCommit += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Failed single shard transaction.");
- (() => {
- startSingleShardTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] = 1;
- expectedStats.commitTypes.singleShard.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 1;
- // The one shard is targeted for the commit then the implicit abort.
- expectedStats.totalRequestsTargeted += 1 + 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful single write shard transaction.");
- (() => {
- startSingleWriteShardTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.singleWriteShard.initiated += 1;
- expectedStats.commitTypes.singleWriteShard.successful += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Failed single write shard transaction.");
- (() => {
- startSingleWriteShardTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.singleWriteShard.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- // In a single write shard commit, all read shards are committed first, then the
- // write shards, so if committing on a read shard fails, the write shards aren't targeted.
- // The implicit abort after will target all shards.
- expectedStats.totalRequestsTargeted += 1 + 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful read only transaction.");
- (() => {
- startReadOnlyTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.readOnly.initiated += 1;
- expectedStats.commitTypes.readOnly.successful += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Failed read only transaction.");
- (() => {
- startReadOnlyTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.readOnly.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- // Both shards are targeted for the commit then the implicit abort.
- expectedStats.totalRequestsTargeted += 2 + 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful two phase commit transaction.");
- (() => {
- startTwoPhaseCommitTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
- expectedStats.commitTypes.twoPhaseCommit.successful += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
-
- // Remove the inserted documents.
- assert.commandWorked(sessionDB[collName].remove({_id: {$in: [-5, 5]}}));
- })();
-
- jsTest.log("Failed two phase commit transaction.");
- (() => {
- startTwoPhaseCommitTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- // There are no implicit aborts after two phase commit, so the coordinator is targeted once.
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Recover successful commit result.");
- (() => {
- const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: true});
-
- assert.commandWorked(st.s.adminCommand({
- commitTransaction: 1,
- lsid: otherRouterSession.getSessionId(),
- txnNumber: otherRouterSession.getTxnNumber_forTesting(),
- autocommit: false, recoveryToken
- }));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.recoverWithToken.initiated += 1;
- expectedStats.commitTypes.recoverWithToken.successful += 1;
- // The participant stats shouldn't increase if we're recovering commit.
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Recover failed commit result.");
- (() => {
- const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: false});
-
- assert.commandFailedWithCode(st.s.adminCommand({
- commitTransaction: 1,
- lsid: otherRouterSession.getSessionId(),
- txnNumber: otherRouterSession.getTxnNumber_forTesting(),
- autocommit: false, recoveryToken
- }),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalStarted += 1;
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.recoverWithToken.initiated += 1;
- // The participant stats shouldn't increase if we're recovering commit.
- // There are no implicit aborts during commit recovery, so the recovery shard is targeted
- // once.
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Empty recovery token.");
- (() => {
- otherRouterSession.startTransaction();
- let resWithEmptyRecoveryToken =
- assert.commandWorked(otherRouterSessionDB.runCommand({find: collName}));
+function setUpTransactionToRecoverCommit({shouldCommit}) {
+ otherRouterSession.startTransaction();
+ let resWithRecoveryToken = assert.commandWorked(
+ otherRouterSessionDB.runCommand({insert: collName, documents: [{x: 5}]}));
+ if (shouldCommit) {
assert.commandWorked(otherRouterSession.commitTransaction_forTesting());
+ } else {
+ assert.commandWorked(otherRouterSession.abortTransaction_forTesting());
+ }
- // The stats on the main mongos shouldn't have changed.
- verifyServerStatusValues(st, expectedStats);
-
- assert.commandFailedWithCode(st.s.adminCommand({
- commitTransaction: 1,
- lsid: otherRouterSession.getSessionId(),
- txnNumber: otherRouterSession.getTxnNumber_forTesting(),
- autocommit: false,
- recoveryToken: resWithEmptyRecoveryToken.recoveryToken
- }),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalStarted += 1;
- expectedStats.commitTypes.recoverWithToken.initiated += 1;
- // No requests are targeted and the decision isn't learned, so total committed/aborted and
- // total requests sent shouldn't change.
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Explicitly aborted transaction.");
- (() => {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({x: 2}));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["abort"] = 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Implicitly aborted transaction.");
- (() => {
- session.startTransaction();
- assert.commandFailedWithCode(sessionDB[collName].insert({_id: 1}), ErrorCodes.DuplicateKey);
-
- expectedStats.totalStarted += 1;
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["DuplicateKey"] = 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 2; // Plus one for the implicit abort.
- verifyServerStatusValues(st, expectedStats);
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // A failed abortTransaction leads to an implicit abort, so two requests are targeted.
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Abandoned transaction.");
- (() => {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: -15}));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
-
- session.startTransaction_forTesting({}, {ignoreActiveTxn: true});
- assert.commandWorked(sessionDB[collName].insert({_id: -15}));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- // The router never learned if the previous transaction committed or aborted, so the aborted
- // counter shouldn't be incremented.
- verifyServerStatusValues(st, expectedStats);
-
- // Abort to clear the shell's session state.
- assert.commandWorked(session.abortTransaction_forTesting());
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["abort"] += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- session.endSession();
- st.stop();
+ // The stats on the main mongos shouldn't have changed.
+ verifyServerStatusValues(st, expectedStats);
+
+ return resWithRecoveryToken.recoveryToken;
+}
+
+//
+// Test cases for serverStatus output.
+//
+
+jsTest.log("Default values.");
+(() => {
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+// Note committing a no shards transaction can only succeed.
+jsTest.log("Committed no shards transaction.");
+(() => {
+ startNoShardsTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.noShards.initiated += 1;
+ expectedStats.commitTypes.noShards.successful += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful single shard transaction.");
+(() => {
+ startSingleShardTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.singleShard.initiated += 1;
+ expectedStats.commitTypes.singleShard.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Failed single shard transaction.");
+(() => {
+ startSingleShardTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] = 1;
+ expectedStats.commitTypes.singleShard.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 1;
+ // The one shard is targeted for the commit then the implicit abort.
+ expectedStats.totalRequestsTargeted += 1 + 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful single write shard transaction.");
+(() => {
+ startSingleWriteShardTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.singleWriteShard.initiated += 1;
+ expectedStats.commitTypes.singleWriteShard.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Failed single write shard transaction.");
+(() => {
+ startSingleWriteShardTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.singleWriteShard.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ // In a single write shard commit, all read shards are committed first, then the
+ // write shards, so if committing on a read shard fails, the write shards aren't targeted.
+ // The implicit abort after will target all shards.
+ expectedStats.totalRequestsTargeted += 1 + 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful read only transaction.");
+(() => {
+ startReadOnlyTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.readOnly.initiated += 1;
+ expectedStats.commitTypes.readOnly.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Failed read only transaction.");
+(() => {
+ startReadOnlyTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.readOnly.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ // Both shards are targeted for the commit then the implicit abort.
+ expectedStats.totalRequestsTargeted += 2 + 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful two phase commit transaction.");
+(() => {
+ startTwoPhaseCommitTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
+ expectedStats.commitTypes.twoPhaseCommit.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+
+ // Remove the inserted documents.
+ assert.commandWorked(sessionDB[collName].remove({_id: {$in: [-5, 5]}}));
+})();
+
+jsTest.log("Failed two phase commit transaction.");
+(() => {
+ startTwoPhaseCommitTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ // There are no implicit aborts after two phase commit, so the coordinator is targeted once.
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Recover successful commit result.");
+(() => {
+ const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: true});
+
+ assert.commandWorked(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: otherRouterSession.getSessionId(),
+ txnNumber: otherRouterSession.getTxnNumber_forTesting(),
+ autocommit: false,
+ recoveryToken
+ }));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.recoverWithToken.initiated += 1;
+ expectedStats.commitTypes.recoverWithToken.successful += 1;
+ // The participant stats shouldn't increase if we're recovering commit.
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Recover failed commit result.");
+(() => {
+ const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: false});
+
+ assert.commandFailedWithCode(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: otherRouterSession.getSessionId(),
+ txnNumber: otherRouterSession.getTxnNumber_forTesting(),
+ autocommit: false,
+ recoveryToken
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.recoverWithToken.initiated += 1;
+ // The participant stats shouldn't increase if we're recovering commit.
+ // There are no implicit aborts during commit recovery, so the recovery shard is targeted
+ // once.
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Empty recovery token.");
+(() => {
+ otherRouterSession.startTransaction();
+ let resWithEmptyRecoveryToken =
+ assert.commandWorked(otherRouterSessionDB.runCommand({find: collName}));
+ assert.commandWorked(otherRouterSession.commitTransaction_forTesting());
+
+ // The stats on the main mongos shouldn't have changed.
+ verifyServerStatusValues(st, expectedStats);
+
+ assert.commandFailedWithCode(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: otherRouterSession.getSessionId(),
+ txnNumber: otherRouterSession.getTxnNumber_forTesting(),
+ autocommit: false,
+ recoveryToken: resWithEmptyRecoveryToken.recoveryToken
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalStarted += 1;
+ expectedStats.commitTypes.recoverWithToken.initiated += 1;
+ // No requests are targeted and the decision isn't learned, so total committed/aborted and
+ // total requests sent shouldn't change.
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Explicitly aborted transaction.");
+(() => {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({x: 2}));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["abort"] = 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Implicitly aborted transaction.");
+(() => {
+ session.startTransaction();
+ assert.commandFailedWithCode(sessionDB[collName].insert({_id: 1}), ErrorCodes.DuplicateKey);
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["DuplicateKey"] = 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 2; // Plus one for the implicit abort.
+ verifyServerStatusValues(st, expectedStats);
+
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ // A failed abortTransaction leads to an implicit abort, so two requests are targeted.
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Abandoned transaction.");
+(() => {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({_id: -15}));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+
+ session.startTransaction_forTesting({}, {ignoreActiveTxn: true});
+ assert.commandWorked(sessionDB[collName].insert({_id: -15}));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ // The router never learned if the previous transaction committed or aborted, so the aborted
+ // counter shouldn't be incremented.
+ verifyServerStatusValues(st, expectedStats);
+
+ // Abort to clear the shell's session state.
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["abort"] += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+session.endSession();
+st.stop();
}());
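Similarly, a minimal sketch of reading the router-side transaction metrics that the test above exercises; the `st` fixture and single-shard topology are illustrative assumptions, not part of the patch:

// Minimal sketch (assumed setup, not from the patch): dump the per-commit-type
// counters that mongos reports under serverStatus().transactions, the same
// sub-object verifyServerStatusFields() validates above.
const st = new ShardingTest({shards: 1, mongos: 1});
const res = assert.commandWorked(st.s.adminCommand({serverStatus: 1}));
const commitTypes = res.transactions.commitTypes;
Object.keys(commitTypes).forEach((type) => {
    jsTest.log(type + ": initiated=" + commitTypes[type].initiated +
               ", successful=" + commitTypes[type].successful);
});
st.stop();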
diff --git a/jstests/noPassthrough/server_read_concern_metrics.js b/jstests/noPassthrough/server_read_concern_metrics.js
index 0907138e0fd..b5c78c166fb 100644
--- a/jstests/noPassthrough/server_read_concern_metrics.js
+++ b/jstests/noPassthrough/server_read_concern_metrics.js
@@ -1,360 +1,358 @@
// Tests readConcern level metrics in the serverStatus output.
// @tags: [uses_transactions, requires_majority_read_concern]
(function() {
- "use strict";
+"use strict";
- // Verifies that the server status response has the fields that we expect.
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("opReadConcernCounters"),
- "Expected the serverStatus response to have a 'opReadConcernCounters' field\n" +
- tojson(serverStatusResponse));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("available"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'available' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("linearizable"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'linearizable' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("local"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'local' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("majority"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'majority' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("snapshot"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'snapshot' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("none"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'none' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- }
+// Verifies that the server status response has the fields that we expect.
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("opReadConcernCounters"),
+ "Expected the serverStatus response to have a 'opReadConcernCounters' field\n" +
+ tojson(serverStatusResponse));
+ assert(
+ serverStatusResponse.opReadConcernCounters.hasOwnProperty("available"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'available' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(
+ serverStatusResponse.opReadConcernCounters.hasOwnProperty("linearizable"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'linearizable' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("local"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'local' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("majority"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'majority' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("snapshot"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'snapshot' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("none"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'none' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+}
- // Verifies that the given value of the server status response is incremented in the way
- // we expect.
- function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement +
- ", initialStats: " + tojson(initialStats) + ", newStats: " +
- tojson(newStats));
- }
+// Verifies that the given value of the server status response is incremented in the way
+// we expect.
+function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " + tojson(newStats));
+}
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
- const dbName = "test";
- const collName = "server_read_concern_metrics";
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testColl.insert({_id: 0}));
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+const dbName = "test";
+const collName = "server_read_concern_metrics";
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testColl.insert({_id: 0}));
- // Run an initial transaction to get config.transactions state into memory.
- session.startTransaction();
- assert.eq(sessionColl.find().itcount(), 1);
- assert.commandWorked(session.abortTransaction_forTesting());
+// Run an initial transaction to get config.transactions state into memory.
+session.startTransaction();
+assert.eq(sessionColl.find().itcount(), 1);
+assert.commandWorked(session.abortTransaction_forTesting());
- // Get initial serverStatus.
- let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
+// Get initial serverStatus.
+let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(serverStatus);
- // Run a find with no readConcern.
- assert.eq(testColl.find().itcount(), 1);
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- serverStatus = newStatus;
+// Run a find with no readConcern.
+assert.eq(testColl.find().itcount(), 1);
+let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+serverStatus = newStatus;
- // Run a find with a readConcern with no level.
- assert.commandWorked(
- testDB.runCommand({find: collName, readConcern: {afterClusterTime: Timestamp(1, 1)}}));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- serverStatus = newStatus;
+// Run a find with a readConcern with no level.
+assert.commandWorked(
+ testDB.runCommand({find: collName, readConcern: {afterClusterTime: Timestamp(1, 1)}}));
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+serverStatus = newStatus;
- // Run a legacy query.
- primary.forceReadMode("legacy");
- assert.eq(testColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- primary.forceReadMode("commands");
- serverStatus = newStatus;
+// Run a legacy query.
+primary.forceReadMode("legacy");
+assert.eq(testColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+primary.forceReadMode("commands");
+serverStatus = newStatus;
- // Run a find with a readConcern level available.
- assert.eq(testColl.find().readConcern("available").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level available.
+assert.eq(testColl.find().readConcern("available").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find with a readConcern level linearizable.
- assert.eq(testColl.find().readConcern("linearizable").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level linearizable.
+assert.eq(testColl.find().readConcern("linearizable").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find with a readConcern level local.
- assert.eq(testColl.find().readConcern("local").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level local.
+assert.eq(testColl.find().readConcern("local").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find with a readConcern level majority.
- assert.eq(testColl.find().readConcern("majority").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level majority.
+assert.eq(testColl.find().readConcern("majority").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find in a transaction with readConcern level snapshot.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a find in a transaction with readConcern level snapshot.
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Run a find in a transaction with no specified readConcern level.
- session.startTransaction();
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a find in a transaction with no specified readConcern level.
+session.startTransaction();
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Run a find in a transaction with readConcern level local.
- session.startTransaction({readConcern: {level: "local"}});
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a find in a transaction with readConcern level local.
+session.startTransaction({readConcern: {level: "local"}});
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Run a find in a transaction with readConcern level majority.
- session.startTransaction({readConcern: {level: "majority"}});
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find in a transaction with readConcern level majority.
+session.startTransaction({readConcern: {level: "majority"}});
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a second find in the same transaction. It will inherit the readConcern from the
- // transaction.
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a second find in the same transaction. It will inherit the readConcern from the
+// transaction.
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Aggregation does not count toward readConcern metrics. Aggregation is counted as a 'command'
- // in the 'opCounters' serverStatus section, and we only track the readConcern of queries
- // tracked in 'opCounters.query'.
- assert.eq(testColl.aggregate([]).itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Aggregation does not count toward readConcern metrics. Aggregation is counted as a 'command'
+// in the 'opCounters' serverStatus section, and we only track the readConcern of queries
+// tracked in 'opCounters.query'.
+assert.eq(testColl.aggregate([]).itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // The count command does not count toward readConcern metrics. The count command is counted as
- // a 'command' in the 'opCounters' serverStatus section, and we only track the readConcern of
- // queries tracked in 'opCounters.query'.
- assert.eq(testColl.count({_id: 0}), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// The count command does not count toward readConcern metrics. The count command is counted as
+// a 'command' in the 'opCounters' serverStatus section, and we only track the readConcern of
+// queries tracked in 'opCounters.query'.
+assert.eq(testColl.count({_id: 0}), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // getMore does not count toward readConcern metrics. getMore inherits the readConcern of the
- // originating command. It is not counted in 'opCounters.query'.
- let res = assert.commandWorked(testDB.runCommand({find: collName, batchSize: 0}));
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: collName}));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+// getMore does not count toward readConcern metrics. getMore inherits the readConcern of the
+// originating command. It is not counted in 'opCounters.query'.
+let res = assert.commandWorked(testDB.runCommand({find: collName, batchSize: 0}));
+serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: collName}));
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
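
The hunk above re-checks all six opReadConcernCounters ("available", "linearizable", "local", "majority", "snapshot", "none") after every operation, asserting that exactly one counter moved. A minimal sketch of how those six repeated calls collapse into one loop, reusing verifyServerStatusChange from the test itself; the helper name verifyReadConcernCounters and the expectedDeltas parameter are hypothetical, not part of the diff:

// Hypothetical helper: checks every readConcern counter in one call.
// Levels not listed in expectedDeltas are asserted to be unchanged (delta 0).
function verifyReadConcernCounters(oldStatus, newStatus, expectedDeltas) {
    const levels = ["available", "linearizable", "local", "majority", "snapshot", "none"];
    levels.forEach(function(level) {
        verifyServerStatusChange(oldStatus.opReadConcernCounters,
                                 newStatus.opReadConcernCounters,
                                 level,
                                 expectedDeltas[level] || 0);
    });
}

// Example: after a linearizable find, only the "linearizable" counter moves.
verifyReadConcernCounters(serverStatus, newStatus, {linearizable: 1});
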
diff --git a/jstests/noPassthrough/server_transaction_metrics.js b/jstests/noPassthrough/server_transaction_metrics.js
index 4bd3c02c9d5..402e72da964 100644
--- a/jstests/noPassthrough/server_transaction_metrics.js
+++ b/jstests/noPassthrough/server_transaction_metrics.js
@@ -1,220 +1,202 @@
// Tests multi-document transactions metrics in the serverStatus output.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // Verifies that the server status response has the fields that we expect.
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("transactions"),
- "Expected the serverStatus response to have a 'transactions' field\n" +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("currentActive"),
- "The 'transactions' field in serverStatus did not have the 'currentActive' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(
- serverStatusResponse.transactions.hasOwnProperty("currentInactive"),
- "The 'transactions' field in serverStatus did not have the 'currentInactive' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(serverStatusResponse.transactions.hasOwnProperty("currentOpen"),
- "The 'transactions' field in serverStatus did not have the 'currentOpen' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalAborted"),
- "The 'transactions' field in serverStatus did not have the 'totalAborted' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(
- serverStatusResponse.transactions.hasOwnProperty("totalCommitted"),
- "The 'transactions' field in serverStatus did not have the 'totalCommitted' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalStarted"),
- "The 'transactions' field in serverStatus did not have the 'totalStarted' field\n" +
- tojson(serverStatusResponse.transactions));
- }
-
- // Verifies that the given value of the server status response is incremented in the way
- // we expect.
- function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement);
- }
-
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
-
- // Set up the test database.
- const dbName = "test";
- const collName = "server_transactions_metrics";
- const testDB = primary.getDB(dbName);
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Start the session.
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+"use strict";
+
+// Verifies that the server status response has the fields that we expect.
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("transactions"),
+ "Expected the serverStatus response to have a 'transactions' field\n" +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentActive"),
+ "The 'transactions' field in serverStatus did not have the 'currentActive' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentInactive"),
+ "The 'transactions' field in serverStatus did not have the 'currentInactive' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentOpen"),
+ "The 'transactions' field in serverStatus did not have the 'currentOpen' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalAborted"),
+ "The 'transactions' field in serverStatus did not have the 'totalAborted' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalCommitted"),
+ "The 'transactions' field in serverStatus did not have the 'totalCommitted' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalStarted"),
+ "The 'transactions' field in serverStatus did not have the 'totalStarted' field\n" +
+ tojson(serverStatusResponse.transactions));
+}
+
+// Verifies that the given value of the server status response is incremented in the way
+// we expect.
+function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement);
+}
+
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+
+// Set up the test database.
+const dbName = "test";
+const collName = "server_transactions_metrics";
+const testDB = primary.getDB(dbName);
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Start the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+// Get state of server status before the transaction.
+let initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(initialStatus);
+
+// This transaction will commit.
+jsTest.log("Start a transaction and then commit it.");
+
+// Compare server status after starting a transaction with the server status before.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+
+let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that when not running an operation, the transaction is inactive.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
+
+// Compare server status after the transaction commit with the server status before.
+assert.commandWorked(session.commitTransaction_forTesting());
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
+// Verify that current open counter is decremented on commit.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 on commit.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// This transaction will abort.
+jsTest.log("Start a transaction and then abort it.");
+
+// Compare server status after starting a transaction with the server status before.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that when not running an operation, the transaction is inactive.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
+
+// Compare server status after the transaction abort with the server status before.
+assert.commandWorked(session.abortTransaction_forTesting());
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 2);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 1);
+// Verify that current open counter is decremented on abort.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 on abort.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// This transaction will abort due to a duplicate key insert.
+jsTest.log("Start a transaction that will abort on a duplicated key error.");
+
+// Compare server status after starting a transaction with the server status before.
+session.startTransaction();
+// Inserting a new document will work fine, and the transaction starts.
+assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
+
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that when not running an operation, the transaction is inactive.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
+
+// Compare server status after the transaction abort with the server status before.
+// The duplicated insert will fail, causing the transaction to abort.
+assert.commandFailedWithCode(sessionColl.insert({_id: "insert-3"}), ErrorCodes.DuplicateKey);
+// Ensure that the transaction was aborted on failure.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 3);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 2);
+// Verify that current open counter is decremented on abort caused by an error.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 on abort.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// Hang the transaction on a failpoint in the middle of an operation to check active and
+// inactive counters while operation is running inside a transaction.
+jsTest.log("Start a transaction that will hang in the middle of an operation due to a fail point.");
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
+
+const transactionFn = function() {
+ const collName = 'server_transactions_metrics';
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase('test');
const sessionColl = sessionDb[collName];
- // Get state of server status before the transaction.
- let initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(initialStatus);
-
- // This transaction will commit.
- jsTest.log("Start a transaction and then commit it.");
-
- // Compare server status after starting a transaction with the server status before.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
-
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that when not running an operation, the transaction is inactive.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
-
- // Compare server status after the transaction commit with the server status before.
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ assert.commandWorked(sessionColl.update({}, {"update-1": 2}));
assert.commandWorked(session.commitTransaction_forTesting());
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
- // Verify that current open counter is decremented on commit.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 on commit.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // This transaction will abort.
- jsTest.log("Start a transaction and then abort it.");
-
- // Compare server status after starting a transaction with the server status before.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
-
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that when not running an operation, the transaction is inactive.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
-
- // Compare server status after the transaction abort with the server status before.
- assert.commandWorked(session.abortTransaction_forTesting());
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 2);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 1);
- // Verify that current open counter is decremented on abort.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 on abort.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // This transaction will abort due to a duplicate key insert.
- jsTest.log("Start a transaction that will abort on a duplicated key error.");
-
- // Compare server status after starting a transaction with the server status before.
- session.startTransaction();
- // Inserting a new document will work fine, and the transaction starts.
- assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
-
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that when not running an operation, the transaction is inactive.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
-
- // Compare server status after the transaction abort with the server status before.
- // The duplicated insert will fail, causing the transaction to abort.
- assert.commandFailedWithCode(sessionColl.insert({_id: "insert-3"}), ErrorCodes.DuplicateKey);
- // Ensure that the transaction was aborted on failure.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 3);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 2);
- // Verify that current open counter is decremented on abort caused by an error.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 on abort.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // Hang the transaction on a failpoint in the middle of an operation to check active and
- // inactive counters while operation is running inside a transaction.
- jsTest.log(
- "Start a transaction that will hang in the middle of an operation due to a fail point.");
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
-
- const transactionFn = function() {
- const collName = 'server_transactions_metrics';
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase('test');
- const sessionColl = sessionDb[collName];
-
- session.startTransaction({readConcern: {level: 'snapshot'}});
- assert.commandWorked(sessionColl.update({}, {"update-1": 2}));
- assert.commandWorked(session.commitTransaction_forTesting());
+};
+const transactionProcess = startParallelShell(transactionFn, primary.port);
+
+// Keep running currentOp() until we see the transaction subdocument.
+assert.soon(function() {
+ const transactionFilter = {
+ active: true,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0}
};
- const transactionProcess = startParallelShell(transactionFn, primary.port);
-
- // Keep running currentOp() until we see the transaction subdocument.
- assert.soon(function() {
- const transactionFilter =
- {active: true, 'lsid': {$exists: true}, 'transaction.parameters.txnNumber': {$eq: 0}};
- return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
- });
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that the metrics show that the transaction is active while inside the operation.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // Now the transaction can proceed.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
- transactionProcess();
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that current open counter is decremented on commit.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 after the transaction finishes.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // End the session and stop the replica set.
- session.endSession();
- rst.stopSet();
+ return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
+});
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that the metrics show that the transaction is active while inside the operation.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// Now the transaction can proceed.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
+transactionProcess();
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that current open counter is decremented on commit.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 after the transaction finishes.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// End the session and stop the replica set.
+session.endSession();
+rst.stopSet();
}());
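
The failpoint technique in the hunk above is what lets the test observe currentActive while an operation is actually running: the update in the parallel shell hangs on the real 'hangDuringBatchUpdate' failpoint, and $currentOp is polled until the transaction shows up as active before serverStatus is sampled. A condensed sketch of that pattern, reusing the test's transactionFn, testDB, adminDB, and primary; only the variable name shell is an assumption:

// Freeze the update mid-batch so the transaction stays active and observable.
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "hangDuringBatchUpdate", mode: "alwaysOn"}));
const shell = startParallelShell(transactionFn, primary.port);

// Poll $currentOp until the hung transaction is reported as active.
assert.soon(function() {
    return adminDB.aggregate([{$currentOp: {}},
                              {$match: {active: true, lsid: {$exists: true}}}])
               .itcount() === 1;
});
// ... sample serverStatus here; currentActive should be 1, currentInactive 0 ...

// Release the failpoint and join the parallel shell so the commit can finish.
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "hangDuringBatchUpdate", mode: "off"}));
shell();
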
diff --git a/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js b/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js
index 172e9e3e5a9..a41e66dfc2d 100644
--- a/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js
+++ b/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js
@@ -3,175 +3,175 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/rslib.js");
-
- /**
- * Verifies that the serverStatus response has the fields that we expect.
- */
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("transactions"),
- "Expected the serverStatus response to have a 'transactions' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalPrepared"),
- "Expected the serverStatus response to have a 'totalPrepared' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenCommitted"),
- "Expected the serverStatus response to have a 'totalPreparedThenCommitted' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenAborted"),
- "Expected the serverStatus response to have a 'totalPreparedThenAborted' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("currentPrepared"),
- "Expected the serverStatus response to have a 'currentPrepared' field: " +
- tojson(serverStatusResponse));
- }
-
- /**
- * Verifies that the given value of the server status response is incremented in the way
- * we expect.
- */
- function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement);
- }
-
- /**
- * Verifies that the timestamp of the oldest active transaction in the transactions table
- * is greater than the lower bound and less than or equal to the upper bound.
- */
- function verifyOldestActiveTransactionTimestamp(testDB, lowerBound, upperBound) {
- let res = assert.commandWorked(
- testDB.getSiblingDB("config").getCollection("transactions").runCommand("find", {
- "filter": {"state": {"$in": ["prepared", "inProgress"]}},
- "sort": {"startOpTime": 1},
- "readConcern": {"level": "local"},
- "limit": 1
- }));
-
- let entry = res.cursor.firstBatch[0];
- assert.neq(undefined, entry);
-
- assert.lt(lowerBound,
- entry.startOpTime.ts,
- "oldest active transaction timestamp should be greater than the lower bound");
- assert.lte(
- entry.startOpTime.ts,
- upperBound,
- "oldest active transaction timestamp should be less than or equal to the upper bound");
- }
-
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
-
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
-
- // Set up the test database.
- const dbName = "test";
- const collName = "server_transactions_metrics_for_prepared_transactions";
- const testDB = primary.getDB(dbName);
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Start the session.
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- // Get state of server status before the transaction.
- const initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(initialStatus);
-
- // Test server metrics for a prepared transaction that is committed.
- jsTest.log("Prepare a transaction and then commit it");
-
- const doc1 = {_id: 1, x: 1};
-
- // Start transaction and prepare transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
-
- const opTimeBeforePrepareForCommit = getLastOpTime(primary);
- const prepareTimestampForCommit = PrepareHelpers.prepareTransaction(session);
-
- // Verify the total and current prepared transaction counter is updated and the oldest active
- // oplog entry timestamp is shown.
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
-
- // Verify that the prepare entry has the oldest timestamp of any active transaction
- // in the transactions table.
- verifyOldestActiveTransactionTimestamp(
- testDB, opTimeBeforePrepareForCommit.ts, prepareTimestampForCommit);
-
- // Verify the total prepared and committed transaction counters are updated after a commit
- // and that the current prepared counter is decremented.
- PrepareHelpers.commitTransaction(session, prepareTimestampForCommit);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
-
- // Verify that other prepared transaction metrics have not changed.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
-
- // Test server metrics for a prepared transaction that is aborted.
- jsTest.log("Prepare a transaction and then abort it");
-
- const doc2 = {_id: 2, x: 2};
-
- // Start transaction and prepare transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc2));
-
- const opTimeBeforePrepareForAbort = getLastOpTime(primary);
- const prepareTimestampForAbort = PrepareHelpers.prepareTransaction(session);
-
- // Verify that the total and current prepared counter is updated and the oldest active oplog
- // entry timestamp is shown.
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
-
- // Verify that the prepare entry has the oldest timestamp of any active transaction
- // in the transactions table.
- verifyOldestActiveTransactionTimestamp(
- testDB, opTimeBeforePrepareForAbort.ts, prepareTimestampForAbort);
-
- // Verify the total prepared and aborted transaction counters are updated after an abort and the
- // current prepared counter is decremented.
- assert.commandWorked(session.abortTransaction_forTesting());
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
-
- // Verify that other prepared transaction metrics have not changed.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
-
- // End the session and stop the replica set.
- session.endSession();
- rst.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/rslib.js");
+
+/**
+ * Verifies that the serverStatus response has the fields that we expect.
+ */
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("transactions"),
+ "Expected the serverStatus response to have a 'transactions' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalPrepared"),
+ "Expected the serverStatus response to have a 'totalPrepared' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenCommitted"),
+ "Expected the serverStatus response to have a 'totalPreparedThenCommitted' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenAborted"),
+ "Expected the serverStatus response to have a 'totalPreparedThenAborted' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentPrepared"),
+ "Expected the serverStatus response to have a 'currentPrepared' field: " +
+ tojson(serverStatusResponse));
+}
+
+/**
+ * Verifies that the given value of the server status response is incremented in the way
+ * we expect.
+ */
+function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement);
+}
+
+/**
+ * Verifies that the timestamp of the oldest active transaction in the transactions table
+ * is greater than the lower bound and less than or equal to the upper bound.
+ */
+function verifyOldestActiveTransactionTimestamp(testDB, lowerBound, upperBound) {
+ let res = assert.commandWorked(
+ testDB.getSiblingDB("config").getCollection("transactions").runCommand("find", {
+ "filter": {"state": {"$in": ["prepared", "inProgress"]}},
+ "sort": {"startOpTime": 1},
+ "readConcern": {"level": "local"},
+ "limit": 1
+ }));
+
+ let entry = res.cursor.firstBatch[0];
+ assert.neq(undefined, entry);
+
+ assert.lt(lowerBound,
+ entry.startOpTime.ts,
+ "oldest active transaction timestamp should be greater than the lower bound");
+ assert.lte(
+ entry.startOpTime.ts,
+ upperBound,
+ "oldest active transaction timestamp should be less than or equal to the upper bound");
+}
+
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+
+// Set up the test database.
+const dbName = "test";
+const collName = "server_transactions_metrics_for_prepared_transactions";
+const testDB = primary.getDB(dbName);
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Start the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+// Get state of server status before the transaction.
+const initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(initialStatus);
+
+// Test server metrics for a prepared transaction that is committed.
+jsTest.log("Prepare a transaction and then commit it");
+
+const doc1 = {
+ _id: 1,
+ x: 1
+};
+
+// Start transaction and prepare transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
+
+const opTimeBeforePrepareForCommit = getLastOpTime(primary);
+const prepareTimestampForCommit = PrepareHelpers.prepareTransaction(session);
+
+// Verify the total and current prepared transaction counter is updated and the oldest active
+// oplog entry timestamp is shown.
+let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
+
+// Verify that the prepare entry has the oldest timestamp of any active transaction
+// in the transactions table.
+verifyOldestActiveTransactionTimestamp(
+ testDB, opTimeBeforePrepareForCommit.ts, prepareTimestampForCommit);
+
+// Verify the total prepared and committed transaction counters are updated after a commit
+// and that the current prepared counter is decremented.
+PrepareHelpers.commitTransaction(session, prepareTimestampForCommit);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
+
+// Verify that other prepared transaction metrics have not changed.
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
+
+// Test server metrics for a prepared transaction that is aborted.
+jsTest.log("Prepare a transaction and then abort it");
+
+const doc2 = {
+ _id: 2,
+ x: 2
+};
+
+// Start transaction and prepare transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc2));
+
+const opTimeBeforePrepareForAbort = getLastOpTime(primary);
+const prepareTimestampForAbort = PrepareHelpers.prepareTransaction(session);
+
+// Verify that the total and current prepared counter is updated and the oldest active oplog
+// entry timestamp is shown.
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
+
+// Verify that the prepare entry has the oldest timestamp of any active transaction
+// in the transactions table.
+verifyOldestActiveTransactionTimestamp(
+ testDB, opTimeBeforePrepareForAbort.ts, prepareTimestampForAbort);
+
+// Verify the total prepared and aborted transaction counters are updated after an abort and the
+// current prepared counter is decremented.
+assert.commandWorked(session.abortTransaction_forTesting());
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
+
+// Verify that other prepared transaction metrics have not changed.
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
+
+// End the session and stop the replica set.
+session.endSession();
+rst.stopSet();
}());
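An aside on the counters asserted above: the prepared-transaction metrics live under the transactions section of serverStatus, so they can be inspected directly from the shell. A minimal sketch, assuming a primary connection obtained from a ReplSetTest as in the test; the snippet is illustrative and not part of the patch:

    // Sketch only: read the prepared-transaction counters this test asserts on.
    const txnMetrics = primary.getDB("admin").serverStatus().transactions;
    printjson({
        totalPrepared: txnMetrics.totalPrepared,
        currentPrepared: txnMetrics.currentPrepared,
        totalPreparedThenCommitted: txnMetrics.totalPreparedThenCommitted,
        totalPreparedThenAborted: txnMetrics.totalPreparedThenAborted,
    });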
diff --git a/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js b/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js
index 3b3fd3ec94b..a4f7aba5a08 100644
--- a/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js
+++ b/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js
@@ -1,82 +1,83 @@
// Tests that multi-document transaction metrics are still correct after 'killSessions'.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // Verifies that the given value of the transaction metrics is incremented in the way we expect.
- function verifyMetricsChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement +
- ".\nInitial stats: " + tojson(initialStats) + "; New stats: " +
- tojson(newStats));
- }
-
- // Set up the replica set and enable majority read concern for atClusterTime snapshot reads.
- const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "true"}});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "server_transactions_metrics_kill_sessions";
- const testDB = rst.getPrimary().getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- let session = testDB.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
-
- let initialMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
-
- jsTest.log("Start a transaction.");
- session.startTransaction();
- assert.commandWorked(sessionDb.runCommand({find: collName}));
-
- let newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 1);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 1);
-
- jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
- assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
-
- newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 1);
- verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 1);
-
- session.endSession();
-
- session = testDB.getMongo().startSession(sessionOptions);
- sessionDb = session.getDatabase(dbName);
-
- jsTest.log("Start a snapshot transaction at a time that is too old.");
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
- // Operation runs unstashTransactionResources() and throws prior to onUnstash(). As a result,
- // the transaction will be implicitly aborted.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
-
- newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
-
- // Kill the session that threw exception before.
- jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
- assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
-
- newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 2);
- verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 2);
-
- session.endSession();
-
- rst.stopSet();
+"use strict";
+
+// Verifies that the given value of the transaction metrics is incremented in the way we expect.
+function verifyMetricsChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement +
+ ".\nInitial stats: " + tojson(initialStats) + "; New stats: " + tojson(newStats));
+}
+
+// Set up the replica set and enable majority read concern for atClusterTime snapshot reads.
+const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "true"}});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "server_transactions_metrics_kill_sessions";
+const testDB = rst.getPrimary().getDB(dbName);
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = testDB.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+
+let initialMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+
+jsTest.log("Start a transaction.");
+session.startTransaction();
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+
+let newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 1);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 1);
+
+jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
+assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
+
+newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 1);
+verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 1);
+
+session.endSession();
+
+session = testDB.getMongo().startSession(sessionOptions);
+sessionDb = session.getDatabase(dbName);
+
+jsTest.log("Start a snapshot transaction at a time that is too old.");
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
+// Operation runs unstashTransactionResources() and throws prior to onUnstash(). As a result,
+// the transaction will be implicitly aborted.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
+
+newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
+
+// Kill the session that threw an exception earlier.
+jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
+assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
+
+newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 2);
+verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 2);
+
+session.endSession();
+
+rst.stopSet();
}());
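The key behavior this test leans on is that killSessions aborts a session's open transaction, which surfaces in the totalAborted counter. A compressed sketch of that flow, assuming testDB, session, sessionDb, and collName are set up as in the test above:

    // Sketch only: killing a session implicitly aborts its open transaction.
    const before = testDB.adminCommand({serverStatus: 1}).transactions;
    session.startTransaction();
    assert.commandWorked(sessionDb.runCommand({find: collName}));
    assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
    const after = testDB.adminCommand({serverStatus: 1}).transactions;
    assert.eq(before.totalAborted + 1, after.totalAborted);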
diff --git a/jstests/noPassthrough/server_transaction_metrics_secondary.js b/jstests/noPassthrough/server_transaction_metrics_secondary.js
index 0f28b9e5667..9464dd77fc1 100644
--- a/jstests/noPassthrough/server_transaction_metrics_secondary.js
+++ b/jstests/noPassthrough/server_transaction_metrics_secondary.js
@@ -1,79 +1,80 @@
// Test that transactions run on secondaries do not change the serverStatus transaction metrics.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- jsTest.setOption("enableTestCommands", false);
- TestData.authenticationDatabase = "local";
+jsTest.setOption("enableTestCommands", false);
+TestData.authenticationDatabase = "local";
- const dbName = "test";
- const collName = "server_transaction_metrics_secondary";
+const dbName = "test";
+const collName = "server_transaction_metrics_secondary";
- // Start up the replica set. We want a stable topology, so make the secondary unelectable.
- const replTest = new ReplSetTest({name: collName, nodes: 2});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[1].priority = 0;
- replTest.initiate(config);
+// Start up the replica set. We want a stable topology, so make the secondary unelectable.
+const replTest = new ReplSetTest({name: collName, nodes: 2});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[1].priority = 0;
+replTest.initiate(config);
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
- // Set slaveOk=true so that normal read commands would be allowed on the secondary.
- secondary.setSlaveOk(true);
+// Set slaveOk=true so that normal read commands will be allowed on the secondary.
+secondary.setSlaveOk(true);
- // Create a test collection that we can run commands against.
- assert.commandWorked(primary.getDB(dbName)[collName].insert({_id: 0}));
- replTest.awaitLastOpCommitted();
+// Create a test collection that we can run commands against.
+assert.commandWorked(primary.getDB(dbName)[collName].insert({_id: 0}));
+replTest.awaitLastOpCommitted();
- // Initiate a session on the secondary.
- const sessionOptions = {causalConsistency: false};
- const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
- let secDb = secondarySession.getDatabase(dbName);
- let metrics;
+// Initiate a session on the secondary.
+const sessionOptions = {
+ causalConsistency: false
+};
+const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
+let secDb = secondarySession.getDatabase(dbName);
+let metrics;
- jsTestLog("Trying to start transaction on secondary.");
- secondarySession.startTransaction();
+jsTestLog("Trying to start transaction on secondary.");
+secondarySession.startTransaction();
- // Initially there are no transactions in the system.
- metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
- .transactions;
- assert.eq(0, metrics.currentActive);
- assert.eq(0, metrics.currentInactive);
- assert.eq(0, metrics.currentOpen);
- assert.eq(0, metrics.totalAborted);
- assert.eq(0, metrics.totalCommitted);
- assert.eq(0, metrics.totalStarted);
+// Initially there are no transactions in the system.
+metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
+ .transactions;
+assert.eq(0, metrics.currentActive);
+assert.eq(0, metrics.currentInactive);
+assert.eq(0, metrics.currentOpen);
+assert.eq(0, metrics.totalAborted);
+assert.eq(0, metrics.totalCommitted);
+assert.eq(0, metrics.totalStarted);
- jsTestLog("Run transaction statement.");
- assert.eq(assert.throws(() => secDb[collName].findOne({_id: 0})).code, ErrorCodes.NotMaster);
+jsTestLog("Run transaction statement.");
+assert.eq(assert.throws(() => secDb[collName].findOne({_id: 0})).code, ErrorCodes.NotMaster);
- // The metrics are not affected.
- metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
- .transactions;
- assert.eq(0, metrics.currentActive);
- assert.eq(0, metrics.currentInactive);
- assert.eq(0, metrics.currentOpen);
- assert.eq(0, metrics.totalAborted);
- assert.eq(0, metrics.totalCommitted);
- assert.eq(0, metrics.totalStarted);
+// The metrics are not affected.
+metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
+ .transactions;
+assert.eq(0, metrics.currentActive);
+assert.eq(0, metrics.currentInactive);
+assert.eq(0, metrics.currentOpen);
+assert.eq(0, metrics.totalAborted);
+assert.eq(0, metrics.totalCommitted);
+assert.eq(0, metrics.totalStarted);
- jsTestLog("Abort the transaction.");
- assert.commandFailedWithCode(secondarySession.abortTransaction_forTesting(),
- ErrorCodes.NotMaster);
+jsTestLog("Abort the transaction.");
+assert.commandFailedWithCode(secondarySession.abortTransaction_forTesting(), ErrorCodes.NotMaster);
- // The metrics are not affected.
- metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
- .transactions;
- assert.eq(0, metrics.currentActive);
- assert.eq(0, metrics.currentInactive);
- assert.eq(0, metrics.currentOpen);
- assert.eq(0, metrics.totalAborted);
- assert.eq(0, metrics.totalCommitted);
- assert.eq(0, metrics.totalStarted);
+// The metrics are not affected.
+metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
+ .transactions;
+assert.eq(0, metrics.currentActive);
+assert.eq(0, metrics.currentInactive);
+assert.eq(0, metrics.currentOpen);
+assert.eq(0, metrics.totalAborted);
+assert.eq(0, metrics.totalCommitted);
+assert.eq(0, metrics.totalStarted);
- jsTestLog("Done trying transaction on secondary.");
- secondarySession.endSession();
+jsTestLog("Done trying transaction on secondary.");
+secondarySession.endSession();
- replTest.stopSet();
+replTest.stopSet();
}()); \ No newline at end of file
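One small detail from the test above: serverStatus appears to accept section: 0 arguments to omit sections from its response, which is presumably why the test passes repl: 0 and metrics: 0 when it only cares about the transactions block. A sketch under that assumption, with conn standing in for any mongod connection:

    // Sketch only: trim serverStatus output to the sections of interest.
    const slim = conn.adminCommand({serverStatus: 1, repl: 0, metrics: 0});
    assert(slim.hasOwnProperty("transactions"));
    assert(!slim.hasOwnProperty("metrics"));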
diff --git a/jstests/noPassthrough/server_write_concern_metrics.js b/jstests/noPassthrough/server_write_concern_metrics.js
index d9ea528f1c7..88ad7d5b13c 100644
--- a/jstests/noPassthrough/server_write_concern_metrics.js
+++ b/jstests/noPassthrough/server_write_concern_metrics.js
@@ -1,213 +1,212 @@
// Tests writeConcern metrics in the serverStatus output.
// @tags: [requires_persistence, requires_journaling, requires_replication]
(function() {
- "use strict";
-
- // Verifies that the server status response has the fields that we expect.
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("opWriteConcernCounters"),
- "Expected the serverStatus response to have a 'opWriteConcernCounters' field\n" +
- tojson(serverStatusResponse));
- assert(
- serverStatusResponse.opWriteConcernCounters.hasOwnProperty("insert"),
- "The 'opWriteConcernCounters' field in serverStatus did not have the 'insert' field\n" +
- tojson(serverStatusResponse.opWriteConcernCounters));
- assert(
- serverStatusResponse.opWriteConcernCounters.hasOwnProperty("update"),
- "The 'opWriteConcernCounters' field in serverStatus did not have the 'update' field\n" +
- tojson(serverStatusResponse.opWriteConcernCounters));
- assert(
- serverStatusResponse.opWriteConcernCounters.hasOwnProperty("delete"),
- "The 'opWriteConcernCounters' field in serverStatus did not have the 'delete' field\n" +
- tojson(serverStatusResponse.opWriteConcernCounters));
+"use strict";
+
+// Verifies that the server status response has the fields that we expect.
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("opWriteConcernCounters"),
+ "Expected the serverStatus response to have a 'opWriteConcernCounters' field\n" +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.opWriteConcernCounters.hasOwnProperty("insert"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'insert' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ assert(serverStatusResponse.opWriteConcernCounters.hasOwnProperty("update"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'update' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ assert(serverStatusResponse.opWriteConcernCounters.hasOwnProperty("delete"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'delete' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+}
+
+// Verifies that the given path of the server status response is incremented in the way we
+// expect, and no other changes occurred. This function modifies its inputs.
+function verifyServerStatusChange(initialStats, newStats, path, expectedIncrement) {
+ // Traverse to the parent of the changed element.
+ let pathComponents = path.split(".");
+ let initialParent = initialStats;
+ let newParent = newStats;
+ for (let i = 0; i < pathComponents.length - 1; i++) {
+ assert(initialParent.hasOwnProperty(pathComponents[i]),
+ "initialStats did not contain component " + i + " of path " + path +
+ ", initialStats: " + tojson(initialStats));
+ initialParent = initialParent[pathComponents[i]];
+
+ assert(newParent.hasOwnProperty(pathComponents[i]),
+ "newStats did not contain component " + i + " of path " + path +
+ ", newStats: " + tojson(newStats));
+ newParent = newParent[pathComponents[i]];
}
- // Verifies that the given path of the server status response is incremented in the way we
- // expect, and no other changes occurred. This function modifies its inputs.
- function verifyServerStatusChange(initialStats, newStats, path, expectedIncrement) {
- // Traverse to the parent of the changed element.
- let pathComponents = path.split(".");
- let initialParent = initialStats;
- let newParent = newStats;
- for (let i = 0; i < pathComponents.length - 1; i++) {
- assert(initialParent.hasOwnProperty(pathComponents[i]),
- "initialStats did not contain component " + i + " of path " + path +
- ", initialStats: " + tojson(initialStats));
- initialParent = initialParent[pathComponents[i]];
-
- assert(newParent.hasOwnProperty(pathComponents[i]),
- "newStats did not contain component " + i + " of path " + path + ", newStats: " +
- tojson(newStats));
- newParent = newParent[pathComponents[i]];
- }
-
- // Test the expected increment of the changed element. The element may not exist in the
- // initial stats, in which case it is treated as 0.
- let lastPathComponent = pathComponents[pathComponents.length - 1];
- let initialValue = 0;
- if (initialParent.hasOwnProperty(lastPathComponent)) {
- initialValue = initialParent[lastPathComponent];
- }
- assert(newParent.hasOwnProperty(lastPathComponent),
- "newStats did not contain last component of path " + path + ", newStats: " +
- tojson(newStats));
- assert.eq(initialValue + expectedIncrement,
- newParent[lastPathComponent],
- "expected " + path + " to increase by " + expectedIncrement + ", initialStats: " +
- tojson(initialStats) + ", newStats: " + tojson(newStats));
-
- // Delete the changed element.
- delete initialParent[lastPathComponent];
- delete newParent[lastPathComponent];
-
- // The stats objects should be equal without the changed element.
- assert.eq(0,
- bsonWoCompare(initialStats, newStats),
- "expected initialStats and newStats to be equal after removing " + path +
- ", initialStats: " + tojson(initialStats) + ", newStats: " +
- tojson(newStats));
+ // Test the expected increment of the changed element. The element may not exist in the
+ // initial stats, in which case it is treated as 0.
+ let lastPathComponent = pathComponents[pathComponents.length - 1];
+ let initialValue = 0;
+ if (initialParent.hasOwnProperty(lastPathComponent)) {
+ initialValue = initialParent[lastPathComponent];
}
-
- const rst = new ReplSetTest(
- {nodes: 2, nodeOptions: {setParameter: 'reportOpWriteConcernCountersInServerStatus=true'}});
- rst.startSet();
- let config = rst.getReplSetConfig();
- config.members[1].priority = 0;
- config.members[0].tags = {dc_va: "rack1"};
- config.settings = {getLastErrorModes: {myTag: {dc_va: 1}}};
- rst.initiate(config);
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const dbName = "test";
- const collName = "server_write_concern_metrics";
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
-
- function resetCollection() {
- testColl.drop();
- assert.commandWorked(testDB.createCollection(collName));
- }
-
- function testWriteConcernMetrics(cmd, opName, inc) {
- // Run command with no writeConcern.
- resetCollection();
- let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(testDB.runCommand(cmd));
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".none",
- inc);
-
- // Run command with writeConcern {j: true}. This should be counted as having no 'w' value.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {j: true}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".none",
- inc);
-
- // Run command with writeConcern {w: "majority"}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(testDB.runCommand(
- Object.assign(Object.assign({}, cmd), {writeConcern: {w: "majority"}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wmajority",
- inc);
-
- // Run command with writeConcern {w: 0}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 0}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wnum.0",
- inc);
-
- // Run command with writeConcern {w: 1}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 1}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wnum.1",
- inc);
-
- // Run command with writeConcern {w: 2}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 2}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wnum.2",
- inc);
-
- // Run command with writeConcern {w: "myTag"}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "myTag"}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wtag.myTag",
- inc);
-
- // writeConcern metrics are not tracked on the secondary.
- resetCollection();
- serverStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(testDB.runCommand(cmd));
- newStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
- assert.eq(
- 0,
- bsonWoCompare(serverStatus.opWriteConcernCounters, newStatus.opWriteConcernCounters),
- "expected no change in secondary writeConcern metrics, before: " +
- tojson(serverStatus) + ", after: " + tojson(newStatus));
- }
-
- // Test single insert/update/delete.
- testWriteConcernMetrics({insert: collName, documents: [{}]}, "insert", 1);
- testWriteConcernMetrics({update: collName, updates: [{q: {}, u: {$set: {a: 1}}}]}, "update", 1);
- testWriteConcernMetrics({delete: collName, deletes: [{q: {}, limit: 1}]}, "delete", 1);
-
- // Test batch writes.
- testWriteConcernMetrics({insert: collName, documents: [{}, {}]}, "insert", 2);
- testWriteConcernMetrics(
- {update: collName, updates: [{q: {}, u: {$set: {a: 1}}}, {q: {}, u: {$set: {a: 1}}}]},
- "update",
- 2);
- testWriteConcernMetrics(
- {delete: collName, deletes: [{q: {}, limit: 1}, {q: {}, limit: 1}]}, "delete", 2);
-
- // Test applyOps.
- testWriteConcernMetrics(
- {applyOps: [{op: "i", ns: testColl.getFullName(), o: {_id: 0}}]}, "insert", 1);
- testWriteConcernMetrics(
- {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 1}}}]},
- "update",
- 1);
- testWriteConcernMetrics(
- {applyOps: [{op: "d", ns: testColl.getFullName(), o: {_id: 0}}]}, "delete", 1);
-
- rst.stopSet();
+ assert(newParent.hasOwnProperty(lastPathComponent),
+ "newStats did not contain last component of path " + path +
+ ", newStats: " + tojson(newStats));
+ assert.eq(initialValue + expectedIncrement,
+ newParent[lastPathComponent],
+ "expected " + path + " to increase by " + expectedIncrement +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " + tojson(newStats));
+
+ // Delete the changed element.
+ delete initialParent[lastPathComponent];
+ delete newParent[lastPathComponent];
+
+ // The stats objects should be equal without the changed element.
+ assert.eq(0,
+ bsonWoCompare(initialStats, newStats),
+ "expected initialStats and newStats to be equal after removing " + path +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " + tojson(newStats));
+}
+
+const rst = new ReplSetTest(
+ {nodes: 2, nodeOptions: {setParameter: 'reportOpWriteConcernCountersInServerStatus=true'}});
+rst.startSet();
+let config = rst.getReplSetConfig();
+config.members[1].priority = 0;
+config.members[0].tags = {
+ dc_va: "rack1"
+};
+config.settings = {
+ getLastErrorModes: {myTag: {dc_va: 1}}
+};
+rst.initiate(config);
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const dbName = "test";
+const collName = "server_write_concern_metrics";
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+
+function resetCollection() {
+ testColl.drop();
+ assert.commandWorked(testDB.createCollection(collName));
+}
+
+function testWriteConcernMetrics(cmd, opName, inc) {
+ // Run command with no writeConcern.
+ resetCollection();
+ let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(cmd));
+ let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".none",
+ inc);
+
+ // Run command with writeConcern {j: true}. This should be counted as having no 'w' value.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {j: true}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".none",
+ inc);
+
+ // Run command with writeConcern {w: "majority"}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "majority"}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wmajority",
+ inc);
+
+ // Run command with writeConcern {w: 0}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 0}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.0",
+ inc);
+
+ // Run command with writeConcern {w: 1}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 1}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.1",
+ inc);
+
+ // Run command with writeConcern {w: 2}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 2}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.2",
+ inc);
+
+ // Run command with writeConcern {w: "myTag"}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "myTag"}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wtag.myTag",
+ inc);
+
+ // writeConcern metrics are not tracked on the secondary.
+ resetCollection();
+ serverStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(cmd));
+ newStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+ assert.eq(0,
+ bsonWoCompare(serverStatus.opWriteConcernCounters, newStatus.opWriteConcernCounters),
+ "expected no change in secondary writeConcern metrics, before: " +
+ tojson(serverStatus) + ", after: " + tojson(newStatus));
+}
+
+// Test single insert/update/delete.
+testWriteConcernMetrics({insert: collName, documents: [{}]}, "insert", 1);
+testWriteConcernMetrics({update: collName, updates: [{q: {}, u: {$set: {a: 1}}}]}, "update", 1);
+testWriteConcernMetrics({delete: collName, deletes: [{q: {}, limit: 1}]}, "delete", 1);
+
+// Test batch writes.
+testWriteConcernMetrics({insert: collName, documents: [{}, {}]}, "insert", 2);
+testWriteConcernMetrics(
+ {update: collName, updates: [{q: {}, u: {$set: {a: 1}}}, {q: {}, u: {$set: {a: 1}}}]},
+ "update",
+ 2);
+testWriteConcernMetrics(
+ {delete: collName, deletes: [{q: {}, limit: 1}, {q: {}, limit: 1}]}, "delete", 2);
+
+// Test applyOps.
+testWriteConcernMetrics(
+ {applyOps: [{op: "i", ns: testColl.getFullName(), o: {_id: 0}}]}, "insert", 1);
+testWriteConcernMetrics(
+ {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 1}}}]},
+ "update",
+ 1);
+testWriteConcernMetrics(
+ {applyOps: [{op: "d", ns: testColl.getFullName(), o: {_id: 0}}]}, "delete", 1);
+
+rst.stopSet();
}());
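The dotted path strings fed to verifyServerStatusChange above (e.g. "insert.wnum.2", "delete.wtag.myTag") address nested fields of opWriteConcernCounters. A sketch of the same traversal in isolation; readPath is a hypothetical helper, and the counters section is assumed to be present because the test enables reportOpWriteConcernCountersInServerStatus:

    // Sketch only: resolve a dotted path against opWriteConcernCounters.
    function readPath(obj, path) {
        return path.split(".").reduce((node, key) => (node ? node[key] : undefined), obj);
    }
    const counters = testDB.serverStatus().opWriteConcernCounters;
    print("w:majority inserts so far: " + readPath(counters, "insert.wmajority"));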
diff --git a/jstests/noPassthrough/session_w0.js b/jstests/noPassthrough/session_w0.js
index dd219581f43..5f6f29c0ec8 100644
--- a/jstests/noPassthrough/session_w0.js
+++ b/jstests/noPassthrough/session_w0.js
@@ -2,19 +2,18 @@
* Explicit shell session should prohibit w: 0 writes.
*/
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- const err = assert.throws(() => {
- sessionColl.insert({x: 1}, {writeConcern: {w: 0}});
- });
+const conn = MongoRunner.runMongod();
+const session = conn.startSession();
+const sessionColl = session.getDatabase("test").getCollection("foo");
+const err = assert.throws(() => {
+ sessionColl.insert({x: 1}, {writeConcern: {w: 0}});
+});
- assert.includes(err.toString(),
- "Unacknowledged writes are prohibited with sessions",
- "wrong error message");
+assert.includes(
+ err.toString(), "Unacknowledged writes are prohibited with sessions", "wrong error message");
- session.endSession();
- MongoRunner.stopMongod(conn);
+session.endSession();
+MongoRunner.stopMongod(conn);
})();
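For contrast with the prohibition tested above: the same unacknowledged write should be accepted on a plain connection outside any explicit session. A sketch, assuming conn from MongoRunner as in the test:

    // Sketch only: w:0 writes are permitted without an explicit session.
    const plainColl = conn.getDB("test").getCollection("foo");
    plainColl.insert({x: 1}, {writeConcern: {w: 0}});  // should not throw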
diff --git a/jstests/noPassthrough/sessions_collection_auto_healing.js b/jstests/noPassthrough/sessions_collection_auto_healing.js
index f12ed2fb91b..8f1851aa408 100644
--- a/jstests/noPassthrough/sessions_collection_auto_healing.js
+++ b/jstests/noPassthrough/sessions_collection_auto_healing.js
@@ -1,60 +1,59 @@
load('jstests/libs/sessions_collection.js');
(function() {
- "use strict";
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- let timeoutMinutes = 5;
-
- var startSession = {startSession: 1};
- var conn = MongoRunner.runMongod(
- {setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
-
- var admin = conn.getDB("admin");
- var config = conn.getDB("config");
-
- // Test that we can use sessions before the sessions collection exists.
- {
- validateSessionsCollection(conn, false, false, timeoutMinutes);
- assert.commandWorked(admin.runCommand({startSession: 1}));
- validateSessionsCollection(conn, false, false, timeoutMinutes);
- }
-
- // Test that a refresh will create the sessions collection.
- {
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(conn, true, true, timeoutMinutes);
- }
-
- // Test that a refresh will (re)create the TTL index on the sessions collection.
- {
- assert.commandWorked(config.system.sessions.dropIndex({lastUse: 1}));
- validateSessionsCollection(conn, true, false, timeoutMinutes);
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(conn, true, true, timeoutMinutes);
- }
-
- MongoRunner.stopMongod(conn);
-
- timeoutMinutes = 4;
- conn = MongoRunner.runMongod({
- restart: conn,
- cleanData: false,
- setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes
- });
- admin = conn.getDB("admin");
- config = conn.getDB("config");
-
- // Test that a change to the TTL index expiration on restart will generate a collMod to change
- // the expiration time.
- {
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(conn, true, true, timeoutMinutes);
- }
-
- MongoRunner.stopMongod(conn);
-
+"use strict";
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+let timeoutMinutes = 5;
+
+var startSession = {startSession: 1};
+var conn =
+ MongoRunner.runMongod({setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
+
+var admin = conn.getDB("admin");
+var config = conn.getDB("config");
+
+// Test that we can use sessions before the sessions collection exists.
+{
+ validateSessionsCollection(conn, false, false, timeoutMinutes);
+ assert.commandWorked(admin.runCommand({startSession: 1}));
+ validateSessionsCollection(conn, false, false, timeoutMinutes);
+}
+
+// Test that a refresh will create the sessions collection.
+{
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true, timeoutMinutes);
+}
+
+// Test that a refresh will (re)create the TTL index on the sessions collection.
+{
+ assert.commandWorked(config.system.sessions.dropIndex({lastUse: 1}));
+ validateSessionsCollection(conn, true, false, timeoutMinutes);
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true, timeoutMinutes);
+}
+
+MongoRunner.stopMongod(conn);
+
+timeoutMinutes = 4;
+conn = MongoRunner.runMongod({
+ restart: conn,
+ cleanData: false,
+ setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes
+});
+admin = conn.getDB("admin");
+config = conn.getDB("config");
+
+// Test that a change to the TTL index expiration on restart will generate a collMod to change
+// the expiration time.
+{
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true, timeoutMinutes);
+}
+
+MongoRunner.stopMongod(conn);
})();
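The TTL index that the refresh (re)creates can also be checked directly; the test's dropIndex call shows it is keyed on {lastUse: 1}. A sketch assuming config and timeoutMinutes as above; the expireAfterSeconds relationship is an assumption based on the parameter's units:

    // Sketch only: locate the sessions TTL index and inspect its expiry.
    const ttlIndex =
        config.system.sessions.getIndexes().find(ix => ix.key && ix.key.lastUse === 1);
    printjson(ttlIndex);
    assert.eq(timeoutMinutes * 60, ttlIndex.expireAfterSeconds);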
diff --git a/jstests/noPassthrough/set_step_params.js b/jstests/noPassthrough/set_step_params.js
index 08a4e36422b..d3fbe5deb02 100644
--- a/jstests/noPassthrough/set_step_params.js
+++ b/jstests/noPassthrough/set_step_params.js
@@ -5,270 +5,270 @@ load("jstests/libs/parallelTester.js");
*/
(function() {
- "use strict";
-
- const kDbName = 'test';
-
- const minConns = 4;
- var stepParams = {
- ShardingTaskExecutorPoolMinSize: minConns,
- ShardingTaskExecutorPoolMaxSize: 10,
- ShardingTaskExecutorPoolMaxConnecting: 5,
- ShardingTaskExecutorPoolHostTimeoutMS: 300000,
- ShardingTaskExecutorPoolRefreshRequirementMS: 60000,
- ShardingTaskExecutorPoolRefreshTimeoutMS: 20000,
- ShardingTaskExecutorPoolReplicaSetMatching: "disabled",
- };
-
- const st = new ShardingTest({
- config: {nodes: 1},
- shards: 1,
- rs0: {nodes: 1},
- mongos: [{setParameter: stepParams}],
+"use strict";
+
+const kDbName = 'test';
+
+const minConns = 4;
+var stepParams = {
+ ShardingTaskExecutorPoolMinSize: minConns,
+ ShardingTaskExecutorPoolMaxSize: 10,
+ ShardingTaskExecutorPoolMaxConnecting: 5,
+ ShardingTaskExecutorPoolHostTimeoutMS: 300000,
+ ShardingTaskExecutorPoolRefreshRequirementMS: 60000,
+ ShardingTaskExecutorPoolRefreshTimeoutMS: 20000,
+ ShardingTaskExecutorPoolReplicaSetMatching: "disabled",
+};
+
+const st = new ShardingTest({
+ config: {nodes: 1},
+ shards: 1,
+ rs0: {nodes: 1},
+ mongos: [{setParameter: stepParams}],
+});
+const mongos = st.s0;
+const rst = st.rs0;
+const primary = rst.getPrimary();
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const allHosts = cfg.members.map(x => x.host);
+const mongosDB = mongos.getDB(kDbName);
+const primaryOnly = [primary.name];
+
+function configureReplSetFailpoint(name, modeValue) {
+ st.rs0.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: name,
+ mode: modeValue,
+ data: {shouldCheckForInterrupt: true},
+ }));
});
- const mongos = st.s0;
- const rst = st.rs0;
- const primary = rst.getPrimary();
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const allHosts = cfg.members.map(x => x.host);
- const mongosDB = mongos.getDB(kDbName);
- const primaryOnly = [primary.name];
-
- function configureReplSetFailpoint(name, modeValue) {
- st.rs0.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: name,
- mode: modeValue,
- data: {shouldCheckForInterrupt: true},
- }));
- });
+}
+
+var threads = [];
+function launchFinds({times, readPref, shouldFail}) {
+ jsTestLog("Starting " + times + " connections");
+ for (var i = 0; i < times; i++) {
+ var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
+ var client = new Mongo(connStr);
+ const ret = client.getDB(dbName).runCommand(
+ {find: "test", limit: 1, "$readPreference": {mode: readPref}});
+
+ if (shouldFail) {
+ assert.commandFailed(ret);
+ } else {
+ assert.commandWorked(ret);
+ }
+ }, st.s.host, readPref, kDbName, shouldFail);
+ thread.start();
+ threads.push(thread);
}
-
- var threads = [];
- function launchFinds({times, readPref, shouldFail}) {
- jsTestLog("Starting " + times + " connections");
- for (var i = 0; i < times; i++) {
- var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
- var client = new Mongo(connStr);
- const ret = client.getDB(dbName).runCommand(
- {find: "test", limit: 1, "$readPreference": {mode: readPref}});
-
- if (shouldFail) {
- assert.commandFailed(ret);
- } else {
- assert.commandWorked(ret);
- }
- }, st.s.host, readPref, kDbName, shouldFail);
- thread.start();
- threads.push(thread);
+}
+
+var currentCheckNum = 0;
+function hasConnPoolStats(args) {
+ const checkNum = currentCheckNum++;
+ jsTestLog("Check #" + checkNum + ": " + tojson(args));
+ var {ready, pending, active, hosts, isAbsent} = args;
+
+ ready = ready ? ready : 0;
+ pending = pending ? pending : 0;
+ active = active ? active : 0;
+ hosts = hosts ? hosts : allHosts;
+
+ function checkStats(res, host) {
+ var stats = res.hosts[host];
+ if (!stats) {
+ jsTestLog("Connection stats for " + host + " are absent");
+ return isAbsent;
}
+
+ jsTestLog("Connection stats for " + host + ": " + tojson(stats));
+ return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
}
- var currentCheckNum = 0;
- function hasConnPoolStats(args) {
- const checkNum = currentCheckNum++;
- jsTestLog("Check #" + checkNum + ": " + tojson(args));
- var {ready, pending, active, hosts, isAbsent} = args;
-
- ready = ready ? ready : 0;
- pending = pending ? pending : 0;
- active = active ? active : 0;
- hosts = hosts ? hosts : allHosts;
-
- function checkStats(res, host) {
- var stats = res.hosts[host];
- if (!stats) {
- jsTestLog("Connection stats for " + host + " are absent");
- return isAbsent;
- }
+ function checkAllStats() {
+ var res = mongos.adminCommand({connPoolStats: 1});
+ return hosts.map(host => checkStats(res, host)).every(x => x);
+ }
- jsTestLog("Connection stats for " + host + ": " + tojson(stats));
- return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
- }
+ assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
- function checkAllStats() {
- var res = mongos.adminCommand({connPoolStats: 1});
- return hosts.map(host => checkStats(res, host)).every(x => x);
- }
+ jsTestLog("Check #" + checkNum + " successful");
+}
- assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
+function updateSetParameters(params) {
+ var cmd = Object.assign({"setParameter": 1}, params);
+ assert.commandWorked(mongos.adminCommand(cmd));
+}
- jsTestLog("Check #" + checkNum + " successful");
- }
+function dropConnections() {
+ assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+}
- function updateSetParameters(params) {
- var cmd = Object.assign({"setParameter": 1}, params);
- assert.commandWorked(mongos.adminCommand(cmd));
- }
+function resetPools() {
+ dropConnections();
+ mongos.adminCommand({multicast: {ping: 0}});
+ hasConnPoolStats({ready: 4});
+}
- function dropConnections() {
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
- }
+function runSubTest(name, fun) {
+ jsTestLog("Running test for " + name);
- function resetPools() {
- dropConnections();
- mongos.adminCommand({multicast: {ping: 0}});
- hasConnPoolStats({ready: 4});
- }
+ resetPools();
- function runSubTest(name, fun) {
- jsTestLog("Running test for " + name);
+ fun();
- resetPools();
+ updateSetParameters(stepParams);
+}
- fun();
+assert.writeOK(mongosDB.test.insert({x: 1}));
+assert.writeOK(mongosDB.test.insert({x: 2}));
+assert.writeOK(mongosDB.test.insert({x: 3}));
+st.rs0.awaitReplication();
- updateSetParameters(stepParams);
- }
+runSubTest("MinSize", function() {
+ dropConnections();
- assert.writeOK(mongosDB.test.insert({x: 1}));
- assert.writeOK(mongosDB.test.insert({x: 2}));
- assert.writeOK(mongosDB.test.insert({x: 3}));
- st.rs0.awaitReplication();
+ // Launch an initial find to trigger the pool to fill up to the min
+ launchFinds({times: 1, readPref: "primary"});
+ hasConnPoolStats({ready: minConns});
- runSubTest("MinSize", function() {
- dropConnections();
+ // Increase by one
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 5});
+ hasConnPoolStats({ready: 5});
- // Launch an initial find to trigger to min
- launchFinds({times: 1, readPref: "primary"});
- hasConnPoolStats({ready: minConns});
+ // Increase to MaxSize
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 10});
+ hasConnPoolStats({ready: 10});
- // Increase by one
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 5});
- hasConnPoolStats({ready: 5});
+ // Decrease to zero
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 0});
+});
- // Increase to MaxSize
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 10});
- hasConnPoolStats({ready: 10});
+runSubTest("MaxSize", function() {
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
- // Decrease to zero
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 0});
- });
+ // Launch 10 blocked finds
+ launchFinds({times: 10, readPref: "primary"});
+ hasConnPoolStats({active: 10, hosts: primaryOnly});
- runSubTest("MaxSize", function() {
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
+ // Increase the max by 5 and launch another 4 blocked finds
+ updateSetParameters({ShardingTaskExecutorPoolMaxSize: 15});
+ launchFinds({times: 4, readPref: "primary"});
+ hasConnPoolStats({active: 14, hosts: primaryOnly});
- // Launch 10 blocked finds
- launchFinds({times: 10, readPref: "primary"});
- hasConnPoolStats({active: 10, hosts: primaryOnly});
+ // Launch yet another 2; these should add only 1 connection
+ launchFinds({times: 2, readPref: "primary"});
+ hasConnPoolStats({active: 15, hosts: primaryOnly});
- // Increase by 5 and Launch another 4 blocked finds
- updateSetParameters({ShardingTaskExecutorPoolMaxSize: 15});
- launchFinds({times: 4, readPref: "primary"});
- hasConnPoolStats({active: 14, hosts: primaryOnly});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ hasConnPoolStats({ready: 15, pending: 0, hosts: primaryOnly});
+});
- // Launch yet another 2, these should add only 1 connection
- launchFinds({times: 2, readPref: "primary"});
- hasConnPoolStats({active: 15, hosts: primaryOnly});
+// Test maxConnecting
+runSubTest("MaxConnecting", function() {
+ const maxPending1 = 2;
+ const maxPending2 = 4;
+ const conns = 6;
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- hasConnPoolStats({ready: 15, pending: 0, hosts: primaryOnly});
+ updateSetParameters({
+ ShardingTaskExecutorPoolMaxSize: 100,
+ ShardingTaskExecutorPoolMaxConnecting: maxPending1,
});
- // Test maxConnecting
- runSubTest("MaxConnecting", function() {
- const maxPending1 = 2;
- const maxPending2 = 4;
- const conns = 6;
-
- updateSetParameters({
- ShardingTaskExecutorPoolMaxSize: 100,
- ShardingTaskExecutorPoolMaxConnecting: maxPending1,
- });
-
- configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
-
- // Go to the limit of maxConnecting, so we're stuck here
- launchFinds({times: maxPending1, readPref: "primary"});
- hasConnPoolStats({pending: maxPending1});
-
- // More won't run right now
- launchFinds({times: conns - maxPending1, readPref: "primary"});
- hasConnPoolStats({pending: maxPending1});
-
- // If we increase our limit, it should fill in some of the connections
- updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending2});
- hasConnPoolStats({pending: maxPending2});
-
- // Dropping the limit doesn't cause us to drop pending
- updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending1});
- hasConnPoolStats({pending: maxPending2});
-
- // Release our pending and walk away
- configureReplSetFailpoint("waitInIsMaster", "off");
- hasConnPoolStats({active: conns});
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
+
+ // Go to the limit of maxConnecting, so we're stuck here
+ launchFinds({times: maxPending1, readPref: "primary"});
+ hasConnPoolStats({pending: maxPending1});
+
+ // More won't run right now
+ launchFinds({times: conns - maxPending1, readPref: "primary"});
+ hasConnPoolStats({pending: maxPending1});
+
+ // If we increase our limit, it should fill in some of the connections
+ updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending2});
+ hasConnPoolStats({pending: maxPending2});
+
+ // Dropping the limit doesn't cause us to drop pending
+ updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending1});
+ hasConnPoolStats({pending: maxPending2});
+
+ // Release our pending and walk away
+ configureReplSetFailpoint("waitInIsMaster", "off");
+ hasConnPoolStats({active: conns});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+});
+
+runSubTest("Timeouts", function() {
+ const conns = minConns;
+ const pendingTimeoutMS = 5000;
+ const toRefreshTimeoutMS = 1000;
+ const idleTimeoutMS1 = 20000;
+ const idleTimeoutMS2 = 15500;
+
+ // Updating separately since the validation depends on existing params
+ updateSetParameters({
+ ShardingTaskExecutorPoolRefreshTimeoutMS: pendingTimeoutMS,
+ });
+ updateSetParameters({
+ ShardingTaskExecutorPoolRefreshRequirementMS: toRefreshTimeoutMS,
});
+ updateSetParameters({
+ ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS1,
+ });
+
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
+
+ // Make ready connections
+ launchFinds({times: conns, readPref: "primary"});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ hasConnPoolStats({ready: conns});
+
+ // Block refreshes and wait for the toRefresh timeout
+ configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
+ sleep(toRefreshTimeoutMS);
+
+ // Confirm that we're in pending for all of our conns
+ hasConnPoolStats({pending: conns});
- runSubTest("Timeouts", function() {
- const conns = minConns;
- const pendingTimeoutMS = 5000;
- const toRefreshTimeoutMS = 1000;
- const idleTimeoutMS1 = 20000;
- const idleTimeoutMS2 = 15500;
-
- // Updating separately since the validation depends on existing params
- updateSetParameters({
- ShardingTaskExecutorPoolRefreshTimeoutMS: pendingTimeoutMS,
- });
- updateSetParameters({
- ShardingTaskExecutorPoolRefreshRequirementMS: toRefreshTimeoutMS,
- });
- updateSetParameters({
- ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS1,
- });
-
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
-
- // Make ready connections
- launchFinds({times: conns, readPref: "primary"});
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- hasConnPoolStats({ready: conns});
-
- // Block refreshes and wait for the toRefresh timeout
- configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
- sleep(toRefreshTimeoutMS);
-
- // Confirm that we're in pending for all of our conns
- hasConnPoolStats({pending: conns});
-
- // Set our min conns to 0 to make sure we don't refresh after pending timeout
- updateSetParameters({
- ShardingTaskExecutorPoolMinSize: 0,
- });
-
- // Wait for our pending timeout
- sleep(pendingTimeoutMS);
- hasConnPoolStats({});
-
- configureReplSetFailpoint("waitInIsMaster", "off");
-
- // Reset the min conns to make sure normal refresh doesn't extend the timeout
- updateSetParameters({
- ShardingTaskExecutorPoolMinSize: minConns,
- });
-
- // Wait for our host timeout and confirm the pool drops
- sleep(idleTimeoutMS1);
- hasConnPoolStats({isAbsent: true});
-
- // Reset the pool
- resetPools();
-
- // Sleep for a shorter timeout and then update so we're already expired
- sleep(idleTimeoutMS2);
- updateSetParameters({ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS2});
- hasConnPoolStats({isAbsent: true});
+ // Set our min conns to 0 to make sure we don't refresh after pending timeout
+ updateSetParameters({
+ ShardingTaskExecutorPoolMinSize: 0,
});
- threads.forEach(function(thread) {
- thread.join();
+ // Wait for our pending timeout
+ sleep(pendingTimeoutMS);
+ hasConnPoolStats({});
+
+ configureReplSetFailpoint("waitInIsMaster", "off");
+
+ // Reset the min conns to make sure normal refresh doesn't extend the timeout
+ updateSetParameters({
+ ShardingTaskExecutorPoolMinSize: minConns,
});
- st.stop();
+ // Wait for our host timeout and confirm the pool drops
+ sleep(idleTimeoutMS1);
+ hasConnPoolStats({isAbsent: true});
+
+ // Reset the pool
+ resetPools();
+
+ // Sleep for a shorter timeout and then update so we're already expired
+ sleep(idleTimeoutMS2);
+ updateSetParameters({ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS2});
+ hasConnPoolStats({isAbsent: true});
+});
+
+threads.forEach(function(thread) {
+ thread.join();
+});
+
+st.stop();
})();
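Stripped of its generality, the hasConnPoolStats pattern above reduces to polling connPoolStats until one host's counters settle. A sketch, with mongos, a target host string, and the expected counts assumed:

    // Sketch only: poll until the pool for one host reaches the expected state.
    assert.soon(() => {
        const stats = mongos.adminCommand({connPoolStats: 1}).hosts[host];
        return stats && stats.available === 4 && stats.refreshing === 0 && stats.inUse === 0;
    }, "pool never settled at 4 ready connections", 10000);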
diff --git a/jstests/noPassthrough/setshellparameter.js b/jstests/noPassthrough/setshellparameter.js
index 9fd17abb605..deed3dc4076 100644
--- a/jstests/noPassthrough/setshellparameter.js
+++ b/jstests/noPassthrough/setshellparameter.js
@@ -1,22 +1,20 @@
// Test --setShellParameter CLI switch.
(function() {
- 'use strict';
+'use strict';
- function test(ssp, succeed) {
- const result =
- runMongoProgram('./mongo', '--setShellParameter', ssp, '--nodb', '--eval', ';');
- assert.eq(0 == result,
- succeed,
- '--setShellParameter ' + ssp + 'worked/didn\'t-work unexpectedly');
- }
+function test(ssp, succeed) {
+ const result = runMongoProgram('./mongo', '--setShellParameter', ssp, '--nodb', '--eval', ';');
+ assert.eq(
+ 0 == result, succeed, '--setShellParameter ' + ssp + ' worked/didn\'t-work unexpectedly');
+}
- // Whitelisted
- test('disabledSecureAllocatorDomains=foo', true);
+// Whitelisted
+test('disabledSecureAllocatorDomains=foo', true);
- // Not whitelisted
- test('enableTestCommands=1', false);
+// Not whitelisted
+test('enableTestCommands=1', false);
- // Unknown
- test('theAnswerToTheQuestionOfLifeTheUniverseAndEverything=42', false);
+// Unknown
+test('theAnswerToTheQuestionOfLifeTheUniverseAndEverything=42', false);
})();
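The helper above boils down to a single runMongoProgram call; invoked directly it looks roughly like this, a sketch reusing the whitelisted parameter from the test:

    // Sketch only: spawn a shell with --setShellParameter and check the exit code.
    const rc = runMongoProgram(
        './mongo', '--setShellParameter', 'disabledSecureAllocatorDomains=foo', '--nodb',
        '--eval', ';');
    assert.eq(0, rc);  // whitelisted, so the shell should start and exit cleanly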
diff --git a/jstests/noPassthrough/shard_fixture_selftest.js b/jstests/noPassthrough/shard_fixture_selftest.js
index b4b56ba74d6..dde664865c1 100644
--- a/jstests/noPassthrough/shard_fixture_selftest.js
+++ b/jstests/noPassthrough/shard_fixture_selftest.js
@@ -2,55 +2,55 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- load('jstests/concurrency/fsm_libs/shard_fixture.js');
+load('jstests/concurrency/fsm_libs/shard_fixture.js');
- const rsTestOriginal = new ShardingTest({
- shards: 2,
- mongos: 2,
- config: 2,
- shardAsReplicaSet: true,
- });
+const rsTestOriginal = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ config: 2,
+ shardAsReplicaSet: true,
+});
- const rsTestWrapper =
- new FSMShardingTest(`mongodb://${rsTestOriginal.s0.host},${rsTestOriginal.s1.host}`);
+const rsTestWrapper =
+ new FSMShardingTest(`mongodb://${rsTestOriginal.s0.host},${rsTestOriginal.s1.host}`);
- assert.eq(rsTestWrapper.s(0).host, rsTestOriginal.s0.host);
- assert.eq(rsTestWrapper.s(1).host, rsTestOriginal.s1.host);
- assert.eq(rsTestWrapper.s(2), rsTestOriginal.s2); // Both should be undefined.
+assert.eq(rsTestWrapper.s(0).host, rsTestOriginal.s0.host);
+assert.eq(rsTestWrapper.s(1).host, rsTestOriginal.s1.host);
+assert.eq(rsTestWrapper.s(2), rsTestOriginal.s2); // Both should be undefined.
- assert.eq(rsTestWrapper.shard(0).host, rsTestOriginal.shard0.host);
- assert.eq(rsTestWrapper.shard(1).host, rsTestOriginal.shard1.host);
- assert.eq(rsTestWrapper.shard(2), rsTestOriginal.shard2); // Both should be undefined.
+assert.eq(rsTestWrapper.shard(0).host, rsTestOriginal.shard0.host);
+assert.eq(rsTestWrapper.shard(1).host, rsTestOriginal.shard1.host);
+assert.eq(rsTestWrapper.shard(2), rsTestOriginal.shard2); // Both should be undefined.
- assert.eq(rsTestWrapper.rs(0).getURL(), rsTestOriginal.rs0.getURL());
- assert.eq(rsTestWrapper.rs(1).getURL(), rsTestOriginal.rs1.getURL());
- assert.eq(rsTestWrapper.rs(2), rsTestOriginal.rs2); // Both should be undefined.
+assert.eq(rsTestWrapper.rs(0).getURL(), rsTestOriginal.rs0.getURL());
+assert.eq(rsTestWrapper.rs(1).getURL(), rsTestOriginal.rs1.getURL());
+assert.eq(rsTestWrapper.rs(2), rsTestOriginal.rs2); // Both should be undefined.
- assert.eq(rsTestWrapper.d(0), rsTestOriginal.d0); // Both should be undefined.
+assert.eq(rsTestWrapper.d(0), rsTestOriginal.d0); // Both should be undefined.
- assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
- assert.eq(rsTestWrapper.c(1).host, rsTestOriginal.c1.host);
- assert.eq(rsTestWrapper.c(2), rsTestOriginal.c2); // Both should be undefined.
+assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
+assert.eq(rsTestWrapper.c(1).host, rsTestOriginal.c1.host);
+assert.eq(rsTestWrapper.c(2), rsTestOriginal.c2); // Both should be undefined.
- rsTestOriginal.stop();
+rsTestOriginal.stop();
- const dTestOriginal = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- shardAsReplicaSet: false,
- });
+const dTestOriginal = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ shardAsReplicaSet: false,
+});
- const dTestWrapper = new FSMShardingTest(dTestOriginal.s.host);
+const dTestWrapper = new FSMShardingTest(dTestOriginal.s.host);
- assert.eq(dTestWrapper.shard(0).host, dTestOriginal.shard0.host);
- assert.eq(dTestWrapper.s(0).host, dTestOriginal.s0.host);
- assert.eq(dTestWrapper.d(0).host, dTestOriginal.d0.host);
- assert.eq(dTestWrapper.c(0).host, dTestOriginal.c0.host);
+assert.eq(dTestWrapper.shard(0).host, dTestOriginal.shard0.host);
+assert.eq(dTestWrapper.s(0).host, dTestOriginal.s0.host);
+assert.eq(dTestWrapper.d(0).host, dTestOriginal.d0.host);
+assert.eq(dTestWrapper.c(0).host, dTestOriginal.c0.host);
- assert.eq(dTestWrapper.rs(0), dTestOriginal.rs0); // Both should be undefined.
+assert.eq(dTestWrapper.rs(0), dTestOriginal.rs0); // Both should be undefined.
- dTestOriginal.stop();
+dTestOriginal.stop();
})();
\ No newline at end of file
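
The selftest above checks one contract: each indexed accessor on the wrapper resolves to the matching numbered property on the underlying ShardingTest, and indexes past the end agree with absent fixtures by both being undefined. A toy model of that contract, with hypothetical names:

    // Hypothetical sketch of the accessor contract: indexing past the end yields undefined.
    function makeAccessor(items) {
        return (n) => items[n];
    }
    const s = makeAccessor(['mongos0', 'mongos1']);
    assert.eq(s(0), 'mongos0');
    assert.eq(s(2), undefined);  // matches the "Both should be undefined" checks above
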
diff --git a/jstests/noPassthrough/shell_appname_uri.js b/jstests/noPassthrough/shell_appname_uri.js
index e7c43164c11..c3a087c1a5b 100644
--- a/jstests/noPassthrough/shell_appname_uri.js
+++ b/jstests/noPassthrough/shell_appname_uri.js
@@ -1,77 +1,77 @@
// @tags: [requires_profiling]
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- const uri = "mongodb://" + conn.host + "/test";
- const tests = [];
+const conn = MongoRunner.runMongod();
+const uri = "mongodb://" + conn.host + "/test";
+const tests = [];
- // Asserts that system.profile contains only entries
- // with application.name = appname (or undefined)
- function assertProfileOnlyContainsAppName(db, appname) {
- const res = db.system.profile.distinct("appName");
- assert(res.length > 0, "system.profile does not contain any docs");
- if (res.length > 1 || res.indexOf(appname) === -1) {
- // Dump collection.
- print("dumping db.system.profile");
- db.system.profile.find().forEach((doc) => printjsononeline(doc));
- doassert(`system.profile expected to only have appName=${appname}` +
- ` but found ${tojson(res)}`);
- }
+// Asserts that system.profile contains only entries
+// with application.name = appname (or undefined)
+function assertProfileOnlyContainsAppName(db, appname) {
+ const res = db.system.profile.distinct("appName");
+ assert(res.length > 0, "system.profile does not contain any docs");
+ if (res.length > 1 || res.indexOf(appname) === -1) {
+ // Dump collection.
+ print("dumping db.system.profile");
+ db.system.profile.find().forEach((doc) => printjsononeline(doc));
+ doassert(`system.profile expected to only have appName=${appname}` +
+ ` but found ${tojson(res)}`);
}
+}
- tests.push(function testDefaultAppName() {
- const db = new Mongo(uri).getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, "MongoDB Shell");
- });
+tests.push(function testDefaultAppName() {
+ const db = new Mongo(uri).getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, "MongoDB Shell");
+});
- tests.push(function testAppName() {
- const db = new Mongo(uri + "?appName=TestAppName").getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, "TestAppName");
- });
+tests.push(function testAppName() {
+ const db = new Mongo(uri + "?appName=TestAppName").getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, "TestAppName");
+});
- tests.push(function testMultiWordAppName() {
- const db = new Mongo(uri + "?appName=Test%20App%20Name").getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, "Test App Name");
- });
+tests.push(function testMultiWordAppName() {
+ const db = new Mongo(uri + "?appName=Test%20App%20Name").getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, "Test App Name");
+});
- tests.push(function testLongAppName() {
- // From MongoDB Handshake specification:
- // The client.application.name cannot exceed 128 bytes. MongoDB will return an error if
- // these limits are not adhered to, which will result in handshake failure. Drivers MUST
- // validate these values and truncate driver provided values if necessary.
- const longAppName = "a".repeat(129);
- assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
+tests.push(function testLongAppName() {
+ // From MongoDB Handshake specification:
+ // The client.application.name cannot exceed 128 bytes. MongoDB will return an error if
+ // these limits are not adhered to, which will result in handshake failure. Drivers MUST
+ // validate these values and truncate driver provided values if necessary.
+ const longAppName = "a".repeat(129);
+ assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
- // But a 128 character appname should connect without issue.
- const notTooLongAppName = "a".repeat(128);
- const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, notTooLongAppName);
- });
+ // But a 128 character appname should connect without issue.
+ const notTooLongAppName = "a".repeat(128);
+ const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, notTooLongAppName);
+});
- tests.push(function testLongAppNameWithMultiByteUTF8() {
- // Each epsilon character is two bytes in UTF-8.
- const longAppName = "\u0190".repeat(65);
- assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
+tests.push(function testLongAppNameWithMultiByteUTF8() {
+ // Each epsilon character is two bytes in UTF-8.
+ const longAppName = "\u0190".repeat(65);
+ assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
- // But a 128 character appname should connect without issue.
- const notTooLongAppName = "\u0190".repeat(64);
- const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, notTooLongAppName);
- });
+ // But a 64-character appname (128 bytes) should connect without issue.
+ const notTooLongAppName = "\u0190".repeat(64);
+ const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, notTooLongAppName);
+});
- tests.forEach((test) => {
- const db = conn.getDB("test");
- db.dropDatabase();
- // Entries in db.system.profile have application name.
- db.setProfilingLevel(2);
- test();
- });
+tests.forEach((test) => {
+ const db = conn.getDB("test");
+ db.dropDatabase();
+ // Entries in db.system.profile have application name.
+ db.setProfilingLevel(2);
+ test();
+});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
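
The repeat counts in the two long-name tests come straight from the 128-byte handshake cap: ASCII 'a' is one byte in UTF-8 while U+0190 is two, so 129 one-byte characters and 65 two-byte characters both overflow the limit, while 128 and 64 sit exactly at it:

    // Byte arithmetic behind the repeat counts above.
    assert.eq(129, 'a'.repeat(129).length * 1);      // 129 bytes: rejected
    assert.eq(128, '\u0190'.repeat(64).length * 2);  // 128 bytes: accepted
    assert.eq(130, '\u0190'.repeat(65).length * 2);  // 130 bytes: rejected
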
diff --git a/jstests/noPassthrough/shell_can_retry_writes.js b/jstests/noPassthrough/shell_can_retry_writes.js
index 60ef8df9cd1..e07b64e287f 100644
--- a/jstests/noPassthrough/shell_can_retry_writes.js
+++ b/jstests/noPassthrough/shell_can_retry_writes.js
@@ -4,169 +4,169 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.startSession({retryWrites: true}).getDatabase("test");
- const coll = db.shell_can_retry_writes;
+const primary = rst.getPrimary();
+const db = primary.startSession({retryWrites: true}).getDatabase("test");
+const coll = db.shell_can_retry_writes;
- function testCommandCanBeRetried(func, expected = true) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function testCommandCanBeRetried(func, expected = true) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const sentinel = {};
- let cmdObjSeen = sentinel;
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
+ }
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
+ }
- let cmdName = Object.keys(cmdObjSeen)[0];
+ let cmdName = Object.keys(cmdObjSeen)[0];
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
+ }
- assert(cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) + " to have a logical session id: " +
+ assert(cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a logical session id: " + func.toString());
+
+ if (expected) {
+ assert(
+ cmdObjSeen.hasOwnProperty("txnNumber"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to be assigned a transaction number since it can be retried: " + func.toString());
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to not be assigned a transaction number since it cannot be retried: " +
func.toString());
-
- if (expected) {
- assert(cmdObjSeen.hasOwnProperty("txnNumber"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to be assigned a transaction number since it can be retried: " +
- func.toString());
- } else {
- assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to not be assigned a transaction number since it cannot be retried: " +
- func.toString());
- }
}
+}
- testCommandCanBeRetried(function() {
- coll.insertOne({_id: 0});
- });
+testCommandCanBeRetried(function() {
+ coll.insertOne({_id: 0});
+});
- testCommandCanBeRetried(function() {
- coll.updateOne({_id: 0}, {$set: {a: 1}});
- });
+testCommandCanBeRetried(function() {
+ coll.updateOne({_id: 0}, {$set: {a: 1}});
+});
- testCommandCanBeRetried(function() {
- coll.updateOne({_id: 1}, {$set: {a: 2}}, {upsert: true});
- });
+testCommandCanBeRetried(function() {
+ coll.updateOne({_id: 1}, {$set: {a: 2}}, {upsert: true});
+});
- testCommandCanBeRetried(function() {
- coll.deleteOne({_id: 1});
- });
+testCommandCanBeRetried(function() {
+ coll.deleteOne({_id: 1});
+});
- testCommandCanBeRetried(function() {
- coll.insertMany([{_id: 2, b: 3}, {_id: 3, b: 4}], {ordered: true});
- });
+testCommandCanBeRetried(function() {
+ coll.insertMany([{_id: 2, b: 3}, {_id: 3, b: 4}], {ordered: true});
+});
- testCommandCanBeRetried(function() {
- coll.insertMany([{_id: 4}, {_id: 5}], {ordered: false});
- });
+testCommandCanBeRetried(function() {
+ coll.insertMany([{_id: 4}, {_id: 5}], {ordered: false});
+});
- testCommandCanBeRetried(function() {
- coll.updateMany({a: {$gt: 0}}, {$set: {c: 7}});
- }, false);
+testCommandCanBeRetried(function() {
+ coll.updateMany({a: {$gt: 0}}, {$set: {c: 7}});
+}, false);
- testCommandCanBeRetried(function() {
- coll.deleteMany({b: {$lt: 5}});
- }, false);
+testCommandCanBeRetried(function() {
+ coll.deleteMany({b: {$lt: 5}});
+}, false);
- //
- // Tests for writeConcern.
- //
+//
+// Tests for writeConcern.
+//
- testCommandCanBeRetried(function() {
- coll.insertOne({_id: 1}, {w: 1});
- });
+testCommandCanBeRetried(function() {
+ coll.insertOne({_id: 1}, {w: 1});
+});
- testCommandCanBeRetried(function() {
- coll.insertOne({_id: "majority"}, {w: "majority"});
- });
+testCommandCanBeRetried(function() {
+ coll.insertOne({_id: "majority"}, {w: "majority"});
+});
- //
- // Tests for bulkWrite().
- //
+//
+// Tests for bulkWrite().
+//
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{insertOne: {document: {_id: 10}}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{insertOne: {document: {_id: 10}}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 1}}}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 1}}}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 2}}, upsert: true}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 2}}, upsert: true}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{deleteOne: {filter: {_id: 10}}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{deleteOne: {filter: {_id: 10}}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite(
- [{insertOne: {document: {_id: 20, b: 3}}}, {insertOne: {document: {_id: 30, b: 4}}}],
- {ordered: true});
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite(
+ [{insertOne: {document: {_id: 20, b: 3}}}, {insertOne: {document: {_id: 30, b: 4}}}],
+ {ordered: true});
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{insertOne: {document: {_id: 40}}}, {insertOne: {document: {_id: 50}}}],
- {ordered: false});
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{insertOne: {document: {_id: 40}}}, {insertOne: {document: {_id: 50}}}],
+ {ordered: false});
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{updateMany: {filter: {a: {$gt: 0}}, update: {$set: {c: 7}}}}]);
- }, false);
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{updateMany: {filter: {a: {$gt: 0}}, update: {$set: {c: 7}}}}]);
+}, false);
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{deleteMany: {filter: {b: {$lt: 5}}}}]);
- }, false);
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{deleteMany: {filter: {b: {$lt: 5}}}}]);
+}, false);
- //
- // Tests for wrappers around "findAndModify" command.
- //
+//
+// Tests for wrappers around "findAndModify" command.
+//
- testCommandCanBeRetried(function() {
- coll.findOneAndUpdate({_id: 100}, {$set: {d: 9}}, {upsert: true});
- });
+testCommandCanBeRetried(function() {
+ coll.findOneAndUpdate({_id: 100}, {$set: {d: 9}}, {upsert: true});
+});
- testCommandCanBeRetried(function() {
- coll.findOneAndReplace({_id: 100}, {e: 11});
- });
+testCommandCanBeRetried(function() {
+ coll.findOneAndReplace({_id: 100}, {e: 11});
+});
- testCommandCanBeRetried(function() {
- coll.findOneAndDelete({e: {$exists: true}});
- });
+testCommandCanBeRetried(function() {
+ coll.findOneAndDelete({e: {$exists: true}});
+});
- db.getSession().endSession();
- rst.stopSet();
+db.getSession().endSession();
+rst.stopSet();
})();
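
Each testCommandCanBeRetried call above uses the same spy pattern: temporarily replace Mongo.prototype.runCommand, record the last command object seen, and restore the original in a finally block so a failing test cannot leak the wrapper into later tests. Distilled into a reusable sketch (names are illustrative):

    function captureLastCommand(func) {
        const original = Mongo.prototype.runCommand;
        let seen;
        Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
            seen = cmdObj;  // record the outgoing command object
            return original.apply(this, arguments);
        };
        try {
            func();
        } finally {
            Mongo.prototype.runCommand = original;  // always restore the real method
        }
        return seen;
    }
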
diff --git a/jstests/noPassthrough/shell_can_use_read_concern.js b/jstests/noPassthrough/shell_can_use_read_concern.js
index 183da4686ec..f3d567960e0 100644
--- a/jstests/noPassthrough/shell_can_use_read_concern.js
+++ b/jstests/noPassthrough/shell_can_use_read_concern.js
@@ -4,231 +4,226 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- // This test makes assertions on commands run without logical session ids.
- TestData.disableImplicitSessions = true;
+// This test makes assertions on commands run without logical session ids.
+TestData.disableImplicitSessions = true;
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- function runTests({withSession}) {
- let db;
+function runTests({withSession}) {
+ let db;
- if (withSession) {
- primary.setCausalConsistency(false);
- db = primary.startSession({causalConsistency: true}).getDatabase("test");
- } else {
- primary.setCausalConsistency(true);
- db = primary.getDB("test");
+ if (withSession) {
+ primary.setCausalConsistency(false);
+ db = primary.startSession({causalConsistency: true}).getDatabase("test");
+ } else {
+ primary.setCausalConsistency(true);
+ db = primary.getDB("test");
+ }
+
+ const coll = db.shell_can_use_read_concern;
+ coll.drop();
+
+ function testCommandCanBeCausallyConsistent(func, {
+ expectedSession: expectedSession = withSession,
+ expectedAfterClusterTime: expectedAfterClusterTime = true
+ } = {}) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
+
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
+
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- const coll = db.shell_can_use_read_concern;
- coll.drop();
-
- function testCommandCanBeCausallyConsistent(func, {
- expectedSession: expectedSession = withSession,
- expectedAfterClusterTime: expectedAfterClusterTime = true
- } = {}) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
-
- const sentinel = {};
- let cmdObjSeen = sentinel;
-
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
-
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " +
- func.toString());
- }
-
- let cmdName = Object.keys(cmdObjSeen)[0];
-
- // If the command is in a wrapped form, then we look for the actual command object
- // inside
- // the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
-
- if (expectedSession) {
- assert(cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to have a logical session id: " + func.toString());
- } else {
- assert(!cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to not have a logical session id: " + func.toString());
- }
-
- if (expectedAfterClusterTime) {
- assert(cmdObjSeen.hasOwnProperty("readConcern"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to have a readConcern object since it can be causally consistent: " +
- func.toString());
-
- const readConcern = cmdObjSeen.readConcern;
- assert(readConcern.hasOwnProperty("afterClusterTime"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to specify afterClusterTime since it can be causally consistent: " +
- func.toString());
- } else {
- assert(!cmdObjSeen.hasOwnProperty("readConcern"),
- "Expected operation " + tojson(cmdObjSeen) + " to not have a readConcern" +
- " object since it cannot be causally consistent: " + func.toString());
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
}
- //
- // Tests for the "find" and "getMore" commands.
- //
-
- {
- testCommandCanBeCausallyConsistent(function() {
- assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
- }, {expectedSession: withSession, expectedAfterClusterTime: false});
-
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(
- db.runCommand({find: coll.getName(), batchSize: 5, singleBatch: true}));
- });
-
- const cursor = coll.find().batchSize(2);
-
- testCommandCanBeCausallyConsistent(function() {
- cursor.next();
- cursor.next();
- });
-
- testCommandCanBeCausallyConsistent(function() {
- cursor.next();
- cursor.next();
- cursor.next();
- assert(!cursor.hasNext());
- }, {
- expectedSession: withSession,
- expectedAfterClusterTime: false,
- });
+ let cmdName = Object.keys(cmdObjSeen)[0];
+
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
}
- //
- // Tests for the "count" command.
- //
+ if (expectedSession) {
+ assert(cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a logical session id: " + func.toString());
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to not have a logical session id: " + func.toString());
+ }
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({count: coll.getName()}));
- });
+ if (expectedAfterClusterTime) {
+ assert(cmdObjSeen.hasOwnProperty("readConcern"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a readConcern object since it can be causally consistent: " +
+ func.toString());
+
+ const readConcern = cmdObjSeen.readConcern;
+ assert(readConcern.hasOwnProperty("afterClusterTime"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to specify afterClusterTime since it can be causally consistent: " +
+ func.toString());
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("readConcern"),
+ "Expected operation " + tojson(cmdObjSeen) + " to not have a readConcern" +
+ " object since it cannot be causally consistent: " + func.toString());
+ }
+ }
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({query: {count: coll.getName()}}));
- });
+ //
+ // Tests for the "find" and "getMore" commands.
+ //
+ {
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({$query: {count: coll.getName()}}));
- });
+ assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+ }, {expectedSession: withSession, expectedAfterClusterTime: false});
testCommandCanBeCausallyConsistent(function() {
- assert.eq(5, coll.count());
+ assert.commandWorked(
+ db.runCommand({find: coll.getName(), batchSize: 5, singleBatch: true}));
});
- //
- // Tests for the "distinct" command.
- //
+ const cursor = coll.find().batchSize(2);
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({distinct: coll.getName(), key: "_id"}));
+ cursor.next();
+ cursor.next();
});
testCommandCanBeCausallyConsistent(function() {
- const values = coll.distinct("_id");
- assert.eq(5, values.length, tojson(values));
+ cursor.next();
+ cursor.next();
+ cursor.next();
+ assert(!cursor.hasNext());
+ }, {
+ expectedSession: withSession,
+ expectedAfterClusterTime: false,
});
+ }
- //
- // Tests for the "aggregate" command.
- //
-
- {
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 5}}));
- });
-
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- cursor: {batchSize: 5},
- explain: true
- }));
- });
-
- let cursor;
-
- testCommandCanBeCausallyConsistent(function() {
- cursor = coll.aggregate([], {cursor: {batchSize: 2}});
- cursor.next();
- cursor.next();
- });
-
- testCommandCanBeCausallyConsistent(function() {
- cursor.next();
- cursor.next();
- cursor.next();
- assert(!cursor.hasNext());
- }, {
- expectedSession: withSession,
- expectedAfterClusterTime: false,
- });
- }
+ //
+ // Tests for the "count" command.
+ //
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({count: coll.getName()}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({query: {count: coll.getName()}}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({$query: {count: coll.getName()}}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.eq(5, coll.count());
+ });
- //
- // Tests for the "geoSearch" command.
- //
+ //
+ // Tests for the "distinct" command.
+ //
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({distinct: coll.getName(), key: "_id"}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ const values = coll.distinct("_id");
+ assert.eq(5, values.length, tojson(values));
+ });
+
+ //
+ // Tests for the "aggregate" command.
+ //
+
+ {
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(coll.createIndex({loc: "geoHaystack", other: 1}, {bucketSize: 1}));
- }, {expectedSession: withSession, expectedAfterClusterTime: false});
+ assert.commandWorked(
+ db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 5}}));
+ });
testCommandCanBeCausallyConsistent(function() {
assert.commandWorked(db.runCommand(
- {geoSearch: coll.getName(), near: [0, 0], maxDistance: 1, search: {}}));
+ {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 5}, explain: true}));
});
- //
- // Tests for the "explain" command.
- //
+ let cursor;
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({explain: {find: coll.getName()}}));
+ cursor = coll.aggregate([], {cursor: {batchSize: 2}});
+ cursor.next();
+ cursor.next();
});
testCommandCanBeCausallyConsistent(function() {
- coll.find().explain();
+ cursor.next();
+ cursor.next();
+ cursor.next();
+ assert(!cursor.hasNext());
+ }, {
+ expectedSession: withSession,
+ expectedAfterClusterTime: false,
});
+ }
- testCommandCanBeCausallyConsistent(function() {
- coll.explain().find().finish();
- });
+ //
+ // Tests for the "geoSearch" command.
+ //
- db.getSession().endSession();
- }
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(coll.createIndex({loc: "geoHaystack", other: 1}, {bucketSize: 1}));
+ }, {expectedSession: withSession, expectedAfterClusterTime: false});
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(
+ db.runCommand({geoSearch: coll.getName(), near: [0, 0], maxDistance: 1, search: {}}));
+ });
+
+ //
+ // Tests for the "explain" command.
+ //
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({explain: {find: coll.getName()}}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ coll.find().explain();
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ coll.explain().find().finish();
+ });
+
+ db.getSession().endSession();
+}
- runTests({withSession: false});
- runTests({withSession: true});
+runTests({withSession: false});
+runTests({withSession: true});
- rst.stopSet();
+rst.stopSet();
})();
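
The long assertion bodies above boil down to one structural check on the outgoing command: a read is causally consistent exactly when it carries readConcern.afterClusterTime (plus an lsid in the session case). As a compact predicate over the same field names:

    // Minimal predicate matching the readConcern checks above.
    function isCausallyConsistent(cmdObj) {
        return cmdObj.hasOwnProperty("readConcern") &&
            cmdObj.readConcern.hasOwnProperty("afterClusterTime");
    }
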
diff --git a/jstests/noPassthrough/shell_check_program_extension.js b/jstests/noPassthrough/shell_check_program_extension.js
index c8932b212c5..72b5b47b645 100644
--- a/jstests/noPassthrough/shell_check_program_extension.js
+++ b/jstests/noPassthrough/shell_check_program_extension.js
@@ -3,15 +3,15 @@
*/
(function() {
- 'use strict';
+'use strict';
- if (_isWindows()) {
- const filename = 'jstests/noPassthrough/libs/testWindowsExtension.bat';
+if (_isWindows()) {
+ const filename = 'jstests/noPassthrough/libs/testWindowsExtension.bat';
- clearRawMongoProgramOutput();
- const result = runMongoProgram(filename);
- assert.eq(result, 42);
- } else {
- jsTestLog("This test is only relevant for Windows environments.");
- }
+ clearRawMongoProgramOutput();
+ const result = runMongoProgram(filename);
+ assert.eq(result, 42);
+} else {
+ jsTestLog("This test is only relevant for Windows environments.");
+}
})();
diff --git a/jstests/noPassthrough/shell_cmd_assertions.js b/jstests/noPassthrough/shell_cmd_assertions.js
index de61b88355c..4bc800663f8 100644
--- a/jstests/noPassthrough/shell_cmd_assertions.js
+++ b/jstests/noPassthrough/shell_cmd_assertions.js
@@ -3,360 +3,357 @@
*/
(function() {
- "use strict";
-
- const conn = MongoRunner.runMongod();
- const db = conn.getDB("commandAssertions");
- const kFakeErrCode = 1234567890;
- const tests = [];
-
- const sampleWriteConcernError = {
- n: 1,
- ok: 1,
- writeConcernError: {
- code: ErrorCodes.WriteConcernFailed,
- codeName: "WriteConcernFailed",
- errmsg: "waiting for replication timed out",
- errInfo: {
- wtimeout: true,
- },
+"use strict";
+
+const conn = MongoRunner.runMongod();
+const db = conn.getDB("commandAssertions");
+const kFakeErrCode = 1234567890;
+const tests = [];
+
+const sampleWriteConcernError = {
+ n: 1,
+ ok: 1,
+ writeConcernError: {
+ code: ErrorCodes.WriteConcernFailed,
+ codeName: "WriteConcernFailed",
+ errmsg: "waiting for replication timed out",
+ errInfo: {
+ wtimeout: true,
},
- };
-
- function setup() {
- db.coll.drop();
- assert.writeOK(db.coll.insert({_id: 1}));
- }
-
- // Raw command responses.
- tests.push(function rawCommandOk() {
- const res = db.runCommand({"ping": 1});
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- function _assertMsgFunctionExecution(
- assertFunc, assertParameter, {expectException: expectException = false} = {}) {
- var msgFunctionCalled = false;
- var expectedAssert = assert.doesNotThrow;
-
- if (expectException) {
- expectedAssert = assert.throws;
- }
-
- expectedAssert(() => {
- assertFunc(assertParameter, () => {
- msgFunctionCalled = true;
- });
- });
-
- assert.eq(
- expectException, msgFunctionCalled, "msg function execution should match assertion");
+ },
+};
+
+function setup() {
+ db.coll.drop();
+ assert.writeOK(db.coll.insert({_id: 1}));
+}
+
+// Raw command responses.
+tests.push(function rawCommandOk() {
+ const res = db.runCommand({"ping": 1});
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+function _assertMsgFunctionExecution(
+ assertFunc, assertParameter, {expectException: expectException = false} = {}) {
+ var msgFunctionCalled = false;
+ var expectedAssert = assert.doesNotThrow;
+
+ if (expectException) {
+ expectedAssert = assert.throws;
}
- tests.push(function msgFunctionOnlyCalledOnFailure() {
- const res = db.runCommand({"ping": 1});
-
- _assertMsgFunctionExecution(assert.commandWorked, res, {expectException: false});
- _assertMsgFunctionExecution(
- assert.commandWorkedIgnoringWriteErrors, res, {expectException: false});
- _assertMsgFunctionExecution(assert.commandFailed, res, {expectException: true});
-
- var msgFunctionCalled = false;
- assert.throws(() => assert.commandFailedWithCode(res, 0, () => {
+ expectedAssert(() => {
+ assertFunc(assertParameter, () => {
msgFunctionCalled = true;
- }));
- assert.eq(true, msgFunctionCalled, "msg function execution should match assertion");
- });
-
- tests.push(function rawCommandErr() {
- const res = db.runCommand({"IHopeNobodyEverMakesThisACommand": 1});
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.CommandNotFound));
- // commandFailedWithCode should succeed if any of the passed error codes are matched.
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.CommandNotFound, kFakeErrCode]));
- assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
- res,
- [ErrorCodes.CommandNotFound, kFakeErrCode],
- "threw even though failed with correct error codes"));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(
- res, [kFakeErrCode], "didn't throw even though failed with incorrect error code"));
- });
-
- tests.push(function rawCommandWriteOk() {
- const res = db.runCommand({insert: "coll", documents: [{_id: 2}]});
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- assert.doesNotThrow(
- () => assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
- });
-
- tests.push(function rawCommandWriteErr() {
- const res = db.runCommand({insert: "coll", documents: [{_id: 1}]});
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(
- res, [ErrorCodes.DuplicateKey, kFakeErrCode], "expected to throw on write error"));
- assert.throws(() => assert.commandWorkedOrFailedWithCode(
- res, [kFakeErrCode], "expected to throw on write error"));
- });
-
- tests.push(function collInsertWriteOk() {
- const res = db.coll.insert({_id: 2});
- assert(res instanceof WriteResult);
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- tests.push(function collInsertWriteErr() {
- const res = db.coll.insert({_id: 1});
- assert(res instanceof WriteResult);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
-
- tests.push(function collMultiInsertWriteOk() {
- const res = db.coll.insert([{_id: 3}, {_id: 2}]);
- assert(res instanceof BulkWriteResult);
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
- });
-
- tests.push(function collMultiInsertWriteErr() {
- const res = db.coll.insert([{_id: 1}, {_id: 2}]);
- assert(res instanceof BulkWriteResult);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
-
- // Test when the insert command fails with ok:0 (i.e. not failing due to write err)
- tests.push(function collInsertCmdErr() {
- const res = db.coll.insert({x: 1}, {writeConcern: {"bad": 1}});
- assert(res instanceof WriteCommandError);
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
- });
-
- tests.push(function collMultiInsertCmdErr() {
- const res = db.coll.insert([{x: 1}, {x: 2}], {writeConcern: {"bad": 1}});
- assert(res instanceof WriteCommandError);
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
- assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
- res,
- [ErrorCodes.FailedToParse, kFakeErrCode],
- "threw even though failed with correct error codes"));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(
- res, [kFakeErrCode], "didn't throw even though failed with incorrect error codes"));
- });
-
- tests.push(function mapReduceOk() {
- const res = db.coll.mapReduce(
- function() {
- emit(this._id, 0);
- },
- function(k, v) {
- return v[0];
- },
- {out: "coll_out"});
- assert(res instanceof MapReduceResult);
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- tests.push(function mapReduceErr() {
- // db.coll.mapReduce throws if the command response has ok:0
- // Instead manually construct a MapReduceResult with ok:0
- const res = new MapReduceResult(db, {
- "ok": 0,
- "errmsg": "Example Error",
- "code": ErrorCodes.JSInterpreterFailure,
- "codeName": "JSInterpreterFailure"
});
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() =>
- assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure));
- assert.doesNotThrow(() => assert.commandFailedWithCode(
- res, [ErrorCodes.JSInterpreterFailure, kFakeErrCode]));
});
- tests.push(function crudInsertOneOk() {
- const res = db.coll.insertOne({_id: 2});
- assert(res.hasOwnProperty("acknowledged"));
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- tests.push(function crudInsertOneErr() {
- let threw = false;
- let res = null;
- try {
- db.coll.insertOne({_id: 1});
- } catch (e) {
- threw = true;
- res = e;
- }
- assert(threw);
- assert(res instanceof WriteError);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
-
- tests.push(function crudInsertManyOk() {
- const res = db.coll.insertMany([{_id: 2}, {_id: 3}]);
- assert(res.hasOwnProperty("acknowledged"));
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
+ assert.eq(expectException, msgFunctionCalled, "msg function execution should match assertion");
+}
+
+tests.push(function msgFunctionOnlyCalledOnFailure() {
+ const res = db.runCommand({"ping": 1});
+
+ _assertMsgFunctionExecution(assert.commandWorked, res, {expectException: false});
+ _assertMsgFunctionExecution(
+ assert.commandWorkedIgnoringWriteErrors, res, {expectException: false});
+ _assertMsgFunctionExecution(assert.commandFailed, res, {expectException: true});
+
+ var msgFunctionCalled = false;
+ assert.throws(() => assert.commandFailedWithCode(res, 0, () => {
+ msgFunctionCalled = true;
+ }));
+ assert.eq(true, msgFunctionCalled, "msg function execution should match assertion");
+});
+
+tests.push(function rawCommandErr() {
+ const res = db.runCommand({"IHopeNobodyEverMakesThisACommand": 1});
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.CommandNotFound));
+ // commandFailedWithCode should succeed if any of the passed error codes are matched.
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.CommandNotFound, kFakeErrCode]));
+ assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
+ res,
+ [ErrorCodes.CommandNotFound, kFakeErrCode],
+ "threw even though failed with correct error codes"));
+ assert.throws(
+ () => assert.commandWorkedOrFailedWithCode(
+ res, [kFakeErrCode], "didn't throw even though failed with incorrect error code"));
+});
+
+tests.push(function rawCommandWriteOk() {
+ const res = db.runCommand({insert: "coll", documents: [{_id: 2}]});
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+ assert.doesNotThrow(
+ () => assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
+});
+
+tests.push(function rawCommandWriteErr() {
+ const res = db.runCommand({insert: "coll", documents: [{_id: 1}]});
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+ assert.throws(
+ () => assert.commandWorkedOrFailedWithCode(
+ res, [ErrorCodes.DuplicateKey, kFakeErrCode], "expected to throw on write error"));
+ assert.throws(() => assert.commandWorkedOrFailedWithCode(
+ res, [kFakeErrCode], "expected to throw on write error"));
+});
+
+tests.push(function collInsertWriteOk() {
+ const res = db.coll.insert({_id: 2});
+ assert(res instanceof WriteResult);
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function collInsertWriteErr() {
+ const res = db.coll.insert({_id: 1});
+ assert(res instanceof WriteResult);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function collMultiInsertWriteOk() {
+ const res = db.coll.insert([{_id: 3}, {_id: 2}]);
+ assert(res instanceof BulkWriteResult);
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+ assert.throws(() =>
+ assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
+});
+
+tests.push(function collMultiInsertWriteErr() {
+ const res = db.coll.insert([{_id: 1}, {_id: 2}]);
+ assert(res instanceof BulkWriteResult);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+// Test when the insert command fails with ok:0 (i.e. not due to a write error)
+tests.push(function collInsertCmdErr() {
+ const res = db.coll.insert({x: 1}, {writeConcern: {"bad": 1}});
+ assert(res instanceof WriteCommandError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
+});
+
+tests.push(function collMultiInsertCmdErr() {
+ const res = db.coll.insert([{x: 1}, {x: 2}], {writeConcern: {"bad": 1}});
+ assert(res instanceof WriteCommandError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
+ assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
+ res,
+ [ErrorCodes.FailedToParse, kFakeErrCode],
+ "threw even though failed with correct error codes"));
+ assert.throws(
+ () => assert.commandWorkedOrFailedWithCode(
+ res, [kFakeErrCode], "didn't throw even though failed with incorrect error codes"));
+});
+
+tests.push(function mapReduceOk() {
+ const res = db.coll.mapReduce(
+ function() {
+ emit(this._id, 0);
+ },
+ function(k, v) {
+ return v[0];
+ },
+ {out: "coll_out"});
+ assert(res instanceof MapReduceResult);
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function mapReduceErr() {
+ // db.coll.mapReduce throws if the command response has ok:0.
+ // Instead, manually construct a MapReduceResult with ok:0.
+ const res = new MapReduceResult(db, {
+ "ok": 0,
+ "errmsg": "Example Error",
+ "code": ErrorCodes.JSInterpreterFailure,
+ "codeName": "JSInterpreterFailure"
});
-
- tests.push(function crudInsertManyErr() {
- let threw = false;
- let res = null;
- try {
- db.coll.insertMany([{_id: 1}, {_id: 2}]);
- } catch (e) {
- threw = true;
- res = e;
- }
- assert(threw);
- assert(res instanceof BulkWriteError);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.JSInterpreterFailure, kFakeErrCode]));
+});
+
+tests.push(function crudInsertOneOk() {
+ const res = db.coll.insertOne({_id: 2});
+ assert(res.hasOwnProperty("acknowledged"));
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function crudInsertOneErr() {
+ let threw = false;
+ let res = null;
+ try {
+ db.coll.insertOne({_id: 1});
+ } catch (e) {
+ threw = true;
+ res = e;
+ }
+ assert(threw);
+ assert(res instanceof WriteError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function crudInsertManyOk() {
+ const res = db.coll.insertMany([{_id: 2}, {_id: 3}]);
+ assert(res.hasOwnProperty("acknowledged"));
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function crudInsertManyErr() {
+ let threw = false;
+ let res = null;
+ try {
+ db.coll.insertMany([{_id: 1}, {_id: 2}]);
+ } catch (e) {
+ threw = true;
+ res = e;
+ }
+ assert(threw);
+ assert(res instanceof BulkWriteError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function rawMultiWriteErr() {
+ // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
+ const res = db.runCommand({"insert": "coll", documents: [{_id: 1}, {_id: 1}], ordered: false});
+ assert(res.writeErrors.length == 2, "did not get multiple write errors");
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function bulkMultiWriteErr() {
+ // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
+ const res = db.coll.insert([{_id: 1}, {_id: 1}], {ordered: false});
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function writeConcernErrorCausesCommandWorkedToAssert() {
+ const result = sampleWriteConcernError;
+
+ assert.throws(() => {
+ assert.commandWorked(result);
});
+});
- tests.push(function rawMultiWriteErr() {
- // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
- const res =
- db.runCommand({"insert": "coll", documents: [{_id: 1}, {_id: 1}], ordered: false});
- assert(res.writeErrors.length == 2, "did not get multiple write errors");
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
+tests.push(function writeConcernErrorCausesCommandFailedToPass() {
+ const result = sampleWriteConcernError;
- tests.push(function bulkMultiWriteErr() {
- // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
- const res = db.coll.insert([{_id: 1}, {_id: 1}], {ordered: false});
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+ assert.doesNotThrow(() => {
+ assert.commandFailed(result);
+ assert.commandFailedWithCode(result, ErrorCodes.WriteConcernFailed);
});
+});
- tests.push(function writeConcernErrorCausesCommandWorkedToAssert() {
- const result = sampleWriteConcernError;
+tests.push(function writeConcernErrorCanBeIgnored() {
+ const result = sampleWriteConcernError;
- assert.throws(() => {
- assert.commandWorked(result);
- });
+ assert.doesNotThrow(() => {
+ assert.commandWorkedIgnoringWriteConcernErrors(result);
});
+});
- tests.push(function writeConcernErrorCausesCommandFailedToPass() {
- const result = sampleWriteConcernError;
+tests.push(function invalidResponsesAttemptToProvideInformationToCommandWorks() {
+ const invalidResponses = [undefined, 'not a valid response', 42];
- assert.doesNotThrow(() => {
- assert.commandFailed(result);
- assert.commandFailedWithCode(result, ErrorCodes.WriteConcernFailed);
+ invalidResponses.forEach((invalidRes) => {
+ const error = assert.throws(() => {
+ assert.commandWorked(invalidRes);
});
- });
-
- tests.push(function writeConcernErrorCanBeIgnored() {
- const result = sampleWriteConcernError;
- assert.doesNotThrow(() => {
- assert.commandWorkedIgnoringWriteConcernErrors(result);
- });
+ assert.gte(error.message.indexOf(invalidRes), 0);
+ assert.gte(error.message.indexOf(typeof invalidRes), 0);
});
+});
- tests.push(function invalidResponsesAttemptToProvideInformationToCommandWorks() {
- const invalidResponses = [undefined, 'not a valid response', 42];
+tests.push(function invalidResponsesAttemptToProvideInformationCommandFailed() {
+ const invalidResponses = [undefined, 'not a valid response', 42];
- invalidResponses.forEach((invalidRes) => {
- const error = assert.throws(() => {
- assert.commandWorked(invalidRes);
- });
-
- assert.gte(error.message.indexOf(invalidRes), 0);
- assert.gte(error.message.indexOf(typeof invalidRes), 0);
+ invalidResponses.forEach((invalidRes) => {
+ const error = assert.throws(() => {
+ assert.commandFailed(invalidRes);
});
- });
-
- tests.push(function invalidResponsesAttemptToProvideInformationCommandFailed() {
- const invalidResponses = [undefined, 'not a valid response', 42];
- invalidResponses.forEach((invalidRes) => {
- const error = assert.throws(() => {
- assert.commandFailed(invalidRes);
- });
-
- assert.gte(error.message.indexOf(invalidRes), 0);
- assert.gte(error.message.indexOf(typeof invalidRes), 0);
- });
+ assert.gte(error.message.indexOf(invalidRes), 0);
+ assert.gte(error.message.indexOf(typeof invalidRes), 0);
});
+});
- tests.forEach((test) => {
- jsTest.log(`Starting test '${test.name}'`);
- setup();
- test();
- });
+tests.forEach((test) => {
+ jsTest.log(`Starting test '${test.name}'`);
+ setup();
+ test();
+});
- /* cleanup */
- MongoRunner.stopMongod(conn);
+/* cleanup */
+MongoRunner.stopMongod(conn);
})();
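
A note on the semantics these assertions encode: a reply that is ok at the command level but carries a writeConcernError fails assert.commandWorked() yet passes the write-concern-ignoring variant. A minimal sketch, using a hypothetical reply object shaped like the file's sampleWriteConcernError (defined earlier, outside this hunk):

    // Hypothetical stand-in for sampleWriteConcernError; the field shapes are assumed.
    const wceReply = {
        ok: 1,
        writeConcernError:
            {code: ErrorCodes.WriteConcernFailed, errmsg: "waiting for replication timed out"}
    };
    assert.throws(() => assert.commandWorked(wceReply));
    assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteConcernErrors(wceReply));
    assert.doesNotThrow(() => assert.commandFailedWithCode(wceReply, ErrorCodes.WriteConcernFailed));
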
diff --git a/jstests/noPassthrough/shell_disable_majority_reads.js b/jstests/noPassthrough/shell_disable_majority_reads.js
index 2bd62c46b1c..fa44f462646 100644
--- a/jstests/noPassthrough/shell_disable_majority_reads.js
+++ b/jstests/noPassthrough/shell_disable_majority_reads.js
@@ -2,34 +2,34 @@
// @tags: [requires_wiredtiger, requires_replication, requires_majority_read_concern,
// requires_persistence]
(function() {
- "use strict";
+"use strict";
- // Majority reads are enabled by default.
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+// Majority reads are enabled by default.
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let serverStatus = rst.getPrimary().getDB("test").serverStatus();
- assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
- rst.stopSet();
+let serverStatus = rst.getPrimary().getDB("test").serverStatus();
+assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
+rst.stopSet();
- // Explicitly enable majority reads.
- TestData.enableMajorityReadConcern = true;
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+// Explicitly enable majority reads.
+TestData.enableMajorityReadConcern = true;
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- serverStatus = rst.getPrimary().getDB("test").serverStatus();
- assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
- rst.stopSet();
+serverStatus = rst.getPrimary().getDB("test").serverStatus();
+assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
+rst.stopSet();
- // Explicitly disable majority reads.
- TestData.enableMajorityReadConcern = false;
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+// Explicitly disable majority reads.
+TestData.enableMajorityReadConcern = false;
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- serverStatus = rst.getPrimary().getDB("test").serverStatus();
- assert(!serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
- rst.stopSet();
+serverStatus = rst.getPrimary().getDB("test").serverStatus();
+assert(!serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
+rst.stopSet();
})();
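
For context: TestData.enableMajorityReadConcern is consumed by the test fixtures when nodes start, and its observable effect is the supportsCommittedReads flag in serverStatus. A small helper sketch (the helper name is illustrative, not part of the patch):

    // Returns true if the node was started with support for majority/committed reads.
    function nodeSupportsCommittedReads(node) {
        const status = node.getDB("test").serverStatus();
        return status.storageEngine.supportsCommittedReads === true;
    }
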
diff --git a/jstests/noPassthrough/shell_gossip_cluster_time.js b/jstests/noPassthrough/shell_gossip_cluster_time.js
index 462ad7e34da..119ba1e23dc 100644
--- a/jstests/noPassthrough/shell_gossip_cluster_time.js
+++ b/jstests/noPassthrough/shell_gossip_cluster_time.js
@@ -4,129 +4,124 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- const session1 = primary.startSession();
- const session2 = primary.startSession();
+const session1 = primary.startSession();
+const session2 = primary.startSession();
- const db = primary.getDB("test");
- const coll = db.shell_gossip_cluster_time;
+const db = primary.getDB("test");
+const coll = db.shell_gossip_cluster_time;
- function testCommandGossipedWithClusterTime(func, expectedClusterTime) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function testCommandGossipedWithClusterTime(func, expectedClusterTime) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const sentinel = {};
- let cmdObjSeen = sentinel;
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
+ }
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
+ }
- let cmdName = Object.keys(cmdObjSeen)[0];
+ let cmdName = Object.keys(cmdObjSeen)[0];
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
+ }
- if (expectedClusterTime === undefined) {
- assert(!cmdObjSeen.hasOwnProperty("$clusterTime"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to not have a $clusterTime object: " + func.toString());
- } else {
- assert(cmdObjSeen.hasOwnProperty("$clusterTime"),
- "Expected operation " + tojson(cmdObjSeen) + " to have a $clusterTime object: " +
- func.toString());
+ if (expectedClusterTime === undefined) {
+ assert(!cmdObjSeen.hasOwnProperty("$clusterTime"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to not have a $clusterTime object: " + func.toString());
+ } else {
+ assert(cmdObjSeen.hasOwnProperty("$clusterTime"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a $clusterTime object: " + func.toString());
- assert(bsonBinaryEqual(expectedClusterTime, cmdObjSeen.$clusterTime));
- }
+ assert(bsonBinaryEqual(expectedClusterTime, cmdObjSeen.$clusterTime));
}
-
- assert(
- session1.getClusterTime() === undefined,
- "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
- assert(
- session2.getClusterTime() === undefined,
- "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
-
- // Advance the clusterTime outside of either of the sessions.
- testCommandGossipedWithClusterTime(function() {
- assert.writeOK(coll.insert({}));
- }, primary.getClusterTime());
-
- assert(
- session1.getClusterTime() === undefined,
- "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
- assert(
- session2.getClusterTime() === undefined,
- "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
-
- // Performing an operation with session1 should use the highest clusterTime seen by the client
- // since session1 hasn't been used yet.
- testCommandGossipedWithClusterTime(function() {
- const coll = session1.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, primary.getClusterTime());
-
- assert.eq(session1.getClusterTime(), primary.getClusterTime());
-
- testCommandGossipedWithClusterTime(function() {
- const coll = session1.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, session1.getClusterTime());
-
- assert(
- session2.getClusterTime() === undefined,
- "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
-
- primary.resetClusterTime_forTesting();
- assert(primary.getClusterTime() === undefined,
- "client's cluster time should have been reset, but has clusterTime: " +
- tojson(primary.getClusterTime()));
-
- // Performing an operation with session2 should use the highest clusterTime seen by session2
- // since the client's clusterTime has been reset.
- session2.advanceClusterTime(session1.getClusterTime());
- testCommandGossipedWithClusterTime(function() {
- const coll = session2.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, session2.getClusterTime());
-
- assert.eq(session2.getClusterTime(), primary.getClusterTime());
-
- primary.resetClusterTime_forTesting();
- assert(primary.getClusterTime() === undefined,
- "client's cluster time should have been reset, but has clusterTime: " +
- tojson(primary.getClusterTime()));
-
- // Performing an operation with session2 should use the highest clusterTime seen by session2
- // since the highest clusterTime seen by session1 is behind that of session2's.
- primary.advanceClusterTime(session1.getClusterTime());
- testCommandGossipedWithClusterTime(function() {
- const coll = session2.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, session2.getClusterTime());
-
- rst.stopSet();
+}
+
+assert(session1.getClusterTime() === undefined,
+ "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
+assert(session2.getClusterTime() === undefined,
+ "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
+
+// Advance the clusterTime outside of either of the sessions.
+testCommandGossipedWithClusterTime(function() {
+ assert.writeOK(coll.insert({}));
+}, primary.getClusterTime());
+
+assert(session1.getClusterTime() === undefined,
+ "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
+assert(session2.getClusterTime() === undefined,
+ "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
+
+// Performing an operation with session1 should use the highest clusterTime seen by the client
+// since session1 hasn't been used yet.
+testCommandGossipedWithClusterTime(function() {
+ const coll = session1.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, primary.getClusterTime());
+
+assert.eq(session1.getClusterTime(), primary.getClusterTime());
+
+testCommandGossipedWithClusterTime(function() {
+ const coll = session1.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, session1.getClusterTime());
+
+assert(session2.getClusterTime() === undefined,
+ "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
+
+primary.resetClusterTime_forTesting();
+assert(primary.getClusterTime() === undefined,
+ "client's cluster time should have been reset, but has clusterTime: " +
+ tojson(primary.getClusterTime()));
+
+// Performing an operation with session2 should use the highest clusterTime seen by session2
+// since the client's clusterTime has been reset.
+session2.advanceClusterTime(session1.getClusterTime());
+testCommandGossipedWithClusterTime(function() {
+ const coll = session2.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, session2.getClusterTime());
+
+assert.eq(session2.getClusterTime(), primary.getClusterTime());
+
+primary.resetClusterTime_forTesting();
+assert(primary.getClusterTime() === undefined,
+ "client's cluster time should have been reset, but has clusterTime: " +
+ tojson(primary.getClusterTime()));
+
+// Performing an operation with session2 should use the highest clusterTime seen by session2
+// since the highest clusterTime seen by session1 is behind that of session2's.
+primary.advanceClusterTime(session1.getClusterTime());
+testCommandGossipedWithClusterTime(function() {
+ const coll = session2.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, session2.getClusterTime());
+
+rst.stopSet();
})();
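
The core technique in this file is the runCommand spy. Stripped of the test's bookkeeping, the pattern is the following sketch; the try/finally restore is what keeps a failing assertion from leaving Mongo.prototype permanently patched:

    const original = Mongo.prototype.runCommand;
    let lastCmdObj;
    Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
        lastCmdObj = cmdObj;                     // record the outgoing request
        return original.apply(this, arguments);  // then forward it unchanged
    };
    try {
        assert.writeOK(db.mycoll.insert({}));
    } finally {
        Mongo.prototype.runCommand = original;   // always restore the prototype
    }
    assert(lastCmdObj.hasOwnProperty("$clusterTime"));
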
diff --git a/jstests/noPassthrough/shell_helper_use_database.js b/jstests/noPassthrough/shell_helper_use_database.js
index 553e6df34d9..4fe5eea737a 100644
--- a/jstests/noPassthrough/shell_helper_use_database.js
+++ b/jstests/noPassthrough/shell_helper_use_database.js
@@ -7,33 +7,33 @@
var db;
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
- db = conn.getDB("db1");
- assert.eq("db1", db.getName());
+db = conn.getDB("db1");
+assert.eq("db1", db.getName());
- // Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
- // database name specified.
- shellHelper.use("db2");
- assert.eq("db2", db.getName());
+// Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
+// database name specified.
+shellHelper.use("db2");
+assert.eq("db2", db.getName());
- // Replace the global 'db' object with a DB object from a new session and verify that
- // shellHelper.use() still works.
- db = conn.startSession().getDatabase("db1");
- assert.eq("db1", db.getName());
+// Replace the global 'db' object with a DB object from a new session and verify that
+// shellHelper.use() still works.
+db = conn.startSession().getDatabase("db1");
+assert.eq("db1", db.getName());
- const session = db.getSession();
+const session = db.getSession();
- // Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
- // database name specified. The DB objects should have the same underlying DriverSession object.
- shellHelper.use("db2");
- assert.eq("db2", db.getName());
+// Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
+// database name specified. The DB objects should have the same underlying DriverSession object.
+shellHelper.use("db2");
+assert.eq("db2", db.getName());
- assert(session === db.getSession(), "session wasn't inherited as part of switching databases");
+assert(session === db.getSession(), "session wasn't inherited as part of switching databases");
- session.endSession();
- MongoRunner.stopMongod(conn);
+session.endSession();
+MongoRunner.stopMongod(conn);
})();
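
In other words, the shell's `use <db>` statement is sugar for shellHelper.use(), which rebinds the global db while keeping the underlying DriverSession. The contract under test, in two lines (sketch):

    shellHelper.use("db3");          // equivalent to typing: use db3
    assert.eq("db3", db.getName());  // global 'db' now targets db3 on the same session
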
diff --git a/jstests/noPassthrough/shell_history.js b/jstests/noPassthrough/shell_history.js
index a83425553df..98adac83dc9 100644
--- a/jstests/noPassthrough/shell_history.js
+++ b/jstests/noPassthrough/shell_history.js
@@ -2,103 +2,102 @@
// appropriate permissions (where relevant).
(function() {
- "use strict";
-
- // Use dataPath because it includes the trailing "/" or "\".
- var tmpHome = MongoRunner.dataPath;
- // Ensure it exists and is a dir (eg. if running without resmoke.py and /data/db doesn't exist).
- mkdir(tmpHome);
- removeFile(tmpHome + ".dbshell");
-
- var args = [];
- var cmdline = "mongo --nodb";
- var redirection = "";
- var env = {};
- if (_isWindows()) {
- args.push("cmd.exe");
- args.push("/c");
-
- // Input is set to NUL. The output must also be redirected to NUL, otherwise running the
- // jstest manually has strange terminal IO behaviour.
- redirection = "< NUL > NUL";
-
- // USERPROFILE set to the tmp homedir.
- // Since NUL is a character device, isatty() will return true, which means that .mongorc.js
- // will be created in the HOMEDRIVE + HOMEPATH location, so we must set them also.
- if (tmpHome.match("^[a-zA-Z]:")) {
- var tmpHomeDrive = tmpHome.substr(0, 2);
- var tmpHomePath = tmpHome.substr(2);
- } else {
- var _pwd = pwd();
- assert(_pwd.match("^[a-zA-Z]:"), "pwd must include drive");
- var tmpHomeDrive = _pwd.substr(0, 2);
- var tmpHomePath = tmpHome;
- }
- env = {USERPROFILE: tmpHome, HOMEDRIVE: tmpHomeDrive, HOMEPATH: tmpHomePath};
-
+"use strict";
+
+// Use dataPath because it includes the trailing "/" or "\".
+var tmpHome = MongoRunner.dataPath;
+// Ensure it exists and is a dir (e.g. if running without resmoke.py and /data/db doesn't exist).
+mkdir(tmpHome);
+removeFile(tmpHome + ".dbshell");
+
+var args = [];
+var cmdline = "mongo --nodb";
+var redirection = "";
+var env = {};
+if (_isWindows()) {
+ args.push("cmd.exe");
+ args.push("/c");
+
+ // Input is set to NUL. The output must also be redirected to NUL, otherwise running the
+ // jstest manually has strange terminal IO behaviour.
+ redirection = "< NUL > NUL";
+
+ // USERPROFILE set to the tmp homedir.
+ // Since NUL is a character device, isatty() will return true, which means that .mongorc.js
+ // will be created in the HOMEDRIVE + HOMEPATH location, so we must set them also.
+ if (tmpHome.match("^[a-zA-Z]:")) {
+ var tmpHomeDrive = tmpHome.substr(0, 2);
+ var tmpHomePath = tmpHome.substr(2);
} else {
- args.push("sh");
- args.push("-c");
+ var _pwd = pwd();
+ assert(_pwd.match("^[a-zA-Z]:"), "pwd must include drive");
+ var tmpHomeDrive = _pwd.substr(0, 2);
+ var tmpHomePath = tmpHome;
+ }
+ env = {USERPROFILE: tmpHome, HOMEDRIVE: tmpHomeDrive, HOMEPATH: tmpHomePath};
- // Use the mongo shell from the current dir, same as resmoke.py does.
- // Doesn't handle resmoke's --mongo= option.
- cmdline = "./" + cmdline;
+} else {
+ args.push("sh");
+ args.push("-c");
- // Set umask to 0 prior to running the shell.
- cmdline = "umask 0 ; " + cmdline;
+ // Use the mongo shell from the current dir, same as resmoke.py does.
+ // Doesn't handle resmoke's --mongo= option.
+ cmdline = "./" + cmdline;
- // stdin is /dev/null.
- redirection = "< /dev/null";
+ // Set umask to 0 prior to running the shell.
+ cmdline = "umask 0 ; " + cmdline;
- // HOME set to the tmp homedir.
- if (!tmpHome.startsWith("/")) {
- tmpHome = pwd() + "/" + tmpHome;
- }
- env = {HOME: tmpHome};
+ // stdin is /dev/null.
+ redirection = "< /dev/null";
+
+ // HOME set to the tmp homedir.
+ if (!tmpHome.startsWith("/")) {
+ tmpHome = pwd() + "/" + tmpHome;
}
+ env = {HOME: tmpHome};
+}
- // Add redirection to cmdline, and add cmdline to args.
- cmdline += " " + redirection;
- args.push(cmdline);
- jsTestLog("Running args:\n " + tojson(args) + "\nwith env:\n " + tojson(env));
- var pid = _startMongoProgram({args, env});
- var rc = waitProgram(pid);
+// Add redirection to cmdline, and add cmdline to args.
+cmdline += " " + redirection;
+args.push(cmdline);
+jsTestLog("Running args:\n " + tojson(args) + "\nwith env:\n " + tojson(env));
+var pid = _startMongoProgram({args, env});
+var rc = waitProgram(pid);
- assert.eq(rc, 0);
+assert.eq(rc, 0);
- var files = listFiles(tmpHome);
- jsTestLog(tojson(files));
+var files = listFiles(tmpHome);
+jsTestLog(tojson(files));
- var findFile = function(baseName) {
- for (var i = 0; i < files.length; i++) {
- if (files[i].baseName === baseName) {
- return files[i];
- }
+var findFile = function(baseName) {
+ for (var i = 0; i < files.length; i++) {
+ if (files[i].baseName === baseName) {
+ return files[i];
}
- return undefined;
- };
-
- var targetFile = ".dbshell";
- var file = findFile(targetFile);
-
- assert.neq(typeof(file), "undefined", targetFile + " should exist, but it doesn't");
- assert.eq(file.isDirectory, false, targetFile + " should not be a directory, but it is");
- assert.eq(file.size, 0, targetFile + " should be empty, but it isn't");
-
- if (!_isWindows()) {
- // On Unix, check that the file has the correct mode (permissions).
- // The shell has no way to stat a file.
- // There is no stat utility in POSIX.
- // `ls -l` is POSIX, so this is the best that we have.
- // Check for exactly "-rw-------".
- clearRawMongoProgramOutput();
- var rc = runProgram("ls", "-l", file.name);
- assert.eq(rc, 0);
-
- var output = rawMongoProgramOutput();
- var fields = output.split(" ");
- // First field is the prefix, second field is the `ls -l` permissions.
- assert.eq(fields[1].substr(0, 10), "-rw-------", targetFile + " has bad permissions");
}
+ return undefined;
+};
+
+var targetFile = ".dbshell";
+var file = findFile(targetFile);
+
+assert.neq(typeof (file), "undefined", targetFile + " should exist, but it doesn't");
+assert.eq(file.isDirectory, false, targetFile + " should not be a directory, but it is");
+assert.eq(file.size, 0, targetFile + " should be empty, but it isn't");
+
+if (!_isWindows()) {
+ // On Unix, check that the file has the correct mode (permissions).
+ // The shell has no way to stat a file.
+ // There is no stat utility in POSIX.
+ // `ls -l` is POSIX, so this is the best that we have.
+ // Check for exactly "-rw-------".
+ clearRawMongoProgramOutput();
+ var rc = runProgram("ls", "-l", file.name);
+ assert.eq(rc, 0);
+ var output = rawMongoProgramOutput();
+ var fields = output.split(" ");
+ // First field is the prefix, second field is the `ls -l` permissions.
+ assert.eq(fields[1].substr(0, 10), "-rw-------", targetFile + " has bad permissions");
+}
})();
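
Why the test sets umask 0 before launching the shell: with a zero umask nothing is masked off, so if .dbshell still comes back as -rw------- the shell must have requested mode 0600 explicitly rather than relying on the environment. The permission probe, reduced to its essentials (sketch):

    clearRawMongoProgramOutput();
    assert.eq(runProgram("ls", "-l", tmpHome + ".dbshell"), 0);
    // Field 0 is the captured-output prefix; field 1 is the `ls -l` mode string.
    const mode = rawMongoProgramOutput().split(" ")[1].substr(0, 10);
    assert.eq(mode, "-rw-------");
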
diff --git a/jstests/noPassthrough/shell_interactive.js b/jstests/noPassthrough/shell_interactive.js
index 970e4f1d10c..ea23099a546 100644
--- a/jstests/noPassthrough/shell_interactive.js
+++ b/jstests/noPassthrough/shell_interactive.js
@@ -2,23 +2,22 @@
// and true when running in interactive mode
(function() {
- "use strict";
-
- if (!_isWindows()) {
- clearRawMongoProgramOutput();
- var rc = runProgram("./mongo", "--nodb", "--quiet", "--eval", "print(isInteractive())");
- assert.eq(rc, 0);
- var output = rawMongoProgramOutput();
- var response = (output.split('\n').slice(-2)[0]).split(' ')[1];
- assert.eq(response, "false", "Expected 'false' in script mode");
- // now try interactive
- clearRawMongoProgramOutput();
- rc = runProgram(
- "./mongo", "--nodb", "--quiet", "--shell", "--eval", "print(isInteractive()); quit()");
- assert.eq(rc, 0);
- output = rawMongoProgramOutput();
- response = (output.split('\n').slice(-2)[0]).split(' ')[1];
- assert.eq(response, "true", "Expected 'true' in interactive mode");
- }
+"use strict";
+if (!_isWindows()) {
+ clearRawMongoProgramOutput();
+ var rc = runProgram("./mongo", "--nodb", "--quiet", "--eval", "print(isInteractive())");
+ assert.eq(rc, 0);
+ var output = rawMongoProgramOutput();
+ var response = (output.split('\n').slice(-2)[0]).split(' ')[1];
+ assert.eq(response, "false", "Expected 'false' in script mode");
+ // now try interactive
+ clearRawMongoProgramOutput();
+ rc = runProgram(
+ "./mongo", "--nodb", "--quiet", "--shell", "--eval", "print(isInteractive()); quit()");
+ assert.eq(rc, 0);
+ output = rawMongoProgramOutput();
+ response = (output.split('\n').slice(-2)[0]).split(' ')[1];
+ assert.eq(response, "true", "Expected 'true' in interactive mode");
+}
})();
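
The output parsing above relies on a small idiom: rawMongoProgramOutput() ends with a trailing newline, so split('\n').slice(-2)[0] is the last non-empty line, and split(' ')[1] drops the per-line capture prefix to leave the printed value. Sketch:

    const output = rawMongoProgramOutput();
    const lastLine = output.split('\n').slice(-2)[0];  // final element is "" after the trailing \n
    const value = lastLine.split(' ')[1];              // strip the capture prefix, keep the payload
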
diff --git a/jstests/noPassthrough/shell_load_file.js b/jstests/noPassthrough/shell_load_file.js
index bdba591694c..6da5cf27baf 100644
--- a/jstests/noPassthrough/shell_load_file.js
+++ b/jstests/noPassthrough/shell_load_file.js
@@ -2,43 +2,41 @@
* Tests the exception handling behavior of the load() function across nested calls.
*/
(function() {
- "use strict";
+"use strict";
- let isMain = true;
+let isMain = true;
- if (TestData.hasOwnProperty("loadDepth")) {
- isMain = false;
- ++TestData.loadDepth;
- } else {
- TestData.loadDepth = 0;
- TestData.loadErrors = [];
- }
+if (TestData.hasOwnProperty("loadDepth")) {
+ isMain = false;
+ ++TestData.loadDepth;
+} else {
+ TestData.loadDepth = 0;
+ TestData.loadErrors = [];
+}
- if (TestData.loadDepth >= 3) {
- throw new Error("Intentionally thrown");
- }
+if (TestData.loadDepth >= 3) {
+ throw new Error("Intentionally thrown");
+}
- try {
- load("jstests/noPassthrough/shell_load_file.js");
- } catch (e) {
- TestData.loadErrors.push(e);
+try {
+ load("jstests/noPassthrough/shell_load_file.js");
+} catch (e) {
+ TestData.loadErrors.push(e);
- if (!isMain) {
- throw e;
- }
+ if (!isMain) {
+ throw e;
}
+}
- assert(isMain,
- "only the root caller of load() needs to check the generated JavaScript exceptions");
+assert(isMain, "only the root caller of load() needs to check the generated JavaScript exceptions");
- for (let i = 0; i < TestData.loadErrors.length; ++i) {
- const error = TestData.loadErrors[i];
- assert.eq("error loading js file: jstests/noPassthrough/shell_load_file.js", error.message);
- assert(
- /@jstests\/noPassthrough\/shell_load_file.js:/.test(error.stack) ||
- /@jstests\\noPassthrough\\shell_load_file.js:/.test(error.stack),
- () =>
- "JavaScript stacktrace from load() didn't include file paths (AKA stack frames): " +
- error.stack);
- }
+for (let i = 0; i < TestData.loadErrors.length; ++i) {
+ const error = TestData.loadErrors[i];
+ assert.eq("error loading js file: jstests/noPassthrough/shell_load_file.js", error.message);
+ assert(
+ /@jstests\/noPassthrough\/shell_load_file.js:/.test(error.stack) ||
+ /@jstests\\noPassthrough\\shell_load_file.js:/.test(error.stack),
+ () => "JavaScript stacktrace from load() didn't include file paths (AKA stack frames): " +
+ error.stack);
+}
})();
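
The recursion guard here generalizes: TestData is shared across load() calls, so a counter stored there can bound any self-loading script. Distilled sketch (a variant, not the test itself):

    if (!TestData.hasOwnProperty("depth")) {
        TestData.depth = 0;  // first (root) evaluation of the file
    } else {
        ++TestData.depth;    // nested evaluation reached via load()
    }
    if (TestData.depth >= 3) {
        throw new Error("Intentionally thrown");  // unwinds through each load() frame
    }
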
diff --git a/jstests/noPassthrough/shell_mongobridge_port_allocation.js b/jstests/noPassthrough/shell_mongobridge_port_allocation.js
index a61eda2fc87..964080682db 100644
--- a/jstests/noPassthrough/shell_mongobridge_port_allocation.js
+++ b/jstests/noPassthrough/shell_mongobridge_port_allocation.js
@@ -6,74 +6,73 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- function checkBridgeOffset(node, processType) {
- const bridgePort = node.port;
- const serverPort =
- assert.commandWorked(node.adminCommand({getCmdLineOpts: 1})).parsed.net.port;
- assert.neq(bridgePort,
- serverPort,
- node + " is a connection to " + processType + " rather than to mongobridge");
- assert.eq(bridgePort + MongoBridge.kBridgeOffset,
- serverPort,
- "corresponding mongobridge and " + processType +
- " ports should be staggered by a multiple of 10");
- }
+function checkBridgeOffset(node, processType) {
+ const bridgePort = node.port;
+ const serverPort = assert.commandWorked(node.adminCommand({getCmdLineOpts: 1})).parsed.net.port;
+ assert.neq(bridgePort,
+ serverPort,
+ node + " is a connection to " + processType + " rather than to mongobridge");
+ assert.eq(bridgePort + MongoBridge.kBridgeOffset,
+ serverPort,
+ "corresponding mongobridge and " + processType +
+ " ports should be staggered by a multiple of 10");
+}
- // We use >5 nodes to ensure that allocating twice as many ports doesn't interfere with having
- // the corresponding mongobridge and mongod ports staggered by a multiple of 10.
- const rst = new ReplSetTest({nodes: 7, useBridge: true});
- rst.startSet();
+// We use >5 nodes to ensure that allocating twice as many ports doesn't interfere with having
+// the corresponding mongobridge and mongod ports staggered by a multiple of 10.
+const rst = new ReplSetTest({nodes: 7, useBridge: true});
+rst.startSet();
- // Rig the election so that the primary remains stable throughout this test despite the replica
- // set having a larger number of members.
- const replSetConfig = rst.getReplSetConfig();
- for (let i = 1; i < rst.nodes.length; ++i) {
- replSetConfig.members[i].priority = 0;
- replSetConfig.members[i].votes = 0;
- }
- rst.initiate(replSetConfig);
+// Rig the election so that the primary remains stable throughout this test despite the replica
+// set having a larger number of members.
+const replSetConfig = rst.getReplSetConfig();
+for (let i = 1; i < rst.nodes.length; ++i) {
+ replSetConfig.members[i].priority = 0;
+ replSetConfig.members[i].votes = 0;
+}
+rst.initiate(replSetConfig);
- for (let node of rst.nodes) {
- checkBridgeOffset(node, "mongod");
- }
+for (let node of rst.nodes) {
+ checkBridgeOffset(node, "mongod");
+}
- rst.stopSet();
+rst.stopSet();
- // We run ShardingTest under mongobridge with both 1-node replica set shards and stand-alone
- // mongod shards.
- for (let options of[{rs: {nodes: 1}}, {rs: false, shardAsReplicaSet: false}]) {
- resetAllocatedPorts();
+// We run ShardingTest under mongobridge with both 1-node replica set shards and stand-alone
+// mongod shards.
+for (let options of [{rs: {nodes: 1}}, {rs: false, shardAsReplicaSet: false}]) {
+ resetAllocatedPorts();
- const numMongos = 5;
- const numShards = 5;
- const st = new ShardingTest(Object.assign({
- mongos: numMongos,
- shards: numShards,
- config: {nodes: 1},
- useBridge: true,
- },
- options));
+ const numMongos = 5;
+ const numShards = 5;
+ const st = new ShardingTest(Object.assign({
+ mongos: numMongos,
+ shards: numShards,
+ config: {nodes: 1},
+ useBridge: true,
+ },
+ options));
- for (let i = 0; i < numMongos; ++i) {
- checkBridgeOffset(st["s" + i], "mongos");
- }
+ for (let i = 0; i < numMongos; ++i) {
+ checkBridgeOffset(st["s" + i], "mongos");
+ }
- for (let configServer of st.configRS.nodes) {
- checkBridgeOffset(configServer, "config server");
- }
+ for (let configServer of st.configRS.nodes) {
+ checkBridgeOffset(configServer, "config server");
+ }
- for (let i = 0; i < numShards; ++i) {
- if (options.rs) {
- for (let node of st["rs" + i].nodes) {
- checkBridgeOffset(node, "shard");
- }
- } else {
- checkBridgeOffset(st["d" + i], "shard");
+ for (let i = 0; i < numShards; ++i) {
+ if (options.rs) {
+ for (let node of st["rs" + i].nodes) {
+ checkBridgeOffset(node, "shard");
}
+ } else {
+ checkBridgeOffset(st["d" + i], "shard");
}
-
- st.stop();
}
+
+ st.stop();
+}
})();
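
The invariant checkBridgeOffset() verifies can be stated as an equation: for every bridged node, serverPort == bridgePort + MongoBridge.kBridgeOffset, with the offset a multiple of 10 per the assertion message. Equivalent two-liner (sketch):

    const serverPort =
        assert.commandWorked(node.adminCommand({getCmdLineOpts: 1})).parsed.net.port;
    assert.eq(node.port + MongoBridge.kBridgeOffset, serverPort);
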
diff --git a/jstests/noPassthrough/shell_quit.js b/jstests/noPassthrough/shell_quit.js
index 7ff45368f00..17721119846 100644
--- a/jstests/noPassthrough/shell_quit.js
+++ b/jstests/noPassthrough/shell_quit.js
@@ -1,17 +1,17 @@
(function() {
- 'use strict';
- var checkShell = function(retCode) {
- var args = [
- "mongo",
- "--nodb",
- "--eval",
- "quit(" + retCode + ");",
- ];
+'use strict';
+var checkShell = function(retCode) {
+ var args = [
+ "mongo",
+ "--nodb",
+ "--eval",
+ "quit(" + retCode + ");",
+ ];
- var actualRetCode = _runMongoProgram.apply(null, args);
- assert.eq(retCode, actualRetCode);
- };
+ var actualRetCode = _runMongoProgram.apply(null, args);
+ assert.eq(retCode, actualRetCode);
+};
- checkShell(0);
- checkShell(5);
+checkShell(0);
+checkShell(5);
})();
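
What makes this test work: _runMongoProgram() blocks until the child exits and returns its exit status, so quit(N) in the child round-trips as N. Minimal sketch:

    const rc = _runMongoProgram("mongo", "--nodb", "--eval", "quit(7);");
    assert.eq(7, rc);
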
diff --git a/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js b/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js
index ca052915fe7..0010d54bff2 100644
--- a/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js
+++ b/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js
@@ -4,129 +4,127 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = jsTest.name();
-
- const rsConn = new Mongo(rst.getURL());
- const db = rsConn.startSession({retryWrites: true}).getDatabase(dbName);
-
- // We configure the mongo shell to log its retry attempts so there are more diagnostics
- // available in case this test ever fails.
- TestData.logRetryAttempts = true;
-
- /**
- * The testCommandIsRetried() function serves as the fixture for writing test cases which run
- * commands against the server and assert that the mongo shell retries them correctly.
- *
- * The 'testFn' parameter is a function that performs an arbitrary number of operations against
- * the database. The command requests that the mongo shell attempts to send to the server
- * (including any command requests which are retried) are then specified as the sole argument to
- * the 'assertFn' parameter.
- *
- * The testFn(enableCapture, disableCapture) function can also selectively turn on and off the
- * capturing of command requests by calling the functions it receives for its first and second
- * parameters, respectively.
- */
- function testCommandIsRetried(testFn, assertFn) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const cmdObjsSeen = [];
-
- let shouldCaptureCmdObjs = true;
-
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- if (shouldCaptureCmdObjs) {
- cmdObjsSeen.push(cmdObj);
- }
-
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
- try {
- assert.doesNotThrow(() => testFn(
- () => {
- shouldCaptureCmdObjs = true;
- },
- () => {
- shouldCaptureCmdObjs = false;
- }));
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+const dbName = "test";
+const collName = jsTest.name();
+
+const rsConn = new Mongo(rst.getURL());
+const db = rsConn.startSession({retryWrites: true}).getDatabase(dbName);
+
+// We configure the mongo shell to log its retry attempts so there are more diagnostics
+// available in case this test ever fails.
+TestData.logRetryAttempts = true;
+
+/**
+ * The testCommandIsRetried() function serves as the fixture for writing test cases which run
+ * commands against the server and assert that the mongo shell retries them correctly.
+ *
+ * The 'testFn' parameter is a function that performs an arbitrary number of operations against
+ * the database. The command requests that the mongo shell attempts to send to the server
+ * (including any command requests which are retried) are then specified as the sole argument to
+ * the 'assertFn' parameter.
+ *
+ * The testFn(enableCapture, disableCapture) function can also selectively turn on and off the
+ * capturing of command requests by calling the functions it receives for its first and second
+ * parameters, respectively.
+ */
+function testCommandIsRetried(testFn, assertFn) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+ const cmdObjsSeen = [];
+
+ let shouldCaptureCmdObjs = true;
- if (cmdObjsSeen.length === 0) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ if (shouldCaptureCmdObjs) {
+ cmdObjsSeen.push(cmdObj);
}
- assertFn(cmdObjsSeen);
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
+
+ try {
+ assert.doesNotThrow(() => testFn(
+ () => {
+ shouldCaptureCmdObjs = true;
+ },
+ () => {
+ shouldCaptureCmdObjs = false;
+ }));
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- testCommandIsRetried(
- function testInsertRetriedOnWriteConcernError(enableCapture, disableCapture) {
- disableCapture();
- const secondary = rst.getSecondary();
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
-
- try {
- enableCapture();
- const res = db[collName].insert({}, {writeConcern: {w: 2, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- disableCapture();
- } finally {
- // We disable the failpoint in a finally block to prevent a misleading fassert()
- // message from being logged by the secondary when it is shut down with the
- // failpoint enabled.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
- }
- },
- function assertInsertRetriedExactlyOnce(cmdObjsSeen) {
- assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
- assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "insert"),
- () => "expected both attempts to be insert requests: " + tojson(cmdObjsSeen));
- assert.eq(
- cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
- });
+ if (cmdObjsSeen.length === 0) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
+ }
- testCommandIsRetried(
- function testUpdateRetriedOnRetryableCommandError(enableCapture, disableCapture) {
- disableCapture();
+ assertFn(cmdObjsSeen);
+}
- const primary = rst.getPrimary();
- primary.adminCommand({
- configureFailPoint: "onPrimaryTransactionalWrite",
- data: {
- closeConnection: false,
- failBeforeCommitExceptionCode: ErrorCodes.InterruptedDueToReplStateChange
- },
- mode: {times: 1}
- });
+testCommandIsRetried(
+ function testInsertRetriedOnWriteConcernError(enableCapture, disableCapture) {
+ disableCapture();
+ const secondary = rst.getSecondary();
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
+ try {
enableCapture();
- const res = db[collName].update({}, {$set: {a: 1}});
- assert.commandWorked(res);
+ const res = db[collName].insert({}, {writeConcern: {w: 2, wtimeout: 1000}});
+ assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
disableCapture();
-
- primary.adminCommand({configureFailPoint: "onPrimaryTransactionalWrite", mode: "off"});
- },
- function assertUpdateRetriedExactlyOnce(cmdObjsSeen) {
- assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
- assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "update"),
- () => "expected both attempts to be update requests: " + tojson(cmdObjsSeen));
- assert.eq(
- cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
+ } finally {
+ // We disable the failpoint in a finally block to prevent a misleading fassert()
+ // message from being logged by the secondary when it is shut down with the
+ // failpoint enabled.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
+ }
+ },
+ function assertInsertRetriedExactlyOnce(cmdObjsSeen) {
+ assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
+ assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "insert"),
+ () => "expected both attempts to be insert requests: " + tojson(cmdObjsSeen));
+ assert.eq(cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
+ });
+
+testCommandIsRetried(
+ function testUpdateRetriedOnRetryableCommandError(enableCapture, disableCapture) {
+ disableCapture();
+
+ const primary = rst.getPrimary();
+ primary.adminCommand({
+ configureFailPoint: "onPrimaryTransactionalWrite",
+ data: {
+ closeConnection: false,
+ failBeforeCommitExceptionCode: ErrorCodes.InterruptedDueToReplStateChange
+ },
+ mode: {times: 1}
});
- rst.stopSet();
+ enableCapture();
+ const res = db[collName].update({}, {$set: {a: 1}});
+ assert.commandWorked(res);
+ disableCapture();
+
+ primary.adminCommand({configureFailPoint: "onPrimaryTransactionalWrite", mode: "off"});
+ },
+ function assertUpdateRetriedExactlyOnce(cmdObjsSeen) {
+ assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
+ assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "update"),
+ () => "expected both attempts to be update requests: " + tojson(cmdObjsSeen));
+ assert.eq(cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
+ });
+
+rst.stopSet();
})();
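
Both cases follow the same failpoint recipe, reusable on its own (a sketch drawn from the hunk above): arm the failpoint for exactly one hit, run the write that should be retried, then disarm during cleanup so shutdown does not trip over it.

    primary.adminCommand({
        configureFailPoint: "onPrimaryTransactionalWrite",
        data: {
            closeConnection: false,
            failBeforeCommitExceptionCode: ErrorCodes.InterruptedDueToReplStateChange
        },
        mode: {times: 1}  // fail only the first attempt; the retry then succeeds
    });
    assert.commandWorked(db[collName].update({}, {$set: {a: 1}}));
    primary.adminCommand({configureFailPoint: "onPrimaryTransactionalWrite", mode: "off"});
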
diff --git a/jstests/noPassthrough/shell_retry_writes_uri.js b/jstests/noPassthrough/shell_retry_writes_uri.js
index a83fa33eb46..bb591438280 100644
--- a/jstests/noPassthrough/shell_retry_writes_uri.js
+++ b/jstests/noPassthrough/shell_retry_writes_uri.js
@@ -1,148 +1,142 @@
// @tags: [requires_replication]
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- let mongoUri = "mongodb://" + rst.nodes.map((node) => node.host).join(",") + "/test";
- let conn = rst.nodes[0];
-
- // There are three ways to enable retryable writes in the mongo shell.
- // 1. (cmdline flag) start mongo shell with --retryWrites
- // 2. (uri param) connect to a uri like mongodb://.../test?retryWrites=true
- // 3. (session option) in mongo shell create a new session with {retryWrite: true}
-
- function runShellScript(uri, cmdArgs, insertShouldHaveTxnNumber, shellFn) {
- // This function is stringified and called immediately in the mongo --eval.
- function testWrapper(insertShouldHaveTxnNumber, shellFn) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- let insertFound = false;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- let cmdObjSeen = cmdObj;
- let cmdName = Object.keys(cmdObjSeen)[0];
-
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObj)[0];
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+let mongoUri = "mongodb://" + rst.nodes.map((node) => node.host).join(",") + "/test";
+let conn = rst.nodes[0];
+
+// There are three ways to enable retryable writes in the mongo shell.
+// 1. (cmdline flag) start mongo shell with --retryWrites
+// 2. (uri param) connect to a uri like mongodb://.../test?retryWrites=true
+// 3. (session option) in mongo shell create a new session with {retryWrites: true}
+
+function runShellScript(uri, cmdArgs, insertShouldHaveTxnNumber, shellFn) {
+ // This function is stringified and called immediately in the mongo --eval.
+ function testWrapper(insertShouldHaveTxnNumber, shellFn) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+ let insertFound = false;
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ let cmdObjSeen = cmdObj;
+ let cmdName = Object.keys(cmdObjSeen)[0];
+
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObj)[0];
+ }
+
+ if (cmdName === "insert") {
+ insertFound = true;
+ if (insertShouldHaveTxnNumber) {
+ assert(cmdObjSeen.hasOwnProperty("txnNumber"),
+ "insert sent without expected txnNumber");
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
+ "insert sent with txnNumber unexpectedly");
}
+ }
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- if (cmdName === "insert") {
- insertFound = true;
- if (insertShouldHaveTxnNumber) {
- assert(cmdObjSeen.hasOwnProperty("txnNumber"),
- "insert sent without expected txnNumber");
- } else {
- assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
- "insert sent with txnNumber unexpectedly");
- }
- }
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- shellFn();
- assert(insertFound, "test did not run insert command");
- }
-
- // Construct the string to be passed to eval.
- let script = "(" + testWrapper.toString() + ")(";
- script += insertShouldHaveTxnNumber + ",";
- script += shellFn.toString();
- script += ")";
-
- let args = ["./mongo", uri, "--eval", script].concat(cmdArgs);
- let exitCode = runMongoProgram(...args);
- assert.eq(exitCode, 0, `shell script "${shellFn.name}" exited with ${exitCode}`);
+ shellFn();
+ assert(insertFound, "test did not run insert command");
}
- // Tests --retryWrites command line parameter.
- runShellScript(mongoUri, ["--retryWrites"], true, function flagWorks() {
- assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(db.coll.insert({}), "cannot insert");
- });
-
- // The uri param should override --retryWrites.
- runShellScript(
- mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenByUri() {
- assert(!db.getSession().getOptions().shouldRetryWrites(),
- "retryWrites should be false");
- assert.writeOK(db.coll.insert({}), "cannot insert");
- });
-
- // Even if initial connection has retryWrites=false in uri, new connections should not be
- // overriden.
- runShellScript(mongoUri + "?retryWrites=false",
- ["--retryWrites"],
- true,
- function flagNotOverridenByNewConn() {
- let connUri = db.getMongo().host; // does not have ?retryWrites=false.
- let sess = new Mongo(connUri).startSession();
- assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Unless that uri also specifies retryWrites.
- runShellScript(mongoUri + "?retryWrites=false",
- ["--retryWrites"],
- false,
- function flagOverridenInNewConn() {
- let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=false";
- let sess = new Mongo(connUri).startSession();
- assert(!sess.getOptions().shouldRetryWrites(),
- "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Session options should override --retryWrites as well.
- runShellScript(mongoUri, ["--retryWrites"], false, function flagOverridenByOpts() {
- let connUri = "mongodb://" + db.getMongo().host + "/test";
- let sess = new Mongo(connUri).startSession({retryWrites: false});
- assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Test uri retryWrites parameter.
- runShellScript(mongoUri + "?retryWrites=true", [], true, function uriTrueWorks() {
- assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(db.coll.insert({}), "cannot insert");
- });
-
- // Test that uri retryWrites=false works.
- runShellScript(mongoUri + "?retryWrites=false", [], false, function uriFalseWorks() {
+ // Construct the string to be passed to eval.
+ let script = "(" + testWrapper.toString() + ")(";
+ script += insertShouldHaveTxnNumber + ",";
+ script += shellFn.toString();
+ script += ")";
+
+ let args = ["./mongo", uri, "--eval", script].concat(cmdArgs);
+ let exitCode = runMongoProgram(...args);
+ assert.eq(exitCode, 0, `shell script "${shellFn.name}" exited with ${exitCode}`);
+}
+
+// Tests --retryWrites command line parameter.
+runShellScript(mongoUri, ["--retryWrites"], true, function flagWorks() {
+ assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
+ assert.writeOK(db.coll.insert({}), "cannot insert");
+});
+
+// The uri param should override --retryWrites.
+runShellScript(
+ mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenByUri() {
assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
assert.writeOK(db.coll.insert({}), "cannot insert");
});
- // Test SessionOptions retryWrites option.
- runShellScript(mongoUri, [], true, function sessOptTrueWorks() {
- let connUri = "mongodb://" + db.getMongo().host + "/test";
- let sess = new Mongo(connUri).startSession({retryWrites: true});
+// Even if initial connection has retryWrites=false in uri, new connections should not be
+// overridden.
+runShellScript(
+ mongoUri + "?retryWrites=false", ["--retryWrites"], true, function flagNotOverridenByNewConn() {
+ let connUri = db.getMongo().host; // does not have ?retryWrites=false.
+ let sess = new Mongo(connUri).startSession();
assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
- // Test that SessionOptions retryWrites:false works.
- runShellScript(mongoUri, [], false, function sessOptFalseWorks() {
- let connUri = "mongodb://" + db.getMongo().host + "/test";
- let sess = new Mongo(connUri).startSession({retryWrites: false});
- assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Test that session option overrides uri option.
- runShellScript(mongoUri + "?retryWrites=true", [], false, function sessOptOverridesUri() {
- let sess = db.getMongo().startSession({retryWrites: false});
+// Unless that uri also specifies retryWrites.
+runShellScript(
+ mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenInNewConn() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=false";
+ let sess = new Mongo(connUri).startSession();
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
- rst.stopSet();
+// Session options should override --retryWrites as well.
+runShellScript(mongoUri, ["--retryWrites"], false, function flagOverridenByOpts() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test";
+ let sess = new Mongo(connUri).startSession({retryWrites: false});
+ assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+// Test uri retryWrites parameter.
+runShellScript(mongoUri + "?retryWrites=true", [], true, function uriTrueWorks() {
+ assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
+ assert.writeOK(db.coll.insert({}), "cannot insert");
+});
+
+// Test that uri retryWrites=false works.
+runShellScript(mongoUri + "?retryWrites=false", [], false, function uriFalseWorks() {
+ assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(db.coll.insert({}), "cannot insert");
+});
+
+// Test SessionOptions retryWrites option.
+runShellScript(mongoUri, [], true, function sessOptTrueWorks() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test";
+ let sess = new Mongo(connUri).startSession({retryWrites: true});
+ assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+// Test that SessionOptions retryWrites:false works.
+runShellScript(mongoUri, [], false, function sessOptFalseWorks() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test";
+ let sess = new Mongo(connUri).startSession({retryWrites: false});
+ assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+// Test that session option overrides uri option.
+runShellScript(mongoUri + "?retryWrites=true", [], false, function sessOptOverridesUri() {
+ let sess = db.getMongo().startSession({retryWrites: false});
+ assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+rst.stopSet();
}());
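
Taken together, these cases establish a precedence order: SessionOptions beat the URI parameter, and the URI parameter beats the --retryWrites flag; a new Mongo() connection inherits neither setting unless its own URI repeats it. The top of that chain in one sketch:

    // The session option wins even when the URI says retryWrites=true.
    const sess = new Mongo("mongodb://" + db.getMongo().host + "/test?retryWrites=true")
                     .startSession({retryWrites: false});
    assert(!sess.getOptions().shouldRetryWrites(), "session option should take precedence");
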
diff --git a/jstests/noPassthrough/shell_session_option_defaults.js b/jstests/noPassthrough/shell_session_option_defaults.js
index 16c4e3860d6..b865914a99b 100644
--- a/jstests/noPassthrough/shell_session_option_defaults.js
+++ b/jstests/noPassthrough/shell_session_option_defaults.js
@@ -2,80 +2,80 @@
* Tests the default values for causal consistency and retryable writes as part of SessionOptions.
*/
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
+const conn = MongoRunner.runMongod();
- let session = conn.startSession();
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should not be implicitly enabled for an explicit session");
- session.endSession();
+let session = conn.startSession();
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should not be implicitly enabled for an explicit session");
+session.endSession();
- session = conn.startSession({causalConsistency: true});
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be able to be explicitly enabled");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should not be implicitly enabled for an explicit session");
- session.endSession();
+session = conn.startSession({causalConsistency: true});
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be able to be explicitly enabled");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should not be implicitly enabled for an explicit session");
+session.endSession();
- session = conn.startSession({causalConsistency: false});
- assert(!session.getOptions().isCausalConsistency(),
- "Causal consistency should be able to be explicitly disabled");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should not be implicitly enabled for an explicit session");
- session.endSession();
+session = conn.startSession({causalConsistency: false});
+assert(!session.getOptions().isCausalConsistency(),
+ "Causal consistency should be able to be explicitly disabled");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should not be implicitly enabled for an explicit session");
+session.endSession();
+
+session = conn.startSession({retryWrites: false});
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should be able to be explicitly disabled");
+session.endSession();
+
+session = conn.startSession({retryWrites: true});
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+assert(session.getOptions().shouldRetryWrites(),
+ "Retryable writes should be able to be explicitly enabled");
+session.endSession();
+
+function runMongoShellWithRetryWritesEnabled(func) {
+ const args = [MongoRunner.mongoShellPath];
+ args.push("--port", conn.port);
+ args.push("--retryWrites");
- session = conn.startSession({retryWrites: false});
+ const jsCode = "(" + func.toString() + ")()";
+ args.push("--eval", jsCode);
+
+ const exitCode = runMongoProgram.apply(null, args);
+ assert.eq(0, exitCode, "Encountered an error in the other mongo shell");
+}
+
+runMongoShellWithRetryWritesEnabled(function() {
+ let session = db.getSession();
+ assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+ assert(session.getOptions().shouldRetryWrites(),
+ "Retryable writes should be implicitly enabled on default session when using" +
+ " --retryWrites");
+
+ session = db.getMongo().startSession({retryWrites: false});
assert(session.getOptions().isCausalConsistency(),
"Causal consistency should be implicitly enabled for an explicit session");
assert(!session.getOptions().shouldRetryWrites(),
"Retryable writes should be able to be explicitly disabled");
session.endSession();
- session = conn.startSession({retryWrites: true});
+ session = db.getMongo().startSession();
assert(session.getOptions().isCausalConsistency(),
"Causal consistency should be implicitly enabled for an explicit session");
assert(session.getOptions().shouldRetryWrites(),
- "Retryable writes should be able to be explicitly enabled");
+ "Retryable writes should be implicitly enabled on new sessions when using" +
+ " --retryWrites");
session.endSession();
+});
- function runMongoShellWithRetryWritesEnabled(func) {
- const args = [MongoRunner.mongoShellPath];
- args.push("--port", conn.port);
- args.push("--retryWrites");
-
- const jsCode = "(" + func.toString() + ")()";
- args.push("--eval", jsCode);
-
- const exitCode = runMongoProgram.apply(null, args);
- assert.eq(0, exitCode, "Encountered an error in the other mongo shell");
- }
-
- runMongoShellWithRetryWritesEnabled(function() {
- let session = db.getSession();
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(session.getOptions().shouldRetryWrites(),
- "Retryable writes should be implicitly enabled on default session when using" +
- " --retryWrites");
-
- session = db.getMongo().startSession({retryWrites: false});
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should be able to be explicitly disabled");
- session.endSession();
-
- session = db.getMongo().startSession();
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(session.getOptions().shouldRetryWrites(),
- "Retryable writes should be implicitly enabled on new sessions when using" +
- " --retryWrites");
- session.endSession();
- });
-
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
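
Summarized, the defaults this file pins down: explicit sessions are causally consistent unless {causalConsistency: false} is passed, and they never retry writes unless {retryWrites: true} is passed or the shell itself was started with --retryWrites. The baseline case as a sketch:

    const s = conn.startSession();                 // no options supplied
    assert(s.getOptions().isCausalConsistency());  // on by default
    assert(!s.getOptions().shouldRetryWrites());   // off by default
    s.endSession();
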
diff --git a/jstests/noPassthrough/shutdown_while_fsync_locked.js b/jstests/noPassthrough/shutdown_while_fsync_locked.js
index 5d611741ce2..d234fa444eb 100644
--- a/jstests/noPassthrough/shutdown_while_fsync_locked.js
+++ b/jstests/noPassthrough/shutdown_while_fsync_locked.js
@@ -2,14 +2,14 @@
* Ensure that we allow mongod to shutdown cleanly while being fsync locked.
*/
(function() {
- "use strict";
+"use strict";
- let conn = MongoRunner.runMongod();
- let db = conn.getDB("test");
+let conn = MongoRunner.runMongod();
+let db = conn.getDB("test");
- for (let i = 0; i < 10; i++) {
- assert.commandWorked(db.adminCommand({fsync: 1, lock: 1}));
- }
+for (let i = 0; i < 10; i++) {
+ assert.commandWorked(db.adminCommand({fsync: 1, lock: 1}));
+}
- MongoRunner.stopMongod(conn, MongoRunner.EXIT_CLEAN, {skipValidation: true});
+MongoRunner.stopMongod(conn, MongoRunner.EXIT_CLEAN, {skipValidation: true});
}());
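
Note that the test takes the fsync lock ten times and never unlocks, relying on clean shutdown to release it; fsync locks are counted, so in normal operation each lock needs a matching fsyncUnlock. A sketch of the balanced pattern, assuming a running mongod connection `conn`:

    // Illustrative sketch: fsync lock/unlock counts must balance to fully unlock.
    const admin = conn.getDB("admin");
    assert.commandWorked(admin.runCommand({fsync: 1, lock: 1}));
    assert.commandWorked(admin.runCommand({fsync: 1, lock: 1}));
    assert.commandWorked(admin.runCommand({fsyncUnlock: 1}));  // lock count drops to 1
    assert.commandWorked(admin.runCommand({fsyncUnlock: 1}));  // now fully unlocked
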
diff --git a/jstests/noPassthrough/skip_sharding_configuration_checks.js b/jstests/noPassthrough/skip_sharding_configuration_checks.js
index eb067f94a52..8573a223979 100644
--- a/jstests/noPassthrough/skip_sharding_configuration_checks.js
+++ b/jstests/noPassthrough/skip_sharding_configuration_checks.js
@@ -4,51 +4,50 @@
* requires_majority_read_concern]
*/
(function() {
- 'use strict';
-
- function expectState(rst, state) {
- assert.soon(function() {
- var status = rst.status();
- if (status.myState != state) {
- print("Waiting for state " + state + " in replSetGetStatus output: " +
- tojson(status));
- }
- return status.myState == state;
- });
- }
-
- let configSvr = MongoRunner.runMongod(
- {configsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
- assert.eq(configSvr, null);
-
- let shardSvr =
- MongoRunner.runMongod({shardsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
- assert.eq(shardSvr, null);
-
- var st = new ShardingTest({name: "skipConfig", shards: {rs0: {nodes: 1}}});
- var configRS = st.configRS;
- var shardRS = st.rs0;
-
- shardRS.stopSet(15, true);
- configRS.stopSet(undefined, true);
-
- jsTestLog("Restarting configRS as a standalone ReplicaSet");
-
- for (let i = 0; i < configRS.nodes.length; i++) {
- delete configRS.nodes[i].fullOptions.configsvr;
- configRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
- }
- configRS.startSet({}, true);
- expectState(configRS, ReplSetTest.State.PRIMARY);
- configRS.stopSet();
-
- jsTestLog("Restarting shardRS as a standalone ReplicaSet");
- for (let i = 0; i < shardRS.nodes.length; i++) {
- delete shardRS.nodes[i].fullOptions.shardsvr;
- shardRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
- }
- shardRS.startSet({}, true);
- expectState(shardRS, ReplSetTest.State.PRIMARY);
- shardRS.stopSet();
- MongoRunner.stopMongos(st.s);
+'use strict';
+
+function expectState(rst, state) {
+ assert.soon(function() {
+ var status = rst.status();
+ if (status.myState != state) {
+ print("Waiting for state " + state + " in replSetGetStatus output: " + tojson(status));
+ }
+ return status.myState == state;
+ });
+}
+
+let configSvr =
+ MongoRunner.runMongod({configsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
+assert.eq(configSvr, null);
+
+let shardSvr =
+ MongoRunner.runMongod({shardsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
+assert.eq(shardSvr, null);
+
+var st = new ShardingTest({name: "skipConfig", shards: {rs0: {nodes: 1}}});
+var configRS = st.configRS;
+var shardRS = st.rs0;
+
+shardRS.stopSet(15, true);
+configRS.stopSet(undefined, true);
+
+jsTestLog("Restarting configRS as a standalone ReplicaSet");
+
+for (let i = 0; i < configRS.nodes.length; i++) {
+ delete configRS.nodes[i].fullOptions.configsvr;
+ configRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
+}
+configRS.startSet({}, true);
+expectState(configRS, ReplSetTest.State.PRIMARY);
+configRS.stopSet();
+
+jsTestLog("Restarting shardRS as a standalone ReplicaSet");
+for (let i = 0; i < shardRS.nodes.length; i++) {
+ delete shardRS.nodes[i].fullOptions.shardsvr;
+ shardRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
+}
+shardRS.startSet({}, true);
+expectState(shardRS, ReplSetTest.State.PRIMARY);
+shardRS.stopSet();
+MongoRunner.stopMongos(st.s);
})();
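
The expectState() helper above is the standard jstest polling idiom: retry a predicate with assert.soon until the replSetGetStatus state matches. Reduced to its core, assuming an initiated ReplSetTest `rst`:

    // Illustrative sketch of the polling idiom used by expectState().
    assert.soon(function() {
        return rst.status().myState == ReplSetTest.State.PRIMARY;
    }, "node did not reach PRIMARY in time");
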
diff --git a/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js b/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js
index 538ba853938..fb56434fd61 100644
--- a/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js
+++ b/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js
@@ -9,59 +9,59 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB("test");
- const testColl = testDB.getCollection("skip_write_conflict_retries_failpoint");
+const primary = rst.getPrimary();
+const testDB = primary.getDB("test");
+const testColl = testDB.getCollection("skip_write_conflict_retries_failpoint");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(testDB.getName());
- const sessionColl = sessionDB.getCollection(testColl.getName());
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(testDB.getName());
+const sessionColl = sessionDB.getCollection(testColl.getName());
- assert.commandWorked(testColl.runCommand(
- "createIndexes",
- {indexes: [{key: {a: 1}, name: "a_1", unique: true}], writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.runCommand(
+ "createIndexes",
+ {indexes: [{key: {a: 1}, name: "a_1", unique: true}], writeConcern: {w: "majority"}}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
- // A non-transactional insert would ordinarily keep retrying if it conflicts with a write
- // operation performed inside a multi-statement transaction. However, with the
- // "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
- // fail with a WriteConflict error response.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "from transaction", a: 0}));
+// A non-transactional insert would ordinarily keep retrying if it conflicts with a write
+// operation performed inside a multi-statement transaction. However, with the
+// "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
+// fail with a WriteConflict error response.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "from transaction", a: 0}));
- assert.commandFailedWithCode(testColl.insert({_id: "from outside transaction", a: 0}),
- ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(testColl.insert({_id: "from outside transaction", a: 0}),
+ ErrorCodes.WriteConflict);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(testColl.findOne({a: 0}), {_id: "from transaction", a: 0});
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(testColl.findOne({a: 0}), {_id: "from transaction", a: 0});
- // A non-transactional update would ordinarily keep retrying if it conflicts with a write
- // operation performed inside a multi-statement transaction. However, with the
- // "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
- // fail with a WriteConflict error response.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "from prepared transaction", a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// A non-transactional update would ordinarily keep retrying if it conflicts with a write
+// operation performed inside a multi-statement transaction. However, with the
+// "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
+// fail with a WriteConflict error response.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "from prepared transaction", a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(testColl.update({_id: "from transaction"}, {$set: {a: 1}}),
- ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(testColl.update({_id: "from transaction"}, {$set: {a: 1}}),
+ ErrorCodes.WriteConflict);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testColl.findOne({a: 1}), {_id: "from prepared transaction", a: 1});
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testColl.findOne({a: 1}), {_id: "from prepared transaction", a: 1});
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
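
The failpoint handling above follows the usual bracket shape: enable, run the assertions that depend on it, then disable. A sketch of that shape with the same failpoint, assuming an admin-capable connection `testDB` (the try/finally is an illustrative hardening, not something this test does):

    assert.commandWorked(
        testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
    try {
        // ... run operations expected to fail fast with ErrorCodes.WriteConflict ...
    } finally {
        assert.commandWorked(
            testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
    }
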
diff --git a/jstests/noPassthrough/snapshotWindow_serverParameters.js b/jstests/noPassthrough/snapshotWindow_serverParameters.js
index 4b41954ed61..fa0f004c511 100644
--- a/jstests/noPassthrough/snapshotWindow_serverParameters.js
+++ b/jstests/noPassthrough/snapshotWindow_serverParameters.js
@@ -2,84 +2,84 @@
// and via setParameter command.
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // Valid parameter values are in the range [0, infinity).
- testNumericServerParameter("maxTargetSnapshotHistoryWindowInSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 5 /*defaultValue*/,
- 30 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- -1 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [0, infinity).
+testNumericServerParameter("maxTargetSnapshotHistoryWindowInSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 5 /*defaultValue*/,
+ 30 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ -1 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [0, 100].
- testNumericServerParameter("cachePressureThreshold",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 50 /*defaultValue*/,
- 70 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- -1 /*lowerOutOfBounds*/,
- true /*hasUpperBound*/,
- 101 /*upperOutOfBounds*/);
+// Valid parameter values are in the range [0, 100].
+testNumericServerParameter("cachePressureThreshold",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 50 /*defaultValue*/,
+ 70 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ -1 /*lowerOutOfBounds*/,
+ true /*hasUpperBound*/,
+ 101 /*upperOutOfBounds*/);
- // Valid parameter values are in the range (0, 1).
- testNumericServerParameter("snapshotWindowMultiplicativeDecrease",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 0.75 /*defaultValue*/,
- 0.50 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- -1 /*lowerOutOfBounds*/,
- true /*hasUpperBound*/,
- 1.1 /*upperOutOfBounds*/);
+// Valid parameter values are in the range (0, 1).
+testNumericServerParameter("snapshotWindowMultiplicativeDecrease",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 0.75 /*defaultValue*/,
+ 0.50 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ -1 /*lowerOutOfBounds*/,
+ true /*hasUpperBound*/,
+ 1.1 /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("snapshotWindowAdditiveIncreaseSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 2 /*defaultValue*/,
- 10 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("snapshotWindowAdditiveIncreaseSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 2 /*defaultValue*/,
+ 10 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("checkCachePressurePeriodSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 5 /*defaultValue*/,
- 8 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("checkCachePressurePeriodSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 5 /*defaultValue*/,
+ 8 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("minMillisBetweenSnapshotWindowInc",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 500 /*defaultValue*/,
- 2 * 1000 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("minMillisBetweenSnapshotWindowInc",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 500 /*defaultValue*/,
+ 2 * 1000 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("minMillisBetweenSnapshotWindowDec",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 500 /*defaultValue*/,
- 2 * 1000 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("minMillisBetweenSnapshotWindowDec",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 500 /*defaultValue*/,
+ 2 * 1000 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
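
Each testNumericServerParameter() call above amounts to a setParameter/getParameter round-trip plus bounds checks. A sketch for one of the parameters listed, assuming an admin connection `conn` to a standalone mongod:

    const admin = conn.getDB("admin");
    assert.commandWorked(
        admin.runCommand({setParameter: 1, maxTargetSnapshotHistoryWindowInSeconds: 30}));
    const res = assert.commandWorked(
        admin.runCommand({getParameter: 1, maxTargetSnapshotHistoryWindowInSeconds: 1}));
    assert.eq(30, res.maxTargetSnapshotHistoryWindowInSeconds);
    // A value below the test's stated lower bound of 0 should be rejected.
    assert.commandFailed(
        admin.runCommand({setParameter: 1, maxTargetSnapshotHistoryWindowInSeconds: -1}));
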
diff --git a/jstests/noPassthrough/snapshot_cursor_integrity.js b/jstests/noPassthrough/snapshot_cursor_integrity.js
index 6916bee74e7..b69eb97f848 100644
--- a/jstests/noPassthrough/snapshot_cursor_integrity.js
+++ b/jstests/noPassthrough/snapshot_cursor_integrity.js
@@ -2,155 +2,155 @@
// transaction/session. Specifically tests this in the context of snapshot cursors.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // This test makes assertions on commands run without logical session ids.
- TestData.disableImplicitSessions = true;
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primaryDB = rst.getPrimary().getDB(dbName);
-
- const session1 = primaryDB.getMongo().startSession();
- const sessionDB1 = session1.getDatabase(dbName);
-
- const session2 = primaryDB.getMongo().startSession();
- const sessionDB2 = session2.getDatabase(dbName);
-
- const bulk = primaryDB.coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- // Establish a snapshot cursor in session1.
- let res = assert.commandWorked(sessionDB1.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- autocommit: false,
- startTransaction: true,
- batchSize: 2
- }));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- let cursorID = res.cursor.id;
-
- // The cursor may not be iterated outside of any session.
- assert.commandFailedWithCode(
- primaryDB.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50737);
-
- // The cursor can still be iterated in session1.
- assert.commandWorked(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }));
-
- // The cursor may not be iterated in a different session.
- assert.commandFailedWithCode(
- sessionDB2.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50738);
-
- // The cursor can still be iterated in session1.
- assert.commandWorked(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }));
-
- // The cursor may not be iterated outside of any transaction.
- assert.commandFailedWithCode(
- sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50740);
-
- // The cursor can still be iterated in its transaction in session1.
- assert.commandWorked(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }));
-
- // The cursor may not be iterated in a different transaction on session1.
- assert.commandWorked(sessionDB1.runCommand({
- find: collName,
- txnNumber: NumberLong(1),
- autocommit: false,
- readConcern: {level: "snapshot"},
- startTransaction: true
- }));
- assert.commandFailedWithCode(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(1),
- batchSize: 2
- }),
- 50741);
-
- // The cursor can no longer be iterated because its transaction has ended.
- assert.commandFailedWithCode(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }),
- ErrorCodes.TransactionTooOld);
-
- // Kill the cursor.
- assert.commandWorked(
- sessionDB1.runCommand({killCursors: sessionDB1.coll.getName(), cursors: [cursorID]}));
-
- // Establish a cursor outside of any transaction in session1.
- res = assert.commandWorked(sessionDB1.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- cursorID = res.cursor.id;
-
- // The cursor may not be iterated inside a transaction.
- assert.commandWorked(sessionDB1.runCommand({
- find: collName,
- txnNumber: NumberLong(2),
- autocommit: false,
- readConcern: {level: "snapshot"},
- startTransaction: true
- }));
- assert.commandFailedWithCode(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(2),
- batchSize: 2
- }),
- 50739);
-
- // The cursor can still be iterated outside of any transaction. Exhaust the cursor.
- assert.commandWorked(sessionDB1.runCommand({getMore: cursorID, collection: collName}));
-
- // Establish a cursor outside of any session.
- res = assert.commandWorked(primaryDB.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- cursorID = res.cursor.id;
-
- // The cursor may not be iterated inside a session.
- assert.commandFailedWithCode(
- sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50736);
-
- // The cursor can still be iterated outside of any session. Exhaust the cursor.
- assert.commandWorked(primaryDB.runCommand({getMore: cursorID, collection: collName}));
-
- session1.endSession();
- session2.endSession();
- rst.stopSet();
+"use strict";
+
+// This test makes assertions on commands run without logical session ids.
+TestData.disableImplicitSessions = true;
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primaryDB = rst.getPrimary().getDB(dbName);
+
+const session1 = primaryDB.getMongo().startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+
+const session2 = primaryDB.getMongo().startSession();
+const sessionDB2 = session2.getDatabase(dbName);
+
+const bulk = primaryDB.coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 10; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
+
+// Establish a snapshot cursor in session1.
+let res = assert.commandWorked(sessionDB1.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ autocommit: false,
+ startTransaction: true,
+ batchSize: 2
+}));
+assert(res.hasOwnProperty("cursor"));
+assert(res.cursor.hasOwnProperty("id"));
+let cursorID = res.cursor.id;
+
+// The cursor may not be iterated outside of any session.
+assert.commandFailedWithCode(
+ primaryDB.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50737);
+
+// The cursor can still be iterated in session1.
+assert.commandWorked(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}));
+
+// The cursor may not be iterated in a different session.
+assert.commandFailedWithCode(
+ sessionDB2.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50738);
+
+// The cursor can still be iterated in session1.
+assert.commandWorked(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}));
+
+// The cursor may not be iterated outside of any transaction.
+assert.commandFailedWithCode(
+ sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50740);
+
+// The cursor can still be iterated in its transaction in session1.
+assert.commandWorked(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}));
+
+// The cursor may not be iterated in a different transaction on session1.
+assert.commandWorked(sessionDB1.runCommand({
+ find: collName,
+ txnNumber: NumberLong(1),
+ autocommit: false,
+ readConcern: {level: "snapshot"},
+ startTransaction: true
+}));
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(1),
+ batchSize: 2
+}),
+ 50741);
+
+// The cursor can no longer be iterated because its transaction has ended.
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}),
+ ErrorCodes.TransactionTooOld);
+
+// Kill the cursor.
+assert.commandWorked(
+ sessionDB1.runCommand({killCursors: sessionDB1.coll.getName(), cursors: [cursorID]}));
+
+// Establish a cursor outside of any transaction in session1.
+res = assert.commandWorked(sessionDB1.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"));
+assert(res.cursor.hasOwnProperty("id"));
+cursorID = res.cursor.id;
+
+// The cursor may not be iterated inside a transaction.
+assert.commandWorked(sessionDB1.runCommand({
+ find: collName,
+ txnNumber: NumberLong(2),
+ autocommit: false,
+ readConcern: {level: "snapshot"},
+ startTransaction: true
+}));
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(2),
+ batchSize: 2
+}),
+ 50739);
+
+// The cursor can still be iterated outside of any transaction. Exhaust the cursor.
+assert.commandWorked(sessionDB1.runCommand({getMore: cursorID, collection: collName}));
+
+// Establish a cursor outside of any session.
+res = assert.commandWorked(primaryDB.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"));
+assert(res.cursor.hasOwnProperty("id"));
+cursorID = res.cursor.id;
+
+// The cursor may not be iterated inside a session.
+assert.commandFailedWithCode(
+ sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50736);
+
+// The cursor can still be iterated outside of any session. Exhaust the cursor.
+assert.commandWorked(primaryDB.runCommand({getMore: cursorID, collection: collName}));
+
+session1.endSession();
+session2.endSession();
+rst.stopSet();
})();
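
Every error code asserted above (50736-50741) enforces one rule: a getMore must be issued in the same session and the same transaction as the find that opened the cursor. The only well-formed continuation therefore looks like this sketch (names taken from the test; assumes the transaction numbered 0 is still open):

    assert.commandWorked(sessionDB1.runCommand({
        getMore: cursorID,
        collection: collName,
        txnNumber: NumberLong(0),  // must match the originating transaction
        autocommit: false,
        batchSize: 2
    }));
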
diff --git a/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js b/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js
index f73eb952043..a9cc8f40d9b 100644
--- a/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js
+++ b/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js
@@ -1,99 +1,98 @@
// Tests that stashed transaction resources are destroyed at shutdown and stepdown.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- //
- // Test that stashed transaction resources are destroyed at shutdown.
- //
-
- let rst = new ReplSetTest({nodes: 1});
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+//
+// Test that stashed transaction resources are destroyed at shutdown.
+//
+
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+let primaryDB = rst.getPrimary().getDB(dbName);
+
+let session = primaryDB.getMongo().startSession();
+let sessionDB = session.getDatabase(dbName);
+
+for (let i = 0; i < 4; i++) {
+ assert.commandWorked(sessionDB.coll.insert({_id: i}, {writeConcern: {w: "majority"}}));
+}
+
+// Create a snapshot read cursor.
+assert.commandWorked(sessionDB.runCommand({
+ find: collName,
+ batchSize: 2,
+ readConcern: {level: "snapshot"},
+ startTransaction: true,
+ autocommit: false,
+ txnNumber: NumberLong(0)
+}));
+
+// It should be possible to shut down the server without hanging. We must skip collection
+// validation, since this will hang.
+const signal = true; // Use default kill signal.
+const forRestart = false;
+rst.stopSet(signal, forRestart, {skipValidation: true});
+
+function testStepdown(stepdownFunc) {
+ rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
- let primaryDB = rst.getPrimary().getDB(dbName);
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB(dbName);
- let session = primaryDB.getMongo().startSession();
- let sessionDB = session.getDatabase(dbName);
+ const session = primaryDB.getMongo().startSession();
+ const sessionDB = session.getDatabase(dbName);
for (let i = 0; i < 4; i++) {
assert.commandWorked(sessionDB.coll.insert({_id: i}, {writeConcern: {w: "majority"}}));
}
// Create a snapshot read cursor.
- assert.commandWorked(sessionDB.runCommand({
+ const res = assert.commandWorked(sessionDB.runCommand({
find: collName,
batchSize: 2,
readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
startTransaction: true,
- autocommit: false,
- txnNumber: NumberLong(0)
+ autocommit: false
}));
-
- // It should be possible to shut down the server without hanging. We must skip collection
- // validation, since this will hang.
- const signal = true; // Use default kill signal.
- const forRestart = false;
- rst.stopSet(signal, forRestart, {skipValidation: true});
-
- function testStepdown(stepdownFunc) {
- rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
-
- const session = primaryDB.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
-
- for (let i = 0; i < 4; i++) {
- assert.commandWorked(sessionDB.coll.insert({_id: i}, {writeConcern: {w: "majority"}}));
- }
-
- // Create a snapshot read cursor.
- const res = assert.commandWorked(sessionDB.runCommand({
- find: collName,
- batchSize: 2,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- startTransaction: true,
- autocommit: false
- }));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- const cursorId = res.cursor.id;
-
- // It should be possible to step down the primary without hanging.
- stepdownFunc(rst);
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Kill the cursor.
- assert.commandWorked(sessionDB.runCommand({killCursors: collName, cursors: [cursorId]}));
- rst.stopSet();
- }
-
- //
- // Test that stashed transaction resources are destroyed at stepdown triggered by
- // replSetStepDown.
- //
- function replSetStepDown(replSetTest) {
- assert.commandWorked(
- replSetTest.getPrimary().adminCommand({replSetStepDown: 60, force: true}));
- }
- testStepdown(replSetStepDown);
-
- //
- // Test that stashed transaction resources are destroyed at stepdown triggered by loss of
- // quorum.
- //
- function stepDownOnLossOfQuorum(replSetTest) {
- const secondary = rst.getSecondary();
- const secondaryId = rst.getNodeId(secondary);
- rst.stop(secondaryId);
- }
- testStepdown(stepDownOnLossOfQuorum);
+ assert(res.hasOwnProperty("cursor"), tojson(res));
+ assert(res.cursor.hasOwnProperty("id"), tojson(res));
+ const cursorId = res.cursor.id;
+
+ // It should be possible to step down the primary without hanging.
+ stepdownFunc(rst);
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+ // Kill the cursor.
+ assert.commandWorked(sessionDB.runCommand({killCursors: collName, cursors: [cursorId]}));
+ rst.stopSet();
+}
+
+//
+// Test that stashed transaction resources are destroyed at stepdown triggered by
+// replSetStepDown.
+//
+function replSetStepDown(replSetTest) {
+ assert.commandWorked(replSetTest.getPrimary().adminCommand({replSetStepDown: 60, force: true}));
+}
+testStepdown(replSetStepDown);
+
+//
+// Test that stashed transaction resources are destroyed at stepdown triggered by loss of
+// quorum.
+//
+function stepDownOnLossOfQuorum(replSetTest) {
+ const secondary = rst.getSecondary();
+ const secondaryId = rst.getNodeId(secondary);
+ rst.stop(secondaryId);
+}
+testStepdown(stepDownOnLossOfQuorum);
})();
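
Both stepdown variants reduce to removing the primary's ability to stay primary and then waiting for the state transition, which is what lets the test observe that stashed cursor resources were destroyed. The replSetStepDown path, as a two-line sketch with the test's `rst` and `primary`:

    assert.commandWorked(primary.adminCommand({replSetStepDown: 60, force: true}));
    rst.waitForState(primary, ReplSetTest.State.SECONDARY);
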
diff --git a/jstests/noPassthrough/snapshot_reads.js b/jstests/noPassthrough/snapshot_reads.js
index 75b2bc9c41b..9c82a24af7e 100644
--- a/jstests/noPassthrough/snapshot_reads.js
+++ b/jstests/noPassthrough/snapshot_reads.js
@@ -1,131 +1,129 @@
// Tests snapshot isolation on readConcern level snapshot read.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- let conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- rst.initiate(conf);
-
- const primaryDB = rst.getPrimary().getDB(dbName);
-
- function parseCursor(cmdResult) {
- if (cmdResult.hasOwnProperty("cursor")) {
- assert(cmdResult.cursor.hasOwnProperty("id"));
- return cmdResult.cursor;
- } else if (cmdResult.hasOwnProperty("cursors") && cmdResult.cursors.length === 1 &&
- cmdResult.cursors[0].hasOwnProperty("cursor")) {
- assert(cmdResult.cursors[0].cursor.hasOwnProperty("id"));
- return cmdResult.cursors[0].cursor;
- }
-
- throw Error("parseCursor failed to find cursor object. Command Result: " +
- tojson(cmdResult));
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+let conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+rst.initiate(conf);
+
+const primaryDB = rst.getPrimary().getDB(dbName);
+
+function parseCursor(cmdResult) {
+ if (cmdResult.hasOwnProperty("cursor")) {
+ assert(cmdResult.cursor.hasOwnProperty("id"));
+ return cmdResult.cursor;
+ } else if (cmdResult.hasOwnProperty("cursors") && cmdResult.cursors.length === 1 &&
+ cmdResult.cursors[0].hasOwnProperty("cursor")) {
+ assert(cmdResult.cursors[0].cursor.hasOwnProperty("id"));
+ return cmdResult.cursors[0].cursor;
}
- function runTest({useCausalConsistency, establishCursorCmd, readConcern}) {
- let cmdName = Object.getOwnPropertyNames(establishCursorCmd)[0];
+ throw Error("parseCursor failed to find cursor object. Command Result: " + tojson(cmdResult));
+}
+
+function runTest({useCausalConsistency, establishCursorCmd, readConcern}) {
+ let cmdName = Object.getOwnPropertyNames(establishCursorCmd)[0];
- jsTestLog(`Test establishCursorCmd: ${cmdName},
+ jsTestLog(`Test establishCursorCmd: ${cmdName},
useCausalConsistency: ${useCausalConsistency},
readConcern: ${tojson(readConcern)}`);
- primaryDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ primaryDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- const session =
- primaryDB.getMongo().startSession({causalConsistency: useCausalConsistency});
- const sessionDb = session.getDatabase(dbName);
+ const session = primaryDB.getMongo().startSession({causalConsistency: useCausalConsistency});
+ const sessionDb = session.getDatabase(dbName);
- const bulk = primaryDB.coll.initializeUnorderedBulkOp();
- for (let x = 0; x < 10; ++x) {
- bulk.insert({_id: x});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- session.startTransaction({readConcern: readConcern});
-
- // Establish a snapshot batchSize:0 cursor.
- let res = assert.commandWorked(sessionDb.runCommand(establishCursorCmd));
- let cursor = parseCursor(res);
-
- assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
- assert.eq(0, cursor.firstBatch.length, tojson(res));
- assert.neq(cursor.id, 0);
-
- // Insert an 11th document which should not be visible to the snapshot cursor. This write is
- // performed outside of the session.
- assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
-
- // Fetch the first 5 documents.
- res = assert.commandWorked(
- sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 5}));
- cursor = parseCursor(res);
- assert.neq(0, cursor.id, tojson(res));
- assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
- assert.eq(5, cursor.nextBatch.length, tojson(res));
-
- // Exhaust the cursor, retrieving the remainder of the result set. Performing a second
- // getMore tests snapshot isolation across multiple getMore invocations.
- res = assert.commandWorked(
- sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 20}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // The cursor has been exhausted.
- cursor = parseCursor(res);
- assert.eq(0, cursor.id, tojson(res));
-
- // Only the remaining 5 of the initial 10 documents are returned. The 11th document is not
- // part of the result set.
- assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
- assert.eq(5, cursor.nextBatch.length, tojson(res));
-
- // Perform a second snapshot read under a new transaction.
- session.startTransaction({readConcern: readConcern});
- res = assert.commandWorked(
- sessionDb.runCommand({find: collName, sort: {_id: 1}, batchSize: 20}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // The cursor has been exhausted.
- cursor = parseCursor(res);
- assert.eq(0, cursor.id, tojson(res));
-
- // All 11 documents are returned.
- assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
- assert.eq(11, cursor.firstBatch.length, tojson(res));
-
- session.endSession();
+ const bulk = primaryDB.coll.initializeUnorderedBulkOp();
+ for (let x = 0; x < 10; ++x) {
+ bulk.insert({_id: x});
}
-
- // Test transaction reads using find or aggregate. Inserts outside
- // the transaction aren't visible, even after they are majority-committed.
- // (This is a requirement for readConcern snapshot, but it is merely an
- // implementation detail for majority or for the default, local. At some
- // point, it would be desirable to have a transaction with readConcern
- // local or majority see writes from other sessions. However, our current
- // implementation of ensuring any data we read does not get rolled back
- // relies on the fact that we read from a single WT snapshot, since we
- // choose the timestamp to wait on in the first command of the
- // transaction.)
- let findCmd = {find: collName, sort: {_id: 1}, batchSize: 0};
- let aggCmd = {aggregate: collName, pipeline: [{$sort: {_id: 1}}], cursor: {batchSize: 0}};
-
- for (let establishCursorCmd of[findCmd, aggCmd]) {
- for (let useCausalConsistency of[false, true]) {
- for (let readConcern of[{level: "snapshot"}, {level: "majority"}, null]) {
- runTest({
- establishCursorCmd: establishCursorCmd,
- useCausalConsistency: useCausalConsistency,
- readConcern: readConcern
- });
- }
+ assert.commandWorked(bulk.execute({w: "majority"}));
+
+ session.startTransaction({readConcern: readConcern});
+
+ // Establish a snapshot batchSize:0 cursor.
+ let res = assert.commandWorked(sessionDb.runCommand(establishCursorCmd));
+ let cursor = parseCursor(res);
+
+ assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
+ assert.eq(0, cursor.firstBatch.length, tojson(res));
+ assert.neq(cursor.id, 0);
+
+ // Insert an 11th document which should not be visible to the snapshot cursor. This write is
+ // performed outside of the session.
+ assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
+
+ // Fetch the first 5 documents.
+ res = assert.commandWorked(
+ sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 5}));
+ cursor = parseCursor(res);
+ assert.neq(0, cursor.id, tojson(res));
+ assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
+ assert.eq(5, cursor.nextBatch.length, tojson(res));
+
+ // Exhaust the cursor, retrieving the remainder of the result set. Performing a second
+ // getMore tests snapshot isolation across multiple getMore invocations.
+ res = assert.commandWorked(
+ sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 20}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // The cursor has been exhausted.
+ cursor = parseCursor(res);
+ assert.eq(0, cursor.id, tojson(res));
+
+ // Only the remaining 5 of the initial 10 documents are returned. The 11th document is not
+ // part of the result set.
+ assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
+ assert.eq(5, cursor.nextBatch.length, tojson(res));
+
+ // Perform a second snapshot read under a new transaction.
+ session.startTransaction({readConcern: readConcern});
+ res =
+ assert.commandWorked(sessionDb.runCommand({find: collName, sort: {_id: 1}, batchSize: 20}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // The cursor has been exhausted.
+ cursor = parseCursor(res);
+ assert.eq(0, cursor.id, tojson(res));
+
+ // All 11 documents are returned.
+ assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
+ assert.eq(11, cursor.firstBatch.length, tojson(res));
+
+ session.endSession();
+}
+
+// Test transaction reads using find or aggregate. Inserts outside
+// the transaction aren't visible, even after they are majority-committed.
+// (This is a requirement for readConcern snapshot, but it is merely an
+// implementation detail for majority or for the default, local. At some
+// point, it would be desirable to have a transaction with readConcern
+// local or majority see writes from other sessions. However, our current
+// implementation of ensuring any data we read does not get rolled back
+// relies on the fact that we read from a single WT snapshot, since we
+// choose the timestamp to wait on in the first command of the
+// transaction.)
+let findCmd = {find: collName, sort: {_id: 1}, batchSize: 0};
+let aggCmd = {aggregate: collName, pipeline: [{$sort: {_id: 1}}], cursor: {batchSize: 0}};
+
+for (let establishCursorCmd of [findCmd, aggCmd]) {
+ for (let useCausalConsistency of [false, true]) {
+ for (let readConcern of [{level: "snapshot"}, {level: "majority"}, null]) {
+ runTest({
+ establishCursorCmd: establishCursorCmd,
+ useCausalConsistency: useCausalConsistency,
+ readConcern: readConcern
+ });
}
}
+}
- rst.stopSet();
+rst.stopSet();
})();
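
The visibility argument above follows a fixed skeleton: open a transaction at the readConcern under test, write from outside the session, then drain the transaction's cursor and observe that the outside write never appears. Condensed, with the test's names (sessionDb, primaryDB, collName):

    session.startTransaction({readConcern: {level: "snapshot"}});
    let res = assert.commandWorked(
        sessionDb.runCommand({find: collName, sort: {_id: 1}, batchSize: 0}));
    // This write happens outside the session and stays invisible to the open snapshot.
    assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
    res = assert.commandWorked(
        sessionDb.runCommand({getMore: res.cursor.id, collection: collName, batchSize: 20}));
    assert.commandWorked(session.commitTransaction_forTesting());
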
diff --git a/jstests/noPassthrough/socket_disconnect_kills.js b/jstests/noPassthrough/socket_disconnect_kills.js
index 5214bf58ad4..3d6eb513b24 100644
--- a/jstests/noPassthrough/socket_disconnect_kills.js
+++ b/jstests/noPassthrough/socket_disconnect_kills.js
@@ -11,217 +11,213 @@
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- const testName = "socket_disconnect_kills";
-
- // Used to generate unique appnames
- let id = 0;
-
- // client - A client connection for curop (and that holds the hostname)
- // pre - A callback to run with the timing out socket
- // post - A callback to run after everything else has resolved (cleanup)
- //
- // Returns false if the op was gone from current op
- function check(client, pre, post) {
- const interval = 200;
- const timeout = 10000;
- const socketTimeout = 5000;
-
- const host = client.host;
-
- // Make a socket which will timeout
- id++;
- let conn = new Mongo(
- `mongodb://${host}/?socketTimeoutMS=${socketTimeout}&appName=${testName}${id}`);
-
- // Make sure it works at all
- assert.commandWorked(conn.adminCommand({ping: 1}));
-
- try {
- // Make sure that whatever operation we ran had a network error
- assert.throws(function() {
- try {
- pre(conn);
- } catch (e) {
- throw e;
- }
- }, [], "error doing query: failed: network error while attempting");
-
- // Spin until the op leaves currentop, or timeout passes
- const start = new Date();
-
- while (1) {
- if (!client.getDB("admin")
- .aggregate([
- {$currentOp: {localOps: true}},
- {$match: {appName: testName + id}},
- ])
- .itcount()) {
- return false;
- }
-
- if (((new Date()).getTime() - start.getTime()) > timeout) {
- return true;
- }
-
- sleep(interval);
- }
- } finally {
- post();
- }
- }
-
- function runWithCuropFailPointEnabled(client, failPointName) {
- return function(entry) {
- entry[0](client,
- function(client) {
- assert.commandWorked(client.adminCommand({
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- data: {shouldCheckForInterrupt: true},
- }));
-
- entry[1](client);
- },
- function() {
- assert.commandWorked(
- client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- });
- };
- }
-
- function runWithCmdFailPointEnabled(client) {
- return function(entry) {
- const failPointName = "waitInCommandMarkKillOnClientDisconnect";
-
- entry[0](client,
- function(client) {
- assert.commandWorked(client.adminCommand({
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- data: {appName: testName + id},
- }));
-
- entry[1](client);
- },
- function() {
- assert.commandWorked(
- client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- });
- };
- }
+"use strict";
- function checkClosedEarly(client, pre, post) {
- assert(!check(client, pre, post), "operation killed on socket disconnect");
- }
-
- function checkNotClosedEarly(client, pre, post) {
- assert(check(client, pre, post), "operation not killed on socket disconnect");
- }
+const testName = "socket_disconnect_kills";
- function runCommand(cmd) {
- return function(client) {
- assert.commandWorked(client.getDB(testName).runCommand(cmd));
- };
- }
+// Used to generate unique appnames
+let id = 0;
- function runTests(client) {
- let admin = client.getDB("admin");
+// client - A client connection for curop (and that holds the hostname)
+// pre - A callback to run with the timing out socket
+// post - A callback to run after everything else has resolved (cleanup)
+//
+// Returns false if the op was gone from current op
+function check(client, pre, post) {
+ const interval = 200;
+ const timeout = 10000;
+ const socketTimeout = 5000;
+
+ const host = client.host;
+
+ // Make a socket which will timeout
+ id++;
+ let conn =
+ new Mongo(`mongodb://${host}/?socketTimeoutMS=${socketTimeout}&appName=${testName}${id}`);
+
+ // Make sure it works at all
+ assert.commandWorked(conn.adminCommand({ping: 1}));
+
+ try {
+ // Make sure that whatever operation we ran had a network error
+ assert.throws(function() {
+ try {
+ pre(conn);
+ } catch (e) {
+ throw e;
+ }
+ }, [], "error doing query: failed: network error while attempting");
+
+ // Spin until the op leaves currentop, or timeout passes
+ const start = new Date();
+
+ while (1) {
+ if (!client.getDB("admin")
+ .aggregate([
+ {$currentOp: {localOps: true}},
+ {$match: {appName: testName + id}},
+ ])
+ .itcount()) {
+ return false;
+ }
- assert.writeOK(client.getDB(testName).test.insert({x: 1}));
- assert.writeOK(client.getDB(testName).test.insert({x: 2}));
- assert.writeOK(client.getDB(testName).test.insert({x: 3}));
+ if (((new Date()).getTime() - start.getTime()) > timeout) {
+ return true;
+ }
- [[checkClosedEarly, runCommand({find: "test", filter: {}})],
- [
- checkClosedEarly,
- runCommand({
- find: "test",
- filter: {
- $where: function() {
- sleep(100000);
- }
- }
- })
- ],
- [
- checkClosedEarly,
- runCommand({
- find: "test",
- filter: {
- $where: function() {
- while (true) {
- }
- }
- }
- })
- ],
- [
- checkClosedEarly,
- function(client) {
- client.forceReadMode("legacy");
- assert(client.getDB(testName).test.findOne({}));
- }
- ],
- ].forEach(runWithCuropFailPointEnabled(client, "waitInFindBeforeMakingBatch"));
-
- // After SERVER-39475, re-enable these tests and add negative testing for $out cursors.
- const serverSupportsEarlyDisconnectOnGetMore = false;
- if (serverSupportsEarlyDisconnectOnGetMore) {
- [[
- checkClosedEarly,
- function(client) {
- let result = assert.commandWorked(
- client.getDB(testName).runCommand({find: "test", filter: {}, batchSize: 0}));
- assert.commandWorked(client.getDB(testName).runCommand(
- {getMore: result.cursor.id, collection: "test"}));
- }
- ],
- [
- checkClosedEarly,
- function(client) {
- client.forceReadMode("legacy");
- var cursor = client.getDB(testName).test.find({}).batchSize(2);
- assert(cursor.next());
- assert(cursor.next());
- assert(cursor.next());
- }
- ],
- ].forEach(runWithCuropFailPointEnabled(client,
- "waitAfterPinningCursorBeforeGetMoreBatch"));
+ sleep(interval);
}
-
- [[checkClosedEarly, runCommand({aggregate: "test", pipeline: [], cursor: {}})],
+ } finally {
+ post();
+ }
+}
+
+function runWithCuropFailPointEnabled(client, failPointName) {
+ return function(entry) {
+ entry[0](client,
+ function(client) {
+ assert.commandWorked(client.adminCommand({
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ data: {shouldCheckForInterrupt: true},
+ }));
+
+ entry[1](client);
+ },
+ function() {
+ assert.commandWorked(
+ client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ });
+ };
+}
+
+function runWithCmdFailPointEnabled(client) {
+ return function(entry) {
+ const failPointName = "waitInCommandMarkKillOnClientDisconnect";
+
+ entry[0](client,
+ function(client) {
+ assert.commandWorked(client.adminCommand({
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ data: {appName: testName + id},
+ }));
+
+ entry[1](client);
+ },
+ function() {
+ assert.commandWorked(
+ client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ });
+ };
+}
+
+function checkClosedEarly(client, pre, post) {
+ assert(!check(client, pre, post), "operation killed on socket disconnect");
+}
+
+function checkNotClosedEarly(client, pre, post) {
+ assert(check(client, pre, post), "operation not killed on socket disconnect");
+}
+
+function runCommand(cmd) {
+ return function(client) {
+ assert.commandWorked(client.getDB(testName).runCommand(cmd));
+ };
+}
+
+function runTests(client) {
+ let admin = client.getDB("admin");
+
+ assert.writeOK(client.getDB(testName).test.insert({x: 1}));
+ assert.writeOK(client.getDB(testName).test.insert({x: 2}));
+ assert.writeOK(client.getDB(testName).test.insert({x: 3}));
+
+ [[checkClosedEarly, runCommand({find: "test", filter: {}})],
+ [
+ checkClosedEarly,
+ runCommand({
+ find: "test",
+ filter: {
+ $where: function() {
+ sleep(100000);
+ }
+ }
+ })
+ ],
+ [
+ checkClosedEarly,
+ runCommand({
+ find: "test",
+ filter: {
+ $where: function() {
+ while (true) {
+ }
+ }
+ }
+ })
+ ],
+ [
+ checkClosedEarly,
+ function(client) {
+ client.forceReadMode("legacy");
+ assert(client.getDB(testName).test.findOne({}));
+ }
+ ],
+ ].forEach(runWithCuropFailPointEnabled(client, "waitInFindBeforeMakingBatch"));
+
+ // After SERVER-39475, re-enable these tests and add negative testing for $out cursors.
+ const serverSupportsEarlyDisconnectOnGetMore = false;
+ if (serverSupportsEarlyDisconnectOnGetMore) {
+ [[
+ checkClosedEarly,
+ function(client) {
+ let result = assert.commandWorked(
+ client.getDB(testName).runCommand({find: "test", filter: {}, batchSize: 0}));
+ assert.commandWorked(client.getDB(testName).runCommand(
+ {getMore: result.cursor.id, collection: "test"}));
+ }
+ ],
[
- checkNotClosedEarly,
- runCommand({aggregate: "test", pipeline: [{$out: "out"}], cursor: {}})
+ checkClosedEarly,
+ function(client) {
+ client.forceReadMode("legacy");
+ var cursor = client.getDB(testName).test.find({}).batchSize(2);
+ assert(cursor.next());
+ assert(cursor.next());
+ assert(cursor.next());
+ }
],
- ].forEach(runWithCmdFailPointEnabled(client));
-
- [[checkClosedEarly, runCommand({count: "test"})],
- [checkClosedEarly, runCommand({distinct: "test", key: "x"})],
- [checkClosedEarly, runCommand({authenticate: "test", user: "x", pass: "y"})],
- [checkClosedEarly, runCommand({getnonce: 1})],
- [checkClosedEarly, runCommand({saslStart: 1})],
- [checkClosedEarly, runCommand({saslContinue: 1})],
- [checkClosedEarly, runCommand({ismaster: 1})],
- [checkClosedEarly, runCommand({listCollections: 1})],
- [checkClosedEarly, runCommand({listDatabases: 1})],
- [checkClosedEarly, runCommand({listIndexes: "test"})],
- ].forEach(runWithCmdFailPointEnabled(client));
+ ].forEach(runWithCuropFailPointEnabled(client, "waitAfterPinningCursorBeforeGetMoreBatch"));
}
- {
- let proc = MongoRunner.runMongod();
- assert.neq(proc, null);
- runTests(proc);
- MongoRunner.stopMongod(proc);
- }
-
- {
- let st = ShardingTest({mongo: 1, config: 1, shards: 1});
- runTests(st.s0);
- st.stop();
- }
+ [[checkClosedEarly, runCommand({aggregate: "test", pipeline: [], cursor: {}})],
+ [checkNotClosedEarly, runCommand({aggregate: "test", pipeline: [{$out: "out"}], cursor: {}})],
+ ].forEach(runWithCmdFailPointEnabled(client));
+
+ [[checkClosedEarly, runCommand({count: "test"})],
+ [checkClosedEarly, runCommand({distinct: "test", key: "x"})],
+ [checkClosedEarly, runCommand({authenticate: "test", user: "x", pass: "y"})],
+ [checkClosedEarly, runCommand({getnonce: 1})],
+ [checkClosedEarly, runCommand({saslStart: 1})],
+ [checkClosedEarly, runCommand({saslContinue: 1})],
+ [checkClosedEarly, runCommand({ismaster: 1})],
+ [checkClosedEarly, runCommand({listCollections: 1})],
+ [checkClosedEarly, runCommand({listDatabases: 1})],
+ [checkClosedEarly, runCommand({listIndexes: "test"})],
+ ].forEach(runWithCmdFailPointEnabled(client));
+}
+
+{
+ let proc = MongoRunner.runMongod();
+ assert.neq(proc, null);
+ runTests(proc);
+ MongoRunner.stopMongod(proc);
+}
+
+{
+ let st = ShardingTest({mongo: 1, config: 1, shards: 1});
+ runTests(st.s0);
+ st.stop();
+}
})();
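
The core trick in check() is tagging each probe connection with a unique appName so the operation can later be found (or confirmed gone) via $currentOp. Isolated, assuming a live connection `client` and a hypothetical tag "probe1":

    const tagged =
        new Mongo(`mongodb://${client.host}/?socketTimeoutMS=5000&appName=probe1`);
    assert.commandWorked(tagged.adminCommand({ping: 1}));
    const stillRunning = client.getDB("admin")
                             .aggregate([
                                 {$currentOp: {localOps: true}},
                                 {$match: {appName: "probe1"}},
                             ])
                             .itcount();
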
diff --git a/jstests/noPassthrough/standalone_replication_recovery.js b/jstests/noPassthrough/standalone_replication_recovery.js
index 1def927772c..6ee47fc9c20 100644
--- a/jstests/noPassthrough/standalone_replication_recovery.js
+++ b/jstests/noPassthrough/standalone_replication_recovery.js
@@ -7,155 +7,150 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
- load("jstests/libs/write_concern_util.js");
-
- const name = 'standalone_replication_recovery';
- const dbName = name;
- const collName = 'srr_coll';
- const logLevel = tojson({storage: {recovery: 2}});
-
- const rst = new ReplSetTest({
- nodes: 2,
- });
-
- function getColl(conn) {
- return conn.getDB(dbName)[collName];
+"use strict";
+load("jstests/replsets/rslib.js");
+load("jstests/libs/write_concern_util.js");
+
+const name = 'standalone_replication_recovery';
+const dbName = name;
+const collName = 'srr_coll';
+const logLevel = tojson({storage: {recovery: 2}});
+
+const rst = new ReplSetTest({
+ nodes: 2,
+});
+
+function getColl(conn) {
+ return conn.getDB(dbName)[collName];
+}
+
+function assertDocsInColl(node, nums) {
+ let results = getColl(node).find().sort({_id: 1}).toArray();
+ let expected = nums.map((i) => ({_id: i}));
+ if (!friendlyEqual(results, expected)) {
+ rst.dumpOplog(node, {}, 100);
}
-
- function assertDocsInColl(node, nums) {
- let results = getColl(node).find().sort({_id: 1}).toArray();
- let expected = nums.map((i) => ({_id: i}));
- if (!friendlyEqual(results, expected)) {
- rst.dumpOplog(node, {}, 100);
- }
- assert.eq(results, expected, "actual (left) != expected (right)");
- }
-
- jsTestLog("Test that an empty standalone fails trying to recover.");
- assert.throws(
- () => rst.start(0, {noReplSet: true, setParameter: {recoverFromOplogAsStandalone: true}}));
-
- jsTestLog("Initiating as a replica set.");
- // Restart as a replica set node without the flag so we can add operations to the oplog.
- let nodes = rst.startSet({setParameter: {logComponentVerbosity: logLevel}});
- let node = nodes[0];
- let secondary = nodes[1];
- rst.initiate({
- _id: name,
- members: [{_id: 0, host: node.host}, {_id: 2, host: secondary.host, priority: 0}]
- });
-
- // Create the collection with w:majority and then perform a clean restart to ensure that
- // the collection is in a stable checkpoint.
- assert.commandWorked(node.getDB(dbName).runCommand({
- create: collName,
- writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
- }));
- assertDocsInColl(node, []);
- node = rst.restart(node, {"noReplSet": false});
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
-
- // Keep node 0 the primary, but prevent it from committing any writes.
- stopServerReplication(secondary);
-
- assert.commandWorked(getColl(node).insert({_id: 3}, {writeConcern: {w: 1, j: 1}}));
- assert.commandWorked(getColl(node).insert({_id: 4}, {writeConcern: {w: 1, j: 1}}));
- assert.commandWorked(getColl(node).insert({_id: 5}, {writeConcern: {w: 1, j: 1}}));
- assertDocsInColl(node, [3, 4, 5]);
-
- jsTestLog("Test that if we kill the node, recovery still plays.");
- rst.stop(node, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- node = rst.restart(node, {"noReplSet": false});
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
- assertDocsInColl(node, [3, 4, 5]);
-
- jsTestLog("Test that a replica set node cannot start up with the parameter set.");
- assert.throws(() => rst.restart(0, {
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- }));
-
- jsTestLog("Test that on restart as a standalone we only see committed writes by default.");
- node =
- rst.start(node, {noReplSet: true, setParameter: {logComponentVerbosity: logLevel}}, true);
- reconnect(node);
- assertDocsInColl(node, []);
-
- jsTestLog("Test that on restart with the flag set we play recovery.");
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [3, 4, 5]);
-
- jsTestLog("Test that we go into read-only mode.");
- assert.commandFailedWithCode(getColl(node).insert({_id: 1}), ErrorCodes.IllegalOperation);
-
- jsTestLog("Test that we cannot set the parameter during standalone runtime.");
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
-
- jsTestLog("Test that on restart after standalone recovery we do not see replicated writes.");
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, []);
- assert.commandWorked(getColl(node).insert({_id: 6}));
- assertDocsInColl(node, [6]);
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [3, 4, 5, 6]);
-
- jsTestLog("Test that we can restart again as a replica set node.");
- node = rst.restart(node, {
- noReplSet: false,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
- assertDocsInColl(node, [3, 4, 5, 6]);
-
- jsTestLog("Test that we cannot set the parameter during replica set runtime.");
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
-
- jsTestLog("Test that we can still recover as a standalone.");
- assert.commandWorked(getColl(node).insert({_id: 7}));
- assertDocsInColl(node, [3, 4, 5, 6, 7]);
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [6]);
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [3, 4, 5, 6, 7]);
-
- jsTestLog("Restart as a replica set node so that the test can complete successfully.");
- node = rst.restart(node, {
- noReplSet: false,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
- assertDocsInColl(node, [3, 4, 5, 6, 7]);
-
- restartServerReplication(secondary);
-
- // Skip checking db hashes since we do a write as a standalone.
- TestData.skipCheckDBHashes = true;
- rst.stopSet();
+ assert.eq(results, expected, "actual (left) != expected (right)");
+}
+
+jsTestLog("Test that an empty standalone fails trying to recover.");
+assert.throws(
+ () => rst.start(0, {noReplSet: true, setParameter: {recoverFromOplogAsStandalone: true}}));
+
+jsTestLog("Initiating as a replica set.");
+// Restart as a replica set node without the flag so we can add operations to the oplog.
+let nodes = rst.startSet({setParameter: {logComponentVerbosity: logLevel}});
+let node = nodes[0];
+let secondary = nodes[1];
+rst.initiate(
+ {_id: name, members: [{_id: 0, host: node.host}, {_id: 2, host: secondary.host, priority: 0}]});
+
+// Create the collection with w:majority and then perform a clean restart to ensure that
+// the collection is in a stable checkpoint.
+assert.commandWorked(node.getDB(dbName).runCommand(
+ {create: collName, writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assertDocsInColl(node, []);
+node = rst.restart(node, {"noReplSet": false});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+
+// Keep node 0 the primary, but prevent it from committing any writes.
+stopServerReplication(secondary);
+
+assert.commandWorked(getColl(node).insert({_id: 3}, {writeConcern: {w: 1, j: 1}}));
+assert.commandWorked(getColl(node).insert({_id: 4}, {writeConcern: {w: 1, j: 1}}));
+assert.commandWorked(getColl(node).insert({_id: 5}, {writeConcern: {w: 1, j: 1}}));
+assertDocsInColl(node, [3, 4, 5]);
+
+jsTestLog("Test that if we kill the node, recovery still plays.");
+rst.stop(node, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+node = rst.restart(node, {"noReplSet": false});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+assertDocsInColl(node, [3, 4, 5]);
+
+jsTestLog("Test that a replica set node cannot start up with the parameter set.");
+assert.throws(
+ () => rst.restart(
+ 0, {setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}}));
+
+jsTestLog("Test that on restart as a standalone we only see committed writes by default.");
+node = rst.start(node, {noReplSet: true, setParameter: {logComponentVerbosity: logLevel}}, true);
+reconnect(node);
+assertDocsInColl(node, []);
+
+jsTestLog("Test that on restart with the flag set we play recovery.");
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [3, 4, 5]);
+
+jsTestLog("Test that we go into read-only mode.");
+assert.commandFailedWithCode(getColl(node).insert({_id: 1}), ErrorCodes.IllegalOperation);
+
+jsTestLog("Test that we cannot set the parameter during standalone runtime.");
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
+
+jsTestLog("Test that on restart after standalone recovery we do not see replicated writes.");
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, []);
+assert.commandWorked(getColl(node).insert({_id: 6}));
+assertDocsInColl(node, [6]);
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [3, 4, 5, 6]);
+
+jsTestLog("Test that we can restart again as a replica set node.");
+node = rst.restart(node, {
+ noReplSet: false,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+assertDocsInColl(node, [3, 4, 5, 6]);
+
+jsTestLog("Test that we cannot set the parameter during replica set runtime.");
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
+
+jsTestLog("Test that we can still recover as a standalone.");
+assert.commandWorked(getColl(node).insert({_id: 7}));
+assertDocsInColl(node, [3, 4, 5, 6, 7]);
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [6]);
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [3, 4, 5, 6, 7]);
+
+jsTestLog("Restart as a replica set node so that the test can complete successfully.");
+node = rst.restart(node, {
+ noReplSet: false,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+assertDocsInColl(node, [3, 4, 5, 6, 7]);
+
+restartServerReplication(secondary);
+
+// Skip checking db hashes since we do a write as a standalone.
+TestData.skipCheckDBHashes = true;
+rst.stopSet();
})();
\ No newline at end of file
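
For reference, the restart cycle the test above exercises reduces to one round trip. A minimal sketch, assuming a fresh single-node ReplSetTest; `recoverFromOplogAsStandalone` is the parameter under test, while the collection name `sketch` is illustrative:

(function() {
'use strict';
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
assert.commandWorked(rst.getPrimary().getDB("test").sketch.insert({_id: 1}));
// Restart as a standalone with the parameter: startup replays the oplog,
// then the server refuses further writes.
let node = rst.restart(0, {noReplSet: true, setParameter: {recoverFromOplogAsStandalone: true}});
assert.eq(1, node.getDB("test").sketch.find({_id: 1}).itcount());
assert.commandFailedWithCode(node.getDB("test").sketch.insert({_id: 2}),
                             ErrorCodes.IllegalOperation);
// Rejoin the set before shutting it down.
node = rst.restart(0, {noReplSet: false});
rst.stopSet();
})();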
diff --git a/jstests/noPassthrough/start_session_command.js b/jstests/noPassthrough/start_session_command.js
index bb542e255fc..5c2bbd4b38b 100644
--- a/jstests/noPassthrough/start_session_command.js
+++ b/jstests/noPassthrough/start_session_command.js
@@ -1,103 +1,101 @@
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var conn;
- var admin;
- var foo;
- var result;
- const request = {startSession: 1};
+var conn;
+var admin;
+var foo;
+var result;
+const request = {
+ startSession: 1
+};
- conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
- admin = conn.getDB("admin");
+conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
+admin = conn.getDB("admin");
- // ensure that the cache is empty
- var serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
- assert.eq(0, serverStatus.logicalSessionRecordCache.activeSessionsCount);
+// ensure that the cache is empty
+var serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
+assert.eq(0, serverStatus.logicalSessionRecordCache.activeSessionsCount);
- // test that we can run startSession unauthenticated when the server is running without --auth
+// test that we can run startSession unauthenticated when the server is running without --auth
- result = admin.runCommand(request);
- assert.commandWorked(
- result,
- "failed test that we can run startSession unauthenticated when the server is running without --auth");
- assert(result.id, "failed test that our session response has an id");
- assert.eq(
- result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
+result = admin.runCommand(request);
+assert.commandWorked(
+ result,
+ "failed test that we can run startSession unauthenticated when the server is running without --auth");
+assert(result.id, "failed test that our session response has an id");
+assert.eq(result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
-    // test that startSession added a record to the cache
- serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
- assert.eq(1, serverStatus.logicalSessionRecordCache.activeSessionsCount);
+// test that startSession added a record to the cache
+serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
+assert.eq(1, serverStatus.logicalSessionRecordCache.activeSessionsCount);
- // test that we can run startSession authenticated when the server is running without --auth
+// test that we can run startSession authenticated when the server is running without --auth
- admin.createUser({user: 'user0', pwd: 'password', roles: []});
- admin.auth("user0", "password");
+admin.createUser({user: 'user0', pwd: 'password', roles: []});
+admin.auth("user0", "password");
- result = admin.runCommand(request);
- assert.commandWorked(
- result,
- "failed test that we can run startSession authenticated when the server is running without --auth");
- assert(result.id, "failed test that our session response has an id");
- assert.eq(
- result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
+result = admin.runCommand(request);
+assert.commandWorked(
+ result,
+ "failed test that we can run startSession authenticated when the server is running without --auth");
+assert(result.id, "failed test that our session response has an id");
+assert.eq(result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
- assert.commandFailed(admin.runCommand(request),
- "failed test that we can't run startSession when the cache is full");
- MongoRunner.stopMongod(conn);
+assert.commandFailed(admin.runCommand(request),
+ "failed test that we can't run startSession when the cache is full");
+MongoRunner.stopMongod(conn);
- //
+//
- conn = MongoRunner.runMongod({auth: "", nojournal: ""});
- admin = conn.getDB("admin");
- foo = conn.getDB("foo");
+conn = MongoRunner.runMongod({auth: "", nojournal: ""});
+admin = conn.getDB("admin");
+foo = conn.getDB("foo");
- // test that we can't run startSession unauthenticated when the server is running with --auth
+// test that we can't run startSession unauthenticated when the server is running with --auth
- assert.commandFailed(
- admin.runCommand(request),
- "failed test that we can't run startSession unauthenticated when the server is running with --auth");
+assert.commandFailed(
+ admin.runCommand(request),
+ "failed test that we can't run startSession unauthenticated when the server is running with --auth");
- //
+//
- admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
- admin.auth("admin", "admin");
- admin.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
- foo.createUser({user: 'user1', pwd: 'password', roles: jsTest.basicUserRoles});
- admin.createUser({user: 'user2', pwd: 'password', roles: []});
- admin.logout();
+admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
+admin.auth("admin", "admin");
+admin.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
+foo.createUser({user: 'user1', pwd: 'password', roles: jsTest.basicUserRoles});
+admin.createUser({user: 'user2', pwd: 'password', roles: []});
+admin.logout();
- // test that we can run startSession authenticated as one user with proper permissions
+// test that we can run startSession authenticated as one user with proper permissions
- admin.auth("user0", "password");
- result = admin.runCommand(request);
- assert.commandWorked(
- result,
- "failed test that we can run startSession authenticated as one user with proper permissions");
- assert(result.id, "failed test that our session response has an id");
- assert.eq(
- result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
+admin.auth("user0", "password");
+result = admin.runCommand(request);
+assert.commandWorked(
+ result,
+ "failed test that we can run startSession authenticated as one user with proper permissions");
+assert(result.id, "failed test that our session response has an id");
+assert.eq(result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
-    // test that we can't run startSession authenticated as two users with proper permissions
+// test that we can't run startSession authenticated as two users with proper permissions
- foo.auth("user1", "password");
- assert.commandFailed(
- admin.runCommand(request),
-        "failed test that we can't run startSession authenticated as two users with proper permissions");
+foo.auth("user1", "password");
+assert.commandFailed(
+ admin.runCommand(request),
+    "failed test that we can't run startSession authenticated as two users with proper permissions");
-    // test that we can't run startSession authenticated as one user without proper permissions
+// test that we can't run startSession authenticated as one user without proper permissions
- admin.logout();
- admin.auth("user2", "password");
- assert.commandFailed(
- admin.runCommand(request),
-        "failed test that we can't run startSession authenticated as one user without proper permissions");
+admin.logout();
+admin.auth("user2", "password");
+assert.commandFailed(
+ admin.runCommand(request),
+    "failed test that we can't run startSession authenticated as one user without proper permissions");
- //
-
- MongoRunner.stopMongod(conn);
+//
+MongoRunner.stopMongod(conn);
})();
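
A minimal sketch of the startSession round trip the assertions above rely on, assuming a plain mongod with no auth; the `maxSessions` value is illustrative:

(function() {
'use strict';
TestData.disableImplicitSessions = true;
const conn = MongoRunner.runMongod({setParameter: {maxSessions: 5}});
const admin = conn.getDB("admin");
const res = assert.commandWorked(admin.runCommand({startSession: 1}));
assert(res.id, "expected the response to carry a session id");
// The new session shows up in the logical session record cache.
const status = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
assert.eq(1, status.logicalSessionRecordCache.activeSessionsCount);
MongoRunner.stopMongod(conn);
})();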
diff --git a/jstests/noPassthrough/startup_logging.js b/jstests/noPassthrough/startup_logging.js
index d1b73bb1c56..ced10a08d04 100644
--- a/jstests/noPassthrough/startup_logging.js
+++ b/jstests/noPassthrough/startup_logging.js
@@ -4,32 +4,31 @@
(function() {
- 'use strict';
-
- function makeRegExMatchFn(pattern) {
- return function(text) {
- return pattern.test(text);
- };
- }
-
- function testStartupLogging(launcher, matchFn, expectedExitCode) {
- assert(matchFn(rawMongoProgramOutput()));
- }
-
- function validateWaitingMessage(launcher) {
- clearRawMongoProgramOutput();
- var conn = launcher.start({});
- launcher.stop(conn, undefined, {});
- testStartupLogging(launcher, makeRegExMatchFn(/waiting for connections on port/));
- }
-
- print("********************\nTesting startup logging in mongod\n********************");
-
- validateWaitingMessage({
- start: function(opts) {
- return MongoRunner.runMongod(opts);
- },
- stop: MongoRunner.stopMongod
- });
-
+'use strict';
+
+function makeRegExMatchFn(pattern) {
+ return function(text) {
+ return pattern.test(text);
+ };
+}
+
+function testStartupLogging(launcher, matchFn, expectedExitCode) {
+ assert(matchFn(rawMongoProgramOutput()));
+}
+
+function validateWaitingMessage(launcher) {
+ clearRawMongoProgramOutput();
+ var conn = launcher.start({});
+ launcher.stop(conn, undefined, {});
+ testStartupLogging(launcher, makeRegExMatchFn(/waiting for connections on port/));
+}
+
+print("********************\nTesting startup logging in mongod\n********************");
+
+validateWaitingMessage({
+ start: function(opts) {
+ return MongoRunner.runMongod(opts);
+ },
+ stop: MongoRunner.stopMongod
+});
}());
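
The raw-output matching idiom above, in isolation (a sketch; the pattern is a shortened form of the one the test greps for):

(function() {
'use strict';
// rawMongoProgramOutput() aggregates the log text of shell-spawned processes,
// so clear it first to scope the match to this run.
clearRawMongoProgramOutput();
const conn = MongoRunner.runMongod({});
MongoRunner.stopMongod(conn);
assert(/waiting for connections/.test(rawMongoProgramOutput()));
})();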
diff --git a/jstests/noPassthrough/step_down_during_drop_database.js b/jstests/noPassthrough/step_down_during_drop_database.js
index 51a21afce76..5480605b1c3 100644
--- a/jstests/noPassthrough/step_down_during_drop_database.js
+++ b/jstests/noPassthrough/step_down_during_drop_database.js
@@ -6,55 +6,55 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const replSet = new ReplSetTest({nodes: 2});
- replSet.startSet();
- replSet.initiate();
+const replSet = new ReplSetTest({nodes: 2});
+replSet.startSet();
+replSet.initiate();
- let primary = replSet.getPrimary();
- let testDB = primary.getDB(dbName);
+let primary = replSet.getPrimary();
+let testDB = primary.getDB(dbName);
- const size = 5;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = testDB.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
- replSet.awaitReplication();
+const size = 5;
+jsTest.log("Creating " + size + " test documents.");
+var bulk = testDB.getCollection(collName).initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute());
+replSet.awaitReplication();
- const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
- assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
+assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- // Run the dropDatabase command and stepdown the primary while it is running.
- const awaitShell = startParallelShell(() => {
- db.dropDatabase();
- }, testDB.getMongo().port);
+// Run the dropDatabase command and stepdown the primary while it is running.
+const awaitShell = startParallelShell(() => {
+ db.dropDatabase();
+}, testDB.getMongo().port);
- // Ensure the dropDatabase command has begun before stepping down.
- checkLog.contains(primary,
- "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop " +
- "enabled. Blocking until fail point is disabled.");
+// Ensure the dropDatabase command has begun before stepping down.
+checkLog.contains(primary,
+ "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop " +
+ "enabled. Blocking until fail point is disabled.");
- assert.commandWorked(testDB.adminCommand({replSetStepDown: 60, force: true}));
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
+assert.commandWorked(testDB.adminCommand({replSetStepDown: 60, force: true}));
+replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- awaitShell();
+assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+awaitShell();
- primary = replSet.getPrimary();
- testDB = primary.getDB(dbName);
+primary = replSet.getPrimary();
+testDB = primary.getDB(dbName);
- // Run dropDatabase on the new primary. The secondary (formerly the primary) should be able to
- // drop the database too.
- testDB.dropDatabase();
- replSet.awaitReplication();
+// Run dropDatabase on the new primary. The secondary (formerly the primary) should be able to
+// drop the database too.
+testDB.dropDatabase();
+replSet.awaitReplication();
- replSet.stopSet();
+replSet.stopSet();
})();
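
A minimal sketch of the failpoint-plus-parallel-shell pattern the test is built on, against a one-node set (assuming the failpoint blocks the same way there, and that the blocked-state log line names the failpoint, as the grep in the test above shows it does):

(function() {
"use strict";
load("jstests/libs/check_log.js");
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
const primary = rst.getPrimary();
assert.writeOK(primary.getDB("test").coll.insert({x: 1}));
const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
const awaitShell = startParallelShell(() => {
    db.getSiblingDB("test").dropDatabase();
}, primary.port);
// Wait until the command is provably blocked, then release it.
checkLog.contains(primary, failpoint);
assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "off"}));
awaitShell();
rst.stopSet();
})();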
diff --git a/jstests/noPassthrough/stepdown_query.js b/jstests/noPassthrough/stepdown_query.js
index 6351493bbb4..4e8cc001840 100644
--- a/jstests/noPassthrough/stepdown_query.js
+++ b/jstests/noPassthrough/stepdown_query.js
@@ -8,72 +8,72 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- // Set the refresh period to 10 min to rule out races
- _setShellFailPoint({
- configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
- mode: "alwaysOn",
- data: {
- period: 10 * 60,
- },
- });
-
- var dbName = "test";
- var collName = jsTest.name();
+// Set the refresh period to 10 min to rule out races
+_setShellFailPoint({
+ configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
+ mode: "alwaysOn",
+ data: {
+ period: 10 * 60,
+ },
+});
- function runTest(host, rst, waitForPrimary) {
- // We create a new connection to 'host' here instead of passing in the original connection.
- // This to work around the fact that connections created by ReplSetTest already have slaveOk
-        // This is to work around the fact that connections created by ReplSetTest already have slaveOk
- var conn = new Mongo(host);
- var coll = conn.getDB(dbName).getCollection(collName);
- assert(!coll.exists());
- assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
- var cursor = coll.find().batchSize(2);
- // Retrieve the first batch of results.
- cursor.next();
- cursor.next();
- assert.eq(0, cursor.objsLeftInBatch());
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
- assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- if (waitForPrimary) {
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- }
- // When the primary steps down, it closes all client connections. Since 'conn' may be a
- // direct connection to the primary and the shell doesn't automatically retry operations on
- // network errors, we run a dummy operation here to force the shell to reconnect.
- try {
- conn.getDB("admin").runCommand("ping");
- } catch (e) {
- }
+var dbName = "test";
+var collName = jsTest.name();
- // Even though our connection doesn't have slaveOk set, we should still be able to iterate
- // our cursor and kill our cursor.
- assert(cursor.hasNext());
- assert.doesNotThrow(function() {
- cursor.close();
- });
+function runTest(host, rst, waitForPrimary) {
+ // We create a new connection to 'host' here instead of passing in the original connection.
+    // This is to work around the fact that connections created by ReplSetTest already have slaveOk
+ // set on them, but we need a connection with slaveOk not set for this test.
+ var conn = new Mongo(host);
+ var coll = conn.getDB(dbName).getCollection(collName);
+ assert(!coll.exists());
+ assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+ var cursor = coll.find().batchSize(2);
+ // Retrieve the first batch of results.
+ cursor.next();
+ cursor.next();
+ assert.eq(0, cursor.objsLeftInBatch());
+ var primary = rst.getPrimary();
+ var secondary = rst.getSecondary();
+ assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ if (waitForPrimary) {
+ rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
+ }
+ // When the primary steps down, it closes all client connections. Since 'conn' may be a
+ // direct connection to the primary and the shell doesn't automatically retry operations on
+ // network errors, we run a dummy operation here to force the shell to reconnect.
+ try {
+ conn.getDB("admin").runCommand("ping");
+ } catch (e) {
}
- // Test querying a replica set primary directly.
- var rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- runTest(rst.getPrimary().host, rst, false);
- rst.stopSet();
+ // Even though our connection doesn't have slaveOk set, we should still be able to iterate
+ // our cursor and kill our cursor.
+ assert(cursor.hasNext());
+ assert.doesNotThrow(function() {
+ cursor.close();
+ });
+}
+
+// Test querying a replica set primary directly.
+var rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+runTest(rst.getPrimary().host, rst, false);
+rst.stopSet();
- rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- runTest(rst.getURL(), rst, true);
- rst.stopSet();
+rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+runTest(rst.getURL(), rst, true);
+rst.stopSet();
- // Test querying a replica set primary through mongos.
- var st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
- rst = st.rs0;
- runTest(st.s0.host, rst, true);
- st.stop();
+// Test querying a replica set primary through mongos.
+var st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
+rst = st.rs0;
+runTest(st.s0.host, rst, true);
+st.stop();
})();
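
The cursor-batching idiom used in runTest(), in isolation (a sketch against a plain mongod; the collection name is illustrative):

(function() {
'use strict';
const conn = MongoRunner.runMongod();
const coll = conn.getDB("test").batching_sketch;
assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
const cursor = coll.find().batchSize(2);
cursor.next();
cursor.next();
// The first batch is drained, so the next hasNext() must issue a getMore.
assert.eq(0, cursor.objsLeftInBatch());
assert(cursor.hasNext());
cursor.close();
MongoRunner.stopMongod(conn);
})();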
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index 8908d6bad5e..a4c0d1ebe38 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -5,29 +5,29 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- // The following test verifies that writeConcern: {j: true} ensures that data is durable.
- var dbpath = MongoRunner.dataPath + 'sync_write';
- resetDbpath(dbpath);
+// The following test verifies that writeConcern: {j: true} ensures that data is durable.
+var dbpath = MongoRunner.dataPath + 'sync_write';
+resetDbpath(dbpath);
- var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
- // Start a mongod.
- var conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to start up');
+// Start a mongod.
+var conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to start up');
- // Now connect to the mongod, do a journaled write and abruptly stop the server.
- var testDB = conn.getDB('test');
- assert.writeOK(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Now connect to the mongod, do a journaled write and abruptly stop the server.
+var testDB = conn.getDB('test');
+assert.writeOK(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restart the mongod.
- conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
+// Restart the mongod.
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
- // Check that our journaled write still is present.
- testDB = conn.getDB('test');
- assert.eq(1, testDB.synced.count({synced: true}), 'synced write was not found');
- MongoRunner.stopMongod(conn);
+// Check that our journaled write still is present.
+testDB = conn.getDB('test');
+assert.eq(1, testDB.synced.count({synced: true}), 'synced write was not found');
+MongoRunner.stopMongod(conn);
})();
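
The durable-write idiom under test, reduced to a single call (a sketch; `{j: true}` is what forces the write to be journaled before acknowledgement):

(function() {
'use strict';
const conn = MongoRunner.runMongod({journal: ''});
assert.writeOK(conn.getDB('test').synced.insert({synced: true}, {writeConcern: {j: true}}));
MongoRunner.stopMongod(conn);
})();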
diff --git a/jstests/noPassthrough/system_indexes.js b/jstests/noPassthrough/system_indexes.js
index fb0991b1abb..a1d9365e0a9 100644
--- a/jstests/noPassthrough/system_indexes.js
+++ b/jstests/noPassthrough/system_indexes.js
@@ -1,83 +1,83 @@
-/** Ensure that authorization system collections' indexes are correctly generated.
+/**
+ * Ensure that authorization system collections' indexes are correctly generated.
*
* This test requires users to persist across a restart.
* @tags: [requires_persistence]
*/
(function() {
- let conn = MongoRunner.runMongod();
- let config = conn.getDB("config");
- let db = conn.getDB("admin");
+let conn = MongoRunner.runMongod();
+let config = conn.getDB("config");
+let db = conn.getDB("admin");
- // TEST: User and role collections start off with no indexes
- assert.eq(0, db.system.users.getIndexes().length);
- assert.eq(0, db.system.roles.getIndexes().length);
+// TEST: User and role collections start off with no indexes
+assert.eq(0, db.system.users.getIndexes().length);
+assert.eq(0, db.system.roles.getIndexes().length);
- // TEST: User and role creation generates indexes
- db.createUser({user: "user", pwd: "pwd", roles: []});
- assert.eq(2, db.system.users.getIndexes().length);
+// TEST: User and role creation generates indexes
+db.createUser({user: "user", pwd: "pwd", roles: []});
+assert.eq(2, db.system.users.getIndexes().length);
- db.createRole({role: "role", privileges: [], roles: []});
- assert.eq(2, db.system.roles.getIndexes().length);
+db.createRole({role: "role", privileges: [], roles: []});
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying admin.system.users index and restarting will recreate it
- assert.commandWorked(db.system.users.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
- assert.eq(2, db.system.roles.getIndexes().length);
+// TEST: Destroying admin.system.users index and restarting will recreate it
+assert.commandWorked(db.system.users.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying admin.system.roles index and restarting will recreate it
- assert.commandWorked(db.system.roles.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
- assert.eq(2, db.system.roles.getIndexes().length);
+// TEST: Destroying admin.system.roles index and restarting will recreate it
+assert.commandWorked(db.system.roles.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying both authorization indexes and restarting will recreate them
- assert.commandWorked(db.system.users.dropIndexes());
- assert.commandWorked(db.system.roles.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
- assert.eq(2, db.system.roles.getIndexes().length);
+// TEST: Destroying both authorization indexes and restarting will recreate them
+assert.commandWorked(db.system.users.dropIndexes());
+assert.commandWorked(db.system.roles.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying the admin.system.users index and restarting will recreate it, even if
- // admin.system.roles does not exist
- // Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
- assert.commandWorked(db.adminCommand({
- _mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: true
- }));
- db.createUser({user: "user", pwd: "pwd", roles: []});
- assert.commandWorked(db.system.users.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
-
- // TEST: Destroying the admin.system.roles index and restarting will recreate it, even if
- // admin.system.users does not exist
- // Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
- assert.commandWorked(db.adminCommand({
- _mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: true
- }));
- db.createRole({role: "role", privileges: [], roles: []});
- assert.commandWorked(db.system.roles.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.roles.getIndexes().length);
- MongoRunner.stopMongod(conn);
+// TEST: Destroying the admin.system.users index and restarting will recreate it, even if
+// admin.system.roles does not exist
+// Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
+assert.commandWorked(db.adminCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: true
+}));
+db.createUser({user: "user", pwd: "pwd", roles: []});
+assert.commandWorked(db.system.users.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+// TEST: Destroying the admin.system.roles index and restarting will recreate it, even if
+// admin.system.users does not exist
+// Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
+assert.commandWorked(db.adminCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: true
+}));
+db.createRole({role: "role", privileges: [], roles: []});
+assert.commandWorked(db.system.roles.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.roles.getIndexes().length);
+MongoRunner.stopMongod(conn);
})();
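
One restart-and-recheck cycle from the test above, in isolation (a sketch; the two indexes are the implicit _id index plus the auth-schema index that user creation generates):

(function() {
let conn = MongoRunner.runMongod();
let db = conn.getDB("admin");
db.createUser({user: "user", pwd: "pwd", roles: []});
assert.eq(2, db.system.users.getIndexes().length);
assert.commandWorked(db.system.users.dropIndexes());  // drops all but _id
MongoRunner.stopMongod(conn);
// Restarting on the same dbpath regenerates the missing auth index.
conn = MongoRunner.runMongod({restart: conn, cleanData: false});
assert.eq(2, conn.getDB("admin").system.users.getIndexes().length);
MongoRunner.stopMongod(conn);
})();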
diff --git a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
index e1aa184efa2..0d29b065e7b 100644
--- a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
+++ b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
@@ -3,27 +3,26 @@
// This test was designed to reproduce SERVER-33942 against a mongos.
// @tags: [requires_sharding, requires_capped]
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- const db = st.s.getDB("test");
- const coll = db.capped;
- assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
- assert.writeOK(coll.insert({}));
- const findResult = assert.commandWorked(
- db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
+const db = st.s.getDB("test");
+const coll = db.capped;
+assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
+assert.writeOK(coll.insert({}));
+const findResult = assert.commandWorked(
+ db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
- const cursorId = findResult.cursor.id;
- assert.neq(cursorId, 0);
+const cursorId = findResult.cursor.id;
+assert.neq(cursorId, 0);
- // Test that the getMores on this tailable cursor are immune to interrupt.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped", maxTimeMS: 30}));
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped"}));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
+// Test that the getMores on this tailable cursor are immune to interrupt.
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
+assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped", maxTimeMS: 30}));
+assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped"}));
+assert.commandWorked(db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
- st.stop();
+st.stop();
}());
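
The tailable-cursor setup above, shown against a plain mongod for reference (a sketch; the capped-collection size is illustrative):

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
assert.writeOK(db.capped.insert({}));
const res = assert.commandWorked(
    db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
assert.neq(0, res.cursor.id);  // the cursor stays open awaiting new data
assert.commandWorked(
    db.runCommand({getMore: res.cursor.id, collection: "capped", maxTimeMS: 30}));
MongoRunner.stopMongod(conn);
})();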
diff --git a/jstests/noPassthrough/thread_args.js b/jstests/noPassthrough/thread_args.js
index b633acb1081..03273d76af3 100644
--- a/jstests/noPassthrough/thread_args.js
+++ b/jstests/noPassthrough/thread_args.js
@@ -2,45 +2,45 @@
 * This test makes sure Thread and ScopedThread work with --enableJavaScriptProtection
*/
(function() {
- 'use strict';
- load('jstests/libs/parallelTester.js');
+'use strict';
+load('jstests/libs/parallelTester.js');
- function testThread(threadType) {
- function threadFn(args) {
- // Ensure objects are passed through properly
- assert(args instanceof Object);
- // Ensure functions inside objects are still functions
- assert(args.func1 instanceof Function);
- assert(args.func1());
- // Ensure Code objects are converted to functions
- assert(args.func2 instanceof Function);
- assert(args.func2());
- // Ensure arrays are passed through properly
- assert(args.funcArray instanceof Array);
- // Ensure functions inside arrays are still functions.
- assert(args.funcArray[0] instanceof Function);
- assert(args.funcArray[0]());
- return true;
- }
+function testThread(threadType) {
+ function threadFn(args) {
+ // Ensure objects are passed through properly
+ assert(args instanceof Object);
+ // Ensure functions inside objects are still functions
+ assert(args.func1 instanceof Function);
+ assert(args.func1());
+ // Ensure Code objects are converted to functions
+ assert(args.func2 instanceof Function);
+ assert(args.func2());
+ // Ensure arrays are passed through properly
+ assert(args.funcArray instanceof Array);
+ // Ensure functions inside arrays are still functions.
+ assert(args.funcArray[0] instanceof Function);
+ assert(args.funcArray[0]());
+ return true;
+ }
- function returnTrue() {
- return true;
- }
+ function returnTrue() {
+ return true;
+ }
- var args = {
- func1: returnTrue,
- // Pass some Code objects to simulate what happens with --enableJavaScriptProtection
- func2: new Code(returnTrue.toString()),
- funcArray: [new Code(returnTrue.toString())]
- };
+ var args = {
+ func1: returnTrue,
+ // Pass some Code objects to simulate what happens with --enableJavaScriptProtection
+ func2: new Code(returnTrue.toString()),
+ funcArray: [new Code(returnTrue.toString())]
+ };
- var thread = new threadType(threadFn, args);
- thread.start();
- thread.join();
- assert(thread.returnData());
- }
+ var thread = new threadType(threadFn, args);
+ thread.start();
+ thread.join();
+ assert(thread.returnData());
+}
- // Test both Thread and ScopedThread
- testThread(Thread);
- testThread(ScopedThread);
+// Test both Thread and ScopedThread
+testThread(Thread);
+testThread(ScopedThread);
}());
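
A minimal ScopedThread round trip, for reference (a sketch; arguments go in through the constructor and the return value comes back via returnData()):

(function() {
'use strict';
load('jstests/libs/parallelTester.js');
const t = new ScopedThread(function(x, y) {
    return x + y;
}, 2, 3);
t.start();
t.join();
assert.eq(5, t.returnData());
})();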
diff --git a/jstests/noPassthrough/timestamp_index_builds.js b/jstests/noPassthrough/timestamp_index_builds.js
index e5ffa405d45..41f5ecfb42c 100644
--- a/jstests/noPassthrough/timestamp_index_builds.js
+++ b/jstests/noPassthrough/timestamp_index_builds.js
@@ -18,84 +18,82 @@
* @tags: [requires_replication, requires_persistence, requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({
- name: "timestampingIndexBuilds",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "timestampingIndexBuilds",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
- // Only snapshotting storage engines require correct timestamping of index builds.
- rst.stopSet();
- return;
- }
+if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+ // Only snapshotting storage engines require correct timestamping of index builds.
+ rst.stopSet();
+ return;
+}
- function getColl(conn) {
- return conn.getDB("timestampingIndexBuild")["coll"];
- }
+function getColl(conn) {
+ return conn.getDB("timestampingIndexBuild")["coll"];
+}
- let coll = getColl(rst.getPrimary());
+let coll = getColl(rst.getPrimary());
- // Create a collection and wait for the stable timestamp to exceed its creation on both nodes.
- assert.commandWorked(
- coll.insert({}, {writeConcern: {w: "majority", wtimeout: rst.kDefaultTimeoutMS}}));
+// Create a collection and wait for the stable timestamp to exceed its creation on both nodes.
+assert.commandWorked(
+ coll.insert({}, {writeConcern: {w: "majority", wtimeout: rst.kDefaultTimeoutMS}}));
- // Wait for the stable timestamp to match the latest oplog entry on both nodes.
- rst.awaitLastOpCommitted();
+// Wait for the stable timestamp to match the latest oplog entry on both nodes.
+rst.awaitLastOpCommitted();
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- assert.commandWorked(coll.createIndexes([{foo: 1}], {background: true}));
- rst.awaitReplication();
+assert.commandWorked(coll.createIndexes([{foo: 1}], {background: true}));
+rst.awaitReplication();
- rst.stopSet(undefined, true);
+rst.stopSet(undefined, true);
- // The `disableSnapshotting` failpoint is no longer in effect. Bring up and analyze each node
- // separately. The client does not need to perform any writes from here on out.
- for (let nodeIdx = 0; nodeIdx < 2; ++nodeIdx) {
- let node = nodes[nodeIdx];
- let nodeIdentity = tojsononeline({nodeIdx: nodeIdx, dbpath: node.dbpath, port: node.port});
+// The `disableSnapshotting` failpoint is no longer in effect. Bring up and analyze each node
+// separately. The client does not need to perform any writes from here on out.
+for (let nodeIdx = 0; nodeIdx < 2; ++nodeIdx) {
+ let node = nodes[nodeIdx];
+ let nodeIdentity = tojsononeline({nodeIdx: nodeIdx, dbpath: node.dbpath, port: node.port});
- // Bringing up the node as a standalone should only find the `_id` index.
- {
- jsTestLog("Starting as a standalone. Ensure only the `_id` index exists. Node: " +
- nodeIdentity);
- let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
- assert.neq(null, conn, "failed to restart node");
- assert.eq(1, getColl(conn).getIndexes().length);
- rst.stop(nodeIdx);
- }
+ // Bringing up the node as a standalone should only find the `_id` index.
+ {
+ jsTestLog("Starting as a standalone. Ensure only the `_id` index exists. Node: " +
+ nodeIdentity);
+ let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
+ assert.neq(null, conn, "failed to restart node");
+ assert.eq(1, getColl(conn).getIndexes().length);
+ rst.stop(nodeIdx);
+ }
- // Bringing up the node with `--replSet` will run oplog recovery. The `foo` index will be
- // rebuilt, but not become "stable".
- {
- jsTestLog("Starting as a replica set. Both indexes should exist. Node: " +
- nodeIdentity);
- let conn = rst.start(nodeIdx, {startClean: false}, true);
- conn.setSlaveOk();
- assert.eq(2, getColl(conn).getIndexes().length);
- rst.stop(nodeIdx);
- }
+ // Bringing up the node with `--replSet` will run oplog recovery. The `foo` index will be
+ // rebuilt, but not become "stable".
+ {
+ jsTestLog("Starting as a replica set. Both indexes should exist. Node: " + nodeIdentity);
+ let conn = rst.start(nodeIdx, {startClean: false}, true);
+ conn.setSlaveOk();
+ assert.eq(2, getColl(conn).getIndexes().length);
+ rst.stop(nodeIdx);
+ }
- // Restarting the node as a standalone once again only shows the `_id` index.
- {
- jsTestLog(
- "Starting as a standalone after replication startup recovery. Ensure only the `_id` index exists. Node: " +
- nodeIdentity);
- let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
- assert.neq(null, conn, "failed to restart node");
- assert.eq(1, getColl(conn).getIndexes().length);
- rst.stop(nodeIdx);
- }
+ // Restarting the node as a standalone once again only shows the `_id` index.
+ {
+ jsTestLog(
+ "Starting as a standalone after replication startup recovery. Ensure only the `_id` index exists. Node: " +
+ nodeIdentity);
+ let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
+ assert.neq(null, conn, "failed to restart node");
+ assert.eq(1, getColl(conn).getIndexes().length);
+ rst.stop(nodeIdx);
}
+}
- rst.stopSet();
+rst.stopSet();
}());
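
The index-count probe the test applies after each restart, in isolation (a sketch; one collection, the implicit _id index plus one built index):

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const coll = conn.getDB("test").coll;
assert.commandWorked(coll.insert({}));
assert.eq(1, coll.getIndexes().length);  // implicit _id index only
assert.commandWorked(coll.createIndexes([{foo: 1}]));
assert.eq(2, coll.getIndexes().length);  // _id plus {foo: 1}
MongoRunner.stopMongod(conn);
})();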
diff --git a/jstests/noPassthrough/traffic_reading.js b/jstests/noPassthrough/traffic_reading.js
index d9569868002..aa29d360387 100644
--- a/jstests/noPassthrough/traffic_reading.js
+++ b/jstests/noPassthrough/traffic_reading.js
@@ -1,85 +1,83 @@
// tests for the traffic_recording commands.
(function() {
- // Variables for this test
- const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
- const recordingFile = "recording.txt";
- const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
- const replayFilePath = MongoRunner.toRealDir(recordingDir + "/replay.txt");
+// Variables for this test
+const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
+const recordingFile = "recording.txt";
+const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
+const replayFilePath = MongoRunner.toRealDir(recordingDir + "/replay.txt");
- assert.throws(function() {
- convertTrafficRecordingToBSON("notarealfileatall");
- });
+assert.throws(function() {
+ convertTrafficRecordingToBSON("notarealfileatall");
+});
- // Create the recording directory if it does not already exist
- mkdir(recordingDir);
+// Create the recording directory if it does not already exist
+mkdir(recordingDir);
- // Create the options and run mongod
- var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
- m = MongoRunner.runMongod(opts);
+// Create the options and run mongod
+var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
+m = MongoRunner.runMongod(opts);
- // Get the port of the host
- var serverPort = m.port;
+// Get the port of the host
+var serverPort = m.port;
- // Create necessary users
- adminDB = m.getDB("admin");
- const testDB = m.getDB("test");
- const coll = testDB.getCollection("foo");
- adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- adminDB.auth("admin", "pass");
+// Create necessary users
+adminDB = m.getDB("admin");
+const testDB = m.getDB("test");
+const coll = testDB.getCollection("foo");
+adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+adminDB.auth("admin", "pass");
- // Start recording traffic
- assert.commandWorked(
- adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
+// Start recording traffic
+assert.commandWorked(adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
- // Run a few commands
- assert.commandWorked(testDB.runCommand({"serverStatus": 1}));
- assert.commandWorked(coll.insert({"name": "foo biz bar"}));
- assert.eq("foo biz bar", coll.findOne().name);
- assert.commandWorked(coll.insert({"name": "foo bar"}));
- assert.eq("foo bar", coll.findOne({"name": "foo bar"}).name);
- assert.commandWorked(coll.deleteOne({}));
- assert.eq(1, coll.aggregate().toArray().length);
- assert.commandWorked(coll.update({}, {}));
+// Run a few commands
+assert.commandWorked(testDB.runCommand({"serverStatus": 1}));
+assert.commandWorked(coll.insert({"name": "foo biz bar"}));
+assert.eq("foo biz bar", coll.findOne().name);
+assert.commandWorked(coll.insert({"name": "foo bar"}));
+assert.eq("foo bar", coll.findOne({"name": "foo bar"}).name);
+assert.commandWorked(coll.deleteOne({}));
+assert.eq(1, coll.aggregate().toArray().length);
+assert.commandWorked(coll.update({}, {}));
- // Stop recording traffic
- assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
+// Stop recording traffic
+assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
- // Shutdown Mongod
-    MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
+// Shutdown Mongod
+MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- // Counters
- var numRequest = 0;
- var numResponse = 0;
- var opTypes = {};
+// Counters
+var numRequest = 0;
+var numResponse = 0;
+var opTypes = {};
- // Pass filepath to traffic_reader helper method to get recorded info in BSON
- var res = convertTrafficRecordingToBSON(recordingFilePath);
+// Pass filepath to traffic_reader helper method to get recorded info in BSON
+var res = convertTrafficRecordingToBSON(recordingFilePath);
- // Iterate through the results and assert the above commands are properly recorded
- res.forEach((obj) => {
- assert.eq(obj["rawop"]["header"]["opcode"], 2013);
- assert.eq(obj["seenconnectionnum"], 1);
- var responseTo = obj["rawop"]["header"]["responseto"];
- if (responseTo == 0) {
- assert.eq(obj["destendpoint"], serverPort.toString());
- numRequest++;
- } else {
- assert.eq(obj["srcendpoint"], serverPort.toString());
- numResponse++;
- }
- opTypes[obj["opType"]] = (opTypes[obj["opType"]] || 0) + 1;
- });
+// Iterate through the results and assert the above commands are properly recorded
+res.forEach((obj) => {
+ assert.eq(obj["rawop"]["header"]["opcode"], 2013);
+ assert.eq(obj["seenconnectionnum"], 1);
+ var responseTo = obj["rawop"]["header"]["responseto"];
+ if (responseTo == 0) {
+ assert.eq(obj["destendpoint"], serverPort.toString());
+ numRequest++;
+ } else {
+ assert.eq(obj["srcendpoint"], serverPort.toString());
+ numResponse++;
+ }
+ opTypes[obj["opType"]] = (opTypes[obj["opType"]] || 0) + 1;
+});
- // Assert there is a response for every request
- assert.eq(numResponse, numRequest);
-
- // Assert the opTypes were correct
- assert.eq(opTypes['isMaster'], opTypes["ismaster"]);
- assert.eq(opTypes['find'], 2);
- assert.eq(opTypes['insert'], 2);
- assert.eq(opTypes['delete'], 1);
- assert.eq(opTypes['update'], 1);
- assert.eq(opTypes['aggregate'], 1);
- assert.eq(opTypes['stopRecordingTraffic'], 1);
+// Assert there is a response for every request
+assert.eq(numResponse, numRequest);
+// Assert the opTypes were correct
+assert.eq(opTypes['isMaster'], opTypes["ismaster"]);
+assert.eq(opTypes['find'], 2);
+assert.eq(opTypes['insert'], 2);
+assert.eq(opTypes['delete'], 1);
+assert.eq(opTypes['update'], 1);
+assert.eq(opTypes['aggregate'], 1);
+assert.eq(opTypes['stopRecordingTraffic'], 1);
})();
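
The tallying idiom the forEach loop above relies on, in isolation (a sketch with illustrative op names):

(function() {
const opTypes = {};
["find", "insert", "find"].forEach((op) => {
    opTypes[op] = (opTypes[op] || 0) + 1;  // lazily seeded counter map
});
assert.eq(2, opTypes["find"]);
assert.eq(1, opTypes["insert"]);
})();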
diff --git a/jstests/noPassthrough/traffic_reading_legacy.js b/jstests/noPassthrough/traffic_reading_legacy.js
index 9224edf926a..69cda3be58f 100644
--- a/jstests/noPassthrough/traffic_reading_legacy.js
+++ b/jstests/noPassthrough/traffic_reading_legacy.js
@@ -1,72 +1,70 @@
// tests for the traffic_recording commands.
(function() {
- var baseName = "jstests_traffic_recording";
+var baseName = "jstests_traffic_recording";
- // Variables for this test
- const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
- const recordingFile = "recording.txt";
- const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
+// Variables for this test
+const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
+const recordingFile = "recording.txt";
+const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
- // Create the recording directory if it does not already exist
- mkdir(recordingDir);
+// Create the recording directory if it does not already exist
+mkdir(recordingDir);
- // Create the options and run mongod
- var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
- m = MongoRunner.runMongod(opts);
+// Create the options and run mongod
+var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
+m = MongoRunner.runMongod(opts);
- // Get the port of the host
- var serverPort = m.port;
+// Get the port of the host
+var serverPort = m.port;
- // Set the readMode and writeMode to legacy
- m.forceReadMode("legacy");
- m.forceWriteMode("legacy");
+// Set the readMode and writeMode to legacy
+m.forceReadMode("legacy");
+m.forceWriteMode("legacy");
- // Create necessary users
- adminDB = m.getDB("admin");
- const testDB = m.getDB("test");
- const coll = testDB.getCollection("foo");
- adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- adminDB.auth("admin", "pass");
+// Create necessary users
+adminDB = m.getDB("admin");
+const testDB = m.getDB("test");
+const coll = testDB.getCollection("foo");
+adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+adminDB.auth("admin", "pass");
- // Start recording traffic
- assert.commandWorked(
- adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
+// Start recording traffic
+assert.commandWorked(adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
- // Run a few commands
- testDB.runCommand({"serverStatus": 1});
- coll.insert({"name": "foo biz bar"});
- coll.findOne();
- coll.insert({"name": "foo bar"});
- coll.findOne({"name": "foo bar"});
- coll.deleteOne({});
+// Run a few commands
+testDB.runCommand({"serverStatus": 1});
+coll.insert({"name": "foo biz bar"});
+coll.findOne();
+coll.insert({"name": "foo bar"});
+coll.findOne({"name": "foo bar"});
+coll.deleteOne({});
- // Stop recording traffic
- assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
+// Stop recording traffic
+assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
- // Shutdown Mongod
-    MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
+// Shutdown Mongod
+MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- // Counters
- var opCodes = {};
+// Counters
+var opCodes = {};
- // Pass filepath to traffic_reader helper method to get recorded info in BSON
- var res = convertTrafficRecordingToBSON(recordingFilePath);
+// Pass filepath to traffic_reader helper method to get recorded info in BSON
+var res = convertTrafficRecordingToBSON(recordingFilePath);
- // Iterate through the results and assert the above commands are properly recorded
- res.forEach((obj) => {
- opCodes[obj["rawop"]["header"]["opcode"]] =
- (opCodes[obj["rawop"]["header"]["opcode"]] || 0) + 1;
- assert.eq(obj["seenconnectionnum"], 1);
- var responseTo = obj["rawop"]["header"]["responseto"];
- if (responseTo == 0) {
- assert.eq(obj["destendpoint"], serverPort.toString());
- } else {
- assert.eq(obj["srcendpoint"], serverPort.toString());
- }
- });
-
- // ensure legacy operations worked properly
- assert.eq(opCodes[2002], 2);
- assert.eq(opCodes[2006], 1);
+// Iterate through the results and assert the above commands are properly recorded
+res.forEach((obj) => {
+ opCodes[obj["rawop"]["header"]["opcode"]] =
+ (opCodes[obj["rawop"]["header"]["opcode"]] || 0) + 1;
+ assert.eq(obj["seenconnectionnum"], 1);
+ var responseTo = obj["rawop"]["header"]["responseto"];
+ if (responseTo == 0) {
+ assert.eq(obj["destendpoint"], serverPort.toString());
+ } else {
+ assert.eq(obj["srcendpoint"], serverPort.toString());
+ }
+});
+// ensure legacy operations worked properly
+assert.eq(opCodes[2002], 2);
+assert.eq(opCodes[2006], 1);
})();
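
The legacy-protocol switch this variant adds over the previous test, in isolation (a sketch; opcode 2002 is OP_INSERT and 2006 is OP_DELETE on the legacy wire protocol, matching the counts asserted above):

(function() {
const conn = MongoRunner.runMongod();
conn.forceReadMode("legacy");
conn.forceWriteMode("legacy");
const coll = conn.getDB("test").coll;
coll.insert({x: 1});  // sent as OP_INSERT rather than an insert command
assert.eq(1, coll.findOne().x);
coll.deleteOne({});   // sent as OP_DELETE
MongoRunner.stopMongod(conn);
})();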
diff --git a/jstests/noPassthrough/traffic_recording.js b/jstests/noPassthrough/traffic_recording.js
index 03828809a81..e748deb7e7b 100644
--- a/jstests/noPassthrough/traffic_recording.js
+++ b/jstests/noPassthrough/traffic_recording.js
@@ -1,126 +1,125 @@
// tests for the traffic_recording commands.
(function() {
- function getDB(client) {
- let db = client.getDB("admin");
- db.auth("admin", "pass");
+function getDB(client) {
+ let db = client.getDB("admin");
+ db.auth("admin", "pass");
- return db;
- }
+ return db;
+}
- function runTest(client, restartCommand) {
- let db = getDB(client);
-
- let res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
- assert.eq(res.ok, false);
- assert.eq(res["errmsg"], "Traffic recording directory not set");
-
- const path = MongoRunner.toRealDir("$dataDir/traffic_recording/");
- mkdir(path);
-
- if (!jsTest.isMongos(client)) {
- setJsTestOption("enableTestCommands", 0);
- client = restartCommand({
- trafficRecordingDirectory: path,
- AlwaysRecordTraffic: "notARealPath",
- enableTestCommands: 0,
- });
- setJsTestOption("enableTestCommands", 1);
-            assert.eq(null, client, "AlwaysRecordTraffic without enableTestCommands should fail");
- }
+function runTest(client, restartCommand) {
+ let db = getDB(client);
+
+ let res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
+ assert.eq(res.ok, false);
+ assert.eq(res["errmsg"], "Traffic recording directory not set");
+
+ const path = MongoRunner.toRealDir("$dataDir/traffic_recording/");
+ mkdir(path);
+ if (!jsTest.isMongos(client)) {
+ setJsTestOption("enableTestCommands", 0);
client = restartCommand({
trafficRecordingDirectory: path,
AlwaysRecordTraffic: "notARealPath",
- enableTestCommands: 1
+ enableTestCommands: 0,
});
-        assert.neq(null, client, "AlwaysRecordTraffic with enableTestCommands should succeed");
- db = getDB(client);
-
- assert(db.runCommand({"serverStatus": 1}).trafficRecording.running);
-
- client = restartCommand({trafficRecordingDirectory: path});
- db = getDB(client);
-
- res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
- assert.eq(res.ok, true);
-
- // Running the command again should fail
- res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
- assert.eq(res.ok, false);
- assert.eq(res["errmsg"], "Traffic recording already active");
-
- // Running the serverStatus command should return the relevant information
- res = db.runCommand({"serverStatus": 1});
- assert("trafficRecording" in res);
- let trafficStats = res["trafficRecording"];
- assert.eq(trafficStats["running"], true);
-
- // Assert that the current file size is growing
- res = db.runCommand({"serverStatus": 1});
- assert("trafficRecording" in res);
- let trafficStats2 = res["trafficRecording"];
- assert.eq(trafficStats2["running"], true);
- assert(trafficStats2["currentFileSize"] >= trafficStats["currentFileSize"]);
-
- // Running the stopRecordingTraffic command should succeed
- res = db.runCommand({'stopRecordingTraffic': 1});
- assert.eq(res.ok, true);
-
- // Running the stopRecordingTraffic command again should fail
- res = db.runCommand({'stopRecordingTraffic': 1});
- assert.eq(res.ok, false);
- assert.eq(res["errmsg"], "Traffic recording not active");
-
-        // Running the serverStatus command should report that running is false
- res = db.runCommand({"serverStatus": 1});
- assert("trafficRecording" in res);
- trafficStats = res["trafficRecording"];
- assert.eq(trafficStats["running"], false);
-
- return client;
+ setJsTestOption("enableTestCommands", 1);
+        assert.eq(null, client, "AlwaysRecordTraffic without enableTestCommands should fail");
}
- {
- let m = MongoRunner.runMongod({auth: ""});
-
- let db = m.getDB("admin");
-
- db.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- db.auth("admin", "pass");
+ client = restartCommand({
+ trafficRecordingDirectory: path,
+ AlwaysRecordTraffic: "notARealPath",
+ enableTestCommands: 1
+ });
+    assert.neq(null, client, "AlwaysRecordTraffic with enableTestCommands should succeed");
+ db = getDB(client);
+
+ assert(db.runCommand({"serverStatus": 1}).trafficRecording.running);
+
+ client = restartCommand({trafficRecordingDirectory: path});
+ db = getDB(client);
+
+ res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
+ assert.eq(res.ok, true);
+
+ // Running the command again should fail
+ res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
+ assert.eq(res.ok, false);
+ assert.eq(res["errmsg"], "Traffic recording already active");
+
+ // Running the serverStatus command should return the relevant information
+ res = db.runCommand({"serverStatus": 1});
+ assert("trafficRecording" in res);
+ let trafficStats = res["trafficRecording"];
+ assert.eq(trafficStats["running"], true);
+
+ // Assert that the current file size is growing
+ res = db.runCommand({"serverStatus": 1});
+ assert("trafficRecording" in res);
+ let trafficStats2 = res["trafficRecording"];
+ assert.eq(trafficStats2["running"], true);
+ assert(trafficStats2["currentFileSize"] >= trafficStats["currentFileSize"]);
+
+ // Running the stopRecordingTraffic command should succeed
+ res = db.runCommand({'stopRecordingTraffic': 1});
+ assert.eq(res.ok, true);
+
+ // Running the stopRecordingTraffic command again should fail
+ res = db.runCommand({'stopRecordingTraffic': 1});
+ assert.eq(res.ok, false);
+ assert.eq(res["errmsg"], "Traffic recording not active");
+
+    // Running the serverStatus command should report that running is false
+ res = db.runCommand({"serverStatus": 1});
+ assert("trafficRecording" in res);
+ trafficStats = res["trafficRecording"];
+ assert.eq(trafficStats["running"], false);
+
+ return client;
+}
+
+{
+ let m = MongoRunner.runMongod({auth: ""});
+
+ let db = m.getDB("admin");
+
+ db.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+ db.auth("admin", "pass");
+
+ m = runTest(m, function(setParams) {
+ if (m) {
+ MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
+ }
+ m = MongoRunner.runMongod({auth: "", setParameter: setParams});
- m = runTest(m, function(setParams) {
- if (m) {
- MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- }
- m = MongoRunner.runMongod({auth: "", setParameter: setParams});
+ if (m) {
+ m.getDB("admin").createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+ }
- if (m) {
- m.getDB("admin").createUser(
- {user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- }
+ return m;
+ });
- return m;
- });
+ MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
+}
- MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- }
+{
+ let shardTest = new ShardingTest({
+ config: 1,
+ mongos: 1,
+ shards: 0,
+ });
- {
- let shardTest = new ShardingTest({
- config: 1,
- mongos: 1,
- shards: 0,
+ runTest(shardTest.s, function(setParams) {
+ shardTest.restartMongos(0, {
+ restart: true,
+ setParameter: setParams,
});
- runTest(shardTest.s, function(setParams) {
- shardTest.restartMongos(0, {
- restart: true,
- setParameter: setParams,
- });
+ return shardTest.s;
+ });
- return shardTest.s;
- });
-
- shardTest.stop();
- }
+ shardTest.stop();
+}
})();
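
The reformatted test above exercises a strict lifecycle: startRecordingTraffic fails until a
recording directory is configured, succeeds once, then fails while a recording is active, and
stopRecordingTraffic mirrors that. The happy path reduces to this sketch, run from a shell
connected to a mongod started with trafficRecordingDirectory set:

    var admin = db.getSiblingDB("admin");
    assert.commandWorked(admin.runCommand({startRecordingTraffic: 1, filename: "rec"}));
    assert(admin.runCommand({serverStatus: 1}).trafficRecording.running);
    assert.commandWorked(admin.runCommand({stopRecordingTraffic: 1}));
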
diff --git a/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js b/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js
index 6477fc8a4ca..82dba9f08be 100644
--- a/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js
+++ b/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js
@@ -2,20 +2,20 @@
// startup and via setParameter command. Valid parameter values are in the range [1, infinity).
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // transactionLifetimeLimitSeconds is set to be higher than its default value in test suites.
- delete TestData.transactionLifetimeLimitSeconds;
+// transactionLifetimeLimitSeconds is set to be higher than its default value in test suites.
+delete TestData.transactionLifetimeLimitSeconds;
- testNumericServerParameter("transactionLifetimeLimitSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 60 /*defaultValue*/,
- 30 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+testNumericServerParameter("transactionLifetimeLimitSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 60 /*defaultValue*/,
+ 30 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
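
testNumericServerParameter takes its bounds positionally, which is why every argument above
carries an inline comment. The runtime half of what it verifies is roughly this sketch, using
the values from the call above (30 is a valid non-default value; 0 falls below the lower bound):

    var admin = db.getSiblingDB("admin");
    assert.commandWorked(
        admin.runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 30}));
    assert.commandFailed(
        admin.runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 0}));
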
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index b0574c099c7..5f0536f0d7e 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -1,167 +1,166 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- function Repl(lifetime) {
- this.rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
- });
- this.rst.startSet();
- this.rst.initiate();
- }
+function Repl(lifetime) {
+ this.rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
+ });
+ this.rst.startSet();
+ this.rst.initiate();
+}
+
+Repl.prototype.stop = function() {
+ this.rst.stopSet();
+};
+
+Repl.prototype.getConn = function() {
+ return this.rst.getPrimary();
+};
+
+Repl.prototype.getTransactionConn = function() {
+ return this.rst.getPrimary();
+};
+
+function Sharding(lifetime) {
+ this.st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {
+ rs: true,
+ rsOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
+ rs0: {nodes: 1},
+ },
+ });
+
+ this.st.s0.getDB("admin").runCommand({enableSharding: "test"});
+ this.st.s0.getDB("admin").runCommand({shardCollection: "test.test", key: {_id: 1}});
+
+ // Ensure that the sessions collection exists.
+ assert.commandWorked(this.st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(
+ this.st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+}
+
+Sharding.prototype.stop = function() {
+ this.st.stop();
+};
+
+Sharding.prototype.getConn = function() {
+ return this.st.s0;
+};
+
+Sharding.prototype.getTransactionConn = function() {
+ return this.st.rs0.getPrimary();
+};
- Repl.prototype.stop = function() {
- this.rst.stopSet();
- };
-
- Repl.prototype.getConn = function() {
- return this.rst.getPrimary();
- };
-
- Repl.prototype.getTransactionConn = function() {
- return this.rst.getPrimary();
- };
-
- function Sharding(lifetime) {
- this.st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {
- rs: true,
- rsOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
- rs0: {nodes: 1},
- },
- });
-
- this.st.s0.getDB("admin").runCommand({enableSharding: "test"});
- this.st.s0.getDB("admin").runCommand({shardCollection: "test.test", key: {_id: 1}});
-
- // Ensure that the sessions collection exists.
- assert.commandWorked(
- this.st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.commandWorked(
- this.st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+const nSessions = 1500;
+
+function Fixture(impl) {
+ this.impl = impl;
+ this.conn = impl.getConn();
+ this.transactionConn = impl.getTransactionConn();
+
+ this.sessions = [];
+
+ for (var i = 0; i < nSessions; i++) {
+        // Make a session and use it so it gets added to the sessions collection
+ var session = this.conn.startSession({retryWrites: 1});
+ session.getDatabase("test").test.count({});
+ this.sessions.push(session);
}
- Sharding.prototype.stop = function() {
- this.st.stop();
- };
-
- Sharding.prototype.getConn = function() {
- return this.st.s0;
- };
-
- Sharding.prototype.getTransactionConn = function() {
- return this.st.rs0.getPrimary();
- };
-
- const nSessions = 1500;
-
- function Fixture(impl) {
- this.impl = impl;
- this.conn = impl.getConn();
- this.transactionConn = impl.getTransactionConn();
-
- this.sessions = [];
-
- for (var i = 0; i < nSessions; i++) {
-            // Make a session and use it so it gets added to the sessions collection
- var session = this.conn.startSession({retryWrites: 1});
- session.getDatabase("test").test.count({});
- this.sessions.push(session);
- }
-
- this.refresh();
- this.assertOutstandingTransactions(0);
- this.assertOutstandingSessions(nSessions);
-
- for (var i = 0; i < nSessions; i++) {
-            // Use each session for a retryable write so a transaction record is created
- var session = this.sessions[i];
- assert.writeOK(session.getDatabase("test").test.save({a: 1}));
- }
-
- // Ensure a write flushes a transaction
- this.assertOutstandingTransactions(nSessions);
- this.assertOutstandingSessions(nSessions);
-
- // Ensure a refresh/reap doesn't remove the transaction
- this.refresh();
- this.reap();
- this.assertOutstandingTransactions(nSessions);
- this.assertOutstandingSessions(nSessions);
+ this.refresh();
+ this.assertOutstandingTransactions(0);
+ this.assertOutstandingSessions(nSessions);
+
+ for (var i = 0; i < nSessions; i++) {
+        // Use each session for a retryable write so a transaction record is created
+ var session = this.sessions[i];
+ assert.writeOK(session.getDatabase("test").test.save({a: 1}));
}
- Fixture.prototype.assertOutstandingTransactions = function(count) {
- assert.eq(count, this.transactionConn.getDB("config").transactions.count());
- };
-
- Fixture.prototype.assertOutstandingSessions = function(count) {
- assert.eq(count, this.getDB("config").system.sessions.count());
- };
-
- Fixture.prototype.refresh = function() {
- assert.commandWorked(this.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
- };
-
- Fixture.prototype.reap = function() {
- assert.commandWorked(
- this.transactionConn.getDB("admin").runCommand({reapLogicalSessionCacheNow: 1}));
- };
-
- Fixture.prototype.getDB = function(db) {
- return this.conn.getDB(db);
- };
-
- Fixture.prototype.stop = function() {
- this.sessions.forEach(function(session) {
- session.endSession();
- });
- return this.impl.stop();
- };
-
- [Repl, Sharding].forEach(function(Impl) {
- {
- var fixture = new Fixture(new Impl(-1));
-            // Remove all sessions
- fixture.getDB("config").system.sessions.remove({});
- fixture.assertOutstandingTransactions(nSessions);
- fixture.assertOutstandingSessions(0);
-
- // See the transaction get reaped as a result
- fixture.reap();
- fixture.assertOutstandingTransactions(0);
- fixture.assertOutstandingSessions(0);
-
- fixture.stop();
- }
-
- {
- var fixture = new Fixture(new Impl(30));
-            // Remove all sessions
- fixture.getDB("config").system.sessions.remove({});
- fixture.assertOutstandingTransactions(nSessions);
- fixture.assertOutstandingSessions(0);
-
- // See the transaction was not reaped as a result
- fixture.reap();
- fixture.assertOutstandingTransactions(nSessions);
- fixture.assertOutstandingSessions(0);
-
- fixture.stop();
- }
+ // Ensure a write flushes a transaction
+ this.assertOutstandingTransactions(nSessions);
+ this.assertOutstandingSessions(nSessions);
+
+ // Ensure a refresh/reap doesn't remove the transaction
+ this.refresh();
+ this.reap();
+ this.assertOutstandingTransactions(nSessions);
+ this.assertOutstandingSessions(nSessions);
+}
+
+Fixture.prototype.assertOutstandingTransactions = function(count) {
+ assert.eq(count, this.transactionConn.getDB("config").transactions.count());
+};
+
+Fixture.prototype.assertOutstandingSessions = function(count) {
+ assert.eq(count, this.getDB("config").system.sessions.count());
+};
+
+Fixture.prototype.refresh = function() {
+ assert.commandWorked(this.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+};
+
+Fixture.prototype.reap = function() {
+ assert.commandWorked(
+ this.transactionConn.getDB("admin").runCommand({reapLogicalSessionCacheNow: 1}));
+};
+
+Fixture.prototype.getDB = function(db) {
+ return this.conn.getDB(db);
+};
+
+Fixture.prototype.stop = function() {
+ this.sessions.forEach(function(session) {
+ session.endSession();
});
+ return this.impl.stop();
+};
+
+[Repl, Sharding].forEach(function(Impl) {
+ {
+ var fixture = new Fixture(new Impl(-1));
+        // Remove all sessions
+ fixture.getDB("config").system.sessions.remove({});
+ fixture.assertOutstandingTransactions(nSessions);
+ fixture.assertOutstandingSessions(0);
+
+ // See the transaction get reaped as a result
+ fixture.reap();
+ fixture.assertOutstandingTransactions(0);
+ fixture.assertOutstandingSessions(0);
+
+ fixture.stop();
+ }
+
+ {
+ var fixture = new Fixture(new Impl(30));
+        // Remove all sessions
+ fixture.getDB("config").system.sessions.remove({});
+ fixture.assertOutstandingTransactions(nSessions);
+ fixture.assertOutstandingSessions(0);
+
+ // See the transaction was not reaped as a result
+ fixture.reap();
+ fixture.assertOutstandingTransactions(nSessions);
+ fixture.assertOutstandingSessions(0);
+
+ fixture.stop();
+ }
+});
})();
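
The two blocks at the end of the file differ only in the TransactionRecordMinimumLifetimeMinutes
value: with -1, every config.transactions record whose session has been removed is reaped on the
next pass; with 30, the records are too young to reap. A sketch of the eligibility rule the
assertions rely on (lastWriteDate follows the config.transactions schema; the helper itself is
illustrative, not server code):

    // A record is reapable once its session is gone and it has outlived the minimum lifetime.
    function isReapable(record, sessionExists, minLifetimeMinutes, nowMs) {
        var ageMinutes = (nowMs - record.lastWriteDate.getTime()) / (60 * 1000);
        return !sessionExists && ageMinutes >= minLifetimeMinutes;
    }
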
diff --git a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
index 9dee2103f94..48de0c880c8 100644
--- a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
+++ b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
@@ -9,59 +9,59 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- const name = "transaction_write_with_snapshot_unavailable";
- const replTest = new ReplSetTest({name: name, nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const name = "transaction_write_with_snapshot_unavailable";
+const replTest = new ReplSetTest({name: name, nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const dbNameB = dbName + "B";
- const collName = "collection";
- const collNameB = collName + "B";
+const dbName = name;
+const dbNameB = dbName + "B";
+const collName = "collection";
+const collNameB = collName + "B";
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
- assert.commandWorked(primaryDB[collName].insertOne({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryDB[collName].insertOne({}, {writeConcern: {w: "majority"}}));
- function testOp(cmd) {
- let op = Object.getOwnPropertyNames(cmd)[0];
- let session = primary.startSession();
- let sessionDB = session.getDatabase(name);
+function testOp(cmd) {
+ let op = Object.getOwnPropertyNames(cmd)[0];
+ let session = primary.startSession();
+ let sessionDB = session.getDatabase(name);
- jsTestLog(
- `Testing that SnapshotUnavailable during ${op} is labelled TransientTransactionError`);
+ jsTestLog(
+ `Testing that SnapshotUnavailable during ${op} is labelled TransientTransactionError`);
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{}]}));
-        // Create the collection outside the transaction; it cannot be written to inside it
- assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand({create: collNameB}));
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{}]}));
+    // Create the collection outside the transaction; it cannot be written to inside it
+ assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand({create: collNameB}));
- let res;
- try {
- res = sessionDB.getSiblingDB(dbNameB).runCommand(cmd);
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
- assert.eq(res.ok, 0);
- assert(!res.hasOwnProperty("writeErrors"));
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- } catch (ex) {
- printjson(cmd);
- printjson(res);
- throw ex;
- }
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand(
- {dropDatabase: 1, writeConcern: {w: "majority"}}));
+ let res;
+ try {
+ res = sessionDB.getSiblingDB(dbNameB).runCommand(cmd);
+ assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
+ assert.eq(res.ok, 0);
+ assert(!res.hasOwnProperty("writeErrors"));
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+ } catch (ex) {
+ printjson(cmd);
+ printjson(res);
+ throw ex;
}
- testOp({insert: collNameB, documents: [{_id: 0}]});
- testOp({update: collNameB, updates: [{q: {}, u: {$set: {x: 1}}}]});
- testOp({delete: collNameB, deletes: [{q: {_id: 0}, limit: 1}]});
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand(
+ {dropDatabase: 1, writeConcern: {w: "majority"}}));
+}
+
+testOp({insert: collNameB, documents: [{_id: 0}]});
+testOp({update: collNameB, updates: [{q: {}, u: {$set: {x: 1}}}]});
+testOp({delete: collNameB, deletes: [{q: {_id: 0}, limit: 1}]});
- replTest.stopSet();
+replTest.stopSet();
})();
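
All three testOp calls probe the same contract: a write inside a snapshot transaction to a
collection created after the snapshot fails with SnapshotUnavailable, and the response carries
the TransientTransactionError label so callers know to retry the whole transaction. A sketch of
the label check a retry loop would perform:

    function isTransientTxnError(res) {
        return Array.isArray(res.errorLabels) &&
            res.errorLabels.includes("TransientTransactionError");
    }
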
diff --git a/jstests/noPassthrough/transactions_work_with_in_memory_engine.js b/jstests/noPassthrough/transactions_work_with_in_memory_engine.js
index 7966656e390..fef2349265f 100644
--- a/jstests/noPassthrough/transactions_work_with_in_memory_engine.js
+++ b/jstests/noPassthrough/transactions_work_with_in_memory_engine.js
@@ -6,35 +6,37 @@
* engine (SERVER-36023).
*/
(function() {
- "use strict";
+"use strict";
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog("Skipping test because storageEngine is not inMemory");
- return;
- }
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog("Skipping test because storageEngine is not inMemory");
+ return;
+}
- const dbName = "test";
- const collName = "transactions_work_with_in_memory_engine";
+const dbName = "test";
+const collName = "transactions_work_with_in_memory_engine";
- const replTest = new ReplSetTest({name: collName, nodes: 1});
- replTest.startSet({storageEngine: "inMemory"});
- replTest.initiate();
+const replTest = new ReplSetTest({name: collName, nodes: 1});
+replTest.startSet({storageEngine: "inMemory"});
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- // Initiate a session.
- const sessionOptions = {causalConsistency: false};
- const session = primary.getDB(dbName).getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+// Initiate a session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = primary.getDB(dbName).getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- // Create collection.
- assert.commandWorked(sessionDb[collName].insert({x: 0}));
+// Create collection.
+assert.commandWorked(sessionDb[collName].insert({x: 0}));
- // Execute a transaction that should succeed.
- session.startTransaction();
- assert.commandWorked(sessionDb[collName].insert({x: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
+// Execute a transaction that should succeed.
+session.startTransaction();
+assert.commandWorked(sessionDb[collName].insert({x: 1}));
+assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- replTest.stopSet();
+session.endSession();
+replTest.stopSet();
}());
diff --git a/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js b/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js
index 93eaa49500e..7d5c3ea00d2 100644
--- a/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js
+++ b/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js
@@ -1,18 +1,19 @@
// Tests the ttlMonitorSleepSecs parameter
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/server_parameter_helpers.js');
+load('jstests/noPassthrough/libs/server_parameter_helpers.js');
- testNumericServerParameter('ttlMonitorSleepSecs',
- true, // is Startup Param
- false, // is runtime param
- 60, // default value
- 30, // valid, non-default value
- true, // has lower bound
- 0, // out of bound value (below lower bound)
- false, // has upper bound
- 'unused' // out of bounds value (above upper bound)
- );
+testNumericServerParameter(
+ 'ttlMonitorSleepSecs',
+ true, // is Startup Param
+ false, // is runtime param
+ 60, // default value
+ 30, // valid, non-default value
+ true, // has lower bound
+ 0, // out of bound value (below lower bound)
+ false, // has upper bound
+ 'unused' // out of bounds value (above upper bound)
+);
})();
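
Unlike transactionLifetimeLimitSeconds above, this parameter is startup-only (the second
positional argument is false), so the helper also verifies that the runtime path is rejected,
roughly per this sketch from a connected shell:

    assert.commandFailed(
        db.getSiblingDB("admin").runCommand({setParameter: 1, ttlMonitorSleepSecs: 30}));
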
diff --git a/jstests/noPassthrough/ttl_capped.js b/jstests/noPassthrough/ttl_capped.js
index d3d383cc984..c9eabbc0df7 100644
--- a/jstests/noPassthrough/ttl_capped.js
+++ b/jstests/noPassthrough/ttl_capped.js
@@ -4,83 +4,83 @@
* @tags: [requires_capped]
*/
(function() {
- "use strict";
+"use strict";
- var dbpath = MongoRunner.dataPath + "ttl_capped";
- resetDbpath(dbpath);
+var dbpath = MongoRunner.dataPath + "ttl_capped";
+resetDbpath(dbpath);
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- setParameter: "ttlMonitorSleepSecs=1",
- });
- assert.neq(null, conn, "mongod was unable to start up");
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ setParameter: "ttlMonitorSleepSecs=1",
+});
+assert.neq(null, conn, "mongod was unable to start up");
- var testDB = conn.getDB("test");
+var testDB = conn.getDB("test");
- assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: false}));
+assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: false}));
- var now = Date.now();
- var expireAfterSeconds = 10;
+var now = Date.now();
+var expireAfterSeconds = 10;
- var numCollectionsToCreate = 20;
- var width = numCollectionsToCreate.toString().length;
+var numCollectionsToCreate = 20;
+var width = numCollectionsToCreate.toString().length;
- // Create 'numCollectionsToCreate' collections with a TTL index, where every third collection is
- // capped. We create many collections with a TTL index to increase the odds that the TTL monitor
- // would process a non-capped collection after a capped collection. This allows us to verify
- // that the TTL monitor continues processing the remaining collections after encountering an
- // error processing a capped collection.
- for (var i = 0; i < numCollectionsToCreate; i++) {
- var collName = "ttl" + i.zeroPad(width);
- if (i % 3 === 1) {
- assert.commandWorked(testDB.createCollection(collName, {capped: true, size: 4096}));
- }
+// Create 'numCollectionsToCreate' collections with a TTL index, where every third collection is
+// capped. We create many collections with a TTL index to increase the odds that the TTL monitor
+// would process a non-capped collection after a capped collection. This allows us to verify
+// that the TTL monitor continues processing the remaining collections after encountering an
+// error processing a capped collection.
+for (var i = 0; i < numCollectionsToCreate; i++) {
+ var collName = "ttl" + i.zeroPad(width);
+ if (i % 3 === 1) {
+ assert.commandWorked(testDB.createCollection(collName, {capped: true, size: 4096}));
+ }
- // Create a TTL index on the 'date' field of the collection.
- var res = testDB[collName].ensureIndex({date: 1}, {expireAfterSeconds: expireAfterSeconds});
- assert.commandWorked(res);
+ // Create a TTL index on the 'date' field of the collection.
+ var res = testDB[collName].ensureIndex({date: 1}, {expireAfterSeconds: expireAfterSeconds});
+ assert.commandWorked(res);
- // Insert a single document with a 'date' field that is already expired according to the
- // index definition.
- assert.writeOK(testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
- }
+ // Insert a single document with a 'date' field that is already expired according to the
+ // index definition.
+ assert.writeOK(testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
+}
- // Increase the verbosity of the TTL monitor's output.
- assert.commandWorked(testDB.adminCommand({setParameter: 1, logComponentVerbosity: {index: 1}}));
+// Increase the verbosity of the TTL monitor's output.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, logComponentVerbosity: {index: 1}}));
- // Enable the TTL monitor and wait for it to run.
- var ttlPasses = testDB.serverStatus().metrics.ttl.passes;
- assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: true}));
+// Enable the TTL monitor and wait for it to run.
+var ttlPasses = testDB.serverStatus().metrics.ttl.passes;
+assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: true}));
- var timeoutSeconds = 60;
- assert.soon(
- function checkIfTTLMonitorRan() {
- // The 'ttl.passes' metric is incremented when the TTL monitor starts processing the
- // indexes, so we wait for it to be incremented twice to know that the TTL monitor
- // finished processing the indexes at least once.
- return testDB.serverStatus().metrics.ttl.passes >= ttlPasses + 2;
- },
- function msg() {
- return "TTL monitor didn't run within " + timeoutSeconds + " seconds";
- },
- timeoutSeconds * 1000);
+var timeoutSeconds = 60;
+assert.soon(
+ function checkIfTTLMonitorRan() {
+ // The 'ttl.passes' metric is incremented when the TTL monitor starts processing the
+ // indexes, so we wait for it to be incremented twice to know that the TTL monitor
+ // finished processing the indexes at least once.
+ return testDB.serverStatus().metrics.ttl.passes >= ttlPasses + 2;
+ },
+ function msg() {
+ return "TTL monitor didn't run within " + timeoutSeconds + " seconds";
+ },
+ timeoutSeconds * 1000);
- for (var i = 0; i < numCollectionsToCreate; i++) {
- var coll = testDB["ttl" + i.zeroPad(width)];
- var count = coll.count();
- if (i % 3 === 1) {
- assert.eq(1,
- count,
- "the TTL monitor shouldn't have removed expired documents from" +
- " the capped collection '" + coll.getFullName() + "'");
- } else {
- assert.eq(0,
- count,
- "the TTL monitor didn't removed expired documents from the" +
- " collection '" + coll.getFullName() + "'");
- }
+for (var i = 0; i < numCollectionsToCreate; i++) {
+ var coll = testDB["ttl" + i.zeroPad(width)];
+ var count = coll.count();
+ if (i % 3 === 1) {
+ assert.eq(1,
+ count,
+ "the TTL monitor shouldn't have removed expired documents from" +
+ " the capped collection '" + coll.getFullName() + "'");
+ } else {
+ assert.eq(0,
+ count,
+ "the TTL monitor didn't removed expired documents from the" +
+ " collection '" + coll.getFullName() + "'");
}
+}
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
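
The assert.soon in the middle of the hunk is the standard synchronization idiom for TTL tests:
sample metrics.ttl.passes, then block until it has advanced by two, guaranteeing at least one
full pass completed after setup. A reusable sketch of the idiom (the helper name is illustrative):

    function awaitTTLPasses(testDB, n, timeoutMs) {
        var start = testDB.serverStatus().metrics.ttl.passes;
        assert.soon(function() {
            return testDB.serverStatus().metrics.ttl.passes >= start + n;
        }, "TTL monitor did not complete " + n + " passes", timeoutMs);
    }
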
diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js
index d818f86d28d..af4c9c1a7fb 100644
--- a/jstests/noPassthrough/ttl_partial_index.js
+++ b/jstests/noPassthrough/ttl_partial_index.js
@@ -1,31 +1,31 @@
// Test that the TTL monitor will correctly use TTL indexes that are also partial indexes.
// SERVER-17984.
(function() {
- "use strict";
- // Launch mongod with shorter TTL monitor sleep interval.
- var runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
- var coll = runner.getDB("test").ttl_partial_index;
- coll.drop();
+"use strict";
+// Launch mongod with shorter TTL monitor sleep interval.
+var runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
+var coll = runner.getDB("test").ttl_partial_index;
+coll.drop();
- // Create TTL partial index.
- assert.commandWorked(coll.ensureIndex(
- {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
+// Create TTL partial index.
+assert.commandWorked(coll.ensureIndex(
+ {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
- var now = new Date();
- assert.writeOK(coll.insert({x: now, z: 2}));
- assert.writeOK(coll.insert({x: now}));
+var now = new Date();
+assert.writeOK(coll.insert({x: now, z: 2}));
+assert.writeOK(coll.insert({x: now}));
- // Wait for the TTL monitor to run at least twice (in case we weren't finished setting up our
- // collection when it ran the first time).
- var ttlPass = coll.getDB().serverStatus().metrics.ttl.passes;
- assert.soon(function() {
- return coll.getDB().serverStatus().metrics.ttl.passes >= ttlPass + 2;
- }, "TTL monitor didn't run before timing out.");
+// Wait for the TTL monitor to run at least twice (in case we weren't finished setting up our
+// collection when it ran the first time).
+var ttlPass = coll.getDB().serverStatus().metrics.ttl.passes;
+assert.soon(function() {
+ return coll.getDB().serverStatus().metrics.ttl.passes >= ttlPass + 2;
+}, "TTL monitor didn't run before timing out.");
- assert.eq(0,
- coll.find({z: {$exists: true}}).hint({x: 1}).itcount(),
- "Wrong number of documents in partial index, after TTL monitor run");
- assert.eq(
- 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
- MongoRunner.stopMongod(runner);
+assert.eq(0,
+ coll.find({z: {$exists: true}}).hint({x: 1}).itcount(),
+ "Wrong number of documents in partial index, after TTL monitor run");
+assert.eq(
+ 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
+MongoRunner.stopMongod(runner);
})();
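
With expireAfterSeconds: 0 and the partial filter above, the TTL monitor can only delete
documents that appear in the index, i.e. those matching the partialFilterExpression, which is
why exactly one of the two inserted documents survives. The effective deletion predicate, as a
sketch:

    // Only documents in the partial index are eligible for TTL deletion.
    var eligible = {x: {$lte: new Date()}, z: {$exists: true}};
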
diff --git a/jstests/noPassthrough/two_phase_index_build.js b/jstests/noPassthrough/two_phase_index_build.js
index d427571b2ec..084a360df11 100644
--- a/jstests/noPassthrough/two_phase_index_build.js
+++ b/jstests/noPassthrough/two_phase_index_build.js
@@ -6,71 +6,71 @@
(function() {
- // For 'assertIndexes'.
- load("jstests/noPassthrough/libs/index_build.js");
-
- const replSet = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+// For 'assertIndexes'.
+load("jstests/noPassthrough/libs/index_build.js");
+
+const replSet = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
+ },
+ ]
+});
- // Allow the createIndexes command to use the index builds coordinator in single-phase mode.
- replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
- replSet.initiate();
+// Allow the createIndexes command to use the index builds coordinator in single-phase mode.
+replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
+replSet.initiate();
- const testDB = replSet.getPrimary().getDB('test');
- const coll = testDB.twoPhaseIndexBuild;
- const collName = coll.getName();
- const secondaryColl = replSet.getSecondary().getDB('test')[collName];
+const testDB = replSet.getPrimary().getDB('test');
+const coll = testDB.twoPhaseIndexBuild;
+const collName = coll.getName();
+const secondaryColl = replSet.getSecondary().getDB('test')[collName];
- const bulk = coll.initializeUnorderedBulkOp();
- const numDocs = 1000;
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({a: i, b: i});
- }
- assert.commandWorked(bulk.execute());
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 1000;
+for (let i = 0; i < numDocs; i++) {
+ bulk.insert({a: i, b: i});
+}
+assert.commandWorked(bulk.execute());
- // Use index builds coordinator for a two-phase build
- assert.commandWorked(testDB.runCommand(
- {twoPhaseCreateIndexes: coll.getName(), indexes: [{key: {a: 1}, name: 'a_1'}]}));
+// Use index builds coordinator for a two-phase build
+assert.commandWorked(testDB.runCommand(
+ {twoPhaseCreateIndexes: coll.getName(), indexes: [{key: {a: 1}, name: 'a_1'}]}));
- IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
- assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({a: 1}).itcount());
+IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({a: 1}).itcount());
- const cmdNs = testDB.getName() + ".$cmd";
- const localDB = testDB.getSiblingDB("local");
- const oplogColl = localDB.oplog.rs;
+const cmdNs = testDB.getName() + ".$cmd";
+const localDB = testDB.getSiblingDB("local");
+const oplogColl = localDB.oplog.rs;
- // Ensure both oplog entries were written to the oplog.
- assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.startIndexBuild": collName}).itcount());
- assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.commitIndexBuild": collName}).itcount());
+// Ensure both oplog entries were written to the oplog.
+assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.startIndexBuild": collName}).itcount());
+assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.commitIndexBuild": collName}).itcount());
- // Ensure the secondary builds the index.
- replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
- IndexBuildTest.assertIndexes(secondaryColl, 2, ["_id_", "a_1"]);
+// Ensure the secondary builds the index.
+replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
+IndexBuildTest.assertIndexes(secondaryColl, 2, ["_id_", "a_1"]);
- // Use index build coordinator for a single-phase index build through the createIndexes
- // command.
- assert.commandWorked(
- testDB.runCommand({createIndexes: coll.getName(), indexes: [{key: {b: 1}, name: 'b_1'}]}));
+// Use index build coordinator for a single-phase index build through the createIndexes
+// command.
+assert.commandWorked(
+ testDB.runCommand({createIndexes: coll.getName(), indexes: [{key: {b: 1}, name: 'b_1'}]}));
- IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
- assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({b: 1}).itcount());
+IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
+assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({b: 1}).itcount());
- // Ensure only one oplog entry was written to the oplog.
- assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.createIndexes": collName}).itcount());
+// Ensure only one oplog entry was written to the oplog.
+assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.createIndexes": collName}).itcount());
- // Ensure the secondary builds the index.
- replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
- IndexBuildTest.assertIndexes(secondaryColl, 3, ["_id_", "a_1", "b_1"]);
+// Ensure the secondary builds the index.
+replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
+IndexBuildTest.assertIndexes(secondaryColl, 3, ["_id_", "a_1", "b_1"]);
- replSet.stopSet();
+replSet.stopSet();
})();
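
The oplog assertions are the heart of this test: a two-phase build writes paired
startIndexBuild/commitIndexBuild entries, while the single-phase createIndexes path writes
exactly one createIndexes entry. A sketch that gathers all three counts for a collection
(helper name illustrative):

    function indexBuildOplogCounts(conn, dbName, collName) {
        var oplog = conn.getDB("local").oplog.rs;
        var cmdNs = dbName + ".$cmd";
        return {
            start: oplog.find({op: "c", ns: cmdNs, "o.startIndexBuild": collName}).itcount(),
            commit: oplog.find({op: "c", ns: cmdNs, "o.commitIndexBuild": collName}).itcount(),
            create: oplog.find({op: "c", ns: cmdNs, "o.createIndexes": collName}).itcount(),
        };
    }
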
diff --git a/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js b/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js
index 72ec4cdb7cb..5d19e39f40e 100644
--- a/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js
+++ b/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js
@@ -7,50 +7,43 @@
(function() {
- const replSet = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const replSet = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
-
- replSet.startSet();
- replSet.initiate();
-
- const testDB = replSet.getPrimary().getDB('test');
- const coll = testDB.twoPhaseIndexBuild;
- const cmdNs = testDB.getName() + ".$cmd";
-
- coll.insert({a: 1});
-
- assert.commandFailedWithCode(testDB.adminCommand({
- applyOps: [
- {op: "c", ns: cmdNs, o: {startIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}
- ]
- }),
- [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
-
- assert.commandFailedWithCode(testDB.adminCommand({
- applyOps: [{
- op: "c",
- ns: cmdNs,
- o: {commitIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}
- }]
- }),
- [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
-
- assert.commandFailedWithCode(testDB.adminCommand({
- applyOps: [
- {op: "c", ns: cmdNs, o: {abortIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}
- ]
- }),
- [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
-
- replSet.stopSet();
+ },
+ ]
+});
+
+replSet.startSet();
+replSet.initiate();
+
+const testDB = replSet.getPrimary().getDB('test');
+const coll = testDB.twoPhaseIndexBuild;
+const cmdNs = testDB.getName() + ".$cmd";
+
+coll.insert({a: 1});
+
+assert.commandFailedWithCode(testDB.adminCommand({
+ applyOps: [{op: "c", ns: cmdNs, o: {startIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}]
+}),
+ [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
+
+assert.commandFailedWithCode(testDB.adminCommand({
+ applyOps:
+ [{op: "c", ns: cmdNs, o: {commitIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}]
+}),
+ [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
+
+assert.commandFailedWithCode(testDB.adminCommand({
+ applyOps: [{op: "c", ns: cmdNs, o: {abortIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}]
+}),
+ [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
+
+replSet.stopSet();
})();
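
The three applyOps probes above differ only in the op name, so they could be factored into a
loop; a sketch using the same names and error codes as the file (the op name is assigned first
so it stays the leading field of the command document):

    ["startIndexBuild", "commitIndexBuild", "abortIndexBuild"].forEach(function(opName) {
        var o = {};
        o[opName] = coll.getName();
        o.key = {a: 1};
        o.name = 'a_1';
        assert.commandFailedWithCode(
            testDB.adminCommand({applyOps: [{op: "c", ns: cmdNs, o: o}]}),
            [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
    });
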
diff --git a/jstests/noPassthrough/txn_override_causal_consistency.js b/jstests/noPassthrough/txn_override_causal_consistency.js
index 8ec6c0d276c..ac7c9758c96 100644
--- a/jstests/noPassthrough/txn_override_causal_consistency.js
+++ b/jstests/noPassthrough/txn_override_causal_consistency.js
@@ -5,206 +5,206 @@
* @tags: [requires_replication, uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "foo";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const conn = new Mongo(rst.getPrimary().host);
-
-    // Create the collection up front so the override doesn't implicitly create it unexpectedly.
- assert.commandWorked(conn.getDB(dbName).createCollection(collName));
-
- // Override runCommand to add each command it sees to a global array that can be inspected by
- // this test and to allow mocking certain responses.
- let cmdObjsSeen = [];
- let mockNetworkError, mockFirstResponse, mockFirstCommitResponse;
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjsSeen.push(cmdObj);
-
- if (mockNetworkError) {
- mockNetworkError = undefined;
- throw new Error("network error");
- }
-
- if (mockFirstResponse) {
- const mockedRes = mockFirstResponse;
- mockFirstResponse = undefined;
- return mockedRes;
- }
-
- const cmdName = Object.keys(cmdObj)[0];
- if (cmdName === "commitTransaction" && mockFirstCommitResponse) {
- const mockedRes = mockFirstCommitResponse;
- mockFirstCommitResponse = undefined;
- return mockedRes;
- }
-
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
-    // Runs the given function with a collection from a session created with the sessionOptions on
-    // TestData, and asserts that the commands observed to start a transaction include or omit
-    // afterClusterTime as expected.
- function inspectFirstCommandForAfterClusterTime(conn, cmdName, isCausal, expectRetry, func) {
- const session = conn.startSession(TestData.sessionOptions);
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- cmdObjsSeen = [];
- func(sessionColl);
-
- // Find all requests sent with the expected command name, in case the scenario allows
- // retrying more than once or expects to end with a commit.
- let cmds = [];
- if (!expectRetry) {
- assert.eq(1, cmdObjsSeen.length);
- cmds.push(cmdObjsSeen[0]);
- } else {
- assert.lt(1, cmdObjsSeen.length);
- cmds = cmdObjsSeen.filter(obj => Object.keys(obj)[0] === cmdName);
- }
-
- for (let cmd of cmds) {
- if (isCausal) {
- assert(cmd.hasOwnProperty("$clusterTime"),
- "Expected " + tojson(cmd) + " to have a $clusterTime.");
- assert(cmd.hasOwnProperty("readConcern"),
- "Expected " + tojson(cmd) + " to have a read concern.");
- assert(cmd.readConcern.hasOwnProperty("afterClusterTime"),
- "Expected " + tojson(cmd) + " to have an afterClusterTime.");
- } else {
- if (TestData.hasOwnProperty("enableMajorityReadConcern") &&
- TestData.enableMajorityReadConcern === false) {
- // Commands not allowed in a transaction without causal consistency will not
- // have a read concern on variants that don't enable majority read concern.
- continue;
- }
-
- assert(cmd.hasOwnProperty("readConcern"),
- "Expected " + tojson(cmd) + " to have a read concern.");
- assert(!cmd.readConcern.hasOwnProperty("afterClusterTime"),
- "Expected " + tojson(cmd) + " to not have an afterClusterTime.");
- }
- }
-
- // Run a command not runnable in a transaction to reset the override's transaction state.
- assert.commandWorked(sessionDB.runCommand({ping: 1}));
-
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const collName = "foo";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const conn = new Mongo(rst.getPrimary().host);
+
+// Create the collection up front so the override doesn't implicitly create it unexpectedly.
+assert.commandWorked(conn.getDB(dbName).createCollection(collName));
+
+// Override runCommand to add each command it sees to a global array that can be inspected by
+// this test and to allow mocking certain responses.
+let cmdObjsSeen = [];
+let mockNetworkError, mockFirstResponse, mockFirstCommitResponse;
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjsSeen.push(cmdObj);
+
+ if (mockNetworkError) {
+ mockNetworkError = undefined;
+ throw new Error("network error");
}
- // Helper methods for testing specific commands.
-
- function testInsert(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "insert", isCausal, expectRetry, (coll) => {
- assert.writeOK(coll.insert({x: 1}));
- });
+ if (mockFirstResponse) {
+ const mockedRes = mockFirstResponse;
+ mockFirstResponse = undefined;
+ return mockedRes;
}
- function testFind(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
- assert.eq(0, coll.find({y: 1}).itcount());
- });
+ const cmdName = Object.keys(cmdObj)[0];
+ if (cmdName === "commitTransaction" && mockFirstCommitResponse) {
+ const mockedRes = mockFirstCommitResponse;
+ mockFirstCommitResponse = undefined;
+ return mockedRes;
}
- function testCount(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "count", isCausal, expectRetry, (coll) => {
- assert.eq(0, coll.count({y: 1}));
- });
+ return mongoRunCommandOriginal.apply(this, arguments);
+};
+
+// Runs the given function with a collection from a session created with the sessionOptions on
+// TestData, and asserts that the commands observed to start a transaction include or omit
+// afterClusterTime as expected.
+function inspectFirstCommandForAfterClusterTime(conn, cmdName, isCausal, expectRetry, func) {
+ const session = conn.startSession(TestData.sessionOptions);
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
+
+ cmdObjsSeen = [];
+ func(sessionColl);
+
+ // Find all requests sent with the expected command name, in case the scenario allows
+ // retrying more than once or expects to end with a commit.
+ let cmds = [];
+ if (!expectRetry) {
+ assert.eq(1, cmdObjsSeen.length);
+ cmds.push(cmdObjsSeen[0]);
+ } else {
+ assert.lt(1, cmdObjsSeen.length);
+ cmds = cmdObjsSeen.filter(obj => Object.keys(obj)[0] === cmdName);
}
- function testCommit(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
- assert.eq(0, coll.find({y: 1}).itcount());
- assert.commandWorked(coll.getDB().runCommand({ping: 1})); // commits the transaction.
- });
- }
+ for (let cmd of cmds) {
+ if (isCausal) {
+ assert(cmd.hasOwnProperty("$clusterTime"),
+ "Expected " + tojson(cmd) + " to have a $clusterTime.");
+ assert(cmd.hasOwnProperty("readConcern"),
+ "Expected " + tojson(cmd) + " to have a read concern.");
+ assert(cmd.readConcern.hasOwnProperty("afterClusterTime"),
+ "Expected " + tojson(cmd) + " to have an afterClusterTime.");
+ } else {
+ if (TestData.hasOwnProperty("enableMajorityReadConcern") &&
+ TestData.enableMajorityReadConcern === false) {
+ // Commands not allowed in a transaction without causal consistency will not
+ // have a read concern on variants that don't enable majority read concern.
+ continue;
+ }
- // Load the txn_override after creating the spy, so the spy will see commands after being
- // transformed by the override. Also configure network error retries because several suites use
- // both.
- TestData.networkErrorAndTxnOverrideConfig = {
- wrapCRUDinTransactions: true,
- retryOnNetworkErrors: true
- };
- load("jstests/libs/override_methods/network_error_and_txn_override.js");
-
- TestData.logRetryAttempts = true;
-
- // Run a command to guarantee operation time is initialized on the database's session.
- assert.commandWorked(conn.getDB(dbName).runCommand({ping: 1}));
-
- function runTest() {
- for (let isCausal of[false, true]) {
- jsTestLog("Testing with isCausal = " + isCausal);
- TestData.sessionOptions = {causalConsistency: isCausal};
-
- // Commands that accept read and write concern allowed in a transaction.
- testInsert(conn, isCausal, false /*expectRetry*/);
- testFind(conn, isCausal, false /*expectRetry*/);
-
- // Command that can accept read concern not allowed in a transaction.
- testCount(conn, isCausal, false /*expectRetry*/);
-
- // Command that attempts to implicitly create a collection.
- conn.getDB(dbName)[collName].drop();
- testInsert(conn, isCausal, true /*expectRetry*/);
-
- // Command that can accept read concern with retryable error.
- mockFirstResponse = {ok: 0, code: ErrorCodes.CursorKilled};
- testFind(conn, isCausal, true /*expectRetry*/);
-
- // Commands that can accept read and write concern with network error.
- mockNetworkError = true;
- testInsert(conn, isCausal, true /*expectRetry*/);
-
- mockNetworkError = true;
- testFind(conn, isCausal, true /*expectRetry*/);
-
- // Command that can accept read concern not allowed in a transaction with network error.
- mockNetworkError = true;
- testCount(conn, isCausal, true /*expectRetry*/);
-
- // Commands that can accept read and write concern with transient transaction error.
- mockFirstResponse = {
- ok: 0,
- code: ErrorCodes.NoSuchTransaction,
- errorLabels: ["TransientTransactionError"]
- };
- testFind(conn, isCausal, true /*expectRetry*/);
-
- mockFirstResponse = {
- ok: 0,
- code: ErrorCodes.NoSuchTransaction,
- errorLabels: ["TransientTransactionError"]
- };
- testInsert(conn, isCausal, true /*expectRetry*/);
-
- // Transient transaction error on commit attempt.
- mockFirstCommitResponse = {
- ok: 0,
- code: ErrorCodes.NoSuchTransaction,
- errorLabels: ["TransientTransactionError"]
- };
- testCommit(conn, isCausal, true /*expectRetry*/);
-
- // Network error on commit attempt.
- mockFirstCommitResponse = {ok: 0, code: ErrorCodes.NotMaster};
- testCommit(conn, isCausal, true /*expectRetry*/);
+ assert(cmd.hasOwnProperty("readConcern"),
+ "Expected " + tojson(cmd) + " to have a read concern.");
+ assert(!cmd.readConcern.hasOwnProperty("afterClusterTime"),
+ "Expected " + tojson(cmd) + " to not have an afterClusterTime.");
}
}
- runTest();
+ // Run a command not runnable in a transaction to reset the override's transaction state.
+ assert.commandWorked(sessionDB.runCommand({ping: 1}));
+
+ session.endSession();
+}
+
+// Helper methods for testing specific commands.
+
+function testInsert(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "insert", isCausal, expectRetry, (coll) => {
+ assert.writeOK(coll.insert({x: 1}));
+ });
+}
+
+function testFind(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
+ assert.eq(0, coll.find({y: 1}).itcount());
+ });
+}
+
+function testCount(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "count", isCausal, expectRetry, (coll) => {
+ assert.eq(0, coll.count({y: 1}));
+ });
+}
+
+function testCommit(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
+ assert.eq(0, coll.find({y: 1}).itcount());
+ assert.commandWorked(coll.getDB().runCommand({ping: 1})); // commits the transaction.
+ });
+}
+
+// Load the txn_override after creating the spy, so the spy will see commands after being
+// transformed by the override. Also configure network error retries because several suites use
+// both.
+TestData.networkErrorAndTxnOverrideConfig = {
+ wrapCRUDinTransactions: true,
+ retryOnNetworkErrors: true
+};
+load("jstests/libs/override_methods/network_error_and_txn_override.js");
+
+TestData.logRetryAttempts = true;
+
+// Run a command to guarantee operation time is initialized on the database's session.
+assert.commandWorked(conn.getDB(dbName).runCommand({ping: 1}));
+
+function runTest() {
+ for (let isCausal of [false, true]) {
+ jsTestLog("Testing with isCausal = " + isCausal);
+ TestData.sessionOptions = {causalConsistency: isCausal};
+
+ // Commands that accept read and write concern allowed in a transaction.
+ testInsert(conn, isCausal, false /*expectRetry*/);
+ testFind(conn, isCausal, false /*expectRetry*/);
+
+ // Command that can accept read concern not allowed in a transaction.
+ testCount(conn, isCausal, false /*expectRetry*/);
+
+ // Command that attempts to implicitly create a collection.
+ conn.getDB(dbName)[collName].drop();
+ testInsert(conn, isCausal, true /*expectRetry*/);
+
+ // Command that can accept read concern with retryable error.
+ mockFirstResponse = {ok: 0, code: ErrorCodes.CursorKilled};
+ testFind(conn, isCausal, true /*expectRetry*/);
+
+ // Commands that can accept read and write concern with network error.
+ mockNetworkError = true;
+ testInsert(conn, isCausal, true /*expectRetry*/);
+
+ mockNetworkError = true;
+ testFind(conn, isCausal, true /*expectRetry*/);
+
+ // Command that can accept read concern not allowed in a transaction with network error.
+ mockNetworkError = true;
+ testCount(conn, isCausal, true /*expectRetry*/);
+
+ // Commands that can accept read and write concern with transient transaction error.
+ mockFirstResponse = {
+ ok: 0,
+ code: ErrorCodes.NoSuchTransaction,
+ errorLabels: ["TransientTransactionError"]
+ };
+ testFind(conn, isCausal, true /*expectRetry*/);
+
+ mockFirstResponse = {
+ ok: 0,
+ code: ErrorCodes.NoSuchTransaction,
+ errorLabels: ["TransientTransactionError"]
+ };
+ testInsert(conn, isCausal, true /*expectRetry*/);
+
+ // Transient transaction error on commit attempt.
+ mockFirstCommitResponse = {
+ ok: 0,
+ code: ErrorCodes.NoSuchTransaction,
+ errorLabels: ["TransientTransactionError"]
+ };
+ testCommit(conn, isCausal, true /*expectRetry*/);
+
+ // Network error on commit attempt.
+ mockFirstCommitResponse = {ok: 0, code: ErrorCodes.NotMaster};
+ testCommit(conn, isCausal, true /*expectRetry*/);
+ }
+}
+
+runTest();
- // With read concern majority disabled.
- TestData.enableMajorityReadConcern = false;
- runTest();
- delete TestData.enableMajorityReadConcern;
+// With read concern majority disabled.
+TestData.enableMajorityReadConcern = false;
+runTest();
+delete TestData.enableMajorityReadConcern;
- rst.stopSet();
+rst.stopSet();
})();
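
The central trick in this file is the runCommand spy installed before the override library is
loaded; everything else is assertions over the recorded commands. Stripped to its core, the
pattern is this sketch:

    var seenCmdNames = [];
    var originalRunCommand = Mongo.prototype.runCommand;
    Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
        seenCmdNames.push(Object.keys(cmdObj)[0]);  // record the command name, then delegate
        return originalRunCommand.apply(this, arguments);
    };
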
diff --git a/jstests/noPassthrough/umask.js b/jstests/noPassthrough/umask.js
index d8869f78f56..8d7234b15d3 100644
--- a/jstests/noPassthrough/umask.js
+++ b/jstests/noPassthrough/umask.js
@@ -5,74 +5,76 @@
* @tags: [ requires_wiredtiger ]
*/
(function() {
- 'use strict';
- // We only test this on POSIX since that's the only platform where umasks make sense
- if (_isWindows()) {
- return;
- }
+'use strict';
+// We only test this on POSIX since that's the only platform where umasks make sense
+if (_isWindows()) {
+ return;
+}
- const oldUmask = new Number(umask(0));
- jsTestLog("Setting umask to really permissive 000 mode, old mode was " + oldUmask.toString(8));
+const oldUmask = new Number(umask(0));
+jsTestLog("Setting umask to really permissive 000 mode, old mode was " + oldUmask.toString(8));
- const defaultUmask = Number.parseInt("600", 8);
- const permissiveUmask = Number.parseInt("666", 8);
+const defaultUmask = Number.parseInt("600", 8);
+const permissiveUmask = Number.parseInt("666", 8);
- // Any files that have some explicit permissions set on them should be added to this list
- const exceptions = [
- // The lock file gets created with explicit 644 permissions
- 'mongod.lock',
-        // Mobile storage engine files get created with 644 permissions when honoring the system umask
- 'mobile.sqlite',
- 'mobile.sqlite-shm',
- 'mobile.sqlite-wal',
- ];
+// Any files that have some explicit permissions set on them should be added to this list
+const exceptions = [
+ // The lock file gets created with explicit 644 permissions
+ 'mongod.lock',
+    // Mobile storage engine files get created with 644 permissions when honoring the system umask
+ 'mobile.sqlite',
+ 'mobile.sqlite-shm',
+ 'mobile.sqlite-wal',
+];
- let mongodOptions = MongoRunner.mongodOptions({
- useLogFiles: true,
- cleanData: true,
- });
+let mongodOptions = MongoRunner.mongodOptions({
+ useLogFiles: true,
+ cleanData: true,
+});
- if (buildInfo()["modules"].some((mod) => {
- return mod == "enterprise";
- })) {
- mongodOptions.auditDestination = "file";
- mongodOptions.auditPath = mongodOptions.dbpath + "/audit.log";
- mongodOptions.auditFormat = "JSON";
- }
+if (buildInfo()["modules"].some((mod) => {
+ return mod == "enterprise";
+ })) {
+ mongodOptions.auditDestination = "file";
+ mongodOptions.auditPath = mongodOptions.dbpath + "/audit.log";
+ mongodOptions.auditFormat = "JSON";
+}
- const checkMask = (topDir, expected, honoringUmask) => {
- const maybeNot = honoringUmask ? "" : " not";
- const processDirectory = (dir) => {
- jsTestLog(`Checking ${dir}`);
- ls(dir).forEach((file) => {
- if (file.endsWith("/")) {
- return processDirectory(file);
- } else if (exceptions.some((exception) => {
- return file.endsWith(exception);
- })) {
- return;
- }
- const mode = new Number(getFileMode(file));
- const modeStr = mode.toString(8);
- const msg = `Mode for ${file} is ${modeStr} when${maybeNot} honoring system umask`;
- assert.eq(mode.valueOf(), expected, msg);
- });
- };
-
- processDirectory(topDir);
+const checkMask = (topDir, expected, honoringUmask) => {
+ const maybeNot = honoringUmask ? "" : " not";
+ const processDirectory = (dir) => {
+ jsTestLog(`Checking ${dir}`);
+ ls(dir).forEach((file) => {
+ if (file.endsWith("/")) {
+ return processDirectory(file);
+ } else if (exceptions.some((exception) => {
+ return file.endsWith(exception);
+ })) {
+ return;
+ }
+ const mode = new Number(getFileMode(file));
+ const modeStr = mode.toString(8);
+ const msg = `Mode for ${file} is ${modeStr} when${maybeNot} honoring system umask`;
+ assert.eq(mode.valueOf(), expected, msg);
+ });
};
- // First we start up the mongod normally, all the files except mongod.lock should have the mode
- // 0600
- let conn = MongoRunner.runMongod(mongodOptions);
- MongoRunner.stopMongod(conn);
- checkMask(conn.fullOptions.dbpath, defaultUmask, false);
+ processDirectory(topDir);
+};
+
+// First we start up the mongod normally; all the files except mongod.lock should have the mode
+// 0600
+let conn = MongoRunner.runMongod(mongodOptions);
+MongoRunner.stopMongod(conn);
+checkMask(conn.fullOptions.dbpath, defaultUmask, false);
- // Restart the mongod with honorSystemUmask, all files should have the mode 0666
- mongodOptions.setParameter = {honorSystemUmask: true};
- conn = MongoRunner.runMongod(mongodOptions);
- MongoRunner.stopMongod(conn);
- checkMask(conn.fullOptions.dbpath, permissiveUmask, false);
+// Restart the mongod with honorSystemUmask; all files should have the mode 0666
+mongodOptions.setParameter = {
+ honorSystemUmask: true
+};
+conn = MongoRunner.runMongod(mongodOptions);
+MongoRunner.stopMongod(conn);
+checkMask(conn.fullOptions.dbpath, permissiveUmask, true);
- umask(oldUmask.valueOf());
+umask(oldUmask.valueOf());
})();
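
For context, the umask test above leans on standard POSIX semantics: a file requested with mode 0666 is created with (0666 & ~umask). A tiny sketch of that expected-mode arithmetic, using the same octal parsing as the test:

const requestedMode = Number.parseInt("666", 8);
function expectedFileMode(umaskValue) {
    // POSIX: the effective mode is the requested mode with the umask bits cleared.
    return requestedMode & ~umaskValue & Number.parseInt("777", 8);
}
assert.eq(expectedFileMode(0), Number.parseInt("666", 8));                          // umask 000
assert.eq(expectedFileMode(Number.parseInt("022", 8)), Number.parseInt("644", 8));  // umask 022
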
diff --git a/jstests/noPassthrough/unix_socket.js b/jstests/noPassthrough/unix_socket.js
index ff1a18afadf..fc1ad2abf58 100644
--- a/jstests/noPassthrough/unix_socket.js
+++ b/jstests/noPassthrough/unix_socket.js
@@ -10,112 +10,112 @@
*/
//@tags: [requires_sharding]
(function() {
- 'use strict';
- // This test will only work on POSIX machines.
- if (_isWindows()) {
- return;
- }
-
- // Do not fail if this test leaves unterminated processes because testSockOptions
- // is expected to throw before it calls stopMongod.
- TestData.failIfUnterminatedProcesses = false;
-
- var doesLogMatchRegex = function(logArray, regex) {
- for (let i = (logArray.length - 1); i >= 0; i--) {
- var regexInLine = regex.exec(logArray[i]);
- if (regexInLine != null) {
- return true;
- }
- }
- return false;
- };
-
- var checkSocket = function(path) {
- assert.eq(fileExists(path), true);
- var conn = new Mongo(path);
- assert.commandWorked(conn.getDB("admin").runCommand("ping"),
- `Expected ping command to succeed for ${path}`);
- };
-
- var testSockOptions = function(bindPath, expectSockPath, optDict, bindSep = ',', optMongos) {
- var optDict = optDict || {};
- if (bindPath) {
- optDict["bind_ip"] = `${MongoRunner.dataDir}/${bindPath}${bindSep}127.0.0.1`;
+'use strict';
+// This test will only work on POSIX machines.
+if (_isWindows()) {
+ return;
+}
+
+// Do not fail if this test leaves unterminated processes because testSockOptions
+// is expected to throw before it calls stopMongod.
+TestData.failIfUnterminatedProcesses = false;
+
+var doesLogMatchRegex = function(logArray, regex) {
+ for (let i = (logArray.length - 1); i >= 0; i--) {
+ var regexInLine = regex.exec(logArray[i]);
+ if (regexInLine != null) {
+ return true;
}
+ }
+ return false;
+};
+
+var checkSocket = function(path) {
+ assert.eq(fileExists(path), true);
+ var conn = new Mongo(path);
+ assert.commandWorked(conn.getDB("admin").runCommand("ping"),
+ `Expected ping command to succeed for ${path}`);
+};
+
+var testSockOptions = function(bindPath, expectSockPath, optDict, bindSep = ',', optMongos) {
+ var optDict = optDict || {};
+ if (bindPath) {
+ optDict["bind_ip"] = `${MongoRunner.dataDir}/${bindPath}${bindSep}127.0.0.1`;
+ }
- var conn, shards;
- if (optMongos) {
- shards = new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: optDict}});
- assert.neq(shards, null, "Expected cluster to start okay");
- conn = shards.s0;
- } else {
- conn = MongoRunner.runMongod(optDict);
- }
+ var conn, shards;
+ if (optMongos) {
+ shards = new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: optDict}});
+ assert.neq(shards, null, "Expected cluster to start okay");
+ conn = shards.s0;
+ } else {
+ conn = MongoRunner.runMongod(optDict);
+ }
- assert.neq(conn, null, `Expected ${optMongos ? "mongos" : "mongod"} to start okay`);
+ assert.neq(conn, null, `Expected ${optMongos ? "mongos" : "mongod"} to start okay`);
- const defaultUNIXSocket = `/tmp/mongodb-${conn.port}.sock`;
- var checkPath = defaultUNIXSocket;
- if (expectSockPath) {
- checkPath = `${MongoRunner.dataDir}/${expectSockPath}`;
- }
+ const defaultUNIXSocket = `/tmp/mongodb-${conn.port}.sock`;
+ var checkPath = defaultUNIXSocket;
+ if (expectSockPath) {
+ checkPath = `${MongoRunner.dataDir}/${expectSockPath}`;
+ }
- checkSocket(checkPath);
+ checkSocket(checkPath);
- // Test the naming of the unix socket
- var log = conn.adminCommand({getLog: 'global'});
- assert.commandWorked(log, "Expected getting the log to work");
- var ll = log.log;
- var re = new RegExp("anonymous unix socket");
- assert(doesLogMatchRegex(ll, re), "Log message did not contain 'anonymous unix socket'");
+ // Test the naming of the unix socket
+ var log = conn.adminCommand({getLog: 'global'});
+ assert.commandWorked(log, "Expected getting the log to work");
+ var ll = log.log;
+ var re = new RegExp("anonymous unix socket");
+ assert(doesLogMatchRegex(ll, re), "Log message did not contain 'anonymous unix socket'");
- if (optMongos) {
- shards.stop();
- } else {
- MongoRunner.stopMongod(conn);
- }
+ if (optMongos) {
+ shards.stop();
+ } else {
+ MongoRunner.stopMongod(conn);
+ }
- assert.eq(fileExists(checkPath), false);
- };
-
- // Check that the default unix sockets work
- testSockOptions();
- testSockOptions(undefined, undefined, undefined, ',', true);
-
- // Check that a custom unix socket path works
- testSockOptions("testsock.socket", "testsock.socket");
- testSockOptions("testsock.socket", "testsock.socket", undefined, ',', true);
-
- // Check that a custom unix socket path works with spaces
- testSockOptions("test sock.socket", "test sock.socket");
- testSockOptions("test sock.socket", "test sock.socket", undefined, ',', true);
-
- // Check that a custom unix socket path works with spaces before the comma and after
- testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ');
- testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ', true);
-
- // Check that a bad UNIX path breaks
- assert.throws(function() {
- var badname = "a".repeat(200) + ".socket";
- testSockOptions(badname, badname);
- });
-
- // Check that if UNIX sockets are disabled that we aren't able to connect over UNIX sockets
- assert.throws(function() {
- testSockOptions(undefined, undefined, {nounixsocket: ""});
- });
-
- // Check the unixSocketPrefix option
- var socketPrefix = `${MongoRunner.dataDir}/socketdir`;
- mkdir(socketPrefix);
- var port = allocatePort();
- testSockOptions(
- undefined, `socketdir/mongodb-${port}.sock`, {unixSocketPrefix: socketPrefix, port: port});
-
- port = allocatePort();
- testSockOptions(undefined,
- `socketdir/mongodb-${port}.sock`,
- {unixSocketPrefix: socketPrefix, port: port},
- ',',
- true);
+ assert.eq(fileExists(checkPath), false);
+};
+
+// Check that the default unix sockets work
+testSockOptions();
+testSockOptions(undefined, undefined, undefined, ',', true);
+
+// Check that a custom unix socket path works
+testSockOptions("testsock.socket", "testsock.socket");
+testSockOptions("testsock.socket", "testsock.socket", undefined, ',', true);
+
+// Check that a custom unix socket path works with spaces
+testSockOptions("test sock.socket", "test sock.socket");
+testSockOptions("test sock.socket", "test sock.socket", undefined, ',', true);
+
+// Check that a custom unix socket path works with spaces before the comma and after
+testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ');
+testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ', true);
+
+// Check that a bad UNIX path breaks
+assert.throws(function() {
+ var badname = "a".repeat(200) + ".socket";
+ testSockOptions(badname, badname);
+});
+
+// Check that if UNIX sockets are disabled we aren't able to connect over UNIX sockets
+assert.throws(function() {
+ testSockOptions(undefined, undefined, {nounixsocket: ""});
+});
+
+// Check the unixSocketPrefix option
+var socketPrefix = `${MongoRunner.dataDir}/socketdir`;
+mkdir(socketPrefix);
+var port = allocatePort();
+testSockOptions(
+ undefined, `socketdir/mongodb-${port}.sock`, {unixSocketPrefix: socketPrefix, port: port});
+
+port = allocatePort();
+testSockOptions(undefined,
+ `socketdir/mongodb-${port}.sock`,
+ {unixSocketPrefix: socketPrefix, port: port},
+ ',',
+ true);
})();
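
As checkSocket() above relies on, the shell's Mongo() constructor accepts a UNIX socket path in place of host:port, and mongod's default socket lives at /tmp/mongodb-<port>.sock. A minimal sketch under the assumption that a mongod is already listening on 'port':

const port = 27017;  // assumed port, for illustration only
const sockPath = `/tmp/mongodb-${port}.sock`;
assert.eq(fileExists(sockPath), true);
// Connect via the filesystem path rather than a host:port pair.
const sockConn = new Mongo(sockPath);
assert.commandWorked(sockConn.getDB("admin").runCommand({ping: 1}));
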
diff --git a/jstests/noPassthrough/unknown-set-parameter.js b/jstests/noPassthrough/unknown-set-parameter.js
index 1e72694e276..f5e6c2b10b8 100644
--- a/jstests/noPassthrough/unknown-set-parameter.js
+++ b/jstests/noPassthrough/unknown-set-parameter.js
@@ -1,36 +1,35 @@
// Verify error is produced when specifying an invalid set parameter.
(function() {
- 'use strict';
+'use strict';
- function tryRun(arg) {
- // runMongoProgram helpfully makes certain that we pass a port when invoking mongod.
- return runMongoProgram('./mongod', '--port', 0, '--setParameter', arg, '--outputConfig');
- }
+function tryRun(arg) {
+ // runMongoProgram helpfully makes certain that we pass a port when invoking mongod.
+ return runMongoProgram('./mongod', '--port', 0, '--setParameter', arg, '--outputConfig');
+}
- // Positive case, valid setparam.
- clearRawMongoProgramOutput();
- const valid = tryRun('enableTestCommands=1');
- assert.eq(valid, 0);
- const validOutput = rawMongoProgramOutput();
- assert.gte(validOutput.search(/enableTestCommands: 1/), 0, validOutput);
+// Positive case, valid setparam.
+clearRawMongoProgramOutput();
+const valid = tryRun('enableTestCommands=1');
+assert.eq(valid, 0);
+const validOutput = rawMongoProgramOutput();
+assert.gte(validOutput.search(/enableTestCommands: 1/), 0, validOutput);
- // Negative case, invalid setparam.
- clearRawMongoProgramOutput();
- const foo = tryRun('foo=bar');
- assert.neq(foo, 0);
- const fooOutput = rawMongoProgramOutput();
- assert.gte(fooOutput.search(/Unknown --setParameter 'foo'/), 0, fooOutput);
-
- // Negative case, valid but unavailable setparam.
- clearRawMongoProgramOutput();
- const graph = tryRun('roleGraphInvalidationIsFatal=true');
- assert.neq(graph, 0);
- const graphOutput = rawMongoProgramOutput();
- assert.gte(
- graphOutput.search(
- /--setParameter 'roleGraphInvalidationIsFatal' only available when used with 'enableTestCommands'/),
- 0,
- fooOutput);
+// Negative case, invalid setparam.
+clearRawMongoProgramOutput();
+const foo = tryRun('foo=bar');
+assert.neq(foo, 0);
+const fooOutput = rawMongoProgramOutput();
+assert.gte(fooOutput.search(/Unknown --setParameter 'foo'/), 0, fooOutput);
+// Negative case, valid but unavailable setparam.
+clearRawMongoProgramOutput();
+const graph = tryRun('roleGraphInvalidationIsFatal=true');
+assert.neq(graph, 0);
+const graphOutput = rawMongoProgramOutput();
+assert.gte(
+ graphOutput.search(
+ /--setParameter 'roleGraphInvalidationIsFatal' only available when used with 'enableTestCommands'/),
+ 0,
+    graphOutput);
}());
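
The test above follows a clear/run/scan pattern: reset the captured output, launch mongod with --outputConfig, then search rawMongoProgramOutput() for the expected diagnostic. A hedged sketch of one more negative case, where 'madeUpParameter' is a deliberately bogus name chosen for illustration:

clearRawMongoProgramOutput();
const rc = runMongoProgram(
    './mongod', '--port', 0, '--setParameter', 'madeUpParameter=1', '--outputConfig');
assert.neq(rc, 0);  // An unknown parameter should make startup fail.
const output = rawMongoProgramOutput();
assert.gte(output.search(/Unknown --setParameter 'madeUpParameter'/), 0, output);
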
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
index 3f24a8b0f2c..c342341da69 100644
--- a/jstests/noPassthrough/unsupported_change_stream_deployments.js
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -2,60 +2,59 @@
// @tags: [requires_sharding, uses_change_streams]
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/libs/feature_compatibility_version.js"); // For checkFCV.
+"use strict";
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/libs/feature_compatibility_version.js"); // For checkFCV.
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- function assertChangeStreamNotSupportedOnConnection(conn) {
- const notReplicaSetErrorCode = 40573;
- assertErrorCode(
- conn.getDB("test").non_existent, [{$changeStream: {}}], notReplicaSetErrorCode);
- assertErrorCode(conn.getDB("test").non_existent,
- [{$changeStream: {fullDocument: "updateLookup"}}],
- notReplicaSetErrorCode);
- }
+function assertChangeStreamNotSupportedOnConnection(conn) {
+ const notReplicaSetErrorCode = 40573;
+ assertErrorCode(conn.getDB("test").non_existent, [{$changeStream: {}}], notReplicaSetErrorCode);
+ assertErrorCode(conn.getDB("test").non_existent,
+ [{$changeStream: {fullDocument: "updateLookup"}}],
+ notReplicaSetErrorCode);
+}
- const conn = MongoRunner.runMongod({enableMajorityReadConcern: ""});
- assert.neq(null, conn, "mongod was unable to start up");
- // $changeStream cannot run on a non-existent database.
- assert.writeOK(conn.getDB("test").ensure_db_exists.insert({}));
- assertChangeStreamNotSupportedOnConnection(conn);
- assert.eq(0, MongoRunner.stopMongod(conn));
+const conn = MongoRunner.runMongod({enableMajorityReadConcern: ""});
+assert.neq(null, conn, "mongod was unable to start up");
+// $changeStream cannot run on a non-existent database.
+assert.writeOK(conn.getDB("test").ensure_db_exists.insert({}));
+assertChangeStreamNotSupportedOnConnection(conn);
+assert.eq(0, MongoRunner.stopMongod(conn));
- // Test a sharded cluster with standalone shards.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const clusterWithStandalones = new ShardingTest({
- shards: 2,
- other: {shardOptions: {enableMajorityReadConcern: ""}},
- config: 1,
- shardAsReplicaSet: false
- });
- // Make sure the database exists before running any commands.
- const mongosDB = clusterWithStandalones.getDB("test");
- // enableSharding will create the db at the cluster level but not on the shards. $changeStream
- // through mongoS will be allowed to run on the shards despite the lack of a database.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: "test"}));
- assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.s);
- // Shard the 'ensure_db_exists' collection on a hashed key before running $changeStream on the
- // shards directly. This will ensure that the database is created on both shards.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: "test.ensure_db_exists", key: {_id: "hashed"}}));
- assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard0);
- assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard1);
- clusterWithStandalones.stop();
+// Test a sharded cluster with standalone shards.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const clusterWithStandalones = new ShardingTest({
+ shards: 2,
+ other: {shardOptions: {enableMajorityReadConcern: ""}},
+ config: 1,
+ shardAsReplicaSet: false
+});
+// Make sure the database exists before running any commands.
+const mongosDB = clusterWithStandalones.getDB("test");
+// enableSharding will create the db at the cluster level but not on the shards. $changeStream
+// through mongoS will be allowed to run on the shards despite the lack of a database.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: "test"}));
+assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.s);
+// Shard the 'ensure_db_exists' collection on a hashed key before running $changeStream on the
+// shards directly. This will ensure that the database is created on both shards.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: "test.ensure_db_exists", key: {_id: "hashed"}}));
+assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard0);
+assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard1);
+clusterWithStandalones.stop();
}());
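
For reference, assertErrorCode() from jstests/aggregation/extras/utils.js runs the given pipeline against the collection and asserts that it fails with the expected code; 40573 is the "not a replica set" error that $changeStream raises on standalones. A condensed standalone sketch under those assumptions ('sketch_coll' is a hypothetical collection name):

load("jstests/aggregation/extras/utils.js");  // For assertErrorCode.
const standalone = MongoRunner.runMongod({});
assert.neq(null, standalone, "mongod was unable to start up");
// $changeStream is rejected on a standalone with the "not a replica set" code.
assertErrorCode(standalone.getDB("test").sketch_coll, [{$changeStream: {}}], 40573);
MongoRunner.stopMongod(standalone);
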
diff --git a/jstests/noPassthrough/update_now_clustertime_replset.js b/jstests/noPassthrough/update_now_clustertime_replset.js
index 65503d7d5c3..fe3db8e4512 100644
--- a/jstests/noPassthrough/update_now_clustertime_replset.js
+++ b/jstests/noPassthrough/update_now_clustertime_replset.js
@@ -8,169 +8,126 @@
* @tags: [requires_find_command, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({name: jsTestName(), nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({name: jsTestName(), nodes: 1});
+rst.startSet();
+rst.initiate();
- const db = rst.getPrimary().getDB(jsTestName());
- const otherColl = db.other;
- const coll = db.test;
- otherColl.drop();
- coll.drop();
+const db = rst.getPrimary().getDB(jsTestName());
+const otherColl = db.other;
+const coll = db.test;
+otherColl.drop();
+coll.drop();
- // Insert N docs, with the _id field set to the current Date. We sleep for a short period
- // between insertions, such that the Date value increases for each successive document.
- let bulk = coll.initializeUnorderedBulkOp();
- const _idStart = new Date();
- const numDocs = 10;
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0)});
- if (i < numDocs - 1) {
- sleep(100);
- }
+// Insert N docs, with the _id field set to the current Date. We sleep for a short period
+// between insertions, such that the Date value increases for each successive document.
+let bulk = coll.initializeUnorderedBulkOp();
+const _idStart = new Date();
+const numDocs = 10;
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0)});
+ if (i < numDocs - 1) {
+ sleep(100);
}
- const _idEnd = new Date();
-
- assert.commandWorked(bulk.execute());
-
- // Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
- // documents.
- let writeResult =
- assert.commandWorked(coll.update({$where: "sleep(10); return true"},
- [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
- {multi: true}));
+}
+const _idEnd = new Date();
- assert.eq(writeResult.nMatched, numDocs);
- assert.eq(writeResult.nModified, numDocs);
+assert.commandWorked(bulk.execute());
- let results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now instanceof Date);
- assert(results[0].ctime instanceof Timestamp);
- for (let result of results) {
- assert.eq(result.now, results[0].now);
- assert.eq(result.ctime, results[0].ctime);
- }
+// Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
+// documents.
+let writeResult =
+ assert.commandWorked(coll.update({$where: "sleep(10); return true"},
+ [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- // Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
- // updates in a given batch.
- writeResult = assert.commandWorked(db.runCommand({
- update: coll.getName(),
- updates: [
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
- multi: true
- },
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
- multi: true
- }
- ]
- }));
+assert.eq(writeResult.nMatched, numDocs);
+assert.eq(writeResult.nModified, numDocs);
- assert.eq(writeResult.n, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+let results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now instanceof Date);
+assert(results[0].ctime instanceof Timestamp);
+for (let result of results) {
+ assert.eq(result.now, results[0].now);
+ assert.eq(result.ctime, results[0].ctime);
+}
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now2 instanceof Date);
- assert(results[0].ctime2 instanceof Timestamp);
- for (let result of results) {
- // The now2 and ctime2 fields are greater than the values from the previous update.
- assert.gt(result.now2, result.now);
- assert.gt(result.ctime2, result.ctime);
- // The now2 and ctime2 fields are the same across all documents.
- assert.eq(result.now2, results[0].now2);
- assert.eq(result.ctime2, results[0].ctime2);
- // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
- assert.eq(result.now2, result.now3);
- assert.eq(result.ctime2, result.ctime3);
- }
-
- // Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
- const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
- writeResult =
- assert.commandWorked(coll.update({
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
+// Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
+// updates in a given batch.
+writeResult = assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ updates: [
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
+ multi: true
},
- [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
- {multi: true}));
-
- assert.lt(writeResult.nMatched, numDocs);
- assert.lt(writeResult.nModified, numDocs);
-
- results = coll.find().sort({_id: 1}).toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now4 instanceof Date);
- assert(results[0].ctime4 instanceof Timestamp);
- for (let result of results) {
- if (result._id.getTime() < _idMidpoint.getTime()) {
- assert.eq(result.now4, results[0].now4);
- assert.eq(result.ctime4, results[0].ctime4);
- assert.gt(result.now4, result.now3);
- assert.gt(result.ctime4, result.ctime3);
- } else {
- assert.eq(result.now4, undefined);
- assert.eq(result.ctime4, undefined);
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
+ multi: true
}
- }
+ ]
+}));
- // Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(
- coll.explain().update(
- {
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- {multi: true}));
+assert.eq(writeResult.n, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- // Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
- // remain constant across all updates within a single bulk operation.
- // TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
- // size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
- // constant within each update command, but not across commands.
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
- ]);
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
- ]);
- writeResult = assert.commandWorked(bulk.execute());
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now2 instanceof Date);
+assert(results[0].ctime2 instanceof Timestamp);
+for (let result of results) {
+ // The now2 and ctime2 fields are greater than the values from the previous update.
+ assert.gt(result.now2, result.now);
+ assert.gt(result.ctime2, result.ctime);
+ // The now2 and ctime2 fields are the same across all documents.
+ assert.eq(result.now2, results[0].now2);
+ assert.eq(result.ctime2, results[0].ctime2);
+ // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
+ assert.eq(result.now2, result.now3);
+ assert.eq(result.ctime2, result.ctime3);
+}
- assert.eq(writeResult.nMatched, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+// Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
+const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
+writeResult =
+ assert.commandWorked(coll.update({
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now5 instanceof Date);
- assert(results[0].ctime5 instanceof Timestamp);
- for (let result of results) {
- // The now5 and ctime5 fields are the same across all documents.
- assert.eq(result.now5, results[0].now5);
- assert.eq(result.ctime5, results[0].ctime5);
- // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
- assert.eq(result.now5, result.now6);
- assert.eq(result.ctime5, result.ctime6);
+assert.lt(writeResult.nMatched, numDocs);
+assert.lt(writeResult.nModified, numDocs);
+
+results = coll.find().sort({_id: 1}).toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now4 instanceof Date);
+assert(results[0].ctime4 instanceof Timestamp);
+for (let result of results) {
+ if (result._id.getTime() < _idMidpoint.getTime()) {
+ assert.eq(result.now4, results[0].now4);
+ assert.eq(result.ctime4, results[0].ctime4);
+ assert.gt(result.now4, result.now3);
+ assert.gt(result.ctime4, result.ctime3);
+ } else {
+ assert.eq(result.now4, undefined);
+ assert.eq(result.ctime4, undefined);
}
+}
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
- let returnedDoc = coll.findAndModify({
- query: {
+// Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(
+ coll.explain().update(
+ {
$expr: {
$and: [
{$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
@@ -178,113 +135,152 @@
]
}
},
- update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- });
- assert(returnedDoc.nowFAM instanceof Date);
- assert(returnedDoc.ctimeFAM instanceof Timestamp);
- assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
- assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
+ [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+// Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
+// remain constant across all updates within a single bulk operation.
+// TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
+// size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
+// constant within each update command, but not across commands.
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
+]);
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
+]);
+writeResult = assert.commandWorked(bulk.execute());
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
- returnedDoc = coll.findAndModify({
- query: {fieldDoesNotExist: {$exists: true}},
- update:
- [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- upsert: true,
- new: true
- });
- assert(returnedDoc.nowFAMUpsert instanceof Date);
- assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
+assert.eq(writeResult.nMatched, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- assert.eq(coll.find().itcount(), numDocs + 1);
- results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now5 instanceof Date);
+assert(results[0].ctime5 instanceof Timestamp);
+for (let result of results) {
+ // The now5 and ctime5 fields are the same across all documents.
+ assert.eq(result.now5, results[0].now5);
+ assert.eq(result.ctime5, results[0].ctime5);
+ // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
+ assert.eq(result.now5, result.now6);
+ assert.eq(result.ctime5, result.ctime6);
+}
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
- returnedDoc = coll.findAndModify({
- query: {
- nowFAMUpsert: {$exists: true},
- ctimeFAMUpsert: {$exists: true},
- $expr: {
- $and: [
- {$lt: ["$nowFAMUpsert", "$$NOW"]},
- {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}
- ]
- }
- },
- sort: {_id: 1},
- remove: true
- });
- assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
- assert.eq(coll.find().itcount(), numDocs);
- assert.neq(returnedDoc, null);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
+let returnedDoc = coll.findAndModify({
+ query: {
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+});
+assert(returnedDoc.nowFAM instanceof Date);
+assert(returnedDoc.ctimeFAM instanceof Timestamp);
+assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
+assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
- // Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(coll.explain().findAndModify({
- query: {
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- update:
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- }));
+results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
- // Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
- // use $merge to copy the current contents of 'coll' into 'otherColl'.
- assert.commandWorked(db.createCollection(otherColl.getName()));
- assert.doesNotThrow(() => coll.aggregate([
- {$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
- ]));
- // Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
- // then do the same to the documents in the output collection via a pipeline update.
- assert.doesNotThrow(() => coll.aggregate([
- {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
- {
- $merge: {
- into: otherColl.getName(),
- let : {aggNow: "$aggNow", aggCT: "$aggCT"},
- whenMatched: [{
- $addFields: {
- aggNow: "$$aggNow",
- aggCT: "$$aggCT",
- mergeNow: "$$NOW",
- mergeCT: "$$CLUSTER_TIME"
- }
- }],
- whenNotMatched: "fail"
- }
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
+returnedDoc = coll.findAndModify({
+ query: {fieldDoesNotExist: {$exists: true}},
+ update: [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ upsert: true,
+ new: true
+});
+assert(returnedDoc.nowFAMUpsert instanceof Date);
+assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
+
+assert.eq(coll.find().itcount(), numDocs + 1);
+results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
+
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
+returnedDoc = coll.findAndModify({
+ query: {
+ nowFAMUpsert: {$exists: true},
+ ctimeFAMUpsert: {$exists: true},
+ $expr: {
+ $and:
+ [{$lt: ["$nowFAMUpsert", "$$NOW"]}, {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}]
+ }
+ },
+ sort: {_id: 1},
+ remove: true
+});
+assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
+assert.eq(coll.find().itcount(), numDocs);
+assert.neq(returnedDoc, null);
+
+// Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(coll.explain().findAndModify({
+ query: {
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+}));
+
+// Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
+// use $merge to copy the current contents of 'coll' into 'otherColl'.
+assert.commandWorked(db.createCollection(otherColl.getName()));
+assert.doesNotThrow(
+ () => coll.aggregate(
+ [{$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]));
+// Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
+// then do the same to the documents in the output collection via a pipeline update.
+assert.doesNotThrow(() => coll.aggregate([
+ {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
+ {
+ $merge: {
+ into: otherColl.getName(),
+ let : {aggNow: "$aggNow", aggCT: "$aggCT"},
+ whenMatched: [{
+ $addFields: {
+ aggNow: "$$aggNow",
+ aggCT: "$$aggCT",
+ mergeNow: "$$NOW",
+ mergeCT: "$$CLUSTER_TIME"
+ }
+ }],
+ whenNotMatched: "fail"
}
- ]));
- // Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
- results = otherColl.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].mergeNow instanceof Date);
- assert(results[0].mergeCT instanceof Timestamp);
- for (let result of results) {
- // The mergeNow and mergeCT fields are greater than the values from the previous updates.
- assert.gt(result.mergeNow, result.now5);
- assert.gt(result.mergeCT, result.ctime5);
- // The mergeNow and mergeCT fields are the same across all documents.
- assert.eq(result.mergeNow, results[0].mergeNow);
- assert.eq(result.mergeCT, results[0].mergeCT);
- // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
- assert.eq(result.mergeNow, result.aggNow);
- assert.eq(result.mergeCT, result.aggCT);
}
+]));
+// Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
+results = otherColl.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].mergeNow instanceof Date);
+assert(results[0].mergeCT instanceof Timestamp);
+for (let result of results) {
+ // The mergeNow and mergeCT fields are greater than the values from the previous updates.
+ assert.gt(result.mergeNow, result.now5);
+ assert.gt(result.mergeCT, result.ctime5);
+ // The mergeNow and mergeCT fields are the same across all documents.
+ assert.eq(result.mergeNow, results[0].mergeNow);
+ assert.eq(result.mergeCT, results[0].mergeCT);
+ // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
+ assert.eq(result.mergeNow, result.aggNow);
+ assert.eq(result.mergeCT, result.aggCT);
+}
- rst.stopSet();
+rst.stopSet();
}());
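
As the test above exercises, $$NOW and $$CLUSTER_TIME are only defined inside pipeline-style (array-form) updates, and they stay constant for the lifetime of a single command. A minimal sketch of stamping a document with them ('now_sketch' is a hypothetical collection name):

const sketchColl = db.now_sketch;  // hypothetical collection, for illustration
assert.writeOK(sketchColl.insert({_id: 1}));
// The array form makes the update a pipeline, so $$NOW/$$CLUSTER_TIME resolve.
assert.commandWorked(
    sketchColl.update({_id: 1}, [{$set: {stampedAt: "$$NOW", stampedCT: "$$CLUSTER_TIME"}}]));
const stamped = sketchColl.findOne({_id: 1});
assert(stamped.stampedAt instanceof Date);
assert(stamped.stampedCT instanceof Timestamp);
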
diff --git a/jstests/noPassthrough/update_now_clustertime_sharding.js b/jstests/noPassthrough/update_now_clustertime_sharding.js
index 52bb168c168..4d35fff55dd 100644
--- a/jstests/noPassthrough/update_now_clustertime_sharding.js
+++ b/jstests/noPassthrough/update_now_clustertime_sharding.js
@@ -8,111 +8,144 @@
* @tags: [requires_find_command, requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({name: jsTestName(), mongos: 1, shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({name: jsTestName(), mongos: 1, shards: 2, rs: {nodes: 1}});
- const db = st.s.getDB(jsTestName());
- const otherColl = db.other;
- const coll = db.test;
- otherColl.drop();
- coll.drop();
+const db = st.s.getDB(jsTestName());
+const otherColl = db.other;
+const coll = db.test;
+otherColl.drop();
+coll.drop();
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- // Create a sharded collection on {shard: 1}, split across the cluster at {shard: 1}. Do this
- // for both 'coll' and 'otherColl' so that the latter can be used for $merge tests later.
- for (let collToShard of[coll, otherColl]) {
- st.shardColl(collToShard, {shard: 1}, {shard: 1}, {shard: 1});
- }
+// Create a sharded collection on {shard: 1}, split across the cluster at {shard: 1}. Do this
+// for both 'coll' and 'otherColl' so that the latter can be used for $merge tests later.
+for (let collToShard of [coll, otherColl]) {
+ st.shardColl(collToShard, {shard: 1}, {shard: 1}, {shard: 1});
+}
- // Insert N docs, with the _id field set to the current Date. Sleep for a short period between
- // insertions, such that the Date value increases for each successive document. We additionally
- // ensure that the insertions alternate between the two shards by setting the shard key to
- // either 0 or 1.
- let bulk = coll.initializeUnorderedBulkOp();
- const _idStart = new Date();
- const numDocs = 10;
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0), shard: (i % 2)});
- if (i < numDocs - 1) {
- sleep(100);
- }
+// Insert N docs, with the _id field set to the current Date. Sleep for a short period between
+// insertions, such that the Date value increases for each successive document. We additionally
+// ensure that the insertions alternate between the two shards by setting the shard key to
+// either 0 or 1.
+let bulk = coll.initializeUnorderedBulkOp();
+const _idStart = new Date();
+const numDocs = 10;
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0), shard: (i % 2)});
+ if (i < numDocs - 1) {
+ sleep(100);
}
- const _idEnd = new Date();
+}
+const _idEnd = new Date();
- assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk.execute());
- // Test that we cannot issue an update to mongoS with runtime constants already present.
- assert.commandFailedWithCode(db.runCommand({
- update: coll.getName(),
- updates: [{q: {}, u: {$set: {operationFailsBeforeApplyingUpdates: true}}}],
- runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
- }),
- 51195);
+// Test that we cannot issue an update to mongoS with runtime constants already present.
+assert.commandFailedWithCode(db.runCommand({
+ update: coll.getName(),
+ updates: [{q: {}, u: {$set: {operationFailsBeforeApplyingUpdates: true}}}],
+ runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
+}),
+ 51195);
- // Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
- // documents.
- let writeResult =
- assert.commandWorked(coll.update({$where: "sleep(10); return true"},
- [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
- {multi: true}));
+// Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
+// documents.
+let writeResult =
+ assert.commandWorked(coll.update({$where: "sleep(10); return true"},
+ [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- assert.eq(writeResult.nMatched, numDocs);
- assert.eq(writeResult.nModified, numDocs);
+assert.eq(writeResult.nMatched, numDocs);
+assert.eq(writeResult.nModified, numDocs);
- let results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now instanceof Date);
- assert(results[0].ctime instanceof Timestamp);
- for (let result of results) {
- assert.eq(result.now, results[0].now);
- assert.eq(result.ctime, results[0].ctime);
- }
+let results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now instanceof Date);
+assert(results[0].ctime instanceof Timestamp);
+for (let result of results) {
+ assert.eq(result.now, results[0].now);
+ assert.eq(result.ctime, results[0].ctime);
+}
- // Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
- // updates in a given batch.
- writeResult = assert.commandWorked(db.runCommand({
- update: coll.getName(),
- updates: [
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
- multi: true
- },
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
- multi: true
- }
- ]
- }));
+// Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
+// updates in a given batch.
+writeResult = assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ updates: [
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
+ multi: true
+ },
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
+ multi: true
+ }
+ ]
+}));
+
+assert.eq(writeResult.n, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- assert.eq(writeResult.n, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now2 instanceof Date);
+assert(results[0].ctime2 instanceof Timestamp);
+for (let result of results) {
+ // The now2 and ctime2 fields are greater than the values from the previous update.
+ assert.gt(result.now2, result.now);
+ assert.gt(result.ctime2, result.ctime);
+ // The now2 and ctime2 fields are the same across all documents.
+ assert.eq(result.now2, results[0].now2);
+ assert.eq(result.ctime2, results[0].ctime2);
+ // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
+ assert.eq(result.now2, result.now3);
+ assert.eq(result.ctime2, result.ctime3);
+}
+
+// Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
+const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
+writeResult =
+ assert.commandWorked(coll.update({
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now2 instanceof Date);
- assert(results[0].ctime2 instanceof Timestamp);
- for (let result of results) {
- // The now2 and ctime2 fields are greater than the values from the previous update.
- assert.gt(result.now2, result.now);
- assert.gt(result.ctime2, result.ctime);
- // The now2 and ctime2 fields are the same across all documents.
- assert.eq(result.now2, results[0].now2);
- assert.eq(result.ctime2, results[0].ctime2);
- // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
- assert.eq(result.now2, result.now3);
- assert.eq(result.ctime2, result.ctime3);
+assert.lt(writeResult.nMatched, numDocs);
+assert.lt(writeResult.nModified, numDocs);
+
+results = coll.find().sort({_id: 1}).toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now4 instanceof Date);
+assert(results[0].ctime4 instanceof Timestamp);
+for (let result of results) {
+ if (result._id.getTime() < _idMidpoint.getTime()) {
+ assert.eq(result.now4, results[0].now4);
+ assert.eq(result.ctime4, results[0].ctime4);
+ assert.gt(result.now4, result.now3);
+ assert.gt(result.ctime4, result.ctime3);
+ } else {
+ assert.eq(result.now4, undefined);
+ assert.eq(result.ctime4, undefined);
}
+}
- // Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
- const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
- writeResult =
- assert.commandWorked(coll.update({
+// Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(
+ coll.explain().update(
+ {
$expr: {
$and: [
{$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
@@ -120,200 +153,163 @@
]
}
},
- [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
- {multi: true}));
-
- assert.lt(writeResult.nMatched, numDocs);
- assert.lt(writeResult.nModified, numDocs);
-
- results = coll.find().sort({_id: 1}).toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now4 instanceof Date);
- assert(results[0].ctime4 instanceof Timestamp);
- for (let result of results) {
- if (result._id.getTime() < _idMidpoint.getTime()) {
- assert.eq(result.now4, results[0].now4);
- assert.eq(result.ctime4, results[0].ctime4);
- assert.gt(result.now4, result.now3);
- assert.gt(result.ctime4, result.ctime3);
- } else {
- assert.eq(result.now4, undefined);
- assert.eq(result.ctime4, undefined);
- }
- }
-
- // Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(
- coll.explain().update(
- {
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- {multi: true}));
+ [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- // Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
- // remain constant across all updates within a single bulk operation.
- // TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
- // size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
- // constant within each update command, but not across commands.
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
- ]);
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
- ]);
- writeResult = assert.commandWorked(bulk.execute());
+// Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
+// remain constant across all updates within a single bulk operation.
+// TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
+// size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
+// constant within each update command, but not across commands.
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
+]);
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
+]);
+writeResult = assert.commandWorked(bulk.execute());
- assert.eq(writeResult.nMatched, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+assert.eq(writeResult.nMatched, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now5 instanceof Date);
- assert(results[0].ctime5 instanceof Timestamp);
- for (let result of results) {
- // The now5 and ctime5 fields are the same across all documents.
- assert.eq(result.now5, results[0].now5);
- assert.eq(result.ctime5, results[0].ctime5);
- // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
- assert.eq(result.now5, result.now6);
- assert.eq(result.ctime5, result.ctime6);
- }
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now5 instanceof Date);
+assert(results[0].ctime5 instanceof Timestamp);
+for (let result of results) {
+ // The now5 and ctime5 fields are the same across all documents.
+ assert.eq(result.now5, results[0].now5);
+ assert.eq(result.ctime5, results[0].ctime5);
+ // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
+ assert.eq(result.now5, result.now6);
+ assert.eq(result.ctime5, result.ctime6);
+}
- // Test that we cannot issue a findAndModify to mongoS with runtime constants already present.
- assert.commandFailedWithCode(db.runCommand({
- findAndModify: coll.getName(),
- query: {},
- update: {$set: {operationFailsBeforeApplyingUpdates: true}},
- runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
- }),
- 51196);
+// Test that we cannot issue a findAndModify to mongoS with runtime constants already present.
+assert.commandFailedWithCode(db.runCommand({
+ findAndModify: coll.getName(),
+ query: {},
+ update: {$set: {operationFailsBeforeApplyingUpdates: true}},
+ runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
+}),
+ 51196);
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
- let returnedDoc = coll.findAndModify({
- query: {
- shard: 0,
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- });
- assert(returnedDoc.nowFAM instanceof Date);
- assert(returnedDoc.ctimeFAM instanceof Timestamp);
- assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
- assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
+let returnedDoc = coll.findAndModify({
+ query: {
+ shard: 0,
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+});
+assert(returnedDoc.nowFAM instanceof Date);
+assert(returnedDoc.ctimeFAM instanceof Timestamp);
+assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
+assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
- results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
- returnedDoc = coll.findAndModify({
- query: {shard: 0, fieldDoesNotExist: {$exists: true}},
- update:
- [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- upsert: true,
- new: true
- });
- assert(returnedDoc.nowFAMUpsert instanceof Date);
- assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
+returnedDoc = coll.findAndModify({
+ query: {shard: 0, fieldDoesNotExist: {$exists: true}},
+ update: [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ upsert: true,
+ new: true
+});
+assert(returnedDoc.nowFAMUpsert instanceof Date);
+assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
- assert.eq(coll.find().itcount(), numDocs + 1);
- results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+assert.eq(coll.find().itcount(), numDocs + 1);
+results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
- returnedDoc = coll.findAndModify({
- query: {
- shard: 0,
- nowFAMUpsert: {$exists: true},
- ctimeFAMUpsert: {$exists: true},
- $expr: {
- $and: [
- {$lt: ["$nowFAMUpsert", "$$NOW"]},
- {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}
- ]
- }
- },
- sort: {_id: 1},
- remove: true
- });
- assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
- assert.eq(coll.find().itcount(), numDocs);
- assert.neq(returnedDoc, null);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
+returnedDoc = coll.findAndModify({
+ query: {
+ shard: 0,
+ nowFAMUpsert: {$exists: true},
+ ctimeFAMUpsert: {$exists: true},
+ $expr: {
+ $and:
+ [{$lt: ["$nowFAMUpsert", "$$NOW"]}, {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}]
+ }
+ },
+ sort: {_id: 1},
+ remove: true
+});
+assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
+assert.eq(coll.find().itcount(), numDocs);
+assert.neq(returnedDoc, null);
- // Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(coll.explain().findAndModify({
- query: {
- shard: 0,
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- update:
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- }));
+// Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(coll.explain().findAndModify({
+ query: {
+ shard: 0,
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+}));
- // Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
- // use $merge to copy the current contents of 'coll' into 'otherColl'.
- assert.doesNotThrow(() => coll.aggregate([
- {$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
- ]));
- // Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
- // then do the same to the documents in the output collection via a pipeline update.
- assert.doesNotThrow(() => coll.aggregate([
- {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
- {
- $merge: {
- into: otherColl.getName(),
- let : {aggNow: "$aggNow", aggCT: "$aggCT"},
- whenMatched: [{
- $addFields: {
- aggNow: "$$aggNow",
- aggCT: "$$aggCT",
- mergeNow: "$$NOW",
- mergeCT: "$$CLUSTER_TIME"
- }
- }],
- whenNotMatched: "fail"
- }
+// Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
+// use $merge to copy the current contents of 'coll' into 'otherColl'.
+assert.doesNotThrow(
+ () => coll.aggregate(
+ [{$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]));
+// Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
+// then do the same to the documents in the output collection via a pipeline update.
+assert.doesNotThrow(() => coll.aggregate([
+ {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
+ {
+ $merge: {
+ into: otherColl.getName(),
+ let : {aggNow: "$aggNow", aggCT: "$aggCT"},
+ whenMatched: [{
+ $addFields: {
+ aggNow: "$$aggNow",
+ aggCT: "$$aggCT",
+ mergeNow: "$$NOW",
+ mergeCT: "$$CLUSTER_TIME"
+ }
+ }],
+ whenNotMatched: "fail"
}
- ]));
- // Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
- results = otherColl.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].mergeNow instanceof Date);
- assert(results[0].mergeCT instanceof Timestamp);
- for (let result of results) {
- // The mergeNow and mergeCT fields are greater than the values from the previous updates.
- assert.gt(result.mergeNow, result.now5);
- assert.gt(result.mergeCT, result.ctime5);
- // The mergeNow and mergeCT fields are the same across all documents.
- assert.eq(result.mergeNow, results[0].mergeNow);
- assert.eq(result.mergeCT, results[0].mergeCT);
- // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
- assert.eq(result.mergeNow, result.aggNow);
- assert.eq(result.mergeCT, result.aggCT);
}
+]));
+// Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
+results = otherColl.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].mergeNow instanceof Date);
+assert(results[0].mergeCT instanceof Timestamp);
+for (let result of results) {
+ // The mergeNow and mergeCT fields are greater than the values from the previous updates.
+ assert.gt(result.mergeNow, result.now5);
+ assert.gt(result.mergeCT, result.ctime5);
+ // The mergeNow and mergeCT fields are the same across all documents.
+ assert.eq(result.mergeNow, results[0].mergeNow);
+ assert.eq(result.mergeCT, results[0].mergeCT);
+ // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
+ assert.eq(result.mergeNow, result.aggNow);
+ assert.eq(result.mergeCT, result.aggCT);
+}
- st.stop();
+st.stop();
}());
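
The behaviour exercised above reduces to a small shell sketch (hedged: 'events' is an
illustrative collection name, and $$CLUSTER_TIME is only defined on replica sets and
sharded clusters):

    // Stamp the command's read time into every matching document. $$NOW evaluates
    // to the same Date for each document touched by a single command.
    db.events.update(
        {}, [{$set: {observedAt: "$$NOW", observedCT: "$$CLUSTER_TIME"}}], {multi: true});
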
diff --git a/jstests/noPassthrough/update_post_image_validation.js b/jstests/noPassthrough/update_post_image_validation.js
index 75d0c4ddfcf..ad78227a09b 100644
--- a/jstests/noPassthrough/update_post_image_validation.js
+++ b/jstests/noPassthrough/update_post_image_validation.js
@@ -1,28 +1,28 @@
// Verify that the update system correctly rejects invalid entries during post-image validation.
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
+const testDB = conn.getDB("test");
- // Test validation of elements added to an array that is represented in a "deserialized" format
- // in mutablebson. The added element is invalid because it is a DBRef with a missing $id.
- assert.writeOK(testDB.coll.insert({_id: 0, a: []}));
- assert.writeErrorWithCode(
- testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}),
- ErrorCodes.InvalidDBRef);
- assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: []});
+// Test validation of elements added to an array that is represented in a "deserialized" format
+// in mutablebson. The added element is invalid because it is a DBRef with a missing $id.
+assert.writeOK(testDB.coll.insert({_id: 0, a: []}));
+assert.writeErrorWithCode(
+ testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}),
+ ErrorCodes.InvalidDBRef);
+assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: []});
- // Test validation of modified array elements that are accessed using a string that is
- // numerically equivalent to their fieldname. The modified element is invalid because it is a
- // DBRef with a missing $id.
- assert.writeOK(testDB.coll.insert({_id: 1, a: [0]}));
- assert.writeErrorWithCode(
- testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}),
- ErrorCodes.InvalidDBRef);
- assert.docEq(testDB.coll.findOne({_id: 1}), {_id: 1, a: [0]});
+// Test validation of modified array elements that are accessed using a string that is
+// numerically equivalent to their fieldname. The modified element is invalid because it is a
+// DBRef with a missing $id.
+assert.writeOK(testDB.coll.insert({_id: 1, a: [0]}));
+assert.writeErrorWithCode(
+ testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}),
+ ErrorCodes.InvalidDBRef);
+assert.docEq(testDB.coll.findOne({_id: 1}), {_id: 1, a: [0]});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
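
For contrast with the rejected updates above, a DBRef that carries the required $ref and
$id fields passes post-image validation (a sketch against the same 'testDB.coll'):

    assert.writeOK(testDB.coll.insert({_id: 2, a: []}));
    // Valid: $id is present, so the post-image passes DBRef validation.
    assert.writeOK(
        testDB.coll.update({_id: 2}, {$set: {"a.0": {$ref: "coll", $id: 0, $db: "test"}}}));
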
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index c0a8dc4fb2e..dd18a14d72c 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -1,38 +1,38 @@
var db;
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+db = conn.getDB("test");
- const t = db.foo;
- t.drop();
+const t = db.foo;
+t.drop();
- const N = 10000;
+const N = 10000;
- var bulk = t.initializeUnorderedBulkOp();
- for (let i = 0; i < N; i++) {
- bulk.insert({_id: i, x: 1});
- }
- assert.writeOK(bulk.execute());
+var bulk = t.initializeUnorderedBulkOp();
+for (let i = 0; i < N; i++) {
+ bulk.insert({_id: i, x: 1});
+}
+assert.writeOK(bulk.execute());
- const join = startParallelShell(
- "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
+const join = startParallelShell(
+ "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
- t.update({
- $where: function() {
- sleep(1);
- return true;
- }
- },
- {$set: {x: 5}},
- false,
- true);
- db.getLastError();
+t.update({
+ $where: function() {
+ sleep(1);
+ return true;
+ }
+},
+ {$set: {x: 5}},
+ false,
+ true);
+db.getLastError();
- join();
+join();
- assert.eq(N, t.find({x: 5}).count());
+assert.eq(N, t.find({x: 5}).count());
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
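
The $where predicate above runs server-side JavaScript once per matched document, which
is what keeps the multi-update running long enough to race the index build. A minimal
standalone use of the same operator (a sketch; $where requires server-side JS to be
enabled):

    // Counts documents for which the JS predicate returns true.
    db.foo.find({$where: function() {
        return this.x == 1;
    }}).itcount();
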
diff --git a/jstests/noPassthrough/upsert_duplicate_key_retry.js b/jstests/noPassthrough/upsert_duplicate_key_retry.js
index 5841f5a7eb0..c2015642b0e 100644
--- a/jstests/noPassthrough/upsert_duplicate_key_retry.js
+++ b/jstests/noPassthrough/upsert_duplicate_key_retry.js
@@ -10,81 +10,79 @@
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const testDB = rst.getPrimary().getDB("test");
- const adminDB = testDB.getSiblingDB("admin");
- const collName = "upsert_duplicate_key_retry";
- const testColl = testDB.getCollection(collName);
-
- testDB.runCommand({drop: collName});
-
- // Queries current operations until 'count' matching operations are found.
- function awaitMatchingCurrentOpCount(message, count) {
- assert.soon(() => {
- const currentOp =
- adminDB.aggregate([{$currentOp: {}}, {$match: {msg: message}}]).toArray();
- return (currentOp.length === count);
- });
- }
-
- function performUpsert() {
- // This function is called from startParallelShell(), so closed-over variables will not be
- // available. We must re-obtain the value of 'testColl' in the function body.
- const testColl = db.getMongo().getDB("test").getCollection("upsert_duplicate_key_retry");
- assert.commandWorked(testColl.update({x: 3}, {$inc: {y: 1}}, {upsert: true}));
- }
-
- assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
-
- // The failpoint hangs upsert operations just prior to performing an insert.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "alwaysOn"}));
-
- const awaitUpdate1 = startParallelShell(performUpsert, rst.ports[0]);
- const awaitUpdate2 = startParallelShell(performUpsert, rst.ports[0]);
-
- awaitMatchingCurrentOpCount("hangBeforeUpsertPerformsInsert", 2);
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "off"}));
-
- awaitUpdate1();
- awaitUpdate2();
-
- const cursor = testColl.find({}, {_id: 0});
- assert.eq(cursor.next(), {x: 3, y: 2});
- assert(!cursor.hasNext(), cursor.toArray());
-
- // Confirm that oplog entries exist for both the insert and update operations.
- const oplogColl = testDB.getSiblingDB("local").getCollection("oplog.rs");
- assert.eq(1, oplogColl.find({"op": "i", "ns": "test.upsert_duplicate_key_retry"}).itcount());
- assert.eq(1, oplogColl.find({"op": "u", "ns": "test.upsert_duplicate_key_retry"}).itcount());
-
- //
- // Confirm DuplicateKey error for cases that should not be retried.
- //
- assert.commandWorked(testDB.runCommand({drop: collName}));
- assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
-
- // DuplicateKey error on replacement-style upsert, where the unique index key value to be
- // written does not match the value of the query predicate.
- assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
- assert.commandWorked(testColl.insert({_id: 1, 'a': 12345}));
- assert.commandFailedWithCode(testColl.update({x: 3}, {}, {upsert: true}),
- ErrorCodes.DuplicateKey);
-
- // DuplicateKey error on update-style upsert, where the unique index key value to be written
- // does not match the value of the query predicate.
- assert.commandWorked(testColl.remove({}));
- assert.commandWorked(testColl.insert({x: 3}));
- assert.commandWorked(testColl.insert({x: 4}));
- assert.commandFailedWithCode(testColl.update({x: 3}, {$inc: {x: 1}}, {upsert: true}),
- ErrorCodes.DuplicateKey);
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const testDB = rst.getPrimary().getDB("test");
+const adminDB = testDB.getSiblingDB("admin");
+const collName = "upsert_duplicate_key_retry";
+const testColl = testDB.getCollection(collName);
+
+testDB.runCommand({drop: collName});
+
+// Queries current operations until 'count' matching operations are found.
+function awaitMatchingCurrentOpCount(message, count) {
+ assert.soon(() => {
+ const currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: {msg: message}}]).toArray();
+ return (currentOp.length === count);
+ });
+}
+
+function performUpsert() {
+ // This function is called from startParallelShell(), so closed-over variables will not be
+ // available. We must re-obtain the value of 'testColl' in the function body.
+ const testColl = db.getMongo().getDB("test").getCollection("upsert_duplicate_key_retry");
+ assert.commandWorked(testColl.update({x: 3}, {$inc: {y: 1}}, {upsert: true}));
+}
+
+assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
+
+// The failpoint hangs upsert operations just prior to performing an insert.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "alwaysOn"}));
+
+const awaitUpdate1 = startParallelShell(performUpsert, rst.ports[0]);
+const awaitUpdate2 = startParallelShell(performUpsert, rst.ports[0]);
+
+awaitMatchingCurrentOpCount("hangBeforeUpsertPerformsInsert", 2);
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "off"}));
+
+awaitUpdate1();
+awaitUpdate2();
+
+const cursor = testColl.find({}, {_id: 0});
+assert.eq(cursor.next(), {x: 3, y: 2});
+assert(!cursor.hasNext(), cursor.toArray());
+
+// Confirm that oplog entries exist for both the insert and update operations.
+const oplogColl = testDB.getSiblingDB("local").getCollection("oplog.rs");
+assert.eq(1, oplogColl.find({"op": "i", "ns": "test.upsert_duplicate_key_retry"}).itcount());
+assert.eq(1, oplogColl.find({"op": "u", "ns": "test.upsert_duplicate_key_retry"}).itcount());
+
+//
+// Confirm DuplicateKey error for cases that should not be retried.
+//
+assert.commandWorked(testDB.runCommand({drop: collName}));
+assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
+
+// DuplicateKey error on replacement-style upsert, where the unique index key value to be
+// written does not match the value of the query predicate.
+assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
+assert.commandWorked(testColl.insert({_id: 1, 'a': 12345}));
+assert.commandFailedWithCode(testColl.update({x: 3}, {}, {upsert: true}), ErrorCodes.DuplicateKey);
+
+// DuplicateKey error on update-style upsert, where the unique index key value to be written
+// does not match the value of the query predicate.
+assert.commandWorked(testColl.remove({}));
+assert.commandWorked(testColl.insert({x: 3}));
+assert.commandWorked(testColl.insert({x: 4}));
+assert.commandFailedWithCode(testColl.update({x: 3}, {$inc: {x: 1}}, {upsert: true}),
+ ErrorCodes.DuplicateKey);
+
+rst.stopSet();
})();
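
The retry being tested applies only when two identical upserts race on a unique index:
the loser's insert fails with DuplicateKey and is transparently retried as an update. A
sketch of the retry-eligible shape, assuming a unique index on {x: 1}:

    // Safe to retry: the query predicate {x: 3} fully determines the unique key
    // the insert would write. The failing cases above break that property.
    assert.commandWorked(testColl.update({x: 3}, {$inc: {y: 1}}, {upsert: true}));
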
diff --git a/jstests/noPassthrough/use_disk.js b/jstests/noPassthrough/use_disk.js
index ec7778d23da..7cb10d796ac 100644
--- a/jstests/noPassthrough/use_disk.js
+++ b/jstests/noPassthrough/use_disk.js
@@ -3,150 +3,146 @@
// Confirms that profiled aggregation execution contains expected values for usedDisk.
(function() {
- "use strict";
+"use strict";
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
- const conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=8"});
- const testDB = conn.getDB("profile_agg");
- const coll = testDB.getCollection("test");
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+const conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=8"});
+const testDB = conn.getDB("profile_agg");
+const coll = testDB.getCollection("test");
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- function resetCollection() {
- coll.drop();
- for (var i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+function resetCollection() {
+ coll.drop();
+ for (var i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
}
- function resetForeignCollection() {
- testDB.foreign.drop();
- const forColl = testDB.getCollection("foreign");
- for (var i = 4; i < 18; i += 2)
- assert.writeOK(forColl.insert({b: i}));
- }
- //
- // Confirm hasSortStage with in-memory sort.
- //
- resetCollection();
- //
- // Confirm 'usedDisk' is not set if 'allowDiskUse' is set but no stages need to use disk.
- //
- coll.aggregate([{$match: {a: {$gte: 2}}}], {allowDiskUse: true});
- var profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
-
- resetCollection();
- coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
-
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
- assert.eq(8,
- coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true})
- .itcount());
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
-
- //
- // Confirm that disk use is correctly detected for the $facet stage.
- //
- resetCollection();
- coll.aggregate([{$facet: {"aSort": [{$sortByCount: "$a"}]}}], {allowDiskUse: true});
-
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected for the $group stage.
- //
- resetCollection();
-
- coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
-
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: 10}));
- resetCollection();
- coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected for the $lookup stage with a subsequent $unwind.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [
- {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
- {$unwind: "$same"}
- ],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected for the $lookup stage without a subsequent
- // $unwind.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [{$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}}],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected when $limit is set after the $lookup stage.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [
- {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
- {$unwind: "$same"},
- {$limit: 3}
- ],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected when $limit is set before the $lookup stage.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [
- {$limit: 1},
- {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
- {$unwind: "$same"}
- ],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Test that usedDisk is not set for a $lookup with a pipeline that does not use disk.
- //
- assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024}));
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [{
- $lookup:
- {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "otherTest", as: "same"}
- }],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
- MongoRunner.stopMongod(conn);
+}
+function resetForeignCollection() {
+ testDB.foreign.drop();
+ const forColl = testDB.getCollection("foreign");
+ for (var i = 4; i < 18; i += 2)
+ assert.writeOK(forColl.insert({b: i}));
+}
+//
+// Confirm hasSortStage with in-memory sort.
+//
+resetCollection();
+//
+// Confirm 'usedDisk' is not set if 'allowDiskUse' is set but no stages need to use disk.
+//
+coll.aggregate([{$match: {a: {$gte: 2}}}], {allowDiskUse: true});
+var profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+
+resetCollection();
+coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
+assert.eq(
+ 8, coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true}).itcount());
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+
+//
+// Confirm that disk use is correctly detected for the $facet stage.
+//
+resetCollection();
+coll.aggregate([{$facet: {"aSort": [{$sortByCount: "$a"}]}}], {allowDiskUse: true});
+
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected for the $group stage.
+//
+resetCollection();
+
+coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: 10}));
+resetCollection();
+coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected for the $lookup stage with a subsequent $unwind.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [
+ {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
+ {$unwind: "$same"}
+ ],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected for the $lookup stage without a subsequent
+// $unwind.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [{$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}}],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected when $limit is set after the $lookup stage.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [
+ {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
+ {$unwind: "$same"},
+ {$limit: 3}
+ ],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected when $limit is set before the $lookup stage.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [
+ {$limit: 1},
+ {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
+ {$unwind: "$same"}
+ ],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Test that usedDisk is not set for a $lookup with a pipeline that does not use disk.
+//
+assert.commandWorked(testDB.adminCommand(
+ {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024}));
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [{$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "otherTest", as: "same"}}],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+MongoRunner.stopMongod(conn);
})();
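
Each case above follows the same recipe: lower the relevant memory limit, run the
pipeline with allowDiskUse, then inspect the newest profiler entry. A condensed sketch
using the same server parameter and helpers this test already loads:

    assert.commandWorked(testDB.adminCommand(
        {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
    testDB.setProfilingLevel(2);
    coll.aggregate([{$sort: {a: 1}}], {allowDiskUse: true}).itcount();
    const entry = getLatestProfilerEntry(testDB);
    // usedDisk only appears in the entry when some stage actually spilled.
    printjson({usedDisk: entry.usedDisk, hasSortStage: entry.hasSortStage});
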
diff --git a/jstests/noPassthrough/utf8_paths.js b/jstests/noPassthrough/utf8_paths.js
index 49cb5a63bac..b7b17355457 100644
--- a/jstests/noPassthrough/utf8_paths.js
+++ b/jstests/noPassthrough/utf8_paths.js
@@ -2,36 +2,36 @@
 * Test that verifies mongod can start up using paths that contain non-ASCII UTF-8 characters.
*/
(function() {
- 'use strict';
- var db_name = "ελληνικά";
- var path = MongoRunner.dataPath + "Росси́я";
-
- mkdir(path);
-
- // Test MongoD
- let testMongoD = function() {
- let options = {
- dbpath: path,
- useLogFiles: true,
- pidfilepath: path + "/pidfile",
- };
+'use strict';
+var db_name = "ελληνικά";
+var path = MongoRunner.dataPath + "Росси́я";
+
+mkdir(path);
+
+// Test MongoD
+let testMongoD = function() {
+ let options = {
+ dbpath: path,
+ useLogFiles: true,
+ pidfilepath: path + "/pidfile",
+ };
- // directoryperdb is only supported with the wiredTiger storage engine
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- options["directoryperdb"] = "";
- }
+ // directoryperdb is only supported with the wiredTiger storage engine
+ if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ options["directoryperdb"] = "";
+ }
- let conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+ let conn = MongoRunner.runMongod(options);
+ assert.neq(null, conn, 'mongod was unable to start up');
- let coll = conn.getCollection(db_name + ".foo");
- assert.writeOK(coll.insert({_id: 1}));
+ let coll = conn.getCollection(db_name + ".foo");
+ assert.writeOK(coll.insert({_id: 1}));
- MongoRunner.stopMongod(conn);
- };
+ MongoRunner.stopMongod(conn);
+};
- testMongoD();
+testMongoD();
- // Start a second time to test things like log rotation.
- testMongoD();
+// Start a second time to test things like log rotation.
+testMongoD();
})();
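
The startup sequence under test is short enough to state directly (a sketch; the
non-ASCII path component is arbitrary):

    const utf8Path = MongoRunner.dataPath + "日本語";
    mkdir(utf8Path);
    const mongod = MongoRunner.runMongod({dbpath: utf8Path, useLogFiles: true});
    assert.neq(null, mongod, "mongod failed to start from a UTF-8 dbpath");
    MongoRunner.stopMongod(mongod);
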
diff --git a/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js b/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
index 8a3c1d0a5ac..4b4cacfbbba 100644
--- a/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
+++ b/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
@@ -7,195 +7,191 @@
var db;
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- // We skip doing the data consistency checks while terminating the cluster because they conflict
- // with the counts of the number of times the "validate" command is run.
- TestData.skipCollectionAndIndexValidation = true;
-
- function makePatternForValidate(dbName, collName) {
- return new RegExp(
- "COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
- "\"",
- "g");
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+// We skip doing the data consistency checks while terminating the cluster because they conflict
+// with the counts of the number of times the "validate" command is run.
+TestData.skipCollectionAndIndexValidation = true;
+
+function makePatternForValidate(dbName, collName) {
+ return new RegExp("COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
+ collName + "\"",
+ "g");
+}
+
+function makePatternForSetFCV(targetVersion) {
+ return new RegExp(
+ "COMMAND.*command.*appName: \"MongoDB Shell\" command: setFeatureCompatibilityVersion" +
+ " { setFeatureCompatibilityVersion: \"" + targetVersion + "\"",
+ "g");
+}
+
+function countMatches(pattern, output) {
+ assert(pattern.global, "the 'g' flag must be used to find all matches");
+
+ let numMatches = 0;
+ while (pattern.exec(output) !== null) {
+ ++numMatches;
}
-
- function makePatternForSetFCV(targetVersion) {
- return new RegExp(
- "COMMAND.*command.*appName: \"MongoDB Shell\" command: setFeatureCompatibilityVersion" +
- " { setFeatureCompatibilityVersion: \"" + targetVersion + "\"",
- "g");
+ return numMatches;
+}
+
+function runValidateHook(testCase) {
+ db = testCase.conn.getDB("test");
+ TestData.forceValidationWithFeatureCompatibilityVersion = latestFCV;
+ try {
+ clearRawMongoProgramOutput();
+
+ load("jstests/hooks/run_validate_collections.js");
+
+ // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
+ // will return all of their output.
+ testCase.teardown();
+ return rawMongoProgramOutput();
+ } finally {
+ db = undefined;
+ TestData.forceValidationWithFeatureCompatibilityVersion = undefined;
}
-
- function countMatches(pattern, output) {
- assert(pattern.global, "the 'g' flag must be used to find all matches");
-
- let numMatches = 0;
- while (pattern.exec(output) !== null) {
- ++numMatches;
+}
+
+function testStandalone(additionalSetupFn, {
+ expectedAtTeardownFCV,
+ expectedSetLastStableFCV: expectedSetLastStableFCV = 0,
+ expectedSetLatestFCV: expectedSetLatestFCV = 0
+} = {}) {
+ const conn =
+ MongoRunner.runMongod({setParameter: {logComponentVerbosity: tojson({command: 1})}});
+ assert.neq(conn, "mongod was unable to start up");
+
+ // Insert a document so the "validate" command has some actual work to do.
+ assert.commandWorked(conn.getDB("test").mycoll.insert({}));
+
+ // Run the additional setup function to put the server into the desired state.
+ additionalSetupFn(conn);
+
+ const output = runValidateHook({
+ conn: conn,
+ teardown: () => {
+ // The validate hook should leave the server with a feature compatibility version of
+ // 'expectedAtTeardownFCV' and no targetVersion.
+ checkFCV(conn.getDB("admin"), expectedAtTeardownFCV);
+ MongoRunner.stopMongod(conn);
}
- return numMatches;
+ });
+
+ const pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(1,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from mongod in the log output");
+
+ for (let [targetVersion, expectedCount] of [[lastStableFCV, expectedSetLastStableFCV],
+ [latestFCV, expectedSetLatestFCV]]) {
+ // Since the additionalSetupFn() function may run the setFeatureCompatibilityVersion
+ // command and we don't have a guarantee those log messages were cleared when
+ // clearRawMongoProgramOutput() was called, we assert 'expectedSetLastStableFCV' and
+ // 'expectedSetLatestFCV' as lower bounds.
+ const pattern = makePatternForSetFCV(targetVersion);
+ assert.lte(expectedCount,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from mongod in the log output");
}
+}
+
+function forceInterruptedUpgradeOrDowngrade(conn, targetVersion) {
+ // We create a separate connection to the server exclusively for running the
+ // setFeatureCompatibilityVersion command so only that operation is ever interrupted by
+ // the checkForInterruptFail failpoint.
+ const setFCVConn = new Mongo(conn.host);
+ const myUriRes = assert.commandWorked(setFCVConn.adminCommand({whatsmyuri: 1}));
+ const myUri = myUriRes.you;
+
+ const curOpRes = assert.commandWorked(setFCVConn.adminCommand({currentOp: 1, client: myUri}));
+ const threadName = curOpRes.inprog[0].desc;
+
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "checkForInterruptFail",
+ mode: "alwaysOn",
+ data: {threadName, chance: 0.05},
+ }));
+
+ let attempts = 0;
+ assert.soon(
+ function() {
+ let res = setFCVConn.adminCommand({setFeatureCompatibilityVersion: targetVersion});
+
+ if (res.ok === 1) {
+ assert.commandWorked(res);
+ } else {
+ assert.commandFailedWithCode(res, ErrorCodes.Interrupted);
+ }
- function runValidateHook(testCase) {
- db = testCase.conn.getDB("test");
- TestData.forceValidationWithFeatureCompatibilityVersion = latestFCV;
- try {
- clearRawMongoProgramOutput();
-
- load("jstests/hooks/run_validate_collections.js");
-
- // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
- // will return all of their output.
- testCase.teardown();
- return rawMongoProgramOutput();
- } finally {
- db = undefined;
- TestData.forceValidationWithFeatureCompatibilityVersion = undefined;
- }
- }
+ ++attempts;
+
+ res = assert.commandWorked(
+ conn.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
- function testStandalone(additionalSetupFn, {
- expectedAtTeardownFCV,
- expectedSetLastStableFCV: expectedSetLastStableFCV = 0,
- expectedSetLatestFCV: expectedSetLatestFCV = 0
- } = {}) {
- const conn =
- MongoRunner.runMongod({setParameter: {logComponentVerbosity: tojson({command: 1})}});
- assert.neq(conn, "mongod was unable to start up");
-
- // Insert a document so the "validate" command has some actual work to do.
- assert.commandWorked(conn.getDB("test").mycoll.insert({}));
-
- // Run the additional setup function to put the server into the desired state.
- additionalSetupFn(conn);
-
- const output = runValidateHook({
- conn: conn,
- teardown: () => {
- // The validate hook should leave the server with a feature compatibility version of
- // 'expectedAtTeardownFCV' and no targetVersion.
- checkFCV(conn.getDB("admin"), expectedAtTeardownFCV);
- MongoRunner.stopMongod(conn);
+ if (res.featureCompatibilityVersion.hasOwnProperty("targetVersion")) {
+ checkFCV(conn.getDB("admin"), lastStableFCV, targetVersion);
+ jsTest.log(`Reached partially downgraded state after ${attempts} attempts`);
+ return true;
}
- });
-
- const pattern = makePatternForValidate("test", "mycoll");
- assert.eq(1,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from mongod in the log output");
-
- for (let [targetVersion, expectedCount] of[[lastStableFCV, expectedSetLastStableFCV],
- [latestFCV, expectedSetLatestFCV]]) {
- // Since the additionalSetupFn() function may run the setFeatureCompatibilityVersion
- // command and we don't have a guarantee those log messages were cleared when
- // clearRawMongoProgramOutput() was called, we assert 'expectedSetLastStableFCV' and
- // 'expectedSetLatestFCV' as lower bounds.
- const pattern = makePatternForSetFCV(targetVersion);
- assert.lte(expectedCount,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from mongod in the log output");
- }
- }
- function forceInterruptedUpgradeOrDowngrade(conn, targetVersion) {
- // We create a separate connection to the server exclusively for running the
- // setFeatureCompatibilityVersion command so only that operation is ever interrupted by
- // the checkForInterruptFail failpoint.
- const setFCVConn = new Mongo(conn.host);
- const myUriRes = assert.commandWorked(setFCVConn.adminCommand({whatsmyuri: 1}));
- const myUri = myUriRes.you;
-
- const curOpRes =
- assert.commandWorked(setFCVConn.adminCommand({currentOp: 1, client: myUri}));
- const threadName = curOpRes.inprog[0].desc;
-
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "checkForInterruptFail",
- mode: "alwaysOn",
- data: {threadName, chance: 0.05},
- }));
-
- let attempts = 0;
- assert.soon(
- function() {
- let res = setFCVConn.adminCommand({setFeatureCompatibilityVersion: targetVersion});
-
- if (res.ok === 1) {
- assert.commandWorked(res);
- } else {
- assert.commandFailedWithCode(res, ErrorCodes.Interrupted);
- }
-
- ++attempts;
-
- res = assert.commandWorked(
- conn.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
-
- if (res.featureCompatibilityVersion.hasOwnProperty("targetVersion")) {
- checkFCV(conn.getDB("admin"), lastStableFCV, targetVersion);
- jsTest.log(`Reached partially downgraded state after ${attempts} attempts`);
- return true;
- }
-
- // Either upgrade the feature compatibility version so we can try downgrading again,
- // or downgrade the feature compatibility version so we can try upgrading again.
- // Note that we're using 'conn' rather than 'setFCVConn' to avoid the upgrade being
- // interrupted.
- assert.commandWorked(conn.adminCommand({
- setFeatureCompatibilityVersion: targetVersion === lastStableFCV ? latestFCV
- : lastStableFCV
- }));
- },
- "failed to get featureCompatibilityVersion document into a partially downgraded" +
- " state");
-
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "checkForInterruptFail",
- mode: "off",
- }));
- }
+ // Either upgrade the feature compatibility version so we can try downgrading again,
+ // or downgrade the feature compatibility version so we can try upgrading again.
+ // Note that we're using 'conn' rather than 'setFCVConn' to avoid the upgrade being
+ // interrupted.
+ assert.commandWorked(conn.adminCommand({
+ setFeatureCompatibilityVersion: targetVersion === lastStableFCV ? latestFCV
+ : lastStableFCV
+ }));
+ },
+ "failed to get featureCompatibilityVersion document into a partially downgraded" +
+ " state");
+
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "checkForInterruptFail",
+ mode: "off",
+ }));
+}
+
+(function testStandaloneInLatestFCV() {
+ testStandalone(conn => {
+ checkFCV(conn.getDB("admin"), latestFCV);
+ }, {expectedAtTeardownFCV: latestFCV});
+})();
- (function testStandaloneInLatestFCV() {
- testStandalone(conn => {
- checkFCV(conn.getDB("admin"), latestFCV);
- }, {expectedAtTeardownFCV: latestFCV});
- })();
-
- (function testStandaloneInLastStableFCV() {
- testStandalone(conn => {
- assert.commandWorked(
- conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(conn.getDB("admin"), lastStableFCV);
- }, {
- expectedAtTeardownFCV: lastStableFCV,
- expectedSetLastStableFCV: 1,
- expectedSetLatestFCV: 1
- });
- })();
-
- (function testStandaloneWithInterruptedFCVDowngrade() {
- testStandalone(conn => {
- forceInterruptedUpgradeOrDowngrade(conn, lastStableFCV);
- }, {
- expectedAtTeardownFCV: lastStableFCV,
- expectedSetLastStableFCV: 2,
- expectedSetLatestFCV: 1
- });
- })();
-
- (function testStandaloneWithInterruptedFCVUpgrade() {
- testStandalone(conn => {
- assert.commandWorked(
- conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- forceInterruptedUpgradeOrDowngrade(conn, latestFCV);
- }, {
- expectedAtTeardownFCV: lastStableFCV,
- expectedSetLastStableFCV: 1,
- expectedSetLatestFCV: 1
- });
- })();
+(function testStandaloneInLastStableFCV() {
+ testStandalone(conn => {
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(conn.getDB("admin"), lastStableFCV);
+ }, {
+ expectedAtTeardownFCV: lastStableFCV,
+ expectedSetLastStableFCV: 1,
+ expectedSetLatestFCV: 1
+ });
+})();
+
+(function testStandaloneWithInterruptedFCVDowngrade() {
+ testStandalone(conn => {
+ forceInterruptedUpgradeOrDowngrade(conn, lastStableFCV);
+ }, {
+ expectedAtTeardownFCV: lastStableFCV,
+ expectedSetLastStableFCV: 2,
+ expectedSetLatestFCV: 1
+ });
+})();
+
+(function testStandaloneWithInterruptedFCVUpgrade() {
+ testStandalone(conn => {
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ forceInterruptedUpgradeOrDowngrade(conn, latestFCV);
+ }, {
+ expectedAtTeardownFCV: lastStableFCV,
+ expectedSetLastStableFCV: 1,
+ expectedSetLatestFCV: 1
+ });
+})();
})();
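
The featureCompatibilityVersion document the hook manipulates can be inspected directly
(a sketch; the version strings reflect the 4.0/4.2 era of this test):

    const res = assert.commandWorked(
        conn.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
    // Mid-upgrade this reports e.g. {version: "4.0", targetVersion: "4.2"};
    // once the upgrade completes, targetVersion disappears.
    printjson(res.featureCompatibilityVersion);
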
diff --git a/jstests/noPassthrough/verify_session_cache_updates.js b/jstests/noPassthrough/verify_session_cache_updates.js
index 48622ba7b95..47d6068d5cf 100644
--- a/jstests/noPassthrough/verify_session_cache_updates.js
+++ b/jstests/noPassthrough/verify_session_cache_updates.js
@@ -1,76 +1,76 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- function runTest(conn) {
- for (var i = 0; i < 10; ++i) {
- conn.getDB("test").test.save({a: i});
- }
+function runTest(conn) {
+ for (var i = 0; i < 10; ++i) {
+ conn.getDB("test").test.save({a: i});
+ }
- function verify(conn, nRecords) {
- conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- assert.eq(nRecords, conn.getDB("config").system.sessions.find({}).count());
- }
+ function verify(conn, nRecords) {
+ conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ assert.eq(nRecords, conn.getDB("config").system.sessions.find({}).count());
+ }
- function getLastUse(conn) {
- conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- return conn.getDB("config").system.sessions.findOne({}).lastUse;
- }
+ function getLastUse(conn) {
+ conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ return conn.getDB("config").system.sessions.findOne({}).lastUse;
+ }
- // initially we have no sessions
- verify(conn, 0);
+ // initially we have no sessions
+ verify(conn, 0);
- // Calling startSession in the shell doesn't initiate the session
- var session = conn.startSession();
- verify(conn, 0);
+ // Calling startSession in the shell doesn't initiate the session
+ var session = conn.startSession();
+ verify(conn, 0);
- // running a command that doesn't require auth does touch
- session.getDatabase("admin").runCommand("isMaster");
- verify(conn, 1);
+ // running a command that doesn't require auth does touch
+ session.getDatabase("admin").runCommand("isMaster");
+ verify(conn, 1);
- // running a session updating command does touch
+ // running a session updating command does touch
+ session.getDatabase("admin").runCommand({serverStatus: 1});
+ verify(conn, 1);
+
+ // running a session updating command updates last use
+ {
+ var lastUse = getLastUse(conn);
+ sleep(200);
session.getDatabase("admin").runCommand({serverStatus: 1});
verify(conn, 1);
-
- // running a session updating command updates last use
- {
- var lastUse = getLastUse(conn);
- sleep(200);
- session.getDatabase("admin").runCommand({serverStatus: 1});
- verify(conn, 1);
- assert.gt(getLastUse(conn), lastUse);
- }
-
- // verify that reading from a cursor updates last use
- {
- var cursor = session.getDatabase("test").test.find({}).batchSize(1);
- cursor.next();
- var lastUse = getLastUse(conn);
- sleep(200);
- verify(conn, 1);
- cursor.next();
- assert.gt(getLastUse(conn), lastUse);
- }
-
- session.endSession();
+ assert.gt(getLastUse(conn), lastUse);
}
+ // verify that reading from a cursor updates last use
{
- var mongod = MongoRunner.runMongod({nojournal: ""});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+ var cursor = session.getDatabase("test").test.find({}).batchSize(1);
+ cursor.next();
+ var lastUse = getLastUse(conn);
+ sleep(200);
+ verify(conn, 1);
+ cursor.next();
+ assert.gt(getLastUse(conn), lastUse);
}
- {
- var st = new ShardingTest({shards: 1, mongos: 1, config: 1});
- st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ session.endSession();
+}
- runTest(st.s0);
- st.stop();
- }
+{
+ var mongod = MongoRunner.runMongod({nojournal: ""});
+ runTest(mongod);
+ MongoRunner.stopMongod(mongod);
+}
+
+{
+ var st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+ st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+
+ runTest(st.s0);
+ st.stop();
+}
})();
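
The refresh-then-count idiom this test wraps in verify() also stands alone (a sketch
against any connection):

    conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
    // Each logical session that has touched the server gets one record here.
    print(conn.getDB("config").system.sessions.find({}).count());
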
diff --git a/jstests/noPassthrough/verify_sessions_expiration.js b/jstests/noPassthrough/verify_sessions_expiration.js
index cdf34928772..7940b995253 100644
--- a/jstests/noPassthrough/verify_sessions_expiration.js
+++ b/jstests/noPassthrough/verify_sessions_expiration.js
@@ -14,126 +14,130 @@
// replace it in the config.system.sessions collection.
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
-
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
- const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-
- function refreshSessionsAndVerifyCount(config, expectedCount) {
- config.runCommand(refresh);
- assert.eq(config.system.sessions.count(), expectedCount);
- }
-
- function getSessions(config) {
- return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
- }
-
- function verifyOpenCursorCount(db, expectedCount) {
- assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
- }
-
- const dbName = "test";
- const testCollName = "verify_sessions_find_get_more";
-
- let conn = MongoRunner.runMongod();
- let db = conn.getDB(dbName);
- let config = conn.getDB("config");
-
- // 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
- for (let i = 0; i < 5; i++) {
- let res = db.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
- refreshSessionsAndVerifyCount(config, 5);
-
- // Manually delete entries in config.system.sessions to simulate TTL expiration.
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 0);
-
- // 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
- // config.system.sessions collection.
- for (let i = 0; i < 10; i++) {
- db[testCollName].insert({_id: i, a: i, b: 1});
- }
-
- let cursors = [];
- for (let i = 0; i < 5; i++) {
- let session = db.getMongo().startSession({});
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "initialize the session");
- cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
- assert(cursors[i].hasNext());
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
+const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+
+function refreshSessionsAndVerifyCount(config, expectedCount) {
+ config.runCommand(refresh);
+ assert.eq(config.system.sessions.count(), expectedCount);
+}
+
+function getSessions(config) {
+ return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
+}
+
+function verifyOpenCursorCount(db, expectedCount) {
+ assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
+}
+
+const dbName = "test";
+const testCollName = "verify_sessions_find_get_more";
+
+let conn = MongoRunner.runMongod();
+let db = conn.getDB(dbName);
+let config = conn.getDB("config");
+
+// 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
+for (let i = 0; i < 5; i++) {
+ let res = db.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
+refreshSessionsAndVerifyCount(config, 5);
+
+// Manually delete entries in config.system.sessions to simulate TTL expiration.
+assert.commandWorked(config.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(config, 0);
+
+// 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
+// config.system.sessions collection.
+for (let i = 0; i < 10; i++) {
+ db[testCollName].insert({_id: i, a: i, b: 1});
+}
+
+let cursors = [];
+for (let i = 0; i < 5; i++) {
+ let session = db.getMongo().startSession({});
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "initialize the session");
+ cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
+ assert(cursors[i].hasNext());
+}
+
+refreshSessionsAndVerifyCount(config, 5);
+verifyOpenCursorCount(config, 5);
+
+let sessionsCollectionArray;
+let lastUseValues = [];
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < cursors.length; j++) {
+ cursors[j].next();
}
refreshSessionsAndVerifyCount(config, 5);
verifyOpenCursorCount(config, 5);
- let sessionsCollectionArray;
- let lastUseValues = [];
- for (let i = 0; i < 3; i++) {
- for (let j = 0; j < cursors.length; j++) {
- cursors[j].next();
- }
-
- refreshSessionsAndVerifyCount(config, 5);
- verifyOpenCursorCount(config, 5);
-
- sessionsCollectionArray = getSessions(config);
+ sessionsCollectionArray = getSessions(config);
- if (i == 0) {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- lastUseValues.push(sessionsCollectionArray[j].lastUse);
- }
- } else {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
- lastUseValues[j] = sessionsCollectionArray[j].lastUse;
- }
+ if (i == 0) {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ lastUseValues.push(sessionsCollectionArray[j].lastUse);
+ }
+ } else {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
+ lastUseValues[j] = sessionsCollectionArray[j].lastUse;
}
}
-
- // 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
- // cursors.
- assert.commandWorked(config.system.sessions.remove({}));
-
- refreshSessionsAndVerifyCount(config, 0);
- verifyOpenCursorCount(config, 0);
-
- for (let i = 0; i < cursors.length; i++) {
- assert.commandFailedWithCode(
- db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
- }
-
- // 4. Verify that an expired session (simulated by manual deletion) that has a currently running
- // operation will be vivified during the logical session cache refresh.
- let pinnedCursorSession = db.getMongo().startSession();
- withPinnedCursor({
- conn: conn,
- db: pinnedCursorSession.getDatabase(dbName),
- assertFunction: (cursorId, coll) => {
- assert.commandWorked(config.system.sessions.remove({}));
-
- refreshSessionsAndVerifyCount(config, 1);
- verifyOpenCursorCount(config, 1);
-
- let db = coll.getDB();
- assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- },
- sessionId: pinnedCursorSession,
- runGetMoreFunc: () => {
- assert.commandFailed(
- db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId}));
- },
- failPointName: failPointName,
- });
-
- MongoRunner.stopMongod(conn);
+}
+
+// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
+// cursors.
+assert.commandWorked(config.system.sessions.remove({}));
+
+refreshSessionsAndVerifyCount(config, 0);
+verifyOpenCursorCount(config, 0);
+
+for (let i = 0; i < cursors.length; i++) {
+ assert.commandFailedWithCode(
+ db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+}
+
+// 4. Verify that an expired session (simulated by manual deletion) that has a currently running
+// operation will be vivified during the logical session cache refresh.
+let pinnedCursorSession = db.getMongo().startSession();
+withPinnedCursor({
+ conn: conn,
+ db: pinnedCursorSession.getDatabase(dbName),
+ assertFunction: (cursorId, coll) => {
+ assert.commandWorked(config.system.sessions.remove({}));
+
+ refreshSessionsAndVerifyCount(config, 1);
+ verifyOpenCursorCount(config, 1);
+
+ let db = coll.getDB();
+ assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+ },
+ sessionId: pinnedCursorSession,
+ runGetMoreFunc: () => {
+ assert.commandFailed(
+ db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId}));
+ },
+ failPointName: failPointName,
+});
+
+MongoRunner.stopMongod(conn);
})();
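
The session records asserted on above can be listed with the same aggregation the test's
getSessions() helper uses (a sketch against the same 'config' handle):

    // Each record carries a session UUID and the server's last-use timestamp.
    config.system.sessions.aggregate([{$listSessions: {allUsers: true}}])
        .forEach(s => printjson({id: s._id.id, lastUse: s.lastUse}));
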
diff --git a/jstests/noPassthrough/view_catalog_deadlock_with_rename.js b/jstests/noPassthrough/view_catalog_deadlock_with_rename.js
index ec6e6dd107c..ec3aa9d21fd 100644
--- a/jstests/noPassthrough/view_catalog_deadlock_with_rename.js
+++ b/jstests/noPassthrough/view_catalog_deadlock_with_rename.js
@@ -7,30 +7,30 @@
* The fix is to always lock 'system.views' collection in the end.
*/
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const db = conn.getDB('test');
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
- assert.commandWorked(db.runCommand({insert: 'a', documents: [{x: 1}]}));
- assert.commandWorked(db.runCommand({insert: 'b', documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: 'a', documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: 'b', documents: [{x: 1}]}));
- assert.commandWorked(db.createView('viewA', 'a', []));
+assert.commandWorked(db.createView('viewA', 'a', []));
- // Will cause a view catalog reload.
- assert.commandWorked(db.runCommand(
- {insert: 'system.views', documents: [{_id: 'test.viewB', viewOn: 'b', pipeline: []}]}));
+// Will cause a view catalog reload.
+assert.commandWorked(db.runCommand(
+ {insert: 'system.views', documents: [{_id: 'test.viewB', viewOn: 'b', pipeline: []}]}));
- const renameSystemViews = startParallelShell(function() {
- // This used to first lock 'test.system.views' and then 'test.aaabb' in X mode.
- assert.commandWorked(
- db.adminCommand({renameCollection: 'test.system.views', to: 'test.aaabb'}));
- }, conn.port);
+const renameSystemViews = startParallelShell(function() {
+ // This used to first lock 'test.system.views' and then 'test.aaabb' in X mode.
+ assert.commandWorked(
+ db.adminCommand({renameCollection: 'test.system.views', to: 'test.aaabb'}));
+}, conn.port);
- // This triggers a view catalog reload, so it first locks 'test.aaabb' in IX mode and then
- // 'test.system.views' in IS mode.
- assert.commandWorked(db.runCommand({delete: 'aaabb', deletes: [{q: {x: 2}, limit: 1}]}));
+// This triggers a view catalog reload, so it first locks 'test.aaabb' in IX mode and then
+// 'test.system.views' in IS mode.
+assert.commandWorked(db.runCommand({delete: 'aaabb', deletes: [{q: {x: 2}, limit: 1}]}));
- renameSystemViews();
- MongoRunner.stopMongod(conn);
+renameSystemViews();
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/views_legacy.js b/jstests/noPassthrough/views_legacy.js
index c52aca93bc3..8ded34730ae 100644
--- a/jstests/noPassthrough/views_legacy.js
+++ b/jstests/noPassthrough/views_legacy.js
@@ -3,82 +3,81 @@
* legacy write mode. Also confirms that legacy killCursors execution is successful.
*/
(function() {
- "use strict";
-
- let conn = MongoRunner.runMongod({});
-
- let viewsDB = conn.getDB("views_legacy");
- assert.commandWorked(viewsDB.dropDatabase());
- assert.commandWorked(viewsDB.createView("view", "collection", []));
- let coll = viewsDB.getCollection("collection");
-
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- conn.forceReadMode("legacy");
- conn.forceWriteMode("legacy");
-
- //
- // Legacy getMore is explicitly prohibited on views; you must use the getMore command.
- //
- let cmdRes =
- viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
- assert.commandWorked(cmdRes);
- let cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-
- let err = assert.throws(function() {
- cursor.itcount();
- }, [], "Legacy getMore expected to fail on a view cursor");
- assert.eq(ErrorCodes.CommandNotSupportedOnView, err.code, tojson(err));
-
- //
- // Legacy killcursors is expected to work on views.
- //
- cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
- assert.commandWorked(cmdRes);
- cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-
- // When DBCommandCursor is constructed under legacy readMode, cursor.close() will execute a
- // legacy killcursors operation.
- cursor.close();
- assert.gleSuccess(viewsDB, "legacy killcursors expected to work on view cursor");
-
- //
- // A view should reject all write CRUD operations performed in legacy write mode.
- //
- viewsDB.view.insert({x: 1});
- assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
- viewsDB.view.remove({x: 1});
- assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
- viewsDB.view.update({x: 1}, {x: 2});
- assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
- //
- // Legacy find is explicitly prohibited on views; you must use the find command.
- //
- let res = assert.throws(function() {
- viewsDB.view.find({x: 1}).toArray();
- });
- assert.eq(res.code, ErrorCodes.CommandNotSupportedOnView, tojson(res));
-
- // Ensure that legacy getMore succeeds even when a cursor is established on a namespace whose
- // database does not exist. Legacy getMore must check that the cursor is not over a view, and
- // this must handle the case where the namespace is not a view by virtue of the database not
- // existing.
- assert.commandWorked(viewsDB.dropDatabase());
-
- cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
- assert.commandWorked(cmdRes);
- cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
- assert.eq(0, cursor.itcount());
-
- cmdRes = viewsDB.runCommand({aggregate: "view", pipeline: [], cursor: {batchSize: 0}});
- assert.commandWorked(cmdRes);
- cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
- assert.eq(0, cursor.itcount());
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+let conn = MongoRunner.runMongod({});
+
+let viewsDB = conn.getDB("views_legacy");
+assert.commandWorked(viewsDB.dropDatabase());
+assert.commandWorked(viewsDB.createView("view", "collection", []));
+let coll = viewsDB.getCollection("collection");
+
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+conn.forceReadMode("legacy");
+conn.forceWriteMode("legacy");
+
+//
+// Legacy getMore is explicitly prohibited on views; you must use the getMore command.
+//
+let cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
+assert.commandWorked(cmdRes);
+let cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+
+let err = assert.throws(function() {
+ cursor.itcount();
+}, [], "Legacy getMore expected to fail on a view cursor");
+assert.eq(ErrorCodes.CommandNotSupportedOnView, err.code, tojson(err));
+
+//
+// Legacy killcursors is expected to work on views.
+//
+cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
+assert.commandWorked(cmdRes);
+cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+
+// When DBCommandCursor is constructed under legacy readMode, cursor.close() will execute a
+// legacy killcursors operation.
+cursor.close();
+assert.gleSuccess(viewsDB, "legacy killcursors expected to work on view cursor");
+
+//
+// A view should reject all write CRUD operations performed in legacy write mode.
+//
+viewsDB.view.insert({x: 1});
+assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
+
+viewsDB.view.remove({x: 1});
+assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
+
+viewsDB.view.update({x: 1}, {x: 2});
+assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
+
+//
+// Legacy find is explicitly prohibited on views; you must use the find command.
+//
+let res = assert.throws(function() {
+ viewsDB.view.find({x: 1}).toArray();
+});
+assert.eq(res.code, ErrorCodes.CommandNotSupportedOnView, tojson(res));
+
+// Ensure that legacy getMore succeeds even when a cursor is established on a namespace whose
+// database does not exist. Legacy getMore must check that the cursor is not over a view, and
+// this must handle the case where the namespace is not a view by virtue of the database not
+// existing.
+assert.commandWorked(viewsDB.dropDatabase());
+
+cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
+assert.commandWorked(cmdRes);
+cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+assert.eq(0, cursor.itcount());
+
+cmdRes = viewsDB.runCommand({aggregate: "view", pipeline: [], cursor: {batchSize: 0}});
+assert.commandWorked(cmdRes);
+cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+assert.eq(0, cursor.itcount());
+
+MongoRunner.stopMongod(conn);
}());
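
For contrast with the prohibited legacy opcodes above, a minimal sketch of the command-based cursor path that views do support, assuming the "view" and documents created at the top of this test (before the dropDatabase call):

    let findRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, batchSize: 2});
    assert.commandWorked(findRes);
    // The getMore *command* (as opposed to the legacy OP_GET_MORE message) is the
    // supported way to continue a cursor over a view.
    let getMoreRes =
        viewsDB.runCommand({getMore: findRes.cursor.id, collection: "view", batchSize: 2});
    assert.commandWorked(getMoreRes);
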
diff --git a/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js b/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js
index f048f2cbf04..43eec0690f9 100644
--- a/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js
+++ b/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js
@@ -5,18 +5,18 @@
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // Valid parameter values are in the range [0.1, infinity) or 0 (unbounded).
- testNumericServerParameter("wiredTigerMaxCacheOverflowSizeGB",
- false /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 0 /*defaultValue*/,
- 0.1 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0.09 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [0.1, infinity) or 0 (unbounded).
+testNumericServerParameter("wiredTigerMaxCacheOverflowSizeGB",
+ false /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 0 /*defaultValue*/,
+ 0.1 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0.09 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
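
As a usage note, a minimal sketch of what the helper above exercises: flipping the runtime parameter from the shell and probing the documented lower bound, assuming a connected shell `db` with admin rights:

    // 0 means "unbounded"; any other value must be >= 0.1.
    assert.commandWorked(db.adminCommand({setParameter: 1, wiredTigerMaxCacheOverflowSizeGB: 0.5}));
    assert.commandWorked(db.adminCommand({setParameter: 1, wiredTigerMaxCacheOverflowSizeGB: 0}));
    // Below the lower bound (and not exactly 0) the command is rejected.
    assert.commandFailed(db.adminCommand({setParameter: 1, wiredTigerMaxCacheOverflowSizeGB: 0.09}));
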
diff --git a/jstests/noPassthrough/write_conflict_wildcard.js b/jstests/noPassthrough/write_conflict_wildcard.js
index f5662cdf119..ae2fcca5fc4 100644
--- a/jstests/noPassthrough/write_conflict_wildcard.js
+++ b/jstests/noPassthrough/write_conflict_wildcard.js
@@ -3,37 +3,35 @@
* interacting with the storage layer to retrieve multikey paths.
*/
(function() {
- "strict";
+"strict";
- const conn = MongoRunner.runMongod();
- const testDB = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+const testDB = conn.getDB("test");
- const coll = testDB.write_conflict_wildcard;
- coll.drop();
+const coll = testDB.write_conflict_wildcard;
+coll.drop();
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: 'WTWriteConflictExceptionForReads',
- mode: {activationProbability: 0.01}
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {activationProbability: 0.01}}));
+for (let i = 0; i < 1000; ++i) {
+ // Insert documents with a couple different multikey paths to increase the number of records
+ // scanned during multikey path computation in the wildcard index.
+ assert.commandWorked(coll.insert({
+ _id: i,
+ i: i,
+ a: [{x: i - 1}, {x: i}, {x: i + 1}],
+ b: [],
+ longerName: [{nested: [1, 2]}, {nested: 4}]
}));
- for (let i = 0; i < 1000; ++i) {
- // Insert documents with a couple different multikey paths to increase the number of records
- // scanned during multikey path computation in the wildcard index.
- assert.commandWorked(coll.insert({
- _id: i,
- i: i,
- a: [{x: i - 1}, {x: i}, {x: i + 1}],
- b: [],
- longerName: [{nested: [1, 2]}, {nested: 4}]
- }));
- assert.eq(coll.find({i: i}).hint({"$**": 1}).itcount(), 1);
- if (i > 0) {
- assert.eq(coll.find({"a.x": i}).hint({"$**": 1}).itcount(), 2);
- }
+ assert.eq(coll.find({i: i}).hint({"$**": 1}).itcount(), 1);
+ if (i > 0) {
+ assert.eq(coll.find({"a.x": i}).hint({"$**": 1}).itcount(), 2);
}
+}
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
- MongoRunner.stopMongod(conn);
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
+MongoRunner.stopMongod(conn);
})();
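
The failpoint dance above generalizes; a hedged sketch of the configureFailPoint modes commonly used in these tests, assuming an admin-capable connection `conn`:

    // Fire on every eligible code path until turned off.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: 'alwaysOn'}));
    // Fire for the next N hits only.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {times: 5}}));
    // Fire probabilistically, as the test above does.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'WTWriteConflictExceptionForReads',
         mode: {activationProbability: 0.01}}));
    // Disable.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: 'off'}));
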
diff --git a/jstests/noPassthrough/write_local.js b/jstests/noPassthrough/write_local.js
index 6eff980d4a9..252baf3d871 100644
--- a/jstests/noPassthrough/write_local.js
+++ b/jstests/noPassthrough/write_local.js
@@ -1,46 +1,45 @@
// SERVER-22011: Deadlock in ticket distribution
// @tags: [requires_replication, requires_capped]
(function() {
- 'use strict';
+'use strict';
- // Limit concurrent WiredTiger transactions to maximize locking issues, harmless for other SEs.
- var options = {verbose: 1};
+// Limit concurrent WiredTiger transactions to maximize locking issues, harmless for other SEs.
+var options = {verbose: 1};
- // Create a new single node replicaSet
- var replTest =
- new ReplSetTest({name: "write_local", nodes: 1, oplogSize: 1, nodeOptions: options});
- replTest.startSet();
- replTest.initiate();
- var mongod = replTest.getPrimary();
- mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 1});
+// Create a new single node replicaSet
+var replTest = new ReplSetTest({name: "write_local", nodes: 1, oplogSize: 1, nodeOptions: options});
+replTest.startSet();
+replTest.initiate();
+var mongod = replTest.getPrimary();
+mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 1});
- var local = mongod.getDB('local');
+var local = mongod.getDB('local');
- // Start inserting documents in test.capped and local.capped capped collections.
- var shells = ['test', 'local'].map(function(dbname) {
- var mydb = local.getSiblingDB(dbname);
- mydb.capped.drop();
- mydb.createCollection('capped', {capped: true, size: 20 * 1000});
- return startParallelShell('var mydb=db.getSiblingDB("' + dbname + '"); ' +
- '(function() { ' +
- ' for(var i=0; i < 10*1000; i++) { ' +
- ' mydb.capped.insert({ x: i }); ' +
- ' } ' +
- '})();',
- mongod.port);
- });
+// Start inserting documents in test.capped and local.capped capped collections.
+var shells = ['test', 'local'].map(function(dbname) {
+ var mydb = local.getSiblingDB(dbname);
+ mydb.capped.drop();
+ mydb.createCollection('capped', {capped: true, size: 20 * 1000});
+ return startParallelShell('var mydb=db.getSiblingDB("' + dbname + '"); ' +
+ '(function() { ' +
+ ' for(var i=0; i < 10*1000; i++) { ' +
+ ' mydb.capped.insert({ x: i }); ' +
+ ' } ' +
+ '})();',
+ mongod.port);
+});
- // The following causes inconsistent locking order in the ticket system, depending on
- // timeouts to avoid deadlock.
- var oldObjects = 0;
- for (var i = 0; i < 1000; i++) {
- print(local.stats().objects);
- sleep(1);
- }
+// The following causes inconsistent locking order in the ticket system, depending on
+// timeouts to avoid deadlock.
+var oldObjects = 0;
+for (var i = 0; i < 1000; i++) {
+ print(local.stats().objects);
+ sleep(1);
+}
- // Wait for parallel shells to terminate and stop our replset.
- shells.forEach((function(f) {
- f();
- }));
- replTest.stopSet();
+// Wait for parallel shells to terminate and stop our replset.
+shells.forEach((function(f) {
+ f();
+}));
+replTest.stopSet();
}());
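
A condensed sketch of the startParallelShell pattern used above, assuming a `mongod` connection; the returned function joins the shell and asserts a clean exit:

    const awaitShell = startParallelShell(
        'for (let i = 0; i < 100; i++) { db.capped.insert({x: i}); }', mongod.port);
    // ... do concurrent work on this connection ...
    awaitShell();  // blocks until the parallel shell exits, asserting exit code 0
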
diff --git a/jstests/noPassthrough/wt_cache_full.js b/jstests/noPassthrough/wt_cache_full.js
index a5f08aa0815..29be77da891 100644
--- a/jstests/noPassthrough/wt_cache_full.js
+++ b/jstests/noPassthrough/wt_cache_full.js
@@ -3,63 +3,62 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
- },
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const numUpdates = 500;
- const secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const numUpdates = 500;
+const secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
+}
- jsTestLog('Applying updates on secondary ' + secondary.host);
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- rst.awaitReplication();
+jsTestLog('Applying updates on secondary ' + secondary.host);
+assert.commandWorked(secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
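
A small sketch of the batch-limit lookup the test logs above, assuming a direct connection `secondary`; getParameter returns the requested fields on the reply document:

    const reply = assert.commandWorked(
        secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}));
    // The value caps how many oplog entries the secondary applies per batch.
    jsTestLog('replBatchLimitOperations = ' + reply.replBatchLimitOperations);
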
diff --git a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
index 6c438f7e79e..71383f91454 100644
--- a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
+++ b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
@@ -3,71 +3,71 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
- },
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ],
- nodeOptions: {
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
},
- });
- const nodes = rst.startSet();
- rst.initiate();
+ ],
+ nodeOptions: {
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const numUpdates = 500;
- const secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const numUpdates = 500;
+const secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
- const session = primary.startSession();
- const sessionDB = session.getDatabase(mydb.getName());
- const sessionColl = sessionDB.getCollection(coll.getName());
- session.startTransaction();
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(sessionColl.update({_id: i}, {$inc: {i: 1}}));
- }
+jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
+const session = primary.startSession();
+const sessionDB = session.getDatabase(mydb.getName());
+const sessionColl = sessionDB.getCollection(coll.getName());
+session.startTransaction();
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(sessionColl.update({_id: i}, {$inc: {i: 1}}));
}
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+}
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
- jsTestLog('Applying updates on secondary ' + secondary.host);
+jsTestLog('Applying updates on secondary ' + secondary.host);
- // If the secondary is unable to apply all the operations in the unprepared transaction within
- // a single batch with the constrained cache settings, the replica set will not reach a stable
- // state.
- rst.awaitReplication();
+// If the secondary is unable to apply all the operations in the unprepared transaction within
+// a single batch with the constrained cache settings, the replica set will not reach a stable
+// state.
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
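
For reference, a minimal sketch of the shell session/transaction API the test drives, assuming a `primary` connection with a 'test.t' collection:

    const session = primary.startSession();
    const sessionColl = session.getDatabase('test').getCollection('t');
    session.startTransaction();
    assert.writeOK(sessionColl.update({_id: 0}, {$inc: {i: 1}}));
    // commitTransaction_forTesting() surfaces the commit result instead of throwing.
    assert.commandWorked(session.commitTransaction_forTesting());
    session.endSession();
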
diff --git a/jstests/noPassthrough/wt_cache_full_restart.js b/jstests/noPassthrough/wt_cache_full_restart.js
index 5ee7fa9c935..29aed83c67f 100644
--- a/jstests/noPassthrough/wt_cache_full_restart.js
+++ b/jstests/noPassthrough/wt_cache_full_restart.js
@@ -3,68 +3,68 @@
* @tags: [requires_replication, requires_persistence, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
- },
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const numUpdates = 500;
- let secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const numUpdates = 500;
+let secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- jsTestLog('Stopping secondary ' + secondary.host + '.');
- rst.stop(1);
- jsTestLog('Stopped secondary. Writing ' + numUpdates + ' updates to ' + numDocs +
- ' documents on primary ' + primary.host + '.');
- const startTime = Date.now();
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+jsTestLog('Stopping secondary ' + secondary.host + '.');
+rst.stop(1);
+jsTestLog('Stopped secondary. Writing ' + numUpdates + ' updates to ' + numDocs +
+ ' documents on primary ' + primary.host + '.');
+const startTime = Date.now();
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
- const totalTime = Date.now() - startTime;
- jsTestLog('Wrote ' + numUpdates + ' updates to ' + numDocs + ' documents on primary ' +
- primary.host + '. Elapsed: ' + totalTime + ' ms.');
+}
+const totalTime = Date.now() - startTime;
+jsTestLog('Wrote ' + numUpdates + ' updates to ' + numDocs + ' documents on primary ' +
+ primary.host + '. Elapsed: ' + totalTime + ' ms.');
- secondary = rst.restart(1);
- jsTestLog('Restarted secondary ' + secondary.host +
- '. Waiting for secondary to apply updates from primary.');
- rst.awaitReplication();
+secondary = rst.restart(1);
+jsTestLog('Restarted secondary ' + secondary.host +
+ '. Waiting for secondary to apply updates from primary.');
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index ae7f6eac4af..3470a04e24b 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -8,68 +8,68 @@
* @tags: [resource_intensive]
*/
(function() {
- "use strict";
+"use strict";
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- // Skip db hash check because delayed secondary will not catch up to primary.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because delayed secondary will not catch up to primary.
+TestData.skipCheckDBHashes = true;
- // Skip this test if not running with the "wiredTiger" storage engine.
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (storageEngine !== "wiredTiger") {
- print('Skipping test because storageEngine is not "wiredTiger"');
- return;
- } else if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
- // Readers of old data, such as a lagged secondary, can lead to stalls when using
- // WiredTiger's LSM tree.
- print("WT-3742: Skipping test because we're running with WiredTiger's LSM tree");
- return;
- } else {
- var rst = new ReplSetTest({
- nodes: 2,
- // We are going to insert at least 100 MB of data with a long slave
- // delay. Configure an appropriately large oplog size.
- oplogSize: 200,
- });
+// Skip this test if not running with the "wiredTiger" storage engine.
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (storageEngine !== "wiredTiger") {
+ print('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+} else if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
+ // Readers of old data, such as a lagged secondary, can lead to stalls when using
+ // WiredTiger's LSM tree.
+ print("WT-3742: Skipping test because we're running with WiredTiger's LSM tree");
+ return;
+} else {
+ var rst = new ReplSetTest({
+ nodes: 2,
+ // We are going to insert at least 100 MB of data with a long slave
+ // delay. Configure an appropriately large oplog size.
+ oplogSize: 200,
+ });
- var conf = rst.getReplSetConfig();
- conf.members[1].votes = 1;
- conf.members[1].priority = 0;
- conf.members[1].slaveDelay = 24 * 60 * 60;
+ var conf = rst.getReplSetConfig();
+ conf.members[1].votes = 1;
+ conf.members[1].priority = 0;
+ conf.members[1].slaveDelay = 24 * 60 * 60;
- rst.startSet();
- // We cannot wait for a stable recovery timestamp due to the slaveDelay.
- rst.initiateWithAnyNodeAsPrimary(
- conf, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- var master = rst.getPrimary(); // Waits for PRIMARY state.
+ rst.startSet();
+ // We cannot wait for a stable recovery timestamp due to the slaveDelay.
+ rst.initiateWithAnyNodeAsPrimary(
+ conf, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+ var master = rst.getPrimary(); // Waits for PRIMARY state.
- // Reconfigure primary with a small cache size so less data needs to be
- // inserted to make the cache full while trying to trigger a stall.
- assert.commandWorked(master.adminCommand(
- {setParameter: 1, "wiredTigerEngineRuntimeConfig": "cache_size=100MB"}));
+ // Reconfigure primary with a small cache size so less data needs to be
+ // inserted to make the cache full while trying to trigger a stall.
+ assert.commandWorked(master.adminCommand(
+ {setParameter: 1, "wiredTigerEngineRuntimeConfig": "cache_size=100MB"}));
- var coll = master.getCollection("test.coll");
- var bigstr = "a".repeat(4000);
+ var coll = master.getCollection("test.coll");
+ var bigstr = "a".repeat(4000);
- // Do not insert with a writeConcern because we want the delayed slave
- // to fall behind in replication. This is crucial apart from having a
- // readConcern to pin updates in memory on the primary. To prevent the
- // slave from falling off the oplog, we configure the oplog large enough
- // to accomodate all the inserts.
- for (var i = 0; i < 250; i++) {
- let batch = coll.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- batch.insert({a: bigstr});
- }
- assert.writeOK(batch.execute());
+ // Do not insert with a writeConcern because we want the delayed slave
+ // to fall behind in replication. This is crucial apart from having a
+ // readConcern to pin updates in memory on the primary. To prevent the
+ // slave from falling off the oplog, we configure the oplog large enough
+    // to accommodate all the inserts.
+ for (var i = 0; i < 250; i++) {
+ let batch = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 100; j++) {
+ batch.insert({a: bigstr});
}
- rst.stopSet();
+ assert.writeOK(batch.execute());
}
+ rst.stopSet();
+}
})();
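
One knob worth calling out from the branch above: the WiredTiger cache can be resized at runtime, which the test uses to make cache-full stalls cheaper to reach. A sketch, assuming a primary connection `master`:

    // Shrink the cache without restarting the node.
    assert.commandWorked(master.adminCommand(
        {setParameter: 1, wiredTigerEngineRuntimeConfig: 'cache_size=100MB'}));
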
diff --git a/jstests/noPassthrough/wt_disable_majority_reads.js b/jstests/noPassthrough/wt_disable_majority_reads.js
index 57249723d2c..65cba8a8588 100644
--- a/jstests/noPassthrough/wt_disable_majority_reads.js
+++ b/jstests/noPassthrough/wt_disable_majority_reads.js
@@ -1,32 +1,32 @@
// @tags: [requires_wiredtiger, requires_replication]
(function() {
- "use strict";
+"use strict";
- var rst = new ReplSetTest({
- nodes: [
- {"enableMajorityReadConcern": ""},
- {"enableMajorityReadConcern": "false"},
- {"enableMajorityReadConcern": "true"}
- ]
- });
- rst.startSet();
- rst.initiate();
- rst.awaitSecondaryNodes();
+var rst = new ReplSetTest({
+ nodes: [
+ {"enableMajorityReadConcern": ""},
+ {"enableMajorityReadConcern": "false"},
+ {"enableMajorityReadConcern": "true"}
+ ]
+});
+rst.startSet();
+rst.initiate();
+rst.awaitSecondaryNodes();
- rst.getPrimary().getDB("test").getCollection("test").insert({});
- rst.awaitReplication();
+rst.getPrimary().getDB("test").getCollection("test").insert({});
+rst.awaitReplication();
- // Node 0 is using the default, which is `enableMajorityReadConcern: true`. Thus a majority
- // read should succeed.
- assert.commandWorked(rst.nodes[0].getDB("test").runCommand(
- {"find": "test", "readConcern": {"level": "majority"}}));
- // Node 1 disables majority reads. Check for the appropriate error code.
- assert.commandFailedWithCode(rst.nodes[1].getDB("test").runCommand(
- {"find": "test", "readConcern": {"level": "majority"}}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
- // Same as Node 0.
- assert.commandWorked(rst.nodes[2].getDB("test").runCommand(
- {"find": "test", "readConcern": {"level": "majority"}}));
+// Node 0 is using the default, which is `enableMajorityReadConcern: true`. Thus a majority
+// read should succeed.
+assert.commandWorked(
+ rst.nodes[0].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}));
+// Node 1 disables majority reads. Check for the appropriate error code.
+assert.commandFailedWithCode(
+ rst.nodes[1].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
+// Same as Node 0.
+assert.commandWorked(
+ rst.nodes[2].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}));
- rst.stopSet();
+rst.stopSet();
})();
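
A hedged sketch of checking from the shell whether a node can serve majority reads at all; the `supportsCommittedReads` field in the serverStatus storage-engine section is assumed here to reflect the enableMajorityReadConcern setting (`node` is any connection from the set above):

    const status = assert.commandWorked(node.adminCommand({serverStatus: 1}));
    jsTestLog('supportsCommittedReads: ' + status.storageEngine.supportsCommittedReads);
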
diff --git a/jstests/noPassthrough/wt_index_option_defaults.js b/jstests/noPassthrough/wt_index_option_defaults.js
index 46e91a174e6..ce4f1a1c78d 100644
--- a/jstests/noPassthrough/wt_index_option_defaults.js
+++ b/jstests/noPassthrough/wt_index_option_defaults.js
@@ -7,152 +7,151 @@
* inMemoryIndexConfigString.
*/
(function() {
- 'use strict';
-
- var engine = 'wiredTiger';
- if (jsTest.options().storageEngine) {
- engine = jsTest.options().storageEngine;
+'use strict';
+
+var engine = 'wiredTiger';
+if (jsTest.options().storageEngine) {
+ engine = jsTest.options().storageEngine;
+}
+
+// Skip this test if not running with the right storage engine.
+if (engine !== 'wiredTiger' && engine !== 'inMemory') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
+ return;
+}
+
+// Skip this test when 'xxxIndexConfigString' is already set in TestData.
+// TODO: This test can be enabled when MongoRunner supports combining WT config strings with
+// commas.
+if (jsTest.options()[engine + 'IndexConfigString']) {
+ jsTest.log('Skipping test because system-wide defaults for index options are already set');
+ return;
+}
+
+// Use different values for the same configuration string key to test that index-specific
+// options override collection-wide options, and that collection-wide options override
+// system-wide options.
+var systemWideConfigString = 'split_pct=70,';
+var collectionWideConfigString = 'split_pct=75,';
+var indexSpecificConfigString = 'split_pct=80,';
+
+// Start up a mongod with system-wide defaults for index options and create a collection without
+// any additional options. Tests that an index without any additional options should take on the
+// system-wide defaults, whereas an index with additional options should override the
+// system-wide defaults.
+runTest({});
+
+// Start up a mongod with system-wide defaults for index options and create a collection with
+// additional options. Tests that an index without any additional options should take on the
+// collection-wide defaults, whereas an index with additional options should override the
+// collection-wide defaults.
+runTest({indexOptionDefaults: collectionWideConfigString});
+
+function runTest(collOptions) {
+ var hasIndexOptionDefaults = collOptions.hasOwnProperty('indexOptionDefaults');
+
+ var dbpath = MongoRunner.dataPath + 'wt_index_option_defaults';
+ resetDbpath(dbpath);
+
+ // Start a mongod with system-wide defaults for engine-specific index options.
+ var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ [engine + 'IndexConfigString']: systemWideConfigString,
+ });
+ assert.neq(null, conn, 'mongod was unable to start up');
+
+ var testDB = conn.getDB('test');
+ var cmdObj = {create: 'coll'};
+
+ // Apply collection-wide defaults for engine-specific index options if any were
+ // specified.
+ if (hasIndexOptionDefaults) {
+ cmdObj.indexOptionDefaults = {
+ storageEngine: {[engine]: {configString: collOptions.indexOptionDefaults}}
+ };
}
+ assert.commandWorked(testDB.runCommand(cmdObj));
- // Skip this test if not running with the right storage engine.
- if (engine !== 'wiredTiger' && engine !== 'inMemory') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
- return;
- }
+ // Create an index that does not specify any engine-specific options.
+ assert.commandWorked(testDB.coll.createIndex({a: 1}, {name: 'without_options'}));
- // Skip this test when 'xxxIndexConfigString' is already set in TestData.
- // TODO: This test can be enabled when MongoRunner supports combining WT config strings with
- // commas.
- if (jsTest.options()[engine + 'IndexConfigString']) {
- jsTest.log('Skipping test because system-wide defaults for index options are already set');
- return;
- }
+ // Create an index that specifies engine-specific index options.
+ assert.commandWorked(testDB.coll.createIndex({b: 1}, {
+ name: 'with_options',
+ storageEngine: {[engine]: {configString: indexSpecificConfigString}}
+ }));
- // Use different values for the same configuration string key to test that index-specific
- // options override collection-wide options, and that collection-wide options override
- // system-wide options.
- var systemWideConfigString = 'split_pct=70,';
- var collectionWideConfigString = 'split_pct=75,';
- var indexSpecificConfigString = 'split_pct=80,';
-
- // Start up a mongod with system-wide defaults for index options and create a collection without
- // any additional options. Tests than an index without any additional options should take on the
- // system-wide defaults, whereas an index with additional options should override the
- // system-wide defaults.
- runTest({});
-
- // Start up a mongod with system-wide defaults for index options and create a collection with
- // additional options. Tests than an index without any additional options should take on the
- // collection-wide defaults, whereas an index with additional options should override the
- // collection-wide defaults.
- runTest({indexOptionDefaults: collectionWideConfigString});
-
- function runTest(collOptions) {
- var hasIndexOptionDefaults = collOptions.hasOwnProperty('indexOptionDefaults');
-
- var dbpath = MongoRunner.dataPath + 'wt_index_option_defaults';
- resetDbpath(dbpath);
-
- // Start a mongod with system-wide defaults for engine-specific index options.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- [engine + 'IndexConfigString']: systemWideConfigString,
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- var testDB = conn.getDB('test');
- var cmdObj = {create: 'coll'};
-
- // Apply collection-wide defaults for engine-specific index options if any were
- // specified.
- if (hasIndexOptionDefaults) {
- cmdObj.indexOptionDefaults = {
- storageEngine: {[engine]: {configString: collOptions.indexOptionDefaults}}
- };
- }
- assert.commandWorked(testDB.runCommand(cmdObj));
-
- // Create an index that does not specify any engine-specific options.
- assert.commandWorked(testDB.coll.createIndex({a: 1}, {name: 'without_options'}));
-
- // Create an index that specifies engine-specific index options.
- assert.commandWorked(testDB.coll.createIndex({b: 1}, {
- name: 'with_options',
- storageEngine: {[engine]: {configString: indexSpecificConfigString}}
- }));
-
- var collStats = testDB.runCommand({collStats: 'coll'});
- assert.commandWorked(collStats);
-
- checkIndexWithoutOptions(collStats.indexDetails);
- checkIndexWithOptions(collStats.indexDetails);
-
- MongoRunner.stopMongod(conn);
-
- function checkIndexWithoutOptions(indexDetails) {
- var indexSpec = getIndexSpecByName(testDB.coll, 'without_options');
- assert(!indexSpec.hasOwnProperty('storageEngine'),
- 'no storage engine options should have been set in the index spec: ' +
- tojson(indexSpec));
-
- var creationString = indexDetails.without_options.creationString;
- if (hasIndexOptionDefaults) {
- assert.eq(-1,
- creationString.indexOf(systemWideConfigString),
- 'system-wide index option present in the creation string even though a ' +
- 'collection-wide option was specified: ' + creationString);
- assert.lte(0,
- creationString.indexOf(collectionWideConfigString),
- 'collection-wide index option not present in the creation string: ' +
- creationString);
- } else {
- assert.lte(0,
- creationString.indexOf(systemWideConfigString),
- 'system-wide index option not present in the creation string: ' +
- creationString);
- assert.eq(-1,
- creationString.indexOf(collectionWideConfigString),
- 'collection-wide index option present in creation string even though ' +
- 'it was not specified: ' + creationString);
- }
+ var collStats = testDB.runCommand({collStats: 'coll'});
+ assert.commandWorked(collStats);
- assert.eq(-1,
- creationString.indexOf(indexSpecificConfigString),
- 'index-specific option present in creation string even though it was not' +
- ' specified: ' + creationString);
- }
+ checkIndexWithoutOptions(collStats.indexDetails);
+ checkIndexWithOptions(collStats.indexDetails);
- function checkIndexWithOptions(indexDetails) {
- var indexSpec = getIndexSpecByName(testDB.coll, 'with_options');
- assert(indexSpec.hasOwnProperty('storageEngine'),
- 'storage engine options should have been set in the index spec: ' +
- tojson(indexSpec));
- assert.docEq({[engine]: {configString: indexSpecificConfigString}},
- indexSpec.storageEngine,
- engine + ' index options not present in the index spec');
+ MongoRunner.stopMongod(conn);
- var creationString = indexDetails.with_options.creationString;
+ function checkIndexWithoutOptions(indexDetails) {
+ var indexSpec = getIndexSpecByName(testDB.coll, 'without_options');
+ assert(!indexSpec.hasOwnProperty('storageEngine'),
+ 'no storage engine options should have been set in the index spec: ' +
+ tojson(indexSpec));
+
+ var creationString = indexDetails.without_options.creationString;
+ if (hasIndexOptionDefaults) {
assert.eq(-1,
creationString.indexOf(systemWideConfigString),
- 'system-wide index option present in the creation string even though an ' +
- 'index-specific option was specified: ' + creationString);
- assert.eq(-1,
- creationString.indexOf(collectionWideConfigString),
- 'system-wide index option present in the creation string even though an ' +
- 'index-specific option was specified: ' + creationString);
+ 'system-wide index option present in the creation string even though a ' +
+ 'collection-wide option was specified: ' + creationString);
+ assert.lte(0,
+ creationString.indexOf(collectionWideConfigString),
+ 'collection-wide index option not present in the creation string: ' +
+ creationString);
+ } else {
assert.lte(
0,
- creationString.indexOf(indexSpecificConfigString),
- 'index-specific option not present in the creation string: ' + creationString);
+ creationString.indexOf(systemWideConfigString),
+ 'system-wide index option not present in the creation string: ' + creationString);
+ assert.eq(-1,
+ creationString.indexOf(collectionWideConfigString),
+ 'collection-wide index option present in creation string even though ' +
+ 'it was not specified: ' + creationString);
}
+
+ assert.eq(-1,
+ creationString.indexOf(indexSpecificConfigString),
+ 'index-specific option present in creation string even though it was not' +
+ ' specified: ' + creationString);
}
- function getIndexSpecByName(coll, indexName) {
- var indexes = coll.getIndexes().filter(function(spec) {
- return spec.name === indexName;
- });
- assert.eq(1, indexes.length, 'index "' + indexName + '" not found');
- return indexes[0];
+ function checkIndexWithOptions(indexDetails) {
+ var indexSpec = getIndexSpecByName(testDB.coll, 'with_options');
+ assert(
+ indexSpec.hasOwnProperty('storageEngine'),
+ 'storage engine options should have been set in the index spec: ' + tojson(indexSpec));
+ assert.docEq({[engine]: {configString: indexSpecificConfigString}},
+ indexSpec.storageEngine,
+ engine + ' index options not present in the index spec');
+
+ var creationString = indexDetails.with_options.creationString;
+ assert.eq(-1,
+ creationString.indexOf(systemWideConfigString),
+ 'system-wide index option present in the creation string even though an ' +
+ 'index-specific option was specified: ' + creationString);
+ assert.eq(-1,
+ creationString.indexOf(collectionWideConfigString),
+              'collection-wide index option present in the creation string even though an ' +
+ 'index-specific option was specified: ' + creationString);
+ assert.lte(0,
+ creationString.indexOf(indexSpecificConfigString),
+ 'index-specific option not present in the creation string: ' + creationString);
}
+}
+
+function getIndexSpecByName(coll, indexName) {
+ var indexes = coll.getIndexes().filter(function(spec) {
+ return spec.name === indexName;
+ });
+ assert.eq(1, indexes.length, 'index "' + indexName + '" not found');
+ return indexes[0];
+}
})();
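
To summarize the three override levels the test verifies, a condensed sketch, assuming a db handle `testDB` running on WiredTiger:

    // System-wide default: mongod --wiredTigerIndexConfigString 'split_pct=70,'
    // Collection-wide default, applied to indexes that specify nothing themselves:
    assert.commandWorked(testDB.runCommand({
        create: 'coll',
        indexOptionDefaults: {storageEngine: {wiredTiger: {configString: 'split_pct=75,'}}}
    }));
    // Index-specific option, overriding both of the above:
    assert.commandWorked(testDB.coll.createIndex(
        {b: 1}, {storageEngine: {wiredTiger: {configString: 'split_pct=80,'}}}));
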
diff --git a/jstests/noPassthrough/wt_malformed_creation_string.js b/jstests/noPassthrough/wt_malformed_creation_string.js
index 4067cca329f..e6ba7d08e31 100644
--- a/jstests/noPassthrough/wt_malformed_creation_string.js
+++ b/jstests/noPassthrough/wt_malformed_creation_string.js
@@ -2,59 +2,59 @@
* Tests that a null embedded malformed string is rejected gracefully.
*/
(function() {
- 'use strict';
-
- var engine = 'wiredTiger';
- if (jsTest.options().storageEngine) {
- engine = jsTest.options().storageEngine;
+'use strict';
+
+var engine = 'wiredTiger';
+if (jsTest.options().storageEngine) {
+ engine = jsTest.options().storageEngine;
+}
+
+// Skip this test if not running with the right storage engine.
+if (engine !== 'wiredTiger' && engine !== 'inMemory') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
+ return;
+}
+
+// Build an array of malformed strings to test
+var malformedStrings = ["\u0000000", "\0,", "bl\0ah", "split_pct=30,\0split_pct=35,"];
+
+// Start up a mongod.
+// Test that collection and index creation with malformed creation strings fail gracefully.
+runTest();
+
+function runTest() {
+ var dbpath = MongoRunner.dataPath + 'wt_malformed_creation_string';
+ resetDbpath(dbpath);
+
+ // Start a mongod
+ var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ });
+ assert.neq(null, conn, 'mongod was unable to start up');
+
+ var testDB = conn.getDB('test');
+
+ // Collection creation with malformed string should fail
+ for (var i = 0; i < malformedStrings.length; i++) {
+ assert.commandFailedWithCode(
+ testDB.createCollection(
+ 'coll', {storageEngine: {[engine]: {configString: malformedStrings[i]}}}),
+ ErrorCodes.FailedToParse);
}
- // Skip this test if not running with the right storage engine.
- if (engine !== 'wiredTiger' && engine !== 'inMemory') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
- return;
- }
+ // Create collection to test index creation on
+ assert.commandWorked(testDB.createCollection('coll'));
- // Build an array of malformed strings to test
- var malformedStrings = ["\u0000000", "\0,", "bl\0ah", "split_pct=30,\0split_pct=35,"];
-
- // Start up a mongod.
- // Test that collection and index creation with malformed creation strings fail gracefully.
- runTest();
-
- function runTest() {
- var dbpath = MongoRunner.dataPath + 'wt_malformed_creation_string';
- resetDbpath(dbpath);
-
- // Start a mongod
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- var testDB = conn.getDB('test');
-
- // Collection creation with malformed string should fail
- for (var i = 0; i < malformedStrings.length; i++) {
- assert.commandFailedWithCode(
- testDB.createCollection(
- 'coll', {storageEngine: {[engine]: {configString: malformedStrings[i]}}}),
- ErrorCodes.FailedToParse);
- }
-
- // Create collection to test index creation on
- assert.commandWorked(testDB.createCollection('coll'));
-
- // Index creation with malformed string should fail
- for (var i = 0; i < malformedStrings.length; i++) {
- assert.commandFailedWithCode(testDB.coll.createIndex({a: 1}, {
- name: 'with_malformed_str',
- storageEngine: {[engine]: {configString: malformedStrings[i]}}
- }),
- ErrorCodes.FailedToParse);
- }
-
- MongoRunner.stopMongod(conn);
+ // Index creation with malformed string should fail
+ for (var i = 0; i < malformedStrings.length; i++) {
+ assert.commandFailedWithCode(testDB.coll.createIndex({a: 1}, {
+ name: 'with_malformed_str',
+ storageEngine: {[engine]: {configString: malformedStrings[i]}}
+ }),
+ ErrorCodes.FailedToParse);
}
+
+ MongoRunner.stopMongod(conn);
+}
})();
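
As a usage note, a one-line sketch of the failure mode the test asserts, assuming a db handle `testDB`; the embedded NUL makes the WiredTiger config string unparseable, so creation fails cleanly:

    assert.commandFailedWithCode(
        testDB.createCollection('c', {storageEngine: {wiredTiger: {configString: 'bl\0ah'}}}),
        ErrorCodes.FailedToParse);
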
diff --git a/jstests/noPassthrough/wt_nojournal_skip_recovery.js b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
index 97988f84d83..df813e080d3 100644
--- a/jstests/noPassthrough/wt_nojournal_skip_recovery.js
+++ b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
@@ -4,95 +4,95 @@
* Also verifies that deleting the journal/ directory allows those operations to safely be ignored.
*/
(function() {
- 'use strict';
+'use strict';
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
- // Skip this test until we figure out why journaled writes are replayed after last checkpoint.
- TestData.skipCollectionAndIndexValidation = true;
+// Skip this test until we figure out why journaled writes are replayed after last checkpoint.
+TestData.skipCollectionAndIndexValidation = true;
- var dbpath = MongoRunner.dataPath + 'wt_nojournal_skip_recovery';
- resetDbpath(dbpath);
+var dbpath = MongoRunner.dataPath + 'wt_nojournal_skip_recovery';
+resetDbpath(dbpath);
- // Start a mongod with journaling enabled.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- // Wait an hour between checkpoints to ensure one isn't created after the fsync command is
- // executed and before the mongod is terminated. This is necessary to ensure that exactly 90
- // documents with the 'journaled' field exist in the collection.
- wiredTigerEngineConfigString: 'checkpoint=(wait=3600)'
- });
- assert.neq(null, conn, 'mongod was unable to start up');
+// Start a mongod with journaling enabled.
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: '',
+ // Wait an hour between checkpoints to ensure one isn't created after the fsync command is
+ // executed and before the mongod is terminated. This is necessary to ensure that exactly 90
+ // documents with the 'journaled' field exist in the collection.
+ wiredTigerEngineConfigString: 'checkpoint=(wait=3600)'
+});
+assert.neq(null, conn, 'mongod was unable to start up');
- // Execute unjournaled inserts, but periodically do a journaled insert. Triggers a checkpoint
- // prior to the mongod being terminated.
- var awaitShell = startParallelShell(function() {
- for (let loopNum = 1; true; ++loopNum) {
- var bulk = db.nojournal.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; ++i) {
- bulk.insert({unjournaled: i});
- }
- assert.writeOK(bulk.execute({j: false}));
- assert.writeOK(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
+// Execute unjournaled inserts, but periodically do a journaled insert. Triggers a checkpoint
+// prior to the mongod being terminated.
+var awaitShell = startParallelShell(function() {
+ for (let loopNum = 1; true; ++loopNum) {
+ var bulk = db.nojournal.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; ++i) {
+ bulk.insert({unjournaled: i});
+ }
+ assert.writeOK(bulk.execute({j: false}));
+ assert.writeOK(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
- // Create a checkpoint slightly before the mongod is terminated.
- if (loopNum === 90) {
- assert.commandWorked(db.adminCommand({fsync: 1}));
- }
+ // Create a checkpoint slightly before the mongod is terminated.
+ if (loopNum === 90) {
+ assert.commandWorked(db.adminCommand({fsync: 1}));
}
- }, conn.port);
+ }
+}, conn.port);
- // After some journaled write operations have been performed against the mongod, send a SIGKILL
- // to the process to trigger an unclean shutdown.
- assert.soon(
- function() {
- var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
- if (count >= 100) {
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- return true;
- }
- return false;
- },
- 'the parallel shell did not perform at least 100 journaled inserts',
- 5 * 60 * 1000 /*timeout ms*/);
+// After some journaled write operations have been performed against the mongod, send a SIGKILL
+// to the process to trigger an unclean shutdown.
+assert.soon(
+ function() {
+ var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
+ if (count >= 100) {
+ MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+ return true;
+ }
+ return false;
+ },
+ 'the parallel shell did not perform at least 100 journaled inserts',
+ 5 * 60 * 1000 /*timeout ms*/);
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
+var exitCode = awaitShell({checkExitSuccess: false});
+assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
- // Restart the mongod with journaling disabled, but configure it to error if the database needs
- // recovery.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- wiredTigerEngineConfigString: 'log=(recover=error)',
- });
- assert.eq(null, conn, 'mongod should not have started up because it requires recovery');
+// Restart the mongod with journaling disabled, but configure it to error if the database needs
+// recovery.
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ nojournal: '',
+ wiredTigerEngineConfigString: 'log=(recover=error)',
+});
+assert.eq(null, conn, 'mongod should not have started up because it requires recovery');
- // Remove the journal files.
- assert(removeFile(dbpath + '/journal'), 'failed to remove the journal directory');
+// Remove the journal files.
+assert(removeFile(dbpath + '/journal'), 'failed to remove the journal directory');
- // Restart the mongod with journaling disabled again.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- wiredTigerEngineConfigString: 'log=(recover=error)',
- });
- assert.neq(null, conn, 'mongod was unable to start up after removing the journal directory');
+// Restart the mongod with journaling disabled again.
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ nojournal: '',
+ wiredTigerEngineConfigString: 'log=(recover=error)',
+});
+assert.neq(null, conn, 'mongod was unable to start up after removing the journal directory');
- var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
- assert.lte(90, count, 'missing documents that were present in the last checkpoint');
- assert.gte(90,
- count,
- 'journaled write operations since the last checkpoint should not have been' +
- ' replayed');
+var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
+assert.lte(90, count, 'missing documents that were present in the last checkpoint');
+assert.gte(90,
+ count,
+ 'journaled write operations since the last checkpoint should not have been' +
+ ' replayed');
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
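
Two primitives from the test above, sketched in isolation (assuming a collection handle `coll` and db handle `db`): a journaled write that blocks until the journal is durable, and the fsync command that forces a checkpoint:

    assert.writeOK(coll.insert({journaled: 1}, {writeConcern: {j: true}}));  // durable in journal
    assert.commandWorked(db.adminCommand({fsync: 1}));                       // force a checkpoint
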
diff --git a/jstests/noPassthrough/wt_nojournal_toggle.js b/jstests/noPassthrough/wt_nojournal_toggle.js
index f09f92f6a95..50d5483aa26 100644
--- a/jstests/noPassthrough/wt_nojournal_toggle.js
+++ b/jstests/noPassthrough/wt_nojournal_toggle.js
@@ -3,121 +3,121 @@
* when the mongod is killed and restarted with --nojournal.
*/
(function() {
- 'use strict';
-
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
-
- // Returns a function that primarily executes unjournaled inserts, but periodically does a
- // journaled insert. If 'checkpoint' is true, then the fsync command is run to create a
- // checkpoint prior to the mongod being terminated.
- function insertFunctionFactory(checkpoint) {
- var insertFunction = function() {
- for (var iter = 0; iter < 1000; ++iter) {
- var bulk = db.nojournal.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; ++i) {
- bulk.insert({unjournaled: i});
- }
- assert.writeOK(bulk.execute({j: false}));
- assert.writeOK(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
- if (__checkpoint_template_placeholder__ && iter === 50) {
- assert.commandWorked(db.adminCommand({fsync: 1}));
- }
+'use strict';
+
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
+
+// Returns a function that primarily executes unjournaled inserts, but periodically does a
+// journaled insert. If 'checkpoint' is true, then the fsync command is run to create a
+// checkpoint prior to the mongod being terminated.
+function insertFunctionFactory(checkpoint) {
+ var insertFunction = function() {
+ for (var iter = 0; iter < 1000; ++iter) {
+ var bulk = db.nojournal.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; ++i) {
+ bulk.insert({unjournaled: i});
}
- };
-
- return '(' +
- insertFunction.toString().replace('__checkpoint_template_placeholder__',
- checkpoint.toString()) +
- ')();';
- }
-
- function runTest(options) {
- var dbpath = MongoRunner.dataPath + 'wt_nojournal_toggle';
- resetDbpath(dbpath);
-
- // Start a mongod with journaling enabled.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- // Run a mixture of journaled and unjournaled write operations against the mongod.
- var awaitShell = startParallelShell(insertFunctionFactory(options.checkpoint), conn.port);
-
- // After some journaled write operations have been performed against the mongod, send a
- // SIGKILL to the process to trigger an unclean shutdown.
- assert.soon(function() {
- var testDB = conn.getDB('test');
- var count = testDB.nojournal.count({journaled: {$exists: true}});
- if (count >= 100) {
- // We saw 100 journaled inserts, but visibility does not guarantee durability, so
- // do an extra journaled write to make all visible commits durable, before killing
- // the mongod.
- assert.writeOK(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- return true;
+ assert.writeOK(bulk.execute({j: false}));
+ assert.writeOK(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
+ if (__checkpoint_template_placeholder__ && iter === 50) {
+ assert.commandWorked(db.adminCommand({fsync: 1}));
}
- return false;
- }, 'the parallel shell did not perform at least 100 journaled inserts');
-
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
-
- // Restart the mongod with journaling disabled.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- });
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
-
+ }
+ };
+
+ return '(' +
+ insertFunction.toString().replace('__checkpoint_template_placeholder__',
+ checkpoint.toString()) +
+ ')();';
+}
+
+function runTest(options) {
+ var dbpath = MongoRunner.dataPath + 'wt_nojournal_toggle';
+ resetDbpath(dbpath);
+
+ // Start a mongod with journaling enabled.
+ var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: '',
+ });
+ assert.neq(null, conn, 'mongod was unable to start up');
+
+ // Run a mixture of journaled and unjournaled write operations against the mongod.
+ var awaitShell = startParallelShell(insertFunctionFactory(options.checkpoint), conn.port);
+
+ // After some journaled write operations have been performed against the mongod, send a
+ // SIGKILL to the process to trigger an unclean shutdown.
+ assert.soon(function() {
var testDB = conn.getDB('test');
- assert.eq(1, testDB.nojournal.count({final: true}), 'final journaled write was not found');
- assert.lte(100,
- testDB.nojournal.count({journaled: {$exists: true}}),
- 'journaled write operations since the last checkpoint were not replayed');
-
- var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
- assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.eq(initialNumLogWrites,
- testDB.serverStatus().wiredTiger.log['log write operations'],
- 'journaling is still enabled even though --nojournal was specified');
-
- MongoRunner.stopMongod(conn);
-
- // Restart the mongod with journaling enabled.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- });
- assert.neq(null, conn, 'mongod was unable to start up after re-enabling journaling');
-
- // Change the database object to connect to the restarted mongod.
- testDB = conn.getDB('test');
- initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
-
- assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.lt(initialNumLogWrites,
- testDB.serverStatus().wiredTiger.log['log write operations'],
- 'journaling is still disabled even though --journal was specified');
-
- MongoRunner.stopMongod(conn);
- }
-
- // Operations from the journal should be replayed even when the mongod is terminated before
- // anything is written to disk.
- jsTest.log('Running the test without ever creating a checkpoint');
- runTest({checkpoint: false});
-
- // Repeat the test again, but ensure that some data is written to disk before the mongod is
- // terminated.
- jsTest.log('Creating a checkpoint part-way through running the test');
- runTest({checkpoint: true});
+ var count = testDB.nojournal.count({journaled: {$exists: true}});
+ if (count >= 100) {
+ // We saw 100 journaled inserts, but visibility does not guarantee durability, so
+ // do an extra journaled write to make all visible commits durable, before killing
+ // the mongod.
+ assert.writeOK(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
+ MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+ return true;
+ }
+ return false;
+ }, 'the parallel shell did not perform at least 100 journaled inserts');
+
+ var exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
+
+ // Restart the mongod with journaling disabled.
+ conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ nojournal: '',
+ });
+ assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
+
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.nojournal.count({final: true}), 'final journaled write was not found');
+ assert.lte(100,
+ testDB.nojournal.count({journaled: {$exists: true}}),
+ 'journaled write operations since the last checkpoint were not replayed');
+
+ var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
+ assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+ assert.eq(initialNumLogWrites,
+ testDB.serverStatus().wiredTiger.log['log write operations'],
+ 'journaling is still enabled even though --nojournal was specified');
+
+ MongoRunner.stopMongod(conn);
+
+ // Restart the mongod with journaling enabled.
+ conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: '',
+ });
+ assert.neq(null, conn, 'mongod was unable to start up after re-enabling journaling');
+
+ // Change the database object to connect to the restarted mongod.
+ testDB = conn.getDB('test');
+ initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
+
+ assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+ assert.lt(initialNumLogWrites,
+ testDB.serverStatus().wiredTiger.log['log write operations'],
+ 'journaling is still disabled even though --journal was specified');
+
+ MongoRunner.stopMongod(conn);
+}
+
+// Operations from the journal should be replayed even when the mongod is terminated before
+// anything is written to disk.
+jsTest.log('Running the test without ever creating a checkpoint');
+runTest({checkpoint: false});
+
+// Repeat the test again, but ensure that some data is written to disk before the mongod is
+// terminated.
+jsTest.log('Creating a checkpoint part-way through running the test');
+runTest({checkpoint: true});
})();
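
For reference, the durability contract this test leans on fits in a few lines: a write acknowledged with {writeConcern: {j: true}} was journaled before acknowledgment, so it must survive a SIGKILL. Below is a minimal sketch using the same shell helpers the test uses; the dbpath suffix and collection name are made up for illustration.

// Minimal durability sketch, assuming the same shell helpers as above.
// 'j_write_sketch' and the 'sketch' collection are illustrative names.
var sketchPath = MongoRunner.dataPath + 'j_write_sketch';
resetDbpath(sketchPath);
var sketchConn = MongoRunner.runMongod({dbpath: sketchPath, noCleanData: true, journal: ''});
// An acknowledged {j: true} write is journaled, so it must survive SIGKILL.
assert.writeOK(sketchConn.getDB('test').sketch.insert({_id: 'durable'},
                                                      {writeConcern: {j: true}}));
MongoRunner.stopMongod(sketchConn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
// Journal recovery on restart replays the acknowledged write.
sketchConn = MongoRunner.runMongod({dbpath: sketchPath, noCleanData: true, journal: ''});
assert.eq(1, sketchConn.getDB('test').sketch.count({_id: 'durable'}));
MongoRunner.stopMongod(sketchConn);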
diff --git a/jstests/noPassthrough/wt_operation_stats.js b/jstests/noPassthrough/wt_operation_stats.js
index b9c84e356f3..e273dd34170 100644
--- a/jstests/noPassthrough/wt_operation_stats.js
+++ b/jstests/noPassthrough/wt_operation_stats.js
@@ -7,83 +7,83 @@
// @tags: [requires_profiling]
(function() {
- 'use strict';
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- const readStatRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
-
- let checkLogStats = function() {
- // Check if the log output contains the expected statistics.
- let mongodLogs = rawMongoProgramOutput();
- let lines = mongodLogs.split('\n');
- let match;
- let logLineCount = 0;
- for (let line of lines) {
- if ((match = readStatRegx.exec(line)) !== null) {
- jsTestLog(line);
- logLineCount++;
- }
- }
- assert.gte(logLineCount, 1);
- };
-
- let checkSystemProfileStats = function(profileObj, statName) {
- // Check that the profiled operation contains the expected statistics.
- assert(profileObj.hasOwnProperty("storage"), tojson(profileObj));
- assert(profileObj.storage.hasOwnProperty("data"), tojson(profileObj));
- assert(profileObj.storage.data.hasOwnProperty(statName), tojson(profileObj));
- };
-
- // This test can only be run if the storageEngine is wiredTiger
- if (jsTest.options().storageEngine && (jsTest.options().storageEngine !== "wiredTiger")) {
- jsTestLog("Skipping test because storageEngine is not wiredTiger");
- } else {
- let name = "wt_op_stat";
-
- jsTestLog("run mongod");
- let conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB(name);
-
- // Insert 200 documents of size 1K each, spanning multiple pages in the btree.
- let value = 'a'.repeat(1024);
-
- jsTestLog("insert data");
- for (let i = 0; i < 200; i++) {
- assert.writeOK(testDB.foo.insert({x: value}));
+'use strict';
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
+const readStatRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
+
+let checkLogStats = function() {
+ // Check if the log output contains the expected statistics.
+ let mongodLogs = rawMongoProgramOutput();
+ let lines = mongodLogs.split('\n');
+ let match;
+ let logLineCount = 0;
+ for (let line of lines) {
+ if ((match = readStatRegx.exec(line)) !== null) {
+ jsTestLog(line);
+ logLineCount++;
}
+ }
+ assert.gte(logLineCount, 1);
+};
+
+let checkSystemProfileStats = function(profileObj, statName) {
+ // Check that the profiled operation contains the expected statistics.
+ assert(profileObj.hasOwnProperty("storage"), tojson(profileObj));
+ assert(profileObj.storage.hasOwnProperty("data"), tojson(profileObj));
+ assert(profileObj.storage.data.hasOwnProperty(statName), tojson(profileObj));
+};
+
+// This test can only be run if the storageEngine is wiredTiger
+if (jsTest.options().storageEngine && (jsTest.options().storageEngine !== "wiredTiger")) {
+ jsTestLog("Skipping test because storageEngine is not wiredTiger");
+} else {
+ let name = "wt_op_stat";
+
+ jsTestLog("run mongod");
+ let conn = MongoRunner.runMongod();
+ assert.neq(null, conn, "mongod was unable to start up");
+ let testDB = conn.getDB(name);
+
+ // Insert 200 documents of size 1K each, spanning multiple pages in the btree.
+ let value = 'a'.repeat(1024);
+
+ jsTestLog("insert data");
+ for (let i = 0; i < 200; i++) {
+ assert.writeOK(testDB.foo.insert({x: value}));
+ }
- let connport = conn.port;
- MongoRunner.stopMongod(conn);
-
- // Restart the server
- conn = MongoRunner.runMongod({
- restart: true,
- port: connport,
- slowms: "0",
- });
-
- clearRawMongoProgramOutput();
-
- // Scan the collection and check the bytes read statistic in the slowop log and
- // system.profile.
- testDB = conn.getDB(name);
- testDB.setProfilingLevel(2);
- jsTestLog("read data");
- let cur = testDB.foo.find();
- while (cur.hasNext()) {
- cur.next();
- }
+ let connport = conn.port;
+ MongoRunner.stopMongod(conn);
+
+ // Restart the server
+ conn = MongoRunner.runMongod({
+ restart: true,
+ port: connport,
+ slowms: "0",
+ });
+
+ clearRawMongoProgramOutput();
+
+ // Scan the collection and check the bytes read statistic in the slowop log and
+ // system.profile.
+ testDB = conn.getDB(name);
+ testDB.setProfilingLevel(2);
+ jsTestLog("read data");
+ let cur = testDB.foo.find();
+ while (cur.hasNext()) {
+ cur.next();
+ }
- // Look for the storage statistics in the profiled output of the find command.
- let profileObj = getLatestProfilerEntry(testDB, {op: "query", ns: "wt_op_stat.foo"});
- checkSystemProfileStats(profileObj, "bytesRead");
+ // Look for the storage statistics in the profiled output of the find command.
+ let profileObj = getLatestProfilerEntry(testDB, {op: "query", ns: "wt_op_stat.foo"});
+ checkSystemProfileStats(profileObj, "bytesRead");
- // Stopping the mongod waits until all of its logs have been read by the mongo shell.
- MongoRunner.stopMongod(conn);
- checkLogStats();
+ // Stopping the mongod waits until all of its logs have been read by the mongo shell.
+ MongoRunner.stopMongod(conn);
+ checkLogStats();
- jsTestLog("Success!");
- }
+ jsTestLog("Success!");
+}
})();
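
The log-scraping step above hinges on a single regex. A tiny self-checking sketch of how it extracts the counter follows; the sample log line is fabricated for illustration, and real slow-op lines carry more fields around the storage block.

// Hedged sketch: extract bytesRead from a slow-op log line.
const bytesReadRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
const sampleLine = 'command test.foo ... storage:{ data: { bytesRead: 12345 } } 120ms';
const m = bytesReadRegx.exec(sampleLine);
assert.neq(null, m, 'regex should match the storage block');
assert.eq(12345, parseInt(m[1], 10));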
diff --git a/jstests/noPassthrough/wt_prepare_conflict.js b/jstests/noPassthrough/wt_prepare_conflict.js
index 67fffd10210..c562c0eab2f 100644
--- a/jstests/noPassthrough/wt_prepare_conflict.js
+++ b/jstests/noPassthrough/wt_prepare_conflict.js
@@ -4,61 +4,49 @@
* @tag: [requires_wiredtiger]
*/
(function() {
- "strict";
-
- let conn = MongoRunner.runMongod();
- let testDB = conn.getDB("test");
-
- let t = testDB.prepare_conflict;
- t.drop();
-
- // Test different types of operations: removals, updates, and index operations.
- assert.commandWorked(t.createIndex({x: 1}));
- assert.commandWorked(
- t.createIndex({y: 1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: true}));
- let rand = {"#RAND_INT": [0, 1000]};
- let ops = [
- {op: "remove", ns: t.getFullName(), query: {_id: rand}},
- {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
- {
- op: "update",
- ns: t.getFullName(),
- query: {_id: rand},
- update: {$inc: {x: 1}},
- upsert: true
- },
- {op: "findOne", ns: t.getFullName(), query: {x: rand}},
- {
- op: "update",
- ns: t.getFullName(),
- query: {_id: rand},
- update: {$inc: {y: 1}},
- upsert: true
- },
- {op: "findOne", ns: t.getFullName(), query: {y: rand}},
- {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
- ];
-
- let seconds = 5;
- let parallel = 5;
- let host = testDB.getMongo().host;
-
- let benchArgs = {ops, seconds, parallel, host};
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'alwaysOn'}));
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTPrepareConflictForReads', mode: {activationProbability: 0.05}}));
-
- res = benchRun(benchArgs);
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTPrepareConflictForReads', mode: "off"}));
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'off'}));
- res = t.validate();
- assert(res.valid, tojson(res));
- MongoRunner.stopMongod(conn);
+"strict";
+
+let conn = MongoRunner.runMongod();
+let testDB = conn.getDB("test");
+
+let t = testDB.prepare_conflict;
+t.drop();
+
+// Test different types of operations: removals, updates, and index operations.
+assert.commandWorked(t.createIndex({x: 1}));
+assert.commandWorked(
+ t.createIndex({y: 1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: true}));
+let rand = {"#RAND_INT": [0, 1000]};
+let ops = [
+ {op: "remove", ns: t.getFullName(), query: {_id: rand}},
+ {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
+ {op: "update", ns: t.getFullName(), query: {_id: rand}, update: {$inc: {x: 1}}, upsert: true},
+ {op: "findOne", ns: t.getFullName(), query: {x: rand}},
+ {op: "update", ns: t.getFullName(), query: {_id: rand}, update: {$inc: {y: 1}}, upsert: true},
+ {op: "findOne", ns: t.getFullName(), query: {y: rand}},
+ {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
+];
+
+let seconds = 5;
+let parallel = 5;
+let host = testDB.getMongo().host;
+
+let benchArgs = {ops, seconds, parallel, host};
+
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'alwaysOn'}));
+
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTPrepareConflictForReads', mode: {activationProbability: 0.05}}));
+
+let res = benchRun(benchArgs);
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTPrepareConflictForReads', mode: "off"}));
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'off'}));
+res = t.validate();
+assert(res.valid, tojson(res));
+MongoRunner.stopMongod(conn);
})();
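
The enable/run/disable failpoint bracketing above is a recurring pattern in these tests; factored out, it looks like the sketch below. The helper name is ours, not a shell builtin, and the finally block guarantees the failpoint is disabled even if the workload throws.

// Hypothetical helper (not a shell builtin) wrapping the failpoint pattern above.
function withFailPoint(db, fpName, fpMode, workloadFn) {
    assert.commandWorked(db.adminCommand({configureFailPoint: fpName, mode: fpMode}));
    try {
        return workloadFn();
    } finally {
        // Always disable the failpoint, even if the workload throws.
        assert.commandWorked(db.adminCommand({configureFailPoint: fpName, mode: 'off'}));
    }
}
// e.g. withFailPoint(testDB, 'WTPrepareConflictForReads',
//                    {activationProbability: 0.05}, () => benchRun(benchArgs));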
diff --git a/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js b/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js
index 02d20790a40..c93eb10e415 100644
--- a/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js
+++ b/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js
@@ -6,64 +6,63 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB("test");
- const testColl = testDB.getCollection("wt_skip_prepare_conflict_retries_failpoint");
+const primary = rst.getPrimary();
+const testDB = primary.getDB("test");
+const testColl = testDB.getCollection("wt_skip_prepare_conflict_retries_failpoint");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(testDB.getName());
- const sessionColl = sessionDB.getCollection(testColl.getName());
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(testDB.getName());
+const sessionColl = sessionDB.getCollection(testColl.getName());
- assert.commandWorked(testDB.runCommand({profile: 2}));
+assert.commandWorked(testDB.runCommand({profile: 2}));
- assert.commandWorked(
- testColl.insert({_id: 1, note: "from before transaction"}, {w: "majority"}));
+assert.commandWorked(testColl.insert({_id: 1, note: "from before transaction"}, {w: "majority"}));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "WTSkipPrepareConflictRetries", mode: "alwaysOn"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "WTSkipPrepareConflictRetries", mode: "alwaysOn"}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
- // A non-transactional operation conflicting with a write operation performed inside a
- // multistatement transaction can encounter a WT_PREPARE_CONFLICT in the wiredtiger
- // layer under several circumstances, such as performing an insert, update, or find
- // on a document that is in a prepare statement. The non-transactional operation
- // would then be retried after the prepared transaction commits or aborts. However, with the
- // "WTSkipPrepareConflictRetries"failpoint enabled, the non-transactional operation would
- // instead return with a WT_ROLLBACK error. This would then get bubbled up as a
- // WriteConflictException. Enabling the "skipWriteConflictRetries" failpoint then prevents
- // the higher layers from retrying the entire operation.
- session.startTransaction();
+// A non-transactional operation conflicting with a write operation performed inside a
+// multi-statement transaction can encounter a WT_PREPARE_CONFLICT in the WiredTiger
+// layer under several circumstances, such as performing an insert, update, or find
+// on a document that is part of a prepared transaction. The non-transactional operation
+// would then be retried after the prepared transaction commits or aborts. However, with
+// the "WTSkipPrepareConflictRetries" failpoint enabled, the non-transactional operation
+// instead returns a WT_ROLLBACK error, which bubbles up as a WriteConflictException.
+// Enabling the "skipWriteConflictRetries" failpoint then prevents the higher layers
+// from retrying the entire operation.
+session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {note: "from prepared transaction"}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {note: "from prepared transaction"}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(
- testColl.update({_id: 1}, {$set: {note: "outside prepared transaction"}}),
- ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(
+ testColl.update({_id: 1}, {$set: {note: "outside prepared transaction"}}),
+ ErrorCodes.WriteConflict);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- const profileEntry =
- testDB.system.profile.findOne({"command.u.$set.note": "outside prepared transaction"});
- assert.gte(profileEntry.prepareReadConflicts, 1);
+const profileEntry =
+ testDB.system.profile.findOne({"command.u.$set.note": "outside prepared transaction"});
+assert.gte(profileEntry.prepareReadConflicts, 1);
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "WTSkipPrepareConflictRetries", mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "WTSkipPrepareConflictRetries", mode: "off"}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
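
Stripped of the failpoints, the prepare-conflict setup above reduces to: prepare a transaction that touches a document, then have an outside write hit that same document. A condensed sketch, assuming the 'primary' connection and prepare_helpers.js loaded as in the test; the collection name is illustrative.

// Condensed sketch of the prepare-conflict setup, assuming 'primary' and
// PrepareHelpers as loaded above; the collection name is made up.
const s = primary.startSession({causalConsistency: false});
const c = s.getDatabase('test').getCollection('prepare_sketch');
assert.commandWorked(c.insert({_id: 1}));
s.startTransaction();
assert.commandWorked(c.update({_id: 1}, {$set: {inTxn: true}}));
const ts = PrepareHelpers.prepareTransaction(s);
// While {_id: 1} is prepared, a non-transactional write to it hits a
// WT_PREPARE_CONFLICT; normally it would retry, but with the failpoints above
// it surfaces as a WriteConflict instead.
assert.commandWorked(PrepareHelpers.commitTransaction(s, ts));
s.endSession();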
diff --git a/jstests/noPassthrough/wt_unclean_shutdown.js b/jstests/noPassthrough/wt_unclean_shutdown.js
index 74dceda2a5c..6b11d8004c9 100644
--- a/jstests/noPassthrough/wt_unclean_shutdown.js
+++ b/jstests/noPassthrough/wt_unclean_shutdown.js
@@ -10,122 +10,122 @@
load('jstests/libs/parallelTester.js'); // For ScopedThread
(function() {
- 'use strict';
-
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+'use strict';
+
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
+
+var dbpath = MongoRunner.dataPath + 'wt_unclean_shutdown';
+resetDbpath(dbpath);
+
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ // Modify some WT settings:
+ // - Disable checkpoints based on log size so that we know no checkpoint gets written.
+ // - Explicitly set checkpoints to 60 seconds in case the default ever changes.
+ // - Turn off archiving and compression for easier debugging if there is a failure.
+ // - Make the maximum file size small to encourage lots of file changes. WT-2706 was
+ // related to log file switches.
+ wiredTigerEngineConfigString:
+ 'checkpoint=(wait=60,log_size=0),log=(archive=false,compressor=none,file_max=10M)'
+});
+assert.neq(null, conn, 'mongod was unable to start up');
+
+var insertWorkload = function(host, start, end) {
+ var conn = new Mongo(host);
+ var testDB = conn.getDB('test');
+
+    // Create a record larger than 128K, which is the threshold for doing an unbuffered
+    // log write in WiredTiger.
+ var largeString = 'a'.repeat(1024 * 128);
+
+ for (var i = start; i < end; i++) {
+ var doc = {_id: i, x: 0};
+ // One of the bugs, WT-2696, was related to large records that used the unbuffered
+ // log code. Periodically insert the large record to stress that code path.
+ if (i % 30 === 0) {
+ doc.x = largeString;
+ }
- var dbpath = MongoRunner.dataPath + 'wt_unclean_shutdown';
- resetDbpath(dbpath);
-
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- // Modify some WT settings:
- // - Disable checkpoints based on log size so that we know no checkpoint gets written.
- // - Explicitly set checkpoints to 60 seconds in case the default ever changes.
- // - Turn off archiving and compression for easier debugging if there is a failure.
- // - Make the maximum file size small to encourage lots of file changes. WT-2706 was
- // related to log file switches.
- wiredTigerEngineConfigString:
- 'checkpoint=(wait=60,log_size=0),log=(archive=false,compressor=none,file_max=10M)'
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- var insertWorkload = function(host, start, end) {
- var conn = new Mongo(host);
- var testDB = conn.getDB('test');
-
- // Create a record larger than 128K which is the threshold to doing an unbuffered log
- // write in WiredTiger.
- var largeString = 'a'.repeat(1024 * 128);
-
- for (var i = start; i < end; i++) {
- var doc = {_id: i, x: 0};
- // One of the bugs, WT-2696, was related to large records that used the unbuffered
- // log code. Periodically insert the large record to stress that code path.
- if (i % 30 === 0) {
- doc.x = largeString;
- }
-
- try {
- testDB.coll.insert(doc);
- } catch (e) {
- // Terminate the loop when mongod is killed.
- break;
- }
+ try {
+ testDB.coll.insert(doc);
+ } catch (e) {
+ // Terminate the loop when mongod is killed.
+ break;
}
- // Return i, the last record we were trying to insert. It is possible that mongod gets
- // killed in the middle but not finding a record at the end is okay. We're only
- // interested in records missing in the middle.
- return {start: start, end: i};
- };
-
- // Start the insert workload threads with partitioned input spaces.
- // We don't run long enough for threads to overlap. Adjust the per thread value if needed.
- var max_per_thread = 1000000;
- var num_threads = 8;
- var threads = [];
- for (var i = 0; i < num_threads; i++) {
- var t = new ScopedThread(
- insertWorkload, conn.host, i * max_per_thread, max_per_thread + (i * max_per_thread));
- threads.push(t);
- t.start();
}
-
- // Sleep for sometime less than a minute so that mongod has not yet written a checkpoint.
- // That will force WT to run recovery all the way from the beginning and we can detect missing
- // records. Sleep for 40 seconds to generate plenty of workload.
- sleep(40000);
-
- // Mongod needs an unclean shutdown so that WT recovery is forced on restart and we can detect
- // any missing records.
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
-
- // Retrieve the start and end data from each thread.
- var retData = [];
- threads.forEach(function(t) {
- t.join();
- retData.push(t.returnData());
- });
-
- // Restart the mongod. This forces WT to run recovery.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- wiredTigerEngineConfigString: 'log=(archive=false,compressor=none,file_max=10M)'
- });
- assert.neq(null, conn, 'mongod should have restarted');
-
- // Verify that every item between start and end for every thread exists in the collection now
- // that recovery has completed.
- var coll = conn.getDB('test').coll;
- for (var i = 0; i < retData.length; i++) {
- // For each start and end, verify every data item exists.
- var thread_data = retData[i];
- var absent = null;
- var missing = null;
- for (var j = thread_data.start; j <= thread_data.end; j++) {
- var idExists = coll.find({_id: j}).count() > 0;
- // The verification is a bit complex. We only want to fail if records in the middle
- // of the range are missing. Records at the end may be missing due to when mongod
- // was killed and records in memory are lost. It is only a bug if a record is missing
- // and a subsequent record exists.
- if (!idExists) {
- absent = j;
- } else if (absent !== null) {
- missing = absent;
- break;
- }
+    // Return i, the last record we tried to insert. Mongod may be killed mid-insert,
+    // and records missing at the end are okay; we're only interested in records
+    // missing in the middle.
+ return {start: start, end: i};
+};
+
+// Start the insert workload threads with partitioned input spaces.
+// We don't run long enough for threads to overlap. Adjust the per thread value if needed.
+var max_per_thread = 1000000;
+var num_threads = 8;
+var threads = [];
+for (var i = 0; i < num_threads; i++) {
+ var t = new ScopedThread(
+ insertWorkload, conn.host, i * max_per_thread, max_per_thread + (i * max_per_thread));
+ threads.push(t);
+ t.start();
+}
+
+// Sleep for some time less than a minute so that mongod has not yet written a checkpoint.
+// That will force WT to run recovery all the way from the beginning and we can detect missing
+// records. Sleep for 40 seconds to generate plenty of workload.
+sleep(40000);
+
+// Mongod needs an unclean shutdown so that WT recovery is forced on restart and we can detect
+// any missing records.
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+
+// Retrieve the start and end data from each thread.
+var retData = [];
+threads.forEach(function(t) {
+ t.join();
+ retData.push(t.returnData());
+});
+
+// Restart the mongod. This forces WT to run recovery.
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ wiredTigerEngineConfigString: 'log=(archive=false,compressor=none,file_max=10M)'
+});
+assert.neq(null, conn, 'mongod should have restarted');
+
+// Verify that every item between start and end for every thread exists in the collection now
+// that recovery has completed.
+var coll = conn.getDB('test').coll;
+for (var i = 0; i < retData.length; i++) {
+ // For each start and end, verify every data item exists.
+ var thread_data = retData[i];
+ var absent = null;
+ var missing = null;
+ for (var j = thread_data.start; j <= thread_data.end; j++) {
+ var idExists = coll.find({_id: j}).count() > 0;
+ // The verification is a bit complex. We only want to fail if records in the middle
+ // of the range are missing. Records at the end may be missing due to when mongod
+ // was killed and records in memory are lost. It is only a bug if a record is missing
+ // and a subsequent record exists.
+ if (!idExists) {
+ absent = j;
+ } else if (absent !== null) {
+ missing = absent;
+ break;
}
- assert.eq(null,
- missing,
- 'Thread ' + i + ' missing id ' + missing + ' start and end for all threads: ' +
- tojson(retData));
}
+ assert.eq(null,
+ missing,
+ 'Thread ' + i + ' missing id ' + missing +
+ ' start and end for all threads: ' + tojson(retData));
+}
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
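
The verification rule at the heart of this test (holes in the middle of a thread's range are bugs, holes at the tail are expected) is easy to state as a standalone function, sketched below with two self-checks.

// The hole-detection rule above as a standalone helper: a missing id is only
// a bug if some later id exists. Purely illustrative.
function firstMiddleGap(existsFn, start, end) {
    var absent = null;
    for (var j = start; j <= end; j++) {
        if (!existsFn(j)) {
            absent = j;     // candidate hole; harmless if nothing follows it
        } else if (absent !== null) {
            return absent;  // a later id exists, so the hole is a real loss
        }
    }
    return null;
}
assert.eq(null, firstMiddleGap((j) => j <= 7, 0, 9));  // only the tail missing
assert.eq(5, firstMiddleGap((j) => j !== 5, 0, 9));    // hole in the middle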
diff --git a/jstests/noPassthrough/yield_during_writes.js b/jstests/noPassthrough/yield_during_writes.js
index 4d05c725659..d1e6845b58e 100644
--- a/jstests/noPassthrough/yield_during_writes.js
+++ b/jstests/noPassthrough/yield_during_writes.js
@@ -1,43 +1,43 @@
// Ensure that multi-update and multi-remove operations yield regularly.
// @tags: [requires_profiling]
(function() {
- 'use strict';
-
- function countOpYields(coll, op) {
- const profileEntry = coll.getDB()
- .system.profile.find({ns: coll.getFullName()})
- .sort({$natural: -1})
- .limit(1)
- .next();
- assert.eq(profileEntry.op, op);
- return profileEntry.numYield;
- }
-
- const nDocsToInsert = 300;
- const worksPerYield = 50;
-
- // Start a mongod that will yield every 50 work cycles.
- const mongod = MongoRunner.runMongod({
- setParameter: `internalQueryExecYieldIterations=${worksPerYield}`,
- profile: 2,
- });
- assert.neq(null, mongod, 'mongod was unable to start up');
-
- const coll = mongod.getDB('test').yield_during_writes;
- coll.drop();
-
- for (let i = 0; i < nDocsToInsert; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
-
- // A multi-update doing a collection scan should yield about nDocsToInsert / worksPerYield
- // times.
- assert.writeOK(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
- assert.gt(countOpYields(coll, 'update'), (nDocsToInsert / worksPerYield) - 2);
-
- // Likewise, a multi-remove should also yield approximately every worksPerYield documents.
- assert.writeOK(coll.remove({}, {multi: true}));
- assert.gt(countOpYields(coll, 'remove'), (nDocsToInsert / worksPerYield) - 2);
-
- MongoRunner.stopMongod(mongod);
+'use strict';
+
+function countOpYields(coll, op) {
+ const profileEntry = coll.getDB()
+ .system.profile.find({ns: coll.getFullName()})
+ .sort({$natural: -1})
+ .limit(1)
+ .next();
+ assert.eq(profileEntry.op, op);
+ return profileEntry.numYield;
+}
+
+const nDocsToInsert = 300;
+const worksPerYield = 50;
+
+// Start a mongod that will yield every 50 work cycles.
+const mongod = MongoRunner.runMongod({
+ setParameter: `internalQueryExecYieldIterations=${worksPerYield}`,
+ profile: 2,
+});
+assert.neq(null, mongod, 'mongod was unable to start up');
+
+const coll = mongod.getDB('test').yield_during_writes;
+coll.drop();
+
+for (let i = 0; i < nDocsToInsert; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
+
+// A multi-update doing a collection scan should yield about nDocsToInsert / worksPerYield
+// times.
+assert.writeOK(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
+assert.gt(countOpYields(coll, 'update'), (nDocsToInsert / worksPerYield) - 2);
+
+// Likewise, a multi-remove should also yield approximately every worksPerYield documents.
+assert.writeOK(coll.remove({}, {multi: true}));
+assert.gt(countOpYields(coll, 'remove'), (nDocsToInsert / worksPerYield) - 2);
+
+MongoRunner.stopMongod(mongod);
})();
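
For the constants used above, the arithmetic behind the yield assertions is worth spelling out once:

// Worked numbers behind the assertions above (illustrative only):
const docs = 300, works = 50;          // nDocsToInsert, worksPerYield
const expectedYields = docs / works;   // == 6 for a full collection scan
// The test asserts numYield > expectedYields - 2, i.e. at least 5 yields.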
diff --git a/jstests/noPassthroughWithMongod/apply_ops_errors.js b/jstests/noPassthroughWithMongod/apply_ops_errors.js
index 9441d006e43..0cf4a789718 100644
--- a/jstests/noPassthroughWithMongod/apply_ops_errors.js
+++ b/jstests/noPassthroughWithMongod/apply_ops_errors.js
@@ -13,45 +13,45 @@
*/
(function() {
- "use strict";
- var coll = db.apply_ops_errors;
- coll.drop();
-
- // Scenario 1: only one operation
- assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x: 1}, {unique: true});
- coll.insert({_id: 1, x: "init"});
-
- var res = db.runCommand({
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}},
- ]
- });
-
- assert.eq(1, res.applied);
- assert(res.code);
- assert(res.errmsg);
- assert.eq([false], res.results);
- assert.eq(0, res.ok);
-
- coll.drop();
-
- // Scenario 2: Three operations, first two should run, second should fail.
- assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x: 1}, {unique: true});
- coll.insert({_id: 1, x: "init"});
-
- var res = db.runCommand({
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "not init"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "init"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 5, x: "not init again"}},
- ]
- });
-
- assert.eq(2, res.applied);
- assert(res.code);
- assert(res.errmsg);
- assert.eq([false, false], res.results);
- assert.eq(0, res.ok);
+"use strict";
+var coll = db.apply_ops_errors;
+coll.drop();
+
+// Scenario 1: only one operation
+assert.eq(0, coll.find().count(), "test collection not empty");
+coll.ensureIndex({x: 1}, {unique: true});
+coll.insert({_id: 1, x: "init"});
+
+var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}},
+ ]
+});
+
+assert.eq(1, res.applied);
+assert(res.code);
+assert(res.errmsg);
+assert.eq([false], res.results);
+assert.eq(0, res.ok);
+
+coll.drop();
+
+// Scenario 2: Three operations; the first two are attempted, the second fails on the
+// unique index, and the third is never run.
+assert.eq(0, coll.find().count(), "test collection not empty");
+coll.ensureIndex({x: 1}, {unique: true});
+coll.insert({_id: 1, x: "init"});
+
+var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "not init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 5, x: "not init again"}},
+ ]
+});
+
+assert.eq(2, res.applied);
+assert(res.code);
+assert(res.errmsg);
+assert.eq([false, false], res.results);
+assert.eq(0, res.ok);
})();
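
For contrast with the two failure scenarios above, the success path of applyOps is sketched below: one valid insert yields applied: 1, results: [true], and ok: 1. This is a hedged sketch; the collection name is made up.

// Success-path sketch for contrast with the failure scenarios above.
// 'apply_ops_sketch' is an illustrative collection name.
var okRes = db.runCommand({
    applyOps: [{op: 'i', ns: db.apply_ops_sketch.getFullName(), o: {_id: 1, x: 'init'}}]
});
assert.eq(1, okRes.applied);
assert.eq([true], okRes.results);
assert.eq(1, okRes.ok);
db.apply_ops_sketch.drop();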
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index c043a6dd056..e6db1e1bcb9 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -1,102 +1,93 @@
// Tests the "writeCmd" and "readCmd" options to benchRun().
(function() {
- "use strict";
+"use strict";
- var coll = db.bench_test_crud_commands;
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+var coll = db.bench_test_crud_commands;
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
- function makeDocument(docSize) {
- var doc = {"fieldName": ""};
- var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while (Object.bsonsize(doc) < docSize) {
- if (Object.bsonsize(doc) < docSize - longString.length) {
- doc.fieldName += longString;
- } else {
- doc.fieldName += "x";
- }
+function makeDocument(docSize) {
+ var doc = {"fieldName": ""};
+ var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+ while (Object.bsonsize(doc) < docSize) {
+ if (Object.bsonsize(doc) < docSize - longString.length) {
+ doc.fieldName += longString;
+ } else {
+ doc.fieldName += "x";
}
- return doc;
}
+ return doc;
+}
- function executeBenchRun(benchOps) {
- var benchArgs = {ops: benchOps, parallel: 2, seconds: 5, host: db.getMongo().host};
- if (jsTest.options().auth) {
- benchArgs['db'] = 'admin';
- benchArgs['username'] = jsTest.options().authUser;
- benchArgs['password'] = jsTest.options().authPassword;
- }
- return benchRun(benchArgs);
+function executeBenchRun(benchOps) {
+ var benchArgs = {ops: benchOps, parallel: 2, seconds: 5, host: db.getMongo().host};
+ if (jsTest.options().auth) {
+ benchArgs['db'] = 'admin';
+ benchArgs['username'] = jsTest.options().authUser;
+ benchArgs['password'] = jsTest.options().authPassword;
}
+ return benchRun(benchArgs);
+}
+
+function testInsert(docs, writeCmd, wc) {
+ coll.drop();
- function testInsert(docs, writeCmd, wc) {
- coll.drop();
+ var res = executeBenchRun(
+ [{ns: coll.getFullName(), op: "insert", doc: docs, writeCmd: writeCmd, writeConcern: wc}]);
- var res = executeBenchRun([{
- ns: coll.getFullName(),
- op: "insert",
- doc: docs,
- writeCmd: writeCmd,
- writeConcern: wc
- }]);
+ assert.gt(coll.count(), 0);
+ assert.eq(coll.findOne({}, {_id: 0}), docs[0]);
+}
- assert.gt(coll.count(), 0);
- assert.eq(coll.findOne({}, {_id: 0}), docs[0]);
+function testFind(readCmd) {
+ coll.drop();
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({}));
}
- function testFind(readCmd) {
- coll.drop();
- for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({}));
- }
+ var res = executeBenchRun([
+ {ns: coll.getFullName(), op: "find", query: {}, batchSize: NumberInt(10), readCmd: readCmd}
+ ]);
+ assert.gt(res.query, 0, tojson(res));
+}
- var res = executeBenchRun([{
- ns: coll.getFullName(),
- op: "find",
- query: {},
- batchSize: NumberInt(10),
- readCmd: readCmd
- }]);
- assert.gt(res.query, 0, tojson(res));
+function testFindOne(readCmd) {
+ coll.drop();
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({}));
}
- function testFindOne(readCmd) {
- coll.drop();
- for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({}));
- }
+ var res =
+ executeBenchRun([{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
+ assert.gt(res.findOne, 0, tojson(res));
+}
- var res =
- executeBenchRun([{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
- assert.gt(res.findOne, 0, tojson(res));
+function testWriteConcern(writeCmd) {
+ var bigDoc = makeDocument(260 * 1024);
+ var docs = [];
+ for (var i = 0; i < 100; i++) {
+ docs.push({x: 1});
}
- function testWriteConcern(writeCmd) {
- var bigDoc = makeDocument(260 * 1024);
- var docs = [];
- for (var i = 0; i < 100; i++) {
- docs.push({x: 1});
- }
+ testInsert([bigDoc], writeCmd, {});
+ testInsert(docs, writeCmd, {});
+ testInsert(docs, writeCmd, {"w": "majority"});
+ testInsert(docs, writeCmd, {"w": 1, "j": false});
- testInsert([bigDoc], writeCmd, {});
- testInsert(docs, writeCmd, {});
- testInsert(docs, writeCmd, {"w": "majority"});
- testInsert(docs, writeCmd, {"w": 1, "j": false});
-
- var storageEnginesWithoutJournaling = new Set(["ephemeralForTest", "inMemory"]);
- var runningWithoutJournaling = TestData.noJournal ||
- storageEnginesWithoutJournaling.has(db.serverStatus().storageEngine.name);
- if (!runningWithoutJournaling) {
- // Only test journaled writes if the server actually supports them.
- testInsert(docs, writeCmd, {"j": true});
- }
+ var storageEnginesWithoutJournaling = new Set(["ephemeralForTest", "inMemory"]);
+ var runningWithoutJournaling = TestData.noJournal ||
+ storageEnginesWithoutJournaling.has(db.serverStatus().storageEngine.name);
+ if (!runningWithoutJournaling) {
+ // Only test journaled writes if the server actually supports them.
+ testInsert(docs, writeCmd, {"j": true});
}
+}
- testWriteConcern(false);
- testWriteConcern(true);
+testWriteConcern(false);
+testWriteConcern(true);
- testFind(false);
- testFind(true);
- testFindOne(false);
- testFindOne(true);
+testFind(false);
+testFind(true);
+testFindOne(false);
+testFindOne(true);
})();
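
The padding helper above targets a total BSON document size rather than a string length. A byte-at-a-time restatement (slower than the test's chunked version, but easier to read) shows the invariant:

// Byte-at-a-time restatement of the padding idea above (illustrative only;
// the test's chunked version is much faster for large targets).
function padToBsonSize(targetBytes) {
    var doc = {fieldName: ''};
    while (Object.bsonsize(doc) < targetBytes) {
        doc.fieldName += 'x';  // grow until the whole document reaches the target
    }
    return doc;
}
assert.gte(Object.bsonsize(padToBsonSize(4096)), 4096);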
diff --git a/jstests/noPassthroughWithMongod/capped_truncate.js b/jstests/noPassthroughWithMongod/capped_truncate.js
index 1f4cf236c57..25e023c890d 100644
--- a/jstests/noPassthroughWithMongod/capped_truncate.js
+++ b/jstests/noPassthroughWithMongod/capped_truncate.js
@@ -8,50 +8,50 @@
* @tags: [SERVER-21658, requires_capped]
*/
(function() {
- 'use strict';
-
- db.capped_truncate.drop();
- assert.commandWorked(
- db.runCommand({create: "capped_truncate", capped: true, size: 1000, autoIndexId: true}));
- var t = db.capped_truncate;
-
- // It is an error to remove a non-positive number of documents.
- assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: -1}),
- "captrunc didn't return an error when attempting to remove a negative " +
- "number of documents");
- assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
- "captrunc didn't return an error when attempting to remove 0 documents");
-
- for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({x: j}));
- }
-
- // It is an error to try and remove more documents than what exist in the capped collection.
- assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 20}),
- "captrunc didn't return an error when attempting to remove more" +
- " documents than what the collection contains");
-
- assert.commandWorked(db.runCommand({captrunc: "capped_truncate", n: 5, inc: false}));
- assert.eq(5, t.count(), "wrong number of documents in capped collection after truncate");
- assert.eq(5, t.distinct("_id").length, "wrong number of entries in _id index after truncate");
-
- var last = t.find({}, {_id: 1}).sort({_id: -1}).next();
- assert.neq(null,
- t.findOne({_id: last._id}),
- tojson(last) + " is in _id index, but not in capped collection after truncate");
-
- // It is an error to run the captrunc command on a nonexistent collection.
- assert.commandFailed(db.runCommand({captrunc: "nonexistent", n: 1}),
- "captrunc didn't return an error for a nonexistent collection");
-
- // It is an error to run the captrunc command on a non-capped collection.
- var collName = "noncapped";
- db[collName].drop();
-
- assert.commandWorked(db.runCommand({create: collName, capped: false}));
- for (var j = 1; j <= 10; j++) {
- assert.writeOK(db[collName].insert({x: j}));
- }
- assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
- "captrunc didn't return an error for a non-capped collection");
+'use strict';
+
+db.capped_truncate.drop();
+assert.commandWorked(
+ db.runCommand({create: "capped_truncate", capped: true, size: 1000, autoIndexId: true}));
+var t = db.capped_truncate;
+
+// It is an error to remove a non-positive number of documents.
+assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: -1}),
+ "captrunc didn't return an error when attempting to remove a negative " +
+ "number of documents");
+assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
+ "captrunc didn't return an error when attempting to remove 0 documents");
+
+for (var j = 1; j <= 10; j++) {
+ assert.writeOK(t.insert({x: j}));
+}
+
+// It is an error to try and remove more documents than what exist in the capped collection.
+assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 20}),
+ "captrunc didn't return an error when attempting to remove more" +
+ " documents than what the collection contains");
+
+assert.commandWorked(db.runCommand({captrunc: "capped_truncate", n: 5, inc: false}));
+assert.eq(5, t.count(), "wrong number of documents in capped collection after truncate");
+assert.eq(5, t.distinct("_id").length, "wrong number of entries in _id index after truncate");
+
+var last = t.find({}, {_id: 1}).sort({_id: -1}).next();
+assert.neq(null,
+ t.findOne({_id: last._id}),
+ tojson(last) + " is in _id index, but not in capped collection after truncate");
+
+// It is an error to run the captrunc command on a nonexistent collection.
+assert.commandFailed(db.runCommand({captrunc: "nonexistent", n: 1}),
+ "captrunc didn't return an error for a nonexistent collection");
+
+// It is an error to run the captrunc command on a non-capped collection.
+var collName = "noncapped";
+db[collName].drop();
+
+assert.commandWorked(db.runCommand({create: collName, capped: false}));
+for (var j = 1; j <= 10; j++) {
+ assert.writeOK(db[collName].insert({x: j}));
+}
+assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
+ "captrunc didn't return an error for a non-capped collection");
})();
diff --git a/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js b/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
index 3b1f7337133..2526fed9636 100644
--- a/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
+++ b/jstests/noPassthroughWithMongod/captrunc_cursor_invalidation.js
@@ -3,35 +3,35 @@
//
// @tags: [requires_capped]
(function() {
- "use strict";
+"use strict";
- const coll = db.captrunc_cursor_invalidation;
- coll.drop();
+const coll = db.captrunc_cursor_invalidation;
+coll.drop();
- // Create a capped collection with four documents.
- assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
- const numDocs = 4;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
+// Create a capped collection with four documents.
+assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
+const numDocs = 4;
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
- // Open a tailable cursor against the capped collection.
- const findRes = assert.commandWorked(db.runCommand({find: coll.getName(), tailable: true}));
- assert.neq(findRes.cursor.id, 0);
- assert.eq(findRes.cursor.ns, coll.getFullName());
- assert.eq(findRes.cursor.firstBatch.length, 4);
- const cursorId = findRes.cursor.id;
+// Open a tailable cursor against the capped collection.
+const findRes = assert.commandWorked(db.runCommand({find: coll.getName(), tailable: true}));
+assert.neq(findRes.cursor.id, 0);
+assert.eq(findRes.cursor.ns, coll.getFullName());
+assert.eq(findRes.cursor.firstBatch.length, 4);
+const cursorId = findRes.cursor.id;
- // Truncate the capped collection so that the cursor's position no longer exists.
- assert.commandWorked(db.runCommand({captrunc: coll.getName(), n: 2}));
+// Truncate the capped collection so that the cursor's position no longer exists.
+assert.commandWorked(db.runCommand({captrunc: coll.getName(), n: 2}));
- // A subsequent getMore should fail with 'CappedPositionLost'.
- assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
- ErrorCodes.CappedPositionLost);
+// A subsequent getMore should fail with 'CappedPositionLost'.
+assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CappedPositionLost);
- // The cursor has now been destroyed, so another getMore should fail with 'CursorNotFound'.
- assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
- ErrorCodes.CursorNotFound);
+// The cursor has now been destroyed, so another getMore should fail with 'CursorNotFound'.
+assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: coll.getName()}),
+ ErrorCodes.CursorNotFound);
}());
diff --git a/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js b/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js
index bf78d13f887..b7fba31017e 100644
--- a/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js
+++ b/jstests/noPassthroughWithMongod/coll_mod_takes_database_x_lock.js
@@ -2,32 +2,32 @@
* Ensures that the 'collMod' command takes a database MODE_X lock during a no-op.
*/
(function() {
- 'use strict';
+'use strict';
- const failpoint = 'hangAfterDatabaseLock';
- assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+const failpoint = 'hangAfterDatabaseLock';
+assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- const conn = db.getMongo();
- db.createCollection('foo');
+const conn = db.getMongo();
+db.createCollection('foo');
- // Run a no-op collMod command.
- const awaitParallelShell = startParallelShell(() => {
- assert.commandWorked(db.runCommand({collMod: 'foo'}));
- }, conn.port);
+// Run a no-op collMod command.
+const awaitParallelShell = startParallelShell(() => {
+ assert.commandWorked(db.runCommand({collMod: 'foo'}));
+}, conn.port);
- // Check that the database MODE_X lock is being held by checking in lockInfo.
- assert.soon(() => {
- let lockInfo = assert.commandWorked(db.adminCommand({lockInfo: 1})).lockInfo;
- for (let i = 0; i < lockInfo.length; i++) {
- let resourceId = lockInfo[i].resourceId;
- if (resourceId.includes("Database") && resourceId.includes("test")) {
- return true;
- }
+// Check that the database MODE_X lock is being held by checking in lockInfo.
+assert.soon(() => {
+ let lockInfo = assert.commandWorked(db.adminCommand({lockInfo: 1})).lockInfo;
+ for (let i = 0; i < lockInfo.length; i++) {
+ let resourceId = lockInfo[i].resourceId;
+ if (resourceId.includes("Database") && resourceId.includes("test")) {
+ return true;
}
+ }
- return false;
- });
+ return false;
+});
- assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- awaitParallelShell();
+assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+awaitParallelShell();
})();
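
The lockInfo polling above generalizes to a small helper: poll until some held lock's resourceId mentions both the resource type and the name you expect. The helper name is ours, not a shell builtin.

// Hypothetical helper (not a shell builtin) generalizing the lockInfo poll above.
function waitForLockOn(db, typeSubstr, nameSubstr) {
    assert.soon(() => {
        const lockInfo = assert.commandWorked(db.adminCommand({lockInfo: 1})).lockInfo;
        return lockInfo.some((l) => l.resourceId.includes(typeSubstr) &&
                                    l.resourceId.includes(nameSubstr));
    });
}
// e.g. waitForLockOn(db, 'Database', 'test');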
diff --git a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
index b6b4a9fc93d..e10c85a9b4d 100644
--- a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
+++ b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
@@ -2,70 +2,70 @@
* Ensures that the 'collStats' command lists indexes that are ready and in-progress.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const collName = "collstats_show_ready_and_in_progress_indexes";
- const testDB = db.getSiblingDB("test");
- const testColl = db.getCollection(collName);
- testColl.drop();
+const collName = "collstats_show_ready_and_in_progress_indexes";
+const testDB = db.getSiblingDB("test");
+const testColl = db.getCollection(collName);
+testColl.drop();
- const bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 5; ++i) {
- bulk.insert({a: i, b: i * i});
- }
- assert.commandWorked(bulk.execute());
+const bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 5; ++i) {
+ bulk.insert({a: i, b: i * i});
+}
+assert.commandWorked(bulk.execute());
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "alwaysOn"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "alwaysOn"}));
- let awaitParallelShell;
- try {
- jsTest.log("Starting a parallel shell to run two background index builds");
- awaitParallelShell = startParallelShell(() => {
- db.getSiblingDB("test").runCommand({
- createIndexes: "collstats_show_ready_and_in_progress_indexes",
- indexes: [
- {key: {a: 1}, name: 'a_1', background: true},
- {key: {b: 1}, name: 'b_1', background: true}
- ]
- });
- }, db.getMongo().port);
+let awaitParallelShell;
+try {
+ jsTest.log("Starting a parallel shell to run two background index builds");
+ awaitParallelShell = startParallelShell(() => {
+ db.getSiblingDB("test").runCommand({
+ createIndexes: "collstats_show_ready_and_in_progress_indexes",
+ indexes: [
+ {key: {a: 1}, name: 'a_1', background: true},
+ {key: {b: 1}, name: 'b_1', background: true}
+ ]
+ });
+ }, db.getMongo().port);
- jsTest.log("Waiting until the index build begins.");
- // Note that we cannot use checkLog here to wait for the failpoint logging because this test
- // shares a mongod with other tests that might have already provoked identical failpoint
- // logging.
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+ jsTest.log("Waiting until the index build begins.");
+ // Note that we cannot use checkLog here to wait for the failpoint logging because this test
+ // shares a mongod with other tests that might have already provoked identical failpoint
+ // logging.
+ IndexBuildTest.waitForIndexBuildToStart(testDB);
- jsTest.log("Running collStats on collection '" + collName +
- "' to check for expected 'indexSizes', 'nindexes' and 'indexBuilds' results");
- const collStatsRes = assert.commandWorked(db.runCommand({collStats: collName}));
+ jsTest.log("Running collStats on collection '" + collName +
+ "' to check for expected 'indexSizes', 'nindexes' and 'indexBuilds' results");
+ const collStatsRes = assert.commandWorked(db.runCommand({collStats: collName}));
- assert(typeof(collStatsRes.indexSizes._id_) != 'undefined',
- "expected 'indexSizes._id_' to exist: " + tojson(collStatsRes));
- assert(typeof(collStatsRes.indexSizes.a_1) != 'undefined',
- "expected 'indexSizes.a_1' to exist: " + tojson(collStatsRes));
- assert(typeof(collStatsRes.indexSizes.b_1) != 'undefined',
- "expected 'indexSizes.b_1' to exist: " + tojson(collStatsRes));
+ assert(typeof (collStatsRes.indexSizes._id_) != 'undefined',
+ "expected 'indexSizes._id_' to exist: " + tojson(collStatsRes));
+ assert(typeof (collStatsRes.indexSizes.a_1) != 'undefined',
+ "expected 'indexSizes.a_1' to exist: " + tojson(collStatsRes));
+ assert(typeof (collStatsRes.indexSizes.b_1) != 'undefined',
+ "expected 'indexSizes.b_1' to exist: " + tojson(collStatsRes));
- assert.eq(3, collStatsRes.nindexes, "expected 'nindexes' to be 3: " + tojson(collStatsRes));
+ assert.eq(3, collStatsRes.nindexes, "expected 'nindexes' to be 3: " + tojson(collStatsRes));
- assert.eq(2,
- collStatsRes.indexBuilds.length,
- "expected to find 2 entries in 'indexBuilds': " + tojson(collStatsRes));
- assert.eq('a_1',
- collStatsRes.indexBuilds[0],
- "expected to find an 'a_1' index build:" + tojson(collStatsRes));
- assert.eq('b_1',
- collStatsRes.indexBuilds[1],
- "expected to find an 'b_1' index build:" + tojson(collStatsRes));
- } finally {
- // Ensure the failpoint is unset, even if there are assertion failures, so that we do not
- // hang the test/mongod.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "off"}));
- awaitParallelShell();
- }
+ assert.eq(2,
+ collStatsRes.indexBuilds.length,
+ "expected to find 2 entries in 'indexBuilds': " + tojson(collStatsRes));
+ assert.eq('a_1',
+ collStatsRes.indexBuilds[0],
+ "expected to find an 'a_1' index build:" + tojson(collStatsRes));
+ assert.eq('b_1',
+ collStatsRes.indexBuilds[1],
+ "expected to find an 'b_1' index build:" + tojson(collStatsRes));
+} finally {
+ // Ensure the failpoint is unset, even if there are assertion failures, so that we do not
+ // hang the test/mongod.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "off"}));
+ awaitParallelShell();
+}
})();
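
The collStats fields this test inspects are complementary: nindexes counts ready and in-progress indexes together, while indexBuilds names only the in-progress ones. A minimal read of those fields, assuming the test's collection (or any existing collection on a server that reports indexBuilds):

// Minimal sketch of the collStats fields checked above; assumes the named
// collection exists and the server reports 'indexBuilds'.
const stats = assert.commandWorked(
    db.runCommand({collStats: 'collstats_show_ready_and_in_progress_indexes'}));
jsTest.log('nindexes (ready + building): ' + stats.nindexes);
jsTest.log('in-progress builds: ' + tojson(stats.indexBuilds));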
diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
index 0e12eb05a97..25083f1484c 100644
--- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
+++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
@@ -6,168 +6,168 @@
*/
(function() {
- "use strict";
- var collName = 'leaves';
- var coll = db[collName];
-
- var commands = [];
-
- commands.push({
- req: {insert: collName, documents: [{type: 'maple'}]},
- setupFunc: function() {},
- confirmFunc: function() {
- assert.eq(coll.count({type: 'maple'}), 1);
- }
- });
-
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.getIndexes().length, 1);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 2);
- }
- });
-
- commands.push({
- req: {
- update: collName,
- updates: [{
- q: {type: 'oak'},
- u: [{$set: {type: 'ginkgo'}}],
- }],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: {$set: {type: 'ginkgo'}},
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: [{$set: {type: 'ginkgo'}}],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- assert.eq(coll.count({type: 'willow'}), 0);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'willow'}), 1);
- }
- });
+"use strict";
+var collName = 'leaves';
+var coll = db[collName];
- commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- coll.insert({_id: 2, type: 'maple'});
- },
- confirmFunc: function() {
- assert.eq(db.foo.count({type: 'oak'}), 1);
- assert.eq(db.foo.count({type: 'maple'}), 1);
- db.foo.drop();
- }
- });
+var commands = [];
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- return {count: values.length};
- },
- out: "foo"
+commands.push({
+ req: {insert: collName, documents: [{type: 'maple'}]},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'maple'}), 1);
+ }
+});
+
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.getIndexes().length, 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 2);
+ }
+});
+
+commands.push({
+ req: {
+ update: collName,
+ updates: [{
+ q: {type: 'oak'},
+ u: [{$set: {type: 'ginkgo'}}],
+ }],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: {$set: {type: 'ginkgo'}},
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: [{$set: {type: 'ginkgo'}}],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ assert.eq(coll.count({type: 'willow'}), 0);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'willow'}), 1);
+ }
+});
+
+commands.push({
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ coll.insert({_id: 2, type: 'maple'});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.count({type: 'oak'}), 1);
+ assert.eq(db.foo.count({type: 'maple'}), 1);
+ db.foo.drop();
+ }
+});
+
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
},
- setupFunc: function() {
- coll.insert({x: 1, tags: ["a", "b"]});
- coll.insert({x: 2, tags: ["b", "c"]});
- coll.insert({x: 3, tags: ["c", "a"]});
- coll.insert({x: 4, tags: ["b", "c"]});
+ reduce: function(key, values) {
+ return {count: values.length};
},
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
- db.foo.drop();
- }
- });
-
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 1, j: true};
- jsTest.log("Testing " + tojson(cmd.req));
-
- coll.drop();
- cmd.setupFunc();
- var res = db.runCommand(cmd.req);
- assert.commandWorked(res);
- assert(!res.writeConcernError, 'command had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
+ out: "foo"
+ },
+ setupFunc: function() {
+ coll.insert({x: 1, tags: ["a", "b"]});
+ coll.insert({x: 2, tags: ["b", "c"]});
+ coll.insert({x: 3, tags: ["c", "a"]});
+ coll.insert({x: 4, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
+ db.foo.drop();
}
-
- function testInvalidWriteConcern(wc, cmd) {
- cmd.req.writeConcern = wc;
- jsTest.log("Testing " + tojson(cmd.req));
-
- coll.drop();
- cmd.setupFunc();
- var res = coll.runCommand(cmd.req);
- // These commands should fail because standalone writeConcerns are found to be invalid at
- // the validation stage when the writeConcern is parsed, before the command is run.
- assert.commandFailed(res);
- }
-
- var invalidWriteConcerns = [{w: 'invalid'}, {w: 2}, {j: 'invalid'}];
-
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- invalidWriteConcerns.forEach(function(wc) {
- testInvalidWriteConcern(wc, cmd);
- });
+});
+
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 1, j: true};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ coll.drop();
+ cmd.setupFunc();
+ var res = db.runCommand(cmd.req);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError, 'command had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
+
+function testInvalidWriteConcern(wc, cmd) {
+ cmd.req.writeConcern = wc;
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ coll.drop();
+ cmd.setupFunc();
+ var res = coll.runCommand(cmd.req);
+ // These commands should fail because standalone writeConcerns are found to be invalid at
+ // the validation stage when the writeConcern is parsed, before the command is run.
+ assert.commandFailed(res);
+}
+
+var invalidWriteConcerns = [{w: 'invalid'}, {w: 2}, {j: 'invalid'}];
+
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ invalidWriteConcerns.forEach(function(wc) {
+ testInvalidWriteConcern(wc, cmd);
});
+});
})();
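
Each entry in the commands array above pairs a write command with a setup hook that seeds state and a confirm hook that verifies the effect, so the same valid/invalid write-concern matrix can be driven over every command. As a rough sketch of how one more command could be folded into that harness — the delete request shape is standard, but the 'birch' documents and this particular entry are illustrative, not part of the test:

// Hypothetical extra entry: exercise the delete command in the same harness.
commands.push({
    req: {delete: collName, deletes: [{q: {type: 'birch'}, limit: 1}]},
    setupFunc: function() {
        coll.insert({type: 'birch'});
        assert.eq(coll.count({type: 'birch'}), 1);
    },
    confirmFunc: function() {
        // The seeded document should be gone once the delete has run.
        assert.eq(coll.count({type: 'birch'}), 0);
    }
});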
diff --git a/jstests/noPassthroughWithMongod/connections_opened.js b/jstests/noPassthroughWithMongod/connections_opened.js
index ecd2076f2af..20c53b0db1d 100644
--- a/jstests/noPassthroughWithMongod/connections_opened.js
+++ b/jstests/noPassthroughWithMongod/connections_opened.js
@@ -30,12 +30,14 @@ function createPersistentConnection() {
function createTemporaryConnection() {
// Retry connecting until you are successful
- var pollString = "var conn = null;" + "assert.soon(function() {" + "try { conn = new Mongo(\"" +
- db.getMongo().host + "\"); return conn" + "} catch (x) {return false;}}, " +
+ var pollString = "var conn = null;" +
+ "assert.soon(function() {" +
+ "try { conn = new Mongo(\"" + db.getMongo().host + "\"); return conn" +
+ "} catch (x) {return false;}}, " +
"\"Timed out waiting for temporary connection to connect\", 30000, 5000);";
// Poll the signal collection until it is told to terminate.
- pollString += "assert.soon(function() {" + "return conn.getDB('" + testDB +
- "').getCollection('" + signalCollection + "')" +
+ pollString += "assert.soon(function() {" +
+ "return conn.getDB('" + testDB + "').getCollection('" + signalCollection + "')" +
".findOne().stop;}, \"Parallel shell never told to terminate\", 10 * 60000);";
return startParallelShell(pollString, null, true);
}
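
The reassembled pollString above is still one JavaScript program built by string concatenation and evaluated via startParallelShell. Expanded and tidied, the program the temporary shell runs looks roughly like the sketch below; the host, database, and collection names are placeholders for db.getMongo().host, testDB, and signalCollection:

// Approximate expansion of pollString as run in the parallel shell.
var conn = null;
assert.soon(function() {
    try {
        conn = new Mongo("localhost:27017");  // placeholder host
        return conn;
    } catch (x) {
        return false;
    }
}, "Timed out waiting for temporary connection to connect", 30000, 5000);
assert.soon(function() {
    // Poll the signal collection until told to terminate.
    return conn.getDB("test").getCollection("signal").findOne().stop;
}, "Parallel shell never told to terminate", 10 * 60000);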
diff --git a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
index 8a36f7ee11e..61f38ce4b94 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
@@ -1,77 +1,77 @@
(function() {
- "use strict";
- var t = db.create_indexes_shell_helper;
- t.drop();
+"use strict";
+var t = db.create_indexes_shell_helper;
+t.drop();
- var mongo = db.getMongo();
+var mongo = db.getMongo();
- try {
- var commandsRan = [];
- var insertsRan = [];
- var mockMongo = {
- writeMode: function() {
- return "commands";
- },
- getSlaveOk: function() {
- return true;
- },
- runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1.0};
- },
- getWriteConcern: function() {
- return null;
- },
- useWriteCommands: function() {
- return true;
- },
- hasWriteCommands: function() {
- return true;
- },
- getMinWireVersion: function() {
- return mongo.getMinWireVersion();
- },
- getMaxWireVersion: function() {
- return mongo.getMaxWireVersion();
- },
- isReplicaSetMember: function() {
- return mongo.isReplicaSetMember();
- },
- isMongos: function() {
- return mongo.isMongos();
- },
- isCausalConsistency: function() {
- return false;
- },
- getClusterTime: function() {
- return null;
- },
- };
+try {
+ var commandsRan = [];
+ var insertsRan = [];
+ var mockMongo = {
+ writeMode: function() {
+ return "commands";
+ },
+ getSlaveOk: function() {
+ return true;
+ },
+ runCommand: function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1.0};
+ },
+ getWriteConcern: function() {
+ return null;
+ },
+ useWriteCommands: function() {
+ return true;
+ },
+ hasWriteCommands: function() {
+ return true;
+ },
+ getMinWireVersion: function() {
+ return mongo.getMinWireVersion();
+ },
+ getMaxWireVersion: function() {
+ return mongo.getMaxWireVersion();
+ },
+ isReplicaSetMember: function() {
+ return mongo.isReplicaSetMember();
+ },
+ isMongos: function() {
+ return mongo.isMongos();
+ },
+ isCausalConsistency: function() {
+ return false;
+ },
+ getClusterTime: function() {
+ return null;
+ },
+ };
- db._mongo = mockMongo;
- db._session = new _DummyDriverSession(mockMongo);
+ db._mongo = mockMongo;
+ db._session = new _DummyDriverSession(mockMongo);
- t.createIndexes([{x: 1}]);
- assert.eq(commandsRan.length, 1);
- assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
- assert.eq(commandsRan[0].cmd["indexes"][0], {key: {x: 1}, name: "x_1"});
+ t.createIndexes([{x: 1}]);
+ assert.eq(commandsRan.length, 1);
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert.eq(commandsRan[0].cmd["indexes"][0], {key: {x: 1}, name: "x_1"});
- commandsRan = [];
+ commandsRan = [];
- t.createIndexes([{y: 1}, {z: -1}]);
- assert.eq(commandsRan.length, 1);
- assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
- assert.eq(commandsRan[0].cmd["indexes"][0], {key: {y: 1}, name: "y_1"});
- assert.eq(commandsRan[0].cmd["indexes"][1], {key: {z: -1}, name: "z_-1"});
+ t.createIndexes([{y: 1}, {z: -1}]);
+ assert.eq(commandsRan.length, 1);
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert.eq(commandsRan[0].cmd["indexes"][0], {key: {y: 1}, name: "y_1"});
+ assert.eq(commandsRan[0].cmd["indexes"][1], {key: {z: -1}, name: "z_-1"});
- commandsRan = [];
+ commandsRan = [];
- t.createIndex({a: 1});
- assert.eq(commandsRan.length, 1);
- assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
- assert.eq(commandsRan[0].cmd["indexes"][0], {key: {a: 1}, name: "a_1"});
- } finally {
- db._mongo = mongo;
- db._session = new _DummyDriverSession(mongo);
- }
+ t.createIndex({a: 1});
+ assert.eq(commandsRan.length, 1);
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert.eq(commandsRan[0].cmd["indexes"][0], {key: {a: 1}, name: "a_1"});
+} finally {
+ db._mongo = mongo;
+ db._session = new _DummyDriverSession(mongo);
+}
}());
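
The mockMongo object above only needs to implement the methods the shell's index helpers actually call; the interesting one is runCommand, which records each command instead of sending it, so the assertions can inspect the createIndexes request the helper generated. A condensed sketch of that capture idea, assuming a deliberately minimal mock rather than the full method surface the real shell requires:

// Minimal capture-style mock: record commands rather than executing them.
var captured = [];
var fakeConn = {
    runCommand: function(dbName, cmd, opts) {
        captured.push({db: dbName, cmd: cmd, opts: opts});
        return {ok: 1};  // pretend every command succeeded
    }
    // ...plus whatever other methods the helpers under test call.
};
// With db._mongo swapped for fakeConn, assertions can then check, e.g.:
// assert(captured[0].cmd.hasOwnProperty("createIndexes"));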
diff --git a/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js b/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js
index a3ba81bb4ed..08c3b26f5c4 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_waits_for_already_in_progress.js
@@ -23,116 +23,117 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/parallel_shell_helpers.js");
- load('jstests/libs/test_background_ops.js');
-
- const dbName = "test";
- const collName = "create_indexes_waits_for_already_in_progress";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB.getCollection(collName);
- const indexSpecB = {key: {b: 1}, name: "the_b_1_index"};
- const indexSpecC = {key: {c: 1}, name: "the_c_1_index"};
-
- testColl.drop();
- assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
-
- // TODO (SERVER-40952): currently createIndexes will hold an X lock for the duration of the
- // build if the collection is not created beforehand. This test needs that not to happen, so we
- // can pause a build and a subsequently issued request can get an IX lock.
- assert.commandWorked(testDB.runCommand({create: collName}));
-
- function runSuccessfulIndexBuild(dbName, collName, indexSpec, requestNumber) {
- jsTest.log("Index build request " + requestNumber + " starting...");
- const res =
- db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
- jsTest.log("Index build request " + requestNumber + ", expected to succeed, result: " +
- tojson(res));
- assert.commandWorked(res);
- }
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
- let joinFirstIndexBuild;
- let joinSecondIndexBuild;
- try {
- jsTest.log("Starting a parallel shell to run first index build request...");
- joinFirstIndexBuild = startParallelShell(
- funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 1),
- db.getMongo().port);
-
- jsTest.log("Waiting for first index build to get started...");
- checkLog.contains(db.getMongo(),
- "Hanging index build due to failpoint 'hangAfterSettingUpIndexBuild'");
-
- jsTest.log("Starting a parallel shell to run second index build request...");
- joinSecondIndexBuild = startParallelShell(
- funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 2),
- db.getMongo().port);
-
- jsTest.log("Waiting for second index build request to wait behind the first...");
- checkLog.contains(db.getMongo(),
- "but found that at least one of the indexes is already being built");
- } finally {
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
- }
-
- // The second request stalled behind the first, so now all we need to do is check that they both
- // complete successfully.
- joinFirstIndexBuild();
- joinSecondIndexBuild();
-
- // Make sure the parallel shells sucessfully built the index. We should have the _id index and
- // the 'the_b_1_index' index just built in the parallel shells.
- assert.eq(testColl.getIndexes().length, 2);
-
- // Lastly, if the first request fails transiently, then the second should restart the index
- // build.
- assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
-
- function runFailedIndexBuild(dbName, collName, indexSpec, requestNumber) {
- const res =
- db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
- jsTest.log("Index build request " + requestNumber + ", expected to fail, result: " +
- tojson(res));
- assert.commandFailedWithCode(res, ErrorCodes.InternalError);
- }
-
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/parallel_shell_helpers.js");
+load('jstests/libs/test_background_ops.js');
+
+const dbName = "test";
+const collName = "create_indexes_waits_for_already_in_progress";
+const testDB = db.getSiblingDB(dbName);
+const testColl = testDB.getCollection(collName);
+const indexSpecB = {
+ key: {b: 1},
+ name: "the_b_1_index"
+};
+const indexSpecC = {
+ key: {c: 1},
+ name: "the_c_1_index"
+};
+
+testColl.drop();
+assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
+
+// TODO (SERVER-40952): currently createIndexes will hold an X lock for the duration of the
+// build if the collection is not created beforehand. This test needs that not to happen, so we
+// can pause a build and a subsequently issued request can get an IX lock.
+assert.commandWorked(testDB.runCommand({create: collName}));
+
+function runSuccessfulIndexBuild(dbName, collName, indexSpec, requestNumber) {
+ jsTest.log("Index build request " + requestNumber + " starting...");
+ const res = db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
+ jsTest.log("Index build request " + requestNumber +
+ ", expected to succeed, result: " + tojson(res));
+ assert.commandWorked(res);
+}
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
+let joinFirstIndexBuild;
+let joinSecondIndexBuild;
+try {
+ jsTest.log("Starting a parallel shell to run first index build request...");
+ joinFirstIndexBuild = startParallelShell(
+ funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 1), db.getMongo().port);
+
+ jsTest.log("Waiting for first index build to get started...");
+ checkLog.contains(db.getMongo(),
+ "Hanging index build due to failpoint 'hangAfterSettingUpIndexBuild'");
+
+ jsTest.log("Starting a parallel shell to run second index build request...");
+ joinSecondIndexBuild = startParallelShell(
+ funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecB, 2), db.getMongo().port);
+
+ jsTest.log("Waiting for second index build request to wait behind the first...");
+ checkLog.contains(db.getMongo(),
+ "but found that at least one of the indexes is already being built");
+} finally {
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
+}
+
+// The second request stalled behind the first, so now all we need to do is check that they both
+// complete successfully.
+joinFirstIndexBuild();
+joinSecondIndexBuild();
+
+// Make sure the parallel shells successfully built the index. We should have the _id index and
+// the 'the_b_1_index' index just built in the parallel shells.
+assert.eq(testColl.getIndexes().length, 2);
+
+// Lastly, if the first request fails transiently, then the second should restart the index
+// build.
+assert.commandWorked(testDB.adminCommand({clearLog: 'global'}));
+
+function runFailedIndexBuild(dbName, collName, indexSpec, requestNumber) {
+ const res = db.getSiblingDB(dbName).runCommand({createIndexes: collName, indexes: [indexSpec]});
+ jsTest.log("Index build request " + requestNumber +
+ ", expected to fail, result: " + tojson(res));
+ assert.commandFailedWithCode(res, ErrorCodes.InternalError);
+}
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'alwaysOn'}));
+let joinFailedIndexBuild;
+let joinSuccessfulIndexBuild;
+try {
+ jsTest.log("Starting a parallel shell to run third index build request...");
+ joinFailedIndexBuild = startParallelShell(
+ funWithArgs(runFailedIndexBuild, dbName, collName, indexSpecC, 3), db.getMongo().port);
+
+ jsTest.log("Waiting for third index build to get started...");
+ checkLog.contains(db.getMongo(),
+ "Hanging index build due to failpoint 'hangAndThenFailIndexBuild'");
+
+ jsTest.log("Starting a parallel shell to run fourth index build request...");
+ joinSuccessfulIndexBuild = startParallelShell(
+ funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecC, 4), db.getMongo().port);
+
+ jsTest.log("Waiting for fourth index build request to wait behind the third...");
+ checkLog.contains(db.getMongo(),
+ "but found that at least one of the indexes is already being built");
+} finally {
assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'alwaysOn'}));
- let joinFailedIndexBuild;
- let joinSuccessfulIndexBuild;
- try {
- jsTest.log("Starting a parallel shell to run third index build request...");
- joinFailedIndexBuild = startParallelShell(
- funWithArgs(runFailedIndexBuild, dbName, collName, indexSpecC, 3), db.getMongo().port);
-
- jsTest.log("Waiting for third index build to get started...");
- checkLog.contains(db.getMongo(),
- "Hanging index build due to failpoint 'hangAndThenFailIndexBuild'");
-
- jsTest.log("Starting a parallel shell to run fourth index build request...");
- joinSuccessfulIndexBuild = startParallelShell(
- funWithArgs(runSuccessfulIndexBuild, dbName, collName, indexSpecC, 4),
- db.getMongo().port);
-
- jsTest.log("Waiting for fourth index build request to wait behind the third...");
- checkLog.contains(db.getMongo(),
- "but found that at least one of the indexes is already being built");
- } finally {
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'off'}));
- }
-
- // The second request stalled behind the first, so now all we need to do is check that they both
- // complete as expected: the first should fail; the second should succeed.
- joinFailedIndexBuild();
- joinSuccessfulIndexBuild();
-
- // Make sure the parallel shells sucessfully built the index. We should now have the _id index,
- // the 'the_b_1_index' index and the 'the_c_1_index' just built in the parallel shells.
- assert.eq(testColl.getIndexes().length, 3);
+ testDB.adminCommand({configureFailPoint: 'hangAndThenFailIndexBuild', mode: 'off'}));
+}
+
+// The fourth request stalled behind the third, so now all we need to do is check that they both
+// complete as expected: the third should fail; the fourth should succeed.
+joinFailedIndexBuild();
+joinSuccessfulIndexBuild();
+
+// Make sure the parallel shells successfully built the index. We should now have the _id index,
+// the 'the_b_1_index' index, and the 'the_c_1_index' index just built in the parallel shells.
+assert.eq(testColl.getIndexes().length, 3);
})();
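
Both halves of the test above follow the same failpoint choreography: enable the failpoint, start the parallel operation, wait on a known log line, and disable the failpoint in a finally block so a failed assertion cannot leave the server hung, reaping the parallel shell only afterwards. A skeleton of that shape, with the failpoint name, log message, and hanging operation all as placeholders:

// Skeleton of the enable/hang/disable failpoint pattern used above.
let join;
assert.commandWorked(
    db.adminCommand({configureFailPoint: 'someFailPoint', mode: 'alwaysOn'}));
try {
    join = startParallelShell("/* operation expected to hang */", db.getMongo().port);
    // Block until the server logs that it reached the failpoint.
    checkLog.contains(db.getMongo(), "log line emitted while hanging");
} finally {
    // Always release the failpoint, even if an assertion above threw.
    assert.commandWorked(
        db.adminCommand({configureFailPoint: 'someFailPoint', mode: 'off'}));
}
join();  // reap the parallel shell and assert on its exit code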
diff --git a/jstests/noPassthroughWithMongod/currentop_includes_connid.js b/jstests/noPassthroughWithMongod/currentop_includes_connid.js
index b9b9ceeeb90..a58bba44479 100644
--- a/jstests/noPassthroughWithMongod/currentop_includes_connid.js
+++ b/jstests/noPassthroughWithMongod/currentop_includes_connid.js
@@ -1,12 +1,12 @@
(function() {
- "use strict";
+"use strict";
- let res = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
- const myUri = res.you;
+let res = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
+const myUri = res.you;
- res = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
- const threadName = res.inprog[0].desc;
- const connectionId = res.inprog[0].connectionId;
+res = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
+const threadName = res.inprog[0].desc;
+const connectionId = res.inprog[0].connectionId;
- assert.eq("conn" + connectionId, threadName, tojson(res));
+assert.eq("conn" + connectionId, threadName, tojson(res));
})();
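
The equality asserted above — desc is just "conn" plus connectionId — also works in reverse: whatsmyuri plus the client filter narrows currentOp output to the current connection's own operations. A small sketch of that lookup, for illustration only:

// Sketch: list this connection's in-progress ops via its URI.
const uri = assert.commandWorked(db.runCommand({whatsmyuri: 1})).you;
const inprog = assert.commandWorked(db.adminCommand({currentOp: 1, client: uri})).inprog;
inprog.forEach(function(op) {
    print("opid " + op.opid + " on thread " + op.desc);
});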
diff --git a/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js b/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js
index c82c75ff35d..1e4cf5764b8 100644
--- a/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js
+++ b/jstests/noPassthroughWithMongod/currentop_plan_summary_no_dup.js
@@ -1,57 +1,57 @@
// Tests that planSummary is not duplicated in an active getmore currentOp entry.
(function() {
- "use strict";
-
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- const collName = "currentop_plan_summary_no_dup";
- const coll = db.getCollection(collName);
- coll.drop();
- for (let i = 0; i < 200; i++) {
- assert.commandWorked(coll.insert({x: 1}));
- }
-
- // Create a long-running getMore operation by sleeping for every document.
- const cmdRes = assert.commandWorked(db.runCommand({
- find: collName,
- filter: {
- $where: function() {
- sleep(100);
- return true;
- }
- },
- batchSize: 0
- }));
- const cmdStr = 'db.runCommand({getMore: ' + cmdRes.cursor.id.toString() + ', collection: "' +
- collName + '"})';
- const awaitShell = startParallelShell(cmdStr);
-
- assert.soon(function() {
- const currOp = db.currentOp({"op": "getmore"});
-
- assert("inprog" in currOp);
- if (currOp.inprog.length === 0) {
- return false;
+"use strict";
+
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+const collName = "currentop_plan_summary_no_dup";
+const coll = db.getCollection(collName);
+coll.drop();
+for (let i = 0; i < 200; i++) {
+ assert.commandWorked(coll.insert({x: 1}));
+}
+
+// Create a long-running getMore operation by sleeping for every document.
+const cmdRes = assert.commandWorked(db.runCommand({
+ find: collName,
+ filter: {
+ $where: function() {
+ sleep(100);
+ return true;
}
+ },
+ batchSize: 0
+}));
+const cmdStr =
+ 'db.runCommand({getMore: ' + cmdRes.cursor.id.toString() + ', collection: "' + collName + '"})';
+const awaitShell = startParallelShell(cmdStr);
+
+assert.soon(function() {
+ const currOp = db.currentOp({"op": "getmore"});
+
+ assert("inprog" in currOp);
+ if (currOp.inprog.length === 0) {
+ return false;
+ }
- const getmoreOp = currOp.inprog[0];
- if (!("planSummary" in getmoreOp)) {
- print("getMore op does not yet contain planSummary:");
- printjson(getmoreOp);
- return false;
- }
+ const getmoreOp = currOp.inprog[0];
+ if (!("planSummary" in getmoreOp)) {
+ print("getMore op does not yet contain planSummary:");
+ printjson(getmoreOp);
+ return false;
+ }
- // getmoreOp should only contain a top-level plan summary.
- // Check that it doesn't contain a sub-level duplicate.
- assert(!getmoreOp.cursor.hasOwnProperty("planSummary"),
- "getmore contains duplicated planSummary: " + tojson(getmoreOp));
+ // getmoreOp should only contain a top-level plan summary.
+ // Check that it doesn't contain a sub-level duplicate.
+ assert(!getmoreOp.cursor.hasOwnProperty("planSummary"),
+ "getmore contains duplicated planSummary: " + tojson(getmoreOp));
- // Kill the op so that the test won't run for a long time.
- db.killOp(getmoreOp.opid);
+ // Kill the op so that the test won't run for a long time.
+ db.killOp(getmoreOp.opid);
- return true;
- });
- awaitShell();
+ return true;
+});
+awaitShell();
}());
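
The setup worth highlighting in this test is how it manufactures a getMore slow enough for currentOp to observe: batchSize 0 defers all scanning to the getMore, and the $where sleeps for every document it examines. A condensed sketch of just that slow-cursor construction, with the collection name as a placeholder:

// Sketch: create a cursor whose getMore runs long enough to observe.
const res = assert.commandWorked(db.runCommand({
    find: "someColl",
    filter: {$where: function() { sleep(100); return true; }},  // ~100ms per doc
    batchSize: 0  // return no documents yet; the getMore does the scanning
}));
// res.cursor.id can then be handed to a getMore in a parallel shell.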
diff --git a/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js b/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
index b8f8a240662..865809f63b2 100644
--- a/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
+++ b/jstests/noPassthroughWithMongod/cursor_server_status_metrics.js
@@ -2,85 +2,85 @@
* Tests for serverStatus metrics.cursor stats.
*/
(function() {
- var coll = db[jsTest.name()];
- coll.drop();
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3}));
+var coll = db[jsTest.name()];
+coll.drop();
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 2}));
+assert.writeOK(coll.insert({_id: 3}));
- assert.eq(3, coll.find().count());
+assert.eq(3, coll.find().count());
- function getCurrentCursorsOpen() {
- return db.serverStatus().metrics.cursor.open.total;
- }
+function getCurrentCursorsOpen() {
+ return db.serverStatus().metrics.cursor.open.total;
+}
- function getCurrentCursorsPinned() {
- return db.serverStatus().metrics.cursor.open.pinned;
- }
+function getCurrentCursorsPinned() {
+ return db.serverStatus().metrics.cursor.open.pinned;
+}
- var initialTotalOpen = getCurrentCursorsOpen();
+var initialTotalOpen = getCurrentCursorsOpen();
- // We expect no pinned cursors
- assert.eq(0, getCurrentCursorsPinned());
+// We expect no pinned cursors
+assert.eq(0, getCurrentCursorsPinned());
- // Total open cursors should not have changed after exhausting a cursor.
- assert.eq(3, coll.find().itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.find().batchSize(2).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.find().batchSize(1).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+// Total open cursors should not have changed after exhausting a cursor.
+assert.eq(3, coll.find().itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.find().batchSize(2).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.find().batchSize(1).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.aggregate([]).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.aggregate([]).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- // Total pinned cursors should remain zero exhausting a cursor.
- assert.eq(3, coll.find().itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.find().batchSize(2).itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.find().batchSize(1).itcount());
- assert.eq(0, getCurrentCursorsPinned());
+// Total pinned cursors should remain zero after exhausting a cursor.
+assert.eq(3, coll.find().itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.find().batchSize(2).itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.find().batchSize(1).itcount());
+assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.aggregate([]).itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
- assert.eq(0, getCurrentCursorsPinned());
- assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
- assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.aggregate([]).itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 2}}).itcount());
+assert.eq(0, getCurrentCursorsPinned());
+assert.eq(3, coll.aggregate([], {cursor: {batchSize: 1}}).itcount());
+assert.eq(0, getCurrentCursorsPinned());
- // This cursor should remain open on the server, but not pinned.
- var cursor = coll.find().batchSize(2);
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// This cursor should remain open on the server, but not pinned.
+var cursor = coll.find().batchSize(2);
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
- // Same should be true after pulling the second document out of the cursor, since we haven't
- // issued a getMore yet.
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// Same should be true after pulling the second document out of the cursor, since we haven't
+// issued a getMore yet.
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
- // Cursor no longer reported as open after being exhausted.
- cursor.next();
- assert(!cursor.hasNext());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// Cursor no longer reported as open after being exhausted.
+cursor.next();
+assert(!cursor.hasNext());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
- // Same behavior expected for an aggregation cursor.
- var cursor = coll.aggregate([], {cursor: {batchSize: 2}});
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
- cursor.next();
- assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
- cursor.next();
- assert(!cursor.hasNext());
- assert.eq(initialTotalOpen, getCurrentCursorsOpen());
- assert.eq(0, getCurrentCursorsPinned());
+// Same behavior expected for an aggregation cursor.
+var cursor = coll.aggregate([], {cursor: {batchSize: 2}});
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
+cursor.next();
+assert.eq(initialTotalOpen + 1, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
+cursor.next();
+assert(!cursor.hasNext());
+assert.eq(initialTotalOpen, getCurrentCursorsOpen());
+assert.eq(0, getCurrentCursorsPinned());
}());
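
The invariant the test exercises is that metrics.cursor.open.total counts only cursors still open on the server: fully exhausted cursors never register, while a partially read one adds exactly one until it is drained. A compact sketch of that delta check, assuming db.someColl is a placeholder collection holding more documents than the batch size:

// Sketch: a partially-read cursor bumps open.total by exactly one.
const before = db.serverStatus().metrics.cursor.open.total;
const c = db.someColl.find().batchSize(2);
c.next();  // first batch fetched; the cursor stays open server-side
assert.eq(before + 1, db.serverStatus().metrics.cursor.open.total);
c.itcount();  // drain the cursor; the server closes it
assert.eq(before, db.serverStatus().metrics.cursor.open.total);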
diff --git a/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js b/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
index f2041253e3b..9903c7a5835 100644
--- a/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
+++ b/jstests/noPassthroughWithMongod/dbcommand_cursor_throws_on_closed_conn.js
@@ -1,17 +1,17 @@
(function() {
- "use strict";
+"use strict";
- var testDB = db.getSiblingDB('dbcommand_cursor_throws_on_closed_conn');
- testDB.dropDatabase();
- var coll = testDB.collection;
- var conn = testDB.getMongo();
- conn.forceReadMode("commands");
- assert.commandWorked(coll.save({}));
- var res = assert.commandWorked(testDB.runCommand({
- find: coll.getName(),
- batchSize: 0,
- }));
+var testDB = db.getSiblingDB('dbcommand_cursor_throws_on_closed_conn');
+testDB.dropDatabase();
+var coll = testDB.collection;
+var conn = testDB.getMongo();
+conn.forceReadMode("commands");
+assert.commandWorked(coll.save({}));
+var res = assert.commandWorked(testDB.runCommand({
+ find: coll.getName(),
+ batchSize: 0,
+}));
- conn.close();
- assert.throws(() => new DBCommandCursor(testDB, res));
+conn.close();
+assert.throws(() => new DBCommandCursor(testDB, res));
}());
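
DBCommandCursor wraps a command response containing a cursor document and fetches later batches with getMore, which is why constructing one over a closed connection must throw. For contrast, a minimal sketch of the ordinary usage the test inverts, with the collection name as a placeholder:

// Sketch: ordinary DBCommandCursor usage over a live connection.
var findRes = assert.commandWorked(db.runCommand({find: "someColl", batchSize: 2}));
var cursor = new DBCommandCursor(db, findRes);
while (cursor.hasNext()) {
    printjson(cursor.next());  // getMores are issued under the hood as needed
}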
diff --git a/jstests/noPassthroughWithMongod/default_read_pref.js b/jstests/noPassthroughWithMongod/default_read_pref.js
index e5daba20d8a..12e8962a0a7 100644
--- a/jstests/noPassthroughWithMongod/default_read_pref.js
+++ b/jstests/noPassthroughWithMongod/default_read_pref.js
@@ -2,55 +2,54 @@
// on read commands run with an 'unset' read preference.
(function() {
- "use strict";
+"use strict";
- var mongo = db.getMongo();
- try {
- var commandsRan = [];
- db._mongo = {
- getSlaveOk: function() {
- return false;
- },
- getReadPrefMode: function() {
- return mongo.getReadPrefMode();
- },
- getReadPref: function() {
- return mongo.getReadPref();
- },
- runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1};
- },
- getMinWireVersion: function() {
- return mongo.getMinWireVersion();
- },
- getMaxWireVersion: function() {
- return mongo.getMaxWireVersion();
- },
- isReplicaSetMember: function() {
- return mongo.isReplicaSetMember();
- },
- isMongos: function() {
- return mongo.isMongos();
- },
- isCausalConsistency: function() {
- return false;
- },
- getClusterTime: function() {
- return null;
- },
- };
- db._session = new _DummyDriverSession(db._mongo);
+var mongo = db.getMongo();
+try {
+ var commandsRan = [];
+ db._mongo = {
+ getSlaveOk: function() {
+ return false;
+ },
+ getReadPrefMode: function() {
+ return mongo.getReadPrefMode();
+ },
+ getReadPref: function() {
+ return mongo.getReadPref();
+ },
+ runCommand: function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1};
+ },
+ getMinWireVersion: function() {
+ return mongo.getMinWireVersion();
+ },
+ getMaxWireVersion: function() {
+ return mongo.getMaxWireVersion();
+ },
+ isReplicaSetMember: function() {
+ return mongo.isReplicaSetMember();
+ },
+ isMongos: function() {
+ return mongo.isMongos();
+ },
+ isCausalConsistency: function() {
+ return false;
+ },
+ getClusterTime: function() {
+ return null;
+ },
+ };
+ db._session = new _DummyDriverSession(db._mongo);
- db.runReadCommand({ping: 1});
- assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
- assert.eq(
- commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
-
- } finally {
- db._mongo = mongo;
- db._session = new _DummyDriverSession(mongo);
- }
+ db.runReadCommand({ping: 1});
+ assert.eq(commandsRan.length, 1);
+ assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
+ assert.eq(
+ commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
+} finally {
+ db._mongo = mongo;
+ db._session = new _DummyDriverSession(mongo);
+}
})();
diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js
index 298f585d543..02ac4bf7870 100644
--- a/jstests/noPassthroughWithMongod/dup_bgindex.js
+++ b/jstests/noPassthroughWithMongod/dup_bgindex.js
@@ -1,19 +1,19 @@
// Try to create two identical indexes, via background. Shouldn't be allowed by the server.
(function() {
- var t = db.duplIndexTest;
- t.drop();
- docs = [];
- for (var i = 0; i < 10000; i++) {
- docs.push({name: "foo", z: {a: 17, b: 4}, i: i});
- }
- assert.commandWorked(t.insert(docs));
- var cmd = "assert.commandWorked(db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} ));";
- var join1 = startParallelShell(cmd);
- var join2 = startParallelShell(cmd);
- assert.commandWorked(t.ensureIndex({i: 1}, {background: true}));
- assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
- assert.commandWorked(t.dropIndex({i: 1}));
- assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
- join1();
- join2();
+var t = db.duplIndexTest;
+t.drop();
+var docs = [];
+for (var i = 0; i < 10000; i++) {
+ docs.push({name: "foo", z: {a: 17, b: 4}, i: i});
+}
+assert.commandWorked(t.insert(docs));
+var cmd = "assert.commandWorked(db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} ));";
+var join1 = startParallelShell(cmd);
+var join2 = startParallelShell(cmd);
+assert.commandWorked(t.ensureIndex({i: 1}, {background: true}));
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
+assert.commandWorked(t.dropIndex({i: 1}));
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
+join1();
+join2();
})();
diff --git a/jstests/noPassthroughWithMongod/exchangeProducer.js b/jstests/noPassthroughWithMongod/exchangeProducer.js
index f3f23ee4e0d..5d609e04634 100644
--- a/jstests/noPassthroughWithMongod/exchangeProducer.js
+++ b/jstests/noPassthroughWithMongod/exchangeProducer.js
@@ -7,138 +7,258 @@
TestData.disableImplicitSessions = true;
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const coll = db.testCollection;
- coll.drop();
+const coll = db.testCollection;
+coll.drop();
- const numDocs = 10000;
+const numDocs = 10000;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({a: i, b: 'abcdefghijklmnopqrstuvxyz', c: {d: i}, e: [0, {f: i}]});
- }
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({a: i, b: 'abcdefghijklmnopqrstuvxyz', c: {d: i}, e: [0, {f: i}]});
+}
+
+assert.commandWorked(bulk.execute());
- assert.commandWorked(bulk.execute());
-
- /**
- * A consumer runs in a parallel shell reading the cursor until exhausted and then asserts that
- * it got the correct number of documents.
- *
- * @param {Object} cursor - the cursor that a consumer will read
- * @param {int} count - number of expected documents
- */
- function countingConsumer(cursor, count) {
- let shell = startParallelShell(`{
+/**
+ * A consumer runs in a parallel shell reading the cursor until exhausted and then asserts that
+ * it got the correct number of documents.
+ *
+ * @param {Object} cursor - the cursor that a consumer will read
+ * @param {int} count - number of expected documents
+ */
+function countingConsumer(cursor, count) {
+ let shell = startParallelShell(`{
const dbCursor = new DBCommandCursor(db, ${tojsononeline(cursor)});
assert.eq(${count}, dbCursor.itcount())
}`);
- return shell;
- }
+ return shell;
+}
- /**
- * A consumer runs in a parallel shell reading the cursor expecting an error.
- *
- * @param {Object} cursor - the cursor that a consumer will read
- * @param {int} code - the expected error code
- */
- function failingConsumer(cursor, code) {
- let shell = startParallelShell(`{
+/**
+ * A consumer runs in a parallel shell reading the cursor expecting an error.
+ *
+ * @param {Object} cursor - the cursor that a consumer will read
+ * @param {int} code - the expected error code
+ */
+function failingConsumer(cursor, code) {
+ let shell = startParallelShell(`{
const dbCursor = new DBCommandCursor(db, ${tojsononeline(cursor)});
const cmdRes = db.runCommand({getMore: dbCursor._cursorid, collection: dbCursor._collName});
assert.commandFailedWithCode(cmdRes, ${code});
}`);
- return shell;
- }
-
- const numConsumers = 4;
- // For simplicity we assume that we can evenly distribute documents among consumers.
- assert.eq(0, numDocs % numConsumers);
-
- (function testParameterValidation() {
- const tooManyConsumers = 101;
- assertErrorCode(coll, [], 50950, "Expected too many consumers", {
- exchange: {
- policy: "roundrobin",
- consumers: NumberInt(tooManyConsumers),
- bufferSize: NumberInt(1024)
- },
- cursor: {batchSize: 0}
- });
-
- const bufferTooLarge = 200 * 1024 * 1024; // 200 MB
- assertErrorCode(coll, [], 50951, "Expected buffer too large", {
- exchange: {
- policy: "roundrobin",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(bufferTooLarge)
- },
- cursor: {batchSize: 0}
- });
+ return shell;
+}
+
+const numConsumers = 4;
+// For simplicity we assume that we can evenly distribute documents among consumers.
+assert.eq(0, numDocs % numConsumers);
+
+(function testParameterValidation() {
+ const tooManyConsumers = 101;
+ assertErrorCode(coll, [], 50950, "Expected too many consumers", {
+ exchange: {
+ policy: "roundrobin",
+ consumers: NumberInt(tooManyConsumers),
+ bufferSize: NumberInt(1024)
+ },
+ cursor: {batchSize: 0}
+ });
+
+ const bufferTooLarge = 200 * 1024 * 1024; // 200 MB
+ assertErrorCode(coll, [], 50951, "Expected buffer too large", {
+ exchange: {
+ policy: "roundrobin",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(bufferTooLarge)
+ },
+ cursor: {batchSize: 0}
+ });
+})();
- })();
+/**
+ * RoundRobin - evenly distribute documents to consumers.
+ */
+(function testRoundRobin() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange:
+ {policy: "roundrobin", consumers: NumberInt(numConsumers), bufferSize: NumberInt(1024)},
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- /**
- * RoundRobin - evenly distribute documents to consumers.
- */
- (function testRoundRobin() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "roundrobin",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024)
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
+/**
+ * Broadcast - send a document to all consumers.
+ */
+(function testBroadcast() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange:
+ {policy: "broadcast", consumers: NumberInt(numConsumers), bufferSize: NumberInt(1024)},
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- let parallelShells = [];
+/**
+ * Range - send documents to consumer based on the range of values of the 'a' field.
+ */
+(function testRange() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {a: 1},
+ boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
+/**
+ * Range with more complex pipeline.
+ */
+(function testRangeComplex() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$match: {a: {$gte: 5000}}}, {$sort: {a: -1}}, {$project: {_id: 0, b: 0}}],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {a: 1},
+ boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ parallelShells.push(countingConsumer(res.cursors[0], 0));
+ parallelShells.push(countingConsumer(res.cursors[1], 0));
+ parallelShells.push(countingConsumer(res.cursors[2], 2500));
+ parallelShells.push(countingConsumer(res.cursors[3], 2500));
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- /**
- * Broadcast - send a document to all consumers.
- */
- (function testBroadcast() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "broadcast",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024)
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
+/**
+ * Range with a dotted path.
+ */
+(function testRangeDottedPath() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {"c.d": 1},
+ boundaries:
+ [{"c.d": MinKey}, {"c.d": 2500}, {"c.d": 5000}, {"c.d": 7500}, {"c.d": MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- let parallelShells = [];
+/**
+ * Range with a dotted path and array.
+ */
+(function testRangeDottedPathArray() {
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: "keyRange",
+ consumers: NumberInt(numConsumers),
+ bufferSize: NumberInt(1024),
+ key: {"e.f": 1},
+ boundaries:
+ [{"e.f": MinKey}, {"e.f": 2500}, {"e.f": 5000}, {"e.f": 7500}, {"e.f": MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
+ },
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(numConsumers, res.cursors.length);
+
+ let parallelShells = [];
+
+    // The e.f field contains an array, and hence the exchange cannot compute the range. Instead,
+    // it sends all such documents to consumer 0 by fiat.
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells.push(countingConsumer(res.cursors[i], i == 0 ? numDocs : 0));
+ }
+ for (let i = 0; i < numConsumers; ++i) {
+ parallelShells[i]();
+ }
+})();
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
+/**
+ * Range - simulate an exception in loading the batch.
+ */
+(function testRangeFailLoad() {
+ const kFailPointName = "exchangeFailLoadNextBatch";
+ try {
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
- /**
- * Range - send documents to consumer based on the range of values of the 'a' field.
- */
- (function testRange() {
let res = assert.commandWorked(db.runCommand({
aggregate: coll.getName(),
pipeline: [],
@@ -155,148 +275,19 @@ TestData.disableImplicitSessions = true;
assert.eq(numConsumers, res.cursors.length);
let parallelShells = [];
+ failingConsumer(res.cursors[0], ErrorCodes.FailPointEnabled)();
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
+ // After the first consumer sees an error, each subsequent consumer should see an
+ // 'ExchangePassthrough' error.
+ for (let i = 0; i < numConsumers - 1; ++i) {
+ parallelShells.push(
+ failingConsumer(res.cursors[i + 1], ErrorCodes.ExchangePassthrough));
}
- for (let i = 0; i < numConsumers; ++i) {
+ for (let i = 0; i < numConsumers - 1; ++i) {
parallelShells[i]();
}
- })();
-
- /**
- * Range with more complex pipeline.
- */
- (function testRangeComplex() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$match: {a: {$gte: 5000}}}, {$sort: {a: -1}}, {$project: {_id: 0, b: 0}}],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {a: 1},
- boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
-
- parallelShells.push(countingConsumer(res.cursors[0], 0));
- parallelShells.push(countingConsumer(res.cursors[1], 0));
- parallelShells.push(countingConsumer(res.cursors[2], 2500));
- parallelShells.push(countingConsumer(res.cursors[3], 2500));
-
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
-
- /**
- * Range with a dotted path.
- */
- (function testRangeDottedPath() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {"c.d": 1},
- boundaries:
- [{"c.d": MinKey}, {"c.d": 2500}, {"c.d": 5000}, {"c.d": 7500}, {"c.d": MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
-
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], numDocs / numConsumers));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
-
- /**
- * Range with a dotted path and array.
- */
- (function testRangeDottedPath() {
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {"e.f": 1},
- boundaries:
- [{"e.f": MinKey}, {"e.f": 2500}, {"e.f": 5000}, {"e.f": 7500}, {"e.f": MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
-
- // The e.f field contains an array and hence the exchange cannot compute the range. Instead
- // it sends all such documents to the consumer 0 by fiat.
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells.push(countingConsumer(res.cursors[i], i == 0 ? numDocs : 0));
- }
- for (let i = 0; i < numConsumers; ++i) {
- parallelShells[i]();
- }
- })();
-
- /**
- * Range - simulate an exception in loading the batch.
- */
- (function testRangeFailLoad() {
- const kFailPointName = "exchangeFailLoadNextBatch";
- try {
- assert.commandWorked(
- db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
-
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: "keyRange",
- consumers: NumberInt(numConsumers),
- bufferSize: NumberInt(1024),
- key: {a: 1},
- boundaries: [{a: MinKey}, {a: 2500}, {a: 5000}, {a: 7500}, {a: MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1), NumberInt(2), NumberInt(3)]
- },
- cursor: {batchSize: 0}
- }));
- assert.eq(numConsumers, res.cursors.length);
-
- let parallelShells = [];
- failingConsumer(res.cursors[0], ErrorCodes.FailPointEnabled)();
-
- // After the first consumer sees an error, each subsequent consumer should see an
- // 'ExchangePassthrough' error.
- for (let i = 0; i < numConsumers - 1; ++i) {
- parallelShells.push(
- failingConsumer(res.cursors[i + 1], ErrorCodes.ExchangePassthrough));
- }
- for (let i = 0; i < numConsumers - 1; ++i) {
- parallelShells[i]();
- }
- } finally {
- assert.commandWorked(
- db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- }
- })();
-
+ } finally {
+ assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+ }
+})();
})();
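
For the keyRange policy exercised above, each consumer's expected document count is simply the width of its boundary interval, since the 'a' key is uniform over [0, numDocs). A hedged helper that derives those expectations from numeric cut points (the MinKey/MaxKey endpoints are replaced here by 0 and numDocs so the arithmetic works):

// Sketch: expected per-consumer counts for a keyRange exchange over a
// uniformly distributed integer key in [0, numDocs).
function expectedRangeCounts(boundaries) {
    // boundaries: ascending cut points, e.g. [0, 2500, 5000, 7500, 10000].
    const counts = [];
    for (let i = 0; i + 1 < boundaries.length; ++i) {
        counts.push(boundaries[i + 1] - boundaries[i]);
    }
    return counts;
}
assert.eq([2500, 2500, 2500, 2500], expectedRangeCounts([0, 2500, 5000, 7500, 10000]));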
diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
index b08a7c79a44..089432b88d0 100644
--- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js
+++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
@@ -10,9 +10,9 @@ for (i = 0; i < 100; i++) {
var score = t.find({$text: {$search: "asdf"}}, {score: {$meta: 'textScore'}}).next().score;
var res = t.aggregate(
[
- {$match: {$text: {$search: "asdf"}}},
- {$sort: {"_id": 1}},
- {$project: {string: "$text", score: {$meta: "textScore"}}}
+ {$match: {$text: {$search: "asdf"}}},
+ {$sort: {"_id": 1}},
+ {$project: {string: "$text", score: {$meta: "textScore"}}}
],
{allowDiskUse: true});
// we must use .next() rather than a $limit because a $limit will optimize away the external sort
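
The pipeline above depends on the textScore $meta being projectable after a $text match, with allowDiskUse letting the blocking sort spill to disk. A minimal standalone sketch of the same shape — collection, indexed field, and search term are placeholders:

// Sketch: surface the text-search score next to each matching document.
db.someColl.createIndex({body: "text"});
var cur = db.someColl.aggregate(
    [
        {$match: {$text: {$search: "asdf"}}},
        {$sort: {_id: 1}},
        {$project: {score: {$meta: "textScore"}}}
    ],
    {allowDiskUse: true});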
diff --git a/jstests/noPassthroughWithMongod/ftdc_params.js b/jstests/noPassthroughWithMongod/ftdc_params.js
index 08714040fcb..12c450ed932 100644
--- a/jstests/noPassthroughWithMongod/ftdc_params.js
+++ b/jstests/noPassthroughWithMongod/ftdc_params.js
@@ -3,8 +3,8 @@
load('jstests/libs/ftdc.js');
(function() {
- 'use strict';
- var admin = db.getSiblingDB("admin");
+'use strict';
+var admin = db.getSiblingDB("admin");
- verifyCommonFTDCParameters(admin, true);
+verifyCommonFTDCParameters(admin, true);
})();
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 7c23442d4de..4d10d62a7dd 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -51,14 +51,13 @@ for (var n = 0; n < numTests; n++) {
341,
"Square Missing Chunk Test",
true);
- assert.between(
- 21 - 2,
- t.find({
- loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}
- }).count(),
- 21,
- "Square Missing Chunk Test 2",
- true);
+ assert.between(21 - 2,
+ t.find({
+ loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}
+ }).count(),
+ 21,
+ "Square Missing Chunk Test 2",
+ true);
}
assert.eq(1,
diff --git a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
index cef102b8e6d..a56d105fa4a 100644
--- a/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
+++ b/jstests/noPassthroughWithMongod/getmore_awaitdata_opcounters.js
@@ -3,46 +3,46 @@
* @tags: [requires_capped]
*/
(function() {
- "use strict";
+"use strict";
- const coll = db.getmore_awaitdata_opcounters;
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3}));
+const coll = db.getmore_awaitdata_opcounters;
+coll.drop();
+assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1024}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 2}));
+assert.writeOK(coll.insert({_id: 3}));
- function getGlobalLatencyStats() {
- return db.serverStatus().opLatencies.reads;
- }
+function getGlobalLatencyStats() {
+ return db.serverStatus().opLatencies.reads;
+}
- function getCollectionLatencyStats() {
- return coll.latencyStats().next().latencyStats.reads;
- }
+function getCollectionLatencyStats() {
+ return coll.latencyStats().next().latencyStats.reads;
+}
- function getTop() {
- return db.adminCommand({top: 1}).totals[coll.getFullName()];
- }
+function getTop() {
+ return db.adminCommand({top: 1}).totals[coll.getFullName()];
+}
- // Global latency histogram from serverStatus should record two read ops, one for find and one
- // for getMore.
- let oldGlobalLatency = getGlobalLatencyStats();
- assert.eq(3, coll.find().tailable(true).itcount());
- let newGlobalLatency = getGlobalLatencyStats();
- assert.eq(2, newGlobalLatency.ops - oldGlobalLatency.ops);
+// Global latency histogram from serverStatus should record two read ops, one for find and one
+// for getMore.
+let oldGlobalLatency = getGlobalLatencyStats();
+assert.eq(3, coll.find().tailable(true).itcount());
+let newGlobalLatency = getGlobalLatencyStats();
+assert.eq(2, newGlobalLatency.ops - oldGlobalLatency.ops);
- // Per-collection latency histogram should record three read ops, one for find, one for getMore,
- // and one for the aggregation command used to retrieve the stats themselves.
- let oldCollLatency = getCollectionLatencyStats();
- assert.eq(3, coll.find().tailable(true).itcount());
- let newCollLatency = getCollectionLatencyStats();
- assert.eq(3, newCollLatency.ops - oldCollLatency.ops);
+// Per-collection latency histogram should record three read ops, one for find, one for getMore,
+// and one for the aggregation command used to retrieve the stats themselves.
+let oldCollLatency = getCollectionLatencyStats();
+assert.eq(3, coll.find().tailable(true).itcount());
+let newCollLatency = getCollectionLatencyStats();
+assert.eq(3, newCollLatency.ops - oldCollLatency.ops);
- // Top separates counters for getMore and find. We should see a delta of one getMore op and one
- // find op.
- let oldTop = getTop();
- assert.eq(3, coll.find().tailable(true).itcount());
- let newTop = getTop();
- assert.eq(1, newTop.getmore.count - oldTop.getmore.count);
- assert.eq(1, newTop.queries.count - oldTop.queries.count);
+// Top separates counters for getMore and find. We should see a delta of one getMore op and one
+// find op.
+let oldTop = getTop();
+assert.eq(3, coll.find().tailable(true).itcount());
+let newTop = getTop();
+assert.eq(1, newTop.getmore.count - oldTop.getmore.count);
+assert.eq(1, newTop.queries.count - oldTop.queries.count);
}());
diff --git a/jstests/noPassthroughWithMongod/host_connection_string_validation.js b/jstests/noPassthroughWithMongod/host_connection_string_validation.js
index c37c834d903..07ba793151f 100644
--- a/jstests/noPassthroughWithMongod/host_connection_string_validation.js
+++ b/jstests/noPassthroughWithMongod/host_connection_string_validation.js
@@ -1,120 +1,119 @@
// Test --host.
(function() {
- // This "inner_mode" method of spawning a mongod and re-running was copied from
- // ipv6_connection_string_validation.js
- if ("undefined" == typeof inner_mode) {
- // Start a mongod with --ipv6
- jsTest.log("Outer mode test starting mongod with --ipv6");
- // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
- // Unfortunately, having bind_ip = ::1 won't work in the test framework (But does work when
- // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
- // with that address.
- var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
- if (mongod == null) {
- jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
- return;
- }
- var args = [
- "mongo",
- "--nodb",
- "--ipv6",
- "--host",
- "::1",
- "--port",
- mongod.port,
- "--eval",
- "inner_mode=true;port=" + mongod.port + ";",
- "jstests/noPassthroughWithMongod/host_connection_string_validation.js"
- ];
- var exitCode = _runMongoProgram.apply(null, args);
- jsTest.log("Inner mode test finished, exit code was " + exitCode);
-
- MongoRunner.stopMongod(mongod);
- // Pass the inner test's exit code back as the outer test's exit code
- if (exitCode != 0) {
- doassert("inner test failed with exit code " + exitCode);
- }
+// This "inner_mode" method of spawning a mongod and re-running was copied from
+// ipv6_connection_string_validation.js
+if ("undefined" == typeof inner_mode) {
+ // Start a mongod with --ipv6
+ jsTest.log("Outer mode test starting mongod with --ipv6");
+ // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
+    // Unfortunately, having bind_ip = ::1 won't work in the test framework (but it does work when
+ // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
+ // with that address.
+ var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
+ if (mongod == null) {
+ jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
return;
}
+ var args = [
+ "mongo",
+ "--nodb",
+ "--ipv6",
+ "--host",
+ "::1",
+ "--port",
+ mongod.port,
+ "--eval",
+ "inner_mode=true;port=" + mongod.port + ";",
+ "jstests/noPassthroughWithMongod/host_connection_string_validation.js"
+ ];
+ var exitCode = _runMongoProgram.apply(null, args);
+ jsTest.log("Inner mode test finished, exit code was " + exitCode);
+
+ MongoRunner.stopMongod(mongod);
+ // Pass the inner test's exit code back as the outer test's exit code
+ if (exitCode != 0) {
+ doassert("inner test failed with exit code " + exitCode);
+ }
+ return;
+}
- var testHost = function(host, shouldSucceed) {
- var exitCode = runMongoProgram('mongo', '--ipv6', '--eval', ';', '--host', host);
- if (shouldSucceed) {
- if (exitCode !== 0) {
- doassert("failed to connect with `--host " + host +
- "`, but expected success. Exit code: " + exitCode);
- }
- } else {
- if (exitCode === 0) {
- doassert("successfully connected with `--host " + host +
- "`, but expected to fail.");
- }
+var testHost = function(host, shouldSucceed) {
+ var exitCode = runMongoProgram('mongo', '--ipv6', '--eval', ';', '--host', host);
+ if (shouldSucceed) {
+ if (exitCode !== 0) {
+ doassert("failed to connect with `--host " + host +
+ "`, but expected success. Exit code: " + exitCode);
}
- };
+ } else {
+ if (exitCode === 0) {
+ doassert("successfully connected with `--host " + host + "`, but expected to fail.");
+ }
+ }
+};
- var goodStrings = [
- "[::1]:27999",
- "localhost:27999",
- "127.0.0.1:27999",
- "[0:0:0:0:0:0:0:1]:27999",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
- ];
+var goodStrings = [
+ "[::1]:27999",
+ "localhost:27999",
+ "127.0.0.1:27999",
+ "[0:0:0:0:0:0:0:1]:27999",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
+];
- var goodSocketStrings = [
- "/tmp/mongodb-27999.sock",
- ];
+var goodSocketStrings = [
+ "/tmp/mongodb-27999.sock",
+];
- var badStrings = [
- "::1:27999",
- "::1:65536",
- "::1]:27999",
- ":",
- ":27999",
- "[::1:]27999",
- "[::1:27999",
- "[::1]:",
- "[::1]:123456",
- "[::1]:1cat",
- "[::1]:65536",
- "[::1]:cat",
- "0:0::0:0:1:27999",
- "0000:0000:0000:0000:0000:0000:0000:0001:27999",
- "127.0.0.1:",
- "127.0.0.1:123456",
- "127.0.0.1:1cat",
- "127.0.0.1:65536",
- "127.0.0.1:cat",
- "a[::1:]27999",
- "a[127.0.0.1]:27999",
- "localhost:",
- ];
+var badStrings = [
+ "::1:27999",
+ "::1:65536",
+ "::1]:27999",
+ ":",
+ ":27999",
+ "[::1:]27999",
+ "[::1:27999",
+ "[::1]:",
+ "[::1]:123456",
+ "[::1]:1cat",
+ "[::1]:65536",
+ "[::1]:cat",
+ "0:0::0:0:1:27999",
+ "0000:0000:0000:0000:0000:0000:0000:0001:27999",
+ "127.0.0.1:",
+ "127.0.0.1:123456",
+ "127.0.0.1:1cat",
+ "127.0.0.1:65536",
+ "127.0.0.1:cat",
+ "a[::1:]27999",
+ "a[127.0.0.1]:27999",
+ "localhost:",
+];
- function runUriTestFor(i, connectionString, isGood) {
- connectionString = connectionString.replace("27999", "" + port);
- print("Testing " + (isGood ? "good" : "bad") + " connection string " + i + "...");
- print(" * testing " + connectionString);
- testHost(connectionString, isGood);
- print(" * testing mongodb://" + encodeURIComponent(connectionString));
- testHost("mongodb://" + encodeURIComponent(connectionString), isGood);
- }
+function runUriTestFor(i, connectionString, isGood) {
+ connectionString = connectionString.replace("27999", "" + port);
+ print("Testing " + (isGood ? "good" : "bad") + " connection string " + i + "...");
+ print(" * testing " + connectionString);
+ testHost(connectionString, isGood);
+ print(" * testing mongodb://" + encodeURIComponent(connectionString));
+ testHost("mongodb://" + encodeURIComponent(connectionString), isGood);
+}
- var i;
- jsTest.log("TESTING " + goodStrings.length + " good uri strings");
- for (i = 0; i < goodStrings.length; ++i) {
- runUriTestFor(i, goodStrings[i], true);
- }
+var i;
+jsTest.log("TESTING " + goodStrings.length + " good uri strings");
+for (i = 0; i < goodStrings.length; ++i) {
+ runUriTestFor(i, goodStrings[i], true);
+}
- if (!_isWindows()) {
- jsTest.log("TESTING " + goodSocketStrings.length + " good uri socket strings");
- for (i = 0; i < goodSocketStrings.length; ++i) {
- runUriTestFor(i, goodSocketStrings[i], true);
- }
+if (!_isWindows()) {
+ jsTest.log("TESTING " + goodSocketStrings.length + " good uri socket strings");
+ for (i = 0; i < goodSocketStrings.length; ++i) {
+ runUriTestFor(i, goodSocketStrings[i], true);
}
+}
- jsTest.log("TESTING " + badStrings.length + " bad uri strings");
- for (i = 0; i < badStrings.length; ++i) {
- runUriTestFor(i, badStrings[i], false);
- }
+jsTest.log("TESTING " + badStrings.length + " bad uri strings");
+for (i = 0; i < badStrings.length; ++i) {
+ runUriTestFor(i, badStrings[i], false);
+}
- jsTest.log("SUCCESSFUL test completion");
+jsTest.log("SUCCESSFUL test completion");
})();
diff --git a/jstests/noPassthroughWithMongod/index_boundary_values_validate.js b/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
index fd9ce6f5d21..5ff5a44ef93 100644
--- a/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
+++ b/jstests/noPassthroughWithMongod/index_boundary_values_validate.js
@@ -3,30 +3,28 @@
'use strict';
(function() {
- var t = db.index_boundary_values_validate;
- t.drop();
+var t = db.index_boundary_values_validate;
+t.drop();
- assert.writeOK(t.insert({a: MaxKey, b: MaxKey}));
- assert.writeOK(t.insert({a: MaxKey, b: MinKey}));
- assert.writeOK(t.insert({a: MinKey, b: MaxKey}));
- assert.writeOK(t.insert({a: MinKey, b: MinKey}));
+assert.writeOK(t.insert({a: MaxKey, b: MaxKey}));
+assert.writeOK(t.insert({a: MaxKey, b: MinKey}));
+assert.writeOK(t.insert({a: MinKey, b: MaxKey}));
+assert.writeOK(t.insert({a: MinKey, b: MinKey}));
- assert.writeOK(t.insert({a: {}}));
- assert.writeOK(t.insert({b: {}}));
- assert.writeOK(t.insert({unindexed_field: {}}));
- assert.writeOK(t.insert({a: {}, b: {}}));
+assert.writeOK(t.insert({a: {}}));
+assert.writeOK(t.insert({b: {}}));
+assert.writeOK(t.insert({unindexed_field: {}}));
+assert.writeOK(t.insert({a: {}, b: {}}));
- assert.commandWorked(t.createIndex({a: 1, b: 1}));
- assert.commandWorked(t.createIndex({a: 1, b: -1}));
- assert.commandWorked(t.createIndex({a: -1, b: 1}));
- assert.commandWorked(t.createIndex({a: -1, b: -1}));
+assert.commandWorked(t.createIndex({a: 1, b: 1}));
+assert.commandWorked(t.createIndex({a: 1, b: -1}));
+assert.commandWorked(t.createIndex({a: -1, b: 1}));
+assert.commandWorked(t.createIndex({a: -1, b: -1}));
- var res = t.validate(true);
- assert.commandWorked(res);
+var res = t.validate(true);
+assert.commandWorked(res);
- assert.eq(
- res.nrecords, 8, 'the collection had an unexpected number of records:\n' + tojson(res));
- assert.eq(
- res.nIndexes, 5, 'the collection had an unexpected number of indexes:\n' + tojson(res));
- assert.eq(res.valid, true, 'the collection failed validation:\n' + tojson(res));
+assert.eq(res.nrecords, 8, 'the collection had an unexpected number of records:\n' + tojson(res));
+assert.eq(res.nIndexes, 5, 'the collection had an unexpected number of indexes:\n' + tojson(res));
+assert.eq(res.valid, true, 'the collection failed validation:\n' + tojson(res));
})();
diff --git a/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js b/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js
index bc55bda6550..33e2e5cec8e 100644
--- a/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js
+++ b/jstests/noPassthroughWithMongod/index_limits_not_bypassed.js
@@ -3,36 +3,35 @@
* the 'createIndexes()' command to create multiple indexes in one request.
*/
(function() {
- "use strict";
+"use strict";
- const collName = "index_limits_not_bypassed";
- const coll = db.getCollection(collName);
- coll.drop();
+const collName = "index_limits_not_bypassed";
+const coll = db.getCollection(collName);
+coll.drop();
- // A single collection can have no more than 64 indexes. We'll create 62 indexes here to
- // have a total of 63 indexes (the _id index and the 62 about to be created).
- for (let index = 0; index < 62; index++) {
- let spec = {};
- spec[index] = 1;
- assert.commandWorked(coll.createIndex(spec));
- }
+// A single collection can have no more than 64 indexes. We'll create 62 indexes here to
+// have a total of 63 indexes (the _id index and the 62 about to be created).
+for (let index = 0; index < 62; index++) {
+ let spec = {};
+ spec[index] = 1;
+ assert.commandWorked(coll.createIndex(spec));
+}
- let indexes = db.runCommand({listIndexes: collName});
- assert.eq(63, indexes.cursor.firstBatch.length);
+let indexes = db.runCommand({listIndexes: collName});
+assert.eq(63, indexes.cursor.firstBatch.length);
- // Creating multiple indexes via 'createIndexes()' shouldn't bypass index limits.
- assert.commandFailedWithCode(coll.createIndexes([{x: 1}, {y: 1}]),
- ErrorCodes.CannotCreateIndex);
+// Creating multiple indexes via 'createIndexes()' shouldn't bypass index limits.
+assert.commandFailedWithCode(coll.createIndexes([{x: 1}, {y: 1}]), ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
- assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
- // Try to create two text indexes at the same time using 'createIndexes()'. The limit for text
- // indexes is one per collection.
- assert.commandFailedWithCode(
- coll.createIndexes([{x: "text", weights: {x: 5}}, {y: "text", weights: {y: 10}}]),
- ErrorCodes.CannotCreateIndex);
+// Try to create two text indexes at the same time using 'createIndexes()'. The limit for text
+// indexes is one per collection.
+assert.commandFailedWithCode(
+ coll.createIndexes([{x: "text", weights: {x: 5}}, {y: "text", weights: {y: 10}}]),
+ ErrorCodes.CannotCreateIndex);
- assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
- assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("x"), ErrorCodes.IndexNotFound);
+assert.commandFailedWithCode(coll.dropIndex("y"), ErrorCodes.IndexNotFound);
}());
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index da3f2c9f1d0..446502905cb 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -6,81 +6,81 @@
// @tags: [requires_persistence, requires_journaling, requires_replication]
(function() {
- 'use strict';
+'use strict';
- // Set up replica set
- var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
- var nodes = replTest.nodeList();
+// Set up replica set
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
+var nodes = replTest.nodeList();
- // We need an arbiter to ensure that the primary doesn't step down
- // when we restart the secondary.
- replTest.startSet();
- replTest.initiate({
- "_id": "bgIndex",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+// We need an arbiter to ensure that the primary doesn't step down
+// when we restart the secondary.
+replTest.startSet();
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
- var masterDB = master.getDB('bgIndexSec');
- var secondDB = second.getDB('bgIndexSec');
+var masterDB = master.getDB('bgIndexSec');
+var secondDB = second.getDB('bgIndexSec');
- var collectionName = 'jstests_bgsec';
+var collectionName = 'jstests_bgsec';
- var coll = masterDB.getCollection(collectionName);
+var coll = masterDB.getCollection(collectionName);
- var size = 100;
+var size = 100;
- var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute({j: true}));
- assert.eq(size, coll.count(), 'unexpected number of documents after bulk insert.');
+var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute({j: true}));
+assert.eq(size, coll.count(), 'unexpected number of documents after bulk insert.');
- // Make sure the documents make it to the secondary.
- replTest.awaitReplication();
+// Make sure the documents make it to the secondary.
+replTest.awaitReplication();
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'alwaysOn'}));
- try {
- coll.createIndex({i: 1}, {background: true});
- masterDB.getLastError(2);
- assert.eq(2, coll.getIndexes().length);
+assert.commandWorked(secondDB.adminCommand(
+ {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'alwaysOn'}));
+try {
+ coll.createIndex({i: 1}, {background: true});
+ masterDB.getLastError(2);
+ assert.eq(2, coll.getIndexes().length);
- // Make sure all writes are durable on the secondary so that we can restart it knowing that
- // the index build will be found on startup.
- // Waiting for durable is important for both (A) the record that we started the index build
- // so it is rebuild on restart, and (B) the update to minvalid to show that we've already
- // applied the oplog entry so it isn't replayed. If (A) is present without (B), then there
- // are two ways that the index can be rebuilt on startup and this test is only for the one
- // triggered by (A).
- secondDB.adminCommand({fsync: 1});
- } finally {
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'off'}));
- }
+ // Make sure all writes are durable on the secondary so that we can restart it knowing that
+ // the index build will be found on startup.
+ // Waiting for durable is important for both (A) the record that we started the index build
+    // so it is rebuilt on restart, and (B) the update to minvalid to show that we've already
+ // applied the oplog entry so it isn't replayed. If (A) is present without (B), then there
+ // are two ways that the index can be rebuilt on startup and this test is only for the one
+ // triggered by (A).
+ secondDB.adminCommand({fsync: 1});
+} finally {
+ assert.commandWorked(secondDB.adminCommand(
+ {configureFailPoint: 'leaveIndexBuildUnfinishedForShutdown', mode: 'off'}));
+}
- MongoRunner.stopMongod(second);
- replTest.start(second, {}, /*restart=*/true, /*wait=*/true);
+MongoRunner.stopMongod(second);
+replTest.start(second, {}, /*restart=*/true, /*wait=*/true);
- // Make sure secondary comes back.
- assert.soon(function() {
- try {
- secondDB.isMaster(); // trigger a reconnect if needed
- return true;
- } catch (e) {
- return false;
- }
- }, "secondary didn't restart", 30000, 1000);
+// Make sure secondary comes back.
+assert.soon(function() {
+ try {
+ secondDB.isMaster(); // trigger a reconnect if needed
+ return true;
+ } catch (e) {
+ return false;
+ }
+}, "secondary didn't restart", 30000, 1000);
- assert.soon(function() {
- return 2 == secondDB.getCollection(collectionName).getIndexes().length;
- }, "Index build not resumed after restart", 30000, 50);
- replTest.stopSet();
+assert.soon(function() {
+ return 2 == secondDB.getCollection(collectionName).getIndexes().length;
+}, "Index build not resumed after restart", 30000, 50);
+replTest.stopSet();
}());
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
index 97bcaff5412..16165ce3f96 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary_noretry.js
@@ -6,90 +6,90 @@
// @tags: [requires_persistence, requires_journaling, requires_replication]
(function() {
- 'use strict';
-
- // Assert that running `mongod` with `--noIndexBuildRetry` and `--replSet` does not startup.
- {
- // If code breaks the incompatibility between `--noIndexBuildRetry` and `--replSet`, using
- // `notAStorageEngine` will cause a failure later in execution that returns a different
- // exit code (100).
- var process = MongoRunner.runMongod({
- noIndexBuildRetry: "",
- replSet: "rs0",
- storageEngine: "notAStorageEngine",
- waitForConnect: false
- });
- var exitCode = waitProgram(process.pid);
- assert.eq(1, exitCode);
- }
-
- // Skip db hash check because secondary will have different number of indexes due to the
- // --noIndexBuildRetry command line option.
- TestData.skipCheckDBHashes = true;
-
- // Set up replica set.
- var replTest = new ReplSetTest({name: 'bgIndexNoRetry', nodes: 3});
- var nodenames = replTest.nodeList();
-
- var nodes = replTest.startSet();
- replTest.initiate({
- "_id": "bgIndexNoRetry",
- "members": [
- {"_id": 0, "host": nodenames[0]},
- {"_id": 1, "host": nodenames[1]},
- {"_id": 2, "host": nodenames[2], arbiterOnly: true}
- ]
+'use strict';
+
+// Assert that running `mongod` with `--noIndexBuildRetry` and `--replSet` does not start up.
+{
+ // If code breaks the incompatibility between `--noIndexBuildRetry` and `--replSet`, using
+ // `notAStorageEngine` will cause a failure later in execution that returns a different
+ // exit code (100).
+ var process = MongoRunner.runMongod({
+ noIndexBuildRetry: "",
+ replSet: "rs0",
+ storageEngine: "notAStorageEngine",
+ waitForConnect: false
});
+ var exitCode = waitProgram(process.pid);
+ assert.eq(1, exitCode);
+}
+
+// Skip the db hash check because the secondary will have a different number of indexes due to
+// the --noIndexBuildRetry command line option.
+TestData.skipCheckDBHashes = true;
+
+// Set up replica set.
+var replTest = new ReplSetTest({name: 'bgIndexNoRetry', nodes: 3});
+var nodenames = replTest.nodeList();
+
+var nodes = replTest.startSet();
+replTest.initiate({
+ "_id": "bgIndexNoRetry",
+ "members": [
+ {"_id": 0, "host": nodenames[0]},
+ {"_id": 1, "host": nodenames[1]},
+ {"_id": 2, "host": nodenames[2], arbiterOnly: true}
+ ]
+});
+
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
+
+var masterDB = master.getDB('bgIndexNoRetrySec');
+var secondDB = second.getDB('bgIndexNoRetrySec');
+
+var collectionName = 'jstests_bgsec';
+
+var size = 100;
+
+var masterColl = masterDB.getCollection(collectionName);
+var bulk = masterColl.initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute({j: true}));
+assert.eq(size, masterColl.count(), 'unexpected number of documents after bulk insert.');
+
+// Make sure the documents get replicated to the secondary.
+replTest.awaitReplication();
+
+assert.commandWorked(secondDB.adminCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'alwaysOn'}));
+masterColl.createIndex({i: 1}, {background: true});
+masterDB.getLastError(2);
+assert.eq(2, masterColl.getIndexes().length);
+
+// Kill -9 and restart the secondary, after making sure all writes are durable.
+// Waiting for durable is important for both (A) the record that we started the index build so
+// it is rebuild on restart, and (B) the update to minvalid to show that we've already applied
+// the oplog entry so it isn't replayed. If (A) is present without (B), then there are two ways
+// that the index can be rebuilt on startup and this test is only for the one triggered by (A).
+secondDB.adminCommand({fsync: 1});
+replTest.stop(second, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+replTest.start(
+ second, {"noReplSet": true, "noIndexBuildRetry": ""}, /*restart*/ true, /*wait=*/false);
+
+// Make sure secondary comes back.
+assert.soon(function() {
+ try {
+ secondDB.isMaster(); // trigger a reconnect if needed
+ return true;
+ } catch (e) {
+ return false;
+ }
+}, "secondary didn't restart", 60000, 1000);
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
-
- var masterDB = master.getDB('bgIndexNoRetrySec');
- var secondDB = second.getDB('bgIndexNoRetrySec');
-
- var collectionName = 'jstests_bgsec';
-
- var size = 100;
+var secondaryColl = secondDB.getCollection(collectionName);
- var masterColl = masterDB.getCollection(collectionName);
- var bulk = masterColl.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute({j: true}));
- assert.eq(size, masterColl.count(), 'unexpected number of documents after bulk insert.');
-
- // Make sure the documents get replicated to the secondary.
- replTest.awaitReplication();
-
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'alwaysOn'}));
- masterColl.createIndex({i: 1}, {background: true});
- masterDB.getLastError(2);
- assert.eq(2, masterColl.getIndexes().length);
-
- // Kill -9 and restart the secondary, after making sure all writes are durable.
- // Waiting for durable is important for both (A) the record that we started the index build so
- // it is rebuild on restart, and (B) the update to minvalid to show that we've already applied
- // the oplog entry so it isn't replayed. If (A) is present without (B), then there are two ways
- // that the index can be rebuilt on startup and this test is only for the one triggered by (A).
- secondDB.adminCommand({fsync: 1});
- replTest.stop(second, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- replTest.start(
- second, {"noReplSet": true, "noIndexBuildRetry": ""}, /*restart*/ true, /*wait=*/false);
-
- // Make sure secondary comes back.
- assert.soon(function() {
- try {
- secondDB.isMaster(); // trigger a reconnect if needed
- return true;
- } catch (e) {
- return false;
- }
- }, "secondary didn't restart", 60000, 1000);
-
- var secondaryColl = secondDB.getCollection(collectionName);
-
- assert.neq(2, secondaryColl.getIndexes().length);
- replTest.stopSet();
+assert.neq(2, secondaryColl.getIndexes().length);
+replTest.stopSet();
}());
diff --git a/jstests/noPassthroughWithMongod/indexbg_updates.js b/jstests/noPassthroughWithMongod/indexbg_updates.js
index c3465f78047..5511c83074d 100644
--- a/jstests/noPassthroughWithMongod/indexbg_updates.js
+++ b/jstests/noPassthroughWithMongod/indexbg_updates.js
@@ -4,57 +4,57 @@
// Variation of index_multi.js
(function() {
- "use strict";
- Random.setRandomSeed();
-
- var coll = db.getSiblingDB("indexbg_updates").coll;
- coll.drop();
-
- var numDocs = 10000;
-
- var bulk = coll.initializeUnorderedBulkOp();
- print("Populate the collection with random data");
- for (var i = 0; i < numDocs; i++) {
- var doc = {"_id": i, "field0": Random.rand()};
-
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- // Perform a bulk update on a single document, targeting the updates on the
- // field being actively indexed in the background
- bulk = coll.initializeUnorderedBulkOp();
- for (i = 0; i < numDocs; i++) {
- var criteria = {"_id": 1000};
- var mod = {};
-
- if (Random.rand() < .8) {
- mod["$set"] = {};
- mod["$set"]["field0"] = Random.rand();
- } else {
- mod["$unset"] = {};
- mod["$unset"]["field0"] = true;
- }
-
- bulk.find(criteria).update(mod);
+"use strict";
+Random.setRandomSeed();
+
+var coll = db.getSiblingDB("indexbg_updates").coll;
+coll.drop();
+
+var numDocs = 10000;
+
+var bulk = coll.initializeUnorderedBulkOp();
+print("Populate the collection with random data");
+for (var i = 0; i < numDocs; i++) {
+ var doc = {"_id": i, "field0": Random.rand()};
+
+ bulk.insert(doc);
+}
+assert.writeOK(bulk.execute());
+
+// Perform a bulk update on a single document, targeting the updates on the
+// field being actively indexed in the background
+bulk = coll.initializeUnorderedBulkOp();
+for (i = 0; i < numDocs; i++) {
+ var criteria = {"_id": 1000};
+ var mod = {};
+
+ if (Random.rand() < .8) {
+ mod["$set"] = {};
+ mod["$set"]["field0"] = Random.rand();
+ } else {
+ mod["$unset"] = {};
+ mod["$unset"]["field0"] = true;
}
- // Build an index in the background on field0
- var backgroundIndexBuildShell = startParallelShell(
- function() {
- var coll = db.getSiblingDB("indexbg_updates").coll;
- assert.commandWorked(coll.createIndex({"field0": 1}, {"background": true}));
- },
- null, // port -- use default
- false // noconnect
- );
+ bulk.find(criteria).update(mod);
+}
+
+// Build an index in the background on field0
+var backgroundIndexBuildShell = startParallelShell(
+ function() {
+ var coll = db.getSiblingDB("indexbg_updates").coll;
+ assert.commandWorked(coll.createIndex({"field0": 1}, {"background": true}));
+ },
+ null, // port -- use default
+ false // noconnect
+);
- print("Do some sets and unsets");
- assert.writeOK(bulk.execute());
+print("Do some sets and unsets");
+assert.writeOK(bulk.execute());
- print("Start background index build");
- backgroundIndexBuildShell();
+print("Start background index build");
+backgroundIndexBuildShell();
- var explain = coll.find().hint({"field0": 1}).explain();
- assert("queryPlanner" in explain, tojson(explain));
+var explain = coll.find().hint({"field0": 1}).explain();
+assert("queryPlanner" in explain, tojson(explain));
}());
diff --git a/jstests/noPassthroughWithMongod/insertMulti.js b/jstests/noPassthroughWithMongod/insertMulti.js
index 2000669d698..2e09e799a2f 100644
--- a/jstests/noPassthroughWithMongod/insertMulti.js
+++ b/jstests/noPassthroughWithMongod/insertMulti.js
@@ -1,53 +1,53 @@
// check the insertMulti path works, including the error handling
(function() {
- "use strict";
+"use strict";
- function makeDocument(docSize) {
- var doc = {"fieldName": ""};
- var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while (Object.bsonsize(doc) < docSize) {
- if (Object.bsonsize(doc) < docSize - longString.length) {
- doc.fieldName += longString;
- } else {
- doc.fieldName += "x";
- }
+function makeDocument(docSize) {
+ var doc = {"fieldName": ""};
+ var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+ while (Object.bsonsize(doc) < docSize) {
+ if (Object.bsonsize(doc) < docSize - longString.length) {
+ doc.fieldName += longString;
+ } else {
+ doc.fieldName += "x";
}
- return doc;
}
+ return doc;
+}
- db.getMongo().forceWriteMode('legacy');
- var t = db.foo;
+db.getMongo().forceWriteMode('legacy');
+var t = db.foo;
- t.drop();
- t.insert([{_id: 1}, {_id: 2}]);
- assert.eq(t.count(), 2);
- t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 0); // no ContinueOnError
- assert.eq(t.count(), 3);
- assert.eq(t.count({"_id": 1}), 1);
- assert.eq(t.count({"_id": 2}), 1);
- assert.eq(t.count({"_id": 3}), 1);
- assert.eq(t.count({"_id": 4}), 0);
+t.drop();
+t.insert([{_id: 1}, {_id: 2}]);
+assert.eq(t.count(), 2);
+t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 0); // no ContinueOnError
+assert.eq(t.count(), 3);
+assert.eq(t.count({"_id": 1}), 1);
+assert.eq(t.count({"_id": 2}), 1);
+assert.eq(t.count({"_id": 3}), 1);
+assert.eq(t.count({"_id": 4}), 0);
- t.drop();
- t.insert([{_id: 1}, {_id: 2}]);
- assert.eq(t.count(), 2);
- t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 1); // ContinueOnError
- assert.eq(t.count(), 4);
- assert.eq(t.count({"_id": 1}), 1);
- assert.eq(t.count({"_id": 2}), 1);
- assert.eq(t.count({"_id": 3}), 1);
- assert.eq(t.count({"_id": 4}), 1);
+t.drop();
+t.insert([{_id: 1}, {_id: 2}]);
+assert.eq(t.count(), 2);
+t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 1); // ContinueOnError
+assert.eq(t.count(), 4);
+assert.eq(t.count({"_id": 1}), 1);
+assert.eq(t.count({"_id": 2}), 1);
+assert.eq(t.count({"_id": 3}), 1);
+assert.eq(t.count({"_id": 4}), 1);
- // Push a large vector in bigger than the subset size we'll break it up into
- t.drop();
- var doc = makeDocument(16 * 1024);
- var docs = [];
- for (var i = 0; i < 1000; i++)
- docs.push(Object.extend({}, doc));
- t.insert(docs);
- assert.eq(null, t.getDB().getLastError());
- assert.eq(t.count(), docs.length);
+// Push in a large vector bigger than the subset size we'll break it up into
+t.drop();
+var doc = makeDocument(16 * 1024);
+var docs = [];
+for (var i = 0; i < 1000; i++)
+ docs.push(Object.extend({}, doc));
+t.insert(docs);
+assert.eq(null, t.getDB().getLastError());
+assert.eq(t.count(), docs.length);
- t.drop();
+t.drop();
})();
diff --git a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
index 9687900f9b3..52d96f15eae 100644
--- a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
+++ b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
@@ -5,166 +5,166 @@
// The outer mode test starts a mongod with --ipv6 and then starts a mongo shell with --ipv6
// and a command line to run the test in inner_mode. The inner mode test is the actual test.
(function() {
- if ("undefined" == typeof inner_mode) {
- // Start a mongod with --ipv6
- jsTest.log("Outer mode test starting mongod with --ipv6");
- // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
- // Unfortunately, having bind_ip = ::1 won't work in the test framework (But does work when
- // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
- // with that address.
- var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
- if (mongod == null) {
- jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
- return;
- }
- var args = [
- "mongo",
- "--nodb",
- "--ipv6",
- "--host",
- "::1",
- "--port",
- mongod.port,
- "--eval",
- "inner_mode=true;port=" + mongod.port + ";",
- "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js"
- ];
- var exitCode = _runMongoProgram.apply(null, args);
- jsTest.log("Inner mode test finished, exit code was " + exitCode);
-
- // Pass the inner test's exit code back as the outer test's exit code
- if (exitCode != 0) {
- doassert("inner test failed with exit code " + exitCode);
- }
- MongoRunner.stopMongod(mongod);
+if ("undefined" == typeof inner_mode) {
+ // Start a mongod with --ipv6
+ jsTest.log("Outer mode test starting mongod with --ipv6");
+ // NOTE: bind_ip arg is present to test if it can parse ipv6 addresses (::1 in this case).
+    // Unfortunately, having bind_ip = ::1 won't work in the test framework (but it does work when
+ // tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
+ // with that address.
+ var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
+ if (mongod == null) {
+ jsTest.log("Unable to run test because ipv6 is not on machine, see BF-10990");
return;
}
-
- var goodStrings = [
- "localhost:27999/test",
- "[::1]:27999/test",
- "[0:0:0:0:0:0:0:1]:27999/test",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test",
- "localhost:27999",
- "[::1]:27999",
- "[0:0:0:0:0:0:0:1]:27999",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
+ var args = [
+ "mongo",
+ "--nodb",
+ "--ipv6",
+ "--host",
+ "::1",
+ "--port",
+ mongod.port,
+ "--eval",
+ "inner_mode=true;port=" + mongod.port + ";",
+ "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js"
];
+ var exitCode = _runMongoProgram.apply(null, args);
+ jsTest.log("Inner mode test finished, exit code was " + exitCode);
- var missingConnString = /^Missing connection string$/;
- var incorrectType = /^Incorrect type/;
- var emptyConnString = /^Empty connection string$/;
- var badHost = /^Failed to parse mongodb/;
- var emptyHost = /^Empty host component/;
- var noPort = /^No digits/;
- var badPort = /^Bad digit/;
- var invalidPort = /^Port number \d+ out of range/;
- var moreThanOneColon = /^More than one ':' detected/;
- var charBeforeSquareBracket = /^'\[' present, but not first character/;
- var noCloseBracket = /^ipv6 address is missing closing '\]'/;
- var noOpenBracket = /^'\]' present without '\['/;
- var noColonPrePort = /^missing colon after '\]' before the port/;
- var badStrings = [
- {s: undefined, r: missingConnString},
- {s: 7, r: incorrectType},
- {s: null, r: incorrectType},
- {s: "", r: emptyConnString},
- {s: " ", r: emptyConnString},
- {s: ":", r: emptyHost},
- {s: "/", r: badHost},
- {s: ":/", r: emptyHost},
- {s: ":/test", r: emptyHost},
- {s: ":27999/", r: emptyHost},
- {s: ":27999/test", r: emptyHost},
- {s: "/test", r: badHost},
- {s: "localhost:/test", r: noPort},
- {s: "[::1]:/test", r: noPort},
- {s: "[::1]:cat/test", r: badPort},
- {s: "[::1]:1cat/test", r: badPort},
- {s: "[::1]:123456/test", r: invalidPort},
- {s: "[::1]:65536/test", r: invalidPort},
- {s: "127.0.0.1:65536/test", r: invalidPort},
- {s: "::1:27999/test", r: moreThanOneColon},
- {s: "0:0::0:0:1:27999/test", r: moreThanOneColon},
- {s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: moreThanOneColon},
- {s: "a[127.0.0.1]:27999/", r: charBeforeSquareBracket},
- {s: "a[::1:]27999/", r: charBeforeSquareBracket},
- {s: "[::1:27999/", r: noCloseBracket},
- {s: "[::1:]27999/", r: noColonPrePort},
- {s: "::1]:27999/", r: noOpenBracket},
- ];
+ // Pass the inner test's exit code back as the outer test's exit code
+ if (exitCode != 0) {
+ doassert("inner test failed with exit code " + exitCode);
+ }
+ MongoRunner.stopMongod(mongod);
+ return;
+}
- var substitutePort = function(connectionString) {
- // This will be called with non-strings as well as strings, so we need to catch exceptions
- try {
- return connectionString.replace("27999", "" + port);
- } catch (e) {
- return connectionString;
- }
- };
+var goodStrings = [
+ "localhost:27999/test",
+ "[::1]:27999/test",
+ "[0:0:0:0:0:0:0:1]:27999/test",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test",
+ "localhost:27999",
+ "[::1]:27999",
+ "[0:0:0:0:0:0:0:1]:27999",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999",
+];
- var testGood = function(i, connectionString) {
- print("\n---\nTesting good connection string " + i + " (\"" + connectionString + "\") ...");
- var gotException = false;
- var exception;
- try {
- var connectDB = connect(connectionString);
- connectDB = null;
- } catch (e) {
- gotException = true;
- exception = e;
- }
- if (!gotException) {
- print("Good connection string " + i + " (\"" + connectionString +
- "\") correctly validated");
- return;
- }
- var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
- "\"): exception was \"" + tojson(exception) + "\"";
- doassert(message);
- };
+var missingConnString = /^Missing connection string$/;
+var incorrectType = /^Incorrect type/;
+var emptyConnString = /^Empty connection string$/;
+var badHost = /^Failed to parse mongodb/;
+var emptyHost = /^Empty host component/;
+var noPort = /^No digits/;
+var badPort = /^Bad digit/;
+var invalidPort = /^Port number \d+ out of range/;
+var moreThanOneColon = /^More than one ':' detected/;
+var charBeforeSquareBracket = /^'\[' present, but not first character/;
+var noCloseBracket = /^ipv6 address is missing closing '\]'/;
+var noOpenBracket = /^'\]' present without '\['/;
+var noColonPrePort = /^missing colon after '\]' before the port/;
+var badStrings = [
+ {s: undefined, r: missingConnString},
+ {s: 7, r: incorrectType},
+ {s: null, r: incorrectType},
+ {s: "", r: emptyConnString},
+ {s: " ", r: emptyConnString},
+ {s: ":", r: emptyHost},
+ {s: "/", r: badHost},
+ {s: ":/", r: emptyHost},
+ {s: ":/test", r: emptyHost},
+ {s: ":27999/", r: emptyHost},
+ {s: ":27999/test", r: emptyHost},
+ {s: "/test", r: badHost},
+ {s: "localhost:/test", r: noPort},
+ {s: "[::1]:/test", r: noPort},
+ {s: "[::1]:cat/test", r: badPort},
+ {s: "[::1]:1cat/test", r: badPort},
+ {s: "[::1]:123456/test", r: invalidPort},
+ {s: "[::1]:65536/test", r: invalidPort},
+ {s: "127.0.0.1:65536/test", r: invalidPort},
+ {s: "::1:27999/test", r: moreThanOneColon},
+ {s: "0:0::0:0:1:27999/test", r: moreThanOneColon},
+ {s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: moreThanOneColon},
+ {s: "a[127.0.0.1]:27999/", r: charBeforeSquareBracket},
+ {s: "a[::1:]27999/", r: charBeforeSquareBracket},
+ {s: "[::1:27999/", r: noCloseBracket},
+ {s: "[::1:]27999/", r: noColonPrePort},
+ {s: "::1]:27999/", r: noOpenBracket},
+];
- var testBad = function(i, connectionString, errorRegex) {
- print("\n---\nTesting bad connection string " + i + " (\"" + connectionString + "\") ...");
- var gotException = false;
- var gotCorrectErrorText = false;
- var exception;
- try {
- var connectDB = connect(connectionString);
- connectDB = null;
- } catch (e) {
- gotException = true;
- exception = e;
- if (errorRegex.test(e.message)) {
- gotCorrectErrorText = true;
- }
- }
- if (gotCorrectErrorText) {
- print("Bad connection string " + i + " (\"" + connectionString +
- "\") correctly rejected:\n" + tojson(exception));
- return;
- }
- var message = "FAILED to generate correct exception for badString " + i + " (\"" +
- connectionString + "\"): ";
- if (gotException) {
- message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
- errorRegex.toString() + "\"";
- } else {
- message += "no exception was thrown";
- }
- doassert(message);
- };
+var substitutePort = function(connectionString) {
+ // This will be called with non-strings as well as strings, so we need to catch exceptions
+ try {
+ return connectionString.replace("27999", "" + port);
+ } catch (e) {
+ return connectionString;
+ }
+};
- var i;
- jsTest.log("TESTING " + goodStrings.length + " good connection strings");
- for (i = 0; i < goodStrings.length; ++i) {
- testGood(i, substitutePort(goodStrings[i]));
+var testGood = function(i, connectionString) {
+ print("\n---\nTesting good connection string " + i + " (\"" + connectionString + "\") ...");
+ var gotException = false;
+ var exception;
+ try {
+ var connectDB = connect(connectionString);
+ connectDB = null;
+ } catch (e) {
+ gotException = true;
+ exception = e;
}
+ if (!gotException) {
+ print("Good connection string " + i + " (\"" + connectionString +
+ "\") correctly validated");
+ return;
+ }
+ var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
+ "\"): exception was \"" + tojson(exception) + "\"";
+ doassert(message);
+};
- jsTest.log("TESTING " + badStrings.length + " bad connection strings");
- for (i = 0; i < badStrings.length; ++i) {
- testBad(i, substitutePort(badStrings[i].s), badStrings[i].r);
+var testBad = function(i, connectionString, errorRegex) {
+ print("\n---\nTesting bad connection string " + i + " (\"" + connectionString + "\") ...");
+ var gotException = false;
+ var gotCorrectErrorText = false;
+ var exception;
+ try {
+ var connectDB = connect(connectionString);
+ connectDB = null;
+ } catch (e) {
+ gotException = true;
+ exception = e;
+ if (errorRegex.test(e.message)) {
+ gotCorrectErrorText = true;
+ }
}
+ if (gotCorrectErrorText) {
+ print("Bad connection string " + i + " (\"" + connectionString +
+ "\") correctly rejected:\n" + tojson(exception));
+ return;
+ }
+ var message = "FAILED to generate correct exception for badString " + i + " (\"" +
+ connectionString + "\"): ";
+ if (gotException) {
+ message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
+ errorRegex.toString() + "\"";
+ } else {
+ message += "no exception was thrown";
+ }
+ doassert(message);
+};
+
+var i;
+jsTest.log("TESTING " + goodStrings.length + " good connection strings");
+for (i = 0; i < goodStrings.length; ++i) {
+ testGood(i, substitutePort(goodStrings[i]));
+}
+
+jsTest.log("TESTING " + badStrings.length + " bad connection strings");
+for (i = 0; i < badStrings.length; ++i) {
+ testBad(i, substitutePort(badStrings[i].s), badStrings[i].r);
+}
- jsTest.log("SUCCESSFUL test completion");
+jsTest.log("SUCCESSFUL test completion");
})();
diff --git a/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js b/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
index ab22bfb5f6c..10a019e95cf 100644
--- a/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
+++ b/jstests/noPassthroughWithMongod/isMaster_feature_compatibility_version.js
@@ -4,54 +4,53 @@
// upgrading, or downgrading.
//
(function() {
- "use strict";
- load('./jstests/libs/feature_compatibility_version.js');
+"use strict";
+load('./jstests/libs/feature_compatibility_version.js');
- const adminDB = db.getSiblingDB("admin");
- const isMasterCommand = {
- isMaster: 1,
- internalClient: {minWireVersion: NumberInt(0), maxWireVersion: NumberInt(7)}
- };
+const adminDB = db.getSiblingDB("admin");
+const isMasterCommand = {
+ isMaster: 1,
+ internalClient: {minWireVersion: NumberInt(0), maxWireVersion: NumberInt(7)}
+};
- // When the featureCompatibilityVersion is equal to the upgrade version, running isMaster with
- // internalClient returns minWireVersion == maxWireVersion.
- checkFCV(adminDB, latestFCV);
- let res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is equal to the upgrade version, running isMaster with
+// internalClient returns minWireVersion == maxWireVersion.
+checkFCV(adminDB, latestFCV);
+let res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
- // When the featureCompatibilityVersion is upgrading, running isMaster with internalClient
- // returns minWireVersion == maxWireVersion.
- assert.writeOK(
- adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
- res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is upgrading, running isMaster with internalClient
+// returns minWireVersion == maxWireVersion.
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
+res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
- // When the featureCompatibilityVersion is downgrading, running isMaster with internalClient
- // returns minWireVersion == maxWireVersion.
- assert.writeOK(adminDB.system.version.update(
- {_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
- res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is downgrading, running isMaster with internalClient
+// returns minWireVersion == maxWireVersion.
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
+res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, res.maxWireVersion, tojson(res));
- // When the featureCompatibilityVersion is equal to the downgrade version, running isMaster with
- // internalClient returns minWireVersion + 1 == maxWireVersion.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- res = adminDB.runCommand(isMasterCommand);
- assert.commandWorked(res);
- assert.eq(res.minWireVersion + 1, res.maxWireVersion, tojson(res));
-
- // When the internalClient field is missing from the isMaster command, the response returns the
- // full wire version range from minWireVersion == 0 to maxWireVersion == latest version, even if
- // the featureCompatibilityVersion is equal to the upgrade version.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- res = adminDB.runCommand({isMaster: 1});
- assert.commandWorked(res);
- assert.eq(res.minWireVersion, 0, tojson(res));
- assert.lt(res.minWireVersion, res.maxWireVersion, tojson(res));
+// When the featureCompatibilityVersion is equal to the downgrade version, running isMaster with
+// internalClient returns minWireVersion + 1 == maxWireVersion.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+res = adminDB.runCommand(isMasterCommand);
+assert.commandWorked(res);
+assert.eq(res.minWireVersion + 1, res.maxWireVersion, tojson(res));
+// When the internalClient field is missing from the isMaster command, the response returns the
+// full wire version range from minWireVersion == 0 to maxWireVersion == latest version, even if
+// the featureCompatibilityVersion is equal to the upgrade version.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+res = adminDB.runCommand({isMaster: 1});
+assert.commandWorked(res);
+assert.eq(res.minWireVersion, 0, tojson(res));
+assert.lt(res.minWireVersion, res.maxWireVersion, tojson(res));
})();
diff --git a/jstests/noPassthroughWithMongod/log_component_helpers.js b/jstests/noPassthroughWithMongod/log_component_helpers.js
index 405b2bdaca3..d767c9f7e00 100644
--- a/jstests/noPassthroughWithMongod/log_component_helpers.js
+++ b/jstests/noPassthroughWithMongod/log_component_helpers.js
@@ -1,45 +1,44 @@
// Basic sanity check of log component helpers
(function(db) {
- "use strict";
- var mongo = db.getMongo();
-
- // Get current log component setttings. We will reset to these later.
- var originalSettings =
- assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
- .logComponentVerbosity;
-
- // getLogComponents
- var components1 = mongo.getLogComponents();
- assert.docEq(components1, originalSettings);
-
- // getLogComponents via db
- var components2 = db.getLogComponents();
- assert.docEq(components2, originalSettings);
-
- // setLogLevel - default component
- mongo.setLogLevel(2);
- assert.eq(mongo.getLogComponents().verbosity, 2);
-
- db.setLogLevel(0);
- assert.eq(mongo.getLogComponents().verbosity, 0);
-
- // setLogLevel - valid log component
- mongo.setLogLevel(2, "storage.journal");
- assert.eq(mongo.getLogComponents().storage.journal.verbosity, 2);
-
- db.setLogLevel(1, "storage.journal");
- assert.eq(mongo.getLogComponents().storage.journal.verbosity, 1);
-
- // setLogLevel - invalid argument
- assert.throws(function() {
- mongo.setLogLevel(2, 24);
- });
- assert.throws(function() {
- db.setLogLevel(2, ["array", "not.allowed"]);
- });
-
- // Restore originalSettings
- assert.commandWorked(
- db.adminCommand({setParameter: 1, logComponentVerbosity: originalSettings}));
+"use strict";
+var mongo = db.getMongo();
+
+// Get current log component settings. We will reset to these later.
+var originalSettings =
+ assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+// getLogComponents
+var components1 = mongo.getLogComponents();
+assert.docEq(components1, originalSettings);
+
+// getLogComponents via db
+var components2 = db.getLogComponents();
+assert.docEq(components2, originalSettings);
+
+// setLogLevel - default component
+mongo.setLogLevel(2);
+assert.eq(mongo.getLogComponents().verbosity, 2);
+
+db.setLogLevel(0);
+assert.eq(mongo.getLogComponents().verbosity, 0);
+
+// setLogLevel - valid log component
+mongo.setLogLevel(2, "storage.journal");
+assert.eq(mongo.getLogComponents().storage.journal.verbosity, 2);
+
+db.setLogLevel(1, "storage.journal");
+assert.eq(mongo.getLogComponents().storage.journal.verbosity, 1);
+
+// setLogLevel - invalid argument
+assert.throws(function() {
+ mongo.setLogLevel(2, 24);
+});
+assert.throws(function() {
+ db.setLogLevel(2, ["array", "not.allowed"]);
+});
+
+// Restore originalSettings
+assert.commandWorked(db.adminCommand({setParameter: 1, logComponentVerbosity: originalSettings}));
}(db));
diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js
index eea71efdc4f..bb39282871f 100644
--- a/jstests/noPassthroughWithMongod/logpath.js
+++ b/jstests/noPassthroughWithMongod/logpath.js
@@ -4,7 +4,8 @@ var name = "logpath";
var token = "logpath_token";
var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux
-var basedir = MongoRunner.dataPath + name + "files" + "/";
+var basedir = MongoRunner.dataPath + name + "files" +
+ "/";
var logdir = basedir + "logdir/";
var testdir = basedir + "testdir/";
var sfile = _isWindows() ? "NUL" : "/dev/null";
diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js
index 016b2e215c0..9b1e9f7d3bc 100644
--- a/jstests/noPassthroughWithMongod/moveprimary-replset.js
+++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js
@@ -4,56 +4,56 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- "use strict";
-
- var numDocs = 10000;
- var baseName = "moveprimary-replset";
- var testDBName = baseName;
- var testCollName = 'coll';
-
- var shardingTestConfig = {
- name: baseName,
- mongos: 1,
- shards: 2,
- config: 3,
- rs: {nodes: 3},
- other: {manualAddShard: true}
- };
-
- var shardingTest = new ShardingTest(shardingTestConfig);
-
- var replSet1 = shardingTest.rs0;
- var replSet2 = shardingTest.rs1;
-
- var repset1DB = replSet1.getPrimary().getDB(testDBName);
- for (var i = 1; i <= numDocs; i++) {
- repset1DB[testCollName].insert({x: i});
- }
- replSet1.awaitReplication();
-
- var mongosConn = shardingTest.s;
- var testDB = mongosConn.getDB(testDBName);
-
- mongosConn.adminCommand({addshard: replSet1.getURL()});
-
- testDB[testCollName].update({}, {$set: {y: 'hello'}}, false /*upsert*/, true /*multi*/);
- assert.eq(testDB[testCollName].count({y: 'hello'}),
- numDocs,
- 'updating and counting docs via mongos failed');
-
- mongosConn.adminCommand({addshard: replSet2.getURL()});
-
- assert.commandWorked(
- mongosConn.getDB('admin').runCommand({moveprimary: testDBName, to: replSet2.getURL()}));
- mongosConn.getDB('admin').printShardingStatus();
- assert.eq(testDB.getSiblingDB("config").databases.findOne({"_id": testDBName}).primary,
- replSet2.name,
- "Failed to change primary shard for unsharded database.");
-
- testDB[testCollName].update({}, {$set: {z: 'world'}}, false /*upsert*/, true /*multi*/);
- assert.eq(testDB[testCollName].count({z: 'world'}),
- numDocs,
- 'updating and counting docs via mongos failed');
-
- shardingTest.stop();
+"use strict";
+
+var numDocs = 10000;
+var baseName = "moveprimary-replset";
+var testDBName = baseName;
+var testCollName = 'coll';
+
+var shardingTestConfig = {
+ name: baseName,
+ mongos: 1,
+ shards: 2,
+ config: 3,
+ rs: {nodes: 3},
+ other: {manualAddShard: true}
+};
+
+var shardingTest = new ShardingTest(shardingTestConfig);
+
+var replSet1 = shardingTest.rs0;
+var replSet2 = shardingTest.rs1;
+
+var repset1DB = replSet1.getPrimary().getDB(testDBName);
+for (var i = 1; i <= numDocs; i++) {
+ repset1DB[testCollName].insert({x: i});
+}
+replSet1.awaitReplication();
+
+var mongosConn = shardingTest.s;
+var testDB = mongosConn.getDB(testDBName);
+
+mongosConn.adminCommand({addshard: replSet1.getURL()});
+
+testDB[testCollName].update({}, {$set: {y: 'hello'}}, false /*upsert*/, true /*multi*/);
+assert.eq(testDB[testCollName].count({y: 'hello'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
+
+mongosConn.adminCommand({addshard: replSet2.getURL()});
+
+assert.commandWorked(
+ mongosConn.getDB('admin').runCommand({moveprimary: testDBName, to: replSet2.getURL()}));
+mongosConn.getDB('admin').printShardingStatus();
+assert.eq(testDB.getSiblingDB("config").databases.findOne({"_id": testDBName}).primary,
+ replSet2.name,
+ "Failed to change primary shard for unsharded database.");
+
+testDB[testCollName].update({}, {$set: {z: 'world'}}, false /*upsert*/, true /*multi*/);
+assert.eq(testDB[testCollName].count({z: 'world'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
+
+shardingTest.stop();
})();
diff --git a/jstests/noPassthroughWithMongod/mr_writeconflict.js b/jstests/noPassthroughWithMongod/mr_writeconflict.js
index af54a150df4..204c9040c22 100644
--- a/jstests/noPassthroughWithMongod/mr_writeconflict.js
+++ b/jstests/noPassthroughWithMongod/mr_writeconflict.js
@@ -1,70 +1,69 @@
// SERVER-16262: Write-conflict during map-reduce operations
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- var makeDoc = function(keyLimit, valueLimit) {
- return {_id: ObjectId(), key: Random.randInt(keyLimit), value: Random.randInt(valueLimit)};
- };
+var makeDoc = function(keyLimit, valueLimit) {
+ return {_id: ObjectId(), key: Random.randInt(keyLimit), value: Random.randInt(valueLimit)};
+};
- var main = function() {
-
- function mapper() {
- var obj = {};
- obj[this.value] = 1;
- emit(this.key, obj);
- }
+var main = function() {
+ function mapper() {
+ var obj = {};
+ obj[this.value] = 1;
+ emit(this.key, obj);
+ }
- function reducer(key, values) {
- var res = {};
+ function reducer(key, values) {
+ var res = {};
- values.forEach(function(obj) {
- Object.keys(obj).forEach(function(value) {
- if (!res.hasOwnProperty(value)) {
- res[value] = 0;
- }
- res[value] += obj[value];
- });
+ values.forEach(function(obj) {
+ Object.keys(obj).forEach(function(value) {
+ if (!res.hasOwnProperty(value)) {
+ res[value] = 0;
+ }
+ res[value] += obj[value];
});
+ });
- return res;
- }
+ return res;
+ }
- for (var i = 0; i < 10; i++) {
- // Have all threads combine their results into the same collection
- var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
- assert.commandWorked(res);
- }
- };
+ for (var i = 0; i < 10; i++) {
+ // Have all threads combine their results into the same collection
+ var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
+ assert.commandWorked(res);
+ }
+};
- Random.setRandomSeed();
+Random.setRandomSeed();
- var numDocs = 200;
- var bulk = db.source.initializeUnorderedBulkOp();
- var i;
- for (i = 0; i < numDocs; ++i) {
- var doc = makeDoc(numDocs / 100, numDocs / 10);
- bulk.insert(doc);
- }
+var numDocs = 200;
+var bulk = db.source.initializeUnorderedBulkOp();
+var i;
+for (i = 0; i < numDocs; ++i) {
+ var doc = makeDoc(numDocs / 100, numDocs / 10);
+ bulk.insert(doc);
+}
- var res = bulk.execute();
- assert.writeOK(res);
- assert.eq(numDocs, res.nInserted);
+var res = bulk.execute();
+assert.writeOK(res);
+assert.eq(numDocs, res.nInserted);
- db.dest.drop();
- assert.commandWorked(db.createCollection('dest'));
+db.dest.drop();
+assert.commandWorked(db.createCollection('dest'));
- var numThreads = 6;
- var t = [];
- for (i = 0; i < numThreads - 1; ++i) {
- t[i] = new ScopedThread(main);
- t[i].start();
- }
+var numThreads = 6;
+var t = [];
+for (i = 0; i < numThreads - 1; ++i) {
+ t[i] = new ScopedThread(main);
+ t[i].start();
+}
- main();
- for (i = 0; i < numThreads - 1; ++i) {
- t[i].join();
- }
+main();
+for (i = 0; i < numThreads - 1; ++i) {
+ t[i].join();
+}
}());
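
The concurrency pattern in this file is reusable on its own; a minimal sketch of fanning work out over ScopedThread, with a deliberately trivial worker body (the ping workload is illustrative, not the map-reduce from this test):

    load('jstests/libs/parallelTester.js');  // provides ScopedThread

    var worker = function() {
        // Each thread runs with its own connection-backed 'db' global.
        for (var i = 0; i < 10; i++) {
            assert.commandWorked(db.adminCommand({ping: 1}));
        }
    };

    var threads = [];
    for (var i = 0; i < 4; i++) {
        threads[i] = new ScopedThread(worker);
        threads[i].start();
    }
    threads.forEach(function(t) {
        t.join();
    });
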
diff --git a/jstests/noPassthroughWithMongod/ne_array_indexability.js b/jstests/noPassthroughWithMongod/ne_array_indexability.js
index 284389c1303..606956c5753 100644
--- a/jstests/noPassthroughWithMongod/ne_array_indexability.js
+++ b/jstests/noPassthroughWithMongod/ne_array_indexability.js
@@ -2,44 +2,44 @@
* Test that $ne: [] queries are cached correctly. See SERVER-39764.
*/
(function() {
- const coll = db.ne_array_indexability;
- coll.drop();
-
- coll.createIndex({"obj": 1});
- coll.createIndex({"obj": 1, "abc": 1});
-
- assert.commandWorked(coll.insert({obj: "hi there"}));
-
- function runTest(queryToCache, queryToRunAfterCaching) {
- assert.eq(coll.find(queryToCache).itcount(), 1);
- assert.eq(coll.find(queryToCache).itcount(), 1);
-
- const cacheEntries =
- coll.aggregate([
- {$planCacheStats: {}},
- {
- $match: {
- isActive: true,
- createdFromQuery: {query: queryToCache, sort: {}, projection: {}}
- }
+const coll = db.ne_array_indexability;
+coll.drop();
+
+coll.createIndex({"obj": 1});
+coll.createIndex({"obj": 1, "abc": 1});
+
+assert.commandWorked(coll.insert({obj: "hi there"}));
+
+function runTest(queryToCache, queryToRunAfterCaching) {
+ assert.eq(coll.find(queryToCache).itcount(), 1);
+ assert.eq(coll.find(queryToCache).itcount(), 1);
+
+ const cacheEntries =
+ coll.aggregate([
+ {$planCacheStats: {}},
+ {
+ $match: {
+ isActive: true,
+ createdFromQuery: {query: queryToCache, sort: {}, projection: {}}
}
- ])
- .toArray();
- assert.eq(cacheEntries.length, 1);
+ }
+ ])
+ .toArray();
+ assert.eq(cacheEntries.length, 1);
- assert.eq(coll.find(queryToRunAfterCaching).itcount(), 1);
+ assert.eq(coll.find(queryToRunAfterCaching).itcount(), 1);
- const explain = assert.commandWorked(coll.find(queryToRunAfterCaching).explain());
- // The query with the $ne: array should have the same queryHash, but a different
- // planCacheKey.
- assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
- assert.neq(explain.queryPlanner.planCacheKey, cacheEntries[0].planCacheKey);
- }
+ const explain = assert.commandWorked(coll.find(queryToRunAfterCaching).explain());
+ // The query with the $ne: array should have the same queryHash, but a different
+ // planCacheKey.
+ assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
+ assert.neq(explain.queryPlanner.planCacheKey, cacheEntries[0].planCacheKey);
+}
- runTest({'obj': {$ne: 'def'}}, {'obj': {$ne: [[1]]}});
+runTest({'obj': {$ne: 'def'}}, {'obj': {$ne: [[1]]}});
- // Clear the cache.
- assert.commandWorked(coll.runCommand('planCacheClear'));
+// Clear the cache.
+assert.commandWorked(coll.runCommand('planCacheClear'));
- runTest({'obj': {$nin: ['abc', 'def']}}, {'obj': {$nin: [[1], 'abc']}});
+runTest({'obj': {$nin: ['abc', 'def']}}, {'obj': {$nin: [[1], 'abc']}});
})();
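
As background for the assertions above: queryHash identifies a query's shape, while planCacheKey additionally encodes which indexes are compatible, so two queries can share a shape yet map to different cache entries. A minimal sketch of reading both from explain output (the collection and filter are hypothetical):

    var c = db.example;  // hypothetical collection with at least two candidate indexes
    var explain = assert.commandWorked(c.find({a: {$ne: 5}}).explain());
    // Shape-equal queries share queryHash; planCacheKey may still differ.
    jsTestLog("queryHash=" + explain.queryPlanner.queryHash +
              " planCacheKey=" + explain.queryPlanner.planCacheKey);
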
diff --git a/jstests/noPassthroughWithMongod/now_variable.js b/jstests/noPassthroughWithMongod/now_variable.js
index b64d029b436..a3b1f793941 100644
--- a/jstests/noPassthroughWithMongod/now_variable.js
+++ b/jstests/noPassthroughWithMongod/now_variable.js
@@ -2,122 +2,121 @@
 * Tests for the $$NOW and $$CLUSTER_TIME system variables.
*/
(function() {
- "use strict";
-
- const coll = db[jsTest.name()];
- const otherColl = db[coll.getName() + "_other"];
- otherColl.drop();
- coll.drop();
- db["viewWithNow"].drop();
- db["viewWithClusterTime"].drop();
-
- // Insert simple documents into the main test collection. Aggregation and view pipelines will
- // augment these docs with time-based fields.
- const numdocs = 1000;
- let bulk = coll.initializeUnorderedBulkOp();
+"use strict";
+
+const coll = db[jsTest.name()];
+const otherColl = db[coll.getName() + "_other"];
+otherColl.drop();
+coll.drop();
+db["viewWithNow"].drop();
+db["viewWithClusterTime"].drop();
+
+// Insert simple documents into the main test collection. Aggregation and view pipelines will
+// augment these docs with time-based fields.
+const numdocs = 1000;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Insert into another collection with pre-made fields for testing the find() command.
+bulk = otherColl.initializeUnorderedBulkOp();
+const timeFieldValue = new Date();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
+}
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
+const viewWithNow = db["viewWithNow"];
+
+assert.commandWorked(db.createView(
+ "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+const viewWithClusterTime = db["viewWithClusterTime"];
+
+function runTests(query) {
+ const results = query().toArray();
+ assert.eq(results.length, numdocs);
+
+ // Make sure the values are the same for all documents
for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i});
+ assert.eq(results[0].timeField, results[i].timeField);
}
- assert.commandWorked(bulk.execute());
- // Insert into another collection with pre-made fields for testing the find() command.
- bulk = otherColl.initializeUnorderedBulkOp();
- const timeFieldValue = new Date();
- for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
- }
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
- const viewWithNow = db["viewWithNow"];
-
- assert.commandWorked(db.createView(
- "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
- const viewWithClusterTime = db["viewWithClusterTime"];
-
- function runTests(query) {
- const results = query().toArray();
- assert.eq(results.length, numdocs);
-
- // Make sure the values are the same for all documents
- for (let i = 0; i < numdocs; ++i) {
- assert.eq(results[0].timeField, results[i].timeField);
- }
-
- // Sleep for a while and then rerun.
- sleep(3000);
-
- const resultsLater = query().toArray();
- assert.eq(resultsLater.length, numdocs);
-
- // Later results should be later in time.
- assert.lte(results[0].timeField, resultsLater[0].timeField);
- }
-
- function runTestsExpectFailure(query) {
- const results = query();
- // Expect to see "Builtin variable '$$CLUSTER_TIME' is not available" error.
- assert.commandFailedWithCode(results, 51144);
- }
-
- function baseCollectionNowFind() {
- return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
- }
-
- function baseCollectionClusterTimeFind() {
- return db.runCommand({
- find: otherColl.getName(),
- filter: {$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}
- });
- }
-
- function baseCollectionNowAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
- }
-
- function baseCollectionClusterTimeAgg() {
- return db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$addFields: {timeField: "$$CLUSTER_TIME"}}],
- cursor: {}
- });
- }
-
- function fromViewWithNow() {
- return viewWithNow.find();
- }
-
- function fromViewWithClusterTime() {
- return db.runCommand({find: viewWithClusterTime.getName()});
- }
-
- function withExprNow() {
- return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
- }
-
- function withExprClusterTime() {
- return db.runCommand({
- find: viewWithClusterTime.getName(),
- filter: {$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}
- });
- }
-
- // Test that $$NOW is usable in all contexts.
- runTests(baseCollectionNowFind);
- runTests(baseCollectionNowAgg);
- runTests(fromViewWithNow);
- runTests(withExprNow);
-
- // Test that $$NOW can be used in explain for both find and aggregate.
- assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
-
- // $$CLUSTER_TIME is not available on a standalone mongod.
- runTestsExpectFailure(baseCollectionClusterTimeFind);
- runTestsExpectFailure(baseCollectionClusterTimeAgg);
- runTestsExpectFailure(fromViewWithClusterTime);
- runTestsExpectFailure(withExprClusterTime);
+ // Sleep for a while and then rerun.
+ sleep(3000);
+
+ const resultsLater = query().toArray();
+ assert.eq(resultsLater.length, numdocs);
+
+ // Later results should be later in time.
+ assert.lte(results[0].timeField, resultsLater[0].timeField);
+}
+
+function runTestsExpectFailure(query) {
+ const results = query();
+ // Expect to see "Builtin variable '$$CLUSTER_TIME' is not available" error.
+ assert.commandFailedWithCode(results, 51144);
+}
+
+function baseCollectionNowFind() {
+ return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
+}
+
+function baseCollectionClusterTimeFind() {
+ return db.runCommand({
+ find: otherColl.getName(),
+ filter: {$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}
+ });
+}
+
+function baseCollectionNowAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
+}
+
+function baseCollectionClusterTimeAgg() {
+ return db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$addFields: {timeField: "$$CLUSTER_TIME"}}],
+ cursor: {}
+ });
+}
+
+function fromViewWithNow() {
+ return viewWithNow.find();
+}
+
+function fromViewWithClusterTime() {
+ return db.runCommand({find: viewWithClusterTime.getName()});
+}
+
+function withExprNow() {
+ return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
+}
+
+function withExprClusterTime() {
+ return db.runCommand({
+ find: viewWithClusterTime.getName(),
+ filter: {$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}
+ });
+}
+
+// Test that $$NOW is usable in all contexts.
+runTests(baseCollectionNowFind);
+runTests(baseCollectionNowAgg);
+runTests(fromViewWithNow);
+runTests(withExprNow);
+
+// Test that $$NOW can be used in explain for both find and aggregate.
+assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
+
+// $$CLUSTER_TIME is not available on a standalone mongod.
+runTestsExpectFailure(baseCollectionClusterTimeFind);
+runTestsExpectFailure(baseCollectionClusterTimeAgg);
+runTestsExpectFailure(fromViewWithClusterTime);
+runTestsExpectFailure(withExprClusterTime);
}());
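
For orientation, the key property exercised above is that $$NOW is evaluated once per query, so every result of a single invocation carries the same value; a minimal sketch (the collection name is hypothetical):

    var docs = db.example.aggregate([{$addFields: {readAt: "$$NOW"}}]).toArray();
    // All documents returned by one invocation see the identical timestamp.
    for (var i = 1; i < docs.length; i++) {
        assert.eq(docs[0].readAt, docs[i].readAt);
    }
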
diff --git a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
index 52829d74717..c19b4a14fcb 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
@@ -3,62 +3,62 @@
* hijack the cached plan for an earlier $not-$in query.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For isCollScan.
+load('jstests/libs/analyze_plan.js'); // For isCollScan.
- const coll = db.plan_cache_not_in_regex;
- coll.drop();
+const coll = db.plan_cache_not_in_regex;
+coll.drop();
- // Helper function which obtains the cached plan, if any, for a given query shape.
- function getPlanForCacheEntry(query, proj, sort) {
- const key = {query: query, sort: sort, projection: proj};
- const cursor = coll.aggregate([
- {$planCacheStats: {}},
- {
- $match: {
- "createdFromQuery.query": query,
- "createdFromQuery.projection": proj,
- "createdFromQuery.sort": sort
- }
+// Helper function which obtains the cached plan, if any, for a given query shape.
+function getPlanForCacheEntry(query, proj, sort) {
+ const key = {query: query, sort: sort, projection: proj};
+ const cursor = coll.aggregate([
+ {$planCacheStats: {}},
+ {
+ $match: {
+ "createdFromQuery.query": query,
+ "createdFromQuery.projection": proj,
+ "createdFromQuery.sort": sort
}
- ]);
- const entryStats = cursor.toArray();
- assert.eq(entryStats.length, 1, `Expected one cached plan, found: ${tojson(entryStats)}`);
- return entryStats.shift();
- }
+ }
+ ]);
+ const entryStats = cursor.toArray();
+ assert.eq(entryStats.length, 1, `Expected one cached plan, found: ${tojson(entryStats)}`);
+ return entryStats.shift();
+}
- // Insert a document containing a field 'a', and create two indexes that can support queries on
- // this field. This is to ensure that the plan we choose will be cached, since if only a single
- // index is available, the solution will not be cached.
- assert.commandWorked(coll.insert({a: "foo"}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+// Insert a document containing a field 'a', and create two indexes that can support queries on
+// this field. This is to ensure that the plan we choose will be cached, since if only a single
+// index is available, the solution will not be cached.
+assert.commandWorked(coll.insert({a: "foo"}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
- // Repeat the test for query, query with projection, and query with projection and sort.
- for (let [proj, sort] of[[{}, {}], [{_id: 0, a: 1}, {}], [{_id: 0, a: 1}, {a: 1}]]) {
- // Perform a plain $not-$in query on 'a' and confirm that the plan is cached.
- const queryShape = {a: {$not: {$in: [32, 33]}}};
+// Repeat the test for query, query with projection, and query with projection and sort.
+for (let [proj, sort] of [[{}, {}], [{_id: 0, a: 1}, {}], [{_id: 0, a: 1}, {a: 1}]]) {
+ // Perform a plain $not-$in query on 'a' and confirm that the plan is cached.
+ const queryShape = {a: {$not: {$in: [32, 33]}}};
+ assert.eq(1, coll.find(queryShape, proj).sort(sort).itcount());
+ let cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
+ assert(cacheEntry);
+
+ // If the cached plan is inactive, perform the same query to activate it.
+ if (cacheEntry.isActive === false) {
assert.eq(1, coll.find(queryShape, proj).sort(sort).itcount());
- let cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
+ cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
assert(cacheEntry);
+ assert(cacheEntry.isActive);
+ }
- // If the cached plan is inactive, perform the same query to activate it.
- if (cacheEntry.isActive === false) {
- assert.eq(1, coll.find(queryShape, proj).sort(sort).itcount());
- cacheEntry = getPlanForCacheEntry(queryShape, proj, sort);
- assert(cacheEntry);
- assert(cacheEntry.isActive);
- }
-
- // Now perform a $not-$in-$regex query, confirm that it obtains the correct results, and
- // that it used a COLLSCAN rather than planning from the cache.
- const explainOutput = assert.commandWorked(
- coll.find({a: {$not: {$in: [34, /bar/]}}}).explain("executionStats"));
- assert(isCollscan(coll.getDB(), explainOutput.queryPlanner.winningPlan));
- assert.eq(1, explainOutput.executionStats.nReturned);
+ // Now perform a $not-$in-$regex query, confirm that it obtains the correct results, and
+ // that it used a COLLSCAN rather than planning from the cache.
+ const explainOutput =
+ assert.commandWorked(coll.find({a: {$not: {$in: [34, /bar/]}}}).explain("executionStats"));
+ assert(isCollscan(coll.getDB(), explainOutput.queryPlanner.winningPlan));
+ assert.eq(1, explainOutput.executionStats.nReturned);
- // Flush the plan cache before the next iteration.
- coll.getPlanCache().clear();
- }
+ // Flush the plan cache before the next iteration.
+ coll.getPlanCache().clear();
+}
})();
\ No newline at end of file
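
The cache-lookup helper in this file is handy beyond this test; a trimmed sketch of resolving a plan cache entry by shape via $planCacheStats, where the projection and sort default to the empty shape components:

    function cacheEntryForShape(coll, query, proj, sort) {
        var entries = coll.aggregate([
                              {$planCacheStats: {}},
                              {
                                  $match: {
                                      "createdFromQuery.query": query,
                                      "createdFromQuery.projection": proj || {},
                                      "createdFromQuery.sort": sort || {}
                                  }
                              }
                          ])
                          .toArray();
        return entries.length === 1 ? entries[0] : null;
    }
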
diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
index fa901b5e8d1..3882a2c4106 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
@@ -2,121 +2,130 @@
* This test will attempt to create a scenario where the plan cache entry for a given query shape
* oscillates. It achieves this by creating two indexes, A and B, on a collection, and interleaving
* queries which are "ideal" for index A with queries that are "ideal" for index B.
-*/
+ */
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For getPlanStage().
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load('jstests/libs/analyze_plan.js'); // For getPlanStage().
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
+const coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
- function getPlansForCacheEntry(query) {
- let key = {query: query, sort: {}, projection: {}};
- let res = coll.runCommand("planCacheListPlans", key);
- assert.commandWorked(res, `planCacheListPlans(${tojson(key)}) failed`);
- assert(res.hasOwnProperty("plans"),
- `plans missing from planCacheListPlans(${tojson(key)}) failed`);
+function getPlansForCacheEntry(query) {
+ let key = {query: query, sort: {}, projection: {}};
+ let res = coll.runCommand("planCacheListPlans", key);
+ assert.commandWorked(res, `planCacheListPlans(${tojson(key)}) failed`);
+ assert(res.hasOwnProperty("plans"),
+ `plans missing from planCacheListPlans(${tojson(key)}) failed`);
- return res;
- }
-
- function planHasIxScanStageForKey(planStats, keyPattern) {
- const stage = getPlanStage(planStats, "IXSCAN");
- if (stage === null) {
- return false;
- }
-
- return bsonWoCompare(keyPattern, stage.keyPattern) == 0;
- }
-
- const queryShape = {a: 1, b: 1};
+ return res;
+}
- // Carefully construct a collection so that some queries will do well with an {a: 1} index
- // and others with a {b: 1} index.
- for (let i = 1000; i < 1100; i++) {
- assert.commandWorked(coll.insert({a: 1, b: i}));
+function planHasIxScanStageForKey(planStats, keyPattern) {
+ const stage = getPlanStage(planStats, "IXSCAN");
+ if (stage === null) {
+ return false;
}
- for (let i = 1000; i < 1100; i++) {
- assert.commandWorked(coll.insert({a: i, b: 2}));
- }
-
- // This query will be quick with {a: 1} index, and far slower {b: 1} index. With the {a: 1}
- // index, the server should only need to examine one document. Using {b: 1}, it will have to
- // scan through each document which has 2 as the value of the 'b' field.
- const aIndexQuery = {a: 1099, b: 2};
- // Opposite of 'aIndexQuery'. Should be quick if the {b: 1} index is used, and slower if the
- // {a: 1} index is used.
- const bIndexQuery = {a: 1, b: 1099};
-
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
-
- // Run a query where the {b: 1} index will easily win.
- assert.eq(1, coll.find(bIndexQuery).itcount());
-
- // The plan cache should now hold an inactive entry.
- let entry = getPlansForCacheEntry(queryShape);
- let entryWorks = entry.works;
- assert.eq(entry.isActive, false);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // Re-run the query. The inactive cache entry should be promoted to an active entry.
- assert.eq(1, coll.find(bIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(entry.works, entryWorks);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // Now we will attempt to oscillate the cache entry by interleaving queries which should use
- // the {a:1} and {b:1} index. When the plan using the {b: 1} index is in the cache, running a
- // query which should use the {a: 1} index will perform very poorly, and trigger
- // replanning (and vice versa).
-
- // The {b: 1} plan is currently in the cache. Run the query which should use the {a: 1}
- // index. The current cache entry will be deactivated, and then the cache entry for the {a: 1}
- // will overwrite it (as active).
- assert.eq(1, coll.find(aIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
-
- // Run the query which should use the {b: 1} index.
- assert.eq(1, coll.find(bIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // The {b: 1} plan is again in the cache. Run the query which should use the {a: 1}
- // index.
- assert.eq(1, coll.find(aIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
-
- // The {a: 1} plan is back in the cache. Run the query which would perform better on the plan
- // using the {b: 1} index, and ensure that plan gets written to the cache.
- assert.eq(1, coll.find(bIndexQuery).itcount());
- entry = getPlansForCacheEntry(queryShape);
- entryWorks = entry.works;
- assert.eq(entry.isActive, true);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // Now run a plan that will perform poorly with both indices (it will be required to scan 500
- // documents). This will result in replanning (and the cache entry being deactivated). However,
- // the new plan will have a very high works value, and will not replace the existing cache
- // entry. It will only bump the existing cache entry's works value.
- for (let i = 0; i < 500; i++) {
- assert.commandWorked(coll.insert({a: 3, b: 3}));
- }
- assert.eq(500, coll.find({a: 3, b: 3}).itcount());
-
- // The cache entry should have been deactivated.
- entry = getPlansForCacheEntry(queryShape);
- assert.eq(entry.isActive, false);
- assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
-
- // The works value should have doubled.
- assert.eq(entry.works, entryWorks * 2);
+ return bsonWoCompare(keyPattern, stage.keyPattern) == 0;
+}
+
+const queryShape = {
+ a: 1,
+ b: 1
+};
+
+// Carefully construct a collection so that some queries will do well with an {a: 1} index
+// and others with a {b: 1} index.
+for (let i = 1000; i < 1100; i++) {
+ assert.commandWorked(coll.insert({a: 1, b: i}));
+}
+
+for (let i = 1000; i < 1100; i++) {
+ assert.commandWorked(coll.insert({a: i, b: 2}));
+}
+
+// This query will be quick with the {a: 1} index, and far slower with the {b: 1} index. With
+// the {a: 1} index, the server should only need to examine one document. Using {b: 1}, it will
+// have to scan through every document whose 'b' value is 2.
+const aIndexQuery = {
+ a: 1099,
+ b: 2
+};
+// Opposite of 'aIndexQuery'. Should be quick if the {b: 1} index is used, and slower if the
+// {a: 1} index is used.
+const bIndexQuery = {
+ a: 1,
+ b: 1099
+};
+
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+
+// Run a query where the {b: 1} index will easily win.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+
+// The plan cache should now hold an inactive entry.
+let entry = getPlansForCacheEntry(queryShape);
+let entryWorks = entry.works;
+assert.eq(entry.isActive, false);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// Re-run the query. The inactive cache entry should be promoted to an active entry.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(entry.works, entryWorks);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// Now we will attempt to oscillate the cache entry by interleaving queries which should use
+// the {a:1} and {b:1} index. When the plan using the {b: 1} index is in the cache, running a
+// query which should use the {a: 1} index will perform very poorly, and trigger
+// replanning (and vice versa).
+
+// The {b: 1} plan is currently in the cache. Run the query which should use the {a: 1}
+// index. The current cache entry will be deactivated, and then the cache entry for the {a: 1}
+// will overwrite it (as active).
+assert.eq(1, coll.find(aIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
+
+// Run the query which should use the {b: 1} index.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// The {b: 1} plan is again in the cache. Run the query which should use the {a: 1}
+// index.
+assert.eq(1, coll.find(aIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
+
+// The {a: 1} plan is back in the cache. Run the query which would perform better on the plan
+// using the {b: 1} index, and ensure that plan gets written to the cache.
+assert.eq(1, coll.find(bIndexQuery).itcount());
+entry = getPlansForCacheEntry(queryShape);
+entryWorks = entry.works;
+assert.eq(entry.isActive, true);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// Now run a query that will perform poorly with both indices (it will be required to scan 500
+// documents). This will result in replanning (and the cache entry being deactivated). However,
+// the new plan will have a very high works value, and will not replace the existing cache
+// entry. It will only bump the existing cache entry's works value.
+for (let i = 0; i < 500; i++) {
+ assert.commandWorked(coll.insert({a: 3, b: 3}));
+}
+assert.eq(500, coll.find({a: 3, b: 3}).itcount());
+
+// The cache entry should have been deactivated.
+entry = getPlansForCacheEntry(queryShape);
+assert.eq(entry.isActive, false);
+assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+
+// The works value should have doubled.
+assert.eq(entry.works, entryWorks * 2);
})();
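
For reference, the 'works' counter asserted on above comes straight out of planCacheListPlans; a minimal sketch of inspecting one entry (the collection and query shape are hypothetical):

    var key = {query: {a: 1, b: 1}, sort: {}, projection: {}};
    var res = db.example.runCommand("planCacheListPlans", key);
    assert.commandWorked(res);
    // Replanning that fails to beat the cached plan doubles 'works' in place.
    jsTestLog("isActive=" + res.isActive + " works=" + res.works);
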
diff --git a/jstests/noPassthroughWithMongod/query_oplogreplay.js b/jstests/noPassthroughWithMongod/query_oplogreplay.js
index 9a8a9e8da77..75fc38cce35 100644
--- a/jstests/noPassthroughWithMongod/query_oplogreplay.js
+++ b/jstests/noPassthroughWithMongod/query_oplogreplay.js
@@ -2,231 +2,223 @@
// @tags: [requires_replication, requires_capped]
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- function test(t) {
- t.drop();
- assert.commandWorked(
- t.getDB().createCollection(t.getName(), {capped: true, size: 16 * 1024}));
-
- const isOplog = t.getName().startsWith("oplog.");
-
- /**
- * Helper function for making timestamps with the property that if i < j, then makeTS(i) <
- * makeTS(j).
- */
- function makeTS(i) {
- return Timestamp(1000, i);
- }
-
- for (let i = 1; i <= 100; i++) {
- assert.writeOK(t.insert({_id: i, ts: makeTS(i)}));
- }
-
- // Missing 'ts' field.
- assert.throws(function() {
- t.find().addOption(DBQuery.Option.oplogReplay).next();
- });
- assert.throws(function() {
- t.find({_id: 3}).addOption(DBQuery.Option.oplogReplay).next();
- });
-
- // 'ts' field is not top-level.
- assert.throws(function() {
- t.find({$or: [{ts: {$gt: makeTS(3)}}, {foo: 3}]})
- .addOption(DBQuery.Option.oplogReplay)
- .next();
- });
- assert.throws(function() {
- t.find({$nor: [{ts: {$gt: makeTS(4)}}, {foo: 4}]})
- .addOption(DBQuery.Option.oplogReplay)
- .next();
- });
-
- // There is no $eq, $gt or $gte predicate on 'ts'.
- assert.throws(function() {
- t.find({ts: {$lt: makeTS(4)}}).addOption(DBQuery.Option.oplogReplay).next();
- });
- assert.throws(function() {
- t.find({ts: {$lt: makeTS(4)}, _id: 3}).addOption(DBQuery.Option.oplogReplay).next();
- });
-
- // A $gt query on just the 'ts' field should return the next document after the timestamp.
- var cursor = t.find({ts: {$gt: makeTS(20)}}).addOption(DBQuery.Option.oplogReplay);
- assert.eq(21, cursor.next()["_id"]);
- assert.eq(22, cursor.next()["_id"]);
-
- // A $gte query on the 'ts' field should include the timestamp.
- cursor = t.find({ts: {$gte: makeTS(20)}}).addOption(DBQuery.Option.oplogReplay);
- assert.eq(20, cursor.next()["_id"]);
- assert.eq(21, cursor.next()["_id"]);
-
- // An $eq query on the 'ts' field should return the single record with the timestamp.
- cursor = t.find({ts: {$eq: makeTS(20)}}).addOption(DBQuery.Option.oplogReplay);
- assert.eq(20, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // An AND with both a $gt and $lt query on the 'ts' field will correctly return results in
- // the proper bounds.
- cursor = t.find({
- $and: [{ts: {$lt: makeTS(5)}}, {ts: {$gt: makeTS(1)}}]
- }).addOption(DBQuery.Option.oplogReplay);
- assert.eq(2, cursor.next()["_id"]);
- assert.eq(3, cursor.next()["_id"]);
- assert.eq(4, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // An AND with multiple predicates on the 'ts' field correctly returns results on the
- // tightest range.
- cursor = t.find({
- $and: [
- {ts: {$gte: makeTS(2)}},
- {ts: {$gt: makeTS(3)}},
- {ts: {$lte: makeTS(7)}},
- {ts: {$lt: makeTS(7)}}
- ]
- }).addOption(DBQuery.Option.oplogReplay);
- assert.eq(4, cursor.next()["_id"]);
- assert.eq(5, cursor.next()["_id"]);
- assert.eq(6, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // An AND with an $eq predicate in conjunction with other bounds correctly returns one
- // result.
- cursor = t.find({
- $and: [
- {ts: {$gte: makeTS(1)}},
- {ts: {$gt: makeTS(2)}},
- {ts: {$eq: makeTS(5)}},
- {ts: {$lte: makeTS(8)}},
- {ts: {$lt: makeTS(8)}}
- ]
- }).addOption(DBQuery.Option.oplogReplay);
- assert.eq(5, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // An $eq query stops scanning after passing the max timestamp.
- let res = t.find({ts: {$eq: makeTS(10)}})
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- // If this is the oplog, we expect to be able to seek directly to the entry with a 'ts' of
- // 10. Otherwise, we have to scan from the beginning of the oplog.
- let expectedDocsExamined = isOplog ? 2 : 11;
- assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
- let collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
-
- // An AND with an $lt predicate stops scanning after passing the max timestamp.
- res = t.find({$and: [{ts: {$gte: makeTS(1)}}, {ts: {$lt: makeTS(10)}}]})
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 11, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
-
- // An AND with an $lte predicate stops scanning after passing the max timestamp.
- res = t.find({$and: [{ts: {$gte: makeTS(1)}}, {ts: {$lte: makeTS(10)}}]})
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
-
- // The max timestamp is respected even when the min timestamp is smaller than the lowest
- // timestamp in the collection.
- res = t.find({$and: [{ts: {$gte: makeTS(0)}}, {ts: {$lte: makeTS(10)}}]})
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
-
- // An AND with redundant $eq/$lt/$lte predicates stops scanning after passing the max
- // timestamp.
- res = t.find({
- $and: [
- {ts: {$gte: makeTS(0)}},
- {ts: {$lte: makeTS(10)}},
- {ts: {$eq: makeTS(5)}},
- {ts: {$lt: makeTS(20)}}
- ]
- })
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- // If this is the oplog, we expect to be able to seek directly to the entry with a 'ts' of
- // 5. Otherwise, we have to scan from the beginning of the oplog.
- expectedDocsExamined = isOplog ? 2 : 11;
- assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(5), collScanStage.maxTs, tojson(res));
-
- // An $eq query for a non-existent timestamp scans a single oplog document.
- res = t.find({ts: {$eq: makeTS(200)}})
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- // If this is the oplog, we expect to be able to seek directly to the end of the oplog.
- // Otherwise, we have to scan the entire oplog before determining that 'ts' 100 does not
- // exist.
- expectedDocsExamined = isOplog ? 1 : 100;
- assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(200), collScanStage.maxTs, tojson(res));
-
- // When the filter matches the last document within the timestamp range, the collection scan
- // examines at most one more document.
- res = t.find({$and: [{ts: {$gte: makeTS(4)}}, {ts: {$lte: makeTS(8)}}, {_id: 8}]})
- .addOption(DBQuery.Option.oplogReplay)
- .explain("executionStats");
- assert.commandWorked(res);
- // If this is the oplog, we expect to be able to seek directly to the start of the 'ts'
- // range. Otherwise, we have to scan the capped collection from the beginning.
- expectedDocsExamined = isOplog ? 6 : 9;
- assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
- collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
- assert.neq(
- null, collScanStage, "no collection scan found in explain output: " + tojson(res));
- assert.eq(makeTS(8), collScanStage.maxTs, tojson(res));
-
- // A query over both 'ts' and '_id' should only pay attention to the 'ts' field for finding
- // the oplog start (SERVER-13566).
- cursor = t.find({ts: {$gte: makeTS(20)}, _id: 25}).addOption(DBQuery.Option.oplogReplay);
- assert.eq(25, cursor.next()["_id"]);
- assert(!cursor.hasNext());
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+function test(t) {
+ t.drop();
+ assert.commandWorked(t.getDB().createCollection(t.getName(), {capped: true, size: 16 * 1024}));
+
+ const isOplog = t.getName().startsWith("oplog.");
+
+ /**
+ * Helper function for making timestamps with the property that if i < j, then makeTS(i) <
+ * makeTS(j).
+ */
+ function makeTS(i) {
+ return Timestamp(1000, i);
}
- jsTestLog("Non-oplog.");
- // Test that oplog replay on a non-oplog collection succeeds.
- test(db.jstests_query_oplogreplay);
+ for (let i = 1; i <= 100; i++) {
+ assert.writeOK(t.insert({_id: i, ts: makeTS(i)}));
+ }
- jsTestLog("Oplog.");
- // Test that oplog replay on the actual oplog succeeds.
- test(db.getSiblingDB("local").oplog.jstests_query_oplogreplay);
+ // Missing 'ts' field.
+ assert.throws(function() {
+ t.find().addOption(DBQuery.Option.oplogReplay).next();
+ });
+ assert.throws(function() {
+ t.find({_id: 3}).addOption(DBQuery.Option.oplogReplay).next();
+ });
- // Test that oplog replay on a non-capped collection fails.
- const coll = db.jstests_query_oplogreplay;
- coll.drop();
- assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+ // 'ts' field is not top-level.
+ assert.throws(function() {
+ t.find({$or: [{ts: {$gt: makeTS(3)}}, {foo: 3}]})
+ .addOption(DBQuery.Option.oplogReplay)
+ .next();
+ });
assert.throws(function() {
- coll.find({ts: {$gt: "abcd"}}).addOption(DBQuery.Option.oplogReplay).next();
+ t.find({$nor: [{ts: {$gt: makeTS(4)}}, {foo: 4}]})
+ .addOption(DBQuery.Option.oplogReplay)
+ .next();
});
+
+ // There is no $eq, $gt or $gte predicate on 'ts'.
+ assert.throws(function() {
+ t.find({ts: {$lt: makeTS(4)}}).addOption(DBQuery.Option.oplogReplay).next();
+ });
+ assert.throws(function() {
+ t.find({ts: {$lt: makeTS(4)}, _id: 3}).addOption(DBQuery.Option.oplogReplay).next();
+ });
+
+ // A $gt query on just the 'ts' field should return the next document after the timestamp.
+ var cursor = t.find({ts: {$gt: makeTS(20)}}).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(21, cursor.next()["_id"]);
+ assert.eq(22, cursor.next()["_id"]);
+
+ // A $gte query on the 'ts' field should include the timestamp.
+ cursor = t.find({ts: {$gte: makeTS(20)}}).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(20, cursor.next()["_id"]);
+ assert.eq(21, cursor.next()["_id"]);
+
+ // An $eq query on the 'ts' field should return the single record with the timestamp.
+ cursor = t.find({ts: {$eq: makeTS(20)}}).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(20, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // An AND with both a $gt and $lt query on the 'ts' field will correctly return results in
+ // the proper bounds.
+ cursor = t.find({
+ $and: [{ts: {$lt: makeTS(5)}}, {ts: {$gt: makeTS(1)}}]
+ }).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(2, cursor.next()["_id"]);
+ assert.eq(3, cursor.next()["_id"]);
+ assert.eq(4, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // An AND with multiple predicates on the 'ts' field correctly returns results on the
+ // tightest range.
+ cursor = t.find({
+ $and: [
+ {ts: {$gte: makeTS(2)}},
+ {ts: {$gt: makeTS(3)}},
+ {ts: {$lte: makeTS(7)}},
+ {ts: {$lt: makeTS(7)}}
+ ]
+ }).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(4, cursor.next()["_id"]);
+ assert.eq(5, cursor.next()["_id"]);
+ assert.eq(6, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // An AND with an $eq predicate in conjunction with other bounds correctly returns one
+ // result.
+ cursor = t.find({
+ $and: [
+ {ts: {$gte: makeTS(1)}},
+ {ts: {$gt: makeTS(2)}},
+ {ts: {$eq: makeTS(5)}},
+ {ts: {$lte: makeTS(8)}},
+ {ts: {$lt: makeTS(8)}}
+ ]
+ }).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(5, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // An $eq query stops scanning after passing the max timestamp.
+ let res = t.find({ts: {$eq: makeTS(10)}})
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ // If this is the oplog, we expect to be able to seek directly to the entry with a 'ts' of
+ // 10. Otherwise, we have to scan from the beginning of the oplog.
+ let expectedDocsExamined = isOplog ? 2 : 11;
+ assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
+ let collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+
+ // An AND with an $lt predicate stops scanning after passing the max timestamp.
+ res = t.find({$and: [{ts: {$gte: makeTS(1)}}, {ts: {$lt: makeTS(10)}}]})
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ assert.lte(res.executionStats.totalDocsExamined, 11, tojson(res));
+ collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+
+ // An AND with an $lte predicate stops scanning after passing the max timestamp.
+ res = t.find({$and: [{ts: {$gte: makeTS(1)}}, {ts: {$lte: makeTS(10)}}]})
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
+ collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+
+ // The max timestamp is respected even when the min timestamp is smaller than the lowest
+ // timestamp in the collection.
+ res = t.find({$and: [{ts: {$gte: makeTS(0)}}, {ts: {$lte: makeTS(10)}}]})
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
+ collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(10), collScanStage.maxTs, tojson(res));
+
+ // An AND with redundant $eq/$lt/$lte predicates stops scanning after passing the max
+ // timestamp.
+ res = t.find({
+ $and: [
+ {ts: {$gte: makeTS(0)}},
+ {ts: {$lte: makeTS(10)}},
+ {ts: {$eq: makeTS(5)}},
+ {ts: {$lt: makeTS(20)}}
+ ]
+ })
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ // If this is the oplog, we expect to be able to seek directly to the entry with a 'ts' of
+ // 5. Otherwise, we have to scan from the beginning of the oplog.
+ expectedDocsExamined = isOplog ? 2 : 11;
+ assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
+ collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(5), collScanStage.maxTs, tojson(res));
+
+ // An $eq query for a non-existent timestamp scans a single oplog document.
+ res = t.find({ts: {$eq: makeTS(200)}})
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ // If this is the oplog, we expect to be able to seek directly to the end of the oplog.
+    // Otherwise, we have to scan the entire oplog before determining that 'ts' 200 does not
+    // exist.
+ expectedDocsExamined = isOplog ? 1 : 100;
+ assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
+ collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(200), collScanStage.maxTs, tojson(res));
+
+ // When the filter matches the last document within the timestamp range, the collection scan
+ // examines at most one more document.
+ res = t.find({$and: [{ts: {$gte: makeTS(4)}}, {ts: {$lte: makeTS(8)}}, {_id: 8}]})
+ .addOption(DBQuery.Option.oplogReplay)
+ .explain("executionStats");
+ assert.commandWorked(res);
+ // If this is the oplog, we expect to be able to seek directly to the start of the 'ts'
+ // range. Otherwise, we have to scan the capped collection from the beginning.
+ expectedDocsExamined = isOplog ? 6 : 9;
+ assert.lte(res.executionStats.totalDocsExamined, expectedDocsExamined, tojson(res));
+ collScanStage = getPlanStage(res.executionStats.executionStages, "COLLSCAN");
+ assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
+ assert.eq(makeTS(8), collScanStage.maxTs, tojson(res));
+
+ // A query over both 'ts' and '_id' should only pay attention to the 'ts' field for finding
+ // the oplog start (SERVER-13566).
+ cursor = t.find({ts: {$gte: makeTS(20)}, _id: 25}).addOption(DBQuery.Option.oplogReplay);
+ assert.eq(25, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+}
+
+jsTestLog("Non-oplog.");
+// Test that oplog replay on a non-oplog collection succeeds.
+test(db.jstests_query_oplogreplay);
+
+jsTestLog("Oplog.");
+// Test that oplog replay on the actual oplog succeeds.
+test(db.getSiblingDB("local").oplog.jstests_query_oplogreplay);
+
+// Test that oplog replay on a non-capped collection fails.
+const coll = db.jstests_query_oplogreplay;
+coll.drop();
+assert.commandWorked(coll.getDB().createCollection(coll.getName()));
+assert.throws(function() {
+ coll.find({ts: {$gt: "abcd"}}).addOption(DBQuery.Option.oplogReplay).next();
+});
}());
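
As a standalone reminder of the option under test: oplogReplay is only valid against capped collections and requires a top-level $eq, $gt, or $gte predicate on 'ts'; a minimal sketch of resuming a scan from a given point (the timestamp is a hypothetical placeholder):

    var oplog = db.getSiblingDB("local").oplog.rs;  // the real oplog on a replica set
    var resumeTS = Timestamp(1000, 20);             // hypothetical resume point
    var cursor = oplog.find({ts: {$gte: resumeTS}}).addOption(DBQuery.Option.oplogReplay);
    while (cursor.hasNext()) {
        printjson(cursor.next().ts);
    }
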
diff --git a/jstests/noPassthroughWithMongod/renameWithWCE.js b/jstests/noPassthroughWithMongod/renameWithWCE.js
index f09c45ebeff..0c232ec0b37 100644
--- a/jstests/noPassthroughWithMongod/renameWithWCE.js
+++ b/jstests/noPassthroughWithMongod/renameWithWCE.js
@@ -4,52 +4,52 @@
*/
// @tags: [requires_profiling]
(function() {
- // Set up namespaces a and b.
- var admin = db.getMongo().getDB("admin");
- var db_a = db.getMongo().getDB("db_a");
- var db_b = db.getMongo().getDB("db_b");
-
- var a = db_a.rename7;
- var b = db_b.rename7;
-
- // Ensure that the databases are created
- db_a.coll.insert({});
- db_b.coll.insert({});
-
- a.drop();
- b.drop();
-
- // Put some documents and indexes in a.
- a.save({a: 1});
- a.save({a: 2});
- a.save({a: 3});
- a.ensureIndex({a: 1});
- a.ensureIndex({b: 1});
-
- assert.commandWorked(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
-
- assert.eq(0, a.find().count());
- assert(db_a.getCollectionNames().indexOf("rename7") < 0);
-
- assert.eq(3, b.find().count());
- assert(db_b.getCollectionNames().indexOf("rename7") >= 0);
-
- a.drop();
- b.drop();
-
- // Test that the dropTarget option works when renaming across databases.
- a.save({});
- b.save({});
- assert.commandFailed(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
-
- // Ensure that a WCE during renaming doesn't cause a failure.
- assert.commandWorked(db_a.setProfilingLevel(2)); // So we can check WCE happens.
- assert.commandWorked(db_a.adminCommand(
- {"configureFailPoint": 'writeConflictInRenameCollCopyToTmp', "mode": {times: 1}}));
- assert.commandWorked(
- admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7", dropTarget: true}));
- assert.gte(db_a.system.profile.findOne().writeConflicts, 1); // Make sure that our WCE happened
- assert.commandWorked(db_a.setProfilingLevel(0));
- a.drop();
- b.drop();
+// Set up namespaces a and b.
+var admin = db.getMongo().getDB("admin");
+var db_a = db.getMongo().getDB("db_a");
+var db_b = db.getMongo().getDB("db_b");
+
+var a = db_a.rename7;
+var b = db_b.rename7;
+
+// Ensure that the databases are created
+db_a.coll.insert({});
+db_b.coll.insert({});
+
+a.drop();
+b.drop();
+
+// Put some documents and indexes in a.
+a.save({a: 1});
+a.save({a: 2});
+a.save({a: 3});
+a.ensureIndex({a: 1});
+a.ensureIndex({b: 1});
+
+assert.commandWorked(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
+
+assert.eq(0, a.find().count());
+assert(db_a.getCollectionNames().indexOf("rename7") < 0);
+
+assert.eq(3, b.find().count());
+assert(db_b.getCollectionNames().indexOf("rename7") >= 0);
+
+a.drop();
+b.drop();
+
+// Test that cross-database renames fail without the dropTarget option when the target exists.
+a.save({});
+b.save({});
+assert.commandFailed(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
+
+// Ensure that a WCE during renaming doesn't cause a failure.
+assert.commandWorked(db_a.setProfilingLevel(2)); // So we can check WCE happens.
+assert.commandWorked(db_a.adminCommand(
+ {"configureFailPoint": 'writeConflictInRenameCollCopyToTmp', "mode": {times: 1}}));
+assert.commandWorked(
+ admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7", dropTarget: true}));
+assert.gte(db_a.system.profile.findOne().writeConflicts, 1); // Make sure that our WCE happened
+assert.commandWorked(db_a.setProfilingLevel(0));
+a.drop();
+b.drop();
})();
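
The failpoint mechanics above follow the standard configureFailPoint pattern; a minimal sketch of arming a failpoint for exactly one hit and disarming it afterwards (the failpoint name is the one this test uses):

    assert.commandWorked(db.adminCommand(
        {configureFailPoint: 'writeConflictInRenameCollCopyToTmp', mode: {times: 1}}));
    // ... run the operation expected to trip the failpoint exactly once ...
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: 'writeConflictInRenameCollCopyToTmp', mode: 'off'}));
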
diff --git a/jstests/noPassthroughWithMongod/replset_host_connection_validation.js b/jstests/noPassthroughWithMongod/replset_host_connection_validation.js
index 159efefd3b1..29ca7436b92 100644
--- a/jstests/noPassthroughWithMongod/replset_host_connection_validation.js
+++ b/jstests/noPassthroughWithMongod/replset_host_connection_validation.js
@@ -2,84 +2,84 @@
// @tags: [requires_replication]
(function() {
- 'use strict';
+'use strict';
- const replSetName = 'hostTestReplSetName';
+const replSetName = 'hostTestReplSetName';
- // This "inner_mode" method of spawning a replset and re-running was copied from
- // host_connection_string_validation.js
- if ("undefined" == typeof inner_mode) {
- jsTest.log("Outer mode test starting a replica set");
+// This "inner_mode" method of spawning a replset and re-running was copied from
+// host_connection_string_validation.js
+if ("undefined" == typeof inner_mode) {
+ jsTest.log("Outer mode test starting a replica set");
- const replTest = new ReplSetTest({name: replSetName, nodes: 2});
- replTest.startSet();
- replTest.initiate();
+ const replTest = new ReplSetTest({name: replSetName, nodes: 2});
+ replTest.startSet();
+ replTest.initiate();
- const primary = replTest.getPrimary();
+ const primary = replTest.getPrimary();
- const args = [
- "mongo",
- "--nodb",
- "--eval",
- "inner_mode=true;port=" + primary.port + ";",
- "jstests/noPassthroughWithMongod/replset_host_connection_validation.js"
- ];
- const exitCode = _runMongoProgram(...args);
- jsTest.log("Inner mode test finished, exit code was " + exitCode);
+ const args = [
+ "mongo",
+ "--nodb",
+ "--eval",
+ "inner_mode=true;port=" + primary.port + ";",
+ "jstests/noPassthroughWithMongod/replset_host_connection_validation.js"
+ ];
+ const exitCode = _runMongoProgram(...args);
+ jsTest.log("Inner mode test finished, exit code was " + exitCode);
- replTest.stopSet();
- // Pass the inner test's exit code back as the outer test's exit code
- if (exitCode != 0) {
- doassert("inner test failed with exit code " + exitCode);
- }
- return;
+ replTest.stopSet();
+ // Pass the inner test's exit code back as the outer test's exit code
+ if (exitCode != 0) {
+ doassert("inner test failed with exit code " + exitCode);
}
+ return;
+}
- function testHost(host, uri, ok) {
- const exitCode = runMongoProgram('mongo', '--eval', ';', '--host', host, uri);
- if (ok) {
- assert.eq(exitCode, 0, "failed to connect with `--host " + host + "`");
- } else {
- assert.neq(exitCode, 0, "unexpectedly succeeded to connect with `--host " + host + "`");
- }
+function testHost(host, uri, ok) {
+ const exitCode = runMongoProgram('mongo', '--eval', ';', '--host', host, uri);
+ if (ok) {
+ assert.eq(exitCode, 0, "failed to connect with `--host " + host + "`");
+ } else {
+        assert.neq(exitCode, 0, "unexpectedly succeeded in connecting with `--host " + host + "`");
}
+}
- function runConnectionStringTestFor(connectionString, uri, ok) {
- print("* Testing: --host " + connectionString + " " + uri);
- if (!ok) {
- print(" This should fail");
- }
- testHost(connectionString, uri, ok);
+function runConnectionStringTestFor(connectionString, uri, ok) {
+ print("* Testing: --host " + connectionString + " " + uri);
+ if (!ok) {
+ print(" This should fail");
}
+ testHost(connectionString, uri, ok);
+}
- function expSuccess(str) {
- runConnectionStringTestFor(str, '', true);
- if (!str.startsWith('mongodb://')) {
- runConnectionStringTestFor(str, 'dbname', true);
- }
+function expSuccess(str) {
+ runConnectionStringTestFor(str, '', true);
+ if (!str.startsWith('mongodb://')) {
+ runConnectionStringTestFor(str, 'dbname', true);
}
+}
- function expFailure(str) {
- runConnectionStringTestFor(str, '', false);
- }
+function expFailure(str) {
+ runConnectionStringTestFor(str, '', false);
+}
- expSuccess(`localhost:${port}`);
- expSuccess(`${replSetName}/localhost:${port}`);
- expSuccess(`${replSetName}/localhost:${port},[::1]:${port}`);
- expSuccess(`${replSetName}/localhost:${port},`);
- expSuccess(`${replSetName}/localhost:${port},,`);
- expSuccess(`mongodb://localhost:${port}/admin?replicaSet=${replSetName}`);
- expSuccess(`mongodb://localhost:${port}`);
+expSuccess(`localhost:${port}`);
+expSuccess(`${replSetName}/localhost:${port}`);
+expSuccess(`${replSetName}/localhost:${port},[::1]:${port}`);
+expSuccess(`${replSetName}/localhost:${port},`);
+expSuccess(`${replSetName}/localhost:${port},,`);
+expSuccess(`mongodb://localhost:${port}/admin?replicaSet=${replSetName}`);
+expSuccess(`mongodb://localhost:${port}`);
- expFailure(',');
- expFailure(',,');
- expFailure(`${replSetName}/`);
- expFailure(`${replSetName}/,`);
- expFailure(`${replSetName}/,,`);
- expFailure(`${replSetName}//not/a/socket`);
- expFailure(`mongodb://localhost:${port}/admin?replicaSet=`);
- expFailure('mongodb://localhost:');
- expFailure(`mongodb://:${port}`);
+expFailure(',');
+expFailure(',,');
+expFailure(`${replSetName}/`);
+expFailure(`${replSetName}/,`);
+expFailure(`${replSetName}/,,`);
+expFailure(`${replSetName}//not/a/socket`);
+expFailure(`mongodb://localhost:${port}/admin?replicaSet=`);
+expFailure('mongodb://localhost:');
+expFailure(`mongodb://:${port}`);
- jsTest.log("SUCCESSFUL test completion");
+jsTest.log("SUCCESSFUL test completion");
})();
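
Each expSuccess/expFailure case above boils down to spawning a throw-away shell and checking its exit status; a minimal sketch of that primitive, assuming the 'port' variable injected by the outer shell (the sample host strings mirror cases from this test):

    // Returns true when a fresh shell can connect with the given --host value.
    function canConnect(hostString) {
        return runMongoProgram('mongo', '--eval', ';', '--host', hostString) === 0;
    }
    assert(canConnect('localhost:' + port));      // plain host:port should work
    assert(!canConnect('mongodb://localhost:'));  // URI missing a port must fail
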
diff --git a/jstests/noPassthroughWithMongod/rpc_protocols.js b/jstests/noPassthroughWithMongod/rpc_protocols.js
index ef847e2a359..95e028526c0 100644
--- a/jstests/noPassthroughWithMongod/rpc_protocols.js
+++ b/jstests/noPassthroughWithMongod/rpc_protocols.js
@@ -8,65 +8,66 @@
var RPC_PROTOCOLS = {OP_QUERY: "opQueryOnly", OP_MSG: "opMsgOnly"};
(function() {
- "use strict";
+"use strict";
- db.rpcProtocols.drop();
+db.rpcProtocols.drop();
- var oldProfilingLevel = db.getProfilingLevel();
+var oldProfilingLevel = db.getProfilingLevel();
- assert.commandWorked(db.setProfilingLevel(2));
+assert.commandWorked(db.setProfilingLevel(2));
- function runInShell(rpcProtocol, func) {
- assert(0 == _runMongoProgram("mongo",
- "--rpcProtocols=" + rpcProtocol,
- "--readMode=commands", // ensure we use the find command.
- "--eval",
- "(" + func.toString() + ")();",
- db.getMongo().host));
- }
+function runInShell(rpcProtocol, func) {
+ assert(0 ==
+ _runMongoProgram("mongo",
+ "--rpcProtocols=" + rpcProtocol,
+ "--readMode=commands", // ensure we use the find command.
+ "--eval",
+ "(" + func.toString() + ")();",
+ db.getMongo().host));
+}
- // Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
- });
- var profileDoc = db.system.profile.findOne({"command.comment": "opQueryCommandLine"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_query");
+// Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
+runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
+});
+var profileDoc = db.system.profile.findOne({"command.comment": "opQueryCommandLine"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_query");
- // Test that --rpcProtocols=opMsgOnly forces OP_MSG commands.
- runInShell(RPC_PROTOCOLS.OP_MSG, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opMsgCommandLine").itcount();
- });
- profileDoc = db.system.profile.findOne({"command.comment": "opMsgCommandLine"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_msg");
+// Test that --rpcProtocols=opMsgOnly forces OP_MSG commands.
+runInShell(RPC_PROTOCOLS.OP_MSG, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opMsgCommandLine").itcount();
+});
+profileDoc = db.system.profile.findOne({"command.comment": "opMsgCommandLine"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_msg");
- // Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
- // in OP_MSG only mode, then switch it to OP_QUERY mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_MSG, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
- db.getMongo().setClientRPCProtocols("opQueryOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
- });
- profileDoc = db.system.profile.findOne({"command.comment": "opQueryRuntime"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_query");
+// Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
+// in OP_MSG only mode, then switch it to OP_QUERY mode at runtime.
+runInShell(RPC_PROTOCOLS.OP_MSG, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
+ db.getMongo().setClientRPCProtocols("opQueryOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
+});
+profileDoc = db.system.profile.findOne({"command.comment": "opQueryRuntime"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_query");
- // Test that .setClientRPCProtocols("opMsgOnly") forces OP_MSG commands. We start the
- // shell in OP_QUERY only mode, then switch it to OP_MSG mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getMongo().setClientRPCProtocols("opMsgOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opMsgRuntime").itcount();
- });
- profileDoc = db.system.profile.findOne({"command.comment": "opMsgRuntime"});
- assert(profileDoc !== null);
- assert.eq(profileDoc.protocol, "op_msg");
+// Test that .setClientRPCProtocols("opMsgOnly") forces OP_MSG commands. We start the
+// shell in OP_QUERY only mode, then switch it to OP_MSG mode at runtime.
+runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getMongo().setClientRPCProtocols("opMsgOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opMsgOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opMsgRuntime").itcount();
+});
+profileDoc = db.system.profile.findOne({"command.comment": "opMsgRuntime"});
+assert(profileDoc !== null);
+assert.eq(profileDoc.protocol, "op_msg");
- // Reset profiling level.
- assert.commandWorked(db.setProfilingLevel(oldProfilingLevel));
+// Reset profiling level.
+assert.commandWorked(db.setProfilingLevel(oldProfilingLevel));
})();
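
A minimal sketch of the profiler-based protocol check this test relies on, assuming profiling is enabled at level 2: tag a read with a comment, then look up which wire protocol the server recorded for it.

    assert.commandWorked(db.setProfilingLevel(2));
    db.rpcProtocols.find().comment("whichProtocol").itcount();
    const entry = db.system.profile.findOne({"command.comment": "whichProtocol"});
    print(entry.protocol);  // "op_msg" or "op_query", depending on the shell's RPC setting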
diff --git a/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js b/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js
index db3c4904896..c0d4580aae5 100644
--- a/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js
+++ b/jstests/noPassthroughWithMongod/shell_advance_cluster_time.js
@@ -3,27 +3,27 @@
*/
(function() {
- assert.throws(function() {
- db.getMongo().advanceClusterTime();
- });
+assert.throws(function() {
+ db.getMongo().advanceClusterTime();
+});
- assert.throws(function() {
- db.getMongo().advanceClusterTime(123);
- });
+assert.throws(function() {
+ db.getMongo().advanceClusterTime(123);
+});
- assert.throws(function() {
- db.getMongo().advanceClusterTime('abc');
- });
+assert.throws(function() {
+ db.getMongo().advanceClusterTime('abc');
+});
- db.getMongo().advanceClusterTime({'clusterTime': 123});
+db.getMongo().advanceClusterTime({'clusterTime': 123});
- assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
+assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
- db.getMongo().advanceClusterTime({'clusterTime': 100});
+db.getMongo().advanceClusterTime({'clusterTime': 100});
- assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
+assert.eq({'clusterTime': 123}, db.getMongo().getClusterTime());
- db.getMongo().advanceClusterTime({'clusterTime': 456});
+db.getMongo().advanceClusterTime({'clusterTime': 456});
- assert.eq({'clusterTime': 456}, db.getMongo().getClusterTime());
+assert.eq({'clusterTime': 456}, db.getMongo().getClusterTime());
})();
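
A minimal sketch of the monotonicity asserted above: advanceClusterTime only ever moves the tracked cluster time forward.

    db.getMongo().advanceClusterTime({clusterTime: 200});
    db.getMongo().advanceClusterTime({clusterTime: 150});  // lower value, ignored
    assert.eq({clusterTime: 200}, db.getMongo().getClusterTime());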
diff --git a/jstests/noPassthroughWithMongod/shelllimit.js b/jstests/noPassthroughWithMongod/shelllimit.js
index 3b270bddc12..cd021a6df61 100644
--- a/jstests/noPassthroughWithMongod/shelllimit.js
+++ b/jstests/noPassthroughWithMongod/shelllimit.js
@@ -1,21 +1,21 @@
// This checks to make sure that cursors with a limit get killed by the shell
// after all their results have been returned. See SERVER-17792.
(function() {
- "use strict";
+"use strict";
- var t = db.cursor_limit_test;
- t.drop();
- var pre = db.serverStatus().metrics.cursor.open.total;
+var t = db.cursor_limit_test;
+t.drop();
+var pre = db.serverStatus().metrics.cursor.open.total;
- for (var i = 1; i <= 5; i++) {
- t.save({a: i});
- }
+for (var i = 1; i <= 5; i++) {
+ t.save({a: i});
+}
- var c = t.find().limit(3);
- while (c.hasNext()) {
- var v = c.next();
- }
+var c = t.find().limit(3);
+while (c.hasNext()) {
+ var v = c.next();
+}
- assert.eq(pre, db.serverStatus().metrics.cursor.open.total);
- t.drop();
+assert.eq(pre, db.serverStatus().metrics.cursor.open.total);
+t.drop();
}());
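
A minimal sketch of the invariant this test checks: fully exhausting a limited cursor should leave the server's open-cursor total where it started, because the shell kills the cursor once the limit is reached.

    const before = db.serverStatus().metrics.cursor.open.total;
    db.cursor_limit_test.find().limit(3).toArray();  // exhaust the limited cursor
    assert.eq(before, db.serverStatus().metrics.cursor.open.total);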
diff --git a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
index 591c5fffd95..600d8be4733 100644
--- a/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
+++ b/jstests/noPassthroughWithMongod/tailable_getmore_does_not_timeout.js
@@ -3,34 +3,34 @@
// This test was designed to reproduce SERVER-33942 against a mongod.
// @tags: [requires_capped]
(function() {
- "use strict";
+"use strict";
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
- const coll = db.tailable_getmore_no_timeout;
- coll.drop();
+const coll = db.tailable_getmore_no_timeout;
+coll.drop();
- assert.commandWorked(db.runCommand({create: coll.getName(), capped: true, size: 1024}));
+assert.commandWorked(db.runCommand({create: coll.getName(), capped: true, size: 1024}));
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({_id: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- const findResponse = assert.commandWorked(
- db.runCommand({find: coll.getName(), filter: {}, tailable: true, awaitData: true}));
- const cursorId = findResponse.cursor.id;
- assert.neq(0, cursorId);
+const findResponse = assert.commandWorked(
+ db.runCommand({find: coll.getName(), filter: {}, tailable: true, awaitData: true}));
+const cursorId = findResponse.cursor.id;
+assert.neq(0, cursorId);
- // Start an operation in a parallel shell that holds the lock for a while.
- const awaitSleepShell = startParallelShell(
- () => assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 600}),
- ErrorCodes.Interrupted));
+// Start an operation in a parallel shell that holds the lock for a while.
+const awaitSleepShell = startParallelShell(
+ () => assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 600}),
+ ErrorCodes.Interrupted));
- // Start a getMore and verify that it is waiting for the lock.
- const getMoreMaxTimeMS = 10;
- const awaitGetMoreShell = startParallelShell(`
+// Start a getMore and verify that it is waiting for the lock.
+const getMoreMaxTimeMS = 10;
+const awaitGetMoreShell = startParallelShell(`
// Wait for the sleep command to take the lock.
assert.soon(() => db.getSiblingDB("admin")
.currentOp({"command.sleep": 1, active: true})
@@ -43,20 +43,19 @@
}));
`);
- // Wait to see the getMore waiting on the lock.
- assert.soon(
- () =>
- db.currentOp({"command.getMore": cursorId, waitingForLock: true}).inprog.length === 1);
+// Wait to see the getMore waiting on the lock.
+assert.soon(
+ () => db.currentOp({"command.getMore": cursorId, waitingForLock: true}).inprog.length === 1);
- // Sleep for twice the maxTimeMS to prove that the getMore won't time out waiting for the lock.
- sleep(2 * getMoreMaxTimeMS);
+// Sleep for twice the maxTimeMS to prove that the getMore won't time out waiting for the lock.
+sleep(2 * getMoreMaxTimeMS);
- // Then kill the command with the lock, allowing the getMore to continue successfully.
- const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
- assert.eq(sleepOps.length, 1);
- const sleepOpId = sleepOps[0].opid;
- assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
+// Then kill the command with the lock, allowing the getMore to continue successfully.
+const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
+assert.eq(sleepOps.length, 1);
+const sleepOpId = sleepOps[0].opid;
+assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
- awaitSleepShell();
- awaitGetMoreShell();
+awaitSleepShell();
+awaitGetMoreShell();
}());
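
A minimal sketch of establishing the tailable, awaitData cursor used above, assuming a capped collection (tailable cursors require one):

    assert.commandWorked(db.createCollection("tail_demo", {capped: true, size: 1024}));
    assert.writeOK(db.tail_demo.insert({_id: 0}));
    const res = assert.commandWorked(
        db.runCommand({find: "tail_demo", filter: {}, tailable: true, awaitData: true}));
    assert.neq(0, res.cursor.id);  // cursor stays open for later getMore commands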
diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js
index f33d2abfd76..257874d5966 100644
--- a/jstests/noPassthroughWithMongod/temp_namespace.js
+++ b/jstests/noPassthroughWithMongod/temp_namespace.js
@@ -8,16 +8,14 @@ testname = 'temp_namespace_sw';
var conn = MongoRunner.runMongod();
d = conn.getDB('test');
assert.commandWorked(d.runCommand({
- applyOps:
- [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp1', temp: true}}]
+ applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp1', temp: true}}]
}));
d[testname + 'temp1'].ensureIndex({x: 1});
assert.commandWorked(d.runCommand(
{applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp2', temp: 1}}]}));
d[testname + 'temp2'].ensureIndex({x: 1});
assert.commandWorked(d.runCommand({
- applyOps:
- [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'keep1', temp: false}}]
+ applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'keep1', temp: false}}]
}));
assert.commandWorked(d.runCommand(
{applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'keep2', temp: 0}}]}));
diff --git a/jstests/noPassthroughWithMongod/top_drop.js b/jstests/noPassthroughWithMongod/top_drop.js
index 17949e59ff6..b280aa20efd 100644
--- a/jstests/noPassthroughWithMongod/top_drop.js
+++ b/jstests/noPassthroughWithMongod/top_drop.js
@@ -3,77 +3,77 @@
* TODO(SERVER-21167): Move this test from noPassthrough to core.
*/
(function() {
- "use strict";
+"use strict";
- let topDB = db.getSiblingDB("topdrop");
- assert.commandWorked(topDB.dropDatabase());
+let topDB = db.getSiblingDB("topdrop");
+assert.commandWorked(topDB.dropDatabase());
- // Asserts that the output of top contains exactly these collection entries for topDB.
- function checkTopEntries(expectedEntries) {
- let res = topDB.adminCommand("top");
- assert.commandWorked(res, "Failed to run the top command");
+// Asserts that the output of top contains exactly these collection entries for topDB.
+function checkTopEntries(expectedEntries) {
+ let res = topDB.adminCommand("top");
+ assert.commandWorked(res, "Failed to run the top command");
- let entriesInTop = Object.keys(res.totals).filter(function(ns) {
- // This filter only includes non-system collections in our test database.
- const dbPrefix = topDB.getName() + ".";
- const systemCollectionPrefix = "system.";
- return ns.startsWith(dbPrefix) && !ns.startsWith(dbPrefix + systemCollectionPrefix);
- });
- let expectedEntryNames = expectedEntries.map(function(coll) {
- return coll.getFullName();
- });
+ let entriesInTop = Object.keys(res.totals).filter(function(ns) {
+ // This filter only includes non-system collections in our test database.
+ const dbPrefix = topDB.getName() + ".";
+ const systemCollectionPrefix = "system.";
+ return ns.startsWith(dbPrefix) && !ns.startsWith(dbPrefix + systemCollectionPrefix);
+ });
+ let expectedEntryNames = expectedEntries.map(function(coll) {
+ return coll.getFullName();
+ });
- const entriesAreEqual = friendlyEqual(entriesInTop.sort(), expectedEntryNames.sort());
- if (!entriesAreEqual) {
- // TODO(SERVER-26750): This block can be removed once SERVER-26750 is resolved.
- jsTest.log("Warning: expected to see " + tojson(expectedEntryNames) +
- " in top, but got " + tojson(entriesInTop));
+ const entriesAreEqual = friendlyEqual(entriesInTop.sort(), expectedEntryNames.sort());
+ if (!entriesAreEqual) {
+ // TODO(SERVER-26750): This block can be removed once SERVER-26750 is resolved.
+ jsTest.log("Warning: expected to see " + tojson(expectedEntryNames) + " in top, but got " +
+ tojson(entriesInTop));
- assert.lt(expectedEntryNames.length,
- entriesInTop.length,
- "Fewer entries in top than expected; got " + tojson(entriesInTop) +
- " but expected " + tojson(expectedEntryNames) + "\nFull top output:\n" +
- tojson(res.totals));
+ assert.lt(expectedEntryNames.length,
+ entriesInTop.length,
+ "Fewer entries in top than expected; got " + tojson(entriesInTop) +
+ " but expected " + tojson(expectedEntryNames) + "\nFull top output:\n" +
+ tojson(res.totals));
- // We allow an unexpected entry in top if the insert counter has been cleared. This is
- // probably due to a background job releasing an AutoGetCollectionForReadCommand for
- // that collection.
- entriesInTop.forEach(function(coll) {
- if (expectedEntryNames.includes(coll)) {
- return;
- }
+ // We allow an unexpected entry in top if the insert counter has been cleared. This is
+ // probably due to a background job releasing an AutoGetCollectionForReadCommand for
+ // that collection.
+ entriesInTop.forEach(function(coll) {
+ if (expectedEntryNames.includes(coll)) {
+ return;
+ }
- let topStats = res.totals[coll];
- assert.eq(0,
- res.totals[coll].insert.count,
- coll + " has unexpected insert entries in top. Full top output:\n" +
- tojson(res.totals));
- });
- }
+ let topStats = res.totals[coll];
+ assert.eq(0,
+ res.totals[coll].insert.count,
+ coll + " has unexpected insert entries in top. Full top output:\n" +
+ tojson(res.totals));
+ });
}
+}
- // Create a few entries in top.
- assert.writeOK(topDB.coll1.insert({}));
- assert.writeOK(topDB.coll2.insert({}));
- assert.writeOK(topDB.coll3.insert({}));
- checkTopEntries([topDB.coll1, topDB.coll2, topDB.coll3]);
+// Create a few entries in top.
+assert.writeOK(topDB.coll1.insert({}));
+assert.writeOK(topDB.coll2.insert({}));
+assert.writeOK(topDB.coll3.insert({}));
+checkTopEntries([topDB.coll1, topDB.coll2, topDB.coll3]);
- // Check that dropping a collection removes that collection but leaves the others.
- assert.commandWorked(topDB.runCommand({drop: "coll3"}));
- checkTopEntries([topDB.coll1, topDB.coll2]);
+// Check that dropping a collection removes that collection but leaves the others.
+assert.commandWorked(topDB.runCommand({drop: "coll3"}));
+checkTopEntries([topDB.coll1, topDB.coll2]);
- // Check that dropping the database removes the remaining collections.
- assert.commandWorked(topDB.dropDatabase());
- checkTopEntries([]);
+// Check that dropping the database removes the remaining collections.
+assert.commandWorked(topDB.dropDatabase());
+checkTopEntries([]);
- // Check that top doesn't keep state about non-existent collections.
- assert.commandWorked(topDB.dropDatabase());
- topDB.foo.find().itcount();
- topDB.baz.update({}, {$set: {a: 1}});
- topDB.bar.remove({});
+// Check that top doesn't keep state about non-existent collections.
+assert.commandWorked(topDB.dropDatabase());
+topDB.foo.find().itcount();
+topDB.baz.update({}, {$set: {a: 1}});
+topDB.bar.remove({});
- let res = topDB.adminCommand("top");
- checkTopEntries([]);
+let res = topDB.adminCommand("top");
+checkTopEntries([]);
- assert.commandWorked(topDB.dropDatabase());
+assert.commandWorked(topDB.dropDatabase());
}());
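
A minimal sketch of the filtering inside checkTopEntries: keep only this database's non-system namespaces from the top command's totals.

    const totals = topDB.adminCommand("top").totals;
    const prefix = topDB.getName() + ".";
    const entries = Object.keys(totals).filter(
        (ns) => ns.startsWith(prefix) && !ns.startsWith(prefix + "system."));
    printjson(entries);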
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 97257da5ce6..b0c7c342987 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -1,4 +1,5 @@
-/** Test TTL collections with replication
+/**
+ * Test TTL collections with replication
* Part 1: Initiate replica set. Insert some docs and create a TTL index.
* Check that the correct # of docs age out.
* Part 2: Add a new member to the set. Check that it also gets the correct # of docs.
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 44250c4f1d2..4616a306084 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -1,4 +1,5 @@
-/** This tests ensures that when a stand-alone server is started with something in
+/**
+ * This test ensures that when a stand-alone server is started with something in
* local.system.replset, it doesn't start the TTL monitor (SERVER-6609). The test creates a
* dummy replset config & TTL collection, then restarts the member and ensures that it doesn't
* time out the docs in the TTL collection. Then it removes the "config" and
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
index 135bc41a608..a9fb3c46108 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
@@ -1,4 +1,5 @@
-/** Test TTL docs are not deleted from secondaries directly
+/**
+ * Test TTL docs are not deleted from secondaries directly
* @tags: [requires_replication]
*/
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index 197832f6933..6788dfcdcf9 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -1,4 +1,5 @@
-/** Simple test of sharding TTL collections.
+/**
+ * Simple test of sharding TTL collections.
* - Creates a new collection with a TTL index
* - Shards it, and moves one chunk containing half the docs to another shard.
* - Checks that both shards have TTL index, and docs get deleted on both shards.
diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js
index 9c52c9acad7..7f5a8533705 100644
--- a/jstests/noPassthroughWithMongod/validate_command.js
+++ b/jstests/noPassthroughWithMongod/validate_command.js
@@ -1,40 +1,40 @@
// Tests that the basic values returned from the validate command are correct
(function() {
- // Set the number of documents to insert
- var count = 10;
+// Set the number of documents to insert
+var count = 10;
- function testValidate(output) {
- assert.eq(output.nrecords, count, "validate returned an invalid count");
- assert.eq(output.nIndexes, 3, "validate returned an invalid number of indexes");
+function testValidate(output) {
+ assert.eq(output.nrecords, count, "validate returned an invalid count");
+ assert.eq(output.nIndexes, 3, "validate returned an invalid number of indexes");
- var indexNames = output.keysPerIndex;
+ var indexNames = output.keysPerIndex;
- for (var i in indexNames) {
- if (!indexNames.hasOwnProperty(i))
- continue;
- assert.eq(indexNames[i], count, "validate returned an invalid number of indexes");
- }
+ for (var i in indexNames) {
+ if (!indexNames.hasOwnProperty(i))
+ continue;
+ assert.eq(indexNames[i], count, "validate returned an invalid number of keys for an index");
}
+}
- // Test to confirm that validate is working as expected.
+// Test to confirm that validate is working as expected.
- // SETUP DATA
- t = db.jstests_validate;
- t.drop();
+// SETUP DATA
+t = db.jstests_validate;
+t.drop();
- for (var i = 0; i < count; i++) {
- t.insert({x: i});
- }
+for (var i = 0; i < count; i++) {
+ t.insert({x: i});
+}
- t.ensureIndex({x: 1}, {name: "forward"});
- t.ensureIndex({x: -1}, {name: "reverse"});
+t.ensureIndex({x: 1}, {name: "forward"});
+t.ensureIndex({x: -1}, {name: "reverse"});
- // TEST NORMAL VALIDATE
- var output = t.validate();
- testValidate(output);
+// TEST NORMAL VALIDATE
+var output = t.validate();
+testValidate(output);
- // TEST FULL
- var output = t.validate({full: true});
- testValidate(output);
+// TEST FULL
+var output = t.validate({full: true});
+testValidate(output);
}());
\ No newline at end of file
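
A minimal sketch of the validate output fields the assertions above inspect:

    const out = db.jstests_validate.validate({full: true});
    print(out.nrecords, out.nIndexes);  // record count and index count
    printjson(out.keysPerIndex);        // key count per index name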
diff --git a/jstests/noPassthroughWithMongod/validate_interrupt.js b/jstests/noPassthroughWithMongod/validate_interrupt.js
index 7b76551f4e4..c19e682eae3 100644
--- a/jstests/noPassthroughWithMongod/validate_interrupt.js
+++ b/jstests/noPassthroughWithMongod/validate_interrupt.js
@@ -4,43 +4,43 @@
'use strict';
(function() {
- var t = db.validate_interrupt;
- t.drop();
-
- var bulk = t.initializeUnorderedBulkOp();
-
- var i;
- for (i = 0; i < 1000; i++) {
- bulk.insert({a: i});
- }
- assert.writeOK(bulk.execute());
-
- function setTimeoutFailPoint(mode) {
- var res = db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: mode});
- assert.commandWorked(res);
- }
-
- setTimeoutFailPoint('alwaysOn');
- var res = t.runCommand({validate: t.getName(), full: true, maxTimeMS: 1});
- setTimeoutFailPoint('off');
-
- // Sanity check to make sure the failpoint is turned off.
- assert.commandWorked(t.runCommand({validate: t.getName(), full: true}));
-
- if (res.ok === 0) {
- assert.eq(res.code,
- ErrorCodes.MaxTimeMSExpired,
- 'validate command did not time out:\n' + tojson(res));
- } else {
- // validate() should only succeed if it EBUSY'd. See SERVER-23131.
- var numWarnings = res.warnings.length;
- // validate() could EBUSY when verifying the index and/or the RecordStore, so EBUSY could
- // appear once or twice.
- assert((numWarnings === 1) || (numWarnings === 2),
- 'Expected 1 or 2 validation warnings:\n' + tojson(res));
- assert(res.warnings[0].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
- if (numWarnings === 2) {
- assert(res.warnings[1].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
- }
+var t = db.validate_interrupt;
+t.drop();
+
+var bulk = t.initializeUnorderedBulkOp();
+
+var i;
+for (i = 0; i < 1000; i++) {
+ bulk.insert({a: i});
+}
+assert.writeOK(bulk.execute());
+
+function setTimeoutFailPoint(mode) {
+ var res = db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: mode});
+ assert.commandWorked(res);
+}
+
+setTimeoutFailPoint('alwaysOn');
+var res = t.runCommand({validate: t.getName(), full: true, maxTimeMS: 1});
+setTimeoutFailPoint('off');
+
+// Sanity check to make sure the failpoint is turned off.
+assert.commandWorked(t.runCommand({validate: t.getName(), full: true}));
+
+if (res.ok === 0) {
+ assert.eq(res.code,
+ ErrorCodes.MaxTimeMSExpired,
+ 'validate command did not time out:\n' + tojson(res));
+} else {
+ // validate() should only succeed if it EBUSY'd. See SERVER-23131.
+ var numWarnings = res.warnings.length;
+ // validate() could EBUSY when verifying the index and/or the RecordStore, so EBUSY could
+ // appear once or twice.
+ assert((numWarnings === 1) || (numWarnings === 2),
+ 'Expected 1 or 2 validation warnings:\n' + tojson(res));
+ assert(res.warnings[0].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
+ if (numWarnings === 2) {
+ assert(res.warnings[1].includes('EBUSY'), 'Expected an EBUSY warning:\n' + tojson(res));
}
+}
})();
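
A minimal sketch of the failpoint pattern this test uses: while maxTimeAlwaysTimeOut is on, any command that carries a maxTimeMS is treated as having exceeded its time limit.

    assert.commandWorked(
        db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}));
    var r = db.runCommand({validate: 'validate_interrupt', full: true, maxTimeMS: 1});
    // r is expected to fail with ErrorCodes.MaxTimeMSExpired (unless validate EBUSY'd).
    assert.commandWorked(
        db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'off'}));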
diff --git a/jstests/noPassthroughWithMongod/views_invalid.js b/jstests/noPassthroughWithMongod/views_invalid.js
index 1749a9900d7..a525b68d32f 100644
--- a/jstests/noPassthroughWithMongod/views_invalid.js
+++ b/jstests/noPassthroughWithMongod/views_invalid.js
@@ -1,67 +1,63 @@
(function() {
- "use strict";
-
- const dbname = 'views_invalid';
- let invalidDB = db.getSiblingDB(dbname);
-
- // Wait for the invalid view definition to be replicated to any secondaries and then drop the
- // database.
- assert.writeOK(invalidDB.system.views.insert({z: '\0\uFFFFf'}),
- {writeConcern: {w: "majority"}});
- invalidDB.dropDatabase();
-
- // Create a database with one valid and one invalid view through direct system.views writes.
- assert.writeOK(invalidDB.coll.insert({x: 1}));
- assert.writeOK(
- invalidDB.system.views.insert({_id: dbname + '.view', viewOn: 'coll', pipeline: []}));
- assert.eq(invalidDB.view.findOne({}, {_id: 0}),
- {x: 1},
- 'find on view created with direct write to views catalog should work');
- assert.writeOK(invalidDB.system.views.insert({_id: 'invalid', pipeline: 3.0}));
-
- // Check that view-related commands fail with an invalid view catalog, but other commands on
- // existing collections still succeed.
- assert.commandFailedWithCode(
- invalidDB.runCommand({find: 'view'}),
- ErrorCodes.InvalidViewDefinition,
- 'find on existing view in DB with invalid system.views should fail');
-
- assert.eq(invalidDB.coll.findOne({}, {_id: 0}),
- {x: 1},
- 'find on existing collection in DB with invalid views catalog should work');
-
- assert.writeOK(invalidDB.coll.insert({x: 2}),
- 'insert in existing collection in DB with invalid views catalog should work');
-
- assert.writeError(invalidDB.x.insert({x: 2}),
- 'insert into new collection in DB with invalid views catalog should fail');
-
- assert.commandWorked(
- invalidDB.runCommand({drop: 'coll'}),
- 'dropping an existing collection in DB with invalid views catalog should work');
-
- assert.commandFailedWithCode(
- invalidDB.runCommand({drop: 'view'}),
- ErrorCodes.InvalidViewDefinition,
- 'dropping an existing view in DB with invalid views catalog should fail');
-
- assert.commandFailedWithCode(
- invalidDB.createCollection('x'),
- ErrorCodes.InvalidViewDefinition,
- 'creating a collection in DB with invalid views catalog should fail');
-
- assert.commandFailedWithCode(
- invalidDB.runCommand({find: 'x'}),
- ErrorCodes.InvalidViewDefinition,
- 'find on non-existent collection in DB with invalid system.views should fail');
-
- // Now fix the database by removing the invalid system.views entry, and check all is OK.
- assert.writeOK(invalidDB.system.views.remove({_id: 'invalid'}),
- 'should be able to remove invalid view with direct write to view catalog');
- assert.writeOK(
- invalidDB.coll.insert({x: 1}),
- 'after remove invalid view from catalog, should be able to create new collection');
- assert.eq(invalidDB.view.findOne({}, {_id: 0}),
- {x: 1},
- 'find on view should work again after removing invalid view from catalog');
+"use strict";
+
+const dbname = 'views_invalid';
+let invalidDB = db.getSiblingDB(dbname);
+
+// Wait for the invalid view definition to be replicated to any secondaries and then drop the
+// database.
+assert.writeOK(invalidDB.system.views.insert({z: '\0\uFFFFf'}), {writeConcern: {w: "majority"}});
+invalidDB.dropDatabase();
+
+// Create a database with one valid and one invalid view through direct system.views writes.
+assert.writeOK(invalidDB.coll.insert({x: 1}));
+assert.writeOK(
+ invalidDB.system.views.insert({_id: dbname + '.view', viewOn: 'coll', pipeline: []}));
+assert.eq(invalidDB.view.findOne({}, {_id: 0}),
+ {x: 1},
+ 'find on view created with direct write to views catalog should work');
+assert.writeOK(invalidDB.system.views.insert({_id: 'invalid', pipeline: 3.0}));
+
+// Check that view-related commands fail with an invalid view catalog, but other commands on
+// existing collections still succeed.
+assert.commandFailedWithCode(invalidDB.runCommand({find: 'view'}),
+ ErrorCodes.InvalidViewDefinition,
+ 'find on existing view in DB with invalid system.views should fail');
+
+assert.eq(invalidDB.coll.findOne({}, {_id: 0}),
+ {x: 1},
+ 'find on existing collection in DB with invalid views catalog should work');
+
+assert.writeOK(invalidDB.coll.insert({x: 2}),
+ 'insert in existing collection in DB with invalid views catalog should work');
+
+assert.writeError(invalidDB.x.insert({x: 2}),
+ 'insert into new collection in DB with invalid views catalog should fail');
+
+assert.commandWorked(
+ invalidDB.runCommand({drop: 'coll'}),
+ 'dropping an existing collection in DB with invalid views catalog should work');
+
+assert.commandFailedWithCode(
+ invalidDB.runCommand({drop: 'view'}),
+ ErrorCodes.InvalidViewDefinition,
+ 'dropping an existing view in DB with invalid views catalog should fail');
+
+assert.commandFailedWithCode(invalidDB.createCollection('x'),
+ ErrorCodes.InvalidViewDefinition,
+ 'creating a collection in DB with invalid views catalog should fail');
+
+assert.commandFailedWithCode(
+ invalidDB.runCommand({find: 'x'}),
+ ErrorCodes.InvalidViewDefinition,
+ 'find on non-existent collection in DB with invalid system.views should fail');
+
+// Now fix the database by removing the invalid system.views entry, and check all is OK.
+assert.writeOK(invalidDB.system.views.remove({_id: 'invalid'}),
+ 'should be able to remove invalid view with direct write to view catalog');
+assert.writeOK(invalidDB.coll.insert({x: 1}),
+ 'after removing invalid view from catalog, should be able to create new collection');
+assert.eq(invalidDB.view.findOne({}, {_id: 0}),
+ {x: 1},
+ 'find on view should work again after removing invalid view from catalog');
})();
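
A minimal sketch of the direct system.views write this test builds on: a view can be defined by inserting its definition straight into the views catalog, bypassing createView.

    assert.writeOK(invalidDB.system.views.insert(
        {_id: dbname + '.demoView', viewOn: 'coll', pipeline: []}));
    invalidDB.demoView.findOne();  // reads through the hand-written view definition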
diff --git a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
index 65ebef5ccf3..9664cfe4538 100644
--- a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
+++ b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
@@ -3,42 +3,40 @@
* collection or index with the same WiredTiger options.
*/
(function() {
- 'use strict';
+'use strict';
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (db.serverStatus().storageEngine.name !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (db.serverStatus().storageEngine.name !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
- var collNamePrefix = 'wt_roundtrip_creation_string';
+var collNamePrefix = 'wt_roundtrip_creation_string';
- // Drop the collections used by the test to ensure that the create commands don't fail because
- // the collections already exist.
- db[collNamePrefix].source.drop();
- db[collNamePrefix].dest.drop();
+// Drop the collections used by the test to ensure that the create commands don't fail because
+// the collections already exist.
+db[collNamePrefix].source.drop();
+db[collNamePrefix].dest.drop();
- assert.commandWorked(db.createCollection(collNamePrefix + '.source'));
- assert.commandWorked(db[collNamePrefix].source.createIndex({a: 1}, {name: 'a_1'}));
+assert.commandWorked(db.createCollection(collNamePrefix + '.source'));
+assert.commandWorked(db[collNamePrefix].source.createIndex({a: 1}, {name: 'a_1'}));
- var collStats = db.runCommand({collStats: collNamePrefix + '.source'});
- assert.commandWorked(collStats);
+var collStats = db.runCommand({collStats: collNamePrefix + '.source'});
+assert.commandWorked(collStats);
- assert.commandWorked(
- db.runCommand({
- create: collNamePrefix + '.dest',
- storageEngine: {wiredTiger: {configString: collStats.wiredTiger.creationString}}
- }),
- 'unable to create collection using the creation string of another collection');
+assert.commandWorked(db.runCommand({
+ create: collNamePrefix + '.dest',
+ storageEngine: {wiredTiger: {configString: collStats.wiredTiger.creationString}}
+}),
+ 'unable to create collection using the creation string of another collection');
- assert.commandWorked(db.runCommand({
- createIndexes: collNamePrefix + '.dest',
- indexes: [{
- key: {b: 1},
- name: 'b_1',
- storageEngine:
- {wiredTiger: {configString: collStats.indexDetails.a_1.creationString}}
- }]
- }),
- 'unable to create index using the creation string of another index');
+assert.commandWorked(db.runCommand({
+ createIndexes: collNamePrefix + '.dest',
+ indexes: [{
+ key: {b: 1},
+ name: 'b_1',
+ storageEngine: {wiredTiger: {configString: collStats.indexDetails.a_1.creationString}}
+ }]
+}),
+ 'unable to create index using the creation string of another index');
})();
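
A minimal sketch of where the roundtripped configuration comes from, assuming the wiredTiger storage engine: collStats exposes a creationString for the collection and for each index, which can be fed back through storageEngine options.

    var stats = assert.commandWorked(
        db.runCommand({collStats: 'wt_roundtrip_creation_string.source'}));
    print(stats.wiredTiger.creationString);        // collection-level WT config
    print(stats.indexDetails.a_1.creationString);  // index-level WT config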
diff --git a/jstests/parallel/shellfork.js b/jstests/parallel/shellfork.js
index 571f917fc4a..65be8abb33d 100644
--- a/jstests/parallel/shellfork.js
+++ b/jstests/parallel/shellfork.js
@@ -31,7 +31,7 @@ assert.eq(7, z.returnData());
t = 1;
z = new ScopedThread(function() {
- assert(typeof(t) == "undefined", "t not undefined");
+ assert(typeof (t) == "undefined", "t not undefined");
t = 5;
return t;
});
diff --git a/jstests/parallel/update_serializability2.js b/jstests/parallel/update_serializability2.js
index f28ddf9bd5a..84b27049e53 100644
--- a/jstests/parallel/update_serializability2.js
+++ b/jstests/parallel/update_serializability2.js
@@ -1,27 +1,27 @@
(function() {
- "use strict";
- var t = db.update_serializability1;
- t.drop();
+"use strict";
+var t = db.update_serializability1;
+t.drop();
- var N = 100000;
+var N = 100000;
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < N; i++) {
- bulk.insert({_id: i, a: i, b: N - i, x: 1, y: 1});
- }
- bulk.execute();
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < N; i++) {
+ bulk.insert({_id: i, a: i, b: N - i, x: 1, y: 1});
+}
+bulk.execute();
- t.ensureIndex({a: 1});
- t.ensureIndex({b: 1});
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
- var s1 = startParallelShell(
- "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { x : 2 } }, false, true );");
- var s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N +
- " } }, { $set : { y : 2 } }, false, true );");
+var s1 = startParallelShell(
+ "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { x : 2 } }, false, true );");
+var s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N +
+ " } }, { $set : { y : 2 } }, false, true );");
- s1();
- s2();
+s1();
+s2();
- // both operations should happen on every document
- assert.eq(N, t.find({x: 2, y: 2}).count());
+// both operations should happen on every document
+assert.eq(N, t.find({x: 2, y: 2}).count());
})();
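
A minimal sketch of the startParallelShell contract relied on above: it returns a join function that blocks until the spawned shell exits.

    var join = startParallelShell(
        "db.update_serializability1.update({}, {$set: {x: 2}}, false, true);");
    join();  // wait for the parallel update to finish before asserting on its effects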
diff --git a/jstests/perf/mr_bench.js b/jstests/perf/mr_bench.js
index ea506578d86..c141e112163 100644
--- a/jstests/perf/mr_bench.js
+++ b/jstests/perf/mr_bench.js
@@ -47,7 +47,7 @@ m2 = function() {
r = function(k, vals) {
var tmp = {};
vals.forEach(function(i) {
- if (typeof(i) == 'string') {
+ if (typeof (i) == 'string') {
tmp[i] = true;
} else {
for (var z in i)
diff --git a/jstests/perf/v8_mapreduce.js b/jstests/perf/v8_mapreduce.js
index c2123c89403..46afcf37082 100644
--- a/jstests/perf/v8_mapreduce.js
+++ b/jstests/perf/v8_mapreduce.js
@@ -77,7 +77,6 @@ if (/V8/.test(interpreterVersion()) && db.runCommand({buildinfo: 1}).javascriptE
db.getLastError();
var mrWorkFunction = function() {
-
function verifyOutput(out) {
// printjson(out);
assert.eq(out.counts.input, 51200, "input count is wrong");
diff --git a/jstests/readonly/aggregate.js b/jstests/readonly/aggregate.js
index 9640c231900..a79e7ed19d2 100644
--- a/jstests/readonly/aggregate.js
+++ b/jstests/readonly/aggregate.js
@@ -9,63 +9,62 @@ runReadOnlyTest(function() {
assert.doesNotThrow(() => {
writableCollection.insertMany([
{
- award: "Best Picture",
- nominations: [
- {title: "The Big Short"},
- {title: "Bridge of Spies"},
- {title: "Brooklyn"},
- {title: "Max Max: Fury Road"},
- {title: "The Martian"},
- {title: "The Revenant"},
- {title: "Room"},
- {title: "Spotlight"}
- ]
+ award: "Best Picture",
+ nominations: [
+ {title: "The Big Short"},
+ {title: "Bridge of Spies"},
+ {title: "Brooklyn"},
+ {title: "Max Max: Fury Road"},
+ {title: "The Martian"},
+ {title: "The Revenant"},
+ {title: "Room"},
+ {title: "Spotlight"}
+ ]
},
{
- award: "Best Actor",
- nominations: [
- {title: "Trumbo", person: "Bryan Cranston"},
- {title: "The Martian", person: "Matt Damon"},
- {title: "The Revenant", person: "Leonardo DiCaprio"},
- {title: "Steve Jobs", person: "Michael Fassbender"},
- {title: "The Danish Girl", person: "Eddie Redmayne"}
- ]
+ award: "Best Actor",
+ nominations: [
+ {title: "Trumbo", person: "Bryan Cranston"},
+ {title: "The Martian", person: "Matt Damon"},
+ {title: "The Revenant", person: "Leonardo DiCaprio"},
+ {title: "Steve Jobs", person: "Michael Fassbender"},
+ {title: "The Danish Girl", person: "Eddie Redmayne"}
+ ]
},
{
- award: "Best Actress",
- nominations: [
- {title: "Carol", person: "Cate Blanchett"},
- {title: "Room", person: "Brie Larson"},
- {title: "Joy", person: "Jennifer Lawrence"},
- {title: "45 Years", person: "Charlotte Rampling"},
- {title: "Brooklyn", person: "Saoirse Ronan"}
- ]
+ award: "Best Actress",
+ nominations: [
+ {title: "Carol", person: "Cate Blanchett"},
+ {title: "Room", person: "Brie Larson"},
+ {title: "Joy", person: "Jennifer Lawrence"},
+ {title: "45 Years", person: "Charlotte Rampling"},
+ {title: "Brooklyn", person: "Saoirse Ronan"}
+ ]
},
{
- award: "Best Supporting Actor",
- nominations: [
- {title: "The Big Short", person: "Christian Bale"},
- {title: "The Revenant", person: "Tom Hardy"},
- {title: "Spotlight", person: "Mark Ruffalo"},
- {title: "Bridge Of Spies", person: "Mark Rylance"},
- {title: "Creed", person: "Sylvester Stallone"}
- ]
+ award: "Best Supporting Actor",
+ nominations: [
+ {title: "The Big Short", person: "Christian Bale"},
+ {title: "The Revenant", person: "Tom Hardy"},
+ {title: "Spotlight", person: "Mark Ruffalo"},
+ {title: "Bridge Of Spies", person: "Mark Rylance"},
+ {title: "Creed", person: "Sylvester Stallone"}
+ ]
},
{
- award: "Best Supporting Actress",
- nominations: [
- {title: "The Hateful Eight", person: "Jennifer Jason Leigh"},
- {title: "Carol", person: "Rooney Mara"},
- {title: "Spotlight", person: "Rachel McAdams"},
- {title: "The Danish Girl", person: "Alicia Vikander"},
- {title: "Steve Jobs", person: "Kate Winslet"}
- ]
+ award: "Best Supporting Actress",
+ nominations: [
+ {title: "The Hateful Eight", person: "Jennifer Jason Leigh"},
+ {title: "Carol", person: "Rooney Mara"},
+ {title: "Spotlight", person: "Rachel McAdams"},
+ {title: "The Danish Girl", person: "Alicia Vikander"},
+ {title: "Steve Jobs", person: "Kate Winslet"}
+ ]
}
]);
});
},
exec: function(readableCollection) {
-
// Find titles nominated for the most awards.
var mostAwardsPipeline = [
{$unwind: "$nominations"},
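
The remainder of mostAwardsPipeline is elided by the diff context; a hedged sketch of one plausible shape (not the test's actual code) would unwind the nominations, count per title, and keep the top result:

    var samplePipeline = [
        {$unwind: "$nominations"},
        {$group: {_id: "$nominations.title", count: {$sum: 1}}},
        {$sort: {count: -1}},
        {$limit: 1}
    ];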
diff --git a/jstests/readonly/catalog_ops.js b/jstests/readonly/catalog_ops.js
index e219e2eb0c1..9f8353ee61c 100644
--- a/jstests/readonly/catalog_ops.js
+++ b/jstests/readonly/catalog_ops.js
@@ -9,7 +9,6 @@ runReadOnlyTest(function() {
indexSpecs: [{a: 1}, {a: 1, b: -1}, {a: 1, b: 1, c: -1}],
load: function(writableCollection) {
-
// Catalog guarantees are necessarily weaker in sharded systems since mongos is not
// read-only aware.
if (TestData.fixture === "sharded")
@@ -29,7 +28,6 @@ runReadOnlyTest(function() {
}
},
exec: function(readableCollection) {
-
// Catalog guarantees are necessarily weaker in sharded systems since mongos is not
// read-only aware.
if (TestData.fixture === "sharded")
diff --git a/jstests/readonly/geo.js b/jstests/readonly/geo.js
index 2ba43f597b4..ea13761248a 100644
--- a/jstests/readonly/geo.js
+++ b/jstests/readonly/geo.js
@@ -12,17 +12,17 @@ runReadOnlyTest(function() {
var locDocs = [
{name: "Berry Park", loc: {type: "Point", coordinates: [40.722396, -73.9573645]}},
{
- name: "Northern Territory",
- loc: {type: "Point", coordinates: [40.7252334, -73.9595218]}
+ name: "Northern Territory",
+ loc: {type: "Point", coordinates: [40.7252334, -73.9595218]}
},
{
- name: "Kent Ale House",
- loc: {type: "Point", coordinates: [40.7223364, -73.9614495]}
+ name: "Kent Ale House",
+ loc: {type: "Point", coordinates: [40.7223364, -73.9614495]}
},
{name: "The Shanty", loc: {type: "Point", coordinates: [40.7185752, -73.9510538]}},
{
- name: "The Counting Room",
- loc: {type: "Point", coordinates: [40.7209601, -73.9588041]}
+ name: "The Counting Room",
+ loc: {type: "Point", coordinates: [40.7209601, -73.9588041]}
},
{name: "Kinfolk 94", loc: {type: "Point", coordinates: [40.7217058, -73.9605489]}}
];
@@ -30,18 +30,19 @@ runReadOnlyTest(function() {
writableCollection.insertMany(locDocs);
},
exec: function(readableCollection) {
- const res = readableCollection
- .aggregate([
- {
- $geoNear: {
- near: {type: "Point", coordinates: [40.7211404, -73.9591494]},
- distanceField: "dist",
- spherical: true,
- }
- },
- {$limit: 1}
- ])
- .toArray();
+ const res =
+ readableCollection
+ .aggregate([
+ {
+ $geoNear: {
+ near: {type: "Point", coordinates: [40.7211404, -73.9591494]},
+ distanceField: "dist",
+ spherical: true,
+ }
+ },
+ {$limit: 1}
+ ])
+ .toArray();
assert.eq(res[0].name, "The Counting Room", printjson(res));
}
};
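
A minimal sketch of the $geoNear usage above, assuming a 2dsphere index on loc: $geoNear must be the first pipeline stage, and with spherical: true it computes spherical distances into distanceField.

    readableCollection.aggregate([
        {$geoNear: {near: {type: "Point", coordinates: [40.7211404, -73.9591494]},
                    distanceField: "dist",
                    spherical: true}},
        {$limit: 1}
    ]).toArray();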
diff --git a/jstests/readonly/lib/read_only_test.js b/jstests/readonly/lib/read_only_test.js
index ef04350d5e0..04e1e2186ba 100644
--- a/jstests/readonly/lib/read_only_test.js
+++ b/jstests/readonly/lib/read_only_test.js
@@ -1,175 +1,175 @@
var StandaloneFixture, ShardedFixture, runReadOnlyTest, zip2, cycleN;
(function() {
- "use strict";
+"use strict";
- function makeDirectoryReadOnly(dir) {
- if (_isWindows()) {
- run("attrib", "+r", dir + "\\*.*", "/s");
- } else {
- run("chmod", "-R", "a-w", dir);
- }
+function makeDirectoryReadOnly(dir) {
+ if (_isWindows()) {
+ run("attrib", "+r", dir + "\\*.*", "/s");
+ } else {
+ run("chmod", "-R", "a-w", dir);
}
+}
- function makeDirectoryWritable(dir) {
- if (_isWindows()) {
- run("attrib", "-r", dir + "\\*.*", "/s");
- } else {
- run("chmod", "-R", "a+w", dir);
- }
+function makeDirectoryWritable(dir) {
+ if (_isWindows()) {
+ run("attrib", "-r", dir + "\\*.*", "/s");
+ } else {
+ run("chmod", "-R", "a+w", dir);
}
+}
- StandaloneFixture = function() {};
+StandaloneFixture = function() {};
- StandaloneFixture.prototype.runLoadPhase = function runLoadPhase(test) {
- this.mongod = MongoRunner.runMongod({});
- this.dbpath = this.mongod.dbpath;
+StandaloneFixture.prototype.runLoadPhase = function runLoadPhase(test) {
+ this.mongod = MongoRunner.runMongod({});
+ this.dbpath = this.mongod.dbpath;
- test.load(this.mongod.getDB("test")[test.name]);
- assert.commandWorked(this.mongod.getDB("local").dropDatabase());
- MongoRunner.stopMongod(this.mongod);
- };
+ test.load(this.mongod.getDB("test")[test.name]);
+ assert.commandWorked(this.mongod.getDB("local").dropDatabase());
+ MongoRunner.stopMongod(this.mongod);
+};
- StandaloneFixture.prototype.runExecPhase = function runExecPhase(test) {
- try {
- makeDirectoryReadOnly(this.dbpath);
+StandaloneFixture.prototype.runExecPhase = function runExecPhase(test) {
+ try {
+ makeDirectoryReadOnly(this.dbpath);
- var options = {queryableBackupMode: "", noCleanData: true, dbpath: this.dbpath};
+ var options = {queryableBackupMode: "", noCleanData: true, dbpath: this.dbpath};
- this.mongod = MongoRunner.runMongod(options);
- assert.neq(this.mongod, null);
+ this.mongod = MongoRunner.runMongod(options);
+ assert.neq(this.mongod, null);
- test.exec(this.mongod.getDB("test")[test.name]);
+ test.exec(this.mongod.getDB("test")[test.name]);
- MongoRunner.stopMongod(this.mongod);
- } finally {
- makeDirectoryWritable(this.dbpath);
- }
- };
-
- ShardedFixture = function() {
- this.nShards = 3;
- };
-
- ShardedFixture.prototype.runLoadPhase = function runLoadPhase(test) {
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- this.shardingTest =
- new ShardingTest({mongos: 1, shards: this.nShards, other: {shardAsReplicaSet: false}});
-
- this.paths = this.shardingTest.getDBPaths();
-
- jsTest.log("sharding test collection...");
-
- // Use a hashed shard key so we actually hit multiple shards.
- this.shardingTest.shardColl(test.name, {_id: "hashed"}, false);
-
- test.load(this.shardingTest.getDB("test")[test.name]);
- };
-
- ShardedFixture.prototype.runExecPhase = function runExecPhase(test) {
- jsTest.log("restarting shards...");
- try {
- for (var i = 0; i < this.nShards; ++i) {
- // Write the shard's shardIdentity to a config file under
- // sharding._overrideShardIdentity, since the shardIdentity must be provided through
- // overrideShardIdentity when running in queryableBackupMode, and is only allowed to
- // be set via config file.
-
- var shardIdentity = this.shardingTest["d" + i]
- .getDB("admin")
- .getCollection("system.version")
- .findOne({_id: "shardIdentity"});
- assert.neq(null, shardIdentity);
-
- // Construct a string representation of the config file (replace all instances of
- // multiple consecutive whitespace characters in the string representation of the
- // shardIdentity JSON document, including newlines, with single white spaces).
- var configFileStr = "sharding:\n _overrideShardIdentity: '" +
- tojson(shardIdentity).replace(/\s+/g, ' ') + "'";
-
- // Use the os-specific path delimiter.
- var delim = _isWindows() ? '\\' : '/';
- var configFilePath = this.paths[i] + delim + "config-for-shard-" + i + ".yml";
-
- writeFile(configFilePath, configFileStr);
-
- var opts = {
- config: configFilePath,
- queryableBackupMode: "",
- shardsvr: "",
- dbpath: this.paths[i]
- };
-
- assert.commandWorked(this.shardingTest["d" + i].getDB("local").dropDatabase());
- this.shardingTest.restartMongod(i, opts, () => {
- makeDirectoryReadOnly(this.paths[i]);
- });
- }
+ MongoRunner.stopMongod(this.mongod);
+ } finally {
+ makeDirectoryWritable(this.dbpath);
+ }
+};
- jsTest.log("restarting mongos...");
+ShardedFixture = function() {
+ this.nShards = 3;
+};
- this.shardingTest.restartMongos(0);
+ShardedFixture.prototype.runLoadPhase = function runLoadPhase(test) {
+ // TODO: SERVER-33830 remove shardAsReplicaSet: false
+ this.shardingTest =
+ new ShardingTest({mongos: 1, shards: this.nShards, other: {shardAsReplicaSet: false}});
- test.exec(this.shardingTest.getDB("test")[test.name]);
+ this.paths = this.shardingTest.getDBPaths();
- this.paths.forEach((path) => {
- makeDirectoryWritable(path);
- });
+ jsTest.log("sharding test collection...");
+
+ // Use a hashed shard key so we actually hit multiple shards.
+ this.shardingTest.shardColl(test.name, {_id: "hashed"}, false);
- this.shardingTest.stop();
- } finally {
- this.paths.forEach((path) => {
- makeDirectoryWritable(path);
+ test.load(this.shardingTest.getDB("test")[test.name]);
+};
+
+ShardedFixture.prototype.runExecPhase = function runExecPhase(test) {
+ jsTest.log("restarting shards...");
+ try {
+ for (var i = 0; i < this.nShards; ++i) {
+ // Write the shard's shardIdentity to a config file under
+ // sharding._overrideShardIdentity, since the shardIdentity must be provided through
+ // overrideShardIdentity when running in queryableBackupMode, and is only allowed to
+ // be set via config file.
+
+ var shardIdentity =
+ this.shardingTest["d" + i].getDB("admin").getCollection("system.version").findOne({
+ _id: "shardIdentity"
+ });
+ assert.neq(null, shardIdentity);
+
+ // Construct a string representation of the config file (replace all instances of
+ // multiple consecutive whitespace characters in the string representation of the
+ // shardIdentity JSON document, including newlines, with single white spaces).
+ var configFileStr = "sharding:\n _overrideShardIdentity: '" +
+ tojson(shardIdentity).replace(/\s+/g, ' ') + "'";
+
+ // Use the os-specific path delimiter.
+ var delim = _isWindows() ? '\\' : '/';
+ var configFilePath = this.paths[i] + delim + "config-for-shard-" + i + ".yml";
+
+ writeFile(configFilePath, configFileStr);
+
+ var opts = {
+ config: configFilePath,
+ queryableBackupMode: "",
+ shardsvr: "",
+ dbpath: this.paths[i]
+ };
+
+ assert.commandWorked(this.shardingTest["d" + i].getDB("local").dropDatabase());
+ this.shardingTest.restartMongod(i, opts, () => {
+ makeDirectoryReadOnly(this.paths[i]);
});
}
- };
- runReadOnlyTest = function(test) {
- printjson(test);
+ jsTest.log("restarting mongos...");
- assert.eq(typeof(test.exec), "function");
- assert.eq(typeof(test.load), "function");
- assert.eq(typeof(test.name), "string");
+ this.shardingTest.restartMongos(0);
- var fixtureType = TestData.fixture || "standalone";
+ test.exec(this.shardingTest.getDB("test")[test.name]);
- var fixture = null;
- if (fixtureType === "standalone") {
- fixture = new StandaloneFixture();
- } else if (fixtureType === "sharded") {
- fixture = new ShardedFixture();
- } else {
- throw new Error("fixtureType must be one of either 'standalone' or 'sharded'");
- }
+ this.paths.forEach((path) => {
+ makeDirectoryWritable(path);
+ });
+
+ this.shardingTest.stop();
+ } finally {
+ this.paths.forEach((path) => {
+ makeDirectoryWritable(path);
+ });
+ }
+};
- jsTest.log("starting load phase for test: " + test.name);
- fixture.runLoadPhase(test);
+runReadOnlyTest = function(test) {
+ printjson(test);
- jsTest.log("starting execution phase for test: " + test.name);
- fixture.runExecPhase(test);
- };
+ assert.eq(typeof (test.exec), "function");
+ assert.eq(typeof (test.load), "function");
+ assert.eq(typeof (test.name), "string");
+
+ var fixtureType = TestData.fixture || "standalone";
+
+ var fixture = null;
+ if (fixtureType === "standalone") {
+ fixture = new StandaloneFixture();
+ } else if (fixtureType === "sharded") {
+ fixture = new ShardedFixture();
+ } else {
+ throw new Error("fixtureType must be one of either 'standalone' or 'sharded'");
+ }
- cycleN = function * (arr, N) {
- for (var i = 0; i < N; ++i) {
- yield arr[i % arr.length];
+ jsTest.log("starting load phase for test: " + test.name);
+ fixture.runLoadPhase(test);
+
+ jsTest.log("starting execution phase for test: " + test.name);
+ fixture.runExecPhase(test);
+};
+
+cycleN = function*(arr, N) {
+ for (var i = 0; i < N; ++i) {
+ yield arr[i % arr.length];
+ }
+};
+
+zip2 = function*(iter1, iter2) {
+ var n1 = iter1.next();
+ var n2 = iter2.next();
+ while (!n1.done || !n2.done) {
+ var res = [];
+ if (!n1.done) {
+ res.push(n1.value);
+ n1 = iter1.next();
}
- };
-
- zip2 = function * (iter1, iter2) {
- var n1 = iter1.next();
- var n2 = iter2.next();
- while (!n1.done || !n2.done) {
- var res = [];
- if (!n1.done) {
- res.push(n1.value);
- n1 = iter1.next();
- }
- if (!n2.done) {
- res.push(n2.value);
- n2 = iter2.next();
- }
-
- yield res;
+ if (!n2.done) {
+ res.push(n2.value);
+ n2 = iter2.next();
}
- };
+
+ yield res;
+ }
+};
}());
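
A minimal sketch of the read-only startup StandaloneFixture performs (the dbpath here is a placeholder): reuse an existing data directory in queryableBackupMode without cleaning it first.

    var opts = {queryableBackupMode: "", noCleanData: true, dbpath: "/path/to/existing/dbpath"};
    var mongod = MongoRunner.runMongod(opts);  // MongoRunner comes from the shell's test helpers
    assert.neq(null, mongod);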
diff --git a/jstests/readonly/temp_collection.js b/jstests/readonly/temp_collection.js
index 5f32d723b71..adb10506d19 100644
--- a/jstests/readonly/temp_collection.js
+++ b/jstests/readonly/temp_collection.js
@@ -15,11 +15,9 @@ runReadOnlyTest((function() {
writableDB[collName].drop();
assert.commandWorked(writableDB.runCommand({
- applyOps: [{
- op: "c",
- ns: writableDB.getName() + ".$cmd",
- o: {create: collName, temp: true}
- }]
+ applyOps: [
+ {op: "c", ns: writableDB.getName() + ".$cmd", o: {create: collName, temp: true}}
+ ]
}));
var collectionInfos = writableDB.getCollectionInfos();
diff --git a/jstests/replsets/abort_in_progress_transactions_on_step_up.js b/jstests/replsets/abort_in_progress_transactions_on_step_up.js
index ef2dda4cf80..a47baee071a 100644
--- a/jstests/replsets/abort_in_progress_transactions_on_step_up.js
+++ b/jstests/replsets/abort_in_progress_transactions_on_step_up.js
@@ -4,139 +4,144 @@
* @tags: [uses_transactions, exclude_from_large_txns]
*/
(function() {
- "use strict";
- load("jstests/replsets/rslib.js"); // For reconnect()
- load("jstests/libs/check_log.js");
-
- function getTxnTableEntry(db) {
- let txnTableEntries = db.getSiblingDB("config")["transactions"].find().toArray();
- assert.eq(txnTableEntries.length, 1);
- return txnTableEntries[0];
- }
-
- const replTest = new ReplSetTest({
- nodes: 3,
- nodeOptions: {
- setParameter: {
- maxNumberOfTransactionOperationsInSingleOplogEntry: 1,
- bgSyncOplogFetcherBatchSize: 1
- }
- },
- });
-
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- // Disable primary catchup and chaining.
- config.settings = {catchUpTimeoutMillis: 0, chainingAllowed: false};
- replTest.initiate(config);
-
- setLogVerbosity(replTest.nodes, {"replication": {"verbosity": 3}});
-
- const dbName = jsTest.name();
- const collName = "coll";
-
- const primary = replTest.nodes[0];
- const testDB = primary.getDB(dbName);
- const newPrimary = replTest.nodes[1];
- const newTestDB = newPrimary.getDB(dbName);
-
- testDB.dropDatabase();
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Prevent the priority: 0 node from fetching new ops so that it can vote for the new primary.
- assert.commandWorked(
- replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
-
- jsTest.log("Stop secondary oplog replication before the last operation in the transaction.");
- // The stopReplProducerOnDocument failpoint ensures that secondary stops replicating before
- // applying the last operation in the transaction. This depends on the oplog fetcher batch size
- // being 1.
- assert.commandWorked(newPrimary.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "alwaysOn",
- data: {document: {"applyOps.o._id": "last in txn"}}
- }));
-
- jsTestLog("Starting transaction");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- session.startTransaction({writeConcern: {w: "majority", wtimeout: 500}});
-
- const doc = {_id: "first in txn on primary " + primary};
- assert.commandWorked(sessionDB.getCollection(collName).insert(doc));
- assert.commandWorked(sessionDB.getCollection(collName).insert({_id: "last in txn"}));
-
- jsTestLog("Committing transaction but fail on replication");
- let res = session.commitTransaction_forTesting();
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
-
- // Remember the commit OpTime on primary.
- let txnTableEntry = getTxnTableEntry(testDB);
- assert.eq(txnTableEntry.state, "committed");
- const commitOpTime = getTxnTableEntry(testDB).lastWriteOpTime;
-
- jsTestLog("Wait for the new primary to block on fail point.");
- checkLog.contains(newPrimary, "stopReplProducerOnDocument fail point is enabled.");
-
- // Now the transaction should be in-progress on newPrimary.
- txnTableEntry = getTxnTableEntry(newTestDB);
- assert.eq(txnTableEntry.state, "inProgress");
- // The startOpTime should be less than the commit optime.
- assert.eq(rs.compareOpTimes(txnTableEntry.startOpTime, commitOpTime), -1);
-
- jsTestLog("Stepping down primary via heartbeat.");
- assert.commandWorked(newPrimary.adminCommand({replSetStepUp: 1}));
- replTest.awaitNodesAgreeOnPrimary();
- reconnect(primary);
-
- // Make sure we won't apply the whole transaction by any chance.
- jsTestLog("Wait for the new primary to stop replication after primary catch-up.");
- checkLog.contains(newPrimary, "Stopping replication producer");
-
- jsTestLog("Enable replication on the new primary so that it can finish state transition");
- assert.commandWorked(newPrimary.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "off",
- }));
-
- assert.eq(replTest.getPrimary(), newPrimary);
- assert.commandWorked(
- replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'off'}));
- replTest.awaitReplication();
-
- jsTestLog("The transaction has been aborted on the new primary.");
- // Create a proxy session to reuse the session state of the old primary.
- const newSession = new _DelegatingDriverSession(newPrimary, session);
- const newSessionDB = newSession.getDatabase(dbName);
- // The transaction has been aborted.
- assert.commandFailedWithCode(newSessionDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(newSession.getTxnNumber_forTesting()),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.NoSuchTransaction);
-
- // The old primary rolls back the local committed transaction.
- assert.eq(testDB.getCollection(collName).find().itcount(), 0);
- assert.eq(newTestDB.getCollection(collName).find().itcount(), 0);
-
- // The transaction table should be the same on both old and new primaries.
- txnTableEntry = getTxnTableEntry(newTestDB);
- assert.eq(txnTableEntry.state, "aborted");
- assert(!txnTableEntry.hasOwnProperty("startOpTime"));
- txnTableEntry = getTxnTableEntry(testDB);
- assert.eq(txnTableEntry.state, "aborted");
- assert(!txnTableEntry.hasOwnProperty("startOpTime"));
-
- jsTestLog("Running another transaction on the new primary");
- newSession.startTransaction({writeConcern: {w: 3}});
- const secondDoc = {_id: "second-doc"};
- assert.commandWorked(newSession.getDatabase(dbName).getCollection(collName).insert(secondDoc));
- assert.commandWorked(newSession.commitTransaction_forTesting());
- assert.docEq(testDB.getCollection(collName).find().toArray(), [secondDoc]);
- assert.docEq(newTestDB.getCollection(collName).find().toArray(), [secondDoc]);
-
- replTest.stopSet();
+"use strict";
+load("jstests/replsets/rslib.js"); // For reconnect()
+load("jstests/libs/check_log.js");
+
+function getTxnTableEntry(db) {
+ let txnTableEntries = db.getSiblingDB("config")["transactions"].find().toArray();
+ assert.eq(txnTableEntries.length, 1);
+ return txnTableEntries[0];
+}
+
+const replTest = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {
+ setParameter:
+ {maxNumberOfTransactionOperationsInSingleOplogEntry: 1, bgSyncOplogFetcherBatchSize: 1}
+ },
+});
+
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+// Disable primary catchup and chaining.
+config.settings = {
+ catchUpTimeoutMillis: 0,
+ chainingAllowed: false
+};
+replTest.initiate(config);
+
+setLogVerbosity(replTest.nodes, {"replication": {"verbosity": 3}});
+
+const dbName = jsTest.name();
+const collName = "coll";
+
+const primary = replTest.nodes[0];
+const testDB = primary.getDB(dbName);
+const newPrimary = replTest.nodes[1];
+const newTestDB = newPrimary.getDB(dbName);
+
+testDB.dropDatabase();
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Prevent the priority: 0 node from fetching new ops so that it can vote for the new primary.
+assert.commandWorked(
+ replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
+
+jsTest.log("Stop secondary oplog replication before the last operation in the transaction.");
+// The stopReplProducerOnDocument failpoint ensures that the secondary stops replicating before
+// applying the last operation in the transaction. This depends on the oplog fetcher batch size
+// being 1.
+assert.commandWorked(newPrimary.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "alwaysOn",
+ data: {document: {"applyOps.o._id": "last in txn"}}
+}));
+
+jsTestLog("Starting transaction");
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+session.startTransaction({writeConcern: {w: "majority", wtimeout: 500}});
+
+const doc = {
+ _id: "first in txn on primary " + primary
+};
+assert.commandWorked(sessionDB.getCollection(collName).insert(doc));
+assert.commandWorked(sessionDB.getCollection(collName).insert({_id: "last in txn"}));
+
+jsTestLog("Committing transaction but fail on replication");
+let res = session.commitTransaction_forTesting();
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+
+// Remember the commit OpTime on primary.
+let txnTableEntry = getTxnTableEntry(testDB);
+assert.eq(txnTableEntry.state, "committed");
+const commitOpTime = getTxnTableEntry(testDB).lastWriteOpTime;
+
+jsTestLog("Wait for the new primary to block on fail point.");
+checkLog.contains(newPrimary, "stopReplProducerOnDocument fail point is enabled.");
+
+// Now the transaction should be in-progress on newPrimary.
+txnTableEntry = getTxnTableEntry(newTestDB);
+assert.eq(txnTableEntry.state, "inProgress");
+// The startOpTime should be less than the commit optime.
+assert.eq(rs.compareOpTimes(txnTableEntry.startOpTime, commitOpTime), -1);
+
+jsTestLog("Stepping down primary via heartbeat.");
+assert.commandWorked(newPrimary.adminCommand({replSetStepUp: 1}));
+replTest.awaitNodesAgreeOnPrimary();
+reconnect(primary);
+
+// Make sure we never accidentally apply the whole transaction.
+jsTestLog("Wait for the new primary to stop replication after primary catch-up.");
+checkLog.contains(newPrimary, "Stopping replication producer");
+
+jsTestLog("Enable replication on the new primary so that it can finish state transition");
+assert.commandWorked(newPrimary.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "off",
+}));
+
+assert.eq(replTest.getPrimary(), newPrimary);
+assert.commandWorked(
+ replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'off'}));
+replTest.awaitReplication();
+
+jsTestLog("The transaction has been aborted on the new primary.");
+// Create a proxy session to reuse the session state of the old primary.
+const newSession = new _DelegatingDriverSession(newPrimary, session);
+const newSessionDB = newSession.getDatabase(dbName);
+// The transaction has been aborted.
+assert.commandFailedWithCode(newSessionDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(newSession.getTxnNumber_forTesting()),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// The old primary rolls back the local committed transaction.
+assert.eq(testDB.getCollection(collName).find().itcount(), 0);
+assert.eq(newTestDB.getCollection(collName).find().itcount(), 0);
+
+// The transaction table should be the same on both old and new primaries.
+txnTableEntry = getTxnTableEntry(newTestDB);
+assert.eq(txnTableEntry.state, "aborted");
+assert(!txnTableEntry.hasOwnProperty("startOpTime"));
+txnTableEntry = getTxnTableEntry(testDB);
+assert.eq(txnTableEntry.state, "aborted");
+assert(!txnTableEntry.hasOwnProperty("startOpTime"));
+
+jsTestLog("Running another transaction on the new primary");
+newSession.startTransaction({writeConcern: {w: 3}});
+const secondDoc = {
+ _id: "second-doc"
+};
+assert.commandWorked(newSession.getDatabase(dbName).getCollection(collName).insert(secondDoc));
+assert.commandWorked(newSession.commitTransaction_forTesting());
+assert.docEq(testDB.getCollection(collName).find().toArray(), [secondDoc]);
+assert.docEq(newTestDB.getCollection(collName).find().toArray(), [secondDoc]);
+
+replTest.stopSet();
})();
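
A minimal standalone sketch, separate from the test above, of the two primitives it leans on: toggling a server failpoint with configureFailPoint and inspecting a session's entry in config.transactions. It assumes a mongo shell connected to a replica-set primary as `db`; the failpoint data filter is the same one the test uses.

(function() {
"use strict";
const admin = db.getSiblingDB("admin");
// Enable the failpoint; 'data' restricts it to oplog entries whose applyOps
// payload inserts the given _id, mirroring the filter in the test above.
assert.commandWorked(admin.runCommand({
    configureFailPoint: "stopReplProducerOnDocument",
    mode: "alwaysOn",
    data: {document: {"applyOps.o._id": "last in txn"}}
}));
// Every session that has run a transaction has one document in this table,
// carrying its state ("inProgress", "committed", "aborted") and optimes.
db.getSiblingDB("config").transactions.find().forEach(
    (entry) => printjson({state: entry.state, lastWriteOpTime: entry.lastWriteOpTime}));
// Always turn the failpoint back off.
assert.commandWorked(
    admin.runCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
})();
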
diff --git a/jstests/replsets/agg_write_concern_zero_batch_size.js b/jstests/replsets/agg_write_concern_zero_batch_size.js
index c5b64999166..7e9d91c70c7 100644
--- a/jstests/replsets/agg_write_concern_zero_batch_size.js
+++ b/jstests/replsets/agg_write_concern_zero_batch_size.js
@@ -1,73 +1,73 @@
// Tests that an aggregate sent with batchSize: 0 will still obey the write concern sent on the
// original request, even though the writes happen in the getMore.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
- load("jstests/libs/write_concern_util.js"); // For [stop|restart]ServerReplication.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
+load("jstests/libs/write_concern_util.js"); // For [stop|restart]ServerReplication.
- // Start a replica set with two nodes: one with the default configuration and one with priority
- // zero to ensure we don't have any elections.
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+// Start a replica set with two nodes: one with the default configuration and one with priority
+// zero to ensure we don't have any elections.
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const testDB = rst.getPrimary().getDB("test");
- const source = testDB.agg_write_concern_zero_batch_size;
- const target = testDB.agg_write_concern_zero_batch_size_target;
- assert.commandWorked(source.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+const testDB = rst.getPrimary().getDB("test");
+const source = testDB.agg_write_concern_zero_batch_size;
+const target = testDB.agg_write_concern_zero_batch_size_target;
+assert.commandWorked(source.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- withEachKindOfWriteStage(target, (stageSpec) => {
- assert.commandWorked(target.remove({}));
+withEachKindOfWriteStage(target, (stageSpec) => {
+ assert.commandWorked(target.remove({}));
- // Start an aggregate cursor with a writing stage, but use batchSize: 0 to prevent any
- // writes from happening in this command.
- const response = assert.commandWorked(testDB.runCommand({
- aggregate: source.getName(),
- pipeline: [stageSpec],
- writeConcern: {w: 2, wtimeout: 100},
- cursor: {batchSize: 0}
- }));
- assert.neq(response.cursor.id, 0);
+ // Start an aggregate cursor with a writing stage, but use batchSize: 0 to prevent any
+ // writes from happening in this command.
+ const response = assert.commandWorked(testDB.runCommand({
+ aggregate: source.getName(),
+ pipeline: [stageSpec],
+ writeConcern: {w: 2, wtimeout: 100},
+ cursor: {batchSize: 0}
+ }));
+ assert.neq(response.cursor.id, 0);
- stopServerReplication(rst.getSecondary());
+ stopServerReplication(rst.getSecondary());
- const getMoreResponse = assert.commandFailedWithCode(
- testDB.runCommand({getMore: response.cursor.id, collection: source.getName()}),
- ErrorCodes.WriteConcernFailed);
+ const getMoreResponse = assert.commandFailedWithCode(
+ testDB.runCommand({getMore: response.cursor.id, collection: source.getName()}),
+ ErrorCodes.WriteConcernFailed);
- // Test the same thing but using the shell helpers.
- let error = assert.throws(
- () => source
- .aggregate([stageSpec],
- {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
- .itcount());
- // Unfortunately this is the best way we have to check that the cause of the failure was due
- // to write concern. The aggregate shell helper will assert the command worked. When this
- // fails (as we expect due to write concern) it will create a new error object which loses
- // all structure and just preserves the information as text.
- assert(error instanceof Error);
- assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
+ // Test the same thing but using the shell helpers.
+ let error = assert.throws(
+ () => source
+ .aggregate([stageSpec],
+ {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
+ .itcount());
+    // Unfortunately this is the best way we have to check that the failure was due to write
+    // concern. The aggregate shell helper will assert the command worked. When this
+ // fails (as we expect due to write concern) it will create a new error object which loses
+ // all structure and just preserves the information as text.
+ assert(error instanceof Error);
+ assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
- // Now test without batchSize just to be sure.
- error = assert.throws(
- () => source.aggregate([stageSpec], {writeConcern: {w: 2, wtimeout: 100}}));
- assert(error instanceof Error);
- assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
+ // Now test without batchSize just to be sure.
+ error =
+ assert.throws(() => source.aggregate([stageSpec], {writeConcern: {w: 2, wtimeout: 100}}));
+ assert(error instanceof Error);
+ assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
- // Now switch to legacy OP_GET_MORE read mode. We should get a different error indicating
- // that using writeConcern in this way is unsupported.
- source.getDB().getMongo().forceReadMode("legacy");
- error = assert.throws(
- () => source
- .aggregate([stageSpec],
- {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
- .itcount());
- assert.eq(error.code, 31124);
- source.getDB().getMongo().forceReadMode("commands");
+ // Now switch to legacy OP_GET_MORE read mode. We should get a different error indicating
+ // that using writeConcern in this way is unsupported.
+ source.getDB().getMongo().forceReadMode("legacy");
+ error = assert.throws(
+ () => source
+ .aggregate([stageSpec],
+ {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
+ .itcount());
+ assert.eq(error.code, 31124);
+ source.getDB().getMongo().forceReadMode("commands");
- restartServerReplication(rst.getSecondary());
- });
+ restartServerReplication(rst.getSecondary());
+});
- rst.stopSet();
+rst.stopSet();
}());
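
A minimal sketch of the behavior this test verifies: with cursor: {batchSize: 0}, an aggregate containing a writing stage performs no writes in the initial command, so the writeConcern travels with the cursor and is enforced by the getMore that actually does the work. The collection names here are hypothetical.

(function() {
"use strict";
const testDB = db.getSiblingDB("test");
assert.commandWorked(testDB.runCommand({create: "agg_wc_source"}));
assert.writeOK(testDB.agg_wc_source.insert({_id: 0}));
// batchSize: 0 returns only a cursor id; no documents flow and $out writes nothing yet.
const res = assert.commandWorked(testDB.runCommand({
    aggregate: "agg_wc_source",
    pipeline: [{$out: "agg_wc_target"}],
    writeConcern: {w: 1},
    cursor: {batchSize: 0}
}));
assert.neq(res.cursor.id, 0);
// The $out writes, and any wait on the write concern, happen here in the getMore.
assert.commandWorked(
    testDB.runCommand({getMore: res.cursor.id, collection: "agg_wc_source"}));
})();
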
diff --git a/jstests/replsets/aggregation_write_concern.js b/jstests/replsets/aggregation_write_concern.js
index 8622fc0f113..807da4bf244 100644
--- a/jstests/replsets/aggregation_write_concern.js
+++ b/jstests/replsets/aggregation_write_concern.js
@@ -3,47 +3,47 @@
* not wait for the writeConcern specified to be satisfied.
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
- // restartReplicationOnSecondaries
- const name = "aggregation_write_concern";
-
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
-
- replTest.startSet();
- replTest.initiate();
-
- const testDB = replTest.getPrimary().getDB(name);
- const collectionName = "test";
-
- // Stop replication and perform a w: 1 write. This will block subsequent 'writeConcern:
- // majority' reads if the read command waits on writeConcern.
-
- stopReplicationOnSecondaries(replTest);
- assert.commandWorked(
- testDB.runCommand({insert: collectionName, documents: [{_id: 1}], writeConcern: {w: 1}}));
-
- // A read-only aggregation accepts the writeConcern option but does not wait for it.
- let res = assert.commandWorked(testDB.runCommand({
- aggregate: collectionName,
- pipeline: [{$match: {_id: 1}}],
- cursor: {},
- writeConcern: {w: "majority"}
- }));
- assert(res.cursor.firstBatch.length);
- assert.eq(res.cursor.firstBatch[0], {_id: 1});
-
- // An aggregation pipeline that writes will block on writeConcern.
- assert.commandFailedWithCode(testDB.runCommand({
- aggregate: collectionName,
- pipeline: [{$match: {_id: 1}}, {$out: collectionName + "_out"}],
- cursor: {},
- writeConcern: {w: "majority", wtimeout: 1000}
- }),
- ErrorCodes.WriteConcernFailed);
-
- restartReplicationOnSecondaries(replTest);
- replTest.awaitLastOpCommitted();
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
+ // restartReplicationOnSecondaries
+const name = "aggregation_write_concern";
+
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+
+replTest.startSet();
+replTest.initiate();
+
+const testDB = replTest.getPrimary().getDB(name);
+const collectionName = "test";
+
+// Stop replication and perform a w: 1 write. This will block subsequent 'writeConcern:
+// majority' reads if the read command waits on writeConcern.
+
+stopReplicationOnSecondaries(replTest);
+assert.commandWorked(
+ testDB.runCommand({insert: collectionName, documents: [{_id: 1}], writeConcern: {w: 1}}));
+
+// A read-only aggregation accepts the writeConcern option but does not wait for it.
+let res = assert.commandWorked(testDB.runCommand({
+ aggregate: collectionName,
+ pipeline: [{$match: {_id: 1}}],
+ cursor: {},
+ writeConcern: {w: "majority"}
+}));
+assert(res.cursor.firstBatch.length);
+assert.eq(res.cursor.firstBatch[0], {_id: 1});
+
+// An aggregation pipeline that writes will block on writeConcern.
+assert.commandFailedWithCode(testDB.runCommand({
+ aggregate: collectionName,
+ pipeline: [{$match: {_id: 1}}, {$out: collectionName + "_out"}],
+ cursor: {},
+ writeConcern: {w: "majority", wtimeout: 1000}
+}),
+ ErrorCodes.WriteConcernFailed);
+
+restartReplicationOnSecondaries(replTest);
+replTest.awaitLastOpCommitted();
+replTest.stopSet();
})();
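
The same contract restated as a short shell-helper sketch: a read-only pipeline accepts a writeConcern option without waiting on it, while a pipeline that writes does wait. The collection name is hypothetical, and the failing branch assumes replication to the secondary is currently stopped, as in the test.

(function() {
"use strict";
const coll = db.getSiblingDB("test").agg_wc_demo;
assert.writeOK(coll.insert({_id: 1}));
// Read-only: returns immediately even if "majority" is not currently satisfiable.
const docs = coll.aggregate([{$match: {_id: 1}}], {writeConcern: {w: "majority"}}).toArray();
assert.eq(docs.length, 1);
// Writing: $out blocks on the write concern; with replication stopped this throws
// after the wtimeout instead of returning.
assert.throws(() => coll.aggregate([{$match: {_id: 1}}, {$out: "agg_wc_demo_out"}],
                                   {writeConcern: {w: "majority", wtimeout: 1000}}));
})();
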
diff --git a/jstests/replsets/already_checked_out_session.js b/jstests/replsets/already_checked_out_session.js
index 3a4ae11840e..ac5a76fbfe9 100644
--- a/jstests/replsets/already_checked_out_session.js
+++ b/jstests/replsets/already_checked_out_session.js
@@ -5,85 +5,81 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js");
+load("jstests/libs/parallelTester.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- function doInsertWithSession(host, lsid, txnNumber) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({
- insert: "mycoll",
- documents: [{_id: txnNumber}],
- lsid: {id: eval(lsid)},
- txnNumber: NumberLong(txnNumber),
- }));
- return {ok: 1};
- } catch (e) {
- print("doInsertWithSession failed with " + e.toString());
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
+function doInsertWithSession(host, lsid, txnNumber) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+ assert.commandWorked(db.runCommand({
+ insert: "mycoll",
+ documents: [{_id: txnNumber}],
+ lsid: {id: eval(lsid)},
+ txnNumber: NumberLong(txnNumber),
+ }));
+ return {ok: 1};
+ } catch (e) {
+ print("doInsertWithSession failed with " + e.toString());
+ return {ok: 0, error: e.toString(), stack: e.stack};
}
+}
- let thread1;
- let thread2;
+let thread1;
+let thread2;
- // We fsyncLock the server so that a transaction operation will block waiting for a lock.
- assert.commandWorked(db.fsyncLock());
- try {
- // JavaScript objects backed by C++ objects (e.g. BSON values) do not serialize correctly
- // when passed through the ScopedThread constructor. To work around this behavior, we
- // instead pass a stringified form of the JavaScript object through the ScopedThread
- // constructor and use eval() to rehydrate it.
- const lsid = UUID();
- thread1 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 1);
- thread1.start();
+// We fsyncLock the server so that a transaction operation will block waiting for a lock.
+assert.commandWorked(db.fsyncLock());
+try {
+ // JavaScript objects backed by C++ objects (e.g. BSON values) do not serialize correctly
+ // when passed through the ScopedThread constructor. To work around this behavior, we
+ // instead pass a stringified form of the JavaScript object through the ScopedThread
+ // constructor and use eval() to rehydrate it.
+ const lsid = UUID();
+ thread1 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 1);
+ thread1.start();
- assert.soon(
- () => {
- const ops = db.currentOp({
- "command.insert": "mycoll",
- "command.txnNumber": {$eq: 1},
- waitingForLock: true
- });
- return ops.inprog.length === 1;
- },
- () => {
- return "insert operation with txnNumber 1 was not found: " + tojson(db.currentOp());
- });
+ assert.soon(
+ () => {
+ const ops = db.currentOp(
+ {"command.insert": "mycoll", "command.txnNumber": {$eq: 1}, waitingForLock: true});
+ return ops.inprog.length === 1;
+ },
+ () => {
+ return "insert operation with txnNumber 1 was not found: " + tojson(db.currentOp());
+ });
- thread2 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 2);
- thread2.start();
+ thread2 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 2);
+ thread2.start();
- // Run currentOp() again to ensure that thread2 has started its insert command.
- assert.soon(
- () => {
- const ops =
- db.currentOp({"command.insert": "mycoll", "command.txnNumber": {$eq: 2}});
- return ops.inprog.length === 1;
- },
- () => {
- return "insert operation with txnNumber 2 was not found: " + tojson(db.currentOp());
- });
- } finally {
- // We run the fsyncUnlock command in a finally block to avoid leaving the server fsyncLock'd
- // if the test were to fail.
- assert.commandWorked(db.fsyncUnlock());
- }
+ // Run currentOp() again to ensure that thread2 has started its insert command.
+ assert.soon(
+ () => {
+ const ops = db.currentOp({"command.insert": "mycoll", "command.txnNumber": {$eq: 2}});
+ return ops.inprog.length === 1;
+ },
+ () => {
+ return "insert operation with txnNumber 2 was not found: " + tojson(db.currentOp());
+ });
+} finally {
+ // We run the fsyncUnlock command in a finally block to avoid leaving the server fsyncLock'd
+ // if the test were to fail.
+ assert.commandWorked(db.fsyncUnlock());
+}
- thread1.join();
- thread2.join();
+thread1.join();
+thread2.join();
- assert.commandWorked(thread1.returnData());
- assert.commandWorked(thread2.returnData());
+assert.commandWorked(thread1.returnData());
+assert.commandWorked(thread2.returnData());
- rst.stopSet();
+rst.stopSet();
})();
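
A minimal sketch of the serialization workaround this test's comment describes: BSON-backed values such as a UUID do not survive being passed through the ScopedThread constructor, so they are stringified with tojson() and rehydrated with eval() inside the worker. The collection name here is hypothetical.

(function() {
"use strict";
load("jstests/libs/parallelTester.js");  // For ScopedThread.

function insertWithLsid(host, lsidString) {
    const conn = new Mongo(host);
    const lsid = eval(lsidString);  // Rehydrate the UUID on the worker side.
    return conn.getDB("test").runCommand(
        {insert: "mycoll", documents: [{x: 1}], lsid: {id: lsid}});
}

const lsid = UUID();
const thread = new ScopedThread(insertWithLsid, db.getMongo().host, tojson(lsid));
thread.start();
thread.join();
assert.commandWorked(thread.returnData());
})();
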
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index 2f8dc8d1ebc..a82ca0880cb 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -19,88 +19,90 @@
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
+"use strict";
- function tsToDate(ts) {
- return new Date(ts.getTime() * 1000);
- }
+function tsToDate(ts) {
+ return new Date(ts.getTime() * 1000);
+}
- var replTest =
- new ReplSetTest({name: "apply_batch_only_goes_forward", nodes: [{}, {}, {arbiter: true}]});
+var replTest =
+ new ReplSetTest({name: "apply_batch_only_goes_forward", nodes: [{}, {}, {arbiter: true}]});
- var nodes = replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
- var mTest = master.getDB("test");
- var mLocal = master.getDB("local");
- var mMinvalid = mLocal["replset.minvalid"];
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var mTest = master.getDB("test");
+var mLocal = master.getDB("local");
+var mMinvalid = mLocal["replset.minvalid"];
- var slave = replTest.getSecondary();
- var sTest = slave.getDB("test");
- var sLocal = slave.getDB("local");
- var sMinvalid = sLocal["replset.minvalid"];
- var stepDownSecs = 30;
- var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
+var slave = replTest.getSecondary();
+var sTest = slave.getDB("test");
+var sLocal = slave.getDB("local");
+var sMinvalid = sLocal["replset.minvalid"];
+var stepDownSecs = 30;
+var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
- // Write op
- assert.writeOK(mTest.foo.save(
- {}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- replTest.waitForState(slave, ReplSetTest.State.SECONDARY);
- assert.writeOK(mTest.foo.save(
- {}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+// Write op
+assert.writeOK(
+ mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+replTest.waitForState(slave, ReplSetTest.State.SECONDARY);
+assert.writeOK(
+ mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- // Set minvalid to something far in the future for the current primary, to simulate recovery.
- // Note: This is so far in the future (5 days) that it will never become secondary.
- var farFutureTS = new Timestamp(
- Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
+// Set minvalid to something far in the future for the current primary, to simulate recovery.
+// Note: This is so far in the future (5 days) that the node will never become secondary.
+var farFutureTS = new Timestamp(
+ Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
- jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
- // We do an update in case there is a minvalid document on the primary already.
- // If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
- // that update returns details of the write, like whether an update or insert was performed.
- const minValidUpdate = {$set: {ts: farFutureTS}};
- jsTestLog("Current minvalid is " + tojson(mMinvalid.findOne()));
- jsTestLog("Updating minValid to: " + tojson(minValidUpdate));
- printjson(assert.writeOK(mMinvalid.update(
- {},
- minValidUpdate,
- {upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})));
+jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
+// We do an update in case there is a minvalid document on the primary already.
+// If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
+// that update returns details of the write, like whether an update or insert was performed.
+const minValidUpdate = {
+ $set: {ts: farFutureTS}
+};
+jsTestLog("Current minvalid is " + tojson(mMinvalid.findOne()));
+jsTestLog("Updating minValid to: " + tojson(minValidUpdate));
+printjson(assert.writeOK(mMinvalid.update(
+ {},
+ minValidUpdate,
+ {upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})));
- jsTest.log('Restarting primary ' + master.host +
- ' with updated minValid. This node will go into RECOVERING upon restart. ' +
- 'Secondary ' + slave.host + ' will become new primary.');
- clearRawMongoProgramOutput();
- replTest.restart(master);
- printjson(sLocal.adminCommand("isMaster"));
- replTest.waitForState(master, ReplSetTest.State.RECOVERING);
+jsTest.log('Restarting primary ' + master.host +
+ ' with updated minValid. This node will go into RECOVERING upon restart. ' +
+ 'Secondary ' + slave.host + ' will become new primary.');
+clearRawMongoProgramOutput();
+replTest.restart(master);
+printjson(sLocal.adminCommand("isMaster"));
+replTest.waitForState(master, ReplSetTest.State.RECOVERING);
- replTest.awaitNodesAgreeOnPrimary();
- // Slave is now master... Do a write to advance the optime on the primary so that it will be
- // considered as a sync source - this is more relevant to PV0 because we do not write a new
- // entry to the oplog on becoming primary.
- assert.writeOK(replTest.getPrimary().getDB("test").foo.save(
- {}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+replTest.awaitNodesAgreeOnPrimary();
+// Slave is now master... Do a write to advance the optime on the primary so that it will be
+// considered a sync source - this is more relevant to PV0 because we do not write a new
+// entry to the oplog on becoming primary.
+assert.writeOK(replTest.getPrimary().getDB("test").foo.save(
+ {}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- // Sync source selection will log this message if it does not detect min valid in the sync
- // source candidate's oplog.
- assert.soon(function() {
- return rawMongoProgramOutput().match(
- 'it does not contain the necessary operations for us to reach a consistent state');
- });
+// Sync source selection will log this message if it does not detect min valid in the sync
+// source candidate's oplog.
+assert.soon(function() {
+ return rawMongoProgramOutput().match(
+ 'it does not contain the necessary operations for us to reach a consistent state');
+});
- assert.soon(function() {
- var mv;
- try {
- mv = mMinvalid.findOne();
- } catch (e) {
- return false;
- }
- var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) +
- " - " + tsToDate(mv.ts);
- assert.eq(farFutureTS, mv.ts, msg);
- return true;
- });
+assert.soon(function() {
+ var mv;
+ try {
+ mv = mMinvalid.findOne();
+ } catch (e) {
+ return false;
+ }
+ var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) +
+ " - " + tsToDate(mv.ts);
+ assert.eq(farFutureTS, mv.ts, msg);
+ return true;
+});
- // Shut down the set and finish the test.
- replTest.stopSet();
+// Shut down the set and finish the test.
+replTest.stopSet();
})();
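
A short sketch of the timestamp arithmetic the test uses for its far-future minvalid: the first component of a Timestamp is seconds since the epoch, so it is derived from Date.getTime() (milliseconds) divided by 1000, and tsToDate() inverts the conversion.

(function() {
"use strict";
function tsToDate(ts) {
    // Timestamp.getTime() returns the seconds component.
    return new Date(ts.getTime() * 1000);
}
const fiveDaysInSeconds = 60 * 60 * 24 * 5;
const farFutureTS =
    new Timestamp(Math.floor(new Date().getTime() / 1000) + fiveDaysInSeconds, 0);
print("future TS: " + tojson(farFutureTS) + ", date: " + tsToDate(farFutureTS));
})();
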
diff --git a/jstests/replsets/apply_batches_totalMillis.js b/jstests/replsets/apply_batches_totalMillis.js
index 9e093211cb6..fd8b2872065 100644
--- a/jstests/replsets/apply_batches_totalMillis.js
+++ b/jstests/replsets/apply_batches_totalMillis.js
@@ -5,59 +5,58 @@
*/
(function() {
- "use strict";
-
- // Gets the value of metrics.repl.apply.batches.totalMillis.
- function getTotalMillis(node) {
- return assert.commandWorked(node.adminCommand({serverStatus: 1}))
- .metrics.repl.apply.batches.totalMillis;
- }
-
- // Do a bulk insert of documents as: {{key: 0}, {key: 1}, {key: 2}, ... , {key: num-1}}
- function performBulkInsert(coll, key, num) {
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < num; i++) {
- let doc = {};
- doc[key] = i;
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
- rst.awaitReplication();
+"use strict";
+
+// Gets the value of metrics.repl.apply.batches.totalMillis.
+function getTotalMillis(node) {
+ return assert.commandWorked(node.adminCommand({serverStatus: 1}))
+ .metrics.repl.apply.batches.totalMillis;
+}
+
+// Do a bulk insert of documents as: {{key: 0}, {key: 1}, {key: 2}, ... , {key: num-1}}
+function performBulkInsert(coll, key, num) {
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < num; i++) {
+ let doc = {};
+ doc[key] = i;
+ bulk.insert(doc);
}
+ assert.writeOK(bulk.execute());
+ rst.awaitReplication();
+}
- let name = "apply_batches_totalMillis";
- let rst = new ReplSetTest({name: name, nodes: 2});
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
- let secondary = rst.getSecondary();
- let coll = primary.getDB(name)["foo"];
+let name = "apply_batches_totalMillis";
+let rst = new ReplSetTest({name: name, nodes: 2});
+rst.startSet();
+rst.initiate();
- // Perform an initial write on the system and ensure steady state.
- assert.writeOK(coll.insert({init: 0}));
- rst.awaitReplication();
- let baseTime = getTotalMillis(secondary);
+let primary = rst.getPrimary();
+let secondary = rst.getSecondary();
+let coll = primary.getDB(name)["foo"];
- // Introduce a small load and wait for it to be replicated.
- performBulkInsert(coll, "small", 1000);
+// Perform an initial write on the system and ensure steady state.
+assert.writeOK(coll.insert({init: 0}));
+rst.awaitReplication();
+let baseTime = getTotalMillis(secondary);
- // Record the time spent applying the small load.
- let timeAfterSmall = getTotalMillis(secondary);
- let deltaSmall = timeAfterSmall - baseTime;
+// Introduce a small load and wait for it to be replicated.
+performBulkInsert(coll, "small", 1000);
- // Insert a significantly larger load.
- performBulkInsert(coll, "large", 20000);
+// Record the time spent applying the small load.
+let timeAfterSmall = getTotalMillis(secondary);
+let deltaSmall = timeAfterSmall - baseTime;
- // Record the time spent applying the large load.
- let timeAfterLarge = getTotalMillis(secondary);
- let deltaLarge = timeAfterLarge - timeAfterSmall;
+// Insert a significantly larger load.
+performBulkInsert(coll, "large", 20000);
- jsTestLog(`Recorded deltas: {small: ${deltaSmall}ms, large: ${deltaLarge}ms}.`);
+// Record the time spent applying the large load.
+let timeAfterLarge = getTotalMillis(secondary);
+let deltaLarge = timeAfterLarge - timeAfterSmall;
- // We should have recorded at least as much time on the second load as we did on the first.
- // This is a crude comparison that is only taken to check that the timer is used correctly.
- assert(deltaLarge >= deltaSmall, "Expected a higher net totalMillis for the larger load.");
- rst.stopSet();
+jsTestLog(`Recorded deltas: {small: ${deltaSmall}ms, large: ${deltaLarge}ms}.`);
+// We should have recorded at least as much time on the second load as we did on the first.
+// This is a crude comparison that is only taken to check that the timer is used correctly.
+assert(deltaLarge >= deltaSmall, "Expected a higher net totalMillis for the larger load.");
+rst.stopSet();
})();
\ No newline at end of file
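
A one-line sketch of the counter this test compares: serverStatus on a replica-set member exposes the cumulative time spent applying oplog batches under metrics.repl.apply.batches.totalMillis.

(function() {
"use strict";
const status = assert.commandWorked(db.adminCommand({serverStatus: 1}));
// Cumulative milliseconds this node has spent applying oplog batches.
print("totalMillis: " + status.metrics.repl.apply.batches.totalMillis);
})();
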
diff --git a/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js b/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js
index 05cb6f9e996..a5444cb51cb 100644
--- a/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js
+++ b/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js
@@ -1,11 +1,11 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
+load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
- new ApplyOpsConcurrentNonAtomicTest({
- ns1: 'test1.coll1',
- ns2: 'test2.coll2',
- requiresDocumentLevelConcurrency: false,
- }).run();
+new ApplyOpsConcurrentNonAtomicTest({
+ ns1: 'test1.coll1',
+ ns2: 'test2.coll2',
+ requiresDocumentLevelConcurrency: false,
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js b/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js
index 004eeaaa52f..d6346e95a3a 100644
--- a/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js
+++ b/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js
@@ -1,11 +1,11 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
+load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
- new ApplyOpsConcurrentNonAtomicTest({
- ns1: 'test.coll',
- ns2: 'test.coll',
- requiresDocumentLevelConcurrency: true,
- }).run();
+new ApplyOpsConcurrentNonAtomicTest({
+ ns1: 'test.coll',
+ ns2: 'test.coll',
+ requiresDocumentLevelConcurrency: true,
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js b/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js
index 10f874382a5..5553aa1341a 100644
--- a/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js
+++ b/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js
@@ -1,11 +1,11 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
+load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
- new ApplyOpsConcurrentNonAtomicTest({
- ns1: 'test.coll1',
- ns2: 'test.coll2',
- requiresDocumentLevelConcurrency: false,
- }).run();
+new ApplyOpsConcurrentNonAtomicTest({
+ ns1: 'test.coll1',
+ ns2: 'test.coll2',
+ requiresDocumentLevelConcurrency: false,
+}).run();
}());
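
The three apply_ops_concurrent_non_atomic_* files above differ only in the namespaces handed to the shared library. A hypothetical fourth combination, the same collection name across two different databases, would follow the identical pattern; requiresDocumentLevelConcurrency stays false because the writes never touch the same collection.

(function() {
'use strict';
load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');

new ApplyOpsConcurrentNonAtomicTest({
    ns1: 'dbA.coll',  // Hypothetical namespaces: same collection name,
    ns2: 'dbB.coll',  // two different databases.
    requiresDocumentLevelConcurrency: false,
}).run();
}());
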
diff --git a/jstests/replsets/apply_ops_create_indexes.js b/jstests/replsets/apply_ops_create_indexes.js
index ea97ce5fd9d..f2244efcba9 100644
--- a/jstests/replsets/apply_ops_create_indexes.js
+++ b/jstests/replsets/apply_ops_create_indexes.js
@@ -3,121 +3,114 @@
* correctly (see SERVER-31435).
*/
(function() {
- "use strict";
- let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
- let cmd = {listIndexes: collName};
- let res = testDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- let indexes = new DBCommandCursor(testDB, res).toArray();
+"use strict";
+let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
+ let cmd = {listIndexes: collName};
+ let res = testDB.runCommand(cmd);
+ assert.commandWorked(res, "could not run " + tojson(cmd));
+ let indexes = new DBCommandCursor(testDB, res).toArray();
- assert.eq(indexes.length, expectedNumIndexes);
+ assert.eq(indexes.length, expectedNumIndexes);
- let foundIndex = false;
- for (let i = 0; i < indexes.length; ++i) {
- if (indexes[i].name == indexName) {
- foundIndex = true;
- }
+ let foundIndex = false;
+ for (let i = 0; i < indexes.length; ++i) {
+ if (indexes[i].name == indexName) {
+ foundIndex = true;
}
- assert(foundIndex,
- "did not find the index '" + indexName + "' amongst the collection indexes: " +
- tojson(indexes));
- };
+ }
+ assert(foundIndex,
+ "did not find the index '" + indexName +
+ "' amongst the collection indexes: " + tojson(indexes));
+};
- let ensureOplogEntryExists = function(localDB, indexName) {
- // Make sure the oplog entry for index creation exists in the oplog.
- let cmd = {find: "oplog.rs"};
- let res = localDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- let cursor = new DBCommandCursor(localDB, res);
- let errMsg =
- "expected more data from command " + tojson(cmd) + ", with result " + tojson(res);
- assert(cursor.hasNext(), errMsg);
- let oplog = localDB.getCollection("oplog.rs");
- let query = {$and: [{"o.createIndexes": {$exists: true}}, {"o.name": indexName}]};
- let resCursor = oplog.find(query);
- assert.eq(resCursor.count(),
- 1,
- "Expected the query " + tojson(query) + " to return exactly 1 document");
- };
+let ensureOplogEntryExists = function(localDB, indexName) {
+ // Make sure the oplog entry for index creation exists in the oplog.
+ let cmd = {find: "oplog.rs"};
+ let res = localDB.runCommand(cmd);
+ assert.commandWorked(res, "could not run " + tojson(cmd));
+ let cursor = new DBCommandCursor(localDB, res);
+ let errMsg = "expected more data from command " + tojson(cmd) + ", with result " + tojson(res);
+ assert(cursor.hasNext(), errMsg);
+ let oplog = localDB.getCollection("oplog.rs");
+ let query = {$and: [{"o.createIndexes": {$exists: true}}, {"o.name": indexName}]};
+ let resCursor = oplog.find(query);
+ assert.eq(resCursor.count(),
+ 1,
+ "Expected the query " + tojson(query) + " to return exactly 1 document");
+};
- let rst = new ReplSetTest({nodes: 3});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
- let collName = "create_indexes_col";
- let dbName = "create_indexes_db";
+let collName = "create_indexes_col";
+let dbName = "create_indexes_db";
- let primaryTestDB = rst.getPrimary().getDB(dbName);
- let cmd = {"create": collName};
- let res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
+let primaryTestDB = rst.getPrimary().getDB(dbName);
+let cmd = {"create": collName};
+let res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
- // Create an index via the applyOps command with the createIndexes command format and make sure
- // it exists.
- let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
- let cmdFormatIndexNameA = "a_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- ui: uuid,
- o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
- }]
- };
- res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
+// Create an index via the applyOps command with the createIndexes command format and make sure
+// it exists.
+let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
+let cmdFormatIndexNameA = "a_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ ui: uuid,
+ o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
+ }]
+};
+res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
- // Same as directly above, but ensure that applyOps createIndexes can work without a uuid.
- let cmdFormatIndexNameB = "b_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- o: {createIndexes: collName, v: 2, key: {b: 1}, name: cmdFormatIndexNameB}
- }]
- };
- res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameB, 3);
+// Same as directly above, but ensure that applyOps createIndexes can work without a uuid.
+let cmdFormatIndexNameB = "b_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ o: {createIndexes: collName, v: 2, key: {b: 1}, name: cmdFormatIndexNameB}
+ }]
+};
+res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameB, 3);
- // Test with a background index.
- let cmdFormatIndexNameC = "c_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- ui: uuid,
- o: {
- createIndexes: collName,
- v: 2,
- key: {c: 1},
- name: cmdFormatIndexNameC,
- background: true
- }
- }]
- };
- assert.commandWorked(primaryTestDB.runCommand(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameC, 4);
+// Test with a background index.
+let cmdFormatIndexNameC = "c_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ ui: uuid,
+ o: {createIndexes: collName, v: 2, key: {c: 1}, name: cmdFormatIndexNameC, background: true}
+ }]
+};
+assert.commandWorked(primaryTestDB.runCommand(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameC, 4);
- let localDB = rst.getPrimary().getDB("local");
- ensureOplogEntryExists(localDB, cmdFormatIndexNameA);
- ensureOplogEntryExists(localDB, cmdFormatIndexNameB);
- ensureOplogEntryExists(localDB, cmdFormatIndexNameC);
+let localDB = rst.getPrimary().getDB("local");
+ensureOplogEntryExists(localDB, cmdFormatIndexNameA);
+ensureOplogEntryExists(localDB, cmdFormatIndexNameB);
+ensureOplogEntryExists(localDB, cmdFormatIndexNameC);
- // Make sure the indexes were replicated to the secondaries.
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
- let secondaries = rst.getSecondaries();
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryTestDB = secondaries[j].getDB(dbName);
- ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameA, 4);
- ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameB, 4);
- ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameC, 4);
- }
+// Make sure the indexes were replicated to the secondaries.
+rst.waitForAllIndexBuildsToFinish(dbName, collName);
+let secondaries = rst.getSecondaries();
+for (let j = 0; j < secondaries.length; j++) {
+ let secondaryTestDB = secondaries[j].getDB(dbName);
+ ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameA, 4);
+ ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameB, 4);
+ ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameC, 4);
+}
- rst.stopSet();
+rst.stopSet();
}());
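
A compact sketch of the applyOps entry shape this test exercises: a command entry (op: "c") whose payload is the createIndexes command, optionally targeting the collection by UUID via the ui field. The database, collection, and index names here are hypothetical.

(function() {
"use strict";
const mydb = db.getSiblingDB("apply_ops_demo");
assert.commandWorked(mydb.createCollection("coll"));
const uuid = mydb.getCollectionInfos({name: "coll"})[0].info.uuid;
assert.commandWorked(mydb.adminCommand({
    applyOps: [{
        op: "c",                   // A command entry.
        ns: "apply_ops_demo.coll",
        ui: uuid,                  // Optional: pin the target collection by UUID.
        o: {createIndexes: "coll", v: 2, key: {a: 1}, name: "a_1"}
    }]
}));
assert.eq(mydb.coll.getIndexes().filter((ix) => ix.name === "a_1").length, 1);
})();
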
diff --git a/jstests/replsets/apply_ops_create_view.js b/jstests/replsets/apply_ops_create_view.js
index 0aac71ee490..9535790f5a9 100644
--- a/jstests/replsets/apply_ops_create_view.js
+++ b/jstests/replsets/apply_ops_create_view.js
@@ -1,19 +1,21 @@
(function() {
- // Test applyOps behavior for view creation.
- "use strict";
+// Test applyOps behavior for view creation.
+"use strict";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const db = replTest.getPrimary().getDB('test');
+const db = replTest.getPrimary().getDB('test');
- assert.commandWorked(db.createCollection("bar"));
- assert.writeOK(db.bar.insert({a: 1, b: "hi"}));
+assert.commandWorked(db.createCollection("bar"));
+assert.writeOK(db.bar.insert({a: 1, b: "hi"}));
- const cmd = {applyOps: [{op: "c", ns: db + ".$cmd", o: {create: "foo", viewOn: "bar"}}]};
- assert.commandWorked(db.runCommand(cmd), tojson(cmd));
- assert.eq(db.foo.findOne({a: 1}).b, "hi");
+const cmd = {
+ applyOps: [{op: "c", ns: db + ".$cmd", o: {create: "foo", viewOn: "bar"}}]
+};
+assert.commandWorked(db.runCommand(cmd), tojson(cmd));
+assert.eq(db.foo.findOne({a: 1}).b, "hi");
- replTest.stopSet();
+replTest.stopSet();
}());
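
The same applyOps mechanism applied to view creation, as a minimal sketch with hypothetical names; command entries use the "<db>.$cmd" namespace form, as in the test above.

(function() {
"use strict";
const mydb = db.getSiblingDB("view_demo");
assert.commandWorked(mydb.createCollection("bar"));
assert.writeOK(mydb.bar.insert({a: 1, b: "hi"}));
// op: "c" with a create/viewOn payload defines a view through applyOps.
assert.commandWorked(mydb.adminCommand(
    {applyOps: [{op: "c", ns: "view_demo.$cmd", o: {create: "foo", viewOn: "bar"}}]}));
assert.eq(mydb.foo.findOne({a: 1}).b, "hi");
})();
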
diff --git a/jstests/replsets/apply_ops_create_with_uuid.js b/jstests/replsets/apply_ops_create_with_uuid.js
index d3f69fc2a71..258fd0b6525 100644
--- a/jstests/replsets/apply_ops_create_with_uuid.js
+++ b/jstests/replsets/apply_ops_create_with_uuid.js
@@ -1,51 +1,50 @@
(function() {
- // Test applyOps behavior for collection creation with explicit UUIDs.
- "use strict";
-
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- const db = replTest.getPrimary().getDB('test');
-
- const uuid = UUID();
- // Two applyOps to create a foo collection with given uuid, one each for 'test' and 'test2' dbs.
- var ops = (uuid => ["test", "test2"].map(db => {
- return {op: "c", ns: db + ".$cmd", ui: uuid, o: {create: "foo"}};
- }))(uuid);
-
- function checkUUID(coll, uuid) {
- const cmd = {listCollections: 1, filter: {name: coll}};
- const res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
- assert.eq(res.cursor.firstBatch[0].info.uuid,
- uuid,
- tojson(cmd) + " did not return expected uuid: " + tojson(res));
- }
-
- jsTestLog("Create a test.foo collection with uuid " + uuid + " through applyOps.");
- let cmd = {applyOps: [ops[0]]};
- let res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
-
- // Check that test.foo has the expected UUID.
- jsTestLog("Check that test.foo has UUID " + uuid);
- checkUUID("foo", uuid);
-
- // Change the ops to refer to bar, instead of foo. Command should still work, renaming the
- // collection. Second command should fail as it tries to associate the "test2.foo" name with
- // an existing collection in the "test" database. This must fail.
- jsTestLog("Create test.bar and try to create test2.foo collections with the same UUID.");
- ops[0].o.create = "bar";
- res = assert.commandFailed(db.runCommand({applyOps: ops}));
- assert.eq(res.results,
- [true, false],
- "expected first operation " + tojson(ops[0]) + " to succeed, and second operation " +
- tojson(ops[1]) + " to fail, got " + tojson(res));
-
- jsTestLog("Check that test.bar has UUID " + uuid);
- checkUUID("bar", uuid);
- jsTestLog("Check that test.foo no longer exists");
- assert.eq(db.getCollectionInfos({name: "foo"}).length,
- 0,
- "expected foo collection to no longer exist");
- replTest.stopSet();
+// Test applyOps behavior for collection creation with explicit UUIDs.
+"use strict";
+
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+const db = replTest.getPrimary().getDB('test');
+
+const uuid = UUID();
+// Two applyOps to create a foo collection with given uuid, one each for 'test' and 'test2' dbs.
+var ops = (uuid => ["test", "test2"].map(db => {
+ return {op: "c", ns: db + ".$cmd", ui: uuid, o: {create: "foo"}};
+}))(uuid);
+
+function checkUUID(coll, uuid) {
+ const cmd = {listCollections: 1, filter: {name: coll}};
+ const res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
+ assert.eq(res.cursor.firstBatch[0].info.uuid,
+ uuid,
+ tojson(cmd) + " did not return expected uuid: " + tojson(res));
+}
+
+jsTestLog("Create a test.foo collection with uuid " + uuid + " through applyOps.");
+let cmd = {applyOps: [ops[0]]};
+let res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
+
+// Check that test.foo has the expected UUID.
+jsTestLog("Check that test.foo has UUID " + uuid);
+checkUUID("foo", uuid);
+
+// Change the ops to refer to bar instead of foo. The first command should still work, renaming
+// the collection. The second command must fail, as it tries to associate the "test2.foo" name
+// with an existing collection in the "test" database.
+jsTestLog("Create test.bar and try to create test2.foo collections with the same UUID.");
+ops[0].o.create = "bar";
+res = assert.commandFailed(db.runCommand({applyOps: ops}));
+assert.eq(res.results,
+ [true, false],
+ "expected first operation " + tojson(ops[0]) + " to succeed, and second operation " +
+ tojson(ops[1]) + " to fail, got " + tojson(res));
+
+jsTestLog("Check that test.bar has UUID " + uuid);
+checkUUID("bar", uuid);
+jsTestLog("Check that test.foo no longer exists");
+assert.eq(
+ db.getCollectionInfos({name: "foo"}).length, 0, "expected foo collection to no longer exist");
+replTest.stopSet();
}());
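
A minimal sketch of the UUID pinning this test is about: a create entry carrying a ui field binds the collection name to that UUID, and listCollections reports it back. Names here are hypothetical.

(function() {
"use strict";
const mydb = db.getSiblingDB("uuid_demo");
const uuid = UUID();
assert.commandWorked(mydb.adminCommand(
    {applyOps: [{op: "c", ns: "uuid_demo.$cmd", ui: uuid, o: {create: "foo"}}]}));
// listCollections reports the UUID the applyOps entry supplied.
const info = mydb.getCollectionInfos({name: "foo"})[0];
assert.eq(info.info.uuid, uuid);
})();
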
diff --git a/jstests/replsets/apply_ops_idempotency.js b/jstests/replsets/apply_ops_idempotency.js
index 7173bbd0301..1a3167bdbde 100644
--- a/jstests/replsets/apply_ops_idempotency.js
+++ b/jstests/replsets/apply_ops_idempotency.js
@@ -1,199 +1,199 @@
(function() {
- 'use strict';
- const debug = 0;
-
- let rst = new ReplSetTest({name: "applyOpsIdempotency", nodes: 1});
- rst.startSet();
- rst.initiate();
-
- /**
- * Returns true if this database contains any drop-pending collections.
- */
- function containsDropPendingCollection(mydb) {
- const res =
- assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true}));
- const collectionInfos = res.cursor.firstBatch;
- const collectionNames = collectionInfos.map(c => c.name);
- return Boolean(collectionNames.find(c => c.indexOf('system.drop.') == 0));
- }
-
- /**
- * Apply ops on mydb, asserting success.
- */
- function assertApplyOpsWorks(testdbs, ops) {
- // Remaining operations in ops must still be applied
- while (ops.length) {
- let cmd = {applyOps: ops};
- let res = testdbs[0].adminCommand(cmd);
- if (debug) {
- printjson({applyOps: ops, res});
- }
-
- // Wait for any drop-pending collections to be removed by the reaper before proceeding.
- assert.soon(function() {
- return !testdbs.find(mydb => containsDropPendingCollection(mydb));
- });
-
- // If the entire operation succeeded, we're done.
- if (res.ok == 1)
- return res;
-
- // Skip any operations that succeeded.
- while (res.applied-- && res.results.shift())
- ops.shift();
-
- // These errors are expected when replaying operations and should be ignored.
- if (res.code == ErrorCodes.NamespaceNotFound || res.code == ErrorCodes.DuplicateKey) {
- ops.shift();
- continue;
- }
-
- // Generate the appropriate error message.
- assert.commandWorked(res, tojson(cmd));
+'use strict';
+const debug = 0;
+
+let rst = new ReplSetTest({name: "applyOpsIdempotency", nodes: 1});
+rst.startSet();
+rst.initiate();
+
+/**
+ * Returns true if this database contains any drop-pending collections.
+ */
+function containsDropPendingCollection(mydb) {
+ const res =
+ assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true}));
+ const collectionInfos = res.cursor.firstBatch;
+ const collectionNames = collectionInfos.map(c => c.name);
+ return Boolean(collectionNames.find(c => c.indexOf('system.drop.') == 0));
+}
+
+/**
+ * Apply ops on mydb, asserting success.
+ */
+function assertApplyOpsWorks(testdbs, ops) {
+ // Remaining operations in ops must still be applied
+ while (ops.length) {
+ let cmd = {applyOps: ops};
+ let res = testdbs[0].adminCommand(cmd);
+ if (debug) {
+ printjson({applyOps: ops, res});
}
- }
- /**
- * Run the dbHash command on mydb, assert it worked and return the md5.
- */
- function dbHash(mydb) {
- let cmd = {dbHash: 1};
- let res = mydb.runCommand(cmd);
- assert.commandWorked(res, tojson(cmd));
- return res.md5;
- }
-
- /**
- * Gather collection info and dbHash results of each of the passed databases.
- */
- function dbInfo(dbs) {
- return dbs.map((db) => {
- return {name: db.getName(), info: db.getCollectionInfos(), md5: dbHash(db)};
+ // Wait for any drop-pending collections to be removed by the reaper before proceeding.
+ assert.soon(function() {
+ return !testdbs.find(mydb => containsDropPendingCollection(mydb));
});
- }
- var getCollections = (mydb, prefixes) => prefixes.map((prefix) => mydb[prefix]);
-
- /**
- * Test functions to run and test using replay of oplog.
- */
- var tests = {
- crud: (mydb) => {
- let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
- assert.writeOK(x.insert({_id: 1}));
- assert.writeOK(x.update({_id: 1}, {$set: {x: 1}}));
- assert.writeOK(x.remove({_id: 1}));
-
- assert.writeOK(y.update({_id: 1}, {y: 1}));
- assert.writeOK(y.insert({_id: 2, y: false, z: false}));
- assert.writeOK(y.update({_id: 2}, {y: 2}));
-
- assert.writeOK(z.insert({_id: 1, z: 1}));
- assert.writeOK(z.remove({_id: 1}));
- assert.writeOK(z.insert({_id: 1}));
- assert.writeOK(z.insert({_id: 2, z: 2}));
- },
- renameCollectionWithinDatabase: (mydb) => {
- let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
-
- assert.commandWorked(x.renameCollection(z.getName()));
- assert.writeOK(z.insert({_id: 2, x: 2}));
- assert.writeOK(x.insert({_id: 2, x: false}));
- assert.writeOK(y.insert({y: 2}));
-
- assert.commandWorked(y.renameCollection(x.getName(), true));
- assert.commandWorked(z.renameCollection(y.getName()));
- },
- renameCollectionWithinDatabaseDroppingTargetByUUID: (mydb) => {
- assert.commandWorked(mydb.createCollection("x"));
- assert.commandWorked(mydb.createCollection("y"));
- assert.commandWorked(mydb.createCollection("z"));
-
- assert.commandWorked(mydb.x.renameCollection('xx'));
- // When replayed on a up-to-date db, this oplog entry may drop
- // collection z rather than collection x if the dropTarget is not
- // specified by UUID. (See SERVER-33087)
- assert.commandWorked(mydb.y.renameCollection('xx', true));
- assert.commandWorked(mydb.xx.renameCollection('yy'));
- assert.commandWorked(mydb.z.renameCollection('xx'));
- },
- renameCollectionWithinDatabaseDropTargetEvenWhenSourceIsEmpty: (mydb) => {
- assert.commandWorked(mydb.createCollection("x"));
- assert.commandWorked(mydb.createCollection("y"));
- assert.commandWorked(mydb.x.renameCollection('y', true));
- assert(mydb.y.drop());
- },
- renameCollectionAcrossDatabases: (mydb) => {
- let otherdb = mydb.getSiblingDB(mydb + '_');
- let [x, y] = getCollections(mydb, ['x', 'y']);
- let [z] = getCollections(otherdb, ['z']);
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
-
- assert.commandWorked(
- mydb.adminCommand({renameCollection: x.getFullName(), to: z.getFullName()}));
- assert.writeOK(z.insert({_id: 2, x: 2}));
- assert.writeOK(x.insert({_id: 2, x: false}));
- assert.writeOK(y.insert({y: 2}));
-
- assert.commandWorked(mydb.adminCommand(
- {renameCollection: y.getFullName(), to: x.getFullName(), dropTarget: true}));
- assert.commandWorked(
- mydb.adminCommand({renameCollection: z.getFullName(), to: y.getFullName()}));
- return [mydb, otherdb];
- },
- createIndex: (mydb) => {
- let [x, y] = getCollections(mydb, ['x', 'y']);
- assert.commandWorked(x.createIndex({x: 1}));
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
- assert.commandWorked(y.createIndex({y: 1}));
- assert.writeOK(y.insert({_id: 2, y: 2}));
- },
- };
-
- /**
- * Create a new uniquely named database, execute testFun and compute the dbHash. Then replay
- * all different suffixes of the oplog and check for the correct hash. If testFun creates
- * additional databases, it should return an array with all databases to check.
- */
- function testIdempotency(primary, testFun, testName) {
- // Create a new database name, so it's easier to filter out our oplog records later.
- let dbname = (new Date()).toISOString().match(/[-0-9T]/g).join(''); // 2017-05-30T155055713
- let mydb = primary.getDB(dbname);
-
- // Allow testFun to return the array of databases to check (default is mydb).
- let testdbs = testFun(mydb) || [mydb];
- let expectedInfo = dbInfo(testdbs);
-
- let oplog = mydb.getSiblingDB('local').oplog.rs;
- let ops = oplog
- .find({op: {$ne: 'n'}, ns: new RegExp('^' + mydb.getName())},
- {ts: 0, t: 0, h: 0, v: 0})
- .toArray();
- assert.gt(ops.length, 0, 'Could not find any matching ops in the oplog');
- testdbs.forEach((db) => assert.commandWorked(db.dropDatabase()));
+ // If the entire operation succeeded, we're done.
+ if (res.ok == 1)
+ return res;
- if (debug) {
- print(testName + ': replaying suffixes of ' + ops.length + ' operations');
- printjson(ops);
- }
+ // Skip any operations that succeeded.
+ while (res.applied-- && res.results.shift())
+ ops.shift();
- for (let j = 0; j < ops.length; j++) {
- let replayOps = ops.slice(j);
- assertApplyOpsWorks(testdbs, replayOps);
- let actualInfo = dbInfo(testdbs);
- assert.eq(actualInfo,
- expectedInfo,
- 'unexpected differences between databases after replaying final ' +
- replayOps.length + ' ops in test ' + testName + ": " + tojson(replayOps));
+ // These errors are expected when replaying operations and should be ignored.
+ if (res.code == ErrorCodes.NamespaceNotFound || res.code == ErrorCodes.DuplicateKey) {
+ ops.shift();
+ continue;
}
+
+ // Generate the appropriate error message.
+ assert.commandWorked(res, tojson(cmd));
+ }
+}
+
+/**
+ * Run the dbHash command on mydb, assert it worked and return the md5.
+ */
+function dbHash(mydb) {
+ let cmd = {dbHash: 1};
+ let res = mydb.runCommand(cmd);
+ assert.commandWorked(res, tojson(cmd));
+ return res.md5;
+}
+
+/**
+ * Gather collection info and dbHash results of each of the passed databases.
+ */
+function dbInfo(dbs) {
+ return dbs.map((db) => {
+ return {name: db.getName(), info: db.getCollectionInfos(), md5: dbHash(db)};
+ });
+}
+
+var getCollections = (mydb, prefixes) => prefixes.map((prefix) => mydb[prefix]);
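+// For example, getCollections(mydb, ['x', 'y']) returns [mydb.x, mydb.y]; the handles do
+// not create the collections until something is written through them.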
+
+/**
+ * Test functions to run and test using replay of oplog.
+ */
+var tests = {
+ crud: (mydb) => {
+ let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
+ assert.writeOK(x.insert({_id: 1}));
+ assert.writeOK(x.update({_id: 1}, {$set: {x: 1}}));
+ assert.writeOK(x.remove({_id: 1}));
+
+ assert.writeOK(y.update({_id: 1}, {y: 1}));
+ assert.writeOK(y.insert({_id: 2, y: false, z: false}));
+ assert.writeOK(y.update({_id: 2}, {y: 2}));
+
+ assert.writeOK(z.insert({_id: 1, z: 1}));
+ assert.writeOK(z.remove({_id: 1}));
+ assert.writeOK(z.insert({_id: 1}));
+ assert.writeOK(z.insert({_id: 2, z: 2}));
+ },
+ renameCollectionWithinDatabase: (mydb) => {
+ let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
+ assert.writeOK(x.insert({_id: 1, x: 1}));
+ assert.writeOK(y.insert({_id: 1, y: 1}));
+
+ assert.commandWorked(x.renameCollection(z.getName()));
+ assert.writeOK(z.insert({_id: 2, x: 2}));
+ assert.writeOK(x.insert({_id: 2, x: false}));
+ assert.writeOK(y.insert({y: 2}));
+
+ assert.commandWorked(y.renameCollection(x.getName(), true));
+ assert.commandWorked(z.renameCollection(y.getName()));
+ },
+ renameCollectionWithinDatabaseDroppingTargetByUUID: (mydb) => {
+ assert.commandWorked(mydb.createCollection("x"));
+ assert.commandWorked(mydb.createCollection("y"));
+ assert.commandWorked(mydb.createCollection("z"));
+
+ assert.commandWorked(mydb.x.renameCollection('xx'));
+        // When replayed on an up-to-date db, this oplog entry may drop
+ // collection z rather than collection x if the dropTarget is not
+ // specified by UUID. (See SERVER-33087)
+ assert.commandWorked(mydb.y.renameCollection('xx', true));
+ assert.commandWorked(mydb.xx.renameCollection('yy'));
+ assert.commandWorked(mydb.z.renameCollection('xx'));
+ },
+ renameCollectionWithinDatabaseDropTargetEvenWhenSourceIsEmpty: (mydb) => {
+ assert.commandWorked(mydb.createCollection("x"));
+ assert.commandWorked(mydb.createCollection("y"));
+ assert.commandWorked(mydb.x.renameCollection('y', true));
+ assert(mydb.y.drop());
+ },
+ renameCollectionAcrossDatabases: (mydb) => {
+ let otherdb = mydb.getSiblingDB(mydb + '_');
+ let [x, y] = getCollections(mydb, ['x', 'y']);
+ let [z] = getCollections(otherdb, ['z']);
+ assert.writeOK(x.insert({_id: 1, x: 1}));
+ assert.writeOK(y.insert({_id: 1, y: 1}));
+
+ assert.commandWorked(
+ mydb.adminCommand({renameCollection: x.getFullName(), to: z.getFullName()}));
+ assert.writeOK(z.insert({_id: 2, x: 2}));
+ assert.writeOK(x.insert({_id: 2, x: false}));
+ assert.writeOK(y.insert({y: 2}));
+
+ assert.commandWorked(mydb.adminCommand(
+ {renameCollection: y.getFullName(), to: x.getFullName(), dropTarget: true}));
+ assert.commandWorked(
+ mydb.adminCommand({renameCollection: z.getFullName(), to: y.getFullName()}));
+ return [mydb, otherdb];
+ },
+ createIndex: (mydb) => {
+ let [x, y] = getCollections(mydb, ['x', 'y']);
+ assert.commandWorked(x.createIndex({x: 1}));
+ assert.writeOK(x.insert({_id: 1, x: 1}));
+ assert.writeOK(y.insert({_id: 1, y: 1}));
+ assert.commandWorked(y.createIndex({y: 1}));
+ assert.writeOK(y.insert({_id: 2, y: 2}));
+ },
+};
+
+/**
+ * Create a new uniquely named database, execute testFun and compute the dbHash. Then replay
+ * all different suffixes of the oplog and check for the correct hash. If testFun creates
+ * additional databases, it should return an array with all databases to check.
+ */
+function testIdempotency(primary, testFun, testName) {
+ // Create a new database name, so it's easier to filter out our oplog records later.
+ let dbname = (new Date()).toISOString().match(/[-0-9T]/g).join(''); // 2017-05-30T155055713
+ let mydb = primary.getDB(dbname);
+
+ // Allow testFun to return the array of databases to check (default is mydb).
+ let testdbs = testFun(mydb) || [mydb];
+ let expectedInfo = dbInfo(testdbs);
+
+ let oplog = mydb.getSiblingDB('local').oplog.rs;
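+    // Collect every entry that touched our database except no-ops, stripping the per-entry
+    // metadata fields (ts, t, h, v) before the entries are handed back to applyOps.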
+ let ops =
+ oplog
+ .find({op: {$ne: 'n'}, ns: new RegExp('^' + mydb.getName())}, {ts: 0, t: 0, h: 0, v: 0})
+ .toArray();
+ assert.gt(ops.length, 0, 'Could not find any matching ops in the oplog');
+ testdbs.forEach((db) => assert.commandWorked(db.dropDatabase()));
+
+ if (debug) {
+ print(testName + ': replaying suffixes of ' + ops.length + ' operations');
+ printjson(ops);
+ }
+
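+    // After the first pass the databases already match the expected state, so each later
+    // pass reapplies a suffix of the oplog on top of that state; idempotent ops must leave
+    // the dbHash unchanged either way.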
+ for (let j = 0; j < ops.length; j++) {
+ let replayOps = ops.slice(j);
+ assertApplyOpsWorks(testdbs, replayOps);
+ let actualInfo = dbInfo(testdbs);
+ assert.eq(actualInfo,
+ expectedInfo,
+ 'unexpected differences between databases after replaying final ' +
+ replayOps.length + ' ops in test ' + testName + ": " + tojson(replayOps));
}
+}
- for (let f in tests)
- testIdempotency(rst.getPrimary(), tests[f], f);
+for (let f in tests)
+ testIdempotency(rst.getPrimary(), tests[f], f);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/apply_ops_insert_write_conflict_atomic.js b/jstests/replsets/apply_ops_insert_write_conflict_atomic.js
index 7f8870bc75a..47d35d4e7df 100644
--- a/jstests/replsets/apply_ops_insert_write_conflict_atomic.js
+++ b/jstests/replsets/apply_ops_insert_write_conflict_atomic.js
@@ -1,10 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
+load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
- new ApplyOpsInsertWriteConflictTest({
- testName: 'apply_ops_insert_write_conflict_atomic',
- atomic: true
- }).run();
+new ApplyOpsInsertWriteConflictTest({
+ testName: 'apply_ops_insert_write_conflict_atomic',
+ atomic: true
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js b/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js
index 2e91de8637a..a7843716bac 100644
--- a/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js
+++ b/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js
@@ -1,10 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
+load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
- new ApplyOpsInsertWriteConflictTest({
- testName: 'apply_ops_insert_write_conflict_nonatomic',
- atomic: false
- }).run();
+new ApplyOpsInsertWriteConflictTest({
+ testName: 'apply_ops_insert_write_conflict_nonatomic',
+ atomic: false
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js
index a2d45ab459f..e1c9fdb1823 100644
--- a/jstests/replsets/apply_ops_lastop.js
+++ b/jstests/replsets/apply_ops_lastop.js
@@ -4,54 +4,49 @@
//
(function() {
- "use strict";
-
- var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3, waitForKeys: true});
- rs.startSet();
- var nodes = rs.nodeList();
- rs.initiate({
- "_id": "applyOpsOptimeTest",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
- var primary = rs.getPrimary();
- var db = primary.getDB('foo');
- var coll = primary.getCollection('foo.bar');
- // Two connections
- var m1 = new Mongo(primary.host);
- var m2 = new Mongo(primary.host);
-
- var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
- var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
- var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}];
- var majorityWriteConcern = {w: 'majority', wtimeout: 30000};
-
- // Set up some data
- assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
- assert.commandWorked(
- m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
- var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
-
- // No-op applyOps
- var res = m2.getDB('foo').runCommand({
- applyOps: deleteApplyOps,
- preCondition: badPreCondition,
- writeConcern: majorityWriteConcern
- });
- assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded.");
- assert.eq(
- res.errmsg, "preCondition failed", "The applyOps command failed for the wrong reason.");
- var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
-
- // Check that each connection has the same last optime
- assert.eq(noOp,
- insertOp,
- "The connections' last optimes do " +
- "not match: applyOps failed to update lastop on no-op");
-
- rs.stopSet();
-
+"use strict";
+
+var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3, waitForKeys: true});
+rs.startSet();
+var nodes = rs.nodeList();
+rs.initiate({
+ "_id": "applyOpsOptimeTest",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
+var primary = rs.getPrimary();
+var db = primary.getDB('foo');
+var coll = primary.getCollection('foo.bar');
+// Two connections
+var m1 = new Mongo(primary.host);
+var m2 = new Mongo(primary.host);
+
+var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
+var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
+var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}];
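+// No document matches this precondition, so the applyOps below must fail without
+// applying its delete.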
+var majorityWriteConcern = {w: 'majority', wtimeout: 30000};
+
+// Set up some data
+assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
+assert.commandWorked(
+ m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
+var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
+
+// No-op applyOps
+var res = m2.getDB('foo').runCommand(
+ {applyOps: deleteApplyOps, preCondition: badPreCondition, writeConcern: majorityWriteConcern});
+assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded.");
+assert.eq(res.errmsg, "preCondition failed", "The applyOps command failed for the wrong reason.");
+var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
+
+// Check that each connection has the same last optime
+assert.eq(noOp,
+ insertOp,
+ "The connections' last optimes do " +
+ "not match: applyOps failed to update lastop on no-op");
+
+rs.stopSet();
})();
diff --git a/jstests/replsets/apply_ops_wc.js b/jstests/replsets/apply_ops_wc.js
index 5e7747fe343..8b3b6d4e92f 100644
--- a/jstests/replsets/apply_ops_wc.js
+++ b/jstests/replsets/apply_ops_wc.js
@@ -10,130 +10,127 @@
*/
(function() {
- "use strict";
- var nodeCount = 3;
- var replTest = new ReplSetTest({name: 'applyOpsWCSet', nodes: nodeCount});
- replTest.startSet();
- var cfg = replTest.getReplSetConfig();
- cfg.settings = {};
- cfg.settings.chainingAllowed = false;
- replTest.initiate(cfg);
-
- var testDB = "applyOps-wc-test";
-
- // Get test collection.
- var master = replTest.getPrimary();
- var db = master.getDB(testDB);
- var coll = db.apply_ops_wc;
-
- function dropTestCollection() {
- coll.drop();
- assert.eq(0, coll.find().itcount(), "test collection not empty");
- }
+"use strict";
+var nodeCount = 3;
+var replTest = new ReplSetTest({name: 'applyOpsWCSet', nodes: nodeCount});
+replTest.startSet();
+var cfg = replTest.getReplSetConfig();
+cfg.settings = {};
+cfg.settings.chainingAllowed = false;
+replTest.initiate(cfg);
+
+var testDB = "applyOps-wc-test";
+
+// Get test collection.
+var master = replTest.getPrimary();
+var db = master.getDB(testDB);
+var coll = db.apply_ops_wc;
+
+function dropTestCollection() {
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+dropTestCollection();
+
+// Set up the applyOps command.
+var applyOpsReq = {
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "b"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "c"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "d"}},
+ ]
+};
+
+function assertApplyOpsCommandWorked(res) {
+ assert.eq(3, res.applied);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq([true, true, true], res.results);
+}
+
+function assertWriteConcernError(res) {
+ assert(res.writeConcernError);
+ assert(res.writeConcernError.code);
+ assert(res.writeConcernError.errmsg);
+}
+
+var invalidWriteConcerns = [{w: 'invalid'}, {w: nodeCount + 1}];
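+// Neither is satisfiable: 'invalid' names no write concern mode and w:4 exceeds the node
+// count, yet applyOps should still apply the ops and report a writeConcernError.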
+
+function testInvalidWriteConcern(wc) {
+ jsTest.log("Testing invalid write concern " + tojson(wc));
+
+ applyOpsReq.writeConcern = wc;
+ var res = coll.runCommand(applyOpsReq);
+ assertApplyOpsCommandWorked(res);
+ assertWriteConcernError(res);
+}
+
+// Verify that invalid write concerns yield an error.
+coll.insert({_id: 1, x: "a"});
+invalidWriteConcerns.forEach(testInvalidWriteConcern);
+
+var secondaries = replTest.getSecondaries();
+
+var majorityWriteConcerns = [
+ {w: 2, wtimeout: 30000},
+ {w: 'majority', wtimeout: 30000},
+];
+
+function testMajorityWriteConcerns(wc) {
+ jsTest.log("Testing " + tojson(wc));
+
+ // Reset secondaries to ensure they can replicate.
+ secondaries[0].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+ secondaries[1].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+
+ // Set the writeConcern of the applyOps command.
+ applyOpsReq.writeConcern = wc;
dropTestCollection();
- // Set up the applyOps command.
- var applyOpsReq = {
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "b"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "c"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "d"}},
- ]
- };
-
- function assertApplyOpsCommandWorked(res) {
- assert.eq(3, res.applied);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq([true, true, true], res.results);
- }
-
- function assertWriteConcernError(res) {
- assert(res.writeConcernError);
- assert(res.writeConcernError.code);
- assert(res.writeConcernError.errmsg);
- }
-
- var invalidWriteConcerns = [{w: 'invalid'}, {w: nodeCount + 1}];
-
- function testInvalidWriteConcern(wc) {
- jsTest.log("Testing invalid write concern " + tojson(wc));
-
- applyOpsReq.writeConcern = wc;
- var res = coll.runCommand(applyOpsReq);
- assertApplyOpsCommandWorked(res);
- assertWriteConcernError(res);
- }
-
- // Verify that invalid write concerns yield an error.
+ // applyOps with a full replica set should succeed.
coll.insert({_id: 1, x: "a"});
- invalidWriteConcerns.forEach(testInvalidWriteConcern);
-
- var secondaries = replTest.getSecondaries();
-
- var majorityWriteConcerns = [
- {w: 2, wtimeout: 30000},
- {w: 'majority', wtimeout: 30000},
- ];
-
- function testMajorityWriteConcerns(wc) {
- jsTest.log("Testing " + tojson(wc));
-
- // Reset secondaries to ensure they can replicate.
- secondaries[0].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- secondaries[1].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
-
- // Set the writeConcern of the applyOps command.
- applyOpsReq.writeConcern = wc;
+ var res = db.runCommand(applyOpsReq);
- dropTestCollection();
+ assertApplyOpsCommandWorked(res);
+ assert(!res.writeConcernError,
+ 'applyOps on a full replicaset had writeConcern error ' + tojson(res.writeConcernError));
- // applyOps with a full replica set should succeed.
- coll.insert({_id: 1, x: "a"});
- var res = db.runCommand(applyOpsReq);
-
- assertApplyOpsCommandWorked(res);
- assert(!res.writeConcernError,
- 'applyOps on a full replicaset had writeConcern error ' +
- tojson(res.writeConcernError));
-
- dropTestCollection();
+ dropTestCollection();
- // Stop replication at one secondary.
- secondaries[0].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ // Stop replication at one secondary.
+ secondaries[0].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // applyOps should succeed with only 1 node not replicating.
- coll.insert({_id: 1, x: "a"});
- res = db.runCommand(applyOpsReq);
+ // applyOps should succeed with only 1 node not replicating.
+ coll.insert({_id: 1, x: "a"});
+ res = db.runCommand(applyOpsReq);
- assertApplyOpsCommandWorked(res);
- assert(!res.writeConcernError,
- 'applyOps on a replicaset with 2 working nodes had writeConcern error ' +
- tojson(res.writeConcernError));
+ assertApplyOpsCommandWorked(res);
+ assert(!res.writeConcernError,
+ 'applyOps on a replicaset with 2 working nodes had writeConcern error ' +
+ tojson(res.writeConcernError));
- dropTestCollection();
+ dropTestCollection();
- // Stop replication at a second secondary.
- secondaries[1].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ // Stop replication at a second secondary.
+ secondaries[1].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // applyOps should fail after two nodes have stopped replicating.
- coll.insert({_id: 1, x: "a"});
- applyOpsReq.writeConcern.wtimeout = 5000;
- res = db.runCommand(applyOpsReq);
+ // applyOps should fail after two nodes have stopped replicating.
+ coll.insert({_id: 1, x: "a"});
+ applyOpsReq.writeConcern.wtimeout = 5000;
+ res = db.runCommand(applyOpsReq);
- assertApplyOpsCommandWorked(res);
- assertWriteConcernError(res);
- }
+ assertApplyOpsCommandWorked(res);
+ assertWriteConcernError(res);
+}
- majorityWriteConcerns.forEach(testMajorityWriteConcerns);
+majorityWriteConcerns.forEach(testMajorityWriteConcerns);
- // Allow clean shutdown
- secondaries[0].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- secondaries[1].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+// Allow clean shutdown
+secondaries[0].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+secondaries[1].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/apply_transaction_with_yield.js b/jstests/replsets/apply_transaction_with_yield.js
index 19042ebcd88..67bff9b8dd3 100644
--- a/jstests/replsets/apply_transaction_with_yield.js
+++ b/jstests/replsets/apply_transaction_with_yield.js
@@ -7,38 +7,38 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const name = "change_stream_speculative_majority";
- const replTest = new ReplSetTest({name: name, nodes: [{}, {rsConfig: {priority: 0}}]});
- replTest.startSet();
- replTest.initiate();
+const name = "change_stream_speculative_majority";
+const replTest = new ReplSetTest({name: name, nodes: [{}, {rsConfig: {priority: 0}}]});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
- // Collections used in a transaction should be explicitly created first.
- assert.commandWorked(primary.getDB(dbName).createCollection(collName));
+// Collections used in a transaction should be explicitly created first.
+assert.commandWorked(primary.getDB(dbName).createCollection(collName));
- // Force the secondary to yield at ever opportunity.
- assert.commandWorked(
- secondary.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+// Force the secondary to yield at every opportunity.
+assert.commandWorked(
+ secondary.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- // Create a transaction that is substantially larger than 16MB, forcing the secondary to apply
- // it in multiple batches, so that it uses the TransactionHistoryIterator.
- const session = primary.startSession();
- session.startTransaction({readConcern: {level: "majority"}});
- const sessionColl = session.getDatabase(dbName)[collName];
- for (let i = 0; i < 3; i = i + 1) {
- assert.commandWorked(sessionColl.insert({a: 'x'.repeat(15 * 1024 * 1024)}));
- }
- session.commitTransaction();
+// Create a transaction that is substantially larger than 16MB, forcing the secondary to apply
+// it in multiple batches, so that it uses the TransactionHistoryIterator.
+const session = primary.startSession();
+session.startTransaction({readConcern: {level: "majority"}});
+const sessionColl = session.getDatabase(dbName)[collName];
+for (let i = 0; i < 3; i = i + 1) {
+ assert.commandWorked(sessionColl.insert({a: 'x'.repeat(15 * 1024 * 1024)}));
+}
+session.commitTransaction();
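+// Three ~15 MB documents exceed the 16 MB limit of a single oplog entry, which is what
+// forces the secondary to apply the commit in multiple batches as noted above.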
- // Make sure the transaction has been fully applied.
- replTest.awaitReplication();
+// Make sure the transaction has been fully applied.
+replTest.awaitReplication();
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/arbiters_not_included_in_w2_wc.js b/jstests/replsets/arbiters_not_included_in_w2_wc.js
index 6ea19cc55a5..ca391185e30 100644
--- a/jstests/replsets/arbiters_not_included_in_w2_wc.js
+++ b/jstests/replsets/arbiters_not_included_in_w2_wc.js
@@ -10,44 +10,43 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "arbiters_not_included_in_w2_wc";
- const rst = new ReplSetTest({name: name, nodes: 5});
- const nodes = rst.nodeList();
+const name = "arbiters_not_included_in_w2_wc";
+const rst = new ReplSetTest({name: name, nodes: 5});
+const nodes = rst.nodeList();
- rst.startSet();
- rst.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0, votes: 0},
- {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
- {"_id": 3, "host": nodes[3], "arbiterOnly": true},
- {"_id": 4, "host": nodes[4], "arbiterOnly": true}
- ]
- });
+rst.startSet();
+rst.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0, votes: 0},
+ {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
+ {"_id": 3, "host": nodes[3], "arbiterOnly": true},
+ {"_id": 4, "host": nodes[4], "arbiterOnly": true}
+ ]
+});
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(
- testColl.insert({"a": 1}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assert.commandWorked(
+ testColl.insert({"a": 1}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- jsTestLog("Shutting down both secondaries");
+jsTestLog("Shutting down both secondaries");
- rst.stop(1);
- rst.stop(2);
+rst.stop(1);
+rst.stop(2);
- jsTestLog("Issuing a w:2 write and confirming that it times out");
+jsTestLog("Issuing a w:2 write and confirming that it times out");
- assert.commandFailedWithCode(
- testColl.insert({"b": 2}, {writeConcern: {w: 2, wtimeout: 5 * 1000}}),
- ErrorCodes.WriteConcernFailed);
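+// The two arbiters hold no data and never acknowledge writes, so with both data-bearing
+// secondaries down only the primary can count towards w:2.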
+assert.commandFailedWithCode(testColl.insert({"b": 2}, {writeConcern: {w: 2, wtimeout: 5 * 1000}}),
+ ErrorCodes.WriteConcernFailed);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/arbiters_not_included_in_w3_wc.js b/jstests/replsets/arbiters_not_included_in_w3_wc.js
index 8e6cbf360f7..aaf35cb4501 100644
--- a/jstests/replsets/arbiters_not_included_in_w3_wc.js
+++ b/jstests/replsets/arbiters_not_included_in_w3_wc.js
@@ -10,42 +10,41 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "arbiters_not_included_in_w3_wc";
- const rst = new ReplSetTest({name: name, nodes: 4});
- const nodes = rst.nodeList();
+const name = "arbiters_not_included_in_w3_wc";
+const rst = new ReplSetTest({name: name, nodes: 4});
+const nodes = rst.nodeList();
- rst.startSet();
- rst.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
- {"_id": 3, "host": nodes[3], "arbiterOnly": true}
- ]
- });
+rst.startSet();
+rst.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
+ {"_id": 3, "host": nodes[3], "arbiterOnly": true}
+ ]
+});
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(
- testColl.insert({"a": 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assert.commandWorked(
+ testColl.insert({"a": 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- jsTestLog("Shutting down the non-voting secondary");
+jsTestLog("Shutting down the non-voting secondary");
- rst.stop(2);
+rst.stop(2);
- jsTestLog("Issuing a w:3 write and confirming that it times out");
+jsTestLog("Issuing a w:3 write and confirming that it times out");
- assert.commandFailedWithCode(
- testColl.insert({"b": 2}, {writeConcern: {w: 3, wtimeout: 5 * 1000}}),
- ErrorCodes.WriteConcernFailed);
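+// Only the primary and the remaining voting secondary can acknowledge the write; the
+// arbiter cannot, so w:3 is unsatisfiable here.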
+assert.commandFailedWithCode(testColl.insert({"b": 2}, {writeConcern: {w: 3, wtimeout: 5 * 1000}}),
+ ErrorCodes.WriteConcernFailed);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 9a9f36939b2..42dc2638c28 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -6,212 +6,201 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- var name = "rs_auth1";
- var port = allocatePorts(5);
- var path = "jstests/libs/";
-
- // These keyFiles have their permissions set to 600 later in the test.
- var key1_600 = path + "key1";
- var key2_600 = path + "key2";
-
- // This keyFile has its permissions set to 644 later in the test.
- var key1_644 = path + "key1_644";
-
- print("try starting mongod with auth");
- var m = MongoRunner.runMongod(
- {auth: "", port: port[4], dbpath: MongoRunner.dataDir + "/wrong-auth"});
-
- assert.eq(m.getDB("local").auth("__system", ""), 0);
-
- MongoRunner.stopMongod(m);
-
- print("reset permissions");
- run("chmod", "644", key1_644);
-
- print("try starting mongod");
- m = runMongoProgram("mongod",
- "--keyFile",
- key1_644,
- "--port",
- port[0],
- "--dbpath",
- MongoRunner.dataPath + name);
-
- print("should fail with wrong permissions");
- assert.eq(
- m, _isWindows() ? 100 : 1, "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
-
- // Pre-populate the data directory for the first replica set node, to be started later, with
- // a user's credentials.
- print("add a user to server0: foo");
- m = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-0"});
- m.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
- m.getDB("test").createUser({user: "bar", pwd: "baz", roles: jsTest.basicUserRoles});
- print("make sure user is written before shutting down");
- MongoRunner.stopMongod(m);
-
- print("start up rs");
- var rs = new ReplSetTest({"name": name, "nodes": 3});
-
- // The first node is started with the pre-populated data directory.
- print("start 0 with keyFile");
- m = rs.start(0, {"keyFile": key1_600, noCleanData: true});
- print("start 1 with keyFile");
- rs.start(1, {"keyFile": key1_600});
- print("start 2 with keyFile");
- rs.start(2, {"keyFile": key1_600});
-
- var result = m.getDB("admin").auth("foo", "bar");
- assert.eq(result, 1, "login failed");
- print("Initializing replSet with config: " + tojson(rs.getReplSetConfig()));
- result = m.getDB("admin").runCommand({replSetInitiate: rs.getReplSetConfig()});
- assert.eq(result.ok, 1, "couldn't initiate: " + tojson(result));
- m.getDB('admin')
- .logout(); // In case this node doesn't become primary, make sure its not auth'd
-
- var master = rs.getPrimary();
- rs.awaitSecondaryNodes();
- var mId = rs.getNodeId(master);
- var slave = rs._slaves[0];
- assert.eq(1, master.getDB("admin").auth("foo", "bar"));
- assert.writeOK(master.getDB("test").foo.insert(
- {x: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- print("try some legal and illegal reads");
- var r = master.getDB("test").foo.findOne();
- assert.eq(r.x, 1);
-
- slave.setSlaveOk();
-
- function doQueryOn(p) {
- var error = assert
- .throws(
- function() {
- r = p.getDB("test").foo.findOne();
- },
- [],
- "find did not throw, returned: " + tojson(r))
- .toString();
- printjson(error);
- assert.gt(error.indexOf("command find requires authentication"), -1, "error was non-auth");
- }
-
- doQueryOn(slave);
- master.adminCommand({logout: 1});
-
- print("unauthorized:");
- printjson(master.adminCommand({replSetGetStatus: 1}));
-
- doQueryOn(master);
-
- result = slave.getDB("test").auth("bar", "baz");
- assert.eq(result, 1);
-
- r = slave.getDB("test").foo.findOne();
- assert.eq(r.x, 1);
-
- print("add some data");
- master.getDB("test").auth("bar", "baz");
- var bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({x: i, foo: "bar"});
- }
- assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
-
- print("fail over");
- rs.stop(mId);
-
- master = rs.getPrimary();
-
- print("add some more data 1");
- master.getDB("test").auth("bar", "baz");
- bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({x: i, foo: "bar"});
- }
- assert.writeOK(bulk.execute({w: 2}));
-
- print("resync");
- rs.restart(mId, {"keyFile": key1_600});
- master = rs.getPrimary();
-
- print("add some more data 2");
- bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({x: i, foo: "bar"});
- }
- bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS});
-
- print("add member with wrong key");
- var conn = MongoRunner.runMongod({
- dbpath: MongoRunner.dataPath + name + "-3",
- port: port[3],
- replSet: "rs_auth1",
- oplogSize: 2,
- keyFile: key2_600
- });
-
- master.getDB("admin").auth("foo", "bar");
- var config = master.getDB("local").system.replset.findOne();
- config.members.push({_id: 3, host: rs.host + ":" + port[3]});
- config.version++;
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+var name = "rs_auth1";
+var port = allocatePorts(5);
+var path = "jstests/libs/";
+
+// These keyFiles have their permissions set to 600 later in the test.
+var key1_600 = path + "key1";
+var key2_600 = path + "key2";
+
+// This keyFile has its permissions set to 644 later in the test.
+var key1_644 = path + "key1_644";
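+// mongod refuses to start when a keyFile is readable by group or others, which the 644
+// copy is used to demonstrate below.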
+
+print("try starting mongod with auth");
+var m =
+ MongoRunner.runMongod({auth: "", port: port[4], dbpath: MongoRunner.dataDir + "/wrong-auth"});
+
+assert.eq(m.getDB("local").auth("__system", ""), 0);
+
+MongoRunner.stopMongod(m);
+
+print("reset permissions");
+run("chmod", "644", key1_644);
+
+print("try starting mongod");
+m = runMongoProgram(
+ "mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name);
+
+print("should fail with wrong permissions");
+assert.eq(
+ m, _isWindows() ? 100 : 1, "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
+
+// Pre-populate the data directory for the first replica set node, to be started later, with
+// a user's credentials.
+print("add a user to server0: foo");
+m = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-0"});
+m.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+m.getDB("test").createUser({user: "bar", pwd: "baz", roles: jsTest.basicUserRoles});
+print("make sure user is written before shutting down");
+MongoRunner.stopMongod(m);
+
+print("start up rs");
+var rs = new ReplSetTest({"name": name, "nodes": 3});
+
+// The first node is started with the pre-populated data directory.
+print("start 0 with keyFile");
+m = rs.start(0, {"keyFile": key1_600, noCleanData: true});
+print("start 1 with keyFile");
+rs.start(1, {"keyFile": key1_600});
+print("start 2 with keyFile");
+rs.start(2, {"keyFile": key1_600});
+
+var result = m.getDB("admin").auth("foo", "bar");
+assert.eq(result, 1, "login failed");
+print("Initializing replSet with config: " + tojson(rs.getReplSetConfig()));
+result = m.getDB("admin").runCommand({replSetInitiate: rs.getReplSetConfig()});
+assert.eq(result.ok, 1, "couldn't initiate: " + tojson(result));
+m.getDB('admin').logout();  // In case this node doesn't become primary, make sure it's not auth'd
+
+var master = rs.getPrimary();
+rs.awaitSecondaryNodes();
+var mId = rs.getNodeId(master);
+var slave = rs._slaves[0];
+assert.eq(1, master.getDB("admin").auth("foo", "bar"));
+assert.writeOK(master.getDB("test").foo.insert(
+ {x: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+print("try some legal and illegal reads");
+var r = master.getDB("test").foo.findOne();
+assert.eq(r.x, 1);
+
+slave.setSlaveOk();
+
+function doQueryOn(p) {
+ var error = assert.throws(function() {
+ r = p.getDB("test").foo.findOne();
+ }, [], "find did not throw, returned: " + tojson(r)).toString();
+ printjson(error);
+ assert.gt(error.indexOf("command find requires authentication"), -1, "error was non-auth");
+}
+
+doQueryOn(slave);
+master.adminCommand({logout: 1});
+
+print("unauthorized:");
+printjson(master.adminCommand({replSetGetStatus: 1}));
+
+doQueryOn(master);
+
+result = slave.getDB("test").auth("bar", "baz");
+assert.eq(result, 1);
+
+r = slave.getDB("test").foo.findOne();
+assert.eq(r.x, 1);
+
+print("add some data");
+master.getDB("test").auth("bar", "baz");
+var bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
+}
+assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
+
+print("fail over");
+rs.stop(mId);
+
+master = rs.getPrimary();
+
+print("add some more data 1");
+master.getDB("test").auth("bar", "baz");
+bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
+}
+assert.writeOK(bulk.execute({w: 2}));
+
+print("resync");
+rs.restart(mId, {"keyFile": key1_600});
+master = rs.getPrimary();
+
+print("add some more data 2");
+bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
+}
+bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS});
+
+print("add member with wrong key");
+var conn = MongoRunner.runMongod({
+ dbpath: MongoRunner.dataPath + name + "-3",
+ port: port[3],
+ replSet: "rs_auth1",
+ oplogSize: 2,
+ keyFile: key2_600
+});
+
+master.getDB("admin").auth("foo", "bar");
+var config = master.getDB("local").system.replset.findOne();
+config.members.push({_id: 3, host: rs.host + ":" + port[3]});
+config.version++;
+try {
+ master.adminCommand({replSetReconfig: config});
+} catch (e) {
+ print("error: " + e);
+}
+master = rs.getPrimary();
+master.getDB("admin").auth("foo", "bar");
+
+print("shouldn't ever sync");
+for (var i = 0; i < 10; i++) {
+ print("iteration: " + i);
+ var results = master.adminCommand({replSetGetStatus: 1});
+ printjson(results);
+ assert(results.members[3].state != 2);
+ sleep(1000);
+}
+
+print("stop member");
+MongoRunner.stopMongod(conn);
+
+print("start back up with correct key");
+var conn = MongoRunner.runMongod({
+ dbpath: MongoRunner.dataPath + name + "-3",
+ port: port[3],
+ replSet: "rs_auth1",
+ oplogSize: 2,
+ keyFile: key1_600
+});
+
+wait(function() {
try {
- master.adminCommand({replSetReconfig: config});
- } catch (e) {
- print("error: " + e);
- }
- master = rs.getPrimary();
- master.getDB("admin").auth("foo", "bar");
-
- print("shouldn't ever sync");
- for (var i = 0; i < 10; i++) {
- print("iteration: " + i);
var results = master.adminCommand({replSetGetStatus: 1});
printjson(results);
- assert(results.members[3].state != 2);
- sleep(1000);
+ return results.members[3].state == 2;
+ } catch (e) {
+ print(e);
}
-
- print("stop member");
- MongoRunner.stopMongod(conn);
-
- print("start back up with correct key");
- var conn = MongoRunner.runMongod({
- dbpath: MongoRunner.dataPath + name + "-3",
- port: port[3],
- replSet: "rs_auth1",
- oplogSize: 2,
- keyFile: key1_600
- });
-
- wait(function() {
- try {
- var results = master.adminCommand({replSetGetStatus: 1});
- printjson(results);
- return results.members[3].state == 2;
- } catch (e) {
- print(e);
+ return false;
+});
+
+print("make sure it has the config, too");
+assert.soon(function() {
+ for (var i in rs.nodes) {
+ rs.nodes[i].setSlaveOk();
+ rs.nodes[i].getDB("admin").auth("foo", "bar");
+ config = rs.nodes[i].getDB("local").system.replset.findOne();
+ if (config.version != 2) {
+ return false;
}
- return false;
- });
-
- print("make sure it has the config, too");
- assert.soon(function() {
- for (var i in rs.nodes) {
- rs.nodes[i].setSlaveOk();
- rs.nodes[i].getDB("admin").auth("foo", "bar");
- config = rs.nodes[i].getDB("local").system.replset.findOne();
- if (config.version != 2) {
- return false;
- }
- }
- return true;
- });
- MongoRunner.stopMongod(conn);
- rs.stopSet();
+ }
+ return true;
+});
+MongoRunner.stopMongod(conn);
+rs.stopSet();
})();
diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js
index 92d1c10e23a..b2eed7b4f1a 100644
--- a/jstests/replsets/auth2.js
+++ b/jstests/replsets/auth2.js
@@ -9,75 +9,75 @@
TestData.skipGossipingClusterTime = true;
(function() {
- var testInvalidAuthStates = function(replSetTest) {
- print("check that 0 is in recovering");
- replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
+var testInvalidAuthStates = function(replSetTest) {
+ print("check that 0 is in recovering");
+ replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
- print("shut down 1, 0 still in recovering.");
- replSetTest.stop(1);
- sleep(5);
-
- replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
-
- print("shut down 2, 0 becomes a secondary.");
- replSetTest.stop(2);
-
- replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.SECONDARY);
-
- replSetTest.restart(1, {"keyFile": key1});
- replSetTest.restart(2, {"keyFile": key1});
- };
-
- var name = "rs_auth2";
- var path = "jstests/libs/";
-
- // These keyFiles have their permissions set to 600 later in the test.
- var key1 = path + "key1";
- var key2 = path + "key2";
+ print("shut down 1, 0 still in recovering.");
+ replSetTest.stop(1);
+ sleep(5);
- var replSetTest = new ReplSetTest({name: name, nodes: 3, waitForKeys: true});
- var nodes = replSetTest.startSet();
- var hostnames = replSetTest.nodeList();
- replSetTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": hostnames[0], "priority": 2},
- {"_id": 1, "host": hostnames[1], priority: 0},
- {"_id": 2, "host": hostnames[2], priority: 0}
- ]
- });
+ replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
- var master = replSetTest.getPrimary();
+ print("shut down 2, 0 becomes a secondary.");
+ replSetTest.stop(2);
- print("add an admin user");
- master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: replSetTest.kDefaultTimeoutMS});
- var m = replSetTest.nodes[0];
+ replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.SECONDARY);
- print("starting 1 and 2 with key file");
- replSetTest.stop(1);
replSetTest.restart(1, {"keyFile": key1});
- replSetTest.stop(2);
replSetTest.restart(2, {"keyFile": key1});
-
- // auth to all nodes with auth
- replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
- replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
- testInvalidAuthStates(replSetTest);
-
- print("restart mongod with bad keyFile");
-
- replSetTest.stop(0);
- m = replSetTest.restart(0, {"keyFile": key2});
-
- // auth to all nodes
- replSetTest.nodes[0].getDB("admin").auth("foo", "bar");
- replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
- replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
- testInvalidAuthStates(replSetTest);
-
- replSetTest.stop(0);
- m = replSetTest.restart(0, {"keyFile": key1});
-
- replSetTest.stopSet();
+};
+
+var name = "rs_auth2";
+var path = "jstests/libs/";
+
+// These keyFiles have their permissions set to 600 later in the test.
+var key1 = path + "key1";
+var key2 = path + "key2";
+
+var replSetTest = new ReplSetTest({name: name, nodes: 3, waitForKeys: true});
+var nodes = replSetTest.startSet();
+var hostnames = replSetTest.nodeList();
+replSetTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": hostnames[0], "priority": 2},
+ {"_id": 1, "host": hostnames[1], priority: 0},
+ {"_id": 2, "host": hostnames[2], priority: 0}
+ ]
+});
+
+var master = replSetTest.getPrimary();
+
+print("add an admin user");
+master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: replSetTest.kDefaultTimeoutMS});
+var m = replSetTest.nodes[0];
+
+print("starting 1 and 2 with key file");
+replSetTest.stop(1);
+replSetTest.restart(1, {"keyFile": key1});
+replSetTest.stop(2);
+replSetTest.restart(2, {"keyFile": key1});
+
+// auth to all nodes with auth
+replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
+replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
+testInvalidAuthStates(replSetTest);
+
+print("restart mongod with bad keyFile");
+
+replSetTest.stop(0);
+m = replSetTest.restart(0, {"keyFile": key2});
+
+// auth to all nodes
+replSetTest.nodes[0].getDB("admin").auth("foo", "bar");
+replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
+replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
+testInvalidAuthStates(replSetTest);
+
+replSetTest.stop(0);
+m = replSetTest.restart(0, {"keyFile": key1});
+
+replSetTest.stopSet();
}());
diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js
index d35d0ec2919..179edf015d6 100644
--- a/jstests/replsets/auth_no_pri.js
+++ b/jstests/replsets/auth_no_pri.js
@@ -1,32 +1,31 @@
// Test that you can still authenticate a replset connection to a RS with no primary (SERVER-6665).
(function() {
- 'use strict';
+'use strict';
- var NODE_COUNT = 3;
- var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
- var nodes = rs.startSet();
- rs.initiate();
+var NODE_COUNT = 3;
+var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
+var nodes = rs.startSet();
+rs.initiate();
- // Add user
- var master = rs.getPrimary();
- master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
+// Add user
+var master = rs.getPrimary();
+master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
- // Can authenticate replset connection when whole set is up.
- var conn = new Mongo(rs.getURL());
- assert(conn.getDB('admin').auth('admin', 'pwd'));
- assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
+// Can authenticate replset connection when whole set is up.
+var conn = new Mongo(rs.getURL());
+assert(conn.getDB('admin').auth('admin', 'pwd'));
+assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
- // Make sure there is no primary
- rs.stop(0);
- rs.stop(1);
- rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY);
+// Make sure there is no primary
+rs.stop(0);
+rs.stop(1);
+rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY);
- // Make sure you can still authenticate a replset connection with no primary
- var conn2 = new Mongo(rs.getURL());
- conn2.setSlaveOk(true);
- assert(conn2.getDB('admin').auth({user: 'admin', pwd: 'pwd', mechanism: "SCRAM-SHA-1"}));
- assert.eq(1, conn2.getDB('admin').foo.findOne().a);
-
- rs.stopSet();
+// Make sure you can still authenticate a replset connection with no primary
+var conn2 = new Mongo(rs.getURL());
+conn2.setSlaveOk(true);
+assert(conn2.getDB('admin').auth({user: 'admin', pwd: 'pwd', mechanism: "SCRAM-SHA-1"}));
+assert.eq(1, conn2.getDB('admin').foo.findOne().a);
+rs.stopSet();
}());
diff --git a/jstests/replsets/await_replication_timeout.js b/jstests/replsets/await_replication_timeout.js
index 2fb4e6e9471..ce89a30c296 100644
--- a/jstests/replsets/await_replication_timeout.js
+++ b/jstests/replsets/await_replication_timeout.js
@@ -1,79 +1,77 @@
// Tests timeout behavior of waiting for write concern as well as its interaction with maxTimeMS
(function() {
- "use strict";
+"use strict";
- var replTest = new ReplSetTest({nodes: 3});
- replTest.startSet();
- replTest.initiate();
- var primary = replTest.getPrimary();
- var testDB = primary.getDB('test');
- const collName = 'foo';
- var testColl = testDB.getCollection(collName);
+var replTest = new ReplSetTest({nodes: 3});
+replTest.startSet();
+replTest.initiate();
+var primary = replTest.getPrimary();
+var testDB = primary.getDB('test');
+const collName = 'foo';
+var testColl = testDB.getCollection(collName);
- // Insert a document and implicitly create the collection.
- let resetCollection = function(w) {
- assert.writeOK(testColl.insert(
- {_id: 0}, {writeConcern: {w: w, wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(1, testColl.find().itcount());
- };
+// Insert a document and implicitly create the collection.
+let resetCollection = function(w) {
+ assert.writeOK(
+ testColl.insert({_id: 0}, {writeConcern: {w: w, wtimeout: replTest.kDefaultTimeoutMS}}));
+ assert.eq(1, testColl.find().itcount());
+};
- resetCollection(3);
+resetCollection(3);
- // Make sure that there are only 2 nodes up so w:3 writes will always time out
- replTest.stop(2);
+// Make sure that there are only 2 nodes up so w:3 writes will always time out
+replTest.stop(2);
- // Test wtimeout
- var res = testDB.runCommand(
- {insert: collName, documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
+// Test wtimeout
+var res = testDB.runCommand(
+ {insert: collName, documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1000}});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
- // Test maxTimeMS timeout
- res = testDB.runCommand(
- {insert: collName, documents: [{a: 1}], writeConcern: {w: 3}, maxTimeMS: 1000});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+// Test maxTimeMS timeout
+res = testDB.runCommand(
+ {insert: collName, documents: [{a: 1}], writeConcern: {w: 3}, maxTimeMS: 1000});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
- // Test with wtimeout < maxTimeMS
- res = testDB.runCommand({
- insert: collName,
- documents: [{a: 1}],
- writeConcern: {w: 3, wtimeout: 1000},
- maxTimeMS: 10 * 1000
- });
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
+// Test with wtimeout < maxTimeMS
+res = testDB.runCommand({
+ insert: collName,
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 1000},
+ maxTimeMS: 10 * 1000
+});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
- // Test with wtimeout > maxTimeMS
- res = testDB.runCommand({
- insert: collName,
- documents: [{a: 1}],
- writeConcern: {w: 3, wtimeout: 10 * 1000},
- maxTimeMS: 1000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+// Test with wtimeout > maxTimeMS
+res = testDB.runCommand({
+ insert: collName,
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 10 * 1000},
+ maxTimeMS: 1000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
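+// Taken together, the two cases above show that whichever limit expires first determines
+// which error is reported.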
- // dropDatabase respects the 'w' field when it is stronger than the default of majority.
- res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 3, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
+// dropDatabase respects the 'w' field when it is stronger than the default of majority.
+res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 3, wtimeout: 1000}});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
- resetCollection(2);
+resetCollection(2);
- // Pause application on secondary so that commit point doesn't advance, meaning that a dropped
- // database on the primary will remain in 'drop-pending' state.
- var secondary = replTest.getSecondary();
- jsTestLog("Pausing oplog application on the secondary node.");
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+// Pause application on secondary so that commit point doesn't advance, meaning that a dropped
+// database on the primary will remain in 'drop-pending' state.
+var secondary = replTest.getSecondary();
+jsTestLog("Pausing oplog application on the secondary node.");
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- // dropDatabase defaults to 'majority' when a weaker 'w' field is provided, but respects
- // 'wtimeout'.
- res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 1, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
-
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- replTest.stopSet();
+// dropDatabase defaults to 'majority' when a weaker 'w' field is provided, but respects
+// 'wtimeout'.
+res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 1, wtimeout: 1000}});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.commandWorked(secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+replTest.stopSet();
})();
diff --git a/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js b/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js
index 29680677670..0f76df2dfe5 100644
--- a/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js
+++ b/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js
@@ -3,108 +3,108 @@
// while an awaitData query is running. See SERVER-35239.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/replsets/rslib.js');
+load('jstests/libs/check_log.js');
+
+const name = 'awaitdata_getmore_new_last_committed_optime';
+const replSet = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
+
+replSet.startSet();
+replSet.initiate();
+
+const dbName = 'test';
+const collName = 'coll';
+
+const primary = replSet.getPrimary();
+const secondaries = replSet.getSecondaries();
+const secondary = secondaries[0];
+
+const primaryDB = primary.getDB(dbName);
- const name = 'awaitdata_getmore_new_last_committed_optime';
- const replSet = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
+// Create capped collection on primary and allow it to be committed.
+assert.commandWorked(primaryDB.createCollection(collName, {capped: true, size: 2048}));
+replSet.awaitReplication();
+replSet.awaitLastOpCommitted();
- replSet.startSet();
- replSet.initiate();
+// Stop data replication on 3 secondaries to prevent writes being committed.
+jsTestLog('Stopping replication');
+stopServerReplication(secondaries[1]);
+stopServerReplication(secondaries[2]);
+stopServerReplication(secondaries[3]);
+
+// Write data to primary.
+for (let i = 0; i < 2; i++) {
+ assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
+}
+
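+// With replication stopped on three of the five nodes, w:2 is satisfied by the primary
+// plus the one still-replicating secondary, but the majority commit point cannot advance.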
+replSet.awaitReplication(null, null, [secondary]);
+jsTestLog('Secondary has replicated data');
+
+jsTestLog('Starting parallel shell');
+// Start a parallel shell because we'll be enabling a failpoint that will make the thread hang.
+let waitForGetMoreToFinish = startParallelShell(() => {
+ load('jstests/replsets/rslib.js');
+
+ const secondary = db.getMongo();
+ secondary.setSlaveOk();
const dbName = 'test';
const collName = 'coll';
-
- const primary = replSet.getPrimary();
- const secondaries = replSet.getSecondaries();
- const secondary = secondaries[0];
-
- const primaryDB = primary.getDB(dbName);
-
- // Create capped collection on primary and allow it to be committed.
- assert.commandWorked(primaryDB.createCollection(collName, {capped: true, size: 2048}));
- replSet.awaitReplication();
- replSet.awaitLastOpCommitted();
-
- // Stop data replication on 3 secondaries to prevent writes being committed.
- jsTestLog('Stopping replication');
- stopServerReplication(secondaries[1]);
- stopServerReplication(secondaries[2]);
- stopServerReplication(secondaries[3]);
-
- // Write data to primary.
- for (let i = 0; i < 2; i++) {
- assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
- }
-
- replSet.awaitReplication(null, null, [secondary]);
- jsTestLog('Secondary has replicated data');
-
- jsTestLog('Starting parallel shell');
- // Start a parallel shell because we'll be enabling a failpoint that will make the thread hang.
- let waitForGetMoreToFinish = startParallelShell(() => {
- load('jstests/replsets/rslib.js');
-
- const secondary = db.getMongo();
- secondary.setSlaveOk();
-
- const dbName = 'test';
- const collName = 'coll';
- const awaitDataDB = db.getSiblingDB('test');
-
- // Create awaitData cursor and get all data written so that a following getMore will have to
- // wait for more data.
- let cmdRes =
- awaitDataDB.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
- assert.eq(cmdRes.cursor.firstBatch.length, 2, tojson(cmdRes));
-
- // Enable failpoint.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'alwaysOn'}));
-
- // Call getMore on awaitData cursor with lastKnownCommittedOpTime ahead of node. This will
- // hang until we've disabled the failpoint. maxTimeMS must be set otherwise the default
- // timeout for waiting for inserts is 1 second.
- const lastOpTime = getLastOpTime(secondary);
- cmdRes = awaitDataDB.runCommand({
- getMore: cmdRes.cursor.id,
- collection: collName,
- batchSize: NumberInt(2),
- maxTimeMS: 10000,
- lastKnownCommittedOpTime: lastOpTime
- });
-
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
- assert.eq(cmdRes.cursor.nextBatch.length, 0, tojson(cmdRes));
- }, secondary.port);
-
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(
- secondary, 'PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point enabled');
-
- // Restart replication on the other nodes.
- jsTestLog('Restarting replication');
- restartServerReplication(secondaries[1]);
- restartServerReplication(secondaries[2]);
- restartServerReplication(secondaries[3]);
-
- // Wait until all nodes have committed the last op. At this point in executing the getMore,
- // the node's lastCommittedOpTime should now be ahead of the client's lastKnownCommittedOpTime.
- replSet.awaitLastOpCommitted();
- jsTestLog('All nodes caught up');
-
- // Disable failpoint.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'off'}));
-
- waitForGetMoreToFinish();
- jsTestLog('Parallel shell successfully exited');
-
- replSet.stopSet();
+ const awaitDataDB = db.getSiblingDB('test');
+
+ // Create awaitData cursor and get all data written so that a following getMore will have to
+ // wait for more data.
+ let cmdRes =
+ awaitDataDB.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
+ assert.commandWorked(cmdRes);
+ assert.gt(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
+ assert.eq(cmdRes.cursor.firstBatch.length, 2, tojson(cmdRes));
+
+ // Enable failpoint.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'alwaysOn'}));
+
+    // Call getMore on awaitData cursor with lastKnownCommittedOpTime ahead of node. This will
+    // hang until we've disabled the failpoint. maxTimeMS must be set; otherwise the default
+    // timeout for waiting for inserts is 1 second.
+ const lastOpTime = getLastOpTime(secondary);
+ cmdRes = awaitDataDB.runCommand({
+ getMore: cmdRes.cursor.id,
+ collection: collName,
+ batchSize: NumberInt(2),
+ maxTimeMS: 10000,
+ lastKnownCommittedOpTime: lastOpTime
+ });
+
+ assert.commandWorked(cmdRes);
+ assert.gt(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
+ assert.eq(cmdRes.cursor.nextBatch.length, 0, tojson(cmdRes));
+}, secondary.port);
+
+// Ensure that we've hit the failpoint before moving on.
+checkLog.contains(secondary,
+ 'PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point enabled');
+
+// Restart replication on the other nodes.
+jsTestLog('Restarting replication');
+restartServerReplication(secondaries[1]);
+restartServerReplication(secondaries[2]);
+restartServerReplication(secondaries[3]);
+
+// Wait until all nodes have committed the last op. At this point in executing the getMore,
+// the node's lastCommittedOpTime should now be ahead of the client's lastKnownCommittedOpTime.
+replSet.awaitLastOpCommitted();
+jsTestLog('All nodes caught up');
+
+// Disable failpoint.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'off'}));
+
+waitForGetMoreToFinish();
+jsTestLog('Parallel shell successfully exited');
+
+replSet.stopSet();
})();
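For context on the test above: tailable awaitData cursors follow the find/getMore command protocol, where the find establishes the cursor and each getMore blocks for up to maxTimeMS waiting for new data. A minimal sketch, assuming a capped collection 'test.coll' (names illustrative, not taken from this patch):

    // Create a tailable awaitData cursor, then poll it with getMore.
    // getMore waits up to maxTimeMS for new documents before returning an
    // empty batch with the cursor still open.
    let res = db.getSiblingDB('test').runCommand(
        {find: 'coll', batchSize: 1, tailable: true, awaitData: true});
    assert.commandWorked(res);
    res = db.getSiblingDB('test').runCommand(
        {getMore: res.cursor.id, collection: 'coll', maxTimeMS: 5000});
    assert.commandWorked(res);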
diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js
index e92530bc985..3b302644438 100644
--- a/jstests/replsets/background_index.js
+++ b/jstests/replsets/background_index.js
@@ -1,51 +1,52 @@
-/** Tests that a background index will be successfully
+/**
+ * Tests that a background index will be successfully
* replicated to a secondary when the indexed collection
* is renamed.
*/
(function() {
- "use strict";
-
- // Bring up a 2 node replset.
- var name = "bg_index_rename";
- var rst = new ReplSetTest({name: name, nodes: 3});
- rst.startSet();
- rst.initiate();
-
- // Create and populate a collection.
- var primary = rst.getPrimary();
- var coll = primary.getCollection("test.foo");
- var adminDB = primary.getDB("admin");
-
- for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"}));
- }
-
- // Add a background index.
- coll.ensureIndex({x: 1}, {background: true});
-
- // Rename the collection.
- assert.commandWorked(
- adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}),
- "Call to renameCollection failed.");
-
- // Await replication.
- rst.awaitReplication();
-
- // Step down the primary.
- assert.commandWorked(adminDB.runCommand({replSetStepDown: 60, force: true}));
-
- // Wait for new primary.
- var newPrimary = rst.getPrimary();
- assert.neq(primary, newPrimary);
- var barDB = newPrimary.getDB("bar");
- coll = newPrimary.getCollection("bar.test");
- coll.insert({_id: 200, x: 600, str: "goodnight moon"});
-
- // Check that the new primary has the index
- // on the renamed collection.
- var indexes = barDB.runCommand({listIndexes: "test"});
- assert.eq(indexes.cursor.firstBatch.length, 2);
-
- rst.stopSet();
+"use strict";
+
+// Bring up a 3-node replica set.
+var name = "bg_index_rename";
+var rst = new ReplSetTest({name: name, nodes: 3});
+rst.startSet();
+rst.initiate();
+
+// Create and populate a collection.
+var primary = rst.getPrimary();
+var coll = primary.getCollection("test.foo");
+var adminDB = primary.getDB("admin");
+
+for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"}));
+}
+
+// Add a background index.
+coll.ensureIndex({x: 1}, {background: true});
+
+// Rename the collection.
+assert.commandWorked(
+ adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}),
+ "Call to renameCollection failed.");
+
+// Await replication.
+rst.awaitReplication();
+
+// Step down the primary.
+assert.commandWorked(adminDB.runCommand({replSetStepDown: 60, force: true}));
+
+// Wait for new primary.
+var newPrimary = rst.getPrimary();
+assert.neq(primary, newPrimary);
+var barDB = newPrimary.getDB("bar");
+coll = newPrimary.getCollection("bar.test");
+coll.insert({_id: 200, x: 600, str: "goodnight moon"});
+
+// Check that the new primary has the index
+// on the renamed collection.
+var indexes = barDB.runCommand({listIndexes: "test"});
+assert.eq(indexes.cursor.firstBatch.length, 2);
+
+rst.stopSet();
}());
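The rename step above works because renameCollection across databases must be issued as an admin command, with dropTarget clearing any existing target. A minimal sketch using the same namespaces as the test:

    // Cross-database renames go through the admin database.
    const admin = db.getSiblingDB('admin');
    assert.commandWorked(admin.runCommand(
        {renameCollection: 'test.foo', to: 'bar.test', dropTarget: true}));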
diff --git a/jstests/replsets/batch_write_command_wc.js b/jstests/replsets/batch_write_command_wc.js
index 195e649fab8..9d41e7cba13 100644
--- a/jstests/replsets/batch_write_command_wc.js
+++ b/jstests/replsets/batch_write_command_wc.js
@@ -6,151 +6,174 @@
(function() {
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
-
- var request;
- var result;
-
- // NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
- jsTest.log("Starting no journal/repl set tests...");
-
- // Start a single-node replica set with no journal
- // Allows testing immediate write concern failures and wc application failures
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet({nojournal: ""});
- rst.initiate();
- var mongod = rst.getPrimary();
- var coll = mongod.getCollection("test.batch_write_command_wc");
-
- //
- // Basic insert, default WC
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, majority WC
- coll.remove({});
- printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, w:2 WC
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, immediate nojournal error
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {j: true}});
- printjson(result = coll.runCommand(request));
- assert(!result.ok);
- assert.eq(0, coll.find().itcount());
-
- //
- // Basic insert, timeout wc error
- coll.remove({});
- printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert(result.writeConcernError);
- assert.eq(100, result.writeConcernError.code);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, wmode wc error
- coll.remove({});
- printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert(result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Two ordered inserts, write error and wc error both reported
- coll.remove({});
- printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}, {$invalid: 'doc'}],
- writeConcern: {w: 'invalid'}
- });
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert(result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Two unordered inserts, write error and wc error reported
- coll.remove({});
- printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}, {$invalid: 'doc'}],
- writeConcern: {w: 'invalid'},
- ordered: false
- });
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert(result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Write error with empty writeConcern object.
- coll.remove({});
- request =
- {insert: coll.getName(), documents: [{_id: 1}, {_id: 1}], writeConcern: {}, ordered: false};
- result = coll.runCommand(request);
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert.eq(null, result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Write error with unspecified w.
- coll.remove({});
- request = {
- insert: coll.getName(),
- documents: [{_id: 1}, {_id: 1}],
- writeConcern: {wtimeout: 1},
- ordered: false
- };
- result = assert.commandWorkedIgnoringWriteErrors(coll.runCommand(request));
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert.eq(null, result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- jsTest.log("DONE no journal/repl tests");
- rst.stopSet();
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
+var request;
+var result;
+
+// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+jsTest.log("Starting no journal/repl set tests...");
+
+// Start a two-node replica set with no journal.
+// Allows testing immediate write concern failures and wc application failures
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet({nojournal: ""});
+rst.initiate();
+var mongod = rst.getPrimary();
+var coll = mongod.getCollection("test.batch_write_command_wc");
+
+//
+// Basic insert, default WC
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, majority WC
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'majority'}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, w:2 WC
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 2}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, immediate nojournal error
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {j: true}
+});
+printjson(result = coll.runCommand(request));
+assert(!result.ok);
+assert.eq(0, coll.find().itcount());
+
+//
+// Basic insert, timeout wc error
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 1}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert(result.writeConcernError);
+assert.eq(100, result.writeConcernError.code);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, wmode wc error
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Two ordered inserts, write error and wc error both reported
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {$invalid: 'doc'}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Two unordered inserts, write error and wc error reported
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {$invalid: 'doc'}],
+ writeConcern: {w: 'invalid'},
+ ordered: false
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Write error with empty writeConcern object.
+coll.remove({});
+request = {
+ insert: coll.getName(),
+ documents: [{_id: 1}, {_id: 1}],
+ writeConcern: {},
+ ordered: false
+};
+result = coll.runCommand(request);
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert.eq(null, result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Write error with unspecified w.
+coll.remove({});
+request = {
+ insert: coll.getName(),
+ documents: [{_id: 1}, {_id: 1}],
+ writeConcern: {wtimeout: 1},
+ ordered: false
+};
+result = assert.commandWorkedIgnoringWriteErrors(coll.runCommand(request));
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert.eq(null, result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+jsTest.log("DONE no journal/repl tests");
+rst.stopSet();
})();
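The assertions above lean on the write command response shape: n counts applied writes, writeErrors lists per-document failures by batch index, and writeConcernError reports a single failure to satisfy the requested write concern. A hedged sketch of inspecting a response (collection name illustrative):

    // Write errors and write concern errors arrive in separate fields.
    const res = db.runCommand(
        {insert: 'coll', documents: [{_id: 1}, {_id: 1}], ordered: false});
    if (res.writeErrors)
        print('per-document failures: ' + tojson(res.writeErrors));
    if (res.writeConcernError)
        print('write concern failure: ' + tojson(res.writeConcernError));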
diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js
index 303ed1f352b..1276ad73a61 100644
--- a/jstests/replsets/buildindexes.js
+++ b/jstests/replsets/buildindexes.js
@@ -1,68 +1,68 @@
// Check that buildIndexes config option is working
(function() {
- // Skip db hash check because secondary will have different number of indexes due to
- // buildIndexes=false on the secondary.
- TestData.skipCheckDBHashes = true;
- var name = "buildIndexes";
- var host = getHostName();
+// Skip db hash check because secondary will have different number of indexes due to
+// buildIndexes=false on the secondary.
+TestData.skipCheckDBHashes = true;
+var name = "buildIndexes";
+var host = getHostName();
- var replTest = new ReplSetTest({name: name, nodes: 3});
+var replTest = new ReplSetTest({name: name, nodes: 3});
- var nodes = replTest.startSet();
+var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.members[2].buildIndexes = false;
+var config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+config.members[2].buildIndexes = false;
- replTest.initiate(config);
+replTest.initiate(config);
- var master = replTest.getPrimary().getDB(name);
- var slaveConns = replTest._slaves;
- var slave = [];
- for (var i in slaveConns) {
- slaveConns[i].setSlaveOk();
- slave.push(slaveConns[i].getDB(name));
- }
- replTest.awaitReplication();
+var master = replTest.getPrimary().getDB(name);
+var slaveConns = replTest._slaves;
+var slave = [];
+for (var i in slaveConns) {
+ slaveConns[i].setSlaveOk();
+ slave.push(slaveConns[i].getDB(name));
+}
+replTest.awaitReplication();
- master.x.ensureIndex({y: 1});
+master.x.ensureIndex({y: 1});
- for (i = 0; i < 100; i++) {
- master.x.insert({x: 1, y: "abc", c: 1});
- }
+for (i = 0; i < 100; i++) {
+ master.x.insert({x: 1, y: "abc", c: 1});
+}
- replTest.awaitReplication();
+replTest.awaitReplication();
- assert.commandWorked(slave[0].runCommand({count: "x"}));
+assert.commandWorked(slave[0].runCommand({count: "x"}));
- var indexes = slave[0].stats().indexes;
- assert.eq(indexes, 2, 'number of indexes');
+var indexes = slave[0].stats().indexes;
+assert.eq(indexes, 2, 'number of indexes');
- indexes = slave[1].stats().indexes;
- assert.eq(indexes, 1);
+indexes = slave[1].stats().indexes;
+assert.eq(indexes, 1);
- indexes = slave[0].x.stats().indexSizes;
+indexes = slave[0].x.stats().indexSizes;
- var count = 0;
- for (i in indexes) {
- count++;
- if (i == "_id_") {
- continue;
- }
- assert(i.match(/y_/));
+var count = 0;
+for (i in indexes) {
+ count++;
+ if (i == "_id_") {
+ continue;
}
+ assert(i.match(/y_/));
+}
- assert.eq(count, 2);
+assert.eq(count, 2);
- indexes = slave[1].x.stats().indexSizes;
+indexes = slave[1].x.stats().indexSizes;
- count = 0;
- for (i in indexes) {
- count++;
- }
+count = 0;
+for (i in indexes) {
+ count++;
+}
- assert.eq(count, 1);
+assert.eq(count, 1);
- replTest.stopSet();
+replTest.stopSet();
}());
diff --git a/jstests/replsets/buildindexes_false_with_system_indexes.js b/jstests/replsets/buildindexes_false_with_system_indexes.js
index 6275e21d0ee..2c394d3e264 100644
--- a/jstests/replsets/buildindexes_false_with_system_indexes.js
+++ b/jstests/replsets/buildindexes_false_with_system_indexes.js
@@ -5,85 +5,84 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const testName = "buildindexes_false_with_system_indexes";
+const testName = "buildindexes_false_with_system_indexes";
- let rst = new ReplSetTest({
- name: testName,
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0, hidden: true, buildIndexes: false}},
- ],
- });
- const nodes = rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({
+ name: testName,
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0, hidden: true, buildIndexes: false}},
+ ],
+});
+const nodes = rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- assert.eq(primary, nodes[0]);
- let secondary = nodes[1];
- const hidden = nodes[2];
+let primary = rst.getPrimary();
+assert.eq(primary, nodes[0]);
+let secondary = nodes[1];
+const hidden = nodes[2];
- rst.awaitReplication();
- jsTestLog("Creating a role in the admin database");
- let adminDb = primary.getDB("admin");
- adminDb.createRole(
- {role: 'test_role', roles: [{role: 'readWrite', db: 'test'}], privileges: []});
- rst.awaitReplication();
+rst.awaitReplication();
+jsTestLog("Creating a role in the admin database");
+let adminDb = primary.getDB("admin");
+adminDb.createRole({role: 'test_role', roles: [{role: 'readWrite', db: 'test'}], privileges: []});
+rst.awaitReplication();
- jsTestLog("Creating a user in the admin database");
- adminDb.createUser({user: 'test_user', pwd: 'test', roles: [{role: 'test_role', db: 'admin'}]});
- rst.awaitReplication();
+jsTestLog("Creating a user in the admin database");
+adminDb.createUser({user: 'test_user', pwd: 'test', roles: [{role: 'test_role', db: 'admin'}]});
+rst.awaitReplication();
- // Make sure the indexes we expect are present on all nodes. The buildIndexes: false node
- // should have only the _id_ index.
- let secondaryAdminDb = secondary.getDB("admin");
- const hiddenAdminDb = hidden.getDB("admin");
+// Make sure the indexes we expect are present on all nodes. The buildIndexes: false node
+// should have only the _id_ index.
+let secondaryAdminDb = secondary.getDB("admin");
+const hiddenAdminDb = hidden.getDB("admin");
- assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "user_1_db_1"],
- secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"],
- secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "user_1_db_1"],
+ secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"],
+ secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- // Drop the indexes and restart the secondary. The indexes should not be re-created.
- jsTestLog("Dropping system indexes and restarting secondary.");
- adminDb.system.users.dropIndex("user_1_db_1");
- adminDb.system.roles.dropIndex("role_1_db_1");
- rst.awaitReplication();
- assert.eq(["_id_"], adminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+// Drop the indexes and restart the secondary. The indexes should not be re-created.
+jsTestLog("Dropping system indexes and restarting secondary.");
+adminDb.system.users.dropIndex("user_1_db_1");
+adminDb.system.roles.dropIndex("role_1_db_1");
+rst.awaitReplication();
+assert.eq(["_id_"], adminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- secondary = rst.restart(secondary, {}, true /* wait for node to become healthy */);
- secondaryAdminDb = secondary.getDB("admin");
- assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+secondary = rst.restart(secondary, {}, true /* wait for node to become healthy */);
+secondaryAdminDb = secondary.getDB("admin");
+assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- jsTestLog("Now restarting primary; indexes should be created.");
- rst.restart(primary);
- primary = rst.getPrimary();
- rst.awaitReplication();
- rst.waitForAllIndexBuildsToFinish("admin", "system.users");
- rst.waitForAllIndexBuildsToFinish("admin", "system.roles");
- adminDb = primary.getDB("admin");
- assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "user_1_db_1"],
- secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"],
- secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+jsTestLog("Now restarting primary; indexes should be created.");
+rst.restart(primary);
+primary = rst.getPrimary();
+rst.awaitReplication();
+rst.waitForAllIndexBuildsToFinish("admin", "system.users");
+rst.waitForAllIndexBuildsToFinish("admin", "system.roles");
+adminDb = primary.getDB("admin");
+assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "user_1_db_1"],
+ secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"],
+ secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- rst.stopSet();
+rst.stopSet();
}());
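The index checks above normalize getIndexes() output to a sorted list of names before comparing, which keeps the assertions independent of index creation order. A minimal sketch of the pattern (expected names as in the test):

    // Compare index names rather than full index specs.
    const names =
        db.getSiblingDB('admin').system.users.getIndexes().map(x => x.name).sort();
    assert.eq(['_id_', 'user_1_db_1'], names);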
diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js
index fd5413ddd29..591ad1aef58 100644
--- a/jstests/replsets/bulk_api_wc.js
+++ b/jstests/replsets/bulk_api_wc.js
@@ -6,146 +6,143 @@
(function() {
- jsTest.log("Starting bulk api write concern tests...");
-
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
-
- // Start a 2-node replica set with no journal
- // Allows testing immediate write concern failures and wc application failures
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet({nojournal: ""});
- rst.initiate();
- var mongod = rst.getPrimary();
- var coll = mongod.getCollection("test.bulk_api_wc");
-
- var executeTests = function() {
-
- // Create a unique index, legacy writes validate too early to use invalid documents for
- // write
- // error testing
- coll.ensureIndex({a: 1}, {unique: true});
-
- //
- // Ordered
- //
-
- //
- // Fail due to nojournal
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- assert.throws(function() {
- bulk.execute({j: true});
- });
-
- //
- // Fail due to unrecognized write concern field.
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({x: 1});
- });
- assert.eq(
- ErrorCodes.FailedToParse, result.code, 'unexpected error code: ' + tojson(result));
- assert.eq('unrecognized write concern field: x',
- result.errmsg,
- 'unexpected error message: ' + tojson(result));
-
- //
- // Fail with write error, no write concern error even though it would fail on apply for
- // ordered
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert(!result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 2);
-
- //
- // Unordered
- //
-
- //
- // Fail with write error, write concern error reported when unordered
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert(result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 2);
-
- //
- // Fail with write error, write concern timeout reported when unordered
- // Note that wtimeout:true can only be reported when the batch is all the same, so there's
- // not
- // multiple wc errors
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({w: 3, wtimeout: 1});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert.eq(100, result.getWriteConcernError().code);
- assert.eq(coll.find().itcount(), 2);
-
- //
- // Fail with write error and upserted, write concern error reported when unordered
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.find({a: 3}).upsert().updateOne({a: 3});
- bulk.insert({a: 3});
- var result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.nUpserted, 1);
- assert.eq(result.getUpsertedIds()[0].index, 2);
- assert.eq(result.getWriteErrors()[0].index, 3);
- assert(result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 3);
- };
-
- // Use write commands
- coll.getMongo().useWriteCommands = function() {
- return true;
- };
- executeTests();
-
- // FAILING currently due to incorrect batch api reading of GLE
- // Use legacy opcodes
- coll.getMongo().useWriteCommands = function() {
- return false;
- };
- executeTests();
-
- jsTest.log("DONE bulk api wc tests");
- rst.stopSet();
-
+jsTest.log("Starting bulk api write concern tests...");
+
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
+
+// Start a 2-node replica set with no journal
+// Allows testing immediate write concern failures and wc application failures
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet({nojournal: ""});
+rst.initiate();
+var mongod = rst.getPrimary();
+var coll = mongod.getCollection("test.bulk_api_wc");
+
+var executeTests = function() {
+    // Create a unique index; legacy writes validate too early to use invalid
+    // documents for write-error testing.
+ coll.ensureIndex({a: 1}, {unique: true});
+
+ //
+ // Ordered
+ //
+
+ //
+ // Fail due to nojournal
+ coll.remove({});
+ var bulk = coll.initializeOrderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ assert.throws(function() {
+ bulk.execute({j: true});
+ });
+
+ //
+ // Fail due to unrecognized write concern field.
+ coll.remove({});
+ var bulk = coll.initializeOrderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({x: 1});
+ });
+ assert.eq(ErrorCodes.FailedToParse, result.code, 'unexpected error code: ' + tojson(result));
+ assert.eq('unrecognized write concern field: x',
+ result.errmsg,
+ 'unexpected error message: ' + tojson(result));
+
+ //
+    // Fail with write error; no write concern error even though it would fail on
+    // apply for ordered writes.
+ coll.remove({});
+ var bulk = coll.initializeOrderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.getWriteErrors()[0].index, 2);
+ assert(!result.getWriteConcernError());
+ assert.eq(coll.find().itcount(), 2);
+
+ //
+ // Unordered
+ //
+
+ //
+ // Fail with write error, write concern error reported when unordered
+ coll.remove({});
+ var bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.getWriteErrors()[0].index, 2);
+ assert(result.getWriteConcernError());
+ assert.eq(coll.find().itcount(), 2);
+
+ //
+ // Fail with write error, write concern timeout reported when unordered
+    // Note that a wtimeout error can only be reported when the batch is all the
+    // same, so there are not multiple wc errors.
+ coll.remove({});
+ var bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 3, wtimeout: 1});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.getWriteErrors()[0].index, 2);
+ assert.eq(100, result.getWriteConcernError().code);
+ assert.eq(coll.find().itcount(), 2);
+
+ //
+ // Fail with write error and upserted, write concern error reported when unordered
+ coll.remove({});
+ var bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.find({a: 3}).upsert().updateOne({a: 3});
+ bulk.insert({a: 3});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.nUpserted, 1);
+ assert.eq(result.getUpsertedIds()[0].index, 2);
+ assert.eq(result.getWriteErrors()[0].index, 3);
+ assert(result.getWriteConcernError());
+ assert.eq(coll.find().itcount(), 3);
+};
+
+// Use write commands
+coll.getMongo().useWriteCommands = function() {
+ return true;
+};
+executeTests();
+
+// FAILING currently due to incorrect batch api reading of GLE
+// Use legacy opcodes
+coll.getMongo().useWriteCommands = function() {
+ return false;
+};
+executeTests();
+
+jsTest.log("DONE bulk api wc tests");
+rst.stopSet();
})();
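The ordered/unordered split above matters because an ordered bulk op stops at the first write error while an unordered op attempts every document, so only unordered batches can report both a write error and a write concern error. A minimal sketch, assuming a duplicate _id conflict:

    // Unordered: the duplicate is reported but the batch continues.
    var bulk = db.coll.initializeUnorderedBulkOp();
    bulk.insert({_id: 1});
    bulk.insert({_id: 1});  // duplicate key error, does not stop the batch
    bulk.insert({_id: 2});
    var err = assert.throws(function() {
        bulk.execute();
    });
    assert.eq(err.getWriteErrors().length, 1);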
diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js
index 835c707f9fa..f1a63ea683e 100644
--- a/jstests/replsets/capped_insert_order.js
+++ b/jstests/replsets/capped_insert_order.js
@@ -2,48 +2,48 @@
// See SERVER-21483.
(function() {
- "use strict";
-
- var replTest = new ReplSetTest({name: 'capped_insert_order', nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var slave = replTest._slaves[0];
-
- var dbName = "db";
- var masterDb = master.getDB(dbName);
- var slaveDb = slave.getDB(dbName);
-
- var collectionName = "collection";
- var masterColl = masterDb[collectionName];
- var slaveColl = slaveDb[collectionName];
-
- // Making a large capped collection to ensure that every document fits.
- masterDb.createCollection(collectionName, {capped: true, size: 1024 * 1024});
-
- // Insert 1000 docs with _id from 0 to 999 inclusive.
- const nDocuments = 1000;
- var batch = masterColl.initializeOrderedBulkOp();
- for (var i = 0; i < nDocuments; i++) {
- batch.insert({_id: i});
- }
- assert.writeOK(batch.execute());
- replTest.awaitReplication();
-
- function checkCollection(coll) {
- assert.eq(coll.find().itcount(), nDocuments);
-
- var i = 0;
- coll.find().forEach(function(doc) {
- assert.eq(doc._id, i);
- i++;
- });
- assert.eq(i, nDocuments);
- }
-
- checkCollection(masterColl);
- checkCollection(slaveColl);
-
- replTest.stopSet();
+"use strict";
+
+var replTest = new ReplSetTest({name: 'capped_insert_order', nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var slave = replTest._slaves[0];
+
+var dbName = "db";
+var masterDb = master.getDB(dbName);
+var slaveDb = slave.getDB(dbName);
+
+var collectionName = "collection";
+var masterColl = masterDb[collectionName];
+var slaveColl = slaveDb[collectionName];
+
+// Making a large capped collection to ensure that every document fits.
+masterDb.createCollection(collectionName, {capped: true, size: 1024 * 1024});
+
+// Insert 1000 docs with _id from 0 to 999 inclusive.
+const nDocuments = 1000;
+var batch = masterColl.initializeOrderedBulkOp();
+for (var i = 0; i < nDocuments; i++) {
+ batch.insert({_id: i});
+}
+assert.writeOK(batch.execute());
+replTest.awaitReplication();
+
+function checkCollection(coll) {
+ assert.eq(coll.find().itcount(), nDocuments);
+
+ var i = 0;
+ coll.find().forEach(function(doc) {
+ assert.eq(doc._id, i);
+ i++;
+ });
+ assert.eq(i, nDocuments);
+}
+
+checkCollection(masterColl);
+checkCollection(slaveColl);
+
+replTest.stopSet();
})();
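The per-_id check above is valid because a natural-order scan of a capped collection returns documents in insertion order. A minimal sketch (collection name illustrative):

    // Capped collections preserve insertion order on natural-order scans.
    db.createCollection('capped_demo', {capped: true, size: 4096});
    assert.writeOK(db.capped_demo.insert({_id: 0}));
    assert.writeOK(db.capped_demo.insert({_id: 1}));
    assert.eq(db.capped_demo.find().toArray().map(d => d._id), [0, 1]);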
diff --git a/jstests/replsets/catchup.js b/jstests/replsets/catchup.js
index b93cd78cff1..c28f5f59a31 100644
--- a/jstests/replsets/catchup.js
+++ b/jstests/replsets/catchup.js
@@ -1,207 +1,207 @@
// Test the catch-up behavior of new primaries.
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- var name = "catch_up";
- var rst = new ReplSetTest({name: name, nodes: 3, useBridge: true, waitForKeys: true});
-
- rst.startSet();
- var conf = rst.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = {
- heartbeatIntervalMillis: 500,
- electionTimeoutMillis: 10000,
- catchUpTimeoutMillis: 4 * 60 * 1000
- };
- rst.initiate(conf);
- rst.awaitSecondaryNodes();
-
- var primary = rst.getPrimary();
- var primaryColl = primary.getDB("test").coll;
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 2},
- }
- };
- rst.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function stepUpNode(node) {
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- rst.awaitNodesAgreeOnPrimary(rst.kDefaultTimeoutMS, rst.nodes, rst.getNodeId(node));
- return node.adminCommand('replSetGetStatus').myState == ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, rst.kDefaultTimeoutMS);
-
- return node;
- }
-
- function checkOpInOplog(node, op, count) {
- node.getDB("admin").getMongo().setSlaveOk();
- var oplog = node.getDB("local")['oplog.rs'];
- var oplogArray = oplog.find().toArray();
- assert.eq(oplog.count(op), count, "op: " + tojson(op) + ", oplog: " + tojson(oplogArray));
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+var name = "catch_up";
+var rst = new ReplSetTest({name: name, nodes: 3, useBridge: true, waitForKeys: true});
+
+rst.startSet();
+var conf = rst.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = {
+ heartbeatIntervalMillis: 500,
+ electionTimeoutMillis: 10000,
+ catchUpTimeoutMillis: 4 * 60 * 1000
+};
+rst.initiate(conf);
+rst.awaitSecondaryNodes();
+
+var primary = rst.getPrimary();
+var primaryColl = primary.getDB("test").coll;
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 2},
}
-
- // Stop replication on secondaries, do writes and step up one of the secondaries.
- //
- // The old primary has extra writes that are not replicated to the other nodes yet,
- // but the new primary steps up, getting the vote from the the third node "voter".
- function stopReplicationAndEnforceNewPrimaryToCatchUp() {
- // Write documents that cannot be replicated to secondaries in time.
- var oldSecondaries = rst.getSecondaries();
- var oldPrimary = rst.getPrimary();
- stopServerReplication(oldSecondaries);
- for (var i = 0; i < 3; i++) {
- assert.writeOK(oldPrimary.getDB("test").foo.insert({x: i}));
- }
- var latestOpOnOldPrimary = getLatestOp(oldPrimary);
- // New primary wins immediately, but needs to catch up.
- var newPrimary = stepUpNode(oldSecondaries[0]);
- var latestOpOnNewPrimary = getLatestOp(newPrimary);
- // Check this node is not writable.
- assert.eq(newPrimary.getDB("test").isMaster().ismaster, false);
-
- return {
- oldSecondaries: oldSecondaries,
- oldPrimary: oldPrimary,
- newPrimary: newPrimary,
- voter: oldSecondaries[1],
- latestOpOnOldPrimary: latestOpOnOldPrimary,
- latestOpOnNewPrimary: latestOpOnNewPrimary
- };
- }
-
- function reconfigElectionAndCatchUpTimeout(electionTimeout, catchupTimeout) {
- // Reconnect all nodes to make sure reconfig succeeds.
- rst.nodes.forEach(reconnect);
- // Reconfigure replica set to decrease catchup timeout.
- var newConfig = rst.getReplSetConfigFromNode();
- newConfig.version++;
- newConfig.settings.catchUpTimeoutMillis = catchupTimeout;
- newConfig.settings.electionTimeoutMillis = electionTimeout;
- reconfig(rst, newConfig);
- rst.awaitReplication();
- rst.awaitNodesAgreeOnPrimary();
+};
+rst.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
+
+function stepUpNode(node) {
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ rst.awaitNodesAgreeOnPrimary(rst.kDefaultTimeoutMS, rst.nodes, rst.getNodeId(node));
+ return node.adminCommand('replSetGetStatus').myState == ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, rst.kDefaultTimeoutMS);
+
+ return node;
+}
+
+function checkOpInOplog(node, op, count) {
+ node.getDB("admin").getMongo().setSlaveOk();
+ var oplog = node.getDB("local")['oplog.rs'];
+ var oplogArray = oplog.find().toArray();
+ assert.eq(oplog.count(op), count, "op: " + tojson(op) + ", oplog: " + tojson(oplogArray));
+}
+
+// Stop replication on secondaries, do writes and step up one of the secondaries.
+//
+// The old primary has extra writes that are not replicated to the other nodes yet,
+// but the new primary steps up, getting the vote from the third node "voter".
+function stopReplicationAndEnforceNewPrimaryToCatchUp() {
+ // Write documents that cannot be replicated to secondaries in time.
+ var oldSecondaries = rst.getSecondaries();
+ var oldPrimary = rst.getPrimary();
+ stopServerReplication(oldSecondaries);
+ for (var i = 0; i < 3; i++) {
+ assert.writeOK(oldPrimary.getDB("test").foo.insert({x: i}));
}
+ var latestOpOnOldPrimary = getLatestOp(oldPrimary);
+ // New primary wins immediately, but needs to catch up.
+ var newPrimary = stepUpNode(oldSecondaries[0]);
+ var latestOpOnNewPrimary = getLatestOp(newPrimary);
+ // Check this node is not writable.
+ assert.eq(newPrimary.getDB("test").isMaster().ismaster, false);
+
+ return {
+ oldSecondaries: oldSecondaries,
+ oldPrimary: oldPrimary,
+ newPrimary: newPrimary,
+ voter: oldSecondaries[1],
+ latestOpOnOldPrimary: latestOpOnOldPrimary,
+ latestOpOnNewPrimary: latestOpOnNewPrimary
+ };
+}
- rst.awaitReplication();
-
- jsTest.log("Case 1: The primary is up-to-date after refreshing heartbeats.");
- // Should complete transition to primary immediately.
- var newPrimary = stepUpNode(rst.getSecondary());
- // Should win an election and finish the transition very quickly.
- assert.eq(newPrimary, rst.getPrimary());
- rst.awaitReplication();
-
- jsTest.log("Case 2: The primary needs to catch up, succeeds in time.");
- var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Disable fail point to allow replication.
- restartServerReplication(stepUpResults.oldSecondaries);
- // getPrimary() blocks until the primary finishes drain mode.
- assert.eq(stepUpResults.newPrimary, rst.getPrimary());
- // Wait for all secondaries to catch up
- rst.awaitReplication();
- // Check the latest op on old primary is preserved on the new one.
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
- rst.awaitReplication();
-
- jsTest.log("Case 3: The primary needs to catch up, but has to change sync source to catch up.");
- // Reconfig the election timeout to be longer than 1 minute so that the third node will no
- // longer be blacklisted by the new primary if it happened to be at the beginning of the test.
- reconfigElectionAndCatchUpTimeout(3 * 60 * 1000, conf.settings.catchUpTimeoutMillis);
-
- stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Disable fail point on the voter. Wait until it catches up with the old primary.
- restartServerReplication(stepUpResults.voter);
- assert.commandWorked(
- stepUpResults.voter.adminCommand({replSetSyncFrom: stepUpResults.oldPrimary.host}));
- // Wait until the new primary knows the last applied optime on the voter, so it will keep
- // catching up after the old primary is disconnected.
- assert.soon(function() {
- var replSetStatus =
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetGetStatus: 1}));
- var voterStatus = replSetStatus.members.filter(m => m.name == stepUpResults.voter.host)[0];
- return rs.compareOpTimes(voterStatus.optime, stepUpResults.latestOpOnOldPrimary) == 0;
- });
- // Disconnect the new primary and the old one.
- stepUpResults.oldPrimary.disconnect(stepUpResults.newPrimary);
- // Disable the failpoint, the new primary should sync from the other secondary.
- restartServerReplication(stepUpResults.newPrimary);
- assert.eq(stepUpResults.newPrimary, rst.getPrimary());
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
- // Restore the broken connection
- stepUpResults.oldPrimary.reconnect(stepUpResults.newPrimary);
- rst.awaitReplication();
-
- jsTest.log("Case 4: The primary needs to catch up, fails due to timeout.");
- // Reconfig to make the catchup timeout shorter.
- reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, 10 * 1000);
-
- stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
- // Wait until the new primary completes the transition to primary and writes a no-op.
- checkLog.contains(stepUpResults.newPrimary, "Catchup timed out after becoming primary");
- restartServerReplication(stepUpResults.newPrimary);
- assert.eq(stepUpResults.newPrimary, rst.getPrimary());
-
- // Wait for the no-op "new primary" after winning an election, so that we know it has
- // finished transition to primary.
- assert.soon(function() {
- return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
- getLatestOp(stepUpResults.newPrimary)) < 0;
- });
- // The extra oplog entries on the old primary are not replicated to the new one.
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
- restartServerReplication(stepUpResults.voter);
- rst.awaitReplication();
-
- jsTest.log("Case 5: The primary needs to catch up with no timeout, then gets aborted.");
- // Reconfig to make the catchup timeout infinite.
- reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, -1);
- stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Abort catchup.
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetAbortPrimaryCatchUp: 1}));
-
- // Wait for the no-op "new primary" after winning an election, so that we know it has
- // finished transition to primary.
- assert.soon(function() {
- return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
- getLatestOp(stepUpResults.newPrimary)) < 0;
- });
- // The extra oplog entries on the old primary are not replicated to the new one.
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
- restartServerReplication(stepUpResults.oldSecondaries);
- rst.awaitReplication();
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
-
- jsTest.log("Case 6: The primary needs to catch up with no timeout, but steps down.");
- var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Step-down command should abort catchup.
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetStepDown: 60}));
-
- // Rename the primary.
- var steppedDownPrimary = stepUpResults.newPrimary;
- var newPrimary = rst.getPrimary();
- assert.neq(newPrimary, steppedDownPrimary);
-
- // Enable data replication on the stepped down primary and make sure it syncs old writes.
+function reconfigElectionAndCatchUpTimeout(electionTimeout, catchupTimeout) {
+ // Reconnect all nodes to make sure reconfig succeeds.
rst.nodes.forEach(reconnect);
- restartServerReplication(stepUpResults.oldSecondaries);
+ // Reconfigure replica set to decrease catchup timeout.
+ var newConfig = rst.getReplSetConfigFromNode();
+ newConfig.version++;
+ newConfig.settings.catchUpTimeoutMillis = catchupTimeout;
+ newConfig.settings.electionTimeoutMillis = electionTimeout;
+ reconfig(rst, newConfig);
rst.awaitReplication();
- checkOpInOplog(steppedDownPrimary, stepUpResults.latestOpOnOldPrimary, 1);
-
- rst.stopSet();
+ rst.awaitNodesAgreeOnPrimary();
+}
+
+rst.awaitReplication();
+
+jsTest.log("Case 1: The primary is up-to-date after refreshing heartbeats.");
+// Should complete transition to primary immediately.
+var newPrimary = stepUpNode(rst.getSecondary());
+// Should win an election and finish the transition very quickly.
+assert.eq(newPrimary, rst.getPrimary());
+rst.awaitReplication();
+
+jsTest.log("Case 2: The primary needs to catch up, succeeds in time.");
+var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Disable fail point to allow replication.
+restartServerReplication(stepUpResults.oldSecondaries);
+// getPrimary() blocks until the primary finishes drain mode.
+assert.eq(stepUpResults.newPrimary, rst.getPrimary());
+// Wait for all secondaries to catch up
+rst.awaitReplication();
+// Check the latest op on old primary is preserved on the new one.
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
+rst.awaitReplication();
+
+jsTest.log("Case 3: The primary needs to catch up, but has to change sync source to catch up.");
+// Reconfig the election timeout to be longer than 1 minute so that the third node will no
+// longer be blacklisted by the new primary if it happened to be blacklisted at the beginning
+// of the test.
+reconfigElectionAndCatchUpTimeout(3 * 60 * 1000, conf.settings.catchUpTimeoutMillis);
+
+stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Disable fail point on the voter. Wait until it catches up with the old primary.
+restartServerReplication(stepUpResults.voter);
+assert.commandWorked(
+ stepUpResults.voter.adminCommand({replSetSyncFrom: stepUpResults.oldPrimary.host}));
+// Wait until the new primary knows the last applied optime on the voter, so it will keep
+// catching up after the old primary is disconnected.
+assert.soon(function() {
+ var replSetStatus =
+ assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetGetStatus: 1}));
+ var voterStatus = replSetStatus.members.filter(m => m.name == stepUpResults.voter.host)[0];
+ return rs.compareOpTimes(voterStatus.optime, stepUpResults.latestOpOnOldPrimary) == 0;
+});
+// Disconnect the new primary and the old one.
+stepUpResults.oldPrimary.disconnect(stepUpResults.newPrimary);
+// Disable the failpoint, the new primary should sync from the other secondary.
+restartServerReplication(stepUpResults.newPrimary);
+assert.eq(stepUpResults.newPrimary, rst.getPrimary());
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
+// Restore the broken connection
+stepUpResults.oldPrimary.reconnect(stepUpResults.newPrimary);
+rst.awaitReplication();
+
+jsTest.log("Case 4: The primary needs to catch up, fails due to timeout.");
+// Reconfig to make the catchup timeout shorter.
+reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, 10 * 1000);
+
+stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+// Wait until the new primary completes the transition to primary and writes a no-op.
+checkLog.contains(stepUpResults.newPrimary, "Catchup timed out after becoming primary");
+restartServerReplication(stepUpResults.newPrimary);
+assert.eq(stepUpResults.newPrimary, rst.getPrimary());
+
+// Wait for the no-op "new primary" after winning an election, so that we know it has
+// finished transition to primary.
+assert.soon(function() {
+ return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
+ getLatestOp(stepUpResults.newPrimary)) < 0;
+});
+// The extra oplog entries on the old primary are not replicated to the new one.
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
+restartServerReplication(stepUpResults.voter);
+rst.awaitReplication();
+
+jsTest.log("Case 5: The primary needs to catch up with no timeout, then gets aborted.");
+// Reconfig to make the catchup timeout infinite.
+reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, -1);
+stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Abort catchup.
+assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetAbortPrimaryCatchUp: 1}));
+
+// Wait for the no-op "new primary" after winning an election, so that we know it has
+// finished transition to primary.
+assert.soon(function() {
+ return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
+ getLatestOp(stepUpResults.newPrimary)) < 0;
+});
+// The extra oplog entries on the old primary are not replicated to the new one.
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
+restartServerReplication(stepUpResults.oldSecondaries);
+rst.awaitReplication();
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
+
+jsTest.log("Case 6: The primary needs to catch up with no timeout, but steps down.");
+var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Step-down command should abort catchup.
+assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetStepDown: 60}));
+
+// Remember the stepped-down primary and look up the new one.
+var steppedDownPrimary = stepUpResults.newPrimary;
+var newPrimary = rst.getPrimary();
+assert.neq(newPrimary, steppedDownPrimary);
+
+// Enable data replication on the stepped down primary and make sure it syncs old writes.
+rst.nodes.forEach(reconnect);
+restartServerReplication(stepUpResults.oldSecondaries);
+rst.awaitReplication();
+checkOpInOplog(steppedDownPrimary, stepUpResults.latestOpOnOldPrimary, 1);
+
+rst.stopSet();
})();
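The stepUpNode helper above pairs replSetStepUp with a state check because step-up is asynchronous: the command only asks the node to run for election. A condensed sketch, where 'node' is an assumed connection to an electable secondary:

    // Request an election, then wait until the node reports PRIMARY.
    assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
    assert.soon(function() {
        return node.adminCommand('replSetGetStatus').myState ===
            ReplSetTest.State.PRIMARY;
    });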
diff --git a/jstests/replsets/catchup_takeover_one_high_priority.js b/jstests/replsets/catchup_takeover_one_high_priority.js
index 575407c19d2..10c6b99307f 100644
--- a/jstests/replsets/catchup_takeover_one_high_priority.js
+++ b/jstests/replsets/catchup_takeover_one_high_priority.js
@@ -14,86 +14,88 @@
// that it becomes primary.
(function() {
- 'use strict';
-
- load('jstests/replsets/rslib.js');
-
- var name = 'catchup_takeover_one_high_priority';
- var replSet = new ReplSetTest({name: name, nodes: 3, useBridge: true});
-
- var nodenames = replSet.nodeList();
- var nodes = replSet.startSet();
- replSet.initiateWithAnyNodeAsPrimary({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodenames[0]},
- {"_id": 1, "host": nodenames[1]},
- {"_id": 2, "host": nodenames[2], "priority": 2}
- ]
- });
-
- // Wait until node 2 becomes primary.
- replSet.waitForState(2, ReplSetTest.State.PRIMARY, replSet.kDefaultTimeoutMS);
- jsTestLog('node 2 is now primary');
-
- replSet.awaitReplication();
-
- // Stop replication and disconnect node 2 so that it cannot do a priority takeover.
- stopServerReplication(nodes[2]);
- nodes[2].disconnect(nodes[1]);
- nodes[2].disconnect(nodes[0]);
-
- // Ensure that node 0 becomes primary.
- assert.commandWorked(nodes[0].adminCommand({replSetStepUp: 1}));
- replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes.slice(0, 2));
- assert.eq(ReplSetTest.State.PRIMARY,
- assert.commandWorked(nodes[0].adminCommand('replSetGetStatus')).myState,
- nodes[0].host + " was not primary after step-up");
- jsTestLog('node 0 is now primary');
-
- // Sleep for a few seconds to ensure that node 2's optime is more than 2 seconds behind.
- // This will ensure it can't do a priority takeover until it catches up.
- sleep(3000);
-
- var primary = replSet.getPrimary();
- var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
-
- // Write something so that node 0 is ahead of node 1.
- stopServerReplication(nodes[1]);
- writeConcern = {writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
-
- nodes[2].reconnect(nodes[0]);
- nodes[2].reconnect(nodes[1]);
-
- // Step up a lagged node.
- assert.commandWorked(nodes[1].adminCommand({replSetStepUp: 1}));
- replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes);
- assert.eq(ReplSetTest.State.PRIMARY,
- assert.commandWorked(nodes[1].adminCommand('replSetGetStatus')).myState,
- nodes[1].host + " was not primary after step-up");
- jsTestLog('node 1 is now primary, but cannot accept writes');
-
- // Confirm that the most up-to-date node becomes primary
- // after the default catchup delay.
- replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
- jsTestLog('node 0 performed catchup takeover and is now primary');
-
- // Wait until the old primary steps down.
- replSet.awaitNodesAgreeOnPrimary();
-
- // Let the nodes catchup.
- restartServerReplication(nodes[1]);
- restartServerReplication(nodes[2]);
-
- // Confirm that the highest priority node becomes primary
- // after catching up.
- replSet.waitForState(2, ReplSetTest.State.PRIMARY, 30 * 1000);
- jsTestLog('node 2 performed priority takeover and is now primary');
-
- // Wait until the old primary steps down so the connections won't be closed during stopSet().
- replSet.waitForState(0, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
-
- replSet.stopSet();
+'use strict';
+
+load('jstests/replsets/rslib.js');
+
+var name = 'catchup_takeover_one_high_priority';
+var replSet = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+
+var nodenames = replSet.nodeList();
+var nodes = replSet.startSet();
+replSet.initiateWithAnyNodeAsPrimary({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodenames[0]},
+ {"_id": 1, "host": nodenames[1]},
+ {"_id": 2, "host": nodenames[2], "priority": 2}
+ ]
+});
+
+// Wait until node 2 becomes primary.
+replSet.waitForState(2, ReplSetTest.State.PRIMARY, replSet.kDefaultTimeoutMS);
+jsTestLog('node 2 is now primary');
+
+replSet.awaitReplication();
+
+// Stop replication and disconnect node 2 so that it cannot do a priority takeover.
+stopServerReplication(nodes[2]);
+nodes[2].disconnect(nodes[1]);
+nodes[2].disconnect(nodes[0]);
+
+// Ensure that node 0 becomes primary.
+assert.commandWorked(nodes[0].adminCommand({replSetStepUp: 1}));
+replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes.slice(0, 2));
+assert.eq(ReplSetTest.State.PRIMARY,
+ assert.commandWorked(nodes[0].adminCommand('replSetGetStatus')).myState,
+ nodes[0].host + " was not primary after step-up");
+jsTestLog('node 0 is now primary');
+
+// Sleep for a few seconds to ensure that node 2's optime is more than 2 seconds behind.
+// This will ensure it can't do a priority takeover until it catches up.
+sleep(3000);
+
+var primary = replSet.getPrimary();
+var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
+assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
+
+// Write something so that node 0 is ahead of node 1.
+stopServerReplication(nodes[1]);
+writeConcern = {
+ writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}
+};
+assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
+
+nodes[2].reconnect(nodes[0]);
+nodes[2].reconnect(nodes[1]);
+
+// Step up a lagged node.
+assert.commandWorked(nodes[1].adminCommand({replSetStepUp: 1}));
+replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes);
+assert.eq(ReplSetTest.State.PRIMARY,
+ assert.commandWorked(nodes[1].adminCommand('replSetGetStatus')).myState,
+ nodes[1].host + " was not primary after step-up");
+jsTestLog('node 1 is now primary, but cannot accept writes');
+
+// Confirm that the most up-to-date node becomes primary
+// after the default catchup delay.
+replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
+jsTestLog('node 0 performed catchup takeover and is now primary');
+
+// Wait until the old primary steps down.
+replSet.awaitNodesAgreeOnPrimary();
+
+// Let the nodes catch up.
+restartServerReplication(nodes[1]);
+restartServerReplication(nodes[2]);
+
+// Confirm that the highest priority node becomes primary
+// after catching up.
+replSet.waitForState(2, ReplSetTest.State.PRIMARY, 30 * 1000);
+jsTestLog('node 2 performed priority takeover and is now primary');
+
+// Wait until the old primary steps down so the connections won't be closed during stopSet().
+replSet.waitForState(0, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
+
+replSet.stopSet();
})();
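
For reference, the step-up assertion repeated in this test reduces to the following minimal sketch; the set name, the use of getSecondary, and the message text here are illustrative, not taken from the patch:

load('jstests/replsets/rslib.js');
var rst = new ReplSetTest({name: 'step_up_sketch', nodes: 2});
rst.startSet();
rst.initiate();
rst.awaitReplication();  // The candidate must be caught up before it can win.
var secondary = rst.getSecondary();
// Ask the secondary to run for election, then confirm it reports itself PRIMARY.
assert.commandWorked(secondary.adminCommand({replSetStepUp: 1}));
rst.awaitNodesAgreeOnPrimary();
assert.eq(ReplSetTest.State.PRIMARY,
          assert.commandWorked(secondary.adminCommand('replSetGetStatus')).myState,
          secondary.host + ' was not primary after step-up');
rst.stopSet();
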
diff --git a/jstests/replsets/catchup_takeover_two_nodes_ahead.js b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
index 69dddd079a1..34cf44ec8f7 100644
--- a/jstests/replsets/catchup_takeover_two_nodes_ahead.js
+++ b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
@@ -11,50 +11,54 @@
// Confirm that the most up-to-date node becomes primary.
(function() {
- 'use strict';
-
- load('jstests/replsets/rslib.js');
-
- var name = 'catchup_takeover_two_nodes_ahead';
- var replSet = new ReplSetTest({name: name, nodes: 5});
- var nodes = replSet.startSet();
- var config = replSet.getReplSetConfig();
- // Prevent nodes from syncing from other secondaries.
- config.settings = {chainingAllowed: false};
- replSet.initiate(config);
- replSet.awaitReplication();
-
- // Write something so that nodes 0 and 1 are ahead.
- stopServerReplication(nodes.slice(2, 5));
- var primary = replSet.getPrimary();
- var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
-
- // Write something so that node 0 is ahead of node 1.
- stopServerReplication(nodes[1]);
- writeConcern = {writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
-
- // Step up one of the lagged nodes.
- assert.commandWorked(nodes[2].adminCommand({replSetStepUp: 1}));
- replSet.awaitNodesAgreeOnPrimary();
- assert.eq(ReplSetTest.State.PRIMARY,
- assert.commandWorked(nodes[2].adminCommand('replSetGetStatus')).myState,
- nodes[2].host + " was not primary after step-up");
- jsTestLog('node 2 is now primary, but cannot accept writes');
-
- // Make sure that node 2 cannot write anything. Because it is lagged and replication
- // has been stopped, it shouldn't be able to finish catchup and start accepting writes.
- assert.commandFailedWithCode(nodes[2].getDB(name).bar.insert({z: 100}, writeConcern),
- ErrorCodes.NotMaster);
-
- // Confirm that the most up-to-date node becomes primary
- // after the default catchup delay.
- replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
-
- // Wait until the old primary steps down so the connections won't be closed.
- replSet.waitForState(2, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
- // Let the nodes catch up.
- restartServerReplication(nodes.slice(1, 5));
- replSet.stopSet();
+'use strict';
+
+load('jstests/replsets/rslib.js');
+
+var name = 'catchup_takeover_two_nodes_ahead';
+var replSet = new ReplSetTest({name: name, nodes: 5});
+var nodes = replSet.startSet();
+var config = replSet.getReplSetConfig();
+// Prevent nodes from syncing from other secondaries.
+config.settings = {
+ chainingAllowed: false
+};
+replSet.initiate(config);
+replSet.awaitReplication();
+
+// Write something so that nodes 0 and 1 are ahead.
+stopServerReplication(nodes.slice(2, 5));
+var primary = replSet.getPrimary();
+var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
+assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
+
+// Write something so that node 0 is ahead of node 1.
+stopServerReplication(nodes[1]);
+writeConcern = {
+ writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}
+};
+assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
+
+// Step up one of the lagged nodes.
+assert.commandWorked(nodes[2].adminCommand({replSetStepUp: 1}));
+replSet.awaitNodesAgreeOnPrimary();
+assert.eq(ReplSetTest.State.PRIMARY,
+ assert.commandWorked(nodes[2].adminCommand('replSetGetStatus')).myState,
+ nodes[2].host + " was not primary after step-up");
+jsTestLog('node 2 is now primary, but cannot accept writes');
+
+// Make sure that node 2 cannot write anything. Because it is lagged and replication
+// has been stopped, it shouldn't be able to finish catchup and start accepting writes.
+assert.commandFailedWithCode(nodes[2].getDB(name).bar.insert({z: 100}, writeConcern),
+ ErrorCodes.NotMaster);
+
+// Confirm that the most up-to-date node becomes primary
+// after the default catchup delay.
+replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
+
+// Wait until the old primary steps down so the connections won't be closed.
+replSet.waitForState(2, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
+// Let the nodes catch up.
+restartServerReplication(nodes.slice(1, 5));
+replSet.stopSet();
})();
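
The optime gaps this test depends on come from pausing replication on a subset of nodes and then writing with a write concern that only the still-replicating nodes can satisfy. A stripped-down sketch of that pattern, with an illustrative set name, database, and collection:

load('jstests/libs/write_concern_util.js');  // for stop/restartServerReplication.
var rst = new ReplSetTest({name: 'lag_sketch', nodes: 3});
rst.startSet();
rst.initiate();
var primary = rst.getPrimary();
// Freeze replication on node 2 so only the other two nodes can acknowledge the write.
stopServerReplication(rst.nodes[2]);
assert.writeOK(primary.getDB('test').coll.insert(
    {x: 1}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
// Node 2 is now behind the other two; resume replication and let it catch up.
restartServerReplication(rst.nodes[2]);
rst.awaitReplication();
rst.stopSet();
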
diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js
index 111a12887df..1569cad2f71 100644
--- a/jstests/replsets/chaining_removal.js
+++ b/jstests/replsets/chaining_removal.js
@@ -1,73 +1,73 @@
// ensure removing a chained node does not break reporting of replication progress (SERVER-15849)
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
+"use strict";
+load("jstests/replsets/rslib.js");
- var numNodes = 5;
- var host = getHostName();
- var name = "chaining_removal";
+var numNodes = 5;
+var host = getHostName();
+var name = "chaining_removal";
- var replTest = new ReplSetTest({name: name, nodes: numNodes});
- var nodes = replTest.startSet();
- var port = replTest.ports;
- replTest.initiate({
- _id: name,
- members: [
- {_id: 0, host: nodes[0].host, priority: 3},
- {_id: 1, host: nodes[1].host, priority: 0},
- {_id: 2, host: nodes[2].host, priority: 0},
- {_id: 3, host: nodes[3].host, priority: 0},
- {_id: 4, host: nodes[4].host, priority: 0},
- ],
- });
- replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 0);
- var primary = replTest.getPrimary();
- replTest.awaitReplication();
+var replTest = new ReplSetTest({name: name, nodes: numNodes});
+var nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: nodes[0].host, priority: 3},
+ {_id: 1, host: nodes[1].host, priority: 0},
+ {_id: 2, host: nodes[2].host, priority: 0},
+ {_id: 3, host: nodes[3].host, priority: 0},
+ {_id: 4, host: nodes[4].host, priority: 0},
+ ],
+});
+replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 0);
+var primary = replTest.getPrimary();
+replTest.awaitReplication();
- // When setting up chaining on slow machines, we do not want slow writes or delayed heartbeats
- // to cause our nodes to invalidate the sync source provided in the 'replSetSyncFrom' command.
- // To achieve this, we disable the server parameter 'maxSyncSourceLagSecs' (see
- // repl_settings_init.cpp and TopologyCoordinatorImpl::Options) in
- // TopologyCoordinatorImpl::shouldChangeSyncSource().
- assert.commandWorked(nodes[1].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
- assert.commandWorked(nodes[4].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
+// When setting up chaining on slow machines, we do not want slow writes or delayed heartbeats
+// to cause our nodes to invalidate the sync source provided in the 'replSetSyncFrom' command.
+// To achieve this, we disable the server parameter 'maxSyncSourceLagSecs' (see
+// repl_settings_init.cpp and TopologyCoordinatorImpl::Options) in
+// TopologyCoordinatorImpl::shouldChangeSyncSource().
+assert.commandWorked(nodes[1].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
+assert.commandWorked(nodes[4].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
- // Force node 1 to sync directly from node 0.
- syncFrom(nodes[1], nodes[0], replTest);
- // Force node 4 to sync through node 1.
- syncFrom(nodes[4], nodes[1], replTest);
+// Force node 1 to sync directly from node 0.
+syncFrom(nodes[1], nodes[0], replTest);
+// Force node 4 to sync through node 1.
+syncFrom(nodes[4], nodes[1], replTest);
- // write that should reach all nodes
- var timeout = ReplSetTest.kDefaultTimeoutMS;
- var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
+// write that should reach all nodes
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
+assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
- // Re-enable 'maxSyncSourceLagSecs' checking on sync source.
- assert.commandWorked(nodes[1].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
- assert.commandWorked(nodes[4].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
+// Re-enable 'maxSyncSourceLagSecs' checking on sync source.
+assert.commandWorked(nodes[1].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
+assert.commandWorked(nodes[4].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
- var config = primary.getDB("local").system.replset.findOne();
- config.members.pop();
- config.version++;
- // remove node 4
- replTest.stop(4);
- try {
- primary.adminCommand({replSetReconfig: config});
- } catch (e) {
- print("error: " + e);
- }
+var config = primary.getDB("local").system.replset.findOne();
+config.members.pop();
+config.version++;
+// remove node 4
+replTest.stop(4);
+try {
+ primary.adminCommand({replSetReconfig: config});
+} catch (e) {
+ print("error: " + e);
+}
- // ensure writing to all four nodes still works
- primary = replTest.getPrimary();
- const liveSlaves = [nodes[1], nodes[2], nodes[3]];
- replTest.awaitReplication(null, null, liveSlaves);
- options.writeConcern.w = 4;
- assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options));
+// ensure writing to all four nodes still works
+primary = replTest.getPrimary();
+const liveSlaves = [nodes[1], nodes[2], nodes[3]];
+replTest.awaitReplication(null, null, liveSlaves);
+options.writeConcern.w = 4;
+assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options));
- replTest.stopSet();
+replTest.stopSet();
}());
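
The reconfig-based removal above follows a common shape: fetch the current config document from local.system.replset, drop the member, bump the version, and tolerate the connection reset that replSetReconfig can cause. A compact sketch under those assumptions (set name and stopped node index are illustrative):

var rst = new ReplSetTest({name: 'reconfig_sketch', nodes: 3});
rst.startSet();
rst.initiate();
var primary = rst.getPrimary();
var config = primary.getDB('local').system.replset.findOne();
config.members.pop();  // Remove the last member from the config.
config.version++;      // Every reconfig must increase the config version.
rst.stop(2);           // Shut the removed node down first.
try {
    primary.adminCommand({replSetReconfig: config});
} catch (e) {
    // The primary may briefly drop connections while installing the new config.
    print('error: ' + e);
}
rst.stopSet();
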
diff --git a/jstests/replsets/change_stream_speculative_majority.js b/jstests/replsets/change_stream_speculative_majority.js
index f8833c7963f..fb37968184e 100644
--- a/jstests/replsets/change_stream_speculative_majority.js
+++ b/jstests/replsets/change_stream_speculative_majority.js
@@ -4,83 +4,81 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
- const name = "change_stream_speculative_majority";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
-
- const dbName = name;
- const collName = "coll";
-
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
-
- // Open a change stream.
- let res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Insert a document on primary and let it majority commit.
- assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Receive the first change event.
- res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
-
- // Save the resume token.
- let resumeToken = changes[0]["_id"];
-
- // This query should time out waiting for new results and return an empty batch.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.eq(res.cursor.nextBatch, []);
-
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
-
- // Do a new write on primary.
- assert.commandWorked(primaryColl.insert({_id: 2}));
-
- // The change stream query should time out waiting for the new result to majority commit.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // An aggregate trying to resume a stream that includes the change should also time out.
- res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {},
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // Resume the stream after restarting replication. We should now be able to see the new event.
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- // Re-open the stream, and receive the new event.
- res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
- assert.commandWorked(res);
- changes = res.cursor.firstBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 2});
- assert.eq(changes[0]["operationType"], "insert");
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+
+const name = "change_stream_speculative_majority";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = name;
+const collName = "coll";
+
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+
+// Open a change stream.
+let res = primaryDB.runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Insert a document on primary and let it majority commit.
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Receive the first change event.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+
+// Save the resume token.
+let resumeToken = changes[0]["_id"];
+
+// This query should time out waiting for new results and return an empty batch.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.eq(res.cursor.nextBatch, []);
+
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
+
+// Do a new write on primary.
+assert.commandWorked(primaryColl.insert({_id: 2}));
+
+// The change stream query should time out waiting for the new result to majority commit.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// An aggregate trying to resume a stream that includes the change should also time out.
+res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {},
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// Resume the stream after restarting replication. We should now be able to see the new event.
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+// Re-open the stream, and receive the new event.
+res = primaryDB.runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
+assert.commandWorked(res);
+changes = res.cursor.firstBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 2});
+assert.eq(changes[0]["operationType"], "insert");
+
+replTest.stopSet();
})();
\ No newline at end of file
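
The raw-command form of a change stream used throughout these tests is: open a cursor with an aggregate command whose pipeline starts with $changeStream, then drain events with getMore. A minimal sketch, with illustrative database and collection names:

var rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
var testDB = rst.getPrimary().getDB('test');
var res = assert.commandWorked(
    testDB.runCommand({aggregate: 'coll', pipeline: [{$changeStream: {}}], cursor: {}}));
var cursorId = res.cursor.id;
// A majority-committed insert should surface as an 'insert' event on the next getMore.
assert.commandWorked(testDB.coll.insert({_id: 1}, {writeConcern: {w: 'majority'}}));
res = assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: 'coll'}));
assert.eq(res.cursor.nextBatch[0].operationType, 'insert');
rst.stopSet();
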
diff --git a/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js b/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
index b9a55d1c3fa..8b1e9682403 100644
--- a/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
+++ b/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
@@ -7,47 +7,47 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- const replTest = new ReplSetTest({
- name: "replset",
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({
+ name: "replset",
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
- let secondaryDB = secondary.getDB(dbName);
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+let secondaryDB = secondary.getDB(dbName);
- // Insert some documents on the primary that we can index.
- var bulk = primaryColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- let doc = {};
- bulk.insert({a: i, b: i, c: i, d: i, e: i});
- }
- assert.commandWorked(bulk.execute());
+// Insert some documents on the primary that we can index.
+var bulk = primaryColl.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ let doc = {};
+ bulk.insert({a: i, b: i, c: i, d: i, e: i});
+}
+assert.commandWorked(bulk.execute());
- // Start several index builds on the primary. This should make it likely that index builds are
- // in progress on the secondary while doing reads below.
- primaryColl.createIndex({a: 1});
- primaryColl.createIndex({b: 1});
- primaryColl.createIndex({c: 1});
- primaryColl.createIndex({d: 1});
- primaryColl.createIndex({e: 1});
+// Start several index builds on the primary. This should make it likely that index builds are
+// in progress on the secondary while doing reads below.
+primaryColl.createIndex({a: 1});
+primaryColl.createIndex({b: 1});
+primaryColl.createIndex({c: 1});
+primaryColl.createIndex({d: 1});
+primaryColl.createIndex({e: 1});
- // Do a bunch of change stream reads against the secondary. We are not worried about the
- // responses, since we are only verifying that the server doesn't crash.
- for (var i = 0; i < 20; i++) {
- assert.commandWorked(secondaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- }
+// Do a bunch of change stream reads against the secondary. We are not worried about the
+// responses, since we are only verifying that the server doesn't crash.
+for (var i = 0; i < 20; i++) {
+ assert.commandWorked(
+ secondaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+}
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
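
The seed data here is written through the shell's unordered bulk API, which batches many inserts into few round trips. In isolation the pattern looks like this, assuming a shell `db` connected to any server (collection and field names are illustrative):

var bulk = db.coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
    // Unordered ops may be applied in any order and continue past individual errors.
    bulk.insert({a: i, b: i});
}
assert.commandWorked(bulk.execute());
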
diff --git a/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js b/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
index 2b3c503e4bd..88e3bfa47a7 100644
--- a/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
+++ b/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
@@ -6,102 +6,101 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
- load("jstests/libs/check_log.js"); // For checkLog.
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+load("jstests/libs/check_log.js"); // For checkLog.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
- const name = "change_stream_speculative_majority_lastApplied_lag";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const name = "change_stream_speculative_majority_lastApplied_lag";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
- // Do a couple of operations on the primary and let them both majority commit. Later on we will
- // receive both of these operations in a change stream.
- let res = assert.commandWorked(primaryColl.runCommand(
- "insert", {documents: [{_id: 1, v: 0}], writeConcern: {w: "majority"}}));
- assert.commandWorked(
- primaryColl.update({_id: 1}, {$set: {v: 1}}, {writeConcern: {w: "majority"}}));
+// Do a couple of operations on the primary and let them both majority commit. Later on we will
+// receive both of these operations in a change stream.
+let res = assert.commandWorked(
+ primaryColl.runCommand("insert", {documents: [{_id: 1, v: 0}], writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryColl.update({_id: 1}, {$set: {v: 1}}, {writeConcern: {w: "majority"}}));
- // Save this operation time so we can start a change stream from here.
- let startOperTime = res.operationTime;
+// Save this operation time so we can start a change stream from here.
+let startOperTime = res.operationTime;
- // Make the primary hang after it has completed a write but before it has advanced lastApplied
- // for that write.
- primaryDB.adminCommand(
- {configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "alwaysOn"});
+// Make the primary hang after it has completed a write but before it has advanced lastApplied
+// for that write.
+primaryDB.adminCommand(
+ {configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "alwaysOn"});
- // Function which will be used by the background thread to perform an update on the specified
- // host, database, and collection.
- function doUpdate(host, dbName, collName, query, update) {
- let hostDB = (new Mongo(host)).getDB(dbName);
- assert.commandWorked(hostDB[collName].update(query, update));
- }
+// Function which will be used by the background thread to perform an update on the specified
+// host, database, and collection.
+function doUpdate(host, dbName, collName, query, update) {
+ let hostDB = (new Mongo(host)).getDB(dbName);
+ assert.commandWorked(hostDB[collName].update(query, update));
+}
- // Do a document update on primary, but don't wait for it to majority commit. The write should
- // hang due to the enabled failpoint.
- jsTestLog("Starting update on primary.");
- var primaryWrite =
- new ScopedThread(doUpdate, primary.host, dbName, collName, {_id: 1}, {$set: {v: 2}});
- primaryWrite.start();
+// Do a document update on primary, but don't wait for it to majority commit. The write should
+// hang due to the enabled failpoint.
+jsTestLog("Starting update on primary.");
+var primaryWrite =
+ new ScopedThread(doUpdate, primary.host, dbName, collName, {_id: 1}, {$set: {v: 2}});
+primaryWrite.start();
- // Wait for the fail point to be hit. By the time the primary hits this fail point, the update
- // should be visible. 'lastApplied', however, has not yet been advanced. We check both the
- // document state and the logs to make sure we hit the failpoint for the correct operation.
- assert.soon(() => (primaryColl.findOne({_id: 1}).v === 2));
- checkLog.contains(primary, 'hangBeforeLogOpAdvancesLastApplied fail point enabled.');
+// Wait for the fail point to be hit. By the time the primary hits this fail point, the update
+// should be visible. 'lastApplied', however, has not yet been advanced. We check both the
+// document state and the logs to make sure we hit the failpoint for the correct operation.
+assert.soon(() => (primaryColl.findOne({_id: 1}).v === 2));
+checkLog.contains(primary, 'hangBeforeLogOpAdvancesLastApplied fail point enabled.');
- // Open a change stream on the primary. The stream should only return the initial insert and the
- // first of the two update events, since the second update is not yet majority-committed.
- // Despite the fact that the effects of the latter update are already visible to local readers,
- // speculative majority will read at min(lastApplied, allCommitted), and so the change stream's
- // 'fullDocument' lookup should also *not* return the second update's uncommitted changes.
- jsTestLog("Opening a change stream on the primary.");
- const cst = new ChangeStreamTest(primaryDB);
- let cursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {startAtOperationTime: startOperTime, fullDocument: "updateLookup"}}],
- collection: collName
- });
+// Open a change stream on the primary. The stream should only return the initial insert and the
+// first of the two update events, since the second update is not yet majority-committed.
+// Despite the fact that the effects of the latter update are already visible to local readers,
+// speculative majority will read at min(lastApplied, allCommitted), and so the change stream's
+// 'fullDocument' lookup should also *not* return the second update's uncommitted changes.
+jsTestLog("Opening a change stream on the primary.");
+const cst = new ChangeStreamTest(primaryDB);
+let cursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {startAtOperationTime: startOperTime, fullDocument: "updateLookup"}}],
+ collection: collName
+});
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, v: 0},
- ns: {db: dbName, coll: collName},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, v: 1},
- ns: {db: dbName, coll: collName},
- updateDescription: {removedFields: [], updatedFields: {v: 1}},
- operationType: "update",
- }
- ]
- });
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, v: 0},
+ ns: {db: dbName, coll: collName},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, v: 1},
+ ns: {db: dbName, coll: collName},
+ updateDescription: {removedFields: [], updatedFields: {v: 1}},
+ operationType: "update",
+ }
+ ]
+});
- // Make sure the cursor does not return any more change events.
- cursor = cst.getNextBatch(cursor);
- assert.eq(cursor.nextBatch.length, 0);
+// Make sure the cursor does not return any more change events.
+cursor = cst.getNextBatch(cursor);
+assert.eq(cursor.nextBatch.length, 0);
- // Disable the failpoint to let the test complete.
- primaryDB.adminCommand({configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "off"});
+// Disable the failpoint to let the test complete.
+primaryDB.adminCommand({configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "off"});
- primaryWrite.join();
- replTest.stopSet();
+primaryWrite.join();
+replTest.stopSet();
})();
\ No newline at end of file
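
Failpoints such as hangBeforeLogOpAdvancesLastApplied are driven entirely through the configureFailPoint admin command: the test turns one on, waits for the server log to confirm it fired, and turns it off again. The bare toggle, sketched against a single-node set (the commented-out wait shows where checkLog.contains would sit; names are illustrative):

load('jstests/libs/check_log.js');  // for checkLog.
var rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
var admin = rst.getPrimary().getDB('admin');
assert.commandWorked(admin.runCommand(
    {configureFailPoint: 'hangBeforeLogOpAdvancesLastApplied', mode: 'alwaysOn'}));
// A write issued from a parallel thread would now hang; the test waits on the log line:
// checkLog.contains(rst.getPrimary(), 'hangBeforeLogOpAdvancesLastApplied fail point enabled.');
assert.commandWorked(admin.runCommand(
    {configureFailPoint: 'hangBeforeLogOpAdvancesLastApplied', mode: 'off'}));
rst.stopSet();
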
diff --git a/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js b/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
index 4e1fe2179f5..a16fc41f041 100644
--- a/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
+++ b/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
@@ -12,80 +12,78 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
- const name = "change_stream_speculative_majority_latest_oplog_timestamp";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
-
- const dbName = name;
- const collName = "coll";
- const otherCollName = "coll_other";
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
-
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
-
- assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- let res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {}}],
- cursor: {},
- maxTimeMS: 5000,
- needsMerge: true,
- fromMongos: true
- });
-
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Insert a document on primary and let it majority commit.
- assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Receive the first change event.
- res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
-
- // Pause replication on the secondary so that writes won't majority commit.
- jsTestLog("Stopping replication to secondary.");
- stopServerReplication(secondary);
-
- // Do a write on a collection that we are not watching changes for.
- let otherWriteRes = primaryDB.runCommand({insert: otherCollName, documents: [{_id: 1}]});
- let otherWriteOpTime = otherWriteRes.operationTime;
-
- // Replication to the secondary is paused, so the write to 'otherCollName' cannot majority
- // commit. A change stream getMore is expected to return the "latest oplog timestamp" which it
- // scanned, and this timestamp must be majority committed. So this getMore should time out
- // waiting for the previous write to majority commit, even though it's on a collection that is
- // not being watched.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Restarting replication to secondary.");
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- // Now that writes can replicate again, the previous operation should have majority committed,
- // making it safe to return as the latest oplog timestamp.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.eq(res.cursor.nextBatch, []);
- assert.eq(otherWriteOpTime, res.$_internalLatestOplogTimestamp);
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+
+const name = "change_stream_speculative_majority_latest_oplog_timestamp";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = name;
+const collName = "coll";
+const otherCollName = "coll_other";
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
+
+assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+let res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ maxTimeMS: 5000,
+ needsMerge: true,
+ fromMongos: true
+});
+
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Insert a document on primary and let it majority commit.
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Receive the first change event.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+
+// Pause replication on the secondary so that writes won't majority commit.
+jsTestLog("Stopping replication to secondary.");
+stopServerReplication(secondary);
+
+// Do a write on a collection that we are not watching changes for.
+let otherWriteRes = primaryDB.runCommand({insert: otherCollName, documents: [{_id: 1}]});
+let otherWriteOpTime = otherWriteRes.operationTime;
+
+// Replication to the secondary is paused, so the write to 'otherCollName' cannot majority
+// commit. A change stream getMore is expected to return the "latest oplog timestamp" which it
+// scanned, and this timestamp must be majority committed. So this getMore should time out
+// waiting for the previous write to majority commit, even though it's on a collection that is
+// not being watched.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Restarting replication to secondary.");
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+// Now that writes can replicate again, the previous operation should have majority committed,
+// making it safe to return as the latest oplog timestamp.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.eq(res.cursor.nextBatch, []);
+assert.eq(otherWriteOpTime, res.$_internalLatestOplogTimestamp);
+
+replTest.stopSet();
})();
\ No newline at end of file
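
The timeout assertions in this file all rest on the same mechanism: with replication paused on the lone secondary, nothing new can majority commit, so a change stream getMore bounded by maxTimeMS must fail with MaxTimeMSExpired. Reduced to its core (the database name and the 2000 ms bound are illustrative):

load('jstests/libs/write_concern_util.js');  // for stop/restartServerReplication.
var rst = new ReplSetTest(
    {nodes: [{}, {rsConfig: {priority: 0}}], nodeOptions: {enableMajorityReadConcern: 'false'}});
rst.startSet();
rst.initiate();
var primaryDB = rst.getPrimary().getDB('test');
var res = assert.commandWorked(
    primaryDB.runCommand({aggregate: 'coll', pipeline: [{$changeStream: {}}], cursor: {}}));
stopServerReplication(rst.getSecondary());
assert.commandWorked(primaryDB.coll.insert({_id: 1}));
// The insert cannot majority commit, so this getMore should hit its time limit.
assert.commandFailedWithCode(
    primaryDB.runCommand({getMore: res.cursor.id, collection: 'coll', maxTimeMS: 2000}),
    ErrorCodes.MaxTimeMSExpired);
restartServerReplication(rst.getSecondary());
rst.stopSet();
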
diff --git a/jstests/replsets/change_stream_speculative_majority_optimized_wait.js b/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
index 20585c11336..65bd4599722 100644
--- a/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
+++ b/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
@@ -6,78 +6,78 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- const name = "change_stream_speculative_majority";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const name = "change_stream_speculative_majority";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
- // Receive 1 change to get an initial resume token.
- let res = assert.commandWorked(
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- let cursorId = res.cursor.id;
- assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
- assert.eq(res.cursor.nextBatch.length, 1);
- let resumeToken = res.cursor.nextBatch[0]["_id"];
+// Receive 1 change to get an initial resume token.
+let res = assert.commandWorked(
+ primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+let cursorId = res.cursor.id;
+assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
+assert.eq(res.cursor.nextBatch.length, 1);
+let resumeToken = res.cursor.nextBatch[0]["_id"];
- // Open a change stream.
- res = assert.commandWorked(
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- cursorId = res.cursor.id;
+// Open a change stream.
+res = assert.commandWorked(
+ primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+cursorId = res.cursor.id;
- // Insert documents to fill one batch and let them majority commit.
- let batchSize = 2;
- assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+// Insert documents to fill one batch and let them majority commit.
+let batchSize = 2;
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
- // Do a write on the primary that won't majority commit but will advance the last applied optime.
- assert.commandWorked(primaryColl.insert({_id: 3}));
+// Do a write on the primary that won't majority commit but will advance the last applied optime.
+assert.commandWorked(primaryColl.insert({_id: 3}));
- // Receive one batch of change events. We should be able to read only the majority committed
- // change events and no further in order to generate this batch.
- res = assert.commandWorked(primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, batchSize: batchSize}));
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 2);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
- assert.eq(changes[1]["fullDocument"], {_id: 2});
- assert.eq(changes[1]["operationType"], "insert");
+// Receive one batch of change events. We should be able to read only the majority committed
+// change events and no further in order to generate this batch.
+res = assert.commandWorked(primary.getDB(dbName).runCommand(
+ {getMore: cursorId, collection: collName, batchSize: batchSize}));
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 2);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+assert.eq(changes[1]["fullDocument"], {_id: 2});
+assert.eq(changes[1]["operationType"], "insert");
- // Make sure that 'aggregate' commands also utilize the optimization.
- res = assert.commandWorked(primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {batchSize: batchSize}
- }));
- changes = res.cursor.firstBatch;
- assert.eq(changes.length, 2);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
- assert.eq(changes[1]["fullDocument"], {_id: 2});
- assert.eq(changes[1]["operationType"], "insert");
+// Make sure that 'aggregate' commands also utilize the optimization.
+res = assert.commandWorked(primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {batchSize: batchSize}
+}));
+changes = res.cursor.firstBatch;
+assert.eq(changes.length, 2);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+assert.eq(changes[1]["fullDocument"], {_id: 2});
+assert.eq(changes[1]["operationType"], "insert");
- // Let the test finish.
- restartServerReplication(secondary);
+// Let the test finish.
+restartServerReplication(secondary);
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
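
Resume tokens make change streams restartable: the _id of any returned event can be fed back through $changeStream's resumeAfter, and cursor.batchSize caps how many events come back per batch. A self-contained sketch on a single-node set, where every write majority-commits immediately (names are illustrative):

var rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
var testDB = rst.getPrimary().getDB('test');
var res = assert.commandWorked(
    testDB.runCommand({aggregate: 'coll', pipeline: [{$changeStream: {}}], cursor: {}}));
assert.commandWorked(testDB.coll.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
res = assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: 'coll'}));
var resumeToken = res.cursor.nextBatch[0]._id;
// Re-open the stream just after the first event; expect the remaining two.
res = assert.commandWorked(testDB.runCommand({
    aggregate: 'coll',
    pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
    cursor: {batchSize: 2}
}));
assert.eq(res.cursor.firstBatch.length, 2);
rst.stopSet();
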
diff --git a/jstests/replsets/change_stream_speculative_majority_rollback.js b/jstests/replsets/change_stream_speculative_majority_rollback.js
index 2c8aa9492af..06e4fccc51d 100644
--- a/jstests/replsets/change_stream_speculative_majority_rollback.js
+++ b/jstests/replsets/change_stream_speculative_majority_rollback.js
@@ -4,102 +4,100 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/libs/rollback_test.js"); // for RollbackTest.
-
- // Disable implicit sessions so it's easy to run commands from different threads.
- TestData.disableImplicitSessions = true;
-
- const name = "change_stream_speculative_majority_rollback";
- const dbName = name;
- const collName = "coll";
-
- // Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so we
- // will utilize speculative majority reads for change streams.
- const replTest = new ReplSetTest({
- name,
- nodes: 3,
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- replTest.initiate(config);
-
- const rollbackTest = new RollbackTest(name, replTest);
- const primary = rollbackTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- let coll = primaryDB[collName];
-
- // Create a collection.
- assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- // Open a change stream on the initial primary.
- let res =
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}});
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Receive an initial change event and save the resume token.
- assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- res = primaryDB.runCommand({getMore: cursorId, collection: collName});
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
- let resumeToken = changes[0]["_id"];
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- assert.eq(rollbackNode, primary);
-
- // Insert a few items that will be rolled back.
- assert.commandWorked(coll.insert({_id: 2}));
- assert.commandWorked(coll.insert({_id: 3}));
- assert.commandWorked(coll.insert({_id: 4}));
-
- let getChangeEvent = new ScopedThread(function(host, cursorId, dbName, collName) {
- jsTestLog("Trying to receive change event from divergent primary.");
- const nodeDB = new Mongo(host).getDB(dbName);
- try {
- return nodeDB.runCommand({getMore: eval(cursorId), collection: collName});
- } catch (e) {
- return isNetworkError(e);
- }
- }, rollbackNode.host, tojson(cursorId), dbName, collName);
- getChangeEvent.start();
-
- // Make sure the change stream query started.
- assert.soon(() => primaryDB.currentOp({"command.getMore": cursorId}).inprog.length === 1);
-
- // Do some operations on the new primary that we can receive in a resumed stream.
- let syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- coll = syncSource.getDB(dbName)[collName];
- assert.commandWorked(coll.insert({_id: 5}));
- assert.commandWorked(coll.insert({_id: 6}));
- assert.commandWorked(coll.insert({_id: 7}));
-
- // Let rollback begin and complete.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // The change stream query should have failed when the node entered rollback.
- assert(getChangeEvent.returnData());
-
- jsTestLog("Resuming change stream against new primary.");
- res = syncSource.getDB(dbName).runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
- changes = res.cursor.firstBatch;
- assert.eq(changes.length, 3);
- assert.eq(changes[0]["fullDocument"], {_id: 5});
- assert.eq(changes[0]["operationType"], "insert");
- assert.eq(changes[1]["fullDocument"], {_id: 6});
- assert.eq(changes[1]["operationType"], "insert");
- assert.eq(changes[2]["fullDocument"], {_id: 7});
- assert.eq(changes[2]["operationType"], "insert");
-
- rollbackTest.stop();
-
+'use strict';
+
+load("jstests/replsets/libs/rollback_test.js"); // for RollbackTest.
+
+// Disable implicit sessions so it's easy to run commands from different threads.
+TestData.disableImplicitSessions = true;
+
+const name = "change_stream_speculative_majority_rollback";
+const dbName = name;
+const collName = "coll";
+
+// Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so we
+// will utilize speculative majority reads for change streams.
+const replTest = new ReplSetTest({
+ name,
+ nodes: 3,
+ useBridge: true,
+ settings: {chainingAllowed: false},
+ nodeOptions: {enableMajorityReadConcern: "false"}
+});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+replTest.initiate(config);
+
+const rollbackTest = new RollbackTest(name, replTest);
+const primary = rollbackTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+let coll = primaryDB[collName];
+
+// Create a collection.
+assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+// Open a change stream on the initial primary.
+let res = primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}});
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Receive an initial change event and save the resume token.
+assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+res = primaryDB.runCommand({getMore: cursorId, collection: collName});
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+let resumeToken = changes[0]["_id"];
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+assert.eq(rollbackNode, primary);
+
+// Insert a few items that will be rolled back.
+assert.commandWorked(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 4}));
+
+let getChangeEvent = new ScopedThread(function(host, cursorId, dbName, collName) {
+ jsTestLog("Trying to receive change event from divergent primary.");
+ const nodeDB = new Mongo(host).getDB(dbName);
+ try {
+ return nodeDB.runCommand({getMore: eval(cursorId), collection: collName});
+ } catch (e) {
+ return isNetworkError(e);
+ }
+}, rollbackNode.host, tojson(cursorId), dbName, collName);
+getChangeEvent.start();
+
+// Make sure the change stream query started.
+assert.soon(() => primaryDB.currentOp({"command.getMore": cursorId}).inprog.length === 1);
+
+// Do some operations on the new primary that we can receive in a resumed stream.
+let syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+coll = syncSource.getDB(dbName)[collName];
+assert.commandWorked(coll.insert({_id: 5}));
+assert.commandWorked(coll.insert({_id: 6}));
+assert.commandWorked(coll.insert({_id: 7}));
+
+// Let rollback begin and complete.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// The change stream query should have failed when the node entered rollback.
+assert(getChangeEvent.returnData());
+
+jsTestLog("Resuming change stream against new primary.");
+res = syncSource.getDB(dbName).runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
+changes = res.cursor.firstBatch;
+assert.eq(changes.length, 3);
+assert.eq(changes[0]["fullDocument"], {_id: 5});
+assert.eq(changes[0]["operationType"], "insert");
+assert.eq(changes[1]["fullDocument"], {_id: 6});
+assert.eq(changes[1]["operationType"], "insert");
+assert.eq(changes[2]["fullDocument"], {_id: 7});
+assert.eq(changes[2]["operationType"], "insert");
+
+rollbackTest.stop();
})();
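
ScopedThread (from jstests/libs/parallelTester.js) is how this test issues a getMore that is expected to fail while the main thread drives the rollback: the worker receives only serializable arguments, opens its own connection, and hands its result back via returnData(). A minimal sketch with an illustrative ping command in place of the blocking getMore:

load('jstests/libs/parallelTester.js');  // for ScopedThread.
var rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
var worker = new ScopedThread(function(host) {
    // Runs on its own thread with its own connection.
    return new Mongo(host).getDB('admin').runCommand({ping: 1});
}, rst.getPrimary().host);
worker.start();
worker.join();
assert.commandWorked(worker.returnData());
rst.stopSet();
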
diff --git a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
index 4665009318a..29beca07a26 100644
--- a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
+++ b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
@@ -6,71 +6,70 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- load("jstests/libs/check_log.js"); // for checkLog.
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/check_log.js"); // for checkLog.
- const name = "speculative_majority_secondary";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const name = "speculative_majority_secondary";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
- let secondaryDB = secondary.getDB(dbName);
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+let secondaryDB = secondary.getDB(dbName);
- // Do a couple of writes on the primary and save the first operation time, so we can start the
- // secondary change stream from this point.
- let res = assert.commandWorked(primaryColl.runCommand("insert", {documents: [{_id: 0}]}));
- let startTime = res.operationTime;
- assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 0}}));
- replTest.awaitLastOpCommitted();
+// Do a couple of writes on the primary and save the first operation time, so we can start the
+// secondary change stream from this point.
+let res = assert.commandWorked(primaryColl.runCommand("insert", {documents: [{_id: 0}]}));
+let startTime = res.operationTime;
+assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 0}}));
+replTest.awaitLastOpCommitted();
- // Make the secondary pause after it has written a batch of entries to the oplog but before it
- // has applied them.
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
+// Make the secondary pause after it has written a batch of entries to the oplog but before it
+// has applied them.
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
- // Pause replication so that the secondary will sync and apply the set of writes from the
- // primary in a single batch.
- stopServerReplication(secondary);
+// Pause replication so that the secondary will sync and apply the set of writes from the
+// primary in a single batch.
+stopServerReplication(secondary);
- jsTestLog("Do some writes on the primary.");
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 1}}));
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 2}}));
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 3}}));
+jsTestLog("Do some writes on the primary.");
+assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 1}}));
+assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 2}}));
+assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 3}}));
- // Restart server replication on secondary and wait for the failpoint to be hit.
- jsTestLog("Restarting server replication on secondary.");
- restartServerReplication(secondary);
- checkLog.contains(secondary,
- "pauseBatchApplicationAfterWritingOplogEntries fail point enabled");
+// Restart server replication on secondary and wait for the failpoint to be hit.
+jsTestLog("Restarting server replication on secondary.");
+restartServerReplication(secondary);
+checkLog.contains(secondary, "pauseBatchApplicationAfterWritingOplogEntries fail point enabled");
- // Open a change stream on the secondary.
- res = assert.commandWorked(secondaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {startAtOperationTime: startTime}}],
- cursor: {}
- }));
+// Open a change stream on the secondary.
+res = assert.commandWorked(secondaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {startAtOperationTime: startTime}}],
+ cursor: {}
+}));
- // We should not expect to see any of the ops currently being applied in the secondary batch.
- let changes = res.cursor.firstBatch;
- assert.eq(changes.length, 2);
- assert.eq(changes[0].fullDocument, {_id: 0});
- assert.eq(changes[1].updateDescription.updatedFields, {v: 0});
+// We should not expect to see any of the ops currently being applied in the secondary batch.
+let changes = res.cursor.firstBatch;
+assert.eq(changes.length, 2);
+assert.eq(changes[0].fullDocument, {_id: 0});
+assert.eq(changes[1].updateDescription.updatedFields, {v: 0});
- // Turn off the failpoint and let the test complete.
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
- replTest.stopSet();
+// Turn off the failpoint and let the test complete.
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
+replTest.stopSet();
})(); \ No newline at end of file
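
A minimal sketch of the mechanism this test exercises, capturing a write's operationTime and opening a change stream from it; 'conn' and the collection name are hypothetical, not taken from the test above.

// Sketch only: start a change stream at a saved operationTime.
const testDb = conn.getDB("test");
const writeRes = assert.commandWorked(
    testDb.coll.runCommand("insert", {documents: [{_id: "a"}]}));
const opTime = writeRes.operationTime;  // cluster time assigned to the insert
const streamRes = assert.commandWorked(testDb.runCommand({
    aggregate: "coll",
    pipeline: [{$changeStream: {startAtOperationTime: opTime}}],
    cursor: {}
}));
// Events at or after opTime are returned in streamRes.cursor.firstBatch.
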
diff --git a/jstests/replsets/change_stream_stepdown.js b/jstests/replsets/change_stream_stepdown.js
index 2df2a11c8c8..1a6f6fb28cb 100644
--- a/jstests/replsets/change_stream_stepdown.js
+++ b/jstests/replsets/change_stream_stepdown.js
@@ -5,134 +5,134 @@
* @tags: [requires_wiredtiger]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
- const name = "change_stream_stepdown";
- const replTest = new ReplSetTest({name: name, nodes: [{}, {}]});
- replTest.startSet();
- replTest.initiate();
-
- const dbName = name;
- const collName = "change_stream_stepdown";
- const changeStreamComment = collName + "_comment";
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
- const primaryDb = primary.getDB(dbName);
- const secondaryDb = secondary.getDB(dbName);
- const primaryColl = primaryDb[collName];
-
- // Tell the secondary to stay secondary until we say otherwise.
- assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 999999}));
-
- // Open a change stream.
- let res = primaryDb.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {}}],
- cursor: {},
- comment: changeStreamComment,
- maxTimeMS: 5000
- });
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Insert several documents on primary and let them majority commit.
- assert.commandWorked(
- primaryColl.insert([{_id: 1}, {_id: 2}, {_id: 3}], {writeConcern: {w: "majority"}}));
- replTest.awaitReplication();
-
- jsTestLog("Testing that changestream survives stepdown between find and getmore");
- // Step down.
- assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Receive the first change event. This tests stepdown between find and getmore.
- res = assert.commandWorked(
- primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
-
- jsTestLog("Testing that changestream survives step-up");
- // Step back up and wait for primary.
- assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
- replTest.getPrimary();
-
- // Get the next one. This tests that the changestream survives a step-up.
- res = assert.commandWorked(
- primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
- changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 2});
- assert.eq(changes[0]["operationType"], "insert");
-
- jsTestLog("Testing that changestream survives stepdown between two getmores");
- // Step down again.
- assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Get the next one. This tests that the changestream survives a stepdown between getmores.
- res = assert.commandWorked(
- primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
- changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 3});
- assert.eq(changes[0]["operationType"], "insert");
-
- // Step back up and wait for primary.
- assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
- replTest.getPrimary();
-
- jsTestLog("Testing that changestream waiting on old primary sees docs inserted on new primary");
-
- replTest.awaitReplication(); // Ensure secondary is up to date and can win an election.
- TestData.changeStreamComment = changeStreamComment;
- TestData.secondaryHost = secondary.host;
- TestData.dbName = dbName;
- TestData.collName = collName;
- let waitForShell = startParallelShell(function() {
- // Wait for the getMore to be in progress.
- assert.soon(
- () => db.getSiblingDB("admin")
- .aggregate([
- {'$currentOp': {}},
- {
- '$match': {
- op: 'getmore',
- 'cursor.originatingCommand.comment': TestData.changeStreamComment
- }
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+
+const name = "change_stream_stepdown";
+const replTest = new ReplSetTest({name: name, nodes: [{}, {}]});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = name;
+const collName = "change_stream_stepdown";
+const changeStreamComment = collName + "_comment";
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+const primaryDb = primary.getDB(dbName);
+const secondaryDb = secondary.getDB(dbName);
+const primaryColl = primaryDb[collName];
+
+// Tell the secondary to stay secondary until we say otherwise.
+assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 999999}));
+
+// Open a change stream.
+let res = primaryDb.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ comment: changeStreamComment,
+ maxTimeMS: 5000
+});
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Insert several documents on primary and let them majority commit.
+assert.commandWorked(
+ primaryColl.insert([{_id: 1}, {_id: 2}, {_id: 3}], {writeConcern: {w: "majority"}}));
+replTest.awaitReplication();
+
+jsTestLog("Testing that changestream survives stepdown between find and getmore");
+// Step down.
+assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Receive the first change event. This tests stepdown between find and getmore.
+res = assert.commandWorked(
+ primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+
+jsTestLog("Testing that changestream survives step-up");
+// Step back up and wait for primary.
+assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
+replTest.getPrimary();
+
+// Get the next one. This tests that the changestream survives a step-up.
+res = assert.commandWorked(
+ primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
+changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 2});
+assert.eq(changes[0]["operationType"], "insert");
+
+jsTestLog("Testing that changestream survives stepdown between two getmores");
+// Step down again.
+assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Get the next one. This tests that the changestream survives a stepdown between getmores.
+res = assert.commandWorked(
+ primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
+changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 3});
+assert.eq(changes[0]["operationType"], "insert");
+
+// Step back up and wait for primary.
+assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
+replTest.getPrimary();
+
+jsTestLog("Testing that changestream waiting on old primary sees docs inserted on new primary");
+
+replTest.awaitReplication(); // Ensure secondary is up to date and can win an election.
+TestData.changeStreamComment = changeStreamComment;
+TestData.secondaryHost = secondary.host;
+TestData.dbName = dbName;
+TestData.collName = collName;
+let waitForShell = startParallelShell(function() {
+ // Wait for the getMore to be in progress.
+ assert.soon(
+ () => db.getSiblingDB("admin")
+ .aggregate([
+ {'$currentOp': {}},
+ {
+ '$match': {
+ op: 'getmore',
+ 'cursor.originatingCommand.comment': TestData.changeStreamComment
}
- ])
- .itcount() == 1);
-
- const secondary = new Mongo(TestData.secondaryHost);
- const secondaryDb = secondary.getDB(TestData.dbName);
- // Step down the old primary and wait for new primary.
- assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 0}));
- assert.commandWorked(secondaryDb.adminCommand({replSetStepUp: 1, skipDryRun: true}));
- jsTestLog("Waiting for new primary");
- assert.soon(() => secondaryDb.adminCommand({isMaster: 1}).ismaster);
-
- jsTestLog("Inserting document on new primary");
- assert.commandWorked(
- secondaryDb[TestData.collName].insert({_id: 4}, {writeConcern: {w: "majority"}}));
- }, primary.port);
-
- res = assert.commandWorked(primaryDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 1,
- maxTimeMS: ReplSetTest.kDefaultTimeoutMS
- }));
- changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 4});
- assert.eq(changes[0]["operationType"], "insert");
-
- waitForShell();
-
- replTest.stopSet();
+ }
+ ])
+ .itcount() == 1);
+
+ const secondary = new Mongo(TestData.secondaryHost);
+ const secondaryDb = secondary.getDB(TestData.dbName);
+ // Step down the old primary and wait for new primary.
+ assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 0}));
+ assert.commandWorked(secondaryDb.adminCommand({replSetStepUp: 1, skipDryRun: true}));
+ jsTestLog("Waiting for new primary");
+ assert.soon(() => secondaryDb.adminCommand({isMaster: 1}).ismaster);
+
+ jsTestLog("Inserting document on new primary");
+ assert.commandWorked(
+ secondaryDb[TestData.collName].insert({_id: 4}, {writeConcern: {w: "majority"}}));
+}, primary.port);
+
+res = assert.commandWorked(primaryDb.runCommand({
+ getMore: cursorId,
+ collection: collName,
+ batchSize: 1,
+ maxTimeMS: ReplSetTest.kDefaultTimeoutMS
+}));
+changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 4});
+assert.eq(changes[0]["operationType"], "insert");
+
+waitForShell();
+
+replTest.stopSet();
})();
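
The parallel shell above locates the outstanding getMore by the comment attached to its originating command; reduced to a minimal sketch (the comment string here is hypothetical):

// Sketch only: count in-progress getMores tagged with a given comment.
const nRunning = db.getSiblingDB("admin")
                     .aggregate([
                         {$currentOp: {}},
                         {
                             $match: {
                                 op: "getmore",
                                 "cursor.originatingCommand.comment": "my_comment"
                             }
                         }
                     ])
                     .itcount();
// nRunning becomes 1 once the tagged getMore is actually in progress.
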
diff --git a/jstests/replsets/clean_shutdown_oplog_state.js b/jstests/replsets/clean_shutdown_oplog_state.js
index 0bc4855f99a..35957ed44b3 100644
--- a/jstests/replsets/clean_shutdown_oplog_state.js
+++ b/jstests/replsets/clean_shutdown_oplog_state.js
@@ -5,101 +5,97 @@
//
// @tags: [requires_persistence, requires_majority_read_concern]
(function() {
- "use strict";
+"use strict";
- // Skip db hash check because secondary restarted as standalone.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because secondary restarted as standalone.
+TestData.skipCheckDBHashes = true;
- var rst = new ReplSetTest({
- name: "name",
- nodes: 2,
- oplogSize: 500,
- });
+var rst = new ReplSetTest({
+ name: "name",
+ nodes: 2,
+ oplogSize: 500,
+});
- rst.startSet();
- var conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- printjson(conf);
- rst.initiate(conf);
+rst.startSet();
+var conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+printjson(conf);
+rst.initiate(conf);
- var primary = rst.getPrimary(); // Waits for PRIMARY state.
- var slave = rst.nodes[1];
+var primary = rst.getPrimary(); // Waits for PRIMARY state.
+var slave = rst.nodes[1];
- // Stop replication on the secondary.
- assert.commandWorked(
- slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+// Stop replication on the secondary.
+assert.commandWorked(slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- // Prime the main collection.
- primary.getCollection("test.coll").insert({_id: -1});
+// Prime the main collection.
+primary.getCollection("test.coll").insert({_id: -1});
- // Start a w:2 write that will block until replication is resumed.
- var waitForReplStart = startParallelShell(function() {
- printjson(assert.writeOK(
- db.getCollection('side').insert({}, {writeConcern: {w: 2, wtimeout: 30 * 60 * 1000}})));
- }, primary.host.split(':')[1]);
+// Start a w:2 write that will block until replication is resumed.
+var waitForReplStart = startParallelShell(function() {
+ printjson(assert.writeOK(
+ db.getCollection('side').insert({}, {writeConcern: {w: 2, wtimeout: 30 * 60 * 1000}})));
+}, primary.host.split(':')[1]);
- // Insert a lot of data in increasing order to test.coll.
- var op = primary.getCollection("test.coll").initializeUnorderedBulkOp();
- for (var i = 0; i < 1000 * 1000; i++) {
- op.insert({_id: i});
- }
- assert.writeOK(op.execute());
+// Insert a lot of data in increasing order to test.coll.
+var op = primary.getCollection("test.coll").initializeUnorderedBulkOp();
+for (var i = 0; i < 1000 * 1000; i++) {
+ op.insert({_id: i});
+}
+assert.writeOK(op.execute());
- // Resume replication and wait for ops to start replicating, then do a clean shutdown on the
- // secondary.
- assert.commandWorked(slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- waitForReplStart();
- sleep(100); // wait a bit to increase the chances of killing mid-batch.
- rst.stop(1);
+// Resume replication and wait for ops to start replicating, then do a clean shutdown on the
+// secondary.
+assert.commandWorked(slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+waitForReplStart();
+sleep(100); // wait a bit to increase the chances of killing mid-batch.
+rst.stop(1);
- // Restart the secondary as a standalone node.
- var options = slave.savedOptions;
- options.noCleanData = true;
- delete options.replSet;
+// Restart the secondary as a standalone node.
+var options = slave.savedOptions;
+options.noCleanData = true;
+delete options.replSet;
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (storageEngine === "wiredTiger") {
- options.setParameter = options.setParameter || {};
- options.setParameter.recoverFromOplogAsStandalone = true;
- }
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (storageEngine === "wiredTiger") {
+ options.setParameter = options.setParameter || {};
+ options.setParameter.recoverFromOplogAsStandalone = true;
+}
- var conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, "secondary failed to start");
+var conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, "secondary failed to start");
- // Following clean shutdown of a node, the oplog must exactly match the applied operations.
- // Additionally, the begin field must not be in the minValid document, the ts must match the
- // top of the oplog (SERVER-25353), and the oplogTruncateAfterPoint must be null (SERVER-7200
- // and SERVER-25071).
- var oplogDoc = conn.getCollection('local.oplog.rs')
- .find({ns: 'test.coll'})
- .sort({$natural: -1})
- .limit(1)[0];
- var collDoc = conn.getCollection('test.coll').find().sort({_id: -1}).limit(1)[0];
- var minValidDoc =
- conn.getCollection('local.replset.minvalid').find().sort({$natural: -1}).limit(1)[0];
- var oplogTruncateAfterPointDoc =
- conn.getCollection('local.replset.oplogTruncateAfterPoint').find().limit(1)[0];
- printjson({
- oplogDoc: oplogDoc,
- collDoc: collDoc,
- minValidDoc: minValidDoc,
- oplogTruncateAfterPointDoc: oplogTruncateAfterPointDoc
- });
- try {
- assert.eq(collDoc._id, oplogDoc.o._id);
- assert(!('begin' in minValidDoc), 'begin in minValidDoc');
- if (storageEngine !== "wiredTiger") {
- assert.eq(minValidDoc.ts, oplogDoc.ts);
- }
- assert.eq(oplogTruncateAfterPointDoc.oplogTruncateAfterPoint, Timestamp());
- } catch (e) {
- // TODO remove once SERVER-25777 is resolved.
- jsTest.log(
- "Look above and make sure clean shutdown finished without resorting to SIGKILL." +
- "\nUnfortunately that currently doesn't fail the test.");
- throw e;
+// Following clean shutdown of a node, the oplog must exactly match the applied operations.
+// Additionally, the begin field must not be in the minValid document, the ts must match the
+// top of the oplog (SERVER-25353), and the oplogTruncateAfterPoint must be null (SERVER-7200
+// and SERVER-25071).
+var oplogDoc =
+ conn.getCollection('local.oplog.rs').find({ns: 'test.coll'}).sort({$natural: -1}).limit(1)[0];
+var collDoc = conn.getCollection('test.coll').find().sort({_id: -1}).limit(1)[0];
+var minValidDoc =
+ conn.getCollection('local.replset.minvalid').find().sort({$natural: -1}).limit(1)[0];
+var oplogTruncateAfterPointDoc =
+ conn.getCollection('local.replset.oplogTruncateAfterPoint').find().limit(1)[0];
+printjson({
+ oplogDoc: oplogDoc,
+ collDoc: collDoc,
+ minValidDoc: minValidDoc,
+ oplogTruncateAfterPointDoc: oplogTruncateAfterPointDoc
+});
+try {
+ assert.eq(collDoc._id, oplogDoc.o._id);
+ assert(!('begin' in minValidDoc), 'begin in minValidDoc');
+ if (storageEngine !== "wiredTiger") {
+ assert.eq(minValidDoc.ts, oplogDoc.ts);
}
+ assert.eq(oplogTruncateAfterPointDoc.oplogTruncateAfterPoint, Timestamp());
+} catch (e) {
+ // TODO remove once SERVER-25777 is resolved.
+ jsTest.log("Look above and make sure clean shutdown finished without resorting to SIGKILL." +
+ "\nUnfortunately that currently doesn't fail the test.");
+ throw e;
+}
- rst.stopSet();
+rst.stopSet();
})();
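
For reference, the shape of the oplog query the assertions above depend on, reading the newest entry for a namespace with a reverse natural-order scan ('conn' as in the test):

// Sketch only: fetch the most recent oplog entry for one namespace.
const lastOplogEntry = conn.getCollection("local.oplog.rs")
                           .find({ns: "test.coll"})
                           .sort({$natural: -1})  // newest first
                           .limit(1)[0];
// lastOplogEntry.ts is the top-of-oplog timestamp for that namespace;
// lastOplogEntry.o holds the document the operation wrote.
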
diff --git a/jstests/replsets/collate_id.js b/jstests/replsets/collate_id.js
index 6e6d56bc67d..588c02e979a 100644
--- a/jstests/replsets/collate_id.js
+++ b/jstests/replsets/collate_id.js
@@ -1,66 +1,68 @@
// Test that oplog application on the secondary happens correctly when the collection has a default
// collation and operations by _id which must respect the collation are issued.
(function() {
- "use strict";
+"use strict";
- Random.setRandomSeed();
+Random.setRandomSeed();
- // Return a string whose character at index 'i' in 'str' is replaced by 'character'.
- function replaceChar(str, i, character) {
- assert.eq(1, character.length);
- return str.substr(0, i) + character + str.substr(i + 1);
- }
+// Return a string whose character at index 'i' in 'str' is replaced by 'character'.
+function replaceChar(str, i, character) {
+ assert.eq(1, character.length);
+ return str.substr(0, i) + character + str.substr(i + 1);
+}
- // Return a string whose character at index 'i' has been uppercased.
- function uppercaseIth(str, i) {
- return replaceChar(str, i, str[i].toUpperCase());
- }
+// Return a string whose character at index 'i' has been uppercased.
+function uppercaseIth(str, i) {
+ return replaceChar(str, i, str[i].toUpperCase());
+}
- const caseInsensitive = {collation: {locale: "en_US", strength: 2}};
+const caseInsensitive = {
+ collation: {locale: "en_US", strength: 2}
+};
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
- var nodes = replTest.startSet();
- replTest.initiate();
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
- var primary = replTest.getPrimary();
- var primaryDB = primary.getDB("test");
- var primaryColl = primaryDB.collate_id;
+var primary = replTest.getPrimary();
+var primaryDB = primary.getDB("test");
+var primaryColl = primaryDB.collate_id;
- var secondary = replTest.getSecondary();
- var secondaryDB = secondary.getDB("test");
- var secondaryColl = secondaryDB.collate_id;
+var secondary = replTest.getSecondary();
+var secondaryDB = secondary.getDB("test");
+var secondaryColl = secondaryDB.collate_id;
- // Stop the secondary from syncing. This will ensure that the writes on the primary get applied
- // on the secondary in a large batch.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+// Stop the secondary from syncing. This will ensure that the writes on the primary get applied
+// on the secondary in a large batch.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- assert.commandWorked(primaryDB.createCollection(primaryColl.getName(), caseInsensitive));
+assert.commandWorked(primaryDB.createCollection(primaryColl.getName(), caseInsensitive));
- // A string of the character 'b' repeated.
- const baseStr = new Array(50).join("b");
+// A string of the character 'b' repeated.
+const baseStr = new Array(50).join("b");
- for (var i = 0; i < 1000; i++) {
- // Make an _id by uppercasing each character in "baseStr" with 0.5 probability.
- var strId = baseStr;
- for (var charIdx = 0; charIdx < baseStr.length; charIdx++) {
- if (Random.rand() < 0.5) {
- strId = uppercaseIth(strId, charIdx);
- }
+for (var i = 0; i < 1000; i++) {
+ // Make an _id by uppercasing each character in "baseStr" with 0.5 probability.
+ var strId = baseStr;
+ for (var charIdx = 0; charIdx < baseStr.length; charIdx++) {
+ if (Random.rand() < 0.5) {
+ strId = uppercaseIth(strId, charIdx);
}
-
- assert.writeOK(primaryColl.insert({_id: strId}));
- assert.writeOK(primaryColl.remove({_id: strId}));
}
- // Since the inserts and deletes happen in pairs, we should be left with an empty collection on
- // the primary.
- assert.eq(0, primaryColl.find().itcount());
+ assert.writeOK(primaryColl.insert({_id: strId}));
+ assert.writeOK(primaryColl.remove({_id: strId}));
+}
+
+// Since the inserts and deletes happen in pairs, we should be left with an empty collection on
+// the primary.
+assert.eq(0, primaryColl.find().itcount());
- // Allow the secondary to sync, and test that it also ends up with an empty collection.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- replTest.awaitReplication();
- assert.eq(0, secondaryColl.find().itcount());
- replTest.stopSet();
+// Allow the secondary to sync, and test that it also ends up with an empty collection.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+replTest.awaitReplication();
+assert.eq(0, secondaryColl.find().itcount());
+replTest.stopSet();
})();
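
What the strength-2 collation gives this test is that _id predicates match case-insensitively, so each insert/remove pair always targets the same document; a minimal sketch with a hypothetical collection name:

// Sketch only: a strength-2 (case-insensitive) default collation.
assert.commandWorked(
    db.createCollection("ci_demo", {collation: {locale: "en_US", strength: 2}}));
assert.writeOK(db.ci_demo.insert({_id: "AbC"}));
assert.eq(1, db.ci_demo.find({_id: "abc"}).itcount());  // matches despite case
assert.writeOK(db.ci_demo.remove({_id: "aBc"}));        // removes the same doc
assert.eq(0, db.ci_demo.find().itcount());
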
diff --git a/jstests/replsets/collection_validator_initial_sync_with_feature_compatibility_version.js b/jstests/replsets/collection_validator_initial_sync_with_feature_compatibility_version.js
index 6a9101c3586..a80c31c2970 100644
--- a/jstests/replsets/collection_validator_initial_sync_with_feature_compatibility_version.js
+++ b/jstests/replsets/collection_validator_initial_sync_with_feature_compatibility_version.js
@@ -13,79 +13,77 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- const testName = "collection_validator_initial_sync_with_feature_compatibility";
-
- function testValidator(validator, nonMatchingDocument) {
- //
- // Create a single-node replica set.
- //
- let replTest = new ReplSetTest({name: testName, nodes: 1});
-
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
- let testDB = primary.getDB("test");
-
- //
- // Explicitly set the replica set to feature compatibility version 4.2.
- //
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- //
- // Create a collection with a validator using 4.2 query features.
- //
- assert.commandWorked(testDB.createCollection("coll", {validator: validator}));
-
- //
- // Downgrade the replica set to feature compatibility version 4.0.
- //
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- //
- // Add a new member to the replica set.
- //
- let secondaryDBPath = MongoRunner.dataPath + testName + "_secondary";
- resetDbpath(secondaryDBPath);
- let secondary = replTest.add({dbpath: secondaryDBPath});
- replTest.reInitiate(secondary);
- reconnect(primary);
- reconnect(secondary);
-
- //
- // Once the new member completes its initial sync, stop it, remove it from the replica set,
- // and start it back up as an individual instance.
- //
- replTest.waitForState(secondary, [ReplSetTest.State.PRIMARY, ReplSetTest.State.SECONDARY]);
-
- replTest.stopSet(undefined /* send default signal */,
- true /* don't clear data directory */);
-
- secondary = MongoRunner.runMongod({dbpath: secondaryDBPath, noCleanData: true});
- assert.neq(null, secondary, "mongod was unable to start up");
-
- //
- // Verify that the validator synced to the new member by attempting to insert a document
- // that does not validate and checking that the insert fails.
- //
- let secondaryDB = secondary.getDB("test");
- assert.writeError(secondaryDB.coll.insert(nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure);
-
- //
- // Verify that, even though the existing validator still works, it is not possible to create
- // a new validator using 4.2 query features because of feature compatibility version 4.0.
- //
- assert.commandFailedWithCode(
- secondaryDB.runCommand({collMod: "coll", validator: validator}),
- ErrorCodes.QueryFeatureNotAllowed);
-
- MongoRunner.stopMongod(secondary);
- }
-
- // Ban the use of expressions that were introduced or had their parsing modified in 4.2.
- testValidator({$expr: {$eq: [{$round: "$a"}, 4]}}, {a: 5.2});
- testValidator({$expr: {$eq: [{$trunc: ["$a", 2]}, 4.1]}}, {a: 4.23});
- testValidator({$expr: {$regexMatch: {input: "$a", regex: /sentinel/}}}, {a: "no dice"});
+"use strict";
+const testName = "collection_validator_initial_sync_with_feature_compatibility";
+
+function testValidator(validator, nonMatchingDocument) {
+ //
+ // Create a single-node replica set.
+ //
+ let replTest = new ReplSetTest({name: testName, nodes: 1});
+
+ replTest.startSet();
+ replTest.initiate();
+
+ let primary = replTest.getPrimary();
+ let testDB = primary.getDB("test");
+
+ //
+ // Explicitly set the replica set to feature compatibility version 4.2.
+ //
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+ //
+ // Create a collection with a validator using 4.2 query features.
+ //
+ assert.commandWorked(testDB.createCollection("coll", {validator: validator}));
+
+ //
+ // Downgrade the replica set to feature compatibility version 4.0.
+ //
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+ //
+ // Add a new member to the replica set.
+ //
+ let secondaryDBPath = MongoRunner.dataPath + testName + "_secondary";
+ resetDbpath(secondaryDBPath);
+ let secondary = replTest.add({dbpath: secondaryDBPath});
+ replTest.reInitiate(secondary);
+ reconnect(primary);
+ reconnect(secondary);
+
+ //
+ // Once the new member completes its initial sync, stop it, remove it from the replica set,
+ // and start it back up as an individual instance.
+ //
+ replTest.waitForState(secondary, [ReplSetTest.State.PRIMARY, ReplSetTest.State.SECONDARY]);
+
+ replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
+
+ secondary = MongoRunner.runMongod({dbpath: secondaryDBPath, noCleanData: true});
+ assert.neq(null, secondary, "mongod was unable to start up");
+
+ //
+ // Verify that the validator synced to the new member by attempting to insert a document
+ // that does not validate and checking that the insert fails.
+ //
+ let secondaryDB = secondary.getDB("test");
+ assert.writeError(secondaryDB.coll.insert(nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure);
+
+ //
+ // Verify that, even though the existing validator still works, it is not possible to create
+ // a new validator using 4.2 query features because of feature compatibility version 4.0.
+ //
+ assert.commandFailedWithCode(secondaryDB.runCommand({collMod: "coll", validator: validator}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+ MongoRunner.stopMongod(secondary);
+}
+
+// Ban the use of expressions that were introduced or had their parsing modified in 4.2.
+testValidator({$expr: {$eq: [{$round: "$a"}, 4]}}, {a: 5.2});
+testValidator({$expr: {$eq: [{$trunc: ["$a", 2]}, 4.1]}}, {a: 4.23});
+testValidator({$expr: {$regexMatch: {input: "$a", regex: /sentinel/}}}, {a: "no dice"});
}());
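
Each validator above rejects documents at insert time once created; the first one behaves as in this sketch (collection name hypothetical, FCV 4.2 assumed so $round parses):

// Sketch only: a $expr validator using the 4.2 $round expression.
assert.commandWorked(db.createCollection(
    "v_demo", {validator: {$expr: {$eq: [{$round: "$a"}, 4]}}}));
assert.writeError(db.v_demo.insert({a: 5.2}));     // rounds to 5, rejected
assert.commandWorked(db.v_demo.insert({a: 4.2}));  // rounds to 4, accepted
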
diff --git a/jstests/replsets/command_response_operation_time.js b/jstests/replsets/command_response_operation_time.js
index d4aecdb2248..a9ae4d6ef70 100644
--- a/jstests/replsets/command_response_operation_time.js
+++ b/jstests/replsets/command_response_operation_time.js
@@ -5,58 +5,57 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- function assertCorrectOperationTime(operationTime, expectedTimestamp, opTimeType) {
- assert.eq(0,
- timestampCmp(operationTime, expectedTimestamp),
- "operationTime in command response, " + operationTime +
- ", does not equal the last " + opTimeType + " timestamp, " +
- expectedTimestamp);
- }
-
- var name = "command_response_operation_time";
-
- var replTest = new ReplSetTest(
- {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- replTest.stopSet();
- return;
- }
- replTest.initiate();
-
- var res, statusRes;
- var testDB = replTest.getPrimary().getDB(name);
-
- jsTestLog("Executing majority write.");
- res = assert.commandWorked(
- testDB.runCommand({insert: "foo", documents: [{x: 1}], writeConcern: {w: "majority"}}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(
- res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
-
- jsTestLog("Executing local write.");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 2}]}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
-
- replTest.awaitLastOpCommitted();
-
- jsTestLog("Executing majority read.");
- res = assert.commandWorked(
- testDB.runCommand({find: "foo", filter: {x: 1}, readConcern: {level: "majority"}}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(
- res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
-
- jsTestLog("Executing local read.");
- res = assert.commandWorked(testDB.runCommand({find: "foo", filter: {x: 1}}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
+"use strict";
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+function assertCorrectOperationTime(operationTime, expectedTimestamp, opTimeType) {
+ assert.eq(0,
+ timestampCmp(operationTime, expectedTimestamp),
+ "operationTime in command response, " + operationTime + ", does not equal the last " +
+ opTimeType + " timestamp, " + expectedTimestamp);
+}
+
+var name = "command_response_operation_time";
+
+var replTest = new ReplSetTest(
+ {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
replTest.stopSet();
+ return;
+}
+replTest.initiate();
+
+var res, statusRes;
+var testDB = replTest.getPrimary().getDB(name);
+
+jsTestLog("Executing majority write.");
+res = assert.commandWorked(
+ testDB.runCommand({insert: "foo", documents: [{x: 1}], writeConcern: {w: "majority"}}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(
+ res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
+
+jsTestLog("Executing local write.");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 2}]}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
+
+replTest.awaitLastOpCommitted();
+
+jsTestLog("Executing majority read.");
+res = assert.commandWorked(
+ testDB.runCommand({find: "foo", filter: {x: 1}, readConcern: {level: "majority"}}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(
+ res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
+
+jsTestLog("Executing local read.");
+res = assert.commandWorked(testDB.runCommand({find: "foo", filter: {x: 1}}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
+
+replTest.stopSet();
})();
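
The invariant checked for the majority cases reduces to three lines, reusing the test's own handles:

// Sketch only: a w:"majority" write's operationTime equals the
// lastCommittedOpTime timestamp reported by replSetGetStatus.
var writeRes = assert.commandWorked(testDB.runCommand(
    {insert: "foo", documents: [{x: 3}], writeConcern: {w: "majority"}}));
var status = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
assert.eq(0,
          timestampCmp(writeRes.operationTime, status.optimes.lastCommittedOpTime.ts));
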
diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js
index e99ef63ddba..4a35cb747f1 100644
--- a/jstests/replsets/commands_that_write_accept_wc.js
+++ b/jstests/replsets/commands_that_write_accept_wc.js
@@ -7,186 +7,186 @@
*/
(function() {
- "use strict";
- var replTest = new ReplSetTest({
- name: 'WCSet',
- // Set priority of secondaries to zero to prevent spurious elections.
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var dbName = "wc-test";
- var db = master.getDB(dbName);
- var collName = 'leaves';
- var coll = db[collName];
-
- function dropTestCollection() {
- replTest.awaitReplication();
- coll.drop();
- assert.eq(0, coll.find().itcount(), "test collection not empty");
+"use strict";
+var replTest = new ReplSetTest({
+ name: 'WCSet',
+ // Set priority of secondaries to zero to prevent spurious elections.
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var dbName = "wc-test";
+var db = master.getDB(dbName);
+var collName = 'leaves';
+var coll = db[collName];
+
+function dropTestCollection() {
+ replTest.awaitReplication();
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+dropTestCollection();
+
+var commands = [];
+
+commands.push({
+ req: {insert: collName, documents: [{type: 'maple'}]},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'maple'}), 1);
}
-
- dropTestCollection();
-
- var commands = [];
-
- commands.push({
- req: {insert: collName, documents: [{type: 'maple'}]},
- setupFunc: function() {},
- confirmFunc: function() {
- assert.eq(coll.count({type: 'maple'}), 1);
- }
- });
-
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.getIndexes().length, 1);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 2);
- }
- });
-
- commands.push({
- req: {
- update: collName,
- updates: [{
- q: {type: 'oak'},
- u: [{$set: {type: 'ginkgo'}}],
- }],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: {$set: {type: 'ginkgo'}},
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: [{$set: {type: 'ginkgo'}}],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- assert.eq(coll.count({type: 'willow'}), 0);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'willow'}), 1);
- }
- });
-
- commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- coll.insert({_id: 2, type: 'maple'});
- },
- confirmFunc: function() {
- assert.eq(db.foo.count({type: 'oak'}), 1);
- assert.eq(db.foo.count({type: 'maple'}), 1);
- db.foo.drop();
- }
- });
-
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- return {count: values.length};
- },
- out: "foo"
+});
+
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.getIndexes().length, 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 2);
+ }
+});
+
+commands.push({
+ req: {
+ update: collName,
+ updates: [{
+ q: {type: 'oak'},
+ u: [{$set: {type: 'ginkgo'}}],
+ }],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: {$set: {type: 'ginkgo'}},
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: [{$set: {type: 'ginkgo'}}],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ assert.eq(coll.count({type: 'willow'}), 0);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'willow'}), 1);
+ }
+});
+
+commands.push({
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ coll.insert({_id: 2, type: 'maple'});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.count({type: 'oak'}), 1);
+ assert.eq(db.foo.count({type: 'maple'}), 1);
+ db.foo.drop();
+ }
+});
+
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
},
- setupFunc: function() {
- coll.insert({x: 1, tags: ["a", "b"]});
- coll.insert({x: 2, tags: ["b", "c"]});
- coll.insert({x: 3, tags: ["c", "a"]});
- coll.insert({x: 4, tags: ["b", "c"]});
+ reduce: function(key, values) {
+ return {count: values.length};
},
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
- db.foo.drop();
- }
- });
-
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestCollection();
- cmd.setupFunc();
- var res = db.runCommand(cmd.req);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full replica set had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
+ out: "foo"
+ },
+ setupFunc: function() {
+ coll.insert({x: 1, tags: ["a", "b"]});
+ coll.insert({x: 2, tags: ["b", "c"]});
+ coll.insert({x: 3, tags: ["c", "a"]});
+ coll.insert({x: 4, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
+ db.foo.drop();
}
+});
- function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'invalid'};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestCollection();
- cmd.setupFunc();
- var res = coll.runCommand(cmd.req);
- assert.commandFailedWithCode(res, ErrorCodes.UnknownReplWriteConcern);
- cmd.confirmFunc();
- }
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
+ jsTest.log("Testing " + tojson(cmd.req));
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- testInvalidWriteConcern(cmd);
- });
+ dropTestCollection();
+ cmd.setupFunc();
+ var res = db.runCommand(cmd.req);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+ 'command on a full replica set had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
+
+function testInvalidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'invalid'};
+ jsTest.log("Testing " + tojson(cmd.req));
- replTest.stopSet();
+ dropTestCollection();
+ cmd.setupFunc();
+ var res = coll.runCommand(cmd.req);
+ assert.commandFailedWithCode(res, ErrorCodes.UnknownReplWriteConcern);
+ cmd.confirmFunc();
+}
+
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ testInvalidWriteConcern(cmd);
+});
+
+replTest.stopSet();
})();
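
Every entry in 'commands' goes through the same harness: attach a write concern, run the command, and inspect writeConcernError; stripped to its core (the inserted document is hypothetical):

// Sketch only: the valid-write-concern path of the harness above.
var cmdReq = {insert: 'leaves', documents: [{type: 'elm'}]};
cmdReq.writeConcern = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
var cmdRes = db.runCommand(cmdReq);
assert.commandWorked(cmdRes);
assert(!cmdRes.writeConcernError,
       'command on a full replica set had writeConcernError: ' + tojson(cmdRes));
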
diff --git a/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js b/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js
index 71ca1189b45..56b156d8289 100644
--- a/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js
+++ b/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js
@@ -5,55 +5,55 @@
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- const dbName = "test";
- const collName = "commit_prepared_transaction_before_stable_timestamp";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_prepared_transaction_before_stable_timestamp";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Make sure there is no lag between the oldest timestamp and the stable timestamp so we can
- // test that committing a prepared transaction behind the oldest timestamp succeeds.
- assert.commandWorked(primary.adminCommand({
- "configureFailPoint": 'WTSetOldestTSToStableTS',
- "mode": 'alwaysOn',
- }));
+// Make sure there is no lag between the oldest timestamp and the stable timestamp so we can
+// test that committing a prepared transaction behind the oldest timestamp succeeds.
+assert.commandWorked(primary.adminCommand({
+ "configureFailPoint": 'WTSetOldestTSToStableTS',
+ "mode": 'alwaysOn',
+}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
- // Doing a majority write after preparing the transaction ensures that the stable timestamp is
- // past the prepare timestamp because this write must be in the committed snapshot.
- assert.commandWorked(
- testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
+jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
+// Doing a majority write after preparing the transaction ensures that the stable timestamp is
+// past the prepare timestamp because this write must be in the committed snapshot.
+assert.commandWorked(
+ testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
- jsTestLog("Committing the transaction before the stable timestamp");
+jsTestLog("Committing the transaction before the stable timestamp");
- // Since we have advanced the stableTimestamp to be after the prepareTimestamp, when we commit
- // at the prepareTimestamp, we are certain that we are committing behind the stableTimestamp.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+// Since we have advanced the stableTimestamp to be after the prepareTimestamp, when we commit
+// at the prepareTimestamp, we are certain that we are committing behind the stableTimestamp.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // Make sure we can see the insert from the prepared transaction.
- assert(arrayEq(sessionColl.find().toArray(), [{_id: 1}, {_id: 2}]));
+// Make sure we can see the insert from the prepared transaction.
+assert(arrayEq(sessionColl.find().toArray(), [{_id: 1}, {_id: 2}]));
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'WTSetOldestTSToStableTS', mode: 'off'}));
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'WTSetOldestTSToStableTS', mode: 'off'}));
- replTest.stopSet();
+replTest.stopSet();
}());
\ No newline at end of file
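
The prepare-then-commit sequence from prepare_helpers.js, used here and in the following tests, is in essence (the session and the collection are assumed to exist already):

// Sketch only: prepare a transaction, then commit at the prepare timestamp.
session.startTransaction();
assert.commandWorked(
    session.getDatabase("test").getCollection("coll").insert({_id: 9}));
const prepareTs = PrepareHelpers.prepareTransaction(session);  // returns the prepareTimestamp
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));
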
diff --git a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
index 12c02f9d642..606e3bc5019 100644
--- a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
+++ b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
@@ -13,88 +13,88 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "commit_transaction_initial_sync_data_already_applied";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_transaction_initial_sync_data_already_applied";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1, a: 0}));
+assert.commandWorked(testColl.insert({_id: 1, a: 0}));
- // Ensure that the "a" field is unique
- assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true}));
+// Ensure that the "a" field is unique
+assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true}));
- jsTestLog("Restarting the secondary");
+jsTestLog("Restarting the secondary");
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync before cloning any
- // collections, but during the period that the node is fetching oplog entries from its
- // sync source. This will make it so that all operations after this and before the failpoint is
- // turned off will be reflected in the data but also applied during the oplog application phase
- // of initial sync.
- secondary = replTest.restart(secondary, {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangBeforeCopyingDatabases': tojson({mode: 'alwaysOn'}),
- 'numInitialSyncAttempts': 1
- }
- });
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync before cloning any
+// collections, but during the period that the node is fetching oplog entries from its
+// sync source. This will make it so that all operations after this and before the failpoint is
+// turned off will be reflected in the data but also applied during the oplog application phase
+// of initial sync.
+secondary = replTest.restart(secondary, {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangBeforeCopyingDatabases': tojson({mode: 'alwaysOn'}),
+ 'numInitialSyncAttempts': 1
+ }
+});
- // Wait for fail point message to be logged so that we know that initial sync is paused.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+// Wait for fail point message to be logged so that we know that initial sync is paused.
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- jsTestLog("Initial sync paused");
+jsTestLog("Initial sync paused");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 0, b: 0}));
+assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 0, b: 0}));
- session.startTransaction();
+session.startTransaction();
- // When the commitTransaction oplog entry is applied, this operation should fail with a
- // duplicate key error because the data will already reflect the transaction.
- assert.commandWorked(sessionColl.insert({_id: 2, a: 1}));
+// When the commitTransaction oplog entry is applied, this operation should fail with a
+// duplicate key error because the data will already reflect the transaction.
+assert.commandWorked(sessionColl.insert({_id: 2, a: 1}));
- // When the commitTransaction oplog entry is applied, this operation should succeed even though
- // the one before it fails. This is used to make sure that initial sync is applying operations
- // from a transaction in a separate storage transaction.
- assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {b: 1}}));
+// When the commitTransaction oplog entry is applied, this operation should succeed even though
+// the one before it fails. This is used to make sure that initial sync is applying operations
+// from a transaction in a separate storage transaction.
+assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {b: 1}}));
- assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}}));
- assert.commandWorked(sessionColl.insert({_id: 3, a: 1}));
+assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}}));
+assert.commandWorked(sessionColl.insert({_id: 3, a: 1}));
- jsTestLog("Preparing and committing a transaction");
+jsTestLog("Preparing and committing a transaction");
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangBeforeCopyingDatabases", mode: "off"}));
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangBeforeCopyingDatabases", mode: "off"}));
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Initial sync completed");
+jsTestLog("Initial sync completed");
- // Make sure that the later operations from the transaction succeed even though the first
- // operation will fail during oplog application.
- let res = secondary.getDB(dbName).getCollection(collName).find();
- assert.eq(res.toArray(), [{_id: 1, a: 0}, {_id: 2}, {_id: 3, a: 1}], res);
+// Make sure that the later operations from the transaction succeed even though the first
+// operation will fail during oplog application.
+let res = secondary.getDB(dbName).getCollection(collName).find();
+assert.eq(res.toArray(), [{_id: 1, a: 0}, {_id: 2}, {_id: 3, a: 1}], res);
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
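
The synchronization idiom the restart relies on, block until the node logs that the failpoint fired, race the paused stage, then release it, looks like this on its own (names taken from the test above):

// Sketch only: wait for a failpoint's log line, then disable the failpoint.
checkLog.contains(secondary,
                  'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
// ...perform the writes that must race with the paused initial sync...
assert.commandWorked(secondary.adminCommand(
    {configureFailPoint: "initialSyncHangBeforeCopyingDatabases", mode: "off"}));
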
diff --git a/jstests/replsets/commit_transaction_recovery.js b/jstests/replsets/commit_transaction_recovery.js
index a1a12e8c183..cdd2c49a830 100644
--- a/jstests/replsets/commit_transaction_recovery.js
+++ b/jstests/replsets/commit_transaction_recovery.js
@@ -7,64 +7,64 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primary = replTest.getPrimary();
+let primary = replTest.getPrimary();
- const dbName = "test";
- const collName = "commit_transaction_recovery";
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_transaction_recovery";
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- testDB.runCommand({drop: collName});
- assert.commandWorked(testDB.runCommand({create: collName}));
+testDB.runCommand({drop: collName});
+assert.commandWorked(testDB.runCommand({create: collName}));
- let session = primary.startSession({causalConsistency: false});
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+let session = primary.startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Disable snapshotting on all nodes");
- // Disable snapshotting so that future operations do not enter the majority snapshot.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+jsTestLog("Disable snapshotting on all nodes");
+// Disable snapshotting so that future operations do not enter the majority snapshot.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
- jsTestLog("Committing the transaction");
- // Since the commitTimestamp is after the last snapshot, this oplog entry will be replayed
- // during replication recovery during restart.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+jsTestLog("Committing the transaction");
+// Since the commitTimestamp is after the last snapshot, this oplog entry will be replayed
+// during replication recovery during restart.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- jsTestLog("Restarting node");
+jsTestLog("Restarting node");
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on the node following the restart.
- replTest.restart(primary);
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on the node following the restart.
+replTest.restart(primary);
- jsTestLog("Node was restarted");
+jsTestLog("Node was restarted");
- primary = replTest.getPrimary();
- testDB = primary.getDB(dbName);
- session = primary.startSession({causalConsistency: false});
- sessionDB = session.getDatabase(dbName);
- session.startTransaction();
+primary = replTest.getPrimary();
+testDB = primary.getDB(dbName);
+session = primary.startSession({causalConsistency: false});
+sessionDB = session.getDatabase(dbName);
+session.startTransaction();
- // Make sure that we can read the document from the transaction after recovery.
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1});
+// Make sure that we can read the document from the transaction after recovery.
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1});
- // Make sure that another write on the same document from the transaction has no write conflict.
- // Also, make sure that we can run another transaction after recovery without any problems.
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
+// Make sure that another write on the same document from the transaction has no write conflict.
+// Also, make sure that we can run another transaction after recovery without any problems.
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
- replTest.stopSet();
+replTest.stopSet();
}());
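Aside: the prepare/commit helper flow exercised above, reduced to its core. This is a sketch under the test's own assumptions (PrepareHelpers loaded, a running replica set 'replTest'); the collection name is hypothetical:

const session = replTest.getPrimary().startSession({causalConsistency: false});
const coll = session.getDatabase("test").getCollection("example");
session.startTransaction();
assert.commandWorked(coll.insert({_id: 0}));
// prepareTransaction returns the prepare timestamp; the commit timestamp must be >= it.
const prepareTs = PrepareHelpers.prepareTransaction(session);
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));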
diff --git a/jstests/replsets/dbcheck.js b/jstests/replsets/dbcheck.js
index 07b268bb0ec..99ab6261b00 100644
--- a/jstests/replsets/dbcheck.js
+++ b/jstests/replsets/dbcheck.js
@@ -5,103 +5,99 @@
*/
(function() {
- "use strict";
+"use strict";
- // TODO(SERVER-31323): Re-enable when existing dbCheck issues are fixed.
- if (true)
- return;
+// TODO(SERVER-31323): Re-enable when existing dbCheck issues are fixed.
+if (true)
+ return;
- let nodeCount = 3;
- let replSet = new ReplSetTest({name: "dbCheckSet", nodes: nodeCount});
+let nodeCount = 3;
+let replSet = new ReplSetTest({name: "dbCheckSet", nodes: nodeCount});
- replSet.startSet();
- replSet.initiate();
- replSet.awaitSecondaryNodes();
+replSet.startSet();
+replSet.initiate();
+replSet.awaitSecondaryNodes();
- function forEachSecondary(f) {
- for (let secondary of replSet.getSecondaries()) {
- f(secondary);
- }
+function forEachSecondary(f) {
+ for (let secondary of replSet.getSecondaries()) {
+ f(secondary);
}
+}
- function forEachNode(f) {
- f(replSet.getPrimary());
- forEachSecondary(f);
- }
+function forEachNode(f) {
+ f(replSet.getPrimary());
+ forEachSecondary(f);
+}
- let dbName = "dbCheck-test";
- let collName = "dbcheck-collection";
+let dbName = "dbCheck-test";
+let collName = "dbcheck-collection";
- // Clear local.system.healthlog.
- function clearLog() {
- forEachNode(conn => conn.getDB("local").system.healthlog.drop());
- }
+// Clear local.system.healthlog.
+function clearLog() {
+ forEachNode(conn => conn.getDB("local").system.healthlog.drop());
+}
- function addEnoughForMultipleBatches(collection) {
- collection.insertMany([...Array(10000).keys()].map(x => ({_id: x})));
- }
+function addEnoughForMultipleBatches(collection) {
+ collection.insertMany([...Array(10000).keys()].map(x => ({_id: x})));
+}
- // Name for a collection which takes multiple batches to check and which shouldn't be modified
- // by any of the tests.
- let multiBatchSimpleCollName = "dbcheck-simple-collection";
- addEnoughForMultipleBatches(replSet.getPrimary().getDB(dbName)[multiBatchSimpleCollName]);
+// Name for a collection which takes multiple batches to check and which shouldn't be modified
+// by any of the tests.
+let multiBatchSimpleCollName = "dbcheck-simple-collection";
+addEnoughForMultipleBatches(replSet.getPrimary().getDB(dbName)[multiBatchSimpleCollName]);
- function dbCheckCompleted(db) {
- return db.currentOp().inprog.filter(x => x["desc"] == "dbCheck")[0] === undefined;
- }
+function dbCheckCompleted(db) {
+ return db.currentOp().inprog.filter(x => x["desc"] == "dbCheck")[0] === undefined;
+}
- // Wait for dbCheck to complete (on both primaries and secondaries). Fails an assertion if
- // dbCheck takes longer than maxMs.
- function awaitDbCheckCompletion(db) {
- let start = Date.now();
+// Wait for dbCheck to complete (on both primaries and secondaries). Fails an assertion if
+// dbCheck does not finish before the assert.soon timeout.
+function awaitDbCheckCompletion(db) {
+ let start = Date.now();
- assert.soon(() => dbCheckCompleted(db), "dbCheck timed out");
- replSet.awaitSecondaryNodes();
- replSet.awaitReplication();
+ assert.soon(() => dbCheckCompleted(db), "dbCheck timed out");
+ replSet.awaitSecondaryNodes();
+ replSet.awaitReplication();
- // Give the health log buffers some time to flush.
- sleep(100);
- }
+ // Give the health log buffers some time to flush.
+ sleep(100);
+}
- // Check that everything in the health log shows a successful and complete check with no found
- // inconsistencies.
- function checkLogAllConsistent(conn) {
- let healthlog = conn.getDB("local").system.healthlog;
+// Check that everything in the health log shows a successful and complete check with no found
+// inconsistencies.
+function checkLogAllConsistent(conn) {
+ let healthlog = conn.getDB("local").system.healthlog;
- assert(healthlog.find().count(), "dbCheck put no batches in health log");
+ assert(healthlog.find().count(), "dbCheck put no batches in health log");
- let maxResult = healthlog.aggregate([
- {$match: {operation: "dbCheckBatch"}},
- {$group: {_id: 1, key: {$max: "$data.maxKey"}}}
- ]);
+ let maxResult = healthlog.aggregate(
+ [{$match: {operation: "dbCheckBatch"}}, {$group: {_id: 1, key: {$max: "$data.maxKey"}}}]);
- assert(maxResult.hasNext(), "dbCheck put no batches in health log");
- assert.eq(maxResult.next().key, {"$maxKey": 1}, "dbCheck batches should end at MaxKey");
+ assert(maxResult.hasNext(), "dbCheck put no batches in health log");
+ assert.eq(maxResult.next().key, {"$maxKey": 1}, "dbCheck batches should end at MaxKey");
- let minResult = healthlog.aggregate([
- {$match: {operation: "dbCheckBatch"}},
- {$group: {_id: 1, key: {$min: "$data.minKey"}}}
- ]);
+ let minResult = healthlog.aggregate(
+ [{$match: {operation: "dbCheckBatch"}}, {$group: {_id: 1, key: {$min: "$data.minKey"}}}]);
- assert(minResult.hasNext(), "dbCheck put no batches in health log");
- assert.eq(minResult.next().key, {"$minKey": 1}, "dbCheck batches should start at MinKey");
+ assert(minResult.hasNext(), "dbCheck put no batches in health log");
+ assert.eq(minResult.next().key, {"$minKey": 1}, "dbCheck batches should start at MinKey");
- // Assert no errors (i.e., found inconsistencies).
- let errs = healthlog.find({"severity": {"$ne": "info"}});
- if (errs.hasNext()) {
- assert(false, "dbCheck found inconsistency: " + tojson(errs.next()));
- }
+ // Assert no errors (i.e., found inconsistencies).
+ let errs = healthlog.find({"severity": {"$ne": "info"}});
+ if (errs.hasNext()) {
+ assert(false, "dbCheck found inconsistency: " + tojson(errs.next()));
+ }
- // Assert no failures (i.e., checks that failed to complete).
- let failedChecks = healthlog.find({"operation": "dbCheckBatch", "data.success": false});
- if (failedChecks.hasNext()) {
- assert(false, "dbCheck batch failed: " + tojson(failedChecks.next()));
- }
+ // Assert no failures (i.e., checks that failed to complete).
+ let failedChecks = healthlog.find({"operation": "dbCheckBatch", "data.success": false});
+ if (failedChecks.hasNext()) {
+ assert(false, "dbCheck batch failed: " + tojson(failedChecks.next()));
+ }
- // Finds an entry with data.minKey === MinKey, and then matches its maxKey against
- // another document's minKey, and so on, and then checks that the result of that search
- // has data.maxKey === MaxKey.
- let completeCoverage = healthlog.aggregate([
+ // Finds an entry with data.minKey === MinKey, and then matches its maxKey against
+ // another document's minKey, and so on, and then checks that the result of that search
+ // has data.maxKey === MaxKey.
+ let completeCoverage = healthlog.aggregate([
{$match: {"operation": "dbCheckBatch", "data.minKey": MinKey}},
{
$graphLookup: {
@@ -116,335 +112,330 @@
{$match: {"batchLimits.data.maxKey": MaxKey}}
]);
- assert(completeCoverage.hasNext(), "dbCheck batches do not cover full key range");
- }
+ assert(completeCoverage.hasNext(), "dbCheck batches do not cover full key range");
+}
+
+// Check that the total of all batches in the health log on `conn` is equal to the total number
+// of documents and bytes in `coll`.
+
+// Returns a document with fields "totalDocs" and "totalBytes", representing the total size of
+// the batches in the health log.
+function healthLogCounts(healthlog) {
+ let result = healthlog.aggregate([
+ {$match: {"operation": "dbCheckBatch"}},
+ {
+ $group: {
+ "_id": null,
+ "totalDocs": {$sum: "$data.count"},
+ "totalBytes": {$sum: "$data.bytes"}
+ }
+ }
+ ]);
- // Check that the total of all batches in the health log on `conn` is equal to the total number
- // of documents and bytes in `coll`.
+ assert(result.hasNext(), "dbCheck put no batches in health log");
- // Returns a document with fields "totalDocs" and "totalBytes", representing the total size of
- // the batches in the health log.
- function healthLogCounts(healthlog) {
- let result = healthlog.aggregate([
- {$match: {"operation": "dbCheckBatch"}},
- {
- $group: {
- "_id": null,
- "totalDocs": {$sum: "$data.count"},
- "totalBytes": {$sum: "$data.bytes"}
- }
- }
- ]);
+ return result.next();
+}
- assert(result.hasNext(), "dbCheck put no batches in health log");
+function checkTotalCounts(conn, coll) {
+ let result = healthLogCounts(conn.getDB("local").system.healthlog);
- return result.next();
- }
+ assert.eq(result.totalDocs, coll.count(), "dbCheck batches do not count all documents");
- function checkTotalCounts(conn, coll) {
- let result = healthLogCounts(conn.getDB("local").system.healthlog);
+ // Calculate the size on the client side, because collection.dataSize is not necessarily the
+ // sum of the document sizes.
+ let size = coll.find().toArray().reduce((x, y) => x + bsonsize(y), 0);
- assert.eq(result.totalDocs, coll.count(), "dbCheck batches do not count all documents");
+ assert.eq(result.totalBytes, size, "dbCheck batches do not count all bytes");
+}
- // Calculate the size on the client side, because collection.dataSize is not necessarily the
- // sum of the document sizes.
- let size = coll.find().toArray().reduce((x, y) => x + bsonsize(y), 0);
+// First check behavior when everything is consistent.
+function simpleTestConsistent() {
+ let master = replSet.getPrimary();
+ clearLog();
- assert.eq(result.totalBytes, size, "dbCheck batches do not count all bytes");
- }
+ assert.neq(master, undefined);
+ let db = master.getDB(dbName);
+ assert.commandWorked(db.runCommand({"dbCheck": multiBatchSimpleCollName}));
- // First check behavior when everything is consistent.
- function simpleTestConsistent() {
- let master = replSet.getPrimary();
- clearLog();
+ awaitDbCheckCompletion(db);
- assert.neq(master, undefined);
- let db = master.getDB(dbName);
- assert.commandWorked(db.runCommand({"dbCheck": multiBatchSimpleCollName}));
+ checkLogAllConsistent(master);
+ checkTotalCounts(master, db[multiBatchSimpleCollName]);
- awaitDbCheckCompletion(db);
+ forEachSecondary(function(secondary) {
+ checkLogAllConsistent(secondary);
+ checkTotalCounts(secondary, secondary.getDB(dbName)[multiBatchSimpleCollName]);
+ });
+}
- checkLogAllConsistent(master);
- checkTotalCounts(master, db[multiBatchSimpleCollName]);
+// Same thing, but now with concurrent updates.
+function concurrentTestConsistent() {
+ let master = replSet.getPrimary();
- forEachSecondary(function(secondary) {
- checkLogAllConsistent(secondary);
- checkTotalCounts(secondary, secondary.getDB(dbName)[multiBatchSimpleCollName]);
- });
+ let db = master.getDB(dbName);
+
+ // Add enough documents that dbCheck will take a few seconds.
+ db[collName].insertMany([...Array(10000).keys()].map(x => ({i: x})));
+
+ assert.commandWorked(db.runCommand({"dbCheck": collName}));
+
+ let coll = db[collName];
+
+ while (db.currentOp().inprog.filter(x => x["desc"] === "dbCheck").length) {
+ coll.updateOne({}, {"$inc": {"i": 10}});
+ coll.insertOne({"i": 42});
+ coll.deleteOne({});
}
- // Same thing, but now with concurrent updates.
- function concurrentTestConsistent() {
- let master = replSet.getPrimary();
+ awaitDbCheckCompletion(db);
- let db = master.getDB(dbName);
+ checkLogAllConsistent(master);
+ // Omit check for total counts, which might have changed with concurrent updates.
- // Add enough documents that dbCheck will take a few seconds.
- db[collName].insertMany([...Array(10000).keys()].map(x => ({i: x})));
+ forEachSecondary(secondary => checkLogAllConsistent(secondary, true));
+}
- assert.commandWorked(db.runCommand({"dbCheck": collName}));
+simpleTestConsistent();
+concurrentTestConsistent();
- let coll = db[collName];
+// Test the various other parameters.
+function testDbCheckParameters() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
- while (db.currentOp().inprog.filter(x => x["desc"] === "dbCheck").length) {
- coll.updateOne({}, {"$inc": {"i": 10}});
- coll.insertOne({"i": 42});
- coll.deleteOne({});
- }
+ // Clean up for the test.
+ clearLog();
+
+ let docSize = bsonsize({_id: 10});
- awaitDbCheckCompletion(db);
+ function checkEntryBounds(start, end) {
+ forEachNode(function(node) {
+ let healthlog = node.getDB("local").system.healthlog;
+ let keyBoundsResult = healthlog.aggregate([
+ {$match: {operation: "dbCheckBatch"}},
+ {
+ $group:
+ {_id: null, minKey: {$min: "$data.minKey"}, maxKey: {$max: "$data.maxKey"}}
+ }
+ ]);
- checkLogAllConsistent(master);
- // Omit check for total counts, which might have changed with concurrent updates.
+ assert(keyBoundsResult.hasNext(), "dbCheck put no batches in health log");
- forEachSecondary(secondary => checkLogAllConsistent(secondary, true));
+ let bounds = keyBoundsResult.next();
+ assert.eq(bounds.minKey, start, "dbCheck minKey field incorrect");
+ assert.eq(bounds.maxKey, end, "dbCheck maxKey field incorrect");
+
+ let counts = healthLogCounts(healthlog);
+ assert.eq(counts.totalDocs, end - start);
+ assert.eq(counts.totalBytes, (end - start) * docSize);
+ });
}
- simpleTestConsistent();
- concurrentTestConsistent();
-
- // Test the various other parameters.
- function testDbCheckParameters() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
-
- // Clean up for the test.
- clearLog();
-
- let docSize = bsonsize({_id: 10});
-
- function checkEntryBounds(start, end) {
- forEachNode(function(node) {
- let healthlog = node.getDB("local").system.healthlog;
- let keyBoundsResult = healthlog.aggregate([
- {$match: {operation: "dbCheckBatch"}},
- {
- $group: {
- _id: null,
- minKey: {$min: "$data.minKey"},
- maxKey: {$max: "$data.maxKey"}
- }
- }
- ]);
-
- assert(keyBoundsResult.hasNext(), "dbCheck put no batches in health log");
-
- let bounds = keyBoundsResult.next();
- assert.eq(bounds.minKey, start, "dbCheck minKey field incorrect");
- assert.eq(bounds.maxKey, end, "dbCheck maxKey field incorrect");
-
- let counts = healthLogCounts(healthlog);
- assert.eq(counts.totalDocs, end - start);
- assert.eq(counts.totalBytes, (end - start) * docSize);
- });
- }
+ // Run a dbCheck on just a subset of the documents
+ let start = 1000;
+ let end = 9000;
- // Run a dbCheck on just a subset of the documents
- let start = 1000;
- let end = 9000;
+ assert.commandWorked(
+ db.runCommand({dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end}));
- assert.commandWorked(
- db.runCommand({dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end}));
+ awaitDbCheckCompletion(db);
- awaitDbCheckCompletion(db);
+ checkEntryBounds(start, end);
- checkEntryBounds(start, end);
+ // Now, clear the health logs again,
+ clearLog();
- // Now, clear the health logs again,
- clearLog();
+ let maxCount = 5000;
- let maxCount = 5000;
+ // and do the same with a count constraint.
+ assert.commandWorked(db.runCommand(
+ {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxCount: maxCount}));
- // and do the same with a count constraint.
- assert.commandWorked(db.runCommand(
- {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxCount: maxCount}));
+ // We expect it to reach the count limit before reaching maxKey.
+ awaitDbCheckCompletion(db);
+ checkEntryBounds(start, start + maxCount);
- // We expect it to reach the count limit before reaching maxKey.
- awaitDbCheckCompletion(db);
- checkEntryBounds(start, start + maxCount);
+ // Finally, do the same with a size constraint.
+ clearLog();
+ let maxSize = maxCount * docSize;
+ assert.commandWorked(db.runCommand(
+ {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxSize: maxSize}));
+ awaitDbCheckCompletion(db);
+ checkEntryBounds(start, start + maxCount);
+}
- // Finally, do the same with a size constraint.
- clearLog();
- let maxSize = maxCount * docSize;
- assert.commandWorked(db.runCommand(
- {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxSize: maxSize}));
- awaitDbCheckCompletion(db);
- checkEntryBounds(start, start + maxCount);
- }
+testDbCheckParameters();
- testDbCheckParameters();
-
- // Now, test some unusual cases where the command should fail.
- function testErrorOnNonexistent() {
- let master = replSet.getPrimary();
- let db = master.getDB("this-probably-doesnt-exist");
- assert.commandFailed(db.runCommand({dbCheck: 1}),
- "dbCheck spuriously succeeded on nonexistent database");
- db = master.getDB(dbName);
- assert.commandFailed(db.runCommand({dbCheck: "this-also-probably-doesnt-exist"}),
- "dbCheck spuriously succeeded on nonexistent collection");
- }
+// Now, test some unusual cases where the command should fail.
+function testErrorOnNonexistent() {
+ let master = replSet.getPrimary();
+ let db = master.getDB("this-probably-doesnt-exist");
+ assert.commandFailed(db.runCommand({dbCheck: 1}),
+ "dbCheck spuriously succeeded on nonexistent database");
+ db = master.getDB(dbName);
+ assert.commandFailed(db.runCommand({dbCheck: "this-also-probably-doesnt-exist"}),
+ "dbCheck spuriously succeeded on nonexistent collection");
+}
- function testErrorOnSecondary() {
- let secondary = replSet.getSecondary();
- let db = secondary.getDB(dbName);
- assert.commandFailed(db.runCommand({dbCheck: collName}));
- }
+function testErrorOnSecondary() {
+ let secondary = replSet.getSecondary();
+ let db = secondary.getDB(dbName);
+ assert.commandFailed(db.runCommand({dbCheck: collName}));
+}
- function testErrorOnUnreplicated() {
- let master = replSet.getPrimary();
- let db = master.getDB("local");
+function testErrorOnUnreplicated() {
+ let master = replSet.getPrimary();
+ let db = master.getDB("local");
- assert.commandFailed(db.runCommand({dbCheck: "oplog.rs"}),
- "dbCheck spuriously succeeded on oplog");
- assert.commandFailed(master.getDB(dbName).runCommand({dbCheck: "system.profile"}),
- "dbCheck spuriously succeeded on system.profile");
- }
+ assert.commandFailed(db.runCommand({dbCheck: "oplog.rs"}),
+ "dbCheck spuriously succeeded on oplog");
+ assert.commandFailed(master.getDB(dbName).runCommand({dbCheck: "system.profile"}),
+ "dbCheck spuriously succeeded on system.profile");
+}
- testErrorOnNonexistent();
- testErrorOnSecondary();
- testErrorOnUnreplicated();
-
- // Test stepdown.
- function testSucceedsOnStepdown() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
-
- let nodeId = replSet.getNodeId(master);
- assert.commandWorked(db.runCommand({dbCheck: multiBatchSimpleCollName}));
-
- // Step down the master.
- assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 0, force: true}));
-
- // Wait for the cluster to come up.
- replSet.awaitSecondaryNodes();
-
- // Find the node we ran dbCheck on.
- db = replSet.getSecondaries()
- .filter(function isPreviousMaster(node) {
- return replSet.getNodeId(node) === nodeId;
- })[0]
- .getDB(dbName);
-
- // Check that it's still responding.
- try {
- assert.commandWorked(db.runCommand({ping: 1}),
- "ping failed after stepdown during dbCheck");
- } catch (e) {
- doassert("cannot connect after dbCheck with stepdown");
- }
+testErrorOnNonexistent();
+testErrorOnSecondary();
+testErrorOnUnreplicated();
- // And that our dbCheck completed.
- assert(dbCheckCompleted(db), "dbCheck failed to terminate on stepdown");
- }
+// Test stepdown.
+function testSucceedsOnStepdown() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
- testSucceedsOnStepdown();
+ let nodeId = replSet.getNodeId(master);
+ assert.commandWorked(db.runCommand({dbCheck: multiBatchSimpleCollName}));
- function collectionUuid(db, collName) {
- return db.getCollectionInfos().filter(coll => coll.name === collName)[0].info.uuid;
+ // Step down the master.
+ assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 0, force: true}));
+
+ // Wait for the cluster to come up.
+ replSet.awaitSecondaryNodes();
+
+ // Find the node we ran dbCheck on.
+ db = replSet.getSecondaries()
+ .filter(function isPreviousMaster(node) {
+ return replSet.getNodeId(node) === nodeId;
+ })[0]
+ .getDB(dbName);
+
+ // Check that it's still responding.
+ try {
+ assert.commandWorked(db.runCommand({ping: 1}), "ping failed after stepdown during dbCheck");
+ } catch (e) {
+ doassert("cannot connect after dbCheck with stepdown");
}
- function getDummyOplogEntry() {
- let master = replSet.getPrimary();
- let coll = master.getDB(dbName)[collName];
+ // And that our dbCheck completed.
+ assert(dbCheckCompleted(db), "dbCheck failed to terminate on stepdown");
+}
- let replSetStatus =
- assert.commandWorked(master.getDB("admin").runCommand({replSetGetStatus: 1}));
- let connStatus = replSetStatus.members.filter(m => m.self)[0];
- let lastOpTime = connStatus.optime;
+testSucceedsOnStepdown();
- let entry = master.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
- entry["ui"] = collectionUuid(master.getDB(dbName), collName);
- entry["ns"] = coll.stats().ns;
- entry["ts"] = new Timestamp();
+function collectionUuid(db, collName) {
+ return db.getCollectionInfos().filter(coll => coll.name === collName)[0].info.uuid;
+}
- return entry;
- }
+function getDummyOplogEntry() {
+ let master = replSet.getPrimary();
+ let coll = master.getDB(dbName)[collName];
- // Create various inconsistencies, and check that dbCheck spots them.
- function insertOnSecondaries(doc) {
- let master = replSet.getPrimary();
- let entry = getDummyOplogEntry();
- entry["op"] = "i";
- entry["o"] = doc;
+ let replSetStatus =
+ assert.commandWorked(master.getDB("admin").runCommand({replSetGetStatus: 1}));
+ let connStatus = replSetStatus.members.filter(m => m.self)[0];
+ let lastOpTime = connStatus.optime;
- master.getDB("local").oplog.rs.insertOne(entry);
- }
+ let entry = master.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
+ entry["ui"] = collectionUuid(master.getDB(dbName), collName);
+ entry["ns"] = coll.stats().ns;
+ entry["ts"] = new Timestamp();
- // Run an apply-ops-ish command on a secondary.
- function runCommandOnSecondaries(doc, ns) {
- let master = replSet.getPrimary();
- let entry = getDummyOplogEntry();
- entry["op"] = "c";
- entry["o"] = doc;
+ return entry;
+}
- if (ns !== undefined) {
- entry["ns"] = ns;
- }
+// Create various inconsistencies, and check that dbCheck spots them.
+function insertOnSecondaries(doc) {
+ let master = replSet.getPrimary();
+ let entry = getDummyOplogEntry();
+ entry["op"] = "i";
+ entry["o"] = doc;
- master.getDB("local").oplog.rs.insertOne(entry);
- }
+ master.getDB("local").oplog.rs.insertOne(entry);
+}
- // And on a primary.
- function runCommandOnPrimary(doc) {
- let master = replSet.getPrimary();
- let entry = getDummyOplogEntry();
- entry["op"] = "c";
- entry["o"] = doc;
+// Run an apply-ops-ish command on a secondary.
+function runCommandOnSecondaries(doc, ns) {
+ let master = replSet.getPrimary();
+ let entry = getDummyOplogEntry();
+ entry["op"] = "c";
+ entry["o"] = doc;
- master.getDB("admin").runCommand({applyOps: [entry]});
+ if (ns !== undefined) {
+ entry["ns"] = ns;
}
- // Just add an extra document, and test that it catches it.
- function simpleTestCatchesExtra() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
+ master.getDB("local").oplog.rs.insertOne(entry);
+}
- clearLog();
+// And on a primary.
+function runCommandOnPrimary(doc) {
+ let master = replSet.getPrimary();
+ let entry = getDummyOplogEntry();
+ entry["op"] = "c";
+ entry["o"] = doc;
- insertOnSecondaries({_id: 12390290});
+ master.getDB("admin").runCommand({applyOps: [entry]});
+}
- assert.commandWorked(db.runCommand({dbCheck: collName}));
- awaitDbCheckCompletion(db);
+// Just add an extra document, and test that it catches it.
+function simpleTestCatchesExtra() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
- let nErrors = replSet.getSecondary()
- .getDB("local")
- .system.healthlog.find({operation: /dbCheck.*/, severity: "error"})
- .count();
+ clearLog();
- assert.neq(nErrors, 0, "dbCheck found no errors after insertion on secondaries");
- assert.eq(nErrors, 1, "dbCheck found too many errors after single inconsistent insertion");
- }
+ insertOnSecondaries({_id: 12390290});
- // Test that dbCheck catches changing various pieces of collection metadata.
- function testCollectionMetadataChanges() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
- db[collName].drop();
- clearLog();
+ assert.commandWorked(db.runCommand({dbCheck: collName}));
+ awaitDbCheckCompletion(db);
- // Create the collection on the primary.
- db.createCollection(collName, {validationLevel: "off"});
+ let nErrors = replSet.getSecondary()
+ .getDB("local")
+ .system.healthlog.find({operation: /dbCheck.*/, severity: "error"})
+ .count();
- // Add an index on the secondaries.
- runCommandOnSecondaries({createIndexes: collName, v: 2, key: {"foo": 1}, name: "foo_1"},
- dbName + ".$cmd");
+ assert.neq(nErrors, 0, "dbCheck found no errors after insertion on secondaries");
+ assert.eq(nErrors, 1, "dbCheck found too many errors after single inconsistent insertion");
+}
- assert.commandWorked(db.runCommand({dbCheck: collName}));
- awaitDbCheckCompletion(db);
+// Test that dbCheck catches changing various pieces of collection metadata.
+function testCollectionMetadataChanges() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
+ db[collName].drop();
+ clearLog();
- let nErrors =
- replSet.getSecondary()
- .getDB("local")
- .system.healthlog
- .find({"operation": /dbCheck.*/, "severity": "error", "data.success": true})
- .count();
+ // Create the collection on the primary.
+ db.createCollection(collName, {validationLevel: "off"});
- assert.eq(nErrors, 1, "dbCheck found wrong number of errors after inconsistent `create`");
+ // Add an index on the secondaries.
+ runCommandOnSecondaries({createIndexes: collName, v: 2, key: {"foo": 1}, name: "foo_1"},
+ dbName + ".$cmd");
- clearLog();
- }
+ assert.commandWorked(db.runCommand({dbCheck: collName}));
+ awaitDbCheckCompletion(db);
+
+ let nErrors = replSet.getSecondary()
+ .getDB("local")
+ .system.healthlog
+ .find({"operation": /dbCheck.*/, "severity": "error", "data.success": true})
+ .count();
+
+ assert.eq(nErrors, 1, "dbCheck found wrong number of errors after inconsistent `create`");
+
+ clearLog();
+}
- simpleTestCatchesExtra();
- testCollectionMetadataChanges();
+simpleTestCatchesExtra();
+testCollectionMetadataChanges();
})();
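Aside: the health-log checks above all reduce to queries against local.system.healthlog. A hedged sketch of inspecting it on any node ('conn' is a hypothetical connection):

const healthlog = conn.getDB("local").system.healthlog;
// Batches written by dbCheck carry operation: "dbCheckBatch"; any entry above
// severity "info" indicates an inconsistency the tests above would surface.
print("batches: " + healthlog.find({operation: "dbCheckBatch"}).count());
const err = healthlog.findOne({severity: {$ne: "info"}});
if (err) {
    print("first inconsistency: " + tojson(err));
}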
diff --git a/jstests/replsets/dbhash_lock_acquisition.js b/jstests/replsets/dbhash_lock_acquisition.js
index 3c66ad1aa48..8fd30e7f7be 100644
--- a/jstests/replsets/dbhash_lock_acquisition.js
+++ b/jstests/replsets/dbhash_lock_acquisition.js
@@ -5,92 +5,91 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/parallelTester.js"); // for ScopedThread
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(db.getName());
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for ScopedThread
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(db.getName());
+
+// We insert a document so the dbHash command has a collection to process.
+assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
+const clusterTime = session.getOperationTime();
+
+// We then start a transaction so that a catalog operation can queue up behind it.
+session.startTransaction();
+assert.commandWorked(sessionDB.mycoll.insert({}));
+
+const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
+assert.eq(
+ 1, ops.length, () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
+assert.eq(ops[0].locks,
+ {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});
+
+const threadCaptruncCmd = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+
+ // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
+        // collection. This ensures the dbHash command doesn't queue up behind it on a
+ // database-level lock. The collection isn't capped so it'll fail with an
+ // IllegalOperation error response.
+ assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
+ ErrorCodes.IllegalOperation);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+}, db.getMongo().host);
+
+threadCaptruncCmd.start();
+
+assert.soon(() => {
+ const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
+ return ops.length === 1;
+}, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
+
+const threadDBHash = new ScopedThread(function(host, clusterTime) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+ assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+}, db.getMongo().host, tojson(clusterTime));
+
+threadDBHash.start();
+
+assert.soon(() => {
+ const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
+ if (ops.length === 0) {
+ return false;
+ }
+ assert.eq(ops[0].locks,
+ {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
+ return true;
+}, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
- // We insert a document so the dbHash command has a collection to process.
- assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
- const clusterTime = session.getOperationTime();
+assert.commandWorked(session.commitTransaction_forTesting());
+threadCaptruncCmd.join();
+threadDBHash.join();
- // We then start a transaction in order to be able have a catalog operation queue up behind it.
- session.startTransaction();
- assert.commandWorked(sessionDB.mycoll.insert({}));
+assert.commandWorked(threadCaptruncCmd.returnData());
+assert.commandWorked(threadDBHash.returnData());
- const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
- assert.eq(1,
- ops.length,
- () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
- assert.eq(ops[0].locks,
- {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});
-
- const threadCaptruncCmd = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
-
- // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
- // collection. This ensures we aren't having the dbHash command queue up behind it on a
- // database-level lock. The collection isn't capped so it'll fail with an
- // IllegalOperation error response.
- assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
- ErrorCodes.IllegalOperation);
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, db.getMongo().host);
-
- threadCaptruncCmd.start();
-
- assert.soon(() => {
- const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
- return ops.length === 1;
- }, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
-
- const threadDBHash = new ScopedThread(function(host, clusterTime) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, db.getMongo().host, tojson(clusterTime));
-
- threadDBHash.start();
-
- assert.soon(() => {
- const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
- if (ops.length === 0) {
- return false;
- }
- assert.eq(ops[0].locks,
- {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
- return true;
- }, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- threadCaptruncCmd.join();
- threadDBHash.join();
-
- assert.commandWorked(threadCaptruncCmd.returnData());
- assert.commandWorked(threadDBHash.returnData());
-
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
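Aside: the ScopedThread pattern above is reusable whenever a command must block while the main thread inspects currentOp(). A minimal sketch under the same assumptions (parallelTester loaded; the ping command and host are illustrative):

load("jstests/libs/parallelTester.js");  // for ScopedThread
const worker = new ScopedThread(function(host) {
    try {
        const conn = new Mongo(host);
        return conn.getDB("admin").runCommand({ping: 1});
    } catch (e) {
        return {ok: 0, error: e.toString(), stack: e.stack};
    }
}, db.getMongo().host);
worker.start();
worker.join();
// returnData() surfaces the command result (or the error document) to the main thread.
assert.commandWorked(worker.returnData());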
diff --git a/jstests/replsets/dbhash_read_at_cluster_time.js b/jstests/replsets/dbhash_read_at_cluster_time.js
index bf7995aade6..e8c42b4e57f 100644
--- a/jstests/replsets/dbhash_read_at_cluster_time.js
+++ b/jstests/replsets/dbhash_read_at_cluster_time.js
@@ -4,117 +4,123 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- const replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- rst.initiate(replSetConfig);
+const replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+rst.initiate(replSetConfig);
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
- const session = primary.startSession({causalConsistency: false});
- const db = session.getDatabase("test");
- let txnNumber = 0;
+const session = primary.startSession({causalConsistency: false});
+const db = session.getDatabase("test");
+let txnNumber = 0;
- // We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
- // associated with 'clusterTime' is retained for the duration of this test.
- rst.nodes.forEach(conn => {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
- }));
- });
-
- // We insert a document and save the md5sum associated with the opTime of that write.
- assert.commandWorked(db.mycoll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- const clusterTime = db.getSession().getOperationTime();
-
- let res = assert.commandWorked(db.runCommand({
+// We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
+// associated with 'clusterTime' is retained for the duration of this test.
+rst.nodes.forEach(conn => {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+ }));
+});
+
+// We insert a document and save the md5sum associated with the opTime of that write.
+assert.commandWorked(db.mycoll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+const clusterTime = db.getSession().getOperationTime();
+
+let res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: clusterTime,
+}));
+
+const hash1 = {
+ collections: res.collections,
+ md5: res.md5
+};
+
+// We insert another document to ensure the collection's contents have a different md5sum now.
+// We use a w=majority write concern to ensure that the insert has also been applied on the
+// secondary by the time we go to run the dbHash command later. This avoids a race where the
+// replication subsystem could be applying the insert operation when the dbHash command is run
+// on the secondary.
+assert.commandWorked(db.mycoll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+
+// However, using $_internalReadAtClusterTime to read at the opTime of the first insert should
+// return the same md5sum as it did originally.
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: clusterTime,
+}));
+
+const hash2 = {
+ collections: res.collections,
+ md5: res.md5
+};
+assert.eq(hash1, hash2, "primary returned different dbhash after second insert");
+
+{
+ const secondarySession = secondary.startSession({causalConsistency: false});
+ const secondaryDB = secondarySession.getDatabase("test");
+
+ // Using $_internalReadAtClusterTime to read at the opTime of the first insert should return
+ // the same md5sum on the secondary as it did on the primary.
+ res = assert.commandWorked(secondaryDB.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
- const hash1 = {collections: res.collections, md5: res.md5};
+ const secondaryHash = {collections: res.collections, md5: res.md5};
+ assert.eq(hash1, secondaryHash, "primary and secondary have different dbhash");
+}
- // We insert another document to ensure the collection's contents have a different md5sum now.
- // We use a w=majority write concern to ensure that the insert has also been applied on the
- // secondary by the time we go to run the dbHash command later. This avoids a race where the
- // replication subsystem could be applying the insert operation when the dbHash command is run
- // on the secondary.
- assert.commandWorked(db.mycoll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+{
+ const otherSession = primary.startSession({causalConsistency: false});
+ const otherDB = otherSession.getDatabase("test");
- // However, using $_internalReadAtClusterTime to read at the opTime of the first insert should
- // return the same md5sum as it did originally.
+ // We perform another insert inside a separate transaction to cause a MODE_IX lock to be
+ // held on the collection.
+ otherSession.startTransaction();
+ assert.commandWorked(otherDB.mycoll.insert({_id: 3}));
+
+ // It should be possible to run the "dbHash" command with "$_internalReadAtClusterTime"
+ // concurrently.
res = assert.commandWorked(db.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
- const hash2 = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, hash2, "primary returned different dbhash after second insert");
-
- {
- const secondarySession = secondary.startSession({causalConsistency: false});
- const secondaryDB = secondarySession.getDatabase("test");
-
- // Using $_internalReadAtClusterTime to read at the opTime of the first insert should return
- // the same md5sum on the secondary as it did on the primary.
- res = assert.commandWorked(secondaryDB.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: clusterTime,
- }));
-
- const secondaryHash = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, secondaryHash, "primary and secondary have different dbhash");
- }
-
- {
- const otherSession = primary.startSession({causalConsistency: false});
- const otherDB = otherSession.getDatabase("test");
-
- // We perform another insert inside a separate transaction to cause a MODE_IX lock to be
- // held on the collection.
- otherSession.startTransaction();
- assert.commandWorked(otherDB.mycoll.insert({_id: 3}));
-
- // It should be possible to run the "dbHash" command with "$_internalReadAtClusterTime"
- // concurrently.
- res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: clusterTime,
- }));
-
- const hash3 = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, hash3, "primary returned different dbhash after third insert");
-
- // However, the "dbHash" command should block behind the transaction if
- // "$_internalReadAtClusterTime" wasn't specified.
- res = assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- assert.commandWorked(otherSession.abortTransaction_forTesting());
- otherSession.endSession();
- }
-
- {
- const otherSession = primary.startSession({causalConsistency: false});
- const otherDB = otherSession.getDatabase("test");
-
- // We create another collection inside a separate session to modify the collection catalog
- // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot
- // associated with 'clusterTime' for snapshot reads.
- assert.commandWorked(otherDB.runCommand({create: "mycoll2"}));
- assert.commandFailedWithCode(
- db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
- ErrorCodes.SnapshotUnavailable);
-
- otherSession.endSession();
- }
-
- session.endSession();
- rst.stopSet();
+ const hash3 = {collections: res.collections, md5: res.md5};
+ assert.eq(hash1, hash3, "primary returned different dbhash after third insert");
+
+ // However, the "dbHash" command should block behind the transaction if
+ // "$_internalReadAtClusterTime" wasn't specified.
+ res = assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+ assert.commandWorked(otherSession.abortTransaction_forTesting());
+ otherSession.endSession();
+}
+
+{
+ const otherSession = primary.startSession({causalConsistency: false});
+ const otherDB = otherSession.getDatabase("test");
+
+ // We create another collection inside a separate session to modify the collection catalog
+ // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot
+ // associated with 'clusterTime' for snapshot reads.
+ assert.commandWorked(otherDB.runCommand({create: "mycoll2"}));
+ assert.commandFailedWithCode(
+ db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
+ ErrorCodes.SnapshotUnavailable);
+
+ otherSession.endSession();
+}
+
+session.endSession();
+rst.stopSet();
})();
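Aside: the cross-node comparison above generalizes to a small helper. A sketch assuming 'primary' and 'secondary' connections, a w:"majority" write already applied on both, and the WTPreserveSnapshotHistoryIndefinitely failpoint keeping the snapshot alive as in the test:

const clusterTime = primary.getDB("test").getSession().getOperationTime();
function hashAt(conn) {
    const res = assert.commandWorked(conn.getDB("test").runCommand(
        {dbHash: 1, $_internalReadAtClusterTime: clusterTime}));
    return {collections: res.collections, md5: res.md5};
}
// Both nodes should report identical per-collection hashes at the same cluster time.
assert.eq(hashAt(primary), hashAt(secondary), "nodes diverge at clusterTime");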
diff --git a/jstests/replsets/dbhash_system_collections.js b/jstests/replsets/dbhash_system_collections.js
index c60532dfe1c..d3f7b83c323 100644
--- a/jstests/replsets/dbhash_system_collections.js
+++ b/jstests/replsets/dbhash_system_collections.js
@@ -2,54 +2,54 @@
'use strict';
(function() {
- var rst = new ReplSetTest({name: 'dbhash_system_collections', nodes: 2});
- rst.startSet();
- rst.initiate();
-
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
-
- var testDB = primary.getDB('test');
- assert.writeOK(testDB.system.users.insert({users: 1}));
- assert.writeOK(testDB.system.js.insert({js: 1}));
-
- var adminDB = primary.getDB('admin');
- assert.writeOK(adminDB.system.roles.insert({roles: 1}));
- assert.writeOK(adminDB.system.version.insert({version: 1}));
- assert.writeOK(adminDB.system.new_users.insert({new_users: 1}));
- assert.writeOK(adminDB.system.backup_users.insert({backup_users: 1}));
-
- rst.awaitReplication();
-
- function checkDbHash(mongo) {
- var testDB = mongo.getDB('test');
- var adminDB = mongo.getDB('admin');
-
- var replicatedSystemCollections = [
- 'system.js',
- 'system.users',
- ];
-
- var replicatedAdminSystemCollections = [
- 'system.backup_users',
- 'system.new_users',
- 'system.roles',
- 'system.version',
- ];
-
- var res = testDB.runCommand('dbhash');
- assert.commandWorked(res);
- assert.docEq(Object.keys(res.collections), replicatedSystemCollections, tojson(res));
-
- res = adminDB.runCommand('dbhash');
- assert.commandWorked(res);
- assert.docEq(Object.keys(res.collections), replicatedAdminSystemCollections, tojson(res));
-
- return res.md5;
- }
-
- var primaryMd5 = checkDbHash(primary);
- var secondaryMd5 = checkDbHash(secondary);
- assert.eq(primaryMd5, secondaryMd5, 'dbhash is different on the primary and the secondary');
- rst.stopSet();
+var rst = new ReplSetTest({name: 'dbhash_system_collections', nodes: 2});
+rst.startSet();
+rst.initiate();
+
+var primary = rst.getPrimary();
+var secondary = rst.getSecondary();
+
+var testDB = primary.getDB('test');
+assert.writeOK(testDB.system.users.insert({users: 1}));
+assert.writeOK(testDB.system.js.insert({js: 1}));
+
+var adminDB = primary.getDB('admin');
+assert.writeOK(adminDB.system.roles.insert({roles: 1}));
+assert.writeOK(adminDB.system.version.insert({version: 1}));
+assert.writeOK(adminDB.system.new_users.insert({new_users: 1}));
+assert.writeOK(adminDB.system.backup_users.insert({backup_users: 1}));
+
+rst.awaitReplication();
+
+function checkDbHash(mongo) {
+ var testDB = mongo.getDB('test');
+ var adminDB = mongo.getDB('admin');
+
+ var replicatedSystemCollections = [
+ 'system.js',
+ 'system.users',
+ ];
+
+ var replicatedAdminSystemCollections = [
+ 'system.backup_users',
+ 'system.new_users',
+ 'system.roles',
+ 'system.version',
+ ];
+
+ var res = testDB.runCommand('dbhash');
+ assert.commandWorked(res);
+ assert.docEq(Object.keys(res.collections), replicatedSystemCollections, tojson(res));
+
+ res = adminDB.runCommand('dbhash');
+ assert.commandWorked(res);
+ assert.docEq(Object.keys(res.collections), replicatedAdminSystemCollections, tojson(res));
+
+ return res.md5;
+}
+
+var primaryMd5 = checkDbHash(primary);
+var secondaryMd5 = checkDbHash(secondary);
+assert.eq(primaryMd5, secondaryMd5, 'dbhash is different on the primary and the secondary');
+rst.stopSet();
})();
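Aside: a quick way to see which collections a database's dbhash covers; only replicated collections appear in res.collections, which is exactly what the test above relies on ('node' is a hypothetical connection):

const res = assert.commandWorked(node.getDB("test").runCommand("dbhash"));
printjson(Object.keys(res.collections));  // the replicated collections hashed into res.md5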
diff --git a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
index 8b1dde46cef..2577744902e 100644
--- a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
+++ b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
@@ -16,102 +16,104 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
- load("jstests/libs/parallel_shell_helpers.js");
- load('jstests/libs/test_background_ops.js');
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/libs/parallel_shell_helpers.js");
+load('jstests/libs/test_background_ops.js');
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const dbName = "test";
- const collName = "ddl_op_behind_prepared_transaction_fails_in_shutdown";
- let primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
- const txnDoc = {_id: 100};
+const dbName = "test";
+const collName = "ddl_op_behind_prepared_transaction_fails_in_shutdown";
+let primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+const txnDoc = {
+ _id: 100
+};
- jsTest.log("Creating a collection '" + collName + "' with data in it...");
- assert.commandWorked(testDB.createCollection(collName));
- let bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 2; ++i) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+jsTest.log("Creating a collection '" + collName + "' with data in it...");
+assert.commandWorked(testDB.createCollection(collName));
+let bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 2; ++i) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Setting up a prepared transaction...");
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(txnDoc));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+jsTest.log("Setting up a prepared transaction...");
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(txnDoc));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- function runDropCollection(dbName, collName) {
- jsTest.log("Dropping collection in parallel shell...");
- // 'db' is defined in the parallel shell 'startParallelShell' will spin up.
- const res = db.getSiblingDB(dbName).runCommand({drop: collName});
- assert.commandFailedWithCode(
- res,
- [ErrorCodes.InterruptedAtShutdown, ErrorCodes.InterruptedDueToReplStateChange],
- "parallel shell drop cmd completed in an unexpected way: " + tojson(res));
- jsTest.log("Done dropping collection in parallel shell");
- }
+function runDropCollection(dbName, collName) {
+ jsTest.log("Dropping collection in parallel shell...");
+    // 'db' is defined in the parallel shell that 'startParallelShell' will spin up.
+ const res = db.getSiblingDB(dbName).runCommand({drop: collName});
+ assert.commandFailedWithCode(
+ res,
+ [ErrorCodes.InterruptedAtShutdown, ErrorCodes.InterruptedDueToReplStateChange],
+ "parallel shell drop cmd completed in an unexpected way: " + tojson(res));
+ jsTest.log("Done dropping collection in parallel shell");
+}
- // Use a failpoint to wait for the drop operation to get as close as possible to a lock request
- // before we release it and wait 1 second more for it to hopefully have time to enqueue a lock
- // request. It takes a while for the parallel shell to start up, establish a connection with the
- // server for the drop operation, etc., and we do not want to interrupt it earlier than lock
- // acquisition with the shutdown signal.
- //
- // This is best-effort, not deterministic, since we cannot place a fail point directly in the
- // locking code as that would hang everything rather than just drop.
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'alwaysOn'}));
- let joinDropCollection;
- try {
- jsTest.log("Starting a parallel shell to concurrently run drop collection...");
- joinDropCollection =
- startParallelShell(funWithArgs(runDropCollection, dbName, collName), primary.port);
+// Use a failpoint to wait for the drop operation to get as close as possible to a lock request
+// before we release it and wait 1 second more for it to hopefully have time to enqueue a lock
+// request. It takes a while for the parallel shell to start up, establish a connection with the
+// server for the drop operation, etc., and we do not want to interrupt it earlier than lock
+// acquisition with the shutdown signal.
+//
+// This is best-effort, not deterministic, since we cannot place a fail point directly in the
+// locking code as that would hang everything rather than just drop.
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'alwaysOn'}));
+let joinDropCollection;
+try {
+ jsTest.log("Starting a parallel shell to concurrently run drop collection...");
+ joinDropCollection =
+ startParallelShell(funWithArgs(runDropCollection, dbName, collName), primary.port);
- jsTest.log("Waiting for drop collection to block behind the prepared transaction...");
- checkLog.contains(
- primary, "Hanging drop collection before lock acquisition while fail point is set");
- } finally {
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'off'}));
- }
- sleep(1 * 1000);
+ jsTest.log("Waiting for drop collection to block behind the prepared transaction...");
+ checkLog.contains(primary,
+ "Hanging drop collection before lock acquisition while fail point is set");
+} finally {
+ assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'off'}));
+}
+sleep(1 * 1000);
- jsTest.log("Restarting the mongod...");
- // Skip validation because it requires a lock that the prepared transaction is blocking.
- rst.stop(primary, undefined, {skipValidation: true});
- rst.start(primary, {}, true /*restart*/);
- primary = rst.getPrimary();
+jsTest.log("Restarting the mongod...");
+// Skip validation because it requires a lock that the prepared transaction is blocking.
+rst.stop(primary, undefined, {skipValidation: true});
+rst.start(primary, {}, true /*restart*/);
+primary = rst.getPrimary();
- joinDropCollection();
+joinDropCollection();
- const numDocs = primary.getDB(dbName).getCollection(collName).find().length();
- // We expect two documents because the third is in an uncommitted transaction and not visible.
- assert.eq(
- 2,
- numDocs,
- "Expected '" + collName + "' to find 2 documents, found " + numDocs +
- ". Drop collection may have succeeded during shutdown while a transaction was in the " +
- "prepared state.");
+const numDocs = primary.getDB(dbName).getCollection(collName).find().length();
+// We expect two documents because the third is in an uncommitted transaction and not visible.
+assert.eq(
+ 2,
+ numDocs,
+ "Expected '" + collName + "' to find 2 documents, found " + numDocs +
+ ". Drop collection may have succeeded during shutdown while a transaction was in the " +
+ "prepared state.");
- // We will check that the prepared transaction is still active as expected, since we are here.
- assert.commandFailedWithCode(primary.getDB(dbName).runCommand({
- find: collName,
- filter: txnDoc,
- readConcern: {afterClusterTime: prepareTimestamp},
- maxTimeMS: 5000
- }),
- ErrorCodes.MaxTimeMSExpired);
+// While we are here, also check that the prepared transaction is still active as expected.
+assert.commandFailedWithCode(primary.getDB(dbName).runCommand({
+ find: collName,
+ filter: txnDoc,
+ readConcern: {afterClusterTime: prepareTimestamp},
+ maxTimeMS: 5000
+}),
+ ErrorCodes.MaxTimeMSExpired);
- // Skip validation because it requires a lock that the prepared transaction is blocking.
- rst.stopSet(true /*use default exit signal*/, false /*forRestart*/, {skipValidation: true});
+// Skip validation because it requires a lock that the prepared transaction is blocking.
+rst.stopSet(true /*use default exit signal*/, false /*forRestart*/, {skipValidation: true});
})();
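Aside: the failpoint bracketing used above is worth isolating: enabling in a try block and disabling in finally guarantees the failpoint is cleared even if the wait throws. Sketch (failpoint name taken from the test; 'primary' assumed):

assert.commandWorked(primary.adminCommand(
    {configureFailPoint: "hangDropCollectionBeforeLockAcquisition", mode: "alwaysOn"}));
try {
    // ... start the operation that should hang, then wait for its log line ...
} finally {
    assert.commandWorked(primary.adminCommand(
        {configureFailPoint: "hangDropCollectionBeforeLockAcquisition", mode: "off"}));
}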
diff --git a/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js b/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js
index a0d19c93b57..db2a693a3cb 100644
--- a/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js
+++ b/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js
@@ -8,127 +8,130 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/get_index_helpers.js");
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "ddl_ops_after_prepare_lock_failpoint";
- const indexName = "test_index";
-
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // Create the collection we will be working with.
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Also build an index (on the same collection) which we will later attempt to drop.
- assert.commandWorked(testDB.runCommand(
- {createIndexes: collName, indexes: [{key: {"num": 1}, name: indexName}]}));
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 42}));
-
- PrepareHelpers.prepareTransaction(session);
-
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
-
- /**
- * Tests that conflicting DDL ops fail immediately.
- */
-
- // Collection names for DDL ops that will fail.
- const collToDrop = collName;
- const collToRenameFrom = collName;
- const collToRenameTo = "rename_collection_to_fail";
- const indexToCreate = "create_index_to_fail";
- const indexToDrop = indexName;
-
- let testDDLOps = () => {
- // Also attempt to delete our original collection (it is in conflict anyway, but should
- // fail to acquire the db lock in the first place).
- assert.throws(function() {
- testDB.getCollection(collToDrop).drop();
- });
- assert(testDB.getCollectionNames().includes(collToDrop));
-
- // Same goes for trying to rename it.
- assert.commandFailedWithCode(
- testDB.getCollection(collToRenameFrom).renameCollection(collToRenameTo),
- ErrorCodes.LockTimeout);
- assert(testDB.getCollectionNames().includes(collToRenameFrom));
- assert(!testDB.getCollectionNames().includes(collToRenameTo));
-
- assert.commandFailedWithCode(testDB.adminCommand({
- renameCollection: testDB.getCollection(collToRenameFrom).getFullName(),
- to: testDB.getSiblingDB('test2').getCollection(collToRenameTo).getFullName(),
- }),
- ErrorCodes.LockTimeout);
-
- // Attempt to add a new index to that collection.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {createIndexes: collName, indexes: [{key: {"b": 1}, name: indexToCreate}]}),
- ErrorCodes.LockTimeout);
- assert.eq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToCreate));
-
- // Try dropping the index we created originally. This should also fail.
- assert.commandFailedWithCode(testDB.runCommand({dropIndexes: collName, index: indexToDrop}),
- ErrorCodes.LockTimeout);
- assert.neq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToDrop));
- };
-
- /**
- * Tests that CRUD operations on the same collection succeed.
- */
-
- const docToInsert = {num: 100};
- const docToUpdateFrom = docToInsert;
- const docToUpdateTo = {num: 101};
- const docToRemove = docToUpdateTo;
-
- let testCRUDOps = (collConn) => {
- // TODO: SERVER-40167 Having an extra document in the collection is necessary to avoid
- // prepare conflicts when deleting documents.
- assert.commandWorked(collConn.insert({num: 1}));
-
- assert.commandWorked(collConn.insert(docToInsert));
- assert.eq(100, collConn.findOne(docToInsert).num);
-
- // This will not encounter a prepare conflict because there is an index on "num" that
- // eliminates the need for using a collection scan.
- assert.commandWorked(collConn.update(docToUpdateFrom, docToUpdateTo));
- assert.eq(101, collConn.findOne(docToUpdateTo).num);
-
- assert.commandWorked(collConn.remove(docToRemove));
- assert.eq(null, collConn.findOne(docToUpdateFrom));
- assert.eq(null, collConn.findOne(docToUpdateTo));
- };
-
- // First test DDL ops (should fail).
- testDDLOps();
-
- // Then test operations outside of transactions (should succeed).
- testCRUDOps(testColl);
-
- // Also test operations as part of a transaction (should succeed).
- testCRUDOps(primary.startSession({causalConsistency: false})
- .getDatabase(dbName)
- .getCollection(collName));
-
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
-
- assert.commandWorked(session.abortTransaction_forTesting());
- rst.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/get_index_helpers.js");
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "ddl_ops_after_prepare_lock_failpoint";
+const indexName = "test_index";
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// Create the collection we will be working with.
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Also build an index (on the same collection) which we will later attempt to drop.
+assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {"num": 1}, name: indexName}]}));
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 42}));
+
+PrepareHelpers.prepareTransaction(session);
+
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
+
+/**
+ * Tests that conflicting DDL ops fail immediately.
+ */
+
+// Collection names for DDL ops that will fail.
+const collToDrop = collName;
+const collToRenameFrom = collName;
+const collToRenameTo = "rename_collection_to_fail";
+const indexToCreate = "create_index_to_fail";
+const indexToDrop = indexName;
+
+let testDDLOps = () => {
+    // Attempt to drop our original collection (the drop conflicts with the prepared
+    // transaction anyway, but it should fail to acquire the DB lock in the first place).
+ assert.throws(function() {
+ testDB.getCollection(collToDrop).drop();
+ });
+ assert(testDB.getCollectionNames().includes(collToDrop));
+
+ // Same goes for trying to rename it.
+ assert.commandFailedWithCode(
+ testDB.getCollection(collToRenameFrom).renameCollection(collToRenameTo),
+ ErrorCodes.LockTimeout);
+ assert(testDB.getCollectionNames().includes(collToRenameFrom));
+ assert(!testDB.getCollectionNames().includes(collToRenameTo));
+
+ assert.commandFailedWithCode(testDB.adminCommand({
+ renameCollection: testDB.getCollection(collToRenameFrom).getFullName(),
+ to: testDB.getSiblingDB('test2').getCollection(collToRenameTo).getFullName(),
+ }),
+ ErrorCodes.LockTimeout);
+
+ // Attempt to add a new index to that collection.
+ assert.commandFailedWithCode(
+ testDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {"b": 1}, name: indexToCreate}]}),
+ ErrorCodes.LockTimeout);
+ assert.eq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToCreate));
+
+ // Try dropping the index we created originally. This should also fail.
+ assert.commandFailedWithCode(testDB.runCommand({dropIndexes: collName, index: indexToDrop}),
+ ErrorCodes.LockTimeout);
+ assert.neq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToDrop));
+};
+
+/**
+ * Tests that CRUD operations on the same collection succeed.
+ */
+
+const docToInsert = {
+ num: 100
+};
+const docToUpdateFrom = docToInsert;
+const docToUpdateTo = {
+ num: 101
+};
+const docToRemove = docToUpdateTo;
+
+let testCRUDOps = (collConn) => {
+ // TODO: SERVER-40167 Having an extra document in the collection is necessary to avoid
+ // prepare conflicts when deleting documents.
+ assert.commandWorked(collConn.insert({num: 1}));
+
+ assert.commandWorked(collConn.insert(docToInsert));
+ assert.eq(100, collConn.findOne(docToInsert).num);
+
+ // This will not encounter a prepare conflict because there is an index on "num" that
+ // eliminates the need for using a collection scan.
+ assert.commandWorked(collConn.update(docToUpdateFrom, docToUpdateTo));
+ assert.eq(101, collConn.findOne(docToUpdateTo).num);
+
+ assert.commandWorked(collConn.remove(docToRemove));
+ assert.eq(null, collConn.findOne(docToUpdateFrom));
+ assert.eq(null, collConn.findOne(docToUpdateTo));
+};
+
+// First test DDL ops (should fail).
+testDDLOps();
+
+// Then test operations outside of transactions (should succeed).
+testCRUDOps(testColl);
+
+// Also test operations as part of a transaction (should succeed).
+testCRUDOps(
+ primary.startSession({causalConsistency: false}).getDatabase(dbName).getCollection(collName));
+
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
+
+assert.commandWorked(session.abortTransaction_forTesting());
+rst.stopSet();
})();
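
A note on the failpoint driving the assertions above: failNonIntentLocksIfWaitNeeded turns any non-intent lock acquisition that would have to wait into an immediate LockTimeout error, which is why the DDL commands fail fast instead of queueing behind the prepared transaction. A minimal sketch of the enable/assert/disable pattern, assuming `conn` is a connection to a node where a conflicting operation (such as a prepared transaction) already holds the lock; "someColl" and "someIndex" are placeholder names:

    // Make lock requests that would block fail immediately instead.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
    // A DDL op needing an exclusive lock now returns LockTimeout at once.
    assert.commandFailedWithCode(
        conn.getDB("test").runCommand({dropIndexes: "someColl", index: "someIndex"}),
        ErrorCodes.LockTimeout);
    // Restore normal blocking behavior.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));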
diff --git a/jstests/replsets/disallow_adding_initialized_node1.js b/jstests/replsets/disallow_adding_initialized_node1.js
index 910e71c7d8c..7123070bf92 100644
--- a/jstests/replsets/disallow_adding_initialized_node1.js
+++ b/jstests/replsets/disallow_adding_initialized_node1.js
@@ -5,79 +5,78 @@
// detecting an inconsistent replica set ID in the heartbeat response metadata from B_0.
(function() {
- 'use strict';
- load("jstests/libs/check_log.js");
+'use strict';
+load("jstests/libs/check_log.js");
- var name = 'disallow_adding_initialized_node1';
- var replSetA = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 10}},
- ]
- });
- replSetA.startSet({dbpath: "$set-A-$node"});
- replSetA.initiate();
+var name = 'disallow_adding_initialized_node1';
+var replSetA = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 10}},
+ ]
+});
+replSetA.startSet({dbpath: "$set-A-$node"});
+replSetA.initiate();
- var replSetB = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 20}},
- ]
- });
- replSetB.startSet({dbpath: "$set-B-$node"});
- replSetB.initiate();
+var replSetB = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 20}},
+ ]
+});
+replSetB.startSet({dbpath: "$set-B-$node"});
+replSetB.initiate();
- var primaryA = replSetA.getPrimary();
- var primaryB = replSetB.getPrimary();
- jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
+var primaryA = replSetA.getPrimary();
+var primaryB = replSetB.getPrimary();
+jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
- var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
- var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
- assert(configA.settings.replicaSetId instanceof ObjectId);
- assert(configB.settings.replicaSetId instanceof ObjectId);
- jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
- jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
- assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
+var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
+var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
+assert(configA.settings.replicaSetId instanceof ObjectId);
+assert(configB.settings.replicaSetId instanceof ObjectId);
+jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
+jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
+assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
- jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
- configA.version++;
- configA.members.push({_id: 11, host: primaryB.host});
- var reconfigResult =
- assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}),
- ErrorCodes.NewReplicaSetConfigurationIncompatible);
- var msgA = 'Our replica set ID of ' + configA.settings.replicaSetId +
- ' did not match that of ' + primaryB.host + ', which is ' + configB.settings.replicaSetId;
- assert.neq(-1, reconfigResult.errmsg.indexOf(msgA));
+jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
+configA.version++;
+configA.members.push({_id: 11, host: primaryB.host});
+var reconfigResult =
+ assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}),
+ ErrorCodes.NewReplicaSetConfigurationIncompatible);
+var msgA = 'Our replica set ID of ' + configA.settings.replicaSetId + ' did not match that of ' +
+ primaryB.host + ', which is ' + configB.settings.replicaSetId;
+assert.neq(-1, reconfigResult.errmsg.indexOf(msgA));
- var newPrimaryA = replSetA.getPrimary();
- var newPrimaryB = replSetB.getPrimary();
- jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' +
- newPrimaryB.host);
- assert.eq(primaryA, newPrimaryA);
- assert.eq(primaryB, newPrimaryB);
+var newPrimaryA = replSetA.getPrimary();
+var newPrimaryB = replSetB.getPrimary();
+jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' + newPrimaryB.host);
+assert.eq(primaryA, newPrimaryA);
+assert.eq(primaryB, newPrimaryB);
- // Mismatch replica set IDs in heartbeat responses should be logged.
- var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
- "; remote node's: " + configA.settings.replicaSetId;
- checkLog.contains(primaryB, msgB);
+// Mismatched replica set IDs in heartbeat responses should be logged.
+var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
+ "; remote node's: " + configA.settings.replicaSetId;
+checkLog.contains(primaryB, msgB);
- var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
- var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
- jsTestLog('After merging: replica set status A = ' + tojson(statusA));
- jsTestLog('After merging: replica set status B = ' + tojson(statusB));
+var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
+var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging: replica set status A = ' + tojson(statusA));
+jsTestLog('After merging: replica set status B = ' + tojson(statusB));
- // Replica set A's config should remain unchanged due to failed replSetReconfig command.
- assert.eq(1, statusA.members.length);
- assert.eq(10, statusA.members[0]._id);
- assert.eq(primaryA.host, statusA.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
+// Replica set A's config should remain unchanged due to failed replSetReconfig command.
+assert.eq(1, statusA.members.length);
+assert.eq(10, statusA.members[0]._id);
+assert.eq(primaryA.host, statusA.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
- // Replica set B's config should remain unchanged.
- assert.eq(1, statusB.members.length);
- assert.eq(20, statusB.members[0]._id);
- assert.eq(primaryB.host, statusB.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
+// Replica set B's config should remain unchanged.
+assert.eq(1, statusB.members.length);
+assert.eq(20, statusB.members[0]._id);
+assert.eq(primaryB.host, statusB.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
- replSetB.stopSet();
- replSetA.stopSet();
+replSetB.stopSet();
+replSetA.stopSet();
})();
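
The check above hinges on the replicaSetId that initiate() stores in the config document: independently initiated sets get distinct ObjectId values, and heartbeat metadata carries the ID so a mismatch is detectable both by reconfig validation and in the log line asserted here. A minimal sketch of reading the ID on any member, assuming `conn` is a connection to a replica set node:

    // The replica set ID lives under settings in the replset config.
    var config = assert.commandWorked(conn.adminCommand({replSetGetConfig: 1})).config;
    var rsId = config.settings.replicaSetId;
    assert(rsId instanceof ObjectId);  // assigned automatically at initiate time
    jsTestLog('This node belongs to replica set ID ' + rsId);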
diff --git a/jstests/replsets/disallow_adding_initialized_node2.js b/jstests/replsets/disallow_adding_initialized_node2.js
index e92fc77880b..12de734b411 100644
--- a/jstests/replsets/disallow_adding_initialized_node2.js
+++ b/jstests/replsets/disallow_adding_initialized_node2.js
@@ -10,87 +10,86 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
- load("jstests/libs/check_log.js");
+'use strict';
+load("jstests/libs/check_log.js");
- var name = 'disallow_adding_initialized_node2';
- var replSetA = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 10}},
- {rsConfig: {_id: 11, arbiterOnly: true}},
- ]
- });
- replSetA.startSet({dbpath: "$set-A-$node"});
- replSetA.initiate();
+var name = 'disallow_adding_initialized_node2';
+var replSetA = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 10}},
+ {rsConfig: {_id: 11, arbiterOnly: true}},
+ ]
+});
+replSetA.startSet({dbpath: "$set-A-$node"});
+replSetA.initiate();
- var replSetB = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 20}},
- ]
- });
- replSetB.startSet({dbpath: "$set-B-$node"});
- replSetB.initiate();
+var replSetB = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 20}},
+ ]
+});
+replSetB.startSet({dbpath: "$set-B-$node"});
+replSetB.initiate();
- var primaryA = replSetA.getPrimary();
- var primaryB = replSetB.getPrimary();
- jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
+var primaryA = replSetA.getPrimary();
+var primaryB = replSetB.getPrimary();
+jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
- var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
- var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
- assert(configA.settings.replicaSetId instanceof ObjectId);
- assert(configB.settings.replicaSetId instanceof ObjectId);
- jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
- jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
- assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
+var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
+var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
+assert(configA.settings.replicaSetId instanceof ObjectId);
+assert(configB.settings.replicaSetId instanceof ObjectId);
+jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
+jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
+assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
- jsTestLog("Stopping B's primary " + primaryB.host);
- replSetB.stop(0);
+jsTestLog("Stopping B's primary " + primaryB.host);
+replSetB.stop(0);
- jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
- configA.version++;
- configA.members.push({_id: 12, host: primaryB.host});
- assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
+jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
+configA.version++;
+configA.members.push({_id: 12, host: primaryB.host});
+assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
- jsTestLog("Restarting B's primary " + primaryB.host);
- primaryB = replSetB.start(0, {dbpath: "$set-B-$node", restart: true});
+jsTestLog("Restarting B's primary " + primaryB.host);
+primaryB = replSetB.start(0, {dbpath: "$set-B-$node", restart: true});
- var newPrimaryA = replSetA.getPrimary();
- var newPrimaryB = replSetB.getPrimary();
- jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' +
- newPrimaryB.host);
- assert.eq(primaryA, newPrimaryA);
- assert.eq(primaryB, newPrimaryB);
+var newPrimaryA = replSetA.getPrimary();
+var newPrimaryB = replSetB.getPrimary();
+jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' + newPrimaryB.host);
+assert.eq(primaryA, newPrimaryA);
+assert.eq(primaryB, newPrimaryB);
- // Mismatch replica set IDs in heartbeat responses should be logged.
- var msgA = "replica set IDs do not match, ours: " + configA.settings.replicaSetId +
- "; remote node's: " + configB.settings.replicaSetId;
- var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
- "; remote node's: " + configA.settings.replicaSetId;
- checkLog.contains(primaryA, msgA);
- checkLog.contains(primaryB, msgB);
+// Mismatched replica set IDs in heartbeat responses should be logged.
+var msgA = "replica set IDs do not match, ours: " + configA.settings.replicaSetId +
+ "; remote node's: " + configB.settings.replicaSetId;
+var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
+ "; remote node's: " + configA.settings.replicaSetId;
+checkLog.contains(primaryA, msgA);
+checkLog.contains(primaryB, msgB);
- var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
- var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
- jsTestLog('After merging: replica set status A = ' + tojson(statusA));
- jsTestLog('After merging: replica set status B = ' + tojson(statusB));
+var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
+var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging: replica set status A = ' + tojson(statusA));
+jsTestLog('After merging: replica set status B = ' + tojson(statusB));
- // B's primary should show up in A's status as DOWN.
- assert.eq(3, statusA.members.length);
- assert.eq(10, statusA.members[0]._id);
- assert.eq(primaryA.host, statusA.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
- assert.eq(12, statusA.members[2]._id);
- assert.eq(primaryB.host, statusA.members[2].name);
- assert.eq(ReplSetTest.State.DOWN, statusA.members[2].state);
+// B's primary should show up in A's status as DOWN.
+assert.eq(3, statusA.members.length);
+assert.eq(10, statusA.members[0]._id);
+assert.eq(primaryA.host, statusA.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
+assert.eq(12, statusA.members[2]._id);
+assert.eq(primaryB.host, statusA.members[2].name);
+assert.eq(ReplSetTest.State.DOWN, statusA.members[2].state);
- // Replica set B's config should remain unchanged.
- assert.eq(1, statusB.members.length);
- assert.eq(20, statusB.members[0]._id);
- assert.eq(primaryB.host, statusB.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
+// Replica set B's config should remain unchanged.
+assert.eq(1, statusB.members.length);
+assert.eq(20, statusB.members[0]._id);
+assert.eq(primaryB.host, statusB.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
- replSetB.stopSet();
- replSetA.stopSet();
+replSetB.stopSet();
+replSetA.stopSet();
})();
diff --git a/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js b/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
index 12ebc3eb40d..bb16e3966b9 100644
--- a/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
+++ b/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
@@ -6,47 +6,47 @@
*/
(function() {
- "use strict";
-
- // A testing exemption was made to allow transactions on shard server even if
- // writeConcernMajorityJournalDefault = false. So we need to disable the exemption in this test
- // in order to test the behavior.
- jsTest.setOption('enableTestCommands', false);
-
- // The following two options by default do not support enableTestCommands=false, change them
- // accordingly so this test can run.
- TestData.roleGraphInvalidationIsFatal = false;
- TestData.authenticationDatabase = "local";
-
- // Start the replica set with --shardsvr.
- const replSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
- replSet.startSet();
- let conf = replSet.getReplSetConfig();
- conf.writeConcernMajorityJournalDefault = false;
- replSet.initiate(conf);
-
- const primary = replSet.getPrimary();
- const session = primary.startSession();
- const sessionDb = session.getDatabase("test");
- const sessionColl = sessionDb.getCollection("foo");
-
- jsTestLog("Test that non-transactional operations are allowed.");
- assert.commandWorked(sessionColl.insert({_id: 1}));
-
- jsTestLog("Test that transactions are not allowed.");
- session.startTransaction();
- assert.commandFailedWithCode(sessionColl.insert({_id: 2}),
- ErrorCodes.OperationNotSupportedInTransaction);
- // All commands are not allowed including abortTransaction.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.OperationNotSupportedInTransaction);
-
- jsTestLog("Test that retryable writes are allowed.");
- assert.commandWorked(
- sessionDb.runCommand({insert: "foo", documents: [{_id: 3}], txnNumber: NumberLong(1)}));
-
- // Assert documents inserted.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 1}, {_id: 3}]);
-
- replSet.stopSet();
+"use strict";
+
+// A testing exemption was made to allow transactions on shard servers even if
+// writeConcernMajorityJournalDefault = false, so we disable that exemption in this test to
+// exercise the real behavior.
+jsTest.setOption('enableTestCommands', false);
+
+// The following two options do not support enableTestCommands=false by default; change them
+// accordingly so this test can run.
+TestData.roleGraphInvalidationIsFatal = false;
+TestData.authenticationDatabase = "local";
+
+// Start the replica set with --shardsvr.
+const replSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
+replSet.startSet();
+let conf = replSet.getReplSetConfig();
+conf.writeConcernMajorityJournalDefault = false;
+replSet.initiate(conf);
+
+const primary = replSet.getPrimary();
+const session = primary.startSession();
+const sessionDb = session.getDatabase("test");
+const sessionColl = sessionDb.getCollection("foo");
+
+jsTestLog("Test that non-transactional operations are allowed.");
+assert.commandWorked(sessionColl.insert({_id: 1}));
+
+jsTestLog("Test that transactions are not allowed.");
+session.startTransaction();
+assert.commandFailedWithCode(sessionColl.insert({_id: 2}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+// No commands are allowed, including abortTransaction.
+assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.OperationNotSupportedInTransaction);
+
+jsTestLog("Test that retryable writes are allowed.");
+assert.commandWorked(
+ sessionDb.runCommand({insert: "foo", documents: [{_id: 3}], txnNumber: NumberLong(1)}));
+
+// Assert documents inserted.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 1}, {_id: 3}]);
+
+replSet.stopSet();
}());
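
The distinction exercised above is between multi-statement transactions, which are rejected on a shard server when writeConcernMajorityJournalDefault is false, and retryable writes, which attach a txnNumber to an ordinary write command without ever starting a transaction. A minimal sketch of a retryable write issued directly, assuming `sessionDb` is a session-backed database handle like the one above; the document and txnNumber values are illustrative:

    // A retryable write is a plain write command plus a session txnNumber;
    // no startTransaction is involved, so it is permitted here.
    assert.commandWorked(sessionDb.runCommand(
        {insert: "foo", documents: [{_id: 4}], txnNumber: NumberLong(2)}));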
diff --git a/jstests/replsets/disconnect_on_legacy_write_to_secondary.js b/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
index 1afa8ba0f33..9a5474e190c 100644
--- a/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
+++ b/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
@@ -2,107 +2,106 @@
* Tests that legacy writes to secondaries result in no answer and a disconnection.
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const collname = "disconnect_on_legacy_write_to_secondary";
- const coll = primary.getDB("test")[collname];
- const secondaryDb = secondary.getDB("test");
- const secondaryColl = secondaryDb[collname];
-
- // Never retry on network error, because this test needs to detect the network error.
- TestData.skipRetryOnNetworkError = true;
- secondary.forceWriteMode('legacy');
- assert.commandWorked(coll.insert([{_id: 'deleteme'}, {_id: 'updateme'}]));
- rst.awaitReplication();
-
- jsTestLog("Trying legacy insert on secondary");
- secondaryColl.insert({_id: 'no_insert_on_secondary'});
- let res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const collname = "disconnect_on_legacy_write_to_secondary";
+const coll = primary.getDB("test")[collname];
+const secondaryDb = secondary.getDB("test");
+const secondaryColl = secondaryDb[collname];
+
+// Never retry on network error, because this test needs to detect the network error.
+TestData.skipRetryOnNetworkError = true;
+secondary.forceWriteMode('legacy');
+assert.commandWorked(coll.insert([{_id: 'deleteme'}, {_id: 'updateme'}]));
+rst.awaitReplication();
+
+jsTestLog("Trying legacy insert on secondary");
+secondaryColl.insert({_id: 'no_insert_on_secondary'});
+let res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+assert(isNetworkError(res));
+// We should automatically reconnect after the failed command.
+assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
+
+jsTestLog("Trying legacy update on secondary");
+secondaryColl.update({_id: 'updateme'}, {'$set': {x: 1}});
+res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+assert(isNetworkError(res));
+// We should automatically reconnect after the failed command.
+assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
+
+jsTestLog("Trying legacy remove on secondary");
+secondaryColl.remove({_id: 'deleteme'});
+res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+assert(isNetworkError(res));
+// We should automatically reconnect after the failed command.
+assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
+
+// Do the stepdown tests on a separate connection to avoid interfering with the
+// ReplSetTest mechanism.
+const primaryAdmin = primary.getDB("admin");
+const primaryDataConn = new Mongo(primary.host);
+const primaryDb = primaryDataConn.getDB("test");
+const primaryColl = primaryDb[collname];
+primaryDataConn.forceWriteMode('legacy');
+
+function getNotMasterLegacyUnackWritesCounter() {
+ return assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1}))
+ .metrics.repl.network.notMasterLegacyUnacknowledgedWrites;
+}
+
+function runStepDownTest({description, failpoint, operation}) {
+ jsTestLog("Enabling failpoint to block " + description + "s");
+ assert.commandWorked(
+ primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+
+ let failedLegacyUnackWritesBefore = getNotMasterLegacyUnackWritesCounter();
+
+ jsTestLog("Trying legacy " + description + " on stepping-down primary");
+ operation();
+ checkLog.contains(primary, failpoint + " fail point enabled");
+ jsTestLog("Within " + description + ": stepping down and disabling failpoint");
+ assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ assert.commandWorked(primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+ res = assert.throws(() => primaryDb.adminCommand({ping: 1}));
assert(isNetworkError(res));
// We should automatically reconnect after the failed command.
- assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
- jsTestLog("Trying legacy update on secondary");
- secondaryColl.update({_id: 'updateme'}, {'$set': {x: 1}});
- res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
- jsTestLog("Trying legacy remove on secondary");
- secondaryColl.remove({_id: 'deleteme'}, {'$set': {x: 1}});
- res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
- // Do the stepdown tests on a separate connection to avoid interfering with the
- // ReplSetTest mechanism.
- const primaryAdmin = primary.getDB("admin");
- const primaryDataConn = new Mongo(primary.host);
- const primaryDb = primaryDataConn.getDB("test");
- const primaryColl = primaryDb[collname];
- primaryDataConn.forceWriteMode('legacy');
-
- function getNotMasterLegacyUnackWritesCounter() {
- return assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1}))
- .metrics.repl.network.notMasterLegacyUnacknowledgedWrites;
- }
-
- function runStepDownTest({description, failpoint, operation}) {
- jsTestLog("Enabling failpoint to block " + description + "s");
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
-
- let failedLegacyUnackWritesBefore = getNotMasterLegacyUnackWritesCounter();
-
- jsTestLog("Trying legacy " + description + " on stepping-down primary");
- operation();
- checkLog.contains(primary, failpoint + " fail point enabled");
- jsTestLog("Within " + description + ": stepping down and disabling failpoint");
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- res = assert.throws(() => primaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(primaryDb.adminCommand({ping: 1}));
-
- // Validate the number of legacy unacknowledged writes failed due to step down resulted
- // in network disconnection.
- let failedLegacyUnackWritesAfter = getNotMasterLegacyUnackWritesCounter();
- assert.eq(failedLegacyUnackWritesAfter, failedLegacyUnackWritesBefore + 1);
-
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
- }
- runStepDownTest({
- description: "insert",
- failpoint: "hangDuringBatchInsert",
- operation: () => primaryColl.insert({_id: 'no_insert_on_stepdown'})
- });
-
- runStepDownTest({
- description: "update",
- failpoint: "hangDuringBatchUpdate",
- operation: () => primaryColl.update({_id: 'updateme'}, {'$set': {x: 1}})
- });
-
- runStepDownTest({
- description: "remove",
- failpoint: "hangDuringBatchRemove",
- operation: () => primaryColl.remove({_id: 'deleteme'}, {'$set': {x: 1}})
- });
-
- rst.stopSet();
+ assert.commandWorked(primaryDb.adminCommand({ping: 1}));
+
+    // Validate that the legacy unacknowledged write that failed due to the step-down was
+    // counted as resulting in a network disconnection.
+ let failedLegacyUnackWritesAfter = getNotMasterLegacyUnackWritesCounter();
+ assert.eq(failedLegacyUnackWritesAfter, failedLegacyUnackWritesBefore + 1);
+
+ // Allow the primary to be re-elected, and wait for it.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
+runStepDownTest({
+ description: "insert",
+ failpoint: "hangDuringBatchInsert",
+ operation: () => primaryColl.insert({_id: 'no_insert_on_stepdown'})
+});
+
+runStepDownTest({
+ description: "update",
+ failpoint: "hangDuringBatchUpdate",
+ operation: () => primaryColl.update({_id: 'updateme'}, {'$set': {x: 1}})
+});
+
+runStepDownTest({
+ description: "remove",
+ failpoint: "hangDuringBatchRemove",
+    operation: () => primaryColl.remove({_id: 'deleteme'})
+});
+
+rst.stopSet();
})();
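
The counter validation inside runStepDownTest follows the usual serverStatus pattern: snapshot the metric, perform the operation, then assert on the delta rather than an absolute value, so the check tolerates whatever ran earlier in the suite. A minimal sketch of that pattern, assuming `adminDb` is an admin database handle and using the same metric path as the test:

    // Assert on a serverStatus metric delta, not an absolute count.
    function getUnackWriteCount(adminDb) {
        return assert.commandWorked(adminDb.runCommand({serverStatus: 1}))
            .metrics.repl.network.notMasterLegacyUnacknowledgedWrites;
    }
    var before = getUnackWriteCount(adminDb);
    // ... perform the operation expected to bump the counter ...
    assert.eq(getUnackWriteCount(adminDb), before + 1);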
diff --git a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
index b5e67365eb4..35440fcb441 100644
--- a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
+++ b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
@@ -5,110 +5,112 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- // Set up a ReplSetTest where nodes only sync one oplog entry at a time.
- const rst = new ReplSetTest(
- {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
- rst.startSet();
- const config = rst.getReplSetConfig();
- // Prevent elections.
- config.settings = {electionTimeoutMillis: 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Set up a ReplSetTest where nodes only sync one oplog entry at a time.
+const rst = new ReplSetTest(
+ {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
+rst.startSet();
+const config = rst.getReplSetConfig();
+// Prevent elections.
+config.settings = {
+ electionTimeoutMillis: 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- const nodeA = rst.nodes[0];
- const nodeB = rst.nodes[1];
- const nodeC = rst.nodes[2];
- const nodeD = rst.nodes[3];
- const nodeE = rst.nodes[4];
+const nodeA = rst.nodes[0];
+const nodeB = rst.nodes[1];
+const nodeC = rst.nodes[2];
+const nodeD = rst.nodes[3];
+const nodeE = rst.nodes[4];
- jsTest.log("Node A is primary in term 1. Node E is delayed.");
- // A: [1]
- // B: [1]
- // C: [1]
- // D: [1]
- // E:
- assert.eq(nodeA, rst.getPrimary());
- nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({term: 1}));
- rst.awaitReplication(undefined, undefined, [nodeB, nodeC, nodeD]);
+jsTest.log("Node A is primary in term 1. Node E is delayed.");
+// A: [1]
+// B: [1]
+// C: [1]
+// D: [1]
+// E:
+assert.eq(nodeA, rst.getPrimary());
+nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({term: 1}));
+rst.awaitReplication(undefined, undefined, [nodeB, nodeC, nodeD]);
- jsTest.log("Node B steps up in term 2 and performs a write, which is not replicated.");
- // A: [1]
- // B: [1] [2]
- // C: [1]
- // D: [1]
- // E:
- stopServerReplication([nodeA, nodeC, nodeD]);
- assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
- rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
- assert.eq(nodeB, rst.getPrimary());
- assert.commandWorked(nodeB.getDB(dbName)[collName].insert({term: 2}));
+jsTest.log("Node B steps up in term 2 and performs a write, which is not replicated.");
+// A: [1]
+// B: [1] [2]
+// C: [1]
+// D: [1]
+// E:
+stopServerReplication([nodeA, nodeC, nodeD]);
+assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
+rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
+assert.eq(nodeB, rst.getPrimary());
+assert.commandWorked(nodeB.getDB(dbName)[collName].insert({term: 2}));
- jsTest.log("Node A steps up again in term 3 with votes from A, C, and D and commits a write.");
- // A: [1] [3]
- // B: [1] [2]
- // C: [1] [3]
- // D: [1] [3]
- // E:
- nodeB.disconnect([nodeA, nodeC, nodeD, nodeE]);
- assert.commandWorked(nodeA.adminCommand({replSetStepUp: 1}));
- restartServerReplication([nodeA, nodeC, nodeD]);
- assert.soon(() => {
- // We cannot use getPrimary() here because 2 nodes report they are primary.
- return assert.commandWorked(nodeA.adminCommand({ismaster: 1})).ismaster;
- });
- assert.commandWorked(
- nodeA.getDB(dbName)[collName].insert({term: 3}, {writeConcern: {w: "majority"}}));
- assert.eq(1, nodeC.getDB(dbName)[collName].find({term: 3}).itcount());
- assert.eq(1, nodeD.getDB(dbName)[collName].find({term: 3}).itcount());
+jsTest.log("Node A steps up again in term 3 with votes from A, C, and D and commits a write.");
+// A: [1] [3]
+// B: [1] [2]
+// C: [1] [3]
+// D: [1] [3]
+// E:
+nodeB.disconnect([nodeA, nodeC, nodeD, nodeE]);
+assert.commandWorked(nodeA.adminCommand({replSetStepUp: 1}));
+restartServerReplication([nodeA, nodeC, nodeD]);
+assert.soon(() => {
+ // We cannot use getPrimary() here because 2 nodes report they are primary.
+ return assert.commandWorked(nodeA.adminCommand({ismaster: 1})).ismaster;
+});
+assert.commandWorked(
+ nodeA.getDB(dbName)[collName].insert({term: 3}, {writeConcern: {w: "majority"}}));
+assert.eq(1, nodeC.getDB(dbName)[collName].find({term: 3}).itcount());
+assert.eq(1, nodeD.getDB(dbName)[collName].find({term: 3}).itcount());
- jsTest.log("Node E syncs from a majority node and learns the new commit point in term 3.");
- // A: [1] [3]
- // B: [1] [2]
- // C: [1] [3]
- // D: [1] [3]
- // E: [1]
- // The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
- // applying the document {msg: "new primary"}, which is the first document of term 3. This
- // depends on the oplog fetcher batch size being 1.
- assert.commandWorked(nodeE.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "alwaysOn",
- data: {document: {msg: "new primary"}}
- }));
- nodeE.reconnect([nodeA, nodeC, nodeD]);
- checkLog.contains(nodeE, "stopReplProducerOnDocument fail point is enabled.");
- assert.soon(() => {
- return 1 === nodeE.getDB(dbName)[collName].find({term: 1}).itcount();
- });
- assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
+jsTest.log("Node E syncs from a majority node and learns the new commit point in term 3.");
+// A: [1] [3]
+// B: [1] [2]
+// C: [1] [3]
+// D: [1] [3]
+// E: [1]
+// The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
+// applying the document {msg: "new primary"}, which is the first document of term 3. This
+// depends on the oplog fetcher batch size being 1.
+assert.commandWorked(nodeE.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "alwaysOn",
+ data: {document: {msg: "new primary"}}
+}));
+nodeE.reconnect([nodeA, nodeC, nodeD]);
+checkLog.contains(nodeE, "stopReplProducerOnDocument fail point is enabled.");
+assert.soon(() => {
+ return 1 === nodeE.getDB(dbName)[collName].find({term: 1}).itcount();
+});
+assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
- jsTest.log("Node E switches its sync source to B and replicates the stale branch of term 2.");
- nodeE.disconnect([nodeA, nodeC, nodeD]);
- nodeB.reconnect(nodeE);
- assert.commandWorked(
- nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
- assert.soon(() => {
- return 1 === nodeE.getDB(dbName)[collName].find({term: 2}).itcount();
- });
+jsTest.log("Node E switches its sync source to B and replicates the stale branch of term 2.");
+nodeE.disconnect([nodeA, nodeC, nodeD]);
+nodeB.reconnect(nodeE);
+assert.commandWorked(
+ nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
+assert.soon(() => {
+ return 1 === nodeE.getDB(dbName)[collName].find({term: 2}).itcount();
+});
- jsTest.log("Node E must not return the entry in term 2 as committed.");
- assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).readConcern("majority").itcount());
+jsTest.log("Node E must not return the entry in term 2 as committed.");
+assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).readConcern("majority").itcount());
- jsTest.log("Reconnect the set. Node E must roll back successfully.");
- nodeE.reconnect([nodeA, nodeC, nodeD]);
- nodeB.reconnect([nodeA, nodeC, nodeD]);
- rst.awaitReplication();
- assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 1}).itcount());
- assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).itcount());
- assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
+jsTest.log("Reconnect the set. Node E must roll back successfully.");
+nodeE.reconnect([nodeA, nodeC, nodeD]);
+nodeB.reconnect([nodeA, nodeC, nodeD]);
+rst.awaitReplication();
+assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 1}).itcount());
+assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).itcount());
+assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
- rst.stopSet();
+rst.stopSet();
}());
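
The precision of this scenario comes from stopReplProducerOnDocument, a failpoint that halts a node's oplog fetcher just before a matching document is applied; paired with bgSyncOplogFetcherBatchSize=1 it pins exactly where the node's oplog stops. A minimal sketch of the enable/wait/disable sequence, assuming `node` is a member connection and the marker document matches a single oplog entry, as in the test:

    // Freeze replication on `node` right before the chosen marker document.
    assert.commandWorked(node.adminCommand({
        configureFailPoint: "stopReplProducerOnDocument",
        mode: "alwaysOn",
        data: {document: {msg: "new primary"}}
    }));
    checkLog.contains(node, "stopReplProducerOnDocument fail point is enabled.");
    // ... assert on the node's frozen state here ...
    assert.commandWorked(
        node.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));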
diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js
index 41e8d475f83..e1d008aebc0 100644
--- a/jstests/replsets/drain.js
+++ b/jstests/replsets/drain.js
@@ -10,101 +10,101 @@
// 8. Ensure the ops in queue are applied and that the PRIMARY begins to accept writes as usual.
(function() {
- "use strict";
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": "testSet",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ],
- // No primary catch-up so we focus on the drain mode.
- "settings": {"catchUpTimeoutMillis": 0},
- });
+"use strict";
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ],
+ // No primary catch-up so we focus on the drain mode.
+ "settings": {"catchUpTimeoutMillis": 0},
+});
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp();
- var bigString = Array(1024 * 1024).toString();
- assert.writeOK(primary.getDB("foo").foo.insert({big: bigString}));
- replSet.awaitReplication();
- assert.commandWorked(secondary.getDB("admin").runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
- 'failed to enable fail point on secondary');
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp();
+var bigString = Array(1024 * 1024).toString();
+assert.writeOK(primary.getDB("foo").foo.insert({big: bigString}));
+replSet.awaitReplication();
+assert.commandWorked(
+ secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'failed to enable fail point on secondary');
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- bulk.insert({big: bigString});
- }
- assert.writeOK(bulk.execute());
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ bulk.insert({big: bigString});
+}
+assert.writeOK(bulk.execute());
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
- assert.soon(function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange >= numDocuments - 1;
- }, 'secondary did not buffer operations for new inserts on primary', 300000, 1000);
+assert.soon(function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange >= numDocuments - 1;
+}, 'secondary did not buffer operations for new inserts on primary', 300000, 1000);
- // Kill primary; secondary will enter drain mode to catch up
- primary.getDB("admin").shutdownServer({force: true});
+// Kill primary; secondary will enter drain mode to catch up
+primary.getDB("admin").shutdownServer({force: true});
- replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
+replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
- // Ensure new primary is not yet writable
- jsTestLog('New primary should not be writable yet');
- assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2}));
- assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster);
+// Ensure new primary is not yet writable
+jsTestLog('New primary should not be writable yet');
+assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2}));
+assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster);
- // Ensure new primary is not yet readable without slaveOk bit.
- secondary.slaveOk = false;
- jsTestLog('New primary should not be readable yet, without slaveOk bit');
- var res = secondary.getDB("foo").runCommand({find: "foo"});
- assert.commandFailed(res);
- assert.eq(ErrorCodes.NotMasterNoSlaveOk,
- res.code,
- "find failed with unexpected error code: " + tojson(res));
- // Nor should it be readable with the slaveOk bit.
- secondary.slaveOk = true;
- assert.commandWorked(secondary.getDB("foo").runCommand({find: "foo"}));
+// Ensure new primary is not yet readable without slaveOk bit.
+secondary.slaveOk = false;
+jsTestLog('New primary should not be readable yet, without slaveOk bit');
+var res = secondary.getDB("foo").runCommand({find: "foo"});
+assert.commandFailed(res);
+assert.eq(ErrorCodes.NotMasterNoSlaveOk,
+ res.code,
+ "find failed with unexpected error code: " + tojson(res));
+// But it should be readable with the slaveOk bit.
+secondary.slaveOk = true;
+assert.commandWorked(secondary.getDB("foo").runCommand({find: "foo"}));
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
- // Allow draining to complete
- jsTestLog('Disabling fail point on new primary to allow draining to complete');
- assert.commandWorked(
- secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
- 'failed to disable fail point on new primary');
- primary = replSet.getPrimary();
+// Allow draining to complete
+jsTestLog('Disabling fail point on new primary to allow draining to complete');
+assert.commandWorked(
+ secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ 'failed to disable fail point on new primary');
+primary = replSet.getPrimary();
- assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 30000,
- }),
- 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
+assert.commandWorked(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 30000,
+ }),
+ 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
- // Ensure new primary is writable
- jsTestLog('New primary should be writable after draining is complete');
- assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1}));
- // Check for at least two entries. There was one prior to freezing op application on the
- // secondary and we cannot guarantee all writes reached the secondary's op queue prior to
- // shutting down the original primary.
- assert.gte(primary.getDB("foo").foo.find().itcount(), 2);
- replSet.stopSet();
+// Ensure new primary is writable
+jsTestLog('New primary should be writable after draining is complete');
+assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1}));
+// Check for at least two entries. There was one prior to freezing op application on the
+// secondary and we cannot guarantee all writes reached the secondary's op queue prior to
+// shutting down the original primary.
+assert.gte(primary.getDB("foo").foo.find().itcount(), 2);
+replSet.stopSet();
})();
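
The drain probe used twice above generalizes: the replSetTest command's waitForDrainFinish field blocks until the new primary has applied its queued oplog entries, failing with ExceededTimeLimit if draining cannot complete within the timeout. A minimal sketch, assuming `node` is the stepping-up member and the timeout is in milliseconds:

    // Wait up to 30 seconds for a newly elected primary to finish draining.
    assert.commandWorked(node.adminCommand({replSetTest: 1, waitForDrainFinish: 30000}));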
diff --git a/jstests/replsets/drop_collections_two_phase.js b/jstests/replsets/drop_collections_two_phase.js
index 8b6b3bab79e..ac8b727834a 100644
--- a/jstests/replsets/drop_collections_two_phase.js
+++ b/jstests/replsets/drop_collections_two_phase.js
@@ -4,34 +4,34 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
+// COMMIT collection drop.
+twoPhaseDropTest.commitDropCollection(collName);
- twoPhaseDropTest.stop();
+twoPhaseDropTest.stop();
}());
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js b/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js
index 23992c17a21..26a018a863f 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js
@@ -3,98 +3,97 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_convert_to_capped";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_convert_to_capped";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- try {
- // Converting a drop-pending collection to a capped collection returns NamespaceNotFound.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const convertToCappedCmdWithName = {
- convertToCapped: dropPendingCollName,
- size: 100000,
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to convert collection with system.drop namespace: ' +
- tojson(convertToCappedCmdWithName));
- assert.commandFailedWithCode(primary.getDB(dbName).runCommand(convertToCappedCmdWithName),
- ErrorCodes.NamespaceNotFound);
- let dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'convertToCapped using collection name ' + dropPendingCollName +
- ' affected drop-pending collection state unexpectedly');
- assert(!dropPendingCollInfo.options.capped);
- assert(!twoPhaseDropTest.collectionExists(collName));
+try {
+ // Converting a drop-pending collection to a capped collection returns NamespaceNotFound.
+ const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ const dropPendingCollName = dropPendingColl.name;
+ const primary = replTest.getPrimary();
+ const convertToCappedCmdWithName = {
+ convertToCapped: dropPendingCollName,
+ size: 100000,
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to convert collection with system.drop namespace: ' +
+ tojson(convertToCappedCmdWithName));
+ assert.commandFailedWithCode(primary.getDB(dbName).runCommand(convertToCappedCmdWithName),
+ ErrorCodes.NamespaceNotFound);
+ let dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'convertToCapped using collection name ' + dropPendingCollName +
+ ' affected drop-pending collection state unexpectedly');
+ assert(!dropPendingCollInfo.options.capped);
+ assert(!twoPhaseDropTest.collectionExists(collName));
- // Converting a drop-pending collection to a capped collection using applyOps with
- // system.drop namespace.
- const cmdNs = dbName + '.$cmd';
- const applyOpsCmdWithName = {
- applyOps:
- [{op: 'c', ns: cmdNs, o: {convertToCapped: dropPendingCollName, size: 100000}}]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to convert collection using applyOps with system.drop namespace: ' +
- tojson(applyOpsCmdWithName));
- // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using collection name ' + dropPendingCollName +
- ' affected drop-pending collection state unexpectedly');
+ // Converting a drop-pending collection to a capped collection using applyOps with
+ // system.drop namespace.
+ const cmdNs = dbName + '.$cmd';
+ const applyOpsCmdWithName = {
+ applyOps: [{op: 'c', ns: cmdNs, o: {convertToCapped: dropPendingCollName, size: 100000}}]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to convert collection using applyOps with system.drop namespace: ' +
+ tojson(applyOpsCmdWithName));
+ // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
+ assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using collection name ' + dropPendingCollName +
+ ' affected drop-pending collection state unexpectedly');
- // Converting a drop-pending collection to a capped collection using applyOps with UUID.
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- ui: dropPendingCollUuid,
- o: {convertToCapped: 'ignored_collection_name', size: 100000}
- }]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to convert collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' affected drop-pending collection state unexpectedly');
- assert(!dropPendingCollInfo.options.capped);
- assert.eq(dropPendingCollUuid.hex(),
- dropPendingCollInfo.info.uuid.hex(),
- 'drop pending collection UUID does not match UUID of original collection: ' +
- tojson(dropPendingCollInfo));
- assert(!twoPhaseDropTest.collectionExists(collName));
+ // Converting a drop-pending collection to a capped collection using applyOps with UUID.
+ const dropPendingCollUuid = dropPendingColl.info.uuid;
+ const applyOpsCmdWithUuid = {
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ ui: dropPendingCollUuid,
+ o: {convertToCapped: 'ignored_collection_name', size: 100000}
+ }]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to convert collection using applyOps with UUID: ' +
+ tojson(applyOpsCmdWithUuid));
+ // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
+    assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+ dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'applyOps using UUID ' + dropPendingCollUuid +
+ ' affected drop-pending collection state unexpectedly');
+ assert(!dropPendingCollInfo.options.capped);
+ assert.eq(dropPendingCollUuid.hex(),
+ dropPendingCollInfo.info.uuid.hex(),
+ 'drop pending collection UUID does not match UUID of original collection: ' +
+ tojson(dropPendingCollInfo));
+ assert(!twoPhaseDropTest.collectionExists(collName));
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
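The applyOps-by-UUID shape exercised throughout these tests, reduced to a standalone sketch. Everything here is illustrative: 'test' is a placeholder database, and the UUID would come from the drop-pending collection's listCollections info rather than being freshly generated.

// Sketch: an applyOps entry that targets a collection by UUID via the 'ui'
// field. With 'ui' present, the server resolves the collection by UUID, so the
// collection name inside 'o' is ignored (hence 'ignored_collection_name').
const placeholderUuid = UUID();  // placeholder; the tests use dropPendingColl.info.uuid
const sketchCmd = {
    applyOps: [{
        op: 'c',
        ns: 'test.$cmd',
        ui: placeholderUuid,
        o: {convertToCapped: 'ignored_collection_name', size: 100000}
    }]
};
// applyOps reports success even when the inner op hits NamespaceNotFound, which
// is why the tests assert commandWorked and then re-check drop-pending state:
// assert.commandWorked(db.adminCommand(sketchCmd));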
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_create.js b/jstests/replsets/drop_collections_two_phase_apply_ops_create.js
index 792be894d8d..34998dce7ec 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_create.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_create.js
@@ -6,63 +6,58 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
-
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_create";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
-
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
-
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
-
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
-
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
-
- try {
- // Create collection using applyOps with UUID that belongs to a drop-pending collection.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const cmdNs = dbName + '.$cmd';
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- ui: dropPendingCollUuid,
- o: {create: 'ignored_collection_name'}
- }]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to create collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
- const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' changed drop-pending state on collection unexpectedly');
- assert.eq(dropPendingCollUuid.hex(),
- dropPendingCollInfo.info.uuid.hex(),
- 'drop pending collection UUID does not match UUID of original collection: ' +
- tojson(dropPendingCollInfo));
-
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+"use strict";
+
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_create";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
+
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
+
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
+
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
+
+try {
+ // Create collection using applyOps with UUID that belongs to a drop-pending collection.
+ const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ const dropPendingCollName = dropPendingColl.name;
+ const primary = replTest.getPrimary();
+ const cmdNs = dbName + '.$cmd';
+ const dropPendingCollUuid = dropPendingColl.info.uuid;
+ const applyOpsCmdWithUuid = {
+ applyOps:
+ [{op: 'c', ns: cmdNs, ui: dropPendingCollUuid, o: {create: 'ignored_collection_name'}}]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to create collection using applyOps with UUID: ' + tojson(applyOpsCmdWithUuid));
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+ const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'applyOps using UUID ' + dropPendingCollUuid +
+ ' changed drop-pending state on collection unexpectedly');
+ assert.eq(dropPendingCollUuid.hex(),
+ dropPendingCollInfo.info.uuid.hex(),
+ 'drop pending collection UUID does not match UUID of original collection: ' +
+ tojson(dropPendingCollInfo));
+
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
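The drop-pending namespaces asserted on above follow the 'system.drop.<optime>.<collName>' format described in drop_databases_two_phase.js further below. A minimal sketch for enumerating them ('placeholder_db' is a stand-in database name):

// Sketch: list drop-pending collections and the UUIDs the tests assert against.
const listRes = assert.commandWorked(db.getSiblingDB('placeholder_db')
                                         .runCommand({listCollections: 1, includePendingDrops: true}));
const pendingDrops = listRes.cursor.firstBatch.filter(c => /^system\.drop\./.test(c.name));
pendingDrops.forEach(c => print(c.name + ' uuid: ' + tojson(c.info.uuid)));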
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js b/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js
index 8cfbca31166..0d83fc8602b 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js
@@ -4,61 +4,62 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_noop";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_noop";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- // Drop drop-pending collection using applyOps with system.drop namespace.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const cmdNs = dbName + '.$cmd';
- const applyOpsCmdWithName = {applyOps: [{op: 'c', ns: cmdNs, o: {drop: dropPendingCollName}}]};
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to drop collection using applyOps with system.drop namespace: ' +
- tojson(applyOpsCmdWithName));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using collection name ' + dropPendingCollName +
- ' removed drop-pending collection unexpectedly');
+// Drop drop-pending collection using applyOps with system.drop namespace.
+const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+const dropPendingCollName = dropPendingColl.name;
+const primary = replTest.getPrimary();
+const cmdNs = dbName + '.$cmd';
+const applyOpsCmdWithName = {
+ applyOps: [{op: 'c', ns: cmdNs, o: {drop: dropPendingCollName}}]
+};
+TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to drop collection using applyOps with system.drop namespace: ' +
+ tojson(applyOpsCmdWithName));
+assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
+assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using collection name ' + dropPendingCollName +
+ ' removed drop-pending collection unexpectedly');
- // Drop drop-pending collection using applyOps with UUID.
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps:
- [{op: 'c', ns: cmdNs, ui: dropPendingCollUuid, o: {drop: 'ignored_collection_name'}}]
- };
- TwoPhaseDropCollectionTest._testLog('Attempting to drop collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' removed drop-pending collection unexpectedly');
+// Drop drop-pending collection using applyOps with UUID.
+const dropPendingCollUuid = dropPendingColl.info.uuid;
+const applyOpsCmdWithUuid = {
+ applyOps: [{op: 'c', ns: cmdNs, ui: dropPendingCollUuid, o: {drop: 'ignored_collection_name'}}]
+};
+TwoPhaseDropCollectionTest._testLog('Attempting to drop collection using applyOps with UUID: ' +
+ tojson(applyOpsCmdWithUuid));
+assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+assert(
+ twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using UUID ' + dropPendingCollUuid + ' removed drop-pending collection unexpectedly');
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
+// COMMIT collection drop.
+twoPhaseDropTest.commitDropCollection(collName);
- twoPhaseDropTest.stop();
+twoPhaseDropTest.stop();
}());
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js b/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js
index 8db6ffaf42e..7a957df0269 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js
@@ -4,78 +4,77 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_rename";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_rename";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- try {
- // Rename drop-pending collection using applyOps with system.drop namespace.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const cmdNs = dbName + '.$cmd';
- const sourceNs = dbName + '.' + dropPendingCollName;
- const destNs = dbName + '.bar';
- const applyOpsCmdWithName = {
- applyOps: [{op: 'c', ns: cmdNs, o: {renameCollection: sourceNs, to: destNs}}]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to rename collection using applyOps with system.drop namespace: ' +
- tojson(applyOpsCmdWithName));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using collection name ' + dropPendingCollName +
- ' renamed drop-pending collection unexpectedly');
- assert(!twoPhaseDropTest.collectionExists(collName));
+try {
+ // Rename drop-pending collection using applyOps with system.drop namespace.
+ const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ const dropPendingCollName = dropPendingColl.name;
+ const primary = replTest.getPrimary();
+ const cmdNs = dbName + '.$cmd';
+ const sourceNs = dbName + '.' + dropPendingCollName;
+ const destNs = dbName + '.bar';
+ const applyOpsCmdWithName = {
+ applyOps: [{op: 'c', ns: cmdNs, o: {renameCollection: sourceNs, to: destNs}}]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to rename collection using applyOps with system.drop namespace: ' +
+ tojson(applyOpsCmdWithName));
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
+ assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using collection name ' + dropPendingCollName +
+ ' renamed drop-pending collection unexpectedly');
+ assert(!twoPhaseDropTest.collectionExists(collName));
- // Rename drop-pending collection using applyOps with UUID.
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- ui: dropPendingCollUuid,
- o: {renameCollection: dbName + '.ignored_collection_name', to: destNs}
- }]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to rename collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
- const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' renamed drop-pending collection unexpectedly');
- assert.eq(dropPendingCollUuid.hex(),
- dropPendingCollInfo.info.uuid.hex(),
- 'drop pending collection UUID does not match UUID of original collection: ' +
- tojson(dropPendingCollInfo));
+ // Rename drop-pending collection using applyOps with UUID.
+ const dropPendingCollUuid = dropPendingColl.info.uuid;
+ const applyOpsCmdWithUuid = {
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ ui: dropPendingCollUuid,
+ o: {renameCollection: dbName + '.ignored_collection_name', to: destNs}
+ }]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to rename collection using applyOps with UUID: ' + tojson(applyOpsCmdWithUuid));
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+ const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'applyOps using UUID ' + dropPendingCollUuid +
+ ' renamed drop-pending collection unexpectedly');
+ assert.eq(dropPendingCollUuid.hex(),
+ dropPendingCollInfo.info.uuid.hex(),
+ 'drop pending collection UUID does not match UUID of original collection: ' +
+ tojson(dropPendingCollInfo));
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
diff --git a/jstests/replsets/drop_collections_two_phase_dbhash.js b/jstests/replsets/drop_collections_two_phase_dbhash.js
index b04b631bc09..058a6f09aeb 100644
--- a/jstests/replsets/drop_collections_two_phase_dbhash.js
+++ b/jstests/replsets/drop_collections_two_phase_dbhash.js
@@ -4,51 +4,49 @@
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Compute db hash for all collections on given database.
- function getDbHash(database) {
- let res =
- assert.commandWorked(database.runCommand({dbhash: 1}), "'dbHash' command failed.");
- return res.md5;
- }
+// Compute db hash for all collections on given database.
+function getDbHash(database) {
+ let res = assert.commandWorked(database.runCommand({dbhash: 1}), "'dbHash' command failed.");
+ return res.md5;
+}
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_long_index_names";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_long_index_names";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- let primaryDB = replTest.getPrimary().getDB(dbName);
+let primaryDB = replTest.getPrimary().getDB(dbName);
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // Save the dbHash while drop is in 'pending' state.
- twoPhaseDropTest.prepareDropCollection(collName);
- let dropPendingDbHash = getDbHash(primaryDB);
+// Save the dbHash while drop is in 'pending' state.
+twoPhaseDropTest.prepareDropCollection(collName);
+let dropPendingDbHash = getDbHash(primaryDB);
- // Save the dbHash after the drop has been committed.
- twoPhaseDropTest.commitDropCollection(collName);
- let dropCommittedDbHash = getDbHash(primaryDB);
+// Save the dbHash after the drop has been committed.
+twoPhaseDropTest.commitDropCollection(collName);
+let dropCommittedDbHash = getDbHash(primaryDB);
- // The dbHash calculation should ignore drop pending collections. Therefore, therefore, the hash
- // during prepare phase and commit phase should match.
- let failMsg = "dbHash during drop pending phase did not match dbHash after drop was committed.";
- assert.eq(dropPendingDbHash, dropCommittedDbHash, failMsg);
-
- replTest.stopSet();
+// The dbHash calculation should ignore drop pending collections. Therefore, the hash
+// during prepare phase and commit phase should match.
+let failMsg = "dbHash during drop pending phase did not match dbHash after drop was committed.";
+assert.eq(dropPendingDbHash, dropCommittedDbHash, failMsg);
+replTest.stopSet();
})();
\ No newline at end of file
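In isolation, the comparison this test makes is just two dbhash calls taken around the drop commit; a sketch ('placeholder_db' stands in for the test database):

// Sketch: dbHash excludes drop-pending collections, so the hash taken while a
// drop is pending should equal the hash taken after the drop commits.
function md5Of(database) {
    return assert.commandWorked(database.runCommand({dbhash: 1})).md5;
}
const pendingHash = md5Of(db.getSiblingDB('placeholder_db'));
// ... commit the collection drop here ...
const committedHash = md5Of(db.getSiblingDB('placeholder_db'));
assert.eq(pendingHash, committedHash);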
diff --git a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
index 4f989bf337a..c5bf302e321 100644
--- a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
+++ b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
@@ -4,139 +4,139 @@
*/
(function() {
- 'use strict';
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/libs/check_log.js'); // For checkLog.contains().
-
- // Return a list of all indexes for a given collection. Use 'args' as the
- // 'listIndexes' command arguments.
- // Assumes all indexes in the collection fit in the first batch of results.
- function listIndexes(database, coll, args) {
- args = args || {};
- let failMsg = "'listIndexes' command failed";
- let listIndexesCmd = {listIndexes: coll};
- let res = assert.commandWorked(database.runCommand(listIndexesCmd, args), failMsg);
- return res.cursor.firstBatch;
- }
-
- // Set up a two phase drop test.
- let testName = 'drop_collection_two_phase_rename_drop_target';
- let dbName = testName;
- let fromCollName = 'collToRename';
- let toCollName = 'collToDrop';
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
-
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
-
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
-
- // Create the collections that will be renamed and dropped.
- twoPhaseDropTest.createCollection(fromCollName);
- twoPhaseDropTest.createCollection(toCollName);
-
- // Collection renames with dropTarget set to true should handle long index names in the target
- // collection gracefully. MMAPv1 imposes a hard limit on index namespaces so we have to drop
- // indexes that are too long to store on disk after renaming the collection.
- const primary = replTest.getPrimary();
- const testDb = primary.getDB(dbName);
- const fromColl = testDb.getCollection(fromCollName);
- const toColl = testDb.getCollection(toCollName);
- let maxNsLength = 127;
- let longIndexName = ''.pad(maxNsLength - (toColl.getFullName() + '.$').length, true, 'a');
- let shortIndexName = "short_name";
-
- // In the target collection, which will be dropped, create one index with a "too long" name, and
- // one with a name of acceptable size.
- assert.commandWorked(toColl.ensureIndex({a: 1}, {name: longIndexName}));
- assert.commandWorked(toColl.ensureIndex({b: 1}, {name: shortIndexName}));
-
- // Insert documents into both collections so that we can tell them apart.
- assert.writeOK(fromColl.insert({_id: 'from'}));
- assert.writeOK(toColl.insert({_id: 'to'}));
+'use strict';
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/libs/check_log.js'); // For checkLog.contains().
+
+// Return a list of all indexes for a given collection. Use 'args' as the
+// 'listIndexes' command arguments.
+// Assumes all indexes in the collection fit in the first batch of results.
+function listIndexes(database, coll, args) {
+ args = args || {};
+ let failMsg = "'listIndexes' command failed";
+ let listIndexesCmd = {listIndexes: coll};
+ let res = assert.commandWorked(database.runCommand(listIndexesCmd, args), failMsg);
+ return res.cursor.firstBatch;
+}
+
+// Set up a two phase drop test.
+let testName = 'drop_collection_two_phase_rename_drop_target';
+let dbName = testName;
+let fromCollName = 'collToRename';
+let toCollName = 'collToDrop';
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
+
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
+
+// Create the collections that will be renamed and dropped.
+twoPhaseDropTest.createCollection(fromCollName);
+twoPhaseDropTest.createCollection(toCollName);
+
+// Collection renames with dropTarget set to true should handle long index names in the target
+// collection gracefully. MMAPv1 imposes a hard limit on index namespaces so we have to drop
+// indexes that are too long to store on disk after renaming the collection.
+const primary = replTest.getPrimary();
+const testDb = primary.getDB(dbName);
+const fromColl = testDb.getCollection(fromCollName);
+const toColl = testDb.getCollection(toCollName);
+let maxNsLength = 127;
+let longIndexName = ''.pad(maxNsLength - (toColl.getFullName() + '.$').length, true, 'a');
+let shortIndexName = "short_name";
+
+// In the target collection, which will be dropped, create one index with a "too long" name, and
+// one with a name of acceptable size.
+assert.commandWorked(toColl.ensureIndex({a: 1}, {name: longIndexName}));
+assert.commandWorked(toColl.ensureIndex({b: 1}, {name: shortIndexName}));
+
+// Insert documents into both collections so that we can tell them apart.
+assert.writeOK(fromColl.insert({_id: 'from'}));
+assert.writeOK(toColl.insert({_id: 'to'}));
+replTest.awaitReplication();
+
+// Prevent renameCollection from being applied on the secondary so that we can examine the state
+// of the primary after target collection has been dropped.
+jsTestLog('Pausing oplog application on the secondary node.');
+const secondary = replTest.getSecondary();
+twoPhaseDropTest.pauseOplogApplication(secondary);
+
+// This logs each operation being applied.
+const previousLogLevel =
+    assert.commandWorked(primary.setLogLevel(1, 'storage')).was.storage.verbosity;
+
+try {
+ // When the target collection exists, the renameCollection command should fail if dropTarget
+ // flag is set to false or is omitted.
+ jsTestLog(
+ 'Checking renameCollection error handling when dropTarget is set to false and target collection exists.');
+ let dropTarget = false;
+ assert.commandFailedWithCode(fromColl.renameCollection(toCollName, dropTarget),
+ ErrorCodes.NamespaceExists);
+
+ // Rename collection with dropTarget set to true. Check collection contents after rename.
+ jsTestLog('Renaming collection ' + fromColl.getFullName() + ' to ' + toColl.getFullName() +
+ ' with dropTarget set to true.');
+ dropTarget = true;
+ assert.commandWorked(fromColl.renameCollection(toColl.getName(), dropTarget));
+ assert(!twoPhaseDropTest.collectionExists(fromCollName));
+ assert(twoPhaseDropTest.collectionExists(toCollName));
+ assert.eq({_id: 'from'}, toColl.findOne());
+
+ // Confirm that original target collection is now a drop-pending collection.
+ const isPendingDropResult = twoPhaseDropTest.collectionIsPendingDrop(toCollName);
+ assert(isPendingDropResult);
+ const droppedCollName = isPendingDropResult.name;
+ jsTestLog('Original target collection is now in a drop-pending state: ' + droppedCollName);
+
+ // Check that indexes that would violate the namespace length constraints after rename were
+ // dropped.
+ const indexes = listIndexes(testDb, droppedCollName);
+ jsTestLog('Indexes in ' + droppedCollName + ': ' + tojson(indexes));
+ assert(indexes.find(idx => idx.name === shortIndexName));
+ assert.eq(undefined, indexes.find(idx => idx.name === longIndexName));
+
+ // Check that index drop appears before collection rename in the oplog.
+ const oplogColl = primary.getCollection('local.oplog.rs');
+ const cmdNs = testDb.getCollection('$cmd').getFullName();
+ const renameOplogEntry =
+ oplogColl.findOne({ns: cmdNs, 'o.renameCollection': fromColl.getFullName()});
+ const dropIndexOplogEntry =
+ oplogColl.findOne({ns: cmdNs, o: {dropIndexes: toCollName, index: longIndexName}});
+ const renameTimestamp = renameOplogEntry.ts;
+ const dropIndexTimestamp = dropIndexOplogEntry.ts;
+ assert.lt(
+ dropIndexTimestamp,
+ renameTimestamp,
+ 'index was not dropped before collection. index drop: ' + tojson(dropIndexOplogEntry) +
+ ' . collection rename: ' + tojson(renameOplogEntry));
+
+ // COMMIT collection drop.
+ twoPhaseDropTest.resumeOplogApplication(secondary);
replTest.awaitReplication();
+ assert.soonNoExcept(function() {
+ return !twoPhaseDropTest.collectionIsPendingDrop(toCollName);
+ });
- // Prevent renameCollection from being applied on the secondary so that we can examine the state
- // of the primary after target collection has been dropped.
- jsTestLog('Pausing oplog application on the secondary node.');
- const secondary = replTest.getSecondary();
- twoPhaseDropTest.pauseOplogApplication(secondary);
-
- // This logs each operation being applied.
- const previousLogLevel =
- assert.commandWorked(primary.setLogLevel(1, 'storage')).was.replication.verbosity;
-
- try {
- // When the target collection exists, the renameCollection command should fail if dropTarget
- // flag is set to false or is omitted.
- jsTestLog(
- 'Checking renameCollection error handling when dropTarget is set to false and target collection exists.');
- let dropTarget = false;
- assert.commandFailedWithCode(fromColl.renameCollection(toCollName, dropTarget),
- ErrorCodes.NamespaceExists);
-
- // Rename collection with dropTarget set to true. Check collection contents after rename.
- jsTestLog('Renaming collection ' + fromColl.getFullName() + ' to ' + toColl.getFullName() +
- ' with dropTarget set to true.');
- dropTarget = true;
- assert.commandWorked(fromColl.renameCollection(toColl.getName(), dropTarget));
- assert(!twoPhaseDropTest.collectionExists(fromCollName));
- assert(twoPhaseDropTest.collectionExists(toCollName));
- assert.eq({_id: 'from'}, toColl.findOne());
-
- // Confirm that original target collection is now a drop-pending collection.
- const isPendingDropResult = twoPhaseDropTest.collectionIsPendingDrop(toCollName);
- assert(isPendingDropResult);
- const droppedCollName = isPendingDropResult.name;
- jsTestLog('Original target collection is now in a drop-pending state: ' + droppedCollName);
-
- // Check that indexes that would violate the namespace length constraints after rename were
- // dropped.
- const indexes = listIndexes(testDb, droppedCollName);
- jsTestLog('Indexes in ' + droppedCollName + ': ' + tojson(indexes));
- assert(indexes.find(idx => idx.name === shortIndexName));
- assert.eq(undefined, indexes.find(idx => idx.name === longIndexName));
-
- // Check that index drop appears before collection rename in the oplog.
- const oplogColl = primary.getCollection('local.oplog.rs');
- const cmdNs = testDb.getCollection('$cmd').getFullName();
- const renameOplogEntry =
- oplogColl.findOne({ns: cmdNs, 'o.renameCollection': fromColl.getFullName()});
- const dropIndexOplogEntry =
- oplogColl.findOne({ns: cmdNs, o: {dropIndexes: toCollName, index: longIndexName}});
- const renameTimestamp = renameOplogEntry.ts;
- const dropIndexTimestamp = dropIndexOplogEntry.ts;
- assert.lt(dropIndexTimestamp,
- renameTimestamp,
- 'index was not dropped before collection. index drop: ' +
- tojson(dropIndexOplogEntry) + ' . collection rename: ' +
- tojson(renameOplogEntry));
-
- // COMMIT collection drop.
- twoPhaseDropTest.resumeOplogApplication(secondary);
- replTest.awaitReplication();
- assert.soonNoExcept(function() {
- return !twoPhaseDropTest.collectionIsPendingDrop(toCollName);
- });
-
- // Confirm in the logs that the renameCollection dropped the target collection on the
- // secondary using two phase collection drop.
- checkLog.contains(secondary, 'dropCollection: ' + toColl.getFullName());
-
- // Rename target collection back to source collection. This helps to ensure the collection
- // metadata is updated correctly on both primary and secondary.
- assert.commandWorked(toColl.renameCollection(fromCollName + '_roundtrip'));
- replTest.awaitReplication();
- } finally {
- // Reset log level.
- primary.setLogLevel(previousLogLevel, 'storage');
-
- twoPhaseDropTest.stop();
- }
+ // Confirm in the logs that the renameCollection dropped the target collection on the
+ // secondary using two phase collection drop.
+ checkLog.contains(secondary, 'dropCollection: ' + toColl.getFullName());
+
+ // Rename target collection back to source collection. This helps to ensure the collection
+ // metadata is updated correctly on both primary and secondary.
+ assert.commandWorked(toColl.renameCollection(fromCollName + '_roundtrip'));
+ replTest.awaitReplication();
+} finally {
+ // Reset log level.
+ primary.setLogLevel(previousLogLevel, 'storage');
+
+ twoPhaseDropTest.stop();
+}
}());
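The pause/resume helpers used above keep the secondary from advancing the commit point while the primary's drop-pending state is examined. drop_databases_two_phase.js further below drives the same mechanism directly through the 'rsSyncApplyStop' fail point; assuming that is what the helpers wrap, the pattern in isolation is ('secondary' is a placeholder connection):

// Sketch: hold back oplog application on a secondary, inspect the primary's
// drop-pending namespaces, then resume so the two phase drop can commit.
assert.commandWorked(
    secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
// ... while paused, the dropped collection remains under 'system.drop' ...
assert.commandWorked(
    secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));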
diff --git a/jstests/replsets/drop_collections_two_phase_step_down.js b/jstests/replsets/drop_collections_two_phase_step_down.js
index 5d67d60e73a..849a1c82e29 100644
--- a/jstests/replsets/drop_collections_two_phase_step_down.js
+++ b/jstests/replsets/drop_collections_two_phase_step_down.js
@@ -13,58 +13,58 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_step_down";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_step_down";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- // Step primary down using {force: true} and wait for the same node to become primary again.
- // We use {force: true} because the current secondary has oplog application disabled and will
- // not be able to take over as primary.
- try {
- const primary = replTest.getPrimary();
- const primaryId = replTest.getNodeId(primary);
+// Step primary down using {force: true} and wait for the same node to become primary again.
+// We use {force: true} because the current secondary has oplog application disabled and will
+// not be able to take over as primary.
+try {
+ const primary = replTest.getPrimary();
+ const primaryId = replTest.getNodeId(primary);
- // Force step down primary.
- jsTestLog('Stepping down primary ' + primary.host + ' with {force: true}.');
- // The amount of time the node has to wait before becoming primary again.
- const stepDownSecs = 1;
- assert.commandWorked(primary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
+ // Force step down primary.
+ jsTestLog('Stepping down primary ' + primary.host + ' with {force: true}.');
+ // The amount of time the node has to wait before becoming primary again.
+ const stepDownSecs = 1;
+ assert.commandWorked(primary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
- // Wait for the node that stepped down to regain PRIMARY status.
- jsTestLog('Waiting for node ' + primary.host + ' to become primary again');
- assert.eq(replTest.nodes[primaryId], replTest.getPrimary());
+ // Wait for the node that stepped down to regain PRIMARY status.
+ jsTestLog('Waiting for node ' + primary.host + ' to become primary again');
+ assert.eq(replTest.nodes[primaryId], replTest.getPrimary());
- jsTestLog('Node ' + primary.host + ' is now PRIMARY again. Checking if drop-pending' +
- ' collection still exists.');
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'After stepping down and back up again, the primary ' + primary.host +
- ' removed drop-pending collection unexpectedly');
+ jsTestLog('Node ' + primary.host + ' is now PRIMARY again. Checking if drop-pending' +
+ ' collection still exists.');
+ assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'After stepping down and back up again, the primary ' + primary.host +
+ ' removed drop-pending collection unexpectedly');
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
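Reduced to its essentials, the step-down sequence above is short; a sketch assuming an initialized ReplSetTest fixture ('replTest') and its primary connection ('primary'):

// Sketch: force a step-down ({force: true} because the paused secondary cannot
// take over), then wait for the same node to regain PRIMARY.
const stepDownSecs = 1;
assert.commandWorked(primary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
assert.soonNoExcept(() => replTest.getPrimary().host === primary.host);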
diff --git a/jstests/replsets/drop_collections_two_phase_write_concern.js b/jstests/replsets/drop_collections_two_phase_write_concern.js
index 27b133cb915..e7b60eb18fb 100644
--- a/jstests/replsets/drop_collections_two_phase_write_concern.js
+++ b/jstests/replsets/drop_collections_two_phase_write_concern.js
@@ -4,81 +4,87 @@
*/
(function() {
- 'use strict';
-
- load('jstests/libs/check_log.js');
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
-
- // Alias to logging function in two_phase_drops.js
- const testLog = TwoPhaseDropCollectionTest._testLog;
-
- /**
- * Ensures that the operation fails with a write concern timeout.
- */
- function assertTimeout(result) {
- assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
- assert(result.hasWriteConcernError(), tojson(result));
- assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
- }
-
- // Set up a two phase drop test.
- let testName = 'drop_collection_two_phase_write_concern';
- let dbName = testName;
- let collName = 'collToDrop';
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
-
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
-
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
-
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
-
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const collForInserts = primaryDB.getCollection('collForInserts');
- const writeConcernForSuccessfulOp = {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS};
- assert.writeOK(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp}));
-
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
-
- const writeConcernForTimedOutOp = {w: 'majority', wtimeout: 10000};
- assertTimeout(collForInserts.insert({_id: 1}, {writeConcern: writeConcernForTimedOutOp}));
-
- // Prevent drop collection reaper from making progress after resuming oplog application.
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'alwaysOn'}));
-
- try {
- // Ensure that drop pending collection is not removed after resuming oplog application.
- testLog('Restarting oplog application on the secondary node.');
- twoPhaseDropTest.resumeOplogApplication(twoPhaseDropTest.replTest.getSecondary());
-
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(primary, 'fail point dropPendingCollectionReaperHang enabled');
-
- // While the drop pending collection reaper is blocked, an operation waiting on a majority
- // write concern should time out.
- assertTimeout(collForInserts.insert({_id: 2}, {writeConcern: writeConcernForTimedOutOp}));
- } finally {
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'off'}));
- }
-
- // After the reaper is unblocked, an operation waiting on a majority write concern should run
- // complete successfully.
- assert.writeOK(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp}));
- assert.eq(4, collForInserts.find().itcount());
-
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
+'use strict';
+load('jstests/libs/check_log.js');
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+
+// Alias to logging function in two_phase_drops.js
+const testLog = TwoPhaseDropCollectionTest._testLog;
+
+/**
+ * Ensures that the operation fails with a write concern timeout.
+ */
+function assertTimeout(result) {
+ assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
+ assert(result.hasWriteConcernError(), tojson(result));
+ assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
+}
+
+// Set up a two phase drop test.
+let testName = 'drop_collection_two_phase_write_concern';
+let dbName = testName;
+let collName = 'collToDrop';
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
+
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
twoPhaseDropTest.stop();
+ return;
+}
+
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
+
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const collForInserts = primaryDB.getCollection('collForInserts');
+const writeConcernForSuccessfulOp = {
+ w: 'majority',
+ wtimeout: replTest.kDefaultTimeoutMS
+};
+assert.writeOK(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp}));
+
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
+
+const writeConcernForTimedOutOp = {
+ w: 'majority',
+ wtimeout: 10000
+};
+assertTimeout(collForInserts.insert({_id: 1}, {writeConcern: writeConcernForTimedOutOp}));
+
+// Prevent drop collection reaper from making progress after resuming oplog application.
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'alwaysOn'}));
+
+try {
+ // Ensure that drop pending collection is not removed after resuming oplog application.
+ testLog('Restarting oplog application on the secondary node.');
+ twoPhaseDropTest.resumeOplogApplication(twoPhaseDropTest.replTest.getSecondary());
+
+ // Ensure that we've hit the failpoint before moving on.
+ checkLog.contains(primary, 'fail point dropPendingCollectionReaperHang enabled');
+
+ // While the drop pending collection reaper is blocked, an operation waiting on a majority
+ // write concern should time out.
+ assertTimeout(collForInserts.insert({_id: 2}, {writeConcern: writeConcernForTimedOutOp}));
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'off'}));
+}
+
+// After the reaper is unblocked, an operation waiting on a majority write concern should
+// complete successfully.
+assert.writeOK(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp}));
+assert.eq(4, collForInserts.find().itcount());
+
+// COMMIT collection drop.
+twoPhaseDropTest.commitDropCollection(collName);
+
+twoPhaseDropTest.stop();
}());
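The timeout assertions above hinge on the shape of a write concern error: the write itself is applied locally, but the majority write concern times out. In isolation ('coll' is a placeholder collection on the primary):

// Sketch: a majority write cannot be satisfied while the secondary holds back
// the commit point, so the result carries a write concern timeout error.
const writeRes = coll.insert({_id: 99}, {writeConcern: {w: 'majority', wtimeout: 10000}});
assert.writeErrorWithCode(writeRes, ErrorCodes.WriteConcernFailed);
assert(writeRes.hasWriteConcernError(), tojson(writeRes));
assert(writeRes.getWriteConcernError().errInfo.wtimeout, tojson(writeRes));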
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index 065da9b66e9..5a00ebe2e9e 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -15,166 +15,164 @@
*/
(function() {
- "use strict";
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/libs/check_log.js');
-
- // Returns a list of all collections in a given database. Use 'args' as the
- // 'listCollections' command arguments.
- function listCollections(database, args) {
- var args = args || {};
- var failMsg = "'listCollections' command failed";
- var res = assert.commandWorked(database.runCommand("listCollections", args), failMsg);
- return res.cursor.firstBatch;
- }
-
- // Returns a list of 'drop-pending' collections. The collection names should be of the
- // format "system.drop.<optime>.<collectionName>", where 'optime' is the optime of the
- // collection drop operation, encoded as a string, and 'collectionName' is the original
- // collection name.
- function listDropPendingCollections(database) {
- var pendingDropRegex = new RegExp("system\.drop\..*\." + collNameToDrop + "$");
- var collections = listCollections(database, {includePendingDrops: true});
- return collections.filter(c => pendingDropRegex.test(c.name));
- }
-
- // Returns a list of all collection names in a given database.
- function listCollectionNames(database, args) {
- return listCollections(database, args).map(c => c.name);
- }
-
- // Sets a fail point on a specified node.
- function setFailPoint(node, failpoint, mode) {
- assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: mode}));
- }
+"use strict";
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/libs/check_log.js');
+
+// Returns a list of all collections in a given database. Use 'args' as the
+// 'listCollections' command arguments.
+function listCollections(database, args) {
+ var args = args || {};
+ var failMsg = "'listCollections' command failed";
+ var res = assert.commandWorked(database.runCommand("listCollections", args), failMsg);
+ return res.cursor.firstBatch;
+}
+
+// Returns a list of 'drop-pending' collections. The collection names should be of the
+// format "system.drop.<optime>.<collectionName>", where 'optime' is the optime of the
+// collection drop operation, encoded as a string, and 'collectionName' is the original
+// collection name.
+function listDropPendingCollections(database) {
+    var pendingDropRegex = new RegExp("system\\.drop\\..*\\." + collNameToDrop + "$");
+ var collections = listCollections(database, {includePendingDrops: true});
+ return collections.filter(c => pendingDropRegex.test(c.name));
+}
+
+// Returns a list of all collection names in a given database.
+function listCollectionNames(database, args) {
+ return listCollections(database, args).map(c => c.name);
+}
+
+// Sets a fail point on a specified node.
+function setFailPoint(node, failpoint, mode) {
+ assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: mode}));
+}
+
+var dbNameToDrop = 'dbToDrop';
+var replTest = new ReplSetTest({nodes: [{}, {}, {arbiter: true}]});
+
+// Initiate the replica set.
+replTest.startSet();
+replTest.initiate();
+replTest.awaitReplication();
+
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
+
+var dbToDrop = primary.getDB(dbNameToDrop);
+var collNameToDrop = "collectionToDrop";
+
+// Create the collection that will be dropped and let it replicate.
+var collToDrop = dbToDrop.getCollection(collNameToDrop);
+assert.writeOK(
+ collToDrop.insert({_id: 0}, {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}}));
+assert.eq(1, collToDrop.find().itcount());
+
+// Pause application on secondary so that commit point doesn't advance, meaning that a dropped
+// database on the primary will remain in 'drop-pending' state.
+jsTestLog("Pausing oplog application on the secondary node.");
+setFailPoint(secondary, "rsSyncApplyStop", "alwaysOn");
+
+// Make sure the collection was created.
+assert.contains(collNameToDrop,
+ listCollectionNames(dbToDrop),
+ "Collection '" + collNameToDrop + "' wasn't created properly");
+/**
+ * DROP DATABASE 'Collections' PHASE
+ */
+
+// Drop the collection on the primary.
+var dropDatabaseFn = function() {
var dbNameToDrop = 'dbToDrop';
- var replTest = new ReplSetTest({nodes: [{}, {}, {arbiter: true}]});
-
- // Initiate the replica set.
- replTest.startSet();
- replTest.initiate();
- replTest.awaitReplication();
-
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
-
- var dbToDrop = primary.getDB(dbNameToDrop);
- var collNameToDrop = "collectionToDrop";
-
- // Create the collection that will be dropped and let it replicate.
- var collToDrop = dbToDrop.getCollection(collNameToDrop);
- assert.writeOK(
- collToDrop.insert({_id: 0}, {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(1, collToDrop.find().itcount());
-
- // Pause application on secondary so that commit point doesn't advance, meaning that a dropped
- // database on the primary will remain in 'drop-pending' state.
- jsTestLog("Pausing oplog application on the secondary node.");
- setFailPoint(secondary, "rsSyncApplyStop", "alwaysOn");
-
- // Make sure the collection was created.
- assert.contains(collNameToDrop,
- listCollectionNames(dbToDrop),
- "Collection '" + collNameToDrop + "' wasn't created properly");
-
- /**
- * DROP DATABASE 'Collections' PHASE
- */
-
- // Drop the collection on the primary.
- var dropDatabaseFn = function() {
- var dbNameToDrop = 'dbToDrop';
- var primary = db.getMongo();
- jsTestLog(
- 'Dropping database ' + dbNameToDrop + ' on primary node ' + primary.host +
- '. This command will block because oplog application is paused on the secondary.');
- var dbToDrop = db.getSiblingDB(dbNameToDrop);
- assert.commandWorked(dbToDrop.dropDatabase());
- jsTestLog('Database ' + dbNameToDrop + ' successfully dropped on primary node ' +
- primary.host);
- };
- var dropDatabaseProcess = startParallelShell(dropDatabaseFn, primary.port);
-
- // Check that primary has started two phase drop of the collection.
- jsTestLog('Waiting for primary ' + primary.host + ' to prepare two phase drop of collection ' +
+ var primary = db.getMongo();
+ jsTestLog('Dropping database ' + dbNameToDrop + ' on primary node ' + primary.host +
+ '. This command will block because oplog application is paused on the secondary.');
+ var dbToDrop = db.getSiblingDB(dbNameToDrop);
+ assert.commandWorked(dbToDrop.dropDatabase());
+ jsTestLog('Database ' + dbNameToDrop + ' successfully dropped on primary node ' + primary.host);
+};
+var dropDatabaseProcess = startParallelShell(dropDatabaseFn, primary.port);
+
+// Check that primary has started two phase drop of the collection.
+jsTestLog('Waiting for primary ' + primary.host + ' to prepare two phase drop of collection ' +
+ collToDrop.getFullName());
+assert.soonNoExcept(
+ function() {
+ return collToDrop.find().itcount() == 0;
+ },
+ 'Primary ' + primary.host + ' failed to prepare two phase drop of collection ' +
+ collToDrop.getFullName());
+
+// 'collToDrop' is no longer visible with its original name. If 'system.drop' two phase drops
+// are supported by the storage engine, check for the drop-pending namespace using
+// listCollections.
+const supportsDropPendingNamespaces =
+ TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replTest);
+if (supportsDropPendingNamespaces) {
+ var dropPendingCollections = listDropPendingCollections(dbToDrop);
+ assert.eq(1,
+ dropPendingCollections.length,
+ "Collection was not found in the 'system.drop' namespace. " +
+ "Full drop-pending collection list: " + tojson(dropPendingCollections));
+ jsTestLog('Primary ' + primary.host + ' successfully started two phase drop of collection ' +
collToDrop.getFullName());
- assert.soonNoExcept(
- function() {
- return collToDrop.find().itcount() == 0;
- },
- 'Primary ' + primary.host + ' failed to prepare two phase drop of collection ' +
- collToDrop.getFullName());
-
- // 'collToDrop' is no longer visible with its original name. If 'system.drop' two phase drops
- // are supported by the storage engine, check for the drop-pending namespace using
- // listCollections.
- const supportsDropPendingNamespaces =
- TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replTest);
- if (supportsDropPendingNamespaces) {
- var dropPendingCollections = listDropPendingCollections(dbToDrop);
- assert.eq(1,
- dropPendingCollections.length,
- "Collection was not found in the 'system.drop' namespace. " +
- "Full drop-pending collection list: " + tojson(dropPendingCollections));
- jsTestLog('Primary ' + primary.host +
- ' successfully started two phase drop of collection ' + collToDrop.getFullName());
- }
-
- // Commands that manipulate the database being dropped or perform destructive catalog operations
- // should fail with the DatabaseDropPending error code while the database is in a drop-pending
- // state.
+}
+
+// Commands that manipulate the database being dropped or perform destructive catalog operations
+// should fail with the DatabaseDropPending error code while the database is in a drop-pending
+// state.
+assert.commandFailedWithCode(
+ dbToDrop.createCollection('collectionToCreateWhileDroppingDatabase'),
+ ErrorCodes.DatabaseDropPending,
+ 'collection creation should fail while we are in the process of dropping the database');
+
+// restartCatalog can only detect that a database is in a drop-pending state when 'system.drop'
+// namespaces are supported. Since 4.2, dropped collections are managed internally by the
+// storage engine. See serverStatus().
+if (supportsDropPendingNamespaces) {
assert.commandFailedWithCode(
- dbToDrop.createCollection('collectionToCreateWhileDroppingDatabase'),
+ dbToDrop.adminCommand('restartCatalog'),
ErrorCodes.DatabaseDropPending,
- 'collection creation should fail while we are in the process of dropping the database');
-
- // restartCatalog can only detect that a database is in a drop-pending state when 'system.drop'
- // namespaces are supported. Since 4.2, dropped collections are managed internally by the
- // storage engine. See serverStatus().
- if (supportsDropPendingNamespaces) {
- assert.commandFailedWithCode(
- dbToDrop.adminCommand('restartCatalog'),
- ErrorCodes.DatabaseDropPending,
- 'restartCatalog should fail if any databases are marked drop-pending');
- } else {
- // Drop-pending idents are known only to the storage engine and will be ignored by
- // restartCatalog.
- assert.commandWorked(dbToDrop.adminCommand('restartCatalog'));
- }
-
- /**
- * DROP DATABASE 'Database' PHASE
- */
-
- // Let the secondary apply the collection drop operation, so that the replica set commit point
- // will advance, and the 'Database' phase of the database drop will complete on the primary.
- jsTestLog("Restarting oplog application on the secondary node.");
- setFailPoint(secondary, "rsSyncApplyStop", "off");
-
- jsTestLog("Waiting for collection drop operation to replicate to all nodes.");
- replTest.awaitReplication();
-
- // Make sure the collection has been fully dropped. It should not appear as
- // a normal collection or under the 'system.drop' namespace any longer. Physical collection
- // drops may happen asynchronously, any time after the drop operation is committed, so we wait
- // to make sure the collection is eventually dropped.
- assert.soonNoExcept(function() {
- var dropPendingCollections = listDropPendingCollections(dbToDrop);
- jsTestLog('Drop pending collections: ' + tojson(dropPendingCollections));
- return dropPendingCollections.length == 0;
- });
-
- jsTestLog('Waiting for dropDatabase command on ' + primary.host + ' to complete.');
- var exitCode = dropDatabaseProcess();
-
- let db = primary.getDB(dbNameToDrop);
- checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
- checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
-
- assert.eq(0, exitCode, 'dropDatabase command on ' + primary.host + ' failed.');
- jsTestLog('Completed dropDatabase command on ' + primary.host);
-
- replTest.stopSet();
+ 'restartCatalog should fail if any databases are marked drop-pending');
+} else {
+ // Drop-pending idents are known only to the storage engine and will be ignored by
+ // restartCatalog.
+ assert.commandWorked(dbToDrop.adminCommand('restartCatalog'));
+}
+
+/**
+ * DROP DATABASE 'Database' PHASE
+ */
+
+// Let the secondary apply the collection drop operation, so that the replica set commit point
+// will advance, and the 'Database' phase of the database drop will complete on the primary.
+jsTestLog("Restarting oplog application on the secondary node.");
+setFailPoint(secondary, "rsSyncApplyStop", "off");
+
+jsTestLog("Waiting for collection drop operation to replicate to all nodes.");
+replTest.awaitReplication();
+
+// Make sure the collection has been fully dropped. It should not appear as
+// a normal collection or under the 'system.drop' namespace any longer. Physical collection
+// drops may happen asynchronously, any time after the drop operation is committed, so we wait
+// to make sure the collection is eventually dropped.
+assert.soonNoExcept(function() {
+ var dropPendingCollections = listDropPendingCollections(dbToDrop);
+ jsTestLog('Drop pending collections: ' + tojson(dropPendingCollections));
+ return dropPendingCollections.length == 0;
+});
+
+jsTestLog('Waiting for dropDatabase command on ' + primary.host + ' to complete.');
+var exitCode = dropDatabaseProcess();
+
+let db = primary.getDB(dbNameToDrop);
+checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
+checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
+
+assert.eq(0, exitCode, 'dropDatabase command on ' + primary.host + ' failed.');
+jsTestLog('Completed dropDatabase command on ' + primary.host);
+
+replTest.stopSet();
}());
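
For context: the assertions above only hold while oplog application is paused on the secondary, which keeps the replica set commit point from advancing and parks the drop in its 'Collections' phase. A minimal sketch of the failpoint toggle that the test's setFailPoint helper (defined outside this hunk) presumably wraps:

    // Pause oplog application on the secondary; the drop cannot commit
    // while the commit point is held back.
    assert.commandWorked(secondary.adminCommand(
        {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));

    // ... run dropDatabase in a parallel shell, check drop-pending state ...

    // Resume oplog application so the 'Database' phase can complete.
    assert.commandWorked(secondary.adminCommand(
        {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));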
diff --git a/jstests/replsets/drop_db.js b/jstests/replsets/drop_db.js
index 69f34eba059..49ee3e04406 100644
--- a/jstests/replsets/drop_db.js
+++ b/jstests/replsets/drop_db.js
@@ -4,57 +4,56 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- function checkWriteConcern(testFn, checkFn) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function checkWriteConcern(testFn, checkFn) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const sentinel = {};
- let cmdObjSeen = sentinel;
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- try {
- assert.doesNotThrow(testFn);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
-
- if (cmdObjSeen == sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
- }
+ try {
+ assert.doesNotThrow(testFn);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
+ }
- checkFn(cmdObjSeen);
+ if (cmdObjSeen == sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
}
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "dbDrop";
- const collName = "coll";
- const primaryDB = rst.getPrimary().getDB(dbName);
-
- primaryDB.createCollection(collName);
- checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: "majority"})),
- (cmdObj) => {
- assert.eq(cmdObj.writeConcern, {w: "majority"});
- });
-
- primaryDB.createCollection(collName);
- checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: 1})), (cmdObj) => {
- assert.eq(cmdObj.writeConcern, {w: 1});
- });
-
- primaryDB.createCollection(collName);
- checkWriteConcern(() => assert.commandFailedWithCode(primaryDB.dropDatabase({w: 100000}),
- ErrorCodes.UnsatisfiableWriteConcern),
- (cmdObj) => {
- assert.eq(cmdObj.writeConcern, {w: 100000});
- });
-
- rst.stopSet();
+ checkFn(cmdObjSeen);
+}
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const dbName = "dbDrop";
+const collName = "coll";
+const primaryDB = rst.getPrimary().getDB(dbName);
+
+primaryDB.createCollection(collName);
+checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: "majority"})), (cmdObj) => {
+ assert.eq(cmdObj.writeConcern, {w: "majority"});
+});
+
+primaryDB.createCollection(collName);
+checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: 1})), (cmdObj) => {
+ assert.eq(cmdObj.writeConcern, {w: 1});
+});
+
+primaryDB.createCollection(collName);
+checkWriteConcern(() => assert.commandFailedWithCode(primaryDB.dropDatabase({w: 100000}),
+ ErrorCodes.UnsatisfiableWriteConcern),
+ (cmdObj) => {
+ assert.eq(cmdObj.writeConcern, {w: 100000});
+ });
+
+rst.stopSet();
})();
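
The checkWriteConcern helper above works by temporarily swapping Mongo.prototype.runCommand for a spy, capturing the exact command object the shell sends before delegating to the original. The same pattern can assert on any outgoing command field; a minimal sketch, assuming a connected shell global db:

    const mongoRunCommandOriginal = Mongo.prototype.runCommand;
    let cmdObjSeen = null;
    Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
        cmdObjSeen = cmdObj;  // record the outgoing command
        return mongoRunCommandOriginal.apply(this, arguments);
    };
    try {
        db.getSiblingDB('test').runCommand({ping: 1});
    } finally {
        Mongo.prototype.runCommand = mongoRunCommandOriginal;  // always restore
    }
    assert.eq(1, cmdObjSeen.ping);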
diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js
index 2ba8dc44c72..a53da5ae483 100644
--- a/jstests/replsets/drop_oplog.js
+++ b/jstests/replsets/drop_oplog.js
@@ -2,35 +2,35 @@
// prohibited in a replset.
(function() {
- "use strict";
- let rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30});
+"use strict";
+let rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30});
- let nodes = rt.startSet();
- rt.initiate();
- let master = rt.getPrimary();
- let localDB = master.getDB('local');
+let nodes = rt.startSet();
+rt.initiate();
+let master = rt.getPrimary();
+let localDB = master.getDB('local');
- let threw = false;
+let threw = false;
- let ret = assert.commandFailed(localDB.runCommand({drop: 'oplog.rs'}));
- assert.eq('can\'t drop live oplog while replicating', ret.errmsg);
+let ret = assert.commandFailed(localDB.runCommand({drop: 'oplog.rs'}));
+assert.eq('can\'t drop live oplog while replicating', ret.errmsg);
- let dropOutput = localDB.dropDatabase();
- assert.eq(dropOutput.ok, 0);
- assert.eq(dropOutput.errmsg, "Cannot drop 'local' database while replication is active");
+let dropOutput = localDB.dropDatabase();
+assert.eq(dropOutput.ok, 0);
+assert.eq(dropOutput.errmsg, "Cannot drop 'local' database while replication is active");
- let adminDB = master.getDB('admin');
- dropOutput = adminDB.dropDatabase();
- assert.eq(dropOutput.ok, 0);
- assert.eq(dropOutput.errmsg, "Dropping the 'admin' database is prohibited.");
+let adminDB = master.getDB('admin');
+dropOutput = adminDB.dropDatabase();
+assert.eq(dropOutput.ok, 0);
+assert.eq(dropOutput.errmsg, "Dropping the 'admin' database is prohibited.");
- let renameOutput = localDB.oplog.rs.renameCollection("poison");
- assert.eq(renameOutput.ok, 0);
- assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating");
+let renameOutput = localDB.oplog.rs.renameCollection("poison");
+assert.eq(renameOutput.ok, 0);
+assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating");
- assert.writeOK(localDB.foo.insert({a: 1}));
- renameOutput = localDB.foo.renameCollection("oplog.rs");
- assert.eq(renameOutput.ok, 0);
- assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating");
- rt.stopSet();
+assert.writeOK(localDB.foo.insert({a: 1}));
+renameOutput = localDB.foo.renameCollection("oplog.rs");
+assert.eq(renameOutput.ok, 0);
+assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating");
+rt.stopSet();
}());
diff --git a/jstests/replsets/election_handoff_basic.js b/jstests/replsets/election_handoff_basic.js
index c11a60612a2..2c1e27b6ece 100644
--- a/jstests/replsets/election_handoff_basic.js
+++ b/jstests/replsets/election_handoff_basic.js
@@ -5,22 +5,24 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_vanilla";
- const numNodes = 2;
- const rst = ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_vanilla";
+const numNodes = 2;
+const rst = ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- const config = rst.getReplSetConfig();
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+const config = rst.getReplSetConfig();
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_flip.js b/jstests/replsets/election_handoff_flip.js
index 6e6c6f7bd66..c2576023048 100644
--- a/jstests/replsets/election_handoff_flip.js
+++ b/jstests/replsets/election_handoff_flip.js
@@ -4,24 +4,26 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_flip";
- const numNodes = 2;
- const rst = ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_flip";
+const numNodes = 2;
+const rst = ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- const config = rst.getReplSetConfig();
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+const config = rst.getReplSetConfig();
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
- sleep(ElectionHandoffTest.stepDownPeriodSecs * 1000);
- ElectionHandoffTest.testElectionHandoff(rst, 1, 0);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
+sleep(ElectionHandoffTest.stepDownPeriodSecs * 1000);
+ElectionHandoffTest.testElectionHandoff(rst, 1, 0);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_higher_priority.js b/jstests/replsets/election_handoff_higher_priority.js
index 78a866a1201..12ac4914a40 100644
--- a/jstests/replsets/election_handoff_higher_priority.js
+++ b/jstests/replsets/election_handoff_higher_priority.js
@@ -6,26 +6,28 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_higher_priority";
- const numNodes = 3;
- const rst = ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_higher_priority";
+const numNodes = 3;
+const rst = ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[0].priority = 3;
- config.members[1].priority = 1;
- config.members[2].priority = 2;
+const config = rst.getReplSetConfig();
+config.members[0].priority = 3;
+config.members[1].priority = 1;
+config.members[2].priority = 2;
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_one_unelectable.js b/jstests/replsets/election_handoff_one_unelectable.js
index 97546cbb1ea..970b605197c 100644
--- a/jstests/replsets/election_handoff_one_unelectable.js
+++ b/jstests/replsets/election_handoff_one_unelectable.js
@@ -6,24 +6,26 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_one_unelectable";
- const numNodes = 3;
- const rst = ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_one_unelectable";
+const numNodes = 3;
+const rst = ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[1].priority = 0;
+const config = rst.getReplSetConfig();
+config.members[1].priority = 0;
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_via_signal.js b/jstests/replsets/election_handoff_via_signal.js
index 4bc58c95d40..bca8d4b4991 100644
--- a/jstests/replsets/election_handoff_via_signal.js
+++ b/jstests/replsets/election_handoff_via_signal.js
@@ -4,22 +4,24 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_via_signal";
- const numNodes = 3;
- const rst = ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_via_signal";
+const numNodes = 3;
+const rst = ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- const config = rst.getReplSetConfig();
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+const config = rst.getReplSetConfig();
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 1, {stepDownBySignal: true});
+ElectionHandoffTest.testElectionHandoff(rst, 0, 1, {stepDownBySignal: true});
- rst.stopSet();
+rst.stopSet();
})();
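
All five election handoff variants above share one guard: an effectively infinite election timeout, so a new primary can only emerge via the handoff itself rather than a timeout-triggered election. A minimal sketch of that shared setup, with a hypothetical test name:

    const rst = new ReplSetTest({name: 'election_handoff_demo', nodes: 2});
    rst.startSet();
    const config = rst.getReplSetConfig();
    // 12 hours in milliseconds: no election timeout can fire mid-test.
    config.settings = {
        electionTimeoutMillis: 12 * 60 * 60 * 1000
    };
    rst.initiate(config);
    // ... trigger a stepdown and assert the expected node takes over ...
    rst.stopSet();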
diff --git a/jstests/replsets/emptycapped.js b/jstests/replsets/emptycapped.js
index b3aa6093be2..e15322935eb 100644
--- a/jstests/replsets/emptycapped.js
+++ b/jstests/replsets/emptycapped.js
@@ -1,98 +1,96 @@
// This tests the emptycapped command in a replica set.
(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- var primaryTestDB = rst.getPrimary().getDB('test');
- var primaryLocalDB = rst.getPrimary().getDB('local');
- var primaryAdminDB = rst.getPrimary().getDB('admin');
- var secondaryTestDB = rst.getSecondary().getDB('test');
-
- // Truncate a non-capped collection.
- assert.writeOK(primaryTestDB.noncapped.insert({x: 1}));
- assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'noncapped'}));
- assert.eq(primaryTestDB.noncapped.find().itcount(),
- 0,
- "Expected 0 documents to exist after emptying the collection");
-
- // Truncate a non-existent collection on a non-existent database.
- assert.commandWorked(rst.getPrimary().getDB('nonexistent').dropDatabase());
- assert.commandFailedWithCode(
- rst.getPrimary().getDB('nonexistent').runCommand({emptycapped: 'nonexistent'}),
- ErrorCodes.NamespaceNotFound);
-
- // Truncate a non-existent collection.
- primaryTestDB.nonexistent.drop();
- assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: 'nonexistent'}),
- ErrorCodes.NamespaceNotFound);
-
- // Truncate a capped collection.
- assert.commandWorked(primaryTestDB.createCollection("capped", {capped: true, size: 4096}));
- assert.writeOK(primaryTestDB.capped.insert({}));
- assert.eq(
- primaryTestDB.capped.find().itcount(), 1, "Expected 1 document to exist after an insert");
- assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'capped'}));
- assert.eq(primaryTestDB.capped.find().itcount(),
- 0,
- "Expected 0 documents to exist after emptying the collection");
-
- // Truncate a capped collection on a secondary.
- assert.commandFailedWithCode(secondaryTestDB.runCommand({emptycapped: 'capped'}),
- ErrorCodes.NotMaster);
-
- // Truncate the oplog.
- assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"}),
- ErrorCodes.OplogOperationUnsupported);
-
- // Test system collections, which cannot be truncated except system.profile.
-
- // Truncate the local system.js collection.
- assert.writeOK(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"}));
- assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.js"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the system.profile collection.
- assert.commandWorked(
- primaryTestDB.createCollection("system.profile", {capped: true, size: 4096}));
- assert.commandWorked(primaryTestDB.runCommand({profile: 2}));
- assert.commandWorked(primaryTestDB.runCommand({emptycapped: "system.profile"}));
- assert.commandWorked(primaryTestDB.runCommand({profile: 0}));
- assert(primaryTestDB.system.profile.drop(), "Failed to drop the system.profile collection");
-
- // Truncate the local system.replset collection.
- assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "system.replset"}),
- ErrorCodes.IllegalOperation);
-
- // Test user & role management system collections.
- assert.commandWorked(primaryAdminDB.runCommand({
- createRole: "all1",
- privileges: [{resource: {db: "", collection: ""}, actions: ["anyAction"]}],
- roles: []
- }));
- assert.commandWorked(primaryAdminDB.runCommand(
- {createUser: "root2", pwd: "pwd", roles: [{role: "root", db: "admin"}]}));
-
- // TODO: Test system.backup_users & system.new_users.
-
- // Truncate the admin system.roles collection.
- assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.roles"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the admin system.users collection.
- assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.users"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the admin system.version collection.
- assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.version"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the local system.views collection.
- assert.commandWorked(primaryTestDB.runCommand(
- {create: "view1", viewOn: "collection", pipeline: [{$match: {}}]}));
- assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.views"}),
- ErrorCodes.IllegalOperation);
- rst.stopSet();
+"use strict";
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+var primaryTestDB = rst.getPrimary().getDB('test');
+var primaryLocalDB = rst.getPrimary().getDB('local');
+var primaryAdminDB = rst.getPrimary().getDB('admin');
+var secondaryTestDB = rst.getSecondary().getDB('test');
+
+// Truncate a non-capped collection.
+assert.writeOK(primaryTestDB.noncapped.insert({x: 1}));
+assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'noncapped'}));
+assert.eq(primaryTestDB.noncapped.find().itcount(),
+ 0,
+ "Expected 0 documents to exist after emptying the collection");
+
+// Truncate a non-existent collection on a non-existent database.
+assert.commandWorked(rst.getPrimary().getDB('nonexistent').dropDatabase());
+assert.commandFailedWithCode(
+ rst.getPrimary().getDB('nonexistent').runCommand({emptycapped: 'nonexistent'}),
+ ErrorCodes.NamespaceNotFound);
+
+// Truncate a non-existent collection.
+primaryTestDB.nonexistent.drop();
+assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: 'nonexistent'}),
+ ErrorCodes.NamespaceNotFound);
+
+// Truncate a capped collection.
+assert.commandWorked(primaryTestDB.createCollection("capped", {capped: true, size: 4096}));
+assert.writeOK(primaryTestDB.capped.insert({}));
+assert.eq(primaryTestDB.capped.find().itcount(), 1, "Expected 1 document to exist after an insert");
+assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'capped'}));
+assert.eq(primaryTestDB.capped.find().itcount(),
+ 0,
+ "Expected 0 documents to exist after emptying the collection");
+
+// Truncate a capped collection on a secondary.
+assert.commandFailedWithCode(secondaryTestDB.runCommand({emptycapped: 'capped'}),
+ ErrorCodes.NotMaster);
+
+// Truncate the oplog.
+assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"}),
+ ErrorCodes.OplogOperationUnsupported);
+
+// Test system collections, none of which can be truncated except system.profile.
+
+// Truncate the local system.js collection.
+assert.writeOK(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"}));
+assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.js"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the system.profile collection.
+assert.commandWorked(primaryTestDB.createCollection("system.profile", {capped: true, size: 4096}));
+assert.commandWorked(primaryTestDB.runCommand({profile: 2}));
+assert.commandWorked(primaryTestDB.runCommand({emptycapped: "system.profile"}));
+assert.commandWorked(primaryTestDB.runCommand({profile: 0}));
+assert(primaryTestDB.system.profile.drop(), "Failed to drop the system.profile collection");
+
+// Truncate the local system.replset collection.
+assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "system.replset"}),
+ ErrorCodes.IllegalOperation);
+
+// Test user & role management system collections.
+assert.commandWorked(primaryAdminDB.runCommand({
+ createRole: "all1",
+ privileges: [{resource: {db: "", collection: ""}, actions: ["anyAction"]}],
+ roles: []
+}));
+assert.commandWorked(primaryAdminDB.runCommand(
+ {createUser: "root2", pwd: "pwd", roles: [{role: "root", db: "admin"}]}));
+
+// TODO: Test system.backup_users & system.new_users.
+
+// Truncate the admin system.roles collection.
+assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.roles"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the admin system.users collection.
+assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.users"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the admin system.version collection.
+assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.version"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the local system.views collection.
+assert.commandWorked(
+ primaryTestDB.runCommand({create: "view1", viewOn: "collection", pipeline: [{$match: {}}]}));
+assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.views"}),
+ ErrorCodes.IllegalOperation);
+rst.stopSet();
})();
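
For reference, emptycapped truncates a collection in place (capped or not, as the test shows) without dropping it; a minimal usage sketch against a test database:

    const testDB = db.getSiblingDB('test');
    assert.commandWorked(testDB.createCollection('capped', {capped: true, size: 4096}));
    assert.writeOK(testDB.capped.insert({x: 1}));
    assert.commandWorked(testDB.runCommand({emptycapped: 'capped'}));
    assert.eq(0, testDB.capped.find().itcount());  // emptied, not dropped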
diff --git a/jstests/replsets/failcommand_ignores_internal.js b/jstests/replsets/failcommand_ignores_internal.js
index 1d2a4f17e93..a1d6f2c82cd 100644
--- a/jstests/replsets/failcommand_ignores_internal.js
+++ b/jstests/replsets/failcommand_ignores_internal.js
@@ -1,35 +1,39 @@
// Tests that the "failCommand" failpoint ignores commands from internal clients: SERVER-34943.
// @tags: [requires_replication]
(function() {
- "use strict";
+"use strict";
- // Prevent elections.
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {votes: 0, priority: 0}}]});
- replTest.startSet();
- replTest.initiate();
- const primary = replTest.getPrimary();
- const testDB = primary.getDB("test_failcommand_ignores_internal");
+// Prevent elections.
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {votes: 0, priority: 0}}]});
+replTest.startSet();
+replTest.initiate();
+const primary = replTest.getPrimary();
+const testDB = primary.getDB("test_failcommand_ignores_internal");
- // Enough documents for three getMores.
- assert.commandWorked(testDB.collection.insertMany([{}, {}, {}]));
- const findReply = assert.commandWorked(testDB.runCommand({find: "collection", batchSize: 0}));
- const cursorId = findReply.cursor.id;
+// Enough documents for three getMores.
+assert.commandWorked(testDB.collection.insertMany([{}, {}, {}]));
+const findReply = assert.commandWorked(testDB.runCommand({find: "collection", batchSize: 0}));
+const cursorId = findReply.cursor.id;
- // Test failing twice with a particular error code.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
- }));
- const getMore = {getMore: cursorId, collection: "collection", batchSize: 1};
- assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
+// Test failing twice with a particular error code.
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
+}));
+const getMore = {
+ getMore: cursorId,
+ collection: "collection",
+ batchSize: 1
+};
+assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
- // Waits for secondaries to do getMores on the oplog, which should be ignored by failCommand.
- assert.commandWorked(testDB.collection.insertOne({}, {writeConcern: {w: 2}}));
+// Waits for secondaries to do getMores on the oplog, which should be ignored by failCommand.
+assert.commandWorked(testDB.collection.insertOne({}, {writeConcern: {w: 2}}));
- // Second getMore fails but third succeeds, because configureFailPoint was passed {times: 2}.
- assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
- assert.commandWorked(testDB.runCommand(getMore));
+// Second getMore fails but third succeeds, because configureFailPoint was passed {times: 2}.
+assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
+assert.commandWorked(testDB.runCommand(getMore));
- replTest.stopSet();
+replTest.stopSet();
}());
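
The failCommand failpoint above is driven by a mode (how many times it fires) and a data document (which commands fail and how). A minimal standalone sketch of the {times: N} semantics the comments describe:

    // Fail the next two find commands with BadValue, then behave normally.
    assert.commandWorked(db.adminCommand({
        configureFailPoint: 'failCommand',
        mode: {times: 2},
        data: {errorCode: ErrorCodes.BadValue, failCommands: ['find']}
    }));
    assert.commandFailedWithCode(db.runCommand({find: 'c'}), ErrorCodes.BadValue);
    assert.commandFailedWithCode(db.runCommand({find: 'c'}), ErrorCodes.BadValue);
    assert.commandWorked(db.runCommand({find: 'c'}));
    // An explicit 'off' is safe even after 'times' is exhausted.
    assert.commandWorked(db.adminCommand({configureFailPoint: 'failCommand', mode: 'off'}));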
diff --git a/jstests/replsets/find_and_modify_wc.js b/jstests/replsets/find_and_modify_wc.js
index c8055d97ef5..236ddad1afb 100644
--- a/jstests/replsets/find_and_modify_wc.js
+++ b/jstests/replsets/find_and_modify_wc.js
@@ -2,78 +2,76 @@
// Tests writeConcerns with findAndModify command
//
(function() {
- 'use strict';
+'use strict';
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
- var nodeCount = 3;
- var rst = new ReplSetTest({nodes: nodeCount});
- rst.startSet({nojournal: ""});
- rst.initiate();
+var nodeCount = 3;
+var rst = new ReplSetTest({nodes: nodeCount});
+rst.startSet({nojournal: ""});
+rst.initiate();
- var primary = rst.getPrimary();
- var coll = primary.getCollection("test.find_and_modify_wc");
- coll.remove({});
+var primary = rst.getPrimary();
+var coll = primary.getCollection("test.find_and_modify_wc");
+coll.remove({});
- // insert some documents
- var docs = [];
- for (var i = 1; i <= 5; ++i) {
- docs.push({i: i, j: 2 * i});
- }
- var res =
- coll.runCommand({insert: coll.getName(), documents: docs, writeConcern: {w: nodeCount}});
- assert(res.ok);
- assert.eq(5, coll.find().itcount());
+// insert some documents
+var docs = [];
+for (var i = 1; i <= 5; ++i) {
+ docs.push({i: i, j: 2 * i});
+}
+var res = coll.runCommand({insert: coll.getName(), documents: docs, writeConcern: {w: nodeCount}});
+assert(res.ok);
+assert.eq(5, coll.find().itcount());
- // use for updates in subsequent runCommand calls
- var reqUpdate = {
- findAndModify: coll.getName(),
- query: {i: 3},
- update: {$inc: {j: 1}},
- writeConcern: {w: 'majority'}
- };
+// use for updates in subsequent runCommand calls
+var reqUpdate = {
+ findAndModify: coll.getName(),
+ query: {i: 3},
+ update: {$inc: {j: 1}},
+ writeConcern: {w: 'majority'}
+};
- // Verify findAndModify returns old document new: false
- var res = coll.runCommand(reqUpdate);
- assert(res.ok);
- assert(res.value);
- // (2 * res.value.i) == 6 == res.value.j (old document)
- assert.eq(2 * res.value.i, res.value.j);
- assert(!res.writeConcernError);
+// Verify findAndModify returns old document new: false
+var res = coll.runCommand(reqUpdate);
+assert(res.ok);
+assert(res.value);
+// (2 * res.value.i) == 6 == res.value.j (old document)
+assert.eq(2 * res.value.i, res.value.j);
+assert(!res.writeConcernError);
- // Verify findAndModify returns new document with new: true
- reqUpdate.new = true;
- res = coll.runCommand(reqUpdate);
- assert(res.ok);
- assert(res.value);
- // (2 * res.value.i + 2) == 8 == res.value.j (new document after two updates)
- assert.eq(2 * res.value.i + 2, res.value.j);
- assert(!res.writeConcernError);
-
- // Verify findAndModify remove works
- res = coll.runCommand(
- {findAndModify: coll.getName(), sort: {i: 1}, remove: true, writeConcern: {w: nodeCount}});
- assert.eq(res.value.i, 1);
- assert.eq(coll.find().itcount(), 4);
- assert(!res.writeConcernError);
+// Verify findAndModify returns new document with new: true
+reqUpdate.new = true;
+res = coll.runCommand(reqUpdate);
+assert(res.ok);
+assert(res.value);
+// (2 * res.value.i + 2) == 8 == res.value.j (new document after two updates)
+assert.eq(2 * res.value.i + 2, res.value.j);
+assert(!res.writeConcernError);
- // Verify findAndModify returns writeConcernError
- // when given invalid writeConcerns
- [{w: 'invalid'}, {w: nodeCount + 1}].forEach(function(wc) {
- reqUpdate.writeConcern = wc;
- res = coll.runCommand(reqUpdate);
+// Verify findAndModify remove works
+res = coll.runCommand(
+ {findAndModify: coll.getName(), sort: {i: 1}, remove: true, writeConcern: {w: nodeCount}});
+assert.eq(res.value.i, 1);
+assert.eq(coll.find().itcount(), 4);
+assert(!res.writeConcernError);
- assert(res.writeConcernError);
- assert(res.writeConcernError.code);
- assert(res.writeConcernError.errmsg);
- });
+// Verify findAndModify returns writeConcernError
+// when given invalid writeConcerns
+[{w: 'invalid'}, {w: nodeCount + 1}].forEach(function(wc) {
+ reqUpdate.writeConcern = wc;
+ res = coll.runCommand(reqUpdate);
- rst.stopSet();
+ assert(res.writeConcernError);
+ assert(res.writeConcernError.code);
+ assert(res.writeConcernError.errmsg);
+});
+rst.stopSet();
})();
diff --git a/jstests/replsets/force_sync_source_candidate.js b/jstests/replsets/force_sync_source_candidate.js
index 4be7b3bb668..c359c4ac668 100644
--- a/jstests/replsets/force_sync_source_candidate.js
+++ b/jstests/replsets/force_sync_source_candidate.js
@@ -5,36 +5,35 @@
*/
(function() {
- "use strict";
-
- const failpointName = "forceSyncSourceCandidate";
-
- const rst = new ReplSetTest({
- nodes:
- [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- // Allow many initial sync attempts. Initial sync may fail if the sync source does not have
- // an oplog yet because it has not conducted its own initial sync yet.
- // We turn on the noop writer to encourage successful sync source selection.
- nodeOptions: {setParameter: {numInitialSyncAttempts: 100, writePeriodicNoops: true}}
- });
- const nodes = rst.startSet();
-
- function setFailPoint(node, syncSource) {
- const dataObj = {hostAndPort: syncSource.host};
- assert.commandWorked(node.adminCommand(
- {configureFailPoint: failpointName, mode: "alwaysOn", data: dataObj}));
- }
-
- setFailPoint(nodes[1], nodes[0]);
- setFailPoint(nodes[2], nodes[1]);
- setFailPoint(nodes[3], nodes[2]);
-
- rst.initiate();
- const primary = rst.getPrimary();
-
- rst.awaitSyncSource(nodes[1], nodes[0]);
- rst.awaitSyncSource(nodes[2], nodes[1]);
- rst.awaitSyncSource(nodes[3], nodes[2]);
-
- rst.stopSet();
+"use strict";
+
+const failpointName = "forceSyncSourceCandidate";
+
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ // Allow many initial sync attempts. Initial sync may fail if the sync source does not have
+ // an oplog yet because it has not conducted its own initial sync yet.
+ // We turn on the noop writer to encourage successful sync source selection.
+ nodeOptions: {setParameter: {numInitialSyncAttempts: 100, writePeriodicNoops: true}}
+});
+const nodes = rst.startSet();
+
+function setFailPoint(node, syncSource) {
+ const dataObj = {hostAndPort: syncSource.host};
+ assert.commandWorked(
+ node.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn", data: dataObj}));
+}
+
+setFailPoint(nodes[1], nodes[0]);
+setFailPoint(nodes[2], nodes[1]);
+setFailPoint(nodes[3], nodes[2]);
+
+rst.initiate();
+const primary = rst.getPrimary();
+
+rst.awaitSyncSource(nodes[1], nodes[0]);
+rst.awaitSyncSource(nodes[2], nodes[1]);
+rst.awaitSyncSource(nodes[3], nodes[2]);
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js
index 1f86f420559..91e891f79d6 100644
--- a/jstests/replsets/fsync_lock_read_secondaries.js
+++ b/jstests/replsets/fsync_lock_read_secondaries.js
@@ -22,52 +22,51 @@
* witness as an increase in the count of documents stored on the secondary.
*/
(function() {
- "use strict";
- // Load utility methods for replica set tests
- load("jstests/replsets/rslib.js");
+"use strict";
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5});
- // Start each mongod in the replica set. Returns a list of nodes
- var nodes = replTest.startSet();
- // This will wait for initiation
- replTest.initiate();
- var master = replTest.getPrimary();
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5});
+// Start each mongod in the replica set. Returns a list of nodes
+var nodes = replTest.startSet();
+// This will wait for initiation
+replTest.initiate();
+var master = replTest.getPrimary();
- var ret = master.getDB("admin").fsyncLock();
- if (!ret.ok) {
- assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Storage Engine does not support fsyncLock, so bailing");
- return;
- }
- master.getDB("admin").fsyncUnlock();
+var ret = master.getDB("admin").fsyncLock();
+if (!ret.ok) {
+ assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
+ jsTestLog("Storage Engine does not support fsyncLock, so bailing");
+ return;
+}
+master.getDB("admin").fsyncUnlock();
- var docNum = 100;
- for (var i = 0; i < docNum; i++) {
- master.getDB("foo").bar.save({a: i});
- }
- waitForAllMembers(master.getDB("foo"));
- replTest.awaitReplication();
+var docNum = 100;
+for (var i = 0; i < docNum; i++) {
+ master.getDB("foo").bar.save({a: i});
+}
+waitForAllMembers(master.getDB("foo"));
+replTest.awaitReplication();
- // Calling getPrimary also populates '_slaves'.
- var slaves = replTest._slaves;
- slaves[0].setSlaveOk();
+// Calling getPrimary also populates '_slaves'.
+var slaves = replTest._slaves;
+slaves[0].setSlaveOk();
- assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
- var docNum = 1000;
- for (var i = 0; i < docNum; i++) {
- master.getDB("foo").bar.save({a: i});
- }
- // Issue a read query on the secondary while holding the fsync lock.
- // This is what we are testing. Previously this would block. After the fix
- // this should work just fine.
- var slave0count = slaves[0].getDB("foo").bar.find().itcount();
- assert.eq(
- slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count);
- assert(slaves[0].getDB("admin").fsyncUnlock().ok);
+assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
+var docNum = 1000;
+for (var i = 0; i < docNum; i++) {
+ master.getDB("foo").bar.save({a: i});
+}
+// Issue a read query on the secondary while holding the fsync lock.
+// This is what we are testing. Previously this would block. After the fix
+// this should work just fine.
+var slave0count = slaves[0].getDB("foo").bar.find().itcount();
+assert.eq(slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count);
+assert(slaves[0].getDB("admin").fsyncUnlock().ok);
- // The secondary should have equal or more documents than what it had before.
- assert.soon(function() {
- return slaves[0].getDB("foo").bar.find().itcount() > 100;
- }, "count of documents stored on the secondary did not increase");
- replTest.stopSet();
+// The secondary should have equal or more documents than what it had before.
+assert.soon(function() {
+ return slaves[0].getDB("foo").bar.find().itcount() > 100;
+}, "count of documents stored on the secondary did not increase");
+replTest.stopSet();
}());
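
The raw {fsync: 1, lock: 1} command used on the secondary above also has shell-helper equivalents; a minimal sketch:

    // db.fsyncLock() is shorthand for runCommand({fsync: 1, lock: 1}).
    const ret = db.fsyncLock();
    if (ret.ok) {
        // Reads still succeed while the lock blocks writes.
        const count = db.getSiblingDB('foo').bar.find().itcount();
        assert.commandWorked(db.fsyncUnlock());
    }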
diff --git a/jstests/replsets/get_replication_info_helper.js b/jstests/replsets/get_replication_info_helper.js
index 5e2a696bb80..32b0c2af766 100644
--- a/jstests/replsets/get_replication_info_helper.js
+++ b/jstests/replsets/get_replication_info_helper.js
@@ -1,49 +1,48 @@
// Tests the output of db.getReplicationInfo() and tests db.printSlaveReplicationInfo().
(function() {
- "use strict";
- var name = "getReplicationInfo";
- var replSet = new ReplSetTest({name: name, nodes: 3, oplogSize: 50});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate();
-
- var primary = replSet.getPrimary();
- for (var i = 0; i < 100; i++) {
- primary.getDB('test').foo.insert({a: i});
- }
- replSet.awaitReplication();
-
- var replInfo = primary.getDB('admin').getReplicationInfo();
- var replInfoString = tojson(replInfo);
-
- assert.eq(50, replInfo.logSizeMB, replInfoString);
- assert.lt(0, replInfo.usedMB, replInfoString);
- assert.lte(0, replInfo.timeDiff, replInfoString);
- assert.lte(0, replInfo.timeDiffHours, replInfoString);
- // Just make sure the following fields exist since it would be hard to predict their values
- assert(replInfo.tFirst, replInfoString);
- assert(replInfo.tLast, replInfoString);
- assert(replInfo.now, replInfoString);
-
- // calling this function with and without a primary, should provide sufficient code coverage
- // to catch any JS errors
- var mongo =
- startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
- mongo();
- assert(rawMongoProgramOutput().match("behind the primary"));
-
- // get to a primaryless state
- for (i in replSet._slaves) {
- var secondary = replSet._slaves[i];
- secondary.getDB('admin').runCommand({replSetFreeze: 120});
- }
- assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 120, force: true}));
-
- mongo =
- startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
- mongo();
- assert(rawMongoProgramOutput().match("behind the freshest"));
-
- replSet.stopSet();
+"use strict";
+var name = "getReplicationInfo";
+var replSet = new ReplSetTest({name: name, nodes: 3, oplogSize: 50});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate();
+
+var primary = replSet.getPrimary();
+for (var i = 0; i < 100; i++) {
+ primary.getDB('test').foo.insert({a: i});
+}
+replSet.awaitReplication();
+
+var replInfo = primary.getDB('admin').getReplicationInfo();
+var replInfoString = tojson(replInfo);
+
+assert.eq(50, replInfo.logSizeMB, replInfoString);
+assert.lt(0, replInfo.usedMB, replInfoString);
+assert.lte(0, replInfo.timeDiff, replInfoString);
+assert.lte(0, replInfo.timeDiffHours, replInfoString);
+// Just make sure the following fields exist since it would be hard to predict their values
+assert(replInfo.tFirst, replInfoString);
+assert(replInfo.tLast, replInfoString);
+assert(replInfo.now, replInfoString);
+
+// Calling this function with and without a primary should provide sufficient code coverage
+// to catch any JS errors.
+var mongo =
+ startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
+mongo();
+assert(rawMongoProgramOutput().match("behind the primary"));
+
+// get to a primaryless state
+for (i in replSet._slaves) {
+ var secondary = replSet._slaves[i];
+ secondary.getDB('admin').runCommand({replSetFreeze: 120});
+}
+assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 120, force: true}));
+
+mongo = startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
+mongo();
+assert(rawMongoProgramOutput().match("behind the freshest"));
+
+replSet.stopSet();
})();
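
db.getReplicationInfo() derives everything from the local oplog; a minimal sketch of the fields the assertions above rely on:

    const replInfo = db.getSiblingDB('admin').getReplicationInfo();
    // logSizeMB: configured oplog size; usedMB: space currently in use.
    // timeDiff / timeDiffHours: span between first and last oplog entries.
    // tFirst / tLast / now: the timestamps bounding that window.
    print('oplog window in hours: ' + replInfo.timeDiffHours);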
diff --git a/jstests/replsets/get_status.js b/jstests/replsets/get_status.js
index 31a49dc1300..fab6a2035b4 100644
--- a/jstests/replsets/get_status.js
+++ b/jstests/replsets/get_status.js
@@ -4,25 +4,25 @@
*/
(function() {
- "use strict";
- var name = "getstatus";
- var numNodes = 4;
- var replTest = new ReplSetTest({name: name, nodes: numNodes});
- var nodes = replTest.startSet();
+"use strict";
+var name = "getstatus";
+var numNodes = 4;
+var replTest = new ReplSetTest({name: name, nodes: numNodes});
+var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[numNodes - 1].arbiterOnly = true;
- // An invalid time to get status
- var statusBeforeInitCode = 94;
- assert.commandFailedWithCode(nodes[0].getDB("admin").runCommand({replSetGetStatus: 1}),
- statusBeforeInitCode,
- "replSetGetStatus should fail before initializing.");
- replTest.initiate(config);
- replTest.awaitSecondaryNodes();
+var config = replTest.getReplSetConfig();
+config.members[numNodes - 1].arbiterOnly = true;
+// An invalid time to get status
+var statusBeforeInitCode = 94;
+assert.commandFailedWithCode(nodes[0].getDB("admin").runCommand({replSetGetStatus: 1}),
+ statusBeforeInitCode,
+ "replSetGetStatus should fail before initializing.");
+replTest.initiate(config);
+replTest.awaitSecondaryNodes();
- // A valid status
- var primary = replTest.getPrimary();
- assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
+// A valid status
+var primary = replTest.getPrimary();
+assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
- replTest.stopSet();
+replTest.stopSet();
}());
diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js
index 6dfbe3047fc..cb63de5cf6d 100644
--- a/jstests/replsets/groupAndMapReduce.js
+++ b/jstests/replsets/groupAndMapReduce.js
@@ -1,7 +1,6 @@
load("jstests/replsets/rslib.js");
doTest = function(signal) {
-
// Test basic replica set functionality.
// -- Replication
// -- Failover
@@ -83,7 +82,6 @@ doTest = function(signal) {
} catch (e) {
print("Received exception: " + e);
}
-
});
// Shut down the set and finish the test.
diff --git a/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js b/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js
index af74be61d60..7156721fbc3 100644
--- a/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js
+++ b/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js
@@ -7,63 +7,63 @@
*/
(function() {
- 'use strict';
- load("jstests/libs/check_log.js");
+'use strict';
+load("jstests/libs/check_log.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const node = rst.getPrimary();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const node = rst.getPrimary();
- const name = 'hang_before_releasing_transaction_oplog_hole';
- const dbName = 'test';
- const collName = name;
- const testDB = node.getDB(dbName);
- const coll = testDB[collName];
+const name = 'hang_before_releasing_transaction_oplog_hole';
+const dbName = 'test';
+const collName = name;
+const testDB = node.getDB(dbName);
+const coll = testDB[collName];
- // Create collection before running the transaction.
- assert.commandWorked(coll.insert({a: 1}));
+// Create collection before running the transaction.
+assert.commandWorked(coll.insert({a: 1}));
- // Run a transaction in a parallel shell. The transaction will be configured to hang on commit.
- // Rather than setting a timeout on commit and forfeiting our ability to check commit for
- // success, we use a separate thread to disable the failpoint and allow the server to finish
- // committing successfully.
- function transactionFn() {
- load('jstests/core/txns/libs/prepare_helpers.js');
+// Run a transaction in a parallel shell. The transaction will be configured to hang on commit.
+// Rather than setting a timeout on commit and forfeiting our ability to check commit for
+// success, we use a separate thread to disable the failpoint and allow the server to finish
+// committing successfully.
+function transactionFn() {
+ load('jstests/core/txns/libs/prepare_helpers.js');
- const name = 'hang_before_releasing_transaction_oplog_hole';
- const dbName = 'test';
- const collName = name;
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
+ const name = 'hang_before_releasing_transaction_oplog_hole';
+ const dbName = 'test';
+ const collName = name;
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase(dbName);
- session.startTransaction({readConcern: {level: 'snapshot'}});
- sessionDB[collName].update({}, {a: 2});
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ sessionDB[collName].update({}, {a: 2});
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- // Hang before releasing the 'commitTransaction' oplog entry hole.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'alwaysOn'}));
+ // Hang before releasing the 'commitTransaction' oplog entry hole.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'alwaysOn'}));
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- }
- const joinTransaction = startParallelShell(transactionFn, rst.ports[0]);
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+}
+const joinTransaction = startParallelShell(transactionFn, rst.ports[0]);
- jsTestLog("Waiting to hang with the oplog hole held open.");
- checkLog.contains(node, "hangBeforeReleasingTransactionOplogHole fail point enabled");
+jsTestLog("Waiting to hang with the oplog hole held open.");
+checkLog.contains(node, "hangBeforeReleasingTransactionOplogHole fail point enabled");
- jsTestLog("Waiting for 'commitTransaction' to advance lastApplied.");
- sleep(5 * 1000);
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'off'}));
+jsTestLog("Waiting for 'commitTransaction' to advance lastApplied.");
+sleep(5 * 1000);
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'off'}));
- jsTestLog("Joining the transaction.");
- joinTransaction();
+jsTestLog("Joining the transaction.");
+joinTransaction();
- jsTestLog("Dropping another collection.");
- // A w:majority drop on a non-existent collection will not do a write, but will still wait for
- // write concern. We double check that that still succeeds.
- testDB["otherColl"].drop({writeConcern: {w: "majority"}});
+jsTestLog("Dropping another collection.");
+// A w:majority drop on a non-existent collection will not do a write, but will still wait for
+// write concern. We double check that that still succeeds.
+testDB["otherColl"].drop({writeConcern: {w: "majority"}});
- rst.stopSet();
+rst.stopSet();
})();
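
The parallel shell above drives a prepared transaction through the helpers in prepare_helpers.js. A minimal sketch of that lifecycle without the failpoint, assuming a replica set connection and a pre-existing collection (the collection is created before the transaction starts):

    load('jstests/core/txns/libs/prepare_helpers.js');
    assert.commandWorked(db.getSiblingDB('test').runCommand({create: 'coll'}));
    const session = db.getMongo().startSession({causalConsistency: false});
    const sessionDB = session.getDatabase('test');
    session.startTransaction({readConcern: {level: 'snapshot'}});
    assert.commandWorked(sessionDB.coll.insert({a: 1}));
    // prepareTransaction returns the timestamp at which commit becomes legal.
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
    PrepareHelpers.commitTransaction(session, prepareTimestamp);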
diff --git a/jstests/replsets/id_index_replication.js b/jstests/replsets/id_index_replication.js
index 764619013ea..bd693104104 100644
--- a/jstests/replsets/id_index_replication.js
+++ b/jstests/replsets/id_index_replication.js
@@ -3,77 +3,75 @@
* created on the secondary when the index spec is not included in the oplog.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- var replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- rst.initiate(replSetConfig);
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+rst.initiate(replSetConfig);
- var primaryDB = rst.getPrimary().getDB("test");
- var oplogColl = rst.getPrimary().getDB("local").oplog.rs;
- var secondaryDB = rst.getSecondary().getDB("test");
+var primaryDB = rst.getPrimary().getDB("test");
+var oplogColl = rst.getPrimary().getDB("local").oplog.rs;
+var secondaryDB = rst.getSecondary().getDB("test");
- function testOplogEntryIdIndexSpec(collectionName, idIndexSpec) {
- var oplogEntry = oplogColl.findOne({op: "c", "o.create": collectionName});
- assert.neq(null, oplogEntry);
- if (idIndexSpec === null) {
- assert(!oplogEntry.o.hasOwnProperty("idIndex"), tojson(oplogEntry));
- } else {
- assert.eq(0, bsonWoCompare(idIndexSpec, oplogEntry.o.idIndex), tojson(oplogEntry));
- }
+function testOplogEntryIdIndexSpec(collectionName, idIndexSpec) {
+ var oplogEntry = oplogColl.findOne({op: "c", "o.create": collectionName});
+ assert.neq(null, oplogEntry);
+ if (idIndexSpec === null) {
+ assert(!oplogEntry.o.hasOwnProperty("idIndex"), tojson(oplogEntry));
+ } else {
+ assert.eq(0, bsonWoCompare(idIndexSpec, oplogEntry.o.idIndex), tojson(oplogEntry));
}
+}
- assert.commandWorked(primaryDB.createCollection("without_version"));
- var allIndexes = primaryDB.without_version.getIndexes();
- var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
- testOplogEntryIdIndexSpec("without_version", spec);
+assert.commandWorked(primaryDB.createCollection("without_version"));
+var allIndexes = primaryDB.without_version.getIndexes();
+var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
+testOplogEntryIdIndexSpec("without_version", spec);
- assert.commandWorked(
- primaryDB.createCollection("version_v2", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
- allIndexes = primaryDB.version_v2.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
- testOplogEntryIdIndexSpec("version_v2", spec);
+assert.commandWorked(
+ primaryDB.createCollection("version_v2", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
+allIndexes = primaryDB.version_v2.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
+testOplogEntryIdIndexSpec("version_v2", spec);
- assert.commandWorked(
- primaryDB.createCollection("version_v1", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- allIndexes = primaryDB.version_v1.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected primary to build a v=1 _id index: " + tojson(spec));
- testOplogEntryIdIndexSpec("version_v1", null);
+assert.commandWorked(
+ primaryDB.createCollection("version_v1", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+allIndexes = primaryDB.version_v1.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(1, spec.v, "Expected primary to build a v=1 _id index: " + tojson(spec));
+testOplogEntryIdIndexSpec("version_v1", null);
- rst.awaitReplication();
+rst.awaitReplication();
- // Verify that the secondary built _id indexes with the same version as on the primary.
+// Verify that the secondary built _id indexes with the same version as on the primary.
- allIndexes = secondaryDB.without_version.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(
- 2,
- spec.v,
- "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
+allIndexes = secondaryDB.without_version.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2,
+ spec.v,
+ "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
- allIndexes = secondaryDB.version_v2.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(
- 2,
- spec.v,
- "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
+allIndexes = secondaryDB.version_v2.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2,
+ spec.v,
+ "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
- allIndexes = secondaryDB.version_v1.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected secondary to implicitly build a v=1 _id index: " + tojson(spec));
+allIndexes = secondaryDB.version_v1.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(1, spec.v, "Expected secondary to implicitly build a v=1 _id index: " + tojson(spec));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js
index b08f9bd1a71..27d3f0e0e66 100644
--- a/jstests/replsets/initial_sync2.js
+++ b/jstests/replsets/initial_sync2.js
@@ -19,7 +19,6 @@ load("jstests/replsets/rslib.js");
var basename = "jstests_initsync2";
var doTest = function() {
-
jsTest.log("1. Bring up set");
var replTest = new ReplSetTest({name: basename, nodes: [{rsConfig: {priority: 2}}, {}]});
var conns = replTest.startSet();
diff --git a/jstests/replsets/initial_sync_applier_error.js b/jstests/replsets/initial_sync_applier_error.js
index 36182e58c39..2bd65f51e12 100644
--- a/jstests/replsets/initial_sync_applier_error.js
+++ b/jstests/replsets/initial_sync_applier_error.js
@@ -10,47 +10,47 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- var name = 'initial_sync_applier_error';
- var replSet = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {arbiterOnly: true}}],
- });
+var name = 'initial_sync_applier_error';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {arbiterOnly: true}}],
+});
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
- var coll = primary.getDB('test').getCollection(name);
- assert.writeOK(coll.insert({_id: 0, content: "hi"}));
+var coll = primary.getDB('test').getCollection(name);
+assert.writeOK(coll.insert({_id: 0, content: "hi"}));
- // Add a secondary node but make it hang after retrieving the last op on the source
- // but before copying databases.
- var secondary = replSet.add({setParameter: "numInitialSyncAttempts=2"});
- secondary.setSlaveOk();
+// Add a secondary node but make it hang after retrieving the last op on the source
+// but before copying databases.
+var secondary = replSet.add({setParameter: "numInitialSyncAttempts=2"});
+secondary.setSlaveOk();
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+replSet.reInitiate();
- // Wait for fail point message to be logged.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+// Wait for fail point message to be logged.
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- var newCollName = name + '_2';
- assert.commandWorked(coll.renameCollection(newCollName, true));
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
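+// While the secondary is hung, rename the collection on the primary; the secondary will have to
+// apply this rename during the oplog-application phase of initial sync.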
+var newCollName = name + '_2';
+assert.commandWorked(coll.renameCollection(newCollName, true));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- checkLog.contains(secondary, 'initial sync done');
+checkLog.contains(secondary, 'initial sync done');
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- assert.eq(0, secondary.getDB('test').getCollection(name).count());
- assert.eq(1, secondary.getDB('test').getCollection(newCollName).count());
- assert.eq("hi", secondary.getDB('test').getCollection(newCollName).findOne({_id: 0}).content);
- replSet.stopSet();
+assert.eq(0, secondary.getDB('test').getCollection(name).count());
+assert.eq(1, secondary.getDB('test').getCollection(newCollName).count());
+assert.eq("hi", secondary.getDB('test').getCollection(newCollName).findOne({_id: 0}).content);
+replSet.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_capped_index.js b/jstests/replsets/initial_sync_capped_index.js
index 0aa3c648499..a7c1a2a3de4 100644
--- a/jstests/replsets/initial_sync_capped_index.js
+++ b/jstests/replsets/initial_sync_capped_index.js
@@ -24,97 +24,97 @@
* This is a regression test for SERVER-29197.
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- /**
- * Overflow a capped collection 'coll' by continuously inserting a given document,
- * 'docToInsert'.
- */
- function overflowCappedColl(coll, docToInsert) {
- // Insert one document and save its _id.
- assert.writeOK(coll.insert(docToInsert));
- var origFirstDocId = coll.findOne()["_id"];
-
- // Detect overflow by seeing if the original first doc of the collection is still present.
- while (coll.findOne({_id: origFirstDocId})) {
- assert.commandWorked(coll.insert(docToInsert));
- }
- }
+"use strict";
- // Set up replica set.
- var testName = "initial_sync_capped_index";
- var dbName = testName;
- var replTest = new ReplSetTest({name: testName, nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
- var primaryDB = primary.getDB(dbName);
- var cappedCollName = "capped_coll";
- var primaryCappedColl = primaryDB[cappedCollName];
-
- // Create a capped collection of the minimum allowed size.
- var cappedCollSize = 4096;
-
- jsTestLog("Creating capped collection of size " + cappedCollSize + " bytes.");
- assert.commandWorked(
- primaryDB.createCollection(cappedCollName, {capped: true, size: cappedCollSize}));
-
- // Overflow the capped collection.
- jsTestLog("Overflowing the capped collection.");
-
- var docSize = cappedCollSize / 8;
- var largeDoc = {a: new Array(docSize).join("*")};
- overflowCappedColl(primaryCappedColl, largeDoc);
-
- // Check that there are more than two documents in the collection. This will ensure the
- // secondary's collection cloner will send a getMore.
- assert.gt(primaryCappedColl.find().itcount(), 2);
-
- // Add a SECONDARY node. It should use batchSize=2 for its initial sync queries.
- jsTestLog("Adding secondary node.");
- replTest.add({setParameter: "collectionClonerBatchSize=2"});
-
- var secondary = replTest.getSecondary();
- var collectionClonerFailPoint = "initialSyncHangCollectionClonerAfterHandlingBatchResponse";
-
- // Make the collection cloner pause after its initial 'find' response on the capped collection.
- var nss = dbName + "." + cappedCollName;
- jsTestLog("Enabling collection cloner fail point for " + nss);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: collectionClonerFailPoint, mode: 'alwaysOn', data: {nss: nss}}));
-
- // Let the SECONDARY begin initial sync.
- jsTestLog("Re-initiating replica set with new secondary.");
- replTest.reInitiate();
-
- jsTestLog("Waiting for the initial 'find' response of capped collection cloner to complete.");
- checkLog.contains(
- secondary,
- "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point enabled for " + nss);
-
- // Append documents to the capped collection so that the SECONDARY will clone these
- // additional documents.
- var docsToAppend = 2;
- for (var i = 0; i < docsToAppend; i++) {
- assert.writeOK(primaryDB[cappedCollName].insert(largeDoc));
- }
+load("jstests/libs/check_log.js");
- // Let the 'getMore' requests for the capped collection clone continue.
- jsTestLog("Disabling collection cloner fail point for " + nss);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: collectionClonerFailPoint, mode: 'off', data: {nss: nss}}));
-
- // Wait until initial sync completes.
- replTest.awaitReplication();
-
- // Make sure the indexes created during initial sync are valid.
- var secondaryCappedColl = secondary.getDB(dbName)[cappedCollName];
- var validate_result = secondaryCappedColl.validate(true);
- var failMsg =
- "Index validation of '" + secondaryCappedColl.name + "' failed: " + tojson(validate_result);
- assert(validate_result.valid, failMsg);
- replTest.stopSet();
+/**
+ * Overflow a capped collection 'coll' by continuously inserting a given document,
+ * 'docToInsert'.
+ */
+function overflowCappedColl(coll, docToInsert) {
+ // Insert one document and save its _id.
+ assert.writeOK(coll.insert(docToInsert));
+ var origFirstDocId = coll.findOne()["_id"];
+
+ // Detect overflow by seeing if the original first doc of the collection is still present.
+ while (coll.findOne({_id: origFirstDocId})) {
+ assert.commandWorked(coll.insert(docToInsert));
+ }
+}
+
+// Set up replica set.
+var testName = "initial_sync_capped_index";
+var dbName = testName;
+var replTest = new ReplSetTest({name: testName, nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+var primaryDB = primary.getDB(dbName);
+var cappedCollName = "capped_coll";
+var primaryCappedColl = primaryDB[cappedCollName];
+
+// Create a capped collection of the minimum allowed size.
+var cappedCollSize = 4096;
+
+jsTestLog("Creating capped collection of size " + cappedCollSize + " bytes.");
+assert.commandWorked(
+ primaryDB.createCollection(cappedCollName, {capped: true, size: cappedCollSize}));
+
+// Overflow the capped collection.
+jsTestLog("Overflowing the capped collection.");
+
+var docSize = cappedCollSize / 8;
+var largeDoc = {a: new Array(docSize).join("*")};
+overflowCappedColl(primaryCappedColl, largeDoc);
+
+// Check that there are more than two documents in the collection. This will ensure the
+// secondary's collection cloner will send a getMore.
+assert.gt(primaryCappedColl.find().itcount(), 2);
+
+// Add a SECONDARY node. It should use batchSize=2 for its initial sync queries.
+jsTestLog("Adding secondary node.");
+replTest.add({setParameter: "collectionClonerBatchSize=2"});
+
+var secondary = replTest.getSecondary();
+var collectionClonerFailPoint = "initialSyncHangCollectionClonerAfterHandlingBatchResponse";
+
+// Make the collection cloner pause after its initial 'find' response on the capped collection.
+var nss = dbName + "." + cappedCollName;
+jsTestLog("Enabling collection cloner fail point for " + nss);
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: collectionClonerFailPoint, mode: 'alwaysOn', data: {nss: nss}}));
+
+// Let the SECONDARY begin initial sync.
+jsTestLog("Re-initiating replica set with new secondary.");
+replTest.reInitiate();
+
+jsTestLog("Waiting for the initial 'find' response of capped collection cloner to complete.");
+checkLog.contains(
+ secondary,
+ "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point enabled for " + nss);
+
+// Append documents to the capped collection so that the SECONDARY will clone these
+// additional documents.
+var docsToAppend = 2;
+for (var i = 0; i < docsToAppend; i++) {
+ assert.writeOK(primaryDB[cappedCollName].insert(largeDoc));
+}
+
+// Let the 'getMore' requests for the capped collection clone continue.
+jsTestLog("Disabling collection cloner fail point for " + nss);
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: collectionClonerFailPoint, mode: 'off', data: {nss: nss}}));
+
+// Wait until initial sync completes.
+replTest.awaitReplication();
+
+// Make sure the indexes created during initial sync are valid.
+var secondaryCappedColl = secondary.getDB(dbName)[cappedCollName];
+var validate_result = secondaryCappedColl.validate(true);
+var failMsg =
+ "Index validation of '" + secondaryCappedColl.name + "' failed: " + tojson(validate_result);
+assert(validate_result.valid, failMsg);
+replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_cloner_dups.js b/jstests/replsets/initial_sync_cloner_dups.js
index 23b1b989400..7132d9f2087 100644
--- a/jstests/replsets/initial_sync_cloner_dups.js
+++ b/jstests/replsets/initial_sync_cloner_dups.js
@@ -7,126 +7,126 @@
* verify collection and both indexes on the secondary have the right number of docs
*/
(function(doNotRun) {
- "use strict";
+"use strict";
- if (doNotRun) {
- return;
- }
+if (doNotRun) {
+ return;
+}
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- Random.setRandomSeed();
+Random.setRandomSeed();
- // used to parse RAM log file
- var contains = function(logLines, func) {
- var i = logLines.length;
- while (i--) {
- printjson(logLines[i]);
- if (func(logLines[i])) {
- return true;
- }
+// used to parse RAM log file
+var contains = function(logLines, func) {
+ var i = logLines.length;
+ while (i--) {
+ printjson(logLines[i]);
+ if (func(logLines[i])) {
+ return true;
}
- return false;
- };
-
- var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
- replTest.startSet();
- var conf = replTest.getReplSetConfig();
- conf.settings = {};
- conf.settings.chainingAllowed = false;
- replTest.initiate(conf);
- replTest.awaitSecondaryNodes();
- var primary = replTest.getPrimary();
- var coll = primary.getDB('test').cloner;
- coll.drop();
- coll.createIndex({k: 1});
-
- // These need to be big enough to force initial-sync to use many batches
- var numDocs = 100 * 1000;
- var bigStr = Array(1001).toString();
- var batch = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- batch.insert({_id: i, bigStr: bigStr});
}
- batch.execute();
-
- replTest.awaitReplication();
-
- jsTestLog("Start remove/insert on primary");
- var insertAndRemove = function(host) {
- jsTestLog("starting bg writes on " + host);
- var m = new Mongo(host);
- var db = m.getDB('test');
- var coll = db.cloner;
- var numDocs = coll.count();
- for (var i = 0; !db.stop.findOne(); i++) {
- var id = Random.randInt(numDocs);
- coll.remove({_id: id});
- coll.insert({_id: id});
-
- var id = i % numDocs;
- // print(id);
- coll.remove({_id: id});
- coll.insert({_id: id});
-
- // Try to throttle this thread to prevent overloading slow machines.
- sleep(1);
- }
-
- jsTestLog("finished bg writes on " + host);
- };
- var worker = new ScopedThread(insertAndRemove, primary.host);
- worker.start();
-
- jsTestLog("add a new secondary");
- var secondary = replTest.add({});
- replTest.reInitiate();
- secondary.setSlaveOk();
- // Wait for the secondary to get ReplSetInitiate command.
- replTest.waitForState(
- secondary,
- [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY]);
-
- // This fail point will cause the first intial sync to fail, and leave an op in the buffer to
- // verify the fix from SERVER-17807
- print("=================== failpoint enabled ==============");
- printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
- {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
- printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
-
- // NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
- // Removed the assertion because it was too flaky. Printing a warning instead (dan)
- jsTestLog("making sure we dropped some dups");
- var res = secondary.adminCommand({getLog: "global"});
- var droppedDups = (contains(res.log, function(v) {
- return v.indexOf("index build dropped" /* NNN dups*/) != -1;
- }));
- if (!droppedDups) {
- jsTestLog(
- "Warning: Test did not trigger duplicate documents, this run will be a false negative");
+ return false;
+};
+
+var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
+replTest.startSet();
+var conf = replTest.getReplSetConfig();
+conf.settings = {};
+conf.settings.chainingAllowed = false;
+replTest.initiate(conf);
+replTest.awaitSecondaryNodes();
+var primary = replTest.getPrimary();
+var coll = primary.getDB('test').cloner;
+coll.drop();
+coll.createIndex({k: 1});
+
+// These need to be big enough to force initial-sync to use many batches
+var numDocs = 100 * 1000;
+var bigStr = Array(1001).toString();
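+// Array(1001).toString() joins 1001 empty slots with commas, yielding a 1000-character string.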
+var batch = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ batch.insert({_id: i, bigStr: bigStr});
+}
+batch.execute();
+
+replTest.awaitReplication();
+
+jsTestLog("Start remove/insert on primary");
+var insertAndRemove = function(host) {
+ jsTestLog("starting bg writes on " + host);
+ var m = new Mongo(host);
+ var db = m.getDB('test');
+ var coll = db.cloner;
+ var numDocs = coll.count();
+ for (var i = 0; !db.stop.findOne(); i++) {
+ var id = Random.randInt(numDocs);
+ coll.remove({_id: id});
+ coll.insert({_id: id});
+
+ var id = i % numDocs;
+ // print(id);
+ coll.remove({_id: id});
+ coll.insert({_id: id});
+
+ // Try to throttle this thread to prevent overloading slow machines.
+ sleep(1);
}
- jsTestLog("stopping writes and waiting for replica set to coalesce");
- primary.getDB('test').stop.insert({});
- worker.join();
- // make sure all secondaries are caught up, after init sync
- reconnect(secondary.getDB("test"));
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- jsTestLog("check that secondary has correct counts");
- var secondaryColl = secondary.getDB('test').getCollection('cloner');
- var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
- var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
- var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
- if (index != table || index != secondary_index) {
- printjson({
- name: coll,
- _id_index_count: index,
- secondary_index_count: secondary_index,
- table_count: table
- });
- }
- assert.eq(index, table);
- assert.eq(table, secondary_index);
+ jsTestLog("finished bg writes on " + host);
+};
+var worker = new ScopedThread(insertAndRemove, primary.host);
+worker.start();
+
+jsTestLog("add a new secondary");
+var secondary = replTest.add({});
+replTest.reInitiate();
+secondary.setSlaveOk();
+// Wait for the secondary to get ReplSetInitiate command.
+replTest.waitForState(
+ secondary,
+ [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY]);
+
+// This fail point will cause the first initial sync to fail, and leave an op in the buffer to
+// verify the fix from SERVER-17807
+print("=================== failpoint enabled ==============");
+printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
+ {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
+printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
+
+// NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
+// Removed the assertion because it was too flaky. Printing a warning instead (dan)
+jsTestLog("making sure we dropped some dups");
+var res = secondary.adminCommand({getLog: "global"});
+var droppedDups = (contains(res.log, function(v) {
+ return v.indexOf("index build dropped" /* NNN dups*/) != -1;
+}));
+if (!droppedDups) {
+ jsTestLog(
+ "Warning: Test did not trigger duplicate documents, this run will be a false negative");
+}
+
+jsTestLog("stopping writes and waiting for replica set to coalesce");
+primary.getDB('test').stop.insert({});
+worker.join();
+// Make sure all secondaries are caught up after initial sync.
+reconnect(secondary.getDB("test"));
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+jsTestLog("check that secondary has correct counts");
+var secondaryColl = secondary.getDB('test').getCollection('cloner');
+var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
+var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
+var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
+if (index != table || index != secondary_index) {
+ printjson({
+ name: coll,
+ _id_index_count: index,
+ secondary_index_count: secondary_index,
+ table_count: table
+ });
+}
+assert.eq(index, table);
+assert.eq(table, secondary_index);
})(true /* Disabled until SERVER-23476 re-enables the resync command */);
diff --git a/jstests/replsets/initial_sync_commit_prepared_transaction.js b/jstests/replsets/initial_sync_commit_prepared_transaction.js
index 81590fc0bc2..80198e82673 100644
--- a/jstests/replsets/initial_sync_commit_prepared_transaction.js
+++ b/jstests/replsets/initial_sync_commit_prepared_transaction.js
@@ -7,107 +7,109 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "initial_sync_commit_prepared_transaction";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 1}));
-
- jsTestLog("Preparing a transaction that will be the oldest active transaction");
-
- // Prepare a transaction so that there is an active transaction with an oplog entry. The prepare
- // timestamp will become the beginFetchingTimestamp during initial sync.
- const session1 = primary.startSession({causalConsistency: false});
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 2}));
- let prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
-
- // Do another operation so that the beginFetchingTimestamp will be different from the
- // beginApplyingTimestamp.
- assert.commandWorked(testColl.insert({_id: 3}));
-
- jsTestLog("Restarting the secondary");
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync after the secondary
- // has copied {_id: 1} and {_id: 3}. This way we can try to commit the prepared transaction
- // while initial sync is paused and know that its operations won't be copied during collection
- // cloning. Instead, the commitTransaction oplog entry must be applied during oplog application.
- replTest.stop(secondary,
- // signal
- undefined,
- // Validation would encounter a prepare conflict on the open transaction.
- {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
-
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
-
- jsTestLog("Running operations while collection cloning is paused");
-
- // Commit a transaction on the sync source while collection cloning is paused so that we know
- // they must be applied during the oplog application stage of initial sync.
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
-
- jsTestLog("Resuming initial sync");
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Initial sync completed");
-
- // Make sure the transaction committed properly and is reflected after the initial sync.
- let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
- assert.docEq(res, {_id: 2}, res);
-
- // Step up the secondary after initial sync is done and make sure we can successfully run
- // another transaction.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- let newPrimary = replTest.getPrimary();
- const session2 = newPrimary.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 4}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- res = newPrimary.getDB(dbName).getCollection(collName).findOne({_id: 4});
- assert.docEq(res, {_id: 4}, res);
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "initial_sync_commit_prepared_transaction";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 1}));
+
+jsTestLog("Preparing a transaction that will be the oldest active transaction");
+
+// Prepare a transaction so that there is an active transaction with an oplog entry. The prepare
+// timestamp will become the beginFetchingTimestamp during initial sync.
+const session1 = primary.startSession({causalConsistency: false});
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 2}));
+let prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+
+// Do another operation so that the beginFetchingTimestamp will be different from the
+// beginApplyingTimestamp.
+assert.commandWorked(testColl.insert({_id: 3}));
+
+jsTestLog("Restarting the secondary");
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync after the secondary
+// has copied {_id: 1} and {_id: 3}. This way we can try to commit the prepared transaction
+// while initial sync is paused and know that its operations won't be copied during collection
+// cloning. Instead, the commitTransaction oplog entry must be applied during oplog application.
+replTest.stop(secondary,
+ // signal
+ undefined,
+ // Validation would encounter a prepare conflict on the open transaction.
+ {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
+
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+
+jsTestLog("Running operations while collection cloning is paused");
+
+// Commit a transaction on the sync source while collection cloning is paused so that we know
+// they must be applied during the oplog application stage of initial sync.
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
+
+jsTestLog("Resuming initial sync");
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+jsTestLog("Initial sync completed");
+
+// Make sure the transaction committed properly and is reflected after the initial sync.
+let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
+assert.docEq(res, {_id: 2}, res);
+
+// Step up the secondary after initial sync is done and make sure we can successfully run
+// another transaction.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+let newPrimary = replTest.getPrimary();
+const session2 = newPrimary.startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 4}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+res = newPrimary.getDB(dbName).getCollection(collName).findOne({_id: 4});
+assert.docEq(res, {_id: 4}, res);
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_document_validation.js b/jstests/replsets/initial_sync_document_validation.js
index 06d9388b51d..79d06f75140 100644
--- a/jstests/replsets/initial_sync_document_validation.js
+++ b/jstests/replsets/initial_sync_document_validation.js
@@ -3,27 +3,27 @@
*/
(function() {
- var name = 'initial_sync_document_validation';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 2,
- });
+var name = 'initial_sync_document_validation';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 2,
+});
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
- var coll = primary.getDB('test').getCollection(name);
- assert.writeOK(coll.insert({_id: 0, x: 1}));
- assert.commandWorked(coll.runCommand("collMod", {"validator": {a: {$exists: true}}}));
+var coll = primary.getDB('test').getCollection(name);
+assert.writeOK(coll.insert({_id: 0, x: 1}));
+assert.commandWorked(coll.runCommand("collMod", {"validator": {a: {$exists: true}}}));
- secondary = replSet.restart(secondary, {startClean: true});
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+secondary = replSet.restart(secondary, {startClean: true});
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- assert.eq(1, secondary.getDB("test")[name].count());
- assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
+assert.eq(1, secondary.getDB("test")[name].count());
+assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_drop_collection.js b/jstests/replsets/initial_sync_drop_collection.js
index 54cc039540f..63229527ee1 100644
--- a/jstests/replsets/initial_sync_drop_collection.js
+++ b/jstests/replsets/initial_sync_drop_collection.js
@@ -1,171 +1,164 @@
// Test that CollectionCloner completes without error when a collection is dropped during cloning.
(function() {
- "use strict";
-
- // Skip db hash check because secondary cannot complete initial sync.
- TestData.skipCheckDBHashes = true;
-
- load("jstests/libs/check_log.js");
- load('jstests/replsets/libs/two_phase_drops.js');
- load("jstests/libs/uuid_util.js");
-
- // Set up replica set. Disallow chaining so nodes always sync from primary.
- const testName = "initial_sync_drop_collection";
- const dbName = testName;
- var replTest = new ReplSetTest({
- name: testName,
- nodes: [{}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
- var primaryDB = primary.getDB(dbName);
- var secondary = replTest.getSecondary();
- var secondaryDB = secondary.getDB(dbName);
- const collName = "testcoll";
- var primaryColl = primaryDB[collName];
- var secondaryColl = secondaryDB[collName];
- var pRenameColl = primaryDB["r_" + collName];
- var nss = primaryColl.getFullName();
-
- // This function adds data to the collection, restarts the secondary node with the given
- // parameters and setting the given failpoint, waits for the failpoint to be hit,
- // drops the collection, then disables the failpoint. It then optionally waits for the
- // expectedLog message and waits for the secondary to complete initial sync, then ensures
- // the collection on the secondary is empty.
- function setupTest({failPoint, secondaryStartupParams}) {
- jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
-
- jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss);
- secondaryStartupParams = secondaryStartupParams || {};
- secondaryStartupParams['failpoint.' + failPoint] =
- tojson({mode: 'alwaysOn', data: {nss: nss}});
- secondaryStartupParams['numInitialSyncAttempts'] = 1;
- replTest.restart(secondary, {startClean: true, setParameter: secondaryStartupParams});
-
- jsTestLog("Waiting for secondary to reach failPoint " + failPoint);
- checkLog.contains(secondary, failPoint + " fail point enabled for " + nss);
-
- // Restarting the secondary may have resulted in an election. Wait until the system
- // stabilizes and reaches RS_STARTUP2 state.
- replTest.getPrimary();
- replTest.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+"use strict";
+
+// Skip db hash check because secondary cannot complete initial sync.
+TestData.skipCheckDBHashes = true;
+
+load("jstests/libs/check_log.js");
+load('jstests/replsets/libs/two_phase_drops.js');
+load("jstests/libs/uuid_util.js");
+
+// Set up replica set. Disallow chaining so nodes always sync from primary.
+const testName = "initial_sync_drop_collection";
+const dbName = testName;
+var replTest = new ReplSetTest(
+ {name: testName, nodes: [{}, {rsConfig: {priority: 0}}], settings: {chainingAllowed: false}});
+replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+var primaryDB = primary.getDB(dbName);
+var secondary = replTest.getSecondary();
+var secondaryDB = secondary.getDB(dbName);
+const collName = "testcoll";
+var primaryColl = primaryDB[collName];
+var secondaryColl = secondaryDB[collName];
+var pRenameColl = primaryDB["r_" + collName];
+var nss = primaryColl.getFullName();
+
+// setupTest() adds data to the collection, restarts the secondary node with the given
+// parameters and the given failpoint set, and waits for the failpoint to be hit.
+// finishTest() then drops the collection, disables the failpoint, optionally waits for the
+// expectedLog message, waits for the secondary to complete initial sync, and finally ensures
+// that the collection on the secondary is empty.
+function setupTest({failPoint, secondaryStartupParams}) {
+ jsTestLog("Writing data to collection.");
+ assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
+
+ jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss);
+ secondaryStartupParams = secondaryStartupParams || {};
+ secondaryStartupParams['failpoint.' + failPoint] = tojson({mode: 'alwaysOn', data: {nss: nss}});
+ secondaryStartupParams['numInitialSyncAttempts'] = 1;
+ replTest.restart(secondary, {startClean: true, setParameter: secondaryStartupParams});
+
+ jsTestLog("Waiting for secondary to reach failPoint " + failPoint);
+ checkLog.contains(secondary, failPoint + " fail point enabled for " + nss);
+
+ // Restarting the secondary may have resulted in an election. Wait until the system
+ // stabilizes and reaches RS_STARTUP2 state.
+ replTest.getPrimary();
+ replTest.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+}
+
+function finishTest({failPoint, secondaryStartupParams, expectedLog, waitForDrop, createNew}) {
+ // Get the uuid for use in checking the log line.
+ let uuid = getUUIDFromListCollections(primaryDB, collName);
+
+ jsTestLog("Dropping collection on primary: " + primaryColl.getFullName());
+ assert(primaryColl.drop());
+
+ if (waitForDrop) {
+ jsTestLog("Waiting for drop to commit on primary");
+ TwoPhaseDropCollectionTest.waitForDropToComplete(primaryDB, collName);
}
- function finishTest({failPoint, secondaryStartupParams, expectedLog, waitForDrop, createNew}) {
- // Get the uuid for use in checking the log line.
- let uuid = getUUIDFromListCollections(primaryDB, collName);
+ if (createNew) {
+ jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName());
+ assert.writeOK(primaryColl.insert({_id: "not the same collection"}));
+ }
- jsTestLog("Dropping collection on primary: " + primaryColl.getFullName());
- assert(primaryColl.drop());
+ jsTestLog("Allowing secondary to continue.");
+ assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
- if (waitForDrop) {
- jsTestLog("Waiting for drop to commit on primary");
- TwoPhaseDropCollectionTest.waitForDropToComplete(primaryDB, collName);
- }
-
- if (createNew) {
- jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName());
- assert.writeOK(primaryColl.insert({_id: "not the same collection"}));
- }
-
- jsTestLog("Allowing secondary to continue.");
- assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
-
- if (expectedLog) {
- jsTestLog(eval(expectedLog));
- checkLog.contains(secondary, eval(expectedLog));
- }
-
- jsTestLog("Waiting for initial sync to complete.");
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- let res =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
-
- if (createNew) {
- assert.eq([{_id: "not the same collection"}], secondaryColl.find().toArray());
- assert(primaryColl.drop());
- } else {
- assert.eq(0, secondaryColl.find().itcount());
- }
- replTest.checkReplicatedDataHashes();
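+    // 'expectedLog' holds a template-literal string, so eval() interpolates ${nss} and ${uuid}
+    // from the enclosing scope at check time.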
+ if (expectedLog) {
+ jsTestLog(eval(expectedLog));
+ checkLog.contains(secondary, eval(expectedLog));
}
- function runDropTest(params) {
- setupTest(params);
- finishTest(params);
- }
+ jsTestLog("Waiting for initial sync to complete.");
+ replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Testing dropping between listIndexes and find.");
- runDropTest({failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor"});
-
- jsTestLog(
- "Testing dropping between listIndexes and find, with new same-name collection created.");
- runDropTest(
- {failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor", createNew: true});
-
- jsTestLog("Testing drop-pending between getMore calls.");
- runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
- });
-
- jsTestLog("Testing drop-pending with new same-name collection created, between getMore calls.");
- runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`",
- createNew: true
- });
-
- jsTestLog("Testing committed drop between getMore calls.");
-
- // Add another node to the set, so when we drop the collection it can commit. This other
- // secondary will be finished with initial sync when the drop happens.
- var secondary2 = replTest.add({rsConfig: {priority: 0}});
- replTest.reInitiate();
- replTest.waitForState(secondary2, ReplSetTest.State.SECONDARY);
-
- runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- waitForDrop: true,
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
- });
-
- jsTestLog("Testing rename between getMores.");
- setupTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- });
- jsTestLog("Renaming collection on primary");
- assert.commandWorked(primary.adminCommand({
- renameCollection: primaryColl.getFullName(),
- to: pRenameColl.getFullName(),
- dropTarget: false
- }));
+ let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+ assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
- jsTestLog("Allowing secondary to continue.");
- // Make sure we don't reach the fassert() indicating initial sync failure.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangBeforeFinish", mode: 'alwaysOn'}));
-
- assert.commandWorked(secondary.adminCommand({
- configureFailPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- mode: 'off'
- }));
- jsTestLog("Waiting for initial sync to complete.");
- checkLog.contains(secondary,
- "The maximum number of retries have been exhausted for initial sync.");
- replTest.stopSet();
+ if (createNew) {
+ assert.eq([{_id: "not the same collection"}], secondaryColl.find().toArray());
+ assert(primaryColl.drop());
+ } else {
+ assert.eq(0, secondaryColl.find().itcount());
+ }
+ replTest.checkReplicatedDataHashes();
+}
+
+function runDropTest(params) {
+ setupTest(params);
+ finishTest(params);
+}
+
+jsTestLog("Testing dropping between listIndexes and find.");
+runDropTest({failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor"});
+
+jsTestLog("Testing dropping between listIndexes and find, with new same-name collection created.");
+runDropTest(
+ {failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor", createNew: true});
+
+jsTestLog("Testing drop-pending between getMore calls.");
+runDropTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+ expectedLog:
+ "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
+});
+
+jsTestLog("Testing drop-pending with new same-name collection created, between getMore calls.");
+runDropTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+ expectedLog:
+ "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`",
+ createNew: true
+});
+
+jsTestLog("Testing committed drop between getMore calls.");
+
+// Add another node to the set, so when we drop the collection it can commit. This other
+// secondary will be finished with initial sync when the drop happens.
+var secondary2 = replTest.add({rsConfig: {priority: 0}});
+replTest.reInitiate();
+replTest.waitForState(secondary2, ReplSetTest.State.SECONDARY);
+
+runDropTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+ waitForDrop: true,
+ expectedLog:
+ "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
+});
+
+jsTestLog("Testing rename between getMores.");
+setupTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+});
+jsTestLog("Renaming collection on primary");
+assert.commandWorked(primary.adminCommand({
+ renameCollection: primaryColl.getFullName(),
+ to: pRenameColl.getFullName(),
+ dropTarget: false
+}));
+
+jsTestLog("Allowing secondary to continue.");
+// Make sure we don't reach the fassert() indicating initial sync failure.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "initialSyncHangBeforeFinish", mode: 'alwaysOn'}));
+
+assert.commandWorked(secondary.adminCommand({
+ configureFailPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ mode: 'off'
+}));
+jsTestLog("Waiting for initial sync to complete.");
+checkLog.contains(secondary, "The maximum number of retries have been exhausted for initial sync.");
+replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_during_stepdown.js b/jstests/replsets/initial_sync_during_stepdown.js
index 10e553a6c7e..d10575f6f2e 100644
--- a/jstests/replsets/initial_sync_during_stepdown.js
+++ b/jstests/replsets/initial_sync_during_stepdown.js
@@ -2,174 +2,170 @@
* Test that stepdown during collection cloning and oplog fetching does not interrupt initial sync.
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
-
- const testName = "initialSyncDuringStepDown";
- const dbName = testName;
- const collName = "testcoll";
-
- // Start a 3 node replica set to avoid primary step down after secondary restart.
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}],
- settings: {chainingAllowed: false}
- });
- rst.startSet();
- rst.initiate();
-
- var primary = rst.getPrimary();
- var primaryDB = primary.getDB(dbName);
- var primaryAdmin = primary.getDB("admin");
- var primaryColl = primaryDB[collName];
- var secondary = rst.getSecondary();
- var secondaryDB = secondary.getDB(dbName);
- var secondaryColl = secondaryDB[collName];
- var dbNss = primaryDB.getName();
- var collNss = primaryColl.getFullName();
-
- function setupTest({
- failPoint,
- nss: nss = '',
- nssSuffix: nssSuffix = '',
- secondaryStartupParams: secondaryStartupParams = {}
- }) {
- jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
-
- jsTestLog("Stopping secondary.");
- rst.stop(secondary);
-
- jsTestLog("Enabling failpoint '" + failPoint + "' on primary (sync source).");
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: failPoint,
- data: {nss: nss + nssSuffix, shouldCheckForInterrupt: true, shouldNotdropLock: true},
- mode: "alwaysOn"
- }));
-
- jsTestLog("Starting secondary.");
- secondaryStartupParams['numInitialSyncAttempts'] = 1;
- rst.start(secondary, {startClean: true, setParameter: secondaryStartupParams});
-
- // Wait until secondary reaches RS_STARTUP2 state.
- rst.waitForState(secondary, ReplSetTest.State.STARTUP_2);
- }
-
- function finishTest(
- {failPoint, nss: nss = '', DocsCopiedByOplogFetcher: DocsCopiedByOplogFetcher = 0}) {
- jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
- waitForCurOpByFailPoint(primaryAdmin, new RegExp('^' + nss), failPoint);
-
- jsTestLog("Making primary step down");
- const joinStepDownThread = startParallelShell(() => {
- assert.commandWorked(db.adminCommand({"replSetStepDown": 30 * 60, "force": true}));
- }, primary.port);
-
- // Wait until the step down has started to kill user operations.
- checkLog.contains(primary, "Starting to kill user operations");
-
- jsTestLog("Allowing initial sync to continue.");
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
-
- jsTestLog("Waiting for initial sync to complete.");
- rst.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- // Wait until the primary transitioned to SECONDARY state.
- joinStepDownThread();
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Validating initial sync data.");
- let res =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
- assert.eq(2 + DocsCopiedByOplogFetcher, secondaryColl.find().itcount());
-
- // As checkReplicatedDataHashes requires primary to validate the cloned data, we need to
- // unfreeze the old primary and make it re-elected.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
- rst.checkReplicatedDataHashes();
-
- jsTestLog("Dropping collection '" + collName + "'.");
- assert(primaryColl.drop());
- }
-
- function runStepDownTest(params) {
- setupTest(params);
- finishTest(params);
- }
-
- jsTestLog("Testing stepdown while 'databases' cloner lists databases.");
- runStepDownTest({failPoint: "hangBeforeListDatabases"});
-
- jsTestLog("Testing stepdown while 'database' cloner lists collections.");
- runStepDownTest(
- {failPoint: "hangBeforeListCollections", nss: dbNss, nssSuffix: ".$cmd.listCollections"});
-
- jsTestLog("Testing stepdown while 'collection' cloner performs collection count.");
- runStepDownTest({failPoint: "hangBeforeCollectionCount", nss: collNss});
-
- jsTestLog("Testing stepdown while 'collection' cloner list indexes for a collection.");
- runStepDownTest({failPoint: "hangBeforeListIndexes", nss: collNss});
-
- jsTestLog("Testing stepdown while 'collection' cloner clones collection data.");
- runStepDownTest({failPoint: "waitInFindBeforeMakingBatch", nss: collNss});
-
- jsTestLog("Testing stepdown between collection data batches.");
- runStepDownTest({
- failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
- nss: collNss,
- secondaryStartupParams: {collectionClonerBatchSize: 1}
- });
-
- // Restart secondary with "oplogFetcherInitialSyncMaxFetcherRestarts"
- // set to zero to avoid masking the oplog fetcher error and enable fail point
- // "waitAfterPinningCursorBeforeGetMoreBatch" which drops and reacquires read lock
- // to prevent deadlock between getmore and insert thread for ephemeral storage
- // engine.
- jsTestLog("Testing stepdown during oplog fetching");
- const oplogNss = "local.oplog.rs";
- setupTest({
- failPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
- nss: oplogNss,
- secondaryStartupParams: {
- initialSyncOplogFetcherBatchSize: 1,
- oplogFetcherInitialSyncMaxFetcherRestarts: 0,
- "failpoint.initialSyncHangAfterDataCloning": tojson({mode: 'alwaysOn'})
- }
- });
-
- jsTestLog("Waiting for collection cloning to complete.");
- checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
-
- // Insert more data so that these are replicated to secondary node via oplog fetcher.
- jsTestLog("Inserting more data on primary.");
- assert.writeOK(primaryColl.insert([{_id: 3}, {_id: 4}]));
-
- // Insert is successful. So, enable fail point "waitWithPinnedCursorDuringGetMoreBatch"
- // such that it doesn't drop locks when getmore cmd waits inside the fail point block.
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+
+const testName = "initialSyncDuringStepDown";
+const dbName = testName;
+const collName = "testcoll";
+
+// Start a 3 node replica set to avoid primary step down after secondary restart.
+const rst = new ReplSetTest(
+ {nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}], settings: {chainingAllowed: false}});
+rst.startSet();
+rst.initiate();
+
+var primary = rst.getPrimary();
+var primaryDB = primary.getDB(dbName);
+var primaryAdmin = primary.getDB("admin");
+var primaryColl = primaryDB[collName];
+var secondary = rst.getSecondary();
+var secondaryDB = secondary.getDB(dbName);
+var secondaryColl = secondaryDB[collName];
+var dbNss = primaryDB.getName();
+var collNss = primaryColl.getFullName();
+
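+// The object parameter is destructured with defaults, so callers may omit any of these fields.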
+function setupTest({
+ failPoint,
+ nss: nss = '',
+ nssSuffix: nssSuffix = '',
+ secondaryStartupParams: secondaryStartupParams = {}
+}) {
+ jsTestLog("Writing data to collection.");
+ assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
+
+ jsTestLog("Stopping secondary.");
+ rst.stop(secondary);
+
+ jsTestLog("Enabling failpoint '" + failPoint + "' on primary (sync source).");
assert.commandWorked(primary.adminCommand({
- configureFailPoint: "waitWithPinnedCursorDuringGetMoreBatch",
- data: {nss: oplogNss, shouldCheckForInterrupt: true, shouldNotdropLock: true},
+ configureFailPoint: failPoint,
+ data: {nss: nss + nssSuffix, shouldCheckForInterrupt: true, shouldNotdropLock: true},
mode: "alwaysOn"
}));
- // Now, disable fail point "waitAfterPinningCursorBeforeGetMoreBatch" to allow getmore to
- // continue and hang on "waitWithPinnedCursorDuringGetMoreBatch" fail point.
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
-
- // Disable fail point on secondary to allow initial sync to continue.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
-
- finishTest({
- failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
- nss: "local.oplog.rs",
- DocsCopiedByOplogFetcher: 2
- });
-
- rst.stopSet();
+ jsTestLog("Starting secondary.");
+ secondaryStartupParams['numInitialSyncAttempts'] = 1;
+ rst.start(secondary, {startClean: true, setParameter: secondaryStartupParams});
+
+ // Wait until secondary reaches RS_STARTUP2 state.
+ rst.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+}
+
+function finishTest(
+ {failPoint, nss: nss = '', DocsCopiedByOplogFetcher: DocsCopiedByOplogFetcher = 0}) {
+ jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
+ waitForCurOpByFailPoint(primaryAdmin, new RegExp('^' + nss), failPoint);
+
+ jsTestLog("Making primary step down");
+ const joinStepDownThread = startParallelShell(() => {
+ assert.commandWorked(db.adminCommand({"replSetStepDown": 30 * 60, "force": true}));
+ }, primary.port);
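+    // startParallelShell() returns a join function; calling it later blocks until the
+    // parallel shell has exited.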
+
+ // Wait until the step down has started to kill user operations.
+ checkLog.contains(primary, "Starting to kill user operations");
+
+ jsTestLog("Allowing initial sync to continue.");
+ assert.commandWorked(primaryAdmin.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
+
+ jsTestLog("Waiting for initial sync to complete.");
+ rst.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+ // Wait until the primary transitioned to SECONDARY state.
+ joinStepDownThread();
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+ jsTestLog("Validating initial sync data.");
+ let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+ assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
+ assert.eq(2 + DocsCopiedByOplogFetcher, secondaryColl.find().itcount());
+
+ // As checkReplicatedDataHashes requires primary to validate the cloned data, we need to
+ // unfreeze the old primary and make it re-elected.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+ rst.checkReplicatedDataHashes();
+
+ jsTestLog("Dropping collection '" + collName + "'.");
+ assert(primaryColl.drop());
+}
+
+function runStepDownTest(params) {
+ setupTest(params);
+ finishTest(params);
+}
+
+jsTestLog("Testing stepdown while 'databases' cloner lists databases.");
+runStepDownTest({failPoint: "hangBeforeListDatabases"});
+
+jsTestLog("Testing stepdown while 'database' cloner lists collections.");
+runStepDownTest(
+ {failPoint: "hangBeforeListCollections", nss: dbNss, nssSuffix: ".$cmd.listCollections"});
+
+jsTestLog("Testing stepdown while 'collection' cloner performs collection count.");
+runStepDownTest({failPoint: "hangBeforeCollectionCount", nss: collNss});
+
+jsTestLog("Testing stepdown while 'collection' cloner list indexes for a collection.");
+runStepDownTest({failPoint: "hangBeforeListIndexes", nss: collNss});
+
+jsTestLog("Testing stepdown while 'collection' cloner clones collection data.");
+runStepDownTest({failPoint: "waitInFindBeforeMakingBatch", nss: collNss});
+
+jsTestLog("Testing stepdown between collection data batches.");
+runStepDownTest({
+ failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
+ nss: collNss,
+ secondaryStartupParams: {collectionClonerBatchSize: 1}
+});
+
+// Restart the secondary with "oplogFetcherInitialSyncMaxFetcherRestarts" set to zero to avoid
+// masking the oplog fetcher error, and enable the "waitAfterPinningCursorBeforeGetMoreBatch"
+// fail point, which drops and reacquires the read lock to prevent a deadlock between the
+// getMore and insert threads on ephemeral storage engines.
+jsTestLog("Testing stepdown during oplog fetching");
+const oplogNss = "local.oplog.rs";
+setupTest({
+ failPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
+ nss: oplogNss,
+ secondaryStartupParams: {
+ initialSyncOplogFetcherBatchSize: 1,
+ oplogFetcherInitialSyncMaxFetcherRestarts: 0,
+ "failpoint.initialSyncHangAfterDataCloning": tojson({mode: 'alwaysOn'})
+ }
+});
+
+jsTestLog("Waiting for collection cloning to complete.");
+checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
+
+// Insert more data so that it is replicated to the secondary node via the oplog fetcher.
+jsTestLog("Inserting more data on primary.");
+assert.writeOK(primaryColl.insert([{_id: 3}, {_id: 4}]));
+
+// The insert succeeded, so enable the fail point "waitWithPinnedCursorDuringGetMoreBatch"
+// such that it doesn't drop locks while the getMore command waits inside the fail point block.
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: "waitWithPinnedCursorDuringGetMoreBatch",
+ data: {nss: oplogNss, shouldCheckForInterrupt: true, shouldNotdropLock: true},
+ mode: "alwaysOn"
+}));
+
+// Now, disable the fail point "waitAfterPinningCursorBeforeGetMoreBatch" to allow the getMore
+// to continue and then hang on the "waitWithPinnedCursorDuringGetMoreBatch" fail point.
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
+
+// Disable fail point on secondary to allow initial sync to continue.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
+
+finishTest({
+ failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
+ nss: "local.oplog.rs",
+ DocsCopiedByOplogFetcher: 2
+});
+
+rst.stopSet();
})();
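
Each runStepDownTest case above reduces to the same fail point cycle: enable, let the server
block on it, disable. A minimal standalone sketch of that cycle, using the
"waitInFindBeforeMakingBatch" fail point already exercised above (the standalone mongod here
is illustrative only, not part of the patch):

    const conn = MongoRunner.runMongod({});
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
    // Any find against this node now blocks before building its first batch; a parallel
    // shell would normally drive the blocked operation while this thread inspects it.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
    MongoRunner.stopMongod(conn);
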
diff --git a/jstests/replsets/initial_sync_fail_insert_once.js b/jstests/replsets/initial_sync_fail_insert_once.js
index d85f97f5a63..0a1f0a11a8a 100644
--- a/jstests/replsets/initial_sync_fail_insert_once.js
+++ b/jstests/replsets/initial_sync_fail_insert_once.js
@@ -5,33 +5,33 @@
*/
(function() {
- var name = 'initial_sync_fail_insert_once';
- var replSet = new ReplSetTest(
- {name: name, nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=3"}});
+var name = 'initial_sync_fail_insert_once';
+var replSet = new ReplSetTest(
+ {name: name, nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=3"}});
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
- var coll = primary.getDB('test').getCollection(name);
- assert.writeOK(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}}));
+var coll = primary.getDB('test').getCollection(name);
+assert.writeOK(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}}));
- jsTest.log("Enabling Failpoint failCollectionInserts on " + tojson(secondary));
- assert.commandWorked(secondary.getDB("admin").adminCommand({
- configureFailPoint: "failCollectionInserts",
- mode: {times: 2},
- data: {collectionNS: coll.getFullName()}
- }));
+jsTest.log("Enabling Failpoint failCollectionInserts on " + tojson(secondary));
+assert.commandWorked(secondary.getDB("admin").adminCommand({
+ configureFailPoint: "failCollectionInserts",
+ mode: {times: 2},
+ data: {collectionNS: coll.getFullName()}
+}));
- jsTest.log("Re-syncing " + tojson(secondary));
- secondary = replSet.restart(secondary, {startClean: true});
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+jsTest.log("Re-syncing " + tojson(secondary));
+secondary = replSet.restart(secondary, {startClean: true});
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- assert.eq(1, secondary.getDB("test")[name].count());
- assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
+assert.eq(1, secondary.getDB("test")[name].count());
+assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
- jsTest.log("Stopping repl set test; finished.");
- replSet.stopSet();
+jsTest.log("Stopping repl set test; finished.");
+replSet.stopSet();
})();
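
The {times: 2} mode above arms failCollectionInserts for exactly two triggers, which is why
numInitialSyncAttempts=3 lets the third attempt succeed. A hedged sketch of the counted mode
in isolation (the collection name is illustrative, and the failing inserts are expected to
surface as write errors):

    assert.commandWorked(db.adminCommand({
        configureFailPoint: "failCollectionInserts",
        mode: {times: 2},
        data: {collectionNS: "test.demo"}
    }));
    assert.writeError(db.getSiblingDB("test").demo.insert({_id: 1}));  // trigger 1
    assert.writeError(db.getSiblingDB("test").demo.insert({_id: 1}));  // trigger 2
    assert.writeOK(db.getSiblingDB("test").demo.insert({_id: 1}));     // fail point exhausted
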
diff --git a/jstests/replsets/initial_sync_fcv.js b/jstests/replsets/initial_sync_fcv.js
index 01d2e8d9536..5c8d37a4bdf 100644
--- a/jstests/replsets/initial_sync_fcv.js
+++ b/jstests/replsets/initial_sync_fcv.js
@@ -5,86 +5,84 @@
*/
(function() {
- 'use strict';
-
- load("jstests/libs/feature_compatibility_version.js");
- load('jstests/libs/check_log.js');
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
-
- // We disallow the secondary node from voting so that the primary's featureCompatibilityVersion
- // can be modified while the secondary node is still waiting to complete its initial sync.
- const replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
-
- const primary = rst.getPrimary();
- const dbName = 'foo';
- const collName = 'bar';
-
- assert.writeOK(primary.getDB(dbName).getCollection(collName).insert({a: 1}));
-
- function runInitialSync(cmd, initialFCV) {
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: initialFCV}));
-
- jsTestLog('Testing setting fCV with ' + tojson(cmd));
-
- const failPointOptions = tojson({mode: 'alwaysOn', data: {database: dbName}});
- rst.restart(1, {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangBeforeListCollections': failPointOptions,
- numInitialSyncAttempts: 2
- }
- });
- const secondary = rst.nodes[1];
-
- // Initial sync clones the 'admin' database first, which will set the fCV on the
- // secondary to initialFCV. We then block the secondary before issuing 'listCollections' on
- // the test database.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeListCollections fail point enabled');
-
- // Initial sync is stopped right before 'listCollections' on the test database. We now run
- // the test command to modify the fCV.
- assert.commandWorked(primary.adminCommand(cmd));
-
- // Let initial sync finish, making sure that it fails due to the feature compatibility
- // version change.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeListCollections', mode: 'off'}));
- checkLog.contains(secondary,
- 'Applying operation on feature compatibility version document');
-
- jsTestLog('Wait for both nodes to be up-to-date');
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
-
- let res =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 1);
-
- // We check oplogs and data hashes before we restart the second node.
- rst.checkOplogs();
- rst.checkReplicatedDataHashes();
- }
-
- // Ensure that attempting to downgrade the featureCompatibilityVersion during initial sync
- // fails.
- runInitialSync({setFeatureCompatibilityVersion: lastStableFCV}, /*initialFCV*/ latestFCV);
-
- // Ensure that attempting to upgrade the featureCompatibilityVersion during initial sync fails.
- runInitialSync({setFeatureCompatibilityVersion: latestFCV}, /*initialFCV*/ lastStableFCV);
-
- // Modifications to the featureCompatibilityVersion document during initial sync should be
- // caught and cause initial sync to fail.
- runInitialSync({
- update: 'system.version',
- updates: [{q: {_id: 'featureCompatibilityVersion'}, u: {'version': lastStableFCV}}]
- },
- /*initialFCV*/ latestFCV);
-
- rst.stopSet();
+'use strict';
+
+load("jstests/libs/feature_compatibility_version.js");
+load('jstests/libs/check_log.js');
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+
+// We disallow the secondary node from voting so that the primary's featureCompatibilityVersion
+// can be modified while the secondary node is still waiting to complete its initial sync.
+const replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
+rst.initiate(replSetConfig);
+
+const primary = rst.getPrimary();
+const dbName = 'foo';
+const collName = 'bar';
+
+assert.writeOK(primary.getDB(dbName).getCollection(collName).insert({a: 1}));
+
+function runInitialSync(cmd, initialFCV) {
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: initialFCV}));
+
+ jsTestLog('Testing setting fCV with ' + tojson(cmd));
+
+ const failPointOptions = tojson({mode: 'alwaysOn', data: {database: dbName}});
+ rst.restart(1, {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangBeforeListCollections': failPointOptions,
+ numInitialSyncAttempts: 2
+ }
+ });
+ const secondary = rst.nodes[1];
+
+ // Initial sync clones the 'admin' database first, which will set the fCV on the
+ // secondary to initialFCV. We then block the secondary before issuing 'listCollections' on
+ // the test database.
+ checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeListCollections fail point enabled');
+
+ // Initial sync is stopped right before 'listCollections' on the test database. We now run
+ // the test command to modify the fCV.
+ assert.commandWorked(primary.adminCommand(cmd));
+
+ // Let initial sync finish, making sure that it fails due to the feature compatibility
+ // version change.
+ assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeListCollections', mode: 'off'}));
+ checkLog.contains(secondary, 'Applying operation on feature compatibility version document');
+
+ jsTestLog('Wait for both nodes to be up-to-date');
+ rst.awaitSecondaryNodes();
+ rst.awaitReplication();
+
+ let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+ assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 1);
+
+ // We check oplogs and data hashes before we restart the second node.
+ rst.checkOplogs();
+ rst.checkReplicatedDataHashes();
+}
+
+// Ensure that attempting to downgrade the featureCompatibilityVersion during initial sync
+// fails.
+runInitialSync({setFeatureCompatibilityVersion: lastStableFCV}, /*initialFCV*/ latestFCV);
+
+// Ensure that attempting to upgrade the featureCompatibilityVersion during initial sync fails.
+runInitialSync({setFeatureCompatibilityVersion: latestFCV}, /*initialFCV*/ lastStableFCV);
+
+// Modifications to the featureCompatibilityVersion document during initial sync should be
+// caught and cause initial sync to fail.
+runInitialSync({
+ update: 'system.version',
+ updates: [{q: {_id: 'featureCompatibilityVersion'}, u: {'version': lastStableFCV}}]
+},
+ /*initialFCV*/ latestFCV);
+
+rst.stopSet();
})();
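
For reference, the document that runInitialSync mutates lives in admin.system.version under
_id "featureCompatibilityVersion", so the effect of setFeatureCompatibilityVersion can be
observed directly. A sketch, assuming a connection 'primary' and the helpers from
feature_compatibility_version.js loaded as above:

    const adminDB = primary.getDB("admin");
    assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
    // checkFCV asserts on both the server parameter and the on-disk FCV document.
    checkFCV(adminDB, lastStableFCV);
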
diff --git a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
index a15f5290ea6..eea0ebfab20 100644
--- a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
+++ b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
@@ -14,192 +14,194 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "initial_sync_fetch_from_oldest_active_transaction_timestamp";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 1}));
-
- jsTestLog("Preparing a transaction that will later be committed");
-
- const session1 = primary.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 2}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
-
- jsTestLog("Preparing a transaction that will later be the oldest active transaction");
-
- // Prepare a transaction so that there is an active transaction with an oplog entry. The
- // timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
- // during initial sync.
- let session2 = primary.startSession();
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 1}, {_id: 1, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
-
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
-
- const oplog = primary.getDB("local").getCollection("oplog.rs");
- const txnNum = session2.getTxnNumber_forTesting();
- const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
- assert.neq(op, null);
- const beginFetchingTs = op.ts;
- jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
-
- // Commit the first transaction so that we have an operation that is fetched during initial sync
- // but should not be applied. If this is applied, initial sync will fail because while trying to
- // apply the commitTransaction oplog entry, it will fail to get the prepare oplog
- // entry since its optime is before the beginFetchingTimestamp. Doing another operation will
- // also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
- // that since the beginApplyingTimestamp is the timestamp after which operations are applied
- // during initial sync, this commitTransaction will not be applied.
- const beginApplyingTimestamp =
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
- .operationTime;
-
- jsTestLog("beginApplyingTimestamp: " + beginApplyingTimestamp);
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync after the secondary
- // has copied {_id: 1} and {_id: 2}. This way we can insert more documents when initial sync is
- // paused and know that they won't be copied during collection cloning but instead must be
- // applied during oplog application.
- replTest.stop(secondary,
- // signal
- undefined,
- // Validation would encounter a prepare conflict on the open transaction.
- {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
-
- jsTestLog("Secondary was restarted");
-
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
-
- jsTestLog("Running operations while collection cloning is paused");
-
- // Run some operations on the sync source while collection cloning is paused so that we know
- // they must be applied during the oplog application stage of initial sync. This will also make
- // sure that the beginApplyingTimestamp and the stopTimestamp in initial sync are different. The
- // stopTimestamp is the timestamp of the oplog entry that was last applied on the sync source
- // when the oplog application phase of initial sync begins.
- const stopTimestamp =
- assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 4}]})).operationTime;
-
- jsTestLog("stopTimestamp: " + stopTimestamp);
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- jsTestLog("Initial sync resumed");
-
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- replTest.awaitReplication();
-
- jsTestLog("Initial sync completed");
-
- // Make sure the secondary fetched enough transaction oplog entries.
- secondary.setSlaveOk();
- const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
- assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
-
- // Make sure the first transaction committed properly and is reflected after the initial sync.
- let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
- assert.docEq(res, {_id: 2}, res);
-
- jsTestLog("Stepping up the secondary");
-
- // Step up the secondary after initial sync is done and make sure the transaction is properly
- // prepared.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Force the second session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
- session2.setTxnNumber_forTesting(txnNumber2);
- sessionDB2 = session2.getDatabase(dbName);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the second transaction");
-
- // Make sure we can successfully commit the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp2,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }));
- assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1, a: 1}]);
-
- jsTestLog("Attempting to run another transaction");
-
- // Make sure that we can run another conflicting transaction without any problems.
- session2.startTransaction();
- assert.commandWorked(sessionDB2[collName].update({_id: 1}, {_id: 1, a: 2}));
- prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 2});
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "initial_sync_fetch_from_oldest_active_transaction_timestamp";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 1}));
+
+jsTestLog("Preparing a transaction that will later be committed");
+
+const session1 = primary.startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 2}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+
+jsTestLog("Preparing a transaction that will later be the oldest active transaction");
+
+// Prepare a transaction so that there is an active transaction with an oplog entry. The
+// timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
+// during initial sync.
+let session2 = primary.startSession();
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 1}, {_id: 1, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+const oplog = primary.getDB("local").getCollection("oplog.rs");
+const txnNum = session2.getTxnNumber_forTesting();
+const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
+assert.neq(op, null);
+const beginFetchingTs = op.ts;
+jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
+
+// Commit the first transaction so that we have an operation that is fetched during initial sync
+// but should not be applied. If this is applied, initial sync will fail because while trying to
+// apply the commitTransaction oplog entry, it will fail to get the prepareTransaction oplog
+// entry since its optime is before the beginFetchingTimestamp. Doing another operation will
+// also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
+// that since the beginApplyingTimestamp is the timestamp after which operations are applied
+// during initial sync, this commitTransaction will not be applied.
+const beginApplyingTimestamp =
+ assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
+ .operationTime;
+
+jsTestLog("beginApplyingTimestamp: " + beginApplyingTimestamp);
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync after the secondary
+// has copied {_id: 1} and {_id: 2}. This way we can insert more documents when initial sync is
+// paused and know that they won't be copied during collection cloning but instead must be
+// applied during oplog application.
+replTest.stop(secondary,
+ // signal
+ undefined,
+ // Validation would encounter a prepare conflict on the open transaction.
+ {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
+
+jsTestLog("Secondary was restarted");
+
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+
+jsTestLog("Running operations while collection cloning is paused");
+
+// Run some operations on the sync source while collection cloning is paused so that we know
+// they must be applied during the oplog application stage of initial sync. This will also make
+// sure that the beginApplyingTimestamp and the stopTimestamp in initial sync are different. The
+// stopTimestamp is the timestamp of the oplog entry that was last applied on the sync source
+// when the oplog application phase of initial sync begins.
+const stopTimestamp =
+ assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 4}]})).operationTime;
+
+jsTestLog("stopTimestamp: " + stopTimestamp);
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+jsTestLog("Initial sync resumed");
+
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+replTest.awaitReplication();
+
+jsTestLog("Initial sync completed");
+
+// Make sure the secondary fetched enough transaction oplog entries.
+secondary.setSlaveOk();
+const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
+assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
+
+// Make sure the first transaction committed properly and is reflected after the initial sync.
+let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
+assert.docEq(res, {_id: 2}, res);
+
+jsTestLog("Stepping up the secondary");
+
+// Step up the secondary after initial sync is done and make sure the transaction is properly
+// prepared.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Force the second session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
+session2.setTxnNumber_forTesting(txnNumber2);
+sessionDB2 = session2.getDatabase(dbName);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the second transaction");
+
+// Make sure we can successfully commit the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp2,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}));
+assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1, a: 1}]);
+
+jsTestLog("Attempting to run another transaction");
+
+// Make sure that we can run another conflicting transaction without any problems.
+session2.startTransaction();
+assert.commandWorked(sessionDB2[collName].update({_id: 1}, {_id: 1, a: 2}));
+prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 2});
+
+replTest.stopSet();
})();
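
The prepare/commit sequence above is the standard PrepareHelpers lifecycle; stripped of the
initial sync machinery, it reduces to the following sketch (assuming a replica set primary
connection 'primary' and prepare_helpers.js loaded as above):

    const session = primary.startSession();
    const sessionColl = session.getDatabase("test").getCollection("demo");
    session.startTransaction();
    assert.commandWorked(sessionColl.insert({_id: 1}));
    // prepareTransaction returns the prepareTimestamp, which doubles as the commitTimestamp.
    const prepareTs = PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));
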
diff --git a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
index 898d1303383..9a4ed0368a5 100644
--- a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
+++ b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
@@ -18,100 +18,98 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
- replTest.startSet();
- replTest.initiate();
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName =
- "initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 1}));
-
- jsTestLog("Preparing a transaction that will later be committed");
-
- const session1 = primary.startSession({causalConsistency: false});
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 2}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
-
- jsTestLog("Preparing a transaction that will be the oldest active transaction");
-
- // Prepare a transaction so that there is an active transaction with an oplog entry. The
- // timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
- // during initial sync.
- const session2 = primary.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 3}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
-
- const oplog = primary.getDB("local").getCollection("oplog.rs");
- const txnNum = session2.getTxnNumber_forTesting();
- const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
- assert.neq(op, null);
- const beginFetchingTs = op.ts;
- jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
-
- // Commit the first transaction so that we have an operation that is fetched during initial sync
- // but should not be applied. If this is applied, initial sync will fail because while trying to
- // apply the commitTransaction oplog entry, it will fail to get the prepareTransaction oplog
- // entry since its optime is before the beginFetchingTimestamp. Doing another operation will
- // also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
- // that since the beginApplyingTimestamp is the timestamp after which operations are applied
- // during initial sync, this commitTransaction will not be applied.
- const beginApplyingTimestamp =
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
- .operationTime;
-
- jsTestLog("beginApplyingTimestamp/stopTimestamp: " + beginApplyingTimestamp);
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Since
- // we won't be running any operations during collection cloning, the beginApplyingTimestamp and
- // stopTimestamp should be the same.
- replTest.stop(secondary,
- // signal
- undefined,
- // Validation would encounter a prepare conflict on the open transaction.
- {skipValidation: true});
- secondary = replTest.start(secondary,
- {startClean: true, setParameter: {'numInitialSyncAttempts': 1}},
- true /* wait */);
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- jsTestLog("Secondary was restarted");
-
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Initial sync completed");
-
- // Make sure the secondary fetched enough transaction oplog entries.
- secondary.setSlaveOk();
- const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
- assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
-
- // Make sure the first transaction committed properly and is reflected after the initial sync.
- let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
- assert.docEq(res, {_id: 2}, res);
-
- jsTestLog("Aborting the second transaction");
-
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 1}));
+
+jsTestLog("Preparing a transaction that will later be committed");
+
+const session1 = primary.startSession({causalConsistency: false});
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 2}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+
+jsTestLog("Preparing a transaction that will be the oldest active transaction");
+
+// Prepare a transaction so that there is an active transaction with an oplog entry. The
+// timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
+// during initial sync.
+const session2 = primary.startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 3}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+
+const oplog = primary.getDB("local").getCollection("oplog.rs");
+const txnNum = session2.getTxnNumber_forTesting();
+const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
+assert.neq(op, null);
+const beginFetchingTs = op.ts;
+jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
+
+// Commit the first transaction so that we have an operation that is fetched during initial sync
+// but should not be applied. If this is applied, initial sync will fail because while trying to
+// apply the commitTransaction oplog entry, it will fail to get the prepareTransaction oplog
+// entry since its optime is before the beginFetchingTimestamp. Doing another operation will
+// also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
+// that since the beginApplyingTimestamp is the timestamp after which operations are applied
+// during initial sync, this commitTransaction will not be applied.
+const beginApplyingTimestamp =
+ assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
+ .operationTime;
+
+jsTestLog("beginApplyingTimestamp/stopTimestamp: " + beginApplyingTimestamp);
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Since
+// we won't be running any operations during collection cloning, the beginApplyingTimestamp and
+// stopTimestamp should be the same.
+replTest.stop(secondary,
+ // signal
+ undefined,
+ // Validation would encounter a prepare conflict on the open transaction.
+ {skipValidation: true});
+secondary = replTest.start(
+ secondary, {startClean: true, setParameter: {'numInitialSyncAttempts': 1}}, true /* wait */);
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+jsTestLog("Secondary was restarted");
+
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+jsTestLog("Initial sync completed");
+
+// Make sure the secondary fetched enough transaction oplog entries.
+secondary.setSlaveOk();
+const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
+assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
+
+// Make sure the first transaction committed properly and is reflected after the initial sync.
+let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
+assert.docEq(res, {_id: 2}, res);
+
+jsTestLog("Aborting the second transaction");
+
+assert.commandWorked(session2.abortTransaction_forTesting());
+
+replTest.stopSet();
})();
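
Both of these tests derive the expected beginFetchingTimestamp the same way: the first oplog
entry written by the still-active transaction, located by lsid and txnNumber. In isolation
(assuming 'primary' and an open prepared session 'session2' as above):

    const oplog = primary.getDB("local").getCollection("oplog.rs");
    const entry = oplog.findOne({
        "txnNumber": session2.getTxnNumber_forTesting(),
        "lsid.id": session2.getSessionId().id
    });
    assert.neq(entry, null);
    jsTestLog("Expected beginFetchingTimestamp: " + entry.ts);
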
diff --git a/jstests/replsets/initial_sync_invalid_index_spec.js b/jstests/replsets/initial_sync_invalid_index_spec.js
index 1d329b94b26..24f8b773118 100644
--- a/jstests/replsets/initial_sync_invalid_index_spec.js
+++ b/jstests/replsets/initial_sync_invalid_index_spec.js
@@ -4,53 +4,53 @@
*/
(function() {
- "use strict";
+"use strict";
- // Skip db hash check because of invalid index spec.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because of invalid index spec.
+TestData.skipCheckDBHashes = true;
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const testName = "initial_sync_invalid_index_spec";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const testName = "initial_sync_invalid_index_spec";
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primaryDB = replTest.getPrimary().getDB(testName);
+let primaryDB = replTest.getPrimary().getDB(testName);
- // Create a V2 index with invalid spec field.
- primaryDB.adminCommand(
- {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption: 1}]}));
+// Create a V2 index with invalid spec field.
+primaryDB.adminCommand(
+ {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption: 1}]}));
- // Add another node to the replica set to allow an initial sync to occur.
- var initSyncNode = replTest.add();
- var initSyncNodeAdminDB = initSyncNode.getDB("admin");
+// Add another node to the replica set to allow an initial sync to occur.
+var initSyncNode = replTest.add();
+var initSyncNodeAdminDB = initSyncNode.getDB("admin");
- clearRawMongoProgramOutput();
- reInitiateWithoutThrowingOnAbortedMember(replTest);
+clearRawMongoProgramOutput();
+reInitiateWithoutThrowingOnAbortedMember(replTest);
- assert.soon(
- function() {
- try {
- initSyncNodeAdminDB.runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- },
- "Node did not terminate due to invalid index spec during initial sync",
- ReplSetTest.kDefaultTimeoutMS);
+assert.soon(
+ function() {
+ try {
+ initSyncNodeAdminDB.runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+ },
+ "Node did not terminate due to invalid index spec during initial sync",
+ ReplSetTest.kDefaultTimeoutMS);
- replTest.stop(initSyncNode, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
+replTest.stop(initSyncNode, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
- const msgInvalidOption = "The field 'invalidOption' is not valid for an index specification";
- const msgInitialSyncFatalAssertion = "Fatal assertion 40088 InitialSyncFailure";
+const msgInvalidOption = "The field 'invalidOption' is not valid for an index specification";
+const msgInitialSyncFatalAssertion = "Fatal assertion 40088 InitialSyncFailure";
- assert(rawMongoProgramOutput().match(msgInvalidOption) &&
- rawMongoProgramOutput().match(msgInitialSyncFatalAssertion),
- "Initial sync should have aborted on invalid index specification");
+assert(rawMongoProgramOutput().match(msgInvalidOption) &&
+ rawMongoProgramOutput().match(msgInitialSyncFatalAssertion),
+ "Initial sync should have aborted on invalid index specification");
- replTest.stopSet();
+replTest.stopSet();
})();
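
The clearRawMongoProgramOutput()/rawMongoProgramOutput() pair above is the shell's mechanism
for asserting on another process's log output; wrapping the match in assert.soon tolerates
the output arriving asynchronously. A compact sketch of the pattern, reusing the fatal
assertion message from this test:

    clearRawMongoProgramOutput();
    // ... restart the node that is expected to abort ...
    assert.soon(() => rawMongoProgramOutput().match("Fatal assertion 40088 InitialSyncFailure"),
                "expected the node to abort with InitialSyncFailure");
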
diff --git a/jstests/replsets/initial_sync_invalid_views.js b/jstests/replsets/initial_sync_invalid_views.js
index 056f7d5694e..a02498aaa40 100644
--- a/jstests/replsets/initial_sync_invalid_views.js
+++ b/jstests/replsets/initial_sync_invalid_views.js
@@ -3,36 +3,36 @@
// views were present. This test ensures that crashes no longer occur in those circumstances.
(function() {
- 'use strict';
+'use strict';
- const name = "initial_sync_invalid_views";
- let replSet = new ReplSetTest({name: name, nodes: 1});
+const name = "initial_sync_invalid_views";
+let replSet = new ReplSetTest({name: name, nodes: 1});
- let oplogSizeOnPrimary = 1; // size in MB
- replSet.startSet({oplogSize: oplogSizeOnPrimary});
- replSet.initiate();
- let primary = replSet.getPrimary();
+let oplogSizeOnPrimary = 1; // size in MB
+replSet.startSet({oplogSize: oplogSizeOnPrimary});
+replSet.initiate();
+let primary = replSet.getPrimary();
- let coll = primary.getDB('test').foo;
- assert.writeOK(coll.insert({a: 1}));
+let coll = primary.getDB('test').foo;
+assert.writeOK(coll.insert({a: 1}));
- // Add a secondary node but make it hang before copying databases.
- let secondary = replSet.add();
- secondary.setSlaveOk();
+// Add a secondary node but make it hang before copying databases.
+let secondary = replSet.add();
+secondary.setSlaveOk();
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+replSet.reInitiate();
- assert.writeOK(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)}));
+assert.writeOK(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)}));
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- replSet.awaitSecondaryNodes(200 * 1000);
+replSet.awaitSecondaryNodes(200 * 1000);
- // Skip collection validation during stopMongod if invalid views exists.
- TestData.skipValidationOnInvalidViewDefinitions = true;
+// Skip collection validation during stopMongod if invalid views exist.
+TestData.skipValidationOnInvalidViewDefinitions = true;
- replSet.stopSet();
+replSet.stopSet();
})();
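
The shape of this test recurs throughout the patch: pause initial sync before database
cloning, mutate the sync source, then resume. Reduced to its skeleton (a sketch, with the
insert standing in for whatever state a test needs to create mid-sync):

    assert.commandWorked(secondary.getDB('admin').runCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
    replSet.reInitiate();
    // The syncing node is now paused; create the interesting state on the primary here.
    assert.writeOK(primary.getDB('test').foo.insert({a: 2}));
    assert.commandWorked(secondary.getDB('admin').runCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
    replSet.awaitSecondaryNodes();
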
diff --git a/jstests/replsets/initial_sync_move_forward.js b/jstests/replsets/initial_sync_move_forward.js
index 070e3243be5..2561e16b0c1 100644
--- a/jstests/replsets/initial_sync_move_forward.js
+++ b/jstests/replsets/initial_sync_move_forward.js
@@ -12,88 +12,87 @@
// This also works for wiredTiger, because we grow the document by deleting and reinserting it, so
// the newly inserted document is included in the cursor on the source.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var rst = new ReplSetTest({name: "initial_sync_move_forward", nodes: 1});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: "initial_sync_move_forward", nodes: 1});
+rst.startSet();
+rst.initiate();
- var masterColl = rst.getPrimary().getDB("test").coll;
+var masterColl = rst.getPrimary().getDB("test").coll;
- // Insert 500000 documents. Make the last two documents larger, so that {_id: 0, x: 0} and {_id:
- // 1, x: 1} will fit into their positions when we grow them.
- var count = 500000;
- var bulk = masterColl.initializeUnorderedBulkOp();
- for (var i = 0; i < count - 2; ++i) {
- bulk.insert({_id: i, x: i});
- }
- var longString = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- bulk.insert({_id: count - 2, x: count - 2, longString: longString});
- bulk.insert({_id: count - 1, x: count - 1, longString: longString});
- assert.writeOK(bulk.execute());
+// Insert 500000 documents. Make the last two documents larger, so that {_id: 0, x: 0} and {_id:
+// 1, x: 1} will fit into their positions when we grow them.
+var count = 500000;
+var bulk = masterColl.initializeUnorderedBulkOp();
+for (var i = 0; i < count - 2; ++i) {
+ bulk.insert({_id: i, x: i});
+}
+var longString = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+bulk.insert({_id: count - 2, x: count - 2, longString: longString});
+bulk.insert({_id: count - 1, x: count - 1, longString: longString});
+assert.writeOK(bulk.execute());
- // Create a unique index on {x: 1}.
- assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true}));
+// Create a unique index on {x: 1}.
+assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true}));
- // Add a secondary.
- var secondary = rst.add({setParameter: "numInitialSyncAttempts=1"});
- secondary.setSlaveOk();
- var secondaryColl = secondary.getDB("test").coll;
+// Add a secondary.
+var secondary = rst.add({setParameter: "numInitialSyncAttempts=1"});
+secondary.setSlaveOk();
+var secondaryColl = secondary.getDB("test").coll;
- // Pause initial sync when the secondary has copied {_id: 0, x: 0} and {_id: 1, x: 1}.
- assert.commandWorked(secondary.adminCommand({
- configureFailPoint: "initialSyncHangDuringCollectionClone",
- data: {namespace: secondaryColl.getFullName(), numDocsToClone: 2},
- mode: "alwaysOn"
- }));
- rst.reInitiate();
- assert.soon(function() {
- var logMessages = assert.commandWorked(secondary.adminCommand({getLog: "global"})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(
- "initial sync - initialSyncHangDuringCollectionClone fail point enabled") !=
- -1) {
- return true;
- }
+// Pause initial sync when the secondary has copied {_id: 0, x: 0} and {_id: 1, x: 1}.
+assert.commandWorked(secondary.adminCommand({
+ configureFailPoint: "initialSyncHangDuringCollectionClone",
+ data: {namespace: secondaryColl.getFullName(), numDocsToClone: 2},
+ mode: "alwaysOn"
+}));
+rst.reInitiate();
+assert.soon(function() {
+ var logMessages = assert.commandWorked(secondary.adminCommand({getLog: "global"})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(
+ "initial sync - initialSyncHangDuringCollectionClone fail point enabled") != -1) {
+ return true;
}
- return false;
- });
+ }
+ return false;
+});
- // Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. This
- // will cause the secondary to clone {_id: 0} again.
- // Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case.
- assert.writeOK(masterColl.remove({_id: 0, x: 0}));
- assert.writeOK(masterColl.remove({_id: count - 2, x: count - 2}));
- assert.writeOK(masterColl.insert({_id: 0, x: count, longString: longString}));
+// Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. This
+// will cause the secondary to clone {_id: 0} again.
+// Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case.
+assert.writeOK(masterColl.remove({_id: 0, x: 0}));
+assert.writeOK(masterColl.remove({_id: count - 2, x: count - 2}));
+assert.writeOK(masterColl.insert({_id: 0, x: count, longString: longString}));
- // Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This
- // will cause the secondary to clone {x: 1} again.
- // Change the value for _id so that we are not testing the uniqueness of _id in this case.
- assert.writeOK(masterColl.remove({_id: 1, x: 1}));
- assert.writeOK(masterColl.remove({_id: count - 1, x: count - 1}));
- assert.writeOK(masterColl.insert({_id: count, x: 1, longString: longString}));
+// Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This
+// will cause the secondary to clone {x: 1} again.
+// Change the value for _id so that we are not testing the uniqueness of _id in this case.
+assert.writeOK(masterColl.remove({_id: 1, x: 1}));
+assert.writeOK(masterColl.remove({_id: count - 1, x: count - 1}));
+assert.writeOK(masterColl.insert({_id: count, x: 1, longString: longString}));
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
- // Wait for initial sync to finish.
- rst.awaitSecondaryNodes();
+// Wait for initial sync to finish.
+rst.awaitSecondaryNodes();
- // Check document count on secondary.
- assert.eq(count - 2, secondaryColl.find().itcount());
+// Check document count on secondary.
+assert.eq(count - 2, secondaryColl.find().itcount());
- // Check for {_id: 0} on secondary.
- assert.eq(1, secondaryColl.find({_id: 0, x: count}).itcount());
+// Check for {_id: 0} on secondary.
+assert.eq(1, secondaryColl.find({_id: 0, x: count}).itcount());
- // Check for {x: 1} on secondary.
- assert.eq(1, secondaryColl.find({_id: count, x: 1}).itcount());
+// Check for {x: 1} on secondary.
+assert.eq(1, secondaryColl.find({_id: count, x: 1}).itcount());
- // Check for unique index on secondary.
- var indexSpec = GetIndexHelpers.findByKeyPattern(secondaryColl.getIndexes(), {x: 1});
- assert.neq(null, indexSpec);
- assert.eq(true, indexSpec.unique);
- rst.stopSet();
+// Check for unique index on secondary.
+var indexSpec = GetIndexHelpers.findByKeyPattern(secondaryColl.getIndexes(), {x: 1});
+assert.neq(null, indexSpec);
+assert.eq(true, indexSpec.unique);
+rst.stopSet();
})();
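
The assert.soon/getLog loop above predates the checkLog helper used elsewhere in this patch;
with jstests/libs/check_log.js loaded, it should collapse to a single call (a sketch,
equivalent up to the helper's polling and timeout behavior):

    load("jstests/libs/check_log.js");
    checkLog.contains(secondary,
                      "initial sync - initialSyncHangDuringCollectionClone fail point enabled");
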
diff --git a/jstests/replsets/initial_sync_oplog_hole.js b/jstests/replsets/initial_sync_oplog_hole.js
index a6805102120..190099cd571 100644
--- a/jstests/replsets/initial_sync_oplog_hole.js
+++ b/jstests/replsets/initial_sync_oplog_hole.js
@@ -4,96 +4,95 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/replsets/rslib.js");
+load("jstests/libs/check_log.js");
+load("jstests/replsets/rslib.js");
- // Set up replica set. Disallow chaining so nodes always sync from primary.
- const testName = "initial_sync_oplog_hole";
- const dbName = testName;
- // Set up a three-node replset. The first node is primary throughout the test, the second node
- // is used as the initial sync node, and the third node is to ensure we maintain a majority (and
- // thus no election) while restarting the second.
- const replTest = new ReplSetTest({
- name: testName,
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replTest.startSet();
- replTest.initiate();
+// Set up replica set. Disallow chaining so nodes always sync from primary.
+const testName = "initial_sync_oplog_hole";
+const dbName = testName;
+// Set up a three-node replset. The first node is primary throughout the test, the second node
+// is used as the initial sync node, and the third node is to ensure we maintain a majority (and
+// thus no election) while restarting the second.
+const replTest = new ReplSetTest({
+ name: testName,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const secondary = replTest.getSecondary();
- const secondaryDB = secondary.getDB(dbName);
- const collName = "testcoll";
- const primaryColl = primaryDB[collName];
- const secondaryColl = secondaryDB[collName];
- const nss = primaryColl.getFullName();
- TestData.testName = testName;
- TestData.collectionName = collName;
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const secondary = replTest.getSecondary();
+const secondaryDB = secondary.getDB(dbName);
+const collName = "testcoll";
+const primaryColl = primaryDB[collName];
+const secondaryColl = secondaryDB[collName];
+const nss = primaryColl.getFullName();
+TestData.testName = testName;
+TestData.collectionName = collName;
- jsTestLog("Writing data before oplog hole to collection.");
- assert.writeOK(primaryColl.insert({_id: "a"}));
- // Make sure it gets written out.
- assert.eq(primaryColl.find({_id: "a"}).itcount(), 1);
+jsTestLog("Writing data before oplog hole to collection.");
+assert.writeOK(primaryColl.insert({_id: "a"}));
+// Make sure it gets written out.
+assert.eq(primaryColl.find({_id: "a"}).itcount(), 1);
- jsTest.log("Create the uncommitted write.");
- assert.commandWorked(primaryDB.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: primaryColl.getFullName(), first_id: "b"}
- }));
+jsTest.log("Create the uncommitted write.");
+assert.commandWorked(primaryDB.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: primaryColl.getFullName(), first_id: "b"}
+}));
- const db = primaryDB;
- const joinHungWrite = startParallelShell(() => {
- assert.commandWorked(
- db.getSiblingDB(TestData.testName)[TestData.collectionName].insert({_id: "b"}));
- }, primary.port);
- checkLog.contains(
- primaryDB.getMongo(),
- "hangAfterCollectionInserts fail point enabled for " + primaryColl.getFullName());
+const db = primaryDB;
+const joinHungWrite = startParallelShell(() => {
+ assert.commandWorked(
+ db.getSiblingDB(TestData.testName)[TestData.collectionName].insert({_id: "b"}));
+}, primary.port);
+checkLog.contains(primaryDB.getMongo(),
+ "hangAfterCollectionInserts fail point enabled for " + primaryColl.getFullName());
- jsTest.log("Create a write following the uncommitted write.");
- assert.writeOK(primaryColl.insert({_id: "c"}));
- // Make sure it gets written out.
- assert.eq(primaryColl.find({_id: "c"}).itcount(), 1);
+jsTest.log("Create a write following the uncommitted write.");
+assert.writeOK(primaryColl.insert({_id: "c"}));
+// Make sure it gets written out.
+assert.eq(primaryColl.find({_id: "c"}).itcount(), 1);
- jsTestLog("Restarting initial sync node.");
- replTest.restart(secondary, {startClean: true});
- jsTestLog("Waiting for initial sync to start.");
- checkLog.contains(secondaryDB.getMongo(), "Starting initial sync");
+jsTestLog("Restarting initial sync node.");
+replTest.restart(secondary, {startClean: true});
+jsTestLog("Waiting for initial sync to start.");
+checkLog.contains(secondaryDB.getMongo(), "Starting initial sync");
- // The 5 seconds is because in the non-buggy case, we'll be hung waiting for the optime to
- // advance. However, if we allow the write to finish immediately, we are likely to miss the
- // race if it happens. By allowing 5 seconds, we'll never fail when we should succeed, and
- // we'll nearly always fail when we should fail.
- //
- // Once the hangAfterCollectionInserts failpoint is turned off, the write of {_id: "b"} will
- // complete and both the data and the oplog entry for the write will be written out. The oplog
- // visibility thread will then close the oplog hole.
- jsTestLog("Allow the uncommitted write to finish in 5 seconds.");
- const joinDisableFailPoint = startParallelShell(() => {
- sleep(5000);
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "hangAfterCollectionInserts", mode: "off"}));
- }, primary.port);
+// The 5 seconds is because in the non-buggy case, we'll be hung waiting for the optime to
+// advance. However, if we allow the write to finish immediately, we are likely to miss the
+// race if it happens. By allowing 5 seconds, we'll never fail when we should succeed, and
+// we'll nearly always fail when we should fail.
+//
+// Once the hangAfterCollectionInserts failpoint is turned off, the write of {_id: "b"} will
+// complete and both the data and the oplog entry for the write will be written out. The oplog
+// visibility thread will then close the oplog hole.
+jsTestLog("Allow the uncommitted write to finish in 5 seconds.");
+const joinDisableFailPoint = startParallelShell(() => {
+ sleep(5000);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangAfterCollectionInserts", mode: "off"}));
+}, primary.port);
- jsTestLog("Waiting for initial sync to complete.");
- waitForState(secondary, ReplSetTest.State.SECONDARY);
+jsTestLog("Waiting for initial sync to complete.");
+waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Joining hung write");
- joinDisableFailPoint();
- joinHungWrite();
+jsTestLog("Joining hung write");
+joinDisableFailPoint();
+joinHungWrite();
- jsTestLog("Checking that primary has all data items.");
- // Make sure the primary collection has all three data items.
- assert.docEq(primaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
+jsTestLog("Checking that primary has all data items.");
+// Make sure the primary collection has all three data items.
+assert.docEq(primaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
- jsTestLog("Checking that secondary has all data items.");
- replTest.awaitReplication();
- assert.docEq(secondaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
+jsTestLog("Checking that secondary has all data items.");
+replTest.awaitReplication();
+assert.docEq(secondaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
- replTest.stopSet();
+replTest.stopSet();
})();
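
The test above follows a common failpoint pattern: arm the failpoint, launch the blocked operation in a parallel shell, wait for the server to log that the failpoint was hit, then disarm it and join. A minimal sketch of that pattern, not part of this patch, assuming a live connection `primary` to a replica-set primary and the standard shell test helpers:

// Arm the failpoint so the next insert hangs after writing to the collection.
assert.commandWorked(primary.adminCommand(
    {configureFailPoint: "hangAfterCollectionInserts", mode: "alwaysOn"}));
// Run the blocked write in a parallel shell so this shell stays free.
const joinWrite = startParallelShell(() => {
    assert.commandWorked(db.getSiblingDB("test").coll.insert({_id: 1}));
}, primary.port);
// Wait until the server logs that the failpoint was reached, then release it.
checkLog.contains(primary, "hangAfterCollectionInserts fail point enabled");
assert.commandWorked(primary.adminCommand(
    {configureFailPoint: "hangAfterCollectionInserts", mode: "off"}));
joinWrite();
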
diff --git a/jstests/replsets/initial_sync_oplog_rollover.js b/jstests/replsets/initial_sync_oplog_rollover.js
index ba1da0f14a8..7ffe8c98dd4 100644
--- a/jstests/replsets/initial_sync_oplog_rollover.js
+++ b/jstests/replsets/initial_sync_oplog_rollover.js
@@ -8,66 +8,65 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- var name = 'initial_sync_oplog_rollover';
- var replSet = new ReplSetTest({
- name: name,
- // This test requires a third node (added later) to be syncing when the oplog rolls
- // over. Rolling over the oplog requires a majority of nodes to have confirmed and
- // persisted those writes. Set the syncdelay to one to speed up checkpointing.
- nodeOptions: {syncdelay: 1},
- nodes: [
- {rsConfig: {priority: 1}},
- {rsConfig: {priority: 0}},
- ],
- });
+var name = 'initial_sync_oplog_rollover';
+var replSet = new ReplSetTest({
+ name: name,
+ // This test requires a third node (added later) to be syncing when the oplog rolls
+ // over. Rolling over the oplog requires a majority of nodes to have confirmed and
+ // persisted those writes. Set the syncdelay to one to speed up checkpointing.
+ nodeOptions: {syncdelay: 1},
+ nodes: [
+ {rsConfig: {priority: 1}},
+ {rsConfig: {priority: 0}},
+ ],
+});
- var oplogSizeOnPrimary = 1; // size in MB
- replSet.startSet({oplogSize: oplogSizeOnPrimary});
- replSet.initiate();
- var primary = replSet.getPrimary();
+var oplogSizeOnPrimary = 1; // size in MB
+replSet.startSet({oplogSize: oplogSizeOnPrimary});
+replSet.initiate();
+var primary = replSet.getPrimary();
- var coll = primary.getDB('test').foo;
- assert.writeOK(coll.insert({a: 1}));
+var coll = primary.getDB('test').foo;
+assert.writeOK(coll.insert({a: 1}));
- function getFirstOplogEntry(conn) {
- return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
- }
+function getFirstOplogEntry(conn) {
+ return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
+}
- var firstOplogEntry = getFirstOplogEntry(primary);
+var firstOplogEntry = getFirstOplogEntry(primary);
- // Add a secondary node but make it hang before copying databases.
- var secondary = replSet.add();
- secondary.setSlaveOk();
+// Add a secondary node but make it hang before copying databases.
+var secondary = replSet.add();
+secondary.setSlaveOk();
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+replSet.reInitiate();
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- // Keep inserting large documents until they roll over the oplog.
- const largeStr = new Array(4 * 1024 * oplogSizeOnPrimary).join('aaaaaaaa');
- var i = 0;
- while (bsonWoCompare(getFirstOplogEntry(primary), firstOplogEntry) === 0) {
- assert.writeOK(coll.insert({a: 2, x: i++, long_str: largeStr}));
- sleep(100);
- }
+// Keep inserting large documents until they roll over the oplog.
+const largeStr = new Array(4 * 1024 * oplogSizeOnPrimary).join('aaaaaaaa');
+var i = 0;
+while (bsonWoCompare(getFirstOplogEntry(primary), firstOplogEntry) === 0) {
+ assert.writeOK(coll.insert({a: 2, x: i++, long_str: largeStr}));
+ sleep(100);
+}
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- replSet.awaitSecondaryNodes(200 * 1000);
+replSet.awaitSecondaryNodes(200 * 1000);
- assert.eq(i,
- secondary.getDB('test').foo.count({a: 2}),
- 'collection successfully synced to secondary');
+assert.eq(
+ i, secondary.getDB('test').foo.count({a: 2}), 'collection successfully synced to secondary');
- assert.eq(0,
- secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
- "Oplog buffer was not dropped after initial sync");
- replSet.stopSet();
+assert.eq(0,
+ secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
+ "Oplog buffer was not dropped after initial sync");
+replSet.stopSet();
})();
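
The rollover loop in initial_sync_oplog_rollover.js works by watching the oldest entry of the capped oplog: once the first document changes, the oplog has truncated. A condensed sketch of that idiom, assuming a connection `conn` to a replica-set member:

function getFirstOplogEntry(conn) {
    // {$natural: 1} reads the capped collection in insertion order,
    // so this returns the oldest surviving oplog entry.
    return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
}
const before = getFirstOplogEntry(conn);
// ... write enough data to exceed the configured oplog size ...
// While bsonWoCompare(...) === 0 the oldest entry is unchanged, i.e. the
// oplog has not yet rolled over.
const rolledOver = bsonWoCompare(getFirstOplogEntry(conn), before) !== 0;
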
diff --git a/jstests/replsets/initial_sync_preserves_active_txns.js b/jstests/replsets/initial_sync_preserves_active_txns.js
index 560b781a6e1..e037c1c29de 100644
--- a/jstests/replsets/initial_sync_preserves_active_txns.js
+++ b/jstests/replsets/initial_sync_preserves_active_txns.js
@@ -11,92 +11,91 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- oplogSize: PrepareHelpers.oplogSizeMB,
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: 1
- });
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ oplogSize: PrepareHelpers.oplogSizeMB,
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: 1
+ });
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
- jsTestLog("Prepare a transaction");
+ jsTestLog("Prepare a transaction");
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const txnEntry = primary.getDB("config").transactions.findOne();
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ const txnEntry = primary.getDB("config").transactions.findOne();
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
- // Make sure that the timestamp of the first oplog entry for this transaction matches the
- // start timestamp in the transactions table.
- let oplog = primary.getDB("local").getCollection("oplog.rs");
- const txnNum = session.getTxnNumber_forTesting();
- const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session.getSessionId().id});
- assert.neq(op, null);
- const firstTxnOpTs = op.ts;
- assert.eq(txnEntry.startOpTime.ts, firstTxnOpTs, tojson(txnEntry));
+ // Make sure that the timestamp of the first oplog entry for this transaction matches the
+ // start timestamp in the transactions table.
+ let oplog = primary.getDB("local").getCollection("oplog.rs");
+ const txnNum = session.getTxnNumber_forTesting();
+ const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session.getSessionId().id});
+ assert.neq(op, null);
+ const firstTxnOpTs = op.ts;
+ assert.eq(txnEntry.startOpTime.ts, firstTxnOpTs, tojson(txnEntry));
- jsTestLog("Insert documents until oplog exceeds oplogSize");
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
- jsTestLog("Make sure the transaction's first entry is still in the oplog");
+ jsTestLog("Make sure the transaction's first entry is still in the oplog");
- assert.eq(primaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
+ assert.eq(primaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
- jsTestLog("Add a secondary node");
+ jsTestLog("Add a secondary node");
- const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
- replSet.reInitiate();
+ const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
+ replSet.reInitiate();
- jsTestLog("Reinitiated, awaiting secondary node");
+ jsTestLog("Reinitiated, awaiting secondary node");
- replSet.awaitSecondaryNodes();
+ replSet.awaitSecondaryNodes();
- jsTestLog("Checking secondary oplog and config.transactions");
+ jsTestLog("Checking secondary oplog and config.transactions");
- // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- assert.eq(secondaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ assert.eq(secondaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
- const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
- assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
- replSet.awaitReplication();
+ replSet.awaitReplication();
- PrepareHelpers.awaitOplogTruncation(replSet);
- replSet.stopSet();
- }
- doTest("commit");
- doTest("abort");
+ PrepareHelpers.awaitOplogTruncation(replSet);
+ replSet.stopSet();
+}
+doTest("commit");
+doTest("abort");
})();
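
initial_sync_preserves_active_txns.js leans on prepare_helpers.js for the prepared-transaction lifecycle. Roughly, the sequence it drives looks like the following sketch, assuming a `primary` connection and that helper library:

load("jstests/core/txns/libs/prepare_helpers.js");
const session = primary.startSession();
session.startTransaction();
assert.commandWorked(session.getDatabase("test").test.insert({inTxn: 1}));
// prepareTransaction returns the prepare timestamp. Until the transaction
// commits or aborts, its first oplog entry must survive oplog truncation.
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
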
diff --git a/jstests/replsets/initial_sync_read_concern_no_oplog.js b/jstests/replsets/initial_sync_read_concern_no_oplog.js
index e52ac1faa06..6ad3974cea3 100644
--- a/jstests/replsets/initial_sync_read_concern_no_oplog.js
+++ b/jstests/replsets/initial_sync_read_concern_no_oplog.js
@@ -1,33 +1,33 @@
// Test that if an afterClusterTime query is issued to a node in initial sync that has not yet
// created its oplog, the node returns an error rather than crashing.
(function() {
- 'use strict';
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/libs/check_log.js');
- const replSet = new ReplSetTest({nodes: 1});
+const replSet = new ReplSetTest({nodes: 1});
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const secondary = replSet.add();
+replSet.startSet();
+replSet.initiate();
+const primary = replSet.getPrimary();
+const secondary = replSet.add();
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'alwaysOn'}));
+replSet.reInitiate();
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCreatingOplog fail point enabled');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCreatingOplog fail point enabled');
- assert.commandFailedWithCode(
- secondary.getDB('local').runCommand(
- {find: 'coll', limit: 1, readConcern: {afterClusterTime: Timestamp(1, 1)}}),
- ErrorCodes.NotYetInitialized);
+assert.commandFailedWithCode(
+ secondary.getDB('local').runCommand(
+ {find: 'coll', limit: 1, readConcern: {afterClusterTime: Timestamp(1, 1)}}),
+ ErrorCodes.NotYetInitialized);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'off'}));
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'off'}));
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- replSet.stopSet();
+replSet.stopSet();
})(); \ No newline at end of file
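
For contrast with the NotYetInitialized failure asserted above, the same afterClusterTime read against a healthy, initialized node simply succeeds, blocking if needed until the node's cluster time catches up. A sketch, assuming a connection `conn` and a Timestamp `ts` the set has already reached:

assert.commandWorked(conn.getDB('test').runCommand(
    {find: 'coll', limit: 1, readConcern: {afterClusterTime: ts}}));
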
diff --git a/jstests/replsets/initial_sync_rename_collection.js b/jstests/replsets/initial_sync_rename_collection.js
index c4281704dec..9c63d7c55b1 100644
--- a/jstests/replsets/initial_sync_rename_collection.js
+++ b/jstests/replsets/initial_sync_rename_collection.js
@@ -5,101 +5,99 @@
*/
(function() {
- 'use strict';
-
- load('jstests/replsets/rslib.js');
- const basename = 'initial_sync_rename_collection';
-
- jsTestLog('Bring up a replica set');
- const rst = new ReplSetTest({name: basename, nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const db0_name = "db0";
- const db1_name = "db1";
-
- const primary = rst.getPrimary();
-
- // Create two separate databases so that we can rename a collection across databases.
- const primary_db0 = primary.getDB(db0_name);
- const primary_db1 = primary.getDB(db1_name);
-
- jsTestLog("Create collections on primary");
- const collRenameWithinDB_name = 'coll_1';
- const collRenameAcrossDBs_name = 'coll_2';
- const collWithinFinal_name = 'renamed';
- const collAcrossFinal_name = 'renamed_across';
-
- // Create two collections on the same database. One will be renamed within the database
- // and the other will be renamed to a different database.
- assert.writeOK(primary_db0[collRenameWithinDB_name].save({}));
- assert.writeOK(primary_db0[collRenameAcrossDBs_name].save({}));
-
- jsTestLog('Waiting for replication');
- rst.awaitReplication();
-
- jsTestLog('Bring up a new node');
- const secondary = rst.add({setParameter: 'numInitialSyncAttempts=1'});
-
- // Add a fail point that causes the secondary's initial sync to hang before
- // copying databases.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
-
- jsTestLog('Begin initial sync on secondary');
- let conf = rst.getPrimary().getDB('admin').runCommand({replSetGetConfig: 1}).config;
- conf.members.push({_id: 1, host: secondary.host, priority: 0, votes: 0});
- conf.version++;
- assert.commandWorked(rst.getPrimary().getDB('admin').runCommand({replSetReconfig: conf}));
- assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
-
- // Confirm that initial sync started on the secondary node.
- jsTestLog('Waiting for initial sync to start');
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
-
- // Start renaming collections while initial sync is hanging.
- jsTestLog('Rename collection ' + db0_name + '.' + collRenameWithinDB_name + ' to ' + db0_name +
- '.' + collWithinFinal_name + ' on the sync source ' + db0_name);
- assert.commandWorked(
- primary_db0[collRenameWithinDB_name].renameCollection(collWithinFinal_name));
-
- jsTestLog('Rename collection ' + db0_name + '.' + collRenameAcrossDBs_name + ' to ' + db1_name +
- '.' + collAcrossFinal_name + ' on the sync source ' + db0_name);
- assert.commandWorked(primary.adminCommand({
- renameCollection: primary_db0[collRenameAcrossDBs_name].getFullName(),
- to: primary_db1[collAcrossFinal_name]
- .getFullName() // Collection 'renamed_across' is implicitly created.
- }));
-
- // Disable fail point so that the secondary can finish its initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
-
- jsTestLog('Wait for both nodes to be up-to-date');
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
-
- const secondary_db0 = secondary.getDB(db0_name);
- const secondary_db1 = secondary.getDB(db1_name);
-
- jsTestLog('Check that collection was renamed correctly on the secondary');
- assert.eq(secondary_db0[collWithinFinal_name].find().itcount(),
- 1,
- 'renamed collection does not exist');
- assert.eq(secondary_db1[collAcrossFinal_name].find().itcount(),
- 1,
- 'renamed_across collection does not exist');
- assert.eq(secondary_db0[collRenameWithinDB_name].find().itcount(),
- 0,
- 'collection ' + collRenameWithinDB_name +
- ' still exists after it was supposed to be renamed');
- assert.eq(secondary_db0[collRenameAcrossDBs_name].find().itcount(),
- 0,
- 'collection ' + collRenameAcrossDBs_name +
- ' still exists after it was supposed to be renamed');
-
- rst.checkReplicatedDataHashes();
- rst.checkOplogs();
- rst.stopSet();
+'use strict';
+
+load('jstests/replsets/rslib.js');
+const basename = 'initial_sync_rename_collection';
+
+jsTestLog('Bring up a replica set');
+const rst = new ReplSetTest({name: basename, nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const db0_name = "db0";
+const db1_name = "db1";
+
+const primary = rst.getPrimary();
+
+// Create two separate databases so that we can rename a collection across databases.
+const primary_db0 = primary.getDB(db0_name);
+const primary_db1 = primary.getDB(db1_name);
+
+jsTestLog("Create collections on primary");
+const collRenameWithinDB_name = 'coll_1';
+const collRenameAcrossDBs_name = 'coll_2';
+const collWithinFinal_name = 'renamed';
+const collAcrossFinal_name = 'renamed_across';
+
+// Create two collections on the same database. One will be renamed within the database
+// and the other will be renamed to a different database.
+assert.writeOK(primary_db0[collRenameWithinDB_name].save({}));
+assert.writeOK(primary_db0[collRenameAcrossDBs_name].save({}));
+
+jsTestLog('Waiting for replication');
+rst.awaitReplication();
+
+jsTestLog('Bring up a new node');
+const secondary = rst.add({setParameter: 'numInitialSyncAttempts=1'});
+
+// Add a fail point that causes the secondary's initial sync to hang before
+// copying databases.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+
+jsTestLog('Begin initial sync on secondary');
+let conf = rst.getPrimary().getDB('admin').runCommand({replSetGetConfig: 1}).config;
+conf.members.push({_id: 1, host: secondary.host, priority: 0, votes: 0});
+conf.version++;
+assert.commandWorked(rst.getPrimary().getDB('admin').runCommand({replSetReconfig: conf}));
+assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
+
+// Confirm that initial sync started on the secondary node.
+jsTestLog('Waiting for initial sync to start');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+
+// Start renaming collections while initial sync is hanging.
+jsTestLog('Rename collection ' + db0_name + '.' + collRenameWithinDB_name + ' to ' + db0_name +
+ '.' + collWithinFinal_name + ' on the sync source ' + db0_name);
+assert.commandWorked(primary_db0[collRenameWithinDB_name].renameCollection(collWithinFinal_name));
+
+jsTestLog('Rename collection ' + db0_name + '.' + collRenameAcrossDBs_name + ' to ' + db1_name +
+ '.' + collAcrossFinal_name + ' on the sync source ' + db0_name);
+assert.commandWorked(primary.adminCommand({
+ renameCollection: primary_db0[collRenameAcrossDBs_name].getFullName(),
+ to: primary_db1[collAcrossFinal_name]
+ .getFullName() // Collection 'renamed_across' is implicitly created.
+}));
+
+// Disable fail point so that the secondary can finish its initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+
+jsTestLog('Wait for both nodes to be up-to-date');
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
+
+const secondary_db0 = secondary.getDB(db0_name);
+const secondary_db1 = secondary.getDB(db1_name);
+
+jsTestLog('Check that collection was renamed correctly on the secondary');
+assert.eq(
+ secondary_db0[collWithinFinal_name].find().itcount(), 1, 'renamed collection does not exist');
+assert.eq(secondary_db1[collAcrossFinal_name].find().itcount(),
+ 1,
+ 'renamed_across collection does not exist');
+assert.eq(
+ secondary_db0[collRenameWithinDB_name].find().itcount(),
+ 0,
+ 'collection ' + collRenameWithinDB_name + ' still exists after it was supposed to be renamed');
+assert.eq(
+ secondary_db0[collRenameAcrossDBs_name].find().itcount(),
+ 0,
+ 'collection ' + collRenameAcrossDBs_name + ' still exists after it was supposed to be renamed');
+
+rst.checkReplicatedDataHashes();
+rst.checkOplogs();
+rst.stopSet();
})();
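
As the renames above illustrate, a same-database rename can use the collection helper, while a cross-database rename must go through the renameCollection admin command with full namespaces. A condensed sketch, assuming database handles `db0` and `db1` obtained from a `primary` connection:

// Same-database rename via the shell helper.
assert.commandWorked(db0.coll_1.renameCollection('renamed'));
// Cross-database rename requires the admin command; the target
// collection is created implicitly.
assert.commandWorked(primary.adminCommand({
    renameCollection: db0.getName() + '.coll_2',
    to: db1.getName() + '.renamed_across'
}));
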
diff --git a/jstests/replsets/initial_sync_replSetGetStatus.js b/jstests/replsets/initial_sync_replSetGetStatus.js
index 26914eddf06..fb79ae2e6d7 100644
--- a/jstests/replsets/initial_sync_replSetGetStatus.js
+++ b/jstests/replsets/initial_sync_replSetGetStatus.js
@@ -4,88 +4,87 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
-
- var name = 'initial_sync_replSetGetStatus';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
-
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
-
- var coll = primary.getDB('test').foo;
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
-
- // Add a secondary node but make it hang before copying databases.
- var secondary = replSet.add();
- secondary.setSlaveOk();
-
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'alwaysOn'}));
- replSet.reInitiate();
-
- // Wait for initial sync to pause before it copies the databases.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
-
- // Test that replSetGetStatus returns the correct results while initial sync is in progress.
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert(!res.initialSyncStatus,
- "Response should not have an 'initialSyncStatus' field: " + tojson(res));
-
- res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert(res.initialSyncStatus,
- "Response should have an 'initialSyncStatus' field: " + tojson(res));
-
- assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "t"}),
- ErrorCodes.TypeMismatch);
-
- assert.writeOK(coll.insert({a: 3}));
- assert.writeOK(coll.insert({a: 4}));
-
- // Let initial sync continue working.
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
-
- // Wait for initial sync to pause right before it finishes.
- checkLog.contains(secondary, 'initial sync - initialSyncHangBeforeFinish fail point enabled');
-
- // Test that replSetGetStatus returns the correct results when initial sync is at the very end.
- res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert(res.initialSyncStatus, "Response should have an 'initialSyncStatus' field.");
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- assert.eq(res.initialSyncStatus.appliedOps, 3);
- assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 0);
- assert.eq(res.initialSyncStatus.maxFailedInitialSyncAttempts, 10);
- assert.eq(res.initialSyncStatus.databases.databasesCloned, 3);
- assert.eq(res.initialSyncStatus.databases.test.collections, 1);
- assert.eq(res.initialSyncStatus.databases.test.clonedCollections, 1);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsToCopy, 4);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsCopied, 4);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].indexes, 1);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].fetchedBatches, 1);
-
- // Let initial sync finish and get into secondary state.
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'off'}));
- replSet.awaitSecondaryNodes(60 * 1000);
-
- // Test that replSetGetStatus returns the correct results after initial sync is finished.
- res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert(!res.initialSyncStatus,
- "Response should not have an 'initialSyncStatus' field: " + tojson(res));
-
- assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "m"}),
- ErrorCodes.TypeMismatch);
- assert.eq(0,
- secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
- "Oplog buffer was not dropped after initial sync");
- replSet.stopSet();
+"use strict";
+load("jstests/libs/check_log.js");
+
+var name = 'initial_sync_replSetGetStatus';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
+
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
+
+var coll = primary.getDB('test').foo;
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+
+// Add a secondary node but make it hang before copying databases.
+var secondary = replSet.add();
+secondary.setSlaveOk();
+
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'alwaysOn'}));
+replSet.reInitiate();
+
+// Wait for initial sync to pause before it copies the databases.
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+
+// Test that replSetGetStatus returns the correct results while initial sync is in progress.
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert(!res.initialSyncStatus,
+ "Response should not have an 'initialSyncStatus' field: " + tojson(res));
+
+res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+assert(res.initialSyncStatus, "Response should have an 'initialSyncStatus' field: " + tojson(res));
+
+assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "t"}),
+ ErrorCodes.TypeMismatch);
+
+assert.writeOK(coll.insert({a: 3}));
+assert.writeOK(coll.insert({a: 4}));
+
+// Let initial sync continue working.
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+
+// Wait for initial sync to pause right before it finishes.
+checkLog.contains(secondary, 'initial sync - initialSyncHangBeforeFinish fail point enabled');
+
+// Test that replSetGetStatus returns the correct results when initial sync is at the very end.
+res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+assert(res.initialSyncStatus, "Response should have an 'initialSyncStatus' field.");
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+assert.eq(res.initialSyncStatus.appliedOps, 3);
+assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 0);
+assert.eq(res.initialSyncStatus.maxFailedInitialSyncAttempts, 10);
+assert.eq(res.initialSyncStatus.databases.databasesCloned, 3);
+assert.eq(res.initialSyncStatus.databases.test.collections, 1);
+assert.eq(res.initialSyncStatus.databases.test.clonedCollections, 1);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsToCopy, 4);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsCopied, 4);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].indexes, 1);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].fetchedBatches, 1);
+
+// Let initial sync finish and get into secondary state.
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'off'}));
+replSet.awaitSecondaryNodes(60 * 1000);
+
+// Test that replSetGetStatus returns the correct results after initial sync is finished.
+res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert(!res.initialSyncStatus,
+ "Response should not have an 'initialSyncStatus' field: " + tojson(res));
+
+assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "m"}),
+ ErrorCodes.TypeMismatch);
+assert.eq(0,
+ secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
+ "Oplog buffer was not dropped after initial sync");
+replSet.stopSet();
})();
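
As the assertions above show, replSetGetStatus only attaches the initialSyncStatus sub-document when invoked with initialSync: 1, and only while (or shortly after) the node is initial syncing. A minimal probe, assuming a connection `secondary` to such a node:

const res = assert.commandWorked(
    secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
if (res.initialSyncStatus) {
    // A few of the progress counters maintained by the initial syncer.
    jsTestLog("failed attempts: " + res.initialSyncStatus.failedInitialSyncAttempts);
    jsTestLog("ops applied: " + res.initialSyncStatus.appliedOps);
}
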
diff --git a/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js b/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js
index d589c6320bc..0ec56197438 100644
--- a/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js
+++ b/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js
@@ -7,94 +7,94 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- // Set the number of initial sync attempts to 2 so that the test fails on unplanned failures.
- const replTest =
- new ReplSetTest({nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=2"}});
- replTest.startSet();
+// Set the number of initial sync attempts to 2 so that the test fails on unplanned failures.
+const replTest =
+ new ReplSetTest({nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=2"}});
+replTest.startSet();
- // Increase the election timeout to 24 hours so that we do not accidentally trigger an election
- // while the secondary is restarting.
- replTest.initiateWithHighElectionTimeout();
+// Increase the election timeout to 24 hours so that we do not accidentally trigger an election
+// while the secondary is restarting.
+replTest.initiateWithHighElectionTimeout();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "initial_sync_reset_oldest_timestamp_after_failed_attempt";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "initial_sync_reset_oldest_timestamp_after_failed_attempt";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(testColl.insert({_id: 1}));
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 2}));
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 2}));
- // This will be the begin fetching point for both initial sync attempts. After the first initial
- // sync attempt fails, if the oldest timestamp isn't reset before the next attempt, the update
- // to the transaction table for this prepare will fail a WiredTiger assertion that the commit
- // timestamp for a storage transaction cannot be older than the oldest timestamp.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// This will be the begin fetching point for both initial sync attempts. After the first initial
+// sync attempt fails, if the oldest timestamp isn't reset before the next attempt, the update
+// to the transaction table for this prepare will fail a WiredTiger assertion that the commit
+// timestamp for a storage transaction cannot be older than the oldest timestamp.
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Prepared a transaction at timestamp: " + prepareTimestamp);
+jsTestLog("Prepared a transaction at timestamp: " + prepareTimestamp);
- replTest.stop(secondary, undefined, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- // Set the number of operations per batch to be 1 so that we can know exactly how
- // many batches there will be.
- "replBatchLimitOperations": 1,
- "failpoint.initialSyncHangAfterDataCloning": tojson({mode: "alwaysOn"}),
- // Allow the syncing node to write the prepare oplog entry and apply the first update
- // before failing initial sync.
- "failpoint.failInitialSyncBeforeApplyingBatch": tojson({mode: {skip: 2}}),
- }
- },
- true /* wait */);
+replTest.stop(secondary, undefined, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ // Set the number of operations per batch to be 1 so that we can know exactly how
+ // many batches there will be.
+ "replBatchLimitOperations": 1,
+ "failpoint.initialSyncHangAfterDataCloning": tojson({mode: "alwaysOn"}),
+ // Allow the syncing node to write the prepare oplog entry and apply the first update
+ // before failing initial sync.
+ "failpoint.failInitialSyncBeforeApplyingBatch": tojson({mode: {skip: 2}}),
+ }
+ },
+ true /* wait */);
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
- jsTestLog("Running operations while collection cloning is paused");
+jsTestLog("Running operations while collection cloning is paused");
- // This command will be in the last batch applied before the first initial sync attempt fails.
- // If the oldest timestamp isn't reset on the next attempt, then the timestamp for this update
- // will be the oldest timestamp.
- assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 1}));
+// This command will be in the last batch applied before the first initial sync attempt fails.
+// If the oldest timestamp isn't reset on the next attempt, then the timestamp for this update
+// will be the oldest timestamp.
+assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 1}));
- // This entry will be applied in its own batch, so the failInitialSyncBeforeApplyingBatch
- // failpoint will cause the first initial sync attempt to fail before applying this.
- assert.commandWorked(testColl.update({_id: 1}, {_id: 1, b: 2}));
+// This entry will be applied in its own batch, so the failInitialSyncBeforeApplyingBatch
+// failpoint will cause the first initial sync attempt to fail before applying this.
+assert.commandWorked(testColl.update({_id: 1}, {_id: 1, b: 2}));
- jsTestLog("Resuming initial sync");
+jsTestLog("Resuming initial sync");
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
- // Wait for this failpoint to be hit before turning it off and causing initial sync to fail.
- checkLog.contains(secondary, "failInitialSyncBeforeApplyingBatch fail point enabled");
+// Wait for this failpoint to be hit before turning it off and causing initial sync to fail.
+checkLog.contains(secondary, "failInitialSyncBeforeApplyingBatch fail point enabled");
- jsTestLog("Failing first initial sync attempt");
+jsTestLog("Failing first initial sync attempt");
- // Turn the failpoint off and cause initial sync to fail.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "failInitialSyncBeforeApplyingBatch", mode: "off"}));
+// Turn the failpoint off and cause initial sync to fail.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "failInitialSyncBeforeApplyingBatch", mode: "off"}));
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Initial sync completed");
+jsTestLog("Initial sync completed");
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- replTest.stopSet();
+replTest.stopSet();
})(); \ No newline at end of file
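
Unlike the runtime configureFailPoint commands used elsewhere in these tests, the restart above arms its failpoints on the command line via setParameter, which catches code paths that run before any client can connect. A sketch of that startup form, assuming a ReplSetTest `replTest` and a node handle `node`:

node = replTest.start(node,
                      {
                          startClean: true,  // wipe data files; forces initial sync
                          setParameter: {
                              // "failpoint.<name>" arms a failpoint at startup;
                              // {mode: {skip: N}} lets N hits pass before it fires.
                              "failpoint.initialSyncHangAfterDataCloning":
                                  tojson({mode: "alwaysOn"}),
                              "failpoint.failInitialSyncBeforeApplyingBatch":
                                  tojson({mode: {skip: 2}}),
                          }
                      },
                      true /* wait */);
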
diff --git a/jstests/replsets/initial_sync_test_fixture_test.js b/jstests/replsets/initial_sync_test_fixture_test.js
index 520ba43b2b4..c112bc58ef7 100644
--- a/jstests/replsets/initial_sync_test_fixture_test.js
+++ b/jstests/replsets/initial_sync_test_fixture_test.js
@@ -13,165 +13,164 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
- load("jstests/replsets/libs/initial_sync_test.js");
-
- /**
- * Helper function to check that specific messages appeared or did not appear in the logs. If
- * the command was listIndexes and we expect the message to appear, we also add the collection
- * UUID to make sure that it corresponds to the expected collection.
- */
- function checkLogForCollectionClonerMsg(node, commandName, dbname, contains, collUUID) {
- let msg = "Collection Cloner scheduled a remote command on the " + dbname + " db: { " +
- commandName;
- if (commandName === "listIndexes" && contains) {
- msg += ": " + collUUID;
- }
-
- if (contains) {
- assert(checkLog.checkContainsOnce(node, msg));
- } else {
- assert(!checkLog.checkContainsOnce(node, msg));
- }
- }
+"use strict";
- /**
- * Helper function to check that the specific message appeared exactly once in the logs and that
- * there is no other message saying that the next batch is about to be applied. This will show
- * that oplog application is paused.
- */
- function checkLogForOplogApplicationMsg(node, size) {
- let msg = "Initial Syncer is about to apply the next oplog batch of size: ";
- checkLog.containsWithCount(node, msg, 1, 5 * 1000);
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_test.js");
- msg += size;
- assert(checkLog.checkContainsOnce(node, msg));
+/**
+ * Helper function to check that specific messages appeared or did not appear in the logs. If
+ * the command was listIndexes and we expect the message to appear, we also add the collection
+ * UUID to make sure that it corresponds to the expected collection.
+ */
+function checkLogForCollectionClonerMsg(node, commandName, dbname, contains, collUUID) {
+ let msg =
+ "Collection Cloner scheduled a remote command on the " + dbname + " db: { " + commandName;
+ if (commandName === "listIndexes" && contains) {
+ msg += ": " + collUUID;
}
- // Set up Initial Sync Test.
- const initialSyncTest = new InitialSyncTest();
- const primary = initialSyncTest.getPrimary();
- let secondary = initialSyncTest.getSecondary();
- const db = primary.getDB("test");
- const largeString = 'z'.repeat(10 * 1024 * 1024);
-
- assert.commandWorked(db.foo.insert({a: 1}));
- assert.commandWorked(db.bar.insert({b: 1}));
-
- // Prepare a transaction so that we know that step() can restart the secondary even if there is
- // a prepared transaction.
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("test");
- const sessionColl = sessionDB.getCollection("foo");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({c: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Do same listDatabases command as CollectionCloner.
- const databases =
- assert.commandWorked(primary.adminCommand({listDatabases: 1, nameOnly: true})).databases;
-
- // This step call restarts the secondary and causes it to go into initial sync.
- assert(!initialSyncTest.step());
+ if (contains) {
+ assert(checkLog.checkContainsOnce(node, msg));
+ } else {
+ assert(!checkLog.checkContainsOnce(node, msg));
+ }
+}
- secondary = initialSyncTest.getSecondary();
- secondary.setSlaveOk();
+/**
+ * Helper function to check that the specific message appeared exactly once in the logs and that
+ * there is no other message saying that the next batch is about to be applied. This will show
+ * that oplog application is paused.
+ */
+function checkLogForOplogApplicationMsg(node, size) {
+ let msg = "Initial Syncer is about to apply the next oplog batch of size: ";
+ checkLog.containsWithCount(node, msg, 1, 5 * 1000);
+
+ msg += size;
+ assert(checkLog.checkContainsOnce(node, msg));
+}
+
+// Set up Initial Sync Test.
+const initialSyncTest = new InitialSyncTest();
+const primary = initialSyncTest.getPrimary();
+let secondary = initialSyncTest.getSecondary();
+const db = primary.getDB("test");
+const largeString = 'z'.repeat(10 * 1024 * 1024);
+
+assert.commandWorked(db.foo.insert({a: 1}));
+assert.commandWorked(db.bar.insert({b: 1}));
+
+// Prepare a transaction so that we know that step() can restart the secondary even if there is
+// a prepared transaction.
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase("test");
+const sessionColl = sessionDB.getCollection("foo");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({c: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Do same listDatabases command as CollectionCloner.
+const databases =
+ assert.commandWorked(primary.adminCommand({listDatabases: 1, nameOnly: true})).databases;
+
+// This step call restarts the secondary and causes it to go into initial sync.
+assert(!initialSyncTest.step());
+
+secondary = initialSyncTest.getSecondary();
+secondary.setSlaveOk();
+
+// Make sure that we cannot read from this node yet.
+assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
+ ErrorCodes.NotMasterOrSecondary);
+
+// Make sure that we saw the listDatabases call in the log messages, but didn't see any
+// listCollections or listIndexes call.
+checkLogForCollectionClonerMsg(secondary, "listDatabases", "admin", true);
+checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
+checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
+
+// Iterate over the databases and collections in the same order that the test fixture would so
+// that we can check the log messages to make sure initial sync is paused as expected.
+for (let dbObj of databases) {
+ const dbname = dbObj.name;
+
+ // We skip the local db during the collection cloning phase of initial sync.
+ if (dbname === "local") {
+ continue;
+ }
- // Make sure that we cannot read from this node yet.
- assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
- ErrorCodes.NotMasterOrSecondary);
+ const database = primary.getDB(dbname);
- // Make sure that we saw the listDatabases call in the log messages, but didn't see any
- // listCollections or listIndexes call.
- checkLogForCollectionClonerMsg(secondary, "listDatabases", "admin", true);
- checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
- checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
+ // Do same listCollections command as CollectionCloner.
+ const res = assert.commandWorked(database.runCommand(
+ {listCollections: 1, filter: {$or: [{type: "collection"}, {type: {$exists: false}}]}}));
- // Iterate over the databases and collections in the same order that the test fixture would so
- // that we can check the log messages to make sure initial sync is paused as expected.
- for (let dbObj of databases) {
- const dbname = dbObj.name;
+ // Make sure that there is only one batch.
+ assert.eq(NumberLong(0), res.cursor.id, res);
- // We skip the local db during the collection cloning phase of initial sync.
- if (dbname === "local") {
- continue;
- }
+ const collectionsCursor = res.cursor;
- const database = primary.getDB(dbname);
+ // For each database, CollectionCloner will first call listCollections.
+ assert(!initialSyncTest.step());
- // Do same listCollections command as CollectionCloner.
- const res = assert.commandWorked(database.runCommand(
- {listCollections: 1, filter: {$or: [{type: "collection"}, {type: {$exists: false}}]}}));
+ // Make sure that we cannot read from this node yet.
+ assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
+ ErrorCodes.NotMasterOrSecondary);
- // Make sure that there is only one batch.
- assert.eq(NumberLong(0), res.cursor.id, res);
+ // Make sure that we saw the listCollections call in the log messages, but didn't see a
+ // listIndexes call.
+ checkLogForCollectionClonerMsg(secondary, "listCollections", dbname, true);
+ checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
- const collectionsCursor = res.cursor;
+ for (let collectionObj of collectionsCursor.firstBatch) {
+ assert(collectionObj.info, collectionObj);
+ const collUUID = collectionObj.info.uuid;
- // For each database, CollectionCloner will first call listCollections.
+ // For each collection, CollectionCloner will call listIndexes.
assert(!initialSyncTest.step());
// Make sure that we cannot read from this node yet.
assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
ErrorCodes.NotMasterOrSecondary);
- // Make sure that we saw the listCollections call in the log messages, but didn't see a
- // listIndexes call.
- checkLogForCollectionClonerMsg(secondary, "listCollections", dbname, true);
- checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
-
- for (let collectionObj of collectionsCursor.firstBatch) {
- assert(collectionObj.info, collectionObj);
- const collUUID = collectionObj.info.uuid;
-
- // For each collection, CollectionCloner will call listIndexes.
- assert(!initialSyncTest.step());
+ // Make sure that we saw the listIndexes call in the log messages, but didn't
+ // see a listCollections call.
+ checkLogForCollectionClonerMsg(secondary, "listIndexes", dbname, true, collUUID);
+ checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
- // Make sure that we cannot read from this node yet.
- assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
- ErrorCodes.NotMasterOrSecondary);
-
- // Make sure that we saw the listIndexes call in the log messages, but didn't
- // see a listCollections call.
- checkLogForCollectionClonerMsg(secondary, "listIndexes", dbname, true, collUUID);
- checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
-
- // Perform large operations during collection cloning so that we will need multiple
- // batches during oplog application.
- assert.commandWorked(db.foo.insert({d: largeString}));
- assert.commandWorked(db.bar.insert({e: largeString}));
- }
+ // Perform large operations during collection cloning so that we will need multiple
+ // batches during oplog application.
+ assert.commandWorked(db.foo.insert({d: largeString}));
+ assert.commandWorked(db.bar.insert({e: largeString}));
}
+}
- // Check that we see the expected number of batches during oplog application.
-
- // This batch should correspond to the 'prepare' op.
- assert(!initialSyncTest.step());
- checkLogForOplogApplicationMsg(secondary, 1);
- assert(!initialSyncTest.step());
- checkLogForOplogApplicationMsg(secondary, 9);
- assert(!initialSyncTest.step());
- checkLogForOplogApplicationMsg(secondary, 1);
+// Check that we see the expected number of batches during oplog application.
- assert(initialSyncTest.step(), "Expected initial sync to have completed, but it did not");
+// This batch should correspond to the 'prepare' op.
+assert(!initialSyncTest.step());
+checkLogForOplogApplicationMsg(secondary, 1);
+assert(!initialSyncTest.step());
+checkLogForOplogApplicationMsg(secondary, 9);
+assert(!initialSyncTest.step());
+checkLogForOplogApplicationMsg(secondary, 1);
- // Abort transaction so that the data consistency checks in stop() can run.
- assert.commandWorked(session.abortTransaction_forTesting());
+assert(initialSyncTest.step(), "Expected initial sync to have completed, but it did not");
- // Issue a w:2 write to make sure the secondary has replicated the abortTransaction oplog entry.
- assert.commandWorked(primary.getDB("otherDB").otherColl.insert({x: 1}, {writeConcern: {w: 2}}));
+// Abort transaction so that the data consistency checks in stop() can run.
+assert.commandWorked(session.abortTransaction_forTesting());
- // Confirm that node can be read from and that it has the inserts that were made while the node
- // was in initial sync.
- assert.eq(secondary.getDB("test").foo.find().count(), 6);
- assert.eq(secondary.getDB("test").bar.find().count(), 6);
- assert.eq(secondary.getDB("test").foo.find().itcount(), 6);
- assert.eq(secondary.getDB("test").bar.find().itcount(), 6);
+// Issue a w:2 write to make sure the secondary has replicated the abortTransaction oplog entry.
+assert.commandWorked(primary.getDB("otherDB").otherColl.insert({x: 1}, {writeConcern: {w: 2}}));
- // Do data consistency checks at the end.
- initialSyncTest.stop();
+// Confirm that the node can be read from and that it has the inserts that were made while the node
+// was in initial sync.
+assert.eq(secondary.getDB("test").foo.find().count(), 6);
+assert.eq(secondary.getDB("test").bar.find().count(), 6);
+assert.eq(secondary.getDB("test").foo.find().itcount(), 6);
+assert.eq(secondary.getDB("test").bar.find().itcount(), 6);
+// Do data consistency checks at the end.
+initialSyncTest.stop();
})();
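
The step()-driven fixture above is built almost entirely on the check_log.js helpers. The three used in this file behave roughly as follows, assuming a node connection `node`:

load("jstests/libs/check_log.js");
// Blocks (up to a timeout) until the message appears in the node's log.
checkLog.contains(node, "initialSyncHangBeforeFinish fail point enabled");
// Waits until the message appears exactly `count` times within `timeout` ms.
checkLog.containsWithCount(node, "about to apply the next oplog batch", 1, 5 * 1000);
// Non-blocking: returns whether the message has appeared in the log.
const seen = checkLog.checkContainsOnce(node, "listDatabases");
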
diff --git a/jstests/replsets/initial_sync_update_missing_doc1.js b/jstests/replsets/initial_sync_update_missing_doc1.js
index 7760c3330b6..93eda8b7702 100644
--- a/jstests/replsets/initial_sync_update_missing_doc1.js
+++ b/jstests/replsets/initial_sync_update_missing_doc1.js
@@ -12,40 +12,41 @@
*/
(function() {
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
- const name = 'initial_sync_update_missing_doc1';
- const replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
+const name = 'initial_sync_update_missing_doc1';
+const replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
+replSet.startSet();
+replSet.initiate();
+const primary = replSet.getPrimary();
+const dbName = 'test';
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
+var coll = primary.getDB(dbName).getCollection(name);
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+// Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+// it is syncing from the primary.
+const secondaryConfig = {
+ rsConfig: {votes: 0, priority: 0}
+};
+const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
- // Update and remove document on primary.
- updateRemove(coll, {_id: 0});
+// Update and remove document on primary.
+updateRemove(coll, {_id: 0});
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
- finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, 0 /* numDocuments */);
-
- replSet.stopSet();
+turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
+finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, 0 /* numDocuments */);
+replSet.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_update_missing_doc2.js b/jstests/replsets/initial_sync_update_missing_doc2.js
index 6b772cbd028..420aaee8adc 100644
--- a/jstests/replsets/initial_sync_update_missing_doc2.js
+++ b/jstests/replsets/initial_sync_update_missing_doc2.js
@@ -13,49 +13,50 @@
*/
(function() {
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
- var name = 'initial_sync_update_missing_doc2';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
+var name = 'initial_sync_update_missing_doc2';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
- replSet.startSet();
- replSet.initiate();
+replSet.startSet();
+replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
+const primary = replSet.getPrimary();
+const dbName = 'test';
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
+var coll = primary.getDB(dbName).getCollection(name);
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+// Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+// it is syncing from the primary.
+const secondaryConfig = {
+ rsConfig: {votes: 0, priority: 0}
+};
+const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
- // Update and remove document on primary.
- updateRemove(coll, {_id: 0});
+// Update and remove document on primary.
+updateRemove(coll, {_id: 0});
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
- // Re-insert deleted document on the sync source. The secondary should be able to fetch and
- // insert this document after failing to apply the udpate.
- assert.commandWorked(coll.insert({_id: 0, x: 3}));
+// Re-insert deleted document on the sync source. The secondary should be able to fetch and
+// insert this document after failing to apply the update.
+assert.commandWorked(coll.insert({_id: 0, x: 3}));
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
- // Temporarily increase log levels so that we can see the 'Inserted missing document' log line.
- secondary.getDB('test').setLogLevel(1, 'replication');
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
- secondary.getDB('test').setLogLevel(0, 'replication');
+// Temporarily increase log levels so that we can see the 'Inserted missing document' log line.
+secondary.getDB('test').setLogLevel(1, 'replication');
+turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
+secondary.getDB('test').setLogLevel(0, 'replication');
- finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, 1 /* numDocuments */);
-
- replSet.stopSet();
+finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, 1 /* numDocuments */);
+replSet.stopSet();
})();
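
The setLogLevel bracket above exists because 'Inserted missing document' is only logged at replication verbosity 1; raising the level just for that window keeps the rest of the run quiet while still letting checkLog (loaded from check_log.js above) find the line. The bracket in isolation, as a sketch:

    // Raise replication verbosity only while the log line is expected.
    secondary.getDB('test').setLogLevel(1, 'replication');
    checkLog.contains(secondary, 'Inserted missing document');
    secondary.getDB('test').setLogLevel(0, 'replication');
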
diff --git a/jstests/replsets/initial_sync_update_missing_doc3.js b/jstests/replsets/initial_sync_update_missing_doc3.js
index a0f0c6cf2f0..67e44b5cd6c 100644
--- a/jstests/replsets/initial_sync_update_missing_doc3.js
+++ b/jstests/replsets/initial_sync_update_missing_doc3.js
@@ -14,59 +14,61 @@
*/
(function() {
- load("jstests/libs/check_log.js");
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- var name = 'initial_sync_update_missing_doc3';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
+var name = 'initial_sync_update_missing_doc3';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
+replSet.startSet();
+replSet.initiate();
+const primary = replSet.getPrimary();
+const dbName = 'test';
- // Check for 'system.drop' two phase drop support.
- if (!TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replSet)) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- replSet.stopSet();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replSet)) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ replSet.stopSet();
+ return;
+}
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
+var coll = primary.getDB(dbName).getCollection(name);
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
- // Add a secondary node with priority: 0 so that we prevent elections while it is syncing
- // from the primary.
- // We cannot give the secondary votes: 0 because then it will not be able to acknowledge
- // majority writes. That means the sync source can immediately drop its collection
- // because it alone determines the majority commit point.
- const secondaryConfig = {rsConfig: {priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+// Add a secondary node with priority: 0 so that we prevent elections while it is syncing
+// from the primary.
+// We cannot give the secondary votes: 0 because then it will not be able to acknowledge
+// majority writes. That means the sync source can immediately drop its collection
+// because it alone determines the majority commit point.
+const secondaryConfig = {
+ rsConfig: {priority: 0}
+};
+const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
- // Update and remove document on primary.
- updateRemove(coll, {_id: 0});
+// Update and remove document on primary.
+updateRemove(coll, {_id: 0});
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
- // Re-insert deleted document.
- assert.commandWorked(coll.insert({_id: 0, x: 3}));
- // Mark the collection as drop pending so it gets renamed, but retains the UUID.
- assert.commandWorked(primary.getDB('test').runCommand({"drop": name}));
+// Re-insert deleted document.
+assert.commandWorked(coll.insert({_id: 0, x: 3}));
+// Mark the collection as drop pending so it gets renamed, but retains the UUID.
+assert.commandWorked(primary.getDB('test').runCommand({"drop": name}));
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
- secondary.getDB('test').setLogLevel(1, 'replication');
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1);
- secondary.getDB('test').setLogLevel(0, 'replication');
+secondary.getDB('test').setLogLevel(1, 'replication');
+turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1);
+secondary.getDB('test').setLogLevel(0, 'replication');
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- replSet.stopSet();
+replSet.stopSet();
})();
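
The votes comment above is the subtle point of this test: in a two-node set where only the primary votes, the primary alone advances the majority commit point and may reap a drop-pending collection before the syncing node reads it. Keeping the secondary's default vote (priority: 0 only) makes majority writes wait for it. A sketch of what that vote buys, with an illustrative write and timeout:

    // With both members voting, a w:'majority' write cannot commit until the
    // secondary acknowledges it, so drop-pending namespaces are not reaped early.
    assert.commandWorked(primary.getDB('test').coll.insert(
        {_id: 1}, {writeConcern: {w: 'majority', wtimeout: 30 * 1000}}));
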
diff --git a/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js b/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js
index 2b37cd559cf..e9840f7baa7 100644
--- a/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js
+++ b/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js
@@ -14,84 +14,81 @@
*/
(function() {
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
-
- function doTest(doTransactionWork, numDocuments) {
- const name = 'initial_sync_update_missing_doc_with_prepare';
- const replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
-
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
-
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
- assert.commandWorked(coll.insert({_id: 1, x: 1}));
-
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
-
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(name);
-
- session.startTransaction();
- doTransactionWork(sessionColl, {_id: 0});
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- // This transaction is eventually aborted, so this document should exist on the secondary
- // after initial sync.
- session.startTransaction();
- doTransactionWork(sessionColl, {_id: 1});
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
-
- var res =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- jsTestLog(tojson(res));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
-
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
-
- // Since we aborted the second transaction, we expect this collection to still exist after
- // initial sync.
- finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, numDocuments);
-
- // Make sure the secondary has the correct documents after syncing from the primary. The
- // second document was deleted in the prepared transaction that was aborted. Therefore, it
- // should have been properly replicated.
- coll = secondary.getDB(dbName).getCollection(name);
- assert.docEq(null, coll.findOne({_id: 0}), 'document on secondary matches primary');
- assert.docEq(
- {_id: 1, x: 1}, coll.findOne({_id: 1}), 'document on secondary matches primary');
-
- replSet.stopSet();
- }
-
- jsTestLog("Testing with prepared transaction");
- // Passing in a function to update and remove document on primary in a prepared transaction
- // between phases 1 and 2. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one update, and one delete) during initial
- // sync.
- doTest(updateRemove, 1 /* numDocuments after initial sync */);
-
- jsTestLog("Testing with large prepared transaction");
- // Passing in a function to insert, update and remove large documents on primary in a large
- // prepared transaction. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one insert, one update, and one delete)
- // during initial sync.
- doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
-
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
+
+function doTest(doTransactionWork, numDocuments) {
+ const name = 'initial_sync_update_missing_doc_with_prepare';
+ const replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+ });
+
+ replSet.startSet();
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const dbName = 'test';
+
+ var coll = primary.getDB(dbName).getCollection(name);
+ assert.commandWorked(coll.insert({_id: 0, x: 1}));
+ assert.commandWorked(coll.insert({_id: 1, x: 1}));
+
+ // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+ // it is syncing from the primary.
+ const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
+ const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+
+ const session = primary.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(name);
+
+ session.startTransaction();
+ doTransactionWork(sessionColl, {_id: 0});
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+ // This transaction is eventually aborted, so this document should exist on the secondary
+ // after initial sync.
+ session.startTransaction();
+ doTransactionWork(sessionColl, {_id: 1});
+ PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+
+ var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+ jsTestLog(tojson(res));
+ assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+ var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+
+ turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
+
+ // Since we aborted the second transaction, we expect this collection to still exist after
+ // initial sync.
+ finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, numDocuments);
+
+ // Make sure the secondary has the correct documents after syncing from the primary. The
+ // second document was deleted in the prepared transaction that was aborted. Therefore, it
+ // should have been properly replicated.
+ coll = secondary.getDB(dbName).getCollection(name);
+ assert.docEq(null, coll.findOne({_id: 0}), 'document on secondary matches primary');
+ assert.docEq({_id: 1, x: 1}, coll.findOne({_id: 1}), 'document on secondary matches primary');
+
+ replSet.stopSet();
+}
+
+jsTestLog("Testing with prepared transaction");
+// Passing in a function to update and remove document on primary in a prepared transaction
+// between phases 1 and 2. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one update, and one delete) during initial
+// sync.
+doTest(updateRemove, 1 /* numDocuments after initial sync */);
+
+jsTestLog("Testing with large prepared transaction");
+// Passing in a function to insert, update and remove large documents on primary in a large
+// prepared transaction. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one insert, one update, and one delete)
+// during initial sync.
+doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
})();
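
doTest above exercises the standard PrepareHelpers sequence twice, once committing and once aborting; the two paths differ only in the final call. A condensed sketch of both, assuming prepare_helpers.js is loaded as in this test:

    const session = primary.startSession();
    const sessionColl = session.getDatabase('test').getCollection('coll');

    // Commit path: prepare, then commit at the prepare timestamp.
    session.startTransaction();
    assert.commandWorked(sessionColl.insert({_id: 0}));
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));

    // Abort path: a prepared transaction can instead be aborted.
    session.startTransaction();
    assert.commandWorked(sessionColl.insert({_id: 1}));
    PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(session.abortTransaction_forTesting());
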
diff --git a/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js b/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js
index 2f8872e2ddd..5df204812d4 100644
--- a/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js
+++ b/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js
@@ -14,75 +14,72 @@
*/
(function() {
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
-
- function doTest(doTransactionWork, numDocuments) {
- const name = 'initial_sync_update_missing_doc_with_prepare';
- const replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
-
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
-
- const coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
-
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
-
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(name);
-
- session.startTransaction();
- doTransactionWork(sessionColl, {_id: 0});
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
-
- // Re-insert deleted document on the sync source. The secondary should be able to fetch and
- // insert this document after failing to apply the update.
- assert.commandWorked(coll.insert({_id: 0, x: 3}));
-
- const res =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- const firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
-
- // Temporarily increase log levels so that we can see the 'Inserted missing document' log
- // line.
- secondary.getDB('test').setLogLevel(1, 'replication');
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
- secondary.getDB('test').setLogLevel(0, 'replication');
-
- finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, numDocuments);
- assert.docEq(
- {_id: 0, x: 3}, coll.findOne({_id: 0}), 'document on secondary matches primary');
-
- replSet.stopSet();
- }
-
- jsTestLog("Testing with prepared transaction");
- // Passing in a function to update and remove document on primary in a prepared transaction
- // between phases 1 and 2. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one update, and one delete) during initial
- // sync.
- doTest(updateRemove, 1 /* numDocuments after initial sync */);
-
- jsTestLog("Testing with large prepared transaction");
- // Passing in a function to insert, update and remove large documents on primary in a large
- // prepared transaction. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one insert, one update, and one delete)
- // during initial sync.
- doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
-
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
+
+function doTest(doTransactionWork, numDocuments) {
+ const name = 'initial_sync_update_missing_doc_with_prepare';
+ const replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+ });
+
+ replSet.startSet();
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const dbName = 'test';
+
+ const coll = primary.getDB(dbName).getCollection(name);
+ assert.commandWorked(coll.insert({_id: 0, x: 1}));
+
+ // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+ // it is syncing from the primary.
+ const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
+ const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+
+ const session = primary.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(name);
+
+ session.startTransaction();
+ doTransactionWork(sessionColl, {_id: 0});
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+ turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+
+ // Re-insert deleted document on the sync source. The secondary should be able to fetch and
+ // insert this document after failing to apply the update.
+ assert.commandWorked(coll.insert({_id: 0, x: 3}));
+
+ const res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
+ assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+ const firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+
+ // Temporarily increase log levels so that we can see the 'Inserted missing document' log
+ // line.
+ secondary.getDB('test').setLogLevel(1, 'replication');
+ turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
+ secondary.getDB('test').setLogLevel(0, 'replication');
+
+ finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, numDocuments);
+ assert.docEq({_id: 0, x: 3}, coll.findOne({_id: 0}), 'document on secondary matches primary');
+
+ replSet.stopSet();
+}
+
+jsTestLog("Testing with prepared transaction");
+// Passing in a function to update and remove document on primary in a prepared transaction
+// between phases 1 and 2. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one update, and one delete) during initial
+// sync.
+doTest(updateRemove, 1 /* numDocuments after initial sync */);
+
+jsTestLog("Testing with large prepared transaction");
+// Passing in a function to insert, update and remove large documents on primary in a large
+// prepared transaction. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one insert, one update, and one delete)
+// during initial sync.
+doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
})();
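
The fetchedMissingDocs counter asserted to be 0 above is expected to move once the fail point is released: the update cannot apply because the document was deleted, so the syncing node fetches the re-inserted version from the sync source, and the helper is passed 1 /* numInserted */. Re-checking the counter afterwards might look like the sketch below; that the count lands at exactly 1 is an assumption about the helper's accounting, which this patch does not show:

    const after = assert.commandWorked(
        secondary.adminCommand({replSetGetStatus: 1, initialSync: 1}));
    // Assumed: one missing document fetched, matching numInserted above.
    assert.eq(after.initialSyncStatus.fetchedMissingDocs, 1);
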
diff --git a/jstests/replsets/initial_sync_uuid_not_found.js b/jstests/replsets/initial_sync_uuid_not_found.js
index 267f468b0c4..0942ac1f54b 100644
--- a/jstests/replsets/initial_sync_uuid_not_found.js
+++ b/jstests/replsets/initial_sync_uuid_not_found.js
@@ -5,72 +5,67 @@
* results in an empty result or zero count.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/check_log.js');
+load('jstests/libs/check_log.js');
- const basename = 'initial_sync_rename_collection';
+const basename = 'initial_sync_rename_collection';
- jsTestLog('Bring up set');
- const rst = new ReplSetTest(
- {name: basename, nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+jsTestLog('Bring up set');
+const rst = new ReplSetTest(
+ {name: basename, nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB('d');
- const primaryColl = primaryDB.coll;
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB('d');
+const primaryColl = primaryDB.coll;
- jsTestLog('Create a collection (with a UUID) and insert a document.');
- assert.writeOK(primaryColl.insert({_id: 0}));
+jsTestLog('Create a collection (with a UUID) and insert a document.');
+assert.writeOK(primaryColl.insert({_id: 0}));
- const collInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
- assert(collInfo.info.uuid,
- 'newly created collection expected to have a UUID: ' + tojson(collInfo));
+const collInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
+assert(collInfo.info.uuid, 'newly created collection expected to have a UUID: ' + tojson(collInfo));
- jsTestLog('Make sure synced');
- rst.awaitReplication();
+jsTestLog('Make sure synced');
+rst.awaitReplication();
- jsTestLog('Resync the secondary enabling failpoint');
- function ResyncWithFailpoint(failpointName, failpointData) {
- let setParameter = {numInitialSyncAttempts: 1};
- setParameter['failpoint.' + failpointName] =
- tojson({mode: 'alwaysOn', data: failpointData});
- rst.restart(1, {startClean: true, setParameter});
- const secondary = rst.nodes[1];
- assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
+jsTestLog('Resync the secondary enabling failpoint');
+function ResyncWithFailpoint(failpointName, failpointData) {
+ let setParameter = {numInitialSyncAttempts: 1};
+ setParameter['failpoint.' + failpointName] = tojson({mode: 'alwaysOn', data: failpointData});
+ rst.restart(1, {startClean: true, setParameter});
+ const secondary = rst.nodes[1];
+ assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
- jsTestLog('Wait for new node to start cloning');
- secondary.setSlaveOk();
- const secondaryDB = secondary.getDB(primaryDB.getName());
- const secondaryColl = secondaryDB[primaryColl.getName()];
+ jsTestLog('Wait for new node to start cloning');
+ secondary.setSlaveOk();
+ const secondaryDB = secondary.getDB(primaryDB.getName());
+ const secondaryColl = secondaryDB[primaryColl.getName()];
- rst.reInitiate();
- checkLog.contains(secondary, 'initial sync - ' + failpointName + ' fail point enabled');
+ rst.reInitiate();
+ checkLog.contains(secondary, 'initial sync - ' + failpointName + ' fail point enabled');
- jsTestLog('Remove collection on the primary and insert a new document, recreating it.');
- assert(primaryColl.drop());
- assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}}));
- const newCollInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
- assert(collInfo.info.uuid,
- 'recreated collection expected to have a UUID: ' + tojson(collInfo));
- assert.neq(collInfo.info.uuid,
- newCollInfo.info.uuid,
- 'recreated collection expected to have different UUID');
+ jsTestLog('Remove collection on the primary and insert a new document, recreating it.');
+ assert(primaryColl.drop());
+ assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}}));
+ const newCollInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
+ assert(collInfo.info.uuid, 'recreated collection expected to have a UUID: ' + tojson(collInfo));
+ assert.neq(collInfo.info.uuid,
+ newCollInfo.info.uuid,
+ 'recreated collection expected to have different UUID');
- jsTestLog('Disable failpoint and resume initial sync');
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: failpointName, mode: 'off'}));
+ jsTestLog('Disable failpoint and resume initial sync');
+ assert.commandWorked(secondary.adminCommand({configureFailPoint: failpointName, mode: 'off'}));
- jsTestLog('Wait for both nodes to be up-to-date');
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
+ jsTestLog('Wait for both nodes to be up-to-date');
+ rst.awaitSecondaryNodes();
+ rst.awaitReplication();
- jsTestLog('Check consistency and shut down replica-set');
- rst.checkReplicatedDataHashes();
- }
- ResyncWithFailpoint('initialSyncHangBeforeCollectionClone',
- {namespace: primaryColl.getFullName()});
- ResyncWithFailpoint('initialSyncHangAfterListCollections', {database: primaryDB.getName()});
- rst.stopSet();
+ jsTestLog('Check consistency and shut down replica-set');
+ rst.checkReplicatedDataHashes();
+}
+ResyncWithFailpoint('initialSyncHangBeforeCollectionClone', {namespace: primaryColl.getFullName()});
+ResyncWithFailpoint('initialSyncHangAfterListCollections', {database: primaryDB.getName()});
+rst.stopSet();
})();
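
ResyncWithFailpoint above shows the startup spelling of a fail point: a setParameter key of the form 'failpoint.<name>' whose value is the tojson of the mode document, applied before the node can accept any command. The runtime equivalent is configureFailPoint. Both forms as a sketch, using a fail point name and data from this test:

    // At startup, before initial sync begins:
    let setParameter = {numInitialSyncAttempts: 1};
    setParameter['failpoint.initialSyncHangAfterListCollections'] =
        tojson({mode: 'alwaysOn', data: {database: 'd'}});
    rst.restart(1, {startClean: true, setParameter});

    // At runtime, once the node accepts commands:
    assert.commandWorked(rst.nodes[1].adminCommand(
        {configureFailPoint: 'initialSyncHangAfterListCollections', mode: 'off'}));
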
diff --git a/jstests/replsets/initial_sync_views.js b/jstests/replsets/initial_sync_views.js
index bf60951837b..ae202aff0e7 100644
--- a/jstests/replsets/initial_sync_views.js
+++ b/jstests/replsets/initial_sync_views.js
@@ -3,39 +3,39 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
- let testName = "initial_sync_views";
- let hostName = getHostName();
+load("jstests/replsets/rslib.js");
+let testName = "initial_sync_views";
+let hostName = getHostName();
- let replTest = new ReplSetTest({name: testName, nodes: 1});
- replTest.startSet();
- replTest.initiate();
+let replTest = new ReplSetTest({name: testName, nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primaryDB = replTest.getPrimary().getDB(testName);
+let primaryDB = replTest.getPrimary().getDB(testName);
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(primaryDB.coll.insert({a: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(primaryDB.coll.insert({a: i}));
+}
- // Set up view.
- assert.commandWorked(
- primaryDB.runCommand({create: "view", viewOn: "coll", pipeline: [{$match: {a: 5}}]}));
+// Set up view.
+assert.commandWorked(
+ primaryDB.runCommand({create: "view", viewOn: "coll", pipeline: [{$match: {a: 5}}]}));
- assert.eq(10, primaryDB.coll.find().itcount());
- assert.eq(1, primaryDB.view.find().itcount());
+assert.eq(10, primaryDB.coll.find().itcount());
+assert.eq(1, primaryDB.view.find().itcount());
- // Add new member to the replica set and wait for initial sync to complete.
- let secondary = replTest.add();
- replTest.reInitiate();
- replTest.awaitReplication();
- replTest.awaitSecondaryNodes();
+// Add new member to the replica set and wait for initial sync to complete.
+let secondary = replTest.add();
+replTest.reInitiate();
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
- // Confirm secondary has expected collection and view document count.
- let secondaryDB = secondary.getDB(testName);
- assert.eq(10, secondaryDB.coll.find().itcount());
- assert.eq(1, secondaryDB.view.find().itcount());
+// Confirm secondary has expected collection and view document count.
+let secondaryDB = secondary.getDB(testName);
+assert.eq(10, secondaryDB.coll.find().itcount());
+assert.eq(1, secondaryDB.view.find().itcount());
- replTest.stopSet();
+replTest.stopSet();
})();
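
The view above is created with the raw create command; the shell helper db.createView wraps the same command. Both forms, using the collection and pipeline from this test (the second view name is illustrative):

    // Raw command, as used above:
    assert.commandWorked(primaryDB.runCommand(
        {create: "view", viewOn: "coll", pipeline: [{$match: {a: 5}}]}));
    // Equivalent shell helper:
    assert.commandWorked(primaryDB.createView("view2", "coll", [{$match: {a: 5}}]));
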
diff --git a/jstests/replsets/initiate.js b/jstests/replsets/initiate.js
index 0afa0c85bcd..994cd3b73ea 100644
--- a/jstests/replsets/initiate.js
+++ b/jstests/replsets/initiate.js
@@ -3,22 +3,22 @@
* configs, so this is just seeing if it fails when it's supposed to.
*/
(function() {
- "use strict";
- var replTest = new ReplSetTest({name: 'testSet2', nodes: 1});
- var nodes = replTest.startSet();
+"use strict";
+var replTest = new ReplSetTest({name: 'testSet2', nodes: 1});
+var nodes = replTest.startSet();
- assert.soon(function() {
- try {
- var result = nodes[0].getDB("admin").runCommand(
- {replSetInitiate: {_id: "testSet2", members: [{_id: 0, tags: ["member0"]}]}});
- printjson(result);
- return (result.errmsg.match(/bad or missing host field/) ||
- result.errmsg.match(/Missing expected field \"host\"/));
- } catch (e) {
- print(e);
- }
- return false;
- });
+assert.soon(function() {
+ try {
+ var result = nodes[0].getDB("admin").runCommand(
+ {replSetInitiate: {_id: "testSet2", members: [{_id: 0, tags: ["member0"]}]}});
+ printjson(result);
+ return (result.errmsg.match(/bad or missing host field/) ||
+ result.errmsg.match(/Missing expected field \"host\"/));
+ } catch (e) {
+ print(e);
+ }
+ return false;
+});
- replTest.stopSet();
+replTest.stopSet();
}());
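
The try/catch inside assert.soon above is deliberate: while replSetInitiate is retried, the connection can drop, and the predicate has to swallow the error and keep polling instead of failing the test. The general shape of the pattern, with an illustrative predicate and message:

    assert.soon(function() {
        try {
            // Any command that may transiently fail while the node reconfigures.
            return nodes[0].getDB("admin").runCommand({ping: 1}).ok === 1;
        } catch (e) {
            print(e);  // connection error: keep polling
            return false;
        }
    }, "node never became reachable again", 60 * 1000);
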
diff --git a/jstests/replsets/inmemory_preserves_active_txns.js b/jstests/replsets/inmemory_preserves_active_txns.js
index 2a5791b35ae..c05c24fb711 100644
--- a/jstests/replsets/inmemory_preserves_active_txns.js
+++ b/jstests/replsets/inmemory_preserves_active_txns.js
@@ -11,106 +11,104 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- // If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
- // server. We'll actually use both inMemory and wiredTiger storage engines.
- const storageEngine = jsTest.options().storageEngine;
- if (storageEngine !== 'inMemory') {
- jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
- return;
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+// If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
+// server. We'll actually use both inMemory and wiredTiger storage engines.
+const storageEngine = jsTest.options().storageEngine;
+if (storageEngine !== 'inMemory') {
+ jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
+ return;
+}
+
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [
+ {storageEngine: "wiredTiger"},
+ // inMemory node must not be a voter, otherwise lastCommitted never advances
+ {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
+ ],
+ waitForKeys: false
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+ const primary = replSet.getPrimary();
+ const secondary = replSet.getSecondary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+
+ jsTestLog("Get transaction entry from config.transactions");
+
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ // The prepare oplog entry may or may not be the first oplog entry depending on packing.
+ assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ assert.soonNoExcept(() => {
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert(secondaryTxnEntry);
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ return true;
+ });
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+ // Must already be written on secondary, since the config.transactions entry is.
+ const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog(`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
+
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
+ assert.soon(() => {
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+ });
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
}
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: [
- {storageEngine: "wiredTiger"},
- // inMemory node must not be a voter, otherwise lastCommitted never advances
- {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
- ],
- waitForKeys: false
- });
-
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- const primary = replSet.getPrimary();
- const secondary = replSet.getSecondary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}));
-
- jsTestLog("Prepare a transaction");
-
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
-
- jsTestLog("Get transaction entry from config.transactions");
-
- const txnEntry = primary.getDB("config").transactions.findOne();
- // The prepare oplog entry may or may not be the first oplog entry depending on packing.
- assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
-
- assert.soonNoExcept(() => {
- const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
- assert(secondaryTxnEntry);
- assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
- return true;
- });
-
- jsTestLog("Find prepare oplog entry");
-
- const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
- assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
- // Must already be written on secondary, since the config.transactions entry is.
- const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
- assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-
- jsTestLog("Insert documents until oplog exceeds oplogSize");
-
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
-
- jsTestLog(
- `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
-
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
- assert.soon(() => {
- return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
- });
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
-
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
-
- PrepareHelpers.awaitOplogTruncation(replSet);
-
- replSet.stopSet();
- }
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ replSet.stopSet();
+}
- doTest("commit");
- doTest("abort");
+doTest("commit");
+doTest("abort");
})();
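
The oplog behavior doTest exercises above: a prepared transaction pins the oplog at its prepare entry, so the oplog grows past its configured maximum until the transaction resolves, after which truncation resumes. The skeleton of that lifecycle, using the helpers as they appear in this test:

    PrepareHelpers.growOplogPastMaxSize(replSet);
    // The prepare entry must survive even though the oplog is oversized.
    assert(PrepareHelpers.findPrepareEntry(primaryOplog));
    // Resolving the transaction (commit here; abort works the same) releases
    // the pin...
    PrepareHelpers.commitTransaction(session, prepareTimestamp);
    // ...and the oplog shrinks back under its configured maximum.
    PrepareHelpers.awaitOplogTruncation(replSet);
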
diff --git a/jstests/replsets/interrupted_batch_insert.js b/jstests/replsets/interrupted_batch_insert.js
index ea0371e1be2..698e157f064 100644
--- a/jstests/replsets/interrupted_batch_insert.js
+++ b/jstests/replsets/interrupted_batch_insert.js
@@ -11,115 +11,109 @@
// though there was a rollback, those inserts will violate the {ordered: true} option.
(function() {
- "use strict";
-
- load('jstests/libs/parallelTester.js');
- load("jstests/replsets/rslib.js");
-
- var name = "interrupted_batch_insert";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.nodeList();
-
- var conns = replTest.startSet();
- replTest.initiate({
- _id: name,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1]},
- {_id: 2, host: nodes[2], priority: 0}
- ]
+"use strict";
+
+load('jstests/libs/parallelTester.js');
+load("jstests/replsets/rslib.js");
+
+var name = "interrupted_batch_insert";
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.nodeList();
+
+var conns = replTest.startSet();
+replTest.initiate({
+ _id: name,
+ members:
+ [{_id: 0, host: nodes[0]}, {_id: 1, host: nodes[1]}, {_id: 2, host: nodes[2], priority: 0}]
+});
+
+// The test starts with node 0 as the primary.
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var primary = replTest.nodes[0];
+var collName = primary.getDB("db")[name].getFullName();
+
+var getParameterResult =
+ primary.getDB("admin").runCommand({getParameter: 1, internalInsertMaxBatchSize: 1});
+assert.commandWorked(getParameterResult);
+const batchSize = getParameterResult.internalInsertMaxBatchSize;
+
+// Prevent any writes to node 0 (the primary) from replicating to nodes 1 and 2.
+stopServerReplication(conns[1]);
+stopServerReplication(conns[2]);
+
+// Allow the primary to insert the first 5 batches of documents. After that, the fail point
+// activates, and the client thread hangs until the fail point gets turned off.
+assert.commandWorked(primary.getDB("db").adminCommand(
+ {configureFailPoint: "hangDuringBatchInsert", mode: {skip: 5}}));
+
+// In a background thread, issue an insert command to the primary that will insert 10 batches of
+// documents.
+var worker = new ScopedThread((host, collName, numToInsert) => {
+ // Insert elements [{idx: 0}, {idx: 1}, ..., {idx: numToInsert - 1}].
+ const docsToInsert = Array.from({length: numToInsert}, (_, i) => {
+ return {idx: i};
});
+ var coll = new Mongo(host).getCollection(collName);
+ assert.commandFailedWithCode(
+ coll.insert(docsToInsert, {writeConcern: {w: "majority", wtimeout: 5000}, ordered: true}),
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, primary.host, collName, 10 * batchSize);
+worker.start();
- // The test starts with node 0 as the primary.
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replTest.nodes[0];
- var collName = primary.getDB("db")[name].getFullName();
-
- var getParameterResult =
- primary.getDB("admin").runCommand({getParameter: 1, internalInsertMaxBatchSize: 1});
- assert.commandWorked(getParameterResult);
- const batchSize = getParameterResult.internalInsertMaxBatchSize;
-
- // Prevent any writes to node 0 (the primary) from replicating to nodes 1 and 2.
- stopServerReplication(conns[1]);
- stopServerReplication(conns[2]);
-
- // Allow the primary to insert the first 5 batches of documents. After that, the fail point
- // activates, and the client thread hangs until the fail point gets turned off.
- assert.commandWorked(primary.getDB("db").adminCommand(
- {configureFailPoint: "hangDuringBatchInsert", mode: {skip: 5}}));
-
- // In a background thread, issue an insert command to the primary that will insert 10 batches of
- // documents.
- var worker = new ScopedThread((host, collName, numToInsert) => {
- // Insert elements [{idx: 0}, {idx: 1}, ..., {idx: numToInsert - 1}].
- const docsToInsert = Array.from({length: numToInsert}, (_, i) => {
- return {idx: i};
- });
- var coll = new Mongo(host).getCollection(collName);
- assert.commandFailedWithCode(
- coll.insert(docsToInsert,
- {writeConcern: {w: "majority", wtimeout: 5000}, ordered: true}),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.host, collName, 10 * batchSize);
- worker.start();
-
- // Wait long enough to guarantee that all 5 batches of inserts have executed and the primary is
- // hung on the "hangDuringBatchInsert" fail point.
- checkLog.contains(primary, "hangDuringBatchInsert fail point enabled");
-
- // Make sure the insert command is, in fact, running in the background.
- assert.eq(primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length,
- 1);
-
- // Completely isolate the current primary (node 0), forcing it to step down.
- conns[0].disconnect(conns[1]);
- conns[0].disconnect(conns[2]);
-
- // Wait for node 1, the only other eligible node, to become the new primary.
- replTest.waitForState(replTest.nodes[1], ReplSetTest.State.PRIMARY);
- assert.eq(replTest.nodes[1], replTest.getPrimary());
-
- restartServerReplication(conns[2]);
-
- // Issue a write to the new primary.
- var collOnNewPrimary = replTest.nodes[1].getCollection(collName);
- assert.writeOK(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}}));
-
- // Isolate node 1, forcing it to step down as primary, and reconnect node 0, allowing it to step
- // up again.
- conns[1].disconnect(conns[2]);
- conns[0].reconnect(conns[2]);
-
- // Wait for node 0 to become primary again.
- replTest.waitForState(primary, ReplSetTest.State.PRIMARY);
- assert.eq(replTest.nodes[0], replTest.getPrimary());
-
- // Allow the batch insert to continue.
- assert.commandWorked(primary.getDB("db").adminCommand(
- {configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
-
- // Wait until the insert command is done.
- assert.soon(
- () =>
- primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length ===
- 0);
-
- worker.join();
-
- var docs = primary.getDB("db")[name].find({idx: {$exists: 1}}).sort({idx: 1}).toArray();
-
- // Any discontinuity in the "idx" values is an error. If an "idx" document failed to insert, all
- // of the "idx" documents after it should also have failed to insert, because the insert
- // specified {ordered: 1}. Note, if none of the inserts were successful, that's fine.
- docs.forEach((element, index) => {
- assert.eq(element.idx, index);
- });
+// Wait long enough to guarantee that all 5 batches of inserts have executed and the primary is
+// hung on the "hangDuringBatchInsert" fail point.
+checkLog.contains(primary, "hangDuringBatchInsert fail point enabled");
+
+// Make sure the insert command is, in fact, running in the background.
+assert.eq(primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length, 1);
+
+// Completely isolate the current primary (node 0), forcing it to step down.
+conns[0].disconnect(conns[1]);
+conns[0].disconnect(conns[2]);
+
+// Wait for node 1, the only other eligible node, to become the new primary.
+replTest.waitForState(replTest.nodes[1], ReplSetTest.State.PRIMARY);
+assert.eq(replTest.nodes[1], replTest.getPrimary());
+
+restartServerReplication(conns[2]);
+
+// Issue a write to the new primary.
+var collOnNewPrimary = replTest.nodes[1].getCollection(collName);
+assert.writeOK(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}}));
+
+// Isolate node 1, forcing it to step down as primary, and reconnect node 0, allowing it to step
+// up again.
+conns[1].disconnect(conns[2]);
+conns[0].reconnect(conns[2]);
+
+// Wait for node 0 to become primary again.
+replTest.waitForState(primary, ReplSetTest.State.PRIMARY);
+assert.eq(replTest.nodes[0], replTest.getPrimary());
+
+// Allow the batch insert to continue.
+assert.commandWorked(
+ primary.getDB("db").adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
+
+// Wait until the insert command is done.
+assert.soon(
+ () =>
+ primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length === 0);
+
+worker.join();
+
+var docs = primary.getDB("db")[name].find({idx: {$exists: 1}}).sort({idx: 1}).toArray();
+
+// Any discontinuity in the "idx" values is an error. If an "idx" document failed to insert, all
+// of the "idx" documents after it should also have failed to insert, because the insert
+// specified {ordered: 1}. Note, if none of the inserts were successful, that's fine.
+docs.forEach((element, index) => {
+ assert.eq(element.idx, index);
+});
- // Reconnect the remaining disconnected nodes, so we can exit.
- conns[0].reconnect(conns[1]);
- conns[1].reconnect(conns[2]);
- restartServerReplication(conns[1]);
+// Reconnect the remaining disconnected nodes, so we can exit.
+conns[0].reconnect(conns[1]);
+conns[1].reconnect(conns[2]);
+restartServerReplication(conns[1]);
- replTest.stopSet(15);
+replTest.stopSet(15);
}());
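
Two mechanics above are easy to miss. First, mode: {skip: 5} arms a fail point only after its first five hits, which is how the primary gets to insert five batches before hanging. Second, the blocking insert has to run on a ScopedThread (from parallelTester.js) so the main test thread stays free to step the primary down. The fail point bracket in isolation:

    // Let the first 5 hits pass, hang on the 6th.
    assert.commandWorked(primary.getDB("db").adminCommand(
        {configureFailPoint: "hangDuringBatchInsert", mode: {skip: 5}}));
    // ... the insert runs in a ScopedThread and eventually blocks here ...
    checkLog.contains(primary, "hangDuringBatchInsert fail point enabled");
    assert.commandWorked(primary.getDB("db").adminCommand(
        {configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
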
diff --git a/jstests/replsets/invalid_index_spec.js b/jstests/replsets/invalid_index_spec.js
index 7bca237351e..60944e3b906 100644
--- a/jstests/replsets/invalid_index_spec.js
+++ b/jstests/replsets/invalid_index_spec.js
@@ -4,58 +4,58 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
-
- const testName = "invalid_index_spec";
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- let primaryDB = replTest.getPrimary().getDB(testName);
- let secondary = replTest.getSecondary();
- let secondaryAdminDB = secondary.getDB("admin");
-
- // Set a fail point that allows for index creation with invalid spec fields.
- primaryDB.adminCommand(
- {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
-
- clearRawMongoProgramOutput();
-
- // Create a V1 index with invalid spec field. Expected to replicate without error or server
- // abort.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 1, name: "w_1", key: {w: 1}, invalidOption1: 1}]}));
-
- // Create a V2 index with invalid spec field. Expected to cause server abort on replication.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption2: 1}]}));
-
- assert.soon(function() {
- try {
- secondaryAdminDB.runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- }, "Node did not terminate due to invalid index spec", 60 * 1000);
-
- // fassert() calls std::abort(), which returns a different exit code for Windows vs. other
- // platforms.
- const exitCode = MongoRunner.EXIT_ABRUPT;
- replTest.stop(secondary, undefined, {allowedExitCode: exitCode});
-
- // During the transition from the old code path in IndexBuilder to IndexBuildsCoordinator, we
- // will accept the fatal assertion code from either component.
- const msgIndexBuilder = "Fatal Assertion 50769";
- const msgIndexBuildsCoordinator = "Fatal assertion 34437";
- const msgIndexError = "InvalidIndexSpecificationOption: The field 'invalidOption2'";
-
- assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
- rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
- rawMongoProgramOutput().match(msgIndexError),
- "Replication should have aborted on invalid index specification");
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/replsets/rslib.js");
+
+const testName = "invalid_index_spec";
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+let primaryDB = replTest.getPrimary().getDB(testName);
+let secondary = replTest.getSecondary();
+let secondaryAdminDB = secondary.getDB("admin");
+
+// Set a fail point that allows for index creation with invalid spec fields.
+primaryDB.adminCommand(
+ {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
+
+clearRawMongoProgramOutput();
+
+// Create a V1 index with invalid spec field. Expected to replicate without error or server
+// abort.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: "test", indexes: [{v: 1, name: "w_1", key: {w: 1}, invalidOption1: 1}]}));
+
+// Create a V2 index with invalid spec field. Expected to cause server abort on replication.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption2: 1}]}));
+
+assert.soon(function() {
+ try {
+ secondaryAdminDB.runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+}, "Node did not terminate due to invalid index spec", 60 * 1000);
+
+// fassert() calls std::abort(), which returns a different exit code for Windows vs. other
+// platforms.
+const exitCode = MongoRunner.EXIT_ABRUPT;
+replTest.stop(secondary, undefined, {allowedExitCode: exitCode});
+
+// During the transition from the old code path in IndexBuilder to IndexBuildsCoordinator, we
+// will accept the fatal assertion code from either component.
+const msgIndexBuilder = "Fatal Assertion 50769";
+const msgIndexBuildsCoordinator = "Fatal assertion 34437";
+const msgIndexError = "InvalidIndexSpecificationOption: The field 'invalidOption2'";
+
+assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
+ rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
+ rawMongoProgramOutput().match(msgIndexError),
+ "Replication should have aborted on invalid index specification");
+
+replTest.stopSet();
})();
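
The output-scanning idiom above leans on the shell's capture of spawned server output: clearRawMongoProgramOutput resets the capture buffer and rawMongoProgramOutput returns everything logged since, with match being plain String.prototype.match over that text. The bracket in isolation, with the assertion text from this test:

    clearRawMongoProgramOutput();
    // ... trigger the server behavior that should log a fatal assertion ...
    const out = rawMongoProgramOutput();
    assert(out.match("Fatal Assertion 50769") || out.match("Fatal assertion 34437"),
           "Replication should have aborted on invalid index specification");
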
diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js
index 2c9a67d4856..3cd6cb0a8dc 100644
--- a/jstests/replsets/ismaster1.js
+++ b/jstests/replsets/ismaster1.js
@@ -57,7 +57,7 @@ var checkMember = function(memberInfo) {
// make sure the result has proper values for fields with known values
var badValues = []; // each mistake will be saved as three entries (key, badvalue, goodvalue)
for (field in memberInfo.goodValues) {
- if (typeof(memberInfo.goodValues[field]) === "object") {
+ if (typeof (memberInfo.goodValues[field]) === "object") {
// assumes nested obj is disk in tags this is currently true, but may change
if (result[field].disk !== memberInfo.goodValues[field].disk) {
badValues.push("tags.disk");
@@ -92,7 +92,6 @@ config.members[3].arbiterOnly = true;
replTest.initiate(config);
var agreeOnPrimaryAndSetVersion = function(setVersion) {
-
print("Waiting for primary and replica set version " + setVersion);
var nodes = replTest.nodes;
@@ -228,8 +227,7 @@ checkMember({
ok: 1
},
wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
- unwantedFields:
- ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"]
+ unwantedFields: ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"]
});
checkMember({
diff --git a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
index 237fc1880fc..d5568cedf99 100644
--- a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
+++ b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
@@ -6,108 +6,107 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- // Start one of the nodes with priority: 0 to avoid elections.
- var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
-
- const dbName = "test";
- const collName = "kill_reads_with_prepare_conflicts_during_step_down";
-
- const primaryDB = primary.getDB(dbName);
- // Used to make sure that the correct number of operations was killed on this node
- // during stepdown.
- const primaryAdmin = primary.getDB("admin");
- const primaryColl = primaryDB[collName];
-
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- assert.commandWorked(primaryAdmin.adminCommand(
- {configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
-
- // Insert a document that we will later modify in a transaction.
- assert.commandWorked(primaryColl.insert({_id: 1}));
-
- jsTestLog("Start a transaction and prepare it");
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- TestData.dbName = dbName;
- TestData.collName = collName;
-
- const readBlockedOnPrepareConflictThread = startParallelShell(() => {
- const parallelTestDB = db.getSiblingDB(TestData.dbName);
- const parallelTestCollName = TestData.collName;
-
- // Advance the clusterTime with another insert.
- let res = assert.commandWorked(parallelTestDB.runCommand(
- {insert: parallelTestCollName, documents: [{advanceClusterTime: 1}]}));
- assert(res.hasOwnProperty("$clusterTime"), res);
- assert(res.$clusterTime.hasOwnProperty("clusterTime"), res);
- const clusterTime = res.$clusterTime.clusterTime;
- jsTestLog("Using afterClusterTime: " + clusterTime);
-
- // The following read should block on the prepared transaction since it will be
- // reading a conflicting document using an afterClusterTime later than the
- // prepareTimestamp.
- assert.commandFailedWithCode(parallelTestDB.runCommand({
- find: parallelTestCollName,
- filter: {_id: 1},
- readConcern: {afterClusterTime: clusterTime}
- }),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.port);
-
- jsTestLog("Waiting for failpoint");
- checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
-
- // Once we have confirmed that the find command has hit a prepare conflict, we can perform
- // a step down.
- jsTestLog("Stepping down primary");
- assert.commandWorked(
- primaryAdmin.adminCommand({replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
-
- readBlockedOnPrepareConflictThread();
-
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Validate that the read operation got killed during step down.
- let replMetrics =
- assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
-
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- primary = rst.getPrimary();
-
- // Make sure we can successfully commit the prepared transaction.
- jsTestLog("Restoring shell session state");
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- jsTestLog("Committing transaction");
- // Commit the transaction.
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- rst.stopSet();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+// Start one of the nodes with priority: 0 to avoid elections.
+var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+let primary = rst.getPrimary();
+
+const dbName = "test";
+const collName = "kill_reads_with_prepare_conflicts_during_step_down";
+
+const primaryDB = primary.getDB(dbName);
+// Used to make sure that the correct number of operations was killed on this node
+// during stepdown.
+const primaryAdmin = primary.getDB("admin");
+const primaryColl = primaryDB[collName];
+
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+assert.commandWorked(
+ primaryAdmin.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
+
+// Insert a document that we will later modify in a transaction.
+assert.commandWorked(primaryColl.insert({_id: 1}));
+
+jsTestLog("Start a transaction and prepare it");
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+
+const readBlockedOnPrepareConflictThread = startParallelShell(() => {
+ const parallelTestDB = db.getSiblingDB(TestData.dbName);
+ const parallelTestCollName = TestData.collName;
+
+ // Advance the clusterTime with another insert.
+ let res = assert.commandWorked(parallelTestDB.runCommand(
+ {insert: parallelTestCollName, documents: [{advanceClusterTime: 1}]}));
+ assert(res.hasOwnProperty("$clusterTime"), res);
+ assert(res.$clusterTime.hasOwnProperty("clusterTime"), res);
+ const clusterTime = res.$clusterTime.clusterTime;
+ jsTestLog("Using afterClusterTime: " + clusterTime);
+
+ // The following read should block on the prepared transaction since it will be
+ // reading a conflicting document using an afterClusterTime later than the
+ // prepareTimestamp.
+ assert.commandFailedWithCode(parallelTestDB.runCommand({
+ find: parallelTestCollName,
+ filter: {_id: 1},
+ readConcern: {afterClusterTime: clusterTime}
+ }),
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, primary.port);
+
+jsTestLog("Waiting for failpoint");
+checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
+
+// Once we have confirmed that the find command has hit a prepare conflict, we can perform
+// a step down.
+jsTestLog("Stepping down primary");
+assert.commandWorked(
+ primaryAdmin.adminCommand({replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
+
+readBlockedOnPrepareConflictThread();
+
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Validate that the read operation got killed during step down.
+let replMetrics = assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
+assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
+
+// Allow the primary to be re-elected, and wait for it.
+assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+primary = rst.getPrimary();
+
+// Make sure we can successfully commit the prepared transaction.
+jsTestLog("Restoring shell session state");
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+jsTestLog("Committing transaction");
+// Commit the transaction.
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+rst.stopSet();
})();
\ No newline at end of file
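Every hunk in this patch applies the same mechanical change: clang-format 7 stops indenting the body of an immediately-invoked function expression, so each test body shifts one level left while the statements themselves are untouched. A minimal before/after sketch of that pattern, using a hypothetical test body:

// Under clang-format <= 6.x the IIFE body was indented one level:
(function() {
    "use strict";
    jsTestLog("hypothetical test body");
}());

// Under clang-format 7.0.1 the IIFE body sits at column 0:
(function() {
"use strict";
jsTestLog("hypothetical test body");
}());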
diff --git a/jstests/replsets/kill_ttl_on_stepdown.js b/jstests/replsets/kill_ttl_on_stepdown.js
index 9dc8b619034..18738a3e7ff 100644
--- a/jstests/replsets/kill_ttl_on_stepdown.js
+++ b/jstests/replsets/kill_ttl_on_stepdown.js
@@ -5,66 +5,66 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const dbName = "kill_ttl_on_stepdown";
+const dbName = "kill_ttl_on_stepdown";
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {setParameter: "ttlMonitorSleepSecs=15"}
- });
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {setParameter: "ttlMonitorSleepSecs=15"}
+});
+rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- let db = primary.getDB(dbName);
+let primary = rst.getPrimary();
+let db = primary.getDB(dbName);
- // Create a TTL index.
- db.getCollection("test").createIndex({x: 1}, {expireAfterSeconds: 3600});
+// Create a TTL index.
+db.getCollection("test").createIndex({x: 1}, {expireAfterSeconds: 3600});
- function getNumTTLPasses() {
- let serverStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- return serverStatus.metrics.ttl.passes;
- }
+function getNumTTLPasses() {
+ let serverStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+ return serverStatus.metrics.ttl.passes;
+}
- // Let the TTLMonitor do some passes.
- assert.soon(() => {
- return getNumTTLPasses() > 0;
- }, "TTLMonitor never did any passes.");
+// Let the TTLMonitor do some passes.
+assert.soon(() => {
+ return getNumTTLPasses() > 0;
+}, "TTLMonitor never did any passes.");
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "alwaysOn"}));
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "alwaysOn"}));
- checkLog.contains(rst.getPrimary(), "Hanging due to hangTTLMonitorWithLock fail point");
+checkLog.contains(rst.getPrimary(), "Hanging due to hangTTLMonitorWithLock fail point");
- // See how many passes the TTLMonitor has done, before we stepdown the primary, killing it.
- let ttlPassesBeforeStepdown = getNumTTLPasses();
+// See how many passes the TTLMonitor has done before we step down the primary, killing it.
+let ttlPassesBeforeStepdown = getNumTTLPasses();
- // Force a stepdown of the primary.
- assert.commandWorked(primary.getDB("admin").runCommand(
- {replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- assert.commandWorked(primary.adminCommand({replSetStepUp: 1}));
+// Force a stepdown of the primary.
+assert.commandWorked(
+ primary.getDB("admin").runCommand({replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+assert.commandWorked(primary.adminCommand({replSetStepUp: 1}));
- primary = rst.getPrimary();
+primary = rst.getPrimary();
- // Ensure the TTLMonitor was interrupted.
- checkLog.contains(primary, "TTLMonitor was interrupted");
+// Ensure the TTLMonitor was interrupted.
+checkLog.contains(primary, "TTLMonitor was interrupted");
- // Disable the failpoint on the node that stepped down.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "off"}));
+// Disable the failpoint on the node that stepped down.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "off"}));
- // Wait until the number TTLMonitor passes increases, informing us that the TTLMonitor thread
- // was not killed entirely and will continue to run after stepdown finishes.
- assert.soon(() => {
- if (getNumTTLPasses() > ttlPassesBeforeStepdown) {
- return true;
- }
- }, "TTLMonitor was not running after stepdown");
+// Wait until the number of TTLMonitor passes increases, informing us that the TTLMonitor
+// thread was not killed entirely and will continue to run after stepdown finishes.
+assert.soon(() => {
+ if (getNumTTLPasses() > ttlPassesBeforeStepdown) {
+ return true;
+ }
+}, "TTLMonitor was not running after stepdown");
- rst.stopSet();
+rst.stopSet();
}());
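The test above relies on the usual failpoint choreography: enable the failpoint, wait for its log line, perform the operation under test, then disable it. A condensed sketch of that pattern, assuming a connection `conn` and that jstests/libs/check_log.js has been loaded:

// Hang the TTL monitor while it holds its lock.
assert.commandWorked(
    conn.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "alwaysOn"}));

// Block until the server logs that a thread is hanging at the failpoint.
checkLog.contains(conn, "Hanging due to hangTTLMonitorWithLock fail point");

// ... step down the primary (the operation under test) ...

// Release the hung TTL monitor thread.
assert.commandWorked(
    conn.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "off"}));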
diff --git a/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js b/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js
index 3ad0bd22248..49d5a1dabc6 100644
--- a/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js
+++ b/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js
@@ -6,123 +6,125 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
-
- const config = rst.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election before
- // we make the secondary step up.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
-
- let primary = rst.getPrimary();
- let secondary = rst.getSecondary();
-
- const dbName = "test";
- const collName = "kill_reads_with_prepare_conflicts_during_step_up";
-
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
-
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
-
- // Insert a document that we will later modify in a transaction.
- assert.commandWorked(primaryColl.insert({_id: 1}));
-
- jsTestLog("Start a transaction and prepare it");
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Advance the clusterTime with another insert.
- const clusterTimeAfterPrepare =
- assert
- .commandWorked(primaryColl.runCommand(
- "insert", {documents: [{advanceClusterTime: 1}], writeConcern: {w: "majority"}}))
- .operationTime;
-
- // Ensure that the secondary replicates the prepare and the additional insert.
- rst.awaitReplication();
-
- // Make sure a secondary read using afterClusterTime times out when trying to
- // read a prepared document.
- const secondaryDB = secondary.getDB(dbName);
- assert.commandFailedWithCode(secondaryDB.runCommand({
- find: collName,
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+
+const config = rst.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election before
+// we make the secondary step up.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
+
+let primary = rst.getPrimary();
+let secondary = rst.getSecondary();
+
+const dbName = "test";
+const collName = "kill_reads_with_prepare_conflicts_during_step_up";
+
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
+
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
+
+// Insert a document that we will later modify in a transaction.
+assert.commandWorked(primaryColl.insert({_id: 1}));
+
+jsTestLog("Start a transaction and prepare it");
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Advance the clusterTime with another insert.
+const clusterTimeAfterPrepare =
+ assert
+ .commandWorked(primaryColl.runCommand(
+ "insert", {documents: [{advanceClusterTime: 1}], writeConcern: {w: "majority"}}))
+ .operationTime;
+
+// Ensure that the secondary replicates the prepare and the additional insert.
+rst.awaitReplication();
+
+// Make sure a secondary read using afterClusterTime times out when trying to
+// read a prepared document.
+const secondaryDB = secondary.getDB(dbName);
+assert.commandFailedWithCode(secondaryDB.runCommand({
+ find: collName,
+ filter: {_id: 1},
+ readConcern: {afterClusterTime: clusterTimeAfterPrepare},
+ maxTimeMS: 2 * 1000 // 2 seconds
+}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Clear the secondary's log so that when we wait for the WTPrintPrepareConflictLog fail point,
+// we do not count the previous find.
+assert.commandWorked(secondaryDB.adminCommand({clearLog: "global"}));
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.clusterTime = clusterTimeAfterPrepare;
+
+const waitForSecondaryReadBlockedOnPrepareConflictThread = startParallelShell(() => {
+ // Allow for secondary reads.
+ db.getMongo().setSlaveOk();
+ const parallelTestDB = db.getSiblingDB(TestData.dbName);
+ const parallelTestCollName = TestData.collName;
+
+ // The following read should block on the prepared transaction since it will be
+ // reading a conflicting document using an afterClusterTime later than the
+ // prepareTimestamp.
+ assert.commandFailedWithCode(parallelTestDB.runCommand({
+ find: parallelTestCollName,
filter: {_id: 1},
- readConcern: {afterClusterTime: clusterTimeAfterPrepare},
- maxTimeMS: 2 * 1000 // 2 seconds
+ readConcern: {afterClusterTime: TestData.clusterTime}
}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Clear secondary log so that when we wait for the WTPrintPrepareConflictLog fail point, we
- // do not count the previous find.
- assert.commandWorked(secondaryDB.adminCommand({clearLog: "global"}));
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.clusterTime = clusterTimeAfterPrepare;
-
- const waitForSecondaryReadBlockedOnPrepareConflictThread = startParallelShell(() => {
- // Allow for secondary reads.
- db.getMongo().setSlaveOk();
- const parallelTestDB = db.getSiblingDB(TestData.dbName);
- const parallelTestCollName = TestData.collName;
-
- // The following read should block on the prepared transaction since it will be
- // reading a conflicting document using an afterClusterTime later than the
- // prepareTimestamp.
- assert.commandFailedWithCode(parallelTestDB.runCommand({
- find: parallelTestCollName,
- filter: {_id: 1},
- readConcern: {afterClusterTime: TestData.clusterTime}
- }),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, secondary.port);
-
- jsTestLog("Waiting for failpoint");
- checkLog.contains(secondary, "WTPrintPrepareConflictLog fail point enabled");
-
- // Once we've confirmed that the find command has hit a prepare conflict on the secondary, cause
- // that secondary to step up.
- jsTestLog("Stepping up secondary");
- rst.stepUp(secondary);
-
- waitForSecondaryReadBlockedOnPrepareConflictThread();
-
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- primary = rst.getPrimary();
-
- // Make sure we can successfully commit the prepared transaction.
- jsTestLog("Restoring shell session state");
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- jsTestLog("Committing transaction");
- // Commit the transaction.
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- rst.stopSet();
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, secondary.port);
+
+jsTestLog("Waiting for failpoint");
+checkLog.contains(secondary, "WTPrintPrepareConflictLog fail point enabled");
+
+// Once we've confirmed that the find command has hit a prepare conflict on the secondary, cause
+// that secondary to step up.
+jsTestLog("Stepping up secondary");
+rst.stepUp(secondary);
+
+waitForSecondaryReadBlockedOnPrepareConflictThread();
+
+rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+primary = rst.getPrimary();
+
+// Make sure we can successfully commit the prepared transaction.
+jsTestLog("Restoring shell session state");
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+jsTestLog("Committing transaction");
+// Commit the transaction.
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+rst.stopSet();
})();
\ No newline at end of file
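Both prepare-conflict tests hinge on the same read: a find at an afterClusterTime at or beyond the prepareTimestamp blocks on the prepared document, and the subsequent step-down or step-up kills it. Condensed, assuming `testDB`, `collName`, and `clusterTime` are set up as in the tests:

// This find conflicts with the prepared update of {_id: 1} and blocks until a
// replication state change interrupts it.
assert.commandFailedWithCode(testDB.runCommand({
    find: collName,
    filter: {_id: 1},
    readConcern: {afterClusterTime: clusterTime}
}),
                             ErrorCodes.InterruptedDueToReplStateChange);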
diff --git a/jstests/replsets/last_error_reported_after_stepdown.js b/jstests/replsets/last_error_reported_after_stepdown.js
index dbf2533fccc..bfd0e43bb8a 100644
--- a/jstests/replsets/last_error_reported_after_stepdown.js
+++ b/jstests/replsets/last_error_reported_after_stepdown.js
@@ -3,111 +3,110 @@
* stepdown.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryAdmin = primary.getDB("admin");
- // We need a separate connection to avoid interference with the ReplSetTestMechanism.
- const primaryDataConn = new Mongo(primary.host);
- const primaryDb = primaryDataConn.getDB("test");
- const collname = "last_error_reported_after_stepdown";
- const coll = primaryDb[collname];
+const primary = rst.getPrimary();
+const primaryAdmin = primary.getDB("admin");
+// We need a separate connection to avoid interference with the ReplSetTest mechanism.
+const primaryDataConn = new Mongo(primary.host);
+const primaryDb = primaryDataConn.getDB("test");
+const collname = "last_error_reported_after_stepdown";
+const coll = primaryDb[collname];
- // Never retry on network error, because this test needs to detect the network error.
- TestData.skipRetryOnNetworkError = true;
+// Never retry on network error, because this test needs to detect the network error.
+TestData.skipRetryOnNetworkError = true;
- // This is specifically testing unacknowledged legacy writes.
- primaryDataConn.forceWriteMode('legacy');
+// This is specifically testing unacknowledged legacy writes.
+primaryDataConn.forceWriteMode('legacy');
- assert.commandWorked(
- coll.insert([{_id: 'deleteme'}, {_id: 'updateme', nullfield: null}, {_id: 'findme'}],
- {writeConcern: {w: 1}}));
- rst.awaitReplication();
-
- // Note that "operation" should always be on primaryDataConn, so the stepdown doesn't clear
- // the last error.
- function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
- jsTestLog(`Trying ${description} on the primary, then stepping down`);
- // We need to make sure the command is complete before stepping down.
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 1}}));
- operation();
- // Wait for the operation to complete.
- checkLog.contains(primary, logMsg + ' appName: "MongoDB Shell"');
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 0}}));
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- var lastError = assert.commandWorked(primaryDb.runCommand({getLastError: 1}));
- if (typeof(errorCode) == "number")
- assert.eq(lastError.code,
- errorCode,
- "Expected error code " + errorCode + ", got lastError of " +
- JSON.stringify(lastError));
- else {
- assert(!lastError.err,
- "Expected no error, got lastError of " + JSON.stringify(lastError));
- }
- if (typeof(nDocs) == "number") {
- assert.eq(lastError.n, nDocs, "Wrong number of documents modified or updated");
- }
+assert.commandWorked(
+ coll.insert([{_id: 'deleteme'}, {_id: 'updateme', nullfield: null}, {_id: 'findme'}],
+ {writeConcern: {w: 1}}));
+rst.awaitReplication();
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
+// Note that "operation" should always be on primaryDataConn, so the stepdown doesn't clear
+// the last error.
+function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
+ jsTestLog(`Trying ${description} on the primary, then stepping down`);
+ // We need to make sure the command is complete before stepping down.
+ assert.commandWorked(
+ primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 1}}));
+ operation();
+ // Wait for the operation to complete.
+ checkLog.contains(primary, logMsg + ' appName: "MongoDB Shell"');
+ assert.commandWorked(
+ primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 0}}));
+ assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ var lastError = assert.commandWorked(primaryDb.runCommand({getLastError: 1}));
+ if (typeof (errorCode) == "number")
+ assert.eq(
+ lastError.code,
+ errorCode,
+ "Expected error code " + errorCode + ", got lastError of " + JSON.stringify(lastError));
+ else {
+ assert(!lastError.err, "Expected no error, got lastError of " + JSON.stringify(lastError));
+ }
+ if (typeof (nDocs) == "number") {
+ assert.eq(lastError.n, nDocs, "Wrong number of documents modified or updated");
}
- // Tests which should have no errors.
- // Clear log messages to avoid picking up the log of the insertion of the 'deleteme'
- // document.
- assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
- runStepDownTest({
- description: "insert",
- logMsg: "insert " + coll.getFullName(),
- operation: () => coll.insert({_id: 0})
- });
- runStepDownTest({
- description: "update",
- logMsg: "update ",
- operation: () => coll.update({_id: 'updateme'}, {'$inc': {x: 1}}),
- nDocs: 1
- });
- runStepDownTest({
- description: "remove",
- logMsg: "remove ",
- operation: () => coll.remove({_id: 'deleteme'}),
- nDocs: 1
- });
+ // Allow the primary to be re-elected, and wait for it.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
+
+// Tests which should have no errors.
+// Clear log messages to avoid picking up the log of the insertion of the 'deleteme'
+// document.
+assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
+runStepDownTest({
+ description: "insert",
+ logMsg: "insert " + coll.getFullName(),
+ operation: () => coll.insert({_id: 0})
+});
+runStepDownTest({
+ description: "update",
+ logMsg: "update ",
+ operation: () => coll.update({_id: 'updateme'}, {'$inc': {x: 1}}),
+ nDocs: 1
+});
+runStepDownTest({
+ description: "remove",
+ logMsg: "remove ",
+ operation: () => coll.remove({_id: 'deleteme'}),
+ nDocs: 1
+});
- // Tests which should have errors.
- // We repeat log messages from tests above, so clear the log first.
- assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
- runStepDownTest({
- description: "insert with error",
- logMsg: "insert " + coll.getFullName(),
- operation: () => coll.insert({_id: 0}),
- errorCode: ErrorCodes.DuplicateKey
- });
- runStepDownTest({
- description: "update with error",
- logMsg: "update ",
- operation: () => coll.update({_id: 'updateme'}, {'$inc': {nullfield: 1}}),
- errorCode: ErrorCodes.TypeMismatch,
- nDocs: 0
- });
- runStepDownTest({
- description: "remove with error",
- logMsg: "remove ",
- operation: () => coll.remove({'$nonsense': {x: 1}}),
- errorCode: ErrorCodes.BadValue,
- nDocs: 0
- });
+// Tests which should have errors.
+// We repeat log messages from tests above, so clear the log first.
+assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
+runStepDownTest({
+ description: "insert with error",
+ logMsg: "insert " + coll.getFullName(),
+ operation: () => coll.insert({_id: 0}),
+ errorCode: ErrorCodes.DuplicateKey
+});
+runStepDownTest({
+ description: "update with error",
+ logMsg: "update ",
+ operation: () => coll.update({_id: 'updateme'}, {'$inc': {nullfield: 1}}),
+ errorCode: ErrorCodes.TypeMismatch,
+ nDocs: 0
+});
+runStepDownTest({
+ description: "remove with error",
+ logMsg: "remove ",
+ operation: () => coll.remove({'$nonsense': {x: 1}}),
+ errorCode: ErrorCodes.BadValue,
+ nDocs: 0
+});
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/last_op_visible.js b/jstests/replsets/last_op_visible.js
index 4b8b70a24b4..94a0b32cbf0 100644
--- a/jstests/replsets/last_op_visible.js
+++ b/jstests/replsets/last_op_visible.js
@@ -8,51 +8,50 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
+"use strict";
- var name = 'lastOpVisible';
- var replTest = new ReplSetTest(
- {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}, waitForKeys: true});
+var name = 'lastOpVisible';
+var replTest = new ReplSetTest(
+ {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}, waitForKeys: true});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- replTest.stopSet();
- return;
- }
- replTest.initiate();
-
- var primary = replTest.getPrimary();
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ replTest.stopSet();
+ return;
+}
+replTest.initiate();
- // Do an insert without writeConcern.
- var res = primary.getDB(name).runCommandWithMetadata({insert: name, documents: [{x: 1}]},
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- var last_op_visible = res.commandReply["$replData"].lastOpVisible;
+var primary = replTest.getPrimary();
- // A find should return the same lastVisibleOp.
- res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "local"}},
+// Do an insert without writeConcern.
+var res = primary.getDB(name).runCommandWithMetadata({insert: name, documents: [{x: 1}]},
{"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
-
- // A majority readConcern with afterOpTime: lastOpVisible should also return the same
- // lastVisibleOp.
- res = primary.getDB(name).runCommandWithMetadata(
- {find: name, readConcern: {level: "majority", afterOpTime: last_op_visible}},
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
-
- // Do an insert without writeConcern.
- res = primary.getDB(name).runCommandWithMetadata(
- {insert: name, documents: [{x: 1}], writeConcern: {w: "majority"}}, {"$replData": 1});
- assert.commandWorked(res.commandReply);
- last_op_visible = res.commandReply["$replData"].lastOpVisible;
-
- // A majority readConcern should return the same lastVisibleOp.
- res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "majority"}},
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
- replTest.stopSet();
+assert.commandWorked(res.commandReply);
+var last_op_visible = res.commandReply["$replData"].lastOpVisible;
+
+// A find should return the same lastOpVisible.
+res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "local"}},
+ {"$replData": 1});
+assert.commandWorked(res.commandReply);
+assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
+
+// A majority readConcern with afterOpTime: lastOpVisible should also return the same
+// lastOpVisible.
+res = primary.getDB(name).runCommandWithMetadata(
+ {find: name, readConcern: {level: "majority", afterOpTime: last_op_visible}}, {"$replData": 1});
+assert.commandWorked(res.commandReply);
+assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
+
+// Do an insert with {w: "majority"} writeConcern.
+res = primary.getDB(name).runCommandWithMetadata(
+ {insert: name, documents: [{x: 1}], writeConcern: {w: "majority"}}, {"$replData": 1});
+assert.commandWorked(res.commandReply);
+last_op_visible = res.commandReply["$replData"].lastOpVisible;
+
+// A majority readConcern should return the same lastOpVisible.
+res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "majority"}},
+ {"$replData": 1});
+assert.commandWorked(res.commandReply);
+assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
+replTest.stopSet();
}());
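The $replData reply metadata exercised above is what carries lastOpVisible back to the client; the invariant under test is that a read observes the same value the preceding write reported. A minimal sketch, assuming `primary` and `name` as in the test:

// Write, capturing the replication metadata from the reply.
var res = primary.getDB(name).runCommandWithMetadata({insert: name, documents: [{x: 1}]},
                                                     {"$replData": 1});
assert.commandWorked(res.commandReply);
var lastOpVisible = res.commandReply["$replData"].lastOpVisible;

// A subsequent local-readConcern find must report the same lastOpVisible.
res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "local"}},
                                                 {"$replData": 1});
assert.commandWorked(res.commandReply);
assert.eq(lastOpVisible, res.commandReply["$replData"].lastOpVisible);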
diff --git a/jstests/replsets/last_vote.js b/jstests/replsets/last_vote.js
index 6ff7198b3d3..62901259364 100644
--- a/jstests/replsets/last_vote.js
+++ b/jstests/replsets/last_vote.js
@@ -11,216 +11,207 @@
// @tags: [requires_persistence]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js"); // For getLatestOp()
-
- var name = "last_vote";
- var rst = new ReplSetTest({
- name: name,
- nodes: 2,
- });
- rst.startSet();
-
- // Lower the election timeout to make the test run faster since it waits for multiple elections.
- var conf = rst.getReplSetConfig();
- conf.settings = {
- electionTimeoutMillis: 6000,
- };
- rst.initiate(conf);
-
- const lastVoteNS = 'local.replset.election';
-
- function getLastVoteDoc(conn) {
- assert.eq(
- conn.getCollection(lastVoteNS).find().itcount(), 1, 'last vote should be singleton');
- return conn.getCollection(lastVoteNS).findOne();
+"use strict";
+load("jstests/replsets/rslib.js"); // For getLatestOp()
+
+var name = "last_vote";
+var rst = new ReplSetTest({
+ name: name,
+ nodes: 2,
+});
+rst.startSet();
+
+// Lower the election timeout to make the test run faster since it waits for multiple elections.
+var conf = rst.getReplSetConfig();
+conf.settings = {
+ electionTimeoutMillis: 6000,
+};
+rst.initiate(conf);
+
+const lastVoteNS = 'local.replset.election';
+
+function getLastVoteDoc(conn) {
+ assert.eq(conn.getCollection(lastVoteNS).find().itcount(), 1, 'last vote should be singleton');
+ return conn.getCollection(lastVoteNS).findOne();
+}
+
+function setLastVoteDoc(conn, term, candidate) {
+ var newLastVote = {term: term, candidateIndex: rst.getNodeId(candidate)};
+ return assert.writeOK(conn.getCollection(lastVoteNS).update({}, newLastVote));
+}
+
+function assertNodeHasLastVote(node, term, candidate) {
+ var lastVoteDoc = getLastVoteDoc(node);
+ assert.eq(lastVoteDoc.term, term, node.host + " had wrong last vote term.");
+ assert.eq(lastVoteDoc.candidateIndex,
+ rst.getNodeId(candidate),
+ node.host + " had wrong last vote candidate.");
+}
+
+function assertCurrentTerm(node, term) {
+ var stat = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
+ assert.eq(stat.term, term, "Term changed when it should not have");
+}
+
+jsTestLog("Test that last vote is set on successive elections");
+
+// Run a few successive elections, alternating who becomes primary.
+var numElections = 3;
+for (var i = 0; i < numElections; i++) {
+ var primary = rst.getPrimary();
+ var secondary = rst.getSecondary();
+ var term = getLatestOp(primary).t;
+
+ // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the
+ // correct size, so secondaries didn't vote in the first election.
+ if (i > 0) {
+ jsTestLog("Last vote should have term: " + term + " and candidate: " + primary.host +
+ ", index: " + rst.getNodeId(primary));
+ rst.nodes.forEach(function(node) {
+ assertNodeHasLastVote(node, term, primary);
+ });
}
+ assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 10, force: true}));
- function setLastVoteDoc(conn, term, candidate) {
- var newLastVote = {term: term, candidateIndex: rst.getNodeId(candidate)};
- return assert.writeOK(conn.getCollection(lastVoteNS).update({}, newLastVote));
- }
+ // Make sure a new primary has been established.
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- function assertNodeHasLastVote(node, term, candidate) {
- var lastVoteDoc = getLastVoteDoc(node);
- assert.eq(lastVoteDoc.term, term, node.host + " had wrong last vote term.");
- assert.eq(lastVoteDoc.candidateIndex,
- rst.getNodeId(candidate),
- node.host + " had wrong last vote candidate.");
- }
+ // Reset election timeout for the old primary.
+ assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+}
- function assertCurrentTerm(node, term) {
- var stat = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
- assert.eq(stat.term, term, "Term changed when it should not have");
- }
+var term = getLatestOp(rst.getPrimary()).t + 100;
- jsTestLog("Test that last vote is set on successive elections");
-
- // Run a few successive elections, alternating who becomes primary.
- var numElections = 3;
- for (var i = 0; i < numElections; i++) {
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
- var term = getLatestOp(primary).t;
-
- // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the
- // correct size, so secondaries didn't vote in the first election.
- if (i > 0) {
- jsTestLog("Last vote should have term: " + term + " and candidate: " + primary.host +
- ", index: " + rst.getNodeId(primary));
- rst.nodes.forEach(function(node) {
- assertNodeHasLastVote(node, term, primary);
- });
- }
- assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 10, force: true}));
-
- // Make sure a new primary has been established.
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
-
- // Reset election timeout for the old primary.
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- }
+jsTestLog("Test that last vote is loaded on startup");
- var term = getLatestOp(rst.getPrimary()).t + 100;
-
- jsTestLog("Test that last vote is loaded on startup");
-
- // Ensure that all ops are replicated before stepping up node 1.
- rst.awaitReplication();
-
- // We cannot reconfig node 0 to have priority 0 if it is currently the primary,
- // so we make sure node 1 is primary.
- jsTestLog("Stepping up node 1");
- rst.stepUp(rst.nodes[1]);
-
- jsTestLog("Reconfiguring cluster to make node 0 unelectable so it stays SECONDARY on restart");
- conf = rst.getReplSetConfigFromNode();
- conf.version++;
- conf.members[0].priority = 0;
- reconfig(rst, conf);
- rst.awaitNodesAgreeOnConfigVersion();
-
- jsTestLog("Restarting node 0 as a standalone");
- var node0 = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
- jsTestLog("Stopping node 1");
- rst.stop(1); // Stop node 1 so that node 0 controls the term by itself.
- jsTestLog("Setting the lastVote on node 0 to term: " + term + " candidate: " +
- rst.nodes[0].host + ", index: 0");
- setLastVoteDoc(node0, term, rst.nodes[0]);
-
- jsTestLog("Restarting node 0 in replica set mode");
- node0 = rst.restart(0); // Restart in replSet mode again.
- rst.waitForState(node0, ReplSetTest.State.SECONDARY);
-
- assert.soonNoExcept(function() {
- assertCurrentTerm(node0, term);
- return true;
- });
-
- jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command, " +
- "expecting failure in old term");
- var response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: true,
- term: term - 1,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(!response.voteGranted,
- "node granted vote in term before last vote doc: " + tojson(response));
- assertNodeHasLastVote(node0, term, rst.nodes[0]);
- assertCurrentTerm(node0, term);
+// Ensure that all ops are replicated before stepping up node 1.
+rst.awaitReplication();
- jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in same term, " +
- "expecting success but no recording of lastVote");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: true,
- term: term,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(response.voteGranted,
- "node failed to grant dryRun vote in term equal to last vote doc: " + tojson(response));
- assert.eq(response.reason,
- "",
- "replSetRequestVotes response had the wrong reason: " + tojson(response));
- assertNodeHasLastVote(node0, term, rst.nodes[0]);
- assertCurrentTerm(node0, term);
+// We cannot reconfig node 0 to have priority 0 if it is currently the primary,
+// so we make sure node 1 is primary.
+jsTestLog("Stepping up node 1");
+rst.stepUp(rst.nodes[1]);
- jsTestLog(
- "Manually sending node 0 a replSetRequestVotes command, expecting failure in same term");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: false,
- term: term,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(!response.voteGranted,
- "node granted vote in term of last vote doc: " + tojson(response));
- assertNodeHasLastVote(node0, term, rst.nodes[0]);
- assertCurrentTerm(node0, term);
+jsTestLog("Reconfiguring cluster to make node 0 unelectable so it stays SECONDARY on restart");
+conf = rst.getReplSetConfigFromNode();
+conf.version++;
+conf.members[0].priority = 0;
+reconfig(rst, conf);
+rst.awaitNodesAgreeOnConfigVersion();
+
+jsTestLog("Restarting node 0 as a standalone");
+var node0 = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
+jsTestLog("Stopping node 1");
+rst.stop(1); // Stop node 1 so that node 0 controls the term by itself.
+jsTestLog("Setting the lastVote on node 0 to term: " + term + " candidate: " + rst.nodes[0].host +
+ ", index: 0");
+setLastVoteDoc(node0, term, rst.nodes[0]);
- jsTestLog("Manually sending node 0 a replSetRequestVotes command, " +
- "expecting success with a recording of the new lastVote");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: false,
- term: term + 1,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term + 1,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(response.voteGranted,
- "node failed to grant vote in term greater than last vote doc: " + tojson(response));
- assert.eq(response.reason,
- "",
- "replSetRequestVotes response had the wrong reason: " + tojson(response));
- assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
- assertCurrentTerm(node0, term + 1);
-
- jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in future term, " +
- "expecting success but no recording of lastVote");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: true,
- term: term + 2,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term + 2,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(response.voteGranted,
- "node failed to grant vote in term greater than last vote doc: " + tojson(response));
- assert.eq(response.reason,
- "",
- "replSetRequestVotes response had the wrong reason: " + tojson(response));
- assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
- assertCurrentTerm(node0, term + 2);
-
- rst.stopSet();
+jsTestLog("Restarting node 0 in replica set mode");
+node0 = rst.restart(0); // Restart in replSet mode again.
+rst.waitForState(node0, ReplSetTest.State.SECONDARY);
+
+assert.soonNoExcept(function() {
+ assertCurrentTerm(node0, term);
+ return true;
+});
+
+jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command, " +
+ "expecting failure in old term");
+var response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: true,
+ term: term - 1,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(
+ response.term, term, "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(!response.voteGranted,
+ "node granted vote in term before last vote doc: " + tojson(response));
+assertNodeHasLastVote(node0, term, rst.nodes[0]);
+assertCurrentTerm(node0, term);
+
+jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in same term, " +
+ "expecting success but no recording of lastVote");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: true,
+ term: term,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(
+ response.term, term, "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(response.voteGranted,
+ "node failed to grant dryRun vote in term equal to last vote doc: " + tojson(response));
+assert.eq(
+ response.reason, "", "replSetRequestVotes response had the wrong reason: " + tojson(response));
+assertNodeHasLastVote(node0, term, rst.nodes[0]);
+assertCurrentTerm(node0, term);
+
+jsTestLog("Manually sending node 0 a replSetRequestVotes command, expecting failure in same term");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: false,
+ term: term,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(
+ response.term, term, "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(!response.voteGranted, "node granted vote in term of last vote doc: " + tojson(response));
+assertNodeHasLastVote(node0, term, rst.nodes[0]);
+assertCurrentTerm(node0, term);
+
+jsTestLog("Manually sending node 0 a replSetRequestVotes command, " +
+ "expecting success with a recording of the new lastVote");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: false,
+ term: term + 1,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(response.term,
+ term + 1,
+ "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(response.voteGranted,
+ "node failed to grant vote in term greater than last vote doc: " + tojson(response));
+assert.eq(
+ response.reason, "", "replSetRequestVotes response had the wrong reason: " + tojson(response));
+assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
+assertCurrentTerm(node0, term + 1);
+
+jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in future term, " +
+ "expecting success but no recording of lastVote");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: true,
+ term: term + 2,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(response.term,
+ term + 2,
+ "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(response.voteGranted,
+ "node failed to grant vote in term greater than last vote doc: " + tojson(response));
+assert.eq(
+ response.reason, "", "replSetRequestVotes response had the wrong reason: " + tojson(response));
+assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
+assertCurrentTerm(node0, term + 2);
+
+rst.stopSet();
})();
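The vote-request shape exercised above separates two cases: a dryRun request probes whether the node would vote without persisting a new lastVote document, while a real request in a higher term both grants and records the vote. A sketch of the dry-run probe, assuming `node0`, `name`, `term`, and `conf` as in the test (getLatestOp comes from jstests/replsets/rslib.js):

// Dry-run vote request: may bump the node's term, but never its lastVote document.
var response = assert.commandWorked(node0.adminCommand({
    replSetRequestVotes: 1,
    setName: name,
    dryRun: true,
    term: term + 2,
    candidateIndex: 1,
    configVersion: conf.version,
    lastCommittedOp: getLatestOp(node0)
}));
assert(response.voteGranted, tojson(response));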
diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js
index cc368c1b95c..c1fa2ffb21f 100644
--- a/jstests/replsets/lastop.js
+++ b/jstests/replsets/lastop.js
@@ -2,126 +2,125 @@
// errors based on the preexisting data (e.g. duplicate key errors, but not parse errors).
// lastOp is used as the optime to wait for when write concern waits for replication.
(function() {
- var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
- replTest.startSet();
- replTest.initiate();
+var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
+replTest.startSet();
+replTest.initiate();
- var primary = replTest.getPrimary();
+var primary = replTest.getPrimary();
- // Two connections
- var m1 = new Mongo(primary.host);
- var m2 = new Mongo(primary.host);
+// Two connections
+var m1 = new Mongo(primary.host);
+var m2 = new Mongo(primary.host);
- // Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp
- // of m2's write.
+// Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp
+// of m2's write.
- assert.writeOK(m1.getCollection("test.foo").insert({m1: 1}));
- var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m1.getCollection("test.foo").insert({m1: 1}));
+var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
- var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
+var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op update
- assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
- var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op update
+assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
+var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, secondOp);
+assert.eq(noOp, secondOp);
- assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
- var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
+var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 98}));
- var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 98}));
+var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op delete
- assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op delete
+assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fourthOp);
+assert.eq(noOp, fourthOp);
- // Dummy write, for a new lastOp.
- assert.writeOK(m1.getCollection("test.foo").insert({m1: 99}));
- var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Dummy write, for a new lastOp.
+assert.writeOK(m1.getCollection("test.foo").insert({m1: 99}));
+var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 97}));
- var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 97}));
+var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op find-and-modify delete
- m1.getCollection("test.foo").findAndModify({query: {m1: 1}, remove: 'true'});
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op find-and-modify delete
+m1.getCollection("test.foo").findAndModify({query: {m1: 1}, remove: 'true'});
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, sixthOp);
+assert.eq(noOp, sixthOp);
- assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
- var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
+var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 96}));
- var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 96}));
+var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op create index.
- assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op create index.
+assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, eighthOp);
+assert.eq(noOp, eighthOp);
- assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
- var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
+var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
- var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
+var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // update with immutable field error
- assert.writeError(m1.getCollection("test.foo").update({_id: 1, x: 1}, {$set: {_id: 2}}));
- // "After applying the update to the document {_id: 1.0 , ...}, the (immutable) field '_id'
- // was found to have been altered to _id: 2.0"
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// update with immutable field error
+assert.writeError(m1.getCollection("test.foo").update({_id: 1, x: 1}, {$set: {_id: 2}}));
+// "After applying the update to the document {_id: 1.0 , ...}, the (immutable) field '_id'
+// was found to have been altered to _id: 2.0"
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, tenthOp);
+assert.eq(noOp, tenthOp);
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 992}));
- var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 992}));
+var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // find-and-modify immutable field error
- try {
- m1.getCollection("test.foo")
- .findAndModify({query: {_id: 1, x: 1}, update: {$set: {_id: 2}}});
- // The findAndModify shell helper should throw.
- assert(false);
- } catch (e) {
- assert.eq(e.code, 66);
- }
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// find-and-modify immutable field error
+try {
+ m1.getCollection("test.foo").findAndModify({query: {_id: 1, x: 1}, update: {$set: {_id: 2}}});
+ // The findAndModify shell helper should throw.
+ assert(false);
+} catch (e) {
+ assert.eq(e.code, 66);
+}
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, eleventhOp);
+assert.eq(noOp, eleventhOp);
- var bigString = new Array(3000).toString();
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
+var bigString = new Array(3000).toString();
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
- // No-op insert
- assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
- var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op insert
+assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
+var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
- var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
+var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // Hits DuplicateKey error and fails insert -- no-op
- assert.writeError(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Hits DuplicateKey error and fails insert -- no-op
+assert.writeError(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fourteenthOp);
+assert.eq(noOp, fourteenthOp);
- // Test update and delete failures in legacy write mode.
- m2.forceWriteMode('legacy');
- m1.forceWriteMode('legacy');
- m2.getCollection("test.foo").insert({m2: 995});
- var fifthteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Test update and delete failures in legacy write mode.
+m2.forceWriteMode('legacy');
+m1.forceWriteMode('legacy');
+m2.getCollection("test.foo").insert({m2: 995});
+var fifteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- m1.getCollection("test.foo").remove({m1: 1});
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fifthteenthOp);
+m1.getCollection("test.foo").remove({m1: 1});
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.eq(noOp, fifteenthOp);
- m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 4}});
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fifthteenthOp);
- replTest.stopSet();
+m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 4}});
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.eq(noOp, fifteenthOp);
+replTest.stopSet();
})();
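The invariant lastop.js checks throughout is that a failed or no-op legacy write leaves getLastError's lastOp at the most recent operation actually applied, here the other connection's insert. Condensed, with `m1` and `m2` being two connections to the same primary as in the test:

assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
var realOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;

// A no-op update on m1 must report the latest applied op (m2's insert),
// not a new optime of its own.
assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
assert.eq(m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp, realOp);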
diff --git a/jstests/replsets/libs/election_handoff.js b/jstests/replsets/libs/election_handoff.js
index 2edbf122af6..f05e6b702d7 100644
--- a/jstests/replsets/libs/election_handoff.js
+++ b/jstests/replsets/libs/election_handoff.js
@@ -5,7 +5,6 @@
*/
var ElectionHandoffTest = (function() {
-
load("jstests/libs/check_log.js");
load("jstests/replsets/rslib.js");
@@ -76,10 +75,10 @@ var ElectionHandoffTest = (function() {
// If there are only two nodes in the set, verify that the old primary voted "yes".
if (numNodes === 2) {
checkLog.contains(expectedCandidate,
- `skipping dry run and running for election in term ${term+1}`);
+ `skipping dry run and running for election in term ${term + 1}`);
checkLog.contains(
expectedCandidate,
- `VoteRequester(term ${term+1}) received a yes vote from ${primary.host}`);
+ `VoteRequester(term ${term + 1}) received a yes vote from ${primary.host}`);
}
rst.awaitNodesAgreeOnPrimary();
@@ -87,5 +86,4 @@ var ElectionHandoffTest = (function() {
}
return {testElectionHandoff: testElectionHandoff, stepDownPeriodSecs: kStepDownPeriodSecs};
-
})();
diff --git a/jstests/replsets/libs/initial_sync_test.js b/jstests/replsets/libs/initial_sync_test.js
index c457d82ab2a..9e38a4edd54 100644
--- a/jstests/replsets/libs/initial_sync_test.js
+++ b/jstests/replsets/libs/initial_sync_test.js
@@ -170,7 +170,6 @@ function InitialSyncTest(name = "InitialSyncTest", replSet, timeout) {
return true;
}
return hasCompletedInitialSync();
-
}, "initial sync did not pause or complete");
}
diff --git a/jstests/replsets/libs/initial_sync_update_missing_doc.js b/jstests/replsets/libs/initial_sync_update_missing_doc.js
index b18b3a7a5b0..7a8e6823a7d 100644
--- a/jstests/replsets/libs/initial_sync_update_missing_doc.js
+++ b/jstests/replsets/libs/initial_sync_update_missing_doc.js
@@ -14,7 +14,6 @@
// must be called after reInitiateSetWithSecondary, followed by
// turnOffHangBeforeGettingMissingDocFailPoint.
var reInitiateSetWithSecondary = function(replSet, secondaryConfig) {
-
const secondary = replSet.add(secondaryConfig);
secondary.setSlaveOk();
@@ -32,14 +31,12 @@ var reInitiateSetWithSecondary = function(replSet, secondaryConfig) {
'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
return secondary;
-
};
// Must be called after reInitiateSetWithSecondary. Turns off the
// initialSyncHangBeforeCopyingDatabases fail point so that the secondary will start copying all
// non-local databases.
var turnOffHangBeforeCopyingDatabasesFailPoint = function(secondary) {
-
assert.commandWorked(secondary.getDB('admin').runCommand(
{configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
@@ -55,7 +52,6 @@ var turnOffHangBeforeCopyingDatabasesFailPoint = function(secondary) {
// initialSyncHangBeforeGettingMissingDocument fail point so that the secondary can check if the
// sync source has the missing document.
var turnOffHangBeforeGettingMissingDocFailPoint = function(primary, secondary, name, numInserted) {
-
if (numInserted === 0) {
// If we did not re-insert the missing document, insert an arbitrary document to move
// forward minValid even though the document was not found.
@@ -77,11 +73,9 @@ var turnOffHangBeforeGettingMissingDocFailPoint = function(primary, secondary, n
secondary, 'Missing document not found on source; presumably deleted later in oplog.');
}
checkLog.contains(secondary, 'initial sync done');
-
};
var finishAndValidate = function(replSet, name, firstOplogEnd, numInserted, numDocuments) {
-
replSet.awaitReplication();
replSet.awaitSecondaryNodes();
const dbName = 'test';
@@ -112,7 +106,6 @@ var finishAndValidate = function(replSet, name, firstOplogEnd, numInserted, numD
assert.eq(0,
secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
"Oplog buffer was not dropped after initial sync");
-
};
var updateRemove = function(sessionColl, query) {
diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js
index 8ab9e148ecb..fe42cab63b6 100644
--- a/jstests/replsets/libs/rename_across_dbs.js
+++ b/jstests/replsets/libs/rename_across_dbs.js
@@ -68,18 +68,18 @@ var RenameAcrossDatabasesTest = function(options) {
protocolVersion: 1,
members: [
{
- _id: 0,
- host: hosts[0],
+ _id: 0,
+ host: hosts[0],
},
{
- _id: 1,
- host: hosts[1],
- priority: 0,
+ _id: 1,
+ host: hosts[1],
+ priority: 0,
},
{
- _id: 2,
- host: hosts[2],
- arbiterOnly: true,
+ _id: 2,
+ host: hosts[2],
+ arbiterOnly: true,
},
],
version: nextVersion,
@@ -160,5 +160,4 @@ var RenameAcrossDatabasesTest = function(options) {
_testLog('Test completed. Stopping replica set.');
replTest.stopSet();
};
-
};
diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js
index d027154a71f..933a0e5acc3 100644
--- a/jstests/replsets/libs/rollback_test.js
+++ b/jstests/replsets/libs/rollback_test.js
@@ -222,7 +222,6 @@ function RollbackTest(name = "RollbackTest", replSet) {
`RBID is too large. current RBID: ${rbid}, last RBID: ${lastRBID}`);
return rbid === lastRBID + 1;
-
}, "Timed out waiting for RBID to increment on " + curSecondary.host);
} else {
log(`Skipping RBID check on ${curSecondary.host} because shutdowns ` +
diff --git a/jstests/replsets/libs/secondary_reads_test.js b/jstests/replsets/libs/secondary_reads_test.js
index da22f9b73b5..192421827f8 100644
--- a/jstests/replsets/libs/secondary_reads_test.js
+++ b/jstests/replsets/libs/secondary_reads_test.js
@@ -36,7 +36,6 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
}
this.startSecondaryReaders = function(nReaders, readFn) {
-
let read = function() {
db.getMongo().setSlaveOk();
db = db.getSiblingDB(TestData.dbName);
@@ -70,7 +69,6 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
// The returned function will return once the batch has reached the point where it has applied
// but not updated the last applied optime.
this.pauseSecondaryBatchApplication = function() {
-
clearRawMongoProgramOutput();
assert.commandWorked(
@@ -102,7 +100,7 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
assert.writeOK(primaryDB.getCollection(signalColl).insert({_id: testDoneId}));
for (let i = 0; i < readers.length; i++) {
const await = readers[i];
- await();
+ await ();
print("reader " + i + " done");
}
readers = [];
diff --git a/jstests/replsets/libs/tags.js b/jstests/replsets/libs/tags.js
index fab982279e5..2f52516e4b3 100644
--- a/jstests/replsets/libs/tags.js
+++ b/jstests/replsets/libs/tags.js
@@ -56,56 +56,57 @@ var TagsTest = function(options) {
protocolVersion: 1,
members: [
{
- _id: 0,
- host: nodes[0],
- tags: {
- server: '0',
- dc: 'ny',
- ny: '1',
- rack: 'ny.rk1',
- },
+ _id: 0,
+ host: nodes[0],
+ tags: {
+ server: '0',
+ dc: 'ny',
+ ny: '1',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 1,
- host: nodes[1],
- tags: {
- server: '1',
- dc: 'ny',
- ny: '2',
- rack: 'ny.rk1',
- },
+ _id: 1,
+ host: nodes[1],
+ tags: {
+ server: '1',
+ dc: 'ny',
+ ny: '2',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 2,
- host: nodes[2],
- tags: {
- server: '2',
- dc: 'ny',
- ny: '3',
- rack: 'ny.rk2', 2: 'this',
- },
+ _id: 2,
+ host: nodes[2],
+ tags: {
+ server: '2',
+ dc: 'ny',
+ ny: '3',
+ rack: 'ny.rk2',
+ 2: 'this',
+ },
},
{
- _id: 3,
- host: nodes[3],
- priority: 0,
- tags: {
- server: '3',
- dc: 'sf',
- sf: '1',
- rack: 'sf.rk1',
- },
+ _id: 3,
+ host: nodes[3],
+ priority: 0,
+ tags: {
+ server: '3',
+ dc: 'sf',
+ sf: '1',
+ rack: 'sf.rk1',
+ },
},
{
- _id: 4,
- host: nodes[4],
- priority: 0,
- tags: {
- server: '4',
- dc: 'sf',
- sf: '2',
- rack: 'sf.rk2',
- },
+ _id: 4,
+ host: nodes[4],
+ priority: 0,
+ tags: {
+ server: '4',
+ dc: 'sf',
+ sf: '2',
+ rack: 'sf.rk2',
+ },
},
],
settings: {
@@ -171,13 +172,12 @@ var TagsTest = function(options) {
primary.forceWriteMode(options.forceWriteMode);
}
var writeConcern = {
- writeConcern:
- {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS}
+ writeConcern: {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS}
};
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
jsTestLog('ensurePrimary - Successfully written a document to primary node (' +
- replTest.nodes[nodeId].host + ') using a write concern of w:' +
- expectedWritableNodesCount);
+ replTest.nodes[nodeId].host +
+ ') using a write concern of w:' + expectedWritableNodesCount);
return primary;
};
@@ -308,5 +308,4 @@ var TagsTest = function(options) {
replTest.stopSet();
};
-
};
diff --git a/jstests/replsets/libs/two_phase_drops.js b/jstests/replsets/libs/two_phase_drops.js
index 46b30cb9ed5..bb772012fdb 100644
--- a/jstests/replsets/libs/two_phase_drops.js
+++ b/jstests/replsets/libs/two_phase_drops.js
@@ -182,8 +182,8 @@ class TwoPhaseDropCollectionTest {
TwoPhaseDropCollectionTest.listCollections(db, {includePendingDrops: true});
TwoPhaseDropCollectionTest._testLog("Checking presence of drop-pending collection for " +
- collName + " in the collection list: " +
- tojson(collections));
+ collName +
+ " in the collection list: " + tojson(collections));
let pendingDropRegex = TwoPhaseDropCollectionTest.pendingDropRegex(collName);
return collections.find(c => pendingDropRegex.test(c.name));
diff --git a/jstests/replsets/linearizable_read_concern.js b/jstests/replsets/linearizable_read_concern.js
index 9b8dd68bd38..5984577a4ed 100644
--- a/jstests/replsets/linearizable_read_concern.js
+++ b/jstests/replsets/linearizable_read_concern.js
@@ -16,133 +16,133 @@ load('jstests/replsets/rslib.js');
load('jstests/libs/parallelTester.js');
load('jstests/libs/write_concern_util.js');
(function() {
- 'use strict';
- var send_linearizable_read = function() {
- // The primary will step down and throw an exception, which is expected.
- var coll = db.getSiblingDB("test").foo;
- jsTestLog('Sending in linearizable read in secondary thread');
- // 'isMaster' ensures that the following command fails (and returns a response rather than
- // an exception) before its connection is cut because of the primary step down. Refer to
- // SERVER-24574.
- assert.commandWorked(coll.runCommand({isMaster: 1, hangUpOnStepDown: false}));
- assert.commandFailedWithCode(
- coll.runCommand(
- {'find': 'foo', readConcern: {level: "linearizable"}, maxTimeMS: 60000}),
- ErrorCodes.InterruptedDueToReplStateChange);
- };
-
- var num_nodes = 3;
- var name = 'linearizable_read_concern';
- var replTest = new ReplSetTest({name: name, nodes: num_nodes, useBridge: true});
- var config = replTest.getReplSetConfig();
-
- // Increased election timeout to avoid having the primary step down while we are
- // testing linearizable functionality on an isolated primary.
- config.settings = {electionTimeoutMillis: 60000};
-
- replTest.startSet();
- replTest.initiate(config);
-
- // Without a sync source the heartbeat interval will be half of the election timeout, 30
- // seconds. It thus will take almost 30 seconds for the secondaries to set the primary as
- // their sync source and begin replicating.
- replTest.awaitReplication();
- var primary = replTest.getPrimary();
- var secondaries = replTest.getSecondaries();
-
- // Do a write to have something to read.
- assert.writeOK(primary.getDB("test").foo.insert(
- {"number": 7},
- {"writeConcern": {"w": "majority", "wtimeout": ReplSetTest.kDefaultTimeoutMS}}));
-
- jsTestLog("Testing linearizable readConcern parsing");
- // This command is sent to the primary, and the primary is fully connected so it should work.
- var goodRead = assert.writeOK(primary.getDB("test").runCommand(
- {'find': 'foo', readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
- assert.eq(goodRead.cursor.firstBatch[0].number, 7);
-
- // This fails because you cannot have a linearizable read command sent to a secondary.
- var badCmd = assert.commandFailed(secondaries[0].getDB("test").runCommand(
- {"find": "foo", readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
-
- assert.eq(badCmd.errmsg, "cannot satisfy linearizable read concern on non-primary node");
- assert.eq(badCmd.code, ErrorCodes.NotMaster);
-
- // This fails because you cannot specify 'afterOpTime' for linearizable read.
- var opTimeCmd = assert.commandFailed(primary.getDB("test").runCommand({
- "find": "foo",
- readConcern: {level: "linearizable", "afterOpTime": {ts: Timestamp(1, 2), t: 1}},
- "maxTimeMS": 60000
- }));
- assert.eq(opTimeCmd.errmsg, "afterOpTime not compatible with linearizable read concern");
- assert.eq(opTimeCmd.code, ErrorCodes.FailedToParse);
-
- // A $out aggregation is not allowed with readConcern level "linearizable".
- let outResult = assert.throws(() => primary.getDB("test").foo.aggregate(
- [{$out: "out"}], {readConcern: {level: "linearizable"}}));
- assert.eq(outResult.code, ErrorCodes.InvalidOptions);
-
- // A $merge aggregation is not allowed with readConcern level "linearizable".
- let mergeResult = assert.throws(
- () => primary.getDB("test").foo.aggregate(
- [{$merge: {into: "out", whenMatched: "replace", whenNotMatched: "insert"}}],
- {readConcern: {level: "linearizable"}}));
- assert.eq(mergeResult.code, ErrorCodes.InvalidOptions);
-
- primary = replTest.getPrimary();
-
- jsTestLog("Starting linearizablility testing");
-
- const cursorId = assert
- .commandWorked(primary.getDB("test").runCommand(
- {'find': 'foo', readConcern: {level: "linearizable"}, batchSize: 0}))
- .cursor.id;
- jsTestLog(
- "Setting up partitions such that the primary is isolated: [Secondary-Secondary] [Primary]");
- secondaries[0].disconnect(primary);
- secondaries[1].disconnect(primary);
-
- jsTestLog(
- "Testing to make sure that linearizable getMores will time out when the primary is isolated.");
- assert.commandWorked(primary.getDB("test").foo.insert({_id: 0, x: 0}));
+'use strict';
+var send_linearizable_read = function() {
+ // The primary will step down and throw an exception, which is expected.
+ var coll = db.getSiblingDB("test").foo;
+ jsTestLog('Sending in linearizable read in secondary thread');
+ // 'isMaster' ensures that the following command fails (and returns a response rather than
+ // an exception) before its connection is cut because of the primary step down. Refer to
+ // SERVER-24574.
+ assert.commandWorked(coll.runCommand({isMaster: 1, hangUpOnStepDown: false}));
assert.commandFailedWithCode(
- primary.getDB("test").runCommand({"getMore": cursorId, collection: "foo", batchSize: 1}),
- ErrorCodes.LinearizableReadConcernError);
-
- jsTestLog("Test that a linearizable read will timeout when the primary is isolated.");
- let findResult = primary.getDB("test").runCommand(
- {"find": "foo", "readConcern": {level: "linearizable"}, "maxTimeMS": 3000});
- assert.commandFailedWithCode(findResult, ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Testing to make sure linearizable read command does not block forever.");
-
- // Get last noop Optime before sending the linearizable read command
- // to ensure that we are waiting for the most recent noop write.
- var lastOpTimestamp = getLatestOp(primary).ts;
-
- var parallelShell = startParallelShell(send_linearizable_read, primary.port);
- // Sending a linearizable read implicitly replicates a noop to the secondaries. We need to find
- // the most recently issued noop to ensure that we call stepdown during the recently
- // issued linearizable read and not before the read (in the separate thread) has been called.
- jsTestLog("Checking end of oplog for noop");
- assert.soon(function() {
- var isEarlierTimestamp = function(ts1, ts2) {
- if (ts1.getTime() == ts2.getTime()) {
- return ts1.getInc() < ts2.getInc();
- }
- return ts1.getTime() < ts2.getTime();
- };
- var latestOp = getLatestOp(primary);
- if (latestOp.op == "n" && isEarlierTimestamp(lastOpTimestamp, latestOp.ts)) {
- return true;
+ coll.runCommand({'find': 'foo', readConcern: {level: "linearizable"}, maxTimeMS: 60000}),
+ ErrorCodes.InterruptedDueToReplStateChange);
+};
+
+var num_nodes = 3;
+var name = 'linearizable_read_concern';
+var replTest = new ReplSetTest({name: name, nodes: num_nodes, useBridge: true});
+var config = replTest.getReplSetConfig();
+
+// Increased election timeout to avoid having the primary step down while we are
+// testing linearizable functionality on an isolated primary.
+config.settings = {
+ electionTimeoutMillis: 60000
+};
+
+replTest.startSet();
+replTest.initiate(config);
+
+// Without a sync source the heartbeat interval will be half of the election timeout, 30
+// seconds. It thus will take almost 30 seconds for the secondaries to set the primary as
+// their sync source and begin replicating.
+replTest.awaitReplication();
+var primary = replTest.getPrimary();
+var secondaries = replTest.getSecondaries();
+
+// Do a write to have something to read.
+assert.writeOK(primary.getDB("test").foo.insert(
+ {"number": 7}, {"writeConcern": {"w": "majority", "wtimeout": ReplSetTest.kDefaultTimeoutMS}}));
+
+jsTestLog("Testing linearizable readConcern parsing");
+// This command is sent to the primary, and the primary is fully connected so it should work.
+var goodRead = assert.writeOK(primary.getDB("test").runCommand(
+ {'find': 'foo', readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
+assert.eq(goodRead.cursor.firstBatch[0].number, 7);
+
+// This fails because you cannot have a linearizable read command sent to a secondary.
+var badCmd = assert.commandFailed(secondaries[0].getDB("test").runCommand(
+ {"find": "foo", readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
+
+assert.eq(badCmd.errmsg, "cannot satisfy linearizable read concern on non-primary node");
+assert.eq(badCmd.code, ErrorCodes.NotMaster);
+
+// This fails because you cannot specify 'afterOpTime' for linearizable read.
+var opTimeCmd = assert.commandFailed(primary.getDB("test").runCommand({
+ "find": "foo",
+ readConcern: {level: "linearizable", "afterOpTime": {ts: Timestamp(1, 2), t: 1}},
+ "maxTimeMS": 60000
+}));
+assert.eq(opTimeCmd.errmsg, "afterOpTime not compatible with linearizable read concern");
+assert.eq(opTimeCmd.code, ErrorCodes.FailedToParse);
+
+// A $out aggregation is not allowed with readConcern level "linearizable".
+let outResult = assert.throws(() => primary.getDB("test").foo.aggregate(
+ [{$out: "out"}], {readConcern: {level: "linearizable"}}));
+assert.eq(outResult.code, ErrorCodes.InvalidOptions);
+
+// A $merge aggregation is not allowed with readConcern level "linearizable".
+let mergeResult =
+ assert.throws(() => primary.getDB("test").foo.aggregate(
+ [{$merge: {into: "out", whenMatched: "replace", whenNotMatched: "insert"}}],
+ {readConcern: {level: "linearizable"}}));
+assert.eq(mergeResult.code, ErrorCodes.InvalidOptions);
+
+primary = replTest.getPrimary();
+
+jsTestLog("Starting linearizablility testing");
+
+const cursorId = assert
+ .commandWorked(primary.getDB("test").runCommand(
+ {'find': 'foo', readConcern: {level: "linearizable"}, batchSize: 0}))
+ .cursor.id;
+jsTestLog(
+ "Setting up partitions such that the primary is isolated: [Secondary-Secondary] [Primary]");
+secondaries[0].disconnect(primary);
+secondaries[1].disconnect(primary);
+
+jsTestLog(
+ "Testing to make sure that linearizable getMores will time out when the primary is isolated.");
+assert.commandWorked(primary.getDB("test").foo.insert({_id: 0, x: 0}));
+assert.commandFailedWithCode(
+ primary.getDB("test").runCommand({"getMore": cursorId, collection: "foo", batchSize: 1}),
+ ErrorCodes.LinearizableReadConcernError);
+
+jsTestLog("Test that a linearizable read will timeout when the primary is isolated.");
+let findResult = primary.getDB("test").runCommand(
+ {"find": "foo", "readConcern": {level: "linearizable"}, "maxTimeMS": 3000});
+assert.commandFailedWithCode(findResult, ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Testing to make sure linearizable read command does not block forever.");
+
+// Get last noop Optime before sending the linearizable read command
+// to ensure that we are waiting for the most recent noop write.
+var lastOpTimestamp = getLatestOp(primary).ts;
+
+var parallelShell = startParallelShell(send_linearizable_read, primary.port);
+// Sending a linearizable read implicitly replicates a noop to the secondaries. We need to find
+// the most recently issued noop to ensure that we call stepdown during the recently
+// issued linearizable read and not before the read (in the separate thread) has been called.
+jsTestLog("Checking end of oplog for noop");
+assert.soon(function() {
+ var isEarlierTimestamp = function(ts1, ts2) {
+ if (ts1.getTime() == ts2.getTime()) {
+ return ts1.getInc() < ts2.getInc();
}
-
- return false;
- });
- assert.eq(primary, replTest.getPrimary(), "Primary unexpectedly changed mid test.");
- jsTestLog("Making Primary step down");
- assert.commandWorked(primary.adminCommand(
- {"replSetStepDown": 100, secondaryCatchUpPeriodSecs: 0, "force": true}));
- parallelShell();
- replTest.stopSet();
+ return ts1.getTime() < ts2.getTime();
+ };
+ var latestOp = getLatestOp(primary);
+ if (latestOp.op == "n" && isEarlierTimestamp(lastOpTimestamp, latestOp.ts)) {
+ return true;
+ }
+
+ return false;
+});
+assert.eq(primary, replTest.getPrimary(), "Primary unexpectedly changed mid test.");
+jsTestLog("Making Primary step down");
+assert.commandWorked(
+ primary.adminCommand({"replSetStepDown": 100, secondaryCatchUpPeriodSecs: 0, "force": true}));
+parallelShell();
+replTest.stopSet();
}());
diff --git a/jstests/replsets/localhost1.js b/jstests/replsets/localhost1.js
index d44f9f6b2ba..27fa1bc6cf9 100644
--- a/jstests/replsets/localhost1.js
+++ b/jstests/replsets/localhost1.js
@@ -1,16 +1,16 @@
// Test ReplSet default initiate with localhost-only binding
(function() {
- 'use strict';
+'use strict';
- // Select localhost when binding to localhost
- const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
- const primary = rt.startSet({bind_ip: "127.0.0.1"})[0];
- const db = primary.getDB('admin');
- const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
- assert(resp.me.startsWith('127.0.0.1'), tojson(resp.me) + " does not start with 127.0.0.1:");
+// Select localhost when binding to localhost
+const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
+const primary = rt.startSet({bind_ip: "127.0.0.1"})[0];
+const db = primary.getDB('admin');
+const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
+assert(resp.me.startsWith('127.0.0.1'), tojson(resp.me) + " does not start with 127.0.0.1:");
- // Wait for the primary to complete its election before shutting down the set.
- assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
- rt.stopSet();
+// Wait for the primary to complete its election before shutting down the set.
+assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+rt.stopSet();
})();
diff --git a/jstests/replsets/localhost2.js b/jstests/replsets/localhost2.js
index aa3655dd1a2..d8078d0abbb 100644
--- a/jstests/replsets/localhost2.js
+++ b/jstests/replsets/localhost2.js
@@ -1,20 +1,18 @@
// Test ReplSet default initiate with 0.0.0.0 binding
(function() {
- 'use strict';
+'use strict';
- // Select localhost when binding to localhost
- const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
- const primary = rt.startSet({bind_ip: "0.0.0.0"})[0];
- const db = primary.getDB('admin');
- const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
- assert(!resp.me.startsWith('127.0.0.1:'),
- tojson(resp.me) + " should not start with 127.0.0.1:");
- assert(!resp.me.startsWith('0.0.0.0:'), tojson(resp.me) + " should not start with 0.0.0.0:");
- assert(!resp.me.startsWith('localhost:'),
- tojson(resp.me) + " should not start with localhost:");
+// Select a non-localhost address when binding to 0.0.0.0
+const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
+const primary = rt.startSet({bind_ip: "0.0.0.0"})[0];
+const db = primary.getDB('admin');
+const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
+assert(!resp.me.startsWith('127.0.0.1:'), tojson(resp.me) + " should not start with 127.0.0.1:");
+assert(!resp.me.startsWith('0.0.0.0:'), tojson(resp.me) + " should not start with 0.0.0.0:");
+assert(!resp.me.startsWith('localhost:'), tojson(resp.me) + " should not start with localhost:");
- // Wait for the primary to complete its election before shutting down the set.
- assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
- rt.stopSet();
+// Wait for the primary to complete its election before shutting down the set.
+assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+rt.stopSet();
})();
diff --git a/jstests/replsets/localhost3.js b/jstests/replsets/localhost3.js
index 4f46505aaa6..aa452a05eef 100644
--- a/jstests/replsets/localhost3.js
+++ b/jstests/replsets/localhost3.js
@@ -1,16 +1,16 @@
// Test ReplSet default initiate with localhost-only binding
(function() {
- 'use strict';
+'use strict';
- // Select localhost when binding to localhost
- const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
- const primary = rt.startSet({bind_ip: undefined})[0];
- const db = primary.getDB('admin');
- const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
- assert(resp.me.startsWith('localhost:'), tojson(resp.me) + " should start with localhost:");
+// Select localhost when binding to localhost
+const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
+const primary = rt.startSet({bind_ip: undefined})[0];
+const db = primary.getDB('admin');
+const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
+assert(resp.me.startsWith('localhost:'), tojson(resp.me) + " should start with localhost:");
- // Wait for the primary to complete its election before shutting down the set.
- assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
- rt.stopSet();
+// Wait for the primary to complete its election before shutting down the set.
+assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+rt.stopSet();
})();
diff --git a/jstests/replsets/log_secondary_oplog_application.js b/jstests/replsets/log_secondary_oplog_application.js
index 39df90a068f..90a8025a9a1 100644
--- a/jstests/replsets/log_secondary_oplog_application.js
+++ b/jstests/replsets/log_secondary_oplog_application.js
@@ -6,76 +6,75 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- let name = "log_secondary_oplog_application";
- let rst = ReplSetTest({name: name, nodes: 2});
- rst.startSet();
+let name = "log_secondary_oplog_application";
+let rst = ReplSetTest({name: name, nodes: 2});
+rst.startSet();
- let nodes = rst.nodeList();
- rst.initiate({
- "_id": name,
- "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "priority": 0}]
- });
+let nodes = rst.nodeList();
+rst.initiate({
+ "_id": name,
+ "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "priority": 0}]
+});
- let primary = rst.getPrimary();
- let secondary = rst.getSecondary();
+let primary = rst.getPrimary();
+let secondary = rst.getSecondary();
- /**
- * Part 1: Issue a fast op and make sure that we do *not* log it.
- * We ensure the op is always considered fast by vastly increasing the "slowMS" threshold.
- */
-
- // Create collection explicitly so the insert doesn't have to do it.
- assert.commandWorked(primary.getDB(name).createCollection("fastOp"));
- rst.awaitReplication();
+/**
+ * Part 1: Issue a fast op and make sure that we do *not* log it.
+ * We ensure the op is always considered fast by vastly increasing the "slowMS" threshold.
+ */
- // Set "slowMS" to a very high value (in milliseconds).
- assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 60 * 60 * 1000));
+// Create collection explicitly so the insert doesn't have to do it.
+assert.commandWorked(primary.getDB(name).createCollection("fastOp"));
+rst.awaitReplication();
- // Issue a write and make sure we replicate it.
- assert.commandWorked(primary.getDB(name)["fastOp"].insert({"fast": "cheetah"}));
- rst.awaitReplication();
+// Set "slowMS" to a very high value (in milliseconds).
+assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 60 * 60 * 1000));
- // The op should not have been logged.
- assert.throws(function() {
- checkLog.contains(secondary, "applied op: CRUD", 1 * 1000);
- });
+// Issue a write and make sure we replicate it.
+assert.commandWorked(primary.getDB(name)["fastOp"].insert({"fast": "cheetah"}));
+rst.awaitReplication();
- /**
- * Part 2: Issue a slow op and make sure that we *do* log it.
- * We use a failpoint in SyncTail::syncApply which blocks after we read the time at the start
- * of the application of the op, and we wait there to simulate slowness.
- */
+// The op should not have been logged.
+assert.throws(function() {
+ checkLog.contains(secondary, "applied op: CRUD", 1 * 1000);
+});
- // Create collection explicitly so the insert doesn't have to do it.
- assert.commandWorked(primary.getDB(name).createCollection("slowOp"));
- rst.awaitReplication();
+/**
+ * Part 2: Issue a slow op and make sure that we *do* log it.
+ * We use a failpoint in SyncTail::syncApply which blocks after we read the time at the start
+ * of the application of the op, and we wait there to simulate slowness.
+ */
- // Set "slowMS" to a low value (in milliseconds).
- assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 20));
+// Create collection explicitly so the insert doesn't have to do it.
+assert.commandWorked(primary.getDB(name).createCollection("slowOp"));
+rst.awaitReplication();
- // Hang right after taking note of the start time of the application.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "alwaysOn"}));
+// Set "slowMS" to a low value (in milliseconds).
+assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 20));
- // Issue a write and make sure we've hit the failpoint before moving on.
- assert.commandWorked(primary.getDB(name)["slowOp"].insert({"slow": "sloth"}));
- checkLog.contains(secondary,
- "syncApply - fail point hangAfterRecordingOpApplicationStartTime enabled");
+// Hang right after taking note of the start time of the application.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "alwaysOn"}));
- // Wait for an amount of time safely above the "slowMS" we set.
- sleep(0.5 * 1000);
+// Issue a write and make sure we've hit the failpoint before moving on.
+assert.commandWorked(primary.getDB(name)["slowOp"].insert({"slow": "sloth"}));
+checkLog.contains(secondary,
+ "syncApply - fail point hangAfterRecordingOpApplicationStartTime enabled");
- // Disable the failpoint so the op finish can applying.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "off"}));
+// Wait for an amount of time safely above the "slowMS" we set.
+sleep(0.5 * 1000);
- // Make sure we log that insert op.
- rst.awaitReplication();
- checkLog.contains(secondary, "applied op: CRUD");
+// Disable the failpoint so the op can finish applying.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "off"}));
- rst.stopSet();
+// Make sure we log that insert op.
+rst.awaitReplication();
+checkLog.contains(secondary, "applied op: CRUD");
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/maintenance2.js b/jstests/replsets/maintenance2.js
index 8dd17e61a37..a2d2c3f7674 100644
--- a/jstests/replsets/maintenance2.js
+++ b/jstests/replsets/maintenance2.js
@@ -1,49 +1,49 @@
// Test that certain operations fail in recovery mode.
(function() {
- "use strict";
+"use strict";
- // Replica set testing API
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+// Replica set testing API
+// Create a new replica set test. Specify set name and the number of nodes you want.
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- var nodes = replTest.startSet();
+// call startSet() to start each mongod in the replica set
+// this returns a list of nodes
+var nodes = replTest.startSet();
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
+// Call initiate() to send the replSetInitiate command
+// This will wait for initiation
+replTest.initiate();
- // Call getPrimary to return a reference to the node that's been
- // elected master.
- var master = replTest.getPrimary();
+// Call getPrimary to return a reference to the node that's been
+// elected master.
+var master = replTest.getPrimary();
- // save some records
- var len = 100;
- for (var i = 0; i < len; ++i) {
- master.getDB("foo").foo.save({a: i});
- }
+// save some records
+var len = 100;
+for (var i = 0; i < len; ++i) {
+ master.getDB("foo").foo.save({a: i});
+}
- // This method will check the oplogs of the master
- // and slaves in the set and wait until the change has replicated.
- // replTest.awaitReplication();
+// This method will check the oplogs of the master
+// and slaves in the set and wait until the change has replicated.
+// replTest.awaitReplication();
- var slaves = replTest._slaves;
- assert.eq(2, slaves.length, "Expected 2 slaves but length was " + slaves.length);
+var slaves = replTest._slaves;
+assert.eq(2, slaves.length, "Expected 2 slaves but length was " + slaves.length);
- slaves.forEach(function(slave) {
- // put slave into maintenance (recovery) mode
- slave.getDB("foo").adminCommand({replSetMaintenance: 1});
+slaves.forEach(function(slave) {
+ // put slave into maintenance (recovery) mode
+ slave.getDB("foo").adminCommand({replSetMaintenance: 1});
- var stats = slave.getDB("foo").adminCommand({replSetGetStatus: 1});
- assert.eq(stats.myState, 3, "Slave should be in recovering state.");
+ var stats = slave.getDB("foo").adminCommand({replSetGetStatus: 1});
+ assert.eq(stats.myState, 3, "Slave should be in recovering state.");
- print("count should fail in recovering state...");
- slave.slaveOk = true;
- assert.commandFailed(slave.getDB("foo").runCommand({count: "foo"}));
- });
+ print("count should fail in recovering state...");
+ slave.slaveOk = true;
+ assert.commandFailed(slave.getDB("foo").runCommand({count: "foo"}));
+});
- // Shut down the set and finish the test.
- replTest.stopSet();
+// Shut down the set and finish the test.
+replTest.stopSet();
}());
diff --git a/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js b/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js
index 87d5d5c8b29..5d0d962cda8 100644
--- a/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js
+++ b/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js
@@ -6,58 +6,56 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- function assertWriteConcernTimeout(result) {
- assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
- assert(result.hasWriteConcernError(), tojson(result));
- assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
- }
-
- const rst = new ReplSetTest({name: "writes_wait_for_all_durable", nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const dbName = "test";
- const collName = "majority_writes_wait_for_all_durable";
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
-
- TestData.dbName = dbName;
- TestData.collName = collName;
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: testColl.getFullName(), first_id: "b"}
- }));
-
- jsTestLog(
- "Insert a document to hang before the insert completes to hold back the all durable timestamp.");
- const joinHungWrite = startParallelShell(() => {
- assert.commandWorked(
- db.getSiblingDB(TestData.dbName)[TestData.collName].insert({_id: "b"}));
- }, primary.port);
- jsTestLog("Checking that the log contains fail point enabled.");
- checkLog.contains(
- testDB.getMongo(),
- "hangAfterCollectionInserts fail point enabled for " + testColl.getFullName());
-
- try {
- jsTest.log("Do a write with majority write concern that should time out.");
- assertWriteConcernTimeout(
- testColl.insert({_id: 0}, {writeConcern: {w: "majority", wtimeout: 2 * 1000}}));
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'hangAfterCollectionInserts', mode: 'off'}));
- }
-
- joinHungWrite();
- rst.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+function assertWriteConcernTimeout(result) {
+ assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
+ assert(result.hasWriteConcernError(), tojson(result));
+ assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
+}
+
+const rst = new ReplSetTest({name: "writes_wait_for_all_durable", nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const dbName = "test";
+const collName = "majority_writes_wait_for_all_durable";
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: testColl.getFullName(), first_id: "b"}
+}));
+
+jsTestLog(
+ "Insert a document to hang before the insert completes to hold back the all durable timestamp.");
+const joinHungWrite = startParallelShell(() => {
+ assert.commandWorked(db.getSiblingDB(TestData.dbName)[TestData.collName].insert({_id: "b"}));
+}, primary.port);
+jsTestLog("Checking that the log contains fail point enabled.");
+checkLog.contains(testDB.getMongo(),
+ "hangAfterCollectionInserts fail point enabled for " + testColl.getFullName());
+
+try {
+ jsTest.log("Do a write with majority write concern that should time out.");
+ assertWriteConcernTimeout(
+ testColl.insert({_id: 0}, {writeConcern: {w: "majority", wtimeout: 2 * 1000}}));
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'hangAfterCollectionInserts', mode: 'off'}));
+}
+
+joinHungWrite();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/maxSyncSourceLagSecs.js b/jstests/replsets/maxSyncSourceLagSecs.js
index 3663972d1fa..2e71d43dd70 100644
--- a/jstests/replsets/maxSyncSourceLagSecs.js
+++ b/jstests/replsets/maxSyncSourceLagSecs.js
@@ -3,54 +3,54 @@
// This test requires the fsync command to ensure members experience a delay.
// @tags: [requires_fsync]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
-
- var name = "maxSyncSourceLagSecs";
- var replTest = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {priority: 3}},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0}, setParameter: 'maxSyncSourceLagSecs=3'},
- ],
- oplogSize: 5,
- });
- var nodes = replTest.nodeList();
- replTest.startSet();
- replTest.initiate();
- replTest.awaitNodesAgreeOnPrimary();
-
- var master = replTest.getPrimary();
- var slaves = replTest._slaves;
- syncFrom(slaves[0], master, replTest);
- syncFrom(slaves[1], master, replTest);
- master.getDB("foo").bar.save({a: 1});
- replTest.awaitReplication();
-
- jsTestLog("Setting sync target of slave 2 to slave 1");
- syncFrom(slaves[1], slaves[0], replTest);
- printjson(replTest.status());
-
- // need to put at least maxSyncSourceLagSecs b/w first op and subsequent ops
- // so that the shouldChangeSyncSource logic goes into effect
- sleep(4000);
-
- jsTestLog("Lock slave 1 and add some docs. Force sync target for slave 2 to change to primary");
- assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
-
- assert.soon(function() {
- master.getDB("foo").bar.insert({a: 2});
- var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
- return res.syncingTo === master.name;
- }, "sync target not changed back to primary", 100 * 1000, 2 * 1000);
- printjson(replTest.status());
-
- assert.soon(function() {
- return (slaves[1].getDB("foo").bar.count({a: 1}) > 0 &&
- slaves[1].getDB("foo").bar.count({a: 2}) > 0);
- }, "slave should have caught up after syncing to primary.");
-
- assert.commandWorked(slaves[0].getDB("admin").fsyncUnlock());
- replTest.stopSet();
+"use strict";
+load("jstests/replsets/rslib.js");
+
+var name = "maxSyncSourceLagSecs";
+var replTest = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 3}},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0}, setParameter: 'maxSyncSourceLagSecs=3'},
+ ],
+ oplogSize: 5,
+});
+var nodes = replTest.nodeList();
+replTest.startSet();
+replTest.initiate();
+replTest.awaitNodesAgreeOnPrimary();
+
+var master = replTest.getPrimary();
+var slaves = replTest._slaves;
+syncFrom(slaves[0], master, replTest);
+syncFrom(slaves[1], master, replTest);
+master.getDB("foo").bar.save({a: 1});
+replTest.awaitReplication();
+
+jsTestLog("Setting sync target of slave 2 to slave 1");
+syncFrom(slaves[1], slaves[0], replTest);
+printjson(replTest.status());
+
+// need to put at least maxSyncSourceLagSecs b/w first op and subsequent ops
+// so that the shouldChangeSyncSource logic goes into effect
+sleep(4000);
+
+jsTestLog("Lock slave 1 and add some docs. Force sync target for slave 2 to change to primary");
+assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
+
+assert.soon(function() {
+ master.getDB("foo").bar.insert({a: 2});
+ var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
+ return res.syncingTo === master.name;
+}, "sync target not changed back to primary", 100 * 1000, 2 * 1000);
+printjson(replTest.status());
+
+assert.soon(function() {
+ return (slaves[1].getDB("foo").bar.count({a: 1}) > 0 &&
+ slaves[1].getDB("foo").bar.count({a: 2}) > 0);
+}, "slave should have caught up after syncing to primary.");
+
+assert.commandWorked(slaves[0].getDB("admin").fsyncUnlock());
+replTest.stopSet();
}());
diff --git a/jstests/replsets/minimum_visible_with_cluster_time.js b/jstests/replsets/minimum_visible_with_cluster_time.js
index 7a30c386f73..4b9dc7aa5f2 100644
--- a/jstests/replsets/minimum_visible_with_cluster_time.js
+++ b/jstests/replsets/minimum_visible_with_cluster_time.js
@@ -5,105 +5,103 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
- }
+'use strict';
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
- const primary = rst.getPrimary();
+rst.initiate();
+const primary = rst.getPrimary();
+const syncName = 'sync';
+const syncColl = primary.getDB(syncName).getCollection(syncName);
+assert.commandWorked(syncColl.insert({t: 'before'}));
+
+function bumpClusterTime() {
+ jsTestLog('Beginning to bump the logical clock.');
const syncName = 'sync';
- const syncColl = primary.getDB(syncName).getCollection(syncName);
- assert.commandWorked(syncColl.insert({t: 'before'}));
-
- function bumpClusterTime() {
- jsTestLog('Beginning to bump the logical clock.');
- const syncName = 'sync';
- const syncColl = db.getSiblingDB(syncName).getCollection(syncName);
- assert.eq(syncColl.find().itcount(), 1);
- assert.commandWorked(syncColl.insert({t: 'during'}));
- assert.eq(syncColl.find().itcount(), 2);
-
- let clusterTime = new Timestamp(1, 1);
- while (true) {
- const higherClusterTime = new Timestamp(clusterTime.getTime() + 20, 1);
- const res = assert.commandWorked(db.adminCommand({
- 'isMaster': 1,
- '$clusterTime': {
- 'clusterTime': higherClusterTime,
- 'signature': {
- 'hash': BinData(0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAA='),
- 'keyId': NumberLong(0)
- }
- }
- }));
- clusterTime = res.$clusterTime.clusterTime;
-
- if (syncColl.find().itcount() === 3) {
- jsTestLog('Done bumping the logical clock.');
- return;
+ const syncColl = db.getSiblingDB(syncName).getCollection(syncName);
+ assert.eq(syncColl.find().itcount(), 1);
+ assert.commandWorked(syncColl.insert({t: 'during'}));
+ assert.eq(syncColl.find().itcount(), 2);
+
+ let clusterTime = new Timestamp(1, 1);
+ while (true) {
+ const higherClusterTime = new Timestamp(clusterTime.getTime() + 20, 1);
+ const res = assert.commandWorked(db.adminCommand({
+ 'isMaster': 1,
+ '$clusterTime': {
+ 'clusterTime': higherClusterTime,
+ 'signature':
+ {'hash': BinData(0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAA='), 'keyId': NumberLong(0)}
}
+ }));
+ clusterTime = res.$clusterTime.clusterTime;
+
+ if (syncColl.find().itcount() === 3) {
+ jsTestLog('Done bumping the logical clock.');
+ return;
}
}
+}
- const clusterTimeBumper = startParallelShell(bumpClusterTime, primary.port);
- // Wait for the logical clock to begin to be bumped.
- assert.soon(() => syncColl.find().itcount() === 2);
+const clusterTimeBumper = startParallelShell(bumpClusterTime, primary.port);
+// Wait for the logical clock to begin to be bumped.
+assert.soon(() => syncColl.find().itcount() === 2);
- function doMajorityRead(coll, expectedCount) {
- const res = assert.commandWorked(coll.runCommand('find', {
- 'filter': {x: 7},
- 'readConcern': {'level': 'majority'},
- 'maxTimeMS': rst.kDefaultTimeoutMS
- }));
- // Exhaust the cursor to avoid leaking cursors on the server.
- assert.eq(expectedCount, new DBCommandCursor(coll.getDB(), res).itcount());
- }
+function doMajorityRead(coll, expectedCount) {
+ const res = assert.commandWorked(coll.runCommand('find', {
+ 'filter': {x: 7},
+ 'readConcern': {'level': 'majority'},
+ 'maxTimeMS': rst.kDefaultTimeoutMS
+ }));
+ // Exhaust the cursor to avoid leaking cursors on the server.
+ assert.eq(expectedCount, new DBCommandCursor(coll.getDB(), res).itcount());
+}
- const dbName = 'minimum_visible_with_cluster_time';
- const collName = 'foo';
+const dbName = 'minimum_visible_with_cluster_time';
+const collName = 'foo';
- for (let i = 0; i < 10; i++) {
- const collNameI = collName + i;
- jsTestLog(`Testing ${dbName}.${collNameI}`);
+for (let i = 0; i < 10; i++) {
+ const collNameI = collName + i;
+ jsTestLog(`Testing ${dbName}.${collNameI}`);
- assert.commandWorked(primary.getDB(dbName).createCollection(collNameI));
- let coll = primary.getDB(dbName).getCollection(collNameI);
+ assert.commandWorked(primary.getDB(dbName).createCollection(collNameI));
+ let coll = primary.getDB(dbName).getCollection(collNameI);
- doMajorityRead(coll, 0);
+ doMajorityRead(coll, 0);
- assert.commandWorked(coll.insert({x: 7, y: 1}));
- assert.commandWorked(
- coll.createIndex({x: 1}, {'name': 'x_1', 'expireAfterSeconds': 60 * 60 * 23}));
+ assert.commandWorked(coll.insert({x: 7, y: 1}));
+ assert.commandWorked(
+ coll.createIndex({x: 1}, {'name': 'x_1', 'expireAfterSeconds': 60 * 60 * 23}));
- doMajorityRead(coll, 1);
+ doMajorityRead(coll, 1);
- assert.commandWorked(coll.insert({x: 7, y: 2}));
- assert.commandWorked(coll.runCommand(
- 'collMod', {'index': {'keyPattern': {x: 1}, 'expireAfterSeconds': 60 * 60 * 24}}));
- doMajorityRead(coll, 2);
+ assert.commandWorked(coll.insert({x: 7, y: 2}));
+ assert.commandWorked(coll.runCommand(
+ 'collMod', {'index': {'keyPattern': {x: 1}, 'expireAfterSeconds': 60 * 60 * 24}}));
+ doMajorityRead(coll, 2);
- assert.commandWorked(coll.insert({x: 7, y: 3}));
- assert.commandWorked(coll.dropIndexes());
+ assert.commandWorked(coll.insert({x: 7, y: 3}));
+ assert.commandWorked(coll.dropIndexes());
- doMajorityRead(coll, 3);
+ doMajorityRead(coll, 3);
- assert.commandWorked(coll.insert({x: 7, y: 4}));
- const newCollNameI = collNameI + '_new';
- assert.commandWorked(coll.renameCollection(newCollNameI));
+ assert.commandWorked(coll.insert({x: 7, y: 4}));
+ const newCollNameI = collNameI + '_new';
+ assert.commandWorked(coll.renameCollection(newCollNameI));
- coll = primary.getDB(dbName).getCollection(newCollNameI);
- doMajorityRead(coll, 4);
- }
+ coll = primary.getDB(dbName).getCollection(newCollNameI);
+ doMajorityRead(coll, 4);
+}
- jsTestLog('Waiting for logical clock thread to stop.');
- assert.commandWorked(syncColl.insert({t: 'after'}));
- clusterTimeBumper();
+jsTestLog('Waiting for logical clock thread to stop.');
+assert.commandWorked(syncColl.insert({t: 'after'}));
+clusterTimeBumper();
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
index 2b6cd66617e..8348b65e09a 100644
--- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js
+++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
@@ -7,77 +7,77 @@
// all collections created, and checking the oplog for entries logging the creation of each of those
// collections.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const name = "mr_nonrepl_coll_in_local_db";
- const replSet = new ReplSetTest({name: name, nodes: 2});
- replSet.startSet();
- replSet.initiate();
+const name = "mr_nonrepl_coll_in_local_db";
+const replSet = new ReplSetTest({name: name, nodes: 2});
+replSet.startSet();
+replSet.initiate();
- const dbName = name;
- const collName = "test";
+const dbName = name;
+const collName = "test";
- const primary = replSet.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const coll = primaryDB[collName];
+const primary = replSet.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const coll = primaryDB[collName];
- // Insert 1000 documents in the "test" collection.
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 1000; i++) {
- const array = Array.from({lengthToInsert: 10000}, _ => Math.floor(Math.random() * 100));
- bulk.insert({arr: array});
- }
- assert.writeOK(bulk.execute());
+// Insert 1000 documents in the "test" collection.
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 1000; i++) {
+    const array = Array.from({length: 10000}, _ => Math.floor(Math.random() * 100));
+ bulk.insert({arr: array});
+}
+assert.writeOK(bulk.execute());
- // Run a simple map-reduce.
- const result = coll.mapReduce(
- function map() {
- return this.arr.forEach(element => emit(element, 1));
- },
- function reduce(key, values) {
- return Array.sum(values);
- },
- {query: {arr: {$exists: true}}, out: "mr_result"});
- assert.commandWorked(result);
+// Run a simple map-reduce.
+const result = coll.mapReduce(
+ function map() {
+ return this.arr.forEach(element => emit(element, 1));
+ },
+ function reduce(key, values) {
+ return Array.sum(values);
+ },
+ {query: {arr: {$exists: true}}, out: "mr_result"});
+assert.commandWorked(result);
- // Examine the logs to find a list of created collections.
- const logLines = checkLog.getGlobalLog(primaryDB);
- let createdCollections = [];
- logLines.forEach(function(line) {
- let matchResult = line.match(/createCollection: (.+) with/);
- if (matchResult) {
- createdCollections.push(matchResult[1]);
- }
- });
+// Examine the logs to find a list of created collections.
+const logLines = checkLog.getGlobalLog(primaryDB);
+let createdCollections = [];
+logLines.forEach(function(line) {
+ let matchResult = line.match(/createCollection: (.+) with/);
+ if (matchResult) {
+ createdCollections.push(matchResult[1]);
+ }
+});
- createdCollections.forEach(function(createdCollectionName) {
- if (createdCollectionName.startsWith("admin.")) {
- // Although the "admin.system.version" collection is replicated, no "c" entry gets
- // created for it in the oplog, so this test would see it as unreplicated. In general,
- // this test is not concerned with the "admin" database, so we don't examine any "admin"
- // collections.
- return;
- }
+createdCollections.forEach(function(createdCollectionName) {
+ if (createdCollectionName.startsWith("admin.")) {
+ // Although the "admin.system.version" collection is replicated, no "c" entry gets
+ // created for it in the oplog, so this test would see it as unreplicated. In general,
+ // this test is not concerned with the "admin" database, so we don't examine any "admin"
+ // collections.
+ return;
+ }
- // Search for a log entry for the creation of this collection.
- const oplogEntries = primaryDB.getSiblingDB("local")["oplog.rs"]
- .find({op: "c", "o.idIndex.ns": createdCollectionName})
- .toArray();
- if (createdCollectionName.startsWith("local.")) {
- // We do not want to see any replication of "local" collections.
- assert.eq(oplogEntries.length,
- 0,
- "Found unexpected oplog entry for creation of " + createdCollectionName +
- ": " + tojson(oplogEntries));
- } else {
- assert.eq(oplogEntries.length,
- 1,
- "Found no oplog entry or too many entries for creation of " +
- createdCollectionName + ": " + tojson(oplogEntries));
- }
- });
+ // Search for a log entry for the creation of this collection.
+ const oplogEntries = primaryDB.getSiblingDB("local")["oplog.rs"]
+ .find({op: "c", "o.idIndex.ns": createdCollectionName})
+ .toArray();
+ if (createdCollectionName.startsWith("local.")) {
+ // We do not want to see any replication of "local" collections.
+ assert.eq(oplogEntries.length,
+ 0,
+ "Found unexpected oplog entry for creation of " + createdCollectionName + ": " +
+ tojson(oplogEntries));
+ } else {
+ assert.eq(oplogEntries.length,
+ 1,
+ "Found no oplog entry or too many entries for creation of " +
+ createdCollectionName + ": " + tojson(oplogEntries));
+ }
+});
- replSet.stopSet();
+replSet.stopSet();
}());
diff --git a/jstests/replsets/multikey_write_avoids_prepare_conflict.js b/jstests/replsets/multikey_write_avoids_prepare_conflict.js
index 46d58e0b6ca..9b760039e2f 100644
--- a/jstests/replsets/multikey_write_avoids_prepare_conflict.js
+++ b/jstests/replsets/multikey_write_avoids_prepare_conflict.js
@@ -7,52 +7,52 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({name: 'multikey_write_avoids_prepare_conflict', nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const dbName = "test";
- const collName = "coll";
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
- const primaryColl = primary.getDB(dbName)[collName];
-
- jsTestLog("Creating a collection and an index on the primary, with spec {x:1}.");
- assert.commandWorked(primaryColl.createIndex({x: 1}));
- replTest.awaitReplication();
-
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Preparing a transaction on primary that should set the multikey flag.");
- session.startTransaction();
- // This write should update the multikey flag in the catalog but we don't want it to generate
- // prepare conflicts. In general, it is always safe to set an index as multikey earlier than is
- // necessary.
- assert.commandWorked(sessionColl.insert({x: [1, 2]}));
- PrepareHelpers.prepareTransaction(session);
-
- jsTestLog("Switching primaries by stepping up node " + secondary);
- replTest.stepUp(secondary);
- const newPrimary = replTest.getPrimary();
- const newPrimaryColl = newPrimary.getDB(dbName)[collName];
-
- jsTestLog("Doing an insert on the new primary that should also try to set the multikey flag.");
- assert.commandWorked(newPrimaryColl.insert({x: [3, 4]}));
- replTest.awaitReplication();
-
- jsTestLog("Aborting the prepared transaction on session " + tojson(session.getSessionId()));
- assert.commandWorked(newPrimary.adminCommand({
- abortTransaction: 1,
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- autocommit: false
- }));
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({name: 'multikey_write_avoids_prepare_conflict', nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = "test";
+const collName = "coll";
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+const primaryColl = primary.getDB(dbName)[collName];
+
+jsTestLog("Creating a collection and an index on the primary, with spec {x:1}.");
+assert.commandWorked(primaryColl.createIndex({x: 1}));
+replTest.awaitReplication();
+
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Preparing a transaction on primary that should set the multikey flag.");
+session.startTransaction();
+// This write should update the multikey flag in the catalog but we don't want it to generate
+// prepare conflicts. In general, it is always safe to set an index as multikey earlier than is
+// necessary.
+assert.commandWorked(sessionColl.insert({x: [1, 2]}));
+PrepareHelpers.prepareTransaction(session);
+
+jsTestLog("Switching primaries by stepping up node " + secondary);
+replTest.stepUp(secondary);
+const newPrimary = replTest.getPrimary();
+const newPrimaryColl = newPrimary.getDB(dbName)[collName];
+
+jsTestLog("Doing an insert on the new primary that should also try to set the multikey flag.");
+assert.commandWorked(newPrimaryColl.insert({x: [3, 4]}));
+replTest.awaitReplication();
+
+jsTestLog("Aborting the prepared transaction on session " + tojson(session.getSessionId()));
+assert.commandWorked(newPrimary.adminCommand({
+ abortTransaction: 1,
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ autocommit: false
+}));
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/nested_apply_ops_create_indexes.js b/jstests/replsets/nested_apply_ops_create_indexes.js
index 1349d706c37..1a4a3f9c3ea 100644
--- a/jstests/replsets/nested_apply_ops_create_indexes.js
+++ b/jstests/replsets/nested_apply_ops_create_indexes.js
@@ -2,61 +2,56 @@
* Test createIndexes while recursively locked in a nested applyOps.
*/
(function() {
- "use strict";
+"use strict";
- let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
- let cmd = {listIndexes: collName};
- let res = testDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- let indexes = testDB[collName].getIndexes();
-
- assert.eq(indexes.length, expectedNumIndexes);
-
- let foundIndex = indexes.some(index => index.name === indexName);
- assert(foundIndex,
- "did not find the index '" + indexName + "' amongst the collection indexes: " +
- tojson(indexes));
- };
-
- let rst = new ReplSetTest({nodes: 3});
- rst.startSet();
- rst.initiate();
-
- let collName = "col";
- let dbName = "nested_apply_ops_create_indexes";
-
- let primaryTestDB = rst.getPrimary().getDB(dbName);
- let cmd = {"create": collName};
- let res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
-
- let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
- let cmdFormatIndexNameA = "a_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + ".$cmd",
- ui: uuid,
- o: {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- ui: uuid,
- o: {
- createIndexes: collName,
- v: 2,
- key: {a: 1},
- name: cmdFormatIndexNameA
- }
- }]
- }
- }]
- };
- res = primaryTestDB.runCommand(cmd);
+let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
+ let cmd = {listIndexes: collName};
+ let res = testDB.runCommand(cmd);
assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
-
- rst.stopSet();
+ let indexes = testDB[collName].getIndexes();
+
+ assert.eq(indexes.length, expectedNumIndexes);
+
+ let foundIndex = indexes.some(index => index.name === indexName);
+ assert(foundIndex,
+ "did not find the index '" + indexName +
+ "' amongst the collection indexes: " + tojson(indexes));
+};
+
+let rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
+
+let collName = "col";
+let dbName = "nested_apply_ops_create_indexes";
+
+let primaryTestDB = rst.getPrimary().getDB(dbName);
+let cmd = {"create": collName};
+let res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+
+let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
+let cmdFormatIndexNameA = "a_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + ".$cmd",
+ ui: uuid,
+ o: {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ ui: uuid,
+ o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
+ }]
+ }
+ }]
+};
+res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
+
+rst.stopSet();
})();
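
The nested wrapper above is what exercises the recursive lock; the inner createIndexes operation is also valid as a single-level applyOps. A sketch reusing the same dbName, collName, and uuid from the test (the {b: 1} index is hypothetical):

assert.commandWorked(primaryTestDB.runCommand({
    applyOps: [{
        op: "c",
        ns: dbName + "." + collName,
        ui: uuid,
        o: {createIndexes: collName, v: 2, key: {b: 1}, name: "b_1"}
    }]
}));
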
diff --git a/jstests/replsets/no_disconnect_on_stepdown.js b/jstests/replsets/no_disconnect_on_stepdown.js
index 1d71788b3d6..b5a2368c293 100644
--- a/jstests/replsets/no_disconnect_on_stepdown.js
+++ b/jstests/replsets/no_disconnect_on_stepdown.js
@@ -2,106 +2,104 @@
* Tests that stepdown terminates writes, but does not disconnect connections.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/curop_helpers.js");
+load("jstests/libs/curop_helpers.js");
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryAdmin = primary.getDB("admin");
- // We need a separate connection to avoid interference with the ReplSetTestMechanism.
- const primaryDataConn = new Mongo(primary.host);
- const primaryDb = primaryDataConn.getDB("test");
- const collname = "no_disconnect_on_stepdown";
- const coll = primaryDb[collname];
+const primary = rst.getPrimary();
+const primaryAdmin = primary.getDB("admin");
+// We need a separate connection to avoid interference with the ReplSetTest mechanism.
+const primaryDataConn = new Mongo(primary.host);
+const primaryDb = primaryDataConn.getDB("test");
+const collname = "no_disconnect_on_stepdown";
+const coll = primaryDb[collname];
- // Never retry on network error, because this test needs to detect the network error.
- TestData.skipRetryOnNetworkError = true;
+// Never retry on network error, because this test needs to detect the network error.
+TestData.skipRetryOnNetworkError = true;
- // Legacy writes will still disconnect, so don't use them.
- primaryDataConn.forceWriteMode('commands');
+// Legacy writes will still disconnect, so don't use them.
+primaryDataConn.forceWriteMode('commands');
- assert.commandWorked(coll.insert([
- {_id: 'update0', updateme: true},
- {_id: 'update1', updateme: true},
- {_id: 'remove0', removeme: true},
- {_id: 'remove1', removeme: true}
- ]));
- rst.awaitReplication();
+assert.commandWorked(coll.insert([
+ {_id: 'update0', updateme: true},
+ {_id: 'update1', updateme: true},
+ {_id: 'remove0', removeme: true},
+ {_id: 'remove1', removeme: true}
+]));
+rst.awaitReplication();
- jsTestLog("Stepping down with no command in progress. Should not disconnect.");
- // If the 'primary' connection is broken on stepdown, this command will fail.
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- // If the 'primaryDataConn' connection was broken during stepdown, this command will fail.
- assert.commandWorked(primaryDb.adminCommand({ping: 1}));
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
+jsTestLog("Stepping down with no command in progress. Should not disconnect.");
+// If the 'primary' connection is broken on stepdown, this command will fail.
+assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+// If the 'primaryDataConn' connection was broken during stepdown, this command will fail.
+assert.commandWorked(primaryDb.adminCommand({ping: 1}));
+// Allow the primary to be re-elected, and wait for it.
+assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+rst.getPrimary();
- function runStepDownTest({description, failpoint, operation, errorCode}) {
- jsTestLog(`Trying ${description} on a stepping-down primary`);
- assert.commandWorked(primaryAdmin.adminCommand({
- configureFailPoint: failpoint,
- mode: "alwaysOn",
- data: {shouldContinueOnInterrupt: true}
- }));
+function runStepDownTest({description, failpoint, operation, errorCode}) {
+ jsTestLog(`Trying ${description} on a stepping-down primary`);
+ assert.commandWorked(primaryAdmin.adminCommand({
+ configureFailPoint: failpoint,
+ mode: "alwaysOn",
+ data: {shouldContinueOnInterrupt: true}
+ }));
- errorCode = errorCode || ErrorCodes.InterruptedDueToReplStateChange;
- const writeCommand = `db.getMongo().forceWriteMode("commands");
+ errorCode = errorCode || ErrorCodes.InterruptedDueToReplStateChange;
+ const writeCommand = `db.getMongo().forceWriteMode("commands");
assert.commandFailedWithCode(${operation}, ${errorCode});
assert.commandWorked(db.adminCommand({ping:1}));`;
- const waitForShell = startParallelShell(writeCommand, primary.port);
- waitForCurOpByFilter(primaryAdmin, {"msg": failpoint});
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- try {
- waitForShell();
- } catch (ex) {
- print("Failed trying to write or ping in " + description + ", possibly disconnected.");
- throw ex;
- }
+ const waitForShell = startParallelShell(writeCommand, primary.port);
+ waitForCurOpByFilter(primaryAdmin, {"msg": failpoint});
+ assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ assert.commandWorked(primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+ try {
+ waitForShell();
+ } catch (ex) {
+ print("Failed trying to write or ping in " + description + ", possibly disconnected.");
+ throw ex;
+ }
- // Validate the number of operations killed on step down and number of failed unacknowledged
- // writes resulted in network disconnection.
- let replMetrics =
- assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
- assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
+    // Validate the number of operations killed on step down and the number of failed
+    // unacknowledged writes that resulted in network disconnection.
+ let replMetrics =
+ assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
+ assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
+ assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
- }
+ // Allow the primary to be re-elected, and wait for it.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
- // Reduce the max batch size so the insert is reliably interrupted.
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, internalInsertMaxBatchSize: 2}));
- // Make updates and removes yield more often.
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 3}));
+// Reduce the max batch size so the insert is reliably interrupted.
+assert.commandWorked(primaryAdmin.adminCommand({setParameter: 1, internalInsertMaxBatchSize: 2}));
+// Make updates and removes yield more often.
+assert.commandWorked(
+ primaryAdmin.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 3}));
- runStepDownTest({
- description: "insert",
- failpoint: "hangWithLockDuringBatchInsert",
- operation: "db['" + collname + "'].insert([{_id:0}, {_id:1}, {_id:2}])"
- });
+runStepDownTest({
+ description: "insert",
+ failpoint: "hangWithLockDuringBatchInsert",
+ operation: "db['" + collname + "'].insert([{_id:0}, {_id:1}, {_id:2}])"
+});
- runStepDownTest({
- description: "update",
- failpoint: "hangWithLockDuringBatchUpdate",
- operation: "db['" + collname + "'].update({updateme: true}, {'$set': {x: 1}})"
- });
- runStepDownTest({
- description: "remove",
- failpoint: "hangWithLockDuringBatchRemove",
- operation: "db['" + collname + "'].remove({removeme: true})"
- });
- rst.stopSet();
+runStepDownTest({
+ description: "update",
+ failpoint: "hangWithLockDuringBatchUpdate",
+ operation: "db['" + collname + "'].update({updateme: true}, {'$set': {x: 1}})"
+});
+runStepDownTest({
+ description: "remove",
+ failpoint: "hangWithLockDuringBatchRemove",
+ operation: "db['" + collname + "'].remove({removeme: true})"
+});
+rst.stopSet();
})();
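
runStepDownTest above follows the usual failpoint discipline: enable the failpoint, let the operation hang, perform the stepdown, then always disable the failpoint so the hung operation can drain. A stripped-down sketch of just that toggle, using the insert failpoint named in the test:

// Enable: the next matching batch insert hangs while holding its lock.
assert.commandWorked(primaryAdmin.adminCommand(
    {configureFailPoint: "hangWithLockDuringBatchInsert", mode: "alwaysOn"}));

// ... start the write in a parallel shell, wait for it in currentOp, step down ...

// Disable: without this, the parallel shell would never return.
assert.commandWorked(primaryAdmin.adminCommand(
    {configureFailPoint: "hangWithLockDuringBatchInsert", mode: "off"}));
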
diff --git a/jstests/replsets/no_flapping_during_network_partition.js b/jstests/replsets/no_flapping_during_network_partition.js
index ce88d0bd298..e6f705c5a4a 100644
--- a/jstests/replsets/no_flapping_during_network_partition.js
+++ b/jstests/replsets/no_flapping_during_network_partition.js
@@ -11,45 +11,47 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- var name = "no_flapping_during_network_partition";
+var name = "no_flapping_during_network_partition";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[0].priority = 5;
- config.members[2].arbiterOnly = true;
- config.settings = {electionTimeoutMillis: 2000};
- replTest.initiate(config);
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.startSet();
+var config = replTest.getReplSetConfig();
+config.members[0].priority = 5;
+config.members[2].arbiterOnly = true;
+config.settings = {
+ electionTimeoutMillis: 2000
+};
+replTest.initiate(config);
- function getTerm(node) {
- return node.adminCommand({replSetGetStatus: 1}).term;
- }
+function getTerm(node) {
+ return node.adminCommand({replSetGetStatus: 1}).term;
+}
- replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
+replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
- var initialTerm = getTerm(primary);
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
+var initialTerm = getTerm(primary);
- jsTestLog("Create a network partition between the primary and secondary.");
- primary.disconnect(secondary);
+jsTestLog("Create a network partition between the primary and secondary.");
+primary.disconnect(secondary);
- jsTestLog("Wait long enough for the secondary to call for an election.");
- checkLog.contains(secondary, "can see a healthy primary");
- checkLog.contains(secondary, "not running for primary");
+jsTestLog("Wait long enough for the secondary to call for an election.");
+checkLog.contains(secondary, "can see a healthy primary");
+checkLog.contains(secondary, "not running for primary");
- jsTestLog("Verify the primary and secondary do not change during the partition.");
- assert.eq(primary, replTest.getPrimary());
- assert.eq(secondary, replTest.getSecondary());
+jsTestLog("Verify the primary and secondary do not change during the partition.");
+assert.eq(primary, replTest.getPrimary());
+assert.eq(secondary, replTest.getSecondary());
- checkLog.contains(secondary, "not running for primary");
+checkLog.contains(secondary, "not running for primary");
- jsTestLog("Heal the partition.");
- primary.reconnect(secondary);
+jsTestLog("Heal the partition.");
+primary.reconnect(secondary);
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/noop_write_after_read_only_txn.js b/jstests/replsets/noop_write_after_read_only_txn.js
index a34db2d4a06..125eaf5b50f 100644
--- a/jstests/replsets/noop_write_after_read_only_txn.js
+++ b/jstests/replsets/noop_write_after_read_only_txn.js
@@ -4,88 +4,88 @@
//
// @tags: [uses_transactions]
(function() {
- "use strict";
- load('jstests/libs/write_concern_util.js');
+"use strict";
+load('jstests/libs/write_concern_util.js');
- const name = "noop_write_after_read_only_txn";
- const rst = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- });
- rst.startSet();
- rst.initiate();
+const name = "noop_write_after_read_only_txn";
+const rst = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const dbName = "test";
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const dbName = "test";
+const testDB = primary.getDB(dbName);
- // Set up the collection.
- testDB.runCommand({drop: name, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.getCollection(name).insert({}, {writeConcern: {w: "majority"}}));
+// Set up the collection.
+testDB.runCommand({drop: name, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.getCollection(name).insert({}, {writeConcern: {w: "majority"}}));
- function runTest({readConcernLevel, shouldWrite, provokeWriteConcernError}) {
- jsTestLog(
- `Read concern level "${readConcernLevel}", shouldWrite: ${shouldWrite}, provokeWriteConcernError: ${provokeWriteConcernError}`);
+function runTest({readConcernLevel, shouldWrite, provokeWriteConcernError}) {
+ jsTestLog(`Read concern level "${readConcernLevel}", shouldWrite: ${
+ shouldWrite}, provokeWriteConcernError: ${provokeWriteConcernError}`);
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const txnOptions = {writeConcern: {w: "majority"}};
- if (readConcernLevel)
- txnOptions.readConcern = {level: readConcernLevel};
+ const session = primary.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const txnOptions = {writeConcern: {w: "majority"}};
+ if (readConcernLevel)
+ txnOptions.readConcern = {level: readConcernLevel};
- if (provokeWriteConcernError)
- txnOptions.writeConcern.wtimeout = 1000;
+ if (provokeWriteConcernError)
+ txnOptions.writeConcern.wtimeout = 1000;
- session.startTransaction(txnOptions);
- assert.commandWorked(sessionDB.runCommand({find: name}));
- if (shouldWrite)
- assert.commandWorked(sessionDB.getCollection(name).insert({}));
+ session.startTransaction(txnOptions);
+ assert.commandWorked(sessionDB.runCommand({find: name}));
+ if (shouldWrite)
+ assert.commandWorked(sessionDB.getCollection(name).insert({}));
- if (provokeWriteConcernError)
- stopReplicationOnSecondaries(rst);
+ if (provokeWriteConcernError)
+ stopReplicationOnSecondaries(rst);
- const commitResult =
- assert.commandWorkedIgnoringWriteConcernErrors(session.commitTransaction_forTesting());
+ const commitResult =
+ assert.commandWorkedIgnoringWriteConcernErrors(session.commitTransaction_forTesting());
- jsTestLog(`commitResult ${tojson(commitResult)}`);
- if (provokeWriteConcernError) {
- assertWriteConcernError(commitResult);
- } else {
- assert.commandWorked(commitResult);
- }
+ jsTestLog(`commitResult ${tojson(commitResult)}`);
+ if (provokeWriteConcernError) {
+ assertWriteConcernError(commitResult);
+ } else {
+ assert.commandWorked(commitResult);
+ }
- const entries = rst.findOplog(primary,
- {
- op: "n",
- ts: {$gte: commitResult.operationTime},
- "o.msg": /.*read-only transaction.*/
- },
- 1)
- .toArray();
+ const entries = rst.findOplog(primary,
+ {
+ op: "n",
+ ts: {$gte: commitResult.operationTime},
+ "o.msg": /.*read-only transaction.*/
+ },
+ 1)
+ .toArray();
- // If the transaction had a write, it should not *also* do a noop.
- if (shouldWrite) {
- assert.eq(0, entries.length, "shouldn't have written noop oplog entry");
- } else {
- assert.eq(1, entries.length, "should have written noop oplog entry");
- }
-
- jsTestLog("Ending session");
- session.endSession();
- restartReplSetReplication(rst);
+ // If the transaction had a write, it should not *also* do a noop.
+ if (shouldWrite) {
+ assert.eq(0, entries.length, "shouldn't have written noop oplog entry");
+ } else {
+ assert.eq(1, entries.length, "should have written noop oplog entry");
}
- for (let readConcernLevel of[null, "local", "majority", "snapshot"]) {
- for (let shouldWrite of[false, true]) {
- for (let provokeWriteConcernError of[false, true]) {
- runTest({
- readConcernLevel: readConcernLevel,
- shouldWrite: shouldWrite,
- provokeWriteConcernError: provokeWriteConcernError
- });
- }
+ jsTestLog("Ending session");
+ session.endSession();
+ restartReplSetReplication(rst);
+}
+
+for (let readConcernLevel of [null, "local", "majority", "snapshot"]) {
+ for (let shouldWrite of [false, true]) {
+ for (let provokeWriteConcernError of [false, true]) {
+ runTest({
+ readConcernLevel: readConcernLevel,
+ shouldWrite: shouldWrite,
+ provokeWriteConcernError: provokeWriteConcernError
+ });
}
}
+}
- rst.stopSet();
+rst.stopSet();
}());
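
The noop entry the test counts is an ordinary op: "n" oplog document, so the same check can be made by hand against the oplog collection. A sketch (local.oplog.rs is the standard oplog namespace):

const oplog = primary.getDB("local").oplog.rs;
// Noop entries written for read-only transactions carry a descriptive message.
printjson(oplog.find({op: "n", "o.msg": /read-only transaction/}).toArray());
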
diff --git a/jstests/replsets/noop_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_wait_for_write_concern.js
index c8bf667c26a..a4cd7aacedc 100644
--- a/jstests/replsets/noop_writes_wait_for_write_concern.js
+++ b/jstests/replsets/noop_writes_wait_for_write_concern.js
@@ -6,231 +6,231 @@
*/
(function() {
- "use strict";
- load('jstests/libs/write_concern_util.js');
-
- var name = 'noop_writes_wait_for_write_concern';
- var replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- });
- replTest.startSet();
- replTest.initiate();
- // Stops node 1 so that all w:3 write concerns time out. We have 3 data bearing nodes so that
- // 'dropDatabase' can satisfy its implicit writeConcern: majority but still time out from the
- // explicit w:3 write concern.
- replTest.stop(1);
-
- var primary = replTest.getPrimary();
- assert.eq(primary, replTest.nodes[0]);
- var dbName = 'testDB';
- var db = primary.getDB(dbName);
- var collName = 'testColl';
- var coll = db[collName];
-
- function dropTestCollection() {
- coll.drop();
- assert.eq(0, coll.find().itcount(), "test collection not empty");
+"use strict";
+load('jstests/libs/write_concern_util.js');
+
+var name = 'noop_writes_wait_for_write_concern';
+var replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+});
+replTest.startSet();
+replTest.initiate();
+// Stops node 1 so that all w:3 write concerns time out. We have 3 data-bearing nodes so that
+// 'dropDatabase' can satisfy its implicit writeConcern: majority but still time out from the
+// explicit w:3 write concern.
+replTest.stop(1);
+
+var primary = replTest.getPrimary();
+assert.eq(primary, replTest.nodes[0]);
+var dbName = 'testDB';
+var db = primary.getDB(dbName);
+var collName = 'testColl';
+var coll = db[collName];
+
+function dropTestCollection() {
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+// Each entry in this array contains a command whose noop write concern behavior needs to be
+// tested. Entries have the following structure:
+// {
+// req: <object>, // Command request object that will result in a noop
+// // write after the setup function is called.
+//
+// setupFunc: <function()>, // Function to run to ensure that the request is a
+// // noop.
+//
+// confirmFunc: <function(res)>, // Function to run after the command is run to ensure
+// // that it executed properly. Accepts the result of
+// // the noop request to validate it.
+// }
+var commands = [];
+
+commands.push({
+ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1}}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({_id: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.applied, 1);
+ assert.eq(res.results[0], true);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({_id: 1}), 1);
}
-
- // Each entry in this array contains a command whose noop write concern behavior needs to be
- // tested. Entries have the following structure:
- // {
- // req: <object>, // Command request object that will result in a noop
- // // write after the setup function is called.
- //
- // setupFunc: <function()>, // Function to run to ensure that the request is a
- // // noop.
- //
- // confirmFunc: <function(res)>, // Function to run after the command is run to ensure
- // // that it executed properly. Accepts the result of
- // // the noop request to validate it.
- // }
- var commands = [];
-
- commands.push({
- req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1}}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({_id: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.applied, 1);
- assert.eq(res.results[0], true);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({_id: 1}), 1);
- }
- });
-
- // 'update' where the document to update does not exist.
- commands.push({
- req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.update({a: 1}, {b: 2}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.n, 0);
- assert.eq(res.nModified, 0);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({b: 2}), 1);
- }
- });
-
- // 'update' where the update has already been done.
- commands.push({
- req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.update({a: 1}, {$set: {b: 2}}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.n, 1);
- assert.eq(res.nModified, 0);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({a: 1, b: 2}), 1);
- }
- });
-
- commands.push({
- req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.remove({a: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.n, 0);
- assert.eq(coll.count({a: 1}), 0);
- }
- });
-
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(
- db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.numIndexesBefore, res.numIndexesAfter);
- }
- });
-
- // 'findAndModify' where the document to update does not exist.
- commands.push({
- req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(
- db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.lastErrorObject.updatedExisting, false);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({b: 2}), 1);
- }
- });
-
- // 'findAndModify' where the update has already been done.
- commands.push({
- req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(
- db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.lastErrorObject.updatedExisting, true);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({a: 1, b: 2}), 1);
- }
- });
-
- commands.push({
- req: {dropDatabase: 1},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- }
- });
-
- commands.push({
- req: {drop: collName},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
- },
- confirmFunc: function(res) {
- assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
- }
- });
-
- commands.push({
- req: {create: collName},
- setupFunc: function() {
- assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName}));
- },
- confirmFunc: function(res) {
- assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
- }
- });
-
- commands.push({
- req: {insert: collName, documents: [{_id: 1}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({_id: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
- assert.eq(res.n, 0);
- assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
- assert.eq(coll.count({_id: 1}), 1);
- }
- });
-
- function testCommandWithWriteConcern(cmd) {
- // Provide a small wtimeout that we expect to time out.
- cmd.req.writeConcern = {w: 3, wtimeout: 1000};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestCollection();
-
- cmd.setupFunc();
-
- // We run the command on a different connection. If the the command were run on the
- // same connection, then the client last op for the noop write would be set by the setup
- // operation. By using a fresh connection the client last op begins as null.
- // This test explicitly tests that write concern for noop writes works when the
- // client last op has not already been set by a duplicate operation.
- var shell2 = new Mongo(primary.host);
-
- // We check the error code of 'res' in the 'confirmFunc'.
- var res = shell2.getDB(dbName).runCommand(cmd.req);
-
- try {
- // Tests that the command receives a write concern error. If we don't wait for write
- // concern on noop writes then we won't get a write concern error.
- assertWriteConcernError(res);
- cmd.confirmFunc(res);
- } catch (e) {
- // Make sure that we print out the response.
- printjson(res);
- throw e;
- }
+});
+
+// 'update' where the document to update does not exist.
+commands.push({
+ req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.writeOK(coll.update({a: 1}, {b: 2}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.n, 0);
+ assert.eq(res.nModified, 0);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({b: 2}), 1);
+ }
+});
+
+// 'update' where the update has already been done.
+commands.push({
+ req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.writeOK(coll.update({a: 1}, {$set: {b: 2}}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.n, 1);
+ assert.eq(res.nModified, 0);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({a: 1, b: 2}), 1);
+ }
+});
+
+commands.push({
+ req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.writeOK(coll.remove({a: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.n, 0);
+ assert.eq(coll.count({a: 1}), 0);
+ }
+});
+
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.numIndexesBefore, res.numIndexesAfter);
+ }
+});
+
+// 'findAndModify' where the document to update does not exist.
+commands.push({
+ req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.lastErrorObject.updatedExisting, false);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({b: 2}), 1);
+ }
+});
+
+// 'findAndModify' where the update has already been done.
+commands.push({
+ req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.lastErrorObject.updatedExisting, true);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({a: 1, b: 2}), 1);
+ }
+});
+
+commands.push({
+ req: {dropDatabase: 1},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ }
+});
+
+commands.push({
+ req: {drop: collName},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
+ },
+ confirmFunc: function(res) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
+ }
+});
+
+commands.push({
+ req: {create: collName},
+ setupFunc: function() {
+ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName}));
+ },
+ confirmFunc: function(res) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
+ }
+});
+
+commands.push({
+ req: {insert: collName, documents: [{_id: 1}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({_id: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
+ assert.eq(res.n, 0);
+ assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
+ assert.eq(coll.count({_id: 1}), 1);
+ }
+});
+
+function testCommandWithWriteConcern(cmd) {
+ // Provide a small wtimeout that we expect to time out.
+ cmd.req.writeConcern = {w: 3, wtimeout: 1000};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropTestCollection();
+
+ cmd.setupFunc();
+
+    // We run the command on a different connection. If the command were run on the
+ // same connection, then the client last op for the noop write would be set by the setup
+ // operation. By using a fresh connection the client last op begins as null.
+ // This test explicitly tests that write concern for noop writes works when the
+ // client last op has not already been set by a duplicate operation.
+ var shell2 = new Mongo(primary.host);
+
+ // We check the error code of 'res' in the 'confirmFunc'.
+ var res = shell2.getDB(dbName).runCommand(cmd.req);
+
+ try {
+ // Tests that the command receives a write concern error. If we don't wait for write
+ // concern on noop writes then we won't get a write concern error.
+ assertWriteConcernError(res);
+ cmd.confirmFunc(res);
+ } catch (e) {
+ // Make sure that we print out the response.
+ printjson(res);
+ throw e;
}
+}
- commands.forEach(function(cmd) {
- testCommandWithWriteConcern(cmd);
- });
+commands.forEach(function(cmd) {
+ testCommandWithWriteConcern(cmd);
+});
- replTest.stopSet();
+replTest.stopSet();
})();
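
Because node 1 stays down for the whole file, every request above that carries {w: 3, wtimeout: 1000} should report a write concern timeout rather than failing outright. A self-contained sketch of that expectation, reusing the test's database and collection names:

const res = primary.getDB("testDB").runCommand(
    {insert: "testColl", documents: [{}], writeConcern: {w: 3, wtimeout: 1000}});
// The write applies on the primary, but waiting for w:3 replication times out.
assert(res.writeConcernError, "expected a writeConcernError while node 1 is down");
assert.eq(res.writeConcernError.code, ErrorCodes.WriteConcernFailed);
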
diff --git a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
index 199999574f2..e024a9853f7 100644
--- a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
+++ b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
@@ -2,71 +2,71 @@
* Tests that a no-op setFeatureCompatibilityVersion request still waits for write concern.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // assertWriteConcernError
- load("jstests/replsets/rslib.js"); // reconfig
- load("jstests/libs/feature_compatibility_version.js"); // latestFCV/lastStableFCV
+load("jstests/libs/write_concern_util.js"); // assertWriteConcernError
+load("jstests/replsets/rslib.js"); // reconfig
+load("jstests/libs/feature_compatibility_version.js"); // latestFCV/lastStableFCV
- // Start a two node replica set and set its FCV to the given version, then take down one
- // node so majority write concern can no longer be satisfied and verify that a noop setFCV
- // request times out waiting for majority write concern.
- function testFCVNoop(targetVersion) {
- jsTestLog("Testing setFeatureCompatibilityVersion with targetVersion: " + targetVersion);
+// Start a two node replica set and set its FCV to the given version, then take down one
+// node so majority write concern can no longer be satisfied and verify that a noop setFCV
+// request times out waiting for majority write concern.
+function testFCVNoop(targetVersion) {
+ jsTestLog("Testing setFeatureCompatibilityVersion with targetVersion: " + targetVersion);
- const replTest = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}],
- });
- replTest.startSet();
- replTest.initiate();
+ const replTest = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ });
+ replTest.startSet();
+ replTest.initiate();
- const primary = replTest.getPrimary();
- assert.eq(primary, replTest.nodes[0]);
+ const primary = replTest.getPrimary();
+ assert.eq(primary, replTest.nodes[0]);
- // Set the FCV to the given target version, to ensure calling setFCV below is a no-op.
- assert.commandWorkedIgnoringWriteConcernErrors(
- primary.adminCommand({setFeatureCompatibilityVersion: targetVersion}));
+ // Set the FCV to the given target version, to ensure calling setFCV below is a no-op.
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ primary.adminCommand({setFeatureCompatibilityVersion: targetVersion}));
- // Stop one node to force commands with "majority" write concern to time out. First increase
- // the election timeout to prevent the primary from stepping down before the test is over.
- let conf = replTest.getReplSetConfigFromNode();
- conf.settings = {
- electionTimeoutMillis: 1000 * 60 * 10,
- };
- conf.version += 1;
- reconfig(replTest, conf);
+ // Stop one node to force commands with "majority" write concern to time out. First increase
+ // the election timeout to prevent the primary from stepping down before the test is over.
+ let conf = replTest.getReplSetConfigFromNode();
+ conf.settings = {
+ electionTimeoutMillis: 1000 * 60 * 10,
+ };
+ conf.version += 1;
+ reconfig(replTest, conf);
- replTest.stop(1);
+ replTest.stop(1);
- // Insert a document to ensure there is a last optime.
- assert.writeOK(primary.getDB("test").foo.insert({x: 1}));
+ // Insert a document to ensure there is a last optime.
+ assert.writeOK(primary.getDB("test").foo.insert({x: 1}));
- // We run the command on a different connection. If the the command were run on the same
- // connection, then the client last op for the noop write would be the last op of the
- // previous setFCV call. By using a fresh connection the client last op begins as null. This
- // test explicitly tests that write concern for noop writes works when the client last op
- // has not already been set by a duplicate operation.
- const shell2 = new Mongo(primary.host);
+    // We run the command on a different connection. If the command were run on the same
+ // connection, then the client last op for the noop write would be the last op of the
+ // previous setFCV call. By using a fresh connection the client last op begins as null. This
+ // test explicitly tests that write concern for noop writes works when the client last op
+ // has not already been set by a duplicate operation.
+ const shell2 = new Mongo(primary.host);
- // Use w:1 to verify setFCV internally waits for at least write concern majority, and use a
- // small wtimeout to verify it is propagated into the internal waitForWriteConcern and will
- // allow the command to timeout.
- const res = shell2.adminCommand(
- {setFeatureCompatibilityVersion: targetVersion, writeConcern: {w: 1, wtimeout: 1000}});
+ // Use w:1 to verify setFCV internally waits for at least write concern majority, and use a
+ // small wtimeout to verify it is propagated into the internal waitForWriteConcern and will
+    // allow the command to time out.
+ const res = shell2.adminCommand(
+ {setFeatureCompatibilityVersion: targetVersion, writeConcern: {w: 1, wtimeout: 1000}});
- try {
- // Verify the command receives a write concern error. If we don't wait for write concern
- // on noop writes then we won't get a write concern error.
- assertWriteConcernError(res);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- } catch (e) {
- printjson(res);
- throw e;
- }
-
- replTest.stopSet();
+ try {
+ // Verify the command receives a write concern error. If we don't wait for write concern
+ // on noop writes then we won't get a write concern error.
+ assertWriteConcernError(res);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ } catch (e) {
+ printjson(res);
+ throw e;
}
- testFCVNoop(lastStableFCV);
- testFCVNoop(latestFCV);
+ replTest.stopSet();
+}
+
+testFCVNoop(lastStableFCV);
+testFCVNoop(latestFCV);
})();
diff --git a/jstests/replsets/not_master_unacknowledged_write.js b/jstests/replsets/not_master_unacknowledged_write.js
index f214a45c4d4..ac7b4cf2ef5 100644
--- a/jstests/replsets/not_master_unacknowledged_write.js
+++ b/jstests/replsets/not_master_unacknowledged_write.js
@@ -3,81 +3,82 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- function getNotMasterUnackWritesCounter() {
- return assert.commandWorked(primaryDB.adminCommand({serverStatus: 1}))
- .metrics.repl.network.notMasterUnacknowledgedWrites;
- }
-
- const collName = "not_master_unacknowledged_write";
-
- var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
- var primaryDB = primary.getDB("test");
- var secondaryDB = secondary.getDB("test");
- var primaryColl = primaryDB[collName];
- var secondaryColl = secondaryDB[collName];
-
- jsTestLog("Primary on port " + primary.port + " hangs up on unacknowledged writes");
- // Do each write method with unacknowledged write concern, "wc".
- [{name: "insertOne", fn: (wc) => secondaryColl.insertOne({}, wc)},
- {name: "insertMany", fn: (wc) => secondaryColl.insertMany([{}], wc)},
- {name: "deleteOne", fn: (wc) => secondaryColl.deleteOne({}, wc)},
- {name: "deleteMany", fn: (wc) => secondaryColl.deleteMany({}, wc)},
- {name: "updateOne", fn: (wc) => secondaryColl.updateOne({}, {$set: {x: 1}}, wc)},
- {name: "updateMany", fn: (wc) => secondaryColl.updateMany({}, {$set: {x: 1}}, wc)},
- {name: "replaceOne", fn: (wc) => secondaryColl.replaceOne({}, {}, wc)},
- ].map(({name, fn}) => {
- var result = assert.throws(function() {
- // Provoke the server to hang up.
- fn({writeConcern: {w: 0}});
- // The connection is now broken and isMaster throws a network error.
- secondary.getDB("admin").isMaster();
- }, [], "network error from " + name);
-
- assert.includes(result.toString(),
- "network error while attempting to run command 'isMaster'",
- "after " + name);
- });
-
- // Unacknowledged write in progress when a stepdown occurs provokes a hangup.
- assert.commandWorked(primaryDB.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: primaryColl.getFullName()}
- }));
-
- var command = `
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+function getNotMasterUnackWritesCounter() {
+ return assert.commandWorked(primaryDB.adminCommand({serverStatus: 1}))
+ .metrics.repl.network.notMasterUnacknowledgedWrites;
+}
+
+const collName = "not_master_unacknowledged_write";
+
+var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+var primary = rst.getPrimary();
+var secondary = rst.getSecondary();
+var primaryDB = primary.getDB("test");
+var secondaryDB = secondary.getDB("test");
+var primaryColl = primaryDB[collName];
+var secondaryColl = secondaryDB[collName];
+
+jsTestLog("Primary on port " + primary.port + " hangs up on unacknowledged writes");
+// Do each write method with unacknowledged write concern, "wc".
+[{name: "insertOne", fn: (wc) => secondaryColl.insertOne({}, wc)},
+ {name: "insertMany", fn: (wc) => secondaryColl.insertMany([{}], wc)},
+ {name: "deleteOne", fn: (wc) => secondaryColl.deleteOne({}, wc)},
+ {name: "deleteMany", fn: (wc) => secondaryColl.deleteMany({}, wc)},
+ {name: "updateOne", fn: (wc) => secondaryColl.updateOne({}, {$set: {x: 1}}, wc)},
+ {name: "updateMany", fn: (wc) => secondaryColl.updateMany({}, {$set: {x: 1}}, wc)},
+ {name: "replaceOne", fn: (wc) => secondaryColl.replaceOne({}, {}, wc)},
+].map(({name, fn}) => {
+ var result = assert.throws(function() {
+ // Provoke the server to hang up.
+ fn({writeConcern: {w: 0}});
+ // The connection is now broken and isMaster throws a network error.
+ secondary.getDB("admin").isMaster();
+ }, [], "network error from " + name);
+
+ assert.includes(result.toString(),
+ "network error while attempting to run command 'isMaster'",
+ "after " + name);
+});
+
+// Unacknowledged write in progress when a stepdown occurs provokes a hangup.
+assert.commandWorked(primaryDB.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: primaryColl.getFullName()}
+}));
+
+var command =
+ `
load("jstests/libs/check_log.js");
checkLog.contains(db.getMongo(), "hangAfterCollectionInserts fail point enabled");
db.adminCommand({replSetStepDown: 60, force: true});`;
- var awaitShell = startParallelShell(command, primary.port);
+var awaitShell = startParallelShell(command, primary.port);
- let failedUnackWritesBefore = getNotMasterUnackWritesCounter();
+let failedUnackWritesBefore = getNotMasterUnackWritesCounter();
- jsTestLog("Beginning unacknowledged insert");
- primaryColl.insertOne({}, {writeConcern: {w: 0}});
+jsTestLog("Beginning unacknowledged insert");
+primaryColl.insertOne({}, {writeConcern: {w: 0}});
- jsTestLog("Step down primary on port " + primary.port);
- awaitShell({checkExitSuccess: false});
+jsTestLog("Step down primary on port " + primary.port);
+awaitShell({checkExitSuccess: false});
- jsTestLog("Unacknowledged insert during stepdown provoked disconnect");
- var result = assert.throws(function() {
- primary.getDB("admin").isMaster();
- }, [], "network");
- assert.includes(result.toString(), "network error while attempting to run command 'isMaster'");
+jsTestLog("Unacknowledged insert during stepdown provoked disconnect");
+var result = assert.throws(function() {
+ primary.getDB("admin").isMaster();
+}, [], "network");
+assert.includes(result.toString(), "network error while attempting to run command 'isMaster'");
- // Validate the number of unacknowledged writes failed due to step down resulted in network
- // disconnection.
- let failedUnackWritesAfter = getNotMasterUnackWritesCounter();
- assert.eq(failedUnackWritesAfter, failedUnackWritesBefore + 1);
+// Validate the number of unacknowledged writes that failed due to step down and resulted in
+// network disconnection.
+let failedUnackWritesAfter = getNotMasterUnackWritesCounter();
+assert.eq(failedUnackWritesAfter, failedUnackWritesBefore + 1);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/opcounters_repl.js b/jstests/replsets/opcounters_repl.js
index af45d96ae03..5bf31a1f5ee 100644
--- a/jstests/replsets/opcounters_repl.js
+++ b/jstests/replsets/opcounters_repl.js
@@ -6,97 +6,96 @@
*/
(function() {
- "use strict";
-
- const testName = "opcounters_repl";
- const dbName = testName;
- const rst = new ReplSetTest({name: testName, nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const secondary = rst.getSecondary();
-
- const collName = "coll";
- const collNs = dbName + '.' + collName;
- const primaryColl = primaryDB[collName];
-
- function getOpCounters(node) {
- return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcounters;
+"use strict";
+
+const testName = "opcounters_repl";
+const dbName = testName;
+const rst = new ReplSetTest({name: testName, nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const secondary = rst.getSecondary();
+
+const collName = "coll";
+const collNs = dbName + '.' + collName;
+const primaryColl = primaryDB[collName];
+
+function getOpCounters(node) {
+ return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcounters;
+}
+
+function getOpCountersRepl(node) {
+ return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcountersRepl;
+}
+
+function getOpCountersDiff(cmdFn) {
+ // Get the counters before running cmdFn().
+ const primaryOpCountersBefore = getOpCounters(primary);
+ const secondaryOpCountersReplBefore = getOpCountersRepl(secondary);
+
+ // Run the cmd.
+ cmdFn();
+
+ // Get the counters after running cmdFn().
+ const primaryOpCountersAfter = getOpCounters(primary);
+ const secondaryOpCountersReplAfter = getOpCountersRepl(secondary);
+
+ // Calculate the diff
+ let primaryDiff = {};
+ let secondaryDiff = {};
+ for (let key in primaryOpCountersBefore) {
+ primaryDiff[key] = primaryOpCountersAfter[key] - primaryOpCountersBefore[key];
}
- function getOpCountersRepl(node) {
- return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcountersRepl;
+ for (let key in secondaryOpCountersReplBefore) {
+ secondaryDiff[key] = secondaryOpCountersReplAfter[key] - secondaryOpCountersReplBefore[key];
}
-
- function getOpCountersDiff(cmdFn) {
- // Get the counters before running cmdFn().
- const primaryOpCountersBefore = getOpCounters(primary);
- const secondaryOpCountersReplBefore = getOpCountersRepl(secondary);
-
- // Run the cmd.
- cmdFn();
-
- // Get the counters after running cmdFn().
- const primaryOpCountersAfter = getOpCounters(primary);
- const secondaryOpCountersReplAfter = getOpCountersRepl(secondary);
-
- // Calculate the diff
- let primaryDiff = {};
- let secondaryDiff = {};
- for (let key in primaryOpCountersBefore) {
- primaryDiff[key] = primaryOpCountersAfter[key] - primaryOpCountersBefore[key];
- }
-
- for (let key in secondaryOpCountersReplBefore) {
- secondaryDiff[key] =
- secondaryOpCountersReplAfter[key] - secondaryOpCountersReplBefore[key];
- }
- return {primary: primaryDiff, secondary: secondaryDiff};
- }
-
- // 1. Create collection.
- let diff = getOpCountersDiff(() => {
- assert.commandWorked(primaryDB.createCollection(collName, {writeConcern: {w: 2}}));
- });
- // On primary, the command counter accounts for create command and for other internal
- // commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
- assert.gte(diff.primary.command, 1);
- assert.eq(diff.secondary.command, 1);
-
- // 2. Insert a document.
- diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}}));
- });
- assert.eq(diff.primary.insert, 1);
- assert.eq(diff.secondary.insert, 1);
-
- // 3. Update a document.
- diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}}));
- });
- assert.eq(diff.primary.update, 1);
- assert.eq(diff.secondary.update, 1);
-
- // 4. Delete a document.
- diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}}));
- });
- assert.eq(diff.primary.delete, 1);
- assert.eq(diff.secondary.delete, 1);
-
- // 5. Atomic insert operation via applyOps cmd.
- diff = getOpCountersDiff(() => {
- assert.commandWorked(primaryColl.runCommand(
- {applyOps: [{op: "i", ns: collNs, o: {_id: 1}}], writeConcern: {w: 2}}));
- });
- // On primary, the command counter accounts for applyOps command and for other internal
- // commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
- assert.gte(diff.primary.command, 1);
- assert.eq(diff.secondary.command, 0);
- assert.eq(diff.primary.insert, 1);
- assert.eq(diff.secondary.insert, 1);
-
- rst.stopSet();
+ return {primary: primaryDiff, secondary: secondaryDiff};
+}
+
+// 1. Create collection.
+let diff = getOpCountersDiff(() => {
+ assert.commandWorked(primaryDB.createCollection(collName, {writeConcern: {w: 2}}));
+});
+// On primary, the command counter accounts for the create command and for other internal
+// commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
+assert.gte(diff.primary.command, 1);
+assert.eq(diff.secondary.command, 1);
+
+// 2. Insert a document.
+diff = getOpCountersDiff(() => {
+ assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}}));
+});
+assert.eq(diff.primary.insert, 1);
+assert.eq(diff.secondary.insert, 1);
+
+// 3. Update a document.
+diff = getOpCountersDiff(() => {
+ assert.writeOK(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}}));
+});
+assert.eq(diff.primary.update, 1);
+assert.eq(diff.secondary.update, 1);
+
+// 4. Delete a document.
+diff = getOpCountersDiff(() => {
+ assert.writeOK(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}}));
+});
+assert.eq(diff.primary.delete, 1);
+assert.eq(diff.secondary.delete, 1);
+
+// 5. Atomic insert operation via applyOps cmd.
+diff = getOpCountersDiff(() => {
+ assert.commandWorked(primaryColl.runCommand(
+ {applyOps: [{op: "i", ns: collNs, o: {_id: 1}}], writeConcern: {w: 2}}));
+});
+// On primary, the command counter accounts for the applyOps command and for other internal
+// commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
+assert.gte(diff.primary.command, 1);
+assert.eq(diff.secondary.command, 0);
+assert.eq(diff.primary.insert, 1);
+assert.eq(diff.secondary.insert, 1);
+
+rst.stopSet();
})();
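
For reference, both counter families compared by getOpCountersDiff come from a single serverStatus invocation; a sketch of reading them directly:

const status = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
printjson(status.opcounters);      // operations this node executed as primary
printjson(status.opcountersRepl);  // operations this node applied via replication
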
diff --git a/jstests/replsets/operation_time_read_and_write_concern.js b/jstests/replsets/operation_time_read_and_write_concern.js
index c1661db1d7e..59f6649ce59 100644
--- a/jstests/replsets/operation_time_read_and_write_concern.js
+++ b/jstests/replsets/operation_time_read_and_write_concern.js
@@ -4,124 +4,123 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- // Skip db hash check because replication is stopped on secondaries.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because replication is stopped on secondaries.
+TestData.skipCheckDBHashes = true;
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
- // restartReplicationOnSecondaries
- var name = "operation_time_read_and_write_concern";
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
+ // restartReplicationOnSecondaries
+var name = "operation_time_read_and_write_concern";
- var replTest = new ReplSetTest(
- {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
+var replTest = new ReplSetTest(
+ {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- replTest.stopSet();
- return;
- }
- replTest.initiate();
-
- var res;
- var testDB = replTest.getPrimary().getDB(name);
- var collectionName = "foo";
-
- // readConcern level majority:
- // operationTime is the cluster time of the last committed op in the oplog.
- jsTestLog("Testing operationTime for readConcern level majority with afterClusterTime.");
- var majorityDoc = {_id: 10, x: 1};
- var localDoc = {_id: 15, x: 2};
-
- res = assert.commandWorked(testDB.runCommand(
- {insert: collectionName, documents: [majorityDoc], writeConcern: {w: "majority"}}));
- var majorityWriteOperationTime = res.operationTime;
-
- stopReplicationOnSecondaries(replTest);
-
- res = assert.commandWorked(
- testDB.runCommand({insert: collectionName, documents: [localDoc], writeConcern: {w: 1}}));
- var localWriteOperationTime = res.operationTime;
-
- assert.gt(localWriteOperationTime, majorityWriteOperationTime);
-
- res = assert.commandWorked(testDB.runCommand({
- find: collectionName,
- readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
- }));
- var majorityReadOperationTime = res.operationTime;
-
- assert.eq(res.cursor.firstBatch,
- [majorityDoc],
- "only the committed document, " + tojson(majorityDoc) +
- ", should be returned for the majority read with afterClusterTime: " +
- majorityWriteOperationTime);
- assert.eq(majorityReadOperationTime,
- majorityWriteOperationTime,
- "the operationTime of the majority read, " + majorityReadOperationTime +
- ", should be the cluster time of the last committed op in the oplog, " +
- majorityWriteOperationTime);
-
- // Validate that after replication, the local write data is now returned by the same query.
- restartReplicationOnSecondaries(replTest);
- replTest.awaitLastOpCommitted();
-
- res = assert.commandWorked(testDB.runCommand({
- find: collectionName,
- sort: {_id: 1}, // So the order of the documents is defined for testing.
- readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
- }));
- var secondMajorityReadOperationTime = res.operationTime;
-
- assert.eq(res.cursor.firstBatch,
- [majorityDoc, localDoc],
- "expected both inserted documents, " + tojson([majorityDoc, localDoc]) +
- ", to be returned for the second majority read with afterClusterTime: " +
- majorityWriteOperationTime);
- assert.eq(secondMajorityReadOperationTime,
- localWriteOperationTime,
- "the operationTime of the second majority read, " + secondMajorityReadOperationTime +
- ", should be the cluster time of the replicated local write, " +
- localWriteOperationTime);
-
- // readConcern level linearizable is not currently supported.
- jsTestLog("Verifying readConcern linearizable with afterClusterTime is not supported.");
- res = assert.commandFailedWithCode(
- testDB.runCommand({
- find: collectionName,
- filter: localDoc,
- readConcern: {level: "linearizable", afterClusterTime: majorityReadOperationTime}
- }),
- ErrorCodes.InvalidOptions,
- "linearizable reads with afterClusterTime are not supported and should not be allowed");
-
- // writeConcern level majority:
- // operationTime is the cluster time of the write if it succeeds, or of the previous successful
- // write at the time the write was determined to have failed, or a no-op.
- jsTestLog("Testing operationTime for writeConcern level majority.");
- var successfulDoc = {_id: 1000, y: 1};
- var failedDoc = {_id: 1000, y: 2};
-
- res = assert.commandWorked(testDB.runCommand(
- {insert: collectionName, documents: [successfulDoc], writeConcern: {w: "majority"}}));
- var majorityWriteOperationTime = res.operationTime;
-
- stopReplicationOnSecondaries(replTest);
-
- res = testDB.runCommand({
- insert: collectionName,
- documents: [failedDoc],
- writeConcern: {w: "majority", wtimeout: 1000}
- });
- assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
- var failedWriteOperationTime = res.operationTime;
-
- assert.eq(
- failedWriteOperationTime,
- majorityWriteOperationTime,
- "the operationTime of the failed majority write, " + failedWriteOperationTime +
- ", should be the cluster time of the last successful write at the time it failed, " +
- majorityWriteOperationTime);
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
replTest.stopSet();
+ return;
+}
+replTest.initiate();
+
+var res;
+var testDB = replTest.getPrimary().getDB(name);
+var collectionName = "foo";
+
+// readConcern level majority:
+// operationTime is the cluster time of the last committed op in the oplog.
+jsTestLog("Testing operationTime for readConcern level majority with afterClusterTime.");
+var majorityDoc = {_id: 10, x: 1};
+var localDoc = {_id: 15, x: 2};
+
+res = assert.commandWorked(testDB.runCommand(
+ {insert: collectionName, documents: [majorityDoc], writeConcern: {w: "majority"}}));
+var majorityWriteOperationTime = res.operationTime;
+
+stopReplicationOnSecondaries(replTest);
+
+res = assert.commandWorked(
+ testDB.runCommand({insert: collectionName, documents: [localDoc], writeConcern: {w: 1}}));
+var localWriteOperationTime = res.operationTime;
+
+assert.gt(localWriteOperationTime, majorityWriteOperationTime);
+
+res = assert.commandWorked(testDB.runCommand({
+ find: collectionName,
+ readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
+}));
+var majorityReadOperationTime = res.operationTime;
+
+assert.eq(res.cursor.firstBatch,
+ [majorityDoc],
+ "only the committed document, " + tojson(majorityDoc) +
+ ", should be returned for the majority read with afterClusterTime: " +
+ majorityWriteOperationTime);
+assert.eq(majorityReadOperationTime,
+ majorityWriteOperationTime,
+ "the operationTime of the majority read, " + majorityReadOperationTime +
+ ", should be the cluster time of the last committed op in the oplog, " +
+ majorityWriteOperationTime);
+
+// Validate that after replication, the local write data is now returned by the same query.
+restartReplicationOnSecondaries(replTest);
+replTest.awaitLastOpCommitted();
+
+res = assert.commandWorked(testDB.runCommand({
+ find: collectionName,
+ sort: {_id: 1}, // So the order of the documents is defined for testing.
+ readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
+}));
+var secondMajorityReadOperationTime = res.operationTime;
+
+assert.eq(res.cursor.firstBatch,
+ [majorityDoc, localDoc],
+ "expected both inserted documents, " + tojson([majorityDoc, localDoc]) +
+ ", to be returned for the second majority read with afterClusterTime: " +
+ majorityWriteOperationTime);
+assert.eq(secondMajorityReadOperationTime,
+ localWriteOperationTime,
+ "the operationTime of the second majority read, " + secondMajorityReadOperationTime +
+ ", should be the cluster time of the replicated local write, " +
+ localWriteOperationTime);
+
+// readConcern level linearizable is not currently supported.
+jsTestLog("Verifying readConcern linearizable with afterClusterTime is not supported.");
+res = assert.commandFailedWithCode(
+ testDB.runCommand({
+ find: collectionName,
+ filter: localDoc,
+ readConcern: {level: "linearizable", afterClusterTime: majorityReadOperationTime}
+ }),
+ ErrorCodes.InvalidOptions,
+ "linearizable reads with afterClusterTime are not supported and should not be allowed");
+
+// writeConcern level majority:
+// operationTime is the cluster time of the write when it succeeds; when it fails, it is the
+// cluster time of the last successful write (possibly a no-op) as of the moment the failure
+// was determined.
+jsTestLog("Testing operationTime for writeConcern level majority.");
+var successfulDoc = {_id: 1000, y: 1};
+var failedDoc = {_id: 1000, y: 2};
+
+res = assert.commandWorked(testDB.runCommand(
+ {insert: collectionName, documents: [successfulDoc], writeConcern: {w: "majority"}}));
+var majorityWriteOperationTime = res.operationTime;
+
+stopReplicationOnSecondaries(replTest);
+
+res = testDB.runCommand({
+ insert: collectionName,
+ documents: [failedDoc],
+ writeConcern: {w: "majority", wtimeout: 1000}
+});
+assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
+var failedWriteOperationTime = res.operationTime;
+
+assert.eq(failedWriteOperationTime,
+ majorityWriteOperationTime,
+ "the operationTime of the failed majority write, " + failedWriteOperationTime +
+ ", should be the cluster time of the last successful write at the time it failed, " +
+ majorityWriteOperationTime);
+replTest.stopSet();
})();
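Aside: the round trip exercised above, feeding a write's operationTime back in as afterClusterTime on a majority read, is the basic causal-consistency pattern. A minimal sketch under the same assumptions as the test (a jstest shell, a replica-set connection `conn`, and a storage engine that supports majority reads; the collection name is illustrative):

    // Sketch: read-your-own-write via operationTime/afterClusterTime.
    var db = conn.getDB("test");
    var writeRes = assert.commandWorked(db.runCommand(
        {insert: "causal", documents: [{_id: 1}], writeConcern: {w: "majority"}}));
    var opTime = writeRes.operationTime;  // cluster time assigned to the write

    // A majority read at or after that cluster time must observe the write.
    var readRes = assert.commandWorked(db.runCommand(
        {find: "causal", readConcern: {level: "majority", afterClusterTime: opTime}}));
    assert.eq(1, readRes.cursor.firstBatch.length);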
diff --git a/jstests/replsets/oplog_format_create_indexes.js b/jstests/replsets/oplog_format_create_indexes.js
index 0697df44b07..117d5be04bb 100644
--- a/jstests/replsets/oplog_format_create_indexes.js
+++ b/jstests/replsets/oplog_format_create_indexes.js
@@ -3,78 +3,78 @@
* creation.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- const testDB = primary.getDB("test");
- const oplogColl = primary.getDB("local").oplog.rs;
+const testDB = primary.getDB("test");
+const oplogColl = primary.getDB("local").oplog.rs;
- function testOplogEntryContainsIndexInfoObj(coll, keyPattern, indexOptions) {
- assert.commandWorked(coll.createIndex(keyPattern, indexOptions));
- const allIndexes = coll.getIndexes();
- const indexSpec = GetIndexHelpers.findByKeyPattern(allIndexes, keyPattern);
+function testOplogEntryContainsIndexInfoObj(coll, keyPattern, indexOptions) {
+ assert.commandWorked(coll.createIndex(keyPattern, indexOptions));
+ const allIndexes = coll.getIndexes();
+ const indexSpec = GetIndexHelpers.findByKeyPattern(allIndexes, keyPattern);
- assert.neq(
- null,
- indexSpec,
- "Index with key pattern " + tojson(keyPattern) + " not found: " + tojson(allIndexes));
+ assert.neq(
+ null,
+ indexSpec,
+ "Index with key pattern " + tojson(keyPattern) + " not found: " + tojson(allIndexes));
- // Find the createIndexes command entries.
- const indexCreationOplogQuery = {
- op: "c",
- ns: testDB.getName() + ".$cmd", "o.createIndexes": coll.getName()
- };
+ // Find the createIndexes command entries.
+ const indexCreationOplogQuery = {
+ op: "c",
+ ns: testDB.getName() + ".$cmd",
+ "o.createIndexes": coll.getName()
+ };
- const allOplogEntries = oplogColl.find(indexCreationOplogQuery).toArray();
+ const allOplogEntries = oplogColl.find(indexCreationOplogQuery).toArray();
- // Preserve the JSON version of the originals, as we're going to delete fields.
- const allOplogEntriesJson = tojson(allOplogEntries);
- const indexSpecJson = tojson(indexSpec);
+ // Preserve the JSON version of the originals, as we're going to delete fields.
+ const allOplogEntriesJson = tojson(allOplogEntries);
+ const indexSpecJson = tojson(indexSpec);
- // Because of differences between the new and old oplog entries for createIndexes,
- // treat the namespace part separately and compare entries without ns field.
- const indexSpecNs = indexSpec.ns;
- delete indexSpec.ns;
- const found = allOplogEntries.filter((entry) => {
- const entryNs = entry.o.ns || testDB.getName() + "." + entry.o.createIndexes;
- const entrySpec = entry.o;
- delete entrySpec.ns;
- delete entrySpec.createIndexes;
- return indexSpecNs === entryNs && bsonWoCompare(indexSpec, entrySpec) === 0;
- });
- assert.eq(1,
- found.length,
- "Failed to find full index specification " + indexSpecJson +
- " in any oplog entry from index creation: " + allOplogEntriesJson);
+ // Because of differences between the new and old oplog entries for createIndexes,
+ // treat the namespace part separately and compare entries without ns field.
+ const indexSpecNs = indexSpec.ns;
+ delete indexSpec.ns;
+ const found = allOplogEntries.filter((entry) => {
+ const entryNs = entry.o.ns || testDB.getName() + "." + entry.o.createIndexes;
+ const entrySpec = entry.o;
+ delete entrySpec.ns;
+ delete entrySpec.createIndexes;
+ return indexSpecNs === entryNs && bsonWoCompare(indexSpec, entrySpec) === 0;
+ });
+ assert.eq(1,
+ found.length,
+ "Failed to find full index specification " + indexSpecJson +
+ " in any oplog entry from index creation: " + allOplogEntriesJson);
- assert.commandWorked(coll.dropIndex(keyPattern));
- }
+ assert.commandWorked(coll.dropIndex(keyPattern));
+}
- // Test that options both explicitly included in the command and implicitly filled in with
- // defaults by the server are serialized into the corresponding oplog entry.
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withoutAnyOptions: 1});
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withV1: 1}, {v: 1});
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format,
- {partialIndex: 1},
- {partialFilterExpression: {field: {$exists: true}}});
+// Test that options both explicitly included in the command and implicitly filled in with
+// defaults by the server are serialized into the corresponding oplog entry.
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withoutAnyOptions: 1});
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withV1: 1}, {v: 1});
+testOplogEntryContainsIndexInfoObj(
+ testDB.oplog_format, {partialIndex: 1}, {partialFilterExpression: {field: {$exists: true}}});
- // Test that the representation of an index's collation in the oplog on a collection with a
- // non-simple default collation exactly matches that of the index's full specification.
- assert.commandWorked(
- testDB.runCommand({create: "oplog_format_collation", collation: {locale: "fr"}}));
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withDefaultCollation: 1});
- testOplogEntryContainsIndexInfoObj(
- testDB.oplog_format_collation, {withNonDefaultCollation: 1}, {collation: {locale: "en"}});
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withV1: 1}, {v: 1});
- testOplogEntryContainsIndexInfoObj(
- testDB.oplog_format_collation, {withSimpleCollation: 1}, {collation: {locale: "simple"}});
+// Test that the representation of an index's collation in the oplog on a collection with a
+// non-simple default collation exactly matches that of the index's full specification.
+assert.commandWorked(
+ testDB.runCommand({create: "oplog_format_collation", collation: {locale: "fr"}}));
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withDefaultCollation: 1});
+testOplogEntryContainsIndexInfoObj(
+ testDB.oplog_format_collation, {withNonDefaultCollation: 1}, {collation: {locale: "en"}});
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withV1: 1}, {v: 1});
+testOplogEntryContainsIndexInfoObj(
+ testDB.oplog_format_collation, {withSimpleCollation: 1}, {collation: {locale: "simple"}});
- rst.stopSet();
+rst.stopSet();
})();
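Aside: the oplog query that testOplogEntryContainsIndexInfoObj builds can also be run by hand. A sketch, assuming a jstest shell with a primary connection `primary` and an index already created on `test.foo` (both names illustrative):

    // Sketch: locate createIndexes command entries for test.foo in the oplog.
    var oplog = primary.getDB("local").oplog.rs;
    var entries = oplog.find({op: "c", ns: "test.$cmd", "o.createIndexes": "foo"})
                      .sort({$natural: -1})
                      .toArray();
    // Each entry's o field carries the serialized index specification.
    entries.forEach(function(entry) {
        jsTestLog(tojson(entry.o));
    });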
diff --git a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
index ee88b7a4a98..d9d3afca25d 100644
--- a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
+++ b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
@@ -4,62 +4,62 @@
//
// @tags: [requires_persistence]
(function() {
- "use strict";
+"use strict";
- var rst = new ReplSetTest({
- nodes: 1,
- });
+var rst = new ReplSetTest({
+ nodes: 1,
+});
- rst.startSet();
- rst.initiate();
+rst.startSet();
+rst.initiate();
- var conn = rst.getPrimary(); // Waits for PRIMARY state.
- conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
- assert.neq(null, conn, "failed to restart");
+var conn = rst.getPrimary(); // Waits for PRIMARY state.
+conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
+assert.neq(null, conn, "failed to restart");
- var oplog = conn.getCollection('local.oplog.rs');
- var lastOplogDoc = conn.getCollection('local.oplog.rs').find().sort({$natural: -1}).limit(1)[0];
- var lastTs = lastOplogDoc.ts;
- var newTs = Timestamp(lastTs.t + 1, 1);
- var term = lastOplogDoc.t;
+var oplog = conn.getCollection('local.oplog.rs');
+var lastOplogDoc = conn.getCollection('local.oplog.rs').find().sort({$natural: -1}).limit(1)[0];
+var lastTs = lastOplogDoc.ts;
+var newTs = Timestamp(lastTs.t + 1, 1);
+var term = lastOplogDoc.t;
- assert.writeOK(oplog.insert({
- ts: newTs,
- t: term,
- h: 1,
- op: 'c',
- ns: 'somedb.$cmd',
- o: {thereIsNoCommandWithThisName: 1},
- }));
-
- var injectedMinValidDoc = {
- _id: ObjectId(),
+assert.writeOK(oplog.insert({
+ ts: newTs,
+ t: term,
+ h: 1,
+ op: 'c',
+ ns: 'somedb.$cmd',
+ o: {thereIsNoCommandWithThisName: 1},
+}));
- // appliedThrough
- begin: {
- ts: lastTs,
- t: term,
- },
+var injectedMinValidDoc = {
+ _id: ObjectId(),
- // minvalid:
+ // appliedThrough
+ begin: {
+ ts: lastTs,
t: term,
- ts: newTs,
- };
+ },
+
+ // minvalid:
+ t: term,
+ ts: newTs,
+};
- // This weird mechanism is the only way to bypass mongod's attempt to fill in null
- // Timestamps.
- var minValidColl = conn.getCollection('local.replset.minvalid');
- assert.writeOK(minValidColl.remove({}));
- assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true}));
- assert.eq(minValidColl.findOne(),
- injectedMinValidDoc,
- "If the Timestamps differ, the server may be filling in the null timestamps");
+// This weird mechanism is the only way to bypass mongod's attempt to fill in null
+// Timestamps.
+var minValidColl = conn.getCollection('local.replset.minvalid');
+assert.writeOK(minValidColl.remove({}));
+assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true}));
+assert.eq(minValidColl.findOne(),
+ injectedMinValidDoc,
+ "If the Timestamps differ, the server may be filling in the null timestamps");
- assert.throws(() => rst.restart(0)); // Restart in replSet mode again.
+assert.throws(() => rst.restart(0)); // Restart in replSet mode again.
- // fassert() calls std::abort(), which returns a different exit code for Windows vs. other
- // platforms.
- const exitCode = _isWindows() ? MongoRunner.EXIT_ABRUPT : MongoRunner.EXIT_ABORT;
- rst.stop(0, undefined, {allowedExitCode: exitCode});
- rst.stopSet();
+// fassert() calls std::abort(), which returns a different exit code for Windows vs. other
+// platforms.
+const exitCode = _isWindows() ? MongoRunner.EXIT_ABRUPT : MongoRunner.EXIT_ABORT;
+rst.stop(0, undefined, {allowedExitCode: exitCode});
+rst.stopSet();
})();
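Aside: the injected document above mimics the minValid state mongod maintains itself. A sketch for inspecting that state on a node restarted as a standalone, assuming a connection `conn` (the field layout matches the injected document above; `begin` may be absent when appliedThrough is unset):

    // Sketch: inspect the minValid document that gates startup oplog replay.
    var minValid = conn.getCollection("local.replset.minvalid").findOne();
    jsTestLog("appliedThrough: " + tojson(minValid.begin));  // last applied optime
    jsTestLog("minValid: " + tojson({t: minValid.t, ts: minValid.ts}));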
diff --git a/jstests/replsets/oplog_rollover.js b/jstests/replsets/oplog_rollover.js
index b16f54cf539..b768108fca6 100644
--- a/jstests/replsets/oplog_rollover.js
+++ b/jstests/replsets/oplog_rollover.js
@@ -1,132 +1,129 @@
/**
* Test that oplog (on both primary and secondary) rolls over when its size exceeds the configured
* maximum. This test runs on wiredTiger storage engine and inMemory storage engine (if available).
-*/
+ */
(function() {
- "use strict";
+"use strict";
- function doTest(storageEngine) {
- jsTestLog("Testing with storageEngine: " + storageEngine);
+function doTest(storageEngine) {
+ jsTestLog("Testing with storageEngine: " + storageEngine);
- const replSet = new ReplSetTest({
- // Set the syncdelay to 1s to speed up checkpointing.
- nodeOptions: {syncdelay: 1},
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
- // Set max oplog size to 1MB.
- replSet.startSet({storageEngine: storageEngine, oplogSize: 1});
- replSet.initiate();
+ const replSet = new ReplSetTest({
+ // Set the syncdelay to 1s to speed up checkpointing.
+ nodeOptions: {syncdelay: 1},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+ // Set max oplog size to 1MB.
+ replSet.startSet({storageEngine: storageEngine, oplogSize: 1});
+ replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- const secondary = replSet.getSecondary();
- const secondaryOplog = secondary.getDB("local").oplog.rs;
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ const secondary = replSet.getSecondary();
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
- const coll = primary.getDB("test").foo;
- // 400KB each so that oplog can keep at most two insert oplog entries.
- const longString = new Array(400 * 1024).join("a");
+ const coll = primary.getDB("test").foo;
+ // 400KB each so that oplog can keep at most two insert oplog entries.
+ const longString = new Array(400 * 1024).join("a");
- function numInsertOplogEntry(oplog) {
- return oplog.find({op: "i", "ns": "test.foo"}).itcount();
- }
-
- // Insert the first document.
- assert.commandWorked(coll.insert({_id: 0, longString: longString}, {writeConcern: {w: 2}}));
- // Test that oplog entry of the first insert exists on both primary and secondary.
- assert.eq(1, numInsertOplogEntry(primaryOplog));
- assert.eq(1, numInsertOplogEntry(secondaryOplog));
-
- // Insert the second document.
- const secondInsertTimestamp =
- assert
- .commandWorked(coll.runCommand(
- "insert",
- {documents: [{_id: 1, longString: longString}], writeConcern: {w: 2}}))
- .operationTime;
- // Test that oplog entries of both inserts exist on both primary and secondary.
- assert.eq(2, numInsertOplogEntry(primaryOplog));
- assert.eq(2, numInsertOplogEntry(secondaryOplog));
+ function numInsertOplogEntry(oplog) {
+ return oplog.find({op: "i", "ns": "test.foo"}).itcount();
+ }
- // Have a more fine-grained test for enableMajorityReadConcern=true to also test oplog
- // truncation happens at the time we expect it to happen. When
- // enableMajorityReadConcern=false the lastStableRecoveryTimestamp is not available, so
- // switch to a coarser-grained mode to only test that oplog truncation will eventually
- // happen when oplog size exceeds the configured maximum.
- if (primary.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
- // Wait for checkpointing/stable timestamp to catch up with the second insert so oplog
- // entry of the first insert is allowed to be deleted by the oplog truncater thread when
-        // a new oplog stone is created. The "inMemory" WT engine does not run a checkpoint thread
-        // and lastStableRecoveryTimestamp is the stable timestamp in this case.
- assert.soon(
- () => {
- const primaryTimestamp =
- assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}))
- .lastStableRecoveryTimestamp;
- const secondaryTimestamp =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}))
- .lastStableRecoveryTimestamp;
- if (primaryTimestamp >= secondInsertTimestamp &&
- secondaryTimestamp >= secondInsertTimestamp) {
- return true;
- } else {
- jsTestLog(
- "Awaiting last stable recovery timestamp " +
- `(primary: ${primaryTimestamp}, secondary: ${secondaryTimestamp}) ` +
- `target: ${secondInsertTimestamp}`);
- return false;
- }
- },
- "Timeout waiting for checkpointing to catch up with the second insert",
- ReplSetTest.kDefaultTimeoutMS,
- 2000);
+ // Insert the first document.
+ assert.commandWorked(coll.insert({_id: 0, longString: longString}, {writeConcern: {w: 2}}));
+ // Test that oplog entry of the first insert exists on both primary and secondary.
+ assert.eq(1, numInsertOplogEntry(primaryOplog));
+ assert.eq(1, numInsertOplogEntry(secondaryOplog));
- // Insert the third document which will trigger a new oplog stone to be created. The
- // oplog truncater thread will then be unblocked on the creation of the new oplog stone
- // and will start truncating oplog entries. The oplog entry for the first insert will be
- // truncated after the oplog truncater thread finishes.
- assert.commandWorked(
- coll.insert({_id: 2, longString: longString}, {writeConcern: {w: 2}}));
+ // Insert the second document.
+ const secondInsertTimestamp =
+ assert
+ .commandWorked(coll.runCommand(
+ "insert", {documents: [{_id: 1, longString: longString}], writeConcern: {w: 2}}))
+ .operationTime;
+ // Test that oplog entries of both inserts exist on both primary and secondary.
+ assert.eq(2, numInsertOplogEntry(primaryOplog));
+ assert.eq(2, numInsertOplogEntry(secondaryOplog));
- // Test that oplog entry of the initial insert rolls over on both primary and secondary.
- // Use assert.soon to wait for oplog truncater thread to run.
- assert.soon(() => {
- return numInsertOplogEntry(primaryOplog) === 2;
- }, "Timeout waiting for oplog to roll over on primary");
- assert.soon(() => {
- return numInsertOplogEntry(secondaryOplog) === 2;
- }, "Timeout waiting for oplog to roll over on secondary");
- } else {
- // Only test that oplog truncation will eventually happen.
- let numInserted = 2;
- assert.soon(function() {
- // Insert more documents.
- assert.commandWorked(coll.insert({_id: numInserted++, longString: longString},
- {writeConcern: {w: 2}}));
- const numInsertOplogEntryPrimary = numInsertOplogEntry(primaryOplog);
- const numInsertOplogEntrySecondary = numInsertOplogEntry(secondaryOplog);
-                // The oplog has been truncated if the number of insert oplog entries is less
-                // than the number inserted.
- if (numInsertOplogEntryPrimary < numInserted &&
- numInsertOplogEntrySecondary < numInserted)
+ // Have a more fine-grained test for enableMajorityReadConcern=true to also test oplog
+ // truncation happens at the time we expect it to happen. When
+ // enableMajorityReadConcern=false the lastStableRecoveryTimestamp is not available, so
+ // switch to a coarser-grained mode to only test that oplog truncation will eventually
+ // happen when oplog size exceeds the configured maximum.
+ if (primary.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
+ // Wait for checkpointing/stable timestamp to catch up with the second insert so oplog
+ // entry of the first insert is allowed to be deleted by the oplog truncater thread when
+        // a new oplog stone is created. The "inMemory" WT engine does not run a checkpoint thread
+        // and lastStableRecoveryTimestamp is the stable timestamp in this case.
+ assert.soon(
+ () => {
+ const primaryTimestamp =
+ assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}))
+ .lastStableRecoveryTimestamp;
+ const secondaryTimestamp =
+ assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}))
+ .lastStableRecoveryTimestamp;
+ if (primaryTimestamp >= secondInsertTimestamp &&
+ secondaryTimestamp >= secondInsertTimestamp) {
return true;
- jsTestLog("Awaiting oplog truncation: number of oplog entries: " +
- `(primary: ${numInsertOplogEntryPrimary}, ` +
- `secondary: ${numInsertOplogEntrySecondary}) ` +
- `number inserted: ${numInserted}`);
- return false;
- }, "Timeout waiting for oplog to roll over", ReplSetTest.kDefaultTimeoutMS, 1000);
- }
+ } else {
+ jsTestLog("Awaiting last stable recovery timestamp " +
+ `(primary: ${primaryTimestamp}, secondary: ${secondaryTimestamp}) ` +
+ `target: ${secondInsertTimestamp}`);
+ return false;
+ }
+ },
+ "Timeout waiting for checkpointing to catch up with the second insert",
+ ReplSetTest.kDefaultTimeoutMS,
+ 2000);
- replSet.stopSet();
+ // Insert the third document which will trigger a new oplog stone to be created. The
+ // oplog truncater thread will then be unblocked on the creation of the new oplog stone
+ // and will start truncating oplog entries. The oplog entry for the first insert will be
+ // truncated after the oplog truncater thread finishes.
+ assert.commandWorked(coll.insert({_id: 2, longString: longString}, {writeConcern: {w: 2}}));
+
+ // Test that oplog entry of the initial insert rolls over on both primary and secondary.
+ // Use assert.soon to wait for oplog truncater thread to run.
+ assert.soon(() => {
+ return numInsertOplogEntry(primaryOplog) === 2;
+ }, "Timeout waiting for oplog to roll over on primary");
+ assert.soon(() => {
+ return numInsertOplogEntry(secondaryOplog) === 2;
+ }, "Timeout waiting for oplog to roll over on secondary");
+ } else {
+ // Only test that oplog truncation will eventually happen.
+ let numInserted = 2;
+ assert.soon(function() {
+ // Insert more documents.
+ assert.commandWorked(
+ coll.insert({_id: numInserted++, longString: longString}, {writeConcern: {w: 2}}));
+ const numInsertOplogEntryPrimary = numInsertOplogEntry(primaryOplog);
+ const numInsertOplogEntrySecondary = numInsertOplogEntry(secondaryOplog);
+            // The oplog has been truncated if the number of insert oplog entries is less than
+            // the number inserted.
+ if (numInsertOplogEntryPrimary < numInserted &&
+ numInsertOplogEntrySecondary < numInserted)
+ return true;
+ jsTestLog("Awaiting oplog truncation: number of oplog entries: " +
+ `(primary: ${numInsertOplogEntryPrimary}, ` +
+ `secondary: ${numInsertOplogEntrySecondary}) ` +
+ `number inserted: ${numInserted}`);
+ return false;
+ }, "Timeout waiting for oplog to roll over", ReplSetTest.kDefaultTimeoutMS, 1000);
}
- doTest("wiredTiger");
+ replSet.stopSet();
+}
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog(
- "Skipping inMemory test because inMemory storageEngine was not compiled into the server.");
- return;
- }
+doTest("wiredTiger");
+
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog(
+ "Skipping inMemory test because inMemory storageEngine was not compiled into the server.");
+ return;
+}
- doTest("inMemory");
+doTest("inMemory");
})();
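Aside: outside of a test harness, the same truncation behavior can be provoked by shrinking the oplog at runtime. A sketch, assuming a jstest shell with a WiredTiger primary connection `primary` (replSetResizeOplog takes a size in megabytes, with 990MB as the minimum):

    // Sketch: shrink the oplog and watch the insert-entry count decay.
    assert.commandWorked(primary.adminCommand({replSetResizeOplog: 1, size: 990}));
    var oplog = primary.getDB("local").oplog.rs;
    // Truncation is asynchronous, so the count shrinks as the truncater runs.
    jsTestLog("insert entries remaining: " + oplog.find({op: "i"}).itcount());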
diff --git a/jstests/replsets/oplog_term.js b/jstests/replsets/oplog_term.js
index 93b650ab637..f21e01f4a98 100644
--- a/jstests/replsets/oplog_term.js
+++ b/jstests/replsets/oplog_term.js
@@ -1,34 +1,33 @@
// Term counter should be present in oplog entries under protocol version 1.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'oplog_term';
- var replSet = new ReplSetTest({name: name, nodes: 1});
- replSet.startSet();
- replSet.initiate();
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
+var name = 'oplog_term';
+var replSet = new ReplSetTest({name: name, nodes: 1});
+replSet.startSet();
+replSet.initiate();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
-    // Default protocol version is 1 - the 'term' field should be present in the oplog entry.
- var primary = replSet.getPrimary();
- var collection = primary.getDB('test').getCollection(name);
- assert.writeOK(collection.save({_id: 1}));
+// Default protocol version is 1 - the 'term' field should be present in the oplog entry.
+var primary = replSet.getPrimary();
+var collection = primary.getDB('test').getCollection(name);
+assert.writeOK(collection.save({_id: 1}));
- var oplogEntry = getLatestOp(primary);
- assert(oplogEntry, 'unexpected empty oplog');
- assert.eq(collection.getFullName(),
- oplogEntry.ns,
- 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
- assert.eq(
- 1,
- oplogEntry.o._id,
- 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry));
- assert(oplogEntry.hasOwnProperty('t'), 'oplog entry must contain term: ' + tojson(oplogEntry));
+var oplogEntry = getLatestOp(primary);
+assert(oplogEntry, 'unexpected empty oplog');
+assert.eq(collection.getFullName(),
+ oplogEntry.ns,
+ 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
+assert.eq(1,
+ oplogEntry.o._id,
+ 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry));
+assert(oplogEntry.hasOwnProperty('t'), 'oplog entry must contain term: ' + tojson(oplogEntry));
- var status = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
- assert.eq(status.term,
- oplogEntry.t,
- 'term in oplog entry does not match term in status: ' + tojson(oplogEntry));
+var status = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
+assert.eq(status.term,
+ oplogEntry.t,
+ 'term in oplog entry does not match term in status: ' + tojson(oplogEntry));
- replSet.stopSet();
+replSet.stopSet();
})();
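Aside: getLatestOp (from rslib.js) is a thin wrapper over a reverse natural-order scan of the oplog; an equivalent sketch, assuming a primary connection `primary` in a jstest shell:

    // Sketch: fetch the newest oplog entry and read its term field directly.
    var latest = primary.getDB("local")
                     .oplog.rs.find()
                     .sort({$natural: -1})
                     .limit(1)
                     .next();
    jsTestLog("latest op term: " + latest.t + ", ts: " + tojson(latest.ts));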
diff --git a/jstests/replsets/oplog_visibility.js b/jstests/replsets/oplog_visibility.js
index ccdcf5c6d93..2c3c49f839e 100644
--- a/jstests/replsets/oplog_visibility.js
+++ b/jstests/replsets/oplog_visibility.js
@@ -6,122 +6,122 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
- const replTest = new ReplSetTest({
- name: "oplog_visibility",
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: true}
- });
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({
+ name: "oplog_visibility",
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: true}
+});
+replTest.startSet();
+replTest.initiate();
- jsTestLog("Enabling `sleepBeforeCommit` failpoint.");
- for (let node of replTest.nodes) {
- assert.commandWorked(node.adminCommand(
- {configureFailPoint: "sleepBeforeCommit", mode: {activationProbability: 0.01}}));
- }
+jsTestLog("Enabling `sleepBeforeCommit` failpoint.");
+for (let node of replTest.nodes) {
+ assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "sleepBeforeCommit", mode: {activationProbability: 0.01}}));
+}
- jsTestLog("Starting concurrent writers.");
- let stopLatch = new CountDownLatch(1);
- let writers = [];
- for (let idx = 0; idx < 2; ++idx) {
- let coll = "coll_" + idx;
- let writer = new ScopedThread(function(host, coll, stopLatch) {
- const conn = new Mongo(host);
- let id = 0;
+jsTestLog("Starting concurrent writers.");
+let stopLatch = new CountDownLatch(1);
+let writers = [];
+for (let idx = 0; idx < 2; ++idx) {
+ let coll = "coll_" + idx;
+ let writer = new ScopedThread(function(host, coll, stopLatch) {
+ const conn = new Mongo(host);
+ let id = 0;
- // Cap the amount of data being inserted to avoid rolling over a 10MiB oplog. It takes
- // ~70,000 "basic" ~150 byte oplog documents to fill a 10MiB oplog. Note this number is
- // for each of two writer threads.
- const maxDocsToInsert = 20 * 1000;
- while (stopLatch.getCount() > 0 && id < maxDocsToInsert) {
- conn.getDB("test").getCollection(coll).insert({_id: id});
- id++;
- }
- jsTestLog({"NumDocsWritten": id});
- }, replTest.getPrimary().host, coll, stopLatch);
+ // Cap the amount of data being inserted to avoid rolling over a 10MiB oplog. It takes
+ // ~70,000 "basic" ~150 byte oplog documents to fill a 10MiB oplog. Note this number is
+ // for each of two writer threads.
+ const maxDocsToInsert = 20 * 1000;
+ while (stopLatch.getCount() > 0 && id < maxDocsToInsert) {
+ conn.getDB("test").getCollection(coll).insert({_id: id});
+ id++;
+ }
+ jsTestLog({"NumDocsWritten": id});
+ }, replTest.getPrimary().host, coll, stopLatch);
- writer.start();
- writers.push(writer);
- }
+ writer.start();
+ writers.push(writer);
+}
- for (let node of replTest.nodes) {
- let testOplog = function(node) {
- let timestamps = [];
+for (let node of replTest.nodes) {
+ let testOplog = function(node) {
+ let timestamps = [];
- let local = node.getDB("local");
- let oplogStart =
- local.getCollection("oplog.rs").find().sort({$natural: -1}).limit(-1).next()["ts"];
- jsTestLog({"Node": node.host, "StartTs": oplogStart});
+ let local = node.getDB("local");
+ let oplogStart =
+ local.getCollection("oplog.rs").find().sort({$natural: -1}).limit(-1).next()["ts"];
+ jsTestLog({"Node": node.host, "StartTs": oplogStart});
- while (timestamps.length < 1000) {
-            // Query with $gte to validate continuity. Do not add this first record to the
- // recorded timestamps. Its value was already added in the last cursor.
- let cursor = local.getCollection("oplog.rs")
- .find({ts: {$gte: oplogStart}})
- .sort({$natural: 1})
- .tailable(true)
- .batchSize(100);
- assert(cursor.hasNext());
- assert.eq(oplogStart, cursor.next()["ts"]);
+ while (timestamps.length < 1000) {
+            // Query with $gte to validate continuity. Do not add this first record to the
+ // recorded timestamps. Its value was already added in the last cursor.
+ let cursor = local.getCollection("oplog.rs")
+ .find({ts: {$gte: oplogStart}})
+ .sort({$natural: 1})
+ .tailable(true)
+ .batchSize(100);
+ assert(cursor.hasNext());
+ assert.eq(oplogStart, cursor.next()["ts"]);
- // While this method wants to capture 1000 timestamps, the cursor has a batch size
- // of 100 and this loop makes 200 iterations before getting a new cursor from a
- // fresh query. The goal is to exercise getMores, which use different code paths
- // for establishing their oplog reader transactions.
- for (let num = 0; num < 200 && timestamps.length < 1000; ++num) {
- try {
- if (cursor.hasNext() == false) {
- break;
- }
- } catch (exc) {
+ // While this method wants to capture 1000 timestamps, the cursor has a batch size
+ // of 100 and this loop makes 200 iterations before getting a new cursor from a
+ // fresh query. The goal is to exercise getMores, which use different code paths
+ // for establishing their oplog reader transactions.
+ for (let num = 0; num < 200 && timestamps.length < 1000; ++num) {
+ try {
+ if (cursor.hasNext() == false) {
break;
}
- let ts = cursor.next()["ts"];
- timestamps.push(ts);
- oplogStart = ts;
+ } catch (exc) {
+ break;
}
+ let ts = cursor.next()["ts"];
+ timestamps.push(ts);
+ oplogStart = ts;
}
+ }
- jsTestLog({"Verifying": node.host, "StartTs": timestamps[0], "EndTs": timestamps[999]});
- oplogStart = timestamps[0];
- let cursor =
- local.getCollection("oplog.rs").find({ts: {$gte: oplogStart}}).sort({$natural: 1});
- for (let observedTsIdx in timestamps) {
- let observedTs = timestamps[observedTsIdx];
- assert(cursor.hasNext());
- let actualTs = cursor.next()["ts"];
- assert.eq(actualTs, observedTs, function() {
- let prev = null;
- let next = null;
- if (observedTsIdx > 0) {
- prev = timestamps[observedTsIdx - 1];
- }
- if (observedTsIdx + 1 < timestamps.length) {
- next = timestamps[observedTsIdx + 1];
- }
+ jsTestLog({"Verifying": node.host, "StartTs": timestamps[0], "EndTs": timestamps[999]});
+ oplogStart = timestamps[0];
+ let cursor =
+ local.getCollection("oplog.rs").find({ts: {$gte: oplogStart}}).sort({$natural: 1});
+ for (let observedTsIdx in timestamps) {
+ let observedTs = timestamps[observedTsIdx];
+ assert(cursor.hasNext());
+ let actualTs = cursor.next()["ts"];
+ assert.eq(actualTs, observedTs, function() {
+ let prev = null;
+ let next = null;
+ if (observedTsIdx > 0) {
+ prev = timestamps[observedTsIdx - 1];
+ }
+ if (observedTsIdx + 1 < timestamps.length) {
+ next = timestamps[observedTsIdx + 1];
+ }
- return tojson({
- "Missing": actualTs,
- "ObservedIdx": observedTsIdx,
- "PrevObserved": prev,
- "NextObserved": next
- });
+ return tojson({
+ "Missing": actualTs,
+ "ObservedIdx": observedTsIdx,
+ "PrevObserved": prev,
+ "NextObserved": next
});
- }
- };
+ });
+ }
+ };
- jsTestLog({"Testing": node.host});
- testOplog(node);
- }
- jsTestLog("Stopping writers.");
- stopLatch.countDown();
- writers.forEach((writer) => {
- writer.join();
- });
+ jsTestLog({"Testing": node.host});
+ testOplog(node);
+}
+jsTestLog("Stopping writers.");
+stopLatch.countDown();
+writers.forEach((writer) => {
+ writer.join();
+});
- replTest.stopSet();
+replTest.stopSet();
})();
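Aside: the visibility check above amounts to tailing the oplog and confirming timestamps never appear out of order. A condensed sketch of that loop, assuming a member connection `node` in a jstest shell (bsonWoCompare stands in for the test's inline comparison):

    // Sketch: tail the oplog from its newest entry; timestamps must be monotonic.
    var local = node.getDB("local");
    var start = local.oplog.rs.find().sort({$natural: -1}).limit(-1).next().ts;
    var cursor =
        local.oplog.rs.find({ts: {$gte: start}}).sort({$natural: 1}).tailable(true);
    var prev = null;
    for (var i = 0; i < 100 && cursor.hasNext(); ++i) {
        var ts = cursor.next().ts;
        // An out-of-order timestamp would indicate a visibility hole.
        assert(prev === null || bsonWoCompare({ts: prev}, {ts: ts}) <= 0);
        prev = ts;
    }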
diff --git a/jstests/replsets/oplog_wallclock.js b/jstests/replsets/oplog_wallclock.js
index 2da05916c1f..a744c207d7e 100644
--- a/jstests/replsets/oplog_wallclock.js
+++ b/jstests/replsets/oplog_wallclock.js
@@ -1,30 +1,30 @@
// oplog should contain the field "wall" with wall clock timestamps.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var assertLastOplogHasWT = function(primary, msg) {
- const opLogEntry = getLatestOp(primary);
- assert(opLogEntry.hasOwnProperty('wall'),
-               'oplog entry must contain wall field: ' + tojson(opLogEntry));
- };
+var assertLastOplogHasWT = function(primary, msg) {
+ const opLogEntry = getLatestOp(primary);
+ assert(opLogEntry.hasOwnProperty('wall'),
+           'oplog entry must contain wall field: ' + tojson(opLogEntry));
+};
- var name = 'wt_test_coll';
- var replSet = new ReplSetTest({nodes: 1, oplogSize: 2});
- replSet.startSet();
- replSet.initiate();
+var name = 'wt_test_coll';
+var replSet = new ReplSetTest({nodes: 1, oplogSize: 2});
+replSet.startSet();
+replSet.initiate();
- var primary = replSet.getPrimary();
- var collection = primary.getDB('test').getCollection(name);
+var primary = replSet.getPrimary();
+var collection = primary.getDB('test').getCollection(name);
- assert.writeOK(collection.insert({_id: 1, val: 'x'}));
- assertLastOplogHasWT(primary, 'insert');
+assert.writeOK(collection.insert({_id: 1, val: 'x'}));
+assertLastOplogHasWT(primary, 'insert');
- assert.writeOK(collection.update({_id: 1}, {val: 'y'}));
- assertLastOplogHasWT(primary, 'update');
+assert.writeOK(collection.update({_id: 1}, {val: 'y'}));
+assertLastOplogHasWT(primary, 'update');
- assert.writeOK(collection.remove({_id: 1}));
- assertLastOplogHasWT(primary, 'remove');
+assert.writeOK(collection.remove({_id: 1}));
+assertLastOplogHasWT(primary, 'remove');
- replSet.stopSet();
+replSet.stopSet();
})();
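Aside: a minimal standalone check of the same invariant (every new oplog entry carries a wall clock time), assuming a primary connection `primary` in a jstest shell; the collection name is illustrative:

    // Sketch: verify the newest oplog entry has a Date-valued "wall" field.
    assert.writeOK(primary.getDB("test").wall_check.insert({x: 1}));
    var entry = primary.getDB("local").oplog.rs.find().sort({$natural: -1}).next();
    assert(entry.hasOwnProperty("wall"), tojson(entry));
    assert(entry.wall instanceof Date, "wall should be a wall clock Date");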
diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js
index acd72662574..867c6258441 100644
--- a/jstests/replsets/optime.js
+++ b/jstests/replsets/optime.js
@@ -40,11 +40,11 @@ function optimesAndWallTimesAreEqual(replTest, isPersistent) {
if (timestampCompare(prevOptime, currOptime) != 0 ||
wallTimeCompare(prevAppliedWallTime, currAppliedWallTime) != 0 ||
(isPersistent && wallTimeCompare(prevDurableWallTime, currDurableWallTime) != 0)) {
- jsTest.log("optimesAndWallTimesAreEqual returning false match, prevOptime: " +
- prevOptime + " latestOptime: " + currOptime + " prevAppliedWallTime: " +
- prevAppliedWallTime + " latestWallTime: " + currAppliedWallTime +
- " prevDurableWallTime: " + prevDurableWallTime + " latestDurableWallTime: " +
- currDurableWallTime);
+ jsTest.log(
+ "optimesAndWallTimesAreEqual returning false match, prevOptime: " + prevOptime +
+ " latestOptime: " + currOptime + " prevAppliedWallTime: " + prevAppliedWallTime +
+ " latestWallTime: " + currAppliedWallTime + " prevDurableWallTime: " +
+ prevDurableWallTime + " latestDurableWallTime: " + currDurableWallTime);
replTest.dumpOplog(replTest.nodes[i], {}, 20);
replTest.dumpOplog(replTest.nodes[i - 1], {}, 20);
return false;
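Aside: the optimes and wall times compared above come from replSetGetStatus. A sketch that pulls and compares the applied optimes of two members, assuming connections `primary` and `secondary` (timestampCompare and wallTimeCompare are the test's own helpers, so plain bsonWoCompare is used here):

    // Sketch: compare applied optimes of two members via replSetGetStatus.
    var s1 = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
    var s2 = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
    var ts1 = s1.optimes.appliedOpTime.ts;
    var ts2 = s2.optimes.appliedOpTime.ts;
    jsTestLog("applied optimes equal: " +
              (bsonWoCompare({ts: ts1}, {ts: ts2}) === 0));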
diff --git a/jstests/replsets/prepare_conflict_read_concern_behavior.js b/jstests/replsets/prepare_conflict_read_concern_behavior.js
index a4c14ae0a46..22515e3eb73 100644
--- a/jstests/replsets/prepare_conflict_read_concern_behavior.js
+++ b/jstests/replsets/prepare_conflict_read_concern_behavior.js
@@ -20,330 +20,322 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const conn = replTest.getPrimary();
-
- const failureTimeout = 1 * 1000; // 1 second.
- const successTimeout = 5 * 60 * 1000; // 5 minutes.
- const dbName = "test";
- const collName = "prepare_conflict_read_concern_behavior";
- const collName2 = "prepare_conflict_read_concern_behavior2";
- const testDB = conn.getDB(dbName);
- const testColl = testDB.getCollection(collName);
- const testColl2 = testDB.getCollection(collName2);
-
- const secondary = replTest.getSecondary();
- const secondaryTestDB = secondary.getDB(dbName);
-
- // Turn off timestamp reaping so that clusterTimeBeforePrepare doesn't get too old.
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const conn = replTest.getPrimary();
+
+const failureTimeout = 1 * 1000; // 1 second.
+const successTimeout = 5 * 60 * 1000; // 5 minutes.
+const dbName = "test";
+const collName = "prepare_conflict_read_concern_behavior";
+const collName2 = "prepare_conflict_read_concern_behavior2";
+const testDB = conn.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+const testColl2 = testDB.getCollection(collName2);
+
+const secondary = replTest.getSecondary();
+const secondaryTestDB = secondary.getDB(dbName);
+
+// Turn off timestamp reaping so that clusterTimeBeforePrepare doesn't get too old.
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+}));
+
+function runTest() {
+ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+ testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}}));
+
+ const session = conn.startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(collName);
+
+ const read = function(read_concern, timeout, db, coll, num_expected) {
+ let res = db.runCommand({
+ find: coll,
+ filter: {in_prepared_txn: false},
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+
+ if (num_expected) {
+ assert(res.cursor, tojson(res));
+ assert.eq(res.cursor.firstBatch.length, num_expected, tojson(res));
+ }
+ return res;
+ };
+
+ const dbHash = function(read_concern, db, timeout = successTimeout) {
+ let res = db.runCommand({
+ dbHash: 1,
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+
+ return res;
+ };
+
+ const mapReduce = function(
+ read_concern, db, outOptions = {inline: 1}, timeout = successTimeout) {
+ let map = function() {
+ emit(this.a, this.a);
+ };
+ let reduce = function(key, vals) {
+ return 1;
+ };
+ let res = db.runCommand({
+ mapReduce: collName,
+ map: map,
+ reduce: reduce,
+ out: outOptions,
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+ return res;
+ };
+
+ const validate = function(read_concern, db, timeout = successTimeout) {
+ let res = db.runCommand({
+ validate: collName,
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+
+ return res;
+ };
+
+ assert.commandWorked(
+ testColl.insert({_id: 1, in_prepared_txn: false}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(testColl.insert({_id: 2, in_prepared_txn: false}));
+ assert.commandWorked(testColl2.insert({_id: 1, in_prepared_txn: false}));
+
+ session.startTransaction();
+ const clusterTimeBeforePrepare =
+ assert.commandWorked(sessionColl.runCommand("insert", {documents: [{_id: 3}]}))
+ .operationTime;
+ assert.commandWorked(sessionColl.update({_id: 2}, {_id: 2, in_prepared_txn: true}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const clusterTimeAfterPrepare =
+ assert
+ .commandWorked(testColl.runCommand(
+ "insert",
+ {documents: [{_id: 4, in_prepared_txn: false}], writeConcern: {w: "majority"}}))
+ .operationTime;
+
+ jsTestLog("prepareTimestamp: " + prepareTimestamp + " clusterTimeBeforePrepare: " +
+ clusterTimeBeforePrepare + " clusterTimeAfterPrepare: " + clusterTimeAfterPrepare);
+
+ assert.gt(prepareTimestamp, clusterTimeBeforePrepare);
+ assert.gt(clusterTimeAfterPrepare, prepareTimestamp);
+
+ jsTestLog("Test read with read concern 'majority' doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'majority'}, successTimeout, testDB, collName, 3));
+
+ jsTestLog("Test read with read concern 'local' doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'local'}, successTimeout, testDB, collName, 3));
+
+ jsTestLog("Test read with read concern 'available' doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'available'}, successTimeout, testDB, collName, 3));
+
+ jsTestLog("Test read with read concern 'linearizable' blocks on a prepared transaction.");
+ assert.commandFailedWithCode(read({level: 'linearizable'}, failureTimeout, testDB, collName),
+ ErrorCodes.MaxTimeMSExpired);
+
+ // TODO SERVER-36953: uncomment this test
+ // jsTestLog("Test afterClusterTime read before prepareTimestamp doesn't block on a " +
+ // "prepared transaction.");
+ // assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeBeforePrepare},
+ // successTimeout,
+ // testDB,
+ // collName,
+ // 2));
+
+ jsTestLog("Test afterClusterTime read after prepareTimestamp blocks on a prepared " +
+ "transaction.");
+ assert.commandFailedWithCode(read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
+ failureTimeout,
+ testDB,
+ collName),
+ ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Test read with afterClusterTime after prepareTimestamp on non-prepared " +
+ "documents doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
+ successTimeout,
+ testDB,
+ collName2,
+ 1));
+
+ // dbHash does not accept a non local read concern or afterClusterTime and it also sets
+ // ignore_prepare=true during its execution. Therefore, dbHash should never get prepare
+ // conflicts on secondaries. dbHash acquires collection S lock for reads and it will be
+ // blocked by a prepared transaction that writes to the same collection if it is run on
+ // primaries.
+ jsTestLog("Test dbHash doesn't support afterClusterTime read.");
+ assert.commandFailedWithCode(
+ dbHash({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test dbHash doesn't support read concern other than local.");
+ assert.commandWorked(dbHash({level: 'local'}, secondaryTestDB));
+ assert.commandFailedWithCode(dbHash({level: 'available'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(dbHash({level: 'majority'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(dbHash({level: 'snapshot'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(dbHash({level: 'linearizable'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test dbHash on secondary doesn't block on a prepared transaction.");
+ assert.commandWorked(dbHash({}, secondaryTestDB));
+ jsTestLog("Test dbHash on primary blocks on collection S lock which conflicts with " +
+ "a prepared transaction.");
+ assert.commandFailedWithCode(dbHash({}, testDB, failureTimeout), ErrorCodes.MaxTimeMSExpired);
+
+ // mapReduce does not accept a non local read concern or afterClusterTime and it also sets
+ // ignore_prepare=true during its read phase. As mapReduce that writes is not allowed to run
+ // on secondaries, mapReduce should never get prepare conflicts on secondaries. mapReduce
+ // acquires collection S lock for reads and it will be blocked by a prepared transaction
+ // that writes to the same collection if it is run on primaries.
+ jsTestLog("Test mapReduce doesn't support afterClusterTime read.");
+ assert.commandFailedWithCode(
+ mapReduce({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test mapReduce doesn't support read concern other than local.");
+ assert.commandWorked(mapReduce({level: 'local'}, secondaryTestDB));
+ assert.commandFailedWithCode(mapReduce({level: 'available'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(mapReduce({level: 'majority'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(mapReduce({level: 'snapshot'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(mapReduce({level: 'linearizable'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test mapReduce that writes is not allowed to run on secondaries.");
+ // It currently returns ErrorCodes.PrimarySteppedDown in this case.
+ assert.commandFailedWithCode(mapReduce({}, secondaryTestDB, "outColl"),
+ [ErrorCodes.InvalidOptions, ErrorCodes.PrimarySteppedDown]);
+
+ jsTestLog("Test mapReduce on secondary doesn't block on a prepared transaction.");
+ assert.commandWorked(mapReduce({}, secondaryTestDB));
+
+ jsTestLog("Test mapReduce on primary blocks on collection S lock which conflicts with " +
+ "a prepared transaction.");
+ assert.commandFailedWithCode(mapReduce({}, testDB, {inline: 1}, failureTimeout),
+ ErrorCodes.MaxTimeMSExpired);
+
+ // validate does not accept a non local read concern or afterClusterTime and it also sets
+ // ignore_prepare=true during its execution. Therefore, validate should never get prepare
+ // conflicts on secondaries. validate acquires collection X lock during its execution and it
+ // will be blocked by a prepared transaction that writes to the same collection if it is run
+ // on primaries.
+ jsTestLog("Test validate doesn't support afterClusterTime read.");
+ assert.commandFailedWithCode(
+ validate({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ jsTestLog("Test validate doesn't support read concern other than local.");
+ assert.commandWorked(validate({level: 'local'}, secondaryTestDB));
+ assert.commandFailedWithCode(validate({level: 'available'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(validate({level: 'majority'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(validate({level: 'snapshot'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(validate({level: 'linearizable'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test validate on secondary doesn't block on a prepared transaction.");
+ assert.commandWorked(validate({}, secondaryTestDB));
+ jsTestLog("Test validate on primary blocks on collection X lock which conflicts with " +
+ "a prepared transaction.");
+ assert.commandFailedWithCode(validate({}, testDB, failureTimeout), ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Test read from an update blocks on a prepared transaction.");
+ assert.commandFailedWithCode(testDB.runCommand({
+ update: collName,
+ updates: [{q: {_id: 2}, u: {_id: 2, in_prepared_txn: false, a: 1}}],
+ maxTimeMS: failureTimeout,
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+
+ // Create a second session and start a new transaction to test snapshot reads.
+ const session2 = conn.startSession({causalConsistency: false});
+ const sessionDB2 = session2.getDatabase(dbName);
+ const sessionColl2 = sessionDB2.getCollection(collName);
+ // This makes future reads in the transaction use a read timestamp after the
+ // prepareTimestamp.
+ session2.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTimeAfterPrepare}});
+
+ jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
+ "prepareTimestamp on non-prepared documents doesn't block on a prepared " +
+ "transaction.");
+ assert.commandWorked(read({}, successTimeout, sessionDB2, collName2, 1));
+
+ jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
+ "prepareTimestamp blocks on a prepared transaction.");
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Test read with read concern 'snapshot' and atClusterTime before " +
+ "prepareTimestamp doesn't block on a prepared transaction.");
+ session2.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTimeBeforePrepare}});
+ assert.commandWorked(read({}, successTimeout, sessionDB2, collName, 2));
+ assert.commandWorked(session2.abortTransaction_forTesting());
+
+ jsTestLog("Test read from a transaction with read concern 'majority' blocks on a prepared" +
+ " transaction.");
+ session2.startTransaction({readConcern: {level: "majority"}});
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Test read from a transaction with read concern 'local' blocks on a prepared " +
+ "transaction.");
+ session2.startTransaction({readConcern: {level: "local"}});
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Test read from a transaction with no read concern specified blocks on a " +
+ "prepared transaction.");
+ session2.startTransaction();
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ session2.endSession();
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+ session.endSession();
+}
+
+try {
+ runTest();
+} finally {
+ // Turn this failpoint off so that it doesn't impact other tests in the suite.
assert.commandWorked(testDB.adminCommand({
configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
+ mode: "off",
}));
+}
- function runTest() {
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}}));
-
- const session = conn.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- const read = function(read_concern, timeout, db, coll, num_expected) {
- let res = db.runCommand({
- find: coll,
- filter: {in_prepared_txn: false},
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
-
- if (num_expected) {
- assert(res.cursor, tojson(res));
- assert.eq(res.cursor.firstBatch.length, num_expected, tojson(res));
- }
- return res;
- };
-
- const dbHash = function(read_concern, db, timeout = successTimeout) {
- let res = db.runCommand({
- dbHash: 1,
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
-
- return res;
- };
-
- const mapReduce = function(
- read_concern, db, outOptions = {inline: 1}, timeout = successTimeout) {
- let map = function() {
- emit(this.a, this.a);
- };
- let reduce = function(key, vals) {
- return 1;
- };
- let res = db.runCommand({
- mapReduce: collName,
- map: map,
- reduce: reduce,
- out: outOptions,
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
- return res;
- };
-
- const validate = function(read_concern, db, timeout = successTimeout) {
- let res = db.runCommand({
- validate: collName,
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
-
- return res;
- };
-
- assert.commandWorked(
- testColl.insert({_id: 1, in_prepared_txn: false}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(testColl.insert({_id: 2, in_prepared_txn: false}));
- assert.commandWorked(testColl2.insert({_id: 1, in_prepared_txn: false}));
-
- session.startTransaction();
- const clusterTimeBeforePrepare =
- assert.commandWorked(sessionColl.runCommand("insert", {documents: [{_id: 3}]}))
- .operationTime;
- assert.commandWorked(sessionColl.update({_id: 2}, {_id: 2, in_prepared_txn: true}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const clusterTimeAfterPrepare =
- assert
- .commandWorked(testColl.runCommand(
- "insert",
- {documents: [{_id: 4, in_prepared_txn: false}], writeConcern: {w: "majority"}}))
- .operationTime;
-
- jsTestLog("prepareTimestamp: " + prepareTimestamp + " clusterTimeBeforePrepare: " +
- clusterTimeBeforePrepare + " clusterTimeAfterPrepare: " +
- clusterTimeAfterPrepare);
-
- assert.gt(prepareTimestamp, clusterTimeBeforePrepare);
- assert.gt(clusterTimeAfterPrepare, prepareTimestamp);
-
- jsTestLog(
- "Test read with read concern 'majority' doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'majority'}, successTimeout, testDB, collName, 3));
-
- jsTestLog("Test read with read concern 'local' doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'local'}, successTimeout, testDB, collName, 3));
-
- jsTestLog(
- "Test read with read concern 'available' doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'available'}, successTimeout, testDB, collName, 3));
-
- jsTestLog("Test read with read concern 'linearizable' blocks on a prepared transaction.");
- assert.commandFailedWithCode(
- read({level: 'linearizable'}, failureTimeout, testDB, collName),
- ErrorCodes.MaxTimeMSExpired);
-
- // TODO SERVER-36953: uncomment this test
- // jsTestLog("Test afterClusterTime read before prepareTimestamp doesn't block on a " +
- // "prepared transaction.");
- // assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeBeforePrepare},
- // successTimeout,
- // testDB,
- // collName,
- // 2));
-
- jsTestLog("Test afterClusterTime read after prepareTimestamp blocks on a prepared " +
- "transaction.");
- assert.commandFailedWithCode(
- read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
- failureTimeout,
- testDB,
- collName),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Test read with afterClusterTime after prepareTimestamp on non-prepared " +
- "documents doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
- successTimeout,
- testDB,
- collName2,
- 1));
-
- // dbHash does not accept a non-local read concern or afterClusterTime, and it also sets
- // ignore_prepare=true during its execution. Therefore, dbHash should never get prepare
- // conflicts on secondaries. dbHash acquires the collection S lock for reads, so it will be
- // blocked by a prepared transaction that writes to the same collection if it is run on a
- // primary.
- jsTestLog("Test dbHash doesn't support afterClusterTime read.");
- assert.commandFailedWithCode(
- dbHash({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test dbHash doesn't support read concern other than local.");
- assert.commandWorked(dbHash({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(dbHash({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(dbHash({level: 'majority'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(dbHash({level: 'snapshot'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(dbHash({level: 'linearizable'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test dbHash on secondary doesn't block on a prepared transaction.");
- assert.commandWorked(dbHash({}, secondaryTestDB));
- jsTestLog("Test dbHash on primary blocks on collection S lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(dbHash({}, testDB, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
-
- // mapReduce does not accept a non-local read concern or afterClusterTime, and it also sets
- // ignore_prepare=true during its read phase. Because a mapReduce that writes is not allowed
- // to run on secondaries, mapReduce should never get prepare conflicts on secondaries.
- // mapReduce acquires the collection S lock for reads, so it will be blocked by a prepared
- // transaction that writes to the same collection if it is run on a primary.
- jsTestLog("Test mapReduce doesn't support afterClusterTime read.");
- assert.commandFailedWithCode(
- mapReduce({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test mapReduce doesn't support read concern other than local.");
- assert.commandWorked(mapReduce({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(mapReduce({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(mapReduce({level: 'majority'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(mapReduce({level: 'snapshot'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(mapReduce({level: 'linearizable'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test mapReduce that writes is not allowed to run on secondaries.");
- // It currently returns ErrorCodes.PrimarySteppedDown in this case.
- assert.commandFailedWithCode(mapReduce({}, secondaryTestDB, "outColl"),
- [ErrorCodes.InvalidOptions, ErrorCodes.PrimarySteppedDown]);
-
- jsTestLog("Test mapReduce on secondary doesn't block on a prepared transaction.");
- assert.commandWorked(mapReduce({}, secondaryTestDB));
-
- jsTestLog("Test mapReduce on primary blocks on collection S lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(mapReduce({}, testDB, {inline: 1}, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
-
- // validate does not accept a non-local read concern or afterClusterTime, and it also sets
- // ignore_prepare=true during its execution. Therefore, validate should never get prepare
- // conflicts on secondaries. validate acquires the collection X lock during its execution, so
- // it will be blocked by a prepared transaction that writes to the same collection if it is
- // run on a primary.
- jsTestLog("Test validate doesn't support afterClusterTime read.");
- assert.commandFailedWithCode(
- validate({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- jsTestLog("Test validate doesn't support read concern other than local.");
- assert.commandWorked(validate({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(validate({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(validate({level: 'majority'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(validate({level: 'snapshot'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(validate({level: 'linearizable'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test validate on secondary doesn't block on a prepared transaction.");
- assert.commandWorked(validate({}, secondaryTestDB));
- jsTestLog("Test validate on primary blocks on collection X lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(validate({}, testDB, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Test read from an update blocks on a prepared transaction.");
- assert.commandFailedWithCode(testDB.runCommand({
- update: collName,
- updates: [{q: {_id: 2}, u: {_id: 2, in_prepared_txn: false, a: 1}}],
- maxTimeMS: failureTimeout,
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- // Create a second session and start a new transaction to test snapshot reads.
- const session2 = conn.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- // This makes future reads in the transaction use a read timestamp after the
- // prepareTimestamp.
- session2.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTimeAfterPrepare}});
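- // (Illustrative sketch, not part of the test: for the first read in this transaction the
- // shell sends roughly {find: <collName>, readConcern: {level: "snapshot", atClusterTime:
- // clusterTimeAfterPrepare}, lsid: <session id>, txnNumber: <n>, startTransaction: true,
- // autocommit: false}, which is what pins the transaction's read timestamp.)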
-
- jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
- "prepareTimestamp on non-prepared documents doesn't block on a prepared " +
- "transaction.");
- assert.commandWorked(read({}, successTimeout, sessionDB2, collName2, 1));
-
- jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
- "prepareTimestamp blocks on a prepared transaction.");
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test read with read concern 'snapshot' and atClusterTime before " +
- "prepareTimestamp doesn't block on a prepared transaction.");
- session2.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTimeBeforePrepare}});
- assert.commandWorked(read({}, successTimeout, sessionDB2, collName, 2));
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- jsTestLog("Test read from a transaction with read concern 'majority' blocks on a prepared" +
- " transaction.");
- session2.startTransaction({readConcern: {level: "majority"}});
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test read from a transaction with read concern 'local' blocks on a prepared " +
- "transaction.");
- session2.startTransaction({readConcern: {level: "local"}});
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test read from a transaction with no read concern specified blocks on a " +
- "prepared transaction.");
- session2.startTransaction();
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session2.endSession();
-
- assert.commandWorked(session.abortTransaction_forTesting());
- session.endSession();
- }
-
- try {
- runTest();
- } finally {
- // Turn this failpoint off so that it doesn't impact other tests in the suite.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "off",
- }));
- }
-
- replTest.stopSet();
-
+replTest.stopSet();
}());
diff --git a/jstests/replsets/prepare_failover_rollback_commit.js b/jstests/replsets/prepare_failover_rollback_commit.js
index 979ba52eea8..487e4620ea6 100644
--- a/jstests/replsets/prepare_failover_rollback_commit.js
+++ b/jstests/replsets/prepare_failover_rollback_commit.js
@@ -9,59 +9,59 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "prepare_failover_rollback_commit";
+const dbName = "test";
+const collName = "prepare_failover_rollback_commit";
- const rollbackTest = new RollbackTest(collName);
+const rollbackTest = new RollbackTest(collName);
- let primary = rollbackTest.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+let primary = rollbackTest.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- // First create the collection for all.
- assert.commandWorked(testColl.insert({"a": "baseDoc"}));
+// First create the collection for all.
+assert.commandWorked(testColl.insert({"a": "baseDoc"}));
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({"b": "transactionDoc"}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({"b": "transactionDoc"}));
- // Prepare a transaction. This will be replicated to the secondary.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// Prepare a transaction. This will be replicated to the secondary.
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- // Do a failover first, without rolling back any of the data from this test. We want the
- // current secondary to become primary and inherit the prepared transaction.
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+// Do a failover first, without rolling back any of the data from this test. We want the
+// current secondary to become primary and inherit the prepared transaction.
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
- // Now set up a rollback scenario for that new primary.
- rollbackTest.transitionToRollbackOperations();
+// Now set up a rollback scenario for that new primary.
+rollbackTest.transitionToRollbackOperations();
- // Create a proxy session to reuse the session state of the old primary.
- primary = rollbackTest.getPrimary();
- const newSession1 = new _DelegatingDriverSession(primary, session);
+// Create a proxy session to reuse the session state of the old primary.
+primary = rollbackTest.getPrimary();
+const newSession1 = new _DelegatingDriverSession(primary, session);
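+// (Illustrative note: the delegating session forwards commands to the new primary while
+// reusing the original session's id and transaction number, which is what allows a
+// transaction prepared through the old session to be committed from here.)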
- // Commit the transaction on this primary. We expect the commit to roll back.
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession1, prepareTimestamp));
+// Commit the transaction on this primary. We expect the commit to roll back.
+assert.commandWorked(PrepareHelpers.commitTransaction(newSession1, prepareTimestamp));
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
- // Create a proxy session to reuse the session state of the old primary.
- primary = rollbackTest.getPrimary();
- const newSession2 = new _DelegatingDriverSession(primary, session);
+// Create a proxy session to reuse the session state of the old primary.
+primary = rollbackTest.getPrimary();
+const newSession2 = new _DelegatingDriverSession(primary, session);
- // Commit the transaction for all to conclude the test.
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession2, prepareTimestamp));
+// Commit the transaction for all to conclude the test.
+assert.commandWorked(PrepareHelpers.commitTransaction(newSession2, prepareTimestamp));
- rollbackTest.stop();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/prepare_prepared_transaction_wc_timeout.js b/jstests/replsets/prepare_prepared_transaction_wc_timeout.js
index cbda29be3b1..536adff487a 100644
--- a/jstests/replsets/prepare_prepared_transaction_wc_timeout.js
+++ b/jstests/replsets/prepare_prepared_transaction_wc_timeout.js
@@ -5,76 +5,75 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/libs/retryable_writes_util.js");
- load("jstests/libs/write_concern_util.js");
+"use strict";
+load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/write_concern_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const kNodes = 2;
+const kNodes = 2;
- const replTest = new ReplSetTest({nodes: kNodes});
- replTest.startSet({verbose: 1});
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: kNodes});
+replTest.startSet({verbose: 1});
+replTest.initiate();
- const priConn = replTest.getPrimary();
- const secConn = replTest.getSecondary();
+const priConn = replTest.getPrimary();
+const secConn = replTest.getSecondary();
- const lsid = UUID();
+const lsid = UUID();
- // Insert something into the user collection.
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(34),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
+// Insert something into the user collection.
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(34),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
- // Since we must wait for writeConcern: majority in order for the prepareTimestamp to be
- // committed, this test case will time out when we stop replication on the secondary.
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 50}, {_id: 70}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
+// Since we must wait for writeConcern: majority in order for the prepareTimestamp to be
+// committed, this test case will time out when we stop replication on the secondary.
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 50}, {_id: 70}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ });
- });
+// Explicitly abort transaction 39 because we cannot shut down the replica set when there
+// is a prepared transaction in progress.
+// TODO: SERVER-35817 Allow shutdowns when a prepared transaction is in progress.
+assert.commandWorked(priConn.getDB('admin').runCommand({
+ abortTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+}));
- // Explicitly abort transaction 39 because we cannot shut down the replica set when there
- // is a prepared transaction in progress.
- // TODO: SERVER-35817 Allow shutdowns when a prepared transaction is in progress.
- assert.commandWorked(priConn.getDB('admin').runCommand({
- abortTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }));
-
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/prepare_survives_primary_reconfig_failover.js b/jstests/replsets/prepare_survives_primary_reconfig_failover.js
index 8db3322b796..b2e0be87494 100644
--- a/jstests/replsets/prepare_survives_primary_reconfig_failover.js
+++ b/jstests/replsets/prepare_survives_primary_reconfig_failover.js
@@ -5,10 +5,10 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
+"use strict";
+load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
- let testName = "prepare_survives_primary_reconfig_failover";
+let testName = "prepare_survives_primary_reconfig_failover";
- testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ true);
+testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ true);
})();
diff --git a/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js b/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js
index bf4a0c565a9..ddeda40b99d 100644
--- a/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js
+++ b/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js
@@ -6,10 +6,10 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
+"use strict";
+load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
- let testName = "prepare_survives_reconfig_via_heartbeat_failover";
+let testName = "prepare_survives_reconfig_via_heartbeat_failover";
- testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ false);
+testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ false);
})();
diff --git a/jstests/replsets/prepare_transaction_fails_on_standalone.js b/jstests/replsets/prepare_transaction_fails_on_standalone.js
index 14eb17656a5..b9ba8813722 100644
--- a/jstests/replsets/prepare_transaction_fails_on_standalone.js
+++ b/jstests/replsets/prepare_transaction_fails_on_standalone.js
@@ -4,18 +4,18 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- const standalone = MongoRunner.runMongod();
+const standalone = MongoRunner.runMongod();
- const collName = "prepare_transaction_fails_on_standalone";
- const dbName = "test";
- const testDB = standalone.getDB(dbName);
+const collName = "prepare_transaction_fails_on_standalone";
+const dbName = "test";
+const testDB = standalone.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- assert.commandFailedWithCode(testDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(testDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
}());
diff --git a/jstests/replsets/prepare_transaction_fails_with_arbiters.js b/jstests/replsets/prepare_transaction_fails_with_arbiters.js
index 672ef7c147a..8190eb0f019 100644
--- a/jstests/replsets/prepare_transaction_fails_with_arbiters.js
+++ b/jstests/replsets/prepare_transaction_fails_with_arbiters.js
@@ -6,36 +6,35 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "prepare_transaction_fails_with_arbiters";
- const rst = new ReplSetTest({name: name, nodes: 2});
- const nodes = rst.nodeList();
+const name = "prepare_transaction_fails_with_arbiters";
+const rst = new ReplSetTest({name: name, nodes: 2});
+const nodes = rst.nodeList();
- rst.startSet();
- rst.initiate({
- "_id": name,
- "members":
- [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "arbiterOnly": true}]
- });
+rst.startSet();
+rst.initiate({
+ "_id": name,
+ "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "arbiterOnly": true}]
+});
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 42}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 42}));
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/prepare_transaction_fails_without_majority_reads.js b/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
index 30cbeac87ee..f13c4f141e4 100644
--- a/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
+++ b/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
@@ -6,29 +6,29 @@
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "false"}});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "false"}});
+rst.startSet();
+rst.initiate();
- const dbName = "test";
- const collName = "prepare_transaction_fails_without_majority_reads";
+const dbName = "test";
+const collName = "prepare_transaction_fails_without_majority_reads";
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 42}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 42}));
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/prepare_transaction_index_build.js b/jstests/replsets/prepare_transaction_index_build.js
index cac5445c139..e4dc5695913 100644
--- a/jstests/replsets/prepare_transaction_index_build.js
+++ b/jstests/replsets/prepare_transaction_index_build.js
@@ -8,76 +8,72 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "prepared_transactions_index_build";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; ++i) {
- bulk.insert({x: i});
- }
- assert.writeOK(bulk.execute());
-
- // Activate the failpoint to hang the index build on the secondary.
- secondary.getDB("admin").runCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'});
-
- jsTestLog("Starting a background index build.");
- assert.commandWorked(testColl.createIndexes([{x: 1}], {}, {writeConcern: {w: 2}}));
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Starting a transaction that should involve the index and putting it into prepare");
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({x: 1000}));
-
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
- jsTestLog("Prepared a transaction at " + prepareTimestamp);
-
- jsTestLog("Unblocking index build.");
-
- // Finish the index build.
- secondary.getDB("admin").runCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'});
-
- // It's illegal to commit a prepared transaction before its prepare oplog entry has been
- // majority committed. So wait for the prepare oplog entry to be majority committed before
- // issuing the commitTransaction command. We know the index build is also done if the prepare
- // has finished on the secondary.
- jsTestLog(
- "Waiting for prepare oplog entry to be majority committed and all index builds to finish on all nodes.");
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
-
- jsTestLog("Committing txn");
- // Commit the transaction.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
-
- jsTestLog("Testing index integrity");
- // Index should work.
- assert.eq(1000,
- secondary.getDB(dbName)
- .getCollection(collName)
- .find({x: 1000})
- .hint({x: 1})
- .toArray()[0]
- .x);
- jsTestLog("Shutting down the set");
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "prepared_transactions_index_build";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 10; ++i) {
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute());
+
+// Activate the failpoint to hang the index build on the secondary.
+secondary.getDB("admin").runCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'});
+
+jsTestLog("Starting a background index build.");
+assert.commandWorked(testColl.createIndexes([{x: 1}], {}, {writeConcern: {w: 2}}));
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Starting a transaction that should involve the index and putting it into prepare");
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({x: 1000}));
+
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+jsTestLog("Prepared a transaction at " + prepareTimestamp);
+
+jsTestLog("Unblocking index build.");
+
+// Finish the index build.
+secondary.getDB("admin").runCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'});
+
+// It's illegal to commit a prepared transaction before its prepare oplog entry has been
+// majority committed. So wait for the prepare oplog entry to be majority committed before
+// issuing the commitTransaction command. We know the index build is also done if the prepare
+// has finished on the secondary.
+jsTestLog(
+ "Waiting for prepare oplog entry to be majority committed and all index builds to finish on all nodes.");
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
+
+jsTestLog("Committing txn");
+// Commit the transaction.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
+
+jsTestLog("Testing index integrity");
+// Index should work.
+assert.eq(
+ 1000,
+ secondary.getDB(dbName).getCollection(collName).find({x: 1000}).hint({x: 1}).toArray()[0].x);
+jsTestLog("Shutting down the set");
+replTest.stopSet();
}());
diff --git a/jstests/replsets/prepare_transaction_read_at_cluster_time.js b/jstests/replsets/prepare_transaction_read_at_cluster_time.js
index 8b87ce293d1..53cdbe31c6c 100644
--- a/jstests/replsets/prepare_transaction_read_at_cluster_time.js
+++ b/jstests/replsets/prepare_transaction_read_at_cluster_time.js
@@ -7,154 +7,153 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/parallelTester.js");
-
- const runDBHashFn = (host, dbName, clusterTime) => {
- const conn = new Mongo(host);
- const db = conn.getDB(dbName);
-
- conn.setSlaveOk();
- let firstHash = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
-
- // This code will execute once the prepared transaction is committed as the call above will
- // be blocked until an abort or commit happens. Ensure that running dbHash here yields the
- // same result as above.
- let secondHash = assert.commandWorked(db.runCommand({dbHash: 1}));
-
- assert.eq(firstHash.collections, secondHash.collections);
- assert.eq(firstHash.md5, secondHash.md5);
-
- return firstHash;
- };
-
- const runFindFn = (host, dbName, collName, clusterTime) => {
- const conn = new Mongo(host);
- const db = conn.getDB(dbName);
-
- conn.setSlaveOk();
- assert.commandWorked(db.getSiblingDB(dbName).runCommand({
- find: collName,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
- };
-
- const assertOpHasPrepareConflict = (db, commandName) => {
- assert.soon(
- () => {
- const ops = db.currentOp({
- "command.$_internalReadAtClusterTime": {$exists: true},
- ["command." + commandName]: {$exists: true},
- }).inprog;
-
- if (ops.length === 1) {
- return ops[0].prepareReadConflicts > 0;
- }
-
- return false;
- },
- () =>
- `Failed to find '${commandName}' command in the ${db.getMongo().host} currentOp()` +
- ` output: ${tojson(db.currentOp())}`);
- };
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
-
- const replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- rst.initiate(replSetConfig);
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
-
- const dbName = "prepare_transaction_read_at_cluster_time";
- const collName = "testColl";
-
- const testDB = primary.getDB(dbName);
- const testDBSecondary = secondary.getDB(dbName);
-
- testDB.createCollection(collName);
- assert.commandWorked(testDB.getCollection(collName).insert({x: 0}));
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- // Perform a write inside of a prepared transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({x: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Perform a write outside of a prepared transaction. We wait for the write to be replicated
- // to the secondary because we're going to read from it at the returned operationTime.
- assert.commandWorked(testDB.getCollection(collName).insert({x: 2}, {writeConcern: {w: 2}}));
-
- // It should be possible for both dbHash and find to specify '$_internalReadAtClusterTime'
- // as the timestamp of the second write without error.
- let clusterTime = testDB.getSession().getOperationTime();
-
- // Run dbHash and find while the prepared transaction has not committed or aborted yet.
- // These should block until the prepared transaction commits or aborts if we specify
- // $_internalReadAtClusterTime to be the timestamp of the second write we did, outside of the
- // transaction.
- const dbHashPrimaryThread =
- new ScopedThread(runDBHashFn, primary.host, dbName, tojson(clusterTime));
- const dbHashSecondaryThread =
- new ScopedThread(runDBHashFn, secondary.host, dbName, tojson(clusterTime));
-
- dbHashPrimaryThread.start();
- dbHashSecondaryThread.start();
-
- assertOpHasPrepareConflict(testDB, "dbHash");
- assertOpHasPrepareConflict(testDBSecondary, "dbHash");
-
- // Run 'find' with '$_internalReadAtClusterTime' specified.
- const findPrimaryThread =
- new ScopedThread(runFindFn, primary.host, dbName, collName, tojson(clusterTime));
- const findSecondaryThread =
- new ScopedThread(runFindFn, secondary.host, dbName, collName, tojson(clusterTime));
-
- findPrimaryThread.start();
- findSecondaryThread.start();
-
- assertOpHasPrepareConflict(testDB, "find");
- assertOpHasPrepareConflict(testDBSecondary, "find");
-
- // Run a series of DDL operations which shouldn't block before committing the prepared
- // transaction.
- const otherDbName = "prepare_transaction_read_at_cluster_time_secondary_other";
- const otherTestDB = primary.getDB(otherDbName);
-
- assert.commandWorked(otherTestDB.runCommand({create: collName, writeConcern: {w: 2}}));
- assert.commandWorked(
- otherTestDB.runCommand({collMod: collName, validator: {v: 1}, writeConcern: {w: 2}}));
- assert.commandWorked(otherTestDB.runCommand(
- {createIndexes: collName, indexes: [{key: {x: 1}, name: 'x_1'}], writeConcern: {w: 2}}));
- assert.commandWorked(
- otherTestDB.runCommand({dropIndexes: collName, index: 'x_1', writeConcern: {w: 2}}));
-
- // Committing or aborting the transaction should unblock the parallel tasks.
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- session.endSession();
-
- dbHashPrimaryThread.join();
- dbHashSecondaryThread.join();
-
- // Ensure the dbHashes across the replica set match.
- const primaryDBHash = dbHashPrimaryThread.returnData();
- const secondaryDBHash = dbHashSecondaryThread.returnData();
-
- assert.eq(primaryDBHash.collections, secondaryDBHash.collections);
- assert.eq(primaryDBHash.md5, secondaryDBHash.md5);
-
- findPrimaryThread.join();
- findSecondaryThread.join();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/parallelTester.js");
+
+const runDBHashFn = (host, dbName, clusterTime) => {
+ const conn = new Mongo(host);
+ const db = conn.getDB(dbName);
+
+ conn.setSlaveOk();
+ let firstHash = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+
+ // This code will execute once the prepared transaction is committed as the call above will
+ // be blocked until an abort or commit happens. Ensure that running dbHash here yields the
+ // same result as above.
+ let secondHash = assert.commandWorked(db.runCommand({dbHash: 1}));
+
+ assert.eq(firstHash.collections, secondHash.collections);
+ assert.eq(firstHash.md5, secondHash.md5);
+
+ return firstHash;
+};
+
+const runFindFn = (host, dbName, collName, clusterTime) => {
+ const conn = new Mongo(host);
+ const db = conn.getDB(dbName);
+
+ conn.setSlaveOk();
+ assert.commandWorked(db.getSiblingDB(dbName).runCommand({
+ find: collName,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+};
+
+const assertOpHasPrepareConflict = (db, commandName) => {
+ assert.soon(
+ () => {
+ const ops = db.currentOp({
+ "command.$_internalReadAtClusterTime": {$exists: true},
+ ["command." + commandName]: {$exists: true},
+ }).inprog;
+
+ if (ops.length === 1) {
+ return ops[0].prepareReadConflicts > 0;
+ }
+
+ return false;
+ },
+ () => `Failed to find '${commandName}' command in the ${db.getMongo().host} currentOp()` +
+ ` output: ${tojson(db.currentOp())}`);
+};
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+
+const replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+rst.initiate(replSetConfig);
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+
+const dbName = "prepare_transaction_read_at_cluster_time";
+const collName = "testColl";
+
+const testDB = primary.getDB(dbName);
+const testDBSecondary = secondary.getDB(dbName);
+
+testDB.createCollection(collName);
+assert.commandWorked(testDB.getCollection(collName).insert({x: 0}));
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB[collName];
+
+// Perform a write inside of a prepared transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({x: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Perform a write outside of a prepared transaction. We wait for the write to be replicated
+// to the secondary because we're going to read from it at the returned operationTime.
+assert.commandWorked(testDB.getCollection(collName).insert({x: 2}, {writeConcern: {w: 2}}));
+
+// It should be possible for both dbHash and find to specify '$_internalReadAtClusterTime'
+// as the timestamp of the second write without error.
+let clusterTime = testDB.getSession().getOperationTime();
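+// (Illustrative note: 'clusterTime' is a Timestamp value such as Timestamp(1564000000, 5);
+// the test passes it across the thread boundary as a string via tojson() and reconstructs
+// it with eval() inside runDBHashFn and runFindFn above.)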
+
+// Run dbHash and find while the prepared transaction has not committed or aborted yet.
+// These should block until the prepared transaction commits or aborts if we specify
+// $_internalReadAtClusterTime to be the timestamp of the second write we did, outside of the
+// transaction.
+const dbHashPrimaryThread =
+ new ScopedThread(runDBHashFn, primary.host, dbName, tojson(clusterTime));
+const dbHashSecondaryThread =
+ new ScopedThread(runDBHashFn, secondary.host, dbName, tojson(clusterTime));
+
+dbHashPrimaryThread.start();
+dbHashSecondaryThread.start();
+
+assertOpHasPrepareConflict(testDB, "dbHash");
+assertOpHasPrepareConflict(testDBSecondary, "dbHash");
+
+// Run 'find' with '$_internalReadAtClusterTime' specified.
+const findPrimaryThread =
+ new ScopedThread(runFindFn, primary.host, dbName, collName, tojson(clusterTime));
+const findSecondaryThread =
+ new ScopedThread(runFindFn, secondary.host, dbName, collName, tojson(clusterTime));
+
+findPrimaryThread.start();
+findSecondaryThread.start();
+
+assertOpHasPrepareConflict(testDB, "find");
+assertOpHasPrepareConflict(testDBSecondary, "find");
+
+// Run a series of DDL operations which shouldn't block before committing the prepared
+// transaction.
+const otherDbName = "prepare_transaction_read_at_cluster_time_secondary_other";
+const otherTestDB = primary.getDB(otherDbName);
+
+assert.commandWorked(otherTestDB.runCommand({create: collName, writeConcern: {w: 2}}));
+assert.commandWorked(
+ otherTestDB.runCommand({collMod: collName, validator: {v: 1}, writeConcern: {w: 2}}));
+assert.commandWorked(otherTestDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {x: 1}, name: 'x_1'}], writeConcern: {w: 2}}));
+assert.commandWorked(
+ otherTestDB.runCommand({dropIndexes: collName, index: 'x_1', writeConcern: {w: 2}}));
+
+// Committing or aborting the transaction should unblock the parallel tasks.
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
+session.endSession();
+
+dbHashPrimaryThread.join();
+dbHashSecondaryThread.join();
+
+// Ensure the dbHashes across the replica set match.
+const primaryDBHash = dbHashPrimaryThread.returnData();
+const secondaryDBHash = dbHashSecondaryThread.returnData();
+
+assert.eq(primaryDBHash.collections, secondaryDBHash.collections);
+assert.eq(primaryDBHash.md5, secondaryDBHash.md5);
- rst.stopSet();
+findPrimaryThread.join();
+findSecondaryThread.join();
+
+rst.stopSet();
}());
diff --git a/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js b/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js
index fcab1ae7a27..fcc8fab4f1d 100644
--- a/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js
+++ b/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js
@@ -6,98 +6,97 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replSet = new ReplSetTest({nodes: 2});
- replSet.startSet();
- replSet.initiate();
+const replSet = new ReplSetTest({nodes: 2});
+replSet.startSet();
+replSet.initiate();
- const primary = replSet.getPrimary();
- const secondary = replSet.getSecondary();
+const primary = replSet.getPrimary();
+const secondary = replSet.getSecondary();
- const dbName = "test";
- const collName = "prepare_transaction_survives_state_transitions_to_and_from_recovering";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "prepare_transaction_survives_state_transitions_to_and_from_recovering";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
- assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testColl.insert({_id: 1}));
- const session1 = primary.startSession({causalConsistency: false});
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+const session1 = primary.startSession({causalConsistency: false});
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- const session2 = primary.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+const session2 = primary.startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- jsTestLog("Starting a transaction and putting it into prepare");
+jsTestLog("Starting a transaction and putting it into prepare");
- session1.startTransaction();
- assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
- jsTestLog("Starting a second transaction and putting it into prepare");
+jsTestLog("Starting a second transaction and putting it into prepare");
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- replSet.awaitReplication();
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+replSet.awaitReplication();
- jsTestLog("Putting secondary in maintenance mode so it will transition to RECOVERING");
+jsTestLog("Putting secondary in maintenance mode so it will transition to RECOVERING");
- assert.commandWorked(secondary.adminCommand({replSetMaintenance: 1}));
- replSet.waitForState(secondary, ReplSetTest.State.RECOVERING);
+assert.commandWorked(secondary.adminCommand({replSetMaintenance: 1}));
+replSet.waitForState(secondary, ReplSetTest.State.RECOVERING);
- jsTestLog("Commiting the second prepared transaction while a node is in the RECOVERING state");
+jsTestLog("Commiting the second prepared transaction while a node is in the RECOVERING state");
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- replSet.awaitReplication();
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+replSet.awaitReplication();
- jsTestLog("Taking secondary out of maintenance mode so it will transition back to SECONDARY");
+jsTestLog("Taking secondary out of maintenance mode so it will transition back to SECONDARY");
- assert.commandWorked(secondary.adminCommand({replSetMaintenance: 0}));
- replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
+assert.commandWorked(secondary.adminCommand({replSetMaintenance: 0}));
+replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Stepping up the secondary");
+jsTestLog("Stepping up the secondary");
- replSet.stepUp(secondary);
- replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replSet.getPrimary();
- const newPrimaryDB = newPrimary.getDB(dbName);
+replSet.stepUp(secondary);
+replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replSet.getPrimary();
+const newPrimaryDB = newPrimary.getDB(dbName);
- // Create a proxy session to reuse the session state of the old primary.
- const newSession = new _DelegatingDriverSession(newPrimary, session1);
+// Create a proxy session to reuse the session state of the old primary.
+const newSession = new _DelegatingDriverSession(newPrimary, session1);
- jsTestLog("Make sure that the transaction is properly prepared");
+jsTestLog("Make sure that the transaction is properly prepared");
- // Make sure that we can't read changes to the document from the second transaction after
- // recovery.
- assert.eq(newPrimaryDB.getCollection(collName).find().toArray(), [{_id: 1}, {_id: 2}]);
+// Make sure that we can't read changes to the document from the prepared transaction after
+// recovery.
+assert.eq(newPrimaryDB.getCollection(collName).find().toArray(), [{_id: 1}, {_id: 2}]);
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- newPrimaryDB.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 1}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
+// Make sure that another write on the same document touched by the prepared transaction
+// causes a write conflict.
+assert.commandFailedWithCode(
+ newPrimaryDB.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 1}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(
- newSession.getDatabase(dbName).getCollection(collName).insert({_id: 3}),
- ErrorCodes.PreparedTransactionInProgress);
+// Make sure that we cannot add other operations to the transaction since it is prepared.
+assert.commandFailedWithCode(
+ newSession.getDatabase(dbName).getCollection(collName).insert({_id: 3}),
+ ErrorCodes.PreparedTransactionInProgress);
- jsTestLog("Verify that the locks from the prepared transaction are still held");
+jsTestLog("Verify that the locks from the prepared transaction are still held");
- assert.commandFailedWithCode(newPrimaryDB.runCommand({drop: collName, maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
+assert.commandFailedWithCode(newPrimaryDB.runCommand({drop: collName, maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
- jsTestLog("Committing transaction");
+jsTestLog("Committing transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp1));
- replSet.awaitReplication();
-
- replSet.stopSet();
+assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp1));
+replSet.awaitReplication();
+replSet.stopSet();
}());
diff --git a/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js b/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
index be5f67f2e2d..101c12252a0 100644
--- a/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
+++ b/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
@@ -6,74 +6,74 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const name = "prepared_transaction_commands_fail_on_secondaries";
- const rst = new ReplSetTest({
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- ],
- });
- const nodes = rst.startSet();
- rst.initiate();
+const name = "prepared_transaction_commands_fail_on_secondaries";
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+});
+const nodes = rst.startSet();
+rst.initiate();
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const testDB = primary.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const priSession = primary.startSession({causalConsistency: false});
- const priSessionDB = priSession.getDatabase(dbName);
- const priSessionColl = priSessionDB.getCollection(collName);
+const priSession = primary.startSession({causalConsistency: false});
+const priSessionDB = priSession.getDatabase(dbName);
+const priSessionColl = priSessionDB.getCollection(collName);
- const secSession = PrepareHelpers.createSessionWithGivenId(
- secondary, priSession.getSessionId(), {causalConsistency: false});
+const secSession = PrepareHelpers.createSessionWithGivenId(
+ secondary, priSession.getSessionId(), {causalConsistency: false});
- priSession.startTransaction();
- const doc = {_id: 1};
- assert.commandWorked(priSessionColl.insert(doc));
- rst.awaitReplication();
+priSession.startTransaction();
+const doc = {
+ _id: 1
+};
+assert.commandWorked(priSessionColl.insert(doc));
+rst.awaitReplication();
- jsTestLog("Test that prepare fails on a secondary");
- const txnNumber = NumberLong(priSession.getTxnNumber_forTesting());
- assert.commandFailedWithCode(
- secSession.getDatabase('admin').adminCommand(
- {prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.NotMaster);
+jsTestLog("Test that prepare fails on a secondary");
+const txnNumber = NumberLong(priSession.getTxnNumber_forTesting());
+assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand(
+ {prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.NotMaster);
- const prepareTimestamp = PrepareHelpers.prepareTransaction(priSession);
- rst.awaitReplication();
+const prepareTimestamp = PrepareHelpers.prepareTransaction(priSession);
+rst.awaitReplication();
- jsTestLog("Test that prepared commit fails on a secondary");
- // Add 1 to the increment so that the commitTimestamp is "after" the prepareTimestamp.
- const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
- assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: txnNumber,
- autocommit: false
- }),
- ErrorCodes.NotMaster);
+jsTestLog("Test that prepared commit fails on a secondary");
+// Add 1 to the increment so that the commitTimestamp is "after" the prepareTimestamp.
+const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
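+// (For example, a prepareTimestamp of Timestamp(1564000000, 2) yields a commitTimestamp of
+// Timestamp(1564000000, 3), which compares greater than the prepareTimestamp.)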
+assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: txnNumber,
+ autocommit: false
+}),
+ ErrorCodes.NotMaster);
- jsTestLog("Test that prepared abort fails on a secondary");
- assert.commandFailedWithCode(
- secSession.getDatabase('admin').adminCommand(
- {abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.NotMaster);
+jsTestLog("Test that prepared abort fails on a secondary");
+assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand(
+ {abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.NotMaster);
- jsTestLog("Test that we can still commit the transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(priSession, commitTimestamp));
- rst.awaitReplication();
- assert.docEq(doc, testDB[collName].findOne());
- assert.eq(1, testDB[collName].find().itcount());
- assert.docEq(doc, secondary.getDB(dbName)[collName].findOne());
- assert.eq(1, secondary.getDB(dbName)[collName].find().itcount());
+jsTestLog("Test that we can still commit the transaction");
+assert.commandWorked(PrepareHelpers.commitTransaction(priSession, commitTimestamp));
+rst.awaitReplication();
+assert.docEq(doc, testDB[collName].findOne());
+assert.eq(1, testDB[collName].find().itcount());
+assert.docEq(doc, secondary.getDB(dbName)[collName].findOne());
+assert.eq(1, secondary.getDB(dbName)[collName].find().itcount());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/prepared_transaction_on_failover.js b/jstests/replsets/prepared_transaction_on_failover.js
index e5d6bf84a1e..43689b95c1e 100644
--- a/jstests/replsets/prepared_transaction_on_failover.js
+++ b/jstests/replsets/prepared_transaction_on_failover.js
@@ -4,131 +4,128 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/rslib.js"); // For reconnect()
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const dbName = jsTest.name();
- const collName = "coll";
- const otherDbName = dbName + "_other";
-
- function testTransactionsWithFailover(doWork, stepDown, postCommit) {
- const primary = replTest.getPrimary();
- const newPrimary = replTest.getSecondary();
- const testDB = primary.getDB(dbName);
-
- testDB.dropDatabase();
- testDB.getSiblingDB(otherDbName).dropDatabase();
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- jsTestLog("Starting transaction");
- const session = primary.startSession({causalConsistency: false});
- session.startTransaction({writeConcern: {w: "majority"}});
-
- doWork(primary, session);
-
- jsTestLog("Putting transaction into prepare");
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- replTest.awaitReplication();
-
- stepDown();
- reconnect(primary);
-
- jsTestLog("Waiting for the other node to run for election and become primary");
- assert.eq(replTest.getPrimary(), newPrimary);
-
- jsTestLog("Creating an unrelated collection");
- // Application of an unrelated DDL command needs a strong lock on secondary. Make sure
- // the prepared transactions have yielded their locks on secondary.
- assert.commandWorked(newPrimary.getDB(otherDbName).runCommand({create: collName}));
- replTest.awaitReplication();
-
- jsTestLog("Dropping the collection in use cannot acquire the lock");
- assert.commandFailedWithCode(
- newPrimary.getDB(testDB).runCommand({drop: collName, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Committing transaction on the new primary");
- // Create a proxy session to reuse the session state of the old primary.
- const newSession = new _DelegatingDriverSession(newPrimary, session);
-
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp));
- replTest.awaitReplication();
-
- postCommit(primary, newPrimary);
-
- jsTestLog("Running another transaction on the new primary");
- const secondSession = newPrimary.startSession({causalConsistency: false});
- secondSession.startTransaction({writeConcern: {w: "majority"}});
- assert.commandWorked(
- secondSession.getDatabase(dbName).getCollection(collName).insert({_id: "second-doc"}));
- assert.commandWorked(secondSession.commitTransaction_forTesting());
-
- // Unfreeze the original primary so that it can stand for election again for the next test.
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- }
-
- function doInsert(primary, session) {
- const doc = {_id: "txn on primary " + primary};
- jsTestLog("Inserting a document in a transaction.");
- assert.commandWorked(session.getDatabase(dbName).getCollection(collName).insert(doc));
- }
- function postInsert(primary, newPrimary) {
- const doc = {_id: "txn on primary " + primary};
- assert.docEq(doc, primary.getDB(dbName).getCollection(collName).findOne());
- assert.docEq(doc, newPrimary.getDB(dbName).getCollection(collName).findOne());
- }
-
- function doInsertTextSearch(primary, session) {
- // Create an index outside of the transaction.
- assert.commandWorked(
- primary.getDB(dbName).getCollection(collName).createIndex({text: "text"}));
-
- // Do the followings in a transaction.
- jsTestLog("Inserting a document in a transaction.");
- assert.commandWorked(
- session.getDatabase(dbName).getCollection(collName).insert({text: "text"}));
- // Text search will recursively acquire the global lock. This tests that yielding
- // recursively held locks works on step down.
- jsTestLog("Doing a text search in a transaction.");
- assert.eq(1,
- session.getDatabase(dbName)
- .getCollection(collName)
- .find({$text: {$search: "text"}})
- .itcount());
- }
- function postInsertTextSearch(primary, newPrimary) {
- assert.eq(1,
- primary.getDB(dbName)
- .getCollection(collName)
- .find({$text: {$search: "text"}})
- .itcount());
- assert.eq(1,
- newPrimary.getDB(dbName)
- .getCollection(collName)
- .find({$text: {$search: "text"}})
- .itcount());
- }
-
- function stepDownViaHeartbeat() {
- jsTestLog("Stepping down primary via heartbeat");
- replTest.stepUp(replTest.getSecondary());
- }
-
- function stepDownViaCommand() {
- jsTestLog("Stepping down primary via command");
- assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10}));
- }
-
- testTransactionsWithFailover(doInsert, stepDownViaHeartbeat, postInsert);
- testTransactionsWithFailover(doInsert, stepDownViaCommand, postInsert);
-
- testTransactionsWithFailover(doInsertTextSearch, stepDownViaHeartbeat, postInsertTextSearch);
- testTransactionsWithFailover(doInsertTextSearch, stepDownViaCommand, postInsertTextSearch);
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/rslib.js"); // For reconnect()
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = jsTest.name();
+const collName = "coll";
+const otherDbName = dbName + "_other";
+
+function testTransactionsWithFailover(doWork, stepDown, postCommit) {
+ const primary = replTest.getPrimary();
+ const newPrimary = replTest.getSecondary();
+ const testDB = primary.getDB(dbName);
+
+ testDB.dropDatabase();
+ testDB.getSiblingDB(otherDbName).dropDatabase();
+ assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+ jsTestLog("Starting transaction");
+ const session = primary.startSession({causalConsistency: false});
+ session.startTransaction({writeConcern: {w: "majority"}});
+
+ doWork(primary, session);
+
+ jsTestLog("Putting transaction into prepare");
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ replTest.awaitReplication();
+
+ stepDown();
+ reconnect(primary);
+
+ jsTestLog("Waiting for the other node to run for election and become primary");
+ assert.eq(replTest.getPrimary(), newPrimary);
+
+ jsTestLog("Creating an unrelated collection");
+    // Applying an unrelated DDL command needs a strong lock on the secondary. Make sure
+    // the prepared transactions have yielded their locks on the secondary.
+ assert.commandWorked(newPrimary.getDB(otherDbName).runCommand({create: collName}));
+ replTest.awaitReplication();
+
+ jsTestLog("Dropping the collection in use cannot acquire the lock");
+ assert.commandFailedWithCode(
+ newPrimary.getDB(testDB).runCommand({drop: collName, maxTimeMS: 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Committing transaction on the new primary");
+ // Create a proxy session to reuse the session state of the old primary.
+ const newSession = new _DelegatingDriverSession(newPrimary, session);
+
+ assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp));
+ replTest.awaitReplication();
+
+ postCommit(primary, newPrimary);
+
+ jsTestLog("Running another transaction on the new primary");
+ const secondSession = newPrimary.startSession({causalConsistency: false});
+ secondSession.startTransaction({writeConcern: {w: "majority"}});
+ assert.commandWorked(
+ secondSession.getDatabase(dbName).getCollection(collName).insert({_id: "second-doc"}));
+ assert.commandWorked(secondSession.commitTransaction_forTesting());
+
+ // Unfreeze the original primary so that it can stand for election again for the next test.
+ assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+}
+
+function doInsert(primary, session) {
+ const doc = {_id: "txn on primary " + primary};
+ jsTestLog("Inserting a document in a transaction.");
+ assert.commandWorked(session.getDatabase(dbName).getCollection(collName).insert(doc));
+}
+function postInsert(primary, newPrimary) {
+ const doc = {_id: "txn on primary " + primary};
+ assert.docEq(doc, primary.getDB(dbName).getCollection(collName).findOne());
+ assert.docEq(doc, newPrimary.getDB(dbName).getCollection(collName).findOne());
+}
+
+function doInsertTextSearch(primary, session) {
+ // Create an index outside of the transaction.
+ assert.commandWorked(primary.getDB(dbName).getCollection(collName).createIndex({text: "text"}));
+
+    // Do the following in a transaction.
+ jsTestLog("Inserting a document in a transaction.");
+ assert.commandWorked(
+ session.getDatabase(dbName).getCollection(collName).insert({text: "text"}));
+ // Text search will recursively acquire the global lock. This tests that yielding
+ // recursively held locks works on step down.
+ jsTestLog("Doing a text search in a transaction.");
+ assert.eq(1,
+ session.getDatabase(dbName)
+ .getCollection(collName)
+ .find({$text: {$search: "text"}})
+ .itcount());
+}
+function postInsertTextSearch(primary, newPrimary) {
+ assert.eq(
+ 1,
+ primary.getDB(dbName).getCollection(collName).find({$text: {$search: "text"}}).itcount());
+ assert.eq(1,
+ newPrimary.getDB(dbName)
+ .getCollection(collName)
+ .find({$text: {$search: "text"}})
+ .itcount());
+}
+
+function stepDownViaHeartbeat() {
+ jsTestLog("Stepping down primary via heartbeat");
+ replTest.stepUp(replTest.getSecondary());
+}
+
+function stepDownViaCommand() {
+ jsTestLog("Stepping down primary via command");
+ assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10}));
+}
+
+testTransactionsWithFailover(doInsert, stepDownViaHeartbeat, postInsert);
+testTransactionsWithFailover(doInsert, stepDownViaCommand, postInsert);
+
+testTransactionsWithFailover(doInsertTextSearch, stepDownViaHeartbeat, postInsertTextSearch);
+testTransactionsWithFailover(doInsertTextSearch, stepDownViaCommand, postInsertTextSearch);
+
+replTest.stopSet();
})();
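
The subtle step in prepared_transaction_on_failover.js is committing on the new primary while reusing the old primary's session state. In isolation it looks like this (a sketch, assuming `session` holds a transaction already prepared through the old primary and `prepareTimestamp` was captured as in the test):

    // Wrap the original session so its session id and txnNumber are replayed
    // against the new primary, then commit the prepared transaction there.
    const newPrimary = replTest.getPrimary();
    const newSession = new _DelegatingDriverSession(newPrimary, session);
    assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp));
    replTest.awaitReplication();
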
diff --git a/jstests/replsets/primary_casts_vote_on_stepdown.js b/jstests/replsets/primary_casts_vote_on_stepdown.js
index 6271a353ea9..f07951a69c8 100644
--- a/jstests/replsets/primary_casts_vote_on_stepdown.js
+++ b/jstests/replsets/primary_casts_vote_on_stepdown.js
@@ -6,30 +6,29 @@
* successfully.
*/
(function() {
- "use strict";
+"use strict";
- let name = "primary_casts_vote_on_stepdown";
- let replTest = new ReplSetTest({name: name, nodes: 2});
+let name = "primary_casts_vote_on_stepdown";
+let replTest = new ReplSetTest({name: name, nodes: 2});
- let nodes = replTest.startSet();
- replTest.initiate();
+let nodes = replTest.startSet();
+replTest.initiate();
- // Make sure node 0 is initially primary, and then step up node 1 and make sure it is able to
- // become primary in one election, gathering the vote of node 0, who will be forced to step
- // down in the act of granting its vote to node 1.
- jsTestLog("Make sure node 0 (" + nodes[0] + ") is primary.");
- replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
- let res = assert.commandWorked(nodes[0].adminCommand("replSetGetStatus"));
- let firstPrimaryTerm = res.term;
+// Make sure node 0 is initially primary, and then step up node 1 and make sure it is able to
+// become primary in one election, gathering the vote of node 0, who will be forced to step
+// down in the act of granting its vote to node 1.
+jsTestLog("Make sure node 0 (" + nodes[0] + ") is primary.");
+replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
+let res = assert.commandWorked(nodes[0].adminCommand("replSetGetStatus"));
+let firstPrimaryTerm = res.term;
- jsTestLog("Stepping up node 1 (" + nodes[1] + ").");
- replTest.stepUp(nodes[1]);
- replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
- // The election should have happened in a single attempt, so the term of the new primary should
- // be exactly 1 greater than the old primary.
- res = assert.commandWorked(nodes[1].adminCommand("replSetGetStatus"));
- assert.eq(firstPrimaryTerm + 1, res.term);
-
- replTest.stopSet();
+jsTestLog("Stepping up node 1 (" + nodes[1] + ").");
+replTest.stepUp(nodes[1]);
+replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
+// The election should have happened in a single attempt, so the term of the new primary should
+// be exactly 1 greater than the old primary.
+res = assert.commandWorked(nodes[1].adminCommand("replSetGetStatus"));
+assert.eq(firstPrimaryTerm + 1, res.term);
+replTest.stopSet();
})();
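
The invariant checked in primary_casts_vote_on_stepdown.js is that a single-attempt election advances the term by exactly one, because the old primary grants its vote and steps down within that one attempt. Distilled (a sketch over a running two-node ReplSetTest `replTest` with its `nodes` array):

    const before = assert.commandWorked(nodes[0].adminCommand("replSetGetStatus")).term;
    replTest.stepUp(nodes[1]);
    replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
    const after = assert.commandWorked(nodes[1].adminCommand("replSetGetStatus")).term;
    // One election attempt == one term bump.
    assert.eq(before + 1, after);
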
diff --git a/jstests/replsets/priority_takeover_cascading_priorities.js b/jstests/replsets/priority_takeover_cascading_priorities.js
index f55997aa5c2..b0493c62dfb 100644
--- a/jstests/replsets/priority_takeover_cascading_priorities.js
+++ b/jstests/replsets/priority_takeover_cascading_priorities.js
@@ -4,33 +4,33 @@
// Shut down the primary and confirm that the next highest priority node becomes primary.
// Repeat until 3 nodes are left standing.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'priority_takeover_cascading_priorities';
- var replSet = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {priority: 5}},
- {rsConfig: {priority: 4}},
- {rsConfig: {priority: 3}},
- {rsConfig: {priority: 2}},
- {rsConfig: {priority: 1}},
- ]
- });
- replSet.startSet();
- replSet.initiate();
+var name = 'priority_takeover_cascading_priorities';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 5}},
+ {rsConfig: {priority: 4}},
+ {rsConfig: {priority: 3}},
+ {rsConfig: {priority: 2}},
+ {rsConfig: {priority: 1}},
+ ]
+});
+replSet.startSet();
+replSet.initiate();
- replSet.waitForState(0, ReplSetTest.State.PRIMARY);
- // Wait until all nodes get the "no-op" of "new primary" after initial sync.
- waitUntilAllNodesCaughtUp(replSet.nodes);
- replSet.stop(0);
+replSet.waitForState(0, ReplSetTest.State.PRIMARY);
+// Wait until all nodes have applied the "new primary" no-op entry after initial sync.
+waitUntilAllNodesCaughtUp(replSet.nodes);
+replSet.stop(0);
- replSet.waitForState(1, ReplSetTest.State.PRIMARY);
- replSet.stop(1);
+replSet.waitForState(1, ReplSetTest.State.PRIMARY);
+replSet.stop(1);
- replSet.waitForState(2, ReplSetTest.State.PRIMARY);
+replSet.waitForState(2, ReplSetTest.State.PRIMARY);
- // Cannot stop any more nodes because we will not have a majority.
- replSet.stopSet();
+// Cannot stop any more nodes because we will not have a majority.
+replSet.stopSet();
})();
diff --git a/jstests/replsets/priority_takeover_one_node_higher_priority.js b/jstests/replsets/priority_takeover_one_node_higher_priority.js
index c1d0a7e5dc3..c75af36661a 100644
--- a/jstests/replsets/priority_takeover_one_node_higher_priority.js
+++ b/jstests/replsets/priority_takeover_one_node_higher_priority.js
@@ -4,38 +4,38 @@
// Step down high priority node. Wait for the lower priority electable node to become primary.
// Eventually high priority node will run a priority takeover election to become primary.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'priority_takeover_one_node_higher_priority';
- var replSet = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {priority: 3}},
- {},
- {rsConfig: {arbiterOnly: true}},
- ]
- });
- replSet.startSet();
- replSet.initiate();
+var name = 'priority_takeover_one_node_higher_priority';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 3}},
+ {},
+ {rsConfig: {arbiterOnly: true}},
+ ]
+});
+replSet.startSet();
+replSet.initiate();
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replSet.getPrimary();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+var primary = replSet.getPrimary();
- replSet.awaitSecondaryNodes();
- replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
- // Primary should step down long enough for election to occur on secondary.
- var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config;
- assert.commandWorked(primary.adminCommand({replSetStepDown: replSet.kDefaultTimeoutMS / 1000}));
+// Primary should step down long enough for election to occur on secondary.
+var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config;
+assert.commandWorked(primary.adminCommand({replSetStepDown: replSet.kDefaultTimeoutMS / 1000}));
- // Step down primary and wait for node 1 to be promoted to primary.
- replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY);
+// Step down primary and wait for node 1 to be promoted to primary.
+replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY);
- // Unfreeze node 0 so it can seek election.
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+// Unfreeze node 0 so it can seek election.
+assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- // Eventually node 0 will stand for election again because it has a higher priorty.
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- replSet.stopSet();
+// Eventually node 0 will stand for election again because it has a higher priority.
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.stopSet();
})();
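
The freeze/unfreeze choreography above is the standard way these tests hand the primary role around: replSetStepDown keeps the old primary from seeking election for the given number of seconds, and replSetFreeze: 0 lifts that freeze early so the higher-priority node can run a priority takeover. The core sequence (a sketch against the `replSet` above):

    // Step down long enough for node 1 to win an election.
    assert.commandWorked(primary.adminCommand({replSetStepDown: 10}));
    replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY);

    // Unfreeze node 0; its higher priority triggers a takeover election.
    assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
    replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
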
diff --git a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
index 2a7a8b610df..d6cdd7efbba 100644
--- a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
+++ b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
@@ -8,48 +8,47 @@
load('jstests/replsets/rslib.js');
(function() {
- 'use strict';
-
- var name = 'priority_takeover_two_nodes_equal_priority';
- var replTest = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}]});
- replTest.startSet();
- replTest.initiate();
-
- jsTestLog("Waiting for one of the high priority nodes to become PRIMARY.");
- var primary;
- var primaryIndex = -1;
- var defaultPriorityNodeIndex = 2;
- assert.soon(
- function() {
- primary = replTest.getPrimary();
- replTest.nodes.find(function(node, index, array) {
- if (primary.host == node.host) {
- primaryIndex = index;
- return true;
- }
- return false;
- });
- return primaryIndex !== defaultPriorityNodeIndex;
- },
- 'Neither of the high priority nodes was elected primary.',
- replTest.kDefaultTimeoutMS, // timeout
- 1000 // interval
- );
-
- jsTestLog("Stepping down the current primary.");
- assert.commandWorked(
- primary.adminCommand({replSetStepDown: 10 * 60, secondaryCatchUpPeriodSecs: 10 * 60}));
-
- // Make sure the primary has stepped down.
- assert.neq(primary, replTest.getPrimary());
-
- // We expect the other high priority node to eventually become primary.
- var expectedNewPrimaryIndex = (primaryIndex === 0) ? 1 : 0;
-
- jsTestLog("Waiting for the other high priority node to become PRIMARY.");
- var expectedNewPrimary = replTest.nodes[expectedNewPrimaryIndex];
- replTest.waitForState(expectedNewPrimary, ReplSetTest.State.PRIMARY);
- replTest.stopSet();
-
+'use strict';
+
+var name = 'priority_takeover_two_nodes_equal_priority';
+var replTest = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}]});
+replTest.startSet();
+replTest.initiate();
+
+jsTestLog("Waiting for one of the high priority nodes to become PRIMARY.");
+var primary;
+var primaryIndex = -1;
+var defaultPriorityNodeIndex = 2;
+assert.soon(
+ function() {
+ primary = replTest.getPrimary();
+ replTest.nodes.find(function(node, index, array) {
+ if (primary.host == node.host) {
+ primaryIndex = index;
+ return true;
+ }
+ return false;
+ });
+ return primaryIndex !== defaultPriorityNodeIndex;
+ },
+ 'Neither of the high priority nodes was elected primary.',
+ replTest.kDefaultTimeoutMS, // timeout
+ 1000 // interval
+);
+
+jsTestLog("Stepping down the current primary.");
+assert.commandWorked(
+ primary.adminCommand({replSetStepDown: 10 * 60, secondaryCatchUpPeriodSecs: 10 * 60}));
+
+// Make sure the primary has stepped down.
+assert.neq(primary, replTest.getPrimary());
+
+// We expect the other high priority node to eventually become primary.
+var expectedNewPrimaryIndex = (primaryIndex === 0) ? 1 : 0;
+
+jsTestLog("Waiting for the other high priority node to become PRIMARY.");
+var expectedNewPrimary = replTest.nodes[expectedNewPrimaryIndex];
+replTest.waitForState(expectedNewPrimary, ReplSetTest.State.PRIMARY);
+replTest.stopSet();
})();
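
The four-argument assert.soon form used above deserves a note, since most tests pass only a function: the extra arguments are a failure message, an overall timeout, and a polling interval. A sketch of the idiom (the polled condition here is hypothetical):

    assert.soon(
        function() {
            // Poll until one of the two high-priority nodes holds the primary role.
            return replTest.getPrimary().host !== replTest.nodes[2].host;
        },
        'Neither of the high priority nodes was elected primary.',  // failure message
        replTest.kDefaultTimeoutMS,                                 // timeout
        1000                                                        // polling interval
    );
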
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index c9369074fee..dad48acd925 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -1,83 +1,83 @@
// Test read after opTime functionality with maxTimeMS.
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
-
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
- var config = replTest.getReplSetConfigFromNode();
-
- var runTest = function(testDB, primaryConn) {
- var dbName = testDB.getName();
- assert.writeOK(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}}));
-
- var localDB = primaryConn.getDB('local');
-
- var oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
- var twoKSecTS = new Timestamp(oplogTS.ts.getTime() + 2000, 0);
-
- var term = oplogTS.t;
-
- // Test timeout with maxTimeMS
- var runTimeoutTest = function() {
- assert.commandFailedWithCode(testDB.runCommand({
- find: 'user',
- filter: {x: 1},
- readConcern: {afterOpTime: {ts: twoKSecTS, t: term}},
- maxTimeMS: 5000,
- }),
- ErrorCodes.MaxTimeMSExpired);
- };
-
- // Run the time out test 3 times with replication debug log level increased to 2
- // for first and last run. The time out message should be logged twice.
- testDB.setLogLevel(2, 'command');
- runTimeoutTest();
- testDB.setLogLevel(0, 'command');
-
- var msg = 'Command on database ' + testDB.getName() +
- ' timed out waiting for read concern to be satisfied. Command:';
- checkLog.containsWithCount(testDB.getMongo(), msg, 1);
-
- // Read concern timed out message should not be logged.
- runTimeoutTest();
-
- testDB.setLogLevel(2, 'command');
- runTimeoutTest();
- testDB.setLogLevel(0, 'command');
-
- checkLog.containsWithCount(testDB.getMongo(), msg, 2);
-
- // Test read on future afterOpTime that will eventually occur.
- primaryConn.getDB(dbName).parallelShellStarted.drop();
- oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
- var insertFunc = startParallelShell('let testDB = db.getSiblingDB("' + dbName + '"); ' +
- 'sleep(3000); ' +
- 'testDB.user.insert({y: 1});',
- primaryConn.port);
-
- var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
- var res = assert.commandWorked(testDB.runCommand({
+"use strict";
+load("jstests/libs/check_log.js");
+
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+var config = replTest.getReplSetConfigFromNode();
+
+var runTest = function(testDB, primaryConn) {
+ var dbName = testDB.getName();
+ assert.writeOK(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}}));
+
+ var localDB = primaryConn.getDB('local');
+
+ var oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var twoKSecTS = new Timestamp(oplogTS.ts.getTime() + 2000, 0);
+
+ var term = oplogTS.t;
+
+ // Test timeout with maxTimeMS
+ var runTimeoutTest = function() {
+ assert.commandFailedWithCode(testDB.runCommand({
find: 'user',
- filter: {y: 1},
- readConcern: {
- afterOpTime: {ts: twoSecTS, t: term},
- },
- maxTimeMS: 90 * 1000,
- }));
-
- assert.eq(null, res.code);
- assert.eq(res.cursor.firstBatch[0].y, 1);
- insertFunc();
+ filter: {x: 1},
+ readConcern: {afterOpTime: {ts: twoKSecTS, t: term}},
+ maxTimeMS: 5000,
+ }),
+ ErrorCodes.MaxTimeMSExpired);
};
- var primary = replTest.getPrimary();
- jsTest.log("test1");
- runTest(primary.getDB('test1'), primary);
- jsTest.log("test2");
- runTest(replTest.getSecondary().getDB('test2'), primary);
-
- replTest.stopSet();
+    // Run the timeout test 3 times, with the replication debug log level increased to 2
+    // for the first and last runs. The timeout message should be logged twice.
+ testDB.setLogLevel(2, 'command');
+ runTimeoutTest();
+ testDB.setLogLevel(0, 'command');
+
+ var msg = 'Command on database ' + testDB.getName() +
+ ' timed out waiting for read concern to be satisfied. Command:';
+ checkLog.containsWithCount(testDB.getMongo(), msg, 1);
+
+    // At the default log level, the read concern timeout message should not be logged.
+ runTimeoutTest();
+
+ testDB.setLogLevel(2, 'command');
+ runTimeoutTest();
+ testDB.setLogLevel(0, 'command');
+
+ checkLog.containsWithCount(testDB.getMongo(), msg, 2);
+
+ // Test read on future afterOpTime that will eventually occur.
+ primaryConn.getDB(dbName).parallelShellStarted.drop();
+ oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var insertFunc = startParallelShell('let testDB = db.getSiblingDB("' + dbName + '"); ' +
+ 'sleep(3000); ' +
+ 'testDB.user.insert({y: 1});',
+ primaryConn.port);
+
+ var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
+ var res = assert.commandWorked(testDB.runCommand({
+ find: 'user',
+ filter: {y: 1},
+ readConcern: {
+ afterOpTime: {ts: twoSecTS, t: term},
+ },
+ maxTimeMS: 90 * 1000,
+ }));
+
+ assert.eq(null, res.code);
+ assert.eq(res.cursor.firstBatch[0].y, 1);
+ insertFunc();
+};
+
+var primary = replTest.getPrimary();
+jsTest.log("test1");
+runTest(primary.getDB('test1'), primary);
+jsTest.log("test2");
+runTest(replTest.getSecondary().getDB('test2'), primary);
+
+replTest.stopSet();
})();
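
read_after_optime.js drives both the failure and success paths of readConcern.afterOpTime. The failure path reduces to: gate a find on an opTime far in the future, cap it with maxTimeMS, and expect MaxTimeMSExpired (a sketch, assuming `testDB` and a primary connection `primaryConn` as in the test):

    // The latest oplog entry gives us the current opTime {ts, t}.
    const lastOp =
        primaryConn.getDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next();
    const farFutureTS = new Timestamp(lastOp.ts.getTime() + 2000, 0);

    // No write will reach this opTime within 5 seconds, so the read times out.
    assert.commandFailedWithCode(testDB.runCommand({
        find: 'user',
        filter: {x: 1},
        readConcern: {afterOpTime: {ts: farFutureTS, t: lastOp.t}},
        maxTimeMS: 5000,
    }),
                                 ErrorCodes.MaxTimeMSExpired);
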
diff --git a/jstests/replsets/read_at_cluster_time_outside_transactions.js b/jstests/replsets/read_at_cluster_time_outside_transactions.js
index af0bbf9cf4f..e75bf2656e5 100644
--- a/jstests/replsets/read_at_cluster_time_outside_transactions.js
+++ b/jstests/replsets/read_at_cluster_time_outside_transactions.js
@@ -5,145 +5,148 @@
* @tags: [requires_document_locking, uses_transactions]
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
-
- const collName = "read_at_cluster_time_outside_transactions";
- const collection = db[collName];
-
- // We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
- // associated with 'clusterTime' is retained for the duration of this test.
- rst.nodes.forEach(conn => {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
- }));
- });
-
- // We insert 3 documents in order to have data to return for both the find and getMore commands
- // when using a batch size of 2. We then save the md5sum associated with the opTime of the last
- // insert.
- assert.commandWorked(collection.insert({_id: 1, comment: "should be seen by find command"}));
- assert.commandWorked(collection.insert({_id: 3, comment: "should be seen by find command"}));
- assert.commandWorked(collection.insert({_id: 5, comment: "should be seen by getMore command"}));
-
- const clusterTime = db.getSession().getOperationTime();
-
- let res = assert.commandWorked(db.runCommand({dbHash: 1}));
- const hashAfterOriginalInserts = {collections: res.collections, md5: res.md5};
-
- // The documents with _id=1 and _id=3 should be returned by the find command.
- let cursor = collection.find().sort({_id: 1}).batchSize(2);
- assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
- assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
-
- // We then insert documents with _id=2 and _id=4. The document with _id=2 is positioned behind
- // the _id index cursor and won't be returned by the getMore command. However, the document with
- // _id=4 is positioned ahead and should end up being returned.
- assert.commandWorked(
- collection.insert({_id: 2, comment: "should not be seen by getMore command"}));
- assert.commandWorked(
- collection.insert({_id: 4, comment: "should be seen by non-snapshot getMore command"}));
- assert.eq({_id: 4, comment: "should be seen by non-snapshot getMore command"}, cursor.next());
- assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
- assert(!cursor.hasNext());
-
- // When using the $_internalReadAtClusterTime option with a clusterTime from after the
- // original 3 documents were inserted, the document with _id=2 shouldn't be visible to the find
- // command because it was inserted afterwards. The same applies to the document with _id=4 and
- // the getMore command.
- res = collection.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: clusterTime,
- });
-
- const batchSize = 2;
- cursor = new DBCommandCursor(db, res, batchSize);
- assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
- assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
- assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
- assert(!cursor.hasNext());
-
- // Using the $_internalReadAtClusterTime option to read at the opTime of the last of the 3
- // original inserts should return the same md5sum as it did originally.
- res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: clusterTime,
- }));
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- const hashAtClusterTime = {collections: res.collections, md5: res.md5};
- assert.eq(hashAtClusterTime, hashAfterOriginalInserts);
-
- // Attempting to read at a null timestamp should return an error.
- assert.commandFailedWithCode(collection.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: new Timestamp(0, 0),
- }),
- ErrorCodes.InvalidOptions);
-
- assert.commandFailedWithCode(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: new Timestamp(0, 1),
- }),
- ErrorCodes.InvalidOptions);
-
- // Attempting to read at a clusterTime in the future should return an error.
- const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
-
- assert.commandFailedWithCode(collection.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: futureClusterTime,
- }),
- ErrorCodes.InvalidOptions);
-
- assert.commandFailedWithCode(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: futureClusterTime,
- }),
- ErrorCodes.InvalidOptions);
-
- // $_internalReadAtClusterTime is not supported in transactions.
- const session = primary.startSession();
- const sessionDB = session.getDatabase("test");
- const sessionColl = sessionDB[collName];
-
- session.startTransaction();
- assert.commandFailedWithCode(sessionColl.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: clusterTime,
- }),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // dbHash is not supported in transactions at all.
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDB.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // Create a new collection to move the minimum visible snapshot to that operation time. Then
- // read at a cluster time behind the minimum visible snapshot which should fail.
- let newCollName = "newColl";
- assert.commandWorked(db.createCollection(newCollName));
- let createCollClusterTime = db.getSession().getOperationTime();
- res = db[newCollName].runCommand("find", {
- $_internalReadAtClusterTime:
- Timestamp(createCollClusterTime.getTime() - 1, createCollClusterTime.getInc()),
- });
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
-
- rst.stopSet();
+const collName = "read_at_cluster_time_outside_transactions";
+const collection = db[collName];
+
+// We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
+// associated with 'clusterTime' is retained for the duration of this test.
+rst.nodes.forEach(conn => {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+ }));
+});
+
+// We insert 3 documents in order to have data to return for both the find and getMore commands
+// when using a batch size of 2. We then save the md5sum associated with the opTime of the last
+// insert.
+assert.commandWorked(collection.insert({_id: 1, comment: "should be seen by find command"}));
+assert.commandWorked(collection.insert({_id: 3, comment: "should be seen by find command"}));
+assert.commandWorked(collection.insert({_id: 5, comment: "should be seen by getMore command"}));
+
+const clusterTime = db.getSession().getOperationTime();
+
+let res = assert.commandWorked(db.runCommand({dbHash: 1}));
+const hashAfterOriginalInserts = {
+ collections: res.collections,
+ md5: res.md5
+};
+
+// The documents with _id=1 and _id=3 should be returned by the find command.
+let cursor = collection.find().sort({_id: 1}).batchSize(2);
+assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
+assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
+
+// We then insert documents with _id=2 and _id=4. The document with _id=2 is positioned behind
+// the _id index cursor and won't be returned by the getMore command. However, the document with
+// _id=4 is positioned ahead and should end up being returned.
+assert.commandWorked(collection.insert({_id: 2, comment: "should not be seen by getMore command"}));
+assert.commandWorked(
+ collection.insert({_id: 4, comment: "should be seen by non-snapshot getMore command"}));
+assert.eq({_id: 4, comment: "should be seen by non-snapshot getMore command"}, cursor.next());
+assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
+assert(!cursor.hasNext());
+
+// When using the $_internalReadAtClusterTime option with a clusterTime from after the
+// original 3 documents were inserted, the document with _id=2 shouldn't be visible to the find
+// command because it was inserted afterwards. The same applies to the document with _id=4 and
+// the getMore command.
+res = collection.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: clusterTime,
+});
+
+const batchSize = 2;
+cursor = new DBCommandCursor(db, res, batchSize);
+assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
+assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
+assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
+assert(!cursor.hasNext());
+
+// Using the $_internalReadAtClusterTime option to read at the opTime of the last of the 3
+// original inserts should return the same md5sum as it did originally.
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: clusterTime,
+}));
+
+const hashAtClusterTime = {
+ collections: res.collections,
+ md5: res.md5
+};
+assert.eq(hashAtClusterTime, hashAfterOriginalInserts);
+
+// Attempting to read at a null timestamp should return an error.
+assert.commandFailedWithCode(collection.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: new Timestamp(0, 0),
+}),
+ ErrorCodes.InvalidOptions);
+
+assert.commandFailedWithCode(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: new Timestamp(0, 1),
+}),
+ ErrorCodes.InvalidOptions);
+
+// Attempting to read at a clusterTime in the future should return an error.
+const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
+
+assert.commandFailedWithCode(collection.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: futureClusterTime,
+}),
+ ErrorCodes.InvalidOptions);
+
+assert.commandFailedWithCode(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: futureClusterTime,
+}),
+ ErrorCodes.InvalidOptions);
+
+// $_internalReadAtClusterTime is not supported in transactions.
+const session = primary.startSession();
+const sessionDB = session.getDatabase("test");
+const sessionColl = sessionDB[collName];
+
+session.startTransaction();
+assert.commandFailedWithCode(sessionColl.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: clusterTime,
+}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// dbHash is not supported in transactions at all.
+session.startTransaction();
+assert.commandFailedWithCode(
+ sessionDB.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// Create a new collection to move the minimum visible snapshot to that operation time. Then
+// read at a cluster time behind the minimum visible snapshot which should fail.
+let newCollName = "newColl";
+assert.commandWorked(db.createCollection(newCollName));
+let createCollClusterTime = db.getSession().getOperationTime();
+res = db[newCollName].runCommand("find", {
+ $_internalReadAtClusterTime:
+ Timestamp(createCollClusterTime.getTime() - 1, createCollClusterTime.getInc()),
+});
+assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
+
+rst.stopSet();
})();
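
The core $_internalReadAtClusterTime contract exercised above: capture the session's operationTime after some writes, and a later read at that timestamp sees exactly the data as of that time, provided the snapshot is still retained (the test pins history with the WTPreserveSnapshotHistoryIndefinitely failpoint). A sketch (collection name hypothetical):

    assert.commandWorked(db.c.insert({_id: 1}));
    const clusterTime = db.getSession().getOperationTime();
    assert.commandWorked(db.c.insert({_id: 2}));  // happens after 'clusterTime'

    const res = db.c.runCommand("find", {$_internalReadAtClusterTime: clusterTime});
    const docs = new DBCommandCursor(db, res).toArray();
    assert.eq(1, docs.length);  // only {_id: 1} is visible at 'clusterTime'
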
diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js
index f76ea6488a1..79a9cd3b0fa 100644
--- a/jstests/replsets/read_committed.js
+++ b/jstests/replsets/read_committed.js
@@ -9,169 +9,171 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
-
- const majorityWriteConcern = {writeConcern: {w: "majority", wtimeout: 60 * 1000}};
-
- // Each test case includes a 'prepareCollection' method that sets up the initial state starting
- // with an empty collection, a 'write' method that does some write, and two arrays,
- // 'expectedBefore' and 'expectedAfter' that describe the expected contents of the collection
- // before and after the write. The 'prepareCollection' and 'write' methods should leave the
- // collection either empty or with a single document with _id: 1.
- const testCases = {
- insert: {
- prepareCollection: function(coll) {}, // No-op
- write: function(coll, writeConcern) {
- assert.writeOK(coll.insert({_id: 1}, writeConcern));
- },
- expectedBefore: [],
- expectedAfter: [{_id: 1}],
+"use strict";
+
+const majorityWriteConcern = {
+ writeConcern: {w: "majority", wtimeout: 60 * 1000}
+};
+
+// Each test case includes a 'prepareCollection' method that sets up the initial state starting
+// with an empty collection, a 'write' method that does some write, and two arrays,
+// 'expectedBefore' and 'expectedAfter' that describe the expected contents of the collection
+// before and after the write. The 'prepareCollection' and 'write' methods should leave the
+// collection either empty or with a single document with _id: 1.
+const testCases = {
+ insert: {
+ prepareCollection: function(coll) {}, // No-op
+ write: function(coll, writeConcern) {
+ assert.writeOK(coll.insert({_id: 1}, writeConcern));
},
- update: {
- prepareCollection: function(coll) {
- assert.writeOK(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern));
- },
- write: function(coll, writeConcern) {
- assert.writeOK(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern));
- },
- expectedBefore: [{_id: 1, state: 'before'}],
- expectedAfter: [{_id: 1, state: 'after'}],
+ expectedBefore: [],
+ expectedAfter: [{_id: 1}],
+ },
+ update: {
+ prepareCollection: function(coll) {
+ assert.writeOK(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern));
},
- remove: {
- prepareCollection: function(coll) {
- assert.writeOK(coll.insert({_id: 1}, majorityWriteConcern));
- },
- write: function(coll, writeConcern) {
- assert.writeOK(coll.remove({_id: 1}, writeConcern));
- },
- expectedBefore: [{_id: 1}],
- expectedAfter: [],
+ write: function(coll, writeConcern) {
+ assert.writeOK(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern));
},
+ expectedBefore: [{_id: 1, state: 'before'}],
+ expectedAfter: [{_id: 1, state: 'after'}],
+ },
+ remove: {
+ prepareCollection: function(coll) {
+ assert.writeOK(coll.insert({_id: 1}, majorityWriteConcern));
+ },
+ write: function(coll, writeConcern) {
+ assert.writeOK(coll.remove({_id: 1}, writeConcern));
+ },
+ expectedBefore: [{_id: 1}],
+ expectedAfter: [],
+ },
+};
+
+// Set up a set and grab things for later.
+var name = "read_committed";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
+
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
+
+replTest.initiate(config);
+
+// Get connections and collection.
+var primary = replTest.getPrimary();
+var secondary = replTest._slaves[0];
+var coll = primary.getDB(name)[name];
+var secondaryColl = secondary.getDB(name)[name];
+
+function log(arg) {
+ jsTest.log(tojson(arg));
+}
+
+function doRead(coll, readConcern) {
+ readConcern.maxTimeMS = 3000;
+ var res = assert.commandWorked(coll.runCommand('find', readConcern));
+ return new DBCommandCursor(coll.getDB(), res).toArray();
+}
+
+function doDirtyRead(coll) {
+ log("doing dirty read");
+ var ret = doRead(coll, {"readConcern": {"level": "local"}});
+ log("done doing dirty read.");
+ return ret;
+}
+
+function doCommittedRead(coll) {
+ log("doing committed read");
+ var ret = doRead(coll, {"readConcern": {"level": "majority"}});
+ log("done doing committed read.");
+ return ret;
+}
+
+function readLatestOplogEntry(readConcernLevel) {
+ var oplog = primary.getDB('local').oplog.rs;
+ var res = oplog.runCommand('find', {
+ "readConcern": {"level": readConcernLevel},
+ "maxTimeMS": 3000,
+ sort: {$natural: -1},
+ limit: 1,
+ });
+ assert.commandWorked(res);
+ return new DBCommandCursor(coll.getDB(), res).toArray()[0];
+}
+
+for (var testName in testCases) {
+ jsTestLog('Running test ' + testName);
+ var test = testCases[testName];
+
+ const setUpInitialState = function setUpInitialState() {
+ assert.writeOK(coll.remove({}, majorityWriteConcern));
+ test.prepareCollection(coll);
+ // Do some sanity checks.
+ assert.eq(doDirtyRead(coll), test.expectedBefore);
+ assert.eq(doCommittedRead(coll), test.expectedBefore);
};
- // Set up a set and grab things for later.
- var name = "read_committed";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
-
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
-
- replTest.initiate(config);
-
- // Get connections and collection.
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
- var coll = primary.getDB(name)[name];
- var secondaryColl = secondary.getDB(name)[name];
-
- function log(arg) {
- jsTest.log(tojson(arg));
- }
-
- function doRead(coll, readConcern) {
- readConcern.maxTimeMS = 3000;
- var res = assert.commandWorked(coll.runCommand('find', readConcern));
- return new DBCommandCursor(coll.getDB(), res).toArray();
- }
-
- function doDirtyRead(coll) {
- log("doing dirty read");
- var ret = doRead(coll, {"readConcern": {"level": "local"}});
- log("done doing dirty read.");
- return ret;
- }
-
- function doCommittedRead(coll) {
- log("doing committed read");
- var ret = doRead(coll, {"readConcern": {"level": "majority"}});
- log("done doing committed read.");
- return ret;
- }
-
- function readLatestOplogEntry(readConcernLevel) {
- var oplog = primary.getDB('local').oplog.rs;
- var res = oplog.runCommand('find', {
- "readConcern": {"level": readConcernLevel},
- "maxTimeMS": 3000,
- sort: {$natural: -1},
- limit: 1,
+ // Writes done with majority write concern must be immediately visible to both dirty and
+ // committed reads.
+ setUpInitialState();
+ test.write(coll, majorityWriteConcern);
+ assert.eq(doDirtyRead(coll), test.expectedAfter);
+ assert.eq(doCommittedRead(coll), test.expectedAfter);
+
+ // Return to the initial state, then stop the secondary from applying new writes to prevent
+ // them from becoming committed.
+ setUpInitialState();
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+ const initialOplogTs = readLatestOplogEntry('local').ts;
+
+    // Writes done without majority write concern must be immediately visible to dirty reads
+    // and hidden from committed reads until they have been replicated. The rules for seeing
+ // an oplog entry for a write are the same as for the write itself.
+ test.write(coll, {});
+ assert.eq(doDirtyRead(coll), test.expectedAfter);
+ assert.neq(readLatestOplogEntry('local').ts, initialOplogTs);
+ assert.eq(doCommittedRead(coll), test.expectedBefore);
+ assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
+
+ // Try the committed read again after sleeping to ensure it doesn't only work for
+ // queries immediately after the write.
+ sleep(1000);
+ assert.eq(doCommittedRead(coll), test.expectedBefore);
+ assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
+
+ // Restart oplog application on the secondary and ensure the committed view is updated.
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+ coll.getDB().getLastError("majority", 60 * 1000);
+ assert.eq(doCommittedRead(coll), test.expectedAfter);
+ assert.neq(readLatestOplogEntry('majority').ts, initialOplogTs);
+
+ // The secondary will be able to make the write committed soon after the primary, but there
+ // is no way to block until it does.
+ try {
+ assert.soon(function() {
+ return friendlyEqual(doCommittedRead(secondaryColl), test.expectedAfter);
});
- assert.commandWorked(res);
- return new DBCommandCursor(coll.getDB(), res).toArray()[0];
+ } catch (e) {
+ // generate useful error messages on failures.
+ assert.eq(doCommittedRead(secondaryColl), test.expectedAfter);
}
-
- for (var testName in testCases) {
- jsTestLog('Running test ' + testName);
- var test = testCases[testName];
-
- const setUpInitialState = function setUpInitialState() {
- assert.writeOK(coll.remove({}, majorityWriteConcern));
- test.prepareCollection(coll);
- // Do some sanity checks.
- assert.eq(doDirtyRead(coll), test.expectedBefore);
- assert.eq(doCommittedRead(coll), test.expectedBefore);
- };
-
- // Writes done with majority write concern must be immediately visible to both dirty and
- // committed reads.
- setUpInitialState();
- test.write(coll, majorityWriteConcern);
- assert.eq(doDirtyRead(coll), test.expectedAfter);
- assert.eq(doCommittedRead(coll), test.expectedAfter);
-
- // Return to the initial state, then stop the secondary from applying new writes to prevent
- // them from becoming committed.
- setUpInitialState();
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- const initialOplogTs = readLatestOplogEntry('local').ts;
-
- // Writes done without majority write concern must be immediately visible to dirty read
- // and hidden from committed reads until they have been replicated. The rules for seeing
- // an oplog entry for a write are the same as for the write itself.
- test.write(coll, {});
- assert.eq(doDirtyRead(coll), test.expectedAfter);
- assert.neq(readLatestOplogEntry('local').ts, initialOplogTs);
- assert.eq(doCommittedRead(coll), test.expectedBefore);
- assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
-
- // Try the committed read again after sleeping to ensure it doesn't only work for
- // queries immediately after the write.
- sleep(1000);
- assert.eq(doCommittedRead(coll), test.expectedBefore);
- assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
-
- // Restart oplog application on the secondary and ensure the committed view is updated.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- coll.getDB().getLastError("majority", 60 * 1000);
- assert.eq(doCommittedRead(coll), test.expectedAfter);
- assert.neq(readLatestOplogEntry('majority').ts, initialOplogTs);
-
- // The secondary will be able to make the write committed soon after the primary, but there
- // is no way to block until it does.
- try {
- assert.soon(function() {
- return friendlyEqual(doCommittedRead(secondaryColl), test.expectedAfter);
- });
- } catch (e) {
- // generate useful error messages on failures.
- assert.eq(doCommittedRead(secondaryColl), test.expectedAfter);
- }
- }
- replTest.stopSet();
+}
+replTest.stopSet();
}());
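
read_committed.js hinges on issuing the same find at two read-concern levels; with the secondary's oplog applier stopped via the rsSyncApplyStop failpoint, a fresh write stays visible to "local" reads but hidden from "majority" reads. A sketch of that split (assumes `coll` starts with one majority-committed document and the failpoint is active, as in the test):

    function readWith(coll, level) {
        const res = assert.commandWorked(
            coll.runCommand('find', {readConcern: {level: level}, maxTimeMS: 3000}));
        return new DBCommandCursor(coll.getDB(), res).toArray();
    }

    assert.writeOK(coll.insert({_id: 2}));            // cannot become committed yet
    assert.eq(2, readWith(coll, 'local').length);     // dirty read sees it
    assert.eq(1, readWith(coll, 'majority').length);  // committed read does not
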
diff --git a/jstests/replsets/read_committed_after_rollback.js b/jstests/replsets/read_committed_after_rollback.js
index bdb83b144a8..097c75c1564 100644
--- a/jstests/replsets/read_committed_after_rollback.js
+++ b/jstests/replsets/read_committed_after_rollback.js
@@ -8,148 +8,144 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
-
- function assertCommittedReadsBlock(coll) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 3000});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.MaxTimeMSExpired,
- "Expected read of " + coll.getFullName() + ' on ' + coll.getMongo().host + " to block");
+"use strict";
+
+function assertCommittedReadsBlock(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 3000});
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.MaxTimeMSExpired,
+ "Expected read of " + coll.getFullName() + ' on ' + coll.getMongo().host + " to block");
+}
+
+function doCommittedRead(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 10000});
+ assert.commandWorked(res, 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
+ return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
+}
+
+function doDirtyRead(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "local"}});
+ assert.commandWorked(res, 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
+ return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
+}
+
+// Set up a set and grab things for later.
+var name = "read_committed_after_rollback";
+var replTest = new ReplSetTest(
+ {name: name, nodes: 5, useBridge: true, nodeOptions: {enableMajorityReadConcern: ''}});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
+
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], priority: 0},
+ // Note: using two arbiters to ensure that a host that can't talk to any other
+ // data-bearing node can still be elected. This also means that a write isn't considered
+ // committed until it is on all 3 data-bearing nodes, not just 2.
+ {"_id": 3, "host": nodes[3], arbiterOnly: true},
+ {"_id": 4, "host": nodes[4], arbiterOnly: true},
+ ]
+};
+
+replTest.initiate(config);
+
+// Get connections.
+var oldPrimary = replTest.getPrimary();
+var newPrimary = replTest._slaves[0];
+var pureSecondary = replTest._slaves[1];
+var arbiters = [replTest.nodes[3], replTest.nodes[4]];
+
+// This is the collection that all of the tests will use.
+var collName = name + '.collection';
+var oldPrimaryColl = oldPrimary.getCollection(collName);
+var newPrimaryColl = newPrimary.getCollection(collName);
+
+// Set up initial state.
+assert.writeOK(oldPrimaryColl.insert({_id: 1, state: 'old'},
+ {writeConcern: {w: 'majority', wtimeout: 30000}}));
+assert.eq(doDirtyRead(oldPrimaryColl), 'old');
+assert.eq(doCommittedRead(oldPrimaryColl), 'old');
+assert.eq(doDirtyRead(newPrimaryColl), 'old');
+// Note that we can't necessarily do a committed read from newPrimaryColl and get 'old', since
+// delivery of the commit level to secondaries isn't synchronized with anything
+// (we would have to hammer to reliably prove that it eventually would work).
+
+// Partition the world such that oldPrimary is still primary but can't replicate to anyone.
+// newPrimary is disconnected from the arbiters first to ensure that it can't be elected.
+newPrimary.disconnect(arbiters);
+oldPrimary.disconnect([newPrimary, pureSecondary]);
+assert.eq(doDirtyRead(newPrimaryColl), 'old');
+
+// This write will only make it to oldPrimary and will never become committed.
+assert.writeOK(oldPrimaryColl.save({_id: 1, state: 'INVALID'}));
+assert.eq(doDirtyRead(oldPrimaryColl), 'INVALID');
+assert.eq(doCommittedRead(oldPrimaryColl), 'old');
+
+// Change the partitioning so that oldPrimary is isolated, and newPrimary can be elected.
+oldPrimary.setSlaveOk();
+oldPrimary.disconnect(arbiters);
+newPrimary.reconnect(arbiters);
+assert.soon(() => newPrimary.adminCommand('isMaster').ismaster, '', 60 * 1000);
+assert.soon(function() {
+ try {
+ return !oldPrimary.adminCommand('isMaster').ismaster;
+ } catch (e) {
+ return false; // ignore disconnect errors.
}
-
- function doCommittedRead(coll) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 10000});
- assert.commandWorked(res,
- 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
- return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
+});
+
+// Stop applier on pureSecondary to ensure that writes to newPrimary won't become committed yet.
+assert.commandWorked(
+ pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+assert.writeOK(newPrimaryColl.save({_id: 1, state: 'new'}));
+assert.eq(doDirtyRead(newPrimaryColl), 'new');
+// Note that we still can't do a committed read from the new primary and reliably get anything,
+// since we never proved that it learned about the commit level from the old primary before
+// the new primary got elected. The new primary cannot advance the commit level until it
+// commits a write in its own term. This includes learning that a majority of nodes have
+// received such a write.
+assert.eq(doCommittedRead(oldPrimaryColl), 'old');
+
+// Reconnect oldPrimary to newPrimary, inducing rollback of the 'INVALID' write. This causes
+// oldPrimary to clear its read majority point. oldPrimary still won't be connected to enough
+// hosts to allow it to be elected, so newPrimary should stay primary for the rest of this test.
+oldPrimary.reconnect(newPrimary);
+assert.soon(function() {
+ try {
+ return oldPrimary.adminCommand('isMaster').secondary &&
+ doDirtyRead(oldPrimaryColl) == 'new';
+ } catch (e) {
+ return false; // ignore disconnect errors.
}
-
- function doDirtyRead(coll) {
- var res = coll.runCommand('find', {"readConcern": {"level": "local"}});
- assert.commandWorked(res,
- 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
- return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
- }
-
- // Set up a set and grab things for later.
- var name = "read_committed_after_rollback";
- var replTest = new ReplSetTest(
- {name: name, nodes: 5, useBridge: true, nodeOptions: {enableMajorityReadConcern: ''}});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
-
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], priority: 0},
- // Note: using two arbiters to ensure that a host that can't talk to any other
- // data-bearing node can still be elected. This also means that a write isn't considered
- // committed until it is on all 3 data-bearing nodes, not just 2.
- {"_id": 3, "host": nodes[3], arbiterOnly: true},
- {"_id": 4, "host": nodes[4], arbiterOnly: true},
- ]
- };
-
- replTest.initiate(config);
-
- // Get connections.
- var oldPrimary = replTest.getPrimary();
- var newPrimary = replTest._slaves[0];
- var pureSecondary = replTest._slaves[1];
- var arbiters = [replTest.nodes[3], replTest.nodes[4]];
-
- // This is the collection that all of the tests will use.
- var collName = name + '.collection';
- var oldPrimaryColl = oldPrimary.getCollection(collName);
- var newPrimaryColl = newPrimary.getCollection(collName);
-
- // Set up initial state.
- assert.writeOK(oldPrimaryColl.insert({_id: 1, state: 'old'},
- {writeConcern: {w: 'majority', wtimeout: 30000}}));
- assert.eq(doDirtyRead(oldPrimaryColl), 'old');
- assert.eq(doCommittedRead(oldPrimaryColl), 'old');
- assert.eq(doDirtyRead(newPrimaryColl), 'old');
- // Note that we can't necessarily do a committed read from newPrimaryColl and get 'old', since
- // delivery of the commit level to secondaries isn't synchronized with anything
- // (we would have to hammer to reliably prove that it eventually would work).
-
- // Partition the world such that oldPrimary is still primary but can't replicate to anyone.
- // newPrimary is disconnected from the arbiters first to ensure that it can't be elected.
- newPrimary.disconnect(arbiters);
- oldPrimary.disconnect([newPrimary, pureSecondary]);
- assert.eq(doDirtyRead(newPrimaryColl), 'old');
-
- // This write will only make it to oldPrimary and will never become committed.
- assert.writeOK(oldPrimaryColl.save({_id: 1, state: 'INVALID'}));
- assert.eq(doDirtyRead(oldPrimaryColl), 'INVALID');
- assert.eq(doCommittedRead(oldPrimaryColl), 'old');
-
- // Change the partitioning so that oldPrimary is isolated, and newPrimary can be elected.
- oldPrimary.setSlaveOk();
- oldPrimary.disconnect(arbiters);
- newPrimary.reconnect(arbiters);
- assert.soon(() => newPrimary.adminCommand('isMaster').ismaster, '', 60 * 1000);
- assert.soon(function() {
- try {
- return !oldPrimary.adminCommand('isMaster').ismaster;
- } catch (e) {
- return false; // ignore disconnect errors.
- }
- });
-
- // Stop applier on pureSecondary to ensure that writes to newPrimary won't become committed yet.
- assert.commandWorked(
- pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- assert.writeOK(newPrimaryColl.save({_id: 1, state: 'new'}));
- assert.eq(doDirtyRead(newPrimaryColl), 'new');
- // Note that we still can't do a committed read from the new primary and reliably get anything,
- // since we never proved that it learned about the commit level from the old primary before
- // the new primary got elected. The new primary cannot advance the commit level until it
- // commits a write in its own term. This includes learning that a majority of nodes have
- // received such a write.
- assert.eq(doCommittedRead(oldPrimaryColl), 'old');
-
- // Reconnect oldPrimary to newPrimary, inducing rollback of the 'INVALID' write. This causes
- // oldPrimary to clear its read majority point. oldPrimary still won't be connected to enough
- // hosts to allow it to be elected, so newPrimary should stay primary for the rest of this test.
- oldPrimary.reconnect(newPrimary);
- assert.soon(function() {
- try {
- return oldPrimary.adminCommand('isMaster').secondary &&
- doDirtyRead(oldPrimaryColl) == 'new';
- } catch (e) {
- return false; // ignore disconnect errors.
- }
- }, '', 60 * 1000);
- assert.eq(doDirtyRead(oldPrimaryColl), 'new');
-
- // Resume oplog application on pureSecondary to allow the 'new' write to be committed. It should
- // now be visible as a committed read to both oldPrimary and newPrimary.
- assert.commandWorked(
- pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- // Do a write to the new primary so that the old primary can establish a sync source to learn
- // about the new commit.
- assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
- {a: 1}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(doCommittedRead(newPrimaryColl), 'new');
- // Do another write to the new primary so that the old primary can be sure to receive the
- // new committed optime.
- assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
- {a: 2}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(doCommittedRead(oldPrimaryColl), 'new');
-
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
- replTest.stopSet();
+}, '', 60 * 1000);
+assert.eq(doDirtyRead(oldPrimaryColl), 'new');
+
+// Resume oplog application on pureSecondary to allow the 'new' write to be committed. It should
+// now be visible as a committed read to both oldPrimary and newPrimary.
+assert.commandWorked(
+ pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+// Do a write to the new primary so that the old primary can establish a sync source to learn
+// about the new commit.
+assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
+ {a: 1}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
+assert.eq(doCommittedRead(newPrimaryColl), 'new');
+// Do another write to the new primary so that the old primary can be sure to receive the
+// new committed optime.
+assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
+ {a: 2}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
+assert.eq(doCommittedRead(oldPrimaryColl), 'new');
+
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
+replTest.stopSet();
}());
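
The test above relies on a doCommittedRead() helper defined earlier in the file, outside this hunk. A minimal sketch, assuming it mirrors the doDirtyRead() helper visible in the hunk with the read concern level swapped to "majority":

    function doCommittedRead(coll) {
        // Same shape as doDirtyRead(), but "majority" returns only committed data.
        var res = coll.runCommand('find', {"readConcern": {"level": "majority"}});
        assert.commandWorked(res,
                             'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
        return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
    }
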
diff --git a/jstests/replsets/read_committed_lookup.js b/jstests/replsets/read_committed_lookup.js
index fbd4c6f5d19..18f77f9237a 100644
--- a/jstests/replsets/read_committed_lookup.js
+++ b/jstests/replsets/read_committed_lookup.js
@@ -7,40 +7,40 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajori
load("jstests/libs/read_committed_lib.js"); // For testReadCommittedLookup
(function() {
- "use strict";
-
- // Confirm majority readConcern works on a replica set.
- const replSetName = "lookup_read_majority";
- let rst = new ReplSetTest({
- nodes: 3,
- name: replSetName,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
+"use strict";
+
+// Confirm majority readConcern works on a replica set.
+const replSetName = "lookup_read_majority";
+let rst = new ReplSetTest({
+ nodes: 3,
+ name: replSetName,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
+});
- const nodes = rst.nodeList();
- const config = {
- _id: replSetName,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], priority: 0},
- {_id: 2, host: nodes[2], arbiterOnly: true},
- ]
- };
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- rst.initiate(config);
+const nodes = rst.nodeList();
+const config = {
+ _id: replSetName,
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], priority: 0},
+ {_id: 2, host: nodes[2], arbiterOnly: true},
+ ]
+};
- let shardSecondary = rst._slaves[0];
+rst.initiate(config);
- testReadCommittedLookup(rst.getPrimary().getDB("test"), shardSecondary, rst);
+let shardSecondary = rst._slaves[0];
- rst.stopSet();
+testReadCommittedLookup(rst.getPrimary().getDB("test"), shardSecondary, rst);
+
+rst.stopSet();
})();
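
The body of this test delegates to testReadCommittedLookup() from jstests/libs/read_committed_lib.js. Purely as an illustration (the collection and field names here are hypothetical, not taken from that library), the kind of query it exercises is a $lookup issued at majority read concern:

    var res = rst.getPrimary().getDB("test").runCommand({
        aggregate: "base",
        pipeline: [{$lookup: {from: "foreign", localField: "fk", foreignField: "_id", as: "joined"}}],
        cursor: {},
        readConcern: {level: "majority"}
    });
    assert.commandWorked(res);
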
diff --git a/jstests/replsets/read_committed_no_snapshots.js b/jstests/replsets/read_committed_no_snapshots.js
index 59524c24bd2..9cb4835727c 100644
--- a/jstests/replsets/read_committed_no_snapshots.js
+++ b/jstests/replsets/read_committed_no_snapshots.js
@@ -8,77 +8,74 @@
load("jstests/replsets/rslib.js"); // For reconfig and startSetIfSupportsReadMajority.
(function() {
- "use strict";
+"use strict";
- // Set up a set and grab things for later.
- var name = "read_committed_no_snapshots";
- var replTest = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {
- setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"},
- rsConfig: {priority: 0}
- }
- ],
- nodeOptions: {enableMajorityReadConcern: ''},
- settings: {protocolVersion: 1}
- });
+// Set up a set and grab things for later.
+var name = "read_committed_no_snapshots";
+var replTest = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {
+ setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"},
+ rsConfig: {priority: 0}
+ }
+ ],
+ nodeOptions: {enableMajorityReadConcern: ''},
+ settings: {protocolVersion: 1}
+});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
- // Cannot wait for a stable recovery timestamp due to the no-snapshot secondary.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+// Cannot wait for a stable recovery timestamp due to the no-snapshot secondary.
+replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- // Get connections and collection.
- var primary = replTest.getPrimary();
- var healthySecondary = replTest._slaves[0];
- healthySecondary.setSlaveOk();
- var noSnapshotSecondary = replTest._slaves[1];
- noSnapshotSecondary.setSlaveOk();
+// Get connections and collection.
+var primary = replTest.getPrimary();
+var healthySecondary = replTest._slaves[0];
+healthySecondary.setSlaveOk();
+var noSnapshotSecondary = replTest._slaves[1];
+noSnapshotSecondary.setSlaveOk();
- // Do a write, wait for it to replicate, and ensure it is visible.
- var res = primary.getDB(name).runCommandWithMetadata( //
- {
- insert: "foo",
- documents: [{_id: 1, state: 0}],
- writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
- },
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
+// Do a write, wait for it to replicate, and ensure it is visible.
+var res = primary.getDB(name).runCommandWithMetadata( //
+ {
+ insert: "foo",
+ documents: [{_id: 1, state: 0}],
+ writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
+ },
+ {"$replData": 1});
+assert.commandWorked(res.commandReply);
- // We need to propagate the lastOpVisible from the primary as afterOpTime in the secondaries to
- // ensure we wait for the write to be in the majority committed view.
- var lastOp = res.commandReply["$replData"].lastOpVisible;
+// We need to propagate the lastOpVisible from the primary as afterOpTime in the secondaries to
+// ensure we wait for the write to be in the majority committed view.
+var lastOp = res.commandReply["$replData"].lastOpVisible;
- // Timeout is based on heartbeat timeout.
- assert.commandWorked(healthySecondary.getDB(name).foo.runCommand(
- 'find',
- {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000}));
+// Timeout is based on heartbeat timeout.
+assert.commandWorked(healthySecondary.getDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000}));
- // Ensure maxTimeMS times out while waiting for this snapshot
- assert.commandFailedWithCode(
- noSnapshotSecondary.getDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
- ErrorCodes.MaxTimeMSExpired);
+// Ensure maxTimeMS times out while waiting for this snapshot
+assert.commandFailedWithCode(noSnapshotSecondary.getDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
+ ErrorCodes.MaxTimeMSExpired);
- // Reconfig to make the no-snapshot secondary the primary
- var config = primary.getDB("local").system.replset.findOne();
- config.members[0].priority = 0;
- config.members[2].priority = 1;
- config.version++;
- primary = reconfig(replTest, config, true);
+// Reconfig to make the no-snapshot secondary the primary
+var config = primary.getDB("local").system.replset.findOne();
+config.members[0].priority = 0;
+config.members[2].priority = 1;
+config.version++;
+primary = reconfig(replTest, config, true);
- // Ensure maxTimeMS times out while waiting for this snapshot
- assert.commandFailedWithCode(
- primary.getSiblingDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
- ErrorCodes.MaxTimeMSExpired);
- replTest.stopSet();
+// Ensure maxTimeMS times out while waiting for this snapshot
+assert.commandFailedWithCode(primary.getSiblingDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+replTest.stopSet();
})();
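
Each test in this series gates itself on startSetIfSupportsReadMajority() from jstests/replsets/rslib.js. A hedged sketch of such a gate (the real helper may differ in detail): start the set, then check the storage engine's advertised support for committed reads.

    function startSetIfSupportsReadMajority(replSetTest) {
        replSetTest.startSet();
        // serverStatus().storageEngine advertises committed-read support.
        return replSetTest.nodes[0]
            .adminCommand("serverStatus")
            .storageEngine.supportsCommittedReads;
    }
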
diff --git a/jstests/replsets/read_committed_on_secondary.js b/jstests/replsets/read_committed_on_secondary.js
index 824a0f2e0bd..ae999799879 100644
--- a/jstests/replsets/read_committed_on_secondary.js
+++ b/jstests/replsets/read_committed_on_secondary.js
@@ -7,131 +7,131 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
-
- function printStatus() {
- var primaryStatus;
- replTest.nodes.forEach((n) => {
- var status = n.getDB("admin").runCommand("replSetGetStatus");
- var self = status.members.filter((m) => m.self)[0];
- var msg = self.name + "\n";
- msg += tojson(status.optimes) + "\n";
- if (self.state == 1) { // Primary status.
- // List other members' status from the primary's perspective
- msg += tojson(status.members.filter((m) => !m.self)) + "\n";
- msg += tojson(status.slaveInfo) + "\n";
- }
- jsTest.log(msg);
- });
- }
-
- function log(arg) {
- jsTest.log(tojson(arg));
- }
- // Set up a set and grab things for later.
- var name = "read_committed_on_secondary";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
-
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
-
- replTest.initiate(config);
-
- // Get connections and collection.
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
- var secondaryId = replTest.getNodeId(secondary);
-
- var dbPrimary = primary.getDB(name);
- var collPrimary = dbPrimary[name];
-
- var dbSecondary = secondary.getDB(name);
- var collSecondary = dbSecondary[name];
-
- function saveDoc(state) {
- log("saving doc.");
- var res = dbPrimary.runCommandWithMetadata( //
- {
- update: name,
- writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS},
- updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}],
- },
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(res.commandReply.writeErrors, undefined);
- log("done saving doc: optime " + tojson(res.commandReply.$replData.lastOpVisible));
- return res.commandReply.$replData.lastOpVisible;
- }
-
- function doDirtyRead(lastOp) {
- log("doing dirty read for lastOp:" + tojson(lastOp));
- var res = collSecondary.runCommand('find', {
- "readConcern": {"level": "local", "afterOpTime": lastOp},
- "maxTimeMS": replTest.kDefaultTimeoutMS
- });
- assert.commandWorked(res);
- log("done doing dirty read.");
- return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
- }
-
- function doCommittedRead(lastOp) {
- log("doing committed read for optime: " + tojson(lastOp));
- var res = collSecondary.runCommand('find', {
- "readConcern": {"level": "majority", "afterOpTime": lastOp},
- "maxTimeMS": replTest.kDefaultTimeoutMS
- });
- assert.commandWorked(res);
- log("done doing committed read.");
- return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
- }
-
- // Do a write, wait for it to replicate, and ensure it is visible.
- var op0 = saveDoc(0);
- assert.eq(doDirtyRead(op0), 0);
-
- printStatus();
- assert.eq(doCommittedRead(op0), 0);
-
- // Disable snapshotting on the secondary.
- secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
-
- // Do a write and ensure it is only visible to dirty reads
- var op1 = saveDoc(1);
- assert.eq(doDirtyRead(op1), 1);
- assert.eq(doCommittedRead(op0), 0);
-
- // Try the committed read again after sleeping to ensure it doesn't only work for queries
- // immediately after the write.
- log("sleeping");
- sleep(1000);
- assert.eq(doCommittedRead(op0), 0);
-
- // Reenable snapshotting on the secondary and ensure that committed reads are able to see the
- // new state.
- log("turning off failpoint");
- secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});
- // Do another write in order to update the committedSnapshot value.
- var op2 = saveDoc(2);
- assert.eq(doDirtyRead(op2), 2);
- log(replTest.status());
- replTest.awaitReplication();
- log(replTest.status());
- assert.eq(doCommittedRead(op2), 2);
- log("test success!");
+"use strict";
+
+function printStatus() {
+ var primaryStatus;
+ replTest.nodes.forEach((n) => {
+ var status = n.getDB("admin").runCommand("replSetGetStatus");
+ var self = status.members.filter((m) => m.self)[0];
+ var msg = self.name + "\n";
+ msg += tojson(status.optimes) + "\n";
+ if (self.state == 1) { // Primary status.
+ // List other members' status from the primary's perspective
+ msg += tojson(status.members.filter((m) => !m.self)) + "\n";
+ msg += tojson(status.slaveInfo) + "\n";
+ }
+ jsTest.log(msg);
+ });
+}
+
+function log(arg) {
+ jsTest.log(tojson(arg));
+}
+// Set up a set and grab things for later.
+var name = "read_committed_on_secondary";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ log("skipping test since storage engine doesn't support committed reads");
replTest.stopSet();
+ return;
+}
+
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
+
+replTest.initiate(config);
+
+// Get connections and collection.
+var primary = replTest.getPrimary();
+var secondary = replTest._slaves[0];
+var secondaryId = replTest.getNodeId(secondary);
+
+var dbPrimary = primary.getDB(name);
+var collPrimary = dbPrimary[name];
+
+var dbSecondary = secondary.getDB(name);
+var collSecondary = dbSecondary[name];
+
+function saveDoc(state) {
+ log("saving doc.");
+ var res = dbPrimary.runCommandWithMetadata( //
+ {
+ update: name,
+ writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS},
+ updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}],
+ },
+ {"$replData": 1});
+ assert.commandWorked(res.commandReply);
+ assert.eq(res.commandReply.writeErrors, undefined);
+ log("done saving doc: optime " + tojson(res.commandReply.$replData.lastOpVisible));
+ return res.commandReply.$replData.lastOpVisible;
+}
+
+function doDirtyRead(lastOp) {
+ log("doing dirty read for lastOp:" + tojson(lastOp));
+ var res = collSecondary.runCommand('find', {
+ "readConcern": {"level": "local", "afterOpTime": lastOp},
+ "maxTimeMS": replTest.kDefaultTimeoutMS
+ });
+ assert.commandWorked(res);
+ log("done doing dirty read.");
+ return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
+}
+
+function doCommittedRead(lastOp) {
+ log("doing committed read for optime: " + tojson(lastOp));
+ var res = collSecondary.runCommand('find', {
+ "readConcern": {"level": "majority", "afterOpTime": lastOp},
+ "maxTimeMS": replTest.kDefaultTimeoutMS
+ });
+ assert.commandWorked(res);
+ log("done doing committed read.");
+ return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
+}
+
+// Do a write, wait for it to replicate, and ensure it is visible.
+var op0 = saveDoc(0);
+assert.eq(doDirtyRead(op0), 0);
+
+printStatus();
+assert.eq(doCommittedRead(op0), 0);
+
+// Disable snapshotting on the secondary.
+secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
+
+// Do a write and ensure it is only visible to dirty reads
+var op1 = saveDoc(1);
+assert.eq(doDirtyRead(op1), 1);
+assert.eq(doCommittedRead(op0), 0);
+
+// Try the committed read again after sleeping to ensure it doesn't only work for queries
+// immediately after the write.
+log("sleeping");
+sleep(1000);
+assert.eq(doCommittedRead(op0), 0);
+
+// Reenable snapshotting on the secondary and ensure that committed reads are able to see the
+// new state.
+log("turning off failpoint");
+secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});
+// Do another write in order to update the committedSnapshot value.
+var op2 = saveDoc(2);
+assert.eq(doDirtyRead(op2), 2);
+log(replTest.status());
+replTest.awaitReplication();
+log(replTest.status());
+assert.eq(doCommittedRead(op2), 2);
+log("test success!");
+replTest.stopSet();
}());
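
The lastOpVisible value that saveDoc() returns, and that the read helpers pass back as afterOpTime, is an opaque optime document. For reference (values illustrative), a protocol-version-1 optime carries a timestamp and an election term:

    var exampleOpTime = {
        ts: Timestamp(1564182144, 1),  // wall-clock seconds plus an increment
        t: NumberLong(2)               // term in which the op was written
    };
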
diff --git a/jstests/replsets/read_committed_stale_history.js b/jstests/replsets/read_committed_stale_history.js
index 3ee22559749..f40841575f4 100644
--- a/jstests/replsets/read_committed_stale_history.js
+++ b/jstests/replsets/read_committed_stale_history.js
@@ -3,144 +3,144 @@
* when hearing about a commit point with a higher optime from a new primary.
*/
(function() {
- 'use strict';
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- var name = "readCommittedStaleHistory";
- var dbName = "wMajorityCheck";
- var collName = "stepdown";
-
- var rst = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {},
- {rsConfig: {priority: 0}},
- ],
- nodeOptions: {enableMajorityReadConcern: ""},
- useBridge: true
- });
+'use strict';
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+var name = "readCommittedStaleHistory";
+var dbName = "wMajorityCheck";
+var collName = "stepdown";
+
+var rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+ nodeOptions: {enableMajorityReadConcern: ""},
+ useBridge: true
+});
+
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
- }
-
- var nodes = rst.nodes;
- rst.initiate();
-
- /**
- * Waits for the given node to be in state primary *and* have finished drain mode and thus
- * be available for writes.
- */
- function waitForPrimary(node) {
- assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
- });
- }
-
- // Asserts that the given document is not visible in the committed snapshot on the given node.
- function checkDocNotCommitted(node, doc) {
- var docs =
- node.getDB(dbName).getCollection(collName).find(doc).readConcern('majority').toArray();
- assert.eq(0, docs.length, tojson(docs));
- }
-
- // SERVER-20844: ReplSetTest starts up a single-node replica set, then reconfigures to the correct
- // size for faster startup, so nodes[0] is always the first primary.
- jsTestLog("Make sure node 0 is primary.");
- var primary = rst.getPrimary();
- var secondaries = rst.getSecondaries();
- assert.eq(nodes[0], primary);
- // Wait for all data-bearing nodes to get up to date.
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
- {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Stop the secondaries from replicating.
- stopServerReplication(secondaries);
- // Stop the primary from being able to complete stepping down.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
-
- jsTestLog("Do a write that won't ever reach a majority of nodes");
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2}));
-
- // Ensure that the write that was just done is not visible in the committed snapshot.
- checkDocNotCommitted(nodes[0], {a: 2});
-
- // Prevent the primary from rolling back later on.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
-
- jsTest.log("Disconnect primary from all secondaries");
- nodes[0].disconnect(nodes[1]);
- nodes[0].disconnect(nodes[2]);
-
- // Ensure the soon-to-be primary cannot see the write from the old primary.
- assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
-
- jsTest.log("Wait for a new primary to be elected");
- // Allow the secondaries to replicate again.
- restartServerReplication(secondaries);
-
- waitForPrimary(nodes[1]);
-
- jsTest.log("Do a write to the new primary");
- assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
- {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Ensure the new primary still cannot see the write from the old primary.
- assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
-
- jsTest.log("Reconnect the old primary to the rest of the nodes");
- nodes[1].reconnect(nodes[0]);
- nodes[2].reconnect(nodes[0]);
-
- // Sleep 10 seconds to allow some heartbeats to be processed, so we can verify that the
- // heartbeats don't cause the stale primary to incorrectly advance the commit point.
- sleep(10000);
-
- checkDocNotCommitted(nodes[0], {a: 2});
-
- jsTest.log("Allow the old primary to finish stepping down and become secondary");
- var res = null;
- try {
- res = nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'});
- } catch (e) {
- // Expected - once we disable the fail point the stepdown will proceed, and it's racy whether
- // the stepdown closes all connections before or after the configureFailPoint command
- // returns.
- }
- if (res) {
- assert.commandWorked(res);
- }
- rst.waitForState(nodes[0], ReplSetTest.State.SECONDARY);
- reconnect(nodes[0]);
-
- // At this point the former primary will attempt to go into rollback, but the
- // 'rollbackHangBeforeStart' will prevent it from doing so.
- checkDocNotCommitted(nodes[0], {a: 2});
- checkLog.contains(nodes[0], 'rollback - rollbackHangBeforeStart fail point enabled');
- checkDocNotCommitted(nodes[0], {a: 2});
-
- jsTest.log("Allow the original primary to roll back its write and catch up to the new primary");
- assert.adminCommandWorkedAllowingNetworkError(
- nodes[0], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
-
- assert.soonNoExcept(function() {
- return null == nodes[0].getDB(dbName).getCollection(collName).findOne({a: 2});
- }, "Original primary never rolled back its write");
-
- rst.awaitReplication();
-
- // Ensure that the old primary got the write that the new primary did and sees it as committed.
- assert.neq(
- null,
- nodes[0].getDB(dbName).getCollection(collName).find({a: 3}).readConcern('majority').next());
+var nodes = rst.nodes;
+rst.initiate();
- rst.stopSet();
+/**
+ * Waits for the given node to be in state primary *and* have finished drain mode and thus
+ * be available for writes.
+ */
+function waitForPrimary(node) {
+ assert.soon(function() {
+ return node.adminCommand('ismaster').ismaster;
+ });
+}
+
+// Asserts that the given document is not visible in the committed snapshot on the given node.
+function checkDocNotCommitted(node, doc) {
+ var docs =
+ node.getDB(dbName).getCollection(collName).find(doc).readConcern('majority').toArray();
+ assert.eq(0, docs.length, tojson(docs));
+}
+
+// SERVER-20844: ReplSetTest starts up a single-node replica set, then reconfigures to the correct
+// size for faster startup, so nodes[0] is always the first primary.
+jsTestLog("Make sure node 0 is primary.");
+var primary = rst.getPrimary();
+var secondaries = rst.getSecondaries();
+assert.eq(nodes[0], primary);
+// Wait for all data-bearing nodes to get up to date.
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+ {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Stop the secondaries from replicating.
+stopServerReplication(secondaries);
+// Stop the primary from being able to complete stepping down.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
+
+jsTestLog("Do a write that won't ever reach a majority of nodes");
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2}));
+
+// Ensure that the write that was just done is not visible in the committed snapshot.
+checkDocNotCommitted(nodes[0], {a: 2});
+
+// Prevent the primary from rolling back later on.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
+
+jsTest.log("Disconnect primary from all secondaries");
+nodes[0].disconnect(nodes[1]);
+nodes[0].disconnect(nodes[2]);
+
+// Ensure the soon-to-be primary cannot see the write from the old primary.
+assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
+
+jsTest.log("Wait for a new primary to be elected");
+// Allow the secondaries to replicate again.
+restartServerReplication(secondaries);
+
+waitForPrimary(nodes[1]);
+
+jsTest.log("Do a write to the new primary");
+assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+ {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Ensure the new primary still cannot see the write from the old primary.
+assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
+
+jsTest.log("Reconnect the old primary to the rest of the nodes");
+nodes[1].reconnect(nodes[0]);
+nodes[2].reconnect(nodes[0]);
+
+// Sleep 10 seconds to allow some heartbeats to be processed, so we can verify that the
+// heartbeats don't cause the stale primary to incorrectly advance the commit point.
+sleep(10000);
+
+checkDocNotCommitted(nodes[0], {a: 2});
+
+jsTest.log("Allow the old primary to finish stepping down and become secondary");
+var res = null;
+try {
+ res = nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'});
+} catch (e) {
+ // Expected - once we disable the fail point the stepdown will proceed, and it's racy whether
+ // the stepdown closes all connections before or after the configureFailPoint command
+ // returns.
+}
+if (res) {
+ assert.commandWorked(res);
+}
+rst.waitForState(nodes[0], ReplSetTest.State.SECONDARY);
+reconnect(nodes[0]);
+
+// At this point the former primary will attempt to go into rollback, but the
+// 'rollbackHangBeforeStart' will prevent it from doing so.
+checkDocNotCommitted(nodes[0], {a: 2});
+checkLog.contains(nodes[0], 'rollback - rollbackHangBeforeStart fail point enabled');
+checkDocNotCommitted(nodes[0], {a: 2});
+
+jsTest.log("Allow the original primary to roll back its write and catch up to the new primary");
+assert.adminCommandWorkedAllowingNetworkError(
+ nodes[0], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
+
+assert.soonNoExcept(function() {
+ return null == nodes[0].getDB(dbName).getCollection(collName).findOne({a: 2});
+}, "Original primary never rolled back its write");
+
+rst.awaitReplication();
+
+// Ensure that the old primary got the write that the new primary did and sees it as committed.
+assert.neq(
+ null,
+ nodes[0].getDB(dbName).getCollection(collName).find({a: 3}).readConcern('majority').next());
+
+rst.stopSet();
}());
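
stopServerReplication() and restartServerReplication() come from jstests/libs/write_concern_util.js, loaded at the top of this test. A minimal sketch, assuming they toggle the 'stopReplProducer' failpoint on each node (the real helpers also wait for the failpoint to take effect before returning):

    function stopServerReplication(nodes) {
        nodes.forEach(function(node) {
            assert.commandWorked(node.adminCommand(
                {configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
        });
    }

    function restartServerReplication(nodes) {
        nodes.forEach(function(node) {
            assert.commandWorked(node.adminCommand(
                {configureFailPoint: 'stopReplProducer', mode: 'off'}));
        });
    }
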
diff --git a/jstests/replsets/read_committed_with_catalog_changes.js b/jstests/replsets/read_committed_with_catalog_changes.js
index 0213aebe36a..2e548a20095 100644
--- a/jstests/replsets/read_committed_with_catalog_changes.js
+++ b/jstests/replsets/read_committed_with_catalog_changes.js
@@ -26,319 +26,316 @@ load("jstests/libs/parallelTester.js"); // For ScopedThread.
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
+"use strict";
- // Each test case includes a 'prepare' method that sets up the initial state starting with a
- // database that has been dropped, a 'performOp' method that does some operation, and two
- // arrays, 'blockedCollections' and 'unblockedCollections', that list the collections that
- // should be blocked or unblocked from the time the operation is performed until it is
- // committed. If the operation is local only and isn't replicated, the test case should include
- // a 'localOnly' field set to true. Test cases are not allowed to touch any databases other than
- // the one passed in.
- const testCases = {
- createCollectionInExistingDB: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+// Each test case includes a 'prepare' method that sets up the initial state starting with a
+// database that has been dropped, a 'performOp' method that does some operation, and two
+// arrays, 'blockedCollections' and 'unblockedCollections', that list the collections that
+// should be blocked or unblocked from the time the operation is performed until it is
+// committed. If the operation is local only and isn't replicated, the test case should include
+// a 'localOnly' field set to true. Test cases are not allowed to touch any databases other than
+// the one passed in.
+const testCases = {
+ createCollectionInExistingDB: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
},
- createCollectionInNewDB: {
- prepare: function(db) {},
- performOp: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['otherDoesNotExist'], // Only existent collections are blocked.
+ performOp: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- dropCollection: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert(db.coll.drop());
- },
- blockedCollections: [],
- unblockedCollections: ['coll', 'other'],
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ createCollectionInNewDB: {
+ prepare: function(db) {},
+ performOp: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- dropDB: {
- prepare: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- // Drop collection explicitly during the preparation phase while we are still able
- // to write to a majority. Otherwise, dropDatabase() will drop the collection
- // and wait for the collection drop to be replicated to a majority of the nodes.
- assert(db.coll.drop());
- },
- performOp: function(db) {
- assert.commandWorked(db.dropDatabase({w: 1}));
- },
- blockedCollections: [],
- unblockedCollections: ['coll'],
+ blockedCollections: ['coll'],
+ unblockedCollections: ['otherDoesNotExist'], // Only existent collections are blocked.
+ },
+ dropCollection: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- dropAndRecreateCollection: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert(db.coll.drop());
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ performOp: function(db) {
+ assert(db.coll.drop());
},
- dropAndRecreateDB: {
- prepare: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- // Drop collection explicitly during the preparation phase while we are still able
- // to write to a majority. Otherwise, dropDatabase() will drop the collection
- // and wait for the collection drop to be replicated to a majority of the nodes.
- assert(db.coll.drop());
- },
- performOp: function(db) {
- assert.commandWorked(db.dropDatabase({w: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['otherDoesNotExist'],
+ blockedCollections: [],
+ unblockedCollections: ['coll', 'other'],
+ },
+ dropDB: {
+ prepare: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
+ // Drop collection explicitly during the preparation phase while we are still able
+ // to write to a majority. Otherwise, dropDatabase() will drop the collection
+ // and wait for the collection drop to be replicated to a majority of the nodes.
+ assert(db.coll.drop());
},
- renameCollectionToNewName: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.from.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.from.renameCollection('coll'));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ performOp: function(db) {
+ assert.commandWorked(db.dropDatabase({w: 1}));
},
- renameCollectionToExistingName: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.from.insert({_id: 'from'}));
- assert.writeOK(db.coll.insert({_id: 'coll'}));
- },
- performOp: function(db) {
- assert.commandWorked(db.from.renameCollection('coll', true));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ blockedCollections: [],
+ unblockedCollections: ['coll'],
+ },
+ dropAndRecreateCollection: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- createIndexForeground: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: false}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ performOp: function(db) {
+ assert(db.coll.drop());
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- createIndexBackground: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: true}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ dropAndRecreateDB: {
+ prepare: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
+ // Drop collection explicitly during the preparation phase while we are still able
+ // to write to a majority. Otherwise, dropDatabase() will drop the collection
+ // and wait for the collection drop to be replicated to a majority of the nodes.
+ assert(db.coll.drop());
},
- dropIndex: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- assert.commandWorked(db.coll.ensureIndex({x: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.dropIndex({x: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ performOp: function(db) {
+ assert.commandWorked(db.dropDatabase({w: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
},
-
- // Remaining cases are local-only operations.
- reIndex: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- assert.commandWorked(db.coll.ensureIndex({x: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.reIndex());
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
- localOnly: true,
+ blockedCollections: ['coll'],
+ unblockedCollections: ['otherDoesNotExist'],
+ },
+ renameCollectionToNewName: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.from.insert({_id: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.from.renameCollection('coll'));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ },
+ renameCollectionToExistingName: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.from.insert({_id: 'from'}));
+ assert.writeOK(db.coll.insert({_id: 'coll'}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.from.renameCollection('coll', true));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ },
+ createIndexForeground: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: false}));
},
- compact: {
- // At least on WiredTiger, compact is fully in-place so it doesn't need to block readers.
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- assert.commandWorked(db.coll.ensureIndex({x: 1}));
- },
- performOp: function(db) {
- var res = db.coll.runCommand('compact', {force: true});
- if (res.code != ErrorCodes.CommandNotSupported) {
- // It is fine for a storage engine to support snapshots but not compact. Since
- // compact doesn't block any collections we are fine with doing a no-op here.
- // Other errors should fail the test.
- assert.commandWorked(res);
- }
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ createIndexBackground: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: true}));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ dropIndex: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.ensureIndex({x: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.coll.dropIndex({x: 1}));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
- },
- blockedCollections: [],
- unblockedCollections: ['coll', 'other'],
- localOnly: true,
+ // Remaining cases are local-only operations.
+ reIndex: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.ensureIndex({x: 1}));
},
- };
+ performOp: function(db) {
+ assert.commandWorked(db.coll.reIndex());
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ localOnly: true,
+ },
+ compact: {
+ // At least on WiredTiger, compact is fully in-place so it doesn't need to block readers.
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.ensureIndex({x: 1}));
+ },
+ performOp: function(db) {
+ var res = db.coll.runCommand('compact', {force: true});
+ if (res.code != ErrorCodes.CommandNotSupported) {
+ // It is fine for a storage engine to support snapshots but not compact. Since
+ // compact doesn't block any collections we are fine with doing a no-op here.
+ // Other errors should fail the test.
+ assert.commandWorked(res);
+ }
+ },
+ blockedCollections: [],
+ unblockedCollections: ['coll', 'other'],
+ localOnly: true,
+ },
+};
- // Assertion helpers. These must get all state as arguments rather than through closure since
- // they may be passed in to a ScopedThread.
- function assertReadsBlock(coll) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 5000});
- assert.commandFailedWithCode(res,
- ErrorCodes.MaxTimeMSExpired,
- "Expected read of " + coll.getFullName() + " to block");
- }
+// Assertion helpers. These must get all state as arguments rather than through closure since
+// they may be passed in to a ScopedThread.
+function assertReadsBlock(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 5000});
+ assert.commandFailedWithCode(
+ res, ErrorCodes.MaxTimeMSExpired, "Expected read of " + coll.getFullName() + " to block");
+}
- function assertReadsSucceed(coll, timeoutMs = 20000) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": timeoutMs});
- assert.commandWorked(res, 'reading from ' + coll.getFullName());
- // Exhaust the cursor to avoid leaking cursors on the server.
- new DBCommandCursor(coll.getDB(), res).itcount();
- }
+function assertReadsSucceed(coll, timeoutMs = 20000) {
+ var res =
+ coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": timeoutMs});
+ assert.commandWorked(res, 'reading from ' + coll.getFullName());
+ // Exhaust the cursor to avoid leaking cursors on the server.
+ new DBCommandCursor(coll.getDB(), res).itcount();
+}
- // Set up a set and grab things for later.
- var name = "read_committed_with_catalog_changes";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+// Set up a set and grab things for later.
+var name = "read_committed_with_catalog_changes";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
- replTest.initiate(config);
+replTest.initiate(config);
- // Get connections.
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
+// Get connections.
+var primary = replTest.getPrimary();
+var secondary = replTest._slaves[0];
- // This is the DB that all of the tests will use.
- var mainDB = primary.getDB('mainDB');
+// This is the DB that all of the tests will use.
+var mainDB = primary.getDB('mainDB');
- // This DB won't be used by any tests so it should always be unblocked.
- var otherDB = primary.getDB('otherDB');
- var otherDBCollection = otherDB.collection;
- assert.writeOK(otherDBCollection.insert(
- {}, {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- assertReadsSucceed(otherDBCollection);
+// This DB won't be used by any tests so it should always be unblocked.
+var otherDB = primary.getDB('otherDB');
+var otherDBCollection = otherDB.collection;
+assert.writeOK(otherDBCollection.insert(
+ {}, {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assertReadsSucceed(otherDBCollection);
- for (var testName in testCases) {
- jsTestLog('Running test ' + testName);
- var test = testCases[testName];
+for (var testName in testCases) {
+ jsTestLog('Running test ' + testName);
+ var test = testCases[testName];
- const setUpInitialState = function setUpInitialState() {
- assert.commandWorked(mainDB.dropDatabase());
- test.prepare(mainDB);
- replTest.awaitReplication();
- // Do some sanity checks.
- assertReadsSucceed(otherDBCollection);
- test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- };
-
- // All operations, whether replicated or not, must become visible automatically as long as
- // the secondary is keeping up.
- setUpInitialState();
- test.performOp(mainDB);
+ const setUpInitialState = function setUpInitialState() {
+ assert.commandWorked(mainDB.dropDatabase());
+ test.prepare(mainDB);
+ replTest.awaitReplication();
+ // Do some sanity checks.
assertReadsSucceed(otherDBCollection);
test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ };
- // Return to the initial state, then stop the secondary from applying new writes to prevent
- // them from becoming committed.
- setUpInitialState();
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+ // All operations, whether replicated or not, must become visible automatically as long as
+ // the secondary is keeping up.
+ setUpInitialState();
+ test.performOp(mainDB);
+ assertReadsSucceed(otherDBCollection);
+ test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- // If the tested operation isn't replicated, do a write to the side collection before
- // performing the operation. This will ensure that the operation happens after an
- // uncommitted write, which prevents it from immediately being marked as committed.
- if (test.localOnly) {
- assert.writeOK(otherDBCollection.insert({}));
- }
+ // Return to the initial state, then stop the secondary from applying new writes to prevent
+ // them from becoming committed.
+ setUpInitialState();
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- // Perform the op and ensure that blocked collections block and unblocked ones don't.
- test.performOp(mainDB);
- assertReadsSucceed(otherDBCollection);
- test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
- test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ // If the tested operation isn't replicated, do a write to the side collection before
+ // performing the operation. This will ensure that the operation happens after an
+ // uncommitted write, which prevents it from immediately being marked as committed.
+ if (test.localOnly) {
+ assert.writeOK(otherDBCollection.insert({}));
+ }
- // Use background threads to test that reads that start blocked can complete if the
- // operation they are waiting on becomes committed while the read is still blocked.
- // We don't do this when testing auth because ScopedThreads don't propagate auth
- // credentials.
- var threads = jsTest.options().auth ? [] : test.blockedCollections.map((name) => {
- // This function must get all inputs as arguments and can't use closure because it
- // is used in a ScopedThread.
- function bgThread(host, collection, assertReadsSucceed) {
- // Use a longer timeout since we expect to block for a little while (at least 2
- // seconds).
- assertReadsSucceed(new Mongo(host).getCollection(collection), 30 * 1000);
- }
- var thread = new ScopedThread(
- bgThread, primary.host, mainDB[name].getFullName(), assertReadsSucceed);
- thread.start();
- return thread;
- });
- sleep(1000); // Give the reads a chance to block.
+ // Perform the op and ensure that blocked collections block and unblocked ones don't.
+ test.performOp(mainDB);
+ assertReadsSucceed(otherDBCollection);
+ test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
+ test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- try {
- // Try the committed read again after sleeping to ensure that it still blocks even if it
- // isn't immediately after the operation.
- test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
+ // Use background threads to test that reads that start blocked can complete if the
+ // operation they are waiting on becomes committed while the read is still blocked.
+ // We don't do this when testing auth because ScopedThreads don't propagate auth
+ // credentials.
+ var threads = jsTest.options().auth ? [] : test.blockedCollections.map((name) => {
+ // This function must get all inputs as arguments and can't use closure because it
+ // is used in a ScopedThread.
+ function bgThread(host, collection, assertReadsSucceed) {
+ // Use a longer timeout since we expect to block for a little while (at least 2
+ // seconds).
+ assertReadsSucceed(new Mongo(host).getCollection(collection), 30 * 1000);
+ }
+ var thread = new ScopedThread(
+ bgThread, primary.host, mainDB[name].getFullName(), assertReadsSucceed);
+ thread.start();
+ return thread;
+ });
+ sleep(1000); // Give the reads a chance to block.
- // Restart oplog application on the secondary and ensure the blocked collections become
- // unblocked.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- replTest.awaitReplication();
- test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ try {
+ // Try the committed read again after sleeping to ensure that it still blocks even if it
+ // isn't immediately after the operation.
+ test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
- // Wait for the threads to complete and report any errors encountered from running them.
- threads.forEach((thread) => {
- thread.join();
- thread.join = () => {}; // Make join a no-op for the finally below.
- assert(!thread.hasFailed(), "One of the threads failed. See above for details.");
- });
- } finally {
- // Make sure we wait for all threads to finish.
- threads.forEach(thread => thread.join());
- }
+ // Restart oplog application on the secondary and ensure the blocked collections become
+ // unblocked.
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+ replTest.awaitReplication();
+ test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+
+ // Wait for the threads to complete and report any errors encountered from running them.
+ threads.forEach((thread) => {
+ thread.join();
+ thread.join = () => {}; // Make join a no-op for the finally below.
+ assert(!thread.hasFailed(), "One of the threads failed. See above for details.");
+ });
+ } finally {
+ // Make sure we wait for all threads to finish.
+ threads.forEach(thread => thread.join());
}
+}
- replTest.stopSet();
+replTest.stopSet();
}());
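
The testCases schema documented at the top of this file (prepare, performOp, blockedCollections, unblockedCollections, and an optional localOnly flag) extends naturally to other catalog operations. A hypothetical entry, not one of the cases in the file, showing the expected shape:

    var exampleCase = {
        prepare: function(db) {
            assert.writeOK(db.coll.insert({_id: 1}));
        },
        performOp: function(db) {
            // Any replicated catalog op whose visibility is being probed.
            assert.commandWorked(db.coll.ensureIndex({y: 1}));
        },
        blockedCollections: ['coll'],
        unblockedCollections: ['otherDoesNotExist'],
    };
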
diff --git a/jstests/replsets/read_concern_majority_getmore_secondaries.js b/jstests/replsets/read_concern_majority_getmore_secondaries.js
index 6db3658733e..5d6624d2f37 100644
--- a/jstests/replsets/read_concern_majority_getmore_secondaries.js
+++ b/jstests/replsets/read_concern_majority_getmore_secondaries.js
@@ -1,80 +1,78 @@
// Test that getMore for a majority read on a secondary only reads committed data.
// @tags: [requires_majority_read_concern]
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const name = "read_concern_majority_getmore_secondaries";
- const replSet = new ReplSetTest({
- name: name,
- nodes:
- [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replSet.startSet();
- replSet.initiate();
+const name = "read_concern_majority_getmore_secondaries";
+const replSet = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+replSet.startSet();
+replSet.initiate();
- function stopDataReplication(node) {
- jsTest.log("Stop data replication on " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- }
+function stopDataReplication(node) {
+ jsTest.log("Stop data replication on " + node.host);
+ assert.commandWorked(
+ node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+}
- function startDataReplication(node) {
- jsTest.log("Start data replication on " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- }
+function startDataReplication(node) {
+ jsTest.log("Start data replication on " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+}
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const primary = replSet.getPrimary();
- const secondaries = replSet.getSecondaries();
- const secondary = secondaries[0];
+const primary = replSet.getPrimary();
+const secondaries = replSet.getSecondaries();
+const secondary = secondaries[0];
- const primaryDB = primary.getDB(dbName);
- const secondaryDB = secondary.getDB(dbName);
+const primaryDB = primary.getDB(dbName);
+const secondaryDB = secondary.getDB(dbName);
- // Insert data on primary and allow it to become committed.
- for (let i = 0; i < 4; i++) {
- assert.commandWorked(primaryDB[collName].insert({_id: i}));
- }
+// Insert data on primary and allow it to become committed.
+for (let i = 0; i < 4; i++) {
+ assert.commandWorked(primaryDB[collName].insert({_id: i}));
+}
- // Await commit.
- replSet.awaitReplication();
- replSet.awaitLastOpCommitted();
+// Await commit.
+replSet.awaitReplication();
+replSet.awaitLastOpCommitted();
- // Stop data replication on 2 secondaries to prevent writes being committed.
- stopDataReplication(secondaries[1]);
- stopDataReplication(secondaries[2]);
+// Stop data replication on 2 secondaries to prevent writes being committed.
+stopDataReplication(secondaries[1]);
+stopDataReplication(secondaries[2]);
- // Write more data to primary.
- for (let i = 4; i < 8; i++) {
- assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
- }
+// Write more data to primary.
+for (let i = 4; i < 8; i++) {
+ assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
+}
- // Check that it reached the secondary.
- assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}, {_id: 7}],
- secondaryDB[collName].find().sort({_id: 1}).toArray());
+// Check that it reached the secondary.
+assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}, {_id: 7}],
+ secondaryDB[collName].find().sort({_id: 1}).toArray());
- // It is important that this query does not do an in-memory sort. Otherwise the initial find
- // will consume all of the results from the storage engine in order to sort them, so we will not
- // be testing that the getMore does not read uncommitted data from the storage engine.
- let res = primaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
- assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
+// It is important that this query does not do an in-memory sort. Otherwise the initial find
+// will consume all of the results from the storage engine in order to sort them, so we will not
+// be testing that the getMore does not read uncommitted data from the storage engine.
+let res = primaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
+assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
- // Similarly, this query must not do an in-memory sort.
- res = secondaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
- assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
+// Similarly, this query must not do an in-memory sort.
+res = secondaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
+assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
- // Disable failpoints and shutdown.
- replSet.getSecondaries().forEach(startDataReplication);
- replSet.stopSet();
+// Disable failpoints and shutdown.
+replSet.getSecondaries().forEach(startDataReplication);
+replSet.stopSet();
}());
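
The batchSize(2) in the queries above is the crux of this test: it forces the shell to issue getMore commands to drain the cursor, and each getMore, not just the initial find, must respect the majority snapshot. A condensed sketch of the pattern, with illustrative names that are not part of the test suite:

    // Assumes 'replSet' is a running, initiated ReplSetTest.
    const coll = replSet.getPrimary().getDB("test").sketch;
    for (let i = 0; i < 8; i++) {
        assert.commandWorked(coll.insert({_id: i}));
    }
    replSet.awaitReplication();
    // batchSize(2) forces getMore round trips; every batch should contain
    // only majority-committed documents.
    const docs = coll.find().sort({_id: 1}).batchSize(2).readConcern("majority").toArray();
    assert.eq(8, docs.length);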
diff --git a/jstests/replsets/read_concern_uninitated_set.js b/jstests/replsets/read_concern_uninitated_set.js
index 0737b88e229..71f9b2c6956 100644
--- a/jstests/replsets/read_concern_uninitated_set.js
+++ b/jstests/replsets/read_concern_uninitated_set.js
@@ -5,59 +5,58 @@
* @tags: [requires_persistence, requires_majority_read_concern]
*/
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- const localDB = rst.nodes[0].getDB('local');
- assert.commandWorked(localDB.test.insert({_id: 0}));
- assert.commandWorked(localDB.runCommand({
- isMaster: 1,
- "$clusterTime": {
- "clusterTime": Timestamp(1, 1),
- "signature":
- {"hash": BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId": NumberLong(0)}
- }
- }));
- jsTestLog("Local readConcern on local database should work.");
- const res = assert.commandWorked(localDB.runCommand(
- {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "local"}}));
- assert.eq([{_id: 0}], res.cursor.firstBatch);
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+const localDB = rst.nodes[0].getDB('local');
+assert.commandWorked(localDB.test.insert({_id: 0}));
+assert.commandWorked(localDB.runCommand({
+ isMaster: 1,
+ "$clusterTime": {
+ "clusterTime": Timestamp(1, 1),
+ "signature": {"hash": BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId": NumberLong(0)}
+ }
+}));
+jsTestLog("Local readConcern on local database should work.");
+const res = assert.commandWorked(localDB.runCommand(
+ {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "local"}}));
+assert.eq([{_id: 0}], res.cursor.firstBatch);
- jsTestLog("Majority readConcern should fail with NotYetInitialized.");
- assert.commandFailedWithCode(
- localDB.runCommand(
- {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "majority"}}),
- ErrorCodes.NotYetInitialized);
+jsTestLog("Majority readConcern should fail with NotYetInitialized.");
+assert.commandFailedWithCode(
+ localDB.runCommand(
+ {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "majority"}}),
+ ErrorCodes.NotYetInitialized);
- jsTestLog("afterClusterTime readConcern should fail with NotYetInitialized.");
- assert.commandFailedWithCode(localDB.runCommand({
- find: "test",
- filter: {},
- maxTimeMS: 60000,
- readConcern: {afterClusterTime: Timestamp(1, 1)}
- }),
- ErrorCodes.NotYetInitialized);
+jsTestLog("afterClusterTime readConcern should fail with NotYetInitialized.");
+assert.commandFailedWithCode(localDB.runCommand({
+ find: "test",
+ filter: {},
+ maxTimeMS: 60000,
+ readConcern: {afterClusterTime: Timestamp(1, 1)}
+}),
+ ErrorCodes.NotYetInitialized);
- jsTestLog("oplog query should fail with NotYetInitialized.");
- assert.commandFailedWithCode(localDB.runCommand({
- find: "oplog.rs",
- filter: {ts: {$gte: Timestamp(1520004466, 2)}},
- tailable: true,
- oplogReplay: true,
- awaitData: true,
- maxTimeMS: 60000,
- batchSize: 13981010,
- term: 1,
- readConcern: {afterClusterTime: Timestamp(1, 1)}
- }),
- ErrorCodes.NotYetInitialized);
- rst.stopSet();
+jsTestLog("oplog query should fail with NotYetInitialized.");
+assert.commandFailedWithCode(localDB.runCommand({
+ find: "oplog.rs",
+ filter: {ts: {$gte: Timestamp(1520004466, 2)}},
+ tailable: true,
+ oplogReplay: true,
+ awaitData: true,
+ maxTimeMS: 60000,
+ batchSize: 13981010,
+ term: 1,
+ readConcern: {afterClusterTime: Timestamp(1, 1)}
+}),
+ ErrorCodes.NotYetInitialized);
+rst.stopSet();
}());
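
The failure mode asserted above can be reproduced in isolation: on a node that has been started but whose replica set was never initiated, local reads against the 'local' database succeed while majority and afterClusterTime reads are rejected. A minimal sketch, assuming a storage engine with majority read concern support:

    const rst = new ReplSetTest({nodes: 1});
    rst.startSet();  // deliberately no rst.initiate()
    const localDB = rst.nodes[0].getDB("local");
    assert.commandWorked(
        localDB.runCommand({find: "startup_log", readConcern: {level: "local"}}));
    assert.commandFailedWithCode(
        localDB.runCommand({find: "startup_log", readConcern: {level: "majority"}}),
        ErrorCodes.NotYetInitialized);
    rst.stopSet();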
diff --git a/jstests/replsets/read_majority_two_arbs.js b/jstests/replsets/read_majority_two_arbs.js
index d2aeea89280..f49ebe71dd7 100644
--- a/jstests/replsets/read_majority_two_arbs.js
+++ b/jstests/replsets/read_majority_two_arbs.js
@@ -6,68 +6,67 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
+"use strict";
- function log(arg) {
- jsTest.log(tojson(arg));
- }
+function log(arg) {
+ jsTest.log(tojson(arg));
+}
- // Set up a set and grab things for later.
- var name = "read_majority_two_arbs";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+// Set up a set and grab things for later.
+var name = "read_majority_two_arbs";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], arbiterOnly: true},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], arbiterOnly: true},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
- replTest.initiate(config);
+replTest.initiate(config);
- var primary = replTest.getPrimary();
- var db = primary.getDB(name);
- var t = db[name];
+var primary = replTest.getPrimary();
+var db = primary.getDB(name);
+var t = db[name];
- function doRead(readConcern) {
- readConcern.maxTimeMS = 3000;
- var res = assert.commandWorked(t.runCommand('find', readConcern));
- var docs = (new DBCommandCursor(db, res)).toArray();
- assert.gt(docs.length, 0, "no docs returned!");
- return docs[0].state;
- }
+function doRead(readConcern) {
+ readConcern.maxTimeMS = 3000;
+ var res = assert.commandWorked(t.runCommand('find', readConcern));
+ var docs = (new DBCommandCursor(db, res)).toArray();
+ assert.gt(docs.length, 0, "no docs returned!");
+ return docs[0].state;
+}
- function doDirtyRead() {
- log("doing dirty read");
- var ret = doRead({"readConcern": {"level": "local"}});
- log("done doing dirty read.");
- return ret;
- }
+function doDirtyRead() {
+ log("doing dirty read");
+ var ret = doRead({"readConcern": {"level": "local"}});
+ log("done doing dirty read.");
+ return ret;
+}
- function doCommittedRead() {
- log("doing committed read");
- var ret = doRead({"readConcern": {"level": "majority"}});
- log("done doing committed read.");
- return ret;
- }
+function doCommittedRead() {
+ log("doing committed read");
+ var ret = doRead({"readConcern": {"level": "majority"}});
+ log("done doing committed read.");
+ return ret;
+}
- jsTest.log("doing write");
- assert.writeOK(
- t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
- jsTest.log("doing read");
- assert.eq(doDirtyRead(), 0);
- jsTest.log("doing committed read");
- assert.eq(doCommittedRead(), 0);
- jsTest.log("stopping replTest; test completed successfully");
- replTest.stopSet();
+jsTest.log("doing write");
+assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
+jsTest.log("doing read");
+assert.eq(doDirtyRead(), 0);
+jsTest.log("doing committed read");
+assert.eq(doCommittedRead(), 0);
+jsTest.log("stopping replTest; test completed successfully");
+replTest.stopSet();
}());
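
The doRead helper above follows a reusable shape: run 'find' through runCommand with an explicit readConcern and a maxTimeMS bound, then wrap the raw reply in a DBCommandCursor. A standalone sketch of that helper, where 'db' and 't' are assumed to point at a test database and collection:

    function readWith(level) {
        const res = assert.commandWorked(
            t.runCommand('find', {readConcern: {level: level}, maxTimeMS: 3000}));
        return new DBCommandCursor(db, res).toArray();
    }
    printjson(readWith("local"));     // may observe not-yet-committed writes
    printjson(readWith("majority"));  // observes only majority-committed writes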
diff --git a/jstests/replsets/read_operations_during_rollback.js b/jstests/replsets/read_operations_during_rollback.js
index 18cd188921c..bab24b6e477 100644
--- a/jstests/replsets/read_operations_during_rollback.js
+++ b/jstests/replsets/read_operations_during_rollback.js
@@ -2,99 +2,95 @@
* This test makes sure 'find' and 'getMore' commands fail correctly during rollback.
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "coll";
-
- let setFailPoint = (node, failpoint) => {
- jsTestLog("Setting fail point " + failpoint);
- assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- };
-
- let clearFailPoint = (node, failpoint) => {
- jsTestLog("Clearing fail point " + failpoint);
- assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest();
-
- // Insert a document to be read later.
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert({}));
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
-
- setFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
-
- setFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
-
- const joinGetMoreThread = startParallelShell(() => {
- db.getMongo().setSlaveOk();
- const cursorID =
- assert.commandWorked(db.runCommand({"find": "coll", batchSize: 0})).cursor.id;
- // Make sure an outstanding read operation gets killed during rollback even though the read
- // was started before rollback. Outstanding read operations are killed during rollback and
- // their connections are closed shortly after. So we would get either an error
- // (InterruptedDueToReplStateChange) if the error message is sent out and received before
- // the connection is closed or a network error exception.
- try {
- assert.commandFailedWithCode(db.runCommand({"getMore": cursorID, collection: "coll"}),
- ErrorCodes.InterruptedDueToReplStateChange);
- } catch (e) {
- assert.includes(e.toString(), "network error while attempting to run command");
- }
- }, rollbackNode.port);
-
- const cursorIdToBeReadDuringRollback =
- assert
- .commandWorked(rollbackNode.getDB(dbName).runCommand({"find": collName, batchSize: 0}))
- .cursor.id;
-
- // Wait for 'getMore' to hang.
- checkLog.contains(rollbackNode, "GetMoreHangBeforeReadLock fail point enabled.");
-
- // Start rollback.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
- jsTestLog("Reconnecting to " + rollbackNode.host + " after rollback");
- reconnect(rollbackNode.getDB(dbName));
-
- // Wait for rollback to hang.
- checkLog.contains(rollbackNode, "rollbackHangAfterTransitionToRollback fail point enabled.");
-
- clearFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
-
- jsTestLog("Wait for 'getMore' thread to join.");
- joinGetMoreThread();
-
- jsTestLog("Reading during rollback.");
- // Make sure that read operations fail during rollback.
- assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
- ErrorCodes.NotMasterOrSecondary);
- assert.commandFailedWithCode(
- rollbackNode.getDB(dbName).runCommand(
- {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
- ErrorCodes.NotMasterOrSecondary);
-
- // Disable the best-effort check for primary-ness in the service entry point, so that we
- // exercise the real check for primary-ness in 'find' and 'getMore' commands.
- setFailPoint(rollbackNode, "skipCheckingForNotMasterInCommandDispatch");
- jsTestLog("Reading during rollback (again with command dispatch checks disabled).");
- assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
- ErrorCodes.NotMasterOrSecondary);
- assert.commandFailedWithCode(
- rollbackNode.getDB(dbName).runCommand(
- {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
- ErrorCodes.NotMasterOrSecondary);
-
- clearFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
-
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check the replica set.
- rollbackTest.stop();
+"use strict";
+
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "coll";
+
+let setFailPoint = (node, failpoint) => {
+ jsTestLog("Setting fail point " + failpoint);
+ assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+};
+
+let clearFailPoint = (node, failpoint) => {
+ jsTestLog("Clearing fail point " + failpoint);
+ assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+};
+
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest();
+
+// Insert a document to be read later.
+assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert({}));
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+
+setFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
+
+setFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
+
+const joinGetMoreThread = startParallelShell(() => {
+ db.getMongo().setSlaveOk();
+ const cursorID = assert.commandWorked(db.runCommand({"find": "coll", batchSize: 0})).cursor.id;
+ // Make sure an outstanding read operation gets killed during rollback even though the read
+ // was started before rollback. Outstanding read operations are killed during rollback and
+ // their connections are closed shortly after. So we would get either an error
+ // (InterruptedDueToReplStateChange) if the error message is sent out and received before
+ // the connection is closed or a network error exception.
+ try {
+ assert.commandFailedWithCode(db.runCommand({"getMore": cursorID, collection: "coll"}),
+ ErrorCodes.InterruptedDueToReplStateChange);
+ } catch (e) {
+ assert.includes(e.toString(), "network error while attempting to run command");
+ }
+}, rollbackNode.port);
+
+const cursorIdToBeReadDuringRollback =
+ assert.commandWorked(rollbackNode.getDB(dbName).runCommand({"find": collName, batchSize: 0}))
+ .cursor.id;
+
+// Wait for 'getMore' to hang.
+checkLog.contains(rollbackNode, "GetMoreHangBeforeReadLock fail point enabled.");
+
+// Start rollback.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+
+jsTestLog("Reconnecting to " + rollbackNode.host + " after rollback");
+reconnect(rollbackNode.getDB(dbName));
+
+// Wait for rollback to hang.
+checkLog.contains(rollbackNode, "rollbackHangAfterTransitionToRollback fail point enabled.");
+
+clearFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
+
+jsTestLog("Wait for 'getMore' thread to join.");
+joinGetMoreThread();
+
+jsTestLog("Reading during rollback.");
+// Make sure that read operations fail during rollback.
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
+ ErrorCodes.NotMasterOrSecondary);
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand(
+ {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
+ ErrorCodes.NotMasterOrSecondary);
+
+// Disable the best-effort check for primary-ness in the service entry point, so that we
+// exercise the real check for primary-ness in 'find' and 'getMore' commands.
+setFailPoint(rollbackNode, "skipCheckingForNotMasterInCommandDispatch");
+jsTestLog("Reading during rollback (again with command dispatch checks disabled).");
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
+ ErrorCodes.NotMasterOrSecondary);
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand(
+ {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
+ ErrorCodes.NotMasterOrSecondary);
+
+clearFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
+
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check the replica set.
+rollbackTest.stop();
}());
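
The setFailPoint/clearFailPoint pair above is the standard configureFailPoint idiom. When a fail point should never outlive a single block of test logic, a try/finally wrapper is a natural variation; a sketch, where 'conn' is any connection with admin access and 'fn' is the code to run under the fail point:

    function withFailPoint(conn, failpoint, fn) {
        assert.commandWorked(
            conn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
        try {
            fn();
        } finally {
            assert.commandWorked(
                conn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
        }
    }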
diff --git a/jstests/replsets/read_operations_during_step_down.js b/jstests/replsets/read_operations_during_step_down.js
index 4f9507ff902..667e353d2fe 100644
--- a/jstests/replsets/read_operations_during_step_down.js
+++ b/jstests/replsets/read_operations_during_step_down.js
@@ -8,118 +8,116 @@ load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
(function() {
- "use strict";
+"use strict";
- const testName = "readOpsDuringStepDown";
- const dbName = "test";
- const collName = "coll";
+const testName = "readOpsDuringStepDown";
+const dbName = "test";
+const collName = "coll";
- var rst = new ReplSetTest({name: testName, nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: testName, nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryAdmin = primary.getDB("admin");
- const primaryColl = primaryDB[collName];
- const collNss = primaryColl.getFullName();
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryAdmin = primary.getDB("admin");
+const primaryColl = primaryDB[collName];
+const collNss = primaryColl.getFullName();
- TestData.dbName = dbName;
- TestData.collName = collName;
+TestData.dbName = dbName;
+TestData.collName = collName;
- jsTestLog("1. Do a document write");
- assert.writeOK(
+jsTestLog("1. Do a document write");
+assert.writeOK(
        primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
- rst.awaitReplication();
-
- // Open a cursor on primary.
- const cursorIdToBeReadAfterStepDown =
- assert.commandWorked(primaryDB.runCommand({"find": collName, batchSize: 0})).cursor.id;
-
- jsTestLog("2. Start blocking getMore cmd before step down");
- const joinGetMoreThread = startParallelShell(() => {
- // Open another cursor on primary before step down.
- primaryDB = db.getSiblingDB(TestData.dbName);
- const cursorIdToBeReadDuringStepDown =
- assert.commandWorked(primaryDB.runCommand({"find": TestData.collName, batchSize: 0}))
- .cursor.id;
-
- // Enable the fail point for get more cmd.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "alwaysOn"}));
-
- getMoreRes = assert.commandWorked(primaryDB.runCommand(
- {"getMore": cursorIdToBeReadDuringStepDown, collection: TestData.collName}));
- assert.docEq([{_id: 0}], getMoreRes.cursor.nextBatch);
- }, primary.port);
-
- // Wait for getmore cmd to reach the fail point.
- waitForCurOpByFailPoint(primaryAdmin, collNss, "waitAfterPinningCursorBeforeGetMoreBatch");
-
- jsTestLog("2. Start blocking find cmd before step down");
- const joinFindThread = startParallelShell(() => {
- // Enable the fail point for find cmd.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
-
- var findRes = assert.commandWorked(
- db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
- assert.docEq([{_id: 0}], findRes.cursor.firstBatch);
-
- }, primary.port);
-
- // Wait for find cmd to reach the fail point.
- waitForCurOpByFailPoint(primaryAdmin, collNss, "waitInFindBeforeMakingBatch");
-
- jsTestLog("3. Make primary step down");
- const joinStepDownThread = startParallelShell(() => {
- assert.commandWorked(db.adminCommand({"replSetStepDown": 100, "force": true}));
- }, primary.port);
-
- // Wait until the step down has started to kill user operations.
- checkLog.contains(primary, "Starting to kill user operations");
-
- // Enable "waitAfterReadCommandFinishesExecution" fail point to make sure the find and get more
- // commands on database 'test' do not complete before step down.
- assert.commandWorked(primaryAdmin.runCommand({
- configureFailPoint: "waitAfterReadCommandFinishesExecution",
- data: {db: dbName},
- mode: "alwaysOn"
- }));
-
- jsTestLog("4. Disable fail points");
- assert.commandWorked(
- primaryAdmin.runCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- assert.commandWorked(primaryAdmin.runCommand(
- {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
-
- // Wait until the primary transitioned to SECONDARY state.
- joinStepDownThread();
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // We don't want to check if we have reached the "waitAfterReadCommandFinishesExecution" fail point
- // because we already know that the primary has stepped down successfully. This implies that
- // the find and get more commands are still running even after the node stepped down.
- assert.commandWorked(primaryAdmin.runCommand(
- {configureFailPoint: "waitAfterReadCommandFinishesExecution", mode: "off"}));
-
- // Wait for find & getmore thread to join.
- joinGetMoreThread();
- joinFindThread();
-
- jsTestLog("5. Start get more cmd after step down");
- var getMoreRes = assert.commandWorked(
- primaryDB.runCommand({"getMore": cursorIdToBeReadAfterStepDown, collection: collName}));
+rst.awaitReplication();
+
+// Open a cursor on primary.
+const cursorIdToBeReadAfterStepDown =
+ assert.commandWorked(primaryDB.runCommand({"find": collName, batchSize: 0})).cursor.id;
+
+jsTestLog("2. Start blocking getMore cmd before step down");
+const joinGetMoreThread = startParallelShell(() => {
+ // Open another cursor on primary before step down.
+ primaryDB = db.getSiblingDB(TestData.dbName);
+ const cursorIdToBeReadDuringStepDown =
+ assert.commandWorked(primaryDB.runCommand({"find": TestData.collName, batchSize: 0}))
+ .cursor.id;
+
+ // Enable the fail point for get more cmd.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "alwaysOn"}));
+
+ getMoreRes = assert.commandWorked(primaryDB.runCommand(
+ {"getMore": cursorIdToBeReadDuringStepDown, collection: TestData.collName}));
assert.docEq([{_id: 0}], getMoreRes.cursor.nextBatch);
+}, primary.port);
- // Validate that no operations got killed on step down and no network disconnection happened due
- // to failed unacknowledged operations.
- let replMetrics =
- assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 0);
- // Should account for find and getmore commands issued before step down.
- assert.gte(replMetrics.stepDown.userOperationsRunning, 2);
- assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
+// Wait for getmore cmd to reach the fail point.
+waitForCurOpByFailPoint(primaryAdmin, collNss, "waitAfterPinningCursorBeforeGetMoreBatch");
- rst.stopSet();
+jsTestLog("2. Start blocking find cmd before step down");
+const joinFindThread = startParallelShell(() => {
+ // Enable the fail point for find cmd.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
+
+ var findRes = assert.commandWorked(
+ db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
+ assert.docEq([{_id: 0}], findRes.cursor.firstBatch);
+}, primary.port);
+
+// Wait for find cmd to reach the fail point.
+waitForCurOpByFailPoint(primaryAdmin, collNss, "waitInFindBeforeMakingBatch");
+
+jsTestLog("3. Make primary step down");
+const joinStepDownThread = startParallelShell(() => {
+ assert.commandWorked(db.adminCommand({"replSetStepDown": 100, "force": true}));
+}, primary.port);
+
+// Wait until the step down has started to kill user operations.
+checkLog.contains(primary, "Starting to kill user operations");
+
+// Enable "waitAfterReadCommandFinishesExecution" fail point to make sure the find and get more
+// commands on database 'test' do not complete before step down.
+assert.commandWorked(primaryAdmin.runCommand({
+ configureFailPoint: "waitAfterReadCommandFinishesExecution",
+ data: {db: dbName},
+ mode: "alwaysOn"
+}));
+
+jsTestLog("4. Disable fail points");
+assert.commandWorked(
+ primaryAdmin.runCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+assert.commandWorked(primaryAdmin.runCommand(
+ {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
+
+// Wait until the primary transitioned to SECONDARY state.
+joinStepDownThread();
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// We don't want to check if we have reached the "waitAfterReadCommandFinishesExecution" fail point
+// because we already know that the primary has stepped down successfully. This implies that
+// the find and get more commands are still running even after the node stepped down.
+assert.commandWorked(primaryAdmin.runCommand(
+ {configureFailPoint: "waitAfterReadCommandFinishesExecution", mode: "off"}));
+
+// Wait for find & getmore thread to join.
+joinGetMoreThread();
+joinFindThread();
+
+jsTestLog("5. Start get more cmd after step down");
+var getMoreRes = assert.commandWorked(
+ primaryDB.runCommand({"getMore": cursorIdToBeReadAfterStepDown, collection: collName}));
+assert.docEq([{_id: 0}], getMoreRes.cursor.nextBatch);
+
+// Validate that no operations got killed on step down and no network disconnection happened due
+// to failed unacknowledged operations.
+let replMetrics = assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
+assert.eq(replMetrics.stepDown.userOperationsKilled, 0);
+// Should account for find and getmore commands issued before step down.
+assert.gte(replMetrics.stepDown.userOperationsRunning, 2);
+assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
+
+rst.stopSet();
})();
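
The closing assertions read the step-down counters out of serverStatus. A sketch of inspecting the same counters by hand, with field names taken from the assertions above:

    const stepDownMetrics =
        assert.commandWorked(db.adminCommand({serverStatus: 1})).metrics.repl;
    printjson(stepDownMetrics.stepDown);  // userOperationsKilled, userOperationsRunning
    printjson(stepDownMetrics.network);   // notMasterUnacknowledgedWrites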
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
index 7ae2f41d3c8..f6f83755f1f 100644
--- a/jstests/replsets/reconfig.js
+++ b/jstests/replsets/reconfig.js
@@ -3,46 +3,45 @@
* succeed without force if force is needed.
*/
(function() {
- "use strict";
-
- // Skip db hash check because secondary is left with a different config.
- TestData.skipCheckDBHashes = true;
-
- var numNodes = 5;
- var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes});
- var nodes = replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
-
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Valid reconfig");
- var config = primary.getDB("local").system.replset.findOne();
- printjson(config);
- config.version++;
- config.members[nodes.indexOf(primary)].priority = 2;
- assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: config}));
- replTest.awaitReplication();
-
- jsTestLog("Invalid reconfig");
- config.version++;
- var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"};
- config.members.push(badMember);
- var invalidConfigCode = 93;
- assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}),
- invalidConfigCode);
-
- jsTestLog("No force when needed.");
- config.members = config.members.slice(0, numNodes - 1);
- var secondary = replTest.getSecondary();
- config.members[nodes.indexOf(secondary)].priority = 5;
- var admin = secondary.getDB("admin");
- var forceRequiredCode = 10107;
- assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), forceRequiredCode);
-
- jsTestLog("Force when appropriate");
- assert.commandWorked(admin.runCommand({replSetReconfig: config, force: true}));
-
- replTest.stopSet();
+"use strict";
+
+// Skip db hash check because secondary is left with a different config.
+TestData.skipCheckDBHashes = true;
+
+var numNodes = 5;
+var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes});
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Valid reconfig");
+var config = primary.getDB("local").system.replset.findOne();
+printjson(config);
+config.version++;
+config.members[nodes.indexOf(primary)].priority = 2;
+assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: config}));
+replTest.awaitReplication();
+
+jsTestLog("Invalid reconfig");
+config.version++;
+var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"};
+config.members.push(badMember);
+var invalidConfigCode = 93;
+assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}), invalidConfigCode);
+
+jsTestLog("No force when needed.");
+config.members = config.members.slice(0, numNodes - 1);
+var secondary = replTest.getSecondary();
+config.members[nodes.indexOf(secondary)].priority = 5;
+var admin = secondary.getDB("admin");
+var forceRequiredCode = 10107;
+assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), forceRequiredCode);
+
+jsTestLog("Force when appropriate");
+assert.commandWorked(admin.runCommand({replSetReconfig: config, force: true}));
+
+replTest.stopSet();
}());
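
The reconfig round trip exercised above follows the usual shape: read the live config, bump its version, edit the members array, and resubmit. A minimal sketch, assuming 'primary' is a connection to the current primary:

    let cfg = primary.getDB("local").system.replset.findOne();
    cfg.version++;
    cfg.members[0].priority = 2;  // illustrative edit
    assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));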
diff --git a/jstests/replsets/reconfig_during_election.js b/jstests/replsets/reconfig_during_election.js
index aaf33c8ac67..20e67a483c8 100644
--- a/jstests/replsets/reconfig_during_election.js
+++ b/jstests/replsets/reconfig_during_election.js
@@ -3,47 +3,50 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
+load("jstests/libs/check_log.js");
- const rst = new ReplSetTest({nodes: 2});
- const nodes = rst.startSet();
- const config = rst.getReplSetConfig();
- // Prevent elections and set heartbeat timeout >> electionHangsBeforeUpdateMemberState.
- config.settings = {electionTimeoutMillis: 12 * 60 * 60 * 1000, heartbeatTimeoutSecs: 60 * 1000};
- rst.initiate(config);
+const rst = new ReplSetTest({nodes: 2});
+const nodes = rst.startSet();
+const config = rst.getReplSetConfig();
+// Prevent elections and set heartbeat timeout >> electionHangsBeforeUpdateMemberState.
+config.settings = {
+ electionTimeoutMillis: 12 * 60 * 60 * 1000,
+ heartbeatTimeoutSecs: 60 * 1000
+};
+rst.initiate(config);
- const incumbent = rst.getPrimary();
- const candidate = rst.getSecondary();
+const incumbent = rst.getPrimary();
+const candidate = rst.getSecondary();
- jsTestLog("Step down");
+jsTestLog("Step down");
- assert.commandWorked(candidate.adminCommand({
- configureFailPoint: "electionHangsBeforeUpdateMemberState",
- mode: "alwaysOn",
- data: {waitForMillis: 10 * 1000}
- }));
+assert.commandWorked(candidate.adminCommand({
+ configureFailPoint: "electionHangsBeforeUpdateMemberState",
+ mode: "alwaysOn",
+ data: {waitForMillis: 10 * 1000}
+}));
- // The incumbent sends replSetStepUp to the candidate for election handoff.
- assert.commandWorked(incumbent.adminCommand({
- replSetStepDown: ElectionHandoffTest.stepDownPeriodSecs,
- secondaryCatchUpPeriodSecs: ElectionHandoffTest.stepDownPeriodSecs / 2
- }));
+// The incumbent sends replSetStepUp to the candidate for election handoff.
+assert.commandWorked(incumbent.adminCommand({
+ replSetStepDown: ElectionHandoffTest.stepDownPeriodSecs,
+ secondaryCatchUpPeriodSecs: ElectionHandoffTest.stepDownPeriodSecs / 2
+}));
- jsTestLog("Wait for candidate to win the election");
+jsTestLog("Wait for candidate to win the election");
- checkLog.contains(
- candidate, "election succeeded - electionHangsBeforeUpdateMemberState fail point enabled");
+checkLog.contains(candidate,
+ "election succeeded - electionHangsBeforeUpdateMemberState fail point enabled");
- jsTestLog("Try to interrupt it with a reconfig");
+jsTestLog("Try to interrupt it with a reconfig");
- config.members[nodes.indexOf(candidate)].priority = 2;
- config.version++;
- assert.commandWorked(candidate.adminCommand({replSetReconfig: config, force: true}));
+config.members[nodes.indexOf(candidate)].priority = 2;
+config.version++;
+assert.commandWorked(candidate.adminCommand({replSetReconfig: config, force: true}));
- assert.commandWorked(candidate.adminCommand(
- {configureFailPoint: "electionHangsBeforeUpdateMemberState", mode: "off"}));
+assert.commandWorked(candidate.adminCommand(
+ {configureFailPoint: "electionHangsBeforeUpdateMemberState", mode: "off"}));
- rst.stopSet();
+rst.stopSet();
})();
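
The replSetStepDown invocation above is what triggers election handoff: the outgoing primary nominates the freshest electable secondary with replSetStepUp. A sketch of the command in isolation, with illustrative durations and 'primary' as an assumed connection:

    assert.commandWorked(primary.adminCommand({
        replSetStepDown: 30,             // seconds the old primary refuses election
        secondaryCatchUpPeriodSecs: 15   // window for a secondary to catch up first
    }));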
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
index d0aefb412a8..2cc177445e1 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
@@ -12,250 +12,252 @@
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+const session1 = primary.startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- const session1 = primary.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+let session2 = primary.startSession();
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- let session2 = primary.startSession();
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let session3 = primary.startSession();
+let sessionDB3 = session3.getDatabase(dbName);
+const sessionColl3 = sessionDB3.getCollection(collName);
- let session3 = primary.startSession();
- let sessionDB3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDB3.getCollection(collName);
+assert.commandWorked(sessionColl1.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+assert.commandWorked(sessionColl3.insert({_id: 3}));
+assert.commandWorked(sessionColl3.insert({_id: 4}));
- assert.commandWorked(sessionColl1.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- assert.commandWorked(sessionColl3.insert({_id: 3}));
- assert.commandWorked(sessionColl3.insert({_id: 4}));
+jsTestLog("Preparing three transactions");
- jsTestLog("Preparing three transactions");
+session1.startTransaction();
+assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+session3.startTransaction();
+assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
+const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
- session3.startTransaction();
- assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
- const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
+const lsid3 = session3.getSessionId();
+const txnNumber3 = session3.getTxnNumber_forTesting();
- const lsid3 = session3.getSessionId();
- const txnNumber3 = session3.getTxnNumber_forTesting();
+jsTestLog("Restarting the secondary");
- jsTestLog("Restarting the secondary");
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync after the secondary
+// has copied {_id: 1}, {_id: 2} and {_id: 3}. This way we can do some writes on the sync source
+// while initial sync is paused and know that those writes won't be copied during collection
+// cloning. Instead, the writes must be applied during oplog application.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 3}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync after the secondary
- // has copied {_id: 1}, {_id: 2} and {_id: 3}. This way we can do some writes on the sync source
- // while initial sync is paused and know that those writes won't be copied during collection
- // cloning. Instead, the writes must be applied during oplog application.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 3}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+jsTestLog("Running operations while collection cloning is paused");
- jsTestLog("Running operations while collection cloning is paused");
+// Perform writes while collection cloning is paused so that we know they must be applied during
+// the oplog application stage of initial sync.
+assert.commandWorked(testColl.insert({_id: 5}));
+
+let session4 = primary.startSession();
+let sessionDB4 = session4.getDatabase(dbName);
+const sessionColl4 = sessionDB4.getCollection(collName);
- // Perform writes while collection cloning is paused so that we know they must be applied during
- // the oplog application stage of initial sync.
- assert.commandWorked(testColl.insert({_id: 5}));
-
- let session4 = primary.startSession();
- let sessionDB4 = session4.getDatabase(dbName);
- const sessionColl4 = sessionDB4.getCollection(collName);
-
- jsTestLog("Preparing the fourth transaction");
-
- // Prepare a transaction while collection cloning is paused so that its oplog entry must be
- // applied during the oplog application phase of initial sync.
- session4.startTransaction();
- assert.commandWorked(sessionColl4.update({_id: 4}, {_id: 4, a: 1}));
- const prepareTimestamp4 = PrepareHelpers.prepareTransaction(session4, {w: 1});
-
- jsTestLog("Resuming initial sync");
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Initial sync completed");
-
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
-
- // Make sure that while reading from the node that went through initial sync, we can't read
- // changes to the documents from any of the prepared transactions after initial sync. Also, make
- // sure that the writes performed while collection cloning was paused are present.
- const res = secondaryColl.find().sort({_id: 1}).toArray();
- assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}], res);
-
- jsTestLog("Checking that the first transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first prepared transaction
- // after initial sync.
- assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1});
-
- jsTestLog("Committing the first transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from a committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
-
- jsTestLog("Checking that the fourth transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the fourth prepared transaction
- // after initial sync.
- assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4});
-
- jsTestLog("Committing the fourth transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session4, prepareTimestamp4));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from a committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4, a: 1});
-
- jsTestLog("Stepping up the secondary");
-
- // Step up the secondary after initial sync is done and make sure the other two transactions are
- // properly prepared.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Force the second session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
- session2.setTxnNumber_forTesting(txnNumber2);
- sessionDB2 = session2.getDatabase(dbName);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 6}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the second transaction");
-
- // Make sure we can successfully commit the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp2,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }));
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
-
- jsTestLog("Attempting to run another transaction");
-
- // Make sure that we can run another conflicting transaction without any problems
- session2.startTransaction();
- assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
- prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- assert.docEq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
-
- // Force the third session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
- session3.setTxnNumber_forTesting(txnNumber3);
- sessionDB3 = session3.getDatabase(dbName);
-
- jsTestLog("Checking that the third transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the third prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- // Make sure that another write on the same document from the third transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the third transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB3.runCommand({
- insert: collName,
- documents: [{_id: 6}],
- txnNumber: NumberLong(txnNumber3),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Aborting the third transaction");
-
- // Make sure we can successfully abort the third transaction after recovery.
- assert.commandWorked(sessionDB3.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- replTest.stopSet();
+jsTestLog("Preparing the fourth transaction");
+
+// Prepare a transaction while collection cloning is paused so that its oplog entry must be
+// applied during the oplog application phase of initial sync.
+session4.startTransaction();
+assert.commandWorked(sessionColl4.update({_id: 4}, {_id: 4, a: 1}));
+const prepareTimestamp4 = PrepareHelpers.prepareTransaction(session4, {w: 1});
+
+jsTestLog("Resuming initial sync");
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Initial sync completed");
+
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+
+// Make sure that while reading from the node that went through initial sync, we can't read
+// changes to the documents from any of the prepared transactions after initial sync. Also, make
+// sure that the writes performed while collection cloning was paused are present.
+const res = secondaryColl.find().sort({_id: 1}).toArray();
+assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}], res);
+
+jsTestLog("Checking that the first transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the first prepared transaction
+// after initial sync.
+assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1});
+
+jsTestLog("Committing the first transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from a committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+
+jsTestLog("Checking that the fourth transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the fourth prepared transaction
+// after initial sync.
+assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4});
+
+jsTestLog("Committing the fourth transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session4, prepareTimestamp4));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from a committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4, a: 1});
+
+jsTestLog("Stepping up the secondary");
+
+// Step up the secondary after initial sync is done and make sure the other two transactions are
+// properly prepared.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Force the second session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
+session2.setTxnNumber_forTesting(txnNumber2);
+sessionDB2 = session2.getDatabase(dbName);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 6}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the second transaction");
+
+// Make sure we can successfully commit the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp2,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}));
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
+
+jsTestLog("Attempting to run another transaction");
+
+// Make sure that we can run another conflicting transaction without any problems
+session2.startTransaction();
+assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
+prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+assert.docEq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
+
+// Force the third session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
+session3.setTxnNumber_forTesting(txnNumber3);
+sessionDB3 = session3.getDatabase(dbName);
+
+jsTestLog("Checking that the third transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the third prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+// Make sure that another write on the same document from the third transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the third transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB3.runCommand({
+ insert: collName,
+ documents: [{_id: 6}],
+ txnNumber: NumberLong(txnNumber3),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Aborting the third transaction");
+
+// Make sure we can successfully abort the third transaction after recovery.
+assert.commandWorked(sessionDB3.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+replTest.stopSet();
})();
\ No newline at end of file
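
All of the prepare/commit choreography above goes through the PrepareHelpers library. A minimal sketch of the core sequence, assuming 'primary' is a connection to a replica set primary and an illustrative 'sketch' collection:

    load("jstests/core/txns/libs/prepare_helpers.js");
    const session = primary.startSession();
    const coll = session.getDatabase("test").getCollection("sketch");
    assert.commandWorked(coll.insert({_id: 1}));  // collection must exist before the txn
    session.startTransaction();
    assert.commandWorked(coll.update({_id: 1}, {_id: 1, a: 1}));
    const prepareTs = PrepareHelpers.prepareTransaction(session);  // returns the prepare timestamp
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));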
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
index 274c2eb4d94..a9f35921e8e 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
@@ -7,120 +7,122 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync_index_build";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 0}));
-
- jsTestLog("Restarting the secondary");
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync. This way we can do
- // some writes on the sync source while initial sync is paused and know that those writes
- // won't be copied during collection cloning. Instead, the writes must be applied during oplog
- // application.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 1}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
-
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
-
- jsTestLog("Running operations while collection cloning is paused");
-
- // Perform writes while collection cloning is paused so that we know they must be applied during
- // the oplog application stage of initial sync.
- assert.commandWorked(testColl.insert({_id: 1, a: 1}));
- assert.commandWorked(testColl.createIndex({a: 1}));
- // Make the index build hang on the secondary so that initial sync gets to the prepared-txn
- // reconstruct stage with the index build still running.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: "alwaysOn"}));
-
- let session = primary.startSession();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Preparing the transaction");
-
- // Prepare a transaction while collection cloning is paused so that its oplog entry must be
- // applied during the oplog application phase of initial sync.
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1, a: 1}, {_id: 1, a: 2}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
-
- clearRawMongoProgramOutput();
- jsTestLog("Resuming initial sync");
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- // Wait for log message.
- assert.soon(
- () =>
- rawMongoProgramOutput().indexOf(
- "blocking replication until index builds are finished on test.reconstruct_prepared_transactions_initial_sync_index_build, due to prepared transaction") >=
- 0,
- "replication not hanging");
-
- // Unblock index build.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: "off"}));
-
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Initial sync completed");
-
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
-
- // Make sure that while reading from the node that went through initial sync, we can't read
- // changes to the documents from the prepared transaction after initial sync. Also, make
- // sure that the writes performed while collection cloning was paused were applied.
- const res = secondaryColl.find().sort({_id: 1}).toArray();
- assert.eq(res, [{_id: 0}, {_id: 1, a: 1}], res);
-
- // Wait for the prepared transaction oplog entry to be majority committed before committing the
- // transaction.
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
-
- jsTestLog("Committing the transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from the committed transaction on the secondary.
- assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 2});
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync_index_build";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 0}));
+
+jsTestLog("Restarting the secondary");
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync. This way we can do
+// some writes on the sync source while initial sync is paused and know that those writes
+// won't be copied during collection cloning. Instead, they must be applied during oplog
+// application.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 1}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
+
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+
+jsTestLog("Running operations while collection cloning is paused");
+
+// Perform writes while collection cloning is paused so that we know they must be applied during
+// the oplog application stage of initial sync.
+assert.commandWorked(testColl.insert({_id: 1, a: 1}));
+assert.commandWorked(testColl.createIndex({a: 1}));
+// Make the index build hang on the secondary so that initial sync gets to the prepared-txn
+// reconstruct stage with the index build still running.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: "alwaysOn"}));
+
+let session = primary.startSession();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Preparing the transaction");
+
+// Prepare a transaction while collection cloning is paused so that its oplog entry must be
+// applied during the oplog application phase of initial sync.
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1, a: 1}, {_id: 1, a: 2}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+
+clearRawMongoProgramOutput();
+jsTestLog("Resuming initial sync");
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+// Wait for log message.
+assert.soon(
+ () =>
+ rawMongoProgramOutput().indexOf(
+ "blocking replication until index builds are finished on test.reconstruct_prepared_transactions_initial_sync_index_build, due to prepared transaction") >=
+ 0,
+ "replication not hanging");
+
+// Unblock index build.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: "off"}));
+
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Initial sync completed");
+
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+
+// Make sure that while reading from the node that went through initial sync, we can't read
+// changes to the documents from the prepared transaction after initial sync. Also, make
+// sure that the writes performed while collection cloning was paused were applied.
+const res = secondaryColl.find().sort({_id: 1}).toArray();
+assert.eq(res, [{_id: 0}, {_id: 1, a: 1}], res);
+
+// Wait for the prepared transaction oplog entry to be majority committed before committing the
+// transaction.
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
+
+jsTestLog("Committing the transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from the committed transaction on the secondary.
+assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 2});
+
+replTest.stopSet();
})();
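
The file above coordinates initial sync and the index build entirely through server failpoints; every toggle has the same configureFailPoint shape. A short sketch of the runtime form used here, with node and the namespace as stand-ins (the startup form instead passes 'failpoint.<name>' through setParameter, as in the hunk):

    // Enable a failpoint, optionally scoping it with a data document.
    assert.commandWorked(node.adminCommand({
        configureFailPoint: "initialSyncHangDuringCollectionClone",
        mode: "alwaysOn",
        data: {namespace: "test.coll", numDocsToClone: 1}
    }));
    // ... run operations while the node is paused at the failpoint ...
    // Disable the failpoint so the node can make progress again.
    assert.commandWorked(node.adminCommand(
        {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));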
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js
index 340599aed54..cf388620e20 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js
@@ -9,194 +9,195 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync_no_oplog_application";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync_no_oplog_application";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- const session1 = primary.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+const session1 = primary.startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- let session2 = primary.startSession();
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let session2 = primary.startSession();
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- let session3 = primary.startSession();
- let sessionDB3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDB3.getCollection(collName);
+let session3 = primary.startSession();
+let sessionDB3 = session3.getDatabase(dbName);
+const sessionColl3 = sessionDB3.getCollection(collName);
- assert.commandWorked(sessionColl1.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- assert.commandWorked(sessionColl3.insert({_id: 3}));
+assert.commandWorked(sessionColl1.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+assert.commandWorked(sessionColl3.insert({_id: 3}));
- jsTestLog("Preparing three transactions");
+jsTestLog("Preparing three transactions");
- session1.startTransaction();
- assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- session3.startTransaction();
- assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
- const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
-
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
+session3.startTransaction();
+assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
+const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
- const lsid3 = session3.getSessionId();
- const txnNumber3 = session3.getTxnNumber_forTesting();
-
- jsTestLog("Restarting the secondary");
-
- // Restart the secondary with startClean set to true so that it goes through initial sync.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(secondary,
- {startClean: true, setParameter: {'numInitialSyncAttempts': 1}},
- true /* wait */);
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+const lsid3 = session3.getSessionId();
+const txnNumber3 = session3.getTxnNumber_forTesting();
+
+jsTestLog("Restarting the secondary");
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Initial sync completed");
-
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
-
- // Make sure that while reading from the node that went through initial sync, we can't read
- // changes to the documents from any of the prepared transactions after initial sync.
- const res = secondaryColl.find().sort({_id: 1}).toArray();
- assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}], res);
-
- jsTestLog("Checking that the first transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first prepared transaction
- // after initial sync.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
-
- jsTestLog("Committing the first transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from a committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
-
- jsTestLog("Stepping up the secondary");
-
- // Step up the secondary after initial sync is done and make sure the other two transactions are
- // properly prepared.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Force the second session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
- session2.setTxnNumber_forTesting(txnNumber2);
- sessionDB2 = session2.getDatabase(dbName);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 4}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the second transaction");
-
- // Make sure we can successfully commit the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp2,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }));
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
-
- jsTestLog("Attempting to run another transaction on the second session");
-
- // Make sure that we can run another conflicting transaction without any problems.
- session2.startTransaction();
- assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
- prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- assert.eq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
-
- // Force the third session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
- session3.setTxnNumber_forTesting(txnNumber3);
- sessionDB3 = session3.getDatabase(dbName);
-
- jsTestLog("Checking that the third transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the third prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- // Make sure that another write on the same document from the third transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the third transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB3.runCommand({
- insert: collName,
- documents: [{_id: 4}],
- txnNumber: NumberLong(txnNumber3),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Aborting the third transaction");
-
- // Make sure we can successfully abort the third transaction after recovery.
- assert.commandWorked(sessionDB3.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- replTest.stopSet();
+// Restart the secondary with startClean set to true so that it goes through initial sync.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary, {startClean: true, setParameter: {'numInitialSyncAttempts': 1}}, true /* wait */);
+
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Initial sync completed");
+
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+
+// Make sure that while reading from the node that went through initial sync, we can't read
+// changes to the documents from any of the prepared transactions after initial sync.
+const res = secondaryColl.find().sort({_id: 1}).toArray();
+assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}], res);
+
+jsTestLog("Checking that the first transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the first prepared transaction
+// after initial sync.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
+
+jsTestLog("Committing the first transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from a committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+
+jsTestLog("Stepping up the secondary");
+
+// Step up the secondary after initial sync is done and make sure the other two transactions are
+// properly prepared.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Force the second session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
+session2.setTxnNumber_forTesting(txnNumber2);
+sessionDB2 = session2.getDatabase(dbName);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 4}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the second transaction");
+
+// Make sure we can successfully commit the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp2,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}));
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
+
+jsTestLog("Attempting to run another transaction on the second session");
+
+// Make sure that we can run another conflicting transaction without any problems.
+session2.startTransaction();
+assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
+prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+assert.eq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
+
+// Force the third session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
+session3.setTxnNumber_forTesting(txnNumber3);
+sessionDB3 = session3.getDatabase(dbName);
+
+jsTestLog("Checking that the third transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the third prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+// Make sure that another write on the same document from the third transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the third transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB3.runCommand({
+ insert: collName,
+ documents: [{_id: 4}],
+ txnNumber: NumberLong(txnNumber3),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Aborting the third transaction");
+
+// Make sure we can successfully abort the third transaction after recovery.
+assert.commandWorked(sessionDB3.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+replTest.stopSet();
})();
\ No newline at end of file
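
One detail worth noting in the file above: an update that touches a document written by a prepared transaction blocks behind the prepared write, so the tests bound it with maxTimeMS and expect MaxTimeMSExpired instead of success. A sketch of that probe, with db and the collection name as stand-ins:

    // The update conflicts with the prepared write and cannot complete, so the
    // time limit turns the hang into a deterministic error.
    assert.commandFailedWithCode(
        db.runCommand(
            {update: "coll", updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
        ErrorCodes.MaxTimeMSExpired);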
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js
index ed8547453fa..30f9c497e48 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js
@@ -12,108 +12,110 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync_on_oplog_seed";
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync_on_oplog_seed";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(testColl.insert({_id: 1}));
- jsTestLog("Restarting the secondary");
+jsTestLog("Restarting the secondary");
- // Restart the secondary with startClean set to true so that it goes through initial sync.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'numInitialSyncAttempts': 2,
- // Fail point to force the first attempt to fail and hang before starting the second
- // attempt.
- 'failpoint.failAndHangInitialSync': tojson({mode: 'alwaysOn'}),
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 0}}),
- 'logComponentVerbosity': tojson({'replication': {'initialSync': 2}})
- }
- },
- true /* wait */);
+// Restart the secondary with startClean set to true so that it goes through initial sync.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'numInitialSyncAttempts': 2,
+ // Fail point to force the first attempt to fail and hang before starting the second
+ // attempt.
+ 'failpoint.failAndHangInitialSync': tojson({mode: 'alwaysOn'}),
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 0}}),
+ 'logComponentVerbosity': tojson({'replication': {'initialSync': 2}})
+ }
+ },
+ true /* wait */);
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
- jsTestLog("Running operations while collection cloning is paused");
+jsTestLog("Running operations while collection cloning is paused");
- // Perform writes while collection cloning is paused so that we know they must be applied during
- // the first attempt of initial sync.
- assert.commandWorked(testColl.insert({_id: 2}));
+// Perform writes while collection cloning is paused so that we know they must be applied during
+// the first attempt of initial sync.
+assert.commandWorked(testColl.insert({_id: 2}));
- jsTestLog("Resuming initial sync");
+jsTestLog("Resuming initial sync");
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
- // Wait for failpoint to be reached so we know that the first attempt is finishing and is about to
- // fail.
- checkLog.contains(secondary, "failAndHangInitialSync fail point enabled");
+// Wait for failpoint to be reached so we know that the first attempt is finishing and is about to
+// fail.
+checkLog.contains(secondary, "failAndHangInitialSync fail point enabled");
- jsTestLog("Preparing the transaction before the second attempt of initial sync");
+jsTestLog("Preparing the transaction before the second attempt of initial sync");
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
- jsTestLog("Resuming initial sync for the second attempt");
- // Resume initial sync.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "failAndHangInitialSync", mode: "off"}));
+jsTestLog("Resuming initial sync for the second attempt");
+// Resume initial sync.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "failAndHangInitialSync", mode: "off"}));
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
- jsTestLog("Initial sync completed");
+jsTestLog("Initial sync completed");
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
- jsTestLog("Checking that the transaction is properly prepared");
+jsTestLog("Checking that the transaction is properly prepared");
- // Make sure that we can't read changes to the document from the prepared transaction after
- // initial sync.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
+// Make sure that we can't read changes to the document from the prepared transaction after
+// initial sync.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
- jsTestLog("Committing the transaction");
+jsTestLog("Committing the transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
- // Make sure that we can see the data from the committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+// Make sure that we can see the data from the committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
- replTest.stopSet();
+replTest.stopSet();
})();
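
The prepare and commit calls in the file above come from jstests/core/txns/libs/prepare_helpers.js. Reduced to its essentials, and assuming an open session and sessionColl as in the hunk, the flow is:

    session.startTransaction();
    assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
    // prepareTransaction returns the prepare timestamp; the eventual commit
    // timestamp must be at or after it.
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));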
diff --git a/jstests/replsets/recover_committed_aborted_prepared_transactions.js b/jstests/replsets/recover_committed_aborted_prepared_transactions.js
index 031682b2064..b5b88d6c549 100644
--- a/jstests/replsets/recover_committed_aborted_prepared_transactions.js
+++ b/jstests/replsets/recover_committed_aborted_prepared_transactions.js
@@ -7,130 +7,129 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "recover_committed_aborted_prepared_transactions";
+const dbName = "test";
+const collName = "recover_committed_aborted_prepared_transactions";
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
- // Create collection we're using beforehand.
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+// Create collection we're using beforehand.
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Start two different sessions on the primary.
- let session1 = primary.startSession({causalConsistency: false});
- const sessionID1 = session1.getSessionId();
- const session2 = primary.startSession({causalConsistency: false});
+// Start two different sessions on the primary.
+let session1 = primary.startSession({causalConsistency: false});
+const sessionID1 = session1.getSessionId();
+const session2 = primary.startSession({causalConsistency: false});
- let sessionDB1 = session1.getDatabase(dbName);
- let sessionColl1 = sessionDB1.getCollection(collName);
+let sessionDB1 = session1.getDatabase(dbName);
+let sessionColl1 = sessionDB1.getCollection(collName);
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl1.insert({id: 1}));
+assert.commandWorked(sessionColl1.insert({id: 1}));
- rollbackTest.awaitLastOpCommitted();
+rollbackTest.awaitLastOpCommitted();
- // Prepare a transaction on the first session which will be committed eventually.
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({id: 2}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
+// Prepare a transaction on the first session which will be committed eventually.
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({id: 2}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
- // Prevent the stable timestamp from moving beyond the following prepared transactions so
- // that when we replay the oplog from the stable timestamp, we correctly recover them.
+// Prevent the stable timestamp from moving beyond the following prepared transactions so
+// that when we replay the oplog from the stable timestamp, we correctly recover them.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+
+// The following transactions will be prepared before the common point, so they must be in
+// prepare after rollback recovery.
+
+// Prepare another transaction on the second session which will be aborted.
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({id: 3}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+
+// Commit the first transaction.
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp));
+
+// Abort the second transaction.
+assert.commandWorked(session2.abortTransaction_forTesting());
+
+// Check that we have two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+// The following write will be rolled back.
+rollbackTest.transitionToRollbackOperations();
+assert.commandWorked(testColl.insert({id: 4}));
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
-
- // The following transactions will be prepared before the common point, so they must be in
- // prepare after rollback recovery.
-
- // Prepare another transaction on the second session which will be aborted.
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({id: 3}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
-
- // Commit the first transaction.
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp));
-
- // Abort the second transaction.
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- // Check that we have two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // The following write will be rolled back.
- rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(testColl.insert({id: 4}));
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
-
- // Make sure there are two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // Make sure we can see the first two writes and the insert from the first prepared transaction.
- // Make sure we cannot see the insert from the second prepared transaction or the writes after
- // transitionToRollbackOperations.
- arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
- arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
-
- assert.eq(testColl.count(), 2);
- assert.eq(sessionColl1.count(), 2);
-
- // Get the correct members after the topology changes.
- primary = rollbackTest.getPrimary();
- testDB = primary.getDB(dbName);
- testColl = testDB.getCollection(collName);
- const rst = rollbackTest.getTestFixture();
- const secondaries = rst.getSecondaries();
-
- // Make sure we can successfully run a prepared transaction on the same first session after
- // going through rollback. This ensures that the session state has properly been restored.
- session1 =
- PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
- sessionDB1 = session1.getDatabase(dbName);
- sessionColl1 = sessionDB1.getCollection(collName);
- // The next transaction on this session should have a txnNumber of 1. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session1.setTxnNumber_forTesting(1);
-
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 5}));
- const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session1);
- // Make sure we can successfully retry the commitTransaction command after rollback.
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp3));
-
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 6}));
- PrepareHelpers.prepareTransaction(session1);
- assert.commandWorked(session1.abortTransaction_forTesting());
- // Retrying the abortTransaction command should fail with a NoSuchTransaction error.
- assert.commandFailedWithCode(sessionDB1.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(session1.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Make sure we can see the insert after committing the prepared transaction.
- arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}, {_id: 5}]);
- assert.eq(testColl.count(), 3);
-
- rollbackTest.stop();
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
+
+// Make sure there are two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+// Make sure we can see the first two writes and the insert from the first prepared transaction.
+// Make sure we cannot see the insert from the second prepared transaction or the writes after
+// transitionToRollbackOperations.
+arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
+arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
+
+assert.eq(testColl.count(), 2);
+assert.eq(sessionColl1.count(), 2);
+
+// Get the correct members after the topology changes.
+primary = rollbackTest.getPrimary();
+testDB = primary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+const rst = rollbackTest.getTestFixture();
+const secondaries = rst.getSecondaries();
+
+// Make sure we can successfully run a prepared transaction on the same first session after
+// going through rollback. This ensures that the session state has properly been restored.
+session1 = PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
+sessionDB1 = session1.getDatabase(dbName);
+sessionColl1 = sessionDB1.getCollection(collName);
+// The next transaction on this session should have a txnNumber of 1. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session1.setTxnNumber_forTesting(1);
+
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 5}));
+const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session1);
+// Make sure we can successfully retry the commitTransaction command after rollback.
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp3));
+
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 6}));
+PrepareHelpers.prepareTransaction(session1);
+assert.commandWorked(session1.abortTransaction_forTesting());
+// Retrying the abortTransaction command should fail with a NoSuchTransaction error.
+assert.commandFailedWithCode(sessionDB1.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(session1.getTxnNumber_forTesting()),
+ autocommit: false,
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// Make sure we can see the insert after committing the prepared transaction.
+arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}, {_id: 5}]);
+assert.eq(testColl.count(), 3);
+
+rollbackTest.stop();
}());
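
The rollback choreography above is driven by the RollbackTest fixture from jstests/replsets/libs/rollback_test.js. A sketch of its phases in the order this test invokes them (not the fixture's full API):

    const rollbackTest = new RollbackTest("test");
    // Writes made in this phase land only on the future rollback node and are undone.
    rollbackTest.transitionToRollbackOperations();
    // Writes in the next two phases happen on the sync source and survive.
    rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
    rollbackTest.transitionToSyncSourceOperationsDuringRollback();
    // Reconnect the rolled-back node and wait for it to converge.
    rollbackTest.transitionToSteadyStateOperations();
    rollbackTest.stop();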
diff --git a/jstests/replsets/recover_multiple_prepared_transactions_startup.js b/jstests/replsets/recover_multiple_prepared_transactions_startup.js
index 10d82bd5536..3711fbfa276 100644
--- a/jstests/replsets/recover_multiple_prepared_transactions_startup.js
+++ b/jstests/replsets/recover_multiple_prepared_transactions_startup.js
@@ -6,154 +6,154 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primary = replTest.getPrimary();
+let primary = replTest.getPrimary();
- const dbName = "test";
- const collName = "recover_multiple_prepared_transactions_startup";
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "recover_multiple_prepared_transactions_startup";
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- testDB.runCommand({drop: collName});
- assert.commandWorked(testDB.runCommand({create: collName}));
+testDB.runCommand({drop: collName});
+assert.commandWorked(testDB.runCommand({create: collName}));
- let session = primary.startSession({causalConsistency: false});
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+let session = primary.startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- let session2 = primary.startSession({causalConsistency: false});
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let session2 = primary.startSession({causalConsistency: false});
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
-
- jsTestLog("Disable snapshotting on all nodes");
-
- // Disable snapshotting so that future operations do not enter the majority snapshot.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
-
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
-
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+
+jsTestLog("Disable snapshotting on all nodes");
+
+// Disable snapshotting so that future operations do not enter the majority snapshot.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
- const lsid = session.getSessionId();
- const txnNumber = session.getTxnNumber_forTesting();
-
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
-
- jsTestLog("Restarting node");
-
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on the node following the restart.
- replTest.stop(primary, undefined, {skipValidation: true});
- replTest.start(primary, {}, true);
-
- jsTestLog("Node was restarted");
-
- primary = replTest.getPrimary();
- testDB = primary.getDB(dbName);
-
- session = primary.startSession({causalConsistency: false});
- sessionDB = session.getDatabase(dbName);
-
- session2 = primary.startSession({causalConsistency: false});
- sessionDB2 = session.getDatabase(dbName);
-
- // Force the first session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session._serverSession.handle.getId = () => lsid;
- session.setTxnNumber_forTesting(txnNumber);
-
- jsTestLog("Checking that the first transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first transaction after
- // recovery.
- assert.eq(testDB[collName].find({_id: 1}).toArray(), [{_id: 1}]);
-
- // Make sure that another write on the same document from the first transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the first transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the first transaction");
-
- // Make sure we can successfully commit the first transaction after recovery.
- let commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Force the second session to use the same lsid and txnNumber as from before the restart.
- // This ensures that we're working with the same session and transaction.
- session._serverSession.handle.getId = () => lsid2;
- session.setTxnNumber_forTesting(txnNumber2);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second transaction after
- // recovery.
- assert.eq(testDB[collName].find({_id: 2}).toArray(), [{_id: 2}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Aborting the second transaction");
-
- // Make sure we can successfully abort the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber2), autocommit: false}));
-
- jsTestLog("Attempting to run another transction");
-
- // Make sure that we can run another conflicting transaction after recovery without any
- // problems.
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 3});
-
- replTest.stopSet();
+const lsid = session.getSessionId();
+const txnNumber = session.getTxnNumber_forTesting();
+
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+jsTestLog("Restarting node");
+
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on the node following the restart.
+replTest.stop(primary, undefined, {skipValidation: true});
+replTest.start(primary, {}, true);
+
+jsTestLog("Node was restarted");
+
+primary = replTest.getPrimary();
+testDB = primary.getDB(dbName);
+
+session = primary.startSession({causalConsistency: false});
+sessionDB = session.getDatabase(dbName);
+
+session2 = primary.startSession({causalConsistency: false});
+sessionDB2 = session.getDatabase(dbName);
+
+// Force the first session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session._serverSession.handle.getId = () => lsid;
+session.setTxnNumber_forTesting(txnNumber);
+
+jsTestLog("Checking that the first transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the first transaction after
+// recovery.
+assert.eq(testDB[collName].find({_id: 1}).toArray(), [{_id: 1}]);
+
+// Make sure that another write on the same document from the first transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the first transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the first transaction");
+
+// Make sure we can successfully commit the first transaction after recovery.
+let commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+
+// Force the second session to use the same lsid and txnNumber as from before the restart.
+// This ensures that we're working with the same session and transaction.
+session._serverSession.handle.getId = () => lsid2;
+session.setTxnNumber_forTesting(txnNumber2);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second transaction after
+// recovery.
+assert.eq(testDB[collName].find({_id: 2}).toArray(), [{_id: 2}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Aborting the second transaction");
+
+// Make sure we can successfully abort the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber2), autocommit: false}));
+
+jsTestLog("Attempting to run another transction");
+
+// Make sure that we can run another conflicting transaction after recovery without any
+// problems.
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 3});
+
+replTest.stopSet();
}());
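
A non-obvious line in the file above is how the raw commitTransaction command picks its timestamp: the commit timestamp must be at or after the prepare timestamp, and the test simply bumps the increment by one. A sketch, assuming prepareTimestamp, txnNumber, and sessionDB from the surrounding test:

    // Commit one increment past the prepare timestamp; any value at or after
    // the prepare timestamp would also work.
    const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
    assert.commandWorked(sessionDB.adminCommand({
        commitTransaction: 1,
        commitTimestamp: commitTimestamp,
        txnNumber: NumberLong(txnNumber),
        autocommit: false
    }));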
diff --git a/jstests/replsets/recover_prepared_transaction_state.js b/jstests/replsets/recover_prepared_transaction_state.js
index f87d35496c8..1b054718778 100644
--- a/jstests/replsets/recover_prepared_transaction_state.js
+++ b/jstests/replsets/recover_prepared_transaction_state.js
@@ -15,185 +15,182 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "recover_prepared_transaction_state_after_rollback";
+const dbName = "test";
+const collName = "recover_prepared_transaction_state_after_rollback";
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
- // Create collection we're using beforehand.
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+// Create collection we're using beforehand.
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Start two different sessions on the primary.
- let session1 = primary.startSession({causalConsistency: false});
- let session2 = primary.startSession({causalConsistency: false});
+// Start two different sessions on the primary.
+let session1 = primary.startSession({causalConsistency: false});
+let session2 = primary.startSession({causalConsistency: false});
- // Save both session IDs so we can later start sessions with the same IDs and commit or
- // abort a prepared transaction on them.
- const sessionID1 = session1.getSessionId();
- const sessionID2 = session2.getSessionId();
+// Save both session IDs so we can later start sessions with the same IDs and commit or
+// abort a prepared transaction on them.
+const sessionID1 = session1.getSessionId();
+const sessionID2 = session2.getSessionId();
- let sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+let sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl1.insert({_id: 1}));
- assert.commandWorked(sessionColl1.insert({_id: 2}));
+assert.commandWorked(sessionColl1.insert({_id: 1}));
+assert.commandWorked(sessionColl1.insert({_id: 2}));
- rollbackTest.awaitLastOpCommitted();
+rollbackTest.awaitLastOpCommitted();
- // Prepare a transaction on the first session whose commit will be rolled back.
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 3}));
- assert.commandWorked(sessionColl1.update({_id: 1}, {$set: {a: 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
+// Prepare a transaction on the first session whose commit will be rolled back.
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 3}));
+assert.commandWorked(sessionColl1.update({_id: 1}, {$set: {a: 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
- // Prevent the stable timestamp from moving beyond the following prepared transactions so
- // that when we replay the oplog from the stable timestamp, we correctly recover them.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
-
- // The following transactions will be prepared before the common point, so they must be in
- // prepare after rollback recovery.
-
- // Prepare another transaction on the second session whose abort will be rolled back.
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 4}));
- assert.commandWorked(sessionColl2.update({_id: 2}, {$set: {b: 2}}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
-
- // Check that we have two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // This characterizes the current behavior of fastcount, which is that the two open transactions
- // count toward the value.
- assert.eq(testColl.count(), 4);
-
- // The following commit and abort will be rolled back.
- rollbackTest.transitionToRollbackOperations();
- PrepareHelpers.commitTransaction(session1, prepareTimestamp);
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- // The fastcount should be accurate because there are no open transactions.
- assert.eq(testColl.count(), 3);
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
-
- // Make sure there are two transactions in the transactions table after rollback recovery.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // Make sure we can only see the first write and cannot see the writes from the prepared
- // transactions or the write that was rolled back.
- arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
- arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
-
- // This check characterizes the current behavior of fastcount after rollback. It will not be
- // correct, but reflects the count at the point where both transactions are not yet committed or
- // aborted (because the operations were not majority committed). The count will eventually be
- // correct once the commit and abort are retried.
- assert.eq(sessionColl1.count(), 4);
- assert.eq(testColl.count(), 4);
-
- // Get the correct primary after the topology changes.
- primary = rollbackTest.getPrimary();
- rollbackTest.awaitReplication();
-
- // Make sure we can successfully commit the first rolled back prepared transaction.
- session1 =
- PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
- sessionDB1 = session1.getDatabase(dbName);
- // The next transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session1.setTxnNumber_forTesting(0);
- const txnNumber1 = session1.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB1.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber1),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the first prepared transaction
- // causes a write conflict.
- assert.commandFailedWithCode(
- sessionDB1.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
- assert.commandWorked(sessionDB1.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: NumberLong(txnNumber1),
- autocommit: false,
- }));
- // Retry the commitTransaction command after rollback.
- assert.commandWorked(sessionDB1.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: NumberLong(txnNumber1),
- autocommit: false,
- }));
-
- // Make sure we can successfully abort the second recovered prepared transaction.
- session2 =
- PrepareHelpers.createSessionWithGivenId(primary, sessionID2, {causalConsistency: false});
- sessionDB2 = session2.getDatabase(dbName);
- // The next transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session2.setTxnNumber_forTesting(0);
- const txnNumber2 = session2.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber2),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the second prepared transaction
- // causes a write conflict.
- assert.commandFailedWithCode(
- sessionDB2.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {b: 3}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- assert.commandWorked(sessionDB2.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false,
- }));
-
- rollbackTest.awaitReplication();
-
- // Make sure we can see the result of the committed prepared transaction and cannot see the
- // write from the aborted transaction.
- arrayEq(testColl.find().toArray(), [{_id: 1, a: 1}, {_id: 2}, {_id: 3}]);
- assert.eq(testColl.count(), 3);
-
- rollbackTest.stop();
+// Prevent the stable timestamp from moving beyond the following prepared transactions so
+// that when we replay the oplog from the stable timestamp, we correctly recover them.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+
+// The following transactions will be prepared before the common point, so they must be in
+// prepare after rollback recovery.
+
+// Prepare another transaction on the second session whose abort will be rolled back.
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 4}));
+assert.commandWorked(sessionColl2.update({_id: 2}, {$set: {b: 2}}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+
+// Check that we have two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+// This characterizes the current behavior of fastcount, which is that the two open transactions
+// count toward the value.
+assert.eq(testColl.count(), 4);
+
+// The following commit and abort will be rolled back.
+rollbackTest.transitionToRollbackOperations();
+PrepareHelpers.commitTransaction(session1, prepareTimestamp);
+assert.commandWorked(session2.abortTransaction_forTesting());
+
+// The fastcount should be accurate because there are no open transactions.
+assert.eq(testColl.count(), 3);
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
+
+// Make sure there are two transactions in the transactions table after rollback recovery.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+// Make sure we can only see the first write and cannot see the writes from the prepared
+// transactions or the write that was rolled back.
+arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
+arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
+
+// This check characterizes the current behavior of fastcount after rollback. It will not be
+// correct, but reflects the count at the point where both transactions are not yet committed or
+// aborted (because the operations were not majority committed). The count will eventually be
+// correct once the commit and abort are retried.
+assert.eq(sessionColl1.count(), 4);
+assert.eq(testColl.count(), 4);
+
+// Get the correct primary after the topology changes.
+primary = rollbackTest.getPrimary();
+rollbackTest.awaitReplication();
+
+// Make sure we can successfully commit the first rolled back prepared transaction.
+session1 = PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
+sessionDB1 = session1.getDatabase(dbName);
+// The next transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session1.setTxnNumber_forTesting(0);
+const txnNumber1 = session1.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber1),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the first prepared transaction
+// causes a write conflict.
+assert.commandFailedWithCode(
+ sessionDB1.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
+assert.commandWorked(sessionDB1.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: NumberLong(txnNumber1),
+ autocommit: false,
+}));
+// Retry the commitTransaction command after rollback.
+assert.commandWorked(sessionDB1.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: NumberLong(txnNumber1),
+ autocommit: false,
+}));
+
+// Make sure we can successfully abort the second recovered prepared transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(primary, sessionID2, {causalConsistency: false});
+sessionDB2 = session2.getDatabase(dbName);
+// The next transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session2.setTxnNumber_forTesting(0);
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber2),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the second prepared transaction
+// causes a write conflict.
+assert.commandFailedWithCode(
+ sessionDB2.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {b: 3}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+assert.commandWorked(sessionDB2.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false,
+}));
+
+rollbackTest.awaitReplication();
+
+// Make sure we can see the result of the committed prepared transaction and cannot see the
+// write from the aborted transaction.
+arrayEq(testColl.find().toArray(), [{_id: 1, a: 1}, {_id: 2}, {_id: 3}]);
+assert.eq(testColl.count(), 3);
+
+rollbackTest.stop();
}());
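
The commit-timestamp arithmetic exercised above follows one pattern throughout these tests; a minimal sketch, reusing the names defined in the test and assuming the session's transaction is already prepared:

    // The commit timestamp must not precede the prepare timestamp, so the test
    // simply bumps the increment component of the prepare timestamp by one.
    const commitTs = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
    // commitTransaction is retryable: reissuing it with the same txnNumber
    // after a rollback or failover succeeds, as asserted above.
    assert.commandWorked(sessionDB1.adminCommand({
        commitTransaction: 1,
        commitTimestamp: commitTs,
        txnNumber: NumberLong(txnNumber1),
        autocommit: false,
    }));
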
diff --git a/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js b/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js
index 38509a50898..df484d4c347 100644
--- a/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js
+++ b/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js
@@ -7,114 +7,114 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 2});
- const nodes = replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 2});
+const nodes = replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "recover_prepared_transactions_startup_secondary_application";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "recover_prepared_transactions_startup_secondary_application";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const session2 = primary.startSession();
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+const session2 = primary.startSession();
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
- replTest.awaitReplication();
+replTest.awaitReplication();
- jsTestLog("Disable snapshotting on all nodes");
+jsTestLog("Disable snapshotting on all nodes");
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
- jsTestLog("Prepared a transaction at " + prepareTimestamp);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+jsTestLog("Prepared a transaction at " + prepareTimestamp);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
- jsTestLog("Prepared another transaction at " + prepareTimestamp2);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+jsTestLog("Prepared another transaction at " + prepareTimestamp2);
- const lsid = session.getSessionId();
- const txnNumber = session.getTxnNumber_forTesting();
+const lsid = session.getSessionId();
+const txnNumber = session.getTxnNumber_forTesting();
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
- jsTestLog("Restarting node");
+jsTestLog("Restarting node");
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on the node following the restart.
- replTest.stop(secondary, undefined, {skipValidation: true});
- secondary = replTest.start(secondary, {}, true);
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on the node following the restart.
+replTest.stop(secondary, undefined, {skipValidation: true});
+secondary = replTest.start(secondary, {}, true);
- jsTestLog("Secondary was restarted");
+jsTestLog("Secondary was restarted");
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"}));
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"}));
- // It's illegal to commit a prepared transaction before its prepare oplog entry has been
- // majority committed. So wait for prepare oplog entry to be majority committed before issuing
- // the commitTransaction command.
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp2);
+// It's illegal to commit a prepared transaction before its prepare oplog entry has been
+// majority committed. So wait for prepare oplog entry to be majority committed before issuing
+// the commitTransaction command.
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp2);
- // Wait for the node to complete recovery before trying to read from it.
- replTest.awaitSecondaryNodes();
- secondary.setSlaveOk();
+// Wait for the node to complete recovery before trying to read from it.
+replTest.awaitSecondaryNodes();
+secondary.setSlaveOk();
- jsTestLog("Checking that the first transaction is properly prepared");
+jsTestLog("Checking that the first transaction is properly prepared");
- // Make sure that we can't read changes to the document from either transaction after recovery.
- const secondaryTestColl = secondary.getDB(dbName).getCollection(collName);
- assert.eq(secondaryTestColl.find({_id: 1}).toArray(), [{_id: 1}]);
- assert.eq(secondaryTestColl.find({_id: 2}).toArray(), [{_id: 2}]);
+// Make sure that we can't read changes to the document from either transaction after recovery.
+const secondaryTestColl = secondary.getDB(dbName).getCollection(collName);
+assert.eq(secondaryTestColl.find({_id: 1}).toArray(), [{_id: 1}]);
+assert.eq(secondaryTestColl.find({_id: 2}).toArray(), [{_id: 2}]);
- jsTestLog("Committing the first transaction");
+jsTestLog("Committing the first transaction");
- // Make sure we can successfully commit the first transaction after recovery and that we can see
- // all its changes when we read from the secondary.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
- assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
+// Make sure we can successfully commit the first transaction after recovery and that we can see
+// all its changes when we read from the secondary.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
+assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
- jsTestLog("Aborting the second transaction");
+jsTestLog("Aborting the second transaction");
- // Make sure we can successfully abort the second transaction after recovery and that we can't
- // see any of its operations when we read from the secondary.
- assert.commandWorked(session2.abortTransaction_forTesting());
- replTest.awaitReplication();
- assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
+// Make sure we can successfully abort the second transaction after recovery and that we can't
+// see any of its operations when we read from the secondary.
+assert.commandWorked(session2.abortTransaction_forTesting());
+replTest.awaitReplication();
+assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
- jsTestLog("Attempting to run another transction");
+jsTestLog("Attempting to run another transction");
- // Make sure that we can run another conflicting transaction after recovery without any
- // problems and that we can see its changes when we read from the secondary.
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 3});
- replTest.awaitReplication();
- assert.eq(secondaryTestColl.findOne({_id: 1}), {_id: 1, a: 3});
+// Make sure that we can run another conflicting transaction after recovery without any
+// problems and that we can see its changes when we read from the secondary.
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 3});
+replTest.awaitReplication();
+assert.eq(secondaryTestColl.findOne({_id: 1}), {_id: 1, a: 3});
- replTest.stopSet();
+replTest.stopSet();
}());
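
The 'disableSnapshotting' failpoint that these recovery tests lean on is toggled with a single idiom; a sketch, assuming a live connection in "node":

    // Keep subsequent writes out of the majority snapshot so they must be
    // replayed from the oplog during recovery.
    assert.commandWorked(
        node.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
    // ... perform the writes under test ...
    assert.commandWorked(
        node.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"}));
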
diff --git a/jstests/replsets/recover_prepared_txn_with_multikey_write.js b/jstests/replsets/recover_prepared_txn_with_multikey_write.js
index 5a71d9054c0..b898fce07ba 100644
--- a/jstests/replsets/recover_prepared_txn_with_multikey_write.js
+++ b/jstests/replsets/recover_prepared_txn_with_multikey_write.js
@@ -5,39 +5,39 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- }
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
}
- ]
- });
+ }
+ ]
+});
- rst.startSet();
- rst.initiate();
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- const session = primary.getDB("test").getMongo().startSession();
- const sessionDB = session.getDatabase("test");
- const sessionColl = sessionDB.getCollection("coll");
+const session = primary.getDB("test").getMongo().startSession();
+const sessionDB = session.getDatabase("test");
+const sessionColl = sessionDB.getCollection("coll");
- // Create an index that will later be made multikey.
- sessionColl.createIndex({x: 1});
- session.startTransaction();
+// Create an index that will later be made multikey.
+sessionColl.createIndex({x: 1});
+session.startTransaction();
- // Make the index multikey.
- sessionColl.insert({x: [1, 2, 3]});
- assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
+// Make the index multikey.
+sessionColl.insert({x: [1, 2, 3]});
+assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
- // Do an unclean shutdown so we don't force a checkpoint, and then restart.
- rst.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- rst.restart(0);
+// Do an unclean shutdown so we don't force a checkpoint, and then restart.
+rst.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+rst.restart(0);
- rst.stopSet();
+rst.stopSet();
}());
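
The multikey transition this test drives through a prepared transaction is the ordinary index transition; a sketch of the plain, non-transactional case (collection name illustrative):

    const coll = db.getCollection("coll");
    assert.commandWorked(coll.createIndex({x: 1}));  // begins as a single-key index
    // The first document that stores an array in the indexed field flips the
    // index's multikey flag; that metadata change is what the unclean restart
    // above must recover.
    assert.commandWorked(coll.insert({x: [1, 2, 3]}));
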
diff --git a/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js b/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js
index 45a005e255e..77700523439 100644
--- a/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js
+++ b/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js
@@ -5,75 +5,78 @@
* @tags: [requires_persistence, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({
- name: "recoveryAfterCleanShutdown",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "recoveryAfterCleanShutdown",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- const dbName = "recovery_clean_shutdown";
- let primaryDB = rst.getPrimary().getDB(dbName);
- const wMajority = {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}};
+const dbName = "recovery_clean_shutdown";
+let primaryDB = rst.getPrimary().getDB(dbName);
+const wMajority = {
+ writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
+};
- // Create a collection that will have all of its writes in the stable checkpoint.
- const collAllStableWrites = "allWritesInStableCheckpoint";
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "dan"}, wMajority));
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "judah"}, wMajority));
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "vessy"}, wMajority));
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "kyle"}, wMajority));
+// Create a collection that will have all of its writes in the stable checkpoint.
+const collAllStableWrites = "allWritesInStableCheckpoint";
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "dan"}, wMajority));
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "judah"}, wMajority));
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "vessy"}, wMajority));
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "kyle"}, wMajority));
- // Set up a collection with some writes that make it into the stable checkpoint.
- const collSomeStableWrites = "someWritesInStableCheckpoint";
- assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "erjon"}, wMajority));
- assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "jungsoo"}, wMajority));
+// Set up a collection with some writes that make it into the stable checkpoint.
+const collSomeStableWrites = "someWritesInStableCheckpoint";
+assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "erjon"}, wMajority));
+assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "jungsoo"}, wMajority));
- // Set up a collection whose creation is in the stable checkpoint, but will have no stable
- // writes.
- const collNoStableWrites = "noWritesInStableCheckpoint";
- assert.commandWorked(primaryDB[collNoStableWrites].runCommand("create", wMajority));
+// Set up a collection whose creation is in the stable checkpoint, but will have no stable
+// writes.
+const collNoStableWrites = "noWritesInStableCheckpoint";
+assert.commandWorked(primaryDB[collNoStableWrites].runCommand("create", wMajority));
- // Wait for all oplog entries to enter the stable checkpoint on all secondaries.
- rst.awaitLastOpCommitted();
+// Wait for all oplog entries to enter the stable checkpoint on all secondaries.
+rst.awaitLastOpCommitted();
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- const w1 = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+const w1 = {
+ writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}
+};
- // Set up a collection whose creation is not in the stable checkpoint.
- const collNoStableCreation = "creationNotInStableCheckpoint";
- assert.commandWorked(primaryDB[collNoStableCreation].runCommand("create", w1));
+// Set up a collection whose creation is not in the stable checkpoint.
+const collNoStableCreation = "creationNotInStableCheckpoint";
+assert.commandWorked(primaryDB[collNoStableCreation].runCommand("create", w1));
- // Perform writes on collections that replicate to each node but do not enter the majority
- // snapshot. These commands will be replayed during replication recovery during restart.
- [collSomeStableWrites, collNoStableWrites, collNoStableCreation].forEach(
- coll => assert.commandWorked(
- primaryDB[coll].insert({_id: "insertedAfterSnapshottingDisabled"}, w1)));
- rst.awaitReplication();
+// Perform writes on collections that replicate to each node but do not enter the majority
+// snapshot. These commands will be replayed during replication recovery during restart.
+[collSomeStableWrites, collNoStableWrites, collNoStableCreation].forEach(
+ coll => assert.commandWorked(
+ primaryDB[coll].insert({_id: "insertedAfterSnapshottingDisabled"}, w1)));
+rst.awaitReplication();
- jsTestLog("Checking collection counts after snapshotting has been disabled");
- rst.checkCollectionCounts();
+jsTestLog("Checking collection counts after snapshotting has been disabled");
+rst.checkCollectionCounts();
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on each node following the restart.
- nodes.forEach(node => rst.restart(node));
- rst.awaitNodesAgreeOnPrimary();
- primaryDB = rst.getPrimary().getDB(dbName);
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on each node following the restart.
+nodes.forEach(node => rst.restart(node));
+rst.awaitNodesAgreeOnPrimary();
+primaryDB = rst.getPrimary().getDB(dbName);
- // Perform a majority write to ensure that both nodes agree on the majority commit point.
- const collCreatedAfterRestart = "createdAfterRestart";
- assert.commandWorked(
- primaryDB[collCreatedAfterRestart].insert({_id: "insertedAfterRestart"}, wMajority));
+// Perform a majority write to ensure that both nodes agree on the majority commit point.
+const collCreatedAfterRestart = "createdAfterRestart";
+assert.commandWorked(
+ primaryDB[collCreatedAfterRestart].insert({_id: "insertedAfterRestart"}, wMajority));
- // Fast metadata count should be correct after restart in the face of a clean shutdown.
- jsTestLog("Checking collection counts after clean restart of all nodes");
- rst.checkCollectionCounts();
+// Fast metadata count should be correct after restart in the face of a clean shutdown.
+jsTestLog("Checking collection counts after clean restart of all nodes");
+rst.checkCollectionCounts();
- rst.stopSet();
+rst.stopSet();
}());
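
A note on the write-concern objects used above: they are the options argument of insert(), not part of the inserted document. A sketch:

    const wMajority = {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}};
    // Passed as the second argument, so the insert waits for majority replication.
    assert.commandWorked(db.coll.insert({_id: "example"}, wMajority));
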
diff --git a/jstests/replsets/recovery_preserves_active_txns.js b/jstests/replsets/recovery_preserves_active_txns.js
index 5896a1e01fc..005286cf152 100644
--- a/jstests/replsets/recovery_preserves_active_txns.js
+++ b/jstests/replsets/recovery_preserves_active_txns.js
@@ -11,83 +11,82 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
-
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Prepare a transaction");
-
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
-
- jsTestLog("Insert documents until oplog exceeds oplogSize");
-
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
-
- // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
- var secondary = replSet.getSecondary();
- function checkSecondaryOplog() {
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.soon(() => {
- return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
- }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
- const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
- assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
- }
- checkSecondaryOplog();
-
- jsTestLog("Restart the secondary");
-
- const secondaryId = replSet.getSecondary().nodeId;
- // Validation can't complete while the active transaction holds a lock.
- replSet.stop(secondaryId, undefined, {skipValidation: true});
- secondary = replSet.start(secondaryId, {}, true /* restart */);
-
- jsTestLog("Restarted");
-
- replSet.awaitSecondaryNodes();
- checkSecondaryOplog();
-
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
-
- PrepareHelpers.awaitOplogTruncation(replSet);
-
- // ReplSetTest reacts poorly to restarting a node; end it manually.
- replSet.stopSet(true, false, {});
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ var secondary = replSet.getSecondary();
+ function checkSecondaryOplog() {
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.soon(() => {
+ return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
+ }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
+ const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
}
- doTest("commit");
- doTest("abort");
+ checkSecondaryOplog();
+
+ jsTestLog("Restart the secondary");
+
+ const secondaryId = replSet.getSecondary().nodeId;
+ // Validation can't complete while the active transaction holds a lock.
+ replSet.stop(secondaryId, undefined, {skipValidation: true});
+ secondary = replSet.start(secondaryId, {}, true /* restart */);
+
+ jsTestLog("Restarted");
+
+ replSet.awaitSecondaryNodes();
+ checkSecondaryOplog();
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
+
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ // ReplSetTest reacts poorly to restarting a node; end it manually.
+ replSet.stopSet(true, false, {});
+}
+doTest("commit");
+doTest("abort");
})();
diff --git a/jstests/replsets/refresh_sessions_rs.js b/jstests/replsets/refresh_sessions_rs.js
index 4539e667d07..24d553c2df7 100644
--- a/jstests/replsets/refresh_sessions_rs.js
+++ b/jstests/replsets/refresh_sessions_rs.js
@@ -1,82 +1,80 @@
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
-
- // Start up a replica set.
- var dbName = "config";
-
- var replTest = new ReplSetTest({name: 'refresh', nodes: 3});
- var nodes = replTest.startSet();
-
- replTest.initiate();
- var primary = replTest.getPrimary();
-
- replTest.awaitSecondaryNodes();
- var server2 = replTest._slaves[0];
- var server3 = replTest._slaves[1];
-
- var db1 = primary.getDB(dbName);
- var db2 = server2.getDB(dbName);
- var db3 = server3.getDB(dbName);
-
- var res;
-
- // The primary needs to create the sessions collection so that the secondaries can act upon it.
- // This is done by an initial refresh of the primary.
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- replTest.awaitReplication();
-
- // Trigger an initial refresh on secondaries as a sanity check.
- res = db2.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- res = db3.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // Connect to the primary and start a session.
- res = db1.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
-
- // That session should not be in db.system.sessions yet.
- assert.eq(db1.system.sessions.count(), 0, "should not have session records yet");
-
- // Connect to each replica set member and start a session.
- res = db2.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- res = db3.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
-
- // Connect to a secondary and trigger a refresh.
- res = db2.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // Connect to the primary. The sessions collection here should have one record for the session
- // on the secondary.
- assert.eq(db1.system.sessions.count(), 1, "failed to refresh on the secondary");
-
- // Trigger a refresh on the primary. The sessions collection should now contain two records.
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- assert.eq(
- db1.system.sessions.count(), 2, "should have two local session records after refresh");
-
- // Trigger another refresh on all members.
- res = db2.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- res = db3.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // The sessions collection on the primary should now contain all records.
- assert.eq(
- db1.system.sessions.count(), 3, "should have three local session records after refresh");
-
- // Stop the test.
- replTest.stopSet();
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
+
+// Start up a replica set.
+var dbName = "config";
+
+var replTest = new ReplSetTest({name: 'refresh', nodes: 3});
+var nodes = replTest.startSet();
+
+replTest.initiate();
+var primary = replTest.getPrimary();
+
+replTest.awaitSecondaryNodes();
+var server2 = replTest._slaves[0];
+var server3 = replTest._slaves[1];
+
+var db1 = primary.getDB(dbName);
+var db2 = server2.getDB(dbName);
+var db3 = server3.getDB(dbName);
+
+var res;
+
+// The primary needs to create the sessions collection so that the secondaries can act upon it.
+// This is done by an initial refresh of the primary.
+res = db1.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+replTest.awaitReplication();
+
+// Trigger an initial refresh on secondaries as a sanity check.
+res = db2.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+res = db3.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// Connect to the primary and start a session.
+res = db1.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
+
+// That session should not be in db.system.sessions yet.
+assert.eq(db1.system.sessions.count(), 0, "should not have session records yet");
+
+// Connect to each replica set member and start a session.
+res = db2.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
+res = db3.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
+
+// Connect to a secondary and trigger a refresh.
+res = db2.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// Connect to the primary. The sessions collection here should have one record for the session
+// on the secondary.
+assert.eq(db1.system.sessions.count(), 1, "failed to refresh on the secondary");
+
+// Trigger a refresh on the primary. The sessions collection should now contain two records.
+res = db1.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+assert.eq(db1.system.sessions.count(), 2, "should have two local session records after refresh");
+
+// Trigger another refresh on all members.
+res = db2.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+res = db3.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+res = db1.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// The sessions collection on the primary should now contain all records.
+assert.eq(db1.system.sessions.count(), 3, "should have three local session records after refresh");
+
+// Stop the test.
+replTest.stopSet();
})();
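
The behavior this test exercises reduces to one round trip per node; a sketch (these commands run against any database object):

    // startSession only creates an in-memory record on the node that served it...
    assert.commandWorked(db.runCommand({startSession: 1}));
    // ...the record reaches the config.system.sessions collection only once
    // that node refreshes its logical session cache.
    assert.commandWorked(db.runCommand({refreshLogicalSessionCacheNow: 1}));
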
diff --git a/jstests/replsets/rename_across_dbs.js b/jstests/replsets/rename_across_dbs.js
index 7976eb65103..1bbb2249581 100644
--- a/jstests/replsets/rename_across_dbs.js
+++ b/jstests/replsets/rename_across_dbs.js
@@ -1,7 +1,7 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- new RenameAcrossDatabasesTest().run();
+new RenameAcrossDatabasesTest().run();
}());
diff --git a/jstests/replsets/rename_across_dbs_drop_target.js b/jstests/replsets/rename_across_dbs_drop_target.js
index 8639f9caeff..33916a1cb09 100644
--- a/jstests/replsets/rename_across_dbs_drop_target.js
+++ b/jstests/replsets/rename_across_dbs_drop_target.js
@@ -1,8 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const options = {dropTarget: true};
- new RenameAcrossDatabasesTest(options).run();
+const options = {
+ dropTarget: true
+};
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/replsets/rename_collection_between_unrepl_and_repl.js b/jstests/replsets/rename_collection_between_unrepl_and_repl.js
index 07b318f7d76..e77da8a1b9b 100644
--- a/jstests/replsets/rename_collection_between_unrepl_and_repl.js
+++ b/jstests/replsets/rename_collection_between_unrepl_and_repl.js
@@ -6,39 +6,37 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "rename_collection_between_unrepl_and_repl";
- const rst = new ReplSetTest({"name": name, "nodes": 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
+const name = "rename_collection_between_unrepl_and_repl";
+const rst = new ReplSetTest({"name": name, "nodes": 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
- /**
- * Part 1: Attempt to rename from a replicated to an unreplicated namespace.
- */
- let sourceNs = "somedb.replicated";
- let targetNs = "local.unreplicated";
+/**
+ * Part 1: Attempt to rename from a replicated to an unreplicated namespace.
+ */
+let sourceNs = "somedb.replicated";
+let targetNs = "local.unreplicated";
- // Ensure that the source collection exists.
- assert.commandWorked(primary.getCollection(sourceNs).insert({"fromRepl": "toUnrepl"}));
+// Ensure that the source collection exists.
+assert.commandWorked(primary.getCollection(sourceNs).insert({"fromRepl": "toUnrepl"}));
- assert.commandFailedWithCode(
- primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
- ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
+ ErrorCodes.IllegalOperation);
- /**
- * Part 2: Attempt to rename from an unreplicated to a replicated namespace.
- */
- sourceNs = "local.alsoUnreplicated";
- targetNs = "somedb.alsoReplicated";
+/**
+ * Part 2: Attempt to rename from an unreplicated to a replicated namespace.
+ */
+sourceNs = "local.alsoUnreplicated";
+targetNs = "somedb.alsoReplicated";
- // Ensure that the source collection exists.
- assert.commandWorked(primary.getCollection(sourceNs).insert({"fromUnrepl": "toRepl"}));
+// Ensure that the source collection exists.
+assert.commandWorked(primary.getCollection(sourceNs).insert({"fromUnrepl": "toRepl"}));
- assert.commandFailedWithCode(
- primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
- ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
+ ErrorCodes.IllegalOperation);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/rename_collection_temp.js b/jstests/replsets/rename_collection_temp.js
index 57d33f8beb5..dc6ffd6f84a 100644
--- a/jstests/replsets/rename_collection_temp.js
+++ b/jstests/replsets/rename_collection_temp.js
@@ -4,84 +4,80 @@
// @tags: [requires_replication]
(function() {
- "use strict";
-
- function checkCollectionTemp(db, collName, expectedTempValue) {
- var collectionInformation = db.getCollectionInfos();
-
- var hasSeenCollection = false;
- for (var i = 0; i < collectionInformation.length; i++) {
- var collection = collectionInformation[i];
-
- if (collection.name === collName) {
- hasSeenCollection = true;
-
- if (expectedTempValue) {
- // We expect this collection to be temporary.
- assert.eq(collection.options.temp, true);
- } else {
- // We expect this collection to be permanent, so the temp option will not
- // show up.
- assert.isnull(collection.options.temp);
- }
+"use strict";
+
+function checkCollectionTemp(db, collName, expectedTempValue) {
+ var collectionInformation = db.getCollectionInfos();
+
+ var hasSeenCollection = false;
+ for (var i = 0; i < collectionInformation.length; i++) {
+ var collection = collectionInformation[i];
+
+ if (collection.name === collName) {
+ hasSeenCollection = true;
+
+ if (expectedTempValue) {
+ // We expect this collection to be temporary.
+ assert.eq(collection.options.temp, true);
+ } else {
+ // We expect this collection to be permanent, so the temp option will not
+ // show up.
+ assert.isnull(collection.options.temp);
}
}
}
+}
- var replTest = new ReplSetTest({name: 'renameCollectionTest', nodes: 2});
- var nodes = replTest.startSet();
+var replTest = new ReplSetTest({name: 'renameCollectionTest', nodes: 2});
+var nodes = replTest.startSet();
- replTest.initiate();
+replTest.initiate();
- var master = replTest.getPrimary();
+var master = replTest.getPrimary();
- // Create a temporary collection.
- var dbFoo = master.getDB("foo");
+// Create a temporary collection.
+var dbFoo = master.getDB("foo");
- assert.commandWorked(dbFoo.runCommand({
- applyOps:
- [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]
- }));
- checkCollectionTemp(dbFoo, "tempColl", true);
+assert.commandWorked(dbFoo.runCommand(
+ {applyOps: [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]}));
+checkCollectionTemp(dbFoo, "tempColl", true);
- // Rename the collection.
- assert.commandWorked(
- master.adminCommand({renameCollection: "foo.tempColl", to: "foo.permanentColl"}));
+// Rename the collection.
+assert.commandWorked(
+ master.adminCommand({renameCollection: "foo.tempColl", to: "foo.permanentColl"}));
- // Confirm that it is no longer temporary.
- checkCollectionTemp(dbFoo, "permanentColl", false);
+// Confirm that it is no longer temporary.
+checkCollectionTemp(dbFoo, "permanentColl", false);
- replTest.awaitReplication();
+replTest.awaitReplication();
- var secondary = replTest.getSecondary();
- var secondaryFoo = secondary.getDB("foo");
+var secondary = replTest.getSecondary();
+var secondaryFoo = secondary.getDB("foo");
- secondaryFoo.permanentColl.setSlaveOk(true);
+secondaryFoo.permanentColl.setSlaveOk(true);
- // Get the information on the secondary to ensure it was replicated correctly.
- checkCollectionTemp(secondaryFoo, "permanentColl", false);
+// Get the information on the secondary to ensure it was replicated correctly.
+checkCollectionTemp(secondaryFoo, "permanentColl", false);
- // Check the behavior when the "dropTarget" flag is passed to renameCollection.
- dbFoo.permanentColl.drop();
+// Check the behavior when the "dropTarget" flag is passed to renameCollection.
+dbFoo.permanentColl.drop();
- assert.commandWorked(dbFoo.runCommand({
- applyOps:
- [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]
- }));
- checkCollectionTemp(dbFoo, "tempColl", true);
+assert.commandWorked(dbFoo.runCommand(
+ {applyOps: [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]}));
+checkCollectionTemp(dbFoo, "tempColl", true);
- // Construct an empty collection that will be dropped on rename.
- assert.commandWorked(dbFoo.runCommand({create: "permanentColl"}));
+// Construct an empty collection that will be dropped on rename.
+assert.commandWorked(dbFoo.runCommand({create: "permanentColl"}));
- // Rename, dropping "permanentColl" and replacing it.
- assert.commandWorked(master.adminCommand(
- {renameCollection: "foo.tempColl", to: "foo.permanentColl", dropTarget: true}));
+// Rename, dropping "permanentColl" and replacing it.
+assert.commandWorked(master.adminCommand(
+ {renameCollection: "foo.tempColl", to: "foo.permanentColl", dropTarget: true}));
- checkCollectionTemp(dbFoo, "permanentColl", false);
+checkCollectionTemp(dbFoo, "permanentColl", false);
- replTest.awaitReplication();
+replTest.awaitReplication();
- checkCollectionTemp(secondaryFoo, "permanentColl", false);
+checkCollectionTemp(secondaryFoo, "permanentColl", false);
- replTest.stopSet();
+replTest.stopSet();
}());
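
checkCollectionTemp above scans every collection's info; getCollectionInfos also accepts a filter, so a narrower probe of the same metadata would look like this sketch:

    var infos = dbFoo.getCollectionInfos({name: "permanentColl"});
    assert.eq(infos.length, 1);
    // options.temp is true for a temporary collection and absent once the
    // collection has been renamed to a permanent one.
    assert.isnull(infos[0].options.temp);
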
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 07494d88e10..5a5d7a3c918 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -3,7 +3,6 @@ var ssl_options2;
var ssl_name;
load("jstests/replsets/rslib.js");
var doTest = function(signal) {
-
// Test basic replica set functionality.
// -- Replication
// -- Failover
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index b6013c377c6..8b789db3547 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -1,7 +1,6 @@
load("jstests/replsets/rslib.js");
doTest = function(signal) {
-
// Test replication with write concern.
// Replica set testing API
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
index ee12d75a0bb..8723c562601 100644
--- a/jstests/replsets/replset3.js
+++ b/jstests/replsets/replset3.js
@@ -33,7 +33,7 @@ var doTest = function(signal) {
try {
var new_master = replTest.getPrimary();
} catch (err) {
- throw("Could not elect new master before timeout.");
+ throw ("Could not elect new master before timeout.");
}
print(phase++);
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
index 1d5920f1b9e..7cf49c2751a 100644
--- a/jstests/replsets/replset4.js
+++ b/jstests/replsets/replset4.js
@@ -1,5 +1,4 @@
doTest = function(signal) {
-
// Test orphaned master steps down
var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index 29ffc152c95..e714e034b87 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -2,79 +2,82 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- // Test write concern defaults
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
-
- var nodes = replTest.startSet();
-
- // Initiate set with default for write concern
- var config = replTest.getReplSetConfig();
- config.settings = {};
- config.settings.getLastErrorDefaults = {'w': 3, 'wtimeout': ReplSetTest.kDefaultTimeoutMS};
- config.settings.heartbeatTimeoutSecs = 15;
- // Prevent node 2 from becoming primary, as we will attempt to set it to hidden later.
- config.members[2].priority = 0;
-
- replTest.initiate(config);
-
- //
- var master = replTest.getPrimary();
- replTest.awaitSecondaryNodes();
- var testDB = "foo";
-
- // Initial replication
- master.getDB("barDB").bar.save({a: 1});
- replTest.awaitReplication();
-
- // These writes should be replicated immediately
- var docNum = 5000;
- var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
- for (var n = 0; n < docNum; n++) {
- bulk.insert({n: n});
- }
-
- // should use the configured last error defaults from above; that's what we're testing.
- //
- // If you want to test failure, just add values for w and wtimeout (e.g. w=1)
- // to the following command. This will override the default set above and
- // prevent replication from happening in time for the count tests below.
- //
- var result = bulk.execute();
- var wcError = result.getWriteConcernError();
-
- if (wcError != null) {
- print("\WARNING getLastError timed out and should not have: " + result.toString());
- print("This machine seems extremely slow. Stopping test without failing it\n");
- replTest.stopSet();
- return;
- }
+"use strict";
+// Test write concern defaults
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+
+var nodes = replTest.startSet();
+
+// Initiate set with default for write concern
+var config = replTest.getReplSetConfig();
+config.settings = {};
+config.settings.getLastErrorDefaults = {
+ 'w': 3,
+ 'wtimeout': ReplSetTest.kDefaultTimeoutMS
+};
+config.settings.heartbeatTimeoutSecs = 15;
+// Prevent node 2 from becoming primary, as we will attempt to set it to hidden later.
+config.members[2].priority = 0;
+
+replTest.initiate(config);
+
+//
+var master = replTest.getPrimary();
+replTest.awaitSecondaryNodes();
+var testDB = "foo";
+
+// Initial replication
+master.getDB("barDB").bar.save({a: 1});
+replTest.awaitReplication();
+
+// These writes should be replicated immediately
+var docNum = 5000;
+var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
+for (var n = 0; n < docNum; n++) {
+ bulk.insert({n: n});
+}
+
+// should use the configured last error defaults from above, that's what we're testing.
+//
+// If you want to test failure, just add values for w and wtimeout (e.g. w=1)
+// to the following command. This will override the default set above and
+// prevent replication from happening in time for the count tests below.
+//
+var result = bulk.execute();
+var wcError = result.getWriteConcernError();
+
+if (wcError != null) {
+ print("\WARNING getLastError timed out and should not have: " + result.toString());
+ print("This machine seems extremely slow. Stopping test without failing it\n");
+ replTest.stopSet();
+ return;
+}
- var slaves = replTest._slaves;
- slaves[0].setSlaveOk();
- slaves[1].setSlaveOk();
+var slaves = replTest._slaves;
+slaves[0].setSlaveOk();
+slaves[1].setSlaveOk();
- var slave0count = slaves[0].getDB(testDB).foo.find().itcount();
- assert(slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
+var slave0count = slaves[0].getDB(testDB).foo.find().itcount();
+assert(slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
- var slave1count = slaves[1].getDB(testDB).foo.find().itcount();
- assert(slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
+var slave1count = slaves[1].getDB(testDB).foo.find().itcount();
+assert(slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
- var master1count = master.getDB(testDB).foo.find().itcount();
- assert(master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
+var master1count = master.getDB(testDB).foo.find().itcount();
+assert(master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
- print("replset5.js reconfigure with hidden=1");
- config = master.getDB("local").system.replset.findOne();
+print("replset5.js reconfigure with hidden=1");
+config = master.getDB("local").system.replset.findOne();
- assert.eq(15, config.settings.heartbeatTimeoutSecs);
+assert.eq(15, config.settings.heartbeatTimeoutSecs);
- config.version++;
- config.members[2].hidden = 1;
+config.version++;
+config.members[2].hidden = 1;
- master = reconfig(replTest, config);
+master = reconfig(replTest, config);
- config = master.getSisterDB("local").system.replset.findOne();
- assert.eq(config.members[2].hidden, true);
+config = master.getSisterDB("local").system.replset.findOne();
+assert.eq(config.members[2].hidden, true);
- replTest.stopSet();
+replTest.stopSet();
}());
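The test above hinges on getLastErrorDefaults: because the set's config carries a default write concern of {w: 3}, the bulk execute() issued with no explicit write concern must wait for all three nodes. A minimal sketch of that mechanism, assuming the jstests shell harness (ReplSetTest and the assert helpers come from the test runner; all names here are illustrative):

// Sketch: writes with no explicit write concern inherit the configured default.
var wcRt = new ReplSetTest({name: 'wcDefaultsSketch', nodes: 3});
wcRt.startSet();
var wcCfg = wcRt.getReplSetConfig();
wcCfg.settings = {
    getLastErrorDefaults: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}
};
wcRt.initiate(wcCfg);
var wcBulk = wcRt.getPrimary().getDB('test').c.initializeUnorderedBulkOp();
wcBulk.insert({x: 1});
// No write concern passed to execute(): the configured default of w:3 applies,
// so a null write concern error means the write reached every node.
var wcRes = wcBulk.execute();
assert.eq(null, wcRes.getWriteConcernError());
wcRt.stopSet();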
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index 23169a2ff6a..65f1be46e16 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -2,85 +2,85 @@
// test for SERVER-6303 - if documents move backward during an initial sync.
(function() {
- "use strict";
- var rt = new ReplSetTest({name: "replset8", nodes: 1});
+"use strict";
+var rt = new ReplSetTest({name: "replset8", nodes: 1});
- var nodes = rt.startSet();
- rt.initiate();
- var master = rt.getPrimary();
- var bigstring = "a";
- var md = master.getDB('d');
- var mdc = md['c'];
+var nodes = rt.startSet();
+rt.initiate();
+var master = rt.getPrimary();
+var bigstring = "a";
+var md = master.getDB('d');
+var mdc = md['c'];
- // prep the data
+// prep the data
- // idea: create x documents of increasing size, then create x documents of size n.
- // delete first x documents. start initial sync (cloner). update all remaining
- // documents to be increasing size.
- // this should result in the updates moving the docs backwards.
+// idea: create x documents of increasing size, then create x documents of size n.
+// delete first x documents. start initial sync (cloner). update all remaining
+// documents to be increasing size.
+// this should result in the updates moving the docs backwards.
- var doccount = 5000;
- // Avoid empty extent issues
- mdc.insert({_id: -1, x: "dummy"});
+var doccount = 5000;
+// Avoid empty extent issues
+mdc.insert({_id: -1, x: "dummy"});
- jsTestLog('inserting ' + doccount + ' bigstrings');
- var bulk = mdc.initializeUnorderedBulkOp();
- for (var i = 0; i < doccount; ++i) {
- bulk.insert({_id: i, x: bigstring});
- bigstring += "a";
- }
- var result = assert.writeOK(bulk.execute());
- jsTestLog('insert 0-' + (doccount - 1) + ' result: ' + tojson(result));
- assert.eq(doccount, result.nInserted);
- assert.eq(doccount + 1, mdc.find().itcount());
+jsTestLog('inserting ' + doccount + ' bigstrings');
+var bulk = mdc.initializeUnorderedBulkOp();
+for (var i = 0; i < doccount; ++i) {
+ bulk.insert({_id: i, x: bigstring});
+ bigstring += "a";
+}
+var result = assert.writeOK(bulk.execute());
+jsTestLog('insert 0-' + (doccount - 1) + ' result: ' + tojson(result));
+assert.eq(doccount, result.nInserted);
+assert.eq(doccount + 1, mdc.find().itcount());
- jsTestLog('inserting ' + (doccount * 2) + ' documents - {_id: 0, x: 0} ... {_id: ' +
- (doccount * 2 - 1) + ', x: ' + (doccount * 2 - 1) + '}');
- bulk = mdc.initializeUnorderedBulkOp();
- for (i = doccount; i < doccount * 2; ++i) {
- bulk.insert({_id: i, x: i});
- }
- result = assert.writeOK(bulk.execute());
- jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
- assert.eq(doccount, result.nInserted);
- assert.eq(doccount * 2 + 1, mdc.find().itcount());
+jsTestLog('inserting ' + (doccount * 2) + ' documents - {_id: 0, x: 0} ... {_id: ' +
+ (doccount * 2 - 1) + ', x: ' + (doccount * 2 - 1) + '}');
+bulk = mdc.initializeUnorderedBulkOp();
+for (i = doccount; i < doccount * 2; ++i) {
+ bulk.insert({_id: i, x: i});
+}
+result = assert.writeOK(bulk.execute());
+jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
+assert.eq(doccount, result.nInserted);
+assert.eq(doccount * 2 + 1, mdc.find().itcount());
- jsTestLog('deleting ' + doccount + ' bigstrings');
- bulk = mdc.initializeUnorderedBulkOp();
- for (i = 0; i < doccount; ++i) {
- bulk.find({_id: i}).remove();
- }
- result = assert.writeOK(bulk.execute());
- jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result));
- assert.eq(doccount, result.nRemoved);
- assert.eq(doccount + 1, mdc.find().itcount());
+jsTestLog('deleting ' + doccount + ' bigstrings');
+bulk = mdc.initializeUnorderedBulkOp();
+for (i = 0; i < doccount; ++i) {
+ bulk.find({_id: i}).remove();
+}
+result = assert.writeOK(bulk.execute());
+jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result));
+assert.eq(doccount, result.nRemoved);
+assert.eq(doccount + 1, mdc.find().itcount());
- // add a secondary
- var slave = rt.add();
- rt.reInitiate();
- jsTestLog('reinitiation complete after adding new node to replicaset');
- rt.awaitSecondaryNodes();
- jsTestLog("updating documents backwards");
- // Move all documents to the beginning by growing them to sizes that should
- // fit the holes we made in phase 1
- bulk = mdc.initializeUnorderedBulkOp();
- for (i = doccount * 2; i > doccount; --i) {
- bulk.find({_id: i}).update({$set: {x: bigstring}});
- bigstring = bigstring.slice(0, -1); // remove last char
- }
- result = assert.writeOK(bulk.execute({w: rt.nodes.length}));
- jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
- assert.eq(doccount - 1, result.nMatched);
- assert.eq(doccount - 1, result.nModified);
+// add a secondary
+var slave = rt.add();
+rt.reInitiate();
+jsTestLog('reinitiation complete after adding new node to replicaset');
+rt.awaitSecondaryNodes();
+jsTestLog("updating documents backwards");
+// Move all documents to the beginning by growing them to sizes that should
+// fit the holes we made in phase 1
+bulk = mdc.initializeUnorderedBulkOp();
+for (i = doccount * 2; i > doccount; --i) {
+ bulk.find({_id: i}).update({$set: {x: bigstring}});
+ bigstring = bigstring.slice(0, -1); // remove last char
+}
+result = assert.writeOK(bulk.execute({w: rt.nodes.length}));
+jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
+assert.eq(doccount - 1, result.nMatched);
+assert.eq(doccount - 1, result.nModified);
- assert.eq(doccount + 1,
- mdc.find().itcount(),
- 'incorrect collection size on primary (fast count: ' + mdc.count() + ')');
- assert.eq(doccount + 1,
- slave.getDB('d')['c'].find().itcount(),
- 'incorrect collection size on secondary (fast count: ' +
- slave.getDB('d')['c'].count() + ')');
+assert.eq(doccount + 1,
+ mdc.find().itcount(),
+ 'incorrect collection size on primary (fast count: ' + mdc.count() + ')');
+assert.eq(
+ doccount + 1,
+ slave.getDB('d')['c'].find().itcount(),
+ 'incorrect collection size on secondary (fast count: ' + slave.getDB('d')['c'].count() + ')');
- jsTestLog("finished");
- rt.stopSet();
+jsTestLog("finished");
+rt.stopSet();
})();
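The moving parts of the scenario above are the add()/reInitiate() pair: a fresh member is appended to a live set and then initial-syncs from the existing node while the updates shuffle documents around on disk. A condensed sketch of just that add-and-resync flow, under the same jstests harness assumptions (identifiers are illustrative):

// Sketch: add an empty member to a running set and let initial sync copy data.
var rsRt = new ReplSetTest({name: 'resyncSketch', nodes: 1});
rsRt.startSet();
rsRt.initiate();
assert.writeOK(rsRt.getPrimary().getDB('d').c.insert({_id: 0, x: 'seed'}));
var addedNode = rsRt.add();  // spin up a new, empty member
rsRt.reInitiate();           // reconfig so it joins and starts initial sync
rsRt.awaitSecondaryNodes();
rsRt.awaitReplication();
// The cloned collection on the new secondary matches the primary.
addedNode.setSlaveOk();
assert.eq(1, addedNode.getDB('d').c.find().itcount());
rsRt.stopSet();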
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 8e1712749e4..cda2c371180 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -1,48 +1,48 @@
// Election when master fails and remaining nodes are an arbiter and a slave.
(function() {
- "use strict";
-
- var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
- var nodes = replTest.nodeList();
-
- var conns = replTest.startSet();
- var r = replTest.initiate({
- "_id": "unicomplex",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], "arbiterOnly": true, "votes": 1},
- {"_id": 2, "host": nodes[2]}
- ]
- });
-
- // Make sure we have a master
- var master = replTest.getPrimary();
-
- // Make sure we have an arbiter
- assert.soon(function() {
- var res = conns[1].getDB("admin").runCommand({replSetGetStatus: 1});
- printjson(res);
- return res.myState === 7;
- }, "Aribiter failed to initialize.");
-
- var result = conns[1].getDB("admin").runCommand({isMaster: 1});
- assert(result.arbiterOnly);
- assert(!result.passive);
-
- // Wait for initial replication
- master.getDB("foo").foo.insert({a: "foo"});
- replTest.awaitReplication();
-
- // Now kill the original master
- var mId = replTest.getNodeId(master);
- replTest.stop(mId);
-
- // And make sure that the slave is promoted
- var new_master = replTest.getPrimary();
-
- var newMasterId = replTest.getNodeId(new_master);
- assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary");
-
- replTest.stopSet(15);
+"use strict";
+
+var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
+var nodes = replTest.nodeList();
+
+var conns = replTest.startSet();
+var r = replTest.initiate({
+ "_id": "unicomplex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], "arbiterOnly": true, "votes": 1},
+ {"_id": 2, "host": nodes[2]}
+ ]
+});
+
+// Make sure we have a master
+var master = replTest.getPrimary();
+
+// Make sure we have an arbiter
+assert.soon(function() {
+ var res = conns[1].getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(res);
+ return res.myState === 7;
+}, "Aribiter failed to initialize.");
+
+var result = conns[1].getDB("admin").runCommand({isMaster: 1});
+assert(result.arbiterOnly);
+assert(!result.passive);
+
+// Wait for initial replication
+master.getDB("foo").foo.insert({a: "foo"});
+replTest.awaitReplication();
+
+// Now kill the original master
+var mId = replTest.getNodeId(master);
+replTest.stop(mId);
+
+// And make sure that the slave is promoted
+var new_master = replTest.getPrimary();
+
+var newMasterId = replTest.getNodeId(new_master);
+assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary");
+
+replTest.stopSet(15);
}());
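The assert.soon loop above is the standard way to detect that a member has settled into the ARBITER role, since replSetGetStatus reports it as myState 7. Pulled out as a hypothetical helper (the function name is ours, not part of the harness):

// Hypothetical helper: block until a connection reports itself as an arbiter.
function awaitArbiterState(conn, timeoutMS) {
    assert.soon(function() {
        var status = conn.getDB('admin').runCommand({replSetGetStatus: 1});
        return status.ok === 1 && status.myState === 7;  // 7 == ARBITER
    }, 'node never reached ARBITER state', timeoutMS);
}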
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
index dc2d5295e4e..bb1c1f7dc76 100644
--- a/jstests/replsets/replsetprio1.js
+++ b/jstests/replsets/replsetprio1.js
@@ -1,55 +1,55 @@
// should check that election happens in priority order
(function() {
- "use strict";
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodenames = replTest.nodeList();
-
- var nodes = replTest.startSet();
- replTest.initiateWithAnyNodeAsPrimary({
- "_id": "testSet",
- "members": [
- {"_id": 0, "host": nodenames[0], "priority": 1},
- {"_id": 1, "host": nodenames[1], "priority": 2},
- {"_id": 2, "host": nodenames[2], "priority": 3}
- ]
- });
-
- // 2 should be master (give this a while to happen, as other nodes might first be elected)
- replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
-
- // wait for 1 to not appear to be master (we are about to make it master and need a clean slate
- // here)
- replTest.waitForState(nodes[1], ReplSetTest.State.SECONDARY);
-
- // Wait for election oplog entry to be replicated, to ensure 0 will vote for 1 after stopping 2.
- replTest.awaitReplication();
-
- // kill 2, 1 should take over
- replTest.stop(2);
-
- // 1 should eventually be master
- replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
-
- // do some writes on 1
- var master = replTest.getPrimary();
- for (var i = 0; i < 1000; i++) {
- assert.writeOK(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
- }
-
- for (i = 0; i < 1000; i++) {
- assert.writeOK(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
- }
-
- // bring 2 back up, 2 should wait until caught up and then become master
- replTest.restart(2);
- replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
-
- // make sure nothing was rolled back
- master = replTest.getPrimary();
- for (i = 0; i < 1000; i++) {
- assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
- assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
- }
- replTest.stopSet();
+"use strict";
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodenames = replTest.nodeList();
+
+var nodes = replTest.startSet();
+replTest.initiateWithAnyNodeAsPrimary({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodenames[0], "priority": 1},
+ {"_id": 1, "host": nodenames[1], "priority": 2},
+ {"_id": 2, "host": nodenames[2], "priority": 3}
+ ]
+});
+
+// 2 should be master (give this a while to happen, as other nodes might first be elected)
+replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
+
+// wait for 1 to not appear to be master (we are about to make it master and need a clean slate
+// here)
+replTest.waitForState(nodes[1], ReplSetTest.State.SECONDARY);
+
+// Wait for election oplog entry to be replicated, to ensure 0 will vote for 1 after stopping 2.
+replTest.awaitReplication();
+
+// kill 2, 1 should take over
+replTest.stop(2);
+
+// 1 should eventually be master
+replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
+
+// do some writes on 1
+var master = replTest.getPrimary();
+for (var i = 0; i < 1000; i++) {
+ assert.writeOK(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
+}
+
+for (i = 0; i < 1000; i++) {
+ assert.writeOK(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
+}
+
+// bring 2 back up, 2 should wait until caught up and then become master
+replTest.restart(2);
+replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
+
+// make sure nothing was rolled back
+master = replTest.getPrimary();
+for (i = 0; i < 1000; i++) {
+ assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
+ assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
+}
+replTest.stopSet();
}());
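Stripped of the failover choreography, the priority mechanics above reduce to: give one member the highest priority and the set converges on it as primary. A minimal sketch under the same harness assumptions (names are illustrative):

// Sketch: the member with the highest priority wins the election.
var prioRt = new ReplSetTest({name: 'prioSketch', nodes: 3});
var prioHosts = prioRt.nodeList();
var prioNodes = prioRt.startSet();
prioRt.initiateWithAnyNodeAsPrimary({
    _id: 'prioSketch',
    members: [
        {_id: 0, host: prioHosts[0], priority: 1},
        {_id: 1, host: prioHosts[1], priority: 2},
        {_id: 2, host: prioHosts[2], priority: 3}
    ]
});
// Node 2 has the highest priority, so all members eventually agree on it.
prioRt.awaitNodesAgreeOnPrimary(prioRt.kDefaultTimeoutMS, prioNodes, 2);
prioRt.stopSet();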
diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js
index 01ab2c702e4..e090a1ff959 100644
--- a/jstests/replsets/replsetrestart1.js
+++ b/jstests/replsets/replsetrestart1.js
@@ -9,62 +9,62 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
+'use strict';
- var compare_configs = function(c1, c2) {
- assert.eq(c1.version, c2.version, 'version same');
- assert.eq(c1._id, c2._id, '_id same');
+var compare_configs = function(c1, c2) {
+ assert.eq(c1.version, c2.version, 'version same');
+ assert.eq(c1._id, c2._id, '_id same');
- for (var i in c1.members) {
- assert(c2.members[i] !== undefined, 'field ' + i + ' exists in both configs');
- assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
- assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs');
- }
- };
+ for (var i in c1.members) {
+ assert(c2.members[i] !== undefined, 'field ' + i + ' exists in both configs');
+ assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
+ assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs');
+ }
+};
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+// Create a new replica set test. Specify set name and the number of nodes you want.
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- replTest.startSet();
+// call startSet() to start each mongod in the replica set
+// this returns a list of nodes
+replTest.startSet();
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
+// Call initiate() to send the replSetInitiate command
+// This will wait for initiation
+replTest.initiate();
- // Wait for at least one heartbeat to reach everyone, so that we will properly mark nodes as
- // DOWN, later.
- replTest.awaitSecondaryNodes();
+// Wait for at least one heartbeat to reach everyone, so that we will properly mark nodes as
+// DOWN, later.
+replTest.awaitSecondaryNodes();
- // Call getPrimary to return a reference to the node that's been
- // elected master.
- var master = replTest.getPrimary();
- var config1 = master.getDB("local").system.replset.findOne();
+// Call getPrimary to return a reference to the node that's been
+// elected master.
+var master = replTest.getPrimary();
+var config1 = master.getDB("local").system.replset.findOne();
- // Now we're going to shut down all nodes
- var mId = replTest.getNodeId(master);
- var s1 = replTest._slaves[0];
- var s1Id = replTest.getNodeId(s1);
- var s2 = replTest._slaves[1];
- var s2Id = replTest.getNodeId(s2);
+// Now we're going to shut down all nodes
+var mId = replTest.getNodeId(master);
+var s1 = replTest._slaves[0];
+var s1Id = replTest.getNodeId(s1);
+var s2 = replTest._slaves[1];
+var s2Id = replTest.getNodeId(s2);
- replTest.stop(s1Id);
- replTest.stop(s2Id);
- replTest.waitForState(s1, ReplSetTest.State.DOWN);
- replTest.waitForState(s2, ReplSetTest.State.DOWN);
+replTest.stop(s1Id);
+replTest.stop(s2Id);
+replTest.waitForState(s1, ReplSetTest.State.DOWN);
+replTest.waitForState(s2, ReplSetTest.State.DOWN);
- replTest.stop(mId);
+replTest.stop(mId);
- // Now let's restart these nodes
- replTest.restart(mId);
- replTest.restart(s1Id);
- replTest.restart(s2Id);
+// Now let's restart these nodes
+replTest.restart(mId);
+replTest.restart(s1Id);
+replTest.restart(s2Id);
- // Make sure that a new master comes up
- master = replTest.getPrimary();
- replTest.awaitSecondaryNodes();
- var config2 = master.getDB("local").system.replset.findOne();
- compare_configs(config1, config2);
- replTest.stopSet();
+// Make sure that a new master comes up
+master = replTest.getPrimary();
+replTest.awaitSecondaryNodes();
+var config2 = master.getDB("local").system.replset.findOne();
+compare_configs(config1, config2);
+replTest.stopSet();
}());
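The essence of the restart test is that the config document in local.system.replset is durable: stopping and restarting every node must bring back an identical config. A condensed sketch, assuming a persistent storage engine and the jstests harness:

// Sketch: the replica set config survives a node restart unchanged.
var restartRt = new ReplSetTest({name: 'restartSketch', nodes: 1});
restartRt.startSet();
restartRt.initiate();
var cfgBefore = restartRt.getPrimary().getDB('local').system.replset.findOne();
restartRt.restart(0);
var cfgAfter = restartRt.getPrimary().getDB('local').system.replset.findOne();
assert.eq(cfgBefore.version, cfgAfter.version);
assert.eq(cfgBefore._id, cfgAfter._id);
restartRt.stopSet();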
diff --git a/jstests/replsets/replsets_killop.js b/jstests/replsets/replsets_killop.js
index 4e41046f8c8..3fb42d6a244 100644
--- a/jstests/replsets/replsets_killop.js
+++ b/jstests/replsets/replsets_killop.js
@@ -17,9 +17,11 @@ assert.soon(function() {
});
// Start a parallel shell to insert new documents on the primary.
-inserter = startParallelShell('var bulk = db.test.initializeUnorderedBulkOp(); \
- for( i = 1; i < ' + numDocs +
- '; ++i ) { \
+inserter = startParallelShell(
+ 'var bulk = db.test.initializeUnorderedBulkOp(); \
+ for( i = 1; i < ' +
+ numDocs +
+ '; ++i ) { \
bulk.insert({ a: i }); \
} \
bulk.execute();');
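The string-splicing that clang-format re-wrapped above is one of two ways to feed startParallelShell; the shell also accepts a plain function, with parameters smuggled in through TestData, which sidesteps the quoting entirely. A hypothetical rewrite in that style (assuming the outer shell is connected to the node the inserts should target):

// Hypothetical alternative: pass a function instead of a spliced string.
TestData.numDocs = 100;  // illustrative count; TestData is serialized into the child shell
var joinInserter = startParallelShell(function() {
    var bulk = db.test.initializeUnorderedBulkOp();
    for (var i = 1; i < TestData.numDocs; ++i) {
        bulk.insert({a: i});
    }
    bulk.execute();
});
joinInserter();  // wait for the parallel shell to finish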
diff --git a/jstests/replsets/request_primary_stepdown.js b/jstests/replsets/request_primary_stepdown.js
index 8ea0f78688c..6f4a37b1a22 100644
--- a/jstests/replsets/request_primary_stepdown.js
+++ b/jstests/replsets/request_primary_stepdown.js
@@ -4,36 +4,35 @@
// Eventually the high priority node will run a priority takeover election to become primary. During
// this election that node should make sure that it does not error in _requestRemotePrimaryStepDown.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'request_primary_stepdown';
- var replSet = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}]});
- replSet.startSet();
- replSet.initiate();
+var name = 'request_primary_stepdown';
+var replSet = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}]});
+replSet.startSet();
+replSet.initiate();
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- replSet.awaitSecondaryNodes();
- replSet.awaitReplication();
- var primary = replSet.getPrimary();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+var primary = replSet.getPrimary();
- assert.commandWorked(
- replSet.nodes[0].adminCommand({setParameter: 1, logComponentVerbosity: {executor: 4}}));
- clearRawMongoProgramOutput();
+assert.commandWorked(
+ replSet.nodes[0].adminCommand({setParameter: 1, logComponentVerbosity: {executor: 4}}));
+clearRawMongoProgramOutput();
- // Primary should step down long enough for election to occur on secondary.
- assert.commandWorked(
- primary.adminCommand({replSetStepDown: 70, secondaryCatchUpPeriodSecs: 60}));
+// Primary should step down long enough for election to occur on secondary.
+assert.commandWorked(primary.adminCommand({replSetStepDown: 70, secondaryCatchUpPeriodSecs: 60}));
- // Wait for node 1 to be promoted to primary after node 0 stepped down.
- replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
+// Wait for node 1 to be promoted to primary after node 0 stepped down.
+replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
- // Eventually node 0 will stand for election again because it has a higher priority.
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 100 * 1000);
- var logContents = rawMongoProgramOutput();
- assert.eq(logContents.indexOf("stepdown period must be longer than secondaryCatchUpPeriodSecs"),
- -1,
- "_requestRemotePrimaryStepDown sent an invalid replSetStepDown command");
- replSet.stopSet();
+// Eventually node 0 will stand for election again because it has a higher priority.
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 100 * 1000);
+var logContents = rawMongoProgramOutput();
+assert.eq(logContents.indexOf("stepdown period must be longer than secondaryCatchUpPeriodSecs"),
+ -1,
+ "_requestRemotePrimaryStepDown sent an invalid replSetStepDown command");
+replSet.stopSet();
})();
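The log scan at the end guards a server-side rule: replSetStepDown is rejected up front unless the stepdown window is strictly longer than secondaryCatchUpPeriodSecs. A sketch of both sides of that rule, assuming `primary` is a connection to a current primary whose secondaries are replicating:

// An inverted pair is rejected before any stepdown is attempted, with the
// "stepdown period must be longer than secondaryCatchUpPeriodSecs" error.
assert.commandFailed(
    primary.adminCommand({replSetStepDown: 30, secondaryCatchUpPeriodSecs: 60}));
// A valid pair: a 70s stepdown window leaves room for a 60s catch-up period.
assert.commandWorked(
    primary.adminCommand({replSetStepDown: 70, secondaryCatchUpPeriodSecs: 60}));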
diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js
index 05c03a2dfee..072a22eb974 100644
--- a/jstests/replsets/restore_term.js
+++ b/jstests/replsets/restore_term.js
@@ -10,52 +10,52 @@ load("jstests/replsets/rslib.js");
// storage engines.
// @tags: [requires_persistence]
(function() {
- "use strict";
-
- function getCurrentTerm(primary) {
- var res = primary.adminCommand({replSetGetStatus: 1});
- assert.commandWorked(res);
- return res.term;
- }
-
- var name = "restore_term";
- var rst = new ReplSetTest({name: name, nodes: 2});
-
- rst.startSet();
- rst.initiate();
- rst.awaitSecondaryNodes();
-
- var primary = rst.getPrimary();
- var primaryColl = primary.getDB("test").coll;
-
- // Current term may be greater than 1 if election race happens.
- var firstSuccessfulTerm = getCurrentTerm(primary);
- assert.gte(firstSuccessfulTerm, 1);
- assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
- assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
-
- // Check that the insert op has the initial term.
- var latestOp = getLatestOp(primary);
- assert.eq(latestOp.op, "i");
- assert.eq(latestOp.t, firstSuccessfulTerm);
-
- // Step down to increase the term.
- assert.commandWorked(primary.adminCommand({replSetStepDown: 0}));
-
- rst.awaitSecondaryNodes();
- // The secondary became the new primary now with a higher term.
- // Since there's only one secondary who may run for election, the new term is higher by 1.
- assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1);
-
- // Restart the replset and verify the term is the same.
- rst.stopSet(null /* signal */, true /* forRestart */);
- rst.startSet({restart: true});
- rst.awaitSecondaryNodes();
- primary = rst.getPrimary();
-
- assert.eq(primary.getDB("test").coll.find().itcount(), 1);
- // After restart, the new primary stands up with the newer term.
- assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1);
-
- rst.stopSet();
+"use strict";
+
+function getCurrentTerm(primary) {
+ var res = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(res);
+ return res.term;
+}
+
+var name = "restore_term";
+var rst = new ReplSetTest({name: name, nodes: 2});
+
+rst.startSet();
+rst.initiate();
+rst.awaitSecondaryNodes();
+
+var primary = rst.getPrimary();
+var primaryColl = primary.getDB("test").coll;
+
+// Current term may be greater than 1 if election race happens.
+var firstSuccessfulTerm = getCurrentTerm(primary);
+assert.gte(firstSuccessfulTerm, 1);
+assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
+assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
+
+// Check that the insert op has the initial term.
+var latestOp = getLatestOp(primary);
+assert.eq(latestOp.op, "i");
+assert.eq(latestOp.t, firstSuccessfulTerm);
+
+// Step down to increase the term.
+assert.commandWorked(primary.adminCommand({replSetStepDown: 0}));
+
+rst.awaitSecondaryNodes();
+// The secondary became the new primary now with a higher term.
+// Since there's only one secondary who may run for election, the new term is higher by 1.
+assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1);
+
+// Restart the replset and verify the term is the same.
+rst.stopSet(null /* signal */, true /* forRestart */);
+rst.startSet({restart: true});
+rst.awaitSecondaryNodes();
+primary = rst.getPrimary();
+
+assert.eq(primary.getDB("test").coll.find().itcount(), 1);
+// After restart, the new primary stands up with the newer term.
+assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1);
+
+rst.stopSet();
})();
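Two facts from the test above are worth lifting out: the election term is surfaced by replSetGetStatus, and every oplog entry is stamped with the term (`t`) it was written under. As a small pair of hypothetical helpers (the names are ours):

// Hypothetical helpers around the term bookkeeping the test relies on.
function currentTerm(conn) {
    var status = assert.commandWorked(conn.adminCommand({replSetGetStatus: 1}));
    return status.term;
}
function latestOplogTerm(conn) {
    // Newest oplog entry first; its `t` field is the term it was written in.
    return conn.getDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next().t;
}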
diff --git a/jstests/replsets/retryable_commit_transaction_after_failover.js b/jstests/replsets/retryable_commit_transaction_after_failover.js
index 30fd5af2904..e9c81e6b81c 100644
--- a/jstests/replsets/retryable_commit_transaction_after_failover.js
+++ b/jstests/replsets/retryable_commit_transaction_after_failover.js
@@ -1,109 +1,112 @@
// Test committed transaction state is restored after failover.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const dbName = "test";
- const collName = "retryable_commit_transaction_after_failover";
+const dbName = "test";
+const collName = "retryable_commit_transaction_after_failover";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- const config = rst.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while
- // stepping up the old secondary.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+const config = rst.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while
+// stepping up the old secondary.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- // Get the connection to the replica set using MongoDB URI.
- const conn = new Mongo(rst.getURL());
- const testDB = conn.getDB(dbName);
- const testColl = testDB[collName];
+// Get the connection to the replica set using MongoDB URI.
+const conn = new Mongo(rst.getURL());
+const testDB = conn.getDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- let txnNumber = 0;
- let stmtId = 0;
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+let txnNumber = 0;
+let stmtId = 0;
- const sessionOptions = {causalConsistency: false};
- let session = testDB.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = testDB.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
- jsTest.log("commitTransaction command is retryable before failover");
- txnNumber++;
- stmtId = 0;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- startTransaction: true,
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable before failover");
+txnNumber++;
+stmtId = 0;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ startTransaction: true,
+ autocommit: false
+}));
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("Step up the secondary");
- const oldPrimary = rst.getPrimary();
- const oldSecondary = rst.getSecondary();
- rst.stepUp(oldSecondary);
- // Wait until the other node becomes primary.
- assert.eq(oldSecondary, rst.getPrimary());
- // Reconnect the connection to the new primary.
- sessionDb.getMongo()._markNodeAsFailed(
- oldPrimary.host, ErrorCodes.NotMaster, "Notice that primary is not master");
- reconnect(sessionDb);
+jsTest.log("Step up the secondary");
+const oldPrimary = rst.getPrimary();
+const oldSecondary = rst.getSecondary();
+rst.stepUp(oldSecondary);
+// Wait until the other node becomes primary.
+assert.eq(oldSecondary, rst.getPrimary());
+// Reconnect the connection to the new primary.
+sessionDb.getMongo()._markNodeAsFailed(
+ oldPrimary.host, ErrorCodes.NotMaster, "Notice that primary is not master");
+reconnect(sessionDb);
- jsTest.log("commitTransaction command is retryable after failover");
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable after failover");
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("Attempt to abort a committed transaction after failover");
- // Cannot abort the committed transaction.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to abort a committed transaction after failover");
+// Cannot abort the committed transaction.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.TransactionCommitted);
- jsTest.log("Attempt to continue a committed transaction after failover");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-2"}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to continue a committed transaction after failover");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-2"}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.TransactionCommitted);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
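The retry contract being exercised: commitTransaction is idempotent as long as the retry carries the same session id, txnNumber and stmtId, so the exact same command document can be replayed against the new primary. A sketch, where `sessionDb`, `txnNumber` and `stmtId` are assumed to match the original commit attempt:

// Re-sending an identical commitTransaction is a no-op success, not an error.
var commitCmd = {
    commitTransaction: 1,
    txnNumber: NumberLong(txnNumber),
    stmtId: NumberInt(stmtId),
    autocommit: false,
    writeConcern: {w: 'majority'}
};
assert.commandWorked(sessionDb.adminCommand(commitCmd));  // first attempt
assert.commandWorked(sessionDb.adminCommand(commitCmd));  // identical retry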
diff --git a/jstests/replsets/retryable_commit_transaction_after_restart.js b/jstests/replsets/retryable_commit_transaction_after_restart.js
index 802259661fb..2244525c977 100644
--- a/jstests/replsets/retryable_commit_transaction_after_restart.js
+++ b/jstests/replsets/retryable_commit_transaction_after_restart.js
@@ -1,96 +1,97 @@
// Test committed transaction state is restored after restart.
// @tags: [uses_transactions, requires_persistence]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const dbName = "test";
- const collName = "retryable_commit_transaction_after_restart";
+const dbName = "test";
+const collName = "retryable_commit_transaction_after_restart";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const testDB = rst.getPrimary().getDB(dbName);
- const testColl = testDB[collName];
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- let txnNumber = 0;
- let stmtId = 0;
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+let txnNumber = 0;
+let stmtId = 0;
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- jsTest.log("commitTransaction command is retryable before restart");
- txnNumber++;
- stmtId = 0;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- startTransaction: true,
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable before restart");
+txnNumber++;
+stmtId = 0;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ startTransaction: true,
+ autocommit: false
+}));
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("restart the single node replset");
- rst.restart(0);
- // Wait until the node becomes a primary and reconnect.
- rst.getPrimary();
- reconnect(sessionDb);
+jsTest.log("restart the single node replset");
+rst.restart(0);
+// Wait until the node becomes a primary and reconnect.
+rst.getPrimary();
+reconnect(sessionDb);
- jsTest.log("commitTransaction command is retryable after restart");
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable after restart");
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("Attempt to abort a committed transaction after restart");
- // Cannot abort the committed transaction.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to abort a committed transaction after restart");
+// Cannot abort the committed transaction.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.TransactionCommitted);
- jsTest.log("Attempt to continue a committed transaction after restart");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-2"}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to continue a committed transaction after restart");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-2"}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.TransactionCommitted);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
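reconnect() from rslib.js is what bridges the restart: it keeps retrying a trivial command until the node accepts connections again. A minimal stand-in with the same shape (the name and body are ours, not the library's):

// Hypothetical stand-in for rslib's reconnect(): poll until the socket is live.
function reconnectSketch(db) {
    assert.soon(function() {
        try {
            db.runCommand({ping: 1});  // any cheap command will do
            return true;
        } catch (e) {
            return false;  // connection still down; retry
        }
    });
}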
diff --git a/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js b/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js
index fbd05eadffb..ba3a15b83aa 100644
--- a/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js
+++ b/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js
@@ -5,98 +5,100 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "foo";
+const dbName = "test";
+const collName = "foo";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- const config = rst.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while
- // stepping up the old secondary.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+const config = rst.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while
+// stepping up the old secondary.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- const priConn = rst.getPrimary();
- const secConn = rst.getSecondary();
- assert.commandWorked(priConn.getDB(dbName).runCommand({create: collName}));
+const priConn = rst.getPrimary();
+const secConn = rst.getSecondary();
+assert.commandWorked(priConn.getDB(dbName).runCommand({create: collName}));
- const priSession = priConn.startSession();
- const priSessionDB = priSession.getDatabase(dbName);
- const priSessionColl = priSessionDB.getCollection(collName);
+const priSession = priConn.startSession();
+const priSessionDB = priSession.getDatabase(dbName);
+const priSessionColl = priSessionDB.getCollection(collName);
- jsTestLog("Prepare a transaction");
- priSession.startTransaction();
- assert.commandWorked(priSessionColl.insert({_id: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(priSession);
+jsTestLog("Prepare a transaction");
+priSession.startTransaction();
+assert.commandWorked(priSessionColl.insert({_id: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(priSession);
- jsTestLog("Error committing the transaction");
- // This will error in the "commit unprepared transaction" code path.
- assert.commandFailedWithCode(priSessionDB.adminCommand({commitTransaction: 1}),
- ErrorCodes.InvalidOptions);
+jsTestLog("Error committing the transaction");
+// This will error in the "commit unprepared transaction" code path.
+assert.commandFailedWithCode(priSessionDB.adminCommand({commitTransaction: 1}),
+ ErrorCodes.InvalidOptions);
- // This will error in the "commit prepared transaction" code path.
- const tooEarlyTS1 = Timestamp(prepareTimestamp1.getTime() - 1, 1);
- assert.commandFailedWithCode(
- priSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS1}),
- ErrorCodes.InvalidOptions);
+// This will error in the "commit prepared transaction" code path.
+const tooEarlyTS1 = Timestamp(prepareTimestamp1.getTime() - 1, 1);
+assert.commandFailedWithCode(
+ priSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS1}),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Step up the secondary");
- rst.stepUp(secConn);
- assert.eq(secConn, rst.getPrimary());
- rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
+jsTestLog("Step up the secondary");
+rst.stepUp(secConn);
+assert.eq(secConn, rst.getPrimary());
+rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
- jsTestLog("commitTransaction command is retryable after failover");
+jsTestLog("commitTransaction command is retryable after failover");
- const secSession = new _DelegatingDriverSession(secConn, priSession);
- const secSessionDB = secSession.getDatabase(dbName);
- const secSessionColl = secSessionDB.getCollection(collName);
- assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp1));
+const secSession = new _DelegatingDriverSession(secConn, priSession);
+const secSessionDB = secSession.getDatabase(dbName);
+const secSessionColl = secSessionDB.getCollection(collName);
+assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp1));
- assert.eq(secConn.getDB(dbName)[collName].count(), 1);
- assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 1);
+assert.eq(secConn.getDB(dbName)[collName].count(), 1);
+assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 1);
- rst.awaitReplication();
+rst.awaitReplication();
- assert.eq(priConn.getDB(dbName)[collName].count(), 1);
- assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 1);
+assert.eq(priConn.getDB(dbName)[collName].count(), 1);
+assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 1);
- jsTestLog("Prepare a second transaction");
- secSession.startTransaction();
- assert.commandWorked(secSessionColl.insert({_id: 2}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(secSession);
+jsTestLog("Prepare a second transaction");
+secSession.startTransaction();
+assert.commandWorked(secSessionColl.insert({_id: 2}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(secSession);
- jsTestLog("Error committing the transaction");
- assert.commandFailedWithCode(secSessionDB.adminCommand({commitTransaction: 1}),
- ErrorCodes.InvalidOptions);
- const tooEarlyTS2 = Timestamp(prepareTimestamp2.getTime() - 1, 1);
- assert.commandFailedWithCode(
- secSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS2}),
- ErrorCodes.InvalidOptions);
+jsTestLog("Error committing the transaction");
+assert.commandFailedWithCode(secSessionDB.adminCommand({commitTransaction: 1}),
+ ErrorCodes.InvalidOptions);
+const tooEarlyTS2 = Timestamp(prepareTimestamp2.getTime() - 1, 1);
+assert.commandFailedWithCode(
+ secSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS2}),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Step up the original primary");
- rst.stepUp(priConn);
- assert.eq(priConn, rst.getPrimary());
- rst.waitForState(secConn, ReplSetTest.State.SECONDARY);
+jsTestLog("Step up the original primary");
+rst.stepUp(priConn);
+assert.eq(priConn, rst.getPrimary());
+rst.waitForState(secConn, ReplSetTest.State.SECONDARY);
- jsTestLog("Step up the original secondary immediately");
- rst.stepUp(secConn);
- assert.eq(secConn, rst.getPrimary());
- rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
+jsTestLog("Step up the original secondary immediately");
+rst.stepUp(secConn);
+assert.eq(secConn, rst.getPrimary());
+rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
- assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp2));
+assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp2));
- assert.eq(secConn.getDB(dbName)[collName].count(), 2);
- assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 2);
+assert.eq(secConn.getDB(dbName)[collName].count(), 2);
+assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 2);
- rst.awaitReplication();
+rst.awaitReplication();
- assert.eq(priConn.getDB(dbName)[collName].count(), 2);
- assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 2);
+assert.eq(priConn.getDB(dbName)[collName].count(), 2);
+assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 2);
- rst.stopSet();
+rst.stopSet();
}());
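The prepared-transaction rules this test trips on purpose: once prepared, a transaction can only be committed with an explicit commitTimestamp at or after its prepareTimestamp, and a bare commitTransaction is InvalidOptions. A self-contained sketch on a one-node set, under the usual harness assumptions (names are illustrative):

load('jstests/core/txns/libs/prepare_helpers.js');

// Sketch: prepare, show the rejected commit shape, then commit properly.
var prepRt = new ReplSetTest({nodes: 1});
prepRt.startSet();
prepRt.initiate();
var prepSession = prepRt.getPrimary().startSession();
var prepDB = prepSession.getDatabase('test');
assert.commandWorked(prepDB.createCollection('c'));  // no DDL inside the txn
prepSession.startTransaction();
assert.commandWorked(prepDB.c.insert({_id: 1}));
var prepareTs = PrepareHelpers.prepareTransaction(prepSession);
// A commit without a commitTimestamp is rejected...
assert.commandFailedWithCode(prepDB.adminCommand({commitTransaction: 1}),
                             ErrorCodes.InvalidOptions);
// ...but committing at the prepare timestamp succeeds.
assert.commandWorked(PrepareHelpers.commitTransaction(prepSession, prepareTs));
prepRt.stopSet();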
diff --git a/jstests/replsets/retryable_write_concern.js b/jstests/replsets/retryable_write_concern.js
index 65f5d4ccad7..376d966f193 100644
--- a/jstests/replsets/retryable_write_concern.js
+++ b/jstests/replsets/retryable_write_concern.js
@@ -5,236 +5,235 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/libs/feature_compatibility_version.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- const kNodes = 2;
-
- let replTest = new ReplSetTest({nodes: kNodes});
- replTest.startSet({verbose: 1});
- replTest.initiate();
-
- let priConn = replTest.getPrimary();
- let secConn = replTest.getSecondary();
-
- // Stopping replication on secondaries can take up to 5 seconds normally. Set a small oplog
- // getMore timeout so the test runs faster.
- assert.commandWorked(secConn.adminCommand(
- {configureFailPoint: 'setSmallOplogGetMoreMaxTimeMS', mode: 'alwaysOn'}));
-
- let lsid = UUID();
-
- // Start at an arbitrary txnNumber.
- let txnNumber = 31;
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}},
- ],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- setFeatureCompatibilityVersion: lastStableFCV,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin');
- assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(priConn.getDB('admin'), lastStableFCV);
-
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- setFeatureCompatibilityVersion: latestFCV,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin');
- assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(priConn.getDB('admin'), latestFCV);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- commitTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 80}, {_id: 90}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
-
- });
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 100}, {_id: 110}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
- });
- assert.commandWorked(priConn.adminCommand({
- abortTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }));
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- abortTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 120}, {_id: 130}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(conn.adminCommand({
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }));
- });
-
- txnNumber++;
- assert.commandWorked(priConn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 140}, {_id: 150}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
- const prepareTS = assert
- .commandWorked(priConn.adminCommand({
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }))
- .prepareTimestamp;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- commitTransaction: 1,
- commitTimestamp: prepareTS,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin');
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/libs/feature_compatibility_version.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+const kNodes = 2;
+
+let replTest = new ReplSetTest({nodes: kNodes});
+replTest.startSet({verbose: 1});
+replTest.initiate();
+
+let priConn = replTest.getPrimary();
+let secConn = replTest.getSecondary();
+
+// Stopping replication on secondaries can take up to 5 seconds normally. Set a small oplog
+// getMore timeout so the test runs faster.
+assert.commandWorked(
+ secConn.adminCommand({configureFailPoint: 'setSmallOplogGetMoreMaxTimeMS', mode: 'alwaysOn'}));
+
+let lsid = UUID();
+
+// Start at an arbitrary txnNumber.
+let txnNumber = 31;
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}},
+ ],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ setFeatureCompatibilityVersion: lastStableFCV,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin');
+assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(priConn.getDB('admin'), lastStableFCV);
+
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ setFeatureCompatibilityVersion: latestFCV,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin');
+assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(priConn.getDB('admin'), latestFCV);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ commitTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 80}, {_id: 90}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ });
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 100}, {_id: 110}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ });
+assert.commandWorked(priConn.adminCommand({
+ abortTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+}));
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ abortTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 120}, {_id: 130}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(conn.adminCommand({
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+ }));
+ });
+
+txnNumber++;
+assert.commandWorked(priConn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 140}, {_id: 150}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+}));
+const prepareTS = assert
+ .commandWorked(priConn.adminCommand({
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+ }))
+ .prepareTimestamp;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ commitTransaction: 1,
+ commitTimestamp: prepareTS,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin');
+
+replTest.stopSet();
})();
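
For readers skimming this hunk: runWriteConcernRetryabilityTest is loaded from jstests/libs/write_concern_util.js. The sketch below is an illustrative reconstruction of the pattern being exercised, not the library code itself; it assumes the stopServerReplication/restartServerReplication helpers from that same file, and the function name is hypothetical.

// Sketch only -- the real helper lives in write_concern_util.js.
function sketchWriteConcernRetryability(priConn, secConn, cmd, dbName) {
    // Freeze replication so {w: 'majority', wtimeout: 200} cannot be satisfied.
    stopServerReplication(secConn);

    // The first attempt applies the write but reports a write concern error.
    assert.commandWorkedIgnoringWriteConcernErrors(priConn.getDB(dbName).runCommand(cmd));

    // Retrying with the same lsid/txnNumber must not re-apply the write; the
    // stored result is replayed, again with a write concern error attached.
    assert.commandWorkedIgnoringWriteConcernErrors(priConn.getDB(dbName).runCommand(cmd));

    restartServerReplication(secConn);
}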
diff --git a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
index 0a89dcc7390..54e826dcfeb 100644
--- a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
+++ b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
@@ -1,93 +1,93 @@
// Validates the expected behaviour of direct writes against the `config.transactions` collection
(function() {
- 'use strict';
-
- // Direct writes to config.transactions cannot be part of a session.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- var priConn = replTest.getPrimary();
- var db = priConn.getDB('TestDB');
- var config = priConn.getDB('config');
-
- assert.writeOK(db.user.insert({_id: 0}));
- assert.writeOK(db.user.insert({_id: 1}));
-
- const lsid1 = UUID();
- const lsid2 = UUID();
-
- const cmdObj1 = {
- update: 'user',
- updates: [{q: {_id: 0}, u: {$inc: {x: 1}}}],
- lsid: {id: lsid1},
- txnNumber: NumberLong(1)
- };
- assert.commandWorked(db.runCommand(cmdObj1));
- assert.eq(1, db.user.find({_id: 0}).toArray()[0].x);
-
- const cmdObj2 = {
- update: 'user',
- updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}],
- lsid: {id: lsid2},
- txnNumber: NumberLong(1)
- };
- assert.commandWorked(db.runCommand(cmdObj2));
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- assert.eq(1, config.transactions.find({'_id.id': lsid1}).itcount());
- assert.eq(1, config.transactions.find({'_id.id': lsid2}).itcount());
-
- // Invalidating lsid1 doesn't impact lsid2, but allows same statement to be executed again
- assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
- assert.commandWorked(db.runCommand(cmdObj1));
- assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
- assert.commandWorked(db.runCommand(cmdObj2));
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- // Ensure lsid1 is properly tracked after the recreate
- assert.commandWorked(db.runCommand(cmdObj1));
- assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
-
- // Ensure garbage data cannot be written to the `config.transactions` collection
- assert.writeError(config.transactions.insert({_id: 'String'}));
- assert.writeError(config.transactions.insert({_id: {UnknownField: 'Garbage'}}));
-
- // Ensure inserting an invalid session record manually without all the required fields causes
- // the session to not work anymore for retryable writes for that session, but not for any other
- const lsidManual = config.transactions.find({'_id.id': lsid1}).toArray()[0]._id;
- assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
- assert.writeOK(config.transactions.insert({_id: lsidManual}));
-
- const lsid3 = UUID();
- assert.commandWorked(db.runCommand({
- update: 'user',
- updates: [{q: {_id: 2}, u: {$inc: {x: 1}}, upsert: true}],
- lsid: {id: lsid3},
- txnNumber: NumberLong(1)
- }));
- assert.eq(1, db.user.find({_id: 2}).toArray()[0].x);
-
- // Ensure dropping the `config.transactions` collection breaks the retryable writes feature, but
- // doesn't crash the server
- assert(config.transactions.drop());
- var res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
- assert.eq(0, res.nModified);
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- assert(config.dropDatabase());
- res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
- assert.eq(0, res.nModified);
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- replTest.stopSet();
+'use strict';
+
+// Direct writes to config.transactions cannot be part of a session.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+var priConn = replTest.getPrimary();
+var db = priConn.getDB('TestDB');
+var config = priConn.getDB('config');
+
+assert.writeOK(db.user.insert({_id: 0}));
+assert.writeOK(db.user.insert({_id: 1}));
+
+const lsid1 = UUID();
+const lsid2 = UUID();
+
+const cmdObj1 = {
+ update: 'user',
+ updates: [{q: {_id: 0}, u: {$inc: {x: 1}}}],
+ lsid: {id: lsid1},
+ txnNumber: NumberLong(1)
+};
+assert.commandWorked(db.runCommand(cmdObj1));
+assert.eq(1, db.user.find({_id: 0}).toArray()[0].x);
+
+const cmdObj2 = {
+ update: 'user',
+ updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}],
+ lsid: {id: lsid2},
+ txnNumber: NumberLong(1)
+};
+assert.commandWorked(db.runCommand(cmdObj2));
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+assert.eq(1, config.transactions.find({'_id.id': lsid1}).itcount());
+assert.eq(1, config.transactions.find({'_id.id': lsid2}).itcount());
+
+// Invalidating lsid1 doesn't impact lsid2, but allows the same statement to be executed again
+assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
+assert.commandWorked(db.runCommand(cmdObj1));
+assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
+assert.commandWorked(db.runCommand(cmdObj2));
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+// Ensure lsid1 is properly tracked after the recreate
+assert.commandWorked(db.runCommand(cmdObj1));
+assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
+
+// Ensure garbage data cannot be written to the `config.transactions` collection
+assert.writeError(config.transactions.insert({_id: 'String'}));
+assert.writeError(config.transactions.insert({_id: {UnknownField: 'Garbage'}}));
+
+// Ensure that manually inserting an invalid session record without all the required fields breaks
+// retryable writes for that session, but not for any other session
+const lsidManual = config.transactions.find({'_id.id': lsid1}).toArray()[0]._id;
+assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
+assert.writeOK(config.transactions.insert({_id: lsidManual}));
+
+const lsid3 = UUID();
+assert.commandWorked(db.runCommand({
+ update: 'user',
+ updates: [{q: {_id: 2}, u: {$inc: {x: 1}}, upsert: true}],
+ lsid: {id: lsid3},
+ txnNumber: NumberLong(1)
+}));
+assert.eq(1, db.user.find({_id: 2}).toArray()[0].x);
+
+// Ensure dropping the `config.transactions` collection breaks the retryable writes feature, but
+// doesn't crash the server
+assert(config.transactions.drop());
+var res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
+assert.eq(0, res.nModified);
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+assert(config.dropDatabase());
+res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
+assert.eq(0, res.nModified);
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+replTest.stopSet();
})();
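
As background for the assertions above: every retryable write records its progress in config.transactions, keyed by the logical session id. The snippet below reuses the test's config and lsid1 handles; the commented record shape is an approximation with hypothetical values, not authoritative schema documentation.

// Approximate shape of a session record (hypothetical values).
const record = config.transactions.findOne({'_id.id': lsid1});
// record ~= {
//     _id: {id: UUID("..."), uid: BinData(0, "...")},  // logical session id
//     txnNum: NumberLong(1),                           // highest txnNumber seen
//     lastWriteOpTime: {ts: Timestamp(..., ...), t: NumberLong(...)},
//     lastWriteDate: ISODate("...")
// }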
diff --git a/jstests/replsets/retryable_writes_failover.js b/jstests/replsets/retryable_writes_failover.js
index 30eb069906d..2073e2fbded 100644
--- a/jstests/replsets/retryable_writes_failover.js
+++ b/jstests/replsets/retryable_writes_failover.js
@@ -3,162 +3,161 @@
* failover.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- function stepDownPrimary(replTest) {
- assert.commandWorked(
- replTest.getPrimary().adminCommand({replSetStepDown: 10, force: true}));
- }
+function stepDownPrimary(replTest) {
+ assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10, force: true}));
+}
- const replTest = new ReplSetTest({nodes: 3});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 3});
+replTest.startSet();
+replTest.initiate();
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
+////////////////////////////////////////////////////////////////////////
+// Test insert command
- let insertCmd = {
- insert: "foo",
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- };
+let insertCmd = {
+ insert: "foo",
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+};
- // Run the command on the primary and wait for replication.
- let primary = replTest.getPrimary();
- let testDB = primary.getDB("test");
+// Run the command on the primary and wait for replication.
+let primary = replTest.getPrimary();
+let testDB = primary.getDB("test");
- let result = assert.commandWorked(testDB.runCommand(insertCmd));
- assert.eq(2, testDB.foo.find().itcount());
+let result = assert.commandWorked(testDB.runCommand(insertCmd));
+assert.eq(2, testDB.foo.find().itcount());
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Step down the primary and wait for a new one.
- stepDownPrimary(replTest);
+// Step down the primary and wait for a new one.
+stepDownPrimary(replTest);
- let newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB("test");
+let newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB("test");
- let oplog = newPrimary.getDB("local").oplog.rs;
- let insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
+let oplog = newPrimary.getDB("local").oplog.rs;
+let insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
- // Retry the command on the secondary and verify it wasn't repeated.
- let retryResult = assert.commandWorked(testDB.runCommand(insertCmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+// Retry the command on the secondary and verify it wasn't repeated.
+let retryResult = assert.commandWorked(testDB.runCommand(insertCmd));
+assert.eq(result.ok, retryResult.ok);
+assert.eq(result.n, retryResult.n);
+assert.eq(result.writeErrors, retryResult.writeErrors);
+assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- assert.eq(2, testDB.foo.find().itcount());
+assert.eq(2, testDB.foo.find().itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
+assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
- ////////////////////////////////////////////////////////////////////////
- // Test update command
+////////////////////////////////////////////////////////////////////////
+// Test update command
- let updateCmd = {
- update: "foo",
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10),
- };
+let updateCmd = {
+ update: "foo",
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10),
+};
- primary = replTest.getPrimary();
- testDB = primary.getDB("test");
+primary = replTest.getPrimary();
+testDB = primary.getDB("test");
- // Run the command on the primary and wait for replication.
- result = assert.commandWorked(testDB.runCommand(updateCmd));
- assert.eq(3, testDB.foo.find().itcount());
+// Run the command on the primary and wait for replication.
+result = assert.commandWorked(testDB.runCommand(updateCmd));
+assert.eq(3, testDB.foo.find().itcount());
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Step down the primary and wait for a new one.
- stepDownPrimary(replTest);
+// Step down the primary and wait for a new one.
+stepDownPrimary(replTest);
- newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB("test");
+newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB("test");
- oplog = newPrimary.getDB("local").oplog.rs;
- let updateOplogEntries = oplog.find({ns: "test.foo", op: "u"}).itcount();
+oplog = newPrimary.getDB("local").oplog.rs;
+let updateOplogEntries = oplog.find({ns: "test.foo", op: "u"}).itcount();
- // Upserts are stored as inserts if they match no existing documents.
- insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
+// Upserts are stored as inserts if they match no existing documents.
+insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
- // Retry the command on the secondary and verify it wasn't repeated.
- retryResult = assert.commandWorked(testDB.runCommand(updateCmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.nModified, retryResult.nModified);
- assert.eq(result.upserted, retryResult.upserted);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+// Retry the command on the secondary and verify it wasn't repeated.
+retryResult = assert.commandWorked(testDB.runCommand(updateCmd));
+assert.eq(result.ok, retryResult.ok);
+assert.eq(result.n, retryResult.n);
+assert.eq(result.nModified, retryResult.nModified);
+assert.eq(result.upserted, retryResult.upserted);
+assert.eq(result.writeErrors, retryResult.writeErrors);
+assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- assert.eq(3, testDB.foo.find().itcount());
+assert.eq(3, testDB.foo.find().itcount());
- assert.eq({_id: 10, x: 1}, testDB.foo.findOne({_id: 10}));
- assert.eq({_id: 20, y: 1}, testDB.foo.findOne({_id: 20}));
- assert.eq({_id: 30, z: 1}, testDB.foo.findOne({_id: 30}));
+assert.eq({_id: 10, x: 1}, testDB.foo.findOne({_id: 10}));
+assert.eq({_id: 20, y: 1}, testDB.foo.findOne({_id: 20}));
+assert.eq({_id: 30, z: 1}, testDB.foo.findOne({_id: 30}));
- assert.eq(updateOplogEntries, oplog.find({ns: "test.foo", op: "u"}).itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
+assert.eq(updateOplogEntries, oplog.find({ns: "test.foo", op: "u"}).itcount());
+assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
+////////////////////////////////////////////////////////////////////////
+// Test delete command
- let deleteCmd = {
- delete: "foo",
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(15),
- };
+let deleteCmd = {
+ delete: "foo",
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(15),
+};
- primary = replTest.getPrimary();
- testDB = primary.getDB("test");
+primary = replTest.getPrimary();
+testDB = primary.getDB("test");
- assert.writeOK(testDB.foo.insert({_id: 40, x: 1}));
- assert.writeOK(testDB.foo.insert({_id: 50, y: 1}));
+assert.writeOK(testDB.foo.insert({_id: 40, x: 1}));
+assert.writeOK(testDB.foo.insert({_id: 50, y: 1}));
- // Run the command on the primary and wait for replication.
- result = assert.commandWorked(testDB.runCommand(deleteCmd));
- assert.eq(1, testDB.foo.find({x: 1}).itcount());
- assert.eq(1, testDB.foo.find({y: 1}).itcount());
+// Run the command on the primary and wait for replication.
+result = assert.commandWorked(testDB.runCommand(deleteCmd));
+assert.eq(1, testDB.foo.find({x: 1}).itcount());
+assert.eq(1, testDB.foo.find({y: 1}).itcount());
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Step down the primary and wait for a new one.
- stepDownPrimary(replTest);
+// Step down the primary and wait for a new one.
+stepDownPrimary(replTest);
- newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB("test");
+newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB("test");
- oplog = newPrimary.getDB("local").oplog.rs;
- let deleteOplogEntries = oplog.find({ns: "test.foo", op: "d"}).itcount();
+oplog = newPrimary.getDB("local").oplog.rs;
+let deleteOplogEntries = oplog.find({ns: "test.foo", op: "d"}).itcount();
- // Retry the command on the secondary and verify it wasn't repeated.
- retryResult = assert.commandWorked(testDB.runCommand(deleteCmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+// Retry the command on the secondary and verify it wasn't repeated.
+retryResult = assert.commandWorked(testDB.runCommand(deleteCmd));
+assert.eq(result.ok, retryResult.ok);
+assert.eq(result.n, retryResult.n);
+assert.eq(result.writeErrors, retryResult.writeErrors);
+assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- assert.eq(1, testDB.foo.find({x: 1}).itcount());
- assert.eq(1, testDB.foo.find({y: 1}).itcount());
+assert.eq(1, testDB.foo.find({x: 1}).itcount());
+assert.eq(1, testDB.foo.find({y: 1}).itcount());
- assert.eq(deleteOplogEntries, oplog.find({ns: "test.foo", op: "d"}).itcount());
+assert.eq(deleteOplogEntries, oplog.find({ns: "test.foo", op: "d"}).itcount());
- replTest.stopSet();
+replTest.stopSet();
})();
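
Condensed, the property verified three times above is: re-sending an identical {lsid, txnNumber} command to the post-failover primary replays the stored response instead of executing the write again. A minimal restatement, assuming the replTest fixture from this file:

const cmd = {
    insert: "foo",
    documents: [{_id: 1}],
    lsid: {id: UUID()},
    txnNumber: NumberLong(1)
};
const first = assert.commandWorked(replTest.getPrimary().getDB("test").runCommand(cmd));
replTest.awaitReplication();
assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10, force: true}));

// Same command against the new primary: the response matches and no new
// oplog entry is written.
const retry = assert.commandWorked(replTest.getPrimary().getDB("test").runCommand(cmd));
assert.eq(first.n, retry.n);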
diff --git a/jstests/replsets/rollback_aborted_prepared_transaction.js b/jstests/replsets/rollback_aborted_prepared_transaction.js
index 8a486323421..b8bdc857992 100644
--- a/jstests/replsets/rollback_aborted_prepared_transaction.js
+++ b/jstests/replsets/rollback_aborted_prepared_transaction.js
@@ -8,101 +8,101 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "rollback_aborted_prepared_transaction";
-
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
-
- // Create collection we're using beforehand.
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- testDB.runCommand({drop: collName});
- assert.commandWorked(testDB.runCommand({create: collName}));
- assert.commandWorked(testColl.insert({_id: 0}));
-
- // Start two sessions on the primary.
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB.getCollection(collName);
-
- let session2 = primary.startSession();
- let sessionColl2 = session2.getDatabase(dbName).getCollection(collName);
-
- // The following transaction will be rolled back.
- rollbackTest.transitionToRollbackOperations();
-
- // Prepare the transaction on the session.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- PrepareHelpers.prepareTransaction(session, {w: 1});
-
- assert.eq(testColl.find().itcount(), 1);
- // This characterizes the current fastcount behavior, which is that active prepared transactions
- // contribute to the fastcount.
- assert.eq(testColl.count(), 2);
-
- // Abort the transaction explicitly.
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.eq(testColl.find().itcount(), 1);
- assert.eq(testColl.count(), 1);
-
- // Test that it is impossible to commit a prepared transaction whose prepare oplog entry has not
- // yet majority committed. This also aborts the transaction.
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session2, {w: 1});
- let res = assert.commandFailedWithCode(
- PrepareHelpers.commitTransaction(session2, prepareTimestamp), ErrorCodes.InvalidOptions);
- assert(res.errmsg.includes(
- "cannot be run before its prepare oplog entry has been majority committed"),
- res);
- assert.eq(testColl.find().itcount(), 1);
- assert.eq(testColl.count(), 1);
-
- // Check that we have two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Make sure there are no transactions in the transactions table. This is because both the abort
- // and prepare operations are rolled back, and the entry in the transactions table is only made
- // durable when a transaction is prepared.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 0);
-
- // Make sure the first collection only has one document since the prepared insert was rolled
- // back.
- assert.eq(sessionColl.find().itcount(), 1);
- assert.eq(sessionColl.count(), 1);
-
- // Get the new primary after the topology changes.
- primary = rollbackTest.getPrimary();
- testDB = primary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Make sure we can successfully run a prepared transaction on the same session after going
- // through rollback.
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- sessionColl = sessionDB.getCollection(collName);
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
-
- assert.eq(testColl.find().itcount(), 2);
- assert.eq(testColl.count(), 2);
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "rollback_aborted_prepared_transaction";
+
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
+
+// Create collection we're using beforehand.
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+testDB.runCommand({drop: collName});
+assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testColl.insert({_id: 0}));
+
+// Start two sessions on the primary.
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB.getCollection(collName);
+
+let session2 = primary.startSession();
+let sessionColl2 = session2.getDatabase(dbName).getCollection(collName);
+
+// The following transaction will be rolled back.
+rollbackTest.transitionToRollbackOperations();
+
+// Prepare the transaction on the session.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+PrepareHelpers.prepareTransaction(session, {w: 1});
+
+assert.eq(testColl.find().itcount(), 1);
+// This characterizes the current fastcount behavior, which is that active prepared transactions
+// contribute to the fastcount.
+assert.eq(testColl.count(), 2);
+
+// Abort the transaction explicitly.
+assert.commandWorked(session.abortTransaction_forTesting());
+
+assert.eq(testColl.find().itcount(), 1);
+assert.eq(testColl.count(), 1);
+
+// Test that it is impossible to commit a prepared transaction whose prepare oplog entry has not
+// yet been majority committed. This also aborts the transaction.
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session2, {w: 1});
+let res = assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session2, prepareTimestamp),
+ ErrorCodes.InvalidOptions);
+assert(
+ res.errmsg.includes("cannot be run before its prepare oplog entry has been majority committed"),
+ res);
+assert.eq(testColl.find().itcount(), 1);
+assert.eq(testColl.count(), 1);
+
+// Check that we have two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Make sure there are no transactions in the transactions table. This is because both the abort
+// and prepare operations are rolled back, and the entry in the transactions table is only made
+// durable when a transaction is prepared.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 0);
+
+// Make sure the first collection only has one document since the prepared insert was rolled
+// back.
+assert.eq(sessionColl.find().itcount(), 1);
+assert.eq(sessionColl.count(), 1);
+
+// Get the new primary after the topology changes.
+primary = rollbackTest.getPrimary();
+testDB = primary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Make sure we can successfully run a prepared transaction on the same session after going
+// through rollback.
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+sessionColl = sessionDB.getCollection(collName);
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
+
+assert.eq(testColl.find().itcount(), 2);
+assert.eq(testColl.count(), 2);
+
+rollbackTest.stop();
}());
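
The prepared-transaction lifecycle this test builds on, reduced to its smallest form. PrepareHelpers comes from jstests/core/txns/libs/prepare_helpers.js (loaded above); primary stands in for any connection to the current primary.

const session = primary.startSession();
session.startTransaction();
assert.commandWorked(session.getDatabase("test").coll.insert({_id: 1}));

// prepareTransaction writes the prepare oplog entry and returns its
// timestamp; committing at (or after) that timestamp makes the
// transaction durable.
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
PrepareHelpers.commitTransaction(session, prepareTimestamp);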
diff --git a/jstests/replsets/rollback_after_disabling_majority_reads.js b/jstests/replsets/rollback_after_disabling_majority_reads.js
index 159bbdffb88..e8b2eeeebba 100644
--- a/jstests/replsets/rollback_after_disabling_majority_reads.js
+++ b/jstests/replsets/rollback_after_disabling_majority_reads.js
@@ -3,41 +3,43 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- const name = "rollback_after_disabling_majority_reads";
- const dbName = "test";
- const collName = "coll";
+TestData.rollbackShutdowns = true;
+const name = "rollback_after_disabling_majority_reads";
+const dbName = "test";
+const collName = "coll";
- jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=true");
- const replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "true"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiate(config);
- const rollbackTest = new RollbackTest(name, replTest);
+jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=true");
+const replTest = new ReplSetTest(
+ {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "true"}});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+replTest.initiate(config);
+const rollbackTest = new RollbackTest(name, replTest);
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(rollbackNode.getDB(dbName).runCommand(
- {insert: collName, documents: [{_id: "rollback op"}]}));
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+assert.commandWorked(
+ rollbackNode.getDB(dbName).runCommand({insert: collName, documents: [{_id: "rollback op"}]}));
- jsTest.log("Restart the rollback node with enableMajorityReadConcern=false");
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"});
+jsTest.log("Restart the rollback node with enableMajorityReadConcern=false");
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"});
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
- {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
+ {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
- assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
- assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
+assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
+assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
- rollbackTest.stop();
+rollbackTest.stop();
}());
\ No newline at end of file
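
This test and the surrounding rollback tests all drive the same RollbackTest state machine from jstests/replsets/libs/rollback_test.js; its transitions, outlined below with a hypothetical fixture name, are worth keeping in mind when reading the steps above.

const rt = new RollbackTest("example");               // hypothetical name
const node = rt.transitionToRollbackOperations();     // writes here will be rolled back
rt.transitionToSyncSourceOperationsBeforeRollback();  // sync source diverges past the common point
rt.transitionToSyncSourceOperationsDuringRollback();  // rollback node reconnects and rolls back
rt.transitionToSteadyStateOperations();               // data consistency checks run here
rt.stop();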
diff --git a/jstests/replsets/rollback_after_enabling_majority_reads.js b/jstests/replsets/rollback_after_enabling_majority_reads.js
index 112c932044c..b477e3dc6c6 100644
--- a/jstests/replsets/rollback_after_enabling_majority_reads.js
+++ b/jstests/replsets/rollback_after_enabling_majority_reads.js
@@ -8,73 +8,75 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- const name = "rollback_after_enabling_majority_reads";
- const dbName = "test";
- const collName = "coll";
+TestData.rollbackShutdowns = true;
+const name = "rollback_after_enabling_majority_reads";
+const dbName = "test";
+const collName = "coll";
- jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=false");
- const replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiate(config);
- const rollbackTest = new RollbackTest(name, replTest);
+jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=false");
+const replTest = new ReplSetTest(
+ {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+replTest.initiate(config);
+const rollbackTest = new RollbackTest(name, replTest);
- jsTest.log("Ensure the stable timestamp is ahead of the common point on the rollback node.");
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- const operationTime = assert
- .commandWorked(rollbackNode.getDB(dbName).runCommand(
- {insert: collName, documents: [{_id: "rollback op"}]}))
- .operationTime;
+jsTest.log("Ensure the stable timestamp is ahead of the common point on the rollback node.");
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const operationTime = assert
+ .commandWorked(rollbackNode.getDB(dbName).runCommand(
+ {insert: collName, documents: [{_id: "rollback op"}]}))
+ .operationTime;
- // Do a clean shutdown to ensure the recovery timestamp is at operationTime.
- jsTest.log("Restart the rollback node with enableMajorityReadConcern=true");
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
- const replSetGetStatusResponse =
- assert.commandWorked(rollbackNode.adminCommand({replSetGetStatus: 1}));
- assert.eq(replSetGetStatusResponse.lastStableCheckpointTimestamp,
- operationTime,
- tojson(replSetGetStatusResponse));
+// Do a clean shutdown to ensure the recovery timestamp is at operationTime.
+jsTest.log("Restart the rollback node with enableMajorityReadConcern=true");
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
+const replSetGetStatusResponse =
+ assert.commandWorked(rollbackNode.adminCommand({replSetGetStatus: 1}));
+assert.eq(replSetGetStatusResponse.lastStableCheckpointTimestamp,
+ operationTime,
+ tojson(replSetGetStatusResponse));
- // The rollback crashes because the common point is before the stable timestamp.
- jsTest.log("Attempt to roll back. This will fassert.");
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- assert.soon(() => {
- return rawMongoProgramOutput().indexOf("Fatal Assertion 51121") !== -1;
- });
+// The rollback crashes because the common point is before the stable timestamp.
+jsTest.log("Attempt to roll back. This will fassert.");
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+assert.soon(() => {
+ return rawMongoProgramOutput().indexOf("Fatal Assertion 51121") !== -1;
+});
- jsTest.log(
- "Restart the rollback node with enableMajorityReadConcern=false. Now the rollback can succeed.");
- const allowedExitCode = 14;
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"}, allowedExitCode);
+jsTest.log(
+ "Restart the rollback node with enableMajorityReadConcern=false. Now the rollback can succeed.");
+const allowedExitCode = 14;
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"}, allowedExitCode);
- // Fix counts for "local.startup_log", since they are corrupted by this rollback.
- // transitionToSteadyStateOperations() checks collection counts.
- assert.commandWorked(rollbackNode.getDB("local").runCommand({validate: "startup_log"}));
- rollbackTest.transitionToSteadyStateOperations();
+// Fix counts for "local.startup_log", since they are corrupted by this rollback.
+// transitionToSteadyStateOperations() checks collection counts.
+assert.commandWorked(rollbackNode.getDB("local").runCommand({validate: "startup_log"}));
+rollbackTest.transitionToSteadyStateOperations();
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
- {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
+ {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
- assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
- assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
+assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
+assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
- jsTest.log("Restart the rollback node with enableMajorityReadConcern=true.");
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
+jsTest.log("Restart the rollback node with enableMajorityReadConcern=true.");
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
- jsTest.log("Rollback should succeed since the common point is at least the stable timestamp.");
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+jsTest.log("Rollback should succeed since the common point is at least the stable timestamp.");
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+rollbackTest.stop();
}());
\ No newline at end of file
diff --git a/jstests/replsets/rollback_all_op_types.js b/jstests/replsets/rollback_all_op_types.js
index 7af1a3c654d..8ffc53f2faf 100644
--- a/jstests/replsets/rollback_all_op_types.js
+++ b/jstests/replsets/rollback_all_op_types.js
@@ -10,374 +10,372 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test_deluxe.js");
+load("jstests/replsets/libs/rollback_test_deluxe.js");
- let noOp = () => {};
+let noOp = () => {};
- /**
- * All operation types that are able to be rolled back.
- *
- * Each operation type maps to an array of test objects that contains an 'init' function, an
- * 'op' function, and an optional 'description' field. Some operations depend on the current
- * state of the database, so the 'init' function provides a way to set up the database before an
- * operation is executed. All init functions are executed at the very beginning of the test, as
- * part of CommonOps. Also, to provide isolation between commands, each is given its own
- * database to execute in.
- *
- * Each operation has an array of test objects to allow testing of multiple variations of an
- * operation. Each test case in an array will be executed in isolation.
- *
- * Note: The 'dropDatabase' command is excluded and tested separately. It cannot be tested
- * directly using the RollbackTest fixture, since the command is always up-converted to use
- * majority write concern in 3.6.
- *
- */
- let rollbackOps = {
- "insert": [{
+/**
+ * All operation types that are able to be rolled back.
+ *
+ * Each operation type maps to an array of test objects, each containing an 'init' function,
+ * an 'op' function, and an optional 'description' field. Some operations depend on the current
+ * state of the database, so the 'init' function provides a way to set up the database before an
+ * operation is executed. All init functions are executed at the very beginning of the test, as
+ * part of CommonOps. Also, to provide isolation between commands, each is given its own
+ * database to execute in.
+ *
+ * Each operation has an array of test objects to allow testing of multiple variations of an
+ * operation. Each test case in an array will be executed in isolation.
+ *
+ * Note: The 'dropDatabase' command is excluded and tested separately. It cannot be tested
+ * directly using the RollbackTest fixture, since the command is always up-converted to use
+ * majority write concern in 3.6.
+ *
+ */
+let rollbackOps = {
+ "insert": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.writeOK(db[collName].insert({_id: 0}));
+ }
+ }],
+ "update": [{
+ init: (db, collName) => {
+ assert.writeOK(db[collName].insert({_id: 0, val: 0}));
+ },
+ op: (db, collName) => {
+ assert.writeOK(db[collName].update({_id: 0}, {val: 1}));
+ },
+ }],
+ "delete": [{
+ init: (db, collName) => {
+ assert.writeOK(db[collName].insert({_id: 0}));
+ },
+ op: (db, collName) => {
+ assert.writeOK(db[collName].remove({_id: 0}));
+ },
+ }],
+ "create": [{
+ init: noOp,
+ op: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ }],
+ "drop": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({drop: collName}));
+ },
+ }],
+ "createIndexes": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [{name: collName + "_index", key: {index_key: 1}}]
+ }));
+ }
+ }],
+ "dropIndexes": [
+ {
+ description: "singleIndex",
init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [{name: collName + "_index", key: {index_key: 1}}]
+ }));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(
+ db.runCommand({dropIndexes: collName, index: collName + "_index"}));
+ }
+ },
+ {
+ description: "allIndexes",
+ init: (db, collName) => {
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {name: collName + "_index_0", key: {index_key_0: 1}},
+ {name: collName + "_index_1", key: {index_key_1: 1}},
+ {name: collName + "_index_2", key: {index_key_2: 1}}
+ ]
+ }));
},
op: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0}));
+ assert.commandWorked(db.runCommand({dropIndexes: collName, index: "*"}));
}
- }],
- "update": [{
+ }
+ ],
+ "renameCollection": [
+ {
+ description: "withinSameDatabase",
init: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0, val: 0}));
+ assert.commandWorked(db.createCollection(collName + "_source"));
},
op: (db, collName) => {
- assert.writeOK(db[collName].update({_id: 0}, {val: 1}));
+ let nss = db[collName].getFullName();
+ assert.commandWorked(
+ db.adminCommand({renameCollection: nss + "_source", to: nss + "_dest"}));
},
- }],
- "delete": [{
+ },
+ {
+ description: "acrossDatabases",
init: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0}));
+ assert.commandWorked(db.createCollection(collName));
},
op: (db, collName) => {
- assert.writeOK(db[collName].remove({_id: 0}));
+ let sourceNss = db[collName].getFullName();
+ let destNss = db.getName() + "_dest." + collName;
+ assert.commandWorked(db.adminCommand({renameCollection: sourceNss, to: destNss}));
},
- }],
- "create": [{
- init: noOp,
- op: (db, collName) => {
+ },
+ {
+ description: "acrossDatabasesDropTarget",
+ init: (db, collName) => {
+ let dbName = db.getName();
+ let destDb = db.getSiblingDB(dbName + "_dest");
assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(destDb.createCollection(collName));
},
- }],
- "drop": [{
+ op: (db, collName) => {
+ let sourceNss = db[collName].getFullName();
+ let destNss = db.getName() + "_dest." + collName;
+ assert.commandWorked(
+ db.adminCommand({renameCollection: sourceNss, to: destNss, dropTarget: true}));
+ },
+ },
+ {
+ description: "dropTarget",
init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.createCollection(collName + "_source"));
+ assert.commandWorked(db.createCollection(collName + "_dest"));
},
op: (db, collName) => {
- assert.commandWorked(db.runCommand({drop: collName}));
+ let nss = db[collName].getFullName();
+ assert.commandWorked(db.adminCommand(
+ {renameCollection: nss + "_source", to: nss + "_dest", dropTarget: true}));
},
- }],
- "createIndexes": [{
+ }
+
+ ],
+ "collMod": [
+ {
+ description: "allCollectionOptions",
init: (db, collName) => {
assert.commandWorked(db.createCollection(collName));
},
op: (db, collName) => {
assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [{name: collName + "_index", key: {index_key: 1}}]
+ collMod: collName,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
}));
}
- }],
- "dropIndexes": [
- {
- description: "singleIndex",
- init: (db, collName) => {
- assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [{name: collName + "_index", key: {index_key: 1}}]
- }));
- },
- op: (db, collName) => {
- assert.commandWorked(
- db.runCommand({dropIndexes: collName, index: collName + "_index"}));
- }
- },
- {
- description: "allIndexes",
- init: (db, collName) => {
- assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [
- {name: collName + "_index_0", key: {index_key_0: 1}},
- {name: collName + "_index_1", key: {index_key_1: 1}},
- {name: collName + "_index_2", key: {index_key_2: 1}}
- ]
- }));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand({dropIndexes: collName, index: "*"}));
- }
- }
- ],
- "renameCollection": [
- {
- description: "withinSameDatabase",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName + "_source"));
- },
- op: (db, collName) => {
- let nss = db[collName].getFullName();
- assert.commandWorked(
- db.adminCommand({renameCollection: nss + "_source", to: nss + "_dest"}));
- },
- },
- {
- description: "acrossDatabases",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- op: (db, collName) => {
- let sourceNss = db[collName].getFullName();
- let destNss = db.getName() + "_dest." + collName;
- assert.commandWorked(db.adminCommand({renameCollection: sourceNss, to: destNss}));
- },
- },
- {
- description: "acrossDatabasesDropTarget",
- init: (db, collName) => {
- let dbName = db.getName();
- let destDb = db.getSiblingDB(dbName + "_dest");
- assert.commandWorked(db.createCollection(collName));
- assert.commandWorked(destDb.createCollection(collName));
- },
- op: (db, collName) => {
- let sourceNss = db[collName].getFullName();
- let destNss = db.getName() + "_dest." + collName;
- assert.commandWorked(db.adminCommand(
- {renameCollection: sourceNss, to: destNss, dropTarget: true}));
- },
+ },
+ {
+ description: "validationOptionsWithoutValidator",
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
},
- {
- description: "dropTarget",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName + "_source"));
- assert.commandWorked(db.createCollection(collName + "_dest"));
- },
- op: (db, collName) => {
- let nss = db[collName].getFullName();
- assert.commandWorked(db.adminCommand(
- {renameCollection: nss + "_source", to: nss + "_dest", dropTarget: true}));
- },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand(
+ {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
}
-
- ],
- "collMod": [
- {
- description: "allCollectionOptions",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand({
- collMod: collName,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"
- }));
- }
- },
- {
- description: "validationOptionsWithoutValidator",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand(
- {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
- }
+ },
+ {
+ description: "existingValidationOptions",
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.runCommand(
+ {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
},
- {
- description: "existingValidationOptions",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- assert.commandWorked(db.runCommand(
- {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand({
- collMod: collName,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"
- }));
- }
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({
+ collMod: collName,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
+ }));
}
- ],
- "convertToCapped": [{
+ }
+ ],
+ "convertToCapped": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({convertToCapped: collName, size: 1024}));
+ },
+ }],
+ "applyOps": [
+ {
+ description: "multipleCRUDOps",
init: (db, collName) => {
assert.commandWorked(db.createCollection(collName));
},
+ // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
+ // entry.
op: (db, collName) => {
- assert.commandWorked(db.runCommand({convertToCapped: collName, size: 1024}));
- },
- }],
- "applyOps": [
- {
- description: "multipleCRUDOps",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
- // entry.
- op: (db, collName) => {
- let collInfo = db.getCollectionInfos({name: collName})[0];
- let uuid = collInfo.info.uuid;
- let coll = db.getCollection(collName);
- let opsToApply = [
- {op: "i", ns: coll.getFullName(), ui: uuid, o: {_id: 0}},
- {
+ let collInfo = db.getCollectionInfos({name: collName})[0];
+ let uuid = collInfo.info.uuid;
+ let coll = db.getCollection(collName);
+ let opsToApply = [
+ {op: "i", ns: coll.getFullName(), ui: uuid, o: {_id: 0}},
+ {
op: "u",
ns: coll.getFullName(),
ui: uuid,
o: {_id: 0, val: 1},
o2: {_id: 0},
- },
- {op: "d", ns: coll.getFullName(), ui: uuid, o: {_id: 0}}
- ];
- assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
- }
+ },
+ {op: "d", ns: coll.getFullName(), ui: uuid, o: {_id: 0}}
+ ];
+ assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
+ }
+ },
+ {
+ description: "opWithoutUUID",
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
},
- {
- description: "opWithoutUUID",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
- // entry.
- op: (db, collName) => {
- let coll = db.getCollection(collName);
- let opsToApply = [
- {op: "i", ns: coll.getFullName(), o: {_id: 0}},
- ];
- assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
- }
+ // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
+ // entry.
+ op: (db, collName) => {
+ let coll = db.getCollection(collName);
+ let opsToApply = [
+ {op: "i", ns: coll.getFullName(), o: {_id: 0}},
+ ];
+ assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
}
- ]
- };
+ }
+ ]
+};
- let testCollName = "test";
- let opNames = Object.keys(rollbackOps);
+let testCollName = "test";
+let opNames = Object.keys(rollbackOps);
- /**
- * Create the test name string given an operation name and the test case index. The test
- * name for the nth test case of an operation called "opName", with description "description",
- * will be "opName_<n>_description".
- */
- function opTestNameStr(opName, description, ind) {
- let opVariantName = opName + "_" + ind;
- if (description) {
- opVariantName = opVariantName + "_" + description;
- }
- return opVariantName;
+/**
+ * Create the test name string given an operation name and the test case index. The test
+ * name for the nth test case of an operation called "opName", with description "description",
+ * will be "opName_<n>_description".
+ */
+function opTestNameStr(opName, description, ind) {
+ let opVariantName = opName + "_" + ind;
+ if (description) {
+ opVariantName = opVariantName + "_" + description;
}
+ return opVariantName;
+}
- /**
- * Operations that will be present on both nodes, before the common point.
- */
- let CommonOps = (node) => {
- // Ensure there is at least one common op between nodes.
- node.getDB("commonOp")["test"].insert({_id: "common_op"});
+/**
+ * Operations that will be present on both nodes, before the common point.
+ */
+let CommonOps = (node) => {
+ // Ensure there is at least one common op between nodes.
+ node.getDB("commonOp")["test"].insert({_id: "common_op"});
- // Run init functions for each op type. Each is given its own database to run in and a
- // standard collection name to use.
- jsTestLog("Performing init operations for every operation type.");
- opNames.forEach(opName => {
- let opObj = rollbackOps[opName];
- opObj.forEach((opVariantObj, ind) => {
- let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
- opVariantObj.init(node.getDB(opVariantName), testCollName);
- });
+ // Run init functions for each op type. Each is given its own database to run in and a
+ // standard collection name to use.
+ jsTestLog("Performing init operations for every operation type.");
+ opNames.forEach(opName => {
+ let opObj = rollbackOps[opName];
+ opObj.forEach((opVariantObj, ind) => {
+ let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
+ opVariantObj.init(node.getDB(opVariantName), testCollName);
});
- };
-
- /**
- * Operations that will be performed on the rollback node past the common point.
- */
- let RollbackOps = (node) => {
+ });
+};
- // Returns a new object with any metadata fields from the given command object removed.
- function basicCommandObj(fullCommandObj) {
- let basicCommandObj = {};
- for (let field in fullCommandObj) {
- if (fullCommandObj.hasOwnProperty(field) && !field.startsWith("$")) {
- basicCommandObj[field] = fullCommandObj[field];
- }
+/**
+ * Operations that will be performed on the rollback node past the common point.
+ */
+let RollbackOps = (node) => {
+ // Returns a new object with any metadata fields from the given command object removed.
+ function basicCommandObj(fullCommandObj) {
+ let basicCommandObj = {};
+ for (let field in fullCommandObj) {
+ if (fullCommandObj.hasOwnProperty(field) && !field.startsWith("$")) {
+ basicCommandObj[field] = fullCommandObj[field];
}
- return basicCommandObj;
}
+ return basicCommandObj;
+ }
- // Execute the operation given by 'opFn'. 'opName' is the string identifier of the
- // operation to be executed.
- function executeOp(opName, opFn) {
- // Override 'runCommand' so we can capture the raw command object for each operation
- // and log it, to improve diagnostics.
- const runCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function(dbName, commandObj, options) {
- jsTestLog("Executing command for '" + opName + "' test: \n" +
- tojson(basicCommandObj(commandObj)));
- return runCommandOriginal.apply(this, arguments);
- };
+ // Execute the operation given by 'opFn'. 'opName' is the string identifier of the
+ // operation to be executed.
+ function executeOp(opName, opFn) {
+ // Override 'runCommand' so we can capture the raw command object for each operation
+ // and log it, to improve diagnostics.
+ const runCommandOriginal = Mongo.prototype.runCommand;
+ Mongo.prototype.runCommand = function(dbName, commandObj, options) {
+ jsTestLog("Executing command for '" + opName + "' test: \n" +
+ tojson(basicCommandObj(commandObj)));
+ return runCommandOriginal.apply(this, arguments);
+ };
- opFn(node.getDB(opName), testCollName);
+ opFn(node.getDB(opName), testCollName);
- // Reset runCommand to its normal behavior.
- Mongo.prototype.runCommand = runCommandOriginal;
- }
+ // Reset runCommand to its normal behavior.
+ Mongo.prototype.runCommand = runCommandOriginal;
+ }
- jsTestLog("Performing rollback operations for every operation type.");
- opNames.forEach(opName => {
- let opObj = rollbackOps[opName];
- // Execute all test cases for this operation type.
- jsTestLog("Performing '" + opName + "' operations.");
- opObj.forEach((opVariantObj, ind) => {
- let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
- executeOp(opVariantName, opVariantObj.op);
- });
+ jsTestLog("Performing rollback operations for every operation type.");
+ opNames.forEach(opName => {
+ let opObj = rollbackOps[opName];
+ // Execute all test cases for this operation type.
+ jsTestLog("Performing '" + opName + "' operations.");
+ opObj.forEach((opVariantObj, ind) => {
+ let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
+ executeOp(opVariantName, opVariantObj.op);
});
+ });
+};
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTestDeluxe();
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+let rollbackTest = new RollbackTestDeluxe();
+CommonOps(rollbackTest.getPrimary());
- // Perform the operations that will be rolled back.
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+// Perform the operations that will be rolled back.
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Complete cycle one of rollback. Data consistency is checked automatically after entering
- // steady state.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Complete cycle one of rollback. Data consistency is checked automatically after entering
+// steady state.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Again, perform operations that will be rolled back. This time, each node in the replica set
- // has assumed a different role and will roll back operations that were applied in a different
- // state (e.g. as a SECONDARY as opposed to a PRIMARY).
- rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+// Again, perform operations that will be rolled back. This time, each node in the replica set
+// has assumed a different role and will roll back operations that were applied in a different
+// state (e.g. as a SECONDARY as opposed to a PRIMARY).
+rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Complete cycle two of rollback.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Complete cycle two of rollback.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Perform operations that will be rolled back one more time.
- rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+// Perform operations that will be rolled back one more time.
+rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Complete cycle three of rollback. After this cycle is completed, the replica set returns to
- // its original topology.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Complete cycle three of rollback. After this cycle is completed, the replica set returns to
+// its original topology.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Check the replica set.
- rollbackTest.stop();
+// Check the replica set.
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js
index d7703ea3824..e85ce9b8082 100644
--- a/jstests/replsets/rollback_auth.js
+++ b/jstests/replsets/rollback_auth.js
@@ -11,211 +11,210 @@
// @tags: [requires_persistence]
(function() {
- "use strict";
-
- // Arbiters don't replicate the admin.system.keys collection, so they can never validate or sign
- // clusterTime. Gossiping a clusterTime to an arbiter as a user other than __system will fail,
- // so we skip gossiping for this test.
- //
- // TODO SERVER-32639: remove this flag.
- TestData.skipGossipingClusterTime = true;
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- // helper function for verifying contents at the end of the test
- var checkFinalResults = function(db) {
- assert.commandWorked(db.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(db.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(db.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandWorked(db.runCommand({collStats: 'baz'}));
- assert.commandWorked(db.runCommand({collStats: 'foobar'}));
- };
-
- var authzErrorCode = 13;
-
- jsTestLog("Setting up replica set");
-
- var name = "rollbackAuth";
- var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: 'jstests/libs/key1'});
- var nodes = replTest.nodeList();
- var conns = replTest.startSet();
- replTest.initiate({
- "_id": "rollbackAuth",
- "members": [
- {"_id": 0, "host": nodes[0], "priority": 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
+"use strict";
- // Make sure we have a master
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
- var a_conn = conns[0];
- var b_conn = conns[1];
- a_conn.setSlaveOk();
- b_conn.setSlaveOk();
- var A = a_conn.getDB("admin");
- var B = b_conn.getDB("admin");
- var a = a_conn.getDB("test");
- var b = b_conn.getDB("test");
- assert.eq(master, conns[0], "conns[0] assumed to be master");
- assert.eq(a_conn, master);
-
- // Make sure we have an arbiter
- assert.soon(function() {
- var res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1});
- return res.myState == 7;
- }, "Arbiter failed to initialize.");
-
- jsTestLog("Creating initial data");
-
- // Create collections that will be used in test
- A.createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
- A.auth('admin', 'pwd');
- a.foo.insert({a: 1});
- a.bar.insert({a: 1});
- a.baz.insert({a: 1});
- a.foobar.insert({a: 1});
-
- // Set up user admin user
- A.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
- A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user
- B.auth('userAdmin', 'pwd');
-
- // Create a basic user and role
- A.createRole({
- role: 'replStatusRole', // To make awaitReplication() work
- roles: [],
- privileges: [
- {resource: {cluster: true}, actions: ['replSetGetStatus']},
- {resource: {db: 'local', collection: ''}, actions: ['find']},
- {resource: {db: 'local', collection: 'system.replset'}, actions: ['find']}
- ]
- });
- a.createRole({
- role: 'myRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: ''}, actions: ['dbStats']}]
- });
- a.createUser(
- {user: 'spencer', pwd: 'pwd', roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]});
- assert(a.auth('spencer', 'pwd'));
-
- // wait for secondary to get this data
- assert.soon(function() {
- return b.auth('spencer', 'pwd');
+// Arbiters don't replicate the admin.system.keys collection, so they can never validate or sign
+// clusterTime. Gossiping a clusterTime to an arbiter as a user other than __system will fail,
+// so we skip gossiping for this test.
+//
+// TODO SERVER-32639: remove this flag.
+TestData.skipGossipingClusterTime = true;
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+// helper function for verifying contents at the end of the test
+var checkFinalResults = function(db) {
+ assert.commandWorked(db.runCommand({dbStats: 1}));
+ assert.commandFailedWithCode(db.runCommand({collStats: 'foo'}), authzErrorCode);
+ assert.commandFailedWithCode(db.runCommand({collStats: 'bar'}), authzErrorCode);
+ assert.commandWorked(db.runCommand({collStats: 'baz'}));
+ assert.commandWorked(db.runCommand({collStats: 'foobar'}));
+};
+
+var authzErrorCode = 13;
+
+jsTestLog("Setting up replica set");
+
+var name = "rollbackAuth";
+var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: 'jstests/libs/key1'});
+var nodes = replTest.nodeList();
+var conns = replTest.startSet();
+replTest.initiate({
+ "_id": "rollbackAuth",
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
+
+// Make sure we have a master
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var master = replTest.getPrimary();
+var a_conn = conns[0];
+var b_conn = conns[1];
+a_conn.setSlaveOk();
+b_conn.setSlaveOk();
+var A = a_conn.getDB("admin");
+var B = b_conn.getDB("admin");
+var a = a_conn.getDB("test");
+var b = b_conn.getDB("test");
+assert.eq(master, conns[0], "conns[0] assumed to be master");
+assert.eq(a_conn, master);
+
+// Make sure we have an arbiter
+assert.soon(function() {
+ var res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1});
+ return res.myState == 7;
+}, "Arbiter failed to initialize.");
+
+jsTestLog("Creating initial data");
+
+// Create collections that will be used in the test
+A.createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
+A.auth('admin', 'pwd');
+a.foo.insert({a: 1});
+a.bar.insert({a: 1});
+a.baz.insert({a: 1});
+a.foobar.insert({a: 1});
+
+// Set up the user admin user
+A.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
+A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user
+B.auth('userAdmin', 'pwd');
+
+// Create a basic user and role
+A.createRole({
+ role: 'replStatusRole', // To make awaitReplication() work
+ roles: [],
+ privileges: [
+ {resource: {cluster: true}, actions: ['replSetGetStatus']},
+ {resource: {db: 'local', collection: ''}, actions: ['find']},
+ {resource: {db: 'local', collection: 'system.replset'}, actions: ['find']}
+ ]
+});
+a.createRole({
+ role: 'myRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['dbStats']}]
+});
+a.createUser(
+ {user: 'spencer', pwd: 'pwd', roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]});
+assert(a.auth('spencer', 'pwd'));
+
+// wait for secondary to get this data
+assert.soon(function() {
+ return b.auth('spencer', 'pwd');
+});
+
+assert.commandWorked(a.runCommand({dbStats: 1}));
+assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+assert.commandWorked(b.runCommand({dbStats: 1}));
+assert.commandFailedWithCode(b.runCommand({collStats: 'foo'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'bar'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+jsTestLog("Doing writes that will eventually be rolled back");
+
+// down A and wait for B to become master
+replTest.stop(0);
+assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+}, "B didn't become master");
+printjson(b.adminCommand('replSetGetStatus'));
+
+// Modify the user and role in a way that will be rolled back.
+b.grantPrivilegesToRole('myRole',
+ [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
+ {}); // Default write concern will wait for majority, which will time out.
+b.createRole({
+ role: 'temporaryRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
+},
+ {}); // Default write concern will wait for majority, which will time out.
+b.grantRolesToUser('spencer',
+ ['temporaryRole'],
+ {}); // Default write concern will wait for majority, which will time out.
+
+assert.commandWorked(b.runCommand({dbStats: 1}));
+assert.commandWorked(b.runCommand({collStats: 'foo'}));
+assert.commandWorked(b.runCommand({collStats: 'bar'}));
+assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+// down B, bring A back up, then wait for A to become master
+// insert new data into A so that B will need to roll back when it reconnects to A
+replTest.stop(1);
+
+replTest.restart(0);
+assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+}, "A didn't become master");
+
+// A should not have the new data as it was down
+assert.commandWorked(a.runCommand({dbStats: 1}));
+assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+jsTestLog("Doing writes that should persist after the rollback");
+// Modify the user and role in a way that will persist.
+A.auth('userAdmin', 'pwd');
+// Default write concern will wait for majority, which would time out
+// so we override it with an empty write concern
+a.grantPrivilegesToRole(
+ 'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {});
+
+a.createRole({
+ role: 'persistentRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
+},
+ {});
+a.grantRolesToUser('spencer', ['persistentRole'], {});
+A.logout();
+a.auth('spencer', 'pwd');
+
+// A has the data we just wrote, but not what B wrote before
+checkFinalResults(a);
+
+jsTestLog("Triggering rollback");
+
+// bring B back in contact with A
+// as A is primary, B will roll back and then catch up
+replTest.restart(1);
+assert.soonNoExcept(function() {
+ authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
+ replTest.awaitReplication();
});
- assert.commandWorked(a.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- assert.commandWorked(b.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(b.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- jsTestLog("Doing writes that will eventually be rolled back");
-
- // down A and wait for B to become master
- replTest.stop(0);
- assert.soon(function() {
- try {
- return B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- }, "B didn't become master");
- printjson(b.adminCommand('replSetGetStatus'));
-
- // Modify the the user and role in a way that will be rolled back.
- b.grantPrivilegesToRole(
- 'myRole',
- [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
- {}); // Default write concern will wait for majority, which will time out.
- b.createRole({
- role: 'temporaryRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
- },
- {}); // Default write concern will wait for majority, which will time out.
- b.grantRolesToUser('spencer',
- ['temporaryRole'],
- {}); // Default write concern will wait for majority, which will time out.
-
- assert.commandWorked(b.runCommand({dbStats: 1}));
- assert.commandWorked(b.runCommand({collStats: 'foo'}));
- assert.commandWorked(b.runCommand({collStats: 'bar'}));
- assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- // down B, bring A back up, then wait for A to become master
- // insert new data into A so that B will need to rollback when it reconnects to A
- replTest.stop(1);
-
- replTest.restart(0);
- assert.soon(function() {
- try {
- return A.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- }, "A didn't become master");
-
- // A should not have the new data as it was down
- assert.commandWorked(a.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- jsTestLog("Doing writes that should persist after the rollback");
- // Modify the user and role in a way that will persist.
- A.auth('userAdmin', 'pwd');
- // Default write concern will wait for majority, which would time out
- // so we override it with an empty write concern
- a.grantPrivilegesToRole(
- 'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {});
-
- a.createRole({
- role: 'persistentRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
- },
- {});
- a.grantRolesToUser('spencer', ['persistentRole'], {});
- A.logout();
- a.auth('spencer', 'pwd');
-
- // A has the data we just wrote, but not what B wrote before
- checkFinalResults(a);
-
- jsTestLog("Triggering rollback");
-
- // bring B back in contact with A
- // as A is primary, B will roll back and then catch up
- replTest.restart(1);
- assert.soonNoExcept(function() {
- authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
- replTest.awaitReplication();
- });
-
- return b.auth('spencer', 'pwd');
- });
- // Now both A and B should agree
- checkFinalResults(a);
- checkFinalResults(b);
+ return b.auth('spencer', 'pwd');
+});
+// Now both A and B should agree
+checkFinalResults(a);
+checkFinalResults(b);
- // Verify data consistency between nodes.
- authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
- replTest.checkOplogs();
- });
+// Verify data consistency between nodes.
+authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
+ replTest.checkOplogs();
+});
- // DB hash check is done in stopSet.
- replTest.stopSet();
+// DB hash check is done in stopSet.
+replTest.stopSet();
}());
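[Annotation] A recurring detail in rollback_auth.js is the trailing {} passed to createRole, grantPrivilegesToRole, and grantRolesToUser: these shell helpers take a write concern as their final argument and default to majority, which cannot be satisfied while the only other data-bearing node is down (the arbiter cannot acknowledge writes). A hedged sketch of the idea, with illustrative role and user names, assuming the user already exists:

    // With one data-bearing node down, the default majority write concern would
    // block until timeout, so pass an empty write concern to skip the wait.
    const testDb = db.getSiblingDB("test");
    testDb.createRole({
        role: "exampleRole",  // illustrative, not from the test
        roles: [],
        privileges: [{resource: {db: "test", collection: "foo"}, actions: ["find"]}]
    }, {});  // empty write concern: do not wait for replication
    testDb.grantRolesToUser("exampleUser", ["exampleRole"], {});
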
diff --git a/jstests/replsets/rollback_capped_deletions.js b/jstests/replsets/rollback_capped_deletions.js
index 213efd8b1fd..86928d2601f 100644
--- a/jstests/replsets/rollback_capped_deletions.js
+++ b/jstests/replsets/rollback_capped_deletions.js
@@ -2,47 +2,47 @@
* Tests that capped collections get the correct fastcounts after rollback.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/rollback_test.js');
+load('jstests/replsets/libs/rollback_test.js');
- const testName = 'rollback_capped_deletions';
- const dbName = testName;
- const collName = 'cappedCollName';
+const testName = 'rollback_capped_deletions';
+const dbName = testName;
+const collName = 'cappedCollName';
- const rollbackTest = new RollbackTest(testName);
- const primary = rollbackTest.getPrimary();
- const testDb = primary.getDB(dbName);
+const rollbackTest = new RollbackTest(testName);
+const primary = rollbackTest.getPrimary();
+const testDb = primary.getDB(dbName);
- assert.commandWorked(testDb.runCommand({
- 'create': collName,
- 'capped': true,
- 'size': 40,
- }));
- const coll = testDb.getCollection(collName);
- assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(testDb.runCommand({
+ 'create': collName,
+ 'capped': true,
+ 'size': 40,
+}));
+const coll = testDb.getCollection(collName);
+assert.commandWorked(coll.insert({a: 1}));
- rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+rollbackTest.awaitLastOpCommitted();
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- assert.commandWorked(coll.insert({bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: 1}));
- assert.commandWorked(coll.insert({cccccccccccccccccccccccccccccccccccccccccccc: 1}));
- assert.commandWorked(coll.insert({dddddddddddddddddddddddddddddddddddddddddddd: 1}));
- assert.commandWorked(coll.insert({eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee: 1}));
+assert.commandWorked(coll.insert({bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: 1}));
+assert.commandWorked(coll.insert({cccccccccccccccccccccccccccccccccccccccccccc: 1}));
+assert.commandWorked(coll.insert({dddddddddddddddddddddddddddddddddddddddddddd: 1}));
+assert.commandWorked(coll.insert({eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee: 1}));
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(coll.insert({ffffffffffffffffffffffffffffffffffffffffffff: 1}));
+assert.commandWorked(coll.insert({ffffffffffffffffffffffffffffffffffffffffffff: 1}));
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
- rollbackTest.stop();
+rollbackTest.stop();
})();
\ No newline at end of file
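[Annotation] The try/finally around transitionToSteadyStateOperations in this test is worth imitating: if the steady-state checks throw while disableSnapshotting is still enabled, later tests on the same fixture would inherit the failpoint. A generic sketch of the pattern, assuming a primary connection and a hypothetical helper for the guarded work:

    // Failpoint hygiene: pair every 'alwaysOn' with an 'off' in a finally block.
    assert.commandWorked(
        primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
    try {
        doWorkThatNeedsTheFailPoint();  // hypothetical helper for the guarded section
    } finally {
        assert.commandWorked(
            primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"}));
    }
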
diff --git a/jstests/replsets/rollback_collmods.js b/jstests/replsets/rollback_collmods.js
index 6f910ac7a3a..6a741ec6174 100644
--- a/jstests/replsets/rollback_collmods.js
+++ b/jstests/replsets/rollback_collmods.js
@@ -4,105 +4,105 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/rollback_test_deluxe.js");
-
- const testName = "rollback_collmods";
- const dbName = testName;
-
- var coll1Name = "NoInitialValidationAtAll";
- var coll2Name = "NoInitialValidationAction";
- var coll3Name = "NoInitialValidator";
- var coll4Name = "NoInitialValidationLevel";
-
- function printCollectionOptionsForNode(node, time) {
- let opts = assert.commandWorked(node.getDB(dbName).runCommand({"listCollections": 1}));
- jsTestLog("Collection options " + time + " on " + node.host + ": " + tojson(opts));
- }
-
- function printCollectionOptions(rollbackTest, time) {
- printCollectionOptionsForNode(rollbackTest.getPrimary(), time);
- rollbackTest.getSecondaries().forEach(node => printCollectionOptionsForNode(node, time));
- }
-
- // Operations that will be present on both nodes, before the common point.
- let CommonOps = (node) => {
- let testDb = node.getDB(dbName);
- assert.writeOK(testDb[coll1Name].insert({a: 1, b: 1}));
- assert.writeOK(testDb[coll2Name].insert({a: 2, b: 2}));
- assert.writeOK(testDb[coll3Name].insert({a: 3, b: 3}));
- assert.writeOK(testDb[coll4Name].insert({a: 4, b: 4}));
-
- // Start with no validation action.
- assert.commandWorked(testDb.runCommand({
- collMod: coll2Name,
- validator: {a: 1},
- validationLevel: "moderate",
- }));
-
- // Start with no validator.
- assert.commandWorked(testDb.runCommand(
- {collMod: coll3Name, validationLevel: "moderate", validationAction: "warn"}));
-
- // Start with no validation level.
- assert.commandWorked(
- testDb.runCommand({collMod: coll4Name, validator: {a: 1}, validationAction: "warn"}));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- let testDb = node.getDB(dbName);
-
- // Set everything on the rollback node.
- assert.commandWorked(testDb.runCommand({
- collMod: coll1Name,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"
- }));
-
- // Only modify the action, and never modify it again so it needs to be reset to empty.
- assert.commandWorked(testDb.runCommand({collMod: coll2Name, validationAction: "error"}));
-
- // Only modify the validator, and never modify it again so it needs to be reset to empty.
- assert.commandWorked(testDb.runCommand({collMod: coll3Name, validator: {b: 1}}));
-
- // Only modify the level, and never modify it again so it needs to be reset to empty.
- assert.commandWorked(testDb.runCommand({
- collMod: coll4Name,
- validationLevel: "moderate",
- }));
- };
-
- // Operations that will be performed on the sync source node after rollback.
- let SteadyStateOps = (node) => {
- let testDb = node.getDB(dbName);
-
- assert.commandWorked(testDb.runCommand({collMod: coll2Name, validator: {b: 1}}));
- assert.commandWorked(testDb.runCommand({collMod: coll3Name, validationAction: "error"}));
- assert.commandWorked(testDb.runCommand({collMod: coll4Name, validationAction: "error"}));
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTestDeluxe(testName);
- CommonOps(rollbackTest.getPrimary());
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- printCollectionOptions(rollbackTest, "before branch");
- RollbackOps(rollbackNode);
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- printCollectionOptions(rollbackTest, "before rollback");
- // No ops on the sync source.
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
- printCollectionOptions(rollbackTest, "after rollback");
-
- SteadyStateOps(rollbackTest.getPrimary());
- printCollectionOptions(rollbackTest, "at completion");
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/replsets/libs/rollback_test_deluxe.js");
+
+const testName = "rollback_collmods";
+const dbName = testName;
+
+var coll1Name = "NoInitialValidationAtAll";
+var coll2Name = "NoInitialValidationAction";
+var coll3Name = "NoInitialValidator";
+var coll4Name = "NoInitialValidationLevel";
+
+function printCollectionOptionsForNode(node, time) {
+ let opts = assert.commandWorked(node.getDB(dbName).runCommand({"listCollections": 1}));
+ jsTestLog("Collection options " + time + " on " + node.host + ": " + tojson(opts));
+}
+
+function printCollectionOptions(rollbackTest, time) {
+ printCollectionOptionsForNode(rollbackTest.getPrimary(), time);
+ rollbackTest.getSecondaries().forEach(node => printCollectionOptionsForNode(node, time));
+}
+
+// Operations that will be present on both nodes, before the common point.
+let CommonOps = (node) => {
+ let testDb = node.getDB(dbName);
+ assert.writeOK(testDb[coll1Name].insert({a: 1, b: 1}));
+ assert.writeOK(testDb[coll2Name].insert({a: 2, b: 2}));
+ assert.writeOK(testDb[coll3Name].insert({a: 3, b: 3}));
+ assert.writeOK(testDb[coll4Name].insert({a: 4, b: 4}));
+
+ // Start with no validation action.
+ assert.commandWorked(testDb.runCommand({
+ collMod: coll2Name,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ }));
+
+ // Start with no validator.
+ assert.commandWorked(testDb.runCommand(
+ {collMod: coll3Name, validationLevel: "moderate", validationAction: "warn"}));
+
+ // Start with no validation level.
+ assert.commandWorked(
+ testDb.runCommand({collMod: coll4Name, validator: {a: 1}, validationAction: "warn"}));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ let testDb = node.getDB(dbName);
+
+ // Set everything on the rollback node.
+ assert.commandWorked(testDb.runCommand({
+ collMod: coll1Name,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
+ }));
+
+ // Only modify the action, and never modify it again so it needs to be reset to empty.
+ assert.commandWorked(testDb.runCommand({collMod: coll2Name, validationAction: "error"}));
+
+ // Only modify the validator, and never modify it again so it needs to be reset to empty.
+ assert.commandWorked(testDb.runCommand({collMod: coll3Name, validator: {b: 1}}));
+
+ // Only modify the level, and never modify it again so it needs to be reset to empty.
+ assert.commandWorked(testDb.runCommand({
+ collMod: coll4Name,
+ validationLevel: "moderate",
+ }));
+};
+
+// Operations that will be performed on the sync source node after rollback.
+let SteadyStateOps = (node) => {
+ let testDb = node.getDB(dbName);
+
+ assert.commandWorked(testDb.runCommand({collMod: coll2Name, validator: {b: 1}}));
+ assert.commandWorked(testDb.runCommand({collMod: coll3Name, validationAction: "error"}));
+ assert.commandWorked(testDb.runCommand({collMod: coll4Name, validationAction: "error"}));
+};
+
+// Set up Rollback Test.
+let rollbackTest = new RollbackTestDeluxe(testName);
+CommonOps(rollbackTest.getPrimary());
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+printCollectionOptions(rollbackTest, "before branch");
+RollbackOps(rollbackNode);
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+printCollectionOptions(rollbackTest, "before rollback");
+// No ops on the sync source.
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+printCollectionOptions(rollbackTest, "after rollback");
+
+SteadyStateOps(rollbackTest.getPrimary());
+printCollectionOptions(rollbackTest, "at completion");
+
+rollbackTest.stop();
})();
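[Annotation] Each collMod in this test deliberately touches only a subset of the three validation settings, because rollback must restore any untouched setting to its pre-branch value, including "unset". For reference, a sketch showing all three knobs together on a scratch collection; the database and collection names are illustrative:

    // The three independent validation settings collMod can change.
    const scratchDb = db.getSiblingDB("collModExample");  // illustrative database
    assert.commandWorked(scratchDb.createCollection("c"));
    assert.commandWorked(scratchDb.runCommand({
        collMod: "c",
        validator: {a: {$exists: true}},  // which documents count as valid
        validationLevel: "moderate",      // check inserts and updates to valid docs only
        validationAction: "warn"          // log violations instead of rejecting them
    }));
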
diff --git a/jstests/replsets/rollback_crud_op_sequences.js b/jstests/replsets/rollback_crud_op_sequences.js
index 19e83b9cc92..ce21957f45e 100644
--- a/jstests/replsets/rollback_crud_op_sequences.js
+++ b/jstests/replsets/rollback_crud_op_sequences.js
@@ -14,130 +14,130 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- // helper function for verifying contents at the end of the test
- var checkFinalResults = function(db) {
- assert.eq(0, db.bar.count({q: 70}));
- assert.eq(2, db.bar.count({q: 40}));
- assert.eq(3, db.bar.count({a: "foo"}));
- assert.eq(6, db.bar.count({q: {$gt: -1}}));
- assert.eq(1, db.bar.count({txt: "foo"}));
- assert.eq(33, db.bar.findOne({q: 0})["y"]);
- assert.eq(1, db.kap.find().itcount());
- assert.eq(0, db.kap2.find().itcount());
- };
+"use strict";
+// helper function for verifying contents at the end of the test
+var checkFinalResults = function(db) {
+ assert.eq(0, db.bar.count({q: 70}));
+ assert.eq(2, db.bar.count({q: 40}));
+ assert.eq(3, db.bar.count({a: "foo"}));
+ assert.eq(6, db.bar.count({q: {$gt: -1}}));
+ assert.eq(1, db.bar.count({txt: "foo"}));
+ assert.eq(33, db.bar.findOne({q: 0})["y"]);
+ assert.eq(1, db.kap.find().itcount());
+ assert.eq(0, db.kap2.find().itcount());
+};
- var name = "rollback_crud_op_sequences";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.nodeList();
+var name = "rollback_crud_op_sequences";
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.nodeList();
- var conns = replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
+var conns = replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
- // Make sure we have a master and that that master is node A
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
- var a_conn = conns[0];
- a_conn.setSlaveOk();
- var A = a_conn.getDB("admin");
- var b_conn = conns[1];
- b_conn.setSlaveOk();
- var B = b_conn.getDB("admin");
- assert.eq(master, conns[0], "conns[0] assumed to be master");
- assert.eq(a_conn, master);
+// Make sure we have a master and that that master is node A
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var master = replTest.getPrimary();
+var a_conn = conns[0];
+a_conn.setSlaveOk();
+var A = a_conn.getDB("admin");
+var b_conn = conns[1];
+b_conn.setSlaveOk();
+var B = b_conn.getDB("admin");
+assert.eq(master, conns[0], "conns[0] assumed to be master");
+assert.eq(a_conn, master);
- // Wait for initial replication
- var a = a_conn.getDB("foo");
- var b = b_conn.getDB("foo");
+// Wait for initial replication
+var a = a_conn.getDB("foo");
+var b = b_conn.getDB("foo");
- // initial data for both nodes
- assert.writeOK(a.bar.insert({q: 0}));
- assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
- assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 40, a: 1}));
- assert.writeOK(a.bar.insert({q: 40, a: 2}));
- assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
- a.createCollection("kap", {capped: true, size: 5000});
- assert.writeOK(a.kap.insert({foo: 1}));
- // going back to empty on capped is a special case and must be tested
- a.createCollection("kap2", {capped: true, size: 5501});
- replTest.awaitReplication();
+// initial data for both nodes
+assert.writeOK(a.bar.insert({q: 0}));
+assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 40, a: 1}));
+assert.writeOK(a.bar.insert({q: 40, a: 2}));
+assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+a.createCollection("kap", {capped: true, size: 5000});
+assert.writeOK(a.kap.insert({foo: 1}));
+// rolling a capped collection back to empty is a special case and must be tested
+a.createCollection("kap2", {capped: true, size: 5501});
+replTest.awaitReplication();
- // isolate A and wait for B to become master
- conns[0].disconnect(conns[1]);
- conns[0].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- }, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS);
+// isolate A and wait for B to become master
+conns[0].disconnect(conns[1]);
+conns[0].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+}, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS);
- // do operations on B and B alone, these will be rolled back
- assert.writeOK(b.bar.insert({q: 4}));
- assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
- assert.writeOK(b.bar.remove({q: 40})); // multi remove test
- assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
- // rolling back a delete will involve reinserting the item(s)
- assert.writeOK(b.bar.remove({q: 1}));
- assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
- assert.writeOK(b.kap.insert({foo: 2}));
- assert.writeOK(b.kap2.insert({foo: 2}));
- // create a collection (need to roll back the whole thing)
- assert.writeOK(b.newcoll.insert({a: true}));
- // create a new empty collection (need to roll back the whole thing)
- b.createCollection("abc");
+// do operations on B and B alone, these will be rolled back
+assert.writeOK(b.bar.insert({q: 4}));
+assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
+assert.writeOK(b.bar.remove({q: 40})); // multi remove test
+assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
+// rolling back a delete will involve reinserting the item(s)
+assert.writeOK(b.bar.remove({q: 1}));
+assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
+assert.writeOK(b.kap.insert({foo: 2}));
+assert.writeOK(b.kap2.insert({foo: 2}));
+// create a collection (need to roll back the whole thing)
+assert.writeOK(b.newcoll.insert({a: true}));
+// create a new empty collection (need to roll back the whole thing)
+b.createCollection("abc");
- // isolate B, bring A back into contact with the arbiter, then wait for A to become master
- // insert new data into A so that B will need to rollback when it reconnects to A
- conns[1].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return !B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
+// isolate B, bring A back into contact with the arbiter, then wait for A to become master
+// insert new data into A so that B will need to rollback when it reconnects to A
+conns[1].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
- conns[0].reconnect(conns[2]);
- assert.soon(function() {
- try {
- return A.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
- assert.gte(a.bar.find().itcount(), 1, "count check");
- assert.writeOK(a.bar.insert({txt: 'foo'}));
- assert.writeOK(a.bar.remove({q: 70}));
- assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
+conns[0].reconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
+assert.gte(a.bar.find().itcount(), 1, "count check");
+assert.writeOK(a.bar.insert({txt: 'foo'}));
+assert.writeOK(a.bar.remove({q: 70}));
+assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
- // A is 1 2 3 7 8
- // B is 1 2 3 4 5 6
- // put B back in contact with A and arbiter, as A is primary, B will rollback and then catch up
- conns[1].reconnect(conns[2]);
- conns[0].reconnect(conns[1]);
+// A is 1 2 3 7 8
+// B is 1 2 3 4 5 6
+// put B back in contact with A and arbiter, as A is primary, B will roll back and then catch up
+conns[1].reconnect(conns[2]);
+conns[0].reconnect(conns[1]);
- awaitOpTime(b_conn, a_conn);
+awaitOpTime(b_conn, a_conn);
- // await steady state and ensure the two nodes have the same contents
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
- checkFinalResults(a);
- checkFinalResults(b);
+// await steady state and ensure the two nodes have the same contents
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+checkFinalResults(a);
+checkFinalResults(b);
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
- replTest.stopSet(15);
+replTest.stopSet(15);
}());
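[Annotation] The disconnect/reconnect calls above work because the set was started with useBridge: true, which routes every connection through mongobridge and lets the test partition individual node pairs. The election dance has a standard shape; a sketch, assuming conns comes from such a ReplSetTest:

    // Isolate node 0 so node 1 can win an election, then heal the partition.
    conns[0].disconnect(conns[1]);
    conns[0].disconnect(conns[2]);
    assert.soon(function() {
        try {
            return conns[1].getDB("admin").isMaster().ismaster;
        } catch (e) {
            return false;  // tolerate transient errors while the election settles
        }
    }, "node 1 did not become primary");
    // ... perform divergent writes on node 1 here; they will later roll back ...
    conns[0].reconnect(conns[1]);
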
diff --git a/jstests/replsets/rollback_ddl_op_sequences.js b/jstests/replsets/rollback_ddl_op_sequences.js
index d074bb33d4e..79883eac336 100644
--- a/jstests/replsets/rollback_ddl_op_sequences.js
+++ b/jstests/replsets/rollback_ddl_op_sequences.js
@@ -14,155 +14,155 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- // helper function for verifying contents at the end of the test
- var checkFinalResults = function(db) {
- assert.eq(2, db.b.getIndexes().length);
- assert.eq(2, db.oldname.getIndexes().length);
- assert.eq(2, db.oldname.find().itcount());
- assert.eq(1, db.kap.find().itcount());
- assert(db.kap.isCapped());
- assert.eq(0, db.bar.count({q: 70}));
- assert.eq(33, db.bar.findOne({q: 0})["y"]);
- assert.eq(0, db.bar.count({q: 70}));
- assert.eq(1, db.bar.count({txt: "foo"}));
- assert.eq(200, db.bar.count({i: {$gt: -1}}));
- assert.eq(6, db.bar.count({q: {$gt: -1}}));
- assert.eq(0, db.getSiblingDB("abc").foo.find().itcount());
- assert.eq(0, db.getSiblingDB("abc").bar.find().itcount());
- };
+"use strict";
+// helper function for verifying contents at the end of the test
+var checkFinalResults = function(db) {
+ assert.eq(2, db.b.getIndexes().length);
+ assert.eq(2, db.oldname.getIndexes().length);
+ assert.eq(2, db.oldname.find().itcount());
+ assert.eq(1, db.kap.find().itcount());
+ assert(db.kap.isCapped());
+ assert.eq(0, db.bar.count({q: 70}));
+ assert.eq(33, db.bar.findOne({q: 0})["y"]);
+ assert.eq(0, db.bar.count({q: 70}));
+ assert.eq(1, db.bar.count({txt: "foo"}));
+ assert.eq(200, db.bar.count({i: {$gt: -1}}));
+ assert.eq(6, db.bar.count({q: {$gt: -1}}));
+ assert.eq(0, db.getSiblingDB("abc").foo.find().itcount());
+ assert.eq(0, db.getSiblingDB("abc").bar.find().itcount());
+};
- var name = "rollback_ddl_op_sequences";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.nodeList();
+var name = "rollback_ddl_op_sequences";
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.nodeList();
- var conns = replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
+var conns = replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
- // Make sure we have a master and that that master is node A
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
- var a_conn = conns[0];
- a_conn.setSlaveOk();
- var A = a_conn.getDB("admin");
- var b_conn = conns[1];
- b_conn.setSlaveOk();
- var B = b_conn.getDB("admin");
- assert.eq(master, conns[0], "conns[0] assumed to be master");
- assert.eq(a_conn, master);
+// Make sure we have a master and that that master is node A
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var master = replTest.getPrimary();
+var a_conn = conns[0];
+a_conn.setSlaveOk();
+var A = a_conn.getDB("admin");
+var b_conn = conns[1];
+b_conn.setSlaveOk();
+var B = b_conn.getDB("admin");
+assert.eq(master, conns[0], "conns[0] assumed to be master");
+assert.eq(a_conn, master);
- // Wait for initial replication
- var a = a_conn.getDB("foo");
- var b = b_conn.getDB("foo");
+// Wait for initial replication
+var a = a_conn.getDB("foo");
+var b = b_conn.getDB("foo");
- // initial data for both nodes
- assert.writeOK(a.b.insert({x: 1}));
- a.b.ensureIndex({x: 1});
- assert.writeOK(a.oldname.insert({y: 1}));
- assert.writeOK(a.oldname.insert({y: 2}));
- a.oldname.ensureIndex({y: 1}, true);
- assert.writeOK(a.bar.insert({q: 0}));
- assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
- assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 40333333, a: 1}));
- for (var i = 0; i < 200; i++) {
- assert.writeOK(a.bar.insert({i: i}));
- }
- assert.writeOK(a.bar.insert({q: 40, a: 2}));
- assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
- a.createCollection("kap", {capped: true, size: 5000});
- assert.writeOK(a.kap.insert({foo: 1}));
- replTest.awaitReplication();
+// initial data for both nodes
+assert.writeOK(a.b.insert({x: 1}));
+a.b.ensureIndex({x: 1});
+assert.writeOK(a.oldname.insert({y: 1}));
+assert.writeOK(a.oldname.insert({y: 2}));
+a.oldname.ensureIndex({y: 1}, true);
+assert.writeOK(a.bar.insert({q: 0}));
+assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 40333333, a: 1}));
+for (var i = 0; i < 200; i++) {
+ assert.writeOK(a.bar.insert({i: i}));
+}
+assert.writeOK(a.bar.insert({q: 40, a: 2}));
+assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+a.createCollection("kap", {capped: true, size: 5000});
+assert.writeOK(a.kap.insert({foo: 1}));
+replTest.awaitReplication();
- // isolate A and wait for B to become master
- conns[0].disconnect(conns[1]);
- conns[0].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
+// isolate A and wait for B to become master
+conns[0].disconnect(conns[1]);
+conns[0].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
- // do operations on B and B alone, these will be rolled back
- assert.writeOK(b.bar.insert({q: 4}));
- assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
- assert.writeOK(b.bar.remove({q: 40})); // multi remove test
- assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
- // rolling back a delete will involve reinserting the item(s)
- assert.writeOK(b.bar.remove({q: 1}));
- assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
- assert.writeOK(b.kap.insert({foo: 2}));
- assert.writeOK(b.kap2.insert({foo: 2}));
- // create a collection (need to roll back the whole thing)
- assert.writeOK(b.newcoll.insert({a: true}));
- // create a new empty collection (need to roll back the whole thing)
- b.createCollection("abc");
- // drop a collection - we'll need all its data back!
- b.bar.drop();
- // drop an index - verify it comes back
- b.b.dropIndexes();
- // two to see if we transitively rollback?
- b.oldname.renameCollection("newname");
- b.newname.renameCollection("fooname");
- assert(b.fooname.find().itcount() > 0, "count rename");
- // create an index - verify that it is removed
- b.fooname.ensureIndex({q: 1});
- // test roll back (drop) a whole database
- var abc = b.getSisterDB("abc");
- assert.writeOK(abc.foo.insert({x: 1}));
- assert.writeOK(abc.bar.insert({y: 999}));
+// do operations on B and B alone, these will be rolled back
+assert.writeOK(b.bar.insert({q: 4}));
+assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
+assert.writeOK(b.bar.remove({q: 40})); // multi remove test
+assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
+// rolling back a delete will involve reinserting the item(s)
+assert.writeOK(b.bar.remove({q: 1}));
+assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
+assert.writeOK(b.kap.insert({foo: 2}));
+assert.writeOK(b.kap2.insert({foo: 2}));
+// create a collection (need to roll back the whole thing)
+assert.writeOK(b.newcoll.insert({a: true}));
+// create a new empty collection (need to roll back the whole thing)
+b.createCollection("abc");
+// drop a collection - we'll need all its data back!
+b.bar.drop();
+// drop an index - verify it comes back
+b.b.dropIndexes();
+// rename twice to see if renames roll back transitively
+b.oldname.renameCollection("newname");
+b.newname.renameCollection("fooname");
+assert(b.fooname.find().itcount() > 0, "count rename");
+// create an index - verify that it is removed
+b.fooname.ensureIndex({q: 1});
+// test roll back (drop) a whole database
+var abc = b.getSisterDB("abc");
+assert.writeOK(abc.foo.insert({x: 1}));
+assert.writeOK(abc.bar.insert({y: 999}));
- // isolate B, bring A back into contact with the arbiter, then wait for A to become master
- // insert new data into A so that B will need to rollback when it reconnects to A
- conns[1].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return !B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
+// isolate B, bring A back into contact with the arbiter, then wait for A to become master
+// insert new data into A so that B will need to rollback when it reconnects to A
+conns[1].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
- conns[0].reconnect(conns[2]);
- assert.soon(function() {
- try {
- return A.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
- assert(a.bar.find().itcount() >= 1, "count check");
- assert.writeOK(a.bar.insert({txt: 'foo'}));
- assert.writeOK(a.bar.remove({q: 70}));
- assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
+conns[0].reconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
+assert(a.bar.find().itcount() >= 1, "count check");
+assert.writeOK(a.bar.insert({txt: 'foo'}));
+assert.writeOK(a.bar.remove({q: 70}));
+assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
- // A is 1 2 3 7 8
- // B is 1 2 3 4 5 6
- // put B back in contact with A and arbiter, as A is primary, B will rollback and then catch up
- conns[1].reconnect(conns[2]);
- conns[0].reconnect(conns[1]);
+// A is 1 2 3 7 8
+// B is 1 2 3 4 5 6
+// put B back in contact with A and arbiter, as A is primary, B will rollback and then catch up
+conns[1].reconnect(conns[2]);
+conns[0].reconnect(conns[1]);
- awaitOpTime(b_conn, a_conn);
+awaitOpTime(b_conn, a_conn);
- // await steady state and ensure the two nodes have the same contents
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
- checkFinalResults(a);
- checkFinalResults(b);
+// await steady state and ensure the two nodes have the same contents
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+checkFinalResults(a);
+checkFinalResults(b);
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
- replTest.stopSet(15);
+replTest.stopSet(15);
}());
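[Annotation] Both sequence tests end with the same consistency ritual, and the ordering matters: replication must be complete before the hash and oplog comparisons are meaningful. The closing sequence, isolated for reference:

    // Standard end-of-test consistency verification for a replica set fixture.
    replTest.awaitSecondaryNodes();        // wait for nodes to leave ROLLBACK/RECOVERING
    replTest.awaitReplication();           // all acknowledged writes visible everywhere
    replTest.checkReplicatedDataHashes();  // dbHash must agree across nodes
    replTest.checkOplogs();                // oplogs must be identical
    replTest.stopSet();
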
diff --git a/jstests/replsets/rollback_drop_database.js b/jstests/replsets/rollback_drop_database.js
index 70fb8561140..aa783cc9dd3 100644
--- a/jstests/replsets/rollback_drop_database.js
+++ b/jstests/replsets/rollback_drop_database.js
@@ -1,73 +1,72 @@
/*
-* Test that the server is able to roll back a 'dropDatabase' entry correctly. This test creates
-* a collection, then executes a 'dropDatabase' command, partitioning the primary such that the
-* final 'dropDatabase' oplog entry is not replicated. The test then forces rollback of that entry.
-*
-* The 'dropDatabase' command drops each collection, ensures that the last drop is committed,
-* and only then logs a 'dropDatabase' oplog entry. This is therefore the only entry that could
-* get rolled back.
-*/
+ * Test that the server is able to roll back a 'dropDatabase' entry correctly. This test creates
+ * a collection, then executes a 'dropDatabase' command, partitioning the primary such that the
+ * final 'dropDatabase' oplog entry is not replicated. The test then forces rollback of that entry.
+ *
+ * The 'dropDatabase' command drops each collection, ensures that the last drop is committed,
+ * and only then logs a 'dropDatabase' oplog entry. This is therefore the only entry that could
+ * get rolled back.
+ */
(function() {
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/libs/check_log.js");
- const testName = "rollback_drop_database";
- const oldDbName = "oldDatabase";
- const newDbName = "newDatabase";
+const testName = "rollback_drop_database";
+const oldDbName = "oldDatabase";
+const newDbName = "newDatabase";
- let rollbackTest = new RollbackTest(testName);
- let rollbackNode = rollbackTest.getPrimary();
- let syncSourceNode = rollbackTest.getSecondary();
+let rollbackTest = new RollbackTest(testName);
+let rollbackNode = rollbackTest.getPrimary();
+let syncSourceNode = rollbackTest.getSecondary();
- // Perform initial insert (common operation).
- assert.writeOK(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1}));
+// Perform initial insert (common operation).
+assert.writeOK(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1}));
- // Set a failpoint on the original primary, so that it blocks after it commits the last
- // 'dropCollection' entry but before the 'dropDatabase' entry is logged.
- assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "dropDatabaseHangBeforeLog", mode: "alwaysOn"}));
+// Set a failpoint on the original primary, so that it blocks after it commits the last
+// 'dropCollection' entry but before the 'dropDatabase' entry is logged.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "dropDatabaseHangBeforeLog", mode: "alwaysOn"}));
- // Issue a 'dropDatabase' command.
- let dropDatabaseFn = function() {
- const rollbackDb = "oldDatabase";
- var primary = db.getMongo();
- jsTestLog("Dropping database " + rollbackDb + " on primary node " + primary.host);
- var dbToDrop = db.getSiblingDB(rollbackDb);
- assert.commandWorked(dbToDrop.dropDatabase({w: 1}));
- };
- let waitForDropDatabaseToFinish = startParallelShell(dropDatabaseFn, rollbackNode.port);
+// Issue a 'dropDatabase' command.
+let dropDatabaseFn = function() {
+ const rollbackDb = "oldDatabase";
+ var primary = db.getMongo();
+ jsTestLog("Dropping database " + rollbackDb + " on primary node " + primary.host);
+ var dbToDrop = db.getSiblingDB(rollbackDb);
+ assert.commandWorked(dbToDrop.dropDatabase({w: 1}));
+};
+let waitForDropDatabaseToFinish = startParallelShell(dropDatabaseFn, rollbackNode.port);
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(rollbackNode, "dropDatabase - fail point dropDatabaseHangBeforeLog enabled");
+// Ensure that we've hit the failpoint before moving on.
+checkLog.contains(rollbackNode, "dropDatabase - fail point dropDatabaseHangBeforeLog enabled");
- // Wait for the secondary to finish dropping the collection (the last replicated entry).
- // We use the default 10-minute timeout for this.
- assert.soon(function() {
- let res = syncSourceNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback");
- return !res;
- }, "Sync source did not finish dropping collection beforeRollback", 10 * 60 * 1000);
+// Wait for the secondary to finish dropping the collection (the last replicated entry).
+// We use the default 10-minute timeout for this.
+assert.soon(function() {
+ let res = syncSourceNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback");
+ return !res;
+}, "Sync source did not finish dropping collection beforeRollback", 10 * 60 * 1000);
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- // Allow the final 'dropDatabase' entry to be logged on the now isolated primary.
- // This is the rollback node's divergent oplog entry.
- assert.commandWorked(
- rollbackNode.adminCommand({configureFailPoint: "dropDatabaseHangBeforeLog", mode: "off"}));
- waitForDropDatabaseToFinish();
- assert.eq(false, rollbackNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback"));
- jsTestLog("Database " + oldDbName + " successfully dropped on primary node " +
- rollbackNode.host);
+// Allow the final 'dropDatabase' entry to be logged on the now isolated primary.
+// This is the rollback node's divergent oplog entry.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "dropDatabaseHangBeforeLog", mode: "off"}));
+waitForDropDatabaseToFinish();
+assert.eq(false, rollbackNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback"));
+jsTestLog("Database " + oldDbName + " successfully dropped on primary node " + rollbackNode.host);
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- // Perform an insert on another database while interfacing with the new primary.
- // This is the sync source's divergent oplog entry.
- assert.writeOK(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2}));
+// Perform an insert on another database while interfacing with the new primary.
+// This is the sync source's divergent oplog entry.
+assert.writeOK(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2}));
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+rollbackTest.stop();
})();
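[Annotation] Pinning a command server-side and driving it from a parallel shell, as this test does, is the standard way to split one operation across test phases: enable the failpoint, launch the command with startParallelShell, wait for the log line proving it is parked, reposition the test, then release. A condensed sketch using the same failpoint, assuming check_log.js is loaded and node is a connection to the target server:

    // Hold dropDatabase open across a phase boundary, then let it finish.
    assert.commandWorked(node.adminCommand(
        {configureFailPoint: "dropDatabaseHangBeforeLog", mode: "alwaysOn"}));
    let awaitShell = startParallelShell(function() {
        assert.commandWorked(db.getSiblingDB("oldDatabase").dropDatabase());
    }, node.port);
    checkLog.contains(node, "dropDatabaseHangBeforeLog");  // the command is parked
    // ... move the test into the desired phase here ...
    assert.commandWorked(node.adminCommand(
        {configureFailPoint: "dropDatabaseHangBeforeLog", mode: "off"}));
    awaitShell();  // join the parallel shell once dropDatabase completes
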
diff --git a/jstests/replsets/rollback_drop_index_after_rename.js b/jstests/replsets/rollback_drop_index_after_rename.js
index efeffcdbf68..143701e8e95 100644
--- a/jstests/replsets/rollback_drop_index_after_rename.js
+++ b/jstests/replsets/rollback_drop_index_after_rename.js
@@ -6,53 +6,53 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/rollback_test.js");
-
- const testName = "rollback_drop_index_after_rename";
- const dbName = testName;
-
- var fromColl = "fromColl";
- var toColl = "toColl";
- var idxName = "a_1";
-
- // Operations that will be present on both nodes, before the common point.
- let CommonOps = (node) => {
- let testDb = node.getDB(dbName);
- // This creates the collection implicitly and then creates the index.
- assert.commandWorked(testDb.runCommand({
- createIndexes: fromColl,
- indexes: [{
- key: {
- "a": 1,
- },
- name: idxName
- }]
- }));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- let testDb = node.getDB(dbName);
- assert.commandWorked(testDb.adminCommand({
- renameCollection: dbName + "." + fromColl,
- to: dbName + "." + toColl,
- }));
- assert.commandWorked(testDb.runCommand({dropIndexes: toColl, index: idxName}));
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest(testName);
- CommonOps(rollbackTest.getPrimary());
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/replsets/libs/rollback_test.js");
+
+const testName = "rollback_drop_index_after_rename";
+const dbName = testName;
+
+var fromColl = "fromColl";
+var toColl = "toColl";
+var idxName = "a_1";
+
+// Operations that will be present on both nodes, before the common point.
+let CommonOps = (node) => {
+ let testDb = node.getDB(dbName);
+ // This creates the collection implicitly and then creates the index.
+ assert.commandWorked(testDb.runCommand({
+ createIndexes: fromColl,
+ indexes: [{
+ key: {
+ "a": 1,
+ },
+ name: idxName
+ }]
+ }));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ let testDb = node.getDB(dbName);
+ assert.commandWorked(testDb.adminCommand({
+ renameCollection: dbName + "." + fromColl,
+ to: dbName + "." + toColl,
+ }));
+ assert.commandWorked(testDb.runCommand({dropIndexes: toColl, index: idxName}));
+};
+
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest(testName);
+CommonOps(rollbackTest.getPrimary());
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+rollbackTest.stop();
})();
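
// For reference, the test above is the cleanest instance of the state machine
// that every RollbackTest suite in this patch shares; a minimal sketch of the
// full transition order (all method names as used throughout these tests):
const rt = new RollbackTest("rollback_test_sketch");                  // "rollback_test_sketch" is a placeholder name
const rbNode = rt.transitionToRollbackOperations();                   // writes here become the rollback node's divergent ops
const syncSrc = rt.transitionToSyncSourceOperationsBeforeRollback();  // writes here become the sync source's divergent ops
rt.transitionToSyncSourceOperationsDuringRollback();                  // the rollback node reconnects and rolls back
rt.transitionToSteadyStateOperations();                               // steady state, plus data consistency checks
rt.stop();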
diff --git a/jstests/replsets/rollback_dup_ids.js b/jstests/replsets/rollback_dup_ids.js
index a56b2b9bc05..99c329b76d7 100644
--- a/jstests/replsets/rollback_dup_ids.js
+++ b/jstests/replsets/rollback_dup_ids.js
@@ -1,43 +1,42 @@
// When run with --majorityReadConcern=off, this test reproduces the bug described in SERVER-38925,
// where rolling back a delete followed by a restart produces documents with duplicate _id.
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- TestData.allowUncleanShutdowns = true;
- let dbName = "test";
- let sourceCollName = "coll";
+TestData.rollbackShutdowns = true;
+TestData.allowUncleanShutdowns = true;
+let dbName = "test";
+let sourceCollName = "coll";
- let doc1 = {_id: 1, x: "document_of_interest"};
+let doc1 = {_id: 1, x: "document_of_interest"};
- let CommonOps = (node) => {
- // Insert a document that will exist on all nodes.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1));
- };
+let CommonOps = (node) => {
+ // Insert a document that will exist on all nodes.
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1));
+};
- let RollbackOps = (node) => {
- // Delete the document on rollback node so it will be refetched from sync source.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].remove({_id: 1}));
- };
+let RollbackOps = (node) => {
+ // Delete the document on rollback node so it will be refetched from sync source.
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].remove({_id: 1}));
+};
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest();
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest();
+CommonOps(rollbackTest.getPrimary());
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Kill and restart the node that rolled back.
- rollbackTest.restartNode(0, 9);
-
- // Check the replica set.
- rollbackTest.stop();
+// Kill and restart the node that rolled back.
+rollbackTest.restartNode(0, 9);
+// Check the replica set.
+rollbackTest.stop();
}()); \ No newline at end of file
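
// The duplicate-_id symptom from SERVER-38925 shows up as two documents with
// the same _id surviving the restart; a hedged post-restart probe (collection
// and database names from this test, the probe itself is illustrative only):
const survivors = rollbackTest.getPrimary().getDB(dbName)[sourceCollName].find({_id: 1}).toArray();
assert.lte(survivors.length, 1, "documents with duplicate _id: " + tojson(survivors));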
diff --git a/jstests/replsets/rollback_files_no_prepare_conflict.js b/jstests/replsets/rollback_files_no_prepare_conflict.js
index 1eb1e7a3a4c..40cc954b068 100644
--- a/jstests/replsets/rollback_files_no_prepare_conflict.js
+++ b/jstests/replsets/rollback_files_no_prepare_conflict.js
@@ -8,49 +8,49 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const name = "rollback_files_no_prepare_conflicts";
- const dbName = "test";
- const collName = name;
+const name = "rollback_files_no_prepare_conflicts";
+const dbName = "test";
+const collName = name;
- const rollbackTest = new RollbackTest(name);
+const rollbackTest = new RollbackTest(name);
- let primary = rollbackTest.getPrimary();
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+let primary = rollbackTest.getPrimary();
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- jsTestLog("Issue an insert that will be common to both nodes.");
- assert.commandWorked(testColl.insert({_id: 42, a: "one"}));
+jsTestLog("Issue an insert that will be common to both nodes.");
+assert.commandWorked(testColl.insert({_id: 42, a: "one"}));
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- jsTestLog("Make an update to that document outside of a transaction on the rollback node.");
- assert.commandWorked(testColl.update({_id: 42, a: "one"}, {_id: 42, a: "two"}));
+jsTestLog("Make an update to that document outside of a transaction on the rollback node.");
+assert.commandWorked(testColl.update({_id: 42, a: "one"}, {_id: 42, a: "two"}));
- session.startTransaction();
+session.startTransaction();
- jsTestLog("Update the same document on the same node, this time as part of a transaction.");
- assert.commandWorked(sessionColl.update({_id: 42, a: "two"}, {_id: 42, a: "three"}));
+jsTestLog("Update the same document on the same node, this time as part of a transaction.");
+assert.commandWorked(sessionColl.update({_id: 42, a: "two"}, {_id: 42, a: "three"}));
- jsTestLog("Prepare the transaction on the rollback node.");
- PrepareHelpers.prepareTransaction(session, {w: 1});
+jsTestLog("Prepare the transaction on the rollback node.");
+PrepareHelpers.prepareTransaction(session, {w: 1});
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- jsTestLog("Verify that the document is in the same state as it was at the common point.");
- primary = rollbackTest.getPrimary();
- testDB = primary.getDB(dbName);
- testColl = testDB.getCollection(collName);
- assert.docEq(testColl.findOne({_id: 42}), {_id: 42, a: "one"});
+jsTestLog("Verify that the document is in the same state as it was at the common point.");
+primary = rollbackTest.getPrimary();
+testDB = primary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+assert.docEq(testColl.findOne({_id: 42}), {_id: 42, a: "one"});
- rollbackTest.stop();
+rollbackTest.stop();
})();
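
// PrepareHelpers above wraps the prepareTransaction/commitTransaction admin
// commands; a minimal sketch of the lifecycle, assuming prepare_helpers.js is
// loaded and `session` has an open transaction:
const prepareTs = PrepareHelpers.prepareTransaction(session, {w: 1});  // w: 1 keeps the prepare from becoming majority-committed
PrepareHelpers.commitTransaction(session, prepareTs);                  // commits at the returned prepare timestamp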
diff --git a/jstests/replsets/rollback_prepare_transaction.js b/jstests/replsets/rollback_prepare_transaction.js
index 26e497df4c1..1fa3bdd2c15 100644
--- a/jstests/replsets/rollback_prepare_transaction.js
+++ b/jstests/replsets/rollback_prepare_transaction.js
@@ -4,85 +4,85 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/replsets/libs/rollback_files.js");
-
- const rollbackTest = new RollbackTest();
- const rollbackNode = rollbackTest.getPrimary();
-
- const testDB = rollbackNode.getDB("test");
- const collName = "rollback_prepare_transaction";
- const testColl = testDB.getCollection(collName);
-
- // We perform some operations on the collection aside from starting and preparing a transaction
- // in order to cause the count diff computed by replication to be non-zero.
- assert.commandWorked(testColl.insert({_id: "a"}));
-
- // Start two separate sessions for running transactions. On 'session1', we will run a prepared
- // transaction whose commit operation gets rolled back, and on 'session2', we will run a
- // prepared transaction whose prepare operation gets rolled back.
- const session1 = rollbackNode.startSession();
- const session1DB = session1.getDatabase(testDB.getName());
- const session1Coll = session1DB.getCollection(collName);
-
- const session2 = rollbackNode.startSession();
- const session2DB = session2.getDatabase(testDB.getName());
- const session2Coll = session2DB.getCollection(collName);
-
- // Prepare a transaction whose commit operation will be rolled back.
- session1.startTransaction();
- assert.commandWorked(session1Coll.insert({_id: "t2_a"}));
- assert.commandWorked(session1Coll.insert({_id: "t2_b"}));
- assert.commandWorked(session1Coll.insert({_id: "t2_c"}));
- let prepareTs = PrepareHelpers.prepareTransaction(session1);
-
- rollbackTest.transitionToRollbackOperations();
-
- // The following operations will be rolled-back.
- assert.commandWorked(testColl.insert({_id: "b"}));
-
- session2.startTransaction();
- assert.commandWorked(session2Coll.insert({_id: "t1"}));
-
- // Use w: 1 to simulate a prepare that will not become majority-committed.
- PrepareHelpers.prepareTransaction(session2, {w: 1});
-
- // Commit the transaction that was prepared before the common point.
- PrepareHelpers.commitTransaction(session1, prepareTs);
-
- // This is not exactly correct, but characterizes the current behavior of fastcount, which
- // includes the prepared but uncommitted transaction in the collection count.
- assert.eq(6, testColl.count());
-
- // Check the visible documents.
- arrayEq([{_id: "a"}, {_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}],
- testColl.find().toArray());
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- // Skip consistency checks so they don't conflict with the prepared transaction.
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
-
- // Both the regular insert and prepared insert should be rolled-back.
- assert.sameMembers([{_id: "a"}], testColl.find().toArray());
-
- // Confirm that the rollback wrote deleted documents to a file.
- const replTest = rollbackTest.getTestFixture();
- const expectedDocs = [{_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}];
- checkRollbackFiles(replTest.getDbPath(rollbackNode), testColl.getFullName(), expectedDocs);
-
- // Allow the test to complete by aborting the left over prepared transaction.
- jsTestLog("Aborting the prepared transaction on session " + tojson(session1.getSessionId()));
- let adminDB = rollbackTest.getPrimary().getDB("admin");
- assert.commandWorked(adminDB.adminCommand({
- abortTransaction: 1,
- lsid: session1.getSessionId(),
- txnNumber: session1.getTxnNumber_forTesting(),
- autocommit: false
- }));
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_files.js");
+
+const rollbackTest = new RollbackTest();
+const rollbackNode = rollbackTest.getPrimary();
+
+const testDB = rollbackNode.getDB("test");
+const collName = "rollback_prepare_transaction";
+const testColl = testDB.getCollection(collName);
+
+// We perform some operations on the collection aside from starting and preparing a transaction
+// in order to cause the count diff computed by replication to be non-zero.
+assert.commandWorked(testColl.insert({_id: "a"}));
+
+// Start two separate sessions for running transactions. On 'session1', we will run a prepared
+// transaction whose commit operation gets rolled back, and on 'session2', we will run a
+// prepared transaction whose prepare operation gets rolled back.
+const session1 = rollbackNode.startSession();
+const session1DB = session1.getDatabase(testDB.getName());
+const session1Coll = session1DB.getCollection(collName);
+
+const session2 = rollbackNode.startSession();
+const session2DB = session2.getDatabase(testDB.getName());
+const session2Coll = session2DB.getCollection(collName);
+
+// Prepare a transaction whose commit operation will be rolled back.
+session1.startTransaction();
+assert.commandWorked(session1Coll.insert({_id: "t2_a"}));
+assert.commandWorked(session1Coll.insert({_id: "t2_b"}));
+assert.commandWorked(session1Coll.insert({_id: "t2_c"}));
+let prepareTs = PrepareHelpers.prepareTransaction(session1);
+
+rollbackTest.transitionToRollbackOperations();
+
+// The following operations will be rolled-back.
+assert.commandWorked(testColl.insert({_id: "b"}));
+
+session2.startTransaction();
+assert.commandWorked(session2Coll.insert({_id: "t1"}));
+
+// Use w: 1 to simulate a prepare that will not become majority-committed.
+PrepareHelpers.prepareTransaction(session2, {w: 1});
+
+// Commit the transaction that was prepared before the common point.
+PrepareHelpers.commitTransaction(session1, prepareTs);
+
+// This is not exactly correct, but characterizes the current behavior of fastcount, which
+// includes the prepared but uncommitted transaction in the collection count.
+assert.eq(6, testColl.count());
+
+// Check the visible documents.
+assert(arrayEq([{_id: "a"}, {_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}],
+               testColl.find().toArray()));
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+// Skip consistency checks so they don't conflict with the prepared transaction.
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+
+// Both the regular insert and prepared insert should be rolled-back.
+assert.sameMembers([{_id: "a"}], testColl.find().toArray());
+
+// Confirm that the rollback wrote deleted documents to a file.
+const replTest = rollbackTest.getTestFixture();
+const expectedDocs = [{_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}];
+checkRollbackFiles(replTest.getDbPath(rollbackNode), testColl.getFullName(), expectedDocs);
+
+// Allow the test to complete by aborting the left over prepared transaction.
+jsTestLog("Aborting the prepared transaction on session " + tojson(session1.getSessionId()));
+let adminDB = rollbackTest.getPrimary().getDB("admin");
+assert.commandWorked(adminDB.adminCommand({
+ abortTransaction: 1,
+ lsid: session1.getSessionId(),
+ txnNumber: session1.getTxnNumber_forTesting(),
+ autocommit: false
+}));
+
+rollbackTest.stop();
})();
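
// The count() assertions in the test above read 'fastcount', the collection's
// cached size, which still includes prepared-but-uncommitted inserts; a cursor
// count only sees visible documents. A sketch of the contrast at the point
// where the test asserted a fastcount of 6:
assert.eq(6, testColl.count());           // fastcount: includes the prepared insert {_id: "t1"}
assert.eq(5, testColl.find().itcount());  // cursor count: only the five visible documents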
diff --git a/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js b/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js
index 22144fecc1e..474d8246f93 100644
--- a/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js
+++ b/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js
@@ -6,107 +6,107 @@
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "rollback_reconstructs_transactions_prepared_before_stable";
-
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
-
- // Create collection we're using beforehand.
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
-
- // Start a session on the primary.
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB.getCollection(collName);
-
- assert.commandWorked(sessionColl.insert({_id: 0}));
-
- // Prepare the transaction on the session.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 2);
-
- jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
- // Doing a majority write after preparing the transaction ensures that the stable timestamp is
- // past the prepare timestamp because this write must be in the committed snapshot.
- assert.commandWorked(
- testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 3);
-
- // Check that we have one transaction in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- // The transaction should still be prepared after going through rollback.
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
-
- // Make sure there is still one transaction in the transactions table. This is because the
- // entry in the transactions table is made durable when a transaction is prepared.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- // Fastcount reflects the insert of the prepared transaction because was put back into prepare
- // at the end of rollback.
- assert.eq(testColl.count(), 3);
-
- // Make sure we cannot see the writes from the prepared transaction yet.
- arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]);
-
- // Get the correct primary after the topology changes.
- primary = rollbackTest.getPrimary();
- rollbackTest.awaitReplication();
-
- // Make sure we can successfully commit the recovered prepared transaction.
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the prepared transaction causes
- // a write conflict.
- assert.commandFailedWithCode(
- sessionDB.runCommand(
- {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Commit the transaction.
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- // Make sure we can see the effects of the prepared transaction.
- arrayEq(testColl.find().toArray(), [{_id: 0, a: 1}, {_id: 1}, {_id: 2}]);
- assert.eq(testColl.count(), 3);
-
- rollbackTest.stop();
+"use strict";
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "rollback_reconstructs_transactions_prepared_before_stable";
+
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
+
+// Create collection we're using beforehand.
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+assert.commandWorked(testDB.runCommand({create: collName}));
+
+// Start a session on the primary.
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB.getCollection(collName);
+
+assert.commandWorked(sessionColl.insert({_id: 0}));
+
+// Prepare the transaction on the session.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 2);
+
+jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
+// Doing a majority write after preparing the transaction ensures that the stable timestamp is
+// past the prepare timestamp because this write must be in the committed snapshot.
+assert.commandWorked(
+ testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 3);
+
+// Check that we have one transaction in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+// The transaction should still be prepared after going through rollback.
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+
+// Make sure there is still one transaction in the transactions table. This is because the
+// entry in the transactions table is made durable when a transaction is prepared.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+// Fastcount reflects the insert of the prepared transaction because it was put back into the
+// prepared state at the end of rollback.
+assert.eq(testColl.count(), 3);
+
+// Make sure we cannot see the writes from the prepared transaction yet.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]));
+
+// Get the correct primary after the topology changes.
+primary = rollbackTest.getPrimary();
+rollbackTest.awaitReplication();
+
+// Make sure we can successfully commit the recovered prepared transaction.
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the prepared transaction causes
+// a write conflict.
+assert.commandFailedWithCode(
+ sessionDB.runCommand(
+ {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Commit the transaction.
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+// Make sure we can see the effects of the prepared transaction.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0, a: 1}, {_id: 1}, {_id: 2}]));
+assert.eq(testColl.count(), 3);
+
+rollbackTest.stop();
}()); \ No newline at end of file
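
// Committing a transaction that was reconstructed after rollback hinges on
// re-attaching to the original logical session, as the test above does;
// condensed, the recipe is (all names from this test):
session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
session.setTxnNumber_forTesting(0);  // the shell does not restore a re-created session's txnNumber
assert.commandWorked(session.getDatabase(dbName).adminCommand({
    commitTransaction: 1,
    commitTimestamp: prepareTimestamp,  // the test commits at the prepare timestamp itself
    txnNumber: NumberLong(session.getTxnNumber_forTesting()),
    autocommit: false,
}));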
diff --git a/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js b/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js
index 712f7a8286a..45c0127dab7 100644
--- a/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js
+++ b/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js
@@ -10,94 +10,93 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "commit_transaction_rollback_recovery_data_already_applied";
-
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // Construct a large array such that two arrays in the same document are not greater than the
- // 16MB limit, but that three such arrays in the same document are greater than 16MB. This will
- // be helpful in recreating an idempotency issue that exists when applying the operations from
- // a transaction after the data already reflects the transaction.
- const largeArray = new Array(7 * 1024 * 1024).join('x');
- assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
-
- // Start a transaction in a session that will be prepared and committed before rollback.
- let session = primary.startSession();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const recoveryTimestamp =
- assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
-
- jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
-
- // Hold back the stable timestamp to be right after the prepareTimestamp, but before the
- // commitTransaction oplog entry so that the transaction will be replayed during rollback
- // recovery.
- assert.commandWorked(testDB.adminCommand({
- "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
- "mode": 'alwaysOn',
- "data": {"timestamp": recoveryTimestamp}
- }));
-
- // Enable fail point "WTSetOldestTSToStableTS" to prevent lag between stable timestamp and
- // oldest timestamp during rollback recovery. We avoid this lag to test if we can prepare
- // and commit a transaction older than oldest timestamp.
- assert.commandWorked(
- testDB.adminCommand({"configureFailPoint": 'WTSetOldestTSToStableTS', "mode": 'alwaysOn'}));
-
- jsTestLog("Committing the transaction");
-
- // Since this transaction is committed after the last snapshot, this commit oplog entry will be
- // replayed during rollback recovery.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- // During rollback, the data restored via rollback to stable timestamp should not reflect the
- // transaction. If not, replaying the commit oplog entry during rollback recovery would throw
- // BSONTooLarge exception.
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- try {
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- } finally {
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'holdStableTimestampAtSpecificTimestamp', mode: 'off'}));
- }
-
- rollbackTest.transitionToSteadyStateOperations();
-
- primary = rollbackTest.getPrimary();
-
- // Make sure that the data reflects all the operations from the transaction after recovery.
- testDB = primary.getDB(dbName);
- const res = testDB[collName].findOne({_id: 1});
- assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
-
- // Make sure that another write on the same document from the transaction has no write conflict.
- // Also, make sure that we can run another transaction after recovery without any problems.
- session = primary.startSession();
- sessionDB = session.getDatabase(dbName);
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
-
- rollbackTest.stop();
-
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "commit_transaction_rollback_recovery_data_already_applied";
+
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// Construct a large array such that two arrays in the same document are not greater than the
+// 16MB limit, but three such arrays in the same document are greater than 16MB. This will
+// be helpful in reproducing an idempotency issue that exists when applying the operations from
+// a transaction after the data already reflects the transaction.
+const largeArray = new Array(7 * 1024 * 1024).join('x');
+assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
+
+// Start a transaction in a session that will be prepared and committed before rollback.
+let session = primary.startSession();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+const recoveryTimestamp =
+ assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
+
+jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
+
+// Hold back the stable timestamp to be right after the prepareTimestamp, but before the
+// commitTransaction oplog entry so that the transaction will be replayed during rollback
+// recovery.
+assert.commandWorked(testDB.adminCommand({
+ "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
+ "mode": 'alwaysOn',
+ "data": {"timestamp": recoveryTimestamp}
+}));
+
+// Enable fail point "WTSetOldestTSToStableTS" to prevent lag between stable timestamp and
+// oldest timestamp during rollback recovery. We avoid this lag to test if we can prepare
+// and commit a transaction older than oldest timestamp.
+assert.commandWorked(
+ testDB.adminCommand({"configureFailPoint": 'WTSetOldestTSToStableTS', "mode": 'alwaysOn'}));
+
+jsTestLog("Committing the transaction");
+
+// Since this transaction is committed after the last snapshot, this commit oplog entry will be
+// replayed during rollback recovery.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+// During rollback, the data restored via rollback to stable timestamp should not reflect the
+// transaction. If not, replaying the commit oplog entry during rollback recovery would throw
+// BSONTooLarge exception.
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+try {
+ rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+} finally {
+ assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'holdStableTimestampAtSpecificTimestamp', mode: 'off'}));
+}
+
+rollbackTest.transitionToSteadyStateOperations();
+
+primary = rollbackTest.getPrimary();
+
+// Make sure that the data reflects all the operations from the transaction after recovery.
+testDB = primary.getDB(dbName);
+const res = testDB[collName].findOne({_id: 1});
+assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
+
+// Make sure that another write on the same document from the transaction has no write conflict.
+// Also, make sure that we can run another transaction after recovery without any problems.
+session = primary.startSession();
+sessionDB = session.getDatabase(dbName);
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
+
+rollbackTest.stop();
}());
diff --git a/jstests/replsets/rollback_remote_cursor_retry.js b/jstests/replsets/rollback_remote_cursor_retry.js
index 77d8fd5b58a..2e2db8c5df6 100644
--- a/jstests/replsets/rollback_remote_cursor_retry.js
+++ b/jstests/replsets/rollback_remote_cursor_retry.js
@@ -6,46 +6,45 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/libs/check_log.js");
- const testName = "rollback_remote_cursor_retry";
- const dbName = testName;
+const testName = "rollback_remote_cursor_retry";
+const dbName = testName;
- const rollbackTest = new RollbackTest(testName);
+const rollbackTest = new RollbackTest(testName);
- const replSet = rollbackTest.getTestFixture();
+const replSet = rollbackTest.getTestFixture();
- replSet.awaitReplication();
+replSet.awaitReplication();
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- const syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- // This failpoint is used to make sure that we have started rollback before turning on
- // 'failCommand'. Otherwise, we would be failing the 'find' command that we issue against
- // the sync source before we decide to go into rollback.
- assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "rollbackHangBeforeStart", mode: "alwaysOn"}));
+// This failpoint is used to make sure that we have started rollback before turning on
+// 'failCommand'. Otherwise, we would be failing the 'find' command that we issue against
+// the sync source before we decide to go into rollback.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "rollbackHangBeforeStart", mode: "alwaysOn"}));
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(rollbackNode, "rollback - rollbackHangBeforeStart fail point enabled");
+// Ensure that we've hit the failpoint before moving on.
+checkLog.contains(rollbackNode, "rollback - rollbackHangBeforeStart fail point enabled");
- // Fail the 'find' command exactly twice.
- jsTestLog("Failing the next two 'find' commands.");
- assert.commandWorked(syncSource.adminCommand({
- configureFailPoint: "failCommand",
- data: {errorCode: 279, failInternalCommands: true, failCommands: ["find"]},
- mode: {times: 2}
- }));
+// Fail the 'find' command exactly twice.
+jsTestLog("Failing the next two 'find' commands.");
+assert.commandWorked(syncSource.adminCommand({
+ configureFailPoint: "failCommand",
+ data: {errorCode: 279, failInternalCommands: true, failCommands: ["find"]},
+ mode: {times: 2}
+}));
- // Let rollback proceed.
- assert.commandWorked(
- rollbackNode.adminCommand({configureFailPoint: "rollbackHangBeforeStart", mode: "off"}));
-
- rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+// Let rollback proceed.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "rollbackHangBeforeStart", mode: "off"}));
+rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.stop();
})();
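
// 'failCommand' above is the general-purpose server-side error injector; a
// minimal sketch that fails the next two 'find' commands (field values mirror
// this test):
assert.commandWorked(syncSource.adminCommand({
    configureFailPoint: "failCommand",
    mode: {times: 2},  // fire exactly twice, then disarm automatically
    data: {
        failCommands: ["find"],      // commands to intercept
        errorCode: 279,              // the error code handed back to the caller
        failInternalCommands: true,  // also fail commands issued by other cluster members
    },
}));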
diff --git a/jstests/replsets/rollback_rename_collection_on_sync_source.js b/jstests/replsets/rollback_rename_collection_on_sync_source.js
index 0f781eb6b8d..be03faa94bf 100644
--- a/jstests/replsets/rollback_rename_collection_on_sync_source.js
+++ b/jstests/replsets/rollback_rename_collection_on_sync_source.js
@@ -5,48 +5,47 @@
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- let dbName = "rollback_rename_collection_on_sync_source";
- let sourceCollName = "sourceColl";
- let destCollName = "destColl";
+let dbName = "rollback_rename_collection_on_sync_source";
+let sourceCollName = "sourceColl";
+let destCollName = "destColl";
- let doc1 = {x: 1};
- let doc2 = {x: 2};
+let doc1 = {x: 1};
+let doc2 = {x: 2};
- let CommonOps = (node) => {
- // Insert a document that will exist on the sync source and rollback node.
- assert.writeOK(node.getDB(dbName)[sourceCollName].insert(doc1));
- };
+let CommonOps = (node) => {
+ // Insert a document that will exist on the sync source and rollback node.
+ assert.writeOK(node.getDB(dbName)[sourceCollName].insert(doc1));
+};
- let RollbackOps = (node) => {
- // Delete the document on rollback node so it will be refetched from sync source.
- assert.writeOK(node.getDB(dbName)[sourceCollName].remove(doc1));
- };
+let RollbackOps = (node) => {
+ // Delete the document on rollback node so it will be refetched from sync source.
+ assert.writeOK(node.getDB(dbName)[sourceCollName].remove(doc1));
+};
- let SyncSourceOps = (node) => {
- // Rename the original collection on the sync source.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName));
- assert.writeOK(node.getDB(dbName)[destCollName].insert(doc2));
- };
+let SyncSourceOps = (node) => {
+ // Rename the original collection on the sync source.
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName));
+ assert.writeOK(node.getDB(dbName)[destCollName].insert(doc2));
+};
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest();
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest();
+CommonOps(rollbackTest.getPrimary());
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- SyncSourceOps(syncSourceNode);
+let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+SyncSourceOps(syncSourceNode);
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check the replica set.
- rollbackTest.stop();
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+// Check the replica set.
+rollbackTest.stop();
}()); \ No newline at end of file
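
// The shell helper used in SyncSourceOps above is shorthand for the
// renameCollection admin command used in rollback_drop_index_after_rename.js;
// a minimal sketch of the equivalence (names from this test):
assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName));
// ... is the same operation as ...
assert.commandWorked(
    node.adminCommand({renameCollection: dbName + "." + sourceCollName, to: dbName + "." + destCollName}));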
diff --git a/jstests/replsets/rollback_rename_count.js b/jstests/replsets/rollback_rename_count.js
index f1376fe7dc5..51fa88f5324 100644
--- a/jstests/replsets/rollback_rename_count.js
+++ b/jstests/replsets/rollback_rename_count.js
@@ -2,58 +2,58 @@
* Tests that rollback corrects fastcounts even when collections are renamed.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- const testName = "rollback_rename_count";
- const dbName = testName;
+const testName = "rollback_rename_count";
+const dbName = testName;
- const rollbackTest = new RollbackTest(testName);
- const primary = rollbackTest.getPrimary();
- const testDb = primary.getDB(dbName);
+const rollbackTest = new RollbackTest(testName);
+const primary = rollbackTest.getPrimary();
+const testDb = primary.getDB(dbName);
- // This collection is non-empty at the stable timestamp.
- const fromCollName1 = "fromCollName1";
- const toCollName1 = "toCollName1";
- let coll1 = testDb.getCollection(fromCollName1);
- assert.commandWorked(coll1.insert({a: 1}));
+// This collection is non-empty at the stable timestamp.
+const fromCollName1 = "fromCollName1";
+const toCollName1 = "toCollName1";
+let coll1 = testDb.getCollection(fromCollName1);
+assert.commandWorked(coll1.insert({a: 1}));
- rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+rollbackTest.awaitLastOpCommitted();
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- assert.commandWorked(coll1.renameCollection(toCollName1));
- coll1 = testDb.getCollection(toCollName1);
- assert.commandWorked(coll1.insert({b: 1}));
+assert.commandWorked(coll1.renameCollection(toCollName1));
+coll1 = testDb.getCollection(toCollName1);
+assert.commandWorked(coll1.insert({b: 1}));
- // This collection is empty at the stable timestamp.
- const fromCollName2 = "fromCollName2";
- const toCollName2 = "toCollName2";
- let coll2 = testDb.getCollection(fromCollName2);
- assert.commandWorked(coll2.insert({c: 1}));
- assert.commandWorked(coll2.renameCollection(toCollName2));
- coll2 = testDb.getCollection(toCollName2);
- assert.commandWorked(coll2.insert({d: 1}));
+// This collection is empty at the stable timestamp.
+const fromCollName2 = "fromCollName2";
+const toCollName2 = "toCollName2";
+let coll2 = testDb.getCollection(fromCollName2);
+assert.commandWorked(coll2.insert({c: 1}));
+assert.commandWorked(coll2.renameCollection(toCollName2));
+coll2 = testDb.getCollection(toCollName2);
+assert.commandWorked(coll2.insert({d: 1}));
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(coll1.insert({e: 1}));
+assert.commandWorked(coll1.insert({e: 1}));
- assert.eq(coll1.find().itcount(), 3);
- assert.eq(coll2.find().itcount(), 2);
+assert.eq(coll1.find().itcount(), 3);
+assert.eq(coll2.find().itcount(), 2);
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 2);
+assert.eq(coll1.find().itcount(), 2);
+assert.eq(coll2.find().itcount(), 2);
- rollbackTest.stop();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_time_limit_param.js b/jstests/replsets/rollback_time_limit_param.js
index 879876dbfcf..345e38f5e89 100644
--- a/jstests/replsets/rollback_time_limit_param.js
+++ b/jstests/replsets/rollback_time_limit_param.js
@@ -7,48 +7,47 @@
(function() {
- "use strict";
+"use strict";
- const testName = "rollback_time_limit_param";
+const testName = "rollback_time_limit_param";
- // Make sure that we reject non-positive values for this parameter set on startup.
- let rstWithBadStartupOptions = new ReplSetTest(
- {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=-50"}});
+// Make sure that we reject non-positive values for this parameter when it is set at startup.
+let rstWithBadStartupOptions = new ReplSetTest(
+ {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=-50"}});
- assert.throws(function() {
- rstWithBadStartupOptions.startSet();
+assert.throws(function() {
+ rstWithBadStartupOptions.startSet();
+});
- });
+assert(rawMongoProgramOutput().match("Bad value for parameter \"rollbackTimeLimitSecs\""),
+ "failed to reject bad value for parameter");
- assert(rawMongoProgramOutput().match("Bad value for parameter \"rollbackTimeLimitSecs\""),
- "failed to reject bad value for parameter");
+// Now initialize the same parameter correctly on startup.
+let rst = new ReplSetTest(
+ {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=1000"}});
+rst.startSet();
+rst.initiate();
- // Now initialize the same parameter correctly on startup.
- let rst = new ReplSetTest(
- {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=1000"}});
- rst.startSet();
- rst.initiate();
+let primary = rst.getPrimary();
- let primary = rst.getPrimary();
+// Check that the value of 'rollbackTimeLimitSecs' was initialized correctly on startup.
+let valueSetOnStartup =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
+ .rollbackTimeLimitSecs;
+assert.eq(NumberLong(1000), valueSetOnStartup);
- // Check that the value of 'rollbackTimeLimitSecs' was initialized correctly on startup.
- let valueSetOnStartup =
- assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
- .rollbackTimeLimitSecs;
- assert.eq(NumberLong(1000), valueSetOnStartup);
+// Check that the value of 'rollbackTimeLimitSecs' was set correctly at runtime.
+assert.commandWorked(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 2000}));
+let valueSetAtRuntime =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
+ .rollbackTimeLimitSecs;
+assert.eq(NumberLong(2000), valueSetAtRuntime);
- // Check that the value of 'rollbackTimeLimitSecs' was set correctly at runtime.
- assert.commandWorked(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 2000}));
- let valueSetAtRuntime =
- assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
- .rollbackTimeLimitSecs;
- assert.eq(NumberLong(2000), valueSetAtRuntime);
+// Make sure that we reject non-positive values for this parameter when it is set at runtime.
+assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: -5}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 0}),
+ ErrorCodes.BadValue);
- // Make sure that we reject non-positive values for this parameter set at runtime.
- assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: -5}),
- ErrorCodes.BadValue);
- assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 0}),
- ErrorCodes.BadValue);
-
- rst.stopSet();
+rst.stopSet();
})(); \ No newline at end of file
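
// The startup-time rejection above leans on a shell idiom worth calling out:
// let startSet() throw, then grep the captured mongod output. A minimal
// sketch with a placeholder bad value:
const badRst = new ReplSetTest(
    {nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=-1"}});
assert.throws(() => badRst.startSet());  // mongod refuses to start with a non-positive value
assert(rawMongoProgramOutput().match("Bad value for parameter"),
       "expected the parameter rejection message in the program output");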
diff --git a/jstests/replsets/rollback_transaction_table.js b/jstests/replsets/rollback_transaction_table.js
index e44894a5b4c..3c1a18b436e 100644
--- a/jstests/replsets/rollback_transaction_table.js
+++ b/jstests/replsets/rollback_transaction_table.js
@@ -17,218 +17,216 @@
* - A record for the third session id was created during oplog replay.
*/
(function() {
- "use strict";
-
- // This test drops a collection in the config database, which is not allowed under a session. It
- // also manually simulates a session, which is not compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- load("jstests/replsets/rslib.js");
-
- function assertSameRecordOnBothConnections(primary, secondary, lsid) {
- let primaryRecord = primary.getDB("config").transactions.findOne({"_id.id": lsid.id});
- let secondaryRecord = secondary.getDB("config").transactions.findOne({"_id.id": lsid.id});
-
- jsTestLog("Primary record: " + tojson(primaryRecord));
- jsTestLog("Secondary record: " + tojson(secondaryRecord));
-
- assert.eq(bsonWoCompare(primaryRecord, secondaryRecord),
- 0,
- "expected transaction records: " + tojson(primaryRecord) + " and " +
- tojson(secondaryRecord) + " to be the same for lsid: " + tojson(lsid));
- }
-
- function assertRecordHasTxnNumber(conn, lsid, txnNum) {
- let recordTxnNum = conn.getDB("config").transactions.findOne({"_id.id": lsid.id}).txnNum;
- assert.eq(recordTxnNum,
- txnNum,
- "expected node: " + conn + " to have txnNumber: " + txnNum + " for session id: " +
- lsid + " - instead found: " + recordTxnNum);
- }
-
- let testName = "rollback_transaction_table";
- let dbName = "test";
-
- let replTest = new ReplSetTest({
- name: testName,
- nodes: [
- // Primary flops between nodes 0 and 1.
- {},
- {},
- // Arbiter to sway elections.
- {rsConfig: {arbiterOnly: true}}
- ],
- useBridge: true,
- });
- let nodes = replTest.startSet();
- replTest.initiate();
-
- let downstream = nodes[0];
- let upstream = nodes[1];
- let arbiter = nodes[2];
-
- jsTestLog("Making sure 'downstream node' is the primary node.");
- assert.eq(downstream, replTest.getPrimary());
-
- // Renaming or dropping the transactions collection shouldn't crash if command is not rolled
- // back.
- assert.commandWorked(downstream.getDB("config").transactions.renameCollection("foo"));
- assert.commandWorked(downstream.getDB("config").foo.renameCollection("transactions"));
- assert(downstream.getDB("config").transactions.drop());
- assert.commandWorked(downstream.getDB("config").createCollection("transactions"));
-
- jsTestLog("Running a transaction on the 'downstream node' and waiting for it to replicate.");
- let firstLsid = {id: UUID()};
- let firstCmd = {
- insert: "foo",
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: firstLsid,
- txnNumber: NumberLong(5)
- };
-
- assert.commandWorked(downstream.getDB(dbName).runCommand(firstCmd));
- replTest.awaitReplication();
-
- // Both data bearing nodes should have the same record for the first session id.
- assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
-
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- jsTestLog(
- "Creating a partition between 'the downstream and arbiter node' and 'the upstream node.'");
- downstream.disconnect(upstream);
- arbiter.disconnect(upstream);
-
- jsTestLog(
- "Running a higher transaction for the existing session on only the 'downstream node.'");
- let higherTxnFirstCmd = {
- insert: "foo",
- documents: [{_id: 50}],
- ordered: false,
- lsid: firstLsid,
- txnNumber: NumberLong(20)
- };
-
- assert.commandWorked(downstream.getDB(dbName).runCommand(higherTxnFirstCmd));
-
- // Now the data bearing nodes should have different transaction table records for the first
- // session id.
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- jsTestLog("Running a transaction for a second session on the 'downstream node.'");
- let secondLsid = {id: UUID()};
- let secondCmd = {
- insert: "foo",
- documents: [{_id: 100}, {_id: 200}],
- ordered: false,
- lsid: secondLsid,
- txnNumber: NumberLong(100)
- };
-
- assert.commandWorked(downstream.getDB(dbName).runCommand(secondCmd));
-
- // Only the downstream node should have two transaction table records, one for the first and
- // second session ids.
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
- assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- // We do not disconnect the downstream node from the arbiter node at the same time as we
- // disconnect it from the upstream node. This prevents a race where the transaction using the
- // second session id must finish before the downstream node steps down from being the primary.
- jsTestLog(
- "Disconnecting the 'downstream node' from the 'arbiter node' and reconnecting the 'upstream node' to the 'arbiter node.'");
- downstream.disconnect(arbiter);
- upstream.reconnect(arbiter);
-
- jsTestLog("Waiting for the 'upstream node' to become the new primary.");
- waitForState(downstream, ReplSetTest.State.SECONDARY);
- waitForState(upstream, ReplSetTest.State.PRIMARY);
- assert.eq(upstream, replTest.getPrimary());
-
- jsTestLog("Running a new transaction for a third session on the 'upstream node.'");
- let thirdLsid = {id: UUID()};
- let thirdCmd = {
- insert: "foo",
- documents: [{_id: 1000}, {_id: 2000}],
- ordered: false,
- lsid: thirdLsid,
- txnNumber: NumberLong(1)
- };
-
- assert.commandWorked(upstream.getDB(dbName).runCommand(thirdCmd));
-
- // Now the upstream node also has two transaction table records, but for the first and third
- // session ids, not the first and second.
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
- assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
- assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
-
- // Gets the rollback ID of the downstream node before rollback occurs.
- let downstreamRBIDBefore = assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid;
-
- jsTestLog("Reconnecting the 'downstream node.'");
- downstream.reconnect(upstream);
- downstream.reconnect(arbiter);
-
- jsTestLog("Waiting for the 'downstream node' to complete rollback.");
- replTest.awaitReplication();
- replTest.awaitSecondaryNodes();
-
- // Ensure that connection to the downstream node is re-established, since the connection should
- // have gotten killed during the downstream node's transition to ROLLBACK state.
- reconnect(downstream);
-
- jsTestLog(
- "Checking the rollback ID of the downstream node to confirm that a rollback occurred.");
- assert.neq(downstreamRBIDBefore,
- assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid);
-
- // Verify the record for the first lsid rolled back to its original value, the record for the
- // second lsid was removed, and the record for the third lsid was created during oplog replay.
- jsTestLog("Verifying the transaction collection rolled back properly.");
-
- assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- assert.isnull(downstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
- assert.isnull(upstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
-
- assertSameRecordOnBothConnections(downstream, upstream, thirdLsid);
- assertRecordHasTxnNumber(downstream, thirdLsid, NumberLong(1));
- assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
-
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
-
- // Confirm the nodes are consistent.
- replTest.checkOplogs();
- replTest.checkReplicatedDataHashes(testName);
- replTest.checkCollectionCounts();
-
- replTest.stopSet();
+"use strict";
+
+// This test drops a collection in the config database, which is not allowed under a session. It
+// also manually simulates a session, which is not compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+load("jstests/replsets/rslib.js");
+
+function assertSameRecordOnBothConnections(primary, secondary, lsid) {
+ let primaryRecord = primary.getDB("config").transactions.findOne({"_id.id": lsid.id});
+ let secondaryRecord = secondary.getDB("config").transactions.findOne({"_id.id": lsid.id});
+
+ jsTestLog("Primary record: " + tojson(primaryRecord));
+ jsTestLog("Secondary record: " + tojson(secondaryRecord));
+
+ assert.eq(bsonWoCompare(primaryRecord, secondaryRecord),
+ 0,
+ "expected transaction records: " + tojson(primaryRecord) + " and " +
+ tojson(secondaryRecord) + " to be the same for lsid: " + tojson(lsid));
+}
+
+function assertRecordHasTxnNumber(conn, lsid, txnNum) {
+ let recordTxnNum = conn.getDB("config").transactions.findOne({"_id.id": lsid.id}).txnNum;
+ assert.eq(recordTxnNum,
+ txnNum,
+ "expected node: " + conn + " to have txnNumber: " + txnNum +
+ " for session id: " + lsid + " - instead found: " + recordTxnNum);
+}
+
+let testName = "rollback_transaction_table";
+let dbName = "test";
+
+let replTest = new ReplSetTest({
+ name: testName,
+ nodes: [
+ // Primary flops between nodes 0 and 1.
+ {},
+ {},
+ // Arbiter to sway elections.
+ {rsConfig: {arbiterOnly: true}}
+ ],
+ useBridge: true,
+});
+let nodes = replTest.startSet();
+replTest.initiate();
+
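+// Naming note: the "downstream" node is the one that will eventually roll back, using the
+// "upstream" node as its sync source.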
+let downstream = nodes[0];
+let upstream = nodes[1];
+let arbiter = nodes[2];
+
+jsTestLog("Making sure 'downstream node' is the primary node.");
+assert.eq(downstream, replTest.getPrimary());
+
+// Renaming or dropping the transactions collection shouldn't crash if the command is not rolled
+// back.
+assert.commandWorked(downstream.getDB("config").transactions.renameCollection("foo"));
+assert.commandWorked(downstream.getDB("config").foo.renameCollection("transactions"));
+assert(downstream.getDB("config").transactions.drop());
+assert.commandWorked(downstream.getDB("config").createCollection("transactions"));
+
+jsTestLog("Running a transaction on the 'downstream node' and waiting for it to replicate.");
+let firstLsid = {id: UUID()};
+let firstCmd = {
+ insert: "foo",
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: firstLsid,
+ txnNumber: NumberLong(5)
+};
+
+assert.commandWorked(downstream.getDB(dbName).runCommand(firstCmd));
+replTest.awaitReplication();
+
+// Both data bearing nodes should have the same record for the first session id.
+assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
+
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+jsTestLog(
+ "Creating a partition between 'the downstream and arbiter node' and 'the upstream node.'");
+downstream.disconnect(upstream);
+arbiter.disconnect(upstream);
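+// While partitioned, the downstream node retains the arbiter's vote and so remains primary.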
+
+jsTestLog("Running a higher transaction for the existing session on only the 'downstream node.'");
+let higherTxnFirstCmd = {
+ insert: "foo",
+ documents: [{_id: 50}],
+ ordered: false,
+ lsid: firstLsid,
+ txnNumber: NumberLong(20)
+};
+
+assert.commandWorked(downstream.getDB(dbName).runCommand(higherTxnFirstCmd));
+
+// Now the data bearing nodes should have different transaction table records for the first
+// session id.
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+jsTestLog("Running a transaction for a second session on the 'downstream node.'");
+let secondLsid = {id: UUID()};
+let secondCmd = {
+ insert: "foo",
+ documents: [{_id: 100}, {_id: 200}],
+ ordered: false,
+ lsid: secondLsid,
+ txnNumber: NumberLong(100)
+};
+
+assert.commandWorked(downstream.getDB(dbName).runCommand(secondCmd));
+
+// Only the downstream node should have two transaction table records, one each for the first
+// and second session ids.
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
+assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+// We do not disconnect the downstream node from the arbiter node at the same time as we
+// disconnect it from the upstream node. This avoids a race in which the transaction using the
+// second session id would have to finish before the downstream node steps down from primary.
+jsTestLog(
+ "Disconnecting the 'downstream node' from the 'arbiter node' and reconnecting the 'upstream node' to the 'arbiter node.'");
+downstream.disconnect(arbiter);
+upstream.reconnect(arbiter);
+
+jsTestLog("Waiting for the 'upstream node' to become the new primary.");
+waitForState(downstream, ReplSetTest.State.SECONDARY);
+waitForState(upstream, ReplSetTest.State.PRIMARY);
+assert.eq(upstream, replTest.getPrimary());
+
+jsTestLog("Running a new transaction for a third session on the 'upstream node.'");
+let thirdLsid = {id: UUID()};
+let thirdCmd = {
+ insert: "foo",
+ documents: [{_id: 1000}, {_id: 2000}],
+ ordered: false,
+ lsid: thirdLsid,
+ txnNumber: NumberLong(1)
+};
+
+assert.commandWorked(upstream.getDB(dbName).runCommand(thirdCmd));
+
+// Now the upstream node also has two transaction table records, but for the first and third
+// session ids, not the first and second.
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
+assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
+
+// Get the rollback ID of the downstream node before rollback occurs.
+let downstreamRBIDBefore = assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid;
+
+jsTestLog("Reconnecting the 'downstream node.'");
+downstream.reconnect(upstream);
+downstream.reconnect(arbiter);
+
+jsTestLog("Waiting for the 'downstream node' to complete rollback.");
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+// Ensure that the connection to the downstream node is re-established, since the connection
+// should have gotten killed during the downstream node's transition to ROLLBACK state.
+reconnect(downstream);
+
+jsTestLog("Checking the rollback ID of the downstream node to confirm that a rollback occurred.");
+assert.neq(downstreamRBIDBefore,
+ assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid);
+
+// Verify the record for the first lsid rolled back to its original value, the record for the
+// second lsid was removed, and the record for the third lsid was created during oplog replay.
+jsTestLog("Verifying the transaction collection rolled back properly.");
+
+assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+assert.isnull(downstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
+assert.isnull(upstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
+
+assertSameRecordOnBothConnections(downstream, upstream, thirdLsid);
+assertRecordHasTxnNumber(downstream, thirdLsid, NumberLong(1));
+assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
+
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
+
+// Confirm the nodes are consistent.
+replTest.checkOplogs();
+replTest.checkReplicatedDataHashes(testName);
+replTest.checkCollectionCounts();
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/rollback_transactions_count.js b/jstests/replsets/rollback_transactions_count.js
index f965211284a..1aa7ceeef1c 100644
--- a/jstests/replsets/rollback_transactions_count.js
+++ b/jstests/replsets/rollback_transactions_count.js
@@ -5,62 +5,62 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- const testName = "rollback_transactions_count";
- const dbName = testName;
- const collName = "txnCollName";
+const testName = "rollback_transactions_count";
+const dbName = testName;
+const collName = "txnCollName";
- const rollbackTest = new RollbackTest(testName);
- const primary = rollbackTest.getPrimary();
+const rollbackTest = new RollbackTest(testName);
+const primary = rollbackTest.getPrimary();
- const session1 = primary.startSession();
- const sessionDb1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDb1[collName];
- assert.commandWorked(sessionColl1.insert({a: 1}));
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({b: 1}));
- assert.commandWorked(session1.commitTransaction_forTesting());
+const session1 = primary.startSession();
+const sessionDb1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDb1[collName];
+assert.commandWorked(sessionColl1.insert({a: 1}));
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({b: 1}));
+assert.commandWorked(session1.commitTransaction_forTesting());
- rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+rollbackTest.awaitLastOpCommitted();
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- const session2 = primary.startSession();
- const sessionDb2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDb2[collName];
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({c: 1}));
- assert.commandWorked(session2.commitTransaction_forTesting());
+const session2 = primary.startSession();
+const sessionDb2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDb2[collName];
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({c: 1}));
+assert.commandWorked(session2.commitTransaction_forTesting());
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
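+// Operations from here until the sync source transition below run only on the rollback node
+// and will be undone during rollback.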
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({d: 1}));
- assert.commandWorked(session2.commitTransaction_forTesting());
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({d: 1}));
+assert.commandWorked(session2.commitTransaction_forTesting());
- const session3 = primary.startSession();
- const sessionDb3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDb3[collName];
- session3.startTransaction();
- assert.commandWorked(sessionColl3.insert({e: 1}));
- assert.commandWorked(session3.commitTransaction_forTesting());
+const session3 = primary.startSession();
+const sessionDb3 = session3.getDatabase(dbName);
+const sessionColl3 = sessionDb3[collName];
+session3.startTransaction();
+assert.commandWorked(sessionColl3.insert({e: 1}));
+assert.commandWorked(session3.commitTransaction_forTesting());
- assert.eq(sessionColl1.find().itcount(), 5);
+assert.eq(sessionColl1.find().itcount(), 5);
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
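+// Only the writes from before the common point survive: three documents in the collection and
+// the transaction table entries for the first two sessions.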
- assert.eq(sessionColl1.find().itcount(), 3);
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+assert.eq(sessionColl1.find().itcount(), 3);
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
- rollbackTest.stop();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_unprepared_transactions.js b/jstests/replsets/rollback_unprepared_transactions.js
index fdd286399d6..b2bfaa76405 100644
--- a/jstests/replsets/rollback_unprepared_transactions.js
+++ b/jstests/replsets/rollback_unprepared_transactions.js
@@ -3,58 +3,58 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
-
- load('jstests/libs/check_log.js');
- load('jstests/replsets/libs/rollback_test.js');
- load('jstests/replsets/libs/rollback_files.js');
-
- // Operations that will be present on both nodes, before the common point.
- const dbName = 'test';
- const collName = 'test.t';
- const collNameShort = 't';
- let CommonOps = (node) => {
- const coll = node.getCollection(collName);
- const mydb = coll.getDB();
- assert.commandWorked(coll.insert({_id: 0}));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- const session = node.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collNameShort);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "a"}));
- assert.commandWorked(sessionColl.insert({_id: "b"}));
- assert.commandWorked(sessionColl.insert({_id: "c"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- };
-
- // Set up Rollback Test.
- const rollbackTest = new RollbackTest();
-
- CommonOps(rollbackTest.getPrimary());
-
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check collection count.
- const primary = rollbackTest.getPrimary();
- const coll = primary.getCollection(collName);
- assert.eq(1, coll.find().itcount());
- assert.eq(1, coll.count());
-
- // Confirm that the rollback wrote deleted documents to a file.
- const replTest = rollbackTest.getTestFixture();
- const expectedDocs = [{_id: "a"}, {_id: "b"}, {_id: "c"}];
- checkRollbackFiles(replTest.getDbPath(rollbackNode), collName, expectedDocs);
-
- rollbackTest.stop();
+'use strict';
+
+load('jstests/libs/check_log.js');
+load('jstests/replsets/libs/rollback_test.js');
+load('jstests/replsets/libs/rollback_files.js');
+
+// Operations that will be present on both nodes, before the common point.
+const dbName = 'test';
+const collName = 'test.t';
+const collNameShort = 't';
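+// Note that getCollection() on a connection takes the full namespace, while the session below
+// resolves the short name against its database.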
+let CommonOps = (node) => {
+ const coll = node.getCollection(collName);
+ const mydb = coll.getDB();
+ assert.commandWorked(coll.insert({_id: 0}));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ const session = node.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(collNameShort);
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert({_id: "a"}));
+ assert.commandWorked(sessionColl.insert({_id: "b"}));
+ assert.commandWorked(sessionColl.insert({_id: "c"}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+};
+
+// Set up Rollback Test.
+const rollbackTest = new RollbackTest();
+
+CommonOps(rollbackTest.getPrimary());
+
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check collection count.
+const primary = rollbackTest.getPrimary();
+const coll = primary.getCollection(collName);
+assert.eq(1, coll.find().itcount());
+assert.eq(1, coll.count());
+
+// Confirm that the rollback wrote deleted documents to a file.
+const replTest = rollbackTest.getTestFixture();
+const expectedDocs = [{_id: "a"}, {_id: "b"}, {_id: "c"}];
+checkRollbackFiles(replTest.getDbPath(rollbackNode), collName, expectedDocs);
+
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_via_refetch_commit_transaction.js b/jstests/replsets/rollback_via_refetch_commit_transaction.js
index 380bcdb4fd2..317fc7b97f8 100644
--- a/jstests/replsets/rollback_via_refetch_commit_transaction.js
+++ b/jstests/replsets/rollback_via_refetch_commit_transaction.js
@@ -10,71 +10,73 @@
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "rollback_via_refetch_commit_transaction";
+const dbName = "test";
+const collName = "rollback_via_refetch_commit_transaction";
- // Provide RollbackTest with custom ReplSetTest so we can set forceRollbackViaRefetch.
- const rst = new ReplSetTest({
- name: collName,
- nodes: 3,
- useBridge: true,
- nodeOptions: {setParameter: "forceRollbackViaRefetch=true"}
- });
+// Provide RollbackTest with custom ReplSetTest so we can set forceRollbackViaRefetch.
+const rst = new ReplSetTest({
+ name: collName,
+ nodes: 3,
+ useBridge: true,
+ nodeOptions: {setParameter: "forceRollbackViaRefetch=true"}
+});
- rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
+rst.startSet();
+const config = rst.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
- const primaryNode = rst.getPrimary();
+const primaryNode = rst.getPrimary();
- // Create collection that exists on the sync source and rollback node.
- assert.commandWorked(
- primaryNode.getDB(dbName).runCommand({create: collName, writeConcern: {w: 2}}));
+// Create collection that exists on the sync source and rollback node.
+assert.commandWorked(
+ primaryNode.getDB(dbName).runCommand({create: collName, writeConcern: {w: 2}}));
- // Issue a 'prepareTransaction' command just to the current primary.
- const session = primaryNode.getDB(dbName).getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({"prepare": "entry"}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// Issue a 'prepareTransaction' command just to the current primary.
+const session = primaryNode.getDB(dbName).getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({"prepare": "entry"}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const rollbackTest = new RollbackTest(collName, rst);
- // Stop replication from the current primary ("rollbackNode").
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const rollbackTest = new RollbackTest(collName, rst);
+// Stop replication from the current primary ("rollbackNode").
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
- // Step down current primary and elect a node that lacks the commit.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+// Step down current primary and elect a node that lacks the commit.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- // Verify the old primary crashes trying to roll back.
- clearRawMongoProgramOutput();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- jsTestLog("Waiting for crash");
- assert.soon(function() {
- try {
- rollbackNode.getDB("local").runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- }, "Node did not fassert", ReplSetTest.kDefaultTimeoutMS);
+// Verify the old primary crashes trying to roll back.
+clearRawMongoProgramOutput();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+jsTestLog("Waiting for crash");
+assert.soon(function() {
+ try {
+ rollbackNode.getDB("local").runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+}, "Node did not fassert", ReplSetTest.kDefaultTimeoutMS);
- // Let the ReplSetTest know the old primary is down.
- rst.stop(rst.getNodeId(rollbackNode), undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
+// Let the ReplSetTest know the old primary is down.
+rst.stop(rst.getNodeId(rollbackNode), undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
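+// Rollback via refetch cannot undo a commitTransaction oplog entry, so the node is expected
+// to have fasserted with this message.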
- const msg = RegExp("Can't roll back this command yet: ");
- assert.soon(function() {
- return rawMongoProgramOutput().match(msg);
- }, "Node did not fail to roll back entry.");
+const msg = RegExp("Can't roll back this command yet: ");
+assert.soon(function() {
+ return rawMongoProgramOutput().match(msg);
+}, "Node did not fail to roll back entry.");
- // Transaction is still in prepared state and validation will be blocked, so skip it.
- rst.stopSet(undefined, undefined, {skipValidation: true});
+// Transaction is still in prepared state and validation will be blocked, so skip it.
+rst.stopSet(undefined, undefined, {skipValidation: true});
}());
diff --git a/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js b/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
index f4c4b7575ad..7cf47857d2a 100644
--- a/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
+++ b/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
@@ -10,65 +10,63 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
- load("jstests/replsets/libs/rollback_test.js");
+"use strict";
+load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "rollback_via_refetch_survives_nonexistent_collection_drop";
+const dbName = "test";
+const collName = "rollback_via_refetch_survives_nonexistent_collection_drop";
- // Provide RollbackTest with custom ReplSetTest so we can set enableMajorityReadConcern.
- const rst = new ReplSetTest({
- name: collName,
- nodes: 3,
- useBridge: true,
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
+// Provide RollbackTest with custom ReplSetTest so we can set enableMajorityReadConcern.
+const rst = new ReplSetTest(
+ {name: collName, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
+rst.startSet();
+const config = rst.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
- const rollbackTest = new RollbackTest(collName, rst);
+const rollbackTest = new RollbackTest(collName, rst);
- // Stop replication from the current primary, the rollback node.
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- const rollbackDB = rollbackNode.getDB(dbName);
+// Stop replication from the current primary, the rollback node.
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const rollbackDB = rollbackNode.getDB(dbName);
- jsTestLog("Turning on the rollbackExitEarlyAfterCollectionDrop fail point");
- assert.commandWorked(rollbackDB.adminCommand(
- {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'alwaysOn'}));
+jsTestLog("Turning on the rollbackExitEarlyAfterCollectionDrop fail point");
+assert.commandWorked(rollbackDB.adminCommand(
+ {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'alwaysOn'}));
- // Create a collection on the rollback node.
- assert.commandWorked(rollbackDB.runCommand({create: collName}));
+// Create a collection on the rollback node.
+assert.commandWorked(rollbackDB.runCommand({create: collName}));
- // Step down the current primary and elect the node that does not have the collection.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+// Step down the current primary and elect the node that does not have the collection.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- jsTestLog("Attempting to roll back.");
- // Make the old primary rollback against the new primary. This attempt should fail because the
- // rollbackExitEarlyAfterCollectionDrop fail point is set. We fail with a recoverable error
- // so that the rollback will be retried.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+jsTestLog("Attempting to roll back.");
+// Make the old primary roll back against the new primary. This attempt should fail because the
+// rollbackExitEarlyAfterCollectionDrop fail point is set. We fail with a recoverable error
+// so that the rollback will be retried.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- // Make sure we exit the rollback early by checking for the correct log messages.
- checkLog.contains(rollbackDB.getMongo(),
- "rollbackExitEarlyAfterCollectionDrop fail point enabled.");
+// Make sure we exit the rollback early by checking for the correct log messages.
+checkLog.contains(rollbackDB.getMongo(),
+ "rollbackExitEarlyAfterCollectionDrop fail point enabled.");
- jsTestLog("Turning off the rollbackExitEarlyAfterCollectionDrop fail point");
- // A rollback attempt after turning off the fail point should succeed even if we already
- // dropped the collection.
- assert.commandWorked(rollbackDB.adminCommand(
- {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'off'}));
+jsTestLog("Turning off the rollbackExitEarlyAfterCollectionDrop fail point");
+// A rollback attempt after turning off the fail point should succeed even if we already
+// dropped the collection.
+assert.commandWorked(rollbackDB.adminCommand(
+ {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'off'}));
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSteadyStateOperations();
- // After a successful rollback attempt, we should have seen the following log message to ensure
- // that we tried to drop a non-existent collection and continued without acquiring a database
- // lock.
- checkLog.contains(rollbackDB.getMongo(), "This collection does not exist");
+// After a successful rollback attempt, check for the following log message, which confirms
+// that we tried to drop a non-existent collection and continued without acquiring a database
+// lock.
+checkLog.contains(rollbackDB.getMongo(), "This collection does not exist");
- rollbackTest.stop();
+rollbackTest.stop();
}());
\ No newline at end of file
diff --git a/jstests/replsets/rollback_views.js b/jstests/replsets/rollback_views.js
index a7c89014de6..a802eb81663 100644
--- a/jstests/replsets/rollback_views.js
+++ b/jstests/replsets/rollback_views.js
@@ -17,122 +17,121 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
-
- // Run a command, return the result if it worked, or assert with a message otherwise.
- let checkedRunCommand = (db, cmd) =>
- ((res, msg) => (assert.commandWorked(res, msg), res))(db.runCommand(cmd), tojson(cmd));
-
- // Like db.getCollectionNames, but allows a filter.
- let getCollectionNames = (db, filter) => checkedRunCommand(db, {listCollections: 1, filter})
- .cursor.firstBatch.map((entry) => entry.name)
- .sort();
-
- // Function that checks that all array elements are equal, and returns the unique element.
- let checkEqual = (array, what) =>
- array.reduce((x, y) => assert.eq(x, y, "nodes don't have matching " + what) || x);
-
- // Helper function for verifying database contents at the end of the test.
- let checkFinalResults = (dbs, expectedColls, expectedViews) => ({
- dbname: checkEqual(dbs, "names"),
- colls: checkEqual(
- dbs.map((db) => getCollectionNames(db, {type: "collection"})).concat([expectedColls]),
- "colls"),
- views: checkEqual(
- dbs.map((db) => getCollectionNames(db, {type: "view"})).concat([expectedViews]),
- "views"),
- md5: checkEqual(dbs.map((db) => checkedRunCommand(db, {dbHash: 1}).md5), "hashes")
- });
-
- let name = "rollback_views.js";
- let replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- let nodes = replTest.nodeList();
-
- let conns = replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
-
- // Make sure we have a primary and that that primary is node A.
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-
- let nodeA = conns[0];
- let nodeB = conns[1];
- let arbiter = conns[2];
-
- let a1 = nodeA.getDB("test1");
- let b1 = nodeB.getDB("test1");
-
- // Initial data for both nodes.
- assert.writeOK(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}]));
-
- // Wait for initial replication.
- replTest.awaitReplication();
-
- // Isolate A and wait for B to become primary.
- nodeA.disconnect(nodeB);
- nodeA.disconnect(arbiter);
- assert.soon(() => replTest.getPrimary() == nodeB, "node B did not become primary as expected");
-
- // Do operations on B and B alone, these will be rolled back.
- // For the collection creation, first create a view with the same name, stressing rollback.
- assert.writeOK(b1.coll.remove({x: 2}));
- assert.commandWorked(b1.createView("x", "coll", [{$match: {x: 1}}]));
- let b2 = b1.getSiblingDB("test2");
- assert.writeOK(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
- assert.commandWorked(b2.createView("y", "coll", [{$match: {y: 2}}]));
- let b3 = b1.getSiblingDB("test3");
- assert.commandWorked(b3.createView("z", "coll", []));
- assert.writeOK(b3.system.views.remove({}));
- assert.writeOK(b3.z.insert([{z: 1}, {z: 2}, {z: 3}]));
- assert.writeOK(b3.z.remove({z: 1}));
-
- // Isolate B, bring A back into contact with the arbiter, then wait for A to become primary.
- // Insert new data into A, so that B will need to rollback when it reconnects to A.
- nodeB.disconnect(arbiter);
- replTest.awaitNoPrimary();
- nodeA.reconnect(arbiter);
- assert.soon(() => replTest.getPrimary() == nodeA, "nodeA did not become primary as expected");
-
- // A is now primary and will perform writes that must be copied by B after rollback.
- assert.eq(a1.coll.find().itcount(), 2, "expected two documents in test1.coll");
- assert.writeOK(a1.x.insert({_id: 3, x: "string in test1.x"}));
- let a2 = a1.getSiblingDB("test2");
- assert.commandWorked(a2.createView("y", "coll", [{$match: {y: 2}}]));
- assert.writeOK(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
- let a3 = a1.getSiblingDB("test3");
- assert.writeOK(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}]));
- assert.commandWorked(a3.createView("z", "coll", [{$match: {z: 3}}]));
-
- // A is collections: test1.{coll,x}, test2.{coll,system.views}, test3.{coll,system.views}
- // views: test2.y, test3.z
- // B is collections: test1.{coll,system.views}, test2.{coll,systems}, test3.{z,system.views}
- // views: test1.x, test2.y
- //
- // Put B back in contact with A and arbiter. A is primary, so B will rollback and catch up.
- nodeB.reconnect(arbiter);
- nodeA.reconnect(nodeB);
-
- awaitOpTime(nodeB, nodeA);
-
- // Await steady state and ensure the two nodes have the same contents.
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- // Check both nodes agree with each other and with the expected set of views and collections.
- print("All done, check that both nodes have the expected collections, views and md5.");
- printjson(checkFinalResults([a1, b1], ["coll", "x"], []));
- printjson(checkFinalResults([a2, b2], ["coll", "system.views"], ["y"]));
- printjson(checkFinalResults([a3, b3], ["coll", "system.views"], ["z"]));
-
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
-
- replTest.stopSet();
+"use strict";
+
+// Run a command, return the result if it worked, or assert with a message otherwise.
+let checkedRunCommand = (db, cmd) =>
+ ((res, msg) => (assert.commandWorked(res, msg), res))(db.runCommand(cmd), tojson(cmd));
+
+// Like db.getCollectionNames, but allows a filter.
+let getCollectionNames = (db, filter) => checkedRunCommand(db, {listCollections: 1, filter})
+ .cursor.firstBatch.map((entry) => entry.name)
+ .sort();
+
+// Function that checks that all array elements are equal, and returns the unique element.
+let checkEqual = (array, what) =>
+ array.reduce((x, y) => assert.eq(x, y, "nodes don't have matching " + what) || x);
+
+// Helper function for verifying database contents at the end of the test.
+let checkFinalResults = (dbs, expectedColls, expectedViews) => ({
+ dbname: checkEqual(dbs, "names"),
+ colls: checkEqual(
+ dbs.map((db) => getCollectionNames(db, {type: "collection"})).concat([expectedColls]),
+ "colls"),
+ views: checkEqual(
+ dbs.map((db) => getCollectionNames(db, {type: "view"})).concat([expectedViews]), "views"),
+ md5: checkEqual(dbs.map((db) => checkedRunCommand(db, {dbHash: 1}).md5), "hashes")
+});
+
+let name = "rollback_views.js";
+let replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+let nodes = replTest.nodeList();
+
+let conns = replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
+
+// Make sure we have a primary and that that primary is node A.
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+
+let nodeA = conns[0];
+let nodeB = conns[1];
+let arbiter = conns[2];
+
+let a1 = nodeA.getDB("test1");
+let b1 = nodeB.getDB("test1");
+
+// Initial data for both nodes.
+assert.writeOK(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}]));
+
+// Wait for initial replication.
+replTest.awaitReplication();
+
+// Isolate A and wait for B to become primary.
+nodeA.disconnect(nodeB);
+nodeA.disconnect(arbiter);
+assert.soon(() => replTest.getPrimary() == nodeB, "node B did not become primary as expected");
+
+// Do operations on B and B alone; these will be rolled back.
+// For the collection creation, first create a view with the same name, stressing rollback.
+assert.writeOK(b1.coll.remove({x: 2}));
+assert.commandWorked(b1.createView("x", "coll", [{$match: {x: 1}}]));
+let b2 = b1.getSiblingDB("test2");
+assert.writeOK(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
+assert.commandWorked(b2.createView("y", "coll", [{$match: {y: 2}}]));
+let b3 = b1.getSiblingDB("test3");
+assert.commandWorked(b3.createView("z", "coll", []));
+assert.writeOK(b3.system.views.remove({}));
+assert.writeOK(b3.z.insert([{z: 1}, {z: 2}, {z: 3}]));
+assert.writeOK(b3.z.remove({z: 1}));
+
+// Isolate B, bring A back into contact with the arbiter, then wait for A to become primary.
+// Insert new data into A, so that B will need to roll back when it reconnects to A.
+nodeB.disconnect(arbiter);
+replTest.awaitNoPrimary();
+nodeA.reconnect(arbiter);
+assert.soon(() => replTest.getPrimary() == nodeA, "nodeA did not become primary as expected");
+
+// A is now primary and will perform writes that must be copied by B after rollback.
+assert.eq(a1.coll.find().itcount(), 2, "expected two documents in test1.coll");
+assert.writeOK(a1.x.insert({_id: 3, x: "string in test1.x"}));
+let a2 = a1.getSiblingDB("test2");
+assert.commandWorked(a2.createView("y", "coll", [{$match: {y: 2}}]));
+assert.writeOK(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
+let a3 = a1.getSiblingDB("test3");
+assert.writeOK(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}]));
+assert.commandWorked(a3.createView("z", "coll", [{$match: {z: 3}}]));
+
+// A is collections: test1.{coll,x}, test2.{coll,system.views}, test3.{coll,system.views}
+// views: test2.y, test3.z
+// B is collections: test1.{coll,system.views}, test2.{coll,system.views}, test3.{z,system.views}
+// views: test1.x, test2.y
+//
+// Put B back in contact with A and arbiter. A is primary, so B will roll back and catch up.
+nodeB.reconnect(arbiter);
+nodeA.reconnect(nodeB);
+
+awaitOpTime(nodeB, nodeA);
+
+// Await steady state and ensure the two nodes have the same contents.
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+// Check both nodes agree with each other and with the expected set of views and collections.
+print("All done, check that both nodes have the expected collections, views and md5.");
+printjson(checkFinalResults([a1, b1], ["coll", "x"], []));
+printjson(checkFinalResults([a2, b2], ["coll", "system.views"], ["y"]));
+printjson(checkFinalResults([a3, b3], ["coll", "system.views"], ["z"]));
+
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/rollback_waits_for_bgindex_completion.js b/jstests/replsets/rollback_waits_for_bgindex_completion.js
index 32e99124587..e6433d558e6 100644
--- a/jstests/replsets/rollback_waits_for_bgindex_completion.js
+++ b/jstests/replsets/rollback_waits_for_bgindex_completion.js
@@ -5,87 +5,87 @@
* @tags: [requires_wiredtiger, requires_journaling, requires_majority_read_concern]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/check_log.js');
- load("jstests/replsets/rslib.js");
- load('jstests/replsets/libs/rollback_test.js');
+load('jstests/libs/check_log.js');
+load("jstests/replsets/rslib.js");
+load('jstests/replsets/libs/rollback_test.js');
+const dbName = "dbWithBgIndex";
+const collName = 'coll';
+let bgIndexThread;
+
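+// Helper to toggle the failpoint that hangs background index builds just after they start,
+// while they hold no locks.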
+function hangIndexBuildsFailpoint(node, fpMode) {
+ assert.commandWorked(node.adminCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: fpMode}));
+}
+
+/**
+ * A function to create a background index on the test collection in a parallel shell.
+ */
+function createBgIndexFn() {
+ // Re-define constants, since they are not shared between shells.
const dbName = "dbWithBgIndex";
- const collName = 'coll';
- let bgIndexThread;
-
- function hangIndexBuildsFailpoint(node, fpMode) {
- assert.commandWorked(node.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: fpMode}));
- }
-
- /**
- * A function to create a background index on the test collection in a parallel shell.
- */
- function createBgIndexFn() {
- // Re-define constants, since they are not shared between shells.
- const dbName = "dbWithBgIndex";
- const collName = "coll";
- let testDB = db.getSiblingDB(dbName);
- jsTestLog("Starting background index build from parallel shell.");
- assert.commandWorked(testDB[collName].createIndex({x: 1}, {background: true}));
- }
-
- /**
- * Operations that will get replicated to both replica set nodes before rollback.
- *
- * These common operations are run against the node that will eventually go into rollback, so
- * the failpoints will only be enabled on the rollback node.
- */
- function CommonOps(node) {
- // Create a collection on both data bearing nodes, so we can create an index on it.
- const testDB = node.getDB(dbName);
- assert.commandWorked(testDB.createCollection(collName));
-
- // Hang background index builds.
- hangIndexBuildsFailpoint(node, "alwaysOn");
-
- jsTestLog("Starting background index build parallel shell.");
- bgIndexThread = startParallelShell(createBgIndexFn, node.port);
-
- // Make sure the index build started and hit the failpoint.
- jsTestLog("Waiting for background index build to start and hang due to failpoint.");
- checkLog.contains(node, "index build: starting on " + testDB[collName].getFullName());
- checkLog.contains(node, "Hanging index build with no locks");
- }
-
- const rollbackTest = new RollbackTest();
- const originalPrimary = rollbackTest.getPrimary();
- CommonOps(originalPrimary);
-
- // Insert a document so that there is an operation to rollback.
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- assert.writeOK(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1}));
-
- // Allow rollback to start. There are no sync source ops.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
- // Make sure that rollback is hung waiting for the background index operation to complete.
- jsTestLog("Waiting for rollback to block on the background index build completion.");
- let msg1 = "Waiting for all background operations to complete before starting rollback";
- let msg2 = "Waiting for 1 background operations to complete on database '" + dbName + "'";
- checkLog.contains(rollbackNode, msg1);
- checkLog.contains(rollbackNode, msg2);
-
- // Now turn off the index build failpoint, allowing rollback to continue and finish.
- jsTestLog(
- "Disabling 'hangAfterStartingIndexBuildUnlocked' failpoint on the rollback node so background index build can complete.");
- hangIndexBuildsFailpoint(rollbackNode, "off");
-
- // Make sure the background index build completed before rollback started.
- checkLog.contains(rollbackNode,
- "Finished waiting for background operations to complete before rollback");
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check the replica set.
- rollbackTest.stop();
+ const collName = "coll";
+ let testDB = db.getSiblingDB(dbName);
+ jsTestLog("Starting background index build from parallel shell.");
+ assert.commandWorked(testDB[collName].createIndex({x: 1}, {background: true}));
+}
+
+/**
+ * Operations that will get replicated to both replica set nodes before rollback.
+ *
+ * These common operations are run against the node that will eventually go into rollback, so
+ * the failpoints will only be enabled on the rollback node.
+ */
+function CommonOps(node) {
+ // Create a collection on both data bearing nodes, so we can create an index on it.
+ const testDB = node.getDB(dbName);
+ assert.commandWorked(testDB.createCollection(collName));
+
+ // Hang background index builds.
+ hangIndexBuildsFailpoint(node, "alwaysOn");
+
+ jsTestLog("Starting background index build parallel shell.");
+ bgIndexThread = startParallelShell(createBgIndexFn, node.port);
+
+ // Make sure the index build started and hit the failpoint.
+ jsTestLog("Waiting for background index build to start and hang due to failpoint.");
+ checkLog.contains(node, "index build: starting on " + testDB[collName].getFullName());
+ checkLog.contains(node, "Hanging index build with no locks");
+}
+
+const rollbackTest = new RollbackTest();
+const originalPrimary = rollbackTest.getPrimary();
+CommonOps(originalPrimary);
+
+// Insert a document so that there is an operation to rollback.
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+assert.writeOK(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1}));
+
+// Allow rollback to start. There are no sync source ops.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+
+// Make sure that rollback is hung waiting for the background index operation to complete.
+jsTestLog("Waiting for rollback to block on the background index build completion.");
+let msg1 = "Waiting for all background operations to complete before starting rollback";
+let msg2 = "Waiting for 1 background operations to complete on database '" + dbName + "'";
+checkLog.contains(rollbackNode, msg1);
+checkLog.contains(rollbackNode, msg2);
+
+// Now turn off the index build failpoint, allowing rollback to continue and finish.
+jsTestLog(
+ "Disabling 'hangAfterStartingIndexBuildUnlocked' failpoint on the rollback node so background index build can complete.");
+hangIndexBuildsFailpoint(rollbackNode, "off");
+
+// Make sure the background index build completed before rollback started.
+checkLog.contains(rollbackNode,
+ "Finished waiting for background operations to complete before rollback");
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check the replica set.
+rollbackTest.stop();
}());
diff --git a/jstests/replsets/rollback_with_socket_error_then_steady_state.js b/jstests/replsets/rollback_with_socket_error_then_steady_state.js
index 29f060757d1..713658e1b5f 100644
--- a/jstests/replsets/rollback_with_socket_error_then_steady_state.js
+++ b/jstests/replsets/rollback_with_socket_error_then_steady_state.js
@@ -4,134 +4,133 @@
// node would be "stuck" with state=ROLLBACK while it was doing steady-state replication, with no
// way to reach SECONDARY without restarting the process.
(function() {
- 'use strict';
-
- load("jstests/libs/check_log.js");
- load("jstests/replsets/rslib.js");
-
- var collName = "test.coll";
- var counter = 0;
-
- var rst = new ReplSetTest({
- name: 'rollback_with_socket_error_then_steady_state',
- nodes: [
- // Primary flops between nodes 0 and 1.
- {},
- {},
- // Node 2 is the node under test.
- {rsConfig: {priority: 0}},
- // Arbiters to sway elections.
- {rsConfig: {arbiterOnly: true}},
- {rsConfig: {arbiterOnly: true}}
- ],
- useBridge: true
- });
- var nodes = rst.startSet();
- rst.initiate();
-
- function stepUp(rst, node) {
- var primary = rst.getPrimary();
- if (primary != node) {
- assert.commandWorked(primary.adminCommand({replSetStepDown: 1, force: true}));
- }
- waitForState(node, ReplSetTest.State.PRIMARY);
+'use strict';
+
+load("jstests/libs/check_log.js");
+load("jstests/replsets/rslib.js");
+
+var collName = "test.coll";
+var counter = 0;
+
+var rst = new ReplSetTest({
+ name: 'rollback_with_socket_error_then_steady_state',
+ nodes: [
+ // Primary flops between nodes 0 and 1.
+ {},
+ {},
+ // Node 2 is the node under test.
+ {rsConfig: {priority: 0}},
+ // Arbiters to sway elections.
+ {rsConfig: {arbiterOnly: true}},
+ {rsConfig: {arbiterOnly: true}}
+ ],
+ useBridge: true
+});
+var nodes = rst.startSet();
+rst.initiate();
+
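+// Step down the current primary if necessary and wait for 'node' to be elected primary.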
+function stepUp(rst, node) {
+ var primary = rst.getPrimary();
+ if (primary != node) {
+ assert.commandWorked(primary.adminCommand({replSetStepDown: 1, force: true}));
}
-
- jsTestLog("Make sure node 0 is primary.");
- stepUp(rst, nodes[0]);
- assert.eq(nodes[0], rst.getPrimary());
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getCollection(collName).insert(
- {a: counter++}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- jsTestLog("Create two partitions: [1] and [0,2,3,4].");
- nodes[1].disconnect(nodes[0]);
- nodes[1].disconnect(nodes[2]);
- nodes[1].disconnect(nodes[3]);
- nodes[1].disconnect(nodes[4]);
-
- jsTestLog("Do a write that is replicated to [0,2,3,4].");
- assert.writeOK(nodes[0].getCollection(collName).insert(
- {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- jsTestLog("Repartition to: [0,2] and [1,3,4].");
- nodes[1].reconnect(nodes[3]);
- nodes[1].reconnect(nodes[4]);
- nodes[3].disconnect(nodes[0]);
- nodes[3].disconnect(nodes[2]);
- nodes[4].disconnect(nodes[0]);
- nodes[4].disconnect(nodes[2]);
-
- jsTestLog("Ensure that 0 steps down and that 1 becomes primary.");
- waitForState(nodes[0], ReplSetTest.State.SECONDARY);
- waitForState(nodes[1], ReplSetTest.State.PRIMARY);
- assert.eq(nodes[1], rst.getPrimary());
-
- jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition.");
- assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++}));
-
- // Turn on failpoint on node 2 to pause rollback before doing anything.
- assert.commandWorked(
- nodes[2].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
-
- jsTestLog("Repartition to: [0] and [1,2,3,4].");
- nodes[2].disconnect(nodes[0]);
- nodes[2].reconnect(nodes[1]);
- nodes[2].reconnect(nodes[3]);
- nodes[2].reconnect(nodes[4]);
-
- jsTestLog("Wait for node 2 to decide to go into ROLLBACK and start syncing from node 1.");
- // Since nodes 1 and 2 have now diverged, node 2 should go into rollback. The failpoint will
- // stop it from actually transitioning to rollback, so the checkLog bellow will ensure that we
- // have decided to rollback, but haven't actually started yet.
- rst.awaitSyncSource(nodes[2], nodes[1]);
-
- jsTestLog("Wait for failpoint on node 2 to pause rollback before it starts");
- // Wait for fail point message to be logged.
- checkLog.contains(nodes[2], 'rollback - rollbackHangBeforeStart fail point enabled');
-
- jsTestLog("Repartition to: [1] and [0,2,3,4].");
- nodes[1].disconnect(nodes[3]);
- nodes[1].disconnect(nodes[4]);
- nodes[2].disconnect(nodes[1]);
- nodes[2].reconnect(nodes[0]);
- nodes[3].reconnect(nodes[0]);
- nodes[3].reconnect(nodes[2]);
- nodes[4].reconnect(nodes[0]);
- nodes[4].reconnect(nodes[2]);
-
- // Turn off failpoint on node 2 to allow rollback against node 1 to fail with a network error.
- assert.adminCommandWorkedAllowingNetworkError(
- nodes[2], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
-
- // Make node 0 ahead of node 2 again so node 2 will pick it as a sync source.
-
- jsTestLog("waiting for node 0 to be primary");
- waitForState(nodes[1], ReplSetTest.State.SECONDARY);
- waitForState(nodes[0], ReplSetTest.State.PRIMARY);
- assert.eq(nodes[0], rst.getPrimary());
-
- jsTestLog("w:2 write to node 0 (replicated to node 2)");
- assert.writeOK(nodes[0].getCollection(collName).insert(
- {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- // At this point node 2 has failed rollback before making any durable changes, including writing
- // to minValid. That means that it is free to pick any sync source and will pick node 0 where it
- // can pick up where it left off without rolling back. Ensure that it is able to reach SECONDARY
- // and doesn't do steady-state replication in ROLLBACK state.
- jsTestLog("Wait for node 2 to go into SECONDARY");
- assert.neq(nodes[2].adminCommand('replSetGetStatus').myState,
- ReplSetTest.State.ROLLBACK,
- "node 2 is doing steady-state replication with state=ROLLBACK!");
- waitForState(nodes[2], ReplSetTest.State.SECONDARY);
-
- // Re-connect all nodes and await secondary nodes so we can check data consistency.
- nodes[1].reconnect([nodes[0], nodes[2], nodes[3], nodes[4]]);
- rst.awaitSecondaryNodes();
-
- // Verify data consistency between nodes.
- rst.checkReplicatedDataHashes();
- rst.checkOplogs();
- rst.stopSet();
-
+ waitForState(node, ReplSetTest.State.PRIMARY);
+}
+
+jsTestLog("Make sure node 0 is primary.");
+stepUp(rst, nodes[0]);
+assert.eq(nodes[0], rst.getPrimary());
+// Wait for all data bearing nodes to get up to date.
+assert.writeOK(nodes[0].getCollection(collName).insert(
+ {a: counter++}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+jsTestLog("Create two partitions: [1] and [0,2,3,4].");
+nodes[1].disconnect(nodes[0]);
+nodes[1].disconnect(nodes[2]);
+nodes[1].disconnect(nodes[3]);
+nodes[1].disconnect(nodes[4]);
+
+jsTestLog("Do a write that is replicated to [0,2,3,4].");
+assert.writeOK(nodes[0].getCollection(collName).insert(
+ {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+jsTestLog("Repartition to: [0,2] and [1,3,4].");
+nodes[1].reconnect(nodes[3]);
+nodes[1].reconnect(nodes[4]);
+nodes[3].disconnect(nodes[0]);
+nodes[3].disconnect(nodes[2]);
+nodes[4].disconnect(nodes[0]);
+nodes[4].disconnect(nodes[2]);
+
+jsTestLog("Ensure that 0 steps down and that 1 becomes primary.");
+waitForState(nodes[0], ReplSetTest.State.SECONDARY);
+waitForState(nodes[1], ReplSetTest.State.PRIMARY);
+assert.eq(nodes[1], rst.getPrimary());
+
+jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition.");
+assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++}));
+
+// Turn on failpoint on node 2 to pause rollback before doing anything.
+assert.commandWorked(
+ nodes[2].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
+
+jsTestLog("Repartition to: [0] and [1,2,3,4].");
+nodes[2].disconnect(nodes[0]);
+nodes[2].reconnect(nodes[1]);
+nodes[2].reconnect(nodes[3]);
+nodes[2].reconnect(nodes[4]);
+
+jsTestLog("Wait for node 2 to decide to go into ROLLBACK and start syncing from node 1.");
+// Since nodes 1 and 2 have now diverged, node 2 should go into rollback. The failpoint will
+// stop it from actually transitioning to rollback, so the checkLog below will ensure that we
+// have decided to roll back, but haven't actually started yet.
+rst.awaitSyncSource(nodes[2], nodes[1]);
+
+jsTestLog("Wait for failpoint on node 2 to pause rollback before it starts");
+// Wait for fail point message to be logged.
+checkLog.contains(nodes[2], 'rollback - rollbackHangBeforeStart fail point enabled');
+
+jsTestLog("Repartition to: [1] and [0,2,3,4].");
+nodes[1].disconnect(nodes[3]);
+nodes[1].disconnect(nodes[4]);
+nodes[2].disconnect(nodes[1]);
+nodes[2].reconnect(nodes[0]);
+nodes[3].reconnect(nodes[0]);
+nodes[3].reconnect(nodes[2]);
+nodes[4].reconnect(nodes[0]);
+nodes[4].reconnect(nodes[2]);
+
+// Turn off failpoint on node 2 to allow rollback against node 1 to fail with a network error.
+assert.adminCommandWorkedAllowingNetworkError(
+ nodes[2], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
+
+// Make node 0 ahead of node 2 again so node 2 will pick it as a sync source.
+
+jsTestLog("waiting for node 0 to be primary");
+waitForState(nodes[1], ReplSetTest.State.SECONDARY);
+waitForState(nodes[0], ReplSetTest.State.PRIMARY);
+assert.eq(nodes[0], rst.getPrimary());
+
+jsTestLog("w:2 write to node 0 (replicated to node 2)");
+assert.writeOK(nodes[0].getCollection(collName).insert(
+ {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+// At this point node 2 has failed rollback before making any durable changes, including writing
+// to minValid. That means that it is free to pick any sync source and will pick node 0 where it
+// can pick up where it left off without rolling back. Ensure that it is able to reach SECONDARY
+// and doesn't do steady-state replication in ROLLBACK state.
+jsTestLog("Wait for node 2 to go into SECONDARY");
+assert.neq(nodes[2].adminCommand('replSetGetStatus').myState,
+ ReplSetTest.State.ROLLBACK,
+ "node 2 is doing steady-state replication with state=ROLLBACK!");
+waitForState(nodes[2], ReplSetTest.State.SECONDARY);
+
+// Re-connect all nodes and await secondary nodes so we can check data consistency.
+nodes[1].reconnect([nodes[0], nodes[2], nodes[3], nodes[4]]);
+rst.awaitSecondaryNodes();
+
+// Verify data consistency between nodes.
+rst.checkReplicatedDataHashes();
+rst.checkOplogs();
+rst.stopSet();
}());
diff --git a/jstests/replsets/rollover_preserves_active_txns.js b/jstests/replsets/rollover_preserves_active_txns.js
index 326cd764746..064913fda10 100644
--- a/jstests/replsets/rollover_preserves_active_txns.js
+++ b/jstests/replsets/rollover_preserves_active_txns.js
@@ -10,90 +10,88 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
-
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiate();
-
- const primary = replSet.getPrimary();
- const secondary = replSet.getSecondary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Prepare a transaction");
-
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
-
- jsTestLog("Get transaction entry from config.transactions");
-
- const txnEntry = primary.getDB("config").transactions.findOne();
- assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
-
- assert.soonNoExcept(() => {
- const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
- assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
- return true;
- });
-
- jsTestLog("Find prepare oplog entry");
-
- const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
- assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
- // Must already be written on secondary, since the config.transactions entry is.
- const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
- assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-
- jsTestLog("Insert documents until oplog exceeds oplogSize");
-
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
-
- jsTestLog(
- `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
-
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
- assert.soon(() => {
- return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
- });
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
-
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
-
- PrepareHelpers.awaitOplogTruncation(replSet);
-
- replSet.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+// Use a new replica set for each of the commit and abort tests so both start from the same
+// clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+
+ const primary = replSet.getPrimary();
+ const secondary = replSet.getSecondary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+
+ jsTestLog("Get transaction entry from config.transactions");
+
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ assert.soonNoExcept(() => {
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ return true;
+ });
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+ // Must already be written on secondary, since the config.transactions entry is.
+ const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog(`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
+
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
+ assert.soon(() => {
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+ });
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
}
- doTest("commit");
- doTest("abort");
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ replSet.stopSet();
+}
+
+doTest("commit");
+doTest("abort");
})();
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index 2423baea807..209ed8172e1 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -16,449 +16,441 @@ var getLastOpTime;
var setLogVerbosity;
(function() {
- "use strict";
- load("jstests/libs/write_concern_util.js");
-
- var count = 0;
- var w = 0;
-
- /**
- * A wrapper around `replSetSyncFrom` to ensure that the desired sync source is ahead of the
- * syncing node so that the syncing node can choose to sync from the desired sync source.
- * It first stops replication on the syncing node so that it can do a write on the desired
- * sync source and make sure it's ahead. When replication is restarted, the desired sync
- * source will be a valid sync source for the syncing node.
- */
- syncFrom = function(syncingNode, desiredSyncSource, rst) {
- jsTestLog("Forcing " + syncingNode.name + " to sync from " + desiredSyncSource.name);
-
- // Ensure that 'desiredSyncSource' doesn't already have the dummy write sitting around from
- // a previous syncFrom attempt.
- var dummyName = "dummyForSyncFrom";
- rst.getPrimary().getDB(dummyName).getCollection(dummyName).drop();
- assert.soonNoExcept(function() {
- return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne() == null;
- });
-
- stopServerReplication(syncingNode);
-
- assert.writeOK(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1}));
- // Wait for 'desiredSyncSource' to get the dummy write we just did so we know it's
- // definitely ahead of 'syncingNode' before we call replSetSyncFrom.
- assert.soonNoExcept(function() {
- return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne({a: 1});
- });
-
- assert.commandWorked(syncingNode.adminCommand({replSetSyncFrom: desiredSyncSource.name}));
- restartServerReplication(syncingNode);
- rst.awaitSyncSource(syncingNode, desiredSyncSource);
- };
-
- /**
- * Calls a function 'f' once a second until it returns true. Throws an exception once 'f' has
- * been called more than 'retries' times without returning true. If 'retries' is not given,
- * it defaults to 200. 'retries' must be an integer greater than or equal to zero.
- */
- wait = function(f, msg, retries) {
- w++;
- var n = 0;
- var default_retries = 200;
- var delay_interval_ms = 1000;
-
- // Set default value if 'retries' was not given.
- if (retries === undefined) {
- retries = default_retries;
+"use strict";
+load("jstests/libs/write_concern_util.js");
+
+var count = 0;
+var w = 0;
+
+/**
+ * A wrapper around `replSetSyncFrom` to ensure that the desired sync source is ahead of the
+ * syncing node so that the syncing node can choose to sync from the desired sync source.
+ * It first stops replication on the syncing node so that it can do a write on the desired
+ * sync source and make sure it's ahead. When replication is restarted, the desired sync
+ * source will be a valid sync source for the syncing node.
+ */
+syncFrom = function(syncingNode, desiredSyncSource, rst) {
+ jsTestLog("Forcing " + syncingNode.name + " to sync from " + desiredSyncSource.name);
+
+ // Ensure that 'desiredSyncSource' doesn't already have the dummy write sitting around from
+ // a previous syncFrom attempt.
+ var dummyName = "dummyForSyncFrom";
+ rst.getPrimary().getDB(dummyName).getCollection(dummyName).drop();
+ assert.soonNoExcept(function() {
+ return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne() == null;
+ });
+
+ stopServerReplication(syncingNode);
+
+ assert.writeOK(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1}));
+ // Wait for 'desiredSyncSource' to get the dummy write we just did so we know it's
+ // definitely ahead of 'syncingNode' before we call replSetSyncFrom.
+ assert.soonNoExcept(function() {
+ return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne({a: 1});
+ });
+
+ assert.commandWorked(syncingNode.adminCommand({replSetSyncFrom: desiredSyncSource.name}));
+ restartServerReplication(syncingNode);
+ rst.awaitSyncSource(syncingNode, desiredSyncSource);
+};
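+
+// A minimal usage sketch (not part of this library): assuming 'rst' is a started and
+// initiated three-node ReplSetTest, this forces node 2 to sync from node 1:
+//
+//     syncFrom(rst.nodes[2], rst.nodes[1], rst);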
+
+/**
+ * Calls a function 'f' once a second until it returns true. Throws an exception once 'f' has
+ * been called more than 'retries' times without returning true. If 'retries' is not given,
+ * it defaults to 200. 'retries' must be an integer greater than or equal to zero.
+ */
+wait = function(f, msg, retries) {
+ w++;
+ var n = 0;
+ var default_retries = 200;
+ var delay_interval_ms = 1000;
+
+ // Set default value if 'retries' was not given.
+ if (retries === undefined) {
+ retries = default_retries;
+ }
+ while (!f()) {
+ if (n % 4 == 0) {
+ print("Waiting " + w);
}
- while (!f()) {
- if (n % 4 == 0) {
- print("Waiting " + w);
- }
- if (++n == 4) {
- print("" + f);
- }
- if (n >= retries) {
- throw new Error('Tried ' + retries + ' times, giving up on ' + msg);
- }
- sleep(delay_interval_ms);
+ if (++n == 4) {
+ print("" + f);
}
- };
-
- /**
- * Use this to do something once every 4 iterations.
- *
- * <pre>
- * for (i=0; i<1000; i++) {
- * occasionally(function() { print("4 more iterations"); });
- * }
- * </pre>
- */
- occasionally = function(f, n) {
- var interval = n || 4;
- if (count % interval == 0) {
- f();
+ if (n >= retries) {
+ throw new Error('Tried ' + retries + ' times, giving up on ' + msg);
}
- count++;
- };
+ sleep(delay_interval_ms);
+ }
+};
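+
+// A hypothetical example of the contract above: poll once a second for a collection
+// named 'coll', giving up after 10 retries:
+//
+//     wait(function() {
+//         return db.getCollectionNames().indexOf("coll") !== -1;
+//     }, "collection 'coll' never appeared", 10);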
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+/**
+ * Attempt to re-establish and re-authenticate a Mongo connection if it was dropped, with
+ * multiple retries.
+ *
+ * Returns upon successful re-connection. If connection cannot be established after 200
+ * retries, throws an exception.
+ *
+ * @param conn - a Mongo connection object or DB object.
+ */
+reconnect = function(conn) {
+ var retries = 200;
+ wait(function() {
+ var db;
+ try {
+ // Make this work with either dbs or connections.
+ if (typeof (conn.getDB) == "function") {
+ db = conn.getDB('foo');
+ } else {
+ db = conn;
+ }
- /**
- * Attempt to re-establish and re-authenticate a Mongo connection if it was dropped, with
- * multiple retries.
- *
- * Returns upon successful re-connnection. If connection cannot be established after 200
- * retries, throws an exception.
- *
- * @param conn - a Mongo connection object or DB object.
- */
- reconnect = function(conn) {
- var retries = 200;
- wait(function() {
- var db;
- try {
- // Make this work with either dbs or connections.
- if (typeof(conn.getDB) == "function") {
- db = conn.getDB('foo');
- } else {
- db = conn;
- }
+ // Run a simple command to re-establish connection.
+ db.bar.stats();
- // Run a simple command to re-establish connection.
- db.bar.stats();
+ // SERVER-4241: Shell connections don't re-authenticate on reconnect.
+ if (jsTest.options().keyFile) {
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch (e) {
+ print(e);
+ return false;
+ }
+ }, retries);
+};
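+
+// Sketch of the intended call pattern (illustrative only): after an operation that may
+// drop connections, such as a restart, re-establish the connection before issuing
+// further commands:
+//
+//     rst.restart(0);
+//     reconnect(rst.nodes[0]);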
+
+getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+getLeastRecentOp = function({server, readConcern}) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ const oplog = server.getDB("local").oplog.rs;
+ const cursor = oplog.find().sort({$natural: 1}).limit(1).readConcern(readConcern);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
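+
+// Illustrative only: the two helpers above bracket the oplog. For a node 'conn', these
+// fetch the newest and oldest entries, e.g. to estimate the retained oplog range:
+//
+//     var newest = getLatestOp(conn);
+//     var oldest = getLeastRecentOp({server: conn, readConcern: "local"});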
+
+waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon(function() {
+ var state = null;
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus: 1});
+ failCount = 0;
+ } catch (e) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print("Calling replSetGetStatus failed");
+ print(e);
+ return false;
+ }
+ occasionally(function() {
+ printjson(state);
+ }, 10);
- // SERVER-4241: Shell connections don't re-authenticate on reconnect.
- if (jsTest.options().keyFile) {
- return jsTest.authenticate(db.getMongo());
- }
- return true;
- } catch (e) {
- print(e);
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
return false;
}
- }, retries);
- };
+ }
+ printjson(state);
+ return true;
+ }, "not all members ready", timeout || 10 * 60 * 1000);
+
+ print("All members are now in state PRIMARY, SECONDARY, or ARBITER");
+};
- getLatestOp = function(server) {
- server.getDB("admin").getMongo().setSlaveOk();
- var log = server.getDB("local")['oplog.rs'];
- var cursor = log.find({}).sort({'$natural': -1}).limit(1);
- if (cursor.hasNext()) {
- return cursor.next();
+reconfig = function(rs, config, force) {
+ "use strict";
+ var admin = rs.getPrimary().getDB("admin");
+ var e;
+ var master;
+ try {
+ var reconfigCommand = {replSetReconfig: rs._updateConfigIfNotDurable(config), force: force};
+ var res = admin.runCommand(reconfigCommand);
+
+ // Retry reconfig if quorum check failed because not enough voting nodes responded.
+ if (!res.ok && res.code === ErrorCodes.NodeNotFound) {
+ print("Replset reconfig failed because quorum check failed. Retry reconfig once. " +
+ "Error: " + tojson(res));
+ res = admin.runCommand(reconfigCommand);
}
- return null;
- };
- getLeastRecentOp = function({server, readConcern}) {
- server.getDB("admin").getMongo().setSlaveOk();
- const oplog = server.getDB("local").oplog.rs;
- const cursor = oplog.find().sort({$natural: 1}).limit(1).readConcern(readConcern);
- if (cursor.hasNext()) {
- return cursor.next();
+ assert.commandWorked(res);
+ } catch (e) {
+ if (!isNetworkError(e)) {
+ throw e;
}
- return null;
- };
+ print("Calling replSetReconfig failed. " + tojson(e));
+ }
- waitForAllMembers = function(master, timeout) {
- var failCount = 0;
+ var master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
- assert.soon(function() {
- var state = null;
+ return master;
+};
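+
+// A hedged usage sketch ('rst' is assumed to be an initiated ReplSetTest): bump a
+// member's priority and apply it via the retrying wrapper above:
+//
+//     var config = rst.getReplSetConfigFromNode();
+//     config.version++;
+//     config.members[1].priority = 2;
+//     reconfig(rst, config, false);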
+
+awaitOpTime = function(catchingUpNode, latestOpTimeNode) {
+ var ts, ex, opTime;
+ assert.soon(
+ function() {
try {
- state = master.getSisterDB("admin").runCommand({replSetGetStatus: 1});
- failCount = 0;
- } catch (e) {
- // Connection can get reset on replica set failover causing a socket exception
- print("Calling replSetGetStatus failed");
- print(e);
- return false;
- }
- occasionally(function() {
- printjson(state);
- }, 10);
-
- for (var m in state.members) {
- if (state.members[m].state != 1 && // PRIMARY
- state.members[m].state != 2 && // SECONDARY
- state.members[m].state != 7) { // ARBITER
- return false;
+                // The following statement extracts the timestamp field from the most
+                // recent element of the oplog, and stores it in "ts".
+ ts = getLatestOp(catchingUpNode).ts;
+ opTime = getLatestOp(latestOpTimeNode).ts;
+ if ((ts.t == opTime.t) && (ts.i == opTime.i)) {
+ return true;
}
+ ex = null;
+ return false;
+            } catch (e) {
+                // Save the exception for the failure message below; 'catch (ex)' would
+                // shadow the outer 'ex' and lose it.
+                ex = e;
+                return false;
+            }
- printjson(state);
- return true;
- }, "not all members ready", timeout || 10 * 60 * 1000);
-
- print("All members are now in state PRIMARY, SECONDARY, or ARBITER");
- };
-
- reconfig = function(rs, config, force) {
- "use strict";
- var admin = rs.getPrimary().getDB("admin");
- var e;
- var master;
- try {
- var reconfigCommand = {
- replSetReconfig: rs._updateConfigIfNotDurable(config),
- force: force
- };
- var res = admin.runCommand(reconfigCommand);
-
- // Retry reconfig if quorum check failed because not enough voting nodes responded.
- if (!res.ok && res.code === ErrorCodes.NodeNotFound) {
- print("Replset reconfig failed because quorum check failed. Retry reconfig once. " +
- "Error: " + tojson(res));
- res = admin.runCommand(reconfigCommand);
+ },
+ function() {
+ var message = "Node " + catchingUpNode + " only reached optime " + tojson(ts) +
+ " not " + tojson(opTime);
+ if (ex) {
+ message += "; last attempt failed with exception " + tojson(ex);
}
-
- assert.commandWorked(res);
- } catch (e) {
- if (!isNetworkError(e)) {
- throw e;
+ return message;
+ });
+};
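+
+// Typical (hypothetical) call: block until a lagging secondary has applied up to the
+// primary's latest optime:
+//
+//     awaitOpTime(rst.getSecondary(), rst.getPrimary());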
+
+/**
+ * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until
+ * all nodes in the set have replicated through the same optime.
+ * 'rs' is an array of connections to replica set nodes. This function is useful when you
+ * don't have a ReplSetTest object to use, otherwise ReplSetTest.awaitReplication is preferred.
+ */
+waitUntilAllNodesCaughtUp = function(rs, timeout) {
+ var rsStatus;
+ var firstConflictingIndex;
+ var ot;
+ var otherOt;
+ assert.soon(
+ function() {
+ rsStatus = rs[0].adminCommand('replSetGetStatus');
+ if (rsStatus.ok != 1) {
+ return false;
}
- print("Calling replSetReconfig failed. " + tojson(e));
- }
-
- var master = rs.getPrimary().getDB("admin");
- waitForAllMembers(master);
-
- return master;
- };
-
- awaitOpTime = function(catchingUpNode, latestOpTimeNode) {
- var ts, ex, opTime;
- assert.soon(
- function() {
- try {
- // The following statement extracts the timestamp field from the most recent
- // element of
- // the oplog, and stores it in "ts".
- ts = getLatestOp(catchingUpNode).ts;
- opTime = getLatestOp(latestOpTimeNode).ts;
- if ((ts.t == opTime.t) && (ts.i == opTime.i)) {
- return true;
- }
- ex = null;
+ assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus));
+ ot = rsStatus.members[0].optime;
+ for (var i = 1; i < rsStatus.members.length; ++i) {
+ var otherNode = rsStatus.members[i];
+
+ // Must be in PRIMARY or SECONDARY state.
+ if (otherNode.state != ReplSetTest.State.PRIMARY &&
+ otherNode.state != ReplSetTest.State.SECONDARY) {
return false;
- } catch (ex) {
- return false;
- }
- },
- function() {
- var message = "Node " + catchingUpNode + " only reached optime " + tojson(ts) +
- " not " + tojson(opTime);
- if (ex) {
- message += "; last attempt failed with exception " + tojson(ex);
}
- return message;
- });
- };
- /**
- * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until
- * all nodes in the set are replicated through the same optime.
- * 'rs' is an array of connections to replica set nodes. This function is useful when you
- * don't have a ReplSetTest object to use, otherwise ReplSetTest.awaitReplication is preferred.
- */
- waitUntilAllNodesCaughtUp = function(rs, timeout) {
- var rsStatus;
- var firstConflictingIndex;
- var ot;
- var otherOt;
- assert.soon(
- function() {
- rsStatus = rs[0].adminCommand('replSetGetStatus');
- if (rsStatus.ok != 1) {
+ // Fail if optimes are not equal.
+ otherOt = otherNode.optime;
+ if (!friendlyEqual(otherOt, ot)) {
+ firstConflictingIndex = i;
return false;
}
- assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus));
- ot = rsStatus.members[0].optime;
- for (var i = 1; i < rsStatus.members.length; ++i) {
- var otherNode = rsStatus.members[i];
-
- // Must be in PRIMARY or SECONDARY state.
- if (otherNode.state != ReplSetTest.State.PRIMARY &&
- otherNode.state != ReplSetTest.State.SECONDARY) {
- return false;
- }
-
- // Fail if optimes are not equal.
- otherOt = otherNode.optime;
- if (!friendlyEqual(otherOt, ot)) {
- firstConflictingIndex = i;
- return false;
- }
- }
- return true;
- },
- function() {
- return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex +
- " (" + tojson(otherOt) + ") are different in " + tojson(rsStatus);
- },
- timeout);
- };
-
- /**
- * Waits for the given node to reach the given state, ignoring network errors. Ensures that the
- * connection is re-connected and usable when the function returns.
- */
- waitForState = function(node, state) {
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand(
- {replSetTest: 1, waitForMemberState: state, timeoutMillis: 60 * 1000 * 5}));
- return true;
- });
- // Some state transitions cause connections to be closed, but whether the connection close
- // happens before or after the replSetTest command above returns is racy, so to ensure that
- // the connection to 'node' is usable after this function returns, reconnect it first.
- reconnect(node);
- };
-
- /**
- * Starts each node in the given replica set if the storage engine supports readConcern
- *'majority'.
- * Returns true if the replica set was started successfully and false otherwise.
- *
- * @param replSetTest - The instance of {@link ReplSetTest} to start
- * @param options - The options passed to {@link ReplSetTest.startSet}
- */
- startSetIfSupportsReadMajority = function(replSetTest, options) {
- replSetTest.startSet(options);
- return replSetTest.nodes[0]
- .adminCommand("serverStatus")
- .storageEngine.supportsCommittedReads;
- };
-
- /**
- * Performs a reInitiate() call on 'replSetTest', ignoring errors that are related to an aborted
- * secondary member. All other errors are rethrown.
- */
- reInitiateWithoutThrowingOnAbortedMember = function(replSetTest) {
- try {
- replSetTest.reInitiate();
- } catch (e) {
- // reInitiate can throw because it tries to run an ismaster command on
- // all secondaries, including the new one that may have already aborted
- const errMsg = tojson(e);
- if (isNetworkError(e)) {
- // Ignore these exceptions, which are indicative of an aborted node
- } else {
- throw e;
}
+ return true;
+ },
+ function() {
+ return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex + " (" +
+ tojson(otherOt) + ") are different in " + tojson(rsStatus);
+ },
+ timeout);
+};
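+
+// Example under the stated assumption that only raw connections are available (no
+// ReplSetTest object); hostnames here are placeholders:
+//
+//     var conns = [new Mongo("host1:27017"), new Mongo("host2:27017")];
+//     waitUntilAllNodesCaughtUp(conns, 5 * 60 * 1000);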
+
+/**
+ * Waits for the given node to reach the given state, ignoring network errors. Ensures that the
+ * connection is re-connected and usable when the function returns.
+ */
+waitForState = function(node, state) {
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand(
+ {replSetTest: 1, waitForMemberState: state, timeoutMillis: 60 * 1000 * 5}));
+ return true;
+ });
+ // Some state transitions cause connections to be closed, but whether the connection close
+ // happens before or after the replSetTest command above returns is racy, so to ensure that
+ // the connection to 'node' is usable after this function returns, reconnect it first.
+ reconnect(node);
+};
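+
+// For instance, to wait for a newly added node to finish initial sync and become a
+// secondary:
+//
+//     waitForState(rst.nodes[2], ReplSetTest.State.SECONDARY);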
+
+/**
+ * Starts each node in the given replica set if the storage engine supports readConcern
+ *'majority'.
+ * Returns true if the replica set was started successfully and false otherwise.
+ *
+ * @param replSetTest - The instance of {@link ReplSetTest} to start
+ * @param options - The options passed to {@link ReplSetTest.startSet}
+ */
+startSetIfSupportsReadMajority = function(replSetTest, options) {
+ replSetTest.startSet(options);
+ return replSetTest.nodes[0].adminCommand("serverStatus").storageEngine.supportsCommittedReads;
+};
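+
+// Sketch of the usual guard pattern built on this helper (the test body is
+// hypothetical):
+//
+//     var rst = new ReplSetTest({nodes: 1});
+//     if (!startSetIfSupportsReadMajority(rst)) {
+//         jsTestLog("Skipping test; storage engine does not support committed reads");
+//         rst.stopSet();
+//         return;
+//     }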
+
+/**
+ * Performs a reInitiate() call on 'replSetTest', ignoring errors that are related to an aborted
+ * secondary member. All other errors are rethrown.
+ */
+reInitiateWithoutThrowingOnAbortedMember = function(replSetTest) {
+ try {
+ replSetTest.reInitiate();
+ } catch (e) {
+        // reInitiate can throw because it tries to run an ismaster command on
+        // all secondaries, including the new one that may have already aborted.
+        if (isNetworkError(e)) {
+            // Ignore these exceptions, which are indicative of an aborted node.
+ } else {
+ throw e;
+ }
+ }
+};
+
+/**
+ * Waits for the specified hosts to enter a certain state.
+ */
+awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) {
+ var hostCount = host.length;
+ if (hostCount) {
+ for (var i = 0; i < hostCount; i++) {
+ awaitRSClientHosts(conn, host[i], hostOk, rs);
}
- };
- /**
- * Waits for the specified hosts to enter a certain state.
- */
- awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) {
- var hostCount = host.length;
- if (hostCount) {
- for (var i = 0; i < hostCount; i++) {
- awaitRSClientHosts(conn, host[i], hostOk, rs);
- }
+ return;
+ }
- return;
- }
+ timeout = timeout || 5 * 60 * 1000;
- timeout = timeout || 5 * 60 * 1000;
+ if (hostOk == undefined)
+ hostOk = {ok: true};
+ if (host.host)
+ host = host.host;
+ if (rs)
+ rs = rs.name;
- if (hostOk == undefined)
- hostOk = {ok: true};
- if (host.host)
- host = host.host;
- if (rs)
- rs = rs.name;
+ print("Awaiting " + host + " to be " + tojson(hostOk) + " for " + conn + " (rs: " + rs + ")");
- print("Awaiting " + host + " to be " + tojson(hostOk) + " for " + conn + " (rs: " + rs +
- ")");
+ var tests = 0;
- var tests = 0;
+ assert.soon(function() {
+ var rsClientHosts = conn.adminCommand('connPoolStats').replicaSets;
+ if (tests++ % 10 == 0) {
+ printjson(rsClientHosts);
+ }
- assert.soon(function() {
- var rsClientHosts = conn.adminCommand('connPoolStats').replicaSets;
- if (tests++ % 10 == 0) {
- printjson(rsClientHosts);
- }
+ for (var rsName in rsClientHosts) {
+ if (rs && rs != rsName)
+ continue;
- for (var rsName in rsClientHosts) {
- if (rs && rs != rsName)
+ for (var i = 0; i < rsClientHosts[rsName].hosts.length; i++) {
+ var clientHost = rsClientHosts[rsName].hosts[i];
+ if (clientHost.addr != host)
continue;
- for (var i = 0; i < rsClientHosts[rsName].hosts.length; i++) {
- var clientHost = rsClientHosts[rsName].hosts[i];
- if (clientHost.addr != host)
- continue;
+ // Check that *all* host properties are set correctly
+ var propOk = true;
+ for (var prop in hostOk) {
+            // Use a special comparator for tags because isMaster can return the fields
+            // in a different order. The tag fields should be treated like a set of
+            // strings, and two tags should be considered equal if the sets match.
+ if (prop == 'tags') {
+ if (!clientHost.tags) {
+ propOk = false;
+ break;
+ }
- // Check that *all* host properties are set correctly
- var propOk = true;
- for (var prop in hostOk) {
- // Use special comparator for tags because isMaster can return the fields in
- // different order. The fields of the tags should be treated like a set of
- // strings and 2 tags should be considered the same if the set is equal.
- if (prop == 'tags') {
- if (!clientHost.tags) {
+ for (var hostTag in hostOk.tags) {
+ if (clientHost.tags[hostTag] != hostOk.tags[hostTag]) {
propOk = false;
break;
}
-
- for (var hostTag in hostOk.tags) {
- if (clientHost.tags[hostTag] != hostOk.tags[hostTag]) {
- propOk = false;
- break;
- }
- }
-
- for (var clientTag in clientHost.tags) {
- if (clientHost.tags[clientTag] != hostOk.tags[clientTag]) {
- propOk = false;
- break;
- }
- }
-
- continue;
}
- if (isObject(hostOk[prop])) {
- if (!friendlyEqual(hostOk[prop], clientHost[prop])) {
+ for (var clientTag in clientHost.tags) {
+ if (clientHost.tags[clientTag] != hostOk.tags[clientTag]) {
propOk = false;
break;
}
- } else if (clientHost[prop] != hostOk[prop]) {
+ }
+
+ continue;
+ }
+
+ if (isObject(hostOk[prop])) {
+ if (!friendlyEqual(hostOk[prop], clientHost[prop])) {
propOk = false;
break;
}
+ } else if (clientHost[prop] != hostOk[prop]) {
+ propOk = false;
+ break;
}
+ }
- if (propOk) {
- return true;
- }
+ if (propOk) {
+ return true;
}
}
+ }
- return false;
- }, 'timed out waiting for replica set client to recognize hosts', timeout);
- };
-
- /**
- * Returns the last opTime of the connection based from replSetGetStatus. Can only
- * be used on replica set nodes.
- */
- getLastOpTime = function(conn) {
- var replSetStatus =
- assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
- var connStatus = replSetStatus.members.filter(m => m.self)[0];
- return connStatus.optime;
- };
-
- /**
- * Set log verbosity on all given nodes.
- * e.g. setLogVerbosity(replTest.nodes, { "replication": {"verbosity": 3} });
- */
- setLogVerbosity = function(nodes, logVerbosity) {
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": logVerbosity,
- };
- nodes.forEach(function(node) {
- assert.commandWorked(node.adminCommand(verbosity));
- });
+ return false;
+ }, 'timed out waiting for replica set client to recognize hosts', timeout);
+};
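+
+// A hedged example: wait until a mongos connection ('mongosConn', assumed) sees the
+// replica set's primary as both reachable and master:
+//
+//     awaitRSClientHosts(mongosConn, rst.getPrimary(), {ok: true, ismaster: true}, rst);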
+
+/**
+ * Returns the last opTime of the connection based from replSetGetStatus. Can only
+ * be used on replica set nodes.
+ */
+getLastOpTime = function(conn) {
+ var replSetStatus = assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
+ var connStatus = replSetStatus.members.filter(m => m.self)[0];
+ return connStatus.optime;
+};
+
+/**
+ * Set log verbosity on all given nodes.
+ * e.g. setLogVerbosity(replTest.nodes, { "replication": {"verbosity": 3} });
+ */
+setLogVerbosity = function(nodes, logVerbosity) {
+ var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": logVerbosity,
};
-
+ nodes.forEach(function(node) {
+ assert.commandWorked(node.adminCommand(verbosity));
+ });
+};
}());
diff --git a/jstests/replsets/secondary_as_sync_source.js b/jstests/replsets/secondary_as_sync_source.js
index 6f446842daa..ec18cebff0d 100644
--- a/jstests/replsets/secondary_as_sync_source.js
+++ b/jstests/replsets/secondary_as_sync_source.js
@@ -5,84 +5,84 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const firstIndexName = "_first";
+const firstIndexName = "_first";
- function addTestDocuments(db) {
- let size = 100;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
+function addTestDocuments(db) {
+ let size = 100;
+ jsTest.log("Creating " + size + " test documents.");
+ var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
+ assert.writeOK(bulk.execute());
+}
- let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, useBridge: true});
- let nodes = replSet.nodeList();
+let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, useBridge: true});
+let nodes = replSet.nodeList();
- replSet.startSet({startClean: true});
- replSet.initiate({
- _id: "indexBuilds",
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], votes: 0, priority: 0},
- ]
- });
+replSet.startSet({startClean: true});
+replSet.initiate({
+ _id: "indexBuilds",
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], votes: 0, priority: 0},
+ ]
+});
- let primary = replSet.getPrimary();
- let primaryDB = primary.getDB(dbName);
+let primary = replSet.getPrimary();
+let primaryDB = primary.getDB(dbName);
- let secondary = replSet.getSecondary();
- let secondaryDB = secondary.getDB(dbName);
+let secondary = replSet.getSecondary();
+let secondaryDB = secondary.getDB(dbName);
- addTestDocuments(primaryDB);
+addTestDocuments(primaryDB);
- jsTest.log("Hanging index builds on the secondary node");
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+jsTest.log("Hanging index builds on the secondary node");
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
- jsTest.log("Beginning index build: " + firstIndexName);
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {i: 1}, name: firstIndexName, background: true}],
- writeConcern: {w: 2}
- }));
+jsTest.log("Beginning index build: " + firstIndexName);
+assert.commandWorked(primaryDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {i: 1}, name: firstIndexName, background: true}],
+ writeConcern: {w: 2}
+}));
- jsTest.log("Adding a new node to the replica set");
- let newNode = replSet.add({rsConfig: {votes: 0, priority: 0}});
+jsTest.log("Adding a new node to the replica set");
+let newNode = replSet.add({rsConfig: {votes: 0, priority: 0}});
- // Ensure that the new node and primary cannot communicate to each other.
- newNode.disconnect(primary);
+// Ensure that the new node and primary cannot communicate to each other.
+newNode.disconnect(primary);
- replSet.reInitiate();
+replSet.reInitiate();
- // Wait for the new node to finish initial sync.
- waitForState(newNode, ReplSetTest.State.SECONDARY);
+// Wait for the new node to finish initial sync.
+waitForState(newNode, ReplSetTest.State.SECONDARY);
- // Let the 'secondary' finish its index build.
- jsTest.log("Removing index build hang on the secondary node to allow it to finish");
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+// Let the 'secondary' finish its index build.
+jsTest.log("Removing index build hang on the secondary node to allow it to finish");
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- // Wait for the index builds to finish.
- replSet.waitForAllIndexBuildsToFinish(dbName, collName);
- jsTest.log("Checking if the indexes match between the new node and the secondary node");
+// Wait for the index builds to finish.
+replSet.waitForAllIndexBuildsToFinish(dbName, collName);
+jsTest.log("Checking if the indexes match between the new node and the secondary node");
- let newNodeDB = newNode.getDB(dbName);
- jsTest.log("New nodes indexes:");
- printjson(newNodeDB.getCollection(collName).getIndexes());
- jsTest.log("Secondary nodes indexes:");
- printjson(secondaryDB.getCollection(collName).getIndexes());
+let newNodeDB = newNode.getDB(dbName);
+jsTest.log("New nodes indexes:");
+printjson(newNodeDB.getCollection(collName).getIndexes());
+jsTest.log("Secondary nodes indexes:");
+printjson(secondaryDB.getCollection(collName).getIndexes());
- assert.eq(newNodeDB.getCollection(collName).getIndexes().length,
- secondaryDB.getCollection(collName).getIndexes().length);
+assert.eq(newNodeDB.getCollection(collName).getIndexes().length,
+ secondaryDB.getCollection(collName).getIndexes().length);
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/replsets/secondary_reads_timestamp_visibility.js b/jstests/replsets/secondary_reads_timestamp_visibility.js
index b0d213f91f0..4b981b72cc8 100644
--- a/jstests/replsets/secondary_reads_timestamp_visibility.js
+++ b/jstests/replsets/secondary_reads_timestamp_visibility.js
@@ -7,96 +7,94 @@
*
*/
(function() {
- "use strict";
-
- load('jstests/replsets/libs/secondary_reads_test.js');
-
- const name = "secondaryReadsTimestampVisibility";
- const collName = "testColl";
- let secondaryReadsTest = new SecondaryReadsTest(name);
- let replSet = secondaryReadsTest.getReplset();
-
- let primaryDB = secondaryReadsTest.getPrimaryDB();
- let secondaryDB = secondaryReadsTest.getSecondaryDB();
-
- if (!primaryDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- secondaryReadsTest.stop();
- return;
- }
- let primaryColl = primaryDB.getCollection(collName);
-
- // Create a collection and an index. Insert some data.
- primaryDB.runCommand({drop: collName});
- assert.commandWorked(primaryDB.runCommand({create: collName}));
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: collName, indexes: [{key: {y: 1}, name: "y_1", unique: true}]}));
- for (let i = 0; i < 100; i++) {
- assert.commandWorked(primaryColl.insert({_id: i, x: 0, y: i + 1}));
- }
-
- replSet.awaitLastOpCommitted();
- // This function includes a call to awaitReplication().
- replSet.waitForAllIndexBuildsToFinish(primaryDB.getName(), collName);
-
- // Sanity check.
- assert.eq(secondaryDB.getCollection(collName).find({x: 0}).itcount(), 100);
- assert.eq(secondaryDB.getCollection(collName).find({y: {$gte: 1, $lt: 101}}).itcount(), 100);
-
- // Prevent a batch from completing on the secondary.
- let pauseAwait = secondaryReadsTest.pauseSecondaryBatchApplication();
-
- // Update x to 1 in each document with default writeConcern and make sure we see the correct
- // data on the primary.
- let updates = [];
- for (let i = 0; i < 100; i++) {
- updates[i] = {q: {_id: i}, u: {x: 1, y: i}};
- }
- assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
- assert.eq(primaryColl.find({x: 1}).itcount(), 100);
- assert.eq(primaryColl.find({y: {$gte: 0, $lt: 100}}).itcount(), 100);
-
- // Wait for the batch application to pause.
- pauseAwait();
-
- let levels = ["local", "available", "majority"];
-
- if (!primaryDB.serverStatus().storageEngine.supportsCommittedReads) {
- levels = ["local", "available"];
- }
-
- // We should see the previous, un-replicated state on the secondary with every readconcern.
- for (let i in levels) {
- print("Checking that no new updates are visible yet for readConcern: " + levels[i]);
- assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(),
- 100);
- assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(),
- 0);
- assert.eq(secondaryDB.getCollection(collName)
- .find({y: {$gte: 1, $lt: 101}})
- .readConcern(levels[i])
- .itcount(),
- 100);
- }
-
- // Disable the failpoint and let the batch complete.
- secondaryReadsTest.resumeSecondaryBatchApplication();
-
- // Wait for the last op to appear in the majority committed snapshot on each node. This ensures
- // that the op will be visible to a "majority" read.
- replSet.awaitLastOpCommitted();
-
- // Wait for the last op to be replicated to all nodes. This is needed because when majority read
- // concern is disabled, awaitLastOpCommitted() just checks the node's knowledge of the majority
- // commit point and does not ensure the node has applied the operations.
- replSet.awaitReplication();
-
- for (let i in levels) {
- print("Checking that new updates are visible for readConcern: " + levels[i]);
- // We should see the new state on the secondary with every readconcern.
- assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(),
- 0);
- assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(),
- 100);
- }
+"use strict";
+
+load('jstests/replsets/libs/secondary_reads_test.js');
+
+const name = "secondaryReadsTimestampVisibility";
+const collName = "testColl";
+let secondaryReadsTest = new SecondaryReadsTest(name);
+let replSet = secondaryReadsTest.getReplset();
+
+let primaryDB = secondaryReadsTest.getPrimaryDB();
+let secondaryDB = secondaryReadsTest.getSecondaryDB();
+
+if (!primaryDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
secondaryReadsTest.stop();
+ return;
+}
+let primaryColl = primaryDB.getCollection(collName);
+
+// Create a collection and an index. Insert some data.
+primaryDB.runCommand({drop: collName});
+assert.commandWorked(primaryDB.runCommand({create: collName}));
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {y: 1}, name: "y_1", unique: true}]}));
+for (let i = 0; i < 100; i++) {
+ assert.commandWorked(primaryColl.insert({_id: i, x: 0, y: i + 1}));
+}
+
+replSet.awaitLastOpCommitted();
+// This function includes a call to awaitReplication().
+replSet.waitForAllIndexBuildsToFinish(primaryDB.getName(), collName);
+
+// Sanity check.
+assert.eq(secondaryDB.getCollection(collName).find({x: 0}).itcount(), 100);
+assert.eq(secondaryDB.getCollection(collName).find({y: {$gte: 1, $lt: 101}}).itcount(), 100);
+
+// Prevent a batch from completing on the secondary.
+let pauseAwait = secondaryReadsTest.pauseSecondaryBatchApplication();
+
+// Update x to 1 in each document with default writeConcern and make sure we see the correct
+// data on the primary.
+let updates = [];
+for (let i = 0; i < 100; i++) {
+ updates[i] = {q: {_id: i}, u: {x: 1, y: i}};
+}
+assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+assert.eq(primaryColl.find({x: 1}).itcount(), 100);
+assert.eq(primaryColl.find({y: {$gte: 0, $lt: 100}}).itcount(), 100);
+
+// Wait for the batch application to pause.
+pauseAwait();
+
+let levels = ["local", "available", "majority"];
+
+if (!primaryDB.serverStatus().storageEngine.supportsCommittedReads) {
+ levels = ["local", "available"];
+}
+
+// We should see the previous, un-replicated state on the secondary with every readconcern.
+for (let i in levels) {
+ print("Checking that no new updates are visible yet for readConcern: " + levels[i]);
+ assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(),
+ 100);
+ assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(), 0);
+ assert.eq(secondaryDB.getCollection(collName)
+ .find({y: {$gte: 1, $lt: 101}})
+ .readConcern(levels[i])
+ .itcount(),
+ 100);
+}
+
+// Disable the failpoint and let the batch complete.
+secondaryReadsTest.resumeSecondaryBatchApplication();
+
+// Wait for the last op to appear in the majority committed snapshot on each node. This ensures
+// that the op will be visible to a "majority" read.
+replSet.awaitLastOpCommitted();
+
+// Wait for the last op to be replicated to all nodes. This is needed because when majority read
+// concern is disabled, awaitLastOpCommitted() just checks the node's knowledge of the majority
+// commit point and does not ensure the node has applied the operations.
+replSet.awaitReplication();
+
+for (let i in levels) {
+ print("Checking that new updates are visible for readConcern: " + levels[i]);
+ // We should see the new state on the secondary with every readconcern.
+ assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(), 0);
+ assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(),
+ 100);
+}
+secondaryReadsTest.stop();
})();
diff --git a/jstests/replsets/secondary_reads_unique_indexes.js b/jstests/replsets/secondary_reads_unique_indexes.js
index feff1df4545..7941eb8edb3 100644
--- a/jstests/replsets/secondary_reads_unique_indexes.js
+++ b/jstests/replsets/secondary_reads_unique_indexes.js
@@ -27,86 +27,86 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/secondary_reads_test.js");
+load("jstests/replsets/libs/secondary_reads_test.js");
- const name = "secondaryReadsUniqueIndexes";
- const collName = "testColl";
- let secondaryReadsTest = new SecondaryReadsTest(name);
+const name = "secondaryReadsUniqueIndexes";
+const collName = "testColl";
+let secondaryReadsTest = new SecondaryReadsTest(name);
- let primaryDB = secondaryReadsTest.getPrimaryDB();
- let secondaryDB = secondaryReadsTest.getSecondaryDB();
+let primaryDB = secondaryReadsTest.getPrimaryDB();
+let secondaryDB = secondaryReadsTest.getSecondaryDB();
- // Setup collection.
- primaryDB.runCommand({drop: collName});
- assert.commandWorked(primaryDB.runCommand({create: collName}));
+// Set up the collection.
+primaryDB.runCommand({drop: collName});
+assert.commandWorked(primaryDB.runCommand({create: collName}));
- // Create a unique index on the collection in the foreground.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", unique: true}]}));
+// Create a unique index on the collection in the foreground.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", unique: true}]}));
- let replSet = secondaryReadsTest.getReplset();
- replSet.awaitReplication();
+let replSet = secondaryReadsTest.getReplset();
+replSet.awaitReplication();
- // We want to do updates with at least as many different documents as there are parallel batch
- // writer threads (16). Each iteration increments and decrements a uniquely indexed value, 'x'.
- // The goal is that a reader on a secondary might find a case where the unique index constraint
- // is ignored, and an index on x maps to two different records.
- const nOps = 16;
- const nIterations = 50;
- const nReaders = 16;
+// We want to do updates with at least as many different documents as there are parallel batch
+// writer threads (16). Each iteration increments and decrements a uniquely indexed value, 'x'.
+// The goal is that a reader on a secondary might find a case where the unique index constraint
+// is ignored, and an index on x maps to two different records.
+const nOps = 16;
+const nIterations = 50;
+const nReaders = 16;
- // Do a bunch of reads using the 'x' index on the secondary.
- // No errors should be encountered on the secondary.
- let readFn = function() {
- for (let x = 0; x < TestData.nOps; x++) {
- assert.commandWorked(db.runCommand({
- find: TestData.collName,
- filter: {x: x},
- projection: {x: 1},
- readConcern: {level: "local"},
- }));
- // Sleep a bit to make these reader threads less CPU intensive.
- sleep(60);
- }
- };
- TestData.nOps = nOps;
- TestData.collName = collName;
- secondaryReadsTest.startSecondaryReaders(nReaders, readFn);
+// Do a bunch of reads using the 'x' index on the secondary.
+// No errors should be encountered on the secondary.
+let readFn = function() {
+ for (let x = 0; x < TestData.nOps; x++) {
+ assert.commandWorked(db.runCommand({
+ find: TestData.collName,
+ filter: {x: x},
+ projection: {x: 1},
+ readConcern: {level: "local"},
+ }));
+ // Sleep a bit to make these reader threads less CPU intensive.
+ sleep(60);
+ }
+};
+TestData.nOps = nOps;
+TestData.collName = collName;
+secondaryReadsTest.startSecondaryReaders(nReaders, readFn);
+
+// Write the initial documents. Ensure they have been replicated.
+for (let i = 0; i < nOps; i++) {
+ assert.commandWorked(
+ primaryDB.runCommand({insert: collName, documents: [{_id: i, x: i, iter: 0}]}));
+}
+replSet.awaitReplication();
- // Write the initial documents. Ensure they have been replicated.
+// Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration.
+for (let iteration = 0; iteration < nIterations; iteration++) {
+ let updates = [];
+ // Reset each document.
for (let i = 0; i < nOps; i++) {
- assert.commandWorked(
- primaryDB.runCommand({insert: collName, documents: [{_id: i, x: i, iter: 0}]}));
+ updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}};
}
- replSet.awaitReplication();
- // Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration.
- for (let iteration = 0; iteration < nIterations; iteration++) {
- let updates = [];
- // Reset each document.
- for (let i = 0; i < nOps; i++) {
- updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}};
- }
+ assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+ updates = [];
- assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
- updates = [];
-
- // Generate updates that increment x on each document backwards by _id to avoid conficts
- // when applied in-order. When these updates get applied to the secondary, they may get
- // applied out of order by different threads and temporarily violate unique index
- // constraints.
- for (let i = 0; i < nOps; i++) {
- // Start at the end and increment x by 1.
- let end = nOps - i - 1;
- let nextX = end + 1;
- updates[i] = {q: {_id: end}, u: {x: nextX, iter: iteration}};
- }
- print("iteration " + iteration);
- assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+    // Generate updates that increment x on each document backwards by _id to avoid conflicts
+ // when applied in-order. When these updates get applied to the secondary, they may get
+ // applied out of order by different threads and temporarily violate unique index
+ // constraints.
+ for (let i = 0; i < nOps; i++) {
+ // Start at the end and increment x by 1.
+ let end = nOps - i - 1;
+ let nextX = end + 1;
+ updates[i] = {q: {_id: end}, u: {x: nextX, iter: iteration}};
}
+ print("iteration " + iteration);
+ assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+}
- replSet.awaitReplication();
- secondaryReadsTest.stop();
+replSet.awaitReplication();
+secondaryReadsTest.stop();
})();
diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js
index 500def42a51..7f821e0c61d 100644
--- a/jstests/replsets/server8070.js
+++ b/jstests/replsets/server8070.js
@@ -4,146 +4,140 @@
// to sync from member2.
(function() {
- "use strict";
-
- load('jstests/libs/write_concern_util.js');
- load("jstests/replsets/rslib.js");
-
- // helper to ensure two nodes are at the same place in the oplog
- var waitForSameOplogPosition = function(db1, db2, errmsg) {
- assert.soon(function() {
- var last1 =
- db1.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
- var last2 =
- db2.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
- jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2));
-
- return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i));
- }, errmsg);
- };
-
- // start set
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- replSet.startSet();
- replSet.initiate({
- _id: 'testSet',
- members: [
- {_id: 0, host: getHostName() + ":" + replSet.ports[0]},
- {_id: 1, host: getHostName() + ":" + replSet.ports[1], priority: 0},
- {_id: 2, host: getHostName() + ":" + replSet.ports[2], priority: 0}
- ],
- settings: {chainingAllowed: false}
- });
-
- // set up common points of access
- var master = replSet.getPrimary();
- var primary = master.getDB("foo");
- replSet.nodes[1].setSlaveOk();
- replSet.nodes[2].setSlaveOk();
- var member2 = replSet.nodes[1].getDB("admin");
- var member3 = replSet.nodes[2].getDB("admin");
-
- // Do an initial write
- master.getDB("foo").bar.insert({x: 1});
- replSet.awaitReplication();
-
- jsTest.log("Make sure 2 & 3 are syncing from the primary");
- assert.eq(master, replSet.nodes[0]);
- syncFrom(replSet.nodes[1], master, replSet);
- syncFrom(replSet.nodes[2], master, replSet);
-
- jsTest.log("Stop 2's replication");
- member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
-
- jsTest.log("Do a few writes");
- for (var i = 0; i < 25; i++) {
- primary.bar.insert({x: i});
- }
-
- jsTest.log("Make sure 3 is at write #25");
- waitForSameOplogPosition(primary, member3, "node 3 failed to catch up to the primary");
- // This means 3's buffer is empty
-
- jsTest.log("Stop 3's replication");
- member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // logLevel 3 will allow us to see each op the secondary pulls from the primary so that we can
- // determine whether or not all ops are actually being pulled
- member3.runCommand({setParameter: 1, logLevel: 3});
-
- jsTest.log("Start 2's replication");
- member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
-
- jsTest.log("Do some writes");
- for (var i = 25; i < 50; i++) {
- primary.bar.insert({x: i});
- }
-
- jsTest.log("Make sure 2 is at write #50");
- waitForSameOplogPosition(primary, member2, "node 2 failed to catch up to the primary");
- // This means 2's buffer is empty
-
- jsTest.log("Stop 2's replication");
- member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
-
- jsTest.log(
- "Do some writes - 2 & 3 should have up to write #75 in their buffers, but unapplied");
- for (var i = 50; i < 75; i++) {
- primary.bar.insert({x: i});
- }
- var primaryCollectionSize = primary.bar.find().itcount();
- jsTest.log("primary collection size: " + primaryCollectionSize);
- var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
-
- jsTest.log("waiting a bit for the secondaries to get the write");
- sleep(10000);
-
- jsTest.log("Shut down the primary");
- replSet.stop(0);
-
- // make sure 3 doesn't try to sync from 2
- // the sleep 30sec is a hold over from the unsafe assert.throws(assert.soon())
- // which would check for 30 seconds that node 3 didn't try to sync from 2
- sleep(30 * 1000);
- jsTest.log("3 should not attempt to sync from 2, as it cannot clear its buffer");
- var syncingTo = member3.adminCommand({replSetGetStatus: 1}).syncingTo;
- assert(syncingTo !== getHostName() + ":" + replSet.ports[1],
- "node 3 is syncing from node 2 :(");
-
- jsTest.log("Pause 3's bgsync thread");
- stopServerReplication(member3.getMongo());
-
- // count documents in member 3
- assert.eq(26,
- member3.getSisterDB("foo").bar.find().itcount(),
- "collection size incorrect on node 3 before applying ops 25-75");
-
- jsTest.log("Allow 3 to apply ops 25-75");
- assert.commandWorked(member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
- "member 3 rsSyncApplyStop admin command failed");
+"use strict";
- assert.soon(function() {
- var last3 =
- member3.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
- jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true));
- jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount());
- jsTest.log("curop: ");
- printjson(member3.getSisterDB("foo").currentOp(true));
- return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i));
- }, "Replication member 3 did not apply ops 25-75");
-
- jsTest.log("Start 3's bgsync thread");
- restartServerReplication(member3.getMongo());
-
- jsTest.log("Node 3 shouldn't hit rollback");
- var end = (new Date()).getTime() + 10000;
- while ((new Date()).getTime() < end) {
- assert('ROLLBACK' !== member3.runCommand({replSetGetStatus: 1}).members[2].stateStr);
- sleep(30);
- }
-
- // Need to re-enable writes before clean shutdown.
- assert.commandWorked(member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
-
- replSet.stopSet();
+load('jstests/libs/write_concern_util.js');
+load("jstests/replsets/rslib.js");
+// helper to ensure two nodes are at the same place in the oplog
+var waitForSameOplogPosition = function(db1, db2, errmsg) {
+ assert.soon(function() {
+ var last1 = db1.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var last2 = db2.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2));
+
+ return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i));
+ }, errmsg);
+};
+
+// start set
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+replSet.startSet();
+replSet.initiate({
+ _id: 'testSet',
+ members: [
+ {_id: 0, host: getHostName() + ":" + replSet.ports[0]},
+ {_id: 1, host: getHostName() + ":" + replSet.ports[1], priority: 0},
+ {_id: 2, host: getHostName() + ":" + replSet.ports[2], priority: 0}
+ ],
+ settings: {chainingAllowed: false}
+});
+
+// set up common points of access
+var master = replSet.getPrimary();
+var primary = master.getDB("foo");
+replSet.nodes[1].setSlaveOk();
+replSet.nodes[2].setSlaveOk();
+var member2 = replSet.nodes[1].getDB("admin");
+var member3 = replSet.nodes[2].getDB("admin");
+
+// Do an initial write
+master.getDB("foo").bar.insert({x: 1});
+replSet.awaitReplication();
+
+jsTest.log("Make sure 2 & 3 are syncing from the primary");
+assert.eq(master, replSet.nodes[0]);
+syncFrom(replSet.nodes[1], master, replSet);
+syncFrom(replSet.nodes[2], master, replSet);
+
+jsTest.log("Stop 2's replication");
+member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+
+jsTest.log("Do a few writes");
+for (var i = 0; i < 25; i++) {
+ primary.bar.insert({x: i});
+}
+
+jsTest.log("Make sure 3 is at write #25");
+waitForSameOplogPosition(primary, member3, "node 3 failed to catch up to the primary");
+// This means 3's buffer is empty
+
+jsTest.log("Stop 3's replication");
+member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+// logLevel 3 lets us see each op the secondary pulls from the primary, so we can
+// determine whether all ops are actually being pulled.
+member3.runCommand({setParameter: 1, logLevel: 3});
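+// (The default verbosity can be restored later with {setParameter: 1, logLevel: 0}.)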
+
+jsTest.log("Start 2's replication");
+member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+
+jsTest.log("Do some writes");
+for (var i = 25; i < 50; i++) {
+ primary.bar.insert({x: i});
+}
+
+jsTest.log("Make sure 2 is at write #50");
+waitForSameOplogPosition(primary, member2, "node 2 failed to catch up to the primary");
+// This means 2's buffer is empty
+
+jsTest.log("Stop 2's replication");
+member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+
+jsTest.log("Do some writes - 2 & 3 should have up to write #75 in their buffers, but unapplied");
+for (var i = 50; i < 75; i++) {
+ primary.bar.insert({x: i});
+}
+var primaryCollectionSize = primary.bar.find().itcount();
+jsTest.log("primary collection size: " + primaryCollectionSize);
+var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+
+jsTest.log("waiting a bit for the secondaries to get the write");
+sleep(10000);
+
+jsTest.log("Shut down the primary");
+replSet.stop(0);
+
+// make sure 3 doesn't try to sync from 2
+// The 30-second sleep is a holdover from the unsafe assert.throws(assert.soon())
+// pattern, which would check for 30 seconds that node 3 didn't try to sync from 2.
+sleep(30 * 1000);
+jsTest.log("3 should not attempt to sync from 2, as it cannot clear its buffer");
+var syncingTo = member3.adminCommand({replSetGetStatus: 1}).syncingTo;
+assert(syncingTo !== getHostName() + ":" + replSet.ports[1], "node 3 is syncing from node 2 :(");
+
+jsTest.log("Pause 3's bgsync thread");
+stopServerReplication(member3.getMongo());
+
+// count documents in member 3
+assert.eq(26,
+ member3.getSisterDB("foo").bar.find().itcount(),
+ "collection size incorrect on node 3 before applying ops 25-75");
+
+jsTest.log("Allow 3 to apply ops 25-75");
+assert.commandWorked(member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ "member 3 rsSyncApplyStop admin command failed");
+
+assert.soon(function() {
+ var last3 = member3.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true));
+ jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount());
+ jsTest.log("curop: ");
+ printjson(member3.getSisterDB("foo").currentOp(true));
+ return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i));
+}, "Replication member 3 did not apply ops 25-75");
+
+jsTest.log("Start 3's bgsync thread");
+restartServerReplication(member3.getMongo());
+
+jsTest.log("Node 3 shouldn't hit rollback");
+var end = (new Date()).getTime() + 10000;
+while ((new Date()).getTime() < end) {
+ assert('ROLLBACK' !== member3.runCommand({replSetGetStatus: 1}).members[2].stateStr);
+ sleep(30);
+}
+
+// Need to re-enable oplog application (turn rsSyncApplyStop off) before clean shutdown.
+assert.commandWorked(member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+
+replSet.stopSet();
}()); \ No newline at end of file
diff --git a/jstests/replsets/sessions_collection_auto_healing.js b/jstests/replsets/sessions_collection_auto_healing.js
index b75ed876d25..28f3dc51bba 100644
--- a/jstests/replsets/sessions_collection_auto_healing.js
+++ b/jstests/replsets/sessions_collection_auto_healing.js
@@ -1,122 +1,120 @@
load('jstests/libs/sessions_collection.js');
(function() {
- "use strict";
+"use strict";
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var replTest = new ReplSetTest({
- name: 'refresh',
- nodes: [{rsConfig: {votes: 1, priority: 1}}, {rsConfig: {votes: 0, priority: 0}}]
- });
- var nodes = replTest.startSet();
+var replTest = new ReplSetTest({
+ name: 'refresh',
+ nodes: [{rsConfig: {votes: 1, priority: 1}}, {rsConfig: {votes: 0, priority: 0}}]
+});
+var nodes = replTest.startSet();
- replTest.initiate();
- var primary = replTest.getPrimary();
- var primaryAdmin = primary.getDB("admin");
+replTest.initiate();
+var primary = replTest.getPrimary();
+var primaryAdmin = primary.getDB("admin");
- replTest.awaitSecondaryNodes();
- var secondary = replTest.getSecondary();
- var secondaryAdmin = secondary.getDB("admin");
+replTest.awaitSecondaryNodes();
+var secondary = replTest.getSecondary();
+var secondaryAdmin = secondary.getDB("admin");
- // Get the current value of the TTL index so that we can verify it's being properly applied.
- let res = assert.commandWorked(
- primary.adminCommand({getParameter: 1, localLogicalSessionTimeoutMinutes: 1}));
- let timeoutMinutes = res.localLogicalSessionTimeoutMinutes;
+// Get the current value of the TTL index so that we can verify it's being properly applied.
+let res = assert.commandWorked(
+ primary.adminCommand({getParameter: 1, localLogicalSessionTimeoutMinutes: 1}));
+let timeoutMinutes = res.localLogicalSessionTimeoutMinutes;
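+// In validateSessionsCollection(conn, collectionExists, indexExists, timeoutMinutes), the two
+// booleans assert whether config.system.sessions and its TTL index currently exist.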
- // Test that we can use sessions on the primary before the sessions collection exists.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+// Test that we can use sessions on the primary before the sessions collection exists.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- assert.commandWorked(primaryAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(primaryAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(primary, false, false, timeoutMinutes);
- }
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
+}
- // Test that we can use sessions on secondaries before the sessions collection exists.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+// Test that we can use sessions on secondaries before the sessions collection exists.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
- assert.commandWorked(secondaryAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(secondaryAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
- }
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
+}
- // Test that a refresh on a secondary does not create the sessions collection.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+// Test that a refresh on a secondary does not create the sessions collection.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
- assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
- }
- // Test that a refresh on the primary creates the sessions collection.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
+}
+// Test that a refresh on the primary creates the sessions collection.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
- assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, true, timeoutMinutes);
- }
+ validateSessionsCollection(primary, true, true, timeoutMinutes);
+}
- // Test that a refresh on a secondary will not create the TTL index on the sessions collection.
- {
- assert.commandWorked(primary.getDB("config").system.sessions.dropIndex({lastUse: 1}));
+// Test that a refresh on a secondary will not create the TTL index on the sessions collection.
+{
+ assert.commandWorked(primary.getDB("config").system.sessions.dropIndex({lastUse: 1}));
- validateSessionsCollection(primary, true, false, timeoutMinutes);
+ validateSessionsCollection(primary, true, false, timeoutMinutes);
- assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, false, timeoutMinutes);
- }
+ validateSessionsCollection(primary, true, false, timeoutMinutes);
+}
- // Test that a refresh on the primary will create the TTL index on the sessions collection.
- {
- validateSessionsCollection(primary, true, false, timeoutMinutes);
+// Test that a refresh on the primary will create the TTL index on the sessions collection.
+{
+ validateSessionsCollection(primary, true, false, timeoutMinutes);
- assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, true, timeoutMinutes);
- }
+ validateSessionsCollection(primary, true, true, timeoutMinutes);
+}
- timeoutMinutes = 4;
+timeoutMinutes = 4;
- replTest.restart(
- 0,
- {startClean: false, setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
+replTest.restart(
+ 0, {startClean: false, setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
- primary = replTest.getPrimary();
- primaryAdmin = primary.getDB("admin");
- secondary = replTest.getSecondary();
+primary = replTest.getPrimary();
+primaryAdmin = primary.getDB("admin");
+secondary = replTest.getSecondary();
- // Test that a change to the TTL index expiration on restart will generate a collMod to change
- // the expiration time.
- {
- assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+// Test that a change to the TTL index expiration on restart will generate a collMod to change
+// the expiration time.
+{
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, true, timeoutMinutes);
+ validateSessionsCollection(primary, true, true, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, true, true, timeoutMinutes);
- }
-
- replTest.stopSet();
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, true, true, timeoutMinutes);
+}
+replTest.stopSet();
})();
diff --git a/jstests/replsets/shutdown.js b/jstests/replsets/shutdown.js
index b35172a808f..7fc2e19c749 100644
--- a/jstests/replsets/shutdown.js
+++ b/jstests/replsets/shutdown.js
@@ -2,30 +2,30 @@
//
load('jstests/replsets/rslib.js');
(function() {
- "use strict";
+"use strict";
- let ns = "test.coll";
+let ns = "test.coll";
- let rst = new ReplSetTest({
- nodes: 2,
- });
+let rst = new ReplSetTest({
+ nodes: 2,
+});
- let conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- conf.members[1].hidden = true;
+let conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+conf.members[1].hidden = true;
- rst.startSet();
- rst.initiate(conf);
- rst.awaitReplication();
+rst.startSet();
+rst.initiate(conf);
+rst.awaitReplication();
- let secondary = rst.getSecondary();
- rst.stop(secondary);
- let program = rst.start(
- secondary,
- {waitForConnect: false, setParameter: "failpoint.shutdownAtStartup={mode:'alwaysOn'}"});
- // mongod should exit automatically, since failpoint was set.
- let exitCode = waitProgram(program.pid);
- assert.eq(0, exitCode);
- rst.stopSet();
+let secondary = rst.getSecondary();
+rst.stop(secondary);
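+// Failpoints can be enabled at process startup by passing
+// setParameter "failpoint.<name>={mode:'alwaysOn'}".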
+let program = rst.start(
+ secondary,
+ {waitForConnect: false, setParameter: "failpoint.shutdownAtStartup={mode:'alwaysOn'}"});
+// mongod should exit automatically, since the failpoint was set.
+let exitCode = waitProgram(program.pid);
+assert.eq(0, exitCode);
+rst.stopSet();
})();
diff --git a/jstests/replsets/shutdown_primary.js b/jstests/replsets/shutdown_primary.js
index 65eb4ec3e59..bcaefe8c541 100644
--- a/jstests/replsets/shutdown_primary.js
+++ b/jstests/replsets/shutdown_primary.js
@@ -12,56 +12,56 @@
*
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
- // restartReplicationOnSecondaries
- var name = "shutdown_primary";
+load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
+ // restartReplicationOnSecondaries
+var name = "shutdown_primary";
- var replTest = new ReplSetTest({name: name, nodes: 3});
- replTest.startSet();
- replTest.initiate();
+var replTest = new ReplSetTest({name: name, nodes: 3});
+replTest.startSet();
+replTest.initiate();
- var primary = replTest.getPrimary();
- var testDB = primary.getDB(name);
- var timeout = ReplSetTest.kDefaultTimeoutMS;
- assert.writeOK(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));
+var primary = replTest.getPrimary();
+var testDB = primary.getDB(name);
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+assert.writeOK(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));
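+// w: 3 makes the write wait for acknowledgement from all three nodes.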
- jsTestLog("Blocking replication to secondaries.");
- stopReplicationOnSecondaries(replTest);
+jsTestLog("Blocking replication to secondaries.");
+stopReplicationOnSecondaries(replTest);
- jsTestLog("Executing write to primary.");
- assert.writeOK(testDB.foo.insert({x: 2}));
+jsTestLog("Executing write to primary.");
+assert.writeOK(testDB.foo.insert({x: 2}));
- jsTestLog("Attempting to shut down primary.");
- assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}),
- ErrorCodes.ExceededTimeLimit,
- "shut down did not fail with 'ExceededTimeLimit'");
+jsTestLog("Attempting to shut down primary.");
+assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}),
+ ErrorCodes.ExceededTimeLimit,
+ "shut down did not fail with 'ExceededTimeLimit'");
- jsTestLog("Verifying primary did not shut down.");
- assert.writeOK(testDB.foo.insert({x: 3}));
+jsTestLog("Verifying primary did not shut down.");
+assert.writeOK(testDB.foo.insert({x: 3}));
- jsTestLog("Shutting down primary in a parallel shell");
- var awaitShell = startParallelShell(function() {
- db.adminCommand({shutdown: 1, timeoutSecs: 60});
- }, primary.port);
+jsTestLog("Shutting down primary in a parallel shell");
+var awaitShell = startParallelShell(function() {
+ db.adminCommand({shutdown: 1, timeoutSecs: 60});
+}, primary.port);
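+// timeoutSecs gives the shutdown command up to 60 seconds for the secondaries to catch up.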
- jsTestLog("Resuming replication.");
- restartReplicationOnSecondaries(replTest);
+jsTestLog("Resuming replication.");
+restartReplicationOnSecondaries(replTest);
- jsTestLog("Verifying primary shut down and cannot be connected to.");
- // Successfully starting shutdown throws a network error.
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, "expected shutdown to close the shell's connection");
- assert.soonNoExcept(function() {
- // The parallel shell exits while shutdown is in progress, and if this happens early enough,
- // the primary can still accept connections despite successfully starting to shutdown.
- // So, retry connecting until connections cannot be established and an error is thrown.
- assert.throws(function() {
- new Mongo(primary.host);
- });
- return true;
- }, "expected primary node to shut down and not be connectable");
+jsTestLog("Verifying primary shut down and cannot be connected to.");
+// Successfully starting shutdown throws a network error.
+var exitCode = awaitShell({checkExitSuccess: false});
+assert.neq(0, exitCode, "expected shutdown to close the shell's connection");
+assert.soonNoExcept(function() {
+    // The parallel shell exits while shutdown is in progress, and if this happens early enough,
+    // the primary can still accept connections despite having successfully started to shut down.
+    // So, retry connecting until connections cannot be established and an error is thrown.
+ assert.throws(function() {
+ new Mongo(primary.host);
+ });
+ return true;
+}, "expected primary node to shut down and not be connectable");
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/shutdown_with_prepared_transaction.js b/jstests/replsets/shutdown_with_prepared_transaction.js
index f8844b21074..d241df1a68b 100644
--- a/jstests/replsets/shutdown_with_prepared_transaction.js
+++ b/jstests/replsets/shutdown_with_prepared_transaction.js
@@ -4,35 +4,35 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const conn = replTest.getPrimary();
+const conn = replTest.getPrimary();
- const dbName = "test";
- const collName = "shutdown_with_prepared_txn";
- const testDB = conn.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "shutdown_with_prepared_txn";
+const testDB = conn.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = conn.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = conn.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- jsTestLog("Starting a simple transaction and putting it into prepare");
+jsTestLog("Starting a simple transaction and putting it into prepare");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
- PrepareHelpers.prepareTransaction(session);
+PrepareHelpers.prepareTransaction(session);
- jsTestLog("Shutting down the set with the transaction still in prepare state");
- // Skip validation during ReplSetTest cleanup since validate() will block behind the prepared
- // transaction's locks when trying to take a collection X lock.
- replTest.stopSet(null /*signal*/, false /*forRestart*/, {skipValidation: true});
+jsTestLog("Shutting down the set with the transaction still in prepare state");
+// Skip validation during ReplSetTest cleanup since validate() will block behind the prepared
+// transaction's locks when trying to take a collection X lock.
+replTest.stopSet(null /*signal*/, false /*forRestart*/, {skipValidation: true});
}());
diff --git a/jstests/replsets/sized_zero_capped.js b/jstests/replsets/sized_zero_capped.js
index 41debd6d17c..91aaacdda40 100644
--- a/jstests/replsets/sized_zero_capped.js
+++ b/jstests/replsets/sized_zero_capped.js
@@ -2,28 +2,28 @@
// SECONDARY to crash. (see SERVER-18792)
(function() {
- "use strict";
+"use strict";
- var name = "sized_zero_capped";
- var replTest = new ReplSetTest({name: name, nodes: 3});
- var nodes = replTest.nodeList();
- replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], priority: 0}
- ]
- });
+var name = "sized_zero_capped";
+var replTest = new ReplSetTest({name: name, nodes: 3});
+var nodes = replTest.nodeList();
+replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], priority: 0}
+ ]
+});
- var testDB = replTest.getPrimary().getDB(name);
- testDB.createCollection(name, {capped: true, size: 0});
- replTest.awaitReplication();
+var testDB = replTest.getPrimary().getDB(name);
+testDB.createCollection(name, {capped: true, size: 0});
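+// Replicating the create op for a zero-size capped collection is what crashed
+// secondaries before SERVER-18792 was fixed.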
+replTest.awaitReplication();
- // ensure secondary is still up and responsive
- var secondary = replTest.getSecondary();
- assert.commandWorked(secondary.getDB(name).runCommand({ping: 1}));
+// ensure secondary is still up and responsive
+var secondary = replTest.getSecondary();
+assert.commandWorked(secondary.getDB(name).runCommand({ping: 1}));
- replTest.stopSet();
+replTest.stopSet();
}());
diff --git a/jstests/replsets/slave_delay_clean_shutdown.js b/jstests/replsets/slave_delay_clean_shutdown.js
index 80371048b6b..2d3e75824da 100644
--- a/jstests/replsets/slave_delay_clean_shutdown.js
+++ b/jstests/replsets/slave_delay_clean_shutdown.js
@@ -3,64 +3,64 @@
// @tags: [requires_persistence]
load('jstests/replsets/rslib.js');
(function() {
- "use strict";
+"use strict";
- // Skip db hash check since secondary has slave delay.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check since secondary has slave delay.
+TestData.skipCheckDBHashes = true;
- var ns = "test.coll";
+var ns = "test.coll";
- var rst = new ReplSetTest({
- nodes: 2,
- });
+var rst = new ReplSetTest({
+ nodes: 2,
+});
- var conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- conf.members[1].hidden = true;
- conf.members[1].slaveDelay = 0; // Set later.
+var conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+conf.members[1].hidden = true;
+conf.members[1].slaveDelay = 0; // Set later.
- rst.startSet();
- rst.initiate(conf);
+rst.startSet();
+rst.initiate(conf);
- var master = rst.getPrimary(); // Waits for PRIMARY state.
+var master = rst.getPrimary(); // Waits for PRIMARY state.
- // Push some ops through before setting slave delay.
- assert.writeOK(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
+// Push some ops through before setting slave delay.
+assert.writeOK(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
- // Set slaveDelay and wait for secondary to receive the change.
- conf = rst.getReplSetConfigFromNode();
- conf.version++;
- conf.members[1].slaveDelay = 24 * 60 * 60;
- reconfig(rst, conf);
- assert.soon(() => rst.getReplSetConfigFromNode(1).members[1].slaveDelay > 0,
- () => rst.getReplSetConfigFromNode(1));
+// Set slaveDelay and wait for secondary to receive the change.
+conf = rst.getReplSetConfigFromNode();
+conf.version++;
+conf.members[1].slaveDelay = 24 * 60 * 60;
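+// slaveDelay is specified in seconds; 24 hours guarantees no delayed op applies mid-test.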
+reconfig(rst, conf);
+assert.soon(() => rst.getReplSetConfigFromNode(1).members[1].slaveDelay > 0,
+ () => rst.getReplSetConfigFromNode(1));
- sleep(2000); // The secondary apply loop only checks for slaveDelay changes once per second.
- var secondary = rst.getSecondary();
- const lastOp = getLatestOp(secondary);
+sleep(2000); // The secondary apply loop only checks for slaveDelay changes once per second.
+var secondary = rst.getSecondary();
+const lastOp = getLatestOp(secondary);
- assert.writeOK(master.getCollection(ns).insert([{}, {}, {}]));
- assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0,
- () => secondary.adminCommand('serverStatus').metrics.repl);
- assert.neq(getLatestOp(master), lastOp);
- assert.eq(getLatestOp(secondary), lastOp);
+assert.writeOK(master.getCollection(ns).insert([{}, {}, {}]));
+assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0,
+ () => secondary.adminCommand('serverStatus').metrics.repl);
+assert.neq(getLatestOp(master), lastOp);
+assert.eq(getLatestOp(secondary), lastOp);
- sleep(2000); // Prevent the test from passing by chance.
- assert.eq(getLatestOp(secondary), lastOp);
+sleep(2000); // Prevent the test from passing by chance.
+assert.eq(getLatestOp(secondary), lastOp);
- // Make sure shutdown won't take a long time due to I/O.
- secondary.adminCommand('fsync');
+// Make sure shutdown won't take a long time due to I/O.
+secondary.adminCommand('fsync');
- // Shutting down shouldn't take long.
- assert.lt(Date.timeFunc(() => rst.stop(1)), 60 * 1000);
+// Shutting down shouldn't take long.
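+// (Date.timeFunc runs the given function and returns the elapsed time in milliseconds.)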
+assert.lt(Date.timeFunc(() => rst.stop(1)), 60 * 1000);
- secondary = rst.restart(1);
- rst.awaitSecondaryNodes();
+secondary = rst.restart(1);
+rst.awaitSecondaryNodes();
- assert.eq(getLatestOp(secondary), lastOp);
- sleep(2000); // Prevent the test from passing by chance.
- assert.eq(getLatestOp(secondary), lastOp);
+assert.eq(getLatestOp(secondary), lastOp);
+sleep(2000); // Prevent the test from passing by chance.
+assert.eq(getLatestOp(secondary), lastOp);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index 4a356eb62da..98dc46bacc4 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -1,7 +1,6 @@
load("jstests/replsets/rslib.js");
doTest = function(signal) {
-
var name = "slaveDelay";
var host = getHostName();
diff --git a/jstests/replsets/slaveok_read_pref.js b/jstests/replsets/slaveok_read_pref.js
index cb923324741..78b0139adfd 100644
--- a/jstests/replsets/slaveok_read_pref.js
+++ b/jstests/replsets/slaveok_read_pref.js
@@ -1,55 +1,53 @@
// Test that slaveOk is implicitly allowed for queries on a secondary with a read preference other
// than 'primary', and that queries which do have 'primary' read preference fail.
(function() {
- "use strict";
-
- const readPrefs =
- [undefined, "primary", "secondary", "primaryPreferred", "secondaryPreferred", "nearest"];
-
- const rst = new ReplSetTest({nodes: 3});
- rst.startSet();
-
- const nodes = rst.nodeList();
- rst.initiate({
- _id: jsTestName(),
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], priority: 0},
- {_id: 2, host: nodes[2], arbiterOnly: true}
- ]
- });
-
- const priDB = rst.getPrimary().getDB(jsTestName());
- assert(priDB.dropDatabase());
-
- assert.commandWorked(priDB.test.insert({a: 1}, {writeConcern: {w: "majority"}}));
-
- const secDB = rst.getSecondary().getDB(jsTestName());
-
- for (let readMode of["commands", "legacy"]) {
- for (let readPref of readPrefs) {
- for (let slaveOk of[true, false]) {
- const testType = {readMode: readMode, readPref: readPref, slaveOk: slaveOk};
-
- secDB.getMongo().forceReadMode(readMode);
- secDB.getMongo().setSlaveOk(slaveOk);
-
- const cursor =
- (readPref ? secDB.test.find().readPref(readPref) : secDB.test.find());
-
- if (readPref === "primary" || (!readPref && !slaveOk)) {
- // Attempting to run the query throws an error of type NotMasterNoSlaveOk.
- const slaveOkErr = assert.throws(() => cursor.itcount(), [], tojson(testType));
- assert.commandFailedWithCode(slaveOkErr, ErrorCodes.NotMasterNoSlaveOk);
- } else {
- // Succeeds for all non-primary readPrefs, and for no readPref iff slaveOk.
- const docCount =
- assert.doesNotThrow(() => cursor.itcount(), [], tojson(testType));
- assert.eq(docCount, 1);
- }
+"use strict";
+
+const readPrefs =
+ [undefined, "primary", "secondary", "primaryPreferred", "secondaryPreferred", "nearest"];
+
+const rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+
+const nodes = rst.nodeList();
+rst.initiate({
+ _id: jsTestName(),
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], priority: 0},
+ {_id: 2, host: nodes[2], arbiterOnly: true}
+ ]
+});
+
+const priDB = rst.getPrimary().getDB(jsTestName());
+assert(priDB.dropDatabase());
+
+assert.commandWorked(priDB.test.insert({a: 1}, {writeConcern: {w: "majority"}}));
+
+const secDB = rst.getSecondary().getDB(jsTestName());
+
+for (let readMode of ["commands", "legacy"]) {
+ for (let readPref of readPrefs) {
+ for (let slaveOk of [true, false]) {
+ const testType = {readMode: readMode, readPref: readPref, slaveOk: slaveOk};
+
+ secDB.getMongo().forceReadMode(readMode);
+ secDB.getMongo().setSlaveOk(slaveOk);
+
+ const cursor = (readPref ? secDB.test.find().readPref(readPref) : secDB.test.find());
+
+ if (readPref === "primary" || (!readPref && !slaveOk)) {
+ // Attempting to run the query throws an error of type NotMasterNoSlaveOk.
+ const slaveOkErr = assert.throws(() => cursor.itcount(), [], tojson(testType));
+ assert.commandFailedWithCode(slaveOkErr, ErrorCodes.NotMasterNoSlaveOk);
+ } else {
+                // Succeeds for all non-primary readPrefs; with no readPref, succeeds iff slaveOk.
+ const docCount = assert.doesNotThrow(() => cursor.itcount(), [], tojson(testType));
+ assert.eq(docCount, 1);
}
}
}
+}
- rst.stopSet();
+rst.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/replsets/speculative_majority_find.js b/jstests/replsets/speculative_majority_find.js
index fecfbf5dea1..94463145b7e 100644
--- a/jstests/replsets/speculative_majority_find.js
+++ b/jstests/replsets/speculative_majority_find.js
@@ -10,148 +10,148 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
-
- let name = "speculative_majority_find";
- let replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
-
- let dbName = name;
- let collName = "coll";
-
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- let primaryDB = primary.getDB(dbName);
- let secondaryDB = secondary.getDB(dbName);
- let primaryColl = primaryDB[collName];
- // Create a collection.
- assert.commandWorked(primaryColl.insert({}, {writeConcern: {w: "majority"}}));
-
- //
- // Test basic reads with speculative majority.
- //
-
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
- assert.commandWorked(primaryColl.insert({_id: 1}));
-
- jsTestLog("Do a speculative majority read that should time out.");
- let res = primaryDB.runCommand({
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
+
+let name = "speculative_majority_find";
+let replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
+
+let dbName = name;
+let collName = "coll";
+
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+let primaryDB = primary.getDB(dbName);
+let secondaryDB = secondary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+// Create a collection.
+assert.commandWorked(primaryColl.insert({}, {writeConcern: {w: "majority"}}));
+
+//
+// Test basic reads with speculative majority.
+//
+
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
+assert.commandWorked(primaryColl.insert({_id: 1}));
+
+jsTestLog("Do a speculative majority read that should time out.");
+let res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority"},
+ filter: {_id: 1},
+ allowSpeculativeMajorityRead: true,
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
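+// A speculative majority read executes against the latest snapshot and then waits for that
+// timestamp to majority-commit before returning; with replication stopped, the wait
+// outlasts maxTimeMS.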
+
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+jsTestLog("Do a speculative majority read that should succeed.");
+res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority"},
+ filter: {_id: 1},
+ allowSpeculativeMajorityRead: true
+});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, 1);
+assert.eq(res.cursor.firstBatch[0], {_id: 1});
+
+//
+// Test that blocked reads can succeed when a write majority commits.
+//
+
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
+assert.commandWorked(primaryColl.insert({_id: 2}));
+
+jsTestLog("Do a speculative majority that should block until write commits.");
+let speculativeRead = new ScopedThread(function(host, dbName, collName) {
+ const nodeDB = new Mongo(host).getDB(dbName);
+ return nodeDB.runCommand({
find: collName,
readConcern: {level: "majority"},
- filter: {_id: 1},
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- jsTestLog("Do a speculative majority read that should succeed.");
- res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 1},
- allowSpeculativeMajorityRead: true
- });
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, 1);
- assert.eq(res.cursor.firstBatch[0], {_id: 1});
-
- //
- // Test that blocked reads can succeed when a write majority commits.
- //
-
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
- assert.commandWorked(primaryColl.insert({_id: 2}));
-
- jsTestLog("Do a speculative majority that should block until write commits.");
- let speculativeRead = new ScopedThread(function(host, dbName, collName) {
- const nodeDB = new Mongo(host).getDB(dbName);
- return nodeDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 2},
- allowSpeculativeMajorityRead: true
- });
- }, primary.host, dbName, collName);
- speculativeRead.start();
-
- // Wait for the read to start on the server.
- assert.soon(() => primaryDB.currentOp({ns: primaryColl.getFullName(), "command.find": collName})
- .inprog.length === 1);
-
- // Let the previous write commit.
- restartServerReplication(secondary);
- assert.commandWorked(
- primaryColl.insert({_id: "commit_last_write"}, {writeConcern: {w: "majority"}}));
-
- // Make sure the read finished and returned correct results.
- speculativeRead.join();
- res = speculativeRead.returnData();
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, 1);
- assert.eq(res.cursor.firstBatch[0], {_id: 2});
-
- //
- // Test 'afterClusterTime' reads with speculative majority.
- //
- stopServerReplication(secondary);
-
- // Insert a document on the primary and record the response.
- let writeRes = primaryDB.runCommand({insert: collName, documents: [{_id: 3}]});
- assert.commandWorked(writeRes);
-
- jsTestLog(
- "Do a speculative majority read on primary with 'afterClusterTime' that should time out.");
- res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: writeRes.$clusterTime,
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog(
- "Do a speculative majority read on secondary with 'afterClusterTime' that should time out.");
- res = secondaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: writeRes.$clusterTime,
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // Let the previous write majority commit.
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- jsTestLog("Do a speculative majority read with 'afterClusterTime' that should succeed.");
- res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: res.$clusterTime,
+ filter: {_id: 2},
allowSpeculativeMajorityRead: true
});
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, 1);
- assert.eq(res.cursor.firstBatch[0], {_id: 3});
-
- replTest.stopSet();
+}, primary.host, dbName, collName);
+speculativeRead.start();
+
+// Wait for the read to start on the server.
+assert.soon(() => primaryDB.currentOp({ns: primaryColl.getFullName(), "command.find": collName})
+ .inprog.length === 1);
+
+// Let the previous write commit.
+restartServerReplication(secondary);
+assert.commandWorked(
+ primaryColl.insert({_id: "commit_last_write"}, {writeConcern: {w: "majority"}}));
+
+// Make sure the read finished and returned correct results.
+speculativeRead.join();
+res = speculativeRead.returnData();
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, 1);
+assert.eq(res.cursor.firstBatch[0], {_id: 2});
+
+//
+// Test 'afterClusterTime' reads with speculative majority.
+//
+stopServerReplication(secondary);
+
+// Insert a document on the primary and record the response.
+let writeRes = primaryDB.runCommand({insert: collName, documents: [{_id: 3}]});
+assert.commandWorked(writeRes);
+
+jsTestLog(
+ "Do a speculative majority read on primary with 'afterClusterTime' that should time out.");
+res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
+ filter: {_id: 3},
+ $clusterTime: writeRes.$clusterTime,
+ allowSpeculativeMajorityRead: true,
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog(
+ "Do a speculative majority read on secondary with 'afterClusterTime' that should time out.");
+res = secondaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
+ filter: {_id: 3},
+ $clusterTime: writeRes.$clusterTime,
+ allowSpeculativeMajorityRead: true,
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// Let the previous write majority commit.
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+jsTestLog("Do a speculative majority read with 'afterClusterTime' that should succeed.");
+res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
+ filter: {_id: 3},
+ $clusterTime: res.$clusterTime,
+ allowSpeculativeMajorityRead: true
+});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, 1);
+assert.eq(res.cursor.firstBatch[0], {_id: 3});
+
+replTest.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/replsets/speculative_majority_supported_commands.js b/jstests/replsets/speculative_majority_supported_commands.js
index 26fd40aa244..7c1bbdb2434 100644
--- a/jstests/replsets/speculative_majority_supported_commands.js
+++ b/jstests/replsets/speculative_majority_supported_commands.js
@@ -7,70 +7,70 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- let name = "speculative_majority_supported_commands";
- let replTest =
- new ReplSetTest({name: name, nodes: 1, nodeOptions: {enableMajorityReadConcern: 'false'}});
- replTest.startSet();
- replTest.initiate();
+let name = "speculative_majority_supported_commands";
+let replTest =
+ new ReplSetTest({name: name, nodes: 1, nodeOptions: {enableMajorityReadConcern: 'false'}});
+replTest.startSet();
+replTest.initiate();
- let dbName = name;
- let collName = "coll";
+let dbName = name;
+let collName = "coll";
- let primary = replTest.getPrimary();
- let primaryDB = primary.getDB(dbName);
+let primary = replTest.getPrimary();
+let primaryDB = primary.getDB(dbName);
- // Create a collection.
- assert.commandWorked(primaryDB[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+// Create a collection.
+assert.commandWorked(primaryDB[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
- /**
- * Allowed commands.
- */
+/**
+ * Allowed commands.
+ */
- // Change stream aggregation is allowed.
- let res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "majority"}
- });
- assert.commandWorked(res);
+// Change stream aggregation is allowed.
+let res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "majority"}
+});
+assert.commandWorked(res);
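+// With enableMajorityReadConcern: 'false', change streams satisfy majority read concern
+// speculatively instead of reading from a majority-committed snapshot.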
- // Find query with speculative flag is allowed.
- res = primaryDB.runCommand(
- {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
- assert.commandWorked(res);
+// Find query with speculative flag is allowed.
+res = primaryDB.runCommand(
+ {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
+assert.commandWorked(res);
- /**
- * Disallowed commands.
- */
+/**
+ * Disallowed commands.
+ */
- // A non change stream aggregation is not allowed.
- res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$project: {}}],
- cursor: {},
- readConcern: {level: "majority"}
- });
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// A non change stream aggregation is not allowed.
+res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$project: {}}],
+ cursor: {},
+ readConcern: {level: "majority"}
+});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- // The 'find' command without requisite flag is unsupported.
- res = primaryDB.runCommand({find: collName, readConcern: {level: "majority"}});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// The 'find' command without requisite flag is unsupported.
+res = primaryDB.runCommand({find: collName, readConcern: {level: "majority"}});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- res = primaryDB.runCommand(
- {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: false});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+res = primaryDB.runCommand(
+ {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: false});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- // Another basic read command. We don't exhaustively check all commands.
- res = primaryDB.runCommand({count: collName, readConcern: {level: "majority"}});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// Another basic read command. We don't exhaustively check all commands.
+res = primaryDB.runCommand({count: collName, readConcern: {level: "majority"}});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- // Speculative flag is only allowed on find commands.
- res = primaryDB.runCommand(
- {count: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// Speculative flag is only allowed on find commands.
+res = primaryDB.runCommand(
+ {count: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- replTest.stopSet();
+replTest.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/replsets/speculative_read_transaction.js b/jstests/replsets/speculative_read_transaction.js
index 5ed70ff05e8..a7a8902d02e 100644
--- a/jstests/replsets/speculative_read_transaction.js
+++ b/jstests/replsets/speculative_read_transaction.js
@@ -5,105 +5,105 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
- load("jstests/libs/write_concern_util.js"); // For stopServerReplication
-
- const dbName = "test";
- const collName = "speculative_read_transaction";
-
- const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const testDB = primary.getDB(dbName);
- const coll = testDB[collName];
-
- function runTest(sessionOptions) {
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- // Do an initial write so we have something to update.
- assert.commandWorked(coll.insert([{_id: 0, x: 0}], {w: "majority"}));
- rst.awaitLastOpCommitted();
-
- // Stop replication on the secondary so the majority commit never moves forward.
- stopServerReplication(secondary);
-
- // Do a local update in another client.
- // The transaction should see this, due to speculative behavior.
- const otherclient = new Mongo(primary.host);
- assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 1}, {w: 1}));
-
- // Initiate a session on the primary.
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
-
- // Abort does not wait for write concern.
- jsTestLog("Starting majority-abort transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- assert.commandWorked(session.abortTransaction_forTesting());
-
- // This transaction should complete because it does not use majority write concern.
- jsTestLog("Starting non-majority commit transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // This transaction should not complete because it uses snapshot read concern, majority
- // write concern and the commit point is not advancing.
- jsTestLog("Starting majority-commit snapshot-read transaction");
- session.startTransaction(
- {readConcern: {level: "snapshot"}, writeConcern: {w: "majority", wtimeout: 5000}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- // Allow the majority commit point to advance to allow the failed write concern to clear.
- restartServerReplication(secondary);
- rst.awaitReplication();
- stopServerReplication(secondary);
-
- // Do another local update from another client
- assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 2}, {w: 1}));
-
- // This transaction should not complete because it uses local read concern upconverted to
- // snapshot.
- // TODO(SERVER-34881): Once default read concern is speculative majority, local read
- // concern should not wait for the majority commit point to advance.
- jsTestLog("Starting majority-commit local-read transaction");
- session.startTransaction(
- {readConcern: {level: "local"}, writeConcern: {w: "majority", wtimeout: 5000}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- // Allow the majority commit point to advance to allow the failed write concern to clear.
- restartServerReplication(secondary);
- rst.awaitReplication();
- stopServerReplication(secondary);
-
- // Do another local update from another client
- assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 3}, {w: 1}));
-
- // This transaction should not complete because it uses majority read concern, majority
- // write concern, and the commit point is not advancing.
- jsTestLog("Starting majority-commit majority-read transaction");
- session.startTransaction(
- {readConcern: {level: "majority"}, writeConcern: {w: "majority", wtimeout: 5000}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 3});
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- // Restart server replication to allow majority commit point to advance.
- restartServerReplication(secondary);
-
- session.endSession();
- }
- runTest({causalConsistency: false});
- runTest({causalConsistency: true});
-
- rst.stopSet();
+"use strict";
+load("jstests/libs/write_concern_util.js"); // For stopServerReplication
+
+const dbName = "test";
+const collName = "speculative_read_transaction";
+
+const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const testDB = primary.getDB(dbName);
+const coll = testDB[collName];
+
+function runTest(sessionOptions) {
+ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+ // Do an initial write so we have something to update.
+ assert.commandWorked(coll.insert([{_id: 0, x: 0}], {w: "majority"}));
+ rst.awaitLastOpCommitted();
+
+ // Stop replication on the secondary so the majority commit never moves forward.
+ stopServerReplication(secondary);
+
+ // Do a local update in another client.
+ // The transaction should see this, due to speculative behavior.
+ const otherclient = new Mongo(primary.host);
+ assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 1}, {w: 1}));
+
+ // Initiate a session on the primary.
+ const session = testDB.getMongo().startSession(sessionOptions);
+ const sessionDb = session.getDatabase(dbName);
+ const sessionColl = sessionDb.getCollection(collName);
+
+ // Abort does not wait for write concern.
+ jsTestLog("Starting majority-abort transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ // This transaction should complete because it does not use majority write concern.
+ jsTestLog("Starting non-majority commit transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // This transaction should not complete because it uses snapshot read concern, majority
+ // write concern and the commit point is not advancing.
+ jsTestLog("Starting majority-commit snapshot-read transaction");
+ session.startTransaction(
+ {readConcern: {level: "snapshot"}, writeConcern: {w: "majority", wtimeout: 5000}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ // Allow the majority commit point to advance to allow the failed write concern to clear.
+ restartServerReplication(secondary);
+ rst.awaitReplication();
+ stopServerReplication(secondary);
+
+ // Do another local update from another client
+ assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 2}, {w: 1}));
+
+ // This transaction should not complete because it uses local read concern upconverted to
+ // snapshot.
+ // TODO(SERVER-34881): Once default read concern is speculative majority, local read
+ // concern should not wait for the majority commit point to advance.
+ jsTestLog("Starting majority-commit local-read transaction");
+ session.startTransaction(
+ {readConcern: {level: "local"}, writeConcern: {w: "majority", wtimeout: 5000}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ // Allow the majority commit point to advance to allow the failed write concern to clear.
+ restartServerReplication(secondary);
+ rst.awaitReplication();
+ stopServerReplication(secondary);
+
+ // Do another local update from another client
+ assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 3}, {w: 1}));
+
+ // This transaction should not complete because it uses majority read concern, majority
+ // write concern, and the commit point is not advancing.
+ jsTestLog("Starting majority-commit majority-read transaction");
+ session.startTransaction(
+ {readConcern: {level: "majority"}, writeConcern: {w: "majority", wtimeout: 5000}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 3});
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ // Restart server replication to allow majority commit point to advance.
+ restartServerReplication(secondary);
+
+ session.endSession();
+}
+runTest({causalConsistency: false});
+runTest({causalConsistency: true});
+
+rst.stopSet();
}());
diff --git a/jstests/replsets/speculative_transaction.js b/jstests/replsets/speculative_transaction.js
index 565b41c8300..e138612dcd1 100644
--- a/jstests/replsets/speculative_transaction.js
+++ b/jstests/replsets/speculative_transaction.js
@@ -6,120 +6,120 @@
* @tags: [uses_transactions, requires_majority_read_concern]
*/
(function() {
- "use strict";
- load("jstests/libs/write_concern_util.js"); // For stopServerReplication
+"use strict";
+load("jstests/libs/write_concern_util.js"); // For stopServerReplication
- const dbName = "test";
- const collName = "speculative_transaction";
+const dbName = "test";
+const collName = "speculative_transaction";
- const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- var testDB = primary.getDB(dbName);
- const coll = testDB[collName];
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+var testDB = primary.getDB(dbName);
+const coll = testDB[collName];
- function runTest(sessionOptions) {
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+function runTest(sessionOptions) {
+ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- // Do an initial write so we have something to update.
- assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}], {w: "majority"}));
- rst.awaitLastOpCommitted();
+ // Do an initial write so we have something to update.
+ assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}], {w: "majority"}));
+ rst.awaitLastOpCommitted();
- // Stop replication on the secondary so the majority commit never moves forward.
- stopServerReplication(secondary);
+ // Stop replication on the secondary so the majority commit never moves forward.
+ stopServerReplication(secondary);
- // Initiate a session on the primary.
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
+ // Initiate a session on the primary.
+ const session = testDB.getMongo().startSession(sessionOptions);
+ const sessionDb = session.getDatabase(dbName);
+ const sessionColl = sessionDb.getCollection(collName);
- // Start the first transaction. Do not use majority commit for this one.
- jsTestLog("Starting first transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ // Start the first transaction. Do not use majority commit for this one.
+ jsTestLog("Starting first transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {x: 1}}));
+ assert.commandWorked(sessionColl.update({_id: 0}, {$set: {x: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ assert.commandWorked(session.commitTransaction_forTesting());
- // The document should be updated on the local snapshot.
- assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 1});
+ // The document should be updated on the local snapshot.
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 1});
- // The document should not be updated in the majority snapshot.
- assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
+ // The document should not be updated in the majority snapshot.
+ assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
- jsTestLog("Starting second transaction");
- // Start a second transaction. Still do not use majority commit for this one.
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ jsTestLog("Starting second transaction");
+ // Start a second transaction. Still do not use majority commit for this one.
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- // We should see the updated doc within the transaction as a result of speculative read
- // concern.
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ // We should see the updated doc within the transaction as a result of speculative read
+ // concern.
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- // Update it again.
- assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
+ // Update it again.
+ assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
- // Update a different document outside the transaction.
- assert.commandWorked(coll.update({_id: 1}, {$set: {y: 1}}));
+ // Update a different document outside the transaction.
+ assert.commandWorked(coll.update({_id: 1}, {$set: {y: 1}}));
- // Within the transaction, we should not see the out-of-transaction update.
- assert.eq(sessionColl.findOne({_id: 1}), {_id: 1});
+ // Within the transaction, we should not see the out-of-transaction update.
+ assert.eq(sessionColl.findOne({_id: 1}), {_id: 1});
- assert.commandWorked(session.commitTransaction_forTesting());
+ assert.commandWorked(session.commitTransaction_forTesting());
- // The document should be updated on the local snapshot.
- assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 2});
+ // The document should be updated on the local snapshot.
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 2});
- // The document should not be updated in the majority snapshot.
- assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
+ // The document should not be updated in the majority snapshot.
+ assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
- // Make sure write conflicts are caught with speculative transactions.
- jsTestLog("Starting a conflicting transaction which will be auto-aborted");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ // Make sure write conflicts are caught with speculative transactions.
+ jsTestLog("Starting a conflicting transaction which will be auto-aborted");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- // Read some data inside the transaction.
- assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
+ // Read some data inside the transaction.
+ assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
- // Write it outside the transaction.
- assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
+ // Write it outside the transaction.
+ assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
- // Can still read old data in transaction.
- assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
+ // Can still read old data in transaction.
+ assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
- // But update fails
- assert.commandFailedWithCode(sessionColl.update({_id: 1}, {$inc: {x: 1}}),
- ErrorCodes.WriteConflict);
+ // But the update fails with a write conflict.
+ assert.commandFailedWithCode(sessionColl.update({_id: 1}, {$inc: {x: 1}}),
+ ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- // Restart server replication to allow majority commit point to advance.
- restartServerReplication(secondary);
+ // Restart server replication to allow majority commit point to advance.
+ restartServerReplication(secondary);
- jsTestLog("Starting final transaction (with majority commit)");
- // Start a third transaction, with majority commit.
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+ jsTestLog("Starting final transaction (with majority commit)");
+ // Start a third transaction, with majority commit.
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- // We should see the updated doc within the transaction.
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
+ // We should see the updated doc within the transaction.
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
- // Update it one more time.
- assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
+ // Update it one more time.
+ assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ assert.commandWorked(session.commitTransaction_forTesting());
- // The document should be updated on the local snapshot.
- assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 3});
+ // The document should be updated on the local snapshot.
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 3});
- // The document should also be updated in the majority snapshot.
- assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0, x: 3});
+ // The document should also be updated in the majority snapshot.
+ assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0, x: 3});
- session.endSession();
- }
+ session.endSession();
+}
- runTest({causalConsistency: false});
- runTest({causalConsistency: true});
- rst.stopSet();
+runTest({causalConsistency: false});
+runTest({causalConsistency: true});
+rst.stopSet();
}());
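The auto-abort behaviour exercised above is why real callers wrap transactions in a retry loop. A rough sketch, under the assumption that `writeFn` returns a shell WriteResult (e.g. () => sessionColl.update(...)); this helper is hypothetical and not part of the test:

function retryOnWriteConflict(session, writeFn) {
    while (true) {
        session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
        const res = writeFn();
        if (res.hasWriteError() && res.getWriteError().code === ErrorCodes.WriteConflict) {
            // The server has already aborted the transaction; reset shell state and retry.
            session.abortTransaction_forTesting();
            continue;
        }
        assert.commandWorked(res);
        assert.commandWorked(session.commitTransaction_forTesting());
        return;
    }
}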
diff --git a/jstests/replsets/standalone_replication_recovery_prepare_only.js b/jstests/replsets/standalone_replication_recovery_prepare_only.js
index 4c3910244f2..3c61ff64c8e 100644
--- a/jstests/replsets/standalone_replication_recovery_prepare_only.js
+++ b/jstests/replsets/standalone_replication_recovery_prepare_only.js
@@ -7,10 +7,10 @@
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
+"use strict";
+load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
- const testName = "standalone_replication_recovery_prepare_only";
+const testName = "standalone_replication_recovery_prepare_only";
- testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ false);
+testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ false);
})();
diff --git a/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js b/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
index a09bbb70ecf..e3843d99056 100644
--- a/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
+++ b/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
@@ -7,10 +7,10 @@
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
+"use strict";
+load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
- const testName = "standalone_replication_recovery_prepare_with_commit";
+const testName = "standalone_replication_recovery_prepare_with_commit";
- testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ true);
+testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ true);
})();
diff --git a/jstests/replsets/startParallelShell.js b/jstests/replsets/startParallelShell.js
index 0027c5600e3..cb1838c392f 100644
--- a/jstests/replsets/startParallelShell.js
+++ b/jstests/replsets/startParallelShell.js
@@ -3,34 +3,34 @@
var db;
(function() {
- 'use strict';
+'use strict';
- const setName = 'rs0';
- const replSet = new ReplSetTest({name: setName, nodes: 3});
- const nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate();
+const setName = 'rs0';
+const replSet = new ReplSetTest({name: setName, nodes: 3});
+const nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate();
- const url = replSet.getURL();
- print("* Connecting to " + url);
- const mongo = new Mongo(url);
- db = mongo.getDB('admin');
- assert.eq(url, mongo.host, "replSet.getURL() should match active connection string");
+const url = replSet.getURL();
+print("* Connecting to " + url);
+const mongo = new Mongo(url);
+db = mongo.getDB('admin');
+assert.eq(url, mongo.host, "replSet.getURL() should match active connection string");
- print("* Starting parallel shell on --host " + db.getMongo().host);
- var awaitShell = startParallelShell('db.coll0.insert({test: "connString only"});');
- assert.soon(function() {
- return db.coll0.find({test: "connString only"}).count() === 1;
- });
- awaitShell();
+print("* Starting parallel shell on --host " + db.getMongo().host);
+var awaitShell = startParallelShell('db.coll0.insert({test: "connString only"});');
+assert.soon(function() {
+ return db.coll0.find({test: "connString only"}).count() === 1;
+});
+awaitShell();
- const uri = new MongoURI(url);
- const port0 = uri.servers[0].port;
- print("* Starting parallel shell w/ --port " + port0);
- awaitShell = startParallelShell('db.coll0.insert({test: "explicit port"});', port0);
- assert.soon(function() {
- return db.coll0.find({test: "explicit port"}).count() === 1;
- });
- awaitShell();
- replSet.stopSet();
+const uri = new MongoURI(url);
+const port0 = uri.servers[0].port;
+print("* Starting parallel shell w/ --port " + port0);
+awaitShell = startParallelShell('db.coll0.insert({test: "explicit port"});', port0);
+assert.soon(function() {
+ return db.coll0.find({test: "explicit port"}).count() === 1;
+});
+awaitShell();
+replSet.stopSet();
})();
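startParallelShell also accepts a function in place of a code string; the function is stringified and evaluated in the spawned shell, so it cannot capture variables from the calling scope. A sketch equivalent to the string form above (reusing port0 from the test; the inserted document is illustrative):

const awaitFnShell = startParallelShell(function() {
    assert.commandWorked(db.coll0.insert({test: "function form"}));
}, port0);
awaitFnShell();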
diff --git a/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js b/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js
index 340ff978b78..66d3111df82 100644
--- a/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js
+++ b/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js
@@ -10,84 +10,84 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
-
- const dbName = "test";
- const collName = "commit_transaction_recovery";
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // Construct a large array such that two arrays in the same document are not greater than the
- // 16MB limit, but that three such arrays in the same document are greater than 16MB. This will
- // be helpful in recreating an idempotency issue that exists when applying the operations from
- // a transaction after the data already reflects the transaction.
- const largeArray = new Array(7 * 1024 * 1024).join('x');
- assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
-
- // Start a transaction in a session that will be prepared and committed before node restart.
- let session = primary.startSession();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const recoveryTimestamp =
- assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
-
- jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
-
- // Hold back the stable timestamp to be right after the prepareTimestamp, but before the
- // commitTransaction oplog entry so that the transaction will be replayed during startup
- // recovery.
- assert.commandWorked(testDB.adminCommand({
- "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
- "mode": 'alwaysOn',
- "data": {"timestamp": recoveryTimestamp}
- }));
-
- jsTestLog("Committing the transaction");
-
- // Since this transaction is committed after the last snapshot, this commit oplog entry will be
- // replayed during startup replication recovery.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Restarting node");
-
- // Perform a clean shutdown and restart. And, the data restored at the storage recovery
- // timestamp should not reflect the transaction. If not, replaying the commit oplog entry during
- // startup recovery would throw BSONTooLarge exception.
- replTest.stop(primary, undefined, {skipValidation: true});
- // Since the oldest timestamp is same as the stable timestamp during node's restart, this test
- // will commit a transaction older than oldest timestamp during startup recovery.
- replTest.start(primary, {}, true);
-
- jsTestLog("Node was restarted");
- primary = replTest.getPrimary();
-
- // Make sure that the data reflects all the operations from the transaction after recovery.
- testDB = primary.getDB(dbName);
- const res = testDB[collName].findOne({_id: 1});
- assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
-
- // Make sure that another write on the same document from the transaction has no write conflict.
- // Also, make sure that we can run another transaction after recovery without any problems.
- session = primary.startSession();
- sessionDB = session.getDatabase(dbName);
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+let primary = replTest.getPrimary();
+
+const dbName = "test";
+const collName = "commit_transaction_recovery";
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// Construct a large array such that two arrays in the same document are not greater than the
+// 16MB limit, but that three such arrays in the same document are greater than 16MB. This will
+// be helpful in recreating an idempotency issue that exists when applying the operations from
+// a transaction after the data already reflects the transaction.
+const largeArray = new Array(7 * 1024 * 1024).join('x');
+assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
+
+// Start a transaction in a session that will be prepared and committed before node restart.
+let session = primary.startSession();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+const recoveryTimestamp =
+ assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
+
+jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
+
+// Hold back the stable timestamp to be right after the prepareTimestamp, but before the
+// commitTransaction oplog entry so that the transaction will be replayed during startup
+// recovery.
+assert.commandWorked(testDB.adminCommand({
+ "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
+ "mode": 'alwaysOn',
+ "data": {"timestamp": recoveryTimestamp}
+}));
+
+jsTestLog("Committing the transaction");
+
+// Since this transaction is committed after the last snapshot, this commit oplog entry will be
+// replayed during startup replication recovery.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+jsTestLog("Restarting node");
+
+// Perform a clean shutdown and restart. The data restored at the storage recovery timestamp
+// should not reflect the transaction; if it did, replaying the commit oplog entry during startup
+// recovery would throw a BSONTooLarge exception.
+replTest.stop(primary, undefined, {skipValidation: true});
+// Since the oldest timestamp is the same as the stable timestamp during the node's restart, this
+// test will commit a transaction older than the oldest timestamp during startup recovery.
+replTest.start(primary, {}, true);
+
+jsTestLog("Node was restarted");
+primary = replTest.getPrimary();
+
+// Make sure that the data reflects all the operations from the transaction after recovery.
+testDB = primary.getDB(dbName);
+const res = testDB[collName].findOne({_id: 1});
+assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
+
+// Make sure that a new write to the document updated by the transaction causes no write conflict.
+// Also, make sure that we can run another transaction after recovery without any problems.
+session = primary.startSession();
+sessionDB = session.getDatabase(dbName);
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
+
+replTest.stopSet();
}());
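The configureFailPoint calls above follow a common enable/work/disable shape. A hypothetical wrapper (not part of these tests, and deliberately not usable here, since this file leaves the failpoint on across a restart that clears it) might look like:

function withFailPoint(conn, name, data, fn) {
    assert.commandWorked(
        conn.adminCommand({configureFailPoint: name, mode: 'alwaysOn', data: data}));
    try {
        fn();
    } finally {
        assert.commandWorked(conn.adminCommand({configureFailPoint: name, mode: 'off'}));
    }
}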
diff --git a/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js b/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js
index 1a8c46d5d3d..27237cbe18f 100644
--- a/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js
+++ b/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js
@@ -6,110 +6,110 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/aggregation/extras/utils.js");
-
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
-
- const dbName = "test";
- const collName = "startup_recovery_reconstructs_txn_prepared_before_stable";
- const testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 0}));
-
- // Start a session on the primary.
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB.getCollection(collName);
-
- // Prepare the transaction on the session.
- session.startTransaction();
- // We are creating a record size of 14MB for _id '0', just to make sure when this
- // test runs with lesser wiredTiger cache size, there would be a higher possibility
- // of this record being considered for eviction from in-memory tree. And, to confirm
- // that we don't see problems like in SERVER-40422.
- const largeArray = new Array(14 * 1024 * 1024).join('x');
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: largeArray}}));
- assert.commandWorked(sessionColl.insert({_id: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 2);
-
- jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
- // Doing a majority write after preparing the transaction ensures that the stable timestamp is
- // past the prepare timestamp because this write must be in the committed snapshot.
- assert.commandWorked(
- testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 3);
-
- // Check that we have one transaction in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- jsTestLog("Restarting node");
- // Perform a clean shutdown and restart. And, the data restored at the storage recovery
- // timestamp should not reflect the prepared transaction.
- replTest.stop(primary, undefined, {skipValidation: true});
- // Since the oldest timestamp is same as the stable timestamp during node's restart, this test
- // will reconstruct a prepared transaction older than oldest timestamp during startup recovery.
- replTest.start(primary, {}, true);
-
- jsTestLog("Node was restarted");
- primary = replTest.getPrimary();
- testColl = primary.getDB(dbName)[collName];
-
- // Make sure we cannot see the writes from the prepared transaction yet.
- arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]);
- assert.eq(testColl.count(), 3);
-
- // Make sure there is still one transaction in the transactions table. This is because the
- // entry in the transactions table is made durable when a transaction is prepared.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- // Make sure we can successfully commit the recovered prepared transaction.
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the prepared transaction causes
- // a write conflict.
- assert.commandFailedWithCode(
- sessionDB.runCommand(
- {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Committing the prepared transaction");
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- // Make sure we can see the effects of the prepared transaction.
- arrayEq(testColl.find().toArray(), [{_id: 0, a: largeArray}, {_id: 1}, {_id: 2}]);
- assert.eq(testColl.count(), 3);
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/aggregation/extras/utils.js");
+
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+let primary = replTest.getPrimary();
+
+const dbName = "test";
+const collName = "startup_recovery_reconstructs_txn_prepared_before_stable";
+const testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 0}));
+
+// Start a session on the primary.
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB.getCollection(collName);
+
+// Prepare the transaction on the session.
+session.startTransaction();
+// We create a record of roughly 14MB for _id '0' to make sure that, when this test runs
+// with a smaller wiredTiger cache size, there is a higher chance of this record being
+// considered for eviction from the in-memory tree, and to confirm that we don't see
+// problems like those in SERVER-40422.
+const largeArray = new Array(14 * 1024 * 1024).join('x');
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: largeArray}}));
+assert.commandWorked(sessionColl.insert({_id: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 2);
+
+jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
+// Doing a majority write after preparing the transaction ensures that the stable timestamp is
+// past the prepare timestamp because this write must be in the committed snapshot.
+assert.commandWorked(
+ testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
+
+// Fastcount now also reflects the majority write, in addition to the prepared insert.
+assert.eq(testColl.count(), 3);
+
+// Check that we have one transaction in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+jsTestLog("Restarting node");
+// Perform a clean shutdown and restart. The data restored at the storage recovery timestamp
+// should not reflect the prepared transaction.
+replTest.stop(primary, undefined, {skipValidation: true});
+// Since the oldest timestamp is the same as the stable timestamp during the node's restart, this
+// test will reconstruct a prepared transaction older than the oldest timestamp during startup recovery.
+replTest.start(primary, {}, true);
+
+jsTestLog("Node was restarted");
+primary = replTest.getPrimary();
+testColl = primary.getDB(dbName)[collName];
+
+// Make sure we cannot see the writes from the prepared transaction yet.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]));
+assert.eq(testColl.count(), 3);
+
+// Make sure there is still one transaction in the transactions table. This is because the
+// entry in the transactions table is made durable when a transaction is prepared.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+// Make sure we can successfully commit the recovered prepared transaction.
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the prepared transaction causes
+// a write conflict.
+assert.commandFailedWithCode(
+ sessionDB.runCommand(
+ {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Committing the prepared transaction");
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+// Make sure we can see the effects of the prepared transaction.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0, a: largeArray}, {_id: 1}, {_id: 2}]));
+assert.eq(testColl.count(), 3);
+
+replTest.stopSet();
}());
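For contrast with the commit path above, a reconstructed prepared transaction can instead be aborted on the same session and txnNumber; a sketch, not exercised in this test:

assert.commandWorked(sessionDB.adminCommand({
    abortTransaction: 1,
    txnNumber: NumberLong(txnNumber),
    autocommit: false,
}));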
diff --git a/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js b/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js
index 0dca4e5efd6..3b83bda0888 100644
--- a/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js
+++ b/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js
@@ -4,40 +4,40 @@
*/
(function() {
- load("jstests/libs/check_log.js");
- load("jstests/libs/feature_compatibility_version.js");
-
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- jsTestLog("Adding a second node to the replica set.");
-
- const adminDbName = "admin";
- const versionCollName = "system.version";
- const nss = adminDbName + "." + versionCollName;
-
- // Hang initial sync before cloning the FCV document.
- let secondary = rst.add({rsConfig: {priority: 0}});
- assert.commandWorked(secondary.getDB('admin').runCommand({
- configureFailPoint: 'initialSyncHangBeforeCollectionClone',
- mode: 'alwaysOn',
- data: {namespace: nss}
- }));
- rst.reInitiate();
- checkLog.contains(secondary, "initialSyncHangBeforeCollectionClone fail point enabled.");
-
- jsTestLog("Restarting secondary in the early stages of initial sync.");
- rst.restart(secondary);
-
- rst.awaitSecondaryNodes();
-
- // Get the new secondary connection.
- secondary = rst.getSecondary();
- secondary.setSlaveOk(true);
-
- const secondaryAdminDb = secondary.getDB("admin");
- // Assert that the FCV document was cloned through initial sync on the secondary.
- checkFCV(secondaryAdminDb, latestFCV);
- rst.stopSet();
+load("jstests/libs/check_log.js");
+load("jstests/libs/feature_compatibility_version.js");
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+jsTestLog("Adding a second node to the replica set.");
+
+const adminDbName = "admin";
+const versionCollName = "system.version";
+const nss = adminDbName + "." + versionCollName;
+
+// Hang initial sync before cloning the FCV document.
+let secondary = rst.add({rsConfig: {priority: 0}});
+assert.commandWorked(secondary.getDB('admin').runCommand({
+ configureFailPoint: 'initialSyncHangBeforeCollectionClone',
+ mode: 'alwaysOn',
+ data: {namespace: nss}
+}));
+rst.reInitiate();
+checkLog.contains(secondary, "initialSyncHangBeforeCollectionClone fail point enabled.");
+
+jsTestLog("Restarting secondary in the early stages of initial sync.");
+rst.restart(secondary);
+
+rst.awaitSecondaryNodes();
+
+// Get the new secondary connection.
+secondary = rst.getSecondary();
+secondary.setSlaveOk(true);
+
+const secondaryAdminDb = secondary.getDB("admin");
+// Assert that the FCV document was cloned through initial sync on the secondary.
+checkFCV(secondaryAdminDb, latestFCV);
+rst.stopSet();
}());
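checkFCV above verifies both the featureCompatibilityVersion server parameter and the on-disk document that initial sync must clone. Reading that document directly looks roughly like this (a sketch; the exact document shape can vary by version):

const fcvDoc = secondaryAdminDb.system.version.findOne({_id: "featureCompatibilityVersion"});
assert.eq(fcvDoc.version, latestFCV);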
diff --git a/jstests/replsets/step_down_during_draining.js b/jstests/replsets/step_down_during_draining.js
index bedf5facb8e..47c8ee2651a 100644
--- a/jstests/replsets/step_down_during_draining.js
+++ b/jstests/replsets/step_down_during_draining.js
@@ -12,122 +12,121 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- var conf = replSet.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = conf.settings || {};
- conf.settings.chainingAllowed = false;
- conf.settings.catchUpTimeoutMillis = 0;
- replSet.initiate(conf);
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 3},
- }
- };
- replSet.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function enableFailPoint(node) {
- jsTest.log("enable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+"use strict";
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+var conf = replSet.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = conf.settings || {};
+conf.settings.chainingAllowed = false;
+conf.settings.catchUpTimeoutMillis = 0;
+replSet.initiate(conf);
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
}
+};
+replSet.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
- function disableFailPoint(node) {
- jsTest.log("disable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- }
-
- // Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
- // that waits for a node to leave drain mode.
- function stepUpNode(node) {
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- // We do not specify a specific primary so that if a different primary gets elected
- // due to unfortunate timing we can try again.
- replSet.awaitNodesAgreeOnPrimary();
- return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
- }
-
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var coll = primary.getDB("foo").foo;
- assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
- replSet.awaitReplication();
-
- // Enable fail point to stop replication.
- var secondaries = replSet.getSecondaries();
- secondaries.forEach(enableFailPoint);
-
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
-
- assert.soon(
- function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange == numDocuments - 1;
- },
- 'secondary did not buffer operations for new inserts on primary',
- replSet.kDefaultTimeoutMs,
- 1000);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Secondary doesn't allow writes yet.
- var res = secondary.getDB("admin").runCommand({"isMaster": 1});
- assert(!res.ismaster);
-
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
-
- // Original primary steps up.
- reconnect(primary);
- stepUpNode(primary);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Disable fail point to allow replication.
- secondaries.forEach(disableFailPoint);
-
+function enableFailPoint(node) {
+ jsTest.log("enable failpoint " + node.host);
assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: replSet.kDefaultTimeoutMS,
- }),
- 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
-
- // Ensure new primary is writable.
- jsTestLog('New primary should be writable after draining is complete');
- assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}));
- // Check that all writes reached the secondary's op queue prior to
- // stepping down the original primary and got applied.
- assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
- replSet.stopSet();
+ node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+}
+
+function disableFailPoint(node) {
+ jsTest.log("disable failpoint " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+}
+
+// Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
+// that waits for a node to leave drain mode.
+function stepUpNode(node) {
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ // We do not specify a specific primary so that if a different primary gets elected
+ // due to unfortunate timing we can try again.
+ replSet.awaitNodesAgreeOnPrimary();
+ return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
+}
+
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var coll = primary.getDB("foo").foo;
+assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+replSet.awaitReplication();
+
+// Enable fail point to stop replication.
+var secondaries = replSet.getSecondaries();
+secondaries.forEach(enableFailPoint);
+
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+
+assert.soon(
+ function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange == numDocuments - 1;
+ },
+ 'secondary did not buffer operations for new inserts on primary',
+ replSet.kDefaultTimeoutMS,
+ 1000);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Secondary doesn't allow writes yet.
+var res = secondary.getDB("admin").runCommand({"isMaster": 1});
+assert(!res.ismaster);
+
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+
+// Original primary steps up.
+reconnect(primary);
+stepUpNode(primary);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Disable fail point to allow replication.
+secondaries.forEach(disableFailPoint);
+
+assert.commandWorked(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: replSet.kDefaultTimeoutMS,
+ }),
+ 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
+
+// Ensure new primary is writable.
+jsTestLog('New primary should be writable after draining is complete');
+assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}));
+// Check that all writes that reached the secondary's op queue prior to stepping down
+// the original primary were applied.
+assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
+replSet.stopSet();
})();
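The buffer-count poll above is repeated verbatim in the next two files; it distills to a small helper. A sketch, assuming the same serverStatus metrics path and the shell's default assert.soon timeout:

function awaitBufferedOps(node, countBefore, expectedDelta) {
    assert.soon(function() {
        const count = node.getDB('admin').serverStatus().metrics.repl.buffer.count;
        return count - countBefore === expectedDelta;
    }, 'node did not buffer ' + expectedDelta + ' operations');
}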
diff --git a/jstests/replsets/step_down_during_draining2.js b/jstests/replsets/step_down_during_draining2.js
index 1687d39d7c4..1e97f93865a 100644
--- a/jstests/replsets/step_down_during_draining2.js
+++ b/jstests/replsets/step_down_during_draining2.js
@@ -11,163 +11,162 @@
// 7. Allow Node 1 to finish stepping down.
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/check_log.js");
-
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- var conf = replSet.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = conf.settings || {};
- conf.settings.chainingAllowed = false;
- conf.settings.catchUpTimeoutMillis = 0;
- replSet.initiate(conf);
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 3},
- }
- };
- replSet.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function enableFailPoint(node) {
- jsTest.log("enable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+"use strict";
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/check_log.js");
+
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+var conf = replSet.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = conf.settings || {};
+conf.settings.chainingAllowed = false;
+conf.settings.catchUpTimeoutMillis = 0;
+replSet.initiate(conf);
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
}
+};
+replSet.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
- function disableFailPoint(node) {
- jsTest.log("disable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- }
-
- // Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
- // that waits for a node to leave drain mode.
- function stepUpNode(node) {
- jsTest.log("Stepping up: " + node.host);
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- // We do not specify a specific primary so that if a different primary gets elected
- // due to unfortunate timing we can try again.
- replSet.awaitNodesAgreeOnPrimary();
- return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
- }
-
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var coll = primary.getDB("foo").foo;
- assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
- replSet.awaitReplication();
-
- // Enable fail point to stop replication.
- var secondaries = replSet.getSecondaries();
- secondaries.forEach(enableFailPoint);
-
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
-
- assert.soon(
- function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange == numDocuments - 1;
- },
- 'secondary did not buffer operations for new inserts on primary',
- replSet.kDefaultTimeoutMs,
- 1000);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Secondary doesn't allow writes yet.
- var res = secondary.getDB("admin").runCommand({"isMaster": 1});
- assert(!res.ismaster);
-
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
-
- // Prevent the current primary from stepping down
- jsTest.log("disallowing heartbeat stepdown " + secondary.host);
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'alwaysOn'}));
- jsTestLog("Shut down the rest of the set so the primary-elect has to step down");
- replSet.stop(primary);
- disableFailPoint(replSet.nodes[2]); // Fail point needs to be off when node is shut down.
- replSet.stop(2);
-
- jsTestLog("Waiting for secondary to begin stepping down while in drain mode");
- checkLog.contains(secondary, "stepDown - blockHeartbeatStepdown fail point enabled");
-
- // Disable fail point to allow replication and allow secondary to finish drain mode while in the
- // process of stepping down.
- jsTestLog("Re-enabling replication on secondary");
- assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
- disableFailPoint(secondary);
-
- // The node should now be able to apply the writes in its buffer.
- jsTestLog("Waiting for node to drain its apply buffer");
- assert.soon(function() {
- return secondary.getDB("foo").foo.find().itcount() == numDocuments;
- });
-
- // Even though it finished draining its buffer, it shouldn't be able to exit drain mode due to
- // pending stepdown.
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when in the middle of stepping down');
-
- jsTestLog("Checking that node is PRIMARY but not master");
- assert.eq(ReplSetTest.State.PRIMARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
- assert(!secondary.adminCommand('ismaster').ismaster);
-
- jsTest.log("allowing heartbeat stepdown " + secondary.host);
+function enableFailPoint(node) {
+ jsTest.log("enable failpoint " + node.host);
assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'off'}));
-
- jsTestLog("Checking that node successfully stepped down");
- replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
- assert(!secondary.adminCommand('ismaster').ismaster);
-
- // Now ensure that the node can successfully become primary again.
- replSet.restart(0);
- replSet.restart(2);
- stepUpNode(secondary);
-
- assert.soon(function() {
- return secondary.adminCommand('ismaster').ismaster;
- });
-
- jsTestLog('Ensure new primary is writable.');
- assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}}));
- // Check that no writes were lost.
- assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
- replSet.stopSet();
+ node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+}
+
+function disableFailPoint(node) {
+ jsTest.log("disable failpoint " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+}
+
+// Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
+// that waits for a node to leave drain mode.
+function stepUpNode(node) {
+ jsTest.log("Stepping up: " + node.host);
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ // We do not specify a specific primary so that if a different primary gets elected
+ // due to unfortunate timing we can try again.
+ replSet.awaitNodesAgreeOnPrimary();
+ return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
+}
+
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var coll = primary.getDB("foo").foo;
+assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+replSet.awaitReplication();
+
+// Enable fail point to stop replication.
+var secondaries = replSet.getSecondaries();
+secondaries.forEach(enableFailPoint);
+
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+
+assert.soon(
+ function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange == numDocuments - 1;
+ },
+ 'secondary did not buffer operations for new inserts on primary',
+ replSet.kDefaultTimeoutMS,
+ 1000);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Secondary doesn't allow writes yet.
+var res = secondary.getDB("admin").runCommand({"isMaster": 1});
+assert(!res.ismaster);
+
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+
+// Prevent the current primary from stepping down
+jsTest.log("disallowing heartbeat stepdown " + secondary.host);
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'alwaysOn'}));
+jsTestLog("Shut down the rest of the set so the primary-elect has to step down");
+replSet.stop(primary);
+disableFailPoint(replSet.nodes[2]); // Fail point needs to be off when node is shut down.
+replSet.stop(2);
+
+jsTestLog("Waiting for secondary to begin stepping down while in drain mode");
+checkLog.contains(secondary, "stepDown - blockHeartbeatStepdown fail point enabled");
+
+// Disable fail point to allow replication and allow secondary to finish drain mode while in the
+// process of stepping down.
+jsTestLog("Re-enabling replication on secondary");
+assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
+disableFailPoint(secondary);
+
+// The node should now be able to apply the writes in its buffer.
+jsTestLog("Waiting for node to drain its apply buffer");
+assert.soon(function() {
+ return secondary.getDB("foo").foo.find().itcount() == numDocuments;
+});
+
+// Even though it finished draining its buffer, it shouldn't be able to exit drain mode due to
+// pending stepdown.
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when in the middle of stepping down');
+
+jsTestLog("Checking that node is PRIMARY but not master");
+assert.eq(ReplSetTest.State.PRIMARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
+assert(!secondary.adminCommand('ismaster').ismaster);
+
+jsTest.log("allowing heartbeat stepdown " + secondary.host);
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'off'}));
+
+jsTestLog("Checking that node successfully stepped down");
+replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
+assert(!secondary.adminCommand('ismaster').ismaster);
+
+// Now ensure that the node can successfully become primary again.
+replSet.restart(0);
+replSet.restart(2);
+stepUpNode(secondary);
+
+assert.soon(function() {
+ return secondary.adminCommand('ismaster').ismaster;
+});
+
+jsTestLog('Ensure new primary is writable.');
+assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}}));
+// Check that no writes were lost.
+assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
+replSet.stopSet();
})();
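The internal replSetTest command used throughout these draining tests also reads naturally as a helper; a sketch:

function awaitDrainFinish(node, timeoutMs) {
    // Returns the raw result: callers assert commandWorked, or ExceededTimeLimit when
    // drain mode is not expected to finish within the timeout.
    return node.adminCommand({replSetTest: 1, waitForDrainFinish: timeoutMs});
}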
diff --git a/jstests/replsets/step_down_during_draining3.js b/jstests/replsets/step_down_during_draining3.js
index c8631bd12f1..98c42955fc6 100644
--- a/jstests/replsets/step_down_during_draining3.js
+++ b/jstests/replsets/step_down_during_draining3.js
@@ -1,123 +1,122 @@
// Test that the stepdown command can be run successfully during drain mode
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
-
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- var conf = replSet.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = conf.settings || {};
- conf.settings.chainingAllowed = false;
- conf.settings.catchUpTimeoutMillis = 0;
- replSet.initiate(conf);
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 3},
- }
- };
- replSet.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function enableFailPoint(node) {
- jsTest.log("enable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+"use strict";
+
+load("jstests/replsets/rslib.js");
+
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+var conf = replSet.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = conf.settings || {};
+conf.settings.chainingAllowed = false;
+conf.settings.catchUpTimeoutMillis = 0;
+replSet.initiate(conf);
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
}
+};
+replSet.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
- function disableFailPoint(node) {
- jsTest.log("disable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- }
-
- // Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
- // that waits for a node to leave drain mode.
- function stepUpNode(node) {
- jsTest.log("Stepping up: " + node.host);
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- // We do not specify a specific primary so that if a different primary gets elected
- // due to unfortunate timing we can try again.
- replSet.awaitNodesAgreeOnPrimary();
- return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
- }
-
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var coll = primary.getDB("foo").foo;
- assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
- replSet.awaitReplication();
-
- // Enable fail point to stop replication.
- var secondaries = replSet.getSecondaries();
- secondaries.forEach(enableFailPoint);
-
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
-
- assert.soon(
- function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange == numDocuments - 1;
- },
- 'secondary did not buffer operations for new inserts on primary',
- replSet.kDefaultTimeoutMs,
- 1000);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Secondary doesn't allow writes yet.
- var res = secondary.getDB("admin").runCommand({"isMaster": 1});
- assert(!res.ismaster);
-
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
-
- assert.commandWorked(secondary.adminCommand({replSetStepDown: 60, force: true}));
-
- // Assert stepdown was successful.
- assert.eq(ReplSetTest.State.SECONDARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
- assert(!secondary.adminCommand('ismaster').ismaster);
-
- // Prevent the producer from fetching new ops
+function enableFailPoint(node) {
+ jsTest.log("enable failpoint " + node.host);
assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
-
- // Allow the secondary to apply the ops already in its buffer.
- jsTestLog("Re-enabling replication on secondaries");
- assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
- secondaries.forEach(disableFailPoint);
-
- // The node should now be able to apply the writes in its buffer.
- jsTestLog("Waiting for node to drain its apply buffer");
- assert.soon(function() {
- return secondary.getDB("foo").foo.find().itcount() == numDocuments;
- });
- replSet.stopSet();
+ node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+}
+
+function disableFailPoint(node) {
+ jsTest.log("disable failpoint " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+}
+
+// Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
+// that waits for a node to leave drain mode.
+function stepUpNode(node) {
+ jsTest.log("Stepping up: " + node.host);
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+        // We do not require a particular primary so that, if a different primary
+        // gets elected due to unfortunate timing, we can try again.
+ replSet.awaitNodesAgreeOnPrimary();
+ return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
+}
+
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var coll = primary.getDB("foo").foo;
+assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+replSet.awaitReplication();
+
+// Enable fail point to stop replication.
+var secondaries = replSet.getSecondaries();
+secondaries.forEach(enableFailPoint);
+
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+
+assert.soon(
+ function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange == numDocuments - 1;
+ },
+ 'secondary did not buffer operations for new inserts on primary',
+    replSet.kDefaultTimeoutMS,
+ 1000);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Secondary doesn't allow writes yet.
+var res = secondary.getDB("admin").runCommand({"isMaster": 1});
+assert(!res.ismaster);
+
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+
+assert.commandWorked(secondary.adminCommand({replSetStepDown: 60, force: true}));
+
+// Assert stepdown was successful.
+assert.eq(ReplSetTest.State.SECONDARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
+assert(!secondary.adminCommand('ismaster').ismaster);
+
+// Prevent the producer from fetching new ops
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
+
+// Allow the secondary to apply the ops already in its buffer.
+jsTestLog("Re-enabling replication on secondaries");
+assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
+secondaries.forEach(disableFailPoint);
+
+// The node should now be able to apply the writes in its buffer.
+jsTestLog("Waiting for node to drain its apply buffer");
+assert.soon(function() {
+ return secondary.getDB("foo").foo.find().itcount() == numDocuments;
+});
+replSet.stopSet();
})();
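
The buffer check in the test above reads the metrics.repl.buffer section of serverStatus, which tracks operations fetched from the sync source but not yet applied. A minimal standalone sketch of the same inspection, assuming 'conn' is a hypothetical, already-established connection to a replica-set member:

    // Sketch: inspect a member's oplog-application buffer via serverStatus.
    // 'conn' is an assumed connection, not part of the test above.
    var status = conn.getDB('admin').serverStatus();
    var buf = status.metrics.repl.buffer;
    print('ops buffered: ' + buf.count + ', bytes used: ' + buf.sizeBytes +
          ', max bytes: ' + buf.maxSizeBytes);
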
diff --git a/jstests/replsets/step_down_on_secondary.js b/jstests/replsets/step_down_on_secondary.js
index 5563d4a1e23..d4a90d48ca3 100644
--- a/jstests/replsets/step_down_on_secondary.js
+++ b/jstests/replsets/step_down_on_secondary.js
@@ -13,119 +13,119 @@
*/
(function() {
- "use strict";
- load('jstests/libs/parallelTester.js');
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
- const collNss = primaryColl.getFullName();
- const secondary = rst.getSecondary();
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.collNss = collNss;
-
- jsTestLog("Do a document write");
- assert.commandWorked(primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
- rst.awaitReplication();
-
- jsTestLog("Hang primary on step down");
- const joinStepDownThread = startParallelShell(() => {
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "alwaysOn"}));
-
- const freezeSecs = 24 * 60 * 60; // 24 hours
- assert.commandFailedWithCode(
- db.adminCommand({"replSetStepDown": freezeSecs, "force": true}), ErrorCodes.NotMaster);
- }, primary.port);
-
- waitForCurOpByFailPointNoNS(primaryDB, "stepdownHangBeforeRSTLEnqueue");
-
- jsTestLog("Force reconfig to swap the electable node");
- const newConfig = rst.getReplSetConfigFromNode();
- const oldPrimaryId = rst.getNodeId(primary);
- const newPrimaryId = rst.getNodeId(secondary);
- newConfig.members[newPrimaryId].priority = 1;
- newConfig.members[oldPrimaryId].priority = 0;
- newConfig.version++;
- assert.commandWorked(secondary.adminCommand({"replSetReconfig": newConfig, force: true}));
-
- jsTestLog("Step up the new electable node");
- rst.stepUp(secondary);
-
- jsTestLog("Wait for step up to complete");
- // Wait until the primary successfully steps down via heartbeat reconfig.
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- const newPrimary = rst.getPrimary();
-
- jsTestLog("Prepare a transaction on the new primary");
- const session = newPrimary.startSession();
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
- session.startTransaction({writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {"b": 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- jsTestLog("Get a cluster time for afterClusterTime reads");
- TestData.clusterTimeAfterPrepare =
- assert
- .commandWorked(newPrimary.getDB(dbName)[collName].runCommand(
- "insert", {documents: [{_id: "clusterTimeAfterPrepare"}]}))
- .operationTime;
-
- jsTestLog("Do a read that hits a prepare conflict on the old primary");
+"use strict";
+load('jstests/libs/parallelTester.js');
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
+const collNss = primaryColl.getFullName();
+const secondary = rst.getSecondary();
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.collNss = collNss;
+
+jsTestLog("Do a document write");
+assert.commandWorked(primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
+rst.awaitReplication();
+
+jsTestLog("Hang primary on step down");
+const joinStepDownThread = startParallelShell(() => {
assert.commandWorked(
- primary.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
-
- const joinReadThread = startParallelShell(() => {
- db.getMongo().setSlaveOk(true);
- oldPrimaryDB = db.getSiblingDB(TestData.dbName);
-
- assert.commandFailedWithCode(oldPrimaryDB.runCommand({
- find: TestData.collName,
- filter: {_id: 0},
- readConcern: {level: "local", afterClusterTime: TestData.clusterTimeAfterPrepare},
- }),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.port);
-
- jsTestLog("Wait to hit a prepare conflict");
- checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
-
- jsTestLog("Allow step down to complete");
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "off"}));
-
- jsTestLog("Wait for step down to start killing operations");
- checkLog.contains(primary, "Starting to kill user operations");
-
- jsTestLog("Commit the prepared transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Join parallel shells");
- joinStepDownThread();
- joinReadThread();
-
- // Validate that the read operation got killed during step down.
- const replMetrics = assert.commandWorked(primary.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 1, replMetrics);
-
- jsTestLog("Check nodes have correct data");
- assert.docEq(newPrimary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
- rst.awaitReplication();
- assert.docEq(primary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
-
- rst.stopSet();
+ db.adminCommand({configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "alwaysOn"}));
+
+ const freezeSecs = 24 * 60 * 60; // 24 hours
+ assert.commandFailedWithCode(db.adminCommand({"replSetStepDown": freezeSecs, "force": true}),
+ ErrorCodes.NotMaster);
+}, primary.port);
+
+waitForCurOpByFailPointNoNS(primaryDB, "stepdownHangBeforeRSTLEnqueue");
+
+jsTestLog("Force reconfig to swap the electable node");
+const newConfig = rst.getReplSetConfigFromNode();
+const oldPrimaryId = rst.getNodeId(primary);
+const newPrimaryId = rst.getNodeId(secondary);
+newConfig.members[newPrimaryId].priority = 1;
+newConfig.members[oldPrimaryId].priority = 0;
+newConfig.version++;
+assert.commandWorked(secondary.adminCommand({"replSetReconfig": newConfig, force: true}));
+
+jsTestLog("Step up the new electable node");
+rst.stepUp(secondary);
+
+jsTestLog("Wait for step up to complete");
+// Wait until the primary successfully steps down via heartbeat reconfig.
+rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+const newPrimary = rst.getPrimary();
+
+jsTestLog("Prepare a transaction on the new primary");
+const session = newPrimary.startSession();
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+session.startTransaction({writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {"b": 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+jsTestLog("Get a cluster time for afterClusterTime reads");
+TestData.clusterTimeAfterPrepare =
+ assert
+ .commandWorked(newPrimary.getDB(dbName)[collName].runCommand(
+ "insert", {documents: [{_id: "clusterTimeAfterPrepare"}]}))
+ .operationTime;
+
+jsTestLog("Do a read that hits a prepare conflict on the old primary");
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
+
+const joinReadThread = startParallelShell(() => {
+ db.getMongo().setSlaveOk(true);
+ oldPrimaryDB = db.getSiblingDB(TestData.dbName);
+
+ assert.commandFailedWithCode(oldPrimaryDB.runCommand({
+ find: TestData.collName,
+ filter: {_id: 0},
+ readConcern: {level: "local", afterClusterTime: TestData.clusterTimeAfterPrepare},
+ }),
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, primary.port);
+
+jsTestLog("Wait to hit a prepare conflict");
+checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
+
+jsTestLog("Allow step down to complete");
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "off"}));
+
+jsTestLog("Wait for step down to start killing operations");
+checkLog.contains(primary, "Starting to kill user operations");
+
+jsTestLog("Commit the prepared transaction");
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+jsTestLog("Join parallel shells");
+joinStepDownThread();
+joinReadThread();
+
+// Validate that the read operation got killed during step down.
+const replMetrics = assert.commandWorked(primary.adminCommand({serverStatus: 1})).metrics.repl;
+assert.eq(replMetrics.stepDown.userOperationsKilled, 1, replMetrics);
+
+jsTestLog("Check nodes have correct data");
+assert.docEq(newPrimary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
+rst.awaitReplication();
+assert.docEq(primary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
+
+rst.stopSet();
})();
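
The read that blocks on the prepared transaction above is an ordinary find with an afterClusterTime read concern, which cannot return until the node's snapshot reaches the requested cluster time. A minimal sketch of such a causally-consistent read, assuming hypothetical variables 'conn' (a connection) and 'clusterTime' (an operationTime captured from an earlier write):

    // Sketch: a read that waits for the node to reach 'clusterTime'.
    var res = conn.getDB('test').runCommand({
        find: 'coll',
        filter: {_id: 0},
        readConcern: {level: 'local', afterClusterTime: clusterTime},
    });
    printjson(res);
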
diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js
index f40caabe242..508645cdf5a 100644
--- a/jstests/replsets/stepdown3.js
+++ b/jstests/replsets/stepdown3.js
@@ -3,49 +3,48 @@
// This test requires the fsync command to force a secondary to be stale.
// @tags: [requires_fsync]
(function() {
- 'use strict';
-
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
- var nodes = replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
-
- // do a write to allow stepping down of the primary;
- // otherwise, the primary will refuse to step down
- print("\ndo a write");
- master.getDB("test").foo.insert({x: 1});
- replTest.awaitReplication();
-
- // do another write, because the first one might be longer than 10 seconds ago
- // on the secondary (due to starting up), and we need to be within 10 seconds
- // to step down.
- var options = {writeConcern: {w: 2, wtimeout: 30000}};
- assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
- // lock secondary, to pause replication
- print("\nlock secondary");
- var locked = replTest._slaves[0];
- printjson(locked.getDB("admin").runCommand({fsync: 1, lock: 1}));
-
- // do a write
- print("\ndo a write");
- master.getDB("test").foo.insert({x: 3});
-
- // step down the primary asyncronously
- print("stepdown");
- var command =
- "sleep(4000); assert.commandWorked(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
- var awaitShell = startParallelShell(command, master.port);
-
- print("getlasterror; should return an error");
- let result = master.getDB("test").runCommand({getLastError: 1, w: 2, wtimeout: 10 * 60 * 1000});
- assert(ErrorCodes.isNotMasterError(result.code));
- print("result of gle:");
- printjson(result);
-
- awaitShell();
-
- // unlock and shut down
- printjson(locked.getDB("admin").fsyncUnlock());
- replTest.stopSet();
-
+'use strict';
+
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+// do a write to allow stepping down of the primary;
+// otherwise, the primary will refuse to step down
+print("\ndo a write");
+master.getDB("test").foo.insert({x: 1});
+replTest.awaitReplication();
+
+// do another write, because the first one might have happened more than 10 seconds
+// ago on the secondary (due to starting up), and the most recent write must be
+// within 10 seconds for the primary to step down.
+var options = {writeConcern: {w: 2, wtimeout: 30000}};
+assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
+// lock secondary, to pause replication
+print("\nlock secondary");
+var locked = replTest._slaves[0];
+printjson(locked.getDB("admin").runCommand({fsync: 1, lock: 1}));
+
+// do a write
+print("\ndo a write");
+master.getDB("test").foo.insert({x: 3});
+
+// step down the primary asynchronously
+print("stepdown");
+var command =
+ "sleep(4000); assert.commandWorked(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
+var awaitShell = startParallelShell(command, master.port);
+
+print("getlasterror; should return an error");
+let result = master.getDB("test").runCommand({getLastError: 1, w: 2, wtimeout: 10 * 60 * 1000});
+assert(ErrorCodes.isNotMasterError(result.code));
+print("result of gle:");
+printjson(result);
+
+awaitShell();
+
+// unlock and shut down
+printjson(locked.getDB("admin").fsyncUnlock());
+replTest.stopSet();
})();
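
The getLastError call above checks the connection's preceding write against a write concern and surfaces replication errors such as NotMaster while a stepdown is in flight. A minimal sketch of this legacy pattern, assuming a hypothetical connection 'conn':

    // Sketch: legacy write-concern check via getLastError.
    conn.getDB('test').foo.insert({x: 1});
    var gle = conn.getDB('test').runCommand({getLastError: 1, w: 2, wtimeout: 5000});
    printjson(gle);  // gle.err and gle.code are set when the concern is not met
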
diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js
index 4fd88748ae4..82c31b49a0a 100644
--- a/jstests/replsets/stepdown_catch_up_opt.js
+++ b/jstests/replsets/stepdown_catch_up_opt.js
@@ -4,85 +4,85 @@
*/
(function() {
- 'use strict';
- var name = 'stepdown_catch_up_opt';
- // Only 2 nodes, so that we can control whether the secondary is caught up.
- var replTest = new ReplSetTest({name: name, nodes: 2});
- replTest.startSet();
- replTest.initiate();
- replTest.awaitSecondaryNodes();
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
+'use strict';
+var name = 'stepdown_catch_up_opt';
+// Only 2 nodes, so that we can control whether the secondary is caught up.
+var replTest = new ReplSetTest({name: name, nodes: 2});
+replTest.startSet();
+replTest.initiate();
+replTest.awaitSecondaryNodes();
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
- // Error codes we expect to see.
+// Error codes we expect to see.
- // If the secondary is not caught up.
- const noCaughtUpSecondariesCode = ErrorCodes.ExceededTimeLimit;
+// If the secondary is not caught up.
+const noCaughtUpSecondariesCode = ErrorCodes.ExceededTimeLimit;
- // If the stepdown period is shorter than the secondaryCatchUpPeriodSecs argument.
- var stepDownPeriodTooShortCode = 2;
+// If the stepdown period is shorter than the secondaryCatchUpPeriodSecs argument.
+var stepDownPeriodTooShortCode = 2;
- // If we give a string as an argument instead of an integer.
- var stringNotIntCode = 14;
+// If we give a string as an argument instead of an integer.
+var stringNotIntCode = 14;
- // Expect a failure with a string argument.
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
- stringNotIntCode,
- 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
-
- // Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}),
- stepDownPeriodTooShortCode,
- ('Expected replSetStepDown to fail given a stepdown time shorter than' +
- ' secondaryCatchUpPeriodSecs'));
+// Expect a failure with a string argument.
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
+ stringNotIntCode,
+ 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
- jsTestLog('Stop secondary syncing.');
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
- 'Failed to configure rsSyncApplyStop failpoint.');
+// Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}),
+ stepDownPeriodTooShortCode,
+ ('Expected replSetStepDown to fail given a stepdown time shorter than' +
+ ' secondaryCatchUpPeriodSecs'));
- function disableFailPoint() {
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
- 'Failed to disable rsSyncApplyStop failpoint.');
- }
+jsTestLog('Stop secondary syncing.');
+assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'Failed to configure rsSyncApplyStop failpoint.');
- // If any of these assertions fail, we need to disable the fail point in order for the mongod to
- // shut down.
- try {
- jsTestLog('Write to primary to make secondary out of sync.');
- assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
- sleep(1000);
- // Secondary is now at least 1 second behind.
+function disableFailPoint() {
+ assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ 'Failed to disable rsSyncApplyStop failpoint.');
+}
- jsTestLog('Try to step down.');
- var startTime = new Date();
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
- noCaughtUpSecondariesCode,
- 'Expected replSetStepDown to fail, since no secondaries should be caught up.');
- var endTime = new Date();
+// If any of these assertions fail, we need to disable the fail point in order for the mongod to
+// shut down.
+try {
+ jsTestLog('Write to primary to make secondary out of sync.');
+ assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
+ sleep(1000);
+ // Secondary is now at least 1 second behind.
- // Ensure it took at least 1 second to time out. Adjust the timeout a little bit
- // for the precision issue of clock on Windows 2K8.
- assert.lte(0.95,
- (endTime - startTime) / 1000,
- 'Expected replSetStepDown command to fail after 1 second.');
- } catch (err) {
- disableFailPoint();
- throw err;
- }
+ jsTestLog('Try to step down.');
+ var startTime = new Date();
+ assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
+ noCaughtUpSecondariesCode,
+ 'Expected replSetStepDown to fail, since no secondaries should be caught up.');
+ var endTime = new Date();
+    // Ensure it took at least 1 second to time out. Allow a small margin for
+    // clock-precision issues on Windows Server 2008.
+ assert.lte(0.95,
+ (endTime - startTime) / 1000,
+ 'Expected replSetStepDown command to fail after 1 second.');
+} catch (err) {
disableFailPoint();
+ throw err;
+}
+
+disableFailPoint();
- // Make sure the primary hasn't changed, since all stepdowns should have failed.
- var primaryStatus = primary.getDB('admin').runCommand({replSetGetStatus: 1});
- assert.commandWorked(primaryStatus, 'replSetGetStatus failed.');
- assert.eq(primaryStatus.myState,
- ReplSetTest.State.PRIMARY,
- 'Expected original primary node to still be primary');
+// Make sure the primary hasn't changed, since all stepdowns should have failed.
+var primaryStatus = primary.getDB('admin').runCommand({replSetGetStatus: 1});
+assert.commandWorked(primaryStatus, 'replSetGetStatus failed.');
+assert.eq(primaryStatus.myState,
+ ReplSetTest.State.PRIMARY,
+ 'Expected original primary node to still be primary');
- replTest.stopSet();
+replTest.stopSet();
}());
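
secondaryCatchUpPeriodSecs bounds how long replSetStepDown waits for a secondary to catch up before failing with ExceededTimeLimit, and it must not exceed the stepdown period itself, as the test verifies. A minimal sketch of a well-formed invocation, assuming 'primary' is a hypothetical connection to the current primary:

    // Sketch: step down for 60s, waiting at most 10s for a secondary to catch up.
    // Fails with ErrorCodes.ExceededTimeLimit if no secondary catches up in time.
    var res = primary.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 10});
    printjson(res);
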
diff --git a/jstests/replsets/stepdown_kill_other_ops.js b/jstests/replsets/stepdown_kill_other_ops.js
index 1dc2404b8e4..06fc8de563f 100644
--- a/jstests/replsets/stepdown_kill_other_ops.js
+++ b/jstests/replsets/stepdown_kill_other_ops.js
@@ -1,70 +1,70 @@
// SERVER-15310 Ensure that stepDown kills all other running operations
(function() {
- "use strict";
- var name = "stepdownKillOps";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], "priority": 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+"use strict";
+var name = "stepdownKillOps";
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replSet.getPrimary();
- assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
- replSet.awaitReplication();
+var primary = replSet.getPrimary();
+assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
+assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
+replSet.awaitReplication();
- jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
- sleep(30000);
+jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
+sleep(30000);
- // Run sleep in a separate thread to take the global write lock which would prevent stepdown
- // from completing if it failed to kill all running operations.
- jsTestLog("Running {sleep:1, lock: 'w'} to grab global write lock");
- var sleepCmd = function() {
- // Run for 10 minutes if not interrupted.
- db.adminCommand({sleep: 1, lock: 'w', seconds: 60 * 10});
- };
- const startTime = new Date().getTime() / 1000;
- var sleepRunner = startParallelShell(sleepCmd, primary.port);
+// Run sleep in a separate thread to take the global write lock which would prevent stepdown
+// from completing if it failed to kill all running operations.
+jsTestLog("Running {sleep:1, lock: 'w'} to grab global write lock");
+var sleepCmd = function() {
+ // Run for 10 minutes if not interrupted.
+ db.adminCommand({sleep: 1, lock: 'w', seconds: 60 * 10});
+};
+const startTime = new Date().getTime() / 1000;
+var sleepRunner = startParallelShell(sleepCmd, primary.port);
- jsTestLog("Confirming that sleep() is running and has the global lock");
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp();
- for (var index in res.inprog) {
- var entry = res.inprog[index];
- if (entry["command"] && entry["command"]["sleep"]) {
- if ("W" === entry["locks"]["Global"]) {
- return true;
- }
+jsTestLog("Confirming that sleep() is running and has the global lock");
+assert.soon(function() {
+ var res = primary.getDB('admin').currentOp();
+ for (var index in res.inprog) {
+ var entry = res.inprog[index];
+ if (entry["command"] && entry["command"]["sleep"]) {
+ if ("W" === entry["locks"]["Global"]) {
+ return true;
}
}
- printjson(res);
- return false;
- }, "sleep never ran and grabbed the global write lock");
+ }
+ printjson(res);
+ return false;
+}, "sleep never ran and grabbed the global write lock");
- jsTestLog("Stepping down");
- assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30}));
+jsTestLog("Stepping down");
+assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30}));
- jsTestLog("Waiting for former PRIMARY to become SECONDARY");
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000);
+jsTestLog("Waiting for former PRIMARY to become SECONDARY");
+replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000);
- var newPrimary = replSet.getPrimary();
- assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY");
+var newPrimary = replSet.getPrimary();
+assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY");
- sleepRunner({checkExitSuccess: false});
- const endTime = new Date().getTime() / 1000;
- const duration = endTime - startTime;
- assert.lt(duration,
- 60 * 9, // In practice, this should be well under 1 minute.
- "Sleep lock held longer than expected, possibly uninterrupted.");
+sleepRunner({checkExitSuccess: false});
+const endTime = new Date().getTime() / 1000;
+const duration = endTime - startTime;
+assert.lt(duration,
+ 60 * 9, // In practice, this should be well under 1 minute.
+ "Sleep lock held longer than expected, possibly uninterrupted.");
- replSet.stopSet();
+replSet.stopSet();
})();
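
Scanning currentOp output for a particular command, as the test does to confirm that sleep holds the global write lock, is a recurring jstests pattern. A compact sketch, assuming a hypothetical connection 'node':

    // Sketch: check whether an in-progress 'sleep' command holds the global W lock.
    var inprog = node.getDB('admin').currentOp().inprog;
    var held = inprog.some(function(op) {
        return op.command && op.command.sleep && op.locks && op.locks.Global === 'W';
    });
    print('sleep holds global write lock: ' + held);
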
diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js
index 5c3ac0d33ab..87d7d884a8b 100644
--- a/jstests/replsets/stepdown_killop.js
+++ b/jstests/replsets/stepdown_killop.js
@@ -7,76 +7,76 @@
// 6. Writes should become allowed again and the primary should stay primary.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js");
+load("jstests/libs/write_concern_util.js");
- var name = "interruptStepDown";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], "priority": 0},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+var name = "interruptStepDown";
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], "priority": 0},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var secondary = replSet.getSecondary();
- jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
- stopServerReplication(secondary);
+var secondary = replSet.getSecondary();
+jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
+stopServerReplication(secondary);
- var primary = replSet.getPrimary();
- assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
+var primary = replSet.getPrimary();
+assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
- // do a write then ask the PRIMARY to stepdown
- jsTestLog("Initiating stepdown");
- assert.writeOK(primary.getDB(name).foo.insert(
- {myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- var stepDownCmd = function() {
- var res = db.getSiblingDB('admin').runCommand(
- {replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
- assert.commandFailedWithCode(res, 11601 /*interrupted*/);
- };
- var stepDowner = startParallelShell(stepDownCmd, primary.port);
- var stepDownOpID = -1;
+// do a write, then ask the PRIMARY to step down
+jsTestLog("Initiating stepdown");
+assert.writeOK(primary.getDB(name).foo.insert(
+ {myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+var stepDownCmd = function() {
+ var res =
+ db.getSiblingDB('admin').runCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
+ assert.commandFailedWithCode(res, 11601 /*interrupted*/);
+};
+var stepDowner = startParallelShell(stepDownCmd, primary.port);
+var stepDownOpID = -1;
- jsTestLog("Looking for stepdown in currentOp() output");
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp(true);
- for (var index in res.inprog) {
- var entry = res.inprog[index];
- if (entry["command"] && entry["command"]["replSetStepDown"] === 60) {
- stepDownOpID = entry.opid;
- return true;
- }
+jsTestLog("Looking for stepdown in currentOp() output");
+assert.soon(function() {
+ var res = primary.getDB('admin').currentOp(true);
+ for (var index in res.inprog) {
+ var entry = res.inprog[index];
+ if (entry["command"] && entry["command"]["replSetStepDown"] === 60) {
+ stepDownOpID = entry.opid;
+ return true;
}
- printjson(res);
- return false;
- }, "No pending stepdown command found");
+ }
+ printjson(res);
+ return false;
+}, "No pending stepdown command found");
- jsTestLog("Ensure that writes start failing with NotMaster errors");
- assert.soonNoExcept(function() {
- assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
- return true;
- });
+jsTestLog("Ensure that writes start failing with NotMaster errors");
+assert.soonNoExcept(function() {
+ assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
+ return true;
+});
- jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
- "ourselves as PRIMARY");
- assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
+jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
+ "ourselves as PRIMARY");
+assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
- // kill the stepDown and ensure that that unblocks writes to the db
- jsTestLog("Killing stepdown");
- primary.getDB('admin').killOp(stepDownOpID);
+// kill the stepDown and ensure that doing so unblocks writes to the db
+jsTestLog("Killing stepdown");
+primary.getDB('admin').killOp(stepDownOpID);
- var exitCode = stepDowner();
- assert.eq(0, exitCode);
+var exitCode = stepDowner();
+assert.eq(0, exitCode);
- assert.writeOK(primary.getDB(name).foo.remove({}));
- restartServerReplication(secondary);
- replSet.stopSet();
+assert.writeOK(primary.getDB(name).foo.remove({}));
+restartServerReplication(secondary);
+replSet.stopSet();
})();
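
killOp interrupts a running operation by its opid; the interrupted replSetStepDown above then fails with code 11601 (Interrupted) and the node remains primary. A minimal sketch of locating and killing such an operation, assuming a hypothetical connection 'node':

    // Sketch: find a pending replSetStepDown and interrupt it by opid.
    var ops = node.getDB('admin').currentOp(true).inprog;
    var op = ops.find(function(entry) {
        return entry.command && entry.command.replSetStepDown;
    });
    if (op) {
        assert.commandWorked(node.getDB('admin').killOp(op.opid));
    }
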
diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js
index 1d0aab19ac3..5958aa3a86c 100644
--- a/jstests/replsets/stepdown_long_wait_time.js
+++ b/jstests/replsets/stepdown_long_wait_time.js
@@ -7,70 +7,69 @@
// 6. Wait for PRIMARY to StepDown.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js");
+load("jstests/libs/write_concern_util.js");
- var name = "stepDownWithLongWait";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], "priority": 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+var name = "stepDownWithLongWait";
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replSet.getPrimary();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
- jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
- stopServerReplication(secondary);
+var secondary = replSet.getSecondary();
+jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
+stopServerReplication(secondary);
- jsTestLog("do a write then ask the PRIMARY to stepdown");
- var options = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
+jsTestLog("do a write then ask the PRIMARY to stepdown");
+var options = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
+assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
- var stepDownCmd = function() {
- assert.commandWorked(
- db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
- };
- var stepDowner = startParallelShell(stepDownCmd, primary.port);
+var stepDownCmd = function() {
+ assert.commandWorked(db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
+};
+var stepDowner = startParallelShell(stepDownCmd, primary.port);
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp(true);
- for (var entry in res.inprog) {
- if (res.inprog[entry]["command"] &&
- res.inprog[entry]["command"]["replSetStepDown"] === 60) {
- return true;
- }
+assert.soon(function() {
+ var res = primary.getDB('admin').currentOp(true);
+ for (var entry in res.inprog) {
+ if (res.inprog[entry]["command"] &&
+ res.inprog[entry]["command"]["replSetStepDown"] === 60) {
+ return true;
}
- printjson(res);
- return false;
- }, "No pending stepdown command found");
+ }
+ printjson(res);
+ return false;
+}, "No pending stepdown command found");
- jsTestLog("Ensure that writes start failing with NotMaster errors");
- assert.soonNoExcept(function() {
- assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
- return true;
- });
+jsTestLog("Ensure that writes start failing with NotMaster errors");
+assert.soonNoExcept(function() {
+ assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
+ return true;
+});
- jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
- "ourselves as PRIMARY");
- assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
+jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
+ "ourselves as PRIMARY");
+assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
- jsTestLog('Enable replication on the SECONDARY ' + secondary.host);
- restartServerReplication(secondary);
+jsTestLog('Enable replication on the SECONDARY ' + secondary.host);
+restartServerReplication(secondary);
- jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
- var exitCode = stepDowner();
+jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
+replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
+var exitCode = stepDowner();
- jsTestLog("Wait for SECONDARY " + secondary.host + " to become PRIMARY");
- replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
- replSet.stopSet();
+jsTestLog("Wait for SECONDARY " + secondary.host + " to become PRIMARY");
+replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
+replSet.stopSet();
})();
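
Both waits at the end of this test poll member state through the ReplSetTest helper. The same check can be made directly from replSetGetStatus, as in this sketch with a hypothetical connection 'node':

    // Sketch: report the node's own replica-set member state.
    var status = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
    print(node.host + ' is in state ' + status.myState);  // 1 = PRIMARY, 2 = SECONDARY
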
diff --git a/jstests/replsets/stepdown_needs_electable_secondary.js b/jstests/replsets/stepdown_needs_electable_secondary.js
index 799a2e69a9b..4d2124cc831 100644
--- a/jstests/replsets/stepdown_needs_electable_secondary.js
+++ b/jstests/replsets/stepdown_needs_electable_secondary.js
@@ -20,123 +20,117 @@
*
*/
(function() {
- 'use strict';
-
- load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
- // restartServerReplication,
- // restartReplSetReplication
-
- var name = 'stepdown_needs_electable_secondary';
-
- var replTest = new ReplSetTest({name: name, nodes: 5});
- var nodes = replTest.nodeList();
-
- replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2]},
- {"_id": 3, "host": nodes[3], "priority": 0}, // unelectable
- {"_id": 4, "host": nodes[4], "priority": 0} // unelectable
- ],
- "settings": {"chainingAllowed": false}
- });
-
- function assertStepDownFailsWithExceededTimeLimit(node) {
- assert.commandFailedWithCode(
- node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
- ErrorCodes.ExceededTimeLimit,
- "step down did not fail with 'ExceededTimeLimit'");
- }
-
- function assertStepDownSucceeds(node) {
- assert.commandWorked(
- node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
- }
-
- var primary = replTest.getPrimary();
-
- jsTestLog("Blocking writes to all secondaries.");
- stopReplicationOnSecondaries(replTest);
-
- jsTestLog("Doing a write to primary.");
- var testDB = replTest.getPrimary().getDB('testdb');
- var coll = testDB.stepdown_needs_electable_secondary;
- var timeout = ReplSetTest.kDefaultTimeoutMS;
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}}));
-
- // Try to step down with only the primary caught up (1 node out of 5).
- // stepDown should fail.
- jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- // Get the two unelectable secondaries
- var secondaryB_unelectable = replTest.nodes[3];
- var secondaryC_unelectable = replTest.nodes[4];
-
- // Get an electable secondary
- var secondaryA_electable = replTest.getSecondaries().find(function(s) {
- var nodeId = replTest.getNodeId(s);
- return (nodeId !== 3 && nodeId !== 4); // nodes 3 and 4 are set to be unelectable
- });
-
- // Enable writes to Secondary B (unelectable). Await replication.
- // (2 out of 5 nodes caught up, 0 electable)
- // stepDown should fail due to no caught up majority.
- jsTestLog("Re-enabling writes to unelectable secondary: node #" +
- replTest.getNodeId(secondaryB_unelectable) + ", " + secondaryB_unelectable);
- restartServerReplication(secondaryB_unelectable);
-
- // Wait for this secondary to catch up by issuing a write that must be replicated to 2 nodes
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}}));
-
- // Try to step down and fail
- jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- // Enable writes to Secondary C (unelectable). Await replication.
- // (3 out of 5 nodes caught up, 0 electable)
- // stepDown should fail due to caught up majority without electable node.
- jsTestLog("Re-enabling writes to unelectable secondary: node #" +
- replTest.getNodeId(secondaryC_unelectable) + ", " + secondaryC_unelectable);
- restartServerReplication(secondaryC_unelectable);
-
- // Wait for this secondary to catch up by issuing a write that must be replicated to 3 nodes
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}}));
-
- // Try to step down and fail
- jsTestLog("Trying to step down primary with a caught up majority that " +
- "doesn't contain an electable node.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- // Enable writes to Secondary A (electable). Await replication.
- // (4 out of 5 nodes caught up, 1 electable)
- // stepDown should succeed due to caught up majority containing an electable node.
- jsTestLog("Re-enabling writes to electable secondary: node #" +
- replTest.getNodeId(secondaryA_electable) + ", " + secondaryA_electable);
- restartServerReplication(secondaryA_electable);
-
- // Wait for this secondary to catch up by issuing a write that must be replicated to 4 nodes
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}}));
-
- // Try to step down. We expect success, so catch the exception thrown by 'replSetStepDown'.
- jsTestLog("Trying to step down primary with a caught up majority that " +
- "does contain an electable node.");
-
- assertStepDownSucceeds(primary);
-
- // Make sure that original primary has transitioned to SECONDARY state
- jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Disable all fail points for clean shutdown
- restartReplSetReplication(replTest);
- replTest.stopSet();
-
+'use strict';
+
+load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
+ // restartServerReplication,
+ // restartReplSetReplication
+
+var name = 'stepdown_needs_electable_secondary';
+
+var replTest = new ReplSetTest({name: name, nodes: 5});
+var nodes = replTest.nodeList();
+
+replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2]},
+ {"_id": 3, "host": nodes[3], "priority": 0}, // unelectable
+ {"_id": 4, "host": nodes[4], "priority": 0} // unelectable
+ ],
+ "settings": {"chainingAllowed": false}
+});
+
+function assertStepDownFailsWithExceededTimeLimit(node) {
+ assert.commandFailedWithCode(
+ node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
+ ErrorCodes.ExceededTimeLimit,
+ "step down did not fail with 'ExceededTimeLimit'");
+}
+
+function assertStepDownSucceeds(node) {
+ assert.commandWorked(node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
+}
+
+var primary = replTest.getPrimary();
+
+jsTestLog("Blocking writes to all secondaries.");
+stopReplicationOnSecondaries(replTest);
+
+jsTestLog("Doing a write to primary.");
+var testDB = replTest.getPrimary().getDB('testdb');
+var coll = testDB.stepdown_needs_electable_secondary;
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}}));
+
+// Try to step down with only the primary caught up (1 node out of 5).
+// stepDown should fail.
+jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+// Get the two unelectable secondaries
+var secondaryB_unelectable = replTest.nodes[3];
+var secondaryC_unelectable = replTest.nodes[4];
+
+// Get an electable secondary
+var secondaryA_electable = replTest.getSecondaries().find(function(s) {
+ var nodeId = replTest.getNodeId(s);
+ return (nodeId !== 3 && nodeId !== 4); // nodes 3 and 4 are set to be unelectable
+});
+
+// Enable writes to Secondary B (unelectable). Await replication.
+// (2 out of 5 nodes caught up, 0 electable)
+// stepDown should fail because there is no caught-up majority.
+jsTestLog("Re-enabling writes to unelectable secondary: node #" +
+ replTest.getNodeId(secondaryB_unelectable) + ", " + secondaryB_unelectable);
+restartServerReplication(secondaryB_unelectable);
+
+// Wait for this secondary to catch up by issuing a write that must be replicated to 2 nodes
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}}));
+
+// Try to step down and fail
+jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+// Enable writes to Secondary C (unelectable). Await replication.
+// (3 out of 5 nodes caught up, 0 electable)
+// stepDown should fail because the caught-up majority contains no electable node.
+jsTestLog("Re-enabling writes to unelectable secondary: node #" +
+ replTest.getNodeId(secondaryC_unelectable) + ", " + secondaryC_unelectable);
+restartServerReplication(secondaryC_unelectable);
+
+// Wait for this secondary to catch up by issuing a write that must be replicated to 3 nodes
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}}));
+
+// Try to step down and fail
+jsTestLog("Trying to step down primary with a caught up majority that " +
+ "doesn't contain an electable node.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+// Enable writes to Secondary A (electable). Await replication.
+// (4 out of 5 nodes caught up, 1 electable)
+// stepDown should succeed because the caught-up majority contains an electable node.
+jsTestLog("Re-enabling writes to electable secondary: node #" +
+ replTest.getNodeId(secondaryA_electable) + ", " + secondaryA_electable);
+restartServerReplication(secondaryA_electable);
+
+// Wait for this secondary to catch up by issuing a write that must be replicated to 4 nodes
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}}));
+
+// Try to step down. This time the step down is expected to succeed.
+jsTestLog("Trying to step down primary with a caught up majority that " +
+ "does contain an electable node.");
+
+assertStepDownSucceeds(primary);
+
+// Make sure that original primary has transitioned to SECONDARY state
+jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Disable all fail points for clean shutdown
+restartReplSetReplication(replTest);
+replTest.stopSet();
}());
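
A member is unelectable exactly when its configured priority is 0, which is how the initiate document above pins nodes 3 and 4. The same property can be changed on a running set via reconfig; a sketch under the assumption that 'primary' is a connection to the current primary and member 3 is the one being demoted:

    // Sketch: demote member 3 to priority 0 (unelectable) via reconfig.
    var cfg = primary.adminCommand({replSetGetConfig: 1}).config;
    cfg.members[3].priority = 0;
    cfg.version += 1;
    assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
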
diff --git a/jstests/replsets/stepdown_needs_majority.js b/jstests/replsets/stepdown_needs_majority.js
index efc874fde3a..cb465fb3f30 100644
--- a/jstests/replsets/stepdown_needs_majority.js
+++ b/jstests/replsets/stepdown_needs_majority.js
@@ -16,92 +16,90 @@
*
*/
(function() {
- 'use strict';
-
- load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries, //
- // restartServerReplication,
- // restartReplSetReplication
-
- function assertStepDownFailsWithExceededTimeLimit(node) {
- assert.commandFailedWithCode(
- node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
- ErrorCodes.ExceededTimeLimit,
- "step down did not fail with 'ExceededTimeLimit'");
- }
-
- function assertStepDownSucceeds(node) {
- assert.commandWorked(
- node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
- }
-
- function nodeIdStr(repltest, node) {
- return "node #" + repltest.getNodeId(node) + ", " + node.host;
- }
-
- //
- // Test setup
- //
- var name = 'stepdown_needs_majority';
- var replTest = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
-
- replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
- var testDB = primary.getDB('testdb');
- var coll = testDB[name];
- var dummy_doc = {"dummy_key": "dummy_val"};
- var timeout = ReplSetTest.kDefaultTimeoutMS;
-
- //
- // Block writes to all secondaries
- //
- jsTestLog("Blocking writes to all secondaries.");
- stopReplicationOnSecondaries(replTest);
-
- //
- // Write to the primary and attempt stepdown
- //
- jsTestLog("Issuing a write to the primary(" + primary.host + ") with write_concern:1");
- assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}}));
-
- jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- //
- // Re-enable writes to Secondary A and attempt stepdown
- //
- var secondaryA = replTest.getSecondaries()[0];
- jsTestLog("Reenabling writes to one secondary (" + nodeIdStr(replTest, secondaryA) + ")");
- restartServerReplication(secondaryA);
-
- jsTestLog("Issuing a write to the primary with write_concern:2");
- assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}}));
-
- jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- //
- // Re-enable writes to Secondary B and attempt stepdown
- //
- var secondaryB = replTest.getSecondaries()[1];
- jsTestLog("Reenabling writes to another secondary (" + nodeIdStr(replTest, secondaryB) + ")");
- restartServerReplication(secondaryB);
-
- jsTestLog("Issuing a write to the primary with write_concern:3");
- assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}}));
-
- jsTestLog("Trying to step down primary with 3 nodes out of 5 caught up.");
- assertStepDownSucceeds(primary);
-
- jsTestLog("Waiting for PRIMARY(" + primary.host + ") to step down & become SECONDARY.");
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- //
- // Disable failpoints and stop replica set
- //
- jsTestLog("Disabling all fail points to allow for clean shutdown");
- restartReplSetReplication(replTest);
- replTest.stopSet();
-
+'use strict';
+
+load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries, //
+ // restartServerReplication,
+ // restartReplSetReplication
+
+function assertStepDownFailsWithExceededTimeLimit(node) {
+ assert.commandFailedWithCode(
+ node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
+ ErrorCodes.ExceededTimeLimit,
+ "step down did not fail with 'ExceededTimeLimit'");
+}
+
+function assertStepDownSucceeds(node) {
+ assert.commandWorked(node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
+}
+
+function nodeIdStr(repltest, node) {
+ return "node #" + repltest.getNodeId(node) + ", " + node.host;
+}
+
+//
+// Test setup
+//
+var name = 'stepdown_needs_majority';
+var replTest = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
+
+replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+var testDB = primary.getDB('testdb');
+var coll = testDB[name];
+var dummy_doc = {"dummy_key": "dummy_val"};
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+
+//
+// Block writes to all secondaries
+//
+jsTestLog("Blocking writes to all secondaries.");
+stopReplicationOnSecondaries(replTest);
+
+//
+// Write to the primary and attempt stepdown
+//
+jsTestLog("Issuing a write to the primary(" + primary.host + ") with write_concern:1");
+assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}}));
+
+jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+//
+// Re-enable writes to Secondary A and attempt stepdown
+//
+var secondaryA = replTest.getSecondaries()[0];
+jsTestLog("Reenabling writes to one secondary (" + nodeIdStr(replTest, secondaryA) + ")");
+restartServerReplication(secondaryA);
+
+jsTestLog("Issuing a write to the primary with write_concern:2");
+assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}}));
+
+jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+//
+// Re-enable writes to Secondary B and attempt stepdown
+//
+var secondaryB = replTest.getSecondaries()[1];
+jsTestLog("Reenabling writes to another secondary (" + nodeIdStr(replTest, secondaryB) + ")");
+restartServerReplication(secondaryB);
+
+jsTestLog("Issuing a write to the primary with write_concern:3");
+assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}}));
+
+jsTestLog("Trying to step down primary with 3 nodes out of 5 caught up.");
+assertStepDownSucceeds(primary);
+
+jsTestLog("Waiting for PRIMARY(" + primary.host + ") to step down & become SECONDARY.");
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+//
+// Disable failpoints and stop replica set
+//
+jsTestLog("Disabling all fail points to allow for clean shutdown");
+restartReplSetReplication(replTest);
+replTest.stopSet();
}());
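
The increasing w values in this test double as catch-up barriers: a write acknowledged at w:N proves that N members have replicated everything through that write. A minimal sketch of the barrier, assuming a hypothetical collection handle 'coll' on the primary:

    // Sketch: block until at least 3 members have replicated this write;
    // assert.writeOK throws if the write concern times out.
    assert.writeOK(coll.insert({barrier: 1}, {writeConcern: {w: 3, wtimeout: 60000}}));
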
diff --git a/jstests/replsets/stepup.js b/jstests/replsets/stepup.js
index 65751b67c41..51bf1a7f54f 100644
--- a/jstests/replsets/stepup.js
+++ b/jstests/replsets/stepup.js
@@ -4,44 +4,44 @@ load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- var name = "stepup";
- var rst = new ReplSetTest({name: name, nodes: 2});
-
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
-
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
-
- // Step up the primary. Return OK because it's already the primary.
- var res = primary.adminCommand({replSetStepUp: 1});
- assert.commandWorked(res);
- assert.eq(primary, rst.getPrimary());
-
- // Step up the secondary, but it's not eligible to be primary.
- // Enable fail point on secondary.
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
-
- assert.writeOK(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}}));
- res = secondary.adminCommand({replSetStepUp: 1});
- assert.commandFailedWithCode(res, ErrorCodes.CommandFailed);
- assert.commandWorked(
- secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
-
- // Wait for the secondary to catch up by replicating a doc to both nodes.
- assert.writeOK(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}}));
-
- // Step up the secondary. Retry since the old primary may step down when we try to ask for its
- // vote.
- assert.soonNoExcept(function() {
- return secondary.adminCommand({replSetStepUp: 1}).ok;
- });
-
- // Make sure the step up succeeded.
- assert.eq(secondary, rst.getPrimary());
-
- rst.stopSet();
+"use strict";
+var name = "stepup";
+var rst = new ReplSetTest({name: name, nodes: 2});
+
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
+
+var primary = rst.getPrimary();
+var secondary = rst.getSecondary();
+
+// Step up the primary. The command returns OK because it is already the primary.
+var res = primary.adminCommand({replSetStepUp: 1});
+assert.commandWorked(res);
+assert.eq(primary, rst.getPrimary());
+
+// Try to step up the secondary while it is not eligible to become primary.
+// Enable a fail point on the secondary so it stops applying ops and falls behind.
+assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+
+assert.writeOK(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}}));
+res = secondary.adminCommand({replSetStepUp: 1});
+assert.commandFailedWithCode(res, ErrorCodes.CommandFailed);
+assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+
+// Wait for the secondary to catch up by replicating a doc to both nodes.
+assert.writeOK(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}}));
+
+// Step up the secondary. Retry since the old primary may step down when we try to ask for its
+// vote.
+assert.soonNoExcept(function() {
+ return secondary.adminCommand({replSetStepUp: 1}).ok;
+});
+
+// Make sure the step up succeeded.
+assert.eq(secondary, rst.getPrimary());
+
+rst.stopSet();
})();
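
replSetStepUp asks an electable, caught-up node to call an election immediately and fails with CommandFailed when the node cannot win, as the fail point phase of the test shows. A sketch of the retry idiom the test ends with, assuming a hypothetical connection 'node' to the candidate:

    // Sketch: retry step-up until the candidate wins an election.
    assert.soonNoExcept(function() {
        return node.adminCommand({replSetStepUp: 1}).ok;
    }, 'node never won an election');
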
diff --git a/jstests/replsets/storage_commit_out_of_order.js b/jstests/replsets/storage_commit_out_of_order.js
index f6a65ebae97..7d96ae4c235 100644
--- a/jstests/replsets/storage_commit_out_of_order.js
+++ b/jstests/replsets/storage_commit_out_of_order.js
@@ -8,67 +8,67 @@
* is released after a few seconds and asserts that its write concern can be satisfied.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const dbName = 'storage_commit_out_of_order';
- const collName = 'foo';
- const numThreads = 2;
- const primary = rst.getPrimary();
- const coll = primary.getDB(dbName).getCollection(collName);
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const dbName = 'storage_commit_out_of_order';
+const collName = 'foo';
+const numThreads = 2;
+const primary = rst.getPrimary();
+const coll = primary.getDB(dbName).getCollection(collName);
- /**
- * Waits for the provided latch to reach 0 and then does a single w:majority insert.
- */
- const majorityInsert = function(num, host, dbName, collName, latch) {
- const m = new Mongo(host);
- latch.countDown();
- while (latch.getCount() > 0) {
- // do nothing
- }
- return m.getDB(dbName).runCommand({
- insert: collName,
- documents: [{b: num}],
- writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}
- });
- };
+/**
+ * Waits for the provided latch to reach 0 and then does a single w:majority insert.
+ */
+const majorityInsert = function(num, host, dbName, collName, latch) {
+ const m = new Mongo(host);
+ latch.countDown();
+ while (latch.getCount() > 0) {
+ // do nothing
+ }
+ return m.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{b: num}],
+ writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}
+ });
+};
- assert.commandWorked(primary.setLogLevel(2, 'replication'));
- assert.commandWorked(coll.insert(
- {a: 1}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assert.commandWorked(primary.setLogLevel(2, 'replication'));
+assert.commandWorked(
+ coll.insert({a: 1}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- // Turn on a fail point to force the first thread to receive an optime from the optime
- // generator to wait a few seconds before storage-committing the insert.
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: 'sleepBetweenInsertOpTimeGenerationAndLogOp',
- mode: {times: 1},
- data: {waitForMillis: 3000}
- }));
+// Turn on a fail point to force the first thread to receive an optime from the optime
+// generator to wait a few seconds before storage-committing the insert.
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: 'sleepBetweenInsertOpTimeGenerationAndLogOp',
+ mode: {times: 1},
+ data: {waitForMillis: 3000}
+}));
- // Start a bunch of threads. They will block waiting on the latch to hit 0.
- const t = [];
- const counter = new CountDownLatch(numThreads + 1);
- for (let i = 0; i < numThreads; ++i) {
- t[i] = new ScopedThread(majorityInsert, i, coll.getMongo().host, dbName, collName, counter);
- t[i].start();
- }
+// Start a bunch of threads. They will block waiting on the latch to hit 0.
+const t = [];
+const counter = new CountDownLatch(numThreads + 1);
+for (let i = 0; i < numThreads; ++i) {
+ t[i] = new ScopedThread(majorityInsert, i, coll.getMongo().host, dbName, collName, counter);
+ t[i].start();
+}
- // Release the threads with the latch once they are all blocked on it.
- jsTestLog('All threads started.');
- assert.soon(() => counter.getCount() === 1);
- jsTestLog('All threads at barrier.');
- counter.countDown();
- jsTestLog('All threads finishing.');
+// Release the threads with the latch once they are all blocked on it.
+jsTestLog('All threads started.');
+assert.soon(() => counter.getCount() === 1);
+jsTestLog('All threads at barrier.');
+counter.countDown();
+jsTestLog('All threads finishing.');
- // Wait for all threads to complete and ensure they succeeded.
- for (let i = 0; i < numThreads; ++i) {
- t[i].join();
- assert.commandWorked(t[i].returnData());
- }
+// Wait for all threads to complete and ensure they succeeded.
+for (let i = 0; i < numThreads; ++i) {
+ t[i].join();
+ assert.commandWorked(t[i].returnData());
+}
- rst.stopSet();
+rst.stopSet();
}());
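
Aside: the latch idiom above (each thread counts down on arrival, then spins until the count reaches zero) is the standard way these tests release N shell threads at effectively the same instant. A minimal sketch of just that barrier, using the same helpers the test loads (CountDownLatch and ScopedThread from jstests/libs/parallelTester.js); the worker body is a placeholder:

load('jstests/libs/parallelTester.js');

const kThreads = 4;
const barrier = new CountDownLatch(kThreads);

const worker = function(id, latch) {
    latch.countDown();              // signal arrival
    while (latch.getCount() > 0) {  // spin until every thread has arrived
        // busy-wait, as in the test above
    }
    return id;  // all threads resume from here at (nearly) the same moment
};

const threads = [];
for (let i = 0; i < kThreads; ++i) {
    threads[i] = new ScopedThread(worker, i, barrier);
    threads[i].start();
}
threads.forEach(function(t) {
    t.join();
});
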
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
index 79a1b48fa68..4a1053b04bb 100644
--- a/jstests/replsets/sync2.js
+++ b/jstests/replsets/sync2.js
@@ -2,50 +2,50 @@
// are disconnected from their current sync source.
(function() {
- 'use strict';
-
- var replTest = new ReplSetTest({
- name: 'sync2',
- nodes: [{rsConfig: {priority: 5}}, {arbiter: true}, {}, {}, {}],
- useBridge: true
- });
- var conns = replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- jsTestLog("Replica set test initialized");
-
- master.getDB("foo").bar.insert({x: 1});
- replTest.awaitReplication();
-
- conns[0].disconnect(conns[4]);
- conns[1].disconnect(conns[2]);
- conns[2].disconnect(conns[3]);
- conns[3].disconnect(conns[1]);
-
- // 4 is connected to 2
- conns[4].disconnect(conns[1]);
- conns[4].disconnect(conns[3]);
-
- assert.soon(function() {
- master = replTest.getPrimary();
- return master === conns[0];
- }, "node 0 should become primary before timeout", replTest.kDefaultTimeoutMS);
-
- replTest.awaitReplication();
- jsTestLog("Checking that ops still replicate correctly");
- var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTimeoutMS}};
- // In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4 prior
-    // to bridging, it will not change sync sources, and so will not receive the write in time. This was not a
- // problem in 3.0 because the old version of mongobridge caused all the nodes to restart during
- // partitioning, forcing the set to rebuild the spanning tree.
- assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
-
- // 4 is connected to 3
- conns[4].disconnect(conns[2]);
- conns[4].reconnect(conns[3]);
-
- assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
-
- replTest.stopSet();
+'use strict';
+
+var replTest = new ReplSetTest({
+ name: 'sync2',
+ nodes: [{rsConfig: {priority: 5}}, {arbiter: true}, {}, {}, {}],
+ useBridge: true
+});
+var conns = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+jsTestLog("Replica set test initialized");
+
+master.getDB("foo").bar.insert({x: 1});
+replTest.awaitReplication();
+
+conns[0].disconnect(conns[4]);
+conns[1].disconnect(conns[2]);
+conns[2].disconnect(conns[3]);
+conns[3].disconnect(conns[1]);
+
+// 4 is connected to 2
+conns[4].disconnect(conns[1]);
+conns[4].disconnect(conns[3]);
+
+assert.soon(function() {
+ master = replTest.getPrimary();
+ return master === conns[0];
+}, "node 0 should become primary before timeout", replTest.kDefaultTimeoutMS);
+
+replTest.awaitReplication();
+jsTestLog("Checking that ops still replicate correctly");
+var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTimeoutMS}};
+// In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4 prior
+// to bridging, it will not change sync sources, and so will not receive the write in time. This was not a
+// problem in 3.0 because the old version of mongobridge caused all the nodes to restart during
+// partitioning, forcing the set to rebuild the spanning tree.
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
+
+// 4 is connected to 3
+conns[4].disconnect(conns[2]);
+conns[4].reconnect(conns[3]);
+
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/system_profile.js b/jstests/replsets/system_profile.js
index 7e1250da6dd..4e525d4fc93 100644
--- a/jstests/replsets/system_profile.js
+++ b/jstests/replsets/system_profile.js
@@ -2,48 +2,47 @@
// to the secondary.
(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+"use strict";
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- // filter out noop writes
- var getLatestOp = function() {
- return primaryDB.getSiblingDB('local')
- .oplog.rs.find({op: {$ne: 'n'}})
- .sort({$natural: -1})
- .limit(1)
- .next();
- };
+// filter out noop writes
+var getLatestOp = function() {
+ return primaryDB.getSiblingDB('local')
+ .oplog.rs.find({op: {$ne: 'n'}})
+ .sort({$natural: -1})
+ .limit(1)
+ .next();
+};
- var primaryDB = rst.getPrimary().getDB('test');
- assert.writeOK(primaryDB.foo.insert({}));
- var op = getLatestOp();
+var primaryDB = rst.getPrimary().getDB('test');
+assert.writeOK(primaryDB.foo.insert({}));
+var op = getLatestOp();
- // Enable profiling on the primary
- assert.commandWorked(primaryDB.runCommand({profile: 2}));
- assert.eq(op, getLatestOp(), "oplog entry created when profile was enabled");
- assert.writeOK(primaryDB.foo.insert({}));
- op = getLatestOp();
- assert.commandWorked(primaryDB.runCommand({profile: 0}));
- assert.eq(op, getLatestOp(), "oplog entry created when profile was disabled");
+// Enable profiling on the primary
+assert.commandWorked(primaryDB.runCommand({profile: 2}));
+assert.eq(op, getLatestOp(), "oplog entry created when profile was enabled");
+assert.writeOK(primaryDB.foo.insert({}));
+op = getLatestOp();
+assert.commandWorked(primaryDB.runCommand({profile: 0}));
+assert.eq(op, getLatestOp(), "oplog entry created when profile was disabled");
- // dropCollection
- assert(primaryDB.system.profile.drop());
- assert.eq(op, getLatestOp(), "oplog entry created when system.profile was dropped");
+// dropCollection
+assert(primaryDB.system.profile.drop());
+assert.eq(op, getLatestOp(), "oplog entry created when system.profile was dropped");
- assert.commandWorked(primaryDB.createCollection("system.profile", {capped: true, size: 1000}));
- assert.eq(op, getLatestOp(), "oplog entry created when system.profile was created");
- assert.commandWorked(primaryDB.runCommand({profile: 2}));
- assert.writeOK(primaryDB.foo.insert({}));
- op = getLatestOp();
- assert.commandWorked(primaryDB.runCommand({profile: 0}));
+assert.commandWorked(primaryDB.createCollection("system.profile", {capped: true, size: 1000}));
+assert.eq(op, getLatestOp(), "oplog entry created when system.profile was created");
+assert.commandWorked(primaryDB.runCommand({profile: 2}));
+assert.writeOK(primaryDB.foo.insert({}));
+op = getLatestOp();
+assert.commandWorked(primaryDB.runCommand({profile: 0}));
-    // Empty the collection via the emptycapped command
- assert.commandWorked(primaryDB.runCommand({emptycapped: "system.profile"}));
- assert.eq(
- op, getLatestOp(), "oplog entry created when system.profile was emptied via emptycapped");
- assert(primaryDB.system.profile.drop());
- rst.stopSet();
+// Empty the collection via the emptycapped command
+assert.commandWorked(primaryDB.runCommand({emptycapped: "system.profile"}));
+assert.eq(op, getLatestOp(), "oplog entry created when system.profile was emptied via emptycapped");
+assert(primaryDB.system.profile.drop());
+rst.stopSet();
})();
diff --git a/jstests/replsets/system_profile_secondary.js b/jstests/replsets/system_profile_secondary.js
index 7c62e126323..954ec0bf523 100644
--- a/jstests/replsets/system_profile_secondary.js
+++ b/jstests/replsets/system_profile_secondary.js
@@ -1,26 +1,24 @@
// This tests that we can successfully profile queries on secondaries.
(function() {
- 'use strict';
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+'use strict';
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- var secondaryDB = rst.getSecondary().getDB('test');
+var secondaryDB = rst.getSecondary().getDB('test');
- jsTestLog('Enable profiling on the secondary');
- assert.commandWorked(secondaryDB.runCommand({profile: 2}));
+jsTestLog('Enable profiling on the secondary');
+assert.commandWorked(secondaryDB.runCommand({profile: 2}));
- jsTestLog('Perform a query that returns no results, but will get profiled.');
- secondaryDB.doesntexist.find({}).itcount();
+jsTestLog('Perform a query that returns no results, but will get profiled.');
+secondaryDB.doesntexist.find({}).itcount();
- let numProfileEntries = (coll) =>
- coll.getDB()
- .system.profile.find({op: 'query', ns: coll.getFullName(), nreturned: 0})
- .itcount();
+let numProfileEntries = (coll) =>
+ coll.getDB().system.profile.find({op: 'query', ns: coll.getFullName(), nreturned: 0}).itcount();
- jsTestLog('Check the query is in the profile and turn profiling off.');
- assert.eq(numProfileEntries(secondaryDB.doesntexist), 1, 'expected a single profile entry');
- assert.commandWorked(secondaryDB.runCommand({profile: 0}));
- rst.stopSet();
+jsTestLog('Check the query is in the profile and turn profiling off.');
+assert.eq(numProfileEntries(secondaryDB.doesntexist), 1, 'expected a single profile entry');
+assert.commandWorked(secondaryDB.runCommand({profile: 0}));
+rst.stopSet();
})();
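
Aside: the profile command used in the two tests above takes three levels: 0 disables the profiler, 2 records every operation, and 1 (not exercised here) records only operations slower than the slowms threshold. A sketch of the intermediate level against an arbitrary test database:

// Profile only operations slower than 50 ms on this database.
assert.commandWorked(db.runCommand({profile: 1, slowms: 50}));
// ... run a workload ...
printjson(db.system.profile.find().sort({ts: -1}).limit(1).next());
assert.commandWorked(db.runCommand({profile: 0}));  // profiling back off
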
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
index 78ab1e50588..1c753988af8 100644
--- a/jstests/replsets/tags.js
+++ b/jstests/replsets/tags.js
@@ -1,8 +1,8 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/tags.js");
+load("jstests/replsets/libs/tags.js");
- let nodes = [{}, {}, {}, {}, {}];
- new TagsTest({nodes: nodes}).run();
+let nodes = [{}, {}, {}, {}, {}];
+new TagsTest({nodes: nodes}).run();
}());
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index 7ee2fe81031..361b6204c08 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -1,60 +1,60 @@
// Change a write concern mode from 2 to 3 servers
(function() {
- "use strict";
-
- var host = getHostName();
- var replTest = new ReplSetTest({nodes: 4});
- var nodes = replTest.startSet();
- var ports = replTest.ports;
- var conf = {
- _id: replTest.name,
- members: [
- {_id: 0, host: host + ":" + ports[0], tags: {"backup": "A"}},
- {_id: 1, host: host + ":" + ports[1], tags: {"backup": "B"}},
- {_id: 2, host: host + ":" + ports[2], tags: {"backup": "C"}},
- {_id: 3, host: host + ":" + ports[3], tags: {"backup": "D"}, arbiterOnly: true}
- ],
- settings: {getLastErrorModes: {backedUp: {backup: 2}}}
- };
-
- print("arbiters can't have tags");
- var result = nodes[0].getDB("admin").runCommand({replSetInitiate: conf});
- printjson(result);
- assert.eq(result.ok, 0);
-
- conf.members.pop();
- replTest.stop(3);
- replTest.remove(3);
- replTest.initiate(conf);
-
- replTest.awaitReplication();
-
- var master = replTest.getPrimary();
- var db = master.getDB("test");
- var wtimeout = ReplSetTest.kDefaultTimeoutMS;
-
- assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
-
- var nextVersion = replTest.getReplSetConfigFromNode().version + 1;
- conf.version = nextVersion;
- conf.settings.getLastErrorModes.backedUp.backup = 3;
- master.getDB("admin").runCommand({replSetReconfig: conf});
- replTest.awaitReplication();
-
- master = replTest.getPrimary();
- var db = master.getDB("test");
- assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
-
- nextVersion++;
- conf.version = nextVersion;
-    conf.members[0].priority = 3;
-    conf.members[2].priority = 0;
- master.getDB("admin").runCommand({replSetReconfig: conf});
-
- master = replTest.getPrimary();
- var db = master.getDB("test");
- assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
-
- replTest.stopSet();
+"use strict";
+
+var host = getHostName();
+var replTest = new ReplSetTest({nodes: 4});
+var nodes = replTest.startSet();
+var ports = replTest.ports;
+var conf = {
+ _id: replTest.name,
+ members: [
+ {_id: 0, host: host + ":" + ports[0], tags: {"backup": "A"}},
+ {_id: 1, host: host + ":" + ports[1], tags: {"backup": "B"}},
+ {_id: 2, host: host + ":" + ports[2], tags: {"backup": "C"}},
+ {_id: 3, host: host + ":" + ports[3], tags: {"backup": "D"}, arbiterOnly: true}
+ ],
+ settings: {getLastErrorModes: {backedUp: {backup: 2}}}
+};
+
+print("arbiters can't have tags");
+var result = nodes[0].getDB("admin").runCommand({replSetInitiate: conf});
+printjson(result);
+assert.eq(result.ok, 0);
+
+conf.members.pop();
+replTest.stop(3);
+replTest.remove(3);
+replTest.initiate(conf);
+
+replTest.awaitReplication();
+
+var master = replTest.getPrimary();
+var db = master.getDB("test");
+var wtimeout = ReplSetTest.kDefaultTimeoutMS;
+
+assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+
+var nextVersion = replTest.getReplSetConfigFromNode().version + 1;
+conf.version = nextVersion;
+conf.settings.getLastErrorModes.backedUp.backup = 3;
+master.getDB("admin").runCommand({replSetReconfig: conf});
+replTest.awaitReplication();
+
+master = replTest.getPrimary();
+var db = master.getDB("test");
+assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+
+nextVersion++;
+conf.version = nextVersion;
+conf.members[0].priority = 3;
+conf.members[2].priority = 0;
+master.getDB("admin").runCommand({replSetReconfig: conf});
+
+master = replTest.getPrimary();
+var db = master.getDB("test");
+assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+
+replTest.stopSet();
}());
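
Aside: the semantics being reconfigured above: an entry in settings.getLastErrorModes maps a mode name to {<tag>: N}, and a write with w: <mode> must be acknowledged by members carrying N distinct values of that tag, so bumping backedUp from {backup: 2} to {backup: 3} demands three distinct backup tags instead of two. A minimal, hypothetical config illustrating the shape (host names and mode name are made up):

var demoConf = {
    _id: "tagDemo",
    members: [
        {_id: 0, host: "host:27017", tags: {dc: "east"}},
        {_id: 1, host: "host:27018", tags: {dc: "east"}},
        {_id: 2, host: "host:27019", tags: {dc: "west"}}
    ],
    // "bothDCs" is satisfied once members with 2 distinct 'dc' values ack.
    settings: {getLastErrorModes: {bothDCs: {dc: 2}}}
};
// Usage: db.coll.insert({x: 1}, {writeConcern: {w: "bothDCs", wtimeout: 60000}});
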
diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js
index d942a8e54d9..6f28faf300a 100644
--- a/jstests/replsets/tags_with_reconfig.js
+++ b/jstests/replsets/tags_with_reconfig.js
@@ -4,70 +4,70 @@
// reported their progress to a primary.
(function() {
- "use strict";
+"use strict";
- // Start a replica set with 3 nodes
- var host = getHostName();
- var replTest = new ReplSetTest({nodes: 3});
- var nodes = replTest.startSet();
- var ports = replTest.ports;
+// Start a replica set with 3 nodes
+var host = getHostName();
+var replTest = new ReplSetTest({nodes: 3});
+var nodes = replTest.startSet();
+var ports = replTest.ports;
- // Set tags and getLastErrorModes
- var conf = {
- _id: replTest.name,
- members: [
- {_id: 0, host: host + ":" + ports[0], tags: {"dc": "bbb"}},
- {_id: 1, host: host + ":" + ports[1], tags: {"dc": "bbb"}},
- {_id: 2, host: host + ":" + ports[2], tags: {"dc": "ccc"}}
- ],
- settings: {
- getLastErrorModes: {
- anydc: {dc: 1},
- alldc: {dc: 2},
- }
+// Set tags and getLastErrorModes
+var conf = {
+ _id: replTest.name,
+ members: [
+ {_id: 0, host: host + ":" + ports[0], tags: {"dc": "bbb"}},
+ {_id: 1, host: host + ":" + ports[1], tags: {"dc": "bbb"}},
+ {_id: 2, host: host + ":" + ports[2], tags: {"dc": "ccc"}}
+ ],
+ settings: {
+ getLastErrorModes: {
+ anydc: {dc: 1},
+ alldc: {dc: 2},
}
- };
+ }
+};
- replTest.initiate(conf);
- replTest.awaitReplication();
+replTest.initiate(conf);
+replTest.awaitReplication();
- var wtimeout = ReplSetTest.kDefaultTimeoutMS;
- var master = replTest.getPrimary();
- var db = master.getDB("test");
+var wtimeout = ReplSetTest.kDefaultTimeoutMS;
+var master = replTest.getPrimary();
+var db = master.getDB("test");
-    // Insert a document with write concern: anydc
- assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
+// Insert a document with write concern: anydc
+assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
-    // Insert a document with write concern: alldc
- assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
+// Insert a document with write concern: alldc
+assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
- // Add a new tag to the replica set
- var config = master.getDB("local").system.replset.findOne();
- printjson(config);
- var modes = config.settings.getLastErrorModes;
- config.version++;
- config.members[0].tags.newtag = "newtag";
+// Add a new tag to the replica set
+var config = master.getDB("local").system.replset.findOne();
+printjson(config);
+var modes = config.settings.getLastErrorModes;
+config.version++;
+config.members[0].tags.newtag = "newtag";
- try {
- master.getDB("admin").runCommand({replSetReconfig: config});
- } catch (e) {
- print(e);
- }
+try {
+ master.getDB("admin").runCommand({replSetReconfig: config});
+} catch (e) {
+ print(e);
+}
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Print the new config for replica set
- var config = master.getDB("local").system.replset.findOne();
- printjson(config);
+// Print the new config for replica set
+var config = master.getDB("local").system.replset.findOne();
+printjson(config);
- master = replTest.getPrimary();
- var db = master.getDB("test");
+master = replTest.getPrimary();
+var db = master.getDB("test");
-    // Insert a document with write concern: anydc
- assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
+// Insert a document with write concern: anydc
+assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
-    // Insert a document with write concern: alldc
- assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
+// Insert a document with write concern: alldc
+assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
- replTest.stopSet();
+replTest.stopSet();
}());
diff --git a/jstests/replsets/temp_namespace_restart_as_standalone.js b/jstests/replsets/temp_namespace_restart_as_standalone.js
index 89179d35428..e5061629c82 100644
--- a/jstests/replsets/temp_namespace_restart_as_standalone.js
+++ b/jstests/replsets/temp_namespace_restart_as_standalone.js
@@ -5,100 +5,99 @@
* @tags: [requires_persistence, requires_majority_read_concern, requires_replication]
*/
(function() {
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- // Rig the election so that the first node becomes the primary and remains primary despite the
- // secondary being terminated during this test.
- var replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
+// Rig the election so that the first node becomes the primary and remains primary despite the
+// secondary being terminated during this test.
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
+rst.initiate(replSetConfig);
- var primaryConn = rst.getPrimary();
- var secondaryConn = rst.getSecondary();
+var primaryConn = rst.getPrimary();
+var secondaryConn = rst.getSecondary();
- var primaryDB = primaryConn.getDB("test");
- var secondaryDB = secondaryConn.getDB("test");
+var primaryDB = primaryConn.getDB("test");
+var secondaryDB = secondaryConn.getDB("test");
- // Create a temporary collection and wait until the operation has replicated to the secondary.
- assert.commandWorked(primaryDB.runCommand({
- applyOps: [{
- op: "c",
- ns: primaryDB.getName() + ".$cmd",
- o: {
- create: "temp_collection",
- temp: true,
- writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}
- }
- }]
- }));
+// Create a temporary collection and wait until the operation has replicated to the secondary.
+assert.commandWorked(primaryDB.runCommand({
+ applyOps: [{
+ op: "c",
+ ns: primaryDB.getName() + ".$cmd",
+ o: {
+ create: "temp_collection",
+ temp: true,
+ writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}
+ }
+ }]
+}));
- rst.awaitReplication();
+rst.awaitReplication();
- // Verify that the temporary collection exists on the primary and has temp=true.
- var primaryCollectionInfos = primaryDB.getCollectionInfos({name: "temp_collection"});
- assert.eq(1, primaryCollectionInfos.length, "'temp_collection' wasn't created on the primary");
- assert.eq("temp_collection",
- primaryCollectionInfos[0].name,
- "'temp_collection' wasn't created on the primary");
- assert.eq(true,
- primaryCollectionInfos[0].options.temp,
- "'temp_collection' wasn't created as temporary on the primary: " +
- tojson(primaryCollectionInfos[0].options));
+// Verify that the temporary collection exists on the primary and has temp=true.
+var primaryCollectionInfos = primaryDB.getCollectionInfos({name: "temp_collection"});
+assert.eq(1, primaryCollectionInfos.length, "'temp_collection' wasn't created on the primary");
+assert.eq("temp_collection",
+ primaryCollectionInfos[0].name,
+ "'temp_collection' wasn't created on the primary");
+assert.eq(true,
+ primaryCollectionInfos[0].options.temp,
+ "'temp_collection' wasn't created as temporary on the primary: " +
+ tojson(primaryCollectionInfos[0].options));
- // Verify that the temporary collection exists on the secondary and has temp=true.
- var secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
- assert.eq(
- 1, secondaryCollectionInfos.length, "'temp_collection' wasn't created on the secondary");
- assert.eq("temp_collection",
- secondaryCollectionInfos[0].name,
- "'temp_collection' wasn't created on the secondary");
- assert.eq(true,
- secondaryCollectionInfos[0].options.temp,
- "'temp_collection' wasn't created as temporary on the secondary: " +
- tojson(secondaryCollectionInfos[0].options));
+// Verify that the temporary collection exists on the secondary and has temp=true.
+var secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
+assert.eq(1, secondaryCollectionInfos.length, "'temp_collection' wasn't created on the secondary");
+assert.eq("temp_collection",
+ secondaryCollectionInfos[0].name,
+ "'temp_collection' wasn't created on the secondary");
+assert.eq(true,
+ secondaryCollectionInfos[0].options.temp,
+ "'temp_collection' wasn't created as temporary on the secondary: " +
+ tojson(secondaryCollectionInfos[0].options));
- // Shut down the secondary and restart it as a stand-alone mongod.
- var secondaryNodeId = rst.getNodeId(secondaryDB.getMongo());
- rst.stop(secondaryNodeId);
+// Shut down the secondary and restart it as a stand-alone mongod.
+var secondaryNodeId = rst.getNodeId(secondaryDB.getMongo());
+rst.stop(secondaryNodeId);
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (storageEngine === "wiredTiger") {
- secondaryConn = MongoRunner.runMongod({
- dbpath: secondaryConn.dbpath,
- noCleanData: true,
- setParameter: {recoverFromOplogAsStandalone: true}
- });
- } else {
- secondaryConn = MongoRunner.runMongod({dbpath: secondaryConn.dbpath, noCleanData: true});
- }
- assert.neq(null, secondaryConn, "secondary failed to start up as a stand-alone mongod");
- secondaryDB = secondaryConn.getDB("test");
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (storageEngine === "wiredTiger") {
+ secondaryConn = MongoRunner.runMongod({
+ dbpath: secondaryConn.dbpath,
+ noCleanData: true,
+ setParameter: {recoverFromOplogAsStandalone: true}
+ });
+} else {
+ secondaryConn = MongoRunner.runMongod({dbpath: secondaryConn.dbpath, noCleanData: true});
+}
+assert.neq(null, secondaryConn, "secondary failed to start up as a stand-alone mongod");
+secondaryDB = secondaryConn.getDB("test");
- // Verify that the temporary collection still exists on the secondary and has temp=true.
- secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
- assert.eq(1,
- secondaryCollectionInfos.length,
- "'temp_collection' was dropped after restarting the secondary as a stand-alone");
- assert.eq("temp_collection",
- secondaryCollectionInfos[0].name,
- "'temp_collection' was dropped after restarting the secondary as a stand-alone");
- assert.eq(true,
- secondaryCollectionInfos[0].options.temp,
- "'temp_collection' is no longer temporary after restarting the secondary as a" +
- " stand-alone: " + tojson(secondaryCollectionInfos[0].options));
+// Verify that the temporary collection still exists on the secondary and has temp=true.
+secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
+assert.eq(1,
+ secondaryCollectionInfos.length,
+ "'temp_collection' was dropped after restarting the secondary as a stand-alone");
+assert.eq("temp_collection",
+ secondaryCollectionInfos[0].name,
+ "'temp_collection' was dropped after restarting the secondary as a stand-alone");
+assert.eq(true,
+ secondaryCollectionInfos[0].options.temp,
+ "'temp_collection' is no longer temporary after restarting the secondary as a" +
+ " stand-alone: " + tojson(secondaryCollectionInfos[0].options));
- // Shut down the secondary and restart it as a member of the replica set.
- MongoRunner.stopMongod(secondaryConn);
+// Shut down the secondary and restart it as a member of the replica set.
+MongoRunner.stopMongod(secondaryConn);
- var restart = true;
- rst.start(secondaryNodeId, {}, restart);
+var restart = true;
+rst.start(secondaryNodeId, {}, restart);
- // Verify that writes are replicated to the temporary collection and can successfully be applied
- // by the secondary after having restarted it.
- assert.writeOK(primaryDB.temp_collection.insert(
- {}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+// Verify that writes are replicated to the temporary collection and can successfully be applied
+// by the secondary after having restarted it.
+assert.writeOK(primaryDB.temp_collection.insert(
+ {}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/test_command.js b/jstests/replsets/test_command.js
index cc4ef0f0d0f..a8228464cef 100644
--- a/jstests/replsets/test_command.js
+++ b/jstests/replsets/test_command.js
@@ -3,143 +3,140 @@
// waitForDrainFinish - waits for primary to finish draining its applier queue.
(function() {
- 'use strict';
- var name = 'test_command';
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- _id: name,
- members: [
- {_id: 0, host: nodes[0], priority: 3},
- {_id: 1, host: nodes[1]},
- {_id: 2, host: nodes[2], arbiterOnly: true},
- ],
- });
-
- // Stabilize replica set with node 0 as primary.
-
- assert.commandWorked(replSet.nodes[0].adminCommand({
+'use strict';
+var name = 'test_command';
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: nodes[0], priority: 3},
+ {_id: 1, host: nodes[1]},
+ {_id: 2, host: nodes[2], arbiterOnly: true},
+ ],
+});
+
+// Stabilize replica set with node 0 as primary.
+
+assert.commandWorked(replSet.nodes[0].adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: 60 * 1000,
+}),
+                     'node 0 ' + replSet.nodes[0].host + ' failed to become primary');
+
+// We need the try/catch to handle that the node may have hung up the connection due
+// to a state change.
+try {
+ assert.commandWorked(replSet.nodes[1].adminCommand({
replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
timeoutMillis: 60 * 1000,
- }),
-                         'node 0 ' + replSet.nodes[0].host + ' failed to become primary');
-
- // We need the try/catch to handle that the node may have hung up the connection due
- // to a state change.
- try {
- assert.commandWorked(replSet.nodes[1].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 60 * 1000,
- }));
- } catch (e) {
- jsTestLog(e);
- assert.commandWorked(replSet.nodes[1].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 60 * 1000,
- }),
-                             'node 1 ' + replSet.nodes[1].host + ' failed to become secondary');
- }
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Check replication mode.
-
- assert.commandFailedWithCode(primary.getDB(name).runCommand({
- replSetTest: 1,
- }),
- ErrorCodes.Unauthorized,
- 'replSetTest should fail against non-admin database');
-
- assert.commandWorked(primary.adminCommand({
+ }));
+} catch (e) {
+ jsTestLog(e);
+ assert.commandWorked(replSet.nodes[1].adminCommand({
replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
+ timeoutMillis: 60 * 1000,
}),
- 'failed to check replication mode');
-
- // waitForMemberState tests.
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: 'what state',
- timeoutMillis: 1000,
- }),
- ErrorCodes.TypeMismatch,
- 'replSetTest waitForMemberState should fail on non-numerical state');
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: "what timeout",
- }),
- ErrorCodes.TypeMismatch,
- 'replSetTest waitForMemberState should fail on non-numerical timeout');
-
- assert.commandFailedWithCode(primary.adminCommand({
+                         'node 1 ' + replSet.nodes[1].host + ' failed to become secondary');
+}
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Check replication mode.
+
+assert.commandFailedWithCode(primary.getDB(name).runCommand({
+ replSetTest: 1,
+}),
+ ErrorCodes.Unauthorized,
+ 'replSetTest should fail against non-admin database');
+
+assert.commandWorked(primary.adminCommand({
+ replSetTest: 1,
+}),
+ 'failed to check replication mode');
+
+// waitForMemberState tests.
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: 'what state',
+ timeoutMillis: 1000,
+}),
+ ErrorCodes.TypeMismatch,
+ 'replSetTest waitForMemberState should fail on non-numerical state');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: "what timeout",
+}),
+ ErrorCodes.TypeMismatch,
+ 'replSetTest waitForMemberState should fail on non-numerical timeout');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: 9999,
+ timeoutMillis: 1000,
+}),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForMemberState should fail on invalid state');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: -1000,
+}),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForMemberState should fail on negative timeout');
+
+assert.commandFailedWithCode(
+ primary.adminCommand({
replSetTest: 1,
- waitForMemberState: 9999,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
timeoutMillis: 1000,
}),
- ErrorCodes.BadValue,
- 'replSetTest waitForMemberState should fail on invalid state');
-
- assert.commandFailedWithCode(primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: -1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForMemberState should fail on negative timeout');
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 1000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + primary.host);
-
- assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 1000,
- }),
- 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + secondary.host);
-
- // waitForDrainFinish tests.
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 'what state',
- }),
- ErrorCodes.TypeMismatch,
- 'replSetTest waitForDrainFinish should fail on non-numerical timeout');
-
- assert.commandFailedWithCode(primary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: -1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForDrainFinish should fail on negative timeout');
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + primary.host);
- assert.commandWorked(primary.adminCommand({
+assert.commandWorked(
+ secondary.adminCommand({
replSetTest: 1,
- waitForDrainFinish: 1000,
- }),
-                         'node 0 ' + primary.host + ' failed to wait for drain to finish');
-
- assert.commandWorked(secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 0,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
+ timeoutMillis: 1000,
}),
-                         'node 1 ' + secondary.host + ' failed to wait for drain to finish');
- replSet.stopSet();
+ 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + secondary.host);
+
+// waitForDrainFinish tests.
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 'what state',
+}),
+ ErrorCodes.TypeMismatch,
+ 'replSetTest waitForDrainFinish should fail on non-numerical timeout');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: -1000,
+}),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForDrainFinish should fail on negative timeout');
+
+assert.commandWorked(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 1000,
+}),
+                     'node 0 ' + primary.host + ' failed to wait for drain to finish');
+
+assert.commandWorked(secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 0,
+}),
+                     'node 1 ' + secondary.host + ' failed to wait for drain to finish');
+replSet.stopSet();
})();
diff --git a/jstests/replsets/too_stale_secondary.js b/jstests/replsets/too_stale_secondary.js
index f75b86489aa..bac1ae94ccd 100644
--- a/jstests/replsets/too_stale_secondary.js
+++ b/jstests/replsets/too_stale_secondary.js
@@ -29,114 +29,110 @@
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/replsets/rslib.js');
+load('jstests/replsets/rslib.js');
- function getFirstOplogEntry(conn) {
- return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
- }
+function getFirstOplogEntry(conn) {
+ return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
+}
- /**
- * Overflows the oplog of a given node.
- *
- * To detect oplog overflow, we continuously insert large documents until we
- * detect that the first entry of the oplog is no longer the same as when we started. This
- * implies that the oplog attempted to grow beyond its maximum size i.e. it
- * has overflowed/rolled over.
- *
- * Each document will be inserted with a writeConcern given by 'writeConcern'.
- *
- */
- function overflowOplog(conn, db, writeConcern) {
-        var firstOplogEntry = getFirstOplogEntry(conn);
- var collName = "overflow";
-
- // Keep inserting large documents until the oplog rolls over.
- const largeStr = new Array(32 * 1024).join('aaaaaaaa');
- while (bsonWoCompare(getFirstOplogEntry(conn), firstOplogEntry) === 0) {
- assert.writeOK(
- db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}}));
- }
+/**
+ * Overflows the oplog of a given node.
+ *
+ * To detect oplog overflow, we continuously insert large documents until we
+ * detect that the first entry of the oplog is no longer the same as when we started. This
+ * implies that the oplog attempted to grow beyond its maximum size i.e. it
+ * has overflowed/rolled over.
+ *
+ * Each document will be inserted with a writeConcern given by 'writeConcern'.
+ *
+ */
+function overflowOplog(conn, db, writeConcern) {
+    var firstOplogEntry = getFirstOplogEntry(conn);
+ var collName = "overflow";
+
+ // Keep inserting large documents until the oplog rolls over.
+ const largeStr = new Array(32 * 1024).join('aaaaaaaa');
+ while (bsonWoCompare(getFirstOplogEntry(conn), firstOplogEntry) === 0) {
+ assert.writeOK(db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}}));
}
+}
- var testName = "too_stale_secondary";
+var testName = "too_stale_secondary";
- var smallOplogSizeMB = 1;
- var bigOplogSizeMB = 1000;
+var smallOplogSizeMB = 1;
+var bigOplogSizeMB = 1000;
- // Node 0 is given a small oplog so we can overflow it. Node 1's large oplog allows it to
- // store all entries comfortably without overflowing, so that Node 2 can eventually use it as
- // a sync source after it goes too stale. Because this test overflows the oplog, a small
- // syncdelay is chosen to frequently take checkpoints, allowing oplog truncation to proceed.
- var replTest = new ReplSetTest({
- name: testName,
- nodes: [
- {oplogSize: smallOplogSizeMB},
- {oplogSize: bigOplogSizeMB},
- {oplogSize: smallOplogSizeMB}
- ],
- nodeOptions: {syncdelay: 1},
- });
+// Node 0 is given a small oplog so we can overflow it. Node 1's large oplog allows it to
+// store all entries comfortably without overflowing, so that Node 2 can eventually use it as
+// a sync source after it goes too stale. Because this test overflows the oplog, a small
+// syncdelay is chosen to frequently take checkpoints, allowing oplog truncation to proceed.
+var replTest = new ReplSetTest({
+ name: testName,
+ nodes:
+ [{oplogSize: smallOplogSizeMB}, {oplogSize: bigOplogSizeMB}, {oplogSize: smallOplogSizeMB}],
+ nodeOptions: {syncdelay: 1},
+});
- var nodes = replTest.startSet();
- replTest.initiate({
- _id: testName,
- members: [
- {_id: 0, host: nodes[0].host},
- {_id: 1, host: nodes[1].host, priority: 0},
- {_id: 2, host: nodes[2].host, priority: 0}
- ]
- });
+var nodes = replTest.startSet();
+replTest.initiate({
+ _id: testName,
+ members: [
+ {_id: 0, host: nodes[0].host},
+ {_id: 1, host: nodes[1].host, priority: 0},
+ {_id: 2, host: nodes[2].host, priority: 0}
+ ]
+});
- var dbName = testName;
- var collName = "test";
+var dbName = testName;
+var collName = "test";
- jsTestLog("Wait for Node 0 to become the primary.");
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+jsTestLog("Wait for Node 0 to become the primary.");
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replTest.getPrimary();
- var primaryTestDB = primary.getDB(dbName);
+var primary = replTest.getPrimary();
+var primaryTestDB = primary.getDB(dbName);
- jsTestLog("1: Insert one document on the primary (Node 0) and ensure it is replicated.");
- assert.writeOK(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}}));
+jsTestLog("1: Insert one document on the primary (Node 0) and ensure it is replicated.");
+assert.writeOK(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}}));
- jsTestLog("2: Stop Node 2.");
- replTest.stop(2);
+jsTestLog("2: Stop Node 2.");
+replTest.stop(2);
- jsTestLog("3: Wait until Node 2 is down.");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.DOWN);
+jsTestLog("3: Wait until Node 2 is down.");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.DOWN);
- var firstOplogEntryNode1 = getFirstOplogEntry(replTest.nodes[1]);
+var firstOplogEntryNode1 = getFirstOplogEntry(replTest.nodes[1]);
- jsTestLog("4: Overflow the primary's oplog.");
- overflowOplog(primary, primaryTestDB, 2);
+jsTestLog("4: Overflow the primary's oplog.");
+overflowOplog(primary, primaryTestDB, 2);
- // Make sure that Node 1's oplog didn't overflow.
- assert.eq(firstOplogEntryNode1,
- getFirstOplogEntry(replTest.nodes[1]),
- "Node 1's oplog overflowed unexpectedly.");
+// Make sure that Node 1's oplog didn't overflow.
+assert.eq(firstOplogEntryNode1,
+ getFirstOplogEntry(replTest.nodes[1]),
+ "Node 1's oplog overflowed unexpectedly.");
- jsTestLog("5: Stop Node 1 and restart Node 2.");
- replTest.stop(1);
- replTest.restart(2);
+jsTestLog("5: Stop Node 1 and restart Node 2.");
+replTest.stop(1);
+replTest.restart(2);
- jsTestLog("6: Wait for Node 2 to transition to RECOVERING (it should be too stale).");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
+jsTestLog("6: Wait for Node 2 to transition to RECOVERING (it should be too stale).");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
- jsTestLog("7: Stop and restart Node 2.");
- replTest.stop(2);
- replTest.restart(2);
+jsTestLog("7: Stop and restart Node 2.");
+replTest.stop(2);
+replTest.restart(2);
- jsTestLog(
- "8: Wait for Node 2 to transition to RECOVERING (its oplog should remain stale after restart)");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
+jsTestLog(
+ "8: Wait for Node 2 to transition to RECOVERING (its oplog should remain stale after restart)");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
- jsTestLog("9: Restart Node 1, which should have the full oplog history.");
- replTest.restart(1);
+jsTestLog("9: Restart Node 1, which should have the full oplog history.");
+replTest.restart(1);
- jsTestLog("10: Wait for Node 2 to leave RECOVERING and transition to SECONDARY.");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.SECONDARY);
+jsTestLog("10: Wait for Node 2 to leave RECOVERING and transition to SECONDARY.");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.SECONDARY);
- replTest.stopSet();
+replTest.stopSet();
}());
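
Aside: the overflow check above (snapshot the first oplog entry, then insert until it changes) can equivalently be seen as the oplog window collapsing until the stopped node's last applied optime falls off the front. A sketch of measuring that window on a live node; oplog entries carry a 'ts' Timestamp whose 't' field is seconds since the epoch:

function oplogWindowSecs(conn) {
    const oplog = conn.getDB('local').oplog.rs;
    const first = oplog.find().sort({$natural: 1}).limit(1).next();
    const last = oplog.find().sort({$natural: -1}).limit(1).next();
    // Approximate span of history the oplog currently retains, in seconds.
    return last.ts.t - first.ts.t;
}
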
diff --git a/jstests/replsets/transaction_table_multi_statement_txn.js b/jstests/replsets/transaction_table_multi_statement_txn.js
index 37579e35aba..01fc3a577d5 100644
--- a/jstests/replsets/transaction_table_multi_statement_txn.js
+++ b/jstests/replsets/transaction_table_multi_statement_txn.js
@@ -5,46 +5,45 @@
* @tags: [uses_transactions]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
- const session = primary.startSession();
- const primaryDB = session.getDatabase('test');
- const coll = primaryDB.getCollection('coll');
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+const session = primary.startSession();
+const primaryDB = session.getDatabase('test');
+const coll = primaryDB.getCollection('coll');
- jsTestLog('Creating collection ' + coll.getFullName());
- assert.commandWorked(
- primaryDB.createCollection(coll.getName(), {writeConcern: {w: "majority"}}));
- replTest.awaitReplication();
+jsTestLog('Creating collection ' + coll.getFullName());
+assert.commandWorked(primaryDB.createCollection(coll.getName(), {writeConcern: {w: "majority"}}));
+replTest.awaitReplication();
- const sessionId = session.getSessionId();
- jsTestLog('Starting transaction on session ' + sessionId);
- session.startTransaction();
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
- const opTime = session.getOperationTime();
- const txnNum = session.getTxnNumber_forTesting();
- jsTestLog('Successfully committed transaction at operation time ' + tojson(opTime) +
-              ' with transaction number ' + txnNum);
+const sessionId = session.getSessionId();
+jsTestLog('Starting transaction on session ' + sessionId);
+session.startTransaction();
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(session.commitTransaction_forTesting());
+const opTime = session.getOperationTime();
+const txnNum = session.getTxnNumber_forTesting();
+jsTestLog('Successfully committed transaction at operation time ' + tojson(opTime) +
+          ' with transaction number ' + txnNum);
- // After replication, assert the secondary's transaction table has been updated.
- replTest.awaitReplication();
- jsTestLog('Checking transaction tables on both primary and secondary.');
- jsTestLog('Primary ' + primary.host + ': ' +
- tojson(primary.getDB('config').transactions.find().toArray()));
- jsTestLog('Secondary ' + secondary.host + ': ' +
- tojson(secondary.getDB('config').transactions.find().toArray()));
- RetryableWritesUtil.checkTransactionTable(primary, sessionId, txnNum, opTime);
- RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, sessionId);
+// After replication, assert the secondary's transaction table has been updated.
+replTest.awaitReplication();
+jsTestLog('Checking transaction tables on both primary and secondary.');
+jsTestLog('Primary ' + primary.host + ': ' +
+ tojson(primary.getDB('config').transactions.find().toArray()));
+jsTestLog('Secondary ' + secondary.host + ': ' +
+ tojson(secondary.getDB('config').transactions.find().toArray()));
+RetryableWritesUtil.checkTransactionTable(primary, sessionId, txnNum, opTime);
+RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, sessionId);
- session.endSession();
- replTest.stopSet();
+session.endSession();
+replTest.stopSet();
})();
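
Aside: the transaction table asserted on above is the config.transactions collection, keyed by logical session id. A sketch of inspecting one record by hand; 'sessionUUID' is a placeholder for the UUID of the session of interest, and the field set shown in the comment is illustrative for this era of the server rather than guaranteed:

// '_id' holds the full lsid document; match on the session's embedded UUID.
const record = primary.getDB('config').transactions.findOne({'_id.id': sessionUUID});
printjson(record);  // e.g. {_id: {id: <UUID>, ...}, txnNum: ..., lastWriteOpTime: ...}
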
diff --git a/jstests/replsets/transaction_table_oplog_replay.js b/jstests/replsets/transaction_table_oplog_replay.js
index b40ab630a49..eb155343127 100644
--- a/jstests/replsets/transaction_table_oplog_replay.js
+++ b/jstests/replsets/transaction_table_oplog_replay.js
@@ -2,201 +2,204 @@
* Tests that the transaction table is properly updated on secondaries through oplog replay.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- /**
- * Runs each command on the primary, awaits replication then asserts the secondary's transaction
- * collection has been updated to store the latest txnNumber and lastWriteOpTimeTs for each
- * sessionId.
- */
- function runCommandsWithDifferentIds(primary, secondary, cmds) {
- // Disable oplog application to ensure the oplog entries come in the same batch.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
-
- let responseTimestamps = [];
- cmds.forEach(function(cmd) {
- let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
- let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
-
- RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
- responseTimestamps.push(opTime);
- });
-
- // After replication, assert the secondary's transaction table has been updated.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
- replTest.awaitReplication();
- cmds.forEach(function(cmd, i) {
- RetryableWritesUtil.checkTransactionTable(
- secondary, cmd.lsid, cmd.txnNumber, responseTimestamps[i]);
- });
-
- // Both nodes should have the same transaction collection record for each sessionId.
- cmds.forEach(function(cmd) {
- RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmd.lsid);
- });
- }
+/**
+ * Runs each command on the primary, awaits replication then asserts the secondary's transaction
+ * collection has been updated to store the latest txnNumber and lastWriteOpTimeTs for each
+ * sessionId.
+ */
+function runCommandsWithDifferentIds(primary, secondary, cmds) {
+ // Disable oplog application to ensure the oplog entries come in the same batch.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
- /**
- * Runs each command on the primary and tracks the highest txnNumber and lastWriteOpTimeTs, then
- * asserts the secondary's transaction collection document for the sessionId has been updated
- * correctly.
- */
- function runCommandsWithSameId(primary, secondary, cmds) {
- // Disable oplog application to ensure the oplog entries come in the same batch.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
-
- let latestOpTimeTs = Timestamp();
- let highestTxnNumber = NumberLong(-1);
- cmds.forEach(function(cmd) {
- let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
- let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
-
- RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
- latestOpTimeTs = opTime;
- highestTxnNumber =
- (cmd.txnNumber > highestTxnNumber ? cmd.txnNumber : highestTxnNumber);
- });
-
- // After replication, assert the secondary's transaction table has been updated to store the
- // highest transaction number and the latest write optime.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
- replTest.awaitReplication();
+ let responseTimestamps = [];
+ cmds.forEach(function(cmd) {
+ let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
+ let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
+
+ RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
+ responseTimestamps.push(opTime);
+ });
+
+ // After replication, assert the secondary's transaction table has been updated.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
+ replTest.awaitReplication();
+ cmds.forEach(function(cmd, i) {
RetryableWritesUtil.checkTransactionTable(
- secondary, cmds[0].lsid, highestTxnNumber, latestOpTimeTs);
+ secondary, cmd.lsid, cmd.txnNumber, responseTimestamps[i]);
+ });
- // Both nodes should have the same transaction collection record for the sessionId.
- RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmds[0].lsid);
+ // Both nodes should have the same transaction collection record for each sessionId.
+ cmds.forEach(function(cmd) {
+ RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmd.lsid);
+ });
+}
+
+/**
+ * Runs each command on the primary and tracks the highest txnNumber and lastWriteOpTimeTs, then
+ * asserts the secondary's transaction collection document for the sessionId has been updated
+ * correctly.
+ */
+function runCommandsWithSameId(primary, secondary, cmds) {
+ // Disable oplog application to ensure the oplog entries come in the same batch.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
+
+ let latestOpTimeTs = Timestamp();
+ let highestTxnNumber = NumberLong(-1);
+ cmds.forEach(function(cmd) {
+ let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
+ let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
+
+ RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
+ latestOpTimeTs = opTime;
+ highestTxnNumber = (cmd.txnNumber > highestTxnNumber ? cmd.txnNumber : highestTxnNumber);
+ });
+
+ // After replication, assert the secondary's transaction table has been updated to store the
+ // highest transaction number and the latest write optime.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
+ replTest.awaitReplication();
+ RetryableWritesUtil.checkTransactionTable(
+ secondary, cmds[0].lsid, highestTxnNumber, latestOpTimeTs);
+
+ // Both nodes should have the same transaction collection record for the sessionId.
+ RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmds[0].lsid);
+}
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+////////////////////////////////////////////////////////////////////////
+// Test insert command
+
+let insertCmds = [
+ {
+ insert: "foo",
+ documents: [{_id: 10}, {_id: 20}, {_id: 30}, {_id: 40}],
+ ordered: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+ },
+ {
+ insert: "bar",
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10)
}
+];
+runCommandsWithDifferentIds(primary, secondary, insertCmds);
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
-
- let insertCmds = [
- {
- insert: "foo",
- documents: [{_id: 10}, {_id: 20}, {_id: 30}, {_id: 40}],
- ordered: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- },
- {
- insert: "bar",
- documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10)
- }
- ];
- runCommandsWithDifferentIds(primary, secondary, insertCmds);
-
- let lsid = {id: UUID()};
- insertCmds = insertCmds.map(function(cmd) {
- cmd.documents.forEach(function(doc) {
- doc._id = doc._id + 100;
- });
- cmd.lsid = lsid;
- cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
- return cmd;
+let lsid = {id: UUID()};
+insertCmds = insertCmds.map(function(cmd) {
+ cmd.documents.forEach(function(doc) {
+ doc._id = doc._id + 100;
});
- runCommandsWithSameId(primary, secondary, insertCmds);
-
- ////////////////////////////////////////////////////////////////////////
- // Test update command
-
- let updateCommands = [
- {
- update: "foo",
- updates: [
- {q: {_id: 10}, u: {$set: {x: 10}}, upsert: false},
- {q: {_id: 20}, u: {$set: {x: 20}}, upsert: false},
- {q: {_id: 30}, u: {$set: {x: 30}}, upsert: false},
- {q: {_id: 40}, u: {$set: {x: 40}}, upsert: false}
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- },
- {
- update: "bar",
- updates: [
- {q: {_id: 1}, u: {$set: {x: 10}}, upsert: true},
- {q: {_id: 2}, u: {$set: {x: 20}}, upsert: true},
- {q: {_id: 3}, u: {$set: {x: 30}}, upsert: true},
- {q: {_id: 4}, u: {$set: {x: 40}}, upsert: true}
- ],
- ordered: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10)
- }
- ];
- runCommandsWithDifferentIds(primary, secondary, updateCommands);
-
- lsid = {id: UUID()};
- updateCommands = updateCommands.map(function(cmd) {
- cmd.updates.forEach(function(up) {
- up.q._id = up.q._id + 100;
- });
- cmd.lsid = lsid;
- cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
- return cmd;
+ cmd.lsid = lsid;
+ cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
+ return cmd;
+});
+runCommandsWithSameId(primary, secondary, insertCmds);
+
+////////////////////////////////////////////////////////////////////////
+// Test update command
+
+let updateCommands = [
+ {
+ update: "foo",
+ updates: [
+ {q: {_id: 10}, u: {$set: {x: 10}}, upsert: false},
+ {q: {_id: 20}, u: {$set: {x: 20}}, upsert: false},
+ {q: {_id: 30}, u: {$set: {x: 30}}, upsert: false},
+ {q: {_id: 40}, u: {$set: {x: 40}}, upsert: false}
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+ },
+ {
+ update: "bar",
+ updates: [
+ {q: {_id: 1}, u: {$set: {x: 10}}, upsert: true},
+ {q: {_id: 2}, u: {$set: {x: 20}}, upsert: true},
+ {q: {_id: 3}, u: {$set: {x: 30}}, upsert: true},
+ {q: {_id: 4}, u: {$set: {x: 40}}, upsert: true}
+ ],
+ ordered: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10)
+ }
+];
+runCommandsWithDifferentIds(primary, secondary, updateCommands);
+
+lsid = {
+ id: UUID()
+};
+updateCommands = updateCommands.map(function(cmd) {
+ cmd.updates.forEach(function(up) {
+ up.q._id = up.q._id + 100;
});
- runCommandsWithSameId(primary, secondary, updateCommands);
-
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
-
- let deleteCommands = [
- {
- delete: "foo",
- deletes: [
- {q: {_id: 10}, limit: 1},
- {q: {_id: 20}, limit: 1},
- {q: {_id: 30}, limit: 1},
- {q: {_id: 40}, limit: 1}
- ],
- ordered: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- },
- {
- delete: "bar",
- deletes: [
- {q: {_id: 1}, limit: 1},
- {q: {_id: 2}, limit: 1},
- {q: {_id: 3}, limit: 1},
- {q: {_id: 4}, limit: 1}
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10)
- }
- ];
- runCommandsWithDifferentIds(primary, secondary, deleteCommands);
-
- lsid = {id: UUID()};
- deleteCommands = deleteCommands.map(function(cmd) {
- cmd.deletes.forEach(function(d) {
- d.q._id = d.q._id + 100;
- });
- cmd.lsid = lsid;
- cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
- return cmd;
+ cmd.lsid = lsid;
+ cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
+ return cmd;
+});
+runCommandsWithSameId(primary, secondary, updateCommands);
+
+////////////////////////////////////////////////////////////////////////
+// Test delete command
+
+let deleteCommands = [
+ {
+ delete: "foo",
+ deletes: [
+ {q: {_id: 10}, limit: 1},
+ {q: {_id: 20}, limit: 1},
+ {q: {_id: 30}, limit: 1},
+ {q: {_id: 40}, limit: 1}
+ ],
+ ordered: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+ },
+ {
+ delete: "bar",
+ deletes: [
+ {q: {_id: 1}, limit: 1},
+ {q: {_id: 2}, limit: 1},
+ {q: {_id: 3}, limit: 1},
+ {q: {_id: 4}, limit: 1}
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10)
+ }
+];
+runCommandsWithDifferentIds(primary, secondary, deleteCommands);
+
+lsid = {
+ id: UUID()
+};
+deleteCommands = deleteCommands.map(function(cmd) {
+ cmd.deletes.forEach(function(d) {
+ d.q._id = d.q._id + 100;
});
- runCommandsWithSameId(primary, secondary, deleteCommands);
+ cmd.lsid = lsid;
+ cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
+ return cmd;
+});
+runCommandsWithSameId(primary, secondary, deleteCommands);
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/transactions_after_rollback_via_refetch.js b/jstests/replsets/transactions_after_rollback_via_refetch.js
index cb9ea1c3257..463d70f4489 100644
--- a/jstests/replsets/transactions_after_rollback_via_refetch.js
+++ b/jstests/replsets/transactions_after_rollback_via_refetch.js
@@ -7,115 +7,116 @@
* @tags: [uses_transactions]
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/libs/rollback_test.js");
-
- let name = "transactions_after_rollback_via_refetch";
- let dbName = name;
- let crudCollName = "crudColl";
- let collToDropName = "collToDrop";
-
- let CommonOps = (node) => {
- // Insert a couple of documents that will initially be present on all nodes.
- let crudColl = node.getDB(dbName)[crudCollName];
- assert.commandWorked(crudColl.insert({_id: 0}));
- assert.commandWorked(crudColl.insert({_id: 1}));
-
- // Create a collection so it can be dropped on the rollback node.
- node.getDB(dbName)[collToDropName].insert({_id: 0});
- };
-
- // We want to have the rollback node perform some inserts, updates, and deletes locally
- // during the rollback process, so we can ensure that transactions will read correct data
- // post-rollback, even though these writes will be un-timestamped.
- let RollbackOps = (node) => {
- let crudColl = node.getDB(dbName)[crudCollName];
- // Roll back an update (causes refetch and local update).
- assert.commandWorked(crudColl.update({_id: 0}, {$set: {rollbackNode: 0}}));
- // Roll back a delete (causes refetch and local insert).
- assert.commandWorked(crudColl.remove({_id: 1}));
- // Roll back an insert (causes local delete).
- assert.commandWorked(crudColl.insert({_id: 2}));
-
- // Roll back a drop (re-creates the collection).
- node.getDB(dbName)[collToDropName].drop();
- };
-
- let SyncSourceOps = (node) => {
- let coll = node.getDB(dbName)[crudCollName];
- // Update these docs so the rollback node will refetch them.
- assert.commandWorked(coll.update({_id: 0}, {$set: {syncSource: 0}}));
- assert.commandWorked(coll.update({_id: 1}, {$set: {syncSource: 1}}));
- };
-
- // Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so that
- // they will use the "rollbackViaRefetch" algorithm.
- let replTest = new ReplSetTest({
- name,
- nodes: 3,
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- replTest.initiate(config);
-
- let rollbackTest = new RollbackTest(name, replTest);
-
- CommonOps(rollbackTest.getPrimary());
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- SyncSourceOps(syncSourceNode);
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Make the rollback node primary so we can run transactions against it.
- rollbackTest.getTestFixture().stepUp(rollbackNode);
-
- jsTestLog("Testing transactions against the node that just rolled back.");
- const sessionOptions = {causalConsistency: false};
- let session = rollbackNode.getDB(dbName).getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb[crudCollName];
-
- // Make sure we can do basic CRUD ops inside a transaction and read the data back correctly, pre
- // and post-commit.
- session.startTransaction();
- // Make sure we read from the snapshot correctly.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0}, {_id: 1, syncSource: 1}]);
- // Do some basic ops.
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
- assert.commandWorked(sessionColl.remove({_id: 1}));
- assert.commandWorked(sessionColl.insert({_id: 2}));
- // Make sure we read the updated data correctly.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Make sure data is visible after commit.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
-
- // Run a transaction that touches the collection that was re-created during rollback.
- sessionColl = sessionDb[collToDropName];
- session.startTransaction();
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0}]);
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Make sure data is visible after commit.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0, inTxn: 1}]);
-
- // Check the replica set.
- rollbackTest.stop();
-
+'use strict';
+
+load("jstests/replsets/libs/rollback_test.js");
+
+let name = "transactions_after_rollback_via_refetch";
+let dbName = name;
+let crudCollName = "crudColl";
+let collToDropName = "collToDrop";
+
+let CommonOps = (node) => {
+ // Insert a couple of documents that will initially be present on all nodes.
+ let crudColl = node.getDB(dbName)[crudCollName];
+ assert.commandWorked(crudColl.insert({_id: 0}));
+ assert.commandWorked(crudColl.insert({_id: 1}));
+
+ // Create a collection so it can be dropped on the rollback node.
+ node.getDB(dbName)[collToDropName].insert({_id: 0});
+};
+
+// We want to have the rollback node perform some inserts, updates, and deletes locally
+// during the rollback process, so we can ensure that transactions will read correct data
+// post-rollback, even though these writes will be un-timestamped.
+let RollbackOps = (node) => {
+ let crudColl = node.getDB(dbName)[crudCollName];
+ // Roll back an update (causes refetch and local update).
+ assert.commandWorked(crudColl.update({_id: 0}, {$set: {rollbackNode: 0}}));
+ // Roll back a delete (causes refetch and local insert).
+ assert.commandWorked(crudColl.remove({_id: 1}));
+ // Roll back an insert (causes local delete).
+ assert.commandWorked(crudColl.insert({_id: 2}));
+
+ // Roll back a drop (re-creates the collection).
+ node.getDB(dbName)[collToDropName].drop();
+};
+
+let SyncSourceOps = (node) => {
+ let coll = node.getDB(dbName)[crudCollName];
+ // Update these docs so the rollback node will refetch them.
+ assert.commandWorked(coll.update({_id: 0}, {$set: {syncSource: 0}}));
+ assert.commandWorked(coll.update({_id: 1}, {$set: {syncSource: 1}}));
+};
+
+// Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so that
+// they will use the "rollbackViaRefetch" algorithm.
+let replTest = new ReplSetTest({
+ name,
+ nodes: 3,
+ useBridge: true,
+ settings: {chainingAllowed: false},
+ nodeOptions: {enableMajorityReadConcern: "false"}
+});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+replTest.initiate(config);
+
+let rollbackTest = new RollbackTest(name, replTest);
+
+CommonOps(rollbackTest.getPrimary());
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+SyncSourceOps(syncSourceNode);
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Make the rollback node primary so we can run transactions against it.
+rollbackTest.getTestFixture().stepUp(rollbackNode);
+
+jsTestLog("Testing transactions against the node that just rolled back.");
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = rollbackNode.getDB(dbName).getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb[crudCollName];
+
+// Make sure we can do basic CRUD ops inside a transaction and read the data back correctly, pre
+// and post-commit.
+session.startTransaction();
+// Make sure we read from the snapshot correctly.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
+ [{_id: 0, syncSource: 0}, {_id: 1, syncSource: 1}]);
+// Do some basic ops.
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
+assert.commandWorked(sessionColl.remove({_id: 1}));
+assert.commandWorked(sessionColl.insert({_id: 2}));
+// Make sure we read the updated data correctly.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
+ [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Make sure data is visible after commit.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
+ [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
+
+// Run a transaction that touches the collection that was re-created during rollback.
+sessionColl = sessionDb[collToDropName];
+session.startTransaction();
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0}]);
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Make sure data is visible after commit.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0, inTxn: 1}]);
+
+// Check the replica set.
+rollbackTest.stop();
}());
diff --git a/jstests/replsets/transactions_committed_with_tickets_exhausted.js b/jstests/replsets/transactions_committed_with_tickets_exhausted.js
index 9fe978a38d6..786d2b34777 100644
--- a/jstests/replsets/transactions_committed_with_tickets_exhausted.js
+++ b/jstests/replsets/transactions_committed_with_tickets_exhausted.js
@@ -5,92 +5,91 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/libs/parallelTester.js"); // for ScopedThread
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- // We set the number of write tickets to be a small value in order to avoid needing to spawn a
- // large number of threads to exhaust all of the available ones.
- const kNumWriteTickets = 5;
-
- const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {
- setParameter: {
- wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
-
- // Setting a transaction lifetime of 20 seconds works fine locally because the
- // threads which attempt to run the drop command are spawned quickly enough. This
- // might not be the case for Evergreen hosts and may need to be tuned accordingly.
- transactionLifetimeLimitSeconds: 20,
- }
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for ScopedThread
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+// We set the number of write tickets to be a small value in order to avoid needing to spawn a
+// large number of threads to exhaust all of the available ones.
+const kNumWriteTickets = 5;
+
+const rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ setParameter: {
+ wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
+
+ // Setting a transaction lifetime of 20 seconds works fine locally because the
+ // threads which attempt to run the drop command are spawned quickly enough. This
+ // might not be the case for Evergreen hosts and may need to be tuned accordingly.
+ transactionLifetimeLimitSeconds: 20,
}
- });
- rst.startSet();
- rst.initiate();
+ }
+});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- const session = primary.startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
+const session = primary.startSession({causalConsistency: false});
+const sessionDb = session.getDatabase("test");
- assert.commandWorked(db.runCommand({create: "mycoll"}));
+assert.commandWorked(db.runCommand({create: "mycoll"}));
- jsTestLog("Starting transaction");
- session.startTransaction();
- assert.commandWorked(sessionDb.mycoll.insert({}));
+jsTestLog("Starting transaction");
+session.startTransaction();
+assert.commandWorked(sessionDb.mycoll.insert({}));
- jsTestLog("Preparing transaction");
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+jsTestLog("Preparing transaction");
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const threads = [];
+const threads = [];
- for (let i = 0; i < kNumWriteTickets; ++i) {
- const thread = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
+for (let i = 0; i < kNumWriteTickets; ++i) {
+ const thread = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
- // Dropping a collection requires a database X lock and therefore blocks behind the
- // transaction committing or aborting.
- db.mycoll.drop();
+ // Dropping a collection requires a database X lock and therefore blocks behind the
+ // transaction committing or aborting.
+ db.mycoll.drop();
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, primary.host);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+ }, primary.host);
+
+ threads.push(thread);
+ thread.start();
+}
+
+// We wait until all of the drop commands are waiting for a lock to know that we've exhausted
+// all of the available write tickets.
+assert.soon(
+ () => {
+ const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
+ return ops.inprog.length === kNumWriteTickets;
+ },
+ () => {
+ return `Didn't find ${kNumWriteTickets} drop commands running: ` + tojson(db.currentOp());
+ });
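+
+// For reference, exhaustion of the write-ticket pool can also be observed directly
+// in serverStatus (field path assumed from typical WiredTiger output, not taken
+// from this test):
+//
+//     const tickets = db.serverStatus().wiredTiger.concurrentTransactions.write;
+//     print(`write tickets in use: ${tickets.out} of ${tickets.totalTickets}`);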
- threads.push(thread);
- thread.start();
- }
+// Should be able to successfully commit the transaction with the write tickets exhausted.
+jsTestLog("Committing transaction");
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // We wait until all of the drop commands are waiting for a lock to know that we've exhausted
- // all of the available write tickets.
- assert.soon(
- () => {
- const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
- return ops.inprog.length === kNumWriteTickets;
- },
- () => {
- return `Didn't find ${kNumWriteTickets} drop commands running: ` +
- tojson(db.currentOp());
- });
-
- // Should be able to successfully commit the transaction with the write tickets exhausted.
- jsTestLog("Committing transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Waiting for drop command to join");
- for (let thread of threads) {
- thread.join();
- }
+jsTestLog("Waiting for drop command to join");
+for (let thread of threads) {
+ thread.join();
+}
- for (let thread of threads) {
- assert.commandWorked(thread.returnData());
- }
+for (let thread of threads) {
+ assert.commandWorked(thread.returnData());
+}
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/transactions_during_step_down.js b/jstests/replsets/transactions_during_step_down.js
index 99eb4223a41..eb6aa6dad6e 100644
--- a/jstests/replsets/transactions_during_step_down.js
+++ b/jstests/replsets/transactions_during_step_down.js
@@ -6,129 +6,129 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
-
- const testName = "txnsDuringStepDown";
- const dbName = testName;
- const collName = "testcoll";
-
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- var primary = rst.getPrimary();
- var db = primary.getDB(dbName);
- var primaryAdmin = primary.getDB("admin");
- var primaryColl = db[collName];
- var collNss = primaryColl.getFullName();
-
- jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}}));
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.skipRetryOnNetworkError = true;
-
- function startTxn({parallel: parallel = true}) {
- var txnFunc = () => {
- jsTestLog("Starting a new transaction.");
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase(TestData.dbName);
- const sessionColl = sessionDb[TestData.collName];
- session.startTransaction({writeConcern: {w: "majority"}});
- print(TestData.cmd);
- eval(TestData.cmd);
-
- // Validate that the connection is not closed on step down.
- assert.commandWorked(db.adminCommand({ping: 1}));
- };
- return parallel ? startParallelShell(txnFunc, primary.port) : txnFunc();
- }
-
- function runStepDown() {
- jsTestLog("Making primary step down.");
- assert.commandWorked(primaryAdmin.runCommand({"replSetStepDown": 30 * 60, "force": true}));
-
- // Wait until the primary transitioned to SECONDARY state.
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Validating data.");
- assert.docEq([{_id: 'readOp'}], primaryColl.find().toArray());
-
- jsTestLog("Making old primary eligible to be re-elected.");
- assert.commandWorked(primaryAdmin.runCommand({replSetFreeze: 0}));
- rst.getPrimary();
- }
-
- function testTxnFailsWithCode({
- op,
- failPoint: failPoint = 'hangAfterPreallocateSnapshot',
- nss: nss = dbName + '.$cmd',
- preOp: preOp = ''
- }) {
- jsTestLog("Enabling failPoint '" + failPoint + "' on primary.");
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: failPoint,
- data: {shouldContinueOnInterrupt: true},
- mode: "alwaysOn"
- }));
-
- // Start transaction.
- TestData.cmd = preOp +
- `assert.commandFailedWithCode(${op}, ErrorCodes.InterruptedDueToReplStateChange);`;
- const waitForTxnShell = startTxn({});
-
- jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
- waitForCurOpByFailPoint(primaryAdmin, nss, failPoint);
-
- // Call step down & validate data.
- runStepDown();
-
- // Wait for transaction shell to join.
- waitForTxnShell();
-
- // Disable fail point.
- assert.commandWorked(primaryAdmin.runCommand({configureFailPoint: failPoint, mode: 'off'}));
- }
-
- function testAbortOrCommitTxnFailsWithCode(params) {
- params["preOp"] = `sessionColl.insert({_id: 'abortOrCommitTxnOp'});`;
- params["nss"] = "admin.$cmd";
- testTxnFailsWithCode(params);
- }
-
- jsTestLog("Testing stepdown during read transaction.");
- testTxnFailsWithCode({op: "sessionDb.runCommand({find: '" + collName + "', batchSize: 1})"});
-
- jsTestLog("Testing stepdown during write transaction.");
- testTxnFailsWithCode({op: "sessionColl.insert({_id: 'writeOp'})"});
-
- jsTestLog("Testing stepdown during read-write transaction.");
- testTxnFailsWithCode({
- op: "sessionDb.runCommand({findAndModify: '" + collName +
- "', query: {_id: 'readOp'}, remove: true})"
- });
-
- jsTestLog("Testing stepdown during commit transaction.");
- testAbortOrCommitTxnFailsWithCode(
- {failPoint: "hangBeforeCommitingTxn", op: "session.commitTransaction_forTesting()"});
-
- jsTestLog("Testing stepdown during abort transaction.");
- testAbortOrCommitTxnFailsWithCode(
- {failPoint: "hangBeforeAbortingTxn", op: "session.abortTransaction_forTesting()"});
-
- jsTestLog("Testing stepdown during running transaction in inactive state.");
- TestData.cmd = "assert.writeOK(sessionColl.insert({_id: 'inactiveTxnOp'}))";
- // Do not start the transaction in parallel shell because when the parallel
- // shell work is done, implicit call to "endSessions" and "abortTransaction"
- // cmds are made. So, during step down we might not have any running
- // transaction to interrupt.
- startTxn({parallel: false});
+"use strict";
+
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+
+const testName = "txnsDuringStepDown";
+const dbName = testName;
+const collName = "testcoll";
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+var primary = rst.getPrimary();
+var db = primary.getDB(dbName);
+var primaryAdmin = primary.getDB("admin");
+var primaryColl = db[collName];
+var collNss = primaryColl.getFullName();
+
+jsTestLog("Writing data to collection.");
+assert.writeOK(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}}));
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.skipRetryOnNetworkError = true;
+
+function startTxn({parallel: parallel = true}) {
+ var txnFunc = () => {
+ jsTestLog("Starting a new transaction.");
+ const session = db.getMongo().startSession();
+ const sessionDb = session.getDatabase(TestData.dbName);
+ const sessionColl = sessionDb[TestData.collName];
+ session.startTransaction({writeConcern: {w: "majority"}});
+ print(TestData.cmd);
+ eval(TestData.cmd);
+
+ // Validate that the connection is not closed on step down.
+ assert.commandWorked(db.adminCommand({ping: 1}));
+ };
+ return parallel ? startParallelShell(txnFunc, primary.port) : txnFunc();
+}
+
+function runStepDown() {
+ jsTestLog("Making primary step down.");
+ assert.commandWorked(primaryAdmin.runCommand({"replSetStepDown": 30 * 60, "force": true}));
+
+    // Wait until the primary has transitioned to SECONDARY state.
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+ jsTestLog("Validating data.");
+ assert.docEq([{_id: 'readOp'}], primaryColl.find().toArray());
+
+ jsTestLog("Making old primary eligible to be re-elected.");
+ assert.commandWorked(primaryAdmin.runCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
+
+function testTxnFailsWithCode({
+ op,
+ failPoint: failPoint = 'hangAfterPreallocateSnapshot',
+ nss: nss = dbName + '.$cmd',
+ preOp: preOp = ''
+}) {
+ jsTestLog("Enabling failPoint '" + failPoint + "' on primary.");
+ assert.commandWorked(primary.adminCommand({
+ configureFailPoint: failPoint,
+ data: {shouldContinueOnInterrupt: true},
+ mode: "alwaysOn"
+ }));
+
+ // Start transaction.
+ TestData.cmd =
+ preOp + `assert.commandFailedWithCode(${op}, ErrorCodes.InterruptedDueToReplStateChange);`;
+ const waitForTxnShell = startTxn({});
+
+ jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
+ waitForCurOpByFailPoint(primaryAdmin, nss, failPoint);
// Call step down & validate data.
runStepDown();
- rst.stopSet();
+ // Wait for transaction shell to join.
+ waitForTxnShell();
+
+ // Disable fail point.
+ assert.commandWorked(primaryAdmin.runCommand({configureFailPoint: failPoint, mode: 'off'}));
+}
+
+function testAbortOrCommitTxnFailsWithCode(params) {
+ params["preOp"] = `sessionColl.insert({_id: 'abortOrCommitTxnOp'});`;
+ params["nss"] = "admin.$cmd";
+ testTxnFailsWithCode(params);
+}
+
+jsTestLog("Testing stepdown during read transaction.");
+testTxnFailsWithCode({op: "sessionDb.runCommand({find: '" + collName + "', batchSize: 1})"});
+
+jsTestLog("Testing stepdown during write transaction.");
+testTxnFailsWithCode({op: "sessionColl.insert({_id: 'writeOp'})"});
+
+jsTestLog("Testing stepdown during read-write transaction.");
+testTxnFailsWithCode({
+ op: "sessionDb.runCommand({findAndModify: '" + collName +
+ "', query: {_id: 'readOp'}, remove: true})"
+});
+
+jsTestLog("Testing stepdown during commit transaction.");
+testAbortOrCommitTxnFailsWithCode(
+ {failPoint: "hangBeforeCommitingTxn", op: "session.commitTransaction_forTesting()"});
+
+jsTestLog("Testing stepdown during abort transaction.");
+testAbortOrCommitTxnFailsWithCode(
+ {failPoint: "hangBeforeAbortingTxn", op: "session.abortTransaction_forTesting()"});
+
+jsTestLog("Testing stepdown during running transaction in inactive state.");
+TestData.cmd = "assert.writeOK(sessionColl.insert({_id: 'inactiveTxnOp'}))";
+// Do not start the transaction in a parallel shell because, when the parallel
+// shell's work is done, implicit "endSessions" and "abortTransaction" commands
+// are sent. So, during step down we might not have any running transaction to
+// interrupt.
+startTxn({parallel: false});
+
+// Call step down & validate data.
+runStepDown();
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/transactions_on_secondaries_not_allowed.js b/jstests/replsets/transactions_on_secondaries_not_allowed.js
index 1dfd1afdddd..59784afe1f3 100644
--- a/jstests/replsets/transactions_on_secondaries_not_allowed.js
+++ b/jstests/replsets/transactions_on_secondaries_not_allowed.js
@@ -5,83 +5,86 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
+
+const dbName = "test";
+const collName = "transactions_on_secondaries_not_allowed";
+
+const rst = new ReplSetTest({name: collName, nodes: 2});
+rst.startSet({verbose: 3});
+// We want a stable topology, so make the secondary unelectable.
+let config = rst.getReplSetConfig();
+config.members[1].priority = 0;
+rst.initiate(config);
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const secondaryTestDB = secondary.getDB(dbName);
+
+// Do an initial write so we have something to find.
+const initialDoc = {
+ _id: 0
+};
+assert.commandWorked(primary.getDB(dbName)[collName].insert(initialDoc));
+rst.awaitLastOpCommitted();
+
+// Disable the best-effort check for primary-ness in the service entry point, so that we
+// exercise the real check for primary-ness in TransactionParticipant::beginOrContinue.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "skipCheckingForNotMasterInCommandDispatch", mode: "alwaysOn"}));
+
+// Initiate a session on the secondary.
+const sessionOptions = {
+ causalConsistency: false,
+ retryWrites: true
+};
+const session = secondaryTestDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- const dbName = "test";
- const collName = "transactions_on_secondaries_not_allowed";
-
- const rst = new ReplSetTest({name: collName, nodes: 2});
- rst.startSet({verbose: 3});
- // We want a stable topology, so make the secondary unelectable.
- let config = rst.getReplSetConfig();
- config.members[1].priority = 0;
- rst.initiate(config);
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const secondaryTestDB = secondary.getDB(dbName);
-
- // Do an initial write so we have something to find.
- const initialDoc = {_id: 0};
- assert.commandWorked(primary.getDB(dbName)[collName].insert(initialDoc));
- rst.awaitLastOpCommitted();
-
- // Disable the best-effort check for primary-ness in the service entry point, so that we
- // exercise the real check for primary-ness in TransactionParticipant::beginOrContinue.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "skipCheckingForNotMasterInCommandDispatch", mode: "alwaysOn"}));
-
- // Initiate a session on the secondary.
- const sessionOptions = {causalConsistency: false, retryWrites: true};
- const session = secondaryTestDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
-
- /**
- * Test starting a transaction and issuing a commitTransaction command.
- */
+/**
+ * Test starting a transaction and issuing a commitTransaction command.
+ */
- jsTestLog("Start a read-only transaction on the secondary.");
- session.startTransaction({readConcern: {level: "snapshot"}});
+jsTestLog("Start a read-only transaction on the secondary.");
+session.startTransaction({readConcern: {level: "snapshot"}});
- // Try to read a document (the first statement in the transaction) and verify that this fails.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
+// Try to read a document (the first statement in the transaction) and verify that this fails.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
- // The check for "NotMaster" supercedes the check for "NoSuchTransaction" in this case.
- jsTestLog(
- "Make sure we are not allowed to run the commitTransaction command on the secondary.");
- assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NotMaster);
+// The check for "NotMaster" supersedes the check for "NoSuchTransaction" in this case.
+jsTestLog("Make sure we are not allowed to run the commitTransaction command on the secondary.");
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NotMaster);
- /**
- * Test starting a transaction and issuing an abortTransaction command.
- */
+/**
+ * Test starting a transaction and issuing an abortTransaction command.
+ */
- jsTestLog("Start a different read-only transaction on the secondary.");
- session.startTransaction({readConcern: {level: "snapshot"}});
+jsTestLog("Start a different read-only transaction on the secondary.");
+session.startTransaction({readConcern: {level: "snapshot"}});
- // Try to read a document (the first statement in the transaction) and verify that this fails.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
+// Try to read a document (the first statement in the transaction) and verify that this fails.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
- // The check for "NotMaster" supercedes the check for "NoSuchTransaction" in this case.
- jsTestLog("Make sure we are not allowed to run the abortTransaction command on the secondary.");
- assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+// The check for "NotMaster" supersedes the check for "NoSuchTransaction" in this case.
+jsTestLog("Make sure we are not allowed to run the abortTransaction command on the secondary.");
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
- /**
- * Test starting a retryable write.
- */
+/**
+ * Test starting a retryable write.
+ */
- jsTestLog("Start a retryable write");
- assert.commandFailedWithCode(sessionDb.foo.insert({_id: 0}), ErrorCodes.NotMaster);
+jsTestLog("Start a retryable write");
+assert.commandFailedWithCode(sessionDb.foo.insert({_id: 0}), ErrorCodes.NotMaster);
- /**
- * Test starting a read with txnNumber, but without autocommit. This fails in general because
- * txnNumber isn't supported for the find command outside of transactions, but we check that
- * this fails on a secondary.
- */
+/**
+ * Test starting a read with txnNumber, but without autocommit. This fails in general because
+ * txnNumber isn't supported for the find command outside of transactions, but we check that
+ * this fails on a secondary.
+ */
- jsTestLog("Start a read with txnNumber but without autocommit");
- assert.commandFailedWithCode(sessionDb.runCommand({find: 'foo', txnNumber: NumberLong(10)}),
- 50768);
+jsTestLog("Start a read with txnNumber but without autocommit");
+assert.commandFailedWithCode(sessionDb.runCommand({find: 'foo', txnNumber: NumberLong(10)}), 50768);
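+
+// For contrast, a find that legitimately belongs to a transaction carries the full
+// set of transaction fields on its first statement (sketch only; not run here, and
+// on a secondary it would still fail with NotMaster):
+//
+//     sessionDb.runCommand(
+//         {find: 'foo', txnNumber: NumberLong(10), startTransaction: true, autocommit: false});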
- session.endSession();
- rst.stopSet(undefined, false, {skipValidation: true});
+session.endSession();
+rst.stopSet(undefined, false, {skipValidation: true});
}());
diff --git a/jstests/replsets/transactions_only_allowed_on_primaries.js b/jstests/replsets/transactions_only_allowed_on_primaries.js
index 3d0633cc423..2ca360eca41 100644
--- a/jstests/replsets/transactions_only_allowed_on_primaries.js
+++ b/jstests/replsets/transactions_only_allowed_on_primaries.js
@@ -4,128 +4,128 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- // In 4.0, we allow read-only transactions on secondaries when test commands are enabled, so we
- // disable them in this test, to test that transactions on secondaries will be disallowed
- // for production users.
- jsTest.setOption('enableTestCommands', false);
- TestData.roleGraphInvalidationIsFatal = false;
- TestData.authenticationDatabase = "local";
-
- const dbName = "test";
- const collName = "transactions_only_allowed_on_primaries";
-
- // Start up the replica set. We want a stable topology, so make the secondary unelectable.
- const replTest = new ReplSetTest({name: collName, nodes: 2});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[1].priority = 0;
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
-
- // Set slaveOk=true so that normal read commands would be allowed on the secondary.
- secondary.setSlaveOk(true);
-
- // Create a test collection that we can run commands against.
- const primaryDB = primary.getDB(dbName);
- assert.commandWorked(primary.getDB(dbName).createCollection(collName));
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [
- {name: "geo_2d", key: {geo: "2d"}},
- {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
- ]
- }));
- replTest.awaitLastOpCommitted();
-
- /**
- * Verify that all given commands are disallowed from starting a transaction on a secondary by
- * checking that each command fails with the expected error code.
- */
- function testCommands(session, commands, expectedErrorCode, readPref) {
- const sessionDb = session.getDatabase(dbName);
- for (let i = 0; i < commands.length; i++) {
- session.startTransaction();
- // Use a read preference that would normally allow read commands to run on secondaries.
- if (readPref !== null) {
- session.getOptions().setReadPreference(readPref);
- }
- const cmdObject = commands[i];
-
- jsTestLog("Trying to start transaction on secondary with command: " +
- tojson(cmdObject));
- assert.commandFailedWithCode(sessionDb.runCommand(cmdObject), expectedErrorCode);
-
- // Call abort for good measure, even though the transaction should have already been
- // aborted on the server.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NotMaster);
- }
- }
-
- //
- // Make sure transactions are disallowed on secondaries.
- //
-
- // Initiate a session on the secondary.
- const sessionOptions = {causalConsistency: false};
- const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
-
- // Test read commands that are supported in transactions.
- let readCommands = [
- {find: collName},
- {aggregate: collName, pipeline: [{$project: {_id: 1}}], cursor: {}},
- {distinct: collName, key: "_id"},
- {geoSearch: collName, near: [0, 0]}
- ];
-
- jsTestLog("Testing read commands.");
- // Make sure read commands can not start transactions with any supported read preference.
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondary");
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondaryPreferred");
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "primaryPreferred");
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, null);
-
- // Test one write command. Normal write commands should already be
- // disallowed on secondaries so we don't test them exhaustively here.
- let writeCommands = [{insert: collName, documents: [{_id: 0}]}];
-
- jsTestLog("Testing write commands.");
- testCommands(secondarySession, writeCommands, ErrorCodes.NotMaster, "secondary");
-
- secondarySession.endSession();
+"use strict";
+
+// In 4.0, read-only transactions are allowed on secondaries when test commands are
+// enabled, so we disable test commands here to verify that transactions on
+// secondaries are disallowed for production users.
+jsTest.setOption('enableTestCommands', false);
+TestData.roleGraphInvalidationIsFatal = false;
+TestData.authenticationDatabase = "local";
+
+const dbName = "test";
+const collName = "transactions_only_allowed_on_primaries";
+
+// Start up the replica set. We want a stable topology, so make the secondary unelectable.
+const replTest = new ReplSetTest({name: collName, nodes: 2});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[1].priority = 0;
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+// Set slaveOk=true so that normal read commands would be allowed on the secondary.
+secondary.setSlaveOk(true);
+
+// Create a test collection that we can run commands against.
+const primaryDB = primary.getDB(dbName);
+assert.commandWorked(primary.getDB(dbName).createCollection(collName));
+assert.commandWorked(primaryDB.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {name: "geo_2d", key: {geo: "2d"}},
+ {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
+ ]
+}));
+replTest.awaitLastOpCommitted();
- //
- // Make sure transactions are allowed on primaries with any valid read preference.
- //
-
- const primarySession = primary.getDB(dbName).getMongo().startSession(sessionOptions);
- const primarySessionDb = primarySession.getDatabase(dbName);
-
- primarySession.startTransaction();
- assert.commandWorked(
- primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primary"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
-
- primarySession.startTransaction();
- assert.commandWorked(
- primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primaryPreferred"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
-
- primarySession.startTransaction();
- assert.commandWorked(primarySessionDb.runCommand(
- {find: collName, $readPreference: {mode: "secondaryPreferred"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
-
- primarySession.startTransaction();
- assert.commandWorked(
- primarySessionDb.runCommand({find: collName, $readPreference: {mode: "nearest"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
+/**
+ * Verify that all given commands are disallowed from starting a transaction on a secondary by
+ * checking that each command fails with the expected error code.
+ */
+function testCommands(session, commands, expectedErrorCode, readPref) {
+ const sessionDb = session.getDatabase(dbName);
+ for (let i = 0; i < commands.length; i++) {
+ session.startTransaction();
+ // Use a read preference that would normally allow read commands to run on secondaries.
+ if (readPref !== null) {
+ session.getOptions().setReadPreference(readPref);
+ }
+ const cmdObject = commands[i];
- primarySession.endSession();
+ jsTestLog("Trying to start transaction on secondary with command: " + tojson(cmdObject));
+ assert.commandFailedWithCode(sessionDb.runCommand(cmdObject), expectedErrorCode);
- replTest.stopSet();
+ // Call abort for good measure, even though the transaction should have already been
+ // aborted on the server.
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+ }
+}
+
+//
+// Make sure transactions are disallowed on secondaries.
+//
+
+// Initiate a session on the secondary.
+const sessionOptions = {
+ causalConsistency: false
+};
+const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
+
+// Test read commands that are supported in transactions.
+let readCommands = [
+ {find: collName},
+ {aggregate: collName, pipeline: [{$project: {_id: 1}}], cursor: {}},
+ {distinct: collName, key: "_id"},
+ {geoSearch: collName, near: [0, 0]}
+];
+
+jsTestLog("Testing read commands.");
+// Make sure read commands cannot start transactions with any supported read preference.
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondary");
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondaryPreferred");
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "primaryPreferred");
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, null);
+
+// Test one write command. Normal write commands should already be
+// disallowed on secondaries so we don't test them exhaustively here.
+let writeCommands = [{insert: collName, documents: [{_id: 0}]}];
+
+jsTestLog("Testing write commands.");
+testCommands(secondarySession, writeCommands, ErrorCodes.NotMaster, "secondary");
+
+secondarySession.endSession();
+
+//
+// Make sure transactions are allowed on primaries with any valid read preference.
+//
+
+const primarySession = primary.getDB(dbName).getMongo().startSession(sessionOptions);
+const primarySessionDb = primarySession.getDatabase(dbName);
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primary"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primaryPreferred"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "secondaryPreferred"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "nearest"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.endSession();
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/transactions_reaped_with_tickets_exhausted.js b/jstests/replsets/transactions_reaped_with_tickets_exhausted.js
index 905890af13d..3d46be27be7 100644
--- a/jstests/replsets/transactions_reaped_with_tickets_exhausted.js
+++ b/jstests/replsets/transactions_reaped_with_tickets_exhausted.js
@@ -5,91 +5,90 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // for ScopedThread
+load("jstests/libs/parallelTester.js"); // for ScopedThread
- // We set the number of write tickets to be a small value in order to avoid needing to spawn a
- // large number of threads to exhaust all of the available ones.
- const kNumWriteTickets = 5;
+// We set the number of write tickets to be a small value in order to avoid needing to spawn a
+// large number of threads to exhaust all of the available ones.
+const kNumWriteTickets = 5;
- const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {
- setParameter: {
- wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
+const rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ setParameter: {
+ wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
- // Setting a transaction lifetime of 1 hour to make sure the transaction reaper
- // doesn't abort the transaction.
- transactionLifetimeLimitSeconds: 3600,
- }
+ // Setting a transaction lifetime of 1 hour to make sure the transaction reaper
+ // doesn't abort the transaction.
+ transactionLifetimeLimitSeconds: 3600,
}
- });
- rst.startSet();
- rst.initiate();
+ }
+});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- const session = primary.startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
+const session = primary.startSession({causalConsistency: false});
+const sessionDb = session.getDatabase("test");
- assert.commandWorked(db.runCommand({create: "mycoll"}));
+assert.commandWorked(db.runCommand({create: "mycoll"}));
- session.startTransaction();
- assert.commandWorked(sessionDb.mycoll.insert({}));
+session.startTransaction();
+assert.commandWorked(sessionDb.mycoll.insert({}));
- const threads = [];
+const threads = [];
- for (let i = 0; i < kNumWriteTickets; ++i) {
- const thread = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
+for (let i = 0; i < kNumWriteTickets; ++i) {
+ const thread = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
- // Dropping a collection requires a database X lock and therefore blocks behind the
- // transaction committing or aborting.
- db.mycoll.drop();
+ // Dropping a collection requires a database X lock and therefore blocks behind the
+ // transaction committing or aborting.
+ db.mycoll.drop();
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, primary.host);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+ }, primary.host);
+
+ threads.push(thread);
+ thread.start();
+}
+
+// We wait until all of the drop commands are waiting for a lock to know that we've exhausted
+// all of the available write tickets.
+assert.soon(
+ () => {
+ const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
+ return ops.inprog.length === kNumWriteTickets;
+ },
+ () => {
+ return `Didn't find ${kNumWriteTickets} drop commands running: ` + tojson(db.currentOp());
+ });
- threads.push(thread);
- thread.start();
- }
+// Attempting to perform another operation inside the transaction will block and should
+// cause it to be aborted implicitly.
+assert.commandFailedWithCode(sessionDb.mycoll.insert({}), ErrorCodes.LockTimeout);
- // We wait until all of the drop commands are waiting for a lock to know that we've exhausted
- // all of the available write tickets.
- assert.soon(
- () => {
- const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
- return ops.inprog.length === kNumWriteTickets;
- },
- () => {
- return `Didn't find ${kNumWriteTickets} drop commands running: ` +
- tojson(db.currentOp());
- });
-
- // Attempting to perform another operation inside of the transaction will block and should
- // cause it to be aborted implicity.
- assert.commandFailedWithCode(sessionDb.mycoll.insert({}), ErrorCodes.LockTimeout);
-
- for (let thread of threads) {
- thread.join();
- }
+for (let thread of threads) {
+ thread.join();
+}
- for (let thread of threads) {
- assert.commandWorked(thread.returnData());
- }
+for (let thread of threads) {
+ assert.commandWorked(thread.returnData());
+}
- // Transaction should already be aborted.
- let res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert(res.errmsg.match(/Transaction .* has been aborted/), res.errmsg);
+// Transaction should already be aborted.
+let res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+assert(res.errmsg.match(/Transaction .* has been aborted/), res.errmsg);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
diff --git a/jstests/replsets/transactions_wait_for_write_concern.js b/jstests/replsets/transactions_wait_for_write_concern.js
index bf08d30f1f5..12d1154a28a 100644
--- a/jstests/replsets/transactions_wait_for_write_concern.js
+++ b/jstests/replsets/transactions_wait_for_write_concern.js
@@ -9,196 +9,196 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collNameBase = "coll";
+const dbName = "test";
+const collNameBase = "coll";
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}],
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}],
+});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+
+const failTimeoutMS = 1000;
+const successTimeoutMS = ReplSetTest.kDefaultTimeoutMS;
+
+function runTest(readConcernLevel) {
+ jsTestLog("Testing " + readConcernLevel);
+
+ const collName = `${collNameBase}_${readConcernLevel}`;
+ assert.commandWorked(primaryDB[collName].insert(
+ [{x: 1}, {x: 2}, {x: 3}, {x: 4}, {x: 5}, {x: 6}], {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Unprepared Abort Setup");
+ const mongo1 = new Mongo(primary.host);
+ const session1 = mongo1.startSession();
+ const sessionDB1 = session1.getDatabase(dbName);
+ session1.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate1 = {update: collName, updates: [{q: {x: 1}, u: {$set: {x: 1}}}]};
+ printjson(assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1)));
+
+ jsTestLog("Prepared Abort Setup");
+ const mongo2 = new Mongo(primary.host);
+ const session2 = mongo2.startSession();
+ const sessionDB2 = session2.getDatabase(dbName);
+ session2.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate2 = {update: collName, updates: [{q: {x: 2}, u: {$set: {x: 2}}}]};
+ printjson(assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2)));
+ PrepareHelpers.prepareTransaction(session2);
+
+ jsTestLog("Prepare Setup");
+ const mongo3 = new Mongo(primary.host);
+ const session3 = mongo3.startSession();
+ const sessionDB3 = session3.getDatabase(dbName);
+ session3.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate3 = {update: collName, updates: [{q: {x: 3}, u: {$set: {x: 3}}}]};
+ printjson(assert.commandWorked(sessionDB3.runCommand(fruitlessUpdate3)));
+
+ jsTestLog("Unprepared Commit Setup");
+ const mongo4 = new Mongo(primary.host);
+ const session4 = mongo4.startSession();
+ const sessionDB4 = session4.getDatabase(dbName);
+ session4.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate4 = {update: collName, updates: [{q: {x: 4}, u: {$set: {x: 4}}}]};
+ printjson(assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4)));
+
+ jsTestLog("Prepared Commit Setup");
+ const mongo5 = new Mongo(primary.host);
+ const session5 = mongo5.startSession();
+ const sessionDB5 = session5.getDatabase(dbName);
+ session5.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate5 = {update: collName, updates: [{q: {x: 5}, u: {$set: {x: 5}}}]};
+ printjson(assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5)));
+ let prepareTS5 = PrepareHelpers.prepareTransaction(session5);
+
+ jsTestLog("Unprepared Abort On Used Connection Setup");
+ const session6 = primary.getDB("admin").getMongo().startSession();
+ const sessionDB6 = session6.getDatabase(dbName);
+ session6.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate6 = {update: collName, updates: [{q: {x: 6}, u: {$set: {x: 6}}}]};
+ printjson(assert.commandWorked(sessionDB6.runCommand(fruitlessUpdate6)));
+
+ jsTestLog("Stop replication");
+ stopReplicationOnSecondaries(rst);
+
+ jsTestLog("Advance OpTime on primary, with replication stopped");
+
+ printjson(assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]})));
+
+ jsTestLog("Run test commands, with replication stopped");
+
+ jsTestLog("Unprepared Abort Test");
+ assert.commandWorked(session1.abortTransaction_forTesting());
+
+ jsTestLog("Prepared Abort Test");
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Prepare Test");
+ assert.commandFailedWithCode(
+ session3.getDatabase('admin').adminCommand(
+ {prepareTransaction: 1, writeConcern: {w: "majority", wtimeout: failTimeoutMS}}),
+ ErrorCodes.WriteConcernFailed);
+ assert.commandFailedWithCode(session3.abortTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Unprepared Commit Test");
+ assert.commandFailedWithCode(session4.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Prepared Commit Test");
+ assert.commandFailedWithCode(session5.getDatabase('admin').adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTS5,
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS}
+ }),
+ ErrorCodes.WriteConcernFailed);
+ // Send commit with the shell helper to reset the shell's state.
+ assert.commandFailedWithCode(session5.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Unprepared Abort On Used Connection Test");
+ assert.commandFailedWithCode(session6.abortTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Restart replication");
+ restartReplicationOnSecondaries(rst);
+
+ jsTestLog("Try transaction with replication enabled");
+
+ // Unprepared Abort.
+ session1.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1));
+ assert.commandWorked(session1.abortTransaction_forTesting());
+
+ // Prepared Abort.
+ session2.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2));
+ PrepareHelpers.prepareTransaction(session2);
+ assert.commandWorked(session2.abortTransaction_forTesting());
+
+    // Testing prepare is no different than prepared abort or prepared commit.
+
+ // Unprepared Commit.
+ session4.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4));
+ assert.commandWorked(session4.commitTransaction_forTesting());
+
+ // Prepared Commit.
+ session5.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
-
- const failTimeoutMS = 1000;
- const successTimeoutMS = ReplSetTest.kDefaultTimeoutMS;
-
- function runTest(readConcernLevel) {
- jsTestLog("Testing " + readConcernLevel);
-
- const collName = `${collNameBase}_${readConcernLevel}`;
- assert.commandWorked(primaryDB[collName].insert(
- [{x: 1}, {x: 2}, {x: 3}, {x: 4}, {x: 5}, {x: 6}], {writeConcern: {w: "majority"}}));
-
- jsTestLog("Unprepared Abort Setup");
- const mongo1 = new Mongo(primary.host);
- const session1 = mongo1.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- session1.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate1 = {update: collName, updates: [{q: {x: 1}, u: {$set: {x: 1}}}]};
- printjson(assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1)));
-
- jsTestLog("Prepared Abort Setup");
- const mongo2 = new Mongo(primary.host);
- const session2 = mongo2.startSession();
- const sessionDB2 = session2.getDatabase(dbName);
- session2.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate2 = {update: collName, updates: [{q: {x: 2}, u: {$set: {x: 2}}}]};
- printjson(assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2)));
- PrepareHelpers.prepareTransaction(session2);
-
- jsTestLog("Prepare Setup");
- const mongo3 = new Mongo(primary.host);
- const session3 = mongo3.startSession();
- const sessionDB3 = session3.getDatabase(dbName);
- session3.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate3 = {update: collName, updates: [{q: {x: 3}, u: {$set: {x: 3}}}]};
- printjson(assert.commandWorked(sessionDB3.runCommand(fruitlessUpdate3)));
-
- jsTestLog("Unprepared Commit Setup");
- const mongo4 = new Mongo(primary.host);
- const session4 = mongo4.startSession();
- const sessionDB4 = session4.getDatabase(dbName);
- session4.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate4 = {update: collName, updates: [{q: {x: 4}, u: {$set: {x: 4}}}]};
- printjson(assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4)));
-
- jsTestLog("Prepared Commit Setup");
- const mongo5 = new Mongo(primary.host);
- const session5 = mongo5.startSession();
- const sessionDB5 = session5.getDatabase(dbName);
- session5.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate5 = {update: collName, updates: [{q: {x: 5}, u: {$set: {x: 5}}}]};
- printjson(assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5)));
- let prepareTS5 = PrepareHelpers.prepareTransaction(session5);
-
- jsTestLog("Unprepared Abort On Used Connection Setup");
- const session6 = primary.getDB("admin").getMongo().startSession();
- const sessionDB6 = session6.getDatabase(dbName);
- session6.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate6 = {update: collName, updates: [{q: {x: 6}, u: {$set: {x: 6}}}]};
- printjson(assert.commandWorked(sessionDB6.runCommand(fruitlessUpdate6)));
-
- jsTestLog("Stop replication");
- stopReplicationOnSecondaries(rst);
-
- jsTestLog("Advance OpTime on primary, with replication stopped");
-
- printjson(assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]})));
-
- jsTestLog("Run test commands, with replication stopped");
-
- jsTestLog("Unprepared Abort Test");
- assert.commandWorked(session1.abortTransaction_forTesting());
-
- jsTestLog("Prepared Abort Test");
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Prepare Test");
- assert.commandFailedWithCode(
- session3.getDatabase('admin').adminCommand(
- {prepareTransaction: 1, writeConcern: {w: "majority", wtimeout: failTimeoutMS}}),
- ErrorCodes.WriteConcernFailed);
- assert.commandFailedWithCode(session3.abortTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Unprepared Commit Test");
- assert.commandFailedWithCode(session4.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Prepared Commit Test");
- assert.commandFailedWithCode(session5.getDatabase('admin').adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTS5,
- writeConcern: {w: "majority", wtimeout: failTimeoutMS}
- }),
- ErrorCodes.WriteConcernFailed);
- // Send commit with the shell helper to reset the shell's state.
- assert.commandFailedWithCode(session5.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Unprepared Abort On Used Connection Test");
- assert.commandFailedWithCode(session6.abortTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Restart replication");
- restartReplicationOnSecondaries(rst);
-
- jsTestLog("Try transaction with replication enabled");
-
- // Unprepared Abort.
- session1.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1));
- assert.commandWorked(session1.abortTransaction_forTesting());
-
- // Prepared Abort.
- session2.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2));
- PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(session2.abortTransaction_forTesting());
-
-        // Testing prepare is no different than prepared abort or prepared commit.
-
- // Unprepared Commit.
- session4.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4));
- assert.commandWorked(session4.commitTransaction_forTesting());
-
- // Prepared Commit.
- session5.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5));
- prepareTS5 = PrepareHelpers.prepareTransaction(session5);
- assert.commandWorked(session5.getDatabase('admin').adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTS5,
- writeConcern: {w: "majority", wtimeout: successTimeoutMS}
- }));
- // Send commit with the shell helper to reset the shell's state.
- assert.commandWorked(session5.commitTransaction_forTesting());
-
-        // Unprepared abort is already using a "used connection" for this success test.
- }
-
- runTest("local");
- runTest("majority");
- runTest("snapshot");
-
- rst.stopSet();
+ assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5));
+ prepareTS5 = PrepareHelpers.prepareTransaction(session5);
+ assert.commandWorked(session5.getDatabase('admin').adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTS5,
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS}
+ }));
+ // Send commit with the shell helper to reset the shell's state.
+ assert.commandWorked(session5.commitTransaction_forTesting());
+
+    // Unprepared abort is already using a "used connection" for this success test.
+}
+
+runTest("local");
+runTest("majority");
+runTest("snapshot");
+
+rst.stopSet();
}());
diff --git a/jstests/replsets/transient_txn_error_labels.js b/jstests/replsets/transient_txn_error_labels.js
index defea4c774b..0a886856beb 100644
--- a/jstests/replsets/transient_txn_error_labels.js
+++ b/jstests/replsets/transient_txn_error_labels.js
@@ -1,244 +1,240 @@
// Test TransientTransactionErrors error label in transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js");
- load("jstests/libs/parallelTester.js"); // For ScopedThread.
-
- const dbName = "test";
- const collName = "no_error_labels_outside_txn";
-
- // We are testing coordinateCommitTransaction, which requires the nodes to be started with
- // --shardsvr.
- const st = new ShardingTest(
- {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
- const primary = st.rs0.getPrimary();
- const secondary = st.rs0.getSecondary();
-
- const testDB = primary.getDB(dbName);
- const adminDB = testDB.getSiblingDB("admin");
- const testColl = testDB.getCollection(collName);
-
- const sessionOptions = {causalConsistency: false};
- let session = primary.startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb.getCollection(collName);
- let secondarySession = secondary.startSession(sessionOptions);
- let secondarySessionDb = secondarySession.getDatabase(dbName);
-
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Insert inside a transaction on secondary should fail but return error labels");
- let txnNumber = 0;
- let res = secondarySessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
+"use strict";
+
+load("jstests/libs/write_concern_util.js");
+load("jstests/libs/parallelTester.js"); // For ScopedThread.
+
+const dbName = "test";
+const collName = "no_error_labels_outside_txn";
+
+// We are testing coordinateCommitTransaction, which requires the nodes to be started with
+// --shardsvr.
+const st = new ShardingTest(
+ {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
+const primary = st.rs0.getPrimary();
+const secondary = st.rs0.getSecondary();
+
+const testDB = primary.getDB(dbName);
+const adminDB = testDB.getSiblingDB("admin");
+const testColl = testDB.getCollection(collName);
+
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = primary.startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb.getCollection(collName);
+let secondarySession = secondary.startSession(sessionOptions);
+let secondarySessionDb = secondarySession.getDatabase(dbName);
+
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+jsTest.log("Insert inside a transaction on secondary should fail but return error labels");
+let txnNumber = 0;
+let res = secondarySessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+jsTest.log("Insert outside a transaction on secondary should fail but not return error labels");
+txnNumber++;
+// Insert as a retryable write.
+res = secondarySessionDb.runCommand(
+ {insert: collName, documents: [{_id: "insert-1"}], txnNumber: NumberLong(txnNumber)});
+
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert(!res.hasOwnProperty("errorLabels"));
+secondarySession.endSession();
+
+jsTest.log("failCommand should be able to return errors with TransientTransactionError");
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["insert"]}
+}));
+session.startTransaction();
+jsTest.log("WriteCommandError should have error labels inside transactions.");
+res = sessionColl.insert({_id: "write-fail-point"});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert(res instanceof WriteCommandError);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+res = testColl.insert({_id: "write-fail-point-outside-txn"});
+jsTest.log("WriteCommandError should not have error labels outside transactions.");
+// WriteConflict will not be returned outside transactions in real cases, but it's fine for
+// testing purposes.
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert(res instanceof WriteCommandError);
+assert(!res.hasOwnProperty("errorLabels"));
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log("WriteConflict returned by commitTransaction command is TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["commitTransaction"]}
+}));
+res = session.commitTransaction_forTesting();
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log("NotMaster returned by commitTransaction command is not TransientTransactionError");
+// commitTransaction will attempt to perform a noop write in response to a NoSuchTransaction
+// error and non-empty writeConcern. This will throw NotMaster.
+res = secondarySessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+});
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert(!res.hasOwnProperty("errorLabels"));
+
+jsTest.log(
+ "NotMaster returned by coordinateCommitTransaction command is not TransientTransactionError");
+// coordinateCommitTransaction will attempt to perform a noop write in response to a
+// NoSuchTransaction error and non-empty writeConcern. This will throw NotMaster.
+res = secondarySessionDb.adminCommand({
+ coordinateCommitTransaction: 1,
+ participants: [],
+ txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+});
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert(!res.hasOwnProperty("errorLabels"));
+
+jsTest.log("ShutdownInProgress returned by write commands is TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["insert"]}
+}));
+res = sessionColl.insert({_id: "commitTransaction-fail-point"});
+assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
+assert(res instanceof WriteCommandError);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log(
+ "ShutdownInProgress returned by commitTransaction command is not TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["commitTransaction"]}
+}));
+res = session.commitTransaction_forTesting();
+assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
+assert(!res.hasOwnProperty("errorLabels"));
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log(
+ "ShutdownInProgress returned by coordinateCommitTransaction command is not TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "coordinateCommitTransaction-fail-point"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["coordinateCommitTransaction"]}
+}));
+res = sessionDb.adminCommand({
+ coordinateCommitTransaction: 1,
+ participants: [],
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
+assert(!res.hasOwnProperty("errorLabels"));
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log("LockTimeout should be TransientTransactionError");
+// Start a transaction to hold the DBLock in IX mode so that drop will be blocked.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "lock-timeout-1"}));
+function dropCmdFunc(primaryHost, dbName, collName) {
+ const primary = new Mongo(primaryHost);
+ return primary.getDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}});
+}
+const thread = new ScopedThread(dropCmdFunc, primary.host, dbName, collName);
+thread.start();
+// Wait for the drop to have a pending MODE_X lock on the database.
+assert.soon(
+ function() {
+ return adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"command.drop": collName, waitingForLock: true}}
+ ])
+ .itcount() === 1;
+ },
+ function() {
+ return "Failed to find drop in currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
});
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- jsTest.log("Insert outside a transaction on secondary should fail but not return error labels");
- txnNumber++;
- // Insert as a retryable write.
- res = secondarySessionDb.runCommand(
- {insert: collName, documents: [{_id: "insert-1"}], txnNumber: NumberLong(txnNumber)});
-
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert(!res.hasOwnProperty("errorLabels"));
- secondarySession.endSession();
-
- jsTest.log("failCommand should be able to return errors with TransientTransactionError");
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["insert"]}
- }));
- session.startTransaction();
- jsTest.log("WriteCommandError should have error labels inside transactions.");
- res = sessionColl.insert({_id: "write-fail-point"});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert(res instanceof WriteCommandError);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- res = testColl.insert({_id: "write-fail-point-outside-txn"});
- jsTest.log("WriteCommandError should not have error labels outside transactions.");
- // WriteConflict will not be returned outside transactions in real cases, but it's fine for
-    // testing purposes.
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert(res instanceof WriteCommandError);
- assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log("WriteConflict returned by commitTransaction command is TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["commitTransaction"]}
- }));
- res = session.commitTransaction_forTesting();
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log("NotMaster returned by commitTransaction command is not TransientTransactionError");
- // commitTransaction will attempt to perform a noop write in response to a NoSuchTransaction
- // error and non-empty writeConcern. This will throw NotMaster.
- res = secondarySessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: {w: "majority"}
- });
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert(!res.hasOwnProperty("errorLabels"));
-
- jsTest.log(
- "NotMaster returned by coordinateCommitTransaction command is not TransientTransactionError");
- // coordinateCommitTransaction will attempt to perform a noop write in response to a
- // NoSuchTransaction error and non-empty writeConcern. This will throw NotMaster.
- res = secondarySessionDb.adminCommand({
- coordinateCommitTransaction: 1,
- participants: [],
- txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: {w: "majority"}
- });
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert(!res.hasOwnProperty("errorLabels"));
-
- jsTest.log("ShutdownInProgress returned by write commands is TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["insert"]}
- }));
- res = sessionColl.insert({_id: "commitTransaction-fail-point"});
- assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
- assert(res instanceof WriteCommandError);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log(
- "ShutdownInProgress returned by commitTransaction command is not TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["commitTransaction"]}
- }));
- res = session.commitTransaction_forTesting();
- assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
- assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log(
- "ShutdownInProgress returned by coordinateCommitTransaction command is not TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "coordinateCommitTransaction-fail-point"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.ShutdownInProgress,
- failCommands: ["coordinateCommitTransaction"]
- }
- }));
- res = sessionDb.adminCommand({
- coordinateCommitTransaction: 1,
- participants: [],
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false
- });
- assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
- assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log("LockTimeout should be TransientTransactionError");
- // Start a transaction to hold the DBLock in IX mode so that drop will be blocked.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "lock-timeout-1"}));
- function dropCmdFunc(primaryHost, dbName, collName) {
- const primary = new Mongo(primaryHost);
- return primary.getDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}});
- }
- const thread = new ScopedThread(dropCmdFunc, primary.host, dbName, collName);
- thread.start();
- // Wait for the drop to have a pending MODE_X lock on the database.
- assert.soon(
- function() {
- return adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {"command.drop": collName, waitingForLock: true}}
- ])
- .itcount() === 1;
- },
- function() {
- return "Failed to find drop in currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
- });
- // Start another transaction in a new session, which cannot acquire the database lock in time.
- let sessionOther = primary.startSession(sessionOptions);
- sessionOther.startTransaction();
- res = sessionOther.getDatabase(dbName).getCollection(collName).insert({_id: "lock-timeout-2"});
- assert.commandFailedWithCode(res, ErrorCodes.LockTimeout);
- assert(res instanceof WriteCommandError);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandFailedWithCode(sessionOther.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(session.abortTransaction_forTesting());
- thread.join();
- assert.commandWorked(thread.returnData());
-
- // Re-create the collection for later test cases.
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Network errors for in-progress statements should be transient");
- session.startTransaction();
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["aggregate"]}
- }));
- res = sessionDb.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}});
- assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log("Network errors for commit should not be transient");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "commitTransaction-network-error"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["commitTransaction"]}
- }));
- res = sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false
- });
- assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
- assert(!res.hasOwnProperty("errorLabels"), tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- session.endSession();
-
- st.stop();
+// Start another transaction in a new session, which cannot acquire the database lock in time.
+let sessionOther = primary.startSession(sessionOptions);
+sessionOther.startTransaction();
+res = sessionOther.getDatabase(dbName).getCollection(collName).insert({_id: "lock-timeout-2"});
+assert.commandFailedWithCode(res, ErrorCodes.LockTimeout);
+assert(res instanceof WriteCommandError);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandFailedWithCode(sessionOther.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+assert.commandWorked(session.abortTransaction_forTesting());
+thread.join();
+assert.commandWorked(thread.returnData());
+
+// Re-create the collection for later test cases.
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+jsTest.log("Network errors for in-progress statements should be transient");
+session.startTransaction();
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["aggregate"]}
+}));
+res = sessionDb.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}});
+assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log("Network errors for commit should not be transient");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "commitTransaction-network-error"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["commitTransaction"]}
+}));
+res = sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
+assert(!res.hasOwnProperty("errorLabels"), tojson(res));
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+session.endSession();
+
+st.stop();
}());
diff --git a/jstests/replsets/transient_txn_error_labels_with_write_concern.js b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
index b422bb96ccc..54ad4f9044a 100644
--- a/jstests/replsets/transient_txn_error_labels_with_write_concern.js
+++ b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
@@ -1,131 +1,135 @@
// Test TransientTransactionError error label for commands in transactions with write concern.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- const dbName = "test";
- const collName = "transient_txn_error_labels_with_write_concern";
-
- // We are testing coordinateCommitTransaction, which requires the nodes to be started with
- // --shardsvr.
- const st = new ShardingTest(
- {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
- const rst = st.rs0;
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- assert.eq(primary, rst.nodes[0]);
- const testDB = primary.getDB(dbName);
-
- const sessionOptions = {causalConsistency: false};
- const writeConcernMajority = {w: "majority", wtimeout: 500};
-
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Write concern errors should not have error labels");
- // Start a new session on the primary.
- let session = primary.startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb.getCollection(collName);
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+const dbName = "test";
+const collName = "transient_txn_error_labels_with_write_concern";
+
+// We are testing coordinateCommitTransaction, which requires the nodes to be started with
+// --shardsvr.
+const st = new ShardingTest(
+ {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
+const rst = st.rs0;
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+assert.eq(primary, rst.nodes[0]);
+const testDB = primary.getDB(dbName);
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const writeConcernMajority = {
+ w: "majority",
+ wtimeout: 500
+};
+
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+jsTest.log("Write concern errors should not have error labels");
+// Start a new session on the primary.
+let session = primary.startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb.getCollection(collName);
+stopServerReplication(rst.getSecondaries());
+session.startTransaction({writeConcern: writeConcernMajority});
+assert.commandWorked(sessionColl.insert({_id: "write-with-write-concern"}));
+let res = session.commitTransaction_forTesting();
+checkWriteConcernTimedOut(res);
+assert(!res.hasOwnProperty("code"));
+assert(!res.hasOwnProperty("errorLabels"));
+restartServerReplication(rst.getSecondaries());
+
+function runNoSuchTransactionTests(cmd, cmdName) {
+ jsTest.log("Running NoSuchTransaction tests for " + cmdName);
+ assert.commandWorked(primary.adminCommand({clearLog: "global"}));
+
+ jsTest.log(cmdName + " should wait for write concern even if it returns NoSuchTransaction");
+ rst.awaitReplication();
stopServerReplication(rst.getSecondaries());
- session.startTransaction({writeConcern: writeConcernMajority});
- assert.commandWorked(sessionColl.insert({_id: "write-with-write-concern"}));
- let res = session.commitTransaction_forTesting();
+ // Use a txnNumber that is one higher than the server has tracked.
+ res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: writeConcernMajority
+ }));
checkWriteConcernTimedOut(res);
- assert(!res.hasOwnProperty("code"));
+ assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+
+ jsTest.log("NoSuchTransaction with write concern error is not transient");
assert(!res.hasOwnProperty("errorLabels"));
- restartServerReplication(rst.getSecondaries());
- function runNoSuchTransactionTests(cmd, cmdName) {
- jsTest.log("Running NoSuchTransaction tests for " + cmdName);
- assert.commandWorked(primary.adminCommand({clearLog: "global"}));
-
- jsTest.log(cmdName + " should wait for write concern even if it returns NoSuchTransaction");
- rst.awaitReplication();
- stopServerReplication(rst.getSecondaries());
- // Use a txnNumber that is one higher than the server has tracked.
- res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: writeConcernMajority
- }));
- checkWriteConcernTimedOut(res);
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
-
- jsTest.log("NoSuchTransaction with write concern error is not transient");
- assert(!res.hasOwnProperty("errorLabels"));
-
- jsTest.log("NoSuchTransaction without write concern error is transient");
- restartServerReplication(rst.getSecondaries());
- // Use a txnNumber that is one higher than the server has tracked.
- res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: {w: "majority"} // Wait with a long timeout.
- }));
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert(!res.hasOwnProperty("writeConcernError"), res);
- assert.eq(res["errorLabels"], ["TransientTransactionError"], res);
-
- jsTest.log(
- "If the noop write for NoSuchTransaction cannot occur, the error is not transient");
-
- // Lock 'local' database in X mode.
- let lockShell = startParallelShell(function() {
- assert.commandFailed(db.adminCommand({
- sleep: 1,
- secs: 500,
- lock: "w",
- lockTarget: "local",
- $comment: "transient_txn_error_labels_with_write_concern lock sleep"
- }));
- }, rst.ports[0]);
-
- // Wait for sleep to appear in currentOp
- let opId = -1;
- assert.soon(function() {
- const curopRes = testDB.currentOp();
- assert.commandWorked(curopRes);
- const foundOp = curopRes["inprog"].filter(
- op => (op["ns"] == "admin.$cmd" &&
- op["command"]["$comment"] ==
- "transient_txn_error_labels_with_write_concern lock sleep"));
- if (foundOp.length == 1) {
- opId = foundOp[0]["opid"];
- }
- return (foundOp.length == 1);
- });
-
- // The server will attempt to perform a noop write, since the command returns
- // NoSuchTransaction. The noop write will time out acquiring a lock on the 'local' database.
- // This should not be a TransientTransactionError, since the server has not successfully
- // replicated a write to confirm that it is primary.
- // Use a txnNumber that is one higher than the server has tracked.
- res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: writeConcernMajority,
- maxTimeMS: 1000
+ jsTest.log("NoSuchTransaction without write concern error is transient");
+ restartServerReplication(rst.getSecondaries());
+ // Use a txnNumber that is one higher than the server has tracked.
+ res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: {w: "majority"} // Wait with a long timeout.
+ }));
+ assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+ assert(!res.hasOwnProperty("writeConcernError"), res);
+ assert.eq(res["errorLabels"], ["TransientTransactionError"], res);
+
+ jsTest.log("If the noop write for NoSuchTransaction cannot occur, the error is not transient");
+
+ // Lock 'local' database in X mode.
+ let lockShell = startParallelShell(function() {
+ assert.commandFailed(db.adminCommand({
+ sleep: 1,
+ secs: 500,
+ lock: "w",
+ lockTarget: "local",
+ $comment: "transient_txn_error_labels_with_write_concern lock sleep"
}));
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
- assert(!res.hasOwnProperty("errorLabels"));
+ }, rst.ports[0]);
+
+ // Wait for sleep to appear in currentOp
+ let opId = -1;
+ assert.soon(function() {
+ const curopRes = testDB.currentOp();
+ assert.commandWorked(curopRes);
+ const foundOp = curopRes["inprog"].filter(
+ op => (op["ns"] == "admin.$cmd" &&
+ op["command"]["$comment"] ==
+ "transient_txn_error_labels_with_write_concern lock sleep"));
+ if (foundOp.length == 1) {
+ opId = foundOp[0]["opid"];
+ }
+ return (foundOp.length == 1);
+ });
+
+ // The server will attempt to perform a noop write, since the command returns
+ // NoSuchTransaction. The noop write will time out acquiring a lock on the 'local' database.
+ // This should not be a TransientTransactionError, since the server has not successfully
+ // replicated a write to confirm that it is primary.
+ // Use a txnNumber that is one higher than the server has tracked.
+ res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: writeConcernMajority,
+ maxTimeMS: 1000
+ }));
+ assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+ assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
+ assert.commandWorked(testDB.killOp(opId));
+ lockShell();
- rst.awaitReplication();
- }
+ rst.awaitReplication();
+}
- runNoSuchTransactionTests({commitTransaction: 1}, "commitTransaction");
+runNoSuchTransactionTests({commitTransaction: 1}, "commitTransaction");
- runNoSuchTransactionTests({coordinateCommitTransaction: 1, participants: []},
- "coordinateCommitTransaction");
+runNoSuchTransactionTests({coordinateCommitTransaction: 1, participants: []},
+ "coordinateCommitTransaction");
- session.endSession();
+session.endSession();
- st.stop();
+st.stop();
}());
diff --git a/jstests/replsets/two_nodes_priority_take_over.js b/jstests/replsets/two_nodes_priority_take_over.js
index 897a930156f..1fba2350e8f 100644
--- a/jstests/replsets/two_nodes_priority_take_over.js
+++ b/jstests/replsets/two_nodes_priority_take_over.js
@@ -8,7 +8,6 @@ if (false) {
load("jstests/replsets/rslib.js");
(function() {
-
"use strict";
var name = "two_nodes_priority_take_over";
var rst = new ReplSetTest({name: name, nodes: 2});
@@ -55,6 +54,5 @@ if (false) {
// no current candidate. If vote requests failed (wrongly) for some reason,
// nodes have to start new elections, which increase the term unnecessarily.
assert.eq(newTerm, stableTerm + 1);
-
})();
}
diff --git a/jstests/replsets/txn_override_unittests.js b/jstests/replsets/txn_override_unittests.js
index 38187c4ff92..f508afe2a99 100644
--- a/jstests/replsets/txn_override_unittests.js
+++ b/jstests/replsets/txn_override_unittests.js
@@ -28,1896 +28,1894 @@
* @tags: [requires_replication, uses_transactions]
*/
(function() {
- "use strict";
- load("jstests/libs/transactions_util.js");
- load('jstests/libs/write_concern_util.js');
-
- // Commands not to override since they can log excessively.
- const runCommandOverrideBlacklistedCommands =
- ["getCmdLineOpts", "serverStatus", "configureFailPoint"];
-
- // cmdResponseOverrides is a map from commands to responses that should be provided in lieu of
- // running the command on the server. This is mostly used for returning WriteConcernErrors
- // without running the command or returning WriteConcernErrors with top level errors.
- // {<cmdName>: {responseObj: <response object>}}
- let cmdResponseOverrides = {};
-
- // postCommandFuncs is a map from commands to functions that should be run after either mocking
- // out their response or running them on the server. This is used to inject functionality at
- // times when the test is not given control, such as when the override runs extra commands on
- // retries.
- // {<cmdName>: {func}}
- let postCommandFuncs = {};
-
- /**
-     * Deletes the command override for the given command.
- */
- function clearCommandOverride(cmdName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
-
- delete cmdResponseOverrides[cmdName];
- }
+"use strict";
+load("jstests/libs/transactions_util.js");
+load('jstests/libs/write_concern_util.js');
+
+// Commands not to override since they can log excessively.
+const runCommandOverrideBlacklistedCommands =
+ ["getCmdLineOpts", "serverStatus", "configureFailPoint"];
+
+// cmdResponseOverrides is a map from commands to responses that should be provided in lieu of
+// running the command on the server. This is mostly used for returning WriteConcernErrors
+// without running the command or returning WriteConcernErrors with top level errors.
+// {<cmdName>: {responseObj: <response object>}}
+let cmdResponseOverrides = {};
+
+// postCommandFuncs is a map from commands to functions that should be run after either mocking
+// out their response or running them on the server. This is used to inject functionality at
+// times when the test is not given control, such as when the override runs extra commands on
+// retries.
+// {<cmdName>: {func}}
+let postCommandFuncs = {};
+
+/**
+ * Deletes the command override for the given command.
+ */
+function clearCommandOverride(cmdName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ delete cmdResponseOverrides[cmdName];
+}
+
+/**
+ * Deletes the post-command function for the given command.
+ */
+function clearPostCommandFunc(cmdName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ delete postCommandFuncs[cmdName];
+}
+
+/**
+ * Clears all command overrides and post-command functions.
+ */
+function clearAllCommandOverrides() {
+ cmdResponseOverrides = {};
+ postCommandFuncs = {};
+}
+
+/**
+ * Sets the provided function as the post-command function for the given command.
+ */
+function attachPostCmdFunction(cmdName, func) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ postCommandFuncs[cmdName] = func;
+}
+
+/**
+ * Sets that the given command should return the given response. The command will not actually
+ * be run.
+ */
+function setCommandMockResponse(cmdName, mockResponse) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ cmdResponseOverrides[cmdName] = {responseObj: mockResponse};
+}
+
+/**
+ * Sets that the given command should fail with ok:1 and the given write concern error.
+ * The command will not actually be run.
+ */
+function failCommandWithWCENoRun(cmdName, writeConcernErrorCode, writeConcernErrorCodeName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ cmdResponseOverrides[cmdName] = {
+ responseObj: {
+ ok: 1,
+ writeConcernError: {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
+ }
+ };
+}
- /**
- * Deletes the post-command function for the given command.
- */
- function clearPostCommandFunc(cmdName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+/**
+ * Sets that the given command should fail with the given error and the given write concern
+ * error. The command will not actually be run.
+ */
+function failCommandWithErrorAndWCENoRun(
+ cmdName, errorCode, errorCodeName, writeConcernErrorCode, writeConcernErrorCodeName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ cmdResponseOverrides[cmdName] = {
+ responseObj: {
+ ok: 0,
+ code: errorCode,
+ codeName: errorCodeName,
+ writeConcernError: {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
+ }
+ };
+}
- delete postCommandFuncs[cmdName];
+/**
+ * Run the post-command function for the given command, if one has been set, and clear it once
+ * used.
+ */
+function runPostCommandFunc(cmdName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ if (postCommandFuncs[cmdName]) {
+ jsTestLog("Running post-command function for " + cmdName);
+ try {
+ postCommandFuncs[cmdName]();
+ } finally {
+ clearPostCommandFunc(cmdName);
+ }
}
+}
- /**
- * Clears all command overrides and post-command functions.
- */
- function clearAllCommandOverrides() {
- cmdResponseOverrides = {};
- postCommandFuncs = {};
+/**
+ * Overrides 'runCommand' to provide a specific pre-set response to the given command. If the
+ * command is in the blacklist, it is not overridden. Otherwise, if a command response has been
+ * specified, returns that without running the function. If a post-command function is specified
+ * for the command, runs that after the command is run. The post-command function is run
+ * regardless of whether the command response was overridden or not.
+ */
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ const cmdName = Object.keys(cmdObj)[0];
+ if (runCommandOverrideBlacklistedCommands.includes(cmdName)) {
+ return mongoRunCommandOriginal.apply(this, arguments);
}
- /**
- * Sets the provided function as the post-command function for the given command.
- */
- function attachPostCmdFunction(cmdName, func) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ if (cmdResponseOverrides.hasOwnProperty(cmdName)) {
+ const cmdResponse = cmdResponseOverrides[cmdName];
+ // Overrides are single-use.
+ clearCommandOverride(cmdName);
+ assert(cmdResponse);
+
+ jsTestLog("Unittest returning: " + tojsononeline(cmdResponse.responseObj) +
+ ", running: " + tojsononeline(cmdObj));
+ assert(cmdResponse.responseObj);
+ assert(cmdResponse.responseObj.ok === 1 || cmdResponse.responseObj.ok === 0);
- postCommandFuncs[cmdName] = func;
+ runPostCommandFunc(cmdName);
+ return cmdResponse.responseObj;
}
- /**
- * Sets that the given command should return the given response. The command will not actually
- * be run.
- */
- function setCommandMockResponse(cmdName, mockResponse) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ const res = mongoRunCommandOriginal.apply(this, arguments);
+ print("Unittest received: " + tojsononeline(res) + ", running: " + tojsononeline(cmdObj));
+ runPostCommandFunc(cmdName);
+ return res;
+};
+
+const dbName = "txn_override_unittests";
+const collName1 = "test_coll1";
+const collName2 = "test_coll2";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const conn = rst.getPrimary();
- cmdResponseOverrides[cmdName] = {responseObj: mockResponse};
+// We have a separate connection for the failpoint so that it does not break up the transaction
+// buffered in network_error_and_txn_override.js.
+const failpointConn = new Mongo(conn.host);
+
+/**
+ * Marks that the given command should fail with the given parameters using the failCommand
+ * failpoint. This does not break up a currently active transaction in the override function.
+ * This does override previous uses of the failpoint, however.
+ */
+function failCommandWithFailPoint(commandsToFail, {
+ errorCode: errorCode,
+ closeConnection: closeConnection = false,
+ writeConcernError: writeConcernError,
+ // By default only fail the next request of the given command.
+ mode: mode = {
+ times: 1
+ },
+} = {}) {
+ // The fail point will ignore the WCE if an error code is specified.
+ assert(!(writeConcernError && errorCode),
+ "Cannot specify both a WCE " + tojsononeline(writeConcernError) + " and an error code " +
+ errorCode);
+
+ let data = {
+ failCommands: commandsToFail,
+ };
+
+ if (errorCode) {
+ data["errorCode"] = errorCode;
}
- /**
- * Sets that the given command should fail with ok:1 and the given write concern error.
- * The command will not actually be run.
- */
- function failCommandWithWCENoRun(cmdName, writeConcernErrorCode, writeConcernErrorCodeName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ if (closeConnection) {
+ data["closeConnection"] = closeConnection;
+ }
- cmdResponseOverrides[cmdName] = {
- responseObj: {
- ok: 1,
- writeConcernError:
- {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
- }
- };
+ if (writeConcernError) {
+ data["writeConcernError"] = writeConcernError;
}
- /**
- * Sets that the given command should fail with the given error and the given write concern
- * error. The command will not actually be run.
- */
- function failCommandWithErrorAndWCENoRun(
- cmdName, errorCode, errorCodeName, writeConcernErrorCode, writeConcernErrorCodeName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ assert.commandWorked(mongoRunCommandOriginal.apply(
+ failpointConn, ['admin', {configureFailPoint: "failCommand", mode: mode, data: data}, 0]));
+}
- cmdResponseOverrides[cmdName] = {
- responseObj: {
- ok: 0,
- code: errorCode,
- codeName: errorCodeName,
+/**
+ * Turns off the failCommand failpoint completely.
+ */
+function stopFailingCommands() {
+ assert.commandWorked(mongoRunCommandOriginal.apply(
+ failpointConn, ['admin', {configureFailPoint: "failCommand", mode: "off"}, 0]));
+}
+
+/**
+ * Run a 'ping' command that is not allowed in a transaction. This has no effect, but causes
+ * network_error_and_txn_override.js to commit the current transaction in order to run the
+ * 'ping'.
+ */
+function endCurrentTransactionIfOpen() {
+ print("=-=-=-= Ending current transaction if open");
+ assert.commandWorked(testDB.runCommand({ping: 1}));
+}
+
+/**
+ * Aborts the current transaction in network_error_and_txn_override.js.
+ */
+function abortCurrentTransaction() {
+ const session = testDB.getSession();
+ const lsid = session.getSessionId();
+ const txnNum = TestData.currentTxnOverrideTxnNumber;
+ print("=-=-=-= Aborting current transaction " + txnNum + " on " + tojsononeline(lsid));
+
+ assert.commandWorked(mongoRunCommandOriginal.apply(
+ testDB.getMongo(),
+ ['admin', {abortTransaction: 1, autocommit: false, lsid: lsid, txnNumber: txnNum}, 0]));
+}
+
+/**
+ * Runs a test where a transaction attempts to use a forbidden database name. When running a
+ * CRUD operation on one of these databases, network_error_and_txn_override.js is expected to
+ * commit the current transaction and run the CRUD operation outside of a transaction.
+ */
+function testBadDBName(session, badDBName) {
+ const badDB = session.getDatabase(badDBName);
+ const badColl = badDB['foo'];
+ assert.commandWorked(badDB.createCollection(collName1));
+
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+
+ assert.commandWorked(badColl.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(badColl.find().itcount(), 1);
+
+ // We attempt another insert in the 'bad collection' that gets a 'DuplicateKey' error.
+ // 'DuplicateKey' errors cause transactions to abort, so if this error were received in a
+ // transaction, we would expect the transaction to get aborted and the collections to be
+ // empty. Since this is not running in a transaction, even though the statement fails, the
+    // previous inserts are not rolled back at the storage layer.
+ assert.commandFailedWithCode(badColl.insert({_id: 1}), ErrorCodes.DuplicateKey);
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(badColl.find().itcount(), 1);
+}
+
+/**
+ * Runs a specific test case, resetting test state before and after.
+ */
+function runTest(testSuite, testCase) {
+ // Drop with majority write concern to ensure transactions in subsequent test cases can
+ // immediately take locks on either collection.
+ coll1.drop({writeConcern: {w: "majority"}});
+ coll2.drop({writeConcern: {w: "majority"}});
+
+ // Ensure all overrides and failpoints have been turned off before running the test.
+ clearAllCommandOverrides();
+ stopFailingCommands();
+
+ jsTestLog(testSuite + ": Testing " + testCase.name);
+ testCase.test();
+
+ // End the current transaction if the test did not end it itself.
+ endCurrentTransactionIfOpen();
+ jsTestLog(testSuite + ": Test " + testCase.name + " complete.");
+
+ // Ensure all overrides and failpoints have been turned off after running the test as well.
+ clearAllCommandOverrides();
+ stopFailingCommands();
+}
+
+const retryOnNetworkErrorTests = [
+ {
+ name: "update with network error after success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ attachPostCmdFunction("update", function() {
+ throw new Error("SocketException");
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "ordinary CRUD ops",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "retry on NotMaster",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "retry on NotMaster ordered",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandFailed(
+ testDB.runCommand({insert: collName1, documents: [{_id: 2}], ordered: true}));
+ }
+ },
+ {
+ name: "retry on NotMaster with object change",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ let obj1 = {_id: 1, x: 5};
+ let obj2 = {_id: 2, x: 5};
+ assert.commandWorked(coll1.insert(obj1));
+ assert.commandWorked(coll1.insert(obj2));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
+ obj1.x = 7;
+ assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ }
+ },
+ {
+ name: "implicit collection creation with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["insert"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun(
+ "insert", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal ordinary error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("insert",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandFailed(coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("insert", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "update with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with two stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"],
+ {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "update with chained stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ // Chain multiple update errors together.
+ attachPostCmdFunction("update", function() {
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "commands not run in transactions",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
+
+ // If this were run in a transaction, the original insert and the duplicate one would
+ // both be storage-rolled-back and the count would be 0. We test that the count is 1
+ // to prove that the inserts are not in a transaction.
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "transaction commands not retried on retryable code",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ session.startTransaction();
+ assert.commandFailedWithCode(
+ testDB.runCommand({update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]}),
+ ErrorCodes.NotMaster);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "transaction commands not retried on network error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ session.startTransaction();
+ const error = assert.throws(() => {
+ return testDB.runCommand(
+ {update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]});
+ });
+ assert(isNetworkError(error), tojson(error));
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "commitTransaction retried on retryable code",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "commitTransaction retried on write concern error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {
writeConcernError:
- {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
- }
- };
- }
+ {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
+ });
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ const res = assert.commandWorked(session.commitTransaction_forTesting());
+ assert(!res.hasOwnProperty("writeConcernError"));
- /**
- * Run the post-command function for the given command, if one has been set, and clear it once
- * used.
- */
- function runPostCommandFunc(cmdName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
-
- if (postCommandFuncs[cmdName]) {
- jsTestLog("Running post-command function for " + cmdName);
- try {
- postCommandFuncs[cmdName]();
- } finally {
- clearPostCommandFunc(cmdName);
- }
+ assert.eq(coll1.find().itcount(), 1);
}
- }
+ },
+ {
+ name: "commitTransaction not retried on transient transaction error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ // Abort the transaction so the commit receives NoSuchTransaction. Note that the fail
+ // command failpoint isn't used because it returns without implicitly aborting the
+ // transaction.
+ const lsid = session.getSessionId();
+ const txnNumber = NumberLong(session.getTxnNumber_forTesting());
+ assert.commandWorked(testDB.adminCommand(
+ {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
+
+ const res = assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(["TransientTransactionError"], res.errorLabels);
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "commitTransaction retried on network error",
+ test: function() {
+ const session = testDB.getSession();
- /**
- * Overrides 'runCommand' to provide a specific pre-set response to the given command. If the
- * command is in the blacklist, it is not overridden. Otherwise, if a command response has been
- * specified, returns that without running the function. If a post-command function is specified
- * for the command, runs that after the command is run. The post-command function is run
- * regardless of whether the command response was overridden or not.
- */
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
- const cmdName = Object.keys(cmdObj)[0];
- if (runCommandOverrideBlacklistedCommands.includes(cmdName)) {
- return mongoRunCommandOriginal.apply(this, arguments);
- }
-
- if (cmdResponseOverrides.hasOwnProperty(cmdName)) {
- const cmdResponse = cmdResponseOverrides[cmdName];
- // Overrides are single-use.
- clearCommandOverride(cmdName);
- assert(cmdResponse);
-
- jsTestLog("Unittest returning: " + tojsononeline(cmdResponse.responseObj) +
- ", running: " + tojsononeline(cmdObj));
- assert(cmdResponse.responseObj);
- assert(cmdResponse.responseObj.ok === 1 || cmdResponse.responseObj.ok === 0);
-
- runPostCommandFunc(cmdName);
- return cmdResponse.responseObj;
- }
-
- const res = mongoRunCommandOriginal.apply(this, arguments);
- print("Unittest received: " + tojsononeline(res) + ", running: " + tojsononeline(cmdObj));
- runPostCommandFunc(cmdName);
- return res;
- };
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "abortTransaction retried on retryable code",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["abortTransaction"], {errorCode: ErrorCodes.NotMaster});
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "abortTransaction retried on network error",
+ test: function() {
+ const session = testDB.getSession();
- const dbName = "txn_override_unittests";
- const collName1 = "test_coll1";
- const collName2 = "test_coll2";
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["abortTransaction"], {closeConnection: true});
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const conn = rst.getPrimary();
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
- // We have a separate connection for the failpoint so that it does not break up the transaction
- // buffered in network_error_and_txn_override.js.
- const failpointConn = new Mongo(conn.host);
+ assert.commandWorked(session.abortTransaction_forTesting());
- /**
- * Marks that the given command should fail with the given parameters using the failCommand
- * failpoint. This does not break up a currently active transaction in the override function.
- * This does override previous uses of the failpoint, however.
- */
- function failCommandWithFailPoint(commandsToFail, {
- errorCode: errorCode,
- closeConnection: closeConnection = false,
- writeConcernError: writeConcernError,
- // By default only fail the next request of the given command.
- mode: mode = {times: 1},
- } = {}) {
- // The fail point will ignore the WCE if an error code is specified.
- assert(!(writeConcernError && errorCode),
- "Cannot specify both a WCE " + tojsononeline(writeConcernError) +
- " and an error code " + errorCode);
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "abortTransaction retried on write concern error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["abortTransaction"], {
+ writeConcernError:
+ {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
+ });
- let data = {
- failCommands: commandsToFail,
- };
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
- if (errorCode) {
- data["errorCode"] = errorCode;
+            // The fail command failpoint with a write concern error triggers after the command
+ // is processed, so the retry will find the transaction has already aborted and return
+ // NoSuchTransaction.
+ const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert(!res.hasOwnProperty("writeConcernError"));
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "abortTransaction not retried on transient transaction error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+            // Abort the transaction directly so the abortTransaction command receives
+            // NoSuchTransaction. Note that the fail command failpoint isn't used because it
+            // returns without implicitly aborting the transaction.
+ const lsid = session.getSessionId();
+ const txnNumber = NumberLong(session.getTxnNumber_forTesting());
+ assert.commandWorked(testDB.adminCommand(
+ {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
+
+ const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(["TransientTransactionError"], res.errorLabels);
+
+ assert.eq(coll1.find().itcount(), 0);
}
+ },
+ {
+ name: "raw response w/ one retryable error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
- if (closeConnection) {
- data["closeConnection"] = closeConnection;
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
}
+ },
+ {
+ name: "raw response w/ one retryable error and one success",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+                    // Raw responses only omit a top-level code if more than one distinct
+                    // error was returned by the shards, so a third shard is needed.
+ shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
+ shardTwo: {ok: 1},
+ shardThree: {code: ErrorCodes.InternalError, errmsg: "dummy"},
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
+
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
+ }
+ },
+ {
+ name: "raw response w/ one network error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.InternalError, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.HostUnreachable, errmsg: "dummy"}
+ }
+ });
- if (writeConcernError) {
- data["writeConcernError"] = writeConcernError;
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
+
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
+ }
+ },
+ {
+ name: "raw response ok:1 w/ retryable write concern error",
+ test: function() {
+ // The first encountered write concern error from a shard is attached as the top-level
+ // write concern error.
+ setCommandMockResponse("createIndexes", {
+ ok: 1,
+ raw: {
+ shardOne: {
+ ok: 1,
+ writeConcernError: {
+ code: ErrorCodes.PrimarySteppedDown,
+ codeName: "PrimarySteppedDown",
+ errmsg: "dummy"
+ }
+ },
+ shardTwo: {ok: 1}
+ },
+ writeConcernError: {
+ code: ErrorCodes.PrimarySteppedDown,
+ codeName: "PrimarySteppedDown",
+ errmsg: "dummy"
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
+
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
+ }
+ },
+ {
+ name: "raw response w/ no retryable error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.InvalidOptions, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandFailed(coll1.createIndex({x: 1}));
+ }
+ },
+ {
+ name: "raw response w/ only acceptable errors",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ code: ErrorCodes.IndexAlreadyExists,
+ raw: {
+ shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
+ shardTwo: {ok: 1},
+ shardThree: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"}
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.createIndex({x: 1}));
+ }
+ },
+ {
+ name: "raw response w/ acceptable error and non-acceptable, non-retryable error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
+ }
+ });
+
+            // "Acceptable" errors are not overridden inside raw responses.
+ assert.commandWorked(testDB.createCollection(collName1));
+ const res = assert.commandFailed(coll1.createIndex({x: 1}));
+ assert(!res.raw.shardOne.ok, tojson(res));
}
+ },
+ {
+ name: "shardCollection retryable code buried in error message",
+ test: function() {
+ setCommandMockResponse("shardCollection", {
+ ok: 0,
+ code: ErrorCodes.OperationFailed,
+ errmsg: "Sharding collection failed :: caused by InterruptedDueToStepdown",
+ });
+
+ // Mock a successful response for the retry, since sharding isn't enabled on the
+ // underlying replica set.
+ attachPostCmdFunction("shardCollection", function() {
+ setCommandMockResponse("shardCollection", {
+ ok: 1,
+ });
+ });
+
+ assert.commandWorked(
+ testDB.runCommand({shardCollection: "dummy_namespace", key: {_id: 1}}));
+ }
+ },
+ {
+ name: "drop retryable code buried in error message",
+ test: function() {
+ setCommandMockResponse("drop", {
+ ok: 0,
+ code: ErrorCodes.OperationFailed,
+ errmsg: "Dropping collection failed :: caused by ShutdownInProgress",
+ });
- assert.commandWorked(mongoRunCommandOriginal.apply(
- failpointConn,
- ['admin', {configureFailPoint: "failCommand", mode: mode, data: data}, 0]));
- }
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(testDB.runCommand({drop: collName1}));
+ }
+ },
+];
+
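+// For reference, a suite call like failCommandWithFailPoint(["insert"], {errorCode:
+// ErrorCodes.NotMaster}) boils down to roughly the following (a sketch of the helper
+// defined earlier in this file). It runs on a separate connection so that configuring
+// the failpoint is not itself pulled into the override's buffered transaction, and the
+// mode defaults to failing only the next matching command:
+//
+//   assert.commandWorked(mongoRunCommandOriginal.apply(failpointConn, [
+//       'admin',
+//       {
+//           configureFailPoint: "failCommand",
+//           mode: {times: 1},
+//           data: {failCommands: ["insert"], errorCode: ErrorCodes.NotMaster}
+//       },
+//       0
+//   ]));
+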
+// These tests only retry on TransientTransactionErrors. All other errors are expected to cause
+// the test to fail. Failpoints, overrides, and post-command functions are set by default to
+// only run once, so commands should succeed on retry.
+const txnOverrideTests = [
+ {
+ name: "ordinary CRUD ops",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
+ assert.eq(coll1.find().itcount(), 2);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "getMore in transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ cmdRes =
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: collName1}));
+ assert.eq(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.nextBatch.length, 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "getMore starts transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+ assert.eq(coll2.find().itcount(), 0);
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ assert.commandWorked(testDB.createCollection(collName2));
+
+ assert.throws(() => testDB.runCommand({getMore: cursorId, collection: collName1}));
+ }
+ },
+ {
+ name: "getMore in different transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+ assert.eq(coll2.find().itcount(), 0);
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ assert.commandWorked(coll2.insert({_id: 3}));
+ assert.eq(coll1.find().itcount(), 2);
+ assert.eq(coll2.find().itcount(), 1);
+
+ assert.commandWorked(coll2.insert({_id: 4}));
+
+ assert.commandFailed(testDB.runCommand({getMore: cursorId, collection: collName1}));
+ }
+ },
+ {
+ name: "getMore after TransientTransactionError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+ failCommandWithFailPoint(["find"], {errorCode: ErrorCodes.NoSuchTransaction});
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ cmdRes =
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: collName1}));
+ assert.eq(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.nextBatch.length, 1);
+ assert.eq(coll1.find().itcount(), 2);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "implicit collection creation",
+ test: function() {
+ const res = assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(1, res.nInserted);
+ assert.eq(coll1.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "errors cause transaction to abort",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "update with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.throws(() => coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with two stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"],
+ {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "update with chained stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ // Chain multiple update errors together.
+ attachPostCmdFunction("update", function() {
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "implicit collection creation with stepdown",
+ test: function() {
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError",
+ test: function() {
+ failCommandWithFailPoint(
+ ["create"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal stepdown error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun(
+ "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal ordinary error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("create",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with network error",
+ test: function() {
+ failCommandWithFailPoint(["create"], {closeConnection: true});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError no success",
+ test: function() {
+ failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "errors cause the override to abort transactions",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
- /**
- * Turns off the failCommand failpoint completely.
- */
- function stopFailingCommands() {
- assert.commandWorked(mongoRunCommandOriginal.apply(
- failpointConn, ['admin', {configureFailPoint: "failCommand", mode: "off"}, 0]));
- }
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.BadValue});
+ assert.commandFailedWithCode(coll1.insert({_id: 2}), ErrorCodes.BadValue);
- /**
- * Run a 'ping' command that is not allowed in a transaction. This has no effect, but causes
- * network_error_and_txn_override.js to commit the current transaction in order to run the
- * 'ping'.
- */
- function endCurrentTransactionIfOpen() {
- print("=-=-=-= Ending current transaction if open");
- assert.commandWorked(testDB.runCommand({ping: 1}));
- }
+ stopFailingCommands();
+ assert.eq(coll1.find().itcount(), 0);
- /**
- * Aborts the current transaction in network_error_and_txn_override.js.
- */
- function abortCurrentTransaction() {
- const session = testDB.getSession();
- const lsid = session.getSessionId();
- const txnNum = TestData.currentTxnOverrideTxnNumber;
- print("=-=-=-= Aborting current transaction " + txnNum + " on " + tojsononeline(lsid));
-
- assert.commandWorked(mongoRunCommandOriginal.apply(
- testDB.getMongo(),
- ['admin', {abortTransaction: 1, autocommit: false, lsid: lsid, txnNumber: txnNum}, 0]));
- }
+ assert.commandWorked(coll1.insert({_id: 3}));
+ assert.eq(coll1.find().itcount(), 1);
- /**
- * Runs a test where a transaction attempts to use a forbidden database name. When running a
- * CRUD operation on one of these databases, network_error_and_txn_override.js is expected to
- * commit the current transaction and run the CRUD operation outside of a transaction.
- */
- function testBadDBName(session, badDBName) {
- const badDB = session.getDatabase(badDBName);
- const badColl = badDB['foo'];
- assert.commandWorked(badDB.createCollection(collName1));
-
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- assert.commandWorked(badColl.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(badColl.find().itcount(), 1);
-
- // We attempt another insert in the 'bad collection' that gets a 'DuplicateKey' error.
- // 'DuplicateKey' errors cause transactions to abort, so if this error were received in a
- // transaction, we would expect the transaction to get aborted and the collections to be
- // empty. Since this is not running in a transaction, even though the statement fails, the
- // previous inserts do not storage-rollback.
- assert.commandFailedWithCode(badColl.insert({_id: 1}), ErrorCodes.DuplicateKey);
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(badColl.find().itcount(), 1);
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["commitTransaction"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NoSuchTransaction,
+ "NoSuchTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commands in 'admin' database end transaction",
+ test: function() {
+ testBadDBName(session, 'admin');
+ }
+ },
+ {
+ name: "commands in 'config' database end transaction",
+ test: function() {
+ testBadDBName(session, 'config');
+ }
+ },
+ {
+ name: "commands in 'local' database end transaction",
+ test: function() {
+ testBadDBName(session, 'local');
+ }
+ },
+ {
+ name: "getMore on change stream executes outside transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // Starting a $changeStream aggregation within a transaction would fail, so the
+ // override has to execute this as a standalone command.
+ const changeStream = testDB.collName1.watch();
+ assert.commandWorked(testDB.collName1.insert({_id: 1}));
+ endCurrentTransactionIfOpen();
+
+            // Calling the `next` function on the change stream cursor will trigger a getMore,
+ // which the override must also run as a standalone command.
+ assert.eq(changeStream.next()["fullDocument"], {_id: 1});
+
+ // An aggregation without $changeStream runs within a transaction.
+ let aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
+ assert.eq(aggCursor.next(), {_id: 1});
+
+ // Creating a non-$changeStream aggregation cursor and running its getMore in a
+ // different transaction will fail.
+ aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
+ endCurrentTransactionIfOpen();
+ assert.throws(() => aggCursor.next());
+ }
+ },
+];
+
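+// Each endCurrentTransactionIfOpen() call in these tests relies on a quirk of the
+// override: 'ping' is not allowed in a transaction, so issuing it forces the override
+// to commit the transaction it has buffered before running the ping itself. Roughly,
+// per the helper defined earlier in this file:
+//
+//   function endCurrentTransactionIfOpen() {
+//       assert.commandWorked(testDB.runCommand({ping: 1}));
+//   }
+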
+// Failpoints, overrides, and post-command functions are set by default to only run once, so
+// commands should succeed on retry.
+const txnOverridePlusRetryOnNetworkErrorTests = [
+ {
+ name: "$where in jstests/core/js4.js",
+ test: function() {
+ const real = {a: 1, b: "abc", c: /abc/i, d: new Date(111911100111), e: null, f: true};
+ assert.commandWorked(coll1.insert(real));
+
+ failCommandWithErrorAndWCENoRun("drop",
+ ErrorCodes.NamespaceNotFound,
+ "NamespaceNotFound",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ coll1.drop();
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+
+ assert.commandWorked(coll1.insert({a: 2, b: {c: 7, d: "d is good"}}));
+ const cursor = coll1.find({
+ $where: function() {
+ assert.eq(3, Object.keySet(obj).length);
+ assert.eq(2, obj.a);
+ assert.eq(7, obj.b.c);
+ assert.eq("d is good", obj.b.d);
+ return true;
+ }
+ });
+ assert.eq(1, cursor.toArray().length);
+ }
+ },
+ {
+ name: "update with network error after success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ attachPostCmdFunction("update", function() {
+ throw new Error("SocketException");
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "retry on NotMaster",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "retry on NotMaster with object change",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ let obj1 = {_id: 1, x: 5};
+ let obj2 = {_id: 2, x: 5};
+ assert.commandWorked(coll1.insert(obj1));
+ assert.commandWorked(coll1.insert(obj2));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
+ obj1.x = 7;
+ assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+
+ endCurrentTransactionIfOpen();
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ }
+ },
+ {
+ name: "implicit collection creation with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["create"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun(
+ "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal ordinary error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("create",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["create"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "update with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with two stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"],
+ {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "update with chained stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ // Chain multiple update errors together.
+ attachPostCmdFunction("update", function() {
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "commit transaction with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["commitTransaction"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal ordinary error",
+ test: function() {
+            // The write concern error triggers a retry; the failpoint only fires once, so
+            // the retried commit does not return OperationFailed again.
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal ordinary error twice",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ // After commitTransaction fails, fail it again with just the ordinary error.
+ attachPostCmdFunction("commitTransaction", function() {
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.OperationFailed});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NoSuchTransaction,
+ "NoSuchTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with NoSuchTransaction error",
+ test: function() {
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commitTransaction fails with SERVER-38856",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["create"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+
+ // After commitTransaction fails, abort the transaction and drop the collection
+ // as if the transaction were being retried on a different node.
+ attachPostCmdFunction("commitTransaction", function() {
+ abortCurrentTransaction();
+ assert.commandWorked(mongoRunCommandOriginal.apply(testDB.getMongo(),
+ [dbName, {drop: collName2}, 0]));
+ });
+ failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1, x: 2}));
+ assert.commandWorked(coll2.insert({_id: 2}));
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 4}}));
+
+ endCurrentTransactionIfOpen();
+
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 6}]);
+ assert.docEq(coll2.find().toArray(), [{_id: 2}]);
+ }
+ },
+ {
+ name: 'Dates are copied correctly for SERVER-41917',
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+
+ let date = new Date();
+ assert.commandWorked(coll1.insert({_id: 3, a: date}));
+ date.setMilliseconds(date.getMilliseconds() + 2);
+ assert.eq(null, coll1.findOne({_id: 3, a: date}));
+ const origDoc = coll1.findOne({_id: 3});
+ const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: date}}));
+ assert.eq(ret.nModified, 0);
+
+ endCurrentTransactionIfOpen();
+
+ assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
+ }
+ },
+ {
+ name: 'Timestamps are copied correctly for SERVER-41917',
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+
+ let ts = new Timestamp(5, 6);
+ assert.commandWorked(coll1.insert({_id: 3, a: ts}));
+ ts.t++;
+
+ assert.eq(null, coll1.findOne({_id: 3, a: ts}));
+ const origDoc = coll1.findOne({_id: 3});
+ const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: ts}}));
+ assert.eq(ret.nModified, 0);
+
+ endCurrentTransactionIfOpen();
+
+ assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
+ }
}
+];
- /**
- * Runs a specific test case, resetting test state before and after.
- */
- function runTest(testSuite, testCase) {
- // Drop with majority write concern to ensure transactions in subsequent test cases can
- // immediately take locks on either collection.
- coll1.drop({writeConcern: {w: "majority"}});
- coll2.drop({writeConcern: {w: "majority"}});
-
- // Ensure all overrides and failpoints have been turned off before running the test.
- clearAllCommandOverrides();
- stopFailingCommands();
-
- jsTestLog(testSuite + ": Testing " + testCase.name);
- testCase.test();
-
- // End the current transaction if the test did not end it itself.
- endCurrentTransactionIfOpen();
- jsTestLog(testSuite + ": Test " + testCase.name + " complete.");
-
- // Ensure all overrides and failpoints have been turned off after running the test as well.
- clearAllCommandOverrides();
- stopFailingCommands();
- }
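+// For orientation, the three runs below cover these override configurations:
+//
+//   retryOnNetworkErrors  wrapCRUDinTransactions  suite
+//   true                  false                   retryOnNetworkErrorTests
+//   false                 true                    txnOverrideTests
+//   true                  true                    txnOverridePlusRetryOnNetworkErrorTests
+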
+TestData.networkErrorAndTxnOverrideConfig = {};
+TestData.sessionOptions = new SessionOptions();
+TestData.overrideRetryAttempts = 3;
+
+let session = conn.startSession(TestData.sessionOptions);
+let testDB = session.getDatabase(dbName);
+
+load("jstests/libs/override_methods/network_error_and_txn_override.js");
+
+jsTestLog("=-=-=-=-=-= Testing with 'retry on network error' by itself. =-=-=-=-=-=");
+TestData.sessionOptions = new SessionOptions({retryWrites: true});
+TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
+TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = false;
+
+session = conn.startSession(TestData.sessionOptions);
+testDB = session.getDatabase(dbName);
+let coll1 = testDB[collName1];
+let coll2 = testDB[collName2];
+
+retryOnNetworkErrorTests.forEach((testCase) => runTest("retryOnNetworkErrorTests", testCase));
+
+jsTestLog("=-=-=-=-=-= Testing with 'txn override' by itself. =-=-=-=-=-=");
+TestData.sessionOptions = new SessionOptions({retryWrites: false});
+TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = false;
+TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
+
+session = conn.startSession(TestData.sessionOptions);
+testDB = session.getDatabase(dbName);
+coll1 = testDB[collName1];
+coll2 = testDB[collName2];
+
+txnOverrideTests.forEach((testCase) => runTest("txnOverrideTests", testCase));
+
+jsTestLog("=-=-=-=-=-= Testing 'both txn override and retry on network error'. =-=-=-=-=-=");
+TestData.sessionOptions = new SessionOptions({retryWrites: true});
+TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
+TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
+
+session = conn.startSession(TestData.sessionOptions);
+testDB = session.getDatabase(dbName);
+coll1 = testDB[collName1];
+coll2 = testDB[collName2];
+
+txnOverridePlusRetryOnNetworkErrorTests.forEach(
+ (testCase) => runTest("txnOverridePlusRetryOnNetworkErrorTests", testCase));
- const retryOnNetworkErrorTests = [
- {
- name: "update with network error after success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- attachPostCmdFunction("update", function() {
- throw new Error("SocketException");
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "ordinary CRUD ops",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "retry on NotMaster",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "retry on NotMaster ordered",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandFailed(
- testDB.runCommand({insert: collName1, documents: [{_id: 2}], ordered: true}));
- }
- },
- {
- name: "retry on NotMaster with object change",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- let obj1 = {_id: 1, x: 5};
- let obj2 = {_id: 2, x: 5};
- assert.commandWorked(coll1.insert(obj1));
- assert.commandWorked(coll1.insert(obj2));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
- obj1.x = 7;
- assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
- }
- },
- {
- name: "implicit collection creation with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["insert"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun(
- "insert", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal ordinary error",
- test: function() {
- failCommandWithErrorAndWCENoRun("insert",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with ordinary error",
- test: function() {
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandFailed(coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("insert", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "update with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with two stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "update with chained stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- // Chain multiple update errors together.
- attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "commands not run in transactions",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
-
- // If this were run in a transaction, the original insert and the duplicate one would
- // both be storage-rolled-back and the count would be 0. We test that the count is 1
- // to prove that the inserts are not in a transaction.
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "transaction commands not retried on retryable code",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
-
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- session.startTransaction();
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]}),
- ErrorCodes.NotMaster);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "transaction commands not retried on network error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
-
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- session.startTransaction();
- const error = assert.throws(() => {
- return testDB.runCommand(
- {update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]});
- });
- assert(isNetworkError(error), tojson(error));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "commitTransaction retried on retryable code",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "commitTransaction retried on write concern error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {
- writeConcernError:
- {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
- });
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- const res = assert.commandWorked(session.commitTransaction_forTesting());
- assert(!res.hasOwnProperty("writeConcernError"));
-
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "commitTransaction not retried on transient transaction error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- // Abort the transaction so the commit receives NoSuchTransaction. Note that the fail
- // command failpoint isn't used because it returns without implicitly aborting the
- // transaction.
- const lsid = session.getSessionId();
- const txnNumber = NumberLong(session.getTxnNumber_forTesting());
- assert.commandWorked(testDB.adminCommand(
- {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
-
- const res = assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(["TransientTransactionError"], res.errorLabels);
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "commitTransaction retried on network error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "abortTransaction retried on retryable code",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {errorCode: ErrorCodes.NotMaster});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "abortTransaction retried on network error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {closeConnection: true});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "abortTransaction retried on write concern error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {
- writeConcernError:
- {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
- });
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- // The fail command fail point with a write concern error triggers after the command
- // is processed, so the retry will find the transaction has already aborted and return
- // NoSuchTransaction.
- const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert(!res.hasOwnProperty("writeConcernError"));
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "abortTransaction not retried on transient transaction error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
-                // Abort the transaction so the abort receives NoSuchTransaction. Note that the fail
- // command failpoint isn't used because it returns without implicitly aborting the
- // transaction.
- const lsid = session.getSessionId();
- const txnNumber = NumberLong(session.getTxnNumber_forTesting());
- assert.commandWorked(testDB.adminCommand(
- {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
-
- const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(["TransientTransactionError"], res.errorLabels);
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "raw response w/ one retryable error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response w/ one retryable error and one success",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
-                    // Raw responses only omit a top-level code if more than one error was
-                    // returned from the shards, so a third shard is needed.
- shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
- shardTwo: {ok: 1},
- shardThree: {code: ErrorCodes.InternalError, errmsg: "dummy"},
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response w/ one network error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.InternalError, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.HostUnreachable, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response ok:1 w/ retryable write concern error",
- test: function() {
- // The first encountered write concern error from a shard is attached as the top-level
- // write concern error.
- setCommandMockResponse("createIndexes", {
- ok: 1,
- raw: {
- shardOne: {
- ok: 1,
- writeConcernError: {
- code: ErrorCodes.PrimarySteppedDown,
- codeName: "PrimarySteppedDown",
- errmsg: "dummy"
- }
- },
- shardTwo: {ok: 1}
- },
- writeConcernError: {
- code: ErrorCodes.PrimarySteppedDown,
- codeName: "PrimarySteppedDown",
- errmsg: "dummy"
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response w/ no retryable error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.InvalidOptions, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandFailed(coll1.createIndex({x: 1}));
- }
- },
- {
- name: "raw response w/ only acceptable errors",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- code: ErrorCodes.IndexAlreadyExists,
- raw: {
- shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
- shardTwo: {ok: 1},
- shardThree: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.createIndex({x: 1}));
- }
- },
- {
- name: "raw response w/ acceptable error and non-acceptable, non-retryable error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
- }
- });
-
-                // "Acceptable" errors are not overridden inside raw responses.
- assert.commandWorked(testDB.createCollection(collName1));
- const res = assert.commandFailed(coll1.createIndex({x: 1}));
- assert(!res.raw.shardOne.ok, tojson(res));
- }
- },
- {
- name: "shardCollection retryable code buried in error message",
- test: function() {
- setCommandMockResponse("shardCollection", {
- ok: 0,
- code: ErrorCodes.OperationFailed,
- errmsg: "Sharding collection failed :: caused by InterruptedDueToStepdown",
- });
-
- // Mock a successful response for the retry, since sharding isn't enabled on the
- // underlying replica set.
- attachPostCmdFunction("shardCollection", function() {
- setCommandMockResponse("shardCollection", {
- ok: 1,
- });
- });
-
- assert.commandWorked(
- testDB.runCommand({shardCollection: "dummy_namespace", key: {_id: 1}}));
- }
- },
- {
- name: "drop retryable code buried in error message",
- test: function() {
- setCommandMockResponse("drop", {
- ok: 0,
- code: ErrorCodes.OperationFailed,
- errmsg: "Dropping collection failed :: caused by ShutdownInProgress",
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(testDB.runCommand({drop: collName1}));
- }
- },
- ];
-
- // These tests only retry on TransientTransactionErrors. All other errors are expected to cause
- // the test to fail. Failpoints, overrides, and post-command functions are set by default to
- // only run once, so commands should succeed on retry.
- const txnOverrideTests = [
- {
- name: "ordinary CRUD ops",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
- assert.eq(coll1.find().itcount(), 2);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "getMore in transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cursorId, collection: collName1}));
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "getMore starts transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 0);
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- assert.commandWorked(testDB.createCollection(collName2));
-
- assert.throws(() => testDB.runCommand({getMore: cursorId, collection: collName1}));
- }
- },
- {
- name: "getMore in different transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 0);
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- assert.commandWorked(coll2.insert({_id: 3}));
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 1);
-
- assert.commandWorked(coll2.insert({_id: 4}));
-
- assert.commandFailed(testDB.runCommand({getMore: cursorId, collection: collName1}));
- }
- },
- {
- name: "getMore after TransientTransactionError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
- failCommandWithFailPoint(["find"], {errorCode: ErrorCodes.NoSuchTransaction});
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cursorId, collection: collName1}));
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
- assert.eq(coll1.find().itcount(), 2);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "implicit collection creation",
- test: function() {
- const res = assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(1, res.nInserted);
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "errors cause transaction to abort",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "update with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.throws(() => coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with two stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "update with chained stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- // Chain multiple update errors together.
- attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "implicit collection creation with stepdown",
- test: function() {
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError",
- test: function() {
- failCommandWithFailPoint(
- ["create"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal stepdown error",
- test: function() {
- failCommandWithErrorAndWCENoRun(
- "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal ordinary error",
- test: function() {
- failCommandWithErrorAndWCENoRun("create",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with ordinary error",
- test: function() {
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with network error",
- test: function() {
- failCommandWithFailPoint(["create"], {closeConnection: true});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError no success",
- test: function() {
- failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "errors cause the override to abort transactions",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.BadValue});
- assert.commandFailedWithCode(coll1.insert({_id: 2}), ErrorCodes.BadValue);
-
- stopFailingCommands();
- assert.eq(coll1.find().itcount(), 0);
-
- assert.commandWorked(coll1.insert({_id: 3}));
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["commitTransaction"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NotMaster,
- "NotMaster",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NoSuchTransaction,
- "NoSuchTransaction",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commands in 'admin' database end transaction",
- test: function() {
- testBadDBName(session, 'admin');
- }
- },
- {
- name: "commands in 'config' database end transaction",
- test: function() {
- testBadDBName(session, 'config');
- }
- },
- {
- name: "commands in 'local' database end transaction",
- test: function() {
- testBadDBName(session, 'local');
- }
- },
- {
- name: "getMore on change stream executes outside transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
-
- // Starting a $changeStream aggregation within a transaction would fail, so the
- // override has to execute this as a standalone command.
- const changeStream = testDB.collName1.watch();
- assert.commandWorked(testDB.collName1.insert({_id: 1}));
- endCurrentTransactionIfOpen();
-
-                // Calling the `next` function on the change stream cursor will trigger a getMore,
- // which the override must also run as a standalone command.
- assert.eq(changeStream.next()["fullDocument"], {_id: 1});
-
- // An aggregation without $changeStream runs within a transaction.
- let aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
- assert.eq(aggCursor.next(), {_id: 1});
-
- // Creating a non-$changeStream aggregation cursor and running its getMore in a
- // different transaction will fail.
- aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
- endCurrentTransactionIfOpen();
- assert.throws(() => aggCursor.next());
- }
- },
- ];
-
- // Failpoints, overrides, and post-command functions are set by default to only run once, so
- // commands should succeed on retry.
- const txnOverridePlusRetryOnNetworkErrorTests = [
- {
- name: "$where in jstests/core/js4.js",
- test: function() {
- const real = {a: 1, b: "abc", c: /abc/i, d: new Date(111911100111), e: null, f: true};
- assert.commandWorked(coll1.insert(real));
-
- failCommandWithErrorAndWCENoRun("drop",
- ErrorCodes.NamespaceNotFound,
- "NamespaceNotFound",
- ErrorCodes.NotMaster,
- "NotMaster");
- coll1.drop();
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
-
- assert.commandWorked(coll1.insert({a: 2, b: {c: 7, d: "d is good"}}));
- const cursor = coll1.find({
- $where: function() {
- assert.eq(3, Object.keySet(obj).length);
- assert.eq(2, obj.a);
- assert.eq(7, obj.b.c);
- assert.eq("d is good", obj.b.d);
- return true;
- }
- });
- assert.eq(1, cursor.toArray().length);
- }
- },
- {
- name: "update with network error after success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- attachPostCmdFunction("update", function() {
- throw new Error("SocketException");
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "retry on NotMaster",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "retry on NotMaster with object change",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- let obj1 = {_id: 1, x: 5};
- let obj2 = {_id: 2, x: 5};
- assert.commandWorked(coll1.insert(obj1));
- assert.commandWorked(coll1.insert(obj2));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
- obj1.x = 7;
- assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
-
- endCurrentTransactionIfOpen();
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
- }
- },
- {
- name: "implicit collection creation with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["create"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun(
- "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal ordinary error",
- test: function() {
- failCommandWithErrorAndWCENoRun("create",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with ordinary error",
- test: function() {
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["create"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "update with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with two stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "update with chained stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- // Chain multiple update errors together.
- attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "commit transaction with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["commitTransaction"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NotMaster,
- "NotMaster",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal ordinary error",
- test: function() {
-                // The override retries the commit on the write concern error; the
-                // failpoint fires only once, so the retry does not see OperationFailed again.
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal ordinary error twice",
- test: function() {
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- // After commitTransaction fails, fail it again with just the ordinary error.
- attachPostCmdFunction("commitTransaction", function() {
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.OperationFailed});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with ordinary error",
- test: function() {
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
- test: function() {
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NoSuchTransaction,
- "NoSuchTransaction",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with NoSuchTransaction error",
- test: function() {
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commitTransaction fails with SERVER-38856",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["create"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
-
- // After commitTransaction fails, abort the transaction and drop the collection
- // as if the transaction were being retried on a different node.
- attachPostCmdFunction("commitTransaction", function() {
- abortCurrentTransaction();
- assert.commandWorked(mongoRunCommandOriginal.apply(
- testDB.getMongo(), [dbName, {drop: collName2}, 0]));
- });
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1, x: 2}));
- assert.commandWorked(coll2.insert({_id: 2}));
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 4}}));
-
- endCurrentTransactionIfOpen();
-
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 6}]);
- assert.docEq(coll2.find().toArray(), [{_id: 2}]);
- }
- },
- {
- name: 'Dates are copied correctly for SERVER-41917',
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
-
- let date = new Date();
- assert.commandWorked(coll1.insert({_id: 3, a: date}));
- date.setMilliseconds(date.getMilliseconds() + 2);
- assert.eq(null, coll1.findOne({_id: 3, a: date}));
- const origDoc = coll1.findOne({_id: 3});
- const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: date}}));
- assert.eq(ret.nModified, 0);
-
- endCurrentTransactionIfOpen();
-
- assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
- }
- },
- {
- name: 'Timestamps are copied correctly for SERVER-41917',
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
-
- let ts = new Timestamp(5, 6);
- assert.commandWorked(coll1.insert({_id: 3, a: ts}));
- ts.t++;
-
- assert.eq(null, coll1.findOne({_id: 3, a: ts}));
- const origDoc = coll1.findOne({_id: 3});
- const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: ts}}));
- assert.eq(ret.nModified, 0);
-
- endCurrentTransactionIfOpen();
-
- assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
- }
- }
- ];
-
- TestData.networkErrorAndTxnOverrideConfig = {};
- TestData.sessionOptions = new SessionOptions();
- TestData.overrideRetryAttempts = 3;
-
- let session = conn.startSession(TestData.sessionOptions);
- let testDB = session.getDatabase(dbName);
-
- load("jstests/libs/override_methods/network_error_and_txn_override.js");
-
- jsTestLog("=-=-=-=-=-= Testing with 'retry on network error' by itself. =-=-=-=-=-=");
- TestData.sessionOptions = new SessionOptions({retryWrites: true});
- TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
- TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = false;
-
- session = conn.startSession(TestData.sessionOptions);
- testDB = session.getDatabase(dbName);
- let coll1 = testDB[collName1];
- let coll2 = testDB[collName2];
-
- retryOnNetworkErrorTests.forEach((testCase) => runTest("retryOnNetworkErrorTests", testCase));
-
- jsTestLog("=-=-=-=-=-= Testing with 'txn override' by itself. =-=-=-=-=-=");
- TestData.sessionOptions = new SessionOptions({retryWrites: false});
- TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = false;
- TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
-
- session = conn.startSession(TestData.sessionOptions);
- testDB = session.getDatabase(dbName);
- coll1 = testDB[collName1];
- coll2 = testDB[collName2];
-
- txnOverrideTests.forEach((testCase) => runTest("txnOverrideTests", testCase));
-
- jsTestLog("=-=-=-=-=-= Testing 'both txn override and retry on network error'. =-=-=-=-=-=");
- TestData.sessionOptions = new SessionOptions({retryWrites: true});
- TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
- TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
-
- session = conn.startSession(TestData.sessionOptions);
- testDB = session.getDatabase(dbName);
- coll1 = testDB[collName1];
- coll2 = testDB[collName2];
-
- txnOverridePlusRetryOnNetworkErrorTests.forEach(
- (testCase) => runTest("txnOverridePlusRetryOnNetworkErrorTests", testCase));
-
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/unconditional_step_down.js b/jstests/replsets/unconditional_step_down.js
index 07902aec3fa..c9f95bcb1ac 100644
--- a/jstests/replsets/unconditional_step_down.js
+++ b/jstests/replsets/unconditional_step_down.js
@@ -4,213 +4,211 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+
+const testName = "txnsDuringStepDown";
+const dbName = testName;
+const collName = "testcoll";
+const collNss = dbName + '.' + collName;
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
+rst.startSet();
+rst.initiate();
+
+let primary;
+let secondary;
+let primaryDB;
+
+function refreshConnection() {
+ primary = rst.getPrimary();
+ primaryDB = primary.getDB(dbName);
+ secondary = rst.getSecondary();
+}
+
+refreshConnection();
+
+jsTestLog("Writing data to collection.");
+assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: 'readOp'}]}));
+rst.awaitReplication();
+
+const readFailPoint = "waitInFindBeforeMakingBatch";
+const writeFailPoint = "hangWithLockDuringBatchInsert";
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.readFailPoint = readFailPoint;
+TestData.skipRetryOnNetworkError = true;
+
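+ // Run one step-down scenario: block a find and an insert on fail points, trigger the
+ // step down via 'stepDownFn', verify the blocked operations are interrupted, and wait
+ // for the old primary to reach the expected member state.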
+function runStepDownTest({testMsg, stepDownFn, toRemovedState}) {
+ jsTestLog(`Testing step down due to ${testMsg}`);
+
+ // 'toRemovedState' determines whether to tag the connections so that they are not
+ // closed when the primary transitions to the REMOVED state.
+ toRemovedState = toRemovedState || false;
+
+ // Clears the log before running the test.
+ assert.commandWorked(primary.adminCommand({clearLog: 'global'}));
+
+ jsTestLog("Enable fail point for namespace '" + collNss + "'");
+ // Find command.
+ assert.commandWorked(primary.adminCommand({
+ configureFailPoint: readFailPoint,
+ data: {nss: collNss, shouldCheckForInterrupt: true},
+ mode: "alwaysOn"
+ }));
+ // Insert command.
+ assert.commandWorked(primary.adminCommand({
+ configureFailPoint: writeFailPoint,
+ data: {nss: collNss, shouldCheckForInterrupt: true},
+ mode: "alwaysOn"
+ }));
+
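+ // Start a parallel shell. When the test drives the primary into the REMOVED state,
+ // first mark the shell's connection with {hangUpOnStepDown: false} so it is not
+ // closed by the state change.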
+ var startSafeParallelShell = (func, port) => {
+ TestData.func = func;
+ var safeFunc = (toRemovedState) ? () => {
+ assert.commandWorked(db.adminCommand({isMaster: 1, hangUpOnStepDown: false}));
+ TestData.func();
+ } : func;
+ return startParallelShell(safeFunc, port);
+ };
+
+ const joinReadThread = startSafeParallelShell(() => {
+ jsTestLog("Start blocking find cmd before step down");
+ var findRes = assert.commandWorked(
+ db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
+ assert.eq(findRes.cursor.firstBatch.length, 1);
+ }, primary.port);
+
+ const joinWriteThread = startSafeParallelShell(() => {
+ jsTestLog("Start blocking insert cmd before step down");
+ assert.commandFailedWithCode(
+ db.getSiblingDB(TestData.dbName)[TestData.collName].insert([{val: 'writeOp1'}]),
+ ErrorCodes.InterruptedDueToReplStateChange);
+ }, primary.port);
+
+ const joinUnblockStepDown = startSafeParallelShell(() => {
+ load("jstests/libs/check_log.js");
+
+ jsTestLog("Wait for step down to start killing operations");
+ checkLog.contains(db, "Starting to kill user operations");
+
+ jsTestLog("Unblock step down");
+ // Turn off fail point on find cmd to allow step down to continue.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: TestData.readFailPoint, mode: "off"}));
+ }, primary.port);
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+ jsTestLog("Wait for find cmd to reach the fail point");
+ waitForCurOpByFailPoint(primaryDB, collNss, readFailPoint);
- const testName = "txnsDuringStepDown";
- const dbName = testName;
- const collName = "testcoll";
- const collNss = dbName + '.' + collName;
+ jsTestLog("Wait for write cmd to reach the fail point");
+ waitForCurOpByFailPoint(primaryDB, collNss, writeFailPoint);
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
- rst.startSet();
- rst.initiate();
+ jsTestLog("Trigger step down");
+ var oldConfig = stepDownFn();
- let primary;
- let secondary;
- let primaryDB;
+ // Waits for all threads to join.
+ joinUnblockStepDown();
+ joinReadThread();
+ joinWriteThread();
- function refreshConnection() {
- primary = rst.getPrimary();
- primaryDB = primary.getDB(dbName);
- secondary = rst.getSecondary();
- }
+ // Wait until the primary has stepped down to SECONDARY (or to REMOVED, when
+ // 'toRemovedState' is set).
+ waitForState(primary,
+ (toRemovedState) ? ReplSetTest.State.REMOVED : ReplSetTest.State.SECONDARY);
+ assert.commandWorked(primary.adminCommand({configureFailPoint: writeFailPoint, mode: "off"}));
+ // Get the new primary.
refreshConnection();
+}
- jsTestLog("Writing data to collection.");
- assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: 'readOp'}]}));
- rst.awaitReplication();
-
- const readFailPoint = "waitInFindBeforeMakingBatch";
- const writeFailPoint = "hangWithLockDuringBatchInsert";
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.readFailPoint = readFailPoint;
- TestData.skipRetryOnNetworkError = true;
-
- function runStepDownTest({testMsg, stepDownFn, toRemovedState}) {
- jsTestLog(`Testing step down due to ${testMsg}`);
-
- // 'toRemovedState' determines whether to tag the connections so that they are not
- // closed when the primary transitions to the REMOVED state.
- toRemovedState = toRemovedState || false;
-
- // Clears the log before running the test.
- assert.commandWorked(primary.adminCommand({clearLog: 'global'}));
-
- jsTestLog("Enable fail point for namespace '" + collNss + "'");
- // Find command.
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: readFailPoint,
- data: {nss: collNss, shouldCheckForInterrupt: true},
- mode: "alwaysOn"
- }));
- // Insert command.
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: writeFailPoint,
- data: {nss: collNss, shouldCheckForInterrupt: true},
- mode: "alwaysOn"
- }));
-
- var startSafeParallelShell = (func, port) => {
- TestData.func = func;
- var safeFunc = (toRemovedState) ? () => {
- assert.commandWorked(db.adminCommand({isMaster: 1, hangUpOnStepDown: false}));
- TestData.func();
- } : func;
- return startParallelShell(safeFunc, port);
- };
-
- const joinReadThread = startSafeParallelShell(() => {
- jsTestLog("Start blocking find cmd before step down");
- var findRes = assert.commandWorked(
- db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
- assert.eq(findRes.cursor.firstBatch.length, 1);
- }, primary.port);
-
- const joinWriteThread = startSafeParallelShell(() => {
- jsTestLog("Start blocking insert cmd before step down");
- assert.commandFailedWithCode(
- db.getSiblingDB(TestData.dbName)[TestData.collName].insert([{val: 'writeOp1'}]),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.port);
-
- const joinUnblockStepDown = startSafeParallelShell(() => {
- load("jstests/libs/check_log.js");
-
- jsTestLog("Wait for step down to start killing operations");
- checkLog.contains(db, "Starting to kill user operations");
-
- jsTestLog("Unblock step down");
- // Turn off fail point on find cmd to allow step down to continue.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: TestData.readFailPoint, mode: "off"}));
- }, primary.port);
-
- jsTestLog("Wait for find cmd to reach the fail point");
- waitForCurOpByFailPoint(primaryDB, collNss, readFailPoint);
-
- jsTestLog("Wait for write cmd to reach the fail point");
- waitForCurOpByFailPoint(primaryDB, collNss, writeFailPoint);
-
- jsTestLog("Trigger step down");
- var oldConfig = stepDownFn();
-
- // Waits for all threads to join.
- joinUnblockStepDown();
- joinReadThread();
- joinWriteThread();
-
- // Wait until the primary has stepped down to SECONDARY (or to REMOVED, when
- // 'toRemovedState' is set).
- waitForState(primary,
- (toRemovedState) ? ReplSetTest.State.REMOVED : ReplSetTest.State.SECONDARY);
+function runStepsDowntoRemoved(params) {
+ var oldConfigBeforeTest = rst.getReplSetConfigFromNode();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: writeFailPoint, mode: "off"}));
- // Get the new primary.
- refreshConnection();
+ // Run the test.
+ params["toRemovedState"] = true;
+ runStepDownTest(params);
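+ // Give the saved config a version higher than the current one so it can be
+ // reinstalled via the forced reconfig below.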
+ oldConfigBeforeTest.version = ++(rst.getReplSetConfigFromNode().version);
+
+ // On exit, add the removed node back to the replica set.
+ assert.commandWorked(primary.adminCommand({replSetReconfig: oldConfigBeforeTest, force: true}));
+ refreshConnection();
+}
+
+runStepDownTest({
+ testMsg: "reconfig command",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
+
+ var oldMasterId = rst.getNodeId(primary);
+ var newMasterId = rst.getNodeId(secondary);
+
+ newConfig.members[oldMasterId].priority = 0;
+ newConfig.members[newMasterId].priority = 1;
+ newConfig.version++;
+
+ // Run it on primary
+ assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: true}));
}
+});
- function runStepsDowntoRemoved(params) {
- var oldConfigBeforeTest = rst.getReplSetConfigFromNode();
+runStepDownTest({
+ testMsg: "reconfig via heartbeat",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
- // Run the test.
- params["toRemovedState"] = true;
- runStepDownTest(params);
- oldConfigBeforeTest.version = ++(rst.getReplSetConfigFromNode().version);
+ var oldMasterId = rst.getNodeId(primary);
+ var newMasterId = rst.getNodeId(secondary);
- // On exit, add the removed node back to the replica set.
- assert.commandWorked(
- primary.adminCommand({replSetReconfig: oldConfigBeforeTest, force: true}));
- refreshConnection();
+ newConfig.members[oldMasterId].priority = 0;
+ newConfig.members[newMasterId].priority = 1;
+ newConfig.version++;
+
+ // Run it on secondary
+ assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
+ }
+});
+
+runStepsDowntoRemoved({
+ testMsg: "reconfig via heartbeat - primary to removed",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
+
+ var oldMasterId = rst.getNodeId(primary);
+ var newMasterId = rst.getNodeId(secondary);
+
+ newConfig.members[newMasterId].priority = 1;
+ // Remove the current primary from the config
+ newConfig.members.splice(oldMasterId, 1);
+ newConfig.version++;
+
+ // Run it on secondary
+ assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
+ }
+});
+
+runStepDownTest({
+ testMsg: "stepdown via heartbeat",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
+
+ var newMasterId = rst.getNodeId(secondary);
+
+ newConfig.members[newMasterId].priority = 2;
+ newConfig.version++;
+
+ // Run it on primary
+ assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: false}));
+
+ // Now step up the secondary, which will make the current primary step down.
+ rst.stepUp(secondary);
}
+});
- runStepDownTest({
- testMsg: "reconfig command",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var oldMasterId = rst.getNodeId(primary);
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[oldMasterId].priority = 0;
- newConfig.members[newMasterId].priority = 1;
- newConfig.version++;
-
- // Run it on primary
- assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: true}));
- }
- });
-
- runStepDownTest({
- testMsg: "reconfig via heartbeat",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var oldMasterId = rst.getNodeId(primary);
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[oldMasterId].priority = 0;
- newConfig.members[newMasterId].priority = 1;
- newConfig.version++;
-
- // Run it on secondary
- assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
- }
- });
-
- runStepsDowntoRemoved({
- testMsg: "reconfig via heartbeat - primary to removed",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var oldMasterId = rst.getNodeId(primary);
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[newMasterId].priority = 1;
- // Remove the current primary from the config
- newConfig.members.splice(oldMasterId, 1);
- newConfig.version++;
-
- // Run it on secondary
- assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
- }
- });
-
- runStepDownTest({
- testMsg: "stepdown via heartbeat",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[newMasterId].priority = 2;
- newConfig.version++;
-
- // Run it on primary
- assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: false}));
-
- // Now step up the secondary, which will make the current primary step down.
- rst.stepUp(secondary);
- }
- });
-
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/uninitialized_fcv_access.js b/jstests/replsets/uninitialized_fcv_access.js
index f4cdfae7674..dd2ff681e4c 100644
--- a/jstests/replsets/uninitialized_fcv_access.js
+++ b/jstests/replsets/uninitialized_fcv_access.js
@@ -3,30 +3,33 @@
* initialized does not crash the server (see SERVER-34600).
*/
(function() {
- 'use strict';
- load('jstests/libs/feature_compatibility_version.js');
+'use strict';
+load('jstests/libs/feature_compatibility_version.js');
- let rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- let node = rst.nodes[0];
+let rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+let node = rst.nodes[0];
- // The featureCompatibilityVersion parameter is initialized during rst.initiate(), so calling
- // getParameter on the fCV before then will attempt to access an uninitialized fCV.
+// The featureCompatibilityVersion parameter is initialized during rst.initiate(), so calling
+// getParameter on the fCV before then will attempt to access an uninitialized fCV.
- const getParamCmd = {getParameter: 1, featureCompatibilityVersion: 1};
- assert.commandFailedWithCode(node.getDB('admin').runCommand(getParamCmd),
- ErrorCodes.UnknownFeatureCompatibilityVersion,
- 'expected ' + tojson(getParamCmd) +
- ' to fail with code UnknownFeatureCompatibilityVersion');
+const getParamCmd = {
+ getParameter: 1,
+ featureCompatibilityVersion: 1
+};
+assert.commandFailedWithCode(
+ node.getDB('admin').runCommand(getParamCmd),
+ ErrorCodes.UnknownFeatureCompatibilityVersion,
+ 'expected ' + tojson(getParamCmd) + ' to fail with code UnknownFeatureCompatibilityVersion');
- rst.initiate();
+rst.initiate();
- // After the replica set is initialized, getParameter should successfully return the fCV.
+// After the replica set is initialized, getParameter should successfully return the fCV.
- const primary = rst.getPrimary();
- const res = primary.adminCommand(getParamCmd);
- assert.commandWorked(res);
- assert.eq(res.featureCompatibilityVersion.version, latestFCV, tojson(res));
+const primary = rst.getPrimary();
+const res = primary.adminCommand(getParamCmd);
+assert.commandWorked(res);
+assert.eq(res.featureCompatibilityVersion.version, latestFCV, tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js b/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js
index 7915dfd4b7b..61a4c339fdf 100644
--- a/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js
+++ b/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js
@@ -4,84 +4,83 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- // Set up a ReplSetTest where nodes only sync one oplog entry at a time.
- const rst = new ReplSetTest(
- {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
- rst.startSet();
- const config = rst.getReplSetConfig();
- // Ban chaining and prevent elections.
- config.settings = {chainingAllowed: false, electionTimeoutMillis: 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Set up a ReplSetTest where nodes only sync one oplog entry at a time.
+const rst = new ReplSetTest(
+ {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
+rst.startSet();
+const config = rst.getReplSetConfig();
+// Ban chaining and prevent elections.
+config.settings = {
+ chainingAllowed: false,
+ electionTimeoutMillis: 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- const nodeA = rst.nodes[0];
- const nodeB = rst.nodes[1];
- const nodeC = rst.nodes[2];
- const nodeD = rst.nodes[3];
- const nodeE = rst.nodes[4];
+const nodeA = rst.nodes[0];
+const nodeB = rst.nodes[1];
+const nodeC = rst.nodes[2];
+const nodeD = rst.nodes[3];
+const nodeE = rst.nodes[4];
- jsTest.log("Node A is primary in term 1. Replicate a write to Node E that is not committed.");
- assert.eq(nodeA, rst.getPrimary());
- // Ensure Node E has a majority committed snapshot.
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "dummy"}));
- rst.awaitLastOpCommitted();
- stopServerReplication([nodeB, nodeC, nodeD]);
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 1"}));
- rst.awaitReplication(undefined, undefined, [nodeE]);
- assert.eq(0,
- nodeE.getDB(dbName)[collName]
- .find({_id: "term 1, doc 1"})
- .readConcern("majority")
- .itcount());
+jsTest.log("Node A is primary in term 1. Replicate a write to Node E that is not committed.");
+assert.eq(nodeA, rst.getPrimary());
+// Ensure Node E has a majority committed snapshot.
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "dummy"}));
+rst.awaitLastOpCommitted();
+stopServerReplication([nodeB, nodeC, nodeD]);
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 1"}));
+rst.awaitReplication(undefined, undefined, [nodeE]);
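+// Only Nodes A and E hold this write, so it is not majority committed; a majority read
+// on Node E must not return it.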
+assert.eq(
+ 0,
+ nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 1"}).readConcern("majority").itcount());
- jsTest.log("Disconnect Node E. Perform a new write.");
- nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
- restartServerReplication([nodeB, nodeC, nodeD]);
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 2"}));
+jsTest.log("Disconnect Node E. Perform a new write.");
+nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
+restartServerReplication([nodeB, nodeC, nodeD]);
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 2"}));
- jsTest.log("Step up Node B in term 2. Commit a new write.");
- // Ensure Node B is caught up, so that it can become primary.
- rst.awaitReplication(undefined, undefined, [nodeB]);
- assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
- rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
- assert.eq(nodeB, rst.getPrimary());
- assert.commandWorked(
- nodeB.getDB(dbName)[collName].insert({_id: "term 2"}, {writeConcern: {w: "majority"}}));
- // Node E might sync from Node A or Node B. Ensure they both have the new commit point.
- rst.awaitLastOpCommitted(undefined, [nodeA]);
+jsTest.log("Step up Node B in term 2. Commit a new write.");
+// Ensure Node B is caught up, so that it can become primary.
+rst.awaitReplication(undefined, undefined, [nodeB]);
+assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
+rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
+assert.eq(nodeB, rst.getPrimary());
+assert.commandWorked(
+ nodeB.getDB(dbName)[collName].insert({_id: "term 2"}, {writeConcern: {w: "majority"}}));
+// Node E might sync from Node A or Node B. Ensure they both have the new commit point.
+rst.awaitLastOpCommitted(undefined, [nodeA]);
- jsTest.log("Allow Node E to replicate the last write from term 1.");
- // The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
- // applying the document {msg: "new primary"}, which is the first document of term 2. This
- // depends on the oplog fetcher batch size being 1.
- assert.commandWorked(nodeE.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "alwaysOn",
- data: {document: {msg: "new primary"}}
- }));
- nodeE.reconnect([nodeA, nodeB, nodeC, nodeD]);
- assert.soon(() => {
- return nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 2"}).itcount() === 1;
- });
- assert.eq(0, nodeE.getDB(dbName)[collName].find({_id: "term 2"}).itcount());
+jsTest.log("Allow Node E to replicate the last write from term 1.");
+// The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
+// applying the document {msg: "new primary"}, which is the first document of term 2. This
+// depends on the oplog fetcher batch size being 1.
+assert.commandWorked(nodeE.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "alwaysOn",
+ data: {document: {msg: "new primary"}}
+}));
+nodeE.reconnect([nodeA, nodeB, nodeC, nodeD]);
+assert.soon(() => {
+ return nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 2"}).itcount() === 1;
+});
+assert.eq(0, nodeE.getDB(dbName)[collName].find({_id: "term 2"}).itcount());
- jsTest.log("Node E now knows that its first write is majority committed.");
- // It does not yet know that {_id: "term 1, doc 2"} is committed. Its last batch was {_id: "term
- // 1, doc 2"}. The sync source's lastOpCommitted was in term 2, so Node E updated its
- // lastOpCommitted to its lastApplied, which did not yet include {_id: "term 1, doc 2"}.
- assert.eq(1,
- nodeE.getDB(dbName)[collName]
- .find({_id: "term 1, doc 1"})
- .readConcern("majority")
- .itcount());
+jsTest.log("Node E now knows that its first write is majority committed.");
+// It does not yet know that {_id: "term 1, doc 2"} is committed. Its last batch was {_id: "term
+// 1, doc 2"}. The sync source's lastOpCommitted was in term 2, so Node E updated its
+// lastOpCommitted to its lastApplied, which did not yet include {_id: "term 1, doc 2"}.
+assert.eq(
+ 1,
+ nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 1"}).readConcern("majority").itcount());
- assert.commandWorked(
- nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
- rst.stopSet();
+assert.commandWorked(
+ nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
+rst.stopSet();
}());
diff --git a/jstests/replsets/user_management_wc.js b/jstests/replsets/user_management_wc.js
index c3467d4f80c..f4e755b6f7e 100644
--- a/jstests/replsets/user_management_wc.js
+++ b/jstests/replsets/user_management_wc.js
@@ -9,135 +9,135 @@ load('jstests/multiVersion/libs/auth_helpers.js');
*/
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- var replTest = new ReplSetTest(
- {name: 'UserManagementWCSet', nodes: 3, settings: {chainingAllowed: false}});
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var dbName = "user-management-wc-test";
- var db = master.getDB(dbName);
- var adminDB = master.getDB('admin');
-
- function dropUsersAndRoles() {
- db.dropUser('username');
- db.dropUser('user1');
- db.dropUser('user2');
- }
-
- var commands = [];
-
- commands.push({
- req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
- setupFunc: function() {},
- confirmFunc: function() {
- assert(db.auth("username", "password"), "auth failed");
- assert(!db.auth("username", "passworda"), "auth should have failed");
- },
- admin: false
- });
-
- commands.push({
- req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
- setupFunc: function() {
- db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
- },
- confirmFunc: function() {
- assert(db.auth("username", "password2"), "auth failed");
- assert(!db.auth("username", "password"), "auth should have failed");
- },
- admin: false
- });
-
- commands.push({
- req: {dropUser: 'tempUser'},
- setupFunc: function() {
- db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
- assert(db.auth("tempUser", "password"), "auth failed");
- },
- confirmFunc: function() {
- assert(!db.auth("tempUser", "password"), "auth should have failed");
- },
- admin: false
- });
-
- commands.push({
- req: {
- _mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: false
- },
- setupFunc: function() {
- adminDB.system.users.remove({});
- adminDB.system.roles.remove({});
- adminDB.createUser({user: 'lorax', pwd: 'pwd', roles: ['read']});
- adminDB.createRole({role: 'role1', roles: ['read'], privileges: []});
- adminDB.system.users.find().forEach(function(doc) {
- adminDB.tempusers.insert(doc);
- });
- adminDB.system.roles.find().forEach(function(doc) {
- adminDB.temproles.insert(doc);
- });
- adminDB.system.users.remove({});
- adminDB.system.roles.remove({});
-
- assert.eq(0, adminDB.system.users.find().itcount());
- assert.eq(0, adminDB.system.roles.find().itcount());
-
- db.createUser({user: 'lorax2', pwd: 'pwd', roles: ['readWrite']});
- db.createRole({role: 'role2', roles: ['readWrite'], privileges: []});
-
- assert.eq(1, adminDB.system.users.find().itcount());
- assert.eq(1, adminDB.system.roles.find().itcount());
- },
- confirmFunc: function() {
- assert.eq(2, adminDB.system.users.find().itcount());
- assert.eq(2, adminDB.system.roles.find().itcount());
- },
- admin: true
- });
-
- function assertUserManagementWriteConcernError(res) {
- assert.commandFailed(res);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assertWriteConcernError(res);
- }
-
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropUsersAndRoles();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full replica set had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
- }
-
- function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 15};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropUsersAndRoles();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assertUserManagementWriteConcernError(res);
- cmd.confirmFunc();
- }
-
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- testInvalidWriteConcern(cmd);
- });
-
- replTest.stopSet();
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+var replTest =
+ new ReplSetTest({name: 'UserManagementWCSet', nodes: 3, settings: {chainingAllowed: false}});
+replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var dbName = "user-management-wc-test";
+var db = master.getDB(dbName);
+var adminDB = master.getDB('admin');
+
+function dropUsersAndRoles() {
+ db.dropUser('username');
+ db.dropUser('user1');
+ db.dropUser('user2');
+}
+
+var commands = [];
+
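+// Each entry bundles the request to run, a setup function, a verification function, and
+// whether the command must be run against the admin database.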
+commands.push({
+ req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert(db.auth("username", "password"), "auth failed");
+ assert(!db.auth("username", "passworda"), "auth should have failed");
+ },
+ admin: false
+});
+
+commands.push({
+ req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
+ setupFunc: function() {
+ db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
+ },
+ confirmFunc: function() {
+ assert(db.auth("username", "password2"), "auth failed");
+ assert(!db.auth("username", "password"), "auth should have failed");
+ },
+ admin: false
+});
+
+commands.push({
+ req: {dropUser: 'tempUser'},
+ setupFunc: function() {
+ db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
+ assert(db.auth("tempUser", "password"), "auth failed");
+ },
+ confirmFunc: function() {
+ assert(!db.auth("tempUser", "password"), "auth should have failed");
+ },
+ admin: false
+});
+
+commands.push({
+ req: {
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: false
+ },
+ setupFunc: function() {
+ adminDB.system.users.remove({});
+ adminDB.system.roles.remove({});
+ adminDB.createUser({user: 'lorax', pwd: 'pwd', roles: ['read']});
+ adminDB.createRole({role: 'role1', roles: ['read'], privileges: []});
+ adminDB.system.users.find().forEach(function(doc) {
+ adminDB.tempusers.insert(doc);
+ });
+ adminDB.system.roles.find().forEach(function(doc) {
+ adminDB.temproles.insert(doc);
+ });
+ adminDB.system.users.remove({});
+ adminDB.system.roles.remove({});
+
+ assert.eq(0, adminDB.system.users.find().itcount());
+ assert.eq(0, adminDB.system.roles.find().itcount());
+
+ db.createUser({user: 'lorax2', pwd: 'pwd', roles: ['readWrite']});
+ db.createRole({role: 'role2', roles: ['readWrite'], privileges: []});
+
+ assert.eq(1, adminDB.system.users.find().itcount());
+ assert.eq(1, adminDB.system.roles.find().itcount());
+ },
+ confirmFunc: function() {
+ assert.eq(2, adminDB.system.users.find().itcount());
+ assert.eq(2, adminDB.system.roles.find().itcount());
+ },
+ admin: true
+});
+
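+// A write concern error does not roll back the command's effects: the command itself
+// succeeds but the response carries a writeConcernError, so confirmFunc still passes.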
+function assertUserManagementWriteConcernError(res) {
+ assert.commandFailed(res);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assertWriteConcernError(res);
+}
+
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropUsersAndRoles();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+ 'command on a full replica set had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
+
+function testInvalidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 15};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropUsersAndRoles();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assertUserManagementWriteConcernError(res);
+ cmd.confirmFunc();
+}
+
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ testInvalidWriteConcern(cmd);
+});
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/verify_sessions_expiration_rs.js b/jstests/replsets/verify_sessions_expiration_rs.js
index fbb5465e8f4..70c02f205c9 100644
--- a/jstests/replsets/verify_sessions_expiration_rs.js
+++ b/jstests/replsets/verify_sessions_expiration_rs.js
@@ -14,123 +14,127 @@
// replace it in the config.system.sessions collection.
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
-
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
- const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-
- function refreshSessionsAndVerifyCount(config, expectedCount) {
- config.runCommand(refresh);
- assert.eq(config.system.sessions.count(), expectedCount);
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
+const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+
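+// Force a synchronous refresh of the logical session cache, then check how many session
+// records made it into config.system.sessions.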
+function refreshSessionsAndVerifyCount(config, expectedCount) {
+ config.runCommand(refresh);
+ assert.eq(config.system.sessions.count(), expectedCount);
+}
+
+function getSessions(config) {
+ return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
+}
+
+const dbName = "test";
+const testCollName = "verify_sessions_find_get_more";
+
+let replTest = new ReplSetTest({name: 'refresh', nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+replTest.awaitSecondaryNodes();
+
+let db = primary.getDB(dbName);
+let config = primary.getDB("config");
+
+// 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
+for (let i = 0; i < 5; i++) {
+ let res = db.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
+refreshSessionsAndVerifyCount(config, 5);
+
+// Manually delete entries in config.system.sessions to simulate TTL expiration.
+assert.commandWorked(config.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(config, 0);
+
+// 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
+// config.system.sessions collection.
+for (let i = 0; i < 10; i++) {
+ db[testCollName].insert({_id: i, a: i, b: 1});
+}
+
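+// Open one cursor per session with batchSize(1), so that each getMore issued below
+// refreshes the owning session's 'lastUse' field.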
+let cursors = [];
+for (let i = 0; i < 5; i++) {
+ let session = db.getMongo().startSession({});
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "initialize the session");
+ cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
+ assert(cursors[i].hasNext());
+}
+
+refreshSessionsAndVerifyCount(config, 5);
+
+let sessionsCollectionArray;
+let lastUseValues = [];
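+// The first pass records baseline 'lastUse' values; later passes assert that each
+// round of getMores advanced them.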
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < cursors.length; j++) {
+ cursors[j].next();
}
- function getSessions(config) {
- return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
- }
-
- const dbName = "test";
- const testCollName = "verify_sessions_find_get_more";
-
- let replTest = new ReplSetTest({name: 'refresh', nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const primary = replTest.getPrimary();
- replTest.awaitSecondaryNodes();
-
- let db = primary.getDB(dbName);
- let config = primary.getDB("config");
-
- // 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
- for (let i = 0; i < 5; i++) {
- let res = db.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
refreshSessionsAndVerifyCount(config, 5);
- // Manually delete entries in config.system.sessions to simulate TTL expiration.
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 0);
-
- // 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
- // config.system.sessions collection.
- for (let i = 0; i < 10; i++) {
- db[testCollName].insert({_id: i, a: i, b: 1});
- }
+ sessionsCollectionArray = getSessions(config);
- let cursors = [];
- for (let i = 0; i < 5; i++) {
- let session = db.getMongo().startSession({});
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "initialize the session");
- cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
- assert(cursors[i].hasNext());
- }
-
- refreshSessionsAndVerifyCount(config, 5);
-
- let sessionsCollectionArray;
- let lastUseValues = [];
- for (let i = 0; i < 3; i++) {
- for (let j = 0; j < cursors.length; j++) {
- cursors[j].next();
+ if (i == 0) {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ lastUseValues.push(sessionsCollectionArray[j].lastUse);
}
-
- refreshSessionsAndVerifyCount(config, 5);
-
- sessionsCollectionArray = getSessions(config);
-
- if (i == 0) {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- lastUseValues.push(sessionsCollectionArray[j].lastUse);
- }
- } else {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
- lastUseValues[j] = sessionsCollectionArray[j].lastUse;
- }
+ } else {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
+ lastUseValues[j] = sessionsCollectionArray[j].lastUse;
}
}
-
- // 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
- // cursors.
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 0);
-
- for (let i = 0; i < cursors.length; i++) {
- assert.commandFailedWithCode(
- db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
- }
-
- // 4. Verify that an expired session (simulated by manual deletion) that has a currently running
- // operation will be vivified during the logical session cache refresh.
- let pinnedCursorSession = db.getMongo().startSession();
- let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
-
- withPinnedCursor({
- conn: primary,
- db: pinnedCursorDB,
- assertFunction: (cursorId, coll) => {
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 1);
-
- let db = coll.getDB();
- assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- },
- runGetMoreFunc: () => {
- db.runCommand({getMore: cursorId, collection: collName});
- },
- failPointName: failPointName
+}
+
+// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
+// cursors.
+assert.commandWorked(config.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(config, 0);
+
+for (let i = 0; i < cursors.length; i++) {
+ assert.commandFailedWithCode(
+ db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+}
+
+// 4. Verify that an expired session (simulated by manual deletion) that has a currently running
+// operation will be vivified during the logical session cache refresh.
+let pinnedCursorSession = db.getMongo().startSession();
+let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
+
+withPinnedCursor({
+ conn: primary,
+ db: pinnedCursorDB,
+ assertFunction: (cursorId, coll) => {
+ assert.commandWorked(config.system.sessions.remove({}));
+ refreshSessionsAndVerifyCount(config, 1);
+
+ let db = coll.getDB();
+ assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+ },
+ runGetMoreFunc: () => {
+ db.runCommand({getMore: cursorId, collection: collName});
},
- /* assertEndCounts */ false);
+ failPointName: failPointName
+},
+ /* assertEndCounts */ false);
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/view_catalog_oplog_entries.js b/jstests/replsets/view_catalog_oplog_entries.js
index d245a84b897..a39a3f521de 100644
--- a/jstests/replsets/view_catalog_oplog_entries.js
+++ b/jstests/replsets/view_catalog_oplog_entries.js
@@ -4,45 +4,45 @@
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "view_catalog_oplog_entries";
- const collName = "test_coll";
- const viewName = "test_view";
+const dbName = "view_catalog_oplog_entries";
+const collName = "test_coll";
+const viewName = "test_view";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- assert.commandWorked(primary.getDB(dbName)[collName].insert({a: 1}));
+assert.commandWorked(primary.getDB(dbName)[collName].insert({a: 1}));
- // Create the view.
- assert.commandWorked(primary.getDB(dbName).createView(viewName, collName, []));
+// Create the view.
+assert.commandWorked(primary.getDB(dbName).createView(viewName, collName, []));
- // Modify the view with the "collMod" command.
- assert.commandWorked(primary.getDB(dbName).runCommand(
- {collMod: viewName, viewOn: collName, pipeline: [{$project: {a: 1}}]}));
+// Modify the view with the "collMod" command.
+assert.commandWorked(primary.getDB(dbName).runCommand(
+ {collMod: viewName, viewOn: collName, pipeline: [{$project: {a: 1}}]}));
- // There should be exactly one insert into "system.views" for the view creation...
- const oplog = primary.getDB("local").oplog.rs;
- const createViewOplogEntry = oplog.find({op: "i", ns: (dbName + ".system.views")}).toArray();
- assert.eq(createViewOplogEntry.length, 1);
- assert(createViewOplogEntry[0].hasOwnProperty("ui"),
- "Oplog entry for view creation missing UUID for view catalog: " +
- tojson(createViewOplogEntry[0]));
- const viewCatalogUUID = createViewOplogEntry[0].ui;
+// There should be exactly one insert into "system.views" for the view creation...
+const oplog = primary.getDB("local").oplog.rs;
+const createViewOplogEntry = oplog.find({op: "i", ns: (dbName + ".system.views")}).toArray();
+assert.eq(createViewOplogEntry.length, 1);
+assert(createViewOplogEntry[0].hasOwnProperty("ui"),
+ "Oplog entry for view creation missing UUID for view catalog: " +
+ tojson(createViewOplogEntry[0]));
+const viewCatalogUUID = createViewOplogEntry[0].ui;
- // ...and exactly one update on "system.views" for the view collMod.
- const modViewOplogEntry = oplog.find({op: "u", ns: (dbName + ".system.views")}).toArray();
- assert.eq(modViewOplogEntry.length, 1);
- assert(modViewOplogEntry[0].hasOwnProperty("ui"),
- "Oplog entry for view modification missing UUID for view catalog: " +
- tojson(modViewOplogEntry[0]));
+// ...and exactly one update on "system.views" for the view collMod.
+const modViewOplogEntry = oplog.find({op: "u", ns: (dbName + ".system.views")}).toArray();
+assert.eq(modViewOplogEntry.length, 1);
+assert(modViewOplogEntry[0].hasOwnProperty("ui"),
+ "Oplog entry for view modification missing UUID for view catalog: " +
+ tojson(modViewOplogEntry[0]));
- // Both entries should have the same UUID.
- assert.eq(viewCatalogUUID, modViewOplogEntry[0].ui);
+// Both entries should have the same UUID.
+assert.eq(viewCatalogUUID, modViewOplogEntry[0].ui);
- replTest.stopSet();
+replTest.stopSet();
}());
diff --git a/jstests/replsets/view_definition_initial_sync_with_feature_compatibility_version.js b/jstests/replsets/view_definition_initial_sync_with_feature_compatibility_version.js
index ad5ef00391d..94471b3657c 100644
--- a/jstests/replsets/view_definition_initial_sync_with_feature_compatibility_version.js
+++ b/jstests/replsets/view_definition_initial_sync_with_feature_compatibility_version.js
@@ -13,90 +13,87 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- const testName = "view_definition_initial_sync_with_feature_compatibility";
-
- function testView(pipeline) {
- //
- // Create a single-node replica set.
- //
- let replTest = new ReplSetTest({name: testName, nodes: 1});
-
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
- let testDB = primary.getDB("test");
-
- //
- // Explicitly set the replica set to feature compatibility version 4.2.
- //
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- //
- // Create a view using 4.2 query features.
- //
- assert.commandWorked(testDB.createView("view1", "coll", pipeline));
-
- //
- // Downgrade the replica set to feature compatibility version 4.0.
- //
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
-
- //
- // Add a new member to the replica set.
- //
- let secondaryDBPath = MongoRunner.dataPath + testName + "_secondary";
- resetDbpath(secondaryDBPath);
- let secondary = replTest.add({dbpath: secondaryDBPath});
- replTest.reInitiate(secondary);
- reconnect(primary);
- reconnect(secondary);
-
- //
- // Once the new member completes its initial sync, stop it, remove it from the replica
- // set, and start it back up as an individual instance.
- //
- replTest.waitForState(secondary, [ReplSetTest.State.PRIMARY, ReplSetTest.State.SECONDARY]);
-
- replTest.stopSet(undefined /* send default signal */,
- true /* don't clear data directory */);
-
- secondary = MongoRunner.runMongod({dbpath: secondaryDBPath, noCleanData: true});
- assert.neq(null, secondary, "mongod was unable to start up");
-
- //
- // Verify that the view synced to the new member.
- //
- let secondaryDB = secondary.getDB("test");
- assert.eq(secondaryDB.system.views.findOne({_id: "test.view1"}, {_id: 1}),
- {_id: "test.view1"});
-
- //
- // Verify that, even though a view using 4.2 query features exists, it is not possible to
- // create a new view using 4.2 query features because of feature compatibility version 4.0.
- //
- assert.commandFailedWithCode(secondaryDB.createView("view2", "coll", pipeline),
- ErrorCodes.QueryFeatureNotAllowed);
-
- MongoRunner.stopMongod(secondary);
+"use strict";
+const testName = "view_definition_initial_sync_with_feature_compatibility";
+
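+// For each pipeline that uses 4.2 query features: create a view under FCV 4.2,
+// downgrade to FCV 4.0, initial-sync a new member, and verify the view definition
+// replicated even though it could not have been created under FCV 4.0.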
+function testView(pipeline) {
+ //
+ // Create a single-node replica set.
+ //
+ let replTest = new ReplSetTest({name: testName, nodes: 1});
+
+ replTest.startSet();
+ replTest.initiate();
+
+ let primary = replTest.getPrimary();
+ let testDB = primary.getDB("test");
+
+ //
+ // Explicitly set the replica set to feature compatibility version 4.2.
+ //
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+ //
+ // Create a view using 4.2 query features.
+ //
+ assert.commandWorked(testDB.createView("view1", "coll", pipeline));
+
+ //
+ // Downgrade the replica set to feature compatibility version 4.0.
+ //
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "4.0"}));
+
+ //
+ // Add a new member to the replica set.
+ //
+ let secondaryDBPath = MongoRunner.dataPath + testName + "_secondary";
+ resetDbpath(secondaryDBPath);
+ let secondary = replTest.add({dbpath: secondaryDBPath});
+ replTest.reInitiate(secondary);
+ reconnect(primary);
+ reconnect(secondary);
+
+ //
+ // Once the new member completes its initial sync, stop it, remove it from the replica
+ // set, and start it back up as an individual instance.
+ //
+ replTest.waitForState(secondary, [ReplSetTest.State.PRIMARY, ReplSetTest.State.SECONDARY]);
+
+ replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
+
+ secondary = MongoRunner.runMongod({dbpath: secondaryDBPath, noCleanData: true});
+ assert.neq(null, secondary, "mongod was unable to start up");
+
+ //
+ // Verify that the view synced to the new member.
+ //
+ let secondaryDB = secondary.getDB("test");
+ assert.eq(secondaryDB.system.views.findOne({_id: "test.view1"}, {_id: 1}), {_id: "test.view1"});
+
+ //
+ // Verify that, even though a view using 4.2 query features exists, it is not possible to
+ // create a new view using 4.2 query features because of feature compatibility version 4.0.
+ //
+ assert.commandFailedWithCode(secondaryDB.createView("view2", "coll", pipeline),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+ MongoRunner.stopMongod(secondary);
+}
+
+testView([{$addFields: {x: {$round: 4.57}}}]);
+testView([{$addFields: {x: {$trunc: [4.57, 1]}}}]);
+testView([{$addFields: {x: {$regexFind: {input: "string", regex: /st/}}}}]);
+testView([{$addFields: {x: {$regexFindAll: {input: "string", regex: /st/}}}}]);
+testView([{$addFields: {x: {$regexMatch: {input: "string", regex: /st/}}}}]);
+testView([{$facet: {pipe1: [{$addFields: {x: {$round: 4.57}}}]}}]);
+testView([{
+ $facet: {
+ pipe1: [{$addFields: {x: {$round: 4.57}}}],
+ pipe2: [{$addFields: {newThing: {$regexMatch: {input: "string", regex: /st/}}}}]
}
-
- testView([{$addFields: {x: {$round: 4.57}}}]);
- testView([{$addFields: {x: {$trunc: [4.57, 1]}}}]);
- testView([{$addFields: {x: {$regexFind: {input: "string", regex: /st/}}}}]);
- testView([{$addFields: {x: {$regexFindAll: {input: "string", regex: /st/}}}}]);
- testView([{$addFields: {x: {$regexMatch: {input: "string", regex: /st/}}}}]);
- testView([{$facet: {pipe1: [{$addFields: {x: {$round: 4.57}}}]}}]);
- testView([{
- $facet: {
- pipe1: [{$addFields: {x: {$round: 4.57}}}],
- pipe2: [{$addFields: {newThing: {$regexMatch: {input: "string", regex: /st/}}}}]
- }
- }]);
- testView(
- [{$lookup: {from: 'x', pipeline: [{$addFields: {x: {$round: 4.57}}}], as: 'results'}}]);
- testView([{
+}]);
+testView([{$lookup: {from: 'x', pipeline: [{$addFields: {x: {$round: 4.57}}}], as: 'results'}}]);
+testView([{
$graphLookup: {
from: 'x',
startWith: ["$_id"],
@@ -106,7 +103,7 @@ load("jstests/replsets/rslib.js");
as: 'results'
}
}]);
- testView([{
+testView([{
$lookup: {
from: 'x',
pipeline: [{$facet: {pipe1: [{$addFields: {x: {$round: 4.57}}}]}}],
diff --git a/jstests/replsets/write_concern_after_stepdown.js b/jstests/replsets/write_concern_after_stepdown.js
index 250acb9e93d..b54e62e8965 100644
--- a/jstests/replsets/write_concern_after_stepdown.js
+++ b/jstests/replsets/write_concern_after_stepdown.js
@@ -3,100 +3,100 @@
* primary to incorrectly acknowledge a w:majority write that's about to be rolled back.
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/write_concern_util.js");
-
- var name = "writeConcernStepDownAndBackUp";
- var dbName = "wMajorityCheck";
- var collName = "stepdownAndBackUp";
-
- var rst = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {},
- {rsConfig: {priority: 0}},
- ],
- useBridge: true
+'use strict';
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/write_concern_util.js");
+
+var name = "writeConcernStepDownAndBackUp";
+var dbName = "wMajorityCheck";
+var collName = "stepdownAndBackUp";
+
+var rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+ useBridge: true
+});
+var nodes = rst.startSet();
+rst.initiate();
+
+function waitForPrimary(node) {
+ assert.soon(function() {
+ return node.adminCommand('ismaster').ismaster;
});
- var nodes = rst.startSet();
- rst.initiate();
-
- function waitForPrimary(node) {
- assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
- });
- }
-
- // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the correct
- // size for faster startup, so nodes[0] is always the first primary.
- jsTestLog("Make sure node 0 is primary.");
- var primary = rst.getPrimary();
- var secondaries = rst.getSecondaries();
- assert.eq(nodes[0], primary);
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
- {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Stop the secondaries from replicating.
- stopServerReplication(secondaries);
- // Stop the primary from being able to complete stepping down.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
-
- jsTestLog("Do w:majority write that will block waiting for replication.");
- var doMajorityWrite = function() {
- // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
- // one that shouldn't be closed when the node steps down. This makes it easier to detect
- // the error returned by the write concern failure.
- assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
-
- var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
- writeConcern: {w: 'majority', wtimeout: 600000}
- });
- assert.writeErrorWithCode(
- res, [ErrorCodes.PrimarySteppedDown, ErrorCodes.InterruptedDueToReplStateChange]);
- };
-
- var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
-
- jsTest.log("Disconnect primary from all secondaries");
- nodes[0].disconnect(nodes[1]);
- nodes[0].disconnect(nodes[2]);
-
- jsTest.log("Wait for a new primary to be elected");
- // Allow the secondaries to replicate again.
- restartServerReplication(secondaries);
-
- waitForPrimary(nodes[1]);
-
- jsTest.log("Do a write to the new primary");
- assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
- {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
-
- jsTest.log("Reconnect the old primary to the rest of the nodes");
- // Only allow the old primary to connect to the other nodes, not the other way around.
- // This is so that the old primary will detect that it needs to step down and step itself down,
- // rather than one of the other nodes detecting this and sending it a replSetStepDown command,
- // which would cause the old primary to kill all operations and close all connections, making
- // the way the insert in the parallel shell fails nondeterministic. Rather than
- // handling all possible failure modes in the parallel shell, allowing heartbeat connectivity in
- // only one direction makes it easier for the test to fail deterministically.
- nodes[1].acceptConnectionsFrom(nodes[0]);
- nodes[2].acceptConnectionsFrom(nodes[0]);
-
- // Allow the old primary to finish stepping down so that shutdown can finish.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'}));
-
- joinMajorityWriter();
-
- // Node 0 will go into rollback after it steps down. We want to wait for that to happen, and
- // then complete, in order to get a clean shutdown.
- jsTestLog("Waiting for node 0 to roll back the failed write.");
- rst.awaitReplication();
-
- rst.stopSet();
+}
+
+// SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the correct
+// size for faster startup, so nodes[0] is always the first primary.
+jsTestLog("Make sure node 0 is primary.");
+var primary = rst.getPrimary();
+var secondaries = rst.getSecondaries();
+assert.eq(nodes[0], primary);
+// Wait for all data bearing nodes to get up to date.
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+ {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Stop the secondaries from replicating.
+stopServerReplication(secondaries);
+// Stop the primary from being able to complete stepping down.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
+
+jsTestLog("Do w:majority write that will block waiting for replication.");
+var doMajorityWrite = function() {
+ // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
+ // one that shouldn't be closed when the node steps down. This makes it easier to detect
+ // the error returned by the write concern failure.
+ assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
+
+ var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
+ writeConcern: {w: 'majority', wtimeout: 600000}
+ });
+ assert.writeErrorWithCode(
+ res, [ErrorCodes.PrimarySteppedDown, ErrorCodes.InterruptedDueToReplStateChange]);
+};
+
+var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
+
+jsTest.log("Disconnect primary from all secondaries");
+nodes[0].disconnect(nodes[1]);
+nodes[0].disconnect(nodes[2]);
+
+jsTest.log("Wait for a new primary to be elected");
+// Allow the secondaries to replicate again.
+restartServerReplication(secondaries);
+
+waitForPrimary(nodes[1]);
+
+jsTest.log("Do a write to the new primary");
+assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+ {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
+
+jsTest.log("Reconnect the old primary to the rest of the nodes");
+// Only allow the old primary to connect to the other nodes, not the other way around.
+// This is so that the old primary will detect that it needs to step down and step itself down,
+// rather than one of the other nodes detecting this and sending it a replSetStepDown command,
+// which would cause the old primary to kill all operations and close all connections, making
+// the way the insert in the parallel shell fails nondeterministic. Rather than
+// handling all possible failure modes in the parallel shell, allowing heartbeat connectivity in
+// only one direction makes it easier for the test to fail deterministically.
+nodes[1].acceptConnectionsFrom(nodes[0]);
+nodes[2].acceptConnectionsFrom(nodes[0]);
+
+// Allow the old primary to finish stepping down so that shutdown can finish.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'}));
+
+joinMajorityWriter();
+
+// Node 0 will go into rollback after it steps down. We want to wait for that to happen, and
+// then complete, in order to get a clean shutdown.
+jsTestLog("Waiting for node 0 to roll back the failed write.");
+rst.awaitReplication();
+
+rst.stopSet();
}());
diff --git a/jstests/replsets/write_concern_after_stepdown_and_stepup.js b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
index 19143230375..daa143aa901 100644
--- a/jstests/replsets/write_concern_after_stepdown_and_stepup.js
+++ b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
@@ -4,118 +4,118 @@
* stale primary is re-elected primary before waiting for the write concern acknowledgement.
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/write_concern_util.js");
-
- var name = "writeConcernStepDownAndBackUp";
- var dbName = "wMajorityCheck";
- var collName = "stepdownAndBackUp";
-
- var rst = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {},
- {rsConfig: {priority: 0}},
- ],
- useBridge: true
+'use strict';
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/write_concern_util.js");
+
+var name = "writeConcernStepDownAndBackUp";
+var dbName = "wMajorityCheck";
+var collName = "stepdownAndBackUp";
+
+var rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+ useBridge: true
+});
+var nodes = rst.startSet();
+rst.initiate();
+
+function waitForPrimary(node) {
+ assert.soon(function() {
+ return node.adminCommand('ismaster').ismaster;
});
- var nodes = rst.startSet();
- rst.initiate();
+}
- function waitForPrimary(node) {
- assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
- });
- }
-
- function stepUp(node) {
- var primary = rst.getPrimary();
- if (primary != node) {
- assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 5}));
- }
- waitForPrimary(node);
- }
-
- jsTestLog("Make sure node 0 is primary.");
- stepUp(nodes[0]);
+function stepUp(node) {
var primary = rst.getPrimary();
- var secondaries = rst.getSecondaries();
- assert.eq(nodes[0], primary);
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
- {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Stop the secondaries from replicating.
- stopServerReplication(secondaries);
- // Stop the primary from calling into awaitReplication()
- assert.commandWorked(nodes[0].adminCommand(
- {configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'alwaysOn'}));
-
- jsTestLog("Do w:majority write that won't enter awaitReplication() until after the primary " +
- "has stepped down and back up");
- var doMajorityWrite = function() {
- // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
- // one that shouldn't be closed when the node steps down. This simulates the scenario where
- // the write was coming from a mongos.
- assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
-
- var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
- writeConcern: {w: 'majority'}
- });
- assert.writeErrorWithCode(res, ErrorCodes.InterruptedDueToReplStateChange);
- };
-
- var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
-
- jsTest.log("Disconnect primary from all secondaries");
- nodes[0].disconnect(nodes[1]);
- nodes[0].disconnect(nodes[2]);
-
- jsTest.log("Wait for a new primary to be elected");
- // Allow the secondaries to replicate again.
- restartServerReplication(secondaries);
-
- waitForPrimary(nodes[1]);
-
- jsTest.log("Do a write to the new primary");
- assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
- {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
-
- jsTest.log("Reconnect the old primary to the rest of the nodes");
- nodes[0].reconnect(nodes[1]);
- nodes[0].reconnect(nodes[2]);
-
- jsTest.log("Wait for the old primary to step down, roll back its write, and apply the " +
- "new writes from the new primary");
- waitForState(nodes[0], ReplSetTest.State.SECONDARY);
- rst.awaitReplication();
-
- // At this point all 3 nodes should have the same data
- assert.soonNoExcept(function() {
- nodes.forEach(function(node) {
- assert.eq(null,
- node.getDB(dbName).getCollection(collName).findOne({a: 2}),
- "Node " + node.host + " contained op that should have been rolled back");
- assert.neq(null,
- node.getDB(dbName).getCollection(collName).findOne({a: 3}),
- "Node " + node.host +
- " was missing op from branch of history that should have persisted");
- });
- return true;
+ if (primary != node) {
+ assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 5}));
+ }
+ waitForPrimary(node);
+}
+
+jsTestLog("Make sure node 0 is primary.");
+stepUp(nodes[0]);
+var primary = rst.getPrimary();
+var secondaries = rst.getSecondaries();
+assert.eq(nodes[0], primary);
+// Wait for all data-bearing nodes to get up to date.
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+ {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Stop the secondaries from replicating.
+stopServerReplication(secondaries);
+// Stop the primary from calling into awaitReplication()
+assert.commandWorked(nodes[0].adminCommand(
+ {configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'alwaysOn'}));
+
+jsTestLog("Do w:majority write that won't enter awaitReplication() until after the primary " +
+ "has stepped down and back up");
+var doMajorityWrite = function() {
+ // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
+ // one that shouldn't be closed when the node steps down. This simulates the scenario where
+ // the write was coming from a mongos.
+ assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
+
+ var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
+ writeConcern: {w: 'majority'}
+ });
+ assert.writeErrorWithCode(res, ErrorCodes.InterruptedDueToReplStateChange);
+};
+
+var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
+
+jsTest.log("Disconnect primary from all secondaries");
+nodes[0].disconnect(nodes[1]);
+nodes[0].disconnect(nodes[2]);
+
+jsTest.log("Wait for a new primary to be elected");
+// Allow the secondaries to replicate again.
+restartServerReplication(secondaries);
+
+waitForPrimary(nodes[1]);
+
+jsTest.log("Do a write to the new primary");
+assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+ {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
+
+jsTest.log("Reconnect the old primary to the rest of the nodes");
+nodes[0].reconnect(nodes[1]);
+nodes[0].reconnect(nodes[2]);
+
+jsTest.log("Wait for the old primary to step down, roll back its write, and apply the " +
+ "new writes from the new primary");
+waitForState(nodes[0], ReplSetTest.State.SECONDARY);
+rst.awaitReplication();
+
+// At this point all 3 nodes should have the same data
+assert.soonNoExcept(function() {
+ nodes.forEach(function(node) {
+ assert.eq(null,
+ node.getDB(dbName).getCollection(collName).findOne({a: 2}),
+ "Node " + node.host + " contained op that should have been rolled back");
+ assert.neq(null,
+ node.getDB(dbName).getCollection(collName).findOne({a: 3}),
+ "Node " + node.host +
+ " was missing op from branch of history that should have persisted");
});
+ return true;
+});
- jsTest.log("Make the original primary become primary once again");
- stepUp(nodes[0]);
+jsTest.log("Make the original primary become primary once again");
+stepUp(nodes[0]);
- jsTest.log("Unblock the thread waiting for replication of the now rolled-back write, ensure " +
- "that the write concern failed");
- assert.commandWorked(nodes[0].adminCommand(
- {configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'off'}));
+jsTest.log("Unblock the thread waiting for replication of the now rolled-back write, ensure " +
+ "that the write concern failed");
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'off'}));
- joinMajorityWriter();
+joinMajorityWriter();
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/serial_run/index_multi.js b/jstests/serial_run/index_multi.js
index a09b20fee96..41b6b217c2d 100644
--- a/jstests/serial_run/index_multi.js
+++ b/jstests/serial_run/index_multi.js
@@ -32,9 +32,14 @@ assert.writeOK(bulk.execute());
var specs = [];
var multikey = [];
-var setupDBStr = "var conn = null;" + "assert.soon(function() {" + " try {" +
- " conn = new Mongo(\"" + db.getMongo().host + "\");" + " return conn;" +
- " } catch (x) {" + " return false;" + " }" +
+var setupDBStr = "var conn = null;" +
+ "assert.soon(function() {" +
+ " try {" +
+ " conn = new Mongo(\"" + db.getMongo().host + "\");" +
+ " return conn;" +
+ " } catch (x) {" +
+ " return false;" +
+ " }" +
"}, 'Timed out waiting for temporary connection to connect', 30000, 5000);" +
"var db = conn.getDB('" + db.getName() + "');";
@@ -47,7 +52,8 @@ for (var i = 90; i < 93; i++) {
spec["field" + (i + 2)] = 1;
indexJobs.push(startParallelShell(
setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "," +
- "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "{ background: true }));" +
+ "db.results.insert(Object.extend(" +
"db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
null, // port
true)); // noconnect
@@ -62,7 +68,8 @@ for (var i = 30; i < 90; i += 2) {
spec["field" + (i + 1)] = 1;
indexJobs.push(startParallelShell(
setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
- "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "{ background: true }));" +
+ "db.results.insert(Object.extend(" +
"db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
null, // port
true)); // noconnect
@@ -76,7 +83,8 @@ for (var i = 0; i < 30; i++) {
spec["field" + i] = 1;
indexJobs.push(startParallelShell(
setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
- "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "{ background: true }));" +
+ "db.results.insert(Object.extend(" +
"db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
null, // port
true)); // noconnect
diff --git a/jstests/serial_run/srv-uri.js b/jstests/serial_run/srv-uri.js
index a3bb305aeee..ee0bc743e85 100644
--- a/jstests/serial_run/srv-uri.js
+++ b/jstests/serial_run/srv-uri.js
@@ -1,9 +1,9 @@
(function() {
- "use strict";
- const md = MongoRunner.runMongod({port: "27017", dbpath: MongoRunner.dataPath});
- assert.neq(null, md, "unable to start mongod");
- const targetURI = 'mongodb+srv://test1.test.build.10gen.cc./?ssl=false';
- const exitCode = runMongoProgram('mongo', targetURI, '--eval', ';');
- assert.eq(exitCode, 0, "Failed to connect with a `mongodb+srv://` style URI.");
- MongoRunner.stopMongod(md);
+"use strict";
+const md = MongoRunner.runMongod({port: "27017", dbpath: MongoRunner.dataPath});
+assert.neq(null, md, "unable to start mongod");
+const targetURI = 'mongodb+srv://test1.test.build.10gen.cc./?ssl=false';
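+// Running the shell with --eval ';' simply connects, evaluates a no-op statement, and exits.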
+const exitCode = runMongoProgram('mongo', targetURI, '--eval', ';');
+assert.eq(exitCode, 0, "Failed to connect with a `mongodb+srv://` style URI.");
+MongoRunner.stopMongod(md);
})();
diff --git a/jstests/sharding/accurate_count_with_predicate.js b/jstests/sharding/accurate_count_with_predicate.js
index 1cb2cdf0d8a..bb440e88873 100644
--- a/jstests/sharding/accurate_count_with_predicate.js
+++ b/jstests/sharding/accurate_count_with_predicate.js
@@ -7,37 +7,37 @@
* twice.
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
- const shard0Coll = st.shard0.getCollection("test.slowcount");
- const num = 10;
- const middle = num / 2;
+const st = new ShardingTest({shards: 2});
+const shard0Coll = st.shard0.getCollection("test.slowcount");
+const num = 10;
+const middle = num / 2;
- function getNthDocument(n) {
- return {_id: n, one: 1, x: n};
- }
+function getNthDocument(n) {
+ return {_id: n, one: 1, x: n};
+}
- // Shard the collection. Shard 0 will get keys from [0, middle) and shard 1 will get everything
- // from [middle, num).
- assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: "test"}));
- st.ensurePrimaryShard("test", st.shard0.name);
- st.shardColl(shard0Coll.getName(), {x: 1}, {x: middle}, {x: middle + 1}, "test", true);
+// Shard the collection. Shard 0 will get keys from [0, middle) and shard 1 will get everything
+// from [middle, num).
+assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: "test"}));
+st.ensurePrimaryShard("test", st.shard0.name);
+st.shardColl(shard0Coll.getName(), {x: 1}, {x: middle}, {x: middle + 1}, "test", true);
- // Insert some docs.
- for (let i = 0; i < num; i++) {
- assert.writeOK(st.getDB("test").slowcount.insert(getNthDocument(i)));
- }
+// Insert some docs.
+for (let i = 0; i < num; i++) {
+ assert.writeOK(st.getDB("test").slowcount.insert(getNthDocument(i)));
+}
- // Insert some orphan documents to shard 0. These are just documents outside the range
- // which shard 0 owns.
- for (let i = middle + 1; i < middle + 3; i++) {
- assert.writeOK(shard0Coll.insert(getNthDocument(i)));
- }
+// Insert some orphan documents into shard 0. These are just documents outside the range
+// which shard 0 owns.
+for (let i = middle + 1; i < middle + 3; i++) {
+ assert.writeOK(shard0Coll.insert(getNthDocument(i)));
+}
- // Run a count on the whole collection. The orphaned documents on shard 0 shouldn't be double
- // counted.
- assert.eq(st.getDB("test").slowcount.count({one: 1}), num);
+// Run a count on the whole collection. The orphaned documents on shard 0 shouldn't be double
+// counted.
+assert.eq(st.getDB("test").slowcount.count({one: 1}), num);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/add_and_remove_shard_from_zone.js b/jstests/sharding/add_and_remove_shard_from_zone.js
index d4773597259..d6d78b3c2cb 100644
--- a/jstests/sharding/add_and_remove_shard_from_zone.js
+++ b/jstests/sharding/add_and_remove_shard_from_zone.js
@@ -3,38 +3,38 @@
* in sharding_catalog_add_shard_to_zone_test.cpp.
*/
(function() {
- 'use strict';
+'use strict';
- let st = new ShardingTest({shards: 1});
- let mongos = st.s0;
+let st = new ShardingTest({shards: 1});
+let mongos = st.s0;
- let config = mongos.getDB('config');
- var shardName = st.shard0.shardName;
+let config = mongos.getDB('config');
+var shardName = st.shard0.shardName;
- // Test adding shard with no zone to a new zone.
- assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'x'}));
- var shardDoc = config.shards.findOne();
- assert.eq(['x'], shardDoc.tags);
+// Test adding shard with no zone to a new zone.
+assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'x'}));
+var shardDoc = config.shards.findOne();
+assert.eq(['x'], shardDoc.tags);
- // Test adding zone to a shard with existing zones.
- assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'y'}));
- shardDoc = config.shards.findOne();
- assert.eq(['x', 'y'], shardDoc.tags);
+// Test adding zone to a shard with existing zones.
+assert.commandWorked(mongos.adminCommand({addShardToZone: shardName, zone: 'y'}));
+shardDoc = config.shards.findOne();
+assert.eq(['x', 'y'], shardDoc.tags);
- // Test removing shard from existing zone.
- assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
- shardDoc = config.shards.findOne();
- assert.eq(['y'], shardDoc.tags);
+// Test removing shard from existing zone.
+assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
+shardDoc = config.shards.findOne();
+assert.eq(['y'], shardDoc.tags);
- // Test removing shard from zone that no longer exists.
- assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
- shardDoc = config.shards.findOne();
- assert.eq(['y'], shardDoc.tags);
+// Test removing shard from zone that no longer exists.
+assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
+shardDoc = config.shards.findOne();
+assert.eq(['y'], shardDoc.tags);
- // Test removing the last zone from a shard
- assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'y'}));
- shardDoc = config.shards.findOne();
- assert.eq([], shardDoc.tags);
+// Test removing the last zone from a shard
+assert.commandWorked(mongos.adminCommand({removeShardFromZone: shardName, zone: 'y'}));
+shardDoc = config.shards.findOne();
+assert.eq([], shardDoc.tags);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 4fcfde18f83..b676cb474e7 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -1,80 +1,77 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
+var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
- // Create a shard and add a database; if the database is not duplicated the mongod should accept
- // it as shard
- var conn1 = MongoRunner.runMongod({'shardsvr': ""});
- var db1 = conn1.getDB("testDB");
+// Create a shard and add a database; if the database is not duplicated the mongod should accept
+// it as a shard
+var conn1 = MongoRunner.runMongod({'shardsvr': ""});
+var db1 = conn1.getDB("testDB");
- var numObjs = 3;
- for (var i = 0; i < numObjs; i++) {
- assert.writeOK(db1.foo.save({a: i}));
- }
+var numObjs = 3;
+for (var i = 0; i < numObjs; i++) {
+ assert.writeOK(db1.foo.save({a: i}));
+}
- var configDB = s.s.getDB('config');
- assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
+var configDB = s.s.getDB('config');
+assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
- var newShard = "myShard";
- assert.commandWorked(
- s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
+var newShard = "myShard";
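+// maxSize is given in megabytes and caps how much data the balancer will place on this shard.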
+assert.commandWorked(
+ s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
- assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
+assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
- var newShardDoc = configDB.shards.findOne({_id: newShard});
- assert.eq(1024, newShardDoc.maxSize);
+var newShardDoc = configDB.shards.findOne({_id: newShard});
+assert.eq(1024, newShardDoc.maxSize);
- // a mongod with an existing database name should not be allowed to become a shard
- var conn2 = MongoRunner.runMongod({'shardsvr': ""});
+// a mongod with an existing database name should not be allowed to become a shard
+var conn2 = MongoRunner.runMongod({'shardsvr': ""});
- var db2 = conn2.getDB("otherDB");
- assert.writeOK(db2.foo.save({a: 1}));
+var db2 = conn2.getDB("otherDB");
+assert.writeOK(db2.foo.save({a: 1}));
- var db3 = conn2.getDB("testDB");
- assert.writeOK(db3.foo.save({a: 1}));
+var db3 = conn2.getDB("testDB");
+assert.writeOK(db3.foo.save({a: 1}));
- s.config.databases.find().forEach(printjson);
+s.config.databases.find().forEach(printjson);
- var rejectedShard = "rejectedShard";
- assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
- "accepted mongod with duplicate db");
+var rejectedShard = "rejectedShard";
+assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
+ "accepted mongod with duplicate db");
- // Check that all collection that were local to the mongod's are accessible through the mongos
- var sdb1 = s.getDB("testDB");
- assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
+// Check that all collections that were local to the mongods are accessible through the mongos
+var sdb1 = s.getDB("testDB");
+assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
- var sdb2 = s.getDB("otherDB");
- assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
+var sdb2 = s.getDB("otherDB");
+assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
- // make sure we can move a DB from the original mongod to a previoulsy existing shard
- assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
- newShard,
- "DB primary is wrong");
+// make sure we can move a DB from the original mongod to a previously existing shard
+assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ newShard,
+ "DB primary is wrong");
- var origShard = s.getNonPrimaries("testDB")[0];
- s.ensurePrimaryShard("testDB", origShard);
- assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
- origShard,
- "DB primary didn't move");
- assert.eq(
- numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
+var origShard = s.getNonPrimaries("testDB")[0];
+s.ensurePrimaryShard("testDB", origShard);
+assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ origShard,
+ "DB primary didn't move");
+assert.eq(
+    numObjs, sdb1.foo.count(), "wrong count after moving database that existed before addshard");
- // make sure we can shard the original collections
- sdb1.foo.ensureIndex({a: 1},
- {unique: true}); // can't shard populated collection without an index
- s.adminCommand({enablesharding: "testDB"});
- s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
- s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
- assert.eq(2,
- s.config.chunks.count({"ns": "testDB.foo"}),
- "wrong chunk number after splitting collection that existed before");
- assert.eq(
- numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
+// make sure we can shard the original collections
+sdb1.foo.ensureIndex({a: 1}, {unique: true}); // can't shard populated collection without an index
+s.adminCommand({enablesharding: "testDB"});
+s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
+s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
+assert.eq(2,
+ s.config.chunks.count({"ns": "testDB.foo"}),
+ "wrong chunk number after splitting collection that existed before");
+assert.eq(numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
- MongoRunner.stopMongod(conn1);
- MongoRunner.stopMongod(conn2);
-
- s.stop();
+MongoRunner.stopMongod(conn1);
+MongoRunner.stopMongod(conn2);
+s.stop();
})();
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index cb61d4b4245..7fb1ab2efe1 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -4,196 +4,195 @@
*/
(function() {
- var addShardRes;
+var addShardRes;
- var assertAddShardSucceeded = function(res, shardName) {
- assert.commandWorked(res);
+var assertAddShardSucceeded = function(res, shardName) {
+ assert.commandWorked(res);
- // If a shard name was specified, make sure that the name the addShard command reports the
- // shard was added with matches the specified name.
- if (shardName) {
- assert.eq(shardName,
- res.shardAdded,
- "name returned by addShard does not match name specified in addShard");
- }
-
- // Make sure the shard shows up in config.shards with the shardName reported by the
- // addShard command.
- assert.neq(null,
- st.s.getDB('config').shards.findOne({_id: res.shardAdded}),
- "newly added shard " + res.shardAdded + " not found in config.shards");
- };
-
- // Note: this method expects that the failure is *not* that the specified shardName is already
- // the shardName of an existing shard.
- var assertAddShardFailed = function(res, shardName) {
- assert.commandFailed(res);
-
- // If a shard name was specified in the addShard, make sure no shard with its name shows up
- // in config.shards.
- if (shardName) {
- assert.eq(null,
- st.s.getDB('config').shards.findOne({_id: shardName}),
- "addShard for " + shardName +
- " reported failure, but shard shows up in config.shards");
- }
- };
-
- var removeShardWithName = function(shardName) {
- var res = st.s.adminCommand({removeShard: shardName});
- assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: shardName});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "removeShard never completed for shard " + shardName);
- };
-
- var st = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
-
- // Add one shard since the last shard cannot be removed.
- var normalShard = MongoRunner.runMongod({shardsvr: ''});
- st.s.adminCommand({addShard: normalShard.name, name: 'normalShard'});
-
- // Allocate a port that can be used to test adding invalid hosts.
- var portWithoutHostRunning = allocatePort();
-
- // 1. Test adding a *standalone*
-
- // 1.a. with or without specifying the shardName.
-
- jsTest.log("Adding a standalone *without* a specified shardName should succeed.");
- let standalone1 = MongoRunner.runMongod({shardsvr: ''});
- addShardRes = st.s.adminCommand({addshard: standalone1.name});
- assertAddShardSucceeded(addShardRes);
- removeShardWithName(addShardRes.shardAdded);
- MongoRunner.stopMongod(standalone1);
-
- jsTest.log("Adding a standalone *with* a specified shardName should succeed.");
- let standalone2 = MongoRunner.runMongod({shardsvr: ''});
- addShardRes = st.s.adminCommand({addshard: standalone2.name, name: "shardName"});
- assertAddShardSucceeded(addShardRes, "shardName");
- removeShardWithName(addShardRes.shardAdded);
- MongoRunner.stopMongod(standalone2);
-
- // 1.b. with an invalid hostname.
-
- jsTest.log("Adding a standalone with a non-existing host should fail.");
- addShardRes = st.s.adminCommand({addShard: getHostName() + ":" + portWithoutHostRunning});
- assertAddShardFailed(addShardRes);
-
- // 2. Test adding a *replica set* with an ordinary set name
-
- // 2.a. with or without specifying the shardName.
-
- jsTest.log("Adding a replica set without a specified shardName should succeed.");
- let rst1 = new ReplSetTest({nodes: 1});
- rst1.startSet({shardsvr: ''});
- rst1.initiate();
- addShardRes = st.s.adminCommand({addShard: rst1.getURL()});
- assertAddShardSucceeded(addShardRes);
- assert.eq(rst1.name, addShardRes.shardAdded);
- removeShardWithName(addShardRes.shardAdded);
- rst1.stopSet();
-
- jsTest.log(
- "Adding a replica set with a specified shardName that matches the set's name should succeed.");
- let rst2 = new ReplSetTest({nodes: 1});
- rst2.startSet({shardsvr: ''});
- rst2.initiate();
- addShardRes = st.s.adminCommand({addShard: rst2.getURL(), name: rst2.name});
- assertAddShardSucceeded(addShardRes, rst2.name);
- removeShardWithName(addShardRes.shardAdded);
- rst2.stopSet();
-
- let rst3 = new ReplSetTest({nodes: 1});
- rst3.startSet({shardsvr: ''});
- rst3.initiate();
-
- jsTest.log(
- "Adding a replica set with a specified shardName that differs from the set's name should succeed.");
- addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "differentShardName"});
- assertAddShardSucceeded(addShardRes, "differentShardName");
- removeShardWithName(addShardRes.shardAdded);
-
- jsTest.log("Adding a replica with a specified shardName of 'config' should fail.");
- addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "config"});
- assertAddShardFailed(addShardRes, "config");
-
- // 2.b. with invalid hostnames.
-
- jsTest.log("Adding a replica set with only non-existing hosts should fail.");
- addShardRes =
- st.s.adminCommand({addShard: rst3.name + "/NonExistingHost:" + portWithoutHostRunning});
- assertAddShardFailed(addShardRes);
-
- jsTest.log("Adding a replica set with mixed existing/non-existing hosts should fail.");
- addShardRes = st.s.adminCommand({
- addShard:
- rst3.name + "/" + rst3.getPrimary().name + ",NonExistingHost:" + portWithoutHostRunning
- });
- assertAddShardFailed(addShardRes);
-
- rst3.stopSet();
-
- // 3. Test adding a replica set whose *set name* is "config" with or without specifying the
- // shardName.
-
- let rst4 = new ReplSetTest({name: "config", nodes: 1});
- rst4.startSet({shardsvr: ''});
- rst4.initiate();
-
- jsTest.log(
- "Adding a replica set whose setName is config without specifying shardName should fail.");
- addShardRes = st.s.adminCommand({addShard: rst4.getURL()});
- assertAddShardFailed(addShardRes);
-
- jsTest.log(
- "Adding a replica set whose setName is config with specified shardName 'config' should fail.");
- addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: rst4.name});
- assertAddShardFailed(addShardRes, rst4.name);
-
- jsTest.log(
- "Adding a replica set whose setName is config with a non-'config' shardName should succeed");
- addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: "nonConfig"});
- assertAddShardSucceeded(addShardRes, "nonConfig");
- removeShardWithName(addShardRes.shardAdded);
-
- rst4.stopSet();
-
- // 4. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
-
- let rst5 = new ReplSetTest({name: "admin", nodes: 1});
- rst5.startSet({shardsvr: ''});
- rst5.initiate();
-
- jsTest.log("A replica set whose set name is 'admin' should be able to be written to.");
-
- addShardRes = st.s.adminCommand({addShard: rst5.getURL()});
- assertAddShardSucceeded(addShardRes);
-
- // Ensure the write goes to the newly added shard.
- assert.commandWorked(st.s.getDB('test').runCommand({create: "foo"}));
- var res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
- assert.neq(null, res);
- if (res.primary != addShardRes.shardAdded) {
- assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
+    // If a shard name was specified, make sure that the shard name reported by the addShard
+    // command matches the specified name.
+ if (shardName) {
+ assert.eq(shardName,
+ res.shardAdded,
+ "name returned by addShard does not match name specified in addShard");
}
- assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
- assert.neq(null, rst5.getPrimary().getDB('test').foo.findOne());
-
- assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
-
- removeShardWithName(addShardRes.shardAdded);
-
- rst5.stopSet();
+ // Make sure the shard shows up in config.shards with the shardName reported by the
+ // addShard command.
+ assert.neq(null,
+ st.s.getDB('config').shards.findOne({_id: res.shardAdded}),
+ "newly added shard " + res.shardAdded + " not found in config.shards");
+};
+
+// Note: this method expects that the failure is *not* that the specified shardName is already
+// the shardName of an existing shard.
+var assertAddShardFailed = function(res, shardName) {
+ assert.commandFailed(res);
+
+ // If a shard name was specified in the addShard, make sure no shard with its name shows up
+ // in config.shards.
+ if (shardName) {
+ assert.eq(
+ null,
+ st.s.getDB('config').shards.findOne({_id: shardName}),
+ "addShard for " + shardName + " reported failure, but shard shows up in config.shards");
+ }
+};
+
+var removeShardWithName = function(shardName) {
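+    // removeShard is two-phase: the first call returns state 'started' and begins draining;
+    // later calls return 'completed' once the shard can actually be removed.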
+ var res = st.s.adminCommand({removeShard: shardName});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shardName});
+ assert.commandWorked(res);
+ return ('completed' === res.state);
+ }, "removeShard never completed for shard " + shardName);
+};
+
+var st = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+
+// Add one shard since the last shard cannot be removed.
+var normalShard = MongoRunner.runMongod({shardsvr: ''});
+st.s.adminCommand({addShard: normalShard.name, name: 'normalShard'});
+
+// Allocate a port that can be used to test adding invalid hosts.
+var portWithoutHostRunning = allocatePort();
+
+// 1. Test adding a *standalone*
+
+// 1.a. with or without specifying the shardName.
+
+jsTest.log("Adding a standalone *without* a specified shardName should succeed.");
+let standalone1 = MongoRunner.runMongod({shardsvr: ''});
+addShardRes = st.s.adminCommand({addshard: standalone1.name});
+assertAddShardSucceeded(addShardRes);
+removeShardWithName(addShardRes.shardAdded);
+MongoRunner.stopMongod(standalone1);
+
+jsTest.log("Adding a standalone *with* a specified shardName should succeed.");
+let standalone2 = MongoRunner.runMongod({shardsvr: ''});
+addShardRes = st.s.adminCommand({addshard: standalone2.name, name: "shardName"});
+assertAddShardSucceeded(addShardRes, "shardName");
+removeShardWithName(addShardRes.shardAdded);
+MongoRunner.stopMongod(standalone2);
+
+// 1.b. with an invalid hostname.
+
+jsTest.log("Adding a standalone with a non-existing host should fail.");
+addShardRes = st.s.adminCommand({addShard: getHostName() + ":" + portWithoutHostRunning});
+assertAddShardFailed(addShardRes);
+
+// 2. Test adding a *replica set* with an ordinary set name
+
+// 2.a. with or without specifying the shardName.
+
+jsTest.log("Adding a replica set without a specified shardName should succeed.");
+let rst1 = new ReplSetTest({nodes: 1});
+rst1.startSet({shardsvr: ''});
+rst1.initiate();
+addShardRes = st.s.adminCommand({addShard: rst1.getURL()});
+assertAddShardSucceeded(addShardRes);
+assert.eq(rst1.name, addShardRes.shardAdded);
+removeShardWithName(addShardRes.shardAdded);
+rst1.stopSet();
+
+jsTest.log(
+ "Adding a replica set with a specified shardName that matches the set's name should succeed.");
+let rst2 = new ReplSetTest({nodes: 1});
+rst2.startSet({shardsvr: ''});
+rst2.initiate();
+addShardRes = st.s.adminCommand({addShard: rst2.getURL(), name: rst2.name});
+assertAddShardSucceeded(addShardRes, rst2.name);
+removeShardWithName(addShardRes.shardAdded);
+rst2.stopSet();
+
+let rst3 = new ReplSetTest({nodes: 1});
+rst3.startSet({shardsvr: ''});
+rst3.initiate();
+
+jsTest.log(
+ "Adding a replica set with a specified shardName that differs from the set's name should succeed.");
+addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "differentShardName"});
+assertAddShardSucceeded(addShardRes, "differentShardName");
+removeShardWithName(addShardRes.shardAdded);
+
+jsTest.log("Adding a replica with a specified shardName of 'config' should fail.");
+addShardRes = st.s.adminCommand({addShard: rst3.getURL(), name: "config"});
+assertAddShardFailed(addShardRes, "config");
+
+// 2.b. with invalid hostnames.
+
+jsTest.log("Adding a replica set with only non-existing hosts should fail.");
+addShardRes =
+ st.s.adminCommand({addShard: rst3.name + "/NonExistingHost:" + portWithoutHostRunning});
+assertAddShardFailed(addShardRes);
+
+jsTest.log("Adding a replica set with mixed existing/non-existing hosts should fail.");
+addShardRes = st.s.adminCommand({
+ addShard:
+ rst3.name + "/" + rst3.getPrimary().name + ",NonExistingHost:" + portWithoutHostRunning
+});
+assertAddShardFailed(addShardRes);
+
+rst3.stopSet();
+
+// 3. Test adding a replica set whose *set name* is "config" with or without specifying the
+// shardName.
+
+let rst4 = new ReplSetTest({name: "config", nodes: 1});
+rst4.startSet({shardsvr: ''});
+rst4.initiate();
+
+jsTest.log(
+ "Adding a replica set whose setName is config without specifying shardName should fail.");
+addShardRes = st.s.adminCommand({addShard: rst4.getURL()});
+assertAddShardFailed(addShardRes);
+
+jsTest.log(
+ "Adding a replica set whose setName is config with specified shardName 'config' should fail.");
+addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: rst4.name});
+assertAddShardFailed(addShardRes, rst4.name);
- st.stop();
- MongoRunner.stopMongod(normalShard);
+jsTest.log(
+ "Adding a replica set whose setName is config with a non-'config' shardName should succeed");
+addShardRes = st.s.adminCommand({addShard: rst4.getURL(), name: "nonConfig"});
+assertAddShardSucceeded(addShardRes, "nonConfig");
+removeShardWithName(addShardRes.shardAdded);
+
+rst4.stopSet();
+
+// 4. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
+
+let rst5 = new ReplSetTest({name: "admin", nodes: 1});
+rst5.startSet({shardsvr: ''});
+rst5.initiate();
+
+jsTest.log("A replica set whose set name is 'admin' should be able to be written to.");
+addShardRes = st.s.adminCommand({addShard: rst5.getURL()});
+assertAddShardSucceeded(addShardRes);
+
+// Ensure the write goes to the newly added shard.
+assert.commandWorked(st.s.getDB('test').runCommand({create: "foo"}));
+var res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
+assert.neq(null, res);
+if (res.primary != addShardRes.shardAdded) {
+ assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
+}
+
+assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
+assert.neq(null, rst5.getPrimary().getDB('test').foo.findOne());
+
+assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
+
+removeShardWithName(addShardRes.shardAdded);
+
+rst5.stopSet();
+
+st.stop();
+MongoRunner.stopMongod(normalShard);
})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index 67d4641f1e2..a23d0a13797 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -1,64 +1,63 @@
// A replica set's passive nodes should be okay to add as part of a shard config
(function() {
- var s = new ShardingTest({name: "addshard4", shards: 2, mongos: 1, other: {useHostname: true}});
+var s = new ShardingTest({name: "addshard4", shards: 2, mongos: 1, other: {useHostname: true}});
- var r = new ReplSetTest({name: "addshard4", nodes: 3, nodeOptions: {shardsvr: ""}});
+var r = new ReplSetTest({name: "addshard4", nodes: 3, nodeOptions: {shardsvr: ""}});
- r.startSet();
+r.startSet();
- var config = r.getReplSetConfig();
- config.members[2].priority = 0;
+var config = r.getReplSetConfig();
+config.members[2].priority = 0;
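+// A priority-0 member can never become primary; it is the "passive" node this test exercises.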
- r.initiate(config);
- // Wait for replica set to be fully initialized - could take some time
- // to pre-allocate files on slow systems
- r.awaitReplication();
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
- var master = r.getPrimary();
+var master = r.getPrimary();
- var members = config.members.map(function(elem) {
- return elem.host;
- });
- var shardName = "addshard4/" + members.join(",");
- var invalidShardName = "addshard4/foobar";
+var members = config.members.map(function(elem) {
+ return elem.host;
+});
+var shardName = "addshard4/" + members.join(",");
+var invalidShardName = "addshard4/foobar";
- print("adding shard " + shardName);
+print("adding shard " + shardName);
- // First try adding shard with the correct replica set name but incorrect hostname
- // This will make sure that the metadata for this replica set name is cleaned up
- // so that the set can be added correctly when it has the proper hostnames.
- assert.throws(function() {
- s.adminCommand({"addshard": invalidShardName});
- });
+// First try adding a shard with the correct replica set name but an incorrect hostname.
+// This will make sure that the metadata for this replica set name is cleaned up
+// so that the set can be added correctly when it has the proper hostnames.
+assert.throws(function() {
+ s.adminCommand({"addshard": invalidShardName});
+});
- var result = s.adminCommand({"addshard": shardName});
+var result = s.adminCommand({"addshard": shardName});
- printjson(result);
- assert.eq(result, true);
+printjson(result);
+assert.eq(result, true);
- r.stopSet();
- r = new ReplSetTest({name: "addshard42", nodes: 3, nodeOptions: {shardsvr: ""}});
- r.startSet();
+r.stopSet();
+r = new ReplSetTest({name: "addshard42", nodes: 3, nodeOptions: {shardsvr: ""}});
+r.startSet();
- config = r.getReplSetConfig();
- config.members[2].arbiterOnly = true;
+config = r.getReplSetConfig();
+config.members[2].arbiterOnly = true;
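+// This time the third member is an arbiter, so the seed host used below holds no data.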
- r.initiate(config);
- // Wait for replica set to be fully initialized - could take some time
- // to pre-allocate files on slow systems
- r.awaitReplication();
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
- master = r.getPrimary();
+master = r.getPrimary();
- print("adding shard addshard42");
+print("adding shard addshard42");
- result = s.adminCommand({"addshard": "addshard42/" + config.members[2].host});
+result = s.adminCommand({"addshard": "addshard42/" + config.members[2].host});
- printjson(result);
- assert.eq(result, true);
-
- s.stop();
- r.stopSet();
+printjson(result);
+assert.eq(result, true);
+s.stop();
+r.stopSet();
})();
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index f2e6068d023..31d2c10f505 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -1,46 +1,46 @@
// Tests that dropping and re-adding a shard with the same name to a cluster doesn't mess up
// migrations
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s;
- var admin = mongos.getDB('admin');
- var coll = mongos.getCollection('foo.bar');
+var mongos = st.s;
+var admin = mongos.getDB('admin');
+var coll = mongos.getCollection('foo.bar');
- // Shard collection
- assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));
+// Shard collection
+assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));
- // Just to be sure what primary we start from
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
+// Just to be sure what primary we start from
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
- // Insert one document
- assert.writeOK(coll.insert({hello: 'world'}));
+// Insert one document
+assert.writeOK(coll.insert({hello: 'world'}));
- // Migrate the collection to and from shard1 so shard0 loads the shard1 host
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+// Migrate the collection to and from shard1 so shard0 loads the shard1 host
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Drop and re-add shard with the same name but a new host.
- assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
- assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
+// Drop and re-add shard with the same name but a new host.
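+// removeShard is run twice: the first call starts draining, the second completes the removal.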
+assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
+assert.commandWorked(mongos.adminCommand({removeShard: st.shard1.shardName}));
- var shard2 = MongoRunner.runMongod({'shardsvr': ''});
- assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: st.shard1.shardName}));
+var shard2 = MongoRunner.runMongod({'shardsvr': ''});
+assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: st.shard1.shardName}));
- jsTest.log('Shard was dropped and re-added with same name...');
- st.printShardingStatus();
+jsTest.log('Shard was dropped and re-added with same name...');
+st.printShardingStatus();
- // Try a migration
- assert.commandWorked(
- mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName}));
+// Try a migration
+assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: st.shard1.shardName}));
- assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
+assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
- st.stop();
- MongoRunner.stopMongod(shard2);
+st.stop();
+MongoRunner.stopMongod(shard2);
})();
diff --git a/jstests/sharding/addshard6.js b/jstests/sharding/addshard6.js
index df23fbc4939..b69350e76b6 100644
--- a/jstests/sharding/addshard6.js
+++ b/jstests/sharding/addshard6.js
@@ -3,48 +3,46 @@
*/
(function() {
- var addShardRes;
-
- // Note: this method expects that the failure is *not* that the specified shardName is already
- // the shardName of an existing shard.
- var assertAddShardFailed = function(res, shardName) {
- assert.commandFailed(res);
-
- // If a shard name was specified in the addShard, make sure no shard with its name shows up
- // in config.shards.
- if (shardName) {
- assert.eq(null,
- st.s.getDB('config').shards.findOne({_id: shardName}),
- "addShard for " + shardName +
- " reported failure, but shard shows up in config.shards");
- }
- };
-
- var st = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
-
- var configRS = new ReplSetTest({name: "configsvrReplicaSet", nodes: 1});
- configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
- configRS.initiate();
-
- jsTest.log("Adding a config server replica set without a specified shardName should fail.");
- addShardRes = st.s.adminCommand({addShard: configRS.getURL()});
- assertAddShardFailed(addShardRes);
-
- jsTest.log(
- "Adding a config server replica set with a shardName that matches the set's name should fail.");
- addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: configRS.name});
- assertAddShardFailed(addShardRes, configRS.name);
-
- jsTest.log(
- "Adding a config server replica set even with a non-'config' shardName should fail.");
- addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: "nonConfig"});
- assertAddShardFailed(addShardRes, "nonConfig");
-
- configRS.stopSet();
-
- st.stop();
-
+var addShardRes;
+
+// Note: this method expects that the failure is *not* that the specified shardName is already
+// the shardName of an existing shard.
+var assertAddShardFailed = function(res, shardName) {
+ assert.commandFailed(res);
+
+ // If a shard name was specified in the addShard, make sure no shard with its name shows up
+ // in config.shards.
+ if (shardName) {
+ assert.eq(
+ null,
+ st.s.getDB('config').shards.findOne({_id: shardName}),
+ "addShard for " + shardName + " reported failure, but shard shows up in config.shards");
+ }
+};
+
+var st = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+
+var configRS = new ReplSetTest({name: "configsvrReplicaSet", nodes: 1});
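+// Config server replica sets are required to run the WiredTiger storage engine.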
+configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
+configRS.initiate();
+
+jsTest.log("Adding a config server replica set without a specified shardName should fail.");
+addShardRes = st.s.adminCommand({addShard: configRS.getURL()});
+assertAddShardFailed(addShardRes);
+
+jsTest.log(
+ "Adding a config server replica set with a shardName that matches the set's name should fail.");
+addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: configRS.name});
+assertAddShardFailed(addShardRes, configRS.name);
+
+jsTest.log("Adding a config server replica set even with a non-'config' shardName should fail.");
+addShardRes = st.s.adminCommand({addShard: configRS.getURL(), name: "nonConfig"});
+assertAddShardFailed(addShardRes, "nonConfig");
+
+configRS.stopSet();
+
+st.stop();
})();
diff --git a/jstests/sharding/addshard_idempotent.js b/jstests/sharding/addshard_idempotent.js
index 38000b62e49..800cf5fd88f 100644
--- a/jstests/sharding/addshard_idempotent.js
+++ b/jstests/sharding/addshard_idempotent.js
@@ -1,57 +1,56 @@
// Tests that adding an equivalent shard multiple times returns success.
(function() {
- 'use strict';
-
- var st = new ShardingTest({name: "add_shard_idempotent", shards: 0});
-
- jsTestLog("Testing adding a standalone shard multiple times");
- var shard1 = MongoRunner.runMongod({'shardsvr': ""});
- assert.commandWorked(
- st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
-
- // Running the identical addShard command should succeed.
- assert.commandWorked(
- st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
-
- // Trying to add the same shard with different options should fail
- assert.commandFailed(
- st.admin.runCommand({addshard: shard1.host, name: "newShard1"})); // No maxSize
-
- assert.commandFailed(st.admin.runCommand(
- {addshard: shard1.host, name: "a different shard name", maxSize: 1024}));
-
- jsTestLog("Testing adding a replica set shard multiple times");
- var shard2 = new ReplSetTest({name: 'rsShard', nodes: 3, nodeOptions: {shardsvr: ""}});
- shard2.startSet();
- shard2.initiate();
- shard2.getPrimary(); // Wait for there to be a primary
- var shard2SeedList1 = shard2.name + "/" + shard2.nodes[0].host;
- var shard2SeedList2 = shard2.name + "/" + shard2.nodes[2].host;
-
- assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
-
- // Running the identical addShard command should succeed.
- assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
-
- // We can only compare replica sets by their set name, so calling addShard with a different
- // seed list should still be considered a successful no-op.
- assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList2, name: "newShard2"}));
-
- // Verify that the config.shards collection looks right.
- var shards = st.s.getDB('config').shards.find().toArray();
- assert.eq(2, shards.length);
- for (var i = 0; i < shards.length; i++) {
- var shard = shards[i];
- if (shard._id == 'newShard1') {
- assert.eq(shard1.host, shard.host);
- assert.eq(1024, shard.maxSize);
- } else {
- assert.eq('newShard2', shard._id);
- assert.eq(shard2.getURL(), shard.host);
- }
+'use strict';
+
+var st = new ShardingTest({name: "add_shard_idempotent", shards: 0});
+
+jsTestLog("Testing adding a standalone shard multiple times");
+var shard1 = MongoRunner.runMongod({'shardsvr': ""});
+assert.commandWorked(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
+
+// Running the identical addShard command should succeed.
+assert.commandWorked(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1", maxSize: 1024}));
+
+// Trying to add the same shard with different options should fail
+assert.commandFailed(
+ st.admin.runCommand({addshard: shard1.host, name: "newShard1"})); // No maxSize
+
+assert.commandFailed(
+ st.admin.runCommand({addshard: shard1.host, name: "a different shard name", maxSize: 1024}));
+
+jsTestLog("Testing adding a replica set shard multiple times");
+var shard2 = new ReplSetTest({name: 'rsShard', nodes: 3, nodeOptions: {shardsvr: ""}});
+shard2.startSet();
+shard2.initiate();
+shard2.getPrimary(); // Wait for there to be a primary
+var shard2SeedList1 = shard2.name + "/" + shard2.nodes[0].host;
+var shard2SeedList2 = shard2.name + "/" + shard2.nodes[2].host;
+
+assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
+
+// Running the identical addShard command should succeed.
+assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList1, name: "newShard2"}));
+
+// We can only compare replica sets by their set name, so calling addShard with a different
+// seed list should still be considered a successful no-op.
+assert.commandWorked(st.admin.runCommand({addshard: shard2SeedList2, name: "newShard2"}));
+
+// Verify that the config.shards collection looks right.
+var shards = st.s.getDB('config').shards.find().toArray();
+assert.eq(2, shards.length);
+for (var i = 0; i < shards.length; i++) {
+ var shard = shards[i];
+ if (shard._id == 'newShard1') {
+ assert.eq(shard1.host, shard.host);
+ assert.eq(1024, shard.maxSize);
+ } else {
+ assert.eq('newShard2', shard._id);
+ assert.eq(shard2.getURL(), shard.host);
}
- MongoRunner.stopMongod(shard1);
- shard2.stopSet();
- st.stop();
-
+}
+MongoRunner.stopMongod(shard1);
+shard2.stopSet();
+st.stop();
})();
diff --git a/jstests/sharding/advance_cluster_time_action_type.js b/jstests/sharding/advance_cluster_time_action_type.js
index 676dde8b62e..1fff92c5e1d 100644
--- a/jstests/sharding/advance_cluster_time_action_type.js
+++ b/jstests/sharding/advance_cluster_time_action_type.js
@@ -3,60 +3,65 @@
*/
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- let st = new ShardingTest({
- mongos: 1,
- config: 1,
- shards: 1,
- keyFile: 'jstests/libs/key1',
- other: {shardAsReplicaSet: false}
- });
-
- let adminDB = st.s.getDB('admin');
-
- assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
- assert.eq(1, adminDB.auth("admin", "admin"));
-
- assert.commandWorked(adminDB.runCommand({
- createRole: "advanceClusterTimeRole",
- privileges: [{resource: {cluster: true}, actions: ["advanceClusterTime"]}],
- roles: []
- }));
-
- let testDB = adminDB.getSiblingDB("testDB");
-
- assert.commandWorked(
- testDB.runCommand({createUser: 'NotTrusted', pwd: 'pwd', roles: ['readWrite']}));
- assert.commandWorked(testDB.runCommand({
- createUser: 'Trusted',
- pwd: 'pwd',
- roles: [{role: 'advanceClusterTimeRole', db: 'admin'}, 'readWrite']
- }));
- assert.eq(1, testDB.auth("NotTrusted", "pwd"));
-
- let res = testDB.runCommand({insert: "foo", documents: [{_id: 0}]});
- assert.commandWorked(res);
-
- let clusterTime = Object.assign({}, res.$clusterTime);
- let clusterTimeTS = new Timestamp(clusterTime.clusterTime.getTime() + 1000, 0);
- clusterTime.clusterTime = clusterTimeTS;
-
- const cmdObj = {find: "foo", limit: 1, singleBatch: true, $clusterTime: clusterTime};
- jsTestLog("running NonTrusted. command: " + tojson(cmdObj));
- res = testDB.runCommand(cmdObj);
- assert.commandFailed(res, "Command request was: " + tojsononeline(cmdObj));
-
- assert.eq(1, testDB.auth("Trusted", "pwd"));
- jsTestLog("running Trusted. command: " + tojson(cmdObj));
- res = testDB.runCommand(cmdObj);
- assert.commandWorked(res, "Command request was: " + tojsononeline(cmdObj));
-
- testDB.logout();
-
- st.stop();
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+let st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ shards: 1,
+ keyFile: 'jstests/libs/key1',
+ other: {shardAsReplicaSet: false}
+});
+
+let adminDB = st.s.getDB('admin');
+
+assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+assert.eq(1, adminDB.auth("admin", "admin"));
+
+assert.commandWorked(adminDB.runCommand({
+ createRole: "advanceClusterTimeRole",
+ privileges: [{resource: {cluster: true}, actions: ["advanceClusterTime"]}],
+ roles: []
+}));
+
+let testDB = adminDB.getSiblingDB("testDB");
+
+assert.commandWorked(
+ testDB.runCommand({createUser: 'NotTrusted', pwd: 'pwd', roles: ['readWrite']}));
+assert.commandWorked(testDB.runCommand({
+ createUser: 'Trusted',
+ pwd: 'pwd',
+ roles: [{role: 'advanceClusterTimeRole', db: 'admin'}, 'readWrite']
+}));
+assert.eq(1, testDB.auth("NotTrusted", "pwd"));
+
+let res = testDB.runCommand({insert: "foo", documents: [{_id: 0}]});
+assert.commandWorked(res);
+
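+// Craft a $clusterTime 1000 seconds ahead of the latest time the server returned. Gossiping a
+// time the client cannot sign requires the advanceClusterTime privilege to be accepted.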
+let clusterTime = Object.assign({}, res.$clusterTime);
+let clusterTimeTS = new Timestamp(clusterTime.clusterTime.getTime() + 1000, 0);
+clusterTime.clusterTime = clusterTimeTS;
+
+const cmdObj = {
+ find: "foo",
+ limit: 1,
+ singleBatch: true,
+ $clusterTime: clusterTime
+};
+jsTestLog("running NonTrusted. command: " + tojson(cmdObj));
+res = testDB.runCommand(cmdObj);
+assert.commandFailed(res, "Command request was: " + tojsononeline(cmdObj));
+
+assert.eq(1, testDB.auth("Trusted", "pwd"));
+jsTestLog("running Trusted. command: " + tojson(cmdObj));
+res = testDB.runCommand(cmdObj);
+assert.commandWorked(res, "Command request was: " + tojsononeline(cmdObj));
+
+testDB.logout();
+
+st.stop();
})();
diff --git a/jstests/sharding/advance_logical_time_with_valid_signature.js b/jstests/sharding/advance_logical_time_with_valid_signature.js
index bc9f8d86353..fccd047f6fd 100644
--- a/jstests/sharding/advance_logical_time_with_valid_signature.js
+++ b/jstests/sharding/advance_logical_time_with_valid_signature.js
@@ -3,42 +3,42 @@
* cluster time.
*/
(function() {
- "use strict";
+"use strict";
- // Setup 2 mongos processes with mongobridge.
- let st = new ShardingTest({shards: 1, mongos: 2, useBridge: true});
+// Setup 2 mongos processes with mongobridge.
+let st = new ShardingTest({shards: 1, mongos: 2, useBridge: true});
- // Sever outgoing communications from the second mongos.
- st.s0.disconnect(st.s1);
- st._configServers.forEach(function(configSvr) {
- configSvr.disconnect(st.s1);
- });
+// Sever outgoing communications from the second mongos.
+st.s0.disconnect(st.s1);
+st._configServers.forEach(function(configSvr) {
+ configSvr.disconnect(st.s1);
+});
- st._rsObjects.forEach(function(rsNodes) {
- rsNodes.nodes.forEach(function(conn) {
- conn.disconnect(st.s1);
- });
+st._rsObjects.forEach(function(rsNodes) {
+ rsNodes.nodes.forEach(function(conn) {
+ conn.disconnect(st.s1);
});
+});
- let connectedDB = st.s0.getDB("test");
- let disconnectedDB = st.s1.getDB("test");
+let connectedDB = st.s0.getDB("test");
+let disconnectedDB = st.s1.getDB("test");
- // Send an insert to the connected mongos to advance its cluster time.
- let res = assert.commandWorked(connectedDB.runCommand({insert: "foo", documents: [{x: 1}]}));
+// Send an insert to the connected mongos to advance its cluster time.
+let res = assert.commandWorked(connectedDB.runCommand({insert: "foo", documents: [{x: 1}]}));
- // Get logicalTime metadata from the connected mongos's response and send it in an isMaster
- // command to the disconnected mongos. isMaster does not require mongos to contact any other
- // servers, so the command should succeed.
- let lt = res.$clusterTime;
- res = assert.commandWorked(
- disconnectedDB.runCommand({isMaster: 1, $clusterTime: lt}),
- "expected the disconnected mongos to accept cluster time: " + tojson(lt));
+// Get logicalTime metadata from the connected mongos's response and send it in an isMaster
+// command to the disconnected mongos. isMaster does not require mongos to contact any other
+// servers, so the command should succeed.
+let lt = res.$clusterTime;
+res =
+ assert.commandWorked(disconnectedDB.runCommand({isMaster: 1, $clusterTime: lt}),
+ "expected the disconnected mongos to accept cluster time: " + tojson(lt));
- // Verify cluster time response from the disconnected mongos matches what was passed.
- assert.eq(lt,
- res.$clusterTime,
- "expected the disconnected mongos to send cluster time: " + tojson(lt) +
- ", received: " + tojson(res.$clusterTime));
+// Verify cluster time response from the disconnected mongos matches what was passed.
+assert.eq(lt,
+ res.$clusterTime,
+ "expected the disconnected mongos to send cluster time: " + tojson(lt) +
+ ", received: " + tojson(res.$clusterTime));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/after_cluster_time.js b/jstests/sharding/after_cluster_time.js
index eeb8ec05a7e..f79bf03bf23 100644
--- a/jstests/sharding/after_cluster_time.js
+++ b/jstests/sharding/after_cluster_time.js
@@ -3,122 +3,118 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- function assertAfterClusterTimeReadFailsWithCode(db, readConcernObj, errorCode) {
- return assert.commandFailedWithCode(
- db.runCommand({find: "foo", readConcern: readConcernObj}),
- errorCode,
- "expected command with read concern options: " + tojson(readConcernObj) + " to fail");
- }
-
- function assertAfterClusterTimeReadSucceeds(db, readConcernObj) {
- return assert.commandWorked(db.runCommand({find: "foo", readConcern: readConcernObj}),
- "expected command with read concern options: " +
- tojson(readConcernObj) + " to succeed");
+"use strict";
+
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+function assertAfterClusterTimeReadFailsWithCode(db, readConcernObj, errorCode) {
+ return assert.commandFailedWithCode(
+ db.runCommand({find: "foo", readConcern: readConcernObj}),
+ errorCode,
+ "expected command with read concern options: " + tojson(readConcernObj) + " to fail");
+}
+
+function assertAfterClusterTimeReadSucceeds(db, readConcernObj) {
+ return assert.commandWorked(
+ db.runCommand({find: "foo", readConcern: readConcernObj}),
+ "expected command with read concern options: " + tojson(readConcernObj) + " to succeed");
+}
+
+const rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
+});
- const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
- // Start the sharding test and add the majority read concern enabled replica set.
- const st = new ShardingTest({manualAddShard: true});
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+// Start the sharding test and add the majority read concern enabled replica set.
+const st = new ShardingTest({manualAddShard: true});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
- const testDB = st.s.getDB("test");
+const testDB = st.s.getDB("test");
- // Insert some data to find later.
- assert.commandWorked(testDB.runCommand(
- {insert: "foo", documents: [{_id: 1, x: 1}], writeConcern: {w: "majority"}}));
+// Insert some data to find later.
+assert.commandWorked(
+ testDB.runCommand({insert: "foo", documents: [{_id: 1, x: 1}], writeConcern: {w: "majority"}}));
- // Test the afterClusterTime API without causal consistency enabled on the mongo connection.
+// Test the afterClusterTime API without causal consistency enabled on the mongo connection.
- assertAfterClusterTimeReadFailsWithCode(
- testDB,
- {level: "linearizable", afterClusterTime: Timestamp(1, 1)},
- ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "linearizable", afterClusterTime: Timestamp(1, 1)}, ErrorCodes.InvalidOptions);
- // Reads with afterClusterTime require a non-zero timestamp.
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
+// Reads with afterClusterTime require a non-zero timestamp.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "local", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "local", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
- // Reads with proper afterClusterTime arguments return committed data after the given time.
- // Reads with afterClusterTime require a non-zero timestamp.
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
+// Reads with proper afterClusterTime arguments return committed data after the given time.
+// Reads with afterClusterTime require a non-zero timestamp.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
- // Reads with proper afterClusterTime arguments return committed data after the given time.
- let testReadOwnWrite = function(readConcern) {
- let res = assert.commandWorked(testDB.runCommand(
- {find: "foo", readConcern: {level: readConcern, afterClusterTime: Timestamp(1, 1)}}));
+// Reads with proper afterClusterTime arguments return committed data after the given time.
+let testReadOwnWrite = function(readConcern) {
+ let res = assert.commandWorked(testDB.runCommand(
+ {find: "foo", readConcern: {level: readConcern, afterClusterTime: Timestamp(1, 1)}}));
- assert.eq(res.cursor.firstBatch,
- [{_id: 1, x: 1}],
- "expected afterClusterTime read to return the committed document");
+ assert.eq(res.cursor.firstBatch,
+ [{_id: 1, x: 1}],
+ "expected afterClusterTime read to return the committed document");
- // Test the afterClusterTime API with causal consistency enabled on the mongo connection.
- testDB.getMongo().setCausalConsistency(true);
+ // Test the afterClusterTime API with causal consistency enabled on the mongo connection.
+ testDB.getMongo().setCausalConsistency(true);
- // With causal consistency enabled, the shell sets read concern to level "majority" if it is
- // not specified.
- assertAfterClusterTimeReadSucceeds(testDB, {afterClusterTime: Timestamp(1, 1)});
- testDB.getMongo().setCausalConsistency(false);
- };
+ // With causal consistency enabled, the shell sets read concern to level "majority" if it is
+ // not specified.
+ assertAfterClusterTimeReadSucceeds(testDB, {afterClusterTime: Timestamp(1, 1)});
+ testDB.getMongo().setCausalConsistency(false);
+};
- testReadOwnWrite("local");
- testReadOwnWrite("majority");
+testReadOwnWrite("local");
+testReadOwnWrite("majority");
- // Read concern levels other than majority are still not accepted.
- assertAfterClusterTimeReadFailsWithCode(
- testDB,
- {level: "linearizable", afterClusterTime: Timestamp(1, 1)},
- ErrorCodes.InvalidOptions);
+// Read concern levels other than majority are still not accepted.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "linearizable", afterClusterTime: Timestamp(1, 1)}, ErrorCodes.InvalidOptions);
- // Reads with afterClusterTime still require a non-zero timestamp.
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
+// Reads with afterClusterTime still require a non-zero timestamp.
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: {}}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: 10}, ErrorCodes.TypeMismatch);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp()}, ErrorCodes.InvalidOptions);
- assertAfterClusterTimeReadFailsWithCode(
- testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
+assertAfterClusterTimeReadFailsWithCode(
+ testDB, {level: "majority", afterClusterTime: Timestamp(0, 0)}, ErrorCodes.InvalidOptions);
- rst.stopSet();
- st.stop();
+rst.stopSet();
+st.stop();
})();
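The rules exercised above, collected in one sketch (same testDB handle; illustration only, not part of the patch): afterClusterTime must be a BSON Timestamp, must be non-zero, and is accepted here only with read concern level "local" or "majority" ("linearizable" is rejected):

    // TypeMismatch: afterClusterTime must be a Timestamp, not a number.
    testDB.runCommand({find: "foo", readConcern: {level: "majority", afterClusterTime: 10}});
    // InvalidOptions: a zero timestamp is not a valid cluster time.
    testDB.runCommand(
        {find: "foo", readConcern: {level: "majority", afterClusterTime: Timestamp(0, 0)}});
    // Accepted: a non-zero Timestamp with level "majority".
    assert.commandWorked(testDB.runCommand(
        {find: "foo", readConcern: {level: "majority", afterClusterTime: Timestamp(1, 1)}}));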
diff --git a/jstests/sharding/agg_error_reports_shard_host_and_port.js b/jstests/sharding/agg_error_reports_shard_host_and_port.js
index 3a73c1d2493..346351d35e3 100644
--- a/jstests/sharding/agg_error_reports_shard_host_and_port.js
+++ b/jstests/sharding/agg_error_reports_shard_host_and_port.js
@@ -1,34 +1,34 @@
// Tests that an aggregation error which occurs on a sharded collection will send an error message
// containing the host and port of the shard where the error occurred.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrMsgContains.
+load("jstests/aggregation/extras/utils.js"); // For assertErrMsgContains.
- const st = new ShardingTest({shards: 2, config: 1});
+const st = new ShardingTest({shards: 2, config: 1});
- const mongosDb = st.s.getDB(jsTestName());
- const coll = mongosDb.getCollection("foo");
+const mongosDb = st.s.getDB(jsTestName());
+const coll = mongosDb.getCollection("foo");
- // Enable sharding on the test DB and ensure its primary is shard 0.
- assert.commandWorked(mongosDb.adminCommand({enableSharding: mongosDb.getName()}));
- st.ensurePrimaryShard(mongosDb.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(mongosDb.adminCommand({enableSharding: mongosDb.getName()}));
+st.ensurePrimaryShard(mongosDb.getName(), st.rs0.getURL());
- // Shard the collection.
- coll.drop();
- st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 1});
+// Shard the collection.
+coll.drop();
+st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 1});
- assert.commandWorked(coll.insert({_id: 0}));
+assert.commandWorked(coll.insert({_id: 0}));
- // Run an aggregation which will fail on shard 1, and verify that the error message contains
- // the host and port of the shard that failed.
- // We need to be careful here to involve some data in the computation that is actually
- // sent to the shard before failing (i.e. "$_id") so that mongos doesn't short-circuit and
- // fail during optimization.
- const pipe = [{$project: {a: {$divide: ["$_id", 0]}}}];
- const divideByZeroErrorCode = 16608;
+// Run an aggregation which will fail on shard 1, and verify that the error message contains
+// the host and port of the shard that failed.
+// We need to be careful here to involve some data in the computation that is actually
+// sent to the shard before failing (i.e. "$_id") so that mongos doesn't short-circuit and
+// fail during optimization.
+const pipe = [{$project: {a: {$divide: ["$_id", 0]}}}];
+const divideByZeroErrorCode = 16608;
- assertErrMsgContains(coll, pipe, divideByZeroErrorCode, st.rs1.getPrimary().host);
+assertErrMsgContains(coll, pipe, divideByZeroErrorCode, st.rs1.getPrimary().host);
- st.stop();
+st.stop();
}());
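Spelled out, the assertion above amounts to the following sketch (same coll, pipe, and st handles; illustration only, since assertErrMsgContains wraps this pattern):

    // $divide consumes "$_id", so the error is raised on a shard rather than
    // during mongos optimization, and the message should name that shard.
    const err = assert.throws(() => coll.aggregate(pipe).toArray());
    assert(err.message.indexOf(st.rs1.getPrimary().host) !== -1, err.message);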
diff --git a/jstests/sharding/agg_explain_fmt.js b/jstests/sharding/agg_explain_fmt.js
index c331b2686b1..3d88d8a2383 100644
--- a/jstests/sharding/agg_explain_fmt.js
+++ b/jstests/sharding/agg_explain_fmt.js
@@ -1,42 +1,42 @@
// This test ensures that an explain of an aggregate through mongos has the intended format.
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For planHasStage.
+load('jstests/libs/analyze_plan.js'); // For planHasStage.
- const st = new ShardingTest({shards: 2});
- const mongosDB = st.s.getDB("test");
- const coll = mongosDB.agg_explain_fmt;
- // Insert documents with {_id: -5} to {_id: 4}.
- assert.commandWorked(coll.insert(Array.from({length: 10}, (_, i) => ({_id: i - 5}))));
+const st = new ShardingTest({shards: 2});
+const mongosDB = st.s.getDB("test");
+const coll = mongosDB.agg_explain_fmt;
+// Insert documents with {_id: -5} to {_id: 4}.
+assert.commandWorked(coll.insert(Array.from({length: 10}, (_, i) => ({_id: i - 5}))));
- // Test that with an unsharded collection we don't get any kind of 'splitPipeline', just the
- // normal explain with 'stages'.
- let explain = coll.explain().aggregate([{$project: {a: 1}}]);
- assert(!explain.hasOwnProperty("splitPipeline"), explain);
- assert(explain.hasOwnProperty("stages"), explain);
+// Test that with an unsharded collection we don't get any kind of 'splitPipeline', just the
+// normal explain with 'stages'.
+let explain = coll.explain().aggregate([{$project: {a: 1}}]);
+assert(!explain.hasOwnProperty("splitPipeline"), explain);
+assert(explain.hasOwnProperty("stages"), explain);
- // Now shard the collection by _id and move a chunk to each shard.
- st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 0});
+// Now shard the collection by _id and move a chunk to each shard.
+st.shardColl(coll, {_id: 1}, {_id: 0}, {_id: 0});
- // Test that we now have a split pipeline with information about what pipeline ran on each
- // shard.
- explain = coll.explain().aggregate([{$project: {a: 1}}]);
- assert(explain.hasOwnProperty("splitPipeline"), explain);
- assert(explain.splitPipeline.hasOwnProperty("shardsPart"), explain.splitPipeline);
- assert(explain.splitPipeline.hasOwnProperty("mergerPart"), explain.splitPipeline);
- assert(explain.hasOwnProperty("shards"), explain);
- for (let shardId in explain.shards) {
- const shardExplain = explain.shards[shardId];
- assert(shardExplain.hasOwnProperty("host"), shardExplain);
- assert(shardExplain.hasOwnProperty("stages") || shardExplain.hasOwnProperty("queryPlanner"),
- shardExplain);
- }
+// Test that we now have a split pipeline with information about what pipeline ran on each
+// shard.
+explain = coll.explain().aggregate([{$project: {a: 1}}]);
+assert(explain.hasOwnProperty("splitPipeline"), explain);
+assert(explain.splitPipeline.hasOwnProperty("shardsPart"), explain.splitPipeline);
+assert(explain.splitPipeline.hasOwnProperty("mergerPart"), explain.splitPipeline);
+assert(explain.hasOwnProperty("shards"), explain);
+for (let shardId in explain.shards) {
+ const shardExplain = explain.shards[shardId];
+ assert(shardExplain.hasOwnProperty("host"), shardExplain);
+ assert(shardExplain.hasOwnProperty("stages") || shardExplain.hasOwnProperty("queryPlanner"),
+ shardExplain);
+}
- // Do a sharded explain from a mongod, not mongos, to ensure that it does not have a
- // SHARDING_FILTER stage.
- const shardDB = st.shard0.getDB(mongosDB.getName());
- explain = shardDB[coll.getName()].explain().aggregate([{$match: {}}]);
- assert(!planHasStage(shardDB, explain.queryPlanner.winningPlan, "SHARDING_FILTER"), explain);
- st.stop();
+// Do a sharded explain from a mongod, not mongos, to ensure that it does not have a
+// SHARDING_FILTER stage.
+const shardDB = st.shard0.getDB(mongosDB.getName());
+explain = shardDB[coll.getName()].explain().aggregate([{$match: {}}]);
+assert(!planHasStage(shardDB, explain.queryPlanner.winningPlan, "SHARDING_FILTER"), explain);
+st.stop();
}());
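For reference, the explain shape asserted above can be inspected directly; a sketch using the same coll handle (illustration only, not part of the patch):

    const explainOut = coll.explain().aggregate([{$project: {a: 1}}]);
    // On a sharded collection, expect a split pipeline plus per-shard sections.
    printjson(explainOut.splitPipeline.shardsPart);   // stages each shard runs
    printjson(explainOut.splitPipeline.mergerPart);   // stages mongos runs to merge
    printjson(Object.keys(explainOut.shards));        // one entry per shard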
diff --git a/jstests/sharding/agg_project_limit_pipe_split.js b/jstests/sharding/agg_project_limit_pipe_split.js
index 010cd46c46e..f17148a0877 100644
--- a/jstests/sharding/agg_project_limit_pipe_split.js
+++ b/jstests/sharding/agg_project_limit_pipe_split.js
@@ -1,82 +1,78 @@
// Tests that the correct number of results are returned when $limit is coalesced with $sort.
(function() {
- "use strict";
- load("jstests/libs/analyze_plan.js");
+"use strict";
+load("jstests/libs/analyze_plan.js");
- const shardingTest = new ShardingTest({shards: 2});
- const db = shardingTest.getDB("project_limit");
- const coll = db.project_limit_pipe_split;
- coll.drop();
- assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
- assert.commandWorked(
- shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: "hashed"}}));
- const bulkOp = coll.initializeOrderedBulkOp();
- for (let i = 0; i < 400; ++i) {
- bulkOp.insert({x: i, y: ["a", "b", "c"], z: Math.floor(i / 12)});
- }
- assert.writeOK(bulkOp.execute());
+const shardingTest = new ShardingTest({shards: 2});
+const db = shardingTest.getDB("project_limit");
+const coll = db.project_limit_pipe_split;
+coll.drop();
+assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
+assert.commandWorked(
+ shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: "hashed"}}));
+const bulkOp = coll.initializeOrderedBulkOp();
+for (let i = 0; i < 400; ++i) {
+ bulkOp.insert({x: i, y: ["a", "b", "c"], z: Math.floor(i / 12)});
+}
+assert.writeOK(bulkOp.execute());
- let agg = coll.aggregate([
- {$match: {$or: [{z: 9}, {z: 10}]}},
- {$sort: {x: -1}},
- {$project: {x: 1, y: 1, z: 1, _id: 0}},
- {$limit: 6},
- ]);
- assert.eq(
- [
- {"x": 131, "y": ["a", "b", "c"], "z": 10},
- {"x": 130, "y": ["a", "b", "c"], "z": 10},
- {"x": 129, "y": ["a", "b", "c"], "z": 10},
- {"x": 128, "y": ["a", "b", "c"], "z": 10},
- {"x": 127, "y": ["a", "b", "c"], "z": 10},
- {"x": 126, "y": ["a", "b", "c"], "z": 10}
- ],
- agg.toArray());
+let agg = coll.aggregate([
+ {$match: {$or: [{z: 9}, {z: 10}]}},
+ {$sort: {x: -1}},
+ {$project: {x: 1, y: 1, z: 1, _id: 0}},
+ {$limit: 6},
+]);
+assert.eq(
+ [
+ {"x": 131, "y": ["a", "b", "c"], "z": 10},
+ {"x": 130, "y": ["a", "b", "c"], "z": 10},
+ {"x": 129, "y": ["a", "b", "c"], "z": 10},
+ {"x": 128, "y": ["a", "b", "c"], "z": 10},
+ {"x": 127, "y": ["a", "b", "c"], "z": 10},
+ {"x": 126, "y": ["a", "b", "c"], "z": 10}
+ ],
+ agg.toArray());
- agg = coll.aggregate([
- {$sort: {x: 1}},
- {$redact: "$$KEEP"},
- {$project: {x: 1, y: 1, z: 1, _id: 0}},
- {$limit: 6}
- ]);
- assert.eq(
- [
- {"x": 0, "y": ["a", "b", "c"], "z": 0},
- {"x": 1, "y": ["a", "b", "c"], "z": 0},
- {"x": 2, "y": ["a", "b", "c"], "z": 0},
- {"x": 3, "y": ["a", "b", "c"], "z": 0},
- {"x": 4, "y": ["a", "b", "c"], "z": 0},
- {"x": 5, "y": ["a", "b", "c"], "z": 0}
- ],
- agg.toArray());
+agg = coll.aggregate(
+ [{$sort: {x: 1}}, {$redact: "$$KEEP"}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$limit: 6}]);
+assert.eq(
+ [
+ {"x": 0, "y": ["a", "b", "c"], "z": 0},
+ {"x": 1, "y": ["a", "b", "c"], "z": 0},
+ {"x": 2, "y": ["a", "b", "c"], "z": 0},
+ {"x": 3, "y": ["a", "b", "c"], "z": 0},
+ {"x": 4, "y": ["a", "b", "c"], "z": 0},
+ {"x": 5, "y": ["a", "b", "c"], "z": 0}
+ ],
+ agg.toArray());
- agg = coll.aggregate(
- [{$sort: {x: -1}}, {$skip: 399}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$limit: 6}]);
- assert.eq([{"x": 0, "y": ["a", "b", "c"], "z": 0}], agg.toArray());
+agg = coll.aggregate(
+ [{$sort: {x: -1}}, {$skip: 399}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$limit: 6}]);
+assert.eq([{"x": 0, "y": ["a", "b", "c"], "z": 0}], agg.toArray());
- agg = coll.aggregate(
- [{$sort: {x: -1}}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$skip: 401}, {$limit: 6}]);
- assert.eq(0, agg.itcount());
+agg = coll.aggregate(
+ [{$sort: {x: -1}}, {$project: {x: 1, y: 1, z: 1, _id: 0}}, {$skip: 401}, {$limit: 6}]);
+assert.eq(0, agg.itcount());
- agg = coll.aggregate([
- {$sort: {x: -1}},
- {$skip: 4},
- {$project: {x: 1, y: 1, z: 1, _id: 0}},
- {$skip: 3},
- {$limit: 30},
- {$skip: 3},
- {$limit: 6},
- ]);
- assert.eq(
- [
- {"x": 389, "y": ["a", "b", "c"], "z": 32},
- {"x": 388, "y": ["a", "b", "c"], "z": 32},
- {"x": 387, "y": ["a", "b", "c"], "z": 32},
- {"x": 386, "y": ["a", "b", "c"], "z": 32},
- {"x": 385, "y": ["a", "b", "c"], "z": 32},
- {"x": 384, "y": ["a", "b", "c"], "z": 32}
- ],
- agg.toArray());
+agg = coll.aggregate([
+ {$sort: {x: -1}},
+ {$skip: 4},
+ {$project: {x: 1, y: 1, z: 1, _id: 0}},
+ {$skip: 3},
+ {$limit: 30},
+ {$skip: 3},
+ {$limit: 6},
+]);
+assert.eq(
+ [
+ {"x": 389, "y": ["a", "b", "c"], "z": 32},
+ {"x": 388, "y": ["a", "b", "c"], "z": 32},
+ {"x": 387, "y": ["a", "b", "c"], "z": 32},
+ {"x": 386, "y": ["a", "b", "c"], "z": 32},
+ {"x": 385, "y": ["a", "b", "c"], "z": 32},
+ {"x": 384, "y": ["a", "b", "c"], "z": 32}
+ ],
+ agg.toArray());
- shardingTest.stop();
+shardingTest.stop();
})();
\ No newline at end of file
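The optimization under test is worth stating plainly: when $limit follows $sort (possibly with intervening $project and $skip stages), each shard only needs to produce its own top k documents, and mongos merge-sorts those per-shard batches and re-applies the skip and limit. A sketch with the same coll handle (illustration only, not part of the patch):

    // Each shard returns at most 6 documents for this pipeline; mongos
    // merge-sorts the two batches and keeps the first 6 overall.
    const topSix = coll.aggregate([{$sort: {x: -1}}, {$limit: 6}]).toArray();
    assert.eq(6, topSix.length);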
diff --git a/jstests/sharding/agg_sort.js b/jstests/sharding/agg_sort.js
index 2aebb8e0ded..0ee78631ec0 100644
--- a/jstests/sharding/agg_sort.js
+++ b/jstests/sharding/agg_sort.js
@@ -1,225 +1,219 @@
// Tests that the sort order is obeyed when an aggregation requests sorted results that are
// scattered across multiple shards.
(function() {
- 'use strict';
-
- const shardingTest = new ShardingTest({shards: 2});
-
- const db = shardingTest.getDB("test");
- const coll = db.sharded_agg_sort;
- coll.drop();
-
- assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
- shardingTest.ensurePrimaryShard(db.getName(), shardingTest.shard1.shardName);
- assert.commandWorked(
- shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- const nDocs = 10;
- const yValues = [
- "abc",
- "ABC",
- null,
- 1,
- NumberLong(2),
- NumberDecimal(-20),
- MinKey,
- MaxKey,
- BinData(0, ""),
- [3, 4],
- ];
- const bulkOp = coll.initializeOrderedBulkOp();
- for (var i = 0; i < nDocs; ++i) {
- bulkOp.insert({_id: i, x: Math.floor(i / 2), y: yValues[i]});
- }
- assert.writeOK(bulkOp.execute());
-
- // Split the data into 3 chunks
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 3}}));
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 6}}));
-
- // Migrate the middle chunk to another shard
- assert.commandWorked(shardingTest.s0.adminCommand({
- movechunk: coll.getFullName(),
- find: {_id: 5},
- to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
- }));
-
- function assertResultsEqual({actual, expected}) {
- const resultsAsString = " actual: " + tojson(actual) + "\n expected: " + tojson(expected);
- assert.eq(
- actual.length, expected.length, `different number of results:\n${resultsAsString}`);
- for (let i = 0; i < actual.length; i++) {
- assert.eq(
- actual[i], expected[i], `different results at index ${i}:\n${resultsAsString}`);
- }
+'use strict';
+
+const shardingTest = new ShardingTest({shards: 2});
+
+const db = shardingTest.getDB("test");
+const coll = db.sharded_agg_sort;
+coll.drop();
+
+assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
+shardingTest.ensurePrimaryShard(db.getName(), shardingTest.shard1.shardName);
+assert.commandWorked(
+ shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+
+const nDocs = 10;
+const yValues = [
+ "abc",
+ "ABC",
+ null,
+ 1,
+ NumberLong(2),
+ NumberDecimal(-20),
+ MinKey,
+ MaxKey,
+ BinData(0, ""),
+ [3, 4],
+];
+const bulkOp = coll.initializeOrderedBulkOp();
+for (var i = 0; i < nDocs; ++i) {
+ bulkOp.insert({_id: i, x: Math.floor(i / 2), y: yValues[i]});
+}
+assert.writeOK(bulkOp.execute());
+
+// Split the data into 3 chunks
+assert.commandWorked(shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 3}}));
+assert.commandWorked(shardingTest.s0.adminCommand({split: coll.getFullName(), middle: {_id: 6}}));
+
+// Migrate the middle chunk to another shard
+assert.commandWorked(shardingTest.s0.adminCommand({
+ movechunk: coll.getFullName(),
+ find: {_id: 5},
+ to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
+}));
+
+function assertResultsEqual({actual, expected}) {
+ const resultsAsString = " actual: " + tojson(actual) + "\n expected: " + tojson(expected);
+ assert.eq(actual.length, expected.length, `different number of results:\n${resultsAsString}`);
+ for (let i = 0; i < actual.length; i++) {
+ assert.eq(actual[i], expected[i], `different results at index ${i}:\n${resultsAsString}`);
}
+}
- function testSorts() {
- // Test a basic sort by _id.
- assertResultsEqual({
- actual: coll.aggregate([{$sort: {_id: 1}}]).toArray(),
- expected: [
- {_id: 0, x: 0, y: "abc"},
- {_id: 1, x: 0, y: "ABC"},
- {_id: 2, x: 1, y: null},
- {_id: 3, x: 1, y: 1},
- {_id: 4, x: 2, y: NumberLong(2)},
- {_id: 5, x: 2, y: NumberDecimal(-20)},
- {_id: 6, x: 3, y: MinKey},
- {_id: 7, x: 3, y: MaxKey},
- {_id: 8, x: 4, y: BinData(0, "")},
- {_id: 9, x: 4, y: [3, 4]},
- ],
- });
- assertResultsEqual({
- actual: coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 1}}]).toArray(),
- expected: new Array(nDocs).fill().map(function(_, index) {
- return {_id: index};
- }),
- });
-
- // Test a compound sort.
- assertResultsEqual({
- actual: coll.aggregate([{$sort: {x: 1, y: 1}}]).toArray(),
- expected: [
- {_id: 1, x: 0, y: "ABC"},
- {_id: 0, x: 0, y: "abc"},
- {_id: 2, x: 1, y: null},
- {_id: 3, x: 1, y: 1},
- {_id: 5, x: 2, y: NumberDecimal(-20)},
- {_id: 4, x: 2, y: NumberLong(2)},
- {_id: 6, x: 3, y: MinKey},
- {_id: 7, x: 3, y: MaxKey},
- {_id: 9, x: 4, y: [3, 4]},
- {_id: 8, x: 4, y: BinData(0, "")},
- ],
- });
- assertResultsEqual({
- actual:
- coll.aggregate([{$sort: {x: 1, y: 1}}, {$project: {_id: 0, x: 1, y: 1}}]).toArray(),
- expected: [
- {x: 0, y: "ABC"},
- {x: 0, y: "abc"},
- {x: 1, y: null},
- {x: 1, y: 1},
- {x: 2, y: NumberDecimal(-20)},
- {x: 2, y: NumberLong(2)},
- {x: 3, y: MinKey},
- {x: 3, y: MaxKey},
- {x: 4, y: [3, 4]},
- {x: 4, y: BinData(0, "")},
- ],
- });
-
- // Test a compound sort with a missing field.
- assertResultsEqual({
- actual: coll.aggregate({$sort: {missing: -1, x: 1, _id: -1}}).toArray(),
- expected: [
- {_id: 1, x: 0, y: "ABC"},
- {_id: 0, x: 0, y: "abc"},
- {_id: 3, x: 1, y: 1},
- {_id: 2, x: 1, y: null},
- {_id: 5, x: 2, y: NumberDecimal(-20)},
- {_id: 4, x: 2, y: NumberLong(2)},
- {_id: 7, x: 3, y: MaxKey},
- {_id: 6, x: 3, y: MinKey},
- {_id: 9, x: 4, y: [3, 4]},
- {_id: 8, x: 4, y: BinData(0, "")},
- ]
- });
- }
- testSorts();
- assert.commandWorked(coll.createIndex({x: 1}));
- testSorts();
- assert.commandWorked(coll.createIndex({x: 1, y: 1}));
- testSorts();
- assert.commandWorked(coll.createIndex({missing: 1, x: -1}));
- testSorts();
- assert.commandWorked(coll.createIndex({missing: -1, x: 1, _id: -1}));
- testSorts();
-
- // Test that a sort including the text score is merged properly in a sharded cluster.
- const textColl = db.sharded_agg_sort_text;
-
- assert.commandWorked(
- shardingTest.s0.adminCommand({shardCollection: textColl.getFullName(), key: {_id: 1}}));
-
- assert.writeOK(textColl.insert([
- {_id: 0, text: "apple"},
- {_id: 1, text: "apple orange banana apple"},
- {_id: 2, text: "apple orange"},
- {_id: 3, text: "apple orange banana apple apple banana"},
- {_id: 4, text: "apple orange banana"},
- {_id: 5, text: "apple orange banana apple apple"},
- ]));
-
- // Split the data into 3 chunks
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 2}}));
- assert.commandWorked(
- shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 4}}));
-
- // Migrate the middle chunk to another shard
- assert.commandWorked(shardingTest.s0.adminCommand({
- movechunk: textColl.getFullName(),
- find: {_id: 3},
- to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
- }));
-
- assert.commandWorked(textColl.createIndex({text: "text"}));
+function testSorts() {
+ // Test a basic sort by _id.
assertResultsEqual({
- actual: textColl
- .aggregate([
- {$match: {$text: {$search: "apple banana orange"}}},
- {$sort: {x: {$meta: "textScore"}}}
- ])
- .toArray(),
+ actual: coll.aggregate([{$sort: {_id: 1}}]).toArray(),
expected: [
- {_id: 3, text: "apple orange banana apple apple banana"},
- {_id: 5, text: "apple orange banana apple apple"},
- {_id: 1, text: "apple orange banana apple"},
- {_id: 4, text: "apple orange banana"},
- {_id: 2, text: "apple orange"},
- {_id: 0, text: "apple"},
+ {_id: 0, x: 0, y: "abc"},
+ {_id: 1, x: 0, y: "ABC"},
+ {_id: 2, x: 1, y: null},
+ {_id: 3, x: 1, y: 1},
+ {_id: 4, x: 2, y: NumberLong(2)},
+ {_id: 5, x: 2, y: NumberDecimal(-20)},
+ {_id: 6, x: 3, y: MinKey},
+ {_id: 7, x: 3, y: MaxKey},
+ {_id: 8, x: 4, y: BinData(0, "")},
+ {_id: 9, x: 4, y: [3, 4]},
],
});
+ assertResultsEqual({
+ actual: coll.aggregate([{$sort: {_id: 1}}, {$project: {_id: 1}}]).toArray(),
+ expected: new Array(nDocs).fill().map(function(_, index) {
+ return {_id: index};
+ }),
+ });
- function assertSortedByMetaField(results) {
- for (let i = 0; i < results.length - 1; ++i) {
- assert(results[i].hasOwnProperty("meta"),
- `Expected all results to have "meta" field, found one without it at index ${i}`);
- assert.gte(
- results[i].meta,
- results[i + 1].meta,
- `Expected results to be sorted by "meta" field, descending. Detected unsorted` +
- ` results at index ${i}, entire result set: ${tojson(results)}`);
- }
- }
+ // Test a compound sort.
+ assertResultsEqual({
+ actual: coll.aggregate([{$sort: {x: 1, y: 1}}]).toArray(),
+ expected: [
+ {_id: 1, x: 0, y: "ABC"},
+ {_id: 0, x: 0, y: "abc"},
+ {_id: 2, x: 1, y: null},
+ {_id: 3, x: 1, y: 1},
+ {_id: 5, x: 2, y: NumberDecimal(-20)},
+ {_id: 4, x: 2, y: NumberLong(2)},
+ {_id: 6, x: 3, y: MinKey},
+ {_id: 7, x: 3, y: MaxKey},
+ {_id: 9, x: 4, y: [3, 4]},
+ {_id: 8, x: 4, y: BinData(0, "")},
+ ],
+ });
+ assertResultsEqual({
+ actual: coll.aggregate([{$sort: {x: 1, y: 1}}, {$project: {_id: 0, x: 1, y: 1}}]).toArray(),
+ expected: [
+ {x: 0, y: "ABC"},
+ {x: 0, y: "abc"},
+ {x: 1, y: null},
+ {x: 1, y: 1},
+ {x: 2, y: NumberDecimal(-20)},
+ {x: 2, y: NumberLong(2)},
+ {x: 3, y: MinKey},
+ {x: 3, y: MaxKey},
+ {x: 4, y: [3, 4]},
+ {x: 4, y: BinData(0, "")},
+ ],
+ });
- assertSortedByMetaField(textColl
- .aggregate([
- {$match: {$text: {$search: "apple banana orange"}}},
- {$sort: {x: {$meta: "textScore"}}},
- {$project: {_id: 0, meta: {$meta: "textScore"}}},
- ])
- .toArray());
-
- assertSortedByMetaField(textColl
- .aggregate([
- {$match: {$text: {$search: "apple banana orange"}}},
- {$project: {_id: 0, meta: {$meta: "textScore"}}},
- {$sort: {meta: -1}},
- ])
- .toArray());
-
- assertSortedByMetaField(textColl
- .aggregate([
- {$sample: {size: 10}},
- {$project: {_id: 0, meta: {$meta: "randVal"}}},
- ])
- .toArray());
-
- shardingTest.stop();
+ // Test a compound sort with a missing field.
+ assertResultsEqual({
+ actual: coll.aggregate({$sort: {missing: -1, x: 1, _id: -1}}).toArray(),
+ expected: [
+ {_id: 1, x: 0, y: "ABC"},
+ {_id: 0, x: 0, y: "abc"},
+ {_id: 3, x: 1, y: 1},
+ {_id: 2, x: 1, y: null},
+ {_id: 5, x: 2, y: NumberDecimal(-20)},
+ {_id: 4, x: 2, y: NumberLong(2)},
+ {_id: 7, x: 3, y: MaxKey},
+ {_id: 6, x: 3, y: MinKey},
+ {_id: 9, x: 4, y: [3, 4]},
+ {_id: 8, x: 4, y: BinData(0, "")},
+ ]
+ });
+}
+testSorts();
+assert.commandWorked(coll.createIndex({x: 1}));
+testSorts();
+assert.commandWorked(coll.createIndex({x: 1, y: 1}));
+testSorts();
+assert.commandWorked(coll.createIndex({missing: 1, x: -1}));
+testSorts();
+assert.commandWorked(coll.createIndex({missing: -1, x: 1, _id: -1}));
+testSorts();
+
+// Test that a sort including the text score is merged properly in a sharded cluster.
+const textColl = db.sharded_agg_sort_text;
+
+assert.commandWorked(
+ shardingTest.s0.adminCommand({shardCollection: textColl.getFullName(), key: {_id: 1}}));
+
+assert.writeOK(textColl.insert([
+ {_id: 0, text: "apple"},
+ {_id: 1, text: "apple orange banana apple"},
+ {_id: 2, text: "apple orange"},
+ {_id: 3, text: "apple orange banana apple apple banana"},
+ {_id: 4, text: "apple orange banana"},
+ {_id: 5, text: "apple orange banana apple apple"},
+]));
+
+// Split the data into 3 chunks
+assert.commandWorked(
+ shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 2}}));
+assert.commandWorked(
+ shardingTest.s0.adminCommand({split: textColl.getFullName(), middle: {_id: 4}}));
+
+// Migrate the middle chunk to another shard
+assert.commandWorked(shardingTest.s0.adminCommand({
+ movechunk: textColl.getFullName(),
+ find: {_id: 3},
+ to: shardingTest.getOther(shardingTest.getPrimaryShard(db.getName())).name
+}));
+
+assert.commandWorked(textColl.createIndex({text: "text"}));
+assertResultsEqual({
+ actual: textColl
+ .aggregate([
+ {$match: {$text: {$search: "apple banana orange"}}},
+ {$sort: {x: {$meta: "textScore"}}}
+ ])
+ .toArray(),
+ expected: [
+ {_id: 3, text: "apple orange banana apple apple banana"},
+ {_id: 5, text: "apple orange banana apple apple"},
+ {_id: 1, text: "apple orange banana apple"},
+ {_id: 4, text: "apple orange banana"},
+ {_id: 2, text: "apple orange"},
+ {_id: 0, text: "apple"},
+ ],
+});
+
+function assertSortedByMetaField(results) {
+ for (let i = 0; i < results.length - 1; ++i) {
+ assert(results[i].hasOwnProperty("meta"),
+ `Expected all results to have "meta" field, found one without it at index ${i}`);
+ assert.gte(results[i].meta,
+ results[i + 1].meta,
+ `Expected results to be sorted by "meta" field, descending. Detected unsorted` +
+ ` results at index ${i}, entire result set: ${tojson(results)}`);
+ }
+}
+
+assertSortedByMetaField(textColl
+ .aggregate([
+ {$match: {$text: {$search: "apple banana orange"}}},
+ {$sort: {x: {$meta: "textScore"}}},
+ {$project: {_id: 0, meta: {$meta: "textScore"}}},
+ ])
+ .toArray());
+
+assertSortedByMetaField(textColl
+ .aggregate([
+ {$match: {$text: {$search: "apple banana orange"}}},
+ {$project: {_id: 0, meta: {$meta: "textScore"}}},
+ {$sort: {meta: -1}},
+ ])
+ .toArray());
+
+assertSortedByMetaField(textColl
+ .aggregate([
+ {$sample: {size: 10}},
+ {$project: {_id: 0, meta: {$meta: "randVal"}}},
+ ])
+ .toArray());
+
+shardingTest.stop();
})();
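A sketch of the metadata-sort pattern this file exercises, using the same textColl handle (illustration only, not part of the patch): each shard computes textScore locally, and mongos merge-sorts the shard streams on that metadata rather than on a stored field:

    const byScore = textColl.aggregate([
        {$match: {$text: {$search: "apple banana orange"}}},
        {$sort: {score: {$meta: "textScore"}}},
        {$project: {_id: 1, score: {$meta: "textScore"}}}
    ]).toArray();
    // After the mongos merge, scores must be non-increasing.
    for (let i = 0; i + 1 < byScore.length; ++i) {
        assert.gte(byScore[i].score, byScore[i + 1].score);
    }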
diff --git a/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js b/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js
index 740eade7b12..05a48adf3eb 100644
--- a/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js
+++ b/jstests/sharding/agg_write_stages_cannot_run_on_mongos.js
@@ -1,46 +1,43 @@
// Tests that special stages which must run on mongos cannot be run in combination with an $out or
// $merge stage.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const db = st.s0.getDB("db");
- const admin = st.s0.getDB("admin");
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const db = st.s0.getDB("db");
+const admin = st.s0.getDB("admin");
- // Create a collection in the db to get around optimizations that will do nothing in lieu of
- // failing when the db is empty.
- assert.commandWorked(db.runCommand({create: "coll"}));
+// Create a collection in the db to get around optimizations that will do nothing in lieu of
+// failing when the db is empty.
+assert.commandWorked(db.runCommand({create: "coll"}));
- // These should fail because the initial stages require mongos execution and $out/$merge
- // requires shard execution.
- assert.commandFailedWithCode(
- db.runCommand(
- {aggregate: 1, pipeline: [{$listLocalSessions: {}}, {$out: "test"}], cursor: {}}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(
- admin.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {localOps: true}}, {$out: "test"}], cursor: {}}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(
- db.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}, {$out: "test"}], cursor: {}}),
- ErrorCodes.IllegalOperation);
+// These should fail because the initial stages require mongos execution and $out/$merge
+// requires shard execution.
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: 1, pipeline: [{$listLocalSessions: {}}, {$out: "test"}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ admin.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {localOps: true}}, {$out: "test"}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}, {$out: "test"}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(db.runCommand({
- aggregate: 1,
- pipeline: [{$listLocalSessions: {}}, {$merge: {into: "test"}}],
- cursor: {}
- }),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(admin.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: {localOps: true}}, {$merge: {into: "test"}}],
- cursor: {}
- }),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(
- db.runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {}}, {$merge: {into: "test"}}], cursor: {}}),
- ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ db.runCommand(
+ {aggregate: 1, pipeline: [{$listLocalSessions: {}}, {$merge: {into: "test"}}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(admin.runCommand({
+ aggregate: 1,
+ pipeline: [{$currentOp: {localOps: true}}, {$merge: {into: "test"}}],
+ cursor: {}
+}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(
+ db.runCommand(
+ {aggregate: 1, pipeline: [{$changeStream: {}}, {$merge: {into: "test"}}], cursor: {}}),
+ ErrorCodes.IllegalOperation);
- st.stop();
+st.stop();
}());
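For contrast, a pipeline whose initial stages can run on the shards may still end in $out; only the mongos-only initial stages conflict. A sketch with the db handle from this test (illustration only, not part of the patch; "coll" is the collection the test creates):

    assert.commandWorked(db.runCommand(
        {aggregate: "coll", pipeline: [{$match: {}}, {$out: "test"}], cursor: {}}));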
diff --git a/jstests/sharding/aggregates_during_balancing.js b/jstests/sharding/aggregates_during_balancing.js
index 149f6031583..8adad032ce3 100644
--- a/jstests/sharding/aggregates_during_balancing.js
+++ b/jstests/sharding/aggregates_during_balancing.js
@@ -1,251 +1,246 @@
// Inserts some interesting data into a sharded collection, enables the balancer, and tests that
// various kinds of aggregations return the expected results.
(function() {
- load('jstests/aggregation/extras/utils.js');
-
- var shardedAggTest =
- new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
- shardedAggTest.adminCommand({enablesharding: "aggShard"});
- db = shardedAggTest.getDB("aggShard");
- shardedAggTest.ensurePrimaryShard('aggShard', shardedAggTest.shard0.shardName);
-
- db.ts1.drop();
- db.literal.drop();
-
- shardedAggTest.adminCommand({shardcollection: "aggShard.ts1", key: {"_id": 1}});
- shardedAggTest.adminCommand({shardcollection: "aggShard.literal", key: {"_id": 1}});
-
- /*
- Test combining results in mongos for operations that sub-aggregate on shards.
-
- The unusual operators here are $avg, $addToSet, $push. In the case of $avg,
- the shard pipeline produces an object with the current subtotal and item count
- so that these can be combined in mongos by totalling the subtotals and counts
- before performing the final division. For $addToSet and $push, the shard
- pipelines produce arrays, but in mongos these are combined rather than simply
- being added as arrays within arrays.
- */
-
- var count = 0;
- var strings = [
- "one", "two", "three", "four", "five", "six", "seven",
- "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
- "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty"
- ];
-
- jsTestLog("Bulk inserting data");
- var nItems = 200000;
- var bulk = db.ts1.initializeUnorderedBulkOp();
- for (i = 0; i < nItems; ++i) {
- bulk.insert({
- _id: i,
- counter: ++count,
- number: strings[i % 20],
- random: Math.random(),
- filler: "0123456789012345678901234567890123456789"
- });
- }
- assert.writeOK(bulk.execute());
-
- jsTestLog('a project and group in shards, result combined in mongos');
- var a1 = db.ts1
- .aggregate([
- {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
- {
- $group: {
- _id: "$cMod10",
- numberSet: {$addToSet: "$number"},
- avgCounter: {$avg: "$cMod10"}
- }
- },
- {$sort: {_id: 1}}
- ])
- .toArray();
-
- for (i = 0; i < 10; ++i) {
- assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
- assert.eq(a1[i].numberSet.length, 2, 'agg sharded test numberSet length failed');
- }
-
- jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
- var a2 = db.ts1.aggregate([{$group: {_id: "all", total: {$sum: "$counter"}}}]).toArray();
-
- jsTestLog('sum of an arithmetic progression S(n) = (n/2)(a(1) + a(n));');
- assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter sum failed');
-
- jsTestLog('A group combining all documents into one, averaging a null field.');
- assert.eq(db.ts1.aggregate([{$group: {_id: null, avg: {$avg: "$missing"}}}]).toArray(),
- [{_id: null, avg: null}]);
-
- jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
- var a3 = db.ts1.aggregate([{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}])
- .toArray();
-
- for (i = 0; i < strings.length; ++i) {
- assert.eq(a3[i].total, nItems / strings.length, 'agg sharded test sum numbers failed');
- }
-
- jsTestLog('a match takes place in the shards; just returning the results from mongos');
- var a4 = db.ts1
- .aggregate([{
- $match: {
- $or: [
- {counter: 55},
- {counter: 1111},
- {counter: 2222},
- {counter: 33333},
- {counter: 99999},
- {counter: 55555}
- ]
+load('jstests/aggregation/extras/utils.js');
+
+var shardedAggTest =
+ new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+shardedAggTest.adminCommand({enablesharding: "aggShard"});
+db = shardedAggTest.getDB("aggShard");
+shardedAggTest.ensurePrimaryShard('aggShard', shardedAggTest.shard0.shardName);
+
+db.ts1.drop();
+db.literal.drop();
+
+shardedAggTest.adminCommand({shardcollection: "aggShard.ts1", key: {"_id": 1}});
+shardedAggTest.adminCommand({shardcollection: "aggShard.literal", key: {"_id": 1}});
+
+/*
+Test combining results in mongos for operations that sub-aggregate on shards.
+
+The unusual operators here are $avg, $addToSet, $push. In the case of $avg,
+the shard pipeline produces an object with the current subtotal and item count
+so that these can be combined in mongos by totalling the subtotals and counts
+before performing the final division. For $addToSet and $push, the shard
+pipelines produce arrays, but in mongos these are combined rather than simply
+being added as arrays within arrays.
+*/
+
+var count = 0;
+var strings = [
+ "one", "two", "three", "four", "five", "six", "seven",
+ "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
+ "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty"
+];
+
+jsTestLog("Bulk inserting data");
+var nItems = 200000;
+var bulk = db.ts1.initializeUnorderedBulkOp();
+for (i = 0; i < nItems; ++i) {
+ bulk.insert({
+ _id: i,
+ counter: ++count,
+ number: strings[i % 20],
+ random: Math.random(),
+ filler: "0123456789012345678901234567890123456789"
+ });
+}
+assert.writeOK(bulk.execute());
+
+jsTestLog('a project and group in shards, result combined in mongos');
+var a1 = db.ts1
+ .aggregate([
+ {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
+ {
+ $group: {
+ _id: "$cMod10",
+ numberSet: {$addToSet: "$number"},
+ avgCounter: {$avg: "$cMod10"}
}
- }])
- .toArray();
- assert.eq(a4.length, 6, tojson(a4));
- for (i = 0; i < 6; ++i) {
- c = a4[i].counter;
- printjson({c: c});
- assert(
- (c == 55) || (c == 1111) || (c == 2222) || (c == 33333) || (c == 99999) || (c == 55555),
- 'agg sharded test simple match failed');
- }
-
- function testSkipLimit(ops, expectedCount) {
- jsTestLog('testSkipLimit(' + tojson(ops) + ', ' + expectedCount + ')');
- if (expectedCount > 10) {
- // make shard -> mongos intermediate results less than 16MB
- ops.unshift({$project: {_id: 1}});
- }
-
- ops.push({$group: {_id: 1, count: {$sum: 1}}});
-
- var out = db.ts1.aggregate(ops).toArray();
- assert.eq(out[0].count, expectedCount);
- }
-
- testSkipLimit([], nItems); // control
- testSkipLimit([{$skip: 10}], nItems - 10);
- testSkipLimit([{$limit: 10}], 10);
- testSkipLimit([{$skip: 5}, {$limit: 10}], 10);
- testSkipLimit([{$limit: 10}, {$skip: 5}], 10 - 5);
- testSkipLimit([{$skip: 5}, {$skip: 3}, {$limit: 10}], 10);
- testSkipLimit([{$skip: 5}, {$limit: 10}, {$skip: 3}], 10 - 3);
- testSkipLimit([{$limit: 10}, {$skip: 5}, {$skip: 3}], 10 - 3 - 5);
-
- // test sort + limit (using random to pull from both shards)
- function testSortLimit(limit, direction) {
- jsTestLog('testSortLimit(' + limit + ', ' + direction + ')');
- var from_cursor =
- db.ts1.find({}, {random: 1, _id: 0}).sort({random: direction}).limit(limit).toArray();
- var from_agg = db.ts1
- .aggregate([
- {$project: {random: 1, _id: 0}},
- {$sort: {random: direction}},
- {$limit: limit}
- ])
- .toArray();
- assert.eq(from_cursor, from_agg);
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray();
+
+for (i = 0; i < 10; ++i) {
+ assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
+ assert.eq(a1[i].numberSet.length, 2, 'agg sharded test numberSet length failed');
+}
+
+jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
+var a2 = db.ts1.aggregate([{$group: {_id: "all", total: {$sum: "$counter"}}}]).toArray();
+
+jsTestLog('sum of an arithmetic progression S(n) = (n/2)(a(1) + a(n));');
+assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter sum failed');
+
+jsTestLog('A group combining all documents into one, averaging a null field.');
+assert.eq(db.ts1.aggregate([{$group: {_id: null, avg: {$avg: "$missing"}}}]).toArray(),
+ [{_id: null, avg: null}]);
+
+jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
+var a3 =
+ db.ts1.aggregate([{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}]).toArray();
+
+for (i = 0; i < strings.length; ++i) {
+ assert.eq(a3[i].total, nItems / strings.length, 'agg sharded test sum numbers failed');
+}
+
+jsTestLog('a match takes place in the shards; just returning the results from mongos');
+var a4 = db.ts1
+ .aggregate([{
+ $match: {
+ $or: [
+ {counter: 55},
+ {counter: 1111},
+ {counter: 2222},
+ {counter: 33333},
+ {counter: 99999},
+ {counter: 55555}
+ ]
+ }
+ }])
+ .toArray();
+assert.eq(a4.length, 6, tojson(a4));
+for (i = 0; i < 6; ++i) {
+ c = a4[i].counter;
+ printjson({c: c});
+ assert((c == 55) || (c == 1111) || (c == 2222) || (c == 33333) || (c == 99999) || (c == 55555),
+ 'agg sharded test simple match failed');
+}
+
+function testSkipLimit(ops, expectedCount) {
+ jsTestLog('testSkipLimit(' + tojson(ops) + ', ' + expectedCount + ')');
+ if (expectedCount > 10) {
+ // make shard -> mongos intermediate results less than 16MB
+ ops.unshift({$project: {_id: 1}});
}
- testSortLimit(1, 1);
- testSortLimit(1, -1);
- testSortLimit(10, 1);
- testSortLimit(10, -1);
- testSortLimit(100, 1);
- testSortLimit(100, -1);
-
- function testAvgStdDev() {
- jsTestLog('testing $avg and $stdDevPop in sharded $group');
- // $stdDevPop can vary slightly between runs if a migration occurs. This is why we use
- // assert.close below.
- var res = db.ts1
- .aggregate([{
- $group: {
- _id: null,
- avg: {$avg: '$counter'},
- stdDevPop: {$stdDevPop: '$counter'},
- }
- }])
- .toArray();
- // http://en.wikipedia.org/wiki/Arithmetic_progression#Sum
- var avg = (1 + nItems) / 2;
- assert.close(res[0].avg, avg, '', 10 /*decimal places*/);
-
- // http://en.wikipedia.org/wiki/Arithmetic_progression#Standard_deviation
- var stdDev = Math.sqrt(((nItems - 1) * (nItems + 1)) / 12);
- assert.close(res[0].stdDevPop, stdDev, '', 10 /*decimal places*/);
- }
- testAvgStdDev();
-
- function testSample() {
- jsTestLog('testing $sample');
- [0, 1, 10, nItems, nItems + 1].forEach(function(size) {
- var res = db.ts1.aggregate([{$sample: {size: size}}]).toArray();
- assert.eq(res.length, Math.min(nItems, size));
- });
- }
-
- testSample();
- jsTestLog('test $out by copying source collection verbatim to output');
- var outCollection = db.ts1_out;
- var res = db.ts1.aggregate([{$out: outCollection.getName()}]).toArray();
- assert.eq(db.ts1.find().itcount(), outCollection.find().itcount());
- assert.eq(db.ts1.find().sort({_id: 1}).toArray(),
- outCollection.find().sort({_id: 1}).toArray());
-
- // Make sure we error out if $out collection is sharded
- assert.commandFailed(
- db.runCommand({aggregate: outCollection.getName(), pipeline: [{$out: db.ts1.getName()}]}));
-
- assert.writeOK(db.literal.save({dollar: false}));
-
- result =
- db.literal
- .aggregate([{
- $project:
- {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
- }])
+ ops.push({$group: {_id: 1, count: {$sum: 1}}});
+
+ var out = db.ts1.aggregate(ops).toArray();
+ assert.eq(out[0].count, expectedCount);
+}
+
+testSkipLimit([], nItems); // control
+testSkipLimit([{$skip: 10}], nItems - 10);
+testSkipLimit([{$limit: 10}], 10);
+testSkipLimit([{$skip: 5}, {$limit: 10}], 10);
+testSkipLimit([{$limit: 10}, {$skip: 5}], 10 - 5);
+testSkipLimit([{$skip: 5}, {$skip: 3}, {$limit: 10}], 10);
+testSkipLimit([{$skip: 5}, {$limit: 10}, {$skip: 3}], 10 - 3);
+testSkipLimit([{$limit: 10}, {$skip: 5}, {$skip: 3}], 10 - 3 - 5);
+
+// test sort + limit (using random to pull from both shards)
+function testSortLimit(limit, direction) {
+ jsTestLog('testSortLimit(' + limit + ', ' + direction + ')');
+ var from_cursor =
+ db.ts1.find({}, {random: 1, _id: 0}).sort({random: direction}).limit(limit).toArray();
+ var from_agg =
+ db.ts1
+ .aggregate(
+ [{$project: {random: 1, _id: 0}}, {$sort: {random: direction}}, {$limit: limit}])
.toArray();
+ assert.eq(from_cursor, from_agg);
+}
+testSortLimit(1, 1);
+testSortLimit(1, -1);
+testSortLimit(10, 1);
+testSortLimit(10, -1);
+testSortLimit(100, 1);
+testSortLimit(100, -1);
+
+function testAvgStdDev() {
+ jsTestLog('testing $avg and $stdDevPop in sharded $group');
+ // $stdDevPop can vary slightly between runs if a migration occurs. This is why we use
+ // assert.close below.
+ var res = db.ts1
+ .aggregate([{
+ $group: {
+ _id: null,
+ avg: {$avg: '$counter'},
+ stdDevPop: {$stdDevPop: '$counter'},
+ }
+ }])
+ .toArray();
+ // http://en.wikipedia.org/wiki/Arithmetic_progression#Sum
+ var avg = (1 + nItems) / 2;
+ assert.close(res[0].avg, avg, '', 10 /*decimal places*/);
+
+ // http://en.wikipedia.org/wiki/Arithmetic_progression#Standard_deviation
+ var stdDev = Math.sqrt(((nItems - 1) * (nItems + 1)) / 12);
+ assert.close(res[0].stdDevPop, stdDev, '', 10 /*decimal places*/);
+}
+testAvgStdDev();
+
+function testSample() {
+ jsTestLog('testing $sample');
+ [0, 1, 10, nItems, nItems + 1].forEach(function(size) {
+ var res = db.ts1.aggregate([{$sample: {size: size}}]).toArray();
+ assert.eq(res.length, Math.min(nItems, size));
+ });
+}
+
+testSample();
+
+jsTestLog('test $out by copying source collection verbatim to output');
+var outCollection = db.ts1_out;
+var res = db.ts1.aggregate([{$out: outCollection.getName()}]).toArray();
+assert.eq(db.ts1.find().itcount(), outCollection.find().itcount());
+assert.eq(db.ts1.find().sort({_id: 1}).toArray(), outCollection.find().sort({_id: 1}).toArray());
+
+// Make sure we error out if $out collection is sharded
+assert.commandFailed(
+ db.runCommand({aggregate: outCollection.getName(), pipeline: [{$out: db.ts1.getName()}]}));
+
+assert.writeOK(db.literal.save({dollar: false}));
+
+result =
+ db.literal
+ .aggregate([{
+ $project: {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
+ }])
+ .toArray();
+
+assert.eq([{cost: '$.99'}], result);
+
+(function() {
+jsTestLog('Testing a $match stage on the shard key.');
+
+var outCollection = 'testShardKeyMatchOut';
+
+// Point query.
+var targetId = Math.floor(nItems * Math.random());
+var pipeline = [{$match: {_id: targetId}}, {$project: {_id: 1}}, {$sort: {_id: 1}}];
+var expectedDocs = [{_id: targetId}];
+// Normal pipeline.
+assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
+// With $out.
+db[outCollection].drop();
+pipeline.push({$out: outCollection});
+db.ts1.aggregate(pipeline);
+assert.eq(db[outCollection].find().toArray(), expectedDocs);
+
+// Range query.
+var range = 500;
+var targetStart = Math.floor((nItems - range) * Math.random());
+pipeline = [
+ {$match: {_id: {$gte: targetStart, $lt: targetStart + range}}},
+ {$project: {_id: 1}},
+ {$sort: {_id: 1}}
+];
+expectedDocs = [];
+for (var i = targetStart; i < targetStart + range; i++) {
+ expectedDocs.push({_id: i});
+}
+// Normal pipeline.
+assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
+// With $out.
+db[outCollection].drop();
+pipeline.push({$out: outCollection});
+db.ts1.aggregate(pipeline);
+assert.eq(db[outCollection].find().toArray(), expectedDocs);
+}());
- assert.eq([{cost: '$.99'}], result);
-
- (function() {
- jsTestLog('Testing a $match stage on the shard key.');
-
- var outCollection = 'testShardKeyMatchOut';
-
- // Point query.
- var targetId = Math.floor(nItems * Math.random());
- var pipeline = [{$match: {_id: targetId}}, {$project: {_id: 1}}, {$sort: {_id: 1}}];
- var expectedDocs = [{_id: targetId}];
- // Normal pipeline.
- assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
- // With $out.
- db[outCollection].drop();
- pipeline.push({$out: outCollection});
- db.ts1.aggregate(pipeline);
- assert.eq(db[outCollection].find().toArray(), expectedDocs);
-
- // Range query.
- var range = 500;
- var targetStart = Math.floor((nItems - range) * Math.random());
- pipeline = [
- {$match: {_id: {$gte: targetStart, $lt: targetStart + range}}},
- {$project: {_id: 1}},
- {$sort: {_id: 1}}
- ];
- expectedDocs = [];
- for (var i = targetStart; i < targetStart + range; i++) {
- expectedDocs.push({_id: i});
- }
- // Normal pipeline.
- assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
- // With $out.
- db[outCollection].drop();
- pipeline.push({$out: outCollection});
- db.ts1.aggregate(pipeline);
- assert.eq(db[outCollection].find().toArray(), expectedDocs);
- }());
-
- shardedAggTest.stop();
+shardedAggTest.stop();
}());
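The comment block in this file describes how mongos combines shard-level partial aggregates; a standalone sketch of the $avg case (plain JavaScript, illustration only; the field names are hypothetical, not the server's wire format):

    // Each shard reports a subtotal and a count; mongos sums both across
    // shards and performs the division exactly once at the end.
    function mergePartialAvg(parts) {
        let subTotal = 0, count = 0;
        for (let p of parts) {
            subTotal += p.subTotal;
            count += p.count;
        }
        return subTotal / count;
    }
    assert.eq(2.5, mergePartialAvg([{subTotal: 3, count: 2}, {subTotal: 7, count: 2}]));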
diff --git a/jstests/sharding/aggregation_currentop.js b/jstests/sharding/aggregation_currentop.js
index 5e7ed32f09a..4973b4f2d3f 100644
--- a/jstests/sharding/aggregation_currentop.js
+++ b/jstests/sharding/aggregation_currentop.js
@@ -20,906 +20,883 @@
TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
(function() {
- "use strict";
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
-
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
+"use strict";
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
+
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
+
+const key = "jstests/libs/key1";
+
+// Parameters used to establish the sharded cluster.
+const stParams = {
+ name: jsTestName(),
+ keyFile: key,
+ shards: 3,
+ rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
+};
+
+// Create a new sharded cluster for testing. We set the internalQueryExecYieldIterations
+// parameter so that plan execution yields on every iteration. For some tests, we will
+// temporarily set yields to hang the mongod so we can capture particular operations in the
+// currentOp output.
+const st = new ShardingTest(stParams);
+
+// Assign various elements of the cluster. We will use shard rs0 to test replica-set level
+// $currentOp behaviour.
+let shardConn = st.rs0.getPrimary();
+let mongosConn = st.s;
+let shardRS = st.rs0;
+
+let clusterTestDB = mongosConn.getDB(jsTestName());
+let clusterAdminDB = mongosConn.getDB("admin");
+shardConn.waitForClusterTime(60);
+let shardTestDB = shardConn.getDB(jsTestName());
+let shardAdminDB = shardConn.getDB("admin");
+
+function createUsers(conn) {
+ let adminDB = conn.getDB("admin");
+
+ // Create an admin user, one user with the inprog privilege, and one without.
+ assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "pwd", roles: ["root"]}));
+ assert(adminDB.auth("admin", "pwd"));
+
+ assert.commandWorked(adminDB.runCommand({
+ createRole: "role_inprog",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
+ }));
- const key = "jstests/libs/key1";
+ assert.commandWorked(adminDB.runCommand(
+ {createUser: "user_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase", "role_inprog"]}));
- // Parameters used to establish the sharded cluster.
- const stParams = {
- name: jsTestName(),
- keyFile: key,
- shards: 3,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
- };
+ assert.commandWorked(adminDB.runCommand(
+ {createUser: "user_no_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase"]}));
+}
- // Create a new sharded cluster for testing. We set the internalQueryExecYieldIterations
- // parameter so that plan execution yields on every iteration. For some tests, we will
- // temporarily set yields to hang the mongod so we can capture particular operations in the
- // currentOp output.
- const st = new ShardingTest(stParams);
+// Create necessary users at both cluster and shard-local level.
+createUsers(shardConn);
+createUsers(mongosConn);
- // Assign various elements of the cluster. We will use shard rs0 to test replica-set level
- // $currentOp behaviour.
- let shardConn = st.rs0.getPrimary();
- let mongosConn = st.s;
- let shardRS = st.rs0;
+// Create a test database and some dummy data on rs0.
+assert(clusterAdminDB.auth("admin", "pwd"));
- let clusterTestDB = mongosConn.getDB(jsTestName());
- let clusterAdminDB = mongosConn.getDB("admin");
- shardConn.waitForClusterTime(60);
- let shardTestDB = shardConn.getDB(jsTestName());
- let shardAdminDB = shardConn.getDB("admin");
+for (let i = 0; i < 5; i++) {
+ assert.writeOK(clusterTestDB.test.insert({_id: i, a: i}));
+}
- function createUsers(conn) {
- let adminDB = conn.getDB("admin");
+st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
- // Create an admin user, one user with the inprog privilege, and one without.
- assert.commandWorked(
- adminDB.runCommand({createUser: "admin", pwd: "pwd", roles: ["root"]}));
- assert(adminDB.auth("admin", "pwd"));
-
- assert.commandWorked(adminDB.runCommand({
- createRole: "role_inprog",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
- }));
-
- assert.commandWorked(adminDB.runCommand({
- createUser: "user_inprog",
- pwd: "pwd",
- roles: ["readWriteAnyDatabase", "role_inprog"]
- }));
-
- assert.commandWorked(adminDB.runCommand(
- {createUser: "user_no_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase"]}));
+// Restarts a replset with a different set of parameters. Explicitly set the keyFile to null,
+// since if ReplSetTest#stopSet sees a keyFile property, it attempts to auth before dbhash
+// checks.
+function restartReplSet(replSet, newOpts) {
+ const numNodes = replSet.nodeList().length;
+ for (let n = 0; n < numNodes; n++) {
+ replSet.restart(n, newOpts);
}
+ replSet.keyFile = newOpts.keyFile;
+ return replSet.getPrimary();
+}
+// Restarts a cluster with a different set of parameters.
+function restartCluster(st, newOpts) {
+ restartReplSet(st.configRS, newOpts);
+ for (let i = 0; i < stParams.shards; i++) {
+ restartReplSet(st[`rs${i}`], newOpts);
+ }
+ st.restartMongos(0, Object.assign(newOpts, {restart: true}));
+ st.keyFile = newOpts.keyFile;
+ // Re-link the cluster components.
+ shardConn = st.rs0.getPrimary();
+ mongosConn = st.s;
+ shardRS = st.rs0;
+ clusterTestDB = mongosConn.getDB(jsTestName());
+ clusterAdminDB = mongosConn.getDB("admin");
+ shardTestDB = shardConn.getDB(jsTestName());
+ shardAdminDB = shardConn.getDB("admin");
+}
- // Create necessary users at both cluster and shard-local level.
- createUsers(shardConn);
- createUsers(mongosConn);
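+// Authenticates as the given user on each shard primary and runs 'cmdObj' against 'dbName',
+// asserting that the command succeeds.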
+function runCommandOnAllPrimaries({dbName, cmdObj, username, password}) {
+ for (let i = 0; i < stParams.shards; i++) {
+ const rsAdminDB = st[`rs${i}`].getPrimary().getDB("admin");
+ rsAdminDB.auth(username, password);
+ assert.commandWorked(rsAdminDB.getSiblingDB(dbName).runCommand(cmdObj));
+ }
+}
+
+// Functions to support running an operation in a parallel shell for testing allUsers behaviour.
+function runInParallelShell({conn, testfunc, username, password}) {
+ TestData.aggCurOpTest = testfunc;
+ TestData.aggCurOpUser = username;
+ TestData.aggCurOpPwd = password;
+
+ runCommandOnAllPrimaries({
+ dbName: "admin",
+ username: username,
+ password: password,
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}
+ });
+
+ testfunc = function() {
+ db.getSiblingDB("admin").auth(TestData.aggCurOpUser, TestData.aggCurOpPwd);
+ TestData.aggCurOpTest();
+ db.getSiblingDB("admin").logout();
+ };
- // Create a test database and some dummy data on rs0.
- assert(clusterAdminDB.auth("admin", "pwd"));
+ return startParallelShell(testfunc, conn.port);
+}
- for (let i = 0; i < 5; i++) {
- assert.writeOK(clusterTestDB.test.insert({_id: i, a: i}));
- }
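+// Polls $currentOp on 'conn' until exactly one entry matches 'currentOpAggFilter', then
+// returns that entry. 'curOpSpec' defaults to {allUsers: true}.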
+function assertCurrentOpHasSingleMatchingEntry({conn, currentOpAggFilter, curOpSpec}) {
+ curOpSpec = (curOpSpec || {allUsers: true});
- st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
+ const connAdminDB = conn.getDB("admin");
- // Restarts a replset with a different set of parameters. Explicitly set the keyFile to null,
- // since if ReplSetTest#stopSet sees a keyFile property, it attempts to auth before dbhash
- // checks.
- function restartReplSet(replSet, newOpts) {
- const numNodes = replSet.nodeList().length;
- for (let n = 0; n < numNodes; n++) {
- replSet.restart(n, newOpts);
- }
- replSet.keyFile = newOpts.keyFile;
- return replSet.getPrimary();
- }
- // Restarts a cluster with a different set of parameters.
- function restartCluster(st, newOpts) {
- restartReplSet(st.configRS, newOpts);
- for (let i = 0; i < stParams.shards; i++) {
- restartReplSet(st[`rs${i}`], newOpts);
- }
- st.restartMongos(0, Object.assign(newOpts, {restart: true}));
- st.keyFile = newOpts.keyFile;
- // Re-link the cluster components.
- shardConn = st.rs0.getPrimary();
- mongosConn = st.s;
- shardRS = st.rs0;
- clusterTestDB = mongosConn.getDB(jsTestName());
- clusterAdminDB = mongosConn.getDB("admin");
- shardTestDB = shardConn.getDB(jsTestName());
- shardAdminDB = shardConn.getDB("admin");
- }
+ let curOpResult;
- function runCommandOnAllPrimaries({dbName, cmdObj, username, password}) {
- for (let i = 0; i < stParams.shards; i++) {
- const rsAdminDB = st[`rs${i}`].getPrimary().getDB("admin");
- rsAdminDB.auth(username, password);
- assert.commandWorked(rsAdminDB.getSiblingDB(dbName).runCommand(cmdObj));
- }
- }
+ assert.soon(
+ function() {
+ curOpResult =
+ connAdminDB.aggregate([{$currentOp: curOpSpec}, {$match: currentOpAggFilter}])
+ .toArray();
- // Functions to support running an operation in a parallel shell for testing allUsers behaviour.
- function runInParallelShell({conn, testfunc, username, password}) {
- TestData.aggCurOpTest = testfunc;
- TestData.aggCurOpUser = username;
- TestData.aggCurOpPwd = password;
-
- runCommandOnAllPrimaries({
- dbName: "admin",
- username: username,
- password: password,
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}
+ return curOpResult.length === 1;
+ },
+ function() {
+ return "Failed to find operation " + tojson(currentOpAggFilter) +
+ " in $currentOp output: " + tojson(curOpResult);
});
- testfunc = function() {
- db.getSiblingDB("admin").auth(TestData.aggCurOpUser, TestData.aggCurOpPwd);
- TestData.aggCurOpTest();
- db.getSiblingDB("admin").logout();
- };
-
- return startParallelShell(testfunc, conn.port);
- }
+ return curOpResult[0];
+}
- function assertCurrentOpHasSingleMatchingEntry({conn, currentOpAggFilter, curOpSpec}) {
- curOpSpec = (curOpSpec || {allUsers: true});
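+// Releases the 'setYieldAllLocksHang' failpoint on all shard primaries, then joins the
+// parallel shell via the 'awaitShell' handle.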
+function waitForParallelShell({conn, username, password, awaitShell}) {
+ runCommandOnAllPrimaries({
+ dbName: "admin",
+ username: username,
+ password: password,
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
- const connAdminDB = conn.getDB("admin");
+ awaitShell();
+}
+
+// Generic function for running getMore on a $currentOp aggregation cursor and returning the
+// command response.
+function getMoreTest({conn, curOpSpec, getMoreBatchSize}) {
+ // Ensure that there are some other connections present so that the result set is larger
+ // than 1 $currentOp entry.
+ const otherConns = [new Mongo(conn.host), new Mongo(conn.host)];
+ curOpSpec = Object.assign({idleConnections: true}, (curOpSpec || {}));
+
+ // Log the other connections in as user_no_inprog so that they will show up for user_inprog
+ // with {allUsers: true} and user_no_inprog with {allUsers: false}.
+ for (let otherConn of otherConns) {
+ assert(otherConn.getDB("admin").auth("user_no_inprog", "pwd"));
+ }
- let curOpResult;
+ const connAdminDB = conn.getDB("admin");
- assert.soon(
- function() {
- curOpResult =
- connAdminDB.aggregate([{$currentOp: curOpSpec}, {$match: currentOpAggFilter}])
- .toArray();
+ const aggCmdRes = assert.commandWorked(connAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {batchSize: 0}}));
+ assert.neq(aggCmdRes.cursor.id, 0);
- return curOpResult.length === 1;
- },
- function() {
- return "Failed to find operation " + tojson(currentOpAggFilter) +
- " in $currentOp output: " + tojson(curOpResult);
- });
+ return connAdminDB.runCommand({
+ getMore: aggCmdRes.cursor.id,
+ collection: getCollectionNameFromFullNamespace(aggCmdRes.cursor.ns),
+ batchSize: (getMoreBatchSize || 100)
+ });
+}
- return curOpResult[0];
- }
+//
+// Common tests.
+//
- function waitForParallelShell({conn, username, password, awaitShell}) {
- runCommandOnAllPrimaries({
- dbName: "admin",
- username: username,
- password: password,
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
+// Runs a suite of tests for behaviour common to both the replica set and cluster levels.
+function runCommonTests(conn, curOpSpec) {
+ const testDB = conn.getDB(jsTestName());
+ const adminDB = conn.getDB("admin");
+ curOpSpec = (curOpSpec || {});
- awaitShell();
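+    // Returns a copy of 'curOpSpec' with 'spec' merged in, leaving the original spec untouched.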
+ function addToSpec(spec) {
+ return Object.assign({}, curOpSpec, spec);
}
- // Generic function for running getMore on a $currentOp aggregation cursor and returning the
- // command response.
- function getMoreTest({conn, curOpSpec, getMoreBatchSize}) {
- // Ensure that there are some other connections present so that the result set is larger
- // than 1 $currentOp entry.
- const otherConns = [new Mongo(conn.host), new Mongo(conn.host)];
- curOpSpec = Object.assign({idleConnections: true}, (curOpSpec || {}));
-
- // Log the other connections in as user_no_inprog so that they will show up for user_inprog
- // with {allUsers: true} and user_no_inprog with {allUsers: false}.
- for (let otherConn of otherConns) {
- assert(otherConn.getDB("admin").auth("user_no_inprog", "pwd"));
- }
+ const isLocalMongosCurOp = (conn == mongosConn && curOpSpec.localOps);
+ const isRemoteShardCurOp = (conn == mongosConn && !curOpSpec.localOps);
- const connAdminDB = conn.getDB("admin");
+ // Test that an unauthenticated connection cannot run $currentOp even with {allUsers:
+ // false}.
+ assert(adminDB.logout());
- const aggCmdRes = assert.commandWorked(connAdminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {batchSize: 0}}));
- assert.neq(aggCmdRes.cursor.id, 0);
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: false})}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- return connAdminDB.runCommand({
- getMore: aggCmdRes.cursor.id,
- collection: getCollectionNameFromFullNamespace(aggCmdRes.cursor.ns),
- batchSize: (getMoreBatchSize || 100)
- });
- }
+ // Test that an unauthenticated connection cannot run the currentOp command even with
+ // {$ownOps: true}.
+ assert.commandFailedWithCode(adminDB.currentOp({$ownOps: true}), ErrorCodes.Unauthorized);
//
- // Common tests.
+ // Authenticate as user_no_inprog.
//
+ assert(adminDB.logout());
+ assert(adminDB.auth("user_no_inprog", "pwd"));
- // Runs a suite of tests for behaviour common to both the replica set and cluster levels.
- function runCommonTests(conn, curOpSpec) {
- const testDB = conn.getDB(jsTestName());
- const adminDB = conn.getDB("admin");
- curOpSpec = (curOpSpec || {});
+ // Test that $currentOp fails with {allUsers: true} for a user without the "inprog"
+ // privilege.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- function addToSpec(spec) {
- return Object.assign({}, curOpSpec, spec);
- }
+ // Test that the currentOp command fails with {ownOps: false} for a user without the
+ // "inprog" privilege.
+ assert.commandFailedWithCode(adminDB.currentOp({$ownOps: false}), ErrorCodes.Unauthorized);
- const isLocalMongosCurOp = (conn == mongosConn && curOpSpec.localOps);
- const isRemoteShardCurOp = (conn == mongosConn && !curOpSpec.localOps);
-
- // Test that an unauthenticated connection cannot run $currentOp even with {allUsers:
- // false}.
- assert(adminDB.logout());
-
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: false})}], cursor: {}}),
- ErrorCodes.Unauthorized);
-
- // Test that an unauthenticated connection cannot run the currentOp command even with
- // {$ownOps: true}.
- assert.commandFailedWithCode(adminDB.currentOp({$ownOps: true}), ErrorCodes.Unauthorized);
-
- //
- // Authenticate as user_no_inprog.
- //
- assert(adminDB.logout());
- assert(adminDB.auth("user_no_inprog", "pwd"));
-
- // Test that $currentOp fails with {allUsers: true} for a user without the "inprog"
- // privilege.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}),
- ErrorCodes.Unauthorized);
-
- // Test that the currentOp command fails with {ownOps: false} for a user without the
- // "inprog" privilege.
- assert.commandFailedWithCode(adminDB.currentOp({$ownOps: false}), ErrorCodes.Unauthorized);
-
- // Test that {aggregate: 1} fails when the first stage in the pipeline is not $currentOp.
- assert.commandFailedWithCode(
- adminDB.runCommand({aggregate: 1, pipeline: [{$match: {}}], cursor: {}}),
- ErrorCodes.InvalidNamespace);
-
- //
- // Authenticate as user_inprog.
- //
- assert(adminDB.logout());
- assert(adminDB.auth("user_inprog", "pwd"));
-
- // Test that $currentOp fails when it is not the first stage in the pipeline. We use two
- // $currentOp stages since any other stage in the initial position will trip the {aggregate:
- // 1} namespace check.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {}}, {$currentOp: curOpSpec}], cursor: {}}),
- 40602);
-
- // Test that $currentOp fails when run on admin without {aggregate: 1}.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {aggregate: "collname", pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
- ErrorCodes.InvalidNamespace);
-
- // Test that $currentOp fails when run as {aggregate: 1} on a database other than admin.
- assert.commandFailedWithCode(
- testDB.runCommand({aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
- ErrorCodes.InvalidNamespace);
-
- // Test that the currentOp command fails when run directly on a database other than admin.
- assert.commandFailedWithCode(testDB.runCommand({currentOp: 1}), ErrorCodes.Unauthorized);
-
- // Test that the currentOp command helper succeeds when run on a database other than admin.
- // This is because the currentOp shell helper redirects the command to the admin database.
- assert.commandWorked(testDB.currentOp());
-
- // Test that $currentOp and the currentOp command accept all numeric types.
- const ones = [1, 1.0, NumberInt(1), NumberLong(1), NumberDecimal(1)];
-
- for (let one of ones) {
- assert.commandWorked(adminDB.runCommand(
- {aggregate: one, pipeline: [{$currentOp: curOpSpec}], cursor: {}}));
-
- assert.commandWorked(adminDB.runCommand({currentOp: one, $ownOps: true}));
- }
+ // Test that {aggregate: 1} fails when the first stage in the pipeline is not $currentOp.
+ assert.commandFailedWithCode(
+ adminDB.runCommand({aggregate: 1, pipeline: [{$match: {}}], cursor: {}}),
+ ErrorCodes.InvalidNamespace);
- // Test that $currentOp with {allUsers: true} succeeds for a user with the "inprog"
- // privilege.
- assert.commandWorked(adminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}));
-
- // Test that the currentOp command with {$ownOps: false} succeeds for a user with the
- // "inprog" privilege.
- assert.commandWorked(adminDB.currentOp({$ownOps: false}));
-
- // Test that $currentOp succeeds if local readConcern is specified.
- assert.commandWorked(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: curOpSpec}],
- readConcern: {level: "local"},
- cursor: {}
- }));
-
- // Test that $currentOp fails if a non-local readConcern is specified for any data-bearing
- // target.
- const linearizableAggCmd = {
- aggregate: 1,
- pipeline: [{$currentOp: curOpSpec}],
- readConcern: {level: "linearizable"},
- cursor: {}
- };
- assert.commandFailedWithCode(adminDB.runCommand(linearizableAggCmd),
- ErrorCodes.InvalidOptions);
-
- // Test that {idleConnections: false} returns only active connections.
- const idleConn = new Mongo(conn.host);
-
- assert.eq(adminDB
- .aggregate([
- {$currentOp: addToSpec({allUsers: true, idleConnections: false})},
- {$match: {active: false}}
- ])
- .itcount(),
- 0);
+ //
+ // Authenticate as user_inprog.
+ //
+ assert(adminDB.logout());
+ assert(adminDB.auth("user_inprog", "pwd"));
- // Test that the currentOp command with {$all: false} returns only active connections.
- assert.eq(adminDB.currentOp({$ownOps: false, $all: false, active: false}).inprog.length, 0);
+ // Test that $currentOp fails when it is not the first stage in the pipeline. We use two
+ // $currentOp stages since any other stage in the initial position will trip the {aggregate:
+ // 1} namespace check.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {}}, {$currentOp: curOpSpec}], cursor: {}}),
+ 40602);
- // Test that {idleConnections: true} returns inactive connections.
- assert.gte(adminDB
- .aggregate([
- {$currentOp: addToSpec({allUsers: true, idleConnections: true})},
- {$match: {active: false}}
- ])
- .itcount(),
- 1);
+ // Test that $currentOp fails when run on admin without {aggregate: 1}.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {aggregate: "collname", pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
+ ErrorCodes.InvalidNamespace);
- // Test that the currentOp command with {$all: true} returns inactive connections.
- assert.gte(adminDB.currentOp({$ownOps: false, $all: true, active: false}).inprog.length, 1);
+ // Test that $currentOp fails when run as {aggregate: 1} on a database other than admin.
+ assert.commandFailedWithCode(
+ testDB.runCommand({aggregate: 1, pipeline: [{$currentOp: curOpSpec}], cursor: {}}),
+ ErrorCodes.InvalidNamespace);
- // Test that collation rules apply to matches on $currentOp output.
- const matchField =
- (isRemoteShardCurOp ? "cursor.originatingCommand.comment" : "command.comment");
- const numExpectedMatches = (isRemoteShardCurOp ? stParams.shards : 1);
+ // Test that the currentOp command fails when run directly on a database other than admin.
+ assert.commandFailedWithCode(testDB.runCommand({currentOp: 1}), ErrorCodes.Unauthorized);
- assert.eq(
- adminDB
- .aggregate(
- [{$currentOp: curOpSpec}, {$match: {[matchField]: "AGG_currént_op_COLLATION"}}],
- {
- collation: {locale: "en_US", strength: 1}, // Case and diacritic insensitive.
- comment: "agg_current_op_collation"
- })
- .itcount(),
- numExpectedMatches);
-
- // Test that $currentOp output can be processed by $facet subpipelines.
- assert.eq(adminDB
- .aggregate(
- [
- {$currentOp: curOpSpec},
- {
- $facet: {
- testFacet: [
- {$match: {[matchField]: "agg_current_op_facets"}},
- {$count: "count"}
- ]
- }
- },
- {$unwind: "$testFacet"},
- {$replaceRoot: {newRoot: "$testFacet"}}
- ],
- {comment: "agg_current_op_facets"})
- .next()
- .count,
- numExpectedMatches);
-
- // Test that $currentOp is explainable.
- const explainPlan = assert.commandWorked(adminDB.runCommand({
- aggregate: 1,
- pipeline: [
- {$currentOp: addToSpec({idleConnections: true, allUsers: false})},
- {$match: {desc: "test"}}
- ],
- explain: true
- }));
-
- let expectedStages =
- [{$currentOp: {idleConnections: true}}, {$match: {desc: {$eq: "test"}}}];
-
- if (isRemoteShardCurOp) {
- assert.docEq(explainPlan.splitPipeline.shardsPart, expectedStages);
- for (let i = 0; i < stParams.shards; i++) {
- let shardName = st["rs" + i].name;
- assert.docEq(explainPlan.shards[shardName].stages, expectedStages);
- }
- } else if (isLocalMongosCurOp) {
- expectedStages[0].$currentOp.localOps = true;
- assert.docEq(explainPlan.mongos.stages, expectedStages);
- } else {
- assert.docEq(explainPlan.stages, expectedStages);
- }
+ // Test that the currentOp command helper succeeds when run on a database other than admin.
+ // This is because the currentOp shell helper redirects the command to the admin database.
+ assert.commandWorked(testDB.currentOp());
- // Test that a user with the inprog privilege can run getMore on a $currentOp aggregation
- // cursor which they created with {allUsers: true}.
- let getMoreCmdRes = assert.commandWorked(
- getMoreTest({conn: conn, curOpSpec: {allUsers: true}, getMoreBatchSize: 1}));
-
- // Test that a user without the inprog privilege cannot run getMore on a $currentOp
- // aggregation cursor created by a user with {allUsers: true}.
- assert(adminDB.logout());
- assert(adminDB.auth("user_no_inprog", "pwd"));
-
- assert.neq(getMoreCmdRes.cursor.id, 0);
- assert.commandFailedWithCode(adminDB.runCommand({
- getMore: getMoreCmdRes.cursor.id,
- collection: getCollectionNameFromFullNamespace(getMoreCmdRes.cursor.ns),
- batchSize: 100
- }),
- ErrorCodes.Unauthorized);
- }
+ // Test that $currentOp and the currentOp command accept all numeric types.
+ const ones = [1, 1.0, NumberInt(1), NumberLong(1), NumberDecimal(1)];
- // Run the common tests on a shard, through mongoS, and on mongoS with 'localOps' enabled.
- runCommonTests(shardConn);
- runCommonTests(mongosConn);
- runCommonTests(mongosConn, {localOps: true});
+ for (let one of ones) {
+ assert.commandWorked(
+ adminDB.runCommand({aggregate: one, pipeline: [{$currentOp: curOpSpec}], cursor: {}}));
- //
- // mongoS specific tests.
- //
+ assert.commandWorked(adminDB.runCommand({currentOp: one, $ownOps: true}));
+ }
- // Test that a user without the inprog privilege cannot run non-local $currentOp via mongoS even
- // if allUsers is false.
- assert(clusterAdminDB.logout());
- assert(clusterAdminDB.auth("user_no_inprog", "pwd"));
+ // Test that $currentOp with {allUsers: true} succeeds for a user with the "inprog"
+ // privilege.
+ assert.commandWorked(adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: addToSpec({allUsers: true})}], cursor: {}}));
- assert.commandFailedWithCode(
- clusterAdminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {allUsers: false}}], cursor: {}}),
- ErrorCodes.Unauthorized);
+ // Test that the currentOp command with {$ownOps: false} succeeds for a user with the
+ // "inprog" privilege.
+ assert.commandWorked(adminDB.currentOp({$ownOps: false}));
- // Test that a user without the inprog privilege cannot run non-local currentOp command via
- // mongoS even if $ownOps is true.
- assert.commandFailedWithCode(clusterAdminDB.currentOp({$ownOps: true}),
- ErrorCodes.Unauthorized);
+ // Test that $currentOp succeeds if local readConcern is specified.
+ assert.commandWorked(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$currentOp: curOpSpec}],
+ readConcern: {level: "local"},
+ cursor: {}
+ }));
- // Test that a non-local $currentOp pipeline via mongoS returns results from all shards, and
- // includes both the shard and host names.
- assert(clusterAdminDB.logout());
- assert(clusterAdminDB.auth("user_inprog", "pwd"));
+ // Test that $currentOp fails if a non-local readConcern is specified for any data-bearing
+ // target.
+ const linearizableAggCmd = {
+ aggregate: 1,
+ pipeline: [{$currentOp: curOpSpec}],
+ readConcern: {level: "linearizable"},
+ cursor: {}
+ };
+ assert.commandFailedWithCode(adminDB.runCommand(linearizableAggCmd), ErrorCodes.InvalidOptions);
- assert.eq(clusterAdminDB
+ // Test that {idleConnections: false} returns only active connections.
+ const idleConn = new Mongo(conn.host);
+
+ assert.eq(adminDB
.aggregate([
- {$currentOp: {allUsers: true, idleConnections: true}},
- {$group: {_id: {shard: "$shard", host: "$host"}}},
- {$sort: {_id: 1}}
+ {$currentOp: addToSpec({allUsers: true, idleConnections: false})},
+ {$match: {active: false}}
])
- .toArray(),
- [
- {_id: {shard: "aggregation_currentop-rs0", host: st.rs0.getPrimary().host}},
- {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
- {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}
- ]);
-
- // Test that a $currentOp pipeline with {localOps:true} returns operations from the mongoS
- // itself rather than the shards.
- assert.eq(clusterAdminDB
- .aggregate(
- [
- {$currentOp: {localOps: true}},
- {
- $match: {
- $expr: {$eq: ["$host", "$clientMetadata.mongos.host"]},
- "command.comment": "mongos_currentop_localOps"
- }
- }
- ],
- {comment: "mongos_currentop_localOps"})
.itcount(),
- 1);
+ 0);
- //
- // localOps tests.
- //
+ // Test that the currentOp command with {$all: false} returns only active connections.
+ assert.eq(adminDB.currentOp({$ownOps: false, $all: false, active: false}).inprog.length, 0);
- // Runs a suite of tests for behaviour common to both replica sets and mongoS with
- // {localOps:true}.
- function runLocalOpsTests(conn) {
- // The 'localOps' parameter is not supported by the currentOp command, so we limit its
- // testing to the replica set in certain cases.
- const connAdminDB = conn.getDB("admin");
- const isMongos = FixtureHelpers.isMongos(connAdminDB);
-
- // Test that a user with the inprog privilege can see another user's ops with
- // {allUsers:true}.
- assert(connAdminDB.logout());
- assert(connAdminDB.auth("user_inprog", "pwd"));
-
- let awaitShell = runInParallelShell({
- testfunc: function() {
- assert.eq(db.getSiblingDB(jsTestName())
- .test.find({})
- .comment("agg_current_op_allusers_test")
- .itcount(),
- 5);
- },
- conn: conn,
- username: "admin",
- password: "pwd"
- });
+ // Test that {idleConnections: true} returns inactive connections.
+ assert.gte(adminDB
+ .aggregate([
+ {$currentOp: addToSpec({allUsers: true, idleConnections: true})},
+ {$match: {active: false}}
+ ])
+ .itcount(),
+ 1);
- assertCurrentOpHasSingleMatchingEntry({
- conn: conn,
- currentOpAggFilter: {"command.comment": "agg_current_op_allusers_test"},
- curOpSpec: {allUsers: true, localOps: true}
- });
+ // Test that the currentOp command with {$all: true} returns inactive connections.
+ assert.gte(adminDB.currentOp({$ownOps: false, $all: true, active: false}).inprog.length, 1);
- // Test that the currentOp command can see another user's operations with {$ownOps: false}.
- // Only test on a replica set since 'localOps' isn't supported by the currentOp command.
- if (!isMongos) {
- assert.eq(
- connAdminDB
- .currentOp({$ownOps: false, "command.comment": "agg_current_op_allusers_test"})
- .inprog.length,
- 1);
- }
+ // Test that collation rules apply to matches on $currentOp output.
+ const matchField =
+ (isRemoteShardCurOp ? "cursor.originatingCommand.comment" : "command.comment");
+ const numExpectedMatches = (isRemoteShardCurOp ? stParams.shards : 1);
- // Test that $currentOp succeeds with {allUsers: false} for a user without the "inprog"
- // privilege.
- assert(connAdminDB.logout());
- assert(connAdminDB.auth("user_no_inprog", "pwd"));
-
- assert.commandWorked(connAdminDB.runCommand({
- aggregate: 1,
- pipeline: [{$currentOp: {allUsers: false, localOps: true}}],
- cursor: {}
- }));
-
- // Test that the currentOp command succeeds with {$ownOps: true} for a user without the
- // "inprog" privilege. Because currentOp does not support the 'localOps' parameter, we only
- // perform this test in the replica set case.
- if (!isMongos) {
- assert.commandWorked(connAdminDB.currentOp({$ownOps: true}));
- }
-
- // Test that a user without the inprog privilege cannot see another user's operations.
- assert.eq(connAdminDB
- .aggregate([
- {$currentOp: {allUsers: false, localOps: true}},
- {$match: {"command.comment": "agg_current_op_allusers_test"}}
- ])
- .itcount(),
- 0);
-
- // Test that a user without the inprog privilege cannot see another user's operations via
- // the currentOp command. Limit this test to the replica set case due to the absence of a
- // 'localOps' parameter for the currentOp command.
- if (!isMongos) {
- assert.eq(
- connAdminDB
- .currentOp({$ownOps: true, "command.comment": "agg_current_op_allusers_test"})
- .inprog.length,
- 0);
- }
-
- // Release the failpoint and wait for the parallel shell to complete.
- waitForParallelShell(
- {conn: conn, username: "admin", password: "pwd", awaitShell: awaitShell});
-
- // Test that a user without the inprog privilege can run getMore on a $currentOp cursor
- // which they created with {allUsers: false}.
- assert.commandWorked(
- getMoreTest({conn: conn, curOpSpec: {allUsers: false, localOps: true}}));
- }
+ assert.eq(
+ adminDB
+ .aggregate(
+ [{$currentOp: curOpSpec}, {$match: {[matchField]: "AGG_currént_op_COLLATION"}}], {
+ collation: {locale: "en_US", strength: 1}, // Case and diacritic insensitive.
+ comment: "agg_current_op_collation"
+ })
+ .itcount(),
+ numExpectedMatches);
- // Run the localOps tests for both replset and mongoS.
- runLocalOpsTests(mongosConn);
- runLocalOpsTests(shardConn);
+ // Test that $currentOp output can be processed by $facet subpipelines.
+ assert.eq(adminDB
+ .aggregate(
+ [
+ {$currentOp: curOpSpec},
+ {
+ $facet: {
+ testFacet: [
+ {$match: {[matchField]: "agg_current_op_facets"}},
+ {$count: "count"}
+ ]
+ }
+ },
+ {$unwind: "$testFacet"},
+ {$replaceRoot: {newRoot: "$testFacet"}}
+ ],
+ {comment: "agg_current_op_facets"})
+ .next()
+ .count,
+ numExpectedMatches);
- //
- // Stashed transactions tests.
- //
+ // Test that $currentOp is explainable.
+ const explainPlan = assert.commandWorked(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [
+ {$currentOp: addToSpec({idleConnections: true, allUsers: false})},
+ {$match: {desc: "test"}}
+ ],
+ explain: true
+ }));
- // Test that $currentOp will display stashed transaction locks if 'idleSessions' is true, and
- // will only permit a user to view other users' sessions if the caller possesses the 'inprog'
- // privilege and 'allUsers' is true.
- const userNames = ["user_inprog", "admin", "user_no_inprog"];
- let sessionDBs = [];
- let sessions = [];
-
- // Returns a set of predicates that filter $currentOp for all stashed transactions.
- function sessionFilter() {
- return {
- type: "idleSession",
- active: false,
- opid: {$exists: false},
- desc: "inactive transaction",
- "lsid.id": {$in: sessions.map((session) => session.getSessionId().id)},
- "transaction.parameters.txnNumber": {$gte: 0, $lt: sessions.length},
- };
- }
+ let expectedStages = [{$currentOp: {idleConnections: true}}, {$match: {desc: {$eq: "test"}}}];
- for (let i in userNames) {
- shardAdminDB.logout();
- assert(shardAdminDB.auth(userNames[i], "pwd"));
-
- // Create a session for this user.
- const session = shardAdminDB.getMongo().startSession();
-
- // For each session, start but do not complete a transaction.
- const sessionDB = session.getDatabase(shardTestDB.getName());
- assert.commandWorked(sessionDB.runCommand({
- insert: "test",
- documents: [{_id: `txn-insert-${userNames[i]}-${i}`}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(i),
- startTransaction: true,
- autocommit: false
- }));
- sessionDBs.push(sessionDB);
- sessions.push(session);
-
- // Use $currentOp to confirm that the incomplete transactions have stashed their locks while
- // inactive, and that each user can only view their own sessions with 'allUsers:false'.
- assert.eq(shardAdminDB
- .aggregate([
- {$currentOp: {allUsers: false, idleSessions: true}},
- {$match: sessionFilter()}
- ])
- .itcount(),
- 1);
+ if (isRemoteShardCurOp) {
+ assert.docEq(explainPlan.splitPipeline.shardsPart, expectedStages);
+ for (let i = 0; i < stParams.shards; i++) {
+ let shardName = st["rs" + i].name;
+ assert.docEq(explainPlan.shards[shardName].stages, expectedStages);
+ }
+ } else if (isLocalMongosCurOp) {
+ expectedStages[0].$currentOp.localOps = true;
+ assert.docEq(explainPlan.mongos.stages, expectedStages);
+ } else {
+ assert.docEq(explainPlan.stages, expectedStages);
}
- // Log in as 'user_no_inprog' to verify that the user cannot view other users' sessions via
- // 'allUsers:true'.
- shardAdminDB.logout();
- assert(shardAdminDB.auth("user_no_inprog", "pwd"));
-
- assert.commandFailedWithCode(shardAdminDB.runCommand({
- aggregate: 1,
- cursor: {},
- pipeline: [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}]
+ // Test that a user with the inprog privilege can run getMore on a $currentOp aggregation
+ // cursor which they created with {allUsers: true}.
+ let getMoreCmdRes = assert.commandWorked(
+ getMoreTest({conn: conn, curOpSpec: {allUsers: true}, getMoreBatchSize: 1}));
+
+ // Test that a user without the inprog privilege cannot run getMore on a $currentOp
+ // aggregation cursor created by a user with {allUsers: true}.
+ assert(adminDB.logout());
+ assert(adminDB.auth("user_no_inprog", "pwd"));
+
+ assert.neq(getMoreCmdRes.cursor.id, 0);
+ assert.commandFailedWithCode(adminDB.runCommand({
+ getMore: getMoreCmdRes.cursor.id,
+ collection: getCollectionNameFromFullNamespace(getMoreCmdRes.cursor.ns),
+ batchSize: 100
}),
ErrorCodes.Unauthorized);
+}
+
+// Run the common tests on a shard, through mongoS, and on mongoS with 'localOps' enabled.
+runCommonTests(shardConn);
+runCommonTests(mongosConn);
+runCommonTests(mongosConn, {localOps: true});
+
+//
+// mongoS specific tests.
+//
+
+// Test that a user without the inprog privilege cannot run non-local $currentOp via mongoS even
+// if allUsers is false.
+assert(clusterAdminDB.logout());
+assert(clusterAdminDB.auth("user_no_inprog", "pwd"));
+
+assert.commandFailedWithCode(
+ clusterAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {allUsers: false}}], cursor: {}}),
+ ErrorCodes.Unauthorized);
+
+// Test that a user without the inprog privilege cannot run non-local currentOp command via
+// mongoS even if $ownOps is true.
+assert.commandFailedWithCode(clusterAdminDB.currentOp({$ownOps: true}), ErrorCodes.Unauthorized);
+
+// Test that a non-local $currentOp pipeline via mongoS returns results from all shards, and
+// includes both the shard and host names.
+assert(clusterAdminDB.logout());
+assert(clusterAdminDB.auth("user_inprog", "pwd"));
+
+assert.eq(clusterAdminDB
+ .aggregate([
+ {$currentOp: {allUsers: true, idleConnections: true}},
+ {$group: {_id: {shard: "$shard", host: "$host"}}},
+ {$sort: {_id: 1}}
+ ])
+ .toArray(),
+ [
+ {_id: {shard: "aggregation_currentop-rs0", host: st.rs0.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}
+ ]);
+
+// Test that a $currentOp pipeline with {localOps:true} returns operations from the mongoS
+// itself rather than the shards.
+assert.eq(clusterAdminDB
+ .aggregate(
+ [
+ {$currentOp: {localOps: true}},
+ {
+ $match: {
+ $expr: {$eq: ["$host", "$clientMetadata.mongos.host"]},
+ "command.comment": "mongos_currentop_localOps"
+ }
+ }
+ ],
+ {comment: "mongos_currentop_localOps"})
+ .itcount(),
+ 1);
+
+//
+// localOps tests.
+//
+
+// Runs a suite of tests for behaviour common to both replica sets and mongoS with
+// {localOps:true}.
+function runLocalOpsTests(conn) {
+ // The 'localOps' parameter is not supported by the currentOp command, so we limit its
+ // testing to the replica set in certain cases.
+ const connAdminDB = conn.getDB("admin");
+ const isMongos = FixtureHelpers.isMongos(connAdminDB);
+
+ // Test that a user with the inprog privilege can see another user's ops with
+ // {allUsers:true}.
+ assert(connAdminDB.logout());
+ assert(connAdminDB.auth("user_inprog", "pwd"));
+
+ let awaitShell = runInParallelShell({
+ testfunc: function() {
+ assert.eq(db.getSiblingDB(jsTestName())
+ .test.find({})
+ .comment("agg_current_op_allusers_test")
+ .itcount(),
+ 5);
+ },
+ conn: conn,
+ username: "admin",
+ password: "pwd"
+ });
+
+ assertCurrentOpHasSingleMatchingEntry({
+ conn: conn,
+ currentOpAggFilter: {"command.comment": "agg_current_op_allusers_test"},
+ curOpSpec: {allUsers: true, localOps: true}
+ });
+
+ // Test that the currentOp command can see another user's operations with {$ownOps: false}.
+ // Only test on a replica set since 'localOps' isn't supported by the currentOp command.
+ if (!isMongos) {
+ assert.eq(
+ connAdminDB
+ .currentOp({$ownOps: false, "command.comment": "agg_current_op_allusers_test"})
+ .inprog.length,
+ 1);
+ }
- // Log in as 'user_inprog' to confirm that a user with the 'inprog' privilege can see all three
- // stashed transactions with 'allUsers:true'.
- shardAdminDB.logout();
- assert(shardAdminDB.auth("user_inprog", "pwd"));
+ // Test that $currentOp succeeds with {allUsers: false} for a user without the "inprog"
+ // privilege.
+ assert(connAdminDB.logout());
+ assert(connAdminDB.auth("user_no_inprog", "pwd"));
- assert.eq(
- shardAdminDB
- .aggregate(
- [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}])
- .itcount(),
- 3);
+ assert.commandWorked(connAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {allUsers: false, localOps: true}}], cursor: {}}));
+
+ // Test that the currentOp command succeeds with {$ownOps: true} for a user without the
+ // "inprog" privilege. Because currentOp does not support the 'localOps' parameter, we only
+ // perform this test in the replica set case.
+ if (!isMongos) {
+ assert.commandWorked(connAdminDB.currentOp({$ownOps: true}));
+ }
- // Confirm that the 'idleSessions' parameter defaults to true.
- assert.eq(shardAdminDB.aggregate([{$currentOp: {allUsers: true}}, {$match: sessionFilter()}])
+ // Test that a user without the inprog privilege cannot see another user's operations.
+ assert.eq(connAdminDB
+ .aggregate([
+ {$currentOp: {allUsers: false, localOps: true}},
+ {$match: {"command.comment": "agg_current_op_allusers_test"}}
+ ])
.itcount(),
- 3);
+ 0);
- // Confirm that idleSessions:false omits the stashed locks from the report.
- assert.eq(
- shardAdminDB
- .aggregate(
- [{$currentOp: {allUsers: true, idleSessions: false}}, {$match: sessionFilter()}])
- .itcount(),
- 0);
-
- // Allow all transactions to complete and close the associated sessions.
- for (let i in userNames) {
- assert(shardAdminDB.auth(userNames[i], "pwd"));
- assert.commandWorked(sessionDBs[i].adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(i),
- autocommit: false,
- writeConcern: {w: 'majority'}
- }));
- sessions[i].endSession();
+ // Test that a user without the inprog privilege cannot see another user's operations via
+ // the currentOp command. Limit this test to the replica set case due to the absence of a
+ // 'localOps' parameter for the currentOp command.
+ if (!isMongos) {
+ assert.eq(connAdminDB
+ .currentOp({$ownOps: true, "command.comment": "agg_current_op_allusers_test"})
+ .inprog.length,
+ 0);
}
- //
- // No-auth tests.
- //
+ // Release the failpoint and wait for the parallel shell to complete.
+ waitForParallelShell({conn: conn, username: "admin", password: "pwd", awaitShell: awaitShell});
+
+ // Test that a user without the inprog privilege can run getMore on a $currentOp cursor
+ // which they created with {allUsers: false}.
+ assert.commandWorked(getMoreTest({conn: conn, curOpSpec: {allUsers: false, localOps: true}}));
+}
+
+// Run the localOps tests for both replset and mongoS.
+runLocalOpsTests(mongosConn);
+runLocalOpsTests(shardConn);
+
+//
+// Stashed transactions tests.
+//
+
+// Test that $currentOp will display stashed transaction locks if 'idleSessions' is true, and
+// will only permit a user to view other users' sessions if the caller possesses the 'inprog'
+// privilege and 'allUsers' is true.
+const userNames = ["user_inprog", "admin", "user_no_inprog"];
+let sessionDBs = [];
+let sessions = [];
+
+// Returns a set of predicates that filter $currentOp for all stashed transactions.
+function sessionFilter() {
+ return {
+ type: "idleSession",
+ active: false,
+ opid: {$exists: false},
+ desc: "inactive transaction",
+ "lsid.id": {$in: sessions.map((session) => session.getSessionId().id)},
+ "transaction.parameters.txnNumber": {$gte: 0, $lt: sessions.length},
+ };
+}
- // Restart the cluster with auth disabled.
- restartCluster(st, {keyFile: null});
+for (let i in userNames) {
+ shardAdminDB.logout();
+ assert(shardAdminDB.auth(userNames[i], "pwd"));
- // Test that $currentOp will display all stashed transaction locks by default if auth is
- // disabled, even with 'allUsers:false'.
+ // Create a session for this user.
const session = shardAdminDB.getMongo().startSession();
- // Run an operation prior to starting the transaction and save its operation time.
+ // For each session, start but do not complete a transaction.
const sessionDB = session.getDatabase(shardTestDB.getName());
- const res = assert.commandWorked(sessionDB.runCommand({insert: "test", documents: [{x: 1}]}));
- const operationTime = res.operationTime;
-
- // Set and save the transaction's lifetime. We will use this later to assert that our
- // transaction's expiry time is equal to its start time + lifetime.
- const transactionLifeTime = 10;
- assert.commandWorked(sessionDB.adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
-
- // Start but do not complete a transaction.
assert.commandWorked(sessionDB.runCommand({
insert: "test",
- documents: [{_id: `txn-insert-no-auth`}],
+ documents: [{_id: `txn-insert-${userNames[i]}-${i}`}],
readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
+ txnNumber: NumberLong(i),
startTransaction: true,
autocommit: false
}));
- sessionDBs = [sessionDB];
- sessions = [session];
+ sessionDBs.push(sessionDB);
+ sessions.push(session);
- const timeAfterTransactionStarts = new ISODate();
-
- // Use $currentOp to confirm that the incomplete transaction has stashed its locks.
- assert.eq(shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}])
- .itcount(),
- 1);
-
- // Confirm that idleSessions:false omits the stashed locks from the report.
+ // Use $currentOp to confirm that the incomplete transactions have stashed their locks while
+ // inactive, and that each user can only view their own sessions with 'allUsers:false'.
assert.eq(
shardAdminDB
.aggregate(
- [{$currentOp: {allUsers: false, idleSessions: false}}, {$match: sessionFilter()}])
+ [{$currentOp: {allUsers: false, idleSessions: true}}, {$match: sessionFilter()}])
.itcount(),
- 0);
-
- // Prepare the transaction and ensure the prepareTimestamp is valid.
- const prepareRes = assert.commandWorked(sessionDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(0),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
- assert(prepareRes.prepareTimestamp,
- "prepareTransaction did not return a 'prepareTimestamp': " + tojson(prepareRes));
- assert(prepareRes.prepareTimestamp instanceof Timestamp,
- 'prepareTimestamp was not a Timestamp: ' + tojson(prepareRes));
- assert.neq(prepareRes.prepareTimestamp,
- Timestamp(0, 0),
- "prepareTimestamp cannot be null: " + tojson(prepareRes));
-
- const timeBeforeCurrentOp = new ISODate();
-
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- let currentOp =
- shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}])
- .toArray();
- let transactionDocument = currentOp[0].transaction;
- assert.eq(transactionDocument.parameters.autocommit, false);
- assert.eq(transactionDocument.parameters.readConcern, {level: "snapshot"});
- assert.gte(transactionDocument.readTimestamp, operationTime);
- // We round timeOpenMicros up to the nearest multiple of 1000 to avoid occasional assertion
- // failures caused by timeOpenMicros having microsecond precision while
- // timeBeforeCurrentOp/timeAfterTransactionStarts only have millisecond precision.
- assert.gte(Math.ceil(transactionDocument.timeOpenMicros / 1000) * 1000,
- (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000);
- assert.gte(transactionDocument.timeActiveMicros, 0);
- assert.gte(transactionDocument.timeInactiveMicros, 0);
- assert.gte(transactionDocument.timePreparedMicros, 0);
- // Not worried about its specific value, validate that in general we return some non-zero &
- // valid time greater than epoch time.
- assert.gt(ISODate(transactionDocument.startWallClockTime), ISODate("1970-01-01T00:00:00.000Z"));
- assert.eq(
- ISODate(transactionDocument.expiryTime).getTime(),
- ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000);
-
- // Allow the transactions to complete and close the session. We must commit prepared
- // transactions at a timestamp greater than the prepare timestamp.
- const commitTimestamp =
- Timestamp(prepareRes.prepareTimestamp.getTime(), prepareRes.prepareTimestamp.getInc() + 1);
- assert.commandWorked(sessionDB.adminCommand({
+ 1);
+}
+
+// Log in as 'user_no_inprog' to verify that the user cannot view other users' sessions via
+// 'allUsers:true'.
+shardAdminDB.logout();
+assert(shardAdminDB.auth("user_no_inprog", "pwd"));
+
+assert.commandFailedWithCode(shardAdminDB.runCommand({
+ aggregate: 1,
+ cursor: {},
+ pipeline: [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}]
+}),
+ ErrorCodes.Unauthorized);
+
+// Log in as 'user_inprog' to confirm that a user with the 'inprog' privilege can see all three
+// stashed transactions with 'allUsers:true'.
+shardAdminDB.logout();
+assert(shardAdminDB.auth("user_inprog", "pwd"));
+
+assert.eq(
+ shardAdminDB
+ .aggregate([{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter()}])
+ .itcount(),
+ 3);
+
+// Confirm that the 'idleSessions' parameter defaults to true.
+assert.eq(
+ shardAdminDB.aggregate([{$currentOp: {allUsers: true}}, {$match: sessionFilter()}]).itcount(),
+ 3);
+
+// Confirm that idleSessions:false omits the stashed locks from the report.
+assert.eq(
+ shardAdminDB
+ .aggregate([{$currentOp: {allUsers: true, idleSessions: false}}, {$match: sessionFilter()}])
+ .itcount(),
+ 0);
+
+// Allow all transactions to complete and close the associated sessions.
+for (let i in userNames) {
+ assert(shardAdminDB.auth(userNames[i], "pwd"));
+ assert.commandWorked(sessionDBs[i].adminCommand({
commitTransaction: 1,
- txnNumber: NumberLong(0),
+ txnNumber: NumberLong(i),
autocommit: false,
- writeConcern: {w: 'majority'},
- commitTimestamp: commitTimestamp
+ writeConcern: {w: 'majority'}
}));
- session.endSession();
-
- // Run a set of tests of behaviour common to replset and mongoS when auth is disabled.
- function runNoAuthTests(conn, curOpSpec) {
- // Test that the allUsers parameter is ignored when authentication is disabled.
- // Ensure that there is at least one other connection present.
- const connAdminDB = conn.getDB("admin");
- const otherConn = new Mongo(conn.host);
- curOpSpec = Object.assign({localOps: false}, (curOpSpec || {}));
-
- // Verify that $currentOp displays all operations when auth is disabled regardless of the
- // allUsers parameter, by confirming that we can see non-client system operations when
- // {allUsers: false} is specified.
- assert.gte(
- connAdminDB
- .aggregate([
- {
- $currentOp:
- {allUsers: false, idleConnections: true, localOps: curOpSpec.localOps}
- },
- {$match: {connectionId: {$exists: false}}}
- ])
- .itcount(),
- 1);
-
- // Verify that the currentOp command displays all operations when auth is disabled
- // regardless of
- // the $ownOps parameter, by confirming that we can see non-client system operations when
- // {$ownOps: true} is specified.
- assert.gte(
- connAdminDB.currentOp({$ownOps: true, $all: true, connectionId: {$exists: false}})
- .inprog.length,
- 1);
-
- // Test that a user can run getMore on a $currentOp cursor when authentication is disabled.
- assert.commandWorked(
- getMoreTest({conn: conn, curOpSpec: {allUsers: true, localOps: curOpSpec.localOps}}));
- }
-
- runNoAuthTests(shardConn);
- runNoAuthTests(mongosConn);
- runNoAuthTests(mongosConn, {localOps: true});
-
- //
- // Replset specific tests.
- //
-
- // Take the replica set out of the cluster.
- shardConn = restartReplSet(st.rs0, {shardsvr: null});
- shardTestDB = shardConn.getDB(jsTestName());
- shardAdminDB = shardConn.getDB("admin");
-
- // Test that the host field is present and the shard field is absent when run on mongoD.
- assert.eq(shardAdminDB
- .aggregate([
- {$currentOp: {allUsers: true, idleConnections: true}},
- {$group: {_id: {shard: "$shard", host: "$host"}}}
- ])
- .toArray(),
- [
- {_id: {host: shardConn.host}},
- ]);
+ sessions[i].endSession();
+}
+
+//
+// No-auth tests.
+//
+
+// Restart the cluster with auth disabled.
+restartCluster(st, {keyFile: null});
+
+// Test that $currentOp will display all stashed transaction locks by default if auth is
+// disabled, even with 'allUsers:false'.
+const session = shardAdminDB.getMongo().startSession();
+
+// Run an operation prior to starting the transaction and save its operation time.
+const sessionDB = session.getDatabase(shardTestDB.getName());
+const res = assert.commandWorked(sessionDB.runCommand({insert: "test", documents: [{x: 1}]}));
+const operationTime = res.operationTime;
+
+// Set and save the transaction's lifetime. We will use this later to assert that our
+// transaction's expiry time is equal to its start time + lifetime.
+const transactionLifeTime = 10;
+assert.commandWorked(sessionDB.adminCommand(
+ {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
+
+// Start but do not complete a transaction.
+assert.commandWorked(sessionDB.runCommand({
+ insert: "test",
+ documents: [{_id: `txn-insert-no-auth`}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ startTransaction: true,
+ autocommit: false
+}));
+sessionDBs = [sessionDB];
+sessions = [session];
+
+const timeAfterTransactionStarts = new ISODate();
+
+// Use $currentOp to confirm that the incomplete transaction has stashed its locks.
+assert.eq(
+ shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}]).itcount(),
+ 1);
+
+// Confirm that idleSessions:false omits the stashed locks from the report.
+assert.eq(shardAdminDB
+ .aggregate(
+ [{$currentOp: {allUsers: false, idleSessions: false}}, {$match: sessionFilter()}])
+ .itcount(),
+ 0);
+
+// Prepare the transaction and ensure the prepareTimestamp is valid.
+const prepareRes = assert.commandWorked(sessionDB.adminCommand({
+ prepareTransaction: 1,
+ txnNumber: NumberLong(0),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
+assert(prepareRes.prepareTimestamp,
+ "prepareTransaction did not return a 'prepareTimestamp': " + tojson(prepareRes));
+assert(prepareRes.prepareTimestamp instanceof Timestamp,
+ 'prepareTimestamp was not a Timestamp: ' + tojson(prepareRes));
+assert.neq(prepareRes.prepareTimestamp,
+ Timestamp(0, 0),
+ "prepareTimestamp cannot be null: " + tojson(prepareRes));
+
+const timeBeforeCurrentOp = new ISODate();
+
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+let currentOp =
+ shardAdminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: sessionFilter()}]).toArray();
+let transactionDocument = currentOp[0].transaction;
+assert.eq(transactionDocument.parameters.autocommit, false);
+assert.eq(transactionDocument.parameters.readConcern, {level: "snapshot"});
+assert.gte(transactionDocument.readTimestamp, operationTime);
+// We round timeOpenMicros up to the nearest multiple of 1000 to avoid occasional assertion
+// failures caused by timeOpenMicros having microsecond precision while
+// timeBeforeCurrentOp/timeAfterTransactionStarts only have millisecond precision.
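+// For example, a timeOpenMicros of 1,234,567 rounds up to 1,235,000, which is then compared
+// against the millisecond-precision wall-clock delta scaled to microseconds (e.g. 1,234 ms ->
+// 1,234,000 us).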
+assert.gte(Math.ceil(transactionDocument.timeOpenMicros / 1000) * 1000,
+ (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000);
+assert.gte(transactionDocument.timeActiveMicros, 0);
+assert.gte(transactionDocument.timeInactiveMicros, 0);
+assert.gte(transactionDocument.timePreparedMicros, 0);
+// We are not concerned with the specific value; just validate that it is a non-zero, valid
+// time greater than the epoch.
+assert.gt(ISODate(transactionDocument.startWallClockTime), ISODate("1970-01-01T00:00:00.000Z"));
+assert.eq(ISODate(transactionDocument.expiryTime).getTime(),
+ ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000);
+
+// Allow the transactions to complete and close the session. We must commit prepared
+// transactions at a timestamp greater than the prepare timestamp.
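+// Incrementing the 'inc' component of the prepareTimestamp yields a strictly greater
+// Timestamp, since Timestamps are ordered by their seconds component and then by 'inc'.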
+const commitTimestamp =
+ Timestamp(prepareRes.prepareTimestamp.getTime(), prepareRes.prepareTimestamp.getInc() + 1);
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(0),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+ commitTimestamp: commitTimestamp
+}));
+session.endSession();
+
+// Run a set of tests of behaviour common to replset and mongoS when auth is disabled.
+function runNoAuthTests(conn, curOpSpec) {
+ // Test that the allUsers parameter is ignored when authentication is disabled.
+ // Ensure that there is at least one other connection present.
+ const connAdminDB = conn.getDB("admin");
+ const otherConn = new Mongo(conn.host);
+ curOpSpec = Object.assign({localOps: false}, (curOpSpec || {}));
+
+ // Verify that $currentOp displays all operations when auth is disabled regardless of the
+ // allUsers parameter, by confirming that we can see non-client system operations when
+ // {allUsers: false} is specified.
+ assert.gte(
+ connAdminDB
+ .aggregate([
+ {
+ $currentOp:
+ {allUsers: false, idleConnections: true, localOps: curOpSpec.localOps}
+ },
+ {$match: {connectionId: {$exists: false}}}
+ ])
+ .itcount(),
+ 1);
- // Test that attempting to 'spoof' a sharded request on non-shardsvr mongoD fails.
- assert.commandFailedWithCode(
- shardAdminDB.runCommand(
- {aggregate: 1, pipeline: [{$currentOp: {}}], fromMongos: true, cursor: {}}),
- 40465);
-
- // Test that an operation which is at the BSON user size limit does not throw an error when the
- // currentOp metadata is added to the output document.
- const bsonUserSizeLimit = assert.commandWorked(shardAdminDB.isMaster()).maxBsonObjectSize;
-
- let aggPipeline = [
- {$currentOp: {}},
- {
- $match: {
- $or: [
- {
+    // Verify that the currentOp command displays all operations when auth is disabled,
+    // regardless of the $ownOps parameter, by confirming that we can see non-client system
+    // operations when {$ownOps: true} is specified.
+ assert.gte(connAdminDB.currentOp({$ownOps: true, $all: true, connectionId: {$exists: false}})
+ .inprog.length,
+ 1);
+
+ // Test that a user can run getMore on a $currentOp cursor when authentication is disabled.
+ assert.commandWorked(
+ getMoreTest({conn: conn, curOpSpec: {allUsers: true, localOps: curOpSpec.localOps}}));
+}
+
+runNoAuthTests(shardConn);
+runNoAuthTests(mongosConn);
+runNoAuthTests(mongosConn, {localOps: true});
+
+//
+// Replset specific tests.
+//
+
+// Take the replica set out of the cluster.
+shardConn = restartReplSet(st.rs0, {shardsvr: null});
+shardTestDB = shardConn.getDB(jsTestName());
+shardAdminDB = shardConn.getDB("admin");
+
+// Test that the host field is present and the shard field is absent when run on mongoD.
+assert.eq(shardAdminDB
+ .aggregate([
+ {$currentOp: {allUsers: true, idleConnections: true}},
+ {$group: {_id: {shard: "$shard", host: "$host"}}}
+ ])
+ .toArray(),
+ [
+ {_id: {host: shardConn.host}},
+ ]);
+
+// Test that attempting to 'spoof' a sharded request on non-shardsvr mongoD fails.
+assert.commandFailedWithCode(
+ shardAdminDB.runCommand(
+ {aggregate: 1, pipeline: [{$currentOp: {}}], fromMongos: true, cursor: {}}),
+ 40465);
+
+// Test that an operation which is at the BSON user size limit does not throw an error when the
+// currentOp metadata is added to the output document.
+const bsonUserSizeLimit = assert.commandWorked(shardAdminDB.isMaster()).maxBsonObjectSize;
+
+let aggPipeline = [
+ {$currentOp: {}},
+ {
+ $match: {
+ $or: [
+ {
"command.comment": "agg_current_op_bson_limit_test",
"command.$truncated": {$exists: false}
- },
- {padding: ""}
- ]
- }
+ },
+ {padding: ""}
+ ]
}
- ];
+ }
+];
- aggPipeline[1].$match.$or[1].padding =
- "a".repeat(bsonUserSizeLimit - Object.bsonsize(aggPipeline));
+aggPipeline[1].$match.$or[1].padding = "a".repeat(bsonUserSizeLimit - Object.bsonsize(aggPipeline));
- assert.eq(Object.bsonsize(aggPipeline), bsonUserSizeLimit);
+assert.eq(Object.bsonsize(aggPipeline), bsonUserSizeLimit);
- assert.eq(
- shardAdminDB.aggregate(aggPipeline, {comment: "agg_current_op_bson_limit_test"}).itcount(),
- 1);
+assert.eq(
+ shardAdminDB.aggregate(aggPipeline, {comment: "agg_current_op_bson_limit_test"}).itcount(), 1);
- // Test that $currentOp can run while the mongoD is write-locked.
- let awaitShell = startParallelShell(function() {
- assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 300}),
- ErrorCodes.Interrupted);
- }, shardConn.port);
+// Test that $currentOp can run while the mongoD is write-locked.
+let awaitShell = startParallelShell(function() {
+ assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", secs: 300}),
+ ErrorCodes.Interrupted);
+}, shardConn.port);
- const op = assertCurrentOpHasSingleMatchingEntry(
- {conn: shardConn, currentOpAggFilter: {"command.sleep": 1, active: true}});
+const op = assertCurrentOpHasSingleMatchingEntry(
+ {conn: shardConn, currentOpAggFilter: {"command.sleep": 1, active: true}});
- assert.commandWorked(shardAdminDB.killOp(op.opid));
+assert.commandWorked(shardAdminDB.killOp(op.opid));
- awaitShell();
+awaitShell();
- // Add the shard back into the replset so that it can be validated by st.stop().
- shardConn = restartReplSet(st.rs0, {shardsvr: ""});
- st.stop();
+// Add the shard back into the replset so that it can be validated by st.stop().
+shardConn = restartReplSet(st.rs0, {shardsvr: ""});
+st.stop();
})();
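
A minimal sketch of the prepared-transaction commit pattern exercised above, assuming a
session database `sessionDB` on which a transaction with txnNumber 0 has already performed
writes:

    const res = assert.commandWorked(sessionDB.adminCommand(
        {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}));
    // Bumping the increment while keeping the seconds value yields the smallest Timestamp
    // that is still strictly greater than the prepare timestamp.
    const commitTs =
        Timestamp(res.prepareTimestamp.getTime(), res.prepareTimestamp.getInc() + 1);
    assert.commandWorked(sessionDB.adminCommand({
        commitTransaction: 1,
        txnNumber: NumberLong(0),
        autocommit: false,
        commitTimestamp: commitTs
    }));
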
diff --git a/jstests/sharding/aggregation_internal_parameters.js b/jstests/sharding/aggregation_internal_parameters.js
index 2076aa465d8..529a9e00b1f 100644
--- a/jstests/sharding/aggregation_internal_parameters.js
+++ b/jstests/sharding/aggregation_internal_parameters.js
@@ -3,116 +3,116 @@
* parameters that mongoS uses internally when communicating with the shards.
*/
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: ''}});
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Test that command succeeds when no internal options have been specified.
- assert.commandWorked(
- mongosDB.runCommand({aggregate: mongosColl.getName(), pipeline: [], cursor: {}}));
-
- // Test that the command fails if we have 'needsMerge: false' without 'fromMongos'.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: false}),
- ErrorCodes.FailedToParse);
-
- // Test that the command fails if we have 'needsMerge: true' without 'fromMongos'.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: true}),
- ErrorCodes.FailedToParse);
-
- // Test that 'fromMongos: true' cannot be specified in a command sent to mongoS.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: true}),
- 51089);
-
- // Test that 'fromMongos: false' can be specified in a command sent to mongoS.
- assert.commandWorked(mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: false}));
-
- // Test that the command fails if we have 'needsMerge: true' with 'fromMongos: false'.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: false
- }),
- 51089);
-
- // Test that the command fails if we have 'needsMerge: true' with 'fromMongos: true'.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: true
- }),
- 51089);
-
- // Test that 'needsMerge: false' can be specified in a command sent to mongoS along with
- // 'fromMongos: false'.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: false,
- fromMongos: false
- }));
-
- // Test that 'mergeByPBRT: true' cannot be specified in a command sent to mongoS.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: true}),
- 51089);
-
- // Test that 'mergeByPBRT: false' can be specified in a command sent to mongoS.
- assert.commandWorked(mongosDB.runCommand(
- {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: false}));
-
- // Test that the 'exchange' parameter cannot be specified in a command sent to mongoS.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
- }),
- 51028);
-
- // Test that the command fails when all internal parameters have been specified.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: true,
- mergeByPBRT: true,
- exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
- }),
- 51028);
-
- // Test that the command fails when all internal parameters but exchange have been specified.
- assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [],
- cursor: {},
- needsMerge: true,
- fromMongos: true,
- mergeByPBRT: true
- }),
- 51089);
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: ''}});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Test that command succeeds when no internal options have been specified.
+assert.commandWorked(
+ mongosDB.runCommand({aggregate: mongosColl.getName(), pipeline: [], cursor: {}}));
+
+// Test that the command fails if we have 'needsMerge: false' without 'fromMongos'.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: false}),
+ ErrorCodes.FailedToParse);
+
+// Test that the command fails if we have 'needsMerge: true' without 'fromMongos'.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, needsMerge: true}),
+ ErrorCodes.FailedToParse);
+
+// Test that 'fromMongos: true' cannot be specified in a command sent to mongoS.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: true}),
+ 51089);
+
+// Test that 'fromMongos: false' can be specified in a command sent to mongoS.
+assert.commandWorked(mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, fromMongos: false}));
+
+// Test that the command fails if we have 'needsMerge: true' with 'fromMongos: false'.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: false
+}),
+ 51089);
+
+// Test that the command fails if we have 'needsMerge: true' with 'fromMongos: true'.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: true
+}),
+ 51089);
+
+// Test that 'needsMerge: false' can be specified in a command sent to mongoS along with
+// 'fromMongos: false'.
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: false,
+ fromMongos: false
+}));
+
+// Test that 'mergeByPBRT: true' cannot be specified in a command sent to mongoS.
+assert.commandFailedWithCode(
+ mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: true}),
+ 51089);
+
+// Test that 'mergeByPBRT: false' can be specified in a command sent to mongoS.
+assert.commandWorked(mongosDB.runCommand(
+ {aggregate: mongosColl.getName(), pipeline: [], cursor: {}, mergeByPBRT: false}));
+
+// Test that the 'exchange' parameter cannot be specified in a command sent to mongoS.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
+}),
+ 51028);
+
+// Test that the command fails when all internal parameters have been specified.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: true,
+ mergeByPBRT: true,
+ exchange: {policy: 'roundrobin', consumers: NumberInt(2)}
+}),
+ 51028);
+
+// Test that the command fails when all internal parameters but exchange have been specified.
+assert.commandFailedWithCode(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [],
+ cursor: {},
+ needsMerge: true,
+ fromMongos: true,
+ mergeByPBRT: true
+}),
+ 51089);
+
+st.stop();
})();
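
The rejection cases above all follow one pattern; an illustrative consolidation (a sketch,
not part of the test itself), assuming the same `mongosDB` and `mongosColl`:

    const internalParams = [
        {extra: {fromMongos: true}, code: 51089},
        {extra: {mergeByPBRT: true}, code: 51089},
        {extra: {exchange: {policy: 'roundrobin', consumers: NumberInt(2)}}, code: 51028},
    ];
    internalParams.forEach(function(testCase) {
        // Each internal parameter must be rejected when sent by an external client.
        const cmd = Object.assign(
            {aggregate: mongosColl.getName(), pipeline: [], cursor: {}}, testCase.extra);
        assert.commandFailedWithCode(mongosDB.runCommand(cmd), testCase.code);
    });
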
diff --git a/jstests/sharding/aggregations_in_session.js b/jstests/sharding/aggregations_in_session.js
index b2eb82bed3c..456decee662 100644
--- a/jstests/sharding/aggregations_in_session.js
+++ b/jstests/sharding/aggregations_in_session.js
@@ -1,41 +1,41 @@
// Tests running aggregations within a client session. This test was designed to reproduce
// SERVER-33660.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- // Gate this test to transaction supporting engines only as it uses txnNumber.
- let shardDB = st.rs0.getPrimary().getDB("test");
- if (!shardDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- jsTestLog("Do not run on storage engine that does not support transactions");
- st.stop();
- return;
- }
+// Gate this test to transaction supporting engines only as it uses txnNumber.
+let shardDB = st.rs0.getPrimary().getDB("test");
+if (!shardDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ jsTestLog("Do not run on storage engine that does not support transactions");
+ st.stop();
+ return;
+}
- const session = st.s0.getDB("test").getMongo().startSession();
- const mongosColl = session.getDatabase("test")[jsTestName()];
+const session = st.s0.getDB("test").getMongo().startSession();
+const mongosColl = session.getDatabase("test")[jsTestName()];
- // Shard the collection, split it into two chunks, and move the [1, MaxKey] chunk to the other
- // shard. We need chunks distributed across multiple shards in order to force a split pipeline
- // merging on a mongod - otherwise the entire pipeline will be forwarded without a split and
- // without a $mergeCursors stage.
- st.shardColl(mongosColl, {_id: 1}, {_id: 1}, {_id: 1});
- assert.writeOK(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+// Shard the collection, split it into two chunks, and move the [1, MaxKey] chunk to the other
+// shard. We need chunks distributed across multiple shards in order to force a split pipeline
+// merging on a mongod - otherwise the entire pipeline will be forwarded without a split and
+// without a $mergeCursors stage.
+st.shardColl(mongosColl, {_id: 1}, {_id: 1}, {_id: 1});
+assert.writeOK(mongosColl.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- // This assertion will reproduce the hang described in SERVER-33660.
- assert.eq(
- [{_id: 0}, {_id: 1}, {_id: 2}],
- mongosColl
- .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
- .toArray());
+// This assertion will reproduce the hang described in SERVER-33660.
+assert.eq(
+ [{_id: 0}, {_id: 1}, {_id: 2}],
+ mongosColl
+ .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
+ .toArray());
- // Test a couple more aggregations to be sure.
- assert.eq(
- [{_id: 0}, {_id: 1}, {_id: 2}],
- mongosColl.aggregate([{$_internalSplitPipeline: {mergeType: "mongos"}}, {$sort: {_id: 1}}])
- .toArray());
- assert.eq(mongosColl.aggregate([{$sort: {_id: 1}}, {$out: "testing"}]).itcount(), 0);
+// Test a couple more aggregations to be sure.
+assert.eq(
+ [{_id: 0}, {_id: 1}, {_id: 2}],
+ mongosColl.aggregate([{$_internalSplitPipeline: {mergeType: "mongos"}}, {$sort: {_id: 1}}])
+ .toArray());
+assert.eq(mongosColl.aggregate([{$sort: {_id: 1}}, {$out: "testing"}]).itcount(), 0);
- st.stop();
+st.stop();
}());
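
A minimal sketch of the session plumbing this test relies on: collections obtained through a
session-bound database automatically attach the session's logical session id to every command
they send (the itcount of 3 assumes the three documents inserted above):

    const session = st.s0.startSession();
    const sessionColl = session.getDatabase("test")[jsTestName()];
    // Each aggregate is sent with the session's lsid attached on the wire.
    assert.eq(3, sessionColl.aggregate([{$sort: {_id: 1}}]).itcount());
    session.endSession();
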
diff --git a/jstests/sharding/all_config_servers_blackholed_from_mongos.js b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
index d13499cc551..53d6e435dd1 100644
--- a/jstests/sharding/all_config_servers_blackholed_from_mongos.js
+++ b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
@@ -9,41 +9,40 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({
- shards: 2,
- mongos: 1,
- useBridge: true,
- });
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ useBridge: true,
+});
- var testDB = st.s.getDB('BlackHoleDB');
+var testDB = st.s.getDB('BlackHoleDB');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
- assert.writeOK(testDB.ShardedColl.insert({a: 1}));
+assert.writeOK(testDB.ShardedColl.insert({a: 1}));
- jsTest.log('Making all the config servers appear as a blackhole to mongos');
- st._configServers.forEach(function(configSvr) {
- configSvr.discardMessagesFrom(st.s, 1.0);
- });
+jsTest.log('Making all the config servers appear as a blackhole to mongos');
+st._configServers.forEach(function(configSvr) {
+ configSvr.discardMessagesFrom(st.s, 1.0);
+});
- assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
+assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
- // This shouldn't stall
- jsTest.log('Doing read operation on the sharded collection');
- assert.throws(function() {
- testDB.ShardedColl.find({}).maxTimeMS(15000).itcount();
- });
+// This shouldn't stall
+jsTest.log('Doing read operation on the sharded collection');
+assert.throws(function() {
+ testDB.ShardedColl.find({}).maxTimeMS(15000).itcount();
+});
- // This should fail, because the primary is not available
- jsTest.log('Doing write operation on a new database and collection');
- assert.writeError(st.s.getDB('NonExistentDB')
- .TestColl.insert({_id: 0, value: 'This value will never be inserted'},
- {maxTimeMS: 15000}));
-
- st.stop();
+// This should fail, because the primary is not available
+jsTest.log('Doing write operation on a new database and collection');
+assert.writeError(
+ st.s.getDB('NonExistentDB')
+ .TestColl.insert({_id: 0, value: 'This value will never be inserted'}, {maxTimeMS: 15000}));
+st.stop();
}());
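
A recap sketch of the mongobridge primitive used above, assuming the same `st` started with
useBridge: true; discardMessagesFrom(source, probability) makes the bridged node silently drop
messages arriving from `source` with the given probability, so a fractional value
(hypothetical here) would simulate a lossy rather than fully blackholed link:

    st._configServers.forEach(function(configSvr) {
        configSvr.discardMessagesFrom(st.s, 1.0);  // 1.0 == drop every message from mongos
    });
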
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index 6a89bf1508c..68745172568 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -7,72 +7,72 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
- jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
- 'Insert test data to work with');
- assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
- assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
+ 'Insert test data to work with');
+assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
- 'Inserts and queries must work');
- st.configRS.stop(0);
- st.restartMongos(0);
- assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
- assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+st.configRS.stop(0);
+st.restartMongos(0);
+assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
- 'Inserts and queries must work');
- st.configRS.stop(1);
- st.restartMongos(0);
- assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
- assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+st.configRS.stop(1);
+st.restartMongos(0);
+assert.writeOK(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
- 'Only queries will work (no shard primary)');
- st.rs0.stop(0);
- st.restartMongos(0);
- st.s0.setSlaveOk(true);
- assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
+ 'Only queries will work (no shard primary)');
+st.rs0.stop(0);
+st.restartMongos(0);
+st.s0.setSlaveOk(true);
+assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
- jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
- 'MongoS must start, but no operations will work (no shard nodes available)');
- st.rs0.stop(1);
- st.restartMongos(0);
- assert.throws(function() {
- st.s0.getDB('TestDB').TestColl.find().toArray();
- });
+jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
+ 'MongoS must start, but no operations will work (no shard nodes available)');
+st.rs0.stop(1);
+st.restartMongos(0);
+assert.throws(function() {
+ st.s0.getDB('TestDB').TestColl.find().toArray();
+});
- jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' +
- 'Metadata cannot be loaded at all, no operations will work');
- st.configRS.stop(1);
+jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' +
+ 'Metadata cannot be loaded at all, no operations will work');
+st.configRS.stop(1);
- // Instead of restarting mongos, ensure it has no metadata
- assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
+// Instead of restarting mongos, ensure it has no metadata
+assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
- // Throws transport error first and subsequent times when loading config data, not no primary
- for (var i = 0; i < 2; i++) {
- try {
- st.s0.getDB('TestDB').TestColl.findOne();
+// Loading the config data throws a transport error on the first and all subsequent attempts,
+// not a no-primary error.
+for (var i = 0; i < 2; i++) {
+ try {
+ st.s0.getDB('TestDB').TestColl.findOne();
- // Must always throw
- assert(false);
- } catch (e) {
- printjson(e);
+ // Must always throw
+ assert(false);
+ } catch (e) {
+ printjson(e);
- // Make sure we get a transport error, and not a no-primary error
- assert(e.code == 10276 || // Transport error
- e.code == 13328 || // Connect error
- e.code == ErrorCodes.HostUnreachable ||
- e.code == ErrorCodes.FailedToSatisfyReadPreference ||
- e.code == ErrorCodes.ReplicaSetNotFound);
- }
+ // Make sure we get a transport error, and not a no-primary error
+ assert(e.code == 10276 || // Transport error
+ e.code == 13328 || // Connect error
+ e.code == ErrorCodes.HostUnreachable ||
+ e.code == ErrorCodes.FailedToSatisfyReadPreference ||
+ e.code == ErrorCodes.ReplicaSetNotFound);
}
+}
- st.stop();
+st.stop();
}());
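
The "queries only" stage above hinges on secondary reads; a short sketch of the distinction,
assuming a cluster whose only shard primary is down while a secondary remains up:

    st.s0.setSlaveOk(true);  // let routed reads target secondary shard members
    assert.neq(null, st.s0.getDB('TestDB').TestColl.findOne());  // reads still succeed
    assert.writeError(st.s0.getDB('TestDB').TestColl.insert(
        {_id: 1}, {maxTimeMS: 15000}));  // writes require a primary and fail
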
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 6490720cb80..7ecbbb1dc7b 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -7,71 +7,70 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+"use strict";
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- // TODO: SERVER-33597 remove shardAsReplicaSet: false
- const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33597 remove shardAsReplicaSet: false
+const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
- jsTest.log("Insert some data.");
- const nDocs = 100;
- const coll = st.s0.getDB(dbName)[collName];
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = -50; i < 50; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+jsTest.log("Insert some data.");
+const nDocs = 100;
+const coll = st.s0.getDB(dbName)[collName];
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = -50; i < 50; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Create a sharded collection with one chunk on each of the two shards.");
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+jsTest.log("Create a sharded collection with one chunk on each of the two shards.");
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- let findRes;
+let findRes;
- jsTest.log("Without 'allowPartialResults', if all shards are up, find returns all docs.");
- findRes = coll.runCommand({find: collName});
- assert.commandWorked(findRes);
- assert.eq(nDocs, findRes.cursor.firstBatch.length);
+jsTest.log("Without 'allowPartialResults', if all shards are up, find returns all docs.");
+findRes = coll.runCommand({find: collName});
+assert.commandWorked(findRes);
+assert.eq(nDocs, findRes.cursor.firstBatch.length);
- jsTest.log("With 'allowPartialResults: false', if all shards are up, find returns all docs.");
- findRes = coll.runCommand({find: collName, allowPartialResults: false});
- assert.commandWorked(findRes);
- assert.eq(nDocs, findRes.cursor.firstBatch.length);
+jsTest.log("With 'allowPartialResults: false', if all shards are up, find returns all docs.");
+findRes = coll.runCommand({find: collName, allowPartialResults: false});
+assert.commandWorked(findRes);
+assert.eq(nDocs, findRes.cursor.firstBatch.length);
- jsTest.log("With 'allowPartialResults: true', if all shards are up, find returns all docs.");
- findRes = coll.runCommand({find: collName, allowPartialResults: true});
- assert.commandWorked(findRes);
- assert.eq(nDocs, findRes.cursor.firstBatch.length);
+jsTest.log("With 'allowPartialResults: true', if all shards are up, find returns all docs.");
+findRes = coll.runCommand({find: collName, allowPartialResults: true});
+assert.commandWorked(findRes);
+assert.eq(nDocs, findRes.cursor.firstBatch.length);
- jsTest.log("Stopping " + st.shard0.shardName);
- MongoRunner.stopMongod(st.shard0);
+jsTest.log("Stopping " + st.shard0.shardName);
+MongoRunner.stopMongod(st.shard0);
- jsTest.log("Without 'allowPartialResults', if some shard down, find fails.");
- assert.commandFailed(coll.runCommand({find: collName}));
+jsTest.log("Without 'allowPartialResults', if some shard down, find fails.");
+assert.commandFailed(coll.runCommand({find: collName}));
- jsTest.log("With 'allowPartialResults: false', if some shard down, find fails.");
- assert.commandFailed(coll.runCommand({find: collName, allowPartialResults: false}));
+jsTest.log("With 'allowPartialResults: false', if some shard down, find fails.");
+assert.commandFailed(coll.runCommand({find: collName, allowPartialResults: false}));
- jsTest.log(
- "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
- findRes = assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));
- assert.commandWorked(findRes);
- assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
+jsTest.log(
+ "With 'allowPartialResults: true', if some shard down, find succeeds with partial results");
+findRes = assert.commandWorked(coll.runCommand({find: collName, allowPartialResults: true}));
+assert.commandWorked(findRes);
+assert.eq(nDocs / 2, findRes.cursor.firstBatch.length);
- jsTest.log("The allowPartialResults option does not currently apply to aggregation.");
- assert.commandFailedWithCode(coll.runCommand({
- aggregate: collName,
- pipeline: [{$project: {_id: 1}}],
- cursor: {},
- allowPartialResults: true
- }),
- ErrorCodes.FailedToParse);
+jsTest.log("The allowPartialResults option does not currently apply to aggregation.");
+assert.commandFailedWithCode(coll.runCommand({
+ aggregate: collName,
+ pipeline: [{$project: {_id: 1}}],
+ cursor: {},
+ allowPartialResults: true
+}),
+ ErrorCodes.FailedToParse);
- st.stop();
+st.stop();
}());
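
A hedged convenience wrapper over the find pattern above (a sketch; `firstBatchSize` is not a
real helper in this suite):

    function firstBatchSize(testColl, allowPartial) {
        const res = assert.commandWorked(testColl.runCommand(
            {find: testColl.getName(), allowPartialResults: allowPartial}));
        return res.cursor.firstBatch.length;
    }
    // With both shards up, either setting returns all nDocs; with one shard down, only the
    // allowPartialResults: true form succeeds, returning the reachable shard's nDocs / 2.
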
diff --git a/jstests/sharding/arbiters_do_not_use_cluster_time.js b/jstests/sharding/arbiters_do_not_use_cluster_time.js
index f1a5b77dc83..03bf0f32f5a 100644
--- a/jstests/sharding/arbiters_do_not_use_cluster_time.js
+++ b/jstests/sharding/arbiters_do_not_use_cluster_time.js
@@ -3,29 +3,29 @@
*/
(function() {
- "use strict";
- let st = new ShardingTest(
- {shards: {rs0: {nodes: [{arbiter: false}, {arbiter: false}, {arbiter: true}]}}});
+"use strict";
+let st = new ShardingTest(
+ {shards: {rs0: {nodes: [{arbiter: false}, {arbiter: false}, {arbiter: true}]}}});
- jsTestLog("Started ShardingTest");
+jsTestLog("Started ShardingTest");
- let secondaries = st.rs0.getSecondaries();
+let secondaries = st.rs0.getSecondaries();
- let foundArbiter = false;
- for (let i = 0; i < secondaries.length; i++) {
- let conn = secondaries[i].getDB("admin");
- const res = conn.runCommand({isMaster: 1});
- if (res["arbiterOnly"]) {
- assert(!foundArbiter);
- foundArbiter = true;
- // nodes with disabled clocks do not gossip clusterTime and operationTime.
- assert.eq(res.hasOwnProperty("$clusterTime"), false);
- assert.eq(res.hasOwnProperty("operationTime"), false);
- } else {
- assert.eq(res.hasOwnProperty("$clusterTime"), true);
- assert.eq(res.hasOwnProperty("operationTime"), true);
- }
+let foundArbiter = false;
+for (let i = 0; i < secondaries.length; i++) {
+ let conn = secondaries[i].getDB("admin");
+ const res = conn.runCommand({isMaster: 1});
+ if (res["arbiterOnly"]) {
+ assert(!foundArbiter);
+ foundArbiter = true;
+        // Nodes with disabled clocks do not gossip clusterTime and operationTime.
+ assert.eq(res.hasOwnProperty("$clusterTime"), false);
+ assert.eq(res.hasOwnProperty("operationTime"), false);
+ } else {
+ assert.eq(res.hasOwnProperty("$clusterTime"), true);
+ assert.eq(res.hasOwnProperty("operationTime"), true);
}
- assert.eq(foundArbiter, true);
- st.stop();
+}
+assert.eq(foundArbiter, true);
+st.stop();
})();
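
The gossip check above can be factored into a helper; an illustrative sketch
(`assertGossipsClusterTime` is hypothetical):

    function assertGossipsClusterTime(conn, expected) {
        const res = assert.commandWorked(conn.getDB("admin").runCommand({isMaster: 1}));
        // Arbiters run with disabled clocks, so they gossip neither field.
        assert.eq(res.hasOwnProperty("$clusterTime"), expected, tojson(res));
        assert.eq(res.hasOwnProperty("operationTime"), expected, tojson(res));
    }
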
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index cdbe4bda885..2eb7dd102b6 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -1,113 +1,112 @@
// Ensure you can't shard on an array key
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3});
+var st = new ShardingTest({shards: 3});
- var mongos = st.s0;
+var mongos = st.s0;
- var coll = mongos.getCollection("TestDB.foo");
+var coll = mongos.getCollection("TestDB.foo");
- st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
- printjson(mongos.getDB("config").chunks.find().toArray());
+printjson(mongos.getDB("config").chunks.find().toArray());
- print("1: insert some invalid data");
+print("1: insert some invalid data");
- var value = null;
+var value = null;
- // Insert an object with invalid array key
- assert.writeError(coll.insert({i: [1, 2]}));
+// Insert an object with invalid array key
+assert.writeError(coll.insert({i: [1, 2]}));
- // Insert an object with all the right fields, but an invalid array val for _id
- assert.writeError(coll.insert({_id: [1, 2], i: 3}));
+// Insert an object with all the right fields, but an invalid array val for _id
+assert.writeError(coll.insert({_id: [1, 2], i: 3}));
- // Insert an object with valid array key
- assert.writeOK(coll.insert({i: 1}));
+// Insert an object with valid array key
+assert.writeOK(coll.insert({i: 1}));
- // Update the value with valid other field
- value = coll.findOne({i: 1});
- assert.writeOK(coll.update(value, {$set: {j: 2}}));
+// Update the value with valid other field
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(value, {$set: {j: 2}}));
- // Update the value with invalid other fields
- value = coll.findOne({i: 1});
- assert.writeError(coll.update(value, Object.merge(value, {i: [3]})));
+// Update the value with invalid other fields
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3]})));
- // Multi-update the value with invalid other fields
- value = coll.findOne({i: 1});
- assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, true));
+// Multi-update the value with invalid other fields
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, true));
- // Multi-update the value with other fields (won't work, but no error)
- value = coll.findOne({i: 1});
- assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
+// Multi-update the value with other fields (won't work, but no error)
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
- // Query the value with other fields (won't work, but no error)
- value = coll.findOne({i: 1});
- coll.find(Object.merge(value, {i: [1, 1]})).toArray();
+// Query the value with other fields (won't work, but no error)
+value = coll.findOne({i: 1});
+coll.find(Object.merge(value, {i: [1, 1]})).toArray();
- // Can't remove using multikey, but shouldn't error
- value = coll.findOne({i: 1});
- coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({i: 1});
+coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
- // Can't remove using multikey, but shouldn't error
- value = coll.findOne({i: 1});
- assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
- assert.eq(coll.find().itcount(), 1);
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
+assert.eq(coll.find().itcount(), 1);
- value = coll.findOne({i: 1});
- assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
- assert.eq(coll.find().itcount(), 0);
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
+assert.eq(coll.find().itcount(), 0);
- coll.ensureIndex({_id: 1, i: 1, j: 1});
- // Can insert document that will make index into a multi-key as long as it's not part of shard
- // key.
- coll.remove({});
- assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
- assert.eq(coll.find().itcount(), 1);
+coll.ensureIndex({_id: 1, i: 1, j: 1});
+// Can insert document that will make index into a multi-key as long as it's not part of shard
+// key.
+coll.remove({});
+assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
- // Same is true for updates.
- coll.remove({});
- coll.insert({_id: 1, i: 1});
- assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
- assert.eq(coll.find().itcount(), 1);
+// Same is true for updates.
+coll.remove({});
+coll.insert({_id: 1, i: 1});
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
- // Same for upserts.
- coll.remove({});
- assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
- assert.eq(coll.find().itcount(), 1);
+// Same for upserts.
+coll.remove({});
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
+assert.eq(coll.find().itcount(), 1);
- printjson(
- "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
+printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
- // Insert a bunch of data then shard over key which is an array
- var coll = mongos.getCollection("" + coll + "2");
- for (var i = 0; i < 10; i++) {
- // TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({i: [i, i + 1]}));
- }
+// Insert a bunch of data then shard over key which is an array
+var coll = mongos.getCollection("" + coll + "2");
+for (var i = 0; i < 10; i++) {
+ // TODO : does not check weird cases like [ i, i ]
+ assert.writeOK(coll.insert({i: [i, i + 1]}));
+}
- coll.ensureIndex({_id: 1, i: 1});
+coll.ensureIndex({_id: 1, i: 1});
- try {
- st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
- } catch (e) {
- print("Correctly threw error on sharding with multikey index.");
- }
+try {
+ st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+} catch (e) {
+ print("Correctly threw error on sharding with multikey index.");
+}
- st.printShardingStatus();
+st.printShardingStatus();
- // Insert a bunch of data then shard over key which is not an array
- var coll = mongos.getCollection("" + coll + "3");
- for (var i = 0; i < 10; i++) {
- // TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({i: i}));
- }
+// Insert a bunch of data then shard over key which is not an array
+var coll = mongos.getCollection("" + coll + "3");
+for (var i = 0; i < 10; i++) {
+ // TODO : does not check weird cases like [ i, i ]
+ assert.writeOK(coll.insert({i: i}));
+}
- coll.ensureIndex({_id: 1, i: 1});
+coll.ensureIndex({_id: 1, i: 1});
- st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
- st.printShardingStatus();
+st.printShardingStatus();
- st.stop();
+st.stop();
})();
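
The invariant this test exercises, restated as a standalone sketch (assumes a collection `c`
sharded on {_id: 1, i: 1}, mirroring the test):

    assert.writeError(c.insert({i: [1, 2]}));     // array in a shard-key field: rejected
    assert.writeOK(c.insert({i: 1, j: [1, 2]}));  // array in a non-key field: allowed
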
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index d2e46ff1ba4..61b5c273315 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -7,352 +7,342 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
+'use strict';
+load("jstests/replsets/rslib.js");
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails when trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
- var adminUser = {db: "admin", username: "foo", password: "bar"};
+var adminUser = {db: "admin", username: "foo", password: "bar"};
- var testUser = {db: "test", username: "bar", password: "baz"};
+var testUser = {db: "test", username: "bar", password: "baz"};
- var testUserReadOnly = {db: "test", username: "sad", password: "bat"};
+var testUserReadOnly = {db: "test", username: "sad", password: "bat"};
- function login(userObj, thingToUse) {
- if (!thingToUse) {
- thingToUse = s;
- }
-
- thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
+function login(userObj, thingToUse) {
+ if (!thingToUse) {
+ thingToUse = s;
}
- function logout(userObj, thingToUse) {
- if (!thingToUse)
- thingToUse = s;
+ thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
+}
- s.getDB(userObj.db).runCommand({logout: 1});
- }
+function logout(userObj, thingToUse) {
+ if (!thingToUse)
+ thingToUse = s;
- function getShardName(rsTest) {
- var master = rsTest.getPrimary();
- var config = master.getDB("local").system.replset.findOne();
- var members = config.members.map(function(elem) {
- return elem.host;
- });
- return config._id + "/" + members.join(",");
- }
+ s.getDB(userObj.db).runCommand({logout: 1});
+}
- var s = new ShardingTest({
- name: "auth",
- mongos: 1,
- shards: 0,
- other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: false},
+function getShardName(rsTest) {
+ var master = rsTest.getPrimary();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) {
+ return elem.host;
});
+ return config._id + "/" + members.join(",");
+}
+
+var s = new ShardingTest({
+ name: "auth",
+ mongos: 1,
+ shards: 0,
+ other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: false},
+});
+
+if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
+ print('Skipping test on 32-bit platforms');
+ return;
+}
+
+print("Configuration: Add user " + tojson(adminUser));
+s.getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+login(adminUser);
+
+// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
+assert.writeOK(
+ s.getDB("config").settings.update({_id: "balancer"},
+ {$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
+ {upsert: true}));
+
+printjson(s.getDB("config").settings.find().toArray());
+
+print("Restart mongos with different auth options");
+s.restartMongos(0);
+login(adminUser);
+
+var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true, waitForKeys: false});
+d1.startSet({keyFile: "jstests/libs/key2", shardsvr: ""});
+d1.initiate();
+
+print("d1 initiated");
+var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key2", function() {
+ return getShardName(d1);
+});
+
+print("adding shard w/out auth " + shardName);
+logout(adminUser);
+
+var result = s.getDB("admin").runCommand({addShard: shardName});
+printjson(result);
+assert.eq(result.code, 13);
+
+login(adminUser);
+
+print("adding shard w/wrong key " + shardName);
+
+var thrown = false;
+try {
+ result = s.adminCommand({addShard: shardName});
+} catch (e) {
+ thrown = true;
+ printjson(e);
+}
+assert(thrown);
- if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
- print('Skipping test on 32-bit platforms');
- return;
- }
-
- print("Configuration: Add user " + tojson(adminUser));
- s.getDB(adminUser.db).createUser({
- user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles
- });
- login(adminUser);
-
- // Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
- assert.writeOK(s.getDB("config").settings.update(
- {_id: "balancer"},
- {$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
- {upsert: true}));
-
- printjson(s.getDB("config").settings.find().toArray());
-
- print("Restart mongos with different auth options");
- s.restartMongos(0);
- login(adminUser);
-
- var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true, waitForKeys: false});
- d1.startSet({keyFile: "jstests/libs/key2", shardsvr: ""});
- d1.initiate();
+print("start rs w/correct key");
- print("d1 initiated");
- var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key2", function() {
- return getShardName(d1);
- });
+d1.stopSet();
+d1.startSet({keyFile: "jstests/libs/key1", restart: true});
+d1.initiate();
- print("adding shard w/out auth " + shardName);
- logout(adminUser);
+var master = d1.getPrimary();
- var result = s.getDB("admin").runCommand({addShard: shardName});
- printjson(result);
- assert.eq(result.code, 13);
+print("adding shard w/auth " + shardName);
- login(adminUser);
+result = s.getDB("admin").runCommand({addShard: shardName});
+assert.eq(result.ok, 1, tojson(result));
- print("adding shard w/wrong key " + shardName);
+s.getDB("admin").runCommand({enableSharding: "test"});
+s.getDB("admin").runCommand({shardCollection: "test.foo", key: {x: 1}});
- var thrown = false;
- try {
- result = s.adminCommand({addShard: shardName});
- } catch (e) {
- thrown = true;
- printjson(e);
- }
- assert(thrown);
+d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- print("start rs w/correct key");
+s.getDB(testUser.db)
+ .createUser({user: testUser.username, pwd: testUser.password, roles: jsTest.basicUserRoles});
+s.getDB(testUserReadOnly.db).createUser({
+ user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles
+});
- d1.stopSet();
- d1.startSet({keyFile: "jstests/libs/key1", restart: true});
- d1.initiate();
+logout(adminUser);
- var master = d1.getPrimary();
+print("query try");
+var e = assert.throws(function() {
+ s.s.getDB("foo").bar.findOne();
+});
+printjson(e);
- print("adding shard w/auth " + shardName);
+print("cmd try");
+assert.eq(0, s.s.getDB("foo").runCommand({listDatabases: 1}).ok);
- result = s.getDB("admin").runCommand({addShard: shardName});
- assert.eq(result.ok, 1, tojson(result));
+print("insert try 1");
+s.getDB("test").foo.insert({x: 1});
- s.getDB("admin").runCommand({enableSharding: "test"});
- s.getDB("admin").runCommand({shardCollection: "test.foo", key: {x: 1}});
+login(testUser);
+assert.eq(s.getDB("test").foo.findOne(), null);
- d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-
- s.getDB(testUser.db).createUser({
- user: testUser.username,
- pwd: testUser.password,
- roles: jsTest.basicUserRoles
- });
- s.getDB(testUserReadOnly.db).createUser({
- user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles
- });
+print("insert try 2");
+assert.writeOK(s.getDB("test").foo.insert({x: 1}));
+assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
- logout(adminUser);
+logout(testUser);
- print("query try");
- var e = assert.throws(function() {
- s.s.getDB("foo").bar.findOne();
- });
- printjson(e);
+var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true, waitForKeys: false});
+d2.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
+d2.initiate();
+d2.awaitSecondaryNodes();
- print("cmd try");
- assert.eq(0, s.s.getDB("foo").runCommand({listDatabases: 1}).ok);
+shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ return getShardName(d2);
+});
- print("insert try 1");
- s.getDB("test").foo.insert({x: 1});
+print("adding shard " + shardName);
+login(adminUser);
+print("logged in");
+result = s.getDB("admin").runCommand({addShard: shardName});
- login(testUser);
- assert.eq(s.getDB("test").foo.findOne(), null);
+awaitRSClientHosts(s.s, d1.nodes, {ok: true});
+awaitRSClientHosts(s.s, d2.nodes, {ok: true});
- print("insert try 2");
- assert.writeOK(s.getDB("test").foo.insert({x: 1}));
- assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
+s.getDB("test").foo.remove({});
- logout(testUser);
+var num = 10000;
+assert.commandWorked(s.s.adminCommand({split: "test.foo", middle: {x: num / 2}}));
+var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
+for (i = 0; i < num; i++) {
+ bulk.insert({_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+}
+assert.writeOK(bulk.execute());
- var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true, waitForKeys: false});
- d2.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
- d2.initiate();
- d2.awaitSecondaryNodes();
+s.startBalancer(60000);
- shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
- return getShardName(d2);
- });
+assert.soon(function() {
+ var d1Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d1"});
+ var d2Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d2"});
+ var totalChunks = s.getDB("config").chunks.count({ns: 'test.foo'});
- print("adding shard " + shardName);
- login(adminUser);
- print("logged in");
- result = s.getDB("admin").runCommand({addShard: shardName});
+ print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
- awaitRSClientHosts(s.s, d1.nodes, {ok: true});
- awaitRSClientHosts(s.s, d2.nodes, {ok: true});
+ return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
+}, "Chunks failed to balance", 60000, 5000);
- s.getDB("test").foo.remove({});
+// SERVER-33753: count() without predicate can be wrong on sharded collections.
+// assert.eq(s.getDB("test").foo.count(), num+1);
+var numDocs = s.getDB("test").foo.find().itcount();
+if (numDocs != num) {
+    // Missing documents. At this point we're already in a failure mode; the code in this
+    // statement is to get a better idea of how/why it's failing.
- var num = 10000;
- assert.commandWorked(s.s.adminCommand({split: "test.foo", middle: {x: num / 2}}));
- var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
- for (i = 0; i < num; i++) {
- bulk.insert(
- {_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
- }
- assert.writeOK(bulk.execute());
-
- s.startBalancer(60000);
-
- assert.soon(function() {
- var d1Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d1"});
- var d2Chunks = s.getDB("config").chunks.count({ns: 'test.foo', shard: "d2"});
- var totalChunks = s.getDB("config").chunks.count({ns: 'test.foo'});
-
- print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
-
- return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
- }, "Chunks failed to balance", 60000, 5000);
-
- // SERVER-33753: count() without predicate can be wrong on sharded collections.
- // assert.eq(s.getDB("test").foo.count(), num+1);
- var numDocs = s.getDB("test").foo.find().itcount();
- if (numDocs != num) {
- // Missing documents. At this point we're already in a failure mode, the code in this
- // statement
- // is to get a better idea how/why it's failing.
-
- var numDocsSeen = 0;
- var lastDocNumber = -1;
- var missingDocNumbers = [];
- var docs = s.getDB("test").foo.find().sort({x: 1}).toArray();
- for (var i = 0; i < docs.length; i++) {
- if (docs[i].x != lastDocNumber + 1) {
- for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
- missingDocNumbers.push(missing);
- }
+ var numDocsSeen = 0;
+ var lastDocNumber = -1;
+ var missingDocNumbers = [];
+ var docs = s.getDB("test").foo.find().sort({x: 1}).toArray();
+ for (var i = 0; i < docs.length; i++) {
+ if (docs[i].x != lastDocNumber + 1) {
+ for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
+ missingDocNumbers.push(missing);
}
- lastDocNumber = docs[i].x;
- numDocsSeen++;
}
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
- assert.eq(num - numDocs, missingDocNumbers.length);
-
- load('jstests/libs/trace_missing_docs.js');
-
- for (var i = 0; i < missingDocNumbers.length; i++) {
- jsTest.log("Tracing doc: " + missingDocNumbers[i]);
- traceMissingDoc(s.getDB("test").foo,
- {_id: missingDocNumbers[i], x: missingDocNumbers[i]});
- }
-
- assert(false,
- "Number of docs found does not equal the number inserted. Missing docs: " +
- missingDocNumbers);
+ lastDocNumber = docs[i].x;
+ numDocsSeen++;
}
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
+ assert.eq(num - numDocs, missingDocNumbers.length);
- // We're only sure we aren't duplicating documents iff there's no balancing going on here
- // This call also waits for any ongoing balancing to stop
- s.stopBalancer(60000);
-
- var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
+ load('jstests/libs/trace_missing_docs.js');
- var count = 0;
- while (cursor.hasNext()) {
- cursor.next();
- count++;
+ for (var i = 0; i < missingDocNumbers.length; i++) {
+ jsTest.log("Tracing doc: " + missingDocNumbers[i]);
+ traceMissingDoc(s.getDB("test").foo, {_id: missingDocNumbers[i], x: missingDocNumbers[i]});
}
- assert.eq(count, 500);
-
- logout(adminUser);
-
- d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-
- authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
- d1.awaitReplication();
- });
- authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
- d2.awaitReplication();
- });
-
- // add admin on shard itself, hack to prevent localhost auth bypass
- d1.getPrimary()
- .getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
- d2.getPrimary()
- .getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
-
- login(testUser);
- print("testing map reduce");
-
- // Sharded map reduce can be tricky since all components talk to each other. For example
- // SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
- // properly tested here since addresses are localhost, which is more permissive.
- var res = s.getDB("test").runCommand({
- mapreduce: "foo",
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return values.length;
- },
- out: "mrout"
- });
- printjson(res);
- assert.commandWorked(res);
-
- // Check that dump doesn't get stuck with auth
- var exitCode = MongoRunner.runMongoTool("mongodump", {
- host: s.s.host,
- db: testUser.db,
- username: testUser.username,
- password: testUser.password,
- authenticationMechanism: "SCRAM-SHA-1",
- });
- assert.eq(0, exitCode, "mongodump failed to run with authentication enabled");
-
- // Test read only users
- print("starting read only tests");
-
- var readOnlyS = new Mongo(s.getDB("test").getMongo().host);
- var readOnlyDB = readOnlyS.getDB("test");
-
- print(" testing find that should fail");
- assert.throws(function() {
- readOnlyDB.foo.findOne();
- });
-
- print(" logging in");
- login(testUserReadOnly, readOnlyS);
-
- print(" testing find that should work");
+ assert(false,
+ "Number of docs found does not equal the number inserted. Missing docs: " +
+ missingDocNumbers);
+}
+
+// We can only be sure we aren't duplicating documents if there's no balancing going on here.
+// This call also waits for any ongoing balancing to stop.
+s.stopBalancer(60000);
+
+var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
+
+var count = 0;
+while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+}
+
+assert.eq(count, 500);
+
+logout(adminUser);
+
+d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+
+authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
+ d1.awaitReplication();
+});
+authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ d2.awaitReplication();
+});
+
+// Add an admin user on the shard itself; a hack to prevent the localhost auth bypass.
+d1.getPrimary()
+ .getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+d2.getPrimary()
+ .getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+
+login(testUser);
+print("testing map reduce");
+
+// Sharded map reduce can be tricky since all components talk to each other. For example,
+// SERVER-4114 is triggered when one mongod connects to another for the final reduce; it's not
+// properly tested here since the addresses are localhost, which is more permissive.
+var res = s.getDB("test").runCommand({
+ mapreduce: "foo",
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return values.length;
+ },
+ out: "mrout"
+});
+printjson(res);
+assert.commandWorked(res);
+
+// Check that dump doesn't get stuck with auth
+var exitCode = MongoRunner.runMongoTool("mongodump", {
+ host: s.s.host,
+ db: testUser.db,
+ username: testUser.username,
+ password: testUser.password,
+ authenticationMechanism: "SCRAM-SHA-1",
+});
+assert.eq(0, exitCode, "mongodump failed to run with authentication enabled");
+
+// Test read only users
+print("starting read only tests");
+
+var readOnlyS = new Mongo(s.getDB("test").getMongo().host);
+var readOnlyDB = readOnlyS.getDB("test");
+
+print(" testing find that should fail");
+assert.throws(function() {
readOnlyDB.foo.findOne();
-
- print(" testing write that should fail");
- assert.writeError(readOnlyDB.foo.insert({eliot: 1}));
-
- print(" testing read command (should succeed)");
- assert.commandWorked(readOnlyDB.runCommand({count: "foo"}));
-
- print("make sure currentOp/killOp fail");
- assert.commandFailed(readOnlyDB.currentOp());
- assert.commandFailed(readOnlyDB.killOp(123));
-
- // fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
- /*
- broken because of SERVER-4156
- print( " testing write command (should fail)" );
- assert.commandFailed(readOnlyDB.runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.y, 1); },
- reduce : function(key, values) { return values.length; },
- out:"blarg"
- }));
- */
-
- print(" testing logout (should succeed)");
- assert.commandWorked(readOnlyDB.runCommand({logout: 1}));
-
- print("make sure currentOp/killOp fail again");
- assert.commandFailed(readOnlyDB.currentOp());
- assert.commandFailed(readOnlyDB.killOp(123));
-
- s.stop();
- d1.stopSet();
- d2.stopSet();
+});
+
+print(" logging in");
+login(testUserReadOnly, readOnlyS);
+
+print(" testing find that should work");
+readOnlyDB.foo.findOne();
+
+print(" testing write that should fail");
+assert.writeError(readOnlyDB.foo.insert({eliot: 1}));
+
+print(" testing read command (should succeed)");
+assert.commandWorked(readOnlyDB.runCommand({count: "foo"}));
+
+print("make sure currentOp/killOp fail");
+assert.commandFailed(readOnlyDB.currentOp());
+assert.commandFailed(readOnlyDB.killOp(123));
+
+// fsyncUnlock doesn't work in mongos anyway, so there's no need to check authorization for it
+/*
+broken because of SERVER-4156
+print( " testing write command (should fail)" );
+assert.commandFailed(readOnlyDB.runCommand(
+ {mapreduce : "foo",
+ map : function() { emit(this.y, 1); },
+ reduce : function(key, values) { return values.length; },
+ out:"blarg"
+ }));
+*/
+
+print(" testing logout (should succeed)");
+assert.commandWorked(readOnlyDB.runCommand({logout: 1}));
+
+print("make sure currentOp/killOp fail again");
+assert.commandFailed(readOnlyDB.currentOp());
+assert.commandFailed(readOnlyDB.killOp(123));
+
+s.stop();
+d1.stopSet();
+d2.stopSet();
})();
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index f3ac5caf1c7..d1d6cb20156 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,30 +1,26 @@
(function() {
- 'use strict';
+'use strict';
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- useHostname: true,
- keyFile: 'jstests/libs/key1',
- shardAsReplicaSet: false
- },
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ other:
+ {chunkSize: 1, useHostname: true, keyFile: 'jstests/libs/key1', shardAsReplicaSet: false},
+});
- var mongos = st.s;
- var adminDB = mongos.getDB('admin');
- var db = mongos.getDB('test');
+var mongos = st.s;
+var adminDB = mongos.getDB('admin');
+var db = mongos.getDB('test');
- adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
- jsTestLog("Add user was successful");
+jsTestLog("Add user was successful");
- // Test for SERVER-6549, make sure that repeatedly logging in always passes.
- for (var i = 0; i < 100; i++) {
- adminDB = new Mongo(mongos.host).getDB('admin');
- assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
- }
+// Test for SERVER-6549, make sure that repeatedly logging in always passes.
+for (var i = 0; i < 100; i++) {
+ adminDB = new Mongo(mongos.host).getDB('admin');
+ assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index cd75ddd5cec..8afe4facc2e 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -2,308 +2,305 @@
* This tests using DB commands with authentication enabled when sharded.
*/
(function() {
- 'use strict';
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- load("jstests/replsets/rslib.js");
-
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
-
- var st = new ShardingTest({
- shards: 2,
- rs: {oplogSize: 10, useHostname: false},
- other: {keyFile: 'jstests/libs/key1', useHostname: false, chunkSize: 2},
- });
-
- var mongos = st.s;
- var adminDB = mongos.getDB('admin');
- var configDB = mongos.getDB('config');
- var testDB = mongos.getDB('test');
+'use strict';
- jsTestLog('Setting up initial users');
- var rwUser = 'rwUser';
- var roUser = 'roUser';
- var password = 'password';
- var expectedDocs = 1000;
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
+load("jstests/replsets/rslib.js");
- assert(adminDB.auth(rwUser, password));
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
- // Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
- // wait for the mongos to explicitly detect them.
- awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
- awaitRSClientHosts(mongos, st.rs1.getSecondaries(), {ok: true, secondary: true});
+var st = new ShardingTest({
+ shards: 2,
+ rs: {oplogSize: 10, useHostname: false},
+ other: {keyFile: 'jstests/libs/key1', useHostname: false, chunkSize: 2},
+});
- testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
- testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
+var mongos = st.s;
+var adminDB = mongos.getDB('admin');
+var configDB = mongos.getDB('config');
+var testDB = mongos.getDB('test');
- var authenticatedConn = new Mongo(mongos.host);
- authenticatedConn.getDB('admin').auth(rwUser, password);
+jsTestLog('Setting up initial users');
+var rwUser = 'rwUser';
+var roUser = 'roUser';
+var password = 'password';
+var expectedDocs = 1000;
- // Add user to shards to prevent localhost connections from having automatic full access
- st.rs0.getPrimary().getDB('admin').createUser(
- {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
- st.rs1.getPrimary().getDB('admin').createUser(
- {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
- jsTestLog('Creating initial data');
+assert(adminDB.auth(rwUser, password));
- st.adminCommand({enablesharding: "test"});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.adminCommand({shardcollection: "test.foo", key: {i: 1, j: 1}});
+// Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
+// wait for the mongos to explicitly detect them.
+awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
+awaitRSClientHosts(mongos, st.rs1.getSecondaries(), {ok: true, secondary: true});
- // Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
+testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
+testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
- var str = 'a';
- while (str.length < 8000) {
- str += str;
- }
-
- for (var i = 0; i < 100; i++) {
- var bulk = testDB.foo.initializeUnorderedBulkOp();
- for (var j = 0; j < 10; j++) {
- bulk.insert({i: i, j: j, str: str});
- }
- assert.writeOK(bulk.execute({w: "majority"}));
- // Split the chunk we just inserted so that we have something to balance.
- assert.commandWorked(st.splitFind("test.foo", {i: i, j: 0}));
- }
+var authenticatedConn = new Mongo(mongos.host);
+authenticatedConn.getDB('admin').auth(rwUser, password);
- assert.eq(expectedDocs, testDB.foo.count());
+// Add user to shards to prevent localhost connections from having automatic full access
+st.rs0.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+st.rs1.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
- // Wait for the balancer to start back up
- assert.writeOK(
- configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
- st.startBalancer();
+jsTestLog('Creating initial data');
- // Make sure we've done at least some splitting, so the balancer will work
- assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+st.adminCommand({enablesharding: "test"});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.adminCommand({shardcollection: "test.foo", key: {i: 1, j: 1}});
- // Make sure we eventually balance all the chunks we've created
- assert.soon(function() {
- var x = st.chunkDiff("foo", "test");
- print("chunk diff: " + x);
- return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
- }, "no balance happened", 5 * 60 * 1000);
+// Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
- var map = function() {
- emit(this.i, this.j);
- };
+var str = 'a';
+while (str.length < 8000) {
+ str += str;
+}
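+// The doubling stops once str reaches 8192 characters, so each document below carries
+// ~8 KB of padding; 10 documents per i across 100 values of i yields the 1000
+// expectedDocs asserted after the loop.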
- var reduce = function(key, values) {
- var jCount = 0;
- values.forEach(function(j) {
- jCount += j;
+for (var i = 0; i < 100; i++) {
+ var bulk = testDB.foo.initializeUnorderedBulkOp();
+ for (var j = 0; j < 10; j++) {
+ bulk.insert({i: i, j: j, str: str});
+ }
+ assert.writeOK(bulk.execute({w: "majority"}));
+ // Split the chunk we just inserted so that we have something to balance.
+ assert.commandWorked(st.splitFind("test.foo", {i: i, j: 0}));
+}
+
+assert.eq(expectedDocs, testDB.foo.count());
+
+// Wait for the balancer to start back up
+assert.writeOK(configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
+st.startBalancer();
+
+// Make sure we've done at least some splitting, so the balancer will work
+assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+
+// Make sure we eventually balance all the chunks we've created
+assert.soon(function() {
+ var x = st.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
+}, "no balance happened", 5 * 60 * 1000);
+
+var map = function() {
+ emit(this.i, this.j);
+};
+
+var reduce = function(key, values) {
+ var jCount = 0;
+ values.forEach(function(j) {
+ jCount += j;
+ });
+ return jCount;
+};
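+// For any key i the emitted values are j = 0..9, so reduce returns 0 + 1 + ... + 9 = 45;
+// with 100 distinct keys, the read checks below expect 100 results, values of 45, and an
+// aggregate sum of 4500.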
+
+var checkCommandSucceeded = function(db, cmdObj) {
+ print("Running command that should succeed: " + tojson(cmdObj));
+ var resultObj = assert.commandWorked(db.runCommand(cmdObj));
+ printjson(resultObj);
+ return resultObj;
+};
+
+var checkCommandFailed = function(db, cmdObj) {
+ print("Running command that should fail: " + tojson(cmdObj));
+ var resultObj = assert.commandFailed(db.runCommand(cmdObj));
+ printjson(resultObj);
+ return resultObj;
+};
+
+var checkReadOps = function(hasReadAuth) {
+ if (hasReadAuth) {
+ print("Checking read operations, should work");
+ assert.eq(expectedDocs, testDB.foo.find().itcount());
+ assert.eq(expectedDocs, testDB.foo.count());
+
+ // NOTE: This is an explicit check that GLE can be run with read prefs, not the result
+        // of the above.
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB, {dbstats: 1});
+ checkCommandSucceeded(testDB, {collstats: 'foo'});
+
+ // inline map-reduce works read-only
+ var res = checkCommandSucceeded(
+ testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ assert.eq(100, res.results.length);
+ assert.eq(45, res.results[0].value);
+
+ res = checkCommandSucceeded(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
+ cursor: {}
});
- return jCount;
- };
-
- var checkCommandSucceeded = function(db, cmdObj) {
- print("Running command that should succeed: " + tojson(cmdObj));
- var resultObj = assert.commandWorked(db.runCommand(cmdObj));
- printjson(resultObj);
- return resultObj;
- };
-
- var checkCommandFailed = function(db, cmdObj) {
- print("Running command that should fail: " + tojson(cmdObj));
- var resultObj = assert.commandFailed(db.runCommand(cmdObj));
- printjson(resultObj);
- return resultObj;
- };
-
- var checkReadOps = function(hasReadAuth) {
- if (hasReadAuth) {
- print("Checking read operations, should work");
- assert.eq(expectedDocs, testDB.foo.find().itcount());
- assert.eq(expectedDocs, testDB.foo.count());
-
- // NOTE: This is an explicit check that GLE can be run with read prefs, not the result
- // of above.
- assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
- checkCommandSucceeded(testDB, {dbstats: 1});
- checkCommandSucceeded(testDB, {collstats: 'foo'});
-
- // inline map-reduce works read-only
- var res = checkCommandSucceeded(
- testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
- assert.eq(100, res.results.length);
- assert.eq(45, res.results[0].value);
-
- res = checkCommandSucceeded(testDB, {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
- cursor: {}
- });
- assert.eq(4500, res.cursor.firstBatch[0].sum);
- } else {
- print("Checking read operations, should fail");
- assert.throws(function() {
- testDB.foo.find().itcount();
- });
- checkCommandFailed(testDB, {dbstats: 1});
- checkCommandFailed(testDB, {collstats: 'foo'});
- checkCommandFailed(testDB,
- {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
- checkCommandFailed(testDB, {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
- cursor: {}
- });
- }
- };
-
- var checkWriteOps = function(hasWriteAuth) {
- if (hasWriteAuth) {
- print("Checking write operations, should work");
- testDB.foo.insert({a: 1, i: 1, j: 1});
- var res = checkCommandSucceeded(
- testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
- assert.eq(1, res.value.a);
- assert.eq(null, res.value.b);
- assert.eq(1, testDB.foo.findOne({a: 1}).b);
- testDB.foo.remove({a: 1});
- assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
- checkCommandSucceeded(testDB,
- {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
- assert.eq(100, testDB.mrOutput.count());
- assert.eq(45, testDB.mrOutput.findOne().value);
-
- checkCommandSucceeded(testDB, {drop: 'foo'});
- assert.eq(0, testDB.foo.count());
- testDB.foo.insert({a: 1});
- assert.eq(1, testDB.foo.count());
- checkCommandSucceeded(testDB, {dropDatabase: 1});
- assert.eq(0, testDB.foo.count());
- checkCommandSucceeded(testDB, {create: 'baz'});
- } else {
- print("Checking write operations, should fail");
- testDB.foo.insert({a: 1, i: 1, j: 1});
- assert.eq(0, authenticatedConn.getDB('test').foo.count({a: 1, i: 1, j: 1}));
- checkCommandFailed(
- testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
- checkCommandFailed(testDB,
- {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
- checkCommandFailed(testDB, {drop: 'foo'});
- checkCommandFailed(testDB, {dropDatabase: 1});
- var passed = true;
- try {
- // For some reason when create fails it throws an exception instead of just
- // returning ok:0
- var res = testDB.runCommand({create: 'baz'});
- if (!res.ok) {
- passed = false;
- }
- } catch (e) {
- // expected
- printjson(e);
+ assert.eq(4500, res.cursor.firstBatch[0].sum);
+ } else {
+ print("Checking read operations, should fail");
+ assert.throws(function() {
+ testDB.foo.find().itcount();
+ });
+ checkCommandFailed(testDB, {dbstats: 1});
+ checkCommandFailed(testDB, {collstats: 'foo'});
+ checkCommandFailed(testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ checkCommandFailed(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
+ cursor: {}
+ });
+ }
+};
+
+var checkWriteOps = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ print("Checking write operations, should work");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ var res = checkCommandSucceeded(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ assert.eq(1, res.value.a);
+ assert.eq(null, res.value.b);
+ assert.eq(1, testDB.foo.findOne({a: 1}).b);
+ testDB.foo.remove({a: 1});
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ assert.eq(100, testDB.mrOutput.count());
+ assert.eq(45, testDB.mrOutput.findOne().value);
+
+ checkCommandSucceeded(testDB, {drop: 'foo'});
+ assert.eq(0, testDB.foo.count());
+ testDB.foo.insert({a: 1});
+ assert.eq(1, testDB.foo.count());
+ checkCommandSucceeded(testDB, {dropDatabase: 1});
+ assert.eq(0, testDB.foo.count());
+ checkCommandSucceeded(testDB, {create: 'baz'});
+ } else {
+ print("Checking write operations, should fail");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ assert.eq(0, authenticatedConn.getDB('test').foo.count({a: 1, i: 1, j: 1}));
+ checkCommandFailed(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ checkCommandFailed(testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ checkCommandFailed(testDB, {drop: 'foo'});
+ checkCommandFailed(testDB, {dropDatabase: 1});
+ var passed = true;
+ try {
+            // For some reason, when create fails it throws an exception instead of just
+ // returning ok:0
+ var res = testDB.runCommand({create: 'baz'});
+ if (!res.ok) {
passed = false;
}
- assert(!passed);
- }
- };
-
- var checkAdminOps = function(hasAuth) {
- if (hasAuth) {
- checkCommandSucceeded(adminDB, {getCmdLineOpts: 1});
- checkCommandSucceeded(adminDB, {serverStatus: 1});
- checkCommandSucceeded(adminDB, {listShards: 1});
- checkCommandSucceeded(adminDB, {whatsmyuri: 1});
- checkCommandSucceeded(adminDB, {isdbgrid: 1});
- checkCommandSucceeded(adminDB, {ismaster: 1});
- checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- var chunk = configDB.chunks.findOne({ns: 'test.foo', shard: st.rs0.name});
- checkCommandSucceeded(
- adminDB,
- {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
- } else {
- checkCommandFailed(adminDB, {getCmdLineOpts: 1});
- checkCommandFailed(adminDB, {serverStatus: 1});
- checkCommandFailed(adminDB, {listShards: 1});
- // whatsmyuri, isdbgrid, and ismaster don't require any auth
- checkCommandSucceeded(adminDB, {whatsmyuri: 1});
- checkCommandSucceeded(adminDB, {isdbgrid: 1});
- checkCommandSucceeded(adminDB, {ismaster: 1});
- checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- var chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
- checkCommandFailed(
- adminDB,
- {moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
+ } catch (e) {
+ // expected
+ printjson(e);
+ passed = false;
}
- };
-
- var checkRemoveShard = function(hasWriteAuth) {
- if (hasWriteAuth) {
- // start draining
- checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
- // Wait for shard to be completely removed
- checkRemoveShard = function() {
- var res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
- return res.msg == 'removeshard completed successfully';
- };
- assert.soon(checkRemoveShard, "failed to remove shard");
- } else {
- checkCommandFailed(adminDB, {removeshard: st.rs1.name});
- }
- };
+ assert(!passed);
+ }
+};
+
+var checkAdminOps = function(hasAuth) {
+ if (hasAuth) {
+ checkCommandSucceeded(adminDB, {getCmdLineOpts: 1});
+ checkCommandSucceeded(adminDB, {serverStatus: 1});
+ checkCommandSucceeded(adminDB, {listShards: 1});
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ var chunk = configDB.chunks.findOne({ns: 'test.foo', shard: st.rs0.name});
+ checkCommandSucceeded(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
+ } else {
+ checkCommandFailed(adminDB, {getCmdLineOpts: 1});
+ checkCommandFailed(adminDB, {serverStatus: 1});
+ checkCommandFailed(adminDB, {listShards: 1});
+ // whatsmyuri, isdbgrid, and ismaster don't require any auth
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ var chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
+ checkCommandFailed(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
+ }
+};
+
+var checkRemoveShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ // start draining
+ checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ // Wait for shard to be completely removed
+ checkRemoveShard = function() {
+ var res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ return res.msg == 'removeshard completed successfully';
+ };
+ assert.soon(checkRemoveShard, "failed to remove shard");
+ } else {
+ checkCommandFailed(adminDB, {removeshard: st.rs1.name});
+ }
+};
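+// Note that on the success path checkRemoveShard replaces itself with a polling
+// function, which assert.soon retries until the drain reports completion.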
- var checkAddShard = function(hasWriteAuth) {
- if (hasWriteAuth) {
- checkCommandSucceeded(adminDB, {addshard: st.rs1.getURL()});
- } else {
- checkCommandFailed(adminDB, {addshard: st.rs1.getURL()});
- }
- };
-
- st.stopBalancer();
-
- jsTestLog("Checking admin commands with admin auth credentials");
- checkAdminOps(true);
- assert(adminDB.logout().ok);
-
- jsTestLog("Checking admin commands with no auth credentials");
- checkAdminOps(false);
-
- jsTestLog("Checking commands with no auth credentials");
- checkReadOps(false);
- checkWriteOps(false);
-
- // Authenticate as read-only user
- jsTestLog("Checking commands with read-only auth credentials");
- assert(testDB.auth(roUser, password));
- checkReadOps(true);
- checkWriteOps(false);
-
- // Authenticate as read-write user
- jsTestLog("Checking commands with read-write auth credentials");
- assert(testDB.auth(rwUser, password));
- checkReadOps(true);
- checkWriteOps(true);
-
- jsTestLog("Check drainging/removing a shard");
- assert(testDB.logout().ok);
- checkRemoveShard(false);
- assert(adminDB.auth(rwUser, password));
- assert(testDB.dropDatabase().ok);
- checkRemoveShard(true);
- st.printShardingStatus();
-
- jsTestLog("Check adding a shard");
- assert(adminDB.logout().ok);
- checkAddShard(false);
- assert(adminDB.auth(rwUser, password));
- checkAddShard(true);
- st.printShardingStatus();
-
- st.stop();
+var checkAddShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ checkCommandSucceeded(adminDB, {addshard: st.rs1.getURL()});
+ } else {
+ checkCommandFailed(adminDB, {addshard: st.rs1.getURL()});
+ }
+};
+
+st.stopBalancer();
+
+jsTestLog("Checking admin commands with admin auth credentials");
+checkAdminOps(true);
+assert(adminDB.logout().ok);
+
+jsTestLog("Checking admin commands with no auth credentials");
+checkAdminOps(false);
+
+jsTestLog("Checking commands with no auth credentials");
+checkReadOps(false);
+checkWriteOps(false);
+
+// Authenticate as read-only user
+jsTestLog("Checking commands with read-only auth credentials");
+assert(testDB.auth(roUser, password));
+checkReadOps(true);
+checkWriteOps(false);
+
+// Authenticate as read-write user
+jsTestLog("Checking commands with read-write auth credentials");
+assert(testDB.auth(rwUser, password));
+checkReadOps(true);
+checkWriteOps(true);
+
+jsTestLog("Check drainging/removing a shard");
+assert(testDB.logout().ok);
+checkRemoveShard(false);
+assert(adminDB.auth(rwUser, password));
+assert(testDB.dropDatabase().ok);
+checkRemoveShard(true);
+st.printShardingStatus();
+
+jsTestLog("Check adding a shard");
+assert(adminDB.logout().ok);
+checkAddShard(false);
+assert(adminDB.auth(rwUser, password));
+checkAddShard(true);
+st.printShardingStatus();
+
+st.stop();
})();
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 6655d4d5248..73e81393d44 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -14,55 +14,49 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- other: {
- keyFile: 'jstests/libs/key1',
- useHostname: true,
- chunkSize: 1,
- shardAsReplicaSet: false
- }
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ other: {keyFile: 'jstests/libs/key1', useHostname: true, chunkSize: 1, shardAsReplicaSet: false}
+});
- var mongos = st.s;
- var adminDB = mongos.getDB('admin');
- var db = mongos.getDB('test');
+var mongos = st.s;
+var adminDB = mongos.getDB('admin');
+var db = mongos.getDB('test');
- adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
- adminDB.auth('admin', 'password');
+adminDB.auth('admin', 'password');
- adminDB.runCommand({enableSharding: "test"});
- st.ensurePrimaryShard('test', 'shard0001');
- adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
+adminDB.runCommand({enableSharding: "test"});
+st.ensurePrimaryShard('test', 'shard0001');
+adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
- for (var i = 0; i < 100; i++) {
- db.foo.insert({x: i});
- }
+for (var i = 0; i < 100; i++) {
+ db.foo.insert({x: i});
+}
- adminDB.runCommand({split: "test.foo", middle: {x: 50}});
- var curShard = st.getShard("test.foo", {x: 75});
- var otherShard = st.getOther(curShard).name;
- adminDB.runCommand(
- {moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
+adminDB.runCommand({split: "test.foo", middle: {x: 50}});
+var curShard = st.getShard("test.foo", {x: 75});
+var otherShard = st.getOther(curShard).name;
+adminDB.runCommand({moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
- st.printShardingStatus();
+st.printShardingStatus();
- MongoRunner.stopMongod(st.shard0);
- st.shard0 = MongoRunner.runMongod({restart: st.shard0});
+MongoRunner.stopMongod(st.shard0);
+st.shard0 = MongoRunner.runMongod({restart: st.shard0});
- // May fail the first couple times due to socket exceptions
- assert.soon(function() {
- var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
- printjson(res);
- return res.ok;
- });
+// May fail the first couple times due to socket exceptions
+assert.soon(function() {
+ var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
+ printjson(res);
+ return res.ok;
+});
- printjson(db.foo.findOne({x: 25}));
- printjson(db.foo.findOne({x: 75}));
+printjson(db.foo.findOne({x: 25}));
+printjson(db.foo.findOne({x: 75}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index f898f5caecc..6c3c298b373 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -2,100 +2,97 @@
// The purpose of this test is to test authentication when adding/removing a shard. The test sets
// up a sharded system, then adds/removes a shard.
(function() {
- 'use strict';
+'use strict';
- // login method to login into the database
- function login(userObj) {
- var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
- printjson(authResult);
- }
+// login method to log in to the database
+function login(userObj) {
+ var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
+ printjson(authResult);
+}
- // admin user object
- var adminUser = {db: "admin", username: "foo", password: "bar"};
+// admin user object
+var adminUser = {db: "admin", username: "foo", password: "bar"};
- // set up a 2 shard cluster with keyfile
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+// set up a 1-shard cluster with a keyfile (a second shard is added during the test)
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
- print("1 shard system setup");
+print("1 shard system setup");
- // add the admin user
- print("adding user");
- mongos.getDB(adminUser.db).createUser({
- user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles
- });
+// add the admin user
+print("adding user");
+mongos.getDB(adminUser.db)
+ .createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
- // login as admin user
- login(adminUser);
+// login as admin user
+login(adminUser);
- assert.eq(1, st.config.shards.count(), "initial server count wrong");
+assert.eq(1, st.config.shards.count(), "initial server count wrong");
- // start a mongod with NO keyfile
- var conn = MongoRunner.runMongod({shardsvr: ""});
- print(conn);
+// start a mongod with NO keyfile
+var conn = MongoRunner.runMongod({shardsvr: ""});
+print(conn);
- // --------------- Test 1 --------------------
- // Add shard to the existing cluster (should fail because it was added without a keyfile)
- printjson(assert.commandFailed(admin.runCommand({addShard: conn.host})));
+// --------------- Test 1 --------------------
+// Add shard to the existing cluster (should fail because the mongod was started without a keyfile)
+printjson(assert.commandFailed(admin.runCommand({addShard: conn.host})));
- // stop mongod
- MongoRunner.stopMongod(conn);
+// stop mongod
+MongoRunner.stopMongod(conn);
- //--------------- Test 2 --------------------
- // start mongod again, this time with keyfile
- var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1", shardsvr: ""});
- // try adding the new shard
- assert.commandWorked(admin.runCommand({addShard: conn.host}));
+//--------------- Test 2 --------------------
+// start mongod again, this time with keyfile
+var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1", shardsvr: ""});
+// try adding the new shard
+assert.commandWorked(admin.runCommand({addShard: conn.host}));
- // Add some data
- var db = mongos.getDB("foo");
- var collA = mongos.getCollection("foo.bar");
+// Add some data
+var db = mongos.getDB("foo");
+var collA = mongos.getCollection("foo.bar");
- // enable sharding on a collection
- assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
- st.ensurePrimaryShard("foo", "shard0000");
+// enable sharding on a collection
+assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+st.ensurePrimaryShard("foo", "shard0000");
- assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
- // add data to the sharded collection
- for (var i = 0; i < 4; i++) {
- db.bar.save({_id: i});
- assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
- }
+// add data to the sharded collection
+for (var i = 0; i < 4; i++) {
+ db.bar.save({_id: i});
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+}
- // move a chunk
- assert.commandWorked(admin.runCommand({moveChunk: "foo.bar", find: {_id: 1}, to: "shard0001"}));
+// move a chunk
+assert.commandWorked(admin.runCommand({moveChunk: "foo.bar", find: {_id: 1}, to: "shard0001"}));
- // verify the chunk was moved
- admin.runCommand({flushRouterConfig: 1});
+// verify the chunk was moved
+admin.runCommand({flushRouterConfig: 1});
- var config = mongos.getDB("config");
- st.printShardingStatus(true);
+var config = mongos.getDB("config");
+st.printShardingStatus(true);
- // start balancer before removing the shard
- st.startBalancer();
+// start balancer before removing the shard
+st.startBalancer();
- //--------------- Test 3 --------------------
- // now drain the shard
- assert.commandWorked(admin.runCommand({removeShard: conn.host}));
+//--------------- Test 3 --------------------
+// now drain the shard
+assert.commandWorked(admin.runCommand({removeShard: conn.host}));
- // give it some time to drain
- assert.soon(function() {
- var result = admin.runCommand({removeShard: conn.host});
- printjson(result);
+// give it some time to drain
+assert.soon(function() {
+ var result = admin.runCommand({removeShard: conn.host});
+ printjson(result);
- return result.ok && result.state == "completed";
- }, "failed to drain shard completely", 5 * 60 * 1000);
+ return result.ok && result.state == "completed";
+}, "failed to drain shard completely", 5 * 60 * 1000);
- assert.eq(1, st.config.shards.count(), "removed server still appears in count");
+assert.eq(1, st.config.shards.count(), "removed server still appears in count");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/auth_no_config_primary.js b/jstests/sharding/auth_no_config_primary.js
index cb71ca0ef74..57d6f2109b0 100644
--- a/jstests/sharding/auth_no_config_primary.js
+++ b/jstests/sharding/auth_no_config_primary.js
@@ -12,47 +12,47 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- 'use strict';
+'use strict';
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st =
+ new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- st.s.getDB('admin').createUser({user: 'root', pwd: 'pass', roles: ['root']});
- st.s.getDB('admin').auth('root', 'pass');
- var testDB = st.s.getDB('test');
- testDB.user.insert({hello: 'world'});
+st.s.getDB('admin').createUser({user: 'root', pwd: 'pass', roles: ['root']});
+st.s.getDB('admin').auth('root', 'pass');
+var testDB = st.s.getDB('test');
+testDB.user.insert({hello: 'world'});
- // Kill all secondaries, forcing the current primary to step down.
- st.configRS.getSecondaries().forEach(function(secondaryConn) {
- MongoRunner.stopMongod(secondaryConn);
- });
+// Kill all secondaries, forcing the current primary to step down.
+st.configRS.getSecondaries().forEach(function(secondaryConn) {
+ MongoRunner.stopMongod(secondaryConn);
+});
- // Test authenticate through a fresh connection.
- var newConn = new Mongo(st.s.host);
+// Test authenticating through a fresh connection.
+var newConn = new Mongo(st.s.host);
- assert.commandFailedWithCode(newConn.getDB('test').runCommand({find: 'user'}),
- ErrorCodes.Unauthorized);
+assert.commandFailedWithCode(newConn.getDB('test').runCommand({find: 'user'}),
+ ErrorCodes.Unauthorized);
- newConn.getDB('admin').auth('root', 'pass');
+newConn.getDB('admin').auth('root', 'pass');
- var res = newConn.getDB('test').user.findOne();
- assert.neq(null, res);
- assert.eq('world', res.hello);
+var res = newConn.getDB('test').user.findOne();
+assert.neq(null, res);
+assert.eq('world', res.hello);
- // Test authenticate through new mongos.
- var otherMongos =
- MongoRunner.runMongos({keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
+// Test authenticating through a new mongos.
+var otherMongos =
+ MongoRunner.runMongos({keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
- assert.commandFailedWithCode(otherMongos.getDB('test').runCommand({find: 'user'}),
- ErrorCodes.Unauthorized);
+assert.commandFailedWithCode(otherMongos.getDB('test').runCommand({find: 'user'}),
+ ErrorCodes.Unauthorized);
- otherMongos.getDB('admin').auth('root', 'pass');
+otherMongos.getDB('admin').auth('root', 'pass');
- var res = otherMongos.getDB('test').user.findOne();
- assert.neq(null, res);
- assert.eq('world', res.hello);
+var res = otherMongos.getDB('test').user.findOne();
+assert.neq(null, res);
+assert.eq('world', res.hello);
- st.stop();
- MongoRunner.stopMongos(otherMongos);
+st.stop();
+MongoRunner.stopMongos(otherMongos);
})();
diff --git a/jstests/sharding/auth_sharding_cmd_metadata.js b/jstests/sharding/auth_sharding_cmd_metadata.js
index 352c31d199c..d4474a26da7 100644
--- a/jstests/sharding/auth_sharding_cmd_metadata.js
+++ b/jstests/sharding/auth_sharding_cmd_metadata.js
@@ -3,45 +3,44 @@
*/
(function() {
- "use strict";
+"use strict";
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st =
+ new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- var adminUser = {db: "admin", username: "foo", password: "bar"};
+var adminUser = {db: "admin", username: "foo", password: "bar"};
- st.s.getDB(adminUser.db).createUser({user: 'foo', pwd: 'bar', roles: jsTest.adminUserRoles});
+st.s.getDB(adminUser.db).createUser({user: 'foo', pwd: 'bar', roles: jsTest.adminUserRoles});
- st.s.getDB('admin').auth('foo', 'bar');
+st.s.getDB('admin').auth('foo', 'bar');
- st.adminCommand({enableSharding: 'test'});
- st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+st.adminCommand({enableSharding: 'test'});
+st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- st.d0.getDB('admin').createUser({user: 'user', pwd: 'pwd', roles: jsTest.adminUserRoles});
- st.d0.getDB('admin').auth('user', 'pwd');
+st.d0.getDB('admin').createUser({user: 'user', pwd: 'pwd', roles: jsTest.adminUserRoles});
+st.d0.getDB('admin').auth('user', 'pwd');
- var maxSecs = Math.pow(2, 32) - 1;
- var metadata = {$configServerState: {opTime: {ts: Timestamp(maxSecs, 0), t: maxSecs}}};
- var res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
+var maxSecs = Math.pow(2, 32) - 1;
+var metadata = {$configServerState: {opTime: {ts: Timestamp(maxSecs, 0), t: maxSecs}}};
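+// maxSecs is the largest seconds value a BSON Timestamp can hold, so the forged config
+// optime is unmistakable in the serverStatus checks below.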
+var res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
- assert.commandFailedWithCode(res.commandReply, ErrorCodes.Unauthorized);
+assert.commandFailedWithCode(res.commandReply, ErrorCodes.Unauthorized);
- // Make sure that the config server optime did not advance.
- var status = st.d0.getDB('test').runCommand({serverStatus: 1});
- assert.neq(null, status.sharding);
- assert.lt(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
+// Make sure that the config server optime did not advance.
+var status = st.d0.getDB('test').runCommand({serverStatus: 1});
+assert.neq(null, status.sharding);
+assert.lt(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
- st.d0.getDB('admin').createUser({user: 'internal', pwd: 'pwd', roles: ['__system']});
- st.d0.getDB('admin').auth('internal', 'pwd');
+st.d0.getDB('admin').createUser({user: 'internal', pwd: 'pwd', roles: ['__system']});
+st.d0.getDB('admin').auth('internal', 'pwd');
- res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
- assert.commandWorked(res.commandReply);
+res = st.d0.getDB('test').runCommandWithMetadata({ping: 1}, metadata);
+assert.commandWorked(res.commandReply);
- status = st.d0.getDB('test').runCommand({serverStatus: 1});
- assert.neq(null, status.sharding);
- assert.eq(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
-
- st.stop();
+status = st.d0.getDB('test').runCommand({serverStatus: 1});
+assert.neq(null, status.sharding);
+assert.eq(status.sharding.lastSeenConfigServerOpTime.t, maxSecs);
+st.stop();
})();
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 51810f76d3d..9aa9bc8db84 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -10,111 +10,111 @@
* @tags: [requires_persistence, requires_find_command]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
-
- // Replica set nodes started with --shardsvr do not enable key generation until they are added
- // to a sharded cluster and reject commands with gossiped clusterTime from users without the
- // advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
- // briefly authenticates as __system and recieves clusterTime metadata then will fail trying to
- // gossip that time later in setup.
- //
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
-
- /**
- * Checks if a query to the given collection will be routed to the secondary. Returns true if
- * query was routed to a secondary node.
- */
- function doesRouteToSec(coll, query) {
- var explain = coll.find(query).explain();
- assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
- var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
- var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
-
- jsTest.log('isMaster: ' + tojson(cmdRes));
-
- return cmdRes.secondary;
- }
-
- var rsOpts = {oplogSize: 50};
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
-
- var mongos = st.s;
- var replTest = st.rs0;
- var testDB = mongos.getDB('AAAAA');
- var coll = testDB.user;
- var nodeCount = replTest.nodes.length;
-
- /* Add an admin user to the replica member to simulate connecting from
- * remote location. This is because mongod allows unautheticated
- * connections to access the server from localhost connections if there
- * is no admin user.
- */
- var adminDB = mongos.getDB('admin');
- adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
- adminDB.auth('user', 'password');
- var priAdminDB = replTest.getPrimary().getDB('admin');
- replTest.getPrimary().waitForClusterTime(60);
- priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 30000});
-
- coll.drop();
- coll.setSlaveOk(true);
-
- /* Secondaries should be up here, but they can still be in RECOVERY
- * state, which will make the ReplicaSetMonitor mark them as
- * ok = false and not eligible for slaveOk queries.
- */
- awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var x = 0; x < 20; x++) {
- bulk.insert({v: x, k: 10});
- }
- assert.writeOK(bulk.execute({w: nodeCount}));
-
- /* Although mongos never caches query results, try to do a different query
- * everytime just to be sure.
- */
- var vToFind = 0;
-
- jsTest.log('First query to SEC');
- assert(doesRouteToSec(coll, {v: vToFind++}));
-
- var SIG_TERM = 15;
- replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
-
- for (var n = 0; n < nodeCount; n++) {
- replTest.restart(n, rsOpts);
- }
-
- replTest.awaitSecondaryNodes();
-
- coll.setSlaveOk(true);
-
- /* replSetMonitor does not refresh the nodes information when getting secondaries.
- * A node that is previously labeled as secondary can now be a primary, so we
- * wait for the replSetMonitorWatcher thread to refresh the nodes information.
- */
- awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
- //
- // We also need to wait for the primary, it's possible that the mongos may think a node is a
- // secondary but it actually changed to a primary before we send our final query.
- //
- awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
-
- // Recheck if we can still query secondaries after refreshing connections.
- jsTest.log('Final query to SEC');
- assert(doesRouteToSec(coll, {v: vToFind++}));
-
- // Cleanup auth so Windows will be able to shutdown gracefully
- priAdminDB = replTest.getPrimary().getDB('admin');
- priAdminDB.auth('user', 'password');
- priAdminDB.dropUser('user');
-
- st.stop();
+'use strict';
+load("jstests/replsets/rslib.js");
+
+// Replica set nodes started with --shardsvr do not enable key generation until they are added
+// to a sharded cluster and reject commands with gossiped clusterTime from users without the
+// advanceClusterTime privilege. This causes ShardingTest setup to fail because the shell
+// briefly authenticates as __system and receives clusterTime metadata, then fails trying to
+// gossip that time later in setup.
+//
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
+
+/**
+ * Checks if a query to the given collection will be routed to the secondary. Returns true if
+ * query was routed to a secondary node.
+ */
+function doesRouteToSec(coll, query) {
+ var explain = coll.find(query).explain();
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+ var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
+ var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
+ var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
+
+ jsTest.log('isMaster: ' + tojson(cmdRes));
+
+ return cmdRes.secondary;
+}
+
+var rsOpts = {oplogSize: 50};
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
+
+var mongos = st.s;
+var replTest = st.rs0;
+var testDB = mongos.getDB('AAAAA');
+var coll = testDB.user;
+var nodeCount = replTest.nodes.length;
+
+/* Add an admin user to the replica set member to simulate connecting from a
+ * remote location. This is because mongod allows unauthenticated connections
+ * to access the server over localhost if there is no admin user.
+ */
+var adminDB = mongos.getDB('admin');
+adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
+adminDB.auth('user', 'password');
+var priAdminDB = replTest.getPrimary().getDB('admin');
+replTest.getPrimary().waitForClusterTime(60);
+priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 30000});
+
+coll.drop();
+coll.setSlaveOk(true);
+
+/* Secondaries should be up here, but they can still be in RECOVERY
+ * state, which will make the ReplicaSetMonitor mark them as
+ * ok = false and not eligible for slaveOk queries.
+ */
+awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var x = 0; x < 20; x++) {
+ bulk.insert({v: x, k: 10});
+}
+assert.writeOK(bulk.execute({w: nodeCount}));
+
+/* Although mongos never caches query results, try to do a different query
+ * every time just to be sure.
+ */
+var vToFind = 0;
+
+jsTest.log('First query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
+
+var SIG_TERM = 15;
+replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
+
+for (var n = 0; n < nodeCount; n++) {
+ replTest.restart(n, rsOpts);
+}
+
+replTest.awaitSecondaryNodes();
+
+coll.setSlaveOk(true);
+
+/* The ReplicaSetMonitor does not refresh node information when getting secondaries.
+ * A node that was previously labeled as a secondary can now be a primary, so we
+ * wait for the replSetMonitorWatcher thread to refresh the node information.
+ */
+awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
+//
+// We also need to wait for the primary; it's possible that the mongos may think a node is a
+// secondary but it actually changed to a primary before we send our final query.
+//
+awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
+
+// Recheck if we can still query secondaries after refreshing connections.
+jsTest.log('Final query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
+
+// Clean up auth so Windows will be able to shut down gracefully
+priAdminDB = replTest.getPrimary().getDB('admin');
+priAdminDB.auth('user', 'password');
+priAdminDB.dropUser('user');
+
+st.stop();
})();
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 0f444f6208d..0d1fb713c97 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -3,115 +3,114 @@
(function() {
- //
- // User document declarations. All users in this test are added to the admin database.
- //
-
- var adminUser = {
- user: "admin",
- pwd: "a",
- roles:
- ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
- };
-
- var test1User = {
- user: "test",
- pwd: "a",
- roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
- };
-
- function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
- }
-
- function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
+//
+// User document declarations. All users in this test are added to the admin database.
+//
+
+var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
+};
+
+var test1User = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
+};
+
+function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
+}
+
+function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
+}
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var cluster = new ShardingTest({
+ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
+});
+
+// Set up the test data.
+(function() {
+var adminDB = cluster.getDB('admin');
+var test1DB = adminDB.getSiblingDB('test1');
+var test2DB = adminDB.getSiblingDB('test2');
+var ex;
+try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
+
+ adminDB.dropUser(test1User.user);
+ adminDB.createUser(test1User);
+
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test1DB.foo, {a: 2});
+ assertInsert(test1DB.foo, {a: 3});
+ assertInsert(test1DB.foo, {a: 4});
+ assertInsert(test2DB.foo, {x: 1});
+} finally {
+ adminDB.logout();
+}
+}());
+
+assert.throws(function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB;
+ var test2DB;
+ assert(adminDB.auth(test1User.user, test1User.pwd));
+ try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
+
+ // Sanity check. test1User can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 4);
+ assert.throws(test2DB.foo.count);
+
+ test1DB.foo.mapReduce(
+ function() {
+ emit(0, this.a);
+ var t2 = new Mongo().getDB("test2");
+ t2.ad.insert(this);
+ },
+ function(k, vs) {
+ var t2 = new Mongo().getDB("test2");
+ t2.reductio.insert(this);
+
+ return Array.sum(vs);
+ },
+ {
+ out: "bar",
+ finalize: function(k, v) {
+ for (k in this) {
+ if (this.hasOwnProperty(k))
+ print(k + "=" + v);
+ }
+ var t2 = new Mongo().getDB("test2");
+ t2.absurdum.insert({key: k, value: v});
+ }
+ });
+ } finally {
+ adminDB.logout();
}
+});
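+// Each of the map, reduce, and finalize functions above tries to write into test2 from
+// inside the map-reduce; the admin-authenticated block below verifies that none of those
+// writes landed (reductio, ad, and absurdum all stay empty).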
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var cluster = new ShardingTest({
- name: "authmr",
- shards: 1,
- mongos: 1,
- other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
- });
-
- // Set up the test data.
- (function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
-
- adminDB.dropUser(test1User.user);
- adminDB.createUser(test1User);
-
- assertInsert(test1DB.foo, {a: 1});
- assertInsert(test1DB.foo, {a: 2});
- assertInsert(test1DB.foo, {a: 3});
- assertInsert(test1DB.foo, {a: 4});
- assertInsert(test2DB.foo, {x: 1});
- } finally {
- adminDB.logout();
- }
- }());
-
- assert.throws(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1User.user, test1User.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
-
- // Sanity check. test1User can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 4);
- assert.throws(test2DB.foo.count);
-
- test1DB.foo.mapReduce(
- function() {
- emit(0, this.a);
- var t2 = new Mongo().getDB("test2");
- t2.ad.insert(this);
- },
- function(k, vs) {
- var t2 = new Mongo().getDB("test2");
- t2.reductio.insert(this);
-
- return Array.sum(vs);
- },
- {
- out: "bar",
- finalize: function(k, v) {
- for (k in this) {
- if (this.hasOwnProperty(k))
- print(k + "=" + v);
- }
- var t2 = new Mongo().getDB("test2");
- t2.absurdum.insert({key: k, value: v});
- }
- });
- } finally {
- adminDB.logout();
- }
- });
-
- (function() {
- var adminDB = cluster.getDB('admin');
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
- try {
- var test2DB = cluster.getDB('test2');
- assert.eq(test2DB.reductio.count(), 0, "reductio");
- assert.eq(test2DB.ad.count(), 0, "ad");
- assert.eq(test2DB.absurdum.count(), 0, "absurdum");
- } finally {
- adminDB.logout();
- }
- }());
-
- cluster.stop();
+(function() {
+var adminDB = cluster.getDB('admin');
+assert(adminDB.auth(adminUser.user, adminUser.pwd));
+try {
+ var test2DB = cluster.getDB('test2');
+ assert.eq(test2DB.reductio.count(), 0, "reductio");
+ assert.eq(test2DB.ad.count(), 0, "ad");
+ assert.eq(test2DB.absurdum.count(), 0, "absurdum");
+} finally {
+ adminDB.logout();
+}
+}());
+
+cluster.stop();
})();
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 95e0b0d7b45..3d60fb2ccca 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -3,88 +3,87 @@
(function() {
- //
- // User document declarations. All users in this test are added to the admin database.
- //
+//
+// User document declarations. All users in this test are added to the admin database.
+//
- var adminUser = {
- user: "admin",
- pwd: "a",
- roles:
- ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
- };
+var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
+};
- var test1Reader = {
- user: "test",
- pwd: "a",
- roles: [{role: 'read', db: 'test1', hasRole: true, canDelegate: false}]
- };
+var test1Reader = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'read', db: 'test1', hasRole: true, canDelegate: false}]
+};
- function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
- }
+function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
+}
- function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
- }
+function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
+}
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var cluster = new ShardingTest({
- name: "authwhere",
- shards: 1,
- mongos: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var cluster = new ShardingTest({
+ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
- // Set up the test data.
- (function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
+// Set up the test data.
+(function() {
+var adminDB = cluster.getDB('admin');
+var test1DB = adminDB.getSiblingDB('test1');
+var test2DB = adminDB.getSiblingDB('test2');
+var ex;
+try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
- adminDB.dropUser(test1Reader.user);
- adminDB.createUser(test1Reader);
+ adminDB.dropUser(test1Reader.user);
+ adminDB.createUser(test1Reader);
- assertInsert(test1DB.foo, {a: 1});
- assertInsert(test2DB.foo, {x: 1});
- } finally {
- adminDB.logout();
- }
- }());
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test2DB.foo, {x: 1});
+} finally {
+ adminDB.logout();
+}
+}());
- (function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
+(function() {
+var adminDB = cluster.getDB('admin');
+var test1DB;
+var test2DB;
+assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
+try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
- // Sanity check. test1Reader can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 1);
- assert.throws(function() {
- test2DB.foo.count();
- });
+ // Sanity check. test1Reader can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 1);
+ assert.throws(function() {
+ test2DB.foo.count();
+ });
- // Cannot examine second database from a where clause.
- assert.throws(function() {
- test1DB.foo.count("db.getSiblingDB('test2').foo.count() == 1");
- });
+ // Cannot examine second database from a where clause.
+ assert.throws(function() {
+ test1DB.foo.count("db.getSiblingDB('test2').foo.count() == 1");
+ });
- // Cannot write test1 via tricky where clause.
- assert.throws(function() {
- test1DB.foo.count("db.foo.insert({b: 1})");
- });
- assert.eq(test1DB.foo.count(), 1);
- } finally {
- adminDB.logout();
- }
- }());
+ // Cannot write test1 via tricky where clause.
+ assert.throws(function() {
+ test1DB.foo.count("db.foo.insert({b: 1})");
+ });
+ assert.eq(test1DB.foo.count(), 1);
+} finally {
+ adminDB.logout();
+}
+}());
- cluster.stop();
+cluster.stop();
})();
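
The $where checks in authwhere.js lean on a mongo shell convention: a string passed as the query to find()/count() is shorthand for a {$where: <javascript>} filter evaluated server-side against each document, which is exactly the escape hatch the authorization checks must contain. A sketch of the equivalent explicit form (illustrative, using the same collection as the test):

    // The string query...
    test1DB.foo.count("db.getSiblingDB('test2').foo.count() == 1");
    // ...is shorthand for an explicit $where filter:
    test1DB.foo.count({$where: "db.getSiblingDB('test2').foo.count() == 1"});
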
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index bb86c1fb9f1..ef6af0d57c5 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -3,69 +3,69 @@
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 4});
- var config = st.s0.getDB('config');
+var st = new ShardingTest({shards: 4});
+var config = st.s0.getDB('config');
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- function prepareCollectionForBalance(collName) {
- assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}}));
+function prepareCollectionForBalance(collName) {
+ assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}}));
- var coll = st.s0.getCollection(collName);
+ var coll = st.s0.getCollection(collName);
- // Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+ // Create 4 chunks initially and ensure they get balanced within 1 balancer round
+ assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+ assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+ assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+ assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
- assert.commandWorked(st.splitAt(collName, {Key: 10}));
- assert.commandWorked(st.splitAt(collName, {Key: 20}));
- assert.commandWorked(st.splitAt(collName, {Key: 30}));
+ assert.commandWorked(st.splitAt(collName, {Key: 10}));
+ assert.commandWorked(st.splitAt(collName, {Key: 20}));
+ assert.commandWorked(st.splitAt(collName, {Key: 30}));
-        // Move two of the chunks to st.shard1.shardName so we have the option to do parallel balancing
- assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
+    // Move two of the chunks to st.shard1.shardName so we have the option to do parallel balancing
+ assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
+ assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
- assert.eq(2, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
- assert.eq(2, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
- }
+ assert.eq(2, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
+ assert.eq(2, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+}
- function checkCollectionBalanced(collName) {
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard2.shardName}).itcount());
- assert.eq(1, config.chunks.find({ns: collName, shard: st.shard3.shardName}).itcount());
- }
+function checkCollectionBalanced(collName) {
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard2.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard3.shardName}).itcount());
+}
- function countMoves(collName) {
- return config.changelog.find({what: 'moveChunk.start', ns: collName}).itcount();
- }
+function countMoves(collName) {
+ return config.changelog.find({what: 'moveChunk.start', ns: collName}).itcount();
+}
- prepareCollectionForBalance('TestDB.TestColl1');
- prepareCollectionForBalance('TestDB.TestColl2');
+prepareCollectionForBalance('TestDB.TestColl1');
+prepareCollectionForBalance('TestDB.TestColl2');
- // Count the moveChunk start attempts accurately and ensure that only the correct number of
- // migrations are scheduled
- const testColl1InitialMoves = countMoves('TestDB.TestColl1');
- const testColl2InitialMoves = countMoves('TestDB.TestColl2');
+// Count the moveChunk start attempts accurately and ensure that only the correct number of
+// migrations are scheduled
+const testColl1InitialMoves = countMoves('TestDB.TestColl1');
+const testColl2InitialMoves = countMoves('TestDB.TestColl2');
- st.startBalancer();
- st.waitForBalancer(true, 60000);
- st.waitForBalancer(true, 60000);
- st.stopBalancer();
+st.startBalancer();
+st.waitForBalancer(true, 60000);
+st.waitForBalancer(true, 60000);
+st.stopBalancer();
- checkCollectionBalanced('TestDB.TestColl1');
- checkCollectionBalanced('TestDB.TestColl2');
+checkCollectionBalanced('TestDB.TestColl1');
+checkCollectionBalanced('TestDB.TestColl2');
- assert.eq(2, countMoves('TestDB.TestColl1') - testColl1InitialMoves);
- assert.eq(2, countMoves('TestDB.TestColl2') - testColl2InitialMoves);
+assert.eq(2, countMoves('TestDB.TestColl1') - testColl1InitialMoves);
+assert.eq(2, countMoves('TestDB.TestColl2') - testColl2InitialMoves);
- // Ensure there are no migration errors reported
- assert.eq(0, config.changelog.find({what: 'moveChunk.error'}).itcount());
+// Ensure there are no migration errors reported
+assert.eq(0, config.changelog.find({what: 'moveChunk.error'}).itcount());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index 35c9132b061..0be9549f3cd 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -2,66 +2,66 @@
* Tests that the cluster is balanced in parallel in one balancer round (replica sets).
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 4, rs: {nodes: 3}});
+var st = new ShardingTest({shards: 4, rs: {nodes: 3}});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
- var coll = st.s0.getDB('TestDB').TestColl;
+var coll = st.s0.getDB('TestDB').TestColl;
- // Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+// Create 4 chunks initially and ensure they get balanced within 1 balancer round
+assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
-    // Move two of the chunks to st.shard1.shardName so we have the option to do parallel balancing
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
+// Move two of the chunks to st.shard1.shardName so we have the option to do parallel balancing
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
-    // Now enable the balancer and wait for a single balancer round
- st.startBalancer();
- st.waitForBalancer(true, 60000);
- st.stopBalancer();
+// Now enable the balancer and wait for a single balancer round
+st.startBalancer();
+st.waitForBalancer(true, 60000);
+st.stopBalancer();
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
- .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+ .itcount());
- // Ensure the range deleter quiesces
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- st.rs2.awaitReplication();
- st.rs3.awaitReplication();
+// Ensure the range deleter quiesces
+st.rs0.awaitReplication();
+st.rs1.awaitReplication();
+st.rs2.awaitReplication();
+st.rs3.awaitReplication();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index c439f5347d3..390d9bb7aa6 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -4,59 +4,59 @@
load('jstests/libs/feature_compatibility_version.js');
(function() {
- 'use strict';
-
- var rst = new ReplSetTest(
- {name: "configRS", nodes: 3, nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
- rst.startSet();
- var conf = rst.getReplSetConfig();
- conf.members[1].priority = 0;
- conf.members[2].priority = 0;
- conf.writeConcernMajorityJournalDefault = true;
- rst.initiate(conf);
-
- // Config servers always start at the latest available FCV for the binary. This poses a problem
- // when this test is run in the mixed version suite because mongos will be 'last-stable' and if
- // this node is of the latest binary, it will report itself as the 'latest' FCV, which would
-    // cause mongos to refuse to connect to it and shut down.
- //
- // In order to work around this, in the mixed version suite, be pessimistic and always set this
- // node to the 'last-stable' FCV
- if (jsTestOptions().shardMixedBinVersions) {
- assert.commandWorked(
- rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- rst.awaitReplication();
- }
-
- var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
- {
- // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
- // perform writes to the config servers.
- var mongos = MongoRunner.runMongos({configdb: seedList});
- var admin = mongos.getDB('admin');
- assert.writeOK(admin.foo.insert({a: 1}));
- assert.eq(1, admin.foo.findOne().a);
- MongoRunner.stopMongos(mongos);
- }
-
- // Wait for replication to all config server replica set members to ensure that mongos
- // will be able to do majority reads when trying to verify if the initial cluster metadata
- // has been properly written.
- rst.awaitLastOpCommitted();
- // Now take down the one electable node
- rst.stop(0);
- rst.awaitNoPrimary();
-
- // Start a mongos when there is no primary
+'use strict';
+
+var rst = new ReplSetTest(
+ {name: "configRS", nodes: 3, nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
+rst.startSet();
+var conf = rst.getReplSetConfig();
+conf.members[1].priority = 0;
+conf.members[2].priority = 0;
+conf.writeConcernMajorityJournalDefault = true;
+rst.initiate(conf);
+
+// Config servers always start at the latest available FCV for the binary. This poses a problem
+// when this test is run in the mixed version suite because mongos will be 'last-stable' and if
+// this node is of the latest binary, it will report itself as the 'latest' FCV, which would
+// cause mongos to refuse to connect to it and shut down.
+//
+// In order to work around this, in the mixed version suite, be pessimistic and always set this
+// node to the 'last-stable' FCV
+if (jsTestOptions().shardMixedBinVersions) {
+ assert.commandWorked(
+ rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ rst.awaitReplication();
+}
+
+var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
+{
+ // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
+ // perform writes to the config servers.
var mongos = MongoRunner.runMongos({configdb: seedList});
- // Take down the one node the mongos knew about to ensure that it autodiscovered the one
- // remaining
- // config server
- rst.stop(1);
-
var admin = mongos.getDB('admin');
- mongos.setSlaveOk(true);
+ assert.writeOK(admin.foo.insert({a: 1}));
assert.eq(1, admin.foo.findOne().a);
MongoRunner.stopMongos(mongos);
- rst.stopSet();
+}
+
+// Wait for replication to all config server replica set members to ensure that mongos
+// will be able to do majority reads when trying to verify if the initial cluster metadata
+// has been properly written.
+rst.awaitLastOpCommitted();
+// Now take down the one electable node
+rst.stop(0);
+rst.awaitNoPrimary();
+
+// Start a mongos when there is no primary
+var mongos = MongoRunner.runMongos({configdb: seedList});
+// Take down the one node the mongos knew about to ensure that it autodiscovered the one
+// remaining
+// config server
+rst.stop(1);
+
+var admin = mongos.getDB('admin');
+mongos.setSlaveOk(true);
+assert.eq(1, admin.foo.findOne().a);
+MongoRunner.stopMongos(mongos);
+rst.stopSet();
})();
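
The seed list in this test uses the standard replica-set connection form while naming only a secondary: mongos accepts "<setName>/<host:port>[,<host:port>...]" and discovers the remaining config servers from the set's own configuration, which is what lets it keep operating as nodes 0 and 1 are stopped. The shape it relies on (host and port here are illustrative):

    // One reachable member is enough; the rest of the set is autodiscovered.
    var seed = "configRS/localhost:27019";  // illustrative host:port
    var mongos = MongoRunner.runMongos({configdb: seed});
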
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 7ac5047cc71..58dbe7ece1e 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -2,78 +2,78 @@
* This test confirms that chunks get split as they grow due to data insertion.
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
-
- var s = new ShardingTest({
- name: "auto1",
- shards: 2,
- mongos: 1,
- other: {enableAutoSplit: true, chunkSize: 10},
- });
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-
- var bigString = "";
- while (bigString.length < 1024 * 50)
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-
- var db = s.getDB("test");
- var primary = s.getPrimaryShard("test").getDB("test");
- var coll = db.foo;
- var counts = [];
-
- var i = 0;
-
- // Inserts numDocs documents into the collection, waits for any ongoing
- // splits to finish, and then prints some information about the
- // collection's chunks
- function insertDocsAndWaitForSplit(numDocs) {
- var bulk = coll.initializeUnorderedBulkOp();
- var curMaxKey = i;
- // Increment the global 'i' variable to keep 'num' unique across all
- // documents
- for (; i < curMaxKey + numDocs; i++) {
- bulk.insert({num: i, s: bigString});
- }
- assert.writeOK(bulk.execute());
-
- waitForOngoingChunkSplits(s);
-
- s.printChunks();
- s.printChangeLog();
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+
+var s = new ShardingTest({
+ name: "auto1",
+ shards: 2,
+ mongos: 1,
+ other: {enableAutoSplit: true, chunkSize: 10},
+});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+
+var bigString = "";
+while (bigString.length < 1024 * 50)
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+
+var db = s.getDB("test");
+var primary = s.getPrimaryShard("test").getDB("test");
+var coll = db.foo;
+var counts = [];
+
+var i = 0;
+
+// Inserts numDocs documents into the collection, waits for any ongoing
+// splits to finish, and then prints some information about the
+// collection's chunks
+function insertDocsAndWaitForSplit(numDocs) {
+ var bulk = coll.initializeUnorderedBulkOp();
+ var curMaxKey = i;
+ // Increment the global 'i' variable to keep 'num' unique across all
+ // documents
+ for (; i < curMaxKey + numDocs; i++) {
+ bulk.insert({num: i, s: bigString});
}
+ assert.writeOK(bulk.execute());
- insertDocsAndWaitForSplit(100);
+ waitForOngoingChunkSplits(s);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
- assert.eq(100, db.foo.find().itcount());
+ s.printChunks();
+ s.printChangeLog();
+}
- print("datasize: " +
- tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
+insertDocsAndWaitForSplit(100);
- insertDocsAndWaitForSplit(100);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
+assert.eq(100, db.foo.find().itcount());
- insertDocsAndWaitForSplit(200);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
+print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
- insertDocsAndWaitForSplit(300);
- counts.push(s.config.chunks.count({"ns": "test.foo"}));
+insertDocsAndWaitForSplit(100);
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
- assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
- var sorted = counts.slice(0);
-    // Sort doesn't sort numbers correctly by default, resulting in failures
- sorted.sort(function(a, b) {
- return a - b;
- });
- assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
+insertDocsAndWaitForSplit(200);
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
- print(counts);
+insertDocsAndWaitForSplit(300);
+counts.push(s.config.chunks.count({"ns": "test.foo"}));
- printjson(db.stats());
+assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
+var sorted = counts.slice(0);
+// Sort doesn't sort numbers correctly by default, resulting in failures
+sorted.sort(function(a, b) {
+ return a - b;
+});
+assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
- s.stop();
+print(counts);
+
+printjson(db.stats());
+
+s.stop();
})();
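
The sizes in autosplit.js are what make the splits inevitable: each document carries a roughly 50 KB string against a 10 MB chunk size, so a chunk overflows after a couple hundred documents, and the 700 documents inserted across the four batches force repeated splits. A back-of-the-envelope check (BSON overhead ignored):

    var docBytes = 1024 * 50;                              // bigString padding
    var chunkBytes = 10 * 1024 * 1024;                     // chunkSize: 10 (MB)
    var docsPerChunk = Math.floor(chunkBytes / docBytes);  // ~204 documents
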
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 140ac3a1a40..1777a82678a 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -7,86 +7,85 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
+'use strict';
+load('jstests/sharding/autosplit_include.js');
- var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- // The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
- // moves/splits depending on the timing.
+// The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
+// moves/splits depending on the timing.
- // Test is not valid for debug build, heuristics get all mangled by debug reload behavior
- var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
+// Test is not valid for debug build, heuristics get all mangled by debug reload behavior
+var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
- if (!isDebugBuild) {
- var mongos = st.s0;
- var config = mongos.getDB("config");
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.hashBar");
+if (!isDebugBuild) {
+ var mongos = st.s0;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.hashBar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- var numChunks = 10;
+ var numChunks = 10;
- // Split off the low and high chunks, to get non-special-case behavior
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
+ // Split off the low and high chunks, to get non-special-case behavior
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
- // Split all the other chunks, and an extra chunk. We need the extra chunk to compensate for
- // the fact that the chunk differ resets the highest chunk's (i.e. the last-split-chunk's)
- // data count on reload.
- for (var i = 1; i < numChunks + 1; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
-
- jsTest.log("Setup collection...");
- st.printShardingStatus(true);
- var pad = (new Array(1024)).join(' ');
- var approxSize = Object.bsonsize({_id: 0.0, pad: pad});
-
- jsTest.log("Starting inserts of approx size: " + approxSize + "...");
-
- var chunkSizeBytes = 1024 * 1024;
-
- // We insert slightly more than the max number of docs per chunk, to test
- // if resetting the chunk size happens during reloads. If the size is
- // reset, we'd expect to split less, since the first split would then
- // disable further splits (statistically, since the decision is randomized).
- // We choose 1.4 since split attempts happen about once every 1/5 chunkSize,
-        // and we want to be sure we definitely get a split attempt at a full chunk.
- var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
- var totalInserts = insertsForSplit * numChunks;
-
- printjson({
- chunkSizeBytes: chunkSizeBytes,
- insertsForSplit: insertsForSplit,
- totalInserts: totalInserts
- });
-
- // Insert enough docs to trigger splits into all chunks
- for (var i = 0; i < totalInserts; i++) {
- assert.writeOK(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
- // Splitting is asynchronous so we should wait after each insert
- // for autosplitting to happen
- waitForOngoingChunkSplits(st);
- }
+ // Split all the other chunks, and an extra chunk. We need the extra chunk to compensate for
+ // the fact that the chunk differ resets the highest chunk's (i.e. the last-split-chunk's)
+ // data count on reload.
+ for (var i = 1; i < numChunks + 1; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ }
- jsTest.log("Inserts completed...");
+ jsTest.log("Setup collection...");
+ st.printShardingStatus(true);
+ var pad = (new Array(1024)).join(' ');
+ var approxSize = Object.bsonsize({_id: 0.0, pad: pad});
+
+ jsTest.log("Starting inserts of approx size: " + approxSize + "...");
+
+ var chunkSizeBytes = 1024 * 1024;
+
+ // We insert slightly more than the max number of docs per chunk, to test
+ // if resetting the chunk size happens during reloads. If the size is
+ // reset, we'd expect to split less, since the first split would then
+ // disable further splits (statistically, since the decision is randomized).
+ // We choose 1.4 since split attempts happen about once every 1/5 chunkSize,
+    // and we want to be sure we definitely get a split attempt at a full chunk.
+ var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
+ var totalInserts = insertsForSplit * numChunks;
+
+ printjson({
+ chunkSizeBytes: chunkSizeBytes,
+ insertsForSplit: insertsForSplit,
+ totalInserts: totalInserts
+ });
+
+ // Insert enough docs to trigger splits into all chunks
+ for (var i = 0; i < totalInserts; i++) {
+ assert.writeOK(coll.insert({_id: i % numChunks + (i / totalInserts), pad: pad}));
+ // Splitting is asynchronous so we should wait after each insert
+ // for autosplitting to happen
+ waitForOngoingChunkSplits(st);
+ }
- st.printShardingStatus(true);
- printjson(coll.stats());
+ jsTest.log("Inserts completed...");
- // Check that all chunks (except the two extreme chunks)
- // have been split at least once + 1 extra chunk as reload buffer
- assert.gte(config.chunks.count({"ns": "foo.hashBar"}), numChunks * 2 + 3);
+ st.printShardingStatus(true);
+ printjson(coll.stats());
- jsTest.log("DONE!");
+ // Check that all chunks (except the two extreme chunks)
+ // have been split at least once + 1 extra chunk as reload buffer
+ assert.gte(config.chunks.count({"ns": "foo.hashBar"}), numChunks * 2 + 3);
- } else {
- jsTest.log("Disabled test in debug builds.");
- }
+ jsTest.log("DONE!");
- st.stop();
+} else {
+ jsTest.log("Disabled test in debug builds.");
+}
+st.stop();
})();
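
To make the sizing comment in autosplit_heuristics.js concrete, assuming the standard BSON layout: pad is 1023 spaces, so approxSize comes out around 1051 bytes, and each of the ten chunks receives about 1.4x the 1 MB it takes to become split-eligible:

    // approxSize ~ 4 (length) + 13 (_id double) + 1033 (pad string) + 1 (EOO) = 1051 bytes
    // insertsForSplit = Math.ceil(1024 * 1024 * 1.4 / 1051) = 1397 inserts per chunk
    // totalInserts    = 1397 * 10                           = 13970
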
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index 8720790596f..0372ca09b9a 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -1,166 +1,166 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 2, other: {enableAutoSplit: true}});
+var s = new ShardingTest({shards: 2, mongos: 2, other: {enableAutoSplit: true}});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- var bigString = "";
- while (bigString.length < 1024 * 50) {
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
- }
+var bigString = "";
+while (bigString.length < 1024 * 50) {
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+}
- var db = s.getDB("test");
- var coll = db.foo;
+var db = s.getDB("test");
+var coll = db.foo;
- var i = 0;
- for (var j = 0; j < 30; j++) {
- print("j:" + j + " : " + Date.timeFunc(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var k = 0; k < 100; k++) {
- bulk.insert({num: i, s: bigString});
- i++;
- }
- assert.writeOK(bulk.execute());
- }));
- }
+var i = 0;
+for (var j = 0; j < 30; j++) {
+ print("j:" + j + " : " + Date.timeFunc(function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var k = 0; k < 100; k++) {
+ bulk.insert({num: i, s: bigString});
+ i++;
+ }
+ assert.writeOK(bulk.execute());
+ }));
+}
- s.startBalancer();
+s.startBalancer();
- let confirmBalancerSettings = function(expectedBalancerOn, expectedAutoSplitOn) {
- let configSettings = s.s.getDB('config').settings;
+let confirmBalancerSettings = function(expectedBalancerOn, expectedAutoSplitOn) {
+ let configSettings = s.s.getDB('config').settings;
- let balancerSettings = configSettings.findOne({_id: 'balancer'});
- assert.neq(null, balancerSettings);
- assert.eq(expectedBalancerOn, !balancerSettings.stopped);
- assert.eq(expectedBalancerOn, balancerSettings.mode == 'full');
+ let balancerSettings = configSettings.findOne({_id: 'balancer'});
+ assert.neq(null, balancerSettings);
+ assert.eq(expectedBalancerOn, !balancerSettings.stopped);
+ assert.eq(expectedBalancerOn, balancerSettings.mode == 'full');
- let autoSplitSettings = configSettings.findOne({_id: 'autosplit'});
- assert.neq(null, autoSplitSettings);
- assert.eq(expectedAutoSplitOn, autoSplitSettings.enabled);
- };
+ let autoSplitSettings = configSettings.findOne({_id: 'autosplit'});
+ assert.neq(null, autoSplitSettings);
+ assert.eq(expectedAutoSplitOn, autoSplitSettings.enabled);
+};
- confirmBalancerSettings(true, true);
+confirmBalancerSettings(true, true);
- assert.eq(i, j * 100, "setup");
+assert.eq(i, j * 100, "setup");
-    // Until SERVER-9715 is fixed, the sync command must be run on a different connection
- new Mongo(s.s.host).adminCommand("connpoolsync");
+// Until SERVER-9715 is fixed, the sync command must be run on a different connection
+new Mongo(s.s.host).adminCommand("connpoolsync");
- print("done inserting data");
+print("done inserting data");
- print("datasize: " +
- tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
- s.printChunks();
+print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
+s.printChunks();
- var counta, countb;
+var counta, countb;
- function doCountsGlobal() {
- counta = s._connections[0].getDB("test").foo.count();
- countb = s._connections[1].getDB("test").foo.count();
- return counta + countb;
- }
+function doCountsGlobal() {
+ counta = s._connections[0].getDB("test").foo.count();
+ countb = s._connections[1].getDB("test").foo.count();
+ return counta + countb;
+}
- // Wait for the chunks to distribute
- assert.soon(function() {
- doCountsGlobal();
-        print("Counts: " + counta + " " + countb);
+// Wait for the chunks to distribute
+assert.soon(function() {
+ doCountsGlobal();
+    print("Counts: " + counta + " " + countb);
- return counta > 0 && countb > 0;
- });
+ return counta > 0 && countb > 0;
+});
- print("checkpoint B");
+print("checkpoint B");
- var missing = [];
+var missing = [];
- for (i = 0; i < j * 100; i++) {
- var x = coll.findOne({num: i});
+for (i = 0; i < j * 100; i++) {
+ var x = coll.findOne({num: i});
+ if (!x) {
+ missing.push(i);
+ print("can't find: " + i);
+ sleep(5000);
+ x = coll.findOne({num: i});
if (!x) {
- missing.push(i);
- print("can't find: " + i);
- sleep(5000);
- x = coll.findOne({num: i});
- if (!x) {
- print("still can't find: " + i);
-
- for (var zzz = 0; zzz < s._connections.length; zzz++) {
- if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
- print("found on wrong server: " + s._connections[zzz]);
- }
+ print("still can't find: " + i);
+
+ for (var zzz = 0; zzz < s._connections.length; zzz++) {
+ if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
+ print("found on wrong server: " + s._connections[zzz]);
}
}
}
}
-
- s.printChangeLog();
-
- print("missing: " + tojson(missing));
- assert.soon(function(z) {
- return doCountsGlobal() == j * 100;
- }, "from each a:" + counta + " b:" + countb + " i:" + i);
- print("checkpoint B.a");
- s.printChunks();
- assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
- assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
- assert(missing.length == 0, "missing : " + tojson(missing));
-
- print("checkpoint C");
-
- assert(Array.unique(s.config.chunks.find({ns: 'test.foo'}).toArray().map(function(z) {
- return z.shard;
- })).length == 2,
- "should be using both servers");
-
- for (i = 0; i < 100; i++) {
- cursor = coll.find().batchSize(5);
- cursor.next();
- cursor.close();
- }
-
- print("checkpoint D");
-
- // Test non-sharded cursors
- db = s.getDB("test2");
- var t = db.foobar;
- for (i = 0; i < 100; i++)
- t.save({_id: i});
- for (i = 0; i < 100; i++) {
- var cursor = t.find().batchSize(2);
- cursor.next();
- assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
- cursor.close();
- }
-
- assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
-
- // Stop the balancer, otherwise it may grab some connections from the pool for itself
- s.stopBalancer();
-
- confirmBalancerSettings(false, false);
-
- print("checkpoint E");
-
- assert(t.findOne(), "check close 0");
-
- for (i = 0; i < 20; i++) {
- var conn = new Mongo(db.getMongo().host);
- var temp2 = conn.getDB("test2").foobar;
- assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
- assert(temp2.findOne(), "check close 2");
- conn.close();
- }
-
- print("checkpoint F");
-
- assert.throws(function() {
- s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
- printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
- });
+}
+
+s.printChangeLog();
+
+print("missing: " + tojson(missing));
+assert.soon(function(z) {
+ return doCountsGlobal() == j * 100;
+}, "from each a:" + counta + " b:" + countb + " i:" + i);
+print("checkpoint B.a");
+s.printChunks();
+assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
+assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
+assert(missing.length == 0, "missing : " + tojson(missing));
+
+print("checkpoint C");
+
+assert(Array.unique(s.config.chunks.find({ns: 'test.foo'}).toArray().map(function(z) {
+ return z.shard;
+ })).length == 2,
+ "should be using both servers");
+
+for (i = 0; i < 100; i++) {
+ cursor = coll.find().batchSize(5);
+ cursor.next();
+ cursor.close();
+}
+
+print("checkpoint D");
+
+// Test non-sharded cursors
+db = s.getDB("test2");
+var t = db.foobar;
+for (i = 0; i < 100; i++)
+ t.save({_id: i});
+for (i = 0; i < 100; i++) {
+ var cursor = t.find().batchSize(2);
+ cursor.next();
+ assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
+ cursor.close();
+}
+
+assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
+
+// Stop the balancer, otherwise it may grab some connections from the pool for itself
+s.stopBalancer();
+
+confirmBalancerSettings(false, false);
+
+print("checkpoint E");
+
+assert(t.findOne(), "check close 0");
+
+for (i = 0; i < 20; i++) {
+ var conn = new Mongo(db.getMongo().host);
+ var temp2 = conn.getDB("test2").foobar;
+ assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
+ assert(temp2.findOne(), "check close 2");
+ conn.close();
+}
+
+print("checkpoint F");
+
+assert.throws(function() {
+ s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
+ printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
});
+});
- print("checkpoint G");
+print("checkpoint G");
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 02e2c54bbc0..fdc0d15509c 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -3,67 +3,67 @@
* secondaryThrottle is used.
*/
(function() {
- 'use strict';
+'use strict';
- // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
- // from stepping down during migrations on slow evergreen builders.
- var s = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
+// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+// from stepping down during migrations on slow evergreen builders.
+var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
+ },
+ rs1: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
- });
-
- var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 2100; i++) {
- bulk.insert({_id: i, x: i});
}
- assert.writeOK(bulk.execute());
+});
- assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
- s.ensurePrimaryShard('TestDB', s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {_id: 1}}));
+var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
+for (var i = 0; i < 2100; i++) {
+ bulk.insert({_id: i, x: i});
+}
+assert.writeOK(bulk.execute());
- for (i = 0; i < 20; i++) {
- assert.commandWorked(s.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: i * 100}}));
- }
+assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
+s.ensurePrimaryShard('TestDB', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {_id: 1}}));
- var collPrimary = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
- assert.eq(2100, collPrimary.find().itcount());
+for (i = 0; i < 20; i++) {
+ assert.commandWorked(s.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: i * 100}}));
+}
- var collSlaveOk = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
- collSlaveOk.setSlaveOk();
- assert.eq(2100, collSlaveOk.find().itcount());
+var collPrimary = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
+assert.eq(2100, collPrimary.find().itcount());
- for (i = 0; i < 20; i++) {
- // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
- // have a chunk manager so it doesn't know how to filter out docs it doesn't own.
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {_id: i * 100},
- to: s.shard1.shardName,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- _waitForDelete: true
- }));
+var collSlaveOk = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
+collSlaveOk.setSlaveOk();
+assert.eq(2100, collSlaveOk.find().itcount());
- assert.eq(2100,
- collSlaveOk.find().itcount(),
- 'Incorrect count when reading from secondary. Count from primary is ' +
- collPrimary.find().itcount());
- }
+for (i = 0; i < 20; i++) {
+ // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
+ // have a chunk manager so it doesn't know how to filter out docs it doesn't own.
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {_id: i * 100},
+ to: s.shard1.shardName,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+
+ assert.eq(2100,
+ collSlaveOk.find().itcount(),
+ 'Incorrect count when reading from secondary. Count from primary is ' +
+ collPrimary.find().itcount());
+}
- s.stop();
+s.stop();
}());
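
The invariant balance_repl.js guards is spelled out in its own comment: secondaries keep no chunk-ownership metadata, so a slaveOk read would return orphaned documents unless every migration both replicates with writeConcern {w: 2} and waits for the range deleter before the next assertion. The read path being exercised looks like this (illustrative connection):

    var conn = new Mongo(s.s0.host);         // fresh connection to the mongos
    var coll = conn.getDB('TestDB').TestColl;
    coll.setSlaveOk();                       // allow routing reads to secondaries
    assert.eq(2100, coll.find().itcount());  // must hold after every migration
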
diff --git a/jstests/sharding/balancer_shell_commands.js b/jstests/sharding/balancer_shell_commands.js
index 48c5c7c489a..f6d8faf1e99 100644
--- a/jstests/sharding/balancer_shell_commands.js
+++ b/jstests/sharding/balancer_shell_commands.js
@@ -7,20 +7,20 @@
var db;
(function() {
- "use strict";
- var shardingTest = new ShardingTest(
- {name: "shell_commands", shards: 1, mongos: 1, other: {enableBalancer: true}});
- db = shardingTest.getDB("test");
+"use strict";
+var shardingTest =
+ new ShardingTest({name: "shell_commands", shards: 1, mongos: 1, other: {enableBalancer: true}});
+db = shardingTest.getDB("test");
- assert(sh.getBalancerState(), "Balancer should have been enabled during cluster setup");
+assert(sh.getBalancerState(), "Balancer should have been enabled during cluster setup");
- // Test that the balancer can be disabled
- sh.setBalancerState(false);
- assert(!sh.getBalancerState(), "Failed to disable balancer");
+// Test that the balancer can be disabled
+sh.setBalancerState(false);
+assert(!sh.getBalancerState(), "Failed to disable balancer");
- // Test that the balancer can be re-enabled
- sh.setBalancerState(true);
- assert(sh.getBalancerState(), "Failed to re-enable balancer");
+// Test that the balancer can be re-enabled
+sh.setBalancerState(true);
+assert(sh.getBalancerState(), "Failed to re-enable balancer");
- shardingTest.stop();
+shardingTest.stop();
})();
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index 422085a537a..ee2d55b1345 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -11,83 +11,81 @@
* sure that some chunks are moved.
*/
(function() {
- 'use strict';
+'use strict';
- /**
- * Simple representation for wall clock time. Hour and minutes should be integers.
- */
- var HourAndMinute = function(hour, minutes) {
- return {
- /**
- * Returns a new HourAndMinute object with the amount of hours added.
- * Amount can be negative.
- */
- addHour: function(amount) {
- var newHour = (hour + amount) % 24;
- if (newHour < 0) {
- newHour += 24;
- }
+/**
+ * Simple representation for wall clock time. Hour and minutes should be integers.
+ */
+var HourAndMinute = function(hour, minutes) {
+ return {
+ /**
+ * Returns a new HourAndMinute object with the amount of hours added.
+ * Amount can be negative.
+ */
+ addHour: function(amount) {
+ var newHour = (hour + amount) % 24;
+ if (newHour < 0) {
+ newHour += 24;
+ }
- return new HourAndMinute(newHour, minutes);
- },
+ return new HourAndMinute(newHour, minutes);
+ },
- /**
- * Returns a string representation that is compatible with the format for the balancer
- * window settings.
- */
- toString: function() {
- var minStr = (minutes < 10) ? ('0' + minutes) : ('' + minutes);
- var hourStr = (hour < 10) ? ('0' + hour) : ('' + hour);
- return hourStr + ':' + minStr;
- }
- };
+ /**
+ * Returns a string representation that is compatible with the format for the balancer
+ * window settings.
+ */
+ toString: function() {
+ var minStr = (minutes < 10) ? ('0' + minutes) : ('' + minutes);
+ var hourStr = (hour < 10) ? ('0' + hour) : ('' + hour);
+ return hourStr + ':' + minStr;
+ }
};
+};
- var st = new ShardingTest({shards: 2});
- var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+var st = new ShardingTest({shards: 2});
+var configDB = st.s.getDB('config');
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- for (var x = 0; x < 150; x += 10) {
- configDB.adminCommand({split: 'test.user', middle: {_id: x}});
- }
+for (var x = 0; x < 150; x += 10) {
+ configDB.adminCommand({split: 'test.user', middle: {_id: x}});
+}
- var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
- var startDate = new Date();
- var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
- assert.writeOK(configDB.settings.update({_id: 'balancer'},
- {
- $set: {
- activeWindow: {
- start: hourMinStart.addHour(-2).toString(),
- stop: hourMinStart.addHour(-1).toString()
- },
- }
- },
- true));
- st.startBalancer();
+var startDate = new Date();
+var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
+assert.writeOK(configDB.settings.update({_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {
+ start: hourMinStart.addHour(-2).toString(),
+ stop: hourMinStart.addHour(-1).toString()
+ },
+ }
+ },
+ true));
+st.startBalancer();
- st.waitForBalancer(true, 60000);
+st.waitForBalancer(true, 60000);
- var shard0ChunksAfter =
- configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
- assert.eq(shard0Chunks, shard0ChunksAfter);
+var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+assert.eq(shard0Chunks, shard0ChunksAfter);
- assert.writeOK(configDB.settings.update(
- {_id: 'balancer'},
- {
- $set: {
- activeWindow:
- {start: hourMinStart.toString(), stop: hourMinStart.addHour(2).toString()}
- }
- },
- true));
+assert.writeOK(configDB.settings.update(
+ {_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {start: hourMinStart.toString(), stop: hourMinStart.addHour(2).toString()}
+ }
+ },
+ true));
- st.waitForBalancer(true, 60000);
+st.waitForBalancer(true, 60000);
- shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
- assert.neq(shard0Chunks, shard0ChunksAfter);
+shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
+assert.neq(shard0Chunks, shard0ChunksAfter);
- st.stop();
+st.stop();
})();
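
HourAndMinute.addHour in balancer_window.js wraps modulo 24, so the negative offsets move the active window safely across midnight, and toString zero-pads both fields into the HH:MM form the balancer's activeWindow setting expects. For example (illustrative values):

    var t = new HourAndMinute(1, 5);
    t.toString();              // "01:05"
    t.addHour(-2).toString();  // "23:05": (1 - 2) % 24 == -1, corrected to 23
    t.addHour(3).toString();   // "04:05"
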
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index 92a37102123..b7fda388e34 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -3,65 +3,64 @@
* cleaned up properly.
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
- // Test dropping an unsharded collection.
+// Test dropping an unsharded collection.
- assert.writeOK(testDB.bar.insert({x: 1}));
- assert.neq(null, testDB.bar.findOne({x: 1}));
+assert.writeOK(testDB.bar.insert({x: 1}));
+assert.neq(null, testDB.bar.findOne({x: 1}));
- assert.commandWorked(testDB.runCommand({drop: 'bar'}));
- assert.eq(null, testDB.bar.findOne({x: 1}));
+assert.commandWorked(testDB.runCommand({drop: 'bar'}));
+assert.eq(null, testDB.bar.findOne({x: 1}));
- // Test dropping a sharded collection.
+// Test dropping a sharded collection.
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
- st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
- assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'foo'}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'foo'}));
+assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
- assert.writeOK(testDB.user.insert({_id: 10}));
- assert.writeOK(testDB.user.insert({_id: -10}));
+assert.writeOK(testDB.user.insert({_id: 10}));
+assert.writeOK(testDB.user.insert({_id: -10}));
- assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
- assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
+assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
+assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
- var configDB = st.s.getDB('config');
- var collDoc = configDB.collections.findOne({_id: 'test.user'});
+var configDB = st.s.getDB('config');
+var collDoc = configDB.collections.findOne({_id: 'test.user'});
- assert(!collDoc.dropped);
+assert(!collDoc.dropped);
- assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
- assert.eq(1, configDB.tags.count({ns: 'test.user'}));
+assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
+assert.eq(1, configDB.tags.count({ns: 'test.user'}));
- assert.commandWorked(testDB.runCommand({drop: 'user'}));
+assert.commandWorked(testDB.runCommand({drop: 'user'}));
- assert.eq(null, st.shard0.getDB('test').user.findOne());
- assert.eq(null, st.shard1.getDB('test').user.findOne());
+assert.eq(null, st.shard0.getDB('test').user.findOne());
+assert.eq(null, st.shard1.getDB('test').user.findOne());
- // Call drop again to verify that the command is idempotent.
- assert.commandWorked(testDB.runCommand({drop: 'user'}));
+// Call drop again to verify that the command is idempotent.
+assert.commandWorked(testDB.runCommand({drop: 'user'}));
- // Check for the collection with majority RC to verify that the write to remove the collection
- // document from the catalog has propagated to the majority snapshot.
- var findColl = configDB.runCommand(
- {find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}});
- collDoc = findColl.cursor.firstBatch[0];
+// Check for the collection with majority RC to verify that the write to remove the collection
+// document from the catalog has propagated to the majority snapshot.
+var findColl = configDB.runCommand(
+ {find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}});
+collDoc = findColl.cursor.firstBatch[0];
- assert(collDoc.dropped);
+assert(collDoc.dropped);
- assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
- assert.eq(0, configDB.tags.count({ns: 'test.user'}));
-
- st.stop();
+assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
+assert.eq(0, configDB.tags.count({ns: 'test.user'}));
+st.stop();
})();
diff --git a/jstests/sharding/basic_merge.js b/jstests/sharding/basic_merge.js
index 540a0f2355b..9bc75636e5d 100644
--- a/jstests/sharding/basic_merge.js
+++ b/jstests/sharding/basic_merge.js
@@ -2,65 +2,63 @@
* Perform basic tests for the mergeChunks command against mongos.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
- var mongos = st.s0;
+var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
+var mongos = st.s0;
- var kDbName = 'db';
+var kDbName = 'db';
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- var ns = kDbName + ".foo";
+var ns = kDbName + ".foo";
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
- // Fail if invalid namespace.
- assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [{a: -1}, {a: 1}]}));
+// Fail if invalid namespace.
+assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [{a: -1}, {a: 1}]}));
- // Fail if database does not exist.
- assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [{a: -1}, {a: 1}]}));
+// Fail if database does not exist.
+assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [{a: -1}, {a: 1}]}));
- // Fail if collection is unsharded.
- assert.commandFailed(
- mongos.adminCommand({mergeChunks: kDbName + '.xxx', bounds: [{a: -1}, {a: 1}]}));
+// Fail if collection is unsharded.
+assert.commandFailed(
+ mongos.adminCommand({mergeChunks: kDbName + '.xxx', bounds: [{a: -1}, {a: 1}]}));
- // Errors if either bounds is not a valid shard key.
- assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
+// Errors if either bounds is not a valid shard key.
+assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
+assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
- // Fail if a wrong key
- assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{x: -1}, {a: 1}]}));
- assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {x: 1}]}));
+// Fail if a wrong key
+assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{x: -1}, {a: 1}]}));
+assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {x: 1}]}));
- // Fail if chunks do not contain a bound
- assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
+// Fail if chunks do not contain a bound
+assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
- // Fail if chunks to be merged are not contiguous on the shard
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard1, _waitForDelete: true}));
- assert.commandFailed(
- st.s0.adminCommand({mergeChunks: ns, bounds: [{a: MinKey()}, {a: MaxKey()}]}));
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard0, _waitForDelete: true}));
+// Fail if chunks to be merged are not contiguous on the shard
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard1, _waitForDelete: true}));
+assert.commandFailed(st.s0.adminCommand({mergeChunks: ns, bounds: [{a: MinKey()}, {a: MaxKey()}]}));
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: ns, bounds: [{a: -1}, {a: 0}], to: shard0, _waitForDelete: true}));
- // Validate metadata
- // There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
- assert.eq(4, st.s0.getDB('config').chunks.count({ns: ns}));
+// Validate metadata
+// There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
+assert.eq(4, st.s0.getDB('config').chunks.count({ns: ns}));
- // Use the second (stale) mongos to invoke the mergeChunks command so we can exercise the stale
- // shard version refresh logic
- assert.commandWorked(st.s1.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
- assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
- assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
-
- st.stop();
+// Use the second (stale) mongos to invoke the mergeChunks command so we can exercise the stale
+// shard version refresh logic
+assert.commandWorked(st.s1.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
+assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
+assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
+st.stop();
})();
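
The assertions above encode mergeChunks's preconditions: the namespace must name a sharded
collection, both bounds must be existing chunk boundaries expressed in the shard key, and the
chunks being merged must be contiguous on a single shard. A minimal sketch of the successful
path, assuming a live ShardingTest named `st` and a hypothetical namespace 'mergeDemo.coll':

    // Sketch only: 'mergeDemo.coll' and the split point are illustrative.
    var demoNs = 'mergeDemo.coll';
    assert.commandWorked(st.s0.adminCommand({enableSharding: 'mergeDemo'}));
    assert.commandWorked(st.s0.adminCommand({shardCollection: demoNs, key: {a: 1}}));
    assert.commandWorked(st.s0.adminCommand({split: demoNs, middle: {a: 0}}));
    // Both resulting chunks still live on the primary shard, so merging the
    // full key range back into one chunk succeeds.
    assert.commandWorked(
        st.s0.adminCommand({mergeChunks: demoNs, bounds: [{a: MinKey}, {a: MaxKey}]}));
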
diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js
index c1076ff8941..5aea800c56e 100644
--- a/jstests/sharding/basic_sharding_params.js
+++ b/jstests/sharding/basic_sharding_params.js
@@ -3,80 +3,80 @@
*/
(function() {
- 'use strict';
+'use strict';
- function shardingTestUsingObjects() {
- var st = new ShardingTest({
- mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
- config: {c0: {verbose: 4}},
- shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
- });
+function shardingTestUsingObjects() {
+ var st = new ShardingTest({
+ mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
+ config: {c0: {verbose: 4}},
+ shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
+ });
- var s0 = st.s0;
- assert.eq(s0, st._mongos[0]);
+ var s0 = st.s0;
+ assert.eq(s0, st._mongos[0]);
- var s1 = st.s1;
- assert.eq(s1, st._mongos[1]);
+ var s1 = st.s1;
+ assert.eq(s1, st._mongos[1]);
- var c0 = st.c0;
- assert.eq(c0, st._configServers[0]);
+ var c0 = st.c0;
+ assert.eq(c0, st._configServers[0]);
- var rs0 = st.rs0;
- assert.eq(rs0, st._rsObjects[0]);
+ var rs0 = st.rs0;
+ assert.eq(rs0, st._rsObjects[0]);
- var rs1 = st.rs1;
- assert.eq(rs1, st._rsObjects[1]);
+ var rs1 = st.rs1;
+ assert.eq(rs1, st._rsObjects[1]);
- var rs0_d0 = rs0.nodes[0];
+ var rs0_d0 = rs0.nodes[0];
- var rs1_d0 = rs1.nodes[0];
- var rs1_a1 = rs1.nodes[1];
+ var rs1_d0 = rs1.nodes[0];
+ var rs1_a1 = rs1.nodes[1];
- assert(s0.commandLine.hasOwnProperty("vvvvvv"));
- assert(s1.commandLine.hasOwnProperty("vvvvv"));
- assert(c0.commandLine.hasOwnProperty("vvvv"));
- assert(rs0_d0.commandLine.hasOwnProperty("vvv"));
- assert(rs1_d0.commandLine.hasOwnProperty("vv"));
- assert(rs1_a1.commandLine.hasOwnProperty("v"));
+ assert(s0.commandLine.hasOwnProperty("vvvvvv"));
+ assert(s1.commandLine.hasOwnProperty("vvvvv"));
+ assert(c0.commandLine.hasOwnProperty("vvvv"));
+ assert(rs0_d0.commandLine.hasOwnProperty("vvv"));
+ assert(rs1_d0.commandLine.hasOwnProperty("vv"));
+ assert(rs1_a1.commandLine.hasOwnProperty("v"));
- st.stop();
- }
+ st.stop();
+}
- function shardingTestUsingArrays() {
- var st = new ShardingTest({
- mongos: [{verbose: 5}, {verbose: 4}],
- config: [{verbose: 3}],
- shards: [{verbose: 2}, {verbose: 1}]
- });
+function shardingTestUsingArrays() {
+ var st = new ShardingTest({
+ mongos: [{verbose: 5}, {verbose: 4}],
+ config: [{verbose: 3}],
+ shards: [{verbose: 2}, {verbose: 1}]
+ });
- var s0 = st.s0;
- assert.eq(s0, st._mongos[0]);
+ var s0 = st.s0;
+ assert.eq(s0, st._mongos[0]);
- var s1 = st.s1;
- assert.eq(s1, st._mongos[1]);
+ var s1 = st.s1;
+ assert.eq(s1, st._mongos[1]);
- var c0 = st.c0;
- assert.eq(c0, st._configServers[0]);
+ var c0 = st.c0;
+ assert.eq(c0, st._configServers[0]);
- var rs0 = st.rs0;
- assert.eq(rs0, st._rsObjects[0]);
+ var rs0 = st.rs0;
+ assert.eq(rs0, st._rsObjects[0]);
- var rs1 = st.rs1;
- assert.eq(rs1, st._rsObjects[1]);
+ var rs1 = st.rs1;
+ assert.eq(rs1, st._rsObjects[1]);
- var rs0_d0 = rs0.nodes[0];
+ var rs0_d0 = rs0.nodes[0];
- var rs1_d0 = rs1.nodes[0];
+ var rs1_d0 = rs1.nodes[0];
- assert(s0.commandLine.hasOwnProperty("vvvvv"));
- assert(s1.commandLine.hasOwnProperty("vvvv"));
- assert(c0.commandLine.hasOwnProperty("vvv"));
- assert(rs0_d0.commandLine.hasOwnProperty("vv"));
- assert(rs1_d0.commandLine.hasOwnProperty("v"));
+ assert(s0.commandLine.hasOwnProperty("vvvvv"));
+ assert(s1.commandLine.hasOwnProperty("vvvv"));
+ assert(c0.commandLine.hasOwnProperty("vvv"));
+ assert(rs0_d0.commandLine.hasOwnProperty("vv"));
+ assert(rs1_d0.commandLine.hasOwnProperty("v"));
- st.stop();
- }
+ st.stop();
+}
- shardingTestUsingObjects();
- shardingTestUsingArrays();
+shardingTestUsingObjects();
+shardingTestUsingArrays();
})();
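
Both helpers above assert the same plumbing through the object and array spellings of the
ShardingTest options: verbose: N surfaces as a -v...v flag of length N on the corresponding
process's command line. A short sketch of the array form, with arbitrary illustrative levels:

    // Sketch: one mongos, one config server, one shard, each at its own verbosity.
    var demo = new ShardingTest(
        {mongos: [{verbose: 3}], config: [{verbose: 2}], shards: [{verbose: 1}]});
    assert(demo.s0.commandLine.hasOwnProperty("vvv"));          // mongos, verbose 3
    assert(demo.c0.commandLine.hasOwnProperty("vv"));           // config, verbose 2
    assert(demo.rs0.nodes[0].commandLine.hasOwnProperty("v"));  // shard, verbose 1
    demo.stop();
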
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index cb86e2d34b0..00a442ac353 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -2,106 +2,103 @@
* Perform basic tests for the split command against mongos.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
- var configDB = st.s0.getDB('config');
+var st = new ShardingTest({mongos: 2, shards: 2, other: {chunkSize: 1}});
+var configDB = st.s0.getDB('config');
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- // split on invalid ns.
- assert.commandFailed(configDB.adminCommand({split: 'user', key: {_id: 1}}));
+// split on invalid ns.
+assert.commandFailed(configDB.adminCommand({split: 'user', key: {_id: 1}}));
- // split on unsharded collection (db is not sharding enabled).
- assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
+// split on unsharded collection (db is not sharding enabled).
+assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', shard0);
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', shard0);
- // split on unsharded collection (db is sharding enabled).
- assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
+// split on unsharded collection (db is sharding enabled).
+assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
+assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
- assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
- assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
+assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
+assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
- // Cannot split on existing chunk boundary.
- assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
+// Cannot split on existing chunk boundary.
+assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
- // Attempt to split on a value that is not the shard key.
- assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {x: 100}}));
- assert.commandFailed(configDB.adminCommand({split: 'test.user', find: {x: 100}}));
- assert.commandFailed(
- configDB.adminCommand({split: 'test.user', bounds: [{x: MinKey}, {x: MaxKey}]}));
+// Attempt to split on a value that is not the shard key.
+assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {x: 100}}));
+assert.commandFailed(configDB.adminCommand({split: 'test.user', find: {x: 100}}));
+assert.commandFailed(
+ configDB.adminCommand({split: 'test.user', bounds: [{x: MinKey}, {x: MaxKey}]}));
- // Insert documents large enough to fill up a chunk, but do it directly in the shard in order
- // to bypass the auto-split logic.
- var kiloDoc = new Array(1024).join('x');
- var testDB = st.rs0.getPrimary().getDB('test');
- var bulk = testDB.user.initializeUnorderedBulkOp();
- for (var x = -1200; x < 1200; x++) {
- bulk.insert({_id: x, val: kiloDoc});
- }
- assert.writeOK(bulk.execute());
+// Insert documents large enough to fill up a chunk, but do it directly in the shard in order
+// to bypass the auto-split logic.
+var kiloDoc = new Array(1024).join('x');
+var testDB = st.rs0.getPrimary().getDB('test');
+var bulk = testDB.user.initializeUnorderedBulkOp();
+for (var x = -1200; x < 1200; x++) {
+ bulk.insert({_id: x, val: kiloDoc});
+}
+assert.writeOK(bulk.execute());
- assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
+assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
- // Errors if bounds do not correspond to existing chunk boundaries.
- assert.commandFailed(
- configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
- assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
+// Errors if bounds do not correspond to existing chunk boundaries.
+assert.commandFailed(configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
+assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
- assert.commandWorked(
- configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
- assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
+assert.commandWorked(
+ configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
+assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
- assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
- assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: -600}}));
- assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
+assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
+assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: -600}}));
+assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
- // Mongos must refresh metadata if the chunk version does not match
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'test.user', find: {_id: -900}, to: shard1, _waitForDelete: true}));
- assert.commandWorked(st.s1.adminCommand({split: 'test.user', middle: {_id: -900}}));
- assert.commandWorked(st.s1.adminCommand(
- {moveChunk: 'test.user', find: {_id: -900}, to: shard0, _waitForDelete: true}));
- assert.commandWorked(st.s1.adminCommand(
- {moveChunk: 'test.user', find: {_id: -901}, to: shard0, _waitForDelete: true}));
- assert.eq(0, configDB.chunks.find({ns: 'test.user', shard: shard1}).itcount());
+// Mongos must refresh metadata if the chunk version does not match
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'test.user', find: {_id: -900}, to: shard1, _waitForDelete: true}));
+assert.commandWorked(st.s1.adminCommand({split: 'test.user', middle: {_id: -900}}));
+assert.commandWorked(st.s1.adminCommand(
+ {moveChunk: 'test.user', find: {_id: -900}, to: shard0, _waitForDelete: true}));
+assert.commandWorked(st.s1.adminCommand(
+ {moveChunk: 'test.user', find: {_id: -901}, to: shard0, _waitForDelete: true}));
+assert.eq(0, configDB.chunks.find({ns: 'test.user', shard: shard1}).itcount());
- //
- // Compound Key
- //
+//
+// Compound Key
+//
- assert.commandWorked(
- configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
- assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
- assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
- assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
+assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
- // cannot split on existing chunk boundary.
- assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
+// cannot split on existing chunk boundary.
+assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
- bulk = testDB.compound.initializeUnorderedBulkOp();
- for (x = -1200; x < 1200; x++) {
- bulk.insert({x: x, y: x, val: kiloDoc});
- }
- assert.writeOK(bulk.execute());
+bulk = testDB.compound.initializeUnorderedBulkOp();
+for (x = -1200; x < 1200; x++) {
+ bulk.insert({x: x, y: x, val: kiloDoc});
+}
+assert.writeOK(bulk.execute());
- assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
- assert.commandWorked(configDB.adminCommand(
- {split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
- assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
+assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
+assert.commandWorked(configDB.adminCommand(
+ {split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
+assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
- assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
- assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
- assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
-
- st.stop();
+assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
+assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
+assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
+st.stop();
})();
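
The assertions above exercise the three mutually exclusive ways of telling the split command
where to cut. A sketch, assuming a hypothetical sharded collection 'test.demo' with shard key
{_id: 1} that already holds enough documents for the server to choose split points:

    // 'middle' cuts exactly at the given shard key value.
    assert.commandWorked(configDB.adminCommand({split: 'test.demo', middle: {_id: 50}}));
    // 'find' locates the chunk owning the document and lets the server pick the
    // split point from the chunk's actual data.
    assert.commandWorked(configDB.adminCommand({split: 'test.demo', find: {_id: 75}}));
    // 'bounds' names an existing chunk by its exact [min, max) boundaries.
    assert.commandWorked(
        configDB.adminCommand({split: 'test.demo', bounds: [{_id: MinKey}, {_id: 50}]}));
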
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index cb3b4cd21d6..60b848dd6de 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -9,233 +9,267 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var configConnStr = st._configDB;
-
- jsTest.log("Starting sharding batch write tests...");
-
- var request;
- var result;
-
- // NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
- //
- //
- // Mongos _id autogeneration tests for sharded collections
-
- var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
-
- //
- // Basic insert no _id
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.count());
-
- //
- // Multi insert some _ids
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{_id: 0, a: 1}, {a: 2}]});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(2, result.n);
- assert.eq(2, coll.count());
- assert.eq(1, coll.count({_id: 0}));
-
- //
- // Ensure that generating many _ids doesn't push us over limits
- var maxDocSize = (16 * 1024 * 1024) / 1000;
- var baseDocSize = Object.bsonsize({a: 1, data: ""});
- var dataSize = maxDocSize - baseDocSize;
-
- var data = "";
- for (var i = 0; i < dataSize; i++)
- data += "x";
-
- var documents = [];
- for (var i = 0; i < 1000; i++)
- documents.push({a: i, data: data});
-
- assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
- coll.remove({});
- request = {insert: coll.getName(), documents: documents};
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1000, result.n);
- assert.eq(1000, coll.count());
-
- //
- //
- // Config server upserts (against admin db, for example) require _id test
- var adminColl = admin.getCollection(coll.getName());
-
- //
- // Without _id
- adminColl.remove({});
- printjson(
- request = {update: adminColl.getName(), updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]});
- var result = adminColl.runCommand(request);
- assert.commandWorked(result);
- assert.eq(1, result.n);
- assert.eq(1, adminColl.count());
-
- //
- // With _id
- adminColl.remove({});
- printjson(request = {
- update: adminColl.getName(),
- updates: [{q: {_id: 1, a: 1}, u: {a: 1}, upsert: true}]
- });
- assert.commandWorked(result = adminColl.runCommand(request));
- assert.eq(1, result.n);
- assert.eq(1, adminColl.count());
-
- //
- //
- // Stale config progress tests
- // Set up a new collection across two shards, then revert the chunks to an earlier state to put
- // mongos and mongod permanently out of sync.
-
- // START SETUP
- var brokenColl = mongos.getCollection("broken.coll");
- assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
- st.ensurePrimaryShard(brokenColl.getDB().toString(), st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
-
- var oldChunks = config.chunks.find().toArray();
-
- // Start a new mongos and bring it up-to-date with the chunks so far
-
- var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
- brokenColl = staleMongos.getCollection(brokenColl.toString());
- assert.writeOK(brokenColl.insert({hello: "world"}));
-
- // Modify the chunks to make shards at a higher version
-
- assert.commandWorked(admin.runCommand(
- {moveChunk: brokenColl.toString(), find: {_id: 0}, to: st.shard1.shardName}));
-
- // Rewrite the old chunks back to the config server
-
- assert.writeOK(config.chunks.remove({}));
- for (var i = 0; i < oldChunks.length; i++) {
- assert.writeOK(config.chunks.insert(oldChunks[i]));
- }
-
- // Ensure that the inserts have propagated to all secondary nodes
- st.configRS.awaitReplication();
-
- // Stale mongos can no longer bring itself up-to-date!
- // END SETUP
-
- //
- // Config server insert, repeatedly stale
- printjson(request = {insert: brokenColl.getName(), documents: [{_id: -1}]});
- printjson(result = brokenColl.runCommand(request));
- assert(result.ok);
- assert.eq(0, result.n);
- assert.eq(1, result.writeErrors.length);
- assert.eq(0, result.writeErrors[0].index);
- assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
- //
- // Config server insert to other shard, repeatedly stale
- printjson(request = {insert: brokenColl.getName(), documents: [{_id: 1}]});
- printjson(result = brokenColl.runCommand(request));
- assert(result.ok);
- assert.eq(0, result.n);
- assert.eq(1, result.writeErrors.length);
- assert.eq(0, result.writeErrors[0].index);
- assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
- //
- //
- // Tests against config server
- var configColl = config.getCollection("batch_write_protocol_sharded");
-
- //
- // Basic config server insert
- configColl.remove({});
- printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
- var result = configColl.runCommand(request);
- assert.commandWorked(result);
- assert.eq(1, result.n);
-
- st.configRS.awaitReplication();
- assert.eq(1, st.config0.getCollection(configColl + "").count());
- assert.eq(1, st.config1.getCollection(configColl + "").count());
- assert.eq(1, st.config2.getCollection(configColl + "").count());
-
- //
- // Basic config server update
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
- printjson(result = configColl.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
-
- st.configRS.awaitReplication();
- assert.eq(1, st.config0.getCollection(configColl + "").count({b: 2}));
- assert.eq(1, st.config1.getCollection(configColl + "").count({b: 2}));
- assert.eq(1, st.config2.getCollection(configColl + "").count({b: 2}));
-
- //
- // Basic config server delete
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {'delete': configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
- printjson(result = configColl.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
-
- st.configRS.awaitReplication();
- assert.eq(0, st.config0.getCollection(configColl + "").count());
- assert.eq(0, st.config1.getCollection(configColl + "").count());
- assert.eq(0, st.config2.getCollection(configColl + "").count());
-
- MongoRunner.stopMongod(st.config1);
- MongoRunner.stopMongod(st.config2);
- st.configRS.awaitNoPrimary();
-
- // Config server insert with no config PRIMARY
- configColl.remove({});
- printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
- printjson(result = configColl.runCommand(request));
- assert(!result.ok);
- assert(result.errmsg != null);
-
- // Config server update with no config PRIMARY
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
- printjson(result = configColl.runCommand(request));
- assert(!result.ok);
- assert(result.errmsg != null);
-
- // Config server delete with no config PRIMARY
- configColl.remove({});
- configColl.insert({a: 1});
- printjson(request = {delete: configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
- printjson(result = configColl.runCommand(request));
- assert(!result.ok);
- assert(result.errmsg != null);
-
- jsTest.log("DONE!");
-
- MongoRunner.stopMongos(staleMongos);
- st.stop();
+"use strict";
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var configConnStr = st._configDB;
+
+jsTest.log("Starting sharding batch write tests...");
+
+var request;
+var result;
+
+// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+//
+//
+// Mongos _id autogeneration tests for sharded collections
+
+var coll = mongos.getCollection("foo.bar");
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+
+//
+// Basic insert no _id
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.count());
+
+//
+// Multi insert some _ids
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{_id: 0, a: 1}, {a: 2}]
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(2, result.n);
+assert.eq(2, coll.count());
+assert.eq(1, coll.count({_id: 0}));
+
+//
+// Ensure that generating many _ids doesn't push us over limits
+var maxDocSize = (16 * 1024 * 1024) / 1000;
+var baseDocSize = Object.bsonsize({a: 1, data: ""});
+var dataSize = maxDocSize - baseDocSize;
+
+var data = "";
+for (var i = 0; i < dataSize; i++)
+ data += "x";
+
+var documents = [];
+for (var i = 0; i < 1000; i++)
+ documents.push({a: i, data: data});
+
+assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
+coll.remove({});
+request = {
+ insert: coll.getName(),
+ documents: documents
+};
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1000, result.n);
+assert.eq(1000, coll.count());
+
+//
+//
+// Config server upserts (against admin db, for example) require _id test
+var adminColl = admin.getCollection(coll.getName());
+
+//
+// Without _id
+adminColl.remove({});
+printjson(request = {
+ update: adminColl.getName(),
+ updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]
+});
+var result = adminColl.runCommand(request);
+assert.commandWorked(result);
+assert.eq(1, result.n);
+assert.eq(1, adminColl.count());
+
+//
+// With _id
+adminColl.remove({});
+printjson(request = {
+ update: adminColl.getName(),
+ updates: [{q: {_id: 1, a: 1}, u: {a: 1}, upsert: true}]
+});
+assert.commandWorked(result = adminColl.runCommand(request));
+assert.eq(1, result.n);
+assert.eq(1, adminColl.count());
+
+//
+//
+// Stale config progress tests
+// Set up a new collection across two shards, then revert the chunks to an earlier state to put
+// mongos and mongod permanently out of sync.
+
+// START SETUP
+var brokenColl = mongos.getCollection("broken.coll");
+assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
+st.ensurePrimaryShard(brokenColl.getDB().toString(), st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
+
+var oldChunks = config.chunks.find().toArray();
+
+// Start a new mongos and bring it up-to-date with the chunks so far
+
+var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
+brokenColl = staleMongos.getCollection(brokenColl.toString());
+assert.writeOK(brokenColl.insert({hello: "world"}));
+
+// Modify the chunks to make shards at a higher version
+
+assert.commandWorked(
+ admin.runCommand({moveChunk: brokenColl.toString(), find: {_id: 0}, to: st.shard1.shardName}));
+
+// Rewrite the old chunks back to the config server
+
+assert.writeOK(config.chunks.remove({}));
+for (var i = 0; i < oldChunks.length; i++) {
+ assert.writeOK(config.chunks.insert(oldChunks[i]));
+}
+
+// Ensure that the inserts have propagated to all secondary nodes
+st.configRS.awaitReplication();
+
+// Stale mongos can no longer bring itself up-to-date!
+// END SETUP
+
+//
+// Config server insert, repeatedly stale
+printjson(request = {
+ insert: brokenColl.getName(),
+ documents: [{_id: -1}]
+});
+printjson(result = brokenColl.runCommand(request));
+assert(result.ok);
+assert.eq(0, result.n);
+assert.eq(1, result.writeErrors.length);
+assert.eq(0, result.writeErrors[0].index);
+assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+//
+// Config server insert to other shard, repeatedly stale
+printjson(request = {
+ insert: brokenColl.getName(),
+ documents: [{_id: 1}]
+});
+printjson(result = brokenColl.runCommand(request));
+assert(result.ok);
+assert.eq(0, result.n);
+assert.eq(1, result.writeErrors.length);
+assert.eq(0, result.writeErrors[0].index);
+assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+//
+//
+// Tests against config server
+var configColl = config.getCollection("batch_write_protocol_sharded");
+
+//
+// Basic config server insert
+configColl.remove({});
+printjson(request = {
+ insert: configColl.getName(),
+ documents: [{a: 1}]
+});
+var result = configColl.runCommand(request);
+assert.commandWorked(result);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(1, st.config0.getCollection(configColl + "").count());
+assert.eq(1, st.config1.getCollection(configColl + "").count());
+assert.eq(1, st.config2.getCollection(configColl + "").count());
+
+//
+// Basic config server update
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ update: configColl.getName(),
+ updates: [{q: {a: 1}, u: {$set: {b: 2}}}]
+});
+printjson(result = configColl.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(1, st.config0.getCollection(configColl + "").count({b: 2}));
+assert.eq(1, st.config1.getCollection(configColl + "").count({b: 2}));
+assert.eq(1, st.config2.getCollection(configColl + "").count({b: 2}));
+
+//
+// Basic config server delete
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ 'delete': configColl.getName(),
+ deletes: [{q: {a: 1}, limit: 0}]
+});
+printjson(result = configColl.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(0, st.config0.getCollection(configColl + "").count());
+assert.eq(0, st.config1.getCollection(configColl + "").count());
+assert.eq(0, st.config2.getCollection(configColl + "").count());
+
+MongoRunner.stopMongod(st.config1);
+MongoRunner.stopMongod(st.config2);
+st.configRS.awaitNoPrimary();
+
+// Config server insert with no config PRIMARY
+configColl.remove({});
+printjson(request = {
+ insert: configColl.getName(),
+ documents: [{a: 1}]
+});
+printjson(result = configColl.runCommand(request));
+assert(!result.ok);
+assert(result.errmsg != null);
+
+// Config server update with no config PRIMARY
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ update: configColl.getName(),
+ updates: [{q: {a: 1}, u: {$set: {b: 2}}}]
+});
+printjson(result = configColl.runCommand(request));
+assert(!result.ok);
+assert(result.errmsg != null);
+
+// Config server delete with no config PRIMARY
+configColl.remove({});
+configColl.insert({a: 1});
+printjson(request = {
+ delete: configColl.getName(),
+ deletes: [{q: {a: 1}, limit: 0}]
+});
+printjson(result = configColl.runCommand(request));
+assert(!result.ok);
+assert(result.errmsg != null);
+
+jsTest.log("DONE!");
+
+MongoRunner.stopMongos(staleMongos);
+st.stop();
}());
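
Every request above is a raw batch write command: a document naming the target collection plus
an array of operations. The reply reports how many operations applied (n) and describes each
failed operation in writeErrors, keyed by its position in the batch. A sketch of that reply
shape, assuming `coll` is any collection handle reachable through mongos:

    var reply = coll.runCommand(
        {insert: coll.getName(), documents: [{_id: 1}, {_id: 1}]});  // duplicate _id
    assert(reply.ok);                          // the batch itself was processed
    assert.eq(1, reply.n);                     // the first insert applied
    assert.eq(1, reply.writeErrors.length);    // the second one failed...
    assert.eq(1, reply.writeErrors[0].index);  // ...at batch index 1
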
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index f4deb4335e5..68097fb23a3 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -2,71 +2,67 @@
 * Tests whether mongos detects new sharding state on insert
*/
(function() {
- 'use strict';
+'use strict';
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 10, mongos: 3, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 10, mongos: 3, other: {shardAsReplicaSet: false}});
- var mongosA = st.s0;
- var mongosB = st.s1;
- var mongosC = st.s2;
+var mongosA = st.s0;
+var mongosB = st.s1;
+var mongosC = st.s2;
- var admin = mongosA.getDB("admin");
- var config = mongosA.getDB("config");
+var admin = mongosA.getDB("admin");
+var config = mongosA.getDB("config");
- var collA = mongosA.getCollection("foo.bar");
- var collB = mongosB.getCollection("" + collA);
- var collC = mongosC.getCollection("" + collA);
+var collA = mongosA.getCollection("foo.bar");
+var collB = mongosB.getCollection("" + collA);
+var collC = mongosC.getCollection("" + collA);
- var shards = [
- st.shard0,
- st.shard1,
- st.shard2,
- st.shard3,
- st.shard4,
- st.shard5,
- st.shard6,
- st.shard7,
- st.shard8,
- st.shard9
- ];
+var shards = [
+ st.shard0,
+ st.shard1,
+ st.shard2,
+ st.shard3,
+ st.shard4,
+ st.shard5,
+ st.shard6,
+ st.shard7,
+ st.shard8,
+ st.shard9
+];
- assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
- st.ensurePrimaryShard(collA.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+st.ensurePrimaryShard(collA.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
- jsTestLog("Splitting up the collection...");
+jsTestLog("Splitting up the collection...");
- // Split up the collection
- for (var i = 0; i < shards.length; i++) {
- assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i].shardName}));
- }
+// Split up the collection
+for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i].shardName}));
+}
- mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
- mongosC.getDB("admin").runCommand({flushRouterConfig: 1});
+mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
+mongosC.getDB("admin").runCommand({flushRouterConfig: 1});
- printjson(collB.count());
- printjson(collC.count());
+printjson(collB.count());
+printjson(collC.count());
- // Change up all the versions...
- for (var i = 0; i < shards.length; i++) {
- assert.commandWorked(admin.runCommand({
- moveChunk: "" + collA,
- find: {_id: i},
- to: shards[(i + 1) % shards.length].shardName
- }));
- }
+// Change up all the versions...
+for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: "" + collA, find: {_id: i}, to: shards[(i + 1) % shards.length].shardName}));
+}
- // Make sure mongos A is up-to-date
- mongosA.getDB("admin").runCommand({flushRouterConfig: 1});
+// Make sure mongos A is up-to-date
+mongosA.getDB("admin").runCommand({flushRouterConfig: 1});
- jsTestLog("Running count!");
+jsTestLog("Running count!");
- printjson(collB.count());
- printjson(collC.find().toArray());
-
- st.stop();
+printjson(collB.count());
+printjson(collC.find().toArray());
+st.stop();
})();
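
The test above reshuffles every chunk while explicitly refreshing only the first mongos, so
the reads through the other two routers must notice on their own that their cached routing
tables are stale. flushRouterConfig is the manual form of that invalidation; a sketch,
assuming `conn` is a connection to any mongos:

    // Drop the router's cached routing table so that its next operation
    // refreshes the metadata from the config servers.
    assert.commandWorked(conn.getDB("admin").runCommand({flushRouterConfig: 1}));
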
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index d50830a4665..a2162771492 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -1,284 +1,282 @@
// Tests bulk inserts to mongos
(function() {
- 'use strict';
+'use strict';
- // TODO: SERVER-33601 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33601 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
- var mongos = st.s;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
+var mongos = st.s;
+var staleMongos = st.s1;
+var admin = mongos.getDB("admin");
- var collSh = mongos.getCollection(jsTestName() + ".collSharded");
- var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
- var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");
+var collSh = mongos.getCollection(jsTestName() + ".collSharded");
+var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
+var collDi = st.shard0.getCollection(jsTestName() + ".collDirect");
- jsTest.log('Checking write to config collections...');
- assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
+jsTest.log('Checking write to config collections...');
+assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
- jsTest.log("Setting up collections...");
+jsTest.log("Setting up collections...");
- assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
- st.ensurePrimaryShard(collSh.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
+st.ensurePrimaryShard(collSh.getDB() + "", st.shard0.shardName);
- assert.commandWorked(
- admin.runCommand({movePrimary: collUn.getDB() + "", to: st.shard1.shardName}));
+assert.commandWorked(admin.runCommand({movePrimary: collUn.getDB() + "", to: st.shard1.shardName}));
- printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
- printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
- printjson(collDi.ensureIndex({ukey: 1}, {unique: true}));
+printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
+printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
+printjson(collDi.ensureIndex({ukey: 1}, {unique: true}));
- assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
- assert.commandWorked(admin.runCommand({split: collSh + "", middle: {ukey: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
+assert.commandWorked(admin.runCommand({split: collSh + "", middle: {ukey: 0}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- var resetColls = function() {
- assert.writeOK(collSh.remove({}));
- assert.writeOK(collUn.remove({}));
- assert.writeOK(collDi.remove({}));
- };
+var resetColls = function() {
+ assert.writeOK(collSh.remove({}));
+ assert.writeOK(collUn.remove({}));
+ assert.writeOK(collDi.remove({}));
+};
- var isDupKeyError = function(err) {
- return /dup key/.test(err + "");
- };
+var isDupKeyError = function(err) {
+ return /dup key/.test(err + "");
+};
- jsTest.log("Collections created.");
- st.printShardingStatus();
+jsTest.log("Collections created.");
+st.printShardingStatus();
- //
- // BREAK-ON-ERROR
- //
+//
+// BREAK-ON-ERROR
+//
- jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
+jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 1}];
- assert.writeOK(collSh.insert(inserts));
- assert.eq(2, collSh.find().itcount());
+assert.writeOK(collSh.insert(inserts));
+assert.eq(2, collSh.find().itcount());
- assert.writeOK(collUn.insert(inserts));
- assert.eq(2, collUn.find().itcount());
+assert.writeOK(collUn.insert(inserts));
+assert.eq(2, collUn.find().itcount());
- assert.writeOK(collDi.insert(inserts));
- assert.eq(2, collDi.find().itcount());
+assert.writeOK(collDi.insert(inserts));
+assert.eq(2, collDi.find().itcount());
- jsTest.log("Bulk insert (no COE) with mongos error...");
+jsTest.log("Bulk insert (no COE) with mongos error...");
- resetColls();
- var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts));
- assert.eq(1, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(1, collSh.find().itcount());
- jsTest.log("Bulk insert (no COE) with mongod error...");
+jsTest.log("Bulk insert (no COE) with mongod error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts));
- assert.eq(1, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(1, collSh.find().itcount());
- assert.writeError(collUn.insert(inserts));
- assert.eq(1, collUn.find().itcount());
+assert.writeError(collUn.insert(inserts));
+assert.eq(1, collUn.find().itcount());
- assert.writeError(collDi.insert(inserts));
- assert.eq(1, collDi.find().itcount());
+assert.writeError(collDi.insert(inserts));
+assert.eq(1, collDi.find().itcount());
- jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
+jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}, {hello: "world"}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}, {hello: "world"}];
- var res = assert.writeError(collSh.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(1, collSh.find().itcount());
+var res = assert.writeError(collSh.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(1, collSh.find().itcount());
- res = assert.writeError(collUn.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(1, collUn.find().itcount());
+res = assert.writeError(collUn.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(1, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(1, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(1, collDi.find().itcount());
- jsTest.log("Bulk insert (no COE) on second shard...");
+jsTest.log("Bulk insert (no COE) on second shard...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: -1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: -1}];
- assert.writeOK(collSh.insert(inserts));
- assert.eq(2, collSh.find().itcount());
+assert.writeOK(collSh.insert(inserts));
+assert.eq(2, collSh.find().itcount());
- assert.writeOK(collUn.insert(inserts));
- assert.eq(2, collUn.find().itcount());
+assert.writeOK(collUn.insert(inserts));
+assert.eq(2, collUn.find().itcount());
- assert.writeOK(collDi.insert(inserts));
- assert.eq(2, collDi.find().itcount());
+assert.writeOK(collDi.insert(inserts));
+assert.eq(2, collDi.find().itcount());
- jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
+jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
- resetColls();
- var inserts = [
- {ukey: 0},
- {ukey: 1}, // switches shards
- {ukey: -1},
- {hello: "world"}
- ];
+resetColls();
+var inserts = [
+ {ukey: 0},
+ {ukey: 1}, // switches shards
+ {ukey: -1},
+ {hello: "world"}
+];
- assert.writeError(collSh.insert(inserts));
- assert.eq(3, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(3, collSh.find().itcount());
- jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
+jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 1}, {ukey: -1}, {ukey: -2}, {ukey: -2}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 1}, {ukey: -1}, {ukey: -2}, {ukey: -2}];
- assert.writeError(collSh.insert(inserts));
- assert.eq(4, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts));
+assert.eq(4, collSh.find().itcount());
- assert.writeError(collUn.insert(inserts));
- assert.eq(4, collUn.find().itcount());
+assert.writeError(collUn.insert(inserts));
+assert.eq(4, collUn.find().itcount());
- assert.writeError(collDi.insert(inserts));
- assert.eq(4, collDi.find().itcount());
+assert.writeError(collDi.insert(inserts));
+assert.eq(4, collDi.find().itcount());
- jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
+jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
- resetColls();
- var inserts =
- [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
+resetColls();
+var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
- res = assert.writeError(collSh.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(5, collSh.find().itcount());
+res = assert.writeError(collSh.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(5, collSh.find().itcount());
- res = assert.writeError(collUn.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(5, collUn.find().itcount());
+res = assert.writeError(collUn.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(5, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(5, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(5, collDi.find().itcount());
- //
- // CONTINUE-ON-ERROR
- //
+//
+// CONTINUE-ON-ERROR
+//
- jsTest.log("Bulk insert (yes COE) with mongos error...");
+jsTest.log("Bulk insert (yes COE) with mongos error...");
- resetColls();
- var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts, 1)); // COE
- assert.eq(2, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts, 1)); // COE
+assert.eq(2, collSh.find().itcount());
- jsTest.log("Bulk insert (yes COE) with mongod error...");
+jsTest.log("Bulk insert (yes COE) with mongod error...");
- resetColls();
- var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
+resetColls();
+var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
- assert.writeError(collSh.insert(inserts, 1));
- assert.eq(2, collSh.find().itcount());
+assert.writeError(collSh.insert(inserts, 1));
+assert.eq(2, collSh.find().itcount());
- assert.writeError(collUn.insert(inserts, 1));
- assert.eq(2, collUn.find().itcount());
+assert.writeError(collUn.insert(inserts, 1));
+assert.eq(2, collUn.find().itcount());
- assert.writeError(collDi.insert(inserts, 1));
- assert.eq(2, collDi.find().itcount());
+assert.writeError(collDi.insert(inserts, 1));
+assert.eq(2, collDi.find().itcount());
- jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
+jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
- resetColls();
- var inserts =
- [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
+resetColls();
+var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
- // Last error here is mongos error
- res = assert.writeError(collSh.insert(inserts, 1));
- assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg),
- res.toString());
- assert.eq(5, collSh.find().itcount());
+// Last error here is mongos error
+res = assert.writeError(collSh.insert(inserts, 1));
+assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+assert.eq(5, collSh.find().itcount());
- // Extra insert goes through, since mongos error "doesn't count"
- res = assert.writeError(collUn.insert(inserts, 1));
- assert.eq(6, res.nInserted, res.toString());
- assert.eq(6, collUn.find().itcount());
+// Extra insert goes through, since mongos error "doesn't count"
+res = assert.writeError(collUn.insert(inserts, 1));
+assert.eq(6, res.nInserted, res.toString());
+assert.eq(6, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts, 1));
- assert.eq(6, res.nInserted, res.toString());
- assert.eq(6, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts, 1));
+assert.eq(6, res.nInserted, res.toString());
+assert.eq(6, collDi.find().itcount());
- jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error " +
- "(mongos error first)...");
+jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error " +
+ "(mongos error first)...");
- resetColls();
- var inserts =
- [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {hello: "world"}, {ukey: 4}, {ukey: 4}];
+resetColls();
+var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {hello: "world"}, {ukey: 4}, {ukey: 4}];
- // Last error here is mongos error
- res = assert.writeError(collSh.insert(inserts, 1));
- assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
- assert.eq(5, collSh.find().itcount());
+// Last error here is mongos error
+res = assert.writeError(collSh.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+assert.eq(5, collSh.find().itcount());
- // Extra insert goes through, since mongos error "doesn't count"
- res = assert.writeError(collUn.insert(inserts, 1));
- assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
- assert.eq(6, collUn.find().itcount());
+// Extra insert goes through, since mongos error "doesn't count"
+res = assert.writeError(collUn.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+assert.eq(6, collUn.find().itcount());
- res = assert.writeError(collDi.insert(inserts, 1));
- assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
- assert.eq(6, collDi.find().itcount());
+res = assert.writeError(collDi.insert(inserts, 1));
+assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+assert.eq(6, collDi.find().itcount());
- //
- // Test when WBL has to be invoked mid-insert
- //
+//
+// Test when WBL has to be invoked mid-insert
+//
- jsTest.log("Testing bulk insert (no COE) with WBL...");
- resetColls();
+jsTest.log("Testing bulk insert (no COE) with WBL...");
+resetColls();
- var inserts = [{ukey: 1}, {ukey: -1}];
+var inserts = [{ukey: 1}, {ukey: -1}];
- var staleCollSh = staleMongos.getCollection(collSh + "");
- assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+var staleCollSh = staleMongos.getCollection(collSh + "");
+assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- assert.writeOK(staleCollSh.insert(inserts));
+assert.writeOK(staleCollSh.insert(inserts));
- //
- // Test when the legacy batch exceeds the BSON object size limit
- //
+//
+// Test when the legacy batch exceeds the BSON object size limit
+//
- jsTest.log("Testing bulk insert (no COE) with large objects...");
- resetColls();
+jsTest.log("Testing bulk insert (no COE) with large objects...");
+resetColls();
- var inserts = (function() {
- var data = 'x'.repeat(10 * 1024 * 1024);
- return [
- {ukey: 1, data: data},
- {ukey: 2, data: data},
- {ukey: -1, data: data},
- {ukey: -2, data: data}
- ];
- })();
+var inserts = (function() {
+ var data = 'x'.repeat(10 * 1024 * 1024);
+ return [
+ {ukey: 1, data: data},
+ {ukey: 2, data: data},
+ {ukey: -1, data: data},
+ {ukey: -2, data: data}
+ ];
+})();
- var staleMongosWithLegacyWrites = new Mongo(staleMongos.name);
- staleMongosWithLegacyWrites.forceWriteMode('legacy');
+var staleMongosWithLegacyWrites = new Mongo(staleMongos.name);
+staleMongosWithLegacyWrites.forceWriteMode('legacy');
- staleCollSh = staleMongosWithLegacyWrites.getCollection(collSh + "");
- assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+staleCollSh = staleMongosWithLegacyWrites.getCollection(collSh + "");
+assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- staleCollSh.insert(inserts);
- staleCollSh.getDB().getLastError();
+staleCollSh.insert(inserts);
+staleCollSh.getDB().getLastError();
- st.stop();
+st.stop();
})();
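
The numeric second argument that the test passes to insert() is the legacy insert flag
bitmask, in which bit 1 is ContinueOnError: without it an ordered batch stops at the first
failure, while with it the remaining documents are still attempted. A sketch, assuming `c` is
a collection with a unique index on {ukey: 1}:

    var docs = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
    assert.writeError(c.insert(docs));      // stops at the duplicate key
    assert.eq(1, c.find().itcount());       // {ukey: 1} was never attempted
    c.remove({});
    assert.writeError(c.insert(docs, 1));   // ContinueOnError
    assert.eq(2, c.find().itcount());       // {ukey: 1} still made it in
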
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 6db6a62c998..4f3a4626818 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -7,94 +7,92 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 4, chunkSize: 1});
+var st = new ShardingTest({shards: 4, chunkSize: 1});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(
- st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));
- var db = st.s0.getDB('TestDB');
- var coll = db.TestColl;
+var db = st.s0.getDB('TestDB');
+var coll = db.TestColl;
- // Insert a large number of documents in bulk
- var numDocs = 1000000;
+// Insert a large number of documents in bulk
+var numDocs = 1000000;
- var bulkSize = 4000;
- var docSize = 128; /* bytes */
- print("\n\n\nBulk size is " + bulkSize);
+var bulkSize = 4000;
+var docSize = 128; /* bytes */
+print("\n\n\nBulk size is " + bulkSize);
- var data = "x";
- while (Object.bsonsize({x: data}) < docSize) {
- data += data;
- }
+var data = "x";
+while (Object.bsonsize({x: data}) < docSize) {
+ data += data;
+}
- print("\n\n\nDocument size is " + Object.bsonsize({x: data}));
+print("\n\n\nDocument size is " + Object.bsonsize({x: data}));
- var docsInserted = 0;
- var balancerOn = false;
+var docsInserted = 0;
+var balancerOn = false;
- /**
-     * Ensures that all documents inserted so far can be found.
- */
- function checkDocuments() {
- var docsFound = coll.find({}, {_id: 0, Counter: 1}).toArray();
- var count = coll.find().count();
+/**
+ * Ensures that all documents inserted so far can be found.
+ */
+function checkDocuments() {
+ var docsFound = coll.find({}, {_id: 0, Counter: 1}).toArray();
+ var count = coll.find().count();
- if (docsFound.length != docsInserted) {
- print("Inserted " + docsInserted + " count : " + count + " doc count : " +
- docsFound.length);
+ if (docsFound.length != docsInserted) {
+ print("Inserted " + docsInserted + " count : " + count +
+ " doc count : " + docsFound.length);
- var allFoundDocsSorted = docsFound.sort(function(a, b) {
- return a.Counter - b.Counter;
- });
+ var allFoundDocsSorted = docsFound.sort(function(a, b) {
+ return a.Counter - b.Counter;
+ });
- var missingValueInfo;
+ var missingValueInfo;
- for (var i = 0; i < docsInserted; i++) {
- if (i != allFoundDocsSorted[i].Counter) {
- missingValueInfo = {expected: i, actual: allFoundDocsSorted[i].Counter};
- break;
- }
+ for (var i = 0; i < docsInserted; i++) {
+ if (i != allFoundDocsSorted[i].Counter) {
+ missingValueInfo = {expected: i, actual: allFoundDocsSorted[i].Counter};
+ break;
}
+ }
- st.printShardingStatus();
+ st.printShardingStatus();
- assert(false,
- 'Inserted number of documents does not match the actual: ' +
- tojson(missingValueInfo));
- }
+ assert(
+ false,
+ 'Inserted number of documents does not match the actual: ' + tojson(missingValueInfo));
}
+}
- while (docsInserted < numDocs) {
- var currBulkSize =
- (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
+while (docsInserted < numDocs) {
+ var currBulkSize = (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
- var bulk = [];
- for (var i = 0; i < currBulkSize; i++) {
- bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
- docsInserted++;
- }
+ var bulk = [];
+ for (var i = 0; i < currBulkSize; i++) {
+ bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
+ docsInserted++;
+ }
- assert.writeOK(coll.insert(bulk));
+ assert.writeOK(coll.insert(bulk));
- if (docsInserted % 10000 == 0) {
- print("Inserted " + docsInserted + " documents.");
- st.printShardingStatus();
- }
+ if (docsInserted % 10000 == 0) {
+ print("Inserted " + docsInserted + " documents.");
+ st.printShardingStatus();
+ }
- if (docsInserted > numDocs / 3 && !balancerOn) {
- // Do one check before we turn balancer on
- checkDocuments();
- print('Turning on balancer after ' + docsInserted + ' documents inserted.');
- st.startBalancer();
- balancerOn = true;
- }
+ if (docsInserted > numDocs / 3 && !balancerOn) {
+ // Do one check before we turn balancer on
+ checkDocuments();
+ print('Turning on balancer after ' + docsInserted + ' documents inserted.');
+ st.startBalancer();
+ balancerOn = true;
}
+}
- checkDocuments();
+checkDocuments();
- st.stop();
+st.stop();
})();
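
The insert loop above clamps every batch to min(remaining, bulkSize) so that the final batch
covers exactly the leftover documents. A sketch of that arithmetic in isolation, using the
test's own totals:

    function nextBatchSize(numDocs, docsInserted, bulkSize) {
        var remaining = numDocs - docsInserted;
        return (remaining > bulkSize) ? bulkSize : remaining;
    }
    assert.eq(4000, nextBatchSize(1000000, 0, 4000));       // full batch
    assert.eq(1000, nextBatchSize(1000000, 999000, 4000));  // final partial batch
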
diff --git a/jstests/sharding/causal_consistency_shell_support.js b/jstests/sharding/causal_consistency_shell_support.js
index f66b772aa9a..8466209d367 100644
--- a/jstests/sharding/causal_consistency_shell_support.js
+++ b/jstests/sharding/causal_consistency_shell_support.js
@@ -4,186 +4,184 @@
* response, and that the server rejects commands with afterClusterTime ahead of cluster time.
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- // Verifies the command works and properly updates operation or cluster time.
- function runCommandAndCheckLogicalTimes(cmdObj, db, shouldAdvance) {
- const session = db.getSession();
-
- // Extract initial operation and cluster time.
- let operationTime = session.getOperationTime();
- let clusterTimeObj = session.getClusterTime();
-
- assert.commandWorked(db.runCommand(cmdObj));
-
- // Verify cluster and operation time.
- if (shouldAdvance) {
- assert(bsonWoCompare(session.getOperationTime(), operationTime) > 0,
- "expected the shell's operationTime to increase after running command: " +
- tojson(cmdObj));
- assert(
- bsonWoCompare(session.getClusterTime().clusterTime, clusterTimeObj.clusterTime) > 0,
- "expected the shell's clusterTime value to increase after running command: " +
- tojson(cmdObj));
- } else {
- // Don't assert that clusterTime and operationTime are unchanged, because they may be
- // incremented by unrelated activity in the cluster.
- }
+"use strict";
+
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+// Verifies the command works and properly updates operation or cluster time.
+function runCommandAndCheckLogicalTimes(cmdObj, db, shouldAdvance) {
+ const session = db.getSession();
+
+ // Extract initial operation and cluster time.
+ let operationTime = session.getOperationTime();
+ let clusterTimeObj = session.getClusterTime();
+
+ assert.commandWorked(db.runCommand(cmdObj));
+
+ // Verify cluster and operation time.
+ if (shouldAdvance) {
+ assert(bsonWoCompare(session.getOperationTime(), operationTime) > 0,
+ "expected the shell's operationTime to increase after running command: " +
+ tojson(cmdObj));
+ assert(bsonWoCompare(session.getClusterTime().clusterTime, clusterTimeObj.clusterTime) > 0,
+ "expected the shell's clusterTime value to increase after running command: " +
+ tojson(cmdObj));
+ } else {
+ // Don't assert that clusterTime and operationTime are unchanged, because they may be
+ // incremented by unrelated activity in the cluster.
}
-
- // Verifies the command works and correctly updates the shell's operationTime.
- function commandWorksAndUpdatesOperationTime(cmdObj, db) {
- const session = db.getSession();
-
- // Use the latest cluster time returned as a new operationTime and run command.
- const clusterTimeObj = session.getClusterTime();
- session.advanceOperationTime(clusterTimeObj.clusterTime);
- assert.commandWorked(testDB.runCommand(cmdObj));
-
- // Verify the response contents and that new operation time is >= passed in time.
- assert(bsonWoCompare(session.getOperationTime(), clusterTimeObj.clusterTime) >= 0,
- "expected the shell's operationTime to be >= to:" + clusterTimeObj.clusterTime +
- " after running command: " + tojson(cmdObj));
- }
-
- // Manually create a shard so tests on storage engines that don't support majority readConcern
- // can exit early.
- const rsName = "causal_consistency_shell_support_rs";
- const rst = new ReplSetTest({
- nodes: 1,
- name: rsName,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
+}
+
+// Verifies the command works and correctly updates the shell's operationTime.
+function commandWorksAndUpdatesOperationTime(cmdObj, db) {
+ const session = db.getSession();
+
+ // Use the latest cluster time returned as a new operationTime and run command.
+ const clusterTimeObj = session.getClusterTime();
+ session.advanceOperationTime(clusterTimeObj.clusterTime);
+ assert.commandWorked(testDB.runCommand(cmdObj));
+
+ // Verify the response contents and that new operation time is >= passed in time.
+ assert(bsonWoCompare(session.getOperationTime(), clusterTimeObj.clusterTime) >= 0,
+ "expected the shell's operationTime to be >= to:" + clusterTimeObj.clusterTime +
+ " after running command: " + tojson(cmdObj));
+}
+
+// Manually create a shard so tests on storage engines that don't support majority readConcern
+// can exit early.
+const rsName = "causal_consistency_shell_support_rs";
+const rst = new ReplSetTest({
+ nodes: 1,
+ name: rsName,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
- rst.initiate();
-
- // Start the sharding test and add the majority readConcern enabled replica set.
- const name = "causal_consistency_shell_support";
- const st = new ShardingTest({name: name, shards: 1, manualAddShard: true});
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
-
- const testDB = st.s.getDB("test");
- const session = testDB.getSession();
-
- // Verify causal consistency is disabled unless explicitly set.
- assert.eq(testDB.getMongo()._causalConsistency,
- false,
- "causal consistency should be disabled by default");
- testDB.getMongo().setCausalConsistency(true);
-
- // Verify causal consistency is enabled for the connection and for each supported command.
- assert.eq(testDB.getMongo()._causalConsistency,
- true,
- "calling setCausalConsistency() didn't enable causal consistency");
-
- // Verify cluster times are tracked even before causal consistency is set (so the first
- // operation with causal consistency set can use valid cluster times).
- session.resetOperationTime_forTesting();
-
- assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
- assert.neq(session.getOperationTime(), null);
- assert.neq(session.getClusterTime(), null);
-
- session.resetOperationTime_forTesting();
-
- assert.commandWorked(testDB.runCommand({find: "foo"}));
- assert.neq(session.getOperationTime(), null);
- assert.neq(session.getClusterTime(), null);
-
- // Test that write commands advance both operation and cluster time.
- runCommandAndCheckLogicalTimes({insert: "foo", documents: [{x: 2}]}, testDB, true);
- runCommandAndCheckLogicalTimes(
- {update: "foo", updates: [{q: {x: 2}, u: {$set: {x: 3}}}]}, testDB, true);
-
- // Test that each supported command works as expected and the shell's cluster times are properly
- // forwarded to the server and updated based on the response.
- testDB.getMongo().setCausalConsistency(true);
-
- // Aggregate command.
- let aggColl = "aggColl";
- let aggCmd = {aggregate: aggColl, pipeline: [{$match: {x: 1}}], cursor: {}};
-
- runCommandAndCheckLogicalTimes({insert: aggColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(aggCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(aggCmd, testDB);
-
- // Count command.
- let countColl = "countColl";
- let countCmd = {count: countColl};
-
- runCommandAndCheckLogicalTimes({insert: countColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(countCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(countCmd, testDB);
-
- // Distinct command.
- let distinctColl = "distinctColl";
- let distinctCmd = {distinct: distinctColl, key: "x"};
-
- runCommandAndCheckLogicalTimes(
- {insert: distinctColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(distinctCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(distinctCmd, testDB);
-
- // Find command.
- let findColl = "findColl";
- let findCmd = {find: findColl};
-
- runCommandAndCheckLogicalTimes({insert: findColl, documents: [{_id: 1, x: 1}]}, testDB, true);
- runCommandAndCheckLogicalTimes(findCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(findCmd, testDB);
-
- // Aggregate command with $geoNear.
- let geoNearColl = "geoNearColl";
- let geoNearCmd = {
- aggregate: geoNearColl,
- cursor: {},
- pipeline: [
- {
- $geoNear: {
- near: {type: "Point", coordinates: [-10, 10]},
- distanceField: "dist",
- spherical: true
- }
- },
- ],
- };
-
- assert.commandWorked(testDB[geoNearColl].createIndex({loc: "2dsphere"}));
- runCommandAndCheckLogicalTimes(
- {insert: geoNearColl, documents: [{_id: 1, loc: {type: "Point", coordinates: [-10, 10]}}]},
- testDB,
- true);
- runCommandAndCheckLogicalTimes(geoNearCmd, testDB, false);
- commandWorksAndUpdatesOperationTime(geoNearCmd, testDB);
-
- // GeoSearch is not supported for sharded clusters.
-
- // MapReduce doesn't currently support read concern majority.
-
- // Verify that the server rejects commands when operation time is invalid by running a command
- // with an afterClusterTime value one day ahead.
- const invalidTime = new Timestamp(session.getOperationTime().getTime() + (60 * 60 * 24), 0);
- const invalidCmd = {
- find: "foo",
- readConcern: {level: "majority", afterClusterTime: invalidTime}
- };
- assert.commandFailedWithCode(
- testDB.runCommand(invalidCmd),
- ErrorCodes.InvalidOptions,
- "expected command, " + tojson(invalidCmd) + ", to fail with code, " +
- ErrorCodes.InvalidOptions + ", because the afterClusterTime value, " + invalidTime +
- ", should not be ahead of the clusterTime, " + session.getClusterTime().clusterTime);
+});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
rst.stopSet();
- st.stop();
+ return;
+}
+rst.initiate();
+
+// Start the sharding test and add the majority readConcern enabled replica set.
+const name = "causal_consistency_shell_support";
+const st = new ShardingTest({name: name, shards: 1, manualAddShard: true});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+
+const testDB = st.s.getDB("test");
+const session = testDB.getSession();
+
+// Verify causal consistency is disabled unless explicitly set.
+assert.eq(testDB.getMongo()._causalConsistency,
+ false,
+ "causal consistency should be disabled by default");
+testDB.getMongo().setCausalConsistency(true);
+
+// Verify causal consistency is enabled for the connection and for each supported command.
+assert.eq(testDB.getMongo()._causalConsistency,
+ true,
+ "calling setCausalConsistency() didn't enable causal consistency");
+
+// Verify cluster times are tracked even before causal consistency is set (so the first
+// operation with causal consistency set can use valid cluster times).
+session.resetOperationTime_forTesting();
+
+assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
+assert.neq(session.getOperationTime(), null);
+assert.neq(session.getClusterTime(), null);
+
+session.resetOperationTime_forTesting();
+
+assert.commandWorked(testDB.runCommand({find: "foo"}));
+assert.neq(session.getOperationTime(), null);
+assert.neq(session.getClusterTime(), null);
+
+// Test that write commands advance both operation and cluster time.
+runCommandAndCheckLogicalTimes({insert: "foo", documents: [{x: 2}]}, testDB, true);
+runCommandAndCheckLogicalTimes(
+ {update: "foo", updates: [{q: {x: 2}, u: {$set: {x: 3}}}]}, testDB, true);
+
+// Test that each supported command works as expected and the shell's cluster times are properly
+// forwarded to the server and updated based on the response.
+testDB.getMongo().setCausalConsistency(true);
+
+// Aggregate command.
+let aggColl = "aggColl";
+let aggCmd = {aggregate: aggColl, pipeline: [{$match: {x: 1}}], cursor: {}};
+
+runCommandAndCheckLogicalTimes({insert: aggColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(aggCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(aggCmd, testDB);
+
+// Count command.
+let countColl = "countColl";
+let countCmd = {count: countColl};
+
+runCommandAndCheckLogicalTimes({insert: countColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(countCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(countCmd, testDB);
+
+// Distinct command.
+let distinctColl = "distinctColl";
+let distinctCmd = {distinct: distinctColl, key: "x"};
+
+runCommandAndCheckLogicalTimes({insert: distinctColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(distinctCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(distinctCmd, testDB);
+
+// Find command.
+let findColl = "findColl";
+let findCmd = {find: findColl};
+
+runCommandAndCheckLogicalTimes({insert: findColl, documents: [{_id: 1, x: 1}]}, testDB, true);
+runCommandAndCheckLogicalTimes(findCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(findCmd, testDB);
+
+// Aggregate command with $geoNear.
+let geoNearColl = "geoNearColl";
+let geoNearCmd = {
+ aggregate: geoNearColl,
+ cursor: {},
+ pipeline: [
+ {
+ $geoNear: {
+ near: {type: "Point", coordinates: [-10, 10]},
+ distanceField: "dist",
+ spherical: true
+ }
+ },
+ ],
+};
+
+assert.commandWorked(testDB[geoNearColl].createIndex({loc: "2dsphere"}));
+runCommandAndCheckLogicalTimes(
+ {insert: geoNearColl, documents: [{_id: 1, loc: {type: "Point", coordinates: [-10, 10]}}]},
+ testDB,
+ true);
+runCommandAndCheckLogicalTimes(geoNearCmd, testDB, false);
+commandWorksAndUpdatesOperationTime(geoNearCmd, testDB);
+
+// GeoSearch is not supported for sharded clusters.
+
+// MapReduce doesn't currently support read concern majority.
+
+// Verify that the server rejects commands when operation time is invalid by running a command
+// with an afterClusterTime value one day ahead.
+const invalidTime = new Timestamp(session.getOperationTime().getTime() + (60 * 60 * 24), 0);
+const invalidCmd = {
+ find: "foo",
+ readConcern: {level: "majority", afterClusterTime: invalidTime}
+};
+assert.commandFailedWithCode(
+ testDB.runCommand(invalidCmd),
+ ErrorCodes.InvalidOptions,
+ "expected command, " + tojson(invalidCmd) + ", to fail with code, " +
+ ErrorCodes.InvalidOptions + ", because the afterClusterTime value, " + invalidTime +
+ ", should not be ahead of the clusterTime, " + session.getClusterTime().clusterTime);
+
+rst.stopSet();
+st.stop();
})();
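
The helper functions above hinge on one invariant: a write advances both of the
session's logical times. A minimal sketch of that check, assuming a mongo shell
'db' connected to a deployment that returns logical times; the collection name
is illustrative:

(function() {
'use strict';
const session = db.getSession();
// Run one command first so the shell has initial logical times to compare.
assert.commandWorked(db.runCommand({insert: "causalSketch", documents: [{x: 0}]}));
const opTimeBefore = session.getOperationTime();
const clusterTimeBefore = session.getClusterTime();
assert.commandWorked(db.runCommand({insert: "causalSketch", documents: [{x: 1}]}));
// A write should advance both the operationTime and the clusterTime.
assert(bsonWoCompare(session.getOperationTime(), opTimeBefore) > 0);
assert(bsonWoCompare(session.getClusterTime().clusterTime,
                     clusterTimeBefore.clusterTime) > 0);
})();

The check at the end of the test, rejecting an afterClusterTime ahead of the
clusterTime, guards the same invariant from the server's side.
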
diff --git a/jstests/sharding/change_stream_chunk_migration.js b/jstests/sharding/change_stream_chunk_migration.js
index 64f7d860c2a..a4e74ed3efd 100644
--- a/jstests/sharding/change_stream_chunk_migration.js
+++ b/jstests/sharding/change_stream_chunk_migration.js
@@ -2,167 +2,163 @@
// it's migrating a chunk to a new shard.
// @tags: [uses_change_streams]
(function() {
- 'use strict';
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
- if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
- jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
- return;
- }
-
- const rsNodeOptions = {
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- };
- const st =
- new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
-
- const mongos = st.s;
- const mongosColl = mongos.getCollection('test.foo');
- const mongosDB = mongos.getDB("test");
-
- // Enable sharding to inform mongos of the database, allowing us to open a cursor.
- assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
-
- // Make sure all chunks start on shard 0.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Open a change stream cursor before the collection is sharded.
- const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext(), "Do not expect any results yet");
-
- jsTestLog("Sharding collection");
- // Once we have a cursor, actually shard the collection.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Insert two documents.
- assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
-
- // Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
-
- jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- for (let id of[0, 20]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: id});
- }
-
- // Insert into both the chunks.
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
-
- // Split again, and move a second chunk to shard 1. The new chunks are:
- // [MinKey, 0), [0, 10), and [10, MaxKey].
- jsTestLog("Moving [0, 10) to shard 1");
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
-
- // Make sure we can see all the inserts, without any 'retryNeeded' entries.
- for (let nextExpectedId of[1, 21, -2, 2, 22]) {
- assert.soon(() => changeStream.hasNext());
- let item = changeStream.next();
- assert.eq(item.documentKey, {_id: nextExpectedId});
- }
-
- // Make sure we're at the end of the stream.
- assert(!changeStream.hasNext());
-
- // Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
- // will not invalidate the change stream.
-
- // Insert into all three chunks.
- jsTestLog("Insert into all three chunks");
- assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
-
- // Make sure we can see all the inserts, without any 'retryNeeded' entries.
- for (let nextExpectedId of[-3, 3, 23, -4, 4, 24]) {
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
- }
-
- // Now test that adding a new shard and migrating a chunk to it will continue to
- // return the correct results.
- const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
- newShard.startSet({shardsvr: ''});
- newShard.initiate();
- assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
-
- // At this point, there haven't been any migrations to that shard; check that the changeStream
- // works normally.
- assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
-
- for (let nextExpectedId of[-5, 5, 25]) {
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
- }
-
- assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
-
- // Now migrate a chunk to the new shard and verify the stream continues to return results
- // from both before and after the migration.
- jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: "newShard",
- _waitForDelete: true
- }));
- assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
-
- for (let nextExpectedId of[16, -6, 6, 26]) {
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
- }
- assert(!changeStream.hasNext());
-
- st.stop();
- newShard.stopSet();
+'use strict';
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
+if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
+ jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
+ return;
+}
+
+const rsNodeOptions = {
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+};
+const st =
+ new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+
+const mongos = st.s;
+const mongosColl = mongos.getCollection('test.foo');
+const mongosDB = mongos.getDB("test");
+
+// Enable sharding to inform mongos of the database, allowing us to open a cursor.
+assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
+
+// Make sure all chunks start on shard 0.
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Open a change stream cursor before the collection is sharded.
+const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+assert(!changeStream.hasNext(), "Do not expect any results yet");
+
+jsTestLog("Sharding collection");
+// Once we have a cursor, actually shard the collection.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Insert two documents.
+assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+
+// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
+
+jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 20},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+for (let id of [0, 20]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: id});
+}
+
+// Insert into both the chunks.
+assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+
+// Split again, and move a second chunk to shard 1. The new chunks are:
+// [MinKey, 0), [0, 10), and [10, MaxKey].
+jsTestLog("Moving [0, 10) to shard 1");
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+
+// Make sure we can see all the inserts, without any 'retryNeeded' entries.
+for (let nextExpectedId of [1, 21, -2, 2, 22]) {
+ assert.soon(() => changeStream.hasNext());
+ let item = changeStream.next();
+ assert.eq(item.documentKey, {_id: nextExpectedId});
+}
+
+// Make sure we're at the end of the stream.
+assert(!changeStream.hasNext());
+
+// Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
+// will not invalidate the change stream.
+
+// Insert into all three chunks.
+jsTestLog("Insert into all three chunks");
+assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+
+// Make sure we can see all the inserts, without any 'retryNeeded' entries.
+for (let nextExpectedId of [-3, 3, 23, -4, 4, 24]) {
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
+}
+
+// Now test that adding a new shard and migrating a chunk to it will continue to
+// return the correct results.
+const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
+newShard.startSet({shardsvr: ''});
+newShard.initiate();
+assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
+
+// At this point, there haven't been any migrations to that shard; check that the changeStream
+// works normally.
+assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+
+for (let nextExpectedId of [-5, 5, 25]) {
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
+}
+
+assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+
+// Now migrate a chunk to the new shard and verify the stream continues to return results
+// from both before and after the migration.
+jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
+assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+
+for (let nextExpectedId of [16, -6, 6, 26]) {
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().documentKey, {_id: nextExpectedId});
+}
+assert(!changeStream.hasNext());
+
+st.stop();
+newShard.stopSet();
})();
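
Nearly every assertion in this test follows one pattern: insert documents, then
drain the change stream and check each documentKey in order. A minimal
standalone sketch of that drain loop, assuming a deployment that supports
change streams; the collection name is illustrative:

(function() {
'use strict';
const coll = db.csDrainSketch;  // hypothetical collection name
const stream = coll.watch();
const expectedIds = [-1, 0, 1];
for (let id of expectedIds) {
    assert.writeOK(coll.insert({_id: id}, {writeConcern: {w: "majority"}}));
}
// Drain: wait for each expected insert notification, in insertion order.
for (let id of expectedIds) {
    assert.soon(() => stream.hasNext());
    const event = stream.next();
    assert.eq(event.operationType, "insert");
    assert.eq(event.documentKey, {_id: id});
}
assert(!stream.hasNext());
stream.close();
})();
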
diff --git a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
index d97e88f62a1..5d854fdf44c 100644
--- a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
+++ b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
@@ -5,186 +5,183 @@
// shards.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
-
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
- // second so that each shard is continually advancing its optime, allowing the
- // AsyncResultsMerger to return sorted results even if some shards have not yet produced any
- // data.
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- const shard0DB = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Start the profiler on each shard so that we can examine the getMores' maxTimeMS.
- for (let profileDB of[shard0DB, shard1DB]) {
- assert.commandWorked(profileDB.setProfilingLevel(0));
- profileDB.system.profile.drop();
- assert.commandWorked(profileDB.setProfilingLevel(2));
- }
-
- // Returns 'true' if there is at least one getMore profile entry matching the given namespace,
- // identifying comment and maxTimeMS.
- function profilerHasAtLeastOneMatchingGetMore(profileDB, nss, comment, timeout) {
- return profileDB.system.profile.count({
- "originatingCommand.comment": comment,
- "command.maxTimeMS": timeout,
- op: "getmore",
- ns: nss
- }) > 0;
- }
-
- // Asserts that there is at least one getMore profile entry matching the given namespace and
- // identifying comment, and that all such entries have the given maxTimeMS.
- function assertAllGetMoresHaveTimeout(profileDB, nss, comment, timeout) {
- const getMoreTimeouts =
- profileDB.system.profile
- .aggregate([
- {$match: {op: "getmore", ns: nss, "originatingCommand.comment": comment}},
- {$group: {_id: "$command.maxTimeMS"}}
- ])
- .toArray();
- assert.eq(getMoreTimeouts.length, 1);
- assert.eq(getMoreTimeouts[0]._id, timeout);
- }
-
- // Kills the cursor with the given cursor id (if provided). Then opens a new change stream
- // against 'mongosColl' and returns the new change stream's cursor id.
- //
- // We re-open the change stream between test cases with a batchSize of 0. This is done to
- // ensure that mongos delivers getMores to the shards for the first getMore against the mongos
- // change stream cursor (thus avoiding issues such as SERVER-35084).
- function reopenChangeStream(existingCursorId) {
- if (existingCursorId) {
- assert.commandWorked(mongosDB.runCommand(
- {killCursors: mongosColl.getName(), cursors: [existingCursorId]}));
- }
-
- const csCmdRes = assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {}}],
- comment: testComment,
- cursor: {batchSize: 0}
- }));
- assert.eq(csCmdRes.cursor.firstBatch.length, 0);
- assert.neq(csCmdRes.cursor.id, 0);
- return csCmdRes.cursor.id;
- }
-
- // Timeout values used in the subsequent getMore tests.
- const quarterSec = 250;
- const halfSec = 500;
- const oneSec = 2 * halfSec;
- const fiveSecs = 5 * oneSec;
- const fiveMins = 60 * fiveSecs;
- const thirtyMins = 6 * fiveMins;
- const testComment = "change stream sharded maxTimeMS test";
-
- // Open a $changeStream on the empty, inactive collection.
- let csCursorId = reopenChangeStream();
-
- // Confirm that getMores without an explicit maxTimeMS default to one second on the shards.
- assert.commandWorked(
- mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
- for (let shardDB of[shard0DB, shard1DB]) {
- // The mongos is guaranteed to have already delivered getMores to each of the shards.
- // However, the mongos await time can expire prior to the await time on the shards.
- // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
- // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
- // on the shards to expire, at which point the getMores will appear in the profile
- // collection.
- assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
- shardDB, mongosColl.getFullName(), testComment, oneSec));
+"use strict";
+
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+
+// This test only works on storage engines that support committed reads, skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
+// second so that each shard is continually advancing its optime, allowing the
+// AsyncResultsMerger to return sorted results even if some shards have not yet produced any
+// data.
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+const shard0DB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Start the profiler on each shard so that we can examine the getMores' maxTimeMS.
+for (let profileDB of [shard0DB, shard1DB]) {
+ assert.commandWorked(profileDB.setProfilingLevel(0));
+ profileDB.system.profile.drop();
+ assert.commandWorked(profileDB.setProfilingLevel(2));
+}
+
+// Returns 'true' if there is at least one getMore profile entry matching the given namespace,
+// identifying comment and maxTimeMS.
+function profilerHasAtLeastOneMatchingGetMore(profileDB, nss, comment, timeout) {
+ return profileDB.system.profile.count({
+ "originatingCommand.comment": comment,
+ "command.maxTimeMS": timeout,
+ op: "getmore",
+ ns: nss
+ }) > 0;
+}
+
+// Asserts that there is at least one getMore profile entry matching the given namespace and
+// identifying comment, and that all such entries have the given maxTimeMS.
+function assertAllGetMoresHaveTimeout(profileDB, nss, comment, timeout) {
+ const getMoreTimeouts =
+ profileDB.system.profile
+ .aggregate([
+ {$match: {op: "getmore", ns: nss, "originatingCommand.comment": comment}},
+ {$group: {_id: "$command.maxTimeMS"}}
+ ])
+ .toArray();
+ assert.eq(getMoreTimeouts.length, 1);
+ assert.eq(getMoreTimeouts[0]._id, timeout);
+}
+
+// Kills the cursor with the given cursor id (if provided). Then opens a new change stream
+// against 'mongosColl' and returns the new change stream's cursor id.
+//
+// We re-open the change stream between test cases with a batchSize of 0. This is done to
+// ensure that mongos delivers getMores to the shards for the first getMore against the mongos
+// change stream cursor (thus avoiding issues such as SERVER-35084).
+function reopenChangeStream(existingCursorId) {
+ if (existingCursorId) {
+ assert.commandWorked(
+ mongosDB.runCommand({killCursors: mongosColl.getName(), cursors: [existingCursorId]}));
}
- // Verify that with no activity on the shards, a $changeStream with maxTimeMS waits for the full
- // duration on mongoS. Allow some leniency since the server-side wait may wake spuriously.
- csCursorId = reopenChangeStream(csCursorId);
- let startTime = (new Date()).getTime();
+ const csCmdRes = assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ comment: testComment,
+ cursor: {batchSize: 0}
+ }));
+ assert.eq(csCmdRes.cursor.firstBatch.length, 0);
+ assert.neq(csCmdRes.cursor.id, 0);
+ return csCmdRes.cursor.id;
+}
+
+// Timeout values used in the subsequent getMore tests.
+const quarterSec = 250;
+const halfSec = 500;
+const oneSec = 2 * halfSec;
+const fiveSecs = 5 * oneSec;
+const fiveMins = 60 * fiveSecs;
+const thirtyMins = 6 * fiveMins;
+const testComment = "change stream sharded maxTimeMS test";
+
+// Open a $changeStream on the empty, inactive collection.
+let csCursorId = reopenChangeStream();
+
+// Confirm that getMores without an explicit maxTimeMS default to one second on the shards.
+assert.commandWorked(mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
+for (let shardDB of [shard0DB, shard1DB]) {
+ // The mongos is guaranteed to have already delivered getMores to each of the shards.
+ // However, the mongos await time can expire prior to the await time on the shards.
+ // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
+ // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
+ // on the shards to expire, at which point the getMores will appear in the profile
+ // collection.
+ assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
+ shardDB, mongosColl.getFullName(), testComment, oneSec));
+}
+
+// Verify that with no activity on the shards, a $changeStream with maxTimeMS waits for the full
+// duration on mongoS. Allow some leniency since the server-side wait may wake spuriously.
+csCursorId = reopenChangeStream(csCursorId);
+let startTime = (new Date()).getTime();
+assert.commandWorked(mongosDB.runCommand(
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: fiveSecs}));
+assert.gte((new Date()).getTime() - startTime, fiveSecs - halfSec);
+
+// Confirm that each getMore dispatched to the shards during this period had a maxTimeMS of 1s.
+for (let shardDB of [shard0DB, shard1DB]) {
+ assertAllGetMoresHaveTimeout(shardDB, mongosColl.getFullName(), testComment, oneSec);
+}
+
+// Issue a getMore with a sub-second maxTimeMS. This should propagate to the shards as-is.
+csCursorId = reopenChangeStream(csCursorId);
+assert.commandWorked(mongosDB.runCommand(
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: halfSec}));
+
+for (let shardDB of [shard0DB, shard1DB]) {
+ // The mongos is guaranteed to have already delivered getMores to each of the shards.
+ // However, the mongos await time can expire prior to the await time on the shards.
+ // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
+ // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
+ // on the shards to expire, at which point the getMores will appear in the profile
+ // collection.
+ assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
+ shardDB, mongosColl.getFullName(), testComment, halfSec));
+}
+
+// Write a document to shard0, and confirm that - despite the fact that shard1 is still idle - a
+// getMore with a high maxTimeMS returns the document before this timeout expires.
+csCursorId = reopenChangeStream(csCursorId);
+assert.writeOK(mongosColl.insert({_id: -1}));
+startTime = (new Date()).getTime();
+const csResult = assert.commandWorked(mongosDB.runCommand(
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
+assert.lte((new Date()).getTime() - startTime, fiveMins);
+assert.docEq(csResult.cursor.nextBatch[0].fullDocument, {_id: -1});
+
+// Open a change stream with the default maxTimeMS. Then verify that if the client starts
+// issuing getMores with a subsecond maxTimeMS, that mongos eventually schedules getMores on the
+// shards with this subsecond maxTimeMS value.
+csCursorId = reopenChangeStream(csCursorId);
+assert.commandWorked(mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
+assert.soon(function() {
+ // Run a getMore with a 250ms maxTimeMS against mongos.
assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: fiveSecs}));
- assert.gte((new Date()).getTime() - startTime, fiveSecs - halfSec);
-
- // Confirm that each getMore dispatched to the shards during this period had a maxTimeMS of 1s.
- for (let shardDB of[shard0DB, shard1DB]) {
- assertAllGetMoresHaveTimeout(shardDB, mongosColl.getFullName(), testComment, oneSec);
- }
-
- // Issue a getMore with a sub-second maxTimeMS. This should propagate to the shards as-is.
- csCursorId = reopenChangeStream(csCursorId);
- assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: halfSec}));
-
- for (let shardDB of[shard0DB, shard1DB]) {
- // The mongos is guaranteed to have already delivered getMores to each of the shards.
- // However, the mongos await time can expire prior to the await time on the shards.
- // Therefore, the getMore on mongos succeeding doesn't guarantee that the getMores on the
- // shards have already been profiled. We use an assert.soon() here to wait for the maxTimeMS
- // on the shards to expire, at which point the getMores will appear in the profile
- // collection.
- assert.soon(() => profilerHasAtLeastOneMatchingGetMore(
- shardDB, mongosColl.getFullName(), testComment, halfSec));
- }
-
- // Write a document to shard0, and confirm that - despite the fact that shard1 is still idle - a
- // getMore with a high maxTimeMS returns the document before this timeout expires.
- csCursorId = reopenChangeStream(csCursorId);
- assert.writeOK(mongosColl.insert({_id: -1}));
- startTime = (new Date()).getTime();
- const csResult = assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
- assert.lte((new Date()).getTime() - startTime, fiveMins);
- assert.docEq(csResult.cursor.nextBatch[0].fullDocument, {_id: -1});
-
- // Open a change stream with the default maxTimeMS. Then verify that if the client starts
- // issuing getMores with a subsecond maxTimeMS, that mongos eventually schedules getMores on the
- // shards with this subsecond maxTimeMS value.
- csCursorId = reopenChangeStream(csCursorId);
- assert.commandWorked(
- mongosDB.runCommand({getMore: csCursorId, collection: mongosColl.getName()}));
- assert.soon(function() {
- // Run a getMore with a 250ms maxTimeMS against mongos.
- assert.commandWorked(mongosDB.runCommand(
- {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: quarterSec}));
- // Check whether all shards now have a getMore with 250ms maxTimeMS recorded in their
- // profile collections.
- return [shard0DB, shard1DB].every(function(shardDB) {
- return profilerHasAtLeastOneMatchingGetMore(
- shardDB, mongosColl.getFullName(), testComment, quarterSec);
- });
+ {getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: quarterSec}));
+ // Check whether all shards now have a getMore with 250ms maxTimeMS recorded in their
+ // profile collections.
+ return [shard0DB, shard1DB].every(function(shardDB) {
+ return profilerHasAtLeastOneMatchingGetMore(
+ shardDB, mongosColl.getFullName(), testComment, quarterSec);
});
+});
- st.stop();
+st.stop();
})();
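
The timing assertions above reduce to a single observable: with no activity, an
awaitData getMore should block for roughly its maxTimeMS. A minimal sketch of
that measurement, assuming a deployment that supports $changeStream; the
collection name and the leniency value are illustrative:

(function() {
'use strict';
const collName = "maxTimeSketch";  // hypothetical collection name
// Open a change stream cursor with an empty first batch, as the test does.
const res = assert.commandWorked(db.runCommand(
    {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {batchSize: 0}}));
const halfSec = 500;
const start = (new Date()).getTime();
// With no writes, this getMore should wait out (roughly) its maxTimeMS.
assert.commandWorked(
    db.runCommand({getMore: res.cursor.id, collection: collName, maxTimeMS: halfSec}));
// Allow some leniency, as the test above does, since the wait may wake early.
assert.gte((new Date()).getTime() - start, halfSec - 100);
})();
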
diff --git a/jstests/sharding/change_stream_lookup_single_shard_cluster.js b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
index 60ded0f352d..53fed919125 100644
--- a/jstests/sharding/change_stream_lookup_single_shard_cluster.js
+++ b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
@@ -3,63 +3,63 @@
// sharded collection.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+// This test only works on storage engines that support committed reads, skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- // Create a cluster with only 1 shard.
- const st = new ShardingTest({
- shards: 1,
- rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
+// Create a cluster with only 1 shard.
+const st = new ShardingTest({
+ shards: 1,
+ rs: {nodes: 1, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- // Enable sharding, shard on _id, and insert a test document which will be updated later.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- assert.writeOK(mongosColl.insert({_id: 1}));
+// Enable sharding, shard on _id, and insert a test document which will be updated later.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+assert.writeOK(mongosColl.insert({_id: 1}));
- // Verify that the pipeline splits and merges on mongoS despite only targeting a single shard.
- const explainPlan = assert.commandWorked(
- mongosColl.explain().aggregate([{$changeStream: {fullDocument: "updateLookup"}}]));
- assert.neq(explainPlan.splitPipeline, null);
- assert.eq(explainPlan.mergeType, "mongos");
+// Verify that the pipeline splits and merges on mongoS despite only targeting a single shard.
+const explainPlan = assert.commandWorked(
+ mongosColl.explain().aggregate([{$changeStream: {fullDocument: "updateLookup"}}]));
+assert.neq(explainPlan.splitPipeline, null);
+assert.eq(explainPlan.mergeType, "mongos");
- // Open a $changeStream on the collection with 'updateLookup' and update the test doc.
- const stream = mongosColl.watch([], {fullDocument: "updateLookup"});
- const wholeDbStream = mongosDB.watch([], {fullDocument: "updateLookup"});
+// Open a $changeStream on the collection with 'updateLookup' and update the test doc.
+const stream = mongosColl.watch([], {fullDocument: "updateLookup"});
+const wholeDbStream = mongosDB.watch([], {fullDocument: "updateLookup"});
- mongosColl.update({_id: 1}, {$set: {updated: true}});
+mongosColl.update({_id: 1}, {$set: {updated: true}});
- // Verify that the document is successfully retrieved from the single-collection and whole-db
- // change streams.
- assert.soon(() => stream.hasNext());
- assert.docEq(stream.next().fullDocument, {_id: 1, updated: true});
+// Verify that the document is successfully retrieved from the single-collection and whole-db
+// change streams.
+assert.soon(() => stream.hasNext());
+assert.docEq(stream.next().fullDocument, {_id: 1, updated: true});
- assert.soon(() => wholeDbStream.hasNext());
- assert.docEq(wholeDbStream.next().fullDocument, {_id: 1, updated: true});
+assert.soon(() => wholeDbStream.hasNext());
+assert.docEq(wholeDbStream.next().fullDocument, {_id: 1, updated: true});
- stream.close();
- wholeDbStream.close();
+stream.close();
+wholeDbStream.close();
- st.stop();
+st.stop();
})();
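
The updateLookup behavior verified above can be exercised in isolation: open a
stream with fullDocument: "updateLookup", update a document, and expect the
post-image in the event. A minimal sketch, assuming change-stream support; the
collection name is illustrative:

(function() {
'use strict';
const coll = db.updateLookupSketch;  // hypothetical collection name
coll.drop();
assert.writeOK(coll.insert({_id: 1}));
const stream = coll.watch([], {fullDocument: "updateLookup"});
assert.writeOK(coll.update({_id: 1}, {$set: {updated: true}}));
assert.soon(() => stream.hasNext());
// With updateLookup, the event carries the current (post-update) document.
assert.docEq(stream.next().fullDocument, {_id: 1, updated: true});
stream.close();
})();
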
diff --git a/jstests/sharding/change_stream_metadata_notifications.js b/jstests/sharding/change_stream_metadata_notifications.js
index f535012a7b2..48138d089ec 100644
--- a/jstests/sharding/change_stream_metadata_notifications.js
+++ b/jstests/sharding/change_stream_metadata_notifications.js
@@ -2,154 +2,154 @@
// Legacy getMore fails after dropping the database that the original cursor is on.
// @tags: [requires_find_command]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on a field called 'shardKey'.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
-
- let changeStream = mongosColl.watch();
-
- // We awaited the replication of the first writes, so the change stream shouldn't return them.
- assert.writeOK(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
- assert.writeOK(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
- assert.writeOK(mongosColl.insert({shardKey: 2, _id: 2}));
-
- // Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
- // entry.
- mongosColl.drop();
-
- // Test that we see the two writes that happened before the collection drop.
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey.shardKey, -1);
- const resumeTokenFromFirstUpdate = next._id;
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey.shardKey, 1);
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: 2});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- // With an explicit collation, test that we can resume from before the collection drop.
- changeStream = mongosColl.watch(
- [], {resumeAfter: resumeTokenFromFirstUpdate, collation: {locale: "simple"}});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {shardKey: 1, _id: 1});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {shardKey: 2, _id: 2});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- // Test that we can resume the change stream without specifying an explicit collation.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
- cursor: {}
- }));
-
- // Recreate and shard the collection.
- assert.commandWorked(mongosDB.createCollection(mongosColl.getName()));
-
- // Shard the test collection on shardKey.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
-
- // Test that resuming the change stream on the recreated collection succeeds, since we will not
- // attempt to inherit the collection's default collation and can therefore ignore the new UUID.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
- cursor: {}
- }));
-
- // Recreate the collection as unsharded and open a change stream on it.
- assertDropAndRecreateCollection(mongosDB, mongosColl.getName());
-
- changeStream = mongosColl.watch();
-
- // Drop the database and verify that the stream returns a collection drop followed by an
- // invalidate.
- assert.commandWorked(mongosDB.dropDatabase());
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- st.stop();
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ }
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on a field called 'shardKey'.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));
+
+let changeStream = mongosColl.watch();
+
+// We awaited the replication of the first writes, so the change stream shouldn't return them.
+assert.writeOK(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
+assert.writeOK(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
+assert.writeOK(mongosColl.insert({shardKey: 2, _id: 2}));
+
+// Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
+// entry.
+mongosColl.drop();
+
+// Test that we see the writes that happened before the collection drop.
+assert.soon(() => changeStream.hasNext());
+let next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey.shardKey, -1);
+const resumeTokenFromFirstUpdate = next._id;
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey.shardKey, 1);
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert");
+assert.eq(next.documentKey, {_id: 2});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "invalidate");
+assert(changeStream.isExhausted());
+
+// With an explicit collation, test that we can resume from before the collection drop.
+changeStream =
+ mongosColl.watch([], {resumeAfter: resumeTokenFromFirstUpdate, collation: {locale: "simple"}});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey, {shardKey: 1, _id: 1});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert");
+assert.eq(next.documentKey, {shardKey: 2, _id: 2});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "invalidate");
+assert(changeStream.isExhausted());
+
+// Test that we can resume the change stream without specifying an explicit collation.
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
+ cursor: {}
+}));
+
+// Recreate and shard the collection.
+assert.commandWorked(mongosDB.createCollection(mongosColl.getName()));
+
+// Shard the test collection on shardKey.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
+
+// Test that resuming the change stream on the recreated collection succeeds, since we will not
+// attempt to inherit the collection's default collation and can therefore ignore the new UUID.
+assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
+ cursor: {}
+}));
+
+// Recreate the collection as unsharded and open a change stream on it.
+assertDropAndRecreateCollection(mongosDB, mongosColl.getName());
+
+changeStream = mongosColl.watch();
+
+// Drop the database and verify that the stream returns a collection drop followed by an
+// invalidate.
+assert.commandWorked(mongosDB.dropDatabase());
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});
+
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "invalidate");
+assert(changeStream.isExhausted());
+
+st.stop();
})();
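
For context, the resume pattern this test exercises reduces to the shell sketch below; the collection name and the single write are illustrative assumptions, not part of the test.

// Each change document carries a resume token in its _id field. Capturing
// that token lets a client reopen the stream from the same point later,
// with or without an explicit collation.
let stream = db.coll.watch();
assert.writeOK(db.coll.insert({_id: 1}));
assert.soon(() => stream.hasNext());
const token = stream.next()._id;
stream.close();
stream = db.coll.watch([], {resumeAfter: token});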
diff --git a/jstests/sharding/change_stream_no_shards.js b/jstests/sharding/change_stream_no_shards.js
index e92c91d7322..8e682172f9c 100644
--- a/jstests/sharding/change_stream_no_shards.js
+++ b/jstests/sharding/change_stream_no_shards.js
@@ -3,37 +3,37 @@
* set with a cursorID of zero.
*/
(function() {
- const st = new ShardingTest({shards: 0, config: 1});
+const st = new ShardingTest({shards: 0, config: 1});
- const adminDB = st.s.getDB("admin");
- const testDB = st.s.getDB("test");
+const adminDB = st.s.getDB("admin");
+const testDB = st.s.getDB("test");
- // Test that attempting to open a stream on a single collection results in an empty, closed
- // cursor response.
- let csCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: "testing", pipeline: [{$changeStream: {}}], cursor: {}}));
- assert.docEq(csCmdRes.cursor.firstBatch, []);
- assert.eq(csCmdRes.cursor.id, 0);
+// Test that attempting to open a stream on a single collection results in an empty, closed
+// cursor response.
+let csCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: "testing", pipeline: [{$changeStream: {}}], cursor: {}}));
+assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.eq(csCmdRes.cursor.id, 0);
- // Test that attempting to open a whole-db stream results in an empty, closed cursor response.
- csCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}));
- assert.docEq(csCmdRes.cursor.firstBatch, []);
- assert.eq(csCmdRes.cursor.id, 0);
+// Test that attempting to open a whole-db stream results in an empty, closed cursor response.
+csCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}));
+assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.eq(csCmdRes.cursor.id, 0);
- // Test that attempting to open a cluster-wide stream results in an empty, closed cursor
- // response.
- csCmdRes = assert.commandWorked(adminDB.runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}], cursor: {}}));
- assert.docEq(csCmdRes.cursor.firstBatch, []);
- assert.eq(csCmdRes.cursor.id, 0);
+// Test that attempting to open a cluster-wide stream results in an empty, closed cursor
+// response.
+csCmdRes = assert.commandWorked(adminDB.runCommand(
+ {aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}], cursor: {}}));
+assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.eq(csCmdRes.cursor.id, 0);
- // Test that a regular, non-$changeStream aggregation also results in an empty cursor when no
- // shards are present.
- const nonCsCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: "testing", pipeline: [{$match: {}}], cursor: {}}));
- assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
- assert.eq(nonCsCmdRes.cursor.id, 0);
+// Test that a regular, non-$changeStream aggregation also results in an empty cursor when no
+// shards are present.
+const nonCsCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: "testing", pipeline: [{$match: {}}], cursor: {}}));
+assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
+assert.eq(nonCsCmdRes.cursor.id, 0);
- st.stop();
+st.stop();
})();
\ No newline at end of file
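
The invariant asserted above can be checked in isolation; the sketch below assumes 'db' is a database handle obtained from a shardless mongos.

// With no shards present, the initial reply already carries cursor id 0:
// the cursor is closed server-side and no getMore will ever be issued.
const res = assert.commandWorked(
    db.runCommand({aggregate: "testing", pipeline: [{$changeStream: {}}], cursor: {}}));
assert.eq(res.cursor.id, 0);
assert.eq(res.cursor.firstBatch.length, 0);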
diff --git a/jstests/sharding/change_stream_read_preference.js b/jstests/sharding/change_stream_read_preference.js
index 4f35b42424a..1c4129e9952 100644
--- a/jstests/sharding/change_stream_read_preference.js
+++ b/jstests/sharding/change_stream_read_preference.js
@@ -2,139 +2,136 @@
// user.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/libs/profiler.js'); // For various profiler helpers.
-
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
-
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
-
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const st = new ShardingTest({
- name: "change_stream_read_pref",
- shards: 2,
- rs: {
- nodes: 2,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- },
+"use strict";
+
+load('jstests/libs/profiler.js'); // For various profiler helpers.
+
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
+
+// This test only works on storage engines that support committed reads; skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ name: "change_stream_read_pref",
+ shards: 2,
+ rs: {
+ nodes: 2,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ },
+});
+
+const dbName = jsTestName();
+const mongosDB = st.s0.getDB(dbName);
+const mongosColl = mongosDB[jsTestName()];
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Turn on the profiler.
+for (let rs of [st.rs0, st.rs1]) {
+ assert.commandWorked(rs.getPrimary().getDB(dbName).setProfilingLevel(2));
+ assert.commandWorked(rs.getSecondary().getDB(dbName).setProfilingLevel(2));
+}
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Test that change streams go to the primary by default.
+let changeStreamComment = "change stream against primary";
+const primaryStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
+ {comment: changeStreamComment});
+
+assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+
+assert.soon(() => primaryStream.hasNext());
+assert.eq(primaryStream.next().fullDocument, {_id: -1, updated: true});
+assert.soon(() => primaryStream.hasNext());
+assert.eq(primaryStream.next().fullDocument, {_id: 1, updated: true});
+
+for (let rs of [st.rs0, st.rs1]) {
+ const primaryDB = rs.getPrimary().getDB(dbName);
+ // Test that the change stream itself goes to the primary. There might be more than one if
+ // we needed multiple getMores to retrieve the changes.
+ // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
+ // because the initial aggregate will not show up.
+ profilerHasAtLeastOneMatchingEntryOrThrow(
+ {profileDB: primaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
+
+ // Test that the update lookup goes to the primary as well.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryDB,
+ filter: {op: "query", ns: mongosColl.getFullName(), "command.comment": changeStreamComment}
});
+}
+
+primaryStream.close();
+
+// Test that change streams go to the secondary when the readPreference is {mode: "secondary"}.
+changeStreamComment = 'change stream against secondary';
+const secondaryStream =
+ mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
+ {comment: changeStreamComment, $readPreference: {mode: "secondary"}});
+
+assert.writeOK(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
+
+assert.soon(() => secondaryStream.hasNext());
+assert.eq(secondaryStream.next().fullDocument, {_id: -1, updated: true, updatedCount: 2});
+assert.soon(() => secondaryStream.hasNext());
+assert.eq(secondaryStream.next().fullDocument, {_id: 1, updated: true, updatedCount: 2});
+
+for (let rs of [st.rs0, st.rs1]) {
+ const secondaryDB = rs.getSecondary().getDB(dbName);
+ // Test that the change stream itself goes to the secondary. There might be more than one if
+ // we needed multiple getMores to retrieve the changes.
+ // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
+ // because the initial aggregate will not show up.
+ profilerHasAtLeastOneMatchingEntryOrThrow(
+ {profileDB: secondaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
+
+ // Test that the update lookup goes to the secondary as well.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: secondaryDB,
+ filter: {
+ op: "query",
+ ns: mongosColl.getFullName(),
+ "command.comment": changeStreamComment,
+ // We need to filter out any profiler entries with a stale config - this is the
+ // first read on this secondary with a readConcern specified, so it is the first
+ // read on this secondary that will enforce shard version.
+ errCode: {$ne: ErrorCodes.StaleConfig}
+ }
+ });
+}
- const dbName = jsTestName();
- const mongosDB = st.s0.getDB(dbName);
- const mongosColl = mongosDB[jsTestName()];
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Turn on the profiler.
- for (let rs of[st.rs0, st.rs1]) {
- assert.commandWorked(rs.getPrimary().getDB(dbName).setProfilingLevel(2));
- assert.commandWorked(rs.getSecondary().getDB(dbName).setProfilingLevel(2));
- }
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Test that change streams go to the primary by default.
- let changeStreamComment = "change stream against primary";
- const primaryStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
- {comment: changeStreamComment});
-
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
-
- assert.soon(() => primaryStream.hasNext());
- assert.eq(primaryStream.next().fullDocument, {_id: -1, updated: true});
- assert.soon(() => primaryStream.hasNext());
- assert.eq(primaryStream.next().fullDocument, {_id: 1, updated: true});
-
- for (let rs of[st.rs0, st.rs1]) {
- const primaryDB = rs.getPrimary().getDB(dbName);
- // Test that the change stream itself goes to the primary. There might be more than one if
- // we needed multiple getMores to retrieve the changes.
- // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
- // because the initial aggregate will not show up.
- profilerHasAtLeastOneMatchingEntryOrThrow(
- {profileDB: primaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
-
- // Test that the update lookup goes to the primary as well.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(), "command.comment": changeStreamComment
- }
- });
- }
-
- primaryStream.close();
-
- // Test that change streams go to the secondary when the readPreference is {mode: "secondary"}.
- changeStreamComment = 'change stream against secondary';
- const secondaryStream =
- mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
- {comment: changeStreamComment, $readPreference: {mode: "secondary"}});
-
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updatedCount: 2}}));
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
-
- assert.soon(() => secondaryStream.hasNext());
- assert.eq(secondaryStream.next().fullDocument, {_id: -1, updated: true, updatedCount: 2});
- assert.soon(() => secondaryStream.hasNext());
- assert.eq(secondaryStream.next().fullDocument, {_id: 1, updated: true, updatedCount: 2});
-
- for (let rs of[st.rs0, st.rs1]) {
- const secondaryDB = rs.getSecondary().getDB(dbName);
- // Test that the change stream itself goes to the secondary. There might be more than one if
- // we needed multiple getMores to retrieve the changes.
- // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore
- // because the initial aggregate will not show up.
- profilerHasAtLeastOneMatchingEntryOrThrow(
- {profileDB: secondaryDB, filter: {'originatingCommand.comment': changeStreamComment}});
-
- // Test that the update lookup goes to the secondary as well.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: secondaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(), "command.comment": changeStreamComment,
- // We need to filter out any profiler entries with a stale config - this is the
- // first read on this secondary with a readConcern specified, so it is the first
- // read on this secondary that will enforce shard version.
- errCode: {$ne: ErrorCodes.StaleConfig}
- }
- });
- }
-
- secondaryStream.close();
- st.stop();
+secondaryStream.close();
+st.stop();
}());
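
Stripped of the profiler verification, the routing pattern above is simply an aggregate carrying a $readPreference option and a distinctive comment; the collection handle and comment string below are illustrative.

// Route both the $changeStream aggregate and its update lookups to
// secondaries; the comment makes the resulting getMores easy to find in
// db.system.profile on the secondary.
const comment = "example change stream against secondary";
const cursor = db.coll.aggregate([{$changeStream: {fullDocument: "updateLookup"}}],
                                 {comment: comment, $readPreference: {mode: "secondary"}});
// On a profiled secondary: db.system.profile.find({"originatingCommand.comment": comment})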
diff --git a/jstests/sharding/change_stream_resume_from_different_mongos.js b/jstests/sharding/change_stream_resume_from_different_mongos.js
index 7efe9e06a36..73914b51af1 100644
--- a/jstests/sharding/change_stream_resume_from_different_mongos.js
+++ b/jstests/sharding/change_stream_resume_from_different_mongos.js
@@ -1,99 +1,99 @@
// Test resuming a change stream on a mongos other than the one the change stream was started on.
// @tags: [uses_change_streams]
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {nodes: 3, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
+
+for (let key of Object.keys(ChangeStreamWatchMode)) {
+ const watchMode = ChangeStreamWatchMode[key];
+ jsTestLog("Running test for mode " + watchMode);
+
+ const s0DB = st.s0.getDB("test");
+ const s1DB = st.s1.getDB("test");
+ const coll = assertDropAndRecreateCollection(s0DB, "change_stream_failover");
+
+ const nDocs = 100;
+
+ // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
+ st.shardColl(
+ coll,
+ {_id: 1}, // key
+ {_id: nDocs / 2}, // split
+ {_id: nDocs / 2 + 1}, // move
+ "test", // dbName
+ false // waitForDelete
+ );
+
+ // Open a change stream.
+ const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s0DB));
+ let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
+
+ // Be sure we can read from the change stream. Write some documents that will end up on
+ // each shard. Use a bulk write to increase the chance that two of the writes get the same
+ // cluster time on each shard.
+ const kIds = [];
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs / 2; i++) {
+ // Interleave elements which will end up on shard 0 with elements that will end up on
+ // shard 1.
+ kIds.push(i);
+ bulk.insert({_id: i});
+ kIds.push(i + nDocs / 2);
+ bulk.insert({_id: i + nDocs / 2});
+ }
+ assert.commandWorked(bulk.execute());
+
+ // Read from the change stream. The order of the documents isn't guaranteed because we
+ // performed a bulk write.
+ const firstChange = cst.getOneChange(changeStream);
+ const docsFoundInOrder = [firstChange];
+ for (let i = 0; i < nDocs - 1; i++) {
+ const change = cst.getOneChange(changeStream);
+ assert.docEq(change.ns, {db: s0DB.getName(), coll: coll.getName()});
+ assert.eq(change.operationType, "insert");
+
+ docsFoundInOrder.push(change);
+ }
+
+ // Assert that we found the documents we inserted (in any order).
+ assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
+ cst.cleanUp();
+
+ // Now resume using the resume token from the first change on a different mongos.
+ const otherCst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s1DB));
+
+ const resumeCursor =
+ otherCst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
+
+ // Get the resume tokens for each change that occurred.
+ const resumeTokens = [firstChange._id];
+ for (let i = 0; i < kIds.length - 1; i++) {
+ resumeTokens.push(otherCst.getOneChange(resumeCursor)._id);
}
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {nodes: 3, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
-
- for (let key of Object.keys(ChangeStreamWatchMode)) {
- const watchMode = ChangeStreamWatchMode[key];
- jsTestLog("Running test for mode " + watchMode);
-
- const s0DB = st.s0.getDB("test");
- const s1DB = st.s1.getDB("test");
- const coll = assertDropAndRecreateCollection(s0DB, "change_stream_failover");
-
- const nDocs = 100;
-
- // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
- st.shardColl(coll,
- {_id: 1}, // key
- {_id: nDocs / 2}, // split
- {_id: nDocs / 2 + 1}, // move
- "test", // dbName
- false // waitForDelete
- );
-
- // Open a change stream.
- const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s0DB));
- let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
-
- // Be sure we can read from the change stream. Write some documents that will end up on
- // each shard. Use a bulk write to increase the chance that two of the writes get the same
- // cluster time on each shard.
- const kIds = [];
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs / 2; i++) {
- // Interleave elements which will end up on shard 0 with elements that will end up on
- // shard 1.
- kIds.push(i);
- bulk.insert({_id: i});
- kIds.push(i + nDocs / 2);
- bulk.insert({_id: i + nDocs / 2});
- }
- assert.commandWorked(bulk.execute());
-
- // Read from the change stream. The order of the documents isn't guaranteed because we
- // performed a bulk write.
- const firstChange = cst.getOneChange(changeStream);
- const docsFoundInOrder = [firstChange];
- for (let i = 0; i < nDocs - 1; i++) {
- const change = cst.getOneChange(changeStream);
- assert.docEq(change.ns, {db: s0DB.getName(), coll: coll.getName()});
- assert.eq(change.operationType, "insert");
-
- docsFoundInOrder.push(change);
- }
-
- // Assert that we found the documents we inserted (in any order).
- assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
- cst.cleanUp();
-
- // Now resume using the resume token from the first change on a different mongos.
- const otherCst =
- new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, s1DB));
-
- const resumeCursor = otherCst.getChangeStream(
- {watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
-
- // Get the resume tokens for each change that occurred.
- const resumeTokens = [firstChange._id];
- for (let i = 0; i < kIds.length - 1; i++) {
- resumeTokens.push(otherCst.getOneChange(resumeCursor)._id);
- }
-
- // Check that resuming from each possible resume token works.
- for (let i = 0; i < resumeTokens.length; i++) {
- const cursor = otherCst.getChangeStream(
- {watchMode: watchMode, coll: coll, resumeAfter: resumeTokens[i]});
- otherCst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: docsFoundInOrder.splice(i + 1)});
- }
- otherCst.cleanUp();
+ // Check that resuming from each possible resume token works.
+ for (let i = 0; i < resumeTokens.length; i++) {
+ const cursor = otherCst.getChangeStream(
+ {watchMode: watchMode, coll: coll, resumeAfter: resumeTokens[i]});
+ otherCst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: docsFoundInOrder.splice(i + 1)});
}
+ otherCst.cleanUp();
+}
- st.stop();
+st.stop();
}());
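
Because the resume token travels inside each change document, nothing ties it to the mongos that produced it. A reduced sketch of the pattern (the second router handle 'otherMongos' is an assumption):

// Capture a token through one mongos ...
const cs = db.coll.watch();
assert.writeOK(db.coll.insert({_id: 0}));
assert.soon(() => cs.hasNext());
const token = cs.next()._id;
cs.close();
// ... and resume through another; the token encodes a cluster time, not a
// router, so any mongos can continue the stream.
const resumed = otherMongos.getDB("test").coll.watch([], {resumeAfter: token});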
diff --git a/jstests/sharding/change_stream_shard_failover.js b/jstests/sharding/change_stream_shard_failover.js
index f4b3007bd30..f5675aedd04 100644
--- a/jstests/sharding/change_stream_shard_failover.js
+++ b/jstests/sharding/change_stream_shard_failover.js
@@ -9,104 +9,104 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 2, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
+});
+
+const sDB = st.s.getDB("test");
+const kCollName = "change_stream_failover";
+
+for (let key of Object.keys(ChangeStreamWatchMode)) {
+ const watchMode = ChangeStreamWatchMode[key];
+ jsTestLog("Running test for mode " + watchMode);
+
+ const coll = assertDropAndRecreateCollection(sDB, kCollName);
+
+ const nDocs = 100;
+
+ // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
+ st.shardColl(
+ coll,
+ {_id: 1}, // key
+ {_id: nDocs / 2}, // split
+ {_id: nDocs / 2 + 1}, // move
+ "test", // dbName
+ false // waitForDelete
+ );
+
+ // Be sure we'll only read from the primaries.
+ st.s.setReadPref("primary");
+
+ // Open a changeStream.
+ const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, sDB));
+ let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
+
+ // Be sure we can read from the change stream. Write some documents that will end up on
+ // each shard. Use a bulk write to increase the chance that two of the writes get the same
+ // cluster time on each shard.
+ const bulk = coll.initializeUnorderedBulkOp();
+ const kIds = [];
+ for (let i = 0; i < nDocs / 2; i++) {
+ // Interleave elements which will end up on shard 0 with elements that will end up on
+ // shard 1.
+ kIds.push(i);
+ bulk.insert({_id: i});
+ kIds.push(i + nDocs / 2);
+ bulk.insert({_id: i + nDocs / 2});
}
+ // Use {w: "majority"} so that we're still guaranteed to be able to read after the
+ // failover.
+ assert.commandWorked(bulk.execute({w: "majority"}));
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 2, setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}}
- });
-
- const sDB = st.s.getDB("test");
- const kCollName = "change_stream_failover";
-
- for (let key of Object.keys(ChangeStreamWatchMode)) {
- const watchMode = ChangeStreamWatchMode[key];
- jsTestLog("Running test for mode " + watchMode);
-
- const coll = assertDropAndRecreateCollection(sDB, kCollName);
-
- const nDocs = 100;
-
- // Split so ids < nDocs / 2 are for one shard, ids >= nDocs / 2 + 1 for another.
- st.shardColl(coll,
- {_id: 1}, // key
- {_id: nDocs / 2}, // split
- {_id: nDocs / 2 + 1}, // move
- "test", // dbName
- false // waitForDelete
- );
-
- // Be sure we'll only read from the primaries.
- st.s.setReadPref("primary");
-
- // Open a changeStream.
- const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, sDB));
- let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
-
- // Be sure we can read from the change stream. Write some documents that will end up on
- // each shard. Use a bulk write to increase the chance that two of the writes get the same
- // cluster time on each shard.
- const bulk = coll.initializeUnorderedBulkOp();
- const kIds = [];
- for (let i = 0; i < nDocs / 2; i++) {
- // Interleave elements which will end up on shard 0 with elements that will end up on
- // shard 1.
- kIds.push(i);
- bulk.insert({_id: i});
- kIds.push(i + nDocs / 2);
- bulk.insert({_id: i + nDocs / 2});
- }
- // Use {w: "majority"} so that we're still guaranteed to be able to read after the
- // failover.
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- const firstChange = cst.getOneChange(changeStream);
-
- // Make one of the primaries step down.
- const oldPrimary = st.rs0.getPrimary();
-
- assert.commandWorked(oldPrimary.adminCommand({replSetStepDown: 300, force: true}));
-
- st.rs0.awaitNodesAgreeOnPrimary();
- const newPrimary = st.rs0.getPrimary();
- // Be sure the new primary is not the previous primary.
- assert.neq(newPrimary.port, oldPrimary.port);
-
- // Read the remaining documents from the original stream.
- const docsFoundInOrder = [firstChange];
- for (let i = 0; i < nDocs - 1; i++) {
- const change = cst.getOneChange(changeStream);
- assert.docEq(change.ns, {db: sDB.getName(), coll: coll.getName()});
- assert.eq(change.operationType, "insert");
-
- docsFoundInOrder.push(change);
- }
-
- // Assert that we found the documents we inserted (in any order).
- assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
-
- // Now resume using the resume token from the first change (which was read before the
- // failover). The mongos should talk to the new primary.
- const resumeCursor =
- cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
-
- // Be sure we can read the remaining changes in the same order as we read them initially.
- cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: docsFoundInOrder.splice(1)});
- cst.cleanUp();
-
- // Reset the original primary's election timeout.
- assert.commandWorked(oldPrimary.adminCommand({replSetFreeze: 0}));
+ const firstChange = cst.getOneChange(changeStream);
+
+ // Make one of the primaries step down.
+ const oldPrimary = st.rs0.getPrimary();
+
+ assert.commandWorked(oldPrimary.adminCommand({replSetStepDown: 300, force: true}));
+
+ st.rs0.awaitNodesAgreeOnPrimary();
+ const newPrimary = st.rs0.getPrimary();
+ // Be sure the new primary is not the previous primary.
+ assert.neq(newPrimary.port, oldPrimary.port);
+
+ // Read the remaining documents from the original stream.
+ const docsFoundInOrder = [firstChange];
+ for (let i = 0; i < nDocs - 1; i++) {
+ const change = cst.getOneChange(changeStream);
+ assert.docEq(change.ns, {db: sDB.getName(), coll: coll.getName()});
+ assert.eq(change.operationType, "insert");
+
+ docsFoundInOrder.push(change);
}
- st.stop();
+ // Assert that we found the documents we inserted (in any order).
+ assert.setEq(new Set(kIds), new Set(docsFoundInOrder.map(doc => doc.fullDocument._id)));
+
+ // Now resume using the resume token from the first change (which was read before the
+ // failover). The mongos should talk to the new primary.
+ const resumeCursor =
+ cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
+
+ // Be sure we can read the remaining changes in the same order as we read them initially.
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: docsFoundInOrder.splice(1)});
+ cst.cleanUp();
+
+ // Reset the original primary's election timeout.
+ assert.commandWorked(oldPrimary.adminCommand({replSetFreeze: 0}));
+}
+
+st.stop();
}());
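
The failover at the core of this test is the usual stepdown-then-resume idiom; 'rs0' (a ReplSetTest handle), 'coll', and 'savedToken' below are assumptions standing in for the test's own variables.

// Step the primary down and wait for a successor. A change stream resumed
// afterwards is routed by mongos to the new primary with no client changes.
const oldPrimary = rs0.getPrimary();
assert.commandWorked(oldPrimary.adminCommand({replSetStepDown: 300, force: true}));
rs0.awaitNodesAgreeOnPrimary();
assert.neq(rs0.getPrimary().port, oldPrimary.port);
const resumed = coll.watch([], {resumeAfter: savedToken});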
diff --git a/jstests/sharding/change_stream_show_migration_events.js b/jstests/sharding/change_stream_show_migration_events.js
index 4c75ca5fc2a..c07e059e4d1 100644
--- a/jstests/sharding/change_stream_show_migration_events.js
+++ b/jstests/sharding/change_stream_show_migration_events.js
@@ -3,265 +3,261 @@
// This test is connecting directly to a shard, and change streams require the getMore command.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- 'use strict';
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
-
- function checkEvents(changeStream, expectedEvents) {
- expectedEvents.forEach((event) => {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, event["operationType"]);
- assert.eq(next.documentKey, {_id: event["_id"]});
- });
- }
-
- function makeEvent(docId, opType) {
- assert(typeof docId === 'number');
- assert(typeof opType === 'string' && (opType === 'insert' || opType === 'delete'));
- return ({_id: docId, operationType: opType});
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
- if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
- jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
- return;
- }
-
- const rsNodeOptions = {
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- };
- const st =
- new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
-
- const mongos = st.s;
- const mongosColl = mongos.getCollection('test.chunk_mig');
- const mongosDB = mongos.getDB("test");
-
- // Enable sharding to inform mongos of the database, allowing us to open a cursor.
- assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
-
- // Make sure all chunks start on shard 0.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Open a change stream cursor before the collection is sharded.
- const changeStreamShardZero = st.shard0.getCollection('test.chunk_mig').aggregate([
- {$changeStream: {showMigrationEvents: true}}
- ]);
- const changeStreamShardOne = st.shard1.getCollection('test.chunk_mig').aggregate([
- {$changeStream: {showMigrationEvents: true}}
- ]);
-
- // Change streams opened on mongos do not allow showMigrationEvents to be set to true.
- assertErrorCode(mongosColl, [{$changeStream: {showMigrationEvents: true}}], 31123);
-
- assert(!changeStreamShardZero.hasNext(), "Do not expect any results yet");
- assert(!changeStreamShardOne.hasNext(), "Do not expect any results yet");
-
- jsTestLog("Sharding collection");
- // Once we have a cursor, actually shard the collection.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Insert two documents.
- assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
-
- // Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
-
- jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- var shardZeroEventsBeforeNewShard = [makeEvent(0, "insert"), makeEvent(20, "insert")];
- var shardZeroEventsAfterNewShard = [makeEvent(20, "delete")];
- var shardOneEvents = [makeEvent(20, "insert")];
-
- // Check that each change stream returns the expected events.
- checkEvents(changeStreamShardZero, shardZeroEventsBeforeNewShard);
- assert.soon(() => changeStreamShardZero.hasNext());
- let next = changeStreamShardZero.next();
- assert.eq(next.operationType, "kNewShardDetected");
-
- checkEvents(changeStreamShardZero, shardZeroEventsAfterNewShard);
- checkEvents(changeStreamShardOne, shardOneEvents);
-
- // Insert into both the chunks.
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
-
- // Split again, and move a second chunk to the first shard. The new chunks are:
- // [MinKey, 0), [0, 10), and [10, MaxKey].
- jsTestLog("Moving [MinKey, 0] to shard 1");
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
-
- var shardZeroEvents = [
- makeEvent(1, "insert"),
- makeEvent(0, "delete"),
- makeEvent(1, "delete"),
- makeEvent(-2, "insert"),
- ];
- shardOneEvents = [
- makeEvent(21, "insert"),
- makeEvent(0, "insert"),
- makeEvent(1, "insert"),
- makeEvent(2, "insert"),
- makeEvent(22, "insert"),
- ];
-
- // Check that each change stream returns the expected events.
- checkEvents(changeStreamShardZero, shardZeroEvents);
- checkEvents(changeStreamShardOne, shardOneEvents);
-
- // Make sure we're at the end of the stream.
- assert(!changeStreamShardZero.hasNext());
- assert(!changeStreamShardOne.hasNext());
-
- // Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
- // will not invalidate the change stream.
-
- // Insert into all three chunks.
- jsTestLog("Insert into all three chunks");
- assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -5},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Insert again, into all three chunks.
- assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
-
- // Check that each change stream returns the expected events.
- shardZeroEvents = [
- makeEvent(-3, "insert"),
- makeEvent(-3, "delete"),
- makeEvent(-2, "delete"),
- ];
- shardOneEvents = [
- makeEvent(3, "insert"),
- makeEvent(23, "insert"),
- makeEvent(-2, "insert"),
- makeEvent(-3, "insert"),
- makeEvent(-4, "insert"),
- makeEvent(4, "insert"),
- makeEvent(24, "insert"),
- ];
-
- checkEvents(changeStreamShardZero, shardZeroEvents);
- checkEvents(changeStreamShardOne, shardOneEvents);
-
- // Now test that adding a new shard and migrating a chunk to it will continue to
- // return the correct results.
- const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
- newShard.startSet({shardsvr: ''});
- newShard.initiate();
- assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
- const changeStreamNewShard = newShard.getPrimary().getCollection('test.chunk_mig').aggregate([
- {$changeStream: {showMigrationEvents: true}}
- ]);
-
- // At this point, there haven't been any migrations to that shard; check that the changeStream
- // works normally.
- assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
-
- shardOneEvents = [
- makeEvent(-5, "insert"),
- makeEvent(5, "insert"),
- makeEvent(25, "insert"),
- ];
-
- assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
- checkEvents(changeStreamShardOne, shardOneEvents);
- assert(!changeStreamNewShard.hasNext(), "Do not expect any results yet");
-
- assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
-
- // Now migrate a chunk to the new shard and verify the stream continues to return results
- // from both before and after the migration.
- jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
- assert.commandWorked(mongos.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: 20},
- to: "newShard",
- _waitForDelete: true
- }));
- assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
-
- let shardOneEventsBeforeNewShard = [
- makeEvent(16, "insert"),
- ];
- let shardOneEventsAfterNewShard = [
- makeEvent(16, "delete"),
- makeEvent(20, "delete"),
- makeEvent(21, "delete"),
- makeEvent(22, "delete"),
- makeEvent(23, "delete"),
- makeEvent(24, "delete"),
- makeEvent(25, "delete"),
- makeEvent(-6, "insert"),
- makeEvent(6, "insert"),
- ];
- let newShardEvents = [
- makeEvent(20, "insert"),
- makeEvent(21, "insert"),
- makeEvent(22, "insert"),
- makeEvent(23, "insert"),
- makeEvent(24, "insert"),
- makeEvent(25, "insert"),
- makeEvent(16, "insert"),
- makeEvent(26, "insert"),
- ];
-
- // Check that each change stream returns the expected events.
- assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
- checkEvents(changeStreamShardOne, shardOneEventsBeforeNewShard);
- assert.soon(() => changeStreamShardOne.hasNext());
- next = changeStreamShardOne.next();
- assert.eq(next.operationType, "kNewShardDetected");
- checkEvents(changeStreamShardOne, shardOneEventsAfterNewShard);
- checkEvents(changeStreamNewShard, newShardEvents);
-
- // Make sure all change streams are empty.
- assert(!changeStreamShardZero.hasNext());
- assert(!changeStreamShardOne.hasNext());
- assert(!changeStreamNewShard.hasNext());
-
- st.stop();
- newShard.stopSet();
+'use strict';
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode
+
+function checkEvents(changeStream, expectedEvents) {
+ expectedEvents.forEach((event) => {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, event["operationType"]);
+ assert.eq(next.documentKey, {_id: event["_id"]});
+ });
+}
+
+function makeEvent(docId, opType) {
+ assert(typeof docId === 'number');
+ assert(typeof opType === 'string' && (opType === 'insert' || opType === 'delete'));
+ return ({_id: docId, operationType: opType});
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// TODO WT-3864: Re-enable test for LSM once transaction visibility bug in LSM is resolved.
+if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
+ jsTestLog("Skipping test because we're running with WiredTiger's LSM tree.");
+ return;
+}
+
+const rsNodeOptions = {
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+};
+const st =
+ new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+
+const mongos = st.s;
+const mongosColl = mongos.getCollection('test.chunk_mig');
+const mongosDB = mongos.getDB("test");
+
+// Enable sharding to inform mongos of the database, allowing us to open a cursor.
+assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
+
+// Make sure all chunks start on shard 0.
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Open a change stream cursor before the collection is sharded.
+const changeStreamShardZero = st.shard0.getCollection('test.chunk_mig').aggregate([
+ {$changeStream: {showMigrationEvents: true}}
+]);
+const changeStreamShardOne = st.shard1.getCollection('test.chunk_mig').aggregate([
+ {$changeStream: {showMigrationEvents: true}}
+]);
+
+// Change streams opened on mongos do not allow showMigrationEvents to be set to true.
+assertErrorCode(mongosColl, [{$changeStream: {showMigrationEvents: true}}], 31123);
+
+assert(!changeStreamShardZero.hasNext(), "Do not expect any results yet");
+assert(!changeStreamShardOne.hasNext(), "Do not expect any results yet");
+
+jsTestLog("Sharding collection");
+// Once we have a cursor, actually shard the collection.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Insert two documents.
+assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+
+// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey].
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
+
+jsTestLog("Migrating [10, MaxKey] chunk to shard1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 20},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+var shardZeroEventsBeforeNewShard = [makeEvent(0, "insert"), makeEvent(20, "insert")];
+var shardZeroEventsAfterNewShard = [makeEvent(20, "delete")];
+var shardOneEvents = [makeEvent(20, "insert")];
+
+// Check that each change stream returns the expected events.
+checkEvents(changeStreamShardZero, shardZeroEventsBeforeNewShard);
+assert.soon(() => changeStreamShardZero.hasNext());
+let next = changeStreamShardZero.next();
+assert.eq(next.operationType, "kNewShardDetected");
+
+checkEvents(changeStreamShardZero, shardZeroEventsAfterNewShard);
+checkEvents(changeStreamShardOne, shardOneEvents);
+
+// Insert into both chunks.
+assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 21}, {writeConcern: {w: "majority"}}));
+
+// Split again, and move a second chunk to the first shard. The new chunks are:
+// [MinKey, 0), [0, 10), and [10, MaxKey].
+jsTestLog("Moving [MinKey, 0] to shard 1");
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: 5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 22}, {writeConcern: {w: "majority"}}));
+
+var shardZeroEvents = [
+ makeEvent(1, "insert"),
+ makeEvent(0, "delete"),
+ makeEvent(1, "delete"),
+ makeEvent(-2, "insert"),
+];
+shardOneEvents = [
+ makeEvent(21, "insert"),
+ makeEvent(0, "insert"),
+ makeEvent(1, "insert"),
+ makeEvent(2, "insert"),
+ makeEvent(22, "insert"),
+];
+
+// Check that each change stream returns the expected events.
+checkEvents(changeStreamShardZero, shardZeroEvents);
+checkEvents(changeStreamShardOne, shardOneEvents);
+
+// Make sure we're at the end of the stream.
+assert(!changeStreamShardZero.hasNext());
+assert(!changeStreamShardOne.hasNext());
+
+// Test that migrating the last chunk to shard 1 (meaning all chunks are now on the same shard)
+// will not invalidate the change stream.
+
+// Insert into all three chunks.
+jsTestLog("Insert into all three chunks");
+assert.writeOK(mongosColl.insert({_id: -3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 3}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 23}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("Move the [Minkey, 0) chunk to shard 1.");
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {_id: -5},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Insert again, into all three chunks.
+assert.writeOK(mongosColl.insert({_id: -4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 4}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 24}, {writeConcern: {w: "majority"}}));
+
+// Check that each change stream returns the expected events.
+shardZeroEvents = [
+ makeEvent(-3, "insert"),
+ makeEvent(-3, "delete"),
+ makeEvent(-2, "delete"),
+];
+shardOneEvents = [
+ makeEvent(3, "insert"),
+ makeEvent(23, "insert"),
+ makeEvent(-2, "insert"),
+ makeEvent(-3, "insert"),
+ makeEvent(-4, "insert"),
+ makeEvent(4, "insert"),
+ makeEvent(24, "insert"),
+];
+
+checkEvents(changeStreamShardZero, shardZeroEvents);
+checkEvents(changeStreamShardOne, shardOneEvents);
+
+// Now test that adding a new shard and migrating a chunk to it will continue to
+// return the correct results.
+const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
+newShard.startSet({shardsvr: ''});
+newShard.initiate();
+assert.commandWorked(mongos.adminCommand({addShard: newShard.getURL(), name: "newShard"}));
+const changeStreamNewShard = newShard.getPrimary().getCollection('test.chunk_mig').aggregate([
+ {$changeStream: {showMigrationEvents: true}}
+]);
+
+// At this point, there haven't been any migrations to that shard; check that the changeStream
+// works normally.
+assert.writeOK(mongosColl.insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 25}, {writeConcern: {w: "majority"}}));
+
+shardOneEvents = [
+ makeEvent(-5, "insert"),
+ makeEvent(5, "insert"),
+ makeEvent(25, "insert"),
+];
+
+assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
+checkEvents(changeStreamShardOne, shardOneEvents);
+assert(!changeStreamNewShard.hasNext(), "Do not expect any results yet");
+
+assert.writeOK(mongosColl.insert({_id: 16}, {writeConcern: {w: "majority"}}));
+
+// Now migrate a chunk to the new shard and verify the stream continues to return results
+// from both before and after the migration.
+jsTestLog("Migrating [10, MaxKey] chunk to new shard.");
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 20}, to: "newShard", _waitForDelete: true}));
+assert.writeOK(mongosColl.insert({_id: -6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 6}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 26}, {writeConcern: {w: "majority"}}));
+
+let shardOneEventsBeforeNewShard = [
+ makeEvent(16, "insert"),
+];
+let shardOneEventsAfterNewShard = [
+ makeEvent(16, "delete"),
+ makeEvent(20, "delete"),
+ makeEvent(21, "delete"),
+ makeEvent(22, "delete"),
+ makeEvent(23, "delete"),
+ makeEvent(24, "delete"),
+ makeEvent(25, "delete"),
+ makeEvent(-6, "insert"),
+ makeEvent(6, "insert"),
+];
+let newShardEvents = [
+ makeEvent(20, "insert"),
+ makeEvent(21, "insert"),
+ makeEvent(22, "insert"),
+ makeEvent(23, "insert"),
+ makeEvent(24, "insert"),
+ makeEvent(25, "insert"),
+ makeEvent(16, "insert"),
+ makeEvent(26, "insert"),
+];
+
+// Check that each change stream returns the expected events.
+assert(!changeStreamShardZero.hasNext(), "Do not expect any results");
+checkEvents(changeStreamShardOne, shardOneEventsBeforeNewShard);
+assert.soon(() => changeStreamShardOne.hasNext());
+next = changeStreamShardOne.next();
+assert.eq(next.operationType, "kNewShardDetected");
+checkEvents(changeStreamShardOne, shardOneEventsAfterNewShard);
+checkEvents(changeStreamNewShard, newShardEvents);
+
+// Make sure all change streams are empty.
+assert(!changeStreamShardZero.hasNext());
+assert(!changeStreamShardOne.hasNext());
+assert(!changeStreamNewShard.hasNext());
+
+st.stop();
+newShard.stopSet();
})();
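
As the assertErrorCode(..., 31123) call above shows, showMigrationEvents is rejected through mongos; it is a per-shard option. A minimal direct-to-shard sketch ('shardConn', a connection to a shard primary, is an assumption):

// A shard-local stream opened with showMigrationEvents: true also surfaces
// the inserts and deletes that chunk migrations perform on this shard.
const migrationStream = shardConn.getCollection("test.chunk_mig").aggregate([
    {$changeStream: {showMigrationEvents: true}}
]);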
diff --git a/jstests/sharding/change_stream_transaction_sharded.js b/jstests/sharding/change_stream_transaction_sharded.js
index 311f132012f..96e15459ff1 100644
--- a/jstests/sharding/change_stream_transaction_sharded.js
+++ b/jstests/sharding/change_stream_transaction_sharded.js
@@ -6,263 +6,254 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "change_stream_transaction_sharded";
- const namespace = dbName + "." + collName;
-
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+"use strict";
+
+const dbName = "test";
+const collName = "change_stream_transaction_sharded";
+const namespace = dbName + "." + collName;
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
+
+const mongosConn = st.s;
+assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).createIndex({shard: 1}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+// Shard the test collection and split it into two chunks: one that contains all {shard: 1}
+// documents and one that contains all {shard: 2} documents.
+st.shardColl(collName,
+ {shard: 1} /* shard key */,
+ {shard: 2} /* split at */,
+ {shard: 2} /* move the chunk containing {shard: 2} to its own shard */,
+ dbName,
+ true);
+// Seed each chunk with an initial document.
+assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
+ {shard: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
+ {shard: 2}, {writeConcern: {w: "majority"}}));
+
+const db = mongosConn.getDB(dbName);
+const coll = db.getCollection(collName);
+let changeListShard1 = [], changeListShard2 = [];
+
+//
+// Start transaction 1.
+//
+const session1 = db.getMongo().startSession({causalConsistency: true});
+const sessionDb1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDb1[collName];
+session1.startTransaction({readConcern: {level: "majority"}});
+
+//
+// Start transaction 2.
+//
+const session2 = db.getMongo().startSession({causalConsistency: true});
+const sessionDb2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDb2[collName];
+session2.startTransaction({readConcern: {level: "majority"}});
+
+/**
+ * Asserts that there are no changes waiting on the change stream cursor.
+ */
+function assertNoChanges(cursor) {
+ assert(!cursor.hasNext(), () => {
+ return "Unexpected change set: " + tojson(cursor.toArray());
});
+}
- const mongosConn = st.s;
- assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).createIndex({shard: 1}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- // Shard the test collection and split it into two chunks: one that contains all {shard: 1}
- // documents and one that contains all {shard: 2} documents.
- st.shardColl(collName,
- {shard: 1} /* shard key */,
- {shard: 2} /* split at */,
- {shard: 2} /* move the chunk containing {shard: 2} to its own shard */,
- dbName,
- true);
- // Seed each chunk with an initial document.
- assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
- {shard: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(mongosConn.getDB(dbName).getCollection(collName).insert(
- {shard: 2}, {writeConcern: {w: "majority"}}));
-
- const db = mongosConn.getDB(dbName);
- const coll = db.getCollection(collName);
- let changeListShard1 = [], changeListShard2 = [];
-
- //
- // Start transaction 1.
- //
- const session1 = db.getMongo().startSession({causalConsistency: true});
- const sessionDb1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDb1[collName];
- session1.startTransaction({readConcern: {level: "majority"}});
-
- //
- // Start transaction 2.
- //
- const session2 = db.getMongo().startSession({causalConsistency: true});
- const sessionDb2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDb2[collName];
- session2.startTransaction({readConcern: {level: "majority"}});
-
- /**
- * Asserts that there are no changes waiting on the change stream cursor.
- */
- function assertNoChanges(cursor) {
- assert(!cursor.hasNext(), () => {
- return "Unexpected change set: " + tojson(cursor.toArray());
- });
+//
+// Perform writes both in and outside of transactions and confirm that the expected changes are
+// returned by the change stream.
+//
+(function() {
+/**
+ * Asserts that the expected changes are found on the change stream cursor. Pushes the
+ * corresponding change stream document (with resume token) to an array. When expected
+ * changes are provided for both shards, we must assume that either shard's changes could
+ * come first or that they are interleaved via applyOps index. This is because a cross-shard
+ * transaction may commit at a different cluster time on each shard, which impacts the
+ * ordering of the change stream.
+ */
+function assertWritesVisibleWithCapture(cursor,
+ expectedChangesShard1,
+ expectedChangesShard2,
+ changeCaptureListShard1,
+ changeCaptureListShard2) {
+ function assertChangeEqualWithCapture(changeDoc, expectedChange, changeCaptureList) {
+ assert.eq(expectedChange.operationType, changeDoc.operationType);
+ assert.eq(expectedChange._id, changeDoc.documentKey._id);
+ changeCaptureList.push(changeDoc);
}
- //
- // Perform writes both in and outside of transactions and confirm that the changes expected are
- // returned by the change stream.
- //
- (function() {
- /**
- * Asserts that the expected changes are found on the change stream cursor. Pushes the
- * corresponding change stream document (with resume token) to an array. When expected
- * changes are provided for both shards, we must assume that either shard's changes could
- * come first or that they are interleaved via applyOps index. This is because a cross shard
- * transaction may commit at a different cluster time on each shard, which impacts the
- * ordering of the change stream.
- */
- function assertWritesVisibleWithCapture(cursor,
- expectedChangesShard1,
- expectedChangesShard2,
- changeCaptureListShard1,
- changeCaptureListShard2) {
- function assertChangeEqualWithCapture(changeDoc, expectedChange, changeCaptureList) {
- assert.eq(expectedChange.operationType, changeDoc.operationType);
- assert.eq(expectedChange._id, changeDoc.documentKey._id);
- changeCaptureList.push(changeDoc);
- }
-
- while (expectedChangesShard1.length || expectedChangesShard2.length) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
-
- if (changeDoc.documentKey.shard === 1) {
- assert(expectedChangesShard1.length);
- assertChangeEqualWithCapture(
- changeDoc, expectedChangesShard1[0], changeCaptureListShard1);
- expectedChangesShard1.shift();
- } else {
- assert(changeDoc.documentKey.shard === 2);
- assert(expectedChangesShard2.length);
- assertChangeEqualWithCapture(
- changeDoc, expectedChangesShard2[0], changeCaptureListShard2);
- expectedChangesShard2.shift();
- }
- }
-
- assertNoChanges(cursor);
+ while (expectedChangesShard1.length || expectedChangesShard2.length) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+
+ if (changeDoc.documentKey.shard === 1) {
+ assert(expectedChangesShard1.length);
+ assertChangeEqualWithCapture(
+ changeDoc, expectedChangesShard1[0], changeCaptureListShard1);
+ expectedChangesShard1.shift();
+ } else {
+ assert(changeDoc.documentKey.shard === 2);
+ assert(expectedChangesShard2.length);
+ assertChangeEqualWithCapture(
+ changeDoc, expectedChangesShard2[0], changeCaptureListShard2);
+ expectedChangesShard2.shift();
}
+ }
- // Open a change stream on the test collection.
- const changeStreamCursor = coll.watch();
-
- // Insert a document and confirm that the change stream has it.
- assert.commandWorked(
- coll.insert({shard: 1, _id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
- assertWritesVisibleWithCapture(changeStreamCursor,
- [{operationType: "insert", _id: "no-txn-doc-1"}],
- [],
- changeListShard1,
- changeListShard2);
-
- // Insert two documents under each transaction and confirm no change stream updates.
- assert.commandWorked(
- sessionColl1.insert([{shard: 1, _id: "txn1-doc-1"}, {shard: 2, _id: "txn1-doc-2"}]));
- assert.commandWorked(
- sessionColl2.insert([{shard: 1, _id: "txn2-doc-1"}, {shard: 2, _id: "txn2-doc-2"}]));
- assertNoChanges(changeStreamCursor);
-
- // Update one document under each transaction and confirm no change stream updates.
- assert.commandWorked(
- sessionColl1.update({shard: 1, _id: "txn1-doc-1"}, {$set: {"updated": 1}}));
- assert.commandWorked(
- sessionColl2.update({shard: 2, _id: "txn2-doc-2"}, {$set: {"updated": 1}}));
- assertNoChanges(changeStreamCursor);
-
- // Update and then remove second doc under each transaction.
- assert.commandWorked(sessionColl1.update({shard: 2, _id: "txn1-doc-2"},
- {$set: {"update-before-delete": 1}}));
- assert.commandWorked(sessionColl2.update({shard: 1, _id: "txn2-doc-1"},
- {$set: {"update-before-delete": 1}}));
- assert.commandWorked(sessionColl1.remove({shard: 2, _id: "txn1-doc-2"}));
- assert.commandWorked(sessionColl2.remove({shard: 1, _id: "txn2-doc-2"}));
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of a transaction and confirm that the change stream sees only
- // this write.
- assert.commandWorked(
- coll.insert({shard: 2, _id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
- assertWritesVisibleWithCapture(changeStreamCursor,
- [],
- [{operationType: "insert", _id: "no-txn-doc-2"}],
- changeListShard1,
- changeListShard2);
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of the transaction.
- assert.commandWorked(
- coll.insert({shard: 1, _id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
-
- // Commit first transaction and confirm that the change stream sees the changes expected
- // from each shard.
- assert.commandWorked(session1.commitTransaction_forTesting());
- assertWritesVisibleWithCapture(changeStreamCursor,
- [
- {operationType: "insert", _id: "no-txn-doc-3"},
- {operationType: "insert", _id: "txn1-doc-1"},
- {operationType: "update", _id: "txn1-doc-1"}
- ],
- [
- {operationType: "insert", _id: "txn1-doc-2"},
- {operationType: "update", _id: "txn1-doc-2"},
- {operationType: "delete", _id: "txn1-doc-2"}
- ],
- changeListShard1,
- changeListShard2);
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of the transaction.
- assert.commandWorked(
- coll.insert({shard: 2, _id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
-
- // Abort second transaction and confirm that the change stream sees only the previous
- // non-transaction write.
- assert.commandWorked(session2.abortTransaction_forTesting());
- assertWritesVisibleWithCapture(changeStreamCursor,
- [],
- [{operationType: "insert", _id: "no-txn-doc-4"}],
- changeListShard1,
- changeListShard2);
- assertNoChanges(changeStreamCursor);
- changeStreamCursor.close();
- })();
-
- //
- // Open a change stream at each resume point captured for the previous writes. Confirm that the
- // documents returned match what was returned for the initial change stream.
- //
- (function() {
-
- /**
- * Iterates over a list of changes and returns the index of the change whose resume token is
- * higher than that of 'changeDoc'. It is expected that 'changeList' entries at this index
- * and beyond will be included in a change stream resumed at 'changeDoc._id'.
- */
- function getPostTokenChangeIndex(changeDoc, changeList) {
- for (let i = 0; i < changeList.length; ++i) {
- if (changeDoc._id._data < changeList[i]._id._data) {
- return i;
- }
- }
+ assertNoChanges(cursor);
+}
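+
+// For illustration only (hypothetical event names): if a cross-shard transaction inserts
+// "doc-a" on shard 1 and "doc-b" on shard 2, the stream may legally surface the events as
+// [insert doc-a, insert doc-b] or [insert doc-b, insert doc-a], which is why the helper
+// above takes per-shard expected lists rather than a single global ordering.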
+
+// Open a change stream on the test collection.
+const changeStreamCursor = coll.watch();
+
+// Insert a document and confirm that the change stream has it.
+assert.commandWorked(coll.insert({shard: 1, _id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [{operationType: "insert", _id: "no-txn-doc-1"}],
+ [],
+ changeListShard1,
+ changeListShard2);
+
+// Insert two documents under each transaction and confirm no change stream updates.
+assert.commandWorked(
+ sessionColl1.insert([{shard: 1, _id: "txn1-doc-1"}, {shard: 2, _id: "txn1-doc-2"}]));
+assert.commandWorked(
+ sessionColl2.insert([{shard: 1, _id: "txn2-doc-1"}, {shard: 2, _id: "txn2-doc-2"}]));
+assertNoChanges(changeStreamCursor);
+
+// Update one document under each transaction and confirm no change stream updates.
+assert.commandWorked(sessionColl1.update({shard: 1, _id: "txn1-doc-1"}, {$set: {"updated": 1}}));
+assert.commandWorked(sessionColl2.update({shard: 2, _id: "txn2-doc-2"}, {$set: {"updated": 1}}));
+assertNoChanges(changeStreamCursor);
+
+// Update and then remove second doc under each transaction.
+assert.commandWorked(
+ sessionColl1.update({shard: 2, _id: "txn1-doc-2"}, {$set: {"update-before-delete": 1}}));
+assert.commandWorked(
+ sessionColl2.update({shard: 1, _id: "txn2-doc-1"}, {$set: {"update-before-delete": 1}}));
+assert.commandWorked(sessionColl1.remove({shard: 2, _id: "txn1-doc-2"}));
+assert.commandWorked(sessionColl2.remove({shard: 1, _id: "txn2-doc-2"}));
+assertNoChanges(changeStreamCursor);
+
+// Perform a write outside of a transaction and confirm that the change stream sees only
+// this write.
+assert.commandWorked(coll.insert({shard: 2, _id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [],
+ [{operationType: "insert", _id: "no-txn-doc-2"}],
+ changeListShard1,
+ changeListShard2);
+assertNoChanges(changeStreamCursor);
+
+// Perform a write outside of the transaction.
+assert.commandWorked(coll.insert({shard: 1, _id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
+
+// Commit first transaction and confirm that the change stream sees the changes expected
+// from each shard.
+assert.commandWorked(session1.commitTransaction_forTesting());
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [
+ {operationType: "insert", _id: "no-txn-doc-3"},
+ {operationType: "insert", _id: "txn1-doc-1"},
+ {operationType: "update", _id: "txn1-doc-1"}
+ ],
+ [
+ {operationType: "insert", _id: "txn1-doc-2"},
+ {operationType: "update", _id: "txn1-doc-2"},
+ {operationType: "delete", _id: "txn1-doc-2"}
+ ],
+ changeListShard1,
+ changeListShard2);
+assertNoChanges(changeStreamCursor);
+
+// Perform a write outside of the transaction.
+assert.commandWorked(coll.insert({shard: 2, _id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
+
+// Abort second transaction and confirm that the change stream sees only the previous
+// non-transaction write.
+assert.commandWorked(session2.abortTransaction_forTesting());
+assertWritesVisibleWithCapture(changeStreamCursor,
+ [],
+ [{operationType: "insert", _id: "no-txn-doc-4"}],
+ changeListShard1,
+ changeListShard2);
+assertNoChanges(changeStreamCursor);
+changeStreamCursor.close();
+})();
- return changeList.length;
- }
+//
+// Open a change stream at each resume point captured for the previous writes. Confirm that the
+// documents returned match what was returned for the initial change stream.
+//
+(function() {
- /**
- * Confirms that the change represented by 'changeDoc' exists in 'shardChangeList' at index
- * 'changeListIndex'.
- */
- function shardHasDocumentAtChangeListIndex(changeDoc, shardChangeList, changeListIndex) {
- assert(changeListIndex < shardChangeList.length);
-
- const expectedChangeDoc = shardChangeList[changeListIndex];
- assert.eq(changeDoc, expectedChangeDoc);
- assert.eq(expectedChangeDoc.documentKey,
- changeDoc.documentKey,
- tojson(changeDoc) + ", " + tojson(expectedChangeDoc));
+/**
+ * Iterates over a list of changes and returns the index of the first change whose resume
+ * token is higher than that of 'changeDoc'. It is expected that 'changeList' entries at
+ * this index and beyond will be included in a change stream resumed at 'changeDoc._id'.
+ */
+function getPostTokenChangeIndex(changeDoc, changeList) {
+ for (let i = 0; i < changeList.length; ++i) {
+ if (changeDoc._id._data < changeList[i]._id._data) {
+ return i;
}
+ }
- /**
- * Test that change stream returns the expected set of documuments when resumed from each
- * point captured by 'changeList'.
- */
- function confirmResumeForChangeList(changeList, changeListShard1, changeListShard2) {
- for (let i = 0; i < changeList.length; ++i) {
- const resumeDoc = changeList[i];
- let indexShard1 = getPostTokenChangeIndex(resumeDoc, changeListShard1);
- let indexShard2 = getPostTokenChangeIndex(resumeDoc, changeListShard2);
- const resumeCursor = coll.watch([], {startAfter: resumeDoc._id});
-
- while ((indexShard1 + indexShard2) <
- (changeListShard1.length + changeListShard2.length)) {
- assert.soon(() => resumeCursor.hasNext());
- const changeDoc = resumeCursor.next();
-
- if (changeDoc.documentKey.shard === 1) {
- shardHasDocumentAtChangeListIndex(
- changeDoc, changeListShard1, indexShard1++);
- } else {
- assert(changeDoc.documentKey.shard === 2);
- shardHasDocumentAtChangeListIndex(
- changeDoc, changeListShard2, indexShard2++);
- }
- }
-
- assertNoChanges(resumeCursor);
- resumeCursor.close();
+ return changeList.length;
+}
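+
+// Sanity-check the helper on hypothetical resume tokens. Illustration only: real '_data'
+// values are long hex strings, but they compare lexicographically just like these.
+assert.eq(2,
+          getPostTokenChangeIndex(
+              {_id: {_data: "02"}},
+              [{_id: {_data: "01"}}, {_id: {_data: "02"}}, {_id: {_data: "03"}}]));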
+
+/**
+ * Confirms that the change represented by 'changeDoc' exists in 'shardChangeList' at index
+ * 'changeListIndex'.
+ */
+function shardHasDocumentAtChangeListIndex(changeDoc, shardChangeList, changeListIndex) {
+ assert(changeListIndex < shardChangeList.length);
+
+ const expectedChangeDoc = shardChangeList[changeListIndex];
+ assert.eq(changeDoc, expectedChangeDoc);
+ assert.eq(expectedChangeDoc.documentKey,
+ changeDoc.documentKey,
+ tojson(changeDoc) + ", " + tojson(expectedChangeDoc));
+}
+
+/**
+ * Test that the change stream returns the expected set of documents when resumed from each
+ * point captured by 'changeList'.
+ */
+function confirmResumeForChangeList(changeList, changeListShard1, changeListShard2) {
+ for (let i = 0; i < changeList.length; ++i) {
+ const resumeDoc = changeList[i];
+ let indexShard1 = getPostTokenChangeIndex(resumeDoc, changeListShard1);
+ let indexShard2 = getPostTokenChangeIndex(resumeDoc, changeListShard2);
+ const resumeCursor = coll.watch([], {startAfter: resumeDoc._id});
+
+ while ((indexShard1 + indexShard2) < (changeListShard1.length + changeListShard2.length)) {
+ assert.soon(() => resumeCursor.hasNext());
+ const changeDoc = resumeCursor.next();
+
+ if (changeDoc.documentKey.shard === 1) {
+ shardHasDocumentAtChangeListIndex(changeDoc, changeListShard1, indexShard1++);
+ } else {
+ assert(changeDoc.documentKey.shard === 2);
+ shardHasDocumentAtChangeListIndex(changeDoc, changeListShard2, indexShard2++);
}
}
- // Confirm that the sequence of events returned by the stream is consistent when resuming
- // from any point in the stream on either shard.
- confirmResumeForChangeList(changeListShard1, changeListShard1, changeListShard2);
- confirmResumeForChangeList(changeListShard2, changeListShard1, changeListShard2);
- })();
+ assertNoChanges(resumeCursor);
+ resumeCursor.close();
+ }
+}
+
+// Confirm that the sequence of events returned by the stream is consistent when resuming
+// from any point in the stream on either shard.
+confirmResumeForChangeList(changeListShard1, changeListShard1, changeListShard2);
+confirmResumeForChangeList(changeListShard2, changeListShard1, changeListShard2);
+})();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/change_stream_update_lookup_collation.js b/jstests/sharding/change_stream_update_lookup_collation.js
index eefff9d463f..9c13f4afac8 100644
--- a/jstests/sharding/change_stream_update_lookup_collation.js
+++ b/jstests/sharding/change_stream_update_lookup_collation.js
@@ -4,164 +4,163 @@
// Collation is only supported with the find command, not with op query.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const st = new ShardingTest({
- shards: 2,
- config: 1,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- const caseInsensitive = {locale: "en_US", strength: 2};
- assert.commandWorked(
- mongosDB.runCommand({create: mongosColl.getName(), collation: caseInsensitive}));
-
- // Shard the test collection on 'shardKey'. The shard key must use the simple collation.
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: mongosColl.getFullName(),
- key: {shardKey: 1},
- collation: {locale: "simple"}
- }));
-
- // Split the collection into 2 chunks: [MinKey, "aBC"), ["aBC", MaxKey). Note that there will be
- // documents in each chunk that will have the same shard key according to the collection's
- // default collation, but not according to the simple collation (e.g. "abc" and "ABC").
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: "aBC"}}));
-
- // Move the [MinKey, 'aBC') chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {shardKey: "ABC"}, to: st.rs1.getURL()}));
-
- // Make sure that "ABC" and "abc" go to different shards - we rely on that to make sure the _ids
- // are unique on each shard.
- assert.lte(bsonWoCompare({shardKey: "ABC"}, {shardKey: "aBC"}), -1);
- assert.gte(bsonWoCompare({shardKey: "abc"}, {shardKey: "aBC"}), 1);
-
- // Write some documents to each chunk. Note that the _id is purposefully not unique, since we
- // know the update lookup will use both the _id and the shard key, and we want to make sure it
- // is only targeting a single shard. Also note that _id is a string, since we want to make sure
- // the _id index can only be used if we are using the collection's default collation.
- assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
- assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
- assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
- assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
-
- // Verify that the post-change lookup uses the simple collation to target to a single shard,
- // then uses the collection-default collation to perform the lookup on the shard.
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
-
- // Be sure to include the collation in the updates so that each can be targeted to exactly one
- // shard - this is important to ensure each update only updates one document (since with the
- // default collation their documentKeys are identical). If each operation updates only one, the
- // clusterTime sent from mongos will ensure that each corresponding oplog entry has a distinct
- // timestamp and so will appear in the change stream in the order we expect.
- let updateResult = mongosColl.updateOne({shardKey: "abc", _id: "abc_1"},
- {$set: {updatedCount: 1}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
- updateResult = mongosColl.updateOne({shardKey: "ABC", _id: "abc_1"},
- {$set: {updatedCount: 1}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
-
- function numIdIndexUsages(host) {
- return host.getCollection(mongosColl.getFullName())
- .aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}])
- .toArray()[0]
- .accesses.ops;
- }
- let idIndexUsagesPreIteration = {
- shard0: numIdIndexUsages(st.rs0.getPrimary()),
- shard1: numIdIndexUsages(st.rs1.getPrimary())
- };
-
- for (let nextDocKey of[{shardKey: "abc", _id: "abc_1"}, {shardKey: "ABC", _id: "abc_1"}]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, nextDocKey, tojson(next));
- assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 1}));
- }
- assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
- assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
-
- changeStream.close();
-
- // Now test that a change stream with a non-default collation will still use the simple
- // collation to target the update lookup, and the collection-default collation to do the update
- // lookup on the shard.
-
- // Strength 1 will consider "ç" equal to "c" and "C".
- const strengthOneCollation = {locale: "en_US", strength: 1};
-
- // Insert some documents that might be confused with existing documents under the change
- // stream's collation, but should not be confused during the update lookup.
- assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
- assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
- assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
- assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
-
- assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);
-
- const strengthOneChangeStream = mongosColl.aggregate(
- [
- {$changeStream: {fullDocument: "updateLookup"}},
- {$match: {"fullDocument.shardKey": "abc"}}
- ],
- {collation: strengthOneCollation});
-
- updateResult = mongosColl.updateOne({shardKey: "ABC", _id: "abc_1"},
- {$set: {updatedCount: 2}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
- updateResult = mongosColl.updateOne({shardKey: "abc", _id: "abc_1"},
- {$set: {updatedCount: 2}},
- {collation: {locale: "simple"}});
- assert.eq(1, updateResult.modifiedCount);
-
- idIndexUsagesPreIteration = {
- shard0: numIdIndexUsages(st.rs0.getPrimary()),
- shard1: numIdIndexUsages(st.rs1.getPrimary())
- };
- for (let nextDocKey of[{shardKey: "ABC", _id: "abc_1"}, {shardKey: "abc", _id: "abc_1"}]) {
- assert.soon(() => strengthOneChangeStream.hasNext());
- let next = strengthOneChangeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, nextDocKey, tojson(next));
- assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 2}));
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ config: 1,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
- assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
- assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
-
- strengthOneChangeStream.close();
-
- st.stop();
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
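+// For illustration: strength 2 ignores case but not diacritics, so under this collation
+// "abc" and "ABC" compare equal, while "abc" and "abç" do not.
+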
+assert.commandWorked(
+ mongosDB.runCommand({create: mongosColl.getName(), collation: caseInsensitive}));
+
+// Shard the test collection on 'shardKey'. The shard key must use the simple collation.
+assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: mongosColl.getFullName(),
+ key: {shardKey: 1},
+ collation: {locale: "simple"}
+}));
+
+// Split the collection into 2 chunks: [MinKey, "aBC"), ["aBC", MaxKey). Note that there will be
+// documents in each chunk that will have the same shard key according to the collection's
+// default collation, but not according to the simple collation (e.g. "abc" and "ABC").
+assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: "aBC"}}));
+
+// Move the [MinKey, 'aBC') chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {shardKey: "ABC"}, to: st.rs1.getURL()}));
+
+// Make sure that "ABC" and "abc" go to different shards - we rely on that to make sure the _ids
+// are unique on each shard.
+assert.lte(bsonWoCompare({shardKey: "ABC"}, {shardKey: "aBC"}), -1);
+assert.gte(bsonWoCompare({shardKey: "abc"}, {shardKey: "aBC"}), 1);
+
+// Write some documents to each chunk. Note that the _id is purposefully not unique, since we
+// know the update lookup will use both the _id and the shard key, and we want to make sure it
+// is only targeting a single shard. Also note that _id is a string, since we want to make sure
+// the _id index can only be used if we are using the collection's default collation.
+assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "ABC"}));
+assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "ABC"}));
+assert.writeOK(mongosColl.insert({_id: "abc_1", shardKey: "abc"}));
+assert.writeOK(mongosColl.insert({_id: "abc_2", shardKey: "abc"}));
+
+// Verify that the post-change lookup uses the simple collation to target a single shard,
+// then uses the collection-default collation to perform the lookup on the shard.
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+
+// Be sure to include the collation in the updates so that each can be targeted to exactly one
+// shard - this is important to ensure each update only updates one document (since with the
+// default collation their documentKeys are identical). If each operation updates only one, the
+// clusterTime sent from mongos will ensure that each corresponding oplog entry has a distinct
+// timestamp and so will appear in the change stream in the order we expect.
+let updateResult = mongosColl.updateOne(
+ {shardKey: "abc", _id: "abc_1"}, {$set: {updatedCount: 1}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+updateResult = mongosColl.updateOne(
+ {shardKey: "ABC", _id: "abc_1"}, {$set: {updatedCount: 1}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+
+function numIdIndexUsages(host) {
+ return host.getCollection(mongosColl.getFullName())
+ .aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}])
+ .toArray()[0]
+ .accesses.ops;
+}
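+// For illustration: $indexStats reports one document per index on the collection; reading
+// 'accesses.ops' for the "_id_" index yields a per-node access counter, so the deltas
+// asserted below reflect how many lookups used the _id index on each shard.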
+let idIndexUsagesPreIteration = {
+ shard0: numIdIndexUsages(st.rs0.getPrimary()),
+ shard1: numIdIndexUsages(st.rs1.getPrimary())
+};
+
+for (let nextDocKey of [{shardKey: "abc", _id: "abc_1"}, {shardKey: "ABC", _id: "abc_1"}]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, nextDocKey, tojson(next));
+ assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 1}));
+}
+assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
+assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
+
+changeStream.close();
+
+// Now test that a change stream with a non-default collation will still use the simple
+// collation to target the update lookup, and the collection-default collation to do the update
+// lookup on the shard.
+
+// Strength 1 will consider "ç" equal to "c" and "C".
+const strengthOneCollation = {
+ locale: "en_US",
+ strength: 1
+};
+
+// Insert some documents that might be confused with existing documents under the change
+// stream's collation, but should not be confused during the update lookup.
+assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "ABÇ"}));
+assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "ABÇ"}));
+assert.writeOK(mongosColl.insert({_id: "abç_1", shardKey: "abç"}));
+assert.writeOK(mongosColl.insert({_id: "abç_2", shardKey: "abç"}));
+
+assert.eq(mongosColl.find({shardKey: "abc"}).collation(strengthOneCollation).itcount(), 8);
+
+const strengthOneChangeStream = mongosColl.aggregate(
+ [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.shardKey": "abc"}}],
+ {collation: strengthOneCollation});
+
+updateResult = mongosColl.updateOne(
+ {shardKey: "ABC", _id: "abc_1"}, {$set: {updatedCount: 2}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+updateResult = mongosColl.updateOne(
+ {shardKey: "abc", _id: "abc_1"}, {$set: {updatedCount: 2}}, {collation: {locale: "simple"}});
+assert.eq(1, updateResult.modifiedCount);
+
+idIndexUsagesPreIteration = {
+ shard0: numIdIndexUsages(st.rs0.getPrimary()),
+ shard1: numIdIndexUsages(st.rs1.getPrimary())
+};
+for (let nextDocKey of [{shardKey: "ABC", _id: "abc_1"}, {shardKey: "abc", _id: "abc_1"}]) {
+ assert.soon(() => strengthOneChangeStream.hasNext());
+ let next = strengthOneChangeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, nextDocKey, tojson(next));
+ assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 2}));
+}
+assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
+assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
+
+strengthOneChangeStream.close();
+
+st.stop();
}());
diff --git a/jstests/sharding/change_stream_update_lookup_read_concern.js b/jstests/sharding/change_stream_update_lookup_read_concern.js
index 1b6938589cf..03b9ec86738 100644
--- a/jstests/sharding/change_stream_update_lookup_read_concern.js
+++ b/jstests/sharding/change_stream_update_lookup_read_concern.js
@@ -3,170 +3,170 @@
// change that we're doing the lookup for, and that change will be majority-committed.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/replsets/rslib.js'); // For startSetIfSupportsReadMajority.
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow() helpers.
- load("jstests/replsets/rslib.js"); // For reconfig().
-
- // For stopServerReplication() and restartServerReplication().
- load("jstests/libs/write_concern_util.js");
-
- // Configure a replica set to have nodes with specific tags - we will eventually add this as
- // part of a sharded cluster.
- const rsNodeOptions = {
- setParameter: {
- writePeriodicNoops: true,
- // Note we do not configure the periodic noop writes to be more frequent as we do to
- // speed up other change streams tests, since we provide an array of individually
- // configured nodes, in order to know which nodes have which tags. This requires a step
- // up command to happen, which requires all nodes to agree on an op time. With the
- // periodic noop writer at a high frequency, this can potentially never finish.
- },
- shardsvr: "",
- };
- const replSetName = jsTestName();
-
- // Note that we include {chainingAllowed: false} in the replica set settings, because this test
- // assumes that both secondaries sync from the primary. Without this setting, the
- // TopologyCoordinator would sometimes chain one of the secondaries off the other. The test
- // later disables replication on one secondary, but with chaining, that would effectively
- // disable replication on both secondaries, deadlocking the test.
- const rst = new ReplSetTest({
- name: replSetName,
- nodes: [
- {rsConfig: {priority: 1, tags: {tag: "primary"}}},
- {rsConfig: {priority: 0, tags: {tag: "closestSecondary"}}},
- {rsConfig: {priority: 0, tags: {tag: "fartherSecondary"}}}
- ],
- nodeOptions: rsNodeOptions,
- settings: {chainingAllowed: false},
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
- rst.awaitSecondaryNodes();
-
- // Start the sharding test and add the replica set.
- const st = new ShardingTest({manualAddShard: true});
- assert.commandWorked(st.s.adminCommand({addShard: replSetName + "/" + rst.getPrimary().host}));
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- // Shard the collection to ensure the change stream will perform update lookup from mongos.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- assert.writeOK(mongosColl.insert({_id: 1}));
- rst.awaitReplication();
-
- // Make sure reads with read preference tag 'closestSecondary' go to the tagged secondary.
- const closestSecondary = rst.nodes[1];
- const closestSecondaryDB = closestSecondary.getDB(mongosDB.getName());
- assert.commandWorked(closestSecondaryDB.setProfilingLevel(2));
-
- // We expect the tag to ensure there is only one node to choose from, so the actual read
- // preference doesn't really matter - we use 'nearest' throughout.
- assert.eq(mongosColl.find()
- .readPref("nearest", [{tag: "closestSecondary"}])
- .comment("testing targeting")
- .itcount(),
- 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: closestSecondaryDB,
- filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
- });
-
- const changeStreamComment = "change stream against closestSecondary";
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}], {
- comment: changeStreamComment,
- $readPreference: {mode: "nearest", tags: [{tag: "closestSecondary"}]}
- });
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
- assert.soon(() => changeStream.hasNext());
- let latestChange = changeStream.next();
- assert.eq(latestChange.operationType, "update");
- assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 1});
-
- // Test that the change stream itself goes to the secondary. There might be more than one if we
- // needed multiple getMores to retrieve the changes.
- // TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore because
- // the initial aggregate will not show up.
- profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: closestSecondaryDB,
- filter: {"originatingCommand.comment": changeStreamComment}
- });
-
- // Test that the update lookup goes to the secondary as well.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: closestSecondaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(),
- "command.filter._id": 1,
- "command.comment": changeStreamComment,
- // We need to filter out any profiler entries with a stale config - this is the first
- // read on this secondary with a readConcern specified, so it is the first read on this
- // secondary that will enforce shard version.
- errCode: {$ne: ErrorCodes.StaleConfig}
- },
- errorMsgFilter: {ns: mongosColl.getFullName()},
- errorMsgProj: {ns: 1, op: 1, command: 1},
- });
-
- // Now add a new secondary which is "closer" (add the "closestSecondary" tag to that secondary,
- // and remove it from the old node with that tag) to force update lookups target a different
- // node than the change stream itself.
- let rsConfig = rst.getReplSetConfig();
- rsConfig.members[1].tags = {tag: "fartherSecondary"};
- rsConfig.members[2].tags = {tag: "closestSecondary"};
- rsConfig.version = rst.getReplSetConfigFromNode().version + 1;
- reconfig(rst, rsConfig);
- rst.awaitSecondaryNodes();
- const newClosestSecondary = rst.nodes[2];
- const newClosestSecondaryDB = newClosestSecondary.getDB(mongosDB.getName());
- const originalClosestSecondaryDB = closestSecondaryDB;
-
- // Wait for the mongos to acknowledge the new tags from our reconfig.
- awaitRSClientHosts(st.s,
- newClosestSecondary,
- {ok: true, secondary: true, tags: {tag: "closestSecondary"}},
- rst);
- awaitRSClientHosts(st.s,
- originalClosestSecondaryDB.getMongo(),
- {ok: true, secondary: true, tags: {tag: "fartherSecondary"}},
- rst);
- assert.commandWorked(newClosestSecondaryDB.setProfilingLevel(2));
-
- // Make sure new queries with read preference tag "closestSecondary" go to the new secondary.
- profilerHasZeroMatchingEntriesOrThrow({profileDB: newClosestSecondaryDB, filter: {}});
- assert.eq(mongosColl.find()
- .readPref("nearest", [{tag: "closestSecondary"}])
- .comment("testing targeting")
- .itcount(),
- 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: newClosestSecondaryDB,
- filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
- });
-
- // Test that the change stream continues on the original host, but the update lookup now targets
- // the new, lagged secondary. Even though it's lagged, the lookup should use 'afterClusterTime'
- // to ensure it does not return until the node can see the change it's looking up.
- stopServerReplication(newClosestSecondary);
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
-
- // Since we stopped replication, we expect the update lookup to block indefinitely until we
- // resume replication, so we resume replication in a parallel shell while this thread is blocked
- // getting the next change from the stream.
- const noConnect = true; // This shell creates its own connection to the host.
- const joinResumeReplicationShell =
+"use strict";
+
+load('jstests/replsets/rslib.js'); // For startSetIfSupportsReadMajority.
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow() helpers.
+load("jstests/replsets/rslib.js"); // For reconfig().
+
+// For stopServerReplication() and restartServerReplication().
+load("jstests/libs/write_concern_util.js");
+
+// Configure a replica set to have nodes with specific tags - we will eventually add this as
+// part of a sharded cluster.
+const rsNodeOptions = {
+ setParameter: {
+ writePeriodicNoops: true,
+        // Note that, unlike in other change streams tests, we do not configure the periodic
+        // noop writes to be more frequent here. Because we provide an array of individually
+        // configured nodes (in order to know which nodes have which tags), a step-up command
+        // must happen, and that requires all nodes to agree on an op time. With the periodic
+        // noop writer at a high frequency, this can potentially never finish.
+ },
+ shardsvr: "",
+};
+const replSetName = jsTestName();
+
+// Note that we include {chainingAllowed: false} in the replica set settings, because this test
+// assumes that both secondaries sync from the primary. Without this setting, the
+// TopologyCoordinator would sometimes chain one of the secondaries off the other. The test
+// later disables replication on one secondary, but with chaining, that would effectively
+// disable replication on both secondaries, deadlocking the test.
+const rst = new ReplSetTest({
+ name: replSetName,
+ nodes: [
+ {rsConfig: {priority: 1, tags: {tag: "primary"}}},
+ {rsConfig: {priority: 0, tags: {tag: "closestSecondary"}}},
+ {rsConfig: {priority: 0, tags: {tag: "fartherSecondary"}}}
+ ],
+ nodeOptions: rsNodeOptions,
+ settings: {chainingAllowed: false},
+});
+
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+rst.awaitSecondaryNodes();
+
+// Start the sharding test and add the replica set.
+const st = new ShardingTest({manualAddShard: true});
+assert.commandWorked(st.s.adminCommand({addShard: replSetName + "/" + rst.getPrimary().host}));
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+// Shard the collection to ensure the change stream will perform update lookup from mongos.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+assert.writeOK(mongosColl.insert({_id: 1}));
+rst.awaitReplication();
+
+// Make sure reads with read preference tag 'closestSecondary' go to the tagged secondary.
+const closestSecondary = rst.nodes[1];
+const closestSecondaryDB = closestSecondary.getDB(mongosDB.getName());
+assert.commandWorked(closestSecondaryDB.setProfilingLevel(2));
+
+// We expect the tag to ensure there is only one node to choose from, so the actual read
+// preference doesn't really matter - we use 'nearest' throughout.
+assert.eq(mongosColl.find()
+ .readPref("nearest", [{tag: "closestSecondary"}])
+ .comment("testing targeting")
+ .itcount(),
+ 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: closestSecondaryDB,
+ filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
+});
+
+const changeStreamComment = "change stream against closestSecondary";
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}], {
+ comment: changeStreamComment,
+ $readPreference: {mode: "nearest", tags: [{tag: "closestSecondary"}]}
+});
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
+assert.soon(() => changeStream.hasNext());
+let latestChange = changeStream.next();
+assert.eq(latestChange.operationType, "update");
+assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 1});
+
+// Test that the change stream itself goes to the secondary. There might be more than one if we
+// needed multiple getMores to retrieve the changes.
+// TODO SERVER-31650 We have to use 'originatingCommand' here and look for the getMore because
+// the initial aggregate will not show up.
+profilerHasAtLeastOneMatchingEntryOrThrow(
+ {profileDB: closestSecondaryDB, filter: {"originatingCommand.comment": changeStreamComment}});
+
+// Test that the update lookup goes to the secondary as well.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: closestSecondaryDB,
+ filter: {
+ op: "query",
+ ns: mongosColl.getFullName(),
+ "command.filter._id": 1,
+ "command.comment": changeStreamComment,
+ // We need to filter out any profiler entries with a stale config - this is the first
+ // read on this secondary with a readConcern specified, so it is the first read on this
+ // secondary that will enforce shard version.
+ errCode: {$ne: ErrorCodes.StaleConfig}
+ },
+ errorMsgFilter: {ns: mongosColl.getFullName()},
+ errorMsgProj: {ns: 1, op: 1, command: 1},
+});
+
+// Now add a new secondary which is "closer" (add the "closestSecondary" tag to that secondary,
+// and remove it from the old node with that tag) to force update lookups to target a different
+// node than the change stream itself.
+let rsConfig = rst.getReplSetConfig();
+rsConfig.members[1].tags = {
+ tag: "fartherSecondary"
+};
+rsConfig.members[2].tags = {
+ tag: "closestSecondary"
+};
+rsConfig.version = rst.getReplSetConfigFromNode().version + 1;
+reconfig(rst, rsConfig);
+rst.awaitSecondaryNodes();
+const newClosestSecondary = rst.nodes[2];
+const newClosestSecondaryDB = newClosestSecondary.getDB(mongosDB.getName());
+const originalClosestSecondaryDB = closestSecondaryDB;
+
+// Wait for the mongos to acknowledge the new tags from our reconfig.
+awaitRSClientHosts(
+ st.s, newClosestSecondary, {ok: true, secondary: true, tags: {tag: "closestSecondary"}}, rst);
+awaitRSClientHosts(st.s,
+ originalClosestSecondaryDB.getMongo(),
+ {ok: true, secondary: true, tags: {tag: "fartherSecondary"}},
+ rst);
+assert.commandWorked(newClosestSecondaryDB.setProfilingLevel(2));
+
+// Make sure new queries with read preference tag "closestSecondary" go to the new secondary.
+profilerHasZeroMatchingEntriesOrThrow({profileDB: newClosestSecondaryDB, filter: {}});
+assert.eq(mongosColl.find()
+ .readPref("nearest", [{tag: "closestSecondary"}])
+ .comment("testing targeting")
+ .itcount(),
+ 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: newClosestSecondaryDB,
+ filter: {ns: mongosColl.getFullName(), "command.comment": "testing targeting"}
+});
+
+// Test that the change stream continues on the original host, but the update lookup now targets
+// the new, lagged secondary. Even though it's lagged, the lookup should use 'afterClusterTime'
+// to ensure it does not return until the node can see the change it's looking up.
+stopServerReplication(newClosestSecondary);
+assert.writeOK(mongosColl.update({_id: 1}, {$set: {updatedCount: 2}}));
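+
+// For illustration only (hypothetical shape, not a command this test issues directly): the
+// update lookup is logically a causally-consistent find against the tagged secondary, e.g.
+//   {find: <collName>, filter: {_id: 1},
+//    readConcern: {level: "majority", afterClusterTime: <clusterTime of the update above>}}
+// which cannot return until the lagged secondary has replicated past that cluster time.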
+
+// Since we stopped replication, we expect the update lookup to block indefinitely until we
+// resume replication, so we resume replication in a parallel shell while this thread is blocked
+// getting the next change from the stream.
+const noConnect = true; // This shell creates its own connection to the host.
+const joinResumeReplicationShell =
startParallelShell(`load('jstests/libs/write_concern_util.js');
const pausedSecondary = new Mongo("${newClosestSecondary.host}");
@@ -194,26 +194,27 @@
restartServerReplication(pausedSecondary);`,
undefined,
noConnect);
- assert.soon(() => changeStream.hasNext());
- latestChange = changeStream.next();
- assert.eq(latestChange.operationType, "update");
- assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 2});
- joinResumeReplicationShell();
-
- // Test that the update lookup goes to the new closest secondary.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: newClosestSecondaryDB,
- filter: {
- op: "query",
- ns: mongosColl.getFullName(), "command.comment": changeStreamComment,
- // We need to filter out any profiler entries with a stale config - this is the first
- // read on this secondary with a readConcern specified, so it is the first read on this
- // secondary that will enforce shard version.
- errCode: {$ne: ErrorCodes.StaleConfig}
- }
- });
-
- changeStream.close();
- st.stop();
- rst.stopSet();
+assert.soon(() => changeStream.hasNext());
+latestChange = changeStream.next();
+assert.eq(latestChange.operationType, "update");
+assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 2});
+joinResumeReplicationShell();
+
+// Test that the update lookup goes to the new closest secondary.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: newClosestSecondaryDB,
+ filter: {
+ op: "query",
+ ns: mongosColl.getFullName(),
+ "command.comment": changeStreamComment,
+ // We need to filter out any profiler entries with a stale config - this is the first
+ // read on this secondary with a readConcern specified, so it is the first read on this
+ // secondary that will enforce shard version.
+ errCode: {$ne: ErrorCodes.StaleConfig}
+ }
+});
+
+changeStream.close();
+st.stop();
+rst.stopSet();
}());
diff --git a/jstests/sharding/change_streams.js b/jstests/sharding/change_streams.js
index 98039231687..08c075c1e18 100644
--- a/jstests/sharding/change_streams.js
+++ b/jstests/sharding/change_streams.js
@@ -1,252 +1,249 @@
// Tests the behavior of change streams on sharded collections.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
- load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- function runTest(collName, shardKey) {
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Intentionally disable the periodic no-op writer in order to allow the test have
- // control of advancing the cluster time. For when it is enabled later in the test,
- // use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: false}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- assert.commandWorked(st.s0.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- const mongosColl = mongosDB[collName];
-
- //
- // Sanity tests
- //
-
- // Test that $sort and $group are banned from running in a $changeStream pipeline.
- assertErrorCode(mongosDB.NegativeTest,
- [{$changeStream: {}}, {$sort: {operationType: 1}}],
- ErrorCodes.IllegalOperation);
- assertErrorCode(mongosDB.NegativeTest,
- [{$changeStream: {}}, {$group: {_id: '$documentKey'}}],
- ErrorCodes.IllegalOperation);
-
- // Test that using change streams with $out results in an error.
- assertErrorCode(
- mongosColl, [{$changeStream: {}}, {$out: "shouldntWork"}], ErrorCodes.IllegalOperation);
-
- //
- // Main tests
- //
-
- function makeShardKey(value) {
- var obj = {};
- obj[shardKey] = value;
- return obj;
- }
-
- function makeShardKeyDocument(value, optExtraFields) {
- var obj = {};
- if (shardKey !== '_id')
- obj['_id'] = value;
- obj[shardKey] = value;
- return Object.assign(obj, optExtraFields);
+"use strict";
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
+load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+function runTest(collName, shardKey) {
+ const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+            // Intentionally disable the periodic no-op writer in order to allow the test to
+            // control advancing the cluster time. When it is enabled later in the test, use
+            // a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: false}
}
+ });
+
+ const mongosDB = st.s0.getDB(jsTestName());
+ assert.commandWorked(st.s0.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+ const mongosColl = mongosDB[collName];
+
+ //
+ // Sanity tests
+ //
+
+ // Test that $sort and $group are banned from running in a $changeStream pipeline.
+ assertErrorCode(mongosDB.NegativeTest,
+ [{$changeStream: {}}, {$sort: {operationType: 1}}],
+ ErrorCodes.IllegalOperation);
+ assertErrorCode(mongosDB.NegativeTest,
+ [{$changeStream: {}}, {$group: {_id: '$documentKey'}}],
+ ErrorCodes.IllegalOperation);
+
+ // Test that using change streams with $out results in an error.
+ assertErrorCode(
+ mongosColl, [{$changeStream: {}}, {$out: "shouldntWork"}], ErrorCodes.IllegalOperation);
+
+ //
+ // Main tests
+ //
+
+ function makeShardKey(value) {
+ var obj = {};
+ obj[shardKey] = value;
+ return obj;
+ }
- jsTestLog('Testing change streams with shard key ' + shardKey);
- // Shard the test collection and split it into 2 chunks:
- // [MinKey, 0) - shard0, [0, MaxKey) - shard1
- st.shardColl(mongosColl,
- makeShardKey(1) /* shard key */,
- makeShardKey(0) /* split at */,
- makeShardKey(1) /* move to shard 1 */);
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1)));
-
- let changeStream = mongosColl.aggregate([{$changeStream: {}}]);
-
- // Test that a change stream can see inserts on shard 0.
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1000)));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1000)));
-
- assert.soon(() => changeStream.hasNext(), "expected to be able to see the first insert");
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(1000),
- fullDocument: makeShardKeyDocument(1000),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- // Because the periodic noop writer is disabled, do another write to shard 0 in order to
- // advance that shard's clock and enabling the stream to return the earlier write to shard 1
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1001)));
-
- assert.soon(() => changeStream.hasNext(), "expected to be able to see the second insert");
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(-1000),
- fullDocument: makeShardKeyDocument(-1000),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- // Test that all changes are eventually visible due to the periodic noop writer.
- assert.commandWorked(
- st.rs0.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
- assert.commandWorked(
- st.rs1.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(1001),
- fullDocument: makeShardKeyDocument(1001),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
- changeStream.close();
-
- jsTestLog('Testing multi-update change streams with shard key ' + shardKey);
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
- changeStream = mongosColl.aggregate([{$changeStream: {}}]);
-
- assert.writeOK(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- operationType: "update",
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- documentKey: makeShardKeyDocument(-10),
- updateDescription: {updatedFields: {b: 2}, removedFields: []},
- });
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- operationType: "update",
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- documentKey: makeShardKeyDocument(10),
- updateDescription: {updatedFields: {b: 2}, removedFields: []},
- });
- changeStream.close();
-
- // Test that it is legal to open a change stream, even if the
- // 'internalQueryProhibitMergingOnMongos' parameter is set.
- assert.commandWorked(
- st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: true}));
- let tempCursor = assert.doesNotThrow(() => mongosColl.aggregate([{$changeStream: {}}]));
- tempCursor.close();
- assert.commandWorked(
- st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: false}));
-
- assert.writeOK(mongosColl.remove({}));
- // We awaited the replication of the first write, so the change stream shouldn't return it.
- // Use { w: "majority" } to deal with journaling correctly, even though we only have one
- // node.
- assert.writeOK(
- mongosColl.insert(makeShardKeyDocument(0, {a: 1}), {writeConcern: {w: "majority"}}));
-
- changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext());
-
- // Drop the collection and test that we return a "drop" followed by an "invalidate" entry
- // and close the cursor.
- jsTestLog('Testing getMore command closes cursor for invalidate entries with shard key ' +
- shardKey);
- mongosColl.drop();
- // Wait for the drop to actually happen.
- assert.soon(() => !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(
- mongosColl.getDB(), mongosColl.getName()));
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "drop");
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "invalidate");
- assert(changeStream.isExhausted());
-
- jsTestLog('Testing aggregate command closes cursor for invalidate entries with shard key ' +
- shardKey);
- // Shard the test collection and split it into 2 chunks:
- // [MinKey, 0) - shard0, [0, MaxKey) - shard1
- st.shardColl(mongosColl,
- makeShardKey(1) /* shard key */,
- makeShardKey(0) /* split at */,
- makeShardKey(1) /* move to shard 1 */);
-
- // Write one document to each chunk.
- assert.writeOK(
- mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
-
- changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext());
-
- // Store a valid resume token before dropping the collection, to be used later in the test.
- assert.writeOK(
- mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
-
- assert.soon(() => changeStream.hasNext());
- const resumeToken = changeStream.next()._id;
-
- mongosColl.drop();
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(2),
- fullDocument: makeShardKeyDocument(2),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "drop");
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "invalidate");
-
- // With an explicit collation, test that we can resume from before the collection drop.
- changeStream =
- mongosColl.watch([], {resumeAfter: resumeToken, collation: {locale: "simple"}});
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: makeShardKeyDocument(2),
- fullDocument: makeShardKeyDocument(2),
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "drop");
-
- assert.soon(() => changeStream.hasNext());
- assert.eq(changeStream.next().operationType, "invalidate");
-
- // Test that we can resume from before the collection drop without an explicit collation.
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {}
- }));
-
- st.stop();
+ function makeShardKeyDocument(value, optExtraFields) {
+ var obj = {};
+ if (shardKey !== '_id')
+ obj['_id'] = value;
+ obj[shardKey] = value;
+ return Object.assign(obj, optExtraFields);
}
- runTest('with_id_shard_key', '_id');
- runTest('with_non_id_shard_key', 'non_id');
+ jsTestLog('Testing change streams with shard key ' + shardKey);
+ // Shard the test collection and split it into 2 chunks:
+ // [MinKey, 0) - shard0, [0, MaxKey) - shard1
+ st.shardColl(mongosColl,
+ makeShardKey(1) /* shard key */,
+ makeShardKey(0) /* split at */,
+ makeShardKey(1) /* move to shard 1 */);
+
+ // Write a document to each chunk.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1)));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1)));
+
+ let changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+
+ // Test that a change stream can see inserts on shard 0.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1000)));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1000)));
+
+ assert.soon(() => changeStream.hasNext(), "expected to be able to see the first insert");
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(1000),
+ fullDocument: makeShardKeyDocument(1000),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ // Because the periodic noop writer is disabled, do another write to shard 0 in order to
+ // advance that shard's clock and enable the stream to return the earlier write to shard 1.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1001)));
+
+ assert.soon(() => changeStream.hasNext(), "expected to be able to see the second insert");
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(-1000),
+ fullDocument: makeShardKeyDocument(-1000),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ // Test that all changes are eventually visible due to the periodic noop writer.
+ assert.commandWorked(
+ st.rs0.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
+ assert.commandWorked(
+ st.rs1.getPrimary().adminCommand({setParameter: 1, writePeriodicNoops: true}));
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(1001),
+ fullDocument: makeShardKeyDocument(1001),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+ changeStream.close();
+
+ jsTestLog('Testing multi-update change streams with shard key ' + shardKey);
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(10, {a: 0, b: 0})));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-10, {a: 0, b: 0})));
+ changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+
+ assert.writeOK(mongosColl.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ operationType: "update",
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ documentKey: makeShardKeyDocument(-10),
+ updateDescription: {updatedFields: {b: 2}, removedFields: []},
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ operationType: "update",
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ documentKey: makeShardKeyDocument(10),
+ updateDescription: {updatedFields: {b: 2}, removedFields: []},
+ });
+ changeStream.close();
+
+ // Test that it is legal to open a change stream, even if the
+ // 'internalQueryProhibitMergingOnMongoS' parameter is set.
+ assert.commandWorked(
+ st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: true}));
+ let tempCursor = assert.doesNotThrow(() => mongosColl.aggregate([{$changeStream: {}}]));
+ tempCursor.close();
+ assert.commandWorked(
+ st.s0.adminCommand({setParameter: 1, internalQueryProhibitMergingOnMongoS: false}));
+
+ assert.writeOK(mongosColl.remove({}));
+ // We awaited the replication of the first write, so the change stream shouldn't return it.
+ // Use { w: "majority" } to deal with journaling correctly, even though we only have one
+ // node.
+ assert.writeOK(
+ mongosColl.insert(makeShardKeyDocument(0, {a: 1}), {writeConcern: {w: "majority"}}));
+
+ changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+ assert(!changeStream.hasNext());
+
+ // Drop the collection and test that we return a "drop" followed by an "invalidate" entry
+ // and close the cursor.
+ jsTestLog('Testing getMore command closes cursor for invalidate entries with shard key ' +
+ shardKey);
+ mongosColl.drop();
+ // Wait for the drop to actually happen.
+ assert.soon(() => !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(
+ mongosColl.getDB(), mongosColl.getName()));
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "drop");
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "invalidate");
+ assert(changeStream.isExhausted());
+
+ jsTestLog('Testing aggregate command closes cursor for invalidate entries with shard key ' +
+ shardKey);
+ // Shard the test collection and split it into 2 chunks:
+ // [MinKey, 0) - shard0, [0, MaxKey) - shard1
+ st.shardColl(mongosColl,
+ makeShardKey(1) /* shard key */,
+ makeShardKey(0) /* split at */,
+ makeShardKey(1) /* move to shard 1 */);
+
+ // Write one document to each chunk.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-1), {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(1), {writeConcern: {w: "majority"}}));
+
+ changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+ assert(!changeStream.hasNext());
+
+ // Store a valid resume token before dropping the collection, to be used later in the test.
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(-2), {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert(makeShardKeyDocument(2), {writeConcern: {w: "majority"}}));
+
+ assert.soon(() => changeStream.hasNext());
+ const resumeToken = changeStream.next()._id;
+
+ mongosColl.drop();
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(2),
+ fullDocument: makeShardKeyDocument(2),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "drop");
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "invalidate");
+
+ // With an explicit collation, test that we can resume from before the collection drop.
+ changeStream = mongosColl.watch([], {resumeAfter: resumeToken, collation: {locale: "simple"}});
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: makeShardKeyDocument(2),
+ fullDocument: makeShardKeyDocument(2),
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "drop");
+
+ assert.soon(() => changeStream.hasNext());
+ assert.eq(changeStream.next().operationType, "invalidate");
+
+ // Test that we can resume from before the collection drop without an explicit collation.
+ assert.commandWorked(mongosDB.runCommand({
+ aggregate: mongosColl.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {}
+ }));
+
+ st.stop();
+}
+
+runTest('with_id_shard_key', '_id');
+runTest('with_non_id_shard_key', 'non_id');
})();
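The hunk above is a pure re-indentation of the change-stream assertions; the underlying idiom is: open the stream, write, poll with assert.soon(), then compare the event. A minimal standalone sketch of that idiom, assuming a shell connected to a live deployment and the assertChangeStreamEventEq helper from jstests/libs/change_stream_util.js ('sketchColl' is a hypothetical collection used only for illustration):

    load('jstests/libs/change_stream_util.js');  // For assertChangeStreamEventEq.

    const sketchColl = db.change_stream_sketch;  // Hypothetical collection.
    sketchColl.drop();

    const stream = sketchColl.aggregate([{$changeStream: {}}]);
    assert.writeOK(sketchColl.insert({_id: 1}));

    // Events arrive asynchronously, so poll rather than asserting immediately.
    assert.soon(() => stream.hasNext(), "expected to see the insert");
    assertChangeStreamEventEq(stream.next(), {
        documentKey: {_id: 1},
        fullDocument: {_id: 1},
        ns: {db: db.getName(), coll: sketchColl.getName()},
        operationType: "insert",
    });
    stream.close();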
diff --git a/jstests/sharding/change_streams_establishment_finds_new_shards.js b/jstests/sharding/change_streams_establishment_finds_new_shards.js
index 45bc1583e46..146fc166d50 100644
--- a/jstests/sharding/change_streams_establishment_finds_new_shards.js
+++ b/jstests/sharding/change_streams_establishment_finds_new_shards.js
@@ -2,50 +2,48 @@
// during cursor establishment.
// @tags: [uses_change_streams]
(function() {
- 'use strict';
+'use strict';
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const rsNodeOptions = {
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- };
- const st =
- new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+const rsNodeOptions = {
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+};
+const st =
+ new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
- jsTestLog("Starting new shard (but not adding to shard set yet)");
- const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
- newShard.startSet({shardsvr: ''});
- newShard.initiate();
+jsTestLog("Starting new shard (but not adding to shard set yet)");
+const newShard = new ReplSetTest({name: "newShard", nodes: 1, nodeOptions: rsNodeOptions});
+newShard.startSet({shardsvr: ''});
+newShard.initiate();
- const mongos = st.s;
- const mongosColl = mongos.getCollection('test.foo');
- const mongosDB = mongos.getDB("test");
+const mongos = st.s;
+const mongosColl = mongos.getCollection('test.foo');
+const mongosDB = mongos.getDB("test");
- // Enable sharding to inform mongos of the database, allowing us to open a cursor.
- assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
+// Enable sharding to inform mongos of the database, allowing us to open a cursor.
+assert.commandWorked(mongos.adminCommand({enableSharding: mongosDB.getName()}));
- // Shard the collection.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+// Shard the collection.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- // Split the collection into two chunks: [MinKey, 10) and [10, MaxKey).
- assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
+// Split the collection into two chunks: [MinKey, 10) and [10, MaxKey).
+assert.commandWorked(mongos.adminCommand({split: mongosColl.getFullName(), middle: {_id: 10}}));
- // Enable the failpoint.
- assert.commandWorked(mongos.adminCommand({
- configureFailPoint: "clusterAggregateHangBeforeEstablishingShardCursors",
- mode: "alwaysOn"
- }));
+// Enable the failpoint.
+assert.commandWorked(mongos.adminCommand(
+ {configureFailPoint: "clusterAggregateHangBeforeEstablishingShardCursors", mode: "alwaysOn"}));
- // While opening the cursor, wait for the failpoint and add the new shard.
- const awaitNewShard = startParallelShell(`
+// While opening the cursor, wait for the failpoint and add the new shard.
+const awaitNewShard = startParallelShell(`
load("jstests/libs/check_log.js");
checkLog.contains(db,
"clusterAggregateHangBeforeEstablishingShardCursors fail point enabled");
@@ -62,27 +60,27 @@
mode: "off"}));`,
mongos.port);
- jsTestLog("Opening $changeStream cursor");
- const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
- assert(!changeStream.hasNext(), "Do not expect any results yet");
+jsTestLog("Opening $changeStream cursor");
+const changeStream = mongosColl.aggregate([{$changeStream: {}}]);
+assert(!changeStream.hasNext(), "Do not expect any results yet");
- // Clean up the parallel shell.
- awaitNewShard();
+// Clean up the parallel shell.
+awaitNewShard();
- // Insert two documents in different shards.
- assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
+// Insert two documents in different shards.
+assert.writeOK(mongosColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.writeOK(mongosColl.insert({_id: 20}, {writeConcern: {w: "majority"}}));
- // Expect to see them both.
- for (let id of[0, 20]) {
- jsTestLog("Expecting Item " + id);
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: id});
- }
- assert(!changeStream.hasNext());
+// Expect to see them both.
+for (let id of [0, 20]) {
+ jsTestLog("Expecting Item " + id);
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: id});
+}
+assert(!changeStream.hasNext());
- st.stop();
- newShard.stopSet();
+st.stop();
+newShard.stopSet();
})();
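As context for the hunk above: the test hangs cursor establishment with a failpoint, adds a shard from a parallel shell, and then releases the failpoint. The failpoint half of that choreography follows the standard configureFailPoint on/off pattern; a sketch, where 'conn' is a placeholder for any mongos connection:

    // Enable the failpoint (name taken from the test above).
    assert.commandWorked(conn.adminCommand({
        configureFailPoint: "clusterAggregateHangBeforeEstablishingShardCursors",
        mode: "alwaysOn"
    }));
    // ... start the operation expected to hang, e.g. via startParallelShell() ...
    // Disable it again so the hung operation can proceed.
    assert.commandWorked(conn.adminCommand({
        configureFailPoint: "clusterAggregateHangBeforeEstablishingShardCursors",
        mode: "off"
    }));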
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index 1fdb86564ae..b325f770585 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -5,183 +5,180 @@
// SERVER-36321.
// @tags: [requires_persistence, blacklist_from_rhel_67_s390x, uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- // TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
- if (!jsTestOptions().enableMajorityReadConcern &&
- jsTestOptions().mongosBinVersion === 'last-stable') {
- jsTestLog(
- "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
- return;
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- // Returns true if the shard is aware that the collection is sharded.
- function isShardAware(shard, coll) {
- const res = shard.adminCommand({getShardVersion: coll, fullMetadata: true});
- assert.commandWorked(res);
- return res.metadata.collVersion != undefined;
- }
-
- const testName = "change_streams_primary_shard_unaware";
- const st = new ShardingTest({
- shards: 2,
- mongos: 3,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true},
- },
- });
-
- const mongosDB = st.s0.getDB(testName);
-
- // Ensure that shard0 is the primary shard.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Create unsharded collection on primary shard.
- const mongosColl = mongosDB[testName];
- assert.commandWorked(mongosDB.createCollection(testName));
-
- // Before sharding the collection, issue a write through mongos2 to ensure that it knows the
- // collection exists and believes it is unsharded. This is needed later in the test to avoid
- // triggering a refresh when a change stream is established through mongos2.
- const mongos2DB = st.s2.getDB(testName);
- const mongos2Coll = mongos2DB[testName];
- assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
-
- // Create index on the shard key.
- assert.commandWorked(mongos2Coll.createIndex({a: 1}));
-
- // Shard the collection.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {a: 1}}));
-
- // Restart the primary shard and ensure that it is no longer aware that the collection is
- // sharded.
- st.restartShardRS(0);
- assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- const mongos1DB = st.s1.getDB(testName);
- const mongos1Coll = mongos1DB[testName];
-
- // Establish change stream cursor on the second mongos, which is not aware that the
- // collection is sharded.
- let cstMongos1 = new ChangeStreamTest(mongos1DB);
- let cursorMongos1 = cstMongos1.startWatchingChanges(
- {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongos1Coll});
- assert.eq(0, cursorMongos1.firstBatch.length, "Cursor had changes: " + tojson(cursorMongos1));
-
- // Establish a change stream cursor on the now sharded collection through the first mongos.
- let cst = new ChangeStreamTest(mongosDB);
- let cursor = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongosColl});
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Ensure that the primary shard is still unaware that the collection is sharded.
- assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- // Insert a doc and verify that the primary shard is now aware that the collection is sharded.
- assert.writeOK(mongosColl.insert({_id: 1, a: 1}));
- assert.eq(true, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- // Verify that both cursors are able to pick up an inserted document.
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [{
- documentKey: {_id: 1, a: 1},
- fullDocument: {_id: 1, a: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }]
- });
- let mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
- assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
- assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.fullDocument);
- assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
- assert.eq("insert", mongos1ChangeDoc.operationType);
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {a: 0}}));
-
- // Move a chunk to the non-primary shard.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {a: -1},
- to: st.rs1.getURL(),
- _waitForDelete: true
- }));
-
- // Update the document on the primary shard.
- assert.writeOK(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
- // Insert another document to each shard.
- assert.writeOK(mongosColl.insert({_id: -2, a: -2}));
- assert.writeOK(mongosColl.insert({_id: 2, a: 2}));
-
- // Verify that both cursors pick up the first inserted doc regardless of the moveChunk
- // operation.
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [{
- documentKey: {_id: 1, a: 1},
- fullDocument: {_id: 1, a: 1, b: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 1}}
- }]
- });
- mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
- assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
- assert.docEq({_id: 1, a: 1, b: 1}, mongos1ChangeDoc.fullDocument);
- assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
- assert.eq("update", mongos1ChangeDoc.operationType);
-
- // Restart the primary shard and ensure that it is no longer aware that the collection is
- // sharded.
- st.restartShardRS(0);
- assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
-
- // Establish change stream cursor on mongos2 using the resume token from the change stream on
- // mongos1. Mongos2 is aware that the collection exists and thinks that it's unsharded, so it
- // won't trigger a routing table refresh. This must be done using a resume token from an update;
- // otherwise the shard will generate the documentKey based on the assumption that the shard key
- // is _id, which will cause the cursor establishment to fail due to SERVER-32085.
- let cstMongos2 = new ChangeStreamTest(mongos2DB);
- let cursorMongos2 = cstMongos2.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: mongos1ChangeDoc._id}}],
- collection: mongos2Coll
- });
-
- cstMongos2.assertNextChangesEqual({
- cursor: cursorMongos2,
- expectedChanges: [{
- documentKey: {_id: -2, a: -2},
- fullDocument: {_id: -2, a: -2},
- ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
- operationType: "insert",
- }]
- });
-
- cstMongos2.assertNextChangesEqual({
- cursor: cursorMongos2,
- expectedChanges: [{
- documentKey: {_id: 2, a: 2},
- fullDocument: {_id: 2, a: 2},
- ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
- operationType: "insert",
- }]
- });
-
- st.stop();
-
+"use strict";
+
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+// TODO (SERVER-38673): Remove this once BACKPORT-3428, BACKPORT-3429 are completed.
+if (!jsTestOptions().enableMajorityReadConcern &&
+ jsTestOptions().mongosBinVersion === 'last-stable') {
+ jsTestLog(
+ "Skipping test since 'last-stable' mongos doesn't support speculative majority update lookup queries.");
+ return;
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+// Returns true if the shard is aware that the collection is sharded.
+function isShardAware(shard, coll) {
+ const res = shard.adminCommand({getShardVersion: coll, fullMetadata: true});
+ assert.commandWorked(res);
+ return res.metadata.collVersion != undefined;
+}
+
+const testName = "change_streams_primary_shard_unaware";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 3,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true},
+ },
+});
+
+const mongosDB = st.s0.getDB(testName);
+
+// Ensure that shard0 is the primary shard.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Create unsharded collection on primary shard.
+const mongosColl = mongosDB[testName];
+assert.commandWorked(mongosDB.createCollection(testName));
+
+// Before sharding the collection, issue a write through mongos2 to ensure that it knows the
+// collection exists and believes it is unsharded. This is needed later in the test to avoid
+// triggering a refresh when a change stream is established through mongos2.
+const mongos2DB = st.s2.getDB(testName);
+const mongos2Coll = mongos2DB[testName];
+assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+
+// Create index on the shard key.
+assert.commandWorked(mongos2Coll.createIndex({a: 1}));
+
+// Shard the collection.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {a: 1}}));
+
+// Restart the primary shard and ensure that it is no longer aware that the collection is
+// sharded.
+st.restartShardRS(0);
+assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+const mongos1DB = st.s1.getDB(testName);
+const mongos1Coll = mongos1DB[testName];
+
+// Establish change stream cursor on the second mongos, which is not aware that the
+// collection is sharded.
+let cstMongos1 = new ChangeStreamTest(mongos1DB);
+let cursorMongos1 = cstMongos1.startWatchingChanges(
+ {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongos1Coll});
+assert.eq(0, cursorMongos1.firstBatch.length, "Cursor had changes: " + tojson(cursorMongos1));
+
+// Establish a change stream cursor on the now sharded collection through the first mongos.
+let cst = new ChangeStreamTest(mongosDB);
+let cursor = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {fullDocument: "updateLookup"}}], collection: mongosColl});
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+// Ensure that the primary shard is still unaware that the collection is sharded.
+assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+// Insert a doc and verify that the primary shard is now aware that the collection is sharded.
+assert.writeOK(mongosColl.insert({_id: 1, a: 1}));
+assert.eq(true, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+// Verify that both cursors are able to pick up an inserted document.
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [{
+ documentKey: {_id: 1, a: 1},
+ fullDocument: {_id: 1, a: 1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ }]
+});
+let mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
+assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
+assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.fullDocument);
+assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
+assert.eq("insert", mongos1ChangeDoc.operationType);
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {a: 0}}));
+
+// Move a chunk to the non-primary shard.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: -1},
+ to: st.rs1.getURL(),
+ _waitForDelete: true
+}));
+
+// Update the document on the primary shard.
+assert.writeOK(mongosColl.update({_id: 1, a: 1}, {$set: {b: 1}}));
+// Insert another document to each shard.
+assert.writeOK(mongosColl.insert({_id: -2, a: -2}));
+assert.writeOK(mongosColl.insert({_id: 2, a: 2}));
+
+// Verify that both cursors pick up the first inserted doc regardless of the moveChunk
+// operation.
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [{
+ documentKey: {_id: 1, a: 1},
+ fullDocument: {_id: 1, a: 1, b: 1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {b: 1}}
+ }]
+});
+mongos1ChangeDoc = cstMongos1.getOneChange(cursorMongos1);
+assert.docEq({_id: 1, a: 1}, mongos1ChangeDoc.documentKey);
+assert.docEq({_id: 1, a: 1, b: 1}, mongos1ChangeDoc.fullDocument);
+assert.eq({db: mongos1DB.getName(), coll: mongos1Coll.getName()}, mongos1ChangeDoc.ns);
+assert.eq("update", mongos1ChangeDoc.operationType);
+
+// Restart the primary shard and ensure that it is no longer aware that the collection is
+// sharded.
+st.restartShardRS(0);
+assert.eq(false, isShardAware(st.rs0.getPrimary(), mongosColl.getFullName()));
+
+// Establish change stream cursor on mongos2 using the resume token from the change stream on
+// mongos1. Mongos2 is aware that the collection exists and thinks that it's unsharded, so it
+// won't trigger a routing table refresh. This must be done using a resume token from an update;
+// otherwise the shard will generate the documentKey based on the assumption that the shard key
+// is _id, which will cause the cursor establishment to fail due to SERVER-32085.
+let cstMongos2 = new ChangeStreamTest(mongos2DB);
+let cursorMongos2 = cstMongos2.startWatchingChanges(
+ {pipeline: [{$changeStream: {resumeAfter: mongos1ChangeDoc._id}}], collection: mongos2Coll});
+
+cstMongos2.assertNextChangesEqual({
+ cursor: cursorMongos2,
+ expectedChanges: [{
+ documentKey: {_id: -2, a: -2},
+ fullDocument: {_id: -2, a: -2},
+ ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
+ operationType: "insert",
+ }]
+});
+
+cstMongos2.assertNextChangesEqual({
+ cursor: cursorMongos2,
+ expectedChanges: [{
+ documentKey: {_id: 2, a: 2},
+ fullDocument: {_id: 2, a: 2},
+ ns: {db: mongos2DB.getName(), coll: mongos2Coll.getName()},
+ operationType: "insert",
+ }]
+});
+
+st.stop();
})();
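The resume logic exercised above reduces to a small idiom: every change event carries its resume token in _id, and a stream opened with resumeAfter replays everything that happened after that event. A sketch, assuming 'coll' is any collection handle on a deployment that supports change streams:

    // Open a stream, remember one event's token, and resume from it later.
    let stream = coll.watch();
    assert.writeOK(coll.insert({_id: "marker"}));
    assert.soon(() => stream.hasNext());
    const resumeToken = stream.next()._id;  // The event's resume token.
    stream.close();

    // The resumed stream returns only events after the remembered one.
    stream = coll.watch([], {resumeAfter: resumeToken});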
diff --git a/jstests/sharding/change_streams_shards_start_in_sync.js b/jstests/sharding/change_streams_shards_start_in_sync.js
index 9209ad4ea70..3928913a1bb 100644
--- a/jstests/sharding/change_streams_shards_start_in_sync.js
+++ b/jstests/sharding/change_streams_shards_start_in_sync.js
@@ -7,109 +7,108 @@
// and 'B' will be seen in the change stream before 'C'.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ useBridge: true,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+});
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- useBridge: true,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey) chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- function checkStream() {
- load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
-
- db = db.getSiblingDB(jsTestName());
- let coll = db[jsTestName()];
- let changeStream = coll.aggregate([{$changeStream: {}}]);
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: {_id: -1000},
- fullDocument: {_id: -1000},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: {_id: 1001},
- fullDocument: {_id: 1001},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- });
-
- assert.soon(() => changeStream.hasNext());
- assertChangeStreamEventEq(changeStream.next(), {
- documentKey: {_id: -1002},
- fullDocument: {_id: -1002},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- });
- changeStream.close();
- }
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- // Start the $changeStream with shard 1 unavailable on the second mongos (s1). We will be
- // writing through the first mongos (s0), which will remain connected to all shards.
- st.rs1.getPrimary().disconnect(st.s1);
- let waitForShell = startParallelShell(checkStream, st.s1.port);
-
- // Wait for the aggregate cursor to appear in currentOp on the given shard.
- function waitForShardCursor(rs) {
- assert.soon(() => rs.getPrimary()
- .getDB('admin')
- .aggregate([
- {"$currentOp": {"idleCursors": true}},
- {"$match": {ns: mongosColl.getFullName(), type: "idleCursor"}}
- ])
- .itcount() === 1);
- }
- // Make sure the shard 0 $changeStream cursor is established before doing the first writes.
- waitForShardCursor(st.rs0);
+// Move the [0, MaxKey) chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- assert.writeOK(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
+function checkStream() {
+ load('jstests/libs/change_stream_util.js'); // For assertChangeStreamEventEq.
- // This write to shard 1 occurs before the $changeStream cursor on shard 1 is open, because the
- // mongos where the $changeStream is running is disconnected from shard 1.
- assert.writeOK(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
+ db = db.getSiblingDB(jsTestName());
+ let coll = db[jsTestName()];
+ let changeStream = coll.aggregate([{$changeStream: {}}]);
- jsTestLog("Reconnecting");
- st.rs1.getPrimary().reconnect(st.s1);
- waitForShardCursor(st.rs1);
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: {_id: -1000},
+ fullDocument: {_id: -1000},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ });
+
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: {_id: 1001},
+ fullDocument: {_id: 1001},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ });
- assert.writeOK(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
- waitForShell();
- st.stop();
+ assert.soon(() => changeStream.hasNext());
+ assertChangeStreamEventEq(changeStream.next(), {
+ documentKey: {_id: -1002},
+ fullDocument: {_id: -1002},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ });
+ changeStream.close();
+}
+
+// Start the $changeStream with shard 1 unavailable on the second mongos (s1). We will be
+// writing through the first mongos (s0), which will remain connected to all shards.
+st.rs1.getPrimary().disconnect(st.s1);
+let waitForShell = startParallelShell(checkStream, st.s1.port);
+
+// Wait for the aggregate cursor to appear in currentOp on the given shard.
+function waitForShardCursor(rs) {
+ assert.soon(() => rs.getPrimary()
+ .getDB('admin')
+ .aggregate([
+ {"$currentOp": {"idleCursors": true}},
+ {"$match": {ns: mongosColl.getFullName(), type: "idleCursor"}}
+ ])
+ .itcount() === 1);
+}
+// Make sure the shard 0 $changeStream cursor is established before doing the first writes.
+waitForShardCursor(st.rs0);
+
+assert.writeOK(mongosColl.insert({_id: -1000}, {writeConcern: {w: "majority"}}));
+
+// This write to shard 1 occurs before the $changeStream cursor on shard 1 is open, because the
+// mongos where the $changeStream is running is disconnected from shard 1.
+assert.writeOK(mongosColl.insert({_id: 1001}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("Reconnecting");
+st.rs1.getPrimary().reconnect(st.s1);
+waitForShardCursor(st.rs1);
+
+assert.writeOK(mongosColl.insert({_id: -1002}, {writeConcern: {w: "majority"}}));
+waitForShell();
+st.stop();
})();
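The waitForShardCursor() helper above is an instance of a general probe: counting idle cursors on a namespace through the $currentOp aggregation stage. A standalone sketch, where 'primary' and 'ns' are placeholders for a shard primary connection and the namespace under test:

    // Count idle cursors currently open against a namespace on one node.
    function countIdleCursors(primary, ns) {
        return primary.getDB("admin")
            .aggregate([
                {$currentOp: {idleCursors: true}},
                {$match: {ns: ns, type: "idleCursor"}}
            ])
            .itcount();
    }

    // Block until exactly one idle cursor (the change stream) is established.
    assert.soon(() => countIdleCursors(primary, ns) === 1);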
diff --git a/jstests/sharding/change_streams_unsharded_becomes_sharded.js b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
index e865fb709d5..c28e19c9520 100644
--- a/jstests/sharding/change_streams_unsharded_becomes_sharded.js
+++ b/jstests/sharding/change_streams_unsharded_becomes_sharded.js
@@ -5,190 +5,188 @@
// sharded.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
+
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const testName = "change_streams_unsharded_becomes_sharded";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ }
+});
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+const mongosDB = st.s0.getDB("test");
+const mongosColl = mongosDB[testName];
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+function testUnshardedBecomesSharded(collToWatch) {
+ mongosColl.drop();
+ mongosDB.createCollection(testName);
+ mongosColl.createIndex({x: 1});
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+ // Establish a change stream cursor on the unsharded collection.
+ const cst = new ChangeStreamTest(mongosDB);
+
+ // Create a different collection in the same database, and verify that it doesn't affect the
+ // results of the change stream.
+ const mongosCollOther = mongosDB[testName + "other"];
+ mongosCollOther.drop();
+ mongosDB.createCollection(testName + "other");
+ mongosCollOther.createIndex({y: 1});
+
+ let cursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
+ collection: collToWatch
+ });
+ assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+ // Verify that the cursor picks up documents inserted while the collection is unsharded. The
+ // 'documentKey' at this point is simply the _id field.
+ assert.writeOK(mongosColl.insert({_id: 0, x: 0}));
+ assert.writeOK(mongosCollOther.insert({_id: 0, y: 0}));
+ const [preShardCollectionChange] = cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [{
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, x: 0},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ }]
+ });
- const testName = "change_streams_unsharded_becomes_sharded";
- const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ // Record the resume token for this change, before the collection is sharded.
+ const preShardCollectionResumeToken = preShardCollectionChange._id;
+
+ // Shard the test collection with shard key {x: 1} and split into 2 chunks.
+ st.shardColl(mongosColl.getName(), {x: 1}, {x: 0}, false, mongosDB.getName());
+
+ // Shard the other collection with shard key {y: 1} and split into 2 chunks.
+ st.shardColl(mongosCollOther.getName(), {y: 1}, {y: 0}, false, mongosDB.getName());
+
+ // List the changes we expect to see for the next two operations on the sharded collection.
+ // Later, we will resume the stream using the token generated before the collection was
+ // sharded, and will need to confirm that we can still see these two changes.
+ const postShardCollectionChanges = [
+ {
+ documentKey: {x: 1, _id: 1},
+ fullDocument: {_id: 1, x: 1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {x: -1, _id: -1},
+ fullDocument: {_id: -1, x: -1},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
}
+ ];
+
+ // Verify that the cursor on the original shard is still valid and sees newly inserted
+ // documents. The 'documentKey' field should now include the shard key, even before a
+ // 'kNewShardDetected' operation has been generated by the migration of a chunk to a new
+ // shard.
+ assert.writeOK(mongosColl.insert({_id: 1, x: 1}));
+ assert.writeOK(mongosCollOther.insert({_id: 1, y: 1}));
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[0]]});
+
+ // Move the [minKey, 0) chunk to shard1.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {x: -1},
+ to: st.rs1.getURL(),
+ _waitForDelete: true
+ }));
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosCollOther.getFullName(),
+ find: {y: -1},
+ to: st.rs1.getURL(),
+ _waitForDelete: true
+ }));
+
+ // Make sure the change stream cursor sees a document inserted on the recipient shard.
+ assert.writeOK(mongosColl.insert({_id: -1, x: -1}));
+ assert.writeOK(mongosCollOther.insert({_id: -1, y: -1}));
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [postShardCollectionChanges[1]]});
+
+ // Confirm that we can resume the stream on the sharded collection using the token generated
+ // while the collection was unsharded, whose documentKey contains the _id field but not the
+ // shard key.
+ let resumedCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: preShardCollectionResumeToken}}],
+ collection: mongosColl
});
- const mongosDB = st.s0.getDB("test");
- const mongosColl = mongosDB[testName];
-
- function testUnshardedBecomesSharded(collToWatch) {
- mongosColl.drop();
- mongosDB.createCollection(testName);
- mongosColl.createIndex({x: 1});
-
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Establish a change stream cursor on the unsharded collection.
- const cst = new ChangeStreamTest(mongosDB);
-
- // Create a different collection in the same database, and verify that it doesn't affect the
- // results of the change stream.
- const mongosCollOther = mongosDB[testName + "other"];
- mongosCollOther.drop();
- mongosDB.createCollection(testName + "other");
- mongosCollOther.createIndex({y: 1});
-
- let cursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
- collection: collToWatch
- });
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Verify that the cursor picks up documents inserted while the collection is unsharded. The
- // 'documentKey' at this point is simply the _id field.
- assert.writeOK(mongosColl.insert({_id: 0, x: 0}));
- assert.writeOK(mongosCollOther.insert({_id: 0, y: 0}));
- const[preShardCollectionChange] = cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [{
- documentKey: {_id: 0},
- fullDocument: {_id: 0, x: 0},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }]
- });
+ // Verify that we see both of the insertions which occurred after the collection was
+ // sharded.
+ cst.assertNextChangesEqual(
+ {cursor: resumedCursor, expectedChanges: postShardCollectionChanges});
- // Record the resume token for this change, before the collection is sharded.
- const preShardCollectionResumeToken = preShardCollectionChange._id;
+ // Test the behavior of a change stream when a sharded collection is dropped and recreated.
+ cursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
+ collection: collToWatch
+ });
+ assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
- // Shard the test collection with shard key {x: 1} and split into 2 chunks.
- st.shardColl(mongosColl.getName(), {x: 1}, {x: 0}, false, mongosDB.getName());
+ // Insert a couple of documents to shard1, creating a scenario where the getMore to shard0 will
+ // indicate that the change stream is invalidated, yet shard1 will still have data to return.
+ assert.writeOK(mongosColl.insert({_id: -2, x: -2}));
+ assert.writeOK(mongosColl.insert({_id: -3, x: -3}));
- // Shard the other collection with shard key {y: 1} and split into 2 chunks.
- st.shardColl(mongosCollOther.getName(), {y: 1}, {y: 0}, false, mongosDB.getName());
+ // Drop and recreate the collection.
+ mongosColl.drop();
+ mongosDB.createCollection(mongosColl.getName());
+ mongosColl.createIndex({z: 1});
- // List the changes we expect to see for the next two operations on the sharded collection.
- // Later, we will resume the stream using the token generated before the collection was
- // sharded, and will need to confirm that we can still see these two changes.
- const postShardCollectionChanges = [
+ // Shard the collection on a different shard key and ensure that each shard has a chunk.
+ st.shardColl(mongosColl.getName(), {z: 1}, {z: 0}, {z: -1}, mongosDB.getName());
+
+ assert.writeOK(mongosColl.insert({_id: -1, z: -1}));
+ assert.writeOK(mongosColl.insert({_id: 1, z: 1}));
+
+ // Verify that the change stream picks up the inserts; however, the shard key is missing
+ // since the collection has since been dropped and recreated.
+ cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [
{
- documentKey: {x: 1, _id: 1},
- fullDocument: {_id: 1, x: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
+ documentKey: {_id: -2},
+ fullDocument: {_id: -2, x: -2},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
},
{
- documentKey: {x: -1, _id: -1},
- fullDocument: {_id: -1, x: -1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
+ documentKey: {_id: -3},
+ fullDocument: {_id: -3, x: -3},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
}
- ];
-
- // Verify that the cursor on the original shard is still valid and sees newly inserted
- // documents. The 'documentKey' field should now include the shard key, even before a
- // 'kNewShardDetected' operation has been generated by the migration of a chunk to a new
- // shard.
- assert.writeOK(mongosColl.insert({_id: 1, x: 1}));
- assert.writeOK(mongosCollOther.insert({_id: 1, y: 1}));
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: [postShardCollectionChanges[0]]});
-
- // Move the [minKey, 0) chunk to shard1.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {x: -1},
- to: st.rs1.getURL(),
- _waitForDelete: true
- }));
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosCollOther.getFullName(),
- find: {y: -1},
- to: st.rs1.getURL(),
- _waitForDelete: true
- }));
-
- // Make sure the change stream cursor sees a document inserted on the recipient shard.
- assert.writeOK(mongosColl.insert({_id: -1, x: -1}));
- assert.writeOK(mongosCollOther.insert({_id: -1, y: -1}));
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: [postShardCollectionChanges[1]]});
-
- // Confirm that we can resume the stream on the sharded collection using the token generated
- // while the collection was unsharded, whose documentKey contains the _id field but not the
- // shard key.
- let resumedCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: preShardCollectionResumeToken}}],
- collection: mongosColl
- });
-
- // Verify that we see both of the insertions which occurred after the collection was
- // sharded.
- cst.assertNextChangesEqual(
- {cursor: resumedCursor, expectedChanges: postShardCollectionChanges});
-
- // Test the behavior of a change stream when a sharded collection is dropped and recreated.
- cursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$match: {"ns.coll": mongosColl.getName()}}],
- collection: collToWatch
- });
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Insert a couple of documents to shard1, creating a scenario where the getMore to shard0 will
- // indicate that the change stream is invalidated, yet shard1 will still have data to return.
- assert.writeOK(mongosColl.insert({_id: -2, x: -2}));
- assert.writeOK(mongosColl.insert({_id: -3, x: -3}));
-
- // Drop and recreate the collection.
- mongosColl.drop();
- mongosDB.createCollection(mongosColl.getName());
- mongosColl.createIndex({z: 1});
-
- // Shard the collection on a different shard key and ensure that each shard has a chunk.
- st.shardColl(mongosColl.getName(), {z: 1}, {z: 0}, {z: -1}, mongosDB.getName());
-
- assert.writeOK(mongosColl.insert({_id: -1, z: -1}));
- assert.writeOK(mongosColl.insert({_id: 1, z: 1}));
-
- // Verify that the change stream picks up the inserts; however, the shard key is missing
- // since the collection has since been dropped and recreated.
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: -2},
- fullDocument: {_id: -2, x: -2},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: -3},
- fullDocument: {_id: -3, x: -3},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }
- ]
- });
-
- cst.cleanUp();
- }
+ ]
+ });
+
+ cst.cleanUp();
+}
- // First test against a change stream on a single collection.
- testUnshardedBecomesSharded(mongosColl.getName());
+// First test against a change stream on a single collection.
+testUnshardedBecomesSharded(mongosColl.getName());
- // Test against a change stream on the entire database.
- testUnshardedBecomesSharded(1);
+// Test against a change stream on the entire database.
+testUnshardedBecomesSharded(1);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/change_streams_whole_db.js b/jstests/sharding/change_streams_whole_db.js
index 4051493c04f..322be4a19b4 100644
--- a/jstests/sharding/change_streams_whole_db.js
+++ b/jstests/sharding/change_streams_whole_db.js
@@ -1,192 +1,192 @@
// Tests the behavior of a change stream on a whole database in a sharded cluster.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/aggregation/extras/utils.js'); // For assertErrorCode().
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
}
-
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- }
- });
-
- const mongosDB = st.s0.getDB("test");
- const mongosColl = mongosDB[jsTestName()];
-
- let cst = new ChangeStreamTest(mongosDB);
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
-
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- // Test that the change stream returns operations on the unsharded test collection.
- assert.writeOK(mongosColl.insert({_id: 0}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0},
+});
+
+const mongosDB = st.s0.getDB("test");
+const mongosColl = mongosDB[jsTestName()];
+
+let cst = new ChangeStreamTest(mongosDB);
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+// Test that the change stream returns operations on the unsharded test collection.
+assert.writeOK(mongosColl.insert({_id: 0}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+// Create a new sharded collection.
+mongosDB.createCollection(jsTestName() + "_sharded_on_x");
+const mongosCollShardedOnX = mongosDB[jsTestName() + "_sharded_on_x"];
+
+// Shard, split, and move one chunk to shard1.
+st.shardColl(mongosCollShardedOnX.getName(), {x: 1}, {x: 0}, {x: 1}, mongosDB.getName());
+
+// Write a document to each chunk.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 0, x: -1}));
+assert.writeOK(mongosCollShardedOnX.insert({_id: 1, x: 1}));
+
+// Verify that the change stream returns both inserts.
+expected = [
+ {
+ documentKey: {_id: 0, x: -1},
+ fullDocument: {_id: 0, x: -1},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1, x: 1},
+ fullDocument: {_id: 1, x: 1},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+// Now send inserts to both the sharded and unsharded collections, and verify that the change
+// stream returns them in order.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 2, x: 2}));
+assert.writeOK(mongosColl.insert({_id: 1}));
+
+// Verify that the change stream returns both inserts.
+expected = [
+ {
+ documentKey: {_id: 2, x: 2},
+ fullDocument: {_id: 2, x: 2},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1},
ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- // Create a new sharded collection.
- mongosDB.createCollection(jsTestName() + "_sharded_on_x");
- const mongosCollShardedOnX = mongosDB[jsTestName() + "_sharded_on_x"];
-
- // Shard, split, and move one chunk to shard1.
- st.shardColl(mongosCollShardedOnX.getName(), {x: 1}, {x: 0}, {x: 1}, mongosDB.getName());
-
- // Write a document to each chunk.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 0, x: -1}));
- assert.writeOK(mongosCollShardedOnX.insert({_id: 1, x: 1}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 0, x: -1},
- fullDocument: {_id: 0, x: -1},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1, x: 1},
- fullDocument: {_id: 1, x: 1},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- // Now send inserts to both the sharded and unsharded collections, and verify that the change
- // streams returns them in order.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 2, x: 2}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 2, x: 2},
- fullDocument: {_id: 2, x: 2},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- // Create a third sharded collection with a compound shard key.
- mongosDB.createCollection(jsTestName() + "_sharded_compound");
- const mongosCollShardedCompound = mongosDB[jsTestName() + "_sharded_compound"];
-
- // Shard, split, and move one chunk to shard1.
- st.shardColl(mongosCollShardedCompound.getName(),
- {y: 1, x: 1},
- {y: 1, x: MinKey},
- {y: 1, x: MinKey},
- mongosDB.getName());
-
- // Write a document to each chunk.
- assert.writeOK(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
- assert.writeOK(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 0, y: -1, x: 0},
- fullDocument: {_id: 0, y: -1, x: 0},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1, y: 1, x: 0},
- fullDocument: {_id: 1, y: 1, x: 0},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
- operationType: "insert",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- // Send inserts to all 3 collections and verify that the results contain the correct
- // documentKeys and are in the correct order.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 3, x: 3}));
- assert.writeOK(mongosColl.insert({_id: 3}));
- assert.writeOK(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
-
- // Verify that the change stream returns both inserts.
- expected = [
- {
- documentKey: {_id: 3, x: 3},
- fullDocument: {_id: 3, x: 3},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 3},
- fullDocument: {_id: 3},
- ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
- operationType: "insert",
- },
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+// Create a third sharded collection with a compound shard key.
+mongosDB.createCollection(jsTestName() + "_sharded_compound");
+const mongosCollShardedCompound = mongosDB[jsTestName() + "_sharded_compound"];
+
+// Shard, split, and move one chunk to shard1.
+st.shardColl(mongosCollShardedCompound.getName(),
+ {y: 1, x: 1},
+ {y: 1, x: MinKey},
+ {y: 1, x: MinKey},
+ mongosDB.getName());
+
+// Write a document to each chunk.
+assert.writeOK(mongosCollShardedCompound.insert({_id: 0, y: -1, x: 0}));
+assert.writeOK(mongosCollShardedCompound.insert({_id: 1, y: 1, x: 0}));
+
+// Verify that the change stream returns both inserts.
+expected = [
+ {
+ documentKey: {_id: 0, y: -1, x: 0},
+ fullDocument: {_id: 0, y: -1, x: 0},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1, y: 1, x: 0},
+ fullDocument: {_id: 1, y: 1, x: 0},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
+ operationType: "insert",
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+// Send inserts to all 3 collections and verify that the results contain the correct
+// documentKeys and are in the correct order.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 3, x: 3}));
+assert.writeOK(mongosColl.insert({_id: 3}));
+assert.writeOK(mongosCollShardedCompound.insert({_id: 2, x: 0, y: -2}));
+
+// Verify that the change stream returns all three inserts.
+expected = [
+ {
+ documentKey: {_id: 3, x: 3},
+ fullDocument: {_id: 3, x: 3},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 3},
+ fullDocument: {_id: 3},
+ ns: {db: mongosDB.getName(), coll: mongosColl.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 2, x: 0, y: -2},
+ fullDocument: {_id: 2, x: 0, y: -2},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
+ operationType: "insert",
+ },
+];
+
+const results = cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+// Store the resume token of the first insert to use after dropping the collection.
+const resumeTokenBeforeDrop = results[0]._id;
+
+// Write one more document to the collection that will be dropped, to be returned after
+// resuming.
+assert.writeOK(mongosCollShardedOnX.insert({_id: 4, x: 4}));
+
+// Drop the collection, invalidating the open change stream.
+assertDropCollection(mongosDB, mongosCollShardedOnX.getName());
+
+// Resume the change stream from before the collection drop, and verify that the documentKey
+// field contains the extracted shard key from the resume token.
+cursor = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {resumeAfter: resumeTokenBeforeDrop}},
+ {$match: {"ns.coll": mongosCollShardedOnX.getName()}}
+ ],
+ collection: 1
+});
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [
{
- documentKey: {_id: 2, x: 0, y: -2},
- fullDocument: {_id: 2, x: 0, y: -2},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedCompound.getName()},
- operationType: "insert",
+ documentKey: {_id: 4, x: 4},
+ fullDocument: {_id: 4, x: 4},
+ ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
+ operationType: "insert",
},
- ];
-
- const results = cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- // Store the resume token of the first insert to use after dropping the collection.
- const resumeTokenBeforeDrop = results[0]._id;
-
- // Write one more document to the collection that will be dropped, to be returned after
- // resuming.
- assert.writeOK(mongosCollShardedOnX.insert({_id: 4, x: 4}));
-
- // Drop the collection, invalidating the open change stream.
- assertDropCollection(mongosDB, mongosCollShardedOnX.getName());
-
- // Resume the change stream from before the collection drop, and verify that the documentKey
- // field contains the extracted shard key from the resume token.
- cursor = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {resumeAfter: resumeTokenBeforeDrop}},
- {$match: {"ns.coll": mongosCollShardedOnX.getName()}}
- ],
- collection: 1
- });
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: 4, x: 4},
- fullDocument: {_id: 4, x: 4},
- ns: {db: mongosDB.getName(), coll: mongosCollShardedOnX.getName()},
- operationType: "insert",
- },
- ]
- });
-
- cst.cleanUp();
-
- st.stop();
+ ]
+});
+
+cst.cleanUp();
+
+st.stop();
})();
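The resume step above works because each change event's _id field is a resume token encoding enough state (including extracted shard key fields) to rebuild the stream's position. A minimal sketch of the same resume pattern, assuming a hypothetical unsharded collection db.orders:

// Sketch: resume a change stream from a saved token (collection name hypothetical).
const stream = db.orders.watch();
assert.writeOK(db.orders.insert({_id: 1}));
assert.soon(() => stream.hasNext());
const token = stream.next()._id;  // an event's _id is its resume token
assert.writeOK(db.orders.insert({_id: 2}));
// Resuming replays only events that occurred after the tokenized event.
const resumed = db.orders.watch([], {resumeAfter: token});
assert.soon(() => resumed.hasNext());
assert.eq(2, resumed.next().documentKey._id);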
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index f32a66ee242..a54030fbf12 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -3,60 +3,58 @@
//
(function() {
- 'use strict';
+'use strict';
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- function assertUnauthorized(res, msg) {
- if (assert._debug && msg)
- print("in assert for: " + msg);
+function assertUnauthorized(res, msg) {
+ if (assert._debug && msg)
+ print("in assert for: " + msg);
- if (res.ok == 0 && (res.errmsg.startsWith('not authorized') ||
- res.errmsg.match(/requires authentication/)))
- return;
+ if (res.ok == 0 &&
+ (res.errmsg.startsWith('not authorized') || res.errmsg.match(/requires authentication/)))
+ return;
- var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
- if (msg) {
- finalMsg += " : " + msg;
- }
- doassert(finalMsg);
+ var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
+ if (msg) {
+ finalMsg += " : " + msg;
}
+ doassert(finalMsg);
+}
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- auth: true,
- other: {keyFile: 'jstests/libs/key1', useHostname: false, shardAsReplicaSet: false}
- });
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ auth: true,
+ other: {keyFile: 'jstests/libs/key1', useHostname: false, shardAsReplicaSet: false}
+});
- var shardAdmin = st.shard0.getDB('admin');
- shardAdmin.createUser(
- {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
- shardAdmin.auth('admin', 'x');
+var shardAdmin = st.shard0.getDB('admin');
+shardAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+shardAdmin.auth('admin', 'x');
- var mongos = st.s0;
- var mongosAdmin = mongos.getDB('admin');
- var coll = mongos.getCollection('foo.bar');
+var mongos = st.s0;
+var mongosAdmin = mongos.getDB('admin');
+var coll = mongos.getCollection('foo.bar');
- mongosAdmin.createUser(
- {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
- mongosAdmin.auth('admin', 'x');
+mongosAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+mongosAdmin.auth('admin', 'x');
- assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
+assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
- assert.commandWorked(
- mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
+assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
- // cleanupOrphaned requires auth as admin user.
- assert.commandWorked(shardAdmin.logout());
- assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
+// cleanupOrphaned requires auth as admin user.
+assert.commandWorked(shardAdmin.logout());
+assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
- var fooDB = st.shard0.getDB('foo');
- shardAdmin.auth('admin', 'x');
- fooDB.createUser({user: 'user', pwd: 'x', roles: ['readWrite', 'dbAdmin']});
- shardAdmin.logout();
- fooDB.auth('user', 'x');
- assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
+var fooDB = st.shard0.getDB('foo');
+shardAdmin.auth('admin', 'x');
+fooDB.createUser({user: 'user', pwd: 'x', roles: ['readWrite', 'dbAdmin']});
+shardAdmin.logout();
+fooDB.auth('user', 'x');
+assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
- st.stop();
+st.stop();
})();
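For context, assertUnauthorized above accepts either of the two error shapes an unauthenticated or under-privileged caller can see. A hedged sketch of such a response (exact errmsg wording varies across server versions):

// Sketch: the response shape assertUnauthorized matches after logout.
const res = shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'});
// Typically {ok: 0, errmsg: "not authorized on admin to execute command ...", ...}
assert.eq(0, res.ok);
assert(res.errmsg.startsWith('not authorized') || /requires authentication/.test(res.errmsg));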
diff --git a/jstests/sharding/cleanup_orphaned_basic.js b/jstests/sharding/cleanup_orphaned_basic.js
index 66fe7924157..fb8893d5677 100644
--- a/jstests/sharding/cleanup_orphaned_basic.js
+++ b/jstests/sharding/cleanup_orphaned_basic.js
@@ -4,119 +4,117 @@
//
(function() {
- "use strict";
-
- /*****************************************************************************
- * Unsharded mongod.
- ****************************************************************************/
-
- // cleanupOrphaned fails against unsharded mongod.
- var mongod = MongoRunner.runMongod();
- assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
-
- /*****************************************************************************
- * Bad invocations of cleanupOrphaned command.
- ****************************************************************************/
-
- var st = new ShardingTest({other: {rs: true, rsOptions: {nodes: 2}}});
-
- var mongos = st.s0;
- var mongosAdmin = mongos.getDB('admin');
- var dbName = 'foo';
- var collectionName = 'bar';
- var ns = dbName + '.' + collectionName;
- var coll = mongos.getCollection(ns);
-
- // cleanupOrphaned fails against mongos ('no such command'): it must be run
- // on mongod.
- assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
-
- // cleanupOrphaned must be run on admin DB.
- var shardFooDB = st.shard0.getDB(dbName);
- assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
-
- // Must be run on primary.
- var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
- var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
- print('cleanupOrphaned on secondary:');
- printjson(response);
- assert.commandFailed(response);
-
- var shardAdmin = st.shard0.getDB('admin');
- var badNS = ' \\/."*<>:|?';
- assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
-
- // cleanupOrphaned works on sharded collection.
- assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
-
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
-
- assert.commandWorked(mongosAdmin.runCommand({shardCollection: ns, key: {_id: 1}}));
-
- assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
-
- /*****************************************************************************
- * Empty shard.
- ****************************************************************************/
-
- // Ping shard[1] so it will be aware that it is sharded. Otherwise cleanupOrphaned
- // may fail.
- assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: st.shard0.shardName,
- _waitForDelete: true
- }));
-
- // Collection's home is shard0, there are no chunks assigned to shard1.
- st.shard1.getCollection(ns).insert({});
- assert.eq(null, st.shard1.getDB(dbName).getLastError());
- assert.eq(1, st.shard1.getCollection(ns).count());
- response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
- assert.commandWorked(response);
- assert.eq({_id: {$maxKey: 1}}, response.stoppedAtKey);
- assert.eq(0,
- st.shard1.getCollection(ns).count(),
- "cleanupOrphaned didn't delete orphan on empty shard.");
-
- /*****************************************************************************
- * Bad startingFromKeys.
- ****************************************************************************/
-
- // startingFromKey of MaxKey.
- response = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {_id: MaxKey}});
- assert.commandWorked(response);
- assert.eq(null, response.stoppedAtKey);
-
- // startingFromKey doesn't match number of fields in shard key.
- assert.commandFailed(shardAdmin.runCommand(
- {cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue', someOtherKey: 1}}));
-
- // startingFromKey matches number of fields in shard key but not field names.
- assert.commandFailed(
- shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue'}}));
-
- var coll2 = mongos.getCollection('foo.baz');
-
- assert.commandWorked(
- mongosAdmin.runCommand({shardCollection: coll2.getFullName(), key: {a: 1, b: 1}}));
-
- // startingFromKey doesn't match number of fields in shard key.
- assert.commandFailed(shardAdmin.runCommand(
- {cleanupOrphaned: coll2.getFullName(), startingFromKey: {someKey: 'someValue'}}));
-
- // startingFromKey matches number of fields in shard key but not field names.
- assert.commandFailed(shardAdmin.runCommand(
- {cleanupOrphaned: coll2.getFullName(), startingFromKey: {a: 'someValue', c: 1}}));
-
- st.stop();
- MongoRunner.stopMongod(mongod);
-
+"use strict";
+
+/*****************************************************************************
+ * Unsharded mongod.
+ ****************************************************************************/
+
+// cleanupOrphaned fails against unsharded mongod.
+var mongod = MongoRunner.runMongod();
+assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
+
+/*****************************************************************************
+ * Bad invocations of cleanupOrphaned command.
+ ****************************************************************************/
+
+var st = new ShardingTest({other: {rs: true, rsOptions: {nodes: 2}}});
+
+var mongos = st.s0;
+var mongosAdmin = mongos.getDB('admin');
+var dbName = 'foo';
+var collectionName = 'bar';
+var ns = dbName + '.' + collectionName;
+var coll = mongos.getCollection(ns);
+
+// cleanupOrphaned fails against mongos ('no such command'): it must be run
+// on mongod.
+assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
+
+// cleanupOrphaned must be run on admin DB.
+var shardFooDB = st.shard0.getDB(dbName);
+assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
+
+// Must be run on primary.
+var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
+var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
+print('cleanupOrphaned on secondary:');
+printjson(response);
+assert.commandFailed(response);
+
+var shardAdmin = st.shard0.getDB('admin');
+var badNS = ' \\/."*<>:|?';
+assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
+
+// cleanupOrphaned works on sharded collection.
+assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
+
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+
+assert.commandWorked(mongosAdmin.runCommand({shardCollection: ns, key: {_id: 1}}));
+
+assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
+
+/*****************************************************************************
+ * Empty shard.
+ ****************************************************************************/
+
+// Ping shard1 so it is aware that the collection is sharded. Otherwise cleanupOrphaned
+// may fail.
+assert.commandWorked(mongosAdmin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 1},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(mongosAdmin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 1},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+
+// The collection's home is shard0; there are no chunks assigned to shard1.
+st.shard1.getCollection(ns).insert({});
+assert.eq(null, st.shard1.getDB(dbName).getLastError());
+assert.eq(1, st.shard1.getCollection(ns).count());
+response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
+assert.commandWorked(response);
+assert.eq({_id: {$maxKey: 1}}, response.stoppedAtKey);
+assert.eq(
+ 0, st.shard1.getCollection(ns).count(), "cleanupOrphaned didn't delete orphan on empty shard.");
+
+/*****************************************************************************
+ * Bad startingFromKeys.
+ ****************************************************************************/
+
+// startingFromKey of MaxKey.
+response = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {_id: MaxKey}});
+assert.commandWorked(response);
+assert.eq(null, response.stoppedAtKey);
+
+// startingFromKey doesn't match number of fields in shard key.
+assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue', someOtherKey: 1}}));
+
+// startingFromKey matches number of fields in shard key but not field names.
+assert.commandFailed(
+ shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue'}}));
+
+var coll2 = mongos.getCollection('foo.baz');
+
+assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll2.getFullName(), key: {a: 1, b: 1}}));
+
+// startingFromKey doesn't match number of fields in shard key.
+assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {someKey: 'someValue'}}));
+
+// startingFromKey matches number of fields in shard key but not field names.
+assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {a: 'someValue', c: 1}}));
+
+st.stop();
+MongoRunner.stopMongod(mongod);
})();
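The stoppedAtKey/startingFromKey pair exercised above is built for iteration: each invocation cleans at most one orphaned range and reports where it stopped. A small sketch of draining every orphaned range for a namespace, following the same loop the hashed-collection test later in this patch uses:

// Sketch: repeatedly invoke cleanupOrphaned until the whole key space is scanned.
function cleanupAllOrphans(shardAdminDb, ns) {
    var res = shardAdminDb.runCommand({cleanupOrphaned: ns});
    while (res.ok && res.stoppedAtKey) {
        res = shardAdminDb.runCommand({cleanupOrphaned: ns, startingFromKey: res.stoppedAtKey});
    }
    assert.commandWorked(res);  // throws if any iteration failed
}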
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
index 6341dbb887d..01576805b49 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
@@ -8,151 +8,150 @@ load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
- coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
- donorColl = donor.getCollection(ns), recipientColl = st.shard1.getCollection(ns);
-
- // Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
- // Donor: [minKey, 0) [0, 20)
- // Recipient: [20, maxKey)
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 20}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 20}, to: st.shard1.shardName, _waitForDelete: true}));
-
- jsTest.log('Inserting 20 docs into shard 0....');
- for (var i = -20; i < 20; i += 2) {
- coll.insert({_id: i});
- }
- assert.eq(null, coll.getDB().getLastError());
- assert.eq(20, donorColl.count());
-
- jsTest.log('Inserting 10 docs into shard 1....');
- for (i = 20; i < 40; i += 2) {
- coll.insert({_id: i});
- }
- assert.eq(null, coll.getDB().getLastError());
- assert.eq(10, recipientColl.count());
-
- //
- // Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
- // from shard 0 to shard 1. Pause it at some points in the donor's and
- // recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
- //
-
- jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
- pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- pauseMigrateAtStep(recipient, migrateStepNames.cloned);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), st.shard1.shardName);
-
- waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
- waitForMigrateStep(recipient, migrateStepNames.cloned);
- // Recipient has run _recvChunkStart and begun its migration thread; docs have
- // been cloned and chunk [0, 20) is noted as 'pending' on recipient.
-
- // Donor: [minKey, 0) [0, 20)
- // Recipient (pending): [0, 20)
- // Recipient: [20, maxKey)
-
- // Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
- //
- // Donor: [minKey, 0) [0, 20) {26}
- // Recipient (pending): [0, 20)
- // Recipient: {-1} [20, maxKey)
- donorColl.insert([{_id: 26}]);
- assert.eq(null, donorColl.getDB().getLastError());
- assert.eq(21, donorColl.count());
- recipientColl.insert([{_id: -1}]);
- assert.eq(null, recipientColl.getDB().getLastError());
- assert.eq(21, recipientColl.count());
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(20, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(20, recipientColl.count());
-
- jsTest.log('Inserting document on donor side');
- // Inserted a new document (not an orphan) with id 19, which belongs in the
- // [0, 20) chunk.
- donorColl.insert({_id: 19});
- assert.eq(null, coll.getDB().getLastError());
- assert.eq(21, donorColl.count());
-
- // Recipient transfers this modification.
- jsTest.log('Let migrate proceed to transferredMods');
- proceedToMigrateStep(recipient, migrateStepNames.catchup);
- jsTest.log('Done letting migrate proceed to transferredMods');
-
- assert.eq(21, recipientColl.count(), "Recipient didn't transfer inserted document.");
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(21, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(21, recipientColl.count());
-
- // Create orphans.
- donorColl.insert([{_id: 26}]);
- assert.eq(null, donorColl.getDB().getLastError());
- assert.eq(22, donorColl.count());
- recipientColl.insert([{_id: -1}]);
- assert.eq(null, recipientColl.getDB().getLastError());
- assert.eq(22, recipientColl.count());
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(21, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(21, recipientColl.count());
-
- // Recipient has been waiting for donor to call _recvChunkCommit.
- pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- proceedToMigrateStep(recipient, migrateStepNames.steady);
- proceedToMigrateStep(recipient, migrateStepNames.done);
-
- // Create orphans.
- donorColl.insert([{_id: 26}]);
- assert.eq(null, donorColl.getDB().getLastError());
- assert.eq(22, donorColl.count());
- recipientColl.insert([{_id: -1}]);
- assert.eq(null, recipientColl.getDB().getLastError());
- assert.eq(22, recipientColl.count());
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(21, donorColl.count());
- cleanupOrphaned(recipient, ns, 2);
- assert.eq(21, recipientColl.count());
-
- // Let recipient side of the migration finish so that the donor can proceed with the commit.
- unpauseMigrateAtStep(recipient, migrateStepNames.done);
- waitForMoveChunkStep(donor, moveChunkStepNames.committed);
-
- // Donor is paused after the migration chunk commit, but before it finishes the cleanup that
- // includes running the range deleter. Thus it technically has orphaned data -- commit is
- // complete, but moved data is still present. cleanupOrphaned can remove the data the donor
- // would otherwise clean up itself in its post-move delete phase.
- cleanupOrphaned(donor, ns, 2);
- assert.eq(10, donorColl.count());
-
- // Let the donor migration finish.
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- joinMoveChunk();
-
- // Donor has finished post-move delete, which had nothing to remove with the range deleter
- // because of the preemptive cleanupOrphaned call.
- assert.eq(10, donorColl.count());
- assert.eq(21, recipientColl.count());
- assert.eq(31, coll.count());
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
+ coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
+ donorColl = donor.getCollection(ns), recipientColl = st.shard1.getCollection(ns);
+
+// Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
+// Donor: [minKey, 0) [0, 20)
+// Recipient: [20, maxKey)
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 20}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 20}, to: st.shard1.shardName, _waitForDelete: true}));
+
+jsTest.log('Inserting 20 docs into shard 0....');
+for (var i = -20; i < 20; i += 2) {
+ coll.insert({_id: i});
+}
+assert.eq(null, coll.getDB().getLastError());
+assert.eq(20, donorColl.count());
+
+jsTest.log('Inserting 10 docs into shard 1....');
+for (i = 20; i < 40; i += 2) {
+ coll.insert({_id: i});
+}
+assert.eq(null, coll.getDB().getLastError());
+assert.eq(10, recipientColl.count());
+//
+// Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
+// from shard 0 to shard 1. Pause it at some points in the donor's and
+// recipient's workflows, and test cleanupOrphaned on shard 0 and shard 1.
+//
+
+jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
+pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), st.shard1.shardName);
+
+waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+waitForMigrateStep(recipient, migrateStepNames.cloned);
+// Recipient has run _recvChunkStart and begun its migration thread; docs have
+// been cloned and chunk [0, 20) is noted as 'pending' on recipient.
+
+// Donor: [minKey, 0) [0, 20)
+// Recipient (pending): [0, 20)
+// Recipient: [20, maxKey)
+
+// Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
+//
+// Donor: [minKey, 0) [0, 20) {26}
+// Recipient (pending): [0, 20)
+// Recipient: {-1} [20, maxKey)
+donorColl.insert([{_id: 26}]);
+assert.eq(null, donorColl.getDB().getLastError());
+assert.eq(21, donorColl.count());
+recipientColl.insert([{_id: -1}]);
+assert.eq(null, recipientColl.getDB().getLastError());
+assert.eq(21, recipientColl.count());
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(20, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(20, recipientColl.count());
+
+jsTest.log('Inserting document on donor side');
+// Inserted a new document (not an orphan) with id 19, which belongs in the
+// [0, 20) chunk.
+donorColl.insert({_id: 19});
+assert.eq(null, coll.getDB().getLastError());
+assert.eq(21, donorColl.count());
+
+// Recipient transfers this modification.
+jsTest.log('Let migrate proceed to transferredMods');
+proceedToMigrateStep(recipient, migrateStepNames.catchup);
+jsTest.log('Done letting migrate proceed to transferredMods');
+
+assert.eq(21, recipientColl.count(), "Recipient didn't transfer inserted document.");
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(21, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(21, recipientColl.count());
+
+// Create orphans.
+donorColl.insert([{_id: 26}]);
+assert.eq(null, donorColl.getDB().getLastError());
+assert.eq(22, donorColl.count());
+recipientColl.insert([{_id: -1}]);
+assert.eq(null, recipientColl.getDB().getLastError());
+assert.eq(22, recipientColl.count());
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(21, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(21, recipientColl.count());
+
+// Recipient has been waiting for donor to call _recvChunkCommit.
+pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+proceedToMigrateStep(recipient, migrateStepNames.steady);
+proceedToMigrateStep(recipient, migrateStepNames.done);
+
+// Create orphans.
+donorColl.insert([{_id: 26}]);
+assert.eq(null, donorColl.getDB().getLastError());
+assert.eq(22, donorColl.count());
+recipientColl.insert([{_id: -1}]);
+assert.eq(null, recipientColl.getDB().getLastError());
+assert.eq(22, recipientColl.count());
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(21, donorColl.count());
+cleanupOrphaned(recipient, ns, 2);
+assert.eq(21, recipientColl.count());
+
+// Let recipient side of the migration finish so that the donor can proceed with the commit.
+unpauseMigrateAtStep(recipient, migrateStepNames.done);
+waitForMoveChunkStep(donor, moveChunkStepNames.committed);
+
+// Donor is paused after the migration chunk commit, but before it finishes the cleanup that
+// includes running the range deleter. Thus it technically has orphaned data -- commit is
+// complete, but moved data is still present. cleanupOrphaned can remove the data the donor
+// would otherwise clean up itself in its post-move delete phase.
+cleanupOrphaned(donor, ns, 2);
+assert.eq(10, donorColl.count());
+
+// Let the donor migration finish.
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+joinMoveChunk();
+
+// Donor has finished post-move delete, which had nothing to remove with the range deleter
+// because of the preemptive cleanupOrphaned call.
+assert.eq(10, donorColl.count());
+assert.eq(21, recipientColl.count());
+assert.eq(31, coll.count());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
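The pause/proceed helpers used above (from chunk_manipulation_util.js) wrap the generic configureFailPoint command. Stripped down, parking and releasing a server at a failpoint looks roughly like the following; the failpoint name here is illustrative, and the helpers pick the real step-specific ones:

// Sketch: the underlying failpoint dance behind pauseMoveChunkAtStep/unpause.
assert.commandWorked(
    donor.adminCommand({configureFailPoint: 'moveChunkHangAtStep3', mode: 'alwaysOn'}));
// ... start moveChunk in a parallel shell and let it block on the failpoint ...
assert.commandWorked(
    donor.adminCommand({configureFailPoint: 'moveChunkHangAtStep3', mode: 'off'}));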
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index 8fb97e4aa1a..34a5f8d89fc 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -9,121 +9,119 @@ load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
(function() {
- "use strict";
+"use strict";
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
- coll = mongos.getCollection(ns);
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = 'foo', ns = dbName + '.bar',
+ coll = mongos.getCollection(ns);
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
- // Makes four chunks by default, two on each shard.
- var chunks = st.config.chunks.find({ns: ns}).sort({min: 1}).toArray();
- assert.eq(4, chunks.length);
+// Makes four chunks by default, two on each shard.
+var chunks = st.config.chunks.find({ns: ns}).sort({min: 1}).toArray();
+assert.eq(4, chunks.length);
- var chunkWithDoc = chunks[1];
- print('Trying to make doc that hashes to this chunk: ' + tojson(chunkWithDoc));
+var chunkWithDoc = chunks[1];
+print('Trying to make doc that hashes to this chunk: ' + tojson(chunkWithDoc));
- var found = false;
- for (var i = 0; i < 10000; i++) {
- var doc = {key: ObjectId()}, hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
+var found = false;
+for (var i = 0; i < 10000; i++) {
+ var doc = {key: ObjectId()}, hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
- print('doc.key ' + doc.key + ' hashes to ' + hash);
+ print('doc.key ' + doc.key + ' hashes to ' + hash);
- if (mongos.getCollection('config.chunks')
- .findOne(
- {_id: chunkWithDoc._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
- found = true;
- break;
- }
+ if (mongos.getCollection('config.chunks')
+ .findOne({_id: chunkWithDoc._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
+ found = true;
+ break;
}
+}
- assert(found, "Couldn't make doc that belongs to chunk 1.");
- print('Doc: ' + tojson(doc));
- coll.insert(doc);
- assert.eq(null, coll.getDB().getLastError());
-
- //
- // Start a moveChunk in the background from shard 0 to shard 1. Pause it at
- // some points in the donor's and recipient's work flows, and test
- // cleanupOrphaned.
- //
-
- var donor, recip;
- if (chunkWithDoc.shard == st.shard0.shardName) {
- donor = st.shard0;
- recip = st.shard1;
- } else {
- recip = st.shard0;
- donor = st.shard1;
- }
+assert(found, "Couldn't make doc that belongs to chunk 1.");
+print('Doc: ' + tojson(doc));
+coll.insert(doc);
+assert.eq(null, coll.getDB().getLastError());
- jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
- pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- pauseMigrateAtStep(recip, migrateStepNames.cloned);
-
- var joinMoveChunk = moveChunkParallel(staticMongod,
- st.s0.host,
- null,
- [chunkWithDoc.min, chunkWithDoc.max], // bounds
- coll.getFullName(),
- recip.shardName);
-
- waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
- waitForMigrateStep(recip, migrateStepNames.cloned);
- proceedToMigrateStep(recip, migrateStepNames.catchup);
- // recipient has run _recvChunkStart and begun its migration thread;
- // 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
-
- var donorColl = donor.getCollection(ns), recipColl = recip.getCollection(ns);
-
- assert.eq(1, donorColl.count());
- assert.eq(1, recipColl.count());
-
- // cleanupOrphaned should go through two iterations, since the default chunk
- // setup leaves two unowned ranges on each shard.
- cleanupOrphaned(donor, ns, 2);
- cleanupOrphaned(recip, ns, 2);
- assert.eq(1, donorColl.count());
- assert.eq(1, recipColl.count());
-
- // recip has been waiting for donor to call _recvChunkCommit.
- pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
- proceedToMigrateStep(recip, migrateStepNames.steady);
- proceedToMigrateStep(recip, migrateStepNames.done);
-
- cleanupOrphaned(donor, ns, 2);
- assert.eq(1, donorColl.count());
- cleanupOrphaned(recip, ns, 2);
- assert.eq(1, recipColl.count());
-
- // Let recip side of the migration finish so that the donor proceeds with the commit.
- unpauseMigrateAtStep(recip, migrateStepNames.done);
- waitForMoveChunkStep(donor, moveChunkStepNames.committed);
-
- // Donor is paused after the migration chunk commit, but before it finishes the cleanup that
- // includes running the range deleter. Thus it technically has orphaned data -- commit is
- // complete, but moved data is still present. cleanupOrphaned can remove the data the donor
- // would otherwise clean up itself in its post-move delete phase.
- cleanupOrphaned(donor, ns, 2);
- assert.eq(0, donorColl.count());
-
- // Let migration thread complete.
- unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
- joinMoveChunk();
-
- // donor has finished post-move delete, which had nothing to remove with the range deleter
- // because of the preemptive cleanupOrphaned call.
- assert.eq(0, donorColl.count());
- assert.eq(1, recipColl.count());
- assert.eq(1, coll.count());
-
- MongoRunner.stopMongod(staticMongod);
- st.stop();
+//
+// Start a moveChunk in the background from shard 0 to shard 1. Pause it at
+// some points in the donor's and recipient's workflows, and test
+// cleanupOrphaned.
+//
+var donor, recip;
+if (chunkWithDoc.shard == st.shard0.shardName) {
+ donor = st.shard0;
+ recip = st.shard1;
+} else {
+ recip = st.shard0;
+ donor = st.shard1;
+}
+
+jsTest.log('setting failpoint startedMoveChunk (donor) and cloned (recipient)');
+pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+pauseMigrateAtStep(recip, migrateStepNames.cloned);
+
+var joinMoveChunk = moveChunkParallel(staticMongod,
+ st.s0.host,
+ null,
+ [chunkWithDoc.min, chunkWithDoc.max], // bounds
+ coll.getFullName(),
+ recip.shardName);
+
+waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+waitForMigrateStep(recip, migrateStepNames.cloned);
+proceedToMigrateStep(recip, migrateStepNames.catchup);
+// recipient has run _recvChunkStart and begun its migration thread;
+// 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
+
+var donorColl = donor.getCollection(ns), recipColl = recip.getCollection(ns);
+
+assert.eq(1, donorColl.count());
+assert.eq(1, recipColl.count());
+
+// cleanupOrphaned should go through two iterations, since the default chunk
+// setup leaves two unowned ranges on each shard.
+cleanupOrphaned(donor, ns, 2);
+cleanupOrphaned(recip, ns, 2);
+assert.eq(1, donorColl.count());
+assert.eq(1, recipColl.count());
+
+// recip has been waiting for donor to call _recvChunkCommit.
+pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+proceedToMigrateStep(recip, migrateStepNames.steady);
+proceedToMigrateStep(recip, migrateStepNames.done);
+
+cleanupOrphaned(donor, ns, 2);
+assert.eq(1, donorColl.count());
+cleanupOrphaned(recip, ns, 2);
+assert.eq(1, recipColl.count());
+
+// Let recip side of the migration finish so that the donor proceeds with the commit.
+unpauseMigrateAtStep(recip, migrateStepNames.done);
+waitForMoveChunkStep(donor, moveChunkStepNames.committed);
+
+// Donor is paused after the migration chunk commit, but before it finishes the cleanup that
+// includes running the range deleter. Thus it technically has orphaned data -- commit is
+// complete, but moved data is still present. cleanupOrphaned can remove the data the donor
+// would otherwise clean up itself in its post-move delete phase.
+cleanupOrphaned(donor, ns, 2);
+assert.eq(0, donorColl.count());
+
+// Let migration thread complete.
+unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+joinMoveChunk();
+
+// donor has finished post-move delete, which had nothing to remove with the range deleter
+// because of the preemptive cleanupOrphaned call.
+assert.eq(0, donorColl.count());
+assert.eq(1, recipColl.count());
+assert.eq(1, coll.count());
+
+MongoRunner.stopMongod(staticMongod);
+st.stop();
})();
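The brute-force ObjectId search above leans on the internal _hashBSONElement command, which returns the 64-bit hash a hashed index would store for a value; chunks of a hashed collection partition that hash space, so a document lands in whichever chunk's [min, max) range contains its key's hash. A compact illustration:

// Sketch: ask the server for the hashed-index value of a single key.
var hashed = db.adminCommand({_hashBSONElement: ObjectId()});
printjson(hashed);  // e.g. {..., out: NumberLong(...), ok: 1}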
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index a4f9cfb25eb..bf996dda39b 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -3,76 +3,75 @@
//
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
-
- // Create two orphaned data holes, one bounded by min or max on each shard
-
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: coll + "",
- bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(admin.runCommand({
- moveChunk: coll + "",
- bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
- to: st.shard0.shardName,
- _waitForDelete: true
- }));
- st.printShardingStatus();
-
- jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
-
- for (var s = 0; s < 2; s++) {
- var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
- var bulk = shardColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
- }
-
- assert.eq(200,
- st.shard0.getCollection(coll + "").find().itcount() +
- st.shard1.getCollection(coll + "").find().itcount());
- assert.eq(100, coll.find().itcount());
-
- jsTest.log("Cleaning up orphaned data in hashed coll...");
-
- for (var s = 0; s < 2; s++) {
- var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
-
- var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
- while (result.ok && result.stoppedAtKey) {
- printjson(result);
- result = shardAdmin.runCommand(
- {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
- }
-
+"use strict";
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+
+// Create two orphaned data holes, one bounded by min or max on each shard
+
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+st.printShardingStatus();
+
+jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
+
+for (var s = 0; s < 2; s++) {
+ var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
+ var bulk = shardColl.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+}
+
+assert.eq(200,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+assert.eq(100, coll.find().itcount());
+
+jsTest.log("Cleaning up orphaned data in hashed coll...");
+
+for (var s = 0; s < 2; s++) {
+ var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
+
+ var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
+ while (result.ok && result.stoppedAtKey) {
printjson(result);
- assert(result.ok);
+ result = shardAdmin.runCommand(
+ {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
}
- assert.eq(100,
- st.shard0.getCollection(coll + "").find().itcount() +
- st.shard1.getCollection(coll + "").find().itcount());
- assert.eq(100, coll.find().itcount());
+ printjson(result);
+ assert(result.ok);
+}
- jsTest.log("DONE!");
+assert.eq(100,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+assert.eq(100, coll.find().itcount());
- st.stop();
+jsTest.log("DONE!");
+st.stop();
})();
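The counts in this test follow directly from the setup: each shard is seeded with _id 0..99, so 200 documents exist across the two shards while mongos, which filters by chunk ownership, sees each _id exactly once. Cleanup deletes the 100 unowned copies, after which direct and routed counts agree:

// Sketch: direct shard reads include orphans; mongos routing filters them out.
var direct = st.shard0.getCollection("foo.bar").find().itcount() +
    st.shard1.getCollection("foo.bar").find().itcount();
var routed = mongos.getCollection("foo.bar").find().itcount();
// Before cleanup: direct == 200, routed == 100. After cleanup: both are 100.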
diff --git a/jstests/sharding/clone_catalog_data.js b/jstests/sharding/clone_catalog_data.js
index c2d25fc9d1f..fae12fbcd56 100644
--- a/jstests/sharding/clone_catalog_data.js
+++ b/jstests/sharding/clone_catalog_data.js
@@ -4,7 +4,6 @@
// Eventually, _movePrimary will use this command.
(() => {
-
function sortByName(a, b) {
if (a.name < b.name)
return -1;
@@ -34,8 +33,8 @@
// Create indexes on each collection.
var coll1Indexes =
[
- {key: {a: 1}, name: 'index1', expireAfterSeconds: 5000},
- {key: {b: -1}, name: 'index2', unique: true},
+ {key: {a: 1}, name: 'index1', expireAfterSeconds: 5000},
+ {key: {b: -1}, name: 'index2', unique: true},
],
coll2Indexes = [
{key: {a: 1, b: 1}, name: 'index3'},
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index a8745cd3110..ba87929cb4e 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -22,7 +22,6 @@ config.shards.find().forEach(function(doc) {
var createdEpoch = null;
var checkEpochs = function() {
config.chunks.find({ns: coll + ""}).forEach(function(chunk) {
-
// Make sure the epochs exist, are non-zero, and are consistent
assert(chunk.lastmodEpoch);
print(chunk.lastmodEpoch + "");
@@ -31,7 +30,6 @@ var checkEpochs = function() {
createdEpoch = chunk.lastmodEpoch;
else
assert.eq(createdEpoch, chunk.lastmodEpoch);
-
});
};
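For background, a collection epoch is an ObjectId minted when a namespace is (re)sharded; every chunk of that incarnation carries it as lastmodEpoch, which is exactly the invariant checkEpochs verifies. The same check, condensed:

// Sketch: all chunks of one collection incarnation share a single epoch.
var epochs = config.chunks.find({ns: coll + ""}).toArray().map(function(chunk) {
    return chunk.lastmodEpoch + "";
});
assert.eq(1, new Set(epochs).size, "chunks disagree on collection epoch");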
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index 5e243b8fff4..d995ee19ab6 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -1,83 +1,83 @@
// Tests various cases of dropping and recreating collections in the same namespace with multiple
// mongoses
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 3, causallyConsistent: true});
+var st = new ShardingTest({shards: 3, mongos: 3, causallyConsistent: true});
- var config = st.s0.getDB("config");
- var admin = st.s0.getDB("admin");
- var coll = st.s0.getCollection("foo.bar");
+var config = st.s0.getDB("config");
+var admin = st.s0.getDB("admin");
+var coll = st.s0.getCollection("foo.bar");
- // Use separate mongoses for admin, inserting data, and validating results, so no single-mongos
- // tricks will work
- var staleMongos = st.s1;
- var insertMongos = st.s2;
+// Use separate mongoses for admin, inserting data, and validating results, so no single-mongos
+// tricks will work
+var staleMongos = st.s1;
+var insertMongos = st.s2;
- var shards = [st.shard0, st.shard1, st.shard2];
+var shards = [st.shard0, st.shard1, st.shard2];
- //
- // Test that inserts and queries go to the correct shard even when the collection has been
- // sharded from another mongos
- //
+//
+// Test that inserts and queries go to the correct shard even when the collection has been
+// sharded from another mongos
+//
- jsTest.log("Enabling sharding for the first time...");
+jsTest.log("Enabling sharding for the first time...");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- // TODO(PM-85): Make sure we *always* move the primary after collection lifecyle project is
- // complete
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.configRS.awaitLastOpCommitted(); // TODO: Remove after collection lifecyle project (PM-85)
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+// TODO(PM-85): Make sure we *always* move the primary after collection lifecycle project is
+// complete
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+st.configRS.awaitLastOpCommitted();  // TODO: Remove after collection lifecycle project (PM-85)
- var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, test: "a"});
- }
- assert.writeOK(bulk.execute());
- assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
+var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, test: "a"});
+}
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
- assert(coll.drop());
- st.configRS.awaitLastOpCommitted();
+assert(coll.drop());
+st.configRS.awaitLastOpCommitted();
- //
- // Test that inserts and queries go to the correct shard even when the collection has been
- // resharded from another mongos, with a different key
- //
+//
+// Test that inserts and queries go to the correct shard even when the collection has been
+// resharded from another mongos, with a different key
+//
- jsTest.log("Re-enabling sharding with a different key...");
+jsTest.log("Re-enabling sharding with a different key...");
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- assert.commandWorked(coll.ensureIndex({notId: 1}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {notId: 1}}));
- st.configRS.awaitLastOpCommitted();
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+assert.commandWorked(coll.ensureIndex({notId: 1}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {notId: 1}}));
+st.configRS.awaitLastOpCommitted();
- bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({notId: i, test: "b"});
- }
- assert.writeOK(bulk.execute());
- assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
- assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({notId: i, test: "b"});
+}
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
- assert(coll.drop());
- st.configRS.awaitLastOpCommitted();
+assert(coll.drop());
+st.configRS.awaitLastOpCommitted();
- //
- // Test that inserts and queries go to the correct shard even when the collection has been
- // unsharded from another mongos
- //
+//
+// Test that inserts and queries go to the correct shard even when the collection has been
+// unsharded from another mongos
+//
- jsTest.log("Re-creating unsharded collection from a sharded collection...");
+jsTest.log("Re-creating unsharded collection from a sharded collection...");
- bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({test: "c"});
- }
- assert.writeOK(bulk.execute());
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({test: "c"});
+}
+assert.writeOK(bulk.execute());
- assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
- assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
- st.stop();
+st.stop();
})();
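The multi-mongos choreography above works because each router's cached routing table is versioned by the collection epoch: when a stale mongos sends an operation tagged with an old version, the shard rejects it and the router refreshes before retrying. A minimal sketch of the stale-router scenario, assuming a hypothetical namespace test.stale:

// Sketch: a router that missed a drop/recreate still routes correctly.
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.stale', key: {_id: 1}}));
assert(st.s0.getCollection('test.stale').drop());
// st.s1 never observed the drop; its first write triggers an epoch-mismatch
// refresh en route, then succeeds against the new incarnation.
assert.writeOK(st.s1.getCollection('test.stale').insert({_id: 0}));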
diff --git a/jstests/sharding/collation_lookup.js b/jstests/sharding/collation_lookup.js
index 6e202b069c6..f9388cf9aa3 100644
--- a/jstests/sharding/collation_lookup.js
+++ b/jstests/sharding/collation_lookup.js
@@ -7,15 +7,15 @@
* collection the "aggregate" command was performed on.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // for arrayEq
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+load("jstests/aggregation/extras/utils.js"); // for arrayEq
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- function runTests(withDefaultCollationColl, withoutDefaultCollationColl, collation) {
- // Test that the $lookup stage respects the inherited collation.
- let res = withDefaultCollationColl
+function runTests(withDefaultCollationColl, withoutDefaultCollationColl, collation) {
+ // Test that the $lookup stage respects the inherited collation.
+ let res = withDefaultCollationColl
.aggregate([{
$lookup: {
from: withoutDefaultCollationColl.getName(),
@@ -25,14 +25,14 @@
},
}])
.toArray();
- assert.eq(1, res.length, tojson(res));
+ assert.eq(1, res.length, tojson(res));
- let expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
- assert(arrayEq(expected, res[0].matched),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) +
- " up to ordering");
+ let expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
+ assert(
+ arrayEq(expected, res[0].matched),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) + " up to ordering");
- res = withDefaultCollationColl
+ res = withDefaultCollationColl
.aggregate([{
$lookup: {
from: withoutDefaultCollationColl.getName(),
@@ -52,28 +52,27 @@
},
}])
.toArray();
- assert.eq(1, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- },
- {
- "_id": "uppercase",
- "str": "ABC",
- "matched2":
- [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- }
- ];
- assert(arrayEq(expected, res[0].matched1),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
- " up to ordering. " + tojson(res));
-
- // Test that the $lookup stage respects the inherited collation when it optimizes with an
- // $unwind stage.
- res = withDefaultCollationColl
+ assert.eq(1, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ },
+ {
+ "_id": "uppercase",
+ "str": "ABC",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ }
+ ];
+ assert(arrayEq(expected, res[0].matched1),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
+ " up to ordering. " + tojson(res));
+
+ // Test that the $lookup stage respects the inherited collation when it optimizes with an
+ // $unwind stage.
+ res = withDefaultCollationColl
.aggregate([
{
$lookup: {
@@ -86,16 +85,16 @@
{$unwind: "$matched"},
])
.toArray();
- assert.eq(2, res.length, tojson(res));
+ assert.eq(2, res.length, tojson(res));
- expected = [
- {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
- {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+ expected = [
+ {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
+ {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
- res = withDefaultCollationColl
+ res = withDefaultCollationColl
.aggregate([
{
$lookup: {
@@ -119,51 +118,39 @@
{$unwind: "$matched1"},
])
.toArray();
- assert.eq(4, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- }
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
-
- // Test that the $lookup stage respects an explicit collation on the aggregation operation.
- res = withoutDefaultCollationColl
+ assert.eq(4, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ }
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+
+ // Test that the $lookup stage respects an explicit collation on the aggregation operation.
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -178,14 +165,14 @@
],
collation)
.toArray();
- assert.eq(1, res.length, tojson(res));
+ assert.eq(1, res.length, tojson(res));
- expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
- assert(arrayEq(expected, res[0].matched),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) +
- " up to ordering");
+ expected = [{_id: "lowercase", str: "abc"}, {_id: "uppercase", str: "ABC"}];
+ assert(
+ arrayEq(expected, res[0].matched),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched) + " up to ordering");
- res = withoutDefaultCollationColl
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -210,29 +197,28 @@
],
collation)
.toArray();
- assert.eq(1, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- },
- {
- "_id": "uppercase",
- "str": "ABC",
- "matched2":
- [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
- }
- ];
- assert(arrayEq(expected, res[0].matched1),
- "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
- " up to ordering");
-
- // Test that the $lookup stage respects an explicit collation on the aggregation operation
- // when
- // it optimizes with an $unwind stage.
- res = withoutDefaultCollationColl
+ assert.eq(1, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ },
+ {
+ "_id": "uppercase",
+ "str": "ABC",
+ "matched2": [{"_id": "lowercase", "str": "abc"}, {"_id": "uppercase", "str": "ABC"}]
+ }
+ ];
+ assert(arrayEq(expected, res[0].matched1),
+ "Expected " + tojson(expected) + " to equal " + tojson(res[0].matched1) +
+ " up to ordering");
+
+    // Test that the $lookup stage respects an explicit collation on the
+    // aggregation operation when it optimizes
+    // with an $unwind stage.
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -248,16 +234,16 @@
],
collation)
.toArray();
- assert.eq(2, res.length, tojson(res));
+ assert.eq(2, res.length, tojson(res));
- expected = [
- {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
- {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+ expected = [
+ {_id: "lowercase", str: "abc", matched: {_id: "lowercase", str: "abc"}},
+ {_id: "lowercase", str: "abc", matched: {_id: "uppercase", str: "ABC"}}
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
- res = withoutDefaultCollationColl
+ res = withoutDefaultCollationColl
.aggregate(
[
{$match: {_id: "lowercase"}},
@@ -284,52 +270,40 @@
],
collation)
.toArray();
- assert.eq(4, res.length, tojson(res));
-
- expected = [
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }
- },
- {
- "_id": "lowercase",
- "str": "abc",
- "matched1": {
- "_id": "uppercase",
- "str": "ABC",
- "matched2": {"_id": "uppercase", "str": "ABC"}
- }
- }
- ];
- assert(arrayEq(expected, res),
- "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
-
- // Test that the $lookup stage uses the "simple" collation if a collation isn't set on the
- // collection or the aggregation operation.
- res = withoutDefaultCollationColl
+ assert.eq(4, res.length, tojson(res));
+
+ expected = [
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "lowercase", "str": "abc", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "lowercase", "str": "abc"}}
+ },
+ {
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ {"_id": "uppercase", "str": "ABC", "matched2": {"_id": "uppercase", "str": "ABC"}}
+ }
+ ];
+ assert(arrayEq(expected, res),
+ "Expected " + tojson(expected) + " to equal " + tojson(res) + " up to ordering");
+
+ // Test that the $lookup stage uses the "simple" collation if a collation isn't set on the
+ // collection or the aggregation operation.
+ res = withoutDefaultCollationColl
.aggregate([
{$match: {_id: "lowercase"}},
{
@@ -342,9 +316,9 @@
},
])
.toArray();
- assert.eq([{_id: "lowercase", str: "abc", matched: [{_id: "lowercase", str: "abc"}]}], res);
+ assert.eq([{_id: "lowercase", str: "abc", matched: [{_id: "lowercase", str: "abc"}]}], res);
- res = withoutDefaultCollationColl
+ res = withoutDefaultCollationColl
.aggregate([
{$match: {_id: "lowercase"}},
{
@@ -368,92 +342,92 @@
},
])
.toArray();
- assert.eq([{
- "_id": "lowercase",
- "str": "abc",
- "matched1": [{
- "_id": "lowercase",
- "str": "abc",
- "matched2": {"_id": "lowercase", "str": "abc"}
- }]
- }],
- res);
- }
-
- const st = new ShardingTest({shards: 2, config: 1});
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
-
- const testName = "collation_lookup";
- const caseInsensitive = {collation: {locale: "en_US", strength: 2}};
-
- const mongosDB = st.s0.getDB(testName);
- const withDefaultCollationColl = mongosDB[testName + "_with_default"];
- const withoutDefaultCollationColl = mongosDB[testName + "_without_default"];
-
- assert.commandWorked(
- mongosDB.createCollection(withDefaultCollationColl.getName(), caseInsensitive));
- assert.writeOK(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
-
- assert.writeOK(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
- assert.writeOK(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
- assert.writeOK(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
-
- //
- // Sharded collection with default collation and unsharded collection without a default
- // collation.
- //
- assert.commandWorked(
- withDefaultCollationColl.createIndex({str: 1}, {collation: {locale: "simple"}}));
-
- // Enable sharding on the test DB and ensure its primary is shard0000.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Shard the collection with a default collation.
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: withDefaultCollationColl.getFullName(),
- key: {str: 1},
- collation: {locale: "simple"}
- }));
-
- // Split the collection into 2 chunks.
- assert.commandWorked(mongosDB.adminCommand(
- {split: withDefaultCollationColl.getFullName(), middle: {str: "abc"}}));
-
- // Move the chunk containing {str: "abc"} to shard0001.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: withDefaultCollationColl.getFullName(),
- find: {str: "abc"},
- to: st.shard1.shardName
- }));
-
- runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
-
- // TODO: Enable the following tests once SERVER-32536 is fixed.
- //
- // Sharded collection with default collation and sharded collection without a default
- // collation.
- //
-
- // Shard the collection without a default collation.
- // assert.commandWorked(mongosDB.adminCommand({
- // shardCollection: withoutDefaultCollationColl.getFullName(),
- // key: {_id: 1},
- // }));
-
- // // Split the collection into 2 chunks.
- // assert.commandWorked(mongosDB.adminCommand(
- // {split: withoutDefaultCollationColl.getFullName(), middle: {_id: "unmatched"}}));
-
- // // Move the chunk containing {_id: "lowercase"} to shard0001.
- // assert.commandWorked(mongosDB.adminCommand({
- // moveChunk: withoutDefaultCollationColl.getFullName(),
- // find: {_id: "lowercase"},
- // to: st.shard1.shardName
- // }));
-
- // runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
-
- st.stop();
+ assert.eq(
+ [{
+ "_id": "lowercase",
+ "str": "abc",
+ "matched1":
+ [{"_id": "lowercase", "str": "abc", "matched2": {"_id": "lowercase", "str": "abc"}}]
+ }],
+ res);
+}
+
+const st = new ShardingTest({shards: 2, config: 1});
+setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
+
+const testName = "collation_lookup";
+const caseInsensitive = {
+ collation: {locale: "en_US", strength: 2}
+};
+
+const mongosDB = st.s0.getDB(testName);
+const withDefaultCollationColl = mongosDB[testName + "_with_default"];
+const withoutDefaultCollationColl = mongosDB[testName + "_without_default"];
+
+assert.commandWorked(
+ mongosDB.createCollection(withDefaultCollationColl.getName(), caseInsensitive));
+assert.writeOK(withDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+
+assert.writeOK(withoutDefaultCollationColl.insert({_id: "lowercase", str: "abc"}));
+assert.writeOK(withoutDefaultCollationColl.insert({_id: "uppercase", str: "ABC"}));
+assert.writeOK(withoutDefaultCollationColl.insert({_id: "unmatched", str: "def"}));
+
+//
+// Sharded collection with default collation and unsharded collection without a default
+// collation.
+//
+assert.commandWorked(
+ withDefaultCollationColl.createIndex({str: 1}, {collation: {locale: "simple"}}));
+
+// Enable sharding on the test DB and ensure its primary is shard0000.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Shard the collection with a default collation.
+assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: withDefaultCollationColl.getFullName(),
+ key: {str: 1},
+ collation: {locale: "simple"}
+}));
+
+// Split the collection into 2 chunks.
+assert.commandWorked(
+ mongosDB.adminCommand({split: withDefaultCollationColl.getFullName(), middle: {str: "abc"}}));
+
+// Move the chunk containing {str: "abc"} to shard0001.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: withDefaultCollationColl.getFullName(),
+ find: {str: "abc"},
+ to: st.shard1.shardName
+}));
+
+runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
+
+// TODO: Enable the following tests once SERVER-32536 is fixed.
+//
+// Sharded collection with default collation and sharded collection without a default
+// collation.
+//
+
+// Shard the collection without a default collation.
+// assert.commandWorked(mongosDB.adminCommand({
+// shardCollection: withoutDefaultCollationColl.getFullName(),
+// key: {_id: 1},
+// }));
+
+// // Split the collection into 2 chunks.
+// assert.commandWorked(mongosDB.adminCommand(
+// {split: withoutDefaultCollationColl.getFullName(), middle: {_id: "unmatched"}}));
+
+// // Move the chunk containing {_id: "lowercase"} to shard0001.
+// assert.commandWorked(mongosDB.adminCommand({
+// moveChunk: withoutDefaultCollationColl.getFullName(),
+// find: {_id: "lowercase"},
+// to: st.shard1.shardName
+// }));
+
+// runTests(withDefaultCollationColl, withoutDefaultCollationColl, caseInsensitive);
+
+st.stop();
})();
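The property this file verifies can be shown in isolation: the equality match inside $lookup runs under the collation inherited from the collection the aggregate was issued on. A minimal sketch against a plain mongod; the "outer" and "inner" collection names are illustrative assumptions, not part of the test:

    // $lookup inherits the outer collection's default collation (sketch).
    const caseInsensitive = {locale: "en_US", strength: 2};
    assert.commandWorked(db.createCollection("outer", {collation: caseInsensitive}));
    assert.writeOK(db.outer.insert({_id: 1, str: "abc"}));
    assert.writeOK(db.inner.insert({_id: 1, str: "ABC"}));

    // The $lookup equality match uses "outer"'s case-insensitive default
    // collation, so lowercase "abc" matches uppercase "ABC" in "inner".
    const res =
        db.outer
            .aggregate([{
                $lookup:
                    {from: "inner", localField: "str", foreignField: "str", as: "matched"}
            }])
            .toArray();
    assert.eq(1, res[0].matched.length, tojson(res));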
diff --git a/jstests/sharding/collation_targeting.js b/jstests/sharding/collation_targeting.js
index fc2b9c193eb..c58396eaa80 100644
--- a/jstests/sharding/collation_targeting.js
+++ b/jstests/sharding/collation_targeting.js
@@ -1,462 +1,465 @@
// Test shard targeting for queries with collation.
(function() {
- "use strict";
-
- const caseInsensitive = {locale: "en_US", strength: 2};
-
- var explain;
- var writeRes;
-
- // Create a cluster with 3 shards.
- var st = new ShardingTest({shards: 3});
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-
- // Create a collection sharded on {a: 1}. Add 2dsphere index to test $geoNear.
- var coll = testDB.getCollection("simple_collation");
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
-
- // Split the collection.
- // st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
- // st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
- // shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 10}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: "a"}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
-
- // Put data on each shard.
- // Note that the balancer is off by default, so the chunks will stay put.
- // st.shard0.shardName: {a: 1}
- // st.shard1.shardName: {a: 100}, {a: "FOO"}
- // shard0002: {a: "foo"}
- // Include geo field to test $geoNear.
- var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
- var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
- var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
- var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
- assert.writeOK(coll.insert(a_1));
- assert.writeOK(coll.insert(a_100));
- assert.writeOK(coll.insert(a_FOO));
- assert.writeOK(coll.insert(a_foo));
-
- // Aggregate.
-
- // Test an aggregate command on strings with a non-simple collation. This should be
- // scatter-gather.
- assert.eq(2, coll.aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test an aggregate command with a simple collation. This should be single-shard.
- assert.eq(1, coll.aggregate([{$match: {a: "foo"}}]).itcount());
- explain = coll.explain().aggregate([{$match: {a: "foo"}}]);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test an aggregate command on numbers with a non-simple collation. This should be
- // single-shard.
- assert.eq(1, coll.aggregate([{$match: {a: 100}}], {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate([{$match: {a: 100}}], {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Aggregate with $geoNear.
- const geoJSONPoint = {type: "Point", coordinates: [0, 0]};
-
- // Test $geoNear with a query on strings with a non-simple collation. This should
- // scatter-gather.
- const geoNearStageStringQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: "foo"},
- }
- }];
- assert.eq(2, coll.aggregate(geoNearStageStringQuery, {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate(geoNearStageStringQuery, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test $geoNear with a query on strings with a simple collation. This should be single-shard.
- assert.eq(1, coll.aggregate(geoNearStageStringQuery).itcount());
- explain = coll.explain().aggregate(geoNearStageStringQuery);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test a $geoNear with a query on numbers with a non-simple collation. This should be
- // single-shard.
- const geoNearStageNumericalQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: 100},
- }
- }];
- assert.eq(1,
- coll.aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive}).itcount());
- explain = coll.explain().aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Count.
-
- // Test a count command on strings with a non-simple collation. This should be scatter-gather.
- assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).count());
- explain = coll.explain().find({a: "foo"}).collation(caseInsensitive).count();
+"use strict";
+
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+
+var explain;
+var writeRes;
+
+// Create a cluster with 3 shards.
+var st = new ShardingTest({shards: 3});
+var testDB = st.s.getDB("test");
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
+
+// Create a collection sharded on {a: 1}. Add 2dsphere index to test $geoNear.
+var coll = testDB.getCollection("simple_collation");
+coll.drop();
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
+assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+
+// Split the collection.
+// st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
+// st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
+// shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 10}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: "a"}}));
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
+
+// Put data on each shard.
+// Note that the balancer is off by default, so the chunks will stay put.
+// st.shard0.shardName: {a: 1}
+// st.shard1.shardName: {a: 100}, {a: "FOO"}
+// shard0002: {a: "foo"}
+// Include geo field to test $geoNear.
+var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
+var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
+var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
+var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
+assert.writeOK(coll.insert(a_1));
+assert.writeOK(coll.insert(a_100));
+assert.writeOK(coll.insert(a_FOO));
+assert.writeOK(coll.insert(a_foo));
+
+// Aggregate.
+
+// Test an aggregate command on strings with a non-simple collation. This should be
+// scatter-gather.
+assert.eq(2, coll.aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate([{$match: {a: "foo"}}], {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test an aggregate command with a simple collation. This should be single-shard.
+assert.eq(1, coll.aggregate([{$match: {a: "foo"}}]).itcount());
+explain = coll.explain().aggregate([{$match: {a: "foo"}}]);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test an aggregate command on numbers with a non-simple collation. This should be
+// single-shard.
+assert.eq(1, coll.aggregate([{$match: {a: 100}}], {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate([{$match: {a: 100}}], {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Aggregate with $geoNear.
+const geoJSONPoint = {
+ type: "Point",
+ coordinates: [0, 0]
+};
+
+// Test $geoNear with a query on strings with a non-simple collation. This should
+// scatter-gather.
+const geoNearStageStringQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: "foo"},
+ }
+}];
+assert.eq(2, coll.aggregate(geoNearStageStringQuery, {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate(geoNearStageStringQuery, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test $geoNear with a query on strings with a simple collation. This should be single-shard.
+assert.eq(1, coll.aggregate(geoNearStageStringQuery).itcount());
+explain = coll.explain().aggregate(geoNearStageStringQuery);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test a $geoNear with a query on numbers with a non-simple collation. This should be
+// single-shard.
+const geoNearStageNumericalQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: 100},
+ }
+}];
+assert.eq(1, coll.aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive}).itcount());
+explain = coll.explain().aggregate(geoNearStageNumericalQuery, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Count.
+
+// Test a count command on strings with a non-simple collation. This should be scatter-gather.
+assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).count());
+explain = coll.explain().find({a: "foo"}).collation(caseInsensitive).count();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command with a simple collation. This should be single-shard.
+assert.eq(1, coll.find({a: "foo"}).count());
+explain = coll.explain().find({a: "foo"}).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command on numbers with a non-simple collation. This should be single-shard.
+assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).count());
+explain = coll.explain().find({a: 100}).collation(caseInsensitive).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Distinct.
+
+// Test a distinct command on strings with a non-simple collation. This should be
+// scatter-gather.
+assert.eq(2, coll.distinct("_id", {a: "foo"}, {collation: caseInsensitive}).length);
+explain = coll.explain().distinct("_id", {a: "foo"}, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test that deduping respects the collation.
+assert.eq(1, coll.distinct("a", {a: "foo"}, {collation: caseInsensitive}).length);
+
+// Test a distinct command with a simple collation. This should be single-shard.
+assert.eq(1, coll.distinct("_id", {a: "foo"}).length);
+explain = coll.explain().distinct("_id", {a: "foo"});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a distinct command on numbers with a non-simple collation. This should be single-shard.
+assert.eq(1, coll.distinct("_id", {a: 100}, {collation: caseInsensitive}).length);
+explain = coll.explain().distinct("_id", {a: 100}, {collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Find.
+
+// Test a find command on strings with a non-simple collation. This should be scatter-gather.
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).itcount());
+ explain = coll.find({a: "foo"}).collation(caseInsensitive).explain();
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a count command with a simple collation. This should be single-shard.
- assert.eq(1, coll.find({a: "foo"}).count());
- explain = coll.explain().find({a: "foo"}).count();
+}
+
+// Test a find command with a simple collation. This should be single-shard.
+assert.eq(1, coll.find({a: "foo"}).itcount());
+explain = coll.find({a: "foo"}).explain();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a find command on numbers with a non-simple collation. This should be single-shard.
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).itcount());
+ explain = coll.find({a: 100}).collation(caseInsensitive).explain();
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a count command on numbers with a non-simple collation. This should be single-shard.
- assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).count());
- explain = coll.explain().find({a: 100}).collation(caseInsensitive).count();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Distinct.
-
- // Test a distinct command on strings with a non-simple collation. This should be
- // scatter-gather.
- assert.eq(2, coll.distinct("_id", {a: "foo"}, {collation: caseInsensitive}).length);
- explain = coll.explain().distinct("_id", {a: "foo"}, {collation: caseInsensitive});
+}
+
+// FindAndModify.
+
+// Sharded findAndModify on strings with non-simple collation should fail, because findAndModify
+// must target a single shard.
+assert.throws(function() {
+ coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
+});
+assert.throws(function() {
+ coll.explain().findAndModify(
+ {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
+});
+
+// Sharded findAndModify on strings with simple collation should succeed. This should be
+// single-shard.
+assert.eq("foo", coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}).a);
+explain = coll.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Sharded findAndModify on numbers with non-simple collation should succeed. This should be
+// single-shard.
+assert.eq(
+ 100,
+ coll.findAndModify({query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive}).a);
+explain = coll.explain().findAndModify(
+ {query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// MapReduce.
+
+// Test mapReduce on strings with a non-simple collation.
+assert.eq(2,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
+ .results.length);
+
+// Test mapReduce on strings with a simple collation.
+assert.eq(1,
+ assert
+ .commandWorked(coll.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Remove.
+
+// Test a remove command on strings with non-simple collation. This should be scatter-gather.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({a: "foo"}, {collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(2, writeRes.nRemoved);
+ explain = coll.explain().remove({a: "foo"}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test that deduping respects the collation.
- assert.eq(1, coll.distinct("a", {a: "foo"}, {collation: caseInsensitive}).length);
-
- // Test a distinct command with a simple collation. This should be single-shard.
- assert.eq(1, coll.distinct("_id", {a: "foo"}).length);
- explain = coll.explain().distinct("_id", {a: "foo"});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a distinct command on numbers with a non-simple collation. This should be single-shard.
- assert.eq(1, coll.distinct("_id", {a: 100}, {collation: caseInsensitive}).length);
- explain = coll.explain().distinct("_id", {a: 100}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Find.
-
- // Test a find command on strings with a non-simple collation. This should be scatter-gather.
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(2, coll.find({a: "foo"}).collation(caseInsensitive).itcount());
- explain = coll.find({a: "foo"}).collation(caseInsensitive).explain();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Test a find command with a simple collation. This should be single-shard.
- assert.eq(1, coll.find({a: "foo"}).itcount());
- explain = coll.find({a: "foo"}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a find command on numbers with a non-simple collation. This should be single-shard.
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(1, coll.find({a: 100}).collation(caseInsensitive).itcount());
- explain = coll.find({a: 100}).collation(caseInsensitive).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // FindAndModify.
-
- // Sharded findAndModify on strings with non-simple collation should fail, because findAndModify
- // must target a single shard.
- assert.throws(function() {
- coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
- });
- assert.throws(function() {
- coll.explain().findAndModify(
- {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive});
- });
-
- // Sharded findAndModify on strings with simple collation should succeed. This should be
- // single-shard.
- assert.eq("foo", coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}).a);
- explain = coll.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Sharded findAndModify on numbers with non-simple collation should succeed. This should be
- // single-shard.
- assert.eq(
- 100,
- coll.findAndModify({query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive})
- .a);
- explain = coll.explain().findAndModify(
- {query: {a: 100}, update: {$set: {b: 1}}, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // MapReduce.
-
- // Test mapReduce on strings with a non-simple collation.
- assert.eq(2,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: caseInsensitive}))
- .results.length);
-
- // Test mapReduce on strings with a simple collation.
- assert.eq(1,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Remove.
-
- // Test a remove command on strings with non-simple collation. This should be scatter-gather.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({a: "foo"}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nRemoved);
- explain = coll.explain().remove({a: "foo"}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_FOO));
- assert.writeOK(coll.insert(a_foo));
- }
-
- // Test a remove command on strings with simple collation. This should be single-shard.
- writeRes = coll.remove({a: "foo"});
+ assert.writeOK(coll.insert(a_FOO));
+ assert.writeOK(coll.insert(a_foo));
+}
+
+// Test a remove command on strings with simple collation. This should be single-shard.
+writeRes = coll.remove({a: "foo"});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = coll.explain().remove({a: "foo"});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(coll.insert(a_foo));
+
+// Test a remove command on numbers with non-simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({a: 100}, {collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: "foo"});
+ explain = coll.explain().remove({a: 100}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_foo));
-
- // Test a remove command on numbers with non-simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({a: 100}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: 100}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
- }
-
- // A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
- // exact-ID if it contains an equality on _id and either has the collection default collation or
- // _id is not a string/object/array.
-
- // Single remove on string shard key with non-simple collation should fail, because it is not
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.remove({a: "foo"}, {justOne: true, collation: caseInsensitive}));
- }
-
- // Single remove on string shard key with simple collation should succeed, because it is
- // single-shard.
- writeRes = coll.remove({a: "foo"}, {justOne: true});
+ assert.writeOK(coll.insert(a_100));
+}
+
+// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
+// exact-ID if it contains an equality on _id and either has the collection default collation or
+// _id is not a string/object/array.
+
+// Single remove on string shard key with non-simple collation should fail, because it is not
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.remove({a: "foo"}, {justOne: true, collation: caseInsensitive}));
+}
+
+// Single remove on string shard key with simple collation should succeed, because it is
+// single-shard.
+writeRes = coll.remove({a: "foo"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = coll.explain().remove({a: "foo"}, {justOne: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(coll.insert(a_foo));
+
+// Single remove on number shard key with non-simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({a: 100}, {justOne: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: "foo"}, {justOne: true});
+ explain = coll.explain().remove({a: 100}, {justOne: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_foo));
-
- // Single remove on number shard key with non-simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({a: 100}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = coll.explain().remove({a: 100}, {justOne: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(coll.insert(a_100));
- }
-
- // Single remove on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive}));
- }
-
- // Single remove on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+ assert.writeOK(coll.insert(a_100));
+}
+
+// Single remove on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive}));
+}
+
+// Single remove on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+writeRes = coll.remove({_id: "foo"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+
+// Single remove on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.remove({_id: "foo"}, {justOne: true});
+ writeRes = coll.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
+}
- // Single remove on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- }
-
- // Single remove on number _id with non-collection-default collation should succeed, because it
- // is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.remove({_id: a_100._id}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- assert.writeOK(coll.insert(a_100));
- }
+// Single remove on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.remove({_id: a_100._id}, {justOne: true, collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nRemoved);
+ assert.writeOK(coll.insert(a_100));
+}
- // Update.
-
- // Test an update command on strings with non-simple collation. This should be scatter-gather.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes =
- coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nMatched);
- explain = coll.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- }
+// Update.
- // Test an update command on strings with simple collation. This should be single-shard.
- writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+// Test an update command on strings with non-simple collation. This should be scatter-gather.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(2, writeRes.nMatched);
+ explain = coll.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
+ assert.commandWorked(explain);
+ assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+}
+
+// Test an update command on strings with simple collation. This should be single-shard.
+writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test an update command on numbers with non-simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+ explain =
+ coll.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test an update command on numbers with non-simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update({a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update(
- {a: 100}, {$set: {b: 1}}, {multi: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
- // it contains an equality on _id and either has the collection default collation or _id is not
- // a string/object/array.
-
- // Single update on string shard key with non-simple collation should fail, because it is not
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
- }
-
- // Single update on string shard key with simple collation should succeed, because it is
- // single-shard.
- writeRes = coll.update({a: "foo"}, {$set: {b: 1}});
+}
+
+// A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
+// it contains an equality on _id and either has the collection default collation or _id is not
+// a string/object/array.
+
+// Single update on string shard key with non-simple collation should fail, because it is not
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
+}
+
+// Single update on string shard key with simple collation should succeed, because it is
+// single-shard.
+writeRes = coll.update({a: "foo"}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = coll.explain().update({a: "foo"}, {$set: {b: 1}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Single update on number shard key with non-simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: "foo"}, {$set: {b: 1}});
+ explain = coll.explain().update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+}
- // Single update on number shard key with non-simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: 100}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Single update on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- assert.writeError(coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
- }
-
- // Single update on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+// Single update on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+ assert.writeError(coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));
+ assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+}
+
+// Single update on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
+writeRes = coll.update({_id: "foo"}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+
+// Single update on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.update({_id: "foo"}, {$set: {b: 1}});
+ writeRes = coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
+}
- // Single update on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(coll.insert({_id: "foo", a: "bar"}));
- writeRes = coll.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- assert.writeOK(coll.remove({_id: "foo"}, {justOne: true}));
- }
-
- // Single update on number _id with non-collection-default collation should succeed, because it
- // is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update({_id: a_foo._id}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- }
-
- // Upsert must always be single-shard.
-
- // Upsert on strings with non-simple collation should fail, because it is not single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(coll.update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive}));
- }
-
- // Upsert on strings with simple collation should succeed, because it is single-shard.
- writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+// Single update on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update({_id: a_foo._id}, {$set: {b: 1}}, {collation: caseInsensitive});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nMatched);
+}
+
+// Upsert must always be single-shard.
+
+// Upsert on strings with non-simple collation should fail, because it is not single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(coll.update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive}));
+}
+
+// Upsert on strings with simple collation should succeed, because it is single-shard.
+writeRes = coll.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Upsert on numbers with non-simple collation should succeed, because it is single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = coll.update(
+ {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true});
+ explain = coll.explain().update(
+ {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+}
- // Upsert on numbers with non-simple collation should succeed, because it is single shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = coll.update(
- {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = coll.explain().update(
- {a: 100}, {$set: {b: 1}}, {multi: true, upsert: true, collation: caseInsensitive});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- st.stop();
+st.stop();
})();
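The exact-ID rule that recurs through the remove, update, and upsert cases above condenses to the following sketch. It assumes a collection "test.c" sharded on a string field with the simple default collation, as in this test; the name "c" and the values are illustrative:

    // Exact-ID targeting rule (illustrative names and values).
    const caseInsensitive = {locale: "en_US", strength: 2};

    // A string _id with a non-default collation is not exact-ID: the router
    // cannot prove the write targets one shard, so a single (non-multi)
    // update is rejected.
    assert.writeError(
        db.c.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}));

    // A numeric _id is collation-insensitive, so the same write is exact-ID,
    // targets a single shard, and succeeds.
    assert.writeOK(db.c.update({_id: 42}, {$set: {b: 1}}, {collation: caseInsensitive}));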
diff --git a/jstests/sharding/collation_targeting_inherited.js b/jstests/sharding/collation_targeting_inherited.js
index 4c68e23fbc7..676dadbc972 100644
--- a/jstests/sharding/collation_targeting_inherited.js
+++ b/jstests/sharding/collation_targeting_inherited.js
@@ -1,482 +1,486 @@
// Test shard targeting for queries on a collection with a default collation.
(function() {
- "use strict";
-
- const caseInsensitive = {locale: "en_US", strength: 2};
-
- var explain;
- var writeRes;
-
- // Create a cluster with 3 shards.
- var st = new ShardingTest({shards: 3});
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-
- // Create a collection with a case-insensitive default collation sharded on {a: 1}.
- var collCaseInsensitive = testDB.getCollection("case_insensitive");
- collCaseInsensitive.drop();
- assert.commandWorked(testDB.createCollection("case_insensitive", {collation: caseInsensitive}));
- assert.commandWorked(collCaseInsensitive.createIndex({a: 1}, {collation: {locale: "simple"}}));
- assert.commandWorked(collCaseInsensitive.createIndex({geo: "2dsphere"}));
- assert.commandWorked(testDB.adminCommand({
- shardCollection: collCaseInsensitive.getFullName(),
- key: {a: 1},
- collation: {locale: "simple"}
- }));
-
- // Split the collection.
- // st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
- // st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
- // shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
- assert.commandWorked(
- testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: 10}}));
- assert.commandWorked(
- testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: "a"}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: collCaseInsensitive.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: collCaseInsensitive.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: collCaseInsensitive.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
-
- // Put data on each shard.
- // Note that the balancer is off by default, so the chunks will stay put.
- // st.shard0.shardName: {a: 1}
- // st.shard1.shardName: {a: 100}, {a: "FOO"}
- // shard0002: {a: "foo"}
- // Include geo field to test geoNear.
- var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
- var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
- var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
- var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
- assert.writeOK(collCaseInsensitive.insert(a_1));
- assert.writeOK(collCaseInsensitive.insert(a_100));
- assert.writeOK(collCaseInsensitive.insert(a_FOO));
- assert.writeOK(collCaseInsensitive.insert(a_foo));
-
- // Aggregate.
-
- // Test an aggregate command on strings with a non-simple collation inherited from the
- // collection default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.aggregate([{$match: {a: "foo"}}]).itcount());
- explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}]);
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test an aggregate command with a simple collation. This should be single-shard.
- assert.eq(1,
- collCaseInsensitive.aggregate([{$match: {a: "foo"}}], {collation: {locale: "simple"}})
- .itcount());
- explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}],
- {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test an aggregate command on numbers with a non-simple collation inherited from the
- // collection default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.aggregate([{$match: {a: 100}}]).itcount());
- explain = collCaseInsensitive.explain().aggregate([{$match: {a: 100}}]);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Aggregate with $geoNear.
- const geoJSONPoint = {type: "Point", coordinates: [0, 0]};
-
- // Test $geoNear with a query on strings with a non-simple collation inherited from the
- // collection default. This should scatter-gather.
- const geoNearStageStringQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: "foo"},
- }
- }];
- assert.eq(2, collCaseInsensitive.aggregate(geoNearStageStringQuery).itcount());
- explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery);
- assert.commandWorked(explain);
- assert.eq(3, Object.keys(explain.shards).length);
-
- // Test $geoNear with a query on strings with a simple collation. This should be single-shard.
- assert.eq(
- 1,
- collCaseInsensitive.aggregate(geoNearStageStringQuery, {collation: {locale: "simple"}})
- .itcount());
- explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery,
- {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Test a $geoNear with a query on numbers with a non-simple collation inherited from the
- // collection default. This should be single-shard.
- const geoNearStageNumericalQuery = [{
- $geoNear: {
- near: geoJSONPoint,
- distanceField: "dist",
- spherical: true,
- query: {a: 100},
- }
- }];
- assert.eq(1, collCaseInsensitive.aggregate(geoNearStageNumericalQuery).itcount());
- explain = collCaseInsensitive.explain().aggregate(geoNearStageNumericalQuery);
- assert.commandWorked(explain);
- assert.eq(1, Object.keys(explain.shards).length);
-
- // Count.
-
- // Test a count command on strings with a non-simple collation inherited from the collection
- // default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.find({a: "foo"}).count());
- explain = collCaseInsensitive.explain().find({a: "foo"}).count();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a count command with a simple collation. This should be single-shard.
- assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).count());
- explain = collCaseInsensitive.explain().find({a: "foo"}).collation({locale: "simple"}).count();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a find command on numbers with a non-simple collation inheritied from the collection
- // default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.find({a: 100}).count());
- explain = collCaseInsensitive.explain().find({a: 100}).count();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Distinct.
-
- // Test a distinct command on strings with a non-simple collation inherited from the collection
- // default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.distinct("_id", {a: "foo"}).length);
- explain = collCaseInsensitive.explain().distinct("_id", {a: "foo"});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test that deduping respects the collation inherited from the collection default.
- assert.eq(1, collCaseInsensitive.distinct("a", {a: "foo"}).length);
-
- // Test a distinct command with a simple collation. This should be single-shard.
- assert.eq(
- 1, collCaseInsensitive.distinct("_id", {a: "foo"}, {collation: {locale: "simple"}}).length);
- explain =
- collCaseInsensitive.explain().distinct("_id", {a: "foo"}, {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a distinct command on numbers with a non-simple collation inherited from the collection
- // default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.distinct("_id", {a: 100}).length);
- explain = collCaseInsensitive.explain().distinct("_id", {a: 100});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Find.
-
- // Test a find command on strings with a non-simple collation inherited from the collection
- // default. This should be scatter-gather.
- assert.eq(2, collCaseInsensitive.find({a: "foo"}).itcount());
- explain = collCaseInsensitive.find({a: "foo"}).explain();
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test a find command with a simple collation. This should be single-shard.
- if (testDB.getMongo().useReadCommands()) {
- assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).itcount());
- explain = collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+"use strict";
+
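+// A collation with strength 2 compares strings case-insensitively, so "foo" and "FOO" are
+// equal under it.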
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+
+var explain;
+var writeRes;
+
+// Create a cluster with 3 shards.
+var st = new ShardingTest({shards: 3});
+var testDB = st.s.getDB("test");
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
+
+// Create a collection with a case-insensitive default collation sharded on {a: 1}.
+var collCaseInsensitive = testDB.getCollection("case_insensitive");
+collCaseInsensitive.drop();
+assert.commandWorked(testDB.createCollection("case_insensitive", {collation: caseInsensitive}));
+assert.commandWorked(collCaseInsensitive.createIndex({a: 1}, {collation: {locale: "simple"}}));
+assert.commandWorked(collCaseInsensitive.createIndex({geo: "2dsphere"}));
+assert.commandWorked(testDB.adminCommand({
+ shardCollection: collCaseInsensitive.getFullName(),
+ key: {a: 1},
+ collation: {locale: "simple"}
+}));
+
+// Split the collection.
+// st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 }
+// st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"}
+// st.shard2.shardName: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }}
+assert.commandWorked(
+ testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: 10}}));
+assert.commandWorked(
+ testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: "a"}}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: collCaseInsensitive.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: collCaseInsensitive.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: collCaseInsensitive.getFullName(), find: {a: "foo"}, to: st.shard2.shardName}));
+
+// Put data on each shard.
+// Note that the balancer is off by default, so the chunks will stay put.
+// st.shard0.shardName: {a: 1}
+// st.shard1.shardName: {a: 100}, {a: "FOO"}
+// st.shard2.shardName: {a: "foo"}
+// Include geo field to test geoNear.
+var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}};
+var a_100 = {_id: 1, a: 100, geo: {type: "Point", coordinates: [0, 0]}};
+var a_FOO = {_id: 2, a: "FOO", geo: {type: "Point", coordinates: [0, 0]}};
+var a_foo = {_id: 3, a: "foo", geo: {type: "Point", coordinates: [0, 0]}};
+assert.writeOK(collCaseInsensitive.insert(a_1));
+assert.writeOK(collCaseInsensitive.insert(a_100));
+assert.writeOK(collCaseInsensitive.insert(a_FOO));
+assert.writeOK(collCaseInsensitive.insert(a_foo));
+
+// Aggregate.
+
+// Test an aggregate command on strings with a non-simple collation inherited from the
+// collection default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.aggregate([{$match: {a: "foo"}}]).itcount());
+explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}]);
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test an aggregate command with a simple collation. This should be single-shard.
+assert.eq(1,
+ collCaseInsensitive.aggregate([{$match: {a: "foo"}}], {collation: {locale: "simple"}})
+ .itcount());
+explain = collCaseInsensitive.explain().aggregate([{$match: {a: "foo"}}],
+ {collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test an aggregate command on numbers with a non-simple collation inherited from the
+// collection default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.aggregate([{$match: {a: 100}}]).itcount());
+explain = collCaseInsensitive.explain().aggregate([{$match: {a: 100}}]);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Aggregate with $geoNear.
+const geoJSONPoint = {
+ type: "Point",
+ coordinates: [0, 0]
+};
+
+// Test $geoNear with a query on strings with a non-simple collation inherited from the
+// collection default. This should be scatter-gather.
+const geoNearStageStringQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: "foo"},
}
-
- // Test a find command on numbers with a non-simple collation inherited from the collection
- // default. This should be single-shard.
- assert.eq(1, collCaseInsensitive.find({a: 100}).itcount());
- explain = collCaseInsensitive.find({a: 100}).explain();
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // FindAndModify.
-
- // Sharded findAndModify on strings with non-simple collation inherited from the collection
- // default should fail, because findAndModify must target a single shard.
- assert.throws(function() {
- collCaseInsensitive.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
- });
- assert.throws(function() {
- collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
- });
-
- // Sharded findAndModify on strings with simple collation should succeed. This should be
- // single-shard.
- assert.eq("foo",
- collCaseInsensitive
- .findAndModify(
- {query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}})
- .a);
- explain = collCaseInsensitive.explain().findAndModify(
- {query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Sharded findAndModify on numbers with non-simple collation inherited from collection default
- // should succeed. This should be single-shard.
- assert.eq(100, collCaseInsensitive.findAndModify({query: {a: 100}, update: {$set: {b: 1}}}).a);
- explain =
- collCaseInsensitive.explain().findAndModify({query: {a: 100}, update: {$set: {b: 1}}});
+}];
+assert.eq(2, collCaseInsensitive.aggregate(geoNearStageStringQuery).itcount());
+explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery);
+assert.commandWorked(explain);
+assert.eq(3, Object.keys(explain.shards).length);
+
+// Test $geoNear with a query on strings with a simple collation. This should be single-shard.
+assert.eq(1,
+ collCaseInsensitive.aggregate(geoNearStageStringQuery, {collation: {locale: "simple"}})
+ .itcount());
+explain = collCaseInsensitive.explain().aggregate(geoNearStageStringQuery,
+ {collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Test a $geoNear with a query on numbers with a non-simple collation inherited from the
+// collection default. This should be single-shard.
+const geoNearStageNumericalQuery = [{
+ $geoNear: {
+ near: geoJSONPoint,
+ distanceField: "dist",
+ spherical: true,
+ query: {a: 100},
+ }
+}];
+assert.eq(1, collCaseInsensitive.aggregate(geoNearStageNumericalQuery).itcount());
+explain = collCaseInsensitive.explain().aggregate(geoNearStageNumericalQuery);
+assert.commandWorked(explain);
+assert.eq(1, Object.keys(explain.shards).length);
+
+// Count.
+
+// Test a count command on strings with a non-simple collation inherited from the collection
+// default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.find({a: "foo"}).count());
+explain = collCaseInsensitive.explain().find({a: "foo"}).count();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command with a simple collation. This should be single-shard.
+assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).count());
+explain = collCaseInsensitive.explain().find({a: "foo"}).collation({locale: "simple"}).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a count command on numbers with a non-simple collation inherited from the collection
+// default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.find({a: 100}).count());
+explain = collCaseInsensitive.explain().find({a: 100}).count();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Distinct.
+
+// Test a distinct command on strings with a non-simple collation inherited from the collection
+// default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.distinct("_id", {a: "foo"}).length);
+explain = collCaseInsensitive.explain().distinct("_id", {a: "foo"});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test that deduping respects the collation inherited from the collection default.
+assert.eq(1, collCaseInsensitive.distinct("a", {a: "foo"}).length);
+
+// Test a distinct command with a simple collation. This should be single-shard.
+assert.eq(1,
+ collCaseInsensitive.distinct("_id", {a: "foo"}, {collation: {locale: "simple"}}).length);
+explain =
+ collCaseInsensitive.explain().distinct("_id", {a: "foo"}, {collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a distinct command on numbers with a non-simple collation inherited from the collection
+// default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.distinct("_id", {a: 100}).length);
+explain = collCaseInsensitive.explain().distinct("_id", {a: 100});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Find.
+
+// Test a find command on strings with a non-simple collation inherited from the collection
+// default. This should be scatter-gather.
+assert.eq(2, collCaseInsensitive.find({a: "foo"}).itcount());
+explain = collCaseInsensitive.find({a: "foo"}).explain();
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test a find command with a simple collation. This should be single-shard.
+if (testDB.getMongo().useReadCommands()) {
+ assert.eq(1, collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).itcount());
+ explain = collCaseInsensitive.find({a: "foo"}).collation({locale: "simple"}).explain();
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // MapReduce.
-
- // Test mapReduce on strings with a non-simple collation inherited from collection default.
- assert.eq(2,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}}))
- .results.length);
-
- // Test mapReduce on strings with a simple collation.
- assert.eq(1,
- assert
- .commandWorked(collCaseInsensitive.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: "foo"}, collation: {locale: "simple"}}))
- .results.length);
-
- // Remove.
-
- // Test a remove command on strings with non-simple collation inherited from collection default.
- // This should be scatter-gather.
- writeRes = collCaseInsensitive.remove({a: "foo"});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: "foo"});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_FOO));
- assert.writeOK(collCaseInsensitive.insert(a_foo));
-
- // Test a remove command on strings with simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.remove({a: "foo"}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: "foo"}, {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
- }
-
- // Test a remove command on numbers with non-simple collation inherited from collection default.
- // This should be single-shard.
- writeRes = collCaseInsensitive.remove({a: 100});
+}
+
+// Test a find command on numbers with a non-simple collation inherited from the collection
+// default. This should be single-shard.
+assert.eq(1, collCaseInsensitive.find({a: 100}).itcount());
+explain = collCaseInsensitive.find({a: 100}).explain();
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// FindAndModify.
+
+// Sharded findAndModify on strings with non-simple collation inherited from the collection
+// default should fail, because findAndModify must target a single shard.
+assert.throws(function() {
+ collCaseInsensitive.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
+});
+assert.throws(function() {
+ collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}});
+});
+
+// Sharded findAndModify on strings with simple collation should succeed. This should be
+// single-shard.
+assert.eq(
+ "foo",
+ collCaseInsensitive
+ .findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}})
+ .a);
+explain = collCaseInsensitive.explain().findAndModify(
+ {query: {a: "foo"}, update: {$set: {b: 1}}, collation: {locale: "simple"}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Sharded findAndModify on numbers with non-simple collation inherited from collection default
+// should succeed. This should be single-shard.
+assert.eq(100, collCaseInsensitive.findAndModify({query: {a: 100}, update: {$set: {b: 1}}}).a);
+explain = collCaseInsensitive.explain().findAndModify({query: {a: 100}, update: {$set: {b: 1}}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// MapReduce.
+
+// Test mapReduce on strings with a non-simple collation inherited from collection default.
+assert.eq(2,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}}))
+ .results.length);
+
+// Test mapReduce on strings with a simple collation.
+assert.eq(1,
+ assert
+ .commandWorked(collCaseInsensitive.mapReduce(
+ function() {
+ emit(this.a, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {inline: 1}, query: {a: "foo"}, collation: {locale: "simple"}}))
+ .results.length);
+
+// Remove.
+
+// Test a remove command on strings with non-simple collation inherited from collection default.
+// This should be scatter-gather.
+writeRes = collCaseInsensitive.remove({a: "foo"});
+assert.writeOK(writeRes);
+assert.eq(2, writeRes.nRemoved);
+explain = collCaseInsensitive.explain().remove({a: "foo"});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(collCaseInsensitive.insert(a_FOO));
+assert.writeOK(collCaseInsensitive.insert(a_foo));
+
+// Test a remove command on strings with simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.remove({a: "foo"}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: 100});
+ explain = collCaseInsensitive.explain().remove({a: "foo"}, {collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_100));
-
- // A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
- // exact-ID if it contains an equality on _id and either has the collection default collation or
- // _id is not a string/object/array.
-
- // Single remove on string shard key with non-simple collation inherited from collection default
- // should fail, because it is not single-shard.
- assert.writeError(collCaseInsensitive.remove({a: "foo"}, {justOne: true}));
-
- // Single remove on string shard key with simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes =
- collCaseInsensitive.remove({a: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove(
- {a: "foo"}, {justOne: true, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_foo));
- }
-
- // Single remove on number shard key with non-simple collation inherited from collection default
- // should succeed, because it is single-shard.
- writeRes = collCaseInsensitive.remove({a: 100}, {justOne: true});
+ assert.writeOK(collCaseInsensitive.insert(a_foo));
+}
+
+// Test a remove command on numbers with non-simple collation inherited from collection default.
+// This should be single-shard.
+writeRes = collCaseInsensitive.remove({a: 100});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = collCaseInsensitive.explain().remove({a: 100});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(collCaseInsensitive.insert(a_100));
+
+// A single remove (justOne: true) must be single-shard or an exact-ID query. A query is
+// exact-ID if it contains an equality on _id and either has the collection default collation or
+// _id is not a string/object/array.
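+// For example, {_id: 100} is exact-ID under any collation, while {_id: "foo"} is exact-ID only
+// under the collection default collation.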
+
+// Single remove on string shard key with non-simple collation inherited from collection default
+// should fail, because it is not single-shard.
+assert.writeError(collCaseInsensitive.remove({a: "foo"}, {justOne: true}));
+
+// Single remove on string shard key with simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes =
+ collCaseInsensitive.remove({a: "foo"}, {justOne: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- explain = collCaseInsensitive.explain().remove({a: 100}, {justOne: true});
+ explain = collCaseInsensitive.explain().remove({a: "foo"},
+ {justOne: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- assert.writeOK(collCaseInsensitive.insert(a_100));
-
- // Single remove on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- assert.writeError(
- collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}}));
-
- // Single remove on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+ assert.writeOK(collCaseInsensitive.insert(a_foo));
+}
+
+// Single remove on number shard key with non-simple collation inherited from collection default
+// should succeed, because it is single-shard.
+writeRes = collCaseInsensitive.remove({a: 100}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+explain = collCaseInsensitive.explain().remove({a: 100}, {justOne: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+assert.writeOK(collCaseInsensitive.insert(a_100));
+
+// Single remove on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+assert.writeError(
+ collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: {locale: "simple"}}));
+
+// Single remove on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+writeRes = collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+
+// Single remove on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes = collCaseInsensitive.remove({_id: "foo"}, {justOne: true});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
-
- // Single remove on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes =
- collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nRemoved);
- }
-
- // Single remove on number _id with non-collection-default collation should succeed, because it
- // is an exact-ID query.
- writeRes = collCaseInsensitive.remove({_id: a_100._id},
- {justOne: true, collation: {locale: "simple"}});
+ writeRes =
+ collCaseInsensitive.remove({_id: "foo"}, {justOne: true, collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nRemoved);
- assert.writeOK(collCaseInsensitive.insert(a_100));
-
- // Update.
-
- // Test an update command on strings with non-simple collation inherited from collection
- // default. This should be scatter-gather.
- writeRes = collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
- assert.writeOK(writeRes);
- assert.eq(2, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
- assert.commandWorked(explain);
- assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
-
- // Test an update command on strings with simple collation. This should be single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Test an update command on numbers with non-simple collation inherited from collection
- // default. This should be single-shard.
- writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true});
+}
+
+// Single remove on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+writeRes =
+ collCaseInsensitive.remove({_id: a_100._id}, {justOne: true, collation: {locale: "simple"}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nRemoved);
+assert.writeOK(collCaseInsensitive.insert(a_100));
+
+// Update.
+
+// Test an update command on strings with non-simple collation inherited from collection
+// default. This should be scatter-gather.
+writeRes = collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(writeRes);
+assert.eq(2, writeRes.nMatched);
+explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}, {multi: true});
+assert.commandWorked(explain);
+assert.eq(3, explain.queryPlanner.winningPlan.shards.length);
+
+// Test an update command on strings with simple collation. This should be single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
+ explain = collCaseInsensitive.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if
- // it
- // contains an equality on _id and either has the collection default collation or _id is not a
- // string/object/array.
-
- // Single update on string shard key with non-simple collation inherited from collection default
- // should fail, because it is not single-shard.
- assert.writeError(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}));
-
- // Single update on string shard key with simple collation should succeed, because it is
- // single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes =
- collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
-
- // Single update on number shard key with non-simple collation inherited from collation default
- // should succeed, because it is single-shard.
- writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}});
+}
+
+// Test an update command on numbers with non-simple collation inherited from collection
+// default. This should be single-shard.
+writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID
+// if it contains an equality on _id and either has the collection default collation or _id is
+// not a string/object/array.
+
+// Single update on string shard key with non-simple collation inherited from collection default
+// should fail, because it is not single-shard.
+assert.writeError(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}));
+
+// Single update on string shard key with simple collation should succeed, because it is
+// single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes =
+ collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}});
+ explain = collCaseInsensitive.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- // Single update on string _id with non-collection-default collation should fail, because it is
- // not an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeError(collCaseInsensitive.update(
- {_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}}));
- }
-
- // Single update on string _id with collection-default collation should succeed, because it is
- // an exact-ID query.
+}
+
+// Single update on number shard key with non-simple collation inherited from collection default
+// should succeed, because it is single-shard.
+writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain = collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+// Single update on string _id with non-collection-default collation should fail, because it is
+// not an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ assert.writeError(
+ collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: {locale: "simple"}}));
+}
+
+// Single update on string _id with collection-default collation should succeed, because it is
+// an exact-ID query.
+assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
+writeRes = collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+
+// Single update on string _id with collection-default collation explicitly given should
+// succeed, because it is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes = collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}});
+ writeRes =
+ collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
+}
- // Single update on string _id with collection-default collation explicitly given should
- // succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- assert.writeOK(collCaseInsensitive.insert({_id: "foo", a: "bar"}));
- writeRes =
- collCaseInsensitive.update({_id: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- assert.writeOK(collCaseInsensitive.remove({_id: "foo"}, {justOne: true}));
- }
-
- // Single update on number _id with non-collection-default collation inherited from collection
- // default should succeed, because it is an exact-ID query.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.update(
- {_id: a_foo._id}, {$set: {b: 1}}, {collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- }
+// Single update on number _id with non-collection-default collation should succeed, because it
+// is an exact-ID query.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.update(
+ {_id: a_foo._id}, {$set: {b: 1}}, {collation: {locale: "simple"}});
+ assert.writeOK(writeRes);
+ assert.eq(1, writeRes.nMatched);
+}
- // Upsert must always be single-shard.
+// Upsert must always be single-shard.
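+// (An upsert may have to insert a new document, so mongos must be able to target exactly one
+// shard for it.)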
- // Upsert on strings with non-simple collation inherited from collection default should fail,
- // because it is not single-shard.
- assert.writeError(
- collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true}));
-
- // Upsert on strings with simple collation should succeed, because it is single-shard.
- if (testDB.getMongo().writeMode() === "commands") {
- writeRes = collCaseInsensitive.update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
- assert.writeOK(writeRes);
- assert.eq(1, writeRes.nMatched);
- explain = collCaseInsensitive.explain().update(
- {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
- assert.commandWorked(explain);
- assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
- }
+// Upsert on strings with non-simple collation inherited from collection default should fail,
+// because it is not single-shard.
+assert.writeError(
+ collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true}));
- // Upsert on numbers with non-simple collation inherited from collection default should succeed,
- // because it is single-shard.
- writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+// Upsert on strings with simple collation should succeed, because it is single-shard.
+if (testDB.getMongo().writeMode() === "commands") {
+ writeRes = collCaseInsensitive.update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
assert.writeOK(writeRes);
assert.eq(1, writeRes.nMatched);
- explain =
- collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+ explain = collCaseInsensitive.explain().update(
+ {a: "foo"}, {$set: {b: 1}}, {multi: true, upsert: true, collation: {locale: "simple"}});
assert.commandWorked(explain);
assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
-
- st.stop();
+}
+
+// Upsert on numbers with non-simple collation inherited from collection default should succeed,
+// because it is single-shard.
+writeRes = collCaseInsensitive.update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.writeOK(writeRes);
+assert.eq(1, writeRes.nMatched);
+explain =
+ collCaseInsensitive.explain().update({a: 100}, {$set: {b: 1}}, {multi: true, upsert: true});
+assert.commandWorked(explain);
+assert.eq(1, explain.queryPlanner.winningPlan.shards.length);
+
+st.stop();
})();
diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js
index aac64734c7f..95a84f32532 100644
--- a/jstests/sharding/commands_that_write_accept_wc_configRS.js
+++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js
@@ -15,217 +15,216 @@ load('jstests/libs/write_concern_util.js');
load('jstests/multiVersion/libs/auth_helpers.js');
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- var st = new ShardingTest({
- // Set priority of secondaries to zero to prevent spurious elections.
- shards: {
- rs0: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- },
- rs1: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- }
- },
- configReplSetTestOptions: {settings: {chainingAllowed: false}},
- mongos: 1
- });
-
- var mongos = st.s;
- var dbName = "wc-test-configRS";
- var db = mongos.getDB(dbName);
- var adminDB = mongos.getDB('admin');
- // A database connection on a local shard, rather than through the mongos.
- var localDB = st.shard0.getDB('localWCTest');
- var collName = 'leaves';
- var coll = db[collName];
- var counter = 0;
-
- function dropTestData() {
- st.configRS.awaitReplication();
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- db.dropUser('username');
- db.dropUser('user1');
- localDB.dropUser('user2');
- assert(!db.auth("username", "password"), "auth should have failed");
- getNewDB();
- }
-
- // We get new databases because we do not want to reuse dropped databases that may be in a
- // bad state. This test calls dropDatabase when config server secondary nodes are down, so the
- // command fails after only the database metadata is dropped from the config servers, but the
- // data on the shards still remains. This makes future operations, such as moveChunk, fail.
- function getNewDB() {
- db = mongos.getDB(dbName + counter);
- counter++;
- coll = db[collName];
- }
-
- // Commands in 'commands' will accept any valid writeConcern.
- var commands = [];
-
- commands.push({
- req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
- setupFunc: function() {},
- confirmFunc: function() {
- assert(db.auth("username", "password"), "auth failed");
- },
- requiresMajority: true,
- runsOnShards: false,
- failsOnShards: false,
- admin: false
- });
+"use strict";
- commands.push({
- req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
- setupFunc: function() {
- db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
- },
- confirmFunc: function() {
- assert(!db.auth("username", "password"), "auth should have failed");
- assert(db.auth("username", "password2"), "auth failed");
- },
- requiresMajority: true,
- runsOnShards: false,
- admin: false
- });
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
- commands.push({
- req: {dropUser: 'tempUser'},
- setupFunc: function() {
- db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
- assert(db.auth("tempUser", "password"), "auth failed");
- },
- confirmFunc: function() {
- assert(!db.auth("tempUser", "password"), "auth should have failed");
+var st = new ShardingTest({
+ // Set priority of secondaries to zero to prevent spurious elections.
+ shards: {
+ rs0: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
},
- requiresMajority: true,
- runsOnShards: false,
- failsOnShards: false,
- admin: false
- });
-
- function testInvalidWriteConcern(wc, cmd) {
- if (wc.w === 2 && !cmd.requiresMajority) {
- return;
+ rs1: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
}
- cmd.req.writeConcern = wc;
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestData();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandFailed(res);
- assert(!res.writeConcernError,
- 'bad writeConcern on config server had writeConcernError. ' +
- tojson(res.writeConcernError));
+ },
+ configReplSetTestOptions: {settings: {chainingAllowed: false}},
+ mongos: 1
+});
+
+var mongos = st.s;
+var dbName = "wc-test-configRS";
+var db = mongos.getDB(dbName);
+var adminDB = mongos.getDB('admin');
+// A database connection on a local shard, rather than through the mongos.
+var localDB = st.shard0.getDB('localWCTest');
+var collName = 'leaves';
+var coll = db[collName];
+var counter = 0;
+
+function dropTestData() {
+ st.configRS.awaitReplication();
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+ db.dropUser('username');
+ db.dropUser('user1');
+ localDB.dropUser('user2');
+ assert(!db.auth("username", "password"), "auth should have failed");
+ getNewDB();
+}
+
+// We get new databases because we do not want to reuse dropped databases that may be in a
+// bad state. This test calls dropDatabase when config server secondary nodes are down, so the
+// command fails after only the database metadata has been dropped from the config servers,
+// while the data remains on the shards. This makes future operations, such as moveChunk, fail.
+function getNewDB() {
+ db = mongos.getDB(dbName + counter);
+ counter++;
+ coll = db[collName];
+}
+
+// Commands in 'commands' will accept any valid writeConcern.
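+// Each entry supplies the command itself ('req'), setup and verification hooks ('setupFunc',
+// 'confirmFunc'), and flags describing whether it requires a majority writeConcern and where
+// it runs.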
+var commands = [];
+
+commands.push({
+ req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert(db.auth("username", "password"), "auth failed");
+ },
+ requiresMajority: true,
+ runsOnShards: false,
+ failsOnShards: false,
+ admin: false
+});
+
+commands.push({
+ req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
+ setupFunc: function() {
+ db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
+ },
+ confirmFunc: function() {
+ assert(!db.auth("username", "password"), "auth should have failed");
+ assert(db.auth("username", "password2"), "auth failed");
+ },
+ requiresMajority: true,
+ runsOnShards: false,
+ admin: false
+});
+
+commands.push({
+ req: {dropUser: 'tempUser'},
+ setupFunc: function() {
+ db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
+ assert(db.auth("tempUser", "password"), "auth failed");
+ },
+ confirmFunc: function() {
+ assert(!db.auth("tempUser", "password"), "auth should have failed");
+ },
+ requiresMajority: true,
+ runsOnShards: false,
+ failsOnShards: false,
+ admin: false
+});
+
+function testInvalidWriteConcern(wc, cmd) {
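+    // {w: 2} is only an invalid writeConcern for commands that require w: majority, so skip
+    // it for commands that do not.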
+ if (wc.w === 2 && !cmd.requiresMajority) {
+ return;
}
-
- function runCommandFailOnShardsPassOnConfigs(cmd) {
- var req = cmd.req;
- var res;
- // This command is run on the shards in addition to the config servers.
- if (cmd.runsOnShards) {
- if (cmd.failsOnShards) {
- // This command fails when there is a writeConcernError on the shards.
- // We set the timeout high enough that the command should not time out against the
- // config server, but not exorbitantly high, because it will always time out against
- // shards and so will increase the runtime of this test.
- req.writeConcern.wtimeout = 15 * 1000;
- res = runCommandCheckAdmin(db, cmd);
- restartReplicationOnAllShards(st);
- assert.commandFailed(res);
- assert(
- !res.writeConcernError,
- 'command on config servers with a paused replicaset had writeConcernError: ' +
- tojson(res));
- } else {
- // This command passes and returns a writeConcernError when there is a
- // writeConcernError on the shards.
- // We set the timeout high enough that the command should not time out against the
- // config server, but not exorbitantly high, because it will always time out against
- // shards and so will increase the runtime of this test.
- req.writeConcern.wtimeout = 15 * 1000;
- res = runCommandCheckAdmin(db, cmd);
- restartReplicationOnAllShards(st);
- assert.commandWorked(res);
- cmd.confirmFunc();
- assertWriteConcernError(res);
- }
- } else {
- // This command is only run on the config servers and so should pass when shards are
- // not replicating.
+ cmd.req.writeConcern = wc;
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropTestData();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandFailed(res);
+ assert(!res.writeConcernError,
+ 'bad writeConcern on config server had writeConcernError. ' +
+ tojson(res.writeConcernError));
+}
+
+function runCommandFailOnShardsPassOnConfigs(cmd) {
+ var req = cmd.req;
+ var res;
+ // This command is run on the shards in addition to the config servers.
+ if (cmd.runsOnShards) {
+ if (cmd.failsOnShards) {
+ // This command fails when there is a writeConcernError on the shards.
+ // We set the timeout high enough that the command should not time out against the
+ // config server, but not exorbitantly high, because it will always time out against
+ // shards and so will increase the runtime of this test.
+ req.writeConcern.wtimeout = 15 * 1000;
res = runCommandCheckAdmin(db, cmd);
restartReplicationOnAllShards(st);
- assert.commandWorked(res);
- cmd.confirmFunc();
+ assert.commandFailed(res);
assert(!res.writeConcernError,
'command on config servers with a paused replicaset had writeConcernError: ' +
tojson(res));
+ } else {
+ // This command passes and returns a writeConcernError when there is a
+ // writeConcernError on the shards.
+ // We set the timeout high enough that the command should not time out against the
+ // config server, but not exorbitantly high, because it will always time out against
+ // shards and so will increase the runtime of this test.
+ req.writeConcern.wtimeout = 15 * 1000;
+ res = runCommandCheckAdmin(db, cmd);
+ restartReplicationOnAllShards(st);
+ assert.commandWorked(res);
+ cmd.confirmFunc();
+ assertWriteConcernError(res);
}
- }
-
- function testValidWriteConcern(wc, cmd) {
- var req = cmd.req;
- var setupFunc = cmd.setupFunc;
- var confirmFunc = cmd.confirmFunc;
-
- req.writeConcern = wc;
- jsTest.log("Testing " + tojson(req));
-
- dropTestData();
- setupFunc();
-
- // Command with a full cluster should succeed.
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full cluster had writeConcernError: ' + tojson(res));
- confirmFunc();
-
- dropTestData();
- setupFunc();
- // Stop replication at all shard secondaries.
- stopReplicationOnSecondariesOfAllShards(st);
-
- // Command is running on full config server replica set but a majority of a shard's
- // nodes are down.
- runCommandFailOnShardsPassOnConfigs(cmd);
-
- dropTestData();
- setupFunc();
- // Stop replication at all config server secondaries and all shard secondaries.
- stopReplicationOnSecondariesOfAllShards(st);
- st.configRS.awaitReplication();
- stopReplicationOnSecondaries(st.configRS);
-
- // Command should fail after two config servers are not replicating.
- req.writeConcern.wtimeout = 3000;
+ } else {
+ // This command is only run on the config servers and so should pass when shards are
+ // not replicating.
res = runCommandCheckAdmin(db, cmd);
restartReplicationOnAllShards(st);
- assert.commandFailed(res);
+ assert.commandWorked(res);
+ cmd.confirmFunc();
assert(!res.writeConcernError,
'command on config servers with a paused replicaset had writeConcernError: ' +
tojson(res));
}
-
- var majorityWC = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
-
- // Config server commands require w: majority writeConcerns.
- var nonMajorityWCs = [{w: 'invalid'}, {w: 2}];
-
- commands.forEach(function(cmd) {
- nonMajorityWCs.forEach(function(wc) {
- testInvalidWriteConcern(wc, cmd);
- });
- testValidWriteConcern(majorityWC, cmd);
+}
+
+function testValidWriteConcern(wc, cmd) {
+ var req = cmd.req;
+ var setupFunc = cmd.setupFunc;
+ var confirmFunc = cmd.confirmFunc;
+
+ req.writeConcern = wc;
+ jsTest.log("Testing " + tojson(req));
+
+ dropTestData();
+ setupFunc();
+
+ // Command with a full cluster should succeed.
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+ 'command on a full cluster had writeConcernError: ' + tojson(res));
+ confirmFunc();
+
+ dropTestData();
+ setupFunc();
+ // Stop replication at all shard secondaries.
+ stopReplicationOnSecondariesOfAllShards(st);
+
+    // The command runs with the full config server replica set available, but a majority of
+    // each shard's nodes are down.
+ runCommandFailOnShardsPassOnConfigs(cmd);
+
+ dropTestData();
+ setupFunc();
+ // Stop replication at all config server secondaries and all shard secondaries.
+ stopReplicationOnSecondariesOfAllShards(st);
+ st.configRS.awaitReplication();
+ stopReplicationOnSecondaries(st.configRS);
+
+    // The command should fail now that two of the config server nodes are not replicating.
+ req.writeConcern.wtimeout = 3000;
+ res = runCommandCheckAdmin(db, cmd);
+ restartReplicationOnAllShards(st);
+ assert.commandFailed(res);
+ assert(
+ !res.writeConcernError,
+ 'command on config servers with a paused replicaset had writeConcernError: ' + tojson(res));
+}
+
+var majorityWC = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
+
+// Config server commands require w: majority writeConcerns.
+var nonMajorityWCs = [{w: 'invalid'}, {w: 2}];
+
+commands.forEach(function(cmd) {
+ nonMajorityWCs.forEach(function(wc) {
+ testInvalidWriteConcern(wc, cmd);
});
+ testValidWriteConcern(majorityWC, cmd);
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js
index 80ac26b36e7..ba791154b44 100644
--- a/jstests/sharding/commands_that_write_accept_wc_shards.js
+++ b/jstests/sharding/commands_that_write_accept_wc_shards.js
@@ -12,368 +12,368 @@
load('jstests/libs/write_concern_util.js');
(function() {
- "use strict";
- var st = new ShardingTest({
- // Set priority of secondaries to zero to prevent spurious elections.
- shards: {
- rs0: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- },
- rs1: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- }
- },
- configReplSetTestOptions: {settings: {chainingAllowed: false}},
- mongos: 1,
- });
+"use strict";
+var st = new ShardingTest({
+ // Set priority of secondaries to zero to prevent spurious elections.
+ shards: {
+ rs0: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+ },
+ rs1: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+ }
+ },
+ configReplSetTestOptions: {settings: {chainingAllowed: false}},
+ mongos: 1,
+});
- var mongos = st.s;
- var dbName = "wc-test-shards";
- var db = mongos.getDB(dbName);
- var collName = 'leaves';
- var coll = db[collName];
+var mongos = st.s;
+var dbName = "wc-test-shards";
+var db = mongos.getDB(dbName);
+var collName = 'leaves';
+var coll = db[collName];
- function dropTestDatabase() {
- db.runCommand({dropDatabase: 1});
- db.extra.insert({a: 1});
- coll = db[collName];
- st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
- assert.eq(0, coll.find().itcount(), "test collection not empty");
- assert.eq(1, db.extra.find().itcount(), "extra collection should have 1 document");
- }
+function dropTestDatabase() {
+ db.runCommand({dropDatabase: 1});
+ db.extra.insert({a: 1});
+ coll = db[collName];
+ st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+ assert.eq(1, db.extra.find().itcount(), "extra collection should have 1 document");
+}
- var commands = [];
+var commands = [];
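+// Each entry pairs the command under test ('req') with setup and verification hooks; 'admin'
+// indicates whether it must be run against the admin database.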
- // Tests a runOnAllShardsCommand against a sharded collection.
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'sharded_type_index'}]},
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({type: 'oak', x: -3});
- coll.insert({type: 'maple', x: 23});
- assert.eq(coll.getIndexes().length, 2);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 3);
- },
- admin: false
- });
+// Tests a runOnAllShardsCommand against a sharded collection.
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'sharded_type_index'}]},
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({type: 'oak', x: -3});
+ coll.insert({type: 'maple', x: 23});
+ assert.eq(coll.getIndexes().length, 2);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 3);
+ },
+ admin: false
+});
- // Tests a runOnAllShardsCommand.
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
- setupFunc: function() {
- coll.insert({type: 'oak'});
- st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
- assert.eq(coll.getIndexes().length, 1);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 2);
- },
- admin: false
- });
+// Tests a runOnAllShardsCommand.
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
+ assert.eq(coll.getIndexes().length, 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 2);
+ },
+ admin: false
+});
- // Tests a batched write command.
- commands.push({
- req: {insert: collName, documents: [{x: -3, type: 'maple'}, {x: 23, type: 'maple'}]},
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'maple'}), 2);
- },
- admin: false
- });
+// Tests a batched write command.
+commands.push({
+ req: {insert: collName, documents: [{x: -3, type: 'maple'}, {x: 23, type: 'maple'}]},
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'maple'}), 2);
+ },
+ admin: false
+});
- // Tests a passthrough.
- commands.push({
- req: {renameCollection: "renameCollWC.leaves", to: 'renameCollWC.pine_needles'},
- setupFunc: function() {
- db = db.getSiblingDB("renameCollWC");
- // Ensure that database is created.
- db.leaves.insert({type: 'oak'});
- st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
- db.leaves.drop();
- db.pine_needles.drop();
- db.leaves.insert({type: 'oak'});
- assert.eq(db.leaves.count(), 1);
- assert.eq(db.pine_needles.count(), 0);
- },
- confirmFunc: function() {
- assert.eq(db.leaves.count(), 0);
- assert.eq(db.pine_needles.count(), 1);
- },
- admin: true
- });
+// Tests a passthrough.
+commands.push({
+ req: {renameCollection: "renameCollWC.leaves", to: 'renameCollWC.pine_needles'},
+ setupFunc: function() {
+ db = db.getSiblingDB("renameCollWC");
+ // Ensure that database is created.
+ db.leaves.insert({type: 'oak'});
+ st.ensurePrimaryShard(db.toString(), st.shard0.shardName);
+ db.leaves.drop();
+ db.pine_needles.drop();
+ db.leaves.insert({type: 'oak'});
+ assert.eq(db.leaves.count(), 1);
+ assert.eq(db.pine_needles.count(), 0);
+ },
+ confirmFunc: function() {
+ assert.eq(db.leaves.count(), 0);
+ assert.eq(db.pine_needles.count(), 1);
+ },
+ admin: true
+});
- commands.push({
- req: {
- update: collName,
- updates: [{
- q: {type: 'oak'},
- u: [{$set: {type: 'ginkgo'}}],
- }],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- },
- admin: false
- });
+commands.push({
+ req: {
+ update: collName,
+ updates: [{
+ q: {type: 'oak'},
+ u: [{$set: {type: 'ginkgo'}}],
+ }],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ },
+ admin: false
+});
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: {$set: {type: 'ginkgo'}},
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- },
- admin: false
- });
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: {$set: {type: 'ginkgo'}},
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ },
+ admin: false
+});
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: [{$set: {type: 'ginkgo'}}],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- },
- admin: false
- });
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: [{$set: {type: 'ginkgo'}}],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ },
+ admin: false
+});
- // MapReduce on an unsharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: "foo"
- },
- setupFunc: function() {
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on an unsharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: "foo"
+ },
+ setupFunc: function() {
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce on an unsharded collection with an output to a sharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: {replace: "foo", sharded: true}
- },
- setupFunc: function() {
- db.adminCommand({enablesharding: db.toString()});
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on an unsharded collection with an output to a sharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: {replace: "foo", sharded: true}
+ },
+ setupFunc: function() {
+ db.adminCommand({enablesharding: db.toString()});
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce on a sharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- if (!this.tags) {
- return;
- }
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: "foo"
- },
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on a sharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ if (!this.tags) {
+ return;
+ }
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: "foo"
+ },
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce on a sharded collection with an output action to an unsharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- if (!this.tags) {
- return;
- }
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: {replace: "foo", sharded: false}
- },
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce on a sharded collection with an output action to an unsharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ if (!this.tags) {
+ return;
+ }
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: {replace: "foo", sharded: false}
+ },
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- // MapReduce from a sharded collection with an output to a sharded collection.
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- if (!this.tags) {
- return;
- }
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- var count = 0;
- values.forEach(function(v) {
- count = count + v;
- });
- return count;
- },
- out: {replace: "foo", sharded: true}
- },
- setupFunc: function() {
- shardCollectionWithChunks(st, coll);
- coll.insert({x: -3, tags: ["a", "b"]});
- coll.insert({x: -7, tags: ["b", "c"]});
- coll.insert({x: 23, tags: ["c", "a"]});
- coll.insert({x: 27, tags: ["b", "c"]});
- },
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
- db.foo.drop();
- },
- admin: false
- });
+// MapReduce from a sharded collection with an output to a sharded collection.
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ if (!this.tags) {
+ return;
+ }
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
+ },
+ reduce: function(key, values) {
+ var count = 0;
+ values.forEach(function(v) {
+ count = count + v;
+ });
+ return count;
+ },
+ out: {replace: "foo", sharded: true}
+ },
+ setupFunc: function() {
+ shardCollectionWithChunks(st, coll);
+ coll.insert({x: -3, tags: ["a", "b"]});
+ coll.insert({x: -7, tags: ["b", "c"]});
+ coll.insert({x: 23, tags: ["c", "a"]});
+ coll.insert({x: 27, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value, 3);
+ db.foo.drop();
+ },
+ admin: false
+});
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'majority', wtimeout: 5 * 60 * 1000};
- jsTest.log("Testing " + tojson(cmd.req));
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 5 * 60 * 1000};
+ jsTest.log("Testing " + tojson(cmd.req));
- dropTestDatabase();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full cluster had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
- }
+ dropTestDatabase();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+ 'command on a full cluster had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
- function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'invalid'};
- jsTest.log("Testing " + tojson(cmd.req));
+function testInvalidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'invalid'};
+ jsTest.log("Testing " + tojson(cmd.req));
- dropTestDatabase();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assertWriteConcernError(res, ErrorCodes.UnknownReplWriteConcern);
- cmd.confirmFunc();
- }
+ dropTestDatabase();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assertWriteConcernError(res, ErrorCodes.UnknownReplWriteConcern);
+ cmd.confirmFunc();
+}
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- testInvalidWriteConcern(cmd);
- });
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ testInvalidWriteConcern(cmd);
+});
- st.stop();
+st.stop();
})();
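
The descriptors in this file all follow one harness shape: a req command body, a setupFunc that stages data, and a confirmFunc that verifies the effect. Below is a minimal stand-alone sketch of that dispatch loop; the helper name runWithWriteConcern is hypothetical, while db.runCommand, db.adminCommand, and assert.commandWorked are standard shell APIs.

    // Hypothetical sketch of the harness pattern: attach a writeConcern to the
    // command body, run setup, dispatch against the right database, verify.
    function runWithWriteConcern(cmd, wc, db) {
        cmd.req.writeConcern = wc;  // merged into the command document itself
        cmd.setupFunc();            // stage documents, indexes, or sharding state
        var res = cmd.admin ? db.adminCommand(cmd.req) : db.runCommand(cmd.req);
        assert.commandWorked(res);
        cmd.confirmFunc();          // confirm the write actually took effect
        return res;
    }
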
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 92196425263..6b7c7155a6e 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -6,55 +6,55 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var st = new ShardingTest({
- shards: 1,
- other: {
- c0: {}, // Make sure 1st config server is primary
- c1: {rsConfig: {priority: 0}},
- c2: {rsConfig: {priority: 0}}
- }
+"use strict";
+
+var st = new ShardingTest({
+ shards: 1,
+ other: {
+ c0: {}, // Make sure 1st config server is primary
+ c1: {rsConfig: {priority: 0}},
+ c2: {rsConfig: {priority: 0}}
+ }
+});
+
+assert.eq(st.config0, st.configRS.getPrimary());
+
+// Create the "test" database while the cluster metadata is still writeable.
+st.s.getDB('test').foo.insert({a: 1});
+
+// Take down two of the config servers so the remaining one goes into SECONDARY state.
+st.configRS.stop(1);
+st.configRS.stop(2);
+st.configRS.awaitNoPrimary();
+
+jsTestLog("Starting a new mongos when the config servers have no primary which should work");
+var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
+assert.neq(null, mongos2);
+
+var testOps = function(mongos) {
+ jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
+ mongos);
+ var initialCount = mongos.getDB('test').foo.count();
+ assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
+ assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
+
+ assert.throws(function() {
+ mongos.getDB('config').shards.findOne();
});
-
- assert.eq(st.config0, st.configRS.getPrimary());
-
- // Create the "test" database while the cluster metadata is still writeable.
- st.s.getDB('test').foo.insert({a: 1});
-
- // Take down two of the config servers so the remaining one goes into SECONDARY state.
- st.configRS.stop(1);
- st.configRS.stop(2);
- st.configRS.awaitNoPrimary();
-
- jsTestLog("Starting a new mongos when the config servers have no primary which should work");
- var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
- assert.neq(null, mongos2);
-
- var testOps = function(mongos) {
- jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
- mongos);
- var initialCount = mongos.getDB('test').foo.count();
- assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
- assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
-
- assert.throws(function() {
- mongos.getDB('config').shards.findOne();
- });
- mongos.setSlaveOk(true);
- var shardDoc = mongos.getDB('config').shards.findOne();
- mongos.setSlaveOk(false);
- assert.neq(null, shardDoc);
-
- jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
- assert.writeError(mongos.getDB("newDB").foo.insert({a: 1}));
- assert.commandFailed(
- mongos.getDB('admin').runCommand({shardCollection: "test.foo", key: {a: 1}}));
- };
-
- testOps(mongos2);
- testOps(st.s);
-
- st.stop();
- MongoRunner.stopMongos(mongos2);
+ mongos.setSlaveOk(true);
+ var shardDoc = mongos.getDB('config').shards.findOne();
+ mongos.setSlaveOk(false);
+ assert.neq(null, shardDoc);
+
+ jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
+ assert.writeError(mongos.getDB("newDB").foo.insert({a: 1}));
+ assert.commandFailed(
+ mongos.getDB('admin').runCommand({shardCollection: "test.foo", key: {a: 1}}));
+};
+
+testOps(mongos2);
+testOps(st.s);
+
+st.stop();
+MongoRunner.stopMongos(mongos2);
}());
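
One detail worth calling out from the test above: once the CSRS has no primary, plain reads against the config database throw, and the shell must opt into secondary reads first. A hedged sketch, assuming a mongos connection object; the helper name is made up:

    // Sketch (not part of the test): read config metadata with no CSRS primary
    // by temporarily enabling secondary reads on the connection.
    function readShardDocWithoutPrimary(mongos) {
        mongos.setSlaveOk(true);   // allow reads from SECONDARY config nodes
        var doc = mongos.getDB('config').shards.findOne();
        mongos.setSlaveOk(false);  // restore the default read behavior
        return doc;
    }
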
diff --git a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
index d4be74ed8bc..33e4c5b735b 100644
--- a/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
+++ b/jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
@@ -5,193 +5,193 @@
* 2) Issuing a metadata command directly to a config server with non-majority write concern fails.
*/
(function() {
- 'use strict';
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
- const newShardName = "newShard";
-
- // Commands sent directly to the config server should fail with WC < majority.
- const unacceptableWCsForConfig = [
- {writeConcern: {w: 1}},
- {writeConcern: {w: 2}},
- {writeConcern: {w: 3}},
- // TODO: should metadata commands allow j: false? can CSRS have an in-memory storage engine?
- // writeConcern{w: "majority", j: "false"}},
- ];
-
- // Only write concern majority can be sent to the config server.
- const acceptableWCsForConfig = [
- {writeConcern: {w: "majority"}},
- {writeConcern: {w: "majority", wtimeout: 15000}},
- ];
-
- // Any write concern can be sent to a mongos, because mongos will upconvert it to majority.
- const unacceptableWCsForMongos = [];
- const acceptableWCsForMongos = [
- {},
- {writeConcern: {w: 0}},
- {writeConcern: {w: 0, wtimeout: 15000}},
- {writeConcern: {w: 1}},
- {writeConcern: {w: 2}},
- {writeConcern: {w: 3}},
- {writeConcern: {w: "majority"}},
- {writeConcern: {w: "majority", wtimeout: 15000}},
- ];
-
- const setupFuncs = {
- noop: function() {},
- createDatabase: function() {
- // A database is implicitly created when a collection within it is created.
- assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName}));
- },
- enableSharding: function() {
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- },
- addShard: function() {
- assert.commandWorked(st.s.adminCommand({addShard: newShard.name, name: newShardName}));
- },
- };
-
- const cleanupFuncs = {
- noop: function() {},
- dropDatabase: function() {
- assert.commandWorked(st.s.getDB(dbName).runCommand({dropDatabase: 1}));
- },
- removeShardIfExists: function() {
- var res = st.s.adminCommand({removeShard: newShardName});
- if (!res.ok && res.code == ErrorCodes.ShardNotFound) {
- return;
- }
- assert.commandWorked(res);
- assert.eq('started', res.state);
- res = st.s.adminCommand({removeShard: newShardName});
- assert.commandWorked(res);
- assert.eq('completed', res.state);
- },
- };
-
- function checkCommand(
- conn, command, unacceptableWCs, acceptableWCs, adminCommand, setupFunc, cleanupFunc) {
- unacceptableWCs.forEach(function(writeConcern) {
- jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
- " against " + conn + ", expecting the command to fail");
- setupFunc();
- let commandWithWriteConcern = {};
- Object.assign(commandWithWriteConcern, command, writeConcern);
- if (adminCommand) {
- assert.commandFailedWithCode(conn.adminCommand(commandWithWriteConcern),
- ErrorCodes.InvalidOptions);
- } else {
- assert.commandFailedWithCode(conn.runCommand(commandWithWriteConcern),
- ErrorCodes.InvalidOptions);
- }
- cleanupFunc();
- });
-
- acceptableWCs.forEach(function(writeConcern) {
- jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
- " against " + conn + ", expecting the command to succeed");
- setupFunc();
- let commandWithWriteConcern = {};
- Object.assign(commandWithWriteConcern, command, writeConcern);
- if (adminCommand) {
- assert.commandWorked(conn.adminCommand(commandWithWriteConcern));
- } else {
- assert.commandWorked(conn.runCommand(commandWithWriteConcern));
- }
- cleanupFunc();
- });
- }
-
- function checkCommandMongos(command, setupFunc, cleanupFunc) {
- checkCommand(st.s,
- command,
- unacceptableWCsForMongos,
- acceptableWCsForMongos,
- true,
- setupFunc,
- cleanupFunc);
- }
-
- function checkCommandConfigSvr(command, setupFunc, cleanupFunc) {
- checkCommand(st.configRS.getPrimary(),
- command,
- unacceptableWCsForConfig,
- acceptableWCsForConfig,
- true,
- setupFunc,
- cleanupFunc);
- }
-
- var st = new ShardingTest({shards: 1});
-
- // enableSharding
- checkCommandMongos({enableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
- checkCommandConfigSvr(
- {_configsvrEnableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
-
- // movePrimary
- checkCommandMongos({movePrimary: dbName, to: st.shard0.name},
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
- checkCommandConfigSvr({_configsvrMovePrimary: dbName, to: st.shard0.name},
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
-
- // We use a name different from ns because that collection was already created in setupFuncs.
- checkCommandConfigSvr({_configsvrCreateCollection: dbName + '.bar', options: {}},
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
-
- // shardCollection
- checkCommandMongos(
- {shardCollection: ns, key: {_id: 1}}, setupFuncs.enableSharding, cleanupFuncs.dropDatabase);
- checkCommandConfigSvr({_configsvrShardCollection: ns, key: {_id: 1}},
- setupFuncs.enableSharding,
- cleanupFuncs.dropDatabase);
-
- // createDatabase
- // Don't check createDatabase against mongos: there is no createDatabase command exposed on
- // mongos; a database is created implicitly when a collection in it is created.
- checkCommandConfigSvr({_configsvrCreateDatabase: dbName, to: st.shard0.name},
- setupFuncs.noop,
- cleanupFuncs.dropDatabase);
-
- // addShard
- var newShard = MongoRunner.runMongod({shardsvr: ""});
- checkCommandMongos({addShard: newShard.name, name: newShardName},
- setupFuncs.noop,
- cleanupFuncs.removeShardIfExists);
- checkCommandConfigSvr({_configsvrAddShard: newShard.name, name: newShardName},
- setupFuncs.noop,
- cleanupFuncs.removeShardIfExists);
-
- // removeShard
- checkCommandMongos({removeShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
- checkCommandConfigSvr(
- {_configsvrRemoveShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
-
- // dropCollection
- checkCommandMongos({drop: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
- checkCommandConfigSvr(
- {_configsvrDropCollection: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
-
- // dropDatabase
-
- // We can't use the checkCommandMongos wrapper because we need a connection to the test
- // database.
- checkCommand(st.s.getDB(dbName),
- {dropDatabase: 1},
+'use strict';
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+const newShardName = "newShard";
+
+// Commands sent directly to the config server should fail with WC < majority.
+const unacceptableWCsForConfig = [
+ {writeConcern: {w: 1}},
+ {writeConcern: {w: 2}},
+ {writeConcern: {w: 3}},
+ // TODO: should metadata commands allow j: false? can CSRS have an in-memory storage engine?
+ // writeConcern{w: "majority", j: "false"}},
+];
+
+// Only write concern majority can be sent to the config server.
+const acceptableWCsForConfig = [
+ {writeConcern: {w: "majority"}},
+ {writeConcern: {w: "majority", wtimeout: 15000}},
+];
+
+// Any write concern can be sent to a mongos, because mongos will upconvert it to majority.
+const unacceptableWCsForMongos = [];
+const acceptableWCsForMongos = [
+ {},
+ {writeConcern: {w: 0}},
+ {writeConcern: {w: 0, wtimeout: 15000}},
+ {writeConcern: {w: 1}},
+ {writeConcern: {w: 2}},
+ {writeConcern: {w: 3}},
+ {writeConcern: {w: "majority"}},
+ {writeConcern: {w: "majority", wtimeout: 15000}},
+];
+
+const setupFuncs = {
+ noop: function() {},
+ createDatabase: function() {
+ // A database is implicitly created when a collection within it is created.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName}));
+ },
+ enableSharding: function() {
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ },
+ addShard: function() {
+ assert.commandWorked(st.s.adminCommand({addShard: newShard.name, name: newShardName}));
+ },
+};
+
+const cleanupFuncs = {
+ noop: function() {},
+ dropDatabase: function() {
+ assert.commandWorked(st.s.getDB(dbName).runCommand({dropDatabase: 1}));
+ },
+ removeShardIfExists: function() {
+ var res = st.s.adminCommand({removeShard: newShardName});
+ if (!res.ok && res.code == ErrorCodes.ShardNotFound) {
+ return;
+ }
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ res = st.s.adminCommand({removeShard: newShardName});
+ assert.commandWorked(res);
+ assert.eq('completed', res.state);
+ },
+};
+
+function checkCommand(
+ conn, command, unacceptableWCs, acceptableWCs, adminCommand, setupFunc, cleanupFunc) {
+ unacceptableWCs.forEach(function(writeConcern) {
+ jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
+ " against " + conn + ", expecting the command to fail");
+ setupFunc();
+ let commandWithWriteConcern = {};
+ Object.assign(commandWithWriteConcern, command, writeConcern);
+ if (adminCommand) {
+ assert.commandFailedWithCode(conn.adminCommand(commandWithWriteConcern),
+ ErrorCodes.InvalidOptions);
+ } else {
+ assert.commandFailedWithCode(conn.runCommand(commandWithWriteConcern),
+ ErrorCodes.InvalidOptions);
+ }
+ cleanupFunc();
+ });
+
+ acceptableWCs.forEach(function(writeConcern) {
+ jsTest.log("testing " + tojson(command) + " with writeConcern " + tojson(writeConcern) +
+ " against " + conn + ", expecting the command to succeed");
+ setupFunc();
+ let commandWithWriteConcern = {};
+ Object.assign(commandWithWriteConcern, command, writeConcern);
+ if (adminCommand) {
+ assert.commandWorked(conn.adminCommand(commandWithWriteConcern));
+ } else {
+ assert.commandWorked(conn.runCommand(commandWithWriteConcern));
+ }
+ cleanupFunc();
+ });
+}
+
+function checkCommandMongos(command, setupFunc, cleanupFunc) {
+ checkCommand(st.s,
+ command,
unacceptableWCsForMongos,
acceptableWCsForMongos,
- false,
- setupFuncs.createDatabase,
- cleanupFuncs.dropDatabase);
- checkCommandConfigSvr(
- {_configsvrDropDatabase: dbName}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
-
- MongoRunner.stopMongos(newShard);
- st.stop();
+ true,
+ setupFunc,
+ cleanupFunc);
+}
+
+function checkCommandConfigSvr(command, setupFunc, cleanupFunc) {
+ checkCommand(st.configRS.getPrimary(),
+ command,
+ unacceptableWCsForConfig,
+ acceptableWCsForConfig,
+ true,
+ setupFunc,
+ cleanupFunc);
+}
+
+var st = new ShardingTest({shards: 1});
+
+// enableSharding
+checkCommandMongos({enableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
+checkCommandConfigSvr(
+ {_configsvrEnableSharding: dbName}, setupFuncs.noop, cleanupFuncs.dropDatabase);
+
+// movePrimary
+checkCommandMongos({movePrimary: dbName, to: st.shard0.name},
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+checkCommandConfigSvr({_configsvrMovePrimary: dbName, to: st.shard0.name},
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+
+// We use a name different from ns because that collection was already created in setupFuncs.
+checkCommandConfigSvr({_configsvrCreateCollection: dbName + '.bar', options: {}},
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+
+// shardCollection
+checkCommandMongos(
+ {shardCollection: ns, key: {_id: 1}}, setupFuncs.enableSharding, cleanupFuncs.dropDatabase);
+checkCommandConfigSvr({_configsvrShardCollection: ns, key: {_id: 1}},
+ setupFuncs.enableSharding,
+ cleanupFuncs.dropDatabase);
+
+// createDatabase
+// Don't check createDatabase against mongos: there is no createDatabase command exposed on
+// mongos; a database is created implicitly when a collection in it is created.
+checkCommandConfigSvr({_configsvrCreateDatabase: dbName, to: st.shard0.name},
+ setupFuncs.noop,
+ cleanupFuncs.dropDatabase);
+
+// addShard
+var newShard = MongoRunner.runMongod({shardsvr: ""});
+checkCommandMongos({addShard: newShard.name, name: newShardName},
+ setupFuncs.noop,
+ cleanupFuncs.removeShardIfExists);
+checkCommandConfigSvr({_configsvrAddShard: newShard.name, name: newShardName},
+ setupFuncs.noop,
+ cleanupFuncs.removeShardIfExists);
+
+// removeShard
+checkCommandMongos({removeShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
+checkCommandConfigSvr(
+ {_configsvrRemoveShard: newShardName}, setupFuncs.addShard, cleanupFuncs.noop);
+
+// dropCollection
+checkCommandMongos({drop: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
+checkCommandConfigSvr(
+ {_configsvrDropCollection: ns}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
+
+// dropDatabase
+
+// We can't use the checkCommandMongos wrapper because we need a connection to the test
+// database.
+checkCommand(st.s.getDB(dbName),
+ {dropDatabase: 1},
+ unacceptableWCsForMongos,
+ acceptableWCsForMongos,
+ false,
+ setupFuncs.createDatabase,
+ cleanupFuncs.dropDatabase);
+checkCommandConfigSvr(
+ {_configsvrDropDatabase: dbName}, setupFuncs.createDatabase, cleanupFuncs.dropDatabase);
+
+MongoRunner.stopMongos(newShard);
+st.stop();
})();
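
The checkCommand helper above relies on Object.assign to merge a write-concern option object into a fresh copy of the command template, so repeated iterations never see a stale writeConcern field. A small worked example of that merge:

    // Merging a writeConcern into a command without mutating the template.
    var command = {drop: 'test.foo'};
    var wcOption = {writeConcern: {w: 'majority', wtimeout: 15000}};
    var commandWithWriteConcern = Object.assign({}, command, wcOption);
    // commandWithWriteConcern is now:
    //   {drop: 'test.foo', writeConcern: {w: 'majority', wtimeout: 15000}}
    // and `command` itself is unchanged.
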
diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js
index 7c248f383e6..0476d3f7541 100644
--- a/jstests/sharding/conn_pool_stats.js
+++ b/jstests/sharding/conn_pool_stats.js
@@ -1,33 +1,33 @@
// Tests for the connPoolStats command.
(function() {
- "use strict";
- // Create a cluster with 2 shards.
- var cluster = new ShardingTest({shards: 2});
+"use strict";
+// Create a cluster with 2 shards.
+var cluster = new ShardingTest({shards: 2});
- // Needed because the command was expanded post 3.2
- var version = cluster.s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
- var post32 = (version[0] > 3) || ((version[0] == 3) && (version[1] > 2));
+// Needed because the command was expanded post 3.2
+var version = cluster.s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
+var post32 = (version[0] > 3) || ((version[0] == 3) && (version[1] > 2));
- // Run the connPoolStats command
- var stats = cluster.s.getDB("admin").runCommand({connPoolStats: 1});
+// Run the connPoolStats command
+var stats = cluster.s.getDB("admin").runCommand({connPoolStats: 1});
- // Validate output
- printjson(stats);
- assert.commandWorked(stats);
- assert("replicaSets" in stats);
- assert("hosts" in stats);
- assert("numClientConnections" in stats);
- assert("numAScopedConnections" in stats);
- assert("totalInUse" in stats);
- assert("totalAvailable" in stats);
- assert("totalCreated" in stats);
- assert.lte(stats["totalInUse"] + stats["totalAvailable"], stats["totalCreated"], tojson(stats));
- if (post32) {
- assert("pools" in stats);
- assert("totalRefreshing" in stats);
- assert.lte(stats["totalInUse"] + stats["totalAvailable"] + stats["totalRefreshing"],
- stats["totalCreated"],
- tojson(stats));
- }
- cluster.stop();
+// Validate output
+printjson(stats);
+assert.commandWorked(stats);
+assert("replicaSets" in stats);
+assert("hosts" in stats);
+assert("numClientConnections" in stats);
+assert("numAScopedConnections" in stats);
+assert("totalInUse" in stats);
+assert("totalAvailable" in stats);
+assert("totalCreated" in stats);
+assert.lte(stats["totalInUse"] + stats["totalAvailable"], stats["totalCreated"], tojson(stats));
+if (post32) {
+ assert("pools" in stats);
+ assert("totalRefreshing" in stats);
+ assert.lte(stats["totalInUse"] + stats["totalAvailable"] + stats["totalRefreshing"],
+ stats["totalCreated"],
+ tojson(stats));
+}
+cluster.stop();
})();
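
The arithmetic assertion in this test encodes a simple pool-accounting invariant: every connection counted as in use, available, or refreshing must have been created at some point, so those buckets can never sum past totalCreated. A compact restatement (the helper name is illustrative):

    // Illustrative restatement of the connPoolStats invariant checked above.
    function checkPoolInvariant(stats) {
        var accounted = stats.totalInUse + stats.totalAvailable +
            (stats.totalRefreshing || 0);  // totalRefreshing absent pre-3.2
        assert.lte(accounted, stats.totalCreated, tojson(stats));
    }
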
diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js
index 96ee9d19a6d..15da2e0cc73 100644
--- a/jstests/sharding/convert_to_and_from_sharded.js
+++ b/jstests/sharding/convert_to_and_from_sharded.js
@@ -4,126 +4,126 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- var NUM_NODES = 3;
+var NUM_NODES = 3;
- /**
- * Checks that basic CRUD operations work as expected. Expects the collection to have a
- * { _id: 'marker' } document.
- */
- var checkBasicCRUD = function(coll) {
- var doc = coll.findOne({_id: 'marker', y: {$exists: false}});
- assert.neq(null, doc);
+/**
+ * Checks that basic CRUD operations work as expected. Expects the collection to have a
+ * { _id: 'marker' } document.
+ */
+var checkBasicCRUD = function(coll) {
+ var doc = coll.findOne({_id: 'marker', y: {$exists: false}});
+ assert.neq(null, doc);
- assert.writeOK(coll.update({_id: 'marker'}, {$set: {y: 2}}));
- assert.eq(2, coll.findOne({_id: 'marker'}).y);
+ assert.writeOK(coll.update({_id: 'marker'}, {$set: {y: 2}}));
+ assert.eq(2, coll.findOne({_id: 'marker'}).y);
- assert.writeOK(coll.remove({_id: 'marker'}));
- assert.eq(null, coll.findOne({_id: 'marker'}));
+ assert.writeOK(coll.remove({_id: 'marker'}));
+ assert.eq(null, coll.findOne({_id: 'marker'}));
- assert.writeOK(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
- assert.eq('marker', coll.findOne({_id: 'marker'})._id);
- };
+ assert.writeOK(coll.insert({_id: 'marker'}, {writeConcern: {w: NUM_NODES}}));
+ assert.eq('marker', coll.findOne({_id: 'marker'})._id);
+};
- var st = new ShardingTest({shards: {}});
+var st = new ShardingTest({shards: {}});
- var replShard = new ReplSetTest({nodes: NUM_NODES});
- replShard.startSet({verbose: 1});
- replShard.initiate();
+var replShard = new ReplSetTest({nodes: NUM_NODES});
+replShard.startSet({verbose: 1});
+replShard.initiate();
- var priConn = replShard.getPrimary();
+var priConn = replShard.getPrimary();
- // Starting a brand new replica set without '--shardsvr' will cause the FCV to be written as the
- // latest available for that binary. This poses a problem when this test is run in the mixed
- // version suite because mongos will be 'last-stable' and if this node is of the latest binary,
- // it will report itself as the 'latest' FCV, which would cause mongos to refuse to connect to
- // it and shut down.
- //
- // To work around this, in the mixed version suite, be pessimistic and always set this node
- // to the 'last-stable' FCV.
- if (jsTestOptions().shardMixedBinVersions) {
- assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- replShard.awaitReplication();
- }
+// Starting a brand new replica set without '--shardsvr' will cause the FCV to be written as the
+// latest available for that binary. This poses a problem when this test is run in the mixed
+// version suite because mongos will be 'last-stable' and if this node is of the latest binary,
+// it will report itself as the 'latest' FCV, which would cause mongos to refuse to connect to
+// it and shut down.
+//
+// To work around this, in the mixed version suite, be pessimistic and always set this node
+// to the 'last-stable' FCV.
+if (jsTestOptions().shardMixedBinVersions) {
+ assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ replShard.awaitReplication();
+}
- assert.writeOK(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
- checkBasicCRUD(priConn.getDB('test').unsharded);
+assert.writeOK(priConn.getDB('test').unsharded.insert({_id: 'marker'}));
+checkBasicCRUD(priConn.getDB('test').unsharded);
- assert.writeOK(priConn.getDB('test').sharded.insert({_id: 'marker'}));
- checkBasicCRUD(priConn.getDB('test').sharded);
+assert.writeOK(priConn.getDB('test').sharded.insert({_id: 'marker'}));
+checkBasicCRUD(priConn.getDB('test').sharded);
- for (var x = 0; x < NUM_NODES; x++) {
- replShard.restart(x, {shardsvr: ''});
- }
+for (var x = 0; x < NUM_NODES; x++) {
+ replShard.restart(x, {shardsvr: ''});
+}
- replShard.awaitNodesAgreeOnPrimary();
- assert.commandWorked(st.s.adminCommand({addShard: replShard.getURL()}));
+replShard.awaitNodesAgreeOnPrimary();
+assert.commandWorked(st.s.adminCommand({addShard: replShard.getURL()}));
- priConn = replShard.getPrimary();
- checkBasicCRUD(priConn.getDB('test').unsharded);
- checkBasicCRUD(priConn.getDB('test').sharded);
+priConn = replShard.getPrimary();
+checkBasicCRUD(priConn.getDB('test').unsharded);
+checkBasicCRUD(priConn.getDB('test').sharded);
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.sharded', key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.sharded', key: {_id: 1}}));
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- for (x = 0; x < 4; x++) {
- assert.writeOK(st.s.getDB('test').sharded.insert({_id: x}));
- assert.commandWorked(st.s.adminCommand({split: 'test.sharded', middle: {_id: x}}));
- }
+for (x = 0; x < 4; x++) {
+ assert.writeOK(st.s.getDB('test').sharded.insert({_id: x}));
+ assert.commandWorked(st.s.adminCommand({split: 'test.sharded', middle: {_id: x}}));
+}
- var newMongod = MongoRunner.runMongod({shardsvr: ''});
+var newMongod = MongoRunner.runMongod({shardsvr: ''});
- assert.commandWorked(st.s.adminCommand({addShard: newMongod.name, name: 'toRemoveLater'}));
+assert.commandWorked(st.s.adminCommand({addShard: newMongod.name, name: 'toRemoveLater'}));
- for (x = 0; x < 2; x++) {
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.sharded', find: {_id: x}, to: 'toRemoveLater'}));
- }
+for (x = 0; x < 2; x++) {
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.sharded', find: {_id: x}, to: 'toRemoveLater'}));
+}
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- assert.commandWorked(st.s.adminCommand({removeShard: 'toRemoveLater'}));
+assert.commandWorked(st.s.adminCommand({removeShard: 'toRemoveLater'}));
- // Start the balancer to start draining the chunks.
- st.startBalancer();
+// Start the balancer to start draining the chunks.
+st.startBalancer();
- assert.soon(function() {
- var res = st.s.adminCommand({removeShard: 'toRemoveLater'});
- return res.state == 'completed';
- });
+assert.soon(function() {
+ var res = st.s.adminCommand({removeShard: 'toRemoveLater'});
+ return res.state == 'completed';
+});
- MongoRunner.stopMongod(newMongod);
+MongoRunner.stopMongod(newMongod);
- checkBasicCRUD(st.s.getDB('test').unsharded);
- checkBasicCRUD(st.s.getDB('test').sharded);
+checkBasicCRUD(st.s.getDB('test').unsharded);
+checkBasicCRUD(st.s.getDB('test').sharded);
- st.stop();
+st.stop();
- checkBasicCRUD(priConn.getDB('test').unsharded);
- checkBasicCRUD(priConn.getDB('test').sharded);
+checkBasicCRUD(priConn.getDB('test').unsharded);
+checkBasicCRUD(priConn.getDB('test').sharded);
- jsTest.log('About to restart repl w/o shardsvr');
+jsTest.log('About to restart repl w/o shardsvr');
- replShard.nodes.forEach(function(node) {
- delete node.fullOptions.shardsvr;
- });
+replShard.nodes.forEach(function(node) {
+ delete node.fullOptions.shardsvr;
+});
- replShard.restart(replShard.nodes);
- replShard.awaitNodesAgreeOnPrimary();
+replShard.restart(replShard.nodes);
+replShard.awaitNodesAgreeOnPrimary();
- priConn = replShard.getPrimary();
- checkBasicCRUD(priConn.getDB('test').unsharded);
- checkBasicCRUD(priConn.getDB('test').sharded);
+priConn = replShard.getPrimary();
+checkBasicCRUD(priConn.getDB('test').unsharded);
+checkBasicCRUD(priConn.getDB('test').sharded);
- replShard.stopSet();
+replShard.stopSet();
})();
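
The conversion flow in the test above reduces to three steps: a rolling restart of every member with the shardsvr option, waiting for the set to agree on a primary, and registering the set with mongos. A condensed sketch under those assumptions (promoteReplSetToShard is a made-up name):

    // Condensed sketch of the replica-set-to-shard promotion exercised above.
    function promoteReplSetToShard(st, rs) {
        for (var i = 0; i < rs.nodes.length; i++) {
            rs.restart(i, {shardsvr: ''});  // rolling restart with --shardsvr
        }
        rs.awaitNodesAgreeOnPrimary();      // wait for a stable primary
        assert.commandWorked(st.s.adminCommand({addShard: rs.getURL()}));
    }
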
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 2275faed656..b712191e6ed 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,181 +1,180 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2});
- var db = s.getDB("test");
-
- // ************** Test Set #1 *************
- // Basic counts on the "bar" collection, not yet sharded
-
- db.bar.save({n: 1});
- db.bar.save({n: 2});
- db.bar.save({n: 3});
-
- assert.eq(3, db.bar.find().count(), "bar 1");
- assert.eq(1, db.bar.find({n: 1}).count(), "bar 2");
-
- //************** Test Set #2 *************
- // Basic counts on sharded "foo" collection.
- // 1. Create foo collection, insert 6 docs
- // 2. Divide into three chunks
- // 3. Test counts before chunk migrations
- // 4. Manually move chunks. Now each shard should have 3 docs.
- // 5. i. Test basic counts on foo
- // ii. Test counts with limit
- // iii. Test counts with skip
- // iv. Test counts with skip + limit
- // v. Test counts with skip + limit + sorting
- // 6. Insert 10 more docs. Further limit/skip testing with a find query
- // 7. Test invalid queries/values
-
- // part 1
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
-
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
-
- assert.commandWorked(db.foo.insert({_id: 1, name: "eliot"}));
- assert.commandWorked(db.foo.insert({_id: 2, name: "sara"}));
- assert.commandWorked(db.foo.insert({_id: 3, name: "bob"}));
- assert.commandWorked(db.foo.insert({_id: 4, name: "joe"}));
- assert.commandWorked(db.foo.insert({_id: 5, name: "mark"}));
- assert.commandWorked(db.foo.insert({_id: 6, name: "allan"}));
-
- assert.eq(6, db.foo.find().count(), "basic count");
-
- // part 2
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "allan"}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "sara"}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "eliot"}}));
-
- // MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
-
- s.printChunks();
-
- // part 3
- assert.eq(6, db.foo.find().count(), "basic count after split ");
- assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
-
- // part 4
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: "test.foo",
- find: {name: "eliot"},
- to: secondary.getMongo().name,
- _waitForDelete: true
- }));
-
- assert.eq(3, primary.foo.find().toArray().length, "primary count");
- assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
- assert.eq(3, primary.foo.find().sort({name: 1}).toArray().length, "primary count sorted");
- assert.eq(3, secondary.foo.find().sort({name: 1}).toArray().length, "secondary count sorted");
-
- // part 5
- // Some redundant tests, but better safe than sorry. These are fast tests, anyway.
-
- // i.
- assert.eq(6, db.foo.find().count(), "total count after move");
- assert.eq(6, db.foo.find().toArray().length, "total count after move");
- assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count() sorted");
- assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count() after move");
-
- // ii.
- assert.eq(2, db.foo.find().limit(2).count(true));
- assert.eq(2, db.foo.find().limit(-2).count(true));
- assert.eq(6, db.foo.find().limit(100).count(true));
- assert.eq(6, db.foo.find().limit(-100).count(true));
- assert.eq(6, db.foo.find().limit(0).count(true));
-
- // iii.
- assert.eq(6, db.foo.find().skip(0).count(true));
- assert.eq(5, db.foo.find().skip(1).count(true));
- assert.eq(4, db.foo.find().skip(2).count(true));
- assert.eq(3, db.foo.find().skip(3).count(true));
- assert.eq(2, db.foo.find().skip(4).count(true));
- assert.eq(1, db.foo.find().skip(5).count(true));
- assert.eq(0, db.foo.find().skip(6).count(true));
- assert.eq(0, db.foo.find().skip(7).count(true));
-
- // iv.
- assert.eq(2, db.foo.find().limit(2).skip(1).count(true));
- assert.eq(2, db.foo.find().limit(-2).skip(1).count(true));
- assert.eq(5, db.foo.find().limit(100).skip(1).count(true));
- assert.eq(5, db.foo.find().limit(-100).skip(1).count(true));
- assert.eq(5, db.foo.find().limit(0).skip(1).count(true));
-
- assert.eq(0, db.foo.find().limit(2).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(-2).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(100).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(-100).skip(10).count(true));
- assert.eq(0, db.foo.find().limit(0).skip(10).count(true));
-
- assert.eq(2, db.foo.find().limit(2).itcount(), "LS1");
- assert.eq(2, db.foo.find().skip(2).limit(2).itcount(), "LS2");
- assert.eq(1, db.foo.find().skip(5).limit(2).itcount(), "LS3");
- assert.eq(6, db.foo.find().limit(2).count(), "LSC1");
- assert.eq(2, db.foo.find().limit(2).size(), "LSC2");
- assert.eq(2, db.foo.find().skip(2).limit(2).size(), "LSC3");
- assert.eq(1, db.foo.find().skip(5).limit(2).size(), "LSC4");
- assert.eq(4, db.foo.find().skip(1).limit(4).size(), "LSC5");
- assert.eq(5, db.foo.find().skip(1).limit(6).size(), "LSC6");
-
- // SERVER-3567 older negative limit tests
- assert.eq(2, db.foo.find().limit(2).itcount(), "N1");
- assert.eq(2, db.foo.find().limit(-2).itcount(), "N2");
- assert.eq(2, db.foo.find().skip(4).limit(2).itcount(), "N3");
- assert.eq(2, db.foo.find().skip(4).limit(-2).itcount(), "N4");
-
- // v.
- function nameString(c) {
- var s = "";
- while (c.hasNext()) {
- var o = c.next();
- if (s.length > 0)
- s += ",";
- s += o.name;
- }
- return s;
+'use strict';
+
+var s = new ShardingTest({shards: 2});
+var db = s.getDB("test");
+
+// ************** Test Set #1 *************
+// Basic counts on the "bar" collection, not yet sharded
+
+db.bar.save({n: 1});
+db.bar.save({n: 2});
+db.bar.save({n: 3});
+
+assert.eq(3, db.bar.find().count(), "bar 1");
+assert.eq(1, db.bar.find({n: 1}).count(), "bar 2");
+
+//************** Test Set #2 *************
+// Basic counts on sharded "foo" collection.
+// 1. Create foo collection, insert 6 docs
+// 2. Divide into three chunks
+// 3. Test counts before chunk migrations
+// 4. Manually move chunks. Now each shard should have 3 docs.
+// 5. i. Test basic counts on foo
+// ii. Test counts with limit
+// iii. Test counts with skip
+// iv. Test counts with skip + limit
+// v. Test counts with skip + limit + sorting
+// 6. Insert 10 more docs. Further limit/skip testing with a find query
+// 7. Test invalid queries/values
+
+// part 1
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+
+assert.commandWorked(db.foo.insert({_id: 1, name: "eliot"}));
+assert.commandWorked(db.foo.insert({_id: 2, name: "sara"}));
+assert.commandWorked(db.foo.insert({_id: 3, name: "bob"}));
+assert.commandWorked(db.foo.insert({_id: 4, name: "joe"}));
+assert.commandWorked(db.foo.insert({_id: 5, name: "mark"}));
+assert.commandWorked(db.foo.insert({_id: 6, name: "allan"}));
+
+assert.eq(6, db.foo.find().count(), "basic count");
+
+// part 2
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "allan"}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "sara"}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "eliot"}}));
+
+// MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
+
+s.printChunks();
+
+// part 3
+assert.eq(6, db.foo.find().count(), "basic count after split ");
+assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
+
+// part 4
+assert.commandWorked(s.s0.adminCommand({
+ moveChunk: "test.foo",
+ find: {name: "eliot"},
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+}));
+
+assert.eq(3, primary.foo.find().toArray().length, "primary count");
+assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
+assert.eq(3, primary.foo.find().sort({name: 1}).toArray().length, "primary count sorted");
+assert.eq(3, secondary.foo.find().sort({name: 1}).toArray().length, "secondary count sorted");
+
+// part 5
+// Some redundant tests, but better safe than sorry. These are fast tests, anyway.
+
+// i.
+assert.eq(6, db.foo.find().count(), "total count after move");
+assert.eq(6, db.foo.find().toArray().length, "total count after move");
+assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count() sorted");
+assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count() after move");
+
+// ii.
+assert.eq(2, db.foo.find().limit(2).count(true));
+assert.eq(2, db.foo.find().limit(-2).count(true));
+assert.eq(6, db.foo.find().limit(100).count(true));
+assert.eq(6, db.foo.find().limit(-100).count(true));
+assert.eq(6, db.foo.find().limit(0).count(true));
+
+// iii.
+assert.eq(6, db.foo.find().skip(0).count(true));
+assert.eq(5, db.foo.find().skip(1).count(true));
+assert.eq(4, db.foo.find().skip(2).count(true));
+assert.eq(3, db.foo.find().skip(3).count(true));
+assert.eq(2, db.foo.find().skip(4).count(true));
+assert.eq(1, db.foo.find().skip(5).count(true));
+assert.eq(0, db.foo.find().skip(6).count(true));
+assert.eq(0, db.foo.find().skip(7).count(true));
+
+// iv.
+assert.eq(2, db.foo.find().limit(2).skip(1).count(true));
+assert.eq(2, db.foo.find().limit(-2).skip(1).count(true));
+assert.eq(5, db.foo.find().limit(100).skip(1).count(true));
+assert.eq(5, db.foo.find().limit(-100).skip(1).count(true));
+assert.eq(5, db.foo.find().limit(0).skip(1).count(true));
+
+assert.eq(0, db.foo.find().limit(2).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(-2).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(100).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(-100).skip(10).count(true));
+assert.eq(0, db.foo.find().limit(0).skip(10).count(true));
+
+assert.eq(2, db.foo.find().limit(2).itcount(), "LS1");
+assert.eq(2, db.foo.find().skip(2).limit(2).itcount(), "LS2");
+assert.eq(1, db.foo.find().skip(5).limit(2).itcount(), "LS3");
+assert.eq(6, db.foo.find().limit(2).count(), "LSC1");
+assert.eq(2, db.foo.find().limit(2).size(), "LSC2");
+assert.eq(2, db.foo.find().skip(2).limit(2).size(), "LSC3");
+assert.eq(1, db.foo.find().skip(5).limit(2).size(), "LSC4");
+assert.eq(4, db.foo.find().skip(1).limit(4).size(), "LSC5");
+assert.eq(5, db.foo.find().skip(1).limit(6).size(), "LSC6");
+
+// SERVER-3567 older negative limit tests
+assert.eq(2, db.foo.find().limit(2).itcount(), "N1");
+assert.eq(2, db.foo.find().limit(-2).itcount(), "N2");
+assert.eq(2, db.foo.find().skip(4).limit(2).itcount(), "N3");
+assert.eq(2, db.foo.find().skip(4).limit(-2).itcount(), "N4");
+
+// v.
+function nameString(c) {
+ var s = "";
+ while (c.hasNext()) {
+ var o = c.next();
+ if (s.length > 0)
+ s += ",";
+ s += o.name;
}
- assert.eq("allan,bob,eliot,joe,mark,sara", nameString(db.foo.find().sort({name: 1})), "sort 1");
- assert.eq(
- "sara,mark,joe,eliot,bob,allan", nameString(db.foo.find().sort({name: -1})), "sort 2");
-
- assert.eq("allan,bob", nameString(db.foo.find().sort({name: 1}).limit(2)), "LSD1");
- assert.eq("bob,eliot", nameString(db.foo.find().sort({name: 1}).skip(1).limit(2)), "LSD2");
- assert.eq("joe,mark", nameString(db.foo.find().sort({name: 1}).skip(3).limit(2)), "LSD3");
-
- assert.eq("eliot,sara", nameString(db.foo.find().sort({_id: 1}).limit(2)), "LSE1");
- assert.eq("sara,bob", nameString(db.foo.find().sort({_id: 1}).skip(1).limit(2)), "LSE2");
- assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
-
- // part 6
- for (var i = 0; i < 10; i++) {
- assert.commandWorked(db.foo.insert({_id: 7 + i, name: "zzz" + i}));
- }
-
- assert.eq(10, db.foo.find({name: {$gt: "z"}}).itcount(), "LSF1");
- assert.eq(10, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).itcount(), "LSF2");
- assert.eq(5, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).itcount(), "LSF3");
- assert.eq(3, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).limit(3).itcount(), "LSF4");
-
- // part 7
- // Make sure count command returns error for invalid queries
- var badCmdResult = db.runCommand({count: 'foo', query: {$c: {$abc: 3}}});
- assert(!badCmdResult.ok, "invalid query syntax didn't return error");
- assert(badCmdResult.errmsg.length > 0, "no error msg for invalid query");
-
- // Negative skip values should return error
- var negSkipResult = db.runCommand({count: 'foo', skip: -2});
- assert(!negSkipResult.ok, "negative skip value shouldn't work");
- assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
-
- // Negative skip values with positive limit should return error
- var negSkipLimitResult = db.runCommand({count: 'foo', skip: -2, limit: 1});
- assert(!negSkipLimitResult.ok, "negative skip value with limit shouldn't work");
- assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
-
- s.stop();
+ return s;
+}
+assert.eq("allan,bob,eliot,joe,mark,sara", nameString(db.foo.find().sort({name: 1})), "sort 1");
+assert.eq("sara,mark,joe,eliot,bob,allan", nameString(db.foo.find().sort({name: -1})), "sort 2");
+
+assert.eq("allan,bob", nameString(db.foo.find().sort({name: 1}).limit(2)), "LSD1");
+assert.eq("bob,eliot", nameString(db.foo.find().sort({name: 1}).skip(1).limit(2)), "LSD2");
+assert.eq("joe,mark", nameString(db.foo.find().sort({name: 1}).skip(3).limit(2)), "LSD3");
+
+assert.eq("eliot,sara", nameString(db.foo.find().sort({_id: 1}).limit(2)), "LSE1");
+assert.eq("sara,bob", nameString(db.foo.find().sort({_id: 1}).skip(1).limit(2)), "LSE2");
+assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
+
+// part 6
+for (var i = 0; i < 10; i++) {
+ assert.commandWorked(db.foo.insert({_id: 7 + i, name: "zzz" + i}));
+}
+
+assert.eq(10, db.foo.find({name: {$gt: "z"}}).itcount(), "LSF1");
+assert.eq(10, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).itcount(), "LSF2");
+assert.eq(5, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).itcount(), "LSF3");
+assert.eq(3, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).limit(3).itcount(), "LSF4");
+
+// part 7
+// Make sure count command returns error for invalid queries
+var badCmdResult = db.runCommand({count: 'foo', query: {$c: {$abc: 3}}});
+assert(!badCmdResult.ok, "invalid query syntax didn't return error");
+assert(badCmdResult.errmsg.length > 0, "no error msg for invalid query");
+
+// Negative skip values should return error
+var negSkipResult = db.runCommand({count: 'foo', skip: -2});
+assert(!negSkipResult.ok, "negative skip value shouldn't work");
+assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
+
+// Negative skip values with positive limit should return error
+var negSkipLimitResult = db.runCommand({count: 'foo', skip: -2, limit: 1});
+assert(!negSkipLimitResult.ok, "negative skip value with limit shouldn't work");
+assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
+
+s.stop();
})();
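
For reference, the skip/limit/count semantics exercised by count1.js can be reproduced in any mongo shell session; a minimal sketch, assuming a throwaway collection named `demo` (illustrative, not part of the test):

    db.demo.drop();
    for (var i = 0; i < 6; i++) {
        assert.commandWorked(db.demo.insert({_id: i}));
    }
    // A negative limit caps the result set just like a positive one, so
    // skipping 4 of 6 documents leaves 2 to count.
    assert.eq(2, db.demo.find().skip(4).limit(-2).itcount());
    // The count command rejects a negative skip outright.
    var res = db.runCommand({count: 'demo', skip: -2});
    assert(!res.ok);
    assert(res.errmsg.length > 0);
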
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index d5bad76a246..b1d6bad4bf3 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,56 +1,55 @@
(function() {
- var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
- var s2 = s1._mongos[1];
+var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
+var s2 = s1._mongos[1];
- s1.adminCommand({enablesharding: "test"});
- s1.ensurePrimaryShard('test', s1.shard1.shardName);
- s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+s1.adminCommand({enablesharding: "test"});
+s1.ensurePrimaryShard('test', s1.shard1.shardName);
+s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
- var db1 = s1.getDB("test").foo;
- var db2 = s2.getDB("test").foo;
+var db1 = s1.getDB("test").foo;
+var db2 = s2.getDB("test").foo;
- assert.eq(1, s1.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+assert.eq(1, s1.config.chunks.count({"ns": "test.foo"}), "sanity check A");
- db1.save({name: "aaa"});
- db1.save({name: "bbb"});
- db1.save({name: "ccc"});
- db1.save({name: "ddd"});
- db1.save({name: "eee"});
- db1.save({name: "fff"});
+db1.save({name: "aaa"});
+db1.save({name: "bbb"});
+db1.save({name: "ccc"});
+db1.save({name: "ddd"});
+db1.save({name: "eee"});
+db1.save({name: "fff"});
- s1.adminCommand({split: "test.foo", middle: {name: "ddd"}});
+s1.adminCommand({split: "test.foo", middle: {name: "ddd"}});
- assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos1");
- assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos2");
+assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos1");
+assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos2");
- s1.printChunks("test.foo");
+s1.printChunks("test.foo");
- s1.adminCommand({
- movechunk: "test.foo",
- find: {name: "aaa"},
- to: s1.getOther(s1.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
+s1.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "aaa"},
+ to: s1.getOther(s1.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
- assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "post count mongos1");
+assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "post count mongos1");
- // The second mongos still thinks its shard mapping is valid and accepts a count
- print("before sleep: " + Date());
- sleep(2000);
- print("after sleep: " + Date());
- s1.printChunks("test.foo");
- assert.eq(3, db2.find({name: {$gte: "aaa", $lt: "ddd"}}).count(), "post count mongos2");
+// The second mongos still thinks its shard mapping is valid and accepts a count
+print("before sleep: " + Date());
+sleep(2000);
+print("after sleep: " + Date());
+s1.printChunks("test.foo");
+assert.eq(3, db2.find({name: {$gte: "aaa", $lt: "ddd"}}).count(), "post count mongos2");
- db2.findOne();
+db2.findOne();
- assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}));
+assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}));
- assert.eq(4, db2.find().limit(4).count(true));
- assert.eq(4, db2.find().limit(-4).count(true));
- assert.eq(6, db2.find().limit(0).count(true));
- assert.eq(6, db2.getDB().runCommand({count: db2.getName(), limit: 0}).n);
-
- s1.stop();
+assert.eq(4, db2.find().limit(4).count(true));
+assert.eq(4, db2.find().limit(-4).count(true));
+assert.eq(6, db2.find().limit(0).count(true));
+assert.eq(6, db2.getDB().runCommand({count: db2.getName(), limit: 0}).n);
+s1.stop();
})();
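
The count(true) calls above exercise the shell's applySkipLimit flag; a minimal sketch of the distinction, assuming the six-document collection populated by the test:

    var cur = db.foo.find().limit(4);
    assert.eq(6, cur.count());      // count() ignores skip/limit by default
    assert.eq(4, cur.count(true));  // count(true) applies them
    // The command form expresses the same thing with an explicit limit field,
    // where limit: 0 means "no limit".
    assert.eq(6, db.runCommand({count: 'foo', limit: 0}).n);
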
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index 6f2b244204b..ff7cff2c698 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -8,62 +8,61 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
- st.s.setSlaveOk(true);
-
- var configDB = st.config;
- var coll = configDB.test;
-
- for (var x = 0; x < 10; x++) {
- assert.writeOK(coll.insert({v: x}));
- }
-
- if (st.configRS) {
- // Make sure the inserts are replicated to all config servers.
- st.configRS.awaitReplication();
- }
-
- var testNormalCount = function() {
- var cmdRes = configDB.runCommand({count: coll.getName()});
- assert(cmdRes.ok);
- assert.eq(10, cmdRes.n);
- };
-
- var testCountWithQuery = function() {
- var cmdRes = configDB.runCommand({count: coll.getName(), query: {v: {$gt: 6}}});
- assert(cmdRes.ok);
- assert.eq(3, cmdRes.n);
- };
-
- // Use invalid query operator to make the count return error
- var testInvalidCount = function() {
- var cmdRes = configDB.runCommand({count: coll.getName(), query: {$c: {$abc: 3}}});
- assert(!cmdRes.ok);
- assert(cmdRes.errmsg.length > 0);
- };
-
- // Test with all config servers up
- testNormalCount();
- testCountWithQuery();
- testInvalidCount();
-
- // Test with the first config server down
- MongoRunner.stopMongod(st.c0);
-
- testNormalCount();
- testCountWithQuery();
- testInvalidCount();
-
- // Test with the first and second config server down
- MongoRunner.stopMongod(st.c1);
- jsTest.log('Second server is down');
-
- testNormalCount();
- testCountWithQuery();
- testInvalidCount();
-
- st.stop();
-
+"use strict";
+
+var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
+st.s.setSlaveOk(true);
+
+var configDB = st.config;
+var coll = configDB.test;
+
+for (var x = 0; x < 10; x++) {
+ assert.writeOK(coll.insert({v: x}));
+}
+
+if (st.configRS) {
+ // Make sure the inserts are replicated to all config servers.
+ st.configRS.awaitReplication();
+}
+
+var testNormalCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName()});
+ assert(cmdRes.ok);
+ assert.eq(10, cmdRes.n);
+};
+
+var testCountWithQuery = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {v: {$gt: 6}}});
+ assert(cmdRes.ok);
+ assert.eq(3, cmdRes.n);
+};
+
+// Use invalid query operator to make the count return error
+var testInvalidCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {$c: {$abc: 3}}});
+ assert(!cmdRes.ok);
+ assert(cmdRes.errmsg.length > 0);
+};
+
+// Test with all config servers up
+testNormalCount();
+testCountWithQuery();
+testInvalidCount();
+
+// Test with the first config server down
+MongoRunner.stopMongod(st.c0);
+
+testNormalCount();
+testCountWithQuery();
+testInvalidCount();
+
+// Test with the first and second config server down
+MongoRunner.stopMongod(st.c1);
+jsTest.log('Second server is down');
+
+testNormalCount();
+testCountWithQuery();
+testInvalidCount();
+
+st.stop();
}());
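
The three helpers above all reduce to the same runCommand shape; a minimal sketch of issuing a count directly against the config database and branching on the result (query values illustrative):

    var cmdRes = db.getSiblingDB('config').runCommand({count: 'test', query: {v: {$gt: 6}}});
    if (cmdRes.ok) {
        print('matched ' + cmdRes.n + ' documents');
    } else {
        print('count failed: ' + cmdRes.errmsg);
    }
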
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index f93ed7e0fa6..596509c1c2d 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -7,67 +7,67 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var st = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
- var rst = st.rs0;
+var st = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
+var rst = st.rs0;
- // Insert data into replica set
- var conn = new Mongo(st.s.host);
+// Insert data into replica set
+var conn = new Mongo(st.s.host);
- var coll = conn.getCollection('test.countSlaveOk');
- coll.drop();
+var coll = conn.getCollection('test.countSlaveOk');
+coll.drop();
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 300; i++) {
- bulk.insert({i: i % 10});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10});
+}
+assert.writeOK(bulk.execute());
- var connA = conn;
- var connB = new Mongo(st.s.host);
- var connC = new Mongo(st.s.host);
+var connA = conn;
+var connB = new Mongo(st.s.host);
+var connC = new Mongo(st.s.host);
- st.printShardingStatus();
+st.printShardingStatus();
- // Wait for client to update itself and replication to finish
- rst.awaitReplication();
+// Wait for client to update itself and replication to finish
+rst.awaitReplication();
- var primary = rst.getPrimary();
- var sec = rst.getSecondary();
+var primary = rst.getPrimary();
+var sec = rst.getSecondary();
- // Data now inserted... stop the master, since only two in set, other will still be secondary
- rst.stop(rst.getPrimary());
- printjson(rst.status());
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop(rst.getPrimary());
+printjson(rst.status());
- // Wait for the mongos to recognize the slave
- awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
+// Wait for the mongos to recognize the slave
+awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
- // Make sure that mongos realizes that primary is already down
- awaitRSClientHosts(conn, primary, {ok: false});
+// Make sure that mongos realizes that primary is already down
+awaitRSClientHosts(conn, primary, {ok: false});
- // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
- // master is down
- conn.setSlaveOk();
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk();
- // count using the command path
- assert.eq(30, coll.find({i: 0}).count());
- // count using the query path
- assert.eq(30, coll.find({i: 0}).itcount());
- assert.eq(10, coll.distinct("i").length);
+// count using the command path
+assert.eq(30, coll.find({i: 0}).count());
+// count using the query path
+assert.eq(30, coll.find({i: 0}).itcount());
+assert.eq(10, coll.distinct("i").length);
- try {
- conn.setSlaveOk(false);
- // Should throw exception, since not slaveOk'd
- coll.find({i: 0}).count();
+try {
+ conn.setSlaveOk(false);
+ // Should throw exception, since not slaveOk'd
+ coll.find({i: 0}).count();
- print("Should not reach here!");
- assert(false);
- } catch (e) {
- print("Non-slaveOk'd connection failed.");
- }
+ print("Should not reach here!");
+ assert(false);
+} catch (e) {
+ print("Non-slaveOk'd connection failed.");
+}
- st.stop();
+st.stop();
})();
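
The slaveOk gate tested above is a per-connection switch in the shell; a minimal sketch, with the host string being illustrative:

    var conn = new Mongo('localhost:27017');  // illustrative host
    var coll = conn.getCollection('test.countSlaveOk');
    conn.setSlaveOk(true);   // reads may now be served by a secondary
    print(coll.find({i: 0}).count());
    conn.setSlaveOk(false);  // subsequent reads require a primary again
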
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index ce6851cafe5..b68c4bf1bca 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -6,148 +6,142 @@
load("jstests/libs/analyze_plan.js");
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1});
- var coll = st.s0.getCollection("foo.bar");
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB() + ""}));
-
- jsTest.log('Tests with _id : 1 shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.printShardingStatus();
-
- assert.commandWorked(
- st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
- // Index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
- assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
- // Compound index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndexes());
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
- assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({a: true, b: true}, {_id: 1, a: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- jsTest.log('Tests with _id : hashed shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
- st.printShardingStatus();
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({a: 1}));
- assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
-
- // Index with shard key query - can't be covered since hashed index
- assert.commandWorked(coll.dropIndex({a: 1}));
- assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
-
- jsTest.log('Tests with compound shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1, b: 1}}));
- st.printShardingStatus();
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({c: 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- // Index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndex({c: 1}));
- assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- // Compound index with shard key query - covered when projecting
- assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
- assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
- assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- jsTest.log('Tests with nested shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b': 1}}));
- st.printShardingStatus();
-
- // Insert some data
- assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
-
- // Index without shard key query - not covered
- assert.commandWorked(coll.ensureIndex({c: 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- // Index with shard key query - can be covered given the appropriate projection.
- assert.commandWorked(coll.dropIndex({c: 1}));
- assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
- assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
- assert.eq(0,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
- jsTest.log('Tests with bad data with no shard key');
- coll.drop();
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}}));
- st.printShardingStatus();
-
- // Insert some bad data manually on the shard
- assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
-
- // Index without shard key query - not covered but succeeds
- assert.commandWorked(coll.ensureIndex({c: 1}));
- var explain = coll.find({c: true}).explain(true).executionStats;
- assert.eq(0, explain.nReturned);
- assert.eq(1, explain.totalDocsExamined);
- assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
-
- // Index with shard key query - covered and succeeds and returns result
- //
- // NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not
- // exist" value for indexes
- assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
- var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
- assert.eq(1, explain.nReturned);
- assert.eq(0, explain.totalDocsExamined);
- assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 1});
+var coll = st.s0.getCollection("foo.bar");
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB() + ""}));
+
+jsTest.log('Tests with _id : 1 shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: 1}}));
+st.printShardingStatus();
+
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Compound index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndexes());
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(
+ 0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+jsTest.log('Tests with _id : hashed shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+st.printShardingStatus();
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Index with shard key query - can't be covered since hashed index
+assert.commandWorked(coll.dropIndex({a: 1}));
+assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
+
+jsTest.log('Tests with compound shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1, b: 1}}));
+st.printShardingStatus();
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+// Index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+// Compound index with shard key query - covered when projecting
+assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+jsTest.log('Tests with nested shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b': 1}}));
+st.printShardingStatus();
+
+// Insert some data
+assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
+
+// Index without shard key query - not covered
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(
+ 1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
+
+// Index with shard key query - can be covered given the appropriate projection.
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(
+ 0,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
+
+jsTest.log('Tests with bad data with no shard key');
+coll.drop();
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}}));
+st.printShardingStatus();
+
+// Insert some bad data manually on the shard
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
+
+// Index without shard key query - not covered but succeeds
+assert.commandWorked(coll.ensureIndex({c: 1}));
+var explain = coll.find({c: true}).explain(true).executionStats;
+assert.eq(0, explain.nReturned);
+assert.eq(1, explain.totalDocsExamined);
+assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
+
+// Index with shard key query - covered and succeeds and returns result
+//
+// NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not
+// exist" value for indexes
+assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
+var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
+assert.eq(1, explain.nReturned);
+assert.eq(0, explain.totalDocsExamined);
+assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
+
+st.stop();
})();
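
Every covered-query assertion above reduces to checking totalDocsExamined in the explain output; a hedged helper sketch (the isCovered name is illustrative, not part of the test library):

    // Returns true when the query was answered entirely from the index,
    // i.e. no documents had to be fetched.
    function isCovered(coll, query, projection) {
        var stats = coll.find(query, projection).explain(true).executionStats;
        return stats.totalDocsExamined === 0;
    }
    // e.g. isCovered(coll, {a: true}, {_id: 1, a: 1}) after building {a: 1, _id: 1}
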
diff --git a/jstests/sharding/create_database.js b/jstests/sharding/create_database.js
index 441582aa201..04dfeff7877 100644
--- a/jstests/sharding/create_database.js
+++ b/jstests/sharding/create_database.js
@@ -3,69 +3,68 @@
* databaseVersion if FCV > 3.6, but not if FCV <= 3.6.
*/
(function() {
- 'use strict';
+'use strict';
- function createDatabase(mongos, dbName) {
- // A database is implicitly created when a collection inside it is created.
- assert.commandWorked(mongos.getDB(dbName).runCommand({create: collName}));
- }
+function createDatabase(mongos, dbName) {
+ // A database is implicitly created when a collection inside it is created.
+ assert.commandWorked(mongos.getDB(dbName).runCommand({create: collName}));
+}
- function cleanUp(mongos, dbName) {
- assert.commandWorked(mongos.getDB(dbName).runCommand({dropDatabase: 1}));
- }
+function cleanUp(mongos, dbName) {
+ assert.commandWorked(mongos.getDB(dbName).runCommand({dropDatabase: 1}));
+}
- function assertDbVersionAssigned(mongos, dbName) {
- createDatabase(mongos, dbName);
+function assertDbVersionAssigned(mongos, dbName) {
+ createDatabase(mongos, dbName);
- // Check that the entry in the sharding catalog contains a dbVersion.
- const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
- assert.neq(null, dbEntry);
- assert.neq(null, dbEntry.version);
- assert.neq(null, dbEntry.version.uuid);
- assert.eq(1, dbEntry.version.lastMod);
+ // Check that the entry in the sharding catalog contains a dbVersion.
+ const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
+ assert.neq(null, dbEntry);
+ assert.neq(null, dbEntry.version);
+ assert.neq(null, dbEntry.version.uuid);
+ assert.eq(1, dbEntry.version.lastMod);
- // Check that the catalog cache on the mongos contains the same dbVersion.
- const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
- assert.commandWorked(cachedDbEntry);
- assert.eq(dbEntry.version.uuid, cachedDbEntry.version.uuid);
- assert.eq(dbEntry.version.lastMod, cachedDbEntry.version.lastMod);
+ // Check that the catalog cache on the mongos contains the same dbVersion.
+ const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
+ assert.commandWorked(cachedDbEntry);
+ assert.eq(dbEntry.version.uuid, cachedDbEntry.version.uuid);
+ assert.eq(dbEntry.version.lastMod, cachedDbEntry.version.lastMod);
- cleanUp(mongos, dbName);
+ cleanUp(mongos, dbName);
- return dbEntry;
- }
+ return dbEntry;
+}
- function assertDbVersionNotAssigned(mongos, dbName) {
- createDatabase(mongos, dbName);
+function assertDbVersionNotAssigned(mongos, dbName) {
+ createDatabase(mongos, dbName);
- // Check that the entry in the sharding catalog *does not* contain a dbVersion.
- const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
- assert.neq(null, dbEntry);
- assert.eq(null, dbEntry.version);
+ // Check that the entry in the sharding catalog *does not* contain a dbVersion.
+ const dbEntry = mongos.getDB("config").getCollection("databases").findOne({_id: dbName});
+ assert.neq(null, dbEntry);
+ assert.eq(null, dbEntry.version);
- // Check that the catalog cache on the mongos *does not* contain a dbVersion.
- const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
- assert.commandWorked(cachedDbEntry);
- assert.eq(null, cachedDbEntry.version);
+ // Check that the catalog cache on the mongos *does not* contain a dbVersion.
+ const cachedDbEntry = mongos.adminCommand({getShardVersion: dbName});
+ assert.commandWorked(cachedDbEntry);
+ assert.eq(null, cachedDbEntry.version);
- cleanUp(mongos, dbName);
+ cleanUp(mongos, dbName);
- return dbEntry;
- }
+ return dbEntry;
+}
- const dbName = "db1";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "db1";
+const collName = "foo";
+const ns = dbName + "." + collName;
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- // A new database is given a databaseVersion.
- let dbEntry1 = assertDbVersionAssigned(st.s, dbName);
+// A new database is given a databaseVersion.
+let dbEntry1 = assertDbVersionAssigned(st.s, dbName);
- // A new incarnation of a database that was previously dropped is given a fresh databaseVersion.
- let dbEntry2 = assertDbVersionAssigned(st.s, dbName);
- assert.neq(dbEntry1.version.uuid, dbEntry2.version.uuid);
-
- st.stop();
+// A new incarnation of a database that was previously dropped is given a fresh databaseVersion.
+let dbEntry2 = assertDbVersionAssigned(st.s, dbName);
+assert.neq(dbEntry1.version.uuid, dbEntry2.version.uuid);
+st.stop();
})();
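
The catalog lookup performed by assertDbVersionAssigned can be issued from any mongos connection; a minimal sketch (the database name is illustrative):

    var entry = db.getSiblingDB('config').databases.findOne({_id: 'db1'});
    // Once a version is assigned, entry.version carries {uuid: ..., lastMod: 1}.
    printjson(entry && entry.version);
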
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index 1610c1fef44..f11ffd13f2a 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -2,33 +2,32 @@
* Test to make sure that the createIndex command gets sent to all shards.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enablesharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+var st = new ShardingTest({shards: 2});
+assert.commandWorked(st.s.adminCommand({enablesharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}}));
+var testDB = st.s.getDB('test');
+assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}}));
- // Move only chunk out of primary shard.
- assert.commandWorked(
- testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName}));
+// Move only chunk out of primary shard.
+assert.commandWorked(
+ testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName}));
- assert.writeOK(testDB.user.insert({_id: 0}));
+assert.writeOK(testDB.user.insert({_id: 0}));
- var res = testDB.user.ensureIndex({i: 1});
- assert.commandWorked(res);
+var res = testDB.user.ensureIndex({i: 1});
+assert.commandWorked(res);
- var indexes = testDB.user.getIndexes();
- assert.eq(2, indexes.length);
+var indexes = testDB.user.getIndexes();
+assert.eq(2, indexes.length);
- indexes = st.rs0.getPrimary().getDB('test').user.getIndexes();
- assert.eq(2, indexes.length);
+indexes = st.rs0.getPrimary().getDB('test').user.getIndexes();
+assert.eq(2, indexes.length);
- indexes = st.rs1.getPrimary().getDB('test').user.getIndexes();
- assert.eq(2, indexes.length);
-
- st.stop();
+indexes = st.rs1.getPrimary().getDB('test').user.getIndexes();
+assert.eq(2, indexes.length);
+st.stop();
})();
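
The per-shard verification above follows a single pattern: ask each shard primary for its indexes and compare counts; a hedged sketch over the same test fixture:

    [st.rs0, st.rs1].forEach(function(rs) {
        var indexes = rs.getPrimary().getDB('test').user.getIndexes();
        assert.eq(2, indexes.length);  // the _id index plus {i: 1}
    });
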
diff --git a/jstests/sharding/current_op_no_shards.js b/jstests/sharding/current_op_no_shards.js
index 926b032e229..6d6dd3bdfcb 100644
--- a/jstests/sharding/current_op_no_shards.js
+++ b/jstests/sharding/current_op_no_shards.js
@@ -3,16 +3,15 @@
* set, and does not cause the mongoS floating point failure described in SERVER-30084.
*/
(function() {
- const st = new ShardingTest({shards: 0, config: 1});
+const st = new ShardingTest({shards: 0, config: 1});
- const adminDB = st.s.getDB("admin");
+const adminDB = st.s.getDB("admin");
- assert.commandWorked(
- adminDB.runCommand({aggregate: 1, pipeline: [{$currentOp: {}}], cursor: {}}));
- assert.commandWorked(adminDB.currentOp());
+assert.commandWorked(adminDB.runCommand({aggregate: 1, pipeline: [{$currentOp: {}}], cursor: {}}));
+assert.commandWorked(adminDB.currentOp());
- assert.eq(adminDB.aggregate([{$currentOp: {}}]).itcount(), 0);
- assert.eq(adminDB.currentOp().inprog.length, 0);
+assert.eq(adminDB.aggregate([{$currentOp: {}}]).itcount(), 0);
+assert.eq(adminDB.currentOp().inprog.length, 0);
- st.stop();
+st.stop();
})(); \ No newline at end of file
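
The test compares the two equivalent currentOp entry points; a minimal sketch of that equivalence, runnable against any admin connection:

    var adminDB = db.getSiblingDB('admin');
    var viaAgg = adminDB.aggregate([{$currentOp: {}}]).itcount();
    var viaHelper = adminDB.currentOp().inprog.length;
    // In an otherwise idle cluster both surfaces report the same (empty) set.
    assert.eq(viaAgg, viaHelper);
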
diff --git a/jstests/sharding/current_op_with_drop_shard.js b/jstests/sharding/current_op_with_drop_shard.js
index aaadca2dc3f..c6f9e7cad90 100644
--- a/jstests/sharding/current_op_with_drop_shard.js
+++ b/jstests/sharding/current_op_with_drop_shard.js
@@ -1,25 +1,25 @@
// Tests that currentOp is resilient to drop shard.
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- // We need the balancer to remove a shard.
- st.startBalancer();
+// We need the balancer to remove a shard.
+st.startBalancer();
- const mongosDB = st.s.getDB(jsTestName());
- const shardName = st.shard0.shardName;
+const mongosDB = st.s.getDB(jsTestName());
+const shardName = st.shard0.shardName;
- var res = st.s.adminCommand({removeShard: shardName});
+var res = st.s.adminCommand({removeShard: shardName});
+assert.commandWorked(res);
+assert.eq('started', res.state);
+assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shardName});
assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: shardName});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "removeShard never completed for shard " + shardName);
+ return ('completed' === res.state);
+}, "removeShard never completed for shard " + shardName);
- assert.commandWorked(mongosDB.currentOp());
+assert.commandWorked(mongosDB.currentOp());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 92e321eac27..6b66c9cf130 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -2,69 +2,68 @@
// checks that cursors survive a chunk's move
(function() {
- var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
+var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
- s.config.settings.find().forEach(printjson);
+s.config.settings.find().forEach(printjson);
- // create a sharded 'test.foo', for the moment with just one chunk
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+// create a sharded 'test.foo', for the moment with just one chunk
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- db = s.getDB("test");
- primary = s.getPrimaryShard("test").getDB("test");
- secondary = s.getOther(primary).getDB("test");
+db = s.getDB("test");
+primary = s.getPrimaryShard("test").getDB("test");
+secondary = s.getOther(primary).getDB("test");
- var numObjs = 30;
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < numObjs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
- assert.eq(1,
- s.config.chunks.count({"ns": "test.foo"}),
- "test requires collection to have one chunk initially");
-
- // we'll split the collection in two and move the second chunk while three cursors are open
- // cursor1 still has more data in the first chunk, the one that didn't move
- // cursor2 buffered the last obj of the first chunk
- // cursor3 buffered data that was moved on the second chunk
- var cursor1 = db.foo.find().batchSize(3);
- assert.eq(3, cursor1.objsLeftInBatch());
- var cursor2 = db.foo.find().batchSize(5);
- assert.eq(5, cursor2.objsLeftInBatch());
- var cursor3 = db.foo.find().batchSize(7);
- assert.eq(7, cursor3.objsLeftInBatch());
+var numObjs = 30;
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+assert.eq(1,
+ s.config.chunks.count({"ns": "test.foo"}),
+ "test requires collection to have one chunk initially");
- s.adminCommand({split: "test.foo", middle: {_id: 5}});
- s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
- assert.eq(2, s.config.chunks.count({"ns": "test.foo"}));
+// we'll split the collection in two and move the second chunk while three cursors are open
+// cursor1 still has more data in the first chunk, the one that didn't move
+// cursor2 buffered the last obj of the first chunk
+// cursor3 buffered data that was moved on the second chunk
+var cursor1 = db.foo.find().batchSize(3);
+assert.eq(3, cursor1.objsLeftInBatch());
+var cursor2 = db.foo.find().batchSize(5);
+assert.eq(5, cursor2.objsLeftInBatch());
+var cursor3 = db.foo.find().batchSize(7);
+assert.eq(7, cursor3.objsLeftInBatch());
- // the cursors should not have been affected
- assert.eq(numObjs, cursor1.itcount(), "c1");
- assert.eq(numObjs, cursor2.itcount(), "c2");
- assert.eq(numObjs, cursor3.itcount(), "c3");
+s.adminCommand({split: "test.foo", middle: {_id: 5}});
+s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
+assert.eq(2, s.config.chunks.count({"ns": "test.foo"}));
- // Test that a cursor with a 1 second timeout eventually times out.
- var cur = db.foo.find().batchSize(2);
- assert(cur.next(), "T1");
- assert(cur.next(), "T2");
- assert.commandWorked(s.admin.runCommand({
- setParameter: 1,
- cursorTimeoutMillis: 1000 // 1 second.
- }));
+// the cursors should not have been affected
+assert.eq(numObjs, cursor1.itcount(), "c1");
+assert.eq(numObjs, cursor2.itcount(), "c2");
+assert.eq(numObjs, cursor3.itcount(), "c3");
- assert.soon(function() {
- try {
- cur.next();
- cur.next();
- print("cursor still alive");
- return false;
- } catch (e) {
- return true;
- }
- }, "cursor failed to time out", /*timeout*/ 30000, /*interval*/ 5000);
+// Test that a cursor with a 1 second timeout eventually times out.
+var cur = db.foo.find().batchSize(2);
+assert(cur.next(), "T1");
+assert(cur.next(), "T2");
+assert.commandWorked(s.admin.runCommand({
+ setParameter: 1,
+ cursorTimeoutMillis: 1000 // 1 second.
+}));
- s.stop();
+assert.soon(function() {
+ try {
+ cur.next();
+ cur.next();
+ print("cursor still alive");
+ return false;
+ } catch (e) {
+ return true;
+ }
+}, "cursor failed to time out", /*timeout*/ 30000, /*interval*/ 5000);
+s.stop();
})();
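
Shrinking the server-wide idle-cursor timeout, as the test does, is an ordinary setParameter; a minimal sketch (the 1000 ms value is illustrative):

    assert.commandWorked(db.adminCommand({
        setParameter: 1,
        cursorTimeoutMillis: 1000  // reap idle cursors after roughly one second
    }));
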
diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js
index a6be1762245..7c43fd8f99a 100644
--- a/jstests/sharding/cursor_timeout.js
+++ b/jstests/sharding/cursor_timeout.js
@@ -9,112 +9,111 @@
// After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that
// #3 and #4 have been killed.
(function() {
- 'use strict';
-
- // Cursor timeout on mongod is handled by a single thread/timer that will sleep for
- // "clientCursorMonitorFrequencySecs" and add the sleep value to each operation's duration when
- // it wakes up, timing out those whose "now() - last accessed" interval exceeds the timeout. A cursor
- // timeout of 5 seconds with a monitor frequency of 1 second means an effective timeout period
- // of 4 to 5 seconds.
- const mongodCursorTimeoutMs = 5000;
-
- // Cursor timeout on mongos is handled by checking whether the "last accessed" cursor time stamp
- // is older than "now() - cursorTimeoutMillis" and is checked every
- // "clientCursorMonitorFrequencySecs" by a global thread/timer. A timeout of 4 seconds with a
- // monitor frequency of 1 second means an effective timeout period of 4 to 5 seconds.
- const mongosCursorTimeoutMs = 4000;
-
- const cursorMonitorFrequencySecs = 1;
-
- const st = new ShardingTest({
- shards: 2,
- other: {
- shardOptions: {
- verbose: 1,
- setParameter: {
- cursorTimeoutMillis: mongodCursorTimeoutMs,
- clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
- }
- },
- mongosOptions: {
- verbose: 1,
- setParameter: {
- cursorTimeoutMillis: mongosCursorTimeoutMs,
- clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
- }
- },
+'use strict';
+
+// Cursor timeout on mongod is handled by a single thread/timer that will sleep for
+// "clientCursorMonitorFrequencySecs" and add the sleep value to each operation's duration when
+// it wakes up, timing out those whose "now() - last accessed" interval exceeds the timeout. A cursor
+// timeout of 5 seconds with a monitor frequency of 1 second means an effective timeout period
+// of 4 to 5 seconds.
+const mongodCursorTimeoutMs = 5000;
+
+// Cursor timeout on mongos is handled by checking whether the "last accessed" cursor time stamp
+// is older than "now() - cursorTimeoutMillis" and is checked every
+// "clientCursorMonitorFrequencySecs" by a global thread/timer. A timeout of 4 seconds with a
+// monitor frequency of 1 second means an effective timeout period of 4 to 5 seconds.
+const mongosCursorTimeoutMs = 4000;
+
+const cursorMonitorFrequencySecs = 1;
+
+const st = new ShardingTest({
+ shards: 2,
+ other: {
+ shardOptions: {
+ verbose: 1,
+ setParameter: {
+ cursorTimeoutMillis: mongodCursorTimeoutMs,
+ clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
+ }
},
- enableBalancer: false
- });
-
- const adminDB = st.admin;
- const routerColl = st.s.getDB('test').user;
-
- const shardHost = st.config.shards.findOne({_id: st.shard1.shardName}).host;
- const mongod = new Mongo(shardHost);
- const shardColl = mongod.getCollection(routerColl.getFullName());
-
- assert.commandWorked(adminDB.runCommand({enableSharding: routerColl.getDB().getName()}));
- st.ensurePrimaryShard(routerColl.getDB().getName(), st.shard0.shardName);
-
- assert.commandWorked(
- adminDB.runCommand({shardCollection: routerColl.getFullName(), key: {x: 1}}));
- assert.commandWorked(adminDB.runCommand({split: routerColl.getFullName(), middle: {x: 10}}));
- assert.commandWorked(adminDB.runCommand({
- moveChunk: routerColl.getFullName(),
- find: {x: 11},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- for (let x = 0; x < 20; x++) {
- assert.writeOK(routerColl.insert({x: x}));
- }
-
- // Open both a normal and a no-timeout cursor on mongos. Batch size is 1 to ensure that
- // cursor.next() performs only a single operation.
- const routerCursorWithTimeout = routerColl.find().batchSize(1);
- const routerCursorWithNoTimeout = routerColl.find().batchSize(1);
- routerCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
-
- // Open both a normal and a no-timeout cursor on mongod. Batch size is 1 to ensure that
- // cursor.next() performs only a single operation.
- const shardCursorWithTimeout = shardColl.find().batchSize(1);
- const shardCursorWithNoTimeout = shardColl.find().batchSize(1);
- shardCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
-
- // Execute initial find on each cursor.
- routerCursorWithTimeout.next();
- routerCursorWithNoTimeout.next();
- shardCursorWithTimeout.next();
- shardCursorWithNoTimeout.next();
-
- // Wait until the idle cursor background job has killed the cursors that do not have the "no
- // timeout" flag set. We use the "cursorTimeoutMillis" and "clientCursorMonitorFrequencySecs"
- // setParameters above to reduce the amount of time we need to wait here.
- assert.soon(function() {
- return routerColl.getDB().serverStatus().metrics.cursor.timedOut > 0;
- }, "sharded cursor failed to time out");
-
- // Wait for the shard to have two open cursors on it (routerCursorWithNoTimeout and
- // shardCursorWithNoTimeout).
- // We cannot reliably use metrics.cursor.timedOut here, because this will be 2 if
- // routerCursorWithTimeout is killed for timing out on the shard, and 1 if
- // routerCursorWithTimeout is killed by a killCursors command from the mongos.
- assert.soon(function() {
- return shardColl.getDB().serverStatus().metrics.cursor.open.total == 2;
- }, "cursor failed to time out");
-
- assert.throws(function() {
- routerCursorWithTimeout.itcount();
- });
- assert.throws(function() {
- shardCursorWithTimeout.itcount();
- });
-
- // +1 because we already advanced once
- assert.eq(routerColl.count(), routerCursorWithNoTimeout.itcount() + 1);
- assert.eq(shardColl.count(), shardCursorWithNoTimeout.itcount() + 1);
-
- st.stop();
+ mongosOptions: {
+ verbose: 1,
+ setParameter: {
+ cursorTimeoutMillis: mongosCursorTimeoutMs,
+ clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs
+ }
+ },
+ },
+ enableBalancer: false
+});
+
+const adminDB = st.admin;
+const routerColl = st.s.getDB('test').user;
+
+const shardHost = st.config.shards.findOne({_id: st.shard1.shardName}).host;
+const mongod = new Mongo(shardHost);
+const shardColl = mongod.getCollection(routerColl.getFullName());
+
+assert.commandWorked(adminDB.runCommand({enableSharding: routerColl.getDB().getName()}));
+st.ensurePrimaryShard(routerColl.getDB().getName(), st.shard0.shardName);
+
+assert.commandWorked(adminDB.runCommand({shardCollection: routerColl.getFullName(), key: {x: 1}}));
+assert.commandWorked(adminDB.runCommand({split: routerColl.getFullName(), middle: {x: 10}}));
+assert.commandWorked(adminDB.runCommand({
+ moveChunk: routerColl.getFullName(),
+ find: {x: 11},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+for (let x = 0; x < 20; x++) {
+ assert.writeOK(routerColl.insert({x: x}));
+}
+
+// Open both a normal and a no-timeout cursor on mongos. Batch size is 1 to ensure that
+// cursor.next() performs only a single operation.
+const routerCursorWithTimeout = routerColl.find().batchSize(1);
+const routerCursorWithNoTimeout = routerColl.find().batchSize(1);
+routerCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
+
+// Open both a normal and a no-timeout cursor on mongod. Batch size is 1 to ensure that
+// cursor.next() performs only a single operation.
+const shardCursorWithTimeout = shardColl.find().batchSize(1);
+const shardCursorWithNoTimeout = shardColl.find().batchSize(1);
+shardCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
+
+// Execute initial find on each cursor.
+routerCursorWithTimeout.next();
+routerCursorWithNoTimeout.next();
+shardCursorWithTimeout.next();
+shardCursorWithNoTimeout.next();
+
+// Wait until the idle cursor background job has killed the cursors that do not have the "no
+// timeout" flag set. We use the "cursorTimeoutMillis" and "clientCursorMonitorFrequencySecs"
+// setParameters above to reduce the amount of time we need to wait here.
+assert.soon(function() {
+ return routerColl.getDB().serverStatus().metrics.cursor.timedOut > 0;
+}, "sharded cursor failed to time out");
+
+// Wait for the shard to have two open cursors on it (routerCursorWithNoTimeout and
+// shardCursorWithNoTimeout).
+// We cannot reliably use metrics.cursor.timedOut here, because this will be 2 if
+// routerCursorWithTimeout is killed for timing out on the shard, and 1 if
+// routerCursorWithTimeout is killed by a killCursors command from the mongos.
+assert.soon(function() {
+ return shardColl.getDB().serverStatus().metrics.cursor.open.total == 2;
+}, "cursor failed to time out");
+
+assert.throws(function() {
+ routerCursorWithTimeout.itcount();
+});
+assert.throws(function() {
+ shardCursorWithTimeout.itcount();
+});
+
+// +1 because we already advanced once
+assert.eq(routerColl.count(), routerCursorWithNoTimeout.itcount() + 1);
+assert.eq(shardColl.count(), shardCursorWithNoTimeout.itcount() + 1);
+
+st.stop();
})();
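
Exempting a cursor from that timeout uses the noTimeout query option, exactly as the no-timeout cursors above do; a minimal sketch:

    var cur = db.user.find().batchSize(1);
    cur.addOption(DBQuery.Option.noTimeout);
    cur.next();  // this cursor now survives idle periods until exhausted or killed
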
diff --git a/jstests/sharding/cursor_valid_after_shard_stepdown.js b/jstests/sharding/cursor_valid_after_shard_stepdown.js
index b717d73cac8..c26de68b8c6 100644
--- a/jstests/sharding/cursor_valid_after_shard_stepdown.js
+++ b/jstests/sharding/cursor_valid_after_shard_stepdown.js
@@ -4,43 +4,42 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, rs: {nodes: 2}});
+var st = new ShardingTest({shards: 1, rs: {nodes: 2}});
- assert.commandWorked(st.s0.adminCommand({enablesharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({enablesharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {x: 1}}));
- var db = st.s0.getDB('TestDB');
- var coll = db.TestColl;
+var db = st.s0.getDB('TestDB');
+var coll = db.TestColl;
- // Insert documents for the test
- assert.writeOK(coll.insert({x: 1, value: 'Test value 1'}));
- assert.writeOK(coll.insert({x: 2, value: 'Test value 2'}));
+// Insert documents for the test
+assert.writeOK(coll.insert({x: 1, value: 'Test value 1'}));
+assert.writeOK(coll.insert({x: 2, value: 'Test value 2'}));
- // Establish a cursor on the primary (by not using slaveOk read)
- var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
+// Establish a cursor on the primary (by not using slaveOk read)
+var findCursor = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1})).cursor;
- var shardVersionBeforeStepdown =
- assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
- .global;
- assert.neq(Timestamp(0, 0), shardVersionBeforeStepdown);
+var shardVersionBeforeStepdown =
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
+ .global;
+assert.neq(Timestamp(0, 0), shardVersionBeforeStepdown);
- // Stepdown the primary of the shard and ensure that that cursor can still be read
- assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: 1}));
+// Stepdown the primary of the shard and ensure that that cursor can still be read
+assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: 1}));
- var getMoreCursor =
- assert.commandWorked(db.runCommand({getMore: findCursor.id, collection: 'TestColl'}))
- .cursor;
- assert.eq(0, getMoreCursor.id);
- assert.eq(2, getMoreCursor.nextBatch[0].x);
+var getMoreCursor =
+ assert.commandWorked(db.runCommand({getMore: findCursor.id, collection: 'TestColl'})).cursor;
+assert.eq(0, getMoreCursor.id);
+assert.eq(2, getMoreCursor.nextBatch[0].x);
- // After stepdown, the shard version will be reset
- var shardVersionAfterStepdown =
- assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
- .global;
- assert.eq("UNKNOWN", shardVersionAfterStepdown);
+// After stepdown, the shard version will be reset
+var shardVersionAfterStepdown =
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
+ .global;
+assert.eq("UNKNOWN", shardVersionAfterStepdown);
- st.stop();
+st.stop();
})();
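
The raw find/getMore exchange driven above has this general shape; a minimal sketch (namespace illustrative):

    var first = assert.commandWorked(db.runCommand({find: 'TestColl', batchSize: 1}));
    var next = assert.commandWorked(
        db.runCommand({getMore: first.cursor.id, collection: 'TestColl'}));
    // next.cursor.id is 0 once the server has exhausted the cursor.
    printjson(next.cursor.nextBatch);
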
diff --git a/jstests/sharding/database_and_shard_versioning_all_commands.js b/jstests/sharding/database_and_shard_versioning_all_commands.js
index d78133d1386..a0fe0fa5da6 100644
--- a/jstests/sharding/database_and_shard_versioning_all_commands.js
+++ b/jstests/sharding/database_and_shard_versioning_all_commands.js
@@ -3,622 +3,615 @@
* verifies that the commands match the specification.
*/
(function() {
- 'use strict';
-
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- const SHARD_VERSION_UNSHARDED = [Timestamp(0, 0), ObjectId("000000000000000000000000")];
-
- function validateTestCase(testCase) {
- assert(testCase.skip || testCase.command,
- "must specify exactly one of 'skip' or 'command' for test case " + tojson(testCase));
-
- if (testCase.skip) {
- for (let key of Object.keys(testCase)) {
- assert(
- key === "skip" || key === "conditional",
- "if a test case specifies 'skip', it must not specify any other fields besides 'conditional': " +
- key + ": " + tojson(testCase));
- }
- return;
+'use strict';
+
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+const SHARD_VERSION_UNSHARDED = [Timestamp(0, 0), ObjectId("000000000000000000000000")];
+
+function validateTestCase(testCase) {
+ assert(testCase.skip || testCase.command,
+ "must specify exactly one of 'skip' or 'command' for test case " + tojson(testCase));
+
+ if (testCase.skip) {
+ for (let key of Object.keys(testCase)) {
+ assert(
+ key === "skip" || key === "conditional",
+ "if a test case specifies 'skip', it must not specify any other fields besides 'conditional': " +
+ key + ": " + tojson(testCase));
}
-
- // Check that required fields are present.
- assert(testCase.hasOwnProperty("sendsDbVersion"),
- "must specify 'sendsDbVersion' for test case " + tojson(testCase));
- assert(testCase.hasOwnProperty("sendsShardVersion"),
- "must specify 'sendsShardVersion' for test case " + tojson(testCase));
-
- // Check that all present fields are of the correct type.
- assert(typeof(testCase.command) === "object");
- assert(testCase.runsAgainstAdminDb ? typeof(testCase.runsAgainstAdminDb) === "boolean"
- : true);
- assert(testCase.skipProfilerCheck ? typeof(testCase.skipProfilerCheck) === "boolean"
- : true);
- assert(typeof(testCase.sendsDbVersion) === "boolean");
- assert(typeof(testCase.sendsShardVersion) === "boolean");
- assert(testCase.setUp ? typeof(testCase.setUp) === "function" : true,
- "setUp must be a function: " + tojson(testCase));
- assert(testCase.cleanUp ? typeof(testCase.cleanUp) === "function" : true,
- "cleanUp must be a function: " + tojson(testCase));
+ return;
}
- let testCases = {
- _hashBSONElement: {skip: "executes locally on mongos (not sent to any remote node)"},
- _isSelf: {skip: "executes locally on mongos (not sent to any remote node)"},
- _mergeAuthzCollections: {skip: "always targets the config server"},
- abortTransaction: {skip: "unversioned and uses special targeting rules"},
- addShard: {skip: "not on a user database"},
- addShardToZone: {skip: "not on a user database"},
- aggregate: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- },
- authenticate: {skip: "does not forward command to primary shard"},
- availableQueryOptions: {skip: "executes locally on mongos (not sent to any remote node)"},
- balancerStart: {skip: "not on a user database"},
- balancerStatus: {skip: "not on a user database"},
- balancerStop: {skip: "not on a user database"},
- buildInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
- clearLog: {skip: "executes locally on mongos (not sent to any remote node)"},
- collMod: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {collMod: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- collStats: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {collStats: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- commitTransaction: {skip: "unversioned and uses special targeting rules"},
- compact: {skip: "not allowed through mongos"},
- configureFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"},
- connPoolStats: {skip: "executes locally on mongos (not sent to any remote node)"},
- connPoolSync: {skip: "executes locally on mongos (not sent to any remote node)"},
- connectionStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
- convertToCapped: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {convertToCapped: collName, size: 8192},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- count: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {count: collName, query: {x: 1}},
- },
- create: {
- sendsDbVersion: false,
- // The collection doesn't exist yet, so no shardVersion is sent.
- sendsShardVersion: false,
- command: {create: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- createIndexes: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {createIndexes: collName, indexes: [{key: {a: 1}, name: "index"}]},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- },
- },
- createRole: {skip: "always targets the config server"},
- createUser: {skip: "always targets the config server"},
- currentOp: {skip: "not on a user database"},
- dataSize: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {dataSize: ns},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- },
- dbStats: {
- sendsDbVersion: false,
- // dbStats is always broadcast to all shards
- sendsShardVersion: false,
- command: {dbStats: 1, scale: 1}
- },
- delete: {
- skipProfilerCheck: true,
- sendsDbVersion: false,
- // The profiler extracts the individual deletes from the 'deletes' array, and so loses
- // the overall delete command's attached shardVersion, though one is sent.
- sendsShardVersion: true,
- command: {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}
+ // Check that required fields are present.
+ assert(testCase.hasOwnProperty("sendsDbVersion"),
+ "must specify 'sendsDbVersion' for test case " + tojson(testCase));
+ assert(testCase.hasOwnProperty("sendsShardVersion"),
+ "must specify 'sendsShardVersion' for test case " + tojson(testCase));
+
+ // Check that all present fields are of the correct type.
+ assert(typeof (testCase.command) === "object");
+ assert(testCase.runsAgainstAdminDb ? typeof (testCase.runsAgainstAdminDb) === "boolean" : true);
+ assert(testCase.skipProfilerCheck ? typeof (testCase.skipProfilerCheck) === "boolean" : true);
+ assert(typeof (testCase.sendsDbVersion) === "boolean");
+ assert(typeof (testCase.sendsShardVersion) === "boolean");
+ assert(testCase.setUp ? typeof (testCase.setUp) === "function" : true,
+ "setUp must be a function: " + tojson(testCase));
+ assert(testCase.cleanUp ? typeof (testCase.cleanUp) === "function" : true,
+ "cleanUp must be a function: " + tojson(testCase));
+}
+
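+// For illustration only: a minimal entry that would pass the validation above needs the two
+// required booleans plus a command object; 'setUp' and 'cleanUp' are optional hooks that take
+// the mongos connection. The command name 'exampleCmd' below is hypothetical:
+//
+//     exampleCmd: {
+//         sendsDbVersion: false,
+//         sendsShardVersion: false,
+//         command: {exampleCmd: 1},
+//     },
+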
+let testCases = {
+ _hashBSONElement: {skip: "executes locally on mongos (not sent to any remote node)"},
+ _isSelf: {skip: "executes locally on mongos (not sent to any remote node)"},
+ _mergeAuthzCollections: {skip: "always targets the config server"},
+    abortTransaction: {skip: "unversioned and uses special targeting rules"},
+ addShard: {skip: "not on a user database"},
+ addShardToZone: {skip: "not on a user database"},
+ aggregate: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ },
+ authenticate: {skip: "does not forward command to primary shard"},
+ availableQueryOptions: {skip: "executes locally on mongos (not sent to any remote node)"},
+ balancerStart: {skip: "not on a user database"},
+ balancerStatus: {skip: "not on a user database"},
+ balancerStop: {skip: "not on a user database"},
+ buildInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
+ clearLog: {skip: "executes locally on mongos (not sent to any remote node)"},
+ collMod: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- distinct: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {distinct: collName, key: "x"},
+ command: {collMod: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ collStats: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- drop: {
- skipProfilerCheck: true,
- sendsDbVersion: false,
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {drop: collName},
+ command: {collStats: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+    commitTransaction: {skip: "unversioned and uses special targeting rules"},
+ compact: {skip: "not allowed through mongos"},
+ configureFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connPoolStats: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connPoolSync: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connectionStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ convertToCapped: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- dropAllRolesFromDatabase: {skip: "always targets the config server"},
- dropAllUsersFromDatabase: {skip: "always targets the config server"},
- dropConnections: {skip: "not on a user database"},
- dropDatabase: {skip: "drops the database from the cluster, changing the UUID"},
- dropIndexes: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {dropIndexes: collName, index: "*"},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {convertToCapped: collName, size: 8192},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ count: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {count: collName, query: {x: 1}},
+ },
+ create: {
+ sendsDbVersion: false,
+ // The collection doesn't exist yet, so no shardVersion is sent.
+ sendsShardVersion: false,
+ command: {create: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ createIndexes: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- dropRole: {skip: "always targets the config server"},
- dropUser: {skip: "always targets the config server"},
- echo: {skip: "does not forward command to primary shard"},
- enableSharding: {skip: "does not forward command to primary shard"},
- endSessions: {skip: "goes through the cluster write path"},
- explain: {skip: "TODO SERVER-31226"},
- features: {skip: "executes locally on mongos (not sent to any remote node)"},
- filemd5: {
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {filemd5: ObjectId(), root: collName}
+ command: {createIndexes: collName, indexes: [{key: {a: 1}, name: "index"}]},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
},
- find: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {find: collName, filter: {x: 1}},
+ },
+ createRole: {skip: "always targets the config server"},
+ createUser: {skip: "always targets the config server"},
+ currentOp: {skip: "not on a user database"},
+ dataSize: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- findAndModify: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {findAndModify: collName, query: {_id: 0}, remove: true}
+ command: {dataSize: ns},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ dbStats: {
+ sendsDbVersion: false,
+ // dbStats is always broadcast to all shards
+ sendsShardVersion: false,
+ command: {dbStats: 1, scale: 1}
+ },
+ delete: {
+ skipProfilerCheck: true,
+ sendsDbVersion: false,
+ // The profiler extracts the individual deletes from the 'deletes' array, and so loses
+ // the overall delete command's attached shardVersion, though one is sent.
+ sendsShardVersion: true,
+ command: {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}
+ },
+ distinct: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {distinct: collName, key: "x"},
+ },
+ drop: {
+ skipProfilerCheck: true,
+ sendsDbVersion: false,
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- flushRouterConfig: {skip: "executes locally on mongos (not sent to any remote node)"},
- fsync: {skip: "broadcast to all shards"},
- getCmdLineOpts: {skip: "executes locally on mongos (not sent to any remote node)"},
- getDiagnosticData: {skip: "executes locally on mongos (not sent to any remote node)"},
- getLastError: {skip: "does not forward command to primary shard"},
- getLog: {skip: "executes locally on mongos (not sent to any remote node)"},
- getMore: {skip: "requires a previously established cursor"},
- getParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
- getShardMap: {skip: "executes locally on mongos (not sent to any remote node)"},
- getShardVersion: {skip: "executes locally on mongos (not sent to any remote node)"},
- getnonce: {skip: "not on a user database"},
- grantPrivilegesToRole: {skip: "always targets the config server"},
- grantRolesToRole: {skip: "always targets the config server"},
- grantRolesToUser: {skip: "always targets the config server"},
- hostInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
- insert: {
- sendsDbVersion: false,
- sendsShardVersion: true,
- command: {insert: collName, documents: [{_id: 1}]},
- cleanUp: function(mongosConn) {
-                // The insert implicitly created the collection, so it must be dropped here.
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {drop: collName},
+ },
+ dropAllRolesFromDatabase: {skip: "always targets the config server"},
+ dropAllUsersFromDatabase: {skip: "always targets the config server"},
+ dropConnections: {skip: "not on a user database"},
+ dropDatabase: {skip: "drops the database from the cluster, changing the UUID"},
+ dropIndexes: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- invalidateUserCache: {skip: "executes locally on mongos (not sent to any remote node)"},
- isdbgrid: {skip: "executes locally on mongos (not sent to any remote node)"},
- isMaster: {skip: "executes locally on mongos (not sent to any remote node)"},
- killCursors: {skip: "requires a previously established cursor"},
- killAllSessions: {skip: "always broadcast to all hosts in the cluster"},
- killAllSessionsByPattern: {skip: "always broadcast to all hosts in the cluster"},
- killOp: {skip: "does not forward command to primary shard"},
- killSessions: {skip: "always broadcast to all hosts in the cluster"},
- listCollections: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- command: {listCollections: 1},
+ command: {dropIndexes: collName, index: "*"},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ dropRole: {skip: "always targets the config server"},
+ dropUser: {skip: "always targets the config server"},
+ echo: {skip: "does not forward command to primary shard"},
+ enableSharding: {skip: "does not forward command to primary shard"},
+ endSessions: {skip: "goes through the cluster write path"},
+ explain: {skip: "TODO SERVER-31226"},
+ features: {skip: "executes locally on mongos (not sent to any remote node)"},
+ filemd5: {
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {filemd5: ObjectId(), root: collName}
+ },
+ find: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {find: collName, filter: {x: 1}},
+ },
+ findAndModify: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {findAndModify: collName, query: {_id: 0}, remove: true}
+ },
+ flushRouterConfig: {skip: "executes locally on mongos (not sent to any remote node)"},
+ fsync: {skip: "broadcast to all shards"},
+ getCmdLineOpts: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getDiagnosticData: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getLastError: {skip: "does not forward command to primary shard"},
+ getLog: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getMore: {skip: "requires a previously established cursor"},
+ getParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getShardMap: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getShardVersion: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getnonce: {skip: "not on a user database"},
+ grantPrivilegesToRole: {skip: "always targets the config server"},
+ grantRolesToRole: {skip: "always targets the config server"},
+ grantRolesToUser: {skip: "always targets the config server"},
+ hostInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
+ insert: {
+ sendsDbVersion: false,
+ sendsShardVersion: true,
+ command: {insert: collName, documents: [{_id: 1}]},
+ cleanUp: function(mongosConn) {
+            // The insert implicitly created the collection, so it must be dropped here.
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ invalidateUserCache: {skip: "executes locally on mongos (not sent to any remote node)"},
+ isdbgrid: {skip: "executes locally on mongos (not sent to any remote node)"},
+ isMaster: {skip: "executes locally on mongos (not sent to any remote node)"},
+ killCursors: {skip: "requires a previously established cursor"},
+ killAllSessions: {skip: "always broadcast to all hosts in the cluster"},
+ killAllSessionsByPattern: {skip: "always broadcast to all hosts in the cluster"},
+ killOp: {skip: "does not forward command to primary shard"},
+ killSessions: {skip: "always broadcast to all hosts in the cluster"},
+ listCollections: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ command: {listCollections: 1},
+ },
+ listCommands: {skip: "executes locally on mongos (not sent to any remote node)"},
+ listDatabases: {skip: "does not forward command to primary shard"},
+ listIndexes: {
+ sendsDbVersion: true,
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- listCommands: {skip: "executes locally on mongos (not sent to any remote node)"},
- listDatabases: {skip: "does not forward command to primary shard"},
- listIndexes: {
- sendsDbVersion: true,
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {listIndexes: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {listIndexes: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ listShards: {skip: "does not forward command to primary shard"},
+ logApplicationMessage: {skip: "not on a user database", conditional: true},
+ logRotate: {skip: "executes locally on mongos (not sent to any remote node)"},
+ logout: {skip: "not on a user database"},
+ mapReduce: {
+ sendsDbVersion: false,
+ // mapReduce uses connection versioning rather than sending shardVersion in the command.
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- listShards: {skip: "does not forward command to primary shard"},
- logApplicationMessage: {skip: "not on a user database", conditional: true},
- logRotate: {skip: "executes locally on mongos (not sent to any remote node)"},
- logout: {skip: "not on a user database"},
- mapReduce: {
- sendsDbVersion: false,
- // mapReduce uses connection versioning rather than sending shardVersion in the command.
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
+ command: {
+ mapReduce: collName,
+ map: function() {
+ emit(this.x, 1);
},
- command: {
- mapReduce: collName,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
+ reduce: function(key, values) {
+ return Array.sum(values);
},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
- }
- },
- mergeChunks: {skip: "does not forward command to primary shard"},
- moveChunk: {skip: "does not forward command to primary shard"},
- movePrimary: {skip: "reads primary shard from sharding catalog with readConcern: local"},
- multicast: {skip: "does not forward command to primary shard"},
- netstat: {skip: "executes locally on mongos (not sent to any remote node)"},
- ping: {skip: "executes locally on mongos (not sent to any remote node)"},
- planCacheClear: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheClear: collName}
+ out: {inline: 1}
},
- planCacheClearFilters: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheClearFilters: collName}
- },
- planCacheListFilters: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheListFilters: collName}
- },
- planCacheListPlans: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {planCacheListPlans: collName, query: {_id: "A"}},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
+ }
+ },
+ mergeChunks: {skip: "does not forward command to primary shard"},
+ moveChunk: {skip: "does not forward command to primary shard"},
+ movePrimary: {skip: "reads primary shard from sharding catalog with readConcern: local"},
+ multicast: {skip: "does not forward command to primary shard"},
+ netstat: {skip: "executes locally on mongos (not sent to any remote node)"},
+ ping: {skip: "executes locally on mongos (not sent to any remote node)"},
+ planCacheClear: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheClear: collName}
+ },
+ planCacheClearFilters: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheClearFilters: collName}
+ },
+ planCacheListFilters: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheListFilters: collName}
+ },
+ planCacheListPlans: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- planCacheListQueryShapes: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- command: {planCacheListQueryShapes: collName}
+ command: {planCacheListPlans: collName, query: {_id: "A"}},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ planCacheListQueryShapes: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ command: {planCacheListQueryShapes: collName}
+ },
+ planCacheSetFilter: {
+ sendsDbVersion: false,
+ // Uses connection versioning.
+ sendsShardVersion: false,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- planCacheSetFilter: {
- sendsDbVersion: false,
- // Uses connection versioning.
- sendsShardVersion: false,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {planCacheSetFilter: collName, query: {_id: "A"}, indexes: [{_id: 1}]},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
+ command: {planCacheSetFilter: collName, query: {_id: "A"}, indexes: [{_id: 1}]},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
+ }
+ },
+ profile: {skip: "not supported in mongos"},
+ reapLogicalSessionCacheNow: {skip: "is a no-op on mongos"},
+ refreshLogicalSessionCacheNow: {skip: "goes through the cluster write path"},
+ refreshSessions: {skip: "executes locally on mongos (not sent to any remote node)"},
+ refreshSessionsInternal:
+ {skip: "executes locally on mongos (not sent to any remote node)", conditional: true},
+ removeShard: {skip: "not on a user database"},
+ removeShardFromZone: {skip: "not on a user database"},
+ renameCollection: {
+ runsAgainstAdminDb: true,
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- profile: {skip: "not supported in mongos"},
- reapLogicalSessionCacheNow: {skip: "is a no-op on mongos"},
- refreshLogicalSessionCacheNow: {skip: "goes through the cluster write path"},
- refreshSessions: {skip: "executes locally on mongos (not sent to any remote node)"},
- refreshSessionsInternal:
- {skip: "executes locally on mongos (not sent to any remote node)", conditional: true},
- removeShard: {skip: "not on a user database"},
- removeShardFromZone: {skip: "not on a user database"},
- renameCollection: {
- runsAgainstAdminDb: true,
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {
- renameCollection: dbName + "." + collName,
- to: dbName + "." + collName + "_renamed"
- },
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
- }
+ command:
+ {renameCollection: dbName + "." + collName, to: dbName + "." + collName + "_renamed"},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName + "_renamed").drop());
+ }
+ },
+ replSetGetStatus: {skip: "not supported in mongos"},
+ resetError: {skip: "not on a user database"},
+ restartCatalog: {skip: "not on a user database"},
+ revokePrivilegesFromRole: {skip: "always targets the config server"},
+ revokeRolesFromRole: {skip: "always targets the config server"},
+ revokeRolesFromUser: {skip: "always targets the config server"},
+ rolesInfo: {skip: "always targets the config server"},
+ saslContinue: {skip: "not on a user database"},
+ saslStart: {skip: "not on a user database"},
+ serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ setIndexCommitQuorum: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- replSetGetStatus: {skip: "not supported in mongos"},
- resetError: {skip: "not on a user database"},
- restartCatalog: {skip: "not on a user database"},
- revokePrivilegesFromRole: {skip: "always targets the config server"},
- revokeRolesFromRole: {skip: "always targets the config server"},
- revokeRolesFromUser: {skip: "always targets the config server"},
- rolesInfo: {skip: "always targets the config server"},
- saslContinue: {skip: "not on a user database"},
- saslStart: {skip: "not on a user database"},
- serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
- setIndexCommitQuorum: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command:
- {setIndexCommitQuorum: collName, indexNames: ["index"], commitQuorum: "majority"},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- },
+ command: {setIndexCommitQuorum: collName, indexNames: ["index"], commitQuorum: "majority"},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
},
- setFeatureCompatibilityVersion: {skip: "not on a user database"},
- setFreeMonitoring:
- {skip: "explicitly fails for mongos, primary mongod only", conditional: true},
- setParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
- shardCollection: {skip: "does not forward command to primary shard"},
- shardConnPoolStats: {skip: "does not forward command to primary shard"},
- shutdown: {skip: "does not forward command to primary shard"},
- split: {skip: "does not forward command to primary shard"},
- splitVector: {skip: "does not forward command to primary shard"},
- startRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
- startSession: {skip: "executes locally on mongos (not sent to any remote node)"},
- stopRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
- update: {
- skipProfilerCheck: true,
- sendsDbVersion: false,
- // The profiler extracts the individual updates from the 'updates' array, and so loses
- // the overall update command's attached shardVersion, though one is sent.
- sendsShardVersion: true,
- command: {
- update: collName,
- updates: [{q: {_id: 2}, u: {_id: 2}, upsert: true, multi: false}]
- }
+ },
+ setFeatureCompatibilityVersion: {skip: "not on a user database"},
+ setFreeMonitoring:
+ {skip: "explicitly fails for mongos, primary mongod only", conditional: true},
+ setParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
+ shardCollection: {skip: "does not forward command to primary shard"},
+ shardConnPoolStats: {skip: "does not forward command to primary shard"},
+ shutdown: {skip: "does not forward command to primary shard"},
+ split: {skip: "does not forward command to primary shard"},
+ splitVector: {skip: "does not forward command to primary shard"},
+ startRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
+ startSession: {skip: "executes locally on mongos (not sent to any remote node)"},
+ stopRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
+ update: {
+ skipProfilerCheck: true,
+ sendsDbVersion: false,
+ // The profiler extracts the individual updates from the 'updates' array, and so loses
+ // the overall update command's attached shardVersion, though one is sent.
+ sendsShardVersion: true,
+ command:
+ {update: collName, updates: [{q: {_id: 2}, u: {_id: 2}, upsert: true, multi: false}]}
+ },
+ updateRole: {skip: "always targets the config server"},
+ updateUser: {skip: "always targets the config server"},
+ updateZoneKeyRange: {skip: "not on a user database"},
+ usersInfo: {skip: "always targets the config server"},
+ validate: {
+ skipProfilerCheck: true,
+ sendsDbVersion: true,
+ sendsShardVersion: true,
+ setUp: function(mongosConn) {
+ // Expects the collection to exist, and doesn't implicitly create it.
+ assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
},
- updateRole: {skip: "always targets the config server"},
- updateUser: {skip: "always targets the config server"},
- updateZoneKeyRange: {skip: "not on a user database"},
- usersInfo: {skip: "always targets the config server"},
- validate: {
- skipProfilerCheck: true,
- sendsDbVersion: true,
- sendsShardVersion: true,
- setUp: function(mongosConn) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: {validate: collName},
- cleanUp: function(mongosConn) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- },
+ command: {validate: collName},
+ cleanUp: function(mongosConn) {
+ assert(mongosConn.getDB(dbName).getCollection(collName).drop());
},
- whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"},
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
-
- class AllCommandsTestRunner {
- constructor() {
- this.st = new ShardingTest(this.getShardingTestOptions());
- let db = this.st.s.getDB(dbName);
-            // Create and drop a collection so that an entry for the database gets created in
-            // the sharding catalog.
- assert.commandWorked(db.createCollection(collName));
- assert.commandWorked(db.runCommand({drop: collName}));
- this.primaryShard = this.st.shard0;
- this.st.ensurePrimaryShard(dbName, this.primaryShard.shardName);
-
- this.dbVersion =
- this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
- this.previousDbVersion = null;
-
- let res = this.st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
- this.commands = Object.keys(res.commands);
- }
+ },
+ whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"},
+};
+
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
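+
+// For example, assuming "group" is among the commands removed in 4.2, the loop above amounts to
+//     testCases.group = {skip: "must define test coverage for 4.0 backwards compatibility"};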
+
+class AllCommandsTestRunner {
+ constructor() {
+ this.st = new ShardingTest(this.getShardingTestOptions());
+ let db = this.st.s.getDB(dbName);
+        // Create and drop a collection so that an entry for the database gets created in the
+        // sharding catalog.
+ assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.runCommand({drop: collName}));
+ this.primaryShard = this.st.shard0;
+ this.st.ensurePrimaryShard(dbName, this.primaryShard.shardName);
+
+ this.dbVersion =
+ this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+ this.previousDbVersion = null;
+
+ let res = this.st.s.adminCommand({listCommands: 1});
+ assert.commandWorked(res);
+ this.commands = Object.keys(res.commands);
+ }
- shutdown() {
- this.st.stop();
- }
+ shutdown() {
+ this.st.stop();
+ }
- getShardingTestOptions() {
- throw new Error("not implemented");
+ getShardingTestOptions() {
+ throw new Error("not implemented");
+ }
+ makeShardDatabaseCacheStale() {
+ throw new Error("not implemented");
+ }
+
+ assertSentDatabaseVersion(testCase, commandProfile) {
+ const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
+ assert.commandWorked(res);
+ assert.eq(this.dbVersion, res.dbVersion);
+
+ // If the test case is marked as not tracked by the profiler, then we won't be able to
+        // verify that the version was sent here. Any test cases marked with this flag should be
+ // fixed in SERVER-33499.
+ if (!testCase.skipProfilerCheck) {
+ commandProfile["command.databaseVersion"] = this.dbVersion;
+ profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
}
- makeShardDatabaseCacheStale() {
- throw new Error("not implemented");
+ }
+
+ assertDidNotSendDatabaseVersion(testCase, commandProfile) {
+ const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
+ assert.commandWorked(res);
+ assert.eq({}, res.dbVersion);
+
+ // If the test case is marked as not tracked by the profiler, then we won't be able to
+ // verify the version was not sent here. Any test cases marked with this flag should be
+ // fixed in SERVER-33499.
+ if (!testCase.skipProfilerCheck) {
+ commandProfile["command.databaseVersion"] = {$exists: false};
+ profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
}
+ }
- assertSentDatabaseVersion(testCase, commandProfile) {
- const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
- assert.commandWorked(res);
- assert.eq(this.dbVersion, res.dbVersion);
-
- // If the test case is marked as not tracked by the profiler, then we won't be able to
-            // verify that the version was sent here. Any test cases marked with this flag should be
- // fixed in SERVER-33499.
- if (!testCase.skipProfilerCheck) {
- commandProfile["command.databaseVersion"] = this.dbVersion;
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
+ runCommands() {
+ // Use the profiler to check that the command was received with or without a
+ // databaseVersion and shardVersion as expected by the 'testCase' for the command.
+ for (let command of this.commands) {
+ let testCase = testCases[command];
+ assert(testCase !== undefined,
+ "coverage failure: must define a test case for " + command);
+ if (!testCases[command].validated) {
+ validateTestCase(testCase);
+ testCases[command].validated = true;
}
- }
- assertDidNotSendDatabaseVersion(testCase, commandProfile) {
- const res = this.primaryShard.adminCommand({getDatabaseVersion: dbName});
- assert.commandWorked(res);
- assert.eq({}, res.dbVersion);
-
- // If the test case is marked as not tracked by the profiler, then we won't be able to
- // verify the version was not sent here. Any test cases marked with this flag should be
- // fixed in SERVER-33499.
- if (!testCase.skipProfilerCheck) {
- commandProfile["command.databaseVersion"] = {$exists: false};
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: this.primaryShard.getDB(dbName), filter: commandProfile});
+ if (testCase.skip) {
+ print("skipping " + command + ": " + testCase.skip);
+ continue;
}
- }
- runCommands() {
- // Use the profiler to check that the command was received with or without a
- // databaseVersion and shardVersion as expected by the 'testCase' for the command.
- for (let command of this.commands) {
- let testCase = testCases[command];
- assert(testCase !== undefined,
- "coverage failure: must define a test case for " + command);
- if (!testCases[command].validated) {
- validateTestCase(testCase);
- testCases[command].validated = true;
- }
-
- if (testCase.skip) {
- print("skipping " + command + ": " + testCase.skip);
- continue;
- }
-
- this.primaryShard.getDB(dbName).setProfilingLevel(2);
-
- jsTest.log("testing command " + tojson(testCase.command));
-
- if (testCase.setUp) {
- testCase.setUp(this.st.s);
- }
-
- let commandProfile = buildCommandProfile(testCase.command, false);
- commandProfile["command.shardVersion"] =
- testCase.sendsShardVersion ? SHARD_VERSION_UNSHARDED : {$exists: false};
-
- if (testCase.runsAgainstAdminDb) {
- assert.commandWorked(this.st.s.adminCommand(testCase.command));
- } else {
- assert.commandWorked(this.st.s.getDB(dbName).runCommand(testCase.command));
- }
-
- if (testCase.sendsDbVersion) {
- this.assertSentDatabaseVersion(testCase, commandProfile);
- } else {
- this.assertDidNotSendDatabaseVersion(testCase, commandProfile);
- }
-
- if (testCase.cleanUp) {
- testCase.cleanUp(this.st.s);
- }
-
- // Clear the profiler collection in between testing each command.
- this.primaryShard.getDB(dbName).setProfilingLevel(0);
- assert(this.primaryShard.getDB(dbName).getCollection("system.profile").drop());
-
- this.makeShardDatabaseCacheStale();
+ this.primaryShard.getDB(dbName).setProfilingLevel(2);
+
+ jsTest.log("testing command " + tojson(testCase.command));
+
+ if (testCase.setUp) {
+ testCase.setUp(this.st.s);
}
- // After iterating through all the existing commands, ensure there were no additional
- // test cases that did not correspond to any mongos command.
- for (let key of Object.keys(testCases)) {
- // We have defined real test cases for commands added in 4.2 so that the test cases
- // are exercised in the regular suites, but because these test cases can't run in
- // the last stable suite, we skip processing them here to avoid failing the below
- // assertion. We have defined "skip" test cases for commands removed in 4.2 so the
- // test case is defined in last stable suites (in which these commands still exist
- // on the mongos), but these test cases won't be run in regular suites, so we skip
- // processing them below as well.
- if (commandsAddedToMongosIn42.includes(key) ||
- commandsRemovedFromMongosIn42.includes(key)) {
- continue;
- }
- assert(testCases[key].validated || testCases[key].conditional,
- "you defined a test case for a command '" + key +
- "' that does not exist on mongos: " + tojson(testCases[key]));
+ let commandProfile = buildCommandProfile(testCase.command, false);
+ commandProfile["command.shardVersion"] =
+ testCase.sendsShardVersion ? SHARD_VERSION_UNSHARDED : {$exists: false};
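+            // For illustration: with the 'find' test case this builds a profiler filter roughly
+            // of the form {"command.find": collName, "command.shardVersion":
+            // SHARD_VERSION_UNSHARDED}; the assert helpers above then add the matching
+            // "command.databaseVersion" condition before querying the profiler.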
+
+ if (testCase.runsAgainstAdminDb) {
+ assert.commandWorked(this.st.s.adminCommand(testCase.command));
+ } else {
+ assert.commandWorked(this.st.s.getDB(dbName).runCommand(testCase.command));
}
- }
- }
- class DropDatabaseTestRunner extends AllCommandsTestRunner {
- getShardingTestOptions() {
- return {shards: 1};
+ if (testCase.sendsDbVersion) {
+ this.assertSentDatabaseVersion(testCase, commandProfile);
+ } else {
+ this.assertDidNotSendDatabaseVersion(testCase, commandProfile);
+ }
+
+ if (testCase.cleanUp) {
+ testCase.cleanUp(this.st.s);
+ }
+
+ // Clear the profiler collection in between testing each command.
+ this.primaryShard.getDB(dbName).setProfilingLevel(0);
+ assert(this.primaryShard.getDB(dbName).getCollection("system.profile").drop());
+
+ this.makeShardDatabaseCacheStale();
}
- makeShardDatabaseCacheStale() {
- // Drop the database from the shard to clear the shard's cached in-memory database info.
- assert.commandWorked(this.primaryShard.getDB(dbName).runCommand({dropDatabase: 1}));
+ // After iterating through all the existing commands, ensure there were no additional
+ // test cases that did not correspond to any mongos command.
+ for (let key of Object.keys(testCases)) {
+ // We have defined real test cases for commands added in 4.2 so that the test cases
+ // are exercised in the regular suites, but because these test cases can't run in
+ // the last stable suite, we skip processing them here to avoid failing the below
+ // assertion. We have defined "skip" test cases for commands removed in 4.2 so the
+ // test case is defined in last stable suites (in which these commands still exist
+ // on the mongos), but these test cases won't be run in regular suites, so we skip
+ // processing them below as well.
+ if (commandsAddedToMongosIn42.includes(key) ||
+ commandsRemovedFromMongosIn42.includes(key)) {
+ continue;
+ }
+ assert(testCases[key].validated || testCases[key].conditional,
+ "you defined a test case for a command '" + key +
+ "' that does not exist on mongos: " + tojson(testCases[key]));
}
}
+}
- class MovePrimaryTestRunner extends AllCommandsTestRunner {
- getShardingTestOptions() {
- return {shards: 2};
- }
+class DropDatabaseTestRunner extends AllCommandsTestRunner {
+ getShardingTestOptions() {
+ return {shards: 1};
+ }
- makeShardDatabaseCacheStale() {
- let fromShard = this.st.getPrimaryShard(dbName);
- let toShard = this.st.getOther(fromShard);
+ makeShardDatabaseCacheStale() {
+ // Drop the database from the shard to clear the shard's cached in-memory database info.
+ assert.commandWorked(this.primaryShard.getDB(dbName).runCommand({dropDatabase: 1}));
+ }
+}
- this.primaryShard = toShard;
- this.previousDbVersion = this.dbVersion;
+class MovePrimaryTestRunner extends AllCommandsTestRunner {
+ getShardingTestOptions() {
+ return {shards: 2};
+ }
- assert.commandWorked(this.st.s0.adminCommand({movePrimary: dbName, to: toShard.name}));
- this.dbVersion =
- this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+ makeShardDatabaseCacheStale() {
+ let fromShard = this.st.getPrimaryShard(dbName);
+ let toShard = this.st.getOther(fromShard);
- // The dbVersion should have changed due to the movePrimary operation.
- assert.eq(this.dbVersion.lastMod, this.previousDbVersion.lastMod + 1);
- }
+ this.primaryShard = toShard;
+ this.previousDbVersion = this.dbVersion;
+
+ assert.commandWorked(this.st.s0.adminCommand({movePrimary: dbName, to: toShard.name}));
+ this.dbVersion =
+ this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+
+ // The dbVersion should have changed due to the movePrimary operation.
+ assert.eq(this.dbVersion.lastMod, this.previousDbVersion.lastMod + 1);
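+        // (A database version document has the form {uuid: <UUID>, lastMod: <int>}: movePrimary
+        // keeps the uuid and increments lastMod by one, which is what the assertion above checks.)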
}
+}
- let dropDatabaseTestRunner = new DropDatabaseTestRunner();
- dropDatabaseTestRunner.runCommands();
- dropDatabaseTestRunner.shutdown();
+let dropDatabaseTestRunner = new DropDatabaseTestRunner();
+dropDatabaseTestRunner.runCommands();
+dropDatabaseTestRunner.shutdown();
- let movePrimaryTestRunner = new MovePrimaryTestRunner();
- movePrimaryTestRunner.runCommands();
- movePrimaryTestRunner.shutdown();
+let movePrimaryTestRunner = new MovePrimaryTestRunner();
+movePrimaryTestRunner.runCommands();
+movePrimaryTestRunner.shutdown();
})();
diff --git a/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js b/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js
index f6dc4560462..9beaa8c2f0c 100644
--- a/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js
+++ b/jstests/sharding/database_versioning_cache_entry_without_version_updated_with_version.js
@@ -4,58 +4,56 @@
// its on-disk cache so that the version can also be picked up by secondaries.
(function() {
- const st = new ShardingTest({shards: 1, rs: {nodes: 2}, other: {verbose: 2}});
-
- assert.commandWorked(st.s.getDB("test").getCollection("foo").insert({x: 1}));
-
- // The database is created with a version.
- const versionOnConfig =
- st.s.getDB("config").getCollection("databases").findOne({_id: "test"}).version;
- assert.neq(null, versionOnConfig);
-
- // Before the shard refreshes, it does not have a cache entry for the database.
- assert.eq(null,
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}));
-
- // After the shard refreshes, it has a cache entry for the database with version matching the
- // version on the config server.
- assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: "test"}));
- const versionOnShard =
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
- assert.docEq(versionOnConfig, versionOnShard);
-
- // The shard primary's in-memory version matches the on-disk version.
- assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- jsTest.log("Remove the database version from the shard's cache entry");
- assert.commandWorked(
- st.shard0.getDB("config").getCollection("cache.databases").update({_id: "test"}, {
- $unset: {version: ""}
- }));
- assert.eq(
- null,
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version);
-
- // Deleting the version field from the on-disk entry did not affect the in-memory version.
- assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- // The shard secondary does not have a version cached in memory.
- assert.eq({}, st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- // A versioned request against the shard secondary makes the shard primary refresh and update
- // the on-disk cache entry with a version, even though it already had an on-disk cache entry and
- // had the up-to-date version cached in memory.
- // Use readConcern 'local' because the default on secondaries is 'available'.
- assert.commandWorked(st.s.getDB("test").runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
- const versionOnShard2 =
- st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
- assert.docEq(versionOnConfig, versionOnShard2);
-
- // The shard secondary's in-memory version now matches the on-disk version.
- assert.eq(versionOnShard,
- st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
-
- st.stop();
-
+const st = new ShardingTest({shards: 1, rs: {nodes: 2}, other: {verbose: 2}});
+
+assert.commandWorked(st.s.getDB("test").getCollection("foo").insert({x: 1}));
+
+// The database is created with a version.
+const versionOnConfig =
+ st.s.getDB("config").getCollection("databases").findOne({_id: "test"}).version;
+assert.neq(null, versionOnConfig);
+
+// Before the shard refreshes, it does not have a cache entry for the database.
+assert.eq(null, st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}));
+
+// After the shard refreshes, it has a cache entry for the database with version matching the
+// version on the config server.
+assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: "test"}));
+const versionOnShard =
+ st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
+assert.docEq(versionOnConfig, versionOnShard);
+
+// The shard primary's in-memory version matches the on-disk version.
+assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+jsTest.log("Remove the database version from the shard's cache entry");
+assert.commandWorked(
+ st.shard0.getDB("config").getCollection("cache.databases").update({_id: "test"}, {
+ $unset: {version: ""}
+ }));
+assert.eq(
+ null,
+ st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version);
+
+// Deleting the version field from the on-disk entry did not affect the in-memory version.
+assert.eq(versionOnShard, st.shard0.adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+// The shard secondary does not have a version cached in memory.
+assert.eq({}, st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+// A versioned request against the shard secondary makes the shard primary refresh and update
+// the on-disk cache entry with a version, even though it already had an on-disk cache entry and
+// had the up-to-date version cached in memory.
+// Use readConcern 'local' because the default on secondaries is 'available'.
+assert.commandWorked(st.s.getDB("test").runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+const versionOnShard2 =
+ st.shard0.getDB("config").getCollection("cache.databases").findOne({_id: "test"}).version;
+assert.docEq(versionOnConfig, versionOnShard2);
+
+// The shard secondary's in-memory version now matches the on-disk version.
+assert.eq(versionOnShard,
+ st.rs0.getSecondary().adminCommand({getDatabaseVersion: "test"}).dbVersion);
+
+st.stop();
})();
diff --git a/jstests/sharding/database_versioning_safe_secondary_reads.js b/jstests/sharding/database_versioning_safe_secondary_reads.js
index 301f246f4e7..a4062f68686 100644
--- a/jstests/sharding/database_versioning_safe_secondary_reads.js
+++ b/jstests/sharding/database_versioning_safe_secondary_reads.js
@@ -6,217 +6,217 @@
* - the movePrimary critical section is entered on the primary node
*/
(function() {
- "use strict";
- load("jstests/libs/database_versioning.js");
-
- const dbName = "test";
-
- const st = new ShardingTest({
- mongos: 2,
- rs0: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
- rs1: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
- verbose: 2
- });
-
- // Before creating the database, none of the nodes have a cached entry for the database either
- // in memory or on disk.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Use 'enableSharding' to create the database only in the sharding catalog (the database will
- // not exist on any shards).
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
- // Check that a command that attaches databaseVersion returns empty results, even though the
- // database does not actually exist on any shard (because the version won't be checked).
- assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
- // in-memory and on-disk caches.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
- // its in-memory cache (its on-disk cache was updated when the primary refreshed, above).
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Use 'movePrimary' to ensure shard0 is the primary shard. This will create the database on the
- // shards only if shard0 was not already the primary shard.
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- const dbEntry1 = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
-
- // Ensure the database actually gets created on the primary shard by creating a collection in
- // it.
- assert.commandWorked(st.s.getDB(dbName).runCommand({create: "foo"}));
-
- // Run a command that attaches databaseVersion to cause the current primary shard's primary to
- // refresh its in-memory cached database version.
- jsTest.log("About to do listCollections with readPref=primary");
- assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
-
- // Ensure the current primary shard's primary has written the new database entry to disk.
- st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
-
- // Ensure the database entry on the current primary shard has replicated to the secondary.
- st.rs0.awaitReplication();
-
- // The primary shard's primary should have refreshed.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Now run a command that attaches databaseVersion with readPref=secondary to make the current
- // primary shard's secondary refresh its in-memory database version from its on-disk entry.
- jsTest.log("About to do listCollections with readPref=secondary");
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // The primary shard's secondary should have refreshed.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1.version);
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Make "staleMongos" load the stale database info into memory.
- const freshMongos = st.s0;
- const staleMongos = st.s1;
- staleMongos.getDB(dbName).runCommand({listCollections: 1});
-
- // Run movePrimary to ensure the movePrimary critical section clears the in-memory cache on the
- // old primary shard.
- jsTest.log("About to do movePrimary");
- assert.commandWorked(freshMongos.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
- const dbEntry2 = freshMongos.getDB("config").getCollection("databases").findOne({_id: dbName});
- assert.eq(dbEntry2.version.uuid, dbEntry1.version.uuid);
- assert.eq(dbEntry2.version.lastMod, dbEntry1.version.lastMod + 1);
-
- // Ensure the old primary shard's primary has written the 'enterCriticalSectionSignal' flag to
- // its on-disk database entry.
- st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
-
- // Ensure 'enterCriticalSectionSignal' flag has replicated to the secondary.
- st.rs0.awaitReplication();
-
- // The in-memory cached version should have been cleared on the old primary shard's primary and
- // secondary nodes.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
-
- // Run listCollections with readPref=secondary from the stale mongos. First, this should cause
- // the old primary shard's secondary to provoke the old primary shard's primary to refresh. Then
- // once the stale mongos refreshes, it should cause the new primary shard's secondary to provoke
- // the new primary shard's primary to refresh.
- jsTest.log("About to do listCollections with readPref=secondary after movePrimary");
- assert.commandWorked(staleMongos.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // All nodes should have refreshed.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2.version);
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2.version);
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2.version);
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2.version);
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- // Ensure that dropping the database drops it from all shards, which clears their in-memory
- // caches but not their on-disk caches.
- jsTest.log("About to drop database from the cluster");
- assert.commandWorked(freshMongos.getDB(dbName).runCommand({dropDatabase: 1}));
-
- // Ensure the drop has replicated to all nodes.
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
-
- // Once SERVER-34431 goes in, this should not have caused the in-memory versions to be cleared.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- // Confirm that we have a bug (SERVER-34431), where if a database is dropped and recreated on a
- // different shard, a stale mongos that has cached the old database's primary shard will *not*
- // be routed to the new database's primary shard (and will see an empty database).
-
- // Use 'enableSharding' to create the database only in the sharding catalog (the database will
- // not exist on any shards).
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
- // Simulate that the database was created on 'shard0' by directly modifying the database entry
- // (we cannot use movePrimary, since movePrimary creates the database on the shards).
- const dbEntry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
- assert.writeOK(st.s.getDB("config").getCollection("databases").update({_id: dbName}, {
- $set: {primary: st.shard0.shardName}
- }));
-
- assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
- // in-memory and on-disk caches.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
-
- // Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
- // its in-memory cache (its on-disk cache was already updated when the primary refreshed).
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- st.stop();
+"use strict";
+load("jstests/libs/database_versioning.js");
+
+const dbName = "test";
+
+const st = new ShardingTest({
+ mongos: 2,
+ rs0: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
+ rs1: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
+ verbose: 2
+});
+
+// Before creating the database, none of the nodes have a cached entry for the database either
+// in memory or on disk.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
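+// For reference, a minimal sketch of what the on-disk check helper loaded above is assumed to
+// verify (hypothetical: the real definition lives in jstests/libs/database_versioning.js, and
+// the 'config.cache.databases' collection name is an assumption about where a shard persists
+// its on-disk database entries). It is defined here for illustration only and is not called.
+function checkOnDiskDatabaseVersionSketch(node, dbName, expectedEntry) {
+    // A shard's persisted entry for 'dbName'; findOne() returns null when nothing has been
+    // written yet, which corresponds to an 'expectedEntry' of undefined in the calls below.
+    const entry = node.getDB("config").getCollection("cache.databases").findOne({_id: dbName});
+    assert.docEq(entry, expectedEntry === undefined ? null : expectedEntry);
+}
+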
+// Use 'enableSharding' to create the database only in the sharding catalog (the database will
+// not exist on any shards).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+
+// Check that a command that attaches databaseVersion returns empty results, even though the
+// database does not actually exist on any shard (because the version won't be checked).
+assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
+// in-memory and on-disk caches.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+assert.commandWorked(st.s.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
+// its in-memory cache (its on-disk cache was updated when the primary refreshed, above).
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Use st.ensurePrimaryShard (which issues movePrimary if needed) to make shard0 the primary
+// shard. This creates the database on the shards only if shard0 was not already the primary.
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+const dbEntry1 = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
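+// The entry fetched above is assumed to have the shape
+// {_id: dbName, primary: "<shardName>", partitioned: <bool>, version: {uuid: <UUID>, lastMod: 1}}
+// (inferred from the fields this test reads from it).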
+
+// Ensure the database actually gets created on the primary shard by creating a collection in
+// it.
+assert.commandWorked(st.s.getDB(dbName).runCommand({create: "foo"}));
+
+// Run a command that attaches databaseVersion to cause the current primary shard's primary to
+// refresh its in-memory cached database version.
+jsTest.log("About to do listCollections with readPref=primary");
+assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
+
+// Ensure the current primary shard's primary has written the new database entry to disk.
+st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
+
+// Ensure the database entry on the current primary shard has replicated to the secondary.
+st.rs0.awaitReplication();
+
+// The primary shard's primary should have refreshed.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Now run a command that attaches databaseVersion with readPref=secondary to make the current
+// primary shard's secondary refresh its in-memory database version from its on-disk entry.
+jsTest.log("About to do listCollections with readPref=secondary");
+assert.commandWorked(st.s.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// The primary shard's secondary should have refreshed.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1.version);
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Make "staleMongos" load the stale database info into memory.
+const freshMongos = st.s0;
+const staleMongos = st.s1;
+staleMongos.getDB(dbName).runCommand({listCollections: 1});
+
+// Run movePrimary to ensure the movePrimary critical section clears the in-memory cache on the
+// old primary shard.
+jsTest.log("About to do movePrimary");
+assert.commandWorked(freshMongos.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
+const dbEntry2 = freshMongos.getDB("config").getCollection("databases").findOne({_id: dbName});
+assert.eq(dbEntry2.version.uuid, dbEntry1.version.uuid);
+assert.eq(dbEntry2.version.lastMod, dbEntry1.version.lastMod + 1);
+
+// Ensure the old primary shard's primary has written the 'enterCriticalSectionSignal' flag to
+// its on-disk database entry.
+st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
+
+// Ensure 'enterCriticalSectionSignal' flag has replicated to the secondary.
+st.rs0.awaitReplication();
+
+// The in-memory cached version should have been cleared on the old primary shard's primary and
+// secondary nodes.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+
+// Run listCollections with readPref=secondary from the stale mongos. First, this should cause
+// the old primary shard's secondary to provoke the old primary shard's primary to refresh. Then
+// once the stale mongos refreshes, it should cause the new primary shard's secondary to provoke
+// the new primary shard's primary to refresh.
+jsTest.log("About to do listCollections with readPref=secondary after movePrimary");
+assert.commandWorked(staleMongos.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// All nodes should have refreshed.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2.version);
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2.version);
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2.version);
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2.version);
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+// Ensure that dropping the database drops it from all shards, which clears their in-memory
+// caches but not their on-disk caches.
+jsTest.log("About to drop database from the cluster");
+assert.commandWorked(freshMongos.getDB(dbName).runCommand({dropDatabase: 1}));
+
+// Ensure the drop has replicated to all nodes.
+st.rs0.awaitReplication();
+st.rs1.awaitReplication();
+
+// Once SERVER-34431 goes in, this should not have caused the in-memory versions to be cleared.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+// Confirm that we have a bug (SERVER-34431), where if a database is dropped and recreated on a
+// different shard, a stale mongos that has cached the old database's primary shard will *not*
+// be routed to the new database's primary shard (and will see an empty database).
+
+// Use 'enableSharding' to create the database only in the sharding catalog (the database will
+// not exist on any shards).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+
+// Simulate that the database was created on 'shard0' by directly modifying the database entry
+// (we cannot use movePrimary, since movePrimary creates the database on the shards).
+const dbEntry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+assert.writeOK(st.s.getDB("config").getCollection("databases").update({_id: dbName}, {
+ $set: {primary: st.shard0.shardName}
+}));
+
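+// Sanity-check (added for illustration) that the catalog now names shard0 as the primary.
+assert.eq(st.shard0.shardName,
+          st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).primary);
+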
+assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
+// in-memory and on-disk caches.
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+assert.commandWorked(st.s.getDB(dbName).runCommand(
+ {listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
+
+// Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
+// its in-memory cache (its on-disk cache was already updated when the primary refreshed).
+checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
+checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+
+st.stop();
})();
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index 04c3075b1f1..87b13519678 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -8,38 +8,38 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var dbname = "test";
- var coll = "foo";
- var ns = dbname + "." + coll;
+var dbname = "test";
+var coll = "foo";
+var ns = dbname + "." + coll;
- assert.commandWorked(st.s0.adminCommand({enablesharding: dbname}));
- st.ensurePrimaryShard(dbname, st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: dbname}));
+st.ensurePrimaryShard(dbname, st.shard1.shardName);
- var t = st.s0.getDB(dbname).getCollection(coll);
+var t = st.s0.getDB(dbname).getCollection(coll);
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < 200000; i++) {
- bulk.insert({a: i});
- }
- assert.writeOK(bulk.execute());
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < 200000; i++) {
+ bulk.insert({a: i});
+}
+assert.writeOK(bulk.execute());
- // enable sharding of the collection. Only 1 chunk.
- t.ensureIndex({a: 1});
+// Enable sharding of the collection. Initially there is only one chunk.
+t.ensureIndex({a: 1});
- assert.commandWorked(st.s0.adminCommand({shardcollection: ns, key: {a: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardcollection: ns, key: {a: 1}}));
- // start a parallel shell that deletes things
- var join = startParallelShell("db." + coll + ".remove({});", st.s0.port);
+// Start a parallel shell that deletes all documents in the collection.
+var join = startParallelShell("db." + coll + ".remove({});", st.s0.port);
- // migrate while deletions are happening
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}));
+// Migrate the chunk while the deletions are happening.
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}));
- join();
+join();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 0dbfa0b9502..26347ec8330 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,25 +1,25 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
+var s = new ShardingTest({shards: 2});
- assert.eq(2, s.config.shards.count(), "server count wrong");
+assert.eq(2, s.config.shards.count(), "server count wrong");
- var test1 = s.getDB("test1").foo;
- assert.writeOK(test1.insert({a: 1}));
- assert.writeOK(test1.insert({a: 2}));
- assert.writeOK(test1.insert({a: 3}));
- assert.eq(3, test1.count());
+var test1 = s.getDB("test1").foo;
+assert.writeOK(test1.insert({a: 1}));
+assert.writeOK(test1.insert({a: 2}));
+assert.writeOK(test1.insert({a: 3}));
+assert.eq(3, test1.count());
- assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}), "Bad hostname");
+assert.commandFailed(s.s0.adminCommand({addshard: "sdd$%", maxTimeMS: 60000}), "Bad hostname");
- var portWithoutHostRunning = allocatePort();
- assert.commandFailed(
- s.s0.adminCommand({addshard: "127.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
- "Host which is not up");
- assert.commandFailed(
- s.s0.adminCommand({addshard: "10.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
- "Allowed shard in IP when config is localhost");
+var portWithoutHostRunning = allocatePort();
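+// allocatePort() reserves a port number with nothing listening on it, so both addShard
+// attempts below must fail to reach a host.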
+assert.commandFailed(
+ s.s0.adminCommand({addshard: "127.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
+ "Host which is not up");
+assert.commandFailed(
+ s.s0.adminCommand({addshard: "10.0.0.1:" + portWithoutHostRunning, maxTimeMS: 60000}),
+ "Allowed shard in IP when config is localhost");
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index d6982c3280a..03d14cc970c 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -1,34 +1,33 @@
// Tests disabling of autosplit.
(function() {
- 'use strict';
+'use strict';
- var chunkSizeMB = 1;
+var chunkSizeMB = 1;
- // Autosplit is disabled by default, but specify it anyway in case the default changes,
- // especially since it defaults to the enableBalancer setting.
- var st = new ShardingTest(
- {shards: 1, mongos: 1, other: {chunkSize: chunkSizeMB, enableAutoSplit: false}});
+// Autosplit is disabled by default, but specify it anyway in case the default changes,
+// especially since it defaults to the enableBalancer setting.
+var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {chunkSize: chunkSizeMB, enableAutoSplit: false}});
- var data = "x";
- while (data.length < chunkSizeMB * 1024 * 1024) {
- data += data;
- }
+var data = "x";
+while (data.length < chunkSizeMB * 1024 * 1024) {
+ data += data;
+}
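+// 'data' is now at least chunkSizeMB (1 MB), so the 20 inserts below write roughly 20x the
+// configured chunk size into what must remain a single chunk.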
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- for (var i = 0; i < 20; i++) {
- coll.insert({data: data});
- }
+for (var i = 0; i < 20; i++) {
+ coll.insert({data: data});
+}
- // Make sure we haven't split
- assert.eq(1, config.chunks.find({ns: coll + ""}).count());
-
- st.stop();
+// Make sure the collection hasn't been split.
+assert.eq(1, config.chunks.find({ns: coll + ""}).count());
+st.stop();
})();
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index 63a3b533597..180741530b9 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -1,35 +1,35 @@
// Test that dropping the config database is completely disabled via
// mongos and via mongod, if started with --configsvr
(function() {
- "use strict";
+"use strict";
- var getConfigsvrToWriteTo = function(st) {
- if (st.configRS) {
- return st.configRS.getPrimary();
- } else {
- return st._configServers[0];
- }
- };
+var getConfigsvrToWriteTo = function(st) {
+ if (st.configRS) {
+ return st.configRS.getPrimary();
+ } else {
+ return st._configServers[0];
+ }
+};
- var st = new ShardingTest({shards: 2});
- var mongos = st.s;
- var config = getConfigsvrToWriteTo(st).getDB('config');
+var st = new ShardingTest({shards: 2});
+var mongos = st.s;
+var config = getConfigsvrToWriteTo(st).getDB('config');
- // Try to drop config db via configsvr
+// Try to drop config db via configsvr
- print("1: Try to drop config database via configsvr");
- assert.eq(0, config.dropDatabase().ok);
- assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
- config.dropDatabase().errmsg);
+print("1: Try to drop config database via configsvr");
+assert.eq(0, config.dropDatabase().ok);
+assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
+ config.dropDatabase().errmsg);
- // Try to drop config db via mongos
- var config = mongos.getDB("config");
+// Try to drop config db via mongos
+var config = mongos.getDB("config");
- print("1: Try to drop config database via mongos");
- assert.eq(0, config.dropDatabase().ok);
+print("1: Try to drop config database via mongos");
+assert.eq(0, config.dropDatabase().ok);
- // 20 = ErrorCodes::IllegalOperation
- assert.eq(20, config.dropDatabase().code);
+// 20 = ErrorCodes::IllegalOperation
+assert.eq(20, config.dropDatabase().code);
- st.stop();
+st.stop();
}());
\ No newline at end of file
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 95ca3abd500..9de2ecb6d4a 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -1,67 +1,65 @@
// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
(function() {
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var config = mongos.getDB("config");
+var mongos = st.s0;
+var config = mongos.getDB("config");
- var dbA = mongos.getDB("DropSharded_A");
- var dbB = mongos.getDB("DropSharded_B");
- var dbC = mongos.getDB("DropSharded_C");
+var dbA = mongos.getDB("DropSharded_A");
+var dbB = mongos.getDB("DropSharded_B");
+var dbC = mongos.getDB("DropSharded_C");
- // Dropping a database that doesn't exist will result in an info field in the response.
- var res = assert.commandWorked(dbA.dropDatabase());
- assert.eq('database does not exist', res.info);
+// Dropping a database that doesn't exist will result in an info field in the response.
+var res = assert.commandWorked(dbA.dropDatabase());
+assert.eq('database does not exist', res.info);
- var numDocs = 3000;
- var numColls = 10;
- for (var i = 0; i < numDocs; i++) {
- dbA.getCollection("data" + (i % numColls)).insert({_id: i});
- dbB.getCollection("data" + (i % numColls)).insert({_id: i});
- dbC.getCollection("data" + (i % numColls)).insert({_id: i});
- }
+var numDocs = 3000;
+var numColls = 10;
+for (var i = 0; i < numDocs; i++) {
+ dbA.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbB.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbC.getCollection("data" + (i % numColls)).insert({_id: i});
+}
- var key = {_id: 1};
- for (var i = 0; i < numColls; i++) {
- st.shardColl(dbA.getCollection("data" + i), key);
- st.shardColl(dbB.getCollection("data" + i), key);
- st.shardColl(dbC.getCollection("data" + i), key);
- }
+var key = {_id: 1};
+for (var i = 0; i < numColls; i++) {
+ st.shardColl(dbA.getCollection("data" + i), key);
+ st.shardColl(dbB.getCollection("data" + i), key);
+ st.shardColl(dbC.getCollection("data" + i), key);
+}
- // Insert a document to an unsharded collection and make sure that the document is there.
- assert.writeOK(dbA.unsharded.insert({dummy: 1}));
- var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
- var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
- var dbAOnShard = shardHostConn.getDB(dbA.getName());
- assert.neq(null, dbAOnShard.unsharded.findOne({dummy: 1}));
+// Insert a document into an unsharded collection and make sure that the document is there.
+assert.writeOK(dbA.unsharded.insert({dummy: 1}));
+var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
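+// Connect directly to the primary shard (bypassing mongos) so that, after the drop, we can
+// verify on the shard itself that the unsharded collection is gone.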
+var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
+var dbAOnShard = shardHostConn.getDB(dbA.getName());
+assert.neq(null, dbAOnShard.unsharded.findOne({dummy: 1}));
- // Drop the non-suffixed db and ensure that it is the only one that was dropped.
- assert.commandWorked(dbA.dropDatabase());
- var dbs = mongos.getDBNames();
- for (var i = 0; i < dbs.length; i++) {
- assert.neq(dbs[i], "" + dbA);
- }
+// Drop dbA and ensure that it is the only database that was dropped.
+assert.commandWorked(dbA.dropDatabase());
+var dbs = mongos.getDBNames();
+for (var i = 0; i < dbs.length; i++) {
+ assert.neq(dbs[i], "" + dbA);
+}
- assert.eq(0, config.databases.count({_id: dbA.getName()}));
- assert.eq(1, config.databases.count({_id: dbB.getName()}));
- assert.eq(1, config.databases.count({_id: dbC.getName()}));
+assert.eq(0, config.databases.count({_id: dbA.getName()}));
+assert.eq(1, config.databases.count({_id: dbB.getName()}));
+assert.eq(1, config.databases.count({_id: dbC.getName()}));
- // 10 dropped collections
- assert.eq(numColls,
- config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
+// 10 dropped collections
+assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
- // 20 active (dropped is missing)
- assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));
- assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbC + "\\..*")}));
+// 20 collections remain active (their entries lack the 'dropped' field)
+assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));
+assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbC + "\\..*")}));
- for (var i = 0; i < numColls; i++) {
- assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
- assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
- }
+for (var i = 0; i < numColls; i++) {
+ assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
+ assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
+}
- // Check that the unsharded collection should have been dropped.
- assert.eq(null, dbAOnShard.unsharded.findOne());
-
- st.stop();
+// Check that the unsharded collection has been dropped.
+assert.eq(null, dbAOnShard.unsharded.findOne());
+st.stop();
})();
diff --git a/jstests/sharding/drop_sharded_db_tags_cleanup.js b/jstests/sharding/drop_sharded_db_tags_cleanup.js
index e58b21d39eb..e5c89b7a551 100644
--- a/jstests/sharding/drop_sharded_db_tags_cleanup.js
+++ b/jstests/sharding/drop_sharded_db_tags_cleanup.js
@@ -1,29 +1,29 @@
// Tests that dropping a database also removes the zones associated with the
// collections in the database.
(function() {
- var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardName = configDB.shards.findOne()._id;
+var st = new ShardingTest({shards: 1});
+var configDB = st.s.getDB('config');
+var shardName = configDB.shards.findOne()._id;
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
- var tagDoc = configDB.tags.findOne();
- assert.eq(1, configDB.tags.find().length());
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+var tagDoc = configDB.tags.findOne();
+assert.eq(1, configDB.tags.find().length());
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- var db = st.s.getDB("test");
- db.dropDatabase();
+var db = st.s.getDB("test");
+db.dropDatabase();
- assert.eq(null, configDB.tags.findOne());
- assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
- assert.commandWorked(st.removeRangeFromZone('test.user', {x: 0}, {x: 10}));
+assert.eq(null, configDB.tags.findOne());
+assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: 'x'}));
+assert.commandWorked(st.removeRangeFromZone('test.user', {x: 0}, {x: 10}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index c1254985dd6..f8292262249 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -2,59 +2,58 @@
// Tests that we can dump collection metadata via getShardVersion()
//
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var shardAdmin = st.shard0.getDB("admin");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-
- assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
-
- // Make sure we have chunks information on the shard after the shard collection call
- var result = assert.commandWorked(
- shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
- printjson(result);
- var metadata = result.metadata;
-
- assert.eq(metadata.chunks.length, 1);
- assert.eq(metadata.pending.length, 0);
- assert.eq(metadata.chunks[0][0]._id, MinKey);
- assert.eq(metadata.chunks[0][1]._id, MaxKey);
- assert.eq(metadata.shardVersion, result.global);
-
- // Make sure a collection with no metadata still returns the metadata field
- assert.neq(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata,
- undefined);
-
- // Make sure we get multiple chunks after a split and refresh -- splits by themselves do not
- // cause the shard to refresh.
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(
- st.shard0.getDB('admin').runCommand({_flushRoutingTableCacheUpdates: coll + ""}));
-
- assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
- printjson(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
-
- // Make sure we have chunks info
- result = shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true});
- assert.commandWorked(result);
- metadata = result.metadata;
-
- assert.eq(metadata.chunks.length, 2);
- assert.eq(metadata.pending.length, 0);
- assert(metadata.chunks[0][0]._id + "" == MinKey + "");
- assert(metadata.chunks[0][1]._id == 0);
- assert(metadata.chunks[1][0]._id == 0);
- assert(metadata.chunks[1][1]._id + "" == MaxKey + "");
- assert(metadata.shardVersion + "" == result.global + "");
-
- st.stop();
-
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var shardAdmin = st.shard0.getDB("admin");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+
+assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
+
+// Make sure we have chunks information on the shard after the shard collection call
+var result =
+ assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
+printjson(result);
+var metadata = result.metadata;
+
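+// 'metadata' is assumed to have the shape
+// {chunks: [[<minBound>, <maxBound>], ...], pending: [...], shardVersion: <version>}
+// (inferred from the assertions below).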
+assert.eq(metadata.chunks.length, 1);
+assert.eq(metadata.pending.length, 0);
+assert.eq(metadata.chunks[0][0]._id, MinKey);
+assert.eq(metadata.chunks[0][1]._id, MaxKey);
+assert.eq(metadata.shardVersion, result.global);
+
+// Make sure a collection with no metadata still returns the metadata field
+assert.neq(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata,
+ undefined);
+
+// Make sure we get multiple chunks after a split and refresh -- splits by themselves do not
+// cause the shard to refresh.
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+assert.commandWorked(
+ st.shard0.getDB('admin').runCommand({_flushRoutingTableCacheUpdates: coll + ""}));
+
+assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
+printjson(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
+
+// Make sure we have chunks info
+result = shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true});
+assert.commandWorked(result);
+metadata = result.metadata;
+
+assert.eq(metadata.chunks.length, 2);
+assert.eq(metadata.pending.length, 0);
+assert(metadata.chunks[0][0]._id + "" == MinKey + "");
+assert(metadata.chunks[0][1]._id == 0);
+assert(metadata.chunks[1][0]._id == 0);
+assert(metadata.chunks[1][1]._id + "" == MaxKey + "");
+assert(metadata.shardVersion + "" == result.global + "");
+
+st.stop();
})();
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index 0ee44a76988..65fe1cccd7f 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -1,59 +1,59 @@
// Verifies that mongos correctly handles empty documents when all fields are projected out
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2});
-
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
- printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
-
- st.printShardingStatus();
-
- // Insert 100 documents, half of which have an extra field
- for (var i = -50; i < 50; i++) {
- var doc = {};
- if (i >= 0)
- doc.positiveId = true;
- assert.writeOK(coll.insert(doc));
- }
-
- //
- //
- // Ensure projecting out all fields still returns the same number of documents
- assert.eq(100, coll.find({}).itcount());
- assert.eq(100, coll.find({}).sort({positiveId: 1}).itcount());
- assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());
- // Can't remove sort key from projection (SERVER-11877) but some documents will still be empty
- assert.eq(100, coll.find({}, {_id: 0}).sort({positiveId: 1}).itcount());
-
- //
- //
- // Ensure projecting out all fields still returns the same ordering of documents
- var assertLast50Positive = function(sortedDocs) {
- assert.eq(100, sortedDocs.length);
- var positiveCount = 0;
- for (var i = 0; i < sortedDocs.length; ++i) {
- if (sortedDocs[i].positiveId) {
- positiveCount++;
- } else {
- // Make sure only the last set of documents have "positiveId" set
- assert.eq(positiveCount, 0);
- }
+'use strict';
+
+var st = new ShardingTest({shards: 2});
+
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+
+st.printShardingStatus();
+
+// Insert 100 documents, half of which have an extra field
+for (var i = -50; i < 50; i++) {
+ var doc = {};
+ if (i >= 0)
+ doc.positiveId = true;
+ assert.writeOK(coll.insert(doc));
+}
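+// After projecting out _id, documents with _id < 0 are completely empty, while those with
+// _id >= 0 retain only the 'positiveId' field.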
+
+//
+//
+// Ensure projecting out all fields still returns the same number of documents
+assert.eq(100, coll.find({}).itcount());
+assert.eq(100, coll.find({}).sort({positiveId: 1}).itcount());
+assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());
+// Can't remove sort key from projection (SERVER-11877) but some documents will still be empty
+assert.eq(100, coll.find({}, {_id: 0}).sort({positiveId: 1}).itcount());
+
+//
+//
+// Ensure projecting out all fields still returns the same ordering of documents
+var assertLast50Positive = function(sortedDocs) {
+ assert.eq(100, sortedDocs.length);
+ var positiveCount = 0;
+ for (var i = 0; i < sortedDocs.length; ++i) {
+ if (sortedDocs[i].positiveId) {
+ positiveCount++;
+ } else {
+ // Make sure only the last set of documents have "positiveId" set
+ assert.eq(positiveCount, 0);
}
- assert.eq(positiveCount, 50);
- };
+ }
+ assert.eq(positiveCount, 50);
+};
- assertLast50Positive(coll.find({}).sort({positiveId: 1}).toArray());
- assertLast50Positive(coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray());
+assertLast50Positive(coll.find({}).sort({positiveId: 1}).toArray());
+assertLast50Positive(coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index fb0cbdbdb4a..d185ff11b6e 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -3,57 +3,56 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 2, shards: 2});
+var st = new ShardingTest({mongos: 2, shards: 2});
- // enableSharding can run only on mongos.
- assert.commandFailedWithCode(
- st.rs0.getPrimary().getDB('admin').runCommand({enableSharding: 'db'}),
- ErrorCodes.CommandNotFound);
+// enableSharding can run only on mongos.
+assert.commandFailedWithCode(st.rs0.getPrimary().getDB('admin').runCommand({enableSharding: 'db'}),
+ ErrorCodes.CommandNotFound);
- // enableSharding can run only against the admin database.
- assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}),
- ErrorCodes.Unauthorized);
+// enableSharding can run only against the admin database.
+assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}),
+ ErrorCodes.Unauthorized);
- // Can't shard 'local' database.
- assert.commandFailed(st.s0.adminCommand({enableSharding: 'local'}));
+// Can't shard 'local' database.
+assert.commandFailed(st.s0.adminCommand({enableSharding: 'local'}));
- // Can't shard 'admin' database.
- assert.commandFailed(st.s0.adminCommand({enableSharding: 'admin'}));
+// Can't shard 'admin' database.
+assert.commandFailed(st.s0.adminCommand({enableSharding: 'admin'}));
- // Can't shard db with the name that just differ on case.
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+// Can't enable sharding on two databases whose names differ only in case.
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
- assert.commandFailedWithCode(st.s0.adminCommand({enableSharding: 'DB'}),
- ErrorCodes.DatabaseDifferCase);
+assert.commandFailedWithCode(st.s0.adminCommand({enableSharding: 'DB'}),
+ ErrorCodes.DatabaseDifferCase);
- // Can't shard invalid db name.
- assert.commandFailed(st.s0.adminCommand({enableSharding: 'a.b'}));
- assert.commandFailed(st.s0.adminCommand({enableSharding: ''}));
+// Can't enable sharding on an invalid database name.
+assert.commandFailed(st.s0.adminCommand({enableSharding: 'a.b'}));
+assert.commandFailed(st.s0.adminCommand({enableSharding: ''}));
- // Attempting to shard already sharded database returns success.
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+// Attempting to enable sharding on a database that already has it enabled returns success.
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
- // Verify config.databases metadata.
- assert.writeOK(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
- assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
+// Verify config.databases metadata.
+assert.writeOK(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
+assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
- // Sharding a collection before 'enableSharding' is called fails
- assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+// Sharding a collection before 'enableSharding' is called on its database fails.
+assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- assert.writeOK(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
- assert.writeOK(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
+assert.writeOK(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
+assert.writeOK(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
- // Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- assert.commandWorked(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+// Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+assert.commandWorked(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js
index 4c57885ce10..259d05ff716 100644
--- a/jstests/sharding/enforce_zone_policy.js
+++ b/jstests/sharding/enforce_zone_policy.js
@@ -1,105 +1,103 @@
// Tests that changing the zones on a shard at runtime results in a correct distribution of
// chunks across the cluster
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 1});
+var st = new ShardingTest({shards: 3, mongos: 1});
- assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- var testDB = st.s0.getDB('test');
- var configDB = st.s0.getDB('config');
+var testDB = st.s0.getDB('test');
+var configDB = st.s0.getDB('config');
- var bulk = testDB.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 9; i++) {
- bulk.insert({_id: i, x: i});
- }
- assert.writeOK(bulk.execute());
+var bulk = testDB.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 9; i++) {
+ bulk.insert({_id: i, x: i});
+}
+assert.writeOK(bulk.execute());
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
- // Produce 9 chunks with min value at the documents just inserted
- for (var i = 0; i < 8; i++) {
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {_id: i}}));
- }
+// Produce 9 chunks with min value at the documents just inserted
+for (var i = 0; i < 8; i++) {
+ assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {_id: i}}));
+}
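+// Eight splits at _id 0..7 plus the initial chunk yield 9 chunks; the zone ranges added below
+// presumably introduce two more split points (at -100 and 100), matching the 11 chunks
+// asserted later.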
- /**
- * Waits for the balancer state described by the checking logic function (checkFunc) to be
- * reached and ensures that it does not change from that state at the next balancer round.
- */
- function assertBalanceCompleteAndStable(checkFunc, stepName) {
- st.printShardingStatus(true);
+/**
+ * Waits for the balancer state described by the checking logic function (checkFunc) to be
+ * reached and ensures that it does not change from that state at the next balancer round.
+ */
+function assertBalanceCompleteAndStable(checkFunc, stepName) {
+ st.printShardingStatus(true);
- assert.soon(
- checkFunc, 'Balance at step ' + stepName + ' did not happen', 3 * 60 * 1000, 2000);
+ assert.soon(checkFunc, 'Balance at step ' + stepName + ' did not happen', 3 * 60 * 1000, 2000);
- st.waitForBalancer(true, 60000);
- st.printShardingStatus(true);
- assert(checkFunc());
+ st.waitForBalancer(true, 60000);
+ st.printShardingStatus(true);
+ assert(checkFunc());
- jsTestLog('Completed step ' + stepName);
- }
+ jsTestLog('Completed step ' + stepName);
+}
- /**
- * Checker function to be used with assertBalanceCompleteAndStable, which ensures that the
- * cluster is evenly balanced.
- */
- function checkClusterEvenlyBalanced() {
- var maxChunkDiff = st.chunkDiff('foo', 'test');
- return maxChunkDiff <= 1;
- }
+/**
+ * Checker function to be used with assertBalanceCompleteAndStable, which ensures that the
+ * cluster is evenly balanced.
+ */
+function checkClusterEvenlyBalanced() {
+ var maxChunkDiff = st.chunkDiff('foo', 'test');
+ return maxChunkDiff <= 1;
+}
- st.startBalancer();
+st.startBalancer();
- // Initial balance
- assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'initial');
+// Initial balance
+assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'initial');
- // Spread chunks correctly across zones
- st.addShardTag(st.shard0.shardName, 'a');
- st.addShardTag(st.shard1.shardName, 'a');
- st.addTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
+// Spread chunks correctly across zones
+st.addShardTag(st.shard0.shardName, 'a');
+st.addShardTag(st.shard1.shardName, 'a');
+st.addTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
- st.addShardTag(st.shard2.shardName, 'b');
- st.addTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
- st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
+st.addShardTag(st.shard2.shardName, 'b');
+st.addTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
+st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
- assertBalanceCompleteAndStable(function() {
- var chunksOnShard2 = configDB.chunks.find({ns: 'test.foo', shard: st.shard2.shardName})
- .sort({min: 1})
- .toArray();
+assertBalanceCompleteAndStable(function() {
+ var chunksOnShard2 =
+ configDB.chunks.find({ns: 'test.foo', shard: st.shard2.shardName}).sort({min: 1}).toArray();
- jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2));
+ jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2));
- if (chunksOnShard2.length != 2) {
- return false;
- }
+ if (chunksOnShard2.length != 2) {
+ return false;
+ }
- return chunksOnShard2[0].min._id == MinKey && chunksOnShard2[0].max._id == -100 &&
- chunksOnShard2[1].min._id == 100 && chunksOnShard2[1].max._id == MaxKey;
- }, 'chunks to zones a and b');
+ return chunksOnShard2[0].min._id == MinKey && chunksOnShard2[0].max._id == -100 &&
+ chunksOnShard2[1].min._id == 100 && chunksOnShard2[1].max._id == MaxKey;
+}, 'chunks to zones a and b');
- // Tag the entire collection to shard0 and wait for everything to move to that shard
- st.removeTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
- st.removeTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
- st.removeTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
+// Tag the entire collection to shard0 and wait for everything to move to that shard
+st.removeTagRange('test.foo', {_id: -100}, {_id: 100}, 'a');
+st.removeTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b');
+st.removeTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b');
- st.removeShardTag(st.shard1.shardName, 'a');
- st.removeShardTag(st.shard2.shardName, 'b');
- st.addTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
+st.removeShardTag(st.shard1.shardName, 'a');
+st.removeShardTag(st.shard2.shardName, 'b');
+st.addTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
- assertBalanceCompleteAndStable(function() {
- var counts = st.chunkCounts('foo');
- printjson(counts);
- return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 &&
- counts[st.shard2.shardName] == 0;
- }, 'all chunks to zone a');
+assertBalanceCompleteAndStable(function() {
+ var counts = st.chunkCounts('foo');
+ printjson(counts);
+ return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 &&
+ counts[st.shard2.shardName] == 0;
+}, 'all chunks to zone a');
- // Remove all zones and ensure collection is correctly redistributed
- st.removeShardTag(st.shard0.shardName, 'a');
- st.removeTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
+// Remove all zones and ensure collection is correctly redistributed
+st.removeShardTag(st.shard0.shardName, 'a');
+st.removeTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a');
- assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final');
+assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final');
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/error_during_agg_getmore.js b/jstests/sharding/error_during_agg_getmore.js
index d6f3f8a2f90..74933437c16 100644
--- a/jstests/sharding/error_during_agg_getmore.js
+++ b/jstests/sharding/error_during_agg_getmore.js
@@ -1,52 +1,51 @@
// This test was designed to reproduce SERVER-31475. It issues sharded aggregations with an error
// returned from one shard, and a delayed response from another shard.
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, useBridge: true});
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- // Delay messages between shard 1 and the mongos, long enough that shard 1's responses will
- // likely arrive after the response from shard 0, but not so long that the background cluster
- // client cleanup job will have been given a chance to run.
- const delayMillis = 100;
- st.rs1.getPrimary().delayMessagesFrom(st.s, delayMillis);
-
- const nTrials = 10;
- for (let i = 1; i < 10; ++i) {
- // This will trigger an error on shard 0, but not shard 1. We set up a delay from shard 1,
- // so the response should get back after the error has been returned to the client. We use a
- // batch size of 0 to ensure the error happens during a getMore.
- assert.throws(
- () => mongosColl
- .aggregate([{$project: {_id: 0, x: {$divide: [2, {$add: ["$_id", 1]}]}}}],
- {cursor: {batchSize: 0}})
- .itcount());
- }
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({shards: 2, useBridge: true});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey] chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1}));
+assert.writeOK(mongosColl.insert({_id: 1}));
+
+// Delay messages between shard 1 and the mongos, long enough that shard 1's responses will
+// likely arrive after the response from shard 0, but not so long that the background cluster
+// client cleanup job will have been given a chance to run.
+const delayMillis = 100;
+st.rs1.getPrimary().delayMessagesFrom(st.s, delayMillis);
+
+const nTrials = 10;
+for (let i = 0; i < nTrials; ++i) {
+    // This will trigger an error on shard 0 (for the document {_id: -1}, $add yields 0, so the
+    // $divide fails with a divide-by-zero error), but not on shard 1. We set up a delay from
+    // shard 1, so its response should get back after the error has been returned to the client.
+    // We use a batch size of 0 to ensure the error happens during a getMore.
+ assert.throws(() =>
+ mongosColl
+ .aggregate([{$project: {_id: 0, x: {$divide: [2, {$add: ["$_id", 1]}]}}}],
+ {cursor: {batchSize: 0}})
+ .itcount());
+}
+
+st.stop();
}());
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 596534bf094..7fe4822e295 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -1,24 +1,24 @@
(function() {
- // Tests that errors encountered on shards are correctly returned to the client when mongos uses
- // the legacy DBClientCursor method of executing commands on shards. We use aggregation here
- // specifically because it is one of the few query paths that still uses the legacy DBClient
- // classes in mongos.
- "use strict";
+// Tests that errors encountered on shards are correctly returned to the client when mongos uses
+// the legacy DBClientCursor method of executing commands on shards. We use aggregation here
+// specifically because it is one of the few query paths that still uses the legacy DBClient
+// classes in mongos.
+"use strict";
- var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
+var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
- var db = st.getDB('test');
- db.setSlaveOk(true);
+var db = st.getDB('test');
+db.setSlaveOk(true);
- assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
- assert.commandWorked(db.runCommand(
- {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
+assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
+assert.commandWorked(db.runCommand(
+ {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
- assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
+assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
- var res = db.runCommand(
- {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
- assert.commandFailed(res);
- assert.eq("$add only supports numeric or date types, not array", res.errmsg, printjson(res));
- st.stop();
+var res = db.runCommand(
+ {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
+assert.commandFailed(res);
+assert.eq("$add only supports numeric or date types, not array", res.errmsg, printjson(res));
+st.stop();
}());
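The array-operand failure exercised above is not sharding-specific; $add itself rejects arrays. A standalone repro, as a sketch ("plain" is an assumed collection name):

    assert.writeOK(db.plain.insert({a: [1, 2]}));
    const reply = db.runCommand(
        {aggregate: "plain", pipeline: [{$project: {total: {$add: ["$a", 1]}}}], cursor: {}});
    assert.commandFailed(reply);  // "$add only supports numeric or date types, not array"

The test's real subject is that this errmsg makes it back to the client intact through mongos's legacy DBClientCursor path.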
diff --git a/jstests/sharding/explainFind_stale_mongos.js b/jstests/sharding/explainFind_stale_mongos.js
index d4ed2972541..93a5d1489cc 100644
--- a/jstests/sharding/explainFind_stale_mongos.js
+++ b/jstests/sharding/explainFind_stale_mongos.js
@@ -3,31 +3,31 @@
* find sent using the legacy query mode (it retries on the stale shardVersion error internally).
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({mongos: 2, shards: 1, verbose: 2});
+const st = new ShardingTest({mongos: 2, shards: 1, verbose: 2});
- let staleMongos = st.s0;
- let freshMongos = st.s1;
+let staleMongos = st.s0;
+let freshMongos = st.s1;
- jsTest.log("Make the stale mongos load a cache entry for db " + dbName + " once");
- assert.writeOK(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
+jsTest.log("Make the stale mongos load a cache entry for db " + dbName + " once");
+assert.writeOK(staleMongos.getDB(dbName).getCollection(collName).insert({_id: 1}));
- jsTest.log("Call shardCollection on " + ns + " from the fresh mongos");
- assert.commandWorked(freshMongos.adminCommand({enableSharding: dbName}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {"_id": 1}}));
+jsTest.log("Call shardCollection on " + ns + " from the fresh mongos");
+assert.commandWorked(freshMongos.adminCommand({enableSharding: dbName}));
+assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {"_id": 1}}));
- jsTest.log("Ensure the shard knows " + ns + " is sharded");
- assert.commandWorked(
- st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns, syncFromConfig: true}));
+jsTest.log("Ensure the shard knows " + ns + " is sharded");
+assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns, syncFromConfig: true}));
- jsTest.log("Run explain find on " + ns + " from the stale mongos");
- staleMongos.getDB(dbName).getMongo().forceReadMode("legacy");
- staleMongos.getDB(dbName).getCollection(collName).find({$query: {}, $explain: true}).next();
+jsTest.log("Run explain find on " + ns + " from the stale mongos");
+staleMongos.getDB(dbName).getMongo().forceReadMode("legacy");
+staleMongos.getDB(dbName).getCollection(collName).find({$query: {}, $explain: true}).next();
- st.stop();
+st.stop();
})();
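For contrast, a sketch of the two explain styles in play ('conn' and 'coll' are assumed handles); the test deliberately uses the first so the explain travels over the legacy query path:

    conn.forceReadMode("legacy");
    coll.find({$query: {}, $explain: true}).next();  // explain wrapped inside the OP_QUERY message
    conn.forceReadMode("commands");
    coll.find({}).explain();                         // sent as a separate explain command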
diff --git a/jstests/sharding/explain_agg_read_pref.js b/jstests/sharding/explain_agg_read_pref.js
index 820ab9799cc..0e774e4d8a8 100644
--- a/jstests/sharding/explain_agg_read_pref.js
+++ b/jstests/sharding/explain_agg_read_pref.js
@@ -2,160 +2,153 @@
* Tests that readPref applies on an explain for an aggregation command.
*/
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For profilerHasAtLeastOneMatchingEntryOrThrow.
-
- const st = new ShardingTest({
- name: "agg_explain_readPref",
- shards: 2,
- other: {
- rs0: {
- nodes: [
- {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- rs1: {
- nodes: [
- {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- enableBalancer: false
- }
- });
-
- const mongos = st.s;
- const config = mongos.getDB("config");
- const mongosDB = mongos.getDB("agg_explain_readPref");
- assert.commandWorked(mongosDB.dropDatabase());
-
- const coll = mongosDB.getCollection("coll");
-
- assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), "agg_explain_readPref-rs0");
- const rs0Primary = st.rs0.getPrimary();
- const rs0Secondary = st.rs0.getSecondary();
- const rs1Primary = st.rs1.getPrimary();
- const rs1Secondary = st.rs1.getSecondary();
-
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
+"use strict";
+
+load("jstests/libs/profiler.js"); // For profilerHasAtLeastOneMatchingEntryOrThrow.
+
+const st = new ShardingTest({
+ name: "agg_explain_readPref",
+ shards: 2,
+ other: {
+ rs0: {
+ nodes: [
+ {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
+ {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
+ ]
+ },
+ rs1: {
+ nodes: [
+ {rsConfig: {priority: 1, tags: {"tag": "primary"}}},
+ {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
+ ]
+ },
+ enableBalancer: false
}
+});
+
+const mongos = st.s;
+const config = mongos.getDB("config");
+const mongosDB = mongos.getDB("agg_explain_readPref");
+assert.commandWorked(mongosDB.dropDatabase());
+
+const coll = mongosDB.getCollection("coll");
+
+assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), "agg_explain_readPref-rs0");
+const rs0Primary = st.rs0.getPrimary();
+const rs0Secondary = st.rs0.getSecondary();
+const rs1Primary = st.rs1.getPrimary();
+const rs1Secondary = st.rs1.getSecondary();
+
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+//
+// Confirms that aggregations with explain run against mongos are executed against a tagged
+// secondary or primary, as per readPreference setting.
+//
+function confirmReadPreference(primary, secondary) {
+ assert.commandWorked(secondary.setProfilingLevel(2));
+ assert.commandWorked(primary.setProfilingLevel(2));
+
+ // [<pref>, <tags>, <target>, <comment>]
+ [['primary', [{}], primary, "primary"],
+ ['primaryPreferred', [{tag: 'secondary'}], primary, "primaryPreferred"],
+ ['secondary', [{}], secondary, "secondary"],
+ ['secondary', [{tag: 'secondary'}], secondary, "secondaryTag"],
+ ['secondaryPreferred', [{tag: 'secondary'}], secondary, "secondaryPreferred"],
+ ['secondaryPreferred', [{tag: 'primary'}], primary, "secondaryPreferredTagPrimary"]]
+ .forEach(function(args) {
+ const pref = args[0], tagSets = args[1], target = args[2], name = args[3];
+
+ //
+ // Tests that explain within an aggregate command and an explicit $readPreference
+ // targets the correct node in the replica set given by 'target'.
+ //
+ let comment = name + "_explain_within_query";
+ assert.commandWorked(mongosDB.runCommand({
+ query:
+ {aggregate: "coll", pipeline: [], comment: comment, cursor: {}, explain: true},
+ $readPreference: {mode: pref, tags: tagSets}
+ }));
+
+ // Look for an operation without an exception, since the shard throws a stale config
+ // exception if the shard or mongos has stale routing metadata, and the operation
+ // gets retried.
+ // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
+ // requests to all shards on receiving a stale version error from any shard.
+ // However, the requests may have reached the other shards before they are canceled.
+ // If the other shards were already fresh, they will re-receive the request in the
+ // next attempt, meaning the request can show up more than once in the profiler.
+ profilerHasAtLeastOneMatchingEntryOrThrow({
+ profileDB: target,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.explain.aggregate": coll.getName(),
+ "command.explain.comment": comment,
+ "command.$readPreference.mode": pref == 'primary' ? null : pref,
+ "errMsg": {"$exists": false}
+ }
+ });
- //
- // Confirms that aggregations with explain run against mongos are executed against a tagged
- // secondary or primary, as per readPreference setting.
- //
- function confirmReadPreference(primary, secondary) {
- assert.commandWorked(secondary.setProfilingLevel(2));
- assert.commandWorked(primary.setProfilingLevel(2));
-
- // [<pref>, <tags>, <target>, <comment>]
- [['primary', [{}], primary, "primary"],
- ['primaryPreferred', [{tag: 'secondary'}], primary, "primaryPreferred"],
- ['secondary', [{}], secondary, "secondary"],
- ['secondary', [{tag: 'secondary'}], secondary, "secondaryTag"],
- ['secondaryPreferred', [{tag: 'secondary'}], secondary, "secondaryPreferred"],
- ['secondaryPreferred', [{tag: 'primary'}], primary, "secondaryPreferredTagPrimary"]]
- .forEach(function(args) {
- const pref = args[0], tagSets = args[1], target = args[2], name = args[3];
-
- //
- // Tests that explain within an aggregate command and an explicit $readPreference
- // targets the correct node in the replica set given by 'target'.
- //
- let comment = name + "_explain_within_query";
- assert.commandWorked(mongosDB.runCommand({
- query: {
+ //
+ // Tests that an aggregation command wrapped in an explain with explicit
+ // $queryOptions targets the correct node in the replica set given by 'target'.
+ //
+ comment = name + "_explain_wrapped_agg";
+ assert.commandWorked(mongosDB.runCommand({
+ $query: {
+ explain: {
aggregate: "coll",
pipeline: [],
comment: comment,
cursor: {},
- explain: true
- },
- $readPreference: {mode: pref, tags: tagSets}
- }));
-
- // Look for an operation without an exception, since the shard throws a stale config
- // exception if the shard or mongos has stale routing metadata, and the operation
- // gets retried.
- // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
- // requests to all shards on receiving a stale version error from any shard.
- // However, the requests may have reached the other shards before they are canceled.
- // If the other shards were already fresh, they will re-receive the request in the
- // next attempt, meaning the request can show up more than once in the profiler.
- profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: target,
- filter: {
- "ns": coll.getFullName(),
- "command.explain.aggregate": coll.getName(),
- "command.explain.comment": comment,
- "command.$readPreference.mode": pref == 'primary' ? null : pref,
- "errMsg": {"$exists": false}
- }
- });
-
- //
- // Tests that an aggregation command wrapped in an explain with explicit
- // $queryOptions targets the correct node in the replica set given by 'target'.
- //
- comment = name + "_explain_wrapped_agg";
- assert.commandWorked(mongosDB.runCommand({
- $query: {
- explain: {
- aggregate: "coll",
- pipeline: [],
- comment: comment,
- cursor: {},
- }
- },
- $readPreference: {mode: pref, tags: tagSets}
- }));
-
- // Look for an operation without an exception, since the shard throws a stale config
- // exception if the shard or mongos has stale routing metadata, and the operation
- // gets retried.
- // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
- // requests to all shards on receiving a stale version error from any shard.
- // However, the requests may have reached the other shards before they are canceled.
- // If the other shards were already fresh, they will re-receive the request in the
- // next attempt, meaning the request can show up more than once in the profiler.
- profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: target,
- filter: {
- "ns": coll.getFullName(),
- "command.explain.aggregate": coll.getName(),
- "command.explain.comment": comment,
- "command.$readPreference.mode": pref == 'primary' ? null : pref,
- "errMsg": {"$exists": false}
}
- });
+ },
+ $readPreference: {mode: pref, tags: tagSets}
+ }));
+
+ // Look for an operation without an exception, since the shard throws a stale config
+ // exception if the shard or mongos has stale routing metadata, and the operation
+ // gets retried.
+ // Note, we look for *at least one* (not exactly one) matching entry: Mongos cancels
+ // requests to all shards on receiving a stale version error from any shard.
+ // However, the requests may have reached the other shards before they are canceled.
+ // If the other shards were already fresh, they will re-receive the request in the
+ // next attempt, meaning the request can show up more than once in the profiler.
+ profilerHasAtLeastOneMatchingEntryOrThrow({
+ profileDB: target,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.explain.aggregate": coll.getName(),
+ "command.explain.comment": comment,
+ "command.$readPreference.mode": pref == 'primary' ? null : pref,
+ "errMsg": {"$exists": false}
+ }
});
-
- assert.commandWorked(secondary.setProfilingLevel(0));
- assert.commandWorked(primary.setProfilingLevel(0));
- }
-
- //
- // Test aggregate explains run against an unsharded collection.
- //
- confirmReadPreference(rs0Primary.getDB(mongosDB.getName()),
- rs0Secondary.getDB(mongosDB.getName()));
-
- //
- // Test aggregate explains run against a sharded collection.
- //
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 25}, to: "agg_explain_readPref-rs1"}));
-
- // Sharded tests are run against the non-primary shard for the "agg_explain_readPref" db.
- confirmReadPreference(rs1Primary.getDB(mongosDB.getName()),
- rs1Secondary.getDB(mongosDB.getName()));
-
- st.stop();
+ });
+
+ assert.commandWorked(secondary.setProfilingLevel(0));
+ assert.commandWorked(primary.setProfilingLevel(0));
+}
+
+//
+// Test aggregate explains run against an unsharded collection.
+//
+confirmReadPreference(rs0Primary.getDB(mongosDB.getName()), rs0Secondary.getDB(mongosDB.getName()));
+
+//
+// Test aggregate explains run against a sharded collection.
+//
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {a: 25}, to: "agg_explain_readPref-rs1"}));
+
+// Sharded tests are run against the non-primary shard for the "agg_explain_readPref" db.
+confirmReadPreference(rs1Primary.getDB(mongosDB.getName()), rs1Secondary.getDB(mongosDB.getName()));
+
+st.stop();
})();
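The loop above exercises two wrapper shapes for attaching a read preference to an explained aggregate at mongos. A condensed sketch of both, assuming the same "coll" and tag values as the test:

    // Explain flag inside the wrapped command.
    mongosDB.runCommand({
        query: {aggregate: "coll", pipeline: [], cursor: {}, explain: true},
        $readPreference: {mode: "secondary", tags: [{tag: "secondary"}]}
    });
    // Aggregate wrapped in an explicit explain, using the $query form.
    mongosDB.runCommand({
        $query: {explain: {aggregate: "coll", pipeline: [], cursor: {}}},
        $readPreference: {mode: "secondary", tags: [{tag: "secondary"}]}
    });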
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index 5a984f5f610..2e3cb631f85 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -1,160 +1,155 @@
// Tests for the mongos explain command.
(function() {
- 'use strict';
-
- // Create a cluster with 3 shards.
- var st = new ShardingTest({shards: 2});
-
- var db = st.s.getDB("test");
- var explain;
-
- // Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on
- // 'b'.
- var collSharded = db.getCollection("mongos_explain_cmd");
- collSharded.drop();
- collSharded.ensureIndex({a: 1});
- collSharded.ensureIndex({b: 1});
-
- // Enable sharding.
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
- db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
-
- // Pre-split the collection to ensure that both shards have chunks. Explicitly
- // move chunks since the balancer is disabled.
- assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 1}}));
- printjson(db.adminCommand(
- {moveChunk: collSharded.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
-
- assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 2}}));
- printjson(db.adminCommand(
- {moveChunk: collSharded.getFullName(), find: {a: 2}, to: st.shard1.shardName}));
-
- // Put data on each shard.
- for (var i = 0; i < 3; i++) {
- collSharded.insert({_id: i, a: i, b: 1});
- }
-
- st.printShardingStatus();
-
- // Test a scatter-gather count command.
- assert.eq(3, collSharded.count({b: 1}));
-
- // Explain the scatter-gather count.
- explain = db.runCommand(
- {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
-
- // Validate some basic properties of the result.
- printjson(explain);
- assert.commandWorked(explain);
- assert("queryPlanner" in explain);
- assert("executionStats" in explain);
- assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
- assert.eq(2, explain.executionStats.executionStages.shards.length);
-
- // An explain of a command that doesn't exist should fail gracefully.
- explain = db.runCommand({
- explain: {nonexistent: collSharded.getName(), query: {b: 1}},
- verbosity: "allPlansExecution"
- });
- printjson(explain);
- assert.commandFailed(explain);
-
- // -------
-
- // Setup a collection that is not sharded.
- var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
- collUnsharded.drop();
- collUnsharded.ensureIndex({a: 1});
- collUnsharded.ensureIndex({b: 1});
-
- for (var i = 0; i < 3; i++) {
- collUnsharded.insert({_id: i, a: i, b: 1});
- }
- assert.eq(3, collUnsharded.count({b: 1}));
-
- // -------
-
- // Explain a delete operation and verify that it hits all shards without the shard key
- explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
- assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
- assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
- // Check that the deletes didn't actually happen.
- assert.eq(3, collSharded.count({b: 1}));
-
- // Explain a delete operation and verify that it hits only one shard with the shard key
- explain = db.runCommand({
- explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
- // Check that the deletes didn't actually happen.
- assert.eq(3, collSharded.count({b: 1}));
-
- // Check that we fail gracefully if we try to do an explain of a write batch that has more
- // than one operation in it.
- explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]
- },
- verbosity: "allPlansExecution"
- });
- assert.commandFailed(explain, tojson(explain));
-
- // Explain a multi upsert operation and verify that it hits all shards
- explain = db.runCommand({
- explain:
- {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
- assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
- assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
- assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
- // Check that the update didn't actually happen.
- assert.eq(0, collSharded.count({b: 10}));
-
- // Explain an upsert operation and verify that it hits only a single shard
- explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
- });
- assert.commandWorked(explain, tojson(explain));
- assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
- // Check that the upsert didn't actually happen.
- assert.eq(0, collSharded.count({a: 10}));
-
- // Explain an upsert operation which cannot be targeted, ensure an error is thrown
- explain = db.runCommand({
- explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
- verbosity: "allPlansExecution"
- });
- assert.commandFailed(explain, tojson(explain));
-
- // Explain a changeStream, ensure an error is thrown under snapshot read concern.
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(db.getName());
- explain = sessionDB.runCommand({
- aggregate: "coll",
- pipeline: [{$changeStream: {}}],
- explain: true,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- startTransaction: true,
- autocommit: false
- });
- assert.commandFailedWithCode(
- explain, ErrorCodes.OperationNotSupportedInTransaction, tojson(explain));
-
- st.stop();
+'use strict';
+
+// Create a cluster with 2 shards.
+var st = new ShardingTest({shards: 2});
+
+var db = st.s.getDB("test");
+var explain;
+
+// Set up a collection that will be sharded. The shard key will be 'a'. There's also an index on
+// 'b'.
+var collSharded = db.getCollection("mongos_explain_cmd");
+collSharded.drop();
+collSharded.ensureIndex({a: 1});
+collSharded.ensureIndex({b: 1});
+
+// Enable sharding.
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
+db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
+
+// Pre-split the collection to ensure that both shards have chunks. Explicitly
+// move chunks since the balancer is disabled.
+assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 1}}));
+printjson(
+ db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: 1}, to: st.shard0.shardName}));
+
+assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 2}}));
+printjson(
+ db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: 2}, to: st.shard1.shardName}));
+
+// Put data on each shard.
+for (var i = 0; i < 3; i++) {
+ collSharded.insert({_id: i, a: i, b: 1});
+}
+
+st.printShardingStatus();
+
+// Test a scatter-gather count command.
+assert.eq(3, collSharded.count({b: 1}));
+
+// Explain the scatter-gather count.
+explain = db.runCommand(
+ {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
+
+// Validate some basic properties of the result.
+printjson(explain);
+assert.commandWorked(explain);
+assert("queryPlanner" in explain);
+assert("executionStats" in explain);
+assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
+assert.eq(2, explain.executionStats.executionStages.shards.length);
+
+// An explain of a command that doesn't exist should fail gracefully.
+explain = db.runCommand(
+ {explain: {nonexistent: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
+printjson(explain);
+assert.commandFailed(explain);
+
+// -------
+
+// Set up a collection that is not sharded.
+var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded");
+collUnsharded.drop();
+collUnsharded.ensureIndex({a: 1});
+collUnsharded.ensureIndex({b: 1});
+
+for (var i = 0; i < 3; i++) {
+ collUnsharded.insert({_id: i, a: i, b: 1});
+}
+assert.eq(3, collUnsharded.count({b: 1}));
+
+// -------
+
+// Explain a delete operation and verify that it hits all shards without the shard key
+explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE");
+assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE");
+// Check that the deletes didn't actually happen.
+assert.eq(3, collSharded.count({b: 1}));
+
+// Explain a delete operation and verify that it hits only one shard with the shard key
+explain = db.runCommand({
+ explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+// Check that the deletes didn't actually happen.
+assert.eq(3, collSharded.count({b: 1}));
+
+// Check that we fail gracefully if we try to do an explain of a write batch that has more
+// than one operation in it.
+explain = db.runCommand({
+ explain:
+ {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandFailed(explain, tojson(explain));
+
+// Explain a multi upsert operation and verify that it hits all shards
+explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 2);
+assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
+assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE");
+assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE");
+// Check that the update didn't actually happen.
+assert.eq(0, collSharded.count({b: 10}));
+
+// Explain an upsert operation and verify that it hits only a single shard
+explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandWorked(explain, tojson(explain));
+assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);
+// Check that the upsert didn't actually happen.
+assert.eq(0, collSharded.count({a: 10}));
+
+// Explain an upsert operation which cannot be targeted, ensure an error is thrown
+explain = db.runCommand({
+ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
+ verbosity: "allPlansExecution"
+});
+assert.commandFailed(explain, tojson(explain));
+
+// Explain a changeStream, ensure an error is thrown under snapshot read concern.
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(db.getName());
+explain = sessionDB.runCommand({
+ aggregate: "coll",
+ pipeline: [{$changeStream: {}}],
+ explain: true,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ startTransaction: true,
+ autocommit: false
+});
+assert.commandFailedWithCode(
+ explain, ErrorCodes.OperationNotSupportedInTransaction, tojson(explain));
+
+st.stop();
})();
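All of the explains above request "allPlansExecution". For reference, a sketch of the three verbosity levels the explain command accepts, each a superset of the previous one (collection name assumed):

    db.runCommand({explain: {count: "c", query: {b: 1}}, verbosity: "queryPlanner"});
    db.runCommand({explain: {count: "c", query: {b: 1}}, verbosity: "executionStats"});
    db.runCommand({explain: {count: "c", query: {b: 1}}, verbosity: "allPlansExecution"});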
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 3066666c82d..a8dad43a201 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -3,88 +3,86 @@
* and the collection is sharded.
*/
(function() {
- 'use strict';
+'use strict';
- var collName = 'explain_find_and_modify';
+var collName = 'explain_find_and_modify';
- // Create a cluster with 2 shards.
- var st = new ShardingTest({shards: 2});
+// Create a cluster with 2 shards.
+var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
- var shardKey = {a: 1};
+var testDB = st.s.getDB('test');
+var shardKey = {a: 1};
- // Create a collection with an index on the intended shard key.
- var shardedColl = testDB.getCollection(collName);
- shardedColl.drop();
- assert.commandWorked(testDB.createCollection(collName));
- assert.commandWorked(shardedColl.ensureIndex(shardKey));
+// Create a collection with an index on the intended shard key.
+var shardedColl = testDB.getCollection(collName);
+shardedColl.drop();
+assert.commandWorked(testDB.createCollection(collName));
+assert.commandWorked(shardedColl.ensureIndex(shardKey));
- // Enable sharding on the database and shard the collection.
- // Use "st.shard0.shardName" as the primary shard.
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.toString(), st.shard0.shardName);
- assert.commandWorked(
- testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey}));
+// Enable sharding on the database and shard the collection.
+// Use "st.shard0.shardName" as the primary shard.
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.toString(), st.shard0.shardName);
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey}));
- // Split and move the chunks so that
- // chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on
- // st.shard0.shardName
- // chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on
- // st.shard1.shardName
- assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: st.shard1.shardName}));
+// Split and move the chunks so that
+// chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on
+// st.shard0.shardName
+// chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on
+// st.shard1.shardName
+assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: st.shard1.shardName}));
- var res;
+var res;
- // Queries that do not involve the shard key are invalid.
- res = testDB.runCommand({
- explain: {findAndModify: collName, query: {b: 1}, remove: true},
- verbosity: 'queryPlanner'
- });
- assert.commandFailed(res);
+// Queries that do not involve the shard key are invalid.
+res = testDB.runCommand(
+ {explain: {findAndModify: collName, query: {b: 1}, remove: true}, verbosity: 'queryPlanner'});
+assert.commandFailed(res);
- // Queries that have non-equality queries on the shard key are invalid.
- res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {a: {$gt: 5}},
- update: {$inc: {b: 7}},
- },
- verbosity: 'allPlansExecution'
- });
- assert.commandFailed(res);
+// Queries that have non-equality queries on the shard key are invalid.
+res = testDB.runCommand({
+ explain: {
+ findAndModify: collName,
+ query: {a: {$gt: 5}},
+ update: {$inc: {b: 7}},
+ },
+ verbosity: 'allPlansExecution'
+});
+assert.commandFailed(res);
- // Asserts that the explain command ran on the specified shard and used the given stage
- // for performing the findAndModify command.
- function assertExplainResult(explainOut, outerKey, innerKey, shardName, expectedStage) {
- assert(explainOut.hasOwnProperty(outerKey));
- assert(explainOut[outerKey].hasOwnProperty(innerKey));
+// Asserts that the explain command ran on the specified shard and used the given stage
+// for performing the findAndModify command.
+function assertExplainResult(explainOut, outerKey, innerKey, shardName, expectedStage) {
+ assert(explainOut.hasOwnProperty(outerKey));
+ assert(explainOut[outerKey].hasOwnProperty(innerKey));
- var shardStage = explainOut[outerKey][innerKey];
- assert.eq('SINGLE_SHARD', shardStage.stage);
- assert.eq(1, shardStage.shards.length);
- assert.eq(shardName, shardStage.shards[0].shardName);
- assert.eq(expectedStage, shardStage.shards[0][innerKey].stage);
- }
+ var shardStage = explainOut[outerKey][innerKey];
+ assert.eq('SINGLE_SHARD', shardStage.stage);
+ assert.eq(1, shardStage.shards.length);
+ assert.eq(shardName, shardStage.shards[0].shardName);
+ assert.eq(expectedStage, shardStage.shards[0][innerKey].stage);
+}
- // Test that the explain command is routed to "st.shard0.shardName" when targeting the lower
- // chunk range.
- res = testDB.runCommand({
- explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true},
- verbosity: 'queryPlanner'
- });
- assert.commandWorked(res);
- assertExplainResult(res, 'queryPlanner', 'winningPlan', st.shard0.shardName, 'UPDATE');
+// Test that the explain command is routed to "st.shard0.shardName" when targeting the lower
+// chunk range.
+res = testDB.runCommand({
+ explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true},
+ verbosity: 'queryPlanner'
+});
+assert.commandWorked(res);
+assertExplainResult(res, 'queryPlanner', 'winningPlan', st.shard0.shardName, 'UPDATE');
- // Test that the explain command is routed to "st.shard1.shardName" when targeting the higher
- // chunk range.
- res = testDB.runCommand({
- explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true},
- verbosity: 'executionStats'
- });
- assert.commandWorked(res);
- assertExplainResult(res, 'executionStats', 'executionStages', st.shard1.shardName, 'DELETE');
+// Test that the explain command is routed to "st.shard1.shardName" when targeting the higher
+// chunk range.
+res = testDB.runCommand({
+ explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true},
+ verbosity: 'executionStats'
+});
+assert.commandWorked(res);
+assertExplainResult(res, 'executionStats', 'executionStages', st.shard1.shardName, 'DELETE');
- st.stop();
+st.stop();
})();
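The pass/fail split above comes down to targeting: findAndModify on a sharded collection must resolve to exactly one shard, which requires an equality predicate on the shard key. A sketch, assuming a collection "c" sharded on {a: 1}:

    // Targets a single shard via equality on 'a': explain succeeds.
    testDB.runCommand({
        explain: {findAndModify: "c", query: {a: 0}, update: {$inc: {b: 1}}},
        verbosity: 'queryPlanner'
    });
    // No shard key in the query: cannot be targeted, so explain fails.
    testDB.runCommand({
        explain: {findAndModify: "c", query: {b: 1}, remove: true},
        verbosity: 'queryPlanner'
    });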
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index e84393607d3..ce5e2cf47af 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -27,7 +27,6 @@ var assertCorrectTargeting = function(explain, isMongos, secExpected) {
};
var testAllModes = function(conn, isMongos) {
-
// The primary is tagged with { tag: 'one' } and the secondary with
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
diff --git a/jstests/sharding/failcommand_failpoint_not_parallel.js b/jstests/sharding/failcommand_failpoint_not_parallel.js
index c759986a11c..18117e0e4b8 100644
--- a/jstests/sharding/failcommand_failpoint_not_parallel.js
+++ b/jstests/sharding/failcommand_failpoint_not_parallel.js
@@ -1,21 +1,21 @@
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 3, mongos: 1});
- const db = st.s.getDB("test_failcommand_noparallel");
+const st = new ShardingTest({shards: 3, mongos: 1});
+const db = st.s.getDB("test_failcommand_noparallel");
- // Test times when closing connection.
- // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
- // an internal client.
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {closeConnection: true, failCommands: ["find"], failInternalCommands: true}
- }));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.commandWorked(db.runCommand({find: "c"}));
- assert.commandWorked(st.s.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Test times when closing connection.
+// Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
+// an internal client.
+assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {closeConnection: true, failCommands: ["find"], failInternalCommands: true}
+}));
+assert.throws(() => db.runCommand({find: "c"}));
+assert.throws(() => db.runCommand({find: "c"}));
+assert.commandWorked(db.runCommand({find: "c"}));
+assert.commandWorked(st.s.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- st.stop();
+st.stop();
}());
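The failCommand failpoint is driven by two arguments ('admin' below is an assumed handle to the admin database): 'mode' controls how many times it fires, and 'data' controls which commands fail and how. A sketch of the shapes these tests combine:

    admin.runCommand({
        configureFailPoint: "failCommand",
        mode: {times: 2},  // alternatives: "alwaysOn", "off"
        data: {closeConnection: true, failCommands: ["find"], failInternalCommands: true}
    });
    admin.runCommand({configureFailPoint: "failCommand", mode: "off"});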
diff --git a/jstests/sharding/failcommand_ignores_internal.js b/jstests/sharding/failcommand_ignores_internal.js
index 64789c64c96..7e4f0413cb1 100644
--- a/jstests/sharding/failcommand_ignores_internal.js
+++ b/jstests/sharding/failcommand_ignores_internal.js
@@ -1,54 +1,55 @@
// Tests that the "failCommand" failpoint ignores commands from internal clients: SERVER-34943.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 1});
- const mongosDB = st.s0.getDB("test_failcommand_ignores_internal");
+const st = new ShardingTest({shards: 1});
+const mongosDB = st.s0.getDB("test_failcommand_ignores_internal");
- // Enough documents for three getMores.
- assert.commandWorked(mongosDB.collection.insertMany([{}, {}, {}]));
- const findReply = assert.commandWorked(mongosDB.runCommand({find: "collection", batchSize: 0}));
- const cursorId = findReply.cursor.id;
+// Enough documents for three getMores.
+assert.commandWorked(mongosDB.collection.insertMany([{}, {}, {}]));
+const findReply = assert.commandWorked(mongosDB.runCommand({find: "collection", batchSize: 0}));
+const cursorId = findReply.cursor.id;
- // Test failing "getMore" twice with a particular error code.
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
- }));
- const getMore = {getMore: cursorId, collection: "collection", batchSize: 1};
- assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
- assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
- assert.commandWorked(mongosDB.runCommand(getMore));
+// Test failing "getMore" twice with a particular error code.
+assert.commandWorked(mongosDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
+}));
+const getMore = {
+ getMore: cursorId,
+ collection: "collection",
+ batchSize: 1
+};
+assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
+assert.commandFailedWithCode(mongosDB.runCommand(getMore), ErrorCodes.BadValue);
+assert.commandWorked(mongosDB.runCommand(getMore));
- // Setting a failpoint for "distinct" on a shard has no effect on mongos.
- assert.commandWorked(st.shard0.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.BadValue, failCommands: ["distinct"]}
- }));
- const distinct = {distinct: "collection", key: "x"};
- assert.commandFailedWithCode(
- st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct),
- ErrorCodes.BadValue);
- assert.commandWorked(mongosDB.runCommand(distinct));
- assert.commandWorked(
- st.shard0.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
+// Setting a failpoint for "distinct" on a shard has no effect on mongos.
+assert.commandWorked(st.shard0.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["distinct"]}
+}));
+const distinct = {
+ distinct: "collection",
+ key: "x"
+};
+assert.commandFailedWithCode(
+ st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct), ErrorCodes.BadValue);
+assert.commandWorked(mongosDB.runCommand(distinct));
+assert.commandWorked(
+ st.shard0.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
- // Setting a failpoint for "distinct" on a shard with failInternalCommands DOES affect mongos.
- assert.commandWorked(st.shard0.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.BadValue,
- failCommands: ["distinct"],
- failInternalCommands: true
- }
- }));
- assert.commandFailedWithCode(mongosDB.runCommand(distinct), ErrorCodes.BadValue);
- assert.commandFailedWithCode(
- st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct),
- ErrorCodes.BadValue);
+// Setting a failpoint for "distinct" on a shard with failInternalCommands DOES affect mongos.
+assert.commandWorked(st.shard0.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["distinct"], failInternalCommands: true}
+}));
+assert.commandFailedWithCode(mongosDB.runCommand(distinct), ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ st.shard0.getDB("test_failcommand_ignores_internal").runCommand(distinct), ErrorCodes.BadValue);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 3f3e86056b1..e92e9c4d713 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,132 +1,131 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- // ---- can't shard system namespaces ----
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.system.blah", key: {num: 1}}),
- "shard system namespace");
-
- // ---- setup test.foo -----
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- let db = s.s0.getDB("test");
-
- assert.commandWorked(db.foo.createIndex({y: 1}));
-
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-
- assert.writeOK(db.foo.insert({num: 5}));
- assert.writeOK(db.foo.save({num: 15}));
-
- let a = s.rs0.getPrimary().getDB("test");
- let b = s.rs1.getPrimary().getDB("test");
-
- // ---- make sure shard key index is everywhere ----
- assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
- assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
-
- // ---- make sure if you add an index it goes everywhere ------
- assert.commandWorked(db.foo.createIndex({x: 1}));
- assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
- assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
-
- // ---- no unique indexes allowed that do not include the shard key ------
- assert.commandFailed(db.foo.createIndex({z: 1}, true));
- assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
- assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
-
- // ---- unique indexes that include the shard key are allowed ------
- assert.commandWorked(db.foo.createIndex({num: 1, bar: 1}, true));
- assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
-
- // ---- can't shard thing with unique indexes ------
- assert.commandWorked(db.foo2.createIndex({a: 1}));
- printjson(db.foo2.getIndexes());
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo2", key: {num: 1}}),
- "shard with index");
-
- assert.commandWorked(db.foo3.createIndex({a: 1}, true));
- printjson(db.foo3.getIndexes());
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo3", key: {num: 1}}),
- "shard with unique index");
-
- assert.commandWorked(db.foo7.createIndex({num: 1, a: 1}, true));
- printjson(db.foo7.getIndexes());
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo7", key: {num: 1}}),
- "shard with ok unique index");
-
- // ---- unique shard key ----
- assert.commandWorked(
- s.s0.adminCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}),
- "shard with index and unique");
- assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
-
- assert.writeOK(db.foo4.insert({num: 5}));
- assert.writeOK(db.foo4.insert({num: 15}));
-
- assert.eq(1, a.foo4.count(), "ua1");
- assert.eq(1, b.foo4.count(), "ub1");
-
- assert.eq(2, a.foo4.getIndexes().length, "ua2");
- assert.eq(2, b.foo4.getIndexes().length, "ub2");
-
- assert(a.foo4.getIndexes()[1].unique, "ua3");
- assert(b.foo4.getIndexes()[1].unique, "ub3");
-
- assert.eq(2, db.foo4.count(), "uc1");
- assert.writeOK(db.foo4.insert({num: 7}));
- assert.eq(3, db.foo4.count(), "uc2");
- assert.writeError(db.foo4.insert({num: 7}));
- assert.eq(3, db.foo4.count(), "uc4");
-
- // --- don't let you convertToCapped ----
- assert(!db.foo4.isCapped(), "ca1");
- assert(!a.foo4.isCapped(), "ca2");
- assert(!b.foo4.isCapped(), "ca3");
-
- assert.commandFailed(db.foo4.convertToCapped(30000), "ca30");
- assert(!db.foo4.isCapped(), "ca4");
- assert(!a.foo4.isCapped(), "ca5");
- assert(!b.foo4.isCapped(), "ca6");
-
- // make sure i didn't break anything
- db.foo4a.save({a: 1});
- assert(!db.foo4a.isCapped(), "ca7");
- db.foo4a.convertToCapped(30000);
- assert(db.foo4a.isCapped(), "ca8");
-
- // --- don't let you shard a capped collection
- db.createCollection("foo5", {capped: true, size: 30000});
- assert(db.foo5.isCapped(), "cb1");
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
-
- // ---- can't shard non-empty collection without index -----
- assert.writeOK(db.foo8.insert({a: 1}));
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
- "non-empty collection");
-
- // ---- can't shard non-empty collection with null values in shard key ----
- assert.writeOK(db.foo9.insert({b: 1}));
- assert.commandWorked(db.foo9.createIndex({a: 1}));
- assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
- "entry with null value");
-
- // --- listDatabases ---
- var r = db.getMongo().getDBs();
- assert.eq(3, r.databases.length, tojson(r));
- assert.eq("number", typeof(r.totalSize), "listDatabases 3 : " + tojson(r));
-
- // --- flushRouterconfig ---
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 1}));
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: true}));
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB'}));
- assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB.TestColl'}));
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+// ---- can't shard system namespaces ----
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.system.blah", key: {num: 1}}),
+ "shard system namespace");
+
+// ---- setup test.foo -----
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+let db = s.s0.getDB("test");
+
+assert.commandWorked(db.foo.createIndex({y: 1}));
+
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
+
+assert.writeOK(db.foo.insert({num: 5}));
+assert.writeOK(db.foo.save({num: 15}));
+
+let a = s.rs0.getPrimary().getDB("test");
+let b = s.rs1.getPrimary().getDB("test");
+
+// ---- make sure shard key index is everywhere ----
+assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
+assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
+
+// ---- make sure if you add an index it goes everywhere ------
+assert.commandWorked(db.foo.createIndex({x: 1}));
+assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
+assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
+
+// ---- no unique indexes allowed that do not include the shard key ------
+assert.commandFailed(db.foo.createIndex({z: 1}, true));
+assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
+assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
+
+// ---- unique indexes that include the shard key are allowed ------
+assert.commandWorked(db.foo.createIndex({num: 1, bar: 1}, true));
+assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
+
+// ---- can't shard a collection with a unique index that doesn't include the shard key ------
+assert.commandWorked(db.foo2.createIndex({a: 1}));
+printjson(db.foo2.getIndexes());
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo2", key: {num: 1}}),
+ "shard with index");
+
+assert.commandWorked(db.foo3.createIndex({a: 1}, true));
+printjson(db.foo3.getIndexes());
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo3", key: {num: 1}}),
+ "shard with unique index");
+
+assert.commandWorked(db.foo7.createIndex({num: 1, a: 1}, true));
+printjson(db.foo7.getIndexes());
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo7", key: {num: 1}}),
+ "shard with ok unique index");
+
+// ---- unique shard key ----
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}),
+ "shard with index and unique");
+assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
+
+assert.writeOK(db.foo4.insert({num: 5}));
+assert.writeOK(db.foo4.insert({num: 15}));
+
+assert.eq(1, a.foo4.count(), "ua1");
+assert.eq(1, b.foo4.count(), "ub1");
+
+assert.eq(2, a.foo4.getIndexes().length, "ua2");
+assert.eq(2, b.foo4.getIndexes().length, "ub2");
+
+assert(a.foo4.getIndexes()[1].unique, "ua3");
+assert(b.foo4.getIndexes()[1].unique, "ub3");
+
+assert.eq(2, db.foo4.count(), "uc1");
+assert.writeOK(db.foo4.insert({num: 7}));
+assert.eq(3, db.foo4.count(), "uc2");
+assert.writeError(db.foo4.insert({num: 7}));
+assert.eq(3, db.foo4.count(), "uc4");
+
+// --- don't let you convertToCapped ----
+assert(!db.foo4.isCapped(), "ca1");
+assert(!a.foo4.isCapped(), "ca2");
+assert(!b.foo4.isCapped(), "ca3");
+
+assert.commandFailed(db.foo4.convertToCapped(30000), "ca30");
+assert(!db.foo4.isCapped(), "ca4");
+assert(!a.foo4.isCapped(), "ca5");
+assert(!b.foo4.isCapped(), "ca6");
+
+// Make sure convertToCapped still works on an unsharded collection.
+db.foo4a.save({a: 1});
+assert(!db.foo4a.isCapped(), "ca7");
+db.foo4a.convertToCapped(30000);
+assert(db.foo4a.isCapped(), "ca8");
+
+// --- don't let you shard a capped collection
+db.createCollection("foo5", {capped: true, size: 30000});
+assert(db.foo5.isCapped(), "cb1");
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
+
+// ---- can't shard non-empty collection without index -----
+assert.writeOK(db.foo8.insert({a: 1}));
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
+ "non-empty collection");
+
+// ---- can't shard non-empty collection with null values in shard key ----
+assert.writeOK(db.foo9.insert({b: 1}));
+assert.commandWorked(db.foo9.createIndex({a: 1}));
+assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
+ "entry with null value");
+
+// --- listDatabases ---
+var r = db.getMongo().getDBs();
+assert.eq(3, r.databases.length, tojson(r));
+assert.eq("number", typeof (r.totalSize), "listDatabases 3 : " + tojson(r));
+
+// --- flushRouterConfig ---
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 1}));
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: true}));
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB'}));
+assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB.TestColl'}));
+
+s.stop();
})();
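The unique-index checks in the middle of this test all follow one rule: uniqueness can only be enforced within a single shard, so a unique index is allowed only if the shard key is a prefix of it. A sketch against a collection sharded on {num: 1} (names assumed):

    assert.commandWorked(db.c.createIndex({num: 1, bar: 1}, {unique: true}));  // shard key is a prefix
    assert.commandFailed(db.c.createIndex({z: 1}, {unique: true}));            // cannot be enforced cluster-wide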
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 16d28c4d1ba..374acb15518 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -1,187 +1,185 @@
(function() {
- "use strict";
+"use strict";
- var s = new ShardingTest({name: "features2", shards: 2, mongos: 1});
+var s = new ShardingTest({name: "features2", shards: 2, mongos: 1});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
- let a = s._connections[0].getDB("test");
- let b = s._connections[1].getDB("test");
+let a = s._connections[0].getDB("test");
+let b = s._connections[1].getDB("test");
- let db = s.getDB("test");
+let db = s.getDB("test");
- // ---- distinct ----
+// ---- distinct ----
- db.foo.save({x: 1});
- db.foo.save({x: 2});
- db.foo.save({x: 3});
- db.foo.ensureIndex({x: 1});
+db.foo.save({x: 1});
+db.foo.save({x: 2});
+db.foo.save({x: 3});
+db.foo.ensureIndex({x: 1});
- assert.eq("1,2,3", db.foo.distinct("x"), "distinct 1");
- assert(a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3, "distinct 2");
- assert(a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0, "distinct 3");
+assert.eq("1,2,3", db.foo.distinct("x"), "distinct 1");
+assert(a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3, "distinct 2");
+assert(a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0, "distinct 3");
- assert.eq(1, s.onNumShards("foo"), "A1");
+assert.eq(1, s.onNumShards("foo"), "A1");
- s.shardColl("foo", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
+s.shardColl("foo", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
- assert.eq(2, s.onNumShards("foo"), "A2");
+assert.eq(2, s.onNumShards("foo"), "A2");
- assert.eq("1,2,3", db.foo.distinct("x"), "distinct 4");
+assert.eq("1,2,3", db.foo.distinct("x"), "distinct 4");
- // ----- delete ---
+// ----- delete ---
- assert.eq(3, db.foo.count(), "D1");
+assert.eq(3, db.foo.count(), "D1");
- db.foo.remove({x: 3});
- assert.eq(2, db.foo.count(), "D2");
+db.foo.remove({x: 3});
+assert.eq(2, db.foo.count(), "D2");
- db.foo.save({x: 3});
- assert.eq(3, db.foo.count(), "D3");
+db.foo.save({x: 3});
+assert.eq(3, db.foo.count(), "D3");
- db.foo.remove({x: {$gt: 2}});
- assert.eq(2, db.foo.count(), "D4");
+db.foo.remove({x: {$gt: 2}});
+assert.eq(2, db.foo.count(), "D4");
- db.foo.remove({x: {$gt: -1}});
- assert.eq(0, db.foo.count(), "D5");
+db.foo.remove({x: {$gt: -1}});
+assert.eq(0, db.foo.count(), "D5");
- db.foo.save({x: 1});
- db.foo.save({x: 2});
- db.foo.save({x: 3});
- assert.eq(3, db.foo.count(), "D6");
- db.foo.remove({});
- assert.eq(0, db.foo.count(), "D7");
+db.foo.save({x: 1});
+db.foo.save({x: 2});
+db.foo.save({x: 3});
+assert.eq(3, db.foo.count(), "D6");
+db.foo.remove({});
+assert.eq(0, db.foo.count(), "D7");
- // --- _id key ---
+// --- _id key ---
- db.foo2.save({_id: new ObjectId()});
- db.foo2.save({_id: new ObjectId()});
- db.foo2.save({_id: new ObjectId()});
+db.foo2.save({_id: new ObjectId()});
+db.foo2.save({_id: new ObjectId()});
+db.foo2.save({_id: new ObjectId()});
- assert.eq(1, s.onNumShards("foo2"), "F1");
+assert.eq(1, s.onNumShards("foo2"), "F1");
- printjson(db.foo2.getIndexes());
- s.adminCommand({shardcollection: "test.foo2", key: {_id: 1}});
+printjson(db.foo2.getIndexes());
+s.adminCommand({shardcollection: "test.foo2", key: {_id: 1}});
- assert.eq(3, db.foo2.count(), "F2");
- db.foo2.insert({});
- assert.eq(4, db.foo2.count(), "F3");
+assert.eq(3, db.foo2.count(), "F2");
+db.foo2.insert({});
+assert.eq(4, db.foo2.count(), "F3");
- // --- map/reduce
+// --- map/reduce
- db.mr.save({x: 1, tags: ["a", "b"]});
- db.mr.save({x: 2, tags: ["b", "c"]});
- db.mr.save({x: 3, tags: ["c", "a"]});
- db.mr.save({x: 4, tags: ["b", "c"]});
- db.mr.ensureIndex({x: 1});
+db.mr.save({x: 1, tags: ["a", "b"]});
+db.mr.save({x: 2, tags: ["b", "c"]});
+db.mr.save({x: 3, tags: ["c", "a"]});
+db.mr.save({x: 4, tags: ["b", "c"]});
+db.mr.ensureIndex({x: 1});
- let m = function() {
- this.tags.forEach(function(z) {
- emit(z, {count: 1});
- });
- };
+let m = function() {
+ this.tags.forEach(function(z) {
+ emit(z, {count: 1});
+ });
+};
- let r = function(key, values) {
- var total = 0;
- for (var i = 0; i < values.length; i++) {
- total += values[i].count;
- }
- return {count: total};
- };
+let r = function(key, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i].count;
+ }
+ return {count: total};
+};
- let doMR = function(n) {
- print(n);
+let doMR = function(n) {
+ print(n);
- // on-disk
+ // on-disk
- var res = db.mr.mapReduce(m, r, "smr1_out");
- printjson(res);
- assert.eq(4, res.counts.input, "MR T0 " + n);
+ var res = db.mr.mapReduce(m, r, "smr1_out");
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T0 " + n);
- var x = db[res.result];
- assert.eq(3, x.find().count(), "MR T1 " + n);
+ var x = db[res.result];
+ assert.eq(3, x.find().count(), "MR T1 " + n);
- var z = {};
- x.find().forEach(function(a) {
- z[a._id] = a.value.count;
- });
- assert.eq(3, Object.keySet(z).length, "MR T2 " + n);
- assert.eq(2, z.a, "MR T3 " + n);
- assert.eq(3, z.b, "MR T4 " + n);
- assert.eq(3, z.c, "MR T5 " + n);
+ var z = {};
+ x.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ assert.eq(3, Object.keySet(z).length, "MR T2 " + n);
+ assert.eq(2, z.a, "MR T3 " + n);
+ assert.eq(3, z.b, "MR T4 " + n);
+ assert.eq(3, z.c, "MR T5 " + n);
- x.drop();
+ x.drop();
- // inline
+ // inline
- var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
- printjson(res);
- assert.eq(4, res.counts.input, "MR T6 " + n);
+ var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T6 " + n);
- var z = {};
- res.find().forEach(function(a) {
- z[a._id] = a.value.count;
- });
- printjson(z);
- assert.eq(3, Object.keySet(z).length, "MR T7 " + n);
- assert.eq(2, z.a, "MR T8 " + n);
- assert.eq(3, z.b, "MR T9 " + n);
- assert.eq(3, z.c, "MR TA " + n);
+ var z = {};
+ res.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ printjson(z);
+ assert.eq(3, Object.keySet(z).length, "MR T7 " + n);
+ assert.eq(2, z.a, "MR T8 " + n);
+ assert.eq(3, z.b, "MR T9 " + n);
+ assert.eq(3, z.c, "MR TA " + n);
+};
- };
+doMR("before");
- doMR("before");
+assert.eq(1, s.onNumShards("mr"), "E1");
+s.shardColl("mr", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
+assert.eq(2, s.onNumShards("mr"), "E2");
- assert.eq(1, s.onNumShards("mr"), "E1");
- s.shardColl("mr", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
- assert.eq(2, s.onNumShards("mr"), "E1");
-
- doMR("after");
-
- s.adminCommand({split: 'test.mr', middle: {x: 3}});
- s.adminCommand({split: 'test.mr', middle: {x: 4}});
- s.adminCommand({movechunk: 'test.mr', find: {x: 3}, to: s.getPrimaryShard('test').name});
-
- doMR("after extra split");
-
- let cmd = {mapreduce: "mr", map: "emit( ", reduce: "fooz + ", out: "broken1"};
-
- let x = db.runCommand(cmd);
- let y = s._connections[0].getDB("test").runCommand(cmd);
-
- printjson(x);
- printjson(y);
-
- // count
-
- db.countaa.save({"regex": /foo/i});
- db.countaa.save({"regex": /foo/i});
- db.countaa.save({"regex": /foo/i});
- assert.eq(3, db.countaa.count(), "counta1");
- assert.eq(3, db.countaa.find().itcount(), "counta1");
-
- // isMaster and query-wrapped-command
- let isMaster = db.runCommand({isMaster: 1});
- assert(isMaster.ismaster);
- assert.eq('isdbgrid', isMaster.msg);
- delete isMaster.localTime;
- delete isMaster.$clusterTime;
- delete isMaster.operationTime;
-
- let im2 = db.runCommand({query: {isMaster: 1}});
- delete im2.localTime;
- delete im2.$clusterTime;
- delete im2.operationTime;
- assert.eq(isMaster, im2);
-
- im2 = db.runCommand({$query: {isMaster: 1}});
- delete im2.localTime;
- delete im2.$clusterTime;
- delete im2.operationTime;
- assert.eq(isMaster, im2);
-
- s.stop();
+doMR("after");
+
+s.adminCommand({split: 'test.mr', middle: {x: 3}});
+s.adminCommand({split: 'test.mr', middle: {x: 4}});
+s.adminCommand({movechunk: 'test.mr', find: {x: 3}, to: s.getPrimaryShard('test').name});
+
+doMR("after extra split");
+
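+// Intentionally malformed map and reduce bodies: the command is expected to
+// fail on both mongos and the shard, and the results are printed below.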
+let cmd = {mapreduce: "mr", map: "emit( ", reduce: "fooz + ", out: "broken1"};
+
+let x = db.runCommand(cmd);
+let y = s._connections[0].getDB("test").runCommand(cmd);
+
+printjson(x);
+printjson(y);
+
+// count
+
+db.countaa.save({"regex": /foo/i});
+db.countaa.save({"regex": /foo/i});
+db.countaa.save({"regex": /foo/i});
+assert.eq(3, db.countaa.count(), "counta1");
+assert.eq(3, db.countaa.find().itcount(), "counta2");
+
+// isMaster and query-wrapped-command
+let isMaster = db.runCommand({isMaster: 1});
+assert(isMaster.ismaster);
+assert.eq('isdbgrid', isMaster.msg);
+delete isMaster.localTime;
+delete isMaster.$clusterTime;
+delete isMaster.operationTime;
+
+let im2 = db.runCommand({query: {isMaster: 1}});
+delete im2.localTime;
+delete im2.$clusterTime;
+delete im2.operationTime;
+assert.eq(isMaster, im2);
+
+im2 = db.runCommand({$query: {isMaster: 1}});
+delete im2.localTime;
+delete im2.$clusterTime;
+delete im2.operationTime;
+assert.eq(isMaster, im2);
+
+s.stop();
})();
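For reference, the per-tag counts that doMR() asserts can also be computed with an
aggregation pipeline. A minimal sketch against the same db.mr documents; it is
illustrative only and not part of the formatting change:

let aggCounts = db.mr
                    .aggregate([
                        {$unwind: "$tags"},                         // one document per tag
                        {$group: {_id: "$tags", count: {$sum: 1}}}  // count per tag value
                    ])
                    .toArray();
printjson(aggCounts);  // expect a: 2, b: 3, c: 3, matching the MR assertions above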
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index cee22543b7e..65b3ba2019d 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -6,153 +6,152 @@
// - Verifies a $where query can be killed on multiple DBs
// - Tests fsync and fsync+lock permissions on sharded db
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2, mongos: 1});
- var dbForTest = s.getDB("test");
- var admin = s.getDB("admin");
- dbForTest.foo.drop();
-
- var numDocs = 10000;
-
- // shard test.foo and add a split point
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}});
-
- // move a chunk range to the non-primary shard
- s.adminCommand({
- moveChunk: "test.foo",
- find: {_id: 3},
- to: s.getNonPrimaries("test")[0],
- _waitForDelete: true
- });
-
- // restart balancer
- s.startBalancer();
-
- // insert 10k small documents into the sharded collection
- var bulk = dbForTest.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
-
- var x = dbForTest.foo.stats();
-
- // verify the colleciton has been sharded and documents are evenly distributed
- assert.eq("test.foo", x.ns, "namespace mismatch");
- assert(x.sharded, "collection is not sharded");
- assert.eq(numDocs, x.count, "total count");
- assert.eq(numDocs / 2, x.shards[s.shard0.shardName].count, "count on " + s.shard0.shardName);
- assert.eq(numDocs / 2, x.shards[s.shard1.shardName].count, "count on " + s.shard1.shardName);
- assert(x.totalIndexSize > 0);
-
- // insert one doc into a non-sharded collection
- dbForTest.bar.insert({x: 1});
- var x = dbForTest.bar.stats();
- assert.eq(1, x.count, "XXX1");
- assert.eq("test.bar", x.ns, "XXX2");
- assert(!x.sharded, "XXX3: " + tojson(x));
-
- // fork shell and start querying the data
- var start = new Date();
-
- var whereKillSleepTime = 1000;
- var parallelCommand = "db.foo.find(function() { " + " sleep(" + whereKillSleepTime + "); " +
- " return false; " + "}).itcount(); ";
-
- // fork a parallel shell, but do not wait for it to start
- print("about to fork new shell at: " + Date());
- var awaitShell = startParallelShell(parallelCommand, s.s.port);
- print("done forking shell at: " + Date());
-
- // Get all current $where operations
- function getInProgWhereOps() {
- let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
- let inProgressStr = '';
-
- // Find all the where queries
- var myProcs = [];
- while (inProgressOps.hasNext()) {
- let op = inProgressOps.next();
- inProgressStr += tojson(op);
- if (op.command && op.command.filter && op.command.filter.$where) {
- myProcs.push(op);
- }
- }
-
- if (myProcs.length == 0) {
- print('No $where operations found: ' + inProgressStr);
- } else {
- print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
+'use strict';
+
+var s = new ShardingTest({shards: 2, mongos: 1});
+var dbForTest = s.getDB("test");
+var admin = s.getDB("admin");
+dbForTest.foo.drop();
+
+var numDocs = 10000;
+
+// shard test.foo and add a split point
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}});
+
+// move a chunk range to the non-primary shard
+s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
+});
+
+// restart balancer
+s.startBalancer();
+
+// insert 10k small documents into the sharded collection
+var bulk = dbForTest.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+
+var x = dbForTest.foo.stats();
+
+// verify the collection has been sharded and documents are evenly distributed
+assert.eq("test.foo", x.ns, "namespace mismatch");
+assert(x.sharded, "collection is not sharded");
+assert.eq(numDocs, x.count, "total count");
+assert.eq(numDocs / 2, x.shards[s.shard0.shardName].count, "count on " + s.shard0.shardName);
+assert.eq(numDocs / 2, x.shards[s.shard1.shardName].count, "count on " + s.shard1.shardName);
+assert(x.totalIndexSize > 0);
+
+// insert one doc into a non-sharded collection
+dbForTest.bar.insert({x: 1});
+var x = dbForTest.bar.stats();
+assert.eq(1, x.count, "XXX1");
+assert.eq("test.bar", x.ns, "XXX2");
+assert(!x.sharded, "XXX3: " + tojson(x));
+
+// fork shell and start querying the data
+var start = new Date();
+
+var whereKillSleepTime = 1000;
+var parallelCommand = "db.foo.find(function() { " +
+ " sleep(" + whereKillSleepTime + "); " +
+ " return false; " +
+ "}).itcount(); ";
+
+// fork a parallel shell, but do not wait for it to start
+print("about to fork new shell at: " + Date());
+var awaitShell = startParallelShell(parallelCommand, s.s.port);
+print("done forking shell at: " + Date());
+
+// Get all current $where operations
+function getInProgWhereOps() {
+ let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
+ let inProgressStr = '';
+
+ // Find all the where queries
+ var myProcs = [];
+ while (inProgressOps.hasNext()) {
+ let op = inProgressOps.next();
+ inProgressStr += tojson(op);
+ if (op.command && op.command.filter && op.command.filter.$where) {
+ myProcs.push(op);
}
-
- return myProcs;
}
- var curOpState = 0; // 0 = not found, 1 = killed
- var killTime = null;
- var mine;
-
- assert.soon(function() {
- // Get all the current operations
- mine = getInProgWhereOps();
-
- // Wait for the queries to start (one per shard, so 2 total)
- if (curOpState == 0 && mine.length == 2) {
- // queries started
- curOpState = 1;
- // kill all $where
- mine.forEach(function(z) {
- printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
- });
- killTime = new Date();
- }
- // Wait for killed queries to end
- else if (curOpState == 1 && mine.length == 0) {
- // Queries ended
- curOpState = 2;
- return true;
- }
-
- }, "Couldn't kill the $where operations.", 2 * 60 * 1000);
+ if (myProcs.length == 0) {
+ print('No $where operations found: ' + inProgressStr);
+ } else {
+ print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
+ }
- print("after loop: " + Date());
- assert(killTime, "timed out waiting too kill last mine:" + tojson(mine));
+ return myProcs;
+}
+
+var curOpState = 0; // 0 = not found, 1 = killed
+var killTime = null;
+var mine;
+
+assert.soon(function() {
+ // Get all the current operations
+ mine = getInProgWhereOps();
+
+ // Wait for the queries to start (one per shard, so 2 total)
+ if (curOpState == 0 && mine.length == 2) {
+ // queries started
+ curOpState = 1;
+ // kill all $where
+ mine.forEach(function(z) {
+ printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
+ });
+ killTime = new Date();
+ }
+ // Wait for killed queries to end
+ else if (curOpState == 1 && mine.length == 0) {
+ // Queries ended
+ curOpState = 2;
+ return true;
+ }
+}, "Couldn't kill the $where operations.", 2 * 60 * 1000);
- assert.eq(2, curOpState, "failed killing");
+print("after loop: " + Date());
+assert(killTime, "timed out waiting too kill last mine:" + tojson(mine));
- killTime = new Date().getTime() - killTime.getTime();
- print("killTime: " + killTime);
- print("time if run full: " + (numDocs * whereKillSleepTime));
- assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
+assert.eq(2, curOpState, "failed killing");
- // wait for the parallel shell we spawned to complete
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
+killTime = new Date().getTime() - killTime.getTime();
+print("killTime: " + killTime);
+print("time if run full: " + (numDocs * whereKillSleepTime));
+assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
- var end = new Date();
- print("elapsed: " + (end.getTime() - start.getTime()));
+// wait for the parallel shell we spawned to complete
+var exitCode = awaitShell({checkExitSuccess: false});
+assert.neq(0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
- // test fsync command on non-admin db
- x = dbForTest.runCommand("fsync");
- assert(!x.ok, "fsync on non-admin namespace should fail : " + tojson(x));
- assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x));
+var end = new Date();
+print("elapsed: " + (end.getTime() - start.getTime()));
- // test fsync on admin db
- x = dbForTest._adminCommand("fsync");
- assert(x.ok == 1, "fsync failed: " + tojson(x));
- if (x.all[s.shard0.shardName] > 0) {
- assert(x.numFiles > 0, "fsync failed: " + tojson(x));
- }
+// test fsync command on non-admin db
+x = dbForTest.runCommand("fsync");
+assert(!x.ok, "fsync on non-admin namespace should fail : " + tojson(x));
+assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x));
- // test fsync+lock on admin db
- x = dbForTest._adminCommand({"fsync": 1, lock: true});
- assert(!x.ok, "lock should fail: " + tojson(x));
+// test fsync on admin db
+x = dbForTest._adminCommand("fsync");
+assert(x.ok == 1, "fsync failed: " + tojson(x));
+if (x.all[s.shard0.shardName] > 0) {
+ assert(x.numFiles > 0, "fsync failed: " + tojson(x));
+}
- s.stop();
+// test fsync+lock on admin db
+x = dbForTest._adminCommand({"fsync": 1, lock: true});
+assert(!x.ok, "lock should fail: " + tojson(x));
+s.stop();
})();
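The kill loop above is the general pattern for terminating operations discovered
via $currentOp. A condensed sketch, assuming the same `admin` database handle used
in the test; illustrative only:

admin.aggregate([{$currentOp: {allUsers: true}}]).forEach(function(op) {
    // $where predicates appear under command.filter.$where in currentOp output.
    if (op.command && op.command.filter && op.command.filter.$where) {
        printjson(admin.killOp(op.opid));
    }
});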
diff --git a/jstests/sharding/find_and_modify_after_multi_write.js b/jstests/sharding/find_and_modify_after_multi_write.js
index 749f999c54c..81c6db44a3f 100644
--- a/jstests/sharding/find_and_modify_after_multi_write.js
+++ b/jstests/sharding/find_and_modify_after_multi_write.js
@@ -1,73 +1,72 @@
(function() {
- "use strict";
+"use strict";
- /**
- * Test that a targetted findAndModify will be properly routed after executing a write that
- * does not perform any shard version checks.
- */
- var runTest = function(writeFunc) {
- var st = new ShardingTest({shards: 2, mongos: 2});
+/**
+ * Test that a targeted findAndModify will be properly routed after executing a write that
+ * does not perform any shard version checks.
+ */
+var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
- var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({x: 123456});
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Issue a targetted findAndModify and check that it was upserted to the right shard.
- assert.commandWorked(testDB2.runCommand(
- {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true}));
+    // Issue a targeted findAndModify and check that it was upserted to the right shard.
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true}));
- assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 100}));
- assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 100}));
+ assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 100}));
+ assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 100}));
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(testDB2.runCommand(
- {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
- assert.eq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 200}));
- assert.neq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 200}));
+ assert.eq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 200}));
+ assert.neq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 200}));
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Ensure that write commands with multi version do not reset the connection shard version
- // to
- // ignored.
- writeFunc(testDB2);
+    // Ensure that write commands with multi version do not reset the connection shard version
+    // to ignored.
+ writeFunc(testDB2);
- assert.commandWorked(testDB2.runCommand(
- {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true}));
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true}));
- assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 300}));
- assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 300}));
+ assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 300}));
+ assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 300}));
- st.stop();
- };
+ st.stop();
+};
- runTest(function(db) {
- db.user.update({}, {$inc: {y: 987654}}, false, true);
- });
-
- runTest(function(db) {
- db.user.remove({y: 'noMatch'}, false);
- });
+runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+});
+runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+});
})();
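A note on the queries above: every findAndModify includes the shard key {x: ...},
which is what lets mongos target a single shard. A hedged sketch of the expected
failure when the shard key is omitted from an upsert, assuming the same sharded
test.user collection (this is not asserted by the test itself):

var res = testDB2.runCommand(
    {findAndModify: 'user', query: {y: 1}, update: {$set: {z: 1}}, upsert: true});
// Without the shard key, the upsert cannot be targeted to one shard and should fail.
assert.commandFailed(res);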
diff --git a/jstests/sharding/find_collname_uuid_test.js b/jstests/sharding/find_collname_uuid_test.js
index 59f2e4e7674..846a24b0e21 100644
--- a/jstests/sharding/find_collname_uuid_test.js
+++ b/jstests/sharding/find_collname_uuid_test.js
@@ -2,20 +2,20 @@
* Test ClusterFindCmd with UUID for collection name fails (but does not crash)
*/
(function() {
- "use strict";
+"use strict";
- var cmdRes;
- var cursorId;
+var cmdRes;
+var cursorId;
- var st = new ShardingTest({shards: 2});
- st.stopBalancer();
+var st = new ShardingTest({shards: 2});
+st.stopBalancer();
- var db = st.s.getDB("test");
+var db = st.s.getDB("test");
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- cmdRes = db.adminCommand({find: UUID()});
- assert.commandFailed(cmdRes);
+cmdRes = db.adminCommand({find: UUID()});
+assert.commandFailed(cmdRes);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js
index e688fc0f9ca..d711c7cda53 100644
--- a/jstests/sharding/find_getmore_cmd.js
+++ b/jstests/sharding/find_getmore_cmd.js
@@ -2,163 +2,163 @@
* Test issuing raw find and getMore commands to mongos using db.runCommand().
*/
(function() {
- "use strict";
-
- var cmdRes;
- var cursorId;
-
- var st = new ShardingTest({shards: 2});
- st.stopBalancer();
-
- // Set up a collection sharded by "_id" with one chunk on each of the two shards.
- var db = st.s.getDB("test");
- var coll = db.getCollection("find_getmore_cmd");
-
- coll.drop();
- assert.writeOK(coll.insert({_id: -9, a: 4, b: "foo foo"}));
- assert.writeOK(coll.insert({_id: -5, a: 8}));
- assert.writeOK(coll.insert({_id: -1, a: 10, b: "foo"}));
- assert.writeOK(coll.insert({_id: 1, a: 5}));
- assert.writeOK(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
- assert.writeOK(coll.insert({_id: 9, a: 3}));
-
- assert.commandWorked(coll.ensureIndex({b: "text"}));
-
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}});
- assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
-
- // Find with no options.
- cmdRes = db.runCommand({find: coll.getName()});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
-
- // Find with batchSize greater than the number of docs residing on each shard. This means that a
- // getMore is required between mongos and the shell, but no getMores are issued between mongos
- // and mongod.
- cmdRes = db.runCommand({find: coll.getName(), batchSize: 4});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 4);
- cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 2);
-
- // Find with batchSize less than the number of docs residing on each shard. This time getMores
- // will be issued between mongos and mongod.
- cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 2);
- cursorId = cmdRes.cursor.id;
- cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName(), batchSize: 2});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, cursorId);
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 2);
- cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName()});
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 2);
-
- // Combine skip, limit, and sort.
- cmdRes = db.runCommand({find: coll.getName(), skip: 4, limit: 1, sort: {_id: -1}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
- assert.eq(cmdRes.cursor.firstBatch[0], {_id: -5, a: 8});
-
- // Find where adding limit/ntoreturn and skip overflows.
- var largeInt = new NumberLong('9223372036854775807');
- cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, limit: largeInt});
- assert.commandFailed(cmdRes);
- cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt});
- assert.commandFailed(cmdRes);
- cmdRes = db.runCommand(
- {find: coll.getName(), skip: largeInt, ntoreturn: largeInt, singleBatch: true});
- assert.commandFailed(cmdRes);
-
- // A predicate with $where.
- cmdRes = db.runCommand({find: coll.getName(), filter: {$where: "this._id == 5"}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
- assert.eq(cmdRes.cursor.firstBatch[0], {_id: 5, a: 20, b: "foo foo foo"});
-
- // Tailable option should result in a failure because the collection is not capped.
- cmdRes = db.runCommand({find: coll.getName(), tailable: true});
- assert.commandFailed(cmdRes);
-
- // $natural sort.
- cmdRes = db.runCommand({find: coll.getName(), sort: {$natural: 1}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
-
- // Should be able to sort despite projecting out the sort key.
- cmdRes = db.runCommand({find: coll.getName(), sort: {a: 1}, projection: {_id: 1}});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
- assert.eq(cmdRes.cursor.firstBatch[0], {_id: 9});
- assert.eq(cmdRes.cursor.firstBatch[1], {_id: -9});
- assert.eq(cmdRes.cursor.firstBatch[2], {_id: 1});
- assert.eq(cmdRes.cursor.firstBatch[3], {_id: -5});
- assert.eq(cmdRes.cursor.firstBatch[4], {_id: -1});
- assert.eq(cmdRes.cursor.firstBatch[5], {_id: 5});
-
- // Ensure textScore meta-sort works in mongos.
- cmdRes = db.runCommand({
- find: coll.getName(),
- filter: {$text: {$search: "foo"}},
- sort: {score: {$meta: "textScore"}},
- projection: {score: {$meta: "textScore"}}
- });
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 3);
- assert.eq(cmdRes.cursor.firstBatch[0]["_id"], 5);
- assert.eq(cmdRes.cursor.firstBatch[1]["_id"], -9);
- assert.eq(cmdRes.cursor.firstBatch[2]["_id"], -1);
-
- // User projection on $sortKey is illegal.
- cmdRes = db.runCommand({find: coll.getName(), projection: {$sortKey: 1}, sort: {_id: 1}});
- assert.commandFailed(cmdRes);
- cmdRes = db.runCommand(
- {find: coll.getName(), projection: {$sortKey: {$meta: 'sortKey'}}, sort: {_id: 1}});
- assert.commandFailed(cmdRes);
-
- // User should be able to issue a sortKey meta-projection, as long as it's not on the reserved
- // $sortKey field.
- cmdRes = db.runCommand({
- find: coll.getName(),
- projection: {_id: 0, a: 0, b: 0, key: {$meta: 'sortKey'}},
- sort: {_id: 1}
- });
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 6);
- assert.eq(cmdRes.cursor.firstBatch[0], {key: {"": -9}});
- assert.eq(cmdRes.cursor.firstBatch[1], {key: {"": -5}});
- assert.eq(cmdRes.cursor.firstBatch[2], {key: {"": -1}});
- assert.eq(cmdRes.cursor.firstBatch[3], {key: {"": 1}});
- assert.eq(cmdRes.cursor.firstBatch[4], {key: {"": 5}});
- assert.eq(cmdRes.cursor.firstBatch[5], {key: {"": 9}});
-
- st.stop();
+"use strict";
+
+var cmdRes;
+var cursorId;
+
+var st = new ShardingTest({shards: 2});
+st.stopBalancer();
+
+// Set up a collection sharded by "_id" with one chunk on each of the two shards.
+var db = st.s.getDB("test");
+var coll = db.getCollection("find_getmore_cmd");
+
+coll.drop();
+assert.writeOK(coll.insert({_id: -9, a: 4, b: "foo foo"}));
+assert.writeOK(coll.insert({_id: -5, a: 8}));
+assert.writeOK(coll.insert({_id: -1, a: 10, b: "foo"}));
+assert.writeOK(coll.insert({_id: 1, a: 5}));
+assert.writeOK(coll.insert({_id: 5, a: 20, b: "foo foo foo"}));
+assert.writeOK(coll.insert({_id: 9, a: 3}));
+
+assert.commandWorked(coll.ensureIndex({b: "text"}));
+
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}});
+assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+
+// Find with no options.
+cmdRes = db.runCommand({find: coll.getName()});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+
+// Find with batchSize greater than the number of docs residing on each shard. This means that a
+// getMore is required between mongos and the shell, but no getMores are issued between mongos
+// and mongod.
+cmdRes = db.runCommand({find: coll.getName(), batchSize: 4});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 4);
+cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 2);
+
+// Find with batchSize less than the number of docs residing on each shard. This time getMores
+// will be issued between mongos and mongod.
+cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
+assert.commandWorked(cmdRes);
+assert.gt(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 2);
+cursorId = cmdRes.cursor.id;
+cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName(), batchSize: 2});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, cursorId);
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 2);
+cmdRes = db.runCommand({getMore: cursorId, collection: coll.getName()});
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.nextBatch.length, 2);
+
+// Combine skip, limit, and sort.
+cmdRes = db.runCommand({find: coll.getName(), skip: 4, limit: 1, sort: {_id: -1}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 1);
+assert.eq(cmdRes.cursor.firstBatch[0], {_id: -5, a: 8});
+
+// Find where adding limit/ntoreturn and skip overflows.
+var largeInt = new NumberLong('9223372036854775807');
+cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, limit: largeInt});
+assert.commandFailed(cmdRes);
+cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt});
+assert.commandFailed(cmdRes);
+cmdRes =
+ db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt, singleBatch: true});
+assert.commandFailed(cmdRes);
+
+// A predicate with $where.
+cmdRes = db.runCommand({find: coll.getName(), filter: {$where: "this._id == 5"}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 1);
+assert.eq(cmdRes.cursor.firstBatch[0], {_id: 5, a: 20, b: "foo foo foo"});
+
+// Tailable option should result in a failure because the collection is not capped.
+cmdRes = db.runCommand({find: coll.getName(), tailable: true});
+assert.commandFailed(cmdRes);
+
+// $natural sort.
+cmdRes = db.runCommand({find: coll.getName(), sort: {$natural: 1}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+
+// Should be able to sort despite projecting out the sort key.
+cmdRes = db.runCommand({find: coll.getName(), sort: {a: 1}, projection: {_id: 1}});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+assert.eq(cmdRes.cursor.firstBatch[0], {_id: 9});
+assert.eq(cmdRes.cursor.firstBatch[1], {_id: -9});
+assert.eq(cmdRes.cursor.firstBatch[2], {_id: 1});
+assert.eq(cmdRes.cursor.firstBatch[3], {_id: -5});
+assert.eq(cmdRes.cursor.firstBatch[4], {_id: -1});
+assert.eq(cmdRes.cursor.firstBatch[5], {_id: 5});
+
+// Ensure textScore meta-sort works in mongos.
+cmdRes = db.runCommand({
+ find: coll.getName(),
+ filter: {$text: {$search: "foo"}},
+ sort: {score: {$meta: "textScore"}},
+ projection: {score: {$meta: "textScore"}}
+});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 3);
+assert.eq(cmdRes.cursor.firstBatch[0]["_id"], 5);
+assert.eq(cmdRes.cursor.firstBatch[1]["_id"], -9);
+assert.eq(cmdRes.cursor.firstBatch[2]["_id"], -1);
+
+// User projection on $sortKey is illegal.
+cmdRes = db.runCommand({find: coll.getName(), projection: {$sortKey: 1}, sort: {_id: 1}});
+assert.commandFailed(cmdRes);
+cmdRes = db.runCommand(
+ {find: coll.getName(), projection: {$sortKey: {$meta: 'sortKey'}}, sort: {_id: 1}});
+assert.commandFailed(cmdRes);
+
+// User should be able to issue a sortKey meta-projection, as long as it's not on the reserved
+// $sortKey field.
+cmdRes = db.runCommand({
+ find: coll.getName(),
+ projection: {_id: 0, a: 0, b: 0, key: {$meta: 'sortKey'}},
+ sort: {_id: 1}
+});
+assert.commandWorked(cmdRes);
+assert.eq(cmdRes.cursor.id, NumberLong(0));
+assert.eq(cmdRes.cursor.ns, coll.getFullName());
+assert.eq(cmdRes.cursor.firstBatch.length, 6);
+assert.eq(cmdRes.cursor.firstBatch[0], {key: {"": -9}});
+assert.eq(cmdRes.cursor.firstBatch[1], {key: {"": -5}});
+assert.eq(cmdRes.cursor.firstBatch[2], {key: {"": -1}});
+assert.eq(cmdRes.cursor.firstBatch[3], {key: {"": 1}});
+assert.eq(cmdRes.cursor.firstBatch[4], {key: {"": 5}});
+assert.eq(cmdRes.cursor.firstBatch[5], {key: {"": 9}});
+
+st.stop();
})();
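The batchSize cases above all reduce to the same drain loop. A compact sketch that
reuses the `coll` handle from the test; illustrative only:

var res = assert.commandWorked(db.runCommand({find: coll.getName(), batchSize: 2}));
var docs = res.cursor.firstBatch;
while (res.cursor.id.toNumber() !== 0) {
    // A nonzero cursor id means more batches remain on the server.
    res = assert.commandWorked(
        db.runCommand({getMore: res.cursor.id, collection: coll.getName(), batchSize: 2}));
    docs = docs.concat(res.cursor.nextBatch);
}
assert.eq(6, docs.length);  // all six documents eventually arrive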
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 59d2745861f..001a9a386d7 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,80 +1,78 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2});
-
- // Make sure that findAndModify with upsert against a non-existent database and collection will
- // implicitly create them both
- assert.eq(undefined,
- assert.commandWorked(s.s0.adminCommand({listDatabases: 1, nameOnly: 1}))
- .databases.find((dbInfo) => {
- return (dbInfo.name === 'NewUnshardedDB');
- }));
-
- var newlyCreatedDb = s.getDB('NewUnshardedDB');
- assert.eq(0, newlyCreatedDb.unsharded_coll.find({}).itcount());
- newlyCreatedDb.unsharded_coll.findAndModify(
- {query: {_id: 1}, update: {$set: {Value: 'Value'}}, upsert: true});
- assert.eq(1, newlyCreatedDb.unsharded_coll.find({}).itcount());
-
- // Tests with sharded database
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.sharded_coll", key: {_id: 1}}));
-
- var db = s.getDB('test');
-
- var numObjs = 20;
-
- // Pre-split the collection so to avoid interference from auto-split
- assert.commandWorked(
- s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: numObjs / 2}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.sharded_coll", find: {_id: numObjs / 2}, to: s.shard0.shardName}));
-
- var bulk = db.sharded_coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numObjs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
-
- // Put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
- for (var i = 2; i < numObjs; i += 2) {
- if (i == numObjs / 2)
- continue;
-
- assert.commandWorked(s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: i}}));
- }
-
- s.printChunks();
- assert.eq(
- numObjs / 2, s.config.chunks.count({"ns": "test.sharded_coll"}), 'Split was incorrect');
- assert.eq(numObjs / 4,
- s.config.chunks.count({shard: s.shard0.shardName, "ns": "test.sharded_coll"}));
- assert.eq(numObjs / 4,
- s.config.chunks.count({shard: s.shard1.shardName, "ns": "test.sharded_coll"}));
-
- // update
- for (var i = 0; i < numObjs; i++) {
- assert.eq(db.sharded_coll.count({b: 1}), i, "2 A");
-
- var out = db.sharded_coll.findAndModify({query: {_id: i, b: null}, update: {$set: {b: 1}}});
- assert.eq(out._id, i, "2 E");
-
- assert.eq(db.sharded_coll.count({b: 1}), i + 1, "2 B");
- }
-
- // remove
- for (var i = 0; i < numObjs; i++) {
- assert.eq(db.sharded_coll.count(), numObjs - i, "3 A");
- assert.eq(db.sharded_coll.count({_id: i}), 1, "3 B");
-
- var out = db.sharded_coll.findAndModify({remove: true, query: {_id: i}});
-
- assert.eq(db.sharded_coll.count(), numObjs - i - 1, "3 C");
- assert.eq(db.sharded_coll.count({_id: i}), 0, "3 D");
- assert.eq(out._id, i, "3 E");
- }
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({shards: 2});
+
+// Make sure that findAndModify with upsert against a non-existent database and collection will
+// implicitly create them both
+assert.eq(undefined,
+ assert.commandWorked(s.s0.adminCommand({listDatabases: 1, nameOnly: 1}))
+ .databases.find((dbInfo) => {
+ return (dbInfo.name === 'NewUnshardedDB');
+ }));
+
+var newlyCreatedDb = s.getDB('NewUnshardedDB');
+assert.eq(0, newlyCreatedDb.unsharded_coll.find({}).itcount());
+newlyCreatedDb.unsharded_coll.findAndModify(
+ {query: {_id: 1}, update: {$set: {Value: 'Value'}}, upsert: true});
+assert.eq(1, newlyCreatedDb.unsharded_coll.find({}).itcount());
+
+// Tests with sharded database
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.sharded_coll", key: {_id: 1}}));
+
+var db = s.getDB('test');
+
+var numObjs = 20;
+
+// Pre-split the collection to avoid interference from auto-split
+assert.commandWorked(s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: numObjs / 2}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.sharded_coll", find: {_id: numObjs / 2}, to: s.shard0.shardName}));
+
+var bulk = db.sharded_coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+
+// Put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
+for (var i = 2; i < numObjs; i += 2) {
+ if (i == numObjs / 2)
+ continue;
+
+ assert.commandWorked(s.s0.adminCommand({split: "test.sharded_coll", middle: {_id: i}}));
+}
+
+s.printChunks();
+assert.eq(numObjs / 2, s.config.chunks.count({"ns": "test.sharded_coll"}), 'Split was incorrect');
+assert.eq(numObjs / 4,
+ s.config.chunks.count({shard: s.shard0.shardName, "ns": "test.sharded_coll"}));
+assert.eq(numObjs / 4,
+ s.config.chunks.count({shard: s.shard1.shardName, "ns": "test.sharded_coll"}));
+
+// update
+for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.sharded_coll.count({b: 1}), i, "2 A");
+
+ var out = db.sharded_coll.findAndModify({query: {_id: i, b: null}, update: {$set: {b: 1}}});
+ assert.eq(out._id, i, "2 E");
+
+ assert.eq(db.sharded_coll.count({b: 1}), i + 1, "2 B");
+}
+
+// remove
+for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.sharded_coll.count(), numObjs - i, "3 A");
+ assert.eq(db.sharded_coll.count({_id: i}), 1, "3 B");
+
+ var out = db.sharded_coll.findAndModify({remove: true, query: {_id: i}});
+
+ assert.eq(db.sharded_coll.count(), numObjs - i - 1, "3 C");
+ assert.eq(db.sharded_coll.count({_id: i}), 0, "3 D");
+ assert.eq(out._id, i, "3 E");
+}
+
+s.stop();
})();
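The "3 E" checks rely on findAndModify returning the document it affected. A
minimal sketch of that contract against the same db.sharded_coll; illustrative
only:

db.sharded_coll.insert({_id: 100});
var removed = db.sharded_coll.findAndModify({query: {_id: 100}, remove: true});
assert.eq(100, removed._id);                      // the removed document is returned
assert.eq(0, db.sharded_coll.count({_id: 100}));  // and it is gone afterwards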
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index a1aa58ffefb..17af6c1a685 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,124 +1,124 @@
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
-
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
-
- var db = s.getDB("test");
- s.ensurePrimaryShard('test', s.shard1.shardName);
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- var n = 100;
- var collection = "stuff";
- var minChunks = 2;
-
- var col_update = collection + '_col_update';
- var col_update_upsert = col_update + '_upsert';
- var col_fam = collection + '_col_fam';
- var col_fam_upsert = col_fam + '_upsert';
-
- var big = "x";
- for (var i = 0; i < 15; i++) {
- big += big;
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+
+var db = s.getDB("test");
+s.ensurePrimaryShard('test', s.shard1.shardName);
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+var n = 100;
+var collection = "stuff";
+var minChunks = 2;
+
+var col_update = collection + '_col_update';
+var col_update_upsert = col_update + '_upsert';
+var col_fam = collection + '_col_fam';
+var col_fam_upsert = col_fam + '_upsert';
+
+var big = "x";
+for (var i = 0; i < 15; i++) {
+ big += big;
+}
+
+// drop the collection
+db[col_update].drop();
+db[col_update_upsert].drop();
+db[col_fam].drop();
+db[col_fam_upsert].drop();
+
+// shard the collection on _id
+s.adminCommand({shardcollection: 'test.' + col_update, key: {_id: 1}});
+s.adminCommand({shardcollection: 'test.' + col_update_upsert, key: {_id: 1}});
+s.adminCommand({shardcollection: 'test.' + col_fam, key: {_id: 1}});
+s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
+
+// update via findAndModify
+function via_fam() {
+ for (var i = 0; i < n; i++) {
+ db[col_fam].save({_id: i});
}
- // drop the collection
- db[col_update].drop();
- db[col_update_upsert].drop();
- db[col_fam].drop();
- db[col_fam_upsert].drop();
-
- // shard the collection on _id
- s.adminCommand({shardcollection: 'test.' + col_update, key: {_id: 1}});
- s.adminCommand({shardcollection: 'test.' + col_update_upsert, key: {_id: 1}});
- s.adminCommand({shardcollection: 'test.' + col_fam, key: {_id: 1}});
- s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
-
- // update via findAndModify
- function via_fam() {
- for (var i = 0; i < n; i++) {
- db[col_fam].save({_id: i});
- }
-
- for (var i = 0; i < n; i++) {
- db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
}
+}
- // upsert via findAndModify
- function via_fam_upsert() {
- for (var i = 0; i < n; i++) {
- db[col_fam_upsert].findAndModify(
- {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
- }
+// upsert via findAndModify
+function via_fam_upsert() {
+ for (var i = 0; i < n; i++) {
+ db[col_fam_upsert].findAndModify(
+ {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
}
+}
- // update data using basic update
- function via_update() {
- for (var i = 0; i < n; i++) {
- db[col_update].save({_id: i});
- }
+// update data using basic update
+function via_update() {
+ for (var i = 0; i < n; i++) {
+ db[col_update].save({_id: i});
+ }
- for (var i = 0; i < n; i++) {
- db[col_update].update({_id: i}, {$set: {big: big}});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_update].update({_id: i}, {$set: {big: big}});
}
+}
- // upsert data using basic update
- function via_update_upsert() {
- for (var i = 0; i < n; i++) {
- db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
- }
+// upsert data using basic update
+function via_update_upsert() {
+ for (var i = 0; i < n; i++) {
+ db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
}
+}
- print("---------- Update via findAndModify...");
- via_fam();
- waitForOngoingChunkSplits(s);
+print("---------- Update via findAndModify...");
+via_fam();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Upsert via findAndModify...");
- via_fam_upsert();
- waitForOngoingChunkSplits(s);
+print("---------- Upsert via findAndModify...");
+via_fam_upsert();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Basic update...");
- via_update();
- waitForOngoingChunkSplits(s);
+print("---------- Basic update...");
+via_update();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Basic update with upsert...");
- via_update_upsert();
- waitForOngoingChunkSplits(s);
+print("---------- Basic update with upsert...");
+via_update_upsert();
+waitForOngoingChunkSplits(s);
- print("---------- Done.");
+print("---------- Done.");
- print("---------- Printing chunks:");
- s.printChunks();
+print("---------- Printing chunks:");
+s.printChunks();
- print("---------- Verifying that both codepaths resulted in splits...");
- assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
- minChunks,
- "findAndModify update code path didn't result in splits");
- assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
- minChunks,
- "findAndModify upsert code path didn't result in splits");
- assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
- minChunks,
- "update code path didn't result in splits");
- assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
- minChunks,
- "upsert code path didn't result in splits");
+print("---------- Verifying that both codepaths resulted in splits...");
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
+ minChunks,
+ "findAndModify update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
+ minChunks,
+ "findAndModify upsert code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
+ minChunks,
+ "update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
+ minChunks,
+ "upsert code path didn't result in splits");
- printjson(db[col_update].stats());
+printjson(db[col_update].stats());
- // ensure that all chunks are smaller than chunkSize
- // make sure not teensy
- // test update without upsert and with upsert
+// ensure that all chunks are smaller than chunkSize
+// make sure chunks are not trivially small
+// test update without upsert and with upsert
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index 260c37fea0d..0548e74f027 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -5,47 +5,47 @@
load("jstests/libs/geo_near_random.js");
(function() {
- 'use strict';
-
- var testName = "geo_near_random1";
- var s = new ShardingTest({shards: 3});
-
- var db = s.getDB("test");
-
- var test = new GeoNearRandomTest(testName, db);
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
-
- test.insertPts(50);
- var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
- for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
- assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
- try {
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: ('test.' + testName),
- find: {_id: i - 1},
- to: (shardList[i % 3]),
- _waitForDelete: true
- }));
- } catch (e) {
- // ignore this error
- if (!e.message.match(/that chunk is already on that shard/)) {
- throw e;
- }
+'use strict';
+
+var testName = "geo_near_random1";
+var s = new ShardingTest({shards: 3});
+
+var db = s.getDB("test");
+
+var test = new GeoNearRandomTest(testName, db);
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
+
+test.insertPts(50);
+var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
+for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
+ try {
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: (shardList[i % 3]),
+ _waitForDelete: true
+ }));
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
}
}
+}
- // Turn balancer back on, for actual tests
- // s.startBalancer(); // SERVER-13365
+// Turn balancer back on, for actual tests
+// s.startBalancer(); // SERVER-13365
- var opts = {};
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
+var opts = {};
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index 0b874e5aafe..43b89b77392 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -5,54 +5,54 @@
load("jstests/libs/geo_near_random.js");
(function() {
- 'use strict';
-
- var testName = "geo_near_random2";
- var s = new ShardingTest({shards: 3});
-
- var db = s.getDB("test");
-
- var test = new GeoNearRandomTest(testName, db);
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
-
- test.insertPts(5000);
- var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
- for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
- assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
- try {
- assert.commandWorked(s.s0.adminCommand({
- moveChunk: ('test.' + testName),
- find: {_id: i - 1},
- to: shardList[i % 3],
- _waitForDelete: true
- }));
- } catch (e) {
- // ignore this error
- if (!e.message.match(/that chunk is already on that shard/)) {
- throw e;
- }
+'use strict';
+
+var testName = "geo_near_random2";
+var s = new ShardingTest({shards: 3});
+
+var db = s.getDB("test");
+
+var test = new GeoNearRandomTest(testName, db);
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}}));
+
+test.insertPts(5000);
+var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
+for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}}));
+ try {
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: shardList[i % 3],
+ _waitForDelete: true
+ }));
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
}
}
-
- // Turn balancer back on, for actual tests
- // s.startBalancer(); // SERVER-13365
-
- var opts = {sphere: 0, nToTest: test.nPts * 0.01};
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
-
- opts.sphere = 1;
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
-
- s.stop();
+}
+
+// Turn balancer back on, for actual tests
+// s.startBalancer(); // SERVER-13365
+
+var opts = {sphere: 0, nToTest: test.nPts * 0.01};
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1;
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
+s.stop();
})();
diff --git a/jstests/sharding/geo_near_sharded.js b/jstests/sharding/geo_near_sharded.js
index 361468cec18..714876e7fda 100644
--- a/jstests/sharding/geo_near_sharded.js
+++ b/jstests/sharding/geo_near_sharded.js
@@ -1,67 +1,67 @@
// SERVER-7906
(function() {
- 'use strict';
+'use strict';
- var coll = 'points';
+var coll = 'points';
- function test(st, db, sharded, indexType) {
- printjson(db);
+function test(st, db, sharded, indexType) {
+ printjson(db);
- if (sharded) {
- var shards = [st.shard0, st.shard1, st.shard2];
+ if (sharded) {
+ var shards = [st.shard0, st.shard1, st.shard2];
+ assert.commandWorked(
+ st.s0.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
+ for (var i = 1; i < 10; i++) {
+ // split at 0.1, 0.2, ... 0.9
assert.commandWorked(
- st.s0.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
- for (var i = 1; i < 10; i++) {
- // split at 0.1, 0.2, ... 0.9
- assert.commandWorked(
- st.s0.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: db[coll].getFullName(),
- find: {rand: i / 10},
- to: shards[i % shards.length].shardName
- }));
- }
-
- var config = db.getSiblingDB("config");
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ st.s0.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
+ assert.commandWorked(st.s0.adminCommand({
+ moveChunk: db[coll].getFullName(),
+ find: {rand: i / 10},
+ to: shards[i % shards.length].shardName
+ }));
}
- Random.setRandomSeed();
-
- var bulk = db[coll].initializeUnorderedBulkOp();
- var numPts = 10 * 1000;
- for (var i = 0; i < numPts; i++) {
- var lat = 90 - Random.rand() * 180;
- var lng = 180 - Random.rand() * 360;
- bulk.insert({rand: Math.random(), loc: [lng, lat]});
- }
- assert.writeOK(bulk.execute());
- assert.eq(db[coll].count(), numPts);
+ var config = db.getSiblingDB("config");
+ assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ }
- assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
+ Random.setRandomSeed();
- let res = assert.commandWorked(db.runCommand({
- aggregate: coll,
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: [0, 0],
- spherical: true,
- includeLocs: "match",
- distanceField: "dist",
- }
- }]
- }),
- tojson({sharded: sharded, indexType: indexType}));
- assert.gt(res.cursor.firstBatch.length, 0, tojson(res));
+ var bulk = db[coll].initializeUnorderedBulkOp();
+ var numPts = 10 * 1000;
+ for (var i = 0; i < numPts; i++) {
+ var lat = 90 - Random.rand() * 180;
+ var lng = 180 - Random.rand() * 360;
+ bulk.insert({rand: Math.random(), loc: [lng, lat]});
}
+ assert.writeOK(bulk.execute());
+ assert.eq(db[coll].count(), numPts);
+
+ assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
+
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: coll,
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: [0, 0],
+ spherical: true,
+ includeLocs: "match",
+ distanceField: "dist",
+ }
+ }]
+ }),
+ tojson({sharded: sharded, indexType: indexType}));
+ assert.gt(res.cursor.firstBatch.length, 0, tojson(res));
+}
- // TODO: SERVER-33954 Remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 3, mongos: 1, other: {shardAsReplicaSet: false}});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+// TODO: SERVER-33954 Remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 3, mongos: 1, other: {shardAsReplicaSet: false}});
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- test(st, st.getDB('test'), true, '2dsphere');
- st.stop();
+test(st, st.getDB('test'), true, '2dsphere');
+st.stop();
})();
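
For quick reference, the following is a minimal, self-contained sketch of the $geoNear aggregation this test drives, runnable in a mongo shell against any mongod or mongos; the `points` collection and its contents are illustrative, not part of the test. $geoNear requires a geospatial index and must be the first stage of the pipeline.

    // Minimal sketch (illustrative names): $geoNear needs a geo index and
    // must appear as the first pipeline stage.
    db.points.drop();
    assert.commandWorked(db.points.ensureIndex({loc: "2dsphere"}));
    assert.writeOK(db.points.insert({loc: [10, 10]}));
    var res = assert.commandWorked(db.runCommand({
        aggregate: "points",
        cursor: {},
        pipeline: [{
            $geoNear: {
                near: [0, 0],
                spherical: true,
                distanceField: "dist",  // the computed distance is written here
                includeLocs: "match"    // the matched location is written here
            }
        }]
    }));
    printjson(res.cursor.firstBatch);
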
diff --git a/jstests/sharding/geo_near_sort.js b/jstests/sharding/geo_near_sort.js
index fa839a78551..e2f0292904e 100644
--- a/jstests/sharding/geo_near_sort.js
+++ b/jstests/sharding/geo_near_sort.js
@@ -1,69 +1,77 @@
// Tests that the sort specification is obeyed when the query contains $near/$nearSphere.
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2});
- const db = st.getDB("test");
- const coll = db.geo_near_sort;
- const caseInsensitive = {locale: "en_US", strength: 2};
+const st = new ShardingTest({shards: 2});
+const db = st.getDB("test");
+const coll = db.geo_near_sort;
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
- assert.commandWorked(st.s0.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
- // Split the data into 2 chunks and move the chunk with _id > 0 to shard 1.
- assert.commandWorked(st.s0.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(st.s0.adminCommand(
- {movechunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+// Split the data into 2 chunks and move the chunk with _id > 0 to shard 1.
+assert.commandWorked(st.s0.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ st.s0.adminCommand({movechunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
- // Insert some documents. The sort order by distance from the origin is [-2, 1, -1, 2] (under 2d
- // or 2dsphere geometry). The sort order by {a: 1} under the case-insensitive collation is [2,
- // -1, 1, -2]. The sort order by {b: 1} is [2. -1, 1, -2].
- const docMinus2 = {_id: -2, geo: [0, 0], a: "BB", b: 3};
- const docMinus1 = {_id: -1, geo: [0, 2], a: "aB", b: 1};
- const doc1 = {_id: 1, geo: [0, 1], a: "Ba", b: 2};
- const doc2 = {_id: 2, geo: [0, 3], a: "aa", b: 0};
- assert.writeOK(coll.insert(docMinus2));
- assert.writeOK(coll.insert(docMinus1));
- assert.writeOK(coll.insert(doc1));
- assert.writeOK(coll.insert(doc2));
+// Insert some documents. The sort order by distance from the origin is [-2, 1, -1, 2] (under 2d
+// or 2dsphere geometry). The sort order by {a: 1} under the case-insensitive collation is [2,
+// -1, 1, -2]. The sort order by {b: 1} is [2, -1, 1, -2].
+const docMinus2 = {
+ _id: -2,
+ geo: [0, 0],
+ a: "BB",
+ b: 3
+};
+const docMinus1 = {
+ _id: -1,
+ geo: [0, 2],
+ a: "aB",
+ b: 1
+};
+const doc1 = {
+ _id: 1,
+ geo: [0, 1],
+ a: "Ba",
+ b: 2
+};
+const doc2 = {
+ _id: 2,
+ geo: [0, 3],
+ a: "aa",
+ b: 0
+};
+assert.writeOK(coll.insert(docMinus2));
+assert.writeOK(coll.insert(docMinus1));
+assert.writeOK(coll.insert(doc1));
+assert.writeOK(coll.insert(doc2));
- function testSortOrders(query, indexSpec) {
- assert.commandWorked(coll.createIndex(indexSpec));
+function testSortOrders(query, indexSpec) {
+ assert.commandWorked(coll.createIndex(indexSpec));
- // Test a $near/$nearSphere query without a specified sort. The results should be sorted by
- // distance from the origin.
- let res = coll.find(query).toArray();
- assert.eq(res.length, 4, tojson(res));
- assert.eq(res[0], docMinus2, tojson(res));
- assert.eq(res[1], doc1, tojson(res));
- assert.eq(res[2], docMinus1, tojson(res));
- assert.eq(res[3], doc2, tojson(res));
-
- // Test with a limit.
- res = coll.find(query).limit(2).toArray();
- assert.eq(res.length, 2, tojson(res));
- assert.eq(res[0], docMinus2, tojson(res));
- assert.eq(res[1], doc1, tojson(res));
-
- if (db.getMongo().useReadCommands()) {
- // Test a $near/$nearSphere query sorted by {a: 1} with the case-insensitive collation.
- res = coll.find(query).collation(caseInsensitive).sort({a: 1}).toArray();
- assert.eq(res.length, 4, tojson(res));
- assert.eq(res[0], doc2, tojson(res));
- assert.eq(res[1], docMinus1, tojson(res));
- assert.eq(res[2], doc1, tojson(res));
- assert.eq(res[3], docMinus2, tojson(res));
+ // Test a $near/$nearSphere query without a specified sort. The results should be sorted by
+ // distance from the origin.
+ let res = coll.find(query).toArray();
+ assert.eq(res.length, 4, tojson(res));
+ assert.eq(res[0], docMinus2, tojson(res));
+ assert.eq(res[1], doc1, tojson(res));
+ assert.eq(res[2], docMinus1, tojson(res));
+ assert.eq(res[3], doc2, tojson(res));
- // Test with a limit.
- res = coll.find(query).collation(caseInsensitive).sort({a: 1}).limit(2).toArray();
- assert.eq(res.length, 2, tojson(res));
- assert.eq(res[0], doc2, tojson(res));
- assert.eq(res[1], docMinus1, tojson(res));
- }
+ // Test with a limit.
+ res = coll.find(query).limit(2).toArray();
+ assert.eq(res.length, 2, tojson(res));
+ assert.eq(res[0], docMinus2, tojson(res));
+ assert.eq(res[1], doc1, tojson(res));
- // Test a $near/$nearSphere query sorted by {b: 1}.
- res = coll.find(query).sort({b: 1}).toArray();
+ if (db.getMongo().useReadCommands()) {
+ // Test a $near/$nearSphere query sorted by {a: 1} with the case-insensitive collation.
+ res = coll.find(query).collation(caseInsensitive).sort({a: 1}).toArray();
assert.eq(res.length, 4, tojson(res));
assert.eq(res[0], doc2, tojson(res));
assert.eq(res[1], docMinus1, tojson(res));
@@ -71,20 +79,35 @@
assert.eq(res[3], docMinus2, tojson(res));
// Test with a limit.
- res = coll.find(query).sort({b: 1}).limit(2).toArray();
+ res = coll.find(query).collation(caseInsensitive).sort({a: 1}).limit(2).toArray();
assert.eq(res.length, 2, tojson(res));
assert.eq(res[0], doc2, tojson(res));
assert.eq(res[1], docMinus1, tojson(res));
-
- assert.commandWorked(coll.dropIndex(indexSpec));
}
- testSortOrders({geo: {$near: [0, 0]}}, {geo: "2d"});
- testSortOrders({geo: {$nearSphere: [0, 0]}}, {geo: "2d"});
- testSortOrders({geo: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
- {geo: "2dsphere"});
- testSortOrders({geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
- {geo: "2dsphere"});
+ // Test a $near/$nearSphere query sorted by {b: 1}.
+ res = coll.find(query).sort({b: 1}).toArray();
+ assert.eq(res.length, 4, tojson(res));
+ assert.eq(res[0], doc2, tojson(res));
+ assert.eq(res[1], docMinus1, tojson(res));
+ assert.eq(res[2], doc1, tojson(res));
+ assert.eq(res[3], docMinus2, tojson(res));
+
+ // Test with a limit.
+ res = coll.find(query).sort({b: 1}).limit(2).toArray();
+ assert.eq(res.length, 2, tojson(res));
+ assert.eq(res[0], doc2, tojson(res));
+ assert.eq(res[1], docMinus1, tojson(res));
+
+ assert.commandWorked(coll.dropIndex(indexSpec));
+}
+
+testSortOrders({geo: {$near: [0, 0]}}, {geo: "2d"});
+testSortOrders({geo: {$nearSphere: [0, 0]}}, {geo: "2d"});
+testSortOrders({geo: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
+ {geo: "2dsphere"});
+testSortOrders({geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}},
+ {geo: "2dsphere"});
- st.stop();
+st.stop();
})();
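
As a compact illustration of the behavior under test, this shell sketch (illustrative names and data, any mongod) shows that an explicit .sort() takes precedence over the implicit nearest-first ordering of $near:

    db.geo.drop();
    assert.commandWorked(db.geo.ensureIndex({loc: "2d"}));
    assert.writeOK(db.geo.insert({loc: [0, 0], b: 3}));
    assert.writeOK(db.geo.insert({loc: [0, 1], b: 2}));
    assert.writeOK(db.geo.insert({loc: [0, 2], b: 1}));

    // Default: ordered by distance from [0, 0], so b comes back as 3, 2, 1.
    printjson(db.geo.find({loc: {$near: [0, 0]}}).toArray());

    // An explicit sort wins over the distance ordering, so b comes back as 1, 2, 3.
    printjson(db.geo.find({loc: {$near: [0, 0]}}).sort({b: 1}).toArray());
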
diff --git a/jstests/sharding/graph_lookup.js b/jstests/sharding/graph_lookup.js
index bd3236f2c84..4678ba2f9a9 100644
--- a/jstests/sharding/graph_lookup.js
+++ b/jstests/sharding/graph_lookup.js
@@ -1,18 +1,18 @@
// Test aggregating a sharded collection while using $graphLookup on an unsharded collection.
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: "test.foo", key: {_id: "hashed"}}));
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: "test.foo", key: {_id: "hashed"}}));
- let db = st.s0.getDB("test");
+let db = st.s0.getDB("test");
- assert.writeOK(db.foo.insert([{}, {}, {}, {}]));
- assert.writeOK(db.bar.insert({_id: 1, x: 1}));
+assert.writeOK(db.foo.insert([{}, {}, {}, {}]));
+assert.writeOK(db.bar.insert({_id: 1, x: 1}));
- const res = db.foo
+const res = db.foo
.aggregate([{
$graphLookup: {
from: "bar",
@@ -24,17 +24,17 @@
}])
.toArray();
- assert.eq(res.length, 4);
- res.forEach(function(c) {
- assert.eq(c.res.length, 1);
- assert.eq(c.res[0]._id, 1);
- assert.eq(c.res[0].x, 1);
- });
-
- // Be sure $graphLookup is banned on sharded foreign collection.
- assert.commandWorked(st.s0.adminCommand({shardCollection: "test.baz", key: {_id: "hashed"}}));
- assert.commandWorked(db.baz.insert({_id: 1, x: 1}));
- const err = assert.throws(() => db.foo.aggregate([{
+assert.eq(res.length, 4);
+res.forEach(function(c) {
+ assert.eq(c.res.length, 1);
+ assert.eq(c.res[0]._id, 1);
+ assert.eq(c.res[0].x, 1);
+});
+
+// Be sure $graphLookup is banned on a sharded foreign collection.
+assert.commandWorked(st.s0.adminCommand({shardCollection: "test.baz", key: {_id: "hashed"}}));
+assert.commandWorked(db.baz.insert({_id: 1, x: 1}));
+const err = assert.throws(() => db.foo.aggregate([{
$graphLookup: {
from: "baz",
startWith: {$literal: 1},
@@ -43,7 +43,7 @@
as: "res"
}
}]));
- assert.eq(28769, err.code);
+assert.eq(28769, err.code);
- st.stop();
+st.stop();
})();
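
For readers unfamiliar with the stage, here is a minimal sketch of the $graphLookup shape used above (illustrative data, assuming the foreign collection `bar` is unsharded): each input document seeds a traversal at the literal value 1 and follows x-to-_id edges, accumulating visited documents into `res`.

    db.foo.drop();
    db.bar.drop();
    assert.writeOK(db.bar.insert({_id: 1, x: 1}));
    assert.writeOK(db.foo.insert({}));

    printjson(db.foo.aggregate([{
        $graphLookup: {
            from: "bar",                // the foreign collection must be unsharded
            startWith: {$literal: 1},   // seed value for the traversal
            connectFromField: "x",      // edge source field
            connectToField: "_id",      // edge target field
            as: "res"                   // matched documents land here
        }
    }]).toArray());
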
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index bffada99eef..1435611e7fb 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -1,57 +1,56 @@
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, chunkSize: 1});
+var st = new ShardingTest({shards: 2, chunkSize: 1});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
- var configDB = st.s0.getDB('config');
- var chunkCountBefore = configDB.chunks.count({ns: 'test.user'});
- assert.gt(chunkCountBefore, 1);
+var configDB = st.s0.getDB('config');
+var chunkCountBefore = configDB.chunks.count({ns: 'test.user'});
+assert.gt(chunkCountBefore, 1);
- var testDB = st.s0.getDB('test');
- for (var x = 0; x < 1000; x++) {
- testDB.user.insert({x: x});
- }
+var testDB = st.s0.getDB('test');
+for (var x = 0; x < 1000; x++) {
+ testDB.user.insert({x: x});
+}
- var chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).next();
- var min = chunkDoc.min;
- var max = chunkDoc.max;
+var chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).next();
+var min = chunkDoc.min;
+var max = chunkDoc.max;
- // Assumption: There are documents in the MinKey chunk, otherwise, splitVector will fail.
- //
- // Note: This chunk will have 267 documents if collection was presplit to 4.
- var cmdRes =
- assert.commandWorked(st.s0.adminCommand({split: 'test.user', bounds: [min, max]}),
- 'Split on bounds failed for chunk [' + tojson(chunkDoc) + ']');
+// Assumption: There are documents in the MinKey chunk; otherwise, splitVector will fail.
+//
+// Note: This chunk will have 267 documents if the collection was pre-split into 4 chunks.
+var cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', bounds: [min, max]}),
+ 'Split on bounds failed for chunk [' + tojson(chunkDoc) + ']');
- chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).skip(1).next();
+chunkDoc = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).skip(1).next();
- var middle = NumberLong(chunkDoc.min.x + 1000000);
- cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: middle}}),
- 'Split failed with middle [' + middle + ']');
+var middle = NumberLong(chunkDoc.min.x + 1000000);
+cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: middle}}),
+ 'Split failed with middle [' + middle + ']');
- cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}),
- 'Split failed with find.');
+cmdRes = assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}),
+ 'Split failed with find.');
- var chunkList = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).toArray();
- assert.eq(chunkCountBefore + 3, chunkList.length);
+var chunkList = configDB.chunks.find({ns: 'test.user'}).sort({min: 1}).toArray();
+assert.eq(chunkCountBefore + 3, chunkList.length);
- chunkList.forEach(function(chunkToMove) {
- var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+chunkList.forEach(function(chunkToMove) {
+ var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
- print('Moving chunk ' + chunkToMove._id + ' from shard ' + chunkToMove.shard + ' to ' +
- toShard + ' ...');
+ print('Moving chunk ' + chunkToMove._id + ' from shard ' + chunkToMove.shard + ' to ' +
+ toShard + ' ...');
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'test.user',
- bounds: [chunkToMove.min, chunkToMove.max],
- to: toShard,
- _waitForDelete: true
- }));
- });
+ assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'test.user',
+ bounds: [chunkToMove.min, chunkToMove.max],
+ to: toShard,
+ _waitForDelete: true
+ }));
+});
- st.stop();
+st.stop();
})();
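
The test above exercises all three mutually exclusive ways of naming a split point. A condensed sketch, assuming a ShardingTest handle `st` and a chunk document `chunkDoc` read from config.chunks (namespace and values illustrative):

    // Bisect a specific chunk: splitVector picks the median key inside the bounds.
    assert.commandWorked(
        st.s0.adminCommand({split: 'test.user', bounds: [chunkDoc.min, chunkDoc.max]}));

    // Split at an exact shard key value (a hashed value for hashed shard keys).
    assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: NumberLong(0)}}));

    // Split the chunk that owns the document matching the query.
    assert.commandWorked(st.s0.adminCommand({split: 'test.user', find: {x: 7}}));
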
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index b551ad53df2..03bd2da845d 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -1,35 +1,34 @@
// Hash sharding with initial chunk count set.
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 3});
+var s = new ShardingTest({shards: 3});
- var dbname = "test";
- var coll = "foo";
- var db = s.getDB(dbname);
+var dbname = "test";
+var coll = "foo";
+var db = s.getDB(dbname);
- assert.commandWorked(db.adminCommand({enablesharding: dbname}));
- s.ensurePrimaryShard(dbname, s.shard1.shardName);
+assert.commandWorked(db.adminCommand({enablesharding: dbname}));
+s.ensurePrimaryShard(dbname, s.shard1.shardName);
- assert.commandWorked(db.adminCommand(
- {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500}));
+assert.commandWorked(db.adminCommand(
+ {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500}));
- s.printShardingStatus();
+s.printShardingStatus();
- var numChunks = s.config.chunks.count({"ns": "test.foo"});
- assert.eq(numChunks, 500, "should be exactly 500 chunks");
+var numChunks = s.config.chunks.count({"ns": "test.foo"});
+assert.eq(numChunks, 500, "should be exactly 500 chunks");
- s.config.shards.find().forEach(
- // Check that each shard has one third the numInitialChunks
- function(shard) {
- var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
- assert.gte(numChunksOnShard, Math.floor(500 / 3));
- });
+s.config.shards.find().forEach(
+    // Check that each shard has at least one third of the numInitialChunks
+ function(shard) {
+ var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
+ assert.gte(numChunksOnShard, Math.floor(500 / 3));
+ });
- // Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails
- // to create the collection on all shards).
- assert.commandWorked(db.runCommand({"drop": coll}),
- "couldn't drop empty, pre-split collection");
+// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails
+// to create the collection on all shards).
+assert.commandWorked(db.runCommand({"drop": coll}), "couldn't drop empty, pre-split collection");
- s.stop();
+s.stop();
})();
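
A minimal sketch of the pre-split being verified (illustrative names, assuming a ShardingTest `s` with sharding already enabled on `test`): numInitialChunks is honored only for hashed shard keys, and the initial chunks are spread roughly evenly across the shards.

    assert.commandWorked(s.getDB("test").adminCommand(
        {shardcollection: "test.presplit", key: {a: "hashed"}, numInitialChunks: 12}));

    // With 3 shards, each shard should start with about 12 / 3 = 4 chunks.
    assert.eq(12, s.config.chunks.count({ns: "test.presplit"}));
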
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 51adb85f1d5..056bf2bdbca 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -7,41 +7,41 @@
// SERVER-36321.
// @tags: [blacklist_from_rhel_67_s390x]
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 1, mongos: 1});
- var dbName = "test";
- var collName = "foo";
- var ns = dbName + "." + collName;
- var db = s.getDB(dbName);
- var coll = db.getCollection(collName);
+var s = new ShardingTest({shards: 1, mongos: 1});
+var dbName = "test";
+var collName = "foo";
+var ns = dbName + "." + collName;
+var db = s.getDB(dbName);
+var coll = db.getCollection(collName);
- // Enable sharding on DB
- assert.commandWorked(db.adminCommand({enablesharding: dbName}));
+// Enable sharding on DB
+assert.commandWorked(db.adminCommand({enablesharding: dbName}));
- // Shard a fresh collection using a hashed shard key
- assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
+// Shard a fresh collection using a hashed shard key
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
- // Create unique index
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+// Create unique index
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
- jsTest.log("------ indexes -------");
- jsTest.log(tojson(coll.getIndexes()));
+jsTest.log("------ indexes -------");
+jsTest.log(tojson(coll.getIndexes()));
- // Second Part
- jsTest.log("------ dropping sharded collection to start part 2 -------");
- coll.drop();
+// Second Part
+jsTest.log("------ dropping sharded collection to start part 2 -------");
+coll.drop();
- // Create unique index
- assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
+// Create unique index
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
- // shard a fresh collection using a hashed shard key
- assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
- "shardcollection didn't worked 2");
+// shard a fresh collection using a hashed shard key
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
+                     "shardcollection didn't work 2");
- s.printShardingStatus();
- jsTest.log("------ indexes 2-------");
- jsTest.log(tojson(coll.getIndexes()));
+s.printShardingStatus();
+jsTest.log("------ indexes 2-------");
+jsTest.log(tojson(coll.getIndexes()));
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 9ec911e6f52..0f45dbb94ae 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -2,45 +2,44 @@
* This tests the basic cases for implicit database creation in a sharded cluster.
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2});
- var configDB = st.s.getDB('config');
+var st = new ShardingTest({shards: 2});
+var configDB = st.s.getDB('config');
- assert.eq(null, configDB.databases.findOne());
+assert.eq(null, configDB.databases.findOne());
- var testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
- // Test that reads will not result into a new config.databases entry.
- assert.eq(null, testDB.user.findOne());
- assert.eq(null, configDB.databases.findOne({_id: 'test'}));
+// Test that reads will not result in a new config.databases entry.
+assert.eq(null, testDB.user.findOne());
+assert.eq(null, configDB.databases.findOne({_id: 'test'}));
- assert.writeOK(testDB.user.insert({x: 1}));
+assert.writeOK(testDB.user.insert({x: 1}));
- var testDBDoc = configDB.databases.findOne();
- assert.eq('test', testDBDoc._id, tojson(testDBDoc));
+var testDBDoc = configDB.databases.findOne();
+assert.eq('test', testDBDoc._id, tojson(testDBDoc));
- // Test that inserting to another collection in the same database will not modify the existing
- // config.databases entry.
- assert.writeOK(testDB.bar.insert({y: 1}));
- assert.eq(testDBDoc, configDB.databases.findOne());
+// Test that inserting to another collection in the same database will not modify the existing
+// config.databases entry.
+assert.writeOK(testDB.bar.insert({y: 1}));
+assert.eq(testDBDoc, configDB.databases.findOne());
- st.s.adminCommand({enableSharding: 'foo'});
- var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
+st.s.adminCommand({enableSharding: 'foo'});
+var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
- assert.neq(null, fooDBDoc);
- assert(fooDBDoc.partitioned);
+assert.neq(null, fooDBDoc);
+assert(fooDBDoc.partitioned);
- var newShardConn = MongoRunner.runMongod({'shardsvr': ""});
- var unshardedDB = newShardConn.getDB('unshardedDB');
+var newShardConn = MongoRunner.runMongod({'shardsvr': ""});
+var unshardedDB = newShardConn.getDB('unshardedDB');
- unshardedDB.user.insert({z: 1});
+unshardedDB.user.insert({z: 1});
- assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
+assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
- assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
-
- MongoRunner.stopMongod(newShardConn);
- st.stop();
+assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
+MongoRunner.stopMongod(newShardConn);
+st.stop();
})();
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 328ae508f5a..0d4971c796c 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -2,49 +2,48 @@
// doesn't cause the in-memory sort limit to be reached, then make sure the same limit also doesn't
// cause the in-memory sort limit to be reached when running through a mongos.
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
-
- // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
- // single-shard query (which doesn't exercise the bug)
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: 'test.skip', key: {_id: 'hashed'}, numInitialChunks: 64}));
-
- var mongosCol = st.s.getDB('test').getCollection('skip');
- var shardCol = st.shard0.getDB('test').getCollection('skip');
-
- // Create enough data to exceed the 32MB in-memory sort limit (per shard)
- var filler = new Array(10240).toString();
- var bulkOp = mongosCol.initializeOrderedBulkOp();
- for (var i = 0; i < 12800; i++) {
- bulkOp.insert({x: i, str: filler});
- }
- assert.writeOK(bulkOp.execute());
-
- var passLimit = 2000;
- var failLimit = 4000;
-
- // Test on MongoD
- jsTestLog("Test no error with limit of " + passLimit + " on mongod");
- assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongod");
- assert.throws(function() {
- shardCol.find().sort({x: 1}).limit(failLimit).itcount();
- });
-
- // Test on MongoS
- jsTestLog("Test no error with limit of " + passLimit + " on mongos");
- assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongos");
- assert.throws(function() {
- mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
- });
-
- st.stop();
-
+'use strict';
+
+var st = new ShardingTest({shards: 2});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+
+// Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
+// single-shard query (which doesn't exercise the bug)
+assert.commandWorked(
+ st.s.adminCommand({shardCollection: 'test.skip', key: {_id: 'hashed'}, numInitialChunks: 64}));
+
+var mongosCol = st.s.getDB('test').getCollection('skip');
+var shardCol = st.shard0.getDB('test').getCollection('skip');
+
+// Create enough data to exceed the 32MB in-memory sort limit (per shard)
+var filler = new Array(10240).toString();
+var bulkOp = mongosCol.initializeOrderedBulkOp();
+for (var i = 0; i < 12800; i++) {
+ bulkOp.insert({x: i, str: filler});
+}
+assert.writeOK(bulkOp.execute());
+
+var passLimit = 2000;
+var failLimit = 4000;
+
+// Test on MongoD
+jsTestLog("Test no error with limit of " + passLimit + " on mongod");
+assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+jsTestLog("Test error with limit of " + failLimit + " on mongod");
+assert.throws(function() {
+ shardCol.find().sort({x: 1}).limit(failLimit).itcount();
+});
+
+// Test on MongoS
+jsTestLog("Test no error with limit of " + passLimit + " on mongos");
+assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+jsTestLog("Test error with limit of " + failLimit + " on mongos");
+assert.throws(function() {
+ mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
+});
+
+st.stop();
})();
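
The limit being probed is the server's 32MB cap on in-memory (non-indexed) sorts. A standalone sketch of the same probe against a plain mongod (illustrative collection name; sizes mirror the test):

    // ~10KB per document. With a limit, the sort only has to hold the top
    // `limit` documents: 2000 (~20MB) fits under 32MB, 4000 (~40MB) does not.
    var filler = new Array(10240).toString();
    var bulk = db.skip.initializeOrderedBulkOp();
    for (var i = 0; i < 12800; i++) {
        bulk.insert({x: i, str: filler});
    }
    assert.writeOK(bulk.execute());

    assert.eq(2000, db.skip.find().sort({x: 1}).limit(2000).itcount());
    assert.throws(function() {
        db.skip.find().sort({x: 1}).limit(4000).itcount();  // exceeds the 32MB cap
    });
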
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 482137a9d03..b440c535230 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -1,363 +1,360 @@
// SERVER-2326 - make sure that sharding only works with unique indices
(function() {
- var s = new ShardingTest({name: "shard_index", shards: 2, mongos: 1});
+var s = new ShardingTest({name: "shard_index", shards: 2, mongos: 1});
- // Regenerate fully because of SERVER-2782
- for (var i = 0; i < 22; i++) {
- var coll = s.admin._mongo.getDB("test").getCollection("foo" + i);
- coll.drop();
+// Regenerate fully because of SERVER-2782
+for (var i = 0; i < 22; i++) {
+ var coll = s.admin._mongo.getDB("test").getCollection("foo" + i);
+ coll.drop();
- var bulk = coll.initializeUnorderedBulkOp();
- for (var j = 0; j < 300; j++) {
- bulk.insert({num: j, x: 1});
- }
- assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 300; j++) {
+ bulk.insert({num: j, x: 1});
+ }
+ assert.writeOK(bulk.execute());
- if (i == 0) {
- s.adminCommand({enablesharding: "" + coll._db});
- s.ensurePrimaryShard(coll.getDB().getName(), s.shard1.shardName);
- }
+ if (i == 0) {
+ s.adminCommand({enablesharding: "" + coll._db});
+ s.ensurePrimaryShard(coll.getDB().getName(), s.shard1.shardName);
+ }
+
+ print("\n\n\n\n\nTest # " + i);
+
+ if (i == 0) {
+ // Unique index exists, but not the right one.
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
- print("\n\n\n\n\nTest # " + i);
-
- if (i == 0) {
- // Unique index exists, but not the right one.
- coll.ensureIndex({num: 1}, {unique: true});
- coll.ensureIndex({x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not shard collection when another unique index exists!");
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 1) {
- // Unique index exists as prefix, also index exists
- coll.ensureIndex({x: 1});
- coll.ensureIndex({x: 1, num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard non-unique index without unique option.");
- }
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 1) {
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard non-unique index without unique option.");
}
- if (i == 2) {
- // Non-unique index exists as prefix, also index exists. No unique index.
- coll.ensureIndex({x: 1});
- coll.ensureIndex({x: 1, num: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
-
- } catch (e) {
- print(e);
- assert(
- !passed,
- "Should be able to shard collection with no unique index if unique not specified.");
- }
+ }
+ if (i == 2) {
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+
+ } catch (e) {
+ print(e);
+ assert(
+ !passed,
+ "Should be able to shard collection with no unique index if unique not specified.");
}
- if (i == 3) {
- // Unique index exists as prefix, also unique index exists
- coll.ensureIndex({num: 1}, {unique: true});
- coll.ensureIndex({num: 1, x: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with unique prefix index.");
- }
+ }
+ if (i == 3) {
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({num: 1, x: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique prefix index.");
}
- if (i == 4) {
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex({_id: 1, num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {_id: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with unique id index.");
- }
+ }
+ if (i == 4) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique id index.");
}
- if (i == 5) {
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex({_id: 1, num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {_id: 1, num: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(false,
- "Should be able to shard collection with unique combination id index.");
- }
+ }
+ if (i == 5) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1, num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique combination id index.");
+ }
+ }
+ if (i == 6) {
+ coll.remove({});
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex({num: 1, _id: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(
+ false,
+ "Should be able to shard collection with no unique index but with a unique prefix index.");
}
- if (i == 6) {
- coll.remove({});
-
- // Unique index does not exist, also unique prefix index exists
- coll.ensureIndex({num: 1, _id: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
- } catch (e) {
- print(e);
- assert(
- false,
- "Should be able to shard collection with no unique index but with a unique prefix index.");
- }
-
- printjson(coll.getIndexes());
-
- // Make sure the index created is unique!
- assert.eq(1,
- coll.getIndexes()
- .filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- })
- .length);
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
+ }
+ if (i == 7) {
+ coll.remove({});
+
+ // No index exists
+
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with no index on shard key.");
}
- if (i == 7) {
- coll.remove({});
-
- // No index exists
-
- try {
- assert.eq(coll.find().itcount(), 0);
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with no index on shard key.");
- }
+ }
+ if (i == 8) {
+ coll.remove({});
+
+ // No index exists
+
+ passed = false;
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 8) {
- coll.remove({});
-
- // No index exists
-
- passed = false;
- try {
- assert.eq(coll.find().itcount(), 0);
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(
- passed,
- "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
-
- printjson(coll.getIndexes());
-
- // Make sure the index created is unique!
- assert.eq(1,
- coll.getIndexes()
- .filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- })
- .length);
+ assert(
+ passed,
+ "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
+ }
+ if (i == 9) {
+ // Unique index exists on a different field as well
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 9) {
- // Unique index exists on a different field as well
- coll.ensureIndex({num: 1}, {unique: true});
- coll.ensureIndex({x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not shard collection when another unique index exists!");
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 10) {
+ // try sharding non-empty collection without any index
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 10) {
- // try sharding non-empty collection without any index
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard without index");
-
- // now add containing index and try sharding by prefix
- coll.ensureIndex({num: 1, x: 1});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(passed, "Should be able to shard collection with prefix of existing index");
-
- printjson(coll.getIndexes());
-
- // make sure no extra index is created
- assert.eq(2, coll.getIndexes().length);
+ assert(!passed, "Should not be able to shard without index");
+
+ // now add containing index and try sharding by prefix
+ coll.ensureIndex({num: 1, x: 1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 11) {
- coll.remove({});
+ assert(passed, "Should be able to shard collection with prefix of existing index");
- // empty collection with useful index. check new index not created
- coll.ensureIndex({num: 1, x: 1});
+ printjson(coll.getIndexes());
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(passed, "Should be able to shard collection with prefix of existing index");
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 11) {
+ coll.remove({});
- printjson(coll.getIndexes());
+ // empty collection with useful index. check new index not created
+ coll.ensureIndex({num: 1, x: 1});
- // make sure no extra index is created
- assert.eq(2, coll.getIndexes().length);
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 12) {
- // check multikey values for x make index unusable for shard key
- coll.save({num: 100, x: [2, 3]});
- coll.ensureIndex({num: 1, x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard collection with mulikey index");
+ assert(passed, "Should be able to shard collection with prefix of existing index");
+
+ printjson(coll.getIndexes());
+
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 12) {
+ // check multikey values for x make index unusable for shard key
+ coll.save({num: 100, x: [2, 3]});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 13) {
- coll.save({num: [100, 200], x: 10});
- coll.ensureIndex({num: 1, x: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard collection with mulikey index");
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 13) {
+ coll.save({num: [100, 200], x: 10});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 14) {
- coll.save({num: 100, x: 10, y: [1, 2]});
- coll.ensureIndex({num: 1, x: 1, y: 1});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard collection with mulikey index");
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 14) {
+ coll.save({num: 100, x: 10, y: [1, 2]});
+ coll.ensureIndex({num: 1, x: 1, y: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 15) {
- // try sharding with a hashed index
- coll.ensureIndex({num: "hashed"});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard collection with hashed index.");
- }
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 15) {
+ // try sharding with a hashed index
+ coll.ensureIndex({num: "hashed"});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with hashed index.");
}
- if (i == 16) {
- // create hashed index, but try to declare it unique when sharding
- coll.ensureIndex({num: "hashed"});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to declare hashed shard key unique.");
+ }
+ if (i == 16) {
+ // create hashed index, but try to declare it unique when sharding
+ coll.ensureIndex({num: "hashed"});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 17) {
- // create hashed index, but unrelated unique index present
- coll.ensureIndex({x: "hashed"});
- coll.ensureIndex({num: 1}, {unique: true});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed,
- "Should not be able to shard on hashed index with another unique index");
+ assert(!passed, "Should not be able to declare hashed shard key unique.");
+ }
+ if (i == 17) {
+ // create hashed index, but unrelated unique index present
+ coll.ensureIndex({x: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 18) {
- // create hashed index, and a regular unique index exists on same field
- coll.ensureIndex({num: "hashed"});
- coll.ensureIndex({num: 1}, {unique: true});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard coll with hashed and regular unique index");
- }
+ assert(!passed, "Should not be able to shard on hashed index with another unique index");
+ }
+ if (i == 18) {
+ // create hashed index, and a regular unique index exists on same field
+ coll.ensureIndex({num: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with hashed and regular unique index");
}
- if (i == 19) {
- // Create sparse index.
- coll.ensureIndex({x: 1}, {sparse: true});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard coll with sparse index");
+ }
+ if (i == 19) {
+ // Create sparse index.
+ coll.ensureIndex({x: 1}, {sparse: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 20) {
- // Create partial index.
- coll.ensureIndex({x: 1}, {filter: {num: {$gt: 1}}});
-
- passed = false;
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- passed = true;
- } catch (e) {
- print(e);
- }
- assert(!passed, "Should not be able to shard coll with partial index");
+ assert(!passed, "Should not be able to shard coll with sparse index");
+ }
+ if (i == 20) {
+ // Create partial index.
+ coll.ensureIndex({x: 1}, {filter: {num: {$gt: 1}}});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
}
- if (i == 21) {
- // Ensure that a collection with a normal index and a partial index can be sharded,
- // where
- // both are prefixed by the shard key.
-
- coll.ensureIndex({x: 1, num: 1}, {filter: {num: {$gt: 1}}});
- coll.ensureIndex({x: 1, num: -1});
-
- try {
- s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
- } catch (e) {
- print(e);
- assert(false, "Should be able to shard coll with regular and partial index");
- }
+ assert(!passed, "Should not be able to shard coll with partial index");
+ }
+ if (i == 21) {
+ // Ensure that a collection with a normal index and a partial index can be sharded,
+ // where
+ // both are prefixed by the shard key.
+
+ coll.ensureIndex({x: 1, num: 1}, {filter: {num: {$gt: 1}}});
+ coll.ensureIndex({x: 1, num: -1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with regular and partial index");
}
}
+}
- s.stop();
-
+s.stop();
})();
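
The rule distilled from the 22 cases above: the shard key must be the prefix of some usable (non-multikey, non-sparse, non-partial) index, and any unique index on the collection must itself be prefixed by the shard key. A short sketch, assuming a mongos with sharding enabled on `test` (collection names illustrative):

    // Allowed: the unique index {num: 1, x: 1} is prefixed by the shard key {num: 1}.
    assert.commandWorked(db.ok.ensureIndex({num: 1, x: 1}, {unique: true}));
    assert.commandWorked(
        db.adminCommand({shardcollection: "test.ok", key: {num: 1}, unique: true}));

    // Rejected: a unique index on an unrelated field cannot be enforced across
    // shards, so sharding test.bad on {x: 1} must fail.
    assert.commandWorked(db.bad.ensureIndex({num: 1}, {unique: true}));
    assert.commandWorked(db.bad.ensureIndex({x: 1}));
    assert.commandFailed(db.adminCommand({shardcollection: "test.bad", key: {x: 1}}));
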
diff --git a/jstests/sharding/index_and_collection_option_propagation.js b/jstests/sharding/index_and_collection_option_propagation.js
index 7e50856014f..114d3c57cf5 100644
--- a/jstests/sharding/index_and_collection_option_propagation.js
+++ b/jstests/sharding/index_and_collection_option_propagation.js
@@ -14,214 +14,216 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- // Helper function that runs listIndexes against shards to check for the existence of an index.
- function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) {
- function shardHasIndex(indexKey, shard) {
- const res = shard.getDB(dbName).runCommand({listIndexes: collName});
- if (res.code === ErrorCodes.NamespaceNotFound) {
- return [res, false];
- }
- assert.commandWorked(res);
- for (index of res.cursor.firstBatch) {
- if (index.key.hasOwnProperty(indexKey)) {
- return [res, true];
- }
- }
+// Helper function that runs listIndexes against shards to check for the existence of an index.
+function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) {
+ function shardHasIndex(indexKey, shard) {
+ const res = shard.getDB(dbName).runCommand({listIndexes: collName});
+ if (res.code === ErrorCodes.NamespaceNotFound) {
return [res, false];
}
-
- for (shard of shardsWithIndex) {
- [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
- assert(foundIndex,
- "expected to see index with key " + indexKey + " in listIndexes response from " +
- shard + ": " + tojson(listIndexesRes));
+ assert.commandWorked(res);
+ for (index of res.cursor.firstBatch) {
+ if (index.key.hasOwnProperty(indexKey)) {
+ return [res, true];
+ }
}
+ return [res, false];
+ }
- for (shard of shardsWithoutIndex) {
- [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
- assert(!foundIndex,
- "expected not to see index with key " + indexKey +
- " in listIndexes response from " + shard + ": " + tojson(listIndexesRes));
- }
+ for (shard of shardsWithIndex) {
+ [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
+ assert(foundIndex,
+ "expected to see index with key " + indexKey + " in listIndexes response from " +
+ shard + ": " + tojson(listIndexesRes));
}
- // Helper function that runs listCollections against shards to check for the existence of a
- // collection option.
- function checkShardCollOption(optionKey, optionValue, shardsWithOption, shardsWithoutOption) {
- function shardHasOption(optionKey, optionValue, shard) {
- const res =
- shard.getDB(dbName).runCommand({listCollections: 1, filter: {name: collName}});
- assert.commandWorked(res);
- if (res.cursor.firstBatch.length === 0) {
- return [res, false];
- }
- assert.eq(1, res.cursor.firstBatch.length);
- if (friendlyEqual(res.cursor.firstBatch[0].options[optionKey], optionValue)) {
- return [res, true];
- }
+ for (shard of shardsWithoutIndex) {
+ [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard);
+ assert(!foundIndex,
+ "expected not to see index with key " + indexKey + " in listIndexes response from " +
+ shard + ": " + tojson(listIndexesRes));
+ }
+}
+
+// Helper function that runs listCollections against shards to check for the existence of a
+// collection option.
+function checkShardCollOption(optionKey, optionValue, shardsWithOption, shardsWithoutOption) {
+ function shardHasOption(optionKey, optionValue, shard) {
+ const res = shard.getDB(dbName).runCommand({listCollections: 1, filter: {name: collName}});
+ assert.commandWorked(res);
+ if (res.cursor.firstBatch.length === 0) {
return [res, false];
}
-
- for (shard of shardsWithOption) {
- [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
- assert(foundOption,
- "expected to see option " + optionKey + " in listCollections response from " +
- shard + ": " + tojson(listCollsRes));
+ assert.eq(1, res.cursor.firstBatch.length);
+ if (friendlyEqual(res.cursor.firstBatch[0].options[optionKey], optionValue)) {
+ return [res, true];
}
+ return [res, false];
+ }
- for (shard of shardsWithoutOption) {
- [listOptionsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
- assert(!foundOption,
- "expected not to see option " + optionKey +
- " in listCollections response from " + shard + ": " + tojson(listCollsRes));
- }
+ for (shard of shardsWithOption) {
+ [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
+ assert(foundOption,
+ "expected to see option " + optionKey + " in listCollections response from " +
+ shard + ": " + tojson(listCollsRes));
}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- var st = new ShardingTest(
- {shards: {rs0: {nodes: 1}, rs1: {nodes: 1}, rs2: {nodes: 1}}, other: {config: 3}});
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.name);
-
- // When creating index or setting a collection option on an unsharded collection, only the
- // primary shard is affected.
-
- assert.commandWorked(st.s.getDB(dbName).getCollection(collName).createIndex({"idx1": 1}));
- checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
-
- const validationOption1 = {dummyField1: {$type: "string"}};
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- collMod: collName,
- validator: validationOption1,
- validationLevel: "moderate",
- validationAction: "warn"
- }));
- checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
-
- // After sharding the collection but before any migrations, only the primary shard has the
- // index and collection option.
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
- checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
- checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
-
- // After a migration, only shards that own data for the collection have the index and collection
- // option.
- assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
- checkShardIndexes("idx1", [st.shard0, st.shard1], [st.shard2]);
- checkShardCollOption("validator", validationOption1, [st.shard0, st.shard1], [st.shard2]);
-
- // Though some shards don't own data for the sharded collection, createIndex, reIndex,
- // dropIndex, and collMod (which are broadcast to all shards) report overall success (that is,
- // NamespaceNotFound-type errors from shards are ignored, and they are not included in the 'raw'
- // shard responses).
-
- var res;
-
- // createIndex
- res = st.s.getDB(dbName).getCollection(collName).createIndex({"idx2": 1});
- assert.commandWorked(res);
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
- assert.eq(undefined,
- res.raw[st.shard2.host],
- tojson(res)); // CannotImplicitlyCreateCollection is ignored
- checkShardIndexes("idx2", [st.shard0, st.shard1], [st.shard2]);
-
- // dropIndex
- res = st.s.getDB(dbName).getCollection(collName).dropIndex("idx1_1");
- assert.commandWorked(res);
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
- checkShardIndexes("idx1", [], [st.shard0, st.shard1, st.shard2]);
-
- // collMod
- const validationOption2 = {dummyField2: {$type: "string"}};
- res = st.s.getDB(dbName).runCommand({
- collMod: collName,
- validator: validationOption2,
- validationLevel: "moderate",
- validationAction: "warn"
- });
- assert.commandWorked(res);
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
- checkShardCollOption("validator", validationOption2, [st.shard0, st.shard1], [st.shard2]);
-
- // Check that errors from shards are aggregated correctly.
-
- // If no shard returns success, then errors that are usually ignored should be reported.
- res = st.s.getDB(dbName).getCollection("unshardedColl").dropIndex("nonexistentIndex");
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq(res.code, ErrorCodes.NamespaceNotFound, tojson(res));
- assert.eq("NamespaceNotFound", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- // If all shards report the same error, the overall command error should be set to that error.
- res = st.s.getDB(dbName).getCollection(collName).createIndex({});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
- assert.eq(res.raw[st.shard2.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.code, res.raw[st.shard1.host].code, tojson(res));
- assert.eq(res.code, res.raw[st.shard2.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard1.host].codeName, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard2.host].codeName, tojson(res));
- assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
- assert.eq("CannotCreateIndex", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- // If all the non-ignorable errors reported by shards are the same, the overall command error
- // should be set to that error.
- res = st.s.getDB(dbName).getCollection(collName).createIndex({z: 1}, {unique: true});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
- assert.eq(null, res.raw[st.shard2.host], tojson(res));
- assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard1.host].code, tojson(res));
- assert.eq("CannotCreateIndex", res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq("CannotCreateIndex", res.raw[st.shard1.host].codeName, tojson(res));
- assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
- assert.eq("CannotCreateIndex", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- st.rs0.stopSet();
-
- // If we receive a non-ignorable error, it should be reported as the command error.
- res = st.s.getDB(dbName).getCollection("unshardedColl").createIndex({"validIdx": 1});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- // We might see 'HostUnreachable' the first time if the mongos's ReplicaSetMonitor does not yet
- // know that the shard is down.
- assert(res.code === ErrorCodes.HostUnreachable ||
- res.code === ErrorCodes.FailedToSatisfyReadPreference,
- tojson(res));
- assert(res.codeName === "HostUnreachable" || res.codeName === "FailedToSatisfyReadPreference",
- tojson(res));
-
- // If some shard returns a non-ignorable error, it should be reported as the command error, even
- // if other shards returned ignorable errors.
- res = st.s.getDB(dbName).getCollection(collName).createIndex({"validIdx": 1});
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res)); // shard was down
- assert.eq(
- res.raw[st.shard1.host].ok, 1, tojson(res)); // gets created on shard that owns chunks
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // shard does not own chunks
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- // We can expect to see 'FailedToSatisfyReadPreference' this time, because after the previous
- // createIndexes attempt, mongos's ReplicaSetMonitor should have been updated.
- assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference, tojson(res));
- assert.eq("FailedToSatisfyReadPreference", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-
- st.stop();
+ for (shard of shardsWithoutOption) {
+ [listOptionsRes, foundOption] = shardHasOption(optionKey, optionValue, shard);
+ assert(!foundOption,
+ "expected not to see option " + optionKey + " in listCollections response from " +
+ shard + ": " + tojson(listCollsRes));
+ }
+}
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: 1}, rs1: {nodes: 1}, rs2: {nodes: 1}}, other: {config: 3}});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.name);
+
+// When creating index or setting a collection option on an unsharded collection, only the
+// primary shard is affected.
+
+assert.commandWorked(st.s.getDB(dbName).getCollection(collName).createIndex({"idx1": 1}));
+checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
+
+const validationOption1 = {
+ dummyField1: {$type: "string"}
+};
+assert.commandWorked(st.s.getDB(dbName).runCommand({
+ collMod: collName,
+ validator: validationOption1,
+ validationLevel: "moderate",
+ validationAction: "warn"
+}));
+checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
+
+// After sharding the collection but before any migrations, only the primary shard has the
+// index and collection option.
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+checkShardIndexes("idx1", [st.shard0], [st.shard1, st.shard2]);
+checkShardCollOption("validator", validationOption1, [st.shard0], [st.shard1, st.shard2]);
+
+// After a migration, only shards that own data for the collection have the index and collection
+// option.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
+checkShardIndexes("idx1", [st.shard0, st.shard1], [st.shard2]);
+checkShardCollOption("validator", validationOption1, [st.shard0, st.shard1], [st.shard2]);
+
+// Though some shards don't own data for the sharded collection, createIndex, reIndex,
+// dropIndex, and collMod (which are broadcast to all shards) report overall success (that is,
+// NamespaceNotFound-type errors from shards are ignored, and they are not included in the 'raw'
+// shard responses).
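+// For illustration (editor sketch; host strings and extra fields are hypothetical), a
+// successful broadcast createIndexes response seen through mongos looks roughly like:
+//   {ok: 1, raw: {"<shard0 host>": {ok: 1, ...}, "<shard1 host>": {ok: 1, ...}}}
+// A shard that only returned an ignorable error is simply absent from 'raw', which is
+// what the assert.eq(undefined, res.raw[...]) checks below rely on.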
+
+var res;
+
+// createIndex
+res = st.s.getDB(dbName).getCollection(collName).createIndex({"idx2": 1});
+assert.commandWorked(res);
+assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
+assert.eq(undefined,
+ res.raw[st.shard2.host],
+ tojson(res)); // CannotImplicitlyCreateCollection is ignored
+checkShardIndexes("idx2", [st.shard0, st.shard1], [st.shard2]);
+
+// dropIndex
+res = st.s.getDB(dbName).getCollection(collName).dropIndex("idx1_1");
+assert.commandWorked(res);
+assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
+checkShardIndexes("idx1", [], [st.shard0, st.shard1, st.shard2]);
+
+// collMod
+const validationOption2 = {
+ dummyField2: {$type: "string"}
+};
+res = st.s.getDB(dbName).runCommand({
+ collMod: collName,
+ validator: validationOption2,
+ validationLevel: "moderate",
+ validationAction: "warn"
+});
+assert.commandWorked(res);
+assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // NamespaceNotFound is ignored
+checkShardCollOption("validator", validationOption2, [st.shard0, st.shard1], [st.shard2]);
+
+// Check that errors from shards are aggregated correctly.
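+//
+// Rough summary of the rules exercised below (editor note, inferred from the assertions
+// rather than from any server documentation):
+//   - all shards fail with the same error    -> that error becomes the command error;
+//   - only ignorable errors, no successes    -> the ignorable error is reported;
+//   - a non-ignorable error from any shard   -> it wins over ignorable errors.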
+
+// If no shard returns success, then errors that are usually ignored should be reported.
+res = st.s.getDB(dbName).getCollection("unshardedColl").dropIndex("nonexistentIndex");
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+assert.eq(res.code, ErrorCodes.NamespaceNotFound, tojson(res));
+assert.eq("NamespaceNotFound", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+// If all shards report the same error, the overall command error should be set to that error.
+res = st.s.getDB(dbName).getCollection(collName).createIndex({});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
+assert.eq(res.raw[st.shard2.host].ok, 0, tojson(res));
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.code, res.raw[st.shard1.host].code, tojson(res));
+assert.eq(res.code, res.raw[st.shard2.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard1.host].codeName, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard2.host].codeName, tojson(res));
+assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
+assert.eq("CannotCreateIndex", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+// If all the non-ignorable errors reported by shards are the same, the overall command error
+// should be set to that error.
+res = st.s.getDB(dbName).getCollection(collName).createIndex({z: 1}, {unique: true});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
+assert.eq(null, res.raw[st.shard2.host], tojson(res));
+assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard1.host].code, tojson(res));
+assert.eq("CannotCreateIndex", res.raw[st.shard0.host].codeName, tojson(res));
+assert.eq("CannotCreateIndex", res.raw[st.shard1.host].codeName, tojson(res));
+assert.eq(res.code, ErrorCodes.CannotCreateIndex, tojson(res));
+assert.eq("CannotCreateIndex", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+st.rs0.stopSet();
+
+// If we receive a non-ignorable error, it should be reported as the command error.
+res = st.s.getDB(dbName).getCollection("unshardedColl").createIndex({"validIdx": 1});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+// We might see 'HostUnreachable' the first time if the mongos's ReplicaSetMonitor does not yet
+// know that the shard is down.
+assert(res.code === ErrorCodes.HostUnreachable ||
+ res.code === ErrorCodes.FailedToSatisfyReadPreference,
+ tojson(res));
+assert(res.codeName === "HostUnreachable" || res.codeName === "FailedToSatisfyReadPreference",
+ tojson(res));
+
+// If some shard returns a non-ignorable error, it should be reported as the command error, even
+// if other shards returned ignorable errors.
+res = st.s.getDB(dbName).getCollection(collName).createIndex({"validIdx": 1});
+assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res)); // shard was down
+assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res)); // gets created on shard that owns chunks
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // shard does not own chunks
+assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
+assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
+// We can expect to see 'FailedToSatisfyReadPreference' this time, because after the previous
+// createIndexes attempt, mongos's ReplicaSetMonitor should have been updated.
+assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference, tojson(res));
+assert.eq("FailedToSatisfyReadPreference", res.codeName, tojson(res));
+assert.neq(null, res.errmsg, tojson(res));
+
+st.stop();
})();
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
index a2bc2070622..537b03a8783 100644
--- a/jstests/sharding/initial_split_validate_shard_collections.js
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -7,68 +7,68 @@ load("jstests/libs/feature_compatibility_version.js");
load("jstests/libs/uuid_util.js");
(function() {
- 'use strict';
-
- let st = new ShardingTest({shards: 2});
- let mongos = st.s0;
-
- assert.commandWorked(mongos.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
-
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
-
- // Ensure that all the pending (received chunks) have been incorporated in the shard's filtering
- // metadata so they will show up in the getShardVersion command
- assert.eq(0, mongos.getDB('test').user.find({}).itcount());
-
- st.printShardingStatus();
-
- function checkMetadata(metadata) {
- jsTestLog(tojson(metadata));
-
- assert.eq(1, metadata.chunks.length);
- assert.eq(0, metadata.pending.length);
-
- // Check that the single chunk on the shard's metadata is a valid chunk (getShardVersion
- // represents chunks as an array of [min, max])
- let chunks = metadata.chunks;
- assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0);
- }
-
- // Check that the shards' in-memory catalog caches were refreshed
- checkMetadata(assert
- .commandWorked(st.rs0.getPrimary().adminCommand(
- {getShardVersion: 'test.user', fullMetadata: true}))
- .metadata);
- checkMetadata(assert
- .commandWorked(st.rs1.getPrimary().adminCommand(
- {getShardVersion: 'test.user', fullMetadata: true}))
- .metadata);
-
- // Check that the shards' catalogs have the correct UUIDs
- const configUUID = getUUIDFromConfigCollections(mongos, 'test.user');
- const shard0UUID = getUUIDFromListCollections(st.shard0.getDB('test'), 'user');
- const shard1UUID = getUUIDFromListCollections(st.shard1.getDB('test'), 'user');
- assert.eq(configUUID, shard0UUID);
- assert.eq(configUUID, shard1UUID);
-
- // Check that the shards' on-disk caches have the correct number of chunks
- assert.commandWorked(st.shard0.adminCommand(
- {_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
- assert.commandWorked(st.shard1.adminCommand(
- {_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
-
- const chunksOnConfigCount = st.config.chunks.count({ns: 'test.user'});
- assert.eq(2, chunksOnConfigCount);
-
- const cacheChunksOnShard0 =
- st.shard0.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
- const cacheChunksOnShard1 =
- st.shard1.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
- assert.eq(chunksOnConfigCount, cacheChunksOnShard0.length);
- assert.eq(chunksOnConfigCount, cacheChunksOnShard1.length);
- assert.eq(cacheChunksOnShard0, cacheChunksOnShard1);
-
- st.stop();
+'use strict';
+
+let st = new ShardingTest({shards: 2});
+let mongos = st.s0;
+
+assert.commandWorked(mongos.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
+
+// Ensure that all the pending (received) chunks have been incorporated into the shard's
+// filtering metadata so they will show up in the getShardVersion command.
+assert.eq(0, mongos.getDB('test').user.find({}).itcount());
+
+st.printShardingStatus();
+
+function checkMetadata(metadata) {
+ jsTestLog(tojson(metadata));
+
+ assert.eq(1, metadata.chunks.length);
+ assert.eq(0, metadata.pending.length);
+
+    // Check that the single chunk in the shard's metadata is a valid chunk (getShardVersion
+    // represents chunks as an array of [min, max]).
+ let chunks = metadata.chunks;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0);
+}
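+
+// Editor sketch (shape only; the bounds shown are hypothetical): the metadata checked
+// above might look like {chunks: [[{x: MinKey}, {x: NumberLong(0)}]], pending: [], ...},
+// i.e. each chunk is a two-element [min, max] array of shard-key bounds.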
+
+// Check that the shards' in-memory catalog caches were refreshed
+checkMetadata(assert
+ .commandWorked(st.rs0.getPrimary().adminCommand(
+ {getShardVersion: 'test.user', fullMetadata: true}))
+ .metadata);
+checkMetadata(assert
+ .commandWorked(st.rs1.getPrimary().adminCommand(
+ {getShardVersion: 'test.user', fullMetadata: true}))
+ .metadata);
+
+// Check that the shards' catalogs have the correct UUIDs
+const configUUID = getUUIDFromConfigCollections(mongos, 'test.user');
+const shard0UUID = getUUIDFromListCollections(st.shard0.getDB('test'), 'user');
+const shard1UUID = getUUIDFromListCollections(st.shard1.getDB('test'), 'user');
+assert.eq(configUUID, shard0UUID);
+assert.eq(configUUID, shard1UUID);
+
+// Check that the shards' on-disk caches have the correct number of chunks
+assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
+assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
+
+const chunksOnConfigCount = st.config.chunks.count({ns: 'test.user'});
+assert.eq(2, chunksOnConfigCount);
+
+const cacheChunksOnShard0 =
+ st.shard0.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
+const cacheChunksOnShard1 =
+ st.shard1.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
+assert.eq(chunksOnConfigCount, cacheChunksOnShard0.length);
+assert.eq(chunksOnConfigCount, cacheChunksOnShard1.length);
+assert.eq(cacheChunksOnShard0, cacheChunksOnShard1);
+
+st.stop();
})();
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 86db9ae4e82..4bea7d95474 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -1,71 +1,71 @@
// Test write re-routing on version mismatch.
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 2});
+var st = new ShardingTest({shards: 2, mongos: 2});
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = st.s.getCollection('TestDB.coll');
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = st.s.getCollection('TestDB.coll');
- assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.coll', key: {_id: 1}}));
+assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.coll', key: {_id: 1}}));
- jsTest.log("Refreshing second mongos...");
+jsTest.log("Refreshing second mongos...");
- var mongosB = st.s1;
- var adminB = mongosB.getDB("admin");
- var collB = mongosB.getCollection(coll + "");
+var mongosB = st.s1;
+var adminB = mongosB.getDB("admin");
+var collB = mongosB.getCollection(coll + "");
- // Make sure mongosB knows about the coll
- assert.eq(0, collB.find().itcount());
+// Make sure mongosB knows about the coll
+assert.eq(0, collB.find().itcount());
- jsTest.log("Moving chunk to create stale mongos...");
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+jsTest.log("Moving chunk to create stale mongos...");
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
- jsTest.log("Inserting docs that needs to be retried...");
+jsTest.log("Inserting docs that need to be retried...");
- var nextId = -1;
- for (var i = 0; i < 2; i++) {
- printjson("Inserting " + nextId);
- assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
- }
+var nextId = -1;
+for (var i = 0; i < 2; i++) {
+ printjson("Inserting " + nextId);
+ assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
+}
- jsTest.log("Inserting doc which successfully goes through...");
+jsTest.log("Inserting doc which successfully goes through...");
- // Do second write
- assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+// Do second write
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
- // Assert that write went through
- assert.eq(coll.find().itcount(), 3);
+// Assert that write went through
+assert.eq(coll.find().itcount(), 3);
- jsTest.log("Now try moving the actual chunk we're writing to...");
+jsTest.log("Now try moving the actual chunk we're writing to...");
- // Now move the actual chunk we're writing to
- printjson(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+// Now move the actual chunk we're writing to
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
- jsTest.log("Inserting second docs to get written back...");
+jsTest.log("Inserting a second batch of docs to get written back...");
- // Will fail entirely if too many of these, waiting for write to get applied can get too long.
- for (var i = 0; i < 2; i++) {
- collB.insert({_id: nextId--, hello: "world"});
- }
+// Will fail entirely if there are too many of these, since waiting for the writes to be
+// applied can take too long.
+for (var i = 0; i < 2; i++) {
+ collB.insert({_id: nextId--, hello: "world"});
+}
- // Refresh server
- printjson(adminB.runCommand({flushRouterConfig: 1}));
+// Refresh server
+printjson(adminB.runCommand({flushRouterConfig: 1}));
- jsTest.log("Inserting second doc which successfully goes through...");
+jsTest.log("Inserting second doc which successfully goes through...");
- // Do second write
- assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
+// Do second write
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
- jsTest.log("All docs written this time!");
+jsTest.log("All docs written this time!");
- // Assert that writes went through.
- assert.eq(coll.find().itcount(), 6);
+// Assert that writes went through.
+assert.eq(coll.find().itcount(), 6);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/invalid_system_views_sharded_collection.js b/jstests/sharding/invalid_system_views_sharded_collection.js
index 1248a7aee3a..899d4482987 100644
--- a/jstests/sharding/invalid_system_views_sharded_collection.js
+++ b/jstests/sharding/invalid_system_views_sharded_collection.js
@@ -4,76 +4,72 @@
*/
(function() {
- "use strict";
-
- function runTest(st, badViewDefinition) {
- const mongos = st.s;
- const config = mongos.getDB("config");
- const db = mongos.getDB("invalid_system_views");
- assert.commandWorked(db.dropDatabase());
-
- assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
-
- // Create sharded and unsharded collections, then insert an invalid view into system.views.
- const viewsCollection = db.getCollection("coll");
- const staticCollection = db.getCollection("staticCollection");
- assert.commandWorked(
- config.adminCommand({shardCollection: viewsCollection.getFullName(), key: {a: 1}}));
- assert.commandWorked(
- config.adminCommand({shardCollection: staticCollection.getFullName(), key: {a: 1}}));
-
- assert.commandWorked(viewsCollection.createIndex({x: 1}));
-
- const unshardedColl = db.getCollection("unshardedColl");
- assert.writeOK(unshardedColl.insert({b: "boo"}));
-
- assert.writeOK(db.system.views.insert(badViewDefinition),
- "failed to insert " + tojson(badViewDefinition));
-
- // Test that a command involving views properly fails with a views-specific error code.
- assert.commandFailedWithCode(
- db.runCommand({listCollections: 1}),
- ErrorCodes.InvalidViewDefinition,
- "listCollections should have failed in the presence of an invalid view");
-
- // Helper function to create a message to use if an assertion fails.
- function makeErrorMessage(msg) {
- return msg +
- " should work on a valid, existing collection, despite the presence of bad views" +
- " in system.views";
- }
+"use strict";
+
+function runTest(st, badViewDefinition) {
+ const mongos = st.s;
+ const config = mongos.getDB("config");
+ const db = mongos.getDB("invalid_system_views");
+ assert.commandWorked(db.dropDatabase());
+
+ assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
+ st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+
+ // Create sharded and unsharded collections, then insert an invalid view into system.views.
+ const viewsCollection = db.getCollection("coll");
+ const staticCollection = db.getCollection("staticCollection");
+ assert.commandWorked(
+ config.adminCommand({shardCollection: viewsCollection.getFullName(), key: {a: 1}}));
+ assert.commandWorked(
+ config.adminCommand({shardCollection: staticCollection.getFullName(), key: {a: 1}}));
+
+ assert.commandWorked(viewsCollection.createIndex({x: 1}));
+
+ const unshardedColl = db.getCollection("unshardedColl");
+ assert.writeOK(unshardedColl.insert({b: "boo"}));
+
+ assert.writeOK(db.system.views.insert(badViewDefinition),
+ "failed to insert " + tojson(badViewDefinition));
+
+ // Test that a command involving views properly fails with a views-specific error code.
+ assert.commandFailedWithCode(
+ db.runCommand({listCollections: 1}),
+ ErrorCodes.InvalidViewDefinition,
+ "listCollections should have failed in the presence of an invalid view");
+
+ // Helper function to create a message to use if an assertion fails.
+ function makeErrorMessage(msg) {
+ return msg +
+ " should work on a valid, existing collection, despite the presence of bad views" +
+ " in system.views";
+ }
- assert.writeOK(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
+ assert.writeOK(viewsCollection.insert({y: "baz", a: 5}), makeErrorMessage("insert"));
- assert.writeOK(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
- makeErrorMessage("update"));
+ assert.writeOK(viewsCollection.update({y: "baz"}, {$set: {y: "qux"}}),
+ makeErrorMessage("update"));
- assert.writeOK(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
+ assert.writeOK(viewsCollection.remove({y: "baz"}), makeErrorMessage("remove"));
- assert.commandWorked(
- db.runCommand(
- {findAndModify: viewsCollection.getName(), query: {x: 1, a: 1}, update: {x: 2}}),
- makeErrorMessage("findAndModify with update"));
+ assert.commandWorked(
+ db.runCommand(
+ {findAndModify: viewsCollection.getName(), query: {x: 1, a: 1}, update: {x: 2}}),
+ makeErrorMessage("findAndModify with update"));
- assert.commandWorked(
- db.runCommand(
- {findAndModify: viewsCollection.getName(), query: {x: 2, a: 1}, remove: true}),
- makeErrorMessage("findAndModify with remove"));
+ assert.commandWorked(
+ db.runCommand(
+ {findAndModify: viewsCollection.getName(), query: {x: 2, a: 1}, remove: true}),
+ makeErrorMessage("findAndModify with remove"));
- const lookup = {
- $lookup: {
- from: unshardedColl.getName(),
- localField: "_id",
- foreignField: "_id",
- as: "match"
- }
- };
- assert.commandWorked(
- db.runCommand({aggregate: viewsCollection.getName(), pipeline: [lookup], cursor: {}}),
- makeErrorMessage("aggregate with $lookup"));
+ const lookup = {
+ $lookup:
+ {from: unshardedColl.getName(), localField: "_id", foreignField: "_id", as: "match"}
+ };
+ assert.commandWorked(
+ db.runCommand({aggregate: viewsCollection.getName(), pipeline: [lookup], cursor: {}}),
+ makeErrorMessage("aggregate with $lookup"));
- const graphLookup = {
+ const graphLookup = {
$graphLookup: {
from: unshardedColl.getName(),
startWith: "$_id",
@@ -82,54 +78,46 @@
as: "match"
}
};
- assert.commandWorked(
- db.runCommand(
- {aggregate: viewsCollection.getName(), pipeline: [graphLookup], cursor: {}}),
- makeErrorMessage("aggregate with $graphLookup"));
-
- assert.commandWorked(db.runCommand({dropIndexes: viewsCollection.getName(), index: "x_1"}),
- makeErrorMessage("dropIndexes"));
-
- assert.commandWorked(viewsCollection.createIndex({x: 1}),
- makeErrorMessage("createIndexes"));
-
- assert.commandWorked(
- db.runCommand({collMod: viewsCollection.getName(), validator: {x: {$type: "string"}}}),
- makeErrorMessage("collMod"));
-
- assert.commandWorked(db.runCommand({drop: viewsCollection.getName()}),
- makeErrorMessage("drop"));
- assert.commandWorked(db.runCommand({drop: staticCollection.getName()}),
- makeErrorMessage("drop"));
- assert.commandWorked(db.runCommand({drop: unshardedColl.getName()}),
- makeErrorMessage("drop"));
-
- // Drop the offending view so that the validate hook succeeds.
- db.system.views.remove(badViewDefinition);
- }
-
- const st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
-
- runTest(st,
- {_id: "invalid_system_views.badViewStringPipeline", viewOn: "coll", pipeline: "bad"});
- runTest(st,
- {_id: "invalid_system_views.badViewEmptyObjectPipeline", viewOn: "coll", pipeline: {}});
- runTest(st,
- {_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "coll", pipeline: 7});
- runTest(st, {
- _id: "invalid_system_views.badViewArrayWithIntegerPipeline",
- viewOn: "coll",
- pipeline: [1]
- });
- runTest(st, {
- _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline",
- viewOn: "coll",
- pipeline: [[]]
- });
- runTest(st, {_id: 7, viewOn: "coll", pipeline: []});
- runTest(st, {_id: "invalid_system_views.embedded\0null", viewOn: "coll", pipeline: []});
- runTest(st, {_id: "invalidNotFullyQualifiedNs", viewOn: "coll", pipeline: []});
- runTest(st, {_id: "invalid_system_views.missingViewOnField", pipeline: []});
-
- st.stop();
+ assert.commandWorked(
+ db.runCommand({aggregate: viewsCollection.getName(), pipeline: [graphLookup], cursor: {}}),
+ makeErrorMessage("aggregate with $graphLookup"));
+
+ assert.commandWorked(db.runCommand({dropIndexes: viewsCollection.getName(), index: "x_1"}),
+ makeErrorMessage("dropIndexes"));
+
+ assert.commandWorked(viewsCollection.createIndex({x: 1}), makeErrorMessage("createIndexes"));
+
+ assert.commandWorked(
+ db.runCommand({collMod: viewsCollection.getName(), validator: {x: {$type: "string"}}}),
+ makeErrorMessage("collMod"));
+
+ assert.commandWorked(db.runCommand({drop: viewsCollection.getName()}),
+ makeErrorMessage("drop"));
+ assert.commandWorked(db.runCommand({drop: staticCollection.getName()}),
+ makeErrorMessage("drop"));
+ assert.commandWorked(db.runCommand({drop: unshardedColl.getName()}), makeErrorMessage("drop"));
+
+ // Drop the offending view so that the validate hook succeeds.
+ db.system.views.remove(badViewDefinition);
+}
+
+const st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
+
+runTest(st, {_id: "invalid_system_views.badViewStringPipeline", viewOn: "coll", pipeline: "bad"});
+runTest(st, {_id: "invalid_system_views.badViewEmptyObjectPipeline", viewOn: "coll", pipeline: {}});
+runTest(st, {_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "coll", pipeline: 7});
+runTest(
+ st,
+ {_id: "invalid_system_views.badViewArrayWithIntegerPipeline", viewOn: "coll", pipeline: [1]});
+runTest(st, {
+ _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline",
+ viewOn: "coll",
+ pipeline: [[]]
+});
+runTest(st, {_id: 7, viewOn: "coll", pipeline: []});
+runTest(st, {_id: "invalid_system_views.embedded\0null", viewOn: "coll", pipeline: []});
+runTest(st, {_id: "invalidNotFullyQualifiedNs", viewOn: "coll", pipeline: []});
+runTest(st, {_id: "invalid_system_views.missingViewOnField", pipeline: []});
+
+st.stop();
}());
diff --git a/jstests/sharding/json_schema.js b/jstests/sharding/json_schema.js
index 1c24f427eed..5a4a68102b7 100644
--- a/jstests/sharding/json_schema.js
+++ b/jstests/sharding/json_schema.js
@@ -2,61 +2,64 @@
* Tests for $jsonSchema queries in a sharded cluster.
*/
(function() {
- "use strict";
-
- const dbName = "json_schema_sharding";
-
- var st = new ShardingTest({shards: 2, mongos: 1, config: 1});
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.name);
-
- const testDB = st.s.getDB(dbName);
- const coll = testDB.json_schema_sharding;
- coll.drop();
-
- // Shard the collection on _id.
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: -100}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 100}}));
-
- // Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
-
- // Write one document into each of the chunks.
- assert.writeOK(coll.insert({_id: -150, a: 1}));
- assert.writeOK(coll.insert({_id: -50, a: 10}));
- assert.writeOK(coll.insert({_id: 50, a: "str"}));
- assert.writeOK(coll.insert({_id: 150}));
-
- // Test that $jsonSchema in a find command returns the correct results.
- assert.eq(4, coll.find({$jsonSchema: {}}).itcount());
- assert.eq(3, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount());
- assert.eq(4, coll.find({$jsonSchema: {required: ["_id"]}}).itcount());
- assert.eq(1, coll.find({$jsonSchema: {properties: {_id: {minimum: 150}}}}).itcount());
-
- // Test that $jsonSchema works correctly in an update command.
- let res = coll.update(
- {$jsonSchema: {properties: {_id: {type: "number", minimum: 100}, a: {type: "number"}}}},
- {$inc: {a: 1}},
- {multi: true});
- assert.writeOK(res);
- assert.eq(1, res.nModified);
-
- const schema = {properties: {_id: {type: "number", minimum: 100}}, required: ["_id"]};
- res = coll.update({$jsonSchema: schema}, {$set: {b: 1}}, {multi: true});
- assert.writeOK(res);
- assert.eq(1, res.nModified);
-
- // Test that $jsonSchema works correctly in a findAndModify command.
- res = coll.findAndModify({query: {_id: 150, $jsonSchema: schema}, update: {$set: {b: 1}}});
- assert.eq(1, res.b);
-
- st.stop();
+"use strict";
+
+const dbName = "json_schema_sharding";
+
+var st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.name);
+
+const testDB = st.s.getDB(dbName);
+const coll = testDB.json_schema_sharding;
+coll.drop();
+
+// Shard the collection on _id.
+assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: -100}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 100}}));
+
+// Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: coll.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
+assert.commandWorked(testDB.adminCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
+
+// Write one document into each of the chunks.
+assert.writeOK(coll.insert({_id: -150, a: 1}));
+assert.writeOK(coll.insert({_id: -50, a: 10}));
+assert.writeOK(coll.insert({_id: 50, a: "str"}));
+assert.writeOK(coll.insert({_id: 150}));
+
+// Test that $jsonSchema in a find command returns the correct results.
+assert.eq(4, coll.find({$jsonSchema: {}}).itcount());
+assert.eq(3, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount());
+assert.eq(4, coll.find({$jsonSchema: {required: ["_id"]}}).itcount());
+assert.eq(1, coll.find({$jsonSchema: {properties: {_id: {minimum: 150}}}}).itcount());
+
+// Test that $jsonSchema works correctly in an update command.
+let res = coll.update(
+ {$jsonSchema: {properties: {_id: {type: "number", minimum: 100}, a: {type: "number"}}}},
+ {$inc: {a: 1}},
+ {multi: true});
+assert.writeOK(res);
+assert.eq(1, res.nModified);
+
+const schema = {
+ properties: {_id: {type: "number", minimum: 100}},
+ required: ["_id"]
+};
+res = coll.update({$jsonSchema: schema}, {$set: {b: 1}}, {multi: true});
+assert.writeOK(res);
+assert.eq(1, res.nModified);
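+
+// Editor note (inferred from the assertions, not from external docs): $jsonSchema
+// 'properties' constraints apply only when the field is present, while 'required'
+// forces presence. Of the four documents inserted above, only {_id: 150} has a
+// numeric _id >= 100, so each of the two updates above matches exactly one document.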
+
+// Test that $jsonSchema works correctly in a findAndModify command.
+res = coll.findAndModify({query: {_id: 150, $jsonSchema: schema}, update: {$set: {b: 1}}});
+assert.eq(1, res.b);
+
+st.stop();
})();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 573d7b1e5f7..fe8e04e7492 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,58 +1,57 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- var db = s.getDB("test");
+var db = s.getDB("test");
- const big = 'X'.repeat(10000);
+const big = 'X'.repeat(10000);
- // Create sufficient documents to create a jumbo chunk, and use the same shard key in all of
- // them so that the chunk cannot be split.
- var x = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 500; i++) {
- bulk.insert({x: x, big: big});
- }
+// Create sufficient documents to create a jumbo chunk, and use the same shard key in all of
+// them so that the chunk cannot be split.
+var x = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 500; i++) {
+ bulk.insert({x: x, big: big});
+}
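+
+// Editor arithmetic sketch: ~500 documents x ~10 KB of 'big' each is roughly 5 MB under
+// the single shard-key value x = 0, far above the 1 MB chunkSize configured above, so
+// this chunk can never be split.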
- // Create documents with different shard keys that can be split and moved without issue.
- for (; x < 1500; x++) {
- bulk.insert({x: x, big: big});
- }
+// Create documents with different shard keys that can be split and moved without issue.
+for (; x < 1500; x++) {
+ bulk.insert({x: x, big: big});
+}
- assert.writeOK(bulk.execute());
+assert.writeOK(bulk.execute());
+s.printShardingStatus(true);
+
+s.startBalancer();
+
+function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
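+
+// Editor note: diff1() is the absolute difference in chunk counts between the two
+// shards; the assert.soon below treats the balancer as converged once that gap drops
+// below 5, polling every 5 seconds for up to 10 minutes.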
+
+assert.soon(function() {
+ var d = diff1();
+ print("diff: " + d);
s.printShardingStatus(true);
+ return d < 5;
+}, "balance didn't happen", 1000 * 60 * 10, 5000);
+
+// Check that the jumbo chunk did not move, since moving a jumbo chunk shouldn't be possible.
+var jumboChunk =
+ s.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
+assert.eq(s.shard1.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved');
+
+// TODO: SERVER-26531 Make sure that balancer marked the first chunk as jumbo.
+// Assumption: balancer favors moving the lowest valued chunk out of a shard.
+// assert(jumboChunk.jumbo, tojson(jumboChunk));
- s.startBalancer();
-
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- assert.soon(function() {
- var d = diff1();
- print("diff: " + d);
- s.printShardingStatus(true);
- return d < 5;
- }, "balance didn't happen", 1000 * 60 * 10, 5000);
-
- // Check that the jumbo chunk did not move, which shouldn't be possible.
- var jumboChunk =
- s.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}});
- assert.eq(
- s.shard1.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved');
-
- // TODO: SERVER-26531 Make sure that balancer marked the first chunk as jumbo.
- // Assumption: balancer favors moving the lowest valued chunk out of a shard.
- // assert(jumboChunk.jumbo, tojson(jumboChunk));
-
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index c671e691f94..e0d19e8874b 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,252 +1,240 @@
(function() {
- 'use strict';
-
- // Values have to be sorted - you must have exactly 6 values in each array
- var types = [
- {name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
- {name: "double", values: [1.2, 3.5, 4.5, 4.6, 6.7, 9.9], keyfield: "a"},
- {
- name: "date",
- values: [
- new Date(1000000),
- new Date(2000000),
- new Date(3000000),
- new Date(4000000),
- new Date(5000000),
- new Date(6000000)
- ],
- keyfield: "a"
- },
- {
- name: "string_id",
- values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
- keyfield: "_id"
- },
- {
- name: "embedded 1",
- values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
- keyfield: "a.b"
- },
- {
- name: "embedded 2",
- values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
- keyfield: "a.b.c"
- },
- {
- name: "object",
- values: [
- {a: 1, b: 1.2},
- {a: 1, b: 3.5},
- {a: 1, b: 4.5},
- {a: 2, b: 1.2},
- {a: 2, b: 3.5},
- {a: 2, b: 4.5}
- ],
- keyfield: "o"
- },
- {
- name: "compound",
- values: [
- {a: 1, b: 1.2},
- {a: 1, b: 3.5},
- {a: 1, b: 4.5},
- {a: 2, b: 1.2},
- {a: 2, b: 3.5},
- {a: 2, b: 4.5}
- ],
- keyfield: "o",
- compound: true
- },
- {
- name: "oid_id",
- values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
- keyfield: "_id"
- },
- {
- name: "oid_other",
- values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
- keyfield: "o"
- },
- ];
-
- var s = new ShardingTest({name: "key_many", shards: 2});
-
- assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- var db = s.getDB('test');
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- var curT;
-
- function makeObjectDotted(v) {
- var o = {};
- if (curT.compound) {
- var prefix = curT.keyfield + '.';
- if (typeof(v) == 'object') {
- for (var key in v)
- o[prefix + key] = v[key];
- } else {
- for (var key in curT.values[0])
- o[prefix + key] = v;
- }
+'use strict';
+
+// Values have to be sorted; each array must contain exactly 6 values.
+var types = [
+ {name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
+ {name: "double", values: [1.2, 3.5, 4.5, 4.6, 6.7, 9.9], keyfield: "a"},
+ {
+ name: "date",
+ values: [
+ new Date(1000000),
+ new Date(2000000),
+ new Date(3000000),
+ new Date(4000000),
+ new Date(5000000),
+ new Date(6000000)
+ ],
+ keyfield: "a"
+ },
+ {name: "string_id", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "_id"},
+ {name: "embedded 1", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "a.b"},
+ {
+ name: "embedded 2",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "a.b.c"
+ },
+ {
+ name: "object",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o"
+ },
+ {
+ name: "compound",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o",
+ compound: true
+ },
+ {
+ name: "oid_id",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "_id"
+ },
+ {
+ name: "oid_other",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "o"
+ },
+];
+
+var s = new ShardingTest({name: "key_many", shards: 2});
+
+assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+var db = s.getDB('test');
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+var curT;
+
+function makeObjectDotted(v) {
+ var o = {};
+ if (curT.compound) {
+ var prefix = curT.keyfield + '.';
+ if (typeof (v) == 'object') {
+ for (var key in v)
+ o[prefix + key] = v[key];
} else {
- o[curT.keyfield] = v;
+ for (var key in curT.values[0])
+ o[prefix + key] = v;
}
- return o;
+ } else {
+ o[curT.keyfield] = v;
}
+ return o;
+}
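+
+// Editor sketch (hypothetical inputs): with curT.keyfield = "a.b" and no 'compound'
+// flag, makeObjectDotted(5) yields {"a.b": 5}; for the compound "o" type it fans a
+// scalar out across the component keys of values[0], e.g. {"o.a": 5, "o.b": 5}.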
- function makeObject(v) {
- var o = {};
- var p = o;
+function makeObject(v) {
+ var o = {};
+ var p = o;
- var keys = curT.keyfield.split('.');
- for (var i = 0; i < keys.length - 1; i++) {
- p[keys[i]] = {};
- p = p[keys[i]];
- }
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length - 1; i++) {
+ p[keys[i]] = {};
+ p = p[keys[i]];
+ }
- p[keys[i]] = v;
+ p[keys[i]] = v;
- return o;
- }
+ return o;
+}
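+
+// Editor sketch (hypothetical input): makeObject builds the nested form instead; with
+// curT.keyfield = "a.b", makeObject(5) yields {a: {b: 5}}, the same document that
+// makeObjectDotted(5) addresses as {"a.b": 5}.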
- function makeInQuery() {
- if (curT.compound) {
- // cheating a bit...
- return {'o.a': {$in: [1, 2]}};
- } else {
- return makeObjectDotted({$in: curT.values});
- }
+function makeInQuery() {
+ if (curT.compound) {
+ // cheating a bit...
+ return {'o.a': {$in: [1, 2]}};
+ } else {
+ return makeObjectDotted({$in: curT.values});
}
+}
- function getKey(o) {
- var keys = curT.keyfield.split('.');
- for (var i = 0; i < keys.length; i++) {
- o = o[keys[i]];
- }
- return o;
+function getKey(o) {
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length; i++) {
+ o = o[keys[i]];
}
+ return o;
+}
- Random.setRandomSeed();
+Random.setRandomSeed();
- for (var i = 0; i < types.length; i++) {
- curT = types[i];
+for (var i = 0; i < types.length; i++) {
+ curT = types[i];
- print("\n\n#### Now Testing " + curT.name + " ####\n\n");
+ print("\n\n#### Now Testing " + curT.name + " ####\n\n");
- var shortName = "foo_" + curT.name;
- var longName = "test." + shortName;
+ var shortName = "foo_" + curT.name;
+ var longName = "test." + shortName;
- var c = db[shortName];
- s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
+ var c = db[shortName];
+ s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
- assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
+ assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
- var unsorted = Array.shuffle(Object.extend([], curT.values));
- c.insert(makeObject(unsorted[0]));
- for (var x = 1; x < unsorted.length; x++) {
- c.save(makeObject(unsorted[x]));
- }
-
- assert.eq(6, c.find().count(), curT.name + " basic count");
-
- s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[0])});
- s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[2])});
- s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[5])});
-
- s.adminCommand({
- movechunk: longName,
- find: makeObjectDotted(curT.values[2]),
- to: secondary.getMongo().name,
- _waitForDelete: true
- });
-
- s.printChunks();
-
- assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
- assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
-
- assert.eq(6, c.find().toArray().length, curT.name + " total count");
- assert.eq(6,
- c.find().sort(makeObjectDotted(1)).toArray().length,
- curT.name + " total count sorted");
-
- assert.eq(
- 6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
-
- assert.eq(2,
- c.find({
- $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).count(),
- curT.name + " $or count()");
- assert.eq(2,
- c.find({
- $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).itcount(),
- curT.name + " $or itcount()");
- assert.eq(4,
- c.find({
- $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).count(),
- curT.name + " $nor count()");
- assert.eq(4,
- c.find({
- $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
- }).itcount(),
- curT.name + " $nor itcount()");
-
- var stats = c.stats();
- printjson(stats);
- assert.eq(6, stats.count, curT.name + " total count with stats()");
-
- var count = 0;
- for (var shard in stats.shards) {
- count += stats.shards[shard].count;
- }
- assert.eq(6, count, curT.name + " total count with stats() sum");
-
- assert.eq(curT.values,
- c.find().sort(makeObjectDotted(1)).toArray().map(getKey),
- curT.name + " sort 1");
- assert.eq(curT.values,
- c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey),
- curT.name + " sort 1 - $in");
- assert.eq(curT.values.reverse(),
- c.find().sort(makeObjectDotted(-1)).toArray().map(getKey),
- curT.name + " sort 2");
-
- assert.eq(0, c.find({xx: 17}).sort({zz: 1}).count(), curT.name + " xx 0a ");
- assert.eq(0, c.find({xx: 17}).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
- assert.eq(0, c.find({xx: 17}).count(), curT.name + " xx 0c ");
- assert.eq(0, c.find({xx: {$exists: true}}).count(), curT.name + " xx 1 ");
-
- c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}});
- assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
- assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
-
- assert.writeOK(
- c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
-
- assert.commandWorked(c.ensureIndex({_id: 1}));
-
- // multi update
- var mysum = 0;
- c.find().forEach(function(z) {
- mysum += z.xx || 0;
- });
- assert.eq(17, mysum, curT.name + " multi update pre");
-
- c.update({}, {$inc: {xx: 1}}, false, true);
-
- var mysum = 0;
- c.find().forEach(function(z) {
- mysum += z.xx || 0;
- });
- assert.eq(23, mysum, curT.name + " multi update");
+ var unsorted = Array.shuffle(Object.extend([], curT.values));
+ c.insert(makeObject(unsorted[0]));
+ for (var x = 1; x < unsorted.length; x++) {
+ c.save(makeObject(unsorted[x]));
}
- s.stop();
-
+ assert.eq(6, c.find().count(), curT.name + " basic count");
+
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[0])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[2])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[5])});
+
+ s.adminCommand({
+ movechunk: longName,
+ find: makeObjectDotted(curT.values[2]),
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ s.printChunks();
+
+ assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
+ assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
+
+ assert.eq(6, c.find().toArray().length, curT.name + " total count");
+ assert.eq(
+ 6, c.find().sort(makeObjectDotted(1)).toArray().length, curT.name + " total count sorted");
+
+ assert.eq(
+ 6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
+
+ assert.eq(
+ 2,
+ c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count(),
+ curT.name + " $or count()");
+ assert.eq(2,
+ c.find({
+ $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $or itcount()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).count(),
+ curT.name + " $nor count()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $nor itcount()");
+
+ var stats = c.stats();
+ printjson(stats);
+ assert.eq(6, stats.count, curT.name + " total count with stats()");
+
+ var count = 0;
+ for (var shard in stats.shards) {
+ count += stats.shards[shard].count;
+ }
+ assert.eq(6, count, curT.name + " total count with stats() sum");
+
+ assert.eq(curT.values,
+ c.find().sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1");
+ assert.eq(curT.values,
+ c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1 - $in");
+ assert.eq(curT.values.reverse(),
+ c.find().sort(makeObjectDotted(-1)).toArray().map(getKey),
+ curT.name + " sort 2");
+
+ assert.eq(0, c.find({xx: 17}).sort({zz: 1}).count(), curT.name + " xx 0a ");
+ assert.eq(0, c.find({xx: 17}).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
+ assert.eq(0, c.find({xx: 17}).count(), curT.name + " xx 0c ");
+ assert.eq(0, c.find({xx: {$exists: true}}).count(), curT.name + " xx 1 ");
+
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}});
+ assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
+ assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
+
+ assert.writeOK(c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
+
+ assert.commandWorked(c.ensureIndex({_id: 1}));
+
+ // multi update
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(17, mysum, curT.name + " multi update pre");
+
+ c.update({}, {$inc: {xx: 1}}, false, true);
+
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(23, mysum, curT.name + " multi update");
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/key_rotation.js b/jstests/sharding/key_rotation.js
index 7067efa1cd5..da969e087f5 100644
--- a/jstests/sharding/key_rotation.js
+++ b/jstests/sharding/key_rotation.js
@@ -14,79 +14,79 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
-
- // Verify after startup there is a new key in admin.system.keys.
- jsTestLog("Verify the admin.system.keys collection after startup.");
-
- let startupKeys = st.s.getDB("admin").system.keys.find();
- assert(startupKeys.count() >= 2); // Should be at least two generations of keys available.
- startupKeys.toArray().forEach(function(key, i) {
- assert.hasFields(
- key,
- ["purpose", "key", "expiresAt"],
- "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
- });
-
- // Verify there is a $clusterTime with a signature in the response.
- jsTestLog("Verify a signature is included in the cluster time in a response.");
-
- let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
- assert.hasFields(res, ["$clusterTime"]);
- assert.hasFields(res.$clusterTime, ["signature"]);
- assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
-
- // Verify manual key rotation.
- jsTestLog("Verify manual key rotation.");
-
- // Pause key generation on the config server primary.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- st.configRS.nodes[i].adminCommand(
- {configureFailPoint: "disableKeyGeneration", mode: "alwaysOn"});
- }
-
- // Delete all existing keys.
- res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
- assert(res.nRemoved >= 2);
- assert(st.s.getDB("admin").system.keys.find().count() == 0);
-
- // Restart the config servers, so they will create new keys once the failpoint is disabled.
- st.configRS.stopSet(null /* signal */, true /* forRestart */);
- st.configRS.startSet(
- {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
-
- // Limit the max time between refreshes on the config server, so new keys are created quickly.
- st.configRS.getPrimary().adminCommand({
- "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
- "mode": "alwaysOn",
- "data": {"overrideMS": 1000}
- });
-
- // Kill and restart all shards and mongos processes so they have no keys in memory.
- st.rs0.stopSet(null /* signal */, true /* forRestart */);
- st.rs0.startSet({restart: true});
-
- // The shard primary should return a dummy signed cluster time, because there are no keys.
- res = assert.commandWorked(st.rs0.getPrimary().getDB("test").runCommand({isMaster: 1}));
- assert.hasFields(res, ["$clusterTime", "operationTime"]);
- assert.eq(res.$clusterTime.signature.keyId, NumberLong(0));
-
- // Resume key generation.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- st.configRS.getPrimary().adminCommand(
- {configureFailPoint: "disableKeyGeneration", mode: "off"});
- }
-
- st.restartMongos(0);
-
- // Wait for config server primary to create new keys.
- assert.soonNoExcept(function() {
- let keys = st.s.getDB("admin").system.keys.find();
- assert(keys.count() >= 2);
- return true;
- }, "expected the config server primary to create new keys");
-
- st.stop();
+"use strict";
+
+let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+
+// Verify after startup there is a new key in admin.system.keys.
+jsTestLog("Verify the admin.system.keys collection after startup.");
+
+let startupKeys = st.s.getDB("admin").system.keys.find();
+assert(startupKeys.count() >= 2); // Should be at least two generations of keys available.
+startupKeys.toArray().forEach(function(key, i) {
+ assert.hasFields(
+ key,
+ ["purpose", "key", "expiresAt"],
+ "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
+});
+
+// Verify there is a $clusterTime with a signature in the response.
+jsTestLog("Verify a signature is included in the cluster time in a response.");
+
+let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
+assert.hasFields(res, ["$clusterTime"]);
+assert.hasFields(res.$clusterTime, ["signature"]);
+assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+
+// Verify manual key rotation.
+jsTestLog("Verify manual key rotation.");
+
+// Pause key generation on the config server primary.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ st.configRS.nodes[i].adminCommand(
+ {configureFailPoint: "disableKeyGeneration", mode: "alwaysOn"});
+}
+
+// Delete all existing keys.
+res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
+assert(res.nRemoved >= 2);
+assert(st.s.getDB("admin").system.keys.find().count() == 0);
+
+// Restart the config servers, so they will create new keys once the failpoint is disabled.
+st.configRS.stopSet(null /* signal */, true /* forRestart */);
+st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
+
+// Limit the max time between refreshes on the config server, so new keys are created quickly.
+st.configRS.getPrimary().adminCommand({
+ "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
+ "mode": "alwaysOn",
+ "data": {"overrideMS": 1000}
+});
+
+// Kill and restart all shards and mongos processes so they have no keys in memory.
+st.rs0.stopSet(null /* signal */, true /* forRestart */);
+st.rs0.startSet({restart: true});
+
+// The shard primary should return a dummy signed cluster time, because there are no keys.
+res = assert.commandWorked(st.rs0.getPrimary().getDB("test").runCommand({isMaster: 1}));
+assert.hasFields(res, ["$clusterTime", "operationTime"]);
+assert.eq(res.$clusterTime.signature.keyId, NumberLong(0));
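+// Editor note: the "dummy" signature is inferred from the assertion above; with no keys
+// in memory the shard still attaches $clusterTime, but signed with keyId 0 (and,
+// presumably, an all-zero hash) rather than omitting the field.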
+
+// Resume key generation.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ st.configRS.getPrimary().adminCommand(
+ {configureFailPoint: "disableKeyGeneration", mode: "off"});
+}
+
+st.restartMongos(0);
+
+// Wait for config server primary to create new keys.
+assert.soonNoExcept(function() {
+ let keys = st.s.getDB("admin").system.keys.find();
+ assert(keys.count() >= 2);
+ return true;
+}, "expected the config server primary to create new keys");
+
+st.stop();
})();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index c3fc654bf11..78fa4c4d5dc 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,68 +1,67 @@
(function() {
- var s = new ShardingTest({name: "keystring", shards: 2});
+var s = new ShardingTest({name: "keystring", shards: 2});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
- primary = s.getPrimaryShard("test").getDB("test");
- seconday = s.getOther(primary).getDB("test");
+primary = s.getPrimaryShard("test").getDB("test");
+seconday = s.getOther(primary).getDB("test");
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
- var db = s.getDB("test");
+var db = s.getDB("test");
- db.foo.save({name: "eliot"});
- db.foo.save({name: "sara"});
- db.foo.save({name: "bob"});
- db.foo.save({name: "joe"});
- db.foo.save({name: "mark"});
- db.foo.save({name: "allan"});
+db.foo.save({name: "eliot"});
+db.foo.save({name: "sara"});
+db.foo.save({name: "bob"});
+db.foo.save({name: "joe"});
+db.foo.save({name: "mark"});
+db.foo.save({name: "allan"});
- assert.eq(6, db.foo.find().count(), "basic count");
+assert.eq(6, db.foo.find().count(), "basic count");
+s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "eliot"},
+ to: seconday.getMongo().name,
+ _waitForDelete: true
+});
+
+s.printChunks();
+
+assert.eq(3, primary.foo.find().toArray().length, "primary count");
+assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
+
+assert.eq(6, db.foo.find().toArray().length, "total count");
+assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count sorted");
+
+assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count()");
+
+assert.eq("allan,bob,eliot,joe,mark,sara",
+ db.foo.find().sort({name: 1}).toArray().map(function(z) {
+ return z.name;
+ }),
+ "sort 1");
+assert.eq("sara,mark,joe,eliot,bob,allan",
+ db.foo.find().sort({name: -1}).toArray().map(function(z) {
+ return z.name;
+ }),
+ "sort 2");
+
+// make sure we can't force a split on an extreme key
+// [allan->joe)
+assert.throws(function() {
s.adminCommand({split: "test.foo", middle: {name: "allan"}});
- s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+});
+assert.throws(function() {
s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+});
- s.adminCommand({
- movechunk: "test.foo",
- find: {name: "eliot"},
- to: seconday.getMongo().name,
- _waitForDelete: true
- });
-
- s.printChunks();
-
- assert.eq(3, primary.foo.find().toArray().length, "primary count");
- assert.eq(3, seconday.foo.find().toArray().length, "secondary count");
-
- assert.eq(6, db.foo.find().toArray().length, "total count");
- assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count sorted");
-
- assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count()");
-
- assert.eq("allan,bob,eliot,joe,mark,sara",
- db.foo.find().sort({name: 1}).toArray().map(function(z) {
- return z.name;
- }),
- "sort 1");
- assert.eq("sara,mark,joe,eliot,bob,allan",
- db.foo.find().sort({name: -1}).toArray().map(function(z) {
- return z.name;
- }),
- "sort 2");
-
- // make sure we can't foce a split on an extreme key
- // [allan->joe)
- assert.throws(function() {
- s.adminCommand({split: "test.foo", middle: {name: "allan"}});
- });
- assert.throws(function() {
- s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
- });
-
- s.stop();
-
+s.stop();
})();
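
A note on the sort assertions above: assert.eq passes because `==` coerces the mapped
array to its comma-separated string form. An equivalent, more explicit sketch (assuming
the same `db` handle):

const names = db.foo.find().sort({name: 1}).toArray().map(doc => doc.name);
// join(",") makes the array-to-string comparison explicit instead of relying on coercion.
assert.eq("allan,bob,eliot,joe,mark,sara", names.join(","), "sort 1");
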
diff --git a/jstests/sharding/keys_rotation_interval_sec.js b/jstests/sharding/keys_rotation_interval_sec.js
index 310b3cd612e..4234786df35 100644
--- a/jstests/sharding/keys_rotation_interval_sec.js
+++ b/jstests/sharding/keys_rotation_interval_sec.js
@@ -3,28 +3,28 @@
*/
(function() {
- "use strict";
- const kRotationInterval = 30;
- let st = new ShardingTest({
- mongos: 1,
- shards: {rs0: {nodes: 2}},
- other: {configOptions: {setParameter: "KeysRotationIntervalSec=30"}}
- });
+"use strict";
+const kRotationInterval = 30;
+let st = new ShardingTest({
+ mongos: 1,
+ shards: {rs0: {nodes: 2}},
+ other: {configOptions: {setParameter: "KeysRotationIntervalSec=30"}}
+});
- let keys = st.s.getDB("admin").system.keys.find();
- // add a few seconds to the expire timestamp to account for rounding that may happen.
- let maxExpireTime = Timestamp(Date.now() / 1000 + kRotationInterval * 2 + 5, 0);
+let keys = st.s.getDB("admin").system.keys.find();
+// add a few seconds to the expire timestamp to account for rounding that may happen.
+let maxExpireTime = Timestamp(Date.now() / 1000 + kRotationInterval * 2 + 5, 0);
- assert(keys.count() >= 2);
- keys.toArray().forEach(function(key, i) {
- assert.hasFields(
- key,
- ["purpose", "key", "expiresAt"],
- "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
- assert.lte(bsonWoCompare(key.expiresAt, maxExpireTime),
- 0,
- "key document " + i + ": " + tojson(key) + "expiresAt value is greater than: " +
- maxExpireTime);
- });
- st.stop();
+assert(keys.count() >= 2);
+keys.toArray().forEach(function(key, i) {
+ assert.hasFields(
+ key,
+ ["purpose", "key", "expiresAt"],
+ "key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
+ assert.lte(bsonWoCompare(key.expiresAt, maxExpireTime),
+ 0,
+ "key document " + i + ": " + tojson(key) +
+ "expiresAt value is greater than: " + maxExpireTime);
+});
+st.stop();
})();
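
The expiry bound check above relies on bsonWoCompare ordering BSON Timestamps by their
seconds field first and their increment second. A minimal sketch of that ordering:

// bsonWoCompare returns a negative value when the first argument sorts before the second.
const tsA = Timestamp(1000, 0);
const tsB = Timestamp(1000, 5);  // same seconds, larger increment
const tsC = Timestamp(2000, 0);  // larger seconds field
assert.lt(bsonWoCompare(tsA, tsB), 0);
assert.lt(bsonWoCompare(tsB, tsC), 0);
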
diff --git a/jstests/sharding/kill_op_overflow.js b/jstests/sharding/kill_op_overflow.js
index 6ca5c236bab..b433ba60702 100644
--- a/jstests/sharding/kill_op_overflow.js
+++ b/jstests/sharding/kill_op_overflow.js
@@ -3,10 +3,10 @@
* failure being propagated back to the client.
*/
(function() {
- "use strict";
- var st = new ShardingTest({name: "shard1", shards: 1, mongos: 1});
+"use strict";
+var st = new ShardingTest({name: "shard1", shards: 1, mongos: 1});
- assert.commandFailed(st.s.getDB("admin").runCommand(
- {killOp: 1, op: st.shard0.shardName + ":99999999999999999999999"}));
- st.stop();
+assert.commandFailed(st.s.getDB("admin").runCommand(
+ {killOp: 1, op: st.shard0.shardName + ":99999999999999999999999"}));
+st.stop();
})();
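
Through mongos, killOp addresses operations as "<shardId>:<opid>". The test's numeric
suffix overflows the opid range, so mongos rejects the command before targeting any
shard. A sketch of the two forms (the well-formed opid value is hypothetical):

const wellFormed = st.shard0.shardName + ":12345";  // parseable opid, would be routed
const overflowing = st.shard0.shardName + ":99999999999999999999999";
// The overflowing form fails upfront with a parse error rather than being propagated.
assert.commandFailed(st.s.getDB("admin").runCommand({killOp: 1, op: overflowing}));
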
diff --git a/jstests/sharding/kill_pinned_cursor.js b/jstests/sharding/kill_pinned_cursor.js
index d7e4017d273..1c19626ab77 100644
--- a/jstests/sharding/kill_pinned_cursor.js
+++ b/jstests/sharding/kill_pinned_cursor.js
@@ -7,234 +7,234 @@
*/
(function() {
- "use strict";
-
- // This test manually simulates a session, which is not compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- const kFailpointOptions = {shouldCheckForInterrupt: true};
-
- const st = new ShardingTest({shards: 2});
- const kDBName = "test";
- const mongosDB = st.s.getDB(kDBName);
- const shard0DB = st.shard0.getDB(kDBName);
- const shard1DB = st.shard1.getDB(kDBName);
-
- let coll = mongosDB.jstest_kill_pinned_cursor;
- coll.drop();
-
- for (let i = 0; i < 10; i++) {
- assert.writeOK(coll.insert({_id: i}));
+"use strict";
+
+// This test manually simulates a session, which is not compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+const kFailpointOptions = {
+ shouldCheckForInterrupt: true
+};
+
+const st = new ShardingTest({shards: 2});
+const kDBName = "test";
+const mongosDB = st.s.getDB(kDBName);
+const shard0DB = st.shard0.getDB(kDBName);
+const shard1DB = st.shard1.getDB(kDBName);
+
+let coll = mongosDB.jstest_kill_pinned_cursor;
+coll.drop();
+
+for (let i = 0; i < 10; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
+
+st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
+st.ensurePrimaryShard(kDBName, st.shard0.name);
+
+// The startParallelShell function serializes whatever it is given into a string. This means
+// that we can't pass it functions which capture variables. Instead we use the trick below,
+// putting the values for the variables we'd like to capture directly inside the string.
+// Kudos to Dave Storch for coming up with this idea.
+function makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId) {
+ let code = `const cursorId = ${cursorId.toString()};`;
+ code += `const kDBName = "${kDBName}";`;
+ code += `let collName = "${coll.getName()}";`;
+ code += `const useSession = ${useSession};`;
+
+ TestData.getMoreErrCodes = getMoreErrCodes;
+ if (useSession) {
+ TestData.sessionId = sessionId;
}
- st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
- st.ensurePrimaryShard(kDBName, st.shard0.name);
-
- // The startParallelShell function will take the string it's given and serialize it into a
- // string. This means that we can't pass it functions which capture variables. Instead we use
- // the trick below, by putting the values for the variables we'd like to capture inside the
- // string. Kudos to Dave Storch for coming up with this idea.
- function makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId) {
- let code = `const cursorId = ${cursorId.toString()};`;
- code += `const kDBName = "${kDBName}";`;
- code += `let collName = "${coll.getName()}";`;
- code += `const useSession = ${useSession};`;
+ const runGetMore = function() {
+ let getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 4};
- TestData.getMoreErrCodes = getMoreErrCodes;
if (useSession) {
- TestData.sessionId = sessionId;
+ getMoreCmd.lsid = TestData.sessionId;
}
- const runGetMore = function() {
- let getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 4};
-
- if (useSession) {
- getMoreCmd.lsid = TestData.sessionId;
- }
-
- // We expect that the operation will get interrupted and fail.
- assert.commandFailedWithCode(db.runCommand(getMoreCmd), TestData.getMoreErrCodes);
-
- if (useSession) {
- assert.commandWorked(db.adminCommand({endSessions: [TestData.sessionId]}));
- }
- };
-
- code += `(${runGetMore.toString()})();`;
- return code;
- }
-
- // Tests that the various cursors involved in a sharded query can be killed, even when pinned.
- //
- // Sets up a sharded cursor, opens a mongos cursor, and uses failpoints to cause the mongos
- // cursor to hang due to getMore commands hanging on each of the shards. Then invokes
- // 'killFunc', and verifies the cursors on the shards and the mongos cursor get cleaned up.
- //
- // 'getMoreErrCodes' are the error codes with which we expect the getMore to fail (e.g. a
- // killCursors command should cause getMore to fail with "CursorKilled", but killOp should cause
- // a getMore to fail with "Interrupted").
- function testShardedKillPinned(
- {killFunc: killFunc, getMoreErrCodes: getMoreErrCodes, useSession: useSession}) {
- let getMoreJoiner = null;
- let cursorId;
- let sessionId;
-
- try {
- // Set up the mongods to hang on a getMore request. ONLY set the failpoint on the
- // mongods. Setting the failpoint on the mongos will only cause it to spin, and not
- // actually send any requests out.
- assert.commandWorked(shard0DB.adminCommand(
- {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
- assert.commandWorked(shard1DB.adminCommand(
- {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
-
- // Run a find against mongos. This should open cursors on both of the shards.
- let findCmd = {find: coll.getName(), batchSize: 2};
-
- if (useSession) {
- // Manually start a session so it can be continued from inside a parallel shell.
- sessionId = assert.commandWorked(mongosDB.adminCommand({startSession: 1})).id;
- findCmd.lsid = sessionId;
- }
-
- let cmdRes = mongosDB.runCommand(findCmd);
- assert.commandWorked(cmdRes);
- cursorId = cmdRes.cursor.id;
- assert.neq(cursorId, NumberLong(0));
-
- const parallelShellFn =
- makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId);
- getMoreJoiner = startParallelShell(parallelShellFn, st.s.port);
+ // We expect that the operation will get interrupted and fail.
+ assert.commandFailedWithCode(db.runCommand(getMoreCmd), TestData.getMoreErrCodes);
- // Sleep until we know the mongod cursors are pinned.
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned > 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned > 0);
+ if (useSession) {
+ assert.commandWorked(db.adminCommand({endSessions: [TestData.sessionId]}));
+ }
+ };
+
+ code += `(${runGetMore.toString()})();`;
+ return code;
+}
+
+// Tests that the various cursors involved in a sharded query can be killed, even when pinned.
+//
+// Sets up a sharded cursor, opens a mongos cursor, and uses failpoints to cause the mongos
+// cursor to hang due to getMore commands hanging on each of the shards. Then invokes
+// 'killFunc', and verifies the cursors on the shards and the mongos cursor get cleaned up.
+//
+// 'getMoreErrCodes' are the error codes with which we expect the getMore to fail (e.g. a
+// killCursors command should cause getMore to fail with "CursorKilled", but killOp should cause
+// a getMore to fail with "Interrupted").
+function testShardedKillPinned(
+ {killFunc: killFunc, getMoreErrCodes: getMoreErrCodes, useSession: useSession}) {
+ let getMoreJoiner = null;
+ let cursorId;
+ let sessionId;
+
+ try {
+ // Set up the mongods to hang on a getMore request. ONLY set the failpoint on the
+ // mongods. Setting the failpoint on the mongos will only cause it to spin, and not
+ // actually send any requests out.
+ assert.commandWorked(shard0DB.adminCommand(
+ {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
+ assert.commandWorked(shard1DB.adminCommand(
+ {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
+
+ // Run a find against mongos. This should open cursors on both of the shards.
+ let findCmd = {find: coll.getName(), batchSize: 2};
- // Use the function provided by the caller to kill the sharded query.
- killFunc(cursorId);
+ if (useSession) {
+ // Manually start a session so it can be continued from inside a parallel shell.
+ sessionId = assert.commandWorked(mongosDB.adminCommand({startSession: 1})).id;
+ findCmd.lsid = sessionId;
+ }
- // The getMore should finish now that we've killed the cursor (even though the failpoint
- // is still enabled).
+ let cmdRes = mongosDB.runCommand(findCmd);
+ assert.commandWorked(cmdRes);
+ cursorId = cmdRes.cursor.id;
+ assert.neq(cursorId, NumberLong(0));
+
+ const parallelShellFn =
+ makeParallelShellFunctionString(cursorId, getMoreErrCodes, useSession, sessionId);
+ getMoreJoiner = startParallelShell(parallelShellFn, st.s.port);
+
+ // Sleep until we know the mongod cursors are pinned.
+ assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned > 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned > 0);
+
+ // Use the function provided by the caller to kill the sharded query.
+ killFunc(cursorId);
+
+ // The getMore should finish now that we've killed the cursor (even though the failpoint
+ // is still enabled).
+ getMoreJoiner();
+ getMoreJoiner = null;
+
+ // By now, the getMore run against the mongos has returned with an indication that the
+ // cursor has been killed. Verify that the cursor is really gone by running a
+ // killCursors command, and checking that the cursor is reported as "not found".
+ let killRes = mongosDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
+ assert.commandWorked(killRes);
+ assert.eq(killRes.cursorsAlive, []);
+ assert.eq(killRes.cursorsNotFound, [cursorId]);
+ assert.eq(killRes.cursorsUnknown, []);
+
+ // Eventually the cursors on the mongods should also be cleaned up. They should be
+ // killed by mongos when the mongos cursor gets killed.
+ assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned == 0);
+ assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
+ assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
+ assert.eq(shard1DB.serverStatus().metrics.cursor.open.total, 0);
+ } finally {
+ assert.commandWorked(
+ shard0DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+ assert.commandWorked(
+ shard1DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+ if (getMoreJoiner) {
getMoreJoiner();
- getMoreJoiner = null;
-
- // By now, the getMore run against the mongos has returned with an indication that the
- // cursor has been killed. Verify that the cursor is really gone by running a
- // killCursors command, and checking that the cursor is reported as "not found".
- let killRes = mongosDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
- assert.commandWorked(killRes);
- assert.eq(killRes.cursorsAlive, []);
- assert.eq(killRes.cursorsNotFound, [cursorId]);
- assert.eq(killRes.cursorsUnknown, []);
-
- // Eventually the cursors on the mongods should also be cleaned up. They should be
- // killed by mongos when the mongos cursor gets killed.
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.pinned == 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
- assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
- assert.eq(shard1DB.serverStatus().metrics.cursor.open.total, 0);
- } finally {
- assert.commandWorked(
- shard0DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- assert.commandWorked(
- shard1DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
- if (getMoreJoiner) {
- getMoreJoiner();
- }
}
}
+}
- for (let useSession of[true, false]) {
- // Test that running 'killCursors' against a pinned mongos cursor (with pinned mongod
- // cursors) correctly cleans up all of the involved cursors.
- testShardedKillPinned({
- killFunc: function(mongosCursorId) {
- // Run killCursors against the mongos cursor. Verify that the cursor is reported as
- // killed successfully, and does not hang or return a "CursorInUse" error.
- let cmdRes =
- mongosDB.runCommand({killCursors: coll.getName(), cursors: [mongosCursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [mongosCursorId]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsNotFound, []);
- assert.eq(cmdRes.cursorsUnknown, []);
- },
- getMoreErrCodes: ErrorCodes.CursorKilled,
- useSession: useSession
- });
-
- // Test that running killOp against one of the cursors pinned on mongod causes all involved
- // cursors to be killed.
- testShardedKillPinned({
- // This function ignores the mongos cursor id, since it instead uses currentOp to
- // obtain an op id to kill.
- killFunc: function() {
- let currentGetMoresArray =
- shard0DB.getSiblingDB("admin")
- .aggregate(
- [{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
- .toArray();
- assert.eq(1, currentGetMoresArray.length);
- let currentGetMore = currentGetMoresArray[0];
- let killOpResult = shard0DB.killOp(currentGetMore.opid);
- assert.commandWorked(killOpResult);
- },
- getMoreErrCodes: ErrorCodes.Interrupted,
- useSession: useSession
- });
-
- // Test that running killCursors against one of the cursors pinned on mongod causes all
- // involved cursors to be killed.
- testShardedKillPinned({
- // This function ignores the mongos cursor id, since it instead uses currentOp to
- // obtain the cursor id of one of the shard cursors.
- killFunc: function() {
- let currentGetMoresArray =
- shard0DB.getSiblingDB("admin")
- .aggregate(
- [{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
- .toArray();
- assert.eq(1, currentGetMoresArray.length);
- let currentGetMore = currentGetMoresArray[0];
- let shardCursorId = currentGetMore.command.getMore;
- let cmdRes =
- shard0DB.runCommand({killCursors: coll.getName(), cursors: [shardCursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [shardCursorId]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsNotFound, []);
- assert.eq(cmdRes.cursorsUnknown, []);
- },
- getMoreErrCodes: ErrorCodes.CursorKilled,
- useSession: useSession
- });
- }
+for (let useSession of [true, false]) {
+ // Test that running 'killCursors' against a pinned mongos cursor (with pinned mongod
+ // cursors) correctly cleans up all of the involved cursors.
+ testShardedKillPinned({
+ killFunc: function(mongosCursorId) {
+ // Run killCursors against the mongos cursor. Verify that the cursor is reported as
+ // killed successfully, and does not hang or return a "CursorInUse" error.
+ let cmdRes =
+ mongosDB.runCommand({killCursors: coll.getName(), cursors: [mongosCursorId]});
+ assert.commandWorked(cmdRes);
+ assert.eq(cmdRes.cursorsKilled, [mongosCursorId]);
+ assert.eq(cmdRes.cursorsAlive, []);
+ assert.eq(cmdRes.cursorsNotFound, []);
+ assert.eq(cmdRes.cursorsUnknown, []);
+ },
+ getMoreErrCodes: ErrorCodes.CursorKilled,
+ useSession: useSession
+ });
- // Test that running killSessions on the session which is running the getMore causes the
- // cursor to be killed.
+ // Test that running killOp against one of the cursors pinned on mongod causes all involved
+ // cursors to be killed.
testShardedKillPinned({
- // This function ignores the mongos cursor id, since it instead uses listLocalSessions
- // to obtain the session id of the session running the getMore.
+ // This function ignores the mongos cursor id, since it instead uses currentOp to
+ // obtain an op id to kill.
killFunc: function() {
- // Must sort by 'lastUse' because there may be sessions left over on the server from
- // the previous runs. We will only call killSessions on the most recently used one.
- const localSessions = mongosDB
- .aggregate([
- {$listLocalSessions: {allUsers: true}},
- {$sort: {"lastUse": -1}},
- ])
- .toArray();
-
- const sessionUUID = localSessions[0]._id.id;
- assert.commandWorked(mongosDB.runCommand({killSessions: [{id: sessionUUID}]}));
+ let currentGetMoresArray =
+ shard0DB.getSiblingDB("admin")
+ .aggregate([{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
+ .toArray();
+ assert.eq(1, currentGetMoresArray.length);
+ let currentGetMore = currentGetMoresArray[0];
+ let killOpResult = shard0DB.killOp(currentGetMore.opid);
+ assert.commandWorked(killOpResult);
},
- // Killing a session on mongos kills all matching remote cursors (through KillCursors) then
- // all matching local operations (through KillOp), so the getMore can fail with either
- // CursorKilled or Interrupted depending on which response is returned first.
- getMoreErrCodes: [ErrorCodes.CursorKilled, ErrorCodes.Interrupted],
- useSession: true,
+ getMoreErrCodes: ErrorCodes.Interrupted,
+ useSession: useSession
});
- st.stop();
+ // Test that running killCursors against one of the cursors pinned on mongod causes all
+ // involved cursors to be killed.
+ testShardedKillPinned({
+ // This function ignores the mongos cursor id, since it instead uses currentOp to
+ // obtain the cursor id of one of the shard cursors.
+ killFunc: function() {
+ let currentGetMoresArray =
+ shard0DB.getSiblingDB("admin")
+ .aggregate([{$currentOp: {}}, {$match: {"command.getMore": {$exists: true}}}])
+ .toArray();
+ assert.eq(1, currentGetMoresArray.length);
+ let currentGetMore = currentGetMoresArray[0];
+ let shardCursorId = currentGetMore.command.getMore;
+ let cmdRes =
+ shard0DB.runCommand({killCursors: coll.getName(), cursors: [shardCursorId]});
+ assert.commandWorked(cmdRes);
+ assert.eq(cmdRes.cursorsKilled, [shardCursorId]);
+ assert.eq(cmdRes.cursorsAlive, []);
+ assert.eq(cmdRes.cursorsNotFound, []);
+ assert.eq(cmdRes.cursorsUnknown, []);
+ },
+ getMoreErrCodes: ErrorCodes.CursorKilled,
+ useSession: useSession
+ });
+}
+
+// Test that running killSessions on the session which is running the getMore causes the
+// cursor to be killed.
+testShardedKillPinned({
+ // This function ignores the mongos cursor id, since it instead uses listLocalSessions
+ // to obtain the session id of the session running the getMore.
+ killFunc: function() {
+ // Must sort by 'lastUse' because there may be sessions left over on the server from
+ // the previous runs. We will only call killSessions on the most recently used one.
+ const localSessions = mongosDB
+ .aggregate([
+ {$listLocalSessions: {allUsers: true}},
+ {$sort: {"lastUse": -1}},
+ ])
+ .toArray();
+
+ const sessionUUID = localSessions[0]._id.id;
+ assert.commandWorked(mongosDB.runCommand({killSessions: [{id: sessionUUID}]}));
+ },
+ // Killing a session on mongos kills all matching remote cursors (through KillCursors) then
+ // all matching local operations (through KillOp), so the getMore can fail with either
+ // CursorKilled or Interrupted depending on which response is returned first.
+ getMoreErrCodes: [ErrorCodes.CursorKilled, ErrorCodes.Interrupted],
+ useSession: true,
+});
+
+st.stop();
})();
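
A stripped-down sketch of the variable-capture trick described in the comments above:
startParallelShell serializes its argument, so concrete values are baked into the
generated source string (names here are illustrative):

function makeShellCode(cursorId, collName) {
    // The parallel shell runs in its own global scope and cannot see these locals,
    // so embed their values directly in the code string.
    let code = `const cursorId = ${cursorId.toString()};`;
    code += `const collName = "${collName}";`;
    const body = function() {
        assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
    };
    code += `(${body.toString()})();`;
    return code;
}
// Usage: const join = startParallelShell(makeShellCode(id, "coll"), st.s.port); join();
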
diff --git a/jstests/sharding/kill_sessions.js b/jstests/sharding/kill_sessions.js
index a3ad23139a0..b96bf4bd326 100644
--- a/jstests/sharding/kill_sessions.js
+++ b/jstests/sharding/kill_sessions.js
@@ -1,63 +1,63 @@
load("jstests/libs/kill_sessions.js");
(function() {
- 'use strict';
-
- // TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
- // if the kill command is sent with an implicit session.
- TestData.disableImplicitSessions = true;
-
- function runTests(needAuth) {
- var other = {
- rs: true,
- rs0: {nodes: 3},
- rs1: {nodes: 3},
- };
- if (needAuth) {
- other.keyFile = 'jstests/libs/key1';
- }
+'use strict';
+
+// TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
+// if the kill command is sent with an implicit session.
+TestData.disableImplicitSessions = true;
+
+function runTests(needAuth) {
+ var other = {
+ rs: true,
+ rs0: {nodes: 3},
+ rs1: {nodes: 3},
+ };
+ if (needAuth) {
+ other.keyFile = 'jstests/libs/key1';
+ }
- var st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: other});
+ var st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: other});
- var forExec = st.s0;
+ var forExec = st.s0;
- if (needAuth) {
- KillSessionsTestHelper.initializeAuth(forExec);
- }
+ if (needAuth) {
+ KillSessionsTestHelper.initializeAuth(forExec);
+ }
- var forKill = new Mongo(forExec.host);
+ var forKill = new Mongo(forExec.host);
- var r = forExec.getDB("admin").runCommand({
- multicast: {ping: 1},
- db: "admin",
- });
- assert(r.ok);
-
- var hosts = [];
- for (var host in r["hosts"]) {
- var host = new Mongo(host);
- if (needAuth) {
- host.getDB("local").auth("__system", "foopdedoop");
- }
- hosts.push(host);
-
- assert.soon(function() {
- var fcv = host.getDB("admin").runCommand(
- {getParameter: 1, featureCompatibilityVersion: 1});
- return fcv["ok"] && fcv["featureCompatibilityVersion"] != "3.4";
- });
- }
+ var r = forExec.getDB("admin").runCommand({
+ multicast: {ping: 1},
+ db: "admin",
+ });
+ assert(r.ok);
- var args = [forExec, forKill, hosts];
+ var hosts = [];
+ for (var host in r["hosts"]) {
+ var host = new Mongo(host);
if (needAuth) {
- KillSessionsTestHelper.runAuth.apply({}, args);
- } else {
- KillSessionsTestHelper.runNoAuth.apply({}, args);
+ host.getDB("local").auth("__system", "foopdedoop");
}
+ hosts.push(host);
+
+ assert.soon(function() {
+ var fcv =
+ host.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1});
+ return fcv["ok"] && fcv["featureCompatibilityVersion"] != "3.4";
+ });
+ }
- st.stop();
+ var args = [forExec, forKill, hosts];
+ if (needAuth) {
+ KillSessionsTestHelper.runAuth.apply({}, args);
+ } else {
+ KillSessionsTestHelper.runNoAuth.apply({}, args);
}
- runTests(true);
- runTests(false);
+ st.stop();
+}
+
+runTests(true);
+runTests(false);
})();
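
The multicast response above is keyed by "host:port" strings, which is why the test
iterates it with for-in and dials each key. A minimal sketch of that fan-out (assuming
the response shape used by the test):

const reply = st.s0.getDB("admin").runCommand({multicast: {ping: 1}, db: "admin"});
assert(reply.ok);
const conns = [];
for (const hostString in reply.hosts) {
    // Each key names one node that acknowledged the ping; open a direct connection.
    conns.push(new Mongo(hostString));
}
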
diff --git a/jstests/sharding/killop.js b/jstests/sharding/killop.js
index 39c9c36538b..7f2e4d23173 100644
--- a/jstests/sharding/killop.js
+++ b/jstests/sharding/killop.js
@@ -2,67 +2,66 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
- const conn = st.s;
+const st = new ShardingTest({shards: 2});
+const conn = st.s;
- const db = conn.getDB("killOp");
- const coll = db.test;
- assert.writeOK(db.getCollection(coll.getName()).insert({x: 1}));
+const db = conn.getDB("killOp");
+const coll = db.test;
+assert.writeOK(db.getCollection(coll.getName()).insert({x: 1}));
- const kFailPointName = "waitInFindBeforeMakingBatch";
- assert.commandWorked(
- conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "alwaysOn"}));
+const kFailPointName = "waitInFindBeforeMakingBatch";
+assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "alwaysOn"}));
- const queryToKill = `assert.commandFailedWithCode(db.getSiblingDB('${db.getName()}')` +
- `.runCommand({find: '${coll.getName()}', filter: {x: 1}}), ErrorCodes.Interrupted);`;
- const awaitShell = startParallelShell(queryToKill, conn.port);
+const queryToKill = `assert.commandFailedWithCode(db.getSiblingDB('${db.getName()}')` +
+ `.runCommand({find: '${coll.getName()}', filter: {x: 1}}), ErrorCodes.Interrupted);`;
+const awaitShell = startParallelShell(queryToKill, conn.port);
- function runCurOp() {
- const filter = {"ns": coll.getFullName(), "command.filter": {x: 1}};
- return db.getSiblingDB("admin")
- .aggregate([{$currentOp: {localOps: true}}, {$match: filter}])
- .toArray();
- }
+function runCurOp() {
+ const filter = {"ns": coll.getFullName(), "command.filter": {x: 1}};
+ return db.getSiblingDB("admin")
+ .aggregate([{$currentOp: {localOps: true}}, {$match: filter}])
+ .toArray();
+}
- let opId;
+let opId;
- // Wait for the operation to start.
- assert.soon(
- function() {
- const result = runCurOp();
+// Wait for the operation to start.
+assert.soon(
+ function() {
+ const result = runCurOp();
- // Check the 'msg' field to be sure that the failpoint has been reached.
- if (result.length === 1 && result[0].msg === kFailPointName) {
- opId = result[0].opid;
+ // Check the 'msg' field to be sure that the failpoint has been reached.
+ if (result.length === 1 && result[0].msg === kFailPointName) {
+ opId = result[0].opid;
- return true;
- }
+ return true;
+ }
- return false;
- },
- function() {
- return "Failed to find operation in currentOp() output: " +
- tojson(db.currentOp({"ns": coll.getFullName()}));
- });
+ return false;
+ },
+ function() {
+ return "Failed to find operation in currentOp() output: " +
+ tojson(db.currentOp({"ns": coll.getFullName()}));
+ });
- // Kill the operation.
- assert.commandWorked(db.killOp(opId));
+// Kill the operation.
+assert.commandWorked(db.killOp(opId));
- // Ensure that the operation gets marked kill pending while it's still hanging.
- let result = runCurOp();
- assert(result.length === 1, tojson(result));
- assert(result[0].hasOwnProperty("killPending"));
- assert.eq(true, result[0].killPending);
+// Ensure that the operation gets marked kill pending while it's still hanging.
+let result = runCurOp();
+assert(result.length === 1, tojson(result));
+assert(result[0].hasOwnProperty("killPending"));
+assert.eq(true, result[0].killPending);
- // Release the failpoint. The operation should check for interrupt and then finish.
- assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "off"}));
+// Release the failpoint. The operation should check for interrupt and then finish.
+assert.commandWorked(conn.adminCommand({"configureFailPoint": kFailPointName, "mode": "off"}));
- awaitShell();
+awaitShell();
- result = runCurOp();
- assert(result.length === 0, tojson(result));
+result = runCurOp();
+assert(result.length === 0, tojson(result));
- st.stop();
+st.stop();
})();
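
The polling pattern above generalizes: find an operation parked on a named failpoint by
matching the 'msg' field in $currentOp output. A sketch (helper name is illustrative):

function findOpIdByFailpoint(adminDB, ns, failpointName) {
    const ops = adminDB
                    .aggregate([
                        {$currentOp: {localOps: true}},
                        {$match: {ns: ns, msg: failpointName}},
                    ])
                    .toArray();
    // The 'msg' field names the failpoint only while the operation is parked on it.
    return ops.length === 1 ? ops[0].opid : null;
}
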
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index df23946dee4..35e38722edb 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -8,64 +8,64 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- var st = new ShardingTest(
- {shards: 1, configReplSetTestOptions: {settings: {chainingAllowed: false}}});
- var testDB = st.s.getDB('test');
+var st =
+ new ShardingTest({shards: 1, configReplSetTestOptions: {settings: {chainingAllowed: false}}});
+var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- // Ensures that all metadata writes thus far have been replicated to all nodes
- st.configRS.awaitReplication();
+// Ensures that all metadata writes thus far have been replicated to all nodes
+st.configRS.awaitReplication();
- var configSecondaryList = st.configRS.getSecondaries();
- var configSecondaryToKill = configSecondaryList[0];
- var delayedConfigSecondary = configSecondaryList[1];
+var configSecondaryList = st.configRS.getSecondaries();
+var configSecondaryToKill = configSecondaryList[0];
+var delayedConfigSecondary = configSecondaryList[1];
- assert.writeOK(testDB.user.insert({_id: 1}));
+assert.writeOK(testDB.user.insert({_id: 1}));
- delayedConfigSecondary.getDB('admin').adminCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+delayedConfigSecondary.getDB('admin').adminCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // Do one metadata write in order to bump the optime on mongos
- assert.writeOK(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
+// Do one metadata write in order to bump the optime on mongos
+assert.writeOK(st.getDB('config').TestConfigColl.insert({TestKey: 'Test value'}));
- st.configRS.stopMaster();
- MongoRunner.stopMongod(configSecondaryToKill);
+st.configRS.stopMaster();
+MongoRunner.stopMongod(configSecondaryToKill);
- // Clears all cached info so mongos will be forced to query from the config.
- st.s.adminCommand({flushRouterConfig: 1});
+// Clears all cached info so mongos will be forced to query from the config.
+st.s.adminCommand({flushRouterConfig: 1});
- print('Attempting read on a sharded collection...');
- var exception = assert.throws(function() {
- testDB.user.find({}).maxTimeMS(15000).itcount();
- });
+print('Attempting read on a sharded collection...');
+var exception = assert.throws(function() {
+ testDB.user.find({}).maxTimeMS(15000).itcount();
+});
- assert(ErrorCodes.isExceededTimeLimitError(exception.code));
+assert(ErrorCodes.isExceededTimeLimitError(exception.code));
- let msgAA = 'command config.$cmd command: find { find: "databases"';
- let msgAB = 'errCode:' + ErrorCodes.ClientDisconnect;
- let msgB = 'Command on database config timed out waiting for read concern to be satisfied.';
- assert.soon(
- function() {
- var logMessages =
- assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if ((logMessages[i].indexOf(msgAA) != -1 && logMessages[i].indexOf(msgAB) != -1) ||
- logMessages[i].indexOf(msgB) != -1) {
- return true;
- }
+let msgAA = 'command config.$cmd command: find { find: "databases"';
+let msgAB = 'errCode:' + ErrorCodes.ClientDisconnect;
+let msgB = 'Command on database config timed out waiting for read concern to be satisfied.';
+assert.soon(
+ function() {
+ var logMessages =
+ assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if ((logMessages[i].indexOf(msgAA) != -1 && logMessages[i].indexOf(msgAB) != -1) ||
+ logMessages[i].indexOf(msgB) != -1) {
+ return true;
}
- return false;
- },
- 'Did not see any log entries containing the following message: ' + msgAA + ' ... ' + msgAB +
- ' or ' + msgB,
- 60000,
- 300);
+ }
+ return false;
+ },
+ 'Did not see any log entries containing the following message: ' + msgAA + ' ... ' + msgAB +
+ ' or ' + msgB,
+ 60000,
+ 300);
- // Can't do clean shutdown with this failpoint on.
- delayedConfigSecondary.getDB('admin').adminCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+// Can't do clean shutdown with this failpoint on.
+delayedConfigSecondary.getDB('admin').adminCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- st.stop();
+st.stop();
}());
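
The log-scanning loop above can be phrased as a small predicate over getLog output, which
assert.soon then polls. A sketch (helper name is illustrative):

function logContains(conn, needle) {
    // getLog: 'global' returns the node's recent in-memory log lines.
    const log = assert.commandWorked(conn.adminCommand({getLog: "global"})).log;
    return log.some(function(line) {
        return line.indexOf(needle) !== -1;
    });
}
// assert.soon(() => logContains(delayedConfigSecondary, msgB), "message not found", 60000, 300);
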
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index 5862483138a..c3df1b4baf2 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -6,65 +6,65 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
+'use strict';
- // Starts a new sharding environment limiting the chunk size to 1GB (highest value allowed).
- // Note that early splitting will start with a 1/4 of max size currently.
- var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
- var db = s.getDB("test");
+// Starts a new sharding environment limiting the chunk size to 1GB (the highest value allowed).
+// Note that early splitting currently starts at 1/4 of the max chunk size.
+var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
+var db = s.getDB("test");
- //
- // Step 1 - Test moving a large chunk
- //
+//
+// Step 1 - Test moving a large chunk
+//
- // Turn on sharding on the 'test.foo' collection and generate a large chunk
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+// Turn on sharding on the 'test.foo' collection and generate a large chunk
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- var bigString = "";
- while (bigString.length < 10000) {
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- }
+var bigString = "";
+while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
- var inserted = 0;
- var num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (400 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (400 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
- // Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
- // size
- print("Checkpoint 1a");
- var max = 200 * 1024 * 1024;
- assert.throws(function() {
- s.adminCommand({
- movechunk: "test.foo",
- find: {_id: 1},
- to: secondary.getMongo().name,
- maxChunkSizeBytes: max
- });
+// Make sure that we don't move the chunk if it is larger than the specified maximum chunk
+// size.
+print("Checkpoint 1a");
+var max = 200 * 1024 * 1024;
+assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {_id: 1},
+ to: secondary.getMongo().name,
+ maxChunkSizeBytes: max
});
+});
- // Move the chunk
- print("checkpoint 1b");
- var before = s.config.chunks.find({ns: 'test.foo'}).toArray();
- assert.commandWorked(
- s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
+// Move the chunk
+print("checkpoint 1b");
+var before = s.config.chunks.find({ns: 'test.foo'}).toArray();
+assert.commandWorked(
+ s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
- var after = s.config.chunks.find({ns: 'test.foo'}).toArray();
- assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
+var after = s.config.chunks.find({ns: 'test.foo'}).toArray();
+assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
- s.config.changelog.find().forEach(printjson);
+s.config.changelog.find().forEach(printjson);
- s.stop();
+s.stop();
})();
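
The before/after comparison above can be reduced to checking the chunk's owning shard,
since the collection holds a single chunk at that point. A sketch using the test's
variables (helper name is illustrative):

function chunkShard(configDB, ns) {
    // With one chunk in the collection, findOne returns the chunk being moved.
    return configDB.chunks.findOne({ns: ns}).shard;
}
const ownerBefore = chunkShard(s.config, "test.foo");
assert.commandWorked(
    s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
assert.neq(ownerBefore, chunkShard(s.config, "test.foo"), "move chunk did not work");
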
diff --git a/jstests/sharding/libs/sharded_transactions_helpers.js b/jstests/sharding/libs/sharded_transactions_helpers.js
index 1355e9610f0..dc49b839e30 100644
--- a/jstests/sharding/libs/sharded_transactions_helpers.js
+++ b/jstests/sharding/libs/sharded_transactions_helpers.js
@@ -13,27 +13,27 @@ function getCoordinatorFailpoints() {
const coordinatorFailpointDataArr = [
{failpoint: "hangBeforeWritingParticipantList", numTimesShouldBeHit: 1},
{
- // Test targeting remote nodes for prepare
- failpoint: "hangWhileTargetingRemoteHost",
- numTimesShouldBeHit: 2 /* once per remote participant */
+ // Test targeting remote nodes for prepare
+ failpoint: "hangWhileTargetingRemoteHost",
+ numTimesShouldBeHit: 2 /* once per remote participant */
},
{
- // Test targeting local node for prepare
- failpoint: "hangWhileTargetingLocalHost",
- numTimesShouldBeHit: 1
+ // Test targeting local node for prepare
+ failpoint: "hangWhileTargetingLocalHost",
+ numTimesShouldBeHit: 1
},
{failpoint: "hangBeforeWritingDecision", numTimesShouldBeHit: 1},
{
- // Test targeting remote nodes for decision
- failpoint: "hangWhileTargetingRemoteHost",
- numTimesShouldBeHit: 2, /* once per remote participant */
- skip: 2 /* to skip when the failpoint is hit for prepare */
+ // Test targeting remote nodes for decision
+ failpoint: "hangWhileTargetingRemoteHost",
+ numTimesShouldBeHit: 2, /* once per remote participant */
+ skip: 2 /* to skip when the failpoint is hit for prepare */
},
{
- // Test targeting local node for decision
- failpoint: "hangWhileTargetingLocalHost",
- numTimesShouldBeHit: 1,
- skip: 1 /* to skip when the failpoint is hit for prepare */
+ // Test targeting local node for decision
+ failpoint: "hangWhileTargetingLocalHost",
+ numTimesShouldBeHit: 1,
+ skip: 1 /* to skip when the failpoint is hit for prepare */
},
{failpoint: "hangBeforeDeletingCoordinatorDoc", numTimesShouldBeHit: 1},
];
@@ -70,16 +70,16 @@ function assertNoSuchTransactionOnAllShards(st, lsid, txnNumber) {
}
function assertNoSuchTransactionOnConn(conn, lsid, txnNumber) {
- assert.commandFailedWithCode(conn.getDB("foo").runCommand({
- find: "bar",
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction,
- "expected there to be no active transaction on shard, lsid: " +
- tojson(lsid) + ", txnNumber: " + tojson(txnNumber) +
- ", connection: " + tojson(conn));
+ assert.commandFailedWithCode(
+ conn.getDB("foo").runCommand({
+ find: "bar",
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction,
+ "expected there to be no active transaction on shard, lsid: " + tojson(lsid) +
+ ", txnNumber: " + tojson(txnNumber) + ", connection: " + tojson(conn));
}
function waitForFailpoint(hitFailpointStr, numTimes) {
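
The 'skip' fields in the failpoint data above line up with the failpoint subsystem's
{skip: N} mode, which ignores the first N hits before activating. A sketch of configuring
the decision-phase hang while skipping the prepare-phase hits (assuming the fields are
fed to configureFailPoint this way):

assert.commandWorked(conn.adminCommand({
    configureFailPoint: "hangWhileTargetingRemoteHost",
    // Skip the two hits from the prepare phase so the failpoint fires on the decision phase.
    mode: {skip: 2},
}));
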
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index ec744207e97..ef6b7a1c903 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -2,59 +2,60 @@
// See: http://jira.mongodb.org/browse/SERVER-1896
(function() {
- var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
- var db = s.getDB("test");
-
- // Create some data
- for (i = 0; i < 100; i++) {
- db.limit_push.insert({_id: i, x: i});
- }
- db.limit_push.ensureIndex({x: 1});
- assert.eq(100, db.limit_push.find().length(), "Incorrect number of documents");
-
- // Shard the collection
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}});
-
- // Now split the and move the data between the shards
- s.adminCommand({split: "test.limit_push", middle: {x: 50}});
- s.adminCommand({
- moveChunk: "test.limit_push",
- find: {x: 51},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
-
- // Check that the chunck have split correctly
- assert.eq(2, s.config.chunks.count({"ns": "test.limit_push"}), "wrong number of chunks");
-
- // The query is asking for the maximum value below a given value
- // db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
- q = {x: {$lt: 60}};
-
- // Make sure the basic queries are correct
- assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
- // rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
- // assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
-
- // Now make sure that the explain shos that each shard is returning a single document as
- // indicated
- // by the "n" element for each shard
- exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
- printjson(exp);
-
- var execStages = exp.executionStats.executionStages;
- assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
-
- var k = 0;
- for (var j in execStages.shards) {
- assert.eq(1,
- execStages.shards[j].executionStages.nReturned,
- "'n' is not 1 from shard000" + k.toString());
- k++;
- }
-
- s.stop();
-
+var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
+var db = s.getDB("test");
+
+// Create some data
+for (var i = 0; i < 100; i++) {
+ db.limit_push.insert({_id: i, x: i});
+}
+db.limit_push.ensureIndex({x: 1});
+assert.eq(100, db.limit_push.find().length(), "Incorrect number of documents");
+
+// Shard the collection
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}});
+
+// Now split and move the data between the shards
+s.adminCommand({split: "test.limit_push", middle: {x: 50}});
+s.adminCommand({
+ moveChunk: "test.limit_push",
+ find: {x: 51},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+// Check that the chunks have split correctly
+assert.eq(2, s.config.chunks.count({"ns": "test.limit_push"}), "wrong number of chunks");
+
+// The query is asking for the maximum value below a given value
+// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
+var q = {x: {$lt: 60}};
+
+// Make sure the basic queries are correct
+assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
+// rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
+// assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
+
+// Now make sure that the explain output shows that each shard is returning a single document,
+// as indicated by the "nReturned" element for each shard
+var exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
+printjson(exp);
+
+var execStages = exp.executionStats.executionStages;
+assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
+
+var k = 0;
+for (var j in execStages.shards) {
+ assert.eq(1,
+ execStages.shards[j].executionStages.nReturned,
+ "'n' is not 1 from shard000" + k.toString());
+ k++;
+}
+
+s.stop();
})();
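
Reading the per-shard execution stats above follows a fixed shape: the root stage of a
sharded sort-limit plan is SHARD_MERGE_SORT, and each entry in its 'shards' map carries
that shard's own executionStages. A condensed sketch:

const explainOut = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
const rootStages = explainOut.executionStats.executionStages;
assert.eq("SHARD_MERGE_SORT", rootStages.stage);
for (const shardName in rootStages.shards) {
    // With the limit pushed down to the shards, each one returns exactly one document.
    assert.eq(1, rootStages.shards[shardName].executionStages.nReturned, shardName);
}
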
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index 1269bc5c4a3..9b3ac62acce 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -25,104 +25,104 @@ load("jstests/replsets/rslib.js");
load("jstests/libs/write_concern_util.js");
(function() {
- "use strict";
-
- // Skip db hash check and shard replication since this test leaves a replica set shard
- // partitioned.
- TestData.skipCheckDBHashes = true;
- TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
-
- var testName = "linearizable_read_concern";
-
- var st = new ShardingTest({
- name: testName,
- shards: 2,
- other: {rs0: {nodes: 3}, rs1: {nodes: 3}, useBridge: true},
- mongos: 1,
- config: 1,
- enableBalancer: false
- });
-
- jsTestLog("Setting up sharded cluster.");
-
- // Set up the sharded cluster.
- var dbName = testName;
- var collName = "test";
- var collNamespace = dbName + "." + collName;
- var shard0ReplTest = st.rs0;
- var shard1ReplTest = st.rs1;
- var testDB = st.s.getDB(dbName);
-
- // Set high election timeout so that primary doesn't step down during linearizable read test.
- var cfg = shard0ReplTest.getReplSetConfigFromNode(0);
- cfg.settings.electionTimeoutMillis = shard0ReplTest.kDefaultTimeoutMS;
- reconfig(shard0ReplTest, cfg, true);
-
- // Set up sharded collection. Put 5 documents on each shard, with keys {x: 0...9}.
- var numDocs = 10;
- shardCollectionWithChunks(st, testDB[collName], numDocs);
-
- // Make sure the 'shardIdentity' document on each shard is replicated to all secondary nodes
- // before issuing reads against them.
- shard0ReplTest.awaitReplication();
- shard1ReplTest.awaitReplication();
-
- // Print current sharding stats for debugging.
- st.printShardingStatus(5);
-
- // Filter to target one document in each shard.
- var shard0DocKey = 2;
- var shard1DocKey = 7;
- var dualShardQueryFilter = {$or: [{x: shard0DocKey}, {x: shard1DocKey}]};
-
- jsTestLog("Testing linearizable read from secondaries");
-
- // Execute a linearizable read from secondaries (targeting both shards) which should fail.
- st.s.setReadPref("secondary");
- var res = assert.commandFailed(testDB.runReadCommand({
- find: collName,
- filter: dualShardQueryFilter,
- readConcern: {level: "linearizable"},
- maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
- }));
- assert.eq(res.code, ErrorCodes.NotMaster);
-
- jsTestLog("Testing linearizable read from primaries.");
-
- // Execute a linearizable read from primaries (targeting both shards) which should succeed.
- st.s.setReadPref("primary");
- var res = assert.writeOK(testDB.runReadCommand({
- find: collName,
- sort: {x: 1},
- filter: dualShardQueryFilter,
- readConcern: {level: "linearizable"},
- maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
- }));
-
- // Make sure data was returned from both shards correctly.
- assert.eq(res.cursor.firstBatch[0].x, shard0DocKey);
- assert.eq(res.cursor.firstBatch[1].x, shard1DocKey);
-
- jsTestLog("Testing linearizable read targeting partitioned primary.");
-
- var primary = shard0ReplTest.getPrimary();
- var secondaries = shard0ReplTest.getSecondaries();
-
- // Partition the primary in the first shard.
- secondaries[0].disconnect(primary);
- secondaries[1].disconnect(primary);
-
- jsTestLog("Current Replica Set Topology of First Shard: [Secondary-Secondary] [Primary]");
-
- // Execute a linearizable read targeting the partitioned primary in first shard, and good
- // primary in the second shard. This should time out due to partitioned primary.
- var result = testDB.runReadCommand({
- find: collName,
- filter: dualShardQueryFilter,
- readConcern: {level: "linearizable"},
- maxTimeMS: 3000
- });
- assert.commandFailedWithCode(result, ErrorCodes.MaxTimeMSExpired);
-
- st.stop();
+"use strict";
+
+// Skip db hash check and shard replication since this test leaves a replica set shard
+// partitioned.
+TestData.skipCheckDBHashes = true;
+TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
+
+var testName = "linearizable_read_concern";
+
+var st = new ShardingTest({
+ name: testName,
+ shards: 2,
+ other: {rs0: {nodes: 3}, rs1: {nodes: 3}, useBridge: true},
+ mongos: 1,
+ config: 1,
+ enableBalancer: false
+});
+
+jsTestLog("Setting up sharded cluster.");
+
+// Set up the sharded cluster.
+var dbName = testName;
+var collName = "test";
+var collNamespace = dbName + "." + collName;
+var shard0ReplTest = st.rs0;
+var shard1ReplTest = st.rs1;
+var testDB = st.s.getDB(dbName);
+
+// Set high election timeout so that primary doesn't step down during linearizable read test.
+var cfg = shard0ReplTest.getReplSetConfigFromNode(0);
+cfg.settings.electionTimeoutMillis = shard0ReplTest.kDefaultTimeoutMS;
+reconfig(shard0ReplTest, cfg, true);
+
+// Set up sharded collection. Put 5 documents on each shard, with keys {x: 0...9}.
+var numDocs = 10;
+shardCollectionWithChunks(st, testDB[collName], numDocs);
+
+// Make sure the 'shardIdentity' document on each shard is replicated to all secondary nodes
+// before issuing reads against them.
+shard0ReplTest.awaitReplication();
+shard1ReplTest.awaitReplication();
+
+// Print current sharding stats for debugging.
+st.printShardingStatus(5);
+
+// Filter to target one document in each shard.
+var shard0DocKey = 2;
+var shard1DocKey = 7;
+var dualShardQueryFilter = {$or: [{x: shard0DocKey}, {x: shard1DocKey}]};
+
+jsTestLog("Testing linearizable read from secondaries");
+
+// Execute a linearizable read from secondaries (targeting both shards) which should fail.
+st.s.setReadPref("secondary");
+var res = assert.commandFailed(testDB.runReadCommand({
+ find: collName,
+ filter: dualShardQueryFilter,
+ readConcern: {level: "linearizable"},
+ maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
+}));
+assert.eq(res.code, ErrorCodes.NotMaster);
+
+jsTestLog("Testing linearizable read from primaries.");
+
+// Execute a linearizable read from primaries (targeting both shards) which should succeed.
+st.s.setReadPref("primary");
+var res = assert.commandWorked(testDB.runReadCommand({
+ find: collName,
+ sort: {x: 1},
+ filter: dualShardQueryFilter,
+ readConcern: {level: "linearizable"},
+ maxTimeMS: shard0ReplTest.kDefaultTimeoutMS
+}));
+
+// Make sure data was returned from both shards correctly.
+assert.eq(res.cursor.firstBatch[0].x, shard0DocKey);
+assert.eq(res.cursor.firstBatch[1].x, shard1DocKey);
+
+jsTestLog("Testing linearizable read targeting partitioned primary.");
+
+var primary = shard0ReplTest.getPrimary();
+var secondaries = shard0ReplTest.getSecondaries();
+
+// Partition the primary in the first shard.
+secondaries[0].disconnect(primary);
+secondaries[1].disconnect(primary);
+
+jsTestLog("Current Replica Set Topology of First Shard: [Secondary-Secondary] [Primary]");
+
+// Execute a linearizable read targeting the partitioned primary in the first shard and the
+// healthy primary in the second shard. This should time out due to the partitioned primary.
+var result = testDB.runReadCommand({
+ find: collName,
+ filter: dualShardQueryFilter,
+ readConcern: {level: "linearizable"},
+ maxTimeMS: 3000
+});
+assert.commandFailedWithCode(result, ErrorCodes.MaxTimeMSExpired);
+
+st.stop();
})();
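
Linearizable reads are confirmed on the primary by a no-op write that must replicate to a
majority, which is why a partitioned primary times out rather than answering and why
secondaries reject the level outright. A sketch of the secondary-side failure (using the
test's st and testDB):

st.s.setReadPref("secondary");
const badRes = testDB.runReadCommand(
    {find: collName, readConcern: {level: "linearizable"}, maxTimeMS: 15000});
// Secondaries cannot serve linearizable reads, so the command fails with NotMaster.
assert.commandFailed(badRes);
assert.eq(badRes.code, ErrorCodes.NotMaster);
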
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index f5b046c26b7..ce13ea5871d 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,93 +1,93 @@
(function() {
- 'use strict';
- var test = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
-
- var mongos = test.s0;
- var mongod = test.shard0;
-
- var res;
- var dbArray;
-
- // grab the config db instance by name
- var getDBSection = function(dbsArray, dbToFind) {
- for (var pos in dbsArray) {
- if (dbsArray[pos].name && dbsArray[pos].name === dbToFind)
- return dbsArray[pos];
- }
- return null;
- };
-
- // Function to verify information for a database entry in listDatabases.
- var dbEntryCheck = function(dbEntry, onConfig) {
- assert.neq(null, dbEntry);
- assert.neq(null, dbEntry.sizeOnDisk);
- assert.eq(false, dbEntry.empty);
-
- // Check against shards
- var shards = dbEntry.shards;
- assert(shards);
- assert((shards["config"] && onConfig) || (!shards["config"] && !onConfig));
- };
-
- // Non-config-server db checks.
- {
- assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
- assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
-
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- dbEntryCheck(getDBSection(dbArray, "blah"), false);
- dbEntryCheck(getDBSection(dbArray, "foo"), false);
- dbEntryCheck(getDBSection(dbArray, "raw"), false);
- }
-
- // Local db is never returned.
- {
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- assert(!getDBSection(dbArray, 'local'));
- }
+'use strict';
+var test = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
- // Admin and config are always reported on the config shard.
- {
- assert.writeOK(mongos.getDB("admin").test.insert({_id: 1}));
- assert.writeOK(mongos.getDB("config").test.insert({_id: 1}));
+var mongos = test.s0;
+var mongod = test.shard0;
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
+var res;
+var dbArray;
- dbEntryCheck(getDBSection(dbArray, "config"), true);
- dbEntryCheck(getDBSection(dbArray, "admin"), true);
+// Grab a database entry from the listDatabases output by name.
+var getDBSection = function(dbsArray, dbToFind) {
+ for (var pos in dbsArray) {
+ if (dbsArray[pos].name && dbsArray[pos].name === dbToFind)
+ return dbsArray[pos];
}
-
- // Config db can be present on config shard and on other shards.
- {
- mongod.getDB("config").foo.insert({_id: 1});
-
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- var entry = getDBSection(dbArray, "config");
- dbEntryCheck(entry, true);
- assert(entry["shards"]);
- assert.eq(Object.keys(entry["shards"]).length, 2);
- }
-
- // Admin db is only reported on the config shard, never on other shards.
- {
- mongod.getDB("admin").foo.insert({_id: 1});
-
- res = mongos.adminCommand("listDatabases");
- dbArray = res.databases;
-
- var entry = getDBSection(dbArray, "admin");
- dbEntryCheck(entry, true);
- assert(entry["shards"]);
- assert.eq(Object.keys(entry["shards"]).length, 1);
- }
-
- test.stop();
+ return null;
+};
+
+// Function to verify information for a database entry in listDatabases.
+var dbEntryCheck = function(dbEntry, onConfig) {
+ assert.neq(null, dbEntry);
+ assert.neq(null, dbEntry.sizeOnDisk);
+ assert.eq(false, dbEntry.empty);
+
+ // Check against shards
+ var shards = dbEntry.shards;
+ assert(shards);
+ assert((shards["config"] && onConfig) || (!shards["config"] && !onConfig));
+};
+
+// Non-config-server db checks.
+{
+ assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
+ assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
+ assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ dbEntryCheck(getDBSection(dbArray, "blah"), false);
+ dbEntryCheck(getDBSection(dbArray, "foo"), false);
+ dbEntryCheck(getDBSection(dbArray, "raw"), false);
+}
+
+// Local db is never returned.
+{
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ assert(!getDBSection(dbArray, 'local'));
+}
+
+// Admin and config are always reported on the config shard.
+{
+ assert.writeOK(mongos.getDB("admin").test.insert({_id: 1}));
+ assert.writeOK(mongos.getDB("config").test.insert({_id: 1}));
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ dbEntryCheck(getDBSection(dbArray, "config"), true);
+ dbEntryCheck(getDBSection(dbArray, "admin"), true);
+}
+
+// Config db can be present on config shard and on other shards.
+{
+ mongod.getDB("config").foo.insert({_id: 1});
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ var entry = getDBSection(dbArray, "config");
+ dbEntryCheck(entry, true);
+ assert(entry["shards"]);
+ assert.eq(Object.keys(entry["shards"]).length, 2);
+}
+
+// Admin db is only reported on the config shard, never on other shards.
+{
+ mongod.getDB("admin").foo.insert({_id: 1});
+
+ res = mongos.adminCommand("listDatabases");
+ dbArray = res.databases;
+
+ var entry = getDBSection(dbArray, "admin");
+ dbEntryCheck(entry, true);
+ assert(entry["shards"]);
+ assert.eq(Object.keys(entry["shards"]).length, 1);
+}
+
+test.stop();
})();
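For reference, dbEntryCheck above encodes the shape a listDatabases entry takes when run through mongos. A sketch of that shape with made-up values (illustrative only, not actual server output; the shard name is a placeholder):

    var exampleEntry = {
        name: "foo",                  // matched by getDBSection()
        sizeOnDisk: 81920,            // asserted non-null
        empty: false,                 // asserted false once the db holds data
        // Per-shard size map; a "config" key appears only for entries that
        // live on the config shard (admin/config), which is what the
        // onConfig flag checks.
        shards: {"shard0000": 81920}
    };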
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index d4261cadb03..e008ffb6689 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -2,69 +2,67 @@
// Test the listShards command by adding stand-alone and replica-set shards to a cluster
//
(function() {
- 'use strict';
+'use strict';
- function checkShardName(shardName, shardsArray) {
- var found = false;
- shardsArray.forEach(function(shardObj) {
- if (shardObj._id === shardName) {
- found = true;
- return;
- }
- });
- return found;
- }
+function checkShardName(shardName, shardsArray) {
+ var found = false;
+ shardsArray.forEach(function(shardObj) {
+ if (shardObj._id === shardName) {
+ found = true;
+ return;
+ }
+ });
+ return found;
+}
- var shardTest = new ShardingTest(
- {name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
+var shardTest =
+ new ShardingTest({name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
- var mongos = shardTest.s0;
- var res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- var shardsArray = res.shards;
- assert.eq(shardsArray.length, 1);
+var mongos = shardTest.s0;
+var res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+var shardsArray = res.shards;
+assert.eq(shardsArray.length, 1);
- // add standalone mongod
- var standaloneShard = MongoRunner.runMongod({useHostName: true, shardsvr: ""});
- res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(checkShardName('standalone', shardsArray),
- 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+// add standalone mongod
+var standaloneShard = MongoRunner.runMongod({useHostName: true, shardsvr: ""});
+res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
- // add replica set named 'repl'
- var rs1 =
- new ReplSetTest({name: 'repl', nodes: 1, useHostName: true, nodeOptions: {shardsvr: ""}});
- rs1.startSet();
- rs1.initiate();
- res = shardTest.admin.runCommand({addShard: rs1.getURL()});
- assert.commandWorked(res, 'addShard command failed');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 3);
- assert(checkShardName('repl', shardsArray),
- 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
+// add replica set named 'repl'
+var rs1 = new ReplSetTest({name: 'repl', nodes: 1, useHostName: true, nodeOptions: {shardsvr: ""}});
+rs1.startSet();
+rs1.initiate();
+res = shardTest.admin.runCommand({addShard: rs1.getURL()});
+assert.commandWorked(res, 'addShard command failed');
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 3);
+assert(checkShardName('repl', shardsArray),
+ 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
- // remove 'repl' shard
- assert.soon(function() {
- var res = shardTest.admin.runCommand({removeShard: 'repl'});
- assert.commandWorked(res, 'removeShard command failed');
- return res.state === 'completed';
- }, 'failed to remove the replica set shard');
+// remove 'repl' shard
+assert.soon(function() {
+ var res = shardTest.admin.runCommand({removeShard: 'repl'});
+ assert.commandWorked(res, 'removeShard command failed');
+ return res.state === 'completed';
+}, 'failed to remove the replica set shard');
- res = mongos.adminCommand('listShards');
- assert.commandWorked(res, 'listShards command failed');
- shardsArray = res.shards;
- assert.eq(shardsArray.length, 2);
- assert(!checkShardName('repl', shardsArray),
- 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
-
- rs1.stopSet();
- shardTest.stop();
- MongoRunner.stopMongod(standaloneShard);
+res = mongos.adminCommand('listShards');
+assert.commandWorked(res, 'listShards command failed');
+shardsArray = res.shards;
+assert.eq(shardsArray.length, 2);
+assert(!checkShardName('repl', shardsArray),
+ 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
+rs1.stopSet();
+shardTest.stop();
+MongoRunner.stopMongod(standaloneShard);
})();
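checkShardName above tracks a found flag inside forEach; the early return only exits the callback, not the loop. A functionally equivalent sketch using Array.prototype.some, which does short-circuit (an alternative shown for illustration, not part of the test):

    function checkShardNameAlt(shardName, shardsArray) {
        // some() stops scanning at the first shard whose _id matches.
        return shardsArray.some(function(shardObj) {
            return shardObj._id === shardName;
        });
    }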
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 996fb949175..55b7548d6db 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -9,142 +9,67 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- var replSetName = "replsets_server-6591";
- var keyfile = "jstests/libs/key1";
- var numShards = 2;
- var username = "foo";
- var password = "bar";
-
- var createUser = function(mongo) {
- print("============ adding a user.");
- mongo.getDB("admin").createUser(
- {user: username, pwd: password, roles: jsTest.adminUserRoles});
- };
-
- var addUsersToEachShard = function(st) {
- for (var i = 0; i < numShards; i++) {
- print("============ adding a user to shard " + i);
- var d = st["shard" + i];
- d.getDB("admin").createUser(
- {user: username, pwd: password, roles: jsTest.adminUserRoles});
+'use strict';
+
+var replSetName = "replsets_server-6591";
+var keyfile = "jstests/libs/key1";
+var numShards = 2;
+var username = "foo";
+var password = "bar";
+
+var createUser = function(mongo) {
+ print("============ adding a user.");
+ mongo.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+};
+
+var addUsersToEachShard = function(st) {
+ for (var i = 0; i < numShards; i++) {
+ print("============ adding a user to shard " + i);
+ var d = st["shard" + i];
+ d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ }
+};
+
+var addShard = function(st, shouldPass) {
+ var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false, 'shardsvr': ''});
+ var res = st.getDB("admin").runCommand({addShard: m.host});
+ if (shouldPass) {
+ assert.commandWorked(res, "Add shard");
+ } else {
+ assert.commandFailed(res, "Add shard");
+ }
+ return m;
+};
+
+var findEmptyShard = function(st, ns) {
+ var counts = st.chunkCounts("foo");
+
+ for (var shard in counts) {
+ if (counts[shard] == 0) {
+ return shard;
}
- };
-
- var addShard = function(st, shouldPass) {
- var m =
- MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false, 'shardsvr': ''});
- var res = st.getDB("admin").runCommand({addShard: m.host});
- if (shouldPass) {
- assert.commandWorked(res, "Add shard");
- } else {
- assert.commandFailed(res, "Add shard");
- }
- return m;
- };
+ }
- var findEmptyShard = function(st, ns) {
- var counts = st.chunkCounts("foo");
+ return null;
+};
- for (var shard in counts) {
- if (counts[shard] == 0) {
- return shard;
- }
- }
+var assertCannotRunCommands = function(mongo, st) {
+ print("============ ensuring that commands cannot be run.");
- return null;
- };
-
- var assertCannotRunCommands = function(mongo, st) {
- print("============ ensuring that commands cannot be run.");
-
- // CRUD
- var test = mongo.getDB("test");
- assert.throws(function() {
- test.system.users.findOne();
- });
- assert.writeError(test.foo.save({_id: 0}));
- assert.throws(function() {
- test.foo.findOne({_id: 0});
- });
- assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeError(test.foo.remove({_id: 0}));
-
- // Multi-shard
- assert.throws(function() {
- test.foo.mapReduce(
- function() {
- emit(1, 1);
- },
- function(id, count) {
- return Array.sum(count);
- },
- {out: "other"});
- });
-
- // Config
- assert.throws(function() {
- mongo.getDB("config").shards.findOne();
- });
-
- var authorizeErrorCode = 13;
- var res = mongo.getDB("admin").runCommand({
- moveChunk: "test.foo",
- find: {_id: 1},
- to: st.shard0.shardName // Arbitrary shard.
- });
- assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
- // Create collection
- assert.commandFailedWithCode(
- mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
- authorizeErrorCode,
- "createCollection");
- // Set/Get system parameters
- var params = [
- {param: "journalCommitInterval", val: 200},
- {param: "logLevel", val: 2},
- {param: "logUserIds", val: 1},
- {param: "notablescan", val: 1},
- {param: "quiet", val: 1},
- {param: "replApplyBatchSize", val: 10},
- {param: "replIndexPrefetch", val: "none"},
- {param: "syncdelay", val: 30},
- {param: "traceExceptions", val: true},
- {param: "sslMode", val: "preferSSL"},
- {param: "clusterAuthMode", val: "sendX509"},
- {param: "userCacheInvalidationIntervalSecs", val: 300}
- ];
- params.forEach(function(p) {
- var cmd = {setParameter: 1};
- cmd[p.param] = p.val;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode,
- "setParameter: " + p.param);
- });
- params.forEach(function(p) {
- var cmd = {getParameter: 1};
- cmd[p.param] = 1;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode,
- "getParameter: " + p.param);
- });
- };
-
- var assertCanRunCommands = function(mongo, st) {
- print("============ ensuring that commands can be run.");
-
- // CRUD
- var test = mongo.getDB("test");
-
- // this will throw if it fails
+ // CRUD
+ var test = mongo.getDB("test");
+ assert.throws(function() {
test.system.users.findOne();
+ });
+ assert.writeError(test.foo.save({_id: 0}));
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
- assert.writeOK(test.foo.save({_id: 0}));
- assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
- assert.writeOK(test.foo.remove({_id: 0}));
-
- // Multi-shard
+ // Multi-shard
+ assert.throws(function() {
test.foo.mapReduce(
function() {
emit(1, 1);
@@ -153,122 +78,192 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
return Array.sum(count);
},
{out: "other"});
+ });
- // Config
- // this will throw if it fails
+ // Config
+ assert.throws(function() {
mongo.getDB("config").shards.findOne();
+ });
- var to = findEmptyShard(st, "test.foo");
- var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
- assert.commandWorked(res);
- };
-
- var authenticate = function(mongo) {
- print("============ authenticating user.");
- mongo.getDB("admin").auth(username, password);
- };
+ var authorizeErrorCode = 13;
+ var res = mongo.getDB("admin").runCommand({
+ moveChunk: "test.foo",
+ find: {_id: 1},
+ to: st.shard0.shardName // Arbitrary shard.
+ });
+ assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
+ // Create collection
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
+ // Set/Get system parameters
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
+ params.forEach(function(p) {
+ var cmd = {setParameter: 1};
+ cmd[p.param] = p.val;
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
+ });
+ params.forEach(function(p) {
+ var cmd = {getParameter: 1};
+ cmd[p.param] = 1;
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
+ });
+};
+
+var assertCanRunCommands = function(mongo, st) {
+ print("============ ensuring that commands can be run.");
+
+ // CRUD
+ var test = mongo.getDB("test");
+
+ // this will throw if it fails
+ test.system.users.findOne();
+
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
+
+ // Multi-shard
+ test.foo.mapReduce(
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
+
+ // Config
+ // this will throw if it fails
+ mongo.getDB("config").shards.findOne();
+
+ var to = findEmptyShard(st, "test.foo");
+ var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
+ assert.commandWorked(res);
+};
+
+var authenticate = function(mongo) {
+ print("============ authenticating user.");
+ mongo.getDB("admin").auth(username, password);
+};
+
+var setupSharding = function(shardingTest) {
+ var mongo = shardingTest.s;
+
+ print("============ enabling sharding on test.foo.");
+ mongo.getDB("admin").runCommand({enableSharding: "test"});
+ shardingTest.ensurePrimaryShard('test', st.shard1.shardName);
+ mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
+
+ var test = mongo.getDB("test");
+ for (var i = 1; i < 20; i++) {
+ test.foo.insert({_id: i});
+ }
+};
+
+var start = function() {
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ return new ShardingTest({
+ auth: "",
+ shards: numShards,
+ other: {
+ keyFile: keyfile,
+ chunkSize: 1,
+ useHostname:
+ false, // Must use localhost to take advantage of the localhost auth bypass
+ shardAsReplicaSet: false
+ }
+ });
+};
- var setupSharding = function(shardingTest) {
- var mongo = shardingTest.s;
+var shutdown = function(st) {
+ print("============ shutting down.");
- print("============ enabling sharding on test.foo.");
- mongo.getDB("admin").runCommand({enableSharding: "test"});
- shardingTest.ensurePrimaryShard('test', st.shard1.shardName);
- mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
+ // SERVER-8445
+ // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
+ // ShardingTest.stop does not have a way to provide auth
+ // information. Therefore, we'll do this manually for now.
- var test = mongo.getDB("test");
- for (var i = 1; i < 20; i++) {
- test.foo.insert({_id: i});
- }
- };
-
- var start = function() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- return new ShardingTest({
- auth: "",
- shards: numShards,
- other: {
- keyFile: keyfile,
- chunkSize: 1,
- useHostname:
- false, // Must use localhost to take advantage of the localhost auth bypass
- shardAsReplicaSet: false
- }
- });
- };
-
- var shutdown = function(st) {
- print("============ shutting down.");
-
- // SERVER-8445
- // Unlike MongoRunner.stopMongod and ReplSetTest.stopSet,
- // ShardingTest.stop does not have a way to provide auth
- // information. Therefore, we'll do this manually for now.
-
- for (var i = 0; i < st._mongos.length; i++) {
- var conn = st["s" + i];
- MongoRunner.stopMongos(conn,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
+ for (var i = 0; i < st._mongos.length; i++) {
+ var conn = st["s" + i];
+ MongoRunner.stopMongos(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
- for (var i = 0; i < st._connections.length; i++) {
- var conn = st["shard" + i];
- MongoRunner.stopMongod(conn,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
+ for (var i = 0; i < st._connections.length; i++) {
+ var conn = st["shard" + i];
+ MongoRunner.stopMongod(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
- for (var i = 0; i < st._configServers.length; i++) {
- var conn = st["config" + i];
- MongoRunner.stopMongod(conn,
- /*signal*/ false,
- {auth: {user: username, pwd: password}});
- }
+ for (var i = 0; i < st._configServers.length; i++) {
+ var conn = st["config" + i];
+ MongoRunner.stopMongod(conn,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
+ }
- st.stop();
- };
+ st.stop();
+};
- print("=====================");
- print("starting shards");
- print("=====================");
- var st = start();
- var host = st.s.host;
- var extraShards = [];
+print("=====================");
+print("starting shards");
+print("=====================");
+var st = start();
+var host = st.s.host;
+var extraShards = [];
- var mongo = new Mongo(host);
+var mongo = new Mongo(host);
- assertCannotRunCommands(mongo, st);
+assertCannotRunCommands(mongo, st);
- extraShards.push(addShard(st, 1));
- createUser(mongo);
+extraShards.push(addShard(st, 1));
+createUser(mongo);
- authenticate(mongo);
- authenticate(st.s);
- setupSharding(st);
+authenticate(mongo);
+authenticate(st.s);
+setupSharding(st);
- addUsersToEachShard(st);
- st.printShardingStatus();
+addUsersToEachShard(st);
+st.printShardingStatus();
- assertCanRunCommands(mongo, st);
+assertCanRunCommands(mongo, st);
- print("===============================");
- print("reconnecting with a new client.");
- print("===============================");
+print("===============================");
+print("reconnecting with a new client.");
+print("===============================");
- mongo = new Mongo(host);
+mongo = new Mongo(host);
- assertCannotRunCommands(mongo, st);
- extraShards.push(addShard(mongo, 0));
+assertCannotRunCommands(mongo, st);
+extraShards.push(addShard(mongo, 0));
- authenticate(mongo);
+authenticate(mongo);
- assertCanRunCommands(mongo, st);
- extraShards.push(addShard(mongo, 1));
- st.printShardingStatus();
+assertCanRunCommands(mongo, st);
+extraShards.push(addShard(mongo, 1));
+st.printShardingStatus();
- shutdown(st);
- extraShards.forEach(function(sh) {
- MongoRunner.stopMongod(sh);
- });
+shutdown(st);
+extraShards.forEach(function(sh) {
+ MongoRunner.stopMongod(sh);
+});
})();
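The behavior localhostAuthBypass.js exercises, reduced to its essentials: a freshly started auth-enabled node lets a localhost connection create the first user without authenticating, and afterwards every connection must present credentials. A minimal sketch under those assumptions (standalone node, not a cluster):

    var conn = MongoRunner.runMongod({auth: ""});
    var admin = conn.getDB("admin");
    // First user may be created over localhost without authenticating.
    admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
    // The bypass is now closed; commands fail until auth() succeeds.
    assert(admin.auth("foo", "bar"));
    MongoRunner.stopMongod(conn, /*signal*/ false, {auth: {user: "foo", pwd: "bar"}});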
diff --git a/jstests/sharding/logical_time_api.js b/jstests/sharding/logical_time_api.js
index 918c47b9864..3fc1a484d29 100644
--- a/jstests/sharding/logical_time_api.js
+++ b/jstests/sharding/logical_time_api.js
@@ -8,93 +8,92 @@
* Expects logicalTime to come in the command body from both a mongos and a mongod.
*/
(function() {
- "use strict";
-
- // Returns true if the given object contains a logicalTime BSON object in the following format:
- // $clusterTime: {
- // clusterTime: <Timestamp>
- // signature: {
- // hash: <BinData>
- // keyId: <NumberLong>
- // }
- // }
- function containsValidLogicalTimeBson(obj) {
- if (!obj) {
- return false;
- }
-
- var logicalTime = obj.$clusterTime;
- return logicalTime && isType(logicalTime, "BSON") &&
- isType(logicalTime.clusterTime, "Timestamp") && isType(logicalTime.signature, "BSON") &&
- isType(logicalTime.signature.hash, "BinData") &&
- isType(logicalTime.signature.keyId, "NumberLong");
+"use strict";
+
+// Returns true if the given object contains a logicalTime BSON object in the following format:
+// $clusterTime: {
+// clusterTime: <Timestamp>
+// signature: {
+// hash: <BinData>
+// keyId: <NumberLong>
+// }
+// }
+function containsValidLogicalTimeBson(obj) {
+ if (!obj) {
+ return false;
}
- function isType(val, typeString) {
- assert.eq(Object.prototype.toString.call(val),
- "[object " + typeString + "]",
- "expected: " + val + ", to be of type: " + typeString);
- return true;
- }
-
- // A mongos that talks to a non-sharded collection on a sharded replica set returns a
- // logicalTime BSON object that matches the expected format.
- var st = new ShardingTest({name: "logical_time_api", shards: {rs0: {nodes: 1}}});
-
- var testDB = st.s.getDB("test");
- var res =
- assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 1}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongos talking to a non-sharded collection on a sharded " +
- "replica set to contain logicalTime, received: " + tojson(res));
-
- // A mongos that talks to a sharded collection on a sharded replica set returns a
- // logicalTime BSON object that matches the expected format.
- assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
- assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
-
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "bar", documents: [{x: 2}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongos talking to a sharded collection on a sharded " +
- "replica set to contain logicalTime, received: " + tojson(res));
-
- // Verify mongos can accept requests with $clusterTime in the command body.
- assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
-
- // A mongod in a sharded replica set returns a logicalTime BSON object that matches the
- // expected format.
- testDB = st.rs0.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 3}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongod in a sharded replica set to contain " +
- "logicalTime, received: " + tojson(res));
-
- // Verify mongod can accept requests with $clusterTime in the command body.
- res = assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
-
- st.stop();
-
- // A mongod from a non-sharded replica set also returns logicalTime.
- var replTest = new ReplSetTest({name: "logical_time_api_non_sharded_replset", nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- testDB = replTest.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 4}]}));
- assert(containsValidLogicalTimeBson(res),
- "Expected command body from a mongod in a non-sharded replica set to " +
- "contain logicalTime, received: " + tojson(res));
-
- replTest.stopSet();
-
- // A standalone mongod does not return logicalTime.
- var standalone = MongoRunner.runMongod();
-
- testDB = standalone.getDB("test");
- res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 5}]}));
- assert(!containsValidLogicalTimeBson(res),
- "Expected command body from a standalone mongod to not contain logicalTime, " +
- "received: " + tojson(res));
-
- MongoRunner.stopMongod(standalone);
+ var logicalTime = obj.$clusterTime;
+ return logicalTime && isType(logicalTime, "BSON") &&
+ isType(logicalTime.clusterTime, "Timestamp") && isType(logicalTime.signature, "BSON") &&
+ isType(logicalTime.signature.hash, "BinData") &&
+ isType(logicalTime.signature.keyId, "NumberLong");
+}
+
+function isType(val, typeString) {
+ assert.eq(Object.prototype.toString.call(val),
+ "[object " + typeString + "]",
+ "expected: " + val + ", to be of type: " + typeString);
+ return true;
+}
+
+// A mongos that talks to a non-sharded collection on a sharded replica set returns a
+// logicalTime BSON object that matches the expected format.
+var st = new ShardingTest({name: "logical_time_api", shards: {rs0: {nodes: 1}}});
+
+var testDB = st.s.getDB("test");
+var res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 1}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongos talking to a non-sharded collection on a sharded " +
+ "replica set to contain logicalTime, received: " + tojson(res));
+
+// A mongos that talks to a sharded collection on a sharded replica set returns a
+// logicalTime BSON object that matches the expected format.
+assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
+assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
+
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "bar", documents: [{x: 2}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongos talking to a sharded collection on a sharded " +
+ "replica set to contain logicalTime, received: " + tojson(res));
+
+// Verify mongos can accept requests with $clusterTime in the command body.
+assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
+
+// A mongod in a sharded replica set returns a logicalTime BSON object that matches the expected
+// format.
+testDB = st.rs0.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 3}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongod in a sharded replica set to contain " +
+ "logicalTime, received: " + tojson(res));
+
+// Verify mongod can accept requests with $clusterTime in the command body.
+res = assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
+
+st.stop();
+
+// A mongod from a non-sharded replica set also returns logicalTime.
+var replTest = new ReplSetTest({name: "logical_time_api_non_sharded_replset", nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+testDB = replTest.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 4}]}));
+assert(containsValidLogicalTimeBson(res),
+ "Expected command body from a mongod in a non-sharded replica set to " +
+ "contain logicalTime, received: " + tojson(res));
+
+replTest.stopSet();
+
+// A standalone mongod does not return logicalTime.
+var standalone = MongoRunner.runMongod();
+
+testDB = standalone.getDB("test");
+res = assert.commandWorked(testDB.runCommand("insert", {insert: "foo", documents: [{x: 5}]}));
+assert(!containsValidLogicalTimeBson(res),
+ "Expected command body from a standalone mongod to not contain logicalTime, " +
+ "received: " + tojson(res));
+
+MongoRunner.stopMongod(standalone);
})();
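The $clusterTime document that containsValidLogicalTimeBson validates has the shape sketched below. Values are placeholders, and note that the isType checks compare Object.prototype.toString tags, so a hand-written shell literal like this would not itself pass them; only genuine command responses carry the BSON-typed fields:

    var shape = {
        $clusterTime: {
            clusterTime: Timestamp(1564181000, 1),                 // <Timestamp>
            signature: {
                hash: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="),  // <BinData>
                keyId: NumberLong(0)                               // <NumberLong>
            }
        }
    };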
diff --git a/jstests/sharding/logical_time_metadata.js b/jstests/sharding/logical_time_metadata.js
index e8afac2f5b3..838b3d07816 100644
--- a/jstests/sharding/logical_time_metadata.js
+++ b/jstests/sharding/logical_time_metadata.js
@@ -4,58 +4,57 @@
* where the cluster time metadata can be propagated, making it inherently racy.
*/
(function() {
- "use strict";
+"use strict";
- function assertHasClusterTimeAndOperationTime(res) {
- assert.hasFields(res, ['$clusterTime']);
- assert.hasFields(res.$clusterTime, ['clusterTime', 'signature']);
- }
+function assertHasClusterTimeAndOperationTime(res) {
+ assert.hasFields(res, ['$clusterTime']);
+ assert.hasFields(res.$clusterTime, ['clusterTime', 'signature']);
+}
- var st = new ShardingTest({shards: {rs0: {nodes: 3}}});
- st.s.adminCommand({enableSharding: 'test'});
+var st = new ShardingTest({shards: {rs0: {nodes: 3}}});
+st.s.adminCommand({enableSharding: 'test'});
- var db = st.s.getDB('test');
+var db = st.s.getDB('test');
- var res = db.runCommand({insert: 'user', documents: [{x: 10}]});
- assert.commandWorked(res);
- assertHasClusterTimeAndOperationTime(res);
+var res = db.runCommand({insert: 'user', documents: [{x: 10}]});
+assert.commandWorked(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({blah: 'blah'});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
+res = db.runCommand({blah: 'blah'});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
+res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
- res = st.rs0.getPrimary().adminCommand({replSetGetStatus: 1});
+res = st.rs0.getPrimary().adminCommand({replSetGetStatus: 1});
- // Cluster time may advance after replSetGetStatus finishes executing and before its logical
- // time metadata is computed, in which case the response's $clusterTime will be greater than the
- // appliedOpTime timestamp in its body. Assert the timestamp is <= $clusterTime to account for
- // this.
- var appliedTime = res.optimes.appliedOpTime.ts;
- var logicalTimeMetadata = res.$clusterTime;
- assert.lte(0,
- timestampCmp(appliedTime, logicalTimeMetadata.clusterTime),
- 'appliedTime: ' + tojson(appliedTime) + ' not less than or equal to clusterTime: ' +
- tojson(logicalTimeMetadata.clusterTime));
+// Cluster time may advance after replSetGetStatus finishes executing and before its logical
+// time metadata is computed, in which case the response's $clusterTime will be greater than the
+// appliedOpTime timestamp in its body. Assert the timestamp is <= $clusterTime to account for
+// this.
+var appliedTime = res.optimes.appliedOpTime.ts;
+var logicalTimeMetadata = res.$clusterTime;
+assert.lte(0,
+ timestampCmp(appliedTime, logicalTimeMetadata.clusterTime),
+ 'appliedTime: ' + tojson(appliedTime) + ' not less than or equal to clusterTime: ' +
+ tojson(logicalTimeMetadata.clusterTime));
- assert.commandWorked(db.runCommand({ping: 1, '$clusterTime': logicalTimeMetadata}));
+assert.commandWorked(db.runCommand({ping: 1, '$clusterTime': logicalTimeMetadata}));
- db = st.rs0.getPrimary().getDB('testRS');
- res = db.runCommand({insert: 'user', documents: [{x: 10}]});
- assert.commandWorked(res);
- assertHasClusterTimeAndOperationTime(res);
+db = st.rs0.getPrimary().getDB('testRS');
+res = db.runCommand({insert: 'user', documents: [{x: 10}]});
+assert.commandWorked(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({blah: 'blah'});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
+res = db.runCommand({blah: 'blah'});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
- res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
- assert.commandFailed(res);
- assertHasClusterTimeAndOperationTime(res);
-
- st.stop();
+res = db.runCommand({insert: "user", documents: [{x: 10}], writeConcern: {blah: "blah"}});
+assert.commandFailed(res);
+assertHasClusterTimeAndOperationTime(res);
+st.stop();
})();
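The gossip round-trip this test leans on, in miniature: any response from a node participating in cluster time carries $clusterTime, and that value can be attached verbatim to a later command, as the ping in the middle of the test does. A minimal sketch (names illustrative):

    var first = db.runCommand({ping: 1});
    assert.commandWorked(db.runCommand({ping: 1, $clusterTime: first.$clusterTime}));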
diff --git a/jstests/sharding/lookup.js b/jstests/sharding/lookup.js
index 2d988912944..3c0364bd6a4 100644
--- a/jstests/sharding/lookup.js
+++ b/jstests/sharding/lookup.js
@@ -1,254 +1,244 @@
// Basic $lookup regression tests.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- const st = new ShardingTest({shards: 2, config: 1, mongos: 1});
- const testName = "lookup_sharded";
-
- const nodeList = DiscoverTopology.findNonConfigNodes(st.s);
- setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", true);
-
- const mongosDB = st.s0.getDB(testName);
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Used by testPipeline to sort result documents. All _ids must be primitives.
- function compareId(a, b) {
- if (a._id < b._id) {
- return -1;
- }
- if (a._id > b._id) {
- return 1;
- }
- return 0;
- }
-
- // Helper for testing that a pipeline returns the correct set of results.
- function testPipeline(pipeline, expectedResult, collection) {
- assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
- expectedResult.sort(compareId));
- }
-
- function runTest(coll, from, thirdColl, fourthColl) {
- let db = null; // Using the db variable is banned in this function.
-
- assert.commandWorked(coll.remove({}));
- assert.commandWorked(from.remove({}));
- assert.commandWorked(thirdColl.remove({}));
- assert.commandWorked(fourthColl.remove({}));
+"use strict";
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
+const st = new ShardingTest({shards: 2, config: 1, mongos: 1});
+const testName = "lookup_sharded";
- //
- // Basic functionality.
- //
+const nodeList = DiscoverTopology.findNonConfigNodes(st.s);
+setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", true);
- // "from" document added to "as" field if a == b, where nonexistent fields are treated as
- // null.
- let expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
- expectedResults,
- coll);
-
- // If localField is nonexistent, it is treated as if it is null.
- expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [{$lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}}],
- expectedResults,
- coll);
+const mongosDB = st.s0.getDB(testName);
+assert.commandWorked(mongosDB.dropDatabase());
- // If foreignField is nonexistent, it is treated as if it is null.
- expectedResults = [
- {_id: 0, a: 1, "same": []},
- {_id: 1, a: null, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [{$lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}}],
- expectedResults,
- coll);
-
- // If there are no matches or the from coll doesn't exist, the result is an empty array.
- expectedResults =
- [{_id: 0, a: 1, "same": []}, {_id: 1, a: null, "same": []}, {_id: 2, "same": []}];
- testPipeline(
- [{$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}}],
- expectedResults,
- coll);
- testPipeline(
- [{$lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}}],
- expectedResults,
- coll);
+// Used by testPipeline to sort result documents. All _ids must be primitives.
+function compareId(a, b) {
+ if (a._id < b._id) {
+ return -1;
+ }
+ if (a._id > b._id) {
+ return 1;
+ }
+ return 0;
+}
- // If field name specified by "as" already exists, it is overwritten.
- expectedResults = [
- {_id: 0, "a": [{_id: 0, b: 1}]},
- {_id: 1, "a": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "a": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "a"}}],
- expectedResults,
- coll);
+// Helper for testing that a pipeline returns the correct set of results.
+function testPipeline(pipeline, expectedResult, collection) {
+ assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
+ expectedResult.sort(compareId));
+}
- // Running multiple $lookups in the same pipeline is allowed.
- expectedResults = [
- {_id: 0, a: 1, "c": [{_id: 0, b: 1}], "d": [{_id: 0, b: 1}]},
- {
- _id: 1,
- a: null, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]
- },
- {_id: 2, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
- {$project: {"a": 1, "c": 1}},
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
- ],
- expectedResults,
- coll);
+function runTest(coll, from, thirdColl, fourthColl) {
+ let db = null; // Using the db variable is banned in this function.
- //
- // Coalescing with $unwind.
- //
-
- // A normal $unwind on the "as" field.
- expectedResults = [
- {_id: 0, a: 1, same: {_id: 0, b: 1}},
- {_id: 1, a: null, same: {_id: 1, b: null}},
- {_id: 1, a: null, same: {_id: 2}},
- {_id: 2, same: {_id: 1, b: null}},
- {_id: 2, same: {_id: 2}}
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same"}}
- ],
- expectedResults,
- coll);
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(from.remove({}));
+ assert.commandWorked(thirdColl.remove({}));
+ assert.commandWorked(fourthColl.remove({}));
- // An $unwind on the "as" field, with includeArrayIndex.
- expectedResults = [
- {_id: 0, a: 1, same: {_id: 0, b: 1}, index: NumberLong(0)},
- {_id: 1, a: null, same: {_id: 1, b: null}, index: NumberLong(0)},
- {_id: 1, a: null, same: {_id: 2}, index: NumberLong(1)},
- {_id: 2, same: {_id: 1, b: null}, index: NumberLong(0)},
- {_id: 2, same: {_id: 2}, index: NumberLong(1)},
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same", includeArrayIndex: "index"}}
- ],
- expectedResults,
- coll);
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: null}));
+ assert.writeOK(coll.insert({_id: 2}));
- // Normal $unwind with no matching documents.
- expectedResults = [];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
- {$unwind: {path: "$same"}}
- ],
- expectedResults,
- coll);
+ assert.writeOK(from.insert({_id: 0, b: 1}));
+ assert.writeOK(from.insert({_id: 1, b: null}));
+ assert.writeOK(from.insert({_id: 2}));
- // $unwind with preserveNullAndEmptyArrays and no matching documents.
- expectedResults = [
- {_id: 0, a: 1},
- {_id: 1, a: null},
- {_id: 2},
- ];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
- {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
- ],
- expectedResults,
- coll);
+ //
+ // Basic functionality.
+ //
- // $unwind with preserveNullAndEmptyArrays, some with matching documents, some without.
- expectedResults = [
- {_id: 0, a: 1},
- {_id: 1, a: null, same: {_id: 0, b: 1}},
- {_id: 2},
- ];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
- ],
- expectedResults,
- coll);
+ // "from" document added to "as" field if a == b, where nonexistent fields are treated as
+ // null.
+ let expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If localField is nonexistent, it is treated as if it is null.
+ expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [{$lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If foreignField is nonexistent, it is treated as if it is null.
+ expectedResults = [
+ {_id: 0, a: 1, "same": []},
+ {_id: 1, a: null, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [{$lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
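+
+ // Aside: these matches follow find()-style null equality. A missing field on
+ // either side behaves as null, so a null/missing local value matches every
+ // foreign document whose foreignField is null or missing, mirroring
+ // find({field: {$eq: null}}) semantics.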
+
+ // If there are no matches or the from coll doesn't exist, the result is an empty array.
+ expectedResults =
+ [{_id: 0, a: 1, "same": []}, {_id: 1, a: null, "same": []}, {_id: 2, "same": []}];
+ testPipeline(
+ [{$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}}],
+ expectedResults,
+ coll);
+
+ // If field name specified by "as" already exists, it is overwritten.
+ expectedResults = [
+ {_id: 0, "a": [{_id: 0, b: 1}]},
+ {_id: 1, "a": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "a": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "a"}}],
+ expectedResults,
+ coll);
+
+ // Running multiple $lookups in the same pipeline is allowed.
+ expectedResults = [
+ {_id: 0, a: 1, "c": [{_id: 0, b: 1}], "d": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
+ {$project: {"a": 1, "c": 1}},
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
+ ],
+ expectedResults,
+ coll);
- // $unwind with preserveNullAndEmptyArrays and includeArrayIndex, some with matching
- // documents, some without.
- expectedResults = [
- {_id: 0, a: 1, index: null},
- {_id: 1, a: null, same: {_id: 0, b: 1}, index: NumberLong(0)},
- {_id: 2, index: null},
- ];
- testPipeline(
- [
- {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
- {
- $unwind:
- {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}
- }
- ],
- expectedResults,
- coll);
+ //
+ // Coalescing with $unwind.
+ //
- //
- // Dependencies.
- //
+ // A normal $unwind on the "as" field.
+ expectedResults = [
+ {_id: 0, a: 1, same: {_id: 0, b: 1}},
+ {_id: 1, a: null, same: {_id: 1, b: null}},
+ {_id: 1, a: null, same: {_id: 2}},
+ {_id: 2, same: {_id: 1, b: null}},
+ {_id: 2, same: {_id: 2}}
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
+
+ // An $unwind on the "as" field, with includeArrayIndex.
+ expectedResults = [
+ {_id: 0, a: 1, same: {_id: 0, b: 1}, index: NumberLong(0)},
+ {_id: 1, a: null, same: {_id: 1, b: null}, index: NumberLong(0)},
+ {_id: 1, a: null, same: {_id: 2}, index: NumberLong(1)},
+ {_id: 2, same: {_id: 1, b: null}, index: NumberLong(0)},
+ {_id: 2, same: {_id: 2}, index: NumberLong(1)},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", includeArrayIndex: "index"}}
+ ],
+ expectedResults,
+ coll);
+
+ // Normal $unwind with no matching documents.
+ expectedResults = [];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
+
+ // $unwind with preserveNullAndEmptyArrays and no matching documents.
+ expectedResults = [
+ {_id: 0, a: 1},
+ {_id: 1, a: null},
+ {_id: 2},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
+
+ // $unwind with preserveNullAndEmptyArrays, some with matching documents, some without.
+ expectedResults = [
+ {_id: 0, a: 1},
+ {_id: 1, a: null, same: {_id: 0, b: 1}},
+ {_id: 2},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
+
+ // $unwind with preserveNullAndEmptyArrays and includeArrayIndex, some with matching
+ // documents, some without.
+ expectedResults = [
+ {_id: 0, a: 1, index: null},
+ {_id: 1, a: null, same: {_id: 0, b: 1}, index: NumberLong(0)},
+ {_id: 2, index: null},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}}
+ ],
+ expectedResults,
+ coll);
- // If $lookup didn't add "localField" to its dependencies, this test would fail as the
- // value of the "a" field would be lost and treated as null.
- expectedResults = [
- {_id: 0, "same": [{_id: 0, b: 1}]},
- {_id: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
- testPipeline(
- [
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$project: {"same": 1}}
- ],
- expectedResults,
- coll);
+ //
+ // Dependencies.
+ //
- // If $lookup didn't add fields referenced by "let" variables to its dependencies, this test
- // would fail as the value of the "a" field would be lost and treated as null.
- expectedResults = [
- {"_id": 0, "same": [{"_id": 0, "x": 1}, {"_id": 1, "x": 1}, {"_id": 2, "x": 1}]},
- {
- "_id": 1,
- "same": [{"_id": 0, "x": null}, {"_id": 1, "x": null}, {"_id": 2, "x": null}]
- },
- {"_id": 2, "same": [{"_id": 0}, {"_id": 1}, {"_id": 2}]}
- ];
- testPipeline(
+ // If $lookup didn't add "localField" to its dependencies, this test would fail as the
+ // value of the "a" field would be lost and treated as null.
+ expectedResults = [
+ {_id: 0, "same": [{_id: 0, b: 1}]},
+ {_id: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$project: {"same": 1}}
+ ],
+ expectedResults,
+ coll);
+
+ // If $lookup didn't add fields referenced by "let" variables to its dependencies, this test
+ // would fail as the value of the "a" field would be lost and treated as null.
+ expectedResults = [
+ {"_id": 0, "same": [{"_id": 0, "x": 1}, {"_id": 1, "x": 1}, {"_id": 2, "x": 1}]},
+ {"_id": 1, "same": [{"_id": 0, "x": null}, {"_id": 1, "x": null}, {"_id": 2, "x": null}]},
+ {"_id": 2, "same": [{"_id": 0}, {"_id": 1}, {"_id": 2}]}
+ ];
+ testPipeline(
[
{
$lookup: {
@@ -263,53 +253,53 @@
expectedResults,
coll);
- //
- // Dotted field paths.
- //
-
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.writeOK(coll.insert({_id: 1, a: null}));
- assert.writeOK(coll.insert({_id: 2}));
- assert.writeOK(coll.insert({_id: 3, a: {c: 1}}));
-
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: 1}));
- assert.writeOK(from.insert({_id: 1, b: null}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3, b: {c: 1}}));
- assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
-
- // Once without a dotted field.
- let pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
- expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 3, a: {c: 1}, "same": [{_id: 3, b: {c: 1}}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
-
- // Look up a dotted field.
- pipeline = [{$lookup: {localField: "a.c", foreignField: "b.c", from: "from", as: "same"}}];
- // All but the last document in 'coll' have a nullish value for 'a.c'.
- expectedResults = [
- {_id: 0, a: 1, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 1, a: null, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 2, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
- {_id: 3, a: {c: 1}, same: [{_id: 3, b: {c: 1}}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
-
- // With an $unwind stage.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
- assert.writeOK(coll.insert({_id: 1}));
-
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, target: 1}));
+ //
+ // Dotted field paths.
+ //
- pipeline = [
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ assert.writeOK(coll.insert({_id: 1, a: null}));
+ assert.writeOK(coll.insert({_id: 2}));
+ assert.writeOK(coll.insert({_id: 3, a: {c: 1}}));
+
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, b: 1}));
+ assert.writeOK(from.insert({_id: 1, b: null}));
+ assert.writeOK(from.insert({_id: 2}));
+ assert.writeOK(from.insert({_id: 3, b: {c: 1}}));
+ assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
+
+ // Once without a dotted field.
+ let pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
+ expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 3, a: {c: 1}, "same": [{_id: 3, b: {c: 1}}]}
+ ];
+ testPipeline(pipeline, expectedResults, coll);
+
+ // Look up a dotted field.
+ pipeline = [{$lookup: {localField: "a.c", foreignField: "b.c", from: "from", as: "same"}}];
+ // All but the last document in 'coll' have a nullish value for 'a.c'.
+ expectedResults = [
+ {_id: 0, a: 1, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 1, a: null, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
+ {_id: 3, a: {c: 1}, same: [{_id: 3, b: {c: 1}}]}
+ ];
+ testPipeline(pipeline, expectedResults, coll);
+
+ // With an $unwind stage.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: {b: 1}}));
+ assert.writeOK(coll.insert({_id: 1}));
+
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, target: 1}));
+
+ pipeline = [
{
$lookup: {
localField: "a.b",
@@ -329,25 +319,25 @@
}
}
];
- expectedResults = [
- {_id: 0, a: {b: 1}, same: {documents: {_id: 0, target: 1}}, c: {d: {e: NumberLong(0)}}},
- {_id: 1, same: {}, c: {d: {e: null}}},
- ];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [
+ {_id: 0, a: {b: 1}, same: {documents: {_id: 0, target: 1}}, c: {d: {e: NumberLong(0)}}},
+ {_id: 1, same: {}, c: {d: {e: null}}},
+ ];
+ testPipeline(pipeline, expectedResults, coll);
- //
- // Query-like local fields (SERVER-21287)
- //
+ //
+ // Query-like local fields (SERVER-21287)
+ //
- // This must only do an equality match rather than treating the value as a regex.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: /a regex/}));
+ // This must only do an equality match rather than treating the value as a regex.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: /a regex/}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: /a regex/}));
- assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, b: /a regex/}));
+ assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -357,22 +347,22 @@
}
},
];
- expectedResults = [{_id: 0, a: /a regex/, b: [{_id: 0, b: /a regex/}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{_id: 0, a: /a regex/, b: [{_id: 0, b: /a regex/}]}];
+ testPipeline(pipeline, expectedResults, coll);
- //
- // A local value of an array.
- //
+ //
+ // A local value of an array.
+ //
- // Basic array corresponding to multiple documents.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [0, 1, 2]}));
+ // Basic array corresponding to multiple documents.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 2]}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0}));
+ assert.writeOK(from.insert({_id: 1}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -382,18 +372,18 @@
}
},
];
- expectedResults = [{_id: 0, a: [0, 1, 2], b: [{_id: 0}, {_id: 1}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{_id: 0, a: [0, 1, 2], b: [{_id: 0}, {_id: 1}]}];
+ testPipeline(pipeline, expectedResults, coll);
- // Basic array corresponding to a single document.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [1]}));
+ // Basic array corresponding to a single document.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [1]}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0}));
+ assert.writeOK(from.insert({_id: 1}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -403,21 +393,21 @@
}
},
];
- expectedResults = [{_id: 0, a: [1], b: [{_id: 1}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{_id: 0, a: [1], b: [{_id: 1}]}];
+ testPipeline(pipeline, expectedResults, coll);
- // Array containing regular expressions.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
- assert.writeOK(coll.insert({_id: 1, a: [/^x/]}));
+ // Array containing regular expressions.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [/a regex/, /^x/]}));
+ assert.writeOK(coll.insert({_id: 1, a: [/^x/]}));
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0, b: "should not match a regex"}));
- assert.writeOK(from.insert({_id: 1, b: "xxxx"}));
- assert.writeOK(from.insert({_id: 2, b: /a regex/}));
- assert.writeOK(from.insert({_id: 3, b: /^x/}));
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0, b: "should not match a regex"}));
+ assert.writeOK(from.insert({_id: 1, b: "xxxx"}));
+ assert.writeOK(from.insert({_id: 2, b: /a regex/}));
+ assert.writeOK(from.insert({_id: 3, b: /^x/}));
- pipeline = [
+ pipeline = [
{
$lookup: {
localField: "a",
@@ -427,23 +417,23 @@
}
},
];
- expectedResults = [
- {_id: 0, a: [/a regex/, /^x/], b: [{_id: 2, b: /a regex/}, {_id: 3, b: /^x/}]},
- {_id: 1, a: [/^x/], b: [{_id: 3, b: /^x/}]}
- ];
- testPipeline(pipeline, expectedResults, coll);
-
- // 'localField' references a field within an array of sub-objects.
- assert.commandWorked(coll.remove({}));
- assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
-
- assert.commandWorked(from.remove({}));
- assert.writeOK(from.insert({_id: 0}));
- assert.writeOK(from.insert({_id: 1}));
- assert.writeOK(from.insert({_id: 2}));
- assert.writeOK(from.insert({_id: 3}));
-
- pipeline = [
+ expectedResults = [
+ {_id: 0, a: [/a regex/, /^x/], b: [{_id: 2, b: /a regex/}, {_id: 3, b: /^x/}]},
+ {_id: 1, a: [/^x/], b: [{_id: 3, b: /^x/}]}
+ ];
+ testPipeline(pipeline, expectedResults, coll);
+
+ // 'localField' references a field within an array of sub-objects.
+ assert.commandWorked(coll.remove({}));
+ assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {b: 2}]}));
+
+ assert.commandWorked(from.remove({}));
+ assert.writeOK(from.insert({_id: 0}));
+ assert.writeOK(from.insert({_id: 1}));
+ assert.writeOK(from.insert({_id: 2}));
+ assert.writeOK(from.insert({_id: 3}));
+
+ pipeline = [
{
$lookup: {
localField: "a.b",
@@ -454,17 +444,17 @@
},
];
- expectedResults = [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
- testPipeline(pipeline, expectedResults, coll);
+ expectedResults = [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
+ testPipeline(pipeline, expectedResults, coll);
- //
- // Test $lookup when the foreign collection is a view.
- //
- // TODO SERVER-32548: Allow this test to run when the foreign collection is sharded.
- if (!FixtureHelpers.isSharded(from)) {
- assert.commandWorked(
- coll.getDB().runCommand({create: "fromView", viewOn: "from", pipeline: []}));
- pipeline = [
+ //
+ // Test $lookup when the foreign collection is a view.
+ //
+ // TODO SERVER-32548: Allow this test to run when the foreign collection is sharded.
+ if (!FixtureHelpers.isSharded(from)) {
+ assert.commandWorked(
+ coll.getDB().runCommand({create: "fromView", viewOn: "from", pipeline: []}));
+ pipeline = [
{
$lookup: {
localField: "a.b",
@@ -475,181 +465,167 @@
},
];
- expectedResults =
- [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
- testPipeline(pipeline, expectedResults, coll);
- }
-
- //
- // Error cases.
- //
-
- // 'from', 'as', 'localField' and 'foreignField' must all be specified when run with
- // localField/foreignField syntax.
- assertErrorCode(coll,
- [{$lookup: {foreignField: "b", from: "from", as: "same"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", from: "from", as: "same"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", as: "same"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: "from"}}],
- ErrorCodes.FailedToParse);
-
- // localField/foreignField and pipeline/let syntax must not be mixed.
- assertErrorCode(coll,
- [{$lookup: {pipeline: [], foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {pipeline: [], localField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(
- coll,
- [{$lookup: {pipeline: [], localField: "b", foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {let : {a: "$b"}, foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {let : {a: "$b"}, localField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(
- coll,
- [{
- $lookup:
- {let : {a: "$b"}, localField: "b", foreignField: "b", from: "from", as: "as"}
- }],
- ErrorCodes.FailedToParse);
-
- // 'from', 'as', 'localField' and 'foreignField' must all be of type string.
- assertErrorCode(coll,
- [{$lookup: {localField: 1, foreignField: "b", from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: 1, from: "from", as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: 1, as: "as"}}],
- ErrorCodes.FailedToParse);
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: "from", as: 1}}],
- ErrorCodes.FailedToParse);
-
- // The foreign collection must be a valid namespace.
- assertErrorCode(coll,
- [{$lookup: {localField: "a", foreignField: "b", from: "", as: "as"}}],
- ErrorCodes.InvalidNamespace);
- // $lookup's field must be an object.
- assertErrorCode(coll, [{$lookup: "string"}], ErrorCodes.FailedToParse);
+ expectedResults = [{"_id": 0, "a": [{"b": 1}, {"b": 2}], "c": [{"_id": 1}, {"_id": 2}]}];
+ testPipeline(pipeline, expectedResults, coll);
}
//
- // Test unsharded local collection and unsharded foreign collection.
+ // Error cases.
//
- mongosDB.lookUp.drop();
- mongosDB.from.drop();
- mongosDB.thirdColl.drop();
- mongosDB.fourthColl.drop();
-
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- // Verify that the command is sent only to the primary shard when both the local and foreign
- // collections are unsharded.
- assert(!assert
- .commandWorked(mongosDB.lookup.explain().aggregate([{
- $lookup: {
- from: mongosDB.from.getName(),
- localField: "a",
- foreignField: "b",
- as: "results"
- }
- }]))
- .hasOwnProperty("shards"));
- // Enable sharding on the test DB and ensure its primary is shard0000.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
- //
- // Test unsharded local collection and sharded foreign collection.
- //
-
- // Shard the foreign collection on _id.
- st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- //
- // Test sharded local collection and unsharded foreign collection.
- //
- assert(mongosDB.from.drop());
-
- // Shard the local collection on _id.
- st.shardColl(mongosDB.lookup, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- //
- // Test sharded local and foreign collections.
- //
-
- // Shard the foreign collection on _id.
- st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
-
- // Test that a $lookup from an unsharded collection followed by a $merge to a sharded collection
- // is allowed.
- const sourceColl = st.getDB(testName).lookUp;
- assert(sourceColl.drop());
- assert(st.adminCommand({shardCollection: sourceColl.getFullName(), key: {_id: "hashed"}}));
- assert.commandWorked(sourceColl.insert({_id: 0, a: 0}));
-
- const outColl = st.getDB(testName).out;
- assert(outColl.drop());
- assert(st.adminCommand({shardCollection: outColl.getFullName(), key: {_id: "hashed"}}));
-
- const fromColl = st.getDB(testName).from;
- assert(fromColl.drop());
- assert.commandWorked(fromColl.insert({_id: 0, b: 0}));
-
- sourceColl.aggregate([
- {$lookup: {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}},
- {$merge: {into: outColl.getName()}}
- ]);
-
- assert.eq([{a: 0, same: [{_id: 0, b: 0}]}], outColl.find({}, {_id: 0}).toArray());
-
-    // Disable the server parameter and verify that an attempt to run a $lookup on a sharded
-    // collection fails.
- setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", false);
-
-    // Re-shard the foreign collection on _id.
- st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
-
- let err = assert.throws(() => sourceColl
- .aggregate([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: fromColl.getName(),
- as: "same"
- }
- }])
- .itcount());
- assert.eq(err.code, 28769);
- err = assert.throws(() => sourceColl
- .aggregate([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: fromColl.getName(),
- as: "same"
- }
- }],
- {allowDiskUse: true})
- .itcount());
- assert.eq(err.code, 28769);
- err = assert.throws(() => sourceColl
+ // 'from', 'as', 'localField' and 'foreignField' must all be specified when run with
+ // localField/foreignField syntax.
+ assertErrorCode(
+ coll, [{$lookup: {foreignField: "b", from: "from", as: "same"}}], ErrorCodes.FailedToParse);
+ assertErrorCode(
+ coll, [{$lookup: {localField: "a", from: "from", as: "same"}}], ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", as: "same"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: "from"}}],
+ ErrorCodes.FailedToParse);
+
+ // localField/foreignField and pipeline/let syntax must not be mixed.
+ assertErrorCode(coll,
+ [{$lookup: {pipeline: [], foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {pipeline: [], localField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(
+ coll,
+ [{$lookup: {pipeline: [], localField: "b", foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {let : {a: "$b"}, foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {let : {a: "$b"}, localField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(
+ coll,
+ [{$lookup: {let : {a: "$b"}, localField: "b", foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+
+ // 'from', 'as', 'localField' and 'foreignField' must all be of type string.
+ assertErrorCode(coll,
+ [{$lookup: {localField: 1, foreignField: "b", from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: 1, from: "from", as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: 1, as: "as"}}],
+ ErrorCodes.FailedToParse);
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: "from", as: 1}}],
+ ErrorCodes.FailedToParse);
+
+ // The foreign collection must be a valid namespace.
+ assertErrorCode(coll,
+ [{$lookup: {localField: "a", foreignField: "b", from: "", as: "as"}}],
+ ErrorCodes.InvalidNamespace);
+ // $lookup's field must be an object.
+ assertErrorCode(coll, [{$lookup: "string"}], ErrorCodes.FailedToParse);
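+
+    // For contrast with the error cases above, both accepted syntaxes parse and run. This is
+    // a minimal sketch only, reusing this test's "from" collection and ignoring the results:
+    coll.aggregate([{$lookup: {from: "from", localField: "a", foreignField: "b", as: "as"}}])
+        .itcount();
+    coll.aggregate([{$lookup: {from: "from", let : {a: "$a"}, pipeline: [], as: "as"}}])
+        .itcount();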
+}
+
+//
+// Test unsharded local collection and unsharded foreign collection.
+//
+mongosDB.lookUp.drop();
+mongosDB.from.drop();
+mongosDB.thirdColl.drop();
+mongosDB.fourthColl.drop();
+
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+// Verify that the command is sent only to the primary shard when both the local and foreign
+// collections are unsharded.
+assert(
+ !assert
+ .commandWorked(mongosDB.lookup.explain().aggregate([{
+ $lookup:
+ {from: mongosDB.from.getName(), localField: "a", foreignField: "b", as: "results"}
+ }]))
+ .hasOwnProperty("shards"));
+// Enable sharding on the test DB and ensure its primary is shard0000.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+//
+// Test unsharded local collection and sharded foreign collection.
+//
+
+// Shard the foreign collection on _id.
+st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+//
+// Test sharded local collection and unsharded foreign collection.
+//
+assert(mongosDB.from.drop());
+
+// Shard the local collection on _id.
+st.shardColl(mongosDB.lookup, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+//
+// Test sharded local and foreign collections.
+//
+
+// Shard the foreign collection on _id.
+st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+runTest(mongosDB.lookUp, mongosDB.from, mongosDB.thirdColl, mongosDB.fourthColl);
+
+// Test that a $lookup from an unsharded collection followed by a $merge to a sharded collection
+// is allowed.
+const sourceColl = st.getDB(testName).lookUp;
+assert(sourceColl.drop());
+assert(st.adminCommand({shardCollection: sourceColl.getFullName(), key: {_id: "hashed"}}));
+assert.commandWorked(sourceColl.insert({_id: 0, a: 0}));
+
+const outColl = st.getDB(testName).out;
+assert(outColl.drop());
+assert(st.adminCommand({shardCollection: outColl.getFullName(), key: {_id: "hashed"}}));
+
+const fromColl = st.getDB(testName).from;
+assert(fromColl.drop());
+assert.commandWorked(fromColl.insert({_id: 0, b: 0}));
+
+sourceColl.aggregate([
+ {$lookup: {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}},
+ {$merge: {into: outColl.getName()}}
+]);
+
+assert.eq([{a: 0, same: [{_id: 0, b: 0}]}], outColl.find({}, {_id: 0}).toArray());
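+
+// For reference, the bare {$merge: {into: ...}} above relies on the stage's defaults and is
+// equivalent to:
+//   {$merge: {into: outColl.getName(), on: "_id", whenMatched: "merge", whenNotMatched: "insert"}}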
+
+// Disable the server parameter and verify that an attempt to run a $lookup on a sharded
+// collection fails.
+setParameterOnAllHosts(nodeList, "internalQueryAllowShardedLookup", false);
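+// (Per server_parameter_helpers.js, this runs {setParameter: 1,
+// internalQueryAllowShardedLookup: false} through adminCommand on every host in nodeList.)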
+
+// Re-shard the foreign collection on _id.
+st.shardColl(mongosDB.from, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+
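+// With the parameter off, each aggregate below is expected to fail with error code 28769,
+// the "sharded $lookup is not allowed" error.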
+let err = assert.throws(
+ () =>
+ sourceColl
+ .aggregate([{
+ $lookup: {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}
+ }])
+ .itcount());
+assert.eq(err.code, 28769);
+err = assert.throws(
+ () => sourceColl
+ .aggregate(
+ [{
+ $lookup:
+ {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}
+ }],
+ {allowDiskUse: true})
+ .itcount());
+assert.eq(err.code, 28769);
+err = assert.throws(() => sourceColl
.aggregate(
[
{$_internalSplitPipeline: {mergeType: "anyShard"}},
@@ -664,7 +640,7 @@
],
{allowDiskUse: true})
.itcount());
- assert.eq(err.code, 28769);
+assert.eq(err.code, 28769);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
index a1bce25ad81..9d71a70e135 100644
--- a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
@@ -2,114 +2,114 @@
// sharded with a compound shard key.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB['coll'];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection with a compound shard key: a, b, c. Then split it into two chunks,
- // and put one chunk on each shard.
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: mongosColl.getFullName(), key: {a: 1, b: 1, c: 1}}));
-
- // Split the collection into 2 chunks:
- // [{a: MinKey, b: MinKey, c: MinKey}, {a: 1, b: MinKey, c: MinKey})
- // and
- // [{a: 1, b: MinKey, c: MinKey}, {a: MaxKey, b: MaxKey, c: MaxKey}).
- assert.commandWorked(mongosDB.adminCommand(
- {split: mongosColl.getFullName(), middle: {a: 1, b: MinKey, c: MinKey}}));
-
- // Move the upper chunk to shard 1.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {a: 1, b: MinKey, c: MinKey},
- to: st.rs1.getURL()
- }));
-
- const changeStreamSingleColl = mongosColl.watch([], {fullDocument: "updateLookup"});
- const changeStreamWholeDb = mongosDB.watch([], {fullDocument: "updateLookup"});
-
- const nDocs = 6;
- const bValues = ["one", "two", "three", "four", "five", "six"];
-
- // This shard key function results in 1/3rd of documents on shard0 and 2/3rds on shard1.
- function shardKeyFromId(id) {
- return {a: id % 3, b: bValues[id], c: id % 2};
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
-
- // Do some writes.
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB['coll'];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection with a compound shard key: a, b, c. Then split it into two chunks,
+// and put one chunk on each shard.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {a: 1, b: 1, c: 1}}));
+
+// Split the collection into 2 chunks:
+// [{a: MinKey, b: MinKey, c: MinKey}, {a: 1, b: MinKey, c: MinKey})
+// and
+// [{a: 1, b: MinKey, c: MinKey}, {a: MaxKey, b: MaxKey, c: MaxKey}).
+assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {a: 1, b: MinKey, c: MinKey}}));
+
+// Move the upper chunk to shard 1.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: 1, b: MinKey, c: MinKey},
+ to: st.rs1.getURL()
+}));
+
+const changeStreamSingleColl = mongosColl.watch([], {fullDocument: "updateLookup"});
+const changeStreamWholeDb = mongosDB.watch([], {fullDocument: "updateLookup"});
+
+const nDocs = 6;
+const bValues = ["one", "two", "three", "four", "five", "six"];
+
+// This shard key function results in 1/3rd of documents on shard0 and 2/3rds on shard1.
+function shardKeyFromId(id) {
+ return {a: id % 3, b: bValues[id], c: id % 2};
+}
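+// Worked out for nDocs = 6: a = id % 3 yields a-values 0, 1, 2, 0, 1, 2, and the split at
+// {a: 1, b: MinKey, c: MinKey} leaves a = 0 (ids 0 and 3) on shard0 while a >= 1
+// (ids 1, 2, 4, 5) lands on shard1, hence the 1/3 vs 2/3 distribution.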
+
+// Do some writes.
+for (let id = 0; id < nDocs; ++id) {
+ const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
+ assert.writeOK(mongosColl.insert(documentKey));
+ assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+}
+
+[changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
+ jsTestLog(`Testing updateLookup on namespace ${changeStream._ns}`);
for (let id = 0; id < nDocs; ++id) {
- const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.insert(documentKey));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+
+ assert.soon(() => changeStream.hasNext());
+ next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+ assert.docEq(next.fullDocument,
+ Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}));
}
-
- [changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
- jsTestLog(`Testing updateLookup on namespace ${changeStream._ns}`);
- for (let id = 0; id < nDocs; ++id) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
-
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
- assert.docEq(next.fullDocument,
- Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}));
- }
- });
-
- // Test that the change stream can still see the updated post image, even if a chunk is
- // migrated.
+});
+
+// Test that the change stream can still see the updated post image, even if a chunk is
+// migrated.
+for (let id = 0; id < nDocs; ++id) {
+ const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
+ assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+}
+
+// Move the upper chunk back to shard 0.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: 1, b: MinKey, c: MinKey},
+ to: st.rs0.getURL()
+}));
+
+[changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
+ jsTestLog(`Testing updateLookup after moveChunk on namespace ${changeStream._ns}`);
for (let id = 0; id < nDocs; ++id) {
- const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
- assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+ assert.docEq(next.fullDocument,
+ Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}));
}
+});
- // Move the upper chunk back to shard 0.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {a: 1, b: MinKey, c: MinKey},
- to: st.rs0.getURL()
- }));
-
- [changeStreamSingleColl, changeStreamWholeDb].forEach(function(changeStream) {
- jsTestLog(`Testing updateLookup after moveChunk on namespace ${changeStream._ns}`);
- for (let id = 0; id < nDocs; ++id) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
- assert.docEq(next.fullDocument,
- Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}));
- }
- });
-
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
index f1e9e6da502..058a92c6832 100644
--- a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
@@ -2,78 +2,75 @@
// sharded with a hashed shard key.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const st = new ShardingTest({
- shards: 2,
- enableBalancer: false,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
+const st = new ShardingTest({
+ shards: 2,
+ enableBalancer: false,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB['coll'];
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB['coll'];
- assert.commandWorked(mongosDB.dropDatabase());
+assert.commandWorked(mongosDB.dropDatabase());
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard the test collection on the field "shardKey", and split it into two chunks.
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: mongosColl.getFullName(),
- numInitialChunks: 2,
- key: {shardKey: "hashed"}
- }));
+// Shard the test collection on the field "shardKey", and split it into two chunks.
+assert.commandWorked(mongosDB.adminCommand(
+ {shardCollection: mongosColl.getFullName(), numInitialChunks: 2, key: {shardKey: "hashed"}}));
- // Make sure the negative chunk is on shard 0.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- bounds: [{shardKey: MinKey}, {shardKey: NumberLong("0")}],
- to: st.rs0.getURL()
- }));
+// Make sure the negative chunk is on shard 0.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ bounds: [{shardKey: MinKey}, {shardKey: NumberLong("0")}],
+ to: st.rs0.getURL()
+}));
- // Make sure the positive chunk is on shard 1.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- bounds: [{shardKey: NumberLong("0")}, {shardKey: MaxKey}],
- to: st.rs1.getURL()
- }));
+// Make sure the positive chunk is on shard 1.
+assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ bounds: [{shardKey: NumberLong("0")}, {shardKey: MaxKey}],
+ to: st.rs1.getURL()
+}));
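+
+// Sanity-check sketch, assuming the config.chunks schema of this server version (documents
+// keyed by "ns"); 'chunksColl' is a local name introduced here for brevity. Each shard
+// should now own exactly one of the two hashed chunks.
+const chunksColl = st.s.getDB("config").chunks;
+assert.eq(1, chunksColl.count({ns: mongosColl.getFullName(), shard: st.shard0.shardName}));
+assert.eq(1, chunksColl.count({ns: mongosColl.getFullName(), shard: st.shard1.shardName}));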
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
- // Write enough documents that we likely have some on each shard.
- const nDocs = 1000;
- for (let id = 0; id < nDocs; ++id) {
- assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
- assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
- }
+// Write enough documents that we likely have some on each shard.
+const nDocs = 1000;
+for (let id = 0; id < nDocs; ++id) {
+ assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
+ assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
+}
- for (let id = 0; id < nDocs; ++id) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {shardKey: id, _id: id});
+for (let id = 0; id < nDocs; ++id) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {shardKey: id, _id: id});
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {shardKey: id, _id: id});
- assert.docEq(next.fullDocument, {_id: id, shardKey: id, updatedCount: 1});
- }
+ assert.soon(() => changeStream.hasNext());
+ next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, {shardKey: id, _id: id});
+ assert.docEq(next.fullDocument, {_id: id, shardKey: id, updatedCount: 1});
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
index 843dda1c524..f6235d1082c 100644
--- a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
+++ b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
@@ -2,94 +2,92 @@
// sharded with a key which is just the "_id" field.
// @tags: [uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
-
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- enableMajorityReadConcern: '',
- // Use a higher frequency for periodic noops to speed up the test.
- setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
- }
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB['coll'];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey) chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
-
- // Do some writes.
- assert.writeOK(mongosColl.insert({_id: 1000}));
- assert.writeOK(mongosColl.insert({_id: -1000}));
- assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
- assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
-
- for (let nextId of[1000, -1000]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.documentKey, {_id: nextId});
- }
-
- for (let nextId of[1000, -1000]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- // Only the "_id" field is present in next.documentKey because the shard key is the _id.
- assert.eq(next.documentKey, {_id: nextId});
- assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 1});
- }
-
- // Test that the change stream can still see the updated post image, even if a chunk is
- // migrated.
- assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
- assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
-
- // Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
- // Move the [500, MaxKey) chunk back to st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1000}, to: st.rs0.getURL()}));
-
- for (let nextId of[1000, -1000]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {_id: nextId});
- assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 2});
- }
-
- st.stop();
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB['coll'];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey) chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Write a document to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1}));
+assert.writeOK(mongosColl.insert({_id: 1}));
+
+const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
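+
+// With fullDocument: "updateLookup", each update event carries a post-image that is fetched
+// at read time (the document's current state), which is what the assertions below check.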
+
+// Do some writes.
+assert.writeOK(mongosColl.insert({_id: 1000}));
+assert.writeOK(mongosColl.insert({_id: -1000}));
+assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
+assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
+
+for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: nextId});
+}
+
+for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ // Only the "_id" field is present in next.documentKey because the shard key is the _id.
+ assert.eq(next.documentKey, {_id: nextId});
+ assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 1});
+}
+
+// Test that the change stream can still see the updated post image, even if a chunk is
+// migrated.
+assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
+assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
+
+// Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
+// Move the [500, MaxKey) chunk back to st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1000}, to: st.rs0.getURL()}));
+
+for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, {_id: nextId});
+ assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 2});
+}
+
+st.stop();
})();
diff --git a/jstests/sharding/lookup_mongod_unaware.js b/jstests/sharding/lookup_mongod_unaware.js
index 2a363eb1ce2..2750425205e 100644
--- a/jstests/sharding/lookup_mongod_unaware.js
+++ b/jstests/sharding/lookup_mongod_unaware.js
@@ -6,182 +6,179 @@
// expect it to still have all the previous data.
// @tags: [requires_persistence]
(function() {
- "use strict";
-
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- // Restarts the primary shard and ensures that it believes both collections are unsharded.
- function restartPrimaryShard(rs, localColl, foreignColl) {
- // Returns true if the shard is aware that the collection is sharded.
- function hasRoutingInfoForNs(shardConn, coll) {
- const res = shardConn.adminCommand({getShardVersion: coll, fullMetadata: true});
- assert.commandWorked(res);
- return res.metadata.collVersion != undefined;
- }
-
- rs.restart(0);
- rs.awaitSecondaryNodes();
- assert(!hasRoutingInfoForNs(rs.getPrimary(), localColl.getFullName()));
- assert(!hasRoutingInfoForNs(rs.getPrimary(), foreignColl.getFullName()));
-
- // Reset the server parameter allowing sharded $lookup on each node.
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(rs.getPrimary()),
- "internalQueryAllowShardedLookup",
- true);
+"use strict";
+
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+
+// Restarts the primary shard and ensures that it believes both collections are unsharded.
+function restartPrimaryShard(rs, localColl, foreignColl) {
+ // Returns true if the shard is aware that the collection is sharded.
+ function hasRoutingInfoForNs(shardConn, coll) {
+ const res = shardConn.adminCommand({getShardVersion: coll, fullMetadata: true});
+ assert.commandWorked(res);
+ return res.metadata.collVersion != undefined;
}
- const testName = "lookup_stale_mongod";
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {nodes: 1},
- });
+ rs.restart(0);
+ rs.awaitSecondaryNodes();
+ assert(!hasRoutingInfoForNs(rs.getPrimary(), localColl.getFullName()));
+ assert(!hasRoutingInfoForNs(rs.getPrimary(), foreignColl.getFullName()));
- // Set the parameter allowing sharded $lookup on all nodes.
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
+ // Reset the server parameter allowing sharded $lookup on each node.
+ setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(rs.getPrimary()),
"internalQueryAllowShardedLookup",
true);
+}
+
+const testName = "lookup_stale_mongod";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {nodes: 1},
+});
+
+// Set the parameter allowing sharded $lookup on all nodes.
+setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
+ "internalQueryAllowShardedLookup",
+ true);
+
+const mongos0DB = st.s0.getDB(testName);
+const mongos0LocalColl = mongos0DB[testName + "_local"];
+const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
+
+const mongos1DB = st.s1.getDB(testName);
+const mongos1LocalColl = mongos1DB[testName + "_local"];
+const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
+
+const pipeline = [
+ {$lookup: {localField: "a", foreignField: "b", from: mongos0ForeignColl.getName(), as: "same"}},
+ // Unwind the results of the $lookup, so we can sort by them to get a consistent ordering
+ // for the query results.
+ {$unwind: "$same"},
+ {$sort: {_id: 1, "same._id": 1}}
+];
+
+// The results are expected to be correct if the $lookup stage is executed on the mongos which
+// is aware that the collection is sharded.
+const expectedResults = [
+ {_id: 0, a: 1, "same": {_id: 0, b: 1}},
+ {_id: 1, a: null, "same": {_id: 1, b: null}},
+ {_id: 1, a: null, "same": {_id: 2}},
+ {_id: 2, "same": {_id: 1, b: null}},
+ {_id: 2, "same": {_id: 2}}
+];
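+
+// Why _id 1 and _id 2 each appear twice above: $lookup's equality match treats a missing
+// field as null, so local {_id: 1, a: null} and {_id: 2} (no "a") each match both foreign
+// {_id: 1, b: null} and {_id: 2} (no "b"), and the $unwind stage emits one row per match.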
+
+// Ensure that shard0 is the primary shard.
+assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
+st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
+
+assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+
+// Send writes through mongos1 such that it's aware of the collections and believes they are
+// unsharded.
+assert.writeOK(mongos1LocalColl.insert({_id: 2}));
+assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
- const mongos0DB = st.s0.getDB(testName);
- const mongos0LocalColl = mongos0DB[testName + "_local"];
- const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
-
- const mongos1DB = st.s1.getDB(testName);
- const mongos1LocalColl = mongos1DB[testName + "_local"];
- const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
-
- const pipeline = [
- {
- $lookup:
- {localField: "a", foreignField: "b", from: mongos0ForeignColl.getName(), as: "same"}
- },
- // Unwind the results of the $lookup, so we can sort by them to get a consistent ordering
- // for the query results.
- {$unwind: "$same"},
- {$sort: {_id: 1, "same._id": 1}}
- ];
-
- // The results are expected to be correct if the $lookup stage is executed on the mongos which
- // is aware that the collection is sharded.
- const expectedResults = [
- {_id: 0, a: 1, "same": {_id: 0, b: 1}},
- {_id: 1, a: null, "same": {_id: 1, b: null}},
- {_id: 1, a: null, "same": {_id: 2}},
- {_id: 2, "same": {_id: 1, b: null}},
- {_id: 2, "same": {_id: 2}}
- ];
-
- // Ensure that shard0 is the primary shard.
- assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
- st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-
- assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
- assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
-
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-
- // Send writes through mongos1 such that it's aware of the collections and believes they are
- // unsharded.
- assert.writeOK(mongos1LocalColl.insert({_id: 2}));
- assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
-
- //
- // Test unsharded local and sharded foreign collections, with the primary shard unaware that
- // the foreign collection is sharded.
- //
-
- // Shard the foreign collection.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0ForeignColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Verify $lookup results through the fresh mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- // Verify $lookup results through mongos1, which is not aware that the foreign collection is
- // sharded. In this case the results will be correct since the entire pipeline will be run on a
- // shard, which will do a refresh before executing the foreign pipeline.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test sharded local and sharded foreign collections, with the primary shard unaware that
- // either collection is sharded.
- //
-
- // Shard the local collection.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0LocalColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Verify $lookup results through the fresh mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- // Verify $lookup results through mongos1, which is not aware that the local
- // collection is sharded. The results are expected to be incorrect when both the mongos and
- // primary shard incorrectly believe that a collection is unsharded.
- // TODO: This should be fixed by SERVER-32629, likewise for the other aggregates in this file
- // sent to the stale mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
- {_id: 1, a: null, "same": {_id: 1, b: null}},
- {_id: 1, a: null, "same": {_id: 2}},
-
- {_id: 2, "same": {_id: 1, b: null}},
- {_id: 2, "same": {_id: 2}}
- ]);
-
- //
- // Test sharded local and unsharded foreign collections, with the primary shard unaware that
- // the local collection is sharded.
- //
-
- // Recreate the foreign collection as unsharded.
- mongos0ForeignColl.drop();
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
-
- // Verify $lookup results through the fresh mongos.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- // Verify $lookup results through mongos1, which is not aware that the local
- // collection is sharded. The results are expected to be incorrect when both the mongos and
- // primary shard incorrectly believe that a collection is unsharded.
- restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
- {_id: 1, a: null, "same": {_id: 1, b: null}},
- {_id: 1, a: null, "same": {_id: 2}},
- {_id: 2, "same": {_id: 1, b: null}},
- {_id: 2, "same": {_id: 2}}
- ]);
-
- st.stop();
+//
+// Test unsharded local and sharded foreign collections, with the primary shard unaware that
+// the foreign collection is sharded.
+//
+
+// Shard the foreign collection.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0ForeignColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Verify $lookup results through the fresh mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+// Verify $lookup results through mongos1, which is not aware that the foreign collection is
+// sharded. In this case the results will be correct since the entire pipeline will be run on a
+// shard, which will do a refresh before executing the foreign pipeline.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test sharded local and sharded foreign collections, with the primary shard unaware that
+// either collection is sharded.
+//
+
+// Shard the local collection.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0LocalColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Verify $lookup results through the fresh mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+// Verify $lookup results through mongos1, which is not aware that the local
+// collection is sharded. The results are expected to be incorrect when both the mongos and
+// primary shard incorrectly believe that a collection is unsharded.
+// TODO: This should be fixed by SERVER-32629, likewise for the other aggregates in this file
+// sent to the stale mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
+ {_id: 1, a: null, "same": {_id: 1, b: null}},
+ {_id: 1, a: null, "same": {_id: 2}},
+
+ {_id: 2, "same": {_id: 1, b: null}},
+ {_id: 2, "same": {_id: 2}}
+]);
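+
+// ({_id: 0, a: 1} is missing above: the stale mongos targets only the primary shard, which
+// after the moveChunk no longer owns the [MinKey, 1) chunk holding {_id: 0}.)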
+
+//
+// Test sharded local and unsharded foreign collections, with the primary shard unaware that
+// the local collection is sharded.
+//
+
+// Recreate the foreign collection as unsharded.
+mongos0ForeignColl.drop();
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+
+// Verify $lookup results through the fresh mongos.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos0LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+// Verify $lookup results through mongos1, which is not aware that the local
+// collection is sharded. The results are expected to be incorrect when both the mongos and
+// primary shard incorrectly believe that a collection is unsharded.
+restartPrimaryShard(st.rs0, mongos0LocalColl, mongos0ForeignColl);
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), [
+ {_id: 1, a: null, "same": {_id: 1, b: null}},
+ {_id: 1, a: null, "same": {_id: 2}},
+ {_id: 2, "same": {_id: 1, b: null}},
+ {_id: 2, "same": {_id: 2}}
+]);
+
+st.stop();
})();
diff --git a/jstests/sharding/lookup_on_shard.js b/jstests/sharding/lookup_on_shard.js
index cf6104dcaf1..2dc96378fab 100644
--- a/jstests/sharding/lookup_on_shard.js
+++ b/jstests/sharding/lookup_on_shard.js
@@ -1,39 +1,39 @@
// Test that a pipeline with a $lookup stage on a sharded foreign collection may be run on a mongod.
(function() {
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
- const sharded = new ShardingTest({mongos: 1, shards: 2});
+const sharded = new ShardingTest({mongos: 1, shards: 2});
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(sharded.s), "internalQueryAllowShardedLookup", true);
+setParameterOnAllHosts(
+ DiscoverTopology.findNonConfigNodes(sharded.s), "internalQueryAllowShardedLookup", true);
- assert.commandWorked(sharded.s.adminCommand({enableSharding: "test"}));
- sharded.ensurePrimaryShard('test', sharded.shard0.shardName);
+assert.commandWorked(sharded.s.adminCommand({enableSharding: "test"}));
+sharded.ensurePrimaryShard('test', sharded.shard0.shardName);
- const coll = sharded.s.getDB('test').mainColl;
- const foreignColl = sharded.s.getDB('test').foreignColl;
- const smallColl = sharded.s.getDB("test").smallColl;
+const coll = sharded.s.getDB('test').mainColl;
+const foreignColl = sharded.s.getDB('test').foreignColl;
+const smallColl = sharded.s.getDB("test").smallColl;
- const nDocsMainColl = 10;
- const nDocsForeignColl = 2 * nDocsMainColl;
+const nDocsMainColl = 10;
+const nDocsForeignColl = 2 * nDocsMainColl;
- for (let i = 0; i < nDocsMainColl; i++) {
- assert.commandWorked(coll.insert({_id: i, collName: "mainColl", foreignId: i}));
+for (let i = 0; i < nDocsMainColl; i++) {
+ assert.commandWorked(coll.insert({_id: i, collName: "mainColl", foreignId: i}));
- assert.commandWorked(
- foreignColl.insert({_id: 2 * i, key: i, collName: "foreignColl", data: "hello-0"}));
- assert.commandWorked(
- foreignColl.insert({_id: 2 * i + 1, key: i, collName: "foreignColl", data: "hello-1"}));
- }
- assert.commandWorked(smallColl.insert({_id: 0, collName: "smallColl"}));
+ assert.commandWorked(
+ foreignColl.insert({_id: 2 * i, key: i, collName: "foreignColl", data: "hello-0"}));
+ assert.commandWorked(
+ foreignColl.insert({_id: 2 * i + 1, key: i, collName: "foreignColl", data: "hello-1"}));
+}
+assert.commandWorked(smallColl.insert({_id: 0, collName: "smallColl"}));
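+
+// Resulting data set: 10 documents in mainColl, 2 * 10 = 20 in foreignColl (two per "key"),
+// and a single document in smallColl.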
- const runTest = function() {
- (function testSingleLookupFromShard() {
- // Run a pipeline which must be merged on a shard. This should force the $lookup (on
- // the sharded collection) to be run on a mongod.
- pipeline = [
+const runTest = function() {
+ (function testSingleLookupFromShard() {
+ // Run a pipeline which must be merged on a shard. This should force the $lookup (on
+ // the sharded collection) to be run on a mongod.
+ pipeline = [
{
$lookup: {
localField: "foreignId",
@@ -45,16 +45,16 @@
{$_internalSplitPipeline: {mergeType: "anyShard"}}
];
- const results = coll.aggregate(pipeline).toArray();
- assert.eq(results.length, nDocsMainColl);
- for (let i = 0; i < results.length; i++) {
- assert.eq(results[i].foreignDoc.length, 2, results[i]);
- }
- })();
+ const results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, nDocsMainColl);
+ for (let i = 0; i < results.length; i++) {
+ assert.eq(results[i].foreignDoc.length, 2, results[i]);
+ }
+ })();
- (function testMultipleLookupsFromShard() {
- // Run two lookups in a row (both on mongod).
- pipeline = [
+ (function testMultipleLookupsFromShard() {
+ // Run two lookups in a row (both on mongod).
+ pipeline = [
{
$lookup: {
localField: "foreignId",
@@ -72,17 +72,17 @@
},
{$_internalSplitPipeline: {mergeType: "anyShard"}}
];
- const results = coll.aggregate(pipeline).toArray();
- assert.eq(results.length, nDocsMainColl);
- for (let i = 0; i < results.length; i++) {
- assert.eq(results[i].foreignDoc.length, 2, results[i]);
- assert.eq(results[i].smallCollDocs.length, 1, results[i]);
- }
- })();
-
- (function testUnshardedLookupWithinShardedLookup() {
- // Pipeline with unsharded $lookup inside a sharded $lookup.
- pipeline = [
+ const results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, nDocsMainColl);
+ for (let i = 0; i < results.length; i++) {
+ assert.eq(results[i].foreignDoc.length, 2, results[i]);
+ assert.eq(results[i].smallCollDocs.length, 1, results[i]);
+ }
+ })();
+
+ (function testUnshardedLookupWithinShardedLookup() {
+ // Pipeline with unsharded $lookup inside a sharded $lookup.
+ pipeline = [
{
$lookup: {
from: "foreignColl",
@@ -94,55 +94,57 @@
},
{$_internalSplitPipeline: {mergeType: "anyShard"}}
];
- const results = coll.aggregate(pipeline).toArray();
-
- assert.eq(results.length, nDocsMainColl);
- for (let i = 0; i < results.length; i++) {
- assert.eq(results[i].foreignDoc.length, nDocsForeignColl);
- for (let j = 0; j < nDocsForeignColl; j++) {
- // Each document pulled from the foreign collection should have one document
- // from "smallColl."
- assert.eq(results[i].foreignDoc[j].collName, "foreignColl");
-
- // TODO SERVER-39016: Once a mongod is able to target the primary shard when
- // reading from a non-sharded collection this should always work. Until then,
- // the results of the query depend on which shard is chosen as the merging
- // shard. If the primary shard is chosen, we'll get the correct results (and
- // correctly find a document in "smallColl"). Otherwise if the merging shard is
- // not the primary shard, the merging shard will attempt to do a local read (on
- // an empty/non-existent collection), which will return nothing.
- if (results[i].foreignDoc[j].doc.length === 1) {
- assert.eq(results[i].foreignDoc[j].doc[0].collName, "smallColl");
- } else {
- assert.eq(results[i].foreignDoc[j].doc.length, 0);
- }
+ const results = coll.aggregate(pipeline).toArray();
+
+ assert.eq(results.length, nDocsMainColl);
+ for (let i = 0; i < results.length; i++) {
+ assert.eq(results[i].foreignDoc.length, nDocsForeignColl);
+ for (let j = 0; j < nDocsForeignColl; j++) {
+ // Each document pulled from the foreign collection should have one document
+ // from "smallColl."
+ assert.eq(results[i].foreignDoc[j].collName, "foreignColl");
+
+ // TODO SERVER-39016: Once a mongod is able to target the primary shard when
+ // reading from a non-sharded collection this should always work. Until then,
+ // the results of the query depend on which shard is chosen as the merging
+ // shard. If the primary shard is chosen, we'll get the correct results (and
+ // correctly find a document in "smallColl"). Otherwise if the merging shard is
+ // not the primary shard, the merging shard will attempt to do a local read (on
+ // an empty/non-existent collection), which will return nothing.
+ if (results[i].foreignDoc[j].doc.length === 1) {
+ assert.eq(results[i].foreignDoc[j].doc[0].collName, "smallColl");
+ } else {
+ assert.eq(results[i].foreignDoc[j].doc.length, 0);
}
}
- })();
- };
-
- jsTestLog("Running test with neither collection sharded");
- runTest();
-
- jsTestLog("Running test with foreign collection sharded");
- sharded.shardColl("foreignColl",
- {_id: 1}, // shard key
- {_id: 5}, // split
- {_id: 5}, // move
- "test", // dbName
- true // waitForDelete
- );
- runTest();
-
- jsTestLog("Running test with main and foreign collection sharded");
- sharded.shardColl("mainColl",
- {_id: 1}, // shard key
- {_id: 5}, // split
- {_id: 5}, // move
- "test", // dbName
- true // waitForDelete
- );
- runTest();
-
- sharded.stop();
+ }
+ })();
+};
+
+jsTestLog("Running test with neither collection sharded");
+runTest();
+
+jsTestLog("Running test with foreign collection sharded");
+sharded.shardColl(
+ "foreignColl",
+ {_id: 1}, // shard key
+ {_id: 5}, // split
+ {_id: 5}, // move
+ "test", // dbName
+ true // waitForDelete
+);
+runTest();
+
+jsTestLog("Running test with main and foreign collection sharded");
+sharded.shardColl(
+ "mainColl",
+ {_id: 1}, // shard key
+ {_id: 5}, // split
+ {_id: 5}, // move
+ "test", // dbName
+ true // waitForDelete
+);
+runTest();
+
+sharded.stop();
})();
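For context, the test above drives a $lookup whose subpipeline runs its own $lookup, then pins where the merge happens via $_internalSplitPipeline. A minimal sketch of that nested shape, with illustrative names rather than the test's literal pipeline:

    // Hedged sketch: a nested $lookup, as exercised above (names illustrative).
    const nested = [{
        $lookup: {
            from: "foreignColl",
            as: "foreignDoc",
            pipeline: [{$lookup: {from: "smallColl", as: "doc", pipeline: []}}]
        }
    }];
    // db.mainColl.aggregate(nested) attaches every foreignColl document to each
    // input document, and every smallColl document to each of those.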
diff --git a/jstests/sharding/lookup_stale_mongos.js b/jstests/sharding/lookup_stale_mongos.js
index 2b346c8c0c0..f1e71280a18 100644
--- a/jstests/sharding/lookup_stale_mongos.js
+++ b/jstests/sharding/lookup_stale_mongos.js
@@ -3,134 +3,131 @@
// when it's not, and likewise when mongos thinks the collection is unsharded but is actually
// sharded.
(function() {
- "use strict";
-
- load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
- load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
- const testName = "lookup_stale_mongos";
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- });
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
- "internalQueryAllowShardedLookup",
- true);
-
- const mongos0DB = st.s0.getDB(testName);
- assert.commandWorked(mongos0DB.dropDatabase());
- const mongos0LocalColl = mongos0DB[testName + "_local"];
- const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
-
- const mongos1DB = st.s1.getDB(testName);
- const mongos1LocalColl = mongos1DB[testName + "_local"];
- const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
-
- const pipeline = [
- {
- $lookup:
- {localField: "a", foreignField: "b", from: mongos1ForeignColl.getName(), as: "same"}
- },
- {$sort: {_id: 1}}
- ];
- const expectedResults = [
- {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
- {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
- {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
- ];
-
- // Ensure that shard0 is the primary shard.
- assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
- st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
-
- assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
- assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
-
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
-
- // Send writes through mongos1 such that it's aware of the collections and believes they are
- // unsharded.
- assert.writeOK(mongos1LocalColl.insert({_id: 2}));
- assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
-
- //
- // Test unsharded local and sharded foreign collections, with mongos unaware that the foreign
- // collection is sharded.
- //
-
- // Shard the foreign collection through mongos0.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0ForeignColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Issue a $lookup through mongos1, which is unaware that the foreign collection is sharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test sharded local and sharded foreign collections, with mongos unaware that the local
- // collection is sharded.
- //
-
- // Shard the local collection through mongos0.
- assert.commandWorked(
- mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
- assert.commandWorked(
- mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
-
- // Move the [minKey, 1) chunk to shard1.
- assert.commandWorked(mongos0DB.adminCommand({
- moveChunk: mongos0LocalColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Issue a $lookup through mongos1, which is unaware that the local collection is sharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test sharded local and unsharded foreign collections, with mongos unaware that the foreign
- // collection is unsharded.
- //
-
- // Recreate the foreign collection as unsharded through mongos0.
- mongos0ForeignColl.drop();
- assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
- assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
-
- // Issue a $lookup through mongos1, which is unaware that the foreign collection is now
- // unsharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- //
- // Test unsharded local and foreign collections, with mongos unaware that the local
- // collection is unsharded.
- //
-
- // Recreate the local collection as unsharded through mongos0.
- mongos0LocalColl.drop();
- assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
- assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
- assert.writeOK(mongos0LocalColl.insert({_id: 2}));
-
- // Issue a $lookup through mongos1, which is unaware that the local collection is now
- // unsharded.
- assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
-
- st.stop();
+"use strict";
+
+load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
+load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
+
+const testName = "lookup_stale_mongos";
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+});
+setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(st.s0).concat([st.s1.host]),
+ "internalQueryAllowShardedLookup",
+ true);
+
+const mongos0DB = st.s0.getDB(testName);
+assert.commandWorked(mongos0DB.dropDatabase());
+const mongos0LocalColl = mongos0DB[testName + "_local"];
+const mongos0ForeignColl = mongos0DB[testName + "_foreign"];
+
+const mongos1DB = st.s1.getDB(testName);
+const mongos1LocalColl = mongos1DB[testName + "_local"];
+const mongos1ForeignColl = mongos1DB[testName + "_foreign"];
+
+const pipeline = [
+ {$lookup: {localField: "a", foreignField: "b", from: mongos1ForeignColl.getName(), as: "same"}},
+ {$sort: {_id: 1}}
+];
+const expectedResults = [
+ {_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
+ {_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
+ {_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
+];
+
+// Ensure that shard0 is the primary shard.
+assert.commandWorked(mongos0DB.adminCommand({enableSharding: mongos0DB.getName()}));
+st.ensurePrimaryShard(mongos0DB.getName(), st.shard0.shardName);
+
+assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+
+// Send writes through mongos1 such that it's aware of the collections and believes they are
+// unsharded.
+assert.writeOK(mongos1LocalColl.insert({_id: 2}));
+assert.writeOK(mongos1ForeignColl.insert({_id: 2}));
+
+//
+// Test unsharded local and sharded foreign collections, with mongos unaware that the foreign
+// collection is sharded.
+//
+
+// Shard the foreign collection through mongos0.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0ForeignColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0ForeignColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0ForeignColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Issue a $lookup through mongos1, which is unaware that the foreign collection is sharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test sharded local and sharded foreign collections, with mongos unaware that the local
+// collection is sharded.
+//
+
+// Shard the local collection through mongos0.
+assert.commandWorked(
+ mongos0DB.adminCommand({shardCollection: mongos0LocalColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 1), [1, MaxKey).
+assert.commandWorked(
+ mongos0DB.adminCommand({split: mongos0LocalColl.getFullName(), middle: {_id: 1}}));
+
+// Move the [minKey, 1) chunk to shard1.
+assert.commandWorked(mongos0DB.adminCommand({
+ moveChunk: mongos0LocalColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+// Issue a $lookup through mongos1, which is unaware that the local collection is sharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test sharded local and unsharded foreign collections, with mongos unaware that the foreign
+// collection is unsharded.
+//
+
+// Recreate the foreign collection as unsharded through mongos0.
+mongos0ForeignColl.drop();
+assert.writeOK(mongos0ForeignColl.insert({_id: 0, b: 1}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 1, b: null}));
+assert.writeOK(mongos0ForeignColl.insert({_id: 2}));
+
+// Issue a $lookup through mongos1, which is unaware that the foreign collection is now
+// unsharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+//
+// Test unsharded local and foreign collections, with mongos unaware that the local
+// collection is unsharded.
+//
+
+// Recreate the local collection as unsharded through mongos0.
+mongos0LocalColl.drop();
+assert.writeOK(mongos0LocalColl.insert({_id: 0, a: 1}));
+assert.writeOK(mongos0LocalColl.insert({_id: 1, a: null}));
+assert.writeOK(mongos0LocalColl.insert({_id: 2}));
+
+// Issue a $lookup through mongos1, which is unaware that the local collection is now
+// unsharded.
+assert.eq(mongos1LocalColl.aggregate(pipeline).toArray(), expectedResults);
+
+st.stop();
})();
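One subtlety behind expectedResults above: in the equality form of $lookup, a document missing the localField or foreignField participates as null, which is why {_id: 2} lands in the same "same" array as the explicit nulls. A hedged, unsharded illustration:

    // Hedged illustration of $lookup null/missing matching semantics.
    db.localDemo.insert([{_id: 0, a: 1}, {_id: 1, a: null}, {_id: 2}]);
    db.foreignDemo.insert([{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]);
    const joined = db.localDemo.aggregate([
        {$lookup: {localField: "a", foreignField: "b", from: "foreignDemo", as: "same"}},
        {$sort: {_id: 1}}
    ]).toArray();
    // joined[1].same and joined[2].same both contain {_id: 1, b: null} and
    // {_id: 2}: a missing "a" or "b" is treated as null for the equality match.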
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index 1b4e1906379..eb6eccc1f1e 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -2,52 +2,51 @@
// Tests that only a correct major-version is needed to connect to a shard via mongos
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, mongos: 2});
+var st = new ShardingTest({shards: 1, mongos: 2});
- var mongos = st.s0;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
- // Shard collection
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+// Shard collection
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- // Make sure our stale mongos is up-to-date with no splits
- staleMongos.getCollection(coll + "").findOne();
+// Make sure our stale mongos is up-to-date with no splits
+staleMongos.getCollection(coll + "").findOne();
- // Run one split
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+// Run one split
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- // Make sure our stale mongos is not up-to-date with the split
- printjson(admin.runCommand({getShardVersion: coll + ""}));
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+// Make sure our stale mongos is not up-to-date with the split
+printjson(admin.runCommand({getShardVersion: coll + ""}));
+printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- // Compare strings b/c timestamp comparison is a bit weird
- assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+// Compare strings b/c timestamp comparison is a bit weird
+assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // See if our stale mongos is required to catch up to run a findOne on an existing connection
- staleMongos.getCollection(coll + "").findOne();
+// See if our stale mongos is required to catch up to run a findOne on an existing connection
+staleMongos.getCollection(coll + "").findOne();
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // See if our stale mongos is required to catch up to run a findOne on a new connection
- staleMongos = new Mongo(staleMongos.host);
- staleMongos.getCollection(coll + "").findOne();
+// See if our stale mongos is required to catch up to run a findOne on a new connection
+staleMongos = new Mongo(staleMongos.host);
+staleMongos.getCollection(coll + "").findOne();
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-
- st.stop();
+assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+st.stop();
})();
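A note on the "compare strings" comment in this file: the shell's assert.eq falls back to friendlyEqual, which compares tojson() serializations, so structurally equal Timestamp objects compare equal even though they are distinct objects (a hedged reading of the shell's behavior):

    // Hedged example: assert.eq compares serialized forms, so these pass.
    assert.eq(Timestamp(1, 2), Timestamp(1, 2));   // same term and increment
    assert.neq(Timestamp(1, 2), Timestamp(1, 0));  // different increment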
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index 6737d5fec0c..b51b0111a1e 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -1,86 +1,87 @@
(function() {
- "use strict";
-
- var verifyOutput = function(out) {
- printjson(out);
- assert.commandWorked(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
- };
-
- var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
- st.adminCommand({enablesharding: "mrShard"});
- st.ensurePrimaryShard('mrShard', st.shard1.shardName);
- st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-
- var db = st.getDB("mrShard");
-
- var bulk = db.srcSharded.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
- }
- assert.writeOK(bulk.execute());
+"use strict";
- function map() {
- emit(this.i, 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
-
- // sharded src
- var suffix = "InSharded";
-
- var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
- verifyOutput(out);
-    assert(out.results !== undefined, "no results for inline");
-
- // Ensure that mapReduce with a sharded input collection can accept the collation option.
- out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}, collation: {locale: "en_US"}});
- verifyOutput(out);
-    assert(out.results !== undefined, "no results for inline with collation");
-
- out = db.srcSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
- verifyOutput(out);
-
- out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" + "srcSharded",
- });
- verifyOutput(out);
-
- // Ensure that the collation option is propagated to the shards. This uses a case-insensitive
- // collation, and the query seeding the mapReduce should only match the document if the
- // collation is passed along to the shards.
- assert.writeOK(db.srcSharded.remove({}));
- assert.eq(db.srcSharded.find().itcount(), 0);
- assert.writeOK(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
- out = db.srcSharded.mapReduce(
- map,
- reduce,
- {out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
+var verifyOutput = function(out) {
+ printjson(out);
assert.commandWorked(out);
- assert.eq(out.counts.input, 1);
- st.stop();
+ assert.eq(out.counts.input, 51200, "input count is wrong");
+ assert.eq(out.counts.emit, 51200, "emit count is wrong");
+ assert.gt(out.counts.reduce, 99, "reduce count is wrong");
+ assert.eq(out.counts.output, 512, "output count is wrong");
+};
+
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+st.adminCommand({enablesharding: "mrShard"});
+st.ensurePrimaryShard('mrShard', st.shard1.shardName);
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
+
+var db = st.getDB("mrShard");
+
+var bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({j: j, i: i});
+ }
+}
+assert.writeOK(bulk.execute());
+
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
+
+// sharded src
+var suffix = "InSharded";
+
+var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
+verifyOutput(out);
+assert(out.results !== undefined, "no results for inline");
+
+// Ensure that mapReduce with a sharded input collection can accept the collation option.
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}, collation: {locale: "en_US"}});
+verifyOutput(out);
+assert(out.results !== undefined, "no results for inline with collation");
+
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
+verifyOutput(out);
+
+out = db.runCommand({
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ map: map,
+ reduce: reduce,
+ out: "mrBasic" +
+ "srcSharded",
+});
+verifyOutput(out);
+
+// Ensure that the collation option is propagated to the shards. This uses a case-insensitive
+// collation, and the query seeding the mapReduce should only match the document if the
+// collation is passed along to the shards.
+assert.writeOK(db.srcSharded.remove({}));
+assert.eq(db.srcSharded.find().itcount(), 0);
+assert.writeOK(db.srcSharded.insert({i: 0, j: 0, str: "FOO"}));
+out = db.srcSharded.mapReduce(
+ map,
+ reduce,
+ {out: {inline: 1}, query: {str: "foo"}, collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(out);
+assert.eq(out.counts.input, 1);
+st.stop();
})();
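The numbers asserted by verifyOutput fall out of the data shape: 100 values of j times 512 values of i gives 51200 inserted documents, each emitting once, with 512 distinct keys surviving the reduce. A hedged sketch of the same job against an unsharded collection loaded the same way:

    // Hedged sketch: every key i is emitted 100 times (once per j), so each
    // reduced value is 100 and there are 512 output keys.
    function map() {
        emit(this.i, 1);
    }
    function reduce(key, values) {
        return Array.sum(values);
    }
    const out = db.src.mapReduce(map, reduce, {out: {inline: 1}});
    // out.counts: input 51200, emit 51200, output 512; each result document
    // is {_id: i, value: 100}.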
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 92dae92f5f0..7a8730d2c4d 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -1,70 +1,69 @@
(function() {
- "use strict";
+"use strict";
- var verifyOutput = function(out) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, 512, "output count is wrong");
- };
+var verifyOutput = function(out) {
+ printjson(out);
+ assert.eq(out.counts.input, 51200, "input count is wrong");
+ assert.eq(out.counts.emit, 51200, "emit count is wrong");
+ assert.gt(out.counts.reduce, 99, "reduce count is wrong");
+ assert.eq(out.counts.output, 512, "output count is wrong");
+};
- var st = new ShardingTest(
- {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
- var admin = st.s0.getDB('admin');
+var admin = st.s0.getDB('admin');
- assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
- st.ensurePrimaryShard('mrShard', st.shard1.shardName);
- assert.commandWorked(
- admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
+assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
+st.ensurePrimaryShard('mrShard', st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
- var db = st.s0.getDB("mrShard");
+var db = st.s0.getDB("mrShard");
- var bulk = db.srcSharded.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
+var bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({j: j, i: i});
}
- assert.writeOK(bulk.execute());
-
- function map() {
- emit(this.i, 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
-
- // sharded src sharded dst
- var suffix = "InShardedOutSharded";
-
- var out =
- db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
- verifyOutput(out);
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
- verifyOutput(out);
-    assert(out.results !== undefined, "no results for inline");
-
- out = db.srcSharded.mapReduce(
- map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
- verifyOutput(out);
-
- out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
- map: map,
- reduce: reduce,
- out: "mrBasic" + "srcSharded",
- });
- verifyOutput(out);
-
- st.stop();
-
+}
+assert.writeOK(bulk.execute());
+
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
+
+// sharded src sharded dst
+var suffix = "InShardedOutSharded";
+
+var out =
+ db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
+verifyOutput(out);
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
+verifyOutput(out);
+assert(out.results !== undefined, "no results for inline");
+
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
+verifyOutput(out);
+
+out = db.runCommand({
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ map: map,
+ reduce: reduce,
+ out: "mrBasic" +
+ "srcSharded",
+});
+verifyOutput(out);
+
+st.stop();
})();
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index d194623c3e7..07da267d132 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -57,7 +57,8 @@ out = db.runCommand({
mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
- out: "mrBasic" + "srcNonSharded",
+ out: "mrBasic" +
+ "srcNonSharded",
});
verifyOutput(out);
st.stop();
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index 75a5fcfca33..eeb88371a7e 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -55,7 +55,8 @@ out = db.runCommand({
mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
- out: "mrBasic" + "srcNonSharded",
+ out: "mrBasic" +
+ "srcNonSharded",
});
verifyOutput(out);
st.stop();
diff --git a/jstests/sharding/mapReduce_outSharded_checkUUID.js b/jstests/sharding/mapReduce_outSharded_checkUUID.js
index 9faa35cb836..25a499c4bed 100644
--- a/jstests/sharding/mapReduce_outSharded_checkUUID.js
+++ b/jstests/sharding/mapReduce_outSharded_checkUUID.js
@@ -1,151 +1,149 @@
(function() {
- "use strict";
- load("jstests/libs/uuid_util.js");
-
- var verifyOutput = function(out, output) {
- printjson(out);
- assert.eq(out.counts.input, 51200, "input count is wrong");
- assert.eq(out.counts.emit, 51200, "emit count is wrong");
- assert.gt(out.counts.reduce, 99, "reduce count is wrong");
- assert.eq(out.counts.output, output, "output count is wrong");
- };
-
- var assertCollectionNotOnShard = function(db, coll) {
- var listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
- assert.commandWorked(listCollsRes);
- assert.neq(undefined, listCollsRes.cursor);
- assert.neq(undefined, listCollsRes.cursor.firstBatch);
- assert.eq(0, listCollsRes.cursor.firstBatch.length);
- };
-
- var st = new ShardingTest({shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1}});
-
- var admin = st.s0.getDB('admin');
-
- assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
- st.ensurePrimaryShard('mrShard', st.shard1.shardName);
- assert.commandWorked(
- admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
-
- var db = st.s0.getDB("mrShard");
-
- var bulk = db.srcSharded.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({j: j, i: i});
- }
+"use strict";
+load("jstests/libs/uuid_util.js");
+
+var verifyOutput = function(out, output) {
+ printjson(out);
+ assert.eq(out.counts.input, 51200, "input count is wrong");
+ assert.eq(out.counts.emit, 51200, "emit count is wrong");
+ assert.gt(out.counts.reduce, 99, "reduce count is wrong");
+ assert.eq(out.counts.output, output, "output count is wrong");
+};
+
+var assertCollectionNotOnShard = function(db, coll) {
+ var listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
+ assert.commandWorked(listCollsRes);
+ assert.neq(undefined, listCollsRes.cursor);
+ assert.neq(undefined, listCollsRes.cursor.firstBatch);
+ assert.eq(0, listCollsRes.cursor.firstBatch.length);
+};
+
+var st = new ShardingTest({shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1}});
+
+var admin = st.s0.getDB('admin');
+
+assert.commandWorked(admin.runCommand({enablesharding: "mrShard"}));
+st.ensurePrimaryShard('mrShard', st.shard1.shardName);
+assert.commandWorked(admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}));
+
+var db = st.s0.getDB("mrShard");
+
+var bulk = db.srcSharded.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({j: j, i: i});
}
- assert.writeOK(bulk.execute());
-
- function map() {
- emit(this.i, 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
-
- // sharded src sharded dst
- var suffix = "InShardedOutSharded";
-
- // Check that merge to an existing empty sharded collection works and creates a new UUID after
- // M/R
- st.adminCommand({shardcollection: "mrShard.outSharded", key: {"_id": 1}});
- var origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- var out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
- verifyOutput(out, 512);
- var newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
-
- // Check that merge to an existing sharded collection that has data on all shards works and that
- // the collection uses the same UUID after M/R
- assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
- assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
- assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
- origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
-
- out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
- verifyOutput(out, 514);
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID, getUUIDFromListCollections(st.shard0.getDB("mrShard"), "outSharded"));
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
-    // Check that replace to an existing sharded collection that has data on all shards works and that
- // the collection creates a new UUID after M/R.
- origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "outSharded", sharded: true}});
- verifyOutput(out, 512);
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
-
- // Check that reduce to an existing unsharded collection fails when `sharded: true`.
- assert.commandWorked(db.runCommand({create: "reduceUnsharded"}));
- assert.commandFailed(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {reduce: "reduceUnsharded", sharded: true}
- }));
-
- assert.commandWorked(db.reduceUnsharded.insert({x: 1}));
- assert.commandFailed(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {reduce: "reduceUnsharded", sharded: true}
- }));
-
- // Check that replace to an existing unsharded collection works when `sharded: true`.
- assert.commandWorked(db.runCommand({create: "replaceUnsharded"}));
- origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
-
- assert.commandWorked(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {replace: "replaceUnsharded", sharded: true}
- }));
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
- assert.neq(origUUID, newUUID);
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
-
- assert.commandWorked(db.replaceUnsharded.insert({x: 1}));
- origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
-
- assert.commandWorked(db.runCommand({
- mapReduce: "srcSharded",
- map: map,
- reduce: reduce,
- out: {replace: "replaceUnsharded", sharded: true}
- }));
-
- newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
- assert.neq(origUUID, newUUID);
- assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
-
- st.stop();
-
+}
+assert.writeOK(bulk.execute());
+
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
+
+// sharded src sharded dst
+var suffix = "InShardedOutSharded";
+
+// Check that merge to an existing empty sharded collection works and creates a new UUID after
+// M/R
+st.adminCommand({shardcollection: "mrShard.outSharded", key: {"_id": 1}});
+var origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+var out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
+verifyOutput(out, 512);
+var newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+assert.neq(origUUID, newUUID);
+
+// Shard1 is the primary shard and only one chunk should have been written, so the chunk with
+// the new UUID should have been written to it.
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
+
+// Shard0 should not have any chunks from the output collection because all shards should have
+// returned an empty split point list in the first phase of the mapReduce, since the reduced
+// data size is far less than the chunk size setting of 1MB.
+assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
+
+// Check that merge to an existing sharded collection that has data on all shards works and that
+// the collection uses the same UUID after M/R
+assert.commandWorked(admin.runCommand({split: "mrShard.outSharded", middle: {"_id": 2000}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: "mrShard.outSharded", find: {"_id": 2000}, to: st.shard0.shardName}));
+assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 1000}));
+assert.writeOK(st.s.getCollection("mrShard.outSharded").insert({_id: 2001}));
+origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "outSharded", sharded: true}});
+verifyOutput(out, 514);
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+assert.eq(origUUID, newUUID);
+assert.eq(newUUID, getUUIDFromListCollections(st.shard0.getDB("mrShard"), "outSharded"));
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
+
+// Check that replace to an existing sharded collection that has data on all shards works and that
+// the collection creates a new UUID after M/R.
+origUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "outSharded", sharded: true}});
+verifyOutput(out, 512);
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.outSharded");
+assert.neq(origUUID, newUUID);
+
+// Shard1 is the primary shard and only one chunk should have been written, so the chunk with
+// the new UUID should have been written to it.
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "outSharded"));
+
+// Shard0 should not have any chunks from the output collection because all shards should have
+// returned an empty split point list in the first phase of the mapReduce, since the reduced
+// data size is far less than the chunk size setting of 1MB.
+assertCollectionNotOnShard(st.shard0.getDB("mrShard"), "outSharded");
+
+// Check that reduce to an existing unsharded collection fails when `sharded: true`.
+assert.commandWorked(db.runCommand({create: "reduceUnsharded"}));
+assert.commandFailed(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {reduce: "reduceUnsharded", sharded: true}
+}));
+
+assert.commandWorked(db.reduceUnsharded.insert({x: 1}));
+assert.commandFailed(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {reduce: "reduceUnsharded", sharded: true}
+}));
+
+// Check that replace to an existing unsharded collection works when `sharded: true`.
+assert.commandWorked(db.runCommand({create: "replaceUnsharded"}));
+origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
+
+assert.commandWorked(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {replace: "replaceUnsharded", sharded: true}
+}));
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
+assert.neq(origUUID, newUUID);
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
+
+assert.commandWorked(db.replaceUnsharded.insert({x: 1}));
+origUUID = getUUIDFromListCollections(st.s.getDB("mrShard"), "replaceUnsharded");
+
+assert.commandWorked(db.runCommand({
+ mapReduce: "srcSharded",
+ map: map,
+ reduce: reduce,
+ out: {replace: "replaceUnsharded", sharded: true}
+}));
+
+newUUID = getUUIDFromConfigCollections(st.s, "mrShard.replaceUnsharded");
+assert.neq(origUUID, newUUID);
+assert.eq(newUUID, getUUIDFromListCollections(st.shard1.getDB("mrShard"), "replaceUnsharded"));
+
+st.stop();
})();
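The UUID checks above lean on two helpers from jstests/libs/uuid_util.js. Conceptually (a hedged sketch of their assumed shape, not the library's literal code) they read the collection UUID from the config server's metadata and from a node's own catalog:

    // Hedged sketch of what the uuid_util.js helpers resolve (assumed shape).
    function getUUIDFromConfigCollectionsSketch(mongos, ns) {
        // Sharding metadata: config.collections keys entries by namespace.
        return mongos.getDB("config").collections.findOne({_id: ns}).uuid;
    }
    function getUUIDFromListCollectionsSketch(db, collName) {
        // Local catalog: listCollections reports the UUID under info.uuid.
        const res = db.runCommand({listCollections: 1, filter: {name: collName}});
        return res.cursor.firstBatch[0].info.uuid;
    }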
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index d46ba9af74a..16c56658a5d 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -5,267 +5,266 @@
// Note that mongos does not time out commands or query ops (which remains the responsibility of mongod,
// pending development of an interrupt framework for mongos).
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var shards = [st.shard0, st.shard1];
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var cursor;
- var res;
+var mongos = st.s0;
+var shards = [st.shard0, st.shard1];
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var cursor;
+var res;
- // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
- // to throw if it receives an operation with a max time. See fail point declaration for complete
- // description.
- var configureMaxTimeAlwaysTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- };
+// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
+// to throw if it receives an operation with a max time. See fail point declaration for complete
+// description.
+var configureMaxTimeAlwaysTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+};
-    // Helper function to configure "maxTimeNeverTimeOut" fail point on shards, which prohibits
- // mongod from enforcing time limits. See fail point declaration for complete description.
- var configureMaxTimeNeverTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
- };
+// Helper function to configure "maxTimeNeverTimeOut" fail point on shards, which prohibits
+// mongod from enforcing time limits. See fail point declaration for complete description.
+var configureMaxTimeNeverTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+};
- //
- // Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
- //
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
- st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+//
+// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
+//
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
- //
- // Insert 1000 documents into sharded collection, such that each shard owns 500.
- //
- const nDocsPerShard = 500;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = -nDocsPerShard; i < nDocsPerShard; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
- assert.eq(nDocsPerShard, shards[0].getCollection(coll.getFullName()).count());
- assert.eq(nDocsPerShard, shards[1].getCollection(coll.getFullName()).count());
+//
+// Insert 1000 documents into sharded collection, such that each shard owns 500.
+//
+const nDocsPerShard = 500;
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = -nDocsPerShard; i < nDocsPerShard; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
+assert.eq(nDocsPerShard, shards[0].getCollection(coll.getFullName()).count());
+assert.eq(nDocsPerShard, shards[1].getCollection(coll.getFullName()).count());
- //
- // Test that mongos correctly forwards max time to shards for sharded queries. Uses
- // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
- //
+//
+// Test that mongos correctly forwards max time to shards for sharded queries. Uses
+// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+//
- // Positive test.
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- cursor = coll.find();
- cursor.maxTimeMS(60 * 1000);
- assert.throws(function() {
- cursor.next();
- }, [], "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+// Positive test.
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+cursor = coll.find();
+cursor.maxTimeMS(60 * 1000);
+assert.throws(function() {
+ cursor.next();
+}, [], "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
- // Negative test.
- configureMaxTimeAlwaysTimeOut("off");
- cursor = coll.find();
- cursor.maxTimeMS(60 * 1000);
- assert.doesNotThrow(function() {
- cursor.next();
- }, [], "expected query to not hit time limit in mongod");
+// Negative test.
+configureMaxTimeAlwaysTimeOut("off");
+cursor = coll.find();
+cursor.maxTimeMS(60 * 1000);
+assert.doesNotThrow(function() {
+ cursor.next();
+}, [], "expected query to not hit time limit in mongod");
- //
- // Test that mongos correctly times out max time sharded getmore operations. Uses
- // maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
- //
+//
+// Test that mongos correctly times out max time sharded getmore operations. Uses
+// maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
+//
- configureMaxTimeNeverTimeOut("alwaysOn");
+configureMaxTimeNeverTimeOut("alwaysOn");
- // Positive test. ~10s operation, 2s limit. The operation takes ~10s because each shard
- // processes 250 batches of ~40ms each, and the shards are processing getMores in parallel.
- cursor = coll.find({
- $where: function() {
- sleep(20);
- return true;
- }
- });
- cursor.batchSize(2);
- cursor.maxTimeMS(2 * 1000);
- assert.doesNotThrow(
- () => cursor.next(), [], "did not expect mongos to time out first batch of query");
- assert.throws(() => cursor.itcount(), [], "expected mongos to abort getmore due to time limit");
+// Positive test. ~10s operation, 2s limit. The operation takes ~10s because each shard
+// processes 250 batches of ~40ms each, and the shards are processing getMores in parallel.
+cursor = coll.find({
+ $where: function() {
+ sleep(20);
+ return true;
+ }
+});
+cursor.batchSize(2);
+cursor.maxTimeMS(2 * 1000);
+assert.doesNotThrow(
+ () => cursor.next(), [], "did not expect mongos to time out first batch of query");
+assert.throws(() => cursor.itcount(), [], "expected mongos to abort getmore due to time limit");
- // Negative test. ~5s operation, with a high (1-day) limit.
- cursor = coll.find({
- $where: function() {
- sleep(10);
- return true;
- }
- });
- cursor.batchSize(2);
- cursor.maxTimeMS(1000 * 60 * 60 * 24);
- assert.doesNotThrow(function() {
- cursor.next();
- }, [], "did not expect mongos to time out first batch of query");
- assert.doesNotThrow(function() {
- cursor.itcount();
- }, [], "did not expect getmore ops to hit the time limit");
+// Negative test. ~5s operation, with a high (1-day) limit.
+cursor = coll.find({
+ $where: function() {
+ sleep(10);
+ return true;
+ }
+});
+cursor.batchSize(2);
+cursor.maxTimeMS(1000 * 60 * 60 * 24);
+assert.doesNotThrow(function() {
+ cursor.next();
+}, [], "did not expect mongos to time out first batch of query");
+assert.doesNotThrow(function() {
+ cursor.itcount();
+}, [], "did not expect getmore ops to hit the time limit");
- configureMaxTimeNeverTimeOut("off");
+configureMaxTimeNeverTimeOut("off");
- //
- // Test that mongos correctly forwards max time to shards for sharded commands. Uses
- // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
- //
+//
+// Test that mongos correctly forwards max time to shards for sharded commands. Uses
+// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+//
- // Positive test for "validate".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected vailidate to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "validate".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected vailidate to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "validate".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
- "expected validate to not hit time limit in mongod");
+// Negative test for "validate".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
+ "expected validate to not hit time limit in mongod");
- // Positive test for "count".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected count to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " +
- tojson(res));
+// Positive test for "count".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("count", {maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected count to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "count".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
- "expected count to not hit time limit in mongod");
+// Negative test for "count".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
+ "expected count to not hit time limit in mongod");
- // Positive test for "collStats".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected collStats to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "collStats".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected collStats to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "collStats".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
- "expected collStats to not hit time limit in mongod");
+// Negative test for "collStats".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
+ "expected collStats to not hit time limit in mongod");
- // Positive test for "mapReduce".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- res = coll.runCommand("mapReduce", {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- });
- assert.commandFailedWithCode(
- res,
- ErrorCodes.MaxTimeMSExpired,
- "expected mapReduce to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "mapReduce".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+res = coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+});
+assert.commandFailedWithCode(
+ res,
+ ErrorCodes.MaxTimeMSExpired,
+ "expected mapReduce to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "mapReduce".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("mapReduce", {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- }),
- "expected mapReduce to not hit time limit in mongod");
+// Negative test for "mapReduce".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+}),
+ "expected mapReduce to not hit time limit in mongod");
- // Positive test for "aggregate".
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
- ErrorCodes.MaxTimeMSExpired,
- "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+// Positive test for "aggregate".
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
- // Negative test for "aggregate".
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(
- coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
- "expected aggregate to not hit time limit in mongod");
+// Negative test for "aggregate".
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
+ "expected aggregate to not hit time limit in mongod");
- // Test that the maxTimeMS is still enforced on the shards even if we do not spend much time in
- // mongos blocking.
+// Test that the maxTimeMS is still enforced on the shards even if we do not spend much time in
+// mongos blocking.
- // Manually run a find here so we can be sure cursor establishment happens with batch size 0.
- res = assert.commandWorked(coll.runCommand({
- find: coll.getName(),
- filter: {
- $where: function() {
- if (this._id < 0) {
- // Slow down the query only on one of the shards. Each shard has 500 documents
- // so we expect this shard to take ~10 seconds to return a batch of 500.
- sleep(20);
- }
- return true;
+// Manually run a find here so we can be sure cursor establishment happens with batch size 0.
+res = assert.commandWorked(coll.runCommand({
+ find: coll.getName(),
+ filter: {
+ $where: function() {
+ if (this._id < 0) {
+ // Slow down the query only on one of the shards. Each shard has 500 documents
+ // so we expect this shard to take ~10 seconds to return a batch of 500.
+ sleep(20);
}
- },
- maxTimeMS: 2000,
- batchSize: 0
- }));
- // Use a batch size of 500 to allow returning results from the fast shard as soon as they're
- // ready, as opposed to waiting to return one 16MB batch at a time.
- const kBatchSize = nDocsPerShard;
- cursor = new DBCommandCursor(coll.getDB(), res, kBatchSize);
- // The fast shard should return relatively quickly.
- for (let i = 0; i < nDocsPerShard; ++i) {
- let next = assert.doesNotThrow(
- () => cursor.next(), [], "did not expect mongos to time out first batch of query");
- assert.gte(next._id, 0);
- }
- // Sleep on the client-side so mongos's time budget is not being used.
- sleep(3 * 1000);
- // Even though mongos has not been blocking this whole time, the shard has been busy computing
- // the next batch and should have timed out.
- assert.throws(() => cursor.next(), [], "expected mongos to abort getMore due to time limit");
+ return true;
+ }
+ },
+ maxTimeMS: 2000,
+ batchSize: 0
+}));
+// Use a batch size of 500 to allow returning results from the fast shard as soon as they're
+// ready, as opposed to waiting to return one 16MB batch at a time.
+const kBatchSize = nDocsPerShard;
+cursor = new DBCommandCursor(coll.getDB(), res, kBatchSize);
+// The fast shard should return relatively quickly.
+for (let i = 0; i < nDocsPerShard; ++i) {
+ let next = assert.doesNotThrow(
+ () => cursor.next(), [], "did not expect mongos to time out first batch of query");
+ assert.gte(next._id, 0);
+}
+// Sleep on the client-side so mongos's time budget is not being used.
+sleep(3 * 1000);
+// Even though mongos has not been blocking this whole time, the shard has been busy computing
+// the next batch and should have timed out.
+assert.throws(() => cursor.next(), [], "expected mongos to abort getMore due to time limit");
- // The moveChunk tests are disabled due to SERVER-30179
- //
- // // Positive test for "moveChunk".
- // configureMaxTimeAlwaysTimeOut("alwaysOn");
- // res = admin.runCommand({
- // moveChunk: coll.getFullName(),
- // find: {_id: 0},
- // to: st.shard0.shardName,
- // maxTimeMS: 1000 * 60 * 60 * 24
- // });
- // assert.commandFailed(
- // res,
- // "expected moveChunk to fail due to maxTimeAlwaysTimeOut fail point, but instead got: " +
- // tojson(res));
+// The moveChunk tests are disabled due to SERVER-30179
+//
+// // Positive test for "moveChunk".
+// configureMaxTimeAlwaysTimeOut("alwaysOn");
+// res = admin.runCommand({
+// moveChunk: coll.getFullName(),
+// find: {_id: 0},
+// to: st.shard0.shardName,
+// maxTimeMS: 1000 * 60 * 60 * 24
+// });
+// assert.commandFailed(
+// res,
+// "expected moveChunk to fail due to maxTimeAlwaysTimeOut fail point, but instead got: " +
+// tojson(res));
- // // Negative test for "moveChunk".
- // configureMaxTimeAlwaysTimeOut("off");
- // assert.commandWorked(admin.runCommand({
- // moveChunk: coll.getFullName(),
- // find: {_id: 0},
- // to: st.shard0.shardName,
- // maxTimeMS: 1000 * 60 * 60 * 24
- // }),
- // "expected moveChunk to not hit time limit in mongod");
+// // Negative test for "moveChunk".
+// configureMaxTimeAlwaysTimeOut("off");
+// assert.commandWorked(admin.runCommand({
+// moveChunk: coll.getFullName(),
+// find: {_id: 0},
+// to: st.shard0.shardName,
+// maxTimeMS: 1000 * 60 * 60 * 24
+// }),
+// "expected moveChunk to not hit time limit in mongod");
- st.stop();
+st.stop();
})();
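
For context, the cursor pattern above shows where the maxTimeMS budget is charged: mongos does not consume it while the client idles, but a shard that keeps computing the next batch in the background still does, so a later getMore can fail. A minimal sketch of the pattern, assuming an existing sharded collection handle 'coll' (values are illustrative, not part of the patch):

    // Start the query with an empty first batch so no shard work is charged yet.
    var res = assert.commandWorked(coll.runCommand(
        {find: coll.getName(), filter: {}, maxTimeMS: 2000, batchSize: 0}));
    // Iterate through mongos; each getMore draws from the same 2000ms budget.
    var cursor = new DBCommandCursor(coll.getDB(), res, 100 /* batchSize */);
    while (cursor.hasNext()) {
        cursor.next();  // Throws if cumulative server-side time exceeds 2000ms.
    }
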
diff --git a/jstests/sharding/max_time_ms_sharded_new_commands.js b/jstests/sharding/max_time_ms_sharded_new_commands.js
index c8072359ce1..b611199954e 100644
--- a/jstests/sharding/max_time_ms_sharded_new_commands.js
+++ b/jstests/sharding/max_time_ms_sharded_new_commands.js
@@ -1,45 +1,44 @@
// Make sure the setFeatureCompatibilityVersion command respects maxTimeMs.
(function() {
- 'use strict';
- load("./jstests/libs/feature_compatibility_version.js");
- var st = new ShardingTest({shards: 2});
+'use strict';
+load("./jstests/libs/feature_compatibility_version.js");
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var shards = [st.shard0, st.shard1];
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var cursor;
- var res;
+var mongos = st.s0;
+var shards = [st.shard0, st.shard1];
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var cursor;
+var res;
- // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
- // to throw if it receives an operation with a max time. See fail point declaration for
- // complete description.
- var configureMaxTimeAlwaysTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- };
+// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
+// to throw if it receives an operation with a max time. See fail point declaration for
+// complete description.
+var configureMaxTimeAlwaysTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+};
- // Positive test for "setFeatureCompatibilityVersion"
- configureMaxTimeAlwaysTimeOut("alwaysOn");
- assert.commandFailedWithCode(
- admin.runCommand(
- {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
- ErrorCodes.MaxTimeMSExpired,
- "expected setFeatureCompatibilityVersion to fail due to maxTimeAlwaysTimeOut fail point");
+// Positive test for "setFeatureCompatibilityVersion"
+configureMaxTimeAlwaysTimeOut("alwaysOn");
+assert.commandFailedWithCode(
+ admin.runCommand(
+ {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
+ ErrorCodes.MaxTimeMSExpired,
+ "expected setFeatureCompatibilityVersion to fail due to maxTimeAlwaysTimeOut fail point");
- // Negative test for "setFeatureCompatibilityVersion"
- configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(
- admin.runCommand(
- {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
- "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
+// Negative test for "setFeatureCompatibilityVersion"
+configureMaxTimeAlwaysTimeOut("off");
+assert.commandWorked(
+ admin.runCommand(
+ {setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
+ "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
- assert.commandWorked(
- admin.runCommand(
- {setFeatureCompatibilityVersion: latestFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
- "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
+assert.commandWorked(
+ admin.runCommand({setFeatureCompatibilityVersion: latestFCV, maxTimeMS: 1000 * 60 * 60 * 24}),
+ "expected setFeatureCompatibilityVersion to not hit time limit in mongod");
- st.stop();
+st.stop();
})();
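
The "maxTimeAlwaysTimeOut" fail point used throughout these tests follows the generic configureFailPoint command protocol. A compact sketch of the enable/run/disable cycle, assuming a direct connection 'conn' to one mongod (the variable name is illustrative):

    // Force any operation carrying a maxTimeMS to fail immediately on this node.
    assert.commandWorked(conn.getDB("admin").runCommand(
        {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
    // ... exercise the command under test here ...
    // Turn the fail point back off so later operations behave normally.
    assert.commandWorked(conn.getDB("admin").runCommand(
        {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
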
diff --git a/jstests/sharding/merge_chunks_compound_shard_key.js b/jstests/sharding/merge_chunks_compound_shard_key.js
index 9d6fc3aac14..3472073f4c5 100644
--- a/jstests/sharding/merge_chunks_compound_shard_key.js
+++ b/jstests/sharding/merge_chunks_compound_shard_key.js
@@ -4,92 +4,89 @@
//
(function() {
- 'use strict';
-
- var getShardVersion = function() {
- var res = st.shard0.adminCommand({getShardVersion: coll + ""});
- assert.commandWorked(res);
- var version = res.global;
- assert(version);
- return version;
- };
-
- // Merge two neighboring chunks and check post conditions.
- var checkMergeWorked = function(lowerBound, upperBound) {
- var oldVersion = getShardVersion();
- var numChunksBefore = chunks.find().itcount();
-
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [lowerBound, upperBound]}));
-
- assert.eq(numChunksBefore - 1, chunks.find().itcount());
- assert.eq(1, chunks.find({min: lowerBound, max: upperBound}).itcount());
-
- var newVersion = getShardVersion();
- assert.eq(newVersion.t, oldVersion.t);
- assert.gt(newVersion.i, oldVersion.i);
- };
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var shards = mongos.getCollection("config.shards").find().toArray();
- var chunks = mongos.getCollection("config.chunks");
- var coll = mongos.getCollection("foo.bar");
-
- jsTest.log("Create a sharded collection with a compound shard key.");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1, y: 1}}));
-
- // Chunks after splits:
- // (MinKey, { x: 0, y: 1 })
- // ({ x: 0, y: 1 }, { x: 1, y: 0 })
- // ({ x: 1, y: 0 }, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, { x: 2, y: 1 })
- // ({ x: 2, y: 1 }, MaxKey)
- jsTest.log("Create chunks.");
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0, y: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 1, y: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 1}}));
-
- jsTest.log("Insert some data into each of the chunk ranges.");
- assert.writeOK(coll.insert({x: -1, y: 2}));
- assert.writeOK(coll.insert({x: 0, y: 2}));
- assert.writeOK(coll.insert({x: 1, y: 2}));
- assert.writeOK(coll.insert({x: 2, y: 1}));
- assert.writeOK(coll.insert({x: 2, y: 3}));
-
- // Chunks after merge:
- // (MinKey, { x: 0, y: 1 })
- // ({ x: 0, y: 1 }, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, { x: 2, y: 1 })
- // ({ x: 2, y: 1 }, MaxKey)
- jsTest.log("Merge chunks whose upper and lower bounds are compound shard keys.");
- checkMergeWorked({x: 0, y: 1}, {x: 2, y: 0});
-
- // Chunks after merge:
- // (MinKey, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, { x: 2, y: 1 })
- // ({ x: 2, y: 1 }, MaxKey)
- jsTest.log(
- "Merge chunks whose upper bound contains a compound shard key, lower bound is MinKey");
- checkMergeWorked({x: MinKey, y: MinKey}, {x: 2, y: 0});
-
- // Chunks after merge:
- // (MinKey, { x: 2, y: 0 })
- // ({ x: 2, y: 0 }, MaxKey)
- jsTest.log(
- "Merge chunks whose lower bound contains a compound shard key, upper bound is MaxKey");
- checkMergeWorked({x: 2, y: 0}, {x: MaxKey, y: MaxKey});
-
- // Chunks after merge:
- // (MinKey, MaxKey)
- jsTest.log("Merge chunks whos bounds are MinKey/MaxKey, but which have a compound shard key");
- checkMergeWorked({x: MinKey, y: MinKey}, {x: MaxKey, y: MaxKey});
-
- st.stop();
-
+'use strict';
+
+var getShardVersion = function() {
+ var res = st.shard0.adminCommand({getShardVersion: coll + ""});
+ assert.commandWorked(res);
+ var version = res.global;
+ assert(version);
+ return version;
+};
+
+// Merge two neighboring chunks and check postconditions.
+var checkMergeWorked = function(lowerBound, upperBound) {
+ var oldVersion = getShardVersion();
+ var numChunksBefore = chunks.find().itcount();
+
+ assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [lowerBound, upperBound]}));
+
+ assert.eq(numChunksBefore - 1, chunks.find().itcount());
+ assert.eq(1, chunks.find({min: lowerBound, max: upperBound}).itcount());
+
+ var newVersion = getShardVersion();
+ assert.eq(newVersion.t, oldVersion.t);
+ assert.gt(newVersion.i, oldVersion.i);
+};
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var chunks = mongos.getCollection("config.chunks");
+var coll = mongos.getCollection("foo.bar");
+
+jsTest.log("Create a sharded collection with a compound shard key.");
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1, y: 1}}));
+
+// Chunks after splits:
+// (MinKey, { x: 0, y: 1 })
+// ({ x: 0, y: 1 }, { x: 1, y: 0 })
+// ({ x: 1, y: 0 }, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, { x: 2, y: 1 })
+// ({ x: 2, y: 1 }, MaxKey)
+jsTest.log("Create chunks.");
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0, y: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 1, y: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 2, y: 1}}));
+
+jsTest.log("Insert some data into each of the chunk ranges.");
+assert.writeOK(coll.insert({x: -1, y: 2}));
+assert.writeOK(coll.insert({x: 0, y: 2}));
+assert.writeOK(coll.insert({x: 1, y: 2}));
+assert.writeOK(coll.insert({x: 2, y: 1}));
+assert.writeOK(coll.insert({x: 2, y: 3}));
+
+// Chunks after merge:
+// (MinKey, { x: 0, y: 1 })
+// ({ x: 0, y: 1 }, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, { x: 2, y: 1 })
+// ({ x: 2, y: 1 }, MaxKey)
+jsTest.log("Merge chunks whose upper and lower bounds are compound shard keys.");
+checkMergeWorked({x: 0, y: 1}, {x: 2, y: 0});
+
+// Chunks after merge:
+// (MinKey, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, { x: 2, y: 1 })
+// ({ x: 2, y: 1 }, MaxKey)
+jsTest.log("Merge chunks whose upper bound contains a compound shard key, lower bound is MinKey");
+checkMergeWorked({x: MinKey, y: MinKey}, {x: 2, y: 0});
+
+// Chunks after merge:
+// (MinKey, { x: 2, y: 0 })
+// ({ x: 2, y: 0 }, MaxKey)
+jsTest.log("Merge chunks whose lower bound contains a compound shard key, upper bound is MaxKey");
+checkMergeWorked({x: 2, y: 0}, {x: MaxKey, y: MaxKey});
+
+// Chunks after merge:
+// (MinKey, MaxKey)
+jsTest.log("Merge chunks whos bounds are MinKey/MaxKey, but which have a compound shard key");
+checkMergeWorked({x: MinKey, y: MinKey}, {x: MaxKey, y: MaxKey});
+
+st.stop();
})();
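
The checkMergeWorked helper above reads the shard version before and after each merge: the major component ('t' here) is expected to stay fixed, since no chunk migrated, while the minor component ('i') is bumped by the metadata-only change. A hedged sketch of that comparison, reusing 'st', 'admin', and 'coll' as defined in the test:

    var before = st.shard0.adminCommand({getShardVersion: coll + ""}).global;
    assert.commandWorked(
        admin.runCommand({mergeChunks: coll + "", bounds: [{x: 0, y: 1}, {x: 2, y: 0}]}));
    var after = st.shard0.adminCommand({getShardVersion: coll + ""}).global;
    assert.eq(after.t, before.t);  // Major component unchanged: nothing migrated.
    assert.gt(after.i, before.i);  // Minor component increased by the merge.
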
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 5d3bfbbb97b..3166f47113e 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -2,145 +2,133 @@
// Tests that merging chunks via mongos works/doesn't work with different chunk configurations
//
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 2});
-
- var mongos = st.s0;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard('foo', st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-
- // Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first
- // shard
- jsTest.log("Creating ranges...");
-
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 10}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 20}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 40}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 50}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 90}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 100}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 110}}));
-
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard1.shardName}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 90}, to: st.shard1.shardName}));
-
- st.printShardingStatus();
-
- // Insert some data into each of the consolidated ranges
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 10}));
- assert.writeOK(coll.insert({_id: 40}));
- assert.writeOK(coll.insert({_id: 110}));
-
- var staleCollection = staleMongos.getCollection(coll + "");
-
- jsTest.log("Trying merges that should fail...");
-
- // S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
- // S1: 10->20, 90->100
-
- // Make sure merging non-exact chunks is invalid
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 5}]}));
- assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 5}, {_id: 10}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 15}, {_id: 50}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 55}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 105}, {_id: MaxKey}]}));
-
- // Make sure merging single chunks is invalid
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 0}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 40}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 110}, {_id: MaxKey}]}));
-
- // Make sure merging over holes is invalid
- assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 40}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 110}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 50}, {_id: 110}]}));
-
- // Make sure merging between shards is invalid
- assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 20}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 10}, {_id: 40}]}));
- assert.commandFailed(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 100}]}));
- assert.eq(4, staleCollection.find().itcount());
-
- jsTest.log("Trying merges that should succeed...");
-
- // Make sure merge including the MinKey works
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 10}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
- // S1: 10->20, 90->100
-
- // Make sure merging three chunks in the middle works
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 90}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 20->90, 100->110, 110->max
- // S1: 10->20, 90->100
-
- // Make sure merge including the MaxKey works
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 100}, {_id: MaxKey}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 20->90, 100->max
- // S1: 10->20, 90->100
-
- // Make sure merging chunks after a chunk has been moved out of a shard succeeds
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 110}, to: st.shard1.shardName}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard0.shardName}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 10->20, 20->90
- // S1: 90->100, 100->max
-
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 90}, {_id: MaxKey}]}));
- assert.eq(4, staleCollection.find().itcount());
- // S0: min->10, 10->20, 20->90
- // S1: 90->max
-
- // Make sure merge on the other shard after a chunk has been merged succeeds
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 90}]}));
- // S0: min->90
- // S1: 90->max
-
- st.printShardingStatus(true);
-
- assert.eq(2, st.s0.getDB('config').chunks.find({'ns': 'foo.bar'}).itcount());
- assert.eq(
- 1,
- st.s0.getDB('config')
- .chunks
- .find({'ns': 'foo.bar', 'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
- .itcount());
- assert.eq(
- 1,
- st.s0.getDB('config')
- .chunks
- .find({'ns': 'foo.bar', 'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
- .itcount());
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 2});
+
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard('foo', st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+
+// Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first
+// shard
+jsTest.log("Creating ranges...");
+
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 10}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 20}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 40}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 50}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 90}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 100}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 110}}));
+
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 90}, to: st.shard1.shardName}));
+
+st.printShardingStatus();
+
+// Insert some data into each of the consolidated ranges
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 10}));
+assert.writeOK(coll.insert({_id: 40}));
+assert.writeOK(coll.insert({_id: 110}));
+
+var staleCollection = staleMongos.getCollection(coll + "");
+
+jsTest.log("Trying merges that should fail...");
+
+// S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
+// S1: 10->20, 90->100
+
+// Make sure merging non-exact chunks is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 5}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 5}, {_id: 10}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 15}, {_id: 50}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 55}]}));
+assert.commandFailed(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 105}, {_id: MaxKey}]}));
+
+// Make sure merging single chunks is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 0}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 40}]}));
+assert.commandFailed(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 110}, {_id: MaxKey}]}));
+
+// Make sure merging over holes is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 40}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 110}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 50}, {_id: 110}]}));
+
+// Make sure merging between shards is invalid
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 20}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 10}, {_id: 40}]}));
+assert.commandFailed(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 100}]}));
+assert.eq(4, staleCollection.find().itcount());
+
+jsTest.log("Trying merges that should succeed...");
+
+// Make sure merge including the MinKey works
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 10}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
+// S1: 10->20, 90->100
+
+// Make sure merging three chunks in the middle works
+assert.commandWorked(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 90}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 20->90, 100->110, 110->max
+// S1: 10->20, 90->100
+
+// Make sure merge including the MaxKey works
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 100}, {_id: MaxKey}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 20->90, 100->max
+// S1: 10->20, 90->100
+
+// Make sure merging chunks after a chunk has been moved out of a shard succeeds
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 110}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: st.shard0.shardName}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 10->20, 20->90
+// S1: 90->100, 100->max
+
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 90}, {_id: MaxKey}]}));
+assert.eq(4, staleCollection.find().itcount());
+// S0: min->10, 10->20, 20->90
+// S1: 90->max
+
+// Make sure merge on the other shard after a chunk has been merged succeeds
+assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 90}]}));
+// S0: min->90
+// S1: 90->max
+
+st.printShardingStatus(true);
+
+assert.eq(2, st.s0.getDB('config').chunks.find({'ns': 'foo.bar'}).itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks
+ .find({'ns': 'foo.bar', 'min._id': MinKey, 'max._id': 90, shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks
+ .find({'ns': 'foo.bar', 'min._id': 90, 'max._id': MaxKey, shard: st.shard1.shardName})
+ .itcount());
+
+st.stop();
})();
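
Taken together, the failing cases above outline mergeChunks' preconditions: the bounds must coincide with existing chunk boundaries, span at least two chunks, contain no holes, and lie entirely on one shard. A sketch of sanity-checking candidate bounds against the config metadata before merging (illustrative only, not part of the test):

    var configChunks = st.s0.getDB('config').chunks;
    // Fetch the chunks that the candidate bounds [20, 90) would cover.
    var covered = configChunks.find({ns: 'foo.bar', 'min._id': {$gte: 20}, 'max._id': {$lte: 90}})
                      .sort({'min._id': 1})
                      .toArray();
    assert.gt(covered.length, 1, "a merge must cover at least two chunks");
    for (var i = 1; i < covered.length; i++) {
        assert.eq(covered[i].shard, covered[0].shard, "chunks must share one shard");
        assert.eq(covered[i].min._id, covered[i - 1].max._id, "no holes allowed");
    }
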
diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
index 63b2504521f..9f99cd584c4 100644
--- a/jstests/sharding/merge_chunks_test_with_md_ops.js
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -1,54 +1,53 @@
// Tests that merging chunks does not prevent cluster from doing other metadata ops
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.printShardingStatus();
+st.printShardingStatus();
- // Split and merge the first chunk repeatedly
- jsTest.log("Splitting and merging repeatedly...");
-
- for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
- printjson(mongos.getDB("config").chunks.find().toArray());
- }
-
- // Move the first chunk to the other shard
- jsTest.log("Moving to another shard...");
+// Split and merge the first chunk repeatedly
+jsTest.log("Splitting and merging repeatedly...");
+for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+}
- // Split and merge the chunk repeatedly
- jsTest.log("Splitting and merging repeatedly (again)...");
+// Move the first chunk to the other shard
+jsTest.log("Moving to another shard...");
- for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- assert.commandWorked(
- admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
- printjson(mongos.getDB("config").chunks.find().toArray());
- }
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
- // Move the chunk back to the original shard
- jsTest.log("Moving to original shard...");
+// Split and merge the chunk repeatedly
+jsTest.log("Splitting and merging repeatedly (again)...");
+for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard0.shardName}));
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+}
+
+// Move the chunk back to the original shard
+jsTest.log("Moving to original shard...");
- st.printShardingStatus();
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard0.shardName}));
- st.stop();
+st.printShardingStatus();
+st.stop();
})();
diff --git a/jstests/sharding/merge_command_options.js b/jstests/sharding/merge_command_options.js
index e82f71695f0..7d0edc56754 100644
--- a/jstests/sharding/merge_command_options.js
+++ b/jstests/sharding/merge_command_options.js
@@ -1,182 +1,183 @@
// Tests that aggregations with a $merge stage respect the options set on the command.
(function() {
- 'use strict';
-
- load("jstests/libs/profiler.js"); // For profilerHasNumMatchingEntriesOrThrow.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+'use strict';
+
+load("jstests/libs/profiler.js"); // For profilerHasNumMatchingEntriesOrThrow.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+const mongosDB = st.s0.getDB("test");
+const source = mongosDB.getCollection("source");
+const target = mongosDB.getCollection("target");
+const primaryDB = st.rs0.getPrimary().getDB("test");
+const nonPrimaryDB = st.rs1.getPrimary().getDB("test");
+const maxTimeMS = 5 * 60 * 1000;
+
+// Enable profiling on the test DB.
+assert.commandWorked(primaryDB.setProfilingLevel(2));
+assert.commandWorked(nonPrimaryDB.setProfilingLevel(2));
+
+// Enable sharding on the test DB and ensure that shard0 is the primary.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the target collection, and set the unique flag to ensure that there's a unique
+// index on the shard key.
+const shardKey = {
+ sk: 1
+};
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: target.getFullName(), key: shardKey, unique: true}));
+assert.commandWorked(mongosDB.adminCommand({split: target.getFullName(), middle: {sk: 1}}));
+assert.commandWorked(
+ mongosDB.adminCommand({moveChunk: target.getFullName(), find: {sk: 1}, to: st.rs1.getURL()}));
+
+assert.commandWorked(source.insert({sk: "dummy"}));
+
+// The shardCollection command will send a listIndexes on the target collection.
+profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 1
+});
+
+// Test that the maxTimeMS value is used for both the listIndexes command for uniqueKey
+// validation as well as the $merge aggregation itself.
+(function testMaxTimeMS() {
+ assert.commandWorked(source.runCommand("aggregate", {
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(shardKey)
+ }
+ }],
+ cursor: {},
+ maxTimeMS: maxTimeMS
+ }));
+
+ // Verify the profile entry for the aggregate on the source collection.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {
+ ns: source.getFullName(),
+ "command.aggregate": source.getName(),
+ "command.maxTimeMS": maxTimeMS
+ },
+ numExpectedMatches: 1
+ });
- const mongosDB = st.s0.getDB("test");
- const source = mongosDB.getCollection("source");
- const target = mongosDB.getCollection("target");
- const primaryDB = st.rs0.getPrimary().getDB("test");
- const nonPrimaryDB = st.rs1.getPrimary().getDB("test");
- const maxTimeMS = 5 * 60 * 1000;
+ // The listIndexes command should be sent to the primary shard only. Note that the
+ // maxTimeMS will *not* show up in the profiler since the parameter is used as a timeout for
+    // the remote command rather than as part of the command itself.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 2
+ });
+})();
- // Enable profiling on the test DB.
- assert.commandWorked(primaryDB.setProfilingLevel(2));
- assert.commandWorked(nonPrimaryDB.setProfilingLevel(2));
+(function testTimeout() {
+ // Configure the "maxTimeAlwaysTimeOut" fail point on the primary shard, which forces
+ // mongod to throw if it receives an operation with a max time.
+ assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
+
+ // Test that the $merge correctly fails when the maxTimeMS is exceeded.
+ const res = source.runCommand("aggregate", {
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(shardKey)
+ }
+ }],
+ cursor: {},
+ maxTimeMS: maxTimeMS
+ });
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.MaxTimeMSExpired,
+ "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
+ " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
+
+ // The actual aggregate should not be in the profiler since the initial listIndexes should
+ // have timed out.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {
+ ns: source.getFullName(),
+ "command.aggregate": source.getName(),
+ "command.maxTimeMS": maxTimeMS
+ },
+ numExpectedMatches: 1
+ });
- // Enable sharding on the test DB and ensure that shard0 is the primary.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+ // Verify that there is an additional listIndexes profiler entry on the primary shard.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: primaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 3
+ });
- // Shard the target collection, and set the unique flag to ensure that there's a unique
- // index on the shard key.
- const shardKey = {sk: 1};
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: target.getFullName(), key: shardKey, unique: true}));
- assert.commandWorked(mongosDB.adminCommand({split: target.getFullName(), middle: {sk: 1}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: target.getFullName(), find: {sk: 1}, to: st.rs1.getURL()}));
+ assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
+})();
- assert.commandWorked(source.insert({sk: "dummy"}));
+// Test that setting a read preference on the $merge also applies to the listIndexes
+// command.
+(function testReadPreference() {
+ const secondaryDB = st.rs0.getSecondary().getDB("test");
+ assert.commandWorked(secondaryDB.setProfilingLevel(2));
+
+ assert.commandWorked(source.runCommand("aggregate", {
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(shardKey)
+ }
+ }],
+ cursor: {},
+ $readPreference: {mode: "secondary"}
+ }));
+
+ // Verify that the profiler on the secondary includes an entry for the listIndexes.
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: secondaryDB,
+ filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 1
+ });
- // The shardCollection command will send a listIndexes on the target collection.
+ // Verify that the primary shard does *not* have an additional listIndexes profiler entry.
profilerHasNumMatchingEntriesOrThrow({
profileDB: primaryDB,
filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
+ numExpectedMatches: 3
+ });
+
+ profilerHasNumMatchingEntriesOrThrow({
+ profileDB: secondaryDB,
+ filter: {
+ ns: source.getFullName(),
+ "command.aggregate": source.getName(),
+ "command.$readPreference": {mode: "secondary"},
+ },
numExpectedMatches: 1
});
- // Test that the maxTimeMS value is used for both the listIndexes command for uniqueKey
- // validation as well as the $merge aggregation itself.
- (function testMaxTimeMS() {
- assert.commandWorked(source.runCommand("aggregate", {
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(shardKey)
- }
- }],
- cursor: {},
- maxTimeMS: maxTimeMS
- }));
-
- // Verify the profile entry for the aggregate on the source collection.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {
- ns: source.getFullName(),
- "command.aggregate": source.getName(),
- "command.maxTimeMS": maxTimeMS
- },
- numExpectedMatches: 1
- });
-
- // The listIndexes command should be sent to the primary shard only. Note that the
- // maxTimeMS will *not* show up in the profiler since the parameter is used as a timeout for
- // the remote command vs. part of the command itself.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 2
- });
- })();
-
- (function testTimeout() {
- // Configure the "maxTimeAlwaysTimeOut" fail point on the primary shard, which forces
- // mongod to throw if it receives an operation with a max time.
- assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
-
- // Test that the $merge correctly fails when the maxTimeMS is exceeded.
- const res = source.runCommand("aggregate", {
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(shardKey)
- }
- }],
- cursor: {},
- maxTimeMS: maxTimeMS
- });
- assert.commandFailedWithCode(
- res,
- ErrorCodes.MaxTimeMSExpired,
- "expected aggregate to fail with code " + ErrorCodes.MaxTimeMSExpired +
- " due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
-
- // The actual aggregate should not be in the profiler since the initial listIndexes should
- // have timed out.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {
- ns: source.getFullName(),
- "command.aggregate": source.getName(),
- "command.maxTimeMS": maxTimeMS
- },
- numExpectedMatches: 1
- });
-
- // Verify that there is an additional listIndexes profiler entry on the primary shard.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 3
- });
-
- assert.commandWorked(primaryDB.getSiblingDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
- })();
-
- // Test that setting a read preference on the $merge also applies to the listIndexes
- // command.
- (function testReadPreference() {
- const secondaryDB = st.rs0.getSecondary().getDB("test");
- assert.commandWorked(secondaryDB.setProfilingLevel(2));
-
- assert.commandWorked(source.runCommand("aggregate", {
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(shardKey)
- }
- }],
- cursor: {},
- $readPreference: {mode: "secondary"}
- }));
-
- // Verify that the profiler on the secondary includes an entry for the listIndexes.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: secondaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 1
- });
-
- // Verify that the primary shard does *not* have an additional listIndexes profiler entry.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryDB,
- filter: {ns: target.getFullName(), "command.listIndexes": target.getName()},
- numExpectedMatches: 3
- });
-
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: secondaryDB,
- filter: {
- ns: source.getFullName(),
- "command.aggregate": source.getName(),
- "command.$readPreference": {mode: "secondary"},
- },
- numExpectedMatches: 1
- });
-
- // Test that $out cannot be run against a secondary since it writes directly to a local temp
- // collection.
- assert.commandFailedWithCode(source.runCommand("aggregate", {
- pipeline: [{$out: "non_existent"}],
- cursor: {},
- $readPreference: {mode: "secondary"}
- }),
- 16994,
- "Expected $out to fail to create the temp collection.");
- })();
-
- st.stop();
+ // Test that $out cannot be run against a secondary since it writes directly to a local temp
+ // collection.
+ assert.commandFailedWithCode(
+ source.runCommand(
+ "aggregate",
+ {pipeline: [{$out: "non_existent"}], cursor: {}, $readPreference: {mode: "secondary"}}),
+ 16994,
+ "Expected $out to fail to create the temp collection.");
+})();
+
+st.stop();
})();
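
Every profiler assertion in this test reduces to counting documents in system.profile; profilerHasNumMatchingEntriesOrThrow (from jstests/libs/profiler.js) wraps that count and throws when it does not match. A minimal sketch of one such check, assuming a database 'profiledDB' with profiling level 2 already enabled (the names are illustrative):

    load("jstests/libs/profiler.js");
    // Expect exactly one listIndexes against the target namespace so far.
    profilerHasNumMatchingEntriesOrThrow({
        profileDB: profiledDB,
        filter: {ns: "test.target", "command.listIndexes": "target"},
        numExpectedMatches: 1
    });
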
diff --git a/jstests/sharding/merge_does_not_force_pipeline_split.js b/jstests/sharding/merge_does_not_force_pipeline_split.js
index 383452469aa..ce919537951 100644
--- a/jstests/sharding/merge_does_not_force_pipeline_split.js
+++ b/jstests/sharding/merge_does_not_force_pipeline_split.js
@@ -1,108 +1,108 @@
// Tests that a $merge stage does not force a pipeline to split into a "shards part" and a "merging
// part" if no other stage in the pipeline would force such a split.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB("test_db");
+const mongosDB = st.s.getDB("test_db");
- const inColl = mongosDB["inColl"];
- // Two different output collections will be sharded by different keys.
- const outCollById = mongosDB["outCollById"];
- const outCollBySK = mongosDB["outCollBySK"];
- st.shardColl(outCollById, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
- st.shardColl(outCollBySK, {sk: 1}, {sk: 500}, {sk: 500}, mongosDB.getName());
- const numDocs = 1000;
+const inColl = mongosDB["inColl"];
+// Two different output collections will be sharded by different keys.
+const outCollById = mongosDB["outCollById"];
+const outCollBySK = mongosDB["outCollBySK"];
+st.shardColl(outCollById, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
+st.shardColl(outCollBySK, {sk: 1}, {sk: 500}, {sk: 500}, mongosDB.getName());
+const numDocs = 1000;
- function insertData(coll) {
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, sk: numDocs - i});
- }
- assert.commandWorked(bulk.execute());
+function insertData(coll) {
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, sk: numDocs - i});
}
+ assert.commandWorked(bulk.execute());
+}
- // Shard the input collection.
- st.shardColl(inColl, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
+// Shard the input collection.
+st.shardColl(inColl, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
- // Insert some data to the input collection.
- insertData(inColl);
+// Insert some data to the input collection.
+insertData(inColl);
- function assertMergeRunsOnShards(explain) {
- assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
- assert(explain.splitPipeline.hasOwnProperty("shardsPart"), tojson(explain));
- assert.eq(
- explain.splitPipeline.shardsPart.filter(stage => stage.hasOwnProperty("$merge")).length,
- 1,
- tojson(explain));
- assert(explain.splitPipeline.hasOwnProperty("mergerPart"), tojson(explain));
- assert.eq([], explain.splitPipeline.mergerPart, tojson(explain));
- }
+function assertMergeRunsOnShards(explain) {
+ assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
+ assert(explain.splitPipeline.hasOwnProperty("shardsPart"), tojson(explain));
+ assert.eq(
+ explain.splitPipeline.shardsPart.filter(stage => stage.hasOwnProperty("$merge")).length,
+ 1,
+ tojson(explain));
+ assert(explain.splitPipeline.hasOwnProperty("mergerPart"), tojson(explain));
+ assert.eq([], explain.splitPipeline.mergerPart, tojson(explain));
+}
- // Test that a simple $merge can run in parallel. Note that we still expect a 'splitPipeline' in
- // the explain output, but the merging half should be empty to indicate that the entire thing is
- // executing in parallel on the shards.
+// Test that a simple $merge can run in parallel. Note that we still expect a 'splitPipeline' in
+// the explain output, but the merging half should be empty to indicate that the whole pipeline is
+// executing in parallel on the shards.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- assert.commandWorked(outCollById.remove({}));
- assert.commandWorked(outCollBySK.remove({}));
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assert.commandWorked(outCollById.remove({}));
+ assert.commandWorked(outCollBySK.remove({}));
- let explain = inColl.explain().aggregate([{
- $merge: {
- into: outCollById.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assertMergeRunsOnShards(explain);
- assert.eq(outCollById.find().itcount(), 0);
- // We expect the test to succeed for all $merge modes. However, the 'whenNotMatched: fail'
- // mode will cause the test to fail if the source collection has a document without a match
- // in the target collection. Similarly 'whenNotMatched: discard' will fail the assertion
- // below for the expected number of document in target collection. So we populate the target
- // collection with the same documents as in the source.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- insertData(outCollById);
+ let explain = inColl.explain().aggregate([{
+ $merge: {
+ into: outCollById.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
}
+ }]);
+ assertMergeRunsOnShards(explain);
+ assert.eq(outCollById.find().itcount(), 0);
+ // We expect the test to succeed for all $merge modes. However, the 'whenNotMatched: fail'
+ // mode will cause the test to fail if the source collection has a document without a match
+ // in the target collection. Similarly 'whenNotMatched: discard' will fail the assertion
+    // below for the expected number of documents in the target collection. So we populate the
+    // target collection with the same documents as in the source.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ insertData(outCollById);
+ }
- // Actually execute the pipeline and make sure it works as expected.
- assert.doesNotThrow(() => inColl.aggregate([{
- $merge: {
- into: outCollById.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- assert.eq(outCollById.find().itcount(), numDocs);
+ // Actually execute the pipeline and make sure it works as expected.
+ assert.doesNotThrow(() => inColl.aggregate([{
+ $merge: {
+ into: outCollById.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
+ assert.eq(outCollById.find().itcount(), numDocs);
- // Test the same thing but in a pipeline where the output collection's shard key differs
- // from the input collection's.
- explain = inColl.explain().aggregate([{
- $merge: {
- into: outCollBySK.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assertMergeRunsOnShards(explain);
- // Again, test that execution works as expected.
- assert.eq(outCollBySK.find().itcount(), 0);
+ // Test the same thing but in a pipeline where the output collection's shard key differs
+ // from the input collection's.
+ explain = inColl.explain().aggregate([{
+ $merge: {
+ into: outCollBySK.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assertMergeRunsOnShards(explain);
+ // Again, test that execution works as expected.
+ assert.eq(outCollBySK.find().itcount(), 0);
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- insertData(outCollBySK);
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ insertData(outCollBySK);
+ }
+ assert.doesNotThrow(() => inColl.aggregate([{
+ $merge: {
+ into: outCollBySK.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
}
- assert.doesNotThrow(() => inColl.aggregate([{
- $merge: {
- into: outCollBySK.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- assert.eq(outCollBySK.find().itcount(), numDocs);
- });
+ }]));
+ assert.eq(outCollBySK.find().itcount(), numDocs);
+});
- st.stop();
+st.stop();
}());
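
The shape assertMergeRunsOnShards checks for is worth spelling out: explain still reports a splitPipeline, but its mergerPart is an empty array, meaning every stage, $merge included, was scheduled on the shards. A hedged sketch of reading that output, with 'inColl' and 'outCollById' as in the test:

    const explain = inColl.explain().aggregate(
        [{$merge: {into: outCollById.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
    // No merging half on mongos: the whole pipeline runs in parallel on the shards.
    assert.eq([], explain.splitPipeline.mergerPart, tojson(explain));
    // The $merge stage itself shows up in the shards part exactly once.
    assert.eq(1,
              explain.splitPipeline.shardsPart.filter(s => s.hasOwnProperty("$merge")).length,
              tojson(explain));
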
diff --git a/jstests/sharding/merge_from_stale_mongos.js b/jstests/sharding/merge_from_stale_mongos.js
index d91d92dcb62..e7b7e42d548 100644
--- a/jstests/sharding/merge_from_stale_mongos.js
+++ b/jstests/sharding/merge_from_stale_mongos.js
@@ -1,247 +1,245 @@
// Tests for $merge against a stale mongos with combinations of sharded/unsharded source and target
// collections.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- const st = new ShardingTest({
- shards: 2,
- mongos: 4,
- });
-
- const freshMongos = st.s0.getDB(jsTestName());
- const staleMongosSource = st.s1.getDB(jsTestName());
- const staleMongosTarget = st.s2.getDB(jsTestName());
- const staleMongosBoth = st.s3.getDB(jsTestName());
-
- const sourceColl = freshMongos.getCollection("source");
- const targetColl = freshMongos.getCollection("target");
-
- // Enable sharding on the test DB and ensure its primary is shard 0.
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 4,
+});
+
+const freshMongos = st.s0.getDB(jsTestName());
+const staleMongosSource = st.s1.getDB(jsTestName());
+const staleMongosTarget = st.s2.getDB(jsTestName());
+const staleMongosBoth = st.s3.getDB(jsTestName());
+
+const sourceColl = freshMongos.getCollection("source");
+const targetColl = freshMongos.getCollection("target");
+
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(staleMongosSource.adminCommand({enableSharding: staleMongosSource.getName()}));
+st.ensurePrimaryShard(staleMongosSource.getName(), st.rs0.getURL());
+
+// Shards the collection 'coll' through 'mongos'.
+function shardCollWithMongos(mongos, coll) {
+ coll.drop();
+ // Shard the given collection on _id, split the collection into 2 chunks: [MinKey, 0) and
+ // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
+ assert.commandWorked(mongos.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
assert.commandWorked(
- staleMongosSource.adminCommand({enableSharding: staleMongosSource.getName()}));
- st.ensurePrimaryShard(staleMongosSource.getName(), st.rs0.getURL());
-
- // Shards the collection 'coll' through 'mongos'.
- function shardCollWithMongos(mongos, coll) {
- coll.drop();
- // Shard the given collection on _id, split the collection into 2 chunks: [MinKey, 0) and
- // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
- assert.commandWorked(
- mongos.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- }
-
- // Configures the two mongos, staleMongosSource and staleMongosTarget, to be stale on the source
- // and target collections, respectively. For instance, if 'shardedSource' is true then
- // staleMongosSource will believe that the source collection is unsharded.
- function setupStaleMongos({shardedSource, shardedTarget}) {
- // Initialize both mongos to believe the collections are unsharded.
+ mongos.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+}
+
+// Configures the two mongos, staleMongosSource and staleMongosTarget, to be stale on the source
+// and target collections, respectively. For instance, if 'shardedSource' is true then
+// staleMongosSource will believe that the source collection is unsharded.
+function setupStaleMongos({shardedSource, shardedTarget}) {
+ // Initialize both mongos to believe the collections are unsharded.
+ sourceColl.drop();
+ targetColl.drop();
+ assert.commandWorked(
+ staleMongosSource[sourceColl.getName()].insert({_id: "insert when unsharded (source)"}));
+ assert.commandWorked(
+ staleMongosSource[targetColl.getName()].insert({_id: "insert when unsharded (source)"}));
+ assert.commandWorked(
+ staleMongosTarget[sourceColl.getName()].insert({_id: "insert when unsharded (target)"}));
+ assert.commandWorked(
+ staleMongosTarget[targetColl.getName()].insert({_id: "insert when unsharded (target)"}));
+
+ if (shardedSource) {
+ // Shard the source collection through the staleMongosTarget mongos, keeping the
+ // staleMongosSource unaware.
+ shardCollWithMongos(staleMongosTarget, sourceColl);
+ } else {
+ // Shard the collection through staleMongosSource.
+ shardCollWithMongos(staleMongosSource, sourceColl);
+
+ // Then drop the collection, but do not recreate it yet as that will happen on the next
+ // insert later in the test.
sourceColl.drop();
- targetColl.drop();
- assert.commandWorked(staleMongosSource[sourceColl.getName()].insert(
- {_id: "insert when unsharded (source)"}));
- assert.commandWorked(staleMongosSource[targetColl.getName()].insert(
- {_id: "insert when unsharded (source)"}));
- assert.commandWorked(staleMongosTarget[sourceColl.getName()].insert(
- {_id: "insert when unsharded (target)"}));
- assert.commandWorked(staleMongosTarget[targetColl.getName()].insert(
- {_id: "insert when unsharded (target)"}));
-
- if (shardedSource) {
- // Shard the source collection through the staleMongosTarget mongos, keeping the
- // staleMongosSource unaware.
- shardCollWithMongos(staleMongosTarget, sourceColl);
- } else {
- // Shard the collection through staleMongosSource.
- shardCollWithMongos(staleMongosSource, sourceColl);
-
- // Then drop the collection, but do not recreate it yet as that will happen on the next
- // insert later in the test.
- sourceColl.drop();
- }
-
- if (shardedTarget) {
- // Shard the target collection through the staleMongosSource mongos, keeping the
- // staleMongosTarget unaware.
- shardCollWithMongos(staleMongosSource, targetColl);
- } else {
- // Shard the collection through staleMongosTarget.
- shardCollWithMongos(staleMongosTarget, targetColl);
-
- // Then drop the collection, but do not recreate it yet as that will happen on the next
- // insert later in the test.
- targetColl.drop();
- }
}
- // Runs a $merge with the given modes against each mongos in 'mongosList'. This method will wrap
- // 'mongosList' into a list if it is not an array.
- function runMergeTest(whenMatchedMode, whenNotMatchedMode, mongosList) {
- if (!(mongosList instanceof Array)) {
- mongosList = [mongosList];
- }
-
- mongosList.forEach(mongos => {
- targetColl.remove({});
- sourceColl.remove({});
- // Insert several documents into the source and target collection without any conflicts.
- // Note that the chunk split point is at {_id: 0}.
- assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
- assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
-
- mongos[sourceColl.getName()].aggregate([{
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
-
- // If whenNotMatchedMode is "discard", then the documents in the source collection will
- // not get written to the target since none of them match.
- assert.eq(whenNotMatchedMode == "discard" ? 3 : 6, targetColl.find().itcount());
- });
- }
+ if (shardedTarget) {
+ // Shard the target collection through the staleMongosSource mongos, keeping the
+ // staleMongosTarget unaware.
+ shardCollWithMongos(staleMongosSource, targetColl);
+ } else {
+ // Shard the collection through staleMongosTarget.
+ shardCollWithMongos(staleMongosTarget, targetColl);
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenNotMatchedMode == "fail")
- return;
-
- // For each mode, test the following scenarios:
- // * Both the source and target collections are sharded.
- // * Both the source and target collections are unsharded.
- // * Source collection is sharded and the target collection is unsharded.
- // * Source collection is unsharded and the target collection is sharded.
- setupStaleMongos({shardedSource: false, shardedTarget: false});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- setupStaleMongos({shardedSource: true, shardedTarget: true});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- setupStaleMongos({shardedSource: true, shardedTarget: false});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- setupStaleMongos({shardedSource: false, shardedTarget: true});
- runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
-
- //
- // The remaining tests run against a mongos which is stale with respect to BOTH the source
- // and target collections.
- //
- const sourceCollStale = staleMongosBoth.getCollection(sourceColl.getName());
- const targetCollStale = staleMongosBoth.getCollection(targetColl.getName());
-
- //
- // 1. Both source and target collections are sharded.
- //
- sourceCollStale.drop();
- targetCollStale.drop();
-
- // Insert into both collections through the stale mongos such that it believes the
- // collections exist and are unsharded.
- assert.commandWorked(sourceCollStale.insert({_id: 0}));
- assert.commandWorked(targetCollStale.insert({_id: 0}));
-
- shardCollWithMongos(freshMongos, sourceColl);
- shardCollWithMongos(freshMongos, targetColl);
-
- // Test against the stale mongos, which believes both collections are unsharded.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
-
- //
- // 2. Both source and target collections are unsharded.
- //
- sourceColl.drop();
+ // Then drop the collection, but do not recreate it yet as that will happen on the next
+ // insert later in the test.
targetColl.drop();
+ }
+}
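
The staleness machinery above exploits how mongos caches routing information: a mongos that last saw a collection as unsharded keeps that view until a routing error forces a refresh, so reshaping the collection through a different mongos leaves the first one stale. A compact sketch of priming one stale router, using 'freshMongos', 'staleMongosSource', and 'sourceColl' as defined in this test:

    // Prime the cache: staleMongosSource now believes 'source' is unsharded.
    assert.commandWorked(
        staleMongosSource[sourceColl.getName()].insert({_id: "prime the cache"}));
    // Reshape the collection through another mongos; staleMongosSource is not told.
    assert.commandWorked(freshMongos.adminCommand(
        {shardCollection: sourceColl.getFullName(), key: {_id: 1}}));
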
- // The collections were both dropped through a different mongos, so the stale mongos still
- // believes that they're sharded.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
-
- //
- // 3. Source collection is sharded and target collection is unsharded.
- //
- sourceCollStale.drop();
-
- // Insert into the source collection through the stale mongos such that it believes the
- // collection exists and is unsharded.
- assert.commandWorked(sourceCollStale.insert({_id: 0}));
-
- // Shard the source collection through the fresh mongos.
- shardCollWithMongos(freshMongos, sourceColl);
-
- // Shard the target through the stale mongos, but then drop and recreate it as unsharded
- // through a different mongos.
- shardCollWithMongos(staleMongosBoth, targetColl);
- targetColl.drop();
+// Runs a $merge with the given modes against each mongos in 'mongosList'. This method will
+// wrap 'mongosList' in an array if it is not one already.
+function runMergeTest(whenMatchedMode, whenNotMatchedMode, mongosList) {
+ if (!(mongosList instanceof Array)) {
+ mongosList = [mongosList];
+ }
- // At this point, the stale mongos believes the source collection is unsharded and the
- // target collection is sharded when in fact the reverse is true.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+ mongosList.forEach(mongos => {
+ targetColl.remove({});
+ sourceColl.remove({});
+ // Insert several documents into the source and target collections without any conflicts.
+ // Note that the chunk split point is at {_id: 0}.
+ assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
+ assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
+
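+ // Run the $merge through this (possibly stale) mongos; it should refresh its routing
+ // information as needed rather than fail or target the wrong shards.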
+ mongos[sourceColl.getName()].aggregate([{
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+
+ // If whenNotMatchedMode is "discard", then the documents in the source collection will
+ // not get written to the target since none of them match.
+ assert.eq(whenNotMatchedMode == "discard" ? 3 : 6, targetColl.find().itcount());
+ });
+}
+
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenNotMatchedMode == "fail")
+ return;
+
+ // For each mode, test the following scenarios:
+ // * Both the source and target collections are sharded.
+ // * Both the source and target collections are unsharded.
+ // * Source collection is sharded and the target collection is unsharded.
+ // * Source collection is unsharded and the target collection is sharded.
+ setupStaleMongos({shardedSource: false, shardedTarget: false});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
- //
- // 4. Source collection is unsharded and target collection is sharded.
- //
- sourceCollStale.drop();
- targetCollStale.drop();
+ setupStaleMongos({shardedSource: true, shardedTarget: true});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
- // Insert into the target collection through the stale mongos such that it believes the
- // collection exists and is unsharded.
- assert.commandWorked(targetCollStale.insert({_id: 0}));
+ setupStaleMongos({shardedSource: true, shardedTarget: false});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
- shardCollWithMongos(freshMongos, targetColl);
+ setupStaleMongos({shardedSource: false, shardedTarget: true});
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, [staleMongosSource, staleMongosTarget]);
+
+ //
+ // The remaining tests run against a mongos which is stale with respect to BOTH the source
+ // and target collections.
+ //
+ const sourceCollStale = staleMongosBoth.getCollection(sourceColl.getName());
+ const targetCollStale = staleMongosBoth.getCollection(targetColl.getName());
+
+ //
+ // 1. Both source and target collections are sharded.
+ //
+ sourceCollStale.drop();
+ targetCollStale.drop();
+
+ // Insert into both collections through the stale mongos such that it believes the
+ // collections exist and are unsharded.
+ assert.commandWorked(sourceCollStale.insert({_id: 0}));
+ assert.commandWorked(targetCollStale.insert({_id: 0}));
+
+ shardCollWithMongos(freshMongos, sourceColl);
+ shardCollWithMongos(freshMongos, targetColl);
+
+ // Test against the stale mongos, which believes both collections are unsharded.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+
+ //
+ // 2. Both source and target collections are unsharded.
+ //
+ sourceColl.drop();
+ targetColl.drop();
+
+ // The collections were both dropped through a different mongos, so the stale mongos still
+ // believes that they're sharded.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+
+ //
+ // 3. Source collection is sharded and target collection is unsharded.
+ //
+ sourceCollStale.drop();
+
+ // Insert into the source collection through the stale mongos such that it believes the
+ // collection exists and is unsharded.
+ assert.commandWorked(sourceCollStale.insert({_id: 0}));
+
+ // Shard the source collection through the fresh mongos.
+ shardCollWithMongos(freshMongos, sourceColl);
+
+ // Shard the target through the stale mongos, but then drop and recreate it as unsharded
+ // through a different mongos.
+ shardCollWithMongos(staleMongosBoth, targetColl);
+ targetColl.drop();
+
+ // At this point, the stale mongos believes the source collection is unsharded and the
+ // target collection is sharded when in fact the reverse is true.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+
+ //
+ // 4. Source collection is unsharded and target collection is sharded.
+ //
+ sourceCollStale.drop();
+ targetCollStale.drop();
+
+ // Insert into the target collection through the stale mongos such that it believes the
+ // collection exists and is unsharded.
+ assert.commandWorked(targetCollStale.insert({_id: 0}));
+
+ shardCollWithMongos(freshMongos, targetColl);
+
+ // Shard the source through the stale mongos, but then drop and recreate it as unsharded
+ // through a different mongos.
+ shardCollWithMongos(staleMongosBoth, sourceColl);
+ sourceColl.drop();
+
+ // At this point, the stale mongos believes the source collection is sharded and the target
+ // collection is unsharded when in fact the reverse is true.
+ runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
+});
+
+// Runs a legacy $out against each mongos in 'mongosList'. This method will wrap 'mongosList'
+// in an array if it is not one already.
+function runOutTest(mongosList) {
+ if (!(mongosList instanceof Array)) {
+ mongosList = [mongosList];
+ }
- // Shard the source through the stale mongos, but then drop and recreate it as unsharded
- // through a different mongos.
- shardCollWithMongos(staleMongosBoth, sourceColl);
- sourceColl.drop();
+ mongosList.forEach(mongos => {
+ targetColl.remove({});
+ sourceColl.remove({});
+ // Insert several documents into the source and target collections without any conflicts.
+ // Note that the chunk split point is at {_id: 0}.
+ assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
+ assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
- // At this point, the stale mongos believes the source collection is sharded and the target
- // collection is unsharded when in fact the reverse is true.
- runMergeTest(whenMatchedMode, whenNotMatchedMode, staleMongosBoth);
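+ // Note that the legacy $out replaces the target collection's contents entirely, so only
+ // the three source documents should remain afterwards.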
+ mongos[sourceColl.getName()].aggregate([{$out: targetColl.getName()}]);
+ assert.eq(3, targetColl.find().itcount());
});
+}
- // Runs a legacy $out against each mongos in 'mongosList'. This method will wrap 'mongosList'
- // into a list if it is not an array.
- function runOutTest(mongosList) {
- if (!(mongosList instanceof Array)) {
- mongosList = [mongosList];
- }
-
- mongosList.forEach(mongos => {
- targetColl.remove({});
- sourceColl.remove({});
- // Insert several documents into the source and target collection without any conflicts.
- // Note that the chunk split point is at {_id: 0}.
- assert.commandWorked(sourceColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
- assert.commandWorked(targetColl.insert([{_id: -2}, {_id: 2}, {_id: 3}]));
-
- mongos[sourceColl.getName()].aggregate([{$out: targetColl.getName()}]);
- assert.eq(3, targetColl.find().itcount());
- });
- }
-
- // Legacy $out will fail if the target collection is sharded.
- setupStaleMongos({shardedSource: false, shardedTarget: false});
- runOutTest([staleMongosSource, staleMongosTarget]);
+// Legacy $out will fail if the target collection is sharded.
+setupStaleMongos({shardedSource: false, shardedTarget: false});
+runOutTest([staleMongosSource, staleMongosTarget]);
- setupStaleMongos({shardedSource: true, shardedTarget: true});
- assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
- assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
+setupStaleMongos({shardedSource: true, shardedTarget: true});
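+// Both routers should refuse the legacy $out since the target is sharded; the asserted
+// error code differs with which collection's routing information each mongos has cached.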
+assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
+assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
- setupStaleMongos({shardedSource: true, shardedTarget: false});
- runOutTest([staleMongosSource, staleMongosTarget]);
+setupStaleMongos({shardedSource: true, shardedTarget: false});
+runOutTest([staleMongosSource, staleMongosTarget]);
- setupStaleMongos({shardedSource: false, shardedTarget: true});
- assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
- assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
+setupStaleMongos({shardedSource: false, shardedTarget: true});
+assert.eq(assert.throws(() => runOutTest(staleMongosSource)).code, 28769);
+assert.eq(assert.throws(() => runOutTest(staleMongosTarget)).code, 17017);
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/merge_hashed_shard_key.js b/jstests/sharding/merge_hashed_shard_key.js
index bd9e1e11475..86661c9c1b0 100644
--- a/jstests/sharding/merge_hashed_shard_key.js
+++ b/jstests/sharding/merge_hashed_shard_key.js
@@ -2,89 +2,88 @@
// when the "on" field is not explicitly specified and also when there is a unique, non-hashed index
// that matches the "on" field(s).
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
- // assertMergeFailsWithoutUniqueIndex,
- // assertMergeSucceedsWithExpectedUniqueIndex.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+ // assertMergeFailsWithoutUniqueIndex,
+ // assertMergeSucceedsWithExpectedUniqueIndex.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("merge_hashed_shard_key");
- const foreignDB = st.s0.getDB("merge_hashed_shard_key_foreign");
- const source = mongosDB.source;
- const target = mongosDB.target;
- source.drop();
- target.drop();
+const mongosDB = st.s0.getDB("merge_hashed_shard_key");
+const foreignDB = st.s0.getDB("merge_hashed_shard_key_foreign");
+const source = mongosDB.source;
+const target = mongosDB.target;
+source.drop();
+target.drop();
- assert.commandWorked(source.insert({placeholderDoc: 1}));
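+// A single placeholder document suffices here, since these tests exercise the unique
+// index requirements of $merge rather than the contents of the merged output.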
+assert.commandWorked(source.insert({placeholderDoc: 1}));
- function testHashedShardKey(shardKey, spec, prefixPipeline = []) {
- target.drop();
- st.shardColl(target, shardKey, spec);
+function testHashedShardKey(shardKey, spec, prefixPipeline = []) {
+ target.drop();
+ st.shardColl(target, shardKey, spec);
- // Test that $merge passes without specifying an "on" field.
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: source, target: target, prevStages: prefixPipeline});
+ // Test that $merge passes without specifying an "on" field.
+ assertMergeSucceedsWithExpectedUniqueIndex(
+ {source: source, target: target, prevStages: prefixPipeline});
- // Test that $merge fails even if the "on" fields matches the shardKey, since it isn't
- // unique.
- assertMergeFailsWithoutUniqueIndex({
- source: source,
- target: target,
- onFields: Object.keys(shardKey),
- prevStages: prefixPipeline
- });
+ // Test that $merge fails even if the "on" fields match the shardKey, since it isn't
+ // unique.
+ assertMergeFailsWithoutUniqueIndex({
+ source: source,
+ target: target,
+ onFields: Object.keys(shardKey),
+ prevStages: prefixPipeline
+ });
- // Test that the $merge passes if there exists a unique index prefixed on the hashed shard
- // key.
- const prefixedUniqueKey = Object.merge(shardKey, {extraField: 1});
- prefixPipeline = prefixPipeline.concat([{$addFields: {extraField: 1}}]);
- assert.commandWorked(target.createIndex(prefixedUniqueKey, {unique: true}));
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: source, target: target, prevStages: prefixPipeline});
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: source,
- target: target,
- onFields: Object.keys(prefixedUniqueKey),
- prevStages: prefixPipeline
- });
- }
+ // Test that the $merge passes if there exists a unique index prefixed on the hashed shard
+ // key.
+ const prefixedUniqueKey = Object.merge(shardKey, {extraField: 1});
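+ // The prefix pipeline must also produce 'extraField' so that every source document
+ // carries each field of the compound "on" specification.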
+ prefixPipeline = prefixPipeline.concat([{$addFields: {extraField: 1}}]);
+ assert.commandWorked(target.createIndex(prefixedUniqueKey, {unique: true}));
+ assertMergeSucceedsWithExpectedUniqueIndex(
+ {source: source, target: target, prevStages: prefixPipeline});
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: source,
+ target: target,
+ onFields: Object.keys(prefixedUniqueKey),
+ prevStages: prefixPipeline
+ });
+}
- //
- // Tests for a hashed non-id shard key.
- //
- let prevStage = [{$addFields: {hashedKey: 1}}];
- testHashedShardKey({hashedKey: 1}, {hashedKey: "hashed"}, prevStage);
+//
+// Tests for a hashed non-id shard key.
+//
+let prevStage = [{$addFields: {hashedKey: 1}}];
+testHashedShardKey({hashedKey: 1}, {hashedKey: "hashed"}, prevStage);
- //
- // Tests for a hashed non-id dotted path shard key.
- //
- prevStage = [{$addFields: {dotted: {path: 1}}}];
- testHashedShardKey({"dotted.path": 1}, {"dotted.path": "hashed"}, prevStage);
+//
+// Tests for a hashed non-id dotted path shard key.
+//
+prevStage = [{$addFields: {dotted: {path: 1}}}];
+testHashedShardKey({"dotted.path": 1}, {"dotted.path": "hashed"}, prevStage);
- //
- // Tests for a compound hashed shard key.
- //
- prevStage = [{$addFields: {hashedKey: {subField: 1}, nonHashedKey: 1}}];
- testHashedShardKey({"hashedKey.subField": 1, nonHashedKey: 1},
- {"hashedKey.subField": "hashed", nonHashedKey: 1},
- prevStage);
+//
+// Tests for a compound hashed shard key.
+//
+prevStage = [{$addFields: {hashedKey: {subField: 1}, nonHashedKey: 1}}];
+testHashedShardKey({"hashedKey.subField": 1, nonHashedKey: 1},
+ {"hashedKey.subField": "hashed", nonHashedKey: 1},
+ prevStage);
- //
- // Tests for a hashed _id shard key.
- //
- target.drop();
- st.shardColl(target, {_id: 1}, {_id: "hashed"});
+//
+// Tests for a hashed _id shard key.
+//
+target.drop();
+st.shardColl(target, {_id: 1}, {_id: "hashed"});
- // Test that $merge passes without specifying an "on" field.
- assertMergeSucceedsWithExpectedUniqueIndex({source: source, target: target});
+// Test that $merge passes without specifying an "on" field.
+assertMergeSucceedsWithExpectedUniqueIndex({source: source, target: target});
- // Test that $merge passes when the uniqueKey matches the shard key. Note that the _id index is
- // always create with {unique: true} regardless of whether the shard key was marked as unique
- // when the collection was sharded.
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: source, target: target, uniqueKey: {_id: 1}});
+// Test that $merge passes when the uniqueKey matches the shard key. Note that the _id index is
+// always created with {unique: true} regardless of whether the shard key was marked as unique
+// when the collection was sharded.
+assertMergeSucceedsWithExpectedUniqueIndex({source: source, target: target, uniqueKey: {_id: 1}});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/merge_on_fields.js b/jstests/sharding/merge_on_fields.js
index c3437603343..91345835ae7 100644
--- a/jstests/sharding/merge_on_fields.js
+++ b/jstests/sharding/merge_on_fields.js
@@ -1,87 +1,87 @@
// Tests that the "on" fields are correctly automatically generated when the user does not specify
// it in the $merge stage.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage'.
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage'.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("merge_on_fields");
- const firstColl = mongosDB.first;
- const secondColl = mongosDB.second;
- const sourceCollection = mongosDB.source;
- assert.commandWorked(sourceCollection.insert([{a: 1, b: 1, c: 1, d: 1}, {a: 2, b: 2, c: 2}]));
+const mongosDB = st.s0.getDB("merge_on_fields");
+const firstColl = mongosDB.first;
+const secondColl = mongosDB.second;
+const sourceCollection = mongosDB.source;
+assert.commandWorked(sourceCollection.insert([{a: 1, b: 1, c: 1, d: 1}, {a: 2, b: 2, c: 2}]));
- // Test that the unique key will be defaulted to the document key for a sharded collection.
- st.shardColl(firstColl.getName(),
- {a: 1, b: 1, c: 1},
- {a: 1, b: 1, c: 1},
- {a: 1, b: MinKey, c: MinKey},
- mongosDB.getName());
+// Test that the unique key will be defaulted to the document key for a sharded collection.
+st.shardColl(firstColl.getName(),
+ {a: 1, b: 1, c: 1},
+ {a: 1, b: 1, c: 1},
+ {a: 1, b: MinKey, c: MinKey},
+ mongosDB.getName());
- // Write a document to each chunk.
- assert.commandWorked(firstColl.insert({_id: 1, a: -3, b: -5, c: -6}));
- assert.commandWorked(firstColl.insert({_id: 2, a: 5, b: 3, c: 2}));
+// Write a document to each chunk.
+assert.commandWorked(firstColl.insert({_id: 1, a: -3, b: -5, c: -6}));
+assert.commandWorked(firstColl.insert({_id: 2, a: 5, b: 3, c: 2}));
- // Testing operations on the same sharded collection.
- let explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: firstColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b", "c"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+// Testing operations on the same sharded collection.
+let explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: firstColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b", "c"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b", "c"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b", "c"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- // Test it with a different collection and shard key pattern.
- st.shardColl(
- secondColl.getName(), {a: 1, b: 1}, {a: 1, b: 1}, {a: 1, b: MinKey}, mongosDB.getName());
+// Test it with a different collection and shard key pattern.
+st.shardColl(
+ secondColl.getName(), {a: 1, b: 1}, {a: 1, b: 1}, {a: 1, b: MinKey}, mongosDB.getName());
- // Write a document to each chunk.
- assert.commandWorked(secondColl.insert({_id: 3, a: -1, b: -3, c: 5}));
- assert.commandWorked(secondColl.insert({_id: 4, a: 4, b: 5, c: 6}));
+// Write a document to each chunk.
+assert.commandWorked(secondColl.insert({_id: 3, a: -1, b: -3, c: 5}));
+assert.commandWorked(secondColl.insert({_id: 4, a: 4, b: 5, c: 6}));
- explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: secondColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: secondColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- explainResult = sourceCollection.explain().aggregate(
- [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
- assert.setEq(new Set(["_id", "a", "b", "c"]),
- new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
+explainResult = sourceCollection.explain().aggregate(
+ [{$merge: {into: firstColl.getName(), whenMatched: "replace", whenNotMatched: "insert"}}]);
+assert.setEq(new Set(["_id", "a", "b", "c"]),
+ new Set(getAggPlanStage(explainResult, "$merge").$merge.on));
- // Test that the "on" field is defaulted to _id for a collection which does not exist.
- const doesNotExist = mongosDB.doesNotExist;
- doesNotExist.drop();
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- explainResult = sourceCollection.explain().aggregate([{
- $merge: {
- into: doesNotExist.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
- });
+// Test that the "on" field is defaulted to _id for a collection which does not exist.
+const doesNotExist = mongosDB.doesNotExist;
+doesNotExist.drop();
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ explainResult = sourceCollection.explain().aggregate([{
+ $merge: {
+ into: doesNotExist.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
+});
- // Test that the "on" field is defaulted to _id for an unsharded collection.
- const unsharded = mongosDB.unsharded;
- unsharded.drop();
- assert.commandWorked(unsharded.insert({x: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- explainResult = sourceCollection.explain().aggregate([{
- $merge: {
- into: unsharded.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]);
- assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
- });
+// Test that the "on" field is defaulted to _id for an unsharded collection.
+const unsharded = mongosDB.unsharded;
+unsharded.drop();
+assert.commandWorked(unsharded.insert({x: 1}));
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ explainResult = sourceCollection.explain().aggregate([{
+ $merge: {
+ into: unsharded.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]);
+ assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on);
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/merge_requires_unique_index.js b/jstests/sharding/merge_requires_unique_index.js
index 78ee6c7f9eb..e42a49e5dce 100644
--- a/jstests/sharding/merge_requires_unique_index.js
+++ b/jstests/sharding/merge_requires_unique_index.js
@@ -3,204 +3,235 @@
// collator-compatible index in the index catalog. This is meant to test sharding-related
// configurations that are not covered by the aggregation passthrough suites.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
- // assertMergeFailsWithoutUniqueIndex,
- // assertMergeSucceedsWithExpectedUniqueIndex.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+ // assertMergeFailsWithoutUniqueIndex,
+ // assertMergeSucceedsWithExpectedUniqueIndex.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("merge_requires_unique_index");
- const foreignDB = st.s0.getDB("merge_requires_unique_index_foreign");
- const sourceColl = mongosDB.source;
- let targetColl = mongosDB.target;
- sourceColl.drop();
+const mongosDB = st.s0.getDB("merge_requires_unique_index");
+const foreignDB = st.s0.getDB("merge_requires_unique_index_foreign");
+const sourceColl = mongosDB.source;
+let targetColl = mongosDB.target;
+sourceColl.drop();
- // Enable sharding on the test DB and ensure that shard0 is the primary.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure that shard0 is the primary.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Enable sharding on the foreign DB, except ensure that shard1 is the primary shard.
- assert.commandWorked(foreignDB.adminCommand({enableSharding: foreignDB.getName()}));
- st.ensurePrimaryShard(foreignDB.getName(), st.rs1.getURL());
+// Enable sharding on the foreign DB, but ensure that shard1 is the primary shard.
+assert.commandWorked(foreignDB.adminCommand({enableSharding: foreignDB.getName()}));
+st.ensurePrimaryShard(foreignDB.getName(), st.rs1.getURL());
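+// Placing the foreign DB's primary on the other shard helps exercise cross-shard writes
+// when merging into the foreign database.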
- // Increase the log verbosity for sharding, in the hope of getting a clearer picture of the
- // cluster writer as part of BF-11106. This should be removed once BF-11106 is fixed.
- st.shard0.getDB("admin").setLogLevel(4, 'sharding');
- st.shard1.getDB("admin").setLogLevel(4, 'sharding');
+// Increase the log verbosity for sharding, in the hope of getting a clearer picture of the
+// cluster writer as part of BF-11106. This should be removed once BF-11106 is fixed.
+st.shard0.getDB("admin").setLogLevel(4, 'sharding');
+st.shard1.getDB("admin").setLogLevel(4, 'sharding');
- function resetTargetColl(shardKey, split) {
- targetColl.drop();
- // Shard the target collection, and set the unique flag to ensure that there's a unique
- // index on the shard key.
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: targetColl.getFullName(), key: shardKey, unique: true}));
- assert.commandWorked(
- mongosDB.adminCommand({split: targetColl.getFullName(), middle: split}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: targetColl.getFullName(), find: split, to: st.rs1.getURL()}));
- }
+function resetTargetColl(shardKey, split) {
+ targetColl.drop();
+ // Shard the target collection, and set the unique flag to ensure that there's a unique
+ // index on the shard key.
+ assert.commandWorked(mongosDB.adminCommand(
+ {shardCollection: targetColl.getFullName(), key: shardKey, unique: true}));
+ assert.commandWorked(mongosDB.adminCommand({split: targetColl.getFullName(), middle: split}));
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: targetColl.getFullName(), find: split, to: st.rs1.getURL()}));
+}
- function runOnFieldsTests(targetShardKey, targetSplit) {
- jsTestLog("Running unique key tests for target shard key " + tojson(targetShardKey));
- resetTargetColl(targetShardKey, targetSplit);
+function runOnFieldsTests(targetShardKey, targetSplit) {
+ jsTestLog("Running unique key tests for target shard key " + tojson(targetShardKey));
+ resetTargetColl(targetShardKey, targetSplit);
- // Not specifying "on" fields should always pass.
- assertMergeSucceedsWithExpectedUniqueIndex({source: sourceColl, target: targetColl});
+ // Not specifying "on" fields should always pass.
+ assertMergeSucceedsWithExpectedUniqueIndex({source: sourceColl, target: targetColl});
- // Since the target collection is sharded with a unique shard key, specifying "on" fields
- // that is equal to the shard key should be valid.
- assertMergeSucceedsWithExpectedUniqueIndex(
- {source: sourceColl, target: targetColl, onFields: Object.keys(targetShardKey)});
+ // Since the target collection is sharded with a unique shard key, specifying "on" fields
+ // that are equal to the shard key should be valid.
+ assertMergeSucceedsWithExpectedUniqueIndex(
+ {source: sourceColl, target: targetColl, onFields: Object.keys(targetShardKey)});
- // Create a compound "on" fields consisting of the shard key and one additional field.
- let prefixPipeline = [{$addFields: {newField: 1}}];
- const indexSpec = Object.merge(targetShardKey, {newField: 1});
+ // Create a compound "on" fields consisting of the shard key and one additional field.
+ let prefixPipeline = [{$addFields: {newField: 1}}];
+ const indexSpec = Object.merge(targetShardKey, {newField: 1});
- // Expect the $merge to fail since we haven't created a unique index on the compound
- // "on" fields.
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // Expect the $merge to fail since we haven't created a unique index on the compound
+ // "on" fields.
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Create the unique index and verify that the "on" fields is now valid.
- assert.commandWorked(targetColl.createIndex(indexSpec, {unique: true}));
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: sourceColl,
- target: targetColl,
- onFields: Object.keys(indexSpec),
- prevStages: prefixPipeline
- });
+ // Create the unique index and verify that the "on" fields is now valid.
+ assert.commandWorked(targetColl.createIndex(indexSpec, {unique: true}));
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: sourceColl,
+ target: targetColl,
+ onFields: Object.keys(indexSpec),
+ prevStages: prefixPipeline
+ });
- // Create a non-unique index and make sure that doesn't work.
- assert.commandWorked(targetColl.dropIndex(indexSpec));
- assert.commandWorked(targetColl.createIndex(indexSpec));
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // Create a non-unique index and make sure that doesn't work.
+ assert.commandWorked(targetColl.dropIndex(indexSpec));
+ assert.commandWorked(targetColl.createIndex(indexSpec));
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Test that a unique, partial index on the "on" fields cannot be used to satisfy the
- // requirement.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(targetColl.createIndex(
- indexSpec, {unique: true, partialFilterExpression: {a: {$gte: 2}}}));
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // Test that a unique, partial index on the "on" fields cannot be used to satisfy the
+ // requirement.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(
+ targetColl.createIndex(indexSpec, {unique: true, partialFilterExpression: {a: {$gte: 2}}}));
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Test that a unique index on the "on" fields cannot be used to satisfy the requirement if
- // it has a different collation.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(
- targetColl.createIndex(indexSpec, {unique: true, collation: {locale: "en_US"}}));
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- options: {collation: {locale: "en"}},
- prevStages: prefixPipeline
- });
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- options: {collation: {locale: "simple"}},
- prevStages: prefixPipeline
- });
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- options: {collation: {locale: "en_US", strength: 1}},
- prevStages: prefixPipeline
- });
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: sourceColl,
- target: targetColl,
- onFields: Object.keys(indexSpec),
- options: {collation: {locale: "en_US"}},
- prevStages: prefixPipeline
- });
+ // Test that a unique index on the "on" fields cannot be used to satisfy the requirement if
+ // it has a different collation.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(
+ targetColl.createIndex(indexSpec, {unique: true, collation: {locale: "en_US"}}));
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ options: {collation: {locale: "en"}},
+ prevStages: prefixPipeline
+ });
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ options: {collation: {locale: "simple"}},
+ prevStages: prefixPipeline
+ });
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ options: {collation: {locale: "en_US", strength: 1}},
+ prevStages: prefixPipeline
+ });
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: sourceColl,
+ target: targetColl,
+ onFields: Object.keys(indexSpec),
+ options: {collation: {locale: "en_US"}},
+ prevStages: prefixPipeline
+ });
- // Test that a unique index with dotted field names can be used.
- resetTargetColl(targetShardKey, targetSplit);
- const dottedPathIndexSpec = Object.merge(targetShardKey, {"newField.subField": 1});
- assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
+ // Test that a unique index with dotted field names can be used.
+ resetTargetColl(targetShardKey, targetSplit);
+ const dottedPathIndexSpec = Object.merge(targetShardKey, {"newField.subField": 1});
+ assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
- // No longer a supporting index on the original compound "on" fields.
- assertMergeFailsWithoutUniqueIndex({
- source: sourceColl,
- onFields: Object.keys(indexSpec),
- target: targetColl,
- prevStages: prefixPipeline
- });
+ // No longer a supporting index on the original compound "on" fields.
+ assertMergeFailsWithoutUniqueIndex({
+ source: sourceColl,
+ onFields: Object.keys(indexSpec),
+ target: targetColl,
+ prevStages: prefixPipeline
+ });
- // Test that an embedded object matching the "on" fields is valid.
- prefixPipeline = [{$addFields: {"newField.subField": 5}}];
- assertMergeSucceedsWithExpectedUniqueIndex({
- source: sourceColl,
- target: targetColl,
- onFields: Object.keys(dottedPathIndexSpec),
- prevStages: prefixPipeline
- });
+ // Test that an embedded object matching the "on" fields is valid.
+ prefixPipeline = [{$addFields: {"newField.subField": 5}}];
+ assertMergeSucceedsWithExpectedUniqueIndex({
+ source: sourceColl,
+ target: targetColl,
+ onFields: Object.keys(dottedPathIndexSpec),
+ prevStages: prefixPipeline
+ });
- // Test that we cannot use arrays with a dotted path within a $merge.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
- withEachMergeMode(
- ({whenMatchedMode, whenNotMatchedMode}) => {
- assertErrorCode(
- sourceColl,
- [
- {
- $replaceRoot:
- {newRoot: {$mergeObjects: ["$$ROOT", {newField: [{subField: 1}]}]}}
- },
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
+ // Test that we cannot use arrays with a dotted path within a $merge.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assertErrorCode(sourceColl,
+ [
+ {
+ $replaceRoot: {
+ newRoot:
+ {$mergeObjects: ["$$ROOT", {newField: [{subField: 1}]}]}
+ }
},
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: Object.keys(dottedPathIndexSpec)
- }
- }
- ],
- 51132);
- });
+ {
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: Object.keys(dottedPathIndexSpec)
+ }
+ }
+ ],
+ 51132);
+ });
- // Test that a unique index that is multikey can still be used.
- resetTargetColl(targetShardKey, targetSplit);
- assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
- assert.commandWorked(targetColl.insert(
- Object.merge(targetShardKey, {newField: [{subField: "hi"}, {subField: "hello"}]})));
- assert.commandWorked(sourceColl.update(
- {}, {$set: {newField: {subField: "hi"}, proofOfUpdate: "PROOF"}}, {multi: true}));
+ // Test that a unique index that is multikey can still be used.
+ resetTargetColl(targetShardKey, targetSplit);
+ assert.commandWorked(targetColl.createIndex(dottedPathIndexSpec, {unique: true}));
+ assert.commandWorked(targetColl.insert(
+ Object.merge(targetShardKey, {newField: [{subField: "hi"}, {subField: "hello"}]})));
+ assert.commandWorked(sourceColl.update(
+ {}, {$set: {newField: {subField: "hi"}, proofOfUpdate: "PROOF"}}, {multi: true}));
- // If whenMatched is "replace" and whenNotMatched is "insert", expect the command to
- // fail if the "on" fields does not contain _id, since a replacement-style update will fail
- // if attempting to modify _id.
- if (dottedPathIndexSpec.hasOwnProperty("_id")) {
- assert.doesNotThrow(() => sourceColl.aggregate([{
+ // If whenMatched is "replace" and whenNotMatched is "insert", expect the command to
+ // fail if the "on" fields do not contain _id, since a replacement-style update will fail
+ // if attempting to modify _id.
+ if (dottedPathIndexSpec.hasOwnProperty("_id")) {
+ assert.doesNotThrow(() => sourceColl.aggregate([{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(dottedPathIndexSpec)
+ }
+ }]));
+ assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
+ {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
+ {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
+ } else {
+ assertErrMsgContains(sourceColl,
+ [{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: "replace",
+ whenNotMatched: "insert",
+ on: Object.keys(dottedPathIndexSpec)
+ }
+ }],
+ ErrorCodes.ImmutableField,
+ "did you attempt to modify the _id or the shard key?");
+
+ assert.doesNotThrow(() => sourceColl.aggregate([
+ {$project: {_id: 0}},
+ {
$merge: {
into: {
db: targetColl.getDB().getName(),
@@ -210,85 +241,53 @@
whenNotMatched: "insert",
on: Object.keys(dottedPathIndexSpec)
}
- }]));
- assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
- {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
- {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
- } else {
- assertErrMsgContains(sourceColl,
- [{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(dottedPathIndexSpec)
- }
- }],
- ErrorCodes.ImmutableField,
- "did you attempt to modify the _id or the shard key?");
-
- assert.doesNotThrow(() => sourceColl.aggregate([
- {$project: {_id: 0}},
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert",
- on: Object.keys(dottedPathIndexSpec)
- }
- }
- ]));
- assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
- {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
- {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
- }
+ }
+ ]));
+ assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
+ {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
+ {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
}
+}
- function testAgainstDB(targetDB) {
- targetColl = targetDB["target"];
- targetColl.drop();
+function testAgainstDB(targetDB) {
+ targetColl = targetDB["target"];
+ targetColl.drop();
- //
- // Test unsharded source and sharded target collections.
- //
- let targetShardKey = {_id: 1, a: 1, b: 1};
- let splitPoint = {_id: 0, a: 0, b: 0};
- sourceColl.drop();
- assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
- runOnFieldsTests(targetShardKey, splitPoint);
+ //
+ // Test unsharded source and sharded target collections.
+ //
+ let targetShardKey = {_id: 1, a: 1, b: 1};
+ let splitPoint = {_id: 0, a: 0, b: 0};
+ sourceColl.drop();
+ assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
+ runOnFieldsTests(targetShardKey, splitPoint);
- // Test with a shard key that does *not* include _id.
- targetShardKey = {a: 1, b: 1};
- splitPoint = {a: 0, b: 0};
- runOnFieldsTests(targetShardKey, splitPoint);
+ // Test with a shard key that does *not* include _id.
+ targetShardKey = {a: 1, b: 1};
+ splitPoint = {a: 0, b: 0};
+ runOnFieldsTests(targetShardKey, splitPoint);
- //
- // Test both source and target collections as sharded.
- //
- targetShardKey = {_id: 1, a: 1, b: 1};
- splitPoint = {_id: 0, a: 0, b: 0};
- sourceColl.drop();
- st.shardColl(sourceColl.getName(), {a: 1}, {a: 0}, {a: 1}, mongosDB.getName());
- assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
- runOnFieldsTests(targetShardKey, splitPoint);
+ //
+ // Test both source and target collections as sharded.
+ //
+ targetShardKey = {_id: 1, a: 1, b: 1};
+ splitPoint = {_id: 0, a: 0, b: 0};
+ sourceColl.drop();
+ st.shardColl(sourceColl.getName(), {a: 1}, {a: 0}, {a: 1}, mongosDB.getName());
+ assert.commandWorked(sourceColl.insert([{a: 0, b: 0}, {a: 1, b: 1}]));
+ runOnFieldsTests(targetShardKey, splitPoint);
- // Re-run the test with a shard key that does *not* include _id.
- targetShardKey = {a: 1, b: 1};
- splitPoint = {a: 0, b: 0};
- runOnFieldsTests(targetShardKey, splitPoint);
- }
+ // Re-run the test with a shard key that does *not* include _id.
+ targetShardKey = {a: 1, b: 1};
+ splitPoint = {a: 0, b: 0};
+ runOnFieldsTests(targetShardKey, splitPoint);
+}
- // First test $merge to the same database as the source.
- testAgainstDB(mongosDB);
+// First test $merge to the same database as the source.
+testAgainstDB(mongosDB);
- // Then test against a foreign database, with the same expected behavior.
- testAgainstDB(foreignDB);
+// Then test against a foreign database, with the same expected behavior.
+testAgainstDB(foreignDB);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/merge_stale_on_fields.js b/jstests/sharding/merge_stale_on_fields.js
index 685840c527c..87a48a0482c 100644
--- a/jstests/sharding/merge_stale_on_fields.js
+++ b/jstests/sharding/merge_stale_on_fields.js
@@ -1,93 +1,93 @@
// Tests that an $merge stage is able to default the "on" fields to the correct value - even if one
// or more of the involved nodes has a stale cache of the routing information.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
-
- const st = new ShardingTest({shards: 2, mongos: 2});
-
- const dbName = "merge_stale_unique_key";
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
- const source = st.s0.getDB(dbName).source;
- const target = st.s0.getDB(dbName).target;
-
- // Test that an $merge through a stale mongos can still use the correct "on" fields and succeed.
- (function testDefaultOnFieldsIsRecent() {
- const freshMongos = st.s0;
- const staleMongos = st.s1;
-
- // Set up two collections for an aggregate with an $merge: The source collection will be
- // unsharded and the target collection will be sharded amongst the two shards.
- const staleMongosDB = staleMongos.getDB(dbName);
- st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1});
-
- (function setupStaleMongos() {
- // Shard the collection through 'staleMongos', setting up 'staleMongos' to believe the
- // collection is sharded by {sk: 1, _id: 1}.
- assert.commandWorked(staleMongosDB.adminCommand(
- {shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
- // Perform a query through that mongos to ensure the cache is populated.
- assert.eq(0, staleMongosDB[target.getName()].find().itcount());
-
- // Drop the collection from the other mongos - it is no longer sharded but the stale
- // mongos doesn't know that yet.
- target.drop();
- }());
-
- // At this point 'staleMongos' will believe that the target collection is sharded. This
- // should not prevent it from running an $merge without "on" fields specified.
- // Specifically, the mongos should force a refresh of its cache before defaulting the "on"
- // fields.
- assert.commandWorked(source.insert({_id: 'seed'}));
-
- // If we had used the stale "on" fields, this aggregation would fail since the documents do
- // not have an 'sk' field.
- assert.doesNotThrow(() => staleMongosDB[source.getName()].aggregate([
- {$merge: {into: target.getName(), whenMatched: 'fail', whenNotMatched: 'insert'}}
- ]));
- assert.eq(target.find().toArray(), [{_id: 'seed'}]);
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode,
+
+const st = new ShardingTest({shards: 2, mongos: 2});
+
+const dbName = "merge_stale_unique_key";
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+
+const source = st.s0.getDB(dbName).source;
+const target = st.s0.getDB(dbName).target;
+
+// Test that an $merge through a stale mongos can still use the correct "on" fields and succeed.
+(function testDefaultOnFieldsIsRecent() {
+ const freshMongos = st.s0;
+ const staleMongos = st.s1;
+
+ // Set up two collections for an aggregate with an $merge: The source collection will be
+ // unsharded and the target collection will be sharded amongst the two shards.
+ const staleMongosDB = staleMongos.getDB(dbName);
+ st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1});
+
+ (function setupStaleMongos() {
+ // Shard the collection through 'staleMongos', setting up 'staleMongos' to believe the
+ // collection is sharded by {sk: 1, _id: 1}.
+ assert.commandWorked(staleMongosDB.adminCommand(
+ {shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
+ // Perform a query through that mongos to ensure the cache is populated.
+ assert.eq(0, staleMongosDB[target.getName()].find().itcount());
+
+ // Drop the collection from the other mongos - it is no longer sharded but the stale
+ // mongos doesn't know that yet.
target.drop();
}());
- // Test that if the collection is dropped and re-sharded during the course of the aggregation
- // that the operation will fail rather than proceed with the old shard key.
- function testEpochChangeDuringAgg({mergeSpec, failpoint, failpointData}) {
- // Converts a single string or an array of strings into it's object spec form. For instance,
- // for input ["a", "b"] the returned object would be {a: 1, b: 1}.
- function indexSpecFromOnFields(onFields) {
- let spec = {};
- if (typeof(onFields) == "string") {
- spec[onFields] = 1;
- } else {
- onFields.forEach((field) => {
- spec[field] = 1;
- });
- }
- return spec;
- }
+ // At this point 'staleMongos' will believe that the target collection is sharded. This
+ // should not prevent it from running an $merge without "on" fields specified.
+ // Specifically, the mongos should force a refresh of its cache before defaulting the "on"
+ // fields.
+ assert.commandWorked(source.insert({_id: 'seed'}));
+
+ // If we had used the stale "on" fields, this aggregation would fail since the documents do
+ // not have an 'sk' field.
+ assert.doesNotThrow(
+ () => staleMongosDB[source.getName()].aggregate(
+ [{$merge: {into: target.getName(), whenMatched: 'fail', whenNotMatched: 'insert'}}]));
+ assert.eq(target.find().toArray(), [{_id: 'seed'}]);
+ target.drop();
+}());
- target.drop();
- if (mergeSpec.hasOwnProperty('on')) {
- assert.commandWorked(
- target.createIndex(indexSpecFromOnFields(mergeSpec.on), {unique: true}));
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: target.getFullName(), key: indexSpecFromOnFields(mergeSpec.on)}));
+// Test that if the collection is dropped and re-sharded during the course of the aggregation,
+// the operation will fail rather than proceed with the old shard key.
+function testEpochChangeDuringAgg({mergeSpec, failpoint, failpointData}) {
+ // Converts a single string or an array of strings into its object spec form. For instance,
+ // for input ["a", "b"] the returned object would be {a: 1, b: 1}.
+ function indexSpecFromOnFields(onFields) {
+ let spec = {};
+ if (typeof (onFields) == "string") {
+ spec[onFields] = 1;
} else {
- assert.commandWorked(
- st.s.adminCommand({shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
+ onFields.forEach((field) => {
+ spec[field] = 1;
+ });
}
+ return spec;
+ }
- // Use a failpoint to make the query feeding into the aggregate hang while we drop the
- // collection.
- [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
- assert.commandWorked(mongod.adminCommand(
- {configureFailPoint: failpoint, mode: "alwaysOn", data: failpointData || {}}));
- });
- let parallelShellJoiner;
- try {
- let parallelCode = `
+ target.drop();
+ if (mergeSpec.hasOwnProperty('on')) {
+ assert.commandWorked(
+ target.createIndex(indexSpecFromOnFields(mergeSpec.on), {unique: true}));
+ assert.commandWorked(st.s.adminCommand(
+ {shardCollection: target.getFullName(), key: indexSpecFromOnFields(mergeSpec.on)}));
+ } else {
+ assert.commandWorked(
+ st.s.adminCommand({shardCollection: target.getFullName(), key: {sk: 1, _id: 1}}));
+ }
+
+ // Use a failpoint to make the query feeding into the aggregate hang while we drop the
+ // collection.
+ [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
+ assert.commandWorked(mongod.adminCommand(
+ {configureFailPoint: failpoint, mode: "alwaysOn", data: failpointData || {}}));
+ });
+ let parallelShellJoiner;
+ try {
+ let parallelCode = `
const source = db.getSiblingDB("${dbName}").${source.getName()};
const error = assert.throws(() => source.aggregate([
{$addFields: {sk: "$_id"}},
@@ -96,103 +96,101 @@
assert.eq(error.code, ErrorCodes.StaleEpoch);
`;
- if (mergeSpec.hasOwnProperty("on")) {
- // If a user specifies their own "on" fields, we don't need to fail an aggregation
- // if the collection is dropped and recreated or the epoch otherwise changes. We are
- // allowed to fail such an operation should we choose to in the future, but for now
- // we don't expect to because we do not do anything special on mongos to ensure the
- // catalog cache is up to date, so do not want to attach mongos's believed epoch to
- // the command for the shards.
- parallelCode = `
+ if (mergeSpec.hasOwnProperty("on")) {
+ // If a user specifies their own "on" fields, we don't need to fail an aggregation
+ // if the collection is dropped and recreated or the epoch otherwise changes. We are
+ // allowed to fail such an operation should we choose to in the future, but for now
+ // we don't expect to because we do not do anything special on mongos to ensure the
+ // catalog cache is up to date, so we do not want to attach mongos's believed epoch to
+ // the command for the shards.
+ parallelCode = `
const source = db.getSiblingDB("${dbName}").${source.getName()};
assert.doesNotThrow(() => source.aggregate([
{$addFields: {sk: "$_id"}},
{$merge: ${tojsononeline(mergeSpec)}}
]));
`;
- }
-
- parallelShellJoiner = startParallelShell(parallelCode, st.s.port);
-
- // Wait for the merging $merge to appear in the currentOp output from the shards. We
- // should see that the $merge stage has an 'epoch' field serialized from the mongos.
- const getAggOps = function() {
- return st.s.getDB("admin")
- .aggregate([
- {$currentOp: {}},
- {$match: {"cursor.originatingCommand.pipeline": {$exists: true}}}
- ])
- .toArray();
- };
- const hasMergeRunning = function() {
- return getAggOps()
- .filter((op) => {
- const pipeline = op.cursor.originatingCommand.pipeline;
- return pipeline.length > 0 &&
- pipeline[pipeline.length - 1].hasOwnProperty("$merge");
- })
- .length >= 1;
- };
- assert.soon(hasMergeRunning, () => tojson(getAggOps()));
-
- // Drop the collection so that the epoch changes.
- target.drop();
- } finally {
- [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
- assert.commandWorked(
- mongod.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- });
}
- parallelShellJoiner();
- }
- // Insert enough documents to force a yield.
- const bulk = source.initializeUnorderedBulkOp();
- for (let i = 0; i < 1000; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
-
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause a different assertion error from the one
- // expected.
- if (whenNotMatchedMode == "fail")
- return;
-
- testEpochChangeDuringAgg({
- mergeSpec: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- },
- failpoint: "setYieldAllLocksHang",
- failpointData: {namespace: source.getFullName()}
- });
- testEpochChangeDuringAgg({
- mergeSpec: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: "sk"
- },
- failpoint: "setYieldAllLocksHang",
- failpointData: {namespace: source.getFullName()}
+ parallelShellJoiner = startParallelShell(parallelCode, st.s.port);
+
+ // Wait for the merging $merge to appear in the currentOp output from the shards. We
+ // should see that the $merge stage has an 'epoch' field serialized from the mongos.
+ const getAggOps = function() {
+ return st.s.getDB("admin")
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"cursor.originatingCommand.pipeline": {$exists: true}}}
+ ])
+ .toArray();
+ };
+ const hasMergeRunning = function() {
+ return getAggOps()
+ .filter((op) => {
+ const pipeline = op.cursor.originatingCommand.pipeline;
+ return pipeline.length > 0 &&
+ pipeline[pipeline.length - 1].hasOwnProperty("$merge");
+ })
+ .length >= 1;
+ };
+ assert.soon(hasMergeRunning, () => tojson(getAggOps()));
+
+ // Drop the collection so that the epoch changes.
+ target.drop();
+ } finally {
+ [st.rs0.getPrimary(), st.rs1.getPrimary()].forEach((mongod) => {
+ assert.commandWorked(mongod.adminCommand({configureFailPoint: failpoint, mode: "off"}));
});
+ }
+ parallelShellJoiner();
+}
+
+// Insert enough documents to force a yield.
+const bulk = source.initializeUnorderedBulkOp();
+for (let i = 0; i < 1000; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause a different assertion error from the one
+ // expected.
+ if (whenNotMatchedMode == "fail")
+ return;
- });
- // Test with some different failpoints to prove we will detect an epoch change in the middle
- // of the inserts or updates.
testEpochChangeDuringAgg({
- mergeSpec: {into: target.getName(), whenMatched: "fail", whenNotMatched: "insert"},
- failpoint: "hangDuringBatchInsert",
- failpointData: {nss: target.getFullName()}
+ mergeSpec: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ },
+ failpoint: "setYieldAllLocksHang",
+ failpointData: {namespace: source.getFullName()}
});
testEpochChangeDuringAgg({
- mergeSpec: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert"},
- failpoint: "hangDuringBatchUpdate",
- failpointData: {nss: target.getFullName()}
+ mergeSpec: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: "sk"
+ },
+ failpoint: "setYieldAllLocksHang",
+ failpointData: {namespace: source.getFullName()}
});
-
- st.stop();
+});
+// Test with some different failpoints to prove we will detect an epoch change in the middle
+// of the inserts or updates.
+testEpochChangeDuringAgg({
+ mergeSpec: {into: target.getName(), whenMatched: "fail", whenNotMatched: "insert"},
+ failpoint: "hangDuringBatchInsert",
+ failpointData: {nss: target.getFullName()}
+});
+testEpochChangeDuringAgg({
+ mergeSpec: {into: target.getName(), whenMatched: "replace", whenNotMatched: "insert"},
+ failpoint: "hangDuringBatchUpdate",
+ failpointData: {nss: target.getFullName()}
+});
+
+st.stop();
}());
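
The test above follows a hang-and-perturb pattern used throughout this patch: enable a failpoint, start the aggregation in a parallel shell, wait for it to show up in currentOp, change cluster state, then release the failpoint. A minimal sketch of the currentOp polling step, assuming 'conn' is a connection to a mongos (the helper name is illustrative):

    // Wait until some in-progress aggregation's pipeline ends in a $merge stage.
    function waitForMergeStage(conn) {
        assert.soon(() => {
            return conn.getDB("admin")
                .aggregate([
                    {$currentOp: {}},
                    {$match: {"cursor.originatingCommand.pipeline": {$exists: true}}}
                ])
                .toArray()
                .some((op) => {
                    const pipeline = op.cursor.originatingCommand.pipeline;
                    return pipeline.length > 0 &&
                        pipeline[pipeline.length - 1].hasOwnProperty("$merge");
                });
        });
    }
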
diff --git a/jstests/sharding/merge_to_existing.js b/jstests/sharding/merge_to_existing.js
index ff653186fcb..3e9038f9cc4 100644
--- a/jstests/sharding/merge_to_existing.js
+++ b/jstests/sharding/merge_to_existing.js
@@ -1,150 +1,150 @@
// Tests for $merge with an existing target collection.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const mongosDB = st.s0.getDB("source_db");
- const sourceColl = mongosDB["source_coll"];
- const outputCollSameDb = mongosDB[jsTestName() + "_merge"];
+const mongosDB = st.s0.getDB("source_db");
+const sourceColl = mongosDB["source_coll"];
+const outputCollSameDb = mongosDB[jsTestName() + "_merge"];
- function testMerge(sourceColl, targetColl, shardedSource, shardedTarget) {
- jsTestLog(`Testing $merge from ${sourceColl.getFullName()} ` +
- `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
- `(${shardedTarget ? "sharded" : "unsharded"})`);
- sourceColl.drop();
- targetColl.drop();
- assert.commandWorked(targetColl.runCommand("create"));
+function testMerge(sourceColl, targetColl, shardedSource, shardedTarget) {
+ jsTestLog(`Testing $merge from ${sourceColl.getFullName()} ` +
+ `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
+ `(${shardedTarget ? "sharded" : "unsharded"})`);
+ sourceColl.drop();
+ targetColl.drop();
+ assert.commandWorked(targetColl.runCommand("create"));
- if (shardedSource) {
- st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceColl.getDB().getName());
+ if (shardedSource) {
+ st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceColl.getDB().getName());
+ }
+
+ if (shardedTarget) {
+ st.shardColl(targetColl, {_id: 1}, {_id: 0}, {_id: 1}, targetColl.getDB().getName());
+ }
+
+ for (let i = -5; i < 5; i++) {
+ assert.commandWorked(sourceColl.insert({_id: i}));
+ }
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Test without documents in target collection.
+ assert.commandWorked(targetColl.remove({}));
+ if (whenNotMatchedMode == "fail") {
+ // Test whenNotMatchedMode: "fail" to an existing collection.
+ assertErrorCode(sourceColl,
+ [{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ 13113);
+ } else {
+ assert.doesNotThrow(() => sourceColl.aggregate([{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
+ assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
}
- if (shardedTarget) {
- st.shardColl(targetColl, {_id: 1}, {_id: 0}, {_id: 1}, targetColl.getDB().getName());
+ // Test with documents in the target collection. Every document in the source collection is
+ // present in the target, plus some additional documents that don't match.
+ assert.commandWorked(targetColl.remove({}));
+ for (let i = -10; i < 5; i++) {
+ assert.commandWorked(targetColl.insert({_id: i}));
}
- for (let i = -5; i < 5; i++) {
- assert.commandWorked(sourceColl.insert({_id: i}));
+ if (whenMatchedMode == "fail") {
+ // Test whenMatched: "fail" to an existing collection with unique key conflicts.
+ assertErrorCode(sourceColl,
+ [{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ ErrorCodes.DuplicateKey);
+ } else {
+ assert.doesNotThrow(() => sourceColl.aggregate([{
+ $merge: {
+ into: {
+ db: targetColl.getDB().getName(),
+ coll: targetColl.getName(),
+ },
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }]));
}
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Test without documents in target collection.
- assert.commandWorked(targetColl.remove({}));
- if (whenNotMatchedMode == "fail") {
- // Test whenNotMatchedMode: "fail" to an existing collection.
- assertErrorCode(sourceColl,
- [{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- 13113);
- } else {
- assert.doesNotThrow(() => sourceColl.aggregate([{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
- }
-
- // Test with documents in the target collection. Every document in the source collection is
- // present in the target, plus some additional documents that don't match.
- assert.commandWorked(targetColl.remove({}));
- for (let i = -10; i < 5; i++) {
- assert.commandWorked(targetColl.insert({_id: i}));
- }
-
- if (whenMatchedMode == "fail") {
- // Test whenMatched: "fail" to an existing collection with unique key conflicts.
- assertErrorCode(sourceColl,
- [{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- ErrorCodes.DuplicateKey);
- } else {
- assert.doesNotThrow(() => sourceColl.aggregate([{
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }]));
- }
- assert.eq(15, targetColl.find().itcount());
- });
-
- // Legacy $out is only supported to the same database.
- if (sourceColl.getDB() === targetColl.getDB()) {
- if (shardedTarget) {
- // Test that legacy $out fails if the target collection is sharded.
- assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
- } else {
- // Test that legacy $out will drop the target collection and replace it with the
- // contents of the source collection.
- sourceColl.aggregate([{$out: targetColl.getName()}]);
- assert.eq(10, targetColl.find().itcount());
- }
+ assert.eq(15, targetColl.find().itcount());
+ });
+
+ // Legacy $out is only supported to the same database.
+ if (sourceColl.getDB() === targetColl.getDB()) {
+ if (shardedTarget) {
+ // Test that legacy $out fails if the target collection is sharded.
+ assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
+ } else {
+ // Test that legacy $out will drop the target collection and replace it with the
+ // contents of the source collection.
+ sourceColl.aggregate([{$out: targetColl.getName()}]);
+ assert.eq(10, targetColl.find().itcount());
}
}
+}
- //
- // Tests for $merge where the output collection is in the same database as the source
- // collection.
- //
+//
+// Tests for $merge where the output collection is in the same database as the source
+// collection.
+//
- // Test with unsharded source and sharded target collection.
- testMerge(sourceColl, outputCollSameDb, false, true);
+// Test with unsharded source and sharded target collection.
+testMerge(sourceColl, outputCollSameDb, false, true);
- // Test with sharded source and sharded target collection.
- testMerge(sourceColl, outputCollSameDb, true, true);
+// Test with sharded source and sharded target collection.
+testMerge(sourceColl, outputCollSameDb, true, true);
- // Test with sharded source and unsharded target collection.
- testMerge(sourceColl, outputCollSameDb, true, false);
+// Test with sharded source and unsharded target collection.
+testMerge(sourceColl, outputCollSameDb, true, false);
- // Test with unsharded source and unsharded target collection.
- testMerge(sourceColl, outputCollSameDb, false, false);
+// Test with unsharded source and unsharded target collection.
+testMerge(sourceColl, outputCollSameDb, false, false);
- //
- // Tests for $merge to a database that differs from the source collection's database.
- //
- const foreignDb = st.s0.getDB("foreign_db");
- const outputCollDiffDb = foreignDb["output_coll"];
+//
+// Tests for $merge to a database that differs from the source collection's database.
+//
+const foreignDb = st.s0.getDB("foreign_db");
+const outputCollDiffDb = foreignDb["output_coll"];
- // Test with sharded source and sharded target collection.
- testMerge(sourceColl, outputCollDiffDb, true, true);
+// Test with sharded source and sharded target collection.
+testMerge(sourceColl, outputCollDiffDb, true, true);
- // Test with unsharded source and unsharded target collection.
- testMerge(sourceColl, outputCollDiffDb, false, false);
+// Test with unsharded source and unsharded target collection.
+testMerge(sourceColl, outputCollDiffDb, false, false);
- // Test with unsharded source and sharded target collection.
- testMerge(sourceColl, outputCollDiffDb, false, true);
+// Test with unsharded source and sharded target collection.
+testMerge(sourceColl, outputCollDiffDb, false, true);
- // Test with sharded source and unsharded target collection.
- testMerge(sourceColl, outputCollDiffDb, true, false);
+// Test with sharded source and unsharded target collection.
+testMerge(sourceColl, outputCollDiffDb, true, false);
- st.stop();
+st.stop();
}());
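
As exercised above, the 'into' field of $merge accepts either a bare collection name or a {db, coll} object for cross-database output. A minimal standalone sketch, with database and collection names chosen for illustration:

    // Merge documents from source_db.source_coll into foreign_db.output_coll.
    db.getSiblingDB("source_db")["source_coll"].aggregate([{
        $merge: {
            into: {db: "foreign_db", coll: "output_coll"},
            whenMatched: "replace",   // overwrite documents that match on "_id"
            whenNotMatched: "insert"  // insert documents with no match
        }
    }]);
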
diff --git a/jstests/sharding/merge_to_non_existing.js b/jstests/sharding/merge_to_non_existing.js
index e4be4a1618c..5f6af78a86a 100644
--- a/jstests/sharding/merge_to_non_existing.js
+++ b/jstests/sharding/merge_to_non_existing.js
@@ -1,108 +1,107 @@
// Tests for $merge with a non-existing target collection.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
- const sourceDB = st.s0.getDB("source_db");
-
- /**
- * Run an aggregation on 'sourceColl' that writes documents to 'targetColl' with $merge.
- */
- function testMerge(sourceColl, targetColl, shardedSource) {
- sourceColl.drop();
-
- if (shardedSource) {
- st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceDB.getName());
- }
-
- for (let i = 0; i < 10; i++) {
- assert.commandWorked(sourceColl.insert({_id: i}));
- }
-
- // Test the behavior for each of the $merge modes. Since the target collection does not
- // exist, the behavior should be identical.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
- return;
-
- targetColl.drop();
- sourceColl.aggregate([{
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, config: 1});
+const sourceDB = st.s0.getDB("source_db");
+
+/**
+ * Run an aggregation on 'sourceColl' that writes documents to 'targetColl' with $merge.
+ */
+function testMerge(sourceColl, targetColl, shardedSource) {
+ sourceColl.drop();
+
+ if (shardedSource) {
+ st.shardColl(sourceColl, {_id: 1}, {_id: 0}, {_id: 1}, sourceDB.getName());
+ }
+
+ for (let i = 0; i < 10; i++) {
+ assert.commandWorked(sourceColl.insert({_id: i}));
+ }
+
+ // Test the behavior for each of the $merge modes. Since the target collection does not
+ // exist, the behavior should be identical.
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
+ return;
+
+ targetColl.drop();
+ sourceColl.aggregate([{
+ $merge: {
+ into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode,
+ on: "_id"
+ }
+ }]);
+ assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
+ });
+
+ // Test that $merge fails if the "on" field is anything but "_id" when the target collection
+ // does not exist.
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
+ return;
+
+ targetColl.drop();
+ assertErrorCode(
+ sourceColl,
+ [{
$merge: {
into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
whenMatched: whenMatchedMode,
whenNotMatched: whenNotMatchedMode,
- on: "_id"
+ on: "not_allowed"
}
- }]);
- assert.eq(whenNotMatchedMode == "discard" ? 0 : 10, targetColl.find().itcount());
- });
-
- // Test that $merge fails if the "on" field is anything but "_id" when the target collection
- // does not exist.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenMatchedMode == "fail" || whenNotMatchedMode == "fail")
- return;
-
- targetColl.drop();
- assertErrorCode(
- sourceColl,
- [{
- $merge: {
- into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode,
- on: "not_allowed"
- }
- }],
- 51190);
- });
-
- // If 'targetColl' is in the same database as 'sourceColl', test that the legacy $out works
- // correctly.
- if (targetColl.getDB() == sourceColl.getDB()) {
- jsTestLog(
- `Testing $out from ${sourceColl.getFullName()} ` +
- `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
- `with legacy syntax`);
-
- targetColl.drop();
- sourceColl.aggregate([{$out: targetColl.getName()}]);
- assert.eq(10, targetColl.find().itcount());
- }
+ }],
+ 51190);
+ });
+
+ // If 'targetColl' is in the same database as 'sourceColl', test that the legacy $out works
+ // correctly.
+ if (targetColl.getDB() == sourceColl.getDB()) {
+ jsTestLog(`Testing $out from ${sourceColl.getFullName()} ` +
+ `(${shardedSource ? "sharded" : "unsharded"}) to ${targetColl.getFullName()} ` +
+ `with legacy syntax`);
+
+ targetColl.drop();
+ sourceColl.aggregate([{$out: targetColl.getName()}]);
+ assert.eq(10, targetColl.find().itcount());
}
-
- const sourceColl = sourceDB["source_coll"];
- const outputCollSameDb = sourceDB["output_coll"];
-
- // Test $merge from an unsharded source collection to a non-existent output collection in the
- // same database.
- testMerge(sourceColl, outputCollSameDb, false);
-
- // Like the last test case, but perform a $merge from a sharded source collection to a
- // non-existent output collection in the same database.
- testMerge(sourceColl, outputCollSameDb, true);
-
- // Test that $merge in a sharded cluster fails when the output is sent to a different database
- // that doesn't exist.
- const foreignDb = st.s0.getDB("foreign_db");
- const outputCollDiffDb = foreignDb["output_coll"];
- foreignDb.dropDatabase();
- assert.throws(() => testMerge(sourceColl, outputCollDiffDb, false));
- assert.throws(() => testMerge(sourceColl, outputCollDiffDb, true));
-
- // Test $merge from an unsharded source collection to an output collection in a different
- // database where the database exists but the collection does not.
- assert.commandWorked(foreignDb["test"].insert({_id: "forcing database creation"}));
- testMerge(sourceColl, outputCollDiffDb, false);
-
- // Like the last test, but with a sharded source collection.
- testMerge(sourceColl, outputCollDiffDb, true);
- st.stop();
+}
+
+const sourceColl = sourceDB["source_coll"];
+const outputCollSameDb = sourceDB["output_coll"];
+
+// Test $merge from an unsharded source collection to a non-existent output collection in the
+// same database.
+testMerge(sourceColl, outputCollSameDb, false);
+
+// Like the last test case, but perform a $merge from a sharded source collection to a
+// non-existent output collection in the same database.
+testMerge(sourceColl, outputCollSameDb, true);
+
+// Test that $merge in a sharded cluster fails when the output is sent to a different database
+// that doesn't exist.
+const foreignDb = st.s0.getDB("foreign_db");
+const outputCollDiffDb = foreignDb["output_coll"];
+foreignDb.dropDatabase();
+assert.throws(() => testMerge(sourceColl, outputCollDiffDb, false));
+assert.throws(() => testMerge(sourceColl, outputCollDiffDb, true));
+
+// Test $merge from an unsharded source collection to an output collection in a different
+// database where the database exists but the collection does not.
+assert.commandWorked(foreignDb["test"].insert({_id: "forcing database creation"}));
+testMerge(sourceColl, outputCollDiffDb, false);
+
+// Like the last test, but with a sharded source collection.
+testMerge(sourceColl, outputCollDiffDb, true);
+st.stop();
}());
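
The 51190 assertion above reflects the restriction this file is testing: when the target collection does not exist, only "_id" is accepted as the 'on' field, since a newly created collection has a unique index only on _id. A sketch of the failing shape, with illustrative names:

    // Expected to fail with code 51190: custom 'on' field, non-existent target.
    const res = db.runCommand({
        aggregate: "source_coll",
        pipeline: [{
            $merge: {
                into: "missing_target",
                whenMatched: "replace",
                whenNotMatched: "insert",
                on: "not_allowed"
            }
        }],
        cursor: {}
    });
    assert.commandFailedWithCode(res, 51190);
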
diff --git a/jstests/sharding/merge_with_chunk_migrations.js b/jstests/sharding/merge_with_chunk_migrations.js
index 2b9ba4256fa..461fe57cf8d 100644
--- a/jstests/sharding/merge_with_chunk_migrations.js
+++ b/jstests/sharding/merge_with_chunk_migrations.js
@@ -1,47 +1,47 @@
// Tests that the $merge aggregation stage is resilient to chunk migrations in both the source and
// output collections during execution.
(function() {
- 'use strict';
+'use strict';
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB(jsTestName());
- const sourceColl = mongosDB["source"];
- const targetColl = mongosDB["target"];
+const mongosDB = st.s.getDB(jsTestName());
+const sourceColl = mongosDB["source"];
+const targetColl = mongosDB["target"];
- function setAggHang(mode) {
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
+function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
+}
+
+function runMergeWithMode(whenMatchedMode, whenNotMatchedMode, shardedColl) {
+ assert.commandWorked(targetColl.remove({}));
+
+ // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
+ // documents, causing the assertion below to fail. To avoid that, we match the documents in
+ // the target collection with the documents in the source.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
}
- function runMergeWithMode(whenMatchedMode, whenNotMatchedMode, shardedColl) {
- assert.commandWorked(targetColl.remove({}));
-
- // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
- // documents, causing the assertion below to fail. To avoid that, we match the documents in
- // the target collection with the documents in the source.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
- }
-
- // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
- setAggHang("alwaysOn");
-
- let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName();
-
- const mergeSpec = {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- };
- // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
- // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
- let outFn = `
+ // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
+ setAggHang("alwaysOn");
+
+ let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName();
+
+ const mergeSpec = {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ };
+ // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
+ // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
+ let outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
sourceColl.aggregate([
@@ -51,43 +51,42 @@
{comment: "${comment}"});
`;
- // Start the $merge aggregation in a parallel shell.
- let mergeShell = startParallelShell(outFn, st.s.port);
-
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () =>
- mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
- () => tojson(mongosDB.currentOp().inprog));
-
- // Migrate the chunk on shard1 to shard0.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard0.shardName}));
-
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- mergeShell();
-
- // Verify that the $merge succeeded.
- assert.eq(2, targetColl.find().itcount());
-
- // Now both chunks are on shard0. Run a similar test except migrate the chunks back to
- // shard1 in the middle of execution.
- assert.commandWorked(targetColl.remove({}));
-
- // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
- // documents, causing the assertion below to fail. To avoid that, we match the documents in
- // the target collection with the documents in the source.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
- }
-
- setAggHang("alwaysOn");
- comment = comment + "_2";
- // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
- // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
- outFn = `
+ // Start the $merge aggregation in a parallel shell.
+ let mergeShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(
+ () => mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ // Migrate the chunk on shard1 to shard0.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard0.shardName}));
+
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ mergeShell();
+
+ // Verify that the $merge succeeded.
+ assert.eq(2, targetColl.find().itcount());
+
+ // Now both chunks are on shard0. Run a similar test except migrate the chunks back to
+ // shard1 in the middle of execution.
+ assert.commandWorked(targetColl.remove({}));
+
+ // For modes 'whenNotMatchedMode:fail/discard', the $merge will not insert the expected
+ // documents, causing the assertion below to fail. To avoid that, we match the documents in
+ // the target collection with the documents in the source.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
+ }
+
+ setAggHang("alwaysOn");
+ comment = comment + "_2";
+ // The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
+ // from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
+ outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
sourceColl.aggregate([
@@ -96,56 +95,55 @@
],
{comment: "${comment}"});
`;
- mergeShell = startParallelShell(outFn, st.s.port);
+ mergeShell = startParallelShell(outFn, st.s.port);
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () =>
- mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
- () => tojson(mongosDB.currentOp().inprog));
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(
+ () => mongosDB.currentOp({op: "command", "command.comment": comment}).inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard1.shardName}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard1.shardName}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard1.shardName}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: 1}, to: st.shard1.shardName}));
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- mergeShell();
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ mergeShell();
- // Verify that the $merge succeeded.
- assert.eq(2, targetColl.find().itcount());
+ // Verify that the $merge succeeded.
+ assert.eq(2, targetColl.find().itcount());
- // Reset the chunk distribution.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard0.shardName}));
- }
+ // Reset the chunk distribution.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard0.shardName}));
+}
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Write a document to each chunk of the source collection.
- assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
+// Write a document to each chunk of the source collection.
+assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
+assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runMergeWithMode(whenMatchedMode, whenNotMatchedMode, sourceColl);
- });
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, sourceColl);
+});
- // Run a similar test with chunk migrations on the output collection instead.
- sourceColl.drop();
- assert.commandWorked(targetColl.remove({}));
- // Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Run a similar test with chunk migrations on the output collection instead.
+sourceColl.drop();
+assert.commandWorked(targetColl.remove({}));
+// Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
- assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
+assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl);
- });
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl);
+});
- st.stop();
+st.stop();
})();
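
Both halves of the test above toggle the same failpoint on every shard primary before and after the chunk migration. A small sketch of that helper shape, assuming 'st' is a ShardingTest with two shards:

    // Toggle a named failpoint on both shard primaries; 'mode' is "alwaysOn" or "off".
    function setFailPointOnShards(st, name, mode) {
        [st.shard0, st.shard1].forEach((shard) => {
            assert.commandWorked(shard.adminCommand({configureFailPoint: name, mode: mode}));
        });
    }
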
diff --git a/jstests/sharding/merge_with_drop_shard.js b/jstests/sharding/merge_with_drop_shard.js
index 442f4c89a4c..cc03ea31c42 100644
--- a/jstests/sharding/merge_with_drop_shard.js
+++ b/jstests/sharding/merge_with_drop_shard.js
@@ -1,61 +1,60 @@
// Tests that the $merge aggregation stage is resilient to dropping a shard in both the source and
// output collections during execution.
(function() {
- 'use strict';
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
-
- const mongosDB = st.s.getDB(jsTestName());
- const sourceColl = mongosDB["source"];
- const targetColl = mongosDB["target"];
-
- assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard1.name);
-
- function setAggHang(mode) {
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- }
-
- function removeShard(shard) {
- // We need the balancer to drain all the chunks out of the shard that is being removed.
- assert.commandWorked(st.startBalancer());
- st.waitForBalancer(true, 60000);
- var res = st.s.adminCommand({removeShard: shard.shardName});
+'use strict';
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+
+const mongosDB = st.s.getDB(jsTestName());
+const sourceColl = mongosDB["source"];
+const targetColl = mongosDB["target"];
+
+assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard1.name);
+
+function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+}
+
+function removeShard(shard) {
+ // We need the balancer to drain all the chunks out of the shard that is being removed.
+ assert.commandWorked(st.startBalancer());
+ st.waitForBalancer(true, 60000);
+ var res = st.s.adminCommand({removeShard: shard.shardName});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: shard.shardName});
assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: shard.shardName});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "removeShard never completed for shard " + shard.shardName);
-
- // Drop the test database on the removed shard so it does not interfere with addShard later.
- assert.commandWorked(shard.getDB(mongosDB.getName()).dropDatabase());
-
- st.configRS.awaitLastOpCommitted();
- assert.commandWorked(st.s.adminCommand({flushRouterConfig: 1}));
- assert.commandWorked(st.stopBalancer());
- st.waitForBalancer(false, 60000);
- }
-
- function addShard(shard) {
- assert.commandWorked(st.s.adminCommand({addShard: shard}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: sourceColl.getFullName(), find: {shardKey: 0}, to: shard}));
- }
- function runMergeWithMode(
- whenMatchedMode, whenNotMatchedMode, shardedColl, dropShard, expectFailCode) {
- // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
- setAggHang("alwaysOn");
-
- let comment =
- whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName() + "_1";
- let outFn = `
+ return ('completed' === res.state);
+ }, "removeShard never completed for shard " + shard.shardName);
+
+ // Drop the test database on the removed shard so it does not interfere with addShard later.
+ assert.commandWorked(shard.getDB(mongosDB.getName()).dropDatabase());
+
+ st.configRS.awaitLastOpCommitted();
+ assert.commandWorked(st.s.adminCommand({flushRouterConfig: 1}));
+ assert.commandWorked(st.stopBalancer());
+ st.waitForBalancer(false, 60000);
+}
+
+function addShard(shard) {
+ assert.commandWorked(st.s.adminCommand({addShard: shard}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: sourceColl.getFullName(), find: {shardKey: 0}, to: shard}));
+}
+function runMergeWithMode(
+ whenMatchedMode, whenNotMatchedMode, shardedColl, dropShard, expectFailCode) {
+ // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
+ setAggHang("alwaysOn");
+
+ let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName() + "_1";
+ let outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
let cmdRes = sourceDB.runCommand({
@@ -76,61 +75,60 @@
}
`;
- // Start the $merge aggregation in a parallel shell.
- let mergeShell = startParallelShell(outFn, st.s.port);
-
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () => mongosDB
- .currentOp({
- $or: [
- {op: "command", "command.comment": comment},
- {op: "getmore", "cursor.originatingCommand.comment": comment}
- ]
- })
- .inprog.length >= 1,
- () => tojson(mongosDB.currentOp().inprog));
-
- if (dropShard) {
- removeShard(st.shard0);
- } else {
- addShard(st.rs0.getURL());
- }
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- mergeShell();
-
- assert.eq(2, targetColl.find().itcount());
+ // Start the $merge aggregation in a parallel shell.
+ let mergeShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(() => mongosDB
+ .currentOp({
+ $or: [
+ {op: "command", "command.comment": comment},
+ {op: "getmore", "cursor.originatingCommand.comment": comment}
+ ]
+ })
+ .inprog.length >= 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ if (dropShard) {
+ removeShard(st.shard0);
+ } else {
+ addShard(st.rs0.getURL());
}
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ mergeShell();
+
+ assert.eq(2, targetColl.find().itcount());
+}
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1, _id: 0}));
- assert.commandWorked(sourceColl.insert({shardKey: 1, _id: 1}));
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1, _id: 0}));
+assert.commandWorked(sourceColl.insert({shardKey: 1, _id: 1}));
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- assert.commandWorked(targetColl.remove({}));
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ assert.commandWorked(targetColl.remove({}));
- // Match the data from source into target so that we don't fail the assertion for
- // 'whenNotMatchedMode:fail/discard'.
- if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
- assert.commandWorked(targetColl.insert({shardKey: -1, _id: 0}));
- assert.commandWorked(targetColl.insert({shardKey: 1, _id: 1}));
- }
+ // Match the data from source into target so that we don't fail the assertion for
+ // 'whenNotMatchedMode:fail/discard'.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({shardKey: -1, _id: 0}));
+ assert.commandWorked(targetColl.insert({shardKey: 1, _id: 1}));
+ }
- runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl, true, undefined);
- runMergeWithMode(whenMatchedMode,
- whenNotMatchedMode,
- targetColl,
- false,
- whenMatchedMode == "fail" ? ErrorCodes.DuplicateKey : undefined);
- });
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl, true, undefined);
+ runMergeWithMode(whenMatchedMode,
+ whenNotMatchedMode,
+ targetColl,
+ false,
+ whenMatchedMode == "fail" ? ErrorCodes.DuplicateKey : undefined);
+});
- st.stop();
+st.stop();
})();
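
The removeShard flow above is asynchronous: the first call reports state 'started' and the command must be re-issued until it reports 'completed'. A condensed sketch of that polling loop, assuming 'st' is a ShardingTest with the balancer already draining chunks:

    function drainShard(st, shard) {
        let res = st.s.adminCommand({removeShard: shard.shardName});
        assert.commandWorked(res);
        assert.eq('started', res.state);
        assert.soon(() => {
            res = st.s.adminCommand({removeShard: shard.shardName});
            assert.commandWorked(res);
            return res.state === 'completed';
        }, "removeShard never completed for " + shard.shardName);
    }
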
diff --git a/jstests/sharding/merge_with_move_primary.js b/jstests/sharding/merge_with_move_primary.js
index 5de910b29fa..94d00de22eb 100644
--- a/jstests/sharding/merge_with_move_primary.js
+++ b/jstests/sharding/merge_with_move_primary.js
@@ -1,37 +1,37 @@
// Tests that the $merge aggregation stage is resilient to moving the primary shard in both the
// source and output collections during execution.
(function() {
- 'use strict';
+'use strict';
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- const mongosDB = st.s.getDB(jsTestName());
- const sourceColl = mongosDB["source"];
- const targetColl = mongosDB["target"];
+const mongosDB = st.s.getDB(jsTestName());
+const sourceColl = mongosDB["source"];
+const targetColl = mongosDB["target"];
- function setAggHang(mode) {
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+function setAggHang(mode) {
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode}));
- assert.commandWorked(st.shard0.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
- assert.commandWorked(st.shard1.adminCommand(
- {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
- }
+ assert.commandWorked(st.shard0.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {configureFailPoint: "hangWhileBuildingDocumentSourceOutBatch", mode: mode}));
+}
- function runPipelineWithStage({stage, shardedColl, expectedfailCode, expectedNumDocs}) {
- // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
- setAggHang("alwaysOn");
+function runPipelineWithStage({stage, shardedColl, expectedfailCode, expectedNumDocs}) {
+ // Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
+ setAggHang("alwaysOn");
- // Set the primary shard.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+ // Set the primary shard.
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
- let comment = jsTestName() + "_comment";
- let outFn = `
+ let comment = jsTestName() + "_comment";
+ let outFn = `
const sourceDB = db.getSiblingDB(jsTestName());
const sourceColl = sourceDB["${sourceColl.getName()}"];
let cmdRes = sourceDB.runCommand({
@@ -47,151 +47,149 @@
}
`;
- // Start the $merge aggregation in a parallel shell.
- let outShell = startParallelShell(outFn, st.s.port);
-
- // Wait for the parallel shell to hit the failpoint.
- assert.soon(
- () => mongosDB
- .currentOp({
- $or: [
- {op: "command", "command.comment": comment},
- {op: "getmore", "cursor.originatingCommand.comment": comment}
- ]
- })
- .inprog.length == 1,
- () => tojson(mongosDB.currentOp().inprog));
-
- // Migrate the primary shard from shard0 to shard1.
- st.ensurePrimaryShard(mongosDB.getName(), st.shard1.shardName);
-
- // Unset the failpoint to unblock the $merge and join with the parallel shell.
- setAggHang("off");
- outShell();
-
- // Verify that the $merge succeeded.
- if (expectedfailCode === undefined) {
- assert.eq(expectedNumDocs, targetColl.find().itcount());
- }
-
- assert.commandWorked(targetColl.remove({}));
+ // Start the $merge aggregation in a parallel shell.
+ let outShell = startParallelShell(outFn, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint.
+ assert.soon(() => mongosDB
+ .currentOp({
+ $or: [
+ {op: "command", "command.comment": comment},
+ {op: "getmore", "cursor.originatingCommand.comment": comment}
+ ]
+ })
+ .inprog.length == 1,
+ () => tojson(mongosDB.currentOp().inprog));
+
+ // Migrate the primary shard from shard0 to shard1.
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard1.shardName);
+
+ // Unset the failpoint to unblock the $merge and join with the parallel shell.
+ setAggHang("off");
+ outShell();
+
+ // Verify that the $merge succeeded.
+ if (expectedfailCode === undefined) {
+ assert.eq(expectedNumDocs, targetColl.find().itcount());
}
- // The source collection is unsharded.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+ assert.commandWorked(targetColl.remove({}));
+}
+
+// The source collection is unsharded.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- // Note that the actual error is NamespaceNotFound but it is wrapped in a generic error code by
- // mistake.
+// Note that the actual error is NamespaceNotFound but it is wrapped in a generic error code by
+// mistake.
+runPipelineWithStage({
+ stage: {$out: targetColl.getName()},
+ shardedColl: sourceColl,
+ expectedfailCode: ErrorCodes.CommandFailed
+});
+
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
runPipelineWithStage({
- stage: {$out: targetColl.getName()},
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
shardedColl: sourceColl,
- expectedfailCode: ErrorCodes.CommandFailed
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
+sourceColl.drop();
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: sourceColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- });
- sourceColl.drop();
-
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Write a document to each chunk of the source collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- // Write a document to each chunk of the source collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+runPipelineWithStage({
+ stage: {$out: targetColl.getName()},
+ shardedColl: sourceColl,
+ expectedfailCode: ErrorCodes.CommandFailed
+});
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
runPipelineWithStage({
- stage: {$out: targetColl.getName()},
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
shardedColl: sourceColl,
- expectedfailCode: ErrorCodes.CommandFailed
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: sourceColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
- });
+sourceColl.drop();
- sourceColl.drop();
+// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- // Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+runPipelineWithStage({
+ stage: {$out: targetColl.getName()},
+ shardedColl: targetColl,
+ expectedfailCode: ErrorCodes.CommandFailed
+});
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
runPipelineWithStage({
- stage: {$out: targetColl.getName()},
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
shardedColl: targetColl,
- expectedfailCode: ErrorCodes.CommandFailed
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: targetColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
- });
+sourceColl.drop();
+targetColl.drop();
+
+// Shard the collections with shard key {shardKey: 1} and split into 2 chunks.
+st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
+
+// Write two documents in the source collection that should target the two chunks in the target
+// collection.
+assert.commandWorked(sourceColl.insert({shardKey: -1}));
+assert.commandWorked(sourceColl.insert({shardKey: 1}));
- sourceColl.drop();
- targetColl.drop();
-
- // Shard the collections with shard key {shardKey: 1} and split into 2 chunks.
- st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
- st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
-
- // Write two documents in the source collection that should target the two chunks in the target
- // collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
-
- // Note that the legacy $out is not supported with an existing sharded output collection.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- runPipelineWithStage({
- stage: {
- $merge: {
- into: targetColl.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- },
- shardedColl: targetColl,
- expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
- expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
- });
+// Note that the legacy $out is not supported with an existing sharded output collection.
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runPipelineWithStage({
+ stage: {
+ $merge: {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ },
+ shardedColl: targetColl,
+ expectedNumDocs: whenNotMatchedMode == "discard" ? 0 : 2,
+ expectedfailCode: whenNotMatchedMode == "fail" ? 13113 : undefined
});
+});
- st.stop();
+st.stop();
})();
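
The currentOp filter used above matches the tagged operation both while the initial command is running and after the cursor has moved on to a getMore, which is why it needs the $or. A sketch of that lookup as a standalone helper (names illustrative):

    // Return in-progress ops tagged with 'comment', whether still in the
    // original command or already in a getMore on its cursor.
    function opsWithComment(mongosDB, comment) {
        return mongosDB.currentOp({
            $or: [
                {op: "command", "command.comment": comment},
                {op: "getmore", "cursor.originatingCommand.comment": comment}
            ]
        }).inprog;
    }
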
diff --git a/jstests/sharding/merge_write_concern.js b/jstests/sharding/merge_write_concern.js
index b49d2e381ae..a0e3c0a9fcb 100644
--- a/jstests/sharding/merge_write_concern.js
+++ b/jstests/sharding/merge_write_concern.js
@@ -1,101 +1,101 @@
// Tests that $merge respects the writeConcern set on the original aggregation command.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
-
- const st = new ShardingTest({shards: 2, rs: {nodes: 3}, config: 1});
-
- const mongosDB = st.s0.getDB("merge_write_concern");
- const source = mongosDB["source"];
- const target = mongosDB["target"];
- const shard0 = st.rs0;
- const shard1 = st.rs1;
-
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
- function testWriteConcernError(rs) {
- // Make sure that there are only 2 nodes up so w:3 writes will always time out.
- const stoppedSecondary = rs.getSecondary();
- rs.stop(stoppedSecondary);
-
- // Test that $merge correctly returns a WC error.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- const res = mongosDB.runCommand({
- aggregate: "source",
- pipeline: [{
- $merge: {
- into: "target",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- writeConcern: {w: 3, wtimeout: 100},
- cursor: {},
- });
-
- // $merge writeConcern errors are handled differently from normal writeConcern
- // errors. Rather than returning ok:1 and a WriteConcernError, the entire operation
- // fails.
- assert.commandFailedWithCode(res,
- whenNotMatchedMode == "fail"
- ? [13113, ErrorCodes.WriteConcernFailed]
- : ErrorCodes.WriteConcernFailed);
- assert.commandWorked(target.remove({}));
+"use strict";
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 3}, config: 1});
+
+const mongosDB = st.s0.getDB("merge_write_concern");
+const source = mongosDB["source"];
+const target = mongosDB["target"];
+const shard0 = st.rs0;
+const shard1 = st.rs1;
+
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+
+function testWriteConcernError(rs) {
+ // Make sure that there are only 2 nodes up so w:3 writes will always time out.
+ const stoppedSecondary = rs.getSecondary();
+ rs.stop(stoppedSecondary);
+
+ // Test that $merge correctly returns a WC error.
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ const res = mongosDB.runCommand({
+ aggregate: "source",
+ pipeline: [{
+ $merge: {
+ into: "target",
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ writeConcern: {w: 3, wtimeout: 100},
+ cursor: {},
});
- // Restart the stopped node and verify that the $merge operations now pass.
- rs.restart(rs.getSecondary());
- rs.awaitReplication();
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
- // Skip the combination of merge modes which will fail depending on the contents of the
- // source and target collection, as this will cause the assertion below to trip.
- if (whenNotMatchedMode == "fail")
- return;
-
- const res = mongosDB.runCommand({
- aggregate: "source",
- pipeline: [{
- $merge: {
- into: "target",
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- writeConcern: {w: 3},
- cursor: {},
- });
-
- // Ensure that the write concern is satisfied within a reasonable amount of time. This
- // prevents the test from hanging if for some reason the write concern can't be
- // satisfied.
- assert.soon(() => assert.commandWorked(res), "writeConcern was not satisfied");
- assert.commandWorked(target.remove({}));
+ // $merge writeConcern errors are handled differently from normal writeConcern
+ // errors. Rather than returning ok:1 and a WriteConcernError, the entire operation
+ // fails.
+ assert.commandFailedWithCode(res,
+ whenNotMatchedMode == "fail"
+ ? [13113, ErrorCodes.WriteConcernFailed]
+ : ErrorCodes.WriteConcernFailed);
+ assert.commandWorked(target.remove({}));
+ });
+
+ // Restart the stopped node and verify that the $merge operations now pass.
+ rs.restart(rs.getSecondary());
+ rs.awaitReplication();
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ // Skip the combination of merge modes which will fail depending on the contents of the
+ // source and target collection, as this will cause the assertion below to trip.
+ if (whenNotMatchedMode == "fail")
+ return;
+
+ const res = mongosDB.runCommand({
+ aggregate: "source",
+ pipeline: [{
+ $merge: {
+ into: "target",
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ writeConcern: {w: 3},
+ cursor: {},
});
- }
- // Test that when both collections are unsharded, all writes are directed to the primary shard.
- assert.commandWorked(source.insert([{_id: -1}, {_id: 0}, {_id: 1}, {_id: 2}]));
- testWriteConcernError(shard0);
+ // Ensure that the write concern is satisfied within a reasonable amount of time. This
+ // prevents the test from hanging if for some reason the write concern can't be
+ // satisfied.
+ assert.soon(() => assert.commandWorked(res), "writeConcern was not satisfied");
+ assert.commandWorked(target.remove({}));
+ });
+}
- // Shard the source collection and continue to expect writes to the primary shard.
- st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
- testWriteConcernError(shard0);
+// Test that when both collections are unsharded, all writes are directed to the primary shard.
+assert.commandWorked(source.insert([{_id: -1}, {_id: 0}, {_id: 1}, {_id: 2}]));
+testWriteConcernError(shard0);
- // Shard the target collection, but make sure that all writes go to the primary shard by
- // splitting the collection at {_id: 10} and keeping all values in the same chunk.
- st.shardColl(target, {_id: 1}, {_id: 10}, {_id: 10}, mongosDB.getName());
- assert.eq(FixtureHelpers.isSharded(target), true);
- testWriteConcernError(shard0);
+// Shard the source collection and continue to expect writes to the primary shard.
+st.shardColl(source, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
+testWriteConcernError(shard0);
- // Write a few documents to the source collection which will be $merge-ed to the second shard.
- assert.commandWorked(source.insert([{_id: 11}, {_id: 12}, {_id: 13}]));
+// Shard the target collection, but make sure that all writes go to the primary shard by
+// splitting the collection at {_id: 10} and keeping all values in the same chunk.
+st.shardColl(target, {_id: 1}, {_id: 10}, {_id: 10}, mongosDB.getName());
+assert.eq(FixtureHelpers.isSharded(target), true);
+testWriteConcernError(shard0);
- // Verify that either shard can produce a WriteConcernError since writes are going to both.
- testWriteConcernError(shard0);
- testWriteConcernError(shard1);
+// Write a few documents to the source collection which will be $merge-ed to the second shard.
+assert.commandWorked(source.insert([{_id: 11}, {_id: 12}, {_id: 13}]));
- st.stop();
+// Verify that either shard can produce a WriteConcernError since writes are going to both.
+testWriteConcernError(shard0);
+testWriteConcernError(shard1);
+
+st.stop();
}());
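
The test above drives every $merge mode combination through a withEachMergeMode helper. As a minimal sketch of what such a helper can look like (the real one lives in the aggregation test libraries, so this body and its mode lists are assumptions for illustration):

// Hypothetical stand-in for the withEachMergeMode helper used above. It
// iterates every whenMatched/whenNotMatched pair and passes an object with
// the same shape the callbacks above destructure.
function withEachMergeModeSketch(callback) {
    const whenMatchedModes = ["replace", "keepExisting", "merge", "fail"];
    const whenNotMatchedModes = ["insert", "discard", "fail"];
    whenMatchedModes.forEach(function(whenMatchedMode) {
        whenNotMatchedModes.forEach(function(whenNotMatchedMode) {
            callback({whenMatchedMode: whenMatchedMode, whenNotMatchedMode: whenNotMatchedMode});
        });
    });
}
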
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 5cdc03d292e..e525a909fea 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,68 +1,67 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
+var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
- assert.writeOK(
- s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
+assert.writeOK(s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- var db = s.getDB("test");
- var coll = db.foo;
+var db = s.getDB("test");
+var coll = db.foo;
- var big = "";
- while (big.length < 10000)
- big += "eliot";
+var big = "";
+while (big.length < 10000)
+ big += "eliot";
- var bulk = coll.initializeUnorderedBulkOp();
- for (var x = 0; x < 100; x++) {
- bulk.insert({x: x, big: big});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var x = 0; x < 100; x++) {
+ bulk.insert({x: x, big: big});
+}
+assert.writeOK(bulk.execute());
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
- assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}}));
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 66}}));
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name}));
- s.printShardingStatus();
+s.printShardingStatus();
- print("YO : " + s.getPrimaryShard("test").host);
- var direct = new Mongo(s.getPrimaryShard("test").host);
- print("direct : " + direct);
+print("YO : " + s.getPrimaryShard("test").host);
+var direct = new Mongo(s.getPrimaryShard("test").host);
+print("direct : " + direct);
- var directDB = direct.getDB("test");
+var directDB = direct.getDB("test");
- for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
- assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
- }
+for (var done = 0; done < 2 * 1024 * 1024; done += big.length) {
+ assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
+}
- s.printShardingStatus();
+s.printShardingStatus();
- // This is a large chunk, which should not be able to move
- assert.commandFailed(s.s0.adminCommand(
- {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
+// This is a large chunk, which should not be able to move
+assert.commandFailed(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
- for (var i = 0; i < 20; i += 2) {
- try {
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}}));
- } catch (e) {
- // We may have auto split on some of these, which is ok
- print(e);
- }
+for (var i = 0; i < 20; i += 2) {
+ try {
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}}));
+ } catch (e) {
+ // We may have auto split on some of these, which is ok
+ print(e);
}
+}
- s.printShardingStatus();
+s.printShardingStatus();
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- var x = s.chunkDiff("foo", "test");
- print("chunk diff: " + x);
- return x < 2;
- }, "no balance happened", 8 * 60 * 1000, 2000);
+assert.soon(function() {
+ var x = s.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2;
+}, "no balance happened", 8 * 60 * 1000, 2000);
- s.stop();
+s.stop();
})();
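
migrateBig.js gates its final assertion on s.chunkDiff(), which reports how unevenly chunks are spread across shards. A hedged sketch of an equivalent check, built on the same config.chunks aggregation that migrateBig_balancer.js runs below (the function name and the mongos handle are illustrative):

// Returns the spread between the shard holding the most chunks for `ns`
// and the shard holding the fewest; a value of 0 or 1 means well balanced.
function chunkCountDiff(mongos, ns) {
    const counts = mongos.getDB("config")
                       .chunks
                       .aggregate([{$match: {ns: ns}}, {$group: {_id: "$shard", n: {$sum: 1}}}])
                       .toArray()
                       .map(doc => doc.n);
    return Math.max(...counts) - Math.min(...counts);
}
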
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 37cba54f498..13195b61b65 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -4,58 +4,55 @@
* @tags: [resource_intensive]
*/
(function() {
- "use strict";
-
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- var st = new ShardingTest({
- name: 'migrateBig_balancer',
- shards: 2,
- other: {enableBalancer: true, shardAsReplicaSet: false}
- });
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var db = mongos.getDB("test");
- var coll = db.getCollection("stuff");
-
- assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
-
- var data = "x";
- var nsq = 16;
- var n = 255;
-
- for (var i = 0; i < nsq; i++)
- data += data;
-
- var dataObj = {};
- for (var i = 0; i < n; i++)
- dataObj["data-" + i] = data;
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 40; i++) {
- bulk.insert({data: dataObj});
- }
-
- assert.writeOK(bulk.execute());
- assert.eq(40, coll.count(), "prep1");
-
- assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
- st.printShardingStatus();
-
- assert.lt(
- 5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
-
- assert.soon(() => {
- let res =
- mongos.getDB("config")
- .chunks
- .aggregate(
- [{$match: {ns: "test.stuff"}}, {$group: {_id: "$shard", nChunks: {$sum: 1}}}])
- .toArray();
- printjson(res);
- return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
-
- }, "never migrated", 10 * 60 * 1000, 1000);
-
- st.stop();
+"use strict";
+
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
+var st = new ShardingTest({
+ name: 'migrateBig_balancer',
+ shards: 2,
+ other: {enableBalancer: true, shardAsReplicaSet: false}
+});
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var db = mongos.getDB("test");
+var coll = db.getCollection("stuff");
+
+assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+
+var data = "x";
+var nsq = 16;
+var n = 255;
+
+for (var i = 0; i < nsq; i++)
+ data += data;
+
+var dataObj = {};
+for (var i = 0; i < n; i++)
+ dataObj["data-" + i] = data;
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 40; i++) {
+ bulk.insert({data: dataObj});
+}
+
+assert.writeOK(bulk.execute());
+assert.eq(40, coll.count(), "prep1");
+
+assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));
+st.printShardingStatus();
+
+assert.lt(5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
+
+assert.soon(() => {
+ let res = mongos.getDB("config")
+ .chunks
+ .aggregate(
+ [{$match: {ns: "test.stuff"}}, {$group: {_id: "$shard", nChunks: {$sum: 1}}}])
+ .toArray();
+ printjson(res);
+ return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
+}, "never migrated", 10 * 60 * 1000, 1000);
+
+st.stop();
})();
diff --git a/jstests/sharding/migration_critical_section_concurrency.js b/jstests/sharding/migration_critical_section_concurrency.js
index e51d9d5d738..e98f1f05262 100644
--- a/jstests/sharding/migration_critical_section_concurrency.js
+++ b/jstests/sharding/migration_critical_section_concurrency.js
@@ -4,65 +4,65 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({mongos: 1, shards: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+var st = new ShardingTest({mongos: 1, shards: 2});
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- var testDB = st.s0.getDB('TestDB');
+var testDB = st.s0.getDB('TestDB');
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll0', key: {Key: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll0', middle: {Key: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll0', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll0', middle: {Key: 0}}));
- var coll0 = testDB.Coll0;
- assert.writeOK(coll0.insert({Key: -1, Value: '-1'}));
- assert.writeOK(coll0.insert({Key: 1, Value: '1'}));
+var coll0 = testDB.Coll0;
+assert.writeOK(coll0.insert({Key: -1, Value: '-1'}));
+assert.writeOK(coll0.insert({Key: 1, Value: '1'}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll1', key: {Key: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll1', middle: {Key: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll1', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll1', middle: {Key: 0}}));
- var coll1 = testDB.Coll1;
- assert.writeOK(coll1.insert({Key: -1, Value: '-1'}));
- assert.writeOK(coll1.insert({Key: 1, Value: '1'}));
+var coll1 = testDB.Coll1;
+assert.writeOK(coll1.insert({Key: -1, Value: '-1'}));
+assert.writeOK(coll1.insert({Key: 1, Value: '1'}));
- // Ensure that coll0 has chunks on both shards so we can test queries against both donor and
- // recipient for Coll1's migration below
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'TestDB.Coll0', find: {Key: 1}, to: st.shard1.shardName}));
+// Ensure that coll0 has chunks on both shards so we can test queries against both donor and
+// recipient for Coll1's migration below
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'TestDB.Coll0', find: {Key: 1}, to: st.shard1.shardName}));
- // Pause the move chunk operation just before it leaves the critical section
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+// Pause the move chunk operation just before it leaves the critical section
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 1}, null, 'TestDB.Coll1', st.shard1.shardName);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 1}, null, 'TestDB.Coll1', st.shard1.shardName);
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- // Ensure that all operations for 'Coll0', which is not being migrated are not stalled
- assert.eq(1, coll0.find({Key: {$lte: -1}}).itcount());
- assert.eq(1, coll0.find({Key: {$gte: 1}}).itcount());
- assert.writeOK(coll0.insert({Key: -2, Value: '-2'}));
- assert.writeOK(coll0.insert({Key: 2, Value: '2'}));
- assert.eq(2, coll0.find({Key: {$lte: -1}}).itcount());
- assert.eq(2, coll0.find({Key: {$gte: 1}}).itcount());
+// Ensure that all operations for 'Coll0', which is not being migrated, are not stalled
+assert.eq(1, coll0.find({Key: {$lte: -1}}).itcount());
+assert.eq(1, coll0.find({Key: {$gte: 1}}).itcount());
+assert.writeOK(coll0.insert({Key: -2, Value: '-2'}));
+assert.writeOK(coll0.insert({Key: 2, Value: '2'}));
+assert.eq(2, coll0.find({Key: {$lte: -1}}).itcount());
+assert.eq(2, coll0.find({Key: {$gte: 1}}).itcount());
- // Ensure that read operations for 'Coll1', which *is* being migration are not stalled
- assert.eq(1, coll1.find({Key: {$lte: -1}}).itcount());
- assert.eq(1, coll1.find({Key: {$gte: 1}}).itcount());
+// Ensure that read operations for 'Coll1', which *is* being migrated, are not stalled
+assert.eq(1, coll1.find({Key: {$lte: -1}}).itcount());
+assert.eq(1, coll1.find({Key: {$gte: 1}}).itcount());
- // Ensure that all operations for non-sharded collections are not stalled
- var collUnsharded = testDB.CollUnsharded;
- assert.eq(0, collUnsharded.find({}).itcount());
- assert.writeOK(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
- assert.eq(1, collUnsharded.find({}).itcount());
+// Ensure that all operations for non-sharded collections are not stalled
+var collUnsharded = testDB.CollUnsharded;
+assert.eq(0, collUnsharded.find({}).itcount());
+assert.writeOK(collUnsharded.insert({TestKey: 0, Value: 'Zero'}));
+assert.eq(1, collUnsharded.find({}).itcount());
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- joinMoveChunk();
+joinMoveChunk();
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
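
The file above is an instance of the pause/wait/unpause pattern that chunk_manipulation_util.js provides: park the donor at a named moveChunk step, assert while it is parked, then release it. Distilled to its skeleton (the namespace and key below are placeholders):

// Park the donor just before it leaves the critical section.
pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
var join = moveChunkParallel(
    staticMongod, st.s0.host, {Key: 1}, null, 'TestDB.Coll1', st.shard1.shardName);
waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
// ... assertions that must hold while the migration is suspended ...
unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
join();  // moveChunkParallel hands back a join function for the parallel shell.
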
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 7e3ba438262..f731c0d3614 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -5,88 +5,84 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
- assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log("Testing failed migrations...");
+jsTest.log("Testing failed migrations...");
- var oldVersion = null;
- var newVersion = null;
+var oldVersion = null;
+var newVersion = null;
- // failMigrationCommit -- this creates an error that aborts the migration before the commit
- // migration command is sent.
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
+// failMigrationCommit -- this creates an error that aborts the migration before the commit
+// migration command is sent.
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.commandFailed(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandFailed(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.eq(oldVersion.t,
- newVersion.t,
- "The shard version major value should not change after a failed migration");
- // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
- assert.eq(2,
- newVersion.i,
- "The shard routing table should refresh on a failed migration and show the split");
+assert.eq(oldVersion.t,
+ newVersion.t,
+ "The shard version major value should not change after a failed migration");
+// Split does not cause a shard routing table refresh, but the moveChunk attempt will.
+assert.eq(2,
+ newVersion.i,
+ "The shard routing table should refresh on a failed migration and show the split");
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
+assert.commandWorked(
+ st.shard0.getDB("admin").runCommand({configureFailPoint: 'failMigrationCommit', mode: 'off'}));
- // migrationCommitNetworkError -- mimic migration commit command returning a network error,
- // whereupon the config server is queried to determine that this commit was successful.
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
+// migrationCommitNetworkError -- mimic migration commit command returning a network error,
+// whereupon the config server is queried to determine that this commit was successful.
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
- // Run a migration where there will still be chunks in the collection remaining on the shard
- // afterwards. This will cause the collection's shardVersion to be bumped higher.
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+// Run a migration where there will still be chunks in the collection remaining on the shard
+// afterwards. This will cause the collection's shardVersion to be bumped higher.
+oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.lt(
- oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
- assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
+assert.lt(oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
+assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
- // Run a migration to move off the shard's last chunk in the collection. The collection's
- // shardVersion will be reset.
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+// Run a migration to move off the shard's last chunk in the collection. The collection's
+// shardVersion will be reset.
+oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
- assert.gt(oldVersion.t,
- newVersion.t,
- "The version prior to the migration should be greater than the reset value");
+assert.gt(oldVersion.t,
+ newVersion.t,
+ "The version prior to the migration should be greater than the reset value");
- assert.eq(
- 0, newVersion.t, "The shard version should have reset, but the major value is not zero");
- assert.eq(
- 0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
+assert.eq(0, newVersion.t, "The shard version should have reset, but the major value is not zero");
+assert.eq(0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
-
- st.stop();
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
+st.stop();
})();
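
migration_failure.js toggles both failure modes through the generic configureFailPoint admin command. The repeated on/off dance can be factored as below; setFailPoint is an illustrative name, not a helper from the test libraries:

// Enable or disable a named server fail point; mode is 'alwaysOn' or 'off'
// here (the command also accepts forms such as {times: N}).
function setFailPoint(conn, failPointName, mode) {
    assert.commandWorked(
        conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: mode}));
}

setFailPoint(st.shard0, 'failMigrationCommit', 'alwaysOn');
// ... exercise the failing migration ...
setFailPoint(st.shard0, 'failMigrationCommit', 'off');
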
diff --git a/jstests/sharding/migration_id_index.js b/jstests/sharding/migration_id_index.js
index 72501a226cb..cb9fc45d7db 100644
--- a/jstests/sharding/migration_id_index.js
+++ b/jstests/sharding/migration_id_index.js
@@ -1,45 +1,45 @@
// This tests that when a chunk migration occurs, all replica set members of the destination shard
// get the correct _id index version for the collection.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var st = new ShardingTest({shards: 2, rs: {nodes: 2}});
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+var st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+var testDB = st.s.getDB("test");
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- // Create a collection with a v:1 _id index.
- var coll = testDB.getCollection("migration_id_index");
- coll.drop();
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- st.rs0.awaitReplication();
- var spec = GetIndexHelpers.findByName(
- st.rs0.getPrimary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
- spec = GetIndexHelpers.findByName(
- st.rs0.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
+// Create a collection with a v:1 _id index.
+var coll = testDB.getCollection("migration_id_index");
+coll.drop();
+assert.commandWorked(
+ testDB.createCollection(coll.getName(), {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+st.rs0.awaitReplication();
+var spec = GetIndexHelpers.findByName(
+ st.rs0.getPrimary().getDB("test").migration_id_index.getIndexes(), "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
+spec = GetIndexHelpers.findByName(
+ st.rs0.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
- // Move a chunk to the non-primary shard.
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 6}, to: st.shard1.shardName}));
+// Move a chunk to the non-primary shard.
+assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 6}, to: st.shard1.shardName}));
- // Check that the collection was created with a v:1 _id index on the non-primary shard.
- spec = GetIndexHelpers.findByName(
- st.rs1.getPrimary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
- spec = GetIndexHelpers.findByName(
- st.rs1.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
- assert.neq(spec, null, "_id index spec not found");
- assert.eq(spec.v, 1, tojson(spec));
+// Check that the collection was created with a v:1 _id index on the non-primary shard.
+spec = GetIndexHelpers.findByName(st.rs1.getPrimary().getDB("test").migration_id_index.getIndexes(),
+ "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
+spec = GetIndexHelpers.findByName(
+ st.rs1.getSecondary().getDB("test").migration_id_index.getIndexes(), "_id_");
+assert.neq(spec, null, "_id index spec not found");
+assert.eq(spec.v, 1, tojson(spec));
- st.stop();
+st.stop();
})();
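
migration_id_index.js repeats one lookup four times: fetch getIndexes() from a node and pull out the _id index spec by name. A minimal sketch of what GetIndexHelpers.findByName plausibly does (the real helper lives in jstests/libs/get_index_helpers.js, so this body is an assumption):

// Return the single index spec named `name`, or null when it is absent
// or ambiguous.
function findIndexByName(indexes, name) {
    const matches = indexes.filter(spec => spec.name === name);
    return matches.length === 1 ? matches[0] : null;
}

The test then inspects the returned spec's `v` field to confirm the index version survived the migration.
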
diff --git a/jstests/sharding/migration_ignore_interrupts_1.js b/jstests/sharding/migration_ignore_interrupts_1.js
index 83a77f08445..0272a204661 100644
--- a/jstests/sharding/migration_ignore_interrupts_1.js
+++ b/jstests/sharding/migration_ignore_interrupts_1.js
@@ -6,72 +6,72 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 3});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1,
- shard2 = st.shard2, shard0Coll1 = shard0.getCollection(ns1),
- shard1Coll1 = shard1.getCollection(ns1), shard2Coll1 = shard2.getCollection(ns1);
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 0}}));
- assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
- assert.writeOK(coll1.insert({a: -10}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.writeOK(coll1.insert({a: 10}));
- assert.eq(3, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(0, shard2Coll1.find().itcount());
- assert.eq(3, coll1.find().itcount());
-
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns1, find: {a: 10}, to: st.shard2.shardName, _waitForDelete: true}));
-
- // Shard0:
- // coll1: [-inf, 0) [0, 10)
- // Shard1:
- // Shard2:
- // coll1: [10, +inf)
-
- jsTest.log("Set up complete, now proceeding to test that migration interruptions fail.");
-
- // Start a migration between shard0 and shard1 on coll1 and then pause it
- pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
- assert.commandFailedWithCode(
- admin.runCommand({moveChunk: ns1, find: {a: -10}, to: st.shard2.shardName}),
- ErrorCodes.ConflictingOperationInProgress,
- "(1) A shard should not be able to be the donor for two ongoing migrations.");
-
- assert.commandFailedWithCode(
- admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard1.shardName}),
- ErrorCodes.ConflictingOperationInProgress,
- "(2) A shard should not be able to be the recipient of two ongoing migrations.");
-
- assert.commandFailedWithCode(
- admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard0.shardName}),
- ErrorCodes.ConflictingOperationInProgress,
- "(3) A shard should not be able to be both a donor and recipient of migrations.");
-
- // Finish migration
- unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(1, shard1Coll1.find().itcount());
- assert.eq(1, shard2Coll1.find().itcount());
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 3});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
+ shard2Coll1 = shard2.getCollection(ns1);
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 0}}));
+assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
+assert.writeOK(coll1.insert({a: -10}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.writeOK(coll1.insert({a: 10}));
+assert.eq(3, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(0, shard2Coll1.find().itcount());
+assert.eq(3, coll1.find().itcount());
+
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns1, find: {a: 10}, to: st.shard2.shardName, _waitForDelete: true}));
+
+// Shard0:
+// coll1: [-inf, 0) [0, 10)
+// Shard1:
+// Shard2:
+// coll1: [10, +inf)
+
+jsTest.log("Set up complete, now proceeding to test that migration interruptions fail.");
+
+// Start a migration between shard0 and shard1 on coll1 and then pause it
+pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+assert.commandFailedWithCode(
+ admin.runCommand({moveChunk: ns1, find: {a: -10}, to: st.shard2.shardName}),
+ ErrorCodes.ConflictingOperationInProgress,
+ "(1) A shard should not be able to be the donor for two ongoing migrations.");
+
+assert.commandFailedWithCode(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress,
+ "(2) A shard should not be able to be the recipient of two ongoing migrations.");
+
+assert.commandFailedWithCode(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: st.shard0.shardName}),
+ ErrorCodes.ConflictingOperationInProgress,
+ "(3) A shard should not be able to be both a donor and recipient of migrations.");
+
+// Finish migration
+unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(1, shard1Coll1.find().itcount());
+assert.eq(1, shard2Coll1.find().itcount());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_ignore_interrupts_2.js b/jstests/sharding/migration_ignore_interrupts_2.js
index 718c6a347ff..b60fa50ccf2 100644
--- a/jstests/sharding/migration_ignore_interrupts_2.js
+++ b/jstests/sharding/migration_ignore_interrupts_2.js
@@ -4,54 +4,54 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
+"use strict";
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1,
- shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1);
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ coll1 = mongos.getCollection(ns1), shard0 = st.shard0, shard1 = st.shard1,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1);
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(1, coll1.find().itcount());
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(1, coll1.find().itcount());
- // Shard0:
- // coll1: [-inf, +inf)
- // Shard1:
+// Shard0:
+// coll1: [-inf, +inf)
+// Shard1:
- jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
+jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
- // Start a migration between shard0 and shard1 on coll1, pause in steady state before commit.
- pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
+// Start a migration between shard0 and shard1 on coll1, pause in steady state before commit.
+pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
- jsTest.log('Sending false commit command....');
- assert.commandFailed(
- shard1.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
+jsTest.log('Sending false commit command....');
+assert.commandFailed(
+ shard1.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
- jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
- var res = shard1.adminCommand('_recvChunkStatus');
- assert.commandWorked(res);
- assert.eq(true, res.state === "steady", "False commit command succeeded.");
+jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
+var res = shard1.adminCommand('_recvChunkStatus');
+assert.commandWorked(res);
+assert.eq(true, res.state === "steady", "False commit command succeeded.");
- // Finish migration.
- unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
+// Finish migration.
+unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
- assert.eq(0, shard0Coll1.find().itcount());
- assert.eq(1, shard1Coll1.find().itcount());
+assert.eq(0, shard0Coll1.find().itcount());
+assert.eq(1, shard1Coll1.find().itcount());
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
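
migration_ignore_interrupts_2.js checks the recipient's state with the internal _recvChunkStatus command; the later interrupts tests reuse it to wait until the recipient abandons its migration. That recurring loop, shown in isolation (the shard handle and timeout mirror the tests):

// Wait for the recipient shard to report that no migration session is active.
assert.soon(function() {
    var res = shard1.adminCommand({_recvChunkStatus: 1});
    assert.commandWorked(res);
    return res.active === false;
}, "recipient never left its active migration state", 2 * 60 * 1000);
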
diff --git a/jstests/sharding/migration_ignore_interrupts_3.js b/jstests/sharding/migration_ignore_interrupts_3.js
index 13c5b15f6f9..e48159b77b8 100644
--- a/jstests/sharding/migration_ignore_interrupts_3.js
+++ b/jstests/sharding/migration_ignore_interrupts_3.js
@@ -8,101 +8,99 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 3});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
- shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
- shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
- shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
- shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(0, shard2Coll1.find().itcount());
- assert.eq(1, coll1.find().itcount());
-
- assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
- assert.writeOK(coll2.insert({a: 0}));
- assert.eq(1, shard0Coll2.find().itcount());
- assert.eq(0, shard1Coll2.find().itcount());
- assert.eq(0, shard2Coll2.find().itcount());
- assert.eq(1, coll2.find().itcount());
-
- // Shard0:
- // coll1: [-inf, +inf)
- // coll2: [-inf, +inf)
- // Shard1:
- // Shard2:
-
- jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
-
- // Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
- // check.
- pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
- // Abort migration on donor side, recipient is unaware.
- let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
- var abortedMigration = false;
- let inProgressStr = '';
- while (inProgressOps.hasNext()) {
- let op = inProgressOps.next();
- inProgressStr += tojson(op);
- if (op.command.moveChunk) {
- admin.killOp(op.opid);
- abortedMigration = true;
- }
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 3});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
+ shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
+ shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
+ shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(0, shard2Coll1.find().itcount());
+assert.eq(1, coll1.find().itcount());
+
+assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
+assert.writeOK(coll2.insert({a: 0}));
+assert.eq(1, shard0Coll2.find().itcount());
+assert.eq(0, shard1Coll2.find().itcount());
+assert.eq(0, shard2Coll2.find().itcount());
+assert.eq(1, coll2.find().itcount());
+
+// Shard0:
+// coll1: [-inf, +inf)
+// coll2: [-inf, +inf)
+// Shard1:
+// Shard2:
+
+jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
+
+// Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
+// check.
+pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+// Abort migration on donor side, recipient is unaware.
+let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
+var abortedMigration = false;
+let inProgressStr = '';
+while (inProgressOps.hasNext()) {
+ let op = inProgressOps.next();
+ inProgressStr += tojson(op);
+ if (op.command.moveChunk) {
+ admin.killOp(op.opid);
+ abortedMigration = true;
}
- assert.eq(
- true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
- unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- assert.throws(function() {
- joinMoveChunk();
- });
-
- // Start coll2 migration to shard2, pause recipient after delete step.
- pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
- joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
- waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
-
- jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
- unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
- assert.soon(function() {
- // Wait for the destination shard to report that it is not in an active migration.
- var res = shard1.adminCommand({'_recvChunkStatus': 1});
- return (res.active == false);
- }, "coll1 migration recipient didn't abort migration in clone phase.", 2 * 60 * 1000);
- assert.eq(
- 1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
- assert.eq(0,
- shard1Coll1.find().itcount(),
- "shard1 cloned documents despite donor migration abortion.");
-
- jsTest.log('Finishing coll2 migration, which should succeed....');
- unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
-
- assert.eq(0,
- shard0Coll2.find().itcount(),
- "donor shard0 failed to complete a migration after aborting a prior migration.");
- assert.eq(1, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+}
+assert.eq(
+ true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
+unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+assert.throws(function() {
+ joinMoveChunk();
+});
+
+// Start coll2 migration to shard2, pause recipient after delete step.
+pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
+waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
+
+jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
+unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+assert.soon(function() {
+ // Wait for the destination shard to report that it is not in an active migration.
+ var res = shard1.adminCommand({'_recvChunkStatus': 1});
+ return (res.active == false);
+}, "coll1 migration recipient didn't abort migration in clone phase.", 2 * 60 * 1000);
+assert.eq(1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
+assert.eq(
+ 0, shard1Coll1.find().itcount(), "shard1 cloned documents despite donor migration abortion.");
+
+jsTest.log('Finishing coll2 migration, which should succeed....');
+unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
+
+assert.eq(0,
+ shard0Coll2.find().itcount(),
+ "donor shard0 failed to complete a migration after aborting a prior migration.");
+assert.eq(1, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
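
The donor-side abort in this test (repeated in migration_ignore_interrupts_4.js below) works by scanning $currentOp for an in-flight moveChunk command and killing its opid. Factored into a hedged helper; killActiveMoveChunk is an illustrative name and the guard on op.command is a defensive addition:

// Kill every active moveChunk found in $currentOp; returns whether any was
// found. `adminDB` is the admin database of a mongos or shard primary.
function killActiveMoveChunk(adminDB) {
    var killed = false;
    adminDB.aggregate([{$currentOp: {allUsers: true}}]).forEach(function(op) {
        if (op.command && op.command.moveChunk) {
            adminDB.killOp(op.opid);
            killed = true;
        }
    });
    return killed;
}

assert(killActiveMoveChunk(admin), "no active moveChunk operation found to kill");
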
diff --git a/jstests/sharding/migration_ignore_interrupts_4.js b/jstests/sharding/migration_ignore_interrupts_4.js
index 64c7b89e25e..bc692a9897c 100644
--- a/jstests/sharding/migration_ignore_interrupts_4.js
+++ b/jstests/sharding/migration_ignore_interrupts_4.js
@@ -8,102 +8,101 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 3});
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
- ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
- shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
- shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
- shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
- shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
- assert.writeOK(coll1.insert({a: 0}));
- assert.eq(1, shard0Coll1.find().itcount());
- assert.eq(0, shard1Coll1.find().itcount());
- assert.eq(0, shard2Coll1.find().itcount());
- assert.eq(1, coll1.find().itcount());
-
- assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
- assert.writeOK(coll2.insert({a: 0}));
- assert.eq(1, shard0Coll2.find().itcount());
- assert.eq(0, shard1Coll2.find().itcount());
- assert.eq(0, shard2Coll2.find().itcount());
- assert.eq(1, coll2.find().itcount());
-
- // Shard0:
- // coll1: [-inf, +inf)
- // coll2: [-inf, +inf)
- // Shard1:
- // Shard2:
-
- jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
-
- // Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
- pauseMigrateAtStep(shard1, migrateStepNames.cloned);
- pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
- waitForMigrateStep(shard1, migrateStepNames.cloned);
-
- // Abort migration on donor side, recipient is unaware
- let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
- var abortedMigration = false;
- let inProgressStr = '';
- while (inProgressOps.hasNext()) {
- let op = inProgressOps.next();
- inProgressStr += tojson(op);
- if (op.command.moveChunk) {
- admin.killOp(op.opid);
- abortedMigration = true;
- }
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 3});
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns1 = dbName + ".foo",
+ ns2 = dbName + ".bar", coll1 = mongos.getCollection(ns1), coll2 = mongos.getCollection(ns2),
+ shard0 = st.shard0, shard1 = st.shard1, shard2 = st.shard2,
+ shard0Coll1 = shard0.getCollection(ns1), shard1Coll1 = shard1.getCollection(ns1),
+ shard2Coll1 = shard2.getCollection(ns1), shard0Coll2 = shard0.getCollection(ns2),
+ shard1Coll2 = shard1.getCollection(ns2), shard2Coll2 = shard2.getCollection(ns2);
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+assert.writeOK(coll1.insert({a: 0}));
+assert.eq(1, shard0Coll1.find().itcount());
+assert.eq(0, shard1Coll1.find().itcount());
+assert.eq(0, shard2Coll1.find().itcount());
+assert.eq(1, coll1.find().itcount());
+
+assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
+assert.writeOK(coll2.insert({a: 0}));
+assert.eq(1, shard0Coll2.find().itcount());
+assert.eq(0, shard1Coll2.find().itcount());
+assert.eq(0, shard2Coll2.find().itcount());
+assert.eq(1, coll2.find().itcount());
+
+// Shard0:
+// coll1: [-inf, +inf)
+// coll2: [-inf, +inf)
+// Shard1:
+// Shard2:
+
+jsTest.log("Set up complete, now proceeding to test that migration interruption fails.");
+
+// Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
+pauseMigrateAtStep(shard1, migrateStepNames.cloned);
+pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll1.getFullName(), st.shard1.shardName);
+waitForMigrateStep(shard1, migrateStepNames.cloned);
+
+// Abort migration on donor side, recipient is unaware
+let inProgressOps = admin.aggregate([{$currentOp: {'allUsers': true}}]);
+var abortedMigration = false;
+let inProgressStr = '';
+while (inProgressOps.hasNext()) {
+ let op = inProgressOps.next();
+ inProgressStr += tojson(op);
+ if (op.command.moveChunk) {
+ admin.killOp(op.opid);
+ abortedMigration = true;
}
- assert.eq(
- true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
- unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
- assert.throws(function() {
- joinMoveChunk();
- });
-
- // Start coll2 migration to shard2, pause recipient after cloning step.
- pauseMigrateAtStep(shard2, migrateStepNames.cloned);
- joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
- waitForMigrateStep(shard2, migrateStepNames.cloned);
-
- // Populate donor (shard0) xfermods log.
- assert.writeOK(coll2.insert({a: 1}));
- assert.writeOK(coll2.insert({a: 2}));
- assert.eq(3, coll2.find().itcount(), "Failed to insert documents into coll2.");
- assert.eq(3, shard0Coll2.find().itcount());
-
- jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
- unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
- assert.soon(function() {
- // Wait for the destination shard to report that it is not in an active migration.
- var res = shard1.adminCommand({'_recvChunkStatus': 1});
- return (res.active == false);
- }, "coll1 migration recipient didn't abort migration in catchup phase.", 2 * 60 * 1000);
- assert.eq(
- 1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
-
- jsTest.log('Finishing coll2 migration, which should succeed....');
- unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
- assert.doesNotThrow(function() {
- joinMoveChunk();
- });
- assert.eq(0,
- shard0Coll2.find().itcount(),
- "donor shard0 failed to complete a migration after aborting a prior migration.");
- assert.eq(3, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+}
+assert.eq(
+ true, abortedMigration, "Failed to abort migration, current running ops: " + inProgressStr);
+unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+assert.throws(function() {
+ joinMoveChunk();
+});
+
+// Start coll2 migration to shard2, pause recipient after cloning step.
+pauseMigrateAtStep(shard2, migrateStepNames.cloned);
+joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 0}, null, coll2.getFullName(), st.shard2.shardName);
+waitForMigrateStep(shard2, migrateStepNames.cloned);
+
+// Populate donor (shard0) xfermods log.
+assert.writeOK(coll2.insert({a: 1}));
+assert.writeOK(coll2.insert({a: 2}));
+assert.eq(3, coll2.find().itcount(), "Failed to insert documents into coll2.");
+assert.eq(3, shard0Coll2.find().itcount());
+
+jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
+unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
+assert.soon(function() {
+ // Wait for the destination shard to report that it is not in an active migration.
+ var res = shard1.adminCommand({'_recvChunkStatus': 1});
+ return (res.active == false);
+}, "coll1 migration recipient didn't abort migration in catchup phase.", 2 * 60 * 1000);
+assert.eq(1, shard0Coll1.find().itcount(), "donor shard0 completed a migration that it aborted.");
+
+jsTest.log('Finishing coll2 migration, which should succeed....');
+unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
+assert.doesNotThrow(function() {
+ joinMoveChunk();
+});
+assert.eq(0,
+ shard0Coll2.find().itcount(),
+ "donor shard0 failed to complete a migration after aborting a prior migration.");
+assert.eq(3, shard2Coll2.find().itcount(), "shard2 failed to complete migration.");
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_move_chunk_after_receive.js b/jstests/sharding/migration_move_chunk_after_receive.js
index 662dc1879e4..fe28af0d8c3 100644
--- a/jstests/sharding/migration_move_chunk_after_receive.js
+++ b/jstests/sharding/migration_move_chunk_after_receive.js
@@ -5,70 +5,70 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- var st = new ShardingTest({shards: 3});
+var st = new ShardingTest({shards: 3});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
- var testDB = st.s0.getDB('TestDB');
- var testColl = testDB.TestColl;
+var testDB = st.s0.getDB('TestDB');
+var testColl = testDB.TestColl;
- // Create 3 chunks with one document each and move them so that 0 is on shard0, 1 is on shard1,
- // etc.
- assert.writeOK(testColl.insert({Key: 0, Value: 'Value'}));
- assert.writeOK(testColl.insert({Key: 100, Value: 'Value'}));
- assert.writeOK(testColl.insert({Key: 101, Value: 'Value'}));
- assert.writeOK(testColl.insert({Key: 200, Value: 'Value'}));
+// Create 3 chunks with one document each and move them so that 0 is on shard0, 1 is on shard1,
+// etc.
+assert.writeOK(testColl.insert({Key: 0, Value: 'Value'}));
+assert.writeOK(testColl.insert({Key: 100, Value: 'Value'}));
+assert.writeOK(testColl.insert({Key: 101, Value: 'Value'}));
+assert.writeOK(testColl.insert({Key: 200, Value: 'Value'}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 100}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 101}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 200}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 100}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 101}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {Key: 200}}));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {Key: 100},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {Key: 101},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'TestDB.TestColl',
- find: {Key: 200},
- to: st.shard2.shardName,
- _waitForDelete: true
- }));
+assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {Key: 100},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {Key: 101},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(st.s0.adminCommand({
+ moveChunk: 'TestDB.TestColl',
+ find: {Key: 200},
+ to: st.shard2.shardName,
+ _waitForDelete: true
+}));
- // Start moving chunk 0 from shard0 to shard1 and pause it just before the metadata is written
- // (but after the migration of the documents has been committed on the recipient)
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- var joinMoveChunk0 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+// Start moving chunk 0 from shard0 to shard1 and pause it just before the metadata is written
+// (but after the migration of the documents has been committed on the recipient)
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+var joinMoveChunk0 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- pauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
- var joinMoveChunk1 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 100}, null, 'TestDB.TestColl', st.shard2.shardName);
- waitForMoveChunkStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
+pauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
+var joinMoveChunk1 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 100}, null, 'TestDB.TestColl', st.shard2.shardName);
+waitForMoveChunkStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
- unpauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
+unpauseMoveChunkAtStep(st.shard1, moveChunkStepNames.chunkDataCommitted);
- joinMoveChunk0();
- joinMoveChunk1();
+joinMoveChunk0();
+joinMoveChunk1();
- var foundDocs = testColl.find().toArray();
- assert.eq(4, foundDocs.length, 'Incorrect number of documents found ' + tojson(foundDocs));
+var foundDocs = testColl.find().toArray();
+assert.eq(4, foundDocs.length, 'Incorrect number of documents found ' + tojson(foundDocs));
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_server_status.js b/jstests/sharding/migration_server_status.js
index 63f4c828d1e..423b8353d89 100644
--- a/jstests/sharding/migration_server_status.js
+++ b/jstests/sharding/migration_server_status.js
@@ -6,72 +6,71 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("db.coll");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-
- // Pause the migration once it starts on both shards -- somewhat arbitrary pause point.
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
-
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 1}, null, coll.getFullName(), st.shard1.shardName);
-
- var assertMigrationStatusOnServerStatus = function(serverStatusResult,
- sourceShard,
- destinationShard,
- isDonorShard,
- minKey,
- maxKey,
- collectionName) {
- var migrationResult = serverStatusResult.sharding.migrations;
- assert.eq(sourceShard, migrationResult.source);
- assert.eq(destinationShard, migrationResult.destination);
- assert.eq(isDonorShard, migrationResult.isDonorShard);
- assert.eq(minKey, migrationResult.chunk.min);
- assert.eq(maxKey, migrationResult.chunk.max);
- assert.eq(collectionName, migrationResult.collection);
- };
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);
-
- // Source shard should return a migration status.
- var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
- assert(shard0ServerStatus.sharding.migrations);
- assertMigrationStatusOnServerStatus(shard0ServerStatus,
- st.shard0.shardName,
- st.shard1.shardName,
- true,
- {"_id": 0},
- {"_id": {"$maxKey": 1}},
- coll + "");
-
- // Destination shard should not return any migration status.
- var shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
- assert(!shard1ServerStatus.sharding.migrations);
-
- // Mongos should never return a migration status.
- var mongosServerStatus = st.s0.getDB('admin').runCommand({serverStatus: 1});
- assert(!mongosServerStatus.sharding.migrations);
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
- joinMoveChunk();
-
- // Migration is over, should no longer get a migration status.
- var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
- assert(!shard0ServerStatus.sharding.migrations);
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
-
+'use strict';
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("db.coll");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+
+// Pause the migration once it starts on the donor shard -- a somewhat arbitrary pause point.
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 1}, null, coll.getFullName(), st.shard1.shardName);
+
+var assertMigrationStatusOnServerStatus = function(serverStatusResult,
+ sourceShard,
+ destinationShard,
+ isDonorShard,
+ minKey,
+ maxKey,
+ collectionName) {
+ var migrationResult = serverStatusResult.sharding.migrations;
+ assert.eq(sourceShard, migrationResult.source);
+ assert.eq(destinationShard, migrationResult.destination);
+ assert.eq(isDonorShard, migrationResult.isDonorShard);
+ assert.eq(minKey, migrationResult.chunk.min);
+ assert.eq(maxKey, migrationResult.chunk.max);
+ assert.eq(collectionName, migrationResult.collection);
+};
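+// Shape being asserted on, inferred from the field checks above (not exhaustive):
+//   {sharding: {migrations: {source, destination, isDonorShard, chunk: {min, max}, collection}}}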
+
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+
+// Source shard should return a migration status.
+var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
+assert(shard0ServerStatus.sharding.migrations);
+assertMigrationStatusOnServerStatus(shard0ServerStatus,
+ st.shard0.shardName,
+ st.shard1.shardName,
+ true,
+ {"_id": 0},
+ {"_id": {"$maxKey": 1}},
+ coll + "");
+
+// Destination shard should not return any migration status.
+var shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
+assert(!shard1ServerStatus.sharding.migrations);
+
+// Mongos should never return a migration status.
+var mongosServerStatus = st.s0.getDB('admin').runCommand({serverStatus: 1});
+assert(!mongosServerStatus.sharding.migrations);
+
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+joinMoveChunk();
+
+// Migration is over, should no longer get a migration status.
+var shard0ServerStatus = st.shard0.getDB('admin').runCommand({serverStatus: 1});
+assert(!shard0ServerStatus.sharding.migrations);
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index 7b0802286b1..73ee2dea163 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -17,178 +17,175 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}});
-
- const dbName = "testDB";
- const ns = dbName + ".foo";
-
- let mongos = st.s0;
- let admin = mongos.getDB('admin');
- let coll = mongos.getCollection(ns);
-
- let donor = st.shard0;
- let recipient = st.shard1;
- let donorColl = donor.getCollection(ns);
- let recipientColl = recipient.getCollection(ns);
- let donorLocal = donor.getDB('local');
- let recipientLocal = recipient.getDB('local');
-
- // Two chunks
- // Donor: [0, 2) [2, 5)
- // Recipient:
- jsTest.log('Enable sharding of the collection and split into two chunks....');
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
-
- // 6 documents,
- // donor: 2 in the first chunk, 3 in the second.
- // recipient: 1 document (shardkey overlaps with a doc in second chunk of donor shard)
- jsTest.log('Inserting 5 docs into donor shard, ensuring one orphan on the recipient shard....');
-
- // Insert just one document into the collection and fail a migration after the cloning step in
- // order to get an orphan onto the recipient shard with the correct UUID for the collection.
- assert.writeOK(coll.insert({_id: 2}));
- assert.eq(1, donorColl.count());
- assert.commandWorked(recipient.adminCommand(
- {configureFailPoint: "failMigrationLeaveOrphans", mode: "alwaysOn"}));
- assert.commandFailed(
- admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 2}, to: st.shard1.shardName}));
- assert.eq(1, recipientColl.count());
- assert.commandWorked(
- recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "off"}));
-
- // Insert the remaining documents into the collection.
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.insert({_id: 3}));
- assert.writeOK(coll.insert({_id: 4}));
- assert.eq(5, donorColl.count());
-
- /**
- * Set failpoint: recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
- jsTest.log('setting recipient failpoint cloned');
- pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
- /**
- * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
- */
-
- // Donor: [0, 2)
- // Recipient: [2, 5)
- jsTest.log('Starting chunk migration, pause after cloning...');
-
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), st.shard1.shardName);
-
- /**
- * Wait for recipient to finish cloning.
- * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
- * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
- */
-
- waitForMigrateStep(recipient, migrateStepNames.cloned);
-
- jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
-
- assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
- assert.writeOK(coll.remove({_id: 4}));
-
- /**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the transfer mods log from donor and finish migration.
- */
-
- jsTest.log('Continuing and finishing migration...');
- unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
- joinMoveChunk();
-
- /**
- * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
- * (because third doc in recipient shard's chunk got deleted on the donor shard during
- * migration).
- */
-
- jsTest.log('Checking that documents are on the shards they should be...');
-
- assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
- assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
- assert.eq(4, coll.count(), "Collection total is not 4!");
-
- /**
- * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs,
- */
-
- jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
-
- function assertEqAndDumpOpLog(expected, actual, msg) {
- if (expected === actual)
- return;
-
- print('Dumping oplog contents for', ns);
- print('On donor:');
- print(tojson(donorLocal.oplog.rs.find({ns: ns}).toArray()));
-
- print('On recipient:');
- print(tojson(recipientLocal.oplog.rs.find({ns: ns}).toArray()));
-
- assert.eq(expected, actual, msg);
- }
-
- var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
- assertEqAndDumpOpLog(1,
- donorOplogRes,
- "fromMigrate flag wasn't set on the donor shard's oplog for " +
- "migrating delete op on {_id: 2}! Test #2 failed.");
-
- donorOplogRes =
- donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
- assertEqAndDumpOpLog(1,
- donorOplogRes,
- "Real delete of {_id: 4} on donor shard incorrectly set the " +
- "fromMigrate flag in the oplog! Test #5 failed.");
-
- // Expect to see two oplog entries for {_id: 2} with 'fromMigrate: true', because this doc was
- // cloned as part of the first failed migration as well as the second successful migration.
- var recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
- assertEqAndDumpOpLog(2,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
-
- recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
- assertEqAndDumpOpLog(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on the old {_id: 2} that overlapped " +
- "with the chunk about to be copied! Test #1 failed.");
-
- recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
- assertEqAndDumpOpLog(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for update op on {_id: 3}! Test #4 failed.");
-
- recipientOplogRes =
- recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
- assertEqAndDumpOpLog(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on {_id: 4} that occurred during " +
- "migration! Test #5 failed.");
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}});
+
+const dbName = "testDB";
+const ns = dbName + ".foo";
+
+let mongos = st.s0;
+let admin = mongos.getDB('admin');
+let coll = mongos.getCollection(ns);
+
+let donor = st.shard0;
+let recipient = st.shard1;
+let donorColl = donor.getCollection(ns);
+let recipientColl = recipient.getCollection(ns);
+let donorLocal = donor.getDB('local');
+let recipientLocal = recipient.getDB('local');
+
+// Two chunks
+// Donor: [0, 2) [2, 5)
+// Recipient:
+jsTest.log('Enable sharding of the collection and split into two chunks....');
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
+
+// 6 documents:
+// donor: 2 in the first chunk, 3 in the second.
+// recipient: 1 document (shard key overlaps with a doc in the second chunk of the donor shard)
+jsTest.log('Inserting 5 docs into donor shard, ensuring one orphan on the recipient shard....');
+
+// Insert just one document into the collection and fail a migration after the cloning step in
+// order to get an orphan onto the recipient shard with the correct UUID for the collection.
+assert.writeOK(coll.insert({_id: 2}));
+assert.eq(1, donorColl.count());
+assert.commandWorked(
+ recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "alwaysOn"}));
+assert.commandFailed(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 2}, to: st.shard1.shardName}));
+assert.eq(1, recipientColl.count());
+assert.commandWorked(
+ recipient.adminCommand({configureFailPoint: "failMigrationLeaveOrphans", mode: "off"}));
+
+// Insert the remaining documents into the collection.
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 3}));
+assert.writeOK(coll.insert({_id: 4}));
+assert.eq(5, donorColl.count());
+
+/**
+ * Set failpoint: recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+jsTest.log('setting recipient failpoint cloned');
+pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+/**
+ * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
+ */
+
+// Donor: [0, 2)
+// Recipient: [2, 5)
+jsTest.log('Starting chunk migration, pause after cloning...');
+
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), st.shard1.shardName);
+
+/**
+ * Wait for recipient to finish cloning.
+ * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
+ * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
+ */
+
+waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
+
+assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
+assert.writeOK(coll.remove({_id: 4}));
+
+/**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the transfer mods log from donor and finish migration.
+ */
+
+jsTest.log('Continuing and finishing migration...');
+unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+joinMoveChunk();
+
+/**
+ * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
+ * (because third doc in recipient shard's chunk got deleted on the donor shard during
+ * migration).
+ */
+
+jsTest.log('Checking that documents are on the shards they should be...');
+
+assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
+assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
+assert.eq(4, coll.count(), "Collection total is not 4!");
+
+/**
+ * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs.
+ */
+
+jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
+
+function assertEqAndDumpOpLog(expected, actual, msg) {
+ if (expected === actual)
+ return;
+
+ print('Dumping oplog contents for', ns);
+ print('On donor:');
+ print(tojson(donorLocal.oplog.rs.find({ns: ns}).toArray()));
+
+ print('On recipient:');
+ print(tojson(recipientLocal.oplog.rs.find({ns: ns}).toArray()));
+
+ assert.eq(expected, actual, msg);
+}
+
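+// Oplog query shapes used below: 'op' is the operation type ('i' = insert, 'u' = update,
+// 'd' = delete), and 'fromMigrate: true' marks operations applied as part of a chunk migration
+// rather than by a client.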
+var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+assertEqAndDumpOpLog(1,
+ donorOplogRes,
+ "fromMigrate flag wasn't set on the donor shard's oplog for " +
+ "migrating delete op on {_id: 2}! Test #2 failed.");
+
+donorOplogRes =
+ donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
+assertEqAndDumpOpLog(1,
+ donorOplogRes,
+ "Real delete of {_id: 4} on donor shard incorrectly set the " +
+ "fromMigrate flag in the oplog! Test #5 failed.");
+
+// Expect to see two oplog entries for {_id: 2} with 'fromMigrate: true', because this doc was
+// cloned as part of the first failed migration as well as the second successful migration.
+var recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
+assertEqAndDumpOpLog(2,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
+
+recipientOplogRes = recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on the old {_id: 2} that overlapped " +
+ "with the chunk about to be copied! Test #1 failed.");
+
+recipientOplogRes = recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
+assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for update op on {_id: 3}! Test #4 failed.");
+
+recipientOplogRes = recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
+assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on {_id: 4} that occurred during " +
+ "migration! Test #5 failed.");
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index 2fc2c467c0f..91c7a460196 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -18,128 +18,126 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- "use strict";
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- /**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
- var st = new ShardingTest({shards: 2, mongos: 1});
- st.stopBalancer();
-
- var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo",
- coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
- donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns);
-
- /**
- * Exable sharding, and split collection into two chunks.
- */
-
- // Two chunks
- // Donor: [0, 20) [20, 40)
- // Recipient:
- jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
-
- /**
- * Insert data into collection
- */
-
- // 10 documents in each chunk on the donor
- jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
- for (var i = 0; i < 10; ++i)
- assert.writeOK(coll.insert({a: i}));
- for (var i = 20; i < 30; ++i)
- assert.writeOK(coll.insert({a: i}));
- assert.eq(20, coll.count());
-
- /**
- * Set failpoints. Recipient will crash if an out of chunk range data op is
- * received from donor. Recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
- jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
- assert.commandWorked(recipient.getDB('admin').runCommand(
- {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
-
- jsTest.log(
- 'Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
- pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
- /**
- * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
- * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
- * cloning step (when it reaches the recipient failpoint).
- */
-
- // Donor: [0, 20)
- // Recipient: [20, 40)
- jsTest.log('Starting migration, pause after cloning...');
- var joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), st.shard1.shardName);
-
- /**
- * Wait for recipient to finish cloning step.
- * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining
- *chunk.
- * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
- *chunk.
- * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
- *chunk.
- *
- * This will populate the migration transfer mods log, which the recipient will collect when it
- * is unpaused.
- */
-
- waitForMigrateStep(recipient, migrateStepNames.cloned);
-
- jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
- assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
-
- jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
- assert.writeOK(coll.insert({a: 10}));
- assert.writeOK(coll.insert({a: 30}));
-
- jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
- assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
- assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
-
- /**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the new ops from the donor shard's migration transfer mods log, and finish.
- */
-
- jsTest.log('Continuing and finishing migration...');
- unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
- joinMoveChunk();
-
- /**
- * Check documents are where they should be: 6 docs in each shard's respective chunk.
- */
-
- jsTest.log('Checking that documents are on the shards they should be...');
- assert.eq(6, donorColl.count());
- assert.eq(6, recipientColl.count());
- assert.eq(12, coll.count());
-
- /**
- * Check that the updated documents are where they should be, one on each shard.
- */
-
- jsTest.log('Checking that documents were updated correctly...');
- var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
- assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
- var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
- assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
-
- jsTest.log('DONE!');
- MongoRunner.stopMongod(staticMongod);
- st.stop();
-
+"use strict";
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+/**
+ * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
+ */
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+st.stopBalancer();
+
+var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo",
+ coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
+ donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns);
+
+/**
+ * Enable sharding, and split the collection into two chunks.
+ */
+
+// Two chunks
+// Donor: [0, 20) [20, 40)
+// Recipient:
+jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
+
+/**
+ * Insert data into collection
+ */
+
+// 10 documents in each chunk on the donor
+jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
+for (var i = 0; i < 10; ++i)
+ assert.writeOK(coll.insert({a: i}));
+for (var i = 20; i < 30; ++i)
+ assert.writeOK(coll.insert({a: i}));
+assert.eq(20, coll.count());
+
+/**
+ * Set failpoints. Recipient will crash if an out of chunk range data op is
+ * received from donor. Recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
+assert.commandWorked(recipient.getDB('admin').runCommand(
+ {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
+
+jsTest.log('Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
+pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+/**
+ * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
+ * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
+ * cloning step (when it reaches the recipient failpoint).
+ */
+
+// Donor: [0, 20)
+// Recipient: [20, 40)
+jsTest.log('Starting migration, pause after cloning...');
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), st.shard1.shardName);
+
+/**
+ * Wait for recipient to finish cloning step.
+ * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining
+ * chunk.
+ * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ * chunk.
+ * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ * chunk.
+ *
+ * This will populate the migration transfer mods log, which the recipient will collect when it
+ * is unpaused.
+ */
+
+waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
+assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
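+// The range [5, 25) covers {a: 5..9} in the chunk staying on the donor and {a: 20..24} in the
+// migrating chunk, i.e. five deletes on each side.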
+
+jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
+assert.writeOK(coll.insert({a: 10}));
+assert.writeOK(coll.insert({a: 30}));
+
+jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
+assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
+assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
+
+/**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the new ops from the donor shard's migration transfer mods log, and finish.
+ */
+
+jsTest.log('Continuing and finishing migration...');
+unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+joinMoveChunk();
+
+/**
+ * Check documents are where they should be: 6 docs in each shard's respective chunk.
+ */
+
+jsTest.log('Checking that documents are on the shards they should be...');
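+// Donor keeps {a: 0..4} plus the newly inserted {a: 10}; the recipient ends up with {a: 25..29}
+// plus the newly inserted {a: 30}: six documents each, twelve in total.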
+assert.eq(6, donorColl.count());
+assert.eq(6, recipientColl.count());
+assert.eq(12, coll.count());
+
+/**
+ * Check that the updated documents are where they should be, one on each shard.
+ */
+
+jsTest.log('Checking that documents were updated correctly...');
+var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
+assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
+var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
+assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
+
+jsTest.log('DONE!');
+MongoRunner.stopMongod(staticMongod);
+st.stop();
})();
diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
index ab125e12e21..b7cd5ba2876 100644
--- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js
@@ -3,41 +3,39 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- // Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration
- // process that crashed in the middle of the critical section.
+// Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration
+// process that crashed in the middle of the critical section.
- var recoveryDoc = {
- _id: 'minOpTimeRecovery',
- configsvrConnectionString: st.configRS.getURL(),
- shardName: st.shard0.shardName,
- minOpTime: {ts: Timestamp(0, 0), t: 0},
- minOpTimeUpdaters: 2
- };
+var recoveryDoc = {
+ _id: 'minOpTimeRecovery',
+ configsvrConnectionString: st.configRS.getURL(),
+ shardName: st.shard0.shardName,
+ minOpTime: {ts: Timestamp(0, 0), t: 0},
+ minOpTimeUpdaters: 2
+};
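+// A nonzero 'minOpTimeUpdaters' simulates updaters that never finished; after the restart below,
+// the shard is expected to refresh its minOpTime and reset the counter to zero.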
- assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
+assert.writeOK(st.shard0.getDB('admin').system.version.insert(recoveryDoc));
- // Make sure test is setup correctly.
- var minOpTimeRecoveryDoc =
- st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
+// Make sure the test is set up correctly.
+var minOpTimeRecoveryDoc =
+ st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
- assert.neq(null, minOpTimeRecoveryDoc);
- assert.eq(0, minOpTimeRecoveryDoc.minOpTime.ts.getTime());
- assert.eq(2, minOpTimeRecoveryDoc.minOpTimeUpdaters);
+assert.neq(null, minOpTimeRecoveryDoc);
+assert.eq(0, minOpTimeRecoveryDoc.minOpTime.ts.getTime());
+assert.eq(2, minOpTimeRecoveryDoc.minOpTimeUpdaters);
- st.restartShardRS(0);
+st.restartShardRS(0);
- // After the restart, the shard should have updated the opTime and reset minOpTimeUpdaters.
- minOpTimeRecoveryDoc =
- st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
+// After the restart, the shard should have updated the opTime and reset minOpTimeUpdaters.
+minOpTimeRecoveryDoc = st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'});
- assert.neq(null, minOpTimeRecoveryDoc);
- assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
- assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
-
- st.stop();
+assert.neq(null, minOpTimeRecoveryDoc);
+assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
+assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
+st.stop();
})();
diff --git a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
index c33eabeffd7..143b939d381 100644
--- a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
+++ b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
@@ -2,38 +2,37 @@
* Tests that the minOpTimeRecovery document will be created after a migration.
*/
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 2});
-
- var testDB = st.s.getDB('test');
- testDB.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-
- var priConn = st.configRS.getPrimary();
- var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1});
- replStatus.members.forEach(function(memberState) {
- if (memberState.state == 1) { // if primary
- assert.neq(null, memberState.optime);
- assert.neq(null, memberState.optime.ts);
- assert.neq(null, memberState.optime.t);
- }
- });
-
- testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName});
-
- var shardAdmin = st.rs0.getPrimary().getDB('admin');
- var minOpTimeRecoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'});
-
- assert.neq(null, minOpTimeRecoveryDoc);
- assert.eq('minOpTimeRecovery', minOpTimeRecoveryDoc._id);
- assert.eq(st.configRS.getURL(),
- minOpTimeRecoveryDoc.configsvrConnectionString); // TODO SERVER-34166: Remove.
- assert.eq(st.shard0.shardName, minOpTimeRecoveryDoc.shardName); // TODO SERVER-34166: Remove.
- assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
- assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
-
- st.stop();
-
+"use strict";
+
+var st = new ShardingTest({shards: 2});
+
+var testDB = st.s.getDB('test');
+testDB.adminCommand({enableSharding: 'test'});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+
+var priConn = st.configRS.getPrimary();
+var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1});
+replStatus.members.forEach(function(memberState) {
+ if (memberState.state == 1) { // if primary
+ assert.neq(null, memberState.optime);
+ assert.neq(null, memberState.optime.ts);
+ assert.neq(null, memberState.optime.t);
+ }
+});
+
+testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName});
+
+var shardAdmin = st.rs0.getPrimary().getDB('admin');
+var minOpTimeRecoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'});
+
+assert.neq(null, minOpTimeRecoveryDoc);
+assert.eq('minOpTimeRecovery', minOpTimeRecoveryDoc._id);
+assert.eq(st.configRS.getURL(),
+ minOpTimeRecoveryDoc.configsvrConnectionString); // TODO SERVER-34166: Remove.
+assert.eq(st.shard0.shardName, minOpTimeRecoveryDoc.shardName); // TODO SERVER-34166: Remove.
+assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0);
+assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters);
+
+st.stop();
})();
diff --git a/jstests/sharding/missing_key.js b/jstests/sharding/missing_key.js
index 304d77fc839..14078cbff24 100644
--- a/jstests/sharding/missing_key.js
+++ b/jstests/sharding/missing_key.js
@@ -1,42 +1,42 @@
// Test that the shardCollection command fails when a preexisting document lacks a shard key field.
// SERVER-8772
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var db = st.s.getDB('testDb');
- var coll = db.testColl;
+var db = st.s.getDB('testDb');
+var coll = db.testColl;
- assert.writeOK(coll.insert({x: 1, z: 1}));
- assert.writeOK(coll.insert({y: 1, z: 1}));
+assert.writeOK(coll.insert({x: 1, z: 1}));
+assert.writeOK(coll.insert({y: 1, z: 1}));
- assert.commandWorked(db.adminCommand({enableSharding: 'testDb'}));
+assert.commandWorked(db.adminCommand({enableSharding: 'testDb'}));
- /**
- * Assert that the shardCollection command fails, with a preexisting index on the provided
- * 'shardKey'.
- */
- function assertInvalidShardKey(shardKey) {
- // Manually create a shard key index.
- coll.dropIndexes();
- coll.ensureIndex(shardKey);
+/**
+ * Assert that the shardCollection command fails, with a preexisting index on the provided
+ * 'shardKey'.
+ */
+function assertInvalidShardKey(shardKey) {
+ // Manually create a shard key index.
+ coll.dropIndexes();
+ coll.ensureIndex(shardKey);
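+    // (ensureIndex is the legacy shell alias for createIndex.)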
- // Ensure that the shard key index identifies 'x' as present in one document and absent in
- // the other.
- assert.eq(1, coll.find({x: 1}).hint(shardKey).itcount());
- assert.eq(1, coll.find({x: {$exists: false}}).hint(shardKey).itcount());
+ // Ensure that the shard key index identifies 'x' as present in one document and absent in
+ // the other.
+ assert.eq(1, coll.find({x: 1}).hint(shardKey).itcount());
+ assert.eq(1, coll.find({x: {$exists: false}}).hint(shardKey).itcount());
- // Assert that the shardCollection command fails with the provided 'shardKey'.
- assert.commandFailed(db.adminCommand({shardCollection: 'testDb.testColl', key: shardKey}),
- 'shardCollection should have failed on key ' + tojson(shardKey));
- }
+ // Assert that the shardCollection command fails with the provided 'shardKey'.
+ assert.commandFailed(db.adminCommand({shardCollection: 'testDb.testColl', key: shardKey}),
+ 'shardCollection should have failed on key ' + tojson(shardKey));
+}
- // Test single, compound, and hashed shard keys.
- assertInvalidShardKey({x: 1});
- assertInvalidShardKey({x: 1, y: 1});
- assertInvalidShardKey({y: 1, x: 1});
- assertInvalidShardKey({x: 'hashed'});
+// Test single, compound, and hashed shard keys.
+assertInvalidShardKey({x: 1});
+assertInvalidShardKey({x: 1, y: 1});
+assertInvalidShardKey({y: 1, x: 1});
+assertInvalidShardKey({x: 'hashed'});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
index 8cb8fe8f7fb..56c766e24ba 100644
--- a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+++ b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
@@ -7,81 +7,89 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
-
- // This test uses authentication and runs commands without authenticating, which is not
- // compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/multiVersion/libs/multi_rs.js");
-
- // TODO SERVER-32672: remove this flag.
- TestData.skipGossipingClusterTime = true;
- const keyFile = 'jstests/libs/key1';
- const adminUser = {db: "admin", username: "foo", password: "bar"};
- const rUser = {db: "test", username: "r", password: "bar"};
-
- function assertContainsValidLogicalTime(res) {
- assert.hasFields(res, ["$clusterTime"]);
- assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
- // clusterTime must be greater than the uninitialzed value.
- assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
- // The signature must have been signed by a key with a valid generation.
- assert(res.$clusterTime.signature.keyId > NumberLong(0));
-
- assert.hasFields(res, ["operationTime"]);
- assert(Object.prototype.toString.call(res.operationTime) === "[object Timestamp]",
- "operationTime must be a timestamp");
- }
-
- let st = new ShardingTest({shards: {rs0: {nodes: 2}}, other: {keyFile: keyFile}});
-
- jsTestLog("Started ShardingTest");
-
- var adminDB = st.s.getDB("admin");
- adminDB.createUser({user: adminUser.username, pwd: adminUser.password, roles: ["__system"]});
-
- adminDB.auth(adminUser.username, adminUser.password);
- assert(st.s.getDB("admin").system.keys.count() >= 2);
-
- let priRSConn = st.rs0.getPrimary().getDB("admin");
- priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]});
-
- priRSConn.auth(rUser.username, rUser.password);
- const resWithKeys = priRSConn.runCommand({isMaster: 1});
- assertContainsValidLogicalTime(resWithKeys);
- priRSConn.logout();
-
- // Enable the failpoint, remove all keys, and restart the config servers with the failpoint
- // still enabled to guarantee there are no keys.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- assert.commandWorked(st.configRS.nodes[i].adminCommand(
- {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
- }
-
- var priCSConn = st.configRS.getPrimary();
- authutil.asCluster(priCSConn, keyFile, function() {
- priCSConn.getDB("admin").system.keys.remove({purpose: "HMAC"});
- });
-
- assert(adminDB.system.keys.count() == 0, "expected there to be no keys on the config server");
- adminDB.logout();
-
- st.configRS.stopSet(null /* signal */, true /* forRestart */);
- st.configRS.startSet(
- {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
-
- // bounce rs0 to clean the key cache
- st.rs0.stopSet(null /* signal */, true /* forRestart */);
- st.rs0.startSet({restart: true});
-
- priRSConn = st.rs0.getPrimary().getDB("admin");
- priRSConn.auth(rUser.username, rUser.password);
- const resNoKeys = assert.commandWorked(priRSConn.runCommand({isMaster: 1}));
- priRSConn.logout();
-
- assert.eq(resNoKeys.hasOwnProperty("$clusterTime"), false);
- assert.eq(resNoKeys.hasOwnProperty("operationTime"), false);
-
- st.stop();
+"use strict";
+
+// This test uses authentication and runs commands without authenticating, which is not
+// compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/multiVersion/libs/multi_rs.js");
+
+// TODO SERVER-32672: remove this flag.
+TestData.skipGossipingClusterTime = true;
+const keyFile = 'jstests/libs/key1';
+const adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+};
+const rUser = {
+ db: "test",
+ username: "r",
+ password: "bar"
+};
+
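+// Expected response shape (inferred from the assertions below, not exhaustive):
+//   {..., $clusterTime: {clusterTime: <Timestamp>, signature: {hash: ..., keyId: <NumberLong>}},
+//    operationTime: <Timestamp>}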
+function assertContainsValidLogicalTime(res) {
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
+    // clusterTime must be greater than the uninitialized value.
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+ // The signature must have been signed by a key with a valid generation.
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
+
+ assert.hasFields(res, ["operationTime"]);
+ assert(Object.prototype.toString.call(res.operationTime) === "[object Timestamp]",
+ "operationTime must be a timestamp");
+}
+
+let st = new ShardingTest({shards: {rs0: {nodes: 2}}, other: {keyFile: keyFile}});
+
+jsTestLog("Started ShardingTest");
+
+var adminDB = st.s.getDB("admin");
+adminDB.createUser({user: adminUser.username, pwd: adminUser.password, roles: ["__system"]});
+
+adminDB.auth(adminUser.username, adminUser.password);
+assert(st.s.getDB("admin").system.keys.count() >= 2);
+
+let priRSConn = st.rs0.getPrimary().getDB("admin");
+priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]});
+
+priRSConn.auth(rUser.username, rUser.password);
+const resWithKeys = priRSConn.runCommand({isMaster: 1});
+assertContainsValidLogicalTime(resWithKeys);
+priRSConn.logout();
+
+// Enable the failpoint, remove all keys, and restart the config servers with the failpoint
+// still enabled to guarantee there are no keys.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
+}
+
+var priCSConn = st.configRS.getPrimary();
+authutil.asCluster(priCSConn, keyFile, function() {
+ priCSConn.getDB("admin").system.keys.remove({purpose: "HMAC"});
+});
+
+assert(adminDB.system.keys.count() == 0, "expected there to be no keys on the config server");
+adminDB.logout();
+
+st.configRS.stopSet(null /* signal */, true /* forRestart */);
+st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
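+// Passing "failpoint.<name>" via setParameter turns the failpoint on at startup, so key
+// generation cannot run before the failpoint takes effect.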
+
+// Bounce rs0 to clear its key cache.
+st.rs0.stopSet(null /* signal */, true /* forRestart */);
+st.rs0.startSet({restart: true});
+
+priRSConn = st.rs0.getPrimary().getDB("admin");
+priRSConn.auth(rUser.username, rUser.password);
+const resNoKeys = assert.commandWorked(priRSConn.runCommand({isMaster: 1}));
+priRSConn.logout();
+
+assert.eq(resNoKeys.hasOwnProperty("$clusterTime"), false);
+assert.eq(resNoKeys.hasOwnProperty("operationTime"), false);
+
+st.stop();
})();
diff --git a/jstests/sharding/mongos_dataSize_test.js b/jstests/sharding/mongos_dataSize_test.js
index 389529e7d9d..9dc9ceac78f 100644
--- a/jstests/sharding/mongos_dataSize_test.js
+++ b/jstests/sharding/mongos_dataSize_test.js
@@ -1,16 +1,14 @@
// This tests the command dataSize on sharded clusters to ensure that they can use the command.
(function() {
- 'use strict';
+'use strict';
- let s = new ShardingTest({shards: 2, mongos: 1});
- let db = s.getDB("test");
- assert.commandWorked(s.s0.adminCommand({enableSharding: "test"}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- assert.commandWorked(
- s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"}));
- assert.commandFailedWithCode(
- s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "foo"}),
- ErrorCodes.InvalidNamespace);
- s.stop();
+let s = new ShardingTest({shards: 2, mongos: 1});
+let db = s.getDB("test");
+assert.commandWorked(s.s0.adminCommand({enableSharding: "test"}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+assert.commandWorked(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"}));
+assert.commandFailedWithCode(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "foo"}),
+ ErrorCodes.InvalidNamespace);
+s.stop();
})();
diff --git a/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js b/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
index 84791008f4d..69a64cd808d 100644
--- a/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
+++ b/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
@@ -4,80 +4,80 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js"); // For restartMongoses.
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/multiVersion/libs/multi_cluster.js"); // For restartMongoses.
- function assertContainsValidLogicalTime(res, check) {
- assert.hasFields(res, ["$clusterTime"]);
- assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
- // clusterTime must be greater than the uninitialzed value.
- // TODO: SERVER-31986 this check can be done only for authenticated connections that do not
- // have advance_cluster_time privilege.
- if (check) {
- assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 1);
- }
- assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
- // The signature must have been signed by a key with a valid generation.
- if (check) {
- assert(res.$clusterTime.signature.keyId > NumberLong(0));
- }
+function assertContainsValidLogicalTime(res, check) {
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
+    // clusterTime must be greater than the uninitialized value.
+ // TODO: SERVER-31986 this check can be done only for authenticated connections that do not
+ // have advance_cluster_time privilege.
+ if (check) {
+ assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 1);
}
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+ // The signature must have been signed by a key with a valid generation.
+ if (check) {
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
+ }
+}
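+// The 'check' flag gates the assertions that require a fully initialized cluster time; callers
+// below pass false, since unauthenticated clients may observe an uninitialized time (see the
+// TODO above).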
- let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
- // Verify there are keys in the config server eventually, since mongos doesn't block for keys at
- // startup, and that once there are, mongos sends $clusterTime with a signature in responses.
- assert.soonNoExcept(function() {
- assert(st.s.getDB("admin").system.keys.count() >= 2);
+// Verify there are keys in the config server eventually, since mongos doesn't block for keys at
+// startup, and that once there are, mongos sends $clusterTime with a signature in responses.
+assert.soonNoExcept(function() {
+ assert(st.s.getDB("admin").system.keys.count() >= 2);
- let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
- assertContainsValidLogicalTime(res, false);
+ let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
+ assertContainsValidLogicalTime(res, false);
- return true;
- }, "expected keys to be created and for mongos to send signed cluster times");
+ return true;
+}, "expected keys to be created and for mongos to send signed cluster times");
- // Enable the failpoint, remove all keys, and restart the config servers with the failpoint
- // still enabled to guarantee there are no keys.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- assert.commandWorked(st.configRS.nodes[i].adminCommand(
- {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
- }
- let res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
- assert(res.nRemoved >= 2);
- assert(st.s.getDB("admin").system.keys.count() == 0,
- "expected there to be no keys on the config server");
- st.configRS.stopSet(null /* signal */, true /* forRestart */);
- st.configRS.startSet(
- {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
+// Enable the failpoint, remove all keys, and restart the config servers with the failpoint
+// still enabled to guarantee there are no keys.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
+}
+let res = st.configRS.getPrimary().getDB("admin").system.keys.remove({purpose: "HMAC"});
+assert(res.nRemoved >= 2);
+assert(st.s.getDB("admin").system.keys.count() == 0,
+ "expected there to be no keys on the config server");
+st.configRS.stopSet(null /* signal */, true /* forRestart */);
+st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
- // Limit the max time between refreshes on the config server, so new keys are created quickly.
- st.configRS.getPrimary().adminCommand({
- "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
- "mode": "alwaysOn",
- "data": {"overrideMS": 1000}
- });
+// Limit the max time between refreshes on the config server, so new keys are created quickly.
+st.configRS.getPrimary().adminCommand({
+ "configureFailPoint": "maxKeyRefreshWaitTimeOverrideMS",
+ "mode": "alwaysOn",
+ "data": {"overrideMS": 1000}
+});
- // Disable the failpoint.
- for (let i = 0; i < st.configRS.nodes.length; i++) {
- assert.commandWorked(st.configRS.nodes[i].adminCommand(
- {"configureFailPoint": "disableKeyGeneration", "mode": "off"}));
- }
+// Disable the failpoint.
+for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "off"}));
+}
- // Mongos should restart with no problems.
- st.restartMongoses();
+// Mongos should restart with no problems.
+st.restartMongoses();
- // Eventually mongos will discover the new keys, and start signing cluster times.
- assert.soonNoExcept(function() {
- assertContainsValidLogicalTime(st.s.getDB("test").runCommand({isMaster: 1}), false);
- return true;
- }, "expected mongos to eventually start signing cluster times", 60 * 1000); // 60 seconds.
+// Eventually mongos will discover the new keys, and start signing cluster times.
+assert.soonNoExcept(function() {
+ assertContainsValidLogicalTime(st.s.getDB("test").runCommand({isMaster: 1}), false);
+ return true;
+}, "expected mongos to eventually start signing cluster times", 60 * 1000); // 60 seconds.
- // There may be a delay between the creation of the first and second keys, but mongos will start
- // signing after seeing the first key, so there is only guaranteed to be one at this point.
- assert(st.s.getDB("admin").system.keys.count() >= 1,
- "expected there to be at least one generation of keys on the config server");
+// There may be a delay between the creation of the first and second keys, but mongos will start
+// signing after seeing the first key, so there is only guaranteed to be one at this point.
+assert(st.s.getDB("admin").system.keys.count() >= 1,
+ "expected there to be at least one generation of keys on the config server");
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/mongos_local_explain.js b/jstests/sharding/mongos_local_explain.js
index bf9ab379e53..d21ee745306 100644
--- a/jstests/sharding/mongos_local_explain.js
+++ b/jstests/sharding/mongos_local_explain.js
@@ -3,30 +3,30 @@
* confirms that the pipeline ran entirely on mongoS.
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
- const mongosConn = st.s;
+const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
+const mongosConn = st.s;
- const stageSpec = {
- "$listLocalSessions": {allUsers: false, users: [{user: "nobody", db: "nothing"}]}
- };
+const stageSpec = {
+ "$listLocalSessions": {allUsers: false, users: [{user: "nobody", db: "nothing"}]}
+};
- // Use the test stage to create a pipeline that runs exclusively on mongoS.
- const mongosOnlyPipeline = [stageSpec, {$match: {dummyField: 1}}];
+// Use the test stage to create a pipeline that runs exclusively on mongoS.
+const mongosOnlyPipeline = [stageSpec, {$match: {dummyField: 1}}];
- // We expect the explain output to reflect the stage's spec.
- const expectedExplainStages = [stageSpec, {$match: {dummyField: {$eq: 1}}}];
+// We expect the explain output to reflect the stage's spec.
+const expectedExplainStages = [stageSpec, {$match: {dummyField: {$eq: 1}}}];
- // Test that the mongoS-only pipeline is explainable.
- const explainPlan = assert.commandWorked(mongosConn.getDB("admin").runCommand(
- {aggregate: 1, pipeline: mongosOnlyPipeline, explain: true}));
+// Test that the mongoS-only pipeline is explainable.
+const explainPlan = assert.commandWorked(mongosConn.getDB("admin").runCommand(
+ {aggregate: 1, pipeline: mongosOnlyPipeline, explain: true}));
- // We expect the stages to appear under the 'mongos' heading, for 'splitPipeline' to be
- // null, and for the 'mongos.host' field to be the hostname:port of the mongoS itself.
- assert.docEq(explainPlan.mongos.stages, expectedExplainStages);
- assert.eq(explainPlan.mongos.host, mongosConn.name);
- assert.isnull(explainPlan.splitPipeline);
+// We expect the stages to appear under the 'mongos' heading, for 'splitPipeline' to be
+// null, and for the 'mongos.host' field to be the hostname:port of the mongoS itself.
+assert.docEq(explainPlan.mongos.stages, expectedExplainStages);
+assert.eq(explainPlan.mongos.host, mongosConn.name);
+assert.isnull(explainPlan.splitPipeline);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 8fedcb09ce2..6dc458c2ae7 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -1,42 +1,41 @@
// Tests whether new sharding is detected on insert by mongos
(function() {
- var st = new ShardingTest({name: "mongos_no_detect_sharding", shards: 1, mongos: 2});
+var st = new ShardingTest({name: "mongos_no_detect_sharding", shards: 1, mongos: 2});
- var mongos = st.s;
- var config = mongos.getDB("config");
+var mongos = st.s;
+var config = mongos.getDB("config");
- print("Creating unsharded connection...");
+print("Creating unsharded connection...");
- var mongos2 = st._mongos[1];
+var mongos2 = st._mongos[1];
- var coll = mongos2.getCollection("test.foo");
- assert.writeOK(coll.insert({i: 0}));
+var coll = mongos2.getCollection("test.foo");
+assert.writeOK(coll.insert({i: 0}));
- print("Sharding collection...");
+print("Sharding collection...");
- var admin = mongos.getDB("admin");
+var admin = mongos.getDB("admin");
- assert.eq(coll.getShardVersion().ok, 0);
+assert.eq(coll.getShardVersion().ok, 0);
- admin.runCommand({enableSharding: "test"});
- admin.runCommand({shardCollection: "test.foo", key: {_id: 1}});
+admin.runCommand({enableSharding: "test"});
+admin.runCommand({shardCollection: "test.foo", key: {_id: 1}});
- print("Seeing if data gets inserted unsharded...");
- print("No splits occur here!");
+print("Seeing if data gets inserted unsharded...");
+print("No splits occur here!");
- // Insert a bunch of data which should trigger a split
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({i: i + 1});
- }
- assert.writeOK(bulk.execute());
+// Insert a bunch of data which should trigger a split
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({i: i + 1});
+}
+assert.writeOK(bulk.execute());
- st.printShardingStatus(true);
+st.printShardingStatus(true);
- assert.eq(coll.getShardVersion().ok, 1);
- assert.eq(101, coll.find().itcount());
-
- st.stop();
+assert.eq(coll.getShardVersion().ok, 1);
+assert.eq(101, coll.find().itcount());
+st.stop();
})();
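// [Editorial sketch, not part of the commit] The unordered bulk idiom used in the
// test above, shown on its own. Assumes a shell connected to any mongod or
// mongos; the 'bulk_demo.foo' namespace and variable names are illustrative only.
var demoColl = db.getSiblingDB("bulk_demo").foo;
var demoBulk = demoColl.initializeUnorderedBulkOp();
for (var n = 0; n < 100; n++) {
    demoBulk.insert({i: n + 1});  // queued client-side; nothing is sent yet
}
// execute() ships all queued inserts in one batch; "unordered" means the server
// may apply them in any order and continues past individual failures.
assert.writeOK(demoBulk.execute());
assert.eq(100, demoColl.find().itcount());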
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index c6a8a1e05b0..b9080f950fc 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -2,136 +2,132 @@
load("jstests/replsets/rslib.js");
(function() {
- 'use strict';
-
- var five_minutes = 5 * 60 * 1000;
-
- var numRSHosts = function() {
- var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster: 1}));
- return result.hosts.length + result.passives.length;
- };
-
- var numMongosHosts = function() {
- var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
- var result = commandResult.replicaSets[rsObj.name];
- return result.hosts.length;
- };
-
- var configServerURL = function() {
- var result = config.shards.find().toArray()[0];
- return result.host;
- };
-
- var checkNumHosts = function(expectedNumHosts) {
- jsTest.log("Waiting for the shard to discover that it now has " + expectedNumHosts +
- " hosts.");
- var numHostsSeenByShard;
-
- // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
- // seconds.
- assert.soon(
- function() {
- numHostsSeenByShard = numRSHosts();
- return numHostsSeenByShard === expectedNumHosts;
- },
- function() {
- return ("Expected shard to see " + expectedNumHosts + " hosts but found " +
- numHostsSeenByShard);
- },
- five_minutes);
-
- jsTest.log("Waiting for the mongos to discover that the shard now has " + expectedNumHosts +
- " hosts.");
- var numHostsSeenByMongos;
-
- // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
- // seconds.
- assert.soon(
- function() {
- numHostsSeenByMongos = numMongosHosts();
- return numHostsSeenByMongos === expectedNumHosts;
- },
- function() {
- return ("Expected mongos to see " + expectedNumHosts +
- " hosts on shard but found " + numHostsSeenByMongos);
- },
- five_minutes);
- };
-
- var st = new ShardingTest({
- name: 'mongos_no_replica_set_refresh',
- shards: 1,
- mongos: 1,
- other: {
- rs0: {
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0}},
- ],
- }
- }
- });
-
- var rsObj = st.rs0;
- assert.commandWorked(rsObj.nodes[0].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: 60 * 1000,
- }),
- 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary');
+'use strict';
- var mongos = st.s;
- var config = mongos.getDB("config");
+var five_minutes = 5 * 60 * 1000;
- printjson(mongos.getCollection("foo.bar").findOne());
+var numRSHosts = function() {
+ var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster: 1}));
+ return result.hosts.length + result.passives.length;
+};
- jsTestLog("Removing a node from the shard's replica set.");
+var numMongosHosts = function() {
+ var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
+ var result = commandResult.replicaSets[rsObj.name];
+ return result.hosts.length;
+};
- var rsConfig = rsObj.getReplSetConfigFromNode(0);
+var configServerURL = function() {
+ var result = config.shards.find().toArray()[0];
+ return result.host;
+};
- var removedNode = rsConfig.members.pop();
- rsConfig.version++;
- reconfig(rsObj, rsConfig);
+var checkNumHosts = function(expectedNumHosts) {
+ jsTest.log("Waiting for the shard to discover that it now has " + expectedNumHosts + " hosts.");
+ var numHostsSeenByShard;
- // Wait for the election round to complete
- rsObj.getPrimary();
+ // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
+ // seconds.
+ assert.soon(
+ function() {
+ numHostsSeenByShard = numRSHosts();
+ return numHostsSeenByShard === expectedNumHosts;
+ },
+ function() {
+ return ("Expected shard to see " + expectedNumHosts + " hosts but found " +
+ numHostsSeenByShard);
+ },
+ five_minutes);
- checkNumHosts(rsConfig.members.length);
+ jsTest.log("Waiting for the mongos to discover that the shard now has " + expectedNumHosts +
+ " hosts.");
+ var numHostsSeenByMongos;
- jsTest.log("Waiting for config.shards to reflect that " + removedNode.host +
- " has been removed.");
+ // Use a high timeout (5 minutes) because replica set refreshes are only done every 30
+ // seconds.
assert.soon(
function() {
- return configServerURL().indexOf(removedNode.host) < 0;
+ numHostsSeenByMongos = numMongosHosts();
+ return numHostsSeenByMongos === expectedNumHosts;
},
function() {
- return (removedNode.host + " was removed from " + rsObj.name +
- ", but is still seen in config.shards");
- });
+ return ("Expected mongos to see " + expectedNumHosts + " hosts on shard but found " +
+ numHostsSeenByMongos);
+ },
+ five_minutes);
+};
+
+var st = new ShardingTest({
+ name: 'mongos_no_replica_set_refresh',
+ shards: 1,
+ mongos: 1,
+ other: {
+ rs0: {
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0}},
+ ],
+ }
+ }
+});
- jsTestLog("Adding the node back to the shard's replica set.");
+var rsObj = st.rs0;
+assert.commandWorked(rsObj.nodes[0].adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: 60 * 1000,
+}),
+ 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary');
- config.shards.update({_id: rsObj.name}, {$set: {host: rsObj.name + "/" + rsObj.nodes[0].host}});
- printjson(config.shards.find().toArray());
+var mongos = st.s;
+var config = mongos.getDB("config");
- rsConfig.members.push(removedNode);
- rsConfig.version++;
- reconfig(rsObj, rsConfig);
+printjson(mongos.getCollection("foo.bar").findOne());
- checkNumHosts(rsConfig.members.length);
+jsTestLog("Removing a node from the shard's replica set.");
- jsTest.log("Waiting for config.shards to reflect that " + removedNode.host +
- " has been re-added.");
- assert.soon(
- function() {
- return configServerURL().indexOf(removedNode.host) >= 0;
- },
- function() {
- return (removedNode.host + " was re-added to " + rsObj.name +
- ", but is not seen in config.shards");
- });
+var rsConfig = rsObj.getReplSetConfigFromNode(0);
+
+var removedNode = rsConfig.members.pop();
+rsConfig.version++;
+reconfig(rsObj, rsConfig);
+
+// Wait for the election round to complete
+rsObj.getPrimary();
+
+checkNumHosts(rsConfig.members.length);
+
+jsTest.log("Waiting for config.shards to reflect that " + removedNode.host + " has been removed.");
+assert.soon(
+ function() {
+ return configServerURL().indexOf(removedNode.host) < 0;
+ },
+ function() {
+ return (removedNode.host + " was removed from " + rsObj.name +
+ ", but is still seen in config.shards");
+ });
+
+jsTestLog("Adding the node back to the shard's replica set.");
+
+config.shards.update({_id: rsObj.name}, {$set: {host: rsObj.name + "/" + rsObj.nodes[0].host}});
+printjson(config.shards.find().toArray());
- st.stop();
+rsConfig.members.push(removedNode);
+rsConfig.version++;
+reconfig(rsObj, rsConfig);
+
+checkNumHosts(rsConfig.members.length);
+
+jsTest.log("Waiting for config.shards to reflect that " + removedNode.host + " has been re-added.");
+assert.soon(
+ function() {
+ return configServerURL().indexOf(removedNode.host) >= 0;
+ },
+ function() {
+ return (removedNode.host + " was re-added to " + rsObj.name +
+ ", but is not seen in config.shards");
+ });
+st.stop();
}());
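// [Editorial sketch, not part of the commit] The assert.soon polling idiom the
// test above leans on: assert.soon(cond, msg, timeoutMillis) re-evaluates 'cond'
// until it returns true or the timeout expires, and 'msg' may be a function so
// the failure text can report the last observed value. Assumes a connected shell;
// the probe and variable names are illustrative.
var lastOk;
assert.soon(
    function() {
        lastOk = db.adminCommand({ping: 1}).ok;  // any cheap, repeatable probe
        return lastOk === 1;
    },
    function() {
        return "ping never succeeded; last ok value: " + lastOk;
    },
    5 * 60 * 1000);  // generous timeout, mirroring the five-minute value above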
diff --git a/jstests/sharding/mongos_query_comment.js b/jstests/sharding/mongos_query_comment.js
index 963e43f6bf8..ccb10d16824 100644
--- a/jstests/sharding/mongos_query_comment.js
+++ b/jstests/sharding/mongos_query_comment.js
@@ -5,83 +5,80 @@
* comment to the find command fails.
*/
(function() {
- "use strict";
-
- // For profilerHasSingleMatchingEntryOrThrow.
- load("jstests/libs/profiler.js");
-
- const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
-
- const shard = st.shard0;
- const mongos = st.s;
-
- // Need references to the database via both mongos and mongod so that we can enable profiling &
- // test queries on the shard.
- const mongosDB = mongos.getDB("mongos_comment");
- const shardDB = shard.getDB("mongos_comment");
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- const mongosColl = mongosDB.test;
- const shardColl = shardDB.test;
-
- const collNS = mongosColl.getFullName();
-
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(mongosColl.insert({_id: i, a: i}));
- }
-
- // The profiler will be used to verify that comments are present on the shard.
- assert.commandWorked(shardDB.setProfilingLevel(2));
- const profiler = shardDB.system.profile;
-
- //
- // Set legacy read mode for the mongos and shard connections.
- //
- mongosDB.getMongo().forceReadMode("legacy");
- shardDB.getMongo().forceReadMode("legacy");
-
- // TEST CASE: A legacy string $comment meta-operator is propagated to the shards via mongos.
- assert.eq(mongosColl.find({$query: {a: 1}, $comment: "TEST"}).itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow(
- {profileDB: shardDB, filter: {op: "query", ns: collNS, "command.comment": "TEST"}});
-
- // TEST CASE: A legacy BSONObj $comment is converted to a string and propagated via mongos.
- assert.eq(mongosColl.find({$query: {a: 1}, $comment: {c: 2, d: {e: "TEST"}}}).itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {op: "query", ns: collNS, "command.comment": "{ c: 2.0, d: { e: \"TEST\" } }"}
- });
-
- // TEST CASE: Legacy BSONObj $comment is NOT converted to a string when issued on the mongod.
- assert.eq(shardColl.find({$query: {a: 1}, $comment: {c: 3, d: {e: "TEST"}}}).itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {op: "query", ns: collNS, "command.comment": {c: 3, d: {e: "TEST"}}}
- });
-
- //
- // Revert to "commands" read mode for the find command test cases below.
- //
- mongosDB.getMongo().forceReadMode("commands");
- shardDB.getMongo().forceReadMode("commands");
-
- // TEST CASE: Verify that string find.comment and non-string find.filter.$comment propagate.
- assert.eq(mongosColl.find({a: 1, $comment: {b: "TEST"}}).comment("TEST").itcount(), 1);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- op: "query",
- ns: collNS, "command.comment": "TEST", "command.filter.$comment": {b: "TEST"}
- }
- });
-
- // TEST CASE: Verify that find command with a non-string comment parameter is rejected.
- assert.commandFailedWithCode(
- mongosDB.runCommand(
- {"find": mongosColl.getName(), "filter": {a: 1}, "comment": {b: "TEST"}}),
- 9,
- "Non-string find command comment did not return an error.");
-
- st.stop();
+"use strict";
+
+// For profilerHasSingleMatchingEntryOrThrow.
+load("jstests/libs/profiler.js");
+
+const st = new ShardingTest({name: "mongos_comment_test", mongos: 1, shards: 1});
+
+const shard = st.shard0;
+const mongos = st.s;
+
+// Need references to the database via both mongos and mongod so that we can enable profiling &
+// test queries on the shard.
+const mongosDB = mongos.getDB("mongos_comment");
+const shardDB = shard.getDB("mongos_comment");
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+const mongosColl = mongosDB.test;
+const shardColl = shardDB.test;
+
+const collNS = mongosColl.getFullName();
+
+for (let i = 0; i < 5; ++i) {
+ assert.writeOK(mongosColl.insert({_id: i, a: i}));
+}
+
+// The profiler will be used to verify that comments are present on the shard.
+assert.commandWorked(shardDB.setProfilingLevel(2));
+const profiler = shardDB.system.profile;
+
+//
+// Set legacy read mode for the mongos and shard connections.
+//
+mongosDB.getMongo().forceReadMode("legacy");
+shardDB.getMongo().forceReadMode("legacy");
+
+// TEST CASE: A legacy string $comment meta-operator is propagated to the shards via mongos.
+assert.eq(mongosColl.find({$query: {a: 1}, $comment: "TEST"}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: shardDB, filter: {op: "query", ns: collNS, "command.comment": "TEST"}});
+
+// TEST CASE: A legacy BSONObj $comment is converted to a string and propagated via mongos.
+assert.eq(mongosColl.find({$query: {a: 1}, $comment: {c: 2, d: {e: "TEST"}}}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter: {op: "query", ns: collNS, "command.comment": "{ c: 2.0, d: { e: \"TEST\" } }"}
+});
+
+// TEST CASE: Legacy BSONObj $comment is NOT converted to a string when issued on the mongod.
+assert.eq(shardColl.find({$query: {a: 1}, $comment: {c: 3, d: {e: "TEST"}}}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter: {op: "query", ns: collNS, "command.comment": {c: 3, d: {e: "TEST"}}}
+});
+
+//
+// Revert to "commands" read mode for the find command test cases below.
+//
+mongosDB.getMongo().forceReadMode("commands");
+shardDB.getMongo().forceReadMode("commands");
+
+// TEST CASE: Verify that string find.comment and non-string find.filter.$comment propagate.
+assert.eq(mongosColl.find({a: 1, $comment: {b: "TEST"}}).comment("TEST").itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter:
+ {op: "query", ns: collNS, "command.comment": "TEST", "command.filter.$comment": {b: "TEST"}}
+});
+
+// TEST CASE: Verify that find command with a non-string comment parameter is rejected.
+assert.commandFailedWithCode(
+ mongosDB.runCommand({"find": mongosColl.getName(), "filter": {a: 1}, "comment": {b: "TEST"}}),
+ 9,
+ "Non-string find command comment did not return an error.");
+
+st.stop();
})();
\ No newline at end of file
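// [Editorial sketch, not part of the commit] The profiler-based verification
// pattern from the test above, reduced to one case. Assumes a shell connected
// directly to a mongod with jstests/libs/profiler.js loadable from the working
// directory; 'comment_demo' is a hypothetical database name.
load("jstests/libs/profiler.js");
var demoDB = db.getSiblingDB("comment_demo");
assert.commandWorked(demoDB.dropDatabase());
assert.writeOK(demoDB.c.insert({a: 1}));
assert.commandWorked(demoDB.setProfilingLevel(2));  // profile every operation
assert.eq(demoDB.c.find({a: 1}).comment("DEMO").itcount(), 1);
// The comment set on the cursor must surface in the profiler entry for the find.
profilerHasSingleMatchingEntryOrThrow(
    {profileDB: demoDB,
     filter: {op: "query", ns: demoDB.c.getFullName(), "command.comment": "DEMO"}});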
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index 7b43adcdd49..d41759de5db 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -16,417 +16,415 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 1, other: {rs: true, rsOptions: {nodes: 2}}});
+var st = new ShardingTest({shards: 3, mongos: 1, other: {rs: true, rsOptions: {nodes: 2}}});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
- assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
+assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
- var collSharded = mongos.getCollection("fooSharded.barSharded");
- var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
+var collSharded = mongos.getCollection("fooSharded.barSharded");
+var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
- // Create the unsharded database
- assert.writeOK(collUnsharded.insert({some: "doc"}));
- assert.writeOK(collUnsharded.remove({}));
- assert.commandWorked(
- admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
+// Create the unsharded database
+assert.writeOK(collUnsharded.insert({some: "doc"}));
+assert.writeOK(collUnsharded.remove({}));
+assert.commandWorked(
+ admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
- // Create the sharded database
- assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
- assert.commandWorked(
- admin.runCommand({movePrimary: collSharded.getDB().toString(), to: st.shard0.shardName}));
- assert.commandWorked(
- admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
+// Create the sharded database
+assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+assert.commandWorked(
+ admin.runCommand({movePrimary: collSharded.getDB().toString(), to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
- // Secondaries do not refresh their in-memory routing table until a request with a higher
- // version is received, and refreshing requires communication with the primary to obtain the
- // newest version. Read from the secondaries once before taking down primaries to ensure they
- // have loaded the routing table into memory.
- // TODO SERVER-30148: replace this with calls to awaitReplication() on each shard owning data
- // for the sharded collection once secondaries refresh proactively.
- var mongosSetupConn = new Mongo(mongos.host);
- mongosSetupConn.setReadPref("secondary");
- assert(!mongosSetupConn.getCollection(collSharded.toString()).find({}).hasNext());
+// Secondaries do not refresh their in-memory routing table until a request with a higher
+// version is received, and refreshing requires communication with the primary to obtain the
+// newest version. Read from the secondaries once before taking down primaries to ensure they
+// have loaded the routing table into memory.
+// TODO SERVER-30148: replace this with calls to awaitReplication() on each shard owning data
+// for the sharded collection once secondaries refresh proactively.
+var mongosSetupConn = new Mongo(mongos.host);
+mongosSetupConn.setReadPref("secondary");
+assert(!mongosSetupConn.getCollection(collSharded.toString()).find({}).hasNext());
- gc(); // Clean up connections
+gc(); // Clean up connections
- st.printShardingStatus();
+st.printShardingStatus();
- //
- // Setup is complete
- //
+//
+// Setup is complete
+//
- jsTest.log("Inserting initial data...");
+jsTest.log("Inserting initial data...");
- var mongosConnActive = new Mongo(mongos.host);
- var mongosConnIdle = null;
- var mongosConnNew = null;
+var mongosConnActive = new Mongo(mongos.host);
+var mongosConnIdle = null;
+var mongosConnNew = null;
- var wc = {writeConcern: {w: 2, wtimeout: 60000}};
+var wc = {writeConcern: {w: 2, wtimeout: 60000}};
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
- jsTest.log("Stopping primary of third shard...");
+jsTest.log("Stopping primary of third shard...");
- mongosConnIdle = new Mongo(mongos.host);
+mongosConnIdle = new Mongo(mongos.host);
- st.rs2.stop(st.rs2.getPrimary());
+st.rs2.stop(st.rs2.getPrimary());
- jsTest.log("Testing active connection with third primary down...");
+jsTest.log("Testing active connection with third primary down...");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
- jsTest.log("Testing idle connection with third primary down...");
+jsTest.log("Testing idle connection with third primary down...");
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
-
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- jsTest.log("Testing new connections with third primary down...");
-
- mongosConnNew = new Mongo(mongos.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
-
- gc(); // Clean up new connections
-
- jsTest.log("Stopping primary of second shard...");
-
- mongosConnIdle = new Mongo(mongos.host);
-
- // Need to save this node for later
- var rs1Secondary = st.rs1.getSecondary();
-
- st.rs1.stop(st.rs1.getPrimary());
-
- jsTest.log("Testing active connection with second primary down...");
-
- // Reads with read prefs
- mongosConnActive.setSlaveOk();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnActive.setSlaveOk(false);
-
- mongosConnActive.setReadPref("primary");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- // Ensure read prefs override slaveOK
- mongosConnActive.setSlaveOk();
- mongosConnActive.setReadPref("primary");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnActive.setSlaveOk(false);
-
- mongosConnActive.setReadPref("secondary");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnActive.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnActive.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnActive.setReadPref("nearest");
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- // Writes
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
-
- jsTest.log("Testing idle connection with second primary down...");
-
- // Writes
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
-
- // Reads with read prefs
- mongosConnIdle.setSlaveOk();
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnIdle.setSlaveOk(false);
-
- mongosConnIdle.setReadPref("primary");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- // Ensure read prefs override slaveOK
- mongosConnIdle.setSlaveOk();
- mongosConnIdle.setReadPref("primary");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.throws(function() {
- mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnIdle.setSlaveOk(false);
-
- mongosConnIdle.setReadPref("secondary");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnIdle.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnIdle.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- mongosConnIdle.setReadPref("nearest");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- jsTest.log("Testing new connections with second primary down...");
-
- // Reads with read prefs
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primary");
- assert.throws(function() {
- mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- // Ensure read prefs override slaveOK
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- mongosConnNew.setReadPref("primary");
- assert.throws(function() {
- mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
- });
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- mongosConnNew.setReadPref("primary");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondary");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondary");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("primaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("secondaryPreferred");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("nearest");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("nearest");
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setReadPref("nearest");
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
- // Writes
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
-
- gc(); // Clean up new connections
-
- jsTest.log("Stopping primary of first shard...");
-
- mongosConnIdle = new Mongo(mongos.host);
-
- st.rs0.stop(st.rs0.getPrimary());
-
- jsTest.log("Testing active connection with first primary down...");
-
- mongosConnActive.setSlaveOk();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
- assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
-
- jsTest.log("Testing idle connection with first primary down...");
-
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
- assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
-
- mongosConnIdle.setSlaveOk();
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-
- jsTest.log("Testing new connections with first primary down...");
-
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- gc(); // Clean up new connections
+jsTest.log("Testing new connections with third primary down...");
- jsTest.log("Stopping second shard...");
+mongosConnNew = new Mongo(mongos.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+
+gc(); // Clean up new connections
+
+jsTest.log("Stopping primary of second shard...");
+
+mongosConnIdle = new Mongo(mongos.host);
+
+// Need to save this node for later
+var rs1Secondary = st.rs1.getSecondary();
+
+st.rs1.stop(st.rs1.getPrimary());
+
+jsTest.log("Testing active connection with second primary down...");
+
+// Reads with read prefs
+mongosConnActive.setSlaveOk();
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnActive.setSlaveOk(false);
+
+mongosConnActive.setReadPref("primary");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+// Ensure read prefs override slaveOK
+mongosConnActive.setSlaveOk();
+mongosConnActive.setReadPref("primary");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnActive.setSlaveOk(false);
+
+mongosConnActive.setReadPref("secondary");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnActive.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnActive.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnActive.setReadPref("nearest");
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+// Writes
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+
+jsTest.log("Testing idle connection with second primary down...");
+
+// Writes
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+
+// Reads with read prefs
+mongosConnIdle.setSlaveOk();
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnIdle.setSlaveOk(false);
+
+mongosConnIdle.setReadPref("primary");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+// Ensure read prefs override slaveOK
+mongosConnIdle.setSlaveOk();
+mongosConnIdle.setReadPref("primary");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnIdle.setSlaveOk(false);
+
+mongosConnIdle.setReadPref("secondary");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnIdle.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnIdle.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+mongosConnIdle.setReadPref("nearest");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+jsTest.log("Testing new connections with second primary down...");
+
+// Reads with read prefs
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primary");
+assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+// Ensure read prefs override slaveOK
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+mongosConnNew.setReadPref("primary");
+assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+});
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+mongosConnNew.setReadPref("primary");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondary");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondary");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("primaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("secondaryPreferred");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("nearest");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("nearest");
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setReadPref("nearest");
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+// Writes
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+
+gc(); // Clean up new connections
+
+jsTest.log("Stopping primary of first shard...");
+
+mongosConnIdle = new Mongo(mongos.host);
+
+st.rs0.stop(st.rs0.getPrimary());
+
+jsTest.log("Testing active connection with first primary down...");
+
+mongosConnActive.setSlaveOk();
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
+
+jsTest.log("Testing idle connection with first primary down...");
+
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
+
+mongosConnIdle.setSlaveOk();
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+jsTest.log("Testing new connections with first primary down...");
+
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnIdle = new Mongo(mongos.host);
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
+
+gc(); // Clean up new connections
- st.rs1.stop(rs1Secondary);
+jsTest.log("Stopping second shard...");
- jsTest.log("Testing active connection with second shard down...");
+mongosConnIdle = new Mongo(mongos.host);
- mongosConnActive.setSlaveOk();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+st.rs1.stop(rs1Secondary);
+
+jsTest.log("Testing active connection with second shard down...");
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
- assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
+mongosConnActive.setSlaveOk();
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- jsTest.log("Testing idle connection with second shard down...");
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
- assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
+jsTest.log("Testing idle connection with second shard down...");
- mongosConnIdle.setSlaveOk();
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
- jsTest.log("Testing new connections with second shard down...");
+mongosConnIdle.setSlaveOk();
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(mongos.host);
- mongosConnNew.setSlaveOk();
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+jsTest.log("Testing new connections with second shard down...");
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
- mongosConnNew = new Mongo(mongos.host);
- assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(mongos.host);
+mongosConnNew.setSlaveOk();
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- gc(); // Clean up new connections
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
+mongosConnNew = new Mongo(mongos.host);
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
- st.stop();
+gc(); // Clean up new connections
+st.stop();
})();
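// [Editorial sketch, not part of the commit] The routing rule the blocks above
// assert repeatedly: setSlaveOk() merely permits secondary reads, while an
// explicit read preference takes precedence. Assumes the failure state staged by
// the test (a ShardingTest 'st' whose shard primary owning the chunk for 'ns' is
// down); 'demoConn' and 'ns' are illustrative names.
var demoConn = new Mongo(st.s.host);
demoConn.setSlaveOk();            // would allow falling back to a secondary...
demoConn.setReadPref("primary");  // ...but an explicit "primary" pref wins
// With the owning primary down, the read fails rather than using a secondary.
assert.throws(function() {
    demoConn.getCollection(ns).findOne({_id: 1});
});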
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 5443450f3bc..7d4560b5ee6 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -14,128 +14,126 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 1});
+var st = new ShardingTest({shards: 3, mongos: 1});
- var admin = st.s0.getDB("admin");
+var admin = st.s0.getDB("admin");
- var collSharded = st.s0.getCollection("fooSharded.barSharded");
- var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
+var collSharded = st.s0.getCollection("fooSharded.barSharded");
+var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
- assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
- st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
+assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
- assert.commandWorked(
- admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
- // Create the unsharded database
- assert.writeOK(collUnsharded.insert({some: "doc"}));
- assert.writeOK(collUnsharded.remove({}));
- st.ensurePrimaryShard(collUnsharded.getDB().toString(), st.shard0.shardName);
+// Create the unsharded database
+assert.writeOK(collUnsharded.insert({some: "doc"}));
+assert.writeOK(collUnsharded.remove({}));
+st.ensurePrimaryShard(collUnsharded.getDB().toString(), st.shard0.shardName);
- //
- // Setup is complete
- //
-
- jsTest.log("Inserting initial data...");
+//
+// Setup is complete
+//
- var mongosConnActive = new Mongo(st.s0.host);
- var mongosConnIdle = null;
- var mongosConnNew = null;
+jsTest.log("Inserting initial data...");
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
+var mongosConnActive = new Mongo(st.s0.host);
+var mongosConnIdle = null;
+var mongosConnNew = null;
- jsTest.log("Stopping third shard...");
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
- mongosConnIdle = new Mongo(st.s0.host);
+jsTest.log("Stopping third shard...");
- st.rs2.stopSet();
+mongosConnIdle = new Mongo(st.s0.host);
- jsTest.log("Testing active connection...");
+st.rs2.stopSet();
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+jsTest.log("Testing active connection...");
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- jsTest.log("Testing idle connection...");
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
+jsTest.log("Testing idle connection...");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
- jsTest.log("Testing new connections...");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+jsTest.log("Testing new connections...");
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- gc(); // Clean up new connections
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
- jsTest.log("Stopping second shard...");
+gc(); // Clean up new connections
- mongosConnIdle = new Mongo(st.s0.host);
+jsTest.log("Stopping second shard...");
- st.rs1.stopSet();
- jsTest.log("Testing active connection...");
+mongosConnIdle = new Mongo(st.s0.host);
- assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+st.rs1.stopSet();
+jsTest.log("Testing active connection...");
- assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
- assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
- jsTest.log("Testing idle connection...");
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
- assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
- assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
- assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
+jsTest.log("Testing idle connection...");
- assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
- assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
- jsTest.log("Testing new connections...");
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+jsTest.log("Testing new connections...");
- mongosConnNew = new Mongo(st.s0.host);
- assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
- mongosConnNew = new Mongo(st.s0.host);
- assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
- st.stop();
+mongosConnNew = new Mongo(st.s0.host);
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
+st.stop();
})();
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index d9114a6033f..66b71aa12c3 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -4,85 +4,85 @@
// Note that this is *unsafe* with broadcast removes and updates
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
+var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
- var mongos = st.s0;
- var staleMongosA = st.s1;
- var staleMongosB = st.s2;
+var mongos = st.s0;
+var staleMongosA = st.s1;
+var staleMongosB = st.s2;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
- var staleCollA = staleMongosA.getCollection(coll + "");
- var staleCollB = staleMongosB.getCollection(coll + "");
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
+var staleCollA = staleMongosA.getCollection(coll + "");
+var staleCollB = staleMongosB.getCollection(coll + "");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- coll.ensureIndex({a: 1});
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+coll.ensureIndex({a: 1});
- // Shard the collection on {a: 1} and move one chunk to another shard. Updates need to be across
-    // two shards to trigger an error; otherwise they are versioned and will succeed after raising
- // a StaleConfigException.
- st.shardColl(coll, {a: 1}, {a: 0}, {a: 1}, coll.getDB(), true);
+// Shard the collection on {a: 1} and move one chunk to another shard. Updates need to be across
+// two shards to trigger an error; otherwise they are versioned and will succeed after raising
+// a StaleConfigException.
+st.shardColl(coll, {a: 1}, {a: 0}, {a: 1}, coll.getDB(), true);
- // Let the stale mongos see the collection state
- staleCollA.findOne();
- staleCollB.findOne();
+// Let the stale mongos see the collection state
+staleCollA.findOne();
+staleCollB.findOne();
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({b: 1});
- st.shardColl(coll, {b: 1}, {b: 0}, {b: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({b: 1});
+st.shardColl(coll, {b: 1}, {b: 0}, {b: 1}, coll.getDB(), true);
- // Make sure that we can successfully insert, even though we have stale state
- assert.writeOK(staleCollA.insert({b: "b"}));
+// Make sure that we can successfully insert, even though we have stale state
+assert.writeOK(staleCollA.insert({b: "b"}));
-    // Make sure the insert fails when using old info
- assert.writeError(staleCollB.insert({a: "a"}));
+// Make sure the insert fails when using old info
+assert.writeError(staleCollB.insert({a: "a"}));
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({c: 1});
- st.shardColl(coll, {c: 1}, {c: 0}, {c: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({c: 1});
+st.shardColl(coll, {c: 1}, {c: 0}, {c: 1}, coll.getDB(), true);
- // Make sure we can successfully upsert, even though we have stale state
- assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
+// Make sure we can successfully upsert, even though we have stale state
+assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
-    // Make sure the upsert fails when using old info
- assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
+// Make sure the upsert fails when using old info
+assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({d: 1});
- st.shardColl(coll, {d: 1}, {d: 0}, {d: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({d: 1});
+st.shardColl(coll, {d: 1}, {d: 0}, {d: 1}, coll.getDB(), true);
- // Make sure we can successfully update, even though we have stale state
- assert.writeOK(coll.insert({d: "d"}));
+// Make sure we can successfully update, even though we have stale state
+assert.writeOK(coll.insert({d: "d"}));
- assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
- assert.eq(staleCollA.findOne().x, "x");
+assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
+assert.eq(staleCollA.findOne().x, "x");
-    // Make sure the update fails when using old info
- assert.writeError(staleCollB.update({c: "c"}, {$set: {x: "y"}}, false, false));
- assert.eq(staleCollB.findOne().x, "x");
+// Make sure the update fails when using old info
+assert.writeError(staleCollB.update({c: "c"}, {$set: {x: "y"}}, false, false));
+assert.eq(staleCollB.findOne().x, "x");
- // Change the collection sharding state
- coll.drop();
- coll.ensureIndex({e: 1});
- // Deletes need to be across two shards to trigger an error.
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- st.shardColl(coll, {e: 1}, {e: 0}, {e: 1}, coll.getDB(), true);
+// Change the collection sharding state
+coll.drop();
+coll.ensureIndex({e: 1});
+// Deletes need to be across two shards to trigger an error.
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+st.shardColl(coll, {e: 1}, {e: 0}, {e: 1}, coll.getDB(), true);
- // Make sure we can successfully remove, even though we have stale state
- assert.writeOK(coll.insert({e: "e"}));
+// Make sure we can successfully remove, even though we have stale state
+assert.writeOK(coll.insert({e: "e"}));
- assert.writeOK(staleCollA.remove({e: "e"}, true));
- assert.eq(null, staleCollA.findOne());
+assert.writeOK(staleCollA.remove({e: "e"}, true));
+assert.eq(null, staleCollA.findOne());
-    // Make sure the remove fails when using old info
- assert.writeError(staleCollB.remove({d: "d"}, true));
+// Make sure the remove fails when using old info
+assert.writeError(staleCollB.remove({d: "d"}, true));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
index ccc8012e9c8..5bece4f1c76 100644
--- a/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
+++ b/jstests/sharding/moveChunk_recipient_rejects_chunk_if_UUID_mismatch.js
@@ -3,49 +3,48 @@
* causes the recipient to fail the migration.
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "inputColl";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "inputColl";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
- let donor = st.shard0;
- let recipient = st.shard1;
+let donor = st.shard0;
+let recipient = st.shard1;
- jsTest.log("Make " + donor.shardName + " the primary shard, and shard collection " + ns);
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, donor.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+jsTest.log("Make " + donor.shardName + " the primary shard, and shard collection " + ns);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, donor.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- jsTest.log("Insert a document with {_id: 0} into " + ns + " through mongos");
- assert.writeOK(st.s.getCollection(ns).insert({_id: 0}));
+jsTest.log("Insert a document with {_id: 0} into " + ns + " through mongos");
+assert.writeOK(st.s.getCollection(ns).insert({_id: 0}));
- jsTest.log("Insert a document with {_id: 1} into " + ns + " directly on the recipient");
- assert.writeOK(recipient.getCollection(ns).insert({_id: 1}));
+jsTest.log("Insert a document with {_id: 1} into " + ns + " directly on the recipient");
+assert.writeOK(recipient.getCollection(ns).insert({_id: 1}));
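+// The direct insert implicitly creates the collection on the recipient with its own
+// UUID, so it should not match the UUID the donor assigned when the collection was
+// sharded through mongos.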
- jsTest.log("Check that the UUID on the recipient differs from the UUID on the donor");
- const recipientUUIDBefore =
- recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
- const donorUUIDBefore = donor.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
- assert.neq(recipientUUIDBefore, donorUUIDBefore);
+jsTest.log("Check that the UUID on the recipient differs from the UUID on the donor");
+const recipientUUIDBefore =
+ recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
+const donorUUIDBefore = donor.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
+assert.neq(recipientUUIDBefore, donorUUIDBefore);
- jsTest.log("Ensure that we fail to migrate data from the donor to the recipient");
- assert.commandFailed(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: recipient.shardName}));
+jsTest.log("Ensure that we fail to migrate data from the donor to the recipient");
+assert.commandFailed(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: recipient.shardName}));
- jsTest.log("Ensure the recipient's collection UUID is unmodified after the migration attempt");
- const recipientUUIDAfter =
- recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
- assert.eq(recipientUUIDBefore, recipientUUIDAfter);
+jsTest.log("Ensure the recipient's collection UUID is unmodified after the migration attempt");
+const recipientUUIDAfter =
+ recipient.getDB(dbName).getCollectionInfos({name: collName})[0].info.uuid;
+assert.eq(recipientUUIDBefore, recipientUUIDAfter);
- jsTest.log("Ensure the document that was on the recipient was not deleted");
- assert.neq(null, recipient.getCollection(ns).findOne({_id: 1}));
+jsTest.log("Ensure the document that was on the recipient was not deleted");
+assert.neq(null, recipient.getCollection(ns).findOne({_id: 1}));
- jsTest.log("Ensure dropCollection causes the collection to be dropped even on the recipient");
- assert.commandWorked(st.s.getDB(dbName).runCommand({drop: collName}));
- assert.eq(0, recipient.getDB(dbName).getCollectionInfos({name: collName}).length);
+jsTest.log("Ensure dropCollection causes the collection to be dropped even on the recipient");
+assert.commandWorked(st.s.getDB(dbName).runCommand({drop: collName}));
+assert.eq(0, recipient.getDB(dbName).getCollectionInfos({name: collName}).length);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 118c70b1415..a2dc328da29 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,57 +1,56 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
+var s = new ShardingTest({shards: 2});
- assert.commandWorked(s.getDB('test1').runCommand({dropDatabase: 1}));
- var db = s.getDB('test1');
- var c = db.foo;
- c.save({a: 1});
- c.save({a: 2});
- c.save({a: 3});
- assert.eq(3, c.count());
+assert.commandWorked(s.getDB('test1').runCommand({dropDatabase: 1}));
+var db = s.getDB('test1');
+var c = db.foo;
+c.save({a: 1});
+c.save({a: 2});
+c.save({a: 3});
+assert.eq(3, c.count());
- assert.commandWorked(
- db.runCommand({create: "view", viewOn: "foo", pipeline: [{$match: {a: 3}}]}));
+assert.commandWorked(db.runCommand({create: "view", viewOn: "foo", pipeline: [{$match: {a: 3}}]}));
- var fromShard = s.getPrimaryShard('test1');
- var toShard = s.getOther(fromShard);
+var fromShard = s.getPrimaryShard('test1');
+var toShard = s.getOther(fromShard);
- assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to has data before move");
- assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect before move");
+assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data before move");
+assert.eq(0, toShard.getDB("test1").foo.count(), "to has data before move");
+assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect before move");
- s.printShardingStatus();
- assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
- s.normalize(fromShard.name),
- "not in db correctly to start");
+s.printShardingStatus();
+assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(fromShard.name),
+ "not in db correctly to start");
- var oldShardName = s.config.databases.findOne({_id: "test1"}).primary;
+var oldShardName = s.config.databases.findOne({_id: "test1"}).primary;
- assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
- s.printShardingStatus();
- assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
- s.normalize(toShard.name),
- "to in config db didn't change after first move");
+assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
+s.printShardingStatus();
+assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(toShard.name),
+ "to in config db didn't change after first move");
- assert.eq(0, fromShard.getDB("test1").foo.count(), "from still has data after move");
- assert.eq(3, toShard.getDB("test1").foo.count(), "to doesn't have data after move");
- assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move");
+assert.eq(0, fromShard.getDB("test1").foo.count(), "from still has data after move");
+assert.eq(3, toShard.getDB("test1").foo.count(), "to doesn't have data after move");
+assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move");
- // Move back, now using shard name instead of server address
- assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: oldShardName}));
- s.printShardingStatus();
- assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
- oldShardName,
- "to in config db didn't change after second move");
+// Move back, now using shard name instead of server address
+assert.commandWorked(s.s0.adminCommand({movePrimary: "test1", to: oldShardName}));
+s.printShardingStatus();
+assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ oldShardName,
+ "to in config db didn't change after second move");
- assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data after move back");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to has data after move back");
- assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move back");
+assert.eq(3, fromShard.getDB("test1").foo.count(), "from doesn't have data after move back");
+assert.eq(0, toShard.getDB("test1").foo.count(), "to has data after move back");
+assert.eq(1, s.s.getDB("test1").view.count(), "count on view incorrect after move back");
- assert.commandFailedWithCode(s.s0.adminCommand({movePrimary: 'test1', to: 'dontexist'}),
- ErrorCodes.ShardNotFound,
- 'attempting to use non-existent shard as primary should fail');
+assert.commandFailedWithCode(s.s0.adminCommand({movePrimary: 'test1', to: 'dontexist'}),
+ ErrorCodes.ShardNotFound,
+ 'attempting to use non-existent shard as primary should fail');
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 988d741e00e..0aad2048861 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -3,82 +3,82 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
+var st = new ShardingTest({mongos: 1, shards: 2});
+var kDbName = 'db';
- var mongos = st.s0;
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var mongos = st.s0;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
- // Fail if invalid namespace.
- assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
+// Fail if invalid namespace.
+assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
- // Fail if database does not exist.
- assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
+// Fail if database does not exist.
+assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
- // Fail if collection is unsharded.
- assert.commandFailed(
- mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
+// Fail if collection is unsharded.
+assert.commandFailed(
+ mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
- function testHashed() {
- var ns = kDbName + '.fooHashed';
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
+function testHashed() {
+ var ns = kDbName + '.fooHashed';
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
- assert(aChunk);
+ var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
+ assert(aChunk);
- // Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
- assert.commandFailed(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
- assert.commandFailed(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
+ // Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
- // Fail if find and bounds are both set.
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, find: {_id: 1}, bounds: [aChunk.min, aChunk.max], to: shard1}));
+ // Fail if find and bounds are both set.
+ assert.commandFailed(mongos.adminCommand(
+ {moveChunk: ns, find: {_id: 1}, bounds: [aChunk.min, aChunk.max], to: shard1}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
- assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
- assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
+ assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
+ assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
- mongos.getDB(kDbName).fooHashed.drop();
- }
+ mongos.getDB(kDbName).fooHashed.drop();
+}
- function testNotHashed(keyDoc) {
- var ns = kDbName + '.foo';
+function testNotHashed(keyDoc) {
+ var ns = kDbName + '.foo';
- // Fail if find is not a valid shard key.
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+ // Fail if find is not a valid shard key.
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
+ var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
- assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
+ assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
-        // Fail if the 'to' shard does not exist
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
+    // Fail if the 'to' shard does not exist
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
- // Fail if chunk is already at shard
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ // Fail if chunk is already at shard
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- mongos.getDB(kDbName).foo.drop();
- }
+ mongos.getDB(kDbName).foo.drop();
+}
- testHashed();
+testHashed();
- testNotHashed({a: 1});
+testNotHashed({a: 1});
- testNotHashed({a: 1, b: 1});
+testNotHashed({a: 1, b: 1});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
index 06802d65c61..c7602b4f644 100644
--- a/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
@@ -1,126 +1,125 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- var checkFindAndModifyResult = function(expected, toCheck) {
- assert.eq(expected.ok, toCheck.ok);
- assert.eq(expected.value, toCheck.value);
- assert.eq(expected.lastErrorObject, toCheck.lastErrorObject);
- };
+var checkFindAndModifyResult = function(expected, toCheck) {
+ assert.eq(expected.ok, toCheck.ok);
+ assert.eq(expected.value, toCheck.value);
+ assert.eq(expected.lastErrorObject, toCheck.lastErrorObject);
+};
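+
+// A retried findAndModify should be answered from the recorded statement history rather
+// than re-executed, so ok, value, and lastErrorObject are all expected to match exactly.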
- var lsid = UUID();
- var tests = [
- {
- coll: 'findAndMod-upsert',
- cmd: {
- findAndModify: 'findAndMod-upsert',
- query: {x: 60},
- update: {$inc: {y: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(37),
- },
- setup: function(coll) {},
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(1, coll.findOne({x: 60}).y);
- },
+var lsid = UUID();
+var tests = [
+ {
+ coll: 'findAndMod-upsert',
+ cmd: {
+ findAndModify: 'findAndMod-upsert',
+ query: {x: 60},
+ update: {$inc: {y: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(37),
},
- {
- coll: 'findAndMod-update-preImage',
- cmd: {
- findAndModify: 'findAndMod-update-preImage',
- query: {x: 60},
- update: {$inc: {y: 1}},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(38),
- },
- setup: function(coll) {
- coll.insert({x: 60});
- },
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(1, coll.findOne({x: 60}).y);
- },
+ setup: function(coll) {},
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
},
- {
- coll: 'findAndMod-update-postImage',
- cmd: {
- findAndModify: 'findAndMod-update-postImage',
- query: {x: 60},
- update: {$inc: {y: 1}},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- },
- setup: function(coll) {
- coll.insert({x: 60});
- },
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(1, coll.findOne({x: 60}).y);
- },
+ checkDocuments: function(coll) {
+ assert.eq(1, coll.findOne({x: 60}).y);
},
- {
- coll: 'findAndMod-delete',
- cmd: {
- findAndModify: 'findAndMod-delete',
- query: {x: 10},
- remove: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(40),
- },
- setup: function(coll) {
- var bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; i++) {
- bulk.insert({x: 10});
- }
- assert.writeOK(bulk.execute());
-
- },
- checkRetryResult: function(result, retryResult) {
- checkFindAndModifyResult(result, retryResult);
- },
- checkDocuments: function(coll) {
- assert.eq(9, coll.find({x: 10}).itcount());
- },
+ },
+ {
+ coll: 'findAndMod-update-preImage',
+ cmd: {
+ findAndModify: 'findAndMod-update-preImage',
+ query: {x: 60},
+ update: {$inc: {y: 1}},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(38),
+ },
+ setup: function(coll) {
+ coll.insert({x: 60});
+ },
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
+ },
+ checkDocuments: function(coll) {
+ assert.eq(1, coll.findOne({x: 60}).y);
+ },
+ },
+ {
+ coll: 'findAndMod-update-postImage',
+ cmd: {
+ findAndModify: 'findAndMod-update-postImage',
+ query: {x: 60},
+ update: {$inc: {y: 1}},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ },
+ setup: function(coll) {
+ coll.insert({x: 60});
},
- ];
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
+ },
+ checkDocuments: function(coll) {
+ assert.eq(1, coll.findOne({x: 60}).y);
+ },
+ },
+ {
+ coll: 'findAndMod-delete',
+ cmd: {
+ findAndModify: 'findAndMod-delete',
+ query: {x: 10},
+ remove: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(40),
+ },
+ setup: function(coll) {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < 10; i++) {
+ bulk.insert({x: 10});
+ }
+ assert.writeOK(bulk.execute());
+ },
+ checkRetryResult: function(result, retryResult) {
+ checkFindAndModifyResult(result, retryResult);
+ },
+ checkDocuments: function(coll) {
+ assert.eq(9, coll.find({x: 10}).itcount());
+ },
+ },
+];
-    // Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- tests.forEach(function(test) {
- testMoveChunkWithSession(
- st, test.coll, test.cmd, test.setup, test.checkRetryResult, test.checkDocuments);
- });
+tests.forEach(function(test) {
+ testMoveChunkWithSession(
+ st, test.coll, test.cmd, test.setup, test.checkRetryResult, test.checkDocuments);
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_insert_with_write_retryability.js b/jstests/sharding/move_chunk_insert_with_write_retryability.js
index 0f755de41c4..c6a79000712 100644
--- a/jstests/sharding/move_chunk_insert_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_insert_with_write_retryability.js
@@ -1,48 +1,48 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
-    // Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- var coll = 'insert';
- var cmd = {
- insert: coll,
- documents: [{x: 10}, {x: 30}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(34),
- };
- var setup = function() {};
- var checkRetryResult = function(result, retryResult) {
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- };
- var checkDocuments = function(coll) {
- assert.eq(1, coll.find({x: 10}).itcount());
- assert.eq(1, coll.find({x: 30}).itcount());
- };
+var coll = 'insert';
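+
+// The lsid/txnNumber pair below marks the insert as a retryable write, so the shards
+// record it and a retry after the chunk migration can be answered without re-inserting.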
+var cmd = {
+ insert: coll,
+ documents: [{x: 10}, {x: 30}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(34),
+};
+var setup = function() {};
+var checkRetryResult = function(result, retryResult) {
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+};
+var checkDocuments = function(coll) {
+ assert.eq(1, coll.find({x: 10}).itcount());
+ assert.eq(1, coll.find({x: 30}).itcount());
+};
- testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
+testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js
index fe0942f0558..1b15fb198cf 100644
--- a/jstests/sharding/move_chunk_open_cursors.js
+++ b/jstests/sharding/move_chunk_open_cursors.js
@@ -3,52 +3,50 @@
* migration.
*/
(function() {
- "use strict";
- const dbName = "test";
- const collName = jsTest.name();
- const testNs = dbName + "." + collName;
+"use strict";
+const dbName = "test";
+const collName = jsTest.name();
+const testNs = dbName + "." + collName;
- const nDocs = 1000 * 10;
- const st = new ShardingTest({shards: 2});
- const coll = st.s0.getDB(dbName)[collName];
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+const nDocs = 1000 * 10;
+const st = new ShardingTest({shards: 2});
+const coll = st.s0.getDB(dbName)[collName];
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
-    // Make sure we know which shard will host the data to begin with.
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
- assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
+// Make sure we know which shard will host the data to begin with.
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
+assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
- // Open some cursors before migrating data.
- // Ensure the cursor stage at the front of the pipeline does not buffer any data.
- assert.commandWorked(
- st.shard0.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
- const getMoreBatchSize = 100;
- const aggResponse = assert.commandWorked(
- coll.runCommand({aggregate: collName, pipeline: [], cursor: {batchSize: 0}}));
- const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize);
+// Open some cursors before migrating data.
+// Ensure the cursor stage at the front of the pipeline does not buffer any data.
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
+const getMoreBatchSize = 100;
+const aggResponse = assert.commandWorked(
+ coll.runCommand({aggregate: collName, pipeline: [], cursor: {batchSize: 0}}));
+const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize);
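+
+// Note: batchSize: 0 opens the cursor without returning any documents, so every document
+// is fetched via getMore only after the migration below has happened.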
- assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}}));
- assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
+assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}}));
+assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
- assert.eq(
- aggCursor.itcount(),
- nDocs,
- "expected agg cursor to return all matching documents, even though some have migrated");
+assert.eq(aggCursor.itcount(),
+ nDocs,
+ "expected agg cursor to return all matching documents, even though some have migrated");
- // Test the same behavior with the find command.
- const findResponse = assert.commandWorked(
- coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize}));
- const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize);
- assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}}));
- assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName}));
+// Test the same behavior with the find command.
+const findResponse = assert.commandWorked(
+ coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize}));
+const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize);
+assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}}));
+assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName}));
- assert.eq(
- findCursor.itcount(),
- nDocs,
- "expected find cursor to return all matching documents, even though some have migrated");
- st.stop();
+assert.eq(findCursor.itcount(),
+ nDocs,
+ "expected find cursor to return all matching documents, even though some have migrated");
+st.stop();
}());
diff --git a/jstests/sharding/move_chunk_remove_with_write_retryability.js b/jstests/sharding/move_chunk_remove_with_write_retryability.js
index 78c20ceddc6..c417710f462 100644
--- a/jstests/sharding/move_chunk_remove_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_remove_with_write_retryability.js
@@ -1,55 +1,55 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
-    // Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- var coll = 'delete';
- var cmd = {
- delete: coll,
- deletes: [{q: {x: 10}, limit: 1}, {q: {x: 20}, limit: 1}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(36),
- };
- var setup = function(coll) {
- var bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; i++) {
- bulk.insert({x: 10});
- bulk.insert({x: 20});
- }
- assert.writeOK(bulk.execute());
- };
- var checkRetryResult = function(result, retryResult) {
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- };
- var checkDocuments = function(coll) {
- assert.eq(9, coll.find({x: 10}).itcount());
- assert.eq(9, coll.find({x: 20}).itcount());
- };
+var coll = 'delete';
+var cmd = {
+ delete: coll,
+ deletes: [{q: {x: 10}, limit: 1}, {q: {x: 20}, limit: 1}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(36),
+};
+var setup = function(coll) {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < 10; i++) {
+ bulk.insert({x: 10});
+ bulk.insert({x: 20});
+ }
+ assert.writeOK(bulk.execute());
+};
+var checkRetryResult = function(result, retryResult) {
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+};
+var checkDocuments = function(coll) {
+ assert.eq(9, coll.find({x: 10}).itcount());
+ assert.eq(9, coll.find({x: 20}).itcount());
+};
- testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
+testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js b/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js
index 592ba020629..32a33e21a82 100644
--- a/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js
+++ b/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js
@@ -10,76 +10,141 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
- load('./jstests/libs/chunk_manipulation_util.js');
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- // For startParallelOps to write its state
- let staticMongod = MongoRunner.runMongod({});
-
- let st = new ShardingTest({shards: {rs0: {nodes: 2}, rs1: {nodes: 2}, rs2: {nodes: 2}}});
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
- let testDB = st.s.getDB(dbName);
- let testColl = testDB.foo;
-
- // Create a sharded collection with three chunks:
- // [-inf, -10), [-10, 10), [10, inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -10}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));
-
- /**
-     * Sets up a test by moving chunks so that one chunk is on each
- * shard, with the following distribution:
- * shard0: [-inf, -10)
- * shard1: [-10, 10)
- * shard2: [10, inf)
- */
- function setUp() {
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: -100}, to: st.shard0.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
- }
-
- /**
- * Tears down a test by dropping all documents from the test collection.
- */
- function tearDown() {
- assert.commandWorked(testColl.deleteMany({}));
- }
-
- /**
- * Generic function to run a test. 'description' is a description of the test for logging
- * purposes and 'testBody' is the test function.
- */
- function test(description, testBody) {
- jsTest.log(`Running Test Setup: ${description}`);
- setUp();
- jsTest.log(`Running Test Body: ${description}`);
- testBody();
- jsTest.log(`Running Test Tear-Down: ${description}`);
- tearDown();
- jsTest.log(`Finished Running Test: ${description}`);
- }
-
- test("Updating shard key in retryable write receives error on retry", () => {
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('./jstests/libs/chunk_manipulation_util.js');
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+// For startParallelOps to write its state
+let staticMongod = MongoRunner.runMongod({});
+
+let st = new ShardingTest({shards: {rs0: {nodes: 2}, rs1: {nodes: 2}, rs2: {nodes: 2}}});
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+let testDB = st.s.getDB(dbName);
+let testColl = testDB.foo;
+
+// Create a sharded collection with three chunks:
+// [-inf, -10), [-10, 10), [10, inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -10}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));
+
+/**
+ * Sets up a test by moving chunks so that one chunk is on each
+ * shard, with the following distribution:
+ * shard0: [-inf, -10)
+ * shard1: [-10, 10)
+ * shard2: [10, inf)
+ */
+function setUp() {
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: -100}, to: st.shard0.shardName}));
+ assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.shard2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+}
+
+/**
+ * Tears down a test by dropping all documents from the test collection.
+ */
+function tearDown() {
+ assert.commandWorked(testColl.deleteMany({}));
+}
+
+/**
+ * Generic function to run a test. 'description' is a description of the test for logging
+ * purposes and 'testBody' is the test function.
+ */
+function test(description, testBody) {
+ jsTest.log(`Running Test Setup: ${description}`);
+ setUp();
+ jsTest.log(`Running Test Body: ${description}`);
+ testBody();
+ jsTest.log(`Running Test Tear-Down: ${description}`);
+ tearDown();
+ jsTest.log(`Finished Running Test: ${description}`);
+}
+
+test("Updating shard key in retryable write receives error on retry", () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+
+ // Insert a single document on shard 0.
+ testColl.insert({x: shardKeyValueOnShard0});
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ // Update the document shard key. The document should now be on shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
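+
+    // The shard key update is executed internally as a transaction, so on retry the
+    // shard finds transaction history rather than a retryable write to replay.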
+ // Retry the command. This should retry against shard 0, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+});
+
+test(
+ "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+
+ // Insert a single document on shard 0.
+ testColl.insert({x: shardKeyValueOnShard0});
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ // Update the document shard key. The document should now be on shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ // Move the chunk that contained the original document to shard 1.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
+
+ // Retry the command. This should retry against shard 1, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard and then to a third shard",
+ () => {
const shardKeyValueOnShard0 = -100;
const shardKeyValueOnShard1 = 0;
@@ -102,295 +167,228 @@
assert.eq(result.nModified, 1);
assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
- // Retry the command. This should retry against shard 0, which should throw
+ // Move the chunk that contained the original document to shard 1.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
+
+ // Then move the same chunk that contained the original document to shard 2.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
+
+ // Retry the command. This should retry against shard 1, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a shard without knowledge of the transaction",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+
+ // Insert a single document on shard 0.
+ testColl.insert({x: shardKeyValueOnShard0});
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
+ // Update the document shard key. The document should now be on shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ // Move the chunk that contained the original document to shard 2,
+    // which does not know about the transaction.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
+
+ // Retry the command. This should retry against shard 2, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "config.transactions entries for single-shard transactions which commit during transferMods phase are successfully migrated as dead-end sentinels",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const anotherShardKeyValueOnShard0 = -101;
+ const shardKeyValueOnShard1 = 0;
+ const lsid = {id: UUID()};
+ const txnNumber = 35;
+
+ // Insert a single document on shard 0.
+ assert.commandWorked(testColl.insert({x: shardKeyValueOnShard0}));
+
+ const cmdToRunInTransaction = {
+ update: collName,
+ updates: [
+ // Add a new field.
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
+ ],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+ };
+
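+    // Identical statement, lsid, and txnNumber as the transaction above, but
+    // without startTransaction/autocommit, simulating a retryable-write retry.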
+ const fakeRetryCmd = {
+ update: collName,
+ updates: [
+ // Add a new field.
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
+ ],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber)
+ };
+
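+    // Pause the migration in its steady state so the transaction commits while
+    // the chunk is still being transferred (i.e. during the transferMods phase).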
+ pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ let joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard1.shardName);
+
+ waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Update a document being migrated.
+ const result = assert.commandWorked(testDB.runCommand(cmdToRunInTransaction));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+
+ assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ writeConcern: {w: "majority"},
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+ }));
+
+ // Check that the update from the transaction succeeded.
+ const resultingDoc = testColl.findOne({x: shardKeyValueOnShard0});
+ assert.neq(resultingDoc, null);
+ assert.eq(resultingDoc["a"], 4);
+
+ unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Wait for moveChunk to complete
+ joinMoveChunk();
+
+ st.printShardingStatus();
+ // Retry the command. This should retry against shard 1, which should throw
+ // IncompleteTransactionHistory.
+ assert.commandFailedWithCode(testDB.runCommand(fakeRetryCmd),
+ ErrorCodes.IncompleteTransactionHistory);
+ });
+
+test(
+ "Update to shard key in retryable write during transferMods phase of chunk migration is migrated successfully to a node not involved in the shard key update",
+ () => {
+ const shardKeyValueOnShard0 = -100;
+ const shardKeyValueOnShard1 = 0;
+ const docId = 0;
+
+ // Insert a single document on shard 0.
+ assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
+
+ const cmdObj = {
+ update: collName,
+ updates: [
+ {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+ };
+
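+    // Pause the migration in its steady state so the shard key update commits
+    // while the chunk is in flight.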
+ pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // We're going to do a shard key update to move a document from shard 0 to shard 1, so
+ // here we move the chunk from shard 0 to shard 2, which won't be involved in the
+ // transaction created by the shard key update.
+ let joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard2.shardName);
+
+ waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Update the document shard key so that the document will move from shard 0 to shard 1.
+ const result = assert.commandWorked(testDB.runCommand(cmdObj));
+ assert.eq(result.n, 1);
+ assert.eq(result.nModified, 1);
+ assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+ unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+ // Wait for moveChunk to complete
+ joinMoveChunk();
+
+ st.printShardingStatus();
+ // Retry the command. This should retry against shard 2, which should throw
// IncompleteTransactionHistory.
assert.commandFailedWithCode(testDB.runCommand(cmdObj),
ErrorCodes.IncompleteTransactionHistory);
});
- test(
- "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
-
- // Insert a single document on shard 0.
- testColl.insert({x: shardKeyValueOnShard0});
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- // Update the document shard key. The document should now be on shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // Move the chunk that contained the original document to shard 1.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
-
- // Retry the command. This should retry against shard 1, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
-
- });
-
- test(
- "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a new shard and then to a third shard",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
-
- // Insert a single document on shard 0.
- testColl.insert({x: shardKeyValueOnShard0});
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- // Update the document shard key. The document should now be on shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // Move the chunk that contained the original document to shard 1.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard1.shardName}));
-
- // Then move the same chunk that contained the original document to shard 2.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
-
- // Retry the command. This should retry against shard 1, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- test(
- "Updating shard key in retryable write receives error on retry when the original chunk has been migrated to a shard without knowledge of the transaction",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
-
- // Insert a single document on shard 0.
- testColl.insert({x: shardKeyValueOnShard0});
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- // Update the document shard key. The document should now be on shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // Move the chunk that contained the original document to shard 2,
- // which does not know about the tranasaction.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {x: shardKeyValueOnShard0}, to: st.shard2.shardName}));
-
- // Retry the command. This should retry against shard 2, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- test(
- "config.transactions entries for single-shard transactions which commit during transferMods phase are successfully migrated as dead-end sentinels",
- () => {
- const shardKeyValueOnShard0 = -100;
- const anotherShardKeyValueOnShard0 = -101;
- const shardKeyValueOnShard1 = 0;
- const lsid = {id: UUID()};
- const txnNumber = 35;
-
- // Insert a single document on shard 0.
- assert.commandWorked(testColl.insert({x: shardKeyValueOnShard0}));
-
- const cmdToRunInTransaction = {
- update: collName,
- updates: [
- // Add a new field.
- {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
- ],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- };
-
- const fakeRetryCmd = {
- update: collName,
- updates: [
- // Add a new field.
- {q: {x: shardKeyValueOnShard0}, u: {$set: {a: 4}}},
- ],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber)
- };
-
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- let joinMoveChunk = moveChunkParallel(
- staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard1.shardName);
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Update a document being migrated.
- const result = assert.commandWorked(testDB.runCommand(cmdToRunInTransaction));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
-
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- writeConcern: {w: "majority"},
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Check that the update from the transaction succeeded.
- const resultingDoc = testColl.findOne({x: shardKeyValueOnShard0});
- assert.neq(resultingDoc, null);
- assert.eq(resultingDoc["a"], 4);
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Wait for moveChunk to complete
- joinMoveChunk();
-
- st.printShardingStatus();
- // Retry the command. This should retry against shard 1, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(fakeRetryCmd),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- test(
- "Update to shard key in retryable write during transferMods phase of chunk migration is migrated successfully to a node not involved in the shard key update",
- () => {
- const shardKeyValueOnShard0 = -100;
- const shardKeyValueOnShard1 = 0;
- const docId = 0;
-
- // Insert a single document on shard 0.
- assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
-
- const cmdObj = {
- update: collName,
- updates: [
- {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
-
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // We're going to do a shard key update to move a document from shard 0 to shard 1, so
- // here we move the chunk from shard 0 to shard 2, which won't be involved in the
- // transaction created by the shard key update.
- let joinMoveChunk = moveChunkParallel(
- staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns, st.shard2.shardName);
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Update the document shard key so that the document will move from shard 0 to shard 1.
- const result = assert.commandWorked(testDB.runCommand(cmdObj));
- assert.eq(result.n, 1);
- assert.eq(result.nModified, 1);
- assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // Wait for moveChunk to complete
- joinMoveChunk();
-
- st.printShardingStatus();
- // Retry the command. This should retry against shard 2, which should throw
- // IncompleteTransactionHistory.
- assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- ErrorCodes.IncompleteTransactionHistory);
- });
-
- // TODO (SERVER-40815) This test currently fails with DuplicateKeyError on _id.
- //
- // test(
- // "Update to shard key in retryable write during transfer mods phase of chunk migration is
- // migrated successfully ",
- // () => {
- // const shardKeyValueOnShard0 = -100;
- // const shardKeyValueOnShard1 = 0;
- // const docId = 0;
-
- // // Insert a single document on shard 0.
- // assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
-
- // const cmdObj = {
- // update: collName,
- // updates: [
- // {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
- // ],
- // ordered: false,
- // lsid: {id: UUID()},
- // txnNumber: NumberLong(35),
- // };
-
- // pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // let joinMoveChunk = moveChunkParallel(
- // staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns,
- // st.shard1.shardName);
-
- // waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // // Update the document shard key.
-
- // // THIS CURRENTLY FAILS WITH DuplicateKeyError on _id
- // const result = assert.commandWorked(testDB.runCommand(cmdObj));
- // assert.eq(result.n, 1);
- // assert.eq(result.nModified, 1);
- // assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
-
- // unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- // // Wait for moveChunk to complete
- // joinMoveChunk();
-
- // st.printShardingStatus();
- // // Retry the command. This should retry against shard 1, which should throw
- // // IncompleteTransactionHistory.
- // assert.commandFailedWithCode(testDB.runCommand(cmdObj),
- // ErrorCodes.IncompleteTransactionHistory);
- // });
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+// TODO (SERVER-40815) This test currently fails with DuplicateKeyError on _id.
+//
+// test(
+// "Update to shard key in retryable write during transfer mods phase of chunk migration is
+// migrated successfully ",
+// () => {
+// const shardKeyValueOnShard0 = -100;
+// const shardKeyValueOnShard1 = 0;
+// const docId = 0;
+
+// // Insert a single document on shard 0.
+// assert.commandWorked(testColl.insert({_id: docId, x: shardKeyValueOnShard0}));
+
+// const cmdObj = {
+// update: collName,
+// updates: [
+// {q: {x: shardKeyValueOnShard0}, u: {$set: {x: shardKeyValueOnShard1}}},
+// ],
+// ordered: false,
+// lsid: {id: UUID()},
+// txnNumber: NumberLong(35),
+// };
+
+// pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+// let joinMoveChunk = moveChunkParallel(
+// staticMongod, st.s.host, {x: shardKeyValueOnShard0}, null, ns,
+// st.shard1.shardName);
+
+// waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+// // Update the document shard key.
+
+// // THIS CURRENTLY FAILS WITH DuplicateKeyError on _id
+// const result = assert.commandWorked(testDB.runCommand(cmdObj));
+// assert.eq(result.n, 1);
+// assert.eq(result.nModified, 1);
+// assert.eq(testColl.find({x: shardKeyValueOnShard1}).itcount(), 1);
+
+// unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+// // Wait for moveChunk to complete
+// joinMoveChunk();
+
+// st.printShardingStatus();
+// // Retry the command. This should retry against shard 1, which should throw
+// // IncompleteTransactionHistory.
+// assert.commandFailedWithCode(testDB.runCommand(cmdObj),
+// ErrorCodes.IncompleteTransactionHistory);
+// });
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/move_chunk_update_with_write_retryability.js b/jstests/sharding/move_chunk_update_with_write_retryability.js
index 03748a56c20..b7d0ddae5d3 100644
--- a/jstests/sharding/move_chunk_update_with_write_retryability.js
+++ b/jstests/sharding/move_chunk_update_with_write_retryability.js
@@ -1,58 +1,58 @@
load("jstests/sharding/move_chunk_with_session_helper.js");
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- // Prevent unnecessary elections in the first shard replica set. Shard 'rs1' shard will need its
- // secondary to get elected, so we don't give it a zero priority.
- var st = new ShardingTest({
- mongos: 2,
- shards: {
- rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
- }
- });
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+// Prevent unnecessary elections in the first shard replica set. The 'rs1' shard will need its
+// secondary to get elected, so we don't give it a zero priority.
+var st = new ShardingTest({
+ mongos: 2,
+ shards: {
+ rs0: {nodes: [{rsConfig: {}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {}}, {rsConfig: {}}]}
+ }
+});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- var coll = 'update';
- var cmd = {
- update: 'update',
- updates: [
- {q: {x: 10}, u: {$inc: {a: 1}}}, // in place
- {q: {x: 20}, u: {$inc: {b: 1}}, upsert: true},
- {q: {x: 30}, u: {x: 30, z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(35),
- };
- var setup = function(coll) {
- coll.insert({x: 10});
- coll.insert({x: 30});
- };
- var checkRetryResult = function(result, retryResult) {
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.nModified, retryResult.nModified);
- assert.eq(result.upserted, retryResult.upserted);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- };
- var checkDocuments = function(coll) {
- assert.eq(1, coll.findOne({x: 10}).a);
- assert.eq(1, coll.findOne({x: 20}).b);
- assert.eq(1, coll.findOne({x: 30}).z);
- };
+var coll = 'update';
+var cmd = {
+ update: 'update',
+ updates: [
+ {q: {x: 10}, u: {$inc: {a: 1}}}, // in place
+ {q: {x: 20}, u: {$inc: {b: 1}}, upsert: true},
+ {q: {x: 30}, u: {x: 30, z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(35),
+};
+var setup = function(coll) {
+ coll.insert({x: 10});
+ coll.insert({x: 30});
+};
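+// A retried write must return the same result document as the original attempt.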
+var checkRetryResult = function(result, retryResult) {
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.nModified, retryResult.nModified);
+ assert.eq(result.upserted, retryResult.upserted);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+};
+var checkDocuments = function(coll) {
+ assert.eq(1, coll.findOne({x: 10}).a);
+ assert.eq(1, coll.findOne({x: 20}).b);
+ assert.eq(1, coll.findOne({x: 30}).z);
+};
- testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
+testMoveChunkWithSession(st, coll, cmd, setup, checkRetryResult, checkDocuments);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_chunk_wc.js b/jstests/sharding/move_chunk_wc.js
index 601b327b76e..7dc75204d19 100644
--- a/jstests/sharding/move_chunk_wc.js
+++ b/jstests/sharding/move_chunk_wc.js
@@ -10,92 +10,107 @@
load('jstests/libs/write_concern_util.js');
(function() {
- "use strict";
- var st = new ShardingTest({
- shards: {
- rs0: {nodes: 3, settings: {chainingAllowed: false}},
- rs1: {nodes: 5, settings: {chainingAllowed: false}}
- },
- mongos: 1,
- config: 1,
- configReplSetTestOptions: {settings: {chainingAllowed: false}}
- });
+"use strict";
+var st = new ShardingTest({
+ shards: {
+ rs0: {nodes: 3, settings: {chainingAllowed: false}},
+ rs1: {nodes: 5, settings: {chainingAllowed: false}}
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {settings: {chainingAllowed: false}}
+});
- var mongos = st.s;
- var dbName = "move-chunk-wc-test";
- var db = mongos.getDB(dbName);
- var collName = 'leaves';
- var coll = db[collName];
- var numberDoc = 20;
- var s0 = st.shard0.shardName;
- var s1 = st.shard1.shardName;
+var mongos = st.s;
+var dbName = "move-chunk-wc-test";
+var db = mongos.getDB(dbName);
+var collName = 'leaves';
+var coll = db[collName];
+var numberDoc = 20;
+var s0 = st.shard0.shardName;
+var s1 = st.shard1.shardName;
- coll.ensureIndex({x: 1}, {unique: true});
- st.ensurePrimaryShard(db.toString(), s0);
- st.shardColl(collName, {x: 1}, {x: numberDoc / 2}, {x: numberDoc / 2}, db.toString(), true);
+coll.ensureIndex({x: 1}, {unique: true});
+st.ensurePrimaryShard(db.toString(), s0);
+st.shardColl(collName, {x: 1}, {x: numberDoc / 2}, {x: numberDoc / 2}, db.toString(), true);
- for (var i = 0; i < numberDoc; i++) {
- coll.insert({x: i});
- }
- assert.eq(coll.count(), numberDoc);
+for (var i = 0; i < numberDoc; i++) {
+ coll.insert({x: i});
+}
+assert.eq(coll.count(), numberDoc);
- // Checks that each shard has the expected number of chunks.
- function checkChunkCount(s0Count, s1Count) {
- var chunkCounts = st.chunkCounts(collName, db.toString());
- assert.eq(chunkCounts[s0], s0Count);
- assert.eq(chunkCounts[s1], s1Count);
- }
- checkChunkCount(1, 1);
+// Checks that each shard has the expected number of chunks.
+function checkChunkCount(s0Count, s1Count) {
+ var chunkCounts = st.chunkCounts(collName, db.toString());
+ assert.eq(chunkCounts[s0], s0Count);
+ assert.eq(chunkCounts[s1], s1Count);
+}
+checkChunkCount(1, 1);
- var req = {
- moveChunk: coll.toString(),
- find: {x: numberDoc / 2},
- to: s0,
- _secondaryThrottle: true,
- _waitForDelete: true
- };
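+// _secondaryThrottle makes the migration wait for replication between batches,
+// and _waitForDelete blocks until the donor finishes deleting the moved range.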
+var req = {
+ moveChunk: coll.toString(),
+ find: {x: numberDoc / 2},
+ to: s0,
+ _secondaryThrottle: true,
+ _waitForDelete: true
+};
- req.writeConcern = {w: 1, wtimeout: 30000};
- jsTest.log("Testing " + tojson(req));
- var res = db.adminCommand(req);
- assert.commandWorked(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(2, 0);
+req.writeConcern = {
+ w: 1,
+ wtimeout: 30000
+};
+jsTest.log("Testing " + tojson(req));
+var res = db.adminCommand(req);
+assert.commandWorked(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(2, 0);
- // This should pass because w: majority is always passed to config servers.
- req.writeConcern = {w: 2, wtimeout: 30000};
- jsTest.log("Testing " + tojson(req));
- req.to = s1;
- res = db.adminCommand(req);
- assert.commandWorked(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
+// This should pass because w: majority is always passed to config servers.
+req.writeConcern = {
+ w: 2,
+ wtimeout: 30000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s1;
+res = db.adminCommand(req);
+assert.commandWorked(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
- // This should fail because the writeConcern cannot be satisfied on the to shard.
- req.writeConcern = {w: 4, wtimeout: 3000};
- jsTest.log("Testing " + tojson(req));
- req.to = s0;
- res = db.adminCommand(req);
- assert.commandFailed(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
+// This should fail because the writeConcern cannot be satisfied on the to shard.
+req.writeConcern = {
+ w: 4,
+ wtimeout: 3000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s0;
+res = db.adminCommand(req);
+assert.commandFailed(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
- // This should fail because the writeConcern cannot be satisfied on the from shard.
- req.writeConcern = {w: 6, wtimeout: 3000};
- jsTest.log("Testing " + tojson(req));
- req.to = s0;
- res = db.adminCommand(req);
- assert.commandFailed(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
+// This should fail because the writeConcern cannot be satisfied on the from shard.
+req.writeConcern = {
+ w: 6,
+ wtimeout: 3000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s0;
+res = db.adminCommand(req);
+assert.commandFailed(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
- // This should fail because the writeConcern is invalid and cannot be satisfied anywhere.
- req.writeConcern = {w: "invalid", wtimeout: 3000};
- jsTest.log("Testing " + tojson(req));
- req.to = s0;
- res = db.adminCommand(req);
- assert.commandFailed(res);
- assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
- checkChunkCount(1, 1);
- st.stop();
+// This should fail because the writeConcern is invalid and cannot be satisfied anywhere.
+req.writeConcern = {
+ w: "invalid",
+ wtimeout: 3000
+};
+jsTest.log("Testing " + tojson(req));
+req.to = s0;
+res = db.adminCommand(req);
+assert.commandFailed(res);
+assert(!res.writeConcernError, 'moveChunk had writeConcernError: ' + tojson(res));
+checkChunkCount(1, 1);
+st.stop();
})();
diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js
index 477d4732b20..dbfcb88d492 100644
--- a/jstests/sharding/move_primary_basic.js
+++ b/jstests/sharding/move_primary_basic.js
@@ -3,64 +3,63 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var mongos = st.s0;
+var st = new ShardingTest({mongos: 1, shards: 2});
+var mongos = st.s0;
- var kDbName = 'db';
+var kDbName = 'db';
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
- assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- // Can run only against the admin database.
- assert.commandFailedWithCode(
- mongos.getDB('test').runCommand({movePrimary: kDbName, to: shard0}),
- ErrorCodes.Unauthorized);
+// Can run only against the admin database.
+assert.commandFailedWithCode(mongos.getDB('test').runCommand({movePrimary: kDbName, to: shard0}),
+ ErrorCodes.Unauthorized);
- // Can't movePrimary for 'config' database.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'config', to: shard0}));
+// Can't movePrimary for 'config' database.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'config', to: shard0}));
- // Can't movePrimary for 'local' database.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'local', to: shard0}));
+// Can't movePrimary for 'local' database.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'local', to: shard0}));
- // Can't movePrimary for 'admin' database.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'admin', to: shard0}));
+// Can't movePrimary for 'admin' database.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'admin', to: shard0}));
- // Can't movePrimary for invalid db name.
- assert.commandFailed(mongos.adminCommand({movePrimary: 'a.b', to: shard0}));
- assert.commandFailed(mongos.adminCommand({movePrimary: '', to: shard0}));
+// Can't movePrimary for invalid db name.
+assert.commandFailed(mongos.adminCommand({movePrimary: 'a.b', to: shard0}));
+assert.commandFailed(mongos.adminCommand({movePrimary: '', to: shard0}));
- // Fail if 'to' shard does not exist or empty.
- assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: 'Unknown'}));
- assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: ''}));
- assert.commandFailed(mongos.adminCommand({movePrimary: kDbName}));
+// Fail if the 'to' shard does not exist or is empty.
+assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: 'Unknown'}));
+assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: ''}));
+assert.commandFailed(mongos.adminCommand({movePrimary: kDbName}));
- let versionBeforeMovePrimary = mongos.getDB('config').databases.findOne({_id: kDbName}).version;
+let versionBeforeMovePrimary = mongos.getDB('config').databases.findOne({_id: kDbName}).version;
- // Succeed if 'to' shard exists and verify metadata changes.
- assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+// Succeed if 'to' shard exists and verify metadata changes.
+assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- assert.eq(versionBeforeMovePrimary.lastMod + 1,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
- assert.eq(versionBeforeMovePrimary.uuid,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
+assert.eq(versionBeforeMovePrimary.lastMod + 1,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
+assert.eq(versionBeforeMovePrimary.uuid,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
- // Succeed if 'to' shard is already the primary shard for the db.
- assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+// Succeed if 'to' shard is already the primary shard for the db.
+assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
- // Verify the version doesn't change if the 'to' shard is already the primary shard.
- assert.eq(versionBeforeMovePrimary.lastMod + 1,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
- assert.eq(versionBeforeMovePrimary.uuid,
- mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
+// Verify the version doesn't change if the 'to' shard is already the primary shard.
+assert.eq(versionBeforeMovePrimary.lastMod + 1,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.lastMod);
+assert.eq(versionBeforeMovePrimary.uuid,
+ mongos.getDB('config').databases.findOne({_id: kDbName}).version.uuid);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_primary_clone_test.js b/jstests/sharding/move_primary_clone_test.js
index 6c30ff56b46..6baa283405a 100644
--- a/jstests/sharding/move_primary_clone_test.js
+++ b/jstests/sharding/move_primary_clone_test.js
@@ -1,218 +1,213 @@
(function() {
- 'use strict';
-
- function sortByName(a, b) {
- if (a.name < b.name)
- return -1;
- if (a.name > b.name)
- return 1;
- return 0;
- }
+'use strict';
- function checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, barUUID, fooUUID) {
- var res = toShard.getDB("test1").runCommand({listCollections: 1});
- assert.commandWorked(res);
+function sortByName(a, b) {
+ if (a.name < b.name)
+ return -1;
+ if (a.name > b.name)
+ return 1;
+ return 0;
+}
- var collections = res.cursor.firstBatch;
-
- // Sort collections by name.
- collections.sort(sortByName);
- assert.eq(collections.length, 2);
-
- var c1, c2;
- [c1, c2] = collections;
-
- function checkName(c, expectedName) {
- assert.eq(c.name,
- expectedName,
- 'Expected collection to be ' + expectedName + ', got ' + c.name);
- }
-
- function checkOptions(c, expectedOptions) {
- assert.hasFields(c, ['options'], 'Missing options field for collection ' + c.name);
- assert.hasFields(
- c.options, expectedOptions, 'Missing expected option(s) for collection ' + c.name);
- }
-
- function checkUUIDsEqual(c, expectedUUID) {
- assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
- assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
- assert.eq(c.info.uuid, expectedUUID, 'Incorrect uuid for collection ' + c.name);
- }
-
- function checkUUIDsNotEqual(c, originalUUID) {
- assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
- assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
- assert.neq(c.info.uuid,
- originalUUID,
- 'UUID for ' + c.name +
- ' should be different than the original collection but is the same');
- }
-
- function checkIndexes(collName, expectedIndexes) {
- var res = toShard.getDB('test1').runCommand({listIndexes: collName});
- assert.commandWorked(res, 'Failed to get indexes for collection ' + collName);
- var indexes = res.cursor.firstBatch;
- indexes.sort(sortByName);
-
- assert.eq(indexes.length, 2);
-
- indexes.forEach((index, i) => {
- var expected;
- if (i == 0) {
- expected = {name: "_id_", key: {_id: 1}};
- } else {
- expected = expectedIndexes[i - 1];
- }
- Object.keys(expected).forEach(k => {
- assert.eq(index[k], expected[k]);
- });
- });
- }
-
- function checkCount(shard, collName, count) {
- var res = shard.getDB('test1').runCommand({count: collName});
- assert.commandWorked(res);
- assert.eq(res.n, count);
- }
-
- checkName(c1, 'bar');
- checkName(c2, 'foo');
- checkOptions(c1, Object.keys(barOptions));
- checkIndexes('bar', barIndexes);
- checkOptions(c2, Object.keys(fooOptions));
- checkIndexes('foo', fooIndexes);
-
- if (sharded) {
- checkCount(fromShard, 'foo', 3);
- checkCount(fromShard, 'bar', 3);
- checkCount(toShard, 'foo', 0);
- checkCount(toShard, 'bar', 0);
-
- // UUIDs should be the same as the original
- checkUUIDsEqual(c1, barUUID);
- checkUUIDsEqual(c2, fooUUID);
- } else {
- checkCount(toShard, 'foo', 3);
- checkCount(toShard, 'bar', 3);
- checkCount(fromShard, 'foo', 0);
- checkCount(fromShard, 'bar', 0);
-
- // UUIDs should not be the same as the original
- checkUUIDsNotEqual(c1, barUUID);
- checkUUIDsNotEqual(c2, fooUUID);
- }
- }
+function checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, barUUID, fooUUID) {
+ var res = toShard.getDB("test1").runCommand({listCollections: 1});
+ assert.commandWorked(res);
- function createCollections(sharded) {
- assert.commandWorked(st.getDB('test1').runCommand({dropDatabase: 1}));
- var db = st.getDB('test1');
+ var collections = res.cursor.firstBatch;
- assert.commandWorked(db.createCollection('foo', fooOptions));
- assert.commandWorked(db.createCollection('bar', barOptions));
+ // Sort collections by name.
+ collections.sort(sortByName);
+ assert.eq(collections.length, 2);
- for (let i = 0; i < 3; i++) {
- assert.writeOK(db.foo.insert({a: i}));
- assert.writeOK(db.bar.insert({a: i}));
- }
- assert.eq(3, db.foo.count());
- assert.eq(3, db.bar.count());
+ var c1, c2;
+ [c1, c2] = collections;
- assert.commandWorked(db.runCommand({createIndexes: 'foo', indexes: fooIndexes}));
- assert.commandWorked(db.runCommand({createIndexes: 'bar', indexes: barIndexes}));
+ function checkName(c, expectedName) {
+ assert.eq(
+ c.name, expectedName, 'Expected collection to be ' + expectedName + ', got ' + c.name);
+ }
- if (sharded) {
- assert.commandWorked(db.adminCommand({enableSharding: 'test1'}));
- assert.commandWorked(db.adminCommand({shardCollection: 'test1.foo', key: {_id: 1}}));
- assert.commandWorked(db.adminCommand({shardCollection: 'test1.bar', key: {_id: 1}}));
- }
+ function checkOptions(c, expectedOptions) {
+ assert.hasFields(c, ['options'], 'Missing options field for collection ' + c.name);
+ assert.hasFields(
+ c.options, expectedOptions, 'Missing expected option(s) for collection ' + c.name);
}
- function movePrimaryWithFailpoint(sharded) {
- var db = st.getDB('test1');
- createCollections(sharded);
+ function checkUUIDsEqual(c, expectedUUID) {
+ assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
+ assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
+ assert.eq(c.info.uuid, expectedUUID, 'Incorrect uuid for collection ' + c.name);
+ }
- var fromShard = st.getPrimaryShard('test1');
- var toShard = st.getOther(fromShard);
+ function checkUUIDsNotEqual(c, originalUUID) {
+ assert.hasFields(c, ['info'], 'Missing info field for collection ' + c.name);
+ assert.hasFields(c.info, ['uuid'], 'Missing uuid field for collection ' + c.name);
+ assert.neq(c.info.uuid,
+ originalUUID,
+ 'UUID for ' + c.name +
+ ' should be different than the original collection but is the same');
+ }
- assert.eq(
- 3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
- assert.eq(
- 3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
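+    // Each collection should have exactly two indexes: the implicit _id index
+    // plus the single secondary index created by the test.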
+ function checkIndexes(collName, expectedIndexes) {
+ var res = toShard.getDB('test1').runCommand({listIndexes: collName});
+ assert.commandWorked(res, 'Failed to get indexes for collection ' + collName);
+ var indexes = res.cursor.firstBatch;
+ indexes.sort(sortByName);
+
+ assert.eq(indexes.length, 2);
+
+ indexes.forEach((index, i) => {
+ var expected;
+ if (i == 0) {
+ expected = {name: "_id_", key: {_id: 1}};
+ } else {
+ expected = expectedIndexes[i - 1];
+ }
+ Object.keys(expected).forEach(k => {
+ assert.eq(index[k], expected[k]);
+ });
+ });
+ }
+
+ function checkCount(shard, collName, count) {
+ var res = shard.getDB('test1').runCommand({count: collName});
+ assert.commandWorked(res);
+ assert.eq(res.n, count);
+ }
- var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
- var fromColls = listCollsFrom.cursor.firstBatch;
- fromColls.sort(sortByName);
- var baruuid = fromColls[0].info.uuid;
- var foouuid = fromColls[1].info.uuid;
+ checkName(c1, 'bar');
+ checkName(c2, 'foo');
+ checkOptions(c1, Object.keys(barOptions));
+ checkIndexes('bar', barIndexes);
+ checkOptions(c2, Object.keys(fooOptions));
+ checkIndexes('foo', fooIndexes);
+
+ if (sharded) {
+ checkCount(fromShard, 'foo', 3);
+ checkCount(fromShard, 'bar', 3);
+ checkCount(toShard, 'foo', 0);
+ checkCount(toShard, 'bar', 0);
+
+ // UUIDs should be the same as the original
+ checkUUIDsEqual(c1, barUUID);
+ checkUUIDsEqual(c2, fooUUID);
+ } else {
+ checkCount(toShard, 'foo', 3);
+ checkCount(toShard, 'bar', 3);
+ checkCount(fromShard, 'foo', 0);
+ checkCount(fromShard, 'bar', 0);
+
+ // UUIDs should not be the same as the original
+ checkUUIDsNotEqual(c1, barUUID);
+ checkUUIDsNotEqual(c2, fooUUID);
+ }
+}
- assert.commandWorked(toShard.getDB("admin").runCommand(
- {configureFailPoint: 'movePrimaryFailPoint', mode: 'alwaysOn'}));
+function createCollections(sharded) {
+ assert.commandWorked(st.getDB('test1').runCommand({dropDatabase: 1}));
+ var db = st.getDB('test1');
- // Failpoint will cause movePrimary to fail after the first collection has been copied over
- assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
+ assert.commandWorked(db.createCollection('foo', fooOptions));
+ assert.commandWorked(db.createCollection('bar', barOptions));
- assert.commandWorked(toShard.getDB("admin").runCommand(
- {configureFailPoint: 'movePrimaryFailPoint', mode: 'off'}));
-
- if (sharded) {
- // If the collections are sharded, the UUID of the collection on the donor should be
- // copied over and the options should be the same so retrying the move should succeed.
- assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
-
- checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, baruuid, foouuid);
-
- // Now change an option on the toShard, and verify that calling clone again fails if
- // the options don't match.
- assert.commandWorked(
- toShard.getDB('test1').runCommand({collMod: 'bar', validationLevel: 'moderate'}));
- assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: fromShard.name}));
- } else {
- // If the collections are unsharded, we should fail when any collections being copied
- // exist on the target shard.
- assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
- }
+ for (let i = 0; i < 3; i++) {
+ assert.writeOK(db.foo.insert({a: i}));
+ assert.writeOK(db.bar.insert({a: i}));
}
+ assert.eq(3, db.foo.count());
+ assert.eq(3, db.bar.count());
- function movePrimaryNoFailpoint(sharded) {
- var db = st.getDB('test1');
- createCollections(sharded);
+ assert.commandWorked(db.runCommand({createIndexes: 'foo', indexes: fooIndexes}));
+ assert.commandWorked(db.runCommand({createIndexes: 'bar', indexes: barIndexes}));
- var fromShard = st.getPrimaryShard('test1');
- var toShard = st.getOther(fromShard);
+ if (sharded) {
+ assert.commandWorked(db.adminCommand({enableSharding: 'test1'}));
+ assert.commandWorked(db.adminCommand({shardCollection: 'test1.foo', key: {_id: 1}}));
+ assert.commandWorked(db.adminCommand({shardCollection: 'test1.bar', key: {_id: 1}}));
+ }
+}
- assert.eq(
- 3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
- assert.eq(
- 3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
- assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+function movePrimaryWithFailpoint(sharded) {
+ var db = st.getDB('test1');
+ createCollections(sharded);
+
+ var fromShard = st.getPrimaryShard('test1');
+ var toShard = st.getOther(fromShard);
+
+ assert.eq(3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
+ assert.eq(3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+
+ var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
+ var fromColls = listCollsFrom.cursor.firstBatch;
+ fromColls.sort(sortByName);
+ var baruuid = fromColls[0].info.uuid;
+ var foouuid = fromColls[1].info.uuid;
+
+ assert.commandWorked(toShard.getDB("admin").runCommand(
+ {configureFailPoint: 'movePrimaryFailPoint', mode: 'alwaysOn'}));
+
+    // The failpoint will cause movePrimary to fail after the first collection has been copied over.
+ assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
- var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
- var fromColls = listCollsFrom.cursor.firstBatch;
- fromColls.sort(sortByName);
- var baruuid = fromColls[0].info.uuid;
- var foouuid = fromColls[1].info.uuid;
+ assert.commandWorked(toShard.getDB("admin").runCommand(
+ {configureFailPoint: 'movePrimaryFailPoint', mode: 'off'}));
+ if (sharded) {
+ // If the collections are sharded, the UUID of the collection on the donor should be
+ // copied over and the options should be the same so retrying the move should succeed.
assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, baruuid, foouuid);
+
+ // Now change an option on the toShard, and verify that calling clone again fails if
+ // the options don't match.
+ assert.commandWorked(
+ toShard.getDB('test1').runCommand({collMod: 'bar', validationLevel: 'moderate'}));
+ assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: fromShard.name}));
+ } else {
+ // If the collections are unsharded, we should fail when any collections being copied
+ // exist on the target shard.
+ assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
}
+}
+
+function movePrimaryNoFailpoint(sharded) {
+ var db = st.getDB('test1');
+ createCollections(sharded);
+
+ var fromShard = st.getPrimaryShard('test1');
+ var toShard = st.getOther(fromShard);
+
+ assert.eq(3, fromShard.getDB("test1").foo.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").foo.count(), "to shard has data before move");
+ assert.eq(3, fromShard.getDB("test1").bar.count(), "from shard doesn't have data before move");
+ assert.eq(0, toShard.getDB("test1").bar.count(), "to shard has data before move");
+
+ var listCollsFrom = fromShard.getDB("test1").runCommand({listCollections: 1});
+ var fromColls = listCollsFrom.cursor.firstBatch;
+ fromColls.sort(sortByName);
+ var baruuid = fromColls[0].info.uuid;
+ var foouuid = fromColls[1].info.uuid;
+
+ assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}));
+
+ checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, baruuid, foouuid);
+}
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var fooOptions = {validationLevel: "off"};
- var barOptions = {validator: {$jsonSchema: {required: ['a']}}};
+var fooOptions = {validationLevel: "off"};
+var barOptions = {validator: {$jsonSchema: {required: ['a']}}};
- var fooIndexes = [{key: {a: 1}, name: 'index1', expireAfterSeconds: 5000}];
- var barIndexes = [{key: {a: -1}, name: 'index2'}];
+var fooIndexes = [{key: {a: 1}, name: 'index1', expireAfterSeconds: 5000}];
+var barIndexes = [{key: {a: -1}, name: 'index2'}];
- movePrimaryWithFailpoint(true);
- movePrimaryWithFailpoint(false);
- movePrimaryNoFailpoint(true);
- movePrimaryNoFailpoint(false);
+movePrimaryWithFailpoint(true);
+movePrimaryWithFailpoint(false);
+movePrimaryNoFailpoint(true);
+movePrimaryNoFailpoint(false);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/move_primary_fails_without_database_version.js b/jstests/sharding/move_primary_fails_without_database_version.js
index cf03d5e1cfe..27b447efdc4 100644
--- a/jstests/sharding/move_primary_fails_without_database_version.js
+++ b/jstests/sharding/move_primary_fails_without_database_version.js
@@ -1,19 +1,17 @@
// Tests that a movePrimary will fail if the database doesn't have a version in config.databases
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
+const dbName = "test";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.getDB("config").getCollection("databases").insert({
- _id: dbName,
- partitioned: false,
- primary: st.shard0.shardName
- }));
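+// Manually create a database entry that lacks the 'version' field.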
+assert.commandWorked(st.s.getDB("config")
+ .getCollection("databases")
+ .insert({_id: dbName, partitioned: false, primary: st.shard0.shardName}));
- assert.commandFailedWithCode(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}),
- ErrorCodes.InternalError);
+assert.commandFailedWithCode(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}),
+ ErrorCodes.InternalError);
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/movechunk_commit_changelog_stats.js b/jstests/sharding/movechunk_commit_changelog_stats.js
index cff5ae2a445..0ec7e9261c1 100644
--- a/jstests/sharding/movechunk_commit_changelog_stats.js
+++ b/jstests/sharding/movechunk_commit_changelog_stats.js
@@ -3,38 +3,38 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
+var st = new ShardingTest({mongos: 1, shards: 2});
+var kDbName = 'db';
- var mongos = st.s0;
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
+var mongos = st.s0;
+var shard0 = st.shard0.shardName;
+var shard1 = st.shard1.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
- function assertCountsInChangelog() {
- let changeLog = st.s.getDB('config').changelog.find({what: 'moveChunk.commit'}).toArray();
- assert.gt(changeLog.length, 0);
- for (let i = 0; i < changeLog.length; i++) {
- assert(changeLog[i].details.hasOwnProperty('counts'));
- }
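+// Every moveChunk.commit changelog entry should include a 'counts' field in its details.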
+function assertCountsInChangelog() {
+ let changeLog = st.s.getDB('config').changelog.find({what: 'moveChunk.commit'}).toArray();
+ assert.gt(changeLog.length, 0);
+ for (let i = 0; i < changeLog.length; i++) {
+ assert(changeLog[i].details.hasOwnProperty('counts'));
}
+}
- var ns = kDbName + '.fooHashed';
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
+var ns = kDbName + '.fooHashed';
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
- assert(aChunk);
+var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
+assert(aChunk);
- // Assert counts field exists in the changelog entry for moveChunk.commit
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
- assertCountsInChangelog();
+// Assert counts field exists in the changelog entry for moveChunk.commit
+assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
+assertCountsInChangelog();
- mongos.getDB(kDbName).fooHashed.drop();
+mongos.getDB(kDbName).fooHashed.drop();
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
index 7158d12c719..3a03a485dc9 100644
--- a/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
+++ b/jstests/sharding/movechunk_interrupt_at_primary_stepdown.js
@@ -9,69 +9,69 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- // Intentionally use a config server with 1 node so that the step down and promotion to primary
- // are guaranteed to happen on the same host
- var st = new ShardingTest({config: 1, shards: 2});
- var mongos = st.s0;
+// Intentionally use a config server with 1 node so that the step down and promotion to primary
+// are guaranteed to happen on the same host
+var st = new ShardingTest({config: 1, shards: 2});
+var mongos = st.s0;
- assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
- var coll = mongos.getDB('TestDB').TestColl;
+var coll = mongos.getDB('TestDB').TestColl;
- // We have one chunk initially
- assert.writeOK(coll.insert({Key: 0, Value: 'Test value'}));
+// We have one chunk initially
+assert.writeOK(coll.insert({Key: 0, Value: 'Test value'}));
- pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- // For startParallelOps to write its state
- var staticMongod = MongoRunner.runMongod({});
+// For startParallelOps to write its state
+var staticMongod = MongoRunner.runMongod({});
- var joinMoveChunk = moveChunkParallel(
- staticMongod, mongos.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
- waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+var joinMoveChunk = moveChunkParallel(
+ staticMongod, mongos.host, {Key: 0}, null, 'TestDB.TestColl', st.shard1.shardName);
+waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- // Stepdown the primary in order to force the balancer to stop. Use a timeout of 5 seconds for
- // both step down operations, because mongos will retry to find the CSRS primary for up to 20
- // seconds and we have two successive ones.
- assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
+// Stepdown the primary in order to force the balancer to stop. Use a timeout of 5 seconds for
+// both step down operations, because mongos will retry to find the CSRS primary for up to 20
+// seconds and we have two successive ones.
+assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
- // Ensure a new primary is found promptly
- st.configRS.getPrimary(30000);
+// Ensure a new primary is found promptly
+st.configRS.getPrimary(30000);
- assert.eq(1,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(0,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(1,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(0,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
- // At this point, the balancer is in recovery mode. Ensure that stepdown can be done again and
- // the recovery mode interrupted.
- assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
+// At this point, the balancer is in recovery mode. Ensure that stepdown can be done again and
+// the recovery mode interrupted.
+assert.commandWorked(st.configRS.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
- // Ensure a new primary is found promptly
- st.configRS.getPrimary(30000);
+// Ensure a new primary is found promptly
+st.configRS.getPrimary(30000);
- unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- // Ensure that migration succeeded
- joinMoveChunk();
+// Ensure that migration succeeded
+joinMoveChunk();
- assert.eq(0,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- mongos.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
+assert.eq(0,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ mongos.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js
index 37fddce75f4..4c486e64f89 100644
--- a/jstests/sharding/movechunk_parallel.js
+++ b/jstests/sharding/movechunk_parallel.js
@@ -4,78 +4,78 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
-
- // For startParallelOps to write its state
- var staticMongod = MongoRunner.runMongod({});
-
- var st = new ShardingTest({shards: 4});
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
-
- var coll = st.s0.getDB('TestDB').TestColl;
-
- // Create 4 chunks initially
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
-
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
-
- // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
-
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
-
- // Pause migrations at shards 2 and 3
- pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
- pauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
-
- // Both move chunk operations should proceed
- var joinMoveChunk1 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 10}, null, 'TestDB.TestColl', st.shard2.shardName);
- var joinMoveChunk2 = moveChunkParallel(
- staticMongod, st.s0.host, {Key: 30}, null, 'TestDB.TestColl', st.shard3.shardName);
-
- waitForMigrateStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
- waitForMigrateStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
-
- unpauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
- unpauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
-
- joinMoveChunk1();
- joinMoveChunk2();
-
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
- .itcount());
-
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+'use strict';
+
+// For startParallelOps to write its state
+var staticMongod = MongoRunner.runMongod({});
+
+var st = new ShardingTest({shards: 4});
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
+
+var coll = st.s0.getDB('TestDB').TestColl;
+
+// Create 4 chunks initially
+assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
+assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
+
+// Move two of the chunks to st.shard1.shardName so we have the option to do parallel balancing
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
+assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
+
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(2,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
+
+// Pause migrations at shards 2 and 3
+pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+
+// Both move chunk operations should proceed
+var joinMoveChunk1 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 10}, null, 'TestDB.TestColl', st.shard2.shardName);
+var joinMoveChunk2 = moveChunkParallel(
+ staticMongod, st.s0.host, {Key: 30}, null, 'TestDB.TestColl', st.shard3.shardName);
+
+waitForMigrateStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+
+unpauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
+unpauseMigrateAtStep(st.shard3, migrateStepNames.deletedPriorDataInRange);
+
+joinMoveChunk1();
+joinMoveChunk2();
+
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
+ .itcount());
+assert.eq(1,
+ st.s0.getDB('config')
+ .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
+ .itcount());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
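The pause/unpause helpers above come from jstests/libs/chunk_manipulation_util.js; a minimal sketch of the pattern, assuming a running ShardingTest `st` and the scratch `staticMongod` used for startParallelOps state, as in the test:

    pauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
    var joinMoveChunk = moveChunkParallel(
        staticMongod, st.s0.host, {Key: 10}, null, 'TestDB.TestColl', st.shard2.shardName);
    waitForMigrateStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
    // ... exercise whatever concurrent behavior is under test while the recipient is paused ...
    unpauseMigrateAtStep(st.shard2, migrateStepNames.deletedPriorDataInRange);
    joinMoveChunk();  // blocks until the migration commits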
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index 0536c6a51b9..c860c16148e 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -6,90 +6,89 @@
(function() {
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- function doMapReduce(connection, outputDb) {
- // clean output db and run m/r
- outputDb.numbers_out.drop();
- printjson(connection.getDB('input').runCommand({
- mapreduce: "numbers",
- map: function() {
- emit(this.num, {count: 1});
- },
- reduce: function(k, values) {
- var result = {};
- values.forEach(function(value) {
- result.count = 1;
- });
- return result;
- },
- out: {merge: "numbers_out", sharded: true, db: "output"},
- verbose: true,
- query: {}
- }));
- }
-
- function assertSuccess(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
- assert(!configDb.collections.findOne().dropped, "no sharded collections");
- }
-
- function assertFailure(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
- }
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- name: "mrShardedOutputAuth",
- shards: 1,
- mongos: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
-
- // Setup the users to the input, output and admin databases
- var mongos = st.s;
- var adminDb = mongos.getDB("admin");
- adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
-
- var authenticatedConn = new Mongo(mongos.host);
- authenticatedConn.getDB('admin').auth("user", "pass");
- adminDb = authenticatedConn.getDB("admin");
-
- var configDb = authenticatedConn.getDB("config");
-
- var inputDb = authenticatedConn.getDB("input");
- inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
- var outputDb = authenticatedConn.getDB("output");
- outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
- // Setup the input db
- inputDb.numbers.drop();
- for (var i = 0; i < 50; i++) {
- inputDb.numbers.insert({num: i});
- }
- assert.eq(inputDb.numbers.count(), 50);
-
- // Setup a connection authenticated to both input and output db
- var inputOutputAuthConn = new Mongo(mongos.host);
- inputOutputAuthConn.getDB('input').auth("user", "pass");
- inputOutputAuthConn.getDB('output').auth("user", "pass");
- doMapReduce(inputOutputAuthConn, outputDb);
- assertSuccess(configDb, outputDb);
-
- // setup a connection authenticated to only input db
- var inputAuthConn = new Mongo(mongos.host);
- inputAuthConn.getDB('input').auth("user", "pass");
- doMapReduce(inputAuthConn, outputDb);
- assertFailure(configDb, outputDb);
-
- // setup a connection authenticated to only output db
- var outputAuthConn = new Mongo(mongos.host);
- outputAuthConn.getDB('output').auth("user", "pass");
- doMapReduce(outputAuthConn, outputDb);
- assertFailure(configDb, outputDb);
-
- st.stop();
-
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+function doMapReduce(connection, outputDb) {
+ // clean output db and run m/r
+ outputDb.numbers_out.drop();
+ printjson(connection.getDB('input').runCommand({
+ mapreduce: "numbers",
+ map: function() {
+ emit(this.num, {count: 1});
+ },
+ reduce: function(k, values) {
+ var result = {};
+ values.forEach(function(value) {
+ result.count = 1;
+ });
+ return result;
+ },
+ out: {merge: "numbers_out", sharded: true, db: "output"},
+ verbose: true,
+ query: {}
+ }));
+}
+
+function assertSuccess(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
+ assert(!configDb.collections.findOne().dropped, "no sharded collections");
+}
+
+function assertFailure(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
+}
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ name: "mrShardedOutputAuth",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+});
+
+// Set up the users for the input, output and admin databases
+var mongos = st.s;
+var adminDb = mongos.getDB("admin");
+adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
+
+var authenticatedConn = new Mongo(mongos.host);
+authenticatedConn.getDB('admin').auth("user", "pass");
+adminDb = authenticatedConn.getDB("admin");
+
+var configDb = authenticatedConn.getDB("config");
+
+var inputDb = authenticatedConn.getDB("input");
+inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+var outputDb = authenticatedConn.getDB("output");
+outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+// Set up the input db
+inputDb.numbers.drop();
+for (var i = 0; i < 50; i++) {
+ inputDb.numbers.insert({num: i});
+}
+assert.eq(inputDb.numbers.count(), 50);
+
+// Set up a connection authenticated to both the input and output dbs
+var inputOutputAuthConn = new Mongo(mongos.host);
+inputOutputAuthConn.getDB('input').auth("user", "pass");
+inputOutputAuthConn.getDB('output').auth("user", "pass");
+doMapReduce(inputOutputAuthConn, outputDb);
+assertSuccess(configDb, outputDb);
+
+// Set up a connection authenticated only to the input db
+var inputAuthConn = new Mongo(mongos.host);
+inputAuthConn.getDB('input').auth("user", "pass");
+doMapReduce(inputAuthConn, outputDb);
+assertFailure(configDb, outputDb);
+
+// Set up a connection authenticated only to the output db
+var outputAuthConn = new Mongo(mongos.host);
+outputAuthConn.getDB('output').auth("user", "pass");
+doMapReduce(outputAuthConn, outputDb);
+assertFailure(configDb, outputDb);
+
+st.stop();
})();
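A minimal sketch of the per-database authentication pattern this test relies on, assuming the "user"/"pass" users created above on a running ShardingTest `st`; each Mongo() connection authenticates only against the databases it needs, so authorization failures can be exercised selectively:

    var conn = new Mongo(st.s.host);
    assert(conn.getDB('input').auth('user', 'pass'));  // authorized for reads on 'input'
    // No auth call against 'output', so the sharded m/r output phase is expected to fail.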
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index 765ba02ae16..bb129b2c6b7 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -1,65 +1,64 @@
// Test that map reduce and aggregate properly handle shard versioning.
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2, mongos: 3});
+var st = new ShardingTest({shards: 2, mongos: 3});
- var dbName = jsTest.name();
- var collName = dbName + ".coll";
- var numDocs = 50000;
- var numKeys = 1000;
+var dbName = jsTest.name();
+var collName = dbName + ".coll";
+var numDocs = 50000;
+var numKeys = 1000;
- st.s.adminCommand({enableSharding: dbName});
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- st.s.adminCommand({shardCollection: collName, key: {key: 1}});
+st.s.adminCommand({enableSharding: dbName});
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+st.s.adminCommand({shardCollection: collName, key: {key: 1}});
- // Load chunk data to the stale mongoses before moving a chunk
- var staleMongos1 = st.s1;
- var staleMongos2 = st.s2;
- staleMongos1.getCollection(collName).find().itcount();
- staleMongos2.getCollection(collName).find().itcount();
+// Load chunk data to the stale mongoses before moving a chunk
+var staleMongos1 = st.s1;
+var staleMongos2 = st.s2;
+staleMongos1.getCollection(collName).find().itcount();
+staleMongos2.getCollection(collName).find().itcount();
- st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
- st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: st.shard1.shardName});
+st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
+st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: st.shard1.shardName});
- var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
- }
- assert.writeOK(bulk.execute());
+var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
+}
+assert.writeOK(bulk.execute());
- // Add orphaned documents directly to the shards to ensure they are properly filtered out.
- st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
- st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
+// Add orphaned documents directly to the shards to ensure they are properly filtered out.
+st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
+st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
- jsTest.log("Doing mapReduce");
+jsTest.log("Doing mapReduce");
- var map = function() {
- emit(this.key, this.value);
- };
- var reduce = function(k, values) {
- var total = 0;
- for (var i = 0; i < values.length; i++) {
- total += values[i];
- }
- return total;
- };
- function validateOutput(output) {
- assert.eq(output.length, numKeys, tojson(output));
- for (var i = 0; i < output.length; i++) {
- assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
- }
+var map = function() {
+ emit(this.key, this.value);
+};
+var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i];
}
+ return total;
+};
+function validateOutput(output) {
+ assert.eq(output.length, numKeys, tojson(output));
+ for (var i = 0; i < output.length; i++) {
+ assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
+ }
+}
- var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
- validateOutput(res.results);
-
- jsTest.log("Doing aggregation");
+var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
+validateOutput(res.results);
- res = staleMongos2.getCollection(collName).aggregate(
- [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
- validateOutput(res.toArray());
+jsTest.log("Doing aggregation");
- st.stop();
+res = staleMongos2.getCollection(collName).aggregate(
+ [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
+validateOutput(res.toArray());
+st.stop();
})();
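The mapReduce and aggregation paths above compute the same per-key sums; a minimal sketch of the equivalence, assuming documents shaped like the ones inserted above ({key, value}):

    var mrRes = coll.mapReduce(
        function() { emit(this.key, this.value); },
        function(k, values) { return Array.sum(values); },
        {out: {inline: 1}});
    var aggRes = coll.aggregate([{$group: {_id: '$key', value: {$sum: '$value'}}},
                                 {$sort: {_id: 1}}]).toArray();
    // Both produce one {_id: key, value: sum} result per distinct key.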
diff --git a/jstests/sharding/mr_output_sharded_validation.js b/jstests/sharding/mr_output_sharded_validation.js
index 1d33af3f83b..2643ec72f4c 100644
--- a/jstests/sharding/mr_output_sharded_validation.js
+++ b/jstests/sharding/mr_output_sharded_validation.js
@@ -3,42 +3,42 @@
// output namespace of the first phase of a mapReduce with sharded input before the final result
// collection is created. This test was designed to reproduce SERVER-36966.
(function() {
- "use strict";
-
- const st = new ShardingTest({shards: 2, config: 1, verbose: ''});
-
- const mongosDB = st.s.getDB("test");
- st.shardColl(mongosDB.foo, {_id: 1}, {_id: 0}, {_id: -1});
-
- assert.commandWorked(mongosDB.foo.insert([{_id: 1}, {_id: 2}]));
-
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: mongosDB.output.getFullName(), key: {_id: "hashed"}}));
-
- assert.commandWorked(mongosDB.foo.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {replace: "output", sharded: true}}));
-
- // Test that using just a collection name without specifying a merge mode or the 'sharded: true'
- // information will fail if the named collection is sharded.
- const error = assert.throws(() => mongosDB.foo.mapReduce(
- function() {
- emit(this._id, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: "output"}));
- assert.eq(error.code, 15920);
-
- for (let name of mongosDB.getCollectionNames()) {
- assert.eq(-1, name.indexOf("tmp.mrs"), name);
- }
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({shards: 2, config: 1, verbose: ''});
+
+const mongosDB = st.s.getDB("test");
+st.shardColl(mongosDB.foo, {_id: 1}, {_id: 0}, {_id: -1});
+
+assert.commandWorked(mongosDB.foo.insert([{_id: 1}, {_id: 2}]));
+
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosDB.output.getFullName(), key: {_id: "hashed"}}));
+
+assert.commandWorked(mongosDB.foo.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: {replace: "output", sharded: true}}));
+
+// Test that using just a collection name without specifying a merge mode or the 'sharded: true'
+// information will fail if the named collection is sharded.
+const error = assert.throws(() => mongosDB.foo.mapReduce(
+ function() {
+ emit(this._id, 1);
+ },
+ function(key, values) {
+ return Array.sum(values);
+ },
+ {out: "output"}));
+assert.eq(error.code, 15920);
+
+for (let name of mongosDB.getCollectionNames()) {
+ assert.eq(-1, name.indexOf("tmp.mrs"), name);
+}
+
+st.stop();
}());
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 225c9be324d..52622b4ce66 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -1,88 +1,87 @@
// Test for SERVER-4158 (version changes during mapreduce)
(function() {
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- // Stop balancer, since it'll just get in the way of these
- st.stopBalancer();
+// Stop the balancer, since it'll just get in the way of these manual migrations
+st.stopBalancer();
- var coll = st.s.getCollection(jsTest.name() + ".coll");
+var coll = st.s.getCollection(jsTest.name() + ".coll");
- var numDocs = 50000;
- var numKeys = 1000;
- var numTests = 3;
+var numDocs = 50000;
+var numKeys = 1000;
+var numTests = 3;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
+}
+assert.writeOK(bulk.execute());
- assert.eq(numDocs, coll.find().itcount());
+assert.eq(numDocs, coll.find().itcount());
- var halfId = coll.find().itcount() / 2;
+var halfId = coll.find().itcount() / 2;
- // Shard collection in half
- st.shardColl(coll, {_id: 1}, {_id: halfId});
+// Shard collection in half
+st.shardColl(coll, {_id: 1}, {_id: halfId});
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log("Collection now initialized with keys and values...");
+jsTest.log("Collection now initialized with keys and values...");
- jsTest.log("Starting migrations...");
+jsTest.log("Starting migrations...");
- var ops = {};
- for (var i = 0; i < st._connections.length; i++) {
- for (var j = 0; j < 2; j++) {
- ops["" + (i * 2 + j)] = {
- op: "command",
- ns: "admin",
- command: {
- moveChunk: "" + coll,
- find: {_id: (j == 0 ? 0 : halfId)},
- to: st._connections[i].shardName
- },
- };
- }
+var ops = {};
+for (var i = 0; i < st._connections.length; i++) {
+ for (var j = 0; j < 2; j++) {
+ ops["" + (i * 2 + j)] = {
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: (j == 0 ? 0 : halfId)},
+ to: st._connections[i].shardName
+ },
+ };
}
+}
- var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-
- jsTest.log("Starting m/r...");
+var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
- var map = function() {
- emit(this.key, this.value);
- };
- var reduce = function(k, values) {
- var total = 0;
- for (var i = 0; i < values.length; i++)
- total += values[i];
- return total;
- };
+jsTest.log("Starting m/r...");
- var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
+var map = function() {
+ emit(this.key, this.value);
+};
+var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++)
+ total += values[i];
+ return total;
+};
- jsTest.log("Output coll : " + outputColl);
+var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
- for (var t = 0; t < numTests; t++) {
- var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
+jsTest.log("Output coll : " + outputColl);
- // Assert that the results are actually correct, all keys have values of (numDocs / numKeys)
- // x key
- var output = outputColl.find().sort({_id: 1}).toArray();
+for (var t = 0; t < numTests; t++) {
+ var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
- // printjson( output )
+    // Assert that the results are actually correct: every key has a value of
+    // (numDocs / numKeys) x key
+ var output = outputColl.find().sort({_id: 1}).toArray();
- assert.eq(output.length, numKeys);
- printjson(output);
- for (var i = 0; i < output.length; i++)
- assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
- }
+ // printjson( output )
- jsTest.log("Finishing parallel migrations...");
+ assert.eq(output.length, numKeys);
+ printjson(output);
+ for (var i = 0; i < output.length; i++)
+ assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
+}
- printjson(benchFinish(bid));
+jsTest.log("Finishing parallel migrations...");
- st.stop();
+printjson(benchFinish(bid));
+st.stop();
})();
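benchStart/benchFinish above drive the background moveChunk load while the mapReduce runs; a minimal sketch, assuming a mongos at `st.s.host` and mirroring the ops-document shape used in the test:

    var bid = benchStart({
        ops: {'0': {op: 'command', ns: 'admin', command: {ping: 1}}},
        host: st.s.host,
        parallel: 1,
        handleErrors: false
    });
    // ... run the operations under test while the background load is active ...
    printjson(benchFinish(bid));  // stats for the background ops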
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index 076c39048f3..084577d8b27 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -1,44 +1,43 @@
// Tests the dropping and re-adding of a collection
(function() {
- var st = new ShardingTest({name: "multidrop", shards: 1, mongos: 2});
+var st = new ShardingTest({name: "multidrop", shards: 1, mongos: 2});
- var mA = st.s0;
- var mB = st.s1;
+var mA = st.s0;
+var mB = st.s1;
- var coll = mA.getCollection('multidrop.coll');
- var collB = mB.getCollection('multidrop.coll');
+var coll = mA.getCollection('multidrop.coll');
+var collB = mB.getCollection('multidrop.coll');
- jsTestLog("Shard and split collection...");
+jsTestLog("Shard and split collection...");
- var admin = mA.getDB("admin");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+var admin = mA.getDB("admin");
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- for (var i = -100; i < 100; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
+for (var i = -100; i < 100; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+}
- jsTestLog("Create versioned connection for each mongos...");
+jsTestLog("Create versioned connection for each mongos...");
- assert.eq(0, coll.find().itcount());
- assert.eq(0, collB.find().itcount());
+assert.eq(0, coll.find().itcount());
+assert.eq(0, collB.find().itcount());
- jsTestLog("Dropping sharded collection...");
- assert(coll.drop());
+jsTestLog("Dropping sharded collection...");
+assert(coll.drop());
- jsTestLog("Recreating collection...");
+jsTestLog("Recreating collection...");
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- for (var i = -10; i < 10; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
- }
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+for (var i = -10; i < 10; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+}
- jsTestLog("Retrying connections...");
+jsTestLog("Retrying connections...");
- assert.eq(0, coll.find().itcount());
- assert.eq(0, collB.find().itcount());
-
- st.stop();
+assert.eq(0, coll.find().itcount());
+assert.eq(0, collB.find().itcount());
+st.stop();
})();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index 27d6dd447c7..29f350ae095 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -1,53 +1,53 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 2});
+var st = new ShardingTest({shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- // "test.foo" - sharded (by mongos 0)
- assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+// "test.foo" - sharded (by mongos 0)
+assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- // "test.existing" - unsharded
- assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+// "test.existing" - unsharded
+assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- // "test.existing" - unsharded to sharded (by mongos 1)
- assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
- assert.commandWorked(st.s1.adminCommand({split: "test.existing", middle: {_id: 5}}));
- assert.commandWorked(
- st.s1.adminCommand({moveChunk: "test.existing", find: {_id: 1}, to: st.shard0.shardName}));
+// "test.existing" - unsharded to sharded (by mongos 1)
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
+assert.commandWorked(st.s1.adminCommand({split: "test.existing", middle: {_id: 5}}));
+assert.commandWorked(
+ st.s1.adminCommand({moveChunk: "test.existing", find: {_id: 1}, to: st.shard0.shardName}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1})); // SERVER-2828
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1})); // SERVER-2828
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- // Test stats
- assert.writeOK(st.s0.getDB('test').existing2.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing2.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing2.count({_id: 1}));
+// Test stats
+assert.writeOK(st.s0.getDB('test').existing2.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing2.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing2.count({_id: 1}));
- assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing2", key: {_id: 1}}));
- assert.eq(true, st.s1.getDB('test').existing2.stats().sharded);
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing2", key: {_id: 1}}));
+assert.eq(true, st.s1.getDB('test').existing2.stats().sharded);
- assert.commandWorked(st.s1.adminCommand({split: "test.existing2", middle: {_id: 5}}));
- {
- var res = st.s0.getDB('test').existing2.stats();
- printjson(res);
- assert.eq(true, res.sharded); // SERVER-2828
- }
+assert.commandWorked(st.s1.adminCommand({split: "test.existing2", middle: {_id: 5}}));
+{
+ var res = st.s0.getDB('test').existing2.stats();
+ printjson(res);
+ assert.eq(true, res.sharded); // SERVER-2828
+}
- // Test admin commands
- assert.writeOK(st.s0.getDB('test').existing3.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing3.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing3.count({_id: 1}));
+// Test admin commands
+assert.writeOK(st.s0.getDB('test').existing3.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing3.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing3.count({_id: 1}));
- assert.writeOK(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
- assert.commandWorked(st.s1.adminCommand({split: "test.existing3", middle: {_id: 5}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: "test.existing3", find: {_id: 1}, to: st.shard0.shardName}));
+assert.writeOK(st.s1.adminCommand({shardcollection: "test.existing3", key: {_id: 1}}));
+assert.commandWorked(st.s1.adminCommand({split: "test.existing3", middle: {_id: 5}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: "test.existing3", find: {_id: 1}, to: st.shard0.shardName}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 451bdbd80f1..7e2dce7c8b0 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -1,29 +1,29 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 2});
+var st = new ShardingTest({shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
- assert.eq(true, st.s1.getDB('test').existing.stats().sharded);
+assert.commandWorked(st.s1.adminCommand({shardcollection: "test.existing", key: {_id: 1}}));
+assert.eq(true, st.s1.getDB('test').existing.stats().sharded);
- assert.commandWorked(st.s1.getDB("admin").runCommand({
- moveChunk: "test.existing",
- find: {_id: 1},
- to: st.getOther(st.getPrimaryShard("test")).name
- }));
+assert.commandWorked(st.s1.getDB("admin").runCommand({
+ moveChunk: "test.existing",
+ find: {_id: 1},
+ to: st.getOther(st.getPrimaryShard("test")).name
+}));
- assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
- assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s0.getDB('test').existing.count({_id: 1}));
+assert.eq(1, st.s1.getDB('test').existing.count({_id: 1}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/multi_shard_transaction_without_majority_reads.js b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
index dafdf503f84..8ddb69a665d 100644
--- a/jstests/sharding/multi_shard_transaction_without_majority_reads.js
+++ b/jstests/sharding/multi_shard_transaction_without_majority_reads.js
@@ -6,34 +6,34 @@
*/
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: 'false'}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1, enableMajorityReadConcern: 'false'}});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
- const coll = st.s0.getDB('TestDB').TestColl;
- assert.writeOK(coll.insert({_id: -1, x: 0}));
- assert.writeOK(coll.insert({_id: 1, x: 0}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
+const coll = st.s0.getDB('TestDB').TestColl;
+assert.writeOK(coll.insert({_id: -1, x: 0}));
+assert.writeOK(coll.insert({_id: 1, x: 0}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: 1}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'TestDB.TestColl', find: {_id: 1}, to: st.shard1.shardName}));
- assert.writeOK(coll.update({_id: -1}, {$inc: {x: 1}}));
- assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
+assert.writeOK(coll.update({_id: -1}, {$inc: {x: 1}}));
+assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
- const session = st.s0.startSession();
- const sessionColl = session.getDatabase('TestDB').TestColl;
+const session = st.s0.startSession();
+const sessionColl = session.getDatabase('TestDB').TestColl;
- session.startTransaction();
+session.startTransaction();
- assert.writeOK(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
- assert.writeOK(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+assert.writeOK(sessionColl.update({_id: -1}, {$inc: {x: 1}}));
+assert.writeOK(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- st.stop();
+st.stop();
})();
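A minimal sketch of the session/transaction pattern exercised above, assuming a mongos connection `st.s0`; with enableMajorityReadConcern: 'false' the commit fails as asserted, while on a default configuration it would succeed:

    const session = st.s0.startSession();
    const txnColl = session.getDatabase('TestDB').TestColl;
    session.startTransaction();
    assert.writeOK(txnColl.update({_id: -1}, {$inc: {x: 1}}));
    const res = session.commitTransaction_forTesting();  // returns the raw command result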
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 9c4f37430da..90330f43cc0 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -2,72 +2,71 @@
// Tests that multi-writes (update/delete) target *all* shards and not just shards in the collection
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 3, mongos: 2});
+var st = new ShardingTest({shards: 3, mongos: 2});
- var admin = st.s0.getDB("admin");
- var coll = st.s0.getCollection("foo.bar");
+var admin = st.s0.getDB("admin");
+var coll = st.s0.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 0}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 100}}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {skey: 100}, to: st.shard2.shardName}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 0}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 100}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 100}, to: st.shard2.shardName}));
- jsTest.log("Testing multi-update...");
+jsTest.log("Testing multi-update...");
- // Put data on all shards
- assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
- assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
- assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
+// Put data on all shards
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
- // Non-multi-update doesn't work without shard key
- assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
- assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
+// Non-multi-update doesn't work without shard key
+assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
+assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
- // Ensure update goes to *all* shards
- assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
- assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({updated: true}));
- assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true}));
+// Ensure update goes to *all* shards
+assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
+assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({updated: true}));
+assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true}));
- // _id update works, and goes to all shards even on the stale mongos
- var staleColl = st.s1.getCollection('foo.bar');
- assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
+// _id update works, and goes to all shards even on the stale mongos
+var staleColl = st.s1.getCollection('foo.bar');
+assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
- // Ensure _id update goes to *all* shards
- assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
- assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updatedById: true}));
+// Ensure _id update goes to *all* shards
+assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
+assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updatedById: true}));
- jsTest.log("Testing multi-delete...");
+jsTest.log("Testing multi-delete...");
- // non-multi-delete doesn't work without shard key
- assert.writeError(coll.remove({x: 1}, {justOne: true}));
+// non-multi-delete doesn't work without shard key
+assert.writeError(coll.remove({x: 1}, {justOne: true}));
- assert.writeOK(coll.remove({x: 1}, {justOne: false}));
+assert.writeOK(coll.remove({x: 1}, {justOne: false}));
- // Ensure delete goes to *all* shards
- assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
- assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
- assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
+// Ensure delete goes to *all* shards
+assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
+assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
- // Put more on all shards
- assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
- assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
- // Data not in chunks
- assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
+// Put more on all shards
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+// Data not in chunks
+assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
- assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
+assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
- // Ensure _id delete goes to *all* shards
- assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
- assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
-
- st.stop();
+// Ensure _id delete goes to *all* shards
+assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
+st.stop();
})();
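A minimal sketch of the targeting rule this test verifies, assuming a sharded `coll` whose shard key is absent from the filter:

    // A single (non-multi) update must target by shard key or _id, so this errors.
    assert.writeError(coll.update({x: 1}, {$set: {u: true}}, {multi: false}));
    // A multi-update is broadcast to all shards and succeeds.
    assert.writeOK(coll.update({x: 1}, {$set: {u: true}}, {multi: true}));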
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index d7e7e884380..aa20b4ead0a 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -1,59 +1,56 @@
// Test that having replica set names the same as the names of other shards works fine
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 0, mongos: 1});
-
- var rsA = new ReplSetTest({nodes: 2, name: "rsA", nodeOptions: {shardsvr: ""}});
- var rsB = new ReplSetTest({nodes: 2, name: "rsB", nodeOptions: {shardsvr: ""}});
-
- rsA.startSet();
- rsB.startSet();
- rsA.initiate();
- rsB.initiate();
- rsA.getPrimary();
- rsB.getPrimary();
-
- var mongos = st.s;
- var config = mongos.getDB("config");
- var admin = mongos.getDB("admin");
-
- assert.commandWorked(mongos.adminCommand({addShard: rsA.getURL(), name: rsB.name}));
- printjson(config.shards.find().toArray());
-
- assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
- printjson(config.shards.find().toArray());
-
- assert.eq(2, config.shards.count(), "Error adding a shard");
- assert.eq(
- rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
- assert.eq(
- rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
-
- // Remove shard
- assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
- "failed to start draining shard");
- var res = assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
- "failed to remove shard");
-
- assert.eq(
- 1,
- config.shards.count(),
- "Shard was not removed: " + res + "; Shards: " + tojson(config.shards.find().toArray()));
- assert.eq(
- rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 2");
-
- // Re-add shard
- assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
- printjson(config.shards.find().toArray());
-
- assert.eq(2, config.shards.count(), "Error re-adding a shard");
- assert.eq(
- rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
- assert.eq(
- rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 3");
-
- rsA.stopSet();
- rsB.stopSet();
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 0, mongos: 1});
+
+var rsA = new ReplSetTest({nodes: 2, name: "rsA", nodeOptions: {shardsvr: ""}});
+var rsB = new ReplSetTest({nodes: 2, name: "rsB", nodeOptions: {shardsvr: ""}});
+
+rsA.startSet();
+rsB.startSet();
+rsA.initiate();
+rsB.initiate();
+rsA.getPrimary();
+rsB.getPrimary();
+
+var mongos = st.s;
+var config = mongos.getDB("config");
+var admin = mongos.getDB("admin");
+
+assert.commandWorked(mongos.adminCommand({addShard: rsA.getURL(), name: rsB.name}));
+printjson(config.shards.find().toArray());
+
+assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
+printjson(config.shards.find().toArray());
+
+assert.eq(2, config.shards.count(), "Error adding a shard");
+assert.eq(rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
+assert.eq(rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
+
+// Remove shard
+assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
+ "failed to start draining shard");
+var res =
+ assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}), "failed to remove shard");
+
+assert.eq(1,
+ config.shards.count(),
+ "Shard was not removed: " + res + "; Shards: " + tojson(config.shards.find().toArray()));
+assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 2");
+
+// Re-add shard
+assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
+printjson(config.shards.find().toArray());
+
+assert.eq(2, config.shards.count(), "Error re-adding a shard");
+assert.eq(
+ rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
+assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 3");
+
+rsA.stopSet();
+rsB.stopSet();
+st.stop();
})();
diff --git a/jstests/sharding/nonreplicated_uuids_on_shardservers.js b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
index 7c7be172ee1..64716774fb7 100644
--- a/jstests/sharding/nonreplicated_uuids_on_shardservers.js
+++ b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
@@ -1,23 +1,23 @@
// SERVER-32255 This test ensures a node started with --shardsvr and added to a replica set receives
// UUIDs upon re-initiation.
(function() {
- "use strict";
- load("jstests/libs/check_uuids.js");
- let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- let mongos = st.s;
- let rs = st.rs0;
+"use strict";
+load("jstests/libs/check_uuids.js");
+let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+let mongos = st.s;
+let rs = st.rs0;
- // Create `test.coll`.
- mongos.getDB("test").coll.insert({_id: 1, x: 1});
+// Create `test.coll`.
+mongos.getDB("test").coll.insert({_id: 1, x: 1});
- // Add a node with --shardsvr to the replica set.
- let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
- rs.reInitiate();
- rs.awaitSecondaryNodes();
+// Add a node with --shardsvr to the replica set.
+let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
+rs.reInitiate();
+rs.awaitSecondaryNodes();
- let secondaryAdminDB = newNode.getDB("admin");
+let secondaryAdminDB = newNode.getDB("admin");
- // Ensure the new node has UUIDs for all its collections.
- checkCollectionUUIDs(secondaryAdminDB);
- st.stop();
+// Ensure the new node has UUIDs for all its collections.
+checkCollectionUUIDs(secondaryAdminDB);
+st.stop();
})();
diff --git a/jstests/sharding/not_allowed_on_sharded_collection_cmd.js b/jstests/sharding/not_allowed_on_sharded_collection_cmd.js
index 2649994c300..7190e8b6bba 100644
--- a/jstests/sharding/not_allowed_on_sharded_collection_cmd.js
+++ b/jstests/sharding/not_allowed_on_sharded_collection_cmd.js
@@ -2,25 +2,24 @@
// collections.
(function() {
- const st = new ShardingTest({shards: 2, mongos: 2});
+const st = new ShardingTest({shards: 2, mongos: 2});
- const dbName = 'test';
- const coll = 'foo';
- const ns = dbName + '.' + coll;
+const dbName = 'test';
+const coll = 'foo';
+const ns = dbName + '.' + coll;
- const freshMongos = st.s0.getDB(dbName);
- const staleMongos = st.s1.getDB(dbName);
+const freshMongos = st.s0.getDB(dbName);
+const staleMongos = st.s1.getDB(dbName);
- assert.commandWorked(staleMongos.adminCommand({enableSharding: dbName}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(staleMongos.adminCommand({enableSharding: dbName}));
+assert.commandWorked(freshMongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
- // Test that commands that should not be runnable on sharded collection do not work on sharded
- // collections, using both fresh mongos and stale mongos instances.
- assert.commandFailedWithCode(freshMongos.runCommand({convertToCapped: coll, size: 64 * 1024}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(staleMongos.runCommand({convertToCapped: coll, size: 32 * 1024}),
- ErrorCodes.IllegalOperation);
-
- st.stop();
+// Test that commands that are not allowed on sharded collections fail against them, using both
+// fresh and stale mongos instances.
+assert.commandFailedWithCode(freshMongos.runCommand({convertToCapped: coll, size: 64 * 1024}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(staleMongos.runCommand({convertToCapped: coll, size: 32 * 1024}),
+ ErrorCodes.IllegalOperation);
+st.stop();
})();
diff --git a/jstests/sharding/now_variable_replset.js b/jstests/sharding/now_variable_replset.js
index ad5104a0695..29089ff5e1a 100644
--- a/jstests/sharding/now_variable_replset.js
+++ b/jstests/sharding/now_variable_replset.js
@@ -3,131 +3,130 @@
*/
// @tags: [requires_find_command]
(function() {
- "use strict";
-
- var replTest = new ReplSetTest({name: "now_and_cluster_time", nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- var db = replTest.getPrimary().getDB("test");
-
- const coll = db[jsTest.name()];
- const otherColl = db[coll.getName() + "_other"];
- otherColl.drop();
- coll.drop();
- db["viewWithNow"].drop();
- db["viewWithClusterTime"].drop();
-
- // Insert simple documents into the main test collection. Aggregation and view pipelines will
- // augment these docs with time-based fields.
- const numdocs = 1000;
- let bulk = coll.initializeUnorderedBulkOp();
+"use strict";
+
+var replTest = new ReplSetTest({name: "now_and_cluster_time", nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+var db = replTest.getPrimary().getDB("test");
+
+const coll = db[jsTest.name()];
+const otherColl = db[coll.getName() + "_other"];
+otherColl.drop();
+coll.drop();
+db["viewWithNow"].drop();
+db["viewWithClusterTime"].drop();
+
+// Insert simple documents into the main test collection. Aggregation and view pipelines will
+// augment these docs with time-based fields.
+const numdocs = 1000;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Insert into another collection with pre-made fields for testing the find() command.
+bulk = otherColl.initializeUnorderedBulkOp();
+const timeFieldValue = new Date();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
+}
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
+const viewWithNow = db["viewWithNow"];
+
+assert.commandWorked(db.createView(
+ "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+const viewWithClusterTime = db["viewWithClusterTime"];
+
+function toResultsArray(queryRes) {
+ return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
+}
+
+function runTests(query) {
+ const results = toResultsArray(query());
+ assert.eq(results.length, numdocs);
+
+ // Make sure the values are the same for all documents
for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
-
- // Insert into another collection with pre-made fields for testing the find() command.
- bulk = otherColl.initializeUnorderedBulkOp();
- const timeFieldValue = new Date();
- for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
- }
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
- const viewWithNow = db["viewWithNow"];
-
- assert.commandWorked(db.createView(
- "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
- const viewWithClusterTime = db["viewWithClusterTime"];
-
- function toResultsArray(queryRes) {
- return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
- }
-
- function runTests(query) {
- const results = toResultsArray(query());
- assert.eq(results.length, numdocs);
-
- // Make sure the values are the same for all documents
- for (let i = 0; i < numdocs; ++i) {
- assert.eq(results[0].timeField, results[i].timeField);
- }
-
- // Sleep for a while and then rerun.
- sleep(3000);
-
- const resultsLater = toResultsArray(query());
- assert.eq(resultsLater.length, numdocs);
-
- // Later results should be later in time.
- assert.lte(results[0].timeField, resultsLater[0].timeField);
- }
-
- function baseCollectionNowFind() {
- return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
- }
-
- function baseCollectionClusterTimeFind() {
- // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
- const results =
- otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
- results.forEach((val, idx) => {
- results[idx].timeField = results[idx].clusterTimeField;
- });
- return results;
- }
-
- function baseCollectionNowAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
- }
-
- function baseCollectionClusterTimeAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
- }
-
- function fromViewWithNow() {
- return viewWithNow.find();
- }
-
- function fromViewWithClusterTime() {
- return viewWithClusterTime.find();
- }
-
- function withExprNow() {
- return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
- }
-
- function withExprClusterTime() {
- return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+ assert.eq(results[0].timeField, results[i].timeField);
}
- // $$NOW
- runTests(baseCollectionNowFind);
- runTests(baseCollectionNowAgg);
- runTests(fromViewWithNow);
- runTests(withExprNow);
-
- // Test that $$NOW can be used in explain for both find and aggregate.
- assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
-
- // $$CLUSTER_TIME
- runTests(baseCollectionClusterTimeFind);
- runTests(baseCollectionClusterTimeAgg);
- runTests(fromViewWithClusterTime);
- runTests(withExprClusterTime);
-
- // Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
- assert.commandWorked(
- coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
-
- replTest.stopSet();
+ // Sleep for a while and then rerun.
+ sleep(3000);
+
+ const resultsLater = toResultsArray(query());
+ assert.eq(resultsLater.length, numdocs);
+
+ // Later results should be later in time.
+ assert.lte(results[0].timeField, resultsLater[0].timeField);
+}
+
+function baseCollectionNowFind() {
+ return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
+}
+
+function baseCollectionClusterTimeFind() {
+ // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
+ const results =
+ otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
+ results.forEach((val, idx) => {
+ results[idx].timeField = results[idx].clusterTimeField;
+ });
+ return results;
+}
+
+function baseCollectionNowAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
+}
+
+function baseCollectionClusterTimeAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
+}
+
+function fromViewWithNow() {
+ return viewWithNow.find();
+}
+
+function fromViewWithClusterTime() {
+ return viewWithClusterTime.find();
+}
+
+function withExprNow() {
+ return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
+}
+
+function withExprClusterTime() {
+ return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+}
+
+// $$NOW
+runTests(baseCollectionNowFind);
+runTests(baseCollectionNowAgg);
+runTests(fromViewWithNow);
+runTests(withExprNow);
+
+// Test that $$NOW can be used in explain for both find and aggregate.
+assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
+
+// $$CLUSTER_TIME
+runTests(baseCollectionClusterTimeFind);
+runTests(baseCollectionClusterTimeAgg);
+runTests(fromViewWithClusterTime);
+runTests(withExprClusterTime);
+
+// Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
+assert.commandWorked(
+ coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(
+ viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+
+replTest.stopSet();
}());
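A minimal sketch of the $$NOW and $$CLUSTER_TIME variables this test exercises, assuming any collection `coll` on a replica set (a plain standalone has no cluster time); each variable is evaluated once per query, so all documents in one result set observe the same value:

    coll.aggregate([{$addFields: {now: '$$NOW', ct: '$$CLUSTER_TIME'}}]);
    coll.find({$expr: {$lte: ['$timeField', '$$NOW']}});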
diff --git a/jstests/sharding/now_variable_sharding.js b/jstests/sharding/now_variable_sharding.js
index 49e2833b46f..13f9b90e626 100644
--- a/jstests/sharding/now_variable_sharding.js
+++ b/jstests/sharding/now_variable_sharding.js
@@ -3,150 +3,149 @@
*/
// @tags: [requires_find_command]
(function() {
- "use strict";
-
- var st = new ShardingTest({mongos: 1, shards: 2});
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- st.adminCommand({enableSharding: "test"});
- st.ensurePrimaryShard("test", st.rs0.getURL());
-
- var db = st.s.getDB("test");
-
- const numdocs = 1000;
-
- const coll = db[jsTest.name()];
- const otherColl = db[coll.getName() + "_other"];
-
- for (let testColl of[coll, otherColl]) {
- testColl.createIndex({_id: 1}, {unique: true});
-
- st.adminCommand({shardcollection: testColl.getFullName(), key: {_id: 1}});
- st.adminCommand({split: testColl.getFullName(), middle: {_id: numdocs / 2}});
-
- st.adminCommand({
- moveChunk: testColl.getFullName(),
- find: {_id: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- });
- st.adminCommand({
- moveChunk: testColl.getFullName(),
- find: {_id: numdocs / 2},
- to: st.shard0.shardName,
- _waitForDelete: true
- });
- }
-
- // Insert simple documents into the main test collection. Aggregation and view pipelines will
- // augment these docs with time-based fields.
- let bulk = coll.initializeUnorderedBulkOp();
+"use strict";
+
+var st = new ShardingTest({mongos: 1, shards: 2});
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+st.adminCommand({enableSharding: "test"});
+st.ensurePrimaryShard("test", st.rs0.getURL());
+
+var db = st.s.getDB("test");
+
+const numdocs = 1000;
+
+const coll = db[jsTest.name()];
+const otherColl = db[coll.getName() + "_other"];
+
+for (let testColl of [coll, otherColl]) {
+ testColl.createIndex({_id: 1}, {unique: true});
+
+ st.adminCommand({shardcollection: testColl.getFullName(), key: {_id: 1}});
+ st.adminCommand({split: testColl.getFullName(), middle: {_id: numdocs / 2}});
+
+ st.adminCommand({
+ moveChunk: testColl.getFullName(),
+ find: {_id: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+ });
+ st.adminCommand({
+ moveChunk: testColl.getFullName(),
+ find: {_id: numdocs / 2},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+ });
+}
+
+// Insert simple documents into the main test collection. Aggregation and view pipelines will
+// augment these docs with time-based fields.
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute());
+
+// Insert into another collection with pre-made fields for testing the find() command.
+bulk = otherColl.initializeUnorderedBulkOp();
+const timeFieldValue = new Date();
+for (let i = 0; i < numdocs; ++i) {
+ bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
+}
+assert.commandWorked(bulk.execute());
+
+assert.commandWorked(
+ db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
+const viewWithNow = db["viewWithNow"];
+
+assert.commandWorked(db.createView(
+ "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+const viewWithClusterTime = db["viewWithClusterTime"];
+
+function toResultsArray(queryRes) {
+ return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
+}
+
+function runTests(query) {
+ const results = toResultsArray(query());
+ assert.eq(results.length, numdocs);
+
+ // Make sure the values are the same for all documents
for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute());
-
- // Insert into another collection with pre-made fields for testing the find() command.
- bulk = otherColl.initializeUnorderedBulkOp();
- const timeFieldValue = new Date();
- for (let i = 0; i < numdocs; ++i) {
- bulk.insert({_id: i, timeField: timeFieldValue, clusterTimeField: new Timestamp(0, 1)});
- }
- assert.commandWorked(bulk.execute());
-
- assert.commandWorked(
- db.createView("viewWithNow", coll.getName(), [{$addFields: {timeField: "$$NOW"}}]));
- const viewWithNow = db["viewWithNow"];
-
- assert.commandWorked(db.createView(
- "viewWithClusterTime", coll.getName(), [{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
- const viewWithClusterTime = db["viewWithClusterTime"];
-
- function toResultsArray(queryRes) {
- return Array.isArray(queryRes) ? queryRes : queryRes.toArray();
- }
-
- function runTests(query) {
- const results = toResultsArray(query());
- assert.eq(results.length, numdocs);
-
- // Make sure the values are the same for all documents
- for (let i = 0; i < numdocs; ++i) {
- assert.eq(results[0].timeField, results[i].timeField);
- }
-
- // Sleep for a while and then rerun.
- sleep(3000);
-
- const resultsLater = toResultsArray(query());
- assert.eq(resultsLater.length, numdocs);
-
- // Later results should be later in time.
- assert.lte(results[0].timeField, resultsLater[0].timeField);
- }
-
- function baseCollectionNowFind() {
- return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
- }
-
- function baseCollectionClusterTimeFind() {
- // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
- const results =
- otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
- results.forEach((val, idx) => {
- results[idx].timeField = results[idx].clusterTimeField;
- });
- return results;
- }
-
- function baseCollectionNowAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
- }
-
- function baseCollectionClusterTimeAgg() {
- return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
- }
-
- function fromViewWithNow() {
- return viewWithNow.find();
- }
-
- function fromViewWithClusterTime() {
- return viewWithClusterTime.find();
- }
-
- function withExprNow() {
- return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
- }
-
- function withExprClusterTime() {
- return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+ assert.eq(results[0].timeField, results[i].timeField);
}
- // $$NOW
- runTests(baseCollectionNowFind);
- runTests(baseCollectionNowAgg);
- runTests(fromViewWithNow);
- runTests(withExprNow);
-
- // Test that $$NOW can be used in explain for both find and aggregate.
- assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
-
- // $$CLUSTER_TIME
- runTests(baseCollectionClusterTimeFind);
- runTests(baseCollectionClusterTimeAgg);
- runTests(fromViewWithClusterTime);
- runTests(withExprClusterTime);
-
- // Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
- assert.commandWorked(
- coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(
- viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
- assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
-
- st.stop();
+ // Sleep for a while and then rerun.
+ sleep(3000);
+
+ const resultsLater = toResultsArray(query());
+ assert.eq(resultsLater.length, numdocs);
+
+ // Later results should be later in time.
+ assert.lte(results[0].timeField, resultsLater[0].timeField);
+}
+
+function baseCollectionNowFind() {
+ return otherColl.find({$expr: {$lte: ["$timeField", "$$NOW"]}});
+}
+
+function baseCollectionClusterTimeFind() {
+ // The test validator examines 'timeField', so we copy clusterTimeField into timeField here.
+ const results =
+ otherColl.find({$expr: {$lt: ["$clusterTimeField", "$$CLUSTER_TIME"]}}).toArray();
+ results.forEach((val, idx) => {
+ results[idx].timeField = results[idx].clusterTimeField;
+ });
+ return results;
+}
+
+function baseCollectionNowAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$NOW"}}]);
+}
+
+function baseCollectionClusterTimeAgg() {
+ return coll.aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]);
+}
+
+function fromViewWithNow() {
+ return viewWithNow.find();
+}
+
+function fromViewWithClusterTime() {
+ return viewWithClusterTime.find();
+}
+
+function withExprNow() {
+ return viewWithNow.find({$expr: {$eq: ["$timeField", "$$NOW"]}});
+}
+
+function withExprClusterTime() {
+ return viewWithClusterTime.find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}});
+}
+
+// $$NOW
+runTests(baseCollectionNowFind);
+runTests(baseCollectionNowAgg);
+runTests(fromViewWithNow);
+runTests(withExprNow);
+
+// Test that $$NOW can be used in explain for both find and aggregate.
+assert.commandWorked(coll.explain().find({$expr: {$lte: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$NOW"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$NOW"}}]));
+
+// $$CLUSTER_TIME
+runTests(baseCollectionClusterTimeFind);
+runTests(baseCollectionClusterTimeAgg);
+runTests(fromViewWithClusterTime);
+runTests(withExprClusterTime);
+
+// Test that $$CLUSTER_TIME can be used in explain for both find and aggregate.
+assert.commandWorked(
+ coll.explain().find({$expr: {$lte: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(
+ viewWithNow.explain().find({$expr: {$eq: ["$timeField", "$$CLUSTER_TIME"]}}).finish());
+assert.commandWorked(coll.explain().aggregate([{$addFields: {timeField: "$$CLUSTER_TIME"}}]));
+
+st.stop();
}());
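
The setup loop in this test follows a common jstests idiom: split each collection at its midpoint, then cross-move the halves so both shards own data. Condensed into a helper (a sketch assuming a ShardingTest 'st' and an {_id: 1} shard key):

    function splitAndSwap(st, fullName, midpoint) {
        st.adminCommand({split: fullName, middle: {_id: midpoint}});
        // The chunk containing _id 0 goes to shard1; the chunk from the
        // midpoint up goes to shard0. _waitForDelete blocks until orphaned
        // documents are cleaned up on the donor.
        st.adminCommand(
            {moveChunk: fullName, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true});
        st.adminCommand({
            moveChunk: fullName,
            find: {_id: midpoint},
            to: st.shard0.shardName,
            _waitForDelete: true
        });
    }
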
diff --git a/jstests/sharding/operation_time_api.js b/jstests/sharding/operation_time_api.js
index a64468f027b..cd694503590 100644
--- a/jstests/sharding/operation_time_api.js
+++ b/jstests/sharding/operation_time_api.js
@@ -6,66 +6,66 @@
* - standalone mongod
*/
(function() {
- "use strict";
+"use strict";
- function responseContainsTimestampOperationTime(res) {
- return res.operationTime !== undefined && isTimestamp(res.operationTime);
- }
+function responseContainsTimestampOperationTime(res) {
+ return res.operationTime !== undefined && isTimestamp(res.operationTime);
+}
- function isTimestamp(val) {
- return Object.prototype.toString.call(val) === "[object Timestamp]";
- }
+function isTimestamp(val) {
+ return Object.prototype.toString.call(val) === "[object Timestamp]";
+}
- // A mongos that talks to a non-sharded collection on a sharded replica set returns an
- // operationTime that is a Timestamp.
- var st = new ShardingTest({name: "operation_time_api", shards: {rs0: {nodes: 1}}});
+// A mongos that talks to a non-sharded collection on a sharded replica set returns an
+// operationTime that is a Timestamp.
+var st = new ShardingTest({name: "operation_time_api", shards: {rs0: {nodes: 1}}});
- var testDB = st.s.getDB("test");
- var res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a mongos talking to a non-sharded collection on a sharded " +
- "replica set to contain an operationTime, received: " + tojson(res));
+var testDB = st.s.getDB("test");
+var res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 1}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a mongos talking to a non-sharded collection on a sharded " +
+ "replica set to contain an operationTime, received: " + tojson(res));
- // A mongos that talks to a sharded collection on a sharded replica set returns an operationTime
- // that is a Timestamp.
- assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
- assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
+// A mongos that talks to a sharded collection on a sharded replica set returns an operationTime
+// that is a Timestamp.
+assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
+assert.commandWorked(st.s.adminCommand({shardCollection: "test.bar", key: {x: 1}}));
- res = assert.commandWorked(testDB.runCommand({insert: "bar", documents: [{x: 2}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a mongos inserting to a sharded collection on a sharded " +
- "replica set to contain an operationTime, received: " + tojson(res));
+res = assert.commandWorked(testDB.runCommand({insert: "bar", documents: [{x: 2}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a mongos inserting to a sharded collection on a sharded " +
+ "replica set to contain an operationTime, received: " + tojson(res));
- // A mongod in a sharded replica set returns an operationTime that is a Timestamp.
- testDB = st.rs0.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 3}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a mongod in a sharded replica set to contain an " +
- "operationTime, received: " + tojson(res));
+// A mongod in a sharded replica set returns an operationTime that is a Timestamp.
+testDB = st.rs0.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 3}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a mongod in a sharded replica set to contain an " +
+ "operationTime, received: " + tojson(res));
- st.stop();
+st.stop();
- // A mongod from a non-sharded replica set returns an operationTime that is a Timestamp.
- var replTest = new ReplSetTest({name: "operation_time_api_non_sharded_replset", nodes: 1});
- replTest.startSet();
- replTest.initiate();
+// A mongod from a non-sharded replica set returns an operationTime that is a Timestamp.
+var replTest = new ReplSetTest({name: "operation_time_api_non_sharded_replset", nodes: 1});
+replTest.startSet();
+replTest.initiate();
- testDB = replTest.getPrimary().getDB("test");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 4}]}));
- assert(responseContainsTimestampOperationTime(res),
- "Expected response from a non-sharded replica set to contain an operationTime, " +
- "received: " + tojson(res));
+testDB = replTest.getPrimary().getDB("test");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 4}]}));
+assert(responseContainsTimestampOperationTime(res),
+ "Expected response from a non-sharded replica set to contain an operationTime, " +
+ "received: " + tojson(res));
- replTest.stopSet();
+replTest.stopSet();
- // A standalone mongod does not return an operationTime.
- var standalone = MongoRunner.runMongod();
+// A standalone mongod does not return an operationTime.
+var standalone = MongoRunner.runMongod();
- testDB = standalone.getDB("test");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 5}]}));
- assert(!responseContainsTimestampOperationTime(res),
- "Expected response from a standalone mongod to not contain an operationTime, " +
- "received: " + tojson(res));
+testDB = standalone.getDB("test");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 5}]}));
+assert(!responseContainsTimestampOperationTime(res),
+ "Expected response from a standalone mongod to not contain an operationTime, " +
+ "received: " + tojson(res));
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
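
The operationTime probe above reduces to a type check on an ordinary command response. Restated as a standalone helper (the 'conn' parameter and 'probe' collection are assumptions for illustration, not part of this patch):

    function hasTimestampOperationTime(conn) {
        const res = assert.commandWorked(
            conn.getDB("test").runCommand({insert: "probe", documents: [{}]}));
        // BSON Timestamps stringify as "[object Timestamp]" through
        // Object.prototype.toString, which is the type check the test uses.
        return res.operationTime !== undefined &&
            Object.prototype.toString.call(res.operationTime) === "[object Timestamp]";
    }
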
diff --git a/jstests/sharding/oplog_document_key.js b/jstests/sharding/oplog_document_key.js
index ba20575b031..d138457e4f0 100644
--- a/jstests/sharding/oplog_document_key.js
+++ b/jstests/sharding/oplog_document_key.js
@@ -6,149 +6,149 @@
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({name: 'test', shards: {rs0: {nodes: 1}}});
- var db = st.s.getDB('test');
+var st = new ShardingTest({name: 'test', shards: {rs0: {nodes: 1}}});
+var db = st.s.getDB('test');
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- // 'test.un' is left unsharded.
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byId', key: {_id: 1}}));
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byX', key: {x: 1}}));
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byXId', key: {x: 1, _id: 1}}));
- assert.commandWorked(db.adminCommand({shardcollection: 'test.byIdX', key: {_id: 1, x: 1}}));
+// 'test.un' is left unsharded.
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byId', key: {_id: 1}}));
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byX', key: {x: 1}}));
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byXId', key: {x: 1, _id: 1}}));
+assert.commandWorked(db.adminCommand({shardcollection: 'test.byIdX', key: {_id: 1, x: 1}}));
- assert.writeOK(db.un.insert({_id: 10, x: 50, y: 60}));
- assert.writeOK(db.un.insert({_id: 30, x: 70, y: 80}));
+assert.writeOK(db.un.insert({_id: 10, x: 50, y: 60}));
+assert.writeOK(db.un.insert({_id: 30, x: 70, y: 80}));
- assert.writeOK(db.byId.insert({_id: 11, x: 51, y: 61}));
- assert.writeOK(db.byId.insert({_id: 31, x: 71, y: 81}));
+assert.writeOK(db.byId.insert({_id: 11, x: 51, y: 61}));
+assert.writeOK(db.byId.insert({_id: 31, x: 71, y: 81}));
- assert.writeOK(db.byX.insert({_id: 12, x: 52, y: 62}));
- assert.writeOK(db.byX.insert({_id: 32, x: 72, y: 82}));
+assert.writeOK(db.byX.insert({_id: 12, x: 52, y: 62}));
+assert.writeOK(db.byX.insert({_id: 32, x: 72, y: 82}));
- assert.writeOK(db.byXId.insert({_id: 13, x: 53, y: 63}));
- assert.writeOK(db.byXId.insert({_id: 33, x: 73, y: 83}));
+assert.writeOK(db.byXId.insert({_id: 13, x: 53, y: 63}));
+assert.writeOK(db.byXId.insert({_id: 33, x: 73, y: 83}));
- assert.writeOK(db.byIdX.insert({_id: 14, x: 54, y: 64}));
- assert.writeOK(db.byIdX.insert({_id: 34, x: 74, y: 84}));
+assert.writeOK(db.byIdX.insert({_id: 14, x: 54, y: 64}));
+assert.writeOK(db.byIdX.insert({_id: 34, x: 74, y: 84}));
- var oplog = st.rs0.getPrimary().getDB('local').oplog.rs;
+var oplog = st.rs0.getPrimary().getDB('local').oplog.rs;
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'un'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'un'");
- assert.writeOK(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
- assert.writeOK(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
+assert.writeOK(db.un.update({_id: 10, x: 50}, {$set: {y: 70}})); // in place
+assert.writeOK(db.un.update({_id: 30, x: 70}, {y: 75})); // replacement
- // unsharded, only _id appears in o2:
+// unsharded, only _id appears in o2:
- var a = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 10});
- assert.eq(a.o2, {_id: 10});
+var a = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 10});
+assert.eq(a.o2, {_id: 10});
- var b = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 30});
- assert.eq(b.o2, {_id: 30});
+var b = oplog.findOne({ns: 'test.un', op: 'u', 'o2._id': 30});
+assert.eq(b.o2, {_id: 30});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byId'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byId'");
- assert.writeOK(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
- assert.writeOK(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
+assert.writeOK(db.byId.update({_id: 11}, {$set: {y: 71}})); // in place
+assert.writeOK(db.byId.update({_id: 31}, {x: 71, y: 76})); // replacement
- // sharded by {_id: 1}: only _id appears in o2:
+// sharded by {_id: 1}: only _id appears in o2:
- a = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 11});
- assert.eq(a.o2, {_id: 11});
+a = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 11});
+assert.eq(a.o2, {_id: 11});
- b = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 31});
- assert.eq(b.o2, {_id: 31});
+b = oplog.findOne({ns: 'test.byId', op: 'u', 'o2._id': 31});
+assert.eq(b.o2, {_id: 31});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byX'");
- assert.writeOK(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
- assert.writeOK(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
+assert.writeOK(db.byX.update({x: 52}, {$set: {y: 72}})); // in place
+assert.writeOK(db.byX.update({x: 72}, {x: 72, y: 77})); // replacement
- // sharded by {x: 1}: x appears in o2, followed by _id:
+// sharded by {x: 1}: x appears in o2, followed by _id:
- a = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 12});
- assert.eq(a.o2, {x: 52, _id: 12});
+a = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 12});
+assert.eq(a.o2, {x: 52, _id: 12});
- b = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 32});
- assert.eq(b.o2, {x: 72, _id: 32});
+b = oplog.findOne({ns: 'test.byX', op: 'u', 'o2._id': 32});
+assert.eq(b.o2, {x: 72, _id: 32});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byXId'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byXId'");
- assert.writeOK(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
- assert.writeOK(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
+assert.writeOK(db.byXId.update({_id: 13, x: 53}, {$set: {y: 73}})); // in place
+assert.writeOK(db.byXId.update({_id: 33, x: 73}, {x: 73, y: 78})); // replacement
- // sharded by {x: 1, _id: 1}: x appears in o2, followed by _id:
+// sharded by {x: 1, _id: 1}: x appears in o2, followed by _id:
- a = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 13});
- assert.eq(a.o2, {x: 53, _id: 13});
+a = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 13});
+assert.eq(a.o2, {x: 53, _id: 13});
- b = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 33});
- assert.eq(b.o2, {x: 73, _id: 33});
+b = oplog.findOne({ns: 'test.byXId', op: 'u', 'o2._id': 33});
+assert.eq(b.o2, {x: 73, _id: 33});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test update command on 'byIdX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test update command on 'byIdX'");
- assert.writeOK(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
- assert.writeOK(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
+assert.writeOK(db.byIdX.update({_id: 14, x: 54}, {$set: {y: 74}})); // in place
+assert.writeOK(db.byIdX.update({_id: 34, x: 74}, {x: 74, y: 79})); // replacement
- // sharded by {_id: 1, x: 1}: _id appears in o2, followed by x:
+// sharded by {_id: 1, x: 1}: _id appears in o2, followed by x:
- a = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 14});
- assert.eq(a.o2, {_id: 14, x: 54});
+a = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 14});
+assert.eq(a.o2, {_id: 14, x: 54});
- b = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 34});
- assert.eq(b.o2, {_id: 34, x: 74});
+b = oplog.findOne({ns: 'test.byIdX', op: 'u', 'o2._id': 34});
+assert.eq(b.o2, {_id: 34, x: 74});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'un'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'un'");
- assert.writeOK(db.un.remove({_id: 10}));
- assert.writeOK(db.un.remove({_id: 30}));
+assert.writeOK(db.un.remove({_id: 10}));
+assert.writeOK(db.un.remove({_id: 30}));
- a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
- assert.eq(a.o, {_id: 10});
- b = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 30});
- assert.eq(b.o, {_id: 30});
+a = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 10});
+assert.eq(a.o, {_id: 10});
+b = oplog.findOne({ns: 'test.un', op: 'd', 'o._id': 30});
+assert.eq(b.o, {_id: 30});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'byX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'byX'");
- assert.writeOK(db.byX.remove({_id: 12}));
- assert.writeOK(db.byX.remove({_id: 32}));
+assert.writeOK(db.byX.remove({_id: 12}));
+assert.writeOK(db.byX.remove({_id: 32}));
- a = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 12});
- assert.eq(a.o, {x: 52, _id: 12});
- b = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 32});
- assert.eq(b.o, {x: 72, _id: 32});
+a = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 12});
+assert.eq(a.o, {x: 52, _id: 12});
+b = oplog.findOne({ns: 'test.byX', op: 'd', 'o._id': 32});
+assert.eq(b.o, {x: 72, _id: 32});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'byXId'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'byXId'");
- assert.writeOK(db.byXId.remove({_id: 13}));
- assert.writeOK(db.byXId.remove({_id: 33}));
+assert.writeOK(db.byXId.remove({_id: 13}));
+assert.writeOK(db.byXId.remove({_id: 33}));
- a = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 13});
- assert.eq(a.o, {x: 53, _id: 13});
- b = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 33});
- assert.eq(b.o, {x: 73, _id: 33});
+a = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 13});
+assert.eq(a.o, {x: 53, _id: 13});
+b = oplog.findOne({ns: 'test.byXId', op: 'd', 'o._id': 33});
+assert.eq(b.o, {x: 73, _id: 33});
- ////////////////////////////////////////////////////////////////////////
- jsTest.log("Test remove command: 'byIdX'");
+////////////////////////////////////////////////////////////////////////
+jsTest.log("Test remove command: 'byIdX'");
- assert.writeOK(db.byIdX.remove({_id: 14}));
- assert.writeOK(db.byIdX.remove({_id: 34}));
+assert.writeOK(db.byIdX.remove({_id: 14}));
+assert.writeOK(db.byIdX.remove({_id: 34}));
- a = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
- assert.eq(a.o, {_id: 14, x: 54});
- b = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 34});
- assert.eq(b.o, {_id: 34, x: 74});
+a = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 14});
+assert.eq(a.o, {_id: 14, x: 54});
+b = oplog.findOne({ns: 'test.byIdX', op: 'd', 'o._id': 34});
+assert.eq(b.o, {_id: 34, x: 74});
- st.stop();
+st.stop();
})();
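
The assertions above all encode one invariant: update and delete oplog entries for a sharded collection carry the document key, i.e. the shard key fields in key order with _id appended when it is not already part of the key. A hypothetical helper making that rule explicit (illustrative, not part of the test):

    function expectedDocumentKeyFields(shardKeyPattern) {
        const fields = Object.keys(shardKeyPattern);
        if (!fields.includes("_id"))
            fields.push("_id");  // _id is always part of the document key
        return fields;           // {x: 1}         -> ["x", "_id"]
                                 // {_id: 1, x: 1} -> ["_id", "x"]
    }
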
diff --git a/jstests/sharding/out_fails_to_replace_sharded_collection.js b/jstests/sharding/out_fails_to_replace_sharded_collection.js
index d96a5489ac5..ba72de459ee 100644
--- a/jstests/sharding/out_fails_to_replace_sharded_collection.js
+++ b/jstests/sharding/out_fails_to_replace_sharded_collection.js
@@ -1,46 +1,46 @@
// Tests that an aggregate with an $out cannot output to a sharded collection, even if the
// collection becomes sharded during the aggregation.
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For 'assertErrorCode'.
-
- const st = new ShardingTest({shards: 2});
-
- const mongosDB = st.s.getDB("test");
- const sourceColl = mongosDB.source;
- const targetColl = mongosDB.target;
-
- assert.commandWorked(sourceColl.insert(Array.from({length: 10}, (_, i) => ({_id: i}))));
-
- // First simply test that the $out fails if the target collection is definitely sharded, meaning
- // it starts as sharded and remains sharded for the duration of the $out.
- st.shardColl(targetColl, {_id: 1}, false);
- assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
-
- // Test that the "legacy" mode will not succeed when outputting to a sharded collection, even
- // for explain.
- let error = assert.throws(() => sourceColl.explain().aggregate([{$out: targetColl.getName()}]));
- assert.eq(error.code, 28769);
-
- // Then test that the $out fails if the collection becomes sharded between establishing the
- // cursor and performing the $out.
- targetColl.drop();
- const cursorResponse = assert.commandWorked(mongosDB.runCommand({
- aggregate: sourceColl.getName(),
- pipeline: [{$out: targetColl.getName()}],
- cursor: {batchSize: 0}
- }));
- st.shardColl(targetColl, {_id: 1}, false);
- error = assert.throws(() => new DBCommandCursor(mongosDB, cursorResponse).itcount());
- // On master, we check whether the output collection is sharded at parse time so this error code
- // is simply 'CommandFailed' because it is a failed rename going through the DBDirectClient. The
- // message should indicate that the rename failed. In a mixed-version environment we can end up
- // with the code 17017 because a v4.0 shard will assert the collection is unsharded before
- // performing any writes but after parse time, instead of relying on the rename to fail. Because
- // this test is run in a mixed-version passthrough we have to allow both. Once 4.2 becomes the
- // last stable version, this assertion can be tightened up to only expect CommandFailed.
- assert.contains(error.code, [ErrorCodes.CommandFailed, 17017]);
-
- st.stop();
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For 'assertErrorCode'.
+
+const st = new ShardingTest({shards: 2});
+
+const mongosDB = st.s.getDB("test");
+const sourceColl = mongosDB.source;
+const targetColl = mongosDB.target;
+
+assert.commandWorked(sourceColl.insert(Array.from({length: 10}, (_, i) => ({_id: i}))));
+
+// First simply test that the $out fails if the target collection is definitely sharded, meaning
+// it starts as sharded and remains sharded for the duration of the $out.
+st.shardColl(targetColl, {_id: 1}, false);
+assertErrorCode(sourceColl, [{$out: targetColl.getName()}], 28769);
+
+// Test that the "legacy" mode will not succeed when outputting to a sharded collection, even
+// for explain.
+let error = assert.throws(() => sourceColl.explain().aggregate([{$out: targetColl.getName()}]));
+assert.eq(error.code, 28769);
+
+// Then test that the $out fails if the collection becomes sharded between establishing the
+// cursor and performing the $out.
+targetColl.drop();
+const cursorResponse = assert.commandWorked(mongosDB.runCommand({
+ aggregate: sourceColl.getName(),
+ pipeline: [{$out: targetColl.getName()}],
+ cursor: {batchSize: 0}
+}));
+st.shardColl(targetColl, {_id: 1}, false);
+error = assert.throws(() => new DBCommandCursor(mongosDB, cursorResponse).itcount());
+// On master, we check whether the output collection is sharded at parse time so this error code
+// is simply 'CommandFailed' because it is a failed rename going through the DBDirectClient. The
+// message should indicate that the rename failed. In a mixed-version environment we can end up
+// with the code 17017 because a v4.0 shard will assert the collection is unsharded before
+// performing any writes but after parse time, instead of relying on the rename to fail. Because
+// this test is run in a mixed-version passthrough we have to allow both. Once 4.2 becomes the
+// last stable version, this assertion can be tightened up to only expect CommandFailed.
+assert.contains(error.code, [ErrorCodes.CommandFailed, 17017]);
+
+st.stop();
}());
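
The first assertion can also be issued as a raw command, independent of the assertErrorCode helper. A sketch under the same setup (sharded target, unsharded source):

    const res = mongosDB.runCommand({
        aggregate: sourceColl.getName(),
        pipeline: [{$out: targetColl.getName()}],
        cursor: {}
    });
    // 28769: $out cannot replace a collection that is sharded.
    assert.commandFailedWithCode(res, 28769);
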
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index 04217dcc390..c02e708fd53 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -1,58 +1,55 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
(function() {
- "use strict";
-
- var numShards = 3;
- var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-
- var db = s.getDB("test");
-
- var N = 10000;
- var shards = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
-
- for (var i = 0; i < N; i += (N / 10)) {
- s.adminCommand({split: "test.foo", middle: {_id: i}});
- s.s.getDB('admin').runCommand({
- moveChunk: "test.foo",
- find: {_id: i},
- to: shards[Math.floor(Math.random() * numShards)]
- });
- }
-
- s.startBalancer();
-
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
-
- var doCommand = function(dbname, cmd) {
- x = benchRun({
- ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
- host: db.getMongo().host,
- parallel: 2,
- seconds: 2
- });
- printjson(x);
- x = benchRun({
- ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
- host: s._mongos[1].host,
- parallel: 2,
- seconds: 2
- });
- printjson(x);
- };
-
- doCommand("test", {dbstats: 1});
- doCommand("config", {dbstats: 1});
-
- var x = s.getDB("config").stats();
- assert(x.ok, tojson(x));
+"use strict";
+
+var numShards = 3;
+var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+var db = s.getDB("test");
+
+var N = 10000;
+var shards = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName];
+
+for (var i = 0; i < N; i += (N / 10)) {
+ s.adminCommand({split: "test.foo", middle: {_id: i}});
+ s.s.getDB('admin').runCommand(
+ {moveChunk: "test.foo", find: {_id: i}, to: shards[Math.floor(Math.random() * numShards)]});
+}
+
+s.startBalancer();
+
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+assert.writeOK(bulk.execute());
+
+var doCommand = function(dbname, cmd) {
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: db.getMongo().host,
+ parallel: 2,
+ seconds: 2
+ });
+ printjson(x);
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: s._mongos[1].host,
+ parallel: 2,
+ seconds: 2
+ });
printjson(x);
+};
+
+doCommand("test", {dbstats: 1});
+doCommand("config", {dbstats: 1});
+
+var x = s.getDB("config").stats();
+assert(x.ok, tojson(x));
+printjson(x);
- s.stop();
+s.stop();
}());
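
benchRun, used by doCommand above, drives a fixed op list from parallel client threads against one host and returns aggregate statistics when the run completes. A minimal standalone invocation (the namespace and query are illustrative):

    const stats = benchRun({
        ops: [{op: "findOne", ns: "test.$cmd", query: {dbstats: 1}}],
        host: db.getMongo().host,
        parallel: 2,  // two concurrent client threads
        seconds: 2    // run length before the summary is returned
    });
    printjson(stats);
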
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index fb8730b6ab4..06f9a2afec0 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -3,81 +3,80 @@
//
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
+var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
- var mongos = st.s0;
- var admin = mongos.getDB('admin');
- var coll = mongos.getCollection('foo.bar');
- var ns = coll.getFullName();
- var dbName = coll.getDB().getName();
+var mongos = st.s0;
+var admin = mongos.getDB('admin');
+var coll = mongos.getCollection('foo.bar');
+var ns = coll.getFullName();
+var dbName = coll.getDB().getName();
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+printjson(admin.runCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
- // Turn off best-effort recipient metadata refresh post-migration commit on both shards because
- // it would clean up the pending chunks on migration recipients.
- assert.commandWorked(st.shard0.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
- assert.commandWorked(st.shard1.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
+// Turn off best-effort recipient metadata refresh post-migration commit on both shards because
+// it would clean up the pending chunks on migration recipients.
+assert.commandWorked(st.shard0.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
+assert.commandWorked(st.shard1.getDB('admin').runCommand(
+ {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
- jsTest.log('Moving some chunks to shard1...');
+jsTest.log('Moving some chunks to shard1...');
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
- function getMetadata(shard) {
- var admin = shard.getDB('admin'),
- metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
+function getMetadata(shard) {
+ var admin = shard.getDB('admin'),
+ metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
- jsTest.log('Got metadata: ' + tojson(metadata));
- return metadata;
- }
+ jsTest.log('Got metadata: ' + tojson(metadata));
+ return metadata;
+}
- var metadata = getMetadata(st.shard1);
- assert.eq(metadata.pending[0][0]._id, 1);
- assert.eq(metadata.pending[0][1]._id, MaxKey);
+var metadata = getMetadata(st.shard1);
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
- jsTest.log('Moving some chunks back to shard0 after empty...');
+jsTest.log('Moving some chunks back to shard0 after empty...');
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName, _waitForDelete: true}));
- metadata = getMetadata(st.shard0);
- assert.eq(metadata.shardVersion.t, 0);
- assert.neq(metadata.collVersion.t, 0);
- assert.eq(metadata.pending.length, 0);
+metadata = getMetadata(st.shard0);
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending.length, 0);
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- metadata = getMetadata(st.shard0);
- assert.eq(metadata.shardVersion.t, 0);
- assert.neq(metadata.collVersion.t, 0);
- assert.eq(metadata.pending[0][0]._id, 1);
- assert.eq(metadata.pending[0][1]._id, MaxKey);
+metadata = getMetadata(st.shard0);
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
- // The pending chunk should be promoted to a real chunk when shard0 reloads
- // its config.
- jsTest.log('Checking that pending chunk is promoted on reload...');
+// The pending chunk should be promoted to a real chunk when shard0 reloads
+// its config.
+jsTest.log('Checking that pending chunk is promoted on reload...');
- assert.eq(null, coll.findOne({_id: 1}));
+assert.eq(null, coll.findOne({_id: 1}));
- metadata = getMetadata(st.shard0);
- assert.neq(metadata.shardVersion.t, 0);
- assert.neq(metadata.collVersion.t, 0);
- assert.eq(metadata.chunks[0][0]._id, 1);
- assert.eq(metadata.chunks[0][1]._id, MaxKey);
+metadata = getMetadata(st.shard0);
+assert.neq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.chunks[0][0]._id, 1);
+assert.eq(metadata.chunks[0][1]._id, MaxKey);
- st.printShardingStatus();
-
- st.stop();
+st.printShardingStatus();
+st.stop();
})();
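
getShardVersion with fullMetadata, wrapped by getMetadata above, is the window into a shard's cached routing table; 'pending' lists chunk ranges that have migrated in but are not yet reflected in the shard version. The same probe as a small helper (a sketch reusing the test's namespace):

    function pendingRanges(shard, ns) {
        const metadata =
            shard.getDB('admin').runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
        return metadata.pending;  // array of [minBound, maxBound] pairs
    }
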
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 6dd3b30344f..b1a730db297 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -7,194 +7,193 @@
// Insert docs with same val for 'skey' but different vals for 'extra'.
// Move chunks around and check that [min,max) chunk boundaries are properly obeyed.
(function() {
- 'use strict';
-
- // TODO: SERVER-33601 remove shardAsReplicaSet: false
- var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
-
- var db = s.getDB("test");
- var admin = s.getDB("admin");
- var config = s.getDB("config");
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- //******************Part 1********************
-
- var coll = db.foo;
-
- var longStr = 'a';
- while (longStr.length < 1024 * 128) {
- longStr += longStr;
+'use strict';
+
+// TODO: SERVER-33601 remove shardAsReplicaSet: false
+var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+
+var db = s.getDB("test");
+var admin = s.getDB("admin");
+var config = s.getDB("config");
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+//******************Part 1********************
+
+var coll = db.foo;
+
+var longStr = 'a';
+while (longStr.length < 1024 * 128) {
+ longStr += longStr;
+}
+var bulk = coll.initializeUnorderedBulkOp();
+for (i = 0; i < 100; i++) {
+ bulk.insert({num: i, str: longStr});
+ bulk.insert({num: i + 100, x: i, str: longStr});
+}
+assert.writeOK(bulk.execute());
+
+// no usable index yet, should throw
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
+
+// create usable index
+assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
+
+// usable index, but a doc is missing the 'num' field, so it should still throw
+assert.writeOK(coll.insert({x: -5}));
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
+
+// remove the bad doc; now sharding should finally succeed
+assert.writeOK(coll.remove({x: -5}));
+assert.commandWorked(s.s0.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}}));
+
+// make sure extra index is not created
+assert.eq(2, coll.getIndexes().length);
+
+// make sure balancing happens
+s.awaitBalance(coll.getName(), db.getName());
+
+// Make sure our initial balance cleanup doesn't interfere with later migrations.
+assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
+ return coll.count() == coll.find().itcount();
+});
+
+s.stopBalancer();
+
+// test splitting
+assert.commandWorked(s.s0.adminCommand({split: coll.getFullName(), middle: {num: 50}}));
+
+// test moving
+assert.commandWorked(s.s0.adminCommand({
+ movechunk: coll.getFullName(),
+ find: {num: 20},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+}));
+
+//******************Part 2********************
+
+// Migrations and splits will still work on a sharded collection that has only a
+// multikey index.
+db.user.ensureIndex({num: 1, x: 1});
+db.adminCommand({shardCollection: 'test.user', key: {num: 1}});
+
+var indexCount = db.user.getIndexes().length;
+assert.eq(2,
+ indexCount, // indexes for _id_ and num_1_x_1
+ 'index count not expected: ' + tojson(db.user.getIndexes()));
+
+var array = [];
+for (var item = 0; item < 50; item++) {
+ array.push(item);
+}
+
+for (var docs = 0; docs < 1000; docs++) {
+ db.user.insert({num: docs, x: array});
+}
+
+assert.eq(1000, db.user.find().itcount());
+
+assert.commandWorked(admin.runCommand({
+ movechunk: 'test.user',
+ find: {num: 70},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+}));
+
+var expectedShardCount = {shard0000: 0, shard0001: 0};
+config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
+
+ if (min < 0 || min == MinKey) {
+ min = 0;
}
- var bulk = coll.initializeUnorderedBulkOp();
- for (i = 0; i < 100; i++) {
- bulk.insert({num: i, str: longStr});
- bulk.insert({num: i + 100, x: i, str: longStr});
- }
- assert.writeOK(bulk.execute());
-
- // no usable index yet, should throw
- assert.throws(function() {
- s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
- });
-
- // create usable index
- assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
-
- // usable index, but doc with empty 'num' value, so still should throw
- assert.writeOK(coll.insert({x: -5}));
- assert.throws(function() {
- s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
- });
-
- // remove the bad doc. now should finally succeed
- assert.writeOK(coll.remove({x: -5}));
- assert.commandWorked(s.s0.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}}));
-
- // make sure extra index is not created
- assert.eq(2, coll.getIndexes().length);
-
- // make sure balancing happens
- s.awaitBalance(coll.getName(), db.getName());
-
- // Make sure our initial balance cleanup doesn't interfere with later migrations.
- assert.soon(function() {
- print("Waiting for migration cleanup to occur...");
- return coll.count() == coll.find().itcount();
- });
-
- s.stopBalancer();
-
- // test splitting
- assert.commandWorked(s.s0.adminCommand({split: coll.getFullName(), middle: {num: 50}}));
-
- // test moving
- assert.commandWorked(s.s0.adminCommand({
- movechunk: coll.getFullName(),
- find: {num: 20},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- }));
- //******************Part 2********************
-
- // Migrations and splits will still work on a sharded collection that only has multi key
- // index.
- db.user.ensureIndex({num: 1, x: 1});
- db.adminCommand({shardCollection: 'test.user', key: {num: 1}});
-
- var indexCount = db.user.getIndexes().length;
- assert.eq(2,
- indexCount, // indexes for _id_ and num_1_x_1
- 'index count not expected: ' + tojson(db.user.getIndexes()));
-
- var array = [];
- for (var item = 0; item < 50; item++) {
- array.push(item);
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
}
- for (var docs = 0; docs < 1000; docs++) {
- db.user.insert({num: docs, x: array});
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
}
+});
- assert.eq(1000, db.user.find().itcount());
-
- assert.commandWorked(admin.runCommand({
- movechunk: 'test.user',
- find: {num: 70},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- }));
-
- var expectedShardCount = {shard0000: 0, shard0001: 0};
- config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
- var min = chunkDoc.min.num;
- var max = chunkDoc.max.num;
-
- if (min < 0 || min == MinKey) {
- min = 0;
- }
-
- if (max > 1000 || max == MaxKey) {
- max = 1000;
- }
-
- if (max > 0) {
- expectedShardCount[chunkDoc.shard] += (max - min);
- }
- });
+assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
+assert.commandWorked(admin.runCommand({split: 'test.user', middle: {num: 70}}));
- assert.commandWorked(admin.runCommand({split: 'test.user', middle: {num: 70}}));
+assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0000'], s.shard0.getDB('test').user.find().count());
- assert.eq(expectedShardCount['shard0001'], s.shard1.getDB('test').user.find().count());
+//******************Part 3********************
- //******************Part 3********************
+// Check that chunk boundaries are obeyed when using a prefix shard key.
+// This test repeats with the shard key as the prefix of different longer indexes.
- // Check chunk boundaries obeyed when using prefix shard key.
- // This test repeats with shard key as the prefix of different longer indices.
-
- for (i = 0; i < 3; i++) {
- // setup new collection on shard0
- var coll2 = db.foo2;
- coll2.drop();
- if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != s.shard0.shardName) {
- var moveRes =
- admin.runCommand({movePrimary: coll2.getDB() + "", to: s.shard0.shardName});
- assert.eq(moveRes.ok, 1, "primary not moved correctly");
- }
+for (i = 0; i < 3; i++) {
+ // setup new collection on shard0
+ var coll2 = db.foo2;
+ coll2.drop();
+ if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != s.shard0.shardName) {
+ var moveRes = admin.runCommand({movePrimary: coll2.getDB() + "", to: s.shard0.shardName});
+ assert.eq(moveRes.ok, 1, "primary not moved correctly");
+ }
- // declare a longer index
- if (i == 0) {
- assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1}));
- } else if (i == 1) {
- assert.commandWorked(coll2.ensureIndex({skey: 1, extra: -1}));
- } else if (i == 2) {
- assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1, superfluous: -1}));
- }
+ // declare a longer index
+ if (i == 0) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1}));
+ } else if (i == 1) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: -1}));
+ } else if (i == 2) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1, superfluous: -1}));
+ }
- // then shard collection on prefix
- var shardRes = admin.runCommand({shardCollection: coll2 + "", key: {skey: 1}});
- assert.eq(shardRes.ok, 1, "collection not sharded");
+ // then shard collection on prefix
+ var shardRes = admin.runCommand({shardCollection: coll2 + "", key: {skey: 1}});
+ assert.eq(shardRes.ok, 1, "collection not sharded");
- // insert docs with same value for skey
- bulk = coll2.initializeUnorderedBulkOp();
- for (var i = 0; i < 5; i++) {
- for (var j = 0; j < 5; j++) {
- bulk.insert({skey: 0, extra: i, superfluous: j});
- }
+ // insert docs with same value for skey
+ bulk = coll2.initializeUnorderedBulkOp();
+ for (var i = 0; i < 5; i++) {
+ for (var j = 0; j < 5; j++) {
+ bulk.insert({skey: 0, extra: i, superfluous: j});
}
- assert.writeOK(bulk.execute());
+ }
+ assert.writeOK(bulk.execute());
- // split on that key, and check it makes 2 chunks
- var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
- assert.eq(splitRes.ok, 1, "split didn't work");
- assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
+ // split on that key, and check it makes 2 chunks
+ var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
+ assert.eq(splitRes.ok, 1, "split didn't work");
+ assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
- // movechunk should move ALL docs since they have same value for skey
- moveRes = admin.runCommand(
- {moveChunk: coll2 + "", find: {skey: 0}, to: s.shard1.shardName, _waitForDelete: true});
- assert.eq(moveRes.ok, 1, "movechunk didn't work");
+ // movechunk should move ALL docs since they have same value for skey
+ moveRes = admin.runCommand(
+ {moveChunk: coll2 + "", find: {skey: 0}, to: s.shard1.shardName, _waitForDelete: true});
+ assert.eq(moveRes.ok, 1, "movechunk didn't work");
- // Make sure our migration eventually goes through before testing individual shards
- assert.soon(function() {
- print("Waiting for migration cleanup to occur...");
- return coll2.count() == coll2.find().itcount();
- });
+ // Make sure our migration eventually goes through before testing individual shards
+ assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
+ return coll2.count() == coll2.find().itcount();
+ });
- // check no orphaned docs on the shards
- assert.eq(0, s.shard0.getCollection(coll2 + "").find().itcount());
- assert.eq(25, s.shard1.getCollection(coll2 + "").find().itcount());
+ // check no orphaned docs on the shards
+ assert.eq(0, s.shard0.getCollection(coll2 + "").find().itcount());
+ assert.eq(25, s.shard1.getCollection(coll2 + "").find().itcount());
- // and check total
- assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");
+ // and check total
+ assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");
- s.printShardingStatus();
- }
+ s.printShardingStatus();
+}
- s.stop();
+s.stop();
})();
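
Part 3 rests on a single rule: shardCollection accepts a key that is a prefix of an existing compound index, and no extra index is built in that case. In miniature, assuming a fresh collection 'c' in the already sharding-enabled 'test' database:

    assert.commandWorked(c.ensureIndex({skey: 1, extra: 1}));
    // {skey: 1} is a prefix of {skey: 1, extra: 1}, so sharding succeeds...
    assert.commandWorked(admin.runCommand({shardCollection: c + "", key: {skey: 1}}));
    // ...and no additional {skey: 1} index appears.
    assert.eq(2, c.getIndexes().length);  // _id_ plus skey_1_extra_1
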
diff --git a/jstests/sharding/prepare_transaction_then_migrate.js b/jstests/sharding/prepare_transaction_then_migrate.js
index 038ebfb8463..034259d02be 100644
--- a/jstests/sharding/prepare_transaction_then_migrate.js
+++ b/jstests/sharding/prepare_transaction_then_migrate.js
@@ -7,65 +7,67 @@
*/
(function() {
- "use strict";
- load('jstests/libs/chunk_manipulation_util.js');
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+"use strict";
+load('jstests/libs/chunk_manipulation_util.js');
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
- const dbName = "test";
- const collName = "user";
+const dbName = "test";
+const collName = "user";
- const staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+const staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- const st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
- st.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+const st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
+st.adminCommand({enableSharding: 'test'});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
- const session = st.s.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = st.s.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl.insert({_id: 1}));
- const lsid = {id: UUID()};
- const txnNumber = 0;
- const stmtId = 0;
+const lsid = {
+ id: UUID()
+};
+const txnNumber = 0;
+const stmtId = 0;
- assert.commandWorked(st.s0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 2}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
+assert.commandWorked(st.s0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: 2}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+}));
- const res = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
+const res = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
- const joinMoveChunk = moveChunkParallel(
- staticMongod, st.s.host, {_id: 1}, null, 'test.user', st.shard1.shardName);
+const joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s.host, {_id: 1}, null, 'test.user', st.shard1.shardName);
- // Wait for catchup to verify that the migration has exited the clone phase.
- waitForMigrateStep(st.shard1, migrateStepNames.catchup);
+// Wait for catchup to verify that the migration has exited the clone phase.
+waitForMigrateStep(st.shard1, migrateStepNames.catchup);
- assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- commitTimestamp: res.prepareTimestamp,
- }));
+assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ commitTimestamp: res.prepareTimestamp,
+}));
- joinMoveChunk();
+joinMoveChunk();
- assert.eq(sessionColl.find({_id: 2}).count(), 1);
+assert.eq(sessionColl.find({_id: 2}).count(), 1);
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
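
The test drives the transaction with raw driver-level fields rather than a session helper. The shape of a manually issued transactional statement, with illustrative values:

    // Every statement in the transaction repeats the same lsid and txnNumber;
    // startTransaction is sent only with the first statement.
    const myLsid = {id: UUID()};
    assert.commandWorked(st.s0.getDB("test").runCommand({
        insert: "user",
        documents: [{_id: 42}],
        lsid: myLsid,
        txnNumber: NumberLong(1),
        stmtId: NumberInt(0),
        startTransaction: true,
        autocommit: false
    }));
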
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index ec71924fc53..288d6abe694 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,40 +1,39 @@
(function() {
- var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- // Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
- bigString = "";
- while (bigString.length < 10000) {
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- }
-
- db = s.getDB("test");
- inserted = 0;
- num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (20 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
-
- // Make sure that there's only one chunk holding all the data.
- s.printChunks();
- primary = s.getPrimaryShard("test").getDB("test");
- assert.eq(0, s.config.chunks.count({"ns": "test.foo"}), "single chunk assertion");
- assert.eq(num, primary.foo.count());
-
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-
- // Make sure the collection's original chunk got split
- s.printChunks();
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
- assert.eq(num, primary.foo.count());
-
- s.printChangeLog();
- s.stop();
-
+var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+// Insert enough data in 'test.foo' to fill several chunks, if it were sharded.
+bigString = "";
+while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
+
+db = s.getDB("test");
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+// Make sure the collection is not yet sharded (it has no chunk metadata) and that
+// all of the data lives on the primary shard.
+s.printChunks();
+primary = s.getPrimaryShard("test").getDB("test");
+assert.eq(0, s.config.chunks.count({"ns": "test.foo"}), "single chunk assertion");
+assert.eq(num, primary.foo.count());
+
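+// Shard the collection. Because the data already far exceeds the 1MB chunk size,
+// the initial chunk should be split automatically.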
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+// Make sure the collection's original chunk got split.
+s.printChunks();
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "many chunks assertion");
+assert.eq(num, primary.foo.count());
+
+s.printChangeLog();
+s.stop();
})();
diff --git a/jstests/sharding/primary_config_server_blackholed_from_mongos.js b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
index a7cc266c1a2..674dc1f9235 100644
--- a/jstests/sharding/primary_config_server_blackholed_from_mongos.js
+++ b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
@@ -1,75 +1,75 @@
// Ensures that if the primary config server is blackholed from the point of view of mongos, CRUD
// and read-only config operations continue to work.
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1, useBridge: true});
+var st = new ShardingTest({shards: 2, mongos: 1, useBridge: true});
- var testDB = st.s.getDB('BlackHoleDB');
- var configDB = st.s.getDB('config');
+var testDB = st.s.getDB('BlackHoleDB');
+var configDB = st.s.getDB('config');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
- var bulk = testDB.ShardedColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+var bulk = testDB.ShardedColl.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- const configPrimary = st.configRS.getPrimary();
- const admin = configPrimary.getDB("admin");
+const configPrimary = st.configRS.getPrimary();
+const admin = configPrimary.getDB("admin");
- // Set the priority and votes to 0 for secondary config servers so that in the case
- // of an election, they cannot step up. If a different node were to step up, the
- // config server would no longer be blackholed from mongos.
- let conf = admin.runCommand({replSetGetConfig: 1}).config;
- for (let i = 0; i < conf.members.length; i++) {
- if (conf.members[i].host !== configPrimary.host) {
- conf.members[i].votes = 0;
- conf.members[i].priority = 0;
- }
+// Set the priority and votes to 0 for secondary config servers so that in the case
+// of an election, they cannot step up. If a different node were to step up, the
+// config server would no longer be blackholed from mongos.
+let conf = admin.runCommand({replSetGetConfig: 1}).config;
+for (let i = 0; i < conf.members.length; i++) {
+ if (conf.members[i].host !== configPrimary.host) {
+ conf.members[i].votes = 0;
+ conf.members[i].priority = 0;
}
- conf.version++;
- const response = admin.runCommand({replSetReconfig: conf});
- assert.commandWorked(response);
+}
+conf.version++;
+const response = admin.runCommand({replSetReconfig: conf});
+assert.commandWorked(response);
- jsTest.log('Partitioning the config server primary from the mongos');
- configPrimary.discardMessagesFrom(st.s, 1.0);
- st.s.discardMessagesFrom(configPrimary, 1.0);
+jsTest.log('Partitioning the config server primary from the mongos');
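+// Discarding 100% of messages in each direction simulates a full network blackhole.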
+configPrimary.discardMessagesFrom(st.s, 1.0);
+st.s.discardMessagesFrom(configPrimary, 1.0);
- assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
+assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
- // This should fail, because the primary is not available
- jsTest.log('Doing write operation on a new database and collection');
- assert.writeError(st.s.getDB('NonExistentDB')
- .TestColl.insert({_id: 0, value: 'This value will never be inserted'},
- {maxTimeMS: 15000}));
+// This should fail because the config server primary is not available.
+jsTest.log('Doing write operation on a new database and collection');
+assert.writeError(
+ st.s.getDB('NonExistentDB')
+ .TestColl.insert({_id: 0, value: 'This value will never be inserted'}, {maxTimeMS: 15000}));
- jsTest.log('Doing CRUD operations on the sharded collection');
- assert.eq(1000, testDB.ShardedColl.find().itcount());
- assert.writeOK(testDB.ShardedColl.insert({_id: 1000}));
- assert.eq(1001, testDB.ShardedColl.find().count());
+jsTest.log('Doing CRUD operations on the sharded collection');
+assert.eq(1000, testDB.ShardedColl.find().itcount());
+assert.writeOK(testDB.ShardedColl.insert({_id: 1000}));
+assert.eq(1001, testDB.ShardedColl.find().count());
- jsTest.log('Doing read operations on a config server collection');
+jsTest.log('Doing read operations on a config server collection');
- // Should fail due to primary read preference
- assert.throws(function() {
- configDB.chunks.find().itcount();
- });
- assert.throws(function() {
- configDB.chunks.find().count();
- });
- assert.throws(function() {
- configDB.chunks.aggregate().itcount();
- });
+// Should fail due to primary read preference
+assert.throws(function() {
+ configDB.chunks.find().itcount();
+});
+assert.throws(function() {
+ configDB.chunks.find().count();
+});
+assert.throws(function() {
+ configDB.chunks.aggregate().itcount();
+});
- // With secondary read pref config server reads should work
- st.s.setReadPref('secondary');
- assert.lt(0, configDB.chunks.find().itcount());
- assert.lt(0, configDB.chunks.find().count());
- assert.lt(0, configDB.chunks.aggregate().itcount());
+// With secondary read preference, config server reads should work.
+st.s.setReadPref('secondary');
+assert.lt(0, configDB.chunks.find().itcount());
+assert.lt(0, configDB.chunks.find().count());
+assert.lt(0, configDB.chunks.aggregate().itcount());
- st.stop();
+st.stop();
}());
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 3b1548aabab..18bc8bdea6e 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -3,251 +3,248 @@
// headings and the names of sharded collections and their shard keys.
(function() {
- 'use strict';
+'use strict';
- const MONGOS_COUNT = 2;
+const MONGOS_COUNT = 2;
- var st = new ShardingTest({shards: 1, mongos: MONGOS_COUNT, config: 1});
+var st = new ShardingTest({shards: 1, mongos: MONGOS_COUNT, config: 1});
- var standalone = MongoRunner.runMongod();
+var standalone = MongoRunner.runMongod();
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
- // Wait for the background thread from the mongos to insert their entries before beginning
- // the tests.
- assert.soon(function() {
- return MONGOS_COUNT == mongos.getDB('config').mongos.count();
- });
-
- function grabStatusOutput(configdb, verbose) {
- var res = print.captureAllOutput(function() {
- return printShardingStatus(configdb, verbose);
- });
- var output = res.output.join("\n");
- jsTestLog(output);
- return output;
- }
+// Wait for the background thread on each mongos to insert its entry before beginning
+// the tests.
+assert.soon(function() {
+ return MONGOS_COUNT == mongos.getDB('config').mongos.count();
+});
- function assertPresentInOutput(output, content, what) {
- assert(output.includes(content),
- what + " \"" + content + "\" NOT present in output of " +
- "printShardingStatus() (but it should be)");
+function grabStatusOutput(configdb, verbose) {
+ var res = print.captureAllOutput(function() {
+ return printShardingStatus(configdb, verbose);
+ });
+ var output = res.output.join("\n");
+ jsTestLog(output);
+ return output;
+}
+
+function assertPresentInOutput(output, content, what) {
+ assert(output.includes(content),
+ what + " \"" + content + "\" NOT present in output of " +
+ "printShardingStatus() (but it should be)");
+}
+
+function assertNotPresentInOutput(output, content, what) {
+ assert(!output.includes(content),
+ what + " \"" + content + "\" IS present in output of " +
+ "printShardingStatus() (but it should not be)");
+}
+
+////////////////////////
+// Basic tests
+////////////////////////
+
+var dbName = "thisIsTheDatabase";
+var collName = "thisIsTheCollection";
+var shardKeyName = "thisIsTheShardKey";
+var nsName = dbName + "." + collName;
+
+assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+var key = {};
+key[shardKeyName] = 1;
+assert.commandWorked(admin.runCommand({shardCollection: nsName, key: key}));
+
+function testBasic(output) {
+ assertPresentInOutput(output, "shards:", "section header");
+ assertPresentInOutput(output, "databases:", "section header");
+ assertPresentInOutput(output, "balancer:", "section header");
+ assertPresentInOutput(output, "active mongoses:", "section header");
+ assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
+
+ assertPresentInOutput(output, dbName, "database");
+ assertPresentInOutput(output, collName, "collection");
+ assertPresentInOutput(output, shardKeyName, "shard key");
+}
+
+function testBasicNormalOnly(output) {
+ assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
+}
+
+function testBasicVerboseOnly(output) {
+ assertPresentInOutput(output, '"mongoVersion" : ' + tojson(version), "active mongos version");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
+}
+
+var buildinfo = assert.commandWorked(mongos.adminCommand("buildinfo"));
+var serverStatus1 = assert.commandWorked(mongos.adminCommand("serverStatus"));
+var serverStatus2 = assert.commandWorked(st.s1.adminCommand("serverStatus"));
+var version = buildinfo.version;
+var s1Host = serverStatus1.host;
+var s2Host = serverStatus2.host;
+
+// Normal, active mongoses
+var outputNormal = grabStatusOutput(st.config, false);
+testBasic(outputNormal);
+testBasicNormalOnly(outputNormal);
+
+var outputVerbose = grabStatusOutput(st.config, true);
+testBasic(outputVerbose);
+testBasicVerboseOnly(outputVerbose);
+
+// Take a copy of the config db, in order to test the harder-to-setup cases below.
+// Copy into a standalone to also test running printShardingStatus() against a config dump.
+var config = mongos.getDB("config");
+var configCopy = standalone.getDB("configCopy");
+config.getCollectionInfos().forEach(function(c) {
+ // Create collection with options.
+ assert.commandWorked(configCopy.createCollection(c.name, c.options));
+ // Clone the docs.
+ config.getCollection(c.name).find().hint({_id: 1}).forEach(function(d) {
+ assert.writeOK(configCopy.getCollection(c.name).insert(d));
+ });
+ // Build the indexes.
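+    // (The key is passed separately; ns and v are index metadata, not options.)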
+ config.getCollection(c.name).getIndexes().forEach(function(i) {
+ var key = i.key;
+ delete i.key;
+ delete i.ns;
+ delete i.v;
+ assert.commandWorked(configCopy.getCollection(c.name).ensureIndex(key, i));
+ });
+});
+
+// Inactive mongoses
+// Make the first ping older than now by 1 second more than the threshold.
+// Make the second ping older still by the same amount again.
+var pingAdjustMs = 60000 + 1000;
+var then = new Date();
+then.setTime(then.getTime() - pingAdjustMs);
+configCopy.mongos.update({_id: s1Host}, {$set: {ping: then}});
+then.setTime(then.getTime() - pingAdjustMs);
+configCopy.mongos.update({_id: s2Host}, {$set: {ping: then}});
+
+var output = grabStatusOutput(configCopy, false);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+var output = grabStatusOutput(configCopy, true);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
+assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
+
+// Older mongoses
+configCopy.mongos.remove({_id: s1Host});
+
+var output = grabStatusOutput(configCopy, false);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+var output = grabStatusOutput(configCopy, true);
+assertPresentInOutput(output, "most recently active mongoses:", "section header");
+assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
+assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
+
+// No mongoses at all
+configCopy.mongos.remove({});
+
+var output = grabStatusOutput(configCopy, false);
+assertPresentInOutput(output, "most recently active mongoses:\n none", "no mongoses");
+
+var output = grabStatusOutput(configCopy, true);
+assertPresentInOutput(
+ output, "most recently active mongoses:\n none", "no mongoses (verbose)");
+
+assert(mongos.getDB(dbName).dropDatabase());
+
+////////////////////////
+// Extended tests
+////////////////////////
+
+var testCollDetailsNum = 0;
+function testCollDetails(args) {
+ if (args === undefined || typeof (args) != "object") {
+ args = {};
}
- function assertNotPresentInOutput(output, content, what) {
- assert(!output.includes(content),
- what + " \"" + content + "\" IS present in output of " +
- "printShardingStatus() (but it should not be)");
- }
+ var getCollName = function(x) {
+ return "test.test" + x.zeroPad(4);
+ };
+ var collName = getCollName(testCollDetailsNum);
- ////////////////////////
- // Basic tests
- ////////////////////////
-
- var dbName = "thisIsTheDatabase";
- var collName = "thisIsTheCollection";
- var shardKeyName = "thisIsTheShardKey";
- var nsName = dbName + "." + collName;
-
- assert.commandWorked(admin.runCommand({enableSharding: dbName}));
- var key = {};
- key[shardKeyName] = 1;
- assert.commandWorked(admin.runCommand({shardCollection: nsName, key: key}));
-
- function testBasic(output) {
- assertPresentInOutput(output, "shards:", "section header");
- assertPresentInOutput(output, "databases:", "section header");
- assertPresentInOutput(output, "balancer:", "section header");
- assertPresentInOutput(output, "active mongoses:", "section header");
- assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
-
- assertPresentInOutput(output, dbName, "database");
- assertPresentInOutput(output, collName, "collection");
- assertPresentInOutput(output, shardKeyName, "shard key");
+ var cmdObj = {shardCollection: collName, key: {_id: 1}};
+ if (args.unique) {
+ cmdObj.unique = true;
}
+ assert.commandWorked(admin.runCommand(cmdObj));
- function testBasicNormalOnly(output) {
- assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
+ if (args.hasOwnProperty("unique")) {
+ assert.writeOK(mongos.getDB("config").collections.update({_id: collName},
+ {$set: {"unique": args.unique}}));
}
-
- function testBasicVerboseOnly(output) {
- assertPresentInOutput(
- output, '"mongoVersion" : ' + tojson(version), "active mongos version");
- assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
- assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
+ if (args.hasOwnProperty("noBalance")) {
+ assert.writeOK(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"noBalance": args.noBalance}}));
}
- var buildinfo = assert.commandWorked(mongos.adminCommand("buildinfo"));
- var serverStatus1 = assert.commandWorked(mongos.adminCommand("serverStatus"));
- var serverStatus2 = assert.commandWorked(st.s1.adminCommand("serverStatus"));
- var version = buildinfo.version;
- var s1Host = serverStatus1.host;
- var s2Host = serverStatus2.host;
-
- // Normal, active mongoses
- var outputNormal = grabStatusOutput(st.config, false);
- testBasic(outputNormal);
- testBasicNormalOnly(outputNormal);
-
- var outputVerbose = grabStatusOutput(st.config, true);
- testBasic(outputVerbose);
- testBasicVerboseOnly(outputVerbose);
-
- // Take a copy of the config db, in order to test the harder-to-setup cases below.
- // Copy into a standalone to also test running printShardingStatus() against a config dump.
- var config = mongos.getDB("config");
- var configCopy = standalone.getDB("configCopy");
- config.getCollectionInfos().forEach(function(c) {
- // Create collection with options.
- assert.commandWorked(configCopy.createCollection(c.name, c.options));
- // Clone the docs.
- config.getCollection(c.name).find().hint({_id: 1}).forEach(function(d) {
- assert.writeOK(configCopy.getCollection(c.name).insert(d));
- });
- // Build the indexes.
- config.getCollection(c.name).getIndexes().forEach(function(i) {
- var key = i.key;
- delete i.key;
- delete i.ns;
- delete i.v;
- assert.commandWorked(configCopy.getCollection(c.name).ensureIndex(key, i));
- });
- });
-
- // Inactive mongoses
- // Make the first ping be older than now by 1 second more than the threshold
- // Make the second ping be older still by the same amount again
- var pingAdjustMs = 60000 + 1000;
- var then = new Date();
- then.setTime(then.getTime() - pingAdjustMs);
- configCopy.mongos.update({_id: s1Host}, {$set: {ping: then}});
- then.setTime(then.getTime() - pingAdjustMs);
- configCopy.mongos.update({_id: s2Host}, {$set: {ping: then}});
-
- var output = grabStatusOutput(configCopy, false);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
- var output = grabStatusOutput(configCopy, true);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
- assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
+ var output = grabStatusOutput(st.config);
- // Older mongoses
- configCopy.mongos.remove({_id: s1Host});
+ assertPresentInOutput(output, collName, "collection");
- var output = grabStatusOutput(configCopy, false);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
- var output = grabStatusOutput(configCopy, true);
- assertPresentInOutput(output, "most recently active mongoses:", "section header");
- assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
- assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
+ // If any of the previous collection names are present, then their optional indicators
+ // might also be present. This might taint the results when we go searching through
+ // the output.
+ // This also means that earlier collNames can't be a prefix of later collNames.
+ for (var i = 0; i < testCollDetailsNum; i++) {
+ assertNotPresentInOutput(output, getCollName(i), "previous collection");
+ }
- // No mongoses at all
- configCopy.mongos.remove({});
+ assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
- var output = grabStatusOutput(configCopy, false);
- assertPresentInOutput(output, "most recently active mongoses:\n none", "no mongoses");
+ if (args.hasOwnProperty("unique") && typeof (args.unique) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(output, tojson(args.unique), "unique shard key indicator (non bool)");
+ }
- var output = grabStatusOutput(configCopy, true);
assertPresentInOutput(
- output, "most recently active mongoses:\n none", "no mongoses (verbose)");
-
- assert(mongos.getDB(dbName).dropDatabase());
-
- ////////////////////////
- // Extended tests
- ////////////////////////
-
- var testCollDetailsNum = 0;
- function testCollDetails(args) {
- if (args === undefined || typeof(args) != "object") {
- args = {};
- }
-
- var getCollName = function(x) {
- return "test.test" + x.zeroPad(4);
- };
- var collName = getCollName(testCollDetailsNum);
-
- var cmdObj = {shardCollection: collName, key: {_id: 1}};
- if (args.unique) {
- cmdObj.unique = true;
- }
- assert.commandWorked(admin.runCommand(cmdObj));
-
- if (args.hasOwnProperty("unique")) {
- assert.writeOK(mongos.getDB("config").collections.update(
- {_id: collName}, {$set: {"unique": args.unique}}));
- }
- if (args.hasOwnProperty("noBalance")) {
- assert.writeOK(mongos.getDB("config").collections.update(
- {_id: collName}, {$set: {"noBalance": args.noBalance}}));
- }
-
- var output = grabStatusOutput(st.config);
-
- assertPresentInOutput(output, collName, "collection");
-
- // If any of the previous collection names are present, then their optional indicators
- // might also be present. This might taint the results when we go searching through
- // the output.
- // This also means that earlier collNames can't be a prefix of later collNames.
- for (var i = 0; i < testCollDetailsNum; i++) {
- assertNotPresentInOutput(output, getCollName(i), "previous collection");
- }
-
- assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
-
- if (args.hasOwnProperty("unique") && typeof(args.unique) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(
- output, tojson(args.unique), "unique shard key indicator (non bool)");
- }
-
- assertPresentInOutput(output,
- "balancing: " + (!args.noBalance),
- "balancing indicator (inverse of noBalance)");
- if (args.hasOwnProperty("noBalance") && typeof(args.noBalance) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
- }
-
- try {
- mongos.getCollection(collName).drop();
- } catch (e) {
- // Ignore drop errors because they are from the illegal values in the collection entry
- assert.writeOK(mongos.getDB("config").collections.remove({_id: collName}));
- }
-
- testCollDetailsNum++;
+ output, "balancing: " + (!args.noBalance), "balancing indicator (inverse of noBalance)");
+ if (args.hasOwnProperty("noBalance") && typeof (args.noBalance) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
+ }
+
+ try {
+ mongos.getCollection(collName).drop();
+ } catch (e) {
+        // Ignore drop errors; they come from the illegal values in the collection entry.
+ assert.writeOK(mongos.getDB("config").collections.remove({_id: collName}));
}
- assert.commandWorked(admin.runCommand({enableSharding: "test"}));
+ testCollDetailsNum++;
+}
+
+assert.commandWorked(admin.runCommand({enableSharding: "test"}));
- // Defaults
- testCollDetails({});
+// Defaults
+testCollDetails({});
- // Expected values
- testCollDetails({unique: false, noBalance: false});
- testCollDetails({unique: true, noBalance: true});
+// Expected values
+testCollDetails({unique: false, noBalance: false});
+testCollDetails({unique: true, noBalance: true});
- // Unexpected truthy values
- testCollDetails({unique: "truthy unique value 1", noBalance: "truthy noBalance value 1"});
- testCollDetails({unique: 1, noBalance: 1});
- testCollDetails({unique: -1, noBalance: -1});
- testCollDetails({unique: {}, noBalance: {}});
+// Unexpected truthy values
+testCollDetails({unique: "truthy unique value 1", noBalance: "truthy noBalance value 1"});
+testCollDetails({unique: 1, noBalance: 1});
+testCollDetails({unique: -1, noBalance: -1});
+testCollDetails({unique: {}, noBalance: {}});
- // Unexpected falsy values
- testCollDetails({unique: "", noBalance: ""});
- testCollDetails({unique: 0, noBalance: 0});
+// Unexpected falsy values
+testCollDetails({unique: "", noBalance: ""});
+testCollDetails({unique: 0, noBalance: 0});
- assert(mongos.getDB("test").dropDatabase());
+assert(mongos.getDB("test").dropDatabase());
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/query_after_multi_write.js b/jstests/sharding/query_after_multi_write.js
index 4cfff22be4a..3405d0e2b40 100644
--- a/jstests/sharding/query_after_multi_write.js
+++ b/jstests/sharding/query_after_multi_write.js
@@ -1,63 +1,62 @@
(function() {
- "use strict";
+"use strict";
- /**
- * Test that queries will be properly routed after executing a write that does not
- * perform any shard version checks.
- */
- var runTest = function(writeFunc) {
- var st = new ShardingTest({shards: 2, mongos: 2});
+/**
+ * Test that queries will be properly routed after executing a write that does not
+ * perform any shard version checks.
+ */
+var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
- var testDB = st.s.getDB('test');
- testDB.dropDatabase();
+ var testDB = st.s.getDB('test');
+ testDB.dropDatabase();
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({x: 123456});
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Issue a query and make sure it gets routed to the right shard.
- assert.neq(null, testDB2.user.findOne({x: 123456}));
+ // Issue a query and make sure it gets routed to the right shard.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- // Issue a query and make sure it gets routed to the right shard again.
- assert.neq(null, testDB2.user.findOne({x: 123456}));
+ // Issue a query and make sure it gets routed to the right shard again.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- // Ensure that write commands with multi version do not reset the connection shard version
- // to
- // ignored.
- writeFunc(testDB2);
+    // Ensure that write commands with multi version do not reset the connection
+    // shard version to ignored.
+ writeFunc(testDB2);
- assert.neq(null, testDB2.user.findOne({x: 123456}));
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
- st.stop();
- };
+ st.stop();
+};
- runTest(function(db) {
- db.user.update({}, {$inc: {y: 987654}}, false, true);
- });
-
- runTest(function(db) {
- db.user.remove({y: 'noMatch'}, false);
- });
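+// Exercise both multi-write paths: a multi-update and a multi-document remove.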
+runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+});
+runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+});
})();
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index b760af12609..65739b2b9a5 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -1,357 +1,344 @@
// Tests user queries over the config servers.
(function() {
- 'use strict';
-
- var getListCollectionsCursor = function(database, options, subsequentBatchSize) {
- return new DBCommandCursor(
- database, database.runCommand("listCollections", options), subsequentBatchSize);
- };
-
- var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
- return new DBCommandCursor(
- coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize);
- };
-
- var arrayGetNames = function(array) {
- return array.map(function(spec) {
- return spec.name;
- });
- };
-
- var cursorGetCollectionNames = function(cursor) {
- return arrayGetNames(cursor.toArray());
- };
-
- var sortArrayByName = function(array) {
- return array.sort(function(a, b) {
- return a.name > b.name;
- });
- };
-
- var cursorGetIndexNames = function(cursor) {
- return arrayGetNames(sortArrayByName(cursor.toArray()));
- };
-
- var sortArrayById = function(array) {
- return array.sort(function(a, b) {
- return a._id > b._id;
- });
- };
-
- var dropCollectionIfExists = function(coll) {
- try {
- coll.drop();
- } catch (err) {
- assert.eq(err.code, ErrorCodes.NamespaceNotFound);
- }
- };
-
- /**
- * Sets up the test database with with several sharded collections.
- *
- * @return The list of collection namespaces that were added to the test database.
- */
- var setupTestCollections = function(st) {
- // testKeys and testCollNames are parallel arrays, testKeys contains the shard key of the
- // corresponding collection whose name is in testCollNames.
- var testCollNames = ["4a1", "1a12", "3a1b1", "2a1b1c1", "b1", "b1c1", "d1"];
- var testKeys =
- [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1}, {d: 1}];
- var testDB = st.s.getDB("test");
-
- assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- var testNamespaces = testCollNames.map(function(e) {
- return testDB.getName() + "." + e;
- });
- for (var i = 0; i < testKeys.length; i++) {
- assert.commandWorked(
- st.s.adminCommand({shardcollection: testNamespaces[i], key: testKeys[i]}));
- }
-
- return testNamespaces;
- };
-
- /**
- * Test that a list collections query works on the config database. This test cannot detect
- * whether list collections lists extra collections.
- */
- var testListConfigCollections = function(st) {
- // This test depends on all the collections in the configCollList being in the config
- // database.
- var configCollList = [
- "chunks",
- "collections",
- "databases",
- "lockpings",
- "locks",
- "shards",
- "tags",
- "version"
- ];
- var configDB = st.s.getDB("config");
- var userAddedColl = configDB.userAddedColl;
- var cursor;
- var cursorArray;
-
- dropCollectionIfExists(userAddedColl);
- configDB.createCollection(userAddedColl.getName());
- configCollList.push(userAddedColl.getName());
-
- cursor = getListCollectionsCursor(configDB);
- cursorArray = cursorGetCollectionNames(cursor);
- for (var i = 0; i < configCollList.length; i++) {
- assert(cursorArray.indexOf(configCollList[i]) > -1, "Missing " + configCollList[i]);
+'use strict';
+
+var getListCollectionsCursor = function(database, options, subsequentBatchSize) {
+ return new DBCommandCursor(
+ database, database.runCommand("listCollections", options), subsequentBatchSize);
+};
+
+var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
+ return new DBCommandCursor(
+ coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize);
+};
+
+var arrayGetNames = function(array) {
+ return array.map(function(spec) {
+ return spec.name;
+ });
+};
+
+var cursorGetCollectionNames = function(cursor) {
+ return arrayGetNames(cursor.toArray());
+};
+
+var sortArrayByName = function(array) {
+ return array.sort(function(a, b) {
+        // Return a signed value rather than a boolean so the comparator is well-defined.
+        return a.name == b.name ? 0 : (a.name > b.name ? 1 : -1);
+ });
+};
+
+var cursorGetIndexNames = function(cursor) {
+ return arrayGetNames(sortArrayByName(cursor.toArray()));
+};
+
+var sortArrayById = function(array) {
+ return array.sort(function(a, b) {
+        // Return a signed value rather than a boolean so the comparator is well-defined.
+        return a._id == b._id ? 0 : (a._id > b._id ? 1 : -1);
+ });
+};
+
+var dropCollectionIfExists = function(coll) {
+ try {
+ coll.drop();
+ } catch (err) {
+ assert.eq(err.code, ErrorCodes.NamespaceNotFound);
+ }
+};
+
+/**
+ * Sets up the test database with several sharded collections.
+ *
+ * @return The list of collection namespaces that were added to the test database.
+ */
+var setupTestCollections = function(st) {
+    // testKeys and testCollNames are parallel arrays; testKeys contains the shard key of the
+    // corresponding collection whose name is in testCollNames.
+ var testCollNames = ["4a1", "1a12", "3a1b1", "2a1b1c1", "b1", "b1c1", "d1"];
+ var testKeys = [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1}, {d: 1}];
+ var testDB = st.s.getDB("test");
+
+ assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
+ var testNamespaces = testCollNames.map(function(e) {
+ return testDB.getName() + "." + e;
+ });
+ for (var i = 0; i < testKeys.length; i++) {
+ assert.commandWorked(
+ st.s.adminCommand({shardcollection: testNamespaces[i], key: testKeys[i]}));
+ }
+
+ return testNamespaces;
+};
+
+/**
+ * Test that a list collections query works on the config database. This test cannot detect
+ * whether list collections lists extra collections.
+ */
+var testListConfigCollections = function(st) {
+    // This test depends on all of the collections in configCollList being present in the
+    // config database.
+ var configCollList =
+ ["chunks", "collections", "databases", "lockpings", "locks", "shards", "tags", "version"];
+ var configDB = st.s.getDB("config");
+ var userAddedColl = configDB.userAddedColl;
+ var cursor;
+ var cursorArray;
+
+ dropCollectionIfExists(userAddedColl);
+ configDB.createCollection(userAddedColl.getName());
+ configCollList.push(userAddedColl.getName());
+
+ cursor = getListCollectionsCursor(configDB);
+ cursorArray = cursorGetCollectionNames(cursor);
+ for (var i = 0; i < configCollList.length; i++) {
+ assert(cursorArray.indexOf(configCollList[i]) > -1, "Missing " + configCollList[i]);
+ }
+
+ cursor = getListCollectionsCursor(configDB, {cursor: {batchSize: 1}}, 1);
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert(cursorArray.indexOf(cursor.next().name) > -1);
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert(cursorArray.indexOf(cursor.next().name) > -1);
+
+ assert(userAddedColl.drop());
+};
+
+/**
+ * Test that a list indexes query works on the chunks collection of the config database.
+ */
+var testListConfigChunksIndexes = function(st) {
+    // This test depends on configChunksIndexes listing exactly the indexes on the
+    // config.chunks collection.
+ var configChunksIndexes = ["_id_", "ns_1_lastmod_1", "ns_1_min_1", "ns_1_shard_1_min_1"];
+ var configDB = st.s.getDB("config");
+ var cursor;
+ var cursorArray = [];
+
+ cursor = getListIndexesCursor(configDB.chunks);
+ assert.eq(cursorGetIndexNames(cursor), configChunksIndexes);
+
+ cursor = getListIndexesCursor(configDB.chunks, {cursor: {batchSize: 2}}, 2);
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ cursorArray.push(cursor.next());
+ cursorArray.push(cursor.next());
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ cursorArray.push(cursor.next());
+ cursorArray.push(cursor.next());
+ assert(!cursor.hasNext());
+ assert.eq(arrayGetNames(sortArrayByName(cursorArray)), configChunksIndexes);
+};
+
+/**
+ * Test queries over the collections collection of the config database.
+ */
+var queryConfigCollections = function(st, testNamespaces) {
+ var configDB = st.s.getDB("config");
+ var cursor;
+
+ // Find query.
+ cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
+ .sort({"_id": 1})
+ .batchSize(2);
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
+ assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[2], dropped: false, key: {a: 1}});
+ assert.eq(cursor.next(), {_id: testNamespaces[0], dropped: false, key: {a: 1}});
+ assert(!cursor.hasNext());
+
+ // Aggregate query.
+ cursor = configDB.collections.aggregate(
+ [
+ {$match: {"key.b": 1}},
+ {$sort: {"_id": 1}},
+ {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
+ ],
+ {cursor: {batchSize: 2}});
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
+ assert.eq(cursor.next(), {_id: testNamespaces[2], keyb: 1});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: testNamespaces[4], keyb: 1});
+ assert.eq(cursor.next(), {_id: testNamespaces[5], keyb: 1, keyc: 1});
+ assert(!cursor.hasNext());
+};
+
+/**
+ * Test queries over the chunks collection of the config database.
+ */
+var queryConfigChunks = function(st) {
+ var configDB = st.s.getDB("config");
+ var testDB = st.s.getDB("test2");
+ var testColl = testDB.testColl;
+ var testCollData = [{e: 1}, {e: 3}, {e: 4}, {e: 5}, {e: 7}, {e: 9}, {e: 10}, {e: 12}];
+ var cursor;
+ var result;
+
+ // Get shard names.
+ cursor = configDB.shards.find().sort({_id: 1});
+ var shard1 = cursor.next()._id;
+ var shard2 = cursor.next()._id;
+ assert(!cursor.hasNext());
+ assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
+ st.ensurePrimaryShard(testDB.getName(), shard1);
+
+ // Setup.
+ assert.commandWorked(st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
+ for (var i = 0; i < testCollData.length; i++) {
+ assert.writeOK(testColl.insert(testCollData[i]));
+ }
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 2}}));
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 8}}));
+ assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 11}}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 1}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 9}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
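+    // Layout is now five chunks: [-inf, 2), [8, 11), and [11, inf) on shard2, with
+    // [2, 6) and [6, 8) remaining on shard1.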
+
+ // Find query.
+ cursor = configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
+ .sort({"min.e": 1});
+ assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
+ assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
+ assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
+ assert.eq(cursor.next(), {min: {e: 8}, max: {e: 11}, shard: shard2});
+ assert.eq(cursor.next(), {min: {e: 11}, max: {e: {"$maxKey": 1}}, shard: shard2});
+ assert(!cursor.hasNext());
+
+ // Count query with filter.
+ assert.eq(configDB.chunks.count({ns: testColl.getFullName()}), 5);
+
+ // Distinct query.
+ assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
+
+ // Map reduce query.
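+    // (The map function itself filters on ns, so only chunks of test2.testColl are counted.)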
+ var mapFunction = function() {
+ if (this.ns == "test2.testColl") {
+ emit(this.shard, 1);
}
-
- cursor = getListCollectionsCursor(configDB, {cursor: {batchSize: 1}}, 1);
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert(cursorArray.indexOf(cursor.next().name) > -1);
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert(cursorArray.indexOf(cursor.next().name) > -1);
-
- assert(userAddedColl.drop());
};
-
- /**
- * Test that a list indexes query works on the chunks collection of the config database.
- */
- var testListConfigChunksIndexes = function(st) {
- // This test depends on all the indexes in the configChunksIndexes being the exact indexes
- // in the config chunks collection.
- var configChunksIndexes = ["_id_", "ns_1_lastmod_1", "ns_1_min_1", "ns_1_shard_1_min_1"];
- var configDB = st.s.getDB("config");
- var cursor;
- var cursorArray = [];
-
- cursor = getListIndexesCursor(configDB.chunks);
- assert.eq(cursorGetIndexNames(cursor), configChunksIndexes);
-
- cursor = getListIndexesCursor(configDB.chunks, {cursor: {batchSize: 2}}, 2);
- assert.eq(cursor.objsLeftInBatch(), 2);
- cursorArray.push(cursor.next());
- cursorArray.push(cursor.next());
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- cursorArray.push(cursor.next());
- cursorArray.push(cursor.next());
- assert(!cursor.hasNext());
- assert.eq(arrayGetNames(sortArrayByName(cursorArray)), configChunksIndexes);
+ var reduceFunction = function(key, values) {
+ return {chunks: values.length};
};
-
- /**
- * Test queries over the collections collection of the config database.
- */
- var queryConfigCollections = function(st, testNamespaces) {
- var configDB = st.s.getDB("config");
- var cursor;
-
- // Find query.
- cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
- .sort({"_id": 1})
- .batchSize(2);
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
- assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[2], dropped: false, key: {a: 1}});
- assert.eq(cursor.next(), {_id: testNamespaces[0], dropped: false, key: {a: 1}});
- assert(!cursor.hasNext());
-
- // Aggregate query.
- cursor = configDB.collections.aggregate(
- [
- {$match: {"key.b": 1}},
- {$sort: {"_id": 1}},
- {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
- ],
- {cursor: {batchSize: 2}});
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
- assert.eq(cursor.next(), {_id: testNamespaces[2], keyb: 1});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: testNamespaces[4], keyb: 1});
- assert.eq(cursor.next(), {_id: testNamespaces[5], keyb: 1, keyc: 1});
- assert(!cursor.hasNext());
+ result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
+ assert.eq(result.ok, 1);
+ assert.eq(sortArrayById(result.results),
+ [{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
+};
+
+/**
+ * Test queries over a user created collection of an arbitrary database on the config servers.
+ */
+var queryUserCreated = function(database) {
+ var userColl = database.userColl;
+ var userCollData = [
+ {_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
+ {_id: 2, g: 1, c: 5, s: "b", u: [1]},
+ {_id: 3, g: 2, c: 16, s: "g", u: [3]},
+ {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
+ {_id: 5, g: 2, c: 18, s: "d", u: [3]},
+ {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
+ {_id: 7, g: 3, c: 2, s: "f", u: [1]}
+ ];
+ var userCollIndexes = ["_id_", "s_1"];
+ var cursor;
+ var cursorArray;
+ var result;
+
+ // Setup.
+ dropCollectionIfExists(userColl);
+ for (var i = 0; i < userCollData.length; i++) {
+ assert.writeOK(userColl.insert(userCollData[i]));
+ }
+ assert.commandWorked(userColl.createIndex({s: 1}));
+
+ // List indexes.
+ cursorArray = [];
+ cursor = getListIndexesCursor(userColl, {cursor: {batchSize: 1}}, 1);
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ cursorArray.push(cursor.next());
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ cursorArray.push(cursor.next());
+ assert(!cursor.hasNext());
+ assert.eq(arrayGetNames(sortArrayByName(cursorArray)), userCollIndexes);
+
+ // Find query.
+ cursor = userColl.find({g: {$gte: 2}}, {_id: 0, c: 1}).sort({s: 1}).batchSize(2);
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {c: 1});
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {c: 18});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {c: 11});
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {c: 2});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {c: 16});
+ assert(!cursor.hasNext());
+
+ // Aggregate query.
+ cursor = userColl.aggregate(
+ [
+ {$match: {c: {$gt: 1}}},
+ {$unwind: "$u"},
+ {$group: {_id: "$u", sum: {$sum: "$c"}}},
+ {$sort: {_id: 1}}
+ ],
+ {cursor: {batchSize: 2}});
+ assert.eq(cursor.objsLeftInBatch(), 2);
+ assert.eq(cursor.next(), {_id: 1, sum: 11});
+ assert.eq(cursor.next(), {_id: 2, sum: 15});
+ assert(cursor.hasNext());
+ assert.eq(cursor.objsLeftInBatch(), 1);
+ assert.eq(cursor.next(), {_id: 3, sum: 45});
+ assert(!cursor.hasNext());
+
+ // Count query without filter.
+ assert.eq(userColl.count(), userCollData.length);
+
+ // Count query with filter.
+ assert.eq(userColl.count({g: 2}), 3);
+
+ // Distinct query.
+ assert.eq(userColl.distinct("g").sort(), [1, 2, 3]);
+
+ // Map reduce query.
+ var mapFunction = function() {
+ emit(this.g, 1);
};
-
- /**
- * Test queries over the chunks collection of the config database.
- */
- var queryConfigChunks = function(st) {
- var configDB = st.s.getDB("config");
- var testDB = st.s.getDB("test2");
- var testColl = testDB.testColl;
- var testCollData = [{e: 1}, {e: 3}, {e: 4}, {e: 5}, {e: 7}, {e: 9}, {e: 10}, {e: 12}];
- var cursor;
- var result;
-
- // Get shard names.
- cursor = configDB.shards.find().sort({_id: 1});
- var shard1 = cursor.next()._id;
- var shard2 = cursor.next()._id;
- assert(!cursor.hasNext());
- assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), shard1);
-
- // Setup.
- assert.commandWorked(
- st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
- for (var i = 0; i < testCollData.length; i++) {
- assert.writeOK(testColl.insert(testCollData[i]));
- }
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 2}}));
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 8}}));
- assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 11}}));
- assert.commandWorked(
- st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 1}, to: shard2}));
- assert.commandWorked(
- st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 9}, to: shard2}));
- assert.commandWorked(
- st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
-
- // Find query.
- cursor =
- configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
- .sort({"min.e": 1});
- assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
- assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
- assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
- assert.eq(cursor.next(), {min: {e: 8}, max: {e: 11}, shard: shard2});
- assert.eq(cursor.next(), {min: {e: 11}, max: {e: {"$maxKey": 1}}, shard: shard2});
- assert(!cursor.hasNext());
-
- // Count query with filter.
- assert.eq(configDB.chunks.count({ns: testColl.getFullName()}), 5);
-
- // Distinct query.
- assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
-
- // Map reduce query.
- var mapFunction = function() {
- if (this.ns == "test2.testColl") {
- emit(this.shard, 1);
- }
- };
- var reduceFunction = function(key, values) {
- return {chunks: values.length};
- };
- result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
- assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results),
- [{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
+ var reduceFunction = function(key, values) {
+ return {count: values.length};
};
-
- /**
- * Test queries over a user created collection of an arbitrary database on the config servers.
- */
- var queryUserCreated = function(database) {
- var userColl = database.userColl;
- var userCollData = [
- {_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
- {_id: 2, g: 1, c: 5, s: "b", u: [1]},
- {_id: 3, g: 2, c: 16, s: "g", u: [3]},
- {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
- {_id: 5, g: 2, c: 18, s: "d", u: [3]},
- {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
- {_id: 7, g: 3, c: 2, s: "f", u: [1]}
- ];
- var userCollIndexes = ["_id_", "s_1"];
- var cursor;
- var cursorArray;
- var result;
-
- // Setup.
- dropCollectionIfExists(userColl);
- for (var i = 0; i < userCollData.length; i++) {
- assert.writeOK(userColl.insert(userCollData[i]));
- }
- assert.commandWorked(userColl.createIndex({s: 1}));
-
- // List indexes.
- cursorArray = [];
- cursor = getListIndexesCursor(userColl, {cursor: {batchSize: 1}}, 1);
- assert.eq(cursor.objsLeftInBatch(), 1);
- cursorArray.push(cursor.next());
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- cursorArray.push(cursor.next());
- assert(!cursor.hasNext());
- assert.eq(arrayGetNames(sortArrayByName(cursorArray)), userCollIndexes);
-
- // Find query.
- cursor = userColl.find({g: {$gte: 2}}, {_id: 0, c: 1}).sort({s: 1}).batchSize(2);
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {c: 1});
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {c: 18});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {c: 11});
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {c: 2});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {c: 16});
- assert(!cursor.hasNext());
-
- // Aggregate query.
- cursor = userColl.aggregate(
- [
- {$match: {c: {$gt: 1}}},
- {$unwind: "$u"},
- {$group: {_id: "$u", sum: {$sum: "$c"}}},
- {$sort: {_id: 1}}
- ],
- {cursor: {batchSize: 2}});
- assert.eq(cursor.objsLeftInBatch(), 2);
- assert.eq(cursor.next(), {_id: 1, sum: 11});
- assert.eq(cursor.next(), {_id: 2, sum: 15});
- assert(cursor.hasNext());
- assert.eq(cursor.objsLeftInBatch(), 1);
- assert.eq(cursor.next(), {_id: 3, sum: 45});
- assert(!cursor.hasNext());
-
- // Count query without filter.
- assert.eq(userColl.count(), userCollData.length);
-
- // Count query with filter.
- assert.eq(userColl.count({g: 2}), 3);
-
- // Distinct query.
- assert.eq(userColl.distinct("g").sort(), [1, 2, 3]);
-
- // Map reduce query.
- var mapFunction = function() {
- emit(this.g, 1);
- };
- var reduceFunction = function(key, values) {
- return {count: values.length};
- };
- result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
- assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results), [
- {_id: 1, value: {count: 2}},
- {_id: 2, value: {count: 3}},
- {_id: 3, value: {count: 2}}
- ]);
-
- assert(userColl.drop());
- };
-
- var st = new ShardingTest({shards: 2, mongos: 1});
- var testNamespaces = setupTestCollections(st);
- var configDB = st.s.getDB("config");
- var adminDB = st.s.getDB("admin");
-
- testListConfigCollections(st);
- testListConfigChunksIndexes(st);
- queryConfigCollections(st, testNamespaces);
- queryConfigChunks(st);
- queryUserCreated(configDB);
- queryUserCreated(adminDB);
- st.stop();
+ result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
+ assert.eq(result.ok, 1);
+ assert.eq(
+ sortArrayById(result.results),
+ [{_id: 1, value: {count: 2}}, {_id: 2, value: {count: 3}}, {_id: 3, value: {count: 2}}]);
+
+ assert(userColl.drop());
+};
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+var testNamespaces = setupTestCollections(st);
+var configDB = st.s.getDB("config");
+var adminDB = st.s.getDB("admin");
+
+testListConfigCollections(st);
+testListConfigChunksIndexes(st);
+queryConfigCollections(st, testNamespaces);
+queryConfigChunks(st);
+queryUserCreated(configDB);
+queryUserCreated(adminDB);
+st.stop();
})();
diff --git a/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js b/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js
index 1e3f85f499b..60c654404e5 100644
--- a/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js
+++ b/jstests/sharding/range_deleter_does_not_block_stepdown_with_prepare_conflict.js
@@ -15,67 +15,66 @@
* @tags: [uses_transactions, uses_multi_shard_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
- TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
- // Helper to add generic txn fields to a command.
- function addTxnFieldsToCmd(cmd, lsid, txnNumber) {
- return Object.extend(
- cmd, {lsid, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(0), autocommit: false});
- }
+// Helper to add generic txn fields to a command.
+function addTxnFieldsToCmd(cmd, lsid, txnNumber) {
+ return Object.extend(
+ cmd, {lsid, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(0), autocommit: false});
+}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: [{verbose: 1}, {verbose: 1}], config: 1});
+const st = new ShardingTest({shards: [{verbose: 1}, {verbose: 1}], config: 1});
- // Set up sharded collection with two chunks - [-inf, 0), [0, inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+// Set up a sharded collection with two chunks: [-inf, 0) and [0, inf).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- st.rs0.getPrimary().adminCommand(
- {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
- // Move a chunk away from Shard0 (the donor) so its range deleter will asynchronously delete the
- // chunk's range. Flush its metadata to avoid StaleConfig during the later transaction.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard1.shardName}));
- assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
+// Move a chunk away from Shard0 (the donor) so its range deleter will asynchronously delete the
+// chunk's range. Flush its metadata to avoid StaleConfig during the later transaction.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard1.shardName}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
- // Insert a doc into the chunk still owned by the donor shard in a transaction then prepare the
- // transaction so readers of that doc will enter a prepare conflict retry loop.
- const lsid = {id: UUID()};
- const txnNumber = 0;
- assert.commandWorked(st.s.getDB(dbName).runCommand(addTxnFieldsToCmd(
- {insert: collName, documents: [{_id: -5}], startTransaction: true}, lsid, txnNumber)));
+// Insert a doc into the chunk still owned by the donor shard in a transaction then prepare the
+// transaction so readers of that doc will enter a prepare conflict retry loop.
+const lsid = {
+ id: UUID()
+};
+const txnNumber = 0;
+assert.commandWorked(st.s.getDB(dbName).runCommand(addTxnFieldsToCmd(
+ {insert: collName, documents: [{_id: -5}], startTransaction: true}, lsid, txnNumber)));
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- addTxnFieldsToCmd({prepareTransaction: 1}, lsid, txnNumber)));
+assert.commandWorked(
+ st.rs0.getPrimary().adminCommand(addTxnFieldsToCmd({prepareTransaction: 1}, lsid, txnNumber)));
- // Set a failpoint to hang right after beginning the index scan for documents to delete.
- st.rs0.getPrimary().adminCommand(
- {configureFailPoint: 'hangBeforeDoingDeletion', mode: 'alwaysOn'});
+// Set a failpoint to hang right after beginning the index scan for documents to delete.
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'hangBeforeDoingDeletion', mode: 'alwaysOn'});
- // Allow the range deleter to run. It should get stuck in a prepare conflict retry loop.
- st.rs0.getPrimary().adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
+// Allow the range deleter to run. It should get stuck in a prepare conflict retry loop.
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
- // Wait until we've started the index scan to delete documents.
- waitForFailpoint("Hit hangBeforeDoingDeletion failpoint", 1);
+// Wait until we've started the index scan to delete documents.
+waitForFailpoint("Hit hangBeforeDoingDeletion failpoint", 1);
- // Let the deletion continue.
- st.rs0.getPrimary().adminCommand({configureFailPoint: 'hangBeforeDoingDeletion', mode: 'off'});
+// Let the deletion continue.
+st.rs0.getPrimary().adminCommand({configureFailPoint: 'hangBeforeDoingDeletion', mode: 'off'});
- // Attempt to step down the primary.
- assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
+// Attempt to step down the primary.
+assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
-    // Clean up the transaction so the sharding test can shut down.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- addTxnFieldsToCmd({abortTransaction: 1}, lsid, txnNumber)));
+// Clean up the transaction so the sharding test can shut down.
+assert.commandWorked(
+ st.rs0.getPrimary().adminCommand(addTxnFieldsToCmd({abortTransaction: 1}, lsid, txnNumber)));
- st.stop();
+st.stop();
})();
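The test above relies on the usual failpoint choreography: suspend the range deleter, set up the prepare conflict, then release the failpoints one at a time. A minimal sketch of the underlying on/off pattern, assuming `conn` is a connection to a mongod and `fpName` names a failpoint that exists on that server (the wrapper itself is hypothetical, not a shared test helper):

    // Enable a failpoint, run a callback, and always disable it afterwards.
    function withFailPoint(conn, fpName, fn) {
        assert.commandWorked(
            conn.adminCommand({configureFailPoint: fpName, mode: 'alwaysOn'}));
        try {
            fn();  // code expected to observe the failpoint
        } finally {
            assert.commandWorked(
                conn.adminCommand({configureFailPoint: fpName, mode: 'off'}));
        }
    }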
diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js
index ce8c5201179..11be1022efd 100644
--- a/jstests/sharding/read_after_optime.js
+++ b/jstests/sharding/read_after_optime.js
@@ -1,49 +1,49 @@
 // Test read after opTime functionality with maxTimeMS on config servers (CSRS only).
(function() {
- 'use strict';
+'use strict';
- var shardingTest = new ShardingTest({shards: 0});
- assert(shardingTest.configRS, 'this test requires config servers to run in CSRS mode');
+var shardingTest = new ShardingTest({shards: 0});
+assert(shardingTest.configRS, 'this test requires config servers to run in CSRS mode');
- var configReplSetTest = shardingTest.configRS;
- var primaryConn = configReplSetTest.getPrimary();
+var configReplSetTest = shardingTest.configRS;
+var primaryConn = configReplSetTest.getPrimary();
- var lastOp = configReplSetTest.awaitLastOpCommitted();
- assert(lastOp, 'invalid op returned from ReplSetTest.awaitLastOpCommitted()');
+var lastOp = configReplSetTest.awaitLastOpCommitted();
+assert(lastOp, 'invalid op returned from ReplSetTest.awaitLastOpCommitted()');
- var config = configReplSetTest.getReplSetConfigFromNode();
- var term = lastOp.t;
+var config = configReplSetTest.getReplSetConfigFromNode();
+var term = lastOp.t;
- var runFindCommand = function(ts) {
- return primaryConn.getDB('local').runCommand({
- find: 'oplog.rs',
- readConcern: {
- afterOpTime: {
- ts: ts,
- t: term,
- },
+var runFindCommand = function(ts) {
+ return primaryConn.getDB('local').runCommand({
+ find: 'oplog.rs',
+ readConcern: {
+ afterOpTime: {
+ ts: ts,
+ t: term,
},
- maxTimeMS: 5000,
- });
- };
-
- assert.commandWorked(runFindCommand(lastOp.ts));
-
- var pingIntervalSeconds = 10;
- assert.commandFailedWithCode(
- runFindCommand(new Timestamp(lastOp.ts.getTime() + pingIntervalSeconds * 5, 0)),
- ErrorCodes.MaxTimeMSExpired);
-
- var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
- assert.soon(function() {
- var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- return true;
- }
+ },
+ maxTimeMS: 5000,
+ });
+};
+
+assert.commandWorked(runFindCommand(lastOp.ts));
+
+var pingIntervalSeconds = 10;
+assert.commandFailedWithCode(
+ runFindCommand(new Timestamp(lastOp.ts.getTime() + pingIntervalSeconds * 5, 0)),
+ ErrorCodes.MaxTimeMSExpired);
+
+var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
+assert.soon(function() {
+ var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ return true;
}
- return false;
- }, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
- shardingTest.stop();
+ }
+ return false;
+}, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
+shardingTest.stop();
})();
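The behavior under test is that readConcern's afterOpTime field makes the read wait until the node reaches that opTime, and maxTimeMS turns an unsatisfiable wait into a clean MaxTimeMSExpired error. A minimal sketch, reusing `primaryConn` and `lastOp` as defined in the test above:

    // A read bounded by an opTime far ahead of the commit point should time
    // out rather than block indefinitely.
    var futureTs = new Timestamp(lastOp.ts.getTime() + 3600, 0);
    assert.commandFailedWithCode(primaryConn.getDB('local').runCommand({
        find: 'oplog.rs',
        readConcern: {afterOpTime: {ts: futureTs, t: lastOp.t}},
        maxTimeMS: 2000,
    }), ErrorCodes.MaxTimeMSExpired);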
diff --git a/jstests/sharding/read_committed_lookup.js b/jstests/sharding/read_committed_lookup.js
index 4ecfd0c6e1f..72046bbf260 100644
--- a/jstests/sharding/read_committed_lookup.js
+++ b/jstests/sharding/read_committed_lookup.js
@@ -8,59 +8,58 @@ load("jstests/libs/read_committed_lib.js"); // For testReadCommittedLookup
(function() {
- // Manually create a shard.
- const replSetName = "lookup_read_majority";
- let rst = new ReplSetTest({
- nodes: 3,
- name: replSetName,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
+// Manually create a shard.
+const replSetName = "lookup_read_majority";
+let rst = new ReplSetTest({
+ nodes: 3,
+ name: replSetName,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
+});
- const nodes = rst.nodeList();
- const config = {
- _id: replSetName,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], priority: 0},
- {_id: 2, host: nodes[2], arbiterOnly: true},
- ]
- };
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- rst.initiate(config);
+const nodes = rst.nodeList();
+const config = {
+ _id: replSetName,
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], priority: 0},
+ {_id: 2, host: nodes[2], arbiterOnly: true},
+ ]
+};
- let shardSecondary = rst._slaves[0];
+rst.initiate(config);
-    // Confirm read committed works on a cluster with a database that does not have sharding enabled.
- let st = new ShardingTest({
- manualAddShard: true,
- });
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
- testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+let shardSecondary = rst._slaves[0];
- // Confirm read committed works on a cluster with:
- // - A sharding enabled database
- // - An unsharded local collection
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+// Confirm read committed works on a cluster with a database that does not have sharding enabled.
+let st = new ShardingTest({
+ manualAddShard: true,
+});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
- // Confirm read committed works on a cluster with:
- // - A sharding enabled database
- // - A sharded local collection.
- assert.commandWorked(st.s.getDB("test").runCommand(
- {createIndexes: 'local', indexes: [{name: "foreignKey_1", key: {foreignKey: 1}}]}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.local', key: {foreignKey: 1}}));
- testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+// Confirm read committed works on a cluster with:
+// - A sharding enabled database
+// - An unsharded local collection
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
- st.stop();
- rst.stopSet();
+// Confirm read committed works on a cluster with:
+// - A sharding enabled database
+// - A sharded local collection.
+assert.commandWorked(st.s.getDB("test").runCommand(
+ {createIndexes: 'local', indexes: [{name: "foreignKey_1", key: {foreignKey: 1}}]}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.local', key: {foreignKey: 1}}));
+testReadCommittedLookup(st.s.getDB("test"), shardSecondary, rst);
+st.stop();
+rst.stopSet();
})();
diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js
index 8ee48576ba1..21e50372c6b 100644
--- a/jstests/sharding/read_does_not_create_namespaces.js
+++ b/jstests/sharding/read_does_not_create_namespaces.js
@@ -2,15 +2,14 @@
// cause entries to be created in the catalog.
(function() {
- var shardingTest = new ShardingTest({name: 'read_does_not_create_namespaces', shards: 1});
- var db = shardingTest.getDB('NonExistentDB');
+var shardingTest = new ShardingTest({name: 'read_does_not_create_namespaces', shards: 1});
+var db = shardingTest.getDB('NonExistentDB');
- assert.isnull(db.nonExistentColl.findOne({}));
+assert.isnull(db.nonExistentColl.findOne({}));
- // Neither the database nor the collection should have been created
- assert.isnull(shardingTest.getDB('config').databases.findOne({_id: 'NonExistentDB'}));
- assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
-
- shardingTest.stop();
+// Neither the database nor the collection should have been created
+assert.isnull(shardingTest.getDB('config').databases.findOne({_id: 'NonExistentDB'}));
+assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
+shardingTest.stop();
})();
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 99e662d57b1..454cc2fb6dd 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -7,204 +7,202 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var PRI_TAG = {dc: 'ny'};
- var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
- var NODES = SEC_TAGS.length + 1;
+var PRI_TAG = {dc: 'ny'};
+var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
+var NODES = SEC_TAGS.length + 1;
- var doTest = function(useDollarQuerySyntax) {
- var st =
- new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
- var replTest = st.rs0;
- var primaryNode = replTest.getPrimary();
+var doTest = function(useDollarQuerySyntax) {
+ var st = new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
+ var replTest = st.rs0;
+ var primaryNode = replTest.getPrimary();
- // The $-prefixed query syntax is only legal for compatibility mode reads, not for the
- // find/getMore commands.
- if (useDollarQuerySyntax && st.s.getDB("test").getMongo().useReadCommands()) {
- st.stop();
- return;
- }
+ // The $-prefixed query syntax is only legal for compatibility mode reads, not for the
+ // find/getMore commands.
+ if (useDollarQuerySyntax && st.s.getDB("test").getMongo().useReadCommands()) {
+ st.stop();
+ return;
+ }
- var setupConf = function() {
- var replConf = primaryNode.getDB('local').system.replset.findOne();
- replConf.version = (replConf.version || 0) + 1;
+ var setupConf = function() {
+ var replConf = primaryNode.getDB('local').system.replset.findOne();
+ replConf.version = (replConf.version || 0) + 1;
- var secIdx = 0;
- for (var x = 0; x < NODES; x++) {
- var node = replConf.members[x];
+ var secIdx = 0;
+ for (var x = 0; x < NODES; x++) {
+ var node = replConf.members[x];
- if (node.host == primaryNode.name) {
- node.tags = PRI_TAG;
- } else {
- node.tags = SEC_TAGS[secIdx++];
- node.priority = 0;
- }
- }
-
- try {
- primaryNode.getDB('admin').runCommand({replSetReconfig: replConf});
- } catch (x) {
- jsTest.log('Exception expected because reconfiguring would close all conn, got ' +
- x);
+ if (node.host == primaryNode.name) {
+ node.tags = PRI_TAG;
+ } else {
+ node.tags = SEC_TAGS[secIdx++];
+ node.priority = 0;
}
+ }
- return replConf;
- };
+ try {
+ primaryNode.getDB('admin').runCommand({replSetReconfig: replConf});
+ } catch (x) {
+ jsTest.log('Exception expected because reconfiguring would close all conn, got ' + x);
+ }
- var checkTag = function(nodeToCheck, tag) {
- for (var idx = 0; idx < NODES; idx++) {
- var node = replConf.members[idx];
+ return replConf;
+ };
- if (node.host == nodeToCheck) {
- jsTest.log('node[' + node.host + '], Tag: ' + tojson(node['tags']));
- jsTest.log('tagToCheck: ' + tojson(tag));
+ var checkTag = function(nodeToCheck, tag) {
+ for (var idx = 0; idx < NODES; idx++) {
+ var node = replConf.members[idx];
- var nodeTag = node['tags'];
+ if (node.host == nodeToCheck) {
+ jsTest.log('node[' + node.host + '], Tag: ' + tojson(node['tags']));
+ jsTest.log('tagToCheck: ' + tojson(tag));
- for (var key in tag) {
- assert.eq(tag[key], nodeTag[key]);
- }
+ var nodeTag = node['tags'];
- return;
+ for (var key in tag) {
+ assert.eq(tag[key], nodeTag[key]);
}
- }
-
- assert(false, 'node ' + nodeToCheck + ' not part of config!');
- };
- var replConf = setupConf();
-
- var conn = st.s;
-
-        // Wait until the ReplicaSetMonitor refreshes its view and sees the tags
- var replConfig = replTest.getReplSetConfigFromNode();
- replConfig.members.forEach(function(node) {
- var nodeConn = new Mongo(node.host);
- awaitRSClientHosts(conn, nodeConn, {ok: true, tags: node.tags}, replTest);
- });
- replTest.awaitReplication();
+ return;
+ }
+ }
- jsTest.log('New rs config: ' + tojson(primaryNode.getDB('local').system.replset.findOne()));
- jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
+ assert(false, 'node ' + nodeToCheck + ' not part of config!');
+ };
- var coll = conn.getDB('test').user;
+ var replConf = setupConf();
- assert.soon(function() {
- var res = coll.insert({x: 1}, {writeConcern: {w: NODES}});
- if (!res.hasWriteError()) {
- return true;
- }
+ var conn = st.s;
- var err = res.getWriteError().errmsg;
-            // Transient transport errors may be expected because of the replSetReconfig
- if (err.indexOf("transport error") == -1) {
- throw err;
- }
- return false;
- });
+    // Wait until the ReplicaSetMonitor refreshes its view and sees the tags
+ var replConfig = replTest.getReplSetConfigFromNode();
+ replConfig.members.forEach(function(node) {
+ var nodeConn = new Mongo(node.host);
+ awaitRSClientHosts(conn, nodeConn, {ok: true, tags: node.tags}, replTest);
+ });
+ replTest.awaitReplication();
- var getExplain = function(readPrefMode, readPrefTags) {
- if (useDollarQuerySyntax) {
- var readPrefObj = {mode: readPrefMode};
+ jsTest.log('New rs config: ' + tojson(primaryNode.getDB('local').system.replset.findOne()));
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
- if (readPrefTags) {
- readPrefObj.tags = readPrefTags;
- }
+ var coll = conn.getDB('test').user;
- return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
- .limit(-1)
- .next();
- } else {
- return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
- }
- };
-
- var getExplainServer = function(explain) {
- assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
- var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- return serverInfo.host + ":" + serverInfo.port.toString();
- };
-
- // Read pref should work without slaveOk
- var explain = getExplain("secondary");
- var explainServer = getExplainServer(explain);
- assert.neq(primaryNode.name, explainServer);
-
- conn.setSlaveOk();
-
- // It should also work with slaveOk
- explain = getExplain("secondary");
- explainServer = getExplainServer(explain);
- assert.neq(primaryNode.name, explainServer);
-
- // Check that $readPreference does not influence the actual query
- assert.eq(1, explain.executionStats.nReturned);
-
- explain = getExplain("secondaryPreferred", [{s: "2"}]);
- explainServer = getExplainServer(explain);
- checkTag(explainServer, {s: "2"});
- assert.eq(1, explain.executionStats.nReturned);
-
- // Cannot use tags with primaryOnly
- assert.throws(function() {
- getExplain("primary", [{s: "2"}]);
- });
-
- // Ok to use empty tags on primaryOnly
- explain = getExplain("primary", [{}]);
- explainServer = getExplainServer(explain);
- assert.eq(primaryNode.name, explainServer);
-
- explain = getExplain("primary", []);
- explainServer = getExplainServer(explain);
- assert.eq(primaryNode.name, explainServer);
-
- // Check that mongos will try the next tag if nothing matches the first
- explain = getExplain("secondary", [{z: "3"}, {dc: "jp"}]);
- explainServer = getExplainServer(explain);
- checkTag(explainServer, {dc: "jp"});
- assert.eq(1, explain.executionStats.nReturned);
-
-        // Check that mongos will fall back to the primary if none of the given tags matches
- explain = getExplain("secondaryPreferred", [{z: "3"}, {dc: "ph"}]);
- explainServer = getExplainServer(explain);
- // Call getPrimary again since the primary could have changed after the restart.
- assert.eq(replTest.getPrimary().name, explainServer);
- assert.eq(1, explain.executionStats.nReturned);
-
- // Kill all members except one
- var stoppedNodes = [];
- for (var x = 0; x < NODES - 1; x++) {
- replTest.stop(x);
- stoppedNodes.push(replTest.nodes[x]);
+ assert.soon(function() {
+ var res = coll.insert({x: 1}, {writeConcern: {w: NODES}});
+ if (!res.hasWriteError()) {
+ return true;
}
- // Wait for ReplicaSetMonitor to realize nodes are down
- awaitRSClientHosts(conn, stoppedNodes, {ok: false}, replTest.name);
-
- // Wait for the last node to be in steady state -> secondary (not recovering)
- var lastNode = replTest.nodes[NODES - 1];
- awaitRSClientHosts(conn, lastNode, {ok: true, secondary: true}, replTest.name);
+ var err = res.getWriteError().errmsg;
+        // Transient transport errors may be expected because of the replSetReconfig
+ if (err.indexOf("transport error") == -1) {
+ throw err;
+ }
+ return false;
+ });
- jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
+ var getExplain = function(readPrefMode, readPrefTags) {
+ if (useDollarQuerySyntax) {
+ var readPrefObj = {mode: readPrefMode};
-        // Test to make sure that the connection is ok, in preparation for the primary-only test
- explain = getExplain("nearest");
- explainServer = getExplainServer(explain);
- assert.eq(explainServer, replTest.nodes[NODES - 1].name);
- assert.eq(1, explain.executionStats.nReturned);
+ if (readPrefTags) {
+ readPrefObj.tags = readPrefTags;
+ }
-        // Should assert if a primary-only request is made but no primary is available
- assert.throws(function() {
- getExplain("primary");
- });
+ return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
+ .limit(-1)
+ .next();
+ } else {
+ return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
+ }
+ };
- st.stop();
+ var getExplainServer = function(explain) {
+ assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
+ var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
+ return serverInfo.host + ":" + serverInfo.port.toString();
};
- doTest(false);
- doTest(true);
+ // Read pref should work without slaveOk
+ var explain = getExplain("secondary");
+ var explainServer = getExplainServer(explain);
+ assert.neq(primaryNode.name, explainServer);
+
+ conn.setSlaveOk();
+
+ // It should also work with slaveOk
+ explain = getExplain("secondary");
+ explainServer = getExplainServer(explain);
+ assert.neq(primaryNode.name, explainServer);
+
+ // Check that $readPreference does not influence the actual query
+ assert.eq(1, explain.executionStats.nReturned);
+
+ explain = getExplain("secondaryPreferred", [{s: "2"}]);
+ explainServer = getExplainServer(explain);
+ checkTag(explainServer, {s: "2"});
+ assert.eq(1, explain.executionStats.nReturned);
+
+ // Cannot use tags with primaryOnly
+ assert.throws(function() {
+ getExplain("primary", [{s: "2"}]);
+ });
+
+ // Ok to use empty tags on primaryOnly
+ explain = getExplain("primary", [{}]);
+ explainServer = getExplainServer(explain);
+ assert.eq(primaryNode.name, explainServer);
+
+ explain = getExplain("primary", []);
+ explainServer = getExplainServer(explain);
+ assert.eq(primaryNode.name, explainServer);
+
+ // Check that mongos will try the next tag if nothing matches the first
+ explain = getExplain("secondary", [{z: "3"}, {dc: "jp"}]);
+ explainServer = getExplainServer(explain);
+ checkTag(explainServer, {dc: "jp"});
+ assert.eq(1, explain.executionStats.nReturned);
+
+    // Check that mongos will fall back to the primary if none of the given tags matches
+ explain = getExplain("secondaryPreferred", [{z: "3"}, {dc: "ph"}]);
+ explainServer = getExplainServer(explain);
+ // Call getPrimary again since the primary could have changed after the restart.
+ assert.eq(replTest.getPrimary().name, explainServer);
+ assert.eq(1, explain.executionStats.nReturned);
+
+ // Kill all members except one
+ var stoppedNodes = [];
+ for (var x = 0; x < NODES - 1; x++) {
+ replTest.stop(x);
+ stoppedNodes.push(replTest.nodes[x]);
+ }
+
+ // Wait for ReplicaSetMonitor to realize nodes are down
+ awaitRSClientHosts(conn, stoppedNodes, {ok: false}, replTest.name);
+
+ // Wait for the last node to be in steady state -> secondary (not recovering)
+ var lastNode = replTest.nodes[NODES - 1];
+ awaitRSClientHosts(conn, lastNode, {ok: true, secondary: true}, replTest.name);
+
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
+
+    // Test to make sure that the connection is ok, in preparation for the primary-only test
+ explain = getExplain("nearest");
+ explainServer = getExplainServer(explain);
+ assert.eq(explainServer, replTest.nodes[NODES - 1].name);
+ assert.eq(1, explain.executionStats.nReturned);
+
+    // Should assert if a primary-only request is made but no primary is available
+ assert.throws(function() {
+ getExplain("primary");
+ });
+
+ st.stop();
+};
+
+doTest(false);
+doTest(true);
})();
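The tag-set semantics exercised above are positional: mongos tries each tag document in order, falls through to the next only when no member matches, and treats {} as matching any eligible member. A minimal sketch of the shell API, assuming `coll` is reached through a mongos whose shard members carry dc tags (the tag values are illustrative):

    // Prefer an 'ny' secondary, then an 'eu' one, then any eligible member.
    var docs = coll.find()
                   .readPref('secondaryPreferred', [{dc: 'ny'}, {dc: 'eu'}, {}])
                   .toArray();
    printjson(docs);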
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index a103338751f..e41a3c0b670 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -198,10 +198,7 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
aggregate: 1,
pipeline: [
{$currentOp: {}},
- {
- $lookup:
- {from: "dummy", localField: "dummy", foreignField: "dummy", as: "dummy"}
- }
+ {$lookup: {from: "dummy", localField: "dummy", foreignField: "dummy", as: "dummy"}}
],
comment: curOpComment,
cursor: {}
@@ -251,7 +248,6 @@ var testBadMode = function(conn, hostList, isMongos, mode, tagSets) {
};
var testAllModes = function(conn, hostList, isMongos) {
-
// The primary is tagged with { tag: 'one' } and the secondary with
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index c00d202cdc0..b451b976d39 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -1,41 +1,40 @@
// Tests that a mongos will correctly retry a stale shard version when read preference is used
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({
- shards: {rs0: {quiet: ''}, rs1: {quiet: ''}},
- mongos: 2,
- other: {mongosOptions: {verbose: 2}}
- });
+var st = new ShardingTest({
+ shards: {rs0: {quiet: ''}, rs1: {quiet: ''}},
+ mongos: 2,
+ other: {mongosOptions: {verbose: 2}}
+});
- var testDB1 = st.s0.getDB('test');
- var testDB2 = st.s1.getDB('test');
+var testDB1 = st.s0.getDB('test');
+var testDB2 = st.s1.getDB('test');
- // Trigger a query on mongos 1 so it will have a view of test.user as being unsharded.
- testDB1.user.findOne();
+// Trigger a query on mongos 1 so it will have a view of test.user as being unsharded.
+testDB1.user.findOne();
- assert.commandWorked(testDB2.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(testDB2.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB2.adminCommand({split: 'test.user', middle: {x: 100}}));
+assert.commandWorked(testDB2.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(testDB2.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(testDB2.adminCommand({split: 'test.user', middle: {x: 100}}));
- var configDB2 = st.s1.getDB('config');
- var chunkToMove = configDB2.chunks.find().sort({min: 1}).next();
- var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
- assert.commandWorked(
- testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}}));
+var configDB2 = st.s1.getDB('config');
+var chunkToMove = configDB2.chunks.find().sort({min: 1}).next();
+var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+assert.commandWorked(testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}}));
- // Insert a document into each chunk
- assert.writeOK(testDB2.user.insert({x: 30}));
- assert.writeOK(testDB2.user.insert({x: 130}));
+// Insert a document into each chunk
+assert.writeOK(testDB2.user.insert({x: 30}));
+assert.writeOK(testDB2.user.insert({x: 130}));
- // The testDB1 mongos does not know the chunk has been moved, and will retry
- var cursor = testDB1.user.find({x: 30}).readPref('primary');
- assert(cursor.hasNext());
- assert.eq(30, cursor.next().x);
+// The testDB1 mongos does not know the chunk has been moved, and will retry
+var cursor = testDB1.user.find({x: 30}).readPref('primary');
+assert(cursor.hasNext());
+assert.eq(30, cursor.next().x);
- cursor = testDB1.user.find({x: 130}).readPref('primary');
- assert(cursor.hasNext());
- assert.eq(130, cursor.next().x);
+cursor = testDB1.user.find({x: 130}).readPref('primary');
+assert(cursor.hasNext());
+assert.eq(130, cursor.next().x);
- st.stop();
+st.stop();
})();
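The point of this test is that the moveChunk issued through the second router leaves the first router's routing table stale, and the later read still succeeds because the shard answers with a stale-version error that mongos handles by refreshing and retrying. A minimal sketch of that shape, assuming `st` is a ShardingTest with two mongos and 'test.user' sharded on {x: 1} (illustrative values):

    // Move a chunk through mongos s1; mongos s0 does not observe the move.
    assert.commandWorked(st.s1.adminCommand(
        {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName}));
    // The stale mongos refreshes and retries transparently on StaleConfig.
    assert.neq(null, st.s0.getDB('test').user.findOne({x: 0}));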
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index ba3fe454c2b..40326f50fec 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -8,128 +8,127 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
+'use strict';
+load("jstests/replsets/rslib.js");
- var shardTest =
- new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
+var shardTest =
+ new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
- var mongos = shardTest.s0;
- var mongosSOK = shardTest.s1;
- mongosSOK.setSlaveOk();
+var mongos = shardTest.s0;
+var mongosSOK = shardTest.s1;
+mongosSOK.setSlaveOk();
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
- const dbName = "test";
- var dbase = mongos.getDB(dbName);
- var coll = dbase.getCollection("foo");
- var dbaseSOk = mongosSOK.getDB("" + dbase);
- var collSOk = mongosSOK.getCollection("" + coll);
+const dbName = "test";
+var dbase = mongos.getDB(dbName);
+var coll = dbase.getCollection("foo");
+var dbaseSOk = mongosSOK.getDB("" + dbase);
+var collSOk = mongosSOK.getCollection("" + coll);
- var rsA = shardTest.rs0;
- var rsB = shardTest.rs1;
+var rsA = shardTest.rs0;
+var rsB = shardTest.rs1;
- assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
- assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
+assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
+assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
- rsA.awaitReplication();
- rsB.awaitReplication();
+rsA.awaitReplication();
+rsB.awaitReplication();
- print("1: initial insert");
+print("1: initial insert");
- assert.writeOK(coll.save({_id: -1, a: "a", date: new Date()}));
- assert.writeOK(coll.save({_id: 1, b: "b", date: new Date()}));
+assert.writeOK(coll.save({_id: -1, a: "a", date: new Date()}));
+assert.writeOK(coll.save({_id: 1, b: "b", date: new Date()}));
- print("2: shard collection");
+print("2: shard collection");
- shardTest.shardColl(coll,
- /* shardBy */ {_id: 1},
- /* splitAt */ {_id: 0},
- /* move chunk */ {_id: 0},
- /* dbname */ null,
- /* waitForDelete */ true);
+shardTest.shardColl(coll,
+ /* shardBy */ {_id: 1},
+ /* splitAt */ {_id: 0},
+ /* move chunk */ {_id: 0},
+ /* dbname */ null,
+ /* waitForDelete */ true);
- print("3: test normal and slaveOk queries");
+print("3: test normal and slaveOk queries");
- // Make shardA and rsA the same
- var shardA = shardTest.getShard(coll, {_id: -1});
- var shardAColl = shardA.getCollection("" + coll);
- var shardB = shardTest.getShard(coll, {_id: 1});
+// Make shardA and rsA the same
+var shardA = shardTest.getShard(coll, {_id: -1});
+var shardAColl = shardA.getCollection("" + coll);
+var shardB = shardTest.getShard(coll, {_id: 1});
- if (shardA.name == rsB.getURL()) {
- var swap = rsB;
- rsB = rsA;
- rsA = swap;
- }
+if (shardA.name == rsB.getURL()) {
+ var swap = rsB;
+ rsB = rsA;
+ rsA = swap;
+}
- rsA.awaitReplication();
- rsB.awaitReplication();
+rsA.awaitReplication();
+rsB.awaitReplication();
- // Because of async migration cleanup, we need to wait for this condition to be true
- assert.soon(function() {
- return coll.find().itcount() == collSOk.find().itcount();
- });
+// Because of async migration cleanup, we need to wait for this condition to be true
+assert.soon(function() {
+ return coll.find().itcount() == collSOk.find().itcount();
+});
- assert.eq(shardAColl.find().itcount(), 1);
- assert.eq(shardAColl.findOne()._id, -1);
+assert.eq(shardAColl.find().itcount(), 1);
+assert.eq(shardAColl.findOne()._id, -1);
- print("5: make one of the secondaries RECOVERING");
+print("5: make one of the secondaries RECOVERING");
- var secs = rsA.getSecondaries();
- var goodSec = secs[0];
- var badSec = secs[1];
+var secs = rsA.getSecondaries();
+var goodSec = secs[0];
+var badSec = secs[1];
- assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
- rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
+assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
+rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
- print("6: stop non-RECOVERING secondary");
+print("6: stop non-RECOVERING secondary");
- rsA.stop(goodSec);
+rsA.stop(goodSec);
- print("7: check our regular and slaveOk query");
+print("7: check our regular and slaveOk query");
- assert.eq(2, coll.find().itcount());
- assert.eq(2, collSOk.find().itcount());
+assert.eq(2, coll.find().itcount());
+assert.eq(2, collSOk.find().itcount());
- print("8: restart both our secondaries clean");
+print("8: restart both our secondaries clean");
- rsA.restart(rsA.getSecondaries(), {remember: true, startClean: true}, undefined, 5 * 60 * 1000);
+rsA.restart(rsA.getSecondaries(), {remember: true, startClean: true}, undefined, 5 * 60 * 1000);
- print("9: wait for recovery");
+print("9: wait for recovery");
- rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- print("10: check our regular and slaveOk query");
+print("10: check our regular and slaveOk query");
- // We need to make sure our nodes are considered accessible from mongos - otherwise we fail
- // See SERVER-7274
- awaitRSClientHosts(coll.getMongo(), rsA.nodes, {ok: true});
- awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
+// We need to make sure our nodes are considered accessible from mongos - otherwise we fail
+// See SERVER-7274
+awaitRSClientHosts(coll.getMongo(), rsA.nodes, {ok: true});
+awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
- // We need to make sure at least one secondary is accessible from mongos - otherwise we fail
- // See SERVER-7699
- awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
- awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
+// We need to make sure at least one secondary is accessible from mongos - otherwise we fail
+// See SERVER-7699
+awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
+awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
- print("SlaveOK Query...");
- var sOKCount = collSOk.find().itcount();
+print("SlaveOK Query...");
+var sOKCount = collSOk.find().itcount();
- var collCount = null;
- try {
- print("Normal query...");
- collCount = coll.find().itcount();
- } catch (e) {
- printjson(e);
+var collCount = null;
+try {
+ print("Normal query...");
+ collCount = coll.find().itcount();
+} catch (e) {
+ printjson(e);
- // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
- // time can error out.
- print("Error may have been caused by stepdown, try again.");
- collCount = coll.find().itcount();
- }
+ // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
+ // time can error out.
+ print("Error may have been caused by stepdown, try again.");
+ collCount = coll.find().itcount();
+}
- assert.eq(collCount, sOKCount);
-
- shardTest.stop();
+assert.eq(collCount, sOKCount);
+shardTest.stop();
})();
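Step 5 above uses maintenance mode to force a secondary into RECOVERING without stopping it. A minimal sketch of that toggle, assuming `rst` is a ReplSetTest and `sec` is one of its secondaries:

    // Enter maintenance mode (the member reports RECOVERING), then leave it.
    assert.commandWorked(sec.adminCommand({replSetMaintenance: true}));
    rst.waitForState(sec, ReplSetTest.State.RECOVERING);
    assert.commandWorked(sec.adminCommand({replSetMaintenance: false}));
    rst.waitForState(sec, ReplSetTest.State.SECONDARY);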
diff --git a/jstests/sharding/refresh_sessions.js b/jstests/sharding/refresh_sessions.js
index ee4ee125db3..c6d229707ca 100644
--- a/jstests/sharding/refresh_sessions.js
+++ b/jstests/sharding/refresh_sessions.js
@@ -1,86 +1,86 @@
(function() {
- "use strict";
-
- // This test makes assumptions about the number of logical sessions.
- TestData.disableImplicitSessions = true;
-
- var sessionsDb = "config";
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
-
-    // Create a cluster with 2 shards.
- var cluster = new ShardingTest({shards: 2});
-
- // Test that we can refresh without any sessions, as a sanity check.
- {
- assert.commandWorked(cluster.s.getDB(sessionsDb).runCommand(refresh));
- assert.commandWorked(cluster.shard0.getDB(sessionsDb).runCommand(refresh));
- assert.commandWorked(cluster.shard1.getDB(sessionsDb).runCommand(refresh));
- }
-
- // Test that refreshing on mongos flushes local records to the collection.
- {
- var mongos = cluster.s.getDB(sessionsDb);
- var sessionCount = mongos.system.sessions.count();
-
- // Start one session.
- assert.commandWorked(mongos.runCommand(startSession));
- assert.commandWorked(mongos.runCommand(refresh));
-
- // Test that it landed in the collection.
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 1,
- "refresh on mongos did not flush session record");
- }
-
- // Test that refreshing on mongod flushes local records to the collection.
- {
- var mongos = cluster.s.getDB(sessionsDb);
- var shard = cluster.shard0.getDB(sessionsDb);
- var sessionCount = mongos.system.sessions.count();
-
- assert.commandWorked(shard.runCommand(startSession));
- assert.commandWorked(shard.runCommand(refresh));
-
- // Test that the new record landed in the collection.
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 1,
- "refresh on mongod did not flush session record");
- }
-
- // Test that refreshing on all servers flushes all records.
- {
- var mongos = cluster.s.getDB(sessionsDb);
- var shard0 = cluster.shard0.getDB(sessionsDb);
- var shard1 = cluster.shard1.getDB(sessionsDb);
-
- var sessionCount = mongos.system.sessions.count();
-
- assert.commandWorked(mongos.runCommand(startSession));
- assert.commandWorked(shard0.runCommand(startSession));
- assert.commandWorked(shard1.runCommand(startSession));
-
- // All records should be in local caches only.
- assert.eq(mongos.system.sessions.count(),
- sessionCount,
- "startSession should not flush records to disk");
-
- // Refresh on each server, see that it ups the session count.
- assert.commandWorked(mongos.runCommand(refresh));
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 1,
- "refresh on mongos did not flush session records to disk");
-
- assert.commandWorked(shard0.runCommand(refresh));
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 2,
- "refresh on shard did not flush session records to disk");
-
- assert.commandWorked(shard1.runCommand(refresh));
- assert.eq(mongos.system.sessions.count(),
- sessionCount + 3,
- "refresh on shard did not flush session records to disk");
- }
-
- cluster.stop();
+"use strict";
+
+// This test makes assumptions about the number of logical sessions.
+TestData.disableImplicitSessions = true;
+
+var sessionsDb = "config";
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
+
+// Create a cluster with 2 shards.
+var cluster = new ShardingTest({shards: 2});
+
+// Test that we can refresh without any sessions, as a sanity check.
+{
+ assert.commandWorked(cluster.s.getDB(sessionsDb).runCommand(refresh));
+ assert.commandWorked(cluster.shard0.getDB(sessionsDb).runCommand(refresh));
+ assert.commandWorked(cluster.shard1.getDB(sessionsDb).runCommand(refresh));
+}
+
+// Test that refreshing on mongos flushes local records to the collection.
+{
+ var mongos = cluster.s.getDB(sessionsDb);
+ var sessionCount = mongos.system.sessions.count();
+
+ // Start one session.
+ assert.commandWorked(mongos.runCommand(startSession));
+ assert.commandWorked(mongos.runCommand(refresh));
+
+ // Test that it landed in the collection.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongos did not flush session record");
+}
+
+// Test that refreshing on mongod flushes local records to the collection.
+{
+ var mongos = cluster.s.getDB(sessionsDb);
+ var shard = cluster.shard0.getDB(sessionsDb);
+ var sessionCount = mongos.system.sessions.count();
+
+ assert.commandWorked(shard.runCommand(startSession));
+ assert.commandWorked(shard.runCommand(refresh));
+
+ // Test that the new record landed in the collection.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongod did not flush session record");
+}
+
+// Test that refreshing on all servers flushes all records.
+{
+ var mongos = cluster.s.getDB(sessionsDb);
+ var shard0 = cluster.shard0.getDB(sessionsDb);
+ var shard1 = cluster.shard1.getDB(sessionsDb);
+
+ var sessionCount = mongos.system.sessions.count();
+
+ assert.commandWorked(mongos.runCommand(startSession));
+ assert.commandWorked(shard0.runCommand(startSession));
+ assert.commandWorked(shard1.runCommand(startSession));
+
+ // All records should be in local caches only.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount,
+ "startSession should not flush records to disk");
+
+ // Refresh on each server, see that it ups the session count.
+ assert.commandWorked(mongos.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongos did not flush session records to disk");
+
+ assert.commandWorked(shard0.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 2,
+ "refresh on shard did not flush session records to disk");
+
+ assert.commandWorked(shard1.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 3,
+ "refresh on shard did not flush session records to disk");
+}
+
+cluster.stop();
})();
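The invariant tested above is that startSession only touches the in-memory logical session cache, and refreshLogicalSessionCacheNow is what persists the cached records to config.system.sessions. A minimal sketch of one round trip, assuming `s` is a mongos connection with implicit sessions disabled, as in the test:

    // One cached session becomes one persisted record after a forced refresh.
    var sessions = s.getDB('config').system.sessions;
    var before = sessions.count();
    assert.commandWorked(s.getDB('config').runCommand({startSession: 1}));
    assert.eq(before, sessions.count());  // still cache-only
    assert.commandWorked(
        s.getDB('config').runCommand({refreshLogicalSessionCacheNow: 1}));
    assert.eq(before + 1, sessions.count());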
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index e2300a3e896..df836cd8ef0 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -1,288 +1,283 @@
// This checks to make sure that sharded regex queries behave the same as unsharded regex queries
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
-
- //
- // Set up multiple collections to target with regex shard keys on two shards
- //
-
- var coll = mongos.getCollection("foo.bar");
- var collSharded = mongos.getCollection("foo.barSharded");
- var collCompound = mongos.getCollection("foo.barCompound");
- var collNested = mongos.getCollection("foo.barNested");
- var collHashed = mongos.getCollection("foo.barHashed");
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
- st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
-
- //
- // Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
- //
-
- assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {a: "abcde-1"}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: collSharded.toString(),
- find: {a: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(
- admin.runCommand({shardCollection: collCompound.toString(), key: {a: 1, b: 1}}));
- assert.commandWorked(
- admin.runCommand({split: collCompound.toString(), middle: {a: "abcde-1", b: 0}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: collCompound.toString(),
- find: {a: 0, b: 0},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(
- admin.runCommand({shardCollection: collNested.toString(), key: {'a.b': 1}}));
- assert.commandWorked(
- admin.runCommand({split: collNested.toString(), middle: {'a.b': "abcde-1"}}));
- assert.commandWorked(admin.runCommand({
- moveChunk: collNested.toString(),
- find: {a: {b: 0}},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- assert.commandWorked(
- admin.runCommand({shardCollection: collHashed.toString(), key: {hash: "hashed"}}));
-
- st.printShardingStatus();
-
- //
- //
- // Cannot insert regex _id
- assert.writeError(coll.insert({_id: /regex value/}));
- assert.writeError(collSharded.insert({_id: /regex value/, a: 0}));
- assert.writeError(collCompound.insert({_id: /regex value/, a: 0, b: 0}));
- assert.writeError(collNested.insert({_id: /regex value/, a: {b: 0}}));
- assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
-
- //
- //
- // (For now) we can insert a regex shard key
- assert.writeOK(collSharded.insert({a: /regex value/}));
- assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
- assert.writeOK(collNested.insert({a: {b: /regex value/}}));
- assert.writeOK(collHashed.insert({hash: /regex value/}));
-
- //
- //
- // Query by regex should hit all matching keys, across all shards if applicable
- coll.remove({});
- assert.writeOK(coll.insert({a: "abcde-0"}));
- assert.writeOK(coll.insert({a: "abcde-1"}));
- assert.writeOK(coll.insert({a: /abcde.*/}));
- assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
-
- collSharded.remove({});
- assert.writeOK(collSharded.insert({a: "abcde-0"}));
- assert.writeOK(collSharded.insert({a: "abcde-1"}));
- assert.writeOK(collSharded.insert({a: /abcde.*/}));
- assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
-
- collCompound.remove({});
- assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
- assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
- assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
- assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
-
- collHashed.remove({});
- while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
- st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
- }
- assert.writeOK(collHashed.insert({hash: /abcde.*/}));
- assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
-
- //
- //
- // Update by regex should hit all matching keys, across all shards if applicable
- coll.remove({});
- assert.writeOK(coll.insert({a: "abcde-0"}));
- assert.writeOK(coll.insert({a: "abcde-1"}));
- assert.writeOK(coll.insert({a: /abcde.*/}));
- assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
-
- collSharded.remove({});
- assert.writeOK(collSharded.insert({a: "abcde-0"}));
- assert.writeOK(collSharded.insert({a: "abcde-1"}));
- assert.writeOK(collSharded.insert({a: /abcde.*/}));
- assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
-
- collCompound.remove({});
- assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
- assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
- assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
- assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
-
- collHashed.remove({});
- while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
- st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
- }
- assert.writeOK(collHashed.insert({hash: /abcde.*/}));
- assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
- assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
-
- collSharded.remove({});
- collCompound.remove({});
- collNested.remove({});
-
- //
- //
- // Op-style updates with regex should fail on sharded collections.
- // Query clause is targeted, and regex in query clause is ambiguous.
- assert.commandFailedWithCode(
- collSharded.update({a: /abcde-1/}, {"$set": {b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- collSharded.update({a: /abcde-[1-2]/}, {"$set": {b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- collNested.update({a: {b: /abcde-1/}}, {"$set": {"a.b": /abcde-1/, b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- collNested.update({"a.b": /abcde.*/}, {"$set": {b: 1}}, {upsert: false}),
- ErrorCodes.InvalidOptions);
-
- //
- //
- // Replacement style updates with regex should work on sharded collections.
- // If query clause is ambiguous, we fallback to using update clause for targeting.
- assert.commandWorked(collSharded.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: false}));
- assert.commandWorked(collSharded.update({a: /abcde-1/}, {a: /abcde-1/, b: 1}, {upsert: false}));
- assert.commandWorked(
- collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: false}));
- assert.commandWorked(
- collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: false}));
-
- //
- //
- // Upsert with op-style regex should fail on sharded collections
- // Query clause is targeted, and regex in query clause is ambiguous
-
- // The queries will also be interpreted as regex based prefix search and cannot target a single
- // shard.
- assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
- assert.writeError(
- collCompound.update({a: /abcde-1/}, {$set: {a: /abcde.*/, b: 1}}, {upsert: true}));
- // Exact regex in query never equality
- assert.writeError(
- collNested.update({'a.b': /abcde.*/}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
- // Even nested regexes are not extracted in queries
- assert.writeError(
- collNested.update({a: {b: /abcde.*/}}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
- assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
-
- //
- //
- // Upsert by replacement-style regex should fail on sharded collections
- // Query clause is targeted, and regex in query clause is ambiguous
- assert.commandFailedWithCode(collSharded.update({a: /abcde.*/}, {a: /abcde.*/}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- collCompound.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- //
- //
- // Remove by regex should hit all matching keys, across all shards if applicable
- coll.remove({});
- assert.writeOK(coll.insert({a: "abcde-0"}));
- assert.writeOK(coll.insert({a: "abcde-1"}));
- assert.writeOK(coll.insert({a: /abcde.*/}));
- assert.writeOK(coll.remove({a: /abcde.*/}));
- assert.eq(0, coll.find({}).itcount());
-
- collSharded.remove({});
- assert.writeOK(collSharded.insert({a: "abcde-0"}));
- assert.writeOK(collSharded.insert({a: "abcde-1"}));
- assert.writeOK(collSharded.insert({a: /abcde.*/}));
- assert.writeOK(collSharded.remove({a: /abcde.*/}));
- assert.eq(0, collSharded.find({}).itcount());
-
- collCompound.remove({});
- assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
- assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
- assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
- assert.writeOK(collCompound.remove({a: /abcde.*/}));
- assert.eq(0, collCompound.find({}).itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
- assert.eq(0, collNested.find({}).itcount());
-
- collHashed.remove({});
- while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
- st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
- }
- assert.writeOK(collHashed.insert({hash: /abcde.*/}));
- assert.writeOK(collHashed.remove({hash: /abcde.*/}));
- assert.eq(0, collHashed.find({}).itcount());
-
- //
- //
- // Query/Update/Remove by nested regex is different depending on how the nested regex is
- // specified
- coll.remove({});
- assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
- assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
- assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
- assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
- assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
- assert.eq(1, coll.find({updated: true}).itcount());
- assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
- assert.eq(2, coll.find().itcount());
-
- collNested.remove({});
- assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
- assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
- assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
- assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
- assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
- assert.eq(1, collNested.find({updated: true}).itcount());
- assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
- assert.eq(2, collNested.find().itcount());
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 2});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+
+//
+// Set up multiple collections to target with regex shard keys on two shards
+//
+
+var coll = mongos.getCollection("foo.bar");
+var collSharded = mongos.getCollection("foo.barSharded");
+var collCompound = mongos.getCollection("foo.barCompound");
+var collNested = mongos.getCollection("foo.barNested");
+var collHashed = mongos.getCollection("foo.barHashed");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName);
+
+//
+// Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
+//
+
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {a: "abcde-1"}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collSharded.toString(),
+ find: {a: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collCompound.toString(), key: {a: 1, b: 1}}));
+assert.commandWorked(
+ admin.runCommand({split: collCompound.toString(), middle: {a: "abcde-1", b: 0}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collCompound.toString(),
+ find: {a: 0, b: 0},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(admin.runCommand({shardCollection: collNested.toString(), key: {'a.b': 1}}));
+assert.commandWorked(admin.runCommand({split: collNested.toString(), middle: {'a.b': "abcde-1"}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collNested.toString(),
+ find: {a: {b: 0}},
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collHashed.toString(), key: {hash: "hashed"}}));
+
+st.printShardingStatus();
+
+//
+//
+// Cannot insert regex _id
+assert.writeError(coll.insert({_id: /regex value/}));
+assert.writeError(collSharded.insert({_id: /regex value/, a: 0}));
+assert.writeError(collCompound.insert({_id: /regex value/, a: 0, b: 0}));
+assert.writeError(collNested.insert({_id: /regex value/, a: {b: 0}}));
+assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
+
+//
+//
+// (For now) we can insert a regex shard key
+assert.writeOK(collSharded.insert({a: /regex value/}));
+assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
+assert.writeOK(collNested.insert({a: {b: /regex value/}}));
+assert.writeOK(collHashed.insert({hash: /regex value/}));
+
+//
+//
+// Query by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+}
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
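+
+// A sketch of why these reads fan out (an assumption about router behavior): an
+// unanchored regex such as /abcde.*/ yields no shard-key bounds, so the query must
+// broadcast to every shard; a left-anchored pattern could in principle be narrowed:
+//   collSharded.find({a: /^abcde/});  // boundable to the range ["abcde", "abcdf")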
+
+//
+//
+// Update by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+}
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
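+
+// A minimal sketch of the role of {multi: true} (an assumption, not asserted by this
+// test): broadcast updates are only legal as multi-updates, so a single-document
+// op-style update with an ambiguous regex query would have no single target shard:
+//   collSharded.update({a: /abcde.*/}, {$set: {x: 1}}, {multi: false});  // expected to error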
+
+collSharded.remove({});
+collCompound.remove({});
+collNested.remove({});
+
+//
+//
+// Op-style updates with regex should fail on sharded collections.
+// The query clause is used for targeting, and a regex in the query clause is ambiguous.
+assert.commandFailedWithCode(collSharded.update({a: /abcde-1/}, {"$set": {b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ collSharded.update({a: /abcde-[1-2]/}, {"$set": {b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ collNested.update({a: {b: /abcde-1/}}, {"$set": {"a.b": /abcde-1/, b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ collNested.update({"a.b": /abcde.*/}, {"$set": {b: 1}}, {upsert: false}),
+ ErrorCodes.InvalidOptions);
+
+//
+//
+// Replacement-style updates with regex should work on sharded collections.
+// If the query clause is ambiguous, we fall back to using the update clause for targeting.
+assert.commandWorked(collSharded.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: false}));
+assert.commandWorked(collSharded.update({a: /abcde-1/}, {a: /abcde-1/, b: 1}, {upsert: false}));
+assert.commandWorked(collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: false}));
+assert.commandWorked(collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: false}));
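+
+// A sketch of the fallback described above: for replacement-style updates the router
+// can extract the shard key from the replacement document itself, so even though the
+// query {a: /abcde.*/} is ambiguous, the write is routable via the new document's key.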
+
+//
+//
+// Upsert with an op-style regex should fail on sharded collections.
+// The query clause is used for targeting, and a regex in the query clause is ambiguous.
+
+// The queries will also be interpreted as a regex-based prefix search and so cannot
+// target a single shard.
+assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
+assert.writeError(
+ collCompound.update({a: /abcde-1/}, {$set: {a: /abcde.*/, b: 1}}, {upsert: true}));
+// An exact regex in the query is never treated as an equality match on the shard key.
+assert.writeError(
+ collNested.update({'a.b': /abcde.*/}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
+// Even nested regexes are not extracted from queries for targeting.
+assert.writeError(
+ collNested.update({a: {b: /abcde.*/}}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
+assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
+
+//
+//
+// Upsert by replacement-style regex should fail on sharded collections.
+// The query clause is used for targeting, and a regex in the query clause is ambiguous.
+assert.commandFailedWithCode(collSharded.update({a: /abcde.*/}, {a: /abcde.*/}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ collCompound.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ collNested.update({'a.b': /abcde-1/}, {a: {b: /abcde.*/}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
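+
+// For contrast, a minimal sketch (assuming that an exact, non-regex equality on the
+// full shard key lets mongos target a single shard): such an upsert should succeed.
+assert.commandWorked(collSharded.update({a: "abcde-1"}, {a: "abcde-1", b: 1}, {upsert: true}));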
+
+//
+//
+// Remove by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.remove({a: /abcde.*/}));
+assert.eq(0, coll.find({}).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.remove({a: /abcde.*/}));
+assert.eq(0, collSharded.find({}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.remove({a: /abcde.*/}));
+assert.eq(0, collCompound.find({}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
+assert.eq(0, collNested.find({}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
+}
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.remove({hash: /abcde.*/}));
+assert.eq(0, collHashed.find({}).itcount());
+
+//
+//
+// Query/Update/Remove by nested regex behaves differently depending on how the nested
+// regex is specified.
+coll.remove({});
+assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
+assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
+assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
+assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, coll.find({updated: true}).itcount());
+assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
+assert.eq(2, coll.find().itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, collNested.find({updated: true}).itcount());
+assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
+assert.eq(2, collNested.find().itcount());
+
+st.stop();
})();
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 8ccf8dadcf2..3c8364382ce 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -1,48 +1,46 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, other: {enableBalancer: true}});
- var config = s.s0.getDB('config');
+var s = new ShardingTest({shards: 2, other: {enableBalancer: true}});
+var config = s.s0.getDB('config');
- assert.commandWorked(s.s0.adminCommand({enableSharding: 'needToMove'}));
- s.ensurePrimaryShard('needToMove', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({enableSharding: 'needToMove'}));
+s.ensurePrimaryShard('needToMove', s.shard0.shardName);
- // Returns an error when trying to remove a shard that doesn't exist.
- assert.commandFailedWithCode(s.s0.adminCommand({removeshard: "shardz"}),
- ErrorCodes.ShardNotFound);
+// Returns an error when trying to remove a shard that doesn't exist.
+assert.commandFailedWithCode(s.s0.adminCommand({removeshard: "shardz"}), ErrorCodes.ShardNotFound);
- // First remove puts in draining mode, the second tells me a db needs to move, the third
- // actually removes
- assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
+// The first remove puts the shard in draining mode, the second reports that a db still
+// needs to move, and the third actually removes the shard.
+assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
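+
+// Sketch of the expected state progression: the first call returns {state: 'started'},
+// a follow-up call reports {state: 'ongoing', dbsToMove: ['needToMove']}, and only once
+// the shard has fully drained does a call return {state: 'completed'}.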
- // Can't have more than one draining shard at a time
- assert.commandFailedWithCode(s.s0.adminCommand({removeshard: s.shard1.shardName}),
- ErrorCodes.ConflictingOperationInProgress);
- assert.eq(s.s0.adminCommand({removeshard: s.shard0.shardName}).dbsToMove,
- ['needToMove'],
- "didn't show db to move");
+// Can't have more than one draining shard at a time
+assert.commandFailedWithCode(s.s0.adminCommand({removeshard: s.shard1.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+assert.eq(s.s0.adminCommand({removeshard: s.shard0.shardName}).dbsToMove,
+ ['needToMove'],
+ "didn't show db to move");
- s.s0.getDB('needToMove').dropDatabase();
+s.s0.getDB('needToMove').dropDatabase();
- // Ensure the balancer moves the config.system.sessions collection chunks out of the shard being
- // removed
- s.awaitBalancerRound();
+// Ensure the balancer moves the config.system.sessions collection chunks out of the shard being
+// removed
+s.awaitBalancerRound();
- var removeResult = assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
- assert.eq('completed', removeResult.state, 'Shard was not removed: ' + tojson(removeResult));
+var removeResult = assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName}));
+assert.eq('completed', removeResult.state, 'Shard was not removed: ' + tojson(removeResult));
- var existingShards = config.shards.find({}).toArray();
- assert.eq(1,
- existingShards.length,
- "Removed server still appears in count: " + tojson(existingShards));
+var existingShards = config.shards.find({}).toArray();
+assert.eq(
+ 1, existingShards.length, "Removed server still appears in count: " + tojson(existingShards));
- assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName}));
+assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName}));
- // Should create a shard0002 shard
- var conn = MongoRunner.runMongod({shardsvr: ""});
- assert.commandWorked(s.s0.adminCommand({addshard: conn.host}));
- assert.eq(2, s.config.shards.count(), "new server does not appear in count");
+// Should create a shard0002 shard
+var conn = MongoRunner.runMongod({shardsvr: ""});
+assert.commandWorked(s.s0.adminCommand({addshard: conn.host}));
+assert.eq(2, s.config.shards.count(), "new server does not appear in count");
- MongoRunner.stopMongod(conn);
- s.stop();
+MongoRunner.stopMongod(conn);
+s.stop();
})();
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 55258dec663..eb7418b76ed 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -12,195 +12,193 @@ load("jstests/replsets/rslib.js");
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- function seedString(replTest) {
- var members = replTest.getReplSetConfig().members.map(function(elem) {
- return elem.host;
- });
- return replTest.name + '/' + members.join(',');
- }
-
- function removeShard(st, replTest) {
- jsTest.log("Removing shard with name: " + replTest.name);
- var res = st.s.adminCommand({removeShard: replTest.name});
+'use strict';
+
+function seedString(replTest) {
+ var members = replTest.getReplSetConfig().members.map(function(elem) {
+ return elem.host;
+ });
+ return replTest.name + '/' + members.join(',');
+}
+
+function removeShard(st, replTest) {
+ jsTest.log("Removing shard with name: " + replTest.name);
+ var res = st.s.adminCommand({removeShard: replTest.name});
+ assert.commandWorked(res);
+ assert.eq('started', res.state);
+ assert.soon(function() {
+ res = st.s.adminCommand({removeShard: replTest.name});
assert.commandWorked(res);
- assert.eq('started', res.state);
- assert.soon(function() {
- res = st.s.adminCommand({removeShard: replTest.name});
- assert.commandWorked(res);
- return ('completed' === res.state);
- }, "failed to remove shard: " + tojson(res));
-
- // Drop the database so the shard can be re-added.
- assert.commandWorked(replTest.getPrimary().getDB(coll.getDB().getName()).dropDatabase());
- }
-
- function addShard(st, replTest) {
- var seed = seedString(replTest);
- print("Adding shard with seed: " + seed);
- try {
- assert.eq(true, st.adminCommand({addshard: seed}));
- } catch (e) {
- print("First attempt to addShard failed, trying again");
- // transport error on first attempt is expected. Make sure second attempt goes through
- assert.eq(true, st.adminCommand({addshard: seed}));
- }
- awaitRSClientHosts(
- new Mongo(st.s.host), replTest.getSecondaries(), {ok: true, secondary: true});
-
- assert.soon(function() {
- var x = st.chunkDiff(coll.getName(), coll.getDB().getName());
- print("chunk diff: " + x);
- return x < 2;
- }, "no balance happened", 30 * 60 * 1000);
-
- try {
- assert.eq(300, coll.find().itcount());
- } catch (e) {
- // Expected. First query might get transport error and need to reconnect.
- printjson(e);
- assert.eq(300, coll.find().itcount());
- }
- print("Shard added successfully");
- }
-
- var st = new ShardingTest(
- {shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, other: {chunkSize: 1, enableBalancer: true}});
-
- // Pending resolution of SERVER-8598, we need to wait for deletion after chunk migrations to
- // avoid a pending delete re-creating a database after it was dropped.
- st.s.getDB("config").settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
-
- var conn = new Mongo(st.s.host);
- var coll = conn.getCollection("test.remove2");
- coll.drop();
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB().getName()}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {i: 1}}));
-
- // Setup initial data
- var str = 'a';
- while (str.length < 1024 * 16) {
- str += str;
- }
-
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 300; i++) {
- bulk.insert({i: i % 10, str: str});
+ return ('completed' === res.state);
+ }, "failed to remove shard: " + tojson(res));
+
+ // Drop the database so the shard can be re-added.
+ assert.commandWorked(replTest.getPrimary().getDB(coll.getDB().getName()).dropDatabase());
+}
+
+function addShard(st, replTest) {
+ var seed = seedString(replTest);
+ print("Adding shard with seed: " + seed);
+ try {
+ assert.eq(true, st.adminCommand({addshard: seed}));
+ } catch (e) {
+ print("First attempt to addShard failed, trying again");
+        // A transport error on the first attempt is expected; make sure the second attempt goes through.
+ assert.eq(true, st.adminCommand({addshard: seed}));
}
- assert.writeOK(bulk.execute());
-
- assert.eq(300, coll.find().itcount());
+ awaitRSClientHosts(
+ new Mongo(st.s.host), replTest.getSecondaries(), {ok: true, secondary: true});
assert.soon(function() {
- var x = st.chunkDiff('remove2', "test");
+ var x = st.chunkDiff(coll.getName(), coll.getDB().getName());
print("chunk diff: " + x);
return x < 2;
}, "no balance happened", 30 * 60 * 1000);
- assert.eq(300, coll.find().itcount());
-
- st.printShardingStatus();
-
- var rst1 = st.rs1;
- // Remove shard and add it back in, without shutting it down.
- jsTestLog("Attempting to remove shard and add it back in");
- removeShard(st, rst1);
- addShard(st, rst1);
-
- // Remove shard, restart set, then add it back in.
- jsTestLog("Attempting to remove shard, restart the set, and then add it back in");
- var originalSeed = seedString(rst1);
-
- removeShard(st, rst1);
- rst1.stopSet();
- print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
- sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
-
- rst1.startSet({restart: true});
- rst1.initiate();
- rst1.awaitReplication();
-
- assert.eq(
- originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
- addShard(st, rst1);
-
- // Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up
- // and use it.
- //
- // TODO: test this both with AND without waiting for the ReplicaSetMonitor to be cleaned up.
- //
- // This part doesn't pass, even without cleaning up the ReplicaSetMonitor - see SERVER-5900.
- /*
- printjson( conn.getDB('admin').runCommand({movePrimary : 'test2', to : rst1.name}) );
- printjson( conn.getDB('admin').runCommand({setParameter : 1, replMonitorMaxFailedChecks : 5}) );
- jsTestLog( "Shutting down set" )
- rst1.stopSet();
- jsTestLog( "sleeping for 20 seconds to make sure ReplicaSetMonitor gets cleaned up");
- sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
-
- // Should fail since rst1 is the primary for test2
- assert.throws(function() {conn.getDB('test2').foo.find().itcount()});
- jsTestLog( "Bringing set back up" );
- rst1.startSet();
- rst1.initiate();
- rst1.awaitReplication();
-
- jsTestLog( "Checking that set is usable again" );
- //conn.getDB('admin').runCommand({flushRouterConfig:1}); // Uncommenting this makes test pass
- conn.getDB('test2').foo.insert({a:1});
- gle = conn.getDB('test2').runCommand('getLastError');
- if ( !gle.ok ) {
- // Expected. First write will fail and need to re-connect
- print( "write failed" );
- printjson( gle );
- conn.getDB('test2').foo.insert({a:1});
- assert( conn.getDB('test2').getLastErrorObj().ok );
+ try {
+ assert.eq(300, coll.find().itcount());
+ } catch (e) {
+        // Expected. The first query might get a transport error and need to reconnect.
+ printjson(e);
+ assert.eq(300, coll.find().itcount());
}
-
- assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
- assert( conn.getDB('test2').dropDatabase().ok );
- */
-
- // Remove shard and add a new shard with the same replica set and shard name, but different
- // ports
- jsTestLog("Attempt removing shard and adding a new shard with the same Replica Set name");
- removeShard(st, rst1);
- rst1.stopSet();
- print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
- sleep(60000);
-
- var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
- rst2.startSet({shardsvr: ""});
- rst2.initiate();
- rst2.awaitReplication();
-
- addShard(st, rst2);
- printjson(st.admin.runCommand({movePrimary: 'test2', to: rst2.name}));
-
- assert.eq(300, coll.find().itcount());
- conn.getDB('test2').foo.insert({a: 1});
- assert.eq(1, conn.getDB('test2').foo.find().itcount());
-
- // Can't shut down with rst2 in the set or ShardingTest will fail trying to cleanup on shutdown.
- // Have to take out rst2 and put rst1 back into the set so that it can clean up.
- jsTestLog("Putting ShardingTest back to state it expects");
- printjson(st.admin.runCommand({movePrimary: 'test2', to: st.rs0.name}));
- removeShard(st, rst2);
- rst2.stopSet();
-
- print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
- sleep(60000);
-
- rst1.startSet({restart: true});
- rst1.initiate();
- rst1.awaitReplication();
-
- assert.eq(
- originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
- addShard(st, rst1);
-
- st.stop();
+ print("Shard added successfully");
+}
+
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, other: {chunkSize: 1, enableBalancer: true}});
+
+// Pending resolution of SERVER-8598, we need to wait for deletion after chunk migrations to
+// avoid a pending delete re-creating a database after it was dropped.
+st.s.getDB("config").settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
+
+var conn = new Mongo(st.s.host);
+var coll = conn.getCollection("test.remove2");
+coll.drop();
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB().getName()}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {i: 1}}));
+
+// Set up initial data
+var str = 'a';
+while (str.length < 1024 * 16) {
+ str += str;
+}
+
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10, str: str});
+}
+assert.writeOK(bulk.execute());
+
+assert.eq(300, coll.find().itcount());
+
+assert.soon(function() {
+ var x = st.chunkDiff('remove2', "test");
+ print("chunk diff: " + x);
+ return x < 2;
+}, "no balance happened", 30 * 60 * 1000);
+
+assert.eq(300, coll.find().itcount());
+
+st.printShardingStatus();
+
+var rst1 = st.rs1;
+// Remove shard and add it back in, without shutting it down.
+jsTestLog("Attempting to remove shard and add it back in");
+removeShard(st, rst1);
+addShard(st, rst1);
+
+// Remove shard, restart set, then add it back in.
+jsTestLog("Attempting to remove shard, restart the set, and then add it back in");
+var originalSeed = seedString(rst1);
+
+removeShard(st, rst1);
+rst1.stopSet();
+print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
+sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
+
+rst1.startSet({restart: true});
+rst1.initiate();
+rst1.awaitReplication();
+
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
+
+// Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up
+// and use it.
+//
+// TODO: test this both with AND without waiting for the ReplicaSetMonitor to be cleaned up.
+//
+// This part doesn't pass, even without cleaning up the ReplicaSetMonitor - see SERVER-5900.
+/*
+printjson( conn.getDB('admin').runCommand({movePrimary : 'test2', to : rst1.name}) );
+printjson( conn.getDB('admin').runCommand({setParameter : 1, replMonitorMaxFailedChecks : 5}) );
+jsTestLog( "Shutting down set" )
+rst1.stopSet();
+jsTestLog( "sleeping for 20 seconds to make sure ReplicaSetMonitor gets cleaned up");
+sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
+
+// Should fail since rst1 is the primary for test2
+assert.throws(function() {conn.getDB('test2').foo.find().itcount()});
+jsTestLog( "Bringing set back up" );
+rst1.startSet();
+rst1.initiate();
+rst1.awaitReplication();
+
+jsTestLog( "Checking that set is usable again" );
+//conn.getDB('admin').runCommand({flushRouterConfig:1}); // Uncommenting this makes test pass
+conn.getDB('test2').foo.insert({a:1});
+gle = conn.getDB('test2').runCommand('getLastError');
+if ( !gle.ok ) {
+ // Expected. First write will fail and need to re-connect
+ print( "write failed" );
+ printjson( gle );
+ conn.getDB('test2').foo.insert({a:1});
+ assert( conn.getDB('test2').getLastErrorObj().ok );
+}
+
+assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
+assert( conn.getDB('test2').dropDatabase().ok );
+*/
+
+// Remove the shard and add a new shard with the same replica set and shard name, but
+// different ports.
+jsTestLog("Attempting to remove shard and add a new shard with the same replica set name");
+removeShard(st, rst1);
+rst1.stopSet();
+print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
+sleep(60000);
+
+var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
+rst2.startSet({shardsvr: ""});
+rst2.initiate();
+rst2.awaitReplication();
+
+addShard(st, rst2);
+printjson(st.admin.runCommand({movePrimary: 'test2', to: rst2.name}));
+
+assert.eq(300, coll.find().itcount());
+conn.getDB('test2').foo.insert({a: 1});
+assert.eq(1, conn.getDB('test2').foo.find().itcount());
+
+// Can't shut down with rst2 in the set or ShardingTest will fail trying to clean up on
+// shutdown. Have to take out rst2 and put rst1 back into the set so that it can clean up.
+jsTestLog("Putting ShardingTest back to state it expects");
+printjson(st.admin.runCommand({movePrimary: 'test2', to: st.rs0.name}));
+removeShard(st, rst2);
+rst2.stopSet();
+
+print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
+sleep(60000);
+
+rst1.startSet({restart: true});
+rst1.initiate();
+rst1.awaitReplication();
+
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
+
+st.stop();
})();
diff --git a/jstests/sharding/remove3.js b/jstests/sharding/remove3.js
index ab066f92f9d..b9d45baf789 100644
--- a/jstests/sharding/remove3.js
+++ b/jstests/sharding/remove3.js
@@ -1,44 +1,43 @@
// Validates the remove/drain shard functionality when there is data on the shard being removed
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2});
+var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}}));
- // Insert some documents and make sure there are docs on both shards
- st.s0.getDB('TestDB').Coll.insert({_id: -1, value: 'Negative value'});
- st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'});
+// Insert some documents and make sure there are docs on both shards
+st.s0.getDB('TestDB').Coll.insert({_id: -1, value: 'Negative value'});
+st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'});
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
- // Make sure both mongos instances know of the latest metadata
- assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
- assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+// Make sure both mongos instances know of the latest metadata
+assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
- // Remove st.shard1.shardName
- var removeRes;
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
- assert.eq('started', removeRes.state);
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
- assert.eq('ongoing', removeRes.state);
+// Remove st.shard1.shardName
+var removeRes;
+removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
+assert.eq('started', removeRes.state);
+removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
+assert.eq('ongoing', removeRes.state);
- // Move the one chunk off st.shard1.shardName
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+// Move the one chunk off st.shard1.shardName
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- // Remove shard must succeed now
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
- assert.eq('completed', removeRes.state);
+// Remove shard must succeed now
+removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName}));
+assert.eq('completed', removeRes.state);
- // Make sure both mongos instance refresh their metadata and do not reference the missing shard
- assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
- assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
-
- st.stop();
+// Make sure both mongos instances refresh their metadata and do not reference the missing shard
+assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+st.stop();
})();
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index 92d1c46ba50..bcd37cdf570 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -2,84 +2,84 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}});
+var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}});
- var db = s.getDB("test");
- var replTest = s.rs0;
+var db = s.getDB("test");
+var replTest = s.rs0;
- assert.writeOK(db.foo.insert({_id: 1}));
- db.foo.renameCollection('bar');
- assert.isnull(db.getLastError(), '1.0');
- assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
- assert.eq(db.bar.count(), 1, '1.2');
- assert.eq(db.foo.count(), 0, '1.3');
+assert.writeOK(db.foo.insert({_id: 1}));
+db.foo.renameCollection('bar');
+assert.isnull(db.getLastError(), '1.0');
+assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
+assert.eq(db.bar.count(), 1, '1.2');
+assert.eq(db.foo.count(), 0, '1.3');
- assert.writeOK(db.foo.insert({_id: 2}));
- db.foo.renameCollection('bar', true);
- assert.isnull(db.getLastError(), '2.0');
- assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
- assert.eq(db.bar.count(), 1, '2.2');
- assert.eq(db.foo.count(), 0, '2.3');
+assert.writeOK(db.foo.insert({_id: 2}));
+db.foo.renameCollection('bar', true);
+assert.isnull(db.getLastError(), '2.0');
+assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
+assert.eq(db.bar.count(), 1, '2.2');
+assert.eq(db.foo.count(), 0, '2.3');
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard0.shardName);
- // Ensure renaming to or from a sharded collection fails.
- jsTest.log('Testing renaming sharded collections');
- assert.commandWorked(
- s.s0.adminCommand({shardCollection: 'test.shardedColl', key: {_id: 'hashed'}}));
+// Ensure renaming to or from a sharded collection fails.
+jsTest.log('Testing renaming sharded collections');
+assert.commandWorked(
+ s.s0.adminCommand({shardCollection: 'test.shardedColl', key: {_id: 'hashed'}}));
- // Renaming from a sharded collection
- assert.commandFailed(db.shardedColl.renameCollection('somethingElse'));
+// Renaming from a sharded collection
+assert.commandFailed(db.shardedColl.renameCollection('somethingElse'));
- // Renaming to a sharded collection
- assert.commandFailed(db.bar.renameCollection('shardedColl'));
+// Renaming to a sharded collection
+assert.commandFailed(db.bar.renameCollection('shardedColl'));
- const dropTarget = true;
- assert.commandFailed(db.bar.renameCollection('shardedColl', dropTarget));
+const dropTarget = true;
+assert.commandFailed(db.bar.renameCollection('shardedColl', dropTarget));
- jsTest.log('Testing renaming sharded collections, directly on the shard');
- var primary = replTest.getPrimary();
- assert.commandFailed(primary.getDB('test').shardedColl.renameCollection('somethingElse'));
- assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl'));
- assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl', dropTarget));
+jsTest.log('Testing renaming sharded collections, directly on the shard');
+var primary = replTest.getPrimary();
+assert.commandFailed(primary.getDB('test').shardedColl.renameCollection('somethingElse'));
+assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl'));
+assert.commandFailed(primary.getDB('test').bar.renameCollection('shardedColl', dropTarget));
- jsTest.log("Testing write concern (1)");
+jsTest.log("Testing write concern (1)");
- assert.writeOK(db.foo.insert({_id: 3}));
- db.foo.renameCollection('bar', true);
+assert.writeOK(db.foo.insert({_id: 3}));
+db.foo.renameCollection('bar', true);
- var ans = db.runCommand({getLastError: 1, w: 3});
- printjson(ans);
- assert.isnull(ans.err, '3.0');
+var ans = db.runCommand({getLastError: 1, w: 3});
+printjson(ans);
+assert.isnull(ans.err, '3.0');
- assert.eq(db.bar.findOne(), {_id: 3}, '3.1');
- assert.eq(db.bar.count(), 1, '3.2');
- assert.eq(db.foo.count(), 0, '3.3');
+assert.eq(db.bar.findOne(), {_id: 3}, '3.1');
+assert.eq(db.bar.count(), 1, '3.2');
+assert.eq(db.foo.count(), 0, '3.3');
- // Ensure write concern works by shutting down 1 node in a replica set shard
- jsTest.log("Testing write concern (2)");
+// Ensure write concern works by shutting down 1 node in a replica set shard
+jsTest.log("Testing write concern (2)");
- // Kill any node. Don't care if it's a primary or secondary.
- replTest.stop(0);
+// Kill any node; it doesn't matter whether it's a primary or a secondary.
+replTest.stop(0);
- // Call getPrimary() to populate replTest._slaves.
- replTest.getPrimary();
- let liveSlaves = replTest._slaves.filter(function(node) {
- return node.host !== replTest.nodes[0].host;
- });
- replTest.awaitSecondaryNodes(null, liveSlaves);
- awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
+// Call getPrimary() to populate replTest._slaves.
+replTest.getPrimary();
+let liveSlaves = replTest._slaves.filter(function(node) {
+ return node.host !== replTest.nodes[0].host;
+});
+replTest.awaitSecondaryNodes(null, liveSlaves);
+awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
- assert.writeOK(db.foo.insert({_id: 4}));
- assert.commandWorked(db.foo.renameCollection('bar', true));
+assert.writeOK(db.foo.insert({_id: 4}));
+assert.commandWorked(db.foo.renameCollection('bar', true));
- ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
- assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
+ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
+assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
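+
+// Sketch of why this times out: with one replica stopped, only two nodes can
+// acknowledge the write, so w: 3 can never be satisfied and getLastError reports
+// "timeout" once the 5000 ms wtimeout elapses.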
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index e9c435ecff1..de2fa50bcea 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -1,29 +1,28 @@
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({name: 'rename_across_mongos', shards: 1, mongos: 2});
- var dbName = 'RenameDB';
+var st = new ShardingTest({name: 'rename_across_mongos', shards: 1, mongos: 2});
+var dbName = 'RenameDB';
- st.s0.getDB(dbName).dropDatabase();
- st.s1.getDB(dbName).dropDatabase();
+st.s0.getDB(dbName).dropDatabase();
+st.s1.getDB(dbName).dropDatabase();
- // Create collection on first mongos and insert a document
- assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
- assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
+// Create collection on first mongos and insert a document
+assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
+assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
- if (st.configRS) {
- // Ensure that the second mongos will see the newly created database metadata when
- // it tries to do the collection rename.
- st.configRS.awaitLastOpCommitted();
- }
+if (st.configRS) {
+ // Ensure that the second mongos will see the newly created database metadata when
+ // it tries to do the collection rename.
+ st.configRS.awaitLastOpCommitted();
+}
- // Rename collection on second mongos and ensure the document is found
- assert.commandWorked(
- st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
- assert.eq([{Key: 1, Value: 1}],
- st.s1.getDB(dbName).CollNameAfterRename.find({}, {_id: false}).toArray());
-
- st.stop();
+// Rename collection on second mongos and ensure the document is found
+assert.commandWorked(
+ st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
+assert.eq([{Key: 1, Value: 1}],
+ st.s1.getDB(dbName).CollNameAfterRename.find({}, {_id: false}).toArray());
+st.stop();
})();
diff --git a/jstests/sharding/repl_monitor_refresh.js b/jstests/sharding/repl_monitor_refresh.js
index b3d91d04065..20f1d930d98 100644
--- a/jstests/sharding/repl_monitor_refresh.js
+++ b/jstests/sharding/repl_monitor_refresh.js
@@ -5,78 +5,77 @@ load("jstests/replsets/rslib.js");
* become invalid when a replica set reconfig happens.
*/
(function() {
- "use strict";
-
- // Skip db hash check and shard replication since the removed node has wrong config and is still
- // alive.
- TestData.skipCheckDBHashes = true;
- TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
-
- var NODE_COUNT = 3;
- var st = new ShardingTest({shards: {rs0: {nodes: NODE_COUNT, oplogSize: 10}}});
- var replTest = st.rs0;
- var mongos = st.s;
-
- var shardDoc = mongos.getDB('config').shards.findOne();
- assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
-
- /* Make sure that the first node is not the primary (by making the second one primary).
- * We need to do this since the ReplicaSetMonitor iterates over the nodes one
- * by one and you can't remove a node that is currently the primary.
- */
- var connPoolStats = mongos.getDB('admin').runCommand({connPoolStats: 1});
- var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
-
- var priConn = replTest.getPrimary();
- var confDoc = priConn.getDB("local").system.replset.findOne();
-
- for (var idx = 0; idx < confDoc.members.length; idx++) {
- if (confDoc.members[idx].host == targetHostName) {
- confDoc.members[idx].priority = 100;
- } else {
- confDoc.members[idx].priority = 1;
- }
- }
-
- confDoc.version++;
-
- jsTest.log('Changing conf to ' + tojson(confDoc));
-
- reconfig(replTest, confDoc);
+"use strict";
- awaitRSClientHosts(mongos, {host: targetHostName}, {ok: true, ismaster: true});
+// Skip the db hash check and shard replication, since the removed node has the wrong
+// config and is still alive.
+TestData.skipCheckDBHashes = true;
+TestData.skipAwaitingReplicationOnShardsBeforeCheckingUUIDs = true;
- // Remove first node from set
- confDoc.members.shift();
- confDoc.version++;
+var NODE_COUNT = 3;
+var st = new ShardingTest({shards: {rs0: {nodes: NODE_COUNT, oplogSize: 10}}});
+var replTest = st.rs0;
+var mongos = st.s;
- reconfig(replTest, confDoc);
+var shardDoc = mongos.getDB('config').shards.findOne();
+assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
- jsTest.log("Waiting for mongos to reflect change in shard replica set membership.");
- var replView;
- assert.soon(
- function() {
- var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
- replView = connPoolStats.replicaSets[replTest.name].hosts;
- return replView.length == confDoc.members.length;
- },
- function() {
- return ("Expected to find " + confDoc.members.length + " nodes but found " +
- replView.length + " in " + tojson(replView));
- });
-
- jsTest.log("Waiting for config.shards to reflect change in shard replica set membership.");
- assert.soon(
- function() {
- shardDoc = mongos.getDB('config').shards.findOne();
- // seed list should contain one less node
- return shardDoc.host.split(',').length == confDoc.members.length;
- },
- function() {
- return ("Expected to find " + confDoc.members.length + " nodes but found " +
- shardDoc.host.split(',').length + " in " + shardDoc.host);
- });
+/* Make sure that the first node is not the primary (by making the second one primary).
+ * We need to do this since the ReplicaSetMonitor iterates over the nodes one
+ * by one and you can't remove a node that is currently the primary.
+ */
+var connPoolStats = mongos.getDB('admin').runCommand({connPoolStats: 1});
+var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
- st.stop();
+var priConn = replTest.getPrimary();
+var confDoc = priConn.getDB("local").system.replset.findOne();
+for (var idx = 0; idx < confDoc.members.length; idx++) {
+ if (confDoc.members[idx].host == targetHostName) {
+ confDoc.members[idx].priority = 100;
+ } else {
+ confDoc.members[idx].priority = 1;
+ }
+}
+
+confDoc.version++;
+
+jsTest.log('Changing conf to ' + tojson(confDoc));
+
+reconfig(replTest, confDoc);
+
+awaitRSClientHosts(mongos, {host: targetHostName}, {ok: true, ismaster: true});
+
+// Remove first node from set
+confDoc.members.shift();
+confDoc.version++;
+
+reconfig(replTest, confDoc);
+
+jsTest.log("Waiting for mongos to reflect change in shard replica set membership.");
+var replView;
+assert.soon(
+ function() {
+ var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
+ replView = connPoolStats.replicaSets[replTest.name].hosts;
+ return replView.length == confDoc.members.length;
+ },
+ function() {
+ return ("Expected to find " + confDoc.members.length + " nodes but found " +
+ replView.length + " in " + tojson(replView));
+ });
+
+jsTest.log("Waiting for config.shards to reflect change in shard replica set membership.");
+assert.soon(
+ function() {
+ shardDoc = mongos.getDB('config').shards.findOne();
+ // seed list should contain one less node
+ return shardDoc.host.split(',').length == confDoc.members.length;
+ },
+ function() {
+ return ("Expected to find " + confDoc.members.length + " nodes but found " +
+ shardDoc.host.split(',').length + " in " + shardDoc.host);
+ });
+
+st.stop();
}());
diff --git a/jstests/sharding/replication_with_undefined_shard_key.js b/jstests/sharding/replication_with_undefined_shard_key.js
index 8e37c171735..2da48889a4c 100644
--- a/jstests/sharding/replication_with_undefined_shard_key.js
+++ b/jstests/sharding/replication_with_undefined_shard_key.js
@@ -1,30 +1,30 @@
// Test for SERVER-31953 where secondaries crash when replicating an oplog entry where the document
// identifier in the oplog entry contains a shard key value that contains an undefined value.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({mongos: 1, config: 1, shard: 1, rs: {nodes: 2}});
- const mongosDB = st.s.getDB("test");
- const mongosColl = mongosDB.mycoll;
+const st = new ShardingTest({mongos: 1, config: 1, shard: 1, rs: {nodes: 2}});
+const mongosDB = st.s.getDB("test");
+const mongosColl = mongosDB.mycoll;
- // Shard the test collection on the "x" field.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(mongosDB.adminCommand({
- shardCollection: mongosColl.getFullName(),
- key: {x: 1},
- }));
+// Shard the test collection on the "x" field.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: mongosColl.getFullName(),
+ key: {x: 1},
+}));
- // Insert a document with a literal undefined value.
- assert.writeOK(mongosColl.insert({x: undefined}));
+// Insert a document with a literal undefined value.
+assert.writeOK(mongosColl.insert({x: undefined}));
- jsTestLog("Doing writes that generate oplog entries including undefined document key");
+jsTestLog("Doing writes that generate oplog entries including undefined document key");
- assert.writeOK(mongosColl.update(
- {},
- {$set: {a: 1}},
- {multi: true, writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
- assert.writeOK(
- mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
+assert.writeOK(mongosColl.update(
+ {},
+ {$set: {a: 1}},
+ {multi: true, writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
+assert.writeOK(
+ mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMs}}));
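+
+// A note on the w: 2 write concern (sketch): waiting for both replica set members
+// forces the secondary to actually replicate these oplog entries, which is the code
+// path that crashed in SERVER-31953 when the document key contained undefined.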
- st.stop();
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index f8b6bf74b21..453c4d980f1 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -19,31 +19,31 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js");
+'use strict';
+load("jstests/replsets/rslib.js");
- var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
- var replTest = st.rs0;
+var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
+var replTest = st.rs0;
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- // The cluster now has the shard information. Then kill the replica set so when mongos restarts
- // and tries to create a ReplSetMonitor for that shard, it will not be able to connect to any of
- // the seed servers.
- // Don't clear the data directory so that the shardIdentity is not deleted.
- replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
+// The cluster now has the shard information. Kill the replica set so that when mongos
+// restarts and tries to create a ReplSetMonitor for that shard, it will not be able to
+// connect to any of the seed servers.
+// Don't clear the data directory so that the shardIdentity is not deleted.
+replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
- st.restartMongos(0);
+st.restartMongos(0);
- replTest.startSet({restart: true, noCleanData: true});
- replTest.awaitSecondaryNodes();
+replTest.startSet({restart: true, noCleanData: true});
+replTest.awaitSecondaryNodes();
- // Verify that the replSetMonitor can reach the restarted set
- awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
- replTest.awaitNodesAgreeOnPrimary();
+// Verify that the replSetMonitor can reach the restarted set
+awaitRSClientHosts(st.s0, replTest.nodes, {ok: true});
+replTest.awaitNodesAgreeOnPrimary();
- assert.writeOK(st.s0.getDB('test').user.insert({x: 1}));
+assert.writeOK(st.s0.getDB('test').user.insert({x: 1}));
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/restart_transactions.js b/jstests/sharding/restart_transactions.js
index caa60e9bce1..d1f505b1d9c 100644
--- a/jstests/sharding/restart_transactions.js
+++ b/jstests/sharding/restart_transactions.js
@@ -5,167 +5,167 @@
* @tags: [requires_sharding, uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- const collName = "restart_transactions";
-
- function runTest(routerDB, directDB) {
- // Set up the underlying collection.
- const routerColl = routerDB[collName];
- assert.commandWorked(
- routerDB.createCollection(routerColl.getName(), {writeConcern: {w: "majority"}}));
-
- //
- // Can restart a transaction that has been aborted.
- //
-
- let txnNumber = 0;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true,
- }));
- assert.commandWorked(directDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
-
- //
- // Cannot restart a transaction that is in progress.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- //
- // Cannot restart a transaction that has completed a retryable write.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand(
- {insert: collName, documents: [{x: txnNumber}], txnNumber: NumberLong(txnNumber)}));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- //
- // Cannot restart a transaction that has been committed.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(directDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- //
- // Cannot restart a transaction that has been prepared.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(directDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
-
- assert.commandWorked(directDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- //
- // Cannot restart a transaction that has been aborted after being prepared.
- //
-
- txnNumber++;
- assert.commandWorked(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(directDB.adminCommand(
- {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- assert.commandWorked(directDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
-
- assert.commandFailedWithCode(directDB.runCommand({
- find: collName,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }),
- 50911);
- }
-
- const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
-
- // Directly connect to the shard primary to simulate internal retries by mongos.
- const shardDBName = "test";
- const shardSession = st.rs0.getPrimary().startSession({causalConsistency: false});
- const shardDB = shardSession.getDatabase(shardDBName);
-
- runTest(st.s.getDB(shardDBName), shardDB);
-
- // TODO SERVER-36632: Consider allowing commands in a transaction to run against the config or
- // admin databases, excluding special collections.
+"use strict";
+
+const collName = "restart_transactions";
+
+function runTest(routerDB, directDB) {
+ // Set up the underlying collection.
+ const routerColl = routerDB[collName];
+ assert.commandWorked(
+ routerDB.createCollection(routerColl.getName(), {writeConcern: {w: "majority"}}));
+
+ //
+ // Can restart a transaction that has been aborted.
+ //
+
+ let txnNumber = 0;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true,
+ }));
+ assert.commandWorked(directDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+
+ //
+ // Cannot restart a transaction that is in progress.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ //
+ // Cannot restart a transaction that has completed a retryable write.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand(
+ {insert: collName, documents: [{x: txnNumber}], txnNumber: NumberLong(txnNumber)}));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ //
+ // Cannot restart a transaction that has been committed.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(directDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+ }));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ //
+ // Cannot restart a transaction that has been prepared.
+ //
+
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(directDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+
+ assert.commandWorked(directDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
//
- // Directly connect to the config sever primary to simulate internal retries by mongos.
- // const configDBName = "config";
- // const configSession = st.configRS.getPrimary().startSession({causalConsistency: false});
- // const configDB = configSession.getDatabase(configDBName);
+ // Cannot restart a transaction that has been aborted after being prepared.
//
- // runTest(st.s.getDB(configDBName), configDB);
- st.stop();
+ txnNumber++;
+ assert.commandWorked(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(directDB.adminCommand(
+ {prepareTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+ assert.commandWorked(directDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+
+ assert.commandFailedWithCode(directDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+ }),
+ 50911);
+}
+
+const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+
+// Directly connect to the shard primary to simulate internal retries by mongos.
+const shardDBName = "test";
+const shardSession = st.rs0.getPrimary().startSession({causalConsistency: false});
+const shardDB = shardSession.getDatabase(shardDBName);
+
+runTest(st.s.getDB(shardDBName), shardDB);
+
+// TODO SERVER-36632: Consider allowing commands in a transaction to run against the config or
+// admin databases, excluding special collections.
+//
+// Directly connect to the config server primary to simulate internal retries by mongos.
+// const configDBName = "config";
+// const configSession = st.configRS.getPrimary().startSession({causalConsistency: false});
+// const configDB = configSession.getDatabase(configDBName);
+//
+// runTest(st.s.getDB(configDBName), configDB);
+
+st.stop();
})();
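
Taken together, the assertions above pin down when startTransaction: true may reuse a
transaction number: after an unprepared abort it may, while in any other state (in
progress, after a retryable write, committed, prepared, or aborted after being prepared)
it fails with code 50911. A condensed sketch of the two contrasting cases, assuming a
shell session on a replica set and an illustrative collection "c":

// Minimal sketch of the restart rule; `session` and the "c" collection are
// illustrative, not part of the test above.
const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");
assert.writeOK(sessionDb.c.insert({_id: 0}));  // Ensure the collection exists.
const txnNumber = NumberLong(1);

// Start a transaction, abort it, then restart the same txnNumber: allowed.
assert.commandWorked(sessionDb.runCommand(
    {find: "c", txnNumber: txnNumber, autocommit: false, startTransaction: true}));
assert.commandWorked(sessionDb.adminCommand(
    {abortTransaction: 1, txnNumber: txnNumber, autocommit: false}));
assert.commandWorked(sessionDb.runCommand(
    {find: "c", txnNumber: txnNumber, autocommit: false, startTransaction: true}));

// Issuing startTransaction again while that transaction is in progress is
// rejected with code 50911, as in the other non-restartable states above.
assert.commandFailedWithCode(
    sessionDb.runCommand(
        {find: "c", txnNumber: txnNumber, autocommit: false, startTransaction: true}),
    50911);
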
diff --git a/jstests/sharding/resume_change_stream.js b/jstests/sharding/resume_change_stream.js
index 9b5e33d0173..19c53012fda 100644
--- a/jstests/sharding/resume_change_stream.js
+++ b/jstests/sharding/resume_change_stream.js
@@ -2,211 +2,211 @@
// We need to use a readConcern in this test, which requires read commands.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- "use strict";
+"use strict";
+
+load('jstests/replsets/rslib.js'); // For getLatestOp.
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+
+// For supportsMajorityReadConcern.
+load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+
+// This test only works on storage engines that support committed reads; skip it if the
+// configured engine doesn't support it.
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const oplogSize = 1; // size in MB
+const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ oplogSize: oplogSize,
+ enableMajorityReadConcern: '',
+ // Use the noop writer with a higher frequency for periodic noops to speed up the test.
+ setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
+ }
+});
- load('jstests/replsets/rslib.js'); // For getLatestOp.
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- // For supportsMajorityReadConcern.
- load('jstests/multiVersion/libs/causal_consistency_helpers.js');
+let cst = new ChangeStreamTest(mongosDB);
- // This test only works on storage engines that support committed reads, skip it if the
- // configured engine doesn't support it.
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+function testResume(mongosColl, collToWatch) {
+ mongosColl.drop();
- const oplogSize = 1; // size in MB
- const st = new ShardingTest({
- shards: 2,
- rs: {
- nodes: 1,
- oplogSize: oplogSize,
- enableMajorityReadConcern: '',
- // Use the noop writer with a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}
- }
- });
+ // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
+ // Shard the test collection on _id.
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- let cst = new ChangeStreamTest(mongosDB);
+ // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
+ assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- function testResume(mongosColl, collToWatch) {
- mongosColl.drop();
+ // Move the [0, MaxKey] chunk to st.shard1.shardName.
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+ // Write a document to each chunk.
+ assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+ let changeStream = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+ // We awaited the replication of the first writes, so the change stream shouldn't return
+ // them.
+ assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
- // Move the [0, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+ // Record current time to resume a change stream later in the test.
+ const resumeTimeFirstUpdate = mongosDB.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- // Write a document to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
- let changeStream = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
+ // Test that we see the two writes, and remember their resume tokens.
+ let next = cst.getOneChange(changeStream);
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey._id, -1);
+ const resumeTokenFromFirstUpdateOnShard0 = next._id;
- // We awaited the replication of the first writes, so the change stream shouldn't return
- // them.
- assert.writeOK(mongosColl.update({_id: -1}, {$set: {updated: true}}));
+ next = cst.getOneChange(changeStream);
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey._id, 1);
+ const resumeTokenFromFirstUpdateOnShard1 = next._id;
- // Record current time to resume a change stream later in the test.
- const resumeTimeFirstUpdate = mongosDB.runCommand({isMaster: 1}).$clusterTime.clusterTime;
+ // Write some additional documents, then test that it's possible to resume after the first
+ // update.
+ assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.update({_id: 1}, {$set: {updated: true}}));
+ changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
+ collection: collToWatch
+ });
- // Test that we see the two writes, and remember their resume tokens.
- let next = cst.getOneChange(changeStream);
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey._id, -1);
- const resumeTokenFromFirstUpdateOnShard0 = next._id;
+ for (let nextExpectedId of [1, -2, 2]) {
+ assert.eq(cst.getOneChange(changeStream).documentKey._id, nextExpectedId);
+ }
- next = cst.getOneChange(changeStream);
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey._id, 1);
- const resumeTokenFromFirstUpdateOnShard1 = next._id;
+ // Test that the stream can't resume if the resume token is no longer present in the oplog.
+
+ // Roll over the entire oplog on the shard with the resume token for the first update.
+ const shardWithResumeToken = st.rs1.getPrimary(); // Resume from shard 1.
+ const mostRecentOplogEntry = getLatestOp(shardWithResumeToken);
+ assert.neq(mostRecentOplogEntry, null);
+ const largeStr = new Array(4 * 1024 * oplogSize).join('abcdefghi');
+ let i = 0;
+
+ function oplogIsRolledOver() {
+ // The oplog has rolled over if the op that used to be newest is now older than the
+ // oplog's current oldest entry. Said another way, the oplog is rolled over when
+ // everything in the oplog is newer than what used to be the newest entry.
+ return bsonWoCompare(
+ mostRecentOplogEntry.ts,
+ getLeastRecentOp({server: shardWithResumeToken, readConcern: "majority"}).ts) <
+ 0;
+ }
- // Write some additional documents, then test that it's possible to resume after the first
- // update.
- assert.writeOK(mongosColl.insert({_id: -2}, {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+ while (!oplogIsRolledOver()) {
+ let idVal = 100 + (i++);
+ assert.writeOK(
+ mongosColl.insert({_id: idVal, long_str: largeStr}, {writeConcern: {w: "majority"}}));
+ sleep(100);
+ }
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
- collection: collToWatch
- });
+ ChangeStreamTest.assertChangeStreamThrowsCode({
+ db: mongosDB,
+ collName: collToWatch,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard1}}],
+ expectedCode: 40576
+ });
- for (let nextExpectedId of[1, -2, 2]) {
- assert.eq(cst.getOneChange(changeStream).documentKey._id, nextExpectedId);
- }
-
- // Test that the stream can't resume if the resume token is no longer present in the oplog.
-
- // Roll over the entire oplog on the shard with the resume token for the first update.
- const shardWithResumeToken = st.rs1.getPrimary(); // Resume from shard 1.
- const mostRecentOplogEntry = getLatestOp(shardWithResumeToken);
- assert.neq(mostRecentOplogEntry, null);
- const largeStr = new Array(4 * 1024 * oplogSize).join('abcdefghi');
- let i = 0;
-
- function oplogIsRolledOver() {
- // The oplog has rolled over if the op that used to be newest is now older than the
- // oplog's current oldest entry. Said another way, the oplog is rolled over when
- // everything in the oplog is newer than what used to be the newest entry.
- return bsonWoCompare(mostRecentOplogEntry.ts, getLeastRecentOp({
- server: shardWithResumeToken,
- readConcern: "majority"
- }).ts) < 0;
- }
-
- while (!oplogIsRolledOver()) {
- let idVal = 100 + (i++);
- assert.writeOK(mongosColl.insert({_id: idVal, long_str: largeStr},
- {writeConcern: {w: "majority"}}));
- sleep(100);
- }
-
- ChangeStreamTest.assertChangeStreamThrowsCode({
- db: mongosDB,
- collName: collToWatch,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard1}}],
- expectedCode: 40576
- });
+ ChangeStreamTest.assertChangeStreamThrowsCode({
+ db: mongosDB,
+ collName: collToWatch,
+ pipeline: [{$changeStream: {startAtOperationTime: resumeTimeFirstUpdate}}],
+ expectedCode: 40576
+ });
- ChangeStreamTest.assertChangeStreamThrowsCode({
- db: mongosDB,
- collName: collToWatch,
- pipeline: [{$changeStream: {startAtOperationTime: resumeTimeFirstUpdate}}],
- expectedCode: 40576
- });
+ // Test that the change stream can't resume if the resume token *is* present in the oplog,
+ // but one of the shards has rolled over its oplog enough that it doesn't have a long enough
+ // history to resume. Since we just rolled over the oplog on shard 1, we know that
+ // 'resumeTokenFromFirstUpdateOnShard0' is still present on shard 0, but shard 1 doesn't
+ // have any changes earlier than that, so won't be able to resume.
+ ChangeStreamTest.assertChangeStreamThrowsCode({
+ db: mongosDB,
+ collName: collToWatch,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
+ expectedCode: 40576
+ });
- // Test that the change stream can't resume if the resume token *is* present in the oplog,
- // but one of the shards has rolled over its oplog enough that it doesn't have a long enough
- // history to resume. Since we just rolled over the oplog on shard 1, we know that
- // 'resumeTokenFromFirstUpdateOnShard0' is still present on shard 0, but shard 1 doesn't
- // have any changes earlier than that, so won't be able to resume.
- ChangeStreamTest.assertChangeStreamThrowsCode({
- db: mongosDB,
- collName: collToWatch,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdateOnShard0}}],
- expectedCode: 40576
- });
+ // Drop the collection.
+ assert(mongosColl.drop());
+
+ // Shard the test collection on shardKey.
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
+
+ // Split the collection into 2 chunks: [MinKey, 50), [50, MaxKey].
+ assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 50}}));
+
+ // Move the [50, MaxKey] chunk to st.shard1.shardName.
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {shardKey: 51}, to: st.rs1.getURL()}));
+
+ const numberOfDocs = 100;
+
+ // Insert test documents.
+ for (let counter = 0; counter < numberOfDocs / 5; ++counter) {
+ assert.writeOK(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
+ {writeConcern: {w: "majority"}}));
+ assert.writeOK(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
+ {writeConcern: {w: "majority"}}));
+ }
- // Drop the collection.
- assert(mongosColl.drop());
-
- // Shard the test collection on shardKey.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 50), [50, MaxKey].
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 50}}));
-
- // Move the [50, MaxKey] chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {shardKey: 51}, to: st.rs1.getURL()}));
-
- const numberOfDocs = 100;
-
- // Insert test documents.
- for (let counter = 0; counter < numberOfDocs / 5; ++counter) {
- assert.writeOK(mongosColl.insert({_id: "abcd" + counter, shardKey: counter * 5 + 0},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "Abcd" + counter, shardKey: counter * 5 + 1},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "aBcd" + counter, shardKey: counter * 5 + 2},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abCd" + counter, shardKey: counter * 5 + 3},
- {writeConcern: {w: "majority"}}));
- assert.writeOK(mongosColl.insert({_id: "abcD" + counter, shardKey: counter * 5 + 4},
- {writeConcern: {w: "majority"}}));
- }
-
- let allChangesCursor = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
-
- // Perform the multi-update that will induce timestamp collisions
- assert.writeOK(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
-
- // Loop over documents and open inner change streams resuming from a specified position.
- // Note we skip the last document as it does not have the next document so we would
- // hang indefinitely.
- for (let counter = 0; counter < numberOfDocs - 1; ++counter) {
- let next = cst.getOneChange(allChangesCursor);
-
- const resumeToken = next._id;
- const caseInsensitive = {locale: "en_US", strength: 2};
- let resumedCaseInsensitiveCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- collection: collToWatch,
- aggregateOptions: {collation: caseInsensitive}
- });
- cst.getOneChange(resumedCaseInsensitiveCursor);
- }
+ let allChangesCursor = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collToWatch, includeToken: true});
+
+ // Perform the multi-update that will induce timestamp collisions.
+ assert.writeOK(mongosColl.update({}, {$set: {updated: true}}, {multi: true}));
+
+ // Loop over documents and open inner change streams resuming from a specified position.
+ // Note that we skip the last document: there is no change after it for the resumed
+ // stream to return, so waiting for one would hang indefinitely.
+ for (let counter = 0; counter < numberOfDocs - 1; ++counter) {
+ let next = cst.getOneChange(allChangesCursor);
+
+ const resumeToken = next._id;
+ const caseInsensitive = {locale: "en_US", strength: 2};
+ let resumedCaseInsensitiveCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ collection: collToWatch,
+ aggregateOptions: {collation: caseInsensitive}
+ });
+ cst.getOneChange(resumedCaseInsensitiveCursor);
}
+}
- // Test change stream on a single collection.
- testResume(mongosColl, mongosColl.getName());
+// Test change stream on a single collection.
+testResume(mongosColl, mongosColl.getName());
- // Test change stream on all collections.
- testResume(mongosColl, 1);
+// Test change stream on all collections.
+testResume(mongosColl, 1);
- cst.cleanUp();
+cst.cleanUp();
- st.stop();
+st.stop();
})();
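
The resume contract the hunk above reformats, stated in isolation: every change document
carries its resume token in _id, resumeAfter continues from just past the tokened event,
and once the oplog no longer reaches back to the token the resume fails (error code 40576
in this version). A minimal sketch, assuming coll is a collection on a live replica set
or sharded cluster:

// Minimal resume-token round trip; `coll` and the documents are illustrative.
let stream = coll.watch();
assert.writeOK(coll.insert({_id: 1}));
assert.soon(() => stream.hasNext());
const token = stream.next()._id;  // Each change exposes its resume token as _id.
stream.close();

assert.writeOK(coll.insert({_id: 2}));

// Resuming picks up immediately after the tokened event.
stream = coll.watch([], {resumeAfter: token});
assert.soon(() => stream.hasNext());
assert.eq(stream.next().documentKey, {_id: 2});

// Had the oplog rolled over past `token` (as the test forces with large
// inserts), this watch would instead fail with code 40576.
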
diff --git a/jstests/sharding/resume_change_stream_from_stale_mongos.js b/jstests/sharding/resume_change_stream_from_stale_mongos.js
index 7b8dd9cf673..fbc8bd904bb 100644
--- a/jstests/sharding/resume_change_stream_from_stale_mongos.js
+++ b/jstests/sharding/resume_change_stream_from_stale_mongos.js
@@ -3,88 +3,88 @@
// a stale shard version.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- // Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
- // second so that each shard is continually advancing its optime, allowing the
- // AsyncResultsMerger to return sorted results even if some shards have not yet produced any
- // data.
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
- });
+// Create a 2-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
+// second so that each shard is continually advancing its optime, allowing the
+// AsyncResultsMerger to return sorted results even if some shards have not yet produced any
+// data.
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
- const firstMongosDB = st.s0.getDB(jsTestName());
- const firstMongosColl = firstMongosDB.test;
+const firstMongosDB = st.s0.getDB(jsTestName());
+const firstMongosColl = firstMongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard 0.
- assert.commandWorked(firstMongosDB.adminCommand({enableSharding: firstMongosDB.getName()}));
- st.ensurePrimaryShard(firstMongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(firstMongosDB.adminCommand({enableSharding: firstMongosDB.getName()}));
+st.ensurePrimaryShard(firstMongosDB.getName(), st.rs0.getURL());
- // Establish a change stream while it is unsharded, then shard the collection, move a chunk, and
- // record a resume token after the first chunk migration.
- let changeStream = firstMongosColl.aggregate([{$changeStream: {}}]);
+// Establish a change stream while the collection is unsharded, then shard it, move a chunk, and
+// record a resume token after the first chunk migration.
+let changeStream = firstMongosColl.aggregate([{$changeStream: {}}]);
- assert.writeOK(firstMongosColl.insert({_id: -1}));
- assert.writeOK(firstMongosColl.insert({_id: 1}));
+assert.writeOK(firstMongosColl.insert({_id: -1}));
+assert.writeOK(firstMongosColl.insert({_id: 1}));
- for (let nextId of[-1, 1]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- }
+for (let nextId of [-1, 1]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+}
- // Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
- // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
- assert.commandWorked(firstMongosDB.adminCommand(
- {shardCollection: firstMongosColl.getFullName(), key: {_id: 1}}));
- assert.commandWorked(
- firstMongosDB.adminCommand({split: firstMongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(firstMongosDB.adminCommand(
- {moveChunk: firstMongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+// Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
+// [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
+assert.commandWorked(
+ firstMongosDB.adminCommand({shardCollection: firstMongosColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(
+ firstMongosDB.adminCommand({split: firstMongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(firstMongosDB.adminCommand(
+ {moveChunk: firstMongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- // Then do one insert to each shard.
- assert.writeOK(firstMongosColl.insert({_id: -2}));
- assert.writeOK(firstMongosColl.insert({_id: 2}));
+// Then do one insert to each shard.
+assert.writeOK(firstMongosColl.insert({_id: -2}));
+assert.writeOK(firstMongosColl.insert({_id: 2}));
- // The change stream should see all the inserts after internally re-establishing cursors after
- // the chunk split.
- let resumeToken = null; // We'll fill this out to be the token of the last change.
- for (let nextId of[-2, 2]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- resumeToken = next._id;
- }
+// The change stream should see all the inserts after internally re-establishing cursors after
+// the chunk split.
+let resumeToken = null; // We'll fill this out to be the token of the last change.
+for (let nextId of [-2, 2]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+ resumeToken = next._id;
+}
- // Do some writes that occur on each shard after the resume token.
- assert.writeOK(firstMongosColl.insert({_id: -3}));
- assert.writeOK(firstMongosColl.insert({_id: 3}));
+// Do some writes that occur on each shard after the resume token.
+assert.writeOK(firstMongosColl.insert({_id: -3}));
+assert.writeOK(firstMongosColl.insert({_id: 3}));
- // Now try to resume the change stream using a stale mongos which believes the collection is
- // unsharded. The first mongos should use the shard versioning protocol to discover that the
- // collection is no longer unsharded, and re-target to all shards in the cluster.
- changeStream.close();
- const secondMongosColl = st.s1.getDB(jsTestName()).test;
- changeStream = secondMongosColl.aggregate([{$changeStream: {resumeAfter: resumeToken}}]);
- // Verify we can see both inserts that occurred after the resume point.
- for (let nextId of[-3, 3]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- }
+// Now try to resume the change stream using a stale mongos which believes the collection is
+// unsharded. The stale mongos should use the shard versioning protocol to discover that the
+// collection is no longer unsharded, and re-target to all shards in the cluster.
+changeStream.close();
+const secondMongosColl = st.s1.getDB(jsTestName()).test;
+changeStream = secondMongosColl.aggregate([{$changeStream: {resumeAfter: resumeToken}}]);
+// Verify we can see both inserts that occurred after the resume point.
+for (let nextId of [-3, 3]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+}
- st.stop();
+st.stop();
}());
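
The property this file checks is that a resume token is not bound to the router that
produced it: a stream opened through one mongos can be resumed through another, and a
stale router refreshes its routing table rather than returning stale results. A minimal
sketch, assuming a ShardingTest st started with {mongos: 2} and an illustrative
test.coll namespace:

// Resume through a second, possibly stale, mongos; `st` and the namespace
// are illustrative.
const collA = st.s0.getDB("test").coll;
const collB = st.s1.getDB("test").coll;

let stream = collA.watch();
assert.writeOK(collA.insert({_id: 0}));
assert.soon(() => stream.hasNext());
const token = stream.next()._id;
stream.close();

assert.writeOK(collA.insert({_id: 1}));

// The second mongos discovers any routing changes via the shard versioning
// protocol, then returns the change that followed the token.
stream = collB.watch([], {resumeAfter: token});
assert.soon(() => stream.hasNext());
assert.eq(stream.next().documentKey, {_id: 1});
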
diff --git a/jstests/sharding/resume_change_stream_on_subset_of_shards.js b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
index 3c51004ed3c..b914a310e82 100644
--- a/jstests/sharding/resume_change_stream_on_subset_of_shards.js
+++ b/jstests/sharding/resume_change_stream_on_subset_of_shards.js
@@ -2,74 +2,73 @@
// the collection.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- // Create a 3-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
- // second so that each shard is continually advancing its optime, allowing the
- // AsyncResultsMerger to return sorted results even if some shards have not yet produced any
- // data.
- const st = new ShardingTest({
- shards: 3,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
- });
+// Create a 3-shard cluster. Enable 'writePeriodicNoops' and set 'periodicNoopIntervalSecs' to 1
+// second so that each shard is continually advancing its optime, allowing the
+// AsyncResultsMerger to return sorted results even if some shards have not yet produced any
+// data.
+const st = new ShardingTest({
+ shards: 3,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard 0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard 0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
- // [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+// Shard the test collection on _id, split the collection into 2 chunks: [MinKey, 0) and
+// [0, MaxKey), then move the [0, MaxKey) chunk to shard 1.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
- // Establish a change stream...
- let changeStream = mongosColl.watch();
+// Establish a change stream...
+let changeStream = mongosColl.watch();
- // ... then do one write to produce a resume token...
- assert.writeOK(mongosColl.insert({_id: -2}));
- assert.soon(() => changeStream.hasNext());
- const resumeToken = changeStream.next()._id;
-
- // ... followed by one write to each chunk for testing purposes, i.e. shards 0 and 1.
- assert.writeOK(mongosColl.insert({_id: -1}));
- assert.writeOK(mongosColl.insert({_id: 1}));
-
- // The change stream should see all the inserts after establishing cursors on all shards.
- for (let nextId of[-1, 1]) {
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.eq(next.fullDocument, {_id: nextId});
- jsTestLog(`Saw insert for _id ${nextId}`);
- }
+// ... then do one write to produce a resume token...
+assert.writeOK(mongosColl.insert({_id: -2}));
+assert.soon(() => changeStream.hasNext());
+const resumeToken = changeStream.next()._id;
- // Insert another document after storing the resume token.
- assert.writeOK(mongosColl.insert({_id: 2}));
+// ... followed by one write to each chunk for testing purposes, i.e. shards 0 and 1.
+assert.writeOK(mongosColl.insert({_id: -1}));
+assert.writeOK(mongosColl.insert({_id: 1}));
- // Resume the change stream and verify that it correctly sees the next insert. This is meant
- // to test resuming a change stream when not all shards are aware that the collection exists,
- // since shard 2 has no data at this point.
- changeStream = mongosColl.watch([], {resumeAfter: resumeToken});
+// The change stream should see all the inserts after establishing cursors on all shards.
+for (let nextId of [-1, 1]) {
assert.soon(() => changeStream.hasNext());
let next = changeStream.next();
- assert.eq(next.documentKey, {_id: -1});
- assert.eq(next.fullDocument, {_id: -1});
assert.eq(next.operationType, "insert");
+ assert.eq(next.fullDocument, {_id: nextId});
+ jsTestLog(`Saw insert for _id ${nextId}`);
+}
+
+// Insert another document after storing the resume token.
+assert.writeOK(mongosColl.insert({_id: 2}));
+
+// Resume the change stream and verify that it correctly sees the next insert. This is meant
+// to test resuming a change stream when not all shards are aware that the collection exists,
+// since shard 2 has no data at this point.
+changeStream = mongosColl.watch([], {resumeAfter: resumeToken});
+assert.soon(() => changeStream.hasNext());
+let next = changeStream.next();
+assert.eq(next.documentKey, {_id: -1});
+assert.eq(next.fullDocument, {_id: -1});
+assert.eq(next.operationType, "insert");
- st.stop();
+st.stop();
}());
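
Before the retryable_writes.js hunks below, the contract they verify is worth stating
plainly: re-sending a write command verbatim with the same lsid and txnNumber returns
the recorded result instead of executing again, so neither the documents nor the oplog
change. A minimal sketch, assuming testDB is a database handle on a replica-set primary
or a mongos:

// The retry contract in isolation; `testDB` and the 'user' collection are
// illustrative.
const lsid = {id: UUID()};
const cmd = {
    insert: 'user',
    documents: [{_id: 1}],
    lsid: lsid,
    txnNumber: NumberLong(1),
};

const first = assert.commandWorked(testDB.runCommand(cmd));
const retry = assert.commandWorked(testDB.runCommand(cmd));  // Re-send verbatim.

assert.eq(first.n, retry.n);                 // Same reported result...
assert.eq(1, testDB.user.find().itcount());  // ...and the insert ran only once.
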
diff --git a/jstests/sharding/retryable_writes.js b/jstests/sharding/retryable_writes.js
index cfcef0d0eed..d35172edf3a 100644
--- a/jstests/sharding/retryable_writes.js
+++ b/jstests/sharding/retryable_writes.js
@@ -3,531 +3,528 @@
* retry is as expected and it does not create additional oplog entries.
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- function checkFindAndModifyResult(expected, toCheck) {
- assert.eq(expected.ok, toCheck.ok);
- assert.eq(expected.value, toCheck.value);
- assert.docEq(expected.lastErrorObject, toCheck.lastErrorObject);
- }
-
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("transactions"),
- "Expected the serverStatus response to have a 'transactions' field");
- assert.hasFields(
- serverStatusResponse.transactions,
- ["retriedCommandsCount", "retriedStatementsCount", "transactionsCollectionWriteCount"],
- "The 'transactions' field in serverStatus did not have all of the expected fields");
- }
-
- function verifyServerStatusChanges(
- initialStats, newStats, newCommands, newStatements, newCollectionWrites) {
- assert.eq(initialStats.retriedCommandsCount + newCommands,
- newStats.retriedCommandsCount,
- "expected retriedCommandsCount to increase by " + newCommands);
- assert.eq(initialStats.retriedStatementsCount + newStatements,
- newStats.retriedStatementsCount,
- "expected retriedStatementsCount to increase by " + newStatements);
- assert.eq(initialStats.transactionsCollectionWriteCount + newCollectionWrites,
- newStats.transactionsCollectionWriteCount,
- "expected retriedCommandsCount to increase by " + newCollectionWrites);
- }
-
- function runTests(mainConn, priConn) {
- var lsid = UUID();
-
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
-
- let initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- var cmd = {
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+function checkFindAndModifyResult(expected, toCheck) {
+ assert.eq(expected.ok, toCheck.ok);
+ assert.eq(expected.value, toCheck.value);
+ assert.docEq(expected.lastErrorObject, toCheck.lastErrorObject);
+}
+
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("transactions"),
+ "Expected the serverStatus response to have a 'transactions' field");
+ assert.hasFields(
+ serverStatusResponse.transactions,
+ ["retriedCommandsCount", "retriedStatementsCount", "transactionsCollectionWriteCount"],
+ "The 'transactions' field in serverStatus did not have all of the expected fields");
+}
+
+function verifyServerStatusChanges(
+ initialStats, newStats, newCommands, newStatements, newCollectionWrites) {
+ assert.eq(initialStats.retriedCommandsCount + newCommands,
+ newStats.retriedCommandsCount,
+ "expected retriedCommandsCount to increase by " + newCommands);
+ assert.eq(initialStats.retriedStatementsCount + newStatements,
+ newStats.retriedStatementsCount,
+ "expected retriedStatementsCount to increase by " + newStatements);
+ assert.eq(initialStats.transactionsCollectionWriteCount + newCollectionWrites,
+ newStats.transactionsCollectionWriteCount,
+ "expected retriedCommandsCount to increase by " + newCollectionWrites);
+}
+
+function runTests(mainConn, priConn) {
+ var lsid = UUID();
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test insert command
+
+ let initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ var cmd = {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(34),
+ };
+
+ var testDBMain = mainConn.getDB('test');
+ var result = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ var oplog = priConn.getDB('local').oplog.rs;
+ var insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
+
+ var testDBPri = priConn.getDB('test');
+ assert.eq(2, testDBPri.user.find().itcount());
+
+ var retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+
+ assert.eq(2, testDBPri.user.find().itcount());
+ assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
+
+ let newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 2 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test update command
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(35),
+ };
+
+ result = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ let updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+
+ // Upserts are stored as inserts in the oplog, so check inserts too.
+ insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
+
+ assert.eq(3, testDBPri.user.find().itcount());
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.nModified, retryResult.nModified);
+ assert.eq(result.upserted, retryResult.upserted);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+
+ assert.eq(3, testDBPri.user.find().itcount());
+
+ assert.eq({_id: 10, x: 1}, testDBPri.user.findOne({_id: 10}));
+ assert.eq({_id: 20, y: 1}, testDBPri.user.findOne({_id: 20}));
+ assert.eq({_id: 30, z: 1}, testDBPri.user.findOne({_id: 30}));
+
+ assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+ assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 3 /* newStatements */,
+ 3 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test delete command
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ assert.writeOK(testDBMain.user.insert({_id: 40, x: 1}));
+ assert.writeOK(testDBMain.user.insert({_id: 50, y: 1}));
+
+ assert.eq(2, testDBPri.user.find({x: 1}).itcount());
+ assert.eq(2, testDBPri.user.find({y: 1}).itcount());
+
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(36),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ let deleteOplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
+
+ assert.eq(1, testDBPri.user.find({x: 1}).itcount());
+ assert.eq(1, testDBPri.user.find({y: 1}).itcount());
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+ assert.eq(result.ok, retryResult.ok);
+ assert.eq(result.n, retryResult.n);
+ assert.eq(result.writeErrors, retryResult.writeErrors);
+ assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+
+ assert.eq(1, testDBPri.user.find({x: 1}).itcount());
+ assert.eq(1, testDBPri.user.find({y: 1}).itcount());
+
+ assert.eq(deleteOplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 2 /* newStatements */,
+ 2 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (upsert)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(37),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
+ updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+ assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
+ assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
+ assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (update, return pre-image)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(38),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ var oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+ assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
+ assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (update, return post-image)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
+ assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
+ assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (remove, return pre-image)
+
+ initialStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(initialStatus);
+
+ assert.writeOK(testDBMain.user.insert({_id: 70, f: 1}));
+ assert.writeOK(testDBMain.user.insert({_id: 80, f: 1}));
+
+ cmd = {
+ findAndModify: 'user',
+ query: {f: 1},
+ remove: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(40),
+ };
+
+ result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ oplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
+ var docCount = testDBPri.user.find().itcount();
+
+ retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
+
+ assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
+ assert.eq(docCount, testDBPri.user.find().itcount());
+
+ checkFindAndModifyResult(result, retryResult);
+
+ newStatus = priConn.adminCommand({serverStatus: 1});
+ verifyServerStatusFields(newStatus);
+ verifyServerStatusChanges(initialStatus.transactions,
+ newStatus.transactions,
+ 1 /* newCommands */,
+ 1 /* newStatements */,
+ 1 /* newCollectionWrites */);
+}
+
+function runFailpointTests(mainConn, priConn) {
+ // Test the 'onPrimaryTransactionalWrite' failpoint
+ var lsid = UUID();
+ var testDb = mainConn.getDB('TestDB');
+
+ // Test connection close (default behaviour). The connection will get closed, but the
+ // inserts must succeed.
+ assert.commandWorked(priConn.adminCommand(
+ {configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'alwaysOn'}));
+
+ try {
+ // Set skipRetryOnNetworkError so the shell doesn't automatically retry, since the
+ // command has a txnNumber.
+ TestData.skipRetryOnNetworkError = true;
+ var res = assert.commandWorked(testDb.runCommand({
insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(34),
- };
-
- var testDBMain = mainConn.getDB('test');
- var result = assert.commandWorked(testDBMain.runCommand(cmd));
-
- var oplog = priConn.getDB('local').oplog.rs;
- var insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
-
- var testDBPri = priConn.getDB('test');
- assert.eq(2, testDBPri.user.find().itcount());
-
- var retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
-
- assert.eq(2, testDBPri.user.find().itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
-
- let newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 2 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test update command
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(35),
- };
-
- result = assert.commandWorked(testDBMain.runCommand(cmd));
-
- let updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
-
- // Upserts are stored as inserts in the oplog, so check inserts too.
- insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
-
- assert.eq(3, testDBPri.user.find().itcount());
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.nModified, retryResult.nModified);
- assert.eq(result.upserted, retryResult.upserted);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
-
- assert.eq(3, testDBPri.user.find().itcount());
-
- assert.eq({_id: 10, x: 1}, testDBPri.user.findOne({_id: 10}));
- assert.eq({_id: 20, y: 1}, testDBPri.user.findOne({_id: 20}));
- assert.eq({_id: 30, z: 1}, testDBPri.user.findOne({_id: 30}));
-
- assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 3 /* newStatements */,
- 3 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- assert.writeOK(testDBMain.user.insert({_id: 40, x: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 50, y: 1}));
-
- assert.eq(2, testDBPri.user.find({x: 1}).itcount());
- assert.eq(2, testDBPri.user.find({y: 1}).itcount());
-
- cmd = {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(36),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- let deleteOplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
-
- assert.eq(1, testDBPri.user.find({x: 1}).itcount());
- assert.eq(1, testDBPri.user.find({y: 1}).itcount());
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
-
- assert.eq(1, testDBPri.user.find({x: 1}).itcount());
- assert.eq(1, testDBPri.user.find({y: 1}).itcount());
-
- assert.eq(deleteOplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 2 /* newStatements */,
- 2 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (upsert)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(37),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- insertOplogEntries = oplog.find({ns: 'test.user', op: 'i'}).itcount();
- updateOplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
- assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq({_id: 60, x: 1}, testDBPri.user.findOne({_id: 60}));
- assert.eq(insertOplogEntries, oplog.find({ns: 'test.user', op: 'i'}).itcount());
- assert.eq(updateOplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (update, return pre-image)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(38),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- var oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
- assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq({_id: 60, x: 2}, testDBPri.user.findOne({_id: 60}));
- assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (update, return post-image)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- oplogEntries = oplog.find({ns: 'test.user', op: 'u'}).itcount();
- assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq({_id: 60, x: 3}, testDBPri.user.findOne({_id: 60}));
- assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'u'}).itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (remove, return pre-image)
-
- initialStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(initialStatus);
-
- assert.writeOK(testDBMain.user.insert({_id: 70, f: 1}));
- assert.writeOK(testDBMain.user.insert({_id: 80, f: 1}));
-
- cmd = {
- findAndModify: 'user',
- query: {f: 1},
- remove: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(40),
- };
-
- result = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- oplogEntries = oplog.find({ns: 'test.user', op: 'd'}).itcount();
- var docCount = testDBPri.user.find().itcount();
-
- retryResult = assert.commandWorked(testDBMain.runCommand(cmd));
-
- assert.eq(oplogEntries, oplog.find({ns: 'test.user', op: 'd'}).itcount());
- assert.eq(docCount, testDBPri.user.find().itcount());
-
- checkFindAndModifyResult(result, retryResult);
-
- newStatus = priConn.adminCommand({serverStatus: 1});
- verifyServerStatusFields(newStatus);
- verifyServerStatusChanges(initialStatus.transactions,
- newStatus.transactions,
- 1 /* newCommands */,
- 1 /* newStatements */,
- 1 /* newCollectionWrites */);
- }
-
- function runFailpointTests(mainConn, priConn) {
- // Test the 'onPrimaryTransactionalWrite' failpoint
- var lsid = UUID();
- var testDb = mainConn.getDB('TestDB');
-
- // Test connection close (default behaviour). The connection will get closed, but the
- // inserts must succeed
- assert.commandWorked(priConn.adminCommand(
- {configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'alwaysOn'}));
-
- try {
- // Set skipRetryOnNetworkError so the shell doesn't automatically retry, since the
- // command has a txnNumber.
- TestData.skipRetryOnNetworkError = true;
- var res = assert.commandWorked(testDb.runCommand({
- insert: 'user',
- documents: [{x: 0}, {x: 1}],
- ordered: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(1)
- }));
- // Mongos will automatically retry on retryable errors if the request has a txnNumber,
- // and the retry path for already completed writes does not trigger the failpoint, so
- // the command will succeed when run through mongos.
- assert.eq(2, res.n);
- assert.eq(false, res.hasOwnProperty("writeErrors"));
- } catch (e) {
- var exceptionMsg = e.toString();
- assert(isNetworkError(e), 'Incorrect exception thrown: ' + exceptionMsg);
- } finally {
- TestData.skipRetryOnNetworkError = false;
- }
-
- let collCount = 0;
- assert.soon(() => {
- collCount = testDb.user.find({}).itcount();
- return collCount == 2;
- }, 'testDb.user returned ' + collCount + ' entries');
-
- // Test exception throw. One update must succeed and the other must fail.
- assert.commandWorked(priConn.adminCommand({
- configureFailPoint: 'onPrimaryTransactionalWrite',
- mode: {skip: 1},
- data: {
- closeConnection: false,
- failBeforeCommitExceptionCode: ErrorCodes.InternalError
- }
- }));
-
- var cmd = {
- update: 'user',
- updates: [{q: {x: 0}, u: {$inc: {y: 1}}}, {q: {x: 1}, u: {$inc: {y: 1}}}],
+ documents: [{x: 0}, {x: 1}],
ordered: true,
lsid: {id: lsid},
- txnNumber: NumberLong(2)
- };
-
- var writeResult = testDb.runCommand(cmd);
-
- assert.eq(1, writeResult.nModified);
- assert.eq(1, writeResult.writeErrors.length);
- assert.eq(1, writeResult.writeErrors[0].index);
- assert.eq(ErrorCodes.InternalError, writeResult.writeErrors[0].code);
-
- assert.commandWorked(
- priConn.adminCommand({configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'off'}));
-
- var writeResult = testDb.runCommand(cmd);
- assert.eq(2, writeResult.nModified);
-
- var collContents = testDb.user.find({}).sort({x: 1}).toArray();
- assert.eq(2, collContents.length);
- assert.eq(0, collContents[0].x);
- assert.eq(1, collContents[0].y);
- assert.eq(1, collContents[1].x);
- assert.eq(1, collContents[1].y);
- }
-
- function runMultiTests(mainConn) {
- // Test the behavior of retryable writes with multi=true / limit=0
- var lsid = {id: UUID()};
- var testDb = mainConn.getDB('test_multi');
-
- // Only the update statements with multi=true in a batch fail.
- var cmd = {
- update: 'user',
- updates: [{q: {x: 1}, u: {y: 1}}, {q: {x: 2}, u: {z: 1}, multi: true}],
- ordered: true,
- lsid: lsid,
- txnNumber: NumberLong(1),
- };
- var res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
- assert.eq(1,
- res.writeErrors.length,
- 'expected only one write error, received: ' + tojson(res.writeErrors));
- assert.eq(1,
- res.writeErrors[0].index,
- 'expected the update at index 1 to fail, not the update at index: ' +
- res.writeErrors[0].index);
- assert.eq(ErrorCodes.InvalidOptions,
- res.writeErrors[0].code,
- 'expected to fail with code ' + ErrorCodes.InvalidOptions + ', received: ' +
- res.writeErrors[0].code);
-
- // Only the delete statements with limit=0 in a batch fail.
- cmd = {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 0}],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(1),
- };
- res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
- assert.eq(1,
- res.writeErrors.length,
- 'expected only one write error, received: ' + tojson(res.writeErrors));
- assert.eq(1,
- res.writeErrors[0].index,
- 'expected the delete at index 1 to fail, not the delete at index: ' +
- res.writeErrors[0].index);
- assert.eq(ErrorCodes.InvalidOptions,
- res.writeErrors[0].code,
- 'expected to fail with code ' + ErrorCodes.InvalidOptions + ', received: ' +
- res.writeErrors[0].code);
- }
-
- function runInvalidTests(mainConn) {
- var lsid = {id: UUID()};
- var localDB = mainConn.getDB('local');
-
- let cmd = {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(10),
- };
-
- let res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
- assert.eq(2, res.writeErrors.length);
-
- localDB.user.insert({_id: 10, x: 1});
- localDB.user.insert({_id: 30, z: 2});
-
- cmd = {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(11),
- };
-
- res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
- assert.eq(3, res.writeErrors.length);
-
- cmd = {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {z: 2}, limit: 1}],
- ordered: false,
- lsid: lsid,
- txnNumber: NumberLong(12),
- };
-
- res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
- assert.eq(2, res.writeErrors.length);
-
- cmd = {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(37),
- };
-
- assert.commandFailed(localDB.runCommand(cmd));
+ txnNumber: NumberLong(1)
+ }));
+ // Mongos will automatically retry on retryable errors if the request has a txnNumber,
+ // and the retry path for already completed writes does not trigger the failpoint, so
+ // the command will succeed when run through mongos.
+ assert.eq(2, res.n);
+ assert.eq(false, res.hasOwnProperty("writeErrors"));
+ } catch (e) {
+ var exceptionMsg = e.toString();
+ assert(isNetworkError(e), 'Incorrect exception thrown: ' + exceptionMsg);
+ } finally {
+ TestData.skipRetryOnNetworkError = false;
}
- // Tests for replica set
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet({verbose: 5});
- replTest.initiate();
-
- var priConn = replTest.getPrimary();
-
- runTests(priConn, priConn);
- runFailpointTests(priConn, priConn);
- runMultiTests(priConn);
- runInvalidTests(priConn);
-
- replTest.stopSet();
-
- // Tests for sharded cluster
- var st = new ShardingTest({shards: {rs0: {nodes: 1, verbose: 5}}});
-
- runTests(st.s0, st.rs0.getPrimary());
- runFailpointTests(st.s0, st.rs0.getPrimary());
- runMultiTests(st.s0);
-
- st.stop();
+ let collCount = 0;
+ assert.soon(() => {
+ collCount = testDb.user.find({}).itcount();
+ return collCount == 2;
+ }, 'testDb.user returned ' + collCount + ' entries');
+
+ // Test exception throw. One update must succeed and the other must fail.
+ assert.commandWorked(priConn.adminCommand({
+ configureFailPoint: 'onPrimaryTransactionalWrite',
+ mode: {skip: 1},
+ data: {closeConnection: false, failBeforeCommitExceptionCode: ErrorCodes.InternalError}
+ }));
+
+ var cmd = {
+ update: 'user',
+ updates: [{q: {x: 0}, u: {$inc: {y: 1}}}, {q: {x: 1}, u: {$inc: {y: 1}}}],
+ ordered: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(2)
+ };
+
+ var writeResult = testDb.runCommand(cmd);
+
+ assert.eq(1, writeResult.nModified);
+ assert.eq(1, writeResult.writeErrors.length);
+ assert.eq(1, writeResult.writeErrors[0].index);
+ assert.eq(ErrorCodes.InternalError, writeResult.writeErrors[0].code);
+
+ assert.commandWorked(
+ priConn.adminCommand({configureFailPoint: 'onPrimaryTransactionalWrite', mode: 'off'}));
+
+ var writeResult = testDb.runCommand(cmd);
+ assert.eq(2, writeResult.nModified);
+
+ var collContents = testDb.user.find({}).sort({x: 1}).toArray();
+ assert.eq(2, collContents.length);
+ assert.eq(0, collContents[0].x);
+ assert.eq(1, collContents[0].y);
+ assert.eq(1, collContents[1].x);
+ assert.eq(1, collContents[1].y);
+}
+
+function runMultiTests(mainConn) {
+ // Test the behavior of retryable writes with multi=true / limit=0
+ var lsid = {id: UUID()};
+ var testDb = mainConn.getDB('test_multi');
+
+ // Only the update statements with multi=true in a batch fail.
+ var cmd = {
+ update: 'user',
+ updates: [{q: {x: 1}, u: {y: 1}}, {q: {x: 2}, u: {z: 1}, multi: true}],
+ ordered: true,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ };
+ var res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
+ assert.eq(1,
+ res.writeErrors.length,
+ 'expected only one write error, received: ' + tojson(res.writeErrors));
+ assert.eq(1,
+ res.writeErrors[0].index,
+ 'expected the update at index 1 to fail, not the update at index: ' +
+ res.writeErrors[0].index);
+ assert.eq(ErrorCodes.InvalidOptions,
+ res.writeErrors[0].code,
+ 'expected to fail with code ' + ErrorCodes.InvalidOptions +
+ ', received: ' + res.writeErrors[0].code);
+
+ // Only the delete statements with limit=0 in a batch fail.
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 0}],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ };
+ res = assert.commandWorkedIgnoringWriteErrors(testDb.runCommand(cmd));
+ assert.eq(1,
+ res.writeErrors.length,
+ 'expected only one write error, received: ' + tojson(res.writeErrors));
+ assert.eq(1,
+ res.writeErrors[0].index,
+ 'expected the delete at index 1 to fail, not the delete at index: ' +
+ res.writeErrors[0].index);
+ assert.eq(ErrorCodes.InvalidOptions,
+ res.writeErrors[0].code,
+ 'expected to fail with code ' + ErrorCodes.InvalidOptions +
+ ', received: ' + res.writeErrors[0].code);
+}
+
+function runInvalidTests(mainConn) {
+ var lsid = {id: UUID()};
+ var localDB = mainConn.getDB('local');
+
+ let cmd = {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(10),
+ };
+
+ let res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
+ assert.eq(2, res.writeErrors.length);
+
+ localDB.user.insert({_id: 10, x: 1});
+ localDB.user.insert({_id: 30, z: 2});
+
+ cmd = {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(11),
+ };
+
+ res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
+ assert.eq(3, res.writeErrors.length);
+
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {z: 2}, limit: 1}],
+ ordered: false,
+ lsid: lsid,
+ txnNumber: NumberLong(12),
+ };
+
+ res = assert.commandWorkedIgnoringWriteErrors(localDB.runCommand(cmd));
+ assert.eq(2, res.writeErrors.length);
+
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(37),
+ };
+
+ assert.commandFailed(localDB.runCommand(cmd));
+}
+
+// Tests for replica set
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet({verbose: 5});
+replTest.initiate();
+
+var priConn = replTest.getPrimary();
+
+runTests(priConn, priConn);
+runFailpointTests(priConn, priConn);
+runMultiTests(priConn);
+runInvalidTests(priConn);
+
+replTest.stopSet();
+
+// Tests for sharded cluster
+var st = new ShardingTest({shards: {rs0: {nodes: 1, verbose: 5}}});
+
+runTests(st.s0, st.rs0.getPrimary());
+runFailpointTests(st.s0, st.rs0.getPrimary());
+runMultiTests(st.s0);
+
+st.stop();
})();
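The tests above all exercise the same retryable-writes contract: a write command that carries a logical session id (lsid) and a txnNumber is recorded in the session's transaction table, and re-issuing the identical command replays the stored result instead of executing the write a second time. Below is a minimal sketch of that contract, assuming conn is a connection to a replica-set primary (the name conn is illustrative and not part of the tests above):

    var lsid = {id: UUID()};
    var cmd = {
        insert: 'user',
        documents: [{_id: 1}],
        ordered: true,
        lsid: lsid,
        txnNumber: NumberLong(1),
    };
    var testDB = conn.getDB('test');

    // The first execution performs the insert and records the outcome.
    var first = assert.commandWorked(testDB.runCommand(cmd));

    // Re-issuing the same {lsid, txnNumber} statement does not insert twice;
    // the server returns the result recorded for the first execution.
    var retry = assert.commandWorked(testDB.runCommand(cmd));
    assert.eq(first.n, retry.n);
    assert.eq(1, testDB.user.find({_id: 1}).itcount());

This is also why runMultiTests above expects multi=true updates and limit=0 deletes to be rejected with InvalidOptions: a statement that can affect an unbounded number of documents cannot be replayed from a single recorded result, so it is not retryable.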
diff --git a/jstests/sharding/rs_stepdown_and_pooling.js b/jstests/sharding/rs_stepdown_and_pooling.js
index e298c61ff17..61c2a64ba9c 100644
--- a/jstests/sharding/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/rs_stepdown_and_pooling.js
@@ -2,101 +2,101 @@
// Tests what happens when a replica set primary goes down with pooled connections.
//
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
+"use strict";
+load("jstests/replsets/rslib.js");
- var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
- // Stop the balancer so it does not create unrelated connections during the test
- st.stopBalancer();
+// Stop the balancer so it does not create unrelated connections during the test
+st.stopBalancer();
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var db = coll.getDB();
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var db = coll.getDB();
- // Test is not valid for Win32
- var is32Bits = (db.serverBuildInfo().bits == 32);
- if (is32Bits && _isWindows()) {
- // Win32 doesn't provide the polling interface we need to implement the check tested here
- jsTest.log("Test is not valid on Win32 platform.");
+// Test is not valid for Win32
+var is32Bits = (db.serverBuildInfo().bits == 32);
+if (is32Bits && _isWindows()) {
+ // Win32 doesn't provide the polling interface we need to implement the check tested here
+ jsTest.log("Test is not valid on Win32 platform.");
- } else {
- // Non-Win32 platform
+} else {
+ // Non-Win32 platform
- var primary = st.rs0.getPrimary();
- var secondary = st.rs0.getSecondary();
+ var primary = st.rs0.getPrimary();
+ var secondary = st.rs0.getSecondary();
- jsTest.log("Creating new connections...");
+ jsTest.log("Creating new connections...");
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x50)-> mongos ->(x50)-> primary
- var conns = [];
- for (var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- conns[i].getCollection(coll + "").findOne();
- }
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x50)-> mongos ->(x50)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ conns[i].getCollection(coll + "").findOne();
+ }
- jsTest.log("Returning the connections back to the pool.");
+ jsTest.log("Returning the connections back to the pool.");
- for (var i = 0; i < conns.length; i++) {
- conns[i] = null;
- }
- // Make sure we return the connections to the pool
- gc();
+ for (var i = 0; i < conns.length; i++) {
+ conns[i] = null;
+ }
+ // Make sure we return the connections to the pool
+ gc();
- // Don't make the test fragile by depending on the format of shardConnPoolStats,
- // but printing it is useful if something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
- printjson(connPoolStats);
+ // Don't make the test fragile by depending on the format of shardConnPoolStats,
+ // but printing it is useful if something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
- jsTest.log("Stepdown primary and then step back up...");
+ jsTest.log("Stepdown primary and then step back up...");
- var stepDown = function(node, timeSecs) {
- assert.commandWorked(
- node.getDB("admin").runCommand({replSetStepDown: timeSecs, force: true}));
- };
+ var stepDown = function(node, timeSecs) {
+ assert.commandWorked(
+ node.getDB("admin").runCommand({replSetStepDown: timeSecs, force: true}));
+ };
- stepDown(primary, 0);
+ stepDown(primary, 0);
- jsTest.log("Waiting for mongos to acknowledge stepdown...");
+ jsTest.log("Waiting for mongos to acknowledge stepdown...");
- awaitRSClientHosts(mongos,
- secondary,
- {ismaster: true},
- st.rs0,
- 2 * 60 * 1000); // slow hosts can take longer to recognize the stepdown
+ awaitRSClientHosts(mongos,
+ secondary,
+ {ismaster: true},
+ st.rs0,
+ 2 * 60 * 1000); // slow hosts can take longer to recognize the stepdown
- jsTest.log("Stepping back up...");
+ jsTest.log("Stepping back up...");
- stepDown(secondary, 10000);
+ stepDown(secondary, 10000);
- jsTest.log("Waiting for mongos to acknowledge step up...");
+ jsTest.log("Waiting for mongos to acknowledge step up...");
- awaitRSClientHosts(mongos, primary, {ismaster: true}, st.rs0, 2 * 60 * 1000);
+ awaitRSClientHosts(mongos, primary, {ismaster: true}, st.rs0, 2 * 60 * 1000);
- jsTest.log("Waiting for the socket timeout to elapse...");
+ jsTest.log("Waiting for the socket timeout to elapse...");
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
- jsTest.log("Run queries using new connections.");
+ jsTest.log("Run queries using new connections.");
- var numErrors = 0;
- for (var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- printjson(newConn.getCollection("foo.bar").findOne());
- } catch (e) {
- printjson(e);
- numErrors++;
- }
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ printjson(newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
}
+ }
- assert.eq(0, numErrors);
+ assert.eq(0, numErrors);
- } // End Win32 check
+} // End Win32 check
- jsTest.log("DONE!");
+jsTest.log("DONE!");
- st.stop();
+st.stop();
}());
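The stepdown and step-up cycle above reduces to two primitives: replSetStepDown with force: true against the current primary, and awaitRSClientHosts (loaded from jstests/replsets/rslib.js) to block until mongos has observed the topology change. A condensed sketch, assuming st is an already-running ShardingTest whose shard rs0 has two electable nodes:

    load("jstests/replsets/rslib.js");

    var primary = st.rs0.getPrimary();
    var secondary = st.rs0.getSecondary();

    // Force an immediate stepdown; force: true skips the wait for a fully
    // caught-up secondary.
    assert.commandWorked(
        primary.getDB("admin").runCommand({replSetStepDown: 0, force: true}));

    // Block until mongos routes to the new primary before issuing further
    // operations through it.
    awaitRSClientHosts(st.s0, secondary, {ismaster: true}, st.rs0, 2 * 60 * 1000);

The long timeout mirrors the test above: slow hosts can take well over a minute to notice a stepdown, so the wait is deliberately generous.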
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 814b79b6308..a507f876fce 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -17,592 +17,588 @@
* "versioned". Determines what system profiler checks are performed.
*/
(function() {
- "use strict";
-
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
-
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
-
- // Check that a test case is well-formed.
- let validateTestCase = function(test) {
- assert(test.setUp && typeof(test.setUp) === "function");
- assert(test.command && typeof(test.command) === "object");
- assert(test.checkResults && typeof(test.checkResults) === "function");
- assert(test.behavior === "unshardedOnly" ||
- test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
- test.behavior === "versioned");
- };
-
- let testCases = {
- _addShard: {skip: "primary only"},
- _cloneCatalogData: {skip: "primary only"},
- _configsvrAddShard: {skip: "primary only"},
- _configsvrAddShardToZone: {skip: "primary only"},
- _configsvrBalancerStart: {skip: "primary only"},
- _configsvrBalancerStatus: {skip: "primary only"},
- _configsvrBalancerStop: {skip: "primary only"},
- _configsvrCommitChunkMerge: {skip: "primary only"},
- _configsvrCommitChunkMigration: {skip: "primary only"},
- _configsvrCommitChunkSplit: {skip: "primary only"},
- _configsvrCommitMovePrimary: {skip: "primary only"},
- _configsvrDropCollection: {skip: "primary only"},
- _configsvrDropDatabase: {skip: "primary only"},
- _configsvrMoveChunk: {skip: "primary only"},
- _configsvrMovePrimary: {skip: "primary only"},
- _configsvrRemoveShardFromZone: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
- _configsvrUpdateZoneKeyRange: {skip: "primary only"},
- _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
- _getUserCacheGeneration: {skip: "does not return user data"},
- _hashBSONElement: {skip: "does not return user data"},
- _isSelf: {skip: "does not return user data"},
- _mergeAuthzCollections: {skip: "primary only"},
- _migrateClone: {skip: "primary only"},
- _movePrimary: {skip: "primary only"},
- _recvChunkAbort: {skip: "primary only"},
- _recvChunkCommit: {skip: "primary only"},
- _recvChunkStart: {skip: "primary only"},
- _recvChunkStatus: {skip: "primary only"},
- _transferMods: {skip: "primary only"},
- abortTransaction: {skip: "primary only"},
- addShard: {skip: "primary only"},
- addShardToZone: {skip: "primary only"},
- aggregate: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+"use strict";
+
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
+
+// Check that a test case is well-formed.
+let validateTestCase = function(test) {
+ assert(test.setUp && typeof (test.setUp) === "function");
+ assert(test.command && typeof (test.command) === "object");
+ assert(test.checkResults && typeof (test.checkResults) === "function");
+ assert(test.behavior === "unshardedOnly" ||
+ test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
+ test.behavior === "versioned");
+};
+
+let testCases = {
+ _addShard: {skip: "primary only"},
+ _cloneCatalogData: {skip: "primary only"},
+ _configsvrAddShard: {skip: "primary only"},
+ _configsvrAddShardToZone: {skip: "primary only"},
+ _configsvrBalancerStart: {skip: "primary only"},
+ _configsvrBalancerStatus: {skip: "primary only"},
+ _configsvrBalancerStop: {skip: "primary only"},
+ _configsvrCommitChunkMerge: {skip: "primary only"},
+ _configsvrCommitChunkMigration: {skip: "primary only"},
+ _configsvrCommitChunkSplit: {skip: "primary only"},
+ _configsvrCommitMovePrimary: {skip: "primary only"},
+ _configsvrDropCollection: {skip: "primary only"},
+ _configsvrDropDatabase: {skip: "primary only"},
+ _configsvrMoveChunk: {skip: "primary only"},
+ _configsvrMovePrimary: {skip: "primary only"},
+ _configsvrRemoveShardFromZone: {skip: "primary only"},
+ _configsvrShardCollection: {skip: "primary only"},
+ _configsvrUpdateZoneKeyRange: {skip: "primary only"},
+ _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
+ _getUserCacheGeneration: {skip: "does not return user data"},
+ _hashBSONElement: {skip: "does not return user data"},
+ _isSelf: {skip: "does not return user data"},
+ _mergeAuthzCollections: {skip: "primary only"},
+ _migrateClone: {skip: "primary only"},
+ _movePrimary: {skip: "primary only"},
+ _recvChunkAbort: {skip: "primary only"},
+ _recvChunkCommit: {skip: "primary only"},
+ _recvChunkStart: {skip: "primary only"},
+ _recvChunkStatus: {skip: "primary only"},
+ _transferMods: {skip: "primary only"},
+ abortTransaction: {skip: "primary only"},
+ addShard: {skip: "primary only"},
+ addShardToZone: {skip: "primary only"},
+ aggregate: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- appendOplogNote: {skip: "primary only"},
- applyOps: {skip: "primary only"},
- authSchemaUpgrade: {skip: "primary only"},
- authenticate: {skip: "does not return user data"},
- availableQueryOptions: {skip: "does not return user data"},
- balancerStart: {skip: "primary only"},
- balancerStatus: {skip: "primary only"},
- balancerStop: {skip: "primary only"},
- buildInfo: {skip: "does not return user data"},
- captrunc: {skip: "primary only"},
- checkShardingIndex: {skip: "primary only"},
- cleanupOrphaned: {skip: "primary only"},
- clearLog: {skip: "does not return user data"},
- clone: {skip: "primary only"},
- cloneCollection: {skip: "primary only"},
- cloneCollectionAsCapped: {skip: "primary only"},
- collMod: {skip: "primary only"},
- collStats: {skip: "does not return user data"},
- commitTransaction: {skip: "primary only"},
- compact: {skip: "does not return user data"},
- configureFailPoint: {skip: "does not return user data"},
- connPoolStats: {skip: "does not return user data"},
- connPoolSync: {skip: "does not return user data"},
- connectionStatus: {skip: "does not return user data"},
- convertToCapped: {skip: "primary only"},
- count: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {count: coll, query: {x: 1}},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.n, tojson(res));
- },
- behavior: "versioned"
+ command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.cursor.firstBatch.length, tojson(res));
},
- cpuload: {skip: "does not return user data"},
- create: {skip: "primary only"},
- createIndexes: {skip: "primary only"},
- createRole: {skip: "primary only"},
- createUser: {skip: "primary only"},
- currentOp: {skip: "does not return user data"},
- dataSize: {skip: "does not return user data"},
- dbHash: {skip: "does not return user data"},
- dbStats: {skip: "does not return user data"},
- delete: {skip: "primary only"},
- distinct: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {distinct: coll, key: "x"},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.values.length, tojson(res));
- },
- behavior: "versioned"
+ behavior: "versioned"
+ },
+ appendOplogNote: {skip: "primary only"},
+ applyOps: {skip: "primary only"},
+ authSchemaUpgrade: {skip: "primary only"},
+ authenticate: {skip: "does not return user data"},
+ availableQueryOptions: {skip: "does not return user data"},
+ balancerStart: {skip: "primary only"},
+ balancerStatus: {skip: "primary only"},
+ balancerStop: {skip: "primary only"},
+ buildInfo: {skip: "does not return user data"},
+ captrunc: {skip: "primary only"},
+ checkShardingIndex: {skip: "primary only"},
+ cleanupOrphaned: {skip: "primary only"},
+ clearLog: {skip: "does not return user data"},
+ clone: {skip: "primary only"},
+ cloneCollection: {skip: "primary only"},
+ cloneCollectionAsCapped: {skip: "primary only"},
+ collMod: {skip: "primary only"},
+ collStats: {skip: "does not return user data"},
+ commitTransaction: {skip: "primary only"},
+ compact: {skip: "does not return user data"},
+ configureFailPoint: {skip: "does not return user data"},
+ connPoolStats: {skip: "does not return user data"},
+ connPoolSync: {skip: "does not return user data"},
+ connectionStatus: {skip: "does not return user data"},
+ convertToCapped: {skip: "primary only"},
+ count: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- driverOIDTest: {skip: "does not return user data"},
- drop: {skip: "primary only"},
- dropAllRolesFromDatabase: {skip: "primary only"},
- dropAllUsersFromDatabase: {skip: "primary only"},
- dropConnections: {skip: "does not return user data"},
- dropDatabase: {skip: "primary only"},
- dropIndexes: {skip: "primary only"},
- dropRole: {skip: "primary only"},
- dropUser: {skip: "primary only"},
- echo: {skip: "does not return user data"},
- emptycapped: {skip: "primary only"},
- enableSharding: {skip: "primary only"},
- endSessions: {skip: "does not return user data"},
- explain: {skip: "TODO SERVER-30068"},
- features: {skip: "does not return user data"},
- filemd5: {skip: "does not return user data"},
- find: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {find: coll, filter: {x: 1}},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+ command: {count: coll, query: {x: 1}},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.n, tojson(res));
},
- findAndModify: {skip: "primary only"},
- flushRouterConfig: {skip: "does not return user data"},
- forceerror: {skip: "does not return user data"},
- fsync: {skip: "does not return user data"},
- fsyncUnlock: {skip: "does not return user data"},
- geoSearch: {skip: "not supported in mongos"},
- getCmdLineOpts: {skip: "does not return user data"},
- getDiagnosticData: {skip: "does not return user data"},
- getLastError: {skip: "primary only"},
- getLog: {skip: "does not return user data"},
- getMore: {skip: "shard version already established"},
- getParameter: {skip: "does not return user data"},
- getShardMap: {skip: "does not return user data"},
- getShardVersion: {skip: "primary only"},
- getnonce: {skip: "does not return user data"},
- godinsert: {skip: "for testing only"},
- grantPrivilegesToRole: {skip: "primary only"},
- grantRolesToRole: {skip: "primary only"},
- grantRolesToUser: {skip: "primary only"},
- handshake: {skip: "does not return user data"},
- hostInfo: {skip: "does not return user data"},
- insert: {skip: "primary only"},
- invalidateUserCache: {skip: "does not return user data"},
- isdbgrid: {skip: "does not return user data"},
- isMaster: {skip: "does not return user data"},
- killAllSessions: {skip: "does not return user data"},
- killAllSessionsByPattern: {skip: "does not return user data"},
- killCursors: {skip: "does not return user data"},
- killOp: {skip: "does not return user data"},
- killSessions: {skip: "does not return user data"},
- listCollections: {skip: "primary only"},
- listCommands: {skip: "does not return user data"},
- listDatabases: {skip: "primary only"},
- listIndexes: {skip: "primary only"},
- listShards: {skip: "does not return user data"},
- lockInfo: {skip: "primary only"},
- logApplicationMessage: {skip: "primary only"},
- logRotate: {skip: "does not return user data"},
- logout: {skip: "does not return user data"},
- makeSnapshot: {skip: "does not return user data"},
- mapReduce: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {
- mapReduce: coll,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
- },
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(0, res.results.length, tojson(res));
- },
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ behavior: "versioned"
+ },
+ cpuload: {skip: "does not return user data"},
+ create: {skip: "primary only"},
+ createIndexes: {skip: "primary only"},
+ createRole: {skip: "primary only"},
+ createUser: {skip: "primary only"},
+ currentOp: {skip: "does not return user data"},
+ dataSize: {skip: "does not return user data"},
+ dbHash: {skip: "does not return user data"},
+ dbStats: {skip: "does not return user data"},
+ delete: {skip: "primary only"},
+ distinct: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- mergeChunks: {skip: "primary only"},
- moveChunk: {skip: "primary only"},
- movePrimary: {skip: "primary only"},
- multicast: {skip: "does not return user data"},
- netstat: {skip: "does not return user data"},
- ping: {skip: "does not return user data"},
- planCacheClear: {skip: "does not return user data"},
- planCacheClearFilters: {skip: "does not return user data"},
- planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
- planCacheSetFilter: {skip: "does not return user data"},
- profile: {skip: "primary only"},
- reapLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshSessions: {skip: "does not return user data"},
- refreshSessionsInternal: {skip: "does not return user data"},
- removeShard: {skip: "primary only"},
- removeShardFromZone: {skip: "primary only"},
- renameCollection: {skip: "primary only"},
- repairCursor: {skip: "does not return user data"},
- replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
- replSetFreeze: {skip: "does not return user data"},
- replSetGetConfig: {skip: "does not return user data"},
- replSetGetRBID: {skip: "does not return user data"},
- replSetGetStatus: {skip: "does not return user data"},
- replSetHeartbeat: {skip: "does not return user data"},
- replSetInitiate: {skip: "does not return user data"},
- replSetMaintenance: {skip: "does not return user data"},
- replSetReconfig: {skip: "does not return user data"},
- replSetRequestVotes: {skip: "does not return user data"},
- replSetStepDown: {skip: "does not return user data"},
- replSetStepUp: {skip: "does not return user data"},
- replSetSyncFrom: {skip: "does not return user data"},
- replSetTest: {skip: "does not return user data"},
- replSetUpdatePosition: {skip: "does not return user data"},
- replSetResizeOplog: {skip: "does not return user data"},
- resetError: {skip: "does not return user data"},
- restartCatalog: {skip: "internal-only command"},
- resync: {skip: "primary only"},
- revokePrivilegesFromRole: {skip: "primary only"},
- revokeRolesFromRole: {skip: "primary only"},
- revokeRolesFromUser: {skip: "primary only"},
- rolesInfo: {skip: "primary only"},
- saslContinue: {skip: "primary only"},
- saslStart: {skip: "primary only"},
- serverStatus: {skip: "does not return user data"},
- setCommittedSnapshot: {skip: "does not return user data"},
- setIndexCommitQuorum: {skip: "primary only"},
- setFeatureCompatibilityVersion: {skip: "primary only"},
- setFreeMonitoring: {skip: "primary only"},
- setParameter: {skip: "does not return user data"},
- setShardVersion: {skip: "does not return user data"},
- shardCollection: {skip: "primary only"},
- shardConnPoolStats: {skip: "does not return user data"},
- shardingState: {skip: "does not return user data"},
- shutdown: {skip: "does not return user data"},
- sleep: {skip: "does not return user data"},
- split: {skip: "primary only"},
- splitChunk: {skip: "primary only"},
- splitVector: {skip: "primary only"},
- stageDebug: {skip: "primary only"},
- startRecordingTraffic: {skip: "does not return user data"},
- startSession: {skip: "does not return user data"},
- stopRecordingTraffic: {skip: "does not return user data"},
- top: {skip: "does not return user data"},
- touch: {skip: "does not return user data"},
- unsetSharding: {skip: "does not return user data"},
- update: {skip: "primary only"},
- updateRole: {skip: "primary only"},
- updateUser: {skip: "primary only"},
- updateZoneKeyRange: {skip: "primary only"},
- usersInfo: {skip: "primary only"},
- validate: {skip: "does not return user data"},
- waitForOngoingChunkSplits: {skip: "does not return user data"},
- whatsmyuri: {skip: "does not return user data"}
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
-
- let scenarios = {
- dropRecreateAsUnshardedOnSameShard: function(
- staleMongos, freshMongos, test, commandProfile) {
- let primaryShardPrimary = st.rs0.getPrimary();
- let primaryShardSecondary = st.rs0.getSecondary();
-
- // Drop and recreate the collection.
- assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
-
- // Ensure the latest version changes have been persisted and propagated to the secondary
- // before we target it with versioned commands.
- assert.commandWorked(st.rs0.getPrimary().getDB('admin').runCommand(
- {_flushRoutingTableCacheUpdates: nss}));
- st.rs0.awaitReplication();
-
- let res = staleMongos.getDB(db).runCommand(Object.assign(
- {},
- test.command,
- {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- test.checkResults(res);
-
- if (test.behavior === "unshardedOnly") {
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the primary shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false}
- },
- commandProfile)
- });
- } else if (test.behavior == "versioned") {
- // Check that the primary shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the primary shard secondary received the request again and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ command: {distinct: coll, key: "x"},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.values.length, tojson(res));
},
- dropRecreateAsShardedOnSameShard: function(staleMongos, freshMongos, test, commandProfile) {
- let primaryShardPrimary = st.rs0.getPrimary();
- let primaryShardSecondary = st.rs0.getSecondary();
-
- // Drop and recreate the collection as sharded.
- assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- // Ensure the latest version changes have been persisted and propagated to the secondary
- // before we target it with versioned commands.
- assert.commandWorked(st.rs0.getPrimary().getDB('admin').runCommand(
- {_flushRoutingTableCacheUpdates: nss}));
- st.rs0.awaitReplication();
-
- let res = staleMongos.getDB(db).runCommand(Object.assign(
- {},
- test.command,
- {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- test.checkResults(res);
-
- if (test.behavior === "unshardedOnly") {
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the primary shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior == "versioned") {
- // Check that the primary shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the primary shard secondary received the request again and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ behavior: "versioned"
+ },
+ driverOIDTest: {skip: "does not return user data"},
+ drop: {skip: "primary only"},
+ dropAllRolesFromDatabase: {skip: "primary only"},
+ dropAllUsersFromDatabase: {skip: "primary only"},
+ dropConnections: {skip: "does not return user data"},
+ dropDatabase: {skip: "primary only"},
+ dropIndexes: {skip: "primary only"},
+ dropRole: {skip: "primary only"},
+ dropUser: {skip: "primary only"},
+ echo: {skip: "does not return user data"},
+ emptycapped: {skip: "primary only"},
+ enableSharding: {skip: "primary only"},
+ endSessions: {skip: "does not return user data"},
+ explain: {skip: "TODO SERVER-30068"},
+ features: {skip: "does not return user data"},
+ filemd5: {skip: "does not return user data"},
+ find: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- dropRecreateAsUnshardedOnDifferentShard: function(
- staleMongos, freshMongos, test, commandProfile) {
- // There is no way to drop and recreate the collection as unsharded on a *different*
- // shard without calling movePrimary, and it is known that a stale mongos will not
- // refresh its notion of the primary shard after it loads it once.
+ command: {find: coll, filter: {x: 1}},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ findAndModify: {skip: "primary only"},
+ flushRouterConfig: {skip: "does not return user data"},
+ forceerror: {skip: "does not return user data"},
+ fsync: {skip: "does not return user data"},
+ fsyncUnlock: {skip: "does not return user data"},
+ geoSearch: {skip: "not supported in mongos"},
+ getCmdLineOpts: {skip: "does not return user data"},
+ getDiagnosticData: {skip: "does not return user data"},
+ getLastError: {skip: "primary only"},
+ getLog: {skip: "does not return user data"},
+ getMore: {skip: "shard version already established"},
+ getParameter: {skip: "does not return user data"},
+ getShardMap: {skip: "does not return user data"},
+ getShardVersion: {skip: "primary only"},
+ getnonce: {skip: "does not return user data"},
+ godinsert: {skip: "for testing only"},
+ grantPrivilegesToRole: {skip: "primary only"},
+ grantRolesToRole: {skip: "primary only"},
+ grantRolesToUser: {skip: "primary only"},
+ handshake: {skip: "does not return user data"},
+ hostInfo: {skip: "does not return user data"},
+ insert: {skip: "primary only"},
+ invalidateUserCache: {skip: "does not return user data"},
+ isdbgrid: {skip: "does not return user data"},
+ isMaster: {skip: "does not return user data"},
+ killAllSessions: {skip: "does not return user data"},
+ killAllSessionsByPattern: {skip: "does not return user data"},
+ killCursors: {skip: "does not return user data"},
+ killOp: {skip: "does not return user data"},
+ killSessions: {skip: "does not return user data"},
+ listCollections: {skip: "primary only"},
+ listCommands: {skip: "does not return user data"},
+ listDatabases: {skip: "primary only"},
+ listIndexes: {skip: "primary only"},
+ listShards: {skip: "does not return user data"},
+ lockInfo: {skip: "primary only"},
+ logApplicationMessage: {skip: "primary only"},
+ logRotate: {skip: "does not return user data"},
+ logout: {skip: "does not return user data"},
+ makeSnapshot: {skip: "does not return user data"},
+ mapReduce: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {
+ mapReduce: coll,
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {inline: 1}
},
- dropRecreateAsShardedOnDifferentShard: function(
- staleMongos, freshMongos, test, commandProfile) {
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardPrimary = st.rs1.getPrimary();
- let recipientShardSecondary = st.rs1.getSecondary();
-
- // Drop and recreate the collection as sharded, and move the chunk to the other shard.
- assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
- assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- // Use {w:2} (all) write concern in the moveChunk operation so the metadata change gets
- // persisted to the secondary before versioned commands are sent against the secondary.
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: nss,
- find: {x: 0},
- to: st.shard1.shardName,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- }));
-
- let res = staleMongos.getDB(db).runCommand(Object.assign(
- {},
- test.command,
- {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- test.checkResults(res);
-
- if (test.behavior === "unshardedOnly") {
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the recipient shard primary received the request without a
- // shardVersion field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior == "versioned") {
- // Check that the donor shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the recipient shard secondary received the request and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(0, res.results.length, tojson(res));
+ },
+ behavior: "targetsPrimaryUsesConnectionVersioning"
+ },
+ mergeChunks: {skip: "primary only"},
+ moveChunk: {skip: "primary only"},
+ movePrimary: {skip: "primary only"},
+ multicast: {skip: "does not return user data"},
+ netstat: {skip: "does not return user data"},
+ ping: {skip: "does not return user data"},
+ planCacheClear: {skip: "does not return user data"},
+ planCacheClearFilters: {skip: "does not return user data"},
+ planCacheListFilters: {skip: "does not return user data"},
+ planCacheListPlans: {skip: "does not return user data"},
+ planCacheListQueryShapes: {skip: "does not return user data"},
+ planCacheSetFilter: {skip: "does not return user data"},
+ profile: {skip: "primary only"},
+ reapLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshSessions: {skip: "does not return user data"},
+ refreshSessionsInternal: {skip: "does not return user data"},
+ removeShard: {skip: "primary only"},
+ removeShardFromZone: {skip: "primary only"},
+ renameCollection: {skip: "primary only"},
+ repairCursor: {skip: "does not return user data"},
+ replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
+ replSetFreeze: {skip: "does not return user data"},
+ replSetGetConfig: {skip: "does not return user data"},
+ replSetGetRBID: {skip: "does not return user data"},
+ replSetGetStatus: {skip: "does not return user data"},
+ replSetHeartbeat: {skip: "does not return user data"},
+ replSetInitiate: {skip: "does not return user data"},
+ replSetMaintenance: {skip: "does not return user data"},
+ replSetReconfig: {skip: "does not return user data"},
+ replSetRequestVotes: {skip: "does not return user data"},
+ replSetStepDown: {skip: "does not return user data"},
+ replSetStepUp: {skip: "does not return user data"},
+ replSetSyncFrom: {skip: "does not return user data"},
+ replSetTest: {skip: "does not return user data"},
+ replSetUpdatePosition: {skip: "does not return user data"},
+ replSetResizeOplog: {skip: "does not return user data"},
+ resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
+ resync: {skip: "primary only"},
+ revokePrivilegesFromRole: {skip: "primary only"},
+ revokeRolesFromRole: {skip: "primary only"},
+ revokeRolesFromUser: {skip: "primary only"},
+ rolesInfo: {skip: "primary only"},
+ saslContinue: {skip: "primary only"},
+ saslStart: {skip: "primary only"},
+ serverStatus: {skip: "does not return user data"},
+ setCommittedSnapshot: {skip: "does not return user data"},
+ setIndexCommitQuorum: {skip: "primary only"},
+ setFeatureCompatibilityVersion: {skip: "primary only"},
+ setFreeMonitoring: {skip: "primary only"},
+ setParameter: {skip: "does not return user data"},
+ setShardVersion: {skip: "does not return user data"},
+ shardCollection: {skip: "primary only"},
+ shardConnPoolStats: {skip: "does not return user data"},
+ shardingState: {skip: "does not return user data"},
+ shutdown: {skip: "does not return user data"},
+ sleep: {skip: "does not return user data"},
+ split: {skip: "primary only"},
+ splitChunk: {skip: "primary only"},
+ splitVector: {skip: "primary only"},
+ stageDebug: {skip: "primary only"},
+ startRecordingTraffic: {skip: "does not return user data"},
+ startSession: {skip: "does not return user data"},
+ stopRecordingTraffic: {skip: "does not return user data"},
+ top: {skip: "does not return user data"},
+ touch: {skip: "does not return user data"},
+ unsetSharding: {skip: "does not return user data"},
+ update: {skip: "primary only"},
+ updateRole: {skip: "primary only"},
+ updateUser: {skip: "primary only"},
+ updateZoneKeyRange: {skip: "primary only"},
+ usersInfo: {skip: "primary only"},
+ validate: {skip: "does not return user data"},
+ waitForOngoingChunkSplits: {skip: "does not return user data"},
+ whatsmyuri: {skip: "does not return user data"}
+};
+
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
+
+let scenarios = {
+ dropRecreateAsUnshardedOnSameShard: function(staleMongos, freshMongos, test, commandProfile) {
+ let primaryShardPrimary = st.rs0.getPrimary();
+ let primaryShardSecondary = st.rs0.getSecondary();
+
+ // Drop and recreate the collection.
+ assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
+
+ // Ensure the latest version changes have been persisted and propagated to the secondary
+ // before we target it with versioned commands.
+ assert.commandWorked(
+ st.rs0.getPrimary().getDB('admin').runCommand({_flushRoutingTableCacheUpdates: nss}));
+ st.rs0.awaitReplication();
+
+ let res = staleMongos.getDB(db).runCommand(
+ Object.assign({},
+ test.command,
+ {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ test.checkResults(res);
+
+ if (test.behavior === "unshardedOnly") {
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the primary shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false}
+ },
+ commandProfile)
+ });
+ } else if (test.behavior == "versioned") {
+ // Check that the primary shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
+
+ // Check that the primary shard secondary received the request again and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
+ }
+ },
+ dropRecreateAsShardedOnSameShard: function(staleMongos, freshMongos, test, commandProfile) {
+ let primaryShardPrimary = st.rs0.getPrimary();
+ let primaryShardSecondary = st.rs0.getSecondary();
+
+ // Drop and recreate the collection as sharded.
+ assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
+ assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ // Ensure the latest version changes have been persisted and propagated to the secondary
+ // before we target it with versioned commands.
+ assert.commandWorked(
+ st.rs0.getPrimary().getDB('admin').runCommand({_flushRoutingTableCacheUpdates: nss}));
+ st.rs0.awaitReplication();
+
+ let res = staleMongos.getDB(db).runCommand(
+ Object.assign({},
+ test.command,
+ {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ test.checkResults(res);
+
+ if (test.behavior === "unshardedOnly") {
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: primaryShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the primary shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
+ },
+ commandProfile)
+ });
+ } else if (test.behavior == "versioned") {
+ // Check that the primary shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
+
+ // Check that the primary shard secondary received the request again and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: primaryShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
}
- };
+ },
+ dropRecreateAsUnshardedOnDifferentShard: function(
+ staleMongos, freshMongos, test, commandProfile) {
+ // There is no way to drop and recreate the collection as unsharded on a *different*
+ // shard without calling movePrimary, and it is known that a stale mongos will not
+ // refresh its notion of the primary shard after it loads it once.
+ },
+ dropRecreateAsShardedOnDifferentShard: function(
+ staleMongos, freshMongos, test, commandProfile) {
+ let donorShardSecondary = st.rs0.getSecondary();
+ let recipientShardPrimary = st.rs1.getPrimary();
+ let recipientShardSecondary = st.rs1.getSecondary();
+
+ // Drop and recreate the collection as sharded, and move the chunk to the other shard.
+ assert.commandWorked(freshMongos.getDB(db).runCommand({drop: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand({create: coll}));
+ assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ // Use {w:2} (all) write concern in the moveChunk operation so the metadata change gets
+ // persisted to the secondary before versioned commands are sent against the secondary.
+ assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: nss,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ }));
+
+ let res = staleMongos.getDB(db).runCommand(
+ Object.assign({},
+ test.command,
+ {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ test.checkResults(res);
+
+ if (test.behavior === "unshardedOnly") {
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the recipient shard primary received the request without a
+ // shardVersion field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
+ },
+ commandProfile)
+ });
+ } else if (test.behavior == "versioned") {
+ // Check that the donor shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
+
+ // Check that the recipient shard secondary received the request and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
+ }
+ }
+};
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- let res = st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = testCases[command];
- assert(test !== undefined,
- "coverage failure: must define a safe secondary reads test for " + command);
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = testCases[command];
+ assert(test !== undefined,
+ "coverage failure: must define a safe secondary reads test for " + command);
- if (test.skip !== undefined) {
- print("skipping " + command + ": " + test.skip);
- continue;
- }
- validateTestCase(test);
-
- // Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
-
- for (let scenario in scenarios) {
- jsTest.log("testing command " + tojson(command) + " under scenario " + scenario);
-
- // Each scenario starts with a sharded collection with shard0 as the primary shard.
- assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
- assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- // Do any test-specific setup.
- test.setUp(staleMongos);
-
- // Wait for replication as a safety net, in case the individual setup function for a
- // test case did not specify writeConcern itself
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
-
- // Do dummy read from the stale mongos so it loads the routing table into memory once.
- // Additionally, do a secondary read to ensure that the secondary has loaded the initial
- // routing table -- the first read to the primary will refresh the mongos' shardVersion,
- // which will then be used against the secondary to ensure the secondary is fresh.
- assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand({
- find: coll,
- $readPreference: {mode: 'secondary'},
- readConcern: {'level': 'local'}
- }));
- // Wait for drop of previous database to replicate before beginning profiling
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
- assert.commandWorked(st.rs0.getPrimary().getDB(db).setProfilingLevel(2));
- assert.commandWorked(st.rs0.getSecondary().getDB(db).setProfilingLevel(2));
- assert.commandWorked(st.rs1.getPrimary().getDB(db).setProfilingLevel(2));
- assert.commandWorked(st.rs1.getSecondary().getDB(db).setProfilingLevel(2));
-
- scenarios[scenario](staleMongos, freshMongos, test, commandProfile);
-
- // Clean up the database by dropping it; this is the only way to drop the profiler
- // collection on secondaries.
- // Do this from staleMongos, so staleMongos purges the database entry from its cache.
- assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
- }
+ if (test.skip !== undefined) {
+ print("skipping " + command + ": " + test.skip);
+ continue;
+ }
+ validateTestCase(test);
+
+ // Build the query to identify the operation in the system profiler.
+ let commandProfile = buildCommandProfile(test.command, true /* sharded */);
+
+ for (let scenario in scenarios) {
+ jsTest.log("testing command " + tojson(command) + " under scenario " + scenario);
+
+ // Each scenario starts with a sharded collection with shard0 as the primary shard.
+ assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, st.shard0.shardName);
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ // Do any test-specific setup.
+ test.setUp(staleMongos);
+
+        // Wait for replication as a safety net, in case the individual setup function for a
+        // test case did not specify writeConcern itself.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+
+        // Do a dummy read from the stale mongos so it loads the routing table into memory once.
+ // Additionally, do a secondary read to ensure that the secondary has loaded the initial
+ // routing table -- the first read to the primary will refresh the mongos' shardVersion,
+ // which will then be used against the secondary to ensure the secondary is fresh.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand(
+ {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+        // Wait for the drop of the previous database to replicate before beginning profiling.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+ assert.commandWorked(st.rs0.getPrimary().getDB(db).setProfilingLevel(2));
+ assert.commandWorked(st.rs0.getSecondary().getDB(db).setProfilingLevel(2));
+ assert.commandWorked(st.rs1.getPrimary().getDB(db).setProfilingLevel(2));
+ assert.commandWorked(st.rs1.getSecondary().getDB(db).setProfilingLevel(2));
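+
+        // Quick sanity sketch (added for exposition, not in the original test):
+        // level 2 profiles every operation, and {profile: -1} reads the current
+        // level back in the 'was' field of the response.
+        assert.eq(2, st.rs1.getSecondary().getDB(db).runCommand({profile: -1}).was);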
+
+ scenarios[scenario](staleMongos, freshMongos, test, commandProfile);
+
+ // Clean up the database by dropping it; this is the only way to drop the profiler
+ // collection on secondaries.
+ // Do this from staleMongos, so staleMongos purges the database entry from its cache.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
}
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 7f7e8fff3f1..234f0873076 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -22,521 +22,520 @@
* "versioned". Determines what system profiler checks are performed.
*/
(function() {
- "use strict";
-
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
-
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
-
- // Check that a test case is well-formed.
- let validateTestCase = function(test) {
- assert(test.setUp && typeof(test.setUp) === "function");
- assert(test.command && typeof(test.command) === "object");
- assert(test.checkResults && typeof(test.checkResults) === "function");
- assert(test.checkAvailableReadConcernResults &&
- typeof(test.checkAvailableReadConcernResults) === "function");
- assert(test.behavior === "unshardedOnly" ||
- test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
- test.behavior === "versioned");
- };
-
- let testCases = {
- _addShard: {skip: "primary only"},
- _cloneCatalogData: {skip: "primary only"},
- _configsvrAddShard: {skip: "primary only"},
- _configsvrAddShardToZone: {skip: "primary only"},
- _configsvrBalancerStart: {skip: "primary only"},
- _configsvrBalancerStatus: {skip: "primary only"},
- _configsvrBalancerStop: {skip: "primary only"},
- _configsvrCommitChunkMerge: {skip: "primary only"},
- _configsvrCommitChunkMigration: {skip: "primary only"},
- _configsvrCommitChunkSplit: {skip: "primary only"},
- _configsvrCommitMovePrimary: {skip: "primary only"},
- _configsvrDropCollection: {skip: "primary only"},
- _configsvrDropDatabase: {skip: "primary only"},
- _configsvrMoveChunk: {skip: "primary only"},
- _configsvrMovePrimary: {skip: "primary only"},
- _configsvrRemoveShardFromZone: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
- _configsvrUpdateZoneKeyRange: {skip: "primary only"},
- _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
- _getUserCacheGeneration: {skip: "does not return user data"},
- _hashBSONElement: {skip: "does not return user data"},
- _isSelf: {skip: "does not return user data"},
- _mergeAuthzCollections: {skip: "primary only"},
- _migrateClone: {skip: "primary only"},
- _movePrimary: {skip: "primary only"},
- _recvChunkAbort: {skip: "primary only"},
- _recvChunkCommit: {skip: "primary only"},
- _recvChunkStart: {skip: "primary only"},
- _recvChunkStatus: {skip: "primary only"},
- _transferMods: {skip: "primary only"},
- abortTransaction: {skip: "primary only"},
- addShard: {skip: "primary only"},
- addShardToZone: {skip: "primary only"},
- aggregate: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+"use strict";
+
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
+
+// Check that a test case is well-formed.
+let validateTestCase = function(test) {
+ assert(test.setUp && typeof (test.setUp) === "function");
+ assert(test.command && typeof (test.command) === "object");
+ assert(test.checkResults && typeof (test.checkResults) === "function");
+ assert(test.checkAvailableReadConcernResults &&
+ typeof (test.checkAvailableReadConcernResults) === "function");
+ assert(test.behavior === "unshardedOnly" ||
+ test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
+ test.behavior === "versioned");
+};
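+
+// As a quick self-check (added for exposition; the object is illustrative and
+// not part of the test matrix below), a minimal entry with the required shape
+// passes validateTestCase:
+validateTestCase({
+    setUp: function(mongosConn) {},
+    command: {find: coll, filter: {x: 1}},
+    checkResults: function(res) {},
+    checkAvailableReadConcernResults: function(res) {},
+    behavior: "versioned"
+});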
+
+let testCases = {
+ _addShard: {skip: "primary only"},
+ _cloneCatalogData: {skip: "primary only"},
+ _configsvrAddShard: {skip: "primary only"},
+ _configsvrAddShardToZone: {skip: "primary only"},
+ _configsvrBalancerStart: {skip: "primary only"},
+ _configsvrBalancerStatus: {skip: "primary only"},
+ _configsvrBalancerStop: {skip: "primary only"},
+ _configsvrCommitChunkMerge: {skip: "primary only"},
+ _configsvrCommitChunkMigration: {skip: "primary only"},
+ _configsvrCommitChunkSplit: {skip: "primary only"},
+ _configsvrCommitMovePrimary: {skip: "primary only"},
+ _configsvrDropCollection: {skip: "primary only"},
+ _configsvrDropDatabase: {skip: "primary only"},
+ _configsvrMoveChunk: {skip: "primary only"},
+ _configsvrMovePrimary: {skip: "primary only"},
+ _configsvrRemoveShardFromZone: {skip: "primary only"},
+ _configsvrShardCollection: {skip: "primary only"},
+ _configsvrUpdateZoneKeyRange: {skip: "primary only"},
+ _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
+ _getUserCacheGeneration: {skip: "does not return user data"},
+ _hashBSONElement: {skip: "does not return user data"},
+ _isSelf: {skip: "does not return user data"},
+ _mergeAuthzCollections: {skip: "primary only"},
+ _migrateClone: {skip: "primary only"},
+ _movePrimary: {skip: "primary only"},
+ _recvChunkAbort: {skip: "primary only"},
+ _recvChunkCommit: {skip: "primary only"},
+ _recvChunkStart: {skip: "primary only"},
+ _recvChunkStatus: {skip: "primary only"},
+ _transferMods: {skip: "primary only"},
+ abortTransaction: {skip: "primary only"},
+ addShard: {skip: "primary only"},
+ addShardToZone: {skip: "primary only"},
+ aggregate: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- appendOplogNote: {skip: "primary only"},
- applyOps: {skip: "primary only"},
- authSchemaUpgrade: {skip: "primary only"},
- authenticate: {skip: "does not return user data"},
- availableQueryOptions: {skip: "does not return user data"},
- balancerStart: {skip: "primary only"},
- balancerStatus: {skip: "primary only"},
- balancerStop: {skip: "primary only"},
- buildInfo: {skip: "does not return user data"},
- captrunc: {skip: "primary only"},
- checkShardingIndex: {skip: "primary only"},
- cleanupOrphaned: {skip: "primary only"},
- clearLog: {skip: "does not return user data"},
- clone: {skip: "primary only"},
- cloneCollection: {skip: "primary only"},
- cloneCollectionAsCapped: {skip: "primary only"},
- commitTransaction: {skip: "primary only"},
- collMod: {skip: "primary only"},
- collStats: {skip: "does not return user data"},
- compact: {skip: "does not return user data"},
- configureFailPoint: {skip: "does not return user data"},
- connPoolStats: {skip: "does not return user data"},
- connPoolSync: {skip: "does not return user data"},
- connectionStatus: {skip: "does not return user data"},
- convertToCapped: {skip: "primary only"},
- count: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {count: coll, query: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.n, tojson(res));
- },
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.n, tojson(res));
- },
- behavior: "versioned"
+ command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
},
- cpuload: {skip: "does not return user data"},
- create: {skip: "primary only"},
- createIndexes: {skip: "primary only"},
- createRole: {skip: "primary only"},
- createUser: {skip: "primary only"},
- currentOp: {skip: "does not return user data"},
- dataSize: {skip: "does not return user data"},
- dbHash: {skip: "does not return user data"},
- dbStats: {skip: "does not return user data"},
- delete: {skip: "primary only"},
- distinct: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {distinct: coll, key: "x"},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.values.length, tojson(res));
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ appendOplogNote: {skip: "primary only"},
+ applyOps: {skip: "primary only"},
+ authSchemaUpgrade: {skip: "primary only"},
+ authenticate: {skip: "does not return user data"},
+ availableQueryOptions: {skip: "does not return user data"},
+ balancerStart: {skip: "primary only"},
+ balancerStatus: {skip: "primary only"},
+ balancerStop: {skip: "primary only"},
+ buildInfo: {skip: "does not return user data"},
+ captrunc: {skip: "primary only"},
+ checkShardingIndex: {skip: "primary only"},
+ cleanupOrphaned: {skip: "primary only"},
+ clearLog: {skip: "does not return user data"},
+ clone: {skip: "primary only"},
+ cloneCollection: {skip: "primary only"},
+ cloneCollectionAsCapped: {skip: "primary only"},
+ commitTransaction: {skip: "primary only"},
+ collMod: {skip: "primary only"},
+ collStats: {skip: "does not return user data"},
+ compact: {skip: "does not return user data"},
+ configureFailPoint: {skip: "does not return user data"},
+ connPoolStats: {skip: "does not return user data"},
+ connPoolSync: {skip: "does not return user data"},
+ connectionStatus: {skip: "does not return user data"},
+ convertToCapped: {skip: "primary only"},
+ count: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {count: coll, query: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.n, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.n, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ cpuload: {skip: "does not return user data"},
+ create: {skip: "primary only"},
+ createIndexes: {skip: "primary only"},
+ createRole: {skip: "primary only"},
+ createUser: {skip: "primary only"},
+ currentOp: {skip: "does not return user data"},
+ dataSize: {skip: "does not return user data"},
+ dbHash: {skip: "does not return user data"},
+ dbStats: {skip: "does not return user data"},
+ delete: {skip: "primary only"},
+ distinct: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {distinct: coll, key: "x"},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.values.length, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.values.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ driverOIDTest: {skip: "does not return user data"},
+ drop: {skip: "primary only"},
+ dropAllRolesFromDatabase: {skip: "primary only"},
+ dropAllUsersFromDatabase: {skip: "primary only"},
+ dropConnections: {skip: "does not return user data"},
+ dropDatabase: {skip: "primary only"},
+ dropIndexes: {skip: "primary only"},
+ dropRole: {skip: "primary only"},
+ dropUser: {skip: "primary only"},
+ echo: {skip: "does not return user data"},
+ emptycapped: {skip: "primary only"},
+ enableSharding: {skip: "primary only"},
+ endSessions: {skip: "does not return user data"},
+ explain: {skip: "TODO SERVER-30068"},
+ features: {skip: "does not return user data"},
+ filemd5: {skip: "does not return user data"},
+ find: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {find: coll, filter: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ // The command should work and return orphaned results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ findAndModify: {skip: "primary only"},
+ flushRouterConfig: {skip: "does not return user data"},
+ forceerror: {skip: "does not return user data"},
+ fsync: {skip: "does not return user data"},
+ fsyncUnlock: {skip: "does not return user data"},
+ geoSearch: {skip: "not supported in mongos"},
+ getCmdLineOpts: {skip: "does not return user data"},
+ getDiagnosticData: {skip: "does not return user data"},
+ getLastError: {skip: "primary only"},
+ getLog: {skip: "does not return user data"},
+ getMore: {skip: "shard version already established"},
+ getParameter: {skip: "does not return user data"},
+ getShardMap: {skip: "does not return user data"},
+ getShardVersion: {skip: "primary only"},
+ getnonce: {skip: "does not return user data"},
+ godinsert: {skip: "for testing only"},
+ grantPrivilegesToRole: {skip: "primary only"},
+ grantRolesToRole: {skip: "primary only"},
+ grantRolesToUser: {skip: "primary only"},
+ handshake: {skip: "does not return user data"},
+ hostInfo: {skip: "does not return user data"},
+ insert: {skip: "primary only"},
+ invalidateUserCache: {skip: "does not return user data"},
+ isdbgrid: {skip: "does not return user data"},
+ isMaster: {skip: "does not return user data"},
+ killCursors: {skip: "does not return user data"},
+ killAllSessions: {skip: "does not return user data"},
+ killAllSessionsByPattern: {skip: "does not return user data"},
+ killOp: {skip: "does not return user data"},
+ killSessions: {skip: "does not return user data"},
+ listCollections: {skip: "primary only"},
+ listCommands: {skip: "does not return user data"},
+ listDatabases: {skip: "primary only"},
+ listIndexes: {skip: "primary only"},
+ listShards: {skip: "does not return user data"},
+ lockInfo: {skip: "primary only"},
+ logApplicationMessage: {skip: "primary only"},
+ logRotate: {skip: "does not return user data"},
+ logout: {skip: "does not return user data"},
+ makeSnapshot: {skip: "does not return user data"},
+ mapReduce: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {
+ mapReduce: coll,
+ map: function() {
+ emit(this.x, 1);
},
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.values.length, tojson(res));
+ reduce: function(key, values) {
+ return Array.sum(values);
},
- behavior: "versioned"
+ out: {inline: 1}
},
- driverOIDTest: {skip: "does not return user data"},
- drop: {skip: "primary only"},
- dropAllRolesFromDatabase: {skip: "primary only"},
- dropAllUsersFromDatabase: {skip: "primary only"},
- dropConnections: {skip: "does not return user data"},
- dropDatabase: {skip: "primary only"},
- dropIndexes: {skip: "primary only"},
- dropRole: {skip: "primary only"},
- dropUser: {skip: "primary only"},
- echo: {skip: "does not return user data"},
- emptycapped: {skip: "primary only"},
- enableSharding: {skip: "primary only"},
- endSessions: {skip: "does not return user data"},
- explain: {skip: "TODO SERVER-30068"},
- features: {skip: "does not return user data"},
- filemd5: {skip: "does not return user data"},
- find: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {find: coll, filter: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.results.length, tojson(res));
+ assert.eq(1, res.results[0]._id, tojson(res));
+ assert.eq(2, res.results[0].value, tojson(res));
+ },
+ checkAvailableReadConcernResults: function(res) {
+ assert.commandFailed(res);
+ },
+ behavior: "targetsPrimaryUsesConnectionVersioning"
+ },
+ mergeChunks: {skip: "primary only"},
+ moveChunk: {skip: "primary only"},
+ movePrimary: {skip: "primary only"},
+ multicast: {skip: "does not return user data"},
+ netstat: {skip: "does not return user data"},
+ ping: {skip: "does not return user data"},
+ planCacheClear: {skip: "does not return user data"},
+ planCacheClearFilters: {skip: "does not return user data"},
+ planCacheListFilters: {skip: "does not return user data"},
+ planCacheListPlans: {skip: "does not return user data"},
+ planCacheListQueryShapes: {skip: "does not return user data"},
+ planCacheSetFilter: {skip: "does not return user data"},
+ profile: {skip: "primary only"},
+ reapLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshSessions: {skip: "does not return user data"},
+ refreshSessionsInternal: {skip: "does not return user data"},
+ removeShard: {skip: "primary only"},
+ removeShardFromZone: {skip: "primary only"},
+ renameCollection: {skip: "primary only"},
+ repairCursor: {skip: "does not return user data"},
+ replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
+ replSetFreeze: {skip: "does not return user data"},
+ replSetGetConfig: {skip: "does not return user data"},
+ replSetGetRBID: {skip: "does not return user data"},
+ replSetGetStatus: {skip: "does not return user data"},
+ replSetHeartbeat: {skip: "does not return user data"},
+ replSetInitiate: {skip: "does not return user data"},
+ replSetMaintenance: {skip: "does not return user data"},
+ replSetReconfig: {skip: "does not return user data"},
+ replSetRequestVotes: {skip: "does not return user data"},
+ replSetStepDown: {skip: "does not return user data"},
+ replSetStepUp: {skip: "does not return user data"},
+ replSetSyncFrom: {skip: "does not return user data"},
+ replSetTest: {skip: "does not return user data"},
+ replSetUpdatePosition: {skip: "does not return user data"},
+ replSetResizeOplog: {skip: "does not return user data"},
+ resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
+ resync: {skip: "primary only"},
+ revokePrivilegesFromRole: {skip: "primary only"},
+ revokeRolesFromRole: {skip: "primary only"},
+ revokeRolesFromUser: {skip: "primary only"},
+ rolesInfo: {skip: "primary only"},
+ saslContinue: {skip: "primary only"},
+ saslStart: {skip: "primary only"},
+ serverStatus: {skip: "does not return user data"},
+ setCommittedSnapshot: {skip: "does not return user data"},
+ setIndexCommitQuorum: {skip: "primary only"},
+ setFeatureCompatibilityVersion: {skip: "primary only"},
+ setFreeMonitoring: {skip: "primary only"},
+ setParameter: {skip: "does not return user data"},
+ setShardVersion: {skip: "does not return user data"},
+ shardCollection: {skip: "primary only"},
+ shardConnPoolStats: {skip: "does not return user data"},
+ shardingState: {skip: "does not return user data"},
+ shutdown: {skip: "does not return user data"},
+ sleep: {skip: "does not return user data"},
+ split: {skip: "primary only"},
+ splitChunk: {skip: "primary only"},
+ splitVector: {skip: "primary only"},
+ stageDebug: {skip: "primary only"},
+ startRecordingTraffic: {skip: "does not return user data"},
+ startSession: {skip: "does not return user data"},
+ stopRecordingTraffic: {skip: "does not return user data"},
+ top: {skip: "does not return user data"},
+ touch: {skip: "does not return user data"},
+ unsetSharding: {skip: "does not return user data"},
+ update: {skip: "primary only"},
+ updateRole: {skip: "primary only"},
+ updateUser: {skip: "primary only"},
+ updateZoneKeyRange: {skip: "primary only"},
+ usersInfo: {skip: "primary only"},
+ validate: {skip: "does not return user data"},
+ waitForOngoingChunkSplits: {skip: "does not return user data"},
+ whatsmyuri: {skip: "does not return user data"}
+};
+
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
+
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+
+let donorShardPrimary = st.rs0.getPrimary();
+let recipientShardPrimary = st.rs1.getPrimary();
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
+
+let freshMongos = st.s0;
+let staleMongos = st.s1;
+
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
+
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = testCases[command];
+ assert(test !== undefined,
+ "coverage failure: must define a safe secondary reads test for " + command);
+
+ if (test.skip !== undefined) {
+ print("skipping " + command + ": " + test.skip);
+ continue;
+ }
+ validateTestCase(test);
+
+ jsTest.log("testing command " + tojson(test.command));
+
+ assert.commandWorked(freshMongos.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, st.shard0.shardName);
+ assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+
+ // We do this because we expect staleMongos to see that the collection is sharded, which
+ // it may not if the "nearest" config server it contacts has not replicated the
+ // shardCollection writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
+
+ assert.commandWorked(freshMongos.adminCommand({split: nss, middle: {x: 0}}));
+
+    // Do a dummy read from the stale mongos so it loads the routing table into memory once.
+ // Additionally, do a secondary read to ensure that the secondary has loaded the initial
+ // routing table -- the first read to the primary will refresh the mongos' shardVersion,
+ // which will then be used against the secondary to ensure the secondary is fresh.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand(
+ {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+
+ // Do any test-specific setup.
+ test.setUp(staleMongos);
+
+    // Wait for replication as a safety net, in case the individual setup function for a test
+    // case did not specify writeConcern itself.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
+
+ assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
+
+ // Suspend range deletion on the donor shard.
+ donorShardPrimary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
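+
+    // With range deletion suspended, the donor keeps the migrated documents
+    // around as orphans, which is what lets the 'available' read concern reads
+    // below return them (see checkAvailableReadConcernResults above).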
+
+ // Do a moveChunk from the fresh mongos to make the other mongos stale.
+ // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
+    // before stale-versioned commands are sent against the secondary.
+ assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: nss,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ }));
+
+ let cmdReadPrefSecondary =
+ Object.assign({}, test.command, {$readPreference: {mode: 'secondary'}});
+ let cmdPrefSecondaryConcernAvailable =
+ Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'available'}});
+ let cmdPrefSecondaryConcernLocal =
+ Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'local'}});
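+
+    // For the 'find' case above, for example, cmdReadPrefSecondary expands to
+    //     {find: "foo", filter: {x: 1}, $readPreference: {mode: 'secondary'}}
+    // with readConcern {level: 'available'} or {level: 'local'} layered on top.
+    // Object.assign copies left to right into a fresh object, so test.command
+    // itself is never mutated (a small sanity check, added for exposition):
+    assert.eq(undefined, test.command.$readPreference);
+    assert.eq(undefined, test.command.readConcern);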
+
+ let availableReadConcernRes =
+ staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernAvailable);
+ test.checkAvailableReadConcernResults(availableReadConcernRes);
+
+ let defaultReadConcernRes = staleMongos.getDB(db).runCommand(cmdReadPrefSecondary);
+ if (command === 'mapReduce') {
+ // mapReduce is always sent to a primary, which defaults to 'local' readConcern
+ test.checkResults(defaultReadConcernRes);
+ } else {
+ // Secondaries default to the 'available' readConcern
+ test.checkAvailableReadConcernResults(defaultReadConcernRes);
+ }
+
+ let localReadConcernRes = staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernLocal);
+ test.checkResults(localReadConcernRes);
+
+ // Build the query to identify the operation in the system profiler.
+ let commandProfile = buildCommandProfile(test.command, true /* sharded */);
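+
+    // (The entries matched below live in each database's 'system.profile'
+    // collection; an equivalent manual check -- shown purely as an illustration
+    // -- would be along the lines of:
+    //     donorShardSecondary.getDB(db).system.profile.find(commandProfile)
+    // with the shardVersion/readConcern/errCode predicates merged in.)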
+
+ if (test.behavior === "unshardedOnly") {
+ // Check that neither the donor nor recipient shard secondaries received either request.
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the recipient shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
},
- checkAvailableReadConcernResults: function(res) {
- // The command should work and return orphaned results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ commandProfile)
+ });
+ } else if (test.behavior === "versioned") {
+        // Check that the donor shard secondary received both the 'available' read concern
+        // request and the request with no read concern specified, and returned success for
+        // both, despite the mongos' stale routing table.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "available"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
},
- behavior: "versioned"
- },
- findAndModify: {skip: "primary only"},
- flushRouterConfig: {skip: "does not return user data"},
- forceerror: {skip: "does not return user data"},
- fsync: {skip: "does not return user data"},
- fsyncUnlock: {skip: "does not return user data"},
- geoSearch: {skip: "not supported in mongos"},
- getCmdLineOpts: {skip: "does not return user data"},
- getDiagnosticData: {skip: "does not return user data"},
- getLastError: {skip: "primary only"},
- getLog: {skip: "does not return user data"},
- getMore: {skip: "shard version already established"},
- getParameter: {skip: "does not return user data"},
- getShardMap: {skip: "does not return user data"},
- getShardVersion: {skip: "primary only"},
- getnonce: {skip: "does not return user data"},
- godinsert: {skip: "for testing only"},
- grantPrivilegesToRole: {skip: "primary only"},
- grantRolesToRole: {skip: "primary only"},
- grantRolesToUser: {skip: "primary only"},
- handshake: {skip: "does not return user data"},
- hostInfo: {skip: "does not return user data"},
- insert: {skip: "primary only"},
- invalidateUserCache: {skip: "does not return user data"},
- isdbgrid: {skip: "does not return user data"},
- isMaster: {skip: "does not return user data"},
- killCursors: {skip: "does not return user data"},
- killAllSessions: {skip: "does not return user data"},
- killAllSessionsByPattern: {skip: "does not return user data"},
- killOp: {skip: "does not return user data"},
- killSessions: {skip: "does not return user data"},
- listCollections: {skip: "primary only"},
- listCommands: {skip: "does not return user data"},
- listDatabases: {skip: "primary only"},
- listIndexes: {skip: "primary only"},
- listShards: {skip: "does not return user data"},
- lockInfo: {skip: "primary only"},
- logApplicationMessage: {skip: "primary only"},
- logRotate: {skip: "does not return user data"},
- logout: {skip: "does not return user data"},
- makeSnapshot: {skip: "does not return user data"},
- mapReduce: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ commandProfile)
+ });
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"$exists": false},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
},
- command: {
- mapReduce: coll,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
+ commandProfile)
+ });
+
+ // Check that the donor shard secondary then returned stale shardVersion for the request
+ // with local read concern.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.results.length, tojson(res));
- assert.eq(1, res.results[0]._id, tojson(res));
- assert.eq(2, res.results[0].value, tojson(res));
+ commandProfile)
+ });
+
+ // Check that the recipient shard secondary received the request with local read concern
+ // and also returned stale shardVersion once, even though the mongos is fresh, because
+ // the secondary was stale.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
},
- checkAvailableReadConcernResults: function(res) {
- assert.commandFailed(res);
+ commandProfile)
+ });
+
+ // Check that the recipient shard secondary received the request with local read concern
+ // again and finally returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
- },
- mergeChunks: {skip: "primary only"},
- moveChunk: {skip: "primary only"},
- movePrimary: {skip: "primary only"},
- multicast: {skip: "does not return user data"},
- netstat: {skip: "does not return user data"},
- ping: {skip: "does not return user data"},
- planCacheClear: {skip: "does not return user data"},
- planCacheClearFilters: {skip: "does not return user data"},
- planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
- planCacheSetFilter: {skip: "does not return user data"},
- profile: {skip: "primary only"},
- reapLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshSessions: {skip: "does not return user data"},
- refreshSessionsInternal: {skip: "does not return user data"},
- removeShard: {skip: "primary only"},
- removeShardFromZone: {skip: "primary only"},
- renameCollection: {skip: "primary only"},
- repairCursor: {skip: "does not return user data"},
- replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
- replSetFreeze: {skip: "does not return user data"},
- replSetGetConfig: {skip: "does not return user data"},
- replSetGetRBID: {skip: "does not return user data"},
- replSetGetStatus: {skip: "does not return user data"},
- replSetHeartbeat: {skip: "does not return user data"},
- replSetInitiate: {skip: "does not return user data"},
- replSetMaintenance: {skip: "does not return user data"},
- replSetReconfig: {skip: "does not return user data"},
- replSetRequestVotes: {skip: "does not return user data"},
- replSetStepDown: {skip: "does not return user data"},
- replSetStepUp: {skip: "does not return user data"},
- replSetSyncFrom: {skip: "does not return user data"},
- replSetTest: {skip: "does not return user data"},
- replSetUpdatePosition: {skip: "does not return user data"},
- replSetResizeOplog: {skip: "does not return user data"},
- resetError: {skip: "does not return user data"},
- restartCatalog: {skip: "internal-only command"},
- resync: {skip: "primary only"},
- revokePrivilegesFromRole: {skip: "primary only"},
- revokeRolesFromRole: {skip: "primary only"},
- revokeRolesFromUser: {skip: "primary only"},
- rolesInfo: {skip: "primary only"},
- saslContinue: {skip: "primary only"},
- saslStart: {skip: "primary only"},
- serverStatus: {skip: "does not return user data"},
- setCommittedSnapshot: {skip: "does not return user data"},
- setIndexCommitQuorum: {skip: "primary only"},
- setFeatureCompatibilityVersion: {skip: "primary only"},
- setFreeMonitoring: {skip: "primary only"},
- setParameter: {skip: "does not return user data"},
- setShardVersion: {skip: "does not return user data"},
- shardCollection: {skip: "primary only"},
- shardConnPoolStats: {skip: "does not return user data"},
- shardingState: {skip: "does not return user data"},
- shutdown: {skip: "does not return user data"},
- sleep: {skip: "does not return user data"},
- split: {skip: "primary only"},
- splitChunk: {skip: "primary only"},
- splitVector: {skip: "primary only"},
- stageDebug: {skip: "primary only"},
- startRecordingTraffic: {skip: "does not return user data"},
- startSession: {skip: "does not return user data"},
- stopRecordingTraffic: {skip: "does not return user data"},
- top: {skip: "does not return user data"},
- touch: {skip: "does not return user data"},
- unsetSharding: {skip: "does not return user data"},
- update: {skip: "primary only"},
- updateRole: {skip: "primary only"},
- updateUser: {skip: "primary only"},
- updateZoneKeyRange: {skip: "primary only"},
- usersInfo: {skip: "primary only"},
- validate: {skip: "does not return user data"},
- waitForOngoingChunkSplits: {skip: "does not return user data"},
- whatsmyuri: {skip: "does not return user data"}
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
-
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
-
- let donorShardPrimary = st.rs0.getPrimary();
- let recipientShardPrimary = st.rs1.getPrimary();
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
-
- let freshMongos = st.s0;
- let staleMongos = st.s1;
-
- let res = st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
-
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = testCases[command];
- assert(test !== undefined,
- "coverage failure: must define a safe secondary reads test for " + command);
-
- if (test.skip !== undefined) {
- print("skipping " + command + ": " + test.skip);
- continue;
- }
- validateTestCase(test);
-
- jsTest.log("testing command " + tojson(test.command));
-
- assert.commandWorked(freshMongos.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
- assert.commandWorked(freshMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
-
- // We do this because we expect staleMongos to see that the collection is sharded, which
- // it may not if the "nearest" config server it contacts has not replicated the
- // shardCollection writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
-
- assert.commandWorked(freshMongos.adminCommand({split: nss, middle: {x: 0}}));
-
- // Do dummy read from the stale mongos so it loads the routing table into memory once.
- // Additionally, do a secondary read to ensure that the secondary has loaded the initial
- // routing table -- the first read to the primary will refresh the mongos' shardVersion,
- // which will then be used against the secondary to ensure the secondary is fresh.
- assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand(
- {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
-
- // Do any test-specific setup.
- test.setUp(staleMongos);
-
- // Wait for replication as a safety net, in case the individual setup function for a test
- // case did not specify writeConcern itself
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
-
- assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
-
- // Suspend range deletion on the donor shard.
- donorShardPrimary.adminCommand(
- {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
-
- // Do a moveChunk from the fresh mongos to make the other mongos stale.
- // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
- // before stalely versioned commands are sent against the secondary.
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: nss,
- find: {x: 0},
- to: st.shard1.shardName,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- }));
-
- let cmdReadPrefSecondary =
- Object.assign({}, test.command, {$readPreference: {mode: 'secondary'}});
- let cmdPrefSecondaryConcernAvailable =
- Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'available'}});
- let cmdPrefSecondaryConcernLocal =
- Object.assign({}, cmdReadPrefSecondary, {readConcern: {level: 'local'}});
-
- let availableReadConcernRes =
- staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernAvailable);
- test.checkAvailableReadConcernResults(availableReadConcernRes);
-
- let defaultReadConcernRes = staleMongos.getDB(db).runCommand(cmdReadPrefSecondary);
- if (command === 'mapReduce') {
- // mapReduce is always sent to a primary, which defaults to 'local' readConcern
- test.checkResults(defaultReadConcernRes);
- } else {
- // Secondaries default to the 'available' readConcern
- test.checkAvailableReadConcernResults(defaultReadConcernRes);
- }
-
- let localReadConcernRes = staleMongos.getDB(db).runCommand(cmdPrefSecondaryConcernLocal);
- test.checkResults(localReadConcernRes);
-
- // Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
-
- if (test.behavior === "unshardedOnly") {
- // Check that neither the donor nor recipient shard secondaries received either request.
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the recipient shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior === "versioned") {
- // Check that the donor shard secondary received both the 'available' read concern
- // request and read concern not specified request and returned success for both, despite
- // the mongos' stale routing table.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "available"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"$exists": false},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
-
- // Check that the donor shard secondary then returned stale shardVersion for the request
- // with local read concern.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the recipient shard secondary received the request with local read concern
- // and also returned stale shardVersion once, even though the mongos is fresh, because
- // the secondary was stale.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
-
- // Check that the recipient shard secondary received the request with local read concern
- // again and finally returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
-
- donorShardPrimary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
-
- // Clean up the collection by dropping the DB. This also drops all associated indexes and
- // clears the profiler collection.
- // Do this from staleMongos, so staleMongos purges the database entry from its cache.
- assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+ commandProfile)
+ });
}
- st.stop();
+ donorShardPrimary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'off'});
+
+ // Clean up the collection by dropping the DB. This also drops all associated indexes and
+ // clears the profiler collection.
+ // Do this from staleMongos, so staleMongos purges the database entry from its cache.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+}
+
+st.stop();
})();
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 4ce841f18f9..866080326d2 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -17,448 +17,448 @@
* "versioned". Determines what system profiler checks are performed.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/profiler.js');
- load('jstests/sharding/libs/last_stable_mongos_commands.js');
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
- // Check that a test case is well-formed.
- let validateTestCase = function(test) {
- assert(test.setUp && typeof(test.setUp) === "function");
- assert(test.command && typeof(test.command) === "object");
- assert(test.checkResults && typeof(test.checkResults) === "function");
- assert(test.behavior === "unshardedOnly" ||
- test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
- test.behavior === "versioned");
- };
+// Check that a test case is well-formed.
+let validateTestCase = function(test) {
+ assert(test.setUp && typeof (test.setUp) === "function");
+ assert(test.command && typeof (test.command) === "object");
+ assert(test.checkResults && typeof (test.checkResults) === "function");
+ assert(test.behavior === "unshardedOnly" ||
+ test.behavior === "targetsPrimaryUsesConnectionVersioning" ||
+ test.behavior === "versioned");
+};
- let testCases = {
- _addShard: {skip: "primary only"},
- _cloneCatalogData: {skip: "primary only"},
- _configsvrAddShard: {skip: "primary only"},
- _configsvrAddShardToZone: {skip: "primary only"},
- _configsvrBalancerStart: {skip: "primary only"},
- _configsvrBalancerStatus: {skip: "primary only"},
- _configsvrBalancerStop: {skip: "primary only"},
- _configsvrCommitChunkMerge: {skip: "primary only"},
- _configsvrCommitChunkMigration: {skip: "primary only"},
- _configsvrCommitChunkSplit: {skip: "primary only"},
- _configsvrCommitMovePrimary: {skip: "primary only"},
- _configsvrDropCollection: {skip: "primary only"},
- _configsvrDropDatabase: {skip: "primary only"},
- _configsvrMoveChunk: {skip: "primary only"},
- _configsvrMovePrimary: {skip: "primary only"},
- _configsvrRemoveShardFromZone: {skip: "primary only"},
- _configsvrShardCollection: {skip: "primary only"},
- _configsvrUpdateZoneKeyRange: {skip: "primary only"},
- _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
- _getUserCacheGeneration: {skip: "does not return user data"},
- _hashBSONElement: {skip: "does not return user data"},
- _isSelf: {skip: "does not return user data"},
- _mergeAuthzCollections: {skip: "primary only"},
- _migrateClone: {skip: "primary only"},
- _movePrimary: {skip: "primary only"},
- _recvChunkAbort: {skip: "primary only"},
- _recvChunkCommit: {skip: "primary only"},
- _recvChunkStart: {skip: "primary only"},
- _recvChunkStatus: {skip: "primary only"},
- _transferMods: {skip: "primary only"},
- abortTransaction: {skip: "primary only"},
- addShard: {skip: "primary only"},
- addShardToZone: {skip: "primary only"},
- aggregate: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+let testCases = {
+ _addShard: {skip: "primary only"},
+ _cloneCatalogData: {skip: "primary only"},
+ _configsvrAddShard: {skip: "primary only"},
+ _configsvrAddShardToZone: {skip: "primary only"},
+ _configsvrBalancerStart: {skip: "primary only"},
+ _configsvrBalancerStatus: {skip: "primary only"},
+ _configsvrBalancerStop: {skip: "primary only"},
+ _configsvrCommitChunkMerge: {skip: "primary only"},
+ _configsvrCommitChunkMigration: {skip: "primary only"},
+ _configsvrCommitChunkSplit: {skip: "primary only"},
+ _configsvrCommitMovePrimary: {skip: "primary only"},
+ _configsvrDropCollection: {skip: "primary only"},
+ _configsvrDropDatabase: {skip: "primary only"},
+ _configsvrMoveChunk: {skip: "primary only"},
+ _configsvrMovePrimary: {skip: "primary only"},
+ _configsvrRemoveShardFromZone: {skip: "primary only"},
+ _configsvrShardCollection: {skip: "primary only"},
+ _configsvrUpdateZoneKeyRange: {skip: "primary only"},
+ _flushRoutingTableCacheUpdates: {skip: "does not return user data"},
+ _getUserCacheGeneration: {skip: "does not return user data"},
+ _hashBSONElement: {skip: "does not return user data"},
+ _isSelf: {skip: "does not return user data"},
+ _mergeAuthzCollections: {skip: "primary only"},
+ _migrateClone: {skip: "primary only"},
+ _movePrimary: {skip: "primary only"},
+ _recvChunkAbort: {skip: "primary only"},
+ _recvChunkCommit: {skip: "primary only"},
+ _recvChunkStart: {skip: "primary only"},
+ _recvChunkStatus: {skip: "primary only"},
+ _transferMods: {skip: "primary only"},
+ abortTransaction: {skip: "primary only"},
+ addShard: {skip: "primary only"},
+ addShardToZone: {skip: "primary only"},
+ aggregate: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- appendOplogNote: {skip: "primary only"},
- applyOps: {skip: "primary only"},
- authenticate: {skip: "does not return user data"},
- authSchemaUpgrade: {skip: "primary only"},
- availableQueryOptions: {skip: "does not return user data"},
- balancerStart: {skip: "primary only"},
- balancerStatus: {skip: "primary only"},
- balancerStop: {skip: "primary only"},
- buildInfo: {skip: "does not return user data"},
- captrunc: {skip: "primary only"},
- checkShardingIndex: {skip: "primary only"},
- cleanupOrphaned: {skip: "primary only"},
- clearLog: {skip: "does not return user data"},
- clone: {skip: "primary only"},
- cloneCollection: {skip: "primary only"},
- cloneCollectionAsCapped: {skip: "primary only"},
- collMod: {skip: "primary only"},
- collStats: {skip: "does not return user data"},
- commitTransaction: {skip: "primary only"},
- compact: {skip: "does not return user data"},
- configureFailPoint: {skip: "does not return user data"},
- connPoolStats: {skip: "does not return user data"},
- connPoolSync: {skip: "does not return user data"},
- connectionStatus: {skip: "does not return user data"},
- convertToCapped: {skip: "primary only"},
- count: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {count: coll, query: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.n, tojson(res));
- },
- behavior: "versioned"
+ command: {aggregate: coll, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
},
- cpuload: {skip: "does not return user data"},
- create: {skip: "primary only"},
- createIndexes: {skip: "primary only"},
- createRole: {skip: "primary only"},
- createUser: {skip: "primary only"},
- currentOp: {skip: "does not return user data"},
- dataSize: {skip: "does not return user data"},
- dbHash: {skip: "does not return user data"},
- dbStats: {skip: "does not return user data"},
- delete: {skip: "primary only"},
- distinct: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {distinct: coll, key: "x"},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.values.length, tojson(res));
- },
- behavior: "versioned"
+ behavior: "versioned"
+ },
+ appendOplogNote: {skip: "primary only"},
+ applyOps: {skip: "primary only"},
+ authenticate: {skip: "does not return user data"},
+ authSchemaUpgrade: {skip: "primary only"},
+ availableQueryOptions: {skip: "does not return user data"},
+ balancerStart: {skip: "primary only"},
+ balancerStatus: {skip: "primary only"},
+ balancerStop: {skip: "primary only"},
+ buildInfo: {skip: "does not return user data"},
+ captrunc: {skip: "primary only"},
+ checkShardingIndex: {skip: "primary only"},
+ cleanupOrphaned: {skip: "primary only"},
+ clearLog: {skip: "does not return user data"},
+ clone: {skip: "primary only"},
+ cloneCollection: {skip: "primary only"},
+ cloneCollectionAsCapped: {skip: "primary only"},
+ collMod: {skip: "primary only"},
+ collStats: {skip: "does not return user data"},
+ commitTransaction: {skip: "primary only"},
+ compact: {skip: "does not return user data"},
+ configureFailPoint: {skip: "does not return user data"},
+ connPoolStats: {skip: "does not return user data"},
+ connPoolSync: {skip: "does not return user data"},
+ connectionStatus: {skip: "does not return user data"},
+ convertToCapped: {skip: "primary only"},
+ count: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
},
- driverOIDTest: {skip: "does not return user data"},
- drop: {skip: "primary only"},
- dropAllRolesFromDatabase: {skip: "primary only"},
- dropAllUsersFromDatabase: {skip: "primary only"},
- dropConnections: {skip: "does not return user data"},
- dropDatabase: {skip: "primary only"},
- dropIndexes: {skip: "primary only"},
- dropRole: {skip: "primary only"},
- dropUser: {skip: "primary only"},
- echo: {skip: "does not return user data"},
- emptycapped: {skip: "primary only"},
- enableSharding: {skip: "primary only"},
- endSessions: {skip: "does not return user data"},
- explain: {skip: "TODO SERVER-30068"},
- features: {skip: "does not return user data"},
- filemd5: {skip: "does not return user data"},
- find: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {find: coll, filter: {x: 1}},
- checkResults: function(res) {
- // The command should work and return correct results.
- assert.commandWorked(res);
- assert.eq(1, res.cursor.firstBatch.length, tojson(res));
- },
- behavior: "versioned"
+ command: {count: coll, query: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.n, tojson(res));
},
- findAndModify: {skip: "primary only"},
- flushRouterConfig: {skip: "does not return user data"},
- forceerror: {skip: "does not return user data"},
- fsync: {skip: "does not return user data"},
- fsyncUnlock: {skip: "does not return user data"},
- geoSearch: {skip: "not supported in mongos"},
- getCmdLineOpts: {skip: "does not return user data"},
- getDiagnosticData: {skip: "does not return user data"},
- getLastError: {skip: "primary only"},
- getLog: {skip: "does not return user data"},
- getMore: {skip: "shard version already established"},
- getParameter: {skip: "does not return user data"},
- getShardMap: {skip: "does not return user data"},
- getShardVersion: {skip: "primary only"},
- getnonce: {skip: "does not return user data"},
- godinsert: {skip: "for testing only"},
- grantPrivilegesToRole: {skip: "primary only"},
- grantRolesToRole: {skip: "primary only"},
- grantRolesToUser: {skip: "primary only"},
- handshake: {skip: "does not return user data"},
- hostInfo: {skip: "does not return user data"},
- insert: {skip: "primary only"},
- invalidateUserCache: {skip: "does not return user data"},
- isdbgrid: {skip: "does not return user data"},
- isMaster: {skip: "does not return user data"},
- killAllSessions: {skip: "does not return user data"},
- killAllSessionsByPattern: {skip: "does not return user data"},
- killCursors: {skip: "does not return user data"},
- killOp: {skip: "does not return user data"},
- killSessions: {skip: "does not return user data"},
- listCollections: {skip: "primary only"},
- listCommands: {skip: "does not return user data"},
- listDatabases: {skip: "primary only"},
- listIndexes: {skip: "primary only"},
- listShards: {skip: "does not return user data"},
- lockInfo: {skip: "primary only"},
- logApplicationMessage: {skip: "primary only"},
- logRotate: {skip: "does not return user data"},
- logout: {skip: "does not return user data"},
- makeSnapshot: {skip: "does not return user data"},
- mapReduce: {
- setUp: function(mongosConn) {
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
- },
- command: {
- mapReduce: coll,
- map: function() {
- emit(this.x, 1);
- },
- reduce: function(key, values) {
- return Array.sum(values);
- },
- out: {inline: 1}
+ behavior: "versioned"
+ },
+ cpuload: {skip: "does not return user data"},
+ create: {skip: "primary only"},
+ createIndexes: {skip: "primary only"},
+ createRole: {skip: "primary only"},
+ createUser: {skip: "primary only"},
+ currentOp: {skip: "does not return user data"},
+ dataSize: {skip: "does not return user data"},
+ dbHash: {skip: "does not return user data"},
+ dbStats: {skip: "does not return user data"},
+ delete: {skip: "primary only"},
+ distinct: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {distinct: coll, key: "x"},
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.values.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ driverOIDTest: {skip: "does not return user data"},
+ drop: {skip: "primary only"},
+ dropAllRolesFromDatabase: {skip: "primary only"},
+ dropAllUsersFromDatabase: {skip: "primary only"},
+ dropConnections: {skip: "does not return user data"},
+ dropDatabase: {skip: "primary only"},
+ dropIndexes: {skip: "primary only"},
+ dropRole: {skip: "primary only"},
+ dropUser: {skip: "primary only"},
+ echo: {skip: "does not return user data"},
+ emptycapped: {skip: "primary only"},
+ enableSharding: {skip: "primary only"},
+ endSessions: {skip: "does not return user data"},
+ explain: {skip: "TODO SERVER-30068"},
+ features: {skip: "does not return user data"},
+ filemd5: {skip: "does not return user data"},
+ find: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {find: coll, filter: {x: 1}},
+ checkResults: function(res) {
+ // The command should work and return correct results.
+ assert.commandWorked(res);
+ assert.eq(1, res.cursor.firstBatch.length, tojson(res));
+ },
+ behavior: "versioned"
+ },
+ findAndModify: {skip: "primary only"},
+ flushRouterConfig: {skip: "does not return user data"},
+ forceerror: {skip: "does not return user data"},
+ fsync: {skip: "does not return user data"},
+ fsyncUnlock: {skip: "does not return user data"},
+ geoSearch: {skip: "not supported in mongos"},
+ getCmdLineOpts: {skip: "does not return user data"},
+ getDiagnosticData: {skip: "does not return user data"},
+ getLastError: {skip: "primary only"},
+ getLog: {skip: "does not return user data"},
+ getMore: {skip: "shard version already established"},
+ getParameter: {skip: "does not return user data"},
+ getShardMap: {skip: "does not return user data"},
+ getShardVersion: {skip: "primary only"},
+ getnonce: {skip: "does not return user data"},
+ godinsert: {skip: "for testing only"},
+ grantPrivilegesToRole: {skip: "primary only"},
+ grantRolesToRole: {skip: "primary only"},
+ grantRolesToUser: {skip: "primary only"},
+ handshake: {skip: "does not return user data"},
+ hostInfo: {skip: "does not return user data"},
+ insert: {skip: "primary only"},
+ invalidateUserCache: {skip: "does not return user data"},
+ isdbgrid: {skip: "does not return user data"},
+ isMaster: {skip: "does not return user data"},
+ killAllSessions: {skip: "does not return user data"},
+ killAllSessionsByPattern: {skip: "does not return user data"},
+ killCursors: {skip: "does not return user data"},
+ killOp: {skip: "does not return user data"},
+ killSessions: {skip: "does not return user data"},
+ listCollections: {skip: "primary only"},
+ listCommands: {skip: "does not return user data"},
+ listDatabases: {skip: "primary only"},
+ listIndexes: {skip: "primary only"},
+ listShards: {skip: "does not return user data"},
+ lockInfo: {skip: "primary only"},
+ logApplicationMessage: {skip: "primary only"},
+ logRotate: {skip: "does not return user data"},
+ logout: {skip: "does not return user data"},
+ makeSnapshot: {skip: "does not return user data"},
+ mapReduce: {
+ setUp: function(mongosConn) {
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ assert.writeOK(mongosConn.getCollection(nss).insert({x: 1}));
+ },
+ command: {
+ mapReduce: coll,
+ map: function() {
+ emit(this.x, 1);
},
- checkResults: function(res) {
- assert.commandWorked(res);
- assert.eq(1, res.results.length, tojson(res));
- assert.eq(1, res.results[0]._id, tojson(res));
- assert.eq(2, res.results[0].value, tojson(res));
+ reduce: function(key, values) {
+ return Array.sum(values);
},
- behavior: "targetsPrimaryUsesConnectionVersioning"
+ out: {inline: 1}
},
- mergeChunks: {skip: "primary only"},
- moveChunk: {skip: "primary only"},
- movePrimary: {skip: "primary only"},
- multicast: {skip: "does not return user data"},
- netstat: {skip: "does not return user data"},
- ping: {skip: "does not return user data"},
- planCacheClear: {skip: "does not return user data"},
- planCacheClearFilters: {skip: "does not return user data"},
- planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
- planCacheSetFilter: {skip: "does not return user data"},
- profile: {skip: "primary only"},
- reapLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshLogicalSessionCacheNow: {skip: "does not return user data"},
- refreshSessions: {skip: "does not return user data"},
- refreshSessionsInternal: {skip: "does not return user data"},
- removeShard: {skip: "primary only"},
- removeShardFromZone: {skip: "primary only"},
- renameCollection: {skip: "primary only"},
- repairCursor: {skip: "does not return user data"},
- replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
- replSetFreeze: {skip: "does not return user data"},
- replSetGetConfig: {skip: "does not return user data"},
- replSetGetRBID: {skip: "does not return user data"},
- replSetGetStatus: {skip: "does not return user data"},
- replSetHeartbeat: {skip: "does not return user data"},
- replSetInitiate: {skip: "does not return user data"},
- replSetMaintenance: {skip: "does not return user data"},
- replSetReconfig: {skip: "does not return user data"},
- replSetRequestVotes: {skip: "does not return user data"},
- replSetStepDown: {skip: "does not return user data"},
- replSetStepUp: {skip: "does not return user data"},
- replSetSyncFrom: {skip: "does not return user data"},
- replSetTest: {skip: "does not return user data"},
- replSetUpdatePosition: {skip: "does not return user data"},
- replSetResizeOplog: {skip: "does not return user data"},
- resetError: {skip: "does not return user data"},
- restartCatalog: {skip: "internal-only command"},
- resync: {skip: "primary only"},
- revokePrivilegesFromRole: {skip: "primary only"},
- revokeRolesFromRole: {skip: "primary only"},
- revokeRolesFromUser: {skip: "primary only"},
- rolesInfo: {skip: "primary only"},
- saslContinue: {skip: "primary only"},
- saslStart: {skip: "primary only"},
- serverStatus: {skip: "does not return user data"},
- setCommittedSnapshot: {skip: "does not return user data"},
- setIndexCommitQuorum: {skip: "primary only"},
- setFeatureCompatibilityVersion: {skip: "primary only"},
- setFreeMonitoring: {skip: "primary only"},
- setParameter: {skip: "does not return user data"},
- setShardVersion: {skip: "does not return user data"},
- shardCollection: {skip: "primary only"},
- shardConnPoolStats: {skip: "does not return user data"},
- shardingState: {skip: "does not return user data"},
- shutdown: {skip: "does not return user data"},
- sleep: {skip: "does not return user data"},
- split: {skip: "primary only"},
- splitChunk: {skip: "primary only"},
- splitVector: {skip: "primary only"},
- stageDebug: {skip: "primary only"},
- startRecordingTraffic: {skip: "does not return user data"},
- startSession: {skip: "does not return user data"},
- stopRecordingTraffic: {skip: "does not return user data"},
- top: {skip: "does not return user data"},
- touch: {skip: "does not return user data"},
- unsetSharding: {skip: "does not return user data"},
- update: {skip: "primary only"},
- updateRole: {skip: "primary only"},
- updateUser: {skip: "primary only"},
- updateZoneKeyRange: {skip: "primary only"},
- usersInfo: {skip: "primary only"},
- validate: {skip: "does not return user data"},
- waitForOngoingChunkSplits: {skip: "does not return user data"},
- whatsmyuri: {skip: "does not return user data"}
- };
-
- commandsRemovedFromMongosIn42.forEach(function(cmd) {
- testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
- });
+ checkResults: function(res) {
+ assert.commandWorked(res);
+ assert.eq(1, res.results.length, tojson(res));
+ assert.eq(1, res.results[0]._id, tojson(res));
+ assert.eq(2, res.results[0].value, tojson(res));
+ },
+ behavior: "targetsPrimaryUsesConnectionVersioning"
+ },
+ mergeChunks: {skip: "primary only"},
+ moveChunk: {skip: "primary only"},
+ movePrimary: {skip: "primary only"},
+ multicast: {skip: "does not return user data"},
+ netstat: {skip: "does not return user data"},
+ ping: {skip: "does not return user data"},
+ planCacheClear: {skip: "does not return user data"},
+ planCacheClearFilters: {skip: "does not return user data"},
+ planCacheListFilters: {skip: "does not return user data"},
+ planCacheListPlans: {skip: "does not return user data"},
+ planCacheListQueryShapes: {skip: "does not return user data"},
+ planCacheSetFilter: {skip: "does not return user data"},
+ profile: {skip: "primary only"},
+ reapLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshLogicalSessionCacheNow: {skip: "does not return user data"},
+ refreshSessions: {skip: "does not return user data"},
+ refreshSessionsInternal: {skip: "does not return user data"},
+ removeShard: {skip: "primary only"},
+ removeShardFromZone: {skip: "primary only"},
+ renameCollection: {skip: "primary only"},
+ repairCursor: {skip: "does not return user data"},
+ replSetAbortPrimaryCatchUp: {skip: "does not return user data"},
+ replSetFreeze: {skip: "does not return user data"},
+ replSetGetConfig: {skip: "does not return user data"},
+ replSetGetRBID: {skip: "does not return user data"},
+ replSetGetStatus: {skip: "does not return user data"},
+ replSetHeartbeat: {skip: "does not return user data"},
+ replSetInitiate: {skip: "does not return user data"},
+ replSetMaintenance: {skip: "does not return user data"},
+ replSetReconfig: {skip: "does not return user data"},
+ replSetRequestVotes: {skip: "does not return user data"},
+ replSetStepDown: {skip: "does not return user data"},
+ replSetStepUp: {skip: "does not return user data"},
+ replSetSyncFrom: {skip: "does not return user data"},
+ replSetTest: {skip: "does not return user data"},
+ replSetUpdatePosition: {skip: "does not return user data"},
+ replSetResizeOplog: {skip: "does not return user data"},
+ resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
+ resync: {skip: "primary only"},
+ revokePrivilegesFromRole: {skip: "primary only"},
+ revokeRolesFromRole: {skip: "primary only"},
+ revokeRolesFromUser: {skip: "primary only"},
+ rolesInfo: {skip: "primary only"},
+ saslContinue: {skip: "primary only"},
+ saslStart: {skip: "primary only"},
+ serverStatus: {skip: "does not return user data"},
+ setCommittedSnapshot: {skip: "does not return user data"},
+ setIndexCommitQuorum: {skip: "primary only"},
+ setFeatureCompatibilityVersion: {skip: "primary only"},
+ setFreeMonitoring: {skip: "primary only"},
+ setParameter: {skip: "does not return user data"},
+ setShardVersion: {skip: "does not return user data"},
+ shardCollection: {skip: "primary only"},
+ shardConnPoolStats: {skip: "does not return user data"},
+ shardingState: {skip: "does not return user data"},
+ shutdown: {skip: "does not return user data"},
+ sleep: {skip: "does not return user data"},
+ split: {skip: "primary only"},
+ splitChunk: {skip: "primary only"},
+ splitVector: {skip: "primary only"},
+ stageDebug: {skip: "primary only"},
+ startRecordingTraffic: {skip: "does not return user data"},
+ startSession: {skip: "does not return user data"},
+ stopRecordingTraffic: {skip: "does not return user data"},
+ top: {skip: "does not return user data"},
+ touch: {skip: "does not return user data"},
+ unsetSharding: {skip: "does not return user data"},
+ update: {skip: "primary only"},
+ updateRole: {skip: "primary only"},
+ updateUser: {skip: "primary only"},
+ updateZoneKeyRange: {skip: "primary only"},
+ usersInfo: {skip: "primary only"},
+ validate: {skip: "does not return user data"},
+ waitForOngoingChunkSplits: {skip: "does not return user data"},
+ whatsmyuri: {skip: "does not return user data"}
+};
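+
+// Every non-skipped entry above is assumed to follow the same small contract,
+// which the loop below consumes. An illustrative (hypothetical) entry:
+//
+//     fooCommand: {
+//         setUp: function(mongosConn) { /* seed documents through mongos */ },
+//         command: {fooCommand: coll},
+//         checkResults: function(res) { /* assert on the response */ },
+//         // behavior is one of "versioned", "unshardedOnly", or
+//         // "targetsPrimaryUsesConnectionVersioning".
+//         behavior: "versioned"
+//     },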
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
- let recipientShardPrimary = st.rs1.getPrimary();
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let recipientShardPrimary = st.rs1.getPrimary();
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
- let res = st.s.adminCommand({listCommands: 1});
- assert.commandWorked(res);
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- let commands = Object.keys(res.commands);
- for (let command of commands) {
- let test = testCases[command];
- assert(test !== undefined,
- "coverage failure: must define a safe secondary reads test for " + command);
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
- if (test.skip !== undefined) {
- print("skipping " + command + ": " + test.skip);
- continue;
- }
- validateTestCase(test);
+let commands = Object.keys(res.commands);
+for (let command of commands) {
+ let test = testCases[command];
+ assert(test !== undefined,
+ "coverage failure: must define a safe secondary reads test for " + command);
- jsTest.log("testing command " + tojson(test.command));
+ if (test.skip !== undefined) {
+ print("skipping " + command + ": " + test.skip);
+ continue;
+ }
+ validateTestCase(test);
- assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
- assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
+ jsTest.log("testing command " + tojson(test.command));
- // We do this because we expect freshMongos to see that the collection is sharded, which it
- // may not if the "nearest" config server it contacts has not replicated the shardCollection
- // writes (or has not heard that they have reached a majority).
- st.configRS.awaitReplication();
+ assert.commandWorked(staleMongos.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, st.shard0.shardName);
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: nss, key: {x: 1}}));
- assert.commandWorked(staleMongos.adminCommand({split: nss, middle: {x: 0}}));
+ // We do this because we expect freshMongos to see that the collection is sharded, which it
+ // may not if the "nearest" config server it contacts has not replicated the shardCollection
+ // writes (or has not heard that they have reached a majority).
+ st.configRS.awaitReplication();
- // Do dummy read from the stale mongos so it loads the routing table into memory once.
- // Additionally, do a secondary read to ensure that the secondary has loaded the initial
- // routing table -- the first read to the primary will refresh the mongos' shardVersion,
- // which will then be used against the secondary to ensure the secondary is fresh.
- assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
- assert.commandWorked(freshMongos.getDB(db).runCommand(
- {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+ assert.commandWorked(staleMongos.adminCommand({split: nss, middle: {x: 0}}));
- // Do any test-specific setup.
- test.setUp(staleMongos);
+ // Do a dummy read from the stale mongos so it loads the routing table into memory once.
+ // Additionally, do a secondary read to ensure that the secondary has loaded the initial
+ // routing table -- the first read to the primary will refresh the mongos' shardVersion,
+ // which will then be used against the secondary to ensure the secondary is fresh.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({find: coll}));
+ assert.commandWorked(freshMongos.getDB(db).runCommand(
+ {find: coll, $readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
- // Wait for replication as a safety net, in case the individual setup function for a test
- // case did not specify writeConcern itself
- st.rs0.awaitReplication();
- st.rs1.awaitReplication();
+ // Do any test-specific setup.
+ test.setUp(staleMongos);
- assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
+ // Wait for replication as a safety net, in case the individual setup function for a test
+ // case did not specify writeConcern itself.
+ st.rs0.awaitReplication();
+ st.rs1.awaitReplication();
- // Do a moveChunk from the fresh mongos to make the other mongos stale.
- // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
- // before stalely versioned commands are sent against the secondary.
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: nss,
- find: {x: 0},
- to: st.shard1.shardName,
- waitForDelete: true,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- }));
+ assert.commandWorked(recipientShardPrimary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(donorShardSecondary.getDB(db).setProfilingLevel(2));
+ assert.commandWorked(recipientShardSecondary.getDB(db).setProfilingLevel(2));
- let res = staleMongos.getDB(db).runCommand(Object.extend(
- test.command, {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
+ // Do a moveChunk from the fresh mongos to make the other mongos stale.
+ // Use {w:2} (all) write concern so the metadata change gets persisted to the secondary
+ // before stale-versioned commands are sent against the secondary.
+ assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: nss,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ waitForDelete: true,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ }));
- test.checkResults(res);
+ let res = staleMongos.getDB(db).runCommand(Object.extend(
+ test.command, {$readPreference: {mode: 'secondary'}, readConcern: {'level': 'local'}}));
- // Build the query to identify the operation in the system profiler.
- let commandProfile = buildCommandProfile(test.command, true /* sharded */);
+ test.checkResults(res);
- if (test.behavior === "unshardedOnly") {
- // Check that neither the donor shard secondary nor recipient shard secondary
- // received the request.
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
- profilerHasZeroMatchingEntriesOrThrow(
- {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
- } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
- // Check that the recipient shard primary received the request without a shardVersion
- // field and returned success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardPrimary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": false},
- "command.$readPreference": {$exists: false},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false},
- },
- commandProfile)
- });
- } else if (test.behavior === "versioned") {
- // Check that the donor shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
+ // Build the query to identify the operation in the system profiler.
+ let commandProfile = buildCommandProfile(test.command, true /* sharded */);
- // Check that the recipient shard secondary received the request and returned stale
- // shardVersion once, even though the mongos is fresh, because the secondary was
- // stale.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- },
- commandProfile)
- });
+ if (test.behavior === "unshardedOnly") {
+ // Check that neither the donor shard secondary nor recipient shard secondary
+ // received the request.
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: donorShardSecondary.getDB(db), filter: commandProfile});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: recipientShardSecondary.getDB(db), filter: commandProfile});
+ } else if (test.behavior === "targetsPrimaryUsesConnectionVersioning") {
+ // Check that the recipient shard primary received the request without a shardVersion
+ // field and returned success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardPrimary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": false},
+ "command.$readPreference": {$exists: false},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false},
+ },
+ commandProfile)
+ });
+ } else if (test.behavior === "versioned") {
+ // Check that the donor shard secondary returned stale shardVersion.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
- // Check that the recipient shard secondary received the request again and returned
- // success.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(db),
- filter: Object.extend({
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$ne": ErrorCodes.StaleConfig},
- },
- commandProfile)
- });
- }
+ // Check that the recipient shard secondary received the request and returned stale
+ // shardVersion once, even though the mongos is fresh, because the secondary was
+ // stale.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ },
+ commandProfile)
+ });
- // Clean up the database by dropping it; this is the only way to drop the profiler
- // collection on secondaries. This also drops all associated indexes.
- // Do this from staleMongos, so staleMongos purges the database entry from its cache.
- assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+ // Check that the recipient shard secondary received the request again and returned
+ // success.
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(db),
+ filter: Object.extend({
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$ne": ErrorCodes.StaleConfig},
+ },
+ commandProfile)
+ });
}
- st.stop();
+ // Clean up the database by dropping it; this is the only way to drop the profiler
+ // collection on secondaries. This also drops all associated indexes.
+ // Do this from staleMongos, so staleMongos purges the database entry from its cache.
+ assert.commandWorked(staleMongos.getDB(db).runCommand({dropDatabase: 1}));
+}
+
+st.stop();
})();
diff --git a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
index d1da3e396d0..6c8b150aebb 100644
--- a/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
+++ b/jstests/sharding/secondary_shard_version_protocol_with_causal_consistency.js
@@ -5,112 +5,110 @@
* level should default to 'local' read concern level, using the shard version protocol.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
+load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
- let st =
- new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}, causallyConsistent: true});
- let dbName = 'test', collName = 'foo', ns = 'test.foo';
+// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
+let st =
+ new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}, causallyConsistent: true});
+let dbName = 'test', collName = 'foo', ns = 'test.foo';
- assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}}));
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- jsTest.log("do insert from stale mongos to make it load the routing table before the move");
- assert.writeOK(staleMongos.getCollection(ns).insert({x: 1}));
+jsTest.log("do insert from stale mongos to make it load the routing table before the move");
+assert.writeOK(staleMongos.getCollection(ns).insert({x: 1}));
- jsTest.log("do moveChunk from fresh mongos");
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: ns,
- find: {x: 0},
- to: st.shard1.shardName,
- secondaryThrottle: true,
- _waitForDelete: true,
- writeConcern: {w: 2},
- }));
+jsTest.log("do moveChunk from fresh mongos");
+assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: ns,
+ find: {x: 0},
+ to: st.shard1.shardName,
+ secondaryThrottle: true,
+ _waitForDelete: true,
+ writeConcern: {w: 2},
+}));
- // Turn on system profiler on secondaries to collect data on all future operations on the db.
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
- assert.commandWorked(donorShardSecondary.getDB(dbName).setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB(dbName).setProfilingLevel(2));
+// Turn on system profiler on secondaries to collect data on all future operations on the db.
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
+assert.commandWorked(donorShardSecondary.getDB(dbName).setProfilingLevel(2));
+assert.commandWorked(recipientShardSecondary.getDB(dbName).setProfilingLevel(2));
- // Note: this query will not be registered by the profiler because it errors before reaching the
- // storage level.
- jsTest.log("Do a secondary read from stale mongos with afterClusterTime and level 'available'");
- const staleMongosDB = staleMongos.getDB(dbName);
- assert.commandFailedWithCode(staleMongosDB.runCommand({
- count: collName,
- query: {x: 1},
- $readPreference: {mode: "secondary"},
- readConcern: {
- 'afterClusterTime': staleMongosDB.getSession().getOperationTime(),
- 'level': 'available'
- }
- }),
- ErrorCodes.InvalidOptions);
+// Note: this query will not be registered by the profiler because it errors before reaching the
+// storage level.
+jsTest.log("Do a secondary read from stale mongos with afterClusterTime and level 'available'");
+const staleMongosDB = staleMongos.getDB(dbName);
+assert.commandFailedWithCode(staleMongosDB.runCommand({
+ count: collName,
+ query: {x: 1},
+ $readPreference: {mode: "secondary"},
+ readConcern:
+ {'afterClusterTime': staleMongosDB.getSession().getOperationTime(), 'level': 'available'}
+}),
+ ErrorCodes.InvalidOptions);
- jsTest.log("Do a secondary read from stale mongos with afterClusterTime and no level");
- let res = staleMongosDB.runCommand({
- count: collName,
- query: {x: 1},
- $readPreference: {mode: "secondary"},
- readConcern: {'afterClusterTime': staleMongosDB.getSession().getOperationTime()},
- });
- assert(res.ok);
- assert.eq(1, res.n, tojson(res));
+jsTest.log("Do a secondary read from stale mongos with afterClusterTime and no level");
+let res = staleMongosDB.runCommand({
+ count: collName,
+ query: {x: 1},
+ $readPreference: {mode: "secondary"},
+ readConcern: {'afterClusterTime': staleMongosDB.getSession().getOperationTime()},
+});
+assert(res.ok);
+assert.eq(1, res.n, tojson(res));
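+
+// With afterClusterTime and no explicit level, the read concern is expected to
+// default to 'local' (per the comment at the top of this file), so this read
+// participates in the shard version protocol and the profiler entries checked
+// below should appear.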
- // The stale mongos will first go to the donor shard and receive a stale shard version,
- // prompting the stale mongos to refresh it's routing table and retarget to the recipient shard.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB(dbName),
- filter: {
- "ns": ns,
- "command.count": collName,
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern.afterClusterTime": {"$exists": true},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// The stale mongos will first go to the donor shard and receive a stale shard version,
+// prompting the stale mongos to refresh its routing table and retarget to the recipient shard.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB(dbName),
+ filter: {
+ "ns": ns,
+ "command.count": collName,
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern.afterClusterTime": {"$exists": true},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // The recipient shard will then return a stale shard version error because it needs to refresh
- // its own routing table.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(dbName),
- filter: {
- "ns": ns,
- "command.count": collName,
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern.afterClusterTime": {"$exists": true},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// The recipient shard will then return a stale shard version error because it needs to refresh
+// its own routing table.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(dbName),
+ filter: {
+ "ns": ns,
+ "command.count": collName,
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern.afterClusterTime": {"$exists": true},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // Finally, the command is retried on the recipient shard and succeeds.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB(dbName),
- filter: {
- "ns": ns,
- "command.count": collName,
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern.afterClusterTime": {"$exists": true},
- "errCode": {"$exists": false}
- }
- });
+// Finally, the command is retried on the recipient shard and succeeds.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB(dbName),
+ filter: {
+ "ns": ns,
+ "command.count": collName,
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern.afterClusterTime": {"$exists": true},
+ "errCode": {"$exists": false}
+ }
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/secondary_shard_versioning.js b/jstests/sharding/secondary_shard_versioning.js
index 6c92231ec82..94e49c09a5d 100644
--- a/jstests/sharding/secondary_shard_versioning.js
+++ b/jstests/sharding/secondary_shard_versioning.js
@@ -2,98 +2,98 @@
* Tests that secondaries participate in the shard versioning protocol.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
+load('jstests/libs/profiler.js'); // for profilerHasSingleMatchingEntryOrThrow()
- // Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
- let rsOpts = {nodes: [{}, {rsConfig: {priority: 0}}]};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
+// Set the secondaries to priority 0 to prevent the primaries from stepping down.
+let rsOpts = {nodes: [{}, {rsConfig: {priority: 0}}]};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {x: 0}}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {x: 0}}));
- let freshMongos = st.s0;
- let staleMongos = st.s1;
+let freshMongos = st.s0;
+let staleMongos = st.s1;
- jsTest.log("do insert from stale mongos to make it load the routing table before the move");
- assert.writeOK(staleMongos.getDB('test').foo.insert({x: 1}));
+jsTest.log("do insert from stale mongos to make it load the routing table before the move");
+assert.writeOK(staleMongos.getDB('test').foo.insert({x: 1}));
- jsTest.log("do moveChunk from fresh mongos");
- assert.commandWorked(freshMongos.adminCommand({
- moveChunk: 'test.foo',
- find: {x: 0},
- to: st.shard1.shardName,
- }));
+jsTest.log("do moveChunk from fresh mongos");
+assert.commandWorked(freshMongos.adminCommand({
+ moveChunk: 'test.foo',
+ find: {x: 0},
+ to: st.shard1.shardName,
+}));
- // Turn on system profiler on secondaries to collect data on all future operations on the db.
- let donorShardSecondary = st.rs0.getSecondary();
- let recipientShardSecondary = st.rs1.getSecondary();
- assert.commandWorked(donorShardSecondary.getDB('test').setProfilingLevel(2));
- assert.commandWorked(recipientShardSecondary.getDB('test').setProfilingLevel(2));
+// Turn on system profiler on secondaries to collect data on all future operations on the db.
+let donorShardSecondary = st.rs0.getSecondary();
+let recipientShardSecondary = st.rs1.getSecondary();
+assert.commandWorked(donorShardSecondary.getDB('test').setProfilingLevel(2));
+assert.commandWorked(recipientShardSecondary.getDB('test').setProfilingLevel(2));
- // Use the mongos with the stale routing table to send read requests to the secondaries. 'local'
- // read concern level must be specified in the request because secondaries default to
- // 'available', which doesn't participate in the version protocol. Check that the donor shard
- // returns a stale shardVersion error, which provokes mongos to refresh its routing table and
- // re-target; that the recipient shard secondary refreshes its routing table on hearing the
- // fresh version from mongos; and that the recipient shard secondary returns the results.
+// Use the mongos with the stale routing table to send read requests to the secondaries. 'local'
+// read concern level must be specified in the request because secondaries default to
+// 'available', which doesn't participate in the version protocol. Check that the donor shard
+// returns a stale shardVersion error, which provokes mongos to refresh its routing table and
+// re-target; that the recipient shard secondary refreshes its routing table on hearing the
+// fresh version from mongos; and that the recipient shard secondary returns the results.
- jsTest.log("do secondary read from stale mongos");
- let res = staleMongos.getDB('test').runCommand({
- count: 'foo',
- query: {x: 1},
- $readPreference: {mode: "secondary"},
- readConcern: {"level": "local"}
- });
- assert(res.ok);
- assert.eq(1, res.n, tojson(res));
+jsTest.log("do secondary read from stale mongos");
+let res = staleMongos.getDB('test').runCommand({
+ count: 'foo',
+ query: {x: 1},
+ $readPreference: {mode: "secondary"},
+ readConcern: {"level": "local"}
+});
+assert(res.ok);
+assert.eq(1, res.n, tojson(res));
- // Check that the donor shard secondary returned stale shardVersion.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB('test'),
- filter: {
- "ns": "test.foo",
- "command.count": "foo",
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// Check that the donor shard secondary returned stale shardVersion.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: donorShardSecondary.getDB('test'),
+ filter: {
+ "ns": "test.foo",
+ "command.count": "foo",
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // The recipient shard secondary will also return stale shardVersion once, even though the
- // mongos is fresh, because the recipient shard secondary was stale.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: donorShardSecondary.getDB('test'),
- filter: {
- "ns": "test.foo",
- "command.count": "foo",
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": ErrorCodes.StaleConfig
- }
- });
+// The recipient shard secondary will also return stale shardVersion once, even though the
+// mongos is fresh, because the recipient shard secondary was stale.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB('test'),
+ filter: {
+ "ns": "test.foo",
+ "command.count": "foo",
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": ErrorCodes.StaleConfig
+ }
+});
- // Check that the recipient shard secondary received the query and returned results.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: recipientShardSecondary.getDB('test'),
- filter: {
- "ns": "test.foo",
- "command.count": "foo",
- "command.query": {x: 1},
- "command.shardVersion": {"$exists": true},
- "command.$readPreference": {"mode": "secondary"},
- "command.readConcern": {"level": "local"},
- "errCode": {"$exists": false}
- }
- });
+// Check that the recipient shard secondary received the query and returned results.
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: recipientShardSecondary.getDB('test'),
+ filter: {
+ "ns": "test.foo",
+ "command.count": "foo",
+ "command.query": {x: 1},
+ "command.shardVersion": {"$exists": true},
+ "command.$readPreference": {"mode": "secondary"},
+ "command.readConcern": {"level": "local"},
+ "errCode": {"$exists": false}
+ }
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/server_status.js b/jstests/sharding/server_status.js
index 770300174b8..0e2865842b4 100644
--- a/jstests/sharding/server_status.js
+++ b/jstests/sharding/server_status.js
@@ -4,41 +4,41 @@
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var testDB = st.s.getDB('test');
- testDB.adminCommand({enableSharding: 'test'});
- testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+var testDB = st.s.getDB('test');
+testDB.adminCommand({enableSharding: 'test'});
+testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
- // Initialize shard metadata in shards
- testDB.user.insert({x: 1});
+// Initialize shard metadata in shards
+testDB.user.insert({x: 1});
- var checkShardingServerStatus = function(doc) {
- var shardingSection = doc.sharding;
- assert.neq(shardingSection, null);
+var checkShardingServerStatus = function(doc) {
+ var shardingSection = doc.sharding;
+ assert.neq(shardingSection, null);
- var configConnStr = shardingSection.configsvrConnectionString;
- var configConn = new Mongo(configConnStr);
- var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
+ var configConnStr = shardingSection.configsvrConnectionString;
+ var configConn = new Mongo(configConnStr);
+ var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
- var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
+ var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
- assert.gt(configConnStr.indexOf('/'), 0);
- assert.gte(configIsMaster.configsvr, 1); // If it's a shard, this field won't exist.
- assert.neq(null, configOpTimeObj);
- assert.neq(null, configOpTimeObj.ts);
- assert.neq(null, configOpTimeObj.t);
+ assert.gt(configConnStr.indexOf('/'), 0);
+ assert.gte(configIsMaster.configsvr, 1); // If it's a shard, this field won't exist.
+ assert.neq(null, configOpTimeObj);
+ assert.neq(null, configOpTimeObj.ts);
+ assert.neq(null, configOpTimeObj.t);
- assert.neq(null, shardingSection.maxChunkSizeInBytes);
- };
+ assert.neq(null, shardingSection.maxChunkSizeInBytes);
+};
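+
+// Illustrative shape of the serverStatus sharding section inspected above
+// (hypothetical values; the actual connection string and opTime will differ):
+//
+//     sharding: {
+//         configsvrConnectionString: "configRS/cfg1:27019,cfg2:27019",
+//         lastSeenConfigServerOpTime: {ts: Timestamp(1500000000, 1), t: NumberLong(1)},
+//         maxChunkSizeInBytes: NumberLong(67108864)
+//     }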
- var mongosServerStatus = testDB.adminCommand({serverStatus: 1});
- checkShardingServerStatus(mongosServerStatus);
+var mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+checkShardingServerStatus(mongosServerStatus);
- var mongodServerStatus = st.rs0.getPrimary().getDB('admin').runCommand({serverStatus: 1});
- checkShardingServerStatus(mongodServerStatus);
+var mongodServerStatus = st.rs0.getPrimary().getDB('admin').runCommand({serverStatus: 1});
+checkShardingServerStatus(mongodServerStatus);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/server_status_crud_metrics.js b/jstests/sharding/server_status_crud_metrics.js
index 522f2f89222..b40c412fcd7 100644
--- a/jstests/sharding/server_status_crud_metrics.js
+++ b/jstests/sharding/server_status_crud_metrics.js
@@ -4,75 +4,75 @@
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
- const testDB = st.s.getDB("test");
- const testColl = testDB.coll;
- const unshardedColl = testDB.unsharded;
+const st = new ShardingTest({shards: 2});
+const testDB = st.s.getDB("test");
+const testColl = testDB.coll;
+const unshardedColl = testDB.unsharded;
- assert.commandWorked(st.s0.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- // Shard testColl on {x:1}, split it at {x:0}, and move chunk {x:1} to shard1.
- st.shardColl(testColl, {x: 1}, {x: 0}, {x: 1});
+// Shard testColl on {x:1}, split it at {x:0}, and move chunk {x:1} to shard1.
+st.shardColl(testColl, {x: 1}, {x: 0}, {x: 1});
- // Insert one document on each shard.
- assert.commandWorked(testColl.insert({x: 1, _id: 1}));
- assert.commandWorked(testColl.insert({x: -1, _id: 0}));
+// Insert one document on each shard.
+assert.commandWorked(testColl.insert({x: 1, _id: 1}));
+assert.commandWorked(testColl.insert({x: -1, _id: 0}));
- assert.commandWorked(unshardedColl.insert({x: 1, _id: 1}));
+assert.commandWorked(unshardedColl.insert({x: 1, _id: 1}));
- // Verification for 'updateOneOpStyleBroadcastWithExactIDCount' metric.
+// Verification for 'updateOneOpStyleBroadcastWithExactIDCount' metric.
- // Should increment the metric as the update cannot target single shard and are {multi:false}.
- assert.commandWorked(testDB.coll.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
- assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
+// Should increment the metric, as these updates cannot target a single shard and are {multi: false}.
+assert.commandWorked(testDB.coll.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
+assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
- // Should increment the metric because we broadcast by _id, even though the update subsequently
- // fails on the individual shard.
- assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {multi: false}),
- [ErrorCodes.ImmutableField, 31025]);
- assert.commandFailedWithCode(
- testDB.coll.update({_id: 1}, {$set: {x: 2, $invalidField: 4}}, {multi: false}),
- ErrorCodes.DollarPrefixedFieldName);
+// Should increment the metric because we broadcast by _id, even though the update subsequently
+// fails on the individual shard.
+assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {multi: false}),
+ [ErrorCodes.ImmutableField, 31025]);
+assert.commandFailedWithCode(
+ testDB.coll.update({_id: 1}, {$set: {x: 2, $invalidField: 4}}, {multi: false}),
+ ErrorCodes.DollarPrefixedFieldName);
- let mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+let mongosServerStatus = testDB.adminCommand({serverStatus: 1});
- // Verify that the above four updates incremented the metric counter.
- assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
+// Verify that the above four updates incremented the metric counter.
+assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
- // Shouldn't increment the metric when {multi:true}.
- assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 3}}, {multi: true}));
- assert.commandWorked(testDB.coll.update({}, {$set: {a: 3}}, {multi: true}));
+// Shouldn't increment the metric when {multi:true}.
+assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {a: 3}}, {multi: true}));
+assert.commandWorked(testDB.coll.update({}, {$set: {a: 3}}, {multi: true}));
- // Shouldn't increment the metric when update can target single shard.
- assert.commandWorked(testDB.coll.update({x: 11}, {$set: {a: 2}}, {multi: false}));
- assert.commandWorked(testDB.coll.update({x: 1}, {$set: {a: 2}}, {multi: false}));
+// Shouldn't increment the metric when the update can target a single shard.
+assert.commandWorked(testDB.coll.update({x: 11}, {$set: {a: 2}}, {multi: false}));
+assert.commandWorked(testDB.coll.update({x: 1}, {$set: {a: 2}}, {multi: false}));
- // Shouldn't increment the metric for replacement style updates.
- assert.commandWorked(testDB.coll.update({_id: 1}, {x: 1, a: 2}));
- assert.commandWorked(testDB.coll.update({x: 1}, {x: 1, a: 1}));
+// Shouldn't increment the metric for replacement-style updates.
+assert.commandWorked(testDB.coll.update({_id: 1}, {x: 1, a: 2}));
+assert.commandWorked(testDB.coll.update({x: 1}, {x: 1, a: 1}));
- // Shouldn't increment the metric when routing fails.
- assert.commandFailedWithCode(testDB.coll.update({}, {$set: {x: 2}}, {multi: false}),
- [ErrorCodes.InvalidOptions, ErrorCodes.ShardKeyNotFound]);
- assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
+// Shouldn't increment the metric when routing fails.
+assert.commandFailedWithCode(testDB.coll.update({}, {$set: {x: 2}}, {multi: false}),
+ [ErrorCodes.InvalidOptions, ErrorCodes.ShardKeyNotFound]);
+assert.commandFailedWithCode(testDB.coll.update({_id: 1}, {$set: {x: 2}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
- // Shouldn't increment the metrics for unsharded collection.
- assert.commandWorked(unshardedColl.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
- assert.commandWorked(unshardedColl.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
+// Shouldn't increment the metrics for an unsharded collection.
+assert.commandWorked(unshardedColl.update({_id: "missing"}, {$set: {a: 1}}, {multi: false}));
+assert.commandWorked(unshardedColl.update({_id: 1}, {$set: {a: 2}}, {multi: false}));
- // Shouldn't incement the metrics when query had invalid operator.
- assert.commandFailedWithCode(
- testDB.coll.update({_id: 1, $invalidOperator: 1}, {$set: {a: 2}}, {multi: false}),
- ErrorCodes.BadValue);
+// Shouldn't increment the metrics when the query has an invalid operator.
+assert.commandFailedWithCode(
+ testDB.coll.update({_id: 1, $invalidOperator: 1}, {$set: {a: 2}}, {multi: false}),
+ ErrorCodes.BadValue);
- mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+mongosServerStatus = testDB.adminCommand({serverStatus: 1});
- // Verify that only the first four upserts incremented the metric counter.
- assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
+// Verify that only the first four updates incremented the metric counter.
+assert.eq(4, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/session_info_in_oplog.js b/jstests/sharding/session_info_in_oplog.js
index dc7b17f9494..617d5759207 100644
--- a/jstests/sharding/session_info_in_oplog.js
+++ b/jstests/sharding/session_info_in_oplog.js
@@ -4,363 +4,342 @@
* updated after the write operations.
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+const kNodes = 2;
+
+var checkOplog = function(oplog, lsid, uid, txnNum, stmtId, prevTs, prevTerm) {
+ assert(oplog != null);
+ assert(oplog.lsid != null);
+ assert.eq(lsid, oplog.lsid.id);
+ assert.eq(uid, oplog.lsid.uid);
+ assert.eq(txnNum, oplog.txnNumber);
+ assert.eq(stmtId, oplog.stmtId);
+
+ var oplogPrevTs = oplog.prevOpTime.ts;
+ assert.eq(prevTs.getTime(), oplogPrevTs.getTime());
+ assert.eq(prevTs.getInc(), oplogPrevTs.getInc());
+ assert.eq(prevTerm, oplog.prevOpTime.t);
+};
+
+var checkSessionCatalog = function(conn, sessionId, uid, txnNum, expectedTs, expectedTerm) {
+ var coll = conn.getDB('config').transactions;
+ var sessionDoc = coll.findOne({'_id': {id: sessionId, uid: uid}});
+
+ assert.eq(txnNum, sessionDoc.txnNum);
+
+ var oplogTs = sessionDoc.lastWriteOpTime.ts;
+ assert.eq(expectedTs.getTime(), oplogTs.getTime());
+ assert.eq(expectedTs.getInc(), oplogTs.getInc());
+
+ assert.eq(expectedTerm, sessionDoc.lastWriteOpTime.t);
+};
+
+var runTests = function(mainConn, priConn, secConn) {
+ var lsid = UUID();
+ var uid = function() {
+ var user = mainConn.getDB("admin")
+ .runCommand({connectionStatus: 1})
+ .authInfo.authenticatedUsers[0];
+
+ if (user) {
+ return computeSHA256Block(user.user + "@" + user.db);
+ } else {
+ return computeSHA256Block("");
+ }
+ }();
+
+ var txnNumber = NumberLong(34);
+ var incrementTxnNumber = function() {
+ txnNumber = NumberLong(txnNumber + 1);
+ };
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test insert command
+
+ var cmd = {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
- load("jstests/libs/retryable_writes_util.js");
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+ var oplog = priConn.getDB('local').oplog.rs;
- const kNodes = 2;
+ var firstDoc = oplog.findOne({ns: 'test.user', 'o._id': 10});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
- var checkOplog = function(oplog, lsid, uid, txnNum, stmtId, prevTs, prevTerm) {
- assert(oplog != null);
- assert(oplog.lsid != null);
- assert.eq(lsid, oplog.lsid.id);
- assert.eq(uid, oplog.lsid.uid);
- assert.eq(txnNum, oplog.txnNumber);
- assert.eq(stmtId, oplog.stmtId);
+ var secondDoc = oplog.findOne({ns: 'test.user', 'o._id': 30});
+ checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
- var oplogPrevTs = oplog.prevOpTime.ts;
- assert.eq(prevTs.getTime(), oplogPrevTs.getTime());
- assert.eq(prevTs.getInc(), oplogPrevTs.getInc());
- assert.eq(prevTerm, oplog.prevOpTime.t);
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test update command
+
+ incrementTxnNumber();
+ cmd = {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$set: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$set: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
};
- var checkSessionCatalog = function(conn, sessionId, uid, txnNum, expectedTs, expectedTerm) {
- var coll = conn.getDB('config').transactions;
- var sessionDoc = coll.findOne({'_id': {id: sessionId, uid: uid}});
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 10});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ secondDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 20});
+ checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
- assert.eq(txnNum, sessionDoc.txnNum);
+ var thirdDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 30});
+ checkOplog(thirdDoc, lsid, uid, txnNumber, 2, secondDoc.ts, secondDoc.t);
- var oplogTs = sessionDoc.lastWriteOpTime.ts;
- assert.eq(expectedTs.getTime(), oplogTs.getTime());
- assert.eq(expectedTs.getInc(), oplogTs.getInc());
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
- assert.eq(expectedTerm, sessionDoc.lastWriteOpTime.t);
+ ////////////////////////////////////////////////////////////////////////
+ // Test delete command
+
+ incrementTxnNumber();
+ cmd = {
+ delete: 'user',
+ deletes: [{q: {_id: 10}, limit: 1}, {q: {_id: 20}, limit: 1}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
};
- var runTests = function(mainConn, priConn, secConn) {
- var lsid = UUID();
- var uid = function() {
- var user = mainConn.getDB("admin")
- .runCommand({connectionStatus: 1})
- .authInfo.authenticatedUsers[0];
-
- if (user) {
- return computeSHA256Block(user.user + "@" + user.db);
- } else {
- return computeSHA256Block("");
- }
- }();
-
- var txnNumber = NumberLong(34);
- var incrementTxnNumber = function() {
- txnNumber = NumberLong(txnNumber + 1);
- };
-
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
-
- var cmd = {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- var oplog = priConn.getDB('local').oplog.rs;
-
- var firstDoc = oplog.findOne({ns: 'test.user', 'o._id': 10});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- var secondDoc = oplog.findOne({ns: 'test.user', 'o._id': 30});
- checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
-
- ////////////////////////////////////////////////////////////////////////
- // Test update command
-
- incrementTxnNumber();
- cmd = {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$set: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$set: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 10});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- secondDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 20});
- checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
-
- var thirdDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 30});
- checkOplog(thirdDoc, lsid, uid, txnNumber, 2, secondDoc.ts, secondDoc.t);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, thirdDoc.ts, thirdDoc.t);
-
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
-
- incrementTxnNumber();
- cmd = {
- delete: 'user',
- deletes: [{q: {_id: 10}, limit: 1}, {q: {_id: 20}, limit: 1}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 10});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- secondDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 20});
- checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (upsert)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {$set: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 40});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.preImageTs);
- assert.eq(null, firstDoc.postImageTs);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- var lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (in-place update, return pre-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {$inc: {x: 1}},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- var beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
- var res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.postImageTs);
-
- var savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.preImageOpTime.ts,
- t: firstDoc.preImageOpTime.t
- });
- assert.eq(beforeDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (in-place update, return post-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {$inc: {x: 1}},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- var afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.preImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.postImageOpTime.ts,
- t: firstDoc.postImageOpTime.t
- });
- assert.eq(afterDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (replacement update, return pre-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {y: 1},
- new: false,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.postImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.preImageOpTime.ts,
- t: firstDoc.preImageOpTime.t
- });
- assert.eq(beforeDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (replacement update, return post-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- update: {z: 1},
- new: true,
- upsert: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
- afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.preImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.postImageOpTime.ts,
- t: firstDoc.postImageOpTime.t
- });
- assert.eq(afterDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
-
- ////////////////////////////////////////////////////////////////////////
- // Test findAndModify command (remove, return pre-image)
-
- incrementTxnNumber();
- cmd = {
- findAndModify: 'user',
- query: {_id: 40},
- remove: true,
- new: false,
- lsid: {id: lsid},
- txnNumber: txnNumber,
- writeConcern: {w: kNodes},
- };
-
- beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
- res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
-
- firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 40, ts: {$gt: lastTs}});
- checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
-
- assert.eq(null, firstDoc.postImageTs);
-
- savedDoc = oplog.findOne({
- ns: 'test.user',
- op: 'n',
- ts: firstDoc.preImageOpTime.ts,
- t: firstDoc.preImageOpTime.t
- });
- assert.eq(beforeDoc, savedDoc.o);
-
- checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
- lastTs = firstDoc.ts;
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 10});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ secondDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 20});
+ checkOplog(secondDoc, lsid, uid, txnNumber, 1, firstDoc.ts, firstDoc.t);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, secondDoc.ts, secondDoc.t);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (upsert)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {$set: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'i', 'o._id': 40});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.preImageTs);
+ assert.eq(null, firstDoc.postImageTs);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ var lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (in-place update, return pre-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {$inc: {x: 1}},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ var beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
+ var res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.postImageTs);
+
+ var savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.preImageOpTime.ts, t: firstDoc.preImageOpTime.t});
+ assert.eq(beforeDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (in-place update, return post-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
};
- var replTest = new ReplSetTest({nodes: kNodes});
- replTest.startSet();
- replTest.initiate();
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ var afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.preImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.postImageOpTime.ts, t: firstDoc.postImageOpTime.t});
+ assert.eq(afterDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (replacement update, return pre-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {y: 1},
+ new: false,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.postImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.preImageOpTime.ts, t: firstDoc.preImageOpTime.t});
+ assert.eq(beforeDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (replacement update, return post-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ update: {z: 1},
+ new: true,
+ upsert: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+ afterDoc = mainConn.getDB('test').user.findOne({_id: 40});
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'u', 'o2._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.preImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.postImageOpTime.ts, t: firstDoc.postImageOpTime.t});
+ assert.eq(afterDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+
+ ////////////////////////////////////////////////////////////////////////
+ // Test findAndModify command (remove, return pre-image)
+
+ incrementTxnNumber();
+ cmd = {
+ findAndModify: 'user',
+ query: {_id: 40},
+ remove: true,
+ new: false,
+ lsid: {id: lsid},
+ txnNumber: txnNumber,
+ writeConcern: {w: kNodes},
+ };
+
+ beforeDoc = mainConn.getDB('test').user.findOne({_id: 40});
+ res = assert.commandWorked(mainConn.getDB('test').runCommand(cmd));
+
+ firstDoc = oplog.findOne({ns: 'test.user', op: 'd', 'o._id': 40, ts: {$gt: lastTs}});
+ checkOplog(firstDoc, lsid, uid, txnNumber, 0, Timestamp(0, 0), -1);
+
+ assert.eq(null, firstDoc.postImageTs);
+
+ savedDoc = oplog.findOne(
+ {ns: 'test.user', op: 'n', ts: firstDoc.preImageOpTime.ts, t: firstDoc.preImageOpTime.t});
+ assert.eq(beforeDoc, savedDoc.o);
+
+ checkSessionCatalog(priConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ checkSessionCatalog(secConn, lsid, uid, txnNumber, firstDoc.ts, firstDoc.t);
+ lastTs = firstDoc.ts;
+};
- var priConn = replTest.getPrimary();
- var secConn = replTest.getSecondary();
- secConn.setSlaveOk(true);
+var replTest = new ReplSetTest({nodes: kNodes});
+replTest.startSet();
+replTest.initiate();
- runTests(priConn, priConn, secConn);
+var priConn = replTest.getPrimary();
+var secConn = replTest.getSecondary();
+secConn.setSlaveOk(true);
- replTest.stopSet();
+runTests(priConn, priConn, secConn);
- var st = new ShardingTest({shards: {rs0: {nodes: kNodes}}});
+replTest.stopSet();
- secConn = st.rs0.getSecondary();
- secConn.setSlaveOk(true);
- runTests(st.s, st.rs0.getPrimary(), secConn);
+var st = new ShardingTest({shards: {rs0: {nodes: kNodes}}});
- st.stop();
+secConn = st.rs0.getSecondary();
+secConn.setSlaveOk(true);
+runTests(st.s, st.rs0.getPrimary(), secConn);
+st.stop();
})();
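
The prevOpTime fields verified by checkOplog above chain a session's writes together in the oplog. A hypothetical helper that follows that chain backwards from the session's config.transactions entry, using only fields the test itself exercises (the helper name and loop are illustrative):

// Sketch: follow prevOpTime links back to the session's first write; the
// chain terminates at Timestamp(0, 0), as checkOplog asserts above.
function walkSessionChain(conn, sessionDoc) {
    var oplog = conn.getDB('local').oplog.rs;
    var opTime = sessionDoc.lastWriteOpTime;
    var chain = [];
    while (opTime.ts.getTime() !== 0 || opTime.ts.getInc() !== 0) {
        var entry = oplog.findOne({ts: opTime.ts, t: opTime.t});
        assert(entry != null, "broken chain at " + tojson(opTime));
        chain.push(entry);
        opTime = entry.prevOpTime;
    }
    return chain;  // newest write first
}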
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
index 8d0c91f9d85..6efb9cf2274 100644
--- a/jstests/sharding/sessions_collection_auto_healing.js
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -1,164 +1,163 @@
load('jstests/libs/sessions_collection.js');
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var st = new ShardingTest({shards: 0});
- var configSvr = st.configRS.getPrimary();
- var configAdmin = configSvr.getDB("admin");
+var st = new ShardingTest({shards: 0});
+var configSvr = st.configRS.getPrimary();
+var configAdmin = configSvr.getDB("admin");
- var mongos = st.s;
- var mongosAdmin = mongos.getDB("admin");
- var mongosConfig = mongos.getDB("config");
+var mongos = st.s;
+var mongosAdmin = mongos.getDB("admin");
+var mongosConfig = mongos.getDB("config");
- // Test that we can use sessions on the config server before we add any shards.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
+// Test that we can use sessions on the config server before we add any shards.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
- assert.commandWorked(configAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(configAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+}
- // Test that we can use sessions on a mongos before we add any shards.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
+// Test that we can use sessions on a mongos before we add any shards.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
- assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(mongos, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+}
- // Test that the config server does not create the sessions collection
- // if there are not any shards.
- {
- assert.eq(mongosConfig.shards.count(), 0);
+// Test that the config server does not create the sessions collection
+// if there are no shards.
+{
+ assert.eq(mongosConfig.shards.count(), 0);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(configSvr, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+}
- // Test-wide: add a shard
- var rs = new ReplSetTest({nodes: 1});
- rs.startSet({shardsvr: ""});
- rs.initiate();
+// Test-wide: add a shard
+var rs = new ReplSetTest({nodes: 1});
+rs.startSet({shardsvr: ""});
+rs.initiate();
- var shard = rs.getPrimary();
- var shardAdmin = shard.getDB("admin");
- var shardConfig = shard.getDB("config");
+var shard = rs.getPrimary();
+var shardAdmin = shard.getDB("admin");
+var shardConfig = shard.getDB("config");
- // Test that we can add this shard, even with a local config.system.sessions collection,
- // and test that we drop its local collection
- {
- shardConfig.system.sessions.insert({"hey": "you"});
- validateSessionsCollection(shard, true, false);
+// Test that we can add this shard, even with a local config.system.sessions collection,
+// and test that we drop its local collection.
+{
+ shardConfig.system.sessions.insert({"hey": "you"});
+ validateSessionsCollection(shard, true, false);
- assert.commandWorked(mongosAdmin.runCommand({addShard: rs.getURL()}));
- assert.eq(mongosConfig.shards.count(), 1);
- validateSessionsCollection(shard, false, false);
- }
+ assert.commandWorked(mongosAdmin.runCommand({addShard: rs.getURL()}));
+ assert.eq(mongosConfig.shards.count(), 1);
+ validateSessionsCollection(shard, false, false);
+}
- // Test that we can use sessions on a shard before the sessions collection
- // is set up by the config servers.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
+// Test that we can use sessions on a shard before the sessions collection
+// is set up by the config servers.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(shardAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+}
- // Test that we can use sessions from a mongos before the sessions collection
- // is set up by the config servers.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- validateSessionsCollection(mongos, false, false);
+// Test that we can use sessions from a mongos before the sessions collection
+// is set up by the config servers.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ validateSessionsCollection(mongos, false, false);
- assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- validateSessionsCollection(mongos, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ validateSessionsCollection(mongos, false, false);
+}
- // Test that if we do a refresh (write) from a shard server while there
- // is no sessions collection, it does not create the sessions collection.
- {
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
+// Test that if we do a refresh (write) from a shard server while there
+// is no sessions collection, it does not create the sessions collection.
+{
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(configSvr, false, false);
- validateSessionsCollection(shard, false, false);
- }
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+}
- // Test that a refresh on the config servers once there are shards creates
- // the sessions collection on a shard.
- {
- validateSessionsCollection(shard, false, false);
+// Test that a refresh on the config servers once there are shards creates
+// the sessions collection on a shard.
+{
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, true);
+ validateSessionsCollection(shard, true, true);
- // We will have two sessions because of the session used in the shardCollection's retryable
- // write to shard the sessions collection. It will disappear after we run the refresh
- // function on the shard.
- assert.eq(shardConfig.system.sessions.count(), 2, "did not flush config's sessions");
+ // We will have two sessions because of the session used in the shardCollection's retryable
+ // write to shard the sessions collection. It will disappear after we run the refresh
+ // function on the shard.
+ assert.eq(shardConfig.system.sessions.count(), 2, "did not flush config's sessions");
- // Now, if we do refreshes on the other servers, their in-mem records will
- // be written to the collection.
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.eq(shardConfig.system.sessions.count(), 2, "did not flush shard's sessions");
+    // Now, if we do refreshes on the other servers, their in-memory records will
+ // be written to the collection.
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.count(), 2, "did not flush shard's sessions");
- assert.commandWorked(mongosAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.eq(shardConfig.system.sessions.count(), 4, "did not flush mongos' sessions");
- }
+ assert.commandWorked(mongosAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.count(), 4, "did not flush mongos' sessions");
+}
- // Test that if we drop the index on the sessions collection, only a refresh on the config
- // server heals it.
- {
- assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
+// Test that if we drop the index on the sessions collection, only a refresh on the config
+// server heals it.
+{
+ assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
- validateSessionsCollection(shard, true, false);
+ validateSessionsCollection(shard, true, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, true);
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, true);
- assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
+ assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, false);
- }
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, false);
+}
- // Test that if we drop the collection, it will be recreated only by the config server.
- {
- assertDropCollection(mongosConfig, "system.sessions");
- validateSessionsCollection(shard, false, false);
+// Test that if we drop the collection, it will be recreated only by the config server.
+{
+ assertDropCollection(mongosConfig, "system.sessions");
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, false, false);
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, false, false);
- assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(shard, true, true);
- }
-
- st.stop();
- rs.stopSet();
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, true);
+}
+st.stop();
+rs.stopSet();
})();
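
The validateSessionsCollection helper comes from jstests/libs/sessions_collection.js; judging from the drop-index assertions above, its two booleans plausibly mean "collection exists" and "{lastUse: 1} index exists". A sketch under that assumption (the real helper may check more, e.g. the TTL option on the index):

// Sketch only: re-derive the two checks the booleans appear to encode.
function validateSessionsCollectionSketch(conn, expectExists, expectIndex) {
    var config = conn.getDB("config");
    var exists = config.getCollectionNames().indexOf("system.sessions") !== -1;
    assert.eq(expectExists, exists);
    if (exists) {
        var hasLastUseIndex = config.system.sessions.getIndexes().some(function(ix) {
            return ix.key && ix.key.lastUse === 1;
        });
        assert.eq(expectIndex, hasLastUseIndex);
    }
}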
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index a5dfd4aca1c..5d74e86728a 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -1,47 +1,50 @@
/**
-* this tests some of the ground work
-*/
+ * This tests some of the groundwork.
+ */
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
- var db = s.getDB("test");
+var s = new ShardingTest({shards: 2});
+var db = s.getDB("test");
- assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
- assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
- assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
+assert.writeOK(db.foo.insert({num: 1, name: "eliot"}));
+assert.writeOK(db.foo.insert({num: 2, name: "sara"}));
+assert.writeOK(db.foo.insert({num: -1, name: "joe"}));
- assert.commandWorked(db.foo.ensureIndex({num: 1}));
+assert.commandWorked(db.foo.ensureIndex({num: 1}));
- assert.eq(3, db.foo.find().length(), "A");
+assert.eq(3, db.foo.find().length(), "A");
- const shardCommand = {shardcollection: "test.foo", key: {num: 1}};
+const shardCommand = {
+ shardcollection: "test.foo",
+ key: {num: 1}
+};
- assert.commandFailed(s.s0.adminCommand(shardCommand));
+assert.commandFailed(s.s0.adminCommand(shardCommand));
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.eq(3, db.foo.find().length(), "after partitioning count failed");
+assert.eq(3, db.foo.find().length(), "after partitioning count failed");
- assert.commandWorked(s.s0.adminCommand(shardCommand));
- assert.commandFailed(s.s0.adminCommand({shardCollection: 'test', key: {x: 1}}));
- assert.commandFailed(s.s0.adminCommand({shardCollection: '.foo', key: {x: 1}}));
+assert.commandWorked(s.s0.adminCommand(shardCommand));
+assert.commandFailed(s.s0.adminCommand({shardCollection: 'test', key: {x: 1}}));
+assert.commandFailed(s.s0.adminCommand({shardCollection: '.foo', key: {x: 1}}));
- var cconfig = s.config.collections.findOne({_id: "test.foo"});
- assert(cconfig, "No collection entry found for test.foo");
+var cconfig = s.config.collections.findOne({_id: "test.foo"});
+assert(cconfig, "No collection entry found for test.foo");
- delete cconfig.lastmod;
- delete cconfig.dropped;
- delete cconfig.lastmodEpoch;
- delete cconfig.uuid;
+delete cconfig.lastmod;
+delete cconfig.dropped;
+delete cconfig.lastmodEpoch;
+delete cconfig.uuid;
- assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
+assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
- s.config.collections.find().forEach(printjson);
+s.config.collections.find().forEach(printjson);
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "num chunks A");
- assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "num chunks A");
+assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
- s.stop();
+s.stop();
})();
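
As an aside on the config check above: rather than deleting the volatile fields (lastmod, dropped, lastmodEpoch, uuid) before comparing, the stable fields could be fetched with a projection. A sketch using the same names as the assertion above (assert.eq compares serialized documents, so matching field order is assumed):

// Sketch: read only the stable fields of the collection's config entry.
var entry = s.config.collections.findOne({_id: "test.foo"}, {_id: 1, key: 1, unique: 1});
assert.eq(entry, {_id: "test.foo", key: {num: 1}, unique: false});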
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 527fad07be5..fd8d8657af6 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -1,240 +1,225 @@
(function() {
- 'use strict';
-
- function placeCheck(num) {
- print("shard2 step: " + num);
- }
-
- function printAll() {
- print("****************");
- db.foo.find().forEach(printjsononeline);
- print("++++++++++++++++++");
- primary.foo.find().forEach(printjsononeline);
- print("++++++++++++++++++");
- secondary.foo.find().forEach(printjsononeline);
- print("---------------------");
- }
-
- var s = new ShardingTest({shards: 2});
- var db = s.getDB("test");
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
- assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check 1");
-
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
- assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
- var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
- assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
-
- assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
- assert.writeOK(db.foo.save({num: 2, name: "sara"}));
- assert.writeOK(db.foo.save({num: -1, name: "joe"}));
-
- assert.eq(3,
- s.getPrimaryShard("test").getDB("test").foo.find().length(),
- "not right directly to db A");
- assert.eq(3, db.foo.find().length(), "not right on shard");
-
- var primary = s.getPrimaryShard("test").getDB("test");
- var secondary = s.getOther(primary).getDB("test");
-
- assert.eq(3, primary.foo.find().length(), "primary wrong B");
- assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
- assert.eq(3, db.foo.find().sort({num: 1}).length());
-
- placeCheck(2);
-
- // Test move shard to unexisting shard
- assert.commandFailedWithCode(
- s.s0.adminCommand(
- {movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true}),
- ErrorCodes.ShardNotFound);
-
- assert.commandWorked(s.s0.adminCommand({
- movechunk: "test.foo",
- find: {num: 1},
- to: secondary.getMongo().name,
- _waitForDelete: true
- }));
- assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
- assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
-
- assert.eq(2,
- s.config.chunks.count({"ns": "test.foo"}),
- "still should have 2 shards after move not:" + s.getChunksString());
- var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
- assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
-
- placeCheck(3);
-
- // Test inserts go to right server/shard
- assert.writeOK(db.foo.save({num: 3, name: "bob"}));
- assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
- assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-
- assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
- assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
- assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-
- assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
- assert.eq(2, primary.foo.find().length(), "boundary A");
- assert.eq(4, secondary.foo.find().length(), "boundary B");
-
- placeCheck(4);
-
- // findOne
- assert.eq("eliot", db.foo.findOne({num: 1}).name);
- assert.eq("funny man", db.foo.findOne({num: -2}).name);
-
- // getAll
- function sumQuery(c) {
- var sum = 0;
- c.toArray().forEach(function(z) {
- sum += z.num;
- });
- return sum;
- }
- assert.eq(6, db.foo.find().length(), "sharded query 1");
- assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
+'use strict';
+
+function placeCheck(num) {
+ print("shard2 step: " + num);
+}
+
+function printAll() {
+ print("****************");
+ db.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ primary.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ secondary.foo.find().forEach(printjsononeline);
+ print("---------------------");
+}
+
+var s = new ShardingTest({shards: 2});
+var db = s.getDB("test");
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check 1");
+
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 0}}));
+assert.eq(2, s.config.chunks.count({"ns": "test.foo"}), "should be 2 shards");
+var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
+assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
+
+assert.writeOK(db.foo.save({num: 1, name: "eliot"}));
+assert.writeOK(db.foo.save({num: 2, name: "sara"}));
+assert.writeOK(db.foo.save({num: -1, name: "joe"}));
+
+assert.eq(
+ 3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
+assert.eq(3, db.foo.find().length(), "not right on shard");
+
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
+
+assert.eq(3, primary.foo.find().length(), "primary wrong B");
+assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
+assert.eq(3, db.foo.find().sort({num: 1}).length());
+
+placeCheck(2);
+
+// Test moving a chunk to a nonexistent shard
+assert.commandFailedWithCode(
+ s.s0.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true}),
+ ErrorCodes.ShardNotFound);
+
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: secondary.getMongo().name, _waitForDelete: true}));
+assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
+assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
+
+assert.eq(2,
+ s.config.chunks.count({"ns": "test.foo"}),
+ "still should have 2 shards after move not:" + s.getChunksString());
+var chunks = s.config.chunks.find({"ns": "test.foo"}).toArray();
+assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
+
+placeCheck(3);
+
+// Test that inserts go to the right server/shard
+assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+
+assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+
+assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+assert.eq(2, primary.foo.find().length(), "boundary A");
+assert.eq(4, secondary.foo.find().length(), "boundary B");
+
+placeCheck(4);
+
+// findOne
+assert.eq("eliot", db.foo.findOne({num: 1}).name);
+assert.eq("funny man", db.foo.findOne({num: -2}).name);
+
+// getAll
+function sumQuery(c) {
+ var sum = 0;
+ c.toArray().forEach(function(z) {
+ sum += z.num;
+ });
+ return sum;
+}
+assert.eq(6, db.foo.find().length(), "sharded query 1");
+assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
- placeCheck(5);
+placeCheck(5);
- // sort by num
+// sort by num
- assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
- assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
+assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
- assert.eq(
- "funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
- assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
+assert.eq("funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
+assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
- assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
- assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
+assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
+assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
- placeCheck(6);
+placeCheck(6);
- // Sort by name
- function getNames(c) {
- return c.toArray().map(function(z) {
- return z.name;
- });
- }
- var correct = getNames(db.foo.find()).sort();
- assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
- correct = correct.reverse();
- assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
-
- assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
- assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
-
- // sort by num multiple shards per server
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 2}}));
- assert.eq("funny man",
- db.foo.find().sort({num: 1})[0].name,
- "sharding query w/sort and another split 1 order wrong");
- assert.eq("bob",
- db.foo.find().sort({num: -1})[0].name,
- "sharding query w/sort and another split 2 order wrong");
- assert.eq("funny man",
- db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
- "sharding query w/sort and another split 3 order wrong");
-
- placeCheck(7);
-
- db.foo.find().sort({_id: 1}).forEach(function(z) {
- print(z._id);
+// Sort by name
+function getNames(c) {
+ return c.toArray().map(function(z) {
+ return z.name;
});
-
- var zzz = db.foo.find().explain("executionStats").executionStats;
- assert.eq(0, zzz.totalKeysExamined, "EX1a");
- assert.eq(6, zzz.nReturned, "EX1b");
- assert.eq(6, zzz.totalDocsExamined, "EX1c");
-
- zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
- assert.eq(6, zzz.totalKeysExamined, "EX2a");
- assert.eq(6, zzz.nReturned, "EX2b");
- assert.eq(6, zzz.totalDocsExamined, "EX2c");
-
- // getMore
- assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
- function countCursor(c) {
- var num = 0;
- while (c.hasNext()) {
- c.next();
- num++;
- }
- return num;
+}
+var correct = getNames(db.foo.find()).sort();
+assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
+correct = correct.reverse();
+assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
+
+assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
+
+// sort by num with multiple chunks per shard
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 2}}));
+assert.eq("funny man",
+ db.foo.find().sort({num: 1})[0].name,
+ "sharding query w/sort and another split 1 order wrong");
+assert.eq("bob",
+ db.foo.find().sort({num: -1})[0].name,
+ "sharding query w/sort and another split 2 order wrong");
+assert.eq("funny man",
+ db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
+ "sharding query w/sort and another split 3 order wrong");
+
+placeCheck(7);
+
+db.foo.find().sort({_id: 1}).forEach(function(z) {
+ print(z._id);
+});
+
+var zzz = db.foo.find().explain("executionStats").executionStats;
+assert.eq(0, zzz.totalKeysExamined, "EX1a");
+assert.eq(6, zzz.nReturned, "EX1b");
+assert.eq(6, zzz.totalDocsExamined, "EX1c");
+
+zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
+assert.eq(6, zzz.totalKeysExamined, "EX2a");
+assert.eq(6, zzz.nReturned, "EX2b");
+assert.eq(6, zzz.totalDocsExamined, "EX2c");
+
+// getMore
+assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
+function countCursor(c) {
+ var num = 0;
+ while (c.hasNext()) {
+ c.next();
+ num++;
}
- assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
- assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
-
- // find by non-shard-key
- db.foo.find().forEach(function(z) {
- var y = db.foo.findOne({_id: z._id});
- assert(y, "_id check 1 : " + tojson(z));
- assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
- });
+ return num;
+}
+assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
+assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
+
+// find by non-shard-key
+db.foo.find().forEach(function(z) {
+ var y = db.foo.findOne({_id: z._id});
+ assert(y, "_id check 1 : " + tojson(z));
+ assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
+});
- // update
- var person = db.foo.findOne({num: 3});
- assert.eq("bob", person.name, "update setup 1");
- person.name = "bob is gone";
- db.foo.update({num: 3}, person);
- person = db.foo.findOne({num: 3});
- assert.eq("bob is gone", person.name, "update test B");
+// update
+var person = db.foo.findOne({num: 3});
+assert.eq("bob", person.name, "update setup 1");
+person.name = "bob is gone";
+db.foo.update({num: 3}, person);
+person = db.foo.findOne({num: 3});
+assert.eq("bob is gone", person.name, "update test B");
- // remove
- assert(db.foo.findOne({num: 3}) != null, "remove test A");
- db.foo.remove({num: 3});
- assert.isnull(db.foo.findOne({num: 3}), "remove test B");
+// remove
+assert(db.foo.findOne({num: 3}) != null, "remove test A");
+db.foo.remove({num: 3});
+assert.isnull(db.foo.findOne({num: 3}), "remove test B");
- db.foo.save({num: 3, name: "eliot2"});
- person = db.foo.findOne({num: 3});
- assert(person, "remove test C");
- assert.eq(person.name, "eliot2");
+db.foo.save({num: 3, name: "eliot2"});
+person = db.foo.findOne({num: 3});
+assert(person, "remove test C");
+assert.eq(person.name, "eliot2");
- db.foo.remove({_id: person._id});
- assert.isnull(db.foo.findOne({num: 3}), "remove test E");
+db.foo.remove({_id: person._id});
+assert.isnull(db.foo.findOne({num: 3}), "remove test E");
- placeCheck(8);
+placeCheck(8);
- // more update stuff
+// more update stuff
- printAll();
- var total = db.foo.find().count();
- var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
- printAll();
- assert.eq(total, res.nModified, res.toString());
+printAll();
+var total = db.foo.find().count();
+var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
+printAll();
+assert.eq(total, res.nModified, res.toString());
- res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
- assert.eq(1, res.nModified, res.toString());
+res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
+assert.eq(1, res.nModified, res.toString());
- // ---- move all to the secondary
+// ---- move all to the secondary
- assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
- secondary.foo.insert({num: -3});
+secondary.foo.insert({num: -3});
- assert.commandWorked(s.s0.adminCommand({
- movechunk: "test.foo",
- find: {num: -2},
- to: secondary.getMongo().name,
- _waitForDelete: true
- }));
- assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: secondary.getMongo().name, _waitForDelete: true}));
+assert.eq(1, s.onNumShards("foo"), "on 1 shards");
- assert.commandWorked(s.s0.adminCommand({
- movechunk: "test.foo",
- find: {num: -2},
- to: primary.getMongo().name,
- _waitForDelete: true
- }));
- assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
- assert.eq(3, s.config.chunks.count({"ns": "test.foo"}), "only 3 chunks");
+assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: primary.getMongo().name, _waitForDelete: true}));
+assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
+assert.eq(3, s.config.chunks.count({"ns": "test.foo"}), "only 3 chunks");
- print("YO : " + tojson(db.runCommand("serverStatus")));
+print("YO : " + tojson(db.runCommand("serverStatus")));
- s.stop();
+s.stop();
})();
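
The onNumShards helper used throughout this test is part of ShardingTest. A hypothetical approximation based on the config.chunks metadata queried elsewhere in the test (the real helper may instead count shards that actually hold documents):

// Sketch: count the distinct shards owning at least one chunk of a namespace.
function numShardsWithChunks(s, ns) {
    return s.config.chunks.distinct("shard", {ns: ns}).length;
}
// e.g. assert.eq(2, numShardsWithChunks(s, "test.foo"));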
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index b0a2c0c72c8..d0957a1c45d 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,195 +1,194 @@
(function() {
- // Include helpers for analyzing explain output.
- load("jstests/libs/analyze_plan.js");
+// Include helpers for analyzing explain output.
+load("jstests/libs/analyze_plan.js");
- var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
+var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
- s2 = s._mongos[1];
+s2 = s._mongos[1];
- db = s.getDB("test");
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+db = s.getDB("test");
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- // Ensure that the second mongos will see the movePrimary
- s.configRS.awaitLastOpCommitted();
+// Ensure that the second mongos will see the movePrimary
+s.configRS.awaitLastOpCommitted();
- assert(sh.getBalancerState(), "A1");
+assert(sh.getBalancerState(), "A1");
- s.stopBalancer();
- assert(!sh.getBalancerState(), "A2");
+s.stopBalancer();
+assert(!sh.getBalancerState(), "A2");
- s.startBalancer();
- assert(sh.getBalancerState(), "A3");
+s.startBalancer();
+assert(sh.getBalancerState(), "A3");
- s.stopBalancer();
- assert(!sh.getBalancerState(), "A4");
+s.stopBalancer();
+assert(!sh.getBalancerState(), "A4");
- s.config.databases.find().forEach(printjson);
+s.config.databases.find().forEach(printjson);
- a = s.getDB("test").foo;
- b = s2.getDB("test").foo;
+a = s.getDB("test").foo;
+b = s2.getDB("test").foo;
- primary = s.getPrimaryShard("test").getDB("test").foo;
- secondary = s.getOther(primary.name).getDB("test").foo;
+primary = s.getPrimaryShard("test").getDB("test").foo;
+secondary = s.getOther(primary.name).getDB("test").foo;
- a.save({num: 1});
- a.save({num: 2});
- a.save({num: 3});
+a.save({num: 1});
+a.save({num: 2});
+a.save({num: 3});
- assert.eq(3, a.find().toArray().length, "normal A");
- assert.eq(3, b.find().toArray().length, "other A");
+assert.eq(3, a.find().toArray().length, "normal A");
+assert.eq(3, b.find().toArray().length, "other A");
- assert.eq(3, primary.count(), "p1");
- assert.eq(0, secondary.count(), "s1");
-
- assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+assert.eq(3, primary.count(), "p1");
+assert.eq(0, secondary.count(), "s1");
+
+assert.eq(1, s.onNumShards("foo"), "on 1 shards");
- s.adminCommand({split: "test.foo", middle: {num: 2}});
- s.adminCommand({
- movechunk: "test.foo",
- find: {num: 3},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
-
- assert(primary.find().toArray().length > 0, "blah 1");
- assert(secondary.find().toArray().length > 0, "blah 2");
- assert.eq(3, primary.find().itcount() + secondary.find().itcount(), "blah 3");
-
- assert.eq(3, a.find().toArray().length, "normal B");
- assert.eq(3, b.find().toArray().length, "other B");
-
- printjson(primary._db._adminCommand("shardingState"));
-
- // --- filtering ---
-
- function doCounts(name, total, onlyItCounts) {
- total = total || (primary.count() + secondary.count());
- if (!onlyItCounts)
- assert.eq(total, a.count(), name + " count");
- assert.eq(total, a.find().sort({n: 1}).itcount(), name + " itcount - sort n");
- assert.eq(total, a.find().itcount(), name + " itcount");
- assert.eq(total, a.find().sort({_id: 1}).itcount(), name + " itcount - sort _id");
- return total;
- }
-
- var total = doCounts("before wrong save");
- assert.writeOK(secondary.insert({_id: 111, num: -3}));
- doCounts("after wrong save", total, true);
- e = a.find().explain("executionStats").executionStats;
- assert.eq(3, e.nReturned, "ex1");
- assert.eq(0, e.totalKeysExamined, "ex2");
- assert.eq(4, e.totalDocsExamined, "ex3");
-
- var chunkSkips = 0;
- for (var shard in e.executionStages.shards) {
- var theShard = e.executionStages.shards[shard];
- chunkSkips += getChunkSkips(theShard.executionStages);
- }
- assert.eq(1, chunkSkips, "ex4");
-
- // SERVER-4612
- // make sure idhack obeys chunks
- x = a.findOne({_id: 111});
- assert(!x, "idhack didn't obey chunk boundaries " + tojson(x));
-
- // --- move all to 1 ---
- print("MOVE ALL TO 1");
-
- assert.eq(2, s.onNumShards("foo"), "on 2 shards");
- s.printCollectionInfo("test.foo");
-
- assert(a.findOne({num: 1}));
- assert(b.findOne({num: 1}));
-
- print("GOING TO MOVE");
- assert(a.findOne({num: 1}), "pre move 1");
- s.printCollectionInfo("test.foo");
- myto = s.getOther(s.getPrimaryShard("test")).name;
- print("counts before move: " + tojson(s.shardCounts("foo")));
- s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: myto, _waitForDelete: true});
- print("counts after move: " + tojson(s.shardCounts("foo")));
- s.printCollectionInfo("test.foo");
- assert.eq(1, s.onNumShards("foo"), "on 1 shard again");
- assert(a.findOne({num: 1}), "post move 1");
- assert(b.findOne({num: 1}), "post move 2");
-
- print("*** drop");
-
- s.printCollectionInfo("test.foo", "before drop");
- a.drop();
- s.printCollectionInfo("test.foo", "after drop");
-
- assert.eq(0, a.count(), "a count after drop");
- assert.eq(0, b.count(), "b count after drop");
-
- s.printCollectionInfo("test.foo", "after counts");
-
- assert.eq(0, primary.count(), "p count after drop");
- assert.eq(0, secondary.count(), "s count after drop");
-
- print("*** dropDatabase setup");
-
- s.printShardingStatus();
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
- a.save({num: 2});
- a.save({num: 3});
- s.adminCommand({split: "test.foo", middle: {num: 2}});
- s.adminCommand({
- movechunk: "test.foo",
- find: {num: 3},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
- s.printShardingStatus();
-
- s.printCollectionInfo("test.foo", "after dropDatabase setup");
- doCounts("after dropDatabase setup2");
- s.printCollectionInfo("test.foo", "after dropDatabase setup3");
-
- print("*** ready to call dropDatabase");
- res = s.getDB("test").dropDatabase();
- assert.eq(1, res.ok, "dropDatabase failed : " + tojson(res));
- // Waiting for SERVER-2253
- assert.eq(0,
- s.config.databases.count({_id: "test"}),
- "database 'test' was dropped but still appears in configDB");
-
- s.printShardingStatus();
- s.printCollectionInfo("test.foo", "after dropDatabase call 1");
- assert.eq(0, doCounts("after dropDatabase called"));
-
- // ---- retry commands SERVER-1471 ----
-
- s.adminCommand({enablesharding: "test2"});
- s.ensurePrimaryShard('test2', s.shard0.shardName);
- s.adminCommand({shardcollection: "test2.foo", key: {num: 1}});
- dba = s.getDB("test2");
- dbb = s2.getDB("test2");
- dba.foo.save({num: 1});
- dba.foo.save({num: 2});
- dba.foo.save({num: 3});
-
- assert.eq(1, s.onNumShards("foo", "test2"), "B on 1 shards");
- assert.eq(3, dba.foo.count(), "Ba");
- assert.eq(3, dbb.foo.count(), "Bb");
-
- s.adminCommand({split: "test2.foo", middle: {num: 2}});
- s.adminCommand({
- movechunk: "test2.foo",
- find: {num: 3},
- to: s.getOther(s.getPrimaryShard("test2")).name,
- _waitForDelete: true
- });
-
- assert.eq(2, s.onNumShards("foo", "test2"), "B on 2 shards");
-
- x = dba.foo.stats();
- printjson(x);
- y = dbb.foo.stats();
- printjson(y);
-
- s.stop();
+s.adminCommand({split: "test.foo", middle: {num: 2}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert(primary.find().toArray().length > 0, "blah 1");
+assert(secondary.find().toArray().length > 0, "blah 2");
+assert.eq(3, primary.find().itcount() + secondary.find().itcount(), "blah 3");
+
+assert.eq(3, a.find().toArray().length, "normal B");
+assert.eq(3, b.find().toArray().length, "other B");
+
+printjson(primary._db._adminCommand("shardingState"));
+// --- filtering ---
+
+function doCounts(name, total, onlyItCounts) {
+ total = total || (primary.count() + secondary.count());
+ if (!onlyItCounts)
+ assert.eq(total, a.count(), name + " count");
+ assert.eq(total, a.find().sort({n: 1}).itcount(), name + " itcount - sort n");
+ assert.eq(total, a.find().itcount(), name + " itcount");
+ assert.eq(total, a.find().sort({_id: 1}).itcount(), name + " itcount - sort _id");
+ return total;
+}
+
+var total = doCounts("before wrong save");
+assert.writeOK(secondary.insert({_id: 111, num: -3}));
+doCounts("after wrong save", total, true);
+e = a.find().explain("executionStats").executionStats;
+assert.eq(3, e.nReturned, "ex1");
+assert.eq(0, e.totalKeysExamined, "ex2");
+assert.eq(4, e.totalDocsExamined, "ex3");
+
+var chunkSkips = 0;
+for (var shard in e.executionStages.shards) {
+ var theShard = e.executionStages.shards[shard];
+ chunkSkips += getChunkSkips(theShard.executionStages);
+}
+assert.eq(1, chunkSkips, "ex4");
+
+// SERVER-4612
+// make sure idhack obeys chunks
+x = a.findOne({_id: 111});
+assert(!x, "idhack didn't obey chunk boundaries " + tojson(x));
+
+// --- move all to 1 ---
+print("MOVE ALL TO 1");
+
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+s.printCollectionInfo("test.foo");
+
+assert(a.findOne({num: 1}));
+assert(b.findOne({num: 1}));
+
+print("GOING TO MOVE");
+assert(a.findOne({num: 1}), "pre move 1");
+s.printCollectionInfo("test.foo");
+myto = s.getOther(s.getPrimaryShard("test")).name;
+print("counts before move: " + tojson(s.shardCounts("foo")));
+s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: myto, _waitForDelete: true});
+print("counts after move: " + tojson(s.shardCounts("foo")));
+s.printCollectionInfo("test.foo");
+assert.eq(1, s.onNumShards("foo"), "on 1 shard again");
+assert(a.findOne({num: 1}), "post move 1");
+assert(b.findOne({num: 1}), "post move 2");
+
+print("*** drop");
+
+s.printCollectionInfo("test.foo", "before drop");
+a.drop();
+s.printCollectionInfo("test.foo", "after drop");
+
+assert.eq(0, a.count(), "a count after drop");
+assert.eq(0, b.count(), "b count after drop");
+
+s.printCollectionInfo("test.foo", "after counts");
+
+assert.eq(0, primary.count(), "p count after drop");
+assert.eq(0, secondary.count(), "s count after drop");
+
+print("*** dropDatabase setup");
+
+s.printShardingStatus();
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+a.save({num: 2});
+a.save({num: 3});
+s.adminCommand({split: "test.foo", middle: {num: 2}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+s.printShardingStatus();
+
+s.printCollectionInfo("test.foo", "after dropDatabase setup");
+doCounts("after dropDatabase setup2");
+s.printCollectionInfo("test.foo", "after dropDatabase setup3");
+
+print("*** ready to call dropDatabase");
+res = s.getDB("test").dropDatabase();
+assert.eq(1, res.ok, "dropDatabase failed : " + tojson(res));
+// Waiting for SERVER-2253
+assert.eq(0,
+ s.config.databases.count({_id: "test"}),
+ "database 'test' was dropped but still appears in configDB");
+
+s.printShardingStatus();
+s.printCollectionInfo("test.foo", "after dropDatabase call 1");
+assert.eq(0, doCounts("after dropDatabase called"));
+
+// ---- retry commands SERVER-1471 ----
+
+s.adminCommand({enablesharding: "test2"});
+s.ensurePrimaryShard('test2', s.shard0.shardName);
+s.adminCommand({shardcollection: "test2.foo", key: {num: 1}});
+dba = s.getDB("test2");
+dbb = s2.getDB("test2");
+dba.foo.save({num: 1});
+dba.foo.save({num: 2});
+dba.foo.save({num: 3});
+
+assert.eq(1, s.onNumShards("foo", "test2"), "B on 1 shards");
+assert.eq(3, dba.foo.count(), "Ba");
+assert.eq(3, dbb.foo.count(), "Bb");
+
+s.adminCommand({split: "test2.foo", middle: {num: 2}});
+s.adminCommand({
+ movechunk: "test2.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test2")).name,
+ _waitForDelete: true
+});
+
+assert.eq(2, s.onNumShards("foo", "test2"), "B on 2 shards");
+
+x = dba.foo.stats();
+printjson(x);
+y = dbb.foo.stats();
+printjson(y);
+
+s.stop();
})();
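
The test above leans on one recurring fixture: split a sharded collection at a key value, then migrate one of the resulting chunks to the other shard and wait for the donor to clean up. A minimal sketch of that pattern, using the same ShardingTest helpers the test itself calls (the collection name and split point here are illustrative):

var st = new ShardingTest({shards: 2});
st.adminCommand({enablesharding: "test"});
st.adminCommand({shardcollection: "test.foo", key: {num: 1}});
st.adminCommand({split: "test.foo", middle: {num: 2}});  // chunks: (-inf, 2) and [2, +inf)
st.adminCommand({
    movechunk: "test.foo",
    find: {num: 3},  // identifies the chunk containing {num: 3}
    to: st.getOther(st.getPrimaryShard("test")).name,  // the non-primary shard
    _waitForDelete: true  // block until the donor deletes the migrated range
});
st.stop();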
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index e00833ac179..a2b60b77b98 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -1,120 +1,120 @@
// shard6.js
(function() {
- "use strict";
- var summary = "";
-
- var s = new ShardingTest({name: "shard6", shards: 2});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.data", key: {num: 1}});
-
- var version = s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
- var post32 = (version[0] > 4) || ((version[0] == 3) && (version[1] > 2));
-
- var db = s.getDB("test");
-
- function poolStats(where) {
- var total = 0;
- var msg = "poolStats " + where + " ";
- var stats = db.runCommand("connPoolStats");
- for (var h in stats.hosts) {
- if (!stats.hosts.hasOwnProperty(h)) {
- continue;
- }
- var host = stats.hosts[h];
- msg += host.created + " ";
- total += host.created;
- }
- printjson(stats.hosts);
- print("****\n" + msg + "\n*****");
- summary += msg + "\n";
- if (post32) {
- assert.eq(total, stats.totalCreated, "mismatched number of total connections created");
+"use strict";
+var summary = "";
+
+var s = new ShardingTest({name: "shard6", shards: 2});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.data", key: {num: 1}});
+
+var version = s.getDB("admin").runCommand({buildinfo: 1}).versionArray;
+var post32 = (version[0] > 3) || ((version[0] == 3) && (version[1] > 2));
+
+var db = s.getDB("test");
+
+function poolStats(where) {
+ var total = 0;
+ var msg = "poolStats " + where + " ";
+ var stats = db.runCommand("connPoolStats");
+ for (var h in stats.hosts) {
+ if (!stats.hosts.hasOwnProperty(h)) {
+ continue;
}
- return total;
+ var host = stats.hosts[h];
+ msg += host.created + " ";
+ total += host.created;
+ }
+ printjson(stats.hosts);
+ print("****\n" + msg + "\n*****");
+ summary += msg + "\n";
+ if (post32) {
+ assert.eq(total, stats.totalCreated, "mismatched number of total connections created");
}
+ return total;
+}
- poolStats("at start");
+poolStats("at start");
- // we want a lot of data, so lets make a 50k string to cheat :)
- var bigString = "this is a big string. ".repeat(50000);
+// we want a lot of data, so let's make a 50k string to cheat :)
+var bigString = "this is a big string. ".repeat(50000);
- // ok, now lets insert a some data
- var num = 0;
- for (; num < 100; num++) {
- db.data.save({num: num, bigString: bigString});
- }
+// ok, now let's insert some data
+var num = 0;
+for (; num < 100; num++) {
+ db.data.save({num: num, bigString: bigString});
+}
- assert.eq(100, db.data.find().toArray().length, "basic find after setup");
+assert.eq(100, db.data.find().toArray().length, "basic find after setup");
- poolStats("setup done");
+poolStats("setup done");
- // limit
+// limit
- assert.eq(77, db.data.find().limit(77).itcount(), "limit test 1");
- assert.eq(1, db.data.find().limit(1).itcount(), "limit test 2");
- for (var i = 1; i < 10; i++) {
- assert.eq(i, db.data.find().limit(i).itcount(), "limit test 3a : " + i);
- assert.eq(i, db.data.find().skip(i).limit(i).itcount(), "limit test 3b : " + i);
- poolStats("after loop : " + i);
- }
+assert.eq(77, db.data.find().limit(77).itcount(), "limit test 1");
+assert.eq(1, db.data.find().limit(1).itcount(), "limit test 2");
+for (var i = 1; i < 10; i++) {
+ assert.eq(i, db.data.find().limit(i).itcount(), "limit test 3a : " + i);
+ assert.eq(i, db.data.find().skip(i).limit(i).itcount(), "limit test 3b : " + i);
+ poolStats("after loop : " + i);
+}
- poolStats("limit test done");
+poolStats("limit test done");
- function assertOrder(start, num) {
- var a = db.data.find().skip(start).limit(num).sort({num: 1}).map(function(z) {
- return z.num;
- });
- var c = [];
- for (var i = 0; i < num; i++)
- c.push(start + i);
- assert.eq(c, a, "assertOrder start: " + start + " num: " + num);
- }
+function assertOrder(start, num) {
+ var a = db.data.find().skip(start).limit(num).sort({num: 1}).map(function(z) {
+ return z.num;
+ });
+ var c = [];
+ for (var i = 0; i < num; i++)
+ c.push(start + i);
+ assert.eq(c, a, "assertOrder start: " + start + " num: " + num);
+}
- assertOrder(0, 10);
- assertOrder(5, 10);
+assertOrder(0, 10);
+assertOrder(5, 10);
- poolStats("after checking order");
+poolStats("after checking order");
- function doItCount(skip, sort, batchSize) {
- var c = db.data.find();
- if (skip)
- c.skip(skip);
- if (sort)
- c.sort(sort);
- if (batchSize)
- c.batchSize(batchSize);
- return c.itcount();
- }
+function doItCount(skip, sort, batchSize) {
+ var c = db.data.find();
+ if (skip)
+ c.skip(skip);
+ if (sort)
+ c.sort(sort);
+ if (batchSize)
+ c.batchSize(batchSize);
+ return c.itcount();
+}
- function checkItCount(batchSize) {
- assert.eq(5, doItCount(num - 5, null, batchSize), "skip 1 " + batchSize);
- assert.eq(5, doItCount(num - 5, {num: 1}, batchSize), "skip 2 " + batchSize);
- assert.eq(5, doItCount(num - 5, {_id: 1}, batchSize), "skip 3 " + batchSize);
- assert.eq(0, doItCount(num + 5, {num: 1}, batchSize), "skip 4 " + batchSize);
- assert.eq(0, doItCount(num + 5, {_id: 1}, batchSize), "skip 5 " + batchSize);
- }
+function checkItCount(batchSize) {
+ assert.eq(5, doItCount(num - 5, null, batchSize), "skip 1 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {num: 1}, batchSize), "skip 2 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {_id: 1}, batchSize), "skip 3 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {num: 1}, batchSize), "skip 4 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {_id: 1}, batchSize), "skip 5 " + batchSize);
+}
- poolStats("before checking itcount");
+poolStats("before checking itcount");
- checkItCount(0);
- checkItCount(2);
+checkItCount(0);
+checkItCount(2);
- poolStats("after checking itcount");
+poolStats("after checking itcount");
- // --- Verify that modify & save style updates doesn't work on sharded clusters ---
+// --- Verify that modify & save style updates don't work on sharded clusters ---
- var o = db.data.findOne();
- o.x = 16;
- assert.commandFailedWithCode(db.data.save(o), ErrorCodes.ShardKeyNotFound);
- poolStats("at end");
+var o = db.data.findOne();
+o.x = 16;
+assert.commandFailedWithCode(db.data.save(o), ErrorCodes.ShardKeyNotFound);
+poolStats("at end");
- print(summary);
+print(summary);
- assert.throws(function() {
- s.adminCommand({enablesharding: "admin"});
- });
+assert.throws(function() {
+ s.adminCommand({enablesharding: "admin"});
+});
- s.stop();
+s.stop();
})();
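
The poolStats helper above doubles as a template for connection-accounting checks: sum the per-host `created` counters reported by connPoolStats and, on servers newer than 3.2, cross-check the sum against the aggregate `totalCreated` field. As a standalone helper (a sketch, assuming `db` is any connected database handle):

function countCreatedConnections(db) {
    var stats = db.runCommand("connPoolStats");
    var total = 0;
    for (var h in stats.hosts) {
        if (!stats.hosts.hasOwnProperty(h))
            continue;  // skip inherited properties
        total += stats.hosts[h].created;
    }
    // Valid on servers newer than 3.2, where totalCreated is reported.
    assert.eq(total, stats.totalCreated, "mismatched number of total connections created");
    return total;
}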
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index e041d4f377a..5ed9e129a4d 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -7,111 +7,64 @@
*/
(function() {
- "use strict";
+"use strict";
- var waitForMaster = function(conn) {
- assert.soon(function() {
- var res = conn.getDB('admin').runCommand({isMaster: 1});
- return res.ismaster;
- });
+var waitForMaster = function(conn) {
+ assert.soon(function() {
+ var res = conn.getDB('admin').runCommand({isMaster: 1});
+ return res.ismaster;
+ });
+};
+
+/**
+ * Runs a series of tests on the mongod instance that mongodConn points to. Note that the
+ * test can restart the mongod instance several times, so mongodConn can end up with a
+ * broken connection afterward.
+ *
+ * awaitVersionUpdate is used with the replset invocation of this test to ensure that our
+ * initial write to the admin.system.version collection is fully flushed out of the oplog before
+ * restarting. That allows our standalone corrupting update to see the write (and cause us to
+ * fail on startup).
+ *
+ * TODO: Remove awaitVersionUpdate after SERVER-41005, where we figure out how to wait until
+ * after replication is started before reading our shard identity from
+ * admin.system.version
+ */
+var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
+ var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: configConnStr,
+ shardName: 'newShard',
+ clusterId: ObjectId()
};
/**
- * Runs a series of test on the mongod instance mongodConn is pointing to. Notes that the
- * test can restart the mongod instance several times so mongodConn can end up with a broken
- * connection after.
- *
- * awaitVersionUpdate is used with the replset invocation of this test to ensure that our
- * initial write to the admin.system.version collection is fully flushed out of the oplog before
- * restarting. That allows our standalone corrupting update to see the write (and cause us to
- * fail on startup).
- *
- * TODO: Remove awaitVersionUpdate after SERVER-41005, where we figure out how to wait until
- * after replication is started before reading our shard identity from
- * admin.system.version
+ * Restarts the server without --shardsvr and replaces the shardIdentity doc with a valid
+ * document. Then restarts the server again with --shardsvr, returning a connection to the
+ * server after the last restart.
*/
- var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: configConnStr,
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- /**
- * Restarts the server without --shardsvr and replace the shardIdentity doc with a valid
- * document. Then, restarts the server again with --shardsvr. This also returns a
- * connection to the server after the last restart.
- */
- var restartAndFixShardIdentityDoc = function(startOptions) {
- var options = Object.extend({}, startOptions);
- // With Recover to a Timestamp, writes to a replica set member may not be written to
- // disk in the collection, but are instead re-applied from the oplog at startup. When
- // restarting with `--shardsvr`, the update to the `shardIdentity` document is not
- // processed. Turning off `--replSet` guarantees the update is written out to the
- // collection and the test no longer relies on replication recovery from performing
- // the update with `--shardsvr` on.
- var rsName = options.replSet;
- delete options.replSet;
- delete options.shardsvr;
- var mongodConn = MongoRunner.runMongod(options);
- waitForMaster(mongodConn);
-
- var res = mongodConn.getDB('admin').system.version.update({_id: 'shardIdentity'},
- shardIdentityDoc);
- assert.eq(1, res.nModified);
-
- MongoRunner.stopMongod(mongodConn);
-
- newMongodOptions.shardsvr = '';
- newMongodOptions.replSet = rsName;
- mongodConn = MongoRunner.runMongod(newMongodOptions);
- waitForMaster(mongodConn);
-
- res = mongodConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- return mongodConn;
- };
-
- // Simulate the upsert that is performed by a config server on addShard.
- assert.writeOK(mongodConn.getDB('admin').system.version.update(
- {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId,
- },
- {$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}},
- {upsert: true}));
-
- awaitVersionUpdate();
-
- var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+ var restartAndFixShardIdentityDoc = function(startOptions) {
+ var options = Object.extend({}, startOptions);
+ // With Recover to a Timestamp, writes to a replica set member may not be written to
+ // disk in the collection, but are instead re-applied from the oplog at startup. When
+ // restarting with `--shardsvr`, the update to the `shardIdentity` document is not
+ // processed. Turning off `--replSet` guarantees the update is written out to the
+        // collection, so the test no longer relies on replication recovery to perform
+        // the update when `--shardsvr` is on.
+ var rsName = options.replSet;
+ delete options.replSet;
+ delete options.shardsvr;
+ var mongodConn = MongoRunner.runMongod(options);
+ waitForMaster(mongodConn);
+
+ var res = mongodConn.getDB('admin').system.version.update({_id: 'shardIdentity'},
+ shardIdentityDoc);
+ assert.eq(1, res.nModified);
- assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
- // Should not be allowed to remove the shardIdentity document
- assert.writeErrorWithCode(
- mongodConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}), 40070);
-
- //
- // Test normal startup
- //
-
- var newMongodOptions = Object.extend(mongodConn.savedOptions, {
- restart: true,
- // disable snapshotting to force the stable timestamp forward with or without the
- // majority commit point. This simplifies forcing out our corrupted write to
- // admin.system.version
- setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"}
- });
MongoRunner.stopMongod(mongodConn);
+
+ newMongodOptions.shardsvr = '';
+ newMongodOptions.replSet = rsName;
mongodConn = MongoRunner.runMongod(newMongodOptions);
waitForMaster(mongodConn);
@@ -122,61 +75,108 @@
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
- //
- // Test shardIdentity doc without configsvrConnectionString, resulting into parse error
- //
-
- // Note: modification of the shardIdentity is allowed only when not running with --shardsvr
- MongoRunner.stopMongod(mongodConn);
- // The manipulation of `--replSet` is explained in `restartAndFixShardIdentityDoc`.
- var rsName = newMongodOptions.replSet;
- delete newMongodOptions.replSet;
- delete newMongodOptions.shardsvr;
- mongodConn = MongoRunner.runMongod(newMongodOptions);
- waitForMaster(mongodConn);
-
- let writeResult = assert.commandWorked(mongodConn.getDB('admin').system.version.update(
- {_id: 'shardIdentity'}, {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
- assert.eq(writeResult.nModified, 1);
-
- MongoRunner.stopMongod(mongodConn);
-
- newMongodOptions.shardsvr = '';
- newMongodOptions.replSet = rsName;
- assert.throws(function() {
- var connToCrashedMongod = MongoRunner.runMongod(newMongodOptions);
- waitForMaster(connToCrashedMongod);
- });
-
- // We call MongoRunner.stopMongod() using a former connection to the server that is
- // configured with the same port in order to be able to assert on the server's exit code.
- MongoRunner.stopMongod(mongodConn, undefined, {allowedExitCode: MongoRunner.EXIT_UNCAUGHT});
-
- //
- // Test that it is possible to fix the invalid shardIdentity doc by not passing --shardsvr
- //
- mongodConn = restartAndFixShardIdentityDoc(newMongodOptions);
- res = mongodConn.getDB('admin').runCommand({shardingState: 1});
- assert(res.enabled);
+ return mongodConn;
};
- var st = new ShardingTest({shards: 1});
-
- {
- var mongod = MongoRunner.runMongod({shardsvr: ''});
- runTest(mongod, st.configRS.getURL(), function() {});
- MongoRunner.stopMongod(mongod);
- }
-
- {
- var replTest = new ReplSetTest({nodes: 1});
- replTest.startSet({shardsvr: ''});
- replTest.initiate();
- runTest(replTest.getPrimary(), st.configRS.getURL(), function() {
- replTest.awaitLastStableRecoveryTimestamp();
- });
- replTest.stopSet();
- }
-
- st.stop();
+ // Simulate the upsert that is performed by a config server on addShard.
+ assert.writeOK(mongodConn.getDB('admin').system.version.update(
+ {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId,
+ },
+ {$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}},
+ {upsert: true}));
+
+ awaitVersionUpdate();
+
+ var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+
+ assert(res.enabled);
+ assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+ assert.eq(shardIdentityDoc.shardName, res.shardName);
+ assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+ // Should not be allowed to remove the shardIdentity document
+ assert.writeErrorWithCode(
+ mongodConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}), 40070);
+
+ //
+ // Test normal startup
+ //
+
+ var newMongodOptions = Object.extend(mongodConn.savedOptions, {
+ restart: true,
+ // disable snapshotting to force the stable timestamp forward with or without the
+ // majority commit point. This simplifies forcing out our corrupted write to
+ // admin.system.version
+ setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"}
+ });
+ MongoRunner.stopMongod(mongodConn);
+ mongodConn = MongoRunner.runMongod(newMongodOptions);
+ waitForMaster(mongodConn);
+
+ res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+
+ assert(res.enabled);
+ assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+ assert.eq(shardIdentityDoc.shardName, res.shardName);
+ assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+ //
+    // Test shardIdentity doc without configsvrConnectionString, resulting in a parse error
+ //
+
+ // Note: modification of the shardIdentity is allowed only when not running with --shardsvr
+ MongoRunner.stopMongod(mongodConn);
+ // The manipulation of `--replSet` is explained in `restartAndFixShardIdentityDoc`.
+ var rsName = newMongodOptions.replSet;
+ delete newMongodOptions.replSet;
+ delete newMongodOptions.shardsvr;
+ mongodConn = MongoRunner.runMongod(newMongodOptions);
+ waitForMaster(mongodConn);
+
+ let writeResult = assert.commandWorked(mongodConn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
+ assert.eq(writeResult.nModified, 1);
+
+ MongoRunner.stopMongod(mongodConn);
+
+ newMongodOptions.shardsvr = '';
+ newMongodOptions.replSet = rsName;
+ assert.throws(function() {
+ var connToCrashedMongod = MongoRunner.runMongod(newMongodOptions);
+ waitForMaster(connToCrashedMongod);
+ });
+
+ // We call MongoRunner.stopMongod() using a former connection to the server that is
+ // configured with the same port in order to be able to assert on the server's exit code.
+ MongoRunner.stopMongod(mongodConn, undefined, {allowedExitCode: MongoRunner.EXIT_UNCAUGHT});
+
+ //
+ // Test that it is possible to fix the invalid shardIdentity doc by not passing --shardsvr
+ //
+ mongodConn = restartAndFixShardIdentityDoc(newMongodOptions);
+ res = mongodConn.getDB('admin').runCommand({shardingState: 1});
+ assert(res.enabled);
+};
+
+var st = new ShardingTest({shards: 1});
+
+{
+ var mongod = MongoRunner.runMongod({shardsvr: ''});
+ runTest(mongod, st.configRS.getURL(), function() {});
+ MongoRunner.stopMongod(mongod);
+}
+
+{
+ var replTest = new ReplSetTest({nodes: 1});
+ replTest.startSet({shardsvr: ''});
+ replTest.initiate();
+ runTest(replTest.getPrimary(), st.configRS.getURL(), function() {
+ replTest.awaitLastStableRecoveryTimestamp();
+ });
+ replTest.stopSet();
+}
+
+st.stop();
})();
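
The setup step this test (and the two that follow) share is the addShard-style upsert against admin.system.version: write the shardIdentity document the way a config server would, then confirm the node reports sharding as enabled. Condensed into a sketch (here `conn` is a direct connection to a --shardsvr node and the connection string is a placeholder):

var shardIdentityDoc = {
    _id: 'shardIdentity',
    configsvrConnectionString: 'configRS/cfg1:27019,cfg2:27019',  // placeholder value
    shardName: 'newShard',
    clusterId: ObjectId()
};
assert.writeOK(conn.getDB('admin').system.version.update(
    {
        _id: shardIdentityDoc._id,
        shardName: shardIdentityDoc.shardName,
        clusterId: shardIdentityDoc.clusterId
    },
    {$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}},
    {upsert: true}));
var res = conn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled);  // the node now identifies itself as a shard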
diff --git a/jstests/sharding/shard_aware_init_secondaries.js b/jstests/sharding/shard_aware_init_secondaries.js
index 009610f47a6..a1387592212 100644
--- a/jstests/sharding/shard_aware_init_secondaries.js
+++ b/jstests/sharding/shard_aware_init_secondaries.js
@@ -5,68 +5,67 @@
*/
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 1});
-
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet({shardsvr: ''});
- var nodeList = replTest.nodeList();
- replTest.initiate({
- _id: replTest.name,
- members:
- [{_id: 0, host: nodeList[0], priority: 1}, {_id: 1, host: nodeList[1], priority: 0}]
- });
-
- var priConn = replTest.getPrimary();
-
- var configConnStr = st.configRS.getURL();
-
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: configConnStr,
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- // Simulate the upsert that is performed by a config server on addShard.
- var shardIdentityQuery = {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId
- };
- var shardIdentityUpdate = {
- $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
- };
- assert.writeOK(priConn.getDB('admin').system.version.update(
- shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
-
- var secConn = replTest.getSecondary();
- secConn.setSlaveOk(true);
-
- var res = secConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled, tojson(res));
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- var newMongodOptions = Object.extend(secConn.savedOptions, {restart: true});
- replTest.restart(replTest.getNodeId(secConn), newMongodOptions);
- replTest.waitForMaster();
- replTest.awaitSecondaryNodes();
-
- secConn = replTest.getSecondary();
- secConn.setSlaveOk(true);
-
- res = secConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled, tojson(res));
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- replTest.stopSet();
-
- st.stop();
+"use strict";
+
+var st = new ShardingTest({shards: 1});
+
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet({shardsvr: ''});
+var nodeList = replTest.nodeList();
+replTest.initiate({
+ _id: replTest.name,
+ members: [{_id: 0, host: nodeList[0], priority: 1}, {_id: 1, host: nodeList[1], priority: 0}]
+});
+
+var priConn = replTest.getPrimary();
+
+var configConnStr = st.configRS.getURL();
+
+var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: configConnStr,
+ shardName: 'newShard',
+ clusterId: ObjectId()
+};
+
+// Simulate the upsert that is performed by a config server on addShard.
+var shardIdentityQuery = {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId
+};
+var shardIdentityUpdate = {
+ $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
+};
+assert.writeOK(priConn.getDB('admin').system.version.update(
+ shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 2}}));
+
+var secConn = replTest.getSecondary();
+secConn.setSlaveOk(true);
+
+var res = secConn.getDB('admin').runCommand({shardingState: 1});
+
+assert(res.enabled, tojson(res));
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+var newMongodOptions = Object.extend(secConn.savedOptions, {restart: true});
+replTest.restart(replTest.getNodeId(secConn), newMongodOptions);
+replTest.waitForMaster();
+replTest.awaitSecondaryNodes();
+
+secConn = replTest.getSecondary();
+secConn.setSlaveOk(true);
+
+res = secConn.getDB('admin').runCommand({shardingState: 1});
+
+assert(res.enabled, tojson(res));
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+replTest.stopSet();
+
+st.stop();
})();
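
The secondary-side check above reduces to three steps: allow reads on the secondary, query shardingState directly on it, and compare each field against the identity document. In outline (a sketch reusing the test's own variables):

var secConn = replTest.getSecondary();
secConn.setSlaveOk(true);  // permit direct reads against the secondary
var res = secConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled, tojson(res));
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);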
diff --git a/jstests/sharding/shard_aware_on_add_shard.js b/jstests/sharding/shard_aware_on_add_shard.js
index 92c490f2982..68a0c871b88 100644
--- a/jstests/sharding/shard_aware_on_add_shard.js
+++ b/jstests/sharding/shard_aware_on_add_shard.js
@@ -4,63 +4,62 @@
*/
(function() {
- "use strict";
+"use strict";
- var waitForIsMaster = function(conn) {
- assert.soon(function() {
- var res = conn.getDB('admin').runCommand({isMaster: 1});
- return res.ismaster;
- });
- };
+var waitForIsMaster = function(conn) {
+ assert.soon(function() {
+ var res = conn.getDB('admin').runCommand({isMaster: 1});
+ return res.ismaster;
+ });
+};
- var checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
- var res = conn.getDB('admin').runCommand({shardingState: 1});
- assert.commandWorked(res);
- assert(res.enabled);
- assert.eq(configConnStr, res.configServer);
- assert.eq(shardName, res.shardName);
- assert(clusterId.equals(res.clusterId),
- 'cluster id: ' + tojson(clusterId) + ' != ' + tojson(res.clusterId));
- };
+var checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
+ var res = conn.getDB('admin').runCommand({shardingState: 1});
+ assert.commandWorked(res);
+ assert(res.enabled);
+ assert.eq(configConnStr, res.configServer);
+ assert.eq(shardName, res.shardName);
+ assert(clusterId.equals(res.clusterId),
+ 'cluster id: ' + tojson(clusterId) + ' != ' + tojson(res.clusterId));
+};
- var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
- var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
- assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
- assert.eq(1, res.state);
- };
+var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
+ var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
+ assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
+ assert.eq(1, res.state);
+};
- // Create the cluster to test adding shards to.
- var st = new ShardingTest({shards: 1});
- var clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
+// Create the cluster to test adding shards to.
+var st = new ShardingTest({shards: 1});
+var clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
- // Add a shard that is a standalone mongod.
+// Add a shard that is a standalone mongod.
- var standaloneConn = MongoRunner.runMongod({shardsvr: ''});
- waitForIsMaster(standaloneConn);
+var standaloneConn = MongoRunner.runMongod({shardsvr: ''});
+waitForIsMaster(standaloneConn);
- jsTest.log("Going to add standalone as shard: " + standaloneConn);
- var newShardName = "newShard";
- assert.commandWorked(st.s.adminCommand({addShard: standaloneConn.name, name: newShardName}));
- checkShardingStateInitialized(standaloneConn, st.configRS.getURL(), newShardName, clusterId);
- checkShardMarkedAsShardAware(st.s, newShardName);
+jsTest.log("Going to add standalone as shard: " + standaloneConn);
+var newShardName = "newShard";
+assert.commandWorked(st.s.adminCommand({addShard: standaloneConn.name, name: newShardName}));
+checkShardingStateInitialized(standaloneConn, st.configRS.getURL(), newShardName, clusterId);
+checkShardMarkedAsShardAware(st.s, newShardName);
- MongoRunner.stopMongod(standaloneConn);
+MongoRunner.stopMongod(standaloneConn);
- // Add a shard that is a replica set.
+// Add a shard that is a replica set.
- var replTest = new ReplSetTest({nodes: 1});
- replTest.startSet({shardsvr: ''});
- replTest.initiate();
- waitForIsMaster(replTest.getPrimary());
+var replTest = new ReplSetTest({nodes: 1});
+replTest.startSet({shardsvr: ''});
+replTest.initiate();
+waitForIsMaster(replTest.getPrimary());
- jsTest.log("Going to add replica set as shard: " + tojson(replTest));
- assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: replTest.getURL()}));
- checkShardingStateInitialized(
- replTest.getPrimary(), st.configRS.getURL(), replTest.getURL(), clusterId);
- checkShardMarkedAsShardAware(st.s, newShardName);
+jsTest.log("Going to add replica set as shard: " + tojson(replTest));
+assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: replTest.getURL()}));
+checkShardingStateInitialized(
+ replTest.getPrimary(), st.configRS.getURL(), replTest.getURL(), clusterId);
+checkShardMarkedAsShardAware(st.s, newShardName);
- replTest.stopSet();
-
- st.stop();
+replTest.stopSet();
+st.stop();
})();
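
This test verifies shard-awareness from both directions after addShard: the new member must report sharding as enabled, and the config metadata must mark it with state 1. Both checks in one place (a sketch; `st`, `conn`, and `newShardName` stand in for the test's variables):

assert.commandWorked(st.s.adminCommand({addShard: conn.name, name: newShardName}));
// Shard side: the member initialized its sharding state.
assert(conn.getDB('admin').runCommand({shardingState: 1}).enabled);
// Config side: the shard document is marked shard-aware.
var shardDoc = st.s.getDB('config').getCollection('shards').findOne({_id: newShardName});
assert.eq(1, shardDoc.state);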
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index abbfb47c1cf..9e7f572c3e9 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -2,57 +2,57 @@
* Test that a new primary that gets elected will properly perform shard initialization.
*/
(function() {
- "use strict";
-
- var st = new ShardingTest({shards: 1});
-
- var replTest = new ReplSetTest({nodes: 3});
- replTest.startSet({shardsvr: ''});
-
- var nodes = replTest.nodeList();
- replTest.initiate({
- _id: replTest.name,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1]},
- {_id: 2, host: nodes[2], arbiterOnly: true}
- ]
- });
-
- var primaryConn = replTest.getPrimary();
-
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: st.configRS.getURL(),
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- // Simulate the upsert that is performed by a config server on addShard.
- var shardIdentityQuery = {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId
- };
- var shardIdentityUpdate = {
- $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
- };
- assert.writeOK(primaryConn.getDB('admin').system.version.update(
- shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 'majority'}}));
-
- replTest.stopMaster();
- replTest.waitForMaster(30000);
-
- primaryConn = replTest.getPrimary();
-
- var res = primaryConn.getDB('admin').runCommand({shardingState: 1});
-
- assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- replTest.stopSet();
-
- st.stop();
+"use strict";
+
+var st = new ShardingTest({shards: 1});
+
+var replTest = new ReplSetTest({nodes: 3});
+replTest.startSet({shardsvr: ''});
+
+var nodes = replTest.nodeList();
+replTest.initiate({
+ _id: replTest.name,
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1]},
+ {_id: 2, host: nodes[2], arbiterOnly: true}
+ ]
+});
+
+var primaryConn = replTest.getPrimary();
+
+var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: st.configRS.getURL(),
+ shardName: 'newShard',
+ clusterId: ObjectId()
+};
+
+// Simulate the upsert that is performed by a config server on addShard.
+var shardIdentityQuery = {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId
+};
+var shardIdentityUpdate = {
+ $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
+};
+assert.writeOK(primaryConn.getDB('admin').system.version.update(
+ shardIdentityQuery, shardIdentityUpdate, {upsert: true, writeConcern: {w: 'majority'}}));
+
+replTest.stopMaster();
+replTest.waitForMaster(30000);
+
+primaryConn = replTest.getPrimary();
+
+var res = primaryConn.getDB('admin').runCommand({shardingState: 1});
+
+assert(res.enabled);
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+replTest.stopSet();
+
+st.stop();
})();
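
The failover step itself is small: stop the primary, wait for a new election, and assert the freshly elected primary re-initialized its sharding state from the majority-committed identity document. In isolation (a sketch against an initiated ReplSetTest `replTest`):

replTest.stopMaster();          // take down the current primary
replTest.waitForMaster(30000);  // wait up to 30s for the next election
var newPrimary = replTest.getPrimary();
var state = newPrimary.getDB('admin').runCommand({shardingState: 1});
assert(state.enabled, 'new primary did not initialize sharding state');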
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index f12d4c8482b..f417cdc4165 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -3,356 +3,347 @@
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
- var mongos = st.s0;
+var st = new ShardingTest({mongos: 1, shards: 2});
+var kDbName = 'db';
+var mongos = st.s0;
- function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-
- var ns = kDbName + '.foo';
- assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
-
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- function testAndClenaupWithKeyOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
+ var ns = kDbName + '.foo';
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+function testAndClenaupWithKeyOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- function testAndClenaupWithKeyNoIndexOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+function testAndClenaupWithKeyNoIndexOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- function getIndexSpecByName(coll, indexName) {
- var indexes = coll.getIndexes().filter(function(spec) {
- return spec.name === indexName;
- });
- assert.eq(1, indexes.length, 'index "' + indexName + '" not found"');
- return indexes[0];
- }
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- // Fail if db is not sharded.
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+function getIndexSpecByName(coll, indexName) {
+ var indexes = coll.getIndexes().filter(function(spec) {
+ return spec.name === indexName;
+ });
+ assert.eq(1, indexes.length, 'index "' + indexName + '" not found"');
+ return indexes[0];
+}
- // Fail if db is not sharding enabled.
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+// Fail if db is not sharded.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- // Verify wrong arguments errors.
- assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: {_id: 1}}));
+// Fail if sharding is not enabled on the db.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- // shardCollection may only be run against admin database.
- assert.commandFailed(
- mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+// Verify that wrong arguments produce errors.
+assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: {_id: 1}}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- // Can't shard if key is not specified.
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
+assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {}}));
+// shardCollection may only be run against admin database.
+assert.commandFailed(
+ mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- // Verify key format
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hahahashed"}}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+// Can't shard if key is not specified.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
- // Shard key cannot contain embedded objects.
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {a: 1}}}));
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {'a.b': 1}}}));
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {}}));
- // Shard key can contain dotted path to embedded element.
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.shard_key_dotted_path', key: {'_id.a': 1}}));
+// Verify key format
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hahahashed"}}));
- //
- // Test shardCollection's idempotency
- //
+// Shard key cannot contain embedded objects.
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {a: 1}}}));
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: {'a.b': 1}}}));
- // Succeed if a collection is already sharded with the same options.
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
- // Specifying the simple collation or not specifying a collation should be equivalent, because
- // if no collation is specified, the collection default collation is used.
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
+// Shard key can contain dotted path to embedded element.
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: kDbName + '.shard_key_dotted_path', key: {'_id.a': 1}}));
- // Fail if the collection is already sharded with different options.
- // different shard key
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {x: 1}}));
- // different 'unique'
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, unique: true}));
+//
+// Test shardCollection's idempotency
+//
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+// Succeed if a collection is already sharded with the same options.
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
+// Specifying the simple collation or not specifying a collation should be equivalent, because
+// if no collation is specified, the collection default collation is used.
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
- // Shard empty collections no index required.
- testAndClenaupWithKeyNoIndexOK({_id: 1});
- testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
+// Fail if the collection is already sharded with different options.
+// different shard key
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {x: 1}}));
+// different 'unique'
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, unique: true}));
- // Shard by a plain key.
- testAndClenaupWithKeyNoIndexOK({a: 1});
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- // Cant shard collection with data and no index on the shard key.
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 1});
+// Shard empty collections; no index required.
+testAndClenaupWithKeyNoIndexOK({_id: 1});
+testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyOK({a: 1});
+// Shard by a plain key.
+testAndClenaupWithKeyNoIndexOK({a: 1});
- // Shard by a hashed key.
- testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
+// Can't shard a collection with data and no index on the shard key.
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 1});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyOK({a: 1});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyOK({a: 'hashed'});
+// Shard by a hashed key.
+testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
- // Shard by a compound key.
- testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
- testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyOK({a: 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
- testAndClenaupWithKeyOK({x: 1, y: 1});
+// Shard by a compound key.
+testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
- testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
- testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 'hashed'});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
- // Shard by a key component.
- testAndClenaupWithKeyOK({'z.x': 1});
- testAndClenaupWithKeyOK({'z.x': 'hashed'});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+testAndClenaupWithKeyOK({x: 1, y: 1});
- // Can't shard by a multikey.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 1});
+testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
+testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 'hashed'});
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
+// Shard by a key component.
+testAndClenaupWithKeyOK({'z.x': 1});
+testAndClenaupWithKeyOK({'z.x': 'hashed'});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
+// Can't shard by a multikey.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 1});
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
- testAndClenaupWithKeyOK({a: 'hashed'});
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
- // Cant shard by a parallel arrays.
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
- testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+testAndClenaupWithKeyOK({a: 'hashed'});
- // Can't shard on unique hashed key.
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {aKey: "hashed"}, unique: true}));
+// Can't shard by parallel arrays.
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
+testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
- // If shardCollection has unique:true it must have a unique index.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey: 1}));
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: 1}, unique: true}));
+// Can't shard on unique hashed key.
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hashed"}, unique: true}));
- //
- // Session-related tests
- //
+// If shardCollection has unique:true it must have a unique index.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey: 1}));
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: 1}, unique: true}));
- // shardCollection can be called under a session.
- const sessionDb = mongos.startSession().getDatabase(kDbName);
- assert.commandWorked(
- sessionDb.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 'hashed'}}));
- sessionDb.getSession().endSession();
-
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
-
- //
- // Collation-related tests
- //
+//
+// Session-related tests
+//
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- // shardCollection should fail when the 'collation' option is not a nested object.
- assert.commandFailed(
- mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, collation: true}));
-
- // shardCollection should fail when the 'collation' option cannot be parsed.
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'unknown'}}));
-
- // shardCollection should fail when the 'collation' option is valid but is not the simple
- // collation.
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'en_US'}}));
-
- // shardCollection should succeed when the 'collation' option specifies the simple collation.
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should fail when it does not specify the 'collation' option but the
- // collection has a non-simple default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
-
- // shardCollection should fail for the key pattern {_id: 1} if the collection has a non-simple
- // default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should fail for the key pattern {a: 1} if there is already an index 'a_1',
- // but it has a non-simple collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).foo.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
- assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
-
- // shardCollection should succeed for the key pattern {a: 1} and collation {locale: 'simple'} if
- // there is no index 'a_1', but there is a non-simple collection default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
- var indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('collation'));
-
- // shardCollection should succeed for the key pattern {a: 1} if there are two indexes on {a: 1}
- // and one has the simple collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}, {name: "a_1_simple"}));
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(
- {a: 1}, {collation: {locale: 'en_US'}, name: "a_1_en_US"}));
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
-
- // shardCollection should fail on a non-empty collection when the only index available with the
- // shard key as a prefix has a non-simple collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
- // This index will inherit the collection's default collation.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should succeed on an empty collection with a non-simple default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
-
- // shardCollection should succeed on an empty collection with no default collation.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+// shardCollection can be called under a session.
+const sessionDb = mongos.startSession().getDatabase(kDbName);
+assert.commandWorked(
+ sessionDb.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 'hashed'}}));
+sessionDb.getSession().endSession();
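+// (Running the command through the session attaches a logical session id to
+// it; endSession() releases the server-side session resources.)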
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- //
- // Tests for the shell helper sh.shardCollection().
- //
+//
+// Collation-related tests
+//
- db = mongos.getDB(kDbName);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+// shardCollection should fail when the 'collation' option is not a nested object.
+assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}, collation: true}));
+
+// shardCollection should fail when the 'collation' option cannot be parsed.
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'unknown'}}));
+
+// shardCollection should fail when the 'collation' option is valid but is not the simple
+// collation.
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'en_US'}}));
+
+// shardCollection should succeed when the 'collation' option specifies the simple collation.
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should fail when it does not specify the 'collation' option but the
+// collection has a non-simple default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+// shardCollection should fail for the key pattern {_id: 1} if the collection has a non-simple
+// default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {_id: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should fail for the key pattern {a: 1} if there is already an index 'a_1',
+// but it has a non-simple collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}, {collation: {locale: 'en_US'}}));
+assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+// shardCollection should succeed for the key pattern {a: 1} and collation {locale: 'simple'} if
+// there is no index 'a_1', but there is a non-simple collection default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
+var indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('collation'));
+
+// shardCollection should succeed for the key pattern {a: 1} if there are two indexes on {a: 1}
+// and one has the simple collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}, {name: "a_1_simple"}));
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(
+ {a: 1}, {collation: {locale: 'en_US'}, name: "a_1_en_US"}));
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+// shardCollection should fail on a non-empty collection when the only index available with the
+// shard key as a prefix has a non-simple collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 'foo'}));
+// This index will inherit the collection's default collation.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should succeed on an empty collection with a non-simple default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {a: 1}, collation: {locale: 'simple'}}));
+
+// shardCollection should succeed on an empty collection with no default collation.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {a: 1}}));
+
+assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- // shardCollection() propagates the shard key and the correct defaults.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
- assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
-
- // shardCollection() propagates the value for 'unique'.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, true));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
- assert.eq(indexSpec.unique, true, tojson(indexSpec));
-
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, false));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
-
- // shardCollections() 'options' parameter must be an object.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.throws(function() {
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, 'not an object');
- });
-
- // shardCollection() propagates the value for 'collation'.
- // Currently only the simple collation is supported.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandFailed(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
- assert.commandWorked(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
-
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(
- mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
- assert.commandFailed(sh.shardCollection(kDbName + '.foo', {a: 1}));
- assert.commandFailed(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
- assert.commandWorked(
- sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
- indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
- assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
-
- // shardCollection() propagates the value for 'numInitialChunks'.
- mongos.getDB(kDbName).foo.drop();
- assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
- assert.commandWorked(
- sh.shardCollection(kDbName + '.foo', {a: "hashed"}, false, {numInitialChunks: 5}));
- st.printShardingStatus();
- var numChunks = st.config.chunks.find({ns: kDbName + '.foo'}).count();
- assert.eq(numChunks, 5, "unexpected number of chunks");
-
- st.stop();
+//
+// Tests for the shell helper sh.shardCollection().
+//
+db = mongos.getDB(kDbName);
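+// Note: the sh.* helpers used below operate on the shell's global 'db', which
+// is why it is reassigned here.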
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+// shardCollection() propagates the shard key and the correct defaults.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
+assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
+
+// shardCollection() propagates the value for 'unique'.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, true));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
+assert.eq(indexSpec.unique, true, tojson(indexSpec));
+
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(sh.shardCollection(kDbName + '.foo', {a: 1}, false));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('unique'), tojson(indexSpec));
+
+// shardCollection()'s 'options' parameter must be an object.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.throws(function() {
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, 'not an object');
+});
+
+// shardCollection() propagates the value for 'collation'.
+// Currently only the simple collation is supported.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandFailed(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
+assert.commandWorked(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
+
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo', {collation: {locale: 'en_US'}}));
+assert.commandFailed(sh.shardCollection(kDbName + '.foo', {a: 1}));
+assert.commandFailed(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'en_US'}}));
+assert.commandWorked(
+ sh.shardCollection(kDbName + '.foo', {a: 1}, false, {collation: {locale: 'simple'}}));
+indexSpec = getIndexSpecByName(mongos.getDB(kDbName).foo, 'a_1');
+assert(!indexSpec.hasOwnProperty('collation'), tojson(indexSpec));
+
+// shardCollection() propagates the value for 'numInitialChunks'.
+mongos.getDB(kDbName).foo.drop();
+assert.commandWorked(mongos.getDB(kDbName).createCollection('foo'));
+assert.commandWorked(
+ sh.shardCollection(kDbName + '.foo', {a: "hashed"}, false, {numInitialChunks: 5}));
+st.printShardingStatus();
+var numChunks = st.config.chunks.find({ns: kDbName + '.foo'}).count();
+assert.eq(numChunks, 5, "unexpected number of chunks");
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index 8782e4e132b..8030b40ee9a 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -1,204 +1,188 @@
// Test that shardCollection uses existing zone info to validate
// shard keys and do initial chunk splits.
(function() {
- 'use strict';
-
- var st = new ShardingTest({mongos: 1, shards: 3});
- var kDbName = 'test';
- var kCollName = 'foo';
- var ns = kDbName + '.' + kCollName;
- var zoneName = 'zoneName';
- var mongos = st.s0;
- var testDB = mongos.getDB(kDbName);
- var configDB = mongos.getDB('config');
- var shardName = st.shard0.shardName;
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-
- /**
- * Test that shardCollection correctly validates that a zone is associated with a shard.
- */
- function testShardZoneAssociationValidation(proposedShardKey, numberLongMin, numberLongMax) {
- var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
- var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
- assert.commandWorked(configDB.tags.insert(
- {_id: {ns: ns, min: zoneMin}, ns: ns, min: zoneMin, max: zoneMax, tag: zoneName}));
-
- var tagDoc = configDB.tags.findOne();
- assert.eq(ns, tagDoc.ns);
- assert.eq(zoneMin, tagDoc.min);
- assert.eq(zoneMax, tagDoc.max);
- assert.eq(zoneName, tagDoc.tag);
-
- assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
-
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+'use strict';
+
+var st = new ShardingTest({mongos: 1, shards: 3});
+var kDbName = 'test';
+var kCollName = 'foo';
+var ns = kDbName + '.' + kCollName;
+var zoneName = 'zoneName';
+var mongos = st.s0;
+var testDB = mongos.getDB(kDbName);
+var configDB = mongos.getDB('config');
+var shardName = st.shard0.shardName;
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+/**
+ * Test that shardCollection correctly validates that a zone is associated with a shard.
+ */
+function testShardZoneAssociationValidation(proposedShardKey, numberLongMin, numberLongMax) {
+ var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
+ var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
+ assert.commandWorked(configDB.tags.insert(
+ {_id: {ns: ns, min: zoneMin}, ns: ns, min: zoneMin, max: zoneMax, tag: zoneName}));
+
+ var tagDoc = configDB.tags.findOne();
+ assert.eq(ns, tagDoc.ns);
+ assert.eq(zoneMin, tagDoc.min);
+ assert.eq(zoneMax, tagDoc.max);
+ assert.eq(zoneName, tagDoc.tag);
+
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
+
+ assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
+
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+/**
+ * Test that shardCollection correctly validates shard key against existing zones.
+ */
+function testShardKeyValidation(proposedShardKey, numberLongMin, numberLongMax, success) {
+ assert.commandWorked(testDB.foo.createIndex(proposedShardKey));
+ assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+
+ var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
+ var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: zoneMin, max: zoneMax, zone: zoneName}));
+
+ var tagDoc = configDB.tags.findOne();
+    jsTestLog("Zone tag doc: " + tojson(tagDoc));
+ assert.eq(ns, tagDoc.ns);
+ assert.eq(zoneMin, tagDoc.min);
+ assert.eq(zoneMax, tagDoc.max);
+ assert.eq(zoneName, tagDoc.tag);
+
+ if (success) {
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ } else {
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
}
- /**
- * Test that shardCollection correctly validates shard key against existing zones.
- */
- function testShardKeyValidation(proposedShardKey, numberLongMin, numberLongMax, success) {
- assert.commandWorked(testDB.foo.createIndex(proposedShardKey));
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zoneName}));
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+/**
+ * Test that shardCollection uses existing zone ranges to split chunks.
+ */
+function testChunkSplits(collectionExists) {
+ var shardKey = {x: 1};
+ var ranges =
+ [{min: {x: 0}, max: {x: 10}}, {min: {x: 10}, max: {x: 20}}, {min: {x: 30}, max: {x: 40}}];
+ var shards = configDB.shards.find().toArray();
+ assert.eq(ranges.length, shards.length);
+ if (collectionExists) {
+ assert.commandWorked(testDB.foo.createIndex(shardKey));
+ }
- var zoneMin = numberLongMin ? {x: NumberLong(0)} : {x: 0};
- var zoneMax = numberLongMax ? {x: NumberLong(10)} : {x: 10};
+    // Create zones:
+    // shard0 - zoneName0 - [0, 10)
+    // shard1 - zoneName1 - [10, 20)
+    // shard2 - zoneName2 - [30, 40)
+ for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(
+ st.s.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: zoneMin, max: zoneMax, zone: zoneName}));
-
- var tagDoc = configDB.tags.findOne();
- jsTestLog("xxx tag doc " + tojson(tagDoc));
- assert.eq(ns, tagDoc.ns);
- assert.eq(zoneMin, tagDoc.min);
- assert.eq(zoneMax, tagDoc.max);
- assert.eq(zoneName, tagDoc.tag);
-
- if (success) {
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
- } else {
- assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: proposedShardKey}));
- }
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ {updateZoneKeyRange: ns, min: ranges[i].min, max: ranges[i].max, zone: zoneName + i}));
+ }
+ assert.eq(
+ configDB.tags.find().count(), shards.length, "failed to create tag documents correctly");
+ assert.eq(configDB.chunks.find({ns: ns}).count(),
+ 0,
+ "expect to see no chunk documents for the collection before shardCollection is run");
+
+ // shard the collection and validate the resulting chunks
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
+ var expectedChunks = [
+ {range: [{x: {"$minKey": 1}}, {x: 0}], shardId: st.shard0.shardName},
+ {range: [{x: 0}, {x: 10}], shardId: st.shard0.shardName}, // pre-defined
+        {range: [{x: 10}, {x: 20}], shardId: st.shard1.shardName},  // pre-defined
+        {range: [{x: 20}, {x: 30}], shardId: st.shard1.shardName},
+ {range: [{x: 30}, {x: 40}], shardId: st.shard2.shardName}, // pre-defined
+ {range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: st.shard2.shardName}
+ ];
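+    // Ranges not covered by any zone ([minKey, 0), [20, 30) and [40, maxKey))
+    // get chunks of their own, giving six chunks in total.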
+ var chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+ assert.eq(chunkDocs.length,
+ expectedChunks.length,
+ "shardCollection failed to create chunk documents correctly");
+ for (var i = 0; i < chunkDocs.length; i++) {
+ var errMsg = "expect to see chunk " + tojson(expectedChunks[i]) + " but found chunk " +
+ tojson(chunkDocs[i]);
+ assert.eq(expectedChunks[i].range[0], chunkDocs[i].min, errMsg);
+ assert.eq(expectedChunks[i].range[1], chunkDocs[i].max, errMsg);
+ assert.eq(expectedChunks[i].shardId, chunkDocs[i].shard, errMsg);
}
- /**
- * Test that shardCollection uses existing zone ranges to split chunks.
- */
- function testChunkSplits(collectionExists) {
- var shardKey = {x: 1};
- var ranges = [
- {min: {x: 0}, max: {x: 10}},
- {min: {x: 10}, max: {x: 20}},
- {min: {x: 30}, max: {x: 40}}
- ];
- var shards = configDB.shards.find().toArray();
- assert.eq(ranges.length, shards.length);
- if (collectionExists) {
- assert.commandWorked(testDB.foo.createIndex(shardKey));
- }
-
- // create zones:
- // shard0 - zonename0 - [0, 10)
- // shard1 - zonename0 - [10, 20)
- // shard2 - zonename0 - [30, 40)
- for (var i = 0; i < shards.length; i++) {
- assert.commandWorked(
- st.s.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
- assert.commandWorked(st.s.adminCommand({
- updateZoneKeyRange: ns,
- min: ranges[i].min,
- max: ranges[i].max,
- zone: zoneName + i
- }));
- }
- assert.eq(configDB.tags.find().count(),
- shards.length,
- "failed to create tag documents correctly");
- assert.eq(
- configDB.chunks.find({ns: ns}).count(),
- 0,
- "expect to see no chunk documents for the collection before shardCollection is run");
-
- // shard the collection and validate the resulting chunks
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
- var expectedChunks = [
- {range: [{x: {"$minKey": 1}}, {x: 0}], shardId: st.shard0.shardName},
- {range: [{x: 0}, {x: 10}], shardId: st.shard0.shardName}, // pre-defined
- {range: [{x: 10}, {x: 20}], shardId: st.shard1.shardName},
- {range: [{x: 20}, {x: 30}], shardId: st.shard1.shardName}, // pre-defined
- {range: [{x: 30}, {x: 40}], shardId: st.shard2.shardName}, // pre-defined
- {range: [{x: 40}, {x: {"$maxKey": 1}}], shardId: st.shard2.shardName}
- ];
- var chunkDocs = configDB.chunks.find({ns: ns}).toArray();
- assert.eq(chunkDocs.length,
- expectedChunks.length,
- "shardCollection failed to create chunk documents correctly");
- for (var i = 0; i < chunkDocs.length; i++) {
- var errMsg = "expect to see chunk " + tojson(expectedChunks[i]) + " but found chunk " +
- tojson(chunkDocs[i]);
- assert.eq(expectedChunks[i].range[0], chunkDocs[i].min, errMsg);
- assert.eq(expectedChunks[i].range[1], chunkDocs[i].max, errMsg);
- assert.eq(expectedChunks[i].shardId, chunkDocs[i].shard, errMsg);
- }
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+/**
+ * Tests that a non-empty collection associated with zones can be sharded.
+ */
+function testNonemptyZonedCollection() {
+ var shardKey = {x: 1};
+ var shards = configDB.shards.find().toArray();
+ var testColl = testDB.getCollection(kCollName);
+ var ranges =
+ [{min: {x: 0}, max: {x: 10}}, {min: {x: 10}, max: {x: 20}}, {min: {x: 20}, max: {x: 40}}];
+
+ for (let i = 0; i < 40; i++) {
+ assert.writeOK(testColl.insert({x: i}));
}
- /**
- * Tests that a non-empty collection associated with zones can be sharded.
- */
- function testNonemptyZonedCollection() {
- var shardKey = {x: 1};
- var shards = configDB.shards.find().toArray();
- var testColl = testDB.getCollection(kCollName);
- var ranges = [
- {min: {x: 0}, max: {x: 10}},
- {min: {x: 10}, max: {x: 20}},
- {min: {x: 20}, max: {x: 40}}
- ];
-
- for (let i = 0; i < 40; i++) {
- assert.writeOK(testColl.insert({x: i}));
- }
-
- assert.commandWorked(testColl.createIndex(shardKey));
-
- for (let i = 0; i < shards.length; i++) {
- assert.commandWorked(
- mongos.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
- assert.commandWorked(mongos.adminCommand({
- updateZoneKeyRange: ns,
- min: ranges[i].min,
- max: ranges[i].max,
- zone: zoneName + i
- }));
- }
-
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
-
- // Check that there is initially 1 chunk.
- assert.eq(1, configDB.chunks.count({ns: ns}));
-
- st.startBalancer();
-
- // Check that the chunks were moved properly.
- assert.soon(() => {
- let res = configDB.chunks.count({ns: ns});
- return res === 5;
- }, 'balancer never ran', 10 * 60 * 1000, 1000);
-
- assert.commandWorked(testDB.runCommand({drop: kCollName}));
+ assert.commandWorked(testColl.createIndex(shardKey));
+
+ for (let i = 0; i < shards.length; i++) {
+ assert.commandWorked(
+ mongos.adminCommand({addShardToZone: shards[i]._id, zone: zoneName + i}));
+ assert.commandWorked(mongos.adminCommand(
+ {updateZoneKeyRange: ns, min: ranges[i].min, max: ranges[i].max, zone: zoneName + i}));
}
- // test that shardCollection checks that a zone is associated with a shard.
- testShardZoneAssociationValidation({x: 1}, false, false);
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: shardKey}));
+
+ // Check that there is initially 1 chunk.
+ assert.eq(1, configDB.chunks.count({ns: ns}));
+
+ st.startBalancer();
+
+ // Check that the chunks were moved properly.
+ assert.soon(() => {
+ let res = configDB.chunks.count({ns: ns});
+ return res === 5;
+ }, 'balancer never ran', 10 * 60 * 1000, 1000);
+
+ assert.commandWorked(testDB.runCommand({drop: kCollName}));
+}
+
+// test that shardCollection checks that a zone is associated with a shard.
+testShardZoneAssociationValidation({x: 1}, false, false);
- // test that shardCollection uses existing zones to validate shard key
- testShardKeyValidation({x: 1}, false, false, true);
+// test that shardCollection uses existing zones to validate shard key
+testShardKeyValidation({x: 1}, false, false, true);
- // cannot use a completely different key from the zone shard key or a key
- // that has the zone shard key as a prefix is not allowed.
- testShardKeyValidation({y: 1}, false, false, false);
- testShardKeyValidation({x: 1, y: 1}, false, false, false);
+// Using a key completely different from the zone shard key, or a key that has
+// the zone shard key merely as a prefix, is not allowed.
+testShardKeyValidation({y: 1}, false, false, false);
+testShardKeyValidation({x: 1, y: 1}, false, false, false);
- // can only do hash sharding when the boundaries are of type NumberLong.
- testShardKeyValidation({x: "hashed"}, false, false, false);
- testShardKeyValidation({x: "hashed"}, true, false, false);
- testShardKeyValidation({x: "hashed"}, false, true, false);
- testShardKeyValidation({x: "hashed"}, true, true, true);
+// Hashed sharding is only allowed when both zone boundaries are of type NumberLong.
+testShardKeyValidation({x: "hashed"}, false, false, false);
+testShardKeyValidation({x: "hashed"}, true, false, false);
+testShardKeyValidation({x: "hashed"}, false, true, false);
+testShardKeyValidation({x: "hashed"}, true, true, true);
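+
+// For reference, the passing case above corresponds to a zone range with
+// NumberLong bounds on both sides, i.e. roughly what the helper builds from
+// its two boolean flags:
+//   st.s.adminCommand({updateZoneKeyRange: ns,
+//                      min: {x: NumberLong(0)},
+//                      max: {x: NumberLong(10)},
+//                      zone: zoneName});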
- assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: zoneName}));
+assert.commandWorked(st.s.adminCommand({removeShardFromZone: shardName, zone: zoneName}));
- // test that shardCollection uses zone ranges to split chunks
+// test that shardCollection uses zone ranges to split chunks
- testChunkSplits(false);
- testChunkSplits(true);
+testChunkSplits(false);
+testChunkSplits(true);
- testNonemptyZonedCollection();
+testNonemptyZonedCollection();
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/shard_collection_verify_initial_chunks.js b/jstests/sharding/shard_collection_verify_initial_chunks.js
index e7072132b11..65c5897371e 100644
--- a/jstests/sharding/shard_collection_verify_initial_chunks.js
+++ b/jstests/sharding/shard_collection_verify_initial_chunks.js
@@ -3,55 +3,47 @@
* and empty/non-empty collections.
*/
(function() {
- 'use strict';
-
- let st = new ShardingTest({mongos: 1, shards: 2});
- let mongos = st.s0;
-
- let config = mongos.getDB("config");
- let db = mongos.getDB('TestDB');
-
- assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
- st.ensurePrimaryShard('TestDB', st.shard1.shardName);
-
- function checkChunkCounts(collName, chunksOnShard0, chunksOnShard1) {
- let counts = st.chunkCounts(collName, 'TestDB');
- assert.eq(chunksOnShard0,
- counts[st.shard0.shardName],
- 'Count mismatch on shard0: ' + tojson(counts));
- assert.eq(chunksOnShard1,
- counts[st.shard1.shardName],
- 'Count mismatch on shard1: ' + tojson(counts));
- }
-
- // Unsupported: Range sharding + numInitialChunks
- assert.commandFailed(mongos.adminCommand(
- {shardCollection: 'TestDB.RangeCollEmpty', key: {aKey: 1}, numInitialChunks: 6}));
-
- // Unsupported: Hashed sharding + numInitialChunks + non-empty collection
- assert.writeOK(db.HashedCollNotEmpty.insert({aKey: 1}));
- assert.commandWorked(db.HashedCollNotEmpty.createIndex({aKey: "hashed"}));
- assert.commandFailed(mongos.adminCommand({
- shardCollection: 'TestDB.HashedCollNotEmpty',
- key: {aKey: "hashed"},
- numInitialChunks: 6
- }));
-
- // Supported: Hashed sharding + numInitialChunks + empty collection
- // Expected: Even chunk distribution
- assert.commandWorked(db.HashedCollEmpty.createIndex({aKey: "hashed"}));
- assert.commandWorked(mongos.adminCommand(
- {shardCollection: 'TestDB.HashedCollEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
- checkChunkCounts('HashedCollEmpty', 3, 3);
-
- // Supported: Hashed sharding + numInitialChunks + non-existent collection
- // Expected: Even chunk distribution
- assert.commandWorked(mongos.adminCommand({
- shardCollection: 'TestDB.HashedCollNonExistent',
- key: {aKey: "hashed"},
- numInitialChunks: 6
- }));
- checkChunkCounts('HashedCollNonExistent', 3, 3);
-
- st.stop();
+'use strict';
+
+let st = new ShardingTest({mongos: 1, shards: 2});
+let mongos = st.s0;
+
+let config = mongos.getDB("config");
+let db = mongos.getDB('TestDB');
+
+assert.commandWorked(mongos.adminCommand({enableSharding: 'TestDB'}));
+st.ensurePrimaryShard('TestDB', st.shard1.shardName);
+
+function checkChunkCounts(collName, chunksOnShard0, chunksOnShard1) {
+ let counts = st.chunkCounts(collName, 'TestDB');
+ assert.eq(
+ chunksOnShard0, counts[st.shard0.shardName], 'Count mismatch on shard0: ' + tojson(counts));
+ assert.eq(
+ chunksOnShard1, counts[st.shard1.shardName], 'Count mismatch on shard1: ' + tojson(counts));
+}
+
+// Unsupported: Range sharding + numInitialChunks
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: 'TestDB.RangeCollEmpty', key: {aKey: 1}, numInitialChunks: 6}));
+
+// Unsupported: Hashed sharding + numInitialChunks + non-empty collection
+assert.writeOK(db.HashedCollNotEmpty.insert({aKey: 1}));
+assert.commandWorked(db.HashedCollNotEmpty.createIndex({aKey: "hashed"}));
+assert.commandFailed(mongos.adminCommand(
+ {shardCollection: 'TestDB.HashedCollNotEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
+
+// Supported: Hashed sharding + numInitialChunks + empty collection
+// Expected: Even chunk distribution
+assert.commandWorked(db.HashedCollEmpty.createIndex({aKey: "hashed"}));
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: 'TestDB.HashedCollEmpty', key: {aKey: "hashed"}, numInitialChunks: 6}));
+checkChunkCounts('HashedCollEmpty', 3, 3);
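+// (numInitialChunks: 6 spread across the 2 shards gives 3 chunks apiece.)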
+
+// Supported: Hashed sharding + numInitialChunks + non-existent collection
+// Expected: Even chunk distribution
+assert.commandWorked(mongos.adminCommand(
+ {shardCollection: 'TestDB.HashedCollNonExistent', key: {aKey: "hashed"}, numInitialChunks: 6}));
+checkChunkCounts('HashedCollNonExistent', 3, 3);
+
+st.stop();
})();
diff --git a/jstests/sharding/shard_config_db_collections.js b/jstests/sharding/shard_config_db_collections.js
index 73e711946c1..8f8f324957b 100644
--- a/jstests/sharding/shard_config_db_collections.js
+++ b/jstests/sharding/shard_config_db_collections.js
@@ -1,52 +1,50 @@
(function() {
- 'use strict';
+'use strict';
- // Database-level tests
- {
- var st = new ShardingTest({shards: 2});
- var config = st.s.getDB('config');
- var admin = st.s.getDB('admin');
+// Database-level tests
+{
+ var st = new ShardingTest({shards: 2});
+ var config = st.s.getDB('config');
+ var admin = st.s.getDB('admin');
- // At first, there should not be an entry for config
- assert.eq(0, config.databases.count({"_id": "config"}));
+ // At first, there should not be an entry for config
+ assert.eq(0, config.databases.count({"_id": "config"}));
- // Test that we can enable sharding on the config db
- assert.commandWorked(admin.runCommand({enableSharding: 'config'}));
+ // Test that we can enable sharding on the config db
+ assert.commandWorked(admin.runCommand({enableSharding: 'config'}));
- // We should never have a metadata doc for config, it is generated in-mem
- assert.eq(0, config.databases.count({"_id": "config"}));
+ // We should never have a metadata doc for config, it is generated in-mem
+    // We should never have a metadata doc for config; it is generated in memory.
- // Test that you cannot set the primary shard for config (not even to 'config')
- assert.commandFailed(admin.runCommand({movePrimary: 'config', to: st.shard0.shardName}));
- assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'config'}));
+ // Test that you cannot set the primary shard for config (not even to 'config')
+ assert.commandFailed(admin.runCommand({movePrimary: 'config', to: st.shard0.shardName}));
+ assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'config'}));
- st.stop();
- }
+ st.stop();
+}
- // Test that only system.sessions may be sharded.
- {
- var st = new ShardingTest({shards: 2});
- var admin = st.s.getDB('admin');
+// Test that only system.sessions may be sharded.
+{
+ var st = new ShardingTest({shards: 2});
+ var admin = st.s.getDB('admin');
- assert.commandWorked(
- admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
- assert.eq(0, st.s.getDB('config').chunks.count({"shard": "config"}));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
+ assert.eq(0, st.s.getDB('config').chunks.count({"shard": "config"}));
- assert.commandFailed(
- admin.runCommand({shardCollection: "config.anythingelse", key: {_id: 1}}));
+ assert.commandFailed(admin.runCommand({shardCollection: "config.anythingelse", key: {_id: 1}}));
- st.stop();
- }
+ st.stop();
+}
- // Cannot shard things in config without shards.
- {
- var st = new ShardingTest({shards: 0});
- var admin = st.s.getDB('admin');
+// Cannot shard things in config without shards.
+{
+ var st = new ShardingTest({shards: 0});
+ var admin = st.s.getDB('admin');
- assert.commandFailed(
- admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
-
- st.stop();
- }
+ assert.commandFailed(
+        admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
+
+    st.stop();
+}
})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 1c8415662a7..8a5c19d1eb9 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,37 +1,37 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
- var db = s.getDB("test");
+var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
+var db = s.getDB("test");
- var stringSize = 10000;
- var numDocs = 2000;
+var stringSize = 10000;
+var numDocs = 2000;
- // we want a lot of data, so lets make a string to cheat :)
- var bigString = new Array(stringSize).toString();
- var docSize = Object.bsonsize({_id: numDocs, s: bigString});
- var totalSize = docSize * numDocs;
- print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
+// We want a lot of data, so let's make a big string to cheat :)
+var bigString = new Array(stringSize).toString();
+var docSize = Object.bsonsize({_id: numDocs, s: bigString});
+var totalSize = docSize * numDocs;
+print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
- var bulk = db.data.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
- }
- assert.writeOK(bulk.execute());
+var bulk = db.data.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+}
+assert.writeOK(bulk.execute());
- var avgObjSize = db.data.stats().avgObjSize;
- var dataSize = db.data.stats().size;
- assert.lte(totalSize, dataSize);
+var avgObjSize = db.data.stats().avgObjSize;
+var dataSize = db.data.stats().size;
+assert.lte(totalSize, dataSize);
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
- printjson(res);
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+printjson(res);
- // number of chunks should be approx equal to the total data size / half the chunk size
- var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
- var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
- assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
+// The number of chunks should be approximately the total data size divided by half the chunk size.
+var numChunks = s.config.chunks.find({ns: 'test.data'}).itcount();
+var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
+assert(Math.abs(numChunks - guess) < 2, "unexpected number of chunks");
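+// Sanity check on the estimate above: with chunkSize set to 1MB, autosplit
+// aims to split at about half a chunk (~512KB), so roughly
+// dataSize / (512KB + avgObjSize) chunks are expected, give or take one.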
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
index 4910b5e8964..91a6abca2ee 100644
--- a/jstests/sharding/shard_existing_coll_chunk_count.js
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -5,171 +5,170 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
-
- // TODO (SERVER-37699): Lower logging verbosity.
- var s = new ShardingTest({
- name: "shard_existing_coll_chunk_count",
- shards: 1,
- mongos: 1,
- other: {enableAutoSplit: true},
- });
-
- assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
-
- var collNum = 0;
- var overhead = Object.bsonsize({_id: ObjectId(), i: 1, pad: ""});
-
- var getNumberChunks = function(ns) {
- return s.getDB("config").getCollection("chunks").count({ns});
- };
-
- var runCase = function(opts) {
- // Expected options.
- assert.gte(opts.docSize, 0);
- assert.gte(opts.stages.length, 2);
-
- // Compute padding.
- if (opts.docSize < overhead) {
- var pad = "";
- } else {
- var pad = (new Array(opts.docSize - overhead + 1)).join(' ');
- }
-
- collNum++;
- var db = s.getDB("test");
- var collName = "coll" + collNum;
- var coll = db.getCollection(collName);
- var i = 0;
- var limit = 0;
- var stageNum = 0;
- var stage = opts.stages[stageNum];
-
- // Insert initial docs.
- var bulk = coll.initializeUnorderedBulkOp();
+'use strict';
+load('jstests/sharding/autosplit_include.js');
+
+// TODO (SERVER-37699): Lower logging verbosity.
+var s = new ShardingTest({
+ name: "shard_existing_coll_chunk_count",
+ shards: 1,
+ mongos: 1,
+ other: {enableAutoSplit: true},
+});
+
+assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
+
+var collNum = 0;
+var overhead = Object.bsonsize({_id: ObjectId(), i: 1, pad: ""});
+
+var getNumberChunks = function(ns) {
+ return s.getDB("config").getCollection("chunks").count({ns});
+};
+
+var runCase = function(opts) {
+    // Validate the expected options.
+ assert.gte(opts.docSize, 0);
+ assert.gte(opts.stages.length, 2);
+
+ // Compute padding.
+ if (opts.docSize < overhead) {
+ var pad = "";
+ } else {
+ var pad = (new Array(opts.docSize - overhead + 1)).join(' ');
+ }
+
+ collNum++;
+ var db = s.getDB("test");
+ var collName = "coll" + collNum;
+ var coll = db.getCollection(collName);
+ var i = 0;
+ var limit = 0;
+ var stageNum = 0;
+ var stage = opts.stages[stageNum];
+
+ // Insert initial docs.
+ var bulk = coll.initializeUnorderedBulkOp();
+ limit += stage.numDocsToInsert;
+ for (; i < limit; i++) {
+ bulk.insert({i, pad});
+ }
+ assert.writeOK(bulk.execute());
+
+ // Create shard key index.
+ assert.commandWorked(coll.createIndex({i: 1}));
+
+ // Shard collection.
+ assert.commandWorked(s.s.adminCommand({shardcollection: coll.getFullName(), key: {i: 1}}));
+
+ // Confirm initial number of chunks.
+ var numChunks = getNumberChunks(coll.getFullName());
+ assert.eq(numChunks,
+ stage.expectedNumChunks,
+ 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
+ ' initial chunks, but found ' + numChunks + '\nopts: ' + tojson(opts) +
+ '\nchunks:\n' + s.getChunksString(coll.getFullName()));
+
+ // Do the rest of the stages.
+ for (stageNum = 1; stageNum < opts.stages.length; stageNum++) {
+ stage = opts.stages[stageNum];
+
+ // Insert the later docs (one at a time, to maximise the autosplit effects).
limit += stage.numDocsToInsert;
for (; i < limit; i++) {
- bulk.insert({i, pad});
- }
- assert.writeOK(bulk.execute());
+ coll.insert({i, pad});
- // Create shard key index.
- assert.commandWorked(coll.createIndex({i: 1}));
-
- // Shard collection.
- assert.commandWorked(s.s.adminCommand({shardcollection: coll.getFullName(), key: {i: 1}}));
+ waitForOngoingChunkSplits(s);
+ }
- // Confirm initial number of chunks.
+ // Confirm number of chunks for this stage.
var numChunks = getNumberChunks(coll.getFullName());
- assert.eq(numChunks,
- stage.expectedNumChunks,
- 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
- ' initial chunks, but found ' + numChunks + '\nopts: ' + tojson(opts) +
- '\nchunks:\n' + s.getChunksString(coll.getFullName()));
-
- // Do the rest of the stages.
- for (stageNum = 1; stageNum < opts.stages.length; stageNum++) {
- stage = opts.stages[stageNum];
-
- // Insert the later docs (one at a time, to maximise the autosplit effects).
- limit += stage.numDocsToInsert;
- for (; i < limit; i++) {
- coll.insert({i, pad});
-
- waitForOngoingChunkSplits(s);
- }
-
- // Confirm number of chunks for this stage.
- var numChunks = getNumberChunks(coll.getFullName());
- assert.gte(numChunks,
- stage.expectedNumChunks,
- 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
- ' chunks for stage ' + stageNum + ', but found ' + numChunks +
- '\nopts: ' + tojson(opts) + '\nchunks:\n' +
- s.getChunksString(coll.getFullName()));
- }
- };
-
- // Original problematic case.
- runCase({
- docSize: 0,
- stages: [
- {numDocsToInsert: 20000, expectedNumChunks: 1},
- {numDocsToInsert: 7, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Original problematic case (worse).
- runCase({
- docSize: 0,
- stages: [
- {numDocsToInsert: 90000, expectedNumChunks: 1},
- {numDocsToInsert: 7, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Pathological case #1.
- runCase({
- docSize: 522,
- stages: [
- {numDocsToInsert: 8191, expectedNumChunks: 1},
- {numDocsToInsert: 2, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Pathological case #2.
- runCase({
- docSize: 522,
- stages: [
- {numDocsToInsert: 8192, expectedNumChunks: 1},
- {numDocsToInsert: 8192, expectedNumChunks: 1},
- ],
- });
-
- // Lower chunksize to 1MB, and restart the mongod for it to take. We also
- // need to restart mongos for the case of the last-stable suite where the
- // shard is also last-stable.
- assert.writeOK(
- s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
- upsert: true
- }));
-
- s.restartMongos(0);
- s.restartShardRS(0);
-
- // Original problematic case, scaled down to smaller chunksize.
- runCase({
- docSize: 0,
- stages: [
- {numDocsToInsert: 10000, expectedNumChunks: 1},
- {numDocsToInsert: 10, expectedNumChunks: 1},
- {numDocsToInsert: 20, expectedNumChunks: 1},
- {numDocsToInsert: 40, expectedNumChunks: 1},
- {numDocsToInsert: 1000, expectedNumChunks: 1},
- ],
- });
-
- // Docs just smaller than half chunk size.
- runCase({
- docSize: 510 * 1024,
- stages: [
- {numDocsToInsert: 10, expectedNumChunks: 6},
- {numDocsToInsert: 10, expectedNumChunks: 10},
- ],
- });
-
- // Docs just larger than half chunk size.
- runCase({
- docSize: 514 * 1024,
- stages: [
- {numDocsToInsert: 10, expectedNumChunks: 10},
- {numDocsToInsert: 10, expectedNumChunks: 18},
- ],
- });
-
- s.stop();
+ assert.gte(numChunks,
+ stage.expectedNumChunks,
+ 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
+ ' chunks for stage ' + stageNum + ', but found ' + numChunks + '\nopts: ' +
+ tojson(opts) + '\nchunks:\n' + s.getChunksString(coll.getFullName()));
+ }
+};
+
+// Original problematic case.
+runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 20000, expectedNumChunks: 1},
+ {numDocsToInsert: 7, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Original problematic case (worse).
+runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 90000, expectedNumChunks: 1},
+ {numDocsToInsert: 7, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Pathological case #1.
+runCase({
+ docSize: 522,
+ stages: [
+ {numDocsToInsert: 8191, expectedNumChunks: 1},
+ {numDocsToInsert: 2, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Pathological case #2.
+runCase({
+ docSize: 522,
+ stages: [
+ {numDocsToInsert: 8192, expectedNumChunks: 1},
+ {numDocsToInsert: 8192, expectedNumChunks: 1},
+ ],
+});
+
+// Lower chunksize to 1MB, and restart the mongod for it to take effect. We also
+// need to restart mongos for the case of the last-stable suite where the
+// shard is also last-stable.
+assert.writeOK(
+ s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
+ upsert: true
+ }));
+
+s.restartMongos(0);
+s.restartShardRS(0);
+
+// Original problematic case, scaled down to smaller chunksize.
+runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 10000, expectedNumChunks: 1},
+ {numDocsToInsert: 10, expectedNumChunks: 1},
+ {numDocsToInsert: 20, expectedNumChunks: 1},
+ {numDocsToInsert: 40, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+});
+
+// Docs just smaller than half chunk size.
+runCase({
+ docSize: 510 * 1024,
+ stages: [
+ {numDocsToInsert: 10, expectedNumChunks: 6},
+ {numDocsToInsert: 10, expectedNumChunks: 10},
+ ],
+});
+
+// Docs just larger than half chunk size.
+runCase({
+ docSize: 514 * 1024,
+ stages: [
+ {numDocsToInsert: 10, expectedNumChunks: 10},
+ {numDocsToInsert: 10, expectedNumChunks: 18},
+ ],
+});
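+
+// Rough arithmetic for the two cases above, given the 1MB chunksize set
+// earlier: docs just under half a chunk can be packed two to a chunk before a
+// split is needed, while docs just over half a chunk force roughly one split
+// per document, hence the higher expected counts.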
+
+s.stop();
})();
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index b7c3453134f..8c5235b50a4 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -8,98 +8,98 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- load('jstests/replsets/rslib.js');
+load('jstests/replsets/rslib.js');
- var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
- var shardPri = st.rs0.getPrimary();
+var shardPri = st.rs0.getPrimary();
- // Note: Adding new replica set member by hand because of SERVER-24011.
+// Note: Adding new replica set member by hand because of SERVER-24011.
- var newNode = MongoRunner.runMongod(
- {configsvr: '', replSet: st.configRS.name, storageEngine: 'wiredTiger'});
+var newNode =
+ MongoRunner.runMongod({configsvr: '', replSet: st.configRS.name, storageEngine: 'wiredTiger'});
- var replConfig = st.configRS.getReplSetConfigFromNode();
- replConfig.version += 1;
- replConfig.members.push({_id: 3, host: newNode.host});
+var replConfig = st.configRS.getReplSetConfigFromNode();
+replConfig.version += 1;
+replConfig.members.push({_id: 3, host: newNode.host});
- reconfig(st.configRS, replConfig);
+reconfig(st.configRS, replConfig);
- /**
- * Returns true if the shardIdentity document has all the replica set member nodes in the
- * expectedConfigStr.
- */
- var checkConfigStrUpdated = function(conn, expectedConfigStr) {
- var shardIdentity = conn.getDB('admin').system.version.findOne({_id: 'shardIdentity'});
+/**
+ * Returns true if the shardIdentity document has all the replica set member nodes in the
+ * expectedConfigStr.
+ */
+var checkConfigStrUpdated = function(conn, expectedConfigStr) {
+ var shardIdentity = conn.getDB('admin').system.version.findOne({_id: 'shardIdentity'});
- var shardConfigsvrStr = shardIdentity.configsvrConnectionString;
- var shardConfigReplName = shardConfigsvrStr.split('/')[0];
- var expectedReplName = expectedConfigStr.split('/')[0];
+ var shardConfigsvrStr = shardIdentity.configsvrConnectionString;
+ var shardConfigReplName = shardConfigsvrStr.split('/')[0];
+ var expectedReplName = expectedConfigStr.split('/')[0];
- assert.eq(expectedReplName, shardConfigReplName);
+ assert.eq(expectedReplName, shardConfigReplName);
- var expectedHostList = expectedConfigStr.split('/')[1].split(',');
- var shardConfigHostList = shardConfigsvrStr.split('/')[1].split(',');
+ var expectedHostList = expectedConfigStr.split('/')[1].split(',');
+ var shardConfigHostList = shardConfigsvrStr.split('/')[1].split(',');
- if (expectedHostList.length != shardConfigHostList.length) {
- return false;
- }
+ if (expectedHostList.length != shardConfigHostList.length) {
+ return false;
+ }
- for (var x = 0; x < expectedHostList.length; x++) {
- if (shardConfigsvrStr.indexOf(expectedHostList[x]) == -1) {
- return false;
- }
+ for (var x = 0; x < expectedHostList.length; x++) {
+ if (shardConfigsvrStr.indexOf(expectedHostList[x]) == -1) {
+ return false;
}
+ }
- return true;
- };
+ return true;
+};
- var origConfigConnStr = st.configRS.getURL();
- var expectedConfigStr = origConfigConnStr + ',' + newNode.host;
- assert.soon(function() {
- return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr);
- });
+var origConfigConnStr = st.configRS.getURL();
+var expectedConfigStr = origConfigConnStr + ',' + newNode.host;
+assert.soon(function() {
+ return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr);
+});
- var secConn = st.rs0.getSecondary();
- secConn.setSlaveOk(true);
- assert.soon(function() {
- return checkConfigStrUpdated(secConn, expectedConfigStr);
- });
+var secConn = st.rs0.getSecondary();
+secConn.setSlaveOk(true);
+assert.soon(function() {
+ return checkConfigStrUpdated(secConn, expectedConfigStr);
+});
- //
- // Remove the newly added member from the config replSet while the shards are down.
- // Check that the shard identity document will be updated with the new replSet connection
- // string when they come back up.
- //
+//
+// Remove the newly added member from the config replSet while the shards are down.
+// Check that the shard identity document will be updated with the new replSet connection
+// string when they come back up.
+//
- st.rs0.stop(0);
- st.rs0.stop(1);
+st.rs0.stop(0);
+st.rs0.stop(1);
- MongoRunner.stopMongod(newNode);
+MongoRunner.stopMongod(newNode);
- replConfig = st.configRS.getReplSetConfigFromNode();
- replConfig.version += 1;
- replConfig.members.pop();
+replConfig = st.configRS.getReplSetConfigFromNode();
+replConfig.version += 1;
+replConfig.members.pop();
- reconfig(st.configRS, replConfig);
+reconfig(st.configRS, replConfig);
- st.rs0.restart(0, {shardsvr: ''});
- st.rs0.restart(1, {shardsvr: ''});
+st.rs0.restart(0, {shardsvr: ''});
+st.rs0.restart(1, {shardsvr: ''});
- st.rs0.waitForMaster();
- st.rs0.awaitSecondaryNodes();
+st.rs0.waitForMaster();
+st.rs0.awaitSecondaryNodes();
- assert.soon(function() {
- return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr);
- });
+assert.soon(function() {
+ return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr);
+});
- secConn = st.rs0.getSecondary();
- secConn.setSlaveOk(true);
- assert.soon(function() {
- return checkConfigStrUpdated(secConn, origConfigConnStr);
- });
+secConn = st.rs0.getSecondary();
+secConn.setSlaveOk(true);
+assert.soon(function() {
+ return checkConfigStrUpdated(secConn, origConfigConnStr);
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index e799a79600d..b0a3f9b891c 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -5,131 +5,129 @@
*/
(function() {
- "use strict";
-
- load('jstests/libs/write_concern_util.js');
-
- var st = new ShardingTest({shards: 1});
-
- var replTest = new ReplSetTest({nodes: 3});
- var nodes = replTest.startSet({shardsvr: ''});
- replTest.initiate();
-
- var priConn = replTest.getPrimary();
- var secondaries = replTest.getSecondaries();
- var configConnStr = st.configRS.getURL();
-
- // Shards start in FCV 4.0 until a config server reaches out to them. This causes storage to
- // shutdown with 4.0 compatible files, requiring rollback via refetch.
- priConn.adminCommand({setFeatureCompatibilityVersion: "4.0"});
-
- // Wait for the secondaries to have the latest oplog entries before stopping the fetcher to
- // avoid the situation where one of the secondaries will not have an overlapping oplog with
- // the other nodes once the primary is killed.
- replTest.awaitSecondaryNodes();
-
- replTest.awaitReplication();
-
- stopServerReplication(secondaries);
-
- jsTest.log("inserting shardIdentity document to primary that shouldn't replicate");
-
- var shardIdentityDoc = {
- _id: 'shardIdentity',
- configsvrConnectionString: configConnStr,
- shardName: 'newShard',
- clusterId: ObjectId()
- };
-
- assert.writeOK(priConn.getDB('admin').system.version.update(
- {_id: 'shardIdentity'}, shardIdentityDoc, {upsert: true}));
-
- // Ensure sharding state on the primary was initialized
- var res = priConn.getDB('admin').runCommand({shardingState: 1});
- assert(res.enabled, tojson(res));
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
- assert.eq(shardIdentityDoc.shardName, res.shardName);
- assert.eq(shardIdentityDoc.clusterId, res.clusterId);
-
- // Ensure sharding state on the secondaries was *not* initialized
- secondaries.forEach(function(secondary) {
- secondary.setSlaveOk(true);
- res = secondary.getDB('admin').runCommand({shardingState: 1});
- assert(!res.enabled, tojson(res));
- });
-
- // Ensure manually deleting the shardIdentity document is not allowed.
- assert.writeErrorWithCode(priConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}),
- 40070);
-
- jsTest.log("shutting down primary");
- // Shut down the primary so a secondary gets elected that definitely won't have replicated the
- // shardIdentity insert, which should trigger a rollback on the original primary when it comes
- // back online.
- replTest.stop(priConn);
-
- // Disable the fail point so that the elected node can exit drain mode and finish becoming
- // primary.
- restartServerReplication(secondaries);
-
- // Wait for a new healthy primary
- var newPriConn = replTest.getPrimary();
- assert.neq(priConn, newPriConn);
- assert.writeOK(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
-
- // Restart the original primary so it triggers a rollback of the shardIdentity insert.
- jsTest.log("Restarting original primary");
- priConn = replTest.restart(priConn);
-
- // Wait until we cannot create a connection to the former primary, which indicates that it must
- // have shut itself down during the rollback.
- jsTest.log("Waiting for original primary to rollback and shut down");
- assert.soon(
- function() {
- try {
- var newConn = new Mongo(priConn.host);
- return false;
- } catch (x) {
- return true;
- }
- },
- function() {
- var oldPriOplog = priConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
- var newPriOplog =
- newPriConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
- return "timed out waiting for original primary to shut down after rollback. " +
- "Old primary oplog: " + tojson(oldPriOplog) + "; new primary oplog: " +
- tojson(newPriOplog);
- },
- 90000);
-
- // Restart the original primary again. This time, the shardIdentity document should already be
- // rolled back, so there shouldn't be any rollback and the node should stay online.
- jsTest.log(
- "Restarting original primary a second time and waiting for it to successfully become " +
- "secondary");
- try {
- // Join() with the crashed mongod and ignore its bad exit status.
- MongoRunner.stopMongod(priConn);
- } catch (e) {
- // expected
- }
- priConn = replTest.restart(priConn, {shardsvr: ''});
- priConn.setSlaveOk();
-
- // Wait for the old primary to replicate the document that was written to the new primary while
- // it was shut down.
- assert.soonNoExcept(function() {
- return priConn.getDB('test').foo.findOne();
- });
-
- // Ensure that there's no sharding state on the restarted original primary, since the
- // shardIdentity doc should have been rolled back.
- res = priConn.getDB('admin').runCommand({shardingState: 1});
- assert(!res.enabled, tojson(res));
- assert.eq(null, priConn.getDB('admin').system.version.findOne({_id: 'shardIdentity'}));
+"use strict";
+
+load('jstests/libs/write_concern_util.js');
+
+var st = new ShardingTest({shards: 1});
+
+var replTest = new ReplSetTest({nodes: 3});
+var nodes = replTest.startSet({shardsvr: ''});
+replTest.initiate();
+
+var priConn = replTest.getPrimary();
+var secondaries = replTest.getSecondaries();
+var configConnStr = st.configRS.getURL();
+
+// Shards start in FCV 4.0 until a config server reaches out to them. This causes storage to
+// shut down with 4.0-compatible files, requiring rollback via refetch.
+priConn.adminCommand({setFeatureCompatibilityVersion: "4.0"});
+
+// Wait for the secondaries to have the latest oplog entries before stopping the fetcher to
+// avoid the situation where one of the secondaries will not have an overlapping oplog with
+// the other nodes once the primary is killed.
+replTest.awaitSecondaryNodes();
- replTest.stopSet();
+replTest.awaitReplication();
- st.stop();
+stopServerReplication(secondaries);
+
+jsTest.log("inserting shardIdentity document to primary that shouldn't replicate");
+
+var shardIdentityDoc = {
+ _id: 'shardIdentity',
+ configsvrConnectionString: configConnStr,
+ shardName: 'newShard',
+ clusterId: ObjectId()
+};
+
+assert.writeOK(priConn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, shardIdentityDoc, {upsert: true}));
+
+// Ensure sharding state on the primary was initialized
+var res = priConn.getDB('admin').runCommand({shardingState: 1});
+assert(res.enabled, tojson(res));
+assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
+assert.eq(shardIdentityDoc.shardName, res.shardName);
+assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+
+// Ensure sharding state on the secondaries was *not* initialized
+secondaries.forEach(function(secondary) {
+ secondary.setSlaveOk(true);
+ res = secondary.getDB('admin').runCommand({shardingState: 1});
+ assert(!res.enabled, tojson(res));
+});
+
+// Ensure manually deleting the shardIdentity document is not allowed.
+assert.writeErrorWithCode(priConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}),
+ 40070);
+
+jsTest.log("shutting down primary");
+// Shut down the primary so a secondary gets elected that definitely won't have replicated the
+// shardIdentity insert, which should trigger a rollback on the original primary when it comes
+// back online.
+replTest.stop(priConn);
+
+// Disable the fail point so that the elected node can exit drain mode and finish becoming
+// primary.
+restartServerReplication(secondaries);
+
+// Wait for a new healthy primary
+var newPriConn = replTest.getPrimary();
+assert.neq(priConn, newPriConn);
+assert.writeOK(newPriConn.getDB('test').foo.insert({a: 1}, {writeConcern: {w: 'majority'}}));
+
+// Restart the original primary so it triggers a rollback of the shardIdentity insert.
+jsTest.log("Restarting original primary");
+priConn = replTest.restart(priConn);
+
+// Wait until we cannot create a connection to the former primary, which indicates that it must
+// have shut itself down during the rollback.
+jsTest.log("Waiting for original primary to rollback and shut down");
+assert.soon(
+ function() {
+ try {
+ var newConn = new Mongo(priConn.host);
+ return false;
+ } catch (x) {
+ return true;
+ }
+ },
+ function() {
+ var oldPriOplog = priConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
+ var newPriOplog = newPriConn.getDB('local').oplog.rs.find().sort({$natural: -1}).toArray();
+ return "timed out waiting for original primary to shut down after rollback. " +
+ "Old primary oplog: " + tojson(oldPriOplog) +
+ "; new primary oplog: " + tojson(newPriOplog);
+ },
+ 90000);
+
+// Restart the original primary again. This time, the shardIdentity document should already be
+// rolled back, so there shouldn't be any rollback and the node should stay online.
+jsTest.log("Restarting original primary a second time and waiting for it to successfully become " +
+ "secondary");
+try {
+ // Join() with the crashed mongod and ignore its bad exit status.
+ MongoRunner.stopMongod(priConn);
+} catch (e) {
+ // expected
+}
+priConn = replTest.restart(priConn, {shardsvr: ''});
+priConn.setSlaveOk();
+
+// Wait for the old primary to replicate the document that was written to the new primary while
+// it was shut down.
+assert.soonNoExcept(function() {
+ return priConn.getDB('test').foo.findOne();
+});
+
+// Ensure that there's no sharding state on the restarted original primary, since the
+// shardIdentity doc should have been rolled back.
+res = priConn.getDB('admin').runCommand({shardingState: 1});
+assert(!res.enabled, tojson(res));
+assert.eq(null, priConn.getDB('admin').system.version.findOne({_id: 'shardIdentity'}));
+
+replTest.stopSet();
+
+st.stop();
})();
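
A minimal sketch of the shardingState probe this test leans on, assuming a connection `conn` (a hypothetical name) to any mongod started with --shardsvr:

    var res = conn.getDB('admin').runCommand({shardingState: 1});
    if (res.enabled) {
        // State was initialized from the shardIdentity document in admin.system.version.
        print('sharding enabled: config=' + res.configServer + ' shard=' + res.shardName);
    }
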
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index c87fc13478f..a068da936fb 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -6,88 +6,83 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- var numDocs = 2000;
- var baseName = "shard_insert_getlasterror_w2";
- var testDBName = baseName;
- var testCollName = 'coll';
- var replNodes = 3;
-
- // ~1KB string
- var textString = '';
- for (var i = 0; i < 40; i++) {
- textString += 'abcdefghijklmnopqrstuvwxyz';
+"use strict";
+
+var numDocs = 2000;
+var baseName = "shard_insert_getlasterror_w2";
+var testDBName = baseName;
+var testCollName = 'coll';
+var replNodes = 3;
+
+// ~1KB string
+var textString = '';
+for (var i = 0; i < 40; i++) {
+ textString += 'abcdefghijklmnopqrstuvwxyz';
+}
+
+// Spin up a sharded cluster, but do not add the shards
+var shardingTestConfig =
+ {name: baseName, mongos: 1, shards: 1, rs: {nodes: replNodes}, other: {manualAddShard: true}};
+var shardingTest = new ShardingTest(shardingTestConfig);
+
+// Get connection to the individual shard
+var replSet1 = shardingTest.rs0;
+
+// Add data to it
+var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName);
+var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({x: i, text: textString});
+}
+assert.writeOK(bulk.execute());
+
+// Get connection to mongos for the cluster
+var mongosConn = shardingTest.s;
+var testDB = mongosConn.getDB(testDBName);
+
+// Add replSet1 as only shard
+assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()}));
+
+// Enable sharding on test db and its collection foo
+assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
+testDB[testCollName].ensureIndex({x: 1});
+assert.commandWorked(mongosConn.getDB('admin').runCommand(
+ {shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
+
+// Test case where GLE should return an error
+assert.writeOK(testDB.foo.insert({_id: 'a', x: 1}));
+assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
+
+// Add more data
+bulk = testDB.foo.initializeUnorderedBulkOp();
+for (var i = numDocs; i < 2 * numDocs; i++) {
+ bulk.insert({x: i, text: textString});
+}
+assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
+
+// Take down two nodes and make sure slaveOk reads still work
+var primary = replSet1._master;
+var secondary1 = replSet1._slaves[0];
+var secondary2 = replSet1._slaves[1];
+replSet1.stop(secondary1);
+replSet1.stop(secondary2);
+replSet1.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
+testDB.getMongo().setSlaveOk();
+print("trying some queries");
+assert.soon(function() {
+ try {
+ testDB.foo.find().next();
+ } catch (e) {
+ print(e);
+ return false;
}
+ return true;
+}, "Queries took too long to complete correctly.", 2 * 60 * 1000);
- // Spin up a sharded cluster, but do not add the shards
- var shardingTestConfig = {
- name: baseName,
- mongos: 1,
- shards: 1,
- rs: {nodes: replNodes},
- other: {manualAddShard: true}
- };
- var shardingTest = new ShardingTest(shardingTestConfig);
-
- // Get connection to the individual shard
- var replSet1 = shardingTest.rs0;
-
- // Add data to it
- var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName);
- var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: textString});
- }
- assert.writeOK(bulk.execute());
-
- // Get connection to mongos for the cluster
- var mongosConn = shardingTest.s;
- var testDB = mongosConn.getDB(testDBName);
-
- // Add replSet1 as only shard
- assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()}));
+// Shutdown cluster
+shardingTest.stop();
- // Enable sharding on test db and its collection foo
- assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
- testDB[testCollName].ensureIndex({x: 1});
- assert.commandWorked(mongosConn.getDB('admin').runCommand(
- {shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
-
- // Test case where GLE should return an error
- assert.writeOK(testDB.foo.insert({_id: 'a', x: 1}));
- assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
-
- // Add more data
- bulk = testDB.foo.initializeUnorderedBulkOp();
- for (var i = numDocs; i < 2 * numDocs; i++) {
- bulk.insert({x: i, text: textString});
- }
- assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
-
- // Take down two nodes and make sure slaveOk reads still work
- var primary = replSet1._master;
- var secondary1 = replSet1._slaves[0];
- var secondary2 = replSet1._slaves[1];
- replSet1.stop(secondary1);
- replSet1.stop(secondary2);
- replSet1.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
- testDB.getMongo().setSlaveOk();
- print("trying some queries");
- assert.soon(function() {
- try {
- testDB.foo.find().next();
- } catch (e) {
- print(e);
- return false;
- }
- return true;
- }, "Queries took too long to complete correctly.", 2 * 60 * 1000);
-
- // Shutdown cluster
- shardingTest.stop();
-
- print('shard_insert_getlasterror_w2.js SUCCESS');
+print('shard_insert_getlasterror_w2.js SUCCESS');
})();
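
As a reference for the write-concern check above, a hedged sketch assuming `testDB` as in the test: the duplicate-key failure is independent of write concern, while `w`/`wtimeout` only control how long the shell waits for replication acknowledgement.

    assert.writeOK(testDB.foo.insert({_id: 'b', x: 2}));
    // The same _id again is a write error (duplicate key) even with w: 1.
    assert.writeError(testDB.foo.insert({_id: 'b', x: 2}, {writeConcern: {w: 1}}));
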
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 78ac1c3fb6f..3076dde5b7e 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,43 +1,43 @@
// Tests splitting a chunk twice
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
+var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
- var dbName = "test";
- var collName = "foo";
- var ns = dbName + "." + collName;
+var dbName = "test";
+var collName = "foo";
+var ns = dbName + "." + collName;
- var db = s.getDB(dbName);
+var db = s.getDB(dbName);
- for (var i = 0; i < 10; i++) {
- db.foo.insert({_id: i});
- }
+for (var i = 0; i < 10; i++) {
+ db.foo.insert({_id: i});
+}
- // Enable sharding on DB
- assert.commandWorked(s.s0.adminCommand({enablesharding: dbName}));
- s.ensurePrimaryShard(dbName, s.shard1.shardName);
+// Enable sharding on DB
+assert.commandWorked(s.s0.adminCommand({enablesharding: dbName}));
+s.ensurePrimaryShard(dbName, s.shard1.shardName);
- // Enable sharding on collection
- assert.commandWorked(s.s0.adminCommand({shardcollection: ns, key: {_id: 1}}));
+// Enable sharding on collection
+assert.commandWorked(s.s0.adminCommand({shardcollection: ns, key: {_id: 1}}));
- // Split into two chunks
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split into two chunks
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- var coll = db.getCollection(collName);
+var coll = db.getCollection(collName);
- // Split chunk again
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split chunk again
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.writeOK(coll.update({_id: 3}, {_id: 3}));
- // Split chunk again
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split chunk again
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- assert.writeOK(coll.update({_id: 3}, {_id: 3}));
+assert.writeOK(coll.update({_id: 3}, {_id: 3}));
- // Split chunk again
- assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
+// Split chunk again
+assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
- s.stop();
+s.stop();
})();
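
A side note on the split command used repeatedly above, assuming `s` and `ns` as in the test: `find` lets the server pick a split point inside the chunk owning the given key, whereas `middle` names the split point exactly.

    // Server chooses the split point within the chunk that owns {_id: 3}.
    assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
    // Split exactly at {_id: 5} instead.
    assert.commandWorked(s.s0.adminCommand({split: ns, middle: {_id: 5}}));
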
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 1f21d823f26..13715d62ddc 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -11,76 +11,75 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
// Run through the same test twice, once with a hard -9 kill, once with a regular shutdown
(function() {
- 'use strict';
+'use strict';
- for (var test = 0; test < 2; test++) {
- var killWith = (test == 0 ? 15 : 9);
+for (var test = 0; test < 2; test++) {
+ var killWith = (test == 0 ? 15 : 9);
- var st = new ShardingTest({shards: 1});
+ var st = new ShardingTest({shards: 1});
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var db = coll.getDB();
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var db = coll.getDB();
- assert.writeOK(coll.insert({hello: "world"}));
+ assert.writeOK(coll.insert({hello: "world"}));
- jsTest.log("Creating new connections...");
+ jsTest.log("Creating new connections...");
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for (var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- assert.neq(null, conns[i].getCollection(coll + "").findOne());
- }
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x10)-> mongos ->(x10)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ assert.neq(null, conns[i].getCollection(coll + "").findOne());
+ }
- jsTest.log("Returning the connections back to the pool.");
+ jsTest.log("Returning the connections back to the pool.");
- for (var i = 0; i < conns.length; i++) {
- conns[i].close();
- }
+ for (var i = 0; i < conns.length; i++) {
+ conns[i].close();
+ }
-        // Don't make the test fragile by depending on the exact format of
-        // shardConnPoolStats, but printing it is useful if something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
- printjson(connPoolStats);
+    // Don't make the test fragile by depending on the exact format of
+    // shardConnPoolStats, but printing it is useful if something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
- jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "") + "...");
+ jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "") + "...");
- // Flush writes to disk, since sometimes we're killing uncleanly
- assert(mongos.getDB("admin").runCommand({fsync: 1}).ok);
+ // Flush writes to disk, since sometimes we're killing uncleanly
+ assert(mongos.getDB("admin").runCommand({fsync: 1}).ok);
- var exitCode = killWith === 9 ? MongoRunner.EXIT_SIGKILL : MongoRunner.EXIT_CLEAN;
+ var exitCode = killWith === 9 ? MongoRunner.EXIT_SIGKILL : MongoRunner.EXIT_CLEAN;
- st.rs0.stopSet(killWith, true, {allowedExitCode: exitCode});
+ st.rs0.stopSet(killWith, true, {allowedExitCode: exitCode});
- jsTest.log("Restart shard...");
- st.rs0.startSet({forceLock: true}, true);
+ jsTest.log("Restart shard...");
+ st.rs0.startSet({forceLock: true}, true);
- jsTest.log("Waiting for socket timeout time...");
+ jsTest.log("Waiting for socket timeout time...");
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
- jsTest.log("Run queries using new connections.");
+ jsTest.log("Run queries using new connections.");
- var numErrors = 0;
- for (var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- assert.neq(null, newConn.getCollection("foo.bar").findOne());
- } catch (e) {
- printjson(e);
- numErrors++;
- }
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ assert.neq(null, newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
}
+ }
- assert.eq(0, numErrors);
-
- st.stop();
+ assert.eq(0, numErrors);
- jsTest.log("DONE test " + test);
- }
+ st.stop();
+ jsTest.log("DONE test " + test);
+}
})();
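
A sketch of the pool inspection above, assuming `mongos` as in the test; the layout of the shardConnPoolStats reply is version-dependent, which is why the test only prints it rather than asserting on it.

    var stats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
    printjson(stats);  // Per-host connection counts; handy when hunting stale sockets.
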
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 62d0f3fa88e..224b3b3ae14 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -4,64 +4,63 @@
// BSONObj itself as the query to target shards, which could return wrong
// shards if the shard key happens to be one of the fields in the command object.
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+var s = new ShardingTest({shards: 2});
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- var db = s.getDB("test");
- var res;
+var db = s.getDB("test");
+var res;
- //
- // Target count command
- //
+//
+// Target count command
+//
-    // Shard key is the same as the command name.
- s.shardColl("foo", {count: 1}, {count: ""});
+// Shard key is the same as the command name.
+s.shardColl("foo", {count: 1}, {count: ""});
- for (var i = 0; i < 50; i++) {
- db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
- db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
- }
+for (var i = 0; i < 50; i++) {
+ db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
+ db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
+}
- var theOtherShard = s.getOther(s.getPrimaryShard("test")).name;
- s.printShardingStatus();
+var theOtherShard = s.getOther(s.getPrimaryShard("test")).name;
+s.printShardingStatus();
- // Count documents on both shards
+// Count documents on both shards
- // "count" commnad with "query" option { }.
- assert.eq(db.foo.count(), 100);
- // Optional "query" option is not given.
- res = db.foo.runCommand("count");
- assert.eq(res.n, 100);
+// "count" commnad with "query" option { }.
+assert.eq(db.foo.count(), 100);
+// Optional "query" option is not given.
+res = db.foo.runCommand("count");
+assert.eq(res.n, 100);
- //
- // Target mapreduce command
- //
- db.foo.drop();
+//
+// Target mapreduce command
+//
+db.foo.drop();
-    // Shard key is the same as the command name.
- s.shardColl("foo", {mapReduce: 1}, {mapReduce: ""});
+// Shard key is the same as the command name.
+s.shardColl("foo", {mapReduce: 1}, {mapReduce: ""});
- for (var i = 0; i < 50; i++) {
- db.foo.insert({mapReduce: i}); // to the chunk including number
- db.foo.insert({mapReduce: "" + i}); // to the chunk including string
- }
+for (var i = 0; i < 50; i++) {
+ db.foo.insert({mapReduce: i}); // to the chunk including number
+ db.foo.insert({mapReduce: "" + i}); // to the chunk including string
+}
- s.printShardingStatus();
+s.printShardingStatus();
- function m() {
- emit("total", 1);
- }
- function r(k, v) {
- return Array.sum(v);
- }
- res = db.foo.runCommand({mapReduce: "foo", map: m, reduce: r, out: {inline: 1}});
+function m() {
+ emit("total", 1);
+}
+function r(k, v) {
+ return Array.sum(v);
+}
+res = db.foo.runCommand({mapReduce: "foo", map: m, reduce: r, out: {inline: 1}});
- // Count documents on both shards
- assert.eq(res.results[0].value, 100);
-
- s.stop();
+// Count documents on both shards
+assert.eq(res.results[0].value, 100);
+s.stop();
})();
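
A sketch of the failure mode this test guards against, assuming `db` as above with the cluster still running: shards must be targeted by the command's query, never by the command document itself, or {count: "foo"} would be misread as a shard-key equality match on the `count` field.

    // Correctly targeted: the empty query fans out to both shards.
    var countRes = db.runCommand({count: "foo", query: {}});
    assert.eq(100, countRes.n);
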
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index 75f0ea19bb5..b96b6bf3f5c 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,28 +1,28 @@
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 2});
- var specialDB = "[a-z]+";
- var specialNS = specialDB + ".special";
+var s = new ShardingTest({shards: 2, mongos: 2});
+var specialDB = "[a-z]+";
+var specialNS = specialDB + ".special";
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {num: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {num: 1}}));
-    // Test that the database will not complain "cannot have 2 database names that differ on case"
- assert.commandWorked(s.s0.adminCommand({enablesharding: specialDB}));
- s.ensurePrimaryShard(specialDB, s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: specialNS, key: {num: 1}}));
+// Test that the database will not complain "cannot have 2 database names that differ on case"
+assert.commandWorked(s.s0.adminCommand({enablesharding: specialDB}));
+s.ensurePrimaryShard(specialDB, s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: specialNS, key: {num: 1}}));
- var exists = s.getDB("config").collections.find({_id: specialNS}).itcount();
- assert.eq(exists, 1);
+var exists = s.getDB("config").collections.find({_id: specialNS}).itcount();
+assert.eq(exists, 1);
- // Test that drop database properly cleans up config
- s.getDB(specialDB).dropDatabase();
+// Test that drop database properly cleans up config
+s.getDB(specialDB).dropDatabase();
- var cursor = s.getDB("config").collections.find({_id: specialNS});
- assert(cursor.next()["dropped"]);
- assert(!cursor.hasNext());
+var cursor = s.getDB("config").collections.find({_id: specialNS});
+assert(cursor.next()["dropped"]);
+assert(!cursor.hasNext());
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index 7d47db46e7d..e7f1f589ca9 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -2,151 +2,150 @@
// of limit and batchSize with sort return the correct results, and do not issue
// unnecessary getmores (see SERVER-14299).
(function() {
- 'use strict';
-
- /**
- * Test the correctness of queries with sort and batchSize on a sharded cluster,
- * running the queries against collection 'coll'.
- */
- function testBatchSize(coll) {
- // Roll the cursor over the second batch and make sure it's correctly sized
- assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
- assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
- }
-
- /**
- * Test the correctness of queries with sort and limit on a sharded cluster,
- * running the queries against collection 'coll'.
- */
- function testLimit(coll) {
- var cursor = coll.find().sort({x: 1}).limit(3);
- assert.eq(-10, cursor.next()["_id"]);
- assert.eq(-9, cursor.next()["_id"]);
- assert.eq(-8, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
-
- cursor = coll.find().sort({x: 1}).skip(5).limit(2);
- assert.eq(-5, cursor.next()["_id"]);
- assert.eq(-4, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(9).limit(2);
- assert.eq(-1, cursor.next()["_id"]);
- assert.eq(1, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(11).limit(2);
- assert.eq(2, cursor.next()["_id"]);
- assert.eq(3, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- // Ensure that in the limit 1 case, which is special when in legacy readMode, the server
- // does not leave a cursor open.
- var openCursorsBefore =
- assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
- cursor = coll.find().sort({x: 1}).limit(1);
- assert(cursor.hasNext());
- assert.eq(-10, cursor.next()["_id"]);
- var openCursorsAfter =
- assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
- assert.eq(openCursorsBefore, openCursorsAfter);
- }
-
- /**
- * Test correctness of queries run with singleBatch=true.
- */
- function testSingleBatch(coll, numShards) {
- // Ensure that singleBatch queries that require multiple batches from individual shards
- // return complete results.
- var batchSize = 5;
- var res = assert.commandWorked(coll.getDB().runCommand({
- find: coll.getName(),
- filter: {x: {$lte: 10}},
- skip: numShards * batchSize,
- singleBatch: true,
- batchSize: batchSize
- }));
- assert.eq(batchSize, res.cursor.firstBatch.length);
- assert.eq(0, res.cursor.id);
- var cursor = coll.find().skip(numShards * batchSize).limit(-1 * batchSize);
- assert.eq(batchSize, cursor.itcount());
- cursor = coll.find().skip(numShards * batchSize).batchSize(-1 * batchSize);
- assert.eq(batchSize, cursor.itcount());
- }
-
- //
- // Create a two-shard cluster. Have an unsharded collection and a sharded collection.
- //
-
- var st = new ShardingTest(
- {shards: 2, other: {shardOptions: {setParameter: "enableTestCommands=1"}}});
-
- var db = st.s.getDB("test");
- var shardedCol = db.getCollection("sharded_limit_batchsize");
- var unshardedCol = db.getCollection("unsharded_limit_batchsize");
- shardedCol.drop();
- unshardedCol.drop();
-
- // Enable sharding and pre-split the sharded collection.
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
- assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(db.adminCommand(
- {moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
-
- // Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
- // Write 20 documents which all go to the primary shard in the unsharded collection.
- for (var i = 1; i <= 10; ++i) {
- // These go to shard 1.
- assert.writeOK(shardedCol.insert({_id: i, x: i}));
-
- // These go to shard 0.
- assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
-
- // These go to shard 0 inside the non-sharded collection.
- assert.writeOK(unshardedCol.insert({_id: i, x: i}));
- assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
- }
-
- //
- // Run tests for singleBatch queries.
- //
-
- testSingleBatch(shardedCol, 2);
- testSingleBatch(unshardedCol, 1);
-
- //
- // Run tests for batch size. These should issue getmores.
- //
-
- jsTest.log("Running batchSize tests against sharded collection.");
- st.shard0.adminCommand({setParameter: 1, logLevel: 1});
- testBatchSize(shardedCol);
- st.shard0.adminCommand({setParameter: 1, logLevel: 0});
-
- jsTest.log("Running batchSize tests against non-sharded collection.");
- testBatchSize(unshardedCol);
-
- //
- // Run tests for limit. These should *not* issue getmores. We confirm this
- // by enabling the getmore failpoint on the shards.
- //
-
- assert.commandWorked(st.shard0.getDB("test").adminCommand(
- {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
-
- assert.commandWorked(st.shard1.getDB("test").adminCommand(
- {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
-
- jsTest.log("Running limit tests against sharded collection.");
- testLimit(shardedCol, st.shard0);
-
- jsTest.log("Running limit tests against non-sharded collection.");
- testLimit(unshardedCol, st.shard0);
-
- st.stop();
-
+'use strict';
+
+/**
+ * Test the correctness of queries with sort and batchSize on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+function testBatchSize(coll) {
+ // Roll the cursor over the second batch and make sure it's correctly sized
+ assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
+ assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
+}
+
+/**
+ * Test the correctness of queries with sort and limit on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+function testLimit(coll) {
+ var cursor = coll.find().sort({x: 1}).limit(3);
+ assert.eq(-10, cursor.next()["_id"]);
+ assert.eq(-9, cursor.next()["_id"]);
+ assert.eq(-8, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
+
+ cursor = coll.find().sort({x: 1}).skip(5).limit(2);
+ assert.eq(-5, cursor.next()["_id"]);
+ assert.eq(-4, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(9).limit(2);
+ assert.eq(-1, cursor.next()["_id"]);
+ assert.eq(1, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(11).limit(2);
+ assert.eq(2, cursor.next()["_id"]);
+ assert.eq(3, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ // Ensure that in the limit 1 case, which is special when in legacy readMode, the server
+ // does not leave a cursor open.
+ var openCursorsBefore =
+ assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
+ cursor = coll.find().sort({x: 1}).limit(1);
+ assert(cursor.hasNext());
+ assert.eq(-10, cursor.next()["_id"]);
+ var openCursorsAfter =
+ assert.commandWorked(coll.getDB().serverStatus()).metrics.cursor.open.total;
+ assert.eq(openCursorsBefore, openCursorsAfter);
+}
+
+/**
+ * Test correctness of queries run with singleBatch=true.
+ */
+function testSingleBatch(coll, numShards) {
+ // Ensure that singleBatch queries that require multiple batches from individual shards
+ // return complete results.
+ var batchSize = 5;
+ var res = assert.commandWorked(coll.getDB().runCommand({
+ find: coll.getName(),
+ filter: {x: {$lte: 10}},
+ skip: numShards * batchSize,
+ singleBatch: true,
+ batchSize: batchSize
+ }));
+ assert.eq(batchSize, res.cursor.firstBatch.length);
+ assert.eq(0, res.cursor.id);
+ var cursor = coll.find().skip(numShards * batchSize).limit(-1 * batchSize);
+ assert.eq(batchSize, cursor.itcount());
+ cursor = coll.find().skip(numShards * batchSize).batchSize(-1 * batchSize);
+ assert.eq(batchSize, cursor.itcount());
+}
+
+//
+// Create a two-shard cluster. Have an unsharded collection and a sharded collection.
+//
+
+var st =
+ new ShardingTest({shards: 2, other: {shardOptions: {setParameter: "enableTestCommands=1"}}});
+
+var db = st.s.getDB("test");
+var shardedCol = db.getCollection("sharded_limit_batchsize");
+var unshardedCol = db.getCollection("unsharded_limit_batchsize");
+shardedCol.drop();
+unshardedCol.drop();
+
+// Enable sharding and pre-split the sharded collection.
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
+assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(db.adminCommand(
+ {moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+
+// Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
+// Write 20 documents which all go to the primary shard in the unsharded collection.
+for (var i = 1; i <= 10; ++i) {
+ // These go to shard 1.
+ assert.writeOK(shardedCol.insert({_id: i, x: i}));
+
+ // These go to shard 0.
+ assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
+
+ // These go to shard 0 inside the non-sharded collection.
+ assert.writeOK(unshardedCol.insert({_id: i, x: i}));
+ assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
+}
+
+//
+// Run tests for singleBatch queries.
+//
+
+testSingleBatch(shardedCol, 2);
+testSingleBatch(unshardedCol, 1);
+
+//
+// Run tests for batch size. These should issue getmores.
+//
+
+jsTest.log("Running batchSize tests against sharded collection.");
+st.shard0.adminCommand({setParameter: 1, logLevel: 1});
+testBatchSize(shardedCol);
+st.shard0.adminCommand({setParameter: 1, logLevel: 0});
+
+jsTest.log("Running batchSize tests against non-sharded collection.");
+testBatchSize(unshardedCol);
+
+//
+// Run tests for limit. These should *not* issue getmores. We confirm this
+// by enabling the getmore failpoint on the shards.
+//
+
+assert.commandWorked(st.shard0.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+assert.commandWorked(st.shard1.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+jsTest.log("Running limit tests against sharded collection.");
+testLimit(shardedCol, st.shard0);
+
+jsTest.log("Running limit tests against non-sharded collection.");
+testLimit(unshardedCol, st.shard0);
+
+st.stop();
})();
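
A sketch of the negative-limit convention testSingleBatch relies on, assuming `coll` still holds the 20 documents above: a negative limit (or batchSize) asks for a single batch and closes the cursor server-side, so no getMore follows.

    var oneBatch = coll.find().sort({x: 1}).limit(-5);
    assert.eq(5, oneBatch.itcount());  // One batch of 5; cursor already closed.
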
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index 0ae81862e0e..c38b178f73a 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -3,33 +3,32 @@
(function() {
- var st = new ShardingTest({shards: 1, mongos: 2});
- st.stopBalancer();
+var st = new ShardingTest({shards: 1, mongos: 2});
+st.stopBalancer();
- var admin = st.s0.getDB('admin');
- var shards = st.s0.getCollection('config.shards').find().toArray();
- var coll = st.s0.getCollection('foo.bar');
+var admin = st.s0.getDB('admin');
+var shards = st.s0.getCollection('config.shards').find().toArray();
+var coll = st.s0.getCollection('foo.bar');
- assert(admin.runCommand({enableSharding: coll.getDB() + ''}).ok);
- assert(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}).ok);
+assert(admin.runCommand({enableSharding: coll.getDB() + ''}).ok);
+assert(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}).ok);
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log('Turning on profiling on ' + st.shard0);
+jsTest.log('Turning on profiling on ' + st.shard0);
- st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
+st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
- var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
+var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
- var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
+var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
- assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
- profileEntry = profileColl.findOne();
- assert.neq(null, profileEntry);
- printjson(profileEntry);
- assert.eq(profileEntry.command.documents, inserts);
-
- st.stop();
+profileEntry = profileColl.findOne();
+assert.neq(null, profileEntry);
+printjson(profileEntry);
+assert.eq(profileEntry.command.documents, inserts);
+st.stop();
})();
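
A sketch of narrowing the profile lookup, assuming `profileColl` and `coll` as above: profile entries carry `op` and `ns` fields, so the insert under test can be matched directly instead of taking whatever findOne() returns first.

    var entry = profileColl.findOne({op: 'insert', ns: coll.toString()});
    assert.neq(null, entry);
    printjson(entry.command);
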
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 413a7194c22..f07708d2d23 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,51 +1,51 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- var db = s.getDB("test");
-
- var bigString = "";
- while (bigString.length < 10000) {
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- }
-
- var inserted = 0;
- var num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (20 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
-
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
-
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- function sum() {
- var x = s.chunkCounts("foo");
- return x[s.shard0.shardName] + x[s.shard1.shardName];
- }
-
- assert.lt(20, diff1(), "big differential here");
- print(diff1());
-
- assert.soon(function() {
- var d = diff1();
- return d < 5;
- // Make sure there's enough time here, since balancing can sleep for 15s or so between
- // balances.
- }, "balance didn't happen", 1000 * 60 * 5, 5000);
-
- s.stop();
+'use strict';
+
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+var db = s.getDB("test");
+
+var bigString = "";
+while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
+
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+
+function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
+
+function sum() {
+ var x = s.chunkCounts("foo");
+ return x[s.shard0.shardName] + x[s.shard1.shardName];
+}
+
+assert.lt(20, diff1(), "big differential here");
+print(diff1());
+
+assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+ // Make sure there's enough time here, since balancing can sleep for 15s or so between
+ // balances.
+}, "balance didn't happen", 1000 * 60 * 5, 5000);
+
+s.stop();
})();
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 7bba7e25bf3..697f3f5c0b0 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -2,68 +2,68 @@
* Test the maxSize setting for the addShard command.
*/
(function() {
- 'use strict';
-
- var MaxSizeMB = 1;
-
- var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
- var db = s.getDB("test");
-
- var names = s.getConnNames();
- assert.eq(2, names.length);
- assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
- assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', names[0]);
-
- var bigString = "";
- while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
- var inserted = 0;
- var num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (40 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
+'use strict';
+
+var MaxSizeMB = 1;
+
+var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
+var db = s.getDB("test");
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
+var names = s.getConnNames();
+assert.eq(2, names.length);
+assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
+assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', names[0]);
- var getShardSize = function(conn) {
- var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
- return listDatabases.totalSize;
- };
+var bigString = "";
+while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- var shardConn = new Mongo(names[1]);
+var inserted = 0;
+var num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
- // Make sure that shard doesn't have any documents.
- assert.eq(0, shardConn.getDB('test').foo.find().itcount());
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);
- var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+var getShardSize = function(conn) {
+ var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
+ return listDatabases.totalSize;
+};
- // Fill the shard with documents to exceed the max size so the balancer won't move
- // chunks to this shard.
- var localColl = shardConn.getDB('local').padding;
- while (getShardSize(shardConn) < maxSizeBytes) {
- var localBulk = localColl.initializeUnorderedBulkOp();
+var shardConn = new Mongo(names[1]);
- for (var x = 0; x < 20; x++) {
- localBulk.insert({x: x, val: bigString});
- }
- assert.writeOK(localBulk.execute());
+// Make sure that shard doesn't have any documents.
+assert.eq(0, shardConn.getDB('test').foo.find().itcount());
- // Force the storage engine to flush files to disk so shardSize will get updated.
- assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
+var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+
+// Fill the shard with documents to exceed the max size so the balancer won't move
+// chunks to this shard.
+var localColl = shardConn.getDB('local').padding;
+while (getShardSize(shardConn) < maxSizeBytes) {
+ var localBulk = localColl.initializeUnorderedBulkOp();
+
+ for (var x = 0; x < 20; x++) {
+ localBulk.insert({x: x, val: bigString});
}
+ assert.writeOK(localBulk.execute());
+
+ // Force the storage engine to flush files to disk so shardSize will get updated.
+ assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
+}
- s.startBalancer();
- s.awaitBalancerRound();
+s.startBalancer();
+s.awaitBalancerRound();
- var chunkCounts = s.chunkCounts('foo', 'test');
- assert.eq(0, chunkCounts[s.rs1.name]);
+var chunkCounts = s.chunkCounts('foo', 'test');
+assert.eq(0, chunkCounts[s.rs1.name]);
- s.stop();
+s.stop();
})();
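
A sketch of the size probe the loop above polls, assuming `shardConn` and `MaxSizeMB` as in the test: listDatabases reports totalSize in bytes, while maxSize in addShard is specified in megabytes.

    var totalBytes = shardConn.getDB('admin').runCommand({listDatabases: 1}).totalSize;
    print('shard size: ' + totalBytes + ' of ' + MaxSizeMB * 1024 * 1024 + ' allowed bytes');
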
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 155403e0b7c..fa9b0dc38da 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -2,69 +2,68 @@
(function() {
- var s = new ShardingTest({
- name: "slow_sharding_balance3",
- shards: 2,
- mongos: 1,
- other: {chunkSize: 1, enableBalancer: true}
- });
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- s.config.settings.find().forEach(printjson);
-
- db = s.getDB("test");
-
- bigString = "";
- while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
- inserted = 0;
- num = 0;
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (inserted < (40 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
- }
- assert.writeOK(bulk.execute());
-
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
-
- function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
-
- assert.lt(10, diff1());
-
- // Wait for balancer to kick in.
- var initialDiff = diff1();
- assert.soon(function() {
- return diff1() != initialDiff;
- }, "Balancer did not kick in", 5 * 60 * 1000, 1000);
-
- print("* A");
- print("disabling the balancer");
- s.stopBalancer();
- s.config.settings.find().forEach(printjson);
- print("* B");
-
- print(diff1());
-
- var currDiff = diff1();
- var waitTime = 0;
- var startTime = Date.now();
- while (waitTime < (1000 * 60)) {
- // Wait for 60 seconds to ensure balancer did not run
- assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
- sleep(5000);
- waitTime = Date.now() - startTime;
- }
-
- s.stop();
-
+var s = new ShardingTest({
+ name: "slow_sharding_balance3",
+ shards: 2,
+ mongos: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+s.config.settings.find().forEach(printjson);
+
+db = s.getDB("test");
+
+bigString = "";
+while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+}
+assert.writeOK(bulk.execute());
+
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+
+function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
+
+assert.lt(10, diff1());
+
+// Wait for balancer to kick in.
+var initialDiff = diff1();
+assert.soon(function() {
+ return diff1() != initialDiff;
+}, "Balancer did not kick in", 5 * 60 * 1000, 1000);
+
+print("* A");
+print("disabling the balancer");
+s.stopBalancer();
+s.config.settings.find().forEach(printjson);
+print("* B");
+
+print(diff1());
+
+var currDiff = diff1();
+var waitTime = 0;
+var startTime = Date.now();
+while (waitTime < (1000 * 60)) {
+ // Wait for 60 seconds to ensure balancer did not run
+ assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
+ sleep(5000);
+ waitTime = Date.now() - startTime;
+}
+
+s.stop();
})();
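
A hedged sketch of reading the balancer flag back, assuming `s` as above: stopBalancer persists its state in config.settings (the exact document shape varies by version), which is why the test prints that collection before and after.

    var balancerDoc = s.config.settings.findOne({_id: 'balancer'});
    printjson(balancerDoc);  // e.g. {_id: 'balancer', stopped: true, ...} once disabled.
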
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 75a30b62b9a..e97a6366120 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -7,158 +7,158 @@
*/
(function() {
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- s.config.settings.find().forEach(printjson);
+s.config.settings.find().forEach(printjson);
- db = s.getDB("test");
+db = s.getDB("test");
- bigString = "";
- while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+bigString = "";
+while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
- N = 3000;
+N = 3000;
- num = 0;
+num = 0;
- var counts = {};
+var counts = {};
- //
- // TODO: Rewrite to make much clearer.
- //
- // The core behavior of this test is to add a bunch of documents to a sharded collection, then
- // incrementally update each document and make sure the counts in the document match our update
- // counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
- // our counts via a query.
- //
- // If during a chunk migration an update is missed, we trigger an assertion and fail.
- //
+//
+// TODO: Rewrite to make much clearer.
+//
+// The core behavior of this test is to add a bunch of documents to a sharded collection, then
+// incrementally update each document and make sure the counts in the document match our update
+// counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
+// our counts via a query.
+//
+// If during a chunk migration an update is missed, we trigger an assertion and fail.
+//
- function doUpdate(bulk, includeString, optionalId) {
- var up = {$inc: {x: 1}};
- if (includeString) {
- up["$set"] = {s: bigString};
+function doUpdate(bulk, includeString, optionalId) {
+ var up = {$inc: {x: 1}};
+ if (includeString) {
+ up["$set"] = {s: bigString};
+ }
+ var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
+ bulk.find({_id: myid}).upsert().update(up);
+
+ counts[myid] = (counts[myid] ? counts[myid] : 0) + 1;
+ return myid;
+}
+
+Random.setRandomSeed();
+
+// Initially update all documents with _id from 0 to N-1; otherwise later checks can fail
+// because the document did not previously exist.
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++) {
+ doUpdate(bulk, true, i);
+}
+
+for (i = 0; i < N * 9; i++) {
+ doUpdate(bulk, false);
+}
+assert.writeOK(bulk.execute());
+
+for (var i = 0; i < 50; i++) {
+ s.printChunks("test.foo");
+ if (check("initial:" + i, true))
+ break;
+ sleep(5000);
+}
+check("initial at end");
+
+assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
+
+function check(msg, dontAssert) {
+ for (var x in counts) {
+ var e = counts[x];
+ var z = db.foo.findOne({_id: parseInt(x)});
+
+ if (z && z.x == e)
+ continue;
+
+ if (dontAssert) {
+ if (z)
+ delete z.s;
+ print("not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z));
+ return false;
}
- var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
- bulk.find({_id: myid}).upsert().update(up);
- counts[myid] = (counts[myid] ? counts[myid] : 0) + 1;
- return myid;
- }
+ s.s.getDB("admin").runCommand({setParameter: 1, logLevel: 2});
- Random.setRandomSeed();
+ printjson(db.foo.findOne({_id: parseInt(x)}));
-    // Initially update all documents with _id from 0 to N-1; otherwise later checks can fail
-    // because the document did not previously exist.
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++) {
- doUpdate(bulk, true, i);
- }
+ var y = db.foo.findOne({_id: parseInt(x)});
- for (i = 0; i < N * 9; i++) {
- doUpdate(bulk, false);
- }
- assert.writeOK(bulk.execute());
+ if (y) {
+ delete y.s;
+ }
- for (var i = 0; i < 50; i++) {
s.printChunks("test.foo");
- if (check("initial:" + i, true))
- break;
- sleep(5000);
- }
- check("initial at end");
-
- assert.lt(20, s.config.chunks.count({"ns": "test.foo"}), "setup2");
-
- function check(msg, dontAssert) {
- for (var x in counts) {
- var e = counts[x];
- var z = db.foo.findOne({_id: parseInt(x)});
- if (z && z.x == e)
- continue;
-
- if (dontAssert) {
- if (z)
- delete z.s;
- print("not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z));
- return false;
- }
+ assert(z, "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg);
+ assert.eq(e, z.x, "count for : " + x + " y:" + tojson(y) + " " + msg);
+ }
- s.s.getDB("admin").runCommand({setParameter: 1, logLevel: 2});
+ return true;
+}
- printjson(db.foo.findOne({_id: parseInt(x)}));
+var consecutiveNoProgressMadeErrors = 0;
- var y = db.foo.findOne({_id: parseInt(x)});
+function diff1() {
+ jsTest.log("Running diff1...");
- if (y) {
- delete y.s;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate(bulk, false);
+ var res = bulk.execute();
+
+ assert(res instanceof BulkWriteResult,
+ 'Result from bulk.execute should be of type BulkWriteResult');
+ if (res.hasWriteErrors()) {
+ res.writeErrors.forEach(function(err) {
+ // Ignore up to 3 consecutive NoProgressMade errors for the cases where migration
+ // might be going faster than the writes are executing
+ if (err.code == ErrorCodes.NoProgressMade) {
+ consecutiveNoProgressMadeErrors++;
+ if (consecutiveNoProgressMadeErrors < 3) {
+ return;
+ }
}
- s.printChunks("test.foo");
+ assert.writeOK(res);
+ });
+ } else {
+ consecutiveNoProgressMadeErrors = 0;
- assert(z, "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg);
- assert.eq(e, z.x, "count for : " + x + " y:" + tojson(y) + " " + msg);
- }
-
- return true;
+ assert.eq(1,
+ res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" +
+ " correct count is: " + counts[myid] +
+ " db says count is: " + tojson(db.foo.findOne({_id: myid})));
}
- var consecutiveNoProgressMadeErrors = 0;
-
- function diff1() {
- jsTest.log("Running diff1...");
-
- var bulk = db.foo.initializeUnorderedBulkOp();
- var myid = doUpdate(bulk, false);
- var res = bulk.execute();
-
- assert(res instanceof BulkWriteResult,
- 'Result from bulk.execute should be of type BulkWriteResult');
- if (res.hasWriteErrors()) {
- res.writeErrors.forEach(function(err) {
- // Ignore up to 3 consecutive NoProgressMade errors for the cases where migration
- // might be going faster than the writes are executing
- if (err.code == ErrorCodes.NoProgressMade) {
- consecutiveNoProgressMadeErrors++;
- if (consecutiveNoProgressMadeErrors < 3) {
- return;
- }
- }
+ var x = s.chunkCounts("foo");
+ if (Math.random() > .999)
+ printjson(x);
- assert.writeOK(res);
- });
- } else {
- consecutiveNoProgressMadeErrors = 0;
-
- assert.eq(1,
- res.nModified,
- "diff myid: " + myid + " 2: " + res.toString() + "\n" +
- " correct count is: " + counts[myid] + " db says count is: " +
- tojson(db.foo.findOne({_id: myid})));
- }
-
- var x = s.chunkCounts("foo");
- if (Math.random() > .999)
- printjson(x);
-
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
- }
+ return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
+ Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
+}
- assert.lt(20, diff1(), "initial load");
- print(diff1());
+assert.lt(20, diff1(), "initial load");
+print(diff1());
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- var d = diff1();
- return d < 5;
- }, "balance didn't happen", 1000 * 60 * 20, 1);
+assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+}, "balance didn't happen", 1000 * 60 * 20, 1);
- s.stop();
+s.stop();
})();
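
A distilled sketch of the bookkeeping at the heart of this test, assuming `db` as above and an empty test.foo: each upsert bumps a client-side counter and $inc's the document, so any update lost during a migration surfaces later as a counter/document mismatch.

    var tally = {};
    function bump(id) {
        assert.writeOK(db.foo.update({_id: id}, {$inc: {x: 1}}, {upsert: true}));
        tally[id] = (tally[id] || 0) + 1;
    }
    bump(42);
    bump(42);
    assert.eq(tally[42], db.foo.findOne({_id: 42}).x);  // Both must read 2.
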
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 0fef085d5c6..f196381528e 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -7,85 +7,84 @@
*/
(function() {
- var chunkSize = 25;
+var chunkSize = 25;
- var s = new ShardingTest(
- {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
+var s = new ShardingTest(
+ {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
- s.adminCommand({enablesharding: "test"});
- db = s.getDB("test");
- s.ensurePrimaryShard('test', s.shard1.shardName);
- t = db.foo;
+s.adminCommand({enablesharding: "test"});
+db = s.getDB("test");
+s.ensurePrimaryShard('test', s.shard1.shardName);
+t = db.foo;
- bigString = "";
- stringSize = 1024;
+bigString = "";
+stringSize = 1024;
- while (bigString.length < stringSize)
- bigString += "asdasdas";
+while (bigString.length < stringSize)
+ bigString += "asdasdas";
- stringSize = bigString.length;
- docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
- numChunks = 5;
- numDocs = 20 * docsPerChunk;
+stringSize = bigString.length;
+docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
+numChunks = 5;
+numDocs = 20 * docsPerChunk;
- print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
+print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
- }
- assert.writeOK(bulk.execute());
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+}
+assert.writeOK(bulk.execute());
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
+assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
- primary = s.getPrimaryShard("test").getDB("test").foo;
- secondaryName = s.getOther(primary.name);
- secondary = secondaryName.getDB("test").foo;
+primary = s.getPrimaryShard("test").getDB("test").foo;
+secondaryName = s.getOther(primary.name);
+secondary = secondaryName.getDB("test").foo;
- assert.eq(numDocs, primary.count(), "initial 2");
- assert.eq(0, secondary.count(), "initial 3");
- assert.eq(numDocs, t.count(), "initial 4");
+assert.eq(numDocs, primary.count(), "initial 2");
+assert.eq(0, secondary.count(), "initial 3");
+assert.eq(numDocs, t.count(), "initial 4");
- x = primary.find({_id: {$lt: 500}}).batchSize(2);
- x.next(); // 1. Create an open cursor
+x = primary.find({_id: {$lt: 500}}).batchSize(2);
+x.next(); // 1. Create an open cursor
- print("start moving chunks...");
+print("start moving chunks...");
- // 2. Move chunk from s0 to s1 without waiting for deletion.
- // Command returns, but the deletion on s0 will block due to the open cursor.
- s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
+// 2. Move chunk from s0 to s1 without waiting for deletion.
+// Command returns, but the deletion on s0 will block due to the open cursor.
+s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
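+// (For illustration) passing _waitForDelete: true here would instead block until the
+// donor's range deleter finishes, as the second moveChunk below does.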
- // 3. Start second moveChunk command from s0 to s1.
- // This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause
- // deletion on s1.
- // This moveChunk will wait for deletion.
- join = startParallelShell(
- "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
- docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
- assert.soon(function() {
- return db.x.count() > 0;
- }, "XXX", 30000, 1);
+// 3. Start a second moveChunk command from s0 to s1.
+// This moveChunk should not observe the above deletion as a 'mod', which would transfer it
+// to s1 and cause a deletion on s1.
+// This moveChunk will wait for deletion.
+join = startParallelShell(
+ "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
+ docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
+assert.soon(function() {
+ return db.x.count() > 0;
+}, "XXX", 30000, 1);
- // 4. Close the cursor to enable chunk deletion.
- print("itcount: " + x.itcount());
+// 4. Close the cursor to enable chunk deletion.
+print("itcount: " + x.itcount());
- x = null;
- for (i = 0; i < 5; i++)
- gc();
+x = null;
+for (i = 0; i < 5; i++)
+ gc();
- print("cursor should be gone");
+print("cursor should be gone");
- // 5. Waiting for the second moveChunk to finish its deletion.
- // Note the deletion for the first moveChunk may not be finished.
- join();
+// 5. Wait for the second moveChunk to finish its deletion.
+// Note the deletion for the first moveChunk may not have finished.
+join();
- // assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
- // 6. Check the total number of docs on both shards to make sure no doc is lost.
- // Use itcount() to ignore orphan docments.
- assert.eq(numDocs, t.find().itcount(), "at end 2");
-
- s.stop();
+// assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
+// 6. Check the total number of docs on both shards to make sure no doc is lost.
+// Use itcount() to ignore orphan documents.
+assert.eq(numDocs, t.find().itcount(), "at end 2");
+s.stop();
})();
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index f9ff596b87e..cd4a70fda15 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -5,56 +5,56 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- var s = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, chunkSize: 1}});
+var s = new ShardingTest({shards: 1, mongos: 1, other: {rs: true, chunkSize: 1}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- var db = s.getDB("test");
+var db = s.getDB("test");
- var bulk = db.foo.initializeUnorderedBulkOp();
- var bulk2 = db.bar.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({_id: i, x: i});
- bulk2.insert({_id: i, x: i});
- }
- assert.writeOK(bulk.execute());
- assert.writeOK(bulk2.execute());
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, x: i});
+ bulk2.insert({_id: i, x: i});
+}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
- s.splitAt("test.foo", {_id: 50});
+s.splitAt("test.foo", {_id: 50});
- var other = new Mongo(s.s0.name);
- var dbother = other.getDB("test");
+var other = new Mongo(s.s0.name);
+var dbother = other.getDB("test");
- assert.eq(5, db.foo.findOne({_id: 5}).x);
- assert.eq(5, dbother.foo.findOne({_id: 5}).x);
+assert.eq(5, db.foo.findOne({_id: 5}).x);
+assert.eq(5, dbother.foo.findOne({_id: 5}).x);
- assert.eq(5, db.bar.findOne({_id: 5}).x);
- assert.eq(5, dbother.bar.findOne({_id: 5}).x);
+assert.eq(5, db.bar.findOne({_id: 5}).x);
+assert.eq(5, dbother.bar.findOne({_id: 5}).x);
- s.rs0.awaitReplication();
- s.rs0.stopMaster(15);
+s.rs0.awaitReplication();
+s.rs0.stopMaster(15);
- // Wait for mongos and the config server primary to recognize the new shard primary
- awaitRSClientHosts(db.getMongo(), s.rs0.getPrimary(), {ismaster: true});
- awaitRSClientHosts(db.getMongo(), s.configRS.getPrimary(), {ismaster: true});
+// Wait for mongos and the config server primary to recognize the new shard primary
+awaitRSClientHosts(db.getMongo(), s.rs0.getPrimary(), {ismaster: true});
+awaitRSClientHosts(db.getMongo(), s.configRS.getPrimary(), {ismaster: true});
- assert.eq(5, db.foo.findOne({_id: 5}).x);
- assert.eq(5, db.bar.findOne({_id: 5}).x);
+assert.eq(5, db.foo.findOne({_id: 5}).x);
+assert.eq(5, db.bar.findOne({_id: 5}).x);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.bar", key: {_id: 1}}));
- s.splitAt("test.bar", {_id: 50});
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.bar", key: {_id: 1}}));
+s.splitAt("test.bar", {_id: 50});
- var yetagain = new Mongo(s.s.name);
- assert.eq(5, yetagain.getDB("test").bar.findOne({_id: 5}).x);
- assert.eq(5, yetagain.getDB("test").foo.findOne({_id: 5}).x);
+var yetagain = new Mongo(s.s.name);
+assert.eq(5, yetagain.getDB("test").bar.findOne({_id: 5}).x);
+assert.eq(5, yetagain.getDB("test").foo.findOne({_id: 5}).x);
- assert.eq(5, dbother.bar.findOne({_id: 5}).x);
- assert.eq(5, dbother.foo.findOne({_id: 5}).x);
+assert.eq(5, dbother.bar.findOne({_id: 5}).x);
+assert.eq(5, dbother.foo.findOne({_id: 5}).x);
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 190d78a1e94..0703a784eff 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -29,8 +29,7 @@ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_paranoia.jso
// Sharding Role
jsTest.log("Testing \"configsvr\" command line option");
var expectedResult = {
- "parsed":
- {"sharding": {"clusterRole": "configsvr"}, "storage": {"journal": {"enabled": true}}}
+ "parsed": {"sharding": {"clusterRole": "configsvr"}, "storage": {"journal": {"enabled": true}}}
};
testGetCmdLineOptsMongod({configsvr: "", journal: ""}, expectedResult);
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index c5022348fa6..af021bf9741 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -2,60 +2,59 @@
* tests sharding with replica sets
*/
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
+var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard0.shardName);
- s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard0.shardName);
+s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
- var db = s.getDB("test");
+var db = s.getDB("test");
- var bigString = "X".repeat(256 * 1024);
+var bigString = "X".repeat(256 * 1024);
- var insertedBytes = 0;
- var num = 0;
+var insertedBytes = 0;
+var num = 0;
- // Insert 10 MB of data to result in 10+ chunks
- var bulk = db.foo.initializeUnorderedBulkOp();
- while (insertedBytes < (10 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString, x: Math.random()});
- insertedBytes += bigString.length;
- }
- assert.writeOK(bulk.execute({w: 3}));
-
- assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+// Insert 10 MB of data to result in 10+ chunks
+var bulk = db.foo.initializeUnorderedBulkOp();
+while (insertedBytes < (10 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString, x: Math.random()});
+ insertedBytes += bigString.length;
+}
+assert.writeOK(bulk.execute({w: 3}));
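+// For illustration: with 256 KB strings the loop above inserts exactly 40 documents
+// (10 MB / 256 KB), and w: 3 waits for every member of the (assumed 3-node) shard
+// replica set to acknowledge the bulk write.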
- jsTest.log("Waiting for balance to complete");
- s.awaitBalance('foo', 'test', 5 * 60 * 1000);
+assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- jsTest.log("Stopping balancer");
- s.stopBalancer();
+jsTest.log("Waiting for balance to complete");
+s.awaitBalance('foo', 'test', 5 * 60 * 1000);
- jsTest.log("Balancer stopped, checking dbhashes");
- s._rs.forEach(function(rsNode) {
- rsNode.test.awaitReplication();
+jsTest.log("Stopping balancer");
+s.stopBalancer();
- var dbHashes = rsNode.test.getHashes("test");
- print(rsNode.url + ': ' + tojson(dbHashes));
+jsTest.log("Balancer stopped, checking dbhashes");
+s._rs.forEach(function(rsNode) {
+ rsNode.test.awaitReplication();
- for (var j = 0; j < dbHashes.slaves.length; j++) {
- assert.eq(dbHashes.master.md5,
- dbHashes.slaves[j].md5,
- "hashes not same for: " + rsNode.url + " slave: " + j);
- }
- });
+ var dbHashes = rsNode.test.getHashes("test");
+ print(rsNode.url + ': ' + tojson(dbHashes));
- assert.eq(num, db.foo.find().count(), "C1");
- assert.eq(num, db.foo.find().itcount(), "C2");
- assert.eq(num, db.foo.find().sort({_id: 1}).itcount(), "C3");
- assert.eq(num, db.foo.find().sort({_id: -1}).itcount(), "C4");
+ for (var j = 0; j < dbHashes.slaves.length; j++) {
+ assert.eq(dbHashes.master.md5,
+ dbHashes.slaves[j].md5,
+ "hashes not same for: " + rsNode.url + " slave: " + j);
+ }
+});
- db.foo.ensureIndex({x: 1});
- assert.eq(num, db.foo.find().sort({x: 1}).itcount(), "C5");
- assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
+assert.eq(num, db.foo.find().count(), "C1");
+assert.eq(num, db.foo.find().itcount(), "C2");
+assert.eq(num, db.foo.find().sort({_id: 1}).itcount(), "C3");
+assert.eq(num, db.foo.find().sort({_id: -1}).itcount(), "C4");
- s.stop();
+db.foo.ensureIndex({x: 1});
+assert.eq(num, db.foo.find().sort({x: 1}).itcount(), "C5");
+assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
+s.stop();
})();
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 648512b051d..2b35cf695e4 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -10,246 +10,246 @@
//
(function() {
- 'use strict';
-
- // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
- // from stepping down during migrations on slow evergreen builders.
- var s = new ShardingTest({
- shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
+'use strict';
+
+// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+// from stepping down during migrations on slow evergreen builders.
+var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
+ },
+ rs1: {
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
- });
+ }
+});
- var db = s.getDB("test");
- var t = db.foo;
+var db = s.getDB("test");
+var t = db.foo;
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard0.shardName);
- // -------------------------------------------------------------------------------------------
- // ---------- test that config server updates when replica set config changes ----------------
- // -------------------------------------------------------------------------------------------
+// -------------------------------------------------------------------------------------------
+// ---------- test that config server updates when replica set config changes ----------------
+// -------------------------------------------------------------------------------------------
- db.foo.save({_id: 5, x: 17});
- assert.eq(1, db.foo.count());
+db.foo.save({_id: 5, x: 17});
+assert.eq(1, db.foo.count());
- s.config.databases.find().forEach(printjson);
- s.config.shards.find().forEach(printjson);
+s.config.databases.find().forEach(printjson);
+s.config.shards.find().forEach(printjson);
- function countNodes() {
- return s.config.shards.findOne({_id: s.shard0.shardName}).host.split(",").length;
- }
+function countNodes() {
+ return s.config.shards.findOne({_id: s.shard0.shardName}).host.split(",").length;
+}
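+// For illustration (format assumed): the shard document's "host" field is a connection
+// string like "setName/host1:port,host2:port", so splitting on "," counts the members.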
+
+assert.eq(2, countNodes(), "A1");
- assert.eq(2, countNodes(), "A1");
+var rs = s.rs0;
+rs.add({'shardsvr': ""});
+try {
+ rs.reInitiate();
+} catch (e) {
+    // This is OK, as replica sets may close connections on a change of master.
+ print(e);
+}
- var rs = s.rs0;
- rs.add({'shardsvr': ""});
+assert.soon(function() {
try {
- rs.reInitiate();
+ printjson(rs.getPrimary().getDB("admin").runCommand("isMaster"));
+ s.config.shards.find().forEach(printjsononeline);
+ return countNodes() == 3;
} catch (e) {
- // this os ok as rs's may close connections on a change of master
print(e);
}
+}, "waiting for config server to update", 180 * 1000, 1000);
- assert.soon(function() {
- try {
- printjson(rs.getPrimary().getDB("admin").runCommand("isMaster"));
- s.config.shards.find().forEach(printjsononeline);
- return countNodes() == 3;
- } catch (e) {
- print(e);
- }
- }, "waiting for config server to update", 180 * 1000, 1000);
-
- // cleanup after adding node
- for (var i = 0; i < 5; i++) {
- try {
- db.foo.findOne();
- } catch (e) {
- }
+// cleanup after adding node
+for (var i = 0; i < 5; i++) {
+ try {
+ db.foo.findOne();
+ } catch (e) {
}
+}
- jsTest.log(
- "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
- rs.awaitReplication();
- // Make sure we wait for secondaries here - otherwise a secondary could come online later and be
- // used for the
- // count command before being fully replicated
- jsTest.log("Awaiting secondary status of all nodes");
- rs.waitForState(rs.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
+jsTest.log(
+ "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
+rs.awaitReplication();
+// Make sure we wait for secondaries here - otherwise a secondary could come online later
+// and be used for the count command before being fully replicated.
+jsTest.log("Awaiting secondary status of all nodes");
+rs.waitForState(rs.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
- // -------------------------------------------------------------------------------------------
- // ---------- test routing to slaves ----------------
- // -------------------------------------------------------------------------------------------
+// -------------------------------------------------------------------------------------------
+// ---------- test routing to slaves ----------------
+// -------------------------------------------------------------------------------------------
- // --- not sharded ----
+// --- not sharded ----
- var m = new Mongo(s.s.name);
- var ts = m.getDB("test").foo;
+var m = new Mongo(s.s.name);
+var ts = m.getDB("test").foo;
- var before = rs.getPrimary().adminCommand("serverStatus").opcounters;
+var before = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B1");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B1");
+}
- m.setSlaveOk();
+m.setSlaveOk();
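+// setSlaveOk() allows reads on this connection to be served by secondaries; the
+// opcounter check below (B3) only requires that the primary served the first,
+// non-slaveOk batch of 10 reads.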
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B2");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B2");
+}
- var after = rs.getPrimary().adminCommand("serverStatus").opcounters;
+var after = rs.getPrimary().adminCommand("serverStatus").opcounters;
- printjson(before);
- printjson(after);
+printjson(before);
+printjson(after);
- assert.lte(before.query + 10, after.query, "B3");
+assert.lte(before.query + 10, after.query, "B3");
- // --- add more data ----
+// --- add more data ----
- db.foo.ensureIndex({x: 1});
+db.foo.ensureIndex({x: 1});
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- if (i == 17)
- continue;
- bulk.insert({x: i});
- }
- assert.writeOK(bulk.execute({w: 3}));
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ if (i == 17)
+ continue;
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute({w: 3}));
- // Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
- // replication for this and future tests to pass
- rs.awaitReplication();
+// Counts inherit the options of the connection - and this connection is slaveOk'd - so we
+// need to wait for replication for this and future tests to pass.
+rs.awaitReplication();
- assert.eq(100, ts.count(), "B4");
- assert.eq(100, ts.find().itcount(), "B5");
- assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
+assert.eq(100, ts.count(), "B4");
+assert.eq(100, ts.find().itcount(), "B5");
+assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
- var cursor = t.find().batchSize(3);
- cursor.next();
- cursor.close();
+var cursor = t.find().batchSize(3);
+cursor.next();
+cursor.close();
- // --- sharded ----
+// --- sharded ----
- assert.eq(100, db.foo.count(), "C1");
+assert.eq(100, db.foo.count(), "C1");
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
- // We're doing some manual chunk stuff, so stop the balancer first
- s.stopBalancer();
+// We're doing some manual chunk manipulation, so stop the balancer first
+s.stopBalancer();
- assert.eq(100, t.count(), "C2");
- assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
+assert.eq(100, t.count(), "C2");
+assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
- s.printShardingStatus();
+s.printShardingStatus();
- var other = s.config.shards.findOne({_id: {$ne: s.shard0.shardName}});
- assert.commandWorked(s.getDB('admin').runCommand({
- moveChunk: "test.foo",
- find: {x: 10},
- to: other._id,
- _secondaryThrottle: true,
- writeConcern: {w: 2},
- _waitForDelete: true
- }));
- assert.eq(100, t.count(), "C3");
+var other = s.config.shards.findOne({_id: {$ne: s.shard0.shardName}});
+assert.commandWorked(s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {x: 10},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+}));
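+// (For illustration) _secondaryThrottle makes the donor wait for the given write concern
+// ({w: 2} here) between cloned document batches, and _waitForDelete blocks the command
+// until the donor's range deletion completes.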
+assert.eq(100, t.count(), "C3");
- assert.eq(50, rs.getPrimary().getDB("test").foo.count(), "C4");
+assert.eq(50, rs.getPrimary().getDB("test").foo.count(), "C4");
- // by non-shard key
+// by non-shard key
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
+m = new Mongo(s.s.name);
+ts = m.getDB("test").foo;
- before = rs.getPrimary().adminCommand("serverStatus").opcounters;
+before = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({_id: 5}).x, "D1");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D1");
+}
- m.setSlaveOk();
- for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({_id: 5}).x, "D2");
- }
+m.setSlaveOk();
+for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D2");
+}
- after = rs.getPrimary().adminCommand("serverStatus").opcounters;
+after = rs.getPrimary().adminCommand("serverStatus").opcounters;
- assert.lte(before.query + 10, after.query, "D3");
+assert.lte(before.query + 10, after.query, "D3");
- // by shard key
+// by shard key
- m = new Mongo(s.s.name);
- m.forceWriteMode("commands");
+m = new Mongo(s.s.name);
+m.forceWriteMode("commands");
- s.printShardingStatus();
+s.printShardingStatus();
- ts = m.getDB("test").foo;
+ts = m.getDB("test").foo;
- before = rs.getPrimary().adminCommand("serverStatus").opcounters;
+before = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({x: 57}).x, "E1");
- }
+for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E1");
+}
- m.setSlaveOk();
- for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({x: 57}).x, "E2");
- }
+m.setSlaveOk();
+for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E2");
+}
- after = rs.getPrimary().adminCommand("serverStatus").opcounters;
-
- assert.lte(before.query + 10, after.query, "E3");
-
- assert.eq(100, ts.count(), "E4");
- assert.eq(100, ts.find().itcount(), "E5");
- printjson(ts.find().batchSize(5).explain());
-
- // fsyncLock the secondaries
- rs.getSecondaries().forEach(function(secondary) {
- assert.commandWorked(secondary.getDB("test").fsyncLock());
- });
-
- // Modify data only on the primary replica of the primary shard.
- // { x: 60 } goes to the shard of "rs", which is the primary shard.
- assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
- // Read from secondary through mongos, the doc is not there due to replication delay or fsync.
- // But we can guarantee not to read from primary.
- assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
- // Unlock the secondaries
- rs.getSecondaries().forEach(function(secondary) {
- secondary.getDB("test").fsyncUnlock();
- });
- // Clean up the data
- assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
-
- for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- m.setSlaveOk();
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
- }
+after = rs.getPrimary().adminCommand("serverStatus").opcounters;
- for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
- }
+assert.lte(before.query + 10, after.query, "E3");
+
+assert.eq(100, ts.count(), "E4");
+assert.eq(100, ts.find().itcount(), "E5");
+printjson(ts.find().batchSize(5).explain());
+
+// fsyncLock the secondaries
+rs.getSecondaries().forEach(function(secondary) {
+ assert.commandWorked(secondary.getDB("test").fsyncLock());
+});
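+// fsyncLock flushes pending writes and then blocks all further writes (including
+// replication) on each secondary, so secondary reads cannot observe the insert below.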
+
+// Modify data only on the primary replica of the primary shard.
+// { x: 60 } goes to the shard of "rs", which is the primary shard.
+assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+// Read from a secondary through mongos; the doc is not there due to replication delay or
+// fsync. But we are guaranteed not to read from the primary.
+assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
+// Unlock the secondaries
+rs.getSecondaries().forEach(function(secondary) {
+ secondary.getDB("test").fsyncUnlock();
+});
+// Clean up the data
+assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+
+for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ m.setSlaveOk();
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
+}
+
+for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
+}
- printjson(db.adminCommand("getShardMap"));
+printjson(db.adminCommand("getShardMap"));
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js
index 7e25f5465ba..9aaa6ab3a9c 100644
--- a/jstests/sharding/sharding_statistics_server_status.js
+++ b/jstests/sharding/sharding_statistics_server_status.js
@@ -5,179 +5,178 @@
// @tags: [uses_transactions]
(function() {
- 'use strict';
-
- load("jstests/libs/chunk_manipulation_util.js");
- load("jstests/libs/parallelTester.js");
-
- function ShardStat() {
- this.countDonorMoveChunkStarted = 0;
- this.countRecipientMoveChunkStarted = 0;
- this.countDocsClonedOnRecipient = 0;
- this.countDocsClonedOnDonor = 0;
- this.countDocsDeletedOnDonor = 0;
- }
-
- function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) {
- ++donor.countDonorMoveChunkStarted;
- donor.countDocsClonedOnDonor += numDocs;
- ++recipient.countRecipientMoveChunkStarted;
- recipient.countDocsClonedOnRecipient += numDocs;
- donor.countDocsDeletedOnDonor += numDocs;
- const statsFromServerStatus = shardArr.map(function(shardVal) {
- return shardVal.getDB('admin').runCommand({serverStatus: 1}).shardingStatistics;
- });
- for (let i = 0; i < shardArr.length; ++i) {
- assert(statsFromServerStatus[i]);
- assert(statsFromServerStatus[i].countStaleConfigErrors);
- assert(statsFromServerStatus[i].totalCriticalSectionCommitTimeMillis);
- assert(statsFromServerStatus[i].totalCriticalSectionTimeMillis);
- assert(statsFromServerStatus[i].totalDonorChunkCloneTimeMillis);
- assert(statsFromServerStatus[i].countDonorMoveChunkLockTimeout);
- assert.eq(stats[i].countDonorMoveChunkStarted,
- statsFromServerStatus[i].countDonorMoveChunkStarted);
- assert.eq(stats[i].countDocsClonedOnRecipient,
- statsFromServerStatus[i].countDocsClonedOnRecipient);
- assert.eq(stats[i].countDocsClonedOnDonor,
- statsFromServerStatus[i].countDocsClonedOnDonor);
- assert.eq(stats[i].countDocsDeletedOnDonor,
- statsFromServerStatus[i].countDocsDeletedOnDonor);
- assert.eq(stats[i].countRecipientMoveChunkStarted,
- statsFromServerStatus[i].countRecipientMoveChunkStarted);
- }
- }
-
- function checkServerStatusMigrationLockTimeoutCount(shardConn, count) {
- const shardStats =
- assert.commandWorked(shardConn.adminCommand({serverStatus: 1})).shardingStatistics;
- assert(shardStats.hasOwnProperty("countDonorMoveChunkLockTimeout"));
- assert.eq(count, shardStats.countDonorMoveChunkLockTimeout);
- }
-
- function runConcurrentMoveChunk(host, ns, toShard) {
- const mongos = new Mongo(host);
- return mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: toShard});
- }
-
- function runConcurrentRead(host, dbName, collName) {
- const mongos = new Mongo(host);
- return mongos.getDB(dbName)[collName].find({_id: 5}).comment("concurrent read").itcount();
- }
-
- const dbName = "db";
- const collName = "coll";
-
- const st = new ShardingTest({shards: 2, mongos: 1});
- const mongos = st.s0;
- const admin = mongos.getDB("admin");
- const coll = mongos.getCollection(dbName + "." + collName);
- const numDocsToInsert = 3;
- const shardArr = [st.shard0, st.shard1];
- const stats = [new ShardStat(), new ShardStat()];
- let numDocsInserted = 0;
-
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-
- // Move chunk from shard0 to shard1 without docs.
- assert.commandWorked(
- mongos.adminCommand({moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName}));
- incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
-
- // Insert docs and then move chunk again from shard1 to shard0.
- for (let i = 0; i < numDocsToInsert; ++i) {
- assert.writeOK(coll.insert({_id: i}));
- ++numDocsInserted;
- }
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
-
- // Check that numbers are indeed cumulative. Move chunk from shard0 to shard1.
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
- incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
-
- // Move chunk from shard1 to shard0.
- assert.commandWorked(mongos.adminCommand(
- {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
- incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
-
- //
- // Tests for the count of migrations aborting from lock timeouts.
- //
-
- // Lower migrationLockAcquisitionMaxWaitMS so migrations time out more quickly.
- const donorConn = st.rs0.getPrimary();
- const lockParameterRes = assert.commandWorked(
- donorConn.adminCommand({getParameter: 1, migrationLockAcquisitionMaxWaitMS: 1}));
- const originalMigrationLockTimeout = lockParameterRes.migrationLockAcquisitionMaxWaitMS;
- assert.commandWorked(
- donorConn.adminCommand({setParameter: 1, migrationLockAcquisitionMaxWaitMS: 2 * 1000}));
-
- // Counter starts at 0.
- checkServerStatusMigrationLockTimeoutCount(donorConn, 0);
-
- // Pause a migration before entering the critical section.
- pauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
- let moveChunkThread = new ScopedThread(
- runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
- moveChunkThread.start();
- waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
-
- // Start a transaction and insert to the migrating chunk to block entering the critical section.
- const session = mongos.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase(dbName)[collName].insert({_id: 5}));
-
- // Unpause the migration and it should time out entering the critical section.
- unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
- moveChunkThread.join();
- assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
-
- // Clean up the transaction and verify the counter was incremented in serverStatus.
- assert.commandWorked(session.abortTransaction_forTesting());
-
- checkServerStatusMigrationLockTimeoutCount(donorConn, 1);
-
- // Writes are blocked during the critical section, so insert a document into the chunk to be
- // moved before the migration begins that can be read later.
- assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 5}));
-
- // Pause a migration after entering the critical section, but before entering the commit phase.
- pauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
- moveChunkThread = new ScopedThread(
- runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
- moveChunkThread.start();
- waitForMoveChunkStep(donorConn, moveChunkStepNames.chunkDataCommitted);
-
- // Pause a read while it's holding locks so the migration can't commit.
- assert.commandWorked(donorConn.adminCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
- const concurrentRead = new ScopedThread(runConcurrentRead, st.s.host, dbName, collName);
- concurrentRead.start();
- assert.soon(function() {
- const curOpResults = assert.commandWorked(donorConn.adminCommand({currentOp: 1}));
- return curOpResults.inprog.some(op => op["command"]["comment"] === "concurrent read");
+'use strict';
+
+load("jstests/libs/chunk_manipulation_util.js");
+load("jstests/libs/parallelTester.js");
+
+function ShardStat() {
+ this.countDonorMoveChunkStarted = 0;
+ this.countRecipientMoveChunkStarted = 0;
+ this.countDocsClonedOnRecipient = 0;
+ this.countDocsClonedOnDonor = 0;
+ this.countDocsDeletedOnDonor = 0;
+}
+
+function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) {
+ ++donor.countDonorMoveChunkStarted;
+ donor.countDocsClonedOnDonor += numDocs;
+ ++recipient.countRecipientMoveChunkStarted;
+ recipient.countDocsClonedOnRecipient += numDocs;
+ donor.countDocsDeletedOnDonor += numDocs;
+ const statsFromServerStatus = shardArr.map(function(shardVal) {
+ return shardVal.getDB('admin').runCommand({serverStatus: 1}).shardingStatistics;
});
+ for (let i = 0; i < shardArr.length; ++i) {
+ assert(statsFromServerStatus[i]);
+ assert(statsFromServerStatus[i].countStaleConfigErrors);
+ assert(statsFromServerStatus[i].totalCriticalSectionCommitTimeMillis);
+ assert(statsFromServerStatus[i].totalCriticalSectionTimeMillis);
+ assert(statsFromServerStatus[i].totalDonorChunkCloneTimeMillis);
+ assert(statsFromServerStatus[i].countDonorMoveChunkLockTimeout);
+ assert.eq(stats[i].countDonorMoveChunkStarted,
+ statsFromServerStatus[i].countDonorMoveChunkStarted);
+ assert.eq(stats[i].countDocsClonedOnRecipient,
+ statsFromServerStatus[i].countDocsClonedOnRecipient);
+ assert.eq(stats[i].countDocsClonedOnDonor, statsFromServerStatus[i].countDocsClonedOnDonor);
+ assert.eq(stats[i].countDocsDeletedOnDonor,
+ statsFromServerStatus[i].countDocsDeletedOnDonor);
+ assert.eq(stats[i].countRecipientMoveChunkStarted,
+ statsFromServerStatus[i].countRecipientMoveChunkStarted);
+ }
+}
+
+function checkServerStatusMigrationLockTimeoutCount(shardConn, count) {
+ const shardStats =
+ assert.commandWorked(shardConn.adminCommand({serverStatus: 1})).shardingStatistics;
+ assert(shardStats.hasOwnProperty("countDonorMoveChunkLockTimeout"));
+ assert.eq(count, shardStats.countDonorMoveChunkLockTimeout);
+}
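+// Shape for illustration (values assumed): serverStatus().shardingStatistics looks like
+// {countDonorMoveChunkStarted: N, countDocsClonedOnDonor: N,
+//  countDonorMoveChunkLockTimeout: N, ...}, which the helpers above compare against the
+// locally tracked ShardStat counters.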
+
+function runConcurrentMoveChunk(host, ns, toShard) {
+ const mongos = new Mongo(host);
+ return mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: toShard});
+}
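+// These helpers run inside a ScopedThread below: thread.start() launches the function,
+// thread.join() waits for it, and thread.returnData() yields the command result.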
+
+function runConcurrentRead(host, dbName, collName) {
+ const mongos = new Mongo(host);
+ return mongos.getDB(dbName)[collName].find({_id: 5}).comment("concurrent read").itcount();
+}
+
+const dbName = "db";
+const collName = "coll";
+
+const st = new ShardingTest({shards: 2, mongos: 1});
+const mongos = st.s0;
+const admin = mongos.getDB("admin");
+const coll = mongos.getCollection(dbName + "." + collName);
+const numDocsToInsert = 3;
+const shardArr = [st.shard0, st.shard1];
+const stats = [new ShardStat(), new ShardStat()];
+let numDocsInserted = 0;
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
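+// After this split the collection has two chunks, [MinKey, 0) and [0, MaxKey), both
+// initially on the primary shard (st.shard0).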
+
+// Move chunk from shard0 to shard1 without docs.
+assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName}));
+incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
+
+// Insert docs and then move chunk again from shard1 to shard0.
+for (let i = 0; i < numDocsToInsert; ++i) {
+ assert.writeOK(coll.insert({_id: i}));
+ ++numDocsInserted;
+}
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
+
+// Check that numbers are indeed cumulative. Move chunk from shard0 to shard1.
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+incrementStatsAndCheckServerShardStats(stats[0], stats[1], numDocsInserted);
+
+// Move chunk from shard1 to shard0.
+assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true}));
+incrementStatsAndCheckServerShardStats(stats[1], stats[0], numDocsInserted);
- // Unpause the migration and it should time out entering the commit phase.
- unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
- moveChunkThread.join();
- assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
-
- // Let the read finish and verify the counter was incremented in serverStatus.
- assert.commandWorked(
- donorConn.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- concurrentRead.join();
- assert.eq(1, concurrentRead.returnData());
-
- checkServerStatusMigrationLockTimeoutCount(donorConn, 2);
-
- assert.commandWorked(donorConn.adminCommand(
- {setParameter: 1, migrationLockAcquisitionMaxWaitMS: originalMigrationLockTimeout}));
+//
+// Tests for the count of migrations aborting from lock timeouts.
+//
- st.stop();
+// Lower migrationLockAcquisitionMaxWaitMS so migrations time out more quickly.
+const donorConn = st.rs0.getPrimary();
+const lockParameterRes = assert.commandWorked(
+ donorConn.adminCommand({getParameter: 1, migrationLockAcquisitionMaxWaitMS: 1}));
+const originalMigrationLockTimeout = lockParameterRes.migrationLockAcquisitionMaxWaitMS;
+assert.commandWorked(
+ donorConn.adminCommand({setParameter: 1, migrationLockAcquisitionMaxWaitMS: 2 * 1000}));
+
+// Counter starts at 0.
+checkServerStatusMigrationLockTimeoutCount(donorConn, 0);
+
+// Pause a migration before entering the critical section.
+pauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
+let moveChunkThread = new ScopedThread(
+ runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
+moveChunkThread.start();
+waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
+
+// Start a transaction and insert to the migrating chunk to block entering the critical section.
+const session = mongos.startSession();
+session.startTransaction();
+assert.commandWorked(session.getDatabase(dbName)[collName].insert({_id: 5}));
+
+// Unpause the migration and it should time out entering the critical section.
+unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
+moveChunkThread.join();
+assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
+
+// Clean up the transaction and verify the counter was incremented in serverStatus.
+assert.commandWorked(session.abortTransaction_forTesting());
+
+checkServerStatusMigrationLockTimeoutCount(donorConn, 1);
+
+// Writes are blocked during the critical section, so before the migration begins, insert a
+// document into the chunk to be moved that can be read later.
+assert.commandWorked(st.s.getDB(dbName)[collName].insert({_id: 5}));
+
+// Pause a migration after entering the critical section, but before entering the commit phase.
+pauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
+moveChunkThread = new ScopedThread(
+ runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
+moveChunkThread.start();
+waitForMoveChunkStep(donorConn, moveChunkStepNames.chunkDataCommitted);
+
+// Pause a read while it's holding locks so the migration can't commit.
+assert.commandWorked(
+ donorConn.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
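+// (For reference) fail points are toggled with the configureFailPoint command; common
+// modes are "alwaysOn", "off", and {times: N}. "alwaysOn" holds every matching find
+// here until the fail point is turned "off" further below.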
+const concurrentRead = new ScopedThread(runConcurrentRead, st.s.host, dbName, collName);
+concurrentRead.start();
+assert.soon(function() {
+ const curOpResults = assert.commandWorked(donorConn.adminCommand({currentOp: 1}));
+ return curOpResults.inprog.some(op => op["command"]["comment"] === "concurrent read");
+});
+
+// Unpause the migration and it should time out entering the commit phase.
+unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.chunkDataCommitted);
+moveChunkThread.join();
+assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.LockTimeout);
+
+// Let the read finish and verify the counter was incremented in serverStatus.
+assert.commandWorked(
+ donorConn.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+concurrentRead.join();
+assert.eq(1, concurrentRead.returnData());
+
+checkServerStatusMigrationLockTimeoutCount(donorConn, 2);
+
+assert.commandWorked(donorConn.adminCommand(
+ {setParameter: 1, migrationLockAcquisitionMaxWaitMS: originalMigrationLockTimeout}));
+
+st.stop();
})();
diff --git a/jstests/sharding/shards_and_config_return_last_committed_optime.js b/jstests/sharding/shards_and_config_return_last_committed_optime.js
index b884deaef39..efe6d7f431b 100644
--- a/jstests/sharding/shards_and_config_return_last_committed_optime.js
+++ b/jstests/sharding/shards_and_config_return_last_committed_optime.js
@@ -10,185 +10,183 @@
* - standalone mongod
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // For stopReplProducer
-
- function assertCmdDoesNotReturnLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // For stopReplProducer
+
+function assertCmdDoesNotReturnLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
+ const res = testDB.runCommand(cmdObj);
+ assert.eq(expectSuccess ? 1 : 0, res.ok);
+ assert(typeof res.lastCommittedOpTime === "undefined",
+ "Expected response from a " + connType + " to not contain lastCommittedOpTime," +
+ " received: " + tojson(res) + ", cmd was: " + tojson(cmdObj));
+}
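+// Shape for illustration (fields assumed): a mongos reply looks like
+// {ok: 1, operationTime: ..., $clusterTime: ...} with no lastCommittedOpTime, whereas
+// shard and config server replies additionally carry a top-level lastCommittedOpTime.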
+
+function assertDoesNotReturnLastCommittedOpTime(testDB, collName, connType) {
+    // Successful commands do not return lastCommittedOpTime.
+ assertCmdDoesNotReturnLastCommittedOpTime(testDB, {find: collName}, connType, true);
+
+    // Failed commands do not return lastCommittedOpTime.
+ assertCmdDoesNotReturnLastCommittedOpTime(
+ testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
+ assertCmdDoesNotReturnLastCommittedOpTime(testDB,
+ {find: collName, readConcern: {invalid: "rc"}},
+ connType,
+ false /* expectSuccess */);
+ assertCmdDoesNotReturnLastCommittedOpTime(
+ testDB,
+ {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
+ connType,
+ false /* expectSuccess */);
+}
+
+function assertCmdReturnsLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
+ // Retry up to one time to avoid possible failures from lag in setting the
+ // lastCommittedOpTime.
+ assert.retryNoExcept(() => {
const res = testDB.runCommand(cmdObj);
assert.eq(expectSuccess ? 1 : 0, res.ok);
- assert(typeof res.lastCommittedOpTime === "undefined",
- "Expected response from a " + connType + " to not contain lastCommittedOpTime," +
+ assert(typeof res.lastCommittedOpTime !== "undefined",
+ "Expected response from a " + connType + " to contain lastCommittedOpTime," +
" received: " + tojson(res) + ", cmd was: " + tojson(cmdObj));
- }
-
- function assertDoesNotReturnLastCommittedOpTime(testDB, collName, connType) {
- // Successful commands return lastCommittedOpTime.
- assertCmdDoesNotReturnLastCommittedOpTime(testDB, {find: collName}, connType, true);
-
- // Failed commands return lastCommittedOpTime.
- assertCmdDoesNotReturnLastCommittedOpTime(
- testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
- assertCmdDoesNotReturnLastCommittedOpTime(testDB,
- {find: collName, readConcern: {invalid: "rc"}},
- connType,
- false /* expectSuccess */);
- assertCmdDoesNotReturnLastCommittedOpTime(
- testDB,
- {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
- connType,
- false /* expectSuccess */);
- }
-
- function assertCmdReturnsLastCommittedOpTime(testDB, cmdObj, connType, expectSuccess) {
- // Retry up to one time to avoid possible failures from lag in setting the
- // lastCommittedOpTime.
- assert.retryNoExcept(() => {
- const res = testDB.runCommand(cmdObj);
- assert.eq(expectSuccess ? 1 : 0, res.ok);
- assert(typeof res.lastCommittedOpTime !== "undefined",
- "Expected response from a " + connType + " to contain lastCommittedOpTime," +
- " received: " + tojson(res) + ", cmd was: " + tojson(cmdObj));
-
- // The last committed opTime may advance after replSetGetStatus finishes executing and
- // before its response's metadata is computed, in which case the response's
- // lastCommittedOpTime will be greater than the lastCommittedOpTime timestamp in its
- // body. Assert the timestamp is <= lastCommittedOpTime to account for this.
- const statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assert.lte(
- 0,
- bsonWoCompare(res.lastCommittedOpTime, statusRes.optimes.lastCommittedOpTime.ts),
- "lastCommittedOpTime in command response, " + res.lastCommittedOpTime +
- ", is not <= to the replSetGetStatus lastCommittedOpTime timestamp, " +
- statusRes.optimes.lastCommittedOpTime.ts + ", cmd was: " + tojson(cmdObj));
-
- return true;
- }, "command: " + tojson(cmdObj) + " failed to return correct lastCommittedOpTime", 2);
- }
-
- function assertReturnsLastCommittedOpTime(testDB, collName, connType) {
- // Successful commands return lastCommittedOpTime.
- assertCmdReturnsLastCommittedOpTime(
- testDB, {find: collName}, connType, true /* expectSuccess */);
-
- // Failed commands return lastCommittedOpTime.
- assertCmdReturnsLastCommittedOpTime(
- testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
- assertCmdReturnsLastCommittedOpTime(testDB,
- {find: collName, readConcern: {invalid: "rc"}},
- connType,
- false /* expectSuccess */);
- assertCmdReturnsLastCommittedOpTime(
- testDB,
- {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
- connType,
- false /* expectSuccess */);
- }
-
- //
- // Mongos should not return lastCommittedOpTime.
- //
-
- const st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
- assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {x: 1}}));
-
- // Sharded collection.
- assertDoesNotReturnLastCommittedOpTime(
- st.s.getDB("test"), "foo", "mongos talking to a sharded collection");
-
- // Unsharded collection.
- assertDoesNotReturnLastCommittedOpTime(
- st.s.getDB("test"), "unsharded", "mongos talking to a non-sharded collection");
-
- // Collection stored on the config server.
- assertDoesNotReturnLastCommittedOpTime(
- st.s.getDB("config"), "foo", "mongos talking to a config server collection");
-
- //
- // A mongod in a sharded replica set returns lastCommittedOpTime.
- //
-
- // To verify the lastCommittedOpTime is being returned, pause replication on the secondary to
- // prevent the primary from advancing its lastCommittedOpTime and then perform a local write to
- // advance the primary's lastAppliedOpTime.
- let primary = st.rs0.getPrimary();
- let secondary = st.rs0.getSecondary();
-
- st.rs0.awaitLastOpCommitted();
- stopServerReplication(secondary);
- assert.writeOK(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
-
- // Sharded collection.
- assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "sharding-aware shard primary");
- assertReturnsLastCommittedOpTime(
- secondary.getDB("test"), "foo", "sharding-aware shard secondary");
-
- // Unsharded collection.
- assertReturnsLastCommittedOpTime(
- primary.getDB("test"), "unsharded", "sharding-aware shard primary");
- assertReturnsLastCommittedOpTime(
- secondary.getDB("test"), "unsharded", "sharding-aware shard secondary");
-
- restartServerReplication(secondary);
-
- //
- // A config server in a sharded replica set returns lastCommittedOpTime.
- //
-
- // Split the lastCommitted and lastApplied opTimes by pausing secondary application and
- // performing a local write.
- primary = st.configRS.getPrimary();
- secondary = st.configRS.getSecondary();
-
- st.configRS.awaitLastOpCommitted();
- stopServerReplication(secondary);
- assert.writeOK(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
-
- assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "config server primary");
- assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "config server secondary");
-
- restartServerReplication(secondary);
- st.stop();
-
- //
- // A mongod started with --shardsvr that is not sharding aware does not return
- // lastCommittedOpTime.
- //
-
- const replTestShardSvr = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
- replTestShardSvr.startSet();
- replTestShardSvr.initiate();
-
- assertDoesNotReturnLastCommittedOpTime(
- replTestShardSvr.getPrimary().getDB("test"), "foo", "non-sharding aware shard primary");
- assertDoesNotReturnLastCommittedOpTime(
- replTestShardSvr.getSecondary().getDB("test"), "foo", "non-sharding aware shard secondary");
-
- replTestShardSvr.stopSet();
-
- //
- // A mongod from a standalone replica set does not return lastCommittedOpTime.
- //
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
- assertDoesNotReturnLastCommittedOpTime(
- replTest.getPrimary().getDB("test"), "foo", "standalone replica set primary");
- assertDoesNotReturnLastCommittedOpTime(
- replTest.getSecondary().getDB("test"), "foo", "standalone replica set secondary");
+ // The last committed opTime may advance after replSetGetStatus finishes executing and
+ // before its response's metadata is computed, in which case the response's
+ // lastCommittedOpTime will be greater than the lastCommittedOpTime timestamp in its
+ // body. Assert the timestamp is <= lastCommittedOpTime to account for this.
+ const statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
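+        // bsonWoCompare returns a negative, zero, or positive number like a comparator,
+        // so asserting 0 <= bsonWoCompare(a, b) checks that a >= b.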
+ assert.lte(0,
+ bsonWoCompare(res.lastCommittedOpTime, statusRes.optimes.lastCommittedOpTime.ts),
+ "lastCommittedOpTime in command response, " + res.lastCommittedOpTime +
+                       ", is not >= the replSetGetStatus lastCommittedOpTime timestamp, " +
+ statusRes.optimes.lastCommittedOpTime.ts + ", cmd was: " + tojson(cmdObj));
+
+ return true;
+ }, "command: " + tojson(cmdObj) + " failed to return correct lastCommittedOpTime", 2);
+}
+
+function assertReturnsLastCommittedOpTime(testDB, collName, connType) {
+ // Successful commands return lastCommittedOpTime.
+ assertCmdReturnsLastCommittedOpTime(
+ testDB, {find: collName}, connType, true /* expectSuccess */);
+
+ // Failed commands return lastCommittedOpTime.
+ assertCmdReturnsLastCommittedOpTime(
+ testDB, {dummyCommand: collName}, connType, false /* expectSuccess */);
+ assertCmdReturnsLastCommittedOpTime(testDB,
+ {find: collName, readConcern: {invalid: "rc"}},
+ connType,
+ false /* expectSuccess */);
+ assertCmdReturnsLastCommittedOpTime(
+ testDB,
+ {insert: collName, documents: [{x: 2}], writeConcern: {invalid: "wc"}},
+ connType,
+ false /* expectSuccess */);
+}
+
+//
+// Mongos should not return lastCommittedOpTime.
+//
+
+const st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
+assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
+assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {x: 1}}));
+
+// Sharded collection.
+assertDoesNotReturnLastCommittedOpTime(
+ st.s.getDB("test"), "foo", "mongos talking to a sharded collection");
+
+// Unsharded collection.
+assertDoesNotReturnLastCommittedOpTime(
+ st.s.getDB("test"), "unsharded", "mongos talking to a non-sharded collection");
+
+// Collection stored on the config server.
+assertDoesNotReturnLastCommittedOpTime(
+ st.s.getDB("config"), "foo", "mongos talking to a config server collection");
+
+//
+// A mongod in a sharded replica set returns lastCommittedOpTime.
+//
+
+// To verify the lastCommittedOpTime is being returned, pause replication on the secondary to
+// prevent the primary from advancing its lastCommittedOpTime and then perform a local write to
+// advance the primary's lastAppliedOpTime.
+let primary = st.rs0.getPrimary();
+let secondary = st.rs0.getSecondary();
+
+st.rs0.awaitLastOpCommitted();
+stopServerReplication(secondary);
+assert.writeOK(primary.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+
+// Sharded collection.
+assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "sharding-aware shard primary");
+assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "sharding-aware shard secondary");
+
+// Unsharded collection.
+assertReturnsLastCommittedOpTime(
+ primary.getDB("test"), "unsharded", "sharding-aware shard primary");
+assertReturnsLastCommittedOpTime(
+ secondary.getDB("test"), "unsharded", "sharding-aware shard secondary");
+
+restartServerReplication(secondary);
+
+//
+// A config server in a sharded replica set returns lastCommittedOpTime.
+//
+
+// Split the lastCommitted and lastApplied opTimes by pausing secondary application and
+// performing a local write.
+primary = st.configRS.getPrimary();
+secondary = st.configRS.getSecondary();
+
+st.configRS.awaitLastOpCommitted();
+stopServerReplication(secondary);
+assert.writeOK(primary.getDB("config").foo.insert({x: 1}, {writeConcern: {w: 1}}));
+
+assertReturnsLastCommittedOpTime(primary.getDB("test"), "foo", "config server primary");
+assertReturnsLastCommittedOpTime(secondary.getDB("test"), "foo", "config server secondary");
+
+restartServerReplication(secondary);
+st.stop();
+
+//
+// A mongod started with --shardsvr that is not sharding aware does not return
+// lastCommittedOpTime.
+//
+
+const replTestShardSvr = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
+replTestShardSvr.startSet();
+replTestShardSvr.initiate();
+
+assertDoesNotReturnLastCommittedOpTime(
+ replTestShardSvr.getPrimary().getDB("test"), "foo", "non-sharding aware shard primary");
+assertDoesNotReturnLastCommittedOpTime(
+ replTestShardSvr.getSecondary().getDB("test"), "foo", "non-sharding aware shard secondary");
+
+replTestShardSvr.stopSet();
+
+//
+// A mongod from a standalone replica set does not return lastCommittedOpTime.
+//
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+assertDoesNotReturnLastCommittedOpTime(
+ replTest.getPrimary().getDB("test"), "foo", "standalone replica set primary");
+assertDoesNotReturnLastCommittedOpTime(
+ replTest.getSecondary().getDB("test"), "foo", "standalone replica set secondary");
- replTest.stopSet();
+replTest.stopSet();
- //
- // A standalone mongod does not return lastCommittedOpTime.
- //
+//
+// A standalone mongod does not return lastCommittedOpTime.
+//
- const standalone = MongoRunner.runMongod();
+const standalone = MongoRunner.runMongod();
- assertDoesNotReturnLastCommittedOpTime(standalone.getDB("test"), "foo", "standalone mongod");
+assertDoesNotReturnLastCommittedOpTime(standalone.getDB("test"), "foo", "standalone mongod");
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/sharding/single_shard_transaction_with_arbiter.js b/jstests/sharding/single_shard_transaction_with_arbiter.js
index 4acf1e5b820..24c82b78dca 100644
--- a/jstests/sharding/single_shard_transaction_with_arbiter.js
+++ b/jstests/sharding/single_shard_transaction_with_arbiter.js
@@ -5,43 +5,43 @@
*/
(function() {
- "use strict";
-
- const name = "single_shard_transaction_with_arbiter";
- const dbName = "test";
- const collName = name;
-
- const shardingTest = new ShardingTest({
- shards: 1,
- rs: {
- nodes: [
- {/* primary */},
- {/* secondary */ rsConfig: {priority: 0}},
- {/* arbiter */ rsConfig: {arbiterOnly: true}}
- ]
- }
- });
-
- const mongos = shardingTest.s;
- const mongosDB = mongos.getDB(dbName);
- const mongosColl = mongosDB[collName];
-
- // Create and shard collection beforehand.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
- assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- const session = mongos.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // Start a transaction and verify that it succeeds.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 0}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq({_id: 0}, sessionColl.findOne({_id: 0}));
-
- shardingTest.stop();
+"use strict";
+
+const name = "single_shard_transaction_with_arbiter";
+const dbName = "test";
+const collName = name;
+
+const shardingTest = new ShardingTest({
+ shards: 1,
+ rs: {
+ nodes: [
+ {/* primary */},
+ {/* secondary */ rsConfig: {priority: 0}},
+ {/* arbiter */ rsConfig: {arbiterOnly: true}}
+ ]
+ }
+});
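+// With an arbiter the set has three voting members but only two data-bearing nodes, so
+// w: "majority" (2 of 3) requires acknowledgement from both data-bearing members.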
+
+const mongos = shardingTest.s;
+const mongosDB = mongos.getDB(dbName);
+const mongosColl = mongosDB[collName];
+
+// Create and shard collection beforehand.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(mongosColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
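+// Note: with an arbiter in the set, a {w: "majority"} write needs acknowledgement from both
+// data-bearing nodes, since the arbiter stores no data.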
+
+const session = mongos.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// Start a transaction and verify that it succeeds.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 0}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq({_id: 0}, sessionColl.findOne({_id: 0}));
+
+shardingTest.stop();
})();
diff --git a/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js b/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
index 03dd211baf7..82d969eccb7 100644
--- a/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
+++ b/jstests/sharding/single_shard_transaction_without_majority_reads_lagged.js
@@ -14,77 +14,77 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for 'stopServerReplication' and
- // 'restartServerReplication'.
-
- const name = "single_shard_transaction_without_majority_reads_lagged";
- const dbName = "test";
- const collName = name;
-
- const shardingTest = new ShardingTest({
- shards: 1,
- rs: {
- nodes: [
- {/* primary */ enableMajorityReadConcern: 'false'},
- {/* secondary */ rsConfig: {priority: 0}}
- ]
- }
- });
-
- const rst = shardingTest.rs0;
- const mongos = shardingTest.s;
- const mongosDB = mongos.getDB(dbName);
- const mongosColl = mongosDB[collName];
-
- // Create and shard collection beforehand.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // This is the last write the secondary will have before the start of the transaction.
- assert.commandWorked(mongosColl.insert({_id: 1, x: 10}, {writeConcern: {w: "majority"}}));
-
- // We want the secondary to lag for an amount generously greater than the history window.
- const secondary = rst.getSecondary();
- const maxWindowResult = assert.commandWorked(secondary.getDB("admin").runCommand(
- {"getParameter": 1, "maxTargetSnapshotHistoryWindowInSeconds": 1}));
- stopServerReplication(secondary);
-
- const maxWindowInMS = maxWindowResult.maxTargetSnapshotHistoryWindowInSeconds * 1000;
- const lagTimeMS = maxWindowInMS * 2;
- const startTime = Date.now();
- let nextId = 1000;
-
- // Insert a stream of writes to the primary with _ids all numbers greater or equal than
- // 1000 (this is done to easily distinguish them from the write above done with _id: 1).
- // The secondary cannot replicate them, so this has the effect of making that node lagged.
- // It would also update mongos' notion of the latest clusterTime in the system.
- while (Date.now() - startTime < maxWindowInMS) {
- assert.commandWorked(mongosColl.insert({id: nextId}));
- nextId++;
- sleep(50);
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for 'stopServerReplication' and
+ // 'restartServerReplication'.
+
+const name = "single_shard_transaction_without_majority_reads_lagged";
+const dbName = "test";
+const collName = name;
+
+const shardingTest = new ShardingTest({
+ shards: 1,
+ rs: {
+ nodes: [
+ {/* primary */ enableMajorityReadConcern: 'false'},
+ {/* secondary */ rsConfig: {priority: 0}}
+ ]
}
-
- // This is an update only the primary has. The test will explicitly check for it in a few lines.
- assert.commandWorked(mongosColl.update({_id: 1, x: 10}, {_id: 1, x: 20}));
-
- const session = mongos.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- // Begin a transaction and make sure its associated read succeeds. To make this test stricter,
- // have the transaction manipulate data that differs between the primary and secondary.
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Confirm that the results of the transaction are based on what the primary's data was when we
- // started the transaction.
- assert.eq(21, sessionColl.findOne({_id: 1}).x);
-
- restartServerReplication(secondary);
- shardingTest.stop();
+});
+
+const rst = shardingTest.rs0;
+const mongos = shardingTest.s;
+const mongosDB = mongos.getDB(dbName);
+const mongosColl = mongosDB[collName];
+
+// Create and shard collection beforehand.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// This is the last write the secondary will have before the start of the transaction.
+assert.commandWorked(mongosColl.insert({_id: 1, x: 10}, {writeConcern: {w: "majority"}}));
+
+// We want the secondary to lag for an amount generously greater than the history window.
+const secondary = rst.getSecondary();
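+// maxTargetSnapshotHistoryWindowInSeconds is (roughly) how much snapshot history the storage
+// engine aims to retain; it is read here so the induced lag can comfortably exceed it.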
+const maxWindowResult = assert.commandWorked(secondary.getDB("admin").runCommand(
+ {"getParameter": 1, "maxTargetSnapshotHistoryWindowInSeconds": 1}));
+stopServerReplication(secondary);
+
+const maxWindowInMS = maxWindowResult.maxTargetSnapshotHistoryWindowInSeconds * 1000;
+const lagTimeMS = maxWindowInMS * 2;
+const startTime = Date.now();
+let nextId = 1000;
+
+// Insert a stream of writes to the primary with _ids that are all numbers greater than or
+// equal to 1000 (chosen to easily distinguish them from the earlier write with _id: 1).
+// The secondary cannot replicate them, so this has the effect of making that node lagged.
+// It also updates mongos' notion of the latest clusterTime in the system.
+while (Date.now() - startTime < maxWindowInMS) {
+    assert.commandWorked(mongosColl.insert({_id: nextId}));
+ nextId++;
+ sleep(50);
+}
+
+// This is an update only the primary has. The test will explicitly check for it in a few lines.
+assert.commandWorked(mongosColl.update({_id: 1, x: 10}, {_id: 1, x: 20}));
+
+const session = mongos.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+// Begin a transaction and make sure its associated read succeeds. To make this test stricter,
+// have the transaction manipulate data that differs between the primary and secondary.
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}}));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Confirm that the results of the transaction are based on what the primary's data was when we
+// started the transaction.
+assert.eq(21, sessionColl.findOne({_id: 1}).x);
+
+restartServerReplication(secondary);
+shardingTest.stop();
})();
diff --git a/jstests/sharding/snapshot_cursor_commands_mongos.js b/jstests/sharding/snapshot_cursor_commands_mongos.js
index face37b1af3..a853cc10942 100644
--- a/jstests/sharding/snapshot_cursor_commands_mongos.js
+++ b/jstests/sharding/snapshot_cursor_commands_mongos.js
@@ -1,273 +1,266 @@
// Tests snapshot isolation on readConcern level snapshot reads through mongos.
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- // This test intentionally runs commands without a logical session id, which is not compatible
- // with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/global_snapshot_reads_util.js");
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const shardedCollName = "shardedColl";
- const unshardedCollName = "unshardedColl";
-
- const commands = {
- aggregate: {
- firstCommand: function(collName) {
- return {
- aggregate: collName,
- pipeline: [{$sort: {_id: 1}}],
- cursor: {batchSize: 5},
- readConcern: {level: "snapshot"},
- };
- },
- secondCommand: function(collName) {
- return {
- aggregate: collName,
- pipeline: [{$sort: {_id: 1}}],
- cursor: {batchSize: 20},
- readConcern: {level: "snapshot"},
- };
- }
+"use strict";
+
+// This test intentionally runs commands without a logical session id, which is not compatible
+// with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/global_snapshot_reads_util.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const shardedCollName = "shardedColl";
+const unshardedCollName = "unshardedColl";
+
+const commands = {
+ aggregate: {
+ firstCommand: function(collName) {
+ return {
+ aggregate: collName,
+ pipeline: [{$sort: {_id: 1}}],
+ cursor: {batchSize: 5},
+ readConcern: {level: "snapshot"},
+ };
},
- find: {
- firstCommand: function(collName) {
- return {
- find: collName,
- sort: {_id: 1},
- batchSize: 5,
- readConcern: {level: "snapshot"},
- };
- },
- secondCommand: function(collName) {
- return {
- find: collName,
- sort: {_id: 1},
- batchSize: 20,
- readConcern: {level: "snapshot"},
- };
- }
+ secondCommand: function(collName) {
+ return {
+ aggregate: collName,
+ pipeline: [{$sort: {_id: 1}}],
+ cursor: {batchSize: 20},
+ readConcern: {level: "snapshot"},
+ };
}
- };
-
- let shardingScenarios = {
- // Tests a snapshot cursor command in a single shard environment. The set up inserts a
- // collection, shards it if it's a collection meant to be sharded, and inserts ten
- // documents.
- singleShard: {
- compatibleCollections: [shardedCollName, unshardedCollName],
- name: "singleShard",
- setUp: function(collName) {
- const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
- return shardingScenarios.allScenarios.setUp(st, collName);
- }
- },
- // Tests a snapshot cursor command in a multi shard enviroment. The set up inserts a
- // collection, shards the collection, and inserts ten documents. Afterwards, chunks are
- // split and moved such that every shard should have some documents that will be found
- // by the cursor command.
- multiShardAllShardReads: {
- compatibleCollections: [shardedCollName],
- name: "multiShardAllShardReads",
- setUp: function(collName) {
- let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
- st = shardingScenarios.allScenarios.setUp(st, collName);
-
- if (st === undefined) {
- return;
- }
-
- const mongos = st.s0;
-
- const ns = dbName + '.' + shardedCollName;
-
- assert.commandWorked(st.splitAt(ns, {_id: 4}));
- assert.commandWorked(st.splitAt(ns, {_id: 7}));
-
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 4}, to: st.shard1.shardName}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
-
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- return st;
- }
- },
- // Tests a snapshot cursor command in a multi shard enviroment. The set up inserts a
- // collection, shards the collection, and inserts ten documents. Afterwards, chunks are
- // split and moved such that only two out of three shards will have documents that will be
- // found by the cursor command.
- multiShardSomeShardReads: {
- compatibleCollections: [shardedCollName],
- name: "multiShardSomeShardReads",
- setUp: function(collName) {
- let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
- st = shardingScenarios.allScenarios.setUp(st, collName);
-
- if (st === undefined) {
- return;
- }
-
- const mongos = st.s0;
-
- const ns = dbName + '.' + shardedCollName;
-
- assert.commandWorked(st.splitAt(ns, {_id: 5}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
-
- assert.eq(
- 0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- assert.eq(
- 1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- return st;
- }
+ },
+ find: {
+ firstCommand: function(collName) {
+ return {
+ find: collName,
+ sort: {_id: 1},
+ batchSize: 5,
+ readConcern: {level: "snapshot"},
+ };
},
- allScenarios: {
- name: "allScenarios",
- setUp: function(st, collName) {
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand(
- {shardCollection: st.s.getDB(dbName)[shardedCollName] + "", key: {_id: 1}}));
-
- const mainDb = st.s.getDB(dbName);
-
- let bulk = mainDb[collName].initializeUnorderedBulkOp();
- for (let x = 0; x < 10; ++x) {
- bulk.insert({_id: x});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- return st;
+ secondCommand: function(collName) {
+ return {
+ find: collName,
+ sort: {_id: 1},
+ batchSize: 20,
+ readConcern: {level: "snapshot"},
+ };
+ }
+ }
+};
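+
+// Each command above is run twice per scenario: firstCommand opens a snapshot cursor with a
+// batch smaller than the collection so getMore is required, while secondCommand uses a batch
+// large enough to return every document at once.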
+
+let shardingScenarios = {
+    // Tests a snapshot cursor command in a single-shard environment. The setup creates a
+    // collection, shards it if it is meant to be sharded, and inserts ten documents.
+ singleShard: {
+ compatibleCollections: [shardedCollName, unshardedCollName],
+ name: "singleShard",
+ setUp: function(collName) {
+ const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+ return shardingScenarios.allScenarios.setUp(st, collName);
+ }
+ },
+    // Tests a snapshot cursor command in a multi-shard environment. The setup creates a
+    // collection, shards it, and inserts ten documents. Afterwards, chunks are split and
+    // moved such that every shard holds some documents that the cursor command will find.
+ multiShardAllShardReads: {
+ compatibleCollections: [shardedCollName],
+ name: "multiShardAllShardReads",
+ setUp: function(collName) {
+ let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+ st = shardingScenarios.allScenarios.setUp(st, collName);
+
+ if (st === undefined) {
+ return;
}
+
+ const mongos = st.s0;
+
+ const ns = dbName + '.' + shardedCollName;
+
+ assert.commandWorked(st.splitAt(ns, {_id: 4}));
+ assert.commandWorked(st.splitAt(ns, {_id: 7}));
+
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 4}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
+
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+
+ return st;
}
- };
+ },
+    // Tests a snapshot cursor command in a multi-shard environment. The setup creates a
+    // collection, shards it, and inserts ten documents. Afterwards, chunks are split and
+    // moved such that only two of the three shards hold documents that the cursor command
+    // will find.
+ multiShardSomeShardReads: {
+ compatibleCollections: [shardedCollName],
+ name: "multiShardSomeShardReads",
+ setUp: function(collName) {
+ let st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+ st = shardingScenarios.allScenarios.setUp(st, collName);
+
+ if (st === undefined) {
+ return;
+ }
- function runScenario(testScenario, {useCausalConsistency}) {
- testScenario.compatibleCollections.forEach(function(collName) {
- jsTestLog("Running the " + testScenario.name + " scenario on collection " + collName);
- runTest(testScenario, {useCausalConsistency, commands, collName});
- });
- }
+ const mongos = st.s0;
+
+ const ns = dbName + '.' + shardedCollName;
+
+ assert.commandWorked(st.splitAt(ns, {_id: 5}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, find: {_id: 7}, to: st.shard2.shardName}));
- function runTest(testScenario, {useCausalConsistency, commands, collName}) {
- let st = testScenario.setUp(collName);
- assert(st);
- assert(commands);
- assert(collName);
-
- const mainDb = st.s.getDB(dbName);
-
- for (let commandKey in commands) {
- assert(commandKey);
- jsTestLog("Testing the " + commandKey + " command.");
- const command = commands[commandKey];
-
- const session =
- mainDb.getMongo().startSession({causalConsistency: useCausalConsistency});
- const lsid = session.getSessionId();
- const sessionDb = session.getDatabase(dbName);
-
- // Test snapshot reads.
- session.startTransaction({writeConcern: {w: "majority"}});
-
- let txnNumber = session.getTxnNumber_forTesting();
-
- // Establish a snapshot cursor, fetching the first 5 documents.
- let res = assert.commandWorked(sessionDb.runCommand(command.firstCommand(collName)));
-
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(5, res.cursor.firstBatch.length);
-
- assert(res.cursor.hasOwnProperty("id"));
- const cursorId = res.cursor.id;
- assert.neq(cursorId, 0);
-
- // Insert an 11th document which should not be visible to the snapshot cursor. This
- // write is performed outside of the session.
- assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
-
- verifyInvalidGetMoreAttempts(mainDb, collName, cursorId, lsid, txnNumber);
-
- // Fetch the 6th document. This confirms that the transaction stash is preserved across
- // multiple getMore invocations.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 1,
- }));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.neq(0, res.cursor.id);
-
- // Exhaust the cursor, retrieving the remainder of the result set.
- res = assert.commandWorked(sessionDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 10,
- }));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // Only the remaining 4 of the initial 10 documents are returned. The 11th document is
- // not part of the result set.
- assert(res.cursor.hasOwnProperty("nextBatch"));
- assert.eq(4, res.cursor.nextBatch.length);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Perform a second snapshot read under a new transaction.
- session.startTransaction({writeConcern: {w: "majority"}});
- res = assert.commandWorked(sessionDb.runCommand(command.secondCommand(collName)));
-
- // The cursor has been exhausted.
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- assert.eq(0, res.cursor.id);
-
- // All 11 documents are returned.
- assert(res.cursor.hasOwnProperty("firstBatch"));
- assert.eq(11, res.cursor.firstBatch.length);
-
- // Remove the 11th document to preserve the collection for the next command.
- assert.writeOK(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+ assert.eq(0, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, shard: st.shard2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+
+ return st;
}
+ },
+ allScenarios: {
+ name: "allScenarios",
+ setUp: function(st, collName) {
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand(
+ {shardCollection: st.s.getDB(dbName)[shardedCollName] + "", key: {_id: 1}}));
+
+ const mainDb = st.s.getDB(dbName);
+
+ let bulk = mainDb[collName].initializeUnorderedBulkOp();
+ for (let x = 0; x < 10; ++x) {
+ bulk.insert({_id: x});
+ }
+ assert.commandWorked(bulk.execute({w: "majority"}));
- st.stop();
+ return st;
+ }
+ }
+};
+
+function runScenario(testScenario, {useCausalConsistency}) {
+ testScenario.compatibleCollections.forEach(function(collName) {
+ jsTestLog("Running the " + testScenario.name + " scenario on collection " + collName);
+ runTest(testScenario, {useCausalConsistency, commands, collName});
+ });
+}
+
+function runTest(testScenario, {useCausalConsistency, commands, collName}) {
+ let st = testScenario.setUp(collName);
+ assert(st);
+ assert(commands);
+ assert(collName);
+
+ const mainDb = st.s.getDB(dbName);
+
+ for (let commandKey in commands) {
+ assert(commandKey);
+ jsTestLog("Testing the " + commandKey + " command.");
+ const command = commands[commandKey];
+
+ const session = mainDb.getMongo().startSession({causalConsistency: useCausalConsistency});
+ const lsid = session.getSessionId();
+ const sessionDb = session.getDatabase(dbName);
+
+ // Test snapshot reads.
+ session.startTransaction({writeConcern: {w: "majority"}});
+
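+        // Capture the transaction number; the invalid-getMore checks below reuse it.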
+ let txnNumber = session.getTxnNumber_forTesting();
+
+ // Establish a snapshot cursor, fetching the first 5 documents.
+ let res = assert.commandWorked(sessionDb.runCommand(command.firstCommand(collName)));
+
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("firstBatch"));
+ assert.eq(5, res.cursor.firstBatch.length);
+
+ assert(res.cursor.hasOwnProperty("id"));
+ const cursorId = res.cursor.id;
+ assert.neq(cursorId, 0);
+
+ // Insert an 11th document which should not be visible to the snapshot cursor. This
+ // write is performed outside of the session.
+ assert.writeOK(mainDb[collName].insert({_id: 10}, {writeConcern: {w: "majority"}}));
+
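+        // verifyInvalidGetMoreAttempts comes from global_snapshot_reads_util.js (loaded
+        // above); it asserts that getMore fails when issued with mismatched session or
+        // transaction parameters.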
+ verifyInvalidGetMoreAttempts(mainDb, collName, cursorId, lsid, txnNumber);
+
+ // Fetch the 6th document. This confirms that the transaction stash is preserved across
+ // multiple getMore invocations.
+ res = assert.commandWorked(sessionDb.runCommand({
+ getMore: cursorId,
+ collection: collName,
+ batchSize: 1,
+ }));
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("id"));
+ assert.neq(0, res.cursor.id);
+
+ // Exhaust the cursor, retrieving the remainder of the result set.
+ res = assert.commandWorked(sessionDb.runCommand({
+ getMore: cursorId,
+ collection: collName,
+ batchSize: 10,
+ }));
+
+ // The cursor has been exhausted.
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("id"));
+ assert.eq(0, res.cursor.id);
+
+ // Only the remaining 4 of the initial 10 documents are returned. The 11th document is
+ // not part of the result set.
+ assert(res.cursor.hasOwnProperty("nextBatch"));
+ assert.eq(4, res.cursor.nextBatch.length);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Perform a second snapshot read under a new transaction.
+ session.startTransaction({writeConcern: {w: "majority"}});
+ res = assert.commandWorked(sessionDb.runCommand(command.secondCommand(collName)));
+
+ // The cursor has been exhausted.
+ assert(res.hasOwnProperty("cursor"));
+ assert(res.cursor.hasOwnProperty("id"));
+ assert.eq(0, res.cursor.id);
+
+ // All 11 documents are returned.
+ assert(res.cursor.hasOwnProperty("firstBatch"));
+ assert.eq(11, res.cursor.firstBatch.length);
+
+ // Remove the 11th document to preserve the collection for the next command.
+ assert.writeOK(mainDb[collName].remove({_id: 10}, {writeConcern: {w: "majority"}}));
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
}
- // Runs against a sharded and unsharded collection.
- runScenario(shardingScenarios.singleShard, {useCausalConsistency: false});
+ st.stop();
+}
+
+// Runs against a sharded and unsharded collection.
+runScenario(shardingScenarios.singleShard, {useCausalConsistency: false});
- runScenario(shardingScenarios.multiShardAllShardReads, {useCausalConsistency: false});
+runScenario(shardingScenarios.multiShardAllShardReads, {useCausalConsistency: false});
- runScenario(shardingScenarios.multiShardSomeShardReads,
- {useCausalConsistency: false, collName: shardedCollName});
+runScenario(shardingScenarios.multiShardSomeShardReads,
+ {useCausalConsistency: false, collName: shardedCollName});
})();
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index dc423ac14d7..ab73928836e 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,110 +1,110 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
- s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
-
- var db = s.getDB("test");
-
- const N = 100;
-
- var forward = [];
- var backward = [];
- for (var i = 0; i < N; i++) {
- db.data.insert({_id: i, sub: {num: i, x: N - i}});
- forward.push(i);
- backward.push((N - 1) - i);
- }
-
- s.adminCommand({split: "test.data", middle: {'sub.num': 33}});
- s.adminCommand({split: "test.data", middle: {'sub.num': 66}});
-
- s.adminCommand({
- movechunk: "test.data",
- find: {'sub.num': 50},
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true
- });
-
- assert.lte(3, s.config.chunks.find({ns: 'test.data'}).itcount(), "A1");
-
- var temp = s.config.chunks.find({ns: 'test.data'}).sort({min: 1}).toArray();
- temp.forEach(printjsononeline);
-
- var z = 0;
- for (; z < temp.length; z++)
- if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
- break;
-
- assert.eq(temp[z - 1].shard, temp[z + 1].shard, "A2");
- assert.neq(temp[z - 1].shard, temp[z].shard, "A3");
-
- temp = db.data.find().sort({'sub.num': 1}).toArray();
- assert.eq(N, temp.length, "B1");
- for (i = 0; i < 100; i++) {
- assert.eq(i, temp[i].sub.num, "B2");
- }
-
- db.data.find().sort({'sub.num': 1}).toArray();
- s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
-
- var a = Date.timeFunc(function() {
- z = db.data.find().sort({'sub.num': 1}).toArray();
- }, 200);
- assert.eq(100, z.length, "C1");
-
- var b = 1.5 * Date.timeFunc(function() {
- z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
- }, 200);
- assert.eq(67, z.length, "C2");
-
- print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
-
- // -- secondary index sorting
-
- function getSorted(by, dir, proj) {
- var s = {};
- s[by] = dir || 1;
- printjson(s);
- var cur = db.data.find({}, proj || {}).sort(s);
- return terse(cur.map(function(z) {
- return z.sub.num;
- }));
- }
-
- function terse(a) {
- var s = "";
- for (var i = 0; i < a.length; i++) {
- if (i > 0)
- s += ",";
- s += a[i];
- }
- return s;
+'use strict';
+
+var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
+
+var db = s.getDB("test");
+
+const N = 100;
+
+var forward = [];
+var backward = [];
+for (var i = 0; i < N; i++) {
+ db.data.insert({_id: i, sub: {num: i, x: N - i}});
+ forward.push(i);
+ backward.push((N - 1) - i);
+}
+
+s.adminCommand({split: "test.data", middle: {'sub.num': 33}});
+s.adminCommand({split: "test.data", middle: {'sub.num': 66}});
+
+s.adminCommand({
+ movechunk: "test.data",
+ find: {'sub.num': 50},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert.lte(3, s.config.chunks.find({ns: 'test.data'}).itcount(), "A1");
+
+var temp = s.config.chunks.find({ns: 'test.data'}).sort({min: 1}).toArray();
+temp.forEach(printjsononeline);
+
+var z = 0;
+for (; z < temp.length; z++)
+ if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
+ break;
+
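+// The chunk containing 50 was moved to the other shard, so its neighbors share a shard (A2)
+// while the moved chunk itself does not (A3).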
+assert.eq(temp[z - 1].shard, temp[z + 1].shard, "A2");
+assert.neq(temp[z - 1].shard, temp[z].shard, "A3");
+
+temp = db.data.find().sort({'sub.num': 1}).toArray();
+assert.eq(N, temp.length, "B1");
+for (i = 0; i < 100; i++) {
+ assert.eq(i, temp[i].sub.num, "B2");
+}
+
+db.data.find().sort({'sub.num': 1}).toArray();
+s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+
+var a = Date.timeFunc(function() {
+ z = db.data.find().sort({'sub.num': 1}).toArray();
+}, 200);
+assert.eq(100, z.length, "C1");
+
+var b = 1.5 * Date.timeFunc(function() {
+ z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+}, 200);
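+// The primary shard kept the chunks below 33 and from 66 upwards, i.e. 67 of the 100 docs.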
+assert.eq(67, z.length, "C2");
+
+print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
+
+// -- secondary index sorting
+
+function getSorted(by, dir, proj) {
+ var s = {};
+ s[by] = dir || 1;
+ printjson(s);
+ var cur = db.data.find({}, proj || {}).sort(s);
+ return terse(cur.map(function(z) {
+ return z.sub.num;
+ }));
+}
+
+function terse(a) {
+ var s = "";
+ for (var i = 0; i < a.length; i++) {
+ if (i > 0)
+ s += ",";
+ s += a[i];
}
+ return s;
+}
- forward = terse(forward);
- backward = terse(backward);
+forward = terse(forward);
+backward = terse(backward);
- assert.eq(forward, getSorted("sub.num", 1), "D1");
- assert.eq(backward, getSorted("sub.num", -1), "D2");
+assert.eq(forward, getSorted("sub.num", 1), "D1");
+assert.eq(backward, getSorted("sub.num", -1), "D2");
- assert.eq(backward, getSorted("sub.x", 1), "D3");
- assert.eq(forward, getSorted("sub.x", -1), "D4");
+assert.eq(backward, getSorted("sub.x", 1), "D3");
+assert.eq(forward, getSorted("sub.x", -1), "D4");
- assert.eq(backward, getSorted("sub.x", 1, {'sub.num': 1}), "D5");
- assert.eq(forward, getSorted("sub.x", -1, {'sub.num': 1}), "D6");
+assert.eq(backward, getSorted("sub.x", 1, {'sub.num': 1}), "D5");
+assert.eq(forward, getSorted("sub.x", -1, {'sub.num': 1}), "D6");
- assert.eq(backward, getSorted("sub.x", 1, {'sub': 1}), "D7");
- assert.eq(forward, getSorted("sub.x", -1, {'sub': 1}), "D8");
+assert.eq(backward, getSorted("sub.x", 1, {'sub': 1}), "D7");
+assert.eq(forward, getSorted("sub.x", -1, {'sub': 1}), "D8");
- assert.eq(backward, getSorted("sub.x", 1, {'_id': 0}), "D9");
- assert.eq(forward, getSorted("sub.x", -1, {'_id': 0}), "D10");
+assert.eq(backward, getSorted("sub.x", 1, {'_id': 0}), "D9");
+assert.eq(forward, getSorted("sub.x", -1, {'_id': 0}), "D10");
- assert.eq(backward, getSorted("sub.x", 1, {'_id': 0, 'sub.num': 1}), "D11");
- assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
+assert.eq(backward, getSorted("sub.x", 1, {'_id': 0, 'sub.num': 1}), "D11");
+assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/split_against_shard_with_invalid_split_points.js b/jstests/sharding/split_against_shard_with_invalid_split_points.js
index 8817e524daf..54eff23d8b7 100644
--- a/jstests/sharding/split_against_shard_with_invalid_split_points.js
+++ b/jstests/sharding/split_against_shard_with_invalid_split_points.js
@@ -1,41 +1,41 @@
// Tests that executing splitChunk directly against a shard, with an invalid split point will not
// corrupt the chunks metadata
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1});
-
- var testDB = st.s.getDB('TestSplitDB');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'TestSplitDB'}));
- st.ensurePrimaryShard('TestSplitDB', st.shard0.shardName);
-
- assert.commandWorked(testDB.adminCommand({shardCollection: 'TestSplitDB.Coll', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'TestSplitDB.Coll', middle: {x: 0}}));
-
- var chunksBefore = st.s.getDB('config').chunks.find().toArray();
-
- // Try to do a split with invalid parameters through mongod
- var callSplit = function(db, minKey, maxKey, splitPoints) {
- var res = assert.commandWorked(st.s.adminCommand({getShardVersion: 'TestSplitDB.Coll'}));
- return db.runCommand({
- splitChunk: 'TestSplitDB.Coll',
- from: st.shard0.shardName,
- min: minKey,
- max: maxKey,
- keyPattern: {x: 1},
- splitKeys: splitPoints,
- epoch: res.versionEpoch,
- });
- };
-
- assert.commandFailedWithCode(
- callSplit(st.rs0.getPrimary().getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]),
- ErrorCodes.InvalidOptions);
-
- var chunksAfter = st.s.getDB('config').chunks.find().toArray();
- assert.eq(chunksBefore,
- chunksAfter,
- 'Split chunks failed, but the chunks were updated in the config database');
-
- st.stop();
+'use strict';
+
+var st = new ShardingTest({shards: 1});
+
+var testDB = st.s.getDB('TestSplitDB');
+assert.commandWorked(testDB.adminCommand({enableSharding: 'TestSplitDB'}));
+st.ensurePrimaryShard('TestSplitDB', st.shard0.shardName);
+
+assert.commandWorked(testDB.adminCommand({shardCollection: 'TestSplitDB.Coll', key: {x: 1}}));
+assert.commandWorked(testDB.adminCommand({split: 'TestSplitDB.Coll', middle: {x: 0}}));
+
+var chunksBefore = st.s.getDB('config').chunks.find().toArray();
+
+// Try to do a split with invalid parameters through mongod
+var callSplit = function(db, minKey, maxKey, splitPoints) {
+ var res = assert.commandWorked(st.s.adminCommand({getShardVersion: 'TestSplitDB.Coll'}));
+ return db.runCommand({
+ splitChunk: 'TestSplitDB.Coll',
+ from: st.shard0.shardName,
+ min: minKey,
+ max: maxKey,
+ keyPattern: {x: 1},
+ splitKeys: splitPoints,
+ epoch: res.versionEpoch,
+ });
+};
+
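+// The targeted chunk spans [{x: MinKey}, {x: 0}), so the split point {x: 2} lies outside its
+// bounds and the shard must reject it without modifying any metadata.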
+assert.commandFailedWithCode(
+ callSplit(st.rs0.getPrimary().getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]),
+ ErrorCodes.InvalidOptions);
+
+var chunksAfter = st.s.getDB('config').chunks.find().toArray();
+assert.eq(chunksBefore,
+ chunksAfter,
+ 'Split chunks failed, but the chunks were updated in the config database');
+
+st.stop();
})();
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index 5a5504594d2..5ee2ecf7bc7 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -1,68 +1,67 @@
// Test for splitting a chunk with a very large shard key value should not be allowed
// and does not corrupt the config.chunks metadata.
(function() {
- 'use strict';
+'use strict';
- function verifyChunk(keys, expectFail, ns) {
- // If split failed then there's only 1 chunk
- // With a min & max for the shardKey
- if (expectFail) {
- assert.eq(1, configDB.chunks.find({"ns": ns}).count(), "Chunks count no split");
- var chunkDoc = configDB.chunks.findOne({"ns": ns});
- assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
- assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
- } else {
- assert.eq(2, configDB.chunks.find({"ns": ns}).count(), "Chunks count split");
- }
+function verifyChunk(keys, expectFail, ns) {
+    // If the split failed, there's only one chunk, whose min & max span the entire shard
+    // key range.
+ if (expectFail) {
+ assert.eq(1, configDB.chunks.find({"ns": ns}).count(), "Chunks count no split");
+ var chunkDoc = configDB.chunks.findOne({"ns": ns});
+ assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
+ assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
+ } else {
+ assert.eq(2, configDB.chunks.find({"ns": ns}).count(), "Chunks count split");
}
+}
- // Tests
- // - name: Name of test, used in collection name
- // - key: key to test
- // - keyFieldSize: size of each key field
- // - expectFail: true/false, true if key is too large to pre-split
- var tests = [
- {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
- {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
- {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
- {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
- ];
+// Tests
+// - name: Name of test, used in collection name
+// - key: key to test
+// - keyFieldSize: size of each key field
+// - expectFail: true/false, true if key is too large to pre-split
+var tests = [
+ {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
+ {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
+ {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
+ {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
+];
- var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
+var st = new ShardingTest({shards: 1});
+var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- tests.forEach(function(test) {
- var collName = "split_large_key_" + test.name;
- var midKey = {};
- var chunkKeys = {min: {}, max: {}};
- for (var k in test.key) {
- // new Array with join creates string length 1 less than size, so add 1
- midKey[k] = new Array(test.keyFieldSize + 1).join('a');
- // min & max keys for each field in the index
- chunkKeys.min[k] = MinKey;
- chunkKeys.max[k] = MaxKey;
- }
-
- assert.commandWorked(
- configDB.adminCommand({shardCollection: "test." + collName, key: test.key}));
+tests.forEach(function(test) {
+ var collName = "split_large_key_" + test.name;
+ var midKey = {};
+ var chunkKeys = {min: {}, max: {}};
+ for (var k in test.key) {
+        // new Array(n).join('a') creates a string of length n - 1, so add 1 to reach
+        // keyFieldSize.
+ midKey[k] = new Array(test.keyFieldSize + 1).join('a');
+ // min & max keys for each field in the index
+ chunkKeys.min[k] = MinKey;
+ chunkKeys.max[k] = MaxKey;
+ }
- var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
- if (test.expectFail) {
- assert(!res.ok, "Split: " + collName);
- assert(res.errmsg !== null, "Split errmsg: " + collName);
- } else {
- assert(res.ok, "Split: " + collName + " " + res.errmsg);
- }
+ assert.commandWorked(
+ configDB.adminCommand({shardCollection: "test." + collName, key: test.key}));
- verifyChunk(chunkKeys, test.expectFail, "test." + collName);
+ var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
+ if (test.expectFail) {
+ assert(!res.ok, "Split: " + collName);
+ assert(res.errmsg !== null, "Split errmsg: " + collName);
+ } else {
+ assert(res.ok, "Split: " + collName + " " + res.errmsg);
+ }
- st.s0.getCollection("test." + collName).drop();
- });
+ verifyChunk(chunkKeys, test.expectFail, "test." + collName);
- st.stop();
+ st.s0.getCollection("test." + collName).drop();
+});
+st.stop();
})();
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 2554d41048f..8e281dcbe20 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -2,67 +2,67 @@
// Tests autosplit locations with force : true, for small collections
//
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
+var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var shardAdmin = st.shard0.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var shardAdmin = st.shard0.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- jsTest.log("Insert a bunch of data into the low chunk of a collection," +
- " to prevent relying on stats.");
+jsTest.log("Insert a bunch of data into the low chunk of a collection," +
+ " to prevent relying on stats.");
- var data128k = "x";
- for (var i = 0; i < 7; i++)
- data128k += data128k;
+var data128k = "x";
+for (var i = 0; i < 7; i++)
+ data128k += data128k;
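+// Note: despite its name, data128k is a 128-byte string ("x" doubled 7 times), and the
+// documents inserted below do not embed it.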
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 1024; i++) {
- bulk.insert({_id: -(i + 1)});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: -(i + 1)});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Insert 32 docs into the high chunk of a collection");
+jsTest.log("Insert 32 docs into the high chunk of a collection");
- bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 32; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 32; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Split off MaxKey chunk...");
+jsTest.log("Split off MaxKey chunk...");
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}}));
- jsTest.log("Keep splitting chunk multiple times...");
+jsTest.log("Keep splitting chunk multiple times...");
- st.printShardingStatus();
+st.printShardingStatus();
- for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}}));
- st.printShardingStatus();
- }
+for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}}));
+ st.printShardingStatus();
+}
- // Make sure we can't split further than 5 (2^5) times
- assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}}));
+// Make sure we can't split more than 5 times (2^5 = 32 docs, i.e. down to one doc per chunk)
+assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}}));
- var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
- printjson(chunks);
+var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
+printjson(chunks);
- // Make sure the chunks grow by 2x (except the first)
- var nextSize = 1;
- for (var i = 0; i < chunks.size; i++) {
- assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
- if (i != 0)
- nextSize += nextSize;
- }
+// Make sure the chunks grow by 2x (except the first)
+var nextSize = 1;
+for (var i = 0; i < chunks.length; i++) {
+ assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
+ if (i != 0)
+ nextSize += nextSize;
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/ssv_config_check.js b/jstests/sharding/ssv_config_check.js
index cba03476cb0..48d7f2eac70 100644
--- a/jstests/sharding/ssv_config_check.js
+++ b/jstests/sharding/ssv_config_check.js
@@ -3,46 +3,46 @@
* replica set name, but with a member list that is not strictly the same.
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var testDB = st.s.getDB('test');
- testDB.adminCommand({enableSharding: 'test'});
- testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+var testDB = st.s.getDB('test');
+testDB.adminCommand({enableSharding: 'test'});
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- testDB.user.insert({x: 1});
+testDB.user.insert({x: 1});
- var directConn = new Mongo(st.rs0.getPrimary().host);
- var adminDB = directConn.getDB('admin');
+var directConn = new Mongo(st.rs0.getPrimary().host);
+var adminDB = directConn.getDB('admin');
- var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer;
- var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
+var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer;
+var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
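+// Dropping the last host yields a connection string whose member list overlaps, but does not
+// exactly match, the config server's real one.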
- var shardDoc = st.s.getDB('config').shards.findOne();
+var shardDoc = st.s.getDB('config').shards.findOne();
- jsTest.log("Verify that the obsolete init form of setShardVersion succeeds on shards.");
- assert.commandWorked(adminDB.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: alternateConfigStr,
- shard: shardDoc._id,
- shardHost: shardDoc.host
- }));
+jsTest.log("Verify that the obsolete init form of setShardVersion succeeds on shards.");
+assert.commandWorked(adminDB.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: alternateConfigStr,
+ shard: shardDoc._id,
+ shardHost: shardDoc.host
+}));
- var configAdmin = st.c0.getDB('admin');
+var configAdmin = st.c0.getDB('admin');
- jsTest.log("Verify that setShardVersion fails on the config server");
- // Even if shardName sent is 'config' and connstring sent is config server's actual connstring.
- assert.commandFailedWithCode(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: configStr,
- shard: 'config'
- }),
- ErrorCodes.NoShardingEnabled);
+jsTest.log("Verify that setShardVersion fails on the config server");
+// This holds even when the shardName sent is 'config' and the connstring sent is the config
+// server's actual connstring.
+assert.commandFailedWithCode(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: configStr,
+ shard: 'config'
+}),
+ ErrorCodes.NoShardingEnabled);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 1c0e251d296..85eb196e93e 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -18,250 +18,255 @@
*/
(function() {
- 'use strict';
+'use strict';
- // Create a new sharded collection with numDocs documents, with two docs sharing each shard key
- // (used for testing *multi* removes to a *specific* shard key).
- function resetCollection() {
- assert(staleMongos.getCollection(collNS).drop());
- assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
+// Create a new sharded collection with numDocs documents, with two docs sharing each shard key
+// (used for testing *multi* removes to a *specific* shard key).
+function resetCollection() {
+ assert(staleMongos.getCollection(collNS).drop());
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
- for (let i = 0; i < numShardKeys; i++) {
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
- assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
- }
-
- // Make sure data has replicated to all config servers so freshMongos finds a sharded
- // collection: freshMongos has an older optime and won't wait to see what staleMongos did
- // (shardCollection).
- st.configRS.awaitLastOpCommitted();
+ for (let i = 0; i < numShardKeys; i++) {
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
}
- // Create a new sharded collection, then split it into two chunks on different shards using the
- // stale mongos. Then use the fresh mongos to consolidate the chunks onto one of the shards.
- // staleMongos will see:
- // shard0: (-inf, splitPoint]
- // shard1: (splitPoint, inf]
- // freshMongos will see:
- // shard0: (-inf, splitPoint], (splitPoint, inf]
- // shard1:
- function makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard() {
- resetCollection();
-
- // Make sure staleMongos sees all data on first shard.
- const chunk = staleMongos.getCollection("config.chunks")
- .findOne({min: {x: MinKey}, max: {x: MaxKey}});
- assert(chunk.shard === st.shard0.shardName);
-
- // Make sure staleMongos sees two chunks on two different shards.
- assert.commandWorked(staleMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
- assert.commandWorked(staleMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.configRS.awaitLastOpCommitted();
-
- // Use freshMongos to consolidate the chunks on one shard.
- assert.commandWorked(freshMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
- }
-
- // Create a new sharded collection with a single chunk, then move that chunk from the primary
- // shard to another shard using the fresh mongos.
- // staleMongos will see:
- // shard0: (-inf, inf]
- // shard1:
- // freshMongos will see:
- // shard0:
- // shard1: (-inf, inf]
- function makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard() {
- resetCollection();
-
- // Make sure staleMongos sees all data on first shard.
- const chunk = staleMongos.getCollection("config.chunks")
- .findOne({min: {x: MinKey}, max: {x: MaxKey}});
- assert(chunk.shard === st.shard0.shardName);
-
- // Use freshMongos to move chunk to another shard.
- assert.commandWorked(freshMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ // Make sure data has replicated to all config servers so freshMongos finds a sharded
+ // collection: freshMongos has an older optime and won't wait to see what staleMongos did
+ // (shardCollection).
+ st.configRS.awaitLastOpCommitted();
+}
+
+// Create a new sharded collection, then split it into two chunks on different shards using the
+// stale mongos. Then use the fresh mongos to consolidate the chunks onto one of the shards.
+// staleMongos will see:
+// shard0: (-inf, splitPoint]
+// shard1: (splitPoint, inf]
+// freshMongos will see:
+// shard0: (-inf, splitPoint], (splitPoint, inf]
+// shard1:
+function makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard() {
+ resetCollection();
+
+ // Make sure staleMongos sees all data on first shard.
+ const chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
+ assert(chunk.shard === st.shard0.shardName);
+
+ // Make sure staleMongos sees two chunks on two different shards.
+ assert.commandWorked(staleMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
+ assert.commandWorked(staleMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Use freshMongos to consolidate the chunks on one shard.
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+}
+
+// Create a new sharded collection with a single chunk, then move that chunk from the primary
+// shard to another shard using the fresh mongos.
+// staleMongos will see:
+// shard0: (-inf, inf]
+// shard1:
+// freshMongos will see:
+// shard0:
+// shard1: (-inf, inf]
+function makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard() {
+ resetCollection();
+
+ // Make sure staleMongos sees all data on first shard.
+ const chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
+ assert(chunk.shard === st.shard0.shardName);
+
+ // Use freshMongos to move chunk to another shard.
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+}
+
+// Create a new sharded collection, then split it into two chunks on different shards using the
+// fresh mongos.
+// staleMongos will see:
+// shard0: (-inf, inf]
+// shard1:
+// freshMongos will see:
+// shard0: (-inf, splitPoint]
+// shard1: (splitPoint, inf]
+function makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards() {
+ resetCollection();
+
+ // Make sure staleMongos sees all data on first shard.
+ const chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
+ assert(chunk.shard === st.shard0.shardName);
+
+ // Use freshMongos to split and move chunks to both shards.
+ assert.commandWorked(freshMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+ st.configRS.awaitLastOpCommitted();
+}
+
+function checkAllRemoveQueries(makeMongosStaleFunc) {
+ const multi = {justOne: false};
+ const single = {justOne: true};
+
+ function doRemove(query, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ assert.writeOK(staleMongos.getCollection(collNS).remove(query, multiOption));
+ if (multiOption.justOne) {
+ // A total of one document should have been removed from the collection.
+ assert.eq(numDocs - 1, staleMongos.getCollection(collNS).find().itcount());
+ } else {
+ // All documents matching the query should have been removed.
+ assert.eq(0, staleMongos.getCollection(collNS).find(query).itcount());
+ }
}
- // Create a new sharded collection, then split it into two chunks on different shards using the
- // fresh mongos.
- // staleMongos will see:
- // shard0: (-inf, inf]
- // shard1:
- // freshMongos will see:
- // shard0: (-inf, splitPoint]
- // shard1: (splitPoint, inf]
- function makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards() {
- resetCollection();
-
- // Make sure staleMongos sees all data on first shard.
- const chunk = staleMongos.getCollection("config.chunks")
- .findOne({min: {x: MinKey}, max: {x: MaxKey}});
- assert(chunk.shard === st.shard0.shardName);
-
- // Use freshMongos to split and move chunks to both shards.
- assert.commandWorked(freshMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
- assert.commandWorked(freshMongos.adminCommand(
- {moveChunk: collNS, find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.configRS.awaitLastOpCommitted();
+ function checkRemoveIsInvalid(query, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ const res = staleMongos.getCollection(collNS).remove(query, multiOption);
+ assert.writeError(res);
}
- function checkAllRemoveQueries(makeMongosStaleFunc) {
- const multi = {justOne: false};
- const single = {justOne: true};
-
- function doRemove(query, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).remove(query, multiOption));
- if (multiOption.justOne) {
- // A total of one document should have been removed from the collection.
- assert.eq(numDocs - 1, staleMongos.getCollection(collNS).find().itcount());
- } else {
- // All documents matching the query should have been removed.
- assert.eq(0, staleMongos.getCollection(collNS).find(query).itcount());
- }
+ // Not possible because single remove requires equality match on shard key.
+ checkRemoveIsInvalid(emptyQuery, single, makeMongosStaleFunc);
+ doRemove(emptyQuery, multi, makeMongosStaleFunc);
+
+ doRemove(pointQuery, single, makeMongosStaleFunc);
+ doRemove(pointQuery, multi, makeMongosStaleFunc);
+
+    // Not possible because a single remove can't use a range query.
+ checkRemoveIsInvalid(rangeQuery, single, makeMongosStaleFunc);
+ doRemove(rangeQuery, multi, makeMongosStaleFunc);
+
+    // Not possible because a single remove must contain the _id or shard key at top level
+    // (not within $or).
+ checkRemoveIsInvalid(multiPointQuery, single, makeMongosStaleFunc);
+ doRemove(multiPointQuery, multi, makeMongosStaleFunc);
+}
+
+function checkAllUpdateQueries(makeMongosStaleFunc) {
+ const oUpdate = {$inc: {fieldToUpdate: 1}}; // op-style update (non-idempotent)
+ const rUpdate = {x: 0, fieldToUpdate: 1}; // replacement-style update (idempotent)
+ const queryAfterUpdate = {fieldToUpdate: 1};
+
+ const multi = {multi: true};
+ const single = {multi: false};
+
+ function doUpdate(query, update, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ assert.writeOK(staleMongos.getCollection(collNS).update(query, update, multiOption));
+ if (multiOption.multi) {
+ // All documents matching the query should have been updated.
+ assert.eq(staleMongos.getCollection(collNS).find(query).itcount(),
+ staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
+ } else {
+ // A total of one document should have been updated.
+ assert.eq(1, staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
}
-
- function checkRemoveIsInvalid(query, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- const res = staleMongos.getCollection(collNS).remove(query, multiOption);
- assert.writeError(res);
- }
-
- // Not possible because single remove requires equality match on shard key.
- checkRemoveIsInvalid(emptyQuery, single, makeMongosStaleFunc);
- doRemove(emptyQuery, multi, makeMongosStaleFunc);
-
- doRemove(pointQuery, single, makeMongosStaleFunc);
- doRemove(pointQuery, multi, makeMongosStaleFunc);
-
- // Not possible because can't do range query on a single remove.
- checkRemoveIsInvalid(rangeQuery, single, makeMongosStaleFunc);
- doRemove(rangeQuery, multi, makeMongosStaleFunc);
-
- // Not possible because single remove must contain _id or shard key at top level
- // (not within $or).
- checkRemoveIsInvalid(multiPointQuery, single, makeMongosStaleFunc);
- doRemove(multiPointQuery, multi, makeMongosStaleFunc);
}
- function checkAllUpdateQueries(makeMongosStaleFunc) {
- const oUpdate = {$inc: {fieldToUpdate: 1}}; // op-style update (non-idempotent)
- const rUpdate = {x: 0, fieldToUpdate: 1}; // replacement-style update (idempotent)
- const queryAfterUpdate = {fieldToUpdate: 1};
-
- const multi = {multi: true};
- const single = {multi: false};
-
- function doUpdate(query, update, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- assert.writeOK(staleMongos.getCollection(collNS).update(query, update, multiOption));
- if (multiOption.multi) {
- // All documents matching the query should have been updated.
- assert.eq(staleMongos.getCollection(collNS).find(query).itcount(),
- staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
- } else {
- // A total of one document should have been updated.
- assert.eq(1, staleMongos.getCollection(collNS).find(queryAfterUpdate).itcount());
- }
- }
-
- function assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc) {
- makeMongosStaleFunc();
- const res = staleMongos.getCollection(collNS).update(query, update, multiOption);
- assert.writeError(res);
- }
+ function assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc) {
+ makeMongosStaleFunc();
+ const res = staleMongos.getCollection(collNS).update(query, update, multiOption);
+ assert.writeError(res);
+ }
- function assertUpdateIsValidIfAllChunksOnSingleShard(
- query, update, multiOption, makeMongosStaleFunc) {
- if (makeMongosStaleFunc == makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards) {
- assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc);
- } else {
- doUpdate(query, update, multiOption, makeMongosStaleFunc);
- }
+ function assertUpdateIsValidIfAllChunksOnSingleShard(
+ query, update, multiOption, makeMongosStaleFunc) {
+ if (makeMongosStaleFunc == makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards) {
+ assertUpdateIsInvalid(query, update, multiOption, makeMongosStaleFunc);
+ } else {
+ doUpdate(query, update, multiOption, makeMongosStaleFunc);
}
+ }
- // Note on the tests below: single-doc updates are able to succeed even in cases where the
- // stale mongoS incorrectly believes that the update targets multiple shards, because the
- // mongoS write path swallows the first error encountered in each batch, then internally
- // refreshes its routing table and tries the write again. Because all chunks are actually
- // on a single shard in two of the three test cases, this second update attempt succeeds.
+ // Note on the tests below: single-doc updates are able to succeed even in cases where the
+ // stale mongoS incorrectly believes that the update targets multiple shards, because the
+ // mongoS write path swallows the first error encountered in each batch, then internally
+ // refreshes its routing table and tries the write again. Because all chunks are actually
+ // on a single shard in two of the three test cases, this second update attempt succeeds.
- // This update has inconsistent behavior as explained in SERVER-22895.
- // doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
+ // This update has inconsistent behavior as explained in SERVER-22895.
+ // doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires equality match on shard key.
- assertUpdateIsInvalid(emptyQuery, rUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires equality match on shard key.
+ assertUpdateIsInvalid(emptyQuery, rUpdate, multi, makeMongosStaleFunc);
- // Single op-style update succeeds if all chunks are on one shard, regardless of staleness.
- assertUpdateIsValidIfAllChunksOnSingleShard(
- emptyQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(emptyQuery, oUpdate, multi, makeMongosStaleFunc);
+ // Single op-style update succeeds if all chunks are on one shard, regardless of staleness.
+ assertUpdateIsValidIfAllChunksOnSingleShard(emptyQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(emptyQuery, oUpdate, multi, makeMongosStaleFunc);
- doUpdate(pointQuery, rUpdate, single, makeMongosStaleFunc);
+ doUpdate(pointQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires multi=false.
- assertUpdateIsInvalid(pointQuery, rUpdate, multi, makeMongosStaleFunc);
- doUpdate(pointQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(pointQuery, oUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires multi=false.
+ assertUpdateIsInvalid(pointQuery, rUpdate, multi, makeMongosStaleFunc);
+ doUpdate(pointQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(pointQuery, oUpdate, multi, makeMongosStaleFunc);
- doUpdate(rangeQuery, rUpdate, single, makeMongosStaleFunc);
+ doUpdate(rangeQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires multi=false.
- assertUpdateIsInvalid(rangeQuery, rUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires multi=false.
+ assertUpdateIsInvalid(rangeQuery, rUpdate, multi, makeMongosStaleFunc);
- // Range query for a single update succeeds because the range falls entirely on one shard.
- doUpdate(rangeQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(rangeQuery, oUpdate, multi, makeMongosStaleFunc);
+ // Range query for a single update succeeds because the range falls entirely on one shard.
+ doUpdate(rangeQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(rangeQuery, oUpdate, multi, makeMongosStaleFunc);
- doUpdate(multiPointQuery, rUpdate, single, makeMongosStaleFunc);
+ doUpdate(multiPointQuery, rUpdate, single, makeMongosStaleFunc);
- // Not possible because replacement-style requires multi=false.
- assertUpdateIsInvalid(multiPointQuery, rUpdate, multi, makeMongosStaleFunc);
+ // Not possible because replacement-style requires multi=false.
+ assertUpdateIsInvalid(multiPointQuery, rUpdate, multi, makeMongosStaleFunc);
- // Multi-point single-doc update succeeds if all points are on a single shard.
- assertUpdateIsValidIfAllChunksOnSingleShard(
- multiPointQuery, oUpdate, single, makeMongosStaleFunc);
- doUpdate(multiPointQuery, oUpdate, multi, makeMongosStaleFunc);
- }
+ // Multi-point single-doc update succeeds if all points are on a single shard.
+ assertUpdateIsValidIfAllChunksOnSingleShard(
+ multiPointQuery, oUpdate, single, makeMongosStaleFunc);
+ doUpdate(multiPointQuery, oUpdate, multi, makeMongosStaleFunc);
+}
- // TODO: SERVER-33954 remove shardAsReplicaSet: false.
- const st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
+// TODO: SERVER-33954 remove shardAsReplicaSet: false.
+const st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}});
- const dbName = 'test';
- const collNS = dbName + '.foo';
- const numShardKeys = 10;
- const numDocs = numShardKeys * 2;
- const splitPoint = numShardKeys / 2;
+const dbName = 'test';
+const collNS = dbName + '.foo';
+const numShardKeys = 10;
+const numDocs = numShardKeys * 2;
+const splitPoint = numShardKeys / 2;
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: collNS, key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: collNS, key: {x: 1}}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- const freshMongos = st.s0;
- const staleMongos = st.s1;
+const freshMongos = st.s0;
+const staleMongos = st.s1;
- const emptyQuery = {};
- const pointQuery = {x: 0};
+const emptyQuery = {};
+const pointQuery = {
+ x: 0
+};
- // Choose a range that would fall on only one shard.
- // Use (splitPoint - 1) because of SERVER-20768.
- const rangeQuery = {x: {$gte: 0, $lt: splitPoint - 1}};
+// Choose a range that would fall on only one shard.
+// Use (splitPoint - 1) because of SERVER-20768.
+const rangeQuery = {
+ x: {$gte: 0, $lt: splitPoint - 1}
+};
- // Choose points that would fall on two different shards.
- const multiPointQuery = {$or: [{x: 0}, {x: numShardKeys}]};
+// Choose points that would fall on two different shards.
+const multiPointQuery = {
+ $or: [{x: 0}, {x: numShardKeys}]
+};
- checkAllRemoveQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
- checkAllRemoveQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
+checkAllRemoveQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
+checkAllRemoveQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
- checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
- checkAllUpdateQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
- checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards);
+checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenAllChunksAreOnAnotherShard);
+checkAllUpdateQueries(makeStaleMongosTargetMultipleShardsWhenAllChunksAreOnOneShard);
+checkAllUpdateQueries(makeStaleMongosTargetOneShardWhenChunksAreOnMultipleShards);
- st.stop();
+st.stop();
})();
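
Aside (not part of this patch): a minimal sketch of the staleness pattern the test above relies on, assuming the shell's ShardingTest fixture; the collection name and shard key here are illustrative. A chunk move performed through one mongos leaves a second mongos with a stale routing table, and a write routed through the stale mongos still succeeds because the router swallows the shard's StaleConfig error, refreshes, and retries internally.

var st = new ShardingTest({shards: 2, mongos: 2});

assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
st.ensurePrimaryShard('test', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));

// Both routers load the initial routing table.
st.s0.getCollection('test.foo').findOne();
st.s1.getCollection('test.foo').findOne();

// Split and move through s0 only; s1 now has a stale routing table.
assert.commandWorked(st.s0.adminCommand({split: 'test.foo', middle: {x: 0}}));
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: 'test.foo', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));

// A write routed through the stale s1 still succeeds: the shard answers with
// StaleConfig, and s1 refreshes its routing table and retries the write.
assert.writeOK(st.s1.getCollection('test.foo').insert({x: 0}));

st.stop();
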
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index bd603124548..1183e369b2e 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 1, mongos: 2});
+var st = new ShardingTest({shards: 1, mongos: 2});
- var mongosA = st.s0;
- var mongosB = st.s1;
+var mongosA = st.s0;
+var mongosB = st.s1;
- jsTest.log("Adding new collections...");
+jsTest.log("Adding new collections...");
- var collA = mongosA.getCollection(jsTestName() + ".coll");
- assert.writeOK(collA.insert({hello: "world"}));
+var collA = mongosA.getCollection(jsTestName() + ".coll");
+assert.writeOK(collA.insert({hello: "world"}));
- var collB = mongosB.getCollection("" + collA);
- assert.writeOK(collB.insert({hello: "world"}));
+var collB = mongosB.getCollection("" + collA);
+assert.writeOK(collB.insert({hello: "world"}));
- jsTest.log("Enabling sharding...");
+jsTest.log("Enabling sharding...");
- assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()}));
- assert.commandWorked(
- mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}}));
+assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()}));
+assert.commandWorked(
+ mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}}));
- // MongoD doesn't know about the config shard version *until* MongoS tells it
- collA.findOne();
+// MongoD doesn't know about the config shard version *until* MongoS tells it
+collA.findOne();
- jsTest.log("Trigger shard version mismatch...");
+jsTest.log("Trigger shard version mismatch...");
- assert.writeOK(collB.insert({goodbye: "world"}));
+assert.writeOK(collB.insert({goodbye: "world"}));
- print("Inserted...");
+print("Inserted...");
- assert.eq(3, collA.find().itcount());
- assert.eq(3, collB.find().itcount());
+assert.eq(3, collA.find().itcount());
+assert.eq(3, collB.find().itcount());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index 2dc78e07d76..21fd233944c 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -11,92 +11,89 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- /**
- * Restarts the mongod backing the specified shard instance, without restarting the mongobridge.
- */
- function restartShard(shard, waitForConnect) {
- MongoRunner.stopMongod(shard);
- shard.restart = true;
- shard.waitForConnect = waitForConnect;
- MongoRunner.runMongod(shard);
+"use strict";
+
+/**
+ * Restarts the mongod backing the specified shard instance, without restarting the mongobridge.
+ */
+function restartShard(shard, waitForConnect) {
+ MongoRunner.stopMongod(shard);
+ shard.restart = true;
+ shard.waitForConnect = waitForConnect;
+ MongoRunner.runMongod(shard);
+}
+
+// TODO: SERVER-33830 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+
+jsTestLog("Setting up initial data");
+
+for (var i = 0; i < 100; i++) {
+ assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
+}
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.foo', find: {_id: 50}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: st.shard1.shardName}));
+
+// Make sure the pre-existing mongos already has the routing information loaded into memory
+assert.eq(100, st.s.getDB('test').foo.find().itcount());
+
+jsTestLog("Shutting down all config servers");
+for (var i = 0; i < st._configServers.length; i++) {
+ st.stopConfigServer(i);
+}
+
+jsTestLog("Starting a new mongos when there are no config servers up");
+var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
+// The new mongos won't accept any new connections, but it should stay up and continue trying
+// to contact the config servers to finish startup.
+assert.throws(function() {
+ new Mongo(newMongosInfo.host);
+});
+
+jsTestLog("Restarting a shard while there are no config servers up");
+restartShard(st.shard1, false);
+
+jsTestLog("Queries should fail because the shard can't initialize sharding state");
+var error = assert.throws(function() {
+ st.s.getDB('test').foo.find().itcount();
+});
+
+assert(ErrorCodes.ReplicaSetNotFound == error.code || ErrorCodes.ExceededTimeLimit == error.code ||
+ ErrorCodes.HostUnreachable == error.code);
+
+jsTestLog("Restarting the config servers");
+for (var i = 0; i < st._configServers.length; i++) {
+ st.restartConfigServer(i);
+}
+
+print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
+sleep(60000);
+
+jsTestLog("Queries against the original mongos should work again");
+assert.eq(100, st.s.getDB('test').foo.find().itcount());
+
+jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
+ "servers were down");
+var newMongosConn = null;
+var caughtException = null;
+assert.soon(function() {
+ try {
+ newMongosConn = new Mongo(newMongosInfo.host);
+ return true;
+ } catch (e) {
+ caughtException = e;
+ return false;
}
+}, "Failed to connect to mongos after config servers were restarted: " + tojson(caughtException));
- // TODO: SERVER-33830 remove shardAsReplicaSet: false
- var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
+assert.eq(100, newMongosConn.getDB('test').foo.find().itcount());
- jsTestLog("Setting up initial data");
-
- for (var i = 0; i < 100; i++) {
- assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
- }
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
-
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.foo', find: {_id: 50}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: st.shard1.shardName}));
-
- // Make sure the pre-existing mongos already has the routing information loaded into memory
- assert.eq(100, st.s.getDB('test').foo.find().itcount());
-
- jsTestLog("Shutting down all config servers");
- for (var i = 0; i < st._configServers.length; i++) {
- st.stopConfigServer(i);
- }
-
- jsTestLog("Starting a new mongos when there are no config servers up");
- var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
- // The new mongos won't accept any new connections, but it should stay up and continue trying
- // to contact the config servers to finish startup.
- assert.throws(function() {
- new Mongo(newMongosInfo.host);
- });
-
- jsTestLog("Restarting a shard while there are no config servers up");
- restartShard(st.shard1, false);
-
- jsTestLog("Queries should fail because the shard can't initialize sharding state");
- var error = assert.throws(function() {
- st.s.getDB('test').foo.find().itcount();
- });
-
- assert(ErrorCodes.ReplicaSetNotFound == error.code ||
- ErrorCodes.ExceededTimeLimit == error.code || ErrorCodes.HostUnreachable == error.code);
-
- jsTestLog("Restarting the config servers");
- for (var i = 0; i < st._configServers.length; i++) {
- st.restartConfigServer(i);
- }
-
- print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
- sleep(60000);
-
- jsTestLog("Queries against the original mongos should work again");
- assert.eq(100, st.s.getDB('test').foo.find().itcount());
-
- jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
- "servers were down");
- var newMongosConn = null;
- var caughtException = null;
- assert.soon(
- function() {
- try {
- newMongosConn = new Mongo(newMongosInfo.host);
- return true;
- } catch (e) {
- caughtException = e;
- return false;
- }
- },
- "Failed to connect to mongos after config servers were restarted: " +
- tojson(caughtException));
-
- assert.eq(100, newMongosConn.getDB('test').foo.find().itcount());
-
- st.stop();
- MongoRunner.stopMongos(newMongosInfo);
+st.stop();
+MongoRunner.stopMongos(newMongosInfo);
}());
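
Aside (not part of this patch): the retry-until-connected idiom used above, isolated into a reusable helper; the function name and hostPort parameter are illustrative. assert.soon re-invokes the callback until it returns true or the timeout elapses.

function connectWithRetry(hostPort) {
    var conn = null;
    assert.soon(function() {
        try {
            // new Mongo() throws until the server is ready to accept connections.
            conn = new Mongo(hostPort);
            return true;
        } catch (e) {
            return false;
        }
    }, 'timed out waiting to connect to ' + hostPort);
    return conn;
}

// e.g. var newMongosConn = connectWithRetry(newMongosInfo.host);
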
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index 76c64c8b41f..0e182997531 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -1,235 +1,222 @@
(function() {
- var s = new ShardingTest({name: "stats", shards: 2, mongos: 1});
-
- s.adminCommand({enablesharding: "test"});
-
- db = s.getDB("test");
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- function numKeys(o) {
- var num = 0;
- for (var x in o)
- num++;
- return num;
- }
-
- db.foo.drop();
- // SERVER-29678 changed collStats so versions > 4.0 now return 0s on NS not found
- if (MongoRunner.getBinVersionFor(jsTest.options().mongosBinVersion) === '4.0') {
- // TODO: This should be fixed in 4.4
- let res = db.foo.stats();
- if (res.ok === 1) {
- // Possible to hit a shard that is actually version >= 4.2 => result should be zeros
- assert(res.size === 0 && res.count === 0 && res.storageSize === 0 &&
- res.nindexes === 0);
- } else {
- assert.commandFailed(
- db.foo.stats(),
- 'db.collection.stats() should fail non-existent in versions <= 4.0');
- }
+var s = new ShardingTest({name: "stats", shards: 2, mongos: 1});
+
+s.adminCommand({enablesharding: "test"});
+
+db = s.getDB("test");
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+function numKeys(o) {
+ var num = 0;
+ for (var x in o)
+ num++;
+ return num;
+}
+
+db.foo.drop();
+// SERVER-29678 changed collStats so versions > 4.0 now return 0s on NS not found
+if (MongoRunner.getBinVersionFor(jsTest.options().mongosBinVersion) === '4.0') {
+ // TODO: This should be fixed in 4.4
+ let res = db.foo.stats();
+ if (res.ok === 1) {
+ // Possible to hit a shard that is actually version >= 4.2 => result should be zeros
+ assert(res.size === 0 && res.count === 0 && res.storageSize === 0 && res.nindexes === 0);
} else {
- assert.commandWorked(db.foo.stats(),
- 'db.collection.stats() should return 0s on non-existent collection');
+ assert.commandFailed(db.foo.stats(),
+ 'db.collection.stats() should fail on a non-existent collection in versions <= 4.0');
}
-
- // ---------- load some data -----
-
- // need collections sharded before and after main collection for proper test
- s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}});
- s.adminCommand(
- {shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used
- s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}});
-
- N = 10000;
- s.adminCommand({split: "test.foo", middle: {_id: N / 2}});
- s.adminCommand({
- moveChunk: "test.foo",
- find: {_id: 3},
- to: s.getNonPrimaries("test")[0],
- _waitForDelete: true
- });
-
- var bulk = db.foo.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++)
- bulk.insert({_id: i});
- assert.writeOK(bulk.execute());
-
- // Flush all writes to disk since some of the stats are dependent on state in disk (like
- // totalIndexSize).
- assert.commandWorked(db.adminCommand({fsync: 1}));
-
- a = s.shard0.getDB("test");
- b = s.shard1.getDB("test");
-
- x = assert.commandWorked(db.foo.stats());
- assert.eq(N, x.count, "coll total count expected");
- assert.eq(db.foo.count(), x.count, "coll total count match");
- assert.eq(2, x.nchunks, "coll chunk num");
- assert.eq(2, numKeys(x.shards), "coll shard num");
- assert.eq(
- N / 2, x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName expected");
- assert.eq(
- N / 2, x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName expected");
- assert.eq(a.foo.count(),
- x.shards[s.shard0.shardName].count,
- "coll count on s.shard0.shardName match");
- assert.eq(b.foo.count(),
- x.shards[s.shard1.shardName].count,
- "coll count on s.shard1.shardName match");
- assert(!x.shards[s.shard0.shardName].indexDetails,
- 'indexDetails should not be present in s.shard0.shardName: ' +
- tojson(x.shards[s.shard0.shardName]));
- assert(!x.shards[s.shard1.shardName].indexDetails,
- 'indexDetails should not be present in s.shard1.shardName: ' +
- tojson(x.shards[s.shard1.shardName]));
-
- a_extras = a.stats().objects - a.foo.count();
- b_extras = b.stats().objects - b.foo.count();
- print("a_extras: " + a_extras);
- print("b_extras: " + b_extras);
-
- x = assert.commandWorked(db.stats());
-
- assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected");
- assert.eq(2, numKeys(x.raw), "db shard num");
- assert.eq((N / 2) + a_extras,
- x.raw[s.shard0.name].objects,
- "db count on s.shard0.shardName expected");
- assert.eq((N / 2) + b_extras,
- x.raw[s.shard1.name].objects,
- "db count on s.shard1.shardName expected");
- assert.eq(
- a.stats().objects, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName match");
- assert.eq(
- b.stats().objects, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName match");
-
- /* Test db.stat() and db.collection.stat() scaling */
-
- /* Helper functions */
- function statComp(stat, stat_scaled, scale) {
- /* Because of loss of floating point precision, do not check exact equality */
- if (stat == stat_scaled)
- return true;
-
- var msg = 'scaled: ' + stat_scaled + ', stat: ' + stat + ', scale: ' + scale;
- assert.lte((stat_scaled - 2), (stat / scale), msg);
- assert.gte((stat_scaled + 2), (stat / scale), msg);
+} else {
+ assert.commandWorked(db.foo.stats(),
+ 'db.collection.stats() should return 0s on non-existent collection');
+}
+
+// ---------- load some data -----
+
+// need collections sharded before and after main collection for proper test
+s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}});
+s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used
+s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}});
+
+N = 10000;
+s.adminCommand({split: "test.foo", middle: {_id: N / 2}});
+s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
+});
+
+var bulk = db.foo.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+assert.writeOK(bulk.execute());
+
+// Flush all writes to disk since some of the stats are dependent on state on disk (like
+// totalIndexSize).
+assert.commandWorked(db.adminCommand({fsync: 1}));
+
+a = s.shard0.getDB("test");
+b = s.shard1.getDB("test");
+
+x = assert.commandWorked(db.foo.stats());
+assert.eq(N, x.count, "coll total count expected");
+assert.eq(db.foo.count(), x.count, "coll total count match");
+assert.eq(2, x.nchunks, "coll chunk num");
+assert.eq(2, numKeys(x.shards), "coll shard num");
+assert.eq(N / 2, x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName expected");
+assert.eq(N / 2, x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName expected");
+assert.eq(
+ a.foo.count(), x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName match");
+assert.eq(
+ b.foo.count(), x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName match");
+assert(!x.shards[s.shard0.shardName].indexDetails,
+ 'indexDetails should not be present in s.shard0.shardName: ' +
+ tojson(x.shards[s.shard0.shardName]));
+assert(!x.shards[s.shard1.shardName].indexDetails,
+ 'indexDetails should not be present in s.shard1.shardName: ' +
+ tojson(x.shards[s.shard1.shardName]));
+
+a_extras = a.stats().objects - a.foo.count();
+b_extras = b.stats().objects - b.foo.count();
+print("a_extras: " + a_extras);
+print("b_extras: " + b_extras);
+
+x = assert.commandWorked(db.stats());
+
+assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected");
+assert.eq(2, numKeys(x.raw), "db shard num");
+assert.eq(
+ (N / 2) + a_extras, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName expected");
+assert.eq(
+ (N / 2) + b_extras, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName expected");
+assert.eq(a.stats().objects, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName match");
+assert.eq(b.stats().objects, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName match");
+
+/* Test db.stat() and db.collection.stat() scaling */
+
+/* Helper functions */
+function statComp(stat, stat_scaled, scale) {
+ /* Because of loss of floating point precision, do not check exact equality */
+ if (stat == stat_scaled)
+ return true;
+
+ var msg = 'scaled: ' + stat_scaled + ', stat: ' + stat + ', scale: ' + scale;
+ assert.lte((stat_scaled - 2), (stat / scale), msg);
+ assert.gte((stat_scaled + 2), (stat / scale), msg);
+}
+
+function dbStatComp(stat_obj, stat_obj_scaled, scale) {
+ statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
+ statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
+ /* avgObjSize not scaled. See SERVER-7347 */
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
+}
+
+function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
+ statComp(stat_obj.size, stat_obj_scaled.size, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
+ /* lastExtentSize doesn't exist in mongos level collection stats */
+ if (!mongos) {
+ statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
}
+}
- function dbStatComp(stat_obj, stat_obj_scaled, scale) {
- statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
- statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
- /* avgObjSize not scaled. See SERVER-7347 */
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
- }
+/* db.stats() tests */
+db_not_scaled = assert.commandWorked(db.stats());
+db_scaled_512 = assert.commandWorked(db.stats(512));
+db_scaled_1024 = assert.commandWorked(db.stats(1024));
- function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
- statComp(stat_obj.size, stat_obj_scaled.size, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
- /* lastExtentSize doesn't exist in mongos level collection stats */
- if (!mongos) {
- statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
- }
- }
+for (var shard in db_not_scaled.raw) {
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
+}
- /* db.stats() tests */
- db_not_scaled = assert.commandWorked(db.stats());
- db_scaled_512 = assert.commandWorked(db.stats(512));
- db_scaled_1024 = assert.commandWorked(db.stats(1024));
+dbStatComp(db_not_scaled, db_scaled_512, 512);
+dbStatComp(db_not_scaled, db_scaled_1024, 1024);
- for (var shard in db_not_scaled.raw) {
- dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
- dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
- }
+/* db.collection.stats() tests */
+coll_not_scaled = assert.commandWorked(db.foo.stats());
+coll_scaled_512 = assert.commandWorked(db.foo.stats(512));
+coll_scaled_1024 = assert.commandWorked(db.foo.stats(1024));
- dbStatComp(db_not_scaled, db_scaled_512, 512);
- dbStatComp(db_not_scaled, db_scaled_1024, 1024);
+for (var shard in coll_not_scaled.shards) {
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
+}
- /* db.collection.stats() tests */
- coll_not_scaled = assert.commandWorked(db.foo.stats());
- coll_scaled_512 = assert.commandWorked(db.foo.stats(512));
- coll_scaled_1024 = assert.commandWorked(db.foo.stats(1024));
+collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
+collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
- for (var shard in coll_not_scaled.shards) {
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
+/* db.collection.stats() - indexDetails tests */
+(function() {
+var t = db.foo;
+
+assert.commandWorked(t.ensureIndex({a: 1}));
+assert.eq(2, t.getIndexes().length);
+
+var isWiredTiger =
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger");
+
+var stats = assert.commandWorked(t.stats({indexDetails: true}));
+var shardName;
+var shardStats;
+for (shardName in stats.shards) {
+ shardStats = stats.shards[shardName];
+ assert(shardStats.indexDetails,
+ 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
+ if (isWiredTiger) {
+ assert.eq(t.getIndexes().length,
+ Object.keys(shardStats.indexDetails).length,
+ 'incorrect number of entries in WiredTiger indexDetails: ' + tojson(shardStats));
}
+}
- collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
- collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
-
- /* db.collection.stats() - indexDetails tests */
- (function() {
- var t = db.foo;
-
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.eq(2, t.getIndexes().length);
-
- var isWiredTiger =
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger");
-
- var stats = assert.commandWorked(t.stats({indexDetails: true}));
- var shardName;
- var shardStats;
- for (shardName in stats.shards) {
- shardStats = stats.shards[shardName];
- assert(shardStats.indexDetails,
- 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
- if (isWiredTiger) {
- assert.eq(t.getIndexes().length,
- Object.keys(shardStats.indexDetails).length,
- 'incorrect number of entries in WiredTiger indexDetails: ' +
- tojson(shardStats));
- }
- }
-
- function getIndexName(indexKey) {
- var indexes = t.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.key, indexKey);
- });
- assert.eq(
- 1,
- indexes.length,
- tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
- return indexes[0].name;
- }
-
- function checkIndexDetails(options, indexName) {
- var stats = assert.commandWorked(t.stats(options));
- for (shardName in stats.shards) {
- shardStats = stats.shards[shardName];
- assert(shardStats.indexDetails,
- 'indexDetails missing from db.collection.stats(' + tojson(options) +
- ').shards[' + shardName + '] result: ' + tojson(shardStats));
- // Currently, indexDetails is only supported with WiredTiger.
- if (isWiredTiger) {
- assert.eq(1,
- Object.keys(shardStats.indexDetails).length,
- 'WiredTiger indexDetails must have exactly one entry');
- assert(shardStats.indexDetails[indexName],
- indexName + ' missing from WiredTiger indexDetails: ' +
- tojson(shardStats.indexDetails));
- assert.neq(0,
- Object.keys(shardStats.indexDetails[indexName]).length,
- indexName + ' exists in indexDetails but contains no information: ' +
- tojson(shardStats.indexDetails));
- }
- }
+function getIndexName(indexKey) {
+ var indexes = t.getIndexes().filter(function(doc) {
+ return friendlyEqual(doc.key, indexKey);
+ });
+ assert.eq(1,
+ indexes.length,
+ tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
+ return indexes[0].name;
+}
+
+function checkIndexDetails(options, indexName) {
+ var stats = assert.commandWorked(t.stats(options));
+ for (shardName in stats.shards) {
+ shardStats = stats.shards[shardName];
+ assert(shardStats.indexDetails,
+ 'indexDetails missing from db.collection.stats(' + tojson(options) + ').shards[' +
+ shardName + '] result: ' + tojson(shardStats));
+ // Currently, indexDetails is only supported with WiredTiger.
+ if (isWiredTiger) {
+ assert.eq(1,
+ Object.keys(shardStats.indexDetails).length,
+ 'WiredTiger indexDetails must have exactly one entry');
+ assert(shardStats.indexDetails[indexName],
+ indexName +
+ ' missing from WiredTiger indexDetails: ' + tojson(shardStats.indexDetails));
+ assert.neq(0,
+ Object.keys(shardStats.indexDetails[indexName]).length,
+ indexName + ' exists in indexDetails but contains no information: ' +
+ tojson(shardStats.indexDetails));
}
+ }
+}
- // indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {a: 1};
- var indexName = getIndexName(indexKey);
- checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
+// indexDetailsKey - show indexDetails results for this index key only.
+var indexKey = {a: 1};
+var indexName = getIndexName(indexKey);
+checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
- // indexDetailsName - show indexDetails results for this index name only.
- checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
- }());
+// indexDetailsName - show indexDetails results for this index name only.
+checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
+}());
- s.stop();
+s.stop();
})();
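
Aside (not part of this patch): a restatement of the tolerance that statComp() above applies, with hypothetical helper and variable names. Each shard truncates its scaled value to an integer before mongos sums them, so the aggregated scaled stat can drift from stat / scale by a couple of units rather than matching exactly.

// Sketch of the statComp() tolerance: pass when the scaled stat is within
// 2 units of the unscaled stat divided by the scale factor.
function assertScaledStatClose(stat, statScaled, scale) {
    if (stat == statScaled)
        return;  // Trivially equal (e.g. scale 1, or a zero-valued stat).

    var msg = 'scaled: ' + statScaled + ', stat: ' + stat + ', scale: ' + scale;
    assert.lte(statScaled - 2, stat / scale, msg);  // Not too small...
    assert.gte(statScaled + 2, stat / scale, msg);  // ...and not too large.
}

// e.g. assertScaledStatClose(db.stats().dataSize, db.stats(1024).dataSize, 1024);
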
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index ac80c057d85..46e9325052c 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,35 +1,35 @@
// Test to make sure that tag ranges get split when full keys are used for the tag ranges
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1});
+var s = new ShardingTest({shards: 2, mongos: 1});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
- s.addShardTag(s.shard0.shardName, "a");
- s.addShardTag(s.shard0.shardName, "b");
+s.addShardTag(s.shard0.shardName, "a");
+s.addShardTag(s.shard0.shardName, "b");
- s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
- s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
+s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
- }, 'Split did not occur', 3 * 60 * 1000);
+assert.soon(function() {
+ return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
+}, 'Split did not occur', 3 * 60 * 1000);
- s.awaitBalancerRound();
- s.printShardingStatus(true);
- assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
+s.awaitBalancerRound();
+s.printShardingStatus(true);
+assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15}}).itcount());
- s.stop();
+s.stop();
})();
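
Aside (not part of this patch): a minimal sketch of the behavior the test above verifies, assuming the same ShardingTest zone helpers; the namespace and tag name are illustrative. Declaring a tag range on an untouched collection is enough for the balancer to split the initial chunk at the range boundaries.

var s = new ShardingTest({shards: 2, mongos: 1});

assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
assert.commandWorked(s.s0.adminCommand({shardcollection: 'test.tagged', key: {_id: 1}}));

// One initial chunk; declare a single tag range after tagging the shard.
s.addShardTag(s.shard0.shardName, 'hot');
s.addTagRange('test.tagged', {_id: 0}, {_id: 100}, 'hot');

s.startBalancer();

// The balancer splits [MinKey, MaxKey) at both range boundaries, leaving
// [MinKey, 0), [0, 100) and [100, MaxKey).
assert.soon(function() {
    return s.config.chunks.find({"ns": "test.tagged"}).itcount() == 3;
}, 'tag range boundaries did not become split points', 3 * 60 * 1000);

s.stop();
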
diff --git a/jstests/sharding/tag_auto_split_partial_key.js b/jstests/sharding/tag_auto_split_partial_key.js
index 35f1c6c7b65..dc19059b726 100644
--- a/jstests/sharding/tag_auto_split_partial_key.js
+++ b/jstests/sharding/tag_auto_split_partial_key.js
@@ -1,45 +1,45 @@
// Test to make sure that tag ranges get split when partial keys are used for the tag ranges
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1});
+var s = new ShardingTest({shards: 2, mongos: 1});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}}));
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}}));
- assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount());
- s.addShardTag(s.shard0.shardName, "a");
- s.addShardTag(s.shard0.shardName, "b");
+s.addShardTag(s.shard0.shardName, "a");
+s.addShardTag(s.shard0.shardName, "b");
- s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
- s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
+s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
- s.startBalancer();
+s.startBalancer();
- assert.soon(function() {
- return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
- }, 'Split did not occur', 3 * 60 * 1000);
+assert.soon(function() {
+ return s.config.chunks.find({"ns": "test.foo"}).itcount() == 4;
+}, 'Split did not occur', 3 * 60 * 1000);
- s.awaitBalancerRound();
- s.printShardingStatus(true);
- assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
+s.awaitBalancerRound();
+s.printShardingStatus(true);
+assert.eq(4, s.config.chunks.find({"ns": "test.foo"}).itcount(), 'Split points changed');
- s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
- var numFields = 0;
- for (var x in chunk.min) {
- numFields++;
- assert(x == "_id" || x == "a", tojson(chunk));
- }
- assert.eq(2, numFields, tojson(chunk));
- });
+s.config.chunks.find({"ns": "test.foo"}).forEach(function(chunk) {
+ var numFields = 0;
+ for (var x in chunk.min) {
+ numFields++;
+ assert(x == "_id" || x == "a", tojson(chunk));
+ }
+ assert.eq(2, numFields, tojson(chunk));
+});
- // Check chunk mins correspond exactly to tag range boundaries, extended to match shard key
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey, a: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5, a: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10, a: MinKey}}).itcount());
- assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15, a: MinKey}}).itcount());
+// Check chunk mins correspond exactly to tag range boundaries, extended to match shard key
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: MinKey, a: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 5, a: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 10, a: MinKey}}).itcount());
+assert.eq(1, s.config.chunks.find({"ns": "test.foo", min: {_id: 15, a: MinKey}}).itcount());
- s.stop();
+s.stop();
})();
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index 3cb99e6ab9f..0cfb3cd35a1 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -1,93 +1,91 @@
// tests to make sure that tag ranges are added/removed/updated successfully
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({shards: 2, mongos: 1});
+const st = new ShardingTest({shards: 2, mongos: 1});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.tag_range', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.tag_range', key: {_id: 1}}));
- function countTags(num, message) {
- assert.eq(st.config.tags.count(), num, message);
- }
+function countTags(num, message) {
+ assert.eq(st.config.tags.count(), num, message);
+}
- assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"}));
+assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"}));
- st.addShardTag(st.shard0.shardName, 'a');
- st.addShardTag(st.shard0.shardName, 'b');
- st.addShardTag(st.shard0.shardName, 'c');
+st.addShardTag(st.shard0.shardName, 'a');
+st.addShardTag(st.shard0.shardName, 'b');
+st.addShardTag(st.shard0.shardName, 'c');
- // add two ranges, verify the additions
+// add two ranges, verify the additions
- assert.commandWorked(st.addTagRange('test.tag_range', {_id: 5}, {_id: 10}, 'a'));
- assert.commandWorked(st.addTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
+assert.commandWorked(st.addTagRange('test.tag_range', {_id: 5}, {_id: 10}, 'a'));
+assert.commandWorked(st.addTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
- countTags(2, 'tag ranges were not successfully added');
+countTags(2, 'tag ranges were not successfully added');
- // remove the second range, should be left with one
+// remove the second range, should be left with one
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b'));
- countTags(1, 'tag range not removed successfully');
+countTags(1, 'tag range not removed successfully');
- // add range min=max, verify the additions
+// add range min=max, verify the additions
- try {
- st.addTagRange('test.tag_range', {_id: 20}, {_id: 20}, 'a');
- } catch (e) {
- countTags(1, 'tag range should not have been added');
- }
+try {
+ st.addTagRange('test.tag_range', {_id: 20}, {_id: 20}, 'a');
+} catch (e) {
+ countTags(1, 'tag range should not have been added');
+}
- // Test that a dotted path is allowed in a tag range if it includes the shard key.
- assert.commandWorked(
- st.s0.adminCommand({shardCollection: 'test.tag_range_dotted', key: {"_id.a": 1}}));
- assert.commandWorked(st.addTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
- countTags(2, 'Dotted path tag range not successfully added.');
+// Test that a dotted path is allowed in a tag range if it includes the shard key.
+assert.commandWorked(
+ st.s0.adminCommand({shardCollection: 'test.tag_range_dotted', key: {"_id.a": 1}}));
+assert.commandWorked(st.addTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
+countTags(2, 'Dotted path tag range not successfully added.');
- assert.commandWorked(
- st.removeTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
- assert.commandFailed(st.addTagRange('test.tag_range_dotted', {"_id.b": 5}, {"_id.b": 10}, 'c'));
- countTags(1, 'Incorrectly added tag range.');
+assert.commandWorked(st.removeTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c'));
+assert.commandFailed(st.addTagRange('test.tag_range_dotted', {"_id.b": 5}, {"_id.b": 10}, 'c'));
+countTags(1, 'Incorrectly added tag range.');
- // Test that ranges on embedded fields of the shard key are not allowed.
- assert.commandFailed(
- st.addTagRange('test.tag_range_dotted', {_id: {a: 5}}, {_id: {a: 10}}, 'c'));
- countTags(1, 'Incorrectly added embedded field tag range');
+// Test that ranges on embedded fields of the shard key are not allowed.
+assert.commandFailed(st.addTagRange('test.tag_range_dotted', {_id: {a: 5}}, {_id: {a: 10}}, 'c'));
+countTags(1, 'Incorrectly added embedded field tag range');
- // removeTagRange tests for tag ranges that do not exist
+// removeTagRange tests for tag ranges that do not exist
- // Bad namespace
- assert.commandFailed(st.removeTagRange('badns', {_id: 5}, {_id: 11}, 'a'));
- countTags(1, 'Bad namespace: tag range does not exist');
+// Bad namespace
+assert.commandFailed(st.removeTagRange('badns', {_id: 5}, {_id: 11}, 'a'));
+countTags(1, 'Bad namespace: tag range does not exist');
- // Bad tag
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 'badtag'));
- countTags(1, 'Bad tag: tag range does not exist');
+// Bad tag
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 'badtag'));
+countTags(1, 'Bad tag: tag range does not exist');
- // Bad min
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 0}, {_id: 11}, 'a'));
- countTags(1, 'Bad min: tag range does not exist');
+// Bad min
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 0}, {_id: 11}, 'a'));
+countTags(1, 'Bad min: tag range does not exist');
- // Bad max
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 12}, 'a'));
- countTags(1, 'Bad max: tag range does not exist');
+// Bad max
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 12}, 'a'));
+countTags(1, 'Bad max: tag range does not exist');
- // Invalid namesapce
- assert.commandFailed(st.removeTagRange(35, {_id: 5}, {_id: 11}, 'a'));
- countTags(1, 'Invalid namespace: tag range does not exist');
+// Invalid namespace
+assert.commandFailed(st.removeTagRange(35, {_id: 5}, {_id: 11}, 'a'));
+countTags(1, 'Invalid namespace: tag range does not exist');
- // Invalid tag
- assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 35));
- countTags(1, 'Invalid tag: tag range does not exist');
+// Invalid tag
+assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 35));
+countTags(1, 'Invalid tag: tag range does not exist');
- // Invalid min
- assert.commandFailed(st.removeTagRange('test.tag_range', 35, {_id: 11}, 'a'));
- countTags(1, 'Invalid min: tag range does not exist');
+// Invalid min
+assert.commandFailed(st.removeTagRange('test.tag_range', 35, {_id: 11}, 'a'));
+countTags(1, 'Invalid min: tag range does not exist');
- // Invalid max
- assert.commandFailed(st.removeTagRange('test.tag_range', {_id: 5}, 35, 'a'));
- countTags(1, 'Invalid max: tag range does not exist');
+// Invalid max
+assert.commandFailed(st.removeTagRange('test.tag_range', {_id: 5}, 35, 'a'));
+countTags(1, 'Invalid max: tag range does not exist');
- st.stop();
+st.stop();
})();
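
Aside (not part of this patch): a short sketch of the removeTagRange matching behavior the assertions above exercise, with an illustrative namespace. A call whose (ns, min, max, tag) tuple does not exactly match an existing entry reports success but removes nothing.

var st = new ShardingTest({shards: 1, mongos: 1});

assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.zones', key: {_id: 1}}));
st.addShardTag(st.shard0.shardName, 'a');

assert.commandWorked(st.addTagRange('test.zones', {_id: 0}, {_id: 5}, 'a'));
assert.eq(1, st.config.tags.count());

// A mismatched max bound: the command succeeds but the entry survives.
assert.commandWorked(st.removeTagRange('test.zones', {_id: 0}, {_id: 6}, 'a'));
assert.eq(1, st.config.tags.count());

// An exact match removes the entry.
assert.commandWorked(st.removeTagRange('test.zones', {_id: 0}, {_id: 5}, 'a'));
assert.eq(0, st.config.tags.count());

st.stop();
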
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index b8baba5f5b2..b85a188938d 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -1,68 +1,67 @@
// Tests "stacking" multiple migration cleanup threads and their behavior when the collection
// changes
(function() {
- 'use strict';
+'use strict';
- // start up a new sharded cluster
- var st = new ShardingTest({shards: 2, mongos: 1});
+// start up a new sharded cluster
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
- // Enable sharding of the collection
- assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
+// Enable sharding of the collection
+assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
- var numChunks = 30;
+var numChunks = 30;
- // Create a bunch of chunks
- for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(mongos.adminCommand({split: coll + "", middle: {_id: i}}));
- }
+// Create a bunch of chunks
+for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(mongos.adminCommand({split: coll + "", middle: {_id: i}}));
+}
- jsTest.log("Inserting a lot of small documents...");
+jsTest.log("Inserting a lot of small documents...");
- // Insert a lot of small documents to make multiple cursor batches
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 10 * 1000; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+// Insert a lot of small documents to make multiple cursor batches
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < 10 * 1000; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Opening a mongod cursor...");
+jsTest.log("Opening a mongod cursor...");
- // Open a new cursor on the mongod
- var cursor = coll.find();
- var next = cursor.next();
+// Open a new cursor on the mongod
+var cursor = coll.find();
+var next = cursor.next();
- jsTest.log("Moving a bunch of chunks to stack cleanup...");
+jsTest.log("Moving a bunch of chunks to stack cleanup...");
- // Move a bunch of chunks, but don't close the cursor so they stack.
- for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(
- mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: st.shard1.shardName}));
- }
+// Move a bunch of chunks, but don't close the cursor so they stack.
+for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: st.shard1.shardName}));
+}
- jsTest.log("Dropping and re-creating collection...");
+jsTest.log("Dropping and re-creating collection...");
- coll.drop();
+coll.drop();
- bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numChunks; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numChunks; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- sleep(10 * 1000);
+sleep(10 * 1000);
- jsTest.log("Checking that documents were not cleaned up...");
+jsTest.log("Checking that documents were not cleaned up...");
- for (var i = 0; i < numChunks; i++) {
- assert.neq(null, coll.findOne({_id: i}));
- }
-
- st.stop();
+for (var i = 0; i < numChunks; i++) {
+ assert.neq(null, coll.findOne({_id: i}));
+}
+st.stop();
})();
diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js
index c75ac56628b..73b59b16f7a 100644
--- a/jstests/sharding/time_zone_info_mongos.js
+++ b/jstests/sharding/time_zone_info_mongos.js
@@ -1,100 +1,99 @@
// Test that mongoS accepts --timeZoneInfo <timezoneDBPath> as a command-line argument and that an
// aggregation pipeline with timezone expressions executes correctly on mongoS.
(function() {
- const tzGoodInfo = "jstests/libs/config_files/good_timezone_info";
- const tzBadInfo = "jstests/libs/config_files/bad_timezone_info";
- const tzNoInfo = "jstests/libs/config_files/missing_directory";
-
- const st = new ShardingTest({
- shards: 2,
- mongos: {s0: {timeZoneInfo: tzGoodInfo}},
- rs: {nodes: 1, timeZoneInfo: tzGoodInfo}
- });
-
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
-
- assert.commandWorked(mongosDB.dropDatabase());
-
- // Confirm that the timeZoneInfo command-line argument has been set on mongoS.
- const mongosCfg = assert.commandWorked(mongosDB.adminCommand({getCmdLineOpts: 1}));
- assert.eq(mongosCfg.parsed.processManagement.timeZoneInfo, tzGoodInfo);
-
- // Test that a bad timezone file causes mongoS startup to fail.
- let conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzBadInfo});
- assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
- assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
-
- // Test that a non-existent timezone directory causes mongoS startup to fail.
- conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzNoInfo});
- assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
- // Look for either old or new error message
- assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
- rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
-
- // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
-
- // Shard the test collection on _id.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
- // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
- assert.commandWorked(
- mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-
- // Move the [0, MaxKey) chunk to st.shard1.shardName.
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
-
- // Write a document containing a 'date' field to each chunk.
- assert.writeOK(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
- assert.writeOK(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
-
- // Constructs a pipeline which splits the 'date' field into its constituent parts on mongoD,
- // reassembles the original date on mongoS, and verifies that the two match. All timezone
- // expressions in the pipeline use the passed 'tz' string or, if absent, default to "GMT".
- function buildTimeZonePipeline(tz) {
- // We use $const here so that the input pipeline matches the format of the explain output.
- const tzExpr = {$const: (tz || "GMT")};
- return [
- {$addFields: {mongodParts: {$dateToParts: {date: "$date", timezone: tzExpr}}}},
- {$_internalSplitPipeline: {mergeType: "mongos"}},
- {
- $addFields: {
- mongosDate: {
- $dateFromParts: {
- year: "$mongodParts.year",
- month: "$mongodParts.month",
- day: "$mongodParts.day",
- hour: "$mongodParts.hour",
- minute: "$mongodParts.minute",
- second: "$mongodParts.second",
- millisecond: "$mongodParts.millisecond",
- timezone: tzExpr
- }
- }
- }
- },
- {$match: {$expr: {$eq: ["$date", "$mongosDate"]}}}
- ];
- }
-
- // Confirm that the pipe splits at '$_internalSplitPipeline' and that the merge runs on mongoS.
- let timeZonePipeline = buildTimeZonePipeline("GMT");
- const tzExplain = assert.commandWorked(mongosColl.explain().aggregate(timeZonePipeline));
- assert.eq(tzExplain.splitPipeline.shardsPart, [timeZonePipeline[0]]);
- assert.eq(tzExplain.splitPipeline.mergerPart, timeZonePipeline.slice(1));
- assert.eq(tzExplain.mergeType, "mongos");
-
- // Confirm that both documents are output by the pipeline, demonstrating that the date has been
- // correctly disassembled on mongoD and reassembled on mongoS.
- assert.eq(mongosColl.aggregate(timeZonePipeline).itcount(), 2);
-
- // Confirm that aggregating with a timezone which is not present in 'good_timezone_info' fails.
- timeZonePipeline = buildTimeZonePipeline("Europe/Dublin");
- assert.eq(assert.throws(() => mongosColl.aggregate(timeZonePipeline)).code, 40485);
-
- st.stop();
+const tzGoodInfo = "jstests/libs/config_files/good_timezone_info";
+const tzBadInfo = "jstests/libs/config_files/bad_timezone_info";
+const tzNoInfo = "jstests/libs/config_files/missing_directory";
+
+const st = new ShardingTest({
+ shards: 2,
+ mongos: {s0: {timeZoneInfo: tzGoodInfo}},
+ rs: {nodes: 1, timeZoneInfo: tzGoodInfo}
+});
+
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
+
+assert.commandWorked(mongosDB.dropDatabase());
+
+// Confirm that the timeZoneInfo command-line argument has been set on mongoS.
+const mongosCfg = assert.commandWorked(mongosDB.adminCommand({getCmdLineOpts: 1}));
+assert.eq(mongosCfg.parsed.processManagement.timeZoneInfo, tzGoodInfo);
+
+// Test that a bad timezone file causes mongoS startup to fail.
+let conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzBadInfo});
+assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
+assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
+
+// Test that a non-existent timezone directory causes mongoS startup to fail.
+conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzNoInfo});
+assert.eq(conn, null, "expected launching mongos with missing timezone directory to fail");
+// Look for either the old or the new error message.
+assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
+ rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
+
+// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+// Shard the test collection on _id.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+// Move the [0, MaxKey) chunk to st.shard1.shardName.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+// Write a document containing a 'date' field to each chunk.
+assert.writeOK(mongosColl.insert({_id: -1, date: ISODate("2017-11-13T12:00:00.000+0000")}));
+assert.writeOK(mongosColl.insert({_id: 1, date: ISODate("2017-11-13T03:00:00.000+0600")}));
+
+// Constructs a pipeline which splits the 'date' field into its constituent parts on mongoD,
+// reassembles the original date on mongoS, and verifies that the two match. All timezone
+// expressions in the pipeline use the passed 'tz' string or, if absent, default to "GMT".
+function buildTimeZonePipeline(tz) {
+ // We use $const here so that the input pipeline matches the format of the explain output.
+ const tzExpr = {$const: (tz || "GMT")};
+ return [
+ {$addFields: {mongodParts: {$dateToParts: {date: "$date", timezone: tzExpr}}}},
+ {$_internalSplitPipeline: {mergeType: "mongos"}},
+ {
+ $addFields: {
+ mongosDate: {
+ $dateFromParts: {
+ year: "$mongodParts.year",
+ month: "$mongodParts.month",
+ day: "$mongodParts.day",
+ hour: "$mongodParts.hour",
+ minute: "$mongodParts.minute",
+ second: "$mongodParts.second",
+ millisecond: "$mongodParts.millisecond",
+ timezone: tzExpr
+ }
+ }
+ }
+ },
+ {$match: {$expr: {$eq: ["$date", "$mongosDate"]}}}
+ ];
+}
+
+// Confirm that the pipeline splits at '$_internalSplitPipeline' and that the merge runs on mongoS.
+let timeZonePipeline = buildTimeZonePipeline("GMT");
+const tzExplain = assert.commandWorked(mongosColl.explain().aggregate(timeZonePipeline));
+assert.eq(tzExplain.splitPipeline.shardsPart, [timeZonePipeline[0]]);
+assert.eq(tzExplain.splitPipeline.mergerPart, timeZonePipeline.slice(1));
+assert.eq(tzExplain.mergeType, "mongos");
+
+// Confirm that both documents are output by the pipeline, demonstrating that the date has been
+// correctly disassembled on mongoD and reassembled on mongoS.
+assert.eq(mongosColl.aggregate(timeZonePipeline).itcount(), 2);
+
+// Confirm that aggregating with a timezone which is not present in 'good_timezone_info' fails.
+timeZonePipeline = buildTimeZonePipeline("Europe/Dublin");
+assert.eq(assert.throws(() => mongosColl.aggregate(timeZonePipeline)).code, 40485);
+
+st.stop();
})();
\ No newline at end of file
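// The explain-based check above generalizes: the split/merge placement of any
// sharded aggregation can be inspected the same way. A minimal sketch, assuming
// a sharded collection 'coll' and an arbitrary 'pipeline' (the helper name is
// illustrative, not part of this patch):
function assertMergesOnMongos(coll, pipeline) {
    const explainRes = assert.commandWorked(coll.explain().aggregate(pipeline));
    // 'splitPipeline' carries the shard-side and merge-side halves of the
    // pipeline; 'mergeType' names where the merging half runs.
    assert.eq(explainRes.mergeType, "mongos", tojson(explainRes));
}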
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 78b86e64e50..bbf930c9b8f 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -157,119 +157,119 @@ var configDB = st.s.getDB('config');
// high - high shard key value
var tests = [
{
- // Test auto-split on the "low" top chunk to another tagged shard
- name: "low top chunk with tag move",
- lowOrHigh: lowChunk,
- movedToShard: st.rs2.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another tagged shard
+ name: "low top chunk with tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs2.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "low" top chunk to same tagged shard
- name: "low top chunk with tag no move",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to same tagged shard
+ name: "low top chunk with tag no move",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "low" top chunk to another shard
- name: "low top chunk no tag move",
- lowOrHigh: lowChunk,
- movedToShard: st.rs3.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20},
- {name: st.rs2.name, range: highChunkRange, chunks: 5},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another shard
+ name: "low top chunk no tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs3.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+ {name: st.rs2.name, range: highChunkRange, chunks: 5},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another tagged shard
- name: "high top chunk with tag move",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another tagged shard
+ name: "high top chunk with tag move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another shard
- name: "high top chunk no tag move",
- lowOrHigh: highChunk,
- movedToShard: st.rs3.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 5},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20},
- {name: st.rs2.name, range: highChunkRange, chunks: 20},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another shard
+ name: "high top chunk no tag move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs3.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 5},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+ {name: st.rs2.name, range: highChunkRange, chunks: 20},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same tagged shard
- name: "high top chunk with tag no move",
- lowOrHigh: highChunk,
- movedToShard: st.rs2.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
- ],
- tagRanges: [
- {range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same tagged shard
+ name: "high top chunk with tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs2.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same shard
- name: "high top chunk no tag no move",
- lowOrHigh: highChunk,
- movedToShard: st.rs2.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 20},
- {name: st.rs1.name, range: midChunkRange1, chunks: 20},
- {name: st.rs2.name, range: highChunkRange, chunks: 1},
- {name: st.rs3.name, range: midChunkRange2, chunks: 5}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same shard
+ name: "high top chunk no tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs2.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 20},
+ {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+ {name: st.rs2.name, range: highChunkRange, chunks: 1},
+ {name: st.rs3.name, range: midChunkRange2, chunks: 5}
+ ],
+ inserts: highChunkInserts
},
];
@@ -296,20 +296,20 @@ st.ensurePrimaryShard(dbName, st.rs0.name);
var singleNodeTests = [
{
- // Test auto-split on the "low" top chunk on single node shard
- name: "single node shard - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [{name: st.rs0.name, range: lowChunkRange, chunks: 2}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk on single node shard
+ name: "single node shard - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs0.name,
+ shards: [{name: st.rs0.name, range: lowChunkRange, chunks: 2}],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk on single node shard
- name: "single node shard - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [{name: st.rs0.name, range: highChunkRange, chunks: 2}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk on single node shard
+ name: "single node shard - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs0.name,
+ shards: [{name: st.rs0.name, range: highChunkRange, chunks: 2}],
+ inserts: highChunkInserts
},
];
@@ -336,26 +336,26 @@ configDB = st.s.getDB('config');
var maxSizeTests = [
{
- // Test auto-split on the "low" top chunk with maxSize on destination shard
- name: "maxSize - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: lowChunkRange, chunks: 10},
- {name: st.rs1.name, range: highChunkRange, chunks: 1}
- ],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk with maxSize on destination shard
+ name: "maxSize - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: lowChunkRange, chunks: 10},
+ {name: st.rs1.name, range: highChunkRange, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk with maxSize on destination shard
- name: "maxSize - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: st.rs0.name,
- shards: [
- {name: st.rs0.name, range: highChunkRange, chunks: 10},
- {name: st.rs1.name, range: lowChunkRange, chunks: 1}
- ],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk with maxSize on destination shard
+ name: "maxSize - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: st.rs0.name,
+ shards: [
+ {name: st.rs0.name, range: highChunkRange, chunks: 10},
+ {name: st.rs1.name, range: lowChunkRange, chunks: 1}
+ ],
+ inserts: highChunkInserts
},
];
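// Each entry above is a declarative case: 'shards' gives the starting chunk
// counts and zone tags per shard, 'lowOrHigh' selects which extreme chunk is
// stressed, and 'movedToShard' is where the split-off top chunk should end up.
// A sketch of summarizing one case (the helper name is illustrative):
function describeTopChunkCase(test) {
    const layout = test.shards.map(s => s.name + ": " + s.chunks + " chunks").join(", ");
    print("case '" + test.name + "': expect top chunk on " + test.movedToShard +
          " given {" + layout + "}");
}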
diff --git a/jstests/sharding/top_chunk_split.js b/jstests/sharding/top_chunk_split.js
index 5aeeb14ddfd..d7d6c4cda0f 100644
--- a/jstests/sharding/top_chunk_split.js
+++ b/jstests/sharding/top_chunk_split.js
@@ -5,144 +5,139 @@
* entire shard key space.
*/
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 1});
-
- var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
-
- var callSplit = function(db, minKey, maxKey, splitPoints) {
- var res = st.s.adminCommand({getShardVersion: "test.user"});
- assert.commandWorked(res);
- var shardVersion = [res.version, res.versionEpoch];
- return db.runCommand({
- splitChunk: 'test.user',
- from: st.shard0.shardName,
- min: minKey,
- max: maxKey,
- keyPattern: {x: 1},
- splitKeys: splitPoints,
- epoch: res.versionEpoch,
- });
- };
-
- var tests = [
- //
- // Lower extreme chunk tests.
- //
-
- // All chunks have 1 doc.
- //
- // Expected doc counts for new chunks:
- // [ MinKey, -2 ): 1
- // [ -2, -1 ): 1
- // [ -1, 0): 1
- //
- function(db) {
- var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}, {x: -1}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0,
- tojson(res.shouldMigrate.max));
- },
-
- // One chunk has single doc, extreme doesn't.
- //
- // Expected doc counts for new chunks:
- // [ MinKey, -1 ): 2
- // [ -1, 0): 1
- //
- function(db) {
- var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -1}]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
- },
-
- // Only extreme has single doc.
- //
- // Expected doc counts for new chunks:
- // [ MinKey, -2 ): 1
- // [ -2, 0): 2
- //
- function(db) {
- var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0,
- tojson(res.shouldMigrate.max));
- },
-
- //
- // Upper extreme chunk tests.
- //
-
- // All chunks have 1 doc.
- //
- // Expected doc counts for new chunks:
- // [ 0, 1 ): 1
- // [ 1, 2 ): 1
- // [ 2, MaxKey): 1
- //
- function(db) {
- var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}, {x: 2}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
- tojson(res.shouldMigrate.max));
- },
-
- // One chunk has single doc, extreme doesn't.
- //
- // Expected doc counts for new chunks:
- // [ 0, 1 ): 1
- // [ 1, MaxKey): 2
- //
- function(db) {
- var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
- },
-
- // Only extreme has single doc.
- //
- // Expected doc counts for new chunks:
- // [ 0, 2 ): 2
- // [ 2, MaxKey): 1
- //
- function(db) {
- var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0,
- tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
- tojson(res.shouldMigrate.max));
- },
- ];
-
- tests.forEach(function(test) {
- // setup
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
-
- for (var x = -3; x < 3; x++) {
- testDB.user.insert({x: x});
- }
-
- // run test
- test(st.rs0.getPrimary().getDB('admin'));
-
- // teardown
- testDB.user.drop();
+'use strict';
+
+var st = new ShardingTest({shards: 1});
+
+var testDB = st.s.getDB('test');
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+
+var callSplit = function(db, minKey, maxKey, splitPoints) {
+ var res = st.s.adminCommand({getShardVersion: "test.user"});
+ assert.commandWorked(res);
+ var shardVersion = [res.version, res.versionEpoch];
+ return db.runCommand({
+ splitChunk: 'test.user',
+ from: st.shard0.shardName,
+ min: minKey,
+ max: maxKey,
+ keyPattern: {x: 1},
+ splitKeys: splitPoints,
+ epoch: res.versionEpoch,
});
-
- st.stop();
-
+};
+
+var tests = [
+ //
+ // Lower extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, -1 ): 1
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}, {x: -1}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -1 ): 2
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, 0): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ //
+ // Upper extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, 2 ): 1
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}, {x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, MaxKey): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 2 ): 2
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
+];
+
+tests.forEach(function(test) {
+ // setup
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+
+ for (var x = -3; x < 3; x++) {
+ testDB.user.insert({x: x});
+ }
+
+ // run test
+ test(st.rs0.getPrimary().getDB('admin'));
+
+ // teardown
+ testDB.user.drop();
+});
+
+st.stop();
})();
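// The 'shouldMigrate' field in the splitChunk responses above is a hint: when
// the extreme chunk splits off with a single document, the server reports its
// bounds so the caller (normally the balancer) can relocate it. A sketch of
// acting on the hint, assuming moveChunk accepts explicit 'bounds' for this
// collection ('toShard' and the helper name are illustrative):
function migrateIfHinted(mongos, ns, splitRes, toShard) {
    if (splitRes.shouldMigrate) {
        assert.commandWorked(mongos.adminCommand({
            moveChunk: ns,
            bounds: [splitRes.shouldMigrate.min, splitRes.shouldMigrate.max],
            to: toShard
        }));
    }
}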
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index b09003617d4..329ad529ac3 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -2,45 +2,45 @@
load('jstests/libs/trace_missing_docs.js');
(function() {
- 'use strict';
+'use strict';
- var testDocMissing = function(useReplicaSet) {
- var options = {
- rs: useReplicaSet,
- shardOptions: {oplogSize: 10},
- rsOptions: {nodes: 1, oplogSize: 10}
- };
+var testDocMissing = function(useReplicaSet) {
+ var options = {
+ rs: useReplicaSet,
+ shardOptions: {oplogSize: 10},
+ rsOptions: {nodes: 1, oplogSize: 10}
+ };
- var st = new ShardingTest({shards: 2, mongos: 1, other: options});
+ var st = new ShardingTest({shards: 2, mongos: 1, other: options});
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- coll.ensureIndex({sk: 1});
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
+ coll.ensureIndex({sk: 1});
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
- assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
- assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
- assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
+ assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
+ assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
+ assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {sk: 0}, to: st.shard1.shardName, _waitForDelete: true}));
- st.printShardingStatus();
+ st.printShardingStatus();
- var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
+ var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
- assert.eq(ops[0].op, 'i');
- assert.eq(ops.length, 5);
+ assert.eq(ops[0].op, 'i');
+ assert.eq(ops.length, 5);
- jsTest.log("DONE! (using rs)");
+ jsTest.log("DONE! (using rs)");
- st.stop();
- };
+ st.stop();
+};
- testDocMissing(true);
+testDocMissing(true);
})();
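// traceMissingDoc returns the oplog entries it located for the document across
// the cluster; the assertions above check that the first entry is the insert
// ('i') and that five entries exist in total. Standard oplog op codes ('i'
// insert, 'u' update, 'd' delete) make the trace easy to summarize inside
// testDocMissing:
ops.forEach(function(op) {
    print("traced oplog entry: " + op.op + " -> " + tojson(op.o));
});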
diff --git a/jstests/sharding/transactions_causal_consistency.js b/jstests/sharding/transactions_causal_consistency.js
index 5ab2c8e9aba..e2f6a9aed58 100644
--- a/jstests/sharding/transactions_causal_consistency.js
+++ b/jstests/sharding/transactions_causal_consistency.js
@@ -7,78 +7,77 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2, mongos: 2});
+const st = new ShardingTest({shards: 2, mongos: 2});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- // Set up a sharded collection with 2 chunks, [min, 0) and [0, max), one on each shard, with one
- // document in each.
+// Set up a sharded collection with 2 chunks, [min, 0) and [0, max), one on each shard, with one
+// document in each.
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Verifies transactions using causal consistency read all causally prior operations.
+function runTest(st, readConcern) {
+ jsTestLog("Testing readConcern: " + tojson(readConcern));
+
+ const session = st.s.startSession({causalConsistency: true});
+ const sessionDB = session.getDatabase(dbName);
+
+ // Insert data to one shard in a causally consistent session.
+ const docToInsert = {_id: 5};
+ assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [docToInsert]}));
+
+ // Through a separate router, move the chunk that the document was inserted into, so the
+ // original router is stale when it starts its transaction.
+ const otherRouter = st.s1;
assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Verifies transactions using causal consistency read all causally prior operations.
- function runTest(st, readConcern) {
- jsTestLog("Testing readConcern: " + tojson(readConcern));
-
- const session = st.s.startSession({causalConsistency: true});
- const sessionDB = session.getDatabase(dbName);
-
- // Insert data to one shard in a causally consistent session.
- const docToInsert = {_id: 5};
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [docToInsert]}));
-
- // Through a separate router, move the chunk that the document was inserted into, so the
- // original router is stale when it starts its transaction.
- const otherRouter = st.s1;
- assert.commandWorked(
- otherRouter.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard0.shardName}));
-
- session.startTransaction({readConcern: readConcern});
-
- // The transaction should always see the document written earlier through its session,
- // regardless of the move.
- //
- // Note: until transactions can read from secondaries and/or disabling speculative snapshot
- // is allowed, read concerns that do not require global snapshots (i.e. local and majority)
- // will always read the inserted document here because the local snapshot established on
- // this shard will include all currently applied operations, which must include all earlier
- // acknowledged writes.
- assert.docEq(docToInsert,
- sessionDB[collName].findOne(docToInsert),
- "sharded transaction with read concern " + tojson(readConcern) +
- " did not see expected document");
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Clean up for the next iteration.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard1.shardName}));
- assert.writeOK(sessionDB[collName].remove(docToInsert));
- }
-
- const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
- for (let readConcernLevel of kAllowedReadConcernLevels) {
- runTest(st, {level: readConcernLevel});
- }
-
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
-
- st.stop();
+ otherRouter.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard0.shardName}));
+
+ session.startTransaction({readConcern: readConcern});
+
+ // The transaction should always see the document written earlier through its session,
+ // regardless of the move.
+ //
+ // Note: until transactions can read from secondaries and/or disabling speculative snapshot
+ // is allowed, read concerns that do not require global snapshots (i.e. local and majority)
+ // will always read the inserted document here because the local snapshot established on
+ // this shard will include all currently applied operations, which must include all earlier
+ // acknowledged writes.
+ assert.docEq(docToInsert,
+ sessionDB[collName].findOne(docToInsert),
+ "sharded transaction with read concern " + tojson(readConcern) +
+ " did not see expected document");
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Clean up for the next iteration.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: docToInsert, to: st.shard1.shardName}));
+ assert.writeOK(sessionDB[collName].remove(docToInsert));
+}
+
+const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
+for (let readConcernLevel of kAllowedReadConcernLevels) {
+ runTest(st, {level: readConcernLevel});
+}
+
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+
+st.stop();
})();
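// The guarantee exercised above also holds outside transactions: within one
// causally consistent session, a read observes the session's earlier
// acknowledged writes. A minimal sketch, assuming a mongos connection 'conn'
// (database and collection names are illustrative):
const ccSession = conn.startSession({causalConsistency: true});
const ccColl = ccSession.getDatabase("test")["causal_demo"];
assert.writeOK(ccColl.insert({_id: "after-write"}));
// The read carries an afterClusterTime derived from the insert, so it cannot
// miss the document.
assert.eq(1, ccColl.find({_id: "after-write"}).itcount());
ccSession.endSession();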
diff --git a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
index ffb7d9dde37..11a2c39997f 100644
--- a/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
+++ b/jstests/sharding/transactions_distinct_not_allowed_on_sharded_collections.js
@@ -5,47 +5,45 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const st = new ShardingTest({shards: 1});
+const st = new ShardingTest({shards: 1});
- // Set up a sharded and unsharded collection, each with one document.
+// Set up a sharded and unsharded collection, each with one document.
- const unshardedDbName = "unsharded_db";
- const unshardedCollName = "unsharded_coll";
+const unshardedDbName = "unsharded_db";
+const unshardedCollName = "unsharded_coll";
- const shardedDbName = "sharded_db";
- const shardedCollName = "sharded_coll";
- const shardedNs = shardedDbName + "." + shardedCollName;
+const shardedDbName = "sharded_db";
+const shardedCollName = "sharded_coll";
+const shardedNs = shardedDbName + "." + shardedCollName;
- assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: shardedNs, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: shardedNs, key: {_id: 1}}));
- const session = st.s.startSession();
- const unshardedCollDB = session.getDatabase(unshardedDbName);
- const shardedCollDB = session.getDatabase(shardedDbName);
+const session = st.s.startSession();
+const unshardedCollDB = session.getDatabase(unshardedDbName);
+const shardedCollDB = session.getDatabase(shardedDbName);
- assert.writeOK(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
- assert.writeOK(shardedCollDB[shardedCollName].insert({_id: "jack"}));
+assert.writeOK(unshardedCollDB[unshardedCollName].insert({_id: "jack"}));
+assert.writeOK(shardedCollDB[shardedCollName].insert({_id: "jack"}));
- // Reload metadata to avoid stale config or stale database version errors.
- flushRoutersAndRefreshShardMetadata(st, {ns: shardedNs, dbNames: [unshardedDbName]});
+// Reload metadata to avoid stale config or stale database version errors.
+flushRoutersAndRefreshShardMetadata(st, {ns: shardedNs, dbNames: [unshardedDbName]});
- // Can run distinct on an unsharded collection.
- session.startTransaction();
- assert.eq(unshardedCollDB.runCommand({distinct: unshardedCollName, key: "_id"}).values,
- ["jack"]);
- assert.commandWorked(session.commitTransaction_forTesting());
+// Can run distinct on an unsharded collection.
+session.startTransaction();
+assert.eq(unshardedCollDB.runCommand({distinct: unshardedCollName, key: "_id"}).values, ["jack"]);
+assert.commandWorked(session.commitTransaction_forTesting());
- // Cannot run distinct on a sharded collection.
- session.startTransaction();
- assert.commandFailedWithCode(shardedCollDB.runCommand({distinct: shardedCollName, key: "_id"}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// Cannot run distinct on a sharded collection.
+session.startTransaction();
+assert.commandFailedWithCode(shardedCollDB.runCommand({distinct: shardedCollName, key: "_id"}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- session.endSession();
- st.stop();
+session.endSession();
+st.stop();
})();
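// The usual workaround for this restriction is to emulate distinct with a
// $group aggregation, which is allowed in transactions. A sketch reusing the
// test's names (it would have to run before session.endSession() above):
session.startTransaction();
const values = shardedCollDB[shardedCollName]
                   .aggregate([{$group: {_id: "$_id"}}])
                   .toArray()
                   .map(doc => doc._id);
assert.eq(values, ["jack"]);
assert.commandWorked(session.commitTransaction_forTesting());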
diff --git a/jstests/sharding/transactions_error_labels.js b/jstests/sharding/transactions_error_labels.js
index 755f2120167..f295dddf588 100644
--- a/jstests/sharding/transactions_error_labels.js
+++ b/jstests/sharding/transactions_error_labels.js
@@ -1,198 +1,195 @@
// Test the 'TransientTransactionError' error label in mongos write commands.
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- const failCommandWithError = function(rst, {commandToFail, errorCode, closeConnection}) {
- rst.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- closeConnection: closeConnection,
- errorCode: errorCode,
- failCommands: [commandToFail],
- failInternalCommands: true // mongod sees mongos as an internal client
- }
- }));
- });
- };
-
- const failCommandWithWriteConcernError = function(rst, commandToFail) {
- rst.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- writeConcernError: {code: NumberInt(12345), errmsg: "dummy"},
- failCommands: [commandToFail],
- failInternalCommands: true // mongod sees mongos as an internal client
- }
- }));
- });
- };
-
- const turnOffFailCommand = function(rst) {
- rst.nodes.forEach(function(node) {
- assert.commandWorked(
- node.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
- });
- };
-
- let numCalls = 0;
- const startTransaction = function(mongosSession, dbName, collName) {
- numCalls++;
- mongosSession.startTransaction();
- return mongosSession.getDatabase(dbName).runCommand({
- insert: collName,
- // Target both chunks, wherever they may be
- documents: [{_id: -1 * numCalls}, {_id: numCalls}],
- readConcern: {level: "snapshot"},
- });
- };
-
- const abortTransactionDirectlyOnParticipant = function(rst, lsid, txnNumber) {
- assert.commandWorked(rst.getPrimary().adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+const failCommandWithError = function(rst, {commandToFail, errorCode, closeConnection}) {
+ rst.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ closeConnection: closeConnection,
+ errorCode: errorCode,
+ failCommands: [commandToFail],
+ failInternalCommands: true // mongod sees mongos as an internal client
+ }
}));
- };
-
- const commitTransaction = function(mongosSession) {
- let res = mongosSession.commitTransaction_forTesting();
- print("commitTransaction response from mongos: " + tojson(res));
- return res;
- };
-
- const checkMongosResponse = function(
- res, expectedErrorCode, expectedErrorLabel, writeConcernErrorExpected) {
- if (expectedErrorCode) {
- assert.eq(0, res.ok, tojson(res));
- assert.eq(expectedErrorCode, res.code, tojson(res));
- } else {
- assert.eq(1, res.ok, tojson(res));
- }
-
- if (expectedErrorLabel) {
- assert.neq(null, res.errorLabels, tojson(res));
- assert.contains(expectedErrorLabel, res.errorLabels, tojson(res));
- } else {
- assert.eq(null, res.errorLabels, tojson(res));
- }
-
- if (writeConcernErrorExpected) {
- assert.neq(null, res.writeConcernError, tojson(res));
- } else {
- assert.eq(null, res.writeConcernError, tojson(res));
- }
- };
-
- const runCommitTests = function(commandSentToShard) {
- jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
- " returns success.");
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, null, null, null);
-
- jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
- " returns success with writeConcern error.");
- failCommandWithWriteConcernError(st.rs0, commandSentToShard);
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, null, null, true);
- turnOffFailCommand(st.rs0);
-
- jsTest.log("Mongos attaches 'TransientTransactionError' label if " + commandSentToShard +
- " returns NoSuchTransaction.");
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- abortTransactionDirectlyOnParticipant(
- st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, ErrorCodes.NoSuchTransaction, "TransientTransactionError", null);
- turnOffFailCommand(st.rs0);
-
- jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
- " returns NoSuchTransaction with writeConcern error.");
- failCommandWithWriteConcernError(st.rs0, commandSentToShard);
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- abortTransactionDirectlyOnParticipant(
- st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, ErrorCodes.NoSuchTransaction, null, true);
- turnOffFailCommand(st.rs0);
-
- jsTest.log("No error label for network error if " + commandSentToShard +
- " returns network error");
- assert.commandWorked(startTransaction(mongosSession, dbName, collName));
- failCommandWithError(st.rs0, {
- commandToFail: commandSentToShard,
- errorCode: ErrorCodes.InternalError,
- closeConnection: true
- });
- res = mongosSession.commitTransaction_forTesting();
- checkMongosResponse(res, ErrorCodes.HostUnreachable, false /* expectedErrorLabel */, null);
- turnOffFailCommand(st.rs0);
- };
-
- let st = new ShardingTest({shards: 2, config: 1, mongosOptions: {verbose: 3}});
-
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // caused by the shards starting, aborting, and restarting the transaction because they need
- // to refresh after the transaction has started.
- assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- let mongosSession = st.s.startSession();
- let mongosSessionDB = mongosSession.getDatabase(dbName);
-
- let res;
-
- // write statement
- jsTest.log(
- "'TransientTransactionError' label is attached if write statement returns WriteConflict");
- failCommandWithError(
- st.rs0,
- {commandToFail: "insert", errorCode: ErrorCodes.WriteConflict, closeConnection: false});
- res = startTransaction(mongosSession, dbName, collName);
- checkMongosResponse(res, ErrorCodes.WriteConflict, "TransientTransactionError", null);
- turnOffFailCommand(st.rs0);
- assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // statements prior to commit network error
- failCommandWithError(
- st.rs0,
- {commandToFail: "insert", errorCode: ErrorCodes.InternalError, closeConnection: true});
- res = startTransaction(mongosSession, dbName, collName);
- checkMongosResponse(res, ErrorCodes.HostUnreachable, "TransientTransactionError", null);
+ });
+};
+
+const failCommandWithWriteConcernError = function(rst, commandToFail) {
+ rst.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ writeConcernError: {code: NumberInt(12345), errmsg: "dummy"},
+ failCommands: [commandToFail],
+ failInternalCommands: true // mongod sees mongos as an internal client
+ }
+ }));
+ });
+};
+
+const turnOffFailCommand = function(rst) {
+ rst.nodes.forEach(function(node) {
+ assert.commandWorked(
+ node.getDB("admin").runCommand({configureFailPoint: "failCommand", mode: "off"}));
+ });
+};
+
+let numCalls = 0;
+const startTransaction = function(mongosSession, dbName, collName) {
+ numCalls++;
+ mongosSession.startTransaction();
+ return mongosSession.getDatabase(dbName).runCommand({
+ insert: collName,
+ // Target both chunks, wherever they may be
+ documents: [{_id: -1 * numCalls}, {_id: numCalls}],
+ readConcern: {level: "snapshot"},
+ });
+};
+
+const abortTransactionDirectlyOnParticipant = function(rst, lsid, txnNumber) {
+ assert.commandWorked(rst.getPrimary().adminCommand({
+ abortTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ }));
+};
+
+const commitTransaction = function(mongosSession) {
+ let res = mongosSession.commitTransaction_forTesting();
+ print("commitTransaction response from mongos: " + tojson(res));
+ return res;
+};
+
+const checkMongosResponse = function(
+ res, expectedErrorCode, expectedErrorLabel, writeConcernErrorExpected) {
+ if (expectedErrorCode) {
+ assert.eq(0, res.ok, tojson(res));
+ assert.eq(expectedErrorCode, res.code, tojson(res));
+ } else {
+ assert.eq(1, res.ok, tojson(res));
+ }
+
+ if (expectedErrorLabel) {
+ assert.neq(null, res.errorLabels, tojson(res));
+ assert.contains(expectedErrorLabel, res.errorLabels, tojson(res));
+ } else {
+ assert.eq(null, res.errorLabels, tojson(res));
+ }
+
+ if (writeConcernErrorExpected) {
+ assert.neq(null, res.writeConcernError, tojson(res));
+ } else {
+ assert.eq(null, res.writeConcernError, tojson(res));
+ }
+};
+
+const runCommitTests = function(commandSentToShard) {
+ jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
+ " returns success.");
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, null, null, null);
+
+ jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
+ " returns success with writeConcern error.");
+ failCommandWithWriteConcernError(st.rs0, commandSentToShard);
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, null, null, true);
turnOffFailCommand(st.rs0);
- assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- // commitTransaction for single-shard transaction (mongos sends commitTransaction)
- runCommitTests("commitTransaction");
+ jsTest.log("Mongos attaches 'TransientTransactionError' label if " + commandSentToShard +
+ " returns NoSuchTransaction.");
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ abortTransactionDirectlyOnParticipant(
+ st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, ErrorCodes.NoSuchTransaction, "TransientTransactionError", null);
+ turnOffFailCommand(st.rs0);
- // commitTransaction for multi-shard transaction (mongos sends coordinateCommitTransaction)
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns});
- runCommitTests("coordinateCommitTransaction");
+ jsTest.log("Mongos does not attach any error label if " + commandSentToShard +
+ " returns NoSuchTransaction with writeConcern error.");
+ failCommandWithWriteConcernError(st.rs0, commandSentToShard);
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ abortTransactionDirectlyOnParticipant(
+ st.rs0, mongosSession.getSessionId(), mongosSession.getTxnNumber_forTesting());
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, ErrorCodes.NoSuchTransaction, null, true);
+ turnOffFailCommand(st.rs0);
- st.stop();
+ jsTest.log("No error label for network error if " + commandSentToShard +
+ " returns network error");
+ assert.commandWorked(startTransaction(mongosSession, dbName, collName));
+ failCommandWithError(st.rs0, {
+ commandToFail: commandSentToShard,
+ errorCode: ErrorCodes.InternalError,
+ closeConnection: true
+ });
+ res = mongosSession.commitTransaction_forTesting();
+ checkMongosResponse(res, ErrorCodes.HostUnreachable, false /* expectedErrorLabel */, null);
+ turnOffFailCommand(st.rs0);
+};
+
+let st = new ShardingTest({shards: 2, config: 1, mongosOptions: {verbose: 3}});
+
+// Create a sharded collection with a chunk on each shard:
+// shard0: [-inf, 0)
+// shard1: [0, +inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+
+// These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+// caused by the shards starting, aborting, and restarting the transaction because they need
+// to refresh after the transaction has started.
+assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+let mongosSession = st.s.startSession();
+let mongosSessionDB = mongosSession.getDatabase(dbName);
+
+let res;
+
+// write statement
+jsTest.log(
+ "'TransientTransactionError' label is attached if write statement returns WriteConflict");
+failCommandWithError(
+ st.rs0, {commandToFail: "insert", errorCode: ErrorCodes.WriteConflict, closeConnection: false});
+res = startTransaction(mongosSession, dbName, collName);
+checkMongosResponse(res, ErrorCodes.WriteConflict, "TransientTransactionError", null);
+turnOffFailCommand(st.rs0);
+assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+// statements prior to commit network error
+failCommandWithError(
+ st.rs0, {commandToFail: "insert", errorCode: ErrorCodes.InternalError, closeConnection: true});
+res = startTransaction(mongosSession, dbName, collName);
+checkMongosResponse(res, ErrorCodes.HostUnreachable, "TransientTransactionError", null);
+turnOffFailCommand(st.rs0);
+assert.commandFailedWithCode(mongosSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+// commitTransaction for single-shard transaction (mongos sends commitTransaction)
+runCommitTests("commitTransaction");
+
+// commitTransaction for multi-shard transaction (mongos sends coordinateCommitTransaction)
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns});
+runCommitTests("coordinateCommitTransaction");
+
+st.stop();
}());
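// The 'TransientTransactionError' label asserted on throughout this test is
// what retry logic keys on: a transaction that fails with it can safely be
// retried from the start. A sketch of the standard shell-side retry loop
// ('txnFunc' is a caller-supplied function that runs the transaction body):
function runTransactionWithRetry(session, txnFunc) {
    while (true) {
        try {
            txnFunc(session);
            break;
        } catch (error) {
            if (error.hasOwnProperty("errorLabels") &&
                error.errorLabels.includes("TransientTransactionError")) {
                print("TransientTransactionError, retrying transaction ...");
                continue;
            }
            throw error;
        }
    }
}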
diff --git a/jstests/sharding/transactions_expiration.js b/jstests/sharding/transactions_expiration.js
index 65178dd1c30..e337a81632c 100644
--- a/jstests/sharding/transactions_expiration.js
+++ b/jstests/sharding/transactions_expiration.js
@@ -5,75 +5,74 @@
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- let st = new ShardingTest({shards: 2});
+let st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.name);
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.name);
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+
+let lowerTxnTimeout = (conn) => {
assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
-
- let lowerTxnTimeout = (conn) => {
- assert.commandWorked(
- conn.getDB('admin').runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
- };
-
- st.rs0.nodes.forEach(lowerTxnTimeout);
- st.rs1.nodes.forEach(lowerTxnTimeout);
-
- let testDB = st.s.getDB('test');
-
- // Create the collections in the shards outside the transactions.
- assert.commandWorked(testDB.runCommand(
- {insert: 'user', documents: [{x: -1}, {x: 1}], writeConcern: {w: 'majority'}}));
-
- const session = st.s.startSession();
- const sessionDb = session.getDatabase('test');
-
- let txnNumber = 0;
-
- assert.commandWorked(sessionDb.runCommand({
- insert: 'user',
- documents: [{x: -10}, {x: 10}],
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false,
- }));
-
- // We can deterministically wait for the transaction to be aborted by waiting for currentOp
- // to cease reporting the inactive transaction: the transaction should disappear from the
- // currentOp results once aborted.
- assert.soon(
- function() {
- const sessionFilter = {
- active: false,
- opid: {$exists: false},
- desc: "inactive transaction",
- "transaction.parameters.txnNumber": NumberLong(txnNumber),
- "lsid.id": session.getSessionId().id
- };
-
- const priConn = st.rs0.getPrimary();
- const res = priConn.getDB("admin").aggregate(
- [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]);
-
- return (res.itcount() == 0);
- },
- "currentOp reports that the idle transaction still exists, it has not been " +
- "aborted as expected.");
-
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: 'user',
- documents: [{x: -100}, {x: 100}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- session.endSession();
-
- st.stop();
+ conn.getDB('admin').runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1}));
+};
+
+st.rs0.nodes.forEach(lowerTxnTimeout);
+st.rs1.nodes.forEach(lowerTxnTimeout);
+
+let testDB = st.s.getDB('test');
+
+// Create the collections in the shards outside the transactions.
+assert.commandWorked(testDB.runCommand(
+ {insert: 'user', documents: [{x: -1}, {x: 1}], writeConcern: {w: 'majority'}}));
+
+const session = st.s.startSession();
+const sessionDb = session.getDatabase('test');
+
+let txnNumber = 0;
+
+assert.commandWorked(sessionDb.runCommand({
+ insert: 'user',
+ documents: [{x: -10}, {x: 10}],
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false,
+}));
+
+// We can deterministically wait for the transaction to be aborted by waiting for currentOp
+// to cease reporting the inactive transaction: the transaction should disappear from the
+// currentOp results once aborted.
+assert.soon(
+ function() {
+ const sessionFilter = {
+ active: false,
+ opid: {$exists: false},
+ desc: "inactive transaction",
+ "transaction.parameters.txnNumber": NumberLong(txnNumber),
+ "lsid.id": session.getSessionId().id
+ };
+
+ const priConn = st.rs0.getPrimary();
+ const res = priConn.getDB("admin").aggregate(
+ [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]);
+
+ return (res.itcount() == 0);
+ },
+ "currentOp reports that the idle transaction still exists, it has not been " +
+ "aborted as expected.");
+
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: 'user',
+ documents: [{x: -100}, {x: 100}],
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}),
+ ErrorCodes.NoSuchTransaction);
+
+session.endSession();
+
+st.stop();
}());
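// transactionLifetimeLimitSeconds defaults to 60; the test drops it to 1 so the
// periodic reaper aborts the idle transaction quickly. Restoring the default on
// a live node looks like this (sketch; 'conn' is an assumed direct connection):
assert.commandWorked(
    conn.getDB('admin').runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 60}));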
diff --git a/jstests/sharding/transactions_implicit_abort.js b/jstests/sharding/transactions_implicit_abort.js
index a965c7bec87..003b6e4cefe 100644
--- a/jstests/sharding/transactions_implicit_abort.js
+++ b/jstests/sharding/transactions_implicit_abort.js
@@ -3,65 +3,58 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- // Set up a sharded collection with one chunk on each shard.
+// Set up a sharded collection with one chunk on each shard.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- //
- // An unhandled error during a transaction should try to abort it on all participants.
- //
+//
+// An unhandled error during a transaction should try to abort it on all participants.
+//
- session.startTransaction();
+session.startTransaction();
- // Targets Shard0 successfully.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
+// Targets Shard0 successfully.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
- // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
- // an internal client.
- assert.commandWorked(st.rs1.getPrimary().adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.InternalError,
- failCommands: ["find"],
- failInternalCommands: true
- }
- }));
+// Sharding tests require failInternalCommands: true, since the mongos appears to mongod to be
+// an internal client.
+assert.commandWorked(st.rs1.getPrimary().adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.InternalError, failCommands: ["find"], failInternalCommands: true}
+}));
- // Targets Shard1 and encounters a transaction fatal error.
- assert.commandFailedWithCode(sessionDB.runCommand({find: collName, filter: {_id: 1}}),
- ErrorCodes.InternalError);
+// Targets Shard1 and encounters a transaction fatal error.
+assert.commandFailedWithCode(sessionDB.runCommand({find: collName, filter: {_id: 1}}),
+ ErrorCodes.InternalError);
- assert.commandWorked(
- st.rs1.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandWorked(
+ st.rs1.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- // The transaction should have been aborted on both shards.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// The transaction should have been aborted on both shards.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- st.stop();
+st.stop();
})();
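
For context, the failCommand fail point used above is the standard way these suites inject per-command errors on a shard. A hedged sketch of the on/off cycle, assuming a direct connection `primary` to a shard primary:

// Fail exactly one `find` on this node with InternalError. Requests routed
// through mongos look like internal clients to mongod, so
// failInternalCommands: true is required for the fail point to fire.
assert.commandWorked(primary.adminCommand({
    configureFailPoint: "failCommand",
    mode: {times: 1},
    data: {
        errorCode: ErrorCodes.InternalError,
        failCommands: ["find"],
        failInternalCommands: true
    }
}));
// ... run the statement that is expected to fail ...
assert.commandWorked(primary.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
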
diff --git a/jstests/sharding/transactions_multi_writes.js b/jstests/sharding/transactions_multi_writes.js
index 403ab4dcc8b..e4c8b43cd95 100644
--- a/jstests/sharding/transactions_multi_writes.js
+++ b/jstests/sharding/transactions_multi_writes.js
@@ -6,145 +6,143 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- const st = new ShardingTest({shards: 3, config: 1, mongos: 2});
-
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
-
- // Set up a sharded collection with 3 chunks, [min, 0), [0, 10), [10, max), one on each shard,
- // with one document in each.
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {skey: 15}, to: st.shard2.shardName}));
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
-
- // Runs the given multi-write and asserts a manually inserted orphan document is not affected.
- // The write is assumed to target chunks [min, 0) and [0, 10), which begin on shard0 and shard1,
- // respectively.
- function runTest(st, session, writeCmd, staleRouter) {
- const isUpdate = writeCmd.hasOwnProperty("update");
- const sessionDB = session.getDatabase(dbName);
-
- let orphanShardName;
- let orphanDoc = {_id: 2, counter: 0, skey: 5};
- if (staleRouter) {
- // Using a separate router, move a chunk that will be targeted by the write to a shard
- // that would not be targeted by a stale router. Put the orphan on the shard that
- // previously owned the chunk to verify the multi-write obeys the shard versioning
- // protocol.
- assert.commandWorked(st.s1.adminCommand(
- {moveChunk: ns, find: {skey: 5}, to: st.shard2.shardName, _waitForDelete: true}));
- orphanShardName = "rs1";
- } else {
- // Otherwise put the orphan on a shard that should not be targeted by a fresh router to
- // verify the multi-write is not broadcast to all shards.
- orphanShardName = "rs2";
- }
-
- const orphanShardDB = st[orphanShardName].getPrimary().getDB(dbName);
- assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
-
- // Start a transaction with majority read concern to ensure the orphan will be visible if
- // its shard is targeted and send the multi-write.
- session.startTransaction({readConcern: {level: "majority"}});
- assert.commandWorked(sessionDB.runCommand(writeCmd));
-
- // The write shouldn't be visible until the transaction commits.
- assert.sameMembers(st.getDB(dbName)[collName].find().toArray(), [
- {_id: 1, counter: 0, skey: -5},
- {_id: 2, counter: 0, skey: 5},
- {_id: 3, counter: 0, skey: 15}
- ]);
-
- // Commit the transaction and verify the write was successful.
- assert.commandWorked(session.commitTransaction_forTesting());
- if (isUpdate) {
- assert.sameMembers(st.getDB(dbName)[collName].find().toArray(),
- [
- {_id: 1, counter: 1, skey: -5},
- {_id: 2, counter: 1, skey: 5},
- {_id: 3, counter: 0, skey: 15}
- ],
- "document mismatch for update, stale: " + staleRouter + ", cmd: " +
- tojson(writeCmd));
- } else { // isDelete
- assert.sameMembers(st.getDB(dbName)[collName].find().toArray(),
- [{_id: 3, counter: 0, skey: 15}],
- "document mismatch for delete, stale: " + staleRouter + ", cmd: " +
- tojson(writeCmd));
- }
-
- // The orphaned document should not have been affected.
- assert.docEq(orphanDoc,
- orphanShardDB[collName].findOne({skey: orphanDoc.skey}),
- "document mismatch for orphaned doc, stale: " + staleRouter + ", cmd: " +
- tojson(writeCmd));
-
- // Reset the database state for the next iteration.
- if (isUpdate) {
- assert.writeOK(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
- } else { // isDelete
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
- }
-
- assert.writeOK(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
-
- if (staleRouter) {
- // Move the chunk back with the main router so it isn't stale.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName, _waitForDelete: true}));
- }
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+const st = new ShardingTest({shards: 3, config: 1, mongos: 2});
+
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+
+// Set up a sharded collection with 3 chunks, [min, 0), [0, 10), [10, max), one on each shard,
+// with one document in each.
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 10}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 15}, to: st.shard2.shardName}));
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 3, counter: 0, skey: 15}));
+
+// Runs the given multi-write and asserts a manually inserted orphan document is not affected.
+// The write is assumed to target chunks [min, 0) and [0, 10), which begin on shard0 and shard1,
+// respectively.
+function runTest(st, session, writeCmd, staleRouter) {
+ const isUpdate = writeCmd.hasOwnProperty("update");
+ const sessionDB = session.getDatabase(dbName);
+
+ let orphanShardName;
+ let orphanDoc = {_id: 2, counter: 0, skey: 5};
+ if (staleRouter) {
+ // Using a separate router, move a chunk that will be targeted by the write to a shard
+ // that would not be targeted by a stale router. Put the orphan on the shard that
+ // previously owned the chunk to verify the multi-write obeys the shard versioning
+ // protocol.
+ assert.commandWorked(st.s1.adminCommand(
+ {moveChunk: ns, find: {skey: 5}, to: st.shard2.shardName, _waitForDelete: true}));
+ orphanShardName = "rs1";
+ } else {
+ // Otherwise put the orphan on a shard that should not be targeted by a fresh router to
+ // verify the multi-write is not broadcast to all shards.
+ orphanShardName = "rs2";
}
- const session = st.s.startSession();
+ const orphanShardDB = st[orphanShardName].getPrimary().getDB(dbName);
+ assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
+
+ // Start a transaction with majority read concern to ensure the orphan will be visible if
+ // its shard is targeted and send the multi-write.
+ session.startTransaction({readConcern: {level: "majority"}});
+ assert.commandWorked(sessionDB.runCommand(writeCmd));
+
+ // The write shouldn't be visible until the transaction commits.
+ assert.sameMembers(st.getDB(dbName)[collName].find().toArray(), [
+ {_id: 1, counter: 0, skey: -5},
+ {_id: 2, counter: 0, skey: 5},
+ {_id: 3, counter: 0, skey: 15}
+ ]);
+
+ // Commit the transaction and verify the write was successful.
+ assert.commandWorked(session.commitTransaction_forTesting());
+ if (isUpdate) {
+ assert.sameMembers(
+ st.getDB(dbName)[collName].find().toArray(),
+ [
+ {_id: 1, counter: 1, skey: -5},
+ {_id: 2, counter: 1, skey: 5},
+ {_id: 3, counter: 0, skey: 15}
+ ],
+ "document mismatch for update, stale: " + staleRouter + ", cmd: " + tojson(writeCmd));
+ } else { // isDelete
+ assert.sameMembers(
+ st.getDB(dbName)[collName].find().toArray(),
+ [{_id: 3, counter: 0, skey: 15}],
+ "document mismatch for delete, stale: " + staleRouter + ", cmd: " + tojson(writeCmd));
+ }
+
+ // The orphaned document should not have been affected.
+ assert.docEq(
+ orphanDoc,
+ orphanShardDB[collName].findOne({skey: orphanDoc.skey}),
+ "document mismatch for orphaned doc, stale: " + staleRouter + ", cmd: " + tojson(writeCmd));
+
+ // Reset the database state for the next iteration.
+ if (isUpdate) {
+ assert.writeOK(sessionDB[collName].update({}, {$set: {counter: 0}}, {multi: true}));
+ } else { // isDelete
+ assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1, counter: 0, skey: -5}));
+ assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 2, counter: 0, skey: 5}));
+ }
+
+ assert.writeOK(orphanShardDB[collName].remove({skey: orphanDoc.skey}));
+
+ if (staleRouter) {
+ // Move the chunk back with the main router so it isn't stale.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: {skey: 5}, to: st.shard1.shardName, _waitForDelete: true}));
+ }
+}
+
+const session = st.s.startSession();
- let multiUpdate = {
- update: collName,
- updates: [{q: {skey: {$lte: 5}}, u: {$inc: {counter: 1}}, multi: true}]
- };
+let multiUpdate = {
+ update: collName,
+ updates: [{q: {skey: {$lte: 5}}, u: {$inc: {counter: 1}}, multi: true}]
+};
- multiUpdate.ordered = false;
- runTest(st, session, multiUpdate, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiUpdate, true /*staleRouter*/);
+multiUpdate.ordered = false;
+runTest(st, session, multiUpdate, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiUpdate, true /*staleRouter*/);
- multiUpdate.ordered = true;
- runTest(st, session, multiUpdate, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiUpdate, true /*staleRouter*/);
+multiUpdate.ordered = true;
+runTest(st, session, multiUpdate, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiUpdate, true /*staleRouter*/);
- let multiDelete = {delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 0}]};
+let multiDelete = {delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 0}]};
- multiDelete.ordered = false;
- runTest(st, session, multiDelete, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiDelete, true /*staleRouter*/);
+multiDelete.ordered = false;
+runTest(st, session, multiDelete, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiDelete, true /*staleRouter*/);
- multiDelete.ordered = true;
- runTest(st, session, multiDelete, false /*staleRouter*/);
- // TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
- // runTest(st, session, multiDelete, true /*staleRouter*/);
+multiDelete.ordered = true;
+runTest(st, session, multiDelete, false /*staleRouter*/);
+// TODO: SERVER-39704 uncomment when mongos can internally retry txn on stale errors for real.
+// runTest(st, session, multiDelete, true /*staleRouter*/);
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
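
The orphan checks above lean on one property of direct shard reads: mongos routes by shard key and filters out documents a shard does not own, while a read against a shard primary sees everything on disk. A small sketch under the same setup (three shards, `dbName`/`collName` as in the test):

// Plant an orphan copy of a document on a shard that does not own its chunk,
// then confirm mongos still reports a single copy while a direct read on the
// orphan's shard sees it.
const orphanDoc = {_id: 2, counter: 0, skey: 5};
const orphanShardDB = st.rs2.getPrimary().getDB(dbName);
assert.writeOK(orphanShardDB[collName].insert(orphanDoc, {writeConcern: {w: "majority"}}));
assert.eq(1, st.s.getDB(dbName)[collName].find({skey: 5}).itcount());  // routed + filtered
assert.eq(1, orphanShardDB[collName].find({skey: 5}).itcount());       // direct read sees it
assert.writeOK(orphanShardDB[collName].remove({skey: 5}));             // clean up
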
diff --git a/jstests/sharding/transactions_read_concerns.js b/jstests/sharding/transactions_read_concerns.js
index 0b01e2a42ec..af2c24b2b02 100644
--- a/jstests/sharding/transactions_read_concerns.js
+++ b/jstests/sharding/transactions_read_concerns.js
@@ -7,77 +7,76 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2, config: 1});
+const st = new ShardingTest({shards: 2, config: 1});
- // Set up a sharded collection with 2 chunks, one on each shard.
+// Set up a sharded collection with 2 chunks, one on each shard.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
- // Refresh second shard to avoid stale shard version error on the second transaction statement.
- assert.commandWorked(st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+// Refresh second shard to avoid stale shard version error on the second transaction statement.
+assert.commandWorked(st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
- function runTest(st, readConcern, sessionOptions) {
- jsTestLog("Testing readConcern: " + tojson(readConcern) + ", sessionOptions: " +
- tojson(sessionOptions));
+function runTest(st, readConcern, sessionOptions) {
+ jsTestLog("Testing readConcern: " + tojson(readConcern) +
+ ", sessionOptions: " + tojson(sessionOptions));
- const session = st.s.startSession(sessionOptions);
- const sessionDB = session.getDatabase(dbName);
+ const session = st.s.startSession(sessionOptions);
+ const sessionDB = session.getDatabase(dbName);
- if (readConcern) {
- session.startTransaction({readConcern: readConcern});
- } else {
- session.startTransaction();
- }
-
- // Target only the first shard.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
-
- // On a separate, causally consistent session, read from the first shard then write to the
- // second one. This write is guaranteed to commit at a later cluster time than that of the
- // snapshot established by the transaction on the first shard.
- const otherSessionDB = st.s.startSession().getDatabase(dbName);
- assert.commandWorked(otherSessionDB.runCommand({find: collName}));
- assert.commandWorked(otherSessionDB.runCommand({insert: collName, documents: [{_id: 5}]}));
-
- // Depending on the transaction's read concern, the new document will or will not be visible
- // to the next statement.
- const numExpectedDocs = readConcern && readConcern.level === "snapshot" ? 0 : 1;
- assert.eq(numExpectedDocs,
- sessionDB[collName].find({_id: 5}).itcount(),
- "sharded transaction with read concern " + tojson(readConcern) +
- " did not see expected number of documents, sessionOptions: " +
- tojson(sessionOptions));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Clean up for the next iteration.
- assert.writeOK(sessionDB[collName].remove({_id: 5}));
- }
-
- // Specifying no read concern level is allowed and should not compute a global snapshot.
- runTest(st, undefined, {causalConsistency: false});
- runTest(st, undefined, {causalConsistency: true});
-
- const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
- for (let readConcernLevel of kAllowedReadConcernLevels) {
- runTest(st, {level: readConcernLevel}, {causalConsistency: false});
- runTest(st, {level: readConcernLevel}, {causalConsistency: true});
+ if (readConcern) {
+ session.startTransaction({readConcern: readConcern});
+ } else {
+ session.startTransaction();
}
- st.stop();
+ // Target only the first shard.
+ assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: -1}}));
+
+ // On a separate, causally consistent session, read from the first shard then write to the
+ // second one. This write is guaranteed to commit at a later cluster time than that of the
+ // snapshot established by the transaction on the first shard.
+ const otherSessionDB = st.s.startSession().getDatabase(dbName);
+ assert.commandWorked(otherSessionDB.runCommand({find: collName}));
+ assert.commandWorked(otherSessionDB.runCommand({insert: collName, documents: [{_id: 5}]}));
+
+ // Depending on the transaction's read concern, the new document will or will not be visible
+ // to the next statement.
+ const numExpectedDocs = readConcern && readConcern.level === "snapshot" ? 0 : 1;
+ assert.eq(
+ numExpectedDocs,
+ sessionDB[collName].find({_id: 5}).itcount(),
+ "sharded transaction with read concern " + tojson(readConcern) +
+ " did not see expected number of documents, sessionOptions: " + tojson(sessionOptions));
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Clean up for the next iteration.
+ assert.writeOK(sessionDB[collName].remove({_id: 5}));
+}
+
+// Specifying no read concern level is allowed and should not compute a global snapshot.
+runTest(st, undefined, {causalConsistency: false});
+runTest(st, undefined, {causalConsistency: true});
+
+const kAllowedReadConcernLevels = ["local", "majority", "snapshot"];
+for (let readConcernLevel of kAllowedReadConcernLevels) {
+ runTest(st, {level: readConcernLevel}, {causalConsistency: false});
+ runTest(st, {level: readConcernLevel}, {causalConsistency: true});
+}
+
+st.stop();
})();
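
The visibility rule this test encodes can be summarized in a few lines. A sketch under the same two-chunk setup, with illustrative names:

const session2 = st.s.startSession();
const sdb = session2.getDatabase(dbName);
session2.startTransaction({readConcern: {level: "snapshot"}});
assert.commandWorked(sdb.runCommand({find: collName, filter: {_id: -1}}));  // pins a timestamp
// A concurrent write commits at a later cluster time:
assert.commandWorked(st.s.getDB(dbName).runCommand({insert: collName, documents: [{_id: 7}]}));
// "snapshot" cannot see it; "local"/"majority" transactions generally can.
assert.eq(0, sdb[collName].find({_id: 7}).itcount());
assert.commandWorked(session2.commitTransaction_forTesting());
assert.writeOK(st.s.getDB(dbName)[collName].remove({_id: 7}));
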
diff --git a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
index d1c5ef5c225..12c7fa1fab3 100644
--- a/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+++ b/jstests/sharding/transactions_reject_writes_for_moved_chunks.js
@@ -8,161 +8,159 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
+function expectChunks(st, ns, chunks) {
+ for (let i = 0; i < chunks.length; i++) {
+ assert.eq(chunks[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
}
+}
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+
+// Set up one sharded collection with 2 chunks, both on the primary shard.
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+
+expectChunks(st, ns, [2, 0, 0]);
+
+// Force a routing table refresh on Shard2, to avoid picking a global read timestamp before the
+// sharding metadata cache collections are created.
+assert.commandWorked(st.rs2.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+
+expectChunks(st, ns, [1, 1, 0]);
+
+// The command should target only the second chunk.
+const kCommandTestCases = [
+ {
+ name: "insert",
+ command: {insert: collName, documents: [{_id: 6}]},
+ },
+ {
+ name: "update_query",
+ command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
+ },
+ {
+ name: "update_replacement",
+ command: {update: collName, updates: [{q: {_id: 5}, u: {_id: 5, x: 1}}]},
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
+ },
+ {
+ name: "findAndModify_update",
+ command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
+ },
+ {
+ name: "findAndModify_delete",
+ command: {findAndModify: collName, query: {_id: 5}, remove: true},
+ }
+];
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
-
- // Set up one sharded collection with 2 chunks, both on the primary shard.
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
-
- expectChunks(st, ns, [2, 0, 0]);
+function runTest(testCase, moveChunkBack) {
+ const testCaseName = testCase.name;
+ const cmdTargetChunk2 = testCase.command;
- // Force a routing table refresh on Shard2, to avoid picking a global read timestamp before the
- // sharding metadata cache collections are created.
- assert.commandWorked(st.rs2.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+ jsTestLog("Testing " + testCaseName + ", moveChunkBack: " + moveChunkBack);
expectChunks(st, ns, [1, 1, 0]);
- // The command should target only the second chunk.
- const kCommandTestCases = [
- {
- name: "insert",
- command: {insert: collName, documents: [{_id: 6}]},
- },
- {
- name: "update_query",
- command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
- },
- {
- name: "update_replacement",
- command: {update: collName, updates: [{q: {_id: 5}, u: {_id: 5, x: 1}}]},
- },
- {
- name: "delete",
- command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
- },
- {
- name: "findAndModify_update",
- command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
- },
- {
- name: "findAndModify_delete",
- command: {findAndModify: collName, query: {_id: 5}, remove: true},
- }
- ];
-
- function runTest(testCase, moveChunkBack) {
- const testCaseName = testCase.name;
- const cmdTargetChunk2 = testCase.command;
-
- jsTestLog("Testing " + testCaseName + ", moveChunkBack: " + moveChunkBack);
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
- expectChunks(st, ns, [1, 1, 0]);
+ session.startTransaction({readConcern: {level: "snapshot"}});
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
+ // Start a transaction on Shard0 which will select and pin a global read timestamp.
+ assert.eq(sessionColl.find({_id: -5}).itcount(), 1, "expected to find document in first chunk");
- session.startTransaction({readConcern: {level: "snapshot"}});
+ // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
+ // later logical time than the transaction's read timestamp.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
- // Start a transaction on Shard0 which will select and pin a global read timestamp.
- assert.eq(
- sessionColl.find({_id: -5}).itcount(), 1, "expected to find document in first chunk");
+ if (moveChunkBack) {
+ // If the chunk is moved back to the shard that owned it at the transaction's read
+        // timestamp, the later write should still be rejected because conflicting operations may
+ // have occurred while the chunk was moved away, which otherwise may not be detected
+ // when the shard prepares the transaction.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
- // later logical time than the transaction's read timestamp.
+ // Flush metadata on the destination shard so the next request doesn't encounter
+ // StaleConfig. The router refreshes after moving a chunk, so it will already be fresh.
assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+ st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ }
- if (moveChunkBack) {
- // If the chunk is moved back to the shard that owned it at the transaction's read
-            // timestamp, the later write should still be rejected because conflicting operations may
- // have occurred while the chunk was moved away, which otherwise may not be detected
- // when the shard prepares the transaction.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
-
- // Flush metadata on the destination shard so the next request doesn't encounter
- // StaleConfig. The router refreshes after moving a chunk, so it will already be fresh.
- assert.commandWorked(
- st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
- }
+ // The write should always fail, but the particular error varies.
+ const res = assert.commandFailed(
+ sessionDB.runCommand(cmdTargetChunk2),
+ "expected write to second chunk to fail, case: " + testCaseName +
+ ", cmd: " + tojson(cmdTargetChunk2) + ", moveChunkBack: " + moveChunkBack);
- // The write should always fail, but the particular error varies.
- const res = assert.commandFailed(sessionDB.runCommand(cmdTargetChunk2),
- "expected write to second chunk to fail, case: " +
- testCaseName + ", cmd: " + tojson(cmdTargetChunk2) +
- ", moveChunkBack: " + moveChunkBack);
+ const errMsg = "write to second chunk failed with unexpected error, res: " + tojson(res) +
+ ", case: " + testCaseName + ", cmd: " + tojson(cmdTargetChunk2) +
+ ", moveChunkBack: " + moveChunkBack;
- const errMsg = "write to second chunk failed with unexpected error, res: " + tojson(res) +
- ", case: " + testCaseName + ", cmd: " + tojson(cmdTargetChunk2) + ", moveChunkBack: " +
- moveChunkBack;
+ // On slow hosts, this request can always fail with SnapshotTooOld or StaleChunkHistory if
+ // a migration takes long enough.
+ const expectedCodes = [ErrorCodes.SnapshotTooOld, ErrorCodes.StaleChunkHistory];
- // On slow hosts, this request can always fail with SnapshotTooOld or StaleChunkHistory if
- // a migration takes long enough.
- const expectedCodes = [ErrorCodes.SnapshotTooOld, ErrorCodes.StaleChunkHistory];
+ if (testCaseName === "insert") {
+ // Insert always inserts a new document, so the only typical error is MigrationConflict.
+ expectedCodes.push(ErrorCodes.MigrationConflict);
+ assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ } else {
+ // The other commands modify an existing document so they may also fail with
+ // WriteConflict, depending on when orphaned documents are modified.
- if (testCaseName === "insert") {
- // Insert always inserts a new document, so the only typical error is MigrationConflict.
- expectedCodes.push(ErrorCodes.MigrationConflict);
- assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ if (moveChunkBack) {
+ // Orphans from the first migration must have been deleted before the chunk was
+ // moved back, so the only typical error is WriteConflict.
+ expectedCodes.push(ErrorCodes.WriteConflict);
} else {
- // The other commands modify an existing document so they may also fail with
- // WriteConflict, depending on when orphaned documents are modified.
-
- if (moveChunkBack) {
- // Orphans from the first migration must have been deleted before the chunk was
- // moved back, so the only typical error is WriteConflict.
- expectedCodes.push(ErrorCodes.WriteConflict);
- } else {
- // If the chunk wasn't moved back, the write races with the range deleter. If the
- // range deleter has not run, the write should fail with MigrationConflict,
- // otherwise with WriteConflict, so both codes are acceptable.
- expectedCodes.push(ErrorCodes.WriteConflict, ErrorCodes.MigrationConflict);
- }
- assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ // If the chunk wasn't moved back, the write races with the range deleter. If the
+ // range deleter has not run, the write should fail with MigrationConflict,
+ // otherwise with WriteConflict, so both codes are acceptable.
+ expectedCodes.push(ErrorCodes.WriteConflict, ErrorCodes.MigrationConflict);
}
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+ assert.commandFailedWithCode(res, expectedCodes, errMsg);
+ }
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // The commit should fail because the earlier write failed.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ // The commit should fail because the earlier write failed.
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- // Move the chunk back to Shard1, if necessary, and reset the database state for the next
- // iteration.
- if (!moveChunkBack) {
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- }
- assert.writeOK(sessionColl.remove({}));
- assert.writeOK(sessionColl.insert([{_id: 5}, {_id: -5}]));
+ // Move the chunk back to Shard1, if necessary, and reset the database state for the next
+ // iteration.
+ if (!moveChunkBack) {
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
}
+ assert.writeOK(sessionColl.remove({}));
+ assert.writeOK(sessionColl.insert([{_id: 5}, {_id: -5}]));
+}
- kCommandTestCases.forEach(testCase => runTest(testCase, false /*moveChunkBack*/));
- kCommandTestCases.forEach(testCase => runTest(testCase, true /*moveChunkBack*/));
+kCommandTestCases.forEach(testCase => runTest(testCase, false /*moveChunkBack*/));
+kCommandTestCases.forEach(testCase => runTest(testCase, true /*moveChunkBack*/));
- st.stop();
+st.stop();
})();
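
One detail the test leans on: assert.commandFailedWithCode accepts an array of codes, so a single assertion can allow any of the racy outcomes. A sketch, assuming `sessionDB` and a write command `cmd` as in runTest above:

// Any of these codes is an acceptable way for the write to be rejected; which
// one occurs depends on migration timing and the range deleter.
const expectedCodes = [ErrorCodes.SnapshotTooOld, ErrorCodes.StaleChunkHistory,
                       ErrorCodes.WriteConflict, ErrorCodes.MigrationConflict];
const res = assert.commandFailedWithCode(sessionDB.runCommand(cmd), expectedCodes);
// Whatever the code, the error must carry the transient label so callers retry
// the whole transaction rather than the single statement.
assert.eq(res.errorLabels, ["TransientTransactionError"]);
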
diff --git a/jstests/sharding/transactions_snapshot_errors_first_statement.js b/jstests/sharding/transactions_snapshot_errors_first_statement.js
index 2d915425033..3b0f5f74953 100644
--- a/jstests/sharding/transactions_snapshot_errors_first_statement.js
+++ b/jstests/sharding/transactions_snapshot_errors_first_statement.js
@@ -8,159 +8,157 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const kCommandTestCases = [
- {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
- {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
- {name: "find", command: {find: collName}},
- {
- // findAndModify can only target one shard, even in the two shard case.
- name: "findAndModify",
- command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
- },
- {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
- {
- name: "update",
- command: {
- update: collName,
- updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
- }
- },
- {
- name: "delete",
- command:
- {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
- },
- // We cannot test killCursors because mongos discards the response from any killCursors
- // requests that may be sent to shards.
- ];
-
- // Verify that all commands that can start a transaction are able to retry on snapshot errors.
- function runTest(st, collName, numShardsToError, errorCode, isSharded) {
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
-
- for (let commandTestCase of kCommandTestCases) {
- const commandName = commandTestCase.name;
- const commandBody = commandTestCase.command;
-
- if (isSharded && commandName === "distinct") {
- // Distinct isn't allowed on sharded collections in a multi-document transaction.
- print("Skipping distinct test case for sharded collection");
- continue;
- }
-
- //
- // Retry on a single error.
- //
-
- setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, numShardsToError);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand(commandBody));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- unsetFailCommandOnEachShard(st, numShardsToError);
-
- // Clean up after insert to avoid duplicate key errors.
- if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
- }
-
- //
- // Retry on multiple errors.
- //
-
- setFailCommandOnShards(st, {times: 3}, [commandName], errorCode, numShardsToError);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand(commandBody));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- unsetFailCommandOnEachShard(st, numShardsToError);
-
- // Clean up after insert to avoid duplicate key errors.
- if (commandName === "insert") {
- assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
- }
-
- //
- // Exhaust retry attempts.
- //
-
- setFailCommandOnShards(st, "alwaysOn", [commandName], errorCode, numShardsToError);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- unsetFailCommandOnEachShard(st, numShardsToError);
-
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const kCommandTestCases = [
+ {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
+ {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
+ {name: "find", command: {find: collName}},
+ {
+ // findAndModify can only target one shard, even in the two shard case.
+ name: "findAndModify",
+ command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
+ },
+ {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
+ {
+ name: "update",
+ command: {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
+ }
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
+ },
+ // We cannot test killCursors because mongos discards the response from any killCursors
+ // requests that may be sent to shards.
+];
+
+// Verify that all commands that can start a transaction are able to retry on snapshot errors.
+function runTest(st, collName, numShardsToError, errorCode, isSharded) {
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+
+ for (let commandTestCase of kCommandTestCases) {
+ const commandName = commandTestCase.name;
+ const commandBody = commandTestCase.command;
+
+ if (isSharded && commandName === "distinct") {
+ // Distinct isn't allowed on sharded collections in a multi-document transaction.
+ print("Skipping distinct test case for sharded collection");
+ continue;
}
- }
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+ //
+ // Retry on a single error.
+ //
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+ setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, numShardsToError);
- jsTestLog("Unsharded transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand(commandBody));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ assert.commandWorked(session.commitTransaction_forTesting());
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 1, errorCode, false /* isSharded */);
- }
+ unsetFailCommandOnEachShard(st, numShardsToError);
- // Enable sharding and set up 2 chunks, [minKey, 10), [10, maxKey), each with one document
- // (includes the document already inserted).
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ // Clean up after insert to avoid duplicate key errors.
+ if (commandName === "insert") {
+ assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ }
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+ //
+ // Retry on multiple errors.
+ //
- jsTestLog("One shard sharded transaction");
+ setFailCommandOnShards(st, {times: 3}, [commandName], errorCode, numShardsToError);
- assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand(commandBody));
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 1, errorCode, true /* isSharded */);
- }
+ assert.commandWorked(session.commitTransaction_forTesting());
- jsTestLog("Two shard sharded transaction");
+ unsetFailCommandOnEachShard(st, numShardsToError);
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
-    assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+ // Clean up after insert to avoid duplicate key errors.
+ if (commandName === "insert") {
+ assert.writeOK(sessionDB[collName].remove({_id: {$in: [1, 11]}}));
+ }
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 2, errorCode, true /* isSharded */);
- }
+ //
+ // Exhaust retry attempts.
+ //
- // Test only one shard throwing the error when more than one are targeted.
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, 1, errorCode, true /* isSharded */);
+ setFailCommandOnShards(st, "alwaysOn", [commandName], errorCode, numShardsToError);
+
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+ unsetFailCommandOnEachShard(st, numShardsToError);
+
+ assertNoSuchTransactionOnAllShards(
+ st, session.getSessionId(), session.getTxnNumber_forTesting());
+
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
}
+}
+
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+
+jsTestLog("Unsharded transaction");
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 1, errorCode, false /* isSharded */);
+}
+
+// Enable sharding and set up 2 chunks, [minKey, 10), [10, maxKey), each with one document
+// (includes the document already inserted).
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+
+jsTestLog("One shard sharded transaction");
+
+assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 1, errorCode, true /* isSharded */);
+}
+
+jsTestLog("Two shard sharded transaction");
+
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 2, errorCode, true /* isSharded */);
+}
+
+// Test only one shard throwing the error when more than one are targeted.
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, 1, errorCode, true /* isSharded */);
+}
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
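
The behavior under test follows one rule: snapshot errors on a transaction's first statement are retried transparently by mongos until its retry budget is exhausted. A sketch of the probe, assuming `session`/`sessionDB` as in runTest and the helpers loaded from jstests/sharding/libs/sharded_transactions_helpers.js:

// Fail the first attempt only; the router's internal retry should succeed.
setFailCommandOnShards(st, {times: 1}, ["find"], ErrorCodes.SnapshotUnavailable, 1);
session.startTransaction({readConcern: {level: "snapshot"}});
assert.commandWorked(sessionDB.runCommand({find: collName}));
assert.commandWorked(session.commitTransaction_forTesting());
unsetFailCommandOnEachShard(st, 1);
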
diff --git a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
index 27855d9bc1a..e83ef670708 100644
--- a/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
+++ b/jstests/sharding/transactions_snapshot_errors_subsequent_statements.js
@@ -7,116 +7,114 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const kCommandTestCases = [
- {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
- {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
- {name: "find", command: {find: collName}},
- {
- // findAndModify can only target one shard, even in the two shard case.
- name: "findAndModify",
- command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
- },
- {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
- {
- name: "update",
- command: {
- update: collName,
- updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
- }
- },
- {
- name: "delete",
- command:
- {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
- },
- // We cannot test killCursors because mongos discards the response from any killCursors
- // requests that may be sent to shards.
- ];
-
- function runTest(st, collName, errorCode, isSharded) {
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
-
- for (let commandTestCase of kCommandTestCases) {
- const commandName = commandTestCase.name;
- const commandBody = commandTestCase.command;
-
- if (isSharded && commandName === "distinct") {
- // Distinct isn't allowed on sharded collections in a multi-document transaction.
- print("Skipping distinct test case for sharded collections");
- continue;
- }
-
- // Successfully start a transaction on one shard.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 15}}));
-
- // Verify the command must fail on a snapshot error from a subsequent statement.
- setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, 1);
- const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const kCommandTestCases = [
+ {name: "aggregate", command: {aggregate: collName, pipeline: [], cursor: {}}},
+ {name: "distinct", command: {distinct: collName, query: {}, key: "_id"}},
+ {name: "find", command: {find: collName}},
+ {
+ // findAndModify can only target one shard, even in the two shard case.
+ name: "findAndModify",
+ command: {findAndModify: collName, query: {_id: 1}, update: {$set: {x: 1}}}
+ },
+ {name: "insert", command: {insert: collName, documents: [{_id: 1}, {_id: 11}]}},
+ {
+ name: "update",
+ command: {
+ update: collName,
+ updates: [{q: {_id: 1}, u: {$set: {_id: 2}}}, {q: {_id: 11}, u: {$set: {_id: 12}}}]
}
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 2}, limit: 1}, {q: {_id: 12}, limit: 1}]}
+ },
+ // We cannot test killCursors because mongos discards the response from any killCursors
+ // requests that may be sent to shards.
+];
+
+function runTest(st, collName, errorCode, isSharded) {
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+
+ for (let commandTestCase of kCommandTestCases) {
+ const commandName = commandTestCase.name;
+ const commandBody = commandTestCase.command;
+
+ if (isSharded && commandName === "distinct") {
+ // Distinct isn't allowed on sharded collections in a multi-document transaction.
+ print("Skipping distinct test case for sharded collections");
+ continue;
+ }
+
+ // Successfully start a transaction on one shard.
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 15}}));
+
+ // Verify the command must fail on a snapshot error from a subsequent statement.
+ setFailCommandOnShards(st, {times: 1}, [commandName], errorCode, 1);
+ const res = assert.commandFailedWithCode(sessionDB.runCommand(commandBody), errorCode);
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+ assertNoSuchTransactionOnAllShards(
+ st, session.getSessionId(), session.getTxnNumber_forTesting());
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
}
+}
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- jsTestLog("Unsharded transaction");
+jsTestLog("Unsharded transaction");
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- // Single shard case simulates the storage engine discarding an in-use snapshot.
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, errorCode, false /* isSharded */);
- }
+// Single shard case simulates the storage engine discarding an in-use snapshot.
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, errorCode, false /* isSharded */);
+}
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- // Set up 2 chunks, [minKey, 10), [10, maxKey), each with one document (includes the document
- // already inserted).
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
+// Set up 2 chunks, [minKey, 10), [10, maxKey), each with one document (includes the document
+// already inserted).
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 15}, {writeConcern: {w: "majority"}}));
- jsTestLog("One shard transaction");
+jsTestLog("One shard transaction");
- assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.eq(2, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(0, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, errorCode, true /* isSharded */);
- }
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, errorCode, true /* isSharded */);
+}
- jsTestLog("Two shard transaction");
+jsTestLog("Two shard transaction");
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
- assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 15}, to: st.shard1.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard0.shardName}));
+assert.eq(1, st.s.getDB('config').chunks.count({ns: ns, shard: st.shard1.shardName}));
- // Multi shard case simulates adding a new participant that can no longer support the already
- // chosen read timestamp.
- for (let errorCode of kSnapshotErrors) {
- runTest(st, collName, errorCode, true /* isSharded */);
- }
+// Multi shard case simulates adding a new participant that can no longer support the already
+// chosen read timestamp.
+for (let errorCode of kSnapshotErrors) {
+ runTest(st, collName, errorCode, true /* isSharded */);
+}
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
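
On subsequent statements there is no transparent retry: the error surfaces with the TransientTransactionError label and the caller restarts the transaction from its first statement. A driver-style retry loop, sketched with the same names:

// Retry the whole transaction while failures carry the transient label; give
// up on anything else.
for (let attempt = 0;; attempt++) {
    session.startTransaction({readConcern: {level: "snapshot"}});
    const res = sessionDB.runCommand({find: collName});
    if (res.ok === 1) {
        assert.commandWorked(session.commitTransaction_forTesting());
        break;
    }
    session.abortTransaction_forTesting();
    assert(res.errorLabels && res.errorLabels.includes("TransientTransactionError"),
           "non-transient failure: " + tojson(res));
    assert.lt(attempt, 10, "too many transient failures");
}
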
diff --git a/jstests/sharding/transactions_stale_database_version_errors.js b/jstests/sharding/transactions_stale_database_version_errors.js
index 0302162cf3c..01531a3c208 100644
--- a/jstests/sharding/transactions_stale_database_version_errors.js
+++ b/jstests/sharding/transactions_stale_database_version_errors.js
@@ -2,128 +2,123 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const dbName = "test";
- const collName = "foo";
+const dbName = "test";
+const collName = "foo";
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- // Set up two unsharded collections in different databases with shard0 as their primary.
+// Set up two unsharded collections in different databases with shard0 as their primary.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- //
- // Stale database version on first overall command should succeed.
- //
+//
+// Stale database version on first overall command should succeed.
+//
- session.startTransaction();
+session.startTransaction();
- // No database versioned requests have been sent to Shard0, so it is stale.
- assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
+// No database versioned requests have been sent to Shard0, so it is stale.
+assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Stale database version on second command to a shard should fail.
- //
+//
+// Stale database version on second command to a shard should fail.
+//
- st.ensurePrimaryShard(dbName, st.shard1.shardName);
+st.ensurePrimaryShard(dbName, st.shard1.shardName);
- session.startTransaction();
+session.startTransaction();
- // Find is not database versioned so it will not trigger SDV or a refresh on Shard0.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 0}}));
+// Find is not database versioned, so it will not trigger SDV or a refresh on Shard0.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 0}}));
- // Distinct is database versioned, so it will trigger SDV. The router will retry and the retry
- // will discover the transaction was aborted, because a previous statement had completed on
- // Shard0.
- let res = assert.commandFailedWithCode(
- sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
- ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Distinct is database versioned, so it will trigger SDV. The router will retry and the retry
+// will discover the transaction was aborted, because a previous statement had completed on
+// Shard0.
+let res = assert.commandFailedWithCode(
+ sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
+ ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- //
- // Stale database version on first command to a new shard should succeed.
- //
+//
+// Stale database version on first command to a new shard should succeed.
+//
- // Create a new database on Shard0.
- const otherDbName = "other_test";
- const otherCollName = "bar";
+// Create a new database on Shard0.
+const otherDbName = "other_test";
+const otherCollName = "bar";
- assert.writeOK(
- st.s.getDB(otherDbName)[otherCollName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: otherDbName}));
- st.ensurePrimaryShard(otherDbName, st.shard0.shardName);
+assert.writeOK(
+ st.s.getDB(otherDbName)[otherCollName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(st.s.adminCommand({enableSharding: otherDbName}));
+st.ensurePrimaryShard(otherDbName, st.shard0.shardName);
- const sessionOtherDB = session.getDatabase(otherDbName);
+const sessionOtherDB = session.getDatabase(otherDbName);
- // Advance the router's cached last committed opTime for Shard0, so it chooses a read timestamp
- // after the collection is created on shard1, to avoid SnapshotUnavailable.
- assert.commandWorked(
- sessionOtherDB.runCommand({find: otherCollName})); // Not database versioned.
- assert.writeOK(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+// Advance the router's cached last committed opTime for Shard0, so it chooses a read timestamp
+// after the collection is created on shard1, to avoid SnapshotUnavailable.
+assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName})); // Not database versioned.
+assert.writeOK(sessionDB[collName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- session.startTransaction();
+session.startTransaction();
- // Target the first database which is on Shard1.
- assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
+// Target the first database which is on Shard1.
+assert.commandWorked(sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}));
- // Targets the new database on Shard0 which is stale, so a database versioned request should
- // trigger SDV.
- assert.commandWorked(
- sessionOtherDB.runCommand({distinct: otherCollName, key: "_id", query: {_id: 0}}));
+// Targets the new database on Shard0 which is stale, so a database versioned request should
+// trigger SDV.
+assert.commandWorked(
+ sessionOtherDB.runCommand({distinct: otherCollName, key: "_id", query: {_id: 0}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // The final StaleDbVersion error should be returned if the router exhausts its retries.
- //
+//
+// The final StaleDbVersion error should be returned if the router exhausts its retries.
+//
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- st.ensurePrimaryShard(otherDbName, st.shard1.shardName);
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+st.ensurePrimaryShard(otherDbName, st.shard1.shardName);
- // Disable database metadata refreshes on the stale shard so it will indefinitely return a stale
- // version error.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "alwaysOn"}));
+// Disable database metadata refreshes on the stale shard so it will indefinitely return a stale
+// version error.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "alwaysOn"}));
- session.startTransaction();
+session.startTransaction();
- // Target Shard1, to verify the transaction on it is implicitly aborted later.
- assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName}));
+// Target Shard1 to verify the transaction on it is implicitly aborted later.
+assert.commandWorked(sessionOtherDB.runCommand({find: otherCollName}));
- // Target the first database which is on Shard0. The shard is stale and won't refresh its
- // metadata, so mongos should exhaust its retries and implicitly abort the transaction.
- res = assert.commandFailedWithCode(
- sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
- ErrorCodes.StaleDbVersion);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Target the first database which is on Shard0. The shard is stale and won't refresh its
+// metadata, so mongos should exhaust its retries and implicitly abort the transaction.
+res = assert.commandFailedWithCode(
+ sessionDB.runCommand({distinct: collName, key: "_id", query: {_id: 0}}),
+ ErrorCodes.StaleDbVersion);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // Verify all shards aborted the transaction.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// Verify all shards aborted the transaction.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "off"}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipDatabaseVersionMetadataRefresh", mode: "off"}));
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
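The pattern above, assert.commandFailedWithCode followed by an errorLabels equality check, recurs throughout these suites whenever a statement is expected to abort the transaction. A sketch of a helper that captures it; assertTransientTxnError is a hypothetical name, not something exported by sharded_transactions_helpers.js:

    function assertTransientTxnError(db, cmd, expectedCode) {
        // Fail with the expected code and carry the label that tells drivers the
        // whole transaction is safe to retry from the top.
        const res = assert.commandFailedWithCode(db.runCommand(cmd), expectedCode);
        assert.eq(res.errorLabels, ["TransientTransactionError"], tojson(res));
        return res;
    }

With it, each failed-distinct check above reduces to a single call, e.g. assertTransientTxnError(sessionDB, {distinct: collName, key: "_id", query: {_id: 0}}, ErrorCodes.StaleDbVersion).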
diff --git a/jstests/sharding/transactions_stale_shard_version_errors.js b/jstests/sharding/transactions_stale_shard_version_errors.js
index bfb2a5b0178..3bc71a01083 100644
--- a/jstests/sharding/transactions_stale_shard_version_errors.js
+++ b/jstests/sharding/transactions_stale_shard_version_errors.js
@@ -2,263 +2,248 @@
//
// @tags: [requires_sharding, uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
+function expectChunks(st, ns, chunks) {
+ for (let i = 0; i < chunks.length; i++) {
+ assert.eq(chunks[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
}
+}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- const st = new ShardingTest({shards: 3, mongos: 2, config: 1});
+const st = new ShardingTest({shards: 3, mongos: 2, config: 1});
- enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+enableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- // Disable the best-effort recipient metadata refresh after migrations to simplify simulating
- // stale shard version errors.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- assert.commandWorked(st.rs1.getPrimary().adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- assert.commandWorked(st.rs2.getPrimary().adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+// Disable the best-effort recipient metadata refresh after migrations to simplify simulating
+// stale shard version errors.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+assert.commandWorked(st.rs1.getPrimary().adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+assert.commandWorked(st.rs2.getPrimary().adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- // Shard two collections in the same database, each with 2 chunks, [minKey, 0), [0, maxKey),
- // with one document each, all on Shard0.
+// Shard two collections in the same database, each with 2 chunks, [minKey, 0), [0, maxKey),
+// with one document each, all on Shard0.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- expectChunks(st, ns, [2, 0, 0]);
+expectChunks(st, ns, [2, 0, 0]);
- const otherCollName = "bar";
- const otherNs = dbName + "." + otherCollName;
+const otherCollName = "bar";
+const otherNs = dbName + "." + otherCollName;
- assert.writeOK(
- st.s.getDB(dbName)[otherCollName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(
- st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(
+ st.s.getDB(dbName)[otherCollName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[otherCollName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({shardCollection: otherNs, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: otherNs, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: otherNs, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: otherNs, middle: {_id: 0}}));
- expectChunks(st, otherNs, [2, 0, 0]);
+expectChunks(st, otherNs, [2, 0, 0]);
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- //
- // Stale shard version on first overall command should succeed.
- //
+//
+// Stale shard version on first overall command should succeed.
+//
- // Move a chunk in the first collection from Shard0 to Shard1 through the main mongos, so Shard1
- // is stale but not the router.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [1, 1, 0]);
+// Move a chunk in the first collection from Shard0 to Shard1 through the main mongos, so Shard1
+// is stale but not the router.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [1, 1, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which is stale.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1, which is stale.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Stale shard version on second command to a shard should fail.
- //
+//
+// Stale shard version on second command to a shard should fail.
+//
- expectChunks(st, ns, [1, 1, 0]);
+expectChunks(st, ns, [1, 1, 0]);
- // Move a chunk in the other collection from Shard0 to Shard1 through the main mongos, so Shard1
- // is stale for the other collection but not the router.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, otherNs, [1, 1, 0]);
+// Move a chunk in the other collection from Shard0 to Shard1 through the main mongos, so Shard1
+// is stale for the other collection but not the router.
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, otherNs, [1, 1, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1 for the first ns, which is not stale.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1 for the first ns, which is not stale.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- // Targets the other sharded collection on Shard1, which is stale. Because a previous statement
- // has executed on Shard1, the retry will not restart the transaction, and will fail when it
- // finds the transaction has aborted because of the stale shard version.
- let res =
- assert.commandFailedWithCode(sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}),
- ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Targets the other sharded collection on Shard1, which is stale. Because a previous statement
+// has executed on Shard1, the retry will not restart the transaction, and will fail when it
+// finds the transaction has aborted because of the stale shard version.
+let res = assert.commandFailedWithCode(
+ sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}), ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- //
- // Stale shard version on first command to a new shard should succeed.
- //
+//
+// Stale shard version on first command to a new shard should succeed.
+//
- expectChunks(st, ns, [1, 1, 0]);
+expectChunks(st, ns, [1, 1, 0]);
- // Move a chunk for the other collection from Shard1 to Shard0 through the main mongos, so
- // Shard0 is stale for it and the router is not.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard0.shardName}));
- expectChunks(st, otherNs, [2, 0, 0]);
+// Move a chunk for the other collection from Shard1 to Shard0 through the main mongos, so
+// Shard0 is stale for it and the router is not.
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: otherNs, find: {_id: 5}, to: st.shard0.shardName}));
+expectChunks(st, otherNs, [2, 0, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1 for the first ns, which is not stale.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1 for the first ns, which is not stale.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- // Targets Shard0 for the other ns, which is stale.
- assert.commandWorked(sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}));
+// Targets Shard0 for the other ns, which is stale.
+assert.commandWorked(sessionDB.runCommand({find: otherCollName, filter: {_id: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Stale mongos aborts on old shard.
- //
+//
+// Stale mongos aborts on old shard.
+//
- // Move a chunk in the first collection from Shard1 to Shard0 through the other mongos, so
- // Shard1 and the main mongos are stale for it.
- const otherMongos = st.s1;
- assert.commandWorked(
- otherMongos.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard0.shardName}));
- expectChunks(st, ns, [2, 0, 0]);
+// Move a chunk in the first collection from Shard1 to Shard0 through the other mongos, so
+// Shard1 and the main mongos are stale for it.
+const otherMongos = st.s1;
+assert.commandWorked(
+ otherMongos.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard0.shardName}));
+expectChunks(st, ns, [2, 0, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which hits a stale version error, then re-targets Shard0, which is also
- // stale but should succeed.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Targets Shard1, which hits a stale version error, then re-targets Shard0, which is also
+// stale but should succeed.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Verify there is no in-progress transaction on Shard1.
- res = assert.commandFailedWithCode(st.rs1.getPrimary().getDB(dbName).runCommand({
- find: collName,
- lsid: session.getSessionId(),
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Verify there is no in-progress transaction on Shard1.
+res = assert.commandFailedWithCode(st.rs1.getPrimary().getDB(dbName).runCommand({
+ find: collName,
+ lsid: session.getSessionId(),
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false,
+}),
+ ErrorCodes.NoSuchTransaction);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- //
- // More than one stale shard version error.
- //
+//
+// More than one stale shard version error.
+//
- // Move chunks for the first ns from Shard0 to Shard1 and Shard2 through the main mongos, so
- // both are stale but not the router.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
- expectChunks(st, ns, [1, 0, 1]);
+// Move chunks for the first ns from Shard0 to Shard1 and Shard2 through the main mongos, so
+// both are stale but not the router.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+expectChunks(st, ns, [1, 0, 1]);
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [0, 1, 1]);
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [0, 1, 1]);
- session.startTransaction();
+session.startTransaction();
- // Targets all shards, two of which are stale.
- assert.commandWorked(sessionDB.runCommand({find: collName}));
+// Targets all shards, two of which are stale.
+assert.commandWorked(sessionDB.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Can retry a stale write on the first statement.
- //
+//
+// Can retry a stale write on the first statement.
+//
- // Move a chunk to Shard1 to make it stale.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [0, 2, 0]);
+// Move a chunk to Shard1 to make it stale.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [0, 2, 0]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which is stale.
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: 6}]}));
+// Targets Shard1, which is stale.
+assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: 6}]}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- //
- // Cannot retry a stale write past the first statement.
- //
- // TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to allow
- // retrying writes beyond the first overall statement.
- //
+//
+// Cannot retry a stale write past the first statement.
+//
+// TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to allow
+// retrying writes beyond the first overall statement.
+//
- // Move a chunk to Shard2 to make it stale.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
- expectChunks(st, ns, [0, 1, 1]);
+// Move a chunk to Shard2 to make it stale.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+expectChunks(st, ns, [0, 1, 1]);
- session.startTransaction();
+session.startTransaction();
- // Targets Shard1, which is not stale.
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: -4}]}));
+// Targets Shard1, which is not stale.
+assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{_id: -4}]}));
- // Targets Shard2, which is stale.
- res = assert.commandFailedWithCode(
- sessionDB.runCommand({insert: collName, documents: [{_id: 7}]}), ErrorCodes.StaleConfig);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Targets Shard2, which is stale.
+res = assert.commandFailedWithCode(sessionDB.runCommand({insert: collName, documents: [{_id: 7}]}),
+ ErrorCodes.StaleConfig);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // The transaction should have been implicitly aborted on all shards.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// The transaction should have been implicitly aborted on all shards.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- //
- // The final StaleConfig error should be returned if the router exhausts its retries.
- //
+//
+// The final StaleConfig error should be returned if the router exhausts its retries.
+//
- // Move a chunk to Shard0 to make it stale.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard0.shardName}));
- expectChunks(st, ns, [1, 0, 1]);
+// Move a chunk to Shard0 to make it stale.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: -5}, to: st.shard0.shardName}));
+expectChunks(st, ns, [1, 0, 1]);
- // Disable metadata refreshes on the stale shard so it will indefinitely return a stale version
- // error.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
+// Disable metadata refreshes on the stale shard so it will indefinitely return a stale version
+// error.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
- session.startTransaction();
+session.startTransaction();
- // Target Shard2, to verify the transaction on it is aborted implicitly later.
- assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
+// Target Shard2 to verify the transaction on it is implicitly aborted later.
+assert.commandWorked(sessionDB.runCommand({find: collName, filter: {_id: 5}}));
- // Targets all shards. Shard0 is stale and won't refresh its metadata, so mongos should exhaust
- // its retries and implicitly abort the transaction.
- res = assert.commandFailedWithCode(sessionDB.runCommand({find: collName}),
- ErrorCodes.StaleConfig);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
+// Targets all shards. Shard0 is stale and won't refresh its metadata, so mongos should exhaust
+// its retries and implicitly abort the transaction.
+res = assert.commandFailedWithCode(sessionDB.runCommand({find: collName}), ErrorCodes.StaleConfig);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
- // Verify the shards that did not return an error were also aborted.
- assertNoSuchTransactionOnAllShards(
- st, session.getSessionId(), session.getTxnNumber_forTesting());
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// Verify the shards that did not return an error were also aborted.
+assertNoSuchTransactionOnAllShards(st, session.getSessionId(), session.getTxnNumber_forTesting());
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
- disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
+disableStaleVersionAndSnapshotRetriesWithinTransactions(st);
- st.stop();
+st.stop();
})();
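This file arms skipShardFilteringMetadataRefresh with mode alwaysOn and later disarms it with mode off by hand; if an assertion between the two throws, the failpoint stays armed for whatever runs next. A sketch of the same toggle wrapped in try/finally, assuming only the configureFailPoint command already used above (withFailPoint is an illustrative name):

    function withFailPoint(node, failPointName, fn) {
        assert.commandWorked(
            node.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
        try {
            fn();
        } finally {
            // Disarm even when fn() throws so later cases run against clean metadata.
            assert.commandWorked(
                node.adminCommand({configureFailPoint: failPointName, mode: "off"}));
        }
    }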
diff --git a/jstests/sharding/transactions_target_at_point_in_time.js b/jstests/sharding/transactions_target_at_point_in_time.js
index 1243676cfba..3cdfb4b49fe 100644
--- a/jstests/sharding/transactions_target_at_point_in_time.js
+++ b/jstests/sharding/transactions_target_at_point_in_time.js
@@ -8,103 +8,101 @@
// uses_transactions,
// ]
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
+function expectChunks(st, ns, chunks) {
+ for (let i = 0; i < chunks.length; i++) {
+ assert.eq(chunks[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
}
+}
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
+const st = new ShardingTest({shards: 3, mongos: 1, config: 1});
- // Set up one sharded collection with 2 chunks, both on the primary shard.
+// Set up one sharded collection with 2 chunks, both on the primary shard.
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- expectChunks(st, ns, [2, 0, 0]);
+expectChunks(st, ns, [2, 0, 0]);
- // Temporarily move a chunk to Shard2, to avoid picking a global read timestamp before the
- // sharding metadata cache collections are created.
+// Temporarily move a chunk to Shard2, to avoid picking a global read timestamp before the
+// sharding metadata cache collections are created.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
+expectChunks(st, ns, [1, 1, 0]);
+
+// First command targets the first chunk, the second command targets the second chunk.
+const kCommandTestCases = [
+ {
+ name: "aggregate",
+ commandFuncs: [
+ (coll) => coll.aggregate({$match: {_id: -5}}).itcount(),
+ (coll) => coll.aggregate({$match: {_id: 5}}).itcount(),
+ ]
+ },
+ {
+ name: "find",
+ commandFuncs: [
+ (coll) => coll.find({_id: -5}).itcount(),
+ (coll) => coll.find({_id: 5}).itcount(),
+ ]
+ }
+];
+
+function runTest(testCase) {
+ const cmdName = testCase.name;
+ const targetChunk1Func = testCase.commandFuncs[0];
+ const targetChunk2Func = testCase.commandFuncs[1];
+
+ jsTestLog("Testing " + cmdName);
+
+ expectChunks(st, ns, [1, 1, 0]);
+
+ const session = st.s.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
+
+ session.startTransaction({readConcern: {level: "snapshot"}});
+
+ // Start a transaction on Shard0 which will select and pin a global read timestamp.
+ assert.eq(targetChunk1Func(sessionColl),
+ 1,
+ "expected to find document in first chunk, cmd: " + cmdName);
+
+ // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
+ // later logical time than the transaction's read timestamp.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
+ // Target a document in the chunk that was moved. The router should get a stale shard
+ // version from Shard1 then retry on Shard1 and see the document.
+ assert.eq(targetChunk2Func(sessionColl),
+ 1,
+ "expected to find document in second chunk, cmd: " + cmdName);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // Move the chunk back to Shard1 for the next iteration.
assert.commandWorked(
st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- expectChunks(st, ns, [1, 1, 0]);
-
- // First command targets the first chunk, the second command targets the second chunk.
- const kCommandTestCases = [
- {
- name: "aggregate",
- commandFuncs: [
- (coll) => coll.aggregate({$match: {_id: -5}}).itcount(),
- (coll) => coll.aggregate({$match: {_id: 5}}).itcount(),
- ]
- },
- {
- name: "find",
- commandFuncs: [
- (coll) => coll.find({_id: -5}).itcount(),
- (coll) => coll.find({_id: 5}).itcount(),
- ]
- }
- ];
-
- function runTest(testCase) {
- const cmdName = testCase.name;
- const targetChunk1Func = testCase.commandFuncs[0];
- const targetChunk2Func = testCase.commandFuncs[1];
-
- jsTestLog("Testing " + cmdName);
-
- expectChunks(st, ns, [1, 1, 0]);
-
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- session.startTransaction({readConcern: {level: "snapshot"}});
-
- // Start a transaction on Shard0 which will select and pin a global read timestamp.
- assert.eq(targetChunk1Func(sessionColl),
- 1,
- "expected to find document in first chunk, cmd: " + cmdName);
-
- // Move a chunk from Shard1 to Shard2 outside of the transaction. This will happen at a
- // later logical time than the transaction's read timestamp.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard2.shardName}));
-
- // Target a document in the chunk that was moved. The router should get a stale shard
- // version from Shard1 then retry on Shard1 and see the document.
- assert.eq(targetChunk2Func(sessionColl),
- 1,
- "expected to find document in second chunk, cmd: " + cmdName);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Move the chunk back to Shard1 for the next iteration.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
- }
+}
- kCommandTestCases.forEach(runTest);
+kCommandTestCases.forEach(runTest);
- st.stop();
+st.stop();
})();
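Condensed, the invariant runTest checks is that a snapshot transaction keeps reading at the timestamp pinned by its first statement, even after a migration commits at a later cluster time. A minimal restatement using only APIs that appear in this file:

    const session = st.s.startSession();
    const sessionColl = session.getDatabase(dbName)[collName];

    session.startTransaction({readConcern: {level: "snapshot"}});
    assert.eq(1, sessionColl.find({_id: -5}).itcount());  // pins the global read timestamp

    // A moveChunk outside the transaction commits at a later logical time, but the
    // next statement still reads the chunk's contents as of the pinned timestamp.
    assert.eq(1, sessionColl.find({_id: 5}).itcount());

    assert.commandWorked(session.commitTransaction_forTesting());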
diff --git a/jstests/sharding/transactions_targeting_errors.js b/jstests/sharding/transactions_targeting_errors.js
index 9f490994c88..5fb3a0dfba8 100644
--- a/jstests/sharding/transactions_targeting_errors.js
+++ b/jstests/sharding/transactions_targeting_errors.js
@@ -2,41 +2,39 @@
//
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: "hashed"}}));
+const st = new ShardingTest({shards: 2});
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: "hashed"}}));
- const session = st.s.startSession();
- const sessionDB = session.getDatabase("test");
+const session = st.s.startSession();
+const sessionDB = session.getDatabase("test");
- // Failed update.
+// Failed update.
- session.startTransaction();
+session.startTransaction();
- let res = sessionDB.runCommand(
- {update: collName, updates: [{q: {skey: {$lte: 5}}, u: {$set: {x: 1}}, multi: false}]});
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
+let res = sessionDB.runCommand(
+ {update: collName, updates: [{q: {skey: {$lte: 5}}, u: {$set: {x: 1}}, multi: false}]});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- // Failed delete.
+// Failed delete.
- session.startTransaction();
+session.startTransaction();
- res = sessionDB.runCommand({delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 1}]});
- assert.commandFailedWithCode(res, ErrorCodes.ShardKeyNotFound);
- assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
+res = sessionDB.runCommand({delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 1}]});
+assert.commandFailedWithCode(res, ErrorCodes.ShardKeyNotFound);
+assert(res.hasOwnProperty("writeErrors"), "expected write errors, res: " + tojson(res));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- st.stop();
+st.stop();
}());
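Both failures above come from the same targeting rule: a multi: false update (or limit: 1 delete) must resolve to a single shard, and a range predicate over a hashed shard key cannot. For contrast, a sketch of the shape that is expected to pass, assuming standard mongos targeting; this case is not part of the test:

    session.startTransaction();
    // Equality on the full shard key hashes to exactly one shard, so the same
    // update shape should be accepted.
    assert.commandWorked(sessionDB.runCommand(
        {update: collName, updates: [{q: {skey: 5}, u: {$set: {x: 1}}, multi: false}]}));
    assert.commandWorked(session.commitTransaction_forTesting());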
diff --git a/jstests/sharding/transactions_view_resolution.js b/jstests/sharding/transactions_view_resolution.js
index be5d5ab7845..1a8224ee089 100644
--- a/jstests/sharding/transactions_view_resolution.js
+++ b/jstests/sharding/transactions_view_resolution.js
@@ -7,289 +7,286 @@
// uses_transactions,
// ]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For arrayEq.
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- const shardedDbName = "shardedDB";
- const shardedCollName = "sharded";
- const shardedViewName = "sharded_view";
-
- const unshardedDbName = "unshardedDB";
- const unshardedCollName = "unsharded";
- const unshardedViewName = "unsharded_view";
-
- const viewOnShardedViewName = "sharded_view_view";
-
- function setUpUnshardedCollectionAndView(st, session, primaryShard) {
- assert.writeOK(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
- {_id: 1, x: "unsharded"}, {writeConcern: {w: "majority"}}));
- st.ensurePrimaryShard(unshardedDbName, primaryShard);
-
- const unshardedView = session.getDatabase(unshardedDbName)[unshardedViewName];
- assert.commandWorked(unshardedView.runCommand(
- "create", {viewOn: unshardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
-
- return unshardedView;
- }
-
- function setUpShardedCollectionAndView(st, session, primaryShard) {
- const ns = shardedDbName + "." + shardedCollName;
-
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
- {_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
- {_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
- st.ensurePrimaryShard(shardedDbName, primaryShard);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-
- const shardedView = session.getDatabase(shardedDbName)[shardedViewName];
- assert.commandWorked(shardedView.runCommand(
- "create", {viewOn: shardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns, dbNames: [shardedDbName, unshardedDbName]});
-
- return shardedView;
- }
-
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- const session = st.s.startSession();
-
- // Set up an unsharded collection on shard0.
- const unshardedView = setUpUnshardedCollectionAndView(st, session, st.shard0.shardName);
-
- // Set up a sharded collection with one chunk on each shard in a database with shard0 as its
- // primary shard.
- const shardedView = setUpShardedCollectionAndView(st, session, st.shard0.shardName);
-
- // Set up a view on the sharded view, in the same database.
- const viewOnShardedView = session.getDatabase(shardedDbName)[viewOnShardedViewName];
- assert.commandWorked(viewOnShardedView.runCommand(
- "create", {viewOn: shardedViewName, pipeline: [], writeConcern: {w: "majority"}}));
-
- //
- // The first statement a participant shard receives reading from a view should succeed.
- //
-
- function readFromViewOnFirstParticipantStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- assert.eq(viewFunc(view), numDocsExpected);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // Unsharded view.
- readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 1);
- readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
- return view.distinct("_id").length;
- }, 1);
- readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
- return view.find().itcount();
- }, 1);
-
- // Sharded view.
- readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- // View on sharded view.
- readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- //
- // A later statement a participant shard receives reading from a view should succeed.
- //
-
- function readFromViewOnLaterParticipantStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- assert.eq(view.aggregate({$match: {}}).itcount(), numDocsExpected);
- assert.eq(viewFunc(view), numDocsExpected);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // Unsharded view.
- readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 1);
- readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
- return view.distinct("_id").length;
- }, 1);
- readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
- return view.find().itcount();
- }, 1);
-
- // Sharded view.
- readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- // View on sharded view.
- readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {}}).itcount();
- }, 2);
- readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id").length;
- }, 2);
- readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
- return view.find().itcount();
- }, 2);
-
- //
- // Transactions on shards that return a view resolution error on the first statement remain
- // aborted if the shard is not targeted by the retry on the resolved namespace.
- //
- // This may happen when reading from a sharded view, because mongos will target the primary
- // shard first to resolve the view, but the retry on the underlying sharded collection is not
- // guaranteed to target the primary again.
- //
-
- // Assumes the request in viewFunc does not target the primary shard, Shard0.
- function primaryShardNotReTargeted_FirstStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- assert.eq(viewFunc(view), numDocsExpected);
-
- // There should not be an in-progress transaction on the primary shard.
- assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
- find: "bar",
- lsid: session.getSessionId(),
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // The transaction should not have been committed on the primary shard.
- assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
- find: "bar",
- lsid: session.getSessionId(),
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- }
-
- // This is only possible against sharded views.
- primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- // View on sharded view.
- primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- //
- // Shards do not abort on a view resolution error if they have already completed a statement for
- // a transaction.
- //
-
- // Assumes the primary shard for view is Shard0.
- function primaryShardNotReTargeted_LaterStatement(session, view, viewFunc, numDocsExpected) {
- session.startTransaction();
- // Complete a statement on the primary shard for the view.
- assert.eq(view.aggregate({$match: {_id: -1}}).itcount(), 1);
- // Targets the primary first, but the resolved retry only targets Shard1.
- assert.eq(viewFunc(view), numDocsExpected);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // This is only possible against sharded views.
- primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- // View on sharded view.
- primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
- return view.aggregate({$match: {_id: 1}}).itcount();
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
- return view.distinct("_id", {_id: {$gte: 1}}).length;
- }, 1);
- primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
- return view.find({_id: 1}).itcount();
- }, 1);
-
- //
- // Reading from a view using $lookup and $graphLookup should succeed.
- //
-
- function assertAggResultEqInTransaction(coll, pipeline, expected) {
- session.startTransaction();
- const resArray = coll.aggregate(pipeline).toArray();
- assert(arrayEq(resArray, expected), tojson({got: resArray, expected: expected}));
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // Set up an unsharded collection to use for $lookup. We cannot lookup into sharded collections.
- // TODO SERVER-29159: Add testing for lookup into sharded collections in a transaction once that
- // is supported.
- const lookupDbName = "dbForLookup";
- const lookupCollName = "collForLookup";
- assert.writeOK(
- st.s.getDB(lookupDbName)[lookupCollName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
- const lookupColl = session.getDatabase(unshardedDbName)[unshardedCollName];
-
- // Lookup the document in the unsharded collection with _id: 1 through the unsharded view.
- assertAggResultEqInTransaction(
- lookupColl,
- [
- {$match: {_id: 1}},
- {
- $lookup:
- {from: unshardedViewName, localField: "_id", foreignField: "_id", as: "matched"}
- },
- {$unwind: "$matched"},
- {$project: {_id: 1, matchedX: "$matched.x"}}
- ],
- [{_id: 1, matchedX: "unsharded"}]);
-
- // Find the same document through the view using $graphLookup.
- assertAggResultEqInTransaction(lookupColl,
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const shardedDbName = "shardedDB";
+const shardedCollName = "sharded";
+const shardedViewName = "sharded_view";
+
+const unshardedDbName = "unshardedDB";
+const unshardedCollName = "unsharded";
+const unshardedViewName = "unsharded_view";
+
+const viewOnShardedViewName = "sharded_view_view";
+
+function setUpUnshardedCollectionAndView(st, session, primaryShard) {
+ assert.writeOK(st.s.getDB(unshardedDbName)[unshardedCollName].insert(
+ {_id: 1, x: "unsharded"}, {writeConcern: {w: "majority"}}));
+ st.ensurePrimaryShard(unshardedDbName, primaryShard);
+
+ const unshardedView = session.getDatabase(unshardedDbName)[unshardedViewName];
+ assert.commandWorked(unshardedView.runCommand(
+ "create", {viewOn: unshardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
+
+ return unshardedView;
+}
+
+function setUpShardedCollectionAndView(st, session, primaryShard) {
+ const ns = shardedDbName + "." + shardedCollName;
+
+ assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ {_id: -1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(st.s.getDB(shardedDbName)[shardedCollName].insert(
+ {_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(st.s.adminCommand({enableSharding: shardedDbName}));
+ st.ensurePrimaryShard(shardedDbName, primaryShard);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+
+ const shardedView = session.getDatabase(shardedDbName)[shardedViewName];
+ assert.commandWorked(shardedView.runCommand(
+ "create", {viewOn: shardedCollName, pipeline: [], writeConcern: {w: "majority"}}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns, dbNames: [shardedDbName, unshardedDbName]});
+
+ return shardedView;
+}
+
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
+const session = st.s.startSession();
+
+// Set up an unsharded collection on shard0.
+const unshardedView = setUpUnshardedCollectionAndView(st, session, st.shard0.shardName);
+
+// Set up a sharded collection with one chunk on each shard in a database with shard0 as its
+// primary shard.
+const shardedView = setUpShardedCollectionAndView(st, session, st.shard0.shardName);
+
+// Set up a view on the sharded view, in the same database.
+const viewOnShardedView = session.getDatabase(shardedDbName)[viewOnShardedViewName];
+assert.commandWorked(viewOnShardedView.runCommand(
+ "create", {viewOn: shardedViewName, pipeline: [], writeConcern: {w: "majority"}}));
+
+//
+// The first statement a participant shard receives reading from a view should succeed.
+//
+
+function readFromViewOnFirstParticipantStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ assert.eq(viewFunc(view), numDocsExpected);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// Unsharded view.
+readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 1);
+readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
+ return view.distinct("_id").length;
+}, 1);
+readFromViewOnFirstParticipantStatement(session, unshardedView, (view) => {
+ return view.find().itcount();
+}, 1);
+
+// Sharded view.
+readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnFirstParticipantStatement(session, shardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+// View on sharded view.
+readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnFirstParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+//
+// A later statement a participant shard receives reading from a view should succeed.
+//
+
+function readFromViewOnLaterParticipantStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ assert.eq(view.aggregate({$match: {}}).itcount(), numDocsExpected);
+ assert.eq(viewFunc(view), numDocsExpected);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// Unsharded view.
+readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 1);
+readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
+ return view.distinct("_id").length;
+}, 1);
+readFromViewOnLaterParticipantStatement(session, unshardedView, (view) => {
+ return view.find().itcount();
+}, 1);
+
+// Sharded view.
+readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnLaterParticipantStatement(session, shardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+// View on sharded view.
+readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {}}).itcount();
+}, 2);
+readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id").length;
+}, 2);
+readFromViewOnLaterParticipantStatement(session, viewOnShardedView, (view) => {
+ return view.find().itcount();
+}, 2);
+
+//
+// Transactions on shards that return a view resolution error on the first statement remain
+// aborted if the shard is not targeted by the retry on the resolved namespace.
+//
+// This may happen when reading from a sharded view, because mongos will target the primary
+// shard first to resolve the view, but the retry on the underlying sharded collection is not
+// guaranteed to target the primary again.
+//
+
+// Assumes the request in viewFunc does not target the primary shard, Shard0.
+function primaryShardNotReTargeted_FirstStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ assert.eq(viewFunc(view), numDocsExpected);
+
+ // There should not be an in-progress transaction on the primary shard.
+ assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
+ find: "bar",
+ lsid: session.getSessionId(),
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // The transaction should not have been committed on the primary shard.
+ assert.commandFailedWithCode(st.rs0.getPrimary().getDB("foo").runCommand({
+ find: "bar",
+ lsid: session.getSessionId(),
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+}
+
+// This is only possible against sharded views.
+primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, shardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+// View on sharded view.
+primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_FirstStatement(session, viewOnShardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+//
+// Shards do not abort on a view resolution error if they have already completed a statement for
+// a transaction.
+//
+
+// Assumes the primary shard for the view is Shard0.
+function primaryShardNotReTargeted_LaterStatement(session, view, viewFunc, numDocsExpected) {
+ session.startTransaction();
+ // Complete a statement on the primary shard for the view.
+ assert.eq(view.aggregate({$match: {_id: -1}}).itcount(), 1);
+ // Targets the primary first, but the resolved retry only targets Shard1.
+ assert.eq(viewFunc(view), numDocsExpected);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// This is only possible against sharded views.
+primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, shardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+// View on sharded view.
+primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
+ return view.aggregate({$match: {_id: 1}}).itcount();
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
+ return view.distinct("_id", {_id: {$gte: 1}}).length;
+}, 1);
+primaryShardNotReTargeted_LaterStatement(session, viewOnShardedView, (view) => {
+ return view.find({_id: 1}).itcount();
+}, 1);
+
+//
+// Reading from a view using $lookup and $graphLookup should succeed.
+//
+
+function assertAggResultEqInTransaction(coll, pipeline, expected) {
+ session.startTransaction();
+ const resArray = coll.aggregate(pipeline).toArray();
+ assert(arrayEq(resArray, expected), tojson({got: resArray, expected: expected}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// Set up an unsharded collection to use for $lookup, since $lookup into a sharded
+// collection is not supported.
+// TODO SERVER-29159: Add testing for lookup into sharded collections in a transaction once that
+// is supported.
+const lookupDbName = "dbForLookup";
+const lookupCollName = "collForLookup";
+assert.writeOK(
+ st.s.getDB(lookupDbName)[lookupCollName].insert({_id: 1}, {writeConcern: {w: "majority"}}));
+const lookupColl = session.getDatabase(unshardedDbName)[unshardedCollName];
+
+// Lookup the document in the unsharded collection with _id: 1 through the unsharded view.
+assertAggResultEqInTransaction(
+ lookupColl,
+ [
+ {$match: {_id: 1}},
+ {$lookup: {from: unshardedViewName, localField: "_id", foreignField: "_id", as: "matched"}},
+ {$unwind: "$matched"},
+ {$project: {_id: 1, matchedX: "$matched.x"}}
+ ],
+ [{_id: 1, matchedX: "unsharded"}]);
+
+// Find the same document through the view using $graphLookup.
+assertAggResultEqInTransaction(lookupColl,
[
{$match: {_id: 1}},
{
@@ -306,5 +303,5 @@
],
[{_id: 1, matchedX: "unsharded"}]);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/transactions_writes_not_retryable.js b/jstests/sharding/transactions_writes_not_retryable.js
index d86c7a0e8a5..e6782394ec7 100644
--- a/jstests/sharding/transactions_writes_not_retryable.js
+++ b/jstests/sharding/transactions_writes_not_retryable.js
@@ -4,116 +4,106 @@
* @tags: [requires_sharding, uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- function runTest(st, session, sessionDB, writeCmdName, writeCmd, isSharded) {
- jsTestLog("Testing " + writeCmdName + ", cmd: " + tojson(writeCmd) + ", sharded: " +
- isSharded);
-
- // Fail with retryable error.
- // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to
- // be an internal client.
- const retryableError = ErrorCodes.InterruptedDueToReplStateChange;
- assert.commandWorked(st.rs0.getPrimary().adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- errorCode: retryableError,
- failCommands: [writeCmdName],
- failInternalCommands: true
- }
- }));
-
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDB.runCommand(writeCmd),
- retryableError,
- "expected write in transaction not to be retried on retryable error, cmd: " +
- tojson(writeCmd) + ", sharded: " + isSharded);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // Fail with closed connection.
- assert.commandWorked(st.rs0.getPrimary().adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- closeConnection: true,
- failCommands: [writeCmdName],
- failInternalCommands: true
- }
- }));
-
- session.startTransaction();
- let res = assert.commandFailed(
- sessionDB.runCommand(writeCmd),
- "expected write in transaction not to be retried on closed connection, cmd: " +
- tojson(writeCmd) + ", sharded: " + isSharded);
-
- // Network errors during sharded transactions are transient transaction errors, so they're
- // returned as top level codes for all commands, including batch writes.
- assert(ErrorCodes.isNetworkError(res.code),
- "expected network error, got: " + tojson(res.code));
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(
- st.rs0.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+"use strict";
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+function runTest(st, session, sessionDB, writeCmdName, writeCmd, isSharded) {
+ jsTestLog("Testing " + writeCmdName + ", cmd: " + tojson(writeCmd) + ", sharded: " + isSharded);
+
+ // Fail with retryable error.
+ // Sharding tests require failInternalCommands: true, since the mongos appears to mongod to
+ // be an internal client.
+ const retryableError = ErrorCodes.InterruptedDueToReplStateChange;
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {errorCode: retryableError, failCommands: [writeCmdName], failInternalCommands: true}
+ }));
+
+ session.startTransaction();
+ assert.commandFailedWithCode(
+ sessionDB.runCommand(writeCmd),
+ retryableError,
+ "expected write in transaction not to be retried on retryable error, cmd: " +
+ tojson(writeCmd) + ", sharded: " + isSharded);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ // Fail with closed connection.
+ assert.commandWorked(st.rs0.getPrimary().adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {closeConnection: true, failCommands: [writeCmdName], failInternalCommands: true}
+ }));
+
+ session.startTransaction();
+ let res = assert.commandFailed(
+ sessionDB.runCommand(writeCmd),
+ "expected write in transaction not to be retried on closed connection, cmd: " +
+ tojson(writeCmd) + ", sharded: " + isSharded);
+
+ // Network errors during sharded transactions are transient transaction errors, so they're
+ // returned as top level codes for all commands, including batch writes.
+ assert(ErrorCodes.isNetworkError(res.code), "expected network error, got: " + tojson(res.code));
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.commandWorked(
+ st.rs0.getPrimary().adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+}
+
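+// A minimal sketch (for illustration; not used by this test) of how a caller could key off
+// the "TransientTransactionError" label asserted above: the label marks the transaction as
+// a whole as safe to retry from the beginning, unlike a retryable write, which retries an
+// individual statement.
+function isTransientTxnError(res) {
+    // The label is returned as a top-level field on the command response.
+    return Array.isArray(res.errorLabels) &&
+        res.errorLabels.indexOf("TransientTransactionError") >= 0;
+}
+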
+const kCmdTestCases = [
+ {
+ name: "insert",
+ command: {insert: collName, documents: [{_id: 6}]},
+ },
+ {
+ name: "update",
+ command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
+ },
+ {
+ name: "delete",
+ command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
+ },
+ {
+ name: "findAndModify", // update
+ command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
+ },
+ {
+ name: "findAndModify", // delete
+ command: {findAndModify: collName, query: {_id: 5}, remove: true},
}
+];
- const kCmdTestCases = [
- {
- name: "insert",
- command: {insert: collName, documents: [{_id: 6}]},
- },
- {
- name: "update",
- command: {update: collName, updates: [{q: {_id: 5}, u: {$set: {x: 1}}}]},
- },
- {
- name: "delete",
- command: {delete: collName, deletes: [{q: {_id: 5}, limit: 1}]},
- },
- {
- name: "findAndModify", // update
- command: {findAndModify: collName, query: {_id: 5}, update: {$set: {x: 1}}},
- },
- {
- name: "findAndModify", // delete
- command: {findAndModify: collName, query: {_id: 5}, remove: true},
- }
- ];
-
- const st = new ShardingTest({shards: 1, config: 1});
-
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
-
- // Unsharded.
- jsTestLog("Testing against unsharded collection");
-
- assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- kCmdTestCases.forEach(cmdTestCase => {
- runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, false /*isSharded*/);
- });
-
- // Sharded
- jsTestLog("Testing against sharded collection");
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- kCmdTestCases.forEach(cmdTestCase => {
- runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, true /*isSharded*/);
- });
-
- st.stop();
+const st = new ShardingTest({shards: 1, config: 1});
+
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
+
+// Unsharded.
+jsTestLog("Testing against unsharded collection");
+
+assert.writeOK(st.s.getDB(dbName)[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+kCmdTestCases.forEach(cmdTestCase => {
+ runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, false /*isSharded*/);
+});
+
+// Sharded.
+jsTestLog("Testing against sharded collection");
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.rs0.getPrimary().adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+kCmdTestCases.forEach(cmdTestCase => {
+ runTest(st, session, sessionDB, cmdTestCase.name, cmdTestCase.command, true /*isSharded*/);
+});
+
+st.stop();
})();
diff --git a/jstests/sharding/txn_agg.js b/jstests/sharding/txn_agg.js
index cd5170a8ce1..7ae71a37b7c 100644
--- a/jstests/sharding/txn_agg.js
+++ b/jstests/sharding/txn_agg.js
@@ -1,117 +1,116 @@
// @tags: [uses_transactions, requires_find_command, uses_multi_shard_transaction]
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard("test", st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard("test", st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {_id: 0}}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
- // Preemptively create the collections in the shard since it is not allowed in transactions.
- let coll = st.s.getDB('test').user;
- coll.insert({_id: 1});
- coll.insert({_id: -1});
- coll.remove({});
+// Preemptively create the collections on the shard, since creating a collection inside a
+// transaction is not allowed.
+let coll = st.s.getDB('test').user;
+coll.insert({_id: 1});
+coll.insert({_id: -1});
+coll.remove({});
- let unshardedColl = st.s.getDB('test').foo;
- unshardedColl.insert({_id: 0});
- unshardedColl.remove({});
+let unshardedColl = st.s.getDB('test').foo;
+unshardedColl.insert({_id: 0});
+unshardedColl.remove({});
- let session = st.s.startSession();
- let sessionDB = session.getDatabase('test');
- let sessionColl = sessionDB.getCollection('user');
- let sessionUnsharded = sessionDB.getCollection('foo');
+let session = st.s.startSession();
+let sessionDB = session.getDatabase('test');
+let sessionColl = sessionDB.getCollection('user');
+let sessionUnsharded = sessionDB.getCollection('foo');
- // passthrough
+// passthrough
- session.startTransaction();
+session.startTransaction();
- sessionUnsharded.insert({_id: -1});
- sessionUnsharded.insert({_id: 1});
- assert.eq(2, sessionUnsharded.find().itcount());
+sessionUnsharded.insert({_id: -1});
+sessionUnsharded.insert({_id: 1});
+assert.eq(2, sessionUnsharded.find().itcount());
- let res = sessionUnsharded.aggregate([{$match: {_id: {$gte: -200}}}]).toArray();
- assert.eq(2, res.length, tojson(res));
+let res = sessionUnsharded.aggregate([{$match: {_id: {$gte: -200}}}]).toArray();
+assert.eq(2, res.length, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // merge on mongos
+// merge on mongos
- session.startTransaction();
+session.startTransaction();
- sessionColl.insert({_id: -1});
- sessionColl.insert({_id: 1});
- assert.eq(2, sessionColl.find().itcount());
+sessionColl.insert({_id: -1});
+sessionColl.insert({_id: 1});
+assert.eq(2, sessionColl.find().itcount());
- res = sessionColl.aggregate([{$match: {_id: {$gte: -200}}}], {allowDiskUse: false}).toArray();
- assert.eq(2, res.length, tojson(res));
+res = sessionColl.aggregate([{$match: {_id: {$gte: -200}}}], {allowDiskUse: false}).toArray();
+assert.eq(2, res.length, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // merge on shard. This will require the merging shard to open a cursor on itself.
- session.startTransaction();
+// merge on shard. This will require the merging shard to open a cursor on itself.
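+// ($_internalSplitPipeline is a test-only internal stage; mergeType "anyShard" forces the
+// merging half of the split pipeline to run on a shard rather than on mongos.)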
+session.startTransaction();
- sessionColl.insert({_id: -1});
- sessionColl.insert({_id: 1});
- assert.eq(2, sessionColl.find().itcount());
+sessionColl.insert({_id: -1});
+sessionColl.insert({_id: 1});
+assert.eq(2, sessionColl.find().itcount());
- res =
- sessionColl
- .aggregate(
- [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}])
- .toArray();
- assert.eq(2, res.length, tojson(res));
+res = sessionColl
+ .aggregate(
+ [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}])
+ .toArray();
+assert.eq(2, res.length, tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // Error case: provide a readConcern on an operation which comes in the middle of a transaction.
- session.startTransaction();
+// Error case: provide a readConcern on an operation which comes in the middle of a transaction.
+session.startTransaction();
- sessionColl.insert({_id: -1});
- assert.eq(1, sessionColl.find().itcount());
+sessionColl.insert({_id: -1});
+assert.eq(1, sessionColl.find().itcount());
- const err = assert.throws(
- () => sessionColl.aggregate(
- [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}],
- {readConcern: {level: "majority"}}
+const err = assert.throws(
+ () => sessionColl.aggregate(
+ [{$match: {_id: {$gte: -200}}}, {$_internalSplitPipeline: {mergeType: "anyShard"}}],
+ {readConcern: {level: "majority"}}
- ));
- assert.eq(err.code, ErrorCodes.InvalidOptions, err);
+ ));
+assert.eq(err.code, ErrorCodes.InvalidOptions, err);
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- // Insert some data outside of a transaction.
- assert.commandWorked(sessionColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
+// Insert some data outside of a transaction.
+assert.commandWorked(sessionColl.insert([{_id: -1}, {_id: 0}, {_id: 1}]));
- // Run an aggregation which requires merging on a shard as the first operation in a transaction.
- session.startTransaction();
- assert.eq(
- [{_id: -1}, {_id: 0}, {_id: 1}],
- sessionColl
- .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
- .toArray());
- assert.commandWorked(session.commitTransaction_forTesting());
+// Run an aggregation which requires merging on a shard as the first operation in a transaction.
+session.startTransaction();
+assert.eq(
+ [{_id: -1}, {_id: 0}, {_id: 1}],
+ sessionColl
+ .aggregate([{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}])
+ .toArray());
+assert.commandWorked(session.commitTransaction_forTesting());
- // Move all of the data to shard 1.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: -1}, to: st.shard1.shardName}));
+// Move all of the data to shard 1.
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: -1}, to: st.shard1.shardName}));
- // Be sure that only one shard will be targeted after the moveChunk.
- const pipeline = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}];
- const explain = sessionColl.explain().aggregate(pipeline);
- assert.eq(Object.keys(explain.shards), [st.shard1.shardName], explain);
+// Be sure that only one shard will be targeted after the moveChunk.
+const pipeline = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}, {$sort: {_id: 1}}];
+const explain = sessionColl.explain().aggregate(pipeline);
+assert.eq(Object.keys(explain.shards), [st.shard1.shardName], explain);
- // Now run the same aggregation, but again, force shard 0 to be the merger even though it has no
- // chunks for the collection.
- session.startTransaction();
- assert.eq([{_id: -1}, {_id: 0}, {_id: 1}], sessionColl.aggregate(pipeline).toArray());
- assert.commandWorked(session.commitTransaction_forTesting());
+// Now run the same aggregation again, forcing shard 0 to be the merger even though it has
+// no chunks for the collection.
+session.startTransaction();
+assert.eq([{_id: -1}, {_id: 0}, {_id: 1}], sessionColl.aggregate(pipeline).toArray());
+assert.commandWorked(session.commitTransaction_forTesting());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js b/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js
index 3bc4cce7846..2e5751cc738 100644
--- a/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js
+++ b/jstests/sharding/txn_being_applied_to_secondary_cannot_be_killed.js
@@ -6,101 +6,104 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- // Set to something high enough that a slow machine shouldn't cause our
- // transaction to be aborted before committing, but low enough that the test
- // won't be unnecessarily slow when we wait for the periodic transaction
- // abort job to run.
- TestData.transactionLifetimeLimitSeconds = 10;
-
- const rsOpts = {nodes: 3, settings: {chainingAllowed: false}};
- let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts, rs2: rsOpts}});
-
- const coordinator = st.shard0;
- const participant1 = st.shard1;
- const participant2 = st.shard2;
-
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new session and start a transaction on that session.
- const session = st.s.startSession();
- session.startTransaction();
-
- // Insert a document onto each shard to make this a cross-shard transaction.
- assert.commandWorked(session.getDatabase(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- }));
-
- // Set a failpoint to make oplog application hang on one secondary after applying the
- // operations in the transaction but before preparing the TransactionParticipant.
- const applyOpsHangBeforePreparingTransaction = "applyOpsHangBeforePreparingTransaction";
- const firstSecondary = st.rs0.getSecondary();
- assert.commandWorked(firstSecondary.adminCommand({
- configureFailPoint: applyOpsHangBeforePreparingTransaction,
- mode: "alwaysOn",
- }));
-
- // Commit the transaction, which will execute two-phase commit.
- assert.commandWorked(session.commitTransaction_forTesting());
-
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
- // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
-
- jsTest.log("Waiting for secondary to apply the prepare oplog entry.");
- waitForFailpoint("Hit " + applyOpsHangBeforePreparingTransaction + " failpoint", 1);
-
- // Wait for the periodic transaction abort job to run while oplog
- // application is hanging. The job should run every 10 seconds due to the
- // transactionLifetimeLimitSeconds parameter being set to 10 above, so the
- // likelihood of it running while sleeping 30 seconds is high. If it does
- // not run, the test will trivially pass without testing the desired
- // behavior, but it will not cause the test to fail.
- sleep(30000);
-
- jsTest.log("Turning off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
- // Allow oplog application to continue by turning off the failpoint. The
- // transaction should prepare successfully and should not have been aborted
- // by the transaction abort job.
- assert.commandWorked(firstSecondary.adminCommand({
- configureFailPoint: applyOpsHangBeforePreparingTransaction,
- mode: "off",
- }));
-
- jsTest.log("Turned off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
-
- st.stop();
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// Set to something high enough that a slow machine shouldn't cause our
+// transaction to be aborted before committing, but low enough that the test
+// won't be unnecessarily slow when we wait for the periodic transaction
+// abort job to run.
+TestData.transactionLifetimeLimitSeconds = 10;
+
+const rsOpts = {
+ nodes: 3,
+ settings: {chainingAllowed: false}
+};
+let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts, rs2: rsOpts}});
+
+const coordinator = st.shard0;
+const participant1 = st.shard1;
+const participant2 = st.shard2;
+
+// Create a sharded collection with a chunk on each shard:
+// shard0: [-inf, 0)
+// shard1: [0, 10)
+// shard2: [10, +inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+// These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+// from the shards starting, aborting, and restarting the transaction due to needing to
+// refresh after the transaction has started.
+assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+// Start a new session and start a transaction on that session.
+const session = st.s.startSession();
+session.startTransaction();
+
+// Insert a document onto each shard to make this a cross-shard transaction.
+assert.commandWorked(session.getDatabase(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+}));
+
+// Set a failpoint to make oplog application hang on one secondary after applying the
+// operations in the transaction but before preparing the TransactionParticipant.
+const applyOpsHangBeforePreparingTransaction = "applyOpsHangBeforePreparingTransaction";
+const firstSecondary = st.rs0.getSecondary();
+assert.commandWorked(firstSecondary.adminCommand({
+ configureFailPoint: applyOpsHangBeforePreparingTransaction,
+ mode: "alwaysOn",
+}));
+
+// Commit the transaction, which will execute two-phase commit.
+assert.commandWorked(session.commitTransaction_forTesting());
+
+jsTest.log("Verify that the transaction was committed on all shards.");
+// Use assert.soon(), because although coordinateCommitTransaction currently blocks
+// until the commit process is fully complete, it will eventually be changed to only
+// block until the decision is *written*, at which point the test can pass the
+// operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
+// read to ensure the read sees the transaction's writes (TODO SERVER-37165).
+assert.soon(function() {
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
+});
+
+jsTest.log("Waiting for secondary to apply the prepare oplog entry.");
+waitForFailpoint("Hit " + applyOpsHangBeforePreparingTransaction + " failpoint", 1);
+
+// Wait for the periodic transaction abort job to run while oplog
+// application is hanging. The job should run every 10 seconds due to the
+// transactionLifetimeLimitSeconds parameter being set to 10 above, so the
+// likelihood of it running while sleeping 30 seconds is high. If it does
+// not run, the test will trivially pass without testing the desired
+// behavior, but it will not cause the test to fail.
+sleep(30000);
+
+jsTest.log("Turning off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
+// Allow oplog application to continue by turning off the failpoint. The
+// transaction should prepare successfully and should not have been aborted
+// by the transaction abort job.
+assert.commandWorked(firstSecondary.adminCommand({
+ configureFailPoint: applyOpsHangBeforePreparingTransaction,
+ mode: "off",
+}));
+
+jsTest.log("Turned off " + applyOpsHangBeforePreparingTransaction + " failpoint.");
+
+st.stop();
})();
diff --git a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
index ce643ed3ad1..fc6137f2ff7 100644
--- a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
+++ b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
@@ -10,355 +10,352 @@
*/
(function() {
- 'use strict';
-
- load("jstests/libs/write_concern_util.js");
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- // Waits for the given log to appear a number of times in the shell's rawMongoProgramOutput.
- // Loops because it is not guaranteed the program output will immediately contain all lines
- // logged at an earlier wall clock time.
- function waitForLog(logLine, times) {
- assert.soon(function() {
- const matches = rawMongoProgramOutput().match(new RegExp(logLine, "g")) || [];
- return matches.length === times;
- }, 'Failed to find "' + logLine + '" logged ' + times + ' times');
- }
-
- const addTxnFields = function(command, lsid, txnNumber, startTransaction) {
- let txnFields = {
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- };
- if (startTransaction) {
- txnFields.startTransaction = true;
- }
- return Object.assign({}, command, txnFields);
- };
-
- const defaultCommitCommand = {
- commitTransaction: 1,
- writeConcern: {w: "majority", wtimeout: 6000}
- };
- const noop = () => {};
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- // TODO (SERVER-37364): Uncomment this line; otherwise, the coordinator will wait too long to
- // time out waiting for votes and the test will time out.
- // Lower the transaction timeout, since this test exercises cases where the coordinator should
- // time out collecting prepare votes.
- // TestData.transactionLifetimeLimitSeconds = 30;
-
- let st = new ShardingTest({
- shards: 3,
- // Create shards with more than one node because we test for writeConcern majority failing.
- config: 1,
- other: {
- mongosOptions: {verbose: 3},
- rs0: {nodes: [{}, {rsConfig: {priority: 0}}]},
- rs1: {nodes: [{}, {rsConfig: {priority: 0}}]},
- rs2: {nodes: [{}, {rsConfig: {priority: 0}}]},
- },
- });
-
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
-
- // Create a "dummy" collection for doing noop writes to advance shard's last applied OpTimes.
- assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
-
- // The test uses three shards with one chunk each in order to control which shards are targeted
- // for each statement:
- //
- // (-inf, 0): shard key = txnNumber * -1
- // (0, MAX_TRANSACTIONS): shard key = txnNumber
- // (MAX_TRANSACTIONS, +inf): shard key = txnNumber + MAX_TRANSACTIONS
- //
- // So, if the test ever exceeds txnNumber transactions, statements that are meant to target the
- // middle chunk will instead target the highest chunk. To fix this, increase MAX_TRANSACTIONS.
- const MAX_TRANSACTIONS = 10000;
-
- // Create a sharded collection with a chunk on each shard:
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: MAX_TRANSACTIONS}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard0.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: MAX_TRANSACTIONS}, to: st.shard2.shardName}));
-
- // Insert something into each chunk so that a multi-update actually results in a write on each
- // shard (otherwise the shard may remain read-only). This also ensures all the routers and
- // shards have fresh routing table caches, so they do not need to be refreshed separately.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -1 * MAX_TRANSACTIONS}, {_id: 0}, {_id: MAX_TRANSACTIONS}]
- }));
-
- let lsid = {id: UUID()};
- let txnNumber = 1;
-
- const readShard0 = txnNumber => {
- return {find: collName, filter: {_id: (-1 * txnNumber)}};
+'use strict';
+
+load("jstests/libs/write_concern_util.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Waits for the given log to appear a number of times in the shell's rawMongoProgramOutput.
+// Loops because it is not guaranteed the program output will immediately contain all lines
+// logged at an earlier wall clock time.
+function waitForLog(logLine, times) {
+ assert.soon(function() {
+ const matches = rawMongoProgramOutput().match(new RegExp(logLine, "g")) || [];
+ return matches.length === times;
+ }, 'Failed to find "' + logLine + '" logged ' + times + ' times');
+}
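+// For example, waitForLog("Committing single-shard transaction", 2) blocks until that line
+// has been logged twice, which the main loop below relies on after running commit and then
+// retrying it.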
+
+const addTxnFields = function(command, lsid, txnNumber, startTransaction) {
+ let txnFields = {
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
};
-
- const readShard1 = txnNumber => {
- return {find: collName, filter: {_id: txnNumber}};
- };
-
- const readShard2 = txnNumber => {
- return {find: collName, filter: {_id: (MAX_TRANSACTIONS + txnNumber)}};
- };
-
- const readAllShards = () => {
- return {find: collName};
- };
-
- const writeShard0 = txnNumber => {
- return {
- update: collName,
- updates: [
- {q: {_id: (txnNumber * -1)}, u: {_id: (txnNumber * -1), updated: 1}, upsert: true}
- ],
- };
+ if (startTransaction) {
+ txnFields.startTransaction = true;
+ }
+ return Object.assign({}, command, txnFields);
+};
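+// For example (illustration): addTxnFields({find: "foo"}, lsid, 7, true) returns
+//   {find: "foo", lsid: lsid, txnNumber: NumberLong(7), stmtId: NumberInt(0),
+//    autocommit: false, startTransaction: true}.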
+
+const defaultCommitCommand = {
+ commitTransaction: 1,
+ writeConcern: {w: "majority", wtimeout: 6000}
+};
+const noop = () => {};
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// TODO (SERVER-37364): Uncomment this line; otherwise, the coordinator will wait too long to
+// time out waiting for votes and the test will time out.
+// Lower the transaction timeout, since this test exercises cases where the coordinator should
+// time out collecting prepare votes.
+// TestData.transactionLifetimeLimitSeconds = 30;
+
+let st = new ShardingTest({
+ shards: 3,
+ // Create shards with more than one node because we test for writeConcern majority failing.
+ config: 1,
+ other: {
+ mongosOptions: {verbose: 3},
+ rs0: {nodes: [{}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{}, {rsConfig: {priority: 0}}]},
+ rs2: {nodes: [{}, {rsConfig: {priority: 0}}]},
+ },
+});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
+
+// Create a "dummy" collection for doing noop writes to advance shard's last applied OpTimes.
+assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
+
+// The test uses three shards with one chunk each in order to control which shards are targeted
+// for each statement:
+//
+// (-inf, 0): shard key = txnNumber * -1
+// (0, MAX_TRANSACTIONS): shard key = txnNumber
+// (MAX_TRANSACTIONS, +inf): shard key = txnNumber + MAX_TRANSACTIONS
+//
+// So, if the test ever runs more than MAX_TRANSACTIONS transactions, statements that are
+// meant to target the middle chunk will instead target the highest chunk. To fix this,
+// increase MAX_TRANSACTIONS.
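+// For example, with MAX_TRANSACTIONS = 10000, a statement built for txnNumber 7 targets
+// shard key -7 (shard0), 7 (shard1), or 10007 (shard2).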
+const MAX_TRANSACTIONS = 10000;
+
+// Create a sharded collection with a chunk on each shard:
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: MAX_TRANSACTIONS}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard0.shardName}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: MAX_TRANSACTIONS}, to: st.shard2.shardName}));
+
+// Insert something into each chunk so that a multi-update actually results in a write on each
+// shard (otherwise the shard may remain read-only). This also ensures all the routers and
+// shards have fresh routing table caches, so they do not need to be refreshed separately.
+assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -1 * MAX_TRANSACTIONS}, {_id: 0}, {_id: MAX_TRANSACTIONS}]
+}));
+
+let lsid = {id: UUID()};
+let txnNumber = 1;
+
+const readShard0 = txnNumber => {
+ return {find: collName, filter: {_id: (-1 * txnNumber)}};
+};
+
+const readShard1 = txnNumber => {
+ return {find: collName, filter: {_id: txnNumber}};
+};
+
+const readShard2 = txnNumber => {
+ return {find: collName, filter: {_id: (MAX_TRANSACTIONS + txnNumber)}};
+};
+
+const readAllShards = () => {
+ return {find: collName};
+};
+
+const writeShard0 = txnNumber => {
+ return {
+ update: collName,
+ updates:
+ [{q: {_id: (txnNumber * -1)}, u: {_id: (txnNumber * -1), updated: 1}, upsert: true}],
};
+};
- const writeShard1 = txnNumber => {
- return {
- update: collName,
- updates: [{q: {_id: txnNumber}, u: {_id: txnNumber, updated: 1}, upsert: true}],
- };
+const writeShard1 = txnNumber => {
+ return {
+ update: collName,
+ updates: [{q: {_id: txnNumber}, u: {_id: txnNumber, updated: 1}, upsert: true}],
};
-
- const writeShard2 = txnNumber => {
- return {
- update: collName,
- updates: [{
- q: {_id: (txnNumber + MAX_TRANSACTIONS)},
- u: {_id: (txnNumber + MAX_TRANSACTIONS), updated: 1},
- upsert: true
- }],
- };
+};
+
+const writeShard2 = txnNumber => {
+ return {
+ update: collName,
+ updates: [{
+ q: {_id: (txnNumber + MAX_TRANSACTIONS)},
+ u: {_id: (txnNumber + MAX_TRANSACTIONS), updated: 1},
+ upsert: true
+ }],
};
+};
- const writeAllShards = () => {
- return {
- update: collName,
- updates: [{q: {}, u: {$inc: {updated: 1}}, multi: true}],
- };
+const writeAllShards = () => {
+ return {
+ update: collName,
+ updates: [{q: {}, u: {$inc: {updated: 1}}, multi: true}],
};
-
- // For each transaction type, contains the list of statements for that type.
- const transactionTypes = {
- readOnlySingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
- return [readShard0(txnNumber)];
- },
- readOnlySingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
- return [readShard0(txnNumber), readShard0(txnNumber)];
+};
+
+// For each transaction type, contains the list of statements for that type.
+const transactionTypes = {
+ readOnlySingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
+ return [readShard0(txnNumber)];
+ },
+ readOnlySingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
+ return [readShard0(txnNumber), readShard0(txnNumber)];
+ },
+ readOnlyMultiShardSingleStatementExpectReadOnlyCommit: txnNumber => {
+ return [readAllShards(txnNumber)];
+ },
+ readOnlyMultiShardMultiStatementExpectReadOnlyCommit: txnNumber => {
+ return [readShard0(txnNumber), readShard1(txnNumber), readShard2(txnNumber)];
+ },
+ writeSingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
+ return [writeShard0(txnNumber)];
+ },
+ writeSingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
+ return [writeShard0(txnNumber), writeShard0(txnNumber)];
+ },
+ writeMultiShardSingleStatementExpectTwoPhaseCommit: txnNumber => {
+ return [writeAllShards(txnNumber)];
+ },
+ writeMultiShardMultiStatementExpectTwoPhaseCommit: txnNumber => {
+ return [writeShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ },
+ readWriteSingleShardExpectSingleShardCommit: txnNumber => {
+ return [readShard0(txnNumber), writeShard0(txnNumber)];
+ },
+ writeReadSingleShardExpectSingleShardCommit: txnNumber => {
+ return [writeShard0(txnNumber), readShard0(txnNumber)];
+ },
+ readOneShardWriteOtherShardExpectSingleWriteShardCommit: txnNumber => {
+ return [readShard0(txnNumber), writeShard1(txnNumber)];
+ },
+ writeOneShardReadOtherShardExpectSingleWriteShardCommit: txnNumber => {
+ return [writeShard0(txnNumber), readShard1(txnNumber)];
+ },
+ readOneShardWriteTwoOtherShardsExpectTwoPhaseCommit: txnNumber => {
+ return [readShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ },
+ writeTwoShardsReadOneOtherShardExpectTwoPhaseCommit: txnNumber => {
+ return [writeShard0(txnNumber), writeShard1(txnNumber), readShard2(txnNumber)];
+ },
+};
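+// Each type name encodes the commit protocol the test expects; the main loop below matches
+// the name against the corresponding "Committing ..." log line via waitForLog().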
+
+const failureModes = {
+ noFailures: {
+ beforeStatements: noop,
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields(defaultCommitCommand, lsid, txnNumber);
},
- readOnlyMultiShardSingleStatementExpectReadOnlyCommit: txnNumber => {
- return [readAllShards(txnNumber)];
+ checkCommitResult: (res) => {
+ // Commit should return ok without writeConcern error
+ assert.commandWorked(res);
+ assert.eq(null, res.errorLabels);
},
- readOnlyMultiShardMultiStatementExpectReadOnlyCommit: txnNumber => {
- return [readShard0(txnNumber), readShard1(txnNumber), readShard2(txnNumber)];
+ cleanUp: noop,
+ },
+ participantStepsDownBeforeClientSendsCommit: {
+ beforeStatements: noop,
+ beforeCommit: () => {
+ // Participant primary steps down.
+ assert.commandWorked(
+ st.shard0.adminCommand({replSetStepDown: 1 /* stepDownSecs */, force: true}));
},
- writeSingleShardSingleStatementExpectSingleShardCommit: txnNumber => {
- return [writeShard0(txnNumber)];
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields(defaultCommitCommand, lsid, txnNumber);
},
- writeSingleShardMultiStatementExpectSingleShardCommit: txnNumber => {
- return [writeShard0(txnNumber), writeShard0(txnNumber)];
+ checkCommitResult: (res) => {
+ // Commit should return NoSuchTransaction.
+ assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+ assert.eq(["TransientTransactionError"], res.errorLabels);
},
- writeMultiShardSingleStatementExpectTwoPhaseCommit: txnNumber => {
- return [writeAllShards(txnNumber)];
+ cleanUp: () => {
+ st.rs0.awaitNodesAgreeOnPrimary();
},
- writeMultiShardMultiStatementExpectTwoPhaseCommit: txnNumber => {
- return [writeShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ },
+ participantCannotMajorityCommitWritesClientSendsWriteConcernMajority: {
+ beforeStatements: () => {
+ // Participant cannot majority commit writes.
+ stopServerReplication(st.rs0.getSecondaries());
+
+ // Do a write on rs0 through the router outside the transaction to ensure the
+ // transaction will choose a read time that has not been majority committed.
+ assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
},
- readWriteSingleShardExpectSingleShardCommit: txnNumber => {
- return [readShard0(txnNumber), writeShard0(txnNumber)];
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields(defaultCommitCommand, lsid, txnNumber);
},
- writeReadSingleShardExpectSingleShardCommit: txnNumber => {
- return [writeShard0(txnNumber), readShard0(txnNumber)];
+ checkCommitResult: (res) => {
+ // Commit should return ok with a writeConcernError with wtimeout.
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ checkWriteConcernTimedOut(res);
+ assert.eq(null, res.errorLabels);
},
- readOneShardWriteOtherShardExpectSingleWriteShardCommit: txnNumber => {
- return [readShard0(txnNumber), writeShard1(txnNumber)];
+ cleanUp: () => {
+ restartServerReplication(st.rs0.getSecondaries());
},
- writeOneShardReadOtherShardExpectSingleWriteShardCommit: txnNumber => {
- return [writeShard0(txnNumber), readShard1(txnNumber)];
+ },
+ participantCannotMajorityCommitWritesClientSendsWriteConcern1: {
+ beforeStatements: () => {
+ // Participant cannot majority commit writes.
+ stopServerReplication(st.rs0.getSecondaries());
+
+ // Do a write on rs0 through the router outside the transaction to ensure the
+ // transaction will choose a read time that has not been majority committed.
+ assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
},
- readOneShardWriteTwoOtherShardsExpectTwoPhaseCommit: txnNumber => {
- return [readShard0(txnNumber), writeShard1(txnNumber), writeShard2(txnNumber)];
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ return addTxnFields({commitTransaction: 1, writeConcern: {w: 1}}, lsid, txnNumber);
},
- writeTwoShardsReadOneOtherShardExpectTwoPhaseCommit: txnNumber => {
- return [writeShard0(txnNumber), writeShard1(txnNumber), readShard2(txnNumber)];
- },
- };
-
- const failureModes = {
- noFailures: {
- beforeStatements: noop,
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields(defaultCommitCommand, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok without writeConcern error
- assert.commandWorked(res);
- assert.eq(null, res.errorLabels);
- },
- cleanUp: noop,
+ checkCommitResult: (res) => {
+ // Commit should return ok without writeConcern error
+ assert.commandWorked(res);
+ assert.eq(null, res.errorLabels);
},
- participantStepsDownBeforeClientSendsCommit: {
- beforeStatements: noop,
- beforeCommit: () => {
- // Participant primary steps down.
- assert.commandWorked(
- st.shard0.adminCommand({replSetStepDown: 1 /* stepDownSecs */, force: true}));
- },
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields(defaultCommitCommand, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return NoSuchTransaction.
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert.eq(["TransientTransactionError"], res.errorLabels);
- },
- cleanUp: () => {
- st.rs0.awaitNodesAgreeOnPrimary();
- },
+ cleanUp: () => {
+ restartServerReplication(st.rs0.getSecondaries());
},
- participantCannotMajorityCommitWritesClientSendsWriteConcernMajority: {
- beforeStatements: () => {
- // Participant cannot majority commit writes.
- stopServerReplication(st.rs0.getSecondaries());
-
- // Do a write on rs0 through the router outside the transaction to ensure the
- // transaction will choose a read time that has not been majority committed.
- assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
- },
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields(defaultCommitCommand, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok with a writeConcernError with wtimeout.
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- checkWriteConcernTimedOut(res);
- assert.eq(null, res.errorLabels);
- },
- cleanUp: () => {
- restartServerReplication(st.rs0.getSecondaries());
- },
+ },
+ clientSendsInvalidWriteConcernOnCommit: {
+ beforeStatements: noop,
+ beforeCommit: noop,
+ getCommitCommand: (lsid, txnNumber) => {
+ // Client sends invalid writeConcern on commit.
+ return addTxnFields(
+ {commitTransaction: 1, writeConcern: {w: "invalid"}}, lsid, txnNumber);
},
- participantCannotMajorityCommitWritesClientSendsWriteConcern1: {
- beforeStatements: () => {
- // Participant cannot majority commit writes.
- stopServerReplication(st.rs0.getSecondaries());
-
- // Do a write on rs0 through the router outside the transaction to ensure the
- // transaction will choose a read time that has not been majority committed.
- assert.commandWorked(st.s.getDB(dbName).getCollection("dummy").insert({dummy: 1}));
- },
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- return addTxnFields({commitTransaction: 1, writeConcern: {w: 1}}, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok without writeConcern error
- assert.commandWorked(res);
- assert.eq(null, res.errorLabels);
- },
- cleanUp: () => {
- restartServerReplication(st.rs0.getSecondaries());
- },
+ checkCommitResult: (res) => {
+ // Commit should return ok with writeConcernError without wtimeout.
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assertWriteConcernError(res);
+ assert.eq(ErrorCodes.UnknownReplWriteConcern, res.writeConcernError.code);
+ assert.eq(null, res.writeConcernError.errInfo); // errInfo only set for wtimeout
+ assert.eq(null, res.errorLabels);
},
- clientSendsInvalidWriteConcernOnCommit: {
- beforeStatements: noop,
- beforeCommit: noop,
- getCommitCommand: (lsid, txnNumber) => {
- // Client sends invalid writeConcern on commit.
- return addTxnFields(
- {commitTransaction: 1, writeConcern: {w: "invalid"}}, lsid, txnNumber);
- },
- checkCommitResult: (res) => {
- // Commit should return ok with writeConcernError without wtimeout.
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assertWriteConcernError(res);
- assert.eq(ErrorCodes.UnknownReplWriteConcern, res.writeConcernError.code);
- assert.eq(null, res.writeConcernError.errInfo); // errInfo only set for wtimeout
- assert.eq(null, res.errorLabels);
- },
- cleanUp: noop,
- },
- };
+ cleanUp: noop,
+ },
+};
+
+for (const failureModeName in failureModes) {
+ for (const type in transactionTypes) {
+ // TODO (SERVER-37364): Unblacklist these test cases once the coordinator returns the
+ // decision as soon as the decision is made. At the moment, the coordinator makes an
+ // abort decision after timing out waiting for votes, but coordinateCommitTransaction
+ // hangs because it waits for the decision to be majority-ack'd by all participants,
+ // which can't happen while a participant can't majority commit writes.
+ if (failureModeName.includes("participantCannotMajorityCommitWrites") &&
+ type.includes("ExpectTwoPhaseCommit")) {
+ jsTest.log(
+ `${failureModeName} with ${type} is skipped until SERVER-37364 is implemented`);
+ continue;
+ }
- for (const failureModeName in failureModes) {
- for (const type in transactionTypes) {
- // TODO (SERVER-37364): Unblacklist these test cases once the coordinator returns the
- // decision as soon as the decision is made. At the moment, the coordinator makes an
- // abort decision after timing out waiting for votes, but coordinateCommitTransaction
- // hangs because it waits for the decision to be majority-ack'd by all participants,
- // which can't happen while a participant can't majority commit writes.
- if (failureModeName.includes("participantCannotMajorityCommitWrites") &&
- type.includes("ExpectTwoPhaseCommit")) {
- jsTest.log(
- `${failureModeName} with ${type} is skipped until SERVER-37364 is implemented`);
- continue;
- }
-
- txnNumber++;
- assert.lt(txnNumber,
- MAX_TRANSACTIONS,
- "Test exceeded maximum number of transactions allowable by the test's chunk" +
- " distribution created during the test setup. Please increase" +
- " MAX_TRANSACTIONS in the test.");
-
- jsTest.log(`Testing ${failureModeName} with ${type} at txnNumber ${txnNumber}`);
-
- const failureMode = failureModes[failureModeName];
-
- // Run the statements.
- failureMode.beforeStatements();
- let startTransaction = true;
- transactionTypes[type](txnNumber).forEach(command => {
- assert.commandWorked(st.s.getDB(dbName).runCommand(
- addTxnFields(command, lsid, txnNumber, startTransaction)));
- startTransaction = false;
- });
-
- // Run commit.
- const commitCmd = failureMode.getCommitCommand(lsid, txnNumber);
- failureMode.beforeCommit();
- const commitRes = st.s.adminCommand(commitCmd);
- failureMode.checkCommitResult(commitRes);
-
- // Re-running commit should return the same response.
- const commitRetryRes = st.s.adminCommand(commitCmd);
- failureMode.checkCommitResult(commitRetryRes);
-
- if (type.includes("ExpectSingleShardCommit")) {
- waitForLog("Committing single-shard transaction", 2);
- } else if (type.includes("ExpectReadOnlyCommit")) {
- waitForLog("Committing read-only transaction", 2);
- } else if (type.includes("ExpectSingleWriteShardCommit")) {
- waitForLog("Committing single-write-shard transaction", 2);
- } else if (type.includes("ExpectTwoPhaseCommit")) {
- waitForLog("Committing using two-phase commit", 2);
- } else {
- assert(false, `Unknown transaction type: ${type}`);
- }
-
- clearRawMongoProgramOutput();
-
- failureMode.cleanUp();
+ txnNumber++;
+ assert.lt(txnNumber,
+ MAX_TRANSACTIONS,
+ "Test exceeded maximum number of transactions allowable by the test's chunk" +
+ " distribution created during the test setup. Please increase" +
+ " MAX_TRANSACTIONS in the test.");
+
+ jsTest.log(`Testing ${failureModeName} with ${type} at txnNumber ${txnNumber}`);
+
+ const failureMode = failureModes[failureModeName];
+
+ // Run the statements.
+ failureMode.beforeStatements();
+ let startTransaction = true;
+ transactionTypes[type](txnNumber).forEach(command => {
+ assert.commandWorked(st.s.getDB(dbName).runCommand(
+ addTxnFields(command, lsid, txnNumber, startTransaction)));
+ startTransaction = false;
+ });
+
+ // Run commit.
+ const commitCmd = failureMode.getCommitCommand(lsid, txnNumber);
+ failureMode.beforeCommit();
+ const commitRes = st.s.adminCommand(commitCmd);
+ failureMode.checkCommitResult(commitRes);
+
+ // Re-running commit should return the same response.
+ const commitRetryRes = st.s.adminCommand(commitCmd);
+ failureMode.checkCommitResult(commitRetryRes);
+
+ if (type.includes("ExpectSingleShardCommit")) {
+ waitForLog("Committing single-shard transaction", 2);
+ } else if (type.includes("ExpectReadOnlyCommit")) {
+ waitForLog("Committing read-only transaction", 2);
+ } else if (type.includes("ExpectSingleWriteShardCommit")) {
+ waitForLog("Committing single-write-shard transaction", 2);
+ } else if (type.includes("ExpectTwoPhaseCommit")) {
+ waitForLog("Committing using two-phase commit", 2);
+ } else {
+ assert(false, `Unknown transaction type: ${type}`);
}
- }
- st.stop();
+ clearRawMongoProgramOutput();
+
+ failureMode.cleanUp();
+ }
+}
+st.stop();
})();
diff --git a/jstests/sharding/txn_recover_decision_using_recovery_router.js b/jstests/sharding/txn_recover_decision_using_recovery_router.js
index 0c5d1cf204c..d148c0fdfbf 100644
--- a/jstests/sharding/txn_recover_decision_using_recovery_router.js
+++ b/jstests/sharding/txn_recover_decision_using_recovery_router.js
@@ -9,559 +9,550 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
- load("jstests/libs/write_concern_util.js");
-
- // The test modifies config.transactions, which must be done outside of a session.
- TestData.disableImplicitSessions = true;
-
- // Reducing this from the resmoke default, which is several hours, so that tests that rely on a
- // transaction coordinator being canceled after a timeout happen in a reasonable amount of time.
- TestData.transactionLifetimeLimitSeconds = 15;
-
- const readFromShard0 = function({lsid, txnNumber, startTransaction}) {
- let findDocumentOnShard0Command = {
- find: 'user',
- filter: {x: -1},
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- findDocumentOnShard0Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard0Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/libs/write_concern_util.js");
+
+// The test modifies config.transactions, which must be done outside of a session.
+TestData.disableImplicitSessions = true;
+
+// Reduce this from the resmoke default, which is several hours, so that tests that rely on
+// a transaction coordinator being canceled after a timeout finish in a reasonable amount of
+// time.
+TestData.transactionLifetimeLimitSeconds = 15;
+
+const readFromShard0 = function({lsid, txnNumber, startTransaction}) {
+ let findDocumentOnShard0Command = {
+ find: 'user',
+ filter: {x: -1},
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const readFromShard1 = function({lsid, txnNumber, startTransaction}) {
- let findDocumentOnShard1Command = {
- find: 'user',
- filter: {x: 1},
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- findDocumentOnShard1Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard1Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+ if (startTransaction) {
+ findDocumentOnShard0Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard0Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const readFromShard1 = function({lsid, txnNumber, startTransaction}) {
+ let findDocumentOnShard1Command = {
+ find: 'user',
+ filter: {x: 1},
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const writeToShard0 = function({lsid, txnNumber, startTransaction}) {
- const updateDocumentOnShard0 = {
- q: {x: -1},
- u: {"$set": {lastTxnNumber: txnNumber}},
- upsert: true
- };
-
- let updateDocumentOnShard0Command = {
- update: 'user',
- updates: [updateDocumentOnShard0],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- updateDocumentOnShard0Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard0Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+ if (startTransaction) {
+ findDocumentOnShard1Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(findDocumentOnShard1Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const writeToShard0 = function({lsid, txnNumber, startTransaction}) {
+ const updateDocumentOnShard0 = {
+ q: {x: -1},
+ u: {"$set": {lastTxnNumber: txnNumber}},
+ upsert: true
};
- const writeToShard1 = function({lsid, txnNumber, startTransaction}) {
- const updateDocumentOnShard1 = {
- q: {x: 1},
- u: {"$set": {lastTxnNumber: txnNumber}},
- upsert: true
- };
-
- let updateDocumentOnShard1Command = {
- update: 'user',
- updates: [updateDocumentOnShard1],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- };
-
- if (startTransaction) {
- updateDocumentOnShard1Command.startTransaction = true;
- }
-
- let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard1Command));
- assert.neq(null, res.recoveryToken);
- return res.recoveryToken;
+ let updateDocumentOnShard0Command = {
+ update: 'user',
+ updates: [updateDocumentOnShard0],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const startNewSingleShardReadOnlyTransaction = function() {
- const recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
- return recoveryToken;
+ if (startTransaction) {
+ updateDocumentOnShard0Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard0Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const writeToShard1 = function({lsid, txnNumber, startTransaction}) {
+ const updateDocumentOnShard1 = {
+ q: {x: 1},
+ u: {"$set": {lastTxnNumber: txnNumber}},
+ upsert: true
};
- const startNewSingleShardWriteTransaction = function() {
- const recoveryToken = writeToShard0({lsid, txnNumber, startTransaction: true});
- assert.neq(null, recoveryToken.recoveryShardId);
- return recoveryToken;
+ let updateDocumentOnShard1Command = {
+ update: 'user',
+ updates: [updateDocumentOnShard1],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
};
- const startNewMultiShardReadOnlyTransaction = function() {
- let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
+ if (startTransaction) {
+ updateDocumentOnShard1Command.startTransaction = true;
+ }
+
+ let res = assert.commandWorked(testDB.runCommand(updateDocumentOnShard1Command));
+ assert.neq(null, res.recoveryToken);
+ return res.recoveryToken;
+};
+
+const startNewSingleShardReadOnlyTransaction = function() {
+ const recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+ return recoveryToken;
+};
+
+const startNewSingleShardWriteTransaction = function() {
+ const recoveryToken = writeToShard0({lsid, txnNumber, startTransaction: true});
+ assert.neq(null, recoveryToken.recoveryShardId);
+ return recoveryToken;
+};
+
+const startNewMultiShardReadOnlyTransaction = function() {
+ let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ recoveryToken = readFromShard1({lsid, txnNumber});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ return recoveryToken;
+};
+
+const startNewSingleWriteShardTransaction = function() {
+ let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ recoveryToken = writeToShard1({lsid, txnNumber});
+ assert.neq(null, recoveryToken.recoveryShardId);
+
+ return recoveryToken;
+};
+
+const startNewMultiShardWriteTransaction = function() {
+ let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
+ assert.eq(null, recoveryToken.recoveryShardId);
+
+ // Write to shard 1, not shard 0, otherwise the recovery shard will still be the same as the
+ // coordinator shard.
+ recoveryToken = writeToShard1({lsid, txnNumber});
+ assert.neq(null, recoveryToken.recoveryShardId);
+
+ recoveryToken = writeToShard0({lsid, txnNumber});
+ assert.neq(null, recoveryToken.recoveryShardId);
+
+ return recoveryToken;
+};
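+
+// Note on the start* helpers above: recoveryShardId stays unset while the transaction
+// has only performed reads; once any participant performs a write, the returned token
+// names a recovery shard that can later be asked for the commit decision.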
+
+const abortTransactionOnShardDirectly = function(shardPrimaryConn, lsid, txnNumber) {
+ assert.commandWorked(shardPrimaryConn.adminCommand(
+ {abortTransaction: 1, lsid: lsid, txnNumber: NumberLong(txnNumber), autocommit: false}));
+};
+
+const sendCommitViaOriginalMongos = function(lsid, txnNumber, recoveryToken) {
+ return st.s0.getDB('admin').runCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ recoveryToken: recoveryToken
+ });
+};
+
+const sendCommitViaRecoveryMongos = function(lsid, txnNumber, recoveryToken, writeConcern) {
+ writeConcern = writeConcern || {};
+ return st.s1.getDB('admin').runCommand(Object.merge({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ recoveryToken: recoveryToken
+ },
+ writeConcern));
+};
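+
+// st.s0 is the router the transaction statements go through; st.s1 is a second router
+// that never saw the transaction and must rely on the recoveryToken to learn the
+// outcome.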
+
+let st =
+ new ShardingTest({shards: 2, rs: {nodes: 2}, mongos: 2, other: {mongosOptions: {verbose: 3}}});
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.name);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+
+// Insert documents to prime mongos and shards with the latest sharding metadata.
+let testDB = st.s0.getDB('test');
+assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
+
+const lsid = {
+ id: UUID()
+};
+let txnNumber = 0;
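+
+// For illustration only, a recovery attempt through st.s1 has this shape (values here
+// are placeholders, not taken from the test):
+//   {commitTransaction: 1, lsid: {id: UUID("...")}, txnNumber: NumberLong(1),
+//    autocommit: false, recoveryToken: {recoveryShardId: "..."}}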
+
+//
+// Generic test cases that are agnostic as to the transaction type
+//
- recoveryToken = readFromShard1({lsid, txnNumber});
- assert.eq(null, recoveryToken.recoveryShardId);
+(function() {
+jsTest.log("Testing recovering transaction with lower number than latest");
+++txnNumber;
- return recoveryToken;
- };
+const oldTxnNumber = txnNumber;
+const oldRecoveryToken = startNewMultiShardWriteTransaction();
- const startNewSingleWriteShardTransaction = function() {
- let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
+txnNumber++;
+const newRecoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, newRecoveryToken));
- recoveryToken = writeToShard1({lsid, txnNumber});
- assert.neq(null, recoveryToken.recoveryShardId);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, oldTxnNumber, oldRecoveryToken),
+ ErrorCodes.TransactionTooOld);
- return recoveryToken;
- };
+// The client can still recover the decision for the current transaction number.
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, newRecoveryToken));
+})();
- const startNewMultiShardWriteTransaction = function() {
- let recoveryToken = readFromShard0({lsid, txnNumber, startTransaction: true});
- assert.eq(null, recoveryToken.recoveryShardId);
+(function() {
+jsTest.log("Testing recovering transaction with higher number than latest");
+txnNumber++;
- // Write to shard 1, not shard 0, otherwise the recovery shard will still be the same as the
- // coordinator shard.
- recoveryToken = writeToShard1({lsid, txnNumber});
- assert.neq(null, recoveryToken.recoveryShardId);
+const oldTxnNumber = txnNumber;
+const oldRecoveryToken = startNewMultiShardWriteTransaction();
- recoveryToken = writeToShard0({lsid, txnNumber});
- assert.neq(null, recoveryToken.recoveryShardId);
+txnNumber++;
+const fakeRecoveryToken = {
+ recoveryShardId: st.shard0.shardName
+};
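+
+// No transaction with this higher txnNumber ever ran anywhere, so recovery through the
+// fake token must fail rather than fabricate a decision.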
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, fakeRecoveryToken),
+ ErrorCodes.NoSuchTransaction);
- return recoveryToken;
- };
+// The active transaction can still be committed.
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, oldTxnNumber, oldRecoveryToken));
+})();
- const abortTransactionOnShardDirectly = function(shardPrimaryConn, lsid, txnNumber) {
- assert.commandWorked(shardPrimaryConn.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
- };
+(function() {
+jsTest.log("Testing recovering transaction whose recovery shard forgot the transaction");
+txnNumber++;
- const sendCommitViaOriginalMongos = function(lsid, txnNumber, recoveryToken) {
- return st.s0.getDB('admin').runCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- recoveryToken: recoveryToken
- });
- };
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- const sendCommitViaRecoveryMongos = function(lsid, txnNumber, recoveryToken, writeConcern) {
- writeConcern = writeConcern || {};
- return st.s1.getDB('admin').runCommand(Object.merge({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- recoveryToken: recoveryToken
- },
- writeConcern));
- };
+assert.writeOK(st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
+
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+(function() {
+jsTest.log("Testing that a recovery node does a noop write before returning 'aborted'");
+
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+const recoveryShardReplSetTest = st.rs1;
+
+stopReplicationOnSecondaries(recoveryShardReplSetTest);
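+
+// With replication to the secondaries stopped, nothing new can become majority
+// committed, so any write the recovery node performs before responding cannot satisfy
+// {w: "majority"} and the writeConcern wait below must time out.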
+
+// Do a write on the recovery node to bump the recovery node's system last OpTime.
+recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert({dummy: 1});
+
+// While the recovery shard primary cannot majority commit writes, commitTransaction returns
+// NoSuchTransaction with a writeConcern error.
+let res = sendCommitViaRecoveryMongos(
+ lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
+assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+checkWriteConcernTimedOut(res);
+
+// Once the recovery shard primary can majority commit writes again, commitTransaction
+// returns NoSuchTransaction without a writeConcern error.
+restartReplicationOnSecondaries(recoveryShardReplSetTest);
+res = sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}});
+assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+assert.eq(null, res.writeConcernError);
+})();
+
+(function() {
+jsTest.log("Testing that a recovery node does a noop write before returning 'committed'");
+
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- let st = new ShardingTest(
- {shards: 2, rs: {nodes: 2}, mongos: 2, other: {mongosOptions: {verbose: 3}}});
+const recoveryShardReplSetTest = st.rs1;
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.name);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+stopReplicationOnSecondaries(recoveryShardReplSetTest);
- // Insert documents to prime mongos and shards with the latest sharding metadata.
- let testDB = st.s0.getDB('test');
- assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
+// Do a write on the recovery node to bump the recovery node's system last OpTime.
+recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert({dummy: 1});
- const lsid = {id: UUID()};
- let txnNumber = 0;
+// While the recovery shard primary cannot majority commit writes, commitTransaction returns
+// ok with a writeConcern error.
+let res = sendCommitViaRecoveryMongos(
+ lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+checkWriteConcernTimedOut(res);
- //
- // Generic test cases that are agnostic as to the transaction type
- //
+// Once the recovery shard primary can majority commit writes again, commitTransaction
+// returns ok without a writeConcern error.
+restartReplicationOnSecondaries(recoveryShardReplSetTest);
+assert.commandWorked(
+ sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}}));
+})();
- (function() {
- jsTest.log("Testing recovering transaction with lower number than latest");
- ++txnNumber;
+//
+// Single-shard read-only transactions
+//
- const oldTxnNumber = txnNumber;
- const oldRecoveryToken = startNewMultiShardWriteTransaction();
+(function() {
+jsTest.log("Testing recovering single-shard read-only transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewSingleShardReadOnlyTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
- txnNumber++;
- const newRecoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, newRecoveryToken));
+// A read-only transaction can still commit after reporting an abort decision.
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+}());
- assert.commandFailedWithCode(
- sendCommitViaRecoveryMongos(lsid, oldTxnNumber, oldRecoveryToken),
- ErrorCodes.TransactionTooOld);
+(function() {
+jsTest.log("Testing recovering single-shard read-only transaction that aborted");
+txnNumber++;
+const recoveryToken = startNewSingleShardReadOnlyTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
- // The client can still the recover decision for current transaction number.
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, newRecoveryToken));
- })();
+(function() {
+jsTest.log("Testing recovering single-shard read-only transaction that committed");
+txnNumber++;
+const recoveryToken = startNewSingleShardReadOnlyTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+//
+// Single-shard write transactions
+//
- (function() {
- jsTest.log("Testing recovering transaction with higher number than latest");
- txnNumber++;
+(function() {
+jsTest.log("Testing recovering single-shard write transaction that in progress");
+txnNumber++;
+const recoveryToken = startNewSingleShardWriteTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
- const oldTxnNumber = txnNumber;
- const oldRecoveryToken = startNewMultiShardWriteTransaction();
+// A write transaction fails to commit after having reported an abort decision.
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
- txnNumber++;
- const fakeRecoveryToken = {recoveryShardId: st.shard0.shardName};
- assert.commandFailedWithCode(
- sendCommitViaRecoveryMongos(lsid, txnNumber, fakeRecoveryToken),
- ErrorCodes.NoSuchTransaction);
+(function() {
+jsTest.log("Testing recovering single-shard write transaction that aborted");
+txnNumber++;
+const recoveryToken = startNewSingleShardWriteTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-shard write transaction that committed");
+txnNumber++;
+const recoveryToken = startNewSingleShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+}());
+
+//
+// Multi-shard read-only transactions
+//
+
+(function() {
+jsTest.log("Testing recovering multi-shard read-only transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewMultiShardReadOnlyTransaction();
+
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+// A read-only transaction can still commit after reporting an abort decision.
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-shard read-only transaction that aborted");
+txnNumber++;
+const recoveryToken = startNewMultiShardReadOnlyTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-shard read-only transaction that committed");
+txnNumber++;
+const recoveryToken = startNewMultiShardReadOnlyTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+//
+// Single-write-shard transactions (there are multiple participants but only one did a write)
+//
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that is in progress");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+// A write transaction fails to commit after having reported an abort decision.
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that aborted on read-only shard" +
+ " but is in progress on write shard");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that aborted on write" +
+ " shard but is in progress on read-only shard");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+abortTransactionOnShardDirectly(st.rs1.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+}());
+
+(function() {
+jsTest.log("Testing recovering single-write-shard transaction that committed");
+txnNumber++;
+const recoveryToken = startNewSingleWriteShardTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+}());
+
+//
+// Multi-write-shard transactions (there are multiple participants and more than one did writes)
+//
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction that is in progress");
+txnNumber++;
+
+// Set the transaction expiry to be very high, so we can ascertain that the recovery
+// request through the alternate router is what causes the transaction to abort.
+const getParamRes =
+ st.rs1.getPrimary().adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1});
+assert.commandWorked(getParamRes);
+assert.neq(null, getParamRes.transactionLifetimeLimitSeconds);
+const originalTransactionLifetimeLimitSeconds = getParamRes.transactionLifetimeLimitSeconds;
+
+assert.commandWorked(st.rs1.getPrimary().adminCommand(
+ {setParameter: 1, transactionLifetimeLimitSeconds: 60 * 60 * 1000 /* 1000 hours */}));
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+// A write transaction fails to commit after having reported an abort decision.
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+
+assert.commandWorked(st.rs1.getPrimary().adminCommand(
+ {setParameter: 1, transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds}));
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction that is in prepare");
+txnNumber++;
+const recoveryToken = startNewMultiShardWriteTransaction();
+
+// Ensure the coordinator will hang after putting the participants into prepare but
+// before sending the decision to the participants.
+clearRawMongoProgramOutput();
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "hangBeforeWritingDecision", mode: "alwaysOn"}));
+
+assert.commandFailedWithCode(st.s0.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ // Specify maxTimeMS to make the command return so the test can continue.
+ maxTimeMS: 3000,
+}),
+ ErrorCodes.MaxTimeMSExpired);
+
+waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
+
+// Trying to recover the decision should block because the recovery shard's participant
+// is in prepare.
+assert.commandFailedWithCode(st.s1.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ recoveryToken: recoveryToken,
+ // Specify maxTimeMS to make the command return so the test can continue.
+ maxTimeMS: 3000,
+}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Allow the transaction to complete.
+assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "hangBeforeWritingDecision", mode: "off"}));
+
+// Trying to recover the decision should now return that the transaction committed.
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
+ " coordinating an abort decision.");
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
+assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
+ ErrorCodes.NoSuchTransaction);
+})();
+
+(function() {
+jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
+ " coordinating a commit decision.");
+txnNumber++;
+
+const recoveryToken = startNewMultiShardWriteTransaction();
+assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
+assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
+})();
- // The active transaction can still be committed.
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, oldTxnNumber, oldRecoveryToken));
- })();
-
- (function() {
- jsTest.log("Testing recovering transaction whose recovery shard forgot the transaction");
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
-
- assert.writeOK(
- st.rs1.getPrimary().getDB("config").transactions.remove({}, false /* justOne */));
-
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- (function() {
- jsTest.log("Testing that a recovery node does a noop write before returning 'aborted'");
-
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- const recoveryShardReplSetTest = st.rs1;
-
- stopReplicationOnSecondaries(recoveryShardReplSetTest);
-
- // Do a write on the recovery node to bump the recovery node's system last OpTime.
- recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert(
- {dummy: 1});
-
- // While the recovery shard primary cannot majority commit writes, commitTransaction returns
- // NoSuchTransaction with a writeConcern error.
- let res = sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- checkWriteConcernTimedOut(res);
-
- // Once the recovery shard primary can majority commit writes again, commitTransaction
- // returns NoSuchTransaction without a writeConcern error.
- restartReplicationOnSecondaries(recoveryShardReplSetTest);
- res = sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}});
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert.eq(null, res.writeConcernError);
- })();
-
- (function() {
- jsTest.log("Testing that a recovery node does a noop write before returning 'committed'");
-
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
-
- const recoveryShardReplSetTest = st.rs1;
-
- stopReplicationOnSecondaries(recoveryShardReplSetTest);
-
- // Do a write on the recovery node to bump the recovery node's system last OpTime.
- recoveryShardReplSetTest.getPrimary().getDB("dummy").getCollection("dummy").insert(
- {dummy: 1});
-
- // While the recovery shard primary cannot majority commit writes, commitTransaction returns
- // ok with a writeConcern error.
- let res = sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority", wtimeout: 500}});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- checkWriteConcernTimedOut(res);
-
- // Once the recovery shard primary can majority commit writes again, commitTransaction
- // returns ok without a writeConcern error.
- restartReplicationOnSecondaries(recoveryShardReplSetTest);
- assert.commandWorked(sendCommitViaRecoveryMongos(
- lsid, txnNumber, recoveryToken, {writeConcern: {w: "majority"}}));
- })();
-
- //
- // Single-shard read-only transactions
- //
-
- (function() {
- jsTest.log("Testing recovering single-shard read-only transaction that is in progress");
- txnNumber++;
- const recoveryToken = startNewSingleShardReadOnlyTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A read-only transaction can still commit after reporting an abort decision.
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard read-only transaction that aborted");
- txnNumber++;
- const recoveryToken = startNewSingleShardReadOnlyTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard read-only transaction that committed");
- txnNumber++;
- const recoveryToken = startNewSingleShardReadOnlyTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- //
- // Single-shard write transactions
- //
-
- (function() {
- jsTest.log("Testing recovering single-shard write transaction that in progress");
- txnNumber++;
- const recoveryToken = startNewSingleShardWriteTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A write transaction fails to commit after having reported an abort decision.
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard write transaction that aborted");
- txnNumber++;
- const recoveryToken = startNewSingleShardWriteTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-shard write transaction that committed");
- txnNumber++;
- const recoveryToken = startNewSingleShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- }());
-
- //
- // Multi-shard read-only transactions
- //
-
- (function() {
- jsTest.log("Testing recovering multi-shard read-only transaction that is in progress");
- txnNumber++;
- const recoveryToken = startNewMultiShardReadOnlyTransaction();
-
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A read-only transaction can still commit after reporting an abort decision.
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-shard read-only transaction that aborted");
- txnNumber++;
- const recoveryToken = startNewMultiShardReadOnlyTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-shard read-only transaction that committed");
- txnNumber++;
- const recoveryToken = startNewMultiShardReadOnlyTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- //
- // Single-write-shard transactions (there are multiple participants but only one did a write)
- //
-
- (function() {
- jsTest.log("Testing recovering single-write-shard transaction that is in progress");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A write transaction fails to commit after having reported an abort decision.
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log(
- "Testing recovering single-write-shard transaction that aborted on read-only shard" +
- " but is in progress on write shard");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- abortTransactionOnShardDirectly(st.rs1.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-write-shard transaction that aborted on write" +
- " shard but is in progress on read-only shard");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- abortTransactionOnShardDirectly(st.rs1.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- }());
-
- (function() {
- jsTest.log("Testing recovering single-write-shard transaction that committed");
- txnNumber++;
- const recoveryToken = startNewSingleWriteShardTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- }());
-
- //
- // Multi-write-shard transactions (there are multiple participants and more than one did writes)
- //
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction that is in progress");
- txnNumber++;
-
- // Set the transaction expiry to be very high, so we can ascertain the recovery request
- // through the alternate router is what causes the transaction to abort.
- const getParamRes =
- st.rs1.getPrimary().adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1});
- assert.commandWorked(getParamRes);
- assert.neq(null, getParamRes.transactionLifetimeLimitSeconds);
- const originalTransactionLifetimeLimitSeconds = getParamRes.transactionLifetimeLimitSeconds;
-
- assert.commandWorked(st.rs1.getPrimary().adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: 60 * 60 * 1000 /* 1000 hours */}));
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- // A write transaction fails to commit after having reported an abort decision.
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
-
- assert.commandWorked(st.rs1.getPrimary().adminCommand({
- setParameter: 1,
- transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds
- }));
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction that is in prepare");
- txnNumber++;
- const recoveryToken = startNewMultiShardWriteTransaction();
-
- // Ensure the coordinator will hang after putting the participants into prepare but
- // before sending the decision to the participants.
- clearRawMongoProgramOutput();
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "hangBeforeWritingDecision", mode: "alwaysOn"}));
-
- assert.commandFailedWithCode(st.s0.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- // Specify maxTimeMS to make the command return so the test can continue.
- maxTimeMS: 3000,
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
-
- // Trying to recover the decision should block because the recovery shard's participant
- // is in prepare.
- assert.commandFailedWithCode(st.s1.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- recoveryToken: recoveryToken,
- // Specify maxTimeMS to make the command return so the test can continue.
- maxTimeMS: 3000,
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- // Allow the transaction to complete.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "hangBeforeWritingDecision", mode: "off"}));
-
- // Trying to recover the decision should now return that the transaction committed.
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
- " coordinating an abort decision.");
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- abortTransactionOnShardDirectly(st.rs0.getPrimary(), lsid, txnNumber);
- assert.commandFailedWithCode(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken),
- ErrorCodes.NoSuchTransaction);
- })();
-
- (function() {
- jsTest.log("Testing recovering multi-write-shard transaction after coordinator finished" +
- " coordinating a commit decision.");
- txnNumber++;
-
- const recoveryToken = startNewMultiShardWriteTransaction();
- assert.commandWorked(sendCommitViaOriginalMongos(lsid, txnNumber, recoveryToken));
- assert.commandWorked(sendCommitViaRecoveryMongos(lsid, txnNumber, recoveryToken));
- })();
-
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_basic.js b/jstests/sharding/txn_two_phase_commit_basic.js
index 09f4f1bf0cf..535cbe294b7 100644
--- a/jstests/sharding/txn_two_phase_commit_basic.js
+++ b/jstests/sharding/txn_two_phase_commit_basic.js
@@ -6,249 +6,250 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- let st = new ShardingTest({shards: 3, causallyConsistent: true});
-
- let coordinator = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- let expectedParticipantList =
- [participant1.shardName, participant2.shardName, coordinator.shardName];
-
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- const checkParticipantListMatches = function(
- coordinatorConn, lsid, txnNumber, expectedParticipantList) {
- let coordDoc = coordinatorConn.getDB("config")
- .getCollection("transaction_coordinators")
- .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
- assert.neq(null, coordDoc);
- assert.sameMembers(coordDoc.participants, expectedParticipantList);
- };
-
- const checkDecisionIs = function(coordinatorConn, lsid, txnNumber, expectedDecision) {
- let coordDoc = coordinatorConn.getDB("config")
- .getCollection("transaction_coordinators")
- .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
- assert.neq(null, coordDoc);
- assert.eq(expectedDecision, coordDoc.decision.decision);
- if (expectedDecision === "commit") {
- assert.neq(null, coordDoc.decision.commitTimestamp);
- } else {
- assert.eq(null, coordDoc.decision.commitTimestamp);
- }
- };
-
- const checkDocumentDeleted = function(coordinatorConn, lsid, txnNumber) {
- let coordDoc = coordinatorConn.getDB("config")
- .getCollection("transaction_coordinators")
- .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
- return null === coordDoc;
- };
-
- const runCommitThroughMongosInParallelShellExpectSuccess = function() {
- const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "}));";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const runCommitThroughMongosInParallelShellExpectAbort = function() {
- const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "})," +
- "ErrorCodes.NoSuchTransaction);";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const startSimulatingNetworkFailures = function(connArray) {
- connArray.forEach(function(conn) {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 10},
- data: {
- errorCode: ErrorCodes.NotMaster,
- failCommands:
- ["prepareTransaction", "abortTransaction", "commitTransaction"]
- }
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
- mode: {times: 5}
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
- mode: {times: 5}
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
- mode: {times: 5}
- }));
- });
- };
-
- const stopSimulatingNetworkFailures = function(connArray) {
- connArray.forEach(function(conn) {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "failCommand",
- mode: "off",
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
- mode: "off"
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
- mode: "off"
- }));
- assert.commandWorked(conn.adminCommand({
- configureFailPoint:
- "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
- mode: "off"
- }));
- });
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+let st = new ShardingTest({shards: 3, causallyConsistent: true});
+
+let coordinator = st.shard0;
+let participant1 = st.shard1;
+let participant2 = st.shard2;
+
+let expectedParticipantList =
+ [participant1.shardName, participant2.shardName, coordinator.shardName];
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
+const checkParticipantListMatches = function(
+ coordinatorConn, lsid, txnNumber, expectedParticipantList) {
+ let coordDoc = coordinatorConn.getDB("config")
+ .getCollection("transaction_coordinators")
+ .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
+ assert.neq(null, coordDoc);
+ assert.sameMembers(coordDoc.participants, expectedParticipantList);
+};
+
+const checkDecisionIs = function(coordinatorConn, lsid, txnNumber, expectedDecision) {
+ let coordDoc = coordinatorConn.getDB("config")
+ .getCollection("transaction_coordinators")
+ .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
+ assert.neq(null, coordDoc);
+ assert.eq(expectedDecision, coordDoc.decision.decision);
+ if (expectedDecision === "commit") {
+ assert.neq(null, coordDoc.decision.commitTimestamp);
+ } else {
+ assert.eq(null, coordDoc.decision.commitTimestamp);
+ }
+};
+
+const checkDocumentDeleted = function(coordinatorConn, lsid, txnNumber) {
+ let coordDoc = coordinatorConn.getDB("config")
+ .getCollection("transaction_coordinators")
+ .findOne({"_id.lsid.id": lsid.id, "_id.txnNumber": txnNumber});
+ return null === coordDoc;
+};
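+
+// Together the three checks above follow the coordinator's config.transaction_coordinators
+// document through its lifetime: participant list written first, then the decision (with
+// a commitTimestamp only on commit), and finally deletion of the document.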
+
+const runCommitThroughMongosInParallelShellExpectSuccess = function() {
+ const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "}));";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
+
+const runCommitThroughMongosInParallelShellExpectAbort = function() {
+ const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.NoSuchTransaction);";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
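+
+// For illustration, with txnNumber = 1 the success variant's generated string evaluates
+// to roughly (lsid elided):
+//   assert.commandWorked(db.adminCommand({commitTransaction: 1, lsid: {id: UUID("...")},
+//       txnNumber: NumberLong(1), stmtId: NumberInt(0), autocommit: false,}));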
+
+const startSimulatingNetworkFailures = function(connArray) {
+ connArray.forEach(function(conn) {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 10},
+ data: {
+ errorCode: ErrorCodes.NotMaster,
+ failCommands: ["prepareTransaction", "abortTransaction", "commitTransaction"]
+ }
}));
- };
-
- const testCommitProtocol = function(shouldCommit, simulateNetworkFailures) {
- jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
- " protocol with simulateNetworkFailures: " + simulateNetworkFailures);
-
- txnNumber++;
- setUp();
-
- if (!shouldCommit) {
- // Manually abort the transaction on one of the participants, so that the participant
- // fails to prepare.
- assert.commandWorked(participant2.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- }
-
- if (simulateNetworkFailures) {
- startSimulatingNetworkFailures([participant1, participant2]);
- }
-
- // Turn on failpoints so that the coordinator hangs after each write it does, so that the
- // test can check that the write happened correctly.
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
- mode: "alwaysOn",
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
+ mode: {times: 5}
}));
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
- mode: "alwaysOn",
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
+ mode: {times: 5}
}));
-
- // Run commitTransaction through a parallel shell.
- let awaitResult;
- if (shouldCommit) {
- awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
- } else {
- awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
- }
-
- // Check that the coordinator wrote the participant list.
- waitForFailpoint("Hit hangBeforeWaitingForParticipantListWriteConcern failpoint",
- txnNumber);
- checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
- mode: "off",
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
+ mode: {times: 5}
}));
+ });
+};
- // Check that the coordinator wrote the decision.
- waitForFailpoint("Hit hangBeforeWaitingForDecisionWriteConcern failpoint", txnNumber);
- checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
- checkDecisionIs(coordinator, lsid, txnNumber, (shouldCommit ? "commit" : "abort"));
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
+const stopSimulatingNetworkFailures = function(connArray) {
+ connArray.forEach(function(conn) {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "failCommand",
mode: "off",
}));
-
- // Check that the coordinator deleted its persisted state.
- awaitResult();
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic",
+ mode: "off"
+ }));
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForAbortAfterExecutingAbortLogic",
+ mode: "off"
+ }));
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "participantReturnNetworkErrorForCommitAfterExecutingCommitLogic",
+ mode: "off"
+ }));
+ });
+};
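+
+// stopSimulatingNetworkFailures turns off exactly the four failpoints that
+// startSimulatingNetworkFailures enabled.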
+
+const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+ // from the shards starting, aborting, and restarting the transaction due to needing to
+ // refresh after the transaction has started.
+ assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+};
+
+const testCommitProtocol = function(shouldCommit, simulateNetworkFailures) {
+ jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
+ " protocol with simulateNetworkFailures: " + simulateNetworkFailures);
+
+ txnNumber++;
+ setUp();
+
+ if (!shouldCommit) {
+ // Manually abort the transaction on one of the participants, so that the participant
+ // fails to prepare.
+ assert.commandWorked(participant2.adminCommand({
+ abortTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ }));
+ }
+
+ if (simulateNetworkFailures) {
+ startSimulatingNetworkFailures([participant1, participant2]);
+ }
+
+ // Turn on failpoints so that the coordinator hangs after each write it does, so that the
+ // test can check that the write happened correctly.
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
+ mode: "alwaysOn",
+ }));
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
+ mode: "alwaysOn",
+ }));
+
+ // Run commitTransaction through a parallel shell.
+ let awaitResult;
+ if (shouldCommit) {
+ awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
+ } else {
+ awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
+ }
+
+ // Check that the coordinator wrote the participant list.
+ waitForFailpoint("Hit hangBeforeWaitingForParticipantListWriteConcern failpoint", txnNumber);
+ checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForParticipantListWriteConcern",
+ mode: "off",
+ }));
+
+ // Check that the coordinator wrote the decision.
+ waitForFailpoint("Hit hangBeforeWaitingForDecisionWriteConcern failpoint", txnNumber);
+ checkParticipantListMatches(coordinator, lsid, txnNumber, expectedParticipantList);
+ checkDecisionIs(coordinator, lsid, txnNumber, (shouldCommit ? "commit" : "abort"));
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: "hangBeforeWaitingForDecisionWriteConcern",
+ mode: "off",
+ }));
+
+ // Check that the coordinator deleted its persisted state.
+ awaitResult();
+ assert.soon(function() {
+ return checkDocumentDeleted(coordinator, lsid, txnNumber);
+ });
+
+ if (simulateNetworkFailures) {
+ stopSimulatingNetworkFailures([participant1, participant2]);
+ }
+
+ // Check that the transaction committed or aborted as expected.
+ if (!shouldCommit) {
+ jsTest.log("Verify that the transaction was aborted on all shards.");
+ assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
+ } else {
+ jsTest.log("Verify that the transaction was committed on all shards.");
+ // Use assert.soon(), because although coordinateCommitTransaction currently blocks
+ // until the commit process is fully complete, it will eventually be changed to only
+ // block until the decision is *written*, at which point the test can pass the
+ // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
+ // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
assert.soon(function() {
- return checkDocumentDeleted(coordinator, lsid, txnNumber);
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
});
+ }
+
+ st.s.getDB(dbName).getCollection(collName).drop();
+};
- if (simulateNetworkFailures) {
- stopSimulatingNetworkFailures([participant1, participant2]);
- }
-
- // Check that the transaction committed or aborted as expected.
- if (!shouldCommit) {
- jsTest.log("Verify that the transaction was aborted on all shards.");
- assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
- } else {
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
- // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
- }
-
- st.s.getDB(dbName).getCollection(collName).drop();
- };
-
- testCommitProtocol(false /* test abort */, false /* no network failures */);
- testCommitProtocol(true /* test commit */, false /* no network failures */);
- testCommitProtocol(false /* test abort */, true /* with network failures */);
- testCommitProtocol(true /* test commit */, true /* with network failures */);
-
- st.stop();
+testCommitProtocol(false /* test abort */, false /* no network failures */);
+testCommitProtocol(true /* test commit */, false /* no network failures */);
+testCommitProtocol(false /* test abort */, true /* with network failures */);
+testCommitProtocol(true /* test commit */, true /* with network failures */);
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js b/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
index 1e48f4f5ad5..b51d2c0c8ea 100644
--- a/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
+++ b/jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
@@ -4,62 +4,63 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "foo";
+const dbName = "test";
+const collName = "foo";
- const txnNumber = 0;
- const lsid = {id: UUID()};
+const txnNumber = 0;
+const lsid = {
+ id: UUID()
+};
- const checkCoordinatorCommandsRejected = function(conn, expectedErrorCode) {
- assert.commandFailedWithCode(conn.adminCommand({
- coordinateCommitTransaction: 1,
- participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(1),
- autocommit: false
- }),
- expectedErrorCode);
- };
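+// Runs coordinateCommitTransaction with a dummy participant list against 'conn' and asserts
+// that it fails with 'expectedErrorCode'.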
+const checkCoordinatorCommandsRejected = function(conn, expectedErrorCode) {
+ assert.commandFailedWithCode(conn.adminCommand({
+ coordinateCommitTransaction: 1,
+ participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(1),
+ autocommit: false
+ }),
+ expectedErrorCode);
+};
- const checkCoordinatorCommandsAgainstNonAdminDbRejected = function(conn) {
- const testDB = conn.getDB(dbName);
- assert.commandFailedWithCode(testDB.runCommand({
- coordinateCommitTransaction: 1,
- participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false
- }),
- ErrorCodes.Unauthorized);
- };
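+// Runs coordinateCommitTransaction against the non-admin database 'dbName' and asserts that it
+// is rejected as Unauthorized, since coordinator commands are accepted only against the admin
+// database.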
+const checkCoordinatorCommandsAgainstNonAdminDbRejected = function(conn) {
+ const testDB = conn.getDB(dbName);
+ assert.commandFailedWithCode(testDB.runCommand({
+ coordinateCommitTransaction: 1,
+ participants: [{shardId: "dummy1"}, {shardId: "dummy2"}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false
+ }),
+ ErrorCodes.Unauthorized);
+};
- const st = new ShardingTest({shards: 1});
+const st = new ShardingTest({shards: 1});
- jsTest.log("Verify that coordinator commands are only accepted against the admin database");
- checkCoordinatorCommandsAgainstNonAdminDbRejected(st.rs0.getPrimary());
- checkCoordinatorCommandsAgainstNonAdminDbRejected(st.configRS.getPrimary());
+jsTest.log("Verify that coordinator commands are only accepted against the admin database");
+checkCoordinatorCommandsAgainstNonAdminDbRejected(st.rs0.getPrimary());
+checkCoordinatorCommandsAgainstNonAdminDbRejected(st.configRS.getPrimary());
- st.stop();
+st.stop();
- jsTest.log(
- "Verify that a shard server that has not yet been added to a cluster does not accept coordinator commands");
- const shardsvrReplSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
- shardsvrReplSet.startSet();
- shardsvrReplSet.initiate();
- checkCoordinatorCommandsRejected(shardsvrReplSet.getPrimary(),
- ErrorCodes.ShardingStateNotInitialized);
- shardsvrReplSet.stopSet();
-
- jsTest.log(
- "Verify that a non-config server, non-shard server does not accept coordinator commands");
- const standaloneReplSet = new ReplSetTest({nodes: 1});
- standaloneReplSet.startSet();
- standaloneReplSet.initiate();
- checkCoordinatorCommandsRejected(standaloneReplSet.getPrimary(), ErrorCodes.NoShardingEnabled);
- standaloneReplSet.stopSet();
+jsTest.log(
+ "Verify that a shard server that has not yet been added to a cluster does not accept coordinator commands");
+const shardsvrReplSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
+shardsvrReplSet.startSet();
+shardsvrReplSet.initiate();
+checkCoordinatorCommandsRejected(shardsvrReplSet.getPrimary(),
+ ErrorCodes.ShardingStateNotInitialized);
+shardsvrReplSet.stopSet();
+jsTest.log(
+ "Verify that a non-config server, non-shard server does not accept coordinator commands");
+const standaloneReplSet = new ReplSetTest({nodes: 1});
+standaloneReplSet.startSet();
+standaloneReplSet.initiate();
+checkCoordinatorCommandsRejected(standaloneReplSet.getPrimary(), ErrorCodes.NoShardingEnabled);
+standaloneReplSet.stopSet();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js b/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
index f976218f7d5..ef5c42665b6 100644
--- a/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
+++ b/jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
@@ -16,130 +16,147 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
- load('jstests/libs/write_concern_util.js');
-
- const rs0_opts = {nodes: [{}, {}]};
- // Start the participant replSet with one node as a priority 0 node to avoid flip flopping.
- const rs1_opts = {nodes: [{}, {rsConfig: {priority: 0}}]};
- const st = new ShardingTest(
- {shards: {rs0: rs0_opts, rs1: rs1_opts}, mongos: 1, causallyConsistent: true});
-
- // Create a sharded collection:
- // shard0: [-inf, 0)
- // shard1: [0, inf)
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.name);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
- assert.commandWorked(
- st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
-
- const testDB = st.s0.getDB('test');
- assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
-
- const coordinatorReplSetTest = st.rs0;
- const participantReplSetTest = st.rs1;
-
- let coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
- let participantPrimaryConn = participantReplSetTest.getPrimary();
-
- const lsid = {id: UUID()};
- let txnNumber = 0;
- const participantList = [{shardId: st.shard0.shardName}, {shardId: st.shard1.shardName}];
-
- // Build the following command as a string since we need to persist the lsid and the txnNumber
- // into the scope of the parallel shell.
- // assert.commandFailedWithCode(db.adminCommand({
- // commitTransaction: 1,
- // maxTimeMS: 2000 * 10,
- // lsid: lsid,
- // txnNumber: NumberLong(txnNumber),
- // stmtId: NumberInt(0),
- // autocommit: false,
- // }), ErrorCodes.MaxTimeMSExpired);
- const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
- const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1, maxTimeMS: 2000 * 10, " + "lsid: " + tojson(lsid) + "," +
- "txnNumber: NumberLong(" + txnNumber + ")," + "stmtId: NumberInt(0)," +
- "autocommit: false," + "})," + "ErrorCodes.MaxTimeMSExpired);";
- return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
- };
-
- jsTest.log("Starting a cross-shard transaction");
- // Start a cross shard transaction through mongos.
- const updateDocumentOnShard0 = {q: {x: -1}, u: {"$set": {a: 1}}, upsert: true};
-
- const updateDocumentOnShard1 = {q: {x: 1}, u: {"$set": {a: 1}}, upsert: true};
-
- assert.commandWorked(testDB.runCommand({
- update: 'user',
- updates: [updateDocumentOnShard0, updateDocumentOnShard1],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- startTransaction: true
- }));
-
- jsTest.log("Turn on hangBeforeWritingDecision failpoint");
- // Make the commit coordination hang before writing the decision, and send commitTransaction.
- // The transaction on the participant will remain in prepare.
- assert.commandWorked(coordinatorPrimaryConn.adminCommand({
- configureFailPoint: "hangBeforeWritingDecision",
- mode: "alwaysOn",
- }));
-
- // Run commit through mongos in a parallel shell. This should timeout since we have set the
- // failpoint.
- runCommitThroughMongosInParallelShellExpectTimeOut();
- waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1 /* numTimes */);
-
- jsTest.log("Stopping coordinator shard");
- // Stop the mongods on the coordinator shard using the SIGTERM signal. We must skip validation
- // checks since we'll be shutting down a node with a prepared transaction.
- coordinatorReplSetTest.stopSet(15, true /* forRestart */, {skipValidation: true} /* opts */);
-
- // Once the coordinator has gone down, do a majority write on the participant while there is a
- // prepared transaction. This will ensure that the stable timestamp is able to advance since
- // this write must be in the committed snapshot.
- const session = participantPrimaryConn.startSession();
- const sessionDB = session.getDatabase("dummy");
- const sessionColl = sessionDB.getCollection("dummy");
- session.resetOperationTime_forTesting();
- assert.commandWorked(sessionColl.insert({dummy: 2}, {writeConcern: {w: "majority"}}));
- assert.neq(session.getOperationTime(), null);
- assert.neq(session.getClusterTime(), null);
- jsTest.log("Successfully completed majority write on participant");
-
- // Confirm that a majority read on the secondary includes the dummy write. This would mean that
- // the stable timestamp also advanced on the secondary.
- // In order to do this read with readConcern majority, we must use afterClusterTime with causal
- // consistency enabled.
- const participantSecondaryConn = participantReplSetTest.getSecondary();
- const secondaryDB = participantSecondaryConn.getDB("dummy");
- const res = secondaryDB.runCommand({
- find: "dummy",
- readConcern: {level: "majority", afterClusterTime: session.getOperationTime()},
- });
- assert.eq(res.cursor.firstBatch.length, 1);
-
- jsTest.log("Restarting coordinator");
- // Restarting the coordinator will reset the fail point.
- coordinatorReplSetTest.startSet({restart: true});
- coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
-
- jsTest.log("Committing transaction");
- // Now, commitTransaction should succeed.
- assert.commandWorked(st.s.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false
- }));
-
- st.stop();
-
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('jstests/libs/write_concern_util.js');
+
+const rs0_opts = {
+ nodes: [{}, {}]
+};
+// Start the participant replSet with one priority-0 node to avoid the primary flip-flopping.
+const rs1_opts = {
+ nodes: [{}, {rsConfig: {priority: 0}}]
+};
+const st =
+ new ShardingTest({shards: {rs0: rs0_opts, rs1: rs1_opts}, mongos: 1, causallyConsistent: true});
+
+// Create a sharded collection:
+// shard0: [-inf, 0)
+// shard1: [0, inf)
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.name);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s0.adminCommand({split: 'test.user', middle: {x: 0}}));
+assert.commandWorked(
+ st.s0.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.name}));
+
+const testDB = st.s0.getDB('test');
+assert.commandWorked(testDB.runCommand({insert: 'user', documents: [{x: -10}, {x: 10}]}));
+
+const coordinatorReplSetTest = st.rs0;
+const participantReplSetTest = st.rs1;
+
+let coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
+let participantPrimaryConn = participantReplSetTest.getPrimary();
+
+const lsid = {
+ id: UUID()
+};
+let txnNumber = 0;
+const participantList = [{shardId: st.shard0.shardName}, {shardId: st.shard1.shardName}];
+
+// Build the following command as a string since we need to persist the lsid and the txnNumber
+// into the scope of the parallel shell.
+// assert.commandFailedWithCode(db.adminCommand({
+// commitTransaction: 1,
+// maxTimeMS: 2000 * 10,
+// lsid: lsid,
+// txnNumber: NumberLong(txnNumber),
+// stmtId: NumberInt(0),
+// autocommit: false,
+// }), ErrorCodes.MaxTimeMSExpired);
+const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
+ const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1, maxTimeMS: 2000 * 10, " +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.MaxTimeMSExpired);";
+ return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
+};
+
+jsTest.log("Starting a cross-shard transaction");
+// Start a cross-shard transaction through mongos.
+const updateDocumentOnShard0 = {
+ q: {x: -1},
+ u: {"$set": {a: 1}},
+ upsert: true
+};
+
+const updateDocumentOnShard1 = {
+ q: {x: 1},
+ u: {"$set": {a: 1}},
+ upsert: true
+};
+
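+// Send both updates in a single command so the transaction starts on shard0 and shard1 under
+// the same lsid and txnNumber.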
+assert.commandWorked(testDB.runCommand({
+ update: 'user',
+ updates: [updateDocumentOnShard0, updateDocumentOnShard1],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ startTransaction: true
+}));
+
+jsTest.log("Turn on hangBeforeWritingDecision failpoint");
+// Make the commit coordination hang before writing the decision, and send commitTransaction.
+// The transaction on the participant will remain in prepare.
+assert.commandWorked(coordinatorPrimaryConn.adminCommand({
+ configureFailPoint: "hangBeforeWritingDecision",
+ mode: "alwaysOn",
+}));
+
+// Run commit through mongos in a parallel shell. This should time out since we have set the
+// failpoint.
+runCommitThroughMongosInParallelShellExpectTimeOut();
+waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1 /* numTimes */);
+
+jsTest.log("Stopping coordinator shard");
+// Stop the mongods on the coordinator shard using the SIGTERM signal. We must skip validation
+// checks since we'll be shutting down a node with a prepared transaction.
+coordinatorReplSetTest.stopSet(15, true /* forRestart */, {skipValidation: true} /* opts */);
+
+// Once the coordinator has gone down, do a majority write on the participant while there is a
+// prepared transaction. This will ensure that the stable timestamp is able to advance since
+// this write must be in the committed snapshot.
+const session = participantPrimaryConn.startSession();
+const sessionDB = session.getDatabase("dummy");
+const sessionColl = sessionDB.getCollection("dummy");
+session.resetOperationTime_forTesting();
+assert.commandWorked(sessionColl.insert({dummy: 2}, {writeConcern: {w: "majority"}}));
+assert.neq(session.getOperationTime(), null);
+assert.neq(session.getClusterTime(), null);
+jsTest.log("Successfully completed majority write on participant");
+
+// Confirm that a majority read on the secondary includes the dummy write. This would mean that
+// the stable timestamp also advanced on the secondary.
+// In order to do this read with readConcern majority, we must use afterClusterTime with causal
+// consistency enabled.
+const participantSecondaryConn = participantReplSetTest.getSecondary();
+const secondaryDB = participantSecondaryConn.getDB("dummy");
+const res = secondaryDB.runCommand({
+ find: "dummy",
+ readConcern: {level: "majority", afterClusterTime: session.getOperationTime()},
+});
+assert.eq(res.cursor.firstBatch.length, 1);
+
+jsTest.log("Restarting coordinator");
+// Restarting the coordinator will reset the fail point.
+coordinatorReplSetTest.startSet({restart: true});
+coordinatorPrimaryConn = coordinatorReplSetTest.getPrimary();
+
+jsTest.log("Committing transaction");
+// Now, commitTransaction should succeed.
+assert.commandWorked(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false
+}));
+
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_failover.js b/jstests/sharding/txn_two_phase_commit_failover.js
index 02fbe8bd88c..7ee5128d23d 100644
--- a/jstests/sharding/txn_two_phase_commit_failover.js
+++ b/jstests/sharding/txn_two_phase_commit_failover.js
@@ -11,200 +11,205 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// Lower the transaction timeout for participants, since this test exercises the case where the
+// coordinator fails over before writing the participant list and then checks that the
+// transaction is aborted on all participants, and the participants will only abort on reaching
+// the transaction timeout.
+TestData.transactionLifetimeLimitSeconds = 30;
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
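+// Runs the failpoint test cases with the coordinator replica set sized so that, after a
+// stepdown, either the same node or a different node steps up.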
+const runTest = function(sameNodeStepsUpAfterFailover) {
+ let stepDownSecs; // The amount of time the node has to wait before becoming primary again.
+ let numCoordinatorNodes;
+ if (sameNodeStepsUpAfterFailover) {
+ numCoordinatorNodes = 1;
+ stepDownSecs = 1;
+ } else {
+ numCoordinatorNodes = 3;
+ stepDownSecs = 3;
+ }
+
+ let st = new ShardingTest({
+ shards: 3,
+ rs0: {nodes: numCoordinatorNodes},
+ causallyConsistent: true,
+ other: {mongosOptions: {verbose: 3}}
+ });
+
+ let coordinatorReplSetTest = st.rs0;
+
+ let participant0 = st.shard0;
+ let participant1 = st.shard1;
+ let participant2 = st.shard2;
+
+ const runCommitThroughMongosInParallelShellExpectSuccess = function() {
+ const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "}));";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+ };
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
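+    // Like the success variant above, but expects commitTransaction to fail with
+    // NoSuchTransaction, i.e. the coordinator decided to abort.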
+ const runCommitThroughMongosInParallelShellExpectAbort = function() {
+        const runCommitExpectAbortCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.NoSuchTransaction);";
+        return startParallelShell(runCommitExpectAbortCode, st.s.port);
+ };
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
+ const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ flushRoutersAndRefreshShardMetadata(st, {ns});
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+ };
- // Lower the transaction timeout for participants, since this test exercises the case where the
- // coordinator fails over before writing the participant list and then checks that the
- // transaction is aborted on all participants, and the participants will only abort on reaching
- // the transaction timeout.
- TestData.transactionLifetimeLimitSeconds = 30;
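+    // Runs a single two-phase commit attempt: pause the coordinator at the given failpoint,
+    // step down the coordinator primary, and verify that the transaction reaches the expected
+    // outcome.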
+ const testCommitProtocol = function(makeAParticipantAbort, failpointData, expectAbortResponse) {
+ jsTest.log("Testing commit protocol with sameNodeStepsUpAfterFailover: " +
+ sameNodeStepsUpAfterFailover + ", makeAParticipantAbort: " +
+ makeAParticipantAbort + ", expectAbortResponse: " + expectAbortResponse +
+ ", and failpointData: " + tojson(failpointData));
- let lsid = {id: UUID()};
- let txnNumber = 0;
+ txnNumber++;
+ setUp();
- const runTest = function(sameNodeStepsUpAfterFailover) {
- let stepDownSecs; // The amount of time the node has to wait before becoming primary again.
- let numCoordinatorNodes;
- if (sameNodeStepsUpAfterFailover) {
- numCoordinatorNodes = 1;
- stepDownSecs = 1;
- } else {
- numCoordinatorNodes = 3;
- stepDownSecs = 3;
- }
+ coordinatorReplSetTest.awaitNodesAgreeOnPrimary();
+ let coordPrimary = coordinatorReplSetTest.getPrimary();
- let st = new ShardingTest({
- shards: 3,
- rs0: {nodes: numCoordinatorNodes},
- causallyConsistent: true,
- other: {mongosOptions: {verbose: 3}}
- });
-
- let coordinatorReplSetTest = st.rs0;
-
- let participant0 = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- const runCommitThroughMongosInParallelShellExpectSuccess = function() {
- const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "}));";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const runCommitThroughMongosInParallelShellExpectAbort = function() {
- const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "})," +
- "ErrorCodes.NoSuchTransaction);";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(
- st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ if (makeAParticipantAbort) {
+ // Manually abort the transaction on one of the participants, so that the
+ // participant fails to prepare.
+ assert.commandWorked(participant2.adminCommand({
+ abortTransaction: 1,
lsid: lsid,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
- startTransaction: true,
autocommit: false,
}));
- };
-
- const testCommitProtocol = function(
- makeAParticipantAbort, failpointData, expectAbortResponse) {
- jsTest.log("Testing commit protocol with sameNodeStepsUpAfterFailover: " +
- sameNodeStepsUpAfterFailover + ", makeAParticipantAbort: " +
- makeAParticipantAbort + ", expectAbortResponse: " + expectAbortResponse +
- ", and failpointData: " + tojson(failpointData));
-
- txnNumber++;
- setUp();
-
- coordinatorReplSetTest.awaitNodesAgreeOnPrimary();
- let coordPrimary = coordinatorReplSetTest.getPrimary();
-
- if (makeAParticipantAbort) {
- // Manually abort the transaction on one of the participants, so that the
- // participant fails to prepare.
- assert.commandWorked(participant2.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- }
-
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
- }));
+ }
- // Run commitTransaction through a parallel shell.
- let awaitResult;
- if (expectAbortResponse) {
- awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
- } else {
- awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
- }
-
- var numTimesShouldBeHit = failpointData.numTimesShouldBeHit;
- if ((failpointData.failpoint == "hangWhileTargetingLocalHost" &&
- !failpointData.skip) && // We are testing the prepare phase
- makeAParticipantAbort) { // A remote participant will vote abort
- // Wait for the abort to the local host to be scheduled as well.
- numTimesShouldBeHit++;
- }
-
- waitForFailpoint("Hit " + failpointData.failpoint + " failpoint", numTimesShouldBeHit);
-
- // Induce the coordinator primary to step down.
- assert.commandWorked(
- coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: "off",
- }));
+ assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
+ }));
- // The router should retry commitTransaction against the new primary.
- awaitResult();
-
- // Check that the transaction committed or aborted as expected.
- if (expectAbortResponse) {
- jsTest.log("Verify that the transaction was aborted on all shards.");
- assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
- } else {
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in
- // the read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
- }
-
- st.s.getDB(dbName).getCollection(collName).drop();
- clearRawMongoProgramOutput();
- };
-
- //
- // Run through all the failpoints when one participant responds to prepare with vote abort.
- //
-
- failpointDataArr.forEach(function(failpointData) {
- testCommitProtocol(true /* make a participant abort */,
- failpointData,
- true /* expect abort decision */);
- });
-
- //
- // Run through all the failpoints when all participants respond to prepare with vote commit.
- //
-
- failpointDataArr.forEach(function(failpointData) {
- // Note: If the coordinator fails over before making the participant list durable,
- // the transaction will abort even if all participants could have committed. This is
- // a property of the coordinator only, and would be true even if a participant's
- // in-progress transaction could survive failover.
- let expectAbort = (failpointData.failpoint == "hangBeforeWritingParticipantList") ||
- (failpointData.failpoint == "hangWhileTargetingLocalHost" && !failpointData.skip) ||
- false;
- testCommitProtocol(false /* make a participant abort */, failpointData, expectAbort);
- });
- st.stop();
- };
+ // Run commitTransaction through a parallel shell.
+ let awaitResult;
+ if (expectAbortResponse) {
+ awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
+ } else {
+ awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
+ }
+
+ var numTimesShouldBeHit = failpointData.numTimesShouldBeHit;
+ if ((failpointData.failpoint == "hangWhileTargetingLocalHost" &&
+ !failpointData.skip) && // We are testing the prepare phase
+ makeAParticipantAbort) { // A remote participant will vote abort
+ // Wait for the abort to the local host to be scheduled as well.
+ numTimesShouldBeHit++;
+ }
+
+ waitForFailpoint("Hit " + failpointData.failpoint + " failpoint", numTimesShouldBeHit);
+
+ // Induce the coordinator primary to step down.
+ assert.commandWorked(
+ coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
+ assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: "off",
+ }));
- const failpointDataArr = getCoordinatorFailpoints();
+ // The router should retry commitTransaction against the new primary.
+ awaitResult();
+
+ // Check that the transaction committed or aborted as expected.
+ if (expectAbortResponse) {
+ jsTest.log("Verify that the transaction was aborted on all shards.");
+ assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
+ } else {
+ jsTest.log("Verify that the transaction was committed on all shards.");
+ // Use assert.soon(), because although coordinateCommitTransaction currently blocks
+ // until the commit process is fully complete, it will eventually be changed to only
+ // block until the decision is *written*, at which point the test can pass the
+ // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in
+ // the read to ensure the read sees the transaction's writes (TODO SERVER-37165).
+ assert.soon(function() {
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
+ });
+ }
+
+ st.s.getDB(dbName).getCollection(collName).drop();
+ clearRawMongoProgramOutput();
+ };
- runTest(true /* same node always steps up after stepping down */, false);
- runTest(false /* same node always steps up after stepping down */, false);
+ //
+ // Run through all the failpoints when one participant responds to prepare with vote abort.
+ //
+
+ failpointDataArr.forEach(function(failpointData) {
+ testCommitProtocol(
+ true /* make a participant abort */, failpointData, true /* expect abort decision */);
+ });
+
+ //
+ // Run through all the failpoints when all participants respond to prepare with vote commit.
+ //
+
+ failpointDataArr.forEach(function(failpointData) {
+ // Note: If the coordinator fails over before making the participant list durable,
+ // the transaction will abort even if all participants could have committed. This is
+ // a property of the coordinator only, and would be true even if a participant's
+ // in-progress transaction could survive failover.
+ let expectAbort = (failpointData.failpoint == "hangBeforeWritingParticipantList") ||
+ (failpointData.failpoint == "hangWhileTargetingLocalHost" && !failpointData.skip) ||
+ false;
+ testCommitProtocol(false /* make a participant abort */, failpointData, expectAbort);
+ });
+ st.stop();
+};
+
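+// Based on how the entries are used above, each element of failpointDataArr is expected to look
+// roughly like {failpoint: <name>, numTimesShouldBeHit: <n>, skip: <n>} (illustrative shape, not
+// taken from the helper itself).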
+const failpointDataArr = getCoordinatorFailpoints();
+
+runTest(true /* same node always steps up after stepping down */, false);
+runTest(false /* same node always steps up after stepping down */, false);
})();
diff --git a/jstests/sharding/txn_two_phase_commit_killop.js b/jstests/sharding/txn_two_phase_commit_killop.js
index c49123d3fb7..18a13d58dd9 100644
--- a/jstests/sharding/txn_two_phase_commit_killop.js
+++ b/jstests/sharding/txn_two_phase_commit_killop.js
@@ -7,176 +7,183 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- let st = new ShardingTest({shards: 3, causallyConsistent: true});
-
- let coordinator = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- const runCommitThroughMongosInParallelShellExpectSuccess = function() {
- const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "}));";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const runCommitThroughMongosInParallelShellExpectAbort = function() {
- const runCommitExpectSuccessCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1," + "lsid: " + tojson(lsid) + "," + "txnNumber: NumberLong(" +
- txnNumber + ")," + "stmtId: NumberInt(0)," + "autocommit: false," + "})," +
- "ErrorCodes.NoSuchTransaction);";
- return startParallelShell(runCommitExpectSuccessCode, st.s.port);
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+let st = new ShardingTest({shards: 3, causallyConsistent: true});
+
+let coordinator = st.shard0;
+let participant1 = st.shard1;
+let participant2 = st.shard2;
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
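+// txnNumber is incremented by testCommitProtocol so that each run uses a fresh transaction on
+// the same session.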
+
+const runCommitThroughMongosInParallelShellExpectSuccess = function() {
+ const runCommitExpectSuccessCode = "assert.commandWorked(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "}));";
+ return startParallelShell(runCommitExpectSuccessCode, st.s.port);
+};
+
+const runCommitThroughMongosInParallelShellExpectAbort = function() {
+    const runCommitExpectAbortCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1," +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.NoSuchTransaction);";
+    return startParallelShell(runCommitExpectAbortCode, st.s.port);
+};
+
+const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: coordinator.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+ // from the shards starting, aborting, and restarting the transaction due to needing to
+ // refresh after the transaction has started.
+ assert.commandWorked(coordinator.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+};
+
+const testCommitProtocol = function(shouldCommit, failpointData) {
+ jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
+ " protocol with failpointData: " + tojson(failpointData));
+
+ txnNumber++;
+ setUp();
+
+ if (!shouldCommit) {
+ // Manually abort the transaction on one of the participants, so that the participant
+ // fails to prepare.
+ assert.commandWorked(participant2.adminCommand({
+ abortTransaction: 1,
lsid: lsid,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
- startTransaction: true,
autocommit: false,
}));
- };
-
- const testCommitProtocol = function(shouldCommit, failpointData) {
- jsTest.log("Testing two-phase " + (shouldCommit ? "commit" : "abort") +
- " protocol with failpointData: " + tojson(failpointData));
-
- txnNumber++;
- setUp();
-
- if (!shouldCommit) {
- // Manually abort the transaction on one of the participants, so that the participant
- // fails to prepare.
- assert.commandWorked(participant2.adminCommand({
- abortTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- }
-
- // Turn on failpoint to make the coordinator hang at a the specified point.
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
- }));
-
- // Run commitTransaction through a parallel shell.
- let awaitResult;
- if (shouldCommit) {
- awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
- } else {
- awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
- }
-
- // Deliver killOp once the failpoint has been hit.
-
- waitForFailpoint("Hit " + failpointData.failpoint + " failpoint",
- failpointData.numTimesShouldBeHit);
-
- jsTest.log("Going to find coordinator opCtx ids");
- let coordinatorOps =
- coordinator.getDB("admin")
- .aggregate(
- [{$currentOp: {'allUsers': true}}, {$match: {desc: "TransactionCoordinator"}}])
- .toArray();
-
- // Use "greater than or equal to" since, for failpoints that pause the coordinator while
- // it's sending prepare or sending the decision, there might be one additional thread that's
- // doing the "send" to the local participant (or that thread might have already completed).
- assert.gte(coordinatorOps.length, failpointData.numTimesShouldBeHit);
-
- coordinatorOps.forEach(function(coordinatorOp) {
- coordinator.getDB("admin").killOp(coordinatorOp.opid);
+ }
+
+    // Turn on the failpoint to make the coordinator hang at the specified point.
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: {skip: (failpointData.skip ? failpointData.skip : 0)},
+ }));
+
+ // Run commitTransaction through a parallel shell.
+ let awaitResult;
+ if (shouldCommit) {
+ awaitResult = runCommitThroughMongosInParallelShellExpectSuccess();
+ } else {
+ awaitResult = runCommitThroughMongosInParallelShellExpectAbort();
+ }
+
+ // Deliver killOp once the failpoint has been hit.
+
+ waitForFailpoint("Hit " + failpointData.failpoint + " failpoint",
+ failpointData.numTimesShouldBeHit);
+
+ jsTest.log("Going to find coordinator opCtx ids");
+ let coordinatorOps =
+ coordinator.getDB("admin")
+ .aggregate(
+ [{$currentOp: {'allUsers': true}}, {$match: {desc: "TransactionCoordinator"}}])
+ .toArray();
+
+ // Use "greater than or equal to" since, for failpoints that pause the coordinator while
+ // it's sending prepare or sending the decision, there might be one additional thread that's
+ // doing the "send" to the local participant (or that thread might have already completed).
+ assert.gte(coordinatorOps.length, failpointData.numTimesShouldBeHit);
+
+ coordinatorOps.forEach(function(coordinatorOp) {
+ coordinator.getDB("admin").killOp(coordinatorOp.opid);
+ });
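+    // Turn the failpoint off so the coordinator can make progress again after the killOp.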
+ assert.commandWorked(coordinator.adminCommand({
+ configureFailPoint: failpointData.failpoint,
+ mode: "off",
+ }));
+
+ // If the commit coordination was not robust to killOp, then commitTransaction would fail
+ // with an Interrupted error rather than fail with NoSuchTransaction or return success.
+ jsTest.log("Wait for the commit coordination to complete.");
+ awaitResult();
+
+ // If deleting the coordinator doc was not robust to killOp, the document would still exist.
+ assert.eq(0, coordinator.getDB("config").getCollection("transaction_coordinators").count());
+
+ // Check that the transaction committed or aborted as expected.
+ if (!shouldCommit) {
+ jsTest.log("Verify that the transaction was aborted on all shards.");
+ assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
+ } else {
+ jsTest.log("Verify that the transaction was committed on all shards.");
+ // Use assert.soon(), because although coordinateCommitTransaction currently blocks
+ // until the commit process is fully complete, it will eventually be changed to only
+ // block until the decision is *written*, at which point the test can pass the
+ // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
+ // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
+ assert.soon(function() {
+ return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
});
- assert.commandWorked(coordinator.adminCommand({
- configureFailPoint: failpointData.failpoint,
- mode: "off",
- }));
+ }
- // If the commit coordination was not robust to killOp, then commitTransaction would fail
- // with an Interrupted error rather than fail with NoSuchTransaction or return success.
- jsTest.log("Wait for the commit coordination to complete.");
- awaitResult();
-
- // If deleting the coordinator doc was not robust to killOp, the document would still exist.
- assert.eq(0, coordinator.getDB("config").getCollection("transaction_coordinators").count());
-
- // Check that the transaction committed or aborted as expected.
- if (!shouldCommit) {
- jsTest.log("Verify that the transaction was aborted on all shards.");
- assert.eq(0, st.s.getDB(dbName).getCollection(collName).find().itcount());
- } else {
- jsTest.log("Verify that the transaction was committed on all shards.");
- // Use assert.soon(), because although coordinateCommitTransaction currently blocks
- // until the commit process is fully complete, it will eventually be changed to only
- // block until the decision is *written*, at which point the test can pass the
- // operationTime returned by coordinateCommitTransaction as 'afterClusterTime' in the
- // read to ensure the read sees the transaction's writes (TODO SERVER-37165).
- assert.soon(function() {
- return 3 === st.s.getDB(dbName).getCollection(collName).find().itcount();
- });
- }
-
- st.s.getDB(dbName).getCollection(collName).drop();
- };
-
- const failpointDataArr = getCoordinatorFailpoints();
-
- // TODO(SERVER-39754): The abort path is unreliable, because depending on the stage at which the
- // transaction is aborted, the failpoints might be hit more than the specified number of times.
- //
- // // Test abort path.
-
- // failpointDataArr.forEach(function(failpointData) {
- // testCommitProtocol(false /* shouldCommit */, failpointData);
- // clearRawMongoProgramOutput();
- // });
-
- // Test commit path.
-
- failpointDataArr.forEach(function(failpointData) {
- testCommitProtocol(true /* shouldCommit */, failpointData);
- clearRawMongoProgramOutput();
- });
+ st.s.getDB(dbName).getCollection(collName).drop();
+};
+
+const failpointDataArr = getCoordinatorFailpoints();
+
+// TODO(SERVER-39754): The abort path is unreliable, because depending on the stage at which the
+// transaction is aborted, the failpoints might be hit more than the specified number of times.
+//
+// // Test abort path.
+
+// failpointDataArr.forEach(function(failpointData) {
+// testCommitProtocol(false /* shouldCommit */, failpointData);
+// clearRawMongoProgramOutput();
+// });
+
+// Test commit path.
- st.stop();
+failpointDataArr.forEach(function(failpointData) {
+ testCommitProtocol(true /* shouldCommit */, failpointData);
+ clearRawMongoProgramOutput();
+});
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_server_status.js b/jstests/sharding/txn_two_phase_commit_server_status.js
index d4bd4b3c75e..407ee038d50 100644
--- a/jstests/sharding/txn_two_phase_commit_server_status.js
+++ b/jstests/sharding/txn_two_phase_commit_server_status.js
@@ -1,21 +1,21 @@
// Basic test that the two-phase commit coordinator metrics fields appear in serverStatus output.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 1, config: 1});
+const st = new ShardingTest({shards: 1, config: 1});
- const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1}));
- assert.neq(null, res.twoPhaseCommitCoordinator);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit);
- assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit);
- assert.neq(null, res.twoPhaseCommitCoordinator.currentInSteps);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingParticipantList);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForVotes);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingDecision);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForDecisionAcks);
- assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.deletingCoordinatorDoc);
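+// On a freshly started shard, the coordinator metrics section should exist with every counter
+// at zero.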
+const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1}));
+assert.neq(null, res.twoPhaseCommitCoordinator);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit);
+assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit);
+assert.neq(null, res.twoPhaseCommitCoordinator.currentInSteps);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingParticipantList);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForVotes);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingDecision);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForDecisionAcks);
+assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.deletingCoordinatorDoc);
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js b/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
index 847c1c64346..aaad8537e7f 100644
--- a/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
+++ b/jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
@@ -10,119 +10,123 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js'); // for waitForFailpoint
- load('jstests/libs/write_concern_util.js'); // for stopping/restarting replication
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- let st = new ShardingTest({
- shards: 3,
- rs0: {nodes: 2},
- causallyConsistent: true,
- other: {
- mongosOptions: {verbose: 3},
- }
- });
-
- let coordinatorReplSetTest = st.rs0;
- let participant0 = st.shard0;
- let participant1 = st.shard1;
- let participant2 = st.shard2;
-
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
- const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
- "commitTransaction: 1, maxTimeMS: 1000 * 10, " + "lsid: " + tojson(lsid) + "," +
- "txnNumber: NumberLong(" + txnNumber + ")," + "stmtId: NumberInt(0)," +
- "autocommit: false," + "})," + "ErrorCodes.MaxTimeMSExpired);";
- return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
- };
-
- const setUp = function() {
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
-
- // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
- // from the shards starting, aborting, and restarting the transaction due to needing to
- // refresh after the transaction has started.
- assert.commandWorked(participant0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
-
- // Start a new transaction by inserting a document onto each shard.
- assert.commandWorked(st.s.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
- }));
- };
- setUp();
-
- let coordPrimary = coordinatorReplSetTest.getPrimary();
- let coordSecondary = coordinatorReplSetTest.getSecondary();
-
- // Make the commit coordination hang before writing the decision, and send commitTransaction.
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: "hangBeforeWritingDecision",
- mode: "alwaysOn",
- }));
- let awaitResult = runCommitThroughMongosInParallelShellExpectTimeOut();
- waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
-
- // Stop replication on all nodes in the coordinator replica set so that the write done on stepup
- // cannot become majority committed, regardless of which node steps up.
- stopServerReplication([coordPrimary, coordSecondary]);
-
- // Induce the coordinator primary to step down.
-
- // The amount of time the node has to wait before becoming primary again.
- const stepDownSecs = 1;
- assert.commandWorked(coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
-
- assert.commandWorked(coordPrimary.adminCommand({
- configureFailPoint: "hangBeforeWritingDecision",
- mode: "off",
- }));
-
- // The router should retry commitTransaction against the new primary and time out waiting to
- // access the coordinator catalog.
- awaitResult();
-
- // Re-enable replication, so that the write done on stepup can become majority committed.
- restartReplSetReplication(coordinatorReplSetTest);
-
- // Now, commitTransaction should succeed.
- assert.commandWorked(st.s.adminCommand({
- commitTransaction: 1,
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js'); // for waitForFailpoint
+load('jstests/libs/write_concern_util.js'); // for stopping/restarting replication
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+let st = new ShardingTest({
+ shards: 3,
+ rs0: {nodes: 2},
+ causallyConsistent: true,
+ other: {
+ mongosOptions: {verbose: 3},
+ }
+});
+
+let coordinatorReplSetTest = st.rs0;
+let participant0 = st.shard0;
+let participant1 = st.shard1;
+let participant2 = st.shard2;
+
+let lsid = {id: UUID()};
+let txnNumber = 0;
+
+const runCommitThroughMongosInParallelShellExpectTimeOut = function() {
+ const runCommitExpectTimeOutCode = "assert.commandFailedWithCode(db.adminCommand({" +
+ "commitTransaction: 1, maxTimeMS: 1000 * 10, " +
+ "lsid: " + tojson(lsid) + "," +
+ "txnNumber: NumberLong(" + txnNumber + ")," +
+ "stmtId: NumberInt(0)," +
+ "autocommit: false," +
+ "})," +
+ "ErrorCodes.MaxTimeMSExpired);";
+ return startParallelShell(runCommitExpectTimeOutCode, st.s.port);
+};
+
+const setUp = function() {
+ // Create a sharded collection with a chunk on each shard:
+ // shard0: [-inf, 0)
+ // shard1: [0, 10)
+ // shard2: [10, +inf)
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: participant0.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: participant1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: participant2.shardName}));
+
+ // These forced refreshes are not strictly necessary; they just prevent extra TXN log lines
+ // from the shards starting, aborting, and restarting the transaction due to needing to
+ // refresh after the transaction has started.
+ assert.commandWorked(participant0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+ assert.commandWorked(participant2.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+
+ // Start a new transaction by inserting a document onto each shard.
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
lsid: lsid,
txnNumber: NumberLong(txnNumber),
stmtId: NumberInt(0),
- autocommit: false
+ startTransaction: true,
+ autocommit: false,
}));
-
- jsTest.log("Verify that the transaction was committed on all shards.");
- assert.eq(3, st.s.getDB(dbName).getCollection(collName).find().itcount());
-
- st.stop();
+};
+setUp();
+
+let coordPrimary = coordinatorReplSetTest.getPrimary();
+let coordSecondary = coordinatorReplSetTest.getSecondary();
+
+// Make the commit coordination hang before writing the decision, and send commitTransaction.
+assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: "hangBeforeWritingDecision",
+ mode: "alwaysOn",
+}));
+let awaitResult = runCommitThroughMongosInParallelShellExpectTimeOut();
+waitForFailpoint("Hit hangBeforeWritingDecision failpoint", 1);
+
+// Stop replication on all nodes in the coordinator replica set so that the write done on stepup
+// cannot become majority committed, regardless of which node steps up.
+stopServerReplication([coordPrimary, coordSecondary]);
+
+// Induce the coordinator primary to step down.
+
+// The amount of time the node has to wait before becoming primary again.
+const stepDownSecs = 1;
+assert.commandWorked(coordPrimary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
+
+assert.commandWorked(coordPrimary.adminCommand({
+ configureFailPoint: "hangBeforeWritingDecision",
+ mode: "off",
+}));
+
+// The router should retry commitTransaction against the new primary and time out waiting to
+// access the coordinator catalog.
+awaitResult();
+
+// Re-enable replication, so that the write done on stepup can become majority committed.
+restartReplSetReplication(coordinatorReplSetTest);
+
+// Now, commitTransaction should succeed.
+assert.commandWorked(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false
+}));
+
+jsTest.log("Verify that the transaction was committed on all shards.");
+assert.eq(3, st.s.getDB(dbName).getCollection(collName).find().itcount());
+
+st.stop();
})();
diff --git a/jstests/sharding/txn_with_several_routers.js b/jstests/sharding/txn_with_several_routers.js
index 89906937a3f..4dededd0cb0 100644
--- a/jstests/sharding/txn_with_several_routers.js
+++ b/jstests/sharding/txn_with_several_routers.js
@@ -6,198 +6,196 @@
*/
(function() {
- 'use strict';
-
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + "." + collName;
-
- function removeAllDocumentsFromTestCollection() {
- assert.commandWorked(router0.getDB(dbName).foo.deleteMany({}));
- }
-
- function runTest(testFn) {
- testFn();
- removeAllDocumentsFromTestCollection();
- }
-
- let st = new ShardingTest({shards: 3, mongos: 2, causallyConsistent: true});
- let router0 = st.s0;
- let router1 = st.s1;
-
- // Create a sharded collection with a chunk on each shard:
- // shard0: [-inf, 0)
- // shard1: [0, 10)
- // shard2: [10, +inf)
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard2.shardName}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns});
-
- // Test that trying to run start txn from two routers with the same transaction number fails
- // through the second router if they target overlapping shards.
- runTest(() => {
- let lsid = {id: UUID()};
- let txnNumber = 0;
-
- // Start a new transaction on router 0 by inserting a document onto each shard.
- assert.commandWorked(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
- }));
-
- // Try to start a new transaction with the same transaction number on router 1 by inserting
- // a document onto each shard.
- assert.commandFailedWithCode(router1.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -50}, {_id: 4}, {_id: 150}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- startTransaction: true,
- autocommit: false,
- // Because ordered writes are done serially for different shard targets and abort early
- // on first error, this can leave the transaction on the other shards open.
- // To ensure this router implicitly aborts the transaction on all participants (so
- // that the next test case does not encounter an open transaction), make this
- // router do an *unordered* write that touches all the same participants as the
- // first router touched.
- ordered: false,
- }),
- 50911);
-
- // The automatic abort-on-error path will occur when the above
- // statement fails, so commit will fail.
- assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- });
-
- // Test that trying to run start txn from one router and running an operation for that same
- // transaction from another router fails through the second router.
- runTest(() => {
- let lsid = {id: UUID()};
- let txnNumber = 0;
- let stmtId = 0;
-
- // Start a new transaction on router 0 by inserting a document onto each shard.
- assert.commandWorked(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
-
- ++stmtId;
-
- // Try to continue the same transaction but through router 1. Should fail because no txn
- // with this number exists on that router.
- assert.commandFailed(router1.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -50}, {_id: 4}, {_id: 150}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }));
-
- // Commit should succeed since the command from router 2 should never reach the shard.
- assert.commandWorked(router0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(0),
- autocommit: false,
- }));
- });
-
- // Test that trying to run start txn from one router, start txn on the second router with the
- // same transaction number, and running operations on overlapping shards will lead to failure.
- runTest(() => {
- let lsid = {id: UUID()};
- let txnNumber = 0;
- let stmtId = 0;
-
- // Start a new transaction on router 0 by inserting a document onto the first shard
- assert.commandWorked(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: -5}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
-
- // Start a new transaction on router 1 with the same transaction number, targeting the last
- // shard.
- assert.commandWorked(router1.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
- }));
-
- ++stmtId;
-
- // Try to do an operation on the last shard through router 0. Fails because it sends
- // startTxn: true to the new participant, which has already seen an operation from router 1.
- // Implicitly aborts the transaction when the error is thrown.
- assert.commandFailedWithCode(router0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 50}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }),
- 50911);
-
- // Commit through router 0 should fail.
- assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Commit through router 1 should fail.
- assert.commandFailedWithCode(router1.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
- });
-
- st.stop();
+'use strict';
+
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+function removeAllDocumentsFromTestCollection() {
+ assert.commandWorked(router0.getDB(dbName).foo.deleteMany({}));
+}
+
+function runTest(testFn) {
+ testFn();
+ removeAllDocumentsFromTestCollection();
+}
+
+let st = new ShardingTest({shards: 3, mongos: 2, causallyConsistent: true});
+let router0 = st.s0;
+let router1 = st.s1;
+
+// Create a sharded collection with a chunk on each shard:
+// shard0: [-inf, 0)
+// shard1: [0, 10)
+// shard2: [10, +inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 10}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 10}, to: st.shard2.shardName}));
+
+flushRoutersAndRefreshShardMetadata(st, {ns});
+
+// Test that trying to start a transaction with the same transaction number from two routers
+// fails through the second router if they target overlapping shards.
+runTest(() => {
+ let lsid = {id: UUID()};
+ let txnNumber = 0;
+
+ // Start a new transaction on router 0 by inserting a document onto each shard.
+ assert.commandWorked(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ // Try to start a new transaction with the same transaction number on router 1 by inserting
+ // a document onto each shard.
+ assert.commandFailedWithCode(router1.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -50}, {_id: 4}, {_id: 150}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ startTransaction: true,
+ autocommit: false,
+        // Because ordered writes are executed serially for different shard targets and abort
+        // early on the first error, this can leave the transaction open on the other shards.
+ // To ensure this router implicitly aborts the transaction on all participants (so
+ // that the next test case does not encounter an open transaction), make this
+ // router do an *unordered* write that touches all the same participants as the
+ // first router touched.
+ ordered: false,
+ }),
+ 50911);
+
+ // The automatic abort-on-error path will occur when the above
+ // statement fails, so commit will fail.
+ assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+});
+
+// Test that starting a transaction from one router and then running an operation for that
+// same transaction from another router fails through the second router.
+runTest(() => {
+ let lsid = {id: UUID()};
+ let txnNumber = 0;
+ let stmtId = 0;
+
+ // Start a new transaction on router 0 by inserting a document onto each shard.
+ assert.commandWorked(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}, {_id: 5}, {_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ ++stmtId;
+
+ // Try to continue the same transaction but through router 1. Should fail because no txn
+ // with this number exists on that router.
+ assert.commandFailed(router1.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -50}, {_id: 4}, {_id: 150}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }));
+
+    // Commit should succeed since the command from router 1 should never reach the shards.
+ assert.commandWorked(router0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(0),
+ autocommit: false,
+ }));
+});
+
+// Test that starting a transaction from one router, starting one on the second router with the
+// same transaction number, and running operations on overlapping shards leads to failure.
+runTest(() => {
+ let lsid = {id: UUID()};
+ let txnNumber = 0;
+ let stmtId = 0;
+
+ // Start a new transaction on router 0 by inserting a document onto the first shard
+ assert.commandWorked(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: -5}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ // Start a new transaction on router 1 with the same transaction number, targeting the last
+ // shard.
+ assert.commandWorked(router1.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: 15}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ ++stmtId;
+
+ // Try to do an operation on the last shard through router 0. Fails because it sends
+    // startTransaction: true to a new participant that has already seen an op from router 1.
+ // Implicitly aborts the transaction when the error is thrown.
+ assert.commandFailedWithCode(router0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{_id: 50}],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }),
+ 50911);
+
+ // Commit through router 0 should fail.
+ assert.commandFailedWithCode(router0.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ // Commit through router 1 should fail.
+ assert.commandFailedWithCode(router1.getDB(dbName).adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ }),
+ ErrorCodes.NoSuchTransaction);
+});
+
+st.stop();
})();
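All three failures above come from two routers reusing the same {lsid, txnNumber} pair. With distinct sessions the same workload is legal; a minimal sketch, assuming the `router0`/`router1` mongos connections from the test:

// Sketch: concurrent transactions from two mongos instances are fine as long
// as each router uses its own session. Illustrative only.
const s0 = router0.startSession();
const s1 = router1.startSession();
s0.startTransaction();
s1.startTransaction();
assert.commandWorked(s0.getDatabase("test").foo.insert({_id: -5}));
assert.commandWorked(s1.getDatabase("test").foo.insert({_id: 15}));
assert.commandWorked(s0.commitTransaction_forTesting());
assert.commandWorked(s1.commitTransaction_forTesting());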
diff --git a/jstests/sharding/txn_writes_during_movechunk.js b/jstests/sharding/txn_writes_during_movechunk.js
index 47f1dc4a7ba..357ea22e14e 100644
--- a/jstests/sharding/txn_writes_during_movechunk.js
+++ b/jstests/sharding/txn_writes_during_movechunk.js
@@ -2,56 +2,56 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
- 'use strict';
+'use strict';
- let staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+let staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- let st = new ShardingTest({shards: 2});
+let st = new ShardingTest({shards: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
+assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
- let coll = st.s.getDB('test').user;
- assert.writeOK(coll.insert({_id: 'updateMe'}));
- assert.writeOK(coll.insert({_id: 'deleteMe'}));
+let coll = st.s.getDB('test').user;
+assert.writeOK(coll.insert({_id: 'updateMe'}));
+assert.writeOK(coll.insert({_id: 'deleteMe'}));
- pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+pauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- let joinMoveChunk = moveChunkParallel(
- staticMongod, st.s0.host, {_id: 0}, null, 'test.user', st.shard1.shardName);
+let joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s0.host, {_id: 0}, null, 'test.user', st.shard1.shardName);
- waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMigrateStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- let session = st.s.startSession();
- let sessionDB = session.getDatabase('test');
- let sessionColl = sessionDB.getCollection('user');
+let session = st.s.startSession();
+let sessionDB = session.getDatabase('test');
+let sessionColl = sessionDB.getCollection('user');
- session.startTransaction();
- sessionColl.insert({_id: 'insertMe'});
- sessionColl.update({_id: 'updateMe'}, {$inc: {y: 1}});
- sessionColl.remove({_id: 'deleteMe'});
+session.startTransaction();
+sessionColl.insert({_id: 'insertMe'});
+sessionColl.update({_id: 'updateMe'}, {$inc: {y: 1}});
+sessionColl.remove({_id: 'deleteMe'});
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+unpauseMigrateAtStep(st.shard1, migrateStepNames.deletedPriorDataInRange);
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- let recipientColl = st.rs1.getPrimary().getDB('test').user;
- assert.eq(null, recipientColl.findOne({_id: 'insertMe'}));
- assert.eq({_id: 'updateMe'}, recipientColl.findOne({_id: 'updateMe'}));
- assert.eq({_id: 'deleteMe'}, recipientColl.findOne({_id: 'deleteMe'}));
+let recipientColl = st.rs1.getPrimary().getDB('test').user;
+assert.eq(null, recipientColl.findOne({_id: 'insertMe'}));
+assert.eq({_id: 'updateMe'}, recipientColl.findOne({_id: 'updateMe'}));
+assert.eq({_id: 'deleteMe'}, recipientColl.findOne({_id: 'deleteMe'}));
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- joinMoveChunk();
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+joinMoveChunk();
- assert.eq({_id: 'insertMe'}, recipientColl.findOne({_id: 'insertMe'}));
- assert.eq({_id: 'updateMe', y: 1}, recipientColl.findOne({_id: 'updateMe'}));
- assert.eq(null, recipientColl.findOne({_id: 'deleteMe'}));
+assert.eq({_id: 'insertMe'}, recipientColl.findOne({_id: 'insertMe'}));
+assert.eq({_id: 'updateMe', y: 1}, recipientColl.findOne({_id: 'updateMe'}));
+assert.eq(null, recipientColl.findOne({_id: 'deleteMe'}));
- assert.eq(null, recipientColl.findOne({x: 1}));
+assert.eq(null, recipientColl.findOne({x: 1}));
- st.stop();
- MongoRunner.stopMongod(staticMongod);
+st.stop();
+MongoRunner.stopMongod(staticMongod);
})();
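The pause/wait/unpause helpers used above are thin wrappers around server failpoints. The underlying pattern, sketched with the generic configureFailPoint command (the failpoint name below is a placeholder; the helpers in chunk_manipulation_util.js map step names to the real ones):

// Sketch of the failpoint dance behind pauseMigrateAtStep/unpauseMigrateAtStep.
// "someMigrationStepFailPoint" is a placeholder name, not a real failpoint.
assert.commandWorked(st.shard1.adminCommand(
    {configureFailPoint: "someMigrationStepFailPoint", mode: "alwaysOn"}));
// ... run writes while the migration is held at that step ...
assert.commandWorked(st.shard1.adminCommand(
    {configureFailPoint: "someMigrationStepFailPoint", mode: "off"}));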
diff --git a/jstests/sharding/unique_index_on_shardservers.js b/jstests/sharding/unique_index_on_shardservers.js
index 1776aa44260..4ee9bb007d1 100644
--- a/jstests/sharding/unique_index_on_shardservers.js
+++ b/jstests/sharding/unique_index_on_shardservers.js
@@ -1,30 +1,30 @@
// SERVER-34954 This test ensures a node started with --shardsvr and added to a replica set has
// the correct version of unique indexes upon re-initiation.
(function() {
- "use strict";
- load("jstests/libs/check_unique_indexes.js");
+"use strict";
+load("jstests/libs/check_unique_indexes.js");
- let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- let mongos = st.s;
- let rs = st.rs0;
+let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+let mongos = st.s;
+let rs = st.rs0;
- // Create `test.coll` and add some indexes on it:
- // with index versions as default, v=1 and v=2; both unique and standard types
- assert.writeOK(mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({a: 1}, {"v": 1}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({b: 1}, {"v": 1, "unique": true}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({c: 1}, {"v": 2}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({d: 1}, {"v": 2, "unique": true}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({e: 1}));
- assert.commandWorked(mongos.getDB("test").coll.createIndex({f: 1}, {"unique": true}));
+// Create `test.coll` and add some indexes to it, using the default index version as well as
+// explicit v=1 and v=2; both unique and standard types.
+assert.writeOK(mongos.getDB("test").coll.insert({_id: 1, a: 1, b: 1, c: 1, d: 1, e: 1, f: 1}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({a: 1}, {"v": 1}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({b: 1}, {"v": 1, "unique": true}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({c: 1}, {"v": 2}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({d: 1}, {"v": 2, "unique": true}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({e: 1}));
+assert.commandWorked(mongos.getDB("test").coll.createIndex({f: 1}, {"unique": true}));
- // Add a node with --shardsvr to the replica set.
- let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
- rs.reInitiate();
- rs.awaitSecondaryNodes();
+// Add a node with --shardsvr to the replica set.
+let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
+rs.reInitiate();
+rs.awaitSecondaryNodes();
- // After adding a new node as a ShardServer ensure the new node has unique indexes
- // in the correct version
- checkUniqueIndexFormatVersion(newNode.getDB("admin"));
- st.stop();
+// After adding the new node as a shard server, ensure it has unique indexes with the
+// correct format version.
+checkUniqueIndexFormatVersion(newNode.getDB("admin"));
+st.stop();
})();
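Index format versions are visible directly in the index specs, so the helper's assertion can be spot-checked by hand. A sketch under the assumption that checkUniqueIndexFormatVersion inspects listIndexes output (its actual implementation lives in jstests/libs/check_unique_indexes.js; `dumpIndexVersions` is an illustrative helper):

// Sketch: dump the version and uniqueness of every index on a collection as
// seen by the newly added node. Illustrative only.
function dumpIndexVersions(node, dbName, collName) {
    node.setSlaveOk();  // allow reads against a secondary
    node.getDB(dbName).getCollection(collName).getIndexes().forEach(function(ix) {
        jsTest.log(collName + "." + ix.name + ": v=" + ix.v + " unique=" + !!ix.unique);
    });
}
dumpIndexVersions(newNode, "test", "coll");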
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index f1d6e96541d..5a337aaa454 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -11,44 +11,44 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
+var testDB = st.s.getDB('test');
- assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
+assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
- var inserts = [];
- for (var i = 0; i < 100; i++) {
- inserts.push({x: i});
- }
- assert.writeOK(testDB.foo.insert(inserts));
-
- assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
- assert.commandWorked(
- testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: st.shard1.shardName}));
-
- // Insert some documents directly into the shards into chunks not owned by that shard.
- st.rs0.getPrimary().getDB('test').foo.insert({x: 100});
- st.rs1.getPrimary().getDB('test').foo.insert({x: 0});
-
- st.rs0.restart(0);
- st.rs1.restart(0);
-
- var fooCount;
- for (var retries = 0; retries <= 2; retries++) {
- try {
- fooCount = testDB.foo.find().itcount();
- break;
- } catch (e) {
- // expected for reestablishing connections broken by the mongod restart.
- assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
- }
+var inserts = [];
+for (var i = 0; i < 100; i++) {
+ inserts.push({x: i});
+}
+assert.writeOK(testDB.foo.insert(inserts));
+
+assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
+assert.commandWorked(
+ testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: st.shard1.shardName}));
+
+// Insert some documents directly into the shards into chunks not owned by that shard.
+st.rs0.getPrimary().getDB('test').foo.insert({x: 100});
+st.rs1.getPrimary().getDB('test').foo.insert({x: 0});
+
+st.rs0.restart(0);
+st.rs1.restart(0);
+
+var fooCount;
+for (var retries = 0; retries <= 2; retries++) {
+ try {
+ fooCount = testDB.foo.find().itcount();
+ break;
+ } catch (e) {
+        // Expected while reestablishing connections broken by the mongod restarts.
+ assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
}
- assert.eq(100, fooCount);
+}
+assert.eq(100, fooCount);
- st.stop();
+st.stop();
}());
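The bounded retry loop above is the standard way jstests cope with connections severed by a restart. The same idea as a reusable sketch (the helper name is illustrative, not from the patch):

// Sketch: retry an operation across the HostUnreachable errors produced while
// connections are re-established after a mongod restart. Illustrative helper.
function retryOnHostUnreachable(fn, attempts) {
    for (let i = 0; i < attempts; i++) {
        try {
            return fn();
        } catch (e) {
            assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
        }
    }
    throw new Error("operation still failing after " + attempts + " attempts");
}
// e.g. const fooCount = retryOnHostUnreachable(() => testDB.foo.find().itcount(), 3);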
diff --git a/jstests/sharding/unsharded_collection_targetting.js b/jstests/sharding/unsharded_collection_targetting.js
index efe4c3c05c4..5393a212ae4 100644
--- a/jstests/sharding/unsharded_collection_targetting.js
+++ b/jstests/sharding/unsharded_collection_targetting.js
@@ -1,32 +1,32 @@
// Tests that a stale mongos would route writes correctly to the right shard after
// an unsharded collection was moved to another shard.
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({
- shards: 2,
- mongos: 2,
- rs: {
- nodes: 1,
- },
- });
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ rs: {
+ nodes: 1,
+ },
+});
- const testName = 'test';
- const mongosDB = st.s0.getDB(testName);
+const testName = 'test';
+const mongosDB = st.s0.getDB(testName);
- // Ensure that shard1 is the primary shard.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs1.getURL());
+// Ensure that shard1 is the primary shard.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs1.getURL());
- // Before moving the collection, issue a write through mongos2 to make it aware
- // about the location of the collection before the move.
- const mongos2DB = st.s1.getDB(testName);
- const mongos2Coll = mongos2DB[testName];
- assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
+// Before moving the collection, issue a write through mongos2 so that it is aware of the
+// collection's location before the move.
+const mongos2DB = st.s1.getDB(testName);
+const mongos2Coll = mongos2DB[testName];
+assert.writeOK(mongos2Coll.insert({_id: 0, a: 0}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- assert.writeOK(mongos2Coll.insert({_id: 1, a: 0}));
+assert.writeOK(mongos2Coll.insert({_id: 1, a: 0}));
- st.stop();
+st.stop();
})();
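What the second mongos must learn here is the database's primary shard, which movePrimary rewrites in the config metadata. A sketch of inspecting it, assuming the standard config.databases schema:

// Sketch: read which shard currently holds the unsharded collections of a
// database. Illustrative; the test never needs to do this explicitly.
const dbEntry = st.s.getDB("config").databases.findOne({_id: "test"});
jsTest.log("primary shard for 'test': " + dbEntry.primary);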
diff --git a/jstests/sharding/unsharded_lookup_in_txn.js b/jstests/sharding/unsharded_lookup_in_txn.js
index 32d0d21e105..ac1bcee7216 100644
--- a/jstests/sharding/unsharded_lookup_in_txn.js
+++ b/jstests/sharding/unsharded_lookup_in_txn.js
@@ -4,36 +4,36 @@
* uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
- const st = new ShardingTest({shards: 2, mongos: 1});
- const kDBName = "unsharded_lookup_in_txn";
+const st = new ShardingTest({shards: 2, mongos: 1});
+const kDBName = "unsharded_lookup_in_txn";
- let session = st.s.startSession();
- let sessionDB = session.getDatabase("unsharded_lookup_in_txn");
+let session = st.s.startSession();
+let sessionDB = session.getDatabase("unsharded_lookup_in_txn");
- const shardedColl = sessionDB.sharded;
- const unshardedColl = sessionDB.unsharded;
+const shardedColl = sessionDB.sharded;
+const unshardedColl = sessionDB.unsharded;
- assert.commandWorked(st.s.adminCommand({enableSharding: sessionDB.getName()}));
- st.ensurePrimaryShard(sessionDB.getName(), st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: sessionDB.getName()}));
+st.ensurePrimaryShard(sessionDB.getName(), st.shard0.shardName);
- assert.commandWorked(
- st.s.adminCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
+assert.commandWorked(
+ st.s.adminCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
- // Move all of the data to shard 1.
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
+// Move all of the data to shard 1.
+assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
- // Insert a bunch of documents, all of which reside on the same chunk (on shard 1).
- for (let i = -10; i < 10; i++) {
- assert.commandWorked(shardedColl.insert({_id: i, local_always_one: 1}));
- }
+// Insert a bunch of documents, all of which reside on the same chunk (on shard 1).
+for (let i = -10; i < 10; i++) {
+ assert.commandWorked(shardedColl.insert({_id: i, local_always_one: 1}));
+}
- const pipeline = [{
+const pipeline = [{
$lookup: {
from: unshardedColl.getName(),
localField: "local_always_one",
@@ -41,57 +41,57 @@
as: "matches"
}
}];
- const kBatchSize = 2;
-
- const testLookupDoesNotSeeDocumentsOutsideSnapshot = function() {
- unshardedColl.drop();
- // Insert some stuff into the unsharded collection.
- const kUnshardedCollOriginalSize = 10;
- for (let i = 0; i < kUnshardedCollOriginalSize; i++) {
- assert.commandWorked(unshardedColl.insert({_id: i, foreign_always_one: 1}));
- }
+const kBatchSize = 2;
+
+const testLookupDoesNotSeeDocumentsOutsideSnapshot = function() {
+ unshardedColl.drop();
+ // Insert some stuff into the unsharded collection.
+ const kUnshardedCollOriginalSize = 10;
+ for (let i = 0; i < kUnshardedCollOriginalSize; i++) {
+ assert.commandWorked(unshardedColl.insert({_id: i, foreign_always_one: 1}));
+ }
- session.startTransaction();
+ session.startTransaction();
- const curs = shardedColl.aggregate(
- pipeline, {readConcern: {level: "snapshot"}, cursor: {batchSize: kBatchSize}});
+ const curs = shardedColl.aggregate(
+ pipeline, {readConcern: {level: "snapshot"}, cursor: {batchSize: kBatchSize}});
- for (let i = 0; i < kBatchSize; i++) {
- const doc = curs.next();
- assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
- }
+ for (let i = 0; i < kBatchSize; i++) {
+ const doc = curs.next();
+ assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
+ }
- // Do writes on the unsharded collection from outside the session.
- (function() {
- const unshardedCollOutsideSession =
- st.s.getDB(sessionDB.getName())[unshardedColl.getName()];
- assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 1}));
- assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 2}));
- })();
-
- // We shouldn't see those writes from the aggregation within the session.
- assert.eq(curs.hasNext(), true);
- while (curs.hasNext()) {
- const doc = curs.next();
- assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
- }
+ // Do writes on the unsharded collection from outside the session.
+ (function() {
+ const unshardedCollOutsideSession =
+ st.s.getDB(sessionDB.getName())[unshardedColl.getName()];
+ assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 1}));
+ assert.commandWorked(unshardedCollOutsideSession.insert({b: 1, xyz: 2}));
+ })();
+
+ // We shouldn't see those writes from the aggregation within the session.
+ assert.eq(curs.hasNext(), true);
+ while (curs.hasNext()) {
+ const doc = curs.next();
+ assert.eq(doc.matches.length, kUnshardedCollOriginalSize);
+ }
- assert.commandWorked(session.abortTransaction_forTesting());
- };
+ assert.commandWorked(session.abortTransaction_forTesting());
+};
- // Run the test once, with all of the data on shard 1. This means that the merging shard (shard
- // 0) will not be targeted. This is interesting because in contrast to the case below, the
- // merging half of the pipeline will start the transaction on the merging shard.
- testLookupDoesNotSeeDocumentsOutsideSnapshot();
+// Run the test once, with all of the data on shard 1. This means that the merging shard (shard
+// 0) will not be targeted. This is interesting because in contrast to the case below, the
+// merging half of the pipeline will start the transaction on the merging shard.
+testLookupDoesNotSeeDocumentsOutsideSnapshot();
- // Move some data to shard 0, so that the merging shard will be targeted.
- assert.commandWorked(st.s.adminCommand({split: shardedColl.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: shardedColl.getFullName(), find: {_id: -1}, to: st.shard0.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
+// Move some data to shard 0, so that the merging shard will be targeted.
+assert.commandWorked(st.s.adminCommand({split: shardedColl.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {_id: -1}, to: st.shard0.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns: shardedColl.getFullName()});
- // Run the test again.
- testLookupDoesNotSeeDocumentsOutsideSnapshot();
+// Run the test again.
+testLookupDoesNotSeeDocumentsOutsideSnapshot();
- st.stop();
+st.stop();
})();
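The isolation property exercised above does not depend on $lookup; any read at snapshot read concern inside the transaction is pinned to the transaction's snapshot. A minimal sketch, assuming the cluster `st` and a test.coll collection:

// Sketch: writes committed after a snapshot transaction starts reading are
// invisible inside it. Illustrative only.
const s = st.s.startSession();
s.startTransaction({readConcern: {level: "snapshot"}});
const before = s.getDatabase("test").coll.find().itcount();
// A write from outside the session...
assert.commandWorked(st.s.getDB("test").coll.insert({fresh: true}));
// ...does not change what the transaction sees.
assert.eq(before, s.getDatabase("test").coll.find().itcount());
assert.commandWorked(s.abortTransaction_forTesting());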
diff --git a/jstests/sharding/update_compound_shard_key.js b/jstests/sharding/update_compound_shard_key.js
index c3b1f6b0f46..46e28ed597f 100644
--- a/jstests/sharding/update_compound_shard_key.js
+++ b/jstests/sharding/update_compound_shard_key.js
@@ -3,416 +3,414 @@
* @tags: [uses_transactions, uses_multi_shard_transaction]
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: 3});
- const kDbName = 'update_compound_sk';
- const ns = kDbName + '.coll';
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(kDbName);
-
- assert.commandWorked(st.s0.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, st.shard0.shardName);
-
- assert.commandWorked(
- st.s.getDB('config').adminCommand({shardCollection: ns, key: {x: 1, y: 1, z: 1}}));
-
- let docsToInsert = [
- {_id: 0, x: 4, y: 3, z: 3},
- {_id: 1, x: 100, y: 50, z: 3, a: 5},
- {_id: 2, x: 100, y: 500, z: 3, a: 5}
- ];
-
- // Make sure that shard0, shard1 and shard2 has _id 0,1 and 2 documents respectively.
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 0, z: 3}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 100, z: 3}}));
-
- for (let i = 0; i < docsToInsert.length; i++) {
- assert.commandWorked(st.s.getDB(kDbName).coll.insert(docsToInsert[i]));
- }
-
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 50, z: 3}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 500, z: 3}, to: st.shard2.shardName}));
- cleanupOrphanedDocs(st, ns);
-
- function assertUpdateWorked(query, update, isUpsert, _id) {
- const res = st.s.getDB(kDbName).coll.update(query, update, {upsert: isUpsert});
- assert.commandWorked(res);
- assert.eq(0, res.nUpserted);
- assert.eq(1, res.nMatched);
- assert.eq(1, res.nModified);
-
- // Skip find based validation for pipleline update.
- if (!Array.isArray(update)) {
- if (update["$set"] != undefined) {
- update = update["$set"];
- }
- update["_id"] = _id;
- // Make sure that the update modified the document with the given _id.
- assert.eq(1, st.s.getDB(kDbName).coll.find(update).itcount());
+'use strict';
+
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+const st = new ShardingTest({mongos: 1, shards: 3});
+const kDbName = 'update_compound_sk';
+const ns = kDbName + '.coll';
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(kDbName);
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+
+assert.commandWorked(
+ st.s.getDB('config').adminCommand({shardCollection: ns, key: {x: 1, y: 1, z: 1}}));
+
+let docsToInsert = [
+ {_id: 0, x: 4, y: 3, z: 3},
+ {_id: 1, x: 100, y: 50, z: 3, a: 5},
+ {_id: 2, x: 100, y: 500, z: 3, a: 5}
+];
+
+// Make sure that shard0, shard1, and shard2 hold the _id 0, 1, and 2 documents, respectively.
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 0, z: 3}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 100, y: 100, z: 3}}));
+
+for (let i = 0; i < docsToInsert.length; i++) {
+ assert.commandWorked(st.s.getDB(kDbName).coll.insert(docsToInsert[i]));
+}
+
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 50, z: 3}, to: st.shard1.shardName}));
+assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {x: 100, y: 500, z: 3}, to: st.shard2.shardName}));
+cleanupOrphanedDocs(st, ns);
+
+function assertUpdateWorked(query, update, isUpsert, _id) {
+ const res = st.s.getDB(kDbName).coll.update(query, update, {upsert: isUpsert});
+ assert.commandWorked(res);
+ assert.eq(0, res.nUpserted);
+ assert.eq(1, res.nMatched);
+ assert.eq(1, res.nModified);
+
+    // Skip find-based validation for pipeline updates.
+ if (!Array.isArray(update)) {
+ if (update["$set"] != undefined) {
+ update = update["$set"];
}
+ update["_id"] = _id;
+ // Make sure that the update modified the document with the given _id.
+ assert.eq(1, st.s.getDB(kDbName).coll.find(update).itcount());
}
+}
- /**
- * For upserts this will insert a new document, for non-upserts it will be a no-op.
- */
- function assertUpdateWorkedWithNoMatchingDoc(query, update, isUpsert, inTransaction) {
- const res = sessionDB.coll.update(query, update, {upsert: isUpsert});
-
- assert.commandWorked(res);
- assert.eq(isUpsert ? 1 : 0, res.nUpserted);
- assert.eq(0, res.nMatched);
- assert.eq(0, res.nModified);
-
- // Skip find based validation for pipleline update or when inside a transaction.
- if (Array.isArray(update) || inTransaction)
- return;
-
- // Make sure that the upsert inserted the correct document or update did not insert
- // anything.
- assert.eq(
- isUpsert ? 1 : 0,
- st.s.getDB(kDbName).coll.find(update["$set"] ? update["$set"] : update).itcount());
- }
-
- //
- // Update Type Replacement-style.
- //
-
- // Test behaviours common to update and upsert.
- [false, true].forEach(function(isUpsert) {
- // Full shard key in query matches the update document.
- assertUpdateWorked({x: 4, y: 3, z: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
- assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
-
- // Case when upsert needs to insert a new document and the new document should belong in the
- // same shard as the targeted shard. For non-upserts, it will be a no-op.
- assertUpdateWorkedWithNoMatchingDoc(
- {x: 4, y: 0, z: 0}, {x: 1, z: 3, y: 110, a: 90}, isUpsert);
- });
-
- //
- // Test behaviours specific to non-upsert updates.
- //
-
- // Partial shard key in query can target a single shard, and shard key of existing document is
- // the same as the replacement's.
- assertUpdateWorked({x: 4}, {x: 4, y: 3, z: 3, a: 1}, false, 0);
- assertUpdateWorked({x: 4, _id: 0, z: 3}, {y: 3, x: 4, z: 3, a: 3}, false, 0);
-
- // Parital shard key in the query, update succeeds with no op when there is no matching document
- // for the query.
- assertUpdateWorkedWithNoMatchingDoc({x: 10}, {x: 10, y: 3, z: 3, a: 5}, false);
- assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 55, a: 15}, {x: 100, y: 55, z: 3, a: 6}, false);
- assertUpdateWorkedWithNoMatchingDoc({x: 11, _id: 3}, {x: 11, y: 3, z: 3, a: 7}, false);
-
- // Partial shard key in query can target a single shard, but fails while attempting to
- // modify shard key value.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: false}),
- [31025]);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, z: 3}, {x: 4, y: 3, z: 4, a: 1}, {upsert: false}),
- [31025]);
-
- // Full shard key in query, matches no document.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, {x: 1110, y: 55, z: 3, a: 111}, false);
-
- // Partial shard key in query, but can still target a single shard.
- assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 51, a: 5}, {x: 110, y: 55, z: 3, a: 8}, false);
-
- // Partial shard key in query cannot target a single shard, targeting happens using update
- // document.
-
- // When query doesn't match any doc.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0}, {x: 110, y: 55, z: 3, a: 110}, false);
- assertUpdateWorkedWithNoMatchingDoc({_id: 1}, {x: 110, y: 55, z: 3, a: 110}, false);
-
- // When query matches a doc and updates sucessfully.
- assertUpdateWorked({_id: 0, y: 3}, {z: 3, x: 4, y: 3, a: 2}, false, 0);
- assertUpdateWorked({_id: 0}, {z: 3, x: 4, y: 3, replStyle: 2}, false, 0);
-
- // When query matches a doc and fails to update because shard key needs to be updated.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({_id: 2}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
-
- //
- // Test upsert-specific behaviours.
- //
-
- // Case when upsert needs to insert a new document and the new document should belong in a shard
- // other than the one targeted by the update. These upserts can only succeed in a
- // multi-statement transaction or with retryWrites: true.
- const updateDoc = {x: 1110, y: 55, z: 3, replStyleUpdate: true};
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0}, updateDoc, {upsert: true}),
- ErrorCodes.IllegalOperation);
-
- // The above upsert works with transactions.
- session.startTransaction();
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, updateDoc, true, true);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(1, st.s.getDB(kDbName).coll.find(updateDoc).itcount());
-
- // Full shard key not specified in query.
-
- // Query on partial shard key.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, nonExistingField: true}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Query on partial shard key with _id.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 100, y: 50, a: 5, _id: 0}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 100, y: 50, a: 5, _id: 0, nonExistingField: true},
- {x: 100, y: 55, z: 3, a: 1},
- {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Query on only _id.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({_id: 0}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: "nonExisting"}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- //
- // Update Type Op-style.
- //
-
- // Test behaviours common to update and upsert.
- [false, true].forEach(function(isUpsert) {
- // Full shard key in query.
- assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {"$set": {opStyle: 1}}, isUpsert, 0);
- assertUpdateWorked({x: 4, z: 3, y: 3}, {"$set": {opStyle: 2}}, isUpsert, 0);
-
- // Case when upsert needs to insert a new document and the new document should belong in the
- // same shard as the targetted shard. For non-upserts, it will be a no op.
- assertUpdateWorkedWithNoMatchingDoc(
- {x: 4, y: 0, z: 0}, {"$set": {x: 1, z: 3, y: 111, a: 90}}, isUpsert);
- });
-
- // Test behaviours specific to non-upsert updates.
-
- // Full shard key in query, matches no document.
+/**
+ * For upserts this will insert a new document, for non-upserts it will be a no-op.
+ */
+function assertUpdateWorkedWithNoMatchingDoc(query, update, isUpsert, inTransaction) {
+ const res = sessionDB.coll.update(query, update, {upsert: isUpsert});
+
+ assert.commandWorked(res);
+ assert.eq(isUpsert ? 1 : 0, res.nUpserted);
+ assert.eq(0, res.nMatched);
+ assert.eq(0, res.nModified);
+
+    // Skip find-based validation for pipeline updates or when inside a transaction.
+ if (Array.isArray(update) || inTransaction)
+ return;
+
+    // Make sure that the upsert inserted the correct document, or that the update did not
+    // insert anything.
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName).coll.find(update["$set"] ? update["$set"] : update).itcount());
+}
+
+//
+// Update Type Replacement-style.
+//
+
+// Test behaviours common to update and upsert.
+[false, true].forEach(function(isUpsert) {
+ // Full shard key in query matches the update document.
+ assertUpdateWorked({x: 4, y: 3, z: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
+ assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {x: 4, y: 3, z: 3, a: 0}, isUpsert, 0);
+
+ // Case when upsert needs to insert a new document and the new document should belong in the
+ // same shard as the targeted shard. For non-upserts, it will be a no-op.
+ assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, {x: 1, z: 3, y: 110, a: 90}, isUpsert);
+});
+
+//
+// Test behaviours specific to non-upsert updates.
+//
+
+// Partial shard key in query can target a single shard, and shard key of existing document is
+// the same as the replacement's.
+assertUpdateWorked({x: 4}, {x: 4, y: 3, z: 3, a: 1}, false, 0);
+assertUpdateWorked({x: 4, _id: 0, z: 3}, {y: 3, x: 4, z: 3, a: 3}, false, 0);
+
+// Partial shard key in the query; the update succeeds as a no-op when there is no matching
+// document for the query.
+assertUpdateWorkedWithNoMatchingDoc({x: 10}, {x: 10, y: 3, z: 3, a: 5}, false);
+assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 55, a: 15}, {x: 100, y: 55, z: 3, a: 6}, false);
+assertUpdateWorkedWithNoMatchingDoc({x: 11, _id: 3}, {x: 11, y: 3, z: 3, a: 7}, false);
+
+// Partial shard key in query can target a single shard, but fails while attempting to
+// modify shard key value.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: false}),
+ [31025]);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, z: 3}, {x: 4, y: 3, z: 4, a: 1}, {upsert: false}),
+ [31025]);
+
+// Full shard key in query, matches no document.
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, {x: 1110, y: 55, z: 3, a: 111}, false);
+
+// Partial shard key in query, but can still target a single shard.
+assertUpdateWorkedWithNoMatchingDoc({x: 100, y: 51, a: 5}, {x: 110, y: 55, z: 3, a: 8}, false);
+
+// Partial shard key in query cannot target a single shard; targeting happens using the update
+// document.
+
+// When query doesn't match any doc.
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0}, {x: 110, y: 55, z: 3, a: 110}, false);
+assertUpdateWorkedWithNoMatchingDoc({_id: 1}, {x: 110, y: 55, z: 3, a: 110}, false);
+
+// When the query matches a doc and updates it successfully.
+assertUpdateWorked({_id: 0, y: 3}, {z: 3, x: 4, y: 3, a: 2}, false, 0);
+assertUpdateWorked({_id: 0}, {z: 3, x: 4, y: 3, replStyle: 2}, false, 0);
+
+// When query matches a doc and fails to update because shard key needs to be updated.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: 2}, {x: 110, y: 55, z: 3, a: 110}, false), 31025);
+
+//
+// Test upsert-specific behaviours.
+//
+
+// Case when upsert needs to insert a new document and the new document should belong in a shard
+// other than the one targeted by the update. These upserts can only succeed in a
+// multi-statement transaction or with retryWrites: true.
+const updateDoc = {
+ x: 1110,
+ y: 55,
+ z: 3,
+ replStyleUpdate: true
+};
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0}, updateDoc, {upsert: true}),
+ ErrorCodes.IllegalOperation);
+
+// The above upsert works with transactions.
+session.startTransaction();
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0}, updateDoc, true, true);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(1, st.s.getDB(kDbName).coll.find(updateDoc).itcount());
+
+// Full shard key not specified in query.
+
+// Query on partial shard key.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, a: 5}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, nonExistingField: true}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Query on partial shard key with _id.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {x: 100, y: 50, a: 5, _id: 0}, {x: 100, y: 55, z: 3, a: 1}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 100, y: 50, a: 5, _id: 0, nonExistingField: true},
+ {x: 100, y: 55, z: 3, a: 1},
+ {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Query on only _id.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: 0}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: "nonExisting"}, {z: 3, x: 4, y: 3, a: 2}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+//
+// Update Type Op-style.
+//
+
+// Test behaviours common to update and upsert.
+[false, true].forEach(function(isUpsert) {
+ // Full shard key in query.
+ assertUpdateWorked({x: 4, _id: 0, z: 3, y: 3}, {"$set": {opStyle: 1}}, isUpsert, 0);
+ assertUpdateWorked({x: 4, z: 3, y: 3}, {"$set": {opStyle: 2}}, isUpsert, 0);
+
+ // Case when upsert needs to insert a new document and the new document should belong in the
+    // same shard as the targeted shard. For non-upserts, it will be a no-op.
assertUpdateWorkedWithNoMatchingDoc(
- {x: 4, y: 0, z: 0}, {"$set": {x: 2110, y: 55, z: 3, a: 111}}, false);
-
- // Partial shard key in query, but can still target a single shard.
+ {x: 4, y: 0, z: 0}, {"$set": {x: 1, z: 3, y: 111, a: 90}}, isUpsert);
+});
+
+// Test behaviours specific to non-upsert updates.
+
+// Full shard key in query, matches no document.
+assertUpdateWorkedWithNoMatchingDoc(
+ {x: 4, y: 0, z: 0}, {"$set": {x: 2110, y: 55, z: 3, a: 111}}, false);
+
+// Partial shard key in query, but can still target a single shard.
+assertUpdateWorkedWithNoMatchingDoc(
+ {x: 100, y: 51, a: 112}, {"$set": {x: 110, y: 55, z: 3, a: 8}}, false);
+
+// Query on _id works for update.
+assertUpdateWorked({_id: 0}, {"$set": {opStyle: 6}}, false, 0);
+assertUpdateWorked({_id: 0, y: 3}, {"$set": {opStyle: 8, y: 3, x: 4}}, false, 0);
+
+// Partial shard key in the query targets a single shard. The update succeeds as a no-op when
+// there is no matching document for the query.
+assertUpdateWorkedWithNoMatchingDoc({x: 14, _id: 0}, {"$set": {opStyle: 5}}, false);
+assertUpdateWorkedWithNoMatchingDoc({x: 14}, {"$set": {opStyle: 5}}, false);
+
+assertUpdateWorkedWithNoMatchingDoc({x: -1, y: 0}, {"$set": {z: 3, y: 110, a: 91}}, false);
+
+// Partial shard key in query can target a single shard and doesn't try to update shard key
+// value.
+assertUpdateWorked({x: 4, z: 3}, {"$set": {opStyle: 3}}, false, 0);
+assertUpdateWorked({x: 4, _id: 0, z: 3}, {"$set": {y: 3, x: 4, z: 3, opStyle: 4}}, false, 0);
+
+// Partial shard key in query can target a single shard, but fails while attempting to modify
+// shard key value.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {_id: 1, x: 100, z: 3, a: 5}, {"$set": {y: 55, a: 11}}, {upsert: false}),
+ [31025]);
+assert.commandFailedWithCode(st.s.getDB(kDbName).coll.update(
+ {x: 4, z: 3}, {"$set": {x: 4, y: 3, z: 4, a: 1}}, {upsert: false}),
+ [31025]);
+
+// Test upsert-specific behaviours.
+
+// Case when upsert needs to insert a new document and the new document should belong in a shard
+// other than the one targeted by the update. These upserts can only succeed in a
+// multi-statement transaction or with retryWrites: true.
+const update = {
+ "$set": {x: 2110, y: 55, z: 3, opStyle: true}
+};
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0, opStyle: true}, update, {upsert: true}),
+ ErrorCodes.IllegalOperation);
+
+// The above upsert works with transactions.
+session.startTransaction();
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, opStyle: true}, update, true, true);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(1, st.s.getDB(kDbName).coll.find(update["$set"]).itcount());
+
+// Full shard key not specified in query.
+
+// Query on _id doesn't work for upserts.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {_id: 0}, {"$set": {x: 2, y: 11, z: 10, opStyle: 7}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Partial shard key can target a single shard. This style of update can work if SERVER-41243 is
+// implemented.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 14}, {"$set": {opStyle: 5}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 100, y: 51, nonExistingField: true},
+ {"$set": {x: 110, y: 55, z: 3, a: 8}},
+ {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Partial shard key cannot target a single shard.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update(
+ {_id: 0, y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+//
+// Update with pipeline.
+//
+
+// Test behaviours common to update and upsert.
+[false, true].forEach(function(isUpsert) {
+ // Full shard key in query.
+ assertUpdateWorked(
+ {_id: 0, x: 4, z: 3, y: 3}, [{$addFields: {pipelineUpdate: isUpsert}}], isUpsert, 0);
+ assert.eq(1,
+ st.s.getDB(kDbName)
+ .coll.find({_id: 0, x: 4, z: 3, y: 3, pipelineUpdate: isUpsert})
+ .itcount());
assertUpdateWorkedWithNoMatchingDoc(
- {x: 100, y: 51, a: 112}, {"$set": {x: 110, y: 55, z: 3, a: 8}}, false);
-
- // Query on _id works for update.
- assertUpdateWorked({_id: 0}, {"$set": {opStyle: 6}}, false, 0);
- assertUpdateWorked({_id: 0, y: 3}, {"$set": {opStyle: 8, y: 3, x: 4}}, false, 0);
-
- // Parital shard key in the query targets single shard. Update succeeds with no op when there is
- // no matching document for the query.
- assertUpdateWorkedWithNoMatchingDoc({x: 14, _id: 0}, {"$set": {opStyle: 5}}, false);
- assertUpdateWorkedWithNoMatchingDoc({x: 14}, {"$set": {opStyle: 5}}, false);
-
- assertUpdateWorkedWithNoMatchingDoc({x: -1, y: 0}, {"$set": {z: 3, y: 110, a: 91}}, false);
-
- // Partial shard key in query can target a single shard and doesn't try to update shard key
- // value.
- assertUpdateWorked({x: 4, z: 3}, {"$set": {opStyle: 3}}, false, 0);
- assertUpdateWorked({x: 4, _id: 0, z: 3}, {"$set": {y: 3, x: 4, z: 3, opStyle: 4}}, false, 0);
-
- // Partial shard key in query can target a single shard, but fails while attempting to modify
- // shard key value.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 1, x: 100, z: 3, a: 5}, {"$set": {y: 55, a: 11}}, {upsert: false}),
- [31025]);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {x: 4, z: 3}, {"$set": {x: 4, y: 3, z: 4, a: 1}}, {upsert: false}),
- [31025]);
-
- // Test upsert-specific behaviours.
-
- // Case when upsert needs to insert a new document and the new document should belong in a shard
- // other than the one targeted by the update. These upserts can only succeed in a
- // multi-statement transaction or with retryWrites: true.
- const update = {"$set": {x: 2110, y: 55, z: 3, opStyle: true}};
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0, opStyle: true}, update, {upsert: true}),
- ErrorCodes.IllegalOperation);
-
- // The above upsert works with transactions.
- session.startTransaction();
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, opStyle: true}, update, true, true);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(1, st.s.getDB(kDbName).coll.find(update["$set"]).itcount());
-
- // Full shard key not specified in query.
-
- // Query on _id doesn't work for upserts.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 0}, {"$set": {x: 2, y: 11, z: 10, opStyle: 7}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Partial shard key can target single shard. This style of update can work if SERVER-41243 is
- // implemented.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 14}, {"$set": {opStyle: 5}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 100, y: 51, nonExistingField: true},
- {"$set": {x: 110, y: 55, z: 3, a: 8}},
- {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- // Partial shard key cannot target single shard.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 0, y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({y: 3}, {"$set": {z: 3, x: 4, y: 3, a: 2}}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- //
- // Update with pipeline.
- //
-
- // Test behaviours common to update and upsert.
- [false, true].forEach(function(isUpsert) {
- // Full shard key in query.
- assertUpdateWorked(
- {_id: 0, x: 4, z: 3, y: 3}, [{$addFields: {pipelineUpdate: isUpsert}}], isUpsert, 0);
- assert.eq(1,
- st.s.getDB(kDbName)
- .coll.find({_id: 0, x: 4, z: 3, y: 3, pipelineUpdate: isUpsert})
- .itcount());
- assertUpdateWorkedWithNoMatchingDoc(
- {_id: 15, x: 44, z: 3, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
- assert.eq(isUpsert ? 1 : 0,
- st.s.getDB(kDbName)
- .coll.find({_id: 15, x: 44, z: 3, y: 3, pipelineUpdate: true})
- .itcount());
-
- assertUpdateWorkedWithNoMatchingDoc(
- {x: 45, z: 4, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
- assert.eq(
- isUpsert ? 1 : 0,
- st.s.getDB(kDbName).coll.find({x: 45, z: 4, y: 3, pipelineUpdate: true}).itcount());
-
- // Case when upsert needs to insert a new document and the new document should belong in the
- // same shard as the targeted shard.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
- [{
- "$project": {
- x: {$literal: 3},
- y: {$literal: 33},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- isUpsert);
- assert.eq(
- isUpsert ? 1 : 0,
- st.s.getDB(kDbName).coll.find({x: 3, z: 3, y: 33, pipelineUpdate: true}).itcount());
- });
-
- // Test behaviours specific to non-upsert updates.
-
- // Full shard key in query, matches no document.
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
- [{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- false);
- assert.eq(
- 0, st.s.getDB(kDbName).coll.find({x: 2111, z: 3, y: 55, pipelineUpdate: true}).itcount());
-
- // Partial shard key in query targets single shard but doesn't match any document on that shard.
- assertUpdateWorkedWithNoMatchingDoc({_id: 14, z: 4, x: 3}, [{$addFields: {foo: 4}}], false);
+ {_id: 15, x: 44, z: 3, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName)
+ .coll.find({_id: 15, x: 44, z: 3, y: 3, pipelineUpdate: true})
+ .itcount());
- // Partial shard key in query can target a single shard and doesn't try to update shard key
- // value.
assertUpdateWorkedWithNoMatchingDoc(
- {x: 46, z: 4}, [{$addFields: {y: 10, pipelineUpdateNoOp: false}}], false);
- assertUpdateWorked({x: 4, z: 3}, [{$addFields: {pipelineUpdateDoc: false}}], false, 0);
+ {x: 45, z: 4, y: 3}, [{$addFields: {pipelineUpdate: true}}], isUpsert);
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName).coll.find({x: 45, z: 4, y: 3, pipelineUpdate: true}).itcount());
- // Partial shard key in query cannot target a single shard.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({z: 3, y: 3}, [{$addFields: {foo: 4}}], {upsert: false}),
- [72, ErrorCodes.InvalidOptions]);
-
- // Test upsert-specific behaviours.
-
- // Case when upsert needs to insert a new document and the new document should belong in a shard
- // other than the one targeted by the update. These upserts can only succeed in a
- // multi-statement transaction or with retryWrites: true.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0},
- [{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- {upsert: true}),
- ErrorCodes.IllegalOperation);
-
- // The above upsert works with transactions.
- session.startTransaction();
- assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, pipelineUpdate: true},
- [{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
- }],
- true);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(
- 1, st.s.getDB(kDbName).coll.find({x: 2111, y: 55, z: 3, pipelineUpdate: true}).itcount());
-
- // Full shard key not specified in query.
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update(
- {_id: 18, z: 4, x: 3}, [{$addFields: {foo: 4}}], {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(
- st.s.getDB(kDbName).coll.update({_id: 0},
+ // Case when upsert needs to insert a new document and the new document should belong in the
+ // same shard as the targeted shard.
+ assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
[{
- "$project": {
- x: {$literal: 2111},
- y: {$literal: 55},
- z: {$literal: 3},
- pipelineUpdate: {$literal: true}
- }
+ "$project": {
+ x: {$literal: 3},
+ y: {$literal: 33},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
}],
- {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
- st.stop();
+ isUpsert);
+ assert.eq(isUpsert ? 1 : 0,
+ st.s.getDB(kDbName).coll.find({x: 3, z: 3, y: 33, pipelineUpdate: true}).itcount());
+});
+
+// Test behaviours specific to non-upsert updates.
+
+// Full shard key in query, matches no document.
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ false);
+assert.eq(0, st.s.getDB(kDbName).coll.find({x: 2111, z: 3, y: 55, pipelineUpdate: true}).itcount());
+
+// Partial shard key in query targets single shard but doesn't match any document on that shard.
+assertUpdateWorkedWithNoMatchingDoc({_id: 14, z: 4, x: 3}, [{$addFields: {foo: 4}}], false);
+
+// Partial shard key in query can target a single shard and doesn't try to update shard key
+// value.
+assertUpdateWorkedWithNoMatchingDoc(
+ {x: 46, z: 4}, [{$addFields: {y: 10, pipelineUpdateNoOp: false}}], false);
+assertUpdateWorked({x: 4, z: 3}, [{$addFields: {pipelineUpdateDoc: false}}], false, 0);
+
+// Partial shard key in query cannot target a single shard.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({z: 3, y: 3}, [{$addFields: {foo: 4}}], {upsert: false}),
+ [72, ErrorCodes.InvalidOptions]);
+
+// Test upsert-specific behaviours.
+
+// Case when upsert needs to insert a new document and the new document should belong in a shard
+// other than the one targeted by the update. These upserts can only succeed in a
+// multi-statement transaction or with retryWrites: true.
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({x: 4, y: 0, z: 0},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ {upsert: true}),
+ ErrorCodes.IllegalOperation);
+
+// The above upsert works with transactions.
+session.startTransaction();
+assertUpdateWorkedWithNoMatchingDoc({x: 4, y: 0, z: 0, pipelineUpdate: true},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ true);
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(1, st.s.getDB(kDbName).coll.find({x: 2111, y: 55, z: 3, pipelineUpdate: true}).itcount());
+
+// Full shard key not specified in query.
+assert.commandFailedWithCode(st.s.getDB(kDbName).coll.update(
+ {_id: 18, z: 4, x: 3}, [{$addFields: {foo: 4}}], {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(
+ st.s.getDB(kDbName).coll.update({_id: 0},
+ [{
+ "$project": {
+ x: {$literal: 2111},
+ y: {$literal: 55},
+ z: {$literal: 3},
+ pipelineUpdate: {$literal: true}
+ }
+ }],
+ {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+st.stop();
})();
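// A minimal standalone sketch of the cross-shard upsert behaviour exercised above, assuming
// the same compound shard key {x: 1, y: 1, z: 1} on db.coll and the st/kDbName/session names
// from that test; the sketch is illustrative and not part of the committed test file.
// Outside a transaction (and without retryWrites), an upsert whose new document would belong
// on a shard other than the one targeted by the query is rejected outright.
assert.commandFailedWithCode(
    st.s.getDB(kDbName).coll.update(
        {x: 4, y: 0, z: 0}, {"$set": {x: 2110, y: 55, z: 3}}, {upsert: true}),
    ErrorCodes.IllegalOperation);
// Retried inside a multi-statement transaction, the same upsert succeeds and the new
// document is inserted on its correct owning shard.
session.startTransaction();
assert.commandWorked(session.getDatabase(kDbName).coll.update(
    {x: 4, y: 0, z: 0}, {"$set": {x: 2110, y: 55, z: 3}}, {upsert: true}));
assert.commandWorked(session.commitTransaction_forTesting());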
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index 287e750c176..96bf4f454dc 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -1,85 +1,83 @@
// Tests that save style updates correctly change immutable fields
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
- var mongos = st.s;
- var config = mongos.getDB("config");
- var coll = mongos.getCollection(jsTestName() + ".coll1");
- var shard0 = st.shard0;
+var mongos = st.s;
+var config = mongos.getDB("config");
+var coll = mongos.getCollection(jsTestName() + ".coll1");
+var shard0 = st.shard0;
- assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
- assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
+assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
+assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
- var getDirectShardedConn = function(st, collName) {
+var getDirectShardedConn = function(st, collName) {
+ var shardConnWithVersion = new Mongo(st.shard0.host);
- var shardConnWithVersion = new Mongo(st.shard0.host);
+ var configConnStr = st._configDB;
- var configConnStr = st._configDB;
+ var maxChunk =
+ st.s0.getCollection("config.chunks").find({ns: collName}).sort({lastmod: -1}).next();
- var maxChunk =
- st.s0.getCollection("config.chunks").find({ns: collName}).sort({lastmod: -1}).next();
-
- var ssvInitCmd = {
- setShardVersion: collName,
- authoritative: true,
- configdb: configConnStr,
- version: maxChunk.lastmod,
- shard: st.shard0.shardName,
- versionEpoch: maxChunk.lastmodEpoch
- };
-
- printjson(ssvInitCmd);
- assert.commandWorked(shardConnWithVersion.getDB("admin").runCommand(ssvInitCmd));
-
- return shardConnWithVersion;
+ var ssvInitCmd = {
+ setShardVersion: collName,
+ authoritative: true,
+ configdb: configConnStr,
+ version: maxChunk.lastmod,
+ shard: st.shard0.shardName,
+ versionEpoch: maxChunk.lastmodEpoch
};
- var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
+ printjson(ssvInitCmd);
+ assert.commandWorked(shardConnWithVersion.getDB("admin").runCommand(ssvInitCmd));
+
+ return shardConnWithVersion;
+};
- // No shard key
- shard0Coll.remove({});
- assert.writeError(shard0Coll.save({_id: 3}));
+var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
- // Full shard key in save
- assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
+// No shard key
+shard0Coll.remove({});
+assert.writeError(shard0Coll.save({_id: 3}));
- // Full shard key on replacement (basically the same as above)
- shard0Coll.remove({});
- assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
+// Full shard key in save
+assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
- // Full shard key after $set
- shard0Coll.remove({});
- assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
+// Full shard key on replacement (basically the same as above)
+shard0Coll.remove({});
+assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
- // Update existing doc (replacement), same shard key value
- assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
+// Full shard key after $set
+shard0Coll.remove({});
+assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
- // Update existing doc ($set), same shard key value
- assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
+// Update existing doc (replacement), same shard key value
+assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
- // Error when trying to update a shard key outside of a transaction.
- assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {_id: 1, a: 2}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {"$set": {a: 2}}),
- ErrorCodes.IllegalOperation);
+// Update existing doc ($set), same shard key value
+assert.commandWorked(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
- // Error when unsetting shard key.
- assert.writeError(shard0Coll.update({_id: 1}, {b: 3}));
+// Error when trying to update a shard key outside of a transaction.
+assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {_id: 1, a: 2}),
+ ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(shard0Coll.update({_id: 1, a: 1}, {"$set": {a: 2}}),
+ ErrorCodes.IllegalOperation);
-    // Error when unsetting shard key ($unset).
- assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
+// Error when unsetting shard key.
+assert.writeError(shard0Coll.update({_id: 1}, {b: 3}));
- // Error due to removing all the embedded fields.
- shard0Coll.remove({});
+// Error when unsetting shard key ($unset).
+assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
- assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
+// Error due to removing all the embedded fields.
+shard0Coll.remove({});
- assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
- assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
+assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
- st.stop();
+assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
+assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
+st.stop();
})();
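// For contrast with the direct-to-shard errors above: the same shard key mutation is allowed
// when it is issued through mongos inside a multi-statement transaction, which handles it as
// a delete of the old document plus an insert of the new one. A hedged sketch assuming the
// {a: 1} shard key and the mongos/coll handles from this test; txnSession is illustrative.
var txnSession = mongos.startSession();
var txnColl = txnSession.getDatabase(coll.getDB().getName()).getCollection(coll.getName());
txnSession.startTransaction();
assert.commandWorked(txnColl.update({_id: 1, a: 1}, {$set: {a: 2}}));
assert.commandWorked(txnSession.commitTransaction_forTesting());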
diff --git a/jstests/sharding/update_replace_id.js b/jstests/sharding/update_replace_id.js
index db8c878674f..0cd19ef1d88 100644
--- a/jstests/sharding/update_replace_id.js
+++ b/jstests/sharding/update_replace_id.js
@@ -12,191 +12,186 @@
* filter.
*/
(function() {
- load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: {enableBalancer: false}});
+const st = new ShardingTest({shards: 2, mongos: 1, config: 1, other: {enableBalancer: false}});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- const shard0DB = st.shard0.getDB(jsTestName());
- const shard1DB = st.shard1.getDB(jsTestName());
+const shard0DB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
- assert.commandWorked(mongosDB.dropDatabase());
+assert.commandWorked(mongosDB.dropDatabase());
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
- // Enables profiling on both shards so that we can verify the targeting behaviour.
- function restartProfiling() {
- for (let shardDB of[shard0DB, shard1DB]) {
- shardDB.setProfilingLevel(0);
- shardDB.system.profile.drop();
- shardDB.setProfilingLevel(2);
- }
- }
-
- function setUpData() {
- // Write a single document to shard0 and verify that it is present.
- mongosColl.insert({_id: -100, a: -100, msg: "not_updated"});
- assert.docEq(shard0DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
-
- // Write a document with the same key directly to shard1. This simulates an orphaned
- // document, or the duplicate document which temporarily exists during a chunk migration.
- shard1DB.test.insert({_id: -100, a: -100, msg: "not_updated"});
-
- // Clear and restart the profiler on both shards.
- restartProfiling();
- }
-
- function runReplacementUpdateTestsForHashedShardKey() {
- setUpData();
-
- // Perform a replacement update whose query is an exact match on _id and whose replacement
- // document contains the remainder of the shard key. Despite the fact that the replacement
- // document does not contain the entire shard key, we expect that mongoS will extract the
- // _id from the query and combine it with the replacement doc to target a single shard.
- let writeRes = assert.commandWorked(
- mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
-
- // Verify that the update did not modify the orphan document.
- assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
- assert.eq(writeRes.nMatched, 1);
- assert.eq(writeRes.nModified, 1);
-
- // Verify that the update only targeted shard0 and that the resulting document appears as
- // expected.
- assert.docEq(mongosColl.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
-
- // Perform an upsert replacement whose query is an exact match on _id and whose replacement
- // doc contains the remainder of the shard key. The _id taken from the query should be used
- // both in targeting the update and in generating the new document.
- writeRes = assert.commandWorked(mongosColl.update(
- {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}));
- assert.eq(writeRes.nUpserted, 1);
-
- // Verify that the update only targeted shard1, and that the resulting document appears as
- // expected. At this point in the test we expect shard1 to be stale, because it was the
- // destination shard for the first moveChunk; we therefore explicitly check the profiler for
- // a successful update, i.e. one which did not report a stale config exception.
- assert.docEq(mongosColl.find({_id: 101}).toArray(),
- [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
- assert.docEq(shard1DB.test.find({_id: 101}).toArray(),
- [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "upsert_extracted_id_from_query"}
- });
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- op: "update",
- "command.u.msg": "upsert_extracted_id_from_query",
- errName: {$exists: false}
- }
- });
+// Enables profiling on both shards so that we can verify the targeting behaviour.
+function restartProfiling() {
+ for (let shardDB of [shard0DB, shard1DB]) {
+ shardDB.setProfilingLevel(0);
+ shardDB.system.profile.drop();
+ shardDB.setProfilingLevel(2);
}
-
- function runReplacementUpdateTestsForCompoundShardKey() {
- setUpData();
-
- // Perform a replacement update whose query is an exact match on _id and whose replacement
- // document contains the remainder of the shard key. Despite the fact that the replacement
- // document does not contain the entire shard key, we expect that mongoS will extract the
- // _id from the query and combine it with the replacement doc to target a single shard.
- let writeRes = assert.commandWorked(
- mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
-
- // Verify that the update did not modify the orphan document.
- assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
- assert.eq(writeRes.nMatched, 1);
- assert.eq(writeRes.nModified, 1);
-
- // Verify that the update only targeted shard0 and that the resulting document appears as
- // expected.
- assert.docEq(mongosColl.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
- });
-
- // An upsert whose query doesn't have full shard key will fail.
- assert.commandFailedWithCode(
- mongosColl.update(
- {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
-
-        // Verify that the failed upsert did not perform any writes.
- assert.docEq(mongosColl.find({_id: 101}).itcount(), 0);
-
- // Verify that an update whose query contains an exact match on _id but whose replacement
- // doc does not contain all other shard key fields will be rejected by mongoS.
- writeRes = assert.commandFailedWithCode(
- mongosColl.update({_id: -100, a: -100}, {msg: "update_failed_missing_shard_key_field"}),
- ErrorCodes.ShardKeyNotFound);
-
- // Check that the existing document remains unchanged, and that the update did not reach
- // either shard per their respective profilers.
- assert.docEq(mongosColl.find({_id: -100, a: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
- });
-
- // Verify that an upsert whose query contains an exact match on _id but whose replacement
- // document does not contain all other shard key fields will be rejected by mongoS, since it
- // does not contain an exact shard key match.
- writeRes = assert.commandFailedWithCode(
- mongosColl.update({_id: 200, a: 200}, {msg: "upsert_targeting_failed"}, {upsert: true}),
- ErrorCodes.ShardKeyNotFound);
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}
- });
- assert.eq(mongosColl.find({_id: 200, a: 200}).itcount(), 0);
- }
-
- // Shard the test collection on {_id: 1, a: 1}, split it into two chunks, and migrate one of
- // these to the second shard.
- st.shardColl(
- mongosColl, {_id: 1, a: 1}, {_id: 0, a: 0}, {_id: 1, a: 1}, mongosDB.getName(), true);
-
- // Run the replacement behaviour tests that are relevant to a compound key that includes _id.
- runReplacementUpdateTestsForCompoundShardKey();
-
- // Drop and reshard the collection on {_id: "hashed"}, which will autosplit across both shards.
- assert(mongosColl.drop());
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}});
-
- // Run the replacement behaviour tests relevant to a collection sharded on {_id: "hashed"}.
- runReplacementUpdateTestsForHashedShardKey();
-
- st.stop();
+}
+
+function setUpData() {
+ // Write a single document to shard0 and verify that it is present.
+ mongosColl.insert({_id: -100, a: -100, msg: "not_updated"});
+ assert.docEq(shard0DB.test.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "not_updated"}]);
+
+ // Write a document with the same key directly to shard1. This simulates an orphaned
+ // document, or the duplicate document which temporarily exists during a chunk migration.
+ shard1DB.test.insert({_id: -100, a: -100, msg: "not_updated"});
+
+ // Clear and restart the profiler on both shards.
+ restartProfiling();
+}
+
+function runReplacementUpdateTestsForHashedShardKey() {
+ setUpData();
+
+ // Perform a replacement update whose query is an exact match on _id and whose replacement
+ // document contains the remainder of the shard key. Despite the fact that the replacement
+ // document does not contain the entire shard key, we expect that mongoS will extract the
+ // _id from the query and combine it with the replacement doc to target a single shard.
+ let writeRes = assert.commandWorked(
+ mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
+
+ // Verify that the update did not modify the orphan document.
+ assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.eq(writeRes.nMatched, 1);
+ assert.eq(writeRes.nModified, 1);
+
+ // Verify that the update only targeted shard0 and that the resulting document appears as
+ // expected.
+ assert.docEq(mongosColl.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+
+ // Perform an upsert replacement whose query is an exact match on _id and whose replacement
+ // doc contains the remainder of the shard key. The _id taken from the query should be used
+ // both in targeting the update and in generating the new document.
+ writeRes = assert.commandWorked(mongosColl.update(
+ {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}));
+ assert.eq(writeRes.nUpserted, 1);
+
+ // Verify that the update only targeted shard1, and that the resulting document appears as
+ // expected. At this point in the test we expect shard1 to be stale, because it was the
+ // destination shard for the first moveChunk; we therefore explicitly check the profiler for
+ // a successful update, i.e. one which did not report a stale config exception.
+ assert.docEq(mongosColl.find({_id: 101}).toArray(),
+ [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
+ assert.docEq(shard1DB.test.find({_id: 101}).toArray(),
+ [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "upsert_extracted_id_from_query"}
+ });
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard1DB,
+ filter: {
+ op: "update",
+ "command.u.msg": "upsert_extracted_id_from_query",
+ errName: {$exists: false}
+ }
+ });
+}
+
+function runReplacementUpdateTestsForCompoundShardKey() {
+ setUpData();
+
+ // Perform a replacement update whose query is an exact match on _id and whose replacement
+ // document contains the remainder of the shard key. Despite the fact that the replacement
+ // document does not contain the entire shard key, we expect that mongoS will extract the
+ // _id from the query and combine it with the replacement doc to target a single shard.
+ let writeRes = assert.commandWorked(
+ mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
+
+ // Verify that the update did not modify the orphan document.
+ assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.eq(writeRes.nMatched, 1);
+ assert.eq(writeRes.nModified, 1);
+
+ // Verify that the update only targeted shard0 and that the resulting document appears as
+ // expected.
+ assert.docEq(mongosColl.find({_id: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
+ });
+
+ // An upsert whose query doesn't have full shard key will fail.
+ assert.commandFailedWithCode(
+ mongosColl.update(
+ {_id: 101}, {a: 101, msg: "upsert_extracted_id_from_query"}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+
+    // Verify that the failed upsert did not perform any writes.
+ assert.docEq(mongosColl.find({_id: 101}).itcount(), 0);
+
+ // Verify that an update whose query contains an exact match on _id but whose replacement
+ // doc does not contain all other shard key fields will be rejected by mongoS.
+ writeRes = assert.commandFailedWithCode(
+ mongosColl.update({_id: -100, a: -100}, {msg: "update_failed_missing_shard_key_field"}),
+ ErrorCodes.ShardKeyNotFound);
+
+ // Check that the existing document remains unchanged, and that the update did not reach
+ // either shard per their respective profilers.
+ assert.docEq(mongosColl.find({_id: -100, a: -100}).toArray(),
+ [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard0DB,
+ filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
+ });
+ profilerHasZeroMatchingEntriesOrThrow({
+ profileDB: shard1DB,
+ filter: {op: "update", "command.u.msg": "update_failed_missing_shard_key_field"}
+ });
+
+ // Verify that an upsert whose query contains an exact match on _id but whose replacement
+ // document does not contain all other shard key fields will be rejected by mongoS, since it
+ // does not contain an exact shard key match.
+ writeRes = assert.commandFailedWithCode(
+ mongosColl.update({_id: 200, a: 200}, {msg: "upsert_targeting_failed"}, {upsert: true}),
+ ErrorCodes.ShardKeyNotFound);
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: shard0DB, filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}});
+ profilerHasZeroMatchingEntriesOrThrow(
+ {profileDB: shard1DB, filter: {op: "update", "command.u.msg": "upsert_targeting_failed"}});
+ assert.eq(mongosColl.find({_id: 200, a: 200}).itcount(), 0);
+}
+
+// Shard the test collection on {_id: 1, a: 1}, split it into two chunks, and migrate one of
+// these to the second shard.
+st.shardColl(mongosColl, {_id: 1, a: 1}, {_id: 0, a: 0}, {_id: 1, a: 1}, mongosDB.getName(), true);
+
+// Run the replacement behaviour tests that are relevant to a compound key that includes _id.
+runReplacementUpdateTestsForCompoundShardKey();
+
+// Drop and reshard the collection on {_id: "hashed"}, which will autosplit across both shards.
+assert(mongosColl.drop());
+mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}});
+
+// Run the replacement behaviour tests relevant to a collection sharded on {_id: "hashed"}.
+runReplacementUpdateTestsForHashedShardKey();
+
+st.stop();
})();
\ No newline at end of file
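// A condensed sketch of the targeting rule described in the comments above, assuming the
// compound shard key {_id: 1, a: 1} and the mongosColl handle from this test; the msg values
// are illustrative. With an exact _id match in the query, mongos combines the _id from the
// query with the fields of the replacement doc to recover the full shard key and target a
// single shard.
assert.commandWorked(
    mongosColl.update({_id: -100}, {a: -100, msg: "replacement_targeted_by_query_id"}));
// If the replacement doc omits the remaining shard key fields, the full key cannot be
// reconstructed and mongos rejects the write.
assert.commandFailedWithCode(
    mongosColl.update({_id: -100, a: -100}, {msg: "missing_shard_key_field"}),
    ErrorCodes.ShardKeyNotFound);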
diff --git a/jstests/sharding/update_shard_key_conflicting_writes.js b/jstests/sharding/update_shard_key_conflicting_writes.js
index 3fc3fb9f416..8f228dfb70a 100644
--- a/jstests/sharding/update_shard_key_conflicting_writes.js
+++ b/jstests/sharding/update_shard_key_conflicting_writes.js
@@ -7,151 +7,150 @@
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/parallelTester.js'); // for ScopedThread.
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
+load('jstests/libs/parallelTester.js'); // for ScopedThread.
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
- let st = new ShardingTest({mongos: 1, shards: 2});
- let kDbName = 'db';
- let mongos = st.s0;
- let ns = kDbName + '.foo';
- let db = mongos.getDB(kDbName);
+let st = new ShardingTest({mongos: 1, shards: 2});
+let kDbName = 'db';
+let mongos = st.s0;
+let ns = kDbName + '.foo';
+let db = mongos.getDB(kDbName);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, st.shard0.shardName);
- // Shards the collection "db.foo" on shard key {"x" : 1} such that negative "x" values are on
- // shard0 and positive on shard1
- assert.commandWorked(db.foo.createIndex({"x": 1}));
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {"x": 1}}));
- assert.commandWorked(mongos.adminCommand({split: ns, middle: {"x": 0}}));
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {"x": 0}, to: st.shard1.shardName}));
+// Shards the collection "db.foo" on shard key {"x" : 1} such that negative "x" values are on
+// shard0 and positive on shard1
+assert.commandWorked(db.foo.createIndex({"x": 1}));
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {"x": 1}}));
+assert.commandWorked(mongos.adminCommand({split: ns, middle: {"x": 0}}));
+assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: {"x": 0}, to: st.shard1.shardName}));
- assert.commandWorked(db.foo.insert({"x": -50, "a": 10}));
- assert.commandWorked(db.foo.insert({"x": -100, "a": 4}));
- assert.commandWorked(db.foo.insert({"x": -150, "a": 15}));
- assert.commandWorked(db.foo.insert({"x": 50, "a": 6}));
- assert.commandWorked(db.foo.insert({"x": 100, "a": 8}));
- assert.commandWorked(db.foo.insert({"x": 150, "a": 20}));
+assert.commandWorked(db.foo.insert({"x": -50, "a": 10}));
+assert.commandWorked(db.foo.insert({"x": -100, "a": 4}));
+assert.commandWorked(db.foo.insert({"x": -150, "a": 15}));
+assert.commandWorked(db.foo.insert({"x": 50, "a": 6}));
+assert.commandWorked(db.foo.insert({"x": 100, "a": 8}));
+assert.commandWorked(db.foo.insert({"x": 150, "a": 20}));
- assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
- assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- assert.commandWorked(st.shard1.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
- assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(st.shard0.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
+assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns}));
+assert.commandWorked(st.shard1.adminCommand({_flushDatabaseCacheUpdates: kDbName}));
+assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns}));
- let session = mongos.startSession({retryWrites: false});
- let sessionDB = session.getDatabase(kDbName);
+let session = mongos.startSession({retryWrites: false});
+let sessionDB = session.getDatabase(kDbName);
- let session2 = mongos.startSession({retryWrites: true});
- let sessionDB2 = session2.getDatabase(kDbName);
+let session2 = mongos.startSession({retryWrites: true});
+let sessionDB2 = session2.getDatabase(kDbName);
- // Returns true if the command "cmdName" has started running on the server.
- function opStarted(cmdName) {
- return mongos.getDB(kDbName).currentOp().inprog.some(op => {
- return op.active && (op.ns === "db.foo") && (op.op === cmdName);
- });
- }
+// Returns true if the command "cmdName" has started running on the server.
+function opStarted(cmdName) {
+ return mongos.getDB(kDbName).currentOp().inprog.some(op => {
+ return op.active && (op.ns === "db.foo") && (op.op === cmdName);
+ });
+}
-    // Send an update that will change the shard key, causing the document to move shards. Wait
-    // until the specified failpoint is hit.
- function setFailPointAndSendUpdateToShardKeyInParallelShell(
- failpoint, failpointMode, shard, codeToRunInParallelShell) {
- assert.commandWorked(
- shard.adminCommand({configureFailPoint: failpoint, mode: failpointMode}));
- let awaitShell = startParallelShell(codeToRunInParallelShell, st.s.port);
- waitForFailpoint("Hit " + failpoint, 1);
- clearRawMongoProgramOutput();
- return awaitShell;
- }
+// Send an update that will change the shard key, causing the document to move shards. Wait
+// until the specified failpoint is hit.
+function setFailPointAndSendUpdateToShardKeyInParallelShell(
+ failpoint, failpointMode, shard, codeToRunInParallelShell) {
+ assert.commandWorked(shard.adminCommand({configureFailPoint: failpoint, mode: failpointMode}));
+ let awaitShell = startParallelShell(codeToRunInParallelShell, st.s.port);
+ waitForFailpoint("Hit " + failpoint, 1);
+ clearRawMongoProgramOutput();
+ return awaitShell;
+}
- /**
- * Test that an in-transaction update to the shard key and a non-transactional update to the
-     * same document will conflict and the non-transactional update will retry indefinitely. Once
-     * the transaction commits, the non-transactional update should complete. When 'maxTimeMS' is
-     * specified, the non-transactional write will time out.
- */
- (() => {
- const originalShardKeyValue = 50;
- const updatedShardKeyValue = -10;
+/**
+ * Test that an in-transaction update to the shard key and a non-transactional update to the
+ * same document will conflict and the non-transactional update will retry indefinitely. Once
+ * the transaction commits, the non-transactional update should complete. When 'maxTimeMS' is
+ * specified, the non-transactional write will time out.
+ */
+(() => {
+ const originalShardKeyValue = 50;
+ const updatedShardKeyValue = -10;
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({"x": originalShardKeyValue},
- {$set: {"x": updatedShardKeyValue}}));
-        // Attempt to update the same doc outside of a transaction; this update should time out.
- assert.commandFailedWithCode(db.runCommand({
- update: "foo",
- updates: [{q: {"x": originalShardKeyValue}, u: {$inc: {"a": 1}}}],
- maxTimeMS: 100
- }),
- ErrorCodes.MaxTimeMSExpired);
- // Run the non-transactional update again in a separate thread and wait for it to start.
- function conflictingUpdate(host, kDbName, query, update) {
- const mongosConn = new Mongo(host);
- return mongosConn.getDB(kDbName).foo.update(query, update);
- }
- let thread = new ScopedThread(
- conflictingUpdate, st.s.host, kDbName, {"x": originalShardKeyValue}, {$inc: {"a": 1}});
- thread.start();
- assert.soon(() => opStarted("update"));
- // Once we commit the transaction, the non-transaction update should finish, but it should
-        // not actually modify any documents since the transaction committed first.
- assert.commandWorked(session.commitTransaction_forTesting());
- thread.join();
- assert.commandWorked(thread.returnData());
- assert.eq(1, db.foo.find({"x": updatedShardKeyValue, "a": 6}).itcount());
- assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
- assert.eq(0, db.foo.find({"a": 7}).itcount());
- })();
+ session.startTransaction();
+ assert.commandWorked(
+ sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}));
+    // Attempt to update the same doc outside of a transaction; this update should time out.
+ assert.commandFailedWithCode(db.runCommand({
+ update: "foo",
+ updates: [{q: {"x": originalShardKeyValue}, u: {$inc: {"a": 1}}}],
+ maxTimeMS: 100
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+ // Run the non-transactional update again in a separate thread and wait for it to start.
+ function conflictingUpdate(host, kDbName, query, update) {
+ const mongosConn = new Mongo(host);
+ return mongosConn.getDB(kDbName).foo.update(query, update);
+ }
+ let thread = new ScopedThread(
+ conflictingUpdate, st.s.host, kDbName, {"x": originalShardKeyValue}, {$inc: {"a": 1}});
+ thread.start();
+ assert.soon(() => opStarted("update"));
+ // Once we commit the transaction, the non-transaction update should finish, but it should
+    // not actually modify any documents since the transaction committed first.
+ assert.commandWorked(session.commitTransaction_forTesting());
+ thread.join();
+ assert.commandWorked(thread.returnData());
+ assert.eq(1, db.foo.find({"x": updatedShardKeyValue, "a": 6}).itcount());
+ assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
+ assert.eq(0, db.foo.find({"a": 7}).itcount());
+})();
- /**
- * When the non-transactional update or delete runs before the transactional update to the shard
- * key, the update to the shard key should fail with WriteConflict.
- */
- (() => {
- const originalShardKeyValue = -10;
- let updatedShardKeyValue = 40;
+/**
+ * When the non-transactional update or delete runs before the transactional update to the shard
+ * key, the update to the shard key should fail with WriteConflict.
+ */
+(() => {
+ const originalShardKeyValue = -10;
+ let updatedShardKeyValue = 40;
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: "foo"}));
- // Run a non-transactional update before updating the shard key.
- assert.commandWorked(db.foo.update({"x": originalShardKeyValue}, {$inc: {"a": 1}}));
- // Run transactional update to change the shard key for the same doc as updated above
- assert.commandFailedWithCode(
- sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(1, db.foo.find({"x": originalShardKeyValue, "a": 7}).itcount());
- assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: "foo"}));
+ // Run a non-transactional update before updating the shard key.
+ assert.commandWorked(db.foo.update({"x": originalShardKeyValue}, {$inc: {"a": 1}}));
+ // Run transactional update to change the shard key for the same doc as updated above
+ assert.commandFailedWithCode(
+ sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
+ ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(1, db.foo.find({"x": originalShardKeyValue, "a": 7}).itcount());
+ assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
- // Run a non-transactional delete before updating the shard key.
- updatedShardKeyValue = 20;
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: "foo"}));
- assert.commandWorked(db.foo.remove({"x": originalShardKeyValue}));
-        // Run a transactional update to change the shard key for the same doc that was just removed
- assert.commandFailedWithCode(
- sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
- assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
- })();
+ // Run a non-transactional delete before updating the shard key.
+ updatedShardKeyValue = 20;
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: "foo"}));
+ assert.commandWorked(db.foo.remove({"x": originalShardKeyValue}));
+    // Run a transactional update to change the shard key for the same doc that was just removed
+ assert.commandFailedWithCode(
+ sessionDB.foo.update({"x": originalShardKeyValue}, {$set: {"x": updatedShardKeyValue}}),
+ ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(0, db.foo.find({"x": originalShardKeyValue}).itcount());
+ assert.eq(0, db.foo.find({"x": updatedShardKeyValue}).itcount());
+})();
- /**
- * Test scenarios where a concurrent update/delete that mutates the same document that a user is
- * updating the shard key for completes just before the update to the shard key throws
- * WouldChangeOwningShard.
- */
+/**
+ * Test scenarios where a concurrent update/delete that mutates the same document that a user is
+ * updating the shard key for completes just before the update to the shard key throws
+ * WouldChangeOwningShard.
+ */
- // Assert that if the concurrent update mutates the same document as the original update to the
- // shard key, we get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update mutates the same document as the original update to the
+// shard key, we get a write conflict.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -160,28 +159,26 @@
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeThrowWouldChangeOwningShard",
- "alwaysOn",
- st.shard0,
- codeToRunInParallelShell);
- // Send update that changes "a" so that the original update will no longer match this doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"a": 300}}));
- assert.commandWorked(st.shard0.adminCommand({
- configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
- mode: "off",
- }));
- awaitShell();
- assert.eq(1, db.foo.find({"x": -50, "a": 300}).itcount());
- assert.eq(0, db.foo.find({"a": 10}).itcount());
- assert.eq(0, db.foo.find({"x": 10}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeThrowWouldChangeOwningShard", "alwaysOn", st.shard0, codeToRunInParallelShell);
+ // Send update that changes "a" so that the original update will no longer match this doc.
+ // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"a": 300}}));
+ assert.commandWorked(st.shard0.adminCommand({
+ configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(1, db.foo.find({"x": -50, "a": 300}).itcount());
+ assert.eq(0, db.foo.find({"a": 10}).itcount());
+ assert.eq(0, db.foo.find({"x": 10}).itcount());
+})();
- // Assert that if a concurrent delete removes the same document that the original update
- // attempts to modify the shard key for, we get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if a concurrent delete removes the same document that the original update
+// attempts to modify the shard key for, we get a write conflict.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -190,27 +187,25 @@
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeThrowWouldChangeOwningShard",
- "alwaysOn",
- st.shard1,
- codeToRunInParallelShell);
-        // Send a delete that removes the doc so that the original update will no longer match it.
-        // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.remove({"x": 100}));
- assert.commandWorked(st.shard1.adminCommand({
- configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
- mode: "off",
- }));
- awaitShell();
- assert.eq(0, db.foo.find({"x": 100}).itcount());
- assert.eq(0, db.foo.find({"x": -1}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeThrowWouldChangeOwningShard", "alwaysOn", st.shard1, codeToRunInParallelShell);
+    // Send a delete that removes the doc so that the original update will no longer match it.
+    // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.remove({"x": 100}));
+ assert.commandWorked(st.shard1.adminCommand({
+ configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(0, db.foo.find({"x": 100}).itcount());
+ assert.eq(0, db.foo.find({"x": -1}).itcount());
+})();
- // Assert that if the concurrent update also mutates the shard key (and remains on the same
- // shard), the original update to the shard key will get a write conflict.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update also mutates the shard key (and remains on the same
+// shard), the original update to the shard key will get a write conflict.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -219,36 +214,34 @@
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeThrowWouldChangeOwningShard",
- "alwaysOn",
- st.shard0,
- codeToRunInParallelShell);
- // Send update that changes the shard key so that the original update will no longer match
- // this doc. This doc will still remain on its original shard. Turn off the failpoint so the
- // server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"x": -500}}));
- assert.commandWorked(st.shard0.adminCommand({
- configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
- mode: "off",
- }));
- awaitShell();
- assert.eq(0, db.foo.find({"x": -50}).itcount());
- assert.eq(1, db.foo.find({"x": -500}).itcount());
- assert.eq(0, db.foo.find({"x": 80}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeThrowWouldChangeOwningShard", "alwaysOn", st.shard0, codeToRunInParallelShell);
+ // Send update that changes the shard key so that the original update will no longer match
+ // this doc. This doc will still remain on its original shard. Turn off the failpoint so the
+ // server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": -50}, {$set: {"x": -500}}));
+ assert.commandWorked(st.shard0.adminCommand({
+ configureFailPoint: "hangBeforeThrowWouldChangeOwningShard",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(0, db.foo.find({"x": -50}).itcount());
+ assert.eq(1, db.foo.find({"x": -500}).itcount());
+ assert.eq(0, db.foo.find({"x": 80}).itcount());
+})();
- /**
- * Test scenario where a concurrent update/delete that mutates the same document that a user is
- * updating the shard key for is sent just after the update to the shard key has deleted the
- * original document but before it has inserted the new one. The second update should not match
- * any documents.
- */
+/**
+ * Test scenario where a concurrent update/delete that mutates the same document that a user is
+ * updating the shard key for is sent just after the update to the shard key has deleted the
+ * original document but before it has inserted the new one. The second update should not match
+ * any documents.
+ */
- // Assert that if the concurrent update mutates the same document as the original update to the
-    // shard key, it does not match any documents.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update mutates the same document as the original update to the
+// shard key, it does not match any documents.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -258,7 +251,8 @@
assert.eq(1, res.nModified);
assert.commandWorked(session.commitTransaction_forTesting());
}`;
- let codeToRunInParallelShell2 = `{
+ let codeToRunInParallelShell2 =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": -100}, {$inc: {"a": 1}});
@@ -266,26 +260,27 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
- let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
- assert.soon(() => opStarted("update"));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
- mode: "off",
- }));
- awaitShell();
- awaitShell2();
- assert.eq(1, db.foo.find({"x": 10}).itcount());
- assert.eq(1, db.foo.find({"a": 4}).itcount());
- assert.eq(0, db.foo.find({"x": -100}).itcount());
- assert.eq(0, db.foo.find({"a": 5}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
+ let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
+ assert.soon(() => opStarted("update"));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
+ mode: "off",
+ }));
+ awaitShell();
+ awaitShell2();
+ assert.eq(1, db.foo.find({"x": 10}).itcount());
+ assert.eq(1, db.foo.find({"a": 4}).itcount());
+ assert.eq(0, db.foo.find({"x": -100}).itcount());
+ assert.eq(0, db.foo.find({"a": 5}).itcount());
+})();
-    // Assert that if a concurrent delete tries to remove the same document that the original
-    // update is changing the shard key for, the delete does not match any documents.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if a concurrent delete tries to remove the same document that the original
+// update is changing the shard key for, the delete does not match any documents.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
session.startTransaction();
@@ -295,7 +290,8 @@
assert.eq(1, res.nModified);
assert.commandWorked(session.commitTransaction_forTesting());
}`;
- let codeToRunInParallelShell2 = `{
+ let codeToRunInParallelShell2 =
+ `{
let session = db.getMongo().startSession();
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.remove({"x": 10});
@@ -303,63 +299,64 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
- let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
- assert.soon(() => opStarted("remove"));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
- mode: "off",
- }));
- awaitShell();
- awaitShell2();
- assert.eq(0, db.foo.find({"x": 10}).itcount());
- assert.eq(1, db.foo.find({"x": -70}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangBeforeInsertOnUpdateShardKey", "alwaysOn", st.s, codeToRunInParallelShell);
+ let awaitShell2 = startParallelShell(codeToRunInParallelShell2, st.s.port);
+ assert.soon(() => opStarted("remove"));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangBeforeInsertOnUpdateShardKey",
+ mode: "off",
+ }));
+ awaitShell();
+ awaitShell2();
+ assert.eq(0, db.foo.find({"x": 10}).itcount());
+ assert.eq(1, db.foo.find({"x": -70}).itcount());
+})();
- /**
- * Attempt to update the shard key in two different transactions. The second transaction should
- * fail with WriteConflict.
- */
- (() => {
- session2 = mongos.startSession();
- sessionDB2 = session2.getDatabase(kDbName);
- // Start transactions on both sessions and then run the two change shard key updates for the
-        // Start transactions on both sessions, then run the two shard-key-changing updates for the
-        // same document.
- assert.commandWorked(sessionDB.runCommand({find: "foo"}));
- session2.startTransaction();
- // The first update will complete and the second should get a write conflict
- assert.commandWorked(sessionDB2.foo.update({"x": -500}, {$set: {"x": 25}}));
- assert.commandFailedWithCode(sessionDB.foo.update({"x": -500}, {$set: {"x": 250}}),
- ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(session2.commitTransaction_forTesting());
- assert.eq(1, db.foo.find({"x": 25}).itcount());
- assert.eq(0, db.foo.find({"x": 250}).itcount());
- assert.eq(0, db.foo.find({"x": -500}).itcount());
- })();
+/**
+ * Attempt to update the shard key in two different transactions. The second transaction should
+ * fail with WriteConflict.
+ */
+(() => {
+ session2 = mongos.startSession();
+ sessionDB2 = session2.getDatabase(kDbName);
+    // Start transactions on both sessions, then run the two shard-key-changing updates for the
+    // same document.
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: "foo"}));
+ session2.startTransaction();
+ // The first update will complete and the second should get a write conflict
+ assert.commandWorked(sessionDB2.foo.update({"x": -500}, {$set: {"x": 25}}));
+ assert.commandFailedWithCode(sessionDB.foo.update({"x": -500}, {$set: {"x": 250}}),
+ ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.commandWorked(session2.commitTransaction_forTesting());
+ assert.eq(1, db.foo.find({"x": 25}).itcount());
+ assert.eq(0, db.foo.find({"x": 250}).itcount());
+ assert.eq(0, db.foo.find({"x": -500}).itcount());
+})();
- /**
- * Test scenarios where a user sends an update as a retryable write that changes the shard key
- * and there is a concurrent update/delete that mutates the same document which completes after
- * the change to the shard key throws WouldChangeOwningShard the first time, but before mongos
- * starts a transaction to change the shard key.
- *
- * The scenario looks like:
- * 1. user sends db.foo.update({shardKey: x}, {shardKey: new x})
- * 2. shard throws WCOS for this update
- * 3. user sends db.foo.update({shardKey: x}, {otherFieldInDoc: y}) on a different thread, this
- * write completes successfully
- * 4. mongos starts a transaction and resends the update on line 1
- * 5. mongos deletes the old doc, inserts a doc with the updated shard key, and commits the txn
- */
+/**
+ * Test scenarios where a user sends an update as a retryable write that changes the shard key
+ * and there is a concurrent update/delete that mutates the same document which completes after
+ * the change to the shard key throws WouldChangeOwningShard the first time, but before mongos
+ * starts a transaction to change the shard key.
+ *
+ * The scenario looks like:
+ * 1. user sends db.foo.update({shardKey: x}, {shardKey: new x})
+ * 2. shard throws WCOS for this update
+ * 3. user sends db.foo.update({shardKey: x}, {otherFieldInDoc: y}) on a different thread, this
+ * write completes successfully
+ * 4. mongos starts a transaction and resends the update on line 1
+ * 5. mongos deletes the old doc, inserts a doc with the updated shard key, and commits the txn
+ */
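// From the client's point of view, step 1 above is a single retryable write; the retry in
// steps 2-5 happens entirely inside mongos. A hedged sketch assuming the {x: 1} shard key on
// db.foo from this file; the rwSession/rwColl names and the values used are illustrative.
let rwSession = db.getMongo().startSession({retryWrites: true});
let rwColl = rwSession.getDatabase("db").foo;
let rwRes = rwColl.update({"x": 50}, {$set: {"x": -75}});
assert.commandWorked(rwRes);
assert.eq(1, rwRes.nModified);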
- // Assert that if the concurrent update modifies the document so that the update which changes
- // the shard key no longer matches the doc, it does not modify the doc.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update modifies the document so that the shard-key-changing
+// update no longer matches the doc, that update does not modify the doc.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession({retryWrites : true});
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": -150, "a" : 15}, {$set: {"x": 1000}});
@@ -367,28 +364,29 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- "alwaysOn",
- st.s,
- codeToRunInParallelShell);
- // Send update that changes "a" so that the original update will no longer match this doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": -150}, {$set: {"a": 3000}}));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- mode: "off",
- }));
- awaitShell();
- assert.eq(1, db.foo.find({"x": -150, "a": 3000}).itcount());
- assert.eq(0, db.foo.find({"a": 15}).itcount());
- assert.eq(0, db.foo.find({"x": 1000}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ "alwaysOn",
+ st.s,
+ codeToRunInParallelShell);
+ // Send update that changes "a" so that the original update will no longer match this doc.
+ // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": -150}, {$set: {"a": 3000}}));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(1, db.foo.find({"x": -150, "a": 3000}).itcount());
+ assert.eq(0, db.foo.find({"a": 15}).itcount());
+ assert.eq(0, db.foo.find({"x": 1000}).itcount());
+})();
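
The helper used above lives in jstests/sharding/libs/update_shard_key_helpers.js and its body is not shown in this diff; its contract can be sketched roughly as follows (a hedged reconstruction, not the library's actual code, and the name here is illustrative). The failpoint pauses the server after the first WouldChangeOwningShard throw, and the returned handle joins the parallel shell:

// Sketch only: enable the named failpoint, launch the caller's update code in a
// parallel shell, and return the join function so the test can release the hang later.
function setFailPointAndRunUpdateInParallelShell(failpointName, mode, conn, code) {
    assert.commandWorked(conn.adminCommand({configureFailPoint: failpointName, mode: mode}));
    // startParallelShell returns a function that joins the spawned shell when called.
    return startParallelShell(code, conn.port);
}
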
- // Assert that if the concurrent update modifies the document and the update which changes the
- // shard key still matches the doc, the final document reflects both updates.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if the concurrent update modifies the document and the shard-key-changing update
+// still matches the doc, the final document reflects both updates.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession({retryWrites : true});
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": 150}, {$set: {"x": -1000}});
@@ -396,28 +394,29 @@
assert.eq(1, res.nMatched);
assert.eq(1, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- "alwaysOn",
- st.s,
- codeToRunInParallelShell);
- // Send update that changes "a". The original update will still match this doc because it
- // queries only on the shard key. Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.update({"x": 150}, {$set: {"a": -200}}));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- mode: "off",
- }));
- awaitShell();
- assert.eq(1, db.foo.find({"x": -1000, "a": -200}).itcount());
- assert.eq(0, db.foo.find({"a": 20}).itcount());
- assert.eq(0, db.foo.find({"x": 150}).itcount());
- })();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ "alwaysOn",
+ st.s,
+ codeToRunInParallelShell);
+ // Send update that changes "a". The original update will still match this doc because it
+ // queries only on the shard key. Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.update({"x": 150}, {$set: {"a": -200}}));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(1, db.foo.find({"x": -1000, "a": -200}).itcount());
+ assert.eq(0, db.foo.find({"a": 20}).itcount());
+ assert.eq(0, db.foo.find({"x": 150}).itcount());
+})();
- // Assert that if a concurrent delete removes the same document that the original update
- // attempts to modify the shard key for, we don't match any docs.
- (() => {
- let codeToRunInParallelShell = `{
+// Assert that if a concurrent delete removes the document whose shard key the original update
+// attempts to modify, the update matches no docs.
+(() => {
+ let codeToRunInParallelShell =
+ `{
let session = db.getMongo().startSession({retryWrites : true});
let sessionDB = session.getDatabase("db");
let res = sessionDB.foo.update({"x": -150}, {$set: {"x": 1000}});
@@ -425,24 +424,23 @@
assert.eq(0, res.nMatched);
assert.eq(0, res.nModified);
}`;
- let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
- "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- "alwaysOn",
- st.s,
- codeToRunInParallelShell);
- // Remove this doc so that the original update will no longer match any doc.
- // Turn off the failpoint so the server stops hanging.
- assert.commandWorked(sessionDB2.foo.remove({"x": -150}));
- assert.commandWorked(st.s.adminCommand({
- configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
- mode: "off",
- }));
- awaitShell();
- assert.eq(0, db.foo.find({"x": -150}).itcount());
- assert.eq(0, db.foo.find({"a": 3000}).itcount());
- assert.eq(0, db.foo.find({"x": 1000}).itcount());
- })();
-
- st.stop();
+ let awaitShell = setFailPointAndSendUpdateToShardKeyInParallelShell(
+ "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ "alwaysOn",
+ st.s,
+ codeToRunInParallelShell);
+ // Remove this doc so that the original update will no longer match any doc.
+ // Turn off the failpoint so the server stops hanging.
+ assert.commandWorked(sessionDB2.foo.remove({"x": -150}));
+ assert.commandWorked(st.s.adminCommand({
+ configureFailPoint: "hangAfterThrowWouldChangeOwningShardRetryableWrite",
+ mode: "off",
+ }));
+ awaitShell();
+ assert.eq(0, db.foo.find({"x": -150}).itcount());
+ assert.eq(0, db.foo.find({"a": 3000}).itcount());
+ assert.eq(0, db.foo.find({"x": 1000}).itcount());
+})();
+st.stop();
}());
diff --git a/jstests/sharding/update_shard_key_doc_moves_shards.js b/jstests/sharding/update_shard_key_doc_moves_shards.js
index ff673d91390..9567b807b1e 100644
--- a/jstests/sharding/update_shard_key_doc_moves_shards.js
+++ b/jstests/sharding/update_shard_key_doc_moves_shards.js
@@ -5,457 +5,454 @@
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
- const kDbName = 'db';
- const mongos = st.s0;
- const shard0 = st.shard0.shardName;
- const shard1 = st.shard1.shardName;
- const ns = kDbName + '.foo';
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
-
- function changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify) {
- let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
-
- // Assert that the document is not updated when the delete fails
- assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.WriteConflict,
- failCommands: ["delete"],
- failInternalCommands: true
- }
- }));
- if (isFindAndModify) {
- runFindAndModifyCmdFail(
- st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
- } else {
- runUpdateCmdFail(st,
- kDbName,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- {"$set": {"x": 30}},
- false,
- ErrorCodes.WriteConflict);
- }
- assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "off",
- }));
-
- // Assert that the document is not updated when the insert fails
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.NamespaceNotFound,
- failCommands: ["insert"],
- failInternalCommands: true
- }
- }));
- if (isFindAndModify) {
- runFindAndModifyCmdFail(
- st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
- } else {
- runUpdateCmdFail(st,
- kDbName,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- {"$set": {"x": 30}},
- false,
- ErrorCodes.NamespaceNotFound);
- }
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
- configureFailPoint: "failCommand",
- mode: "off",
- }));
-
- // Assert that the shard key update is not committed when there are no write errors and the
- // transaction is explicity aborted.
- if (runInTxn) {
- session.startTransaction();
- if (isFindAndModify) {
- sessionDB.foo.findAndModify({query: {"x": 300}, update: {"$set": {"x": 30}}});
- } else {
- assert.commandWorked(sessionDB.foo.update({"x": 300}, {"$set": {"x": 30}}));
- }
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
- }
-
- mongos.getDB(kDbName).foo.drop();
- }
-
- //
- // Test that changing the shard key works correctly when either the update or findAndModify
- // command is used and when the command is run either as a retryable write or in a transaction.
- // Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
- //
-
- const changeShardKeyOptions = [
- [false, false, false],
- [true, false, false],
- [true, true, false],
- [false, true, false],
- [false, false, true],
- [true, false, true],
- [false, true, true],
- [true, true, true]
- ];
-
- //
- // Tests for op-style updates.
- //
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using op style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
- upsert);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 30}}}, {"$set": {"x": {"a": 600}}}],
- upsert);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
- upsert);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
- "$set": {"_id": 30}
- });
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
- "$set": {"_id": {"a": 30}}
- });
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
- "$set": {"x": [30]}
- });
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
- "$unset": {"x": 1}
- });
-
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}});
- }
- changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify);
- }
- });
-
- //
- // Tests for replacement style updates.
- //
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using replacement style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [{"x": 30}, {"x": 600}],
- upsert);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 30}}, {"x": {"a": 600}}],
- upsert);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 30, "y": 80}, {"x": 600, "y": 3}],
- upsert);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
- "_id": 30
- });
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
- "_id": {"a": 30}
- });
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"x": 30});
- }
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
- "x": [30]
- });
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {});
- }
- });
-
- let session = st.s.startSession({retryWrites: true});
- let sessionDB = session.getDatabase(kDbName);
-
- let docsToInsert =
- [{"x": 4, "a": 3}, {"x": 78}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
-
- // ----Assert correct behavior when collection is hash sharded----
+'use strict';
- // Non-upsert case
- assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, false);
- assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, true);
+load("jstests/sharding/libs/update_shard_key_helpers.js");
- // ----Assert correct error when changing a doc shard key conflicts with an orphan----
+const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+const kDbName = 'db';
+const mongos = st.s0;
+const shard0 = st.shard0.shardName;
+const shard1 = st.shard1.shardName;
+const ns = kDbName + '.foo';
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- mongos.getDB(kDbName).foo.insert({"x": 505});
-
- let _id = mongos.getDB(kDbName).foo.find({"x": 505}).toArray()[0]._id;
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.insert({"x": 2, "_id": _id}));
-
- let res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- assert(res.getWriteError().errmsg.includes(
- "There is either an orphan for this document or _id for this collection is not globally unique."));
-
- session.startTransaction();
- res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- assert(res.errmsg.includes(
- "There is either an orphan for this document or _id for this collection is not globally unique."));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- mongos.getDB(kDbName).foo.drop();
-
- // ----Assert retryable write result has WCE when the internal commitTransaction fails----
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+function changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify) {
+ let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
cleanupOrphanedDocs(st, ns);
- // Turn on failcommand fail point to fail CoordinateCommitTransaction
- assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ // Assert that the document is not updated when the delete fails
+ assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
configureFailPoint: "failCommand",
mode: "alwaysOn",
data: {
- writeConcernError: {code: NumberInt(12345), errmsg: "dummy error"},
- failCommands: ["coordinateCommitTransaction"],
+ errorCode: ErrorCodes.WriteConflict,
+ failCommands: ["delete"],
failInternalCommands: true
}
}));
+ if (isFindAndModify) {
+ runFindAndModifyCmdFail(
+ st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
+ } else {
+ runUpdateCmdFail(st,
+ kDbName,
+ session,
+ sessionDB,
+ runInTxn,
+ {"x": 300},
+ {"$set": {"x": 30}},
+ false,
+ ErrorCodes.WriteConflict);
+ }
+ assert.commandWorked(st.rs1.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "off",
+ }));
- res = sessionDB.foo.update({x: 4}, {$set: {x: 1000}});
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(12345, res.getWriteConcernError().code);
-
- let findAndModCmd = {
- findAndModify: 'foo',
- query: {x: 78},
- update: {$set: {x: 250}},
- lsid: {id: UUID()},
- txnNumber: NumberLong(1),
- };
- res = sessionDB.runCommand(findAndModCmd);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.writeConcernError.code, 12345);
- assert(res.writeConcernError.errmsg.includes("dummy error"));
-
+ // Assert that the document is not updated when the insert fails
+ assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ errorCode: ErrorCodes.NamespaceNotFound,
+ failCommands: ["insert"],
+ failInternalCommands: true
+ }
+ }));
+ if (isFindAndModify) {
+ runFindAndModifyCmdFail(
+ st, kDbName, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}}, false);
+ } else {
+ runUpdateCmdFail(st,
+ kDbName,
+ session,
+ sessionDB,
+ runInTxn,
+ {"x": 300},
+ {"$set": {"x": 30}},
+ false,
+ ErrorCodes.NamespaceNotFound);
+ }
assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
configureFailPoint: "failCommand",
mode: "off",
}));
- mongos.getDB(kDbName).foo.drop();
+ // Assert that the shard key update is not committed when there are no write errors and the
+    // transaction is explicitly aborted.
+ if (runInTxn) {
+ session.startTransaction();
+ if (isFindAndModify) {
+ sessionDB.foo.findAndModify({query: {"x": 300}, update: {"$set": {"x": 30}}});
+ } else {
+ assert.commandWorked(sessionDB.foo.update({"x": 300}, {"$set": {"x": 30}}));
+ }
+ assert.commandWorked(session.abortTransaction_forTesting());
+ assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
+ assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+ }
- // ----Assert that updating the shard key in a batch with size > 1 fails----
+ mongos.getDB(kDbName).foo.drop();
+}
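
The failCommand failpoint that changeShardKeyWhenFailpointsSet relies on is general purpose. A standalone sketch (assuming conn is a direct connection to a mongod started with test commands enabled, as ShardingTest does) of failing exactly one command with a chosen error code:

// Make the next "insert" fail with NamespaceNotFound, then verify and clean up.
assert.commandWorked(conn.adminCommand({
    configureFailPoint: "failCommand",
    mode: {times: 1},  // the test above uses "alwaysOn" plus an explicit "off" instead
    data: {errorCode: ErrorCodes.NamespaceNotFound, failCommands: ["insert"]}
}));
assert.commandFailedWithCode(
    conn.getDB("test").runCommand({insert: "c", documents: [{x: 1}]}),
    ErrorCodes.NamespaceNotFound);
assert.commandWorked(conn.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
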
+
+//
+// Test that changing the shard key works correctly when either the update or findAndModify
+// command is used and when the command is run either as a retryable write or in a transaction.
+// Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
+//
+
+const changeShardKeyOptions = [
+ [false, false, false],
+ [true, false, false],
+ [true, true, false],
+ [false, true, false],
+ [false, false, true],
+ [true, false, true],
+ [false, true, true],
+ [true, true, true]
+];
+
+//
+// Tests for op-style updates.
+//
+
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
+
+ jsTestLog("Testing changing the shard key using op style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
+
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, true);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, false);
+ assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
+ upsert);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 30}}}, {"$set": {"x": {"a": 600}}}],
+ upsert);
+ assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 30}}, {"$set": {"x": 600}}],
+ upsert);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
+ "$set": {"_id": 30}
+ });
+ assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 30}}
+ });
+ assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
+ "$set": {"x": [30]}
+ });
+ assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
+ "$unset": {"x": 1}
+ });
+
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"$set": {"x": 30}});
+ }
+ changeShardKeyWhenFailpointsSet(session, sessionDB, runInTxn, isFindAndModify);
+ }
+});
- session = st.s.startSession({retryWrites: false});
- sessionDB = session.getDatabase(kDbName);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, true);
- assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, false);
+//
+// Tests for replacement style updates.
+//
- // ----Multiple writes in txn-----
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
- // Update two docs, updating one twice
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+ jsTestLog("Testing changing the shard key using replacement style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
- session.startTransaction();
- let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
- assert.commandWorked(sessionDB.foo.update({"x": 30}, {"$set": {"x": 600}}));
- assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 50}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
- assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 50}).itcount());
+ assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 30}, {"x": 600}],
+ upsert);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 30}}, {"x": {"a": 600}}],
+ upsert);
+ assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 30, "y": 80}, {"x": 600, "y": 3}],
+ upsert);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id": 300}, {
+ "_id": 30
+ });
+ assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"_id.a": 300}, {
+ "_id": {"a": 30}
+ });
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, runInTxn, {"x": 300}, {"x": 30});
+ }
+ assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {
+ "x": [30]
+ });
+ assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, runInTxn, isFindAndModify, {"x": 300}, {});
+ }
+});
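
The only difference between the two forEach blocks above is update syntax; as a reminder of the distinction the tests are exercising (assuming a collection c holding {_id: 1, x: 300, y: 80}):

// Op-style: only the named fields change; the rest of the document is preserved.
c.update({_id: 1}, {$set: {x: 30}});    // -> {_id: 1, x: 30, y: 80}
// Replacement-style: the supplied document replaces everything except _id.
c.update({_id: 1}, {x: 600});           // -> {_id: 1, x: 600}
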
- mongos.getDB(kDbName).foo.drop();
+let session = st.s.startSession({retryWrites: true});
+let sessionDB = session.getDatabase(kDbName);
- // Check that doing $inc on doc A, then updating shard key for doc A, then $inc again only incs
- // once
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+let docsToInsert =
+ [{"x": 4, "a": 3}, {"x": 78}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+// ----Assert correct behavior when collection is hash sharded----
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30, "a": 7}).itcount());
+// Non-upsert case
+assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, false);
+assertCanUpdatePrimitiveShardKeyHashedChangeShards(st, kDbName, ns, session, sessionDB, true);
- mongos.getDB(kDbName).foo.drop();
+// ----Assert correct error when changing a doc shard key conflicts with an orphan----
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+mongos.getDB(kDbName).foo.insert({"x": 505});
- // Insert and $inc before moving doc
- session.startTransaction();
- id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
- assert.commandWorked(sessionDB.foo.insert({"x": 1, "a": 1}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 20}}});
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).toArray().length);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20}).toArray().length);
- assert.eq(20, mongos.getDB(kDbName).foo.find({"_id": id}).toArray()[0].x);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).toArray().length);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20, "a": 7}).toArray().length);
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).toArray().length);
+let _id = mongos.getDB(kDbName).foo.find({"x": 505}).toArray()[0]._id;
+assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.insert({"x": 2, "_id": _id}));
- mongos.getDB(kDbName).foo.drop();
+let res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
+assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+assert(res.getWriteError().errmsg.includes(
+ "There is either an orphan for this document or _id for this collection is not globally unique."));
- // ----Assert correct behavior when update is sent directly to a shard----
+session.startTransaction();
+res = sessionDB.foo.update({"x": 505}, {"$set": {"x": 20}});
+assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+assert(res.errmsg.includes(
+ "There is either an orphan for this document or _id for this collection is not globally unique."));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
- cleanupOrphanedDocs(st, ns);
+mongos.getDB(kDbName).foo.drop();
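
The DuplicateKey above falls out of how a shard-key-changing update is executed: mongos deletes the document on the donor shard and inserts it, with the same _id, on the recipient shard, so the orphan planted on rs0 collides in the recipient's unique _id index. The collision itself is ordinary index behavior, as a quick standalone sketch shows (collection name is illustrative):

// Any insert that reuses an existing _id fails the same way the internal
// insert phase of the shard key update does.
const dupes = db.getSiblingDB("test").dupes;
dupes.drop();
assert.commandWorked(dupes.insert({_id: 1, x: 505}));
assert.commandFailedWithCode(dupes.insert({_id: 1, x: 20}), ErrorCodes.DuplicateKey);
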
- //
- // For Op-style updates.
- //
-
- // An update sent directly to a shard cannot change the shard key.
- assert.commandFailedWithCode(
- st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(st.rs1.getPrimary().getDB(kDbName).foo.update(
- {"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(st.rs0.getPrimary().getDB(kDbName).foo.update(
- {"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
- ErrorCodes.ImmutableField);
-
- // The query will not match a doc and upsert is false, so this will not fail but will be a
- // no-op.
- res = assert.commandWorked(
- st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}));
- assert.eq(0, res.nMatched);
- assert.eq(0, res.nModified);
- assert.eq(0, res.nUpserted);
-
- //
- // For Replacement style updates.
- //
-
- // An update sent directly to a shard cannot change the shard key.
- assert.commandFailedWithCode(
- st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(
- st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
- ErrorCodes.ImmutableField);
- assert.commandFailedWithCode(
- st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
- ErrorCodes.ImmutableField);
-
- // The query will not match a doc and upsert is false, so this will not fail but will be a
- // no-op.
- res = assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}));
- assert.eq(0, res.nMatched);
- assert.eq(0, res.nModified);
- assert.eq(0, res.nUpserted);
+// ----Assert retryable write result has WCE when the internal commitTransaction fails----
- mongos.getDB(kDbName).foo.drop();
-
- st.stop();
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+// Turn on the failCommand failpoint so that coordinateCommitTransaction fails
+assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {
+ writeConcernError: {code: NumberInt(12345), errmsg: "dummy error"},
+ failCommands: ["coordinateCommitTransaction"],
+ failInternalCommands: true
+ }
+}));
+
+res = sessionDB.foo.update({x: 4}, {$set: {x: 1000}});
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(12345, res.getWriteConcernError().code);
+
+let findAndModCmd = {
+ findAndModify: 'foo',
+ query: {x: 78},
+ update: {$set: {x: 250}},
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(1),
+};
+res = sessionDB.runCommand(findAndModCmd);
+assert.commandWorkedIgnoringWriteConcernErrors(res);
+assert.eq(res.writeConcernError.code, 12345);
+assert(res.writeConcernError.errmsg.includes("dummy error"));
+
+assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "off",
+}));
+
+mongos.getDB(kDbName).foo.drop();
+
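Note the asymmetry in how the injected write concern error is read back above: the update() helper returns a shell WriteResult whose error is fetched through an accessor, while the raw runCommand response exposes it as a plain subdocument. A small sketch of the two access patterns (assuming sessionDB and a writeConcernError being injected as above):

let res1 = sessionDB.foo.update({x: 4}, {$set: {x: 1000}});  // shell WriteResult
if (res1.getWriteConcernError() != null) {
    print("WCE via accessor: " + res1.getWriteConcernError().code);
}
let res2 = sessionDB.runCommand(
    {findAndModify: "foo", query: {x: 78}, update: {$set: {x: 250}}});
if (res2.writeConcernError !== undefined) {
    print("WCE via field: " + res2.writeConcernError.errmsg);
}
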
+// ----Assert that updating the shard key in a batch with size > 1 fails----
+
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, true);
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, false, false);
+
+session = st.s.startSession({retryWrites: false});
+sessionDB = session.getDatabase(kDbName);
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, true);
+assertCannotUpdateInBulkOpWhenDocsMoveShards(st, kDbName, ns, session, sessionDB, true, false);
+
+// ----Multiple writes in txn----
+
+// Update two docs, updating one twice
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+session.startTransaction();
+let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
+assert.commandWorked(sessionDB.foo.update({"x": 30}, {"$set": {"x": 600}}));
+assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 50}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 50}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing $inc on doc A, then updating the shard key for doc A, then repeating the
+// $inc only increments once: after the shard key changes, the original query no longer matches.
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+session.startTransaction();
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 30}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30, "a": 7}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+// Insert and $inc before moving doc
+session.startTransaction();
+id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
+assert.commandWorked(sessionDB.foo.insert({"x": 1, "a": 1}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 20}}});
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).toArray().length);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20}).toArray().length);
+assert.eq(20, mongos.getDB(kDbName).foo.find({"_id": id}).toArray()[0].x);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).toArray().length);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 20, "a": 7}).toArray().length);
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).toArray().length);
+
+mongos.getDB(kDbName).foo.drop();
+
+// ----Assert correct behavior when update is sent directly to a shard----
+
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+cleanupOrphanedDocs(st, ns);
+
+//
+// For Op-style updates.
+//
+
+// An update sent directly to a shard cannot change the shard key.
+assert.commandFailedWithCode(
+ st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {$set: {"x": 2}}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+
+// The queried document lives on the other shard, so the query matches nothing here; with
+// upsert false the update does not fail but is a no-op.
+res = assert.commandWorked(
+ st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {$set: {"x": 2}}));
+assert.eq(0, res.nMatched);
+assert.eq(0, res.nModified);
+assert.eq(0, res.nUpserted);
+
+//
+// For Replacement style updates.
+//
+
+// An update sent directly to a shard cannot change the shard key.
+assert.commandFailedWithCode(st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs1.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+assert.commandFailedWithCode(
+ st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 1000}, {"x": 2}, {upsert: true}),
+ ErrorCodes.ImmutableField);
+
+// The queried document lives on the other shard, so the query matches nothing here; with
+// upsert false the update does not fail but is a no-op.
+res = assert.commandWorked(st.rs0.getPrimary().getDB(kDbName).foo.update({"x": 500}, {"x": 2}));
+assert.eq(0, res.nMatched);
+assert.eq(0, res.nModified);
+assert.eq(0, res.nUpserted);
+
+mongos.getDB(kDbName).foo.drop();
+
+st.stop();
})();
diff --git a/jstests/sharding/update_shard_key_doc_on_same_shard.js b/jstests/sharding/update_shard_key_doc_on_same_shard.js
index 90133e49325..00f2aa23435 100644
--- a/jstests/sharding/update_shard_key_doc_on_same_shard.js
+++ b/jstests/sharding/update_shard_key_doc_on_same_shard.js
@@ -5,783 +5,768 @@
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
- const kDbName = 'db';
- const ns = kDbName + '.foo';
- const mongos = st.s0;
- const shard0 = st.shard0.shardName;
- const shard1 = st.shard1.shardName;
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
-
- // -----------------------------------------
- // Updates to the shard key are not allowed if write is not retryable and not in a multi-stmt
- // txn
- // -----------------------------------------
-
- let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- assert.writeError(mongos.getDB(kDbName).foo.update({"x": 300}, {"x": 600}));
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
-
- assert.throws(function() {
- mongos.getDB(kDbName).foo.findAndModify({query: {"x": 300}, update: {$set: {"x": 600}}});
- });
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // ---------------------------------
- // Update shard key retryable write
- // ---------------------------------
-
- let session = st.s.startSession({retryWrites: true});
- let sessionDB = session.getDatabase(kDbName);
-
- // Modify updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 900}, {"x": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"$set": {"x": 600}});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"x": 600});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": [300]});
-
- // Modify style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert: true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- false,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": [300]});
-
- // Bulk writes retryable writes
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, false, false);
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, false, true);
-
- // ----Assert correct behavior when collection is hash sharded----
-
- assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
-
- // ---------------------------------------
- // Update shard key in multi statement txn
- // ---------------------------------------
-
- session = st.s.startSession();
- sessionDB = session.getDatabase(kDbName);
-
- // ----Single writes in txn----
-
- // Modify updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"$set": {"x": 600}});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement updates
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- false,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotUpdateWithMultiTrue(
- st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"x": 600});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
-
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": [300]});
-
- // Modify style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"$set": {"_id": 600}});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {
- "$set": {"_id": {"a": 600}}
- });
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$set": {"x": [300]}});
- assertCannotUnsetSKField(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$unset": {"x": 1}});
-
- // Replacement style findAndModify
-
- // upsert : false
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- false);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- false);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- false);
-
- // upsert : true
- assertCanUpdatePrimitiveShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300}, {"x": 4}],
- [{"x": 600}, {"x": 30}],
- true);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x.a": 300}, {"x.a": 4}],
- [{"x": {"a": 600}}, {"x": {"a": 30}}],
- true);
- assertCanUpdatePartialShardKey(st,
- kDbName,
- ns,
- session,
- sessionDB,
- true,
- true,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
- true);
-
- // failing cases
- assertCannotUpdate_id(
- st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"_id": 600});
- assertCannotUpdate_idDottedPath(
- st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {"_id": {"a": 600}});
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300, "y": 80}, {"x": 600});
- // Shard key fields are missing in query.
- assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
- assertCannotUpdateSKToArray(
- st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": [300]});
-
- // ----Assert correct behavior when collection is hash sharded----
-
- assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
-
- // ----Multiple writes in txn-----
-
- // Bulk writes in txn
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, true, false);
- assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(
- st, kDbName, ns, session, sessionDB, true, true);
-
- // Update two docs, updating one twice
- docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- session.startTransaction();
- let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
- assert.commandWorked(sessionDB.foo.update({"x": 400}, {"x": 600, "_id": id}));
- assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 30}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
- assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // Check that doing $inc on doc A, then updating shard key for doc A, then $inc again only incs
- // once
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
- assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400, "a": 7}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // Check that doing findAndModify to update shard key followed by $inc works correctly
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- session.startTransaction();
- sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 600}}});
- assert.commandWorked(sessionDB.foo.update({"x": 600}, {"$inc": {"a": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600, "a": 7}).itcount());
-
- mongos.getDB(kDbName).foo.drop();
-
- // Check that doing findAndModify followed by an update on a shard key works correctly
- shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
-
- id = mongos.getDB(kDbName).foo.find({"x": 4}).toArray()[0]._id;
- session.startTransaction();
- sessionDB.foo.findAndModify({query: {"x": 4}, update: {$set: {"x": 20}}});
- assert.commandWorked(sessionDB.foo.update({"x": 20}, {$set: {"x": 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
- assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 20}).itcount());
- assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).itcount());
- assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 1}).toArray()[0]._id);
-
- mongos.getDB(kDbName).foo.drop();
-
- st.stop();
-
+'use strict';
+
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+const kDbName = 'db';
+const ns = kDbName + '.foo';
+const mongos = st.s0;
+const shard0 = st.shard0.shardName;
+const shard1 = st.shard1.shardName;
+
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+
+// -----------------------------------------
+// Updates to the shard key are not allowed if the write is not retryable and is not in a
+// multi-stmt txn
+// -----------------------------------------
+
+let docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+assert.writeError(mongos.getDB(kDbName).foo.update({"x": 300}, {"x": 600}));
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+
+assert.throws(function() {
+ mongos.getDB(kDbName).foo.findAndModify({query: {"x": 300}, update: {$set: {"x": 600}}});
+});
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 300}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// ---------------------------------
+// Update shard key retryable write
+// ---------------------------------
+
+let session = st.s.startSession({retryWrites: true});
+let sessionDB = session.getDatabase(kDbName);
+
+// Modify updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 900}, {"x": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 600}}
+});
+assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"$set": {"x": 600}});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, false, false, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, false, false, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotUpdateWithMultiTrue(st, kDbName, ns, session, sessionDB, false, {"x": 300}, {"x": 600});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, false, {"x": 300}, {"x": [300]});
+
+// Modify style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 600}}
+});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ false,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(st, kDbName, ns, session, sessionDB, false, true, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, false, true, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, false, true, {"x": 300}, {"x": [300]});
+
+// Bulk writes as retryable writes
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, false, false);
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, false, true);
+
+// ----Assert correct behavior when collection is hash sharded----
+
+assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
+
+// ---------------------------------------
+// Update shard key in multi statement txn
+// ---------------------------------------
+
+session = st.s.startSession();
+sessionDB = session.getDatabase(kDbName);
+
+// ----Single writes in txn----
+
+// Modify updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {
+ "$set": {"_id": {"a": 600}}
+});
+assertCannotUpdateWithMultiTrue(
+ st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"$set": {"x": 600}});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement updates
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ false,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(st, kDbName, ns, session, sessionDB, true, false, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, true, false, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotUpdateWithMultiTrue(st, kDbName, ns, session, sessionDB, true, {"x": 300}, {"x": 600});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, false, {"x": 300}, {"x": [300]});
+
+// Modify style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"$set": {"x": {"a": 600}}}, {"$set": {"x": {"a": 30}}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"$set": {"x": 600}}, {"$set": {"x": 30}}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(
+ st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"$set": {"_id": 600}});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {"$set": {"_id": {"a": 600}}});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$set": {"x": [300]}});
+assertCannotUnsetSKField(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"$unset": {"x": 1}});
+
+// Replacement style findAndModify
+
+// upsert : false
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ false);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ false);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ false);
+
+// upsert : true
+assertCanUpdatePrimitiveShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300}, {"x": 4}],
+ [{"x": 600}, {"x": 30}],
+ true);
+assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x.a": 300}, {"x.a": 4}],
+ [{"x": {"a": 600}}, {"x": {"a": 30}}],
+ true);
+assertCanUpdatePartialShardKey(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ true,
+ true,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [{"x": 600, "y": 80}, {"x": 30, "y": 3}],
+ true);
+
+// failing cases
+assertCannotUpdate_id(st, kDbName, ns, session, sessionDB, true, true, {"_id": 300}, {"_id": 600});
+assertCannotUpdate_idDottedPath(
+ st, kDbName, ns, session, sessionDB, true, true, {"_id.a": 300}, {"_id": {"a": 600}});
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300, "y": 80}, {"x": 600});
+// Shard key fields are missing in query.
+assertCannotDoReplacementUpdateWhereShardKeyMissingFields(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": 600, "y": 80, "a": 2});
+assertCannotUpdateSKToArray(
+ st, kDbName, ns, session, sessionDB, true, true, {"x": 300}, {"x": [300]});
+
+// ----Assert correct behavior when collection is hash sharded----
+
+assertCanUpdatePrimitiveShardKeyHashedSameShards(st, kDbName, ns, session, sessionDB, true);
+
+// ----Multiple writes in txn-----
+
+// Bulk writes in txn
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, true, false);
+assertCanUpdateInBulkOpWhenDocsRemainOnSameShard(st, kDbName, ns, session, sessionDB, true, true);
+
+// Update two docs, updating one twice
+docsToInsert = [{"x": 4, "a": 3}, {"x": 100}, {"x": 300, "a": 3}, {"x": 500, "a": 6}];
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+session.startTransaction();
+let id = mongos.getDB(kDbName).foo.find({"x": 500}).toArray()[0]._id;
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
+assert.commandWorked(sessionDB.foo.update({"x": 400}, {"x": 600, "_id": id}));
+assert.commandWorked(sessionDB.foo.update({"x": 4}, {"$set": {"x": 30}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 600}).toArray()[0]._id);
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 30}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing $inc on doc A, then updating the shard key for doc A, then doing $inc again
+// only increments the field once
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+session.startTransaction();
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$set": {"x": 400}}));
+assert.commandWorked(sessionDB.foo.update({"x": 500}, {"$inc": {"a": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 400, "a": 7}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing findAndModify to update shard key followed by $inc works correctly
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+session.startTransaction();
+sessionDB.foo.findAndModify({query: {"x": 500}, update: {$set: {"x": 600}}});
+assert.commandWorked(sessionDB.foo.update({"x": 600}, {"$inc": {"a": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 500}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"a": 7}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 600, "a": 7}).itcount());
+
+mongos.getDB(kDbName).foo.drop();
+
+// Check that doing findAndModify followed by an update on a shard key works correctly
+shardCollectionMoveChunks(st, kDbName, ns, {"x": 1}, docsToInsert, {"x": 100}, {"x": 300});
+
+id = mongos.getDB(kDbName).foo.find({"x": 4}).toArray()[0]._id;
+session.startTransaction();
+sessionDB.foo.findAndModify({query: {"x": 4}, update: {$set: {"x": 20}}});
+assert.commandWorked(sessionDB.foo.update({"x": 20}, {$set: {"x": 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 4}).itcount());
+assert.eq(0, mongos.getDB(kDbName).foo.find({"x": 20}).itcount());
+assert.eq(1, mongos.getDB(kDbName).foo.find({"x": 1}).itcount());
+assert.eq(id, mongos.getDB(kDbName).foo.find({"x": 1}).toArray()[0]._id);
+
+mongos.getDB(kDbName).foo.drop();
+
+st.stop();
})();
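For reference, the core behavior these tests pin down, as a minimal sketch in mongo shell syntax (assuming a collection db.foo sharded on {x: 1}, as in the test above): an update that changes the shard key is rejected as an ordinary write, but succeeds when run in a session as part of a multi-statement transaction.

    // Plain write: changing the shard key is not allowed.
    assert.writeError(db.foo.update({x: 300}, {x: 600}));

    // The same replacement succeeds inside a multi-statement transaction.
    const session = db.getMongo().startSession();
    const sessionColl = session.getDatabase(db.getName()).foo;
    session.startTransaction();
    assert.commandWorked(sessionColl.update({x: 300}, {x: 600}));
    assert.commandWorked(session.commitTransaction_forTesting());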
diff --git a/jstests/sharding/update_shard_key_pipeline_update.js b/jstests/sharding/update_shard_key_pipeline_update.js
index b65aefb947b..9f1ff0082e0 100644
--- a/jstests/sharding/update_shard_key_pipeline_update.js
+++ b/jstests/sharding/update_shard_key_pipeline_update.js
@@ -4,236 +4,235 @@
*/
(function() {
- 'use strict';
-
- load("jstests/sharding/libs/update_shard_key_helpers.js");
-
- const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
- const kDbName = 'db';
- const mongos = st.s0;
- const shard0 = st.shard0.shardName;
- const shard1 = st.shard1.shardName;
- const ns = kDbName + '.foo';
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
-
- // Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
- const changeShardKeyOptions = [
- [false, false, false],
- [true, false, false],
- [true, true, false],
- [false, true, false],
- [false, false, true],
- [true, false, true],
- [false, true, true],
- [true, true, true]
- ];
-
- // Test pipeline updates where the document being updated remains on the same shard.
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using pipeline style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [
- [{$set: {"x": {$multiply: ["$x", 2]}}}, {$addFields: {"z": 1}}],
- [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}]
- ],
- upsert,
- [{"x": 600, "z": 1}, {"x": -4, "z": 1}]);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [
- [{$set: {"x": {"a": {$multiply: ["$x.a", 2]}, "y": 1}}}],
- [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}]
- ],
- upsert,
- [{"x": {"a": 600, "y": 1}}, {"x": {"a": -4, "y": 1}}]);
- assertCanUpdatePartialShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [[{$set: {"x": {$multiply: ["$x", 2]}}}], [{$set: {"x": {$multiply: ["$x", -1]}}}]],
- upsert,
- [{"x": 600}, {"x": -4}]);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"_id": 300},
- [{$set: {"_id": {$multiply: ["$_id", 2]}}}],
- {"_id": 600});
- assertCannotUpdate_idDottedPath(st,
+'use strict';
+
+load("jstests/sharding/libs/update_shard_key_helpers.js");
+
+const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+const kDbName = 'db';
+const mongos = st.s0;
+const shard0 = st.shard0.shardName;
+const shard1 = st.shard1.shardName;
+const ns = kDbName + '.foo';
+
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+st.ensurePrimaryShard(kDbName, shard0);
+
+// Tuples represent [shouldRunCommandInTxn, runUpdateAsFindAndModifyCmd, isUpsert].
+const changeShardKeyOptions = [
+ [false, false, false],
+ [true, false, false],
+ [true, true, false],
+ [false, true, false],
+ [false, false, true],
+ [true, false, true],
+ [false, true, true],
+ [true, true, true]
+];
+
+// Test pipeline updates where the document being updated remains on the same shard.
+
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
+
+ jsTestLog("Testing changing the shard key using pipeline style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
+
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
+
+ assertCanUpdatePrimitiveShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [
+ [{$set: {"x": {$multiply: ["$x", 2]}}}, {$addFields: {"z": 1}}],
+ [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}]
+ ],
+ upsert,
+ [{"x": 600, "z": 1}, {"x": -4, "z": 1}]);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [
+ [{$set: {"x": {"a": {$multiply: ["$x.a", 2]}, "y": 1}}}],
+ [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}]
+ ],
+ upsert,
+ [{"x": {"a": 600, "y": 1}}, {"x": {"a": -4, "y": 1}}]);
+ assertCanUpdatePartialShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [[{$set: {"x": {$multiply: ["$x", 2]}}}], [{$set: {"x": {$multiply: ["$x", -1]}}}]],
+ upsert,
+ [{"x": 600}, {"x": -4}]);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id": 300},
+ [{$set: {"_id": {$multiply: ["$_id", 2]}}}],
+ {"_id": 600});
+ assertCannotUpdate_idDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id.a": 300},
+ [{$set: {"_id": {"a": {$multiply: ["$_id.a", 2]}}}}],
+ {"_id": {"a": 600}});
+ assertCannotUnsetSKFieldUsingPipeline(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"x": 300, "y": 80},
+ [{$project: {"y": 0}}],
+ {"x": 300, "y": 80});
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(st,
kDbName,
ns,
session,
sessionDB,
runInTxn,
- isFindAndModify,
- {"_id.a": 300},
- [{$set: {"_id": {"a": {$multiply: ["$_id.a", 2]}}}}],
- {"_id": {"a": 600}});
- assertCannotUnsetSKFieldUsingPipeline(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"x": 300, "y": 80},
- [{$project: {"y": 0}}],
- {"x": 300, "y": 80});
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- [{$set: {"x": {$multiply: ["$x", 2]}}}],
- {"x": 600});
- }
+ {"x": 300},
+ [{$set: {"x": {$multiply: ["$x", 2]}}}],
+ {"x": 600});
}
- });
-
- // Test pipeline updates where the document being updated will move shards.
-
- changeShardKeyOptions.forEach(function(updateConfig) {
- let runInTxn, isFindAndModify, upsert;
- [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
-
- jsTestLog("Testing changing the shard key using pipeline style update and " +
- (isFindAndModify ? "findAndModify command " : "update command ") +
- (runInTxn ? "in transaction " : "as retryable write"));
-
- let session = st.s.startSession({retryWrites: runInTxn ? false : true});
- let sessionDB = session.getDatabase(kDbName);
-
- assertCanUpdatePrimitiveShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300}, {"x": 4}],
- [
- [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}],
- [{$set: {"x": {$multiply: ["$x", 100]}}}, {$addFields: {"z": 1}}]
- ],
- upsert,
- [{"x": -300, "z": 1}, {"x": 400, "z": 1}]);
- assertCanUpdateDottedPath(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x.a": 300}, {"x.a": 4}],
- [
- [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}],
- [{$set: {"x": {"a": {$multiply: ["$x.a", 100]}, "y": 1}}}]
- ],
- upsert,
- [{"x": {"a": -300, "y": 1}}, {"x": {"a": 400, "y": 1}}]);
- assertCanUpdatePartialShardKey(
- st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
- [[{$set: {"x": {$multiply: ["$x", -1]}}}], [{$set: {"x": {$multiply: ["$x", 100]}}}]],
- upsert,
- [{"x": -300}, {"x": 400}]);
-
- // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
- // them for both upsert true and false.
- if (!upsert) {
- assertCannotUpdate_id(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"_id": 300},
- [{$set: {"_id": {$multiply: ["$_id", -1]}}}],
- {"_id": -300});
- assertCannotUpdate_idDottedPath(st,
+ }
+});
+
+// Test pipeline updates where the document being updated will move shards.
+
+changeShardKeyOptions.forEach(function(updateConfig) {
+ let runInTxn, isFindAndModify, upsert;
+ [runInTxn, isFindAndModify, upsert] = [updateConfig[0], updateConfig[1], updateConfig[2]];
+
+ jsTestLog("Testing changing the shard key using pipeline style update and " +
+ (isFindAndModify ? "findAndModify command " : "update command ") +
+ (runInTxn ? "in transaction " : "as retryable write"));
+
+ let session = st.s.startSession({retryWrites: runInTxn ? false : true});
+ let sessionDB = session.getDatabase(kDbName);
+
+ assertCanUpdatePrimitiveShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300}, {"x": 4}],
+ [
+ [{$set: {"x": {$multiply: ["$x", -1]}}}, {$addFields: {"z": 1}}],
+ [{$set: {"x": {$multiply: ["$x", 100]}}}, {$addFields: {"z": 1}}]
+ ],
+ upsert,
+ [{"x": -300, "z": 1}, {"x": 400, "z": 1}]);
+ assertCanUpdateDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x.a": 300}, {"x.a": 4}],
+ [
+ [{$set: {"x": {"a": {$multiply: ["$x.a", -1]}, "y": 1}}}],
+ [{$set: {"x": {"a": {$multiply: ["$x.a", 100]}, "y": 1}}}]
+ ],
+ upsert,
+ [{"x": {"a": -300, "y": 1}}, {"x": {"a": 400, "y": 1}}]);
+ assertCanUpdatePartialShardKey(
+ st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ [{"x": 300, "y": 80}, {"x": 4, "y": 3}],
+ [[{$set: {"x": {$multiply: ["$x", -1]}}}], [{$set: {"x": {$multiply: ["$x", 100]}}}]],
+ upsert,
+ [{"x": -300}, {"x": 400}]);
+
+ // Failure cases. These tests do not take 'upsert' as an option so we do not need to test
+ // them for both upsert true and false.
+ if (!upsert) {
+ assertCannotUpdate_id(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id": 300},
+ [{$set: {"_id": {$multiply: ["$_id", -1]}}}],
+ {"_id": -300});
+ assertCannotUpdate_idDottedPath(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"_id.a": 300},
+ [{$set: {"_id": {"a": {$multiply: ["$_id.a", -1]}}}}],
+ {"_id": {"a": -300}});
+ assertCannotUnsetSKFieldUsingPipeline(st,
+ kDbName,
+ ns,
+ session,
+ sessionDB,
+ runInTxn,
+ isFindAndModify,
+ {"x": 300, "y": 80},
+ [{$project: {"y": 0}}],
+ {"x": 300, "y": 80});
+ if (!isFindAndModify) {
+ assertCannotUpdateWithMultiTrue(st,
kDbName,
ns,
session,
sessionDB,
runInTxn,
- isFindAndModify,
- {"_id.a": 300},
- [{$set: {"_id": {"a": {$multiply: ["$_id.a", -1]}}}}],
- {"_id": {"a": -300}});
- assertCannotUnsetSKFieldUsingPipeline(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- isFindAndModify,
- {"x": 300, "y": 80},
- [{$project: {"y": 0}}],
- {"x": 300, "y": 80});
- if (!isFindAndModify) {
- assertCannotUpdateWithMultiTrue(st,
- kDbName,
- ns,
- session,
- sessionDB,
- runInTxn,
- {"x": 300},
- [{$set: {"x": {$multiply: ["$x", -1]}}}],
- {"x": -300});
- }
+ {"x": 300},
+ [{$set: {"x": {$multiply: ["$x", -1]}}}],
+ {"x": -300});
}
- });
-
- st.stop();
+ }
+});
+st.stop();
})();
\ No newline at end of file
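Distilling the pattern exercised in this file: pipeline-style updates (an array of aggregation stages) may change the shard key under the same conditions as other updates, i.e. as a retryable write or inside a transaction, and the stages can compute the new key from the existing document. A minimal sketch under those assumptions, using names from the test above:

    const session = st.s.startSession({retryWrites: true});
    const sessionColl = session.getDatabase(kDbName).foo;
    // Doubles the shard key and adds a field; the document may move to another
    // shard if {x: 600} falls in a chunk owned elsewhere.
    assert.commandWorked(sessionColl.update(
        {x: 300}, [{$set: {x: {$multiply: ["$x", 2]}}}, {$addFields: {z: 1}}]));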
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 432329f7210..ea1939bfd72 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -2,110 +2,108 @@
// since shard key is immutable.
(function() {
- const s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
-
- s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', s.shard1.shardName);
-
- db = s.getDB("test");
-
- // Repeat same tests with hashed shard key, to ensure identical behavior.
- s.shardColl("update0", {key: 1}, {key: 0}, {key: 1}, db.getName(), true);
- s.adminCommand({shardcollection: "test.update1", key: {key: "hashed"}});
-
- s.shard0.getDB("admin").setLogLevel(1);
- s.shard1.getDB("admin").setLogLevel(1);
-
- for (let i = 0; i < 2; i++) {
- const collName = "update" + i;
- const hashedKey = (collName == "update1");
-
- coll = db.getCollection(collName);
- coll.insert({_id: 1, key: 1});
-
- // Replacement and op-style upserts.
- assert.commandWorked(coll.update({_id: 2, key: 2}, {key: 2, foo: 'bar'}, {upsert: true}));
- assert.commandWorked(coll.update({_id: 3, key: 3}, {$set: {foo: 'bar'}}, {upsert: true}));
-
- assert.eq(coll.count(), 3, "count A");
- assert.eq(coll.findOne({_id: 3}).key, 3, "findOne 3 key A");
- assert.eq(coll.findOne({_id: 3}).foo, 'bar', "findOne 3 foo A");
-
- // update existing using update()
- assert.commandWorked(coll.update({_id: 1}, {key: 1, other: 1}));
- assert.commandWorked(coll.update({_id: 2}, {key: 2, other: 2}));
- assert.commandWorked(coll.update({_id: 3}, {key: 3, other: 3}));
-
- // do a replacement-style update which queries the shard key and keeps it constant
- assert.commandWorked(coll.update({key: 4}, {_id: 4, key: 4}, {upsert: true}));
- assert.commandWorked(coll.update({key: 4}, {key: 4, other: 4}));
- assert.eq(coll.find({key: 4, other: 4}).count(), 1, 'replacement update error');
- coll.remove({_id: 4});
-
- assert.eq(coll.count(), 3, "count B");
- coll.find().forEach(function(x) {
- assert.eq(x._id, x.key, "_id == key");
- assert.eq(x._id, x.other, "_id == other");
- });
-
- assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
- assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
-
- assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
-
- coll.update({key: 17}, {$inc: {x: 5}}, true);
- assert.eq(5, coll.findOne({key: 17}).x, "up1");
-
- coll.update({key: 18}, {$inc: {x: 5}}, true, true);
- assert.eq(5, coll.findOne({key: 18}).x, "up2");
-
- // Make sure we can extract exact _id from certain queries
- assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
-
- // Invalid extraction of exact _id from query
- assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update({_id: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update(
- {$or: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update(
- {$and: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
-
- // Make sure we can extract exact shard key from certain queries
- assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
-
- // Invalid extraction of exact key from query
- assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(coll.update({'key.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
-
- // Inexact queries may target a single shard. Range queries may target a single shard as
- // long as the collection is not hashed.
- assert[hashedKey ? "writeError" : "writeOK"](
- coll.update({key: {$gt: 0}}, {$set: {x: 1}}, {multi: false}));
- // Note: {key:-1} and {key:-2} fall on shard0 for both hashed and ascending shard keys.
- assert.writeOK(coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
-
- // In cases where an inexact query does target multiple shards, single update is rejected.
- assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));
- assert.writeError(
- coll.update({$or: [{key: -10}, {key: 10}]}, {$set: {x: 1}}, {multi: false}));
-
- // Make sure failed shard key or _id extraction doesn't affect the other
- assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
- assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
- }
-
- s.stop();
-
+const s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
+
+s.adminCommand({enablesharding: "test"});
+s.ensurePrimaryShard('test', s.shard1.shardName);
+
+db = s.getDB("test");
+
+// Repeat same tests with hashed shard key, to ensure identical behavior.
+s.shardColl("update0", {key: 1}, {key: 0}, {key: 1}, db.getName(), true);
+s.adminCommand({shardcollection: "test.update1", key: {key: "hashed"}});
+
+s.shard0.getDB("admin").setLogLevel(1);
+s.shard1.getDB("admin").setLogLevel(1);
+
+for (let i = 0; i < 2; i++) {
+ const collName = "update" + i;
+ const hashedKey = (collName == "update1");
+
+ coll = db.getCollection(collName);
+ coll.insert({_id: 1, key: 1});
+
+ // Replacement and op-style upserts.
+ assert.commandWorked(coll.update({_id: 2, key: 2}, {key: 2, foo: 'bar'}, {upsert: true}));
+ assert.commandWorked(coll.update({_id: 3, key: 3}, {$set: {foo: 'bar'}}, {upsert: true}));
+
+ assert.eq(coll.count(), 3, "count A");
+ assert.eq(coll.findOne({_id: 3}).key, 3, "findOne 3 key A");
+ assert.eq(coll.findOne({_id: 3}).foo, 'bar', "findOne 3 foo A");
+
+ // update existing using update()
+ assert.commandWorked(coll.update({_id: 1}, {key: 1, other: 1}));
+ assert.commandWorked(coll.update({_id: 2}, {key: 2, other: 2}));
+ assert.commandWorked(coll.update({_id: 3}, {key: 3, other: 3}));
+
+ // do a replacement-style update which queries the shard key and keeps it constant
+ assert.commandWorked(coll.update({key: 4}, {_id: 4, key: 4}, {upsert: true}));
+ assert.commandWorked(coll.update({key: 4}, {key: 4, other: 4}));
+ assert.eq(coll.find({key: 4, other: 4}).count(), 1, 'replacement update error');
+ coll.remove({_id: 4});
+
+ assert.eq(coll.count(), 3, "count B");
+ coll.find().forEach(function(x) {
+ assert.eq(x._id, x.key, "_id == key");
+ assert.eq(x._id, x.other, "_id == other");
+ });
+
+ assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
+ assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
+
+ assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
+
+ coll.update({key: 17}, {$inc: {x: 5}}, true);
+ assert.eq(5, coll.findOne({key: 17}).x, "up1");
+
+ coll.update({key: 18}, {$inc: {x: 5}}, true, true);
+ assert.eq(5, coll.findOne({key: 18}).x, "up2");
+
+ // Make sure we can extract exact _id from certain queries
+ assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact _id from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({_id: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(
+ coll.update({$or: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$and: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure we can extract exact shard key from certain queries
+ assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact key from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'key.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Inexact queries may target a single shard. Range queries may target a single shard as
+ // long as the collection is not hashed.
+ assert[hashedKey ? "writeError" : "writeOK"](
+ coll.update({key: {$gt: 0}}, {$set: {x: 1}}, {multi: false}));
+ // Note: {key:-1} and {key:-2} fall on shard0 for both hashed and ascending shard keys.
+ assert.writeOK(coll.update({$or: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{key: -1}, {key: -2}]}, {$set: {x: 1}}, {multi: false}));
+
+ // In cases where an inexact query does target multiple shards, a single update is rejected.
+ assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({$or: [{key: -10}, {key: 10}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure failed shard key or _id extraction doesn't affect the other
+ assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
+}
+
+s.stop();
})();
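In short, the targeting rule this file exercises: a {multi: false} update must let mongos extract an exact _id or an exact shard key equality from the query, otherwise the write cannot be routed to a single shard and is rejected. A brief sketch (assuming coll is sharded on {key: 1}):

    assert.writeOK(coll.update({key: 5}, {$set: {x: 1}}, {multi: false}));           // exact shard key
    assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));  // exact _id
    // An inexact query that can span several shards is rejected for single updates.
    assert.writeError(coll.update({key: {$gt: MinKey}}, {$set: {x: 1}}, {multi: false}));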
diff --git a/jstests/sharding/update_zone_key_range.js b/jstests/sharding/update_zone_key_range.js
index b4babb0f441..97826029b03 100644
--- a/jstests/sharding/update_zone_key_range.js
+++ b/jstests/sharding/update_zone_key_range.js
@@ -3,43 +3,42 @@
* in sharding_catalog_assign_key_range_to_zone_test.cpp.
*/
(function() {
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardName = configDB.shards.findOne()._id;
+var configDB = st.s.getDB('config');
+var shardName = configDB.shards.findOne()._id;
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- // Testing basic assign.
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
+// Testing basic assign.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
- var tagDoc = configDB.tags.findOne();
+var tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Cannot assign overlapping ranges
- assert.commandFailedWithCode(
- st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
- ErrorCodes.RangeOverlapConflict);
+// Cannot assign overlapping ranges
+assert.commandFailedWithCode(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
+ ErrorCodes.RangeOverlapConflict);
- tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+tagDoc = configDB.tags.findOne();
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Testing basic remove.
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
+// Testing basic remove.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
- assert.eq(null, configDB.tags.findOne());
+assert.eq(null, configDB.tags.findOne());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/update_zone_key_range_not_sharded.js b/jstests/sharding/update_zone_key_range_not_sharded.js
index 4f9603a33a9..211dedc8588 100644
--- a/jstests/sharding/update_zone_key_range_not_sharded.js
+++ b/jstests/sharding/update_zone_key_range_not_sharded.js
@@ -3,42 +3,41 @@
* More detailed tests can be found in sharding_catalog_assign_key_range_to_zone_test.cpp.
*/
(function() {
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardName = configDB.shards.findOne()._id;
+var configDB = st.s.getDB('config');
+var shardName = configDB.shards.findOne()._id;
- assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: 'x'}));
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- // Testing basic assign.
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
+// Testing basic assign.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
- var tagDoc = configDB.tags.findOne();
+var tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Cannot assign overlapping ranges
- assert.commandFailedWithCode(
- st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
- ErrorCodes.RangeOverlapConflict);
+// Cannot assign overlapping ranges
+assert.commandFailedWithCode(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: -10}, max: {x: 20}, zone: 'x'}),
+ ErrorCodes.RangeOverlapConflict);
- tagDoc = configDB.tags.findOne();
- assert.eq('test.user', tagDoc.ns);
- assert.eq({x: 0}, tagDoc.min);
- assert.eq({x: 10}, tagDoc.max);
- assert.eq('x', tagDoc.tag);
+tagDoc = configDB.tags.findOne();
+assert.eq('test.user', tagDoc.ns);
+assert.eq({x: 0}, tagDoc.min);
+assert.eq({x: 10}, tagDoc.max);
+assert.eq('x', tagDoc.tag);
- // Testing basic remove.
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
+// Testing basic remove.
+assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));
- assert.eq(null, configDB.tags.findOne());
+assert.eq(null, configDB.tags.findOne());
- st.stop();
+st.stop();
})();
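Both zone tests above reduce to the same pair of admin commands, which behave identically whether or not the namespace has been sharded yet: a range is assigned to a zone with updateZoneKeyRange, an overlapping assignment fails with RangeOverlapConflict, and passing zone: null removes the range. Condensed:

    assert.commandWorked(st.s.adminCommand(
        {updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: 'x'}));
    // Removing the range: the same command with zone set to null.
    assert.commandWorked(st.s.adminCommand(
        {updateZoneKeyRange: 'test.user', min: {x: 0}, max: {x: 10}, zone: null}));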
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 317b3107e09..32a59b9a586 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -3,111 +3,107 @@
// NOTE: Generic upsert behavior tests belong in the core suite
//
(function() {
- 'use strict';
-
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
-
- var upsertedResult = function(query, expr) {
- coll.remove({});
- return coll.update(query, expr, {upsert: true});
- };
-
- var upsertedField = function(query, expr, fieldName) {
- assert.writeOK(upsertedResult(query, expr));
- return coll.findOne()[fieldName];
- };
-
- var upsertedId = function(query, expr) {
- return upsertedField(query, expr, "_id");
- };
-
- var upsertedXVal = function(query, expr) {
- return upsertedField(query, expr, "x");
- };
-
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: coll + "", find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.printShardingStatus();
-
- // upserted update replacement would result in no shard key
- assert.writeError(upsertedResult({x: 1}, {}));
-
- // updates with upsert must contain shard key in query when $op style
- assert.eq(1, upsertedXVal({x: 1}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({x: {$eq: 1}}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({x: {$all: [1]}}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({x: {$in: [1]}}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({$and: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
- assert.eq(1, upsertedXVal({$or: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
-
- // Missing shard key in query.
- assert.commandFailedWithCode(upsertedResult({}, {$set: {a: 1, x: 1}}),
- ErrorCodes.ShardKeyNotFound);
-
- // Missing equality match on shard key in query.
- assert.commandFailedWithCode(upsertedResult({x: {$gt: 10}}, {$set: {a: 1, x: 5}}),
- ErrorCodes.ShardKeyNotFound);
-
- // Regex shard key value in query is ambiguous and cannot be extracted for an equality match.
- assert.commandFailedWithCode(
- upsertedResult({x: {$eq: /abc*/}}, {$set: {a: 1, x: "regexValue"}}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(upsertedResult({x: {$eq: /abc/}}, {$set: {a: 1, x: /abc/}}),
- ErrorCodes.ShardKeyNotFound);
-
- // Shard key in query not extractable.
- assert.commandFailedWithCode(upsertedResult({x: undefined}, {$set: {a: 1}}),
- ErrorCodes.BadValue);
- assert.commandFailedWithCode(upsertedResult({x: [1, 2]}, {$set: {a: 1}}),
- ErrorCodes.ShardKeyNotFound);
- assert.commandFailedWithCode(upsertedResult({x: {$eq: {$gt: 5}}}, {$set: {a: 1}}),
- ErrorCodes.ShardKeyNotFound);
-
- // nested field extraction always fails with non-nested key - like _id, we require setting the
- // elements directly
- assert.writeError(upsertedResult({"x.x": 1}, {$set: {a: 1}}));
- assert.writeError(upsertedResult({"x.x": {$eq: 1}}, {$set: {a: 1}}));
-
- coll.drop();
-
- st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: coll + "", find: {'x.x': 0}, to: st.shard1.shardName, _waitForDelete: true}));
-
- st.printShardingStatus();
-
- // nested field extraction with nested shard key
- assert.docEq({x: 1}, upsertedXVal({"x.x": 1}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({"x.x": {$eq: 1}}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({"x.x": {$all: [1]}}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
- assert.docEq({x: 1}, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
-
- // Can specify siblings of nested shard keys
- assert.docEq({x: 1, y: 1}, upsertedXVal({"x.x": 1, "x.y": 1}, {$set: {a: 1}}));
- assert.docEq({x: 1, y: {z: 1}}, upsertedXVal({"x.x": 1, "x.y.z": 1}, {$set: {a: 1}}));
-
- // No arrays at any level
- assert.writeError(upsertedResult({"x.x": []}, {$set: {a: 1}}));
- assert.writeError(upsertedResult({x: {x: []}}, {$set: {a: 1}}));
- assert.writeError(upsertedResult({x: [{x: 1}]}, {$set: {a: 1}}));
-
- // Can't set sub-fields of nested key
- assert.writeError(upsertedResult({"x.x.x": {$eq: 1}}, {$set: {a: 1}}));
-
- st.stop();
-
+'use strict';
+
+var st = new ShardingTest({shards: 2, mongos: 1});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
+
+var upsertedResult = function(query, expr) {
+ coll.remove({});
+ return coll.update(query, expr, {upsert: true});
+};
+
+var upsertedField = function(query, expr, fieldName) {
+ assert.writeOK(upsertedResult(query, expr));
+ return coll.findOne()[fieldName];
+};
+
+var upsertedId = function(query, expr) {
+ return upsertedField(query, expr, "_id");
+};
+
+var upsertedXVal = function(query, expr) {
+ return upsertedField(query, expr, "x");
+};
+
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+st.printShardingStatus();
+
+// upserted update replacement would result in no shard key
+assert.writeError(upsertedResult({x: 1}, {}));
+
+// updates with upsert must contain the shard key in the query when using $op-style updates
+assert.eq(1, upsertedXVal({x: 1}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({x: {$eq: 1}}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({x: {$all: [1]}}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({x: {$in: [1]}}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({$and: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+assert.eq(1, upsertedXVal({$or: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+
+// Missing shard key in query.
+assert.commandFailedWithCode(upsertedResult({}, {$set: {a: 1, x: 1}}), ErrorCodes.ShardKeyNotFound);
+
+// Missing equality match on shard key in query.
+assert.commandFailedWithCode(upsertedResult({x: {$gt: 10}}, {$set: {a: 1, x: 5}}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Regex shard key value in query is ambiguous and cannot be extracted for an equality match.
+assert.commandFailedWithCode(upsertedResult({x: {$eq: /abc*/}}, {$set: {a: 1, x: "regexValue"}}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(upsertedResult({x: {$eq: /abc/}}, {$set: {a: 1, x: /abc/}}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Shard key in query not extractable.
+assert.commandFailedWithCode(upsertedResult({x: undefined}, {$set: {a: 1}}), ErrorCodes.BadValue);
+assert.commandFailedWithCode(upsertedResult({x: [1, 2]}, {$set: {a: 1}}),
+ ErrorCodes.ShardKeyNotFound);
+assert.commandFailedWithCode(upsertedResult({x: {$eq: {$gt: 5}}}, {$set: {a: 1}}),
+ ErrorCodes.ShardKeyNotFound);
+
+// Nested field extraction always fails with a non-nested shard key; as with _id, we require
+// setting the elements directly.
+assert.writeError(upsertedResult({"x.x": 1}, {$set: {a: 1}}));
+assert.writeError(upsertedResult({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+coll.drop();
+
+st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {'x.x': 0}, to: st.shard1.shardName, _waitForDelete: true}));
+
+st.printShardingStatus();
+
+// nested field extraction with nested shard key
+assert.docEq({x: 1}, upsertedXVal({"x.x": 1}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({"x.x": {$all: [1]}}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+assert.docEq({x: 1}, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+
+// Can specify siblings of nested shard keys
+assert.docEq({x: 1, y: 1}, upsertedXVal({"x.x": 1, "x.y": 1}, {$set: {a: 1}}));
+assert.docEq({x: 1, y: {z: 1}}, upsertedXVal({"x.x": 1, "x.y.z": 1}, {$set: {a: 1}}));
+
+// No arrays at any level
+assert.writeError(upsertedResult({"x.x": []}, {$set: {a: 1}}));
+assert.writeError(upsertedResult({x: {x: []}}, {$set: {a: 1}}));
+assert.writeError(upsertedResult({x: [{x: 1}]}, {$set: {a: 1}}));
+
+// Can't set sub-fields of nested key
+assert.writeError(upsertedResult({"x.x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+st.stop();
})();
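
The hunk above reformats a test verifying that an upsert routed through mongos must carry an exact equality on the shard key so the router can target a single shard. A minimal shell sketch of that rule (illustrative only; it assumes `db` is a database handle obtained from a mongos and that `db.bar` is already sharded on {x: 1}):

    // Exact equality on the shard key: mongos can extract x = 7 and target one shard.
    var res = db.bar.update({x: 7}, {$set: {a: 1}}, {upsert: true});
    assert.writeOK(res);

    // Range predicate: no single shard-key value can be extracted, so the upsert is rejected.
    res = db.bar.update({x: {$gt: 7}}, {$set: {a: 1}}, {upsert: true});
    assert.writeError(res);
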
diff --git a/jstests/sharding/use_rsm_data_for_cs.js b/jstests/sharding/use_rsm_data_for_cs.js
index c2fafec4889..7ae96385243 100644
--- a/jstests/sharding/use_rsm_data_for_cs.js
+++ b/jstests/sharding/use_rsm_data_for_cs.js
@@ -1,37 +1,37 @@
(function() {
- 'use strict';
+'use strict';
- // Init with one shard backed by a single-node replica set.
- var st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- var mongos = st.s;
- var rs = st.rs0;
+// Init with one shard backed by a single-node replica set.
+var st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+var mongos = st.s;
+var rs = st.rs0;
- assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
+assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
- var db = mongos.getDB("test");
- db.foo.save({_id: 1, x: 1});
- assert.eq(db.foo.find({_id: 1}).next().x, 1);
+var db = mongos.getDB("test");
+db.foo.save({_id: 1, x: 1});
+assert.eq(db.foo.find({_id: 1}).next().x, 1);
- // Prevent the RSM on all nodes from updating the config shard.
- mongos.adminCommand(
- {configureFailPoint: "failReplicaSetChangeConfigServerUpdateHook", mode: "alwaysOn"});
- rs.nodes.forEach(function(node) {
- node.adminCommand(
- {configureFailPoint: "failUpdateShardIdentityConfigString", mode: "alwaysOn"});
- });
+// Prevent the RSM on all nodes from updating the config shard.
+mongos.adminCommand(
+ {configureFailPoint: "failReplicaSetChangeConfigServerUpdateHook", mode: "alwaysOn"});
+rs.nodes.forEach(function(node) {
+ node.adminCommand(
+ {configureFailPoint: "failUpdateShardIdentityConfigString", mode: "alwaysOn"});
+});
- // add a node to shard rs
- rs.add({'shardsvr': ''});
- rs.reInitiate();
- rs.awaitSecondaryNodes();
+// add a node to shard rs
+rs.add({'shardsvr': ''});
+rs.reInitiate();
+rs.awaitSecondaryNodes();
- jsTest.log("Reload ShardRegistry");
- // force SR reload with flushRouterConfig
- mongos.getDB("admin").runCommand({flushRouterConfig: 1});
+jsTest.log("Reload ShardRegistry");
+// force SR reload with flushRouterConfig
+mongos.getDB("admin").runCommand({flushRouterConfig: 1});
- // Issue a read from mongos with secondary read preference to force it to use the just-added node.
- jsTest.log("Issue find");
- assert.eq(db.foo.find({_id: 1}).readPref('secondary').next().x, 1);
+// Issue a read from mongos with secondary read preference to force it to use the just-added node.
+jsTest.log("Issue find");
+assert.eq(db.foo.find({_id: 1}).readPref('secondary').next().x, 1);
- st.stop();
+st.stop();
})();
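
The failpoints toggled above are test-only hooks compiled into the server and driven via the configureFailPoint admin command (which requires test commands to be enabled). A hedged sketch of the pattern, assuming `conn` is a direct connection to one node:

    // Turn a named failpoint on, then off again. The name must match a failpoint
    // registered in the server binary; this one is taken from the test above.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "failUpdateShardIdentityConfigString", mode: "alwaysOn"}));
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "failUpdateShardIdentityConfigString", mode: "off"}));
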
diff --git a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
index fb610a68925..2d6b4c57020 100644
--- a/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
+++ b/jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
@@ -3,39 +3,39 @@
* persists it in config.collections.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/uuid_util.js");
+load("jstests/libs/uuid_util.js");
- let db = "test";
+let db = "test";
- let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
+let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
- assert.commandWorked(st.s.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({enableSharding: db}));
+st.ensurePrimaryShard(db, st.shard0.shardName);
- // Check that shardCollection propagates and persists UUIDs.
- for (let i = 0; i < 3; i++) {
- let coll = "bar" + i;
- let nss = db + "." + coll;
+// Check that shardCollection propagates and persists UUIDs.
+for (let i = 0; i < 3; i++) {
+ let coll = "bar" + i;
+ let nss = db + "." + coll;
- // It shouldn't matter whether the collection existed on the shard already or not; test
- // both cases.
- if (i === 0) {
- assert.writeOK(st.s.getDB(db).getCollection(coll).insert({x: 1}));
- }
+ // It shouldn't matter whether the collection existed on the shard already or not; test
+ // both cases.
+ if (i === 0) {
+ assert.writeOK(st.s.getDB(db).getCollection(coll).insert({x: 1}));
+ }
- assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
- // Check that the entry for the collection in config.collections has a uuid field.
- let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
- assert.neq(undefined, collEntryUUID);
+ // Check that the entry for the collection in config.collections has a uuid field.
+ let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
+ assert.neq(undefined, collEntryUUID);
- // Check that the uuid field in the config.collections entry matches the uuid on the shard.
- let listCollsUUID = getUUIDFromListCollections(st.shard0.getDB(db), coll);
- assert.neq(undefined, listCollsUUID);
- assert.eq(listCollsUUID, collEntryUUID);
- }
+ // Check that the uuid field in the config.collections entry matches the uuid on the shard.
+ let listCollsUUID = getUUIDFromListCollections(st.shard0.getDB(db), coll);
+ assert.neq(undefined, listCollsUUID);
+ assert.eq(listCollsUUID, collEntryUUID);
+}
- st.stop();
+st.stop();
})();
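
The uuid_util.js helpers used above boil down to two lookups that can also be written inline. A sketch under the same assumptions as the test (`st` is a ShardingTest and test.bar0 has already been sharded):

    // UUID recorded by the config server for the collection. In this version the
    // _id of a config.collections entry is the namespace string.
    let configEntry = st.s.getDB("config").collections.findOne({_id: "test.bar0"});
    assert.neq(null, configEntry);
    assert.neq(undefined, configEntry.uuid);

    // UUID the primary shard reports for the same collection via listCollections.
    let shardColls = st.shard0.getDB("test").getCollectionInfos({name: "bar0"});
    assert.eq(1, shardColls.length);
    assert.eq(configEntry.uuid, shardColls[0].info.uuid);
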
diff --git a/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js b/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
index 94ac86dbb97..e5dde50d8bb 100644
--- a/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
+++ b/jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
@@ -3,44 +3,43 @@
* collection on itself as part of a migration.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/uuid_util.js");
+load("jstests/libs/uuid_util.js");
- let db = "test";
- let coll = "foo";
- let nss = db + "." + coll;
+let db = "test";
+let coll = "foo";
+let nss = db + "." + coll;
- let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
+let st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}, other: {config: 3}});
- let donor = st.shard0;
- let recipient = st.shard1;
+let donor = st.shard0;
+let recipient = st.shard1;
- let setUp = function() {
- assert.commandWorked(st.s.adminCommand({enableSharding: db}));
- st.ensurePrimaryShard(db, donor.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
- };
+let setUp = function() {
+ assert.commandWorked(st.s.adminCommand({enableSharding: db}));
+ st.ensurePrimaryShard(db, donor.shardName);
+ assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
+};
- // Check that the recipient accepts the chunk and uses the UUID from the recipient when creating
- // the collection.
+// Check that the recipient accepts the chunk and uses the UUID from the recipient when creating
+// the collection.
- setUp();
- assert.commandWorked(
- st.s.adminCommand({moveChunk: nss, find: {_id: 0}, to: recipient.shardName}));
+setUp();
+assert.commandWorked(st.s.adminCommand({moveChunk: nss, find: {_id: 0}, to: recipient.shardName}));
- let donorUUID = getUUIDFromListCollections(donor.getDB(db), coll);
- assert.neq(undefined, donorUUID);
+let donorUUID = getUUIDFromListCollections(donor.getDB(db), coll);
+assert.neq(undefined, donorUUID);
- let recipientUUID = getUUIDFromListCollections(recipient.getDB(db), coll);
- assert.neq(undefined, recipientUUID);
+let recipientUUID = getUUIDFromListCollections(recipient.getDB(db), coll);
+assert.neq(undefined, recipientUUID);
- assert.eq(donorUUID, recipientUUID);
+assert.eq(donorUUID, recipientUUID);
- // Sanity check that the UUID in config.collections matches the donor's and recipient's UUIDs.
- let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
- assert.neq(undefined, collEntryUUID);
- assert.eq(donorUUID, collEntryUUID);
+// Sanity check that the UUID in config.collections matches the donor's and recipient's UUIDs.
+let collEntryUUID = getUUIDFromConfigCollections(st.s, nss);
+assert.neq(undefined, collEntryUUID);
+assert.eq(donorUUID, collEntryUUID);
- st.stop();
+st.stop();
})();
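
For reference, the migration that drives the UUID propagation above is a single moveChunk through mongos; a sketch, assuming `st` is a ShardingTest and test.foo is sharded on {_id: 1}:

    // Move the chunk containing {_id: 0} to the second shard and wait for the
    // donor's range deleter before returning, as the tests above do.
    assert.commandWorked(st.s.adminCommand(
        {moveChunk: "test.foo", find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));
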
diff --git a/jstests/sharding/validate_collection.js b/jstests/sharding/validate_collection.js
index b67a42df6ea..0584c2a8c63 100644
--- a/jstests/sharding/validate_collection.js
+++ b/jstests/sharding/validate_collection.js
@@ -11,76 +11,75 @@
// 4. The previous scenario, but with validation legitimately failing on one of the shards.
(function() {
- const NUM_SHARDS = 3;
- assert(NUM_SHARDS >= 3);
+const NUM_SHARDS = 3;
+assert(NUM_SHARDS >= 3);
- var st = new ShardingTest({shards: NUM_SHARDS});
- var s = st.s;
- var testDb = st.getDB('test');
+var st = new ShardingTest({shards: NUM_SHARDS});
+var s = st.s;
+var testDb = st.getDB('test');
- function setup() {
- assert.writeOK(testDb.test.insert({_id: 0}));
- assert.writeOK(testDb.test.insert({_id: 1}));
+function setup() {
+ assert.writeOK(testDb.test.insert({_id: 0}));
+ assert.writeOK(testDb.test.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 0}));
- assert.writeOK(testDb.dummy.insert({_id: 1}));
- assert.writeOK(testDb.dummy.insert({_id: 2}));
- }
-
- function validate(valid) {
- var res = testDb.runCommand({validate: 'test'});
- assert.commandWorked(res);
- assert.eq(res.valid, valid, tojson(res));
- }
+ assert.writeOK(testDb.dummy.insert({_id: 0}));
+ assert.writeOK(testDb.dummy.insert({_id: 1}));
+ assert.writeOK(testDb.dummy.insert({_id: 2}));
+}
- function setFailValidateFailPointOnShard(enabled, shard) {
- var mode;
- if (enabled) {
- mode = 'alwaysOn';
- } else {
- mode = 'off';
- }
+function validate(valid) {
+ var res = testDb.runCommand({validate: 'test'});
+ assert.commandWorked(res);
+ assert.eq(res.valid, valid, tojson(res));
+}
- var res =
- shard.adminCommand({configureFailPoint: 'validateCmdCollectionNotValid', mode: mode});
- assert.commandWorked(res);
+function setFailValidateFailPointOnShard(enabled, shard) {
+ var mode;
+ if (enabled) {
+ mode = 'alwaysOn';
+ } else {
+ mode = 'off';
}
- setup();
+ var res = shard.adminCommand({configureFailPoint: 'validateCmdCollectionNotValid', mode: mode});
+ assert.commandWorked(res);
+}
- // 1. Collection in an unsharded DB.
- validate(true);
+setup();
+
+// 1. Collection in an unsharded DB.
+validate(true);
- // 2. Sharded collection in a DB.
- assert.commandWorked(s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(s.adminCommand({shardCollection: 'test.test', key: {_id: 1}}));
- assert.commandWorked(s.adminCommand({shardCollection: 'test.dummy', key: {_id: 1}}));
- validate(true);
+// 2. Sharded collection in a DB.
+assert.commandWorked(s.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(s.adminCommand({shardCollection: 'test.test', key: {_id: 1}}));
+assert.commandWorked(s.adminCommand({shardCollection: 'test.dummy', key: {_id: 1}}));
+validate(true);
- // 3. Sharded collection with chunks on two shards.
- st.ensurePrimaryShard('test', st.shard0.shardName);
- assert.commandWorked(s.adminCommand({split: 'test.test', middle: {_id: 1}}));
- assert.commandWorked(
- testDb.adminCommand({moveChunk: 'test.test', find: {_id: 1}, to: st.shard1.shardName}));
- // We spread the dummy collection across NUM_SHARDS shards so that testDb will exist on all
- // NUM_SHARDS shards but the testDb.test collection will only exist on the first two shards.
- // Prior to SERVER-22588, this scenario would cause validation to fail.
- assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 1}}));
- assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 2}}));
- assert.commandWorked(
- testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 1}, to: st.shard1.shardName}));
- assert.commandWorked(
- testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 2}, to: st.shard2.shardName}));
- assert.eq(st.onNumShards('test'), 2);
- assert.eq(st.onNumShards('dummy'), NUM_SHARDS);
- validate(true);
+// 3. Sharded collection with chunks on two shards.
+st.ensurePrimaryShard('test', st.shard0.shardName);
+assert.commandWorked(s.adminCommand({split: 'test.test', middle: {_id: 1}}));
+assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.test', find: {_id: 1}, to: st.shard1.shardName}));
+// We spread the dummy collection across NUM_SHARDS shards so that testDb will exist on all
+// NUM_SHARDS shards but the testDb.test collection will only exist on the first two shards.
+// Prior to SERVER-22588, this scenario would cause validation to fail.
+assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 1}}));
+assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 2}}));
+assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 1}, to: st.shard1.shardName}));
+assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 2}, to: st.shard2.shardName}));
+assert.eq(st.onNumShards('test'), 2);
+assert.eq(st.onNumShards('dummy'), NUM_SHARDS);
+validate(true);
- // 4. Fail validation on one of the shards.
- var primaryShard = st.getPrimaryShard('test');
- setFailValidateFailPointOnShard(true, primaryShard);
- validate(false);
- setFailValidateFailPointOnShard(false, primaryShard);
+// 4. Fail validation on one of the shards.
+var primaryShard = st.getPrimaryShard('test');
+setFailValidateFailPointOnShard(true, primaryShard);
+validate(false);
+setFailValidateFailPointOnShard(false, primaryShard);
- st.stop();
+st.stop();
})();
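
The validation exercised above is the ordinary collection-level validate command, which mongos fans out to every shard owning chunks and merges; the top-level valid flag is false if any shard reports a problem. A minimal sketch, assuming `testDb` is a database handle:

    // Validate a collection and assert on the merged result.
    var res = testDb.runCommand({validate: "test"});
    assert.commandWorked(res);
    assert(res.valid, tojson(res));
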
diff --git a/jstests/sharding/verify_sessions_expiration_sharded.js b/jstests/sharding/verify_sessions_expiration_sharded.js
index fe743f147d6..96cd020aadd 100644
--- a/jstests/sharding/verify_sessions_expiration_sharded.js
+++ b/jstests/sharding/verify_sessions_expiration_sharded.js
@@ -16,135 +16,139 @@
// @tags: [requires_find_command]
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
-
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
- const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-
- function refreshSessionsAndVerifyCount(mongosConfig, shardConfig, expectedCount) {
- mongosConfig.runCommand(refresh);
- shardConfig.runCommand(refresh);
-
- assert.eq(mongosConfig.system.sessions.count(), expectedCount);
- }
-
- function verifyOpenCursorCount(db, expectedCount) {
- assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
- }
-
- function getSessions(config) {
- return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
- }
-
- const dbName = "test";
- const testCollName = "verify_sessions_find_get_more";
-
- let shardingTest = new ShardingTest({
- shards: 1,
- });
-
- let mongos = shardingTest.s;
- let db = mongos.getDB(dbName);
- let mongosConfig = mongos.getDB("config");
- let shardConfig = shardingTest.rs0.getPrimary().getDB("config");
-
- // 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
- for (let i = 0; i < 5; i++) {
- let res = db.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
-
- // Manually delete entries in config.system.sessions to simulate TTL expiration.
- assert.commandWorked(mongosConfig.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
-
- // 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
- // config.system.sessions collection.
- for (let i = 0; i < 10; i++) {
- db[testCollName].insert({_id: i, a: i, b: 1});
- }
-
- let cursors = [];
- for (let i = 0; i < 5; i++) {
- let session = mongos.startSession({});
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "initialize the session");
- cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
- assert(cursors[i].hasNext());
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
+const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+
+function refreshSessionsAndVerifyCount(mongosConfig, shardConfig, expectedCount) {
+ mongosConfig.runCommand(refresh);
+ shardConfig.runCommand(refresh);
+
+ assert.eq(mongosConfig.system.sessions.count(), expectedCount);
+}
+
+function verifyOpenCursorCount(db, expectedCount) {
+ assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
+}
+
+function getSessions(config) {
+ return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
+}
+
+const dbName = "test";
+const testCollName = "verify_sessions_find_get_more";
+
+let shardingTest = new ShardingTest({
+ shards: 1,
+});
+
+let mongos = shardingTest.s;
+let db = mongos.getDB(dbName);
+let mongosConfig = mongos.getDB("config");
+let shardConfig = shardingTest.rs0.getPrimary().getDB("config");
+
+// 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
+for (let i = 0; i < 5; i++) {
+ let res = db.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
+
+// Manually delete entries in config.system.sessions to simulate TTL expiration.
+assert.commandWorked(mongosConfig.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
+
+// 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
+// config.system.sessions collection.
+for (let i = 0; i < 10; i++) {
+ db[testCollName].insert({_id: i, a: i, b: 1});
+}
+
+let cursors = [];
+for (let i = 0; i < 5; i++) {
+ let session = mongos.startSession({});
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "initialize the session");
+ cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
+ assert(cursors[i].hasNext());
+}
+
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
+verifyOpenCursorCount(mongosConfig, 5);
+
+let sessionsCollectionArray;
+let lastUseValues = [];
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < cursors.length; j++) {
+ cursors[j].next();
}
refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
verifyOpenCursorCount(mongosConfig, 5);
- let sessionsCollectionArray;
- let lastUseValues = [];
- for (let i = 0; i < 3; i++) {
- for (let j = 0; j < cursors.length; j++) {
- cursors[j].next();
- }
-
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 5);
- verifyOpenCursorCount(mongosConfig, 5);
-
- sessionsCollectionArray = getSessions(mongosConfig);
+ sessionsCollectionArray = getSessions(mongosConfig);
- if (i == 0) {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- lastUseValues.push(sessionsCollectionArray[j].lastUse);
- }
- } else {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
- lastUseValues[j] = sessionsCollectionArray[j].lastUse;
- }
+ if (i == 0) {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ lastUseValues.push(sessionsCollectionArray[j].lastUse);
+ }
+ } else {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
+ lastUseValues[j] = sessionsCollectionArray[j].lastUse;
}
}
-
- // 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
- // cursors.
- assert.commandWorked(mongosConfig.system.sessions.remove({}));
-
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
- verifyOpenCursorCount(mongosConfig, 0);
-
- for (let i = 0; i < cursors.length; i++) {
- assert.commandFailedWithCode(
- db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
- }
-
- // 4. Verify that an expired session (simulated by manual deletion) that has a currently
- // running operation will be vivified during the logical session cache refresh.
- let pinnedCursorSession = mongos.startSession();
- let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
-
- withPinnedCursor({
- conn: mongos,
- sessionId: pinnedCursorSession,
- db: pinnedCursorDB,
- assertFunction: (cursorId, coll) => {
- assert.commandWorked(mongosConfig.system.sessions.remove({}));
- verifyOpenCursorCount(mongosConfig, 1);
-
- refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 1);
-
- let db = coll.getDB();
- assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- },
- runGetMoreFunc: () => {
- db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId});
- },
- failPointName: failPointName
+}
+
+// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
+// cursors.
+assert.commandWorked(mongosConfig.system.sessions.remove({}));
+
+refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 0);
+verifyOpenCursorCount(mongosConfig, 0);
+
+for (let i = 0; i < cursors.length; i++) {
+ assert.commandFailedWithCode(
+ db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+}
+
+// 4. Verify that an expired session (simulated by manual deletion) that has a currently
+// running operation will be vivified during the logical session cache refresh.
+let pinnedCursorSession = mongos.startSession();
+let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
+
+withPinnedCursor({
+ conn: mongos,
+ sessionId: pinnedCursorSession,
+ db: pinnedCursorDB,
+ assertFunction: (cursorId, coll) => {
+ assert.commandWorked(mongosConfig.system.sessions.remove({}));
+ verifyOpenCursorCount(mongosConfig, 1);
+
+ refreshSessionsAndVerifyCount(mongosConfig, shardConfig, 1);
+
+ let db = coll.getDB();
+ assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+ },
+ runGetMoreFunc: () => {
+ db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId});
},
- /* assertEndCounts */ false);
+ failPointName: failPointName
+},
+ /* assertEndCounts */ false);
- shardingTest.stop();
+shardingTest.stop();
})();
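
A note on the session plumbing above: logical sessions live in an in-memory cache and are only persisted to config.system.sessions on a cache refresh, which is why the test issues refreshLogicalSessionCacheNow (a test-only command) before counting. A sketch, assuming `mongos` is a connection object:

    // Start a session and use it once so it is registered in the cache.
    var session = mongos.startSession();
    assert.commandWorked(session.getDatabase("admin").runCommand({ping: 1}));

    // Force the cache flush, then confirm a session record was persisted.
    assert.commandWorked(mongos.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
    assert.gte(mongos.getDB("config").system.sessions.count(), 1);
    session.endSession();
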
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index c8a361f72ed..22314505a9a 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,93 +1,89 @@
(function() {
- var s = new ShardingTest({name: "version1", shards: 1});
-
- s.adminCommand({enablesharding: "alleyinsider"});
- s.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}});
-
- // alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
- s.printShardingStatus();
-
- a = s._connections[0].getDB("admin");
-
- assert.commandFailed(
- a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB}));
-
- assert.commandFailed(
- a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: "a"}));
-
- assert.commandFailed(a.runCommand(
- {setShardVersion: "alleyinsider.foo", configdb: s._configDB, authoritative: true}));
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0)
- }),
- "should have failed b/c no auth");
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true
- }),
- "should have failed because first setShardVersion needs shard info");
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true,
- shard: "s.shard0.shardName",
- shardHost: s.s.host
- }),
- "should have failed because version is config is 1|0");
-
- var epoch = s.getDB('config').chunks.findOne({"ns": "alleyinsider.foo"}).lastmodEpoch;
- assert.commandWorked(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: epoch,
- authoritative: true,
- shard: s.shard0.shardName,
- shardHost: s.s.host
- }),
- "should have worked");
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: "a",
- version: new Timestamp(0, 2),
- versionEpoch: epoch
- }));
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 2),
- versionEpoch: epoch
- }));
-
- assert.commandFailed(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 1),
- versionEpoch: epoch
- }));
-
- // The only way that setShardVersion passes is if the shard agrees with the version;
- // the shard takes its version from config directly.
- // TODO: bump timestamps in config.
- // assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
- // version : 3 } ).oldVersion.i , 2 , "oldVersion" );
-
- // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get
- // version A" );
- // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get
- // version B" );
-
- s.stop();
-
+var s = new ShardingTest({name: "version1", shards: 1});
+
+s.adminCommand({enablesharding: "alleyinsider"});
+s.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}});
+
+// alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
+s.printShardingStatus();
+
+a = s._connections[0].getDB("admin");
+
+assert.commandFailed(a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB}));
+
+assert.commandFailed(
+ a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: "a"}));
+
+assert.commandFailed(a.runCommand(
+ {setShardVersion: "alleyinsider.foo", configdb: s._configDB, authoritative: true}));
+
+assert.commandFailed(
+ a.runCommand(
+ {setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: new Timestamp(2, 0)}),
+ "should have failed b/c no auth");
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true
+}),
+ "should have failed because first setShardVersion needs shard info");
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true,
+ shard: "s.shard0.shardName",
+ shardHost: s.s.host
+}),
+ "should have failed because version is config is 1|0");
+
+var epoch = s.getDB('config').chunks.findOne({"ns": "alleyinsider.foo"}).lastmodEpoch;
+assert.commandWorked(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: epoch,
+ authoritative: true,
+ shard: s.shard0.shardName,
+ shardHost: s.s.host
+}),
+ "should have worked");
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: "a",
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+}));
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+}));
+
+assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 1),
+ versionEpoch: epoch
+}));
+
+// The only way that setShardVersion passes is if the shard agrees with the version;
+// the shard takes its version from config directly.
+// TODO: bump timestamps in config.
+// assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
+// version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get
+// version A" );
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get
+// version B" );
+
+s.stop();
})();
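
Orientation for the hunk above: setShardVersion and getShardVersion are internal commands used to synchronize a shard's cached chunk version with the config server, and the test drives them by hand against a direct shard connection. A read-side sketch using the same variables as the test (`a` is an admin DB handle on the shard, `s` the ShardingTest):

    // Ask the shard which version it holds for the namespace; 'mine' is this
    // connection's view and 'global' the shard-wide one.
    var v = a.runCommand({getShardVersion: "alleyinsider.foo", configdb: s._configDB});
    assert.commandWorked(v);
    printjson({mine: v.mine, global: v.global});
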
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 89b919f7ce4..28d22c5f861 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,79 +1,73 @@
(function() {
- 'use strict';
-
- var s = new ShardingTest({name: "version2", shards: 1});
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.bar", key: {num: 1}}));
-
- var a = s._connections[0].getDB("admin");
-
- // Setup from one client
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
- 0);
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i,
- 0);
-
- var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
- assert.commandWorked(a.runCommand({
- setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- authoritative: true,
- version: new Timestamp(1, 0),
- versionEpoch: fooEpoch,
- shard: s.shard0.shardName,
- shardHost: s.s.host,
- }));
-
- printjson(s.config.chunks.findOne());
-
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.t,
- 1);
- assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
- 1);
-
- // From a different client
- var a2 = connect(`mongodb://${s.rs0.getPrimary().name}/admin`);
-
- assert.eq(
- a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
- 1,
- "a2 global 1");
- assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
- 0,
- "a2 mine 1");
-
- function simpleFindOne() {
- return a2.getMongo().getDB("alleyinsider").foo.findOne();
- }
-
- var barEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.bar'}).lastmodEpoch;
- assert.commandWorked(a2.runCommand({
- setShardVersion: "alleyinsider.bar",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: barEpoch,
- shard: s.shard0.shardName,
- authoritative: true
- }),
- "setShardVersion bar temp");
-
- assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
-
- // The only way that setShardVersion passes is if the shard agrees with the version;
- // the shard takes its version from config directly.
- // TODO: bump timestamps in config.
- // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
- // 2 }).ok == 1, "setShardVersion a2-1");
-
- // simpleFindOne(); // now should run ok
-
- // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
- // 3 }).ok == 1, "setShardVersion a2-2");
-
- // simpleFindOne(); // newer version is ok
-
- s.stop();
+'use strict';
+var s = new ShardingTest({name: "version2", shards: 1});
+
+assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}}));
+assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.bar", key: {num: 1}}));
+
+var a = s._connections[0].getDB("admin");
+
+// Setup from one client
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i, 0);
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i, 0);
+
+var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
+assert.commandWorked(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ authoritative: true,
+ version: new Timestamp(1, 0),
+ versionEpoch: fooEpoch,
+ shard: s.shard0.shardName,
+ shardHost: s.s.host,
+}));
+
+printjson(s.config.chunks.findOne());
+
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.t, 1);
+assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t, 1);
+
+// From a different client
+var a2 = connect(`mongodb://${s.rs0.getPrimary().name}/admin`);
+
+assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
+ 1,
+ "a2 global 1");
+assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
+ 0,
+ "a2 mine 1");
+
+function simpleFindOne() {
+ return a2.getMongo().getDB("alleyinsider").foo.findOne();
+}
+
+var barEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.bar'}).lastmodEpoch;
+assert.commandWorked(a2.runCommand({
+ setShardVersion: "alleyinsider.bar",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: barEpoch,
+ shard: s.shard0.shardName,
+ authoritative: true
+}),
+ "setShardVersion bar temp");
+
+assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
+
+// The only way that setShardVersion passes is if the shard agrees with the version;
+// the shard takes its version from config directly.
+// TODO: bump timestamps in config.
+// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+// 2 }).ok == 1, "setShardVersion a2-1");
+
+// simpleFindOne(); // now should run ok
+
+// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+// 3 }).ok == 1, "setShardVersion a2-2");
+
+// simpleFindOne(); // newer version is ok
+
+s.stop();
})();
diff --git a/jstests/sharding/view_rewrite.js b/jstests/sharding/view_rewrite.js
index 652937ff113..e0177f84b80 100644
--- a/jstests/sharding/view_rewrite.js
+++ b/jstests/sharding/view_rewrite.js
@@ -3,238 +3,234 @@
* aggregation against the underlying collection.
*/
(function() {
- "use strict";
-
- load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
- const st = new ShardingTest({
- name: "view_rewrite",
- shards: 2,
- other: {
- rs0: {
- nodes: [
- {rsConfig: {priority: 1}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- rs1: {
- nodes: [
- {rsConfig: {priority: 1}},
- {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}
- ]
- },
- enableBalancer: false
+"use strict";
+
+load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
+
+const st = new ShardingTest({
+ name: "view_rewrite",
+ shards: 2,
+ other: {
+ rs0: {
+ nodes:
+ [{rsConfig: {priority: 1}}, {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}]
+ },
+ rs1: {
+ nodes:
+ [{rsConfig: {priority: 1}}, {rsConfig: {priority: 0, tags: {"tag": "secondary"}}}]
+ },
+ enableBalancer: false
+ }
+});
+
+const mongos = st.s0;
+const config = mongos.getDB("config");
+const mongosDB = mongos.getDB("view_rewrite");
+const coll = mongosDB.getCollection("coll");
+
+assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), "view_rewrite-rs0");
+
+const rs0Secondary = st.rs0.getSecondary();
+const rs1Primary = st.rs1.getPrimary();
+const rs1Secondary = st.rs1.getSecondary();
+
+assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
+assert.commandWorked(
+ mongosDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 5}, to: "view_rewrite-rs1"}));
+
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+assert.commandWorked(mongosDB.createView("view", coll.getName(), []));
+const view = mongosDB.getCollection("view");
+
+//
+// Confirms that queries run against views on mongos result in execution of a rewritten
+// aggregation that contains all expected query options.
+//
+function confirmOptionsInProfiler(shardPrimary) {
+ assert.commandWorked(shardPrimary.setProfilingLevel(2));
+
+ // Aggregation
+ assert.commandWorked(mongosDB.runCommand({
+ aggregate: "view",
+ pipeline: [],
+ comment: "agg_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"},
+ cursor: {}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "agg_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
}
});
- const mongos = st.s0;
- const config = mongos.getDB("config");
- const mongosDB = mongos.getDB("view_rewrite");
- const coll = mongosDB.getCollection("coll");
+ // Find
+ assert.commandWorked(mongosDB.runCommand({
+ find: "view",
+ comment: "find_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "find_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), "view_rewrite-rs0");
+ // Count
+ assert.commandWorked(mongosDB.runCommand({
+ count: "view",
+ comment: "count_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "count_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- const rs0Secondary = st.rs0.getSecondary();
- const rs1Primary = st.rs1.getPrimary();
- const rs1Secondary = st.rs1.getSecondary();
+ // Distinct
+ assert.commandWorked(mongosDB.runCommand({
+ distinct: "view",
+ key: "a",
+ comment: "distinct_rewrite",
+ maxTimeMS: 5 * 60 * 1000,
+ readConcern: {level: "linearizable"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardPrimary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "distinct_rewrite",
+ "command.maxTimeMS": {"$exists": true},
+ "command.readConcern": {level: "linearizable"},
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 5}}));
- assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 5}, to: "view_rewrite-rs1"}));
+ assert.commandWorked(shardPrimary.setProfilingLevel(0));
+ shardPrimary.system.profile.drop();
+}
+
+//
+// Confirms that queries run against views on mongos are executed against a tagged secondary, as
+// per readPreference setting.
+//
+function confirmReadPreference(shardSecondary) {
+ assert.commandWorked(shardSecondary.setProfilingLevel(2));
+
+ // Aggregation
+ assert.commandWorked(mongosDB.runCommand({
+ query: {aggregate: "view", pipeline: [], comment: "agg_readPref", cursor: {}},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "agg_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+ // Find
+ assert.commandWorked(mongosDB.runCommand({
+ query: {find: "view", comment: "find_readPref", maxTimeMS: 5 * 60 * 1000},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "find_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- assert.commandWorked(mongosDB.createView("view", coll.getName(), []));
- const view = mongosDB.getCollection("view");
-
- //
- // Confirms that queries run against views on mongos result in execution of a rewritten
- // aggregation that contains all expected query options.
- //
- function confirmOptionsInProfiler(shardPrimary) {
- assert.commandWorked(shardPrimary.setProfilingLevel(2));
-
- // Aggregation
- assert.commandWorked(mongosDB.runCommand({
- aggregate: "view",
- pipeline: [],
- comment: "agg_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"},
- cursor: {}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "agg_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Find
- assert.commandWorked(mongosDB.runCommand({
- find: "view",
- comment: "find_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "find_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Count
- assert.commandWorked(mongosDB.runCommand({
- count: "view",
- comment: "count_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "count_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Distinct
- assert.commandWorked(mongosDB.runCommand({
- distinct: "view",
- key: "a",
- comment: "distinct_rewrite",
- maxTimeMS: 5 * 60 * 1000,
- readConcern: {level: "linearizable"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardPrimary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "distinct_rewrite",
- "command.maxTimeMS": {"$exists": true},
- "command.readConcern": {level: "linearizable"},
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- assert.commandWorked(shardPrimary.setProfilingLevel(0));
- shardPrimary.system.profile.drop();
- }
+ // Count
+ assert.commandWorked(mongosDB.runCommand({
+ query: {count: "view", comment: "count_readPref"},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "count_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
- //
- // Confirms that queries run against views on mongos are executed against a tagged secondary, as
- // per readPreference setting.
- //
- function confirmReadPreference(shardSecondary) {
- assert.commandWorked(shardSecondary.setProfilingLevel(2));
-
- // Aggregation
- assert.commandWorked(mongosDB.runCommand({
- query: {aggregate: "view", pipeline: [], comment: "agg_readPref", cursor: {}},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "agg_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Find
- assert.commandWorked(mongosDB.runCommand({
- query: {find: "view", comment: "find_readPref", maxTimeMS: 5 * 60 * 1000},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "find_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Count
- assert.commandWorked(mongosDB.runCommand({
- query: {count: "view", comment: "count_readPref"},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "count_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- // Distinct
- assert.commandWorked(mongosDB.runCommand({
- query: {distinct: "view", key: "a", comment: "distinct_readPref"},
- $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
- readConcern: {level: "local"}
- }));
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shardSecondary,
- filter: {
- "ns": coll.getFullName(),
- "command.aggregate": coll.getName(),
- "command.comment": "distinct_readPref",
- "command.pipeline.$mergeCursors": {"$exists": false},
- "nreturned": {"$exists": true}
- }
- });
-
- assert.commandWorked(shardSecondary.setProfilingLevel(0));
- }
+ // Distinct
+ assert.commandWorked(mongosDB.runCommand({
+ query: {distinct: "view", key: "a", comment: "distinct_readPref"},
+ $readPreference: {mode: "nearest", tags: [{tag: "secondary"}]},
+ readConcern: {level: "local"}
+ }));
+
+ profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardSecondary,
+ filter: {
+ "ns": coll.getFullName(),
+ "command.aggregate": coll.getName(),
+ "command.comment": "distinct_readPref",
+ "command.pipeline.$mergeCursors": {"$exists": false},
+ "nreturned": {"$exists": true}
+ }
+ });
+
+ assert.commandWorked(shardSecondary.setProfilingLevel(0));
+}
- confirmOptionsInProfiler(st.rs1.getPrimary().getDB(mongosDB.getName()));
+confirmOptionsInProfiler(st.rs1.getPrimary().getDB(mongosDB.getName()));
- confirmReadPreference(st.rs0.getSecondary().getDB(mongosDB.getName()));
- confirmReadPreference(st.rs1.getSecondary().getDB(mongosDB.getName()));
+confirmReadPreference(st.rs0.getSecondary().getDB(mongosDB.getName()));
+confirmReadPreference(st.rs1.getSecondary().getDB(mongosDB.getName()));
- st.stop();
+st.stop();
})();
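
The profiler checks above follow a standard pattern: profile everything, tag the command under test with a unique comment, then look the entry up by that comment. A condensed sketch, assuming `nodeDB` is a DB handle on the specific node being profiled and that it contains a collection named coll:

    // Capture all operations on this node.
    assert.commandWorked(nodeDB.setProfilingLevel(2));
    assert.commandWorked(nodeDB.runCommand({find: "coll", comment: "my_probe"}));

    // The profiler records the originating command, including its comment.
    var entry = nodeDB.system.profile.findOne({"command.comment": "my_probe"});
    assert.neq(null, entry);
    assert.commandWorked(nodeDB.setProfilingLevel(0));
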
diff --git a/jstests/sharding/views.js b/jstests/sharding/views.js
index c1ea43d8e93..876406902a6 100644
--- a/jstests/sharding/views.js
+++ b/jstests/sharding/views.js
@@ -3,170 +3,165 @@
* @tags: [requires_find_command]
*/
(function() {
- "use strict";
-
- // For profilerHasSingleMatchingEntryOrThrow.
- load("jstests/libs/profiler.js");
-
- // Given sharded explain output in 'shardedExplain', verifies that the explain mode 'verbosity'
- // affected the output verbosity appropriately, and that the response has the expected format.
- // Set 'optimizedAwayPipeline' to true if the pipeline is expected to be optimized away.
- function verifyExplainResult(
- {shardedExplain = null, verbosity = "", optimizedAwayPipeline = false} = {}) {
- assert.commandWorked(shardedExplain);
- assert(shardedExplain.hasOwnProperty("shards"), tojson(shardedExplain));
- for (let elem in shardedExplain.shards) {
- let shard = shardedExplain.shards[elem];
- let root;
- if (optimizedAwayPipeline) {
- assert(shard.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
- root = shard;
- } else {
- assert(shard.stages[0].hasOwnProperty("$cursor"), tojson(shardedExplain));
- assert(shard.stages[0].$cursor.hasOwnProperty("queryPlanner"),
- tojson(shardedExplain));
- root = shard.stages[0].$cursor;
- }
- if (verbosity === "queryPlanner") {
- assert(!root.hasOwnProperty("executionStats"), tojson(shardedExplain));
- } else if (verbosity === "executionStats") {
- assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
- assert(!root.executionStats.hasOwnProperty("allPlansExecution"),
- tojson("shardedExplain"));
- } else {
- assert.eq(verbosity, "allPlansExecution", tojson(shardedExplain));
- assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
- assert(root.executionStats.hasOwnProperty("allPlansExecution"),
- tojson(shardedExplain));
- }
+"use strict";
+
+// For profilerHasSingleMatchingEntryOrThrow.
+load("jstests/libs/profiler.js");
+
+// Given sharded explain output in 'shardedExplain', verifies that the explain mode 'verbosity'
+// affected the output verbosity appropriately, and that the response has the expected format.
+// Set 'optimizedAwayPipeline' to true if the pipeline is expected to be optimized away.
+function verifyExplainResult(
+ {shardedExplain = null, verbosity = "", optimizedAwayPipeline = false} = {}) {
+ assert.commandWorked(shardedExplain);
+ assert(shardedExplain.hasOwnProperty("shards"), tojson(shardedExplain));
+ for (let elem in shardedExplain.shards) {
+ let shard = shardedExplain.shards[elem];
+ let root;
+ if (optimizedAwayPipeline) {
+ assert(shard.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
+ root = shard;
+ } else {
+ assert(shard.stages[0].hasOwnProperty("$cursor"), tojson(shardedExplain));
+ assert(shard.stages[0].$cursor.hasOwnProperty("queryPlanner"), tojson(shardedExplain));
+ root = shard.stages[0].$cursor;
+ }
+ if (verbosity === "queryPlanner") {
+ assert(!root.hasOwnProperty("executionStats"), tojson(shardedExplain));
+ } else if (verbosity === "executionStats") {
+ assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
+ assert(!root.executionStats.hasOwnProperty("allPlansExecution"),
+ tojson("shardedExplain"));
+ } else {
+ assert.eq(verbosity, "allPlansExecution", tojson(shardedExplain));
+ assert(root.hasOwnProperty("executionStats"), tojson(shardedExplain));
+ assert(root.executionStats.hasOwnProperty("allPlansExecution"), tojson(shardedExplain));
}
}
+}
- let st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
+let st = new ShardingTest({name: "views_sharded", shards: 2, other: {enableBalancer: false}});
- let mongos = st.s;
- let config = mongos.getDB("config");
- let db = mongos.getDB(jsTestName());
- db.dropDatabase();
+let mongos = st.s;
+let config = mongos.getDB("config");
+let db = mongos.getDB(jsTestName());
+db.dropDatabase();
- let coll = db.getCollection("coll");
+let coll = db.getCollection("coll");
- assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
- assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
- assert.commandWorked(
- db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
+assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
+assert.commandWorked(
+ db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
- assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
- let view = db.getCollection("view");
+assert.commandWorked(db.createView("view", coll.getName(), [{$match: {a: {$gte: 4}}}]));
+let view = db.getCollection("view");
- const explainVerbosities = ["queryPlanner", "executionStats", "allPlansExecution"];
+const explainVerbosities = ["queryPlanner", "executionStats", "allPlansExecution"];
- //
- // find
- //
- assert.eq(5, view.find({a: {$lte: 8}}).itcount());
+//
+// find
+//
+assert.eq(5, view.find({a: {$lte: 8}}).itcount());
- let result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}});
+let result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}});
+verifyExplainResult(
+ {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
+for (let verbosity of explainVerbosities) {
+ result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}, verbosity: verbosity});
verifyExplainResult(
- {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
- for (let verbosity of explainVerbosities) {
- result =
- db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}, verbosity: verbosity});
- verifyExplainResult(
- {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
- }
-
- //
- // aggregate
- //
- assert.eq(5, view.aggregate([{$match: {a: {$lte: 8}}}]).itcount());
-
- // Test that the explain:true flag for the aggregate command results in queryPlanner verbosity.
- result =
- db.runCommand({aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], explain: true});
+ {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
+}
+
+//
+// aggregate
+//
+assert.eq(5, view.aggregate([{$match: {a: {$lte: 8}}}]).itcount());
+
+// Test that the explain:true flag for the aggregate command results in queryPlanner verbosity.
+result = db.runCommand({aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], explain: true});
+verifyExplainResult(
+ {shardedExplain: result, verbosity: "queryPlanner", optimizedAwayPipeline: true});
+
+result =
+ db.runCommand({explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}}});
+verifyExplainResult(
+ {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
+for (let verbosity of explainVerbosities) {
+ result = db.runCommand({
+ explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}},
+ verbosity: verbosity
+ });
verifyExplainResult(
- {shardedExplain: result, verbosity: "queryPlanner", optimizedAwayPipeline: true});
-
+ {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
+}
+
+//
+// count
+//
+assert.eq(5, view.count({a: {$lte: 8}}));
+
+result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}});
+verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
+for (let verbosity of explainVerbosities) {
+ result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity});
+ verifyExplainResult({shardedExplain: result, verbosity: verbosity});
+}
+
+//
+// distinct
+//
+result = db.runCommand({distinct: "view", key: "a", query: {a: {$lte: 8}}});
+assert.commandWorked(result);
+assert.eq([4, 5, 6, 7, 8], result.values.sort());
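+// The view pipeline keeps a >= 4 and the query adds a <= 8, so of the inserted values
+// 0 through 9 only 4 through 8 survive.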
+
+result = db.runCommand({explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}});
+verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
+for (let verbosity of explainVerbosities) {
result = db.runCommand(
- {explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}}});
- verifyExplainResult(
- {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true});
- for (let verbosity of explainVerbosities) {
- result = db.runCommand({
- explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}},
- verbosity: verbosity
- });
- verifyExplainResult(
- {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true});
+ {explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}, verbosity: verbosity});
+ verifyExplainResult({shardedExplain: result, verbosity: verbosity});
+}
+
+//
+// Confirm cleanupOrphaned command fails.
+//
+result = st.getPrimaryShard(db.getName()).getDB("admin").runCommand({
+ cleanupOrphaned: view.getFullName()
+});
+assert.commandFailedWithCode(result, ErrorCodes.CommandNotSupportedOnView);
+
+//
+// Confirm getShardVersion command fails.
+//
+assert.commandFailedWithCode(db.adminCommand({getShardVersion: view.getFullName()}),
+ ErrorCodes.NamespaceNotSharded);
+
+//
+// Confirm that the comment parameter on a find command is retained when rewritten as an
+// expanded aggregation on the view.
+//
+let sdb = st.shard0.getDB(jsTestName());
+assert.commandWorked(sdb.setProfilingLevel(2));
+
+assert.eq(5, view.find({a: {$lte: 8}}).comment("agg_comment").itcount());
+
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: sdb,
+ filter: {
+ "command.aggregate": coll.getName(),
+ "command.comment": "agg_comment",
+ "command.needsMerge": true,
+ "command.pipeline.$mergeCursors": {$exists: false}
}
+});
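+// The filter pins down the shard-side half of the split pipeline: "needsMerge" is set only on
+// the pipeline sent to the shards, and the absence of $mergeCursors rules out the merging
+// pipeline, so a single profiler match proves the comment reached the shard unchanged.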
- //
- // count
- //
- assert.eq(5, view.count({a: {$lte: 8}}));
-
- result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}});
- verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
- for (let verbosity of explainVerbosities) {
- result =
- db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity});
- verifyExplainResult({shardedExplain: result, verbosity: verbosity});
- }
-
- //
- // distinct
- //
- result = db.runCommand({distinct: "view", key: "a", query: {a: {$lte: 8}}});
- assert.commandWorked(result);
- assert.eq([4, 5, 6, 7, 8], result.values.sort());
-
- result = db.runCommand({explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}});
- verifyExplainResult({shardedExplain: result, verbosity: "allPlansExecution"});
- for (let verbosity of explainVerbosities) {
- result = db.runCommand(
- {explain: {distinct: "view", key: "a", query: {a: {$lte: 8}}}, verbosity: verbosity});
- verifyExplainResult({shardedExplain: result, verbosity: verbosity});
- }
-
- //
- // Confirm cleanupOrphaned command fails.
- //
- result = st.getPrimaryShard(db.getName()).getDB("admin").runCommand({
- cleanupOrphaned: view.getFullName()
- });
- assert.commandFailedWithCode(result, ErrorCodes.CommandNotSupportedOnView);
-
- //
- // Confirm getShardVersion command fails.
- //
- assert.commandFailedWithCode(db.adminCommand({getShardVersion: view.getFullName()}),
- ErrorCodes.NamespaceNotSharded);
-
- //
- // Confirm that the comment parameter on a find command is retained when rewritten as an
- // expanded aggregation on the view.
- //
- let sdb = st.shard0.getDB(jsTestName());
- assert.commandWorked(sdb.setProfilingLevel(2));
-
- assert.eq(5, view.find({a: {$lte: 8}}).comment("agg_comment").itcount());
-
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: sdb,
- filter: {
- "command.aggregate": coll.getName(),
- "command.comment": "agg_comment",
- "command.needsMerge": true,
- "command.pipeline.$mergeCursors": {$exists: false}
- }
- });
-
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/wildcard_index_banned_for_shard_key.js b/jstests/sharding/wildcard_index_banned_for_shard_key.js
index 1b8f8bd4dea..17f905a77fd 100644
--- a/jstests/sharding/wildcard_index_banned_for_shard_key.js
+++ b/jstests/sharding/wildcard_index_banned_for_shard_key.js
@@ -3,38 +3,38 @@
//
(function() {
- 'use strict';
+'use strict';
- const st = new ShardingTest({mongos: 1, shards: 2});
- const kDbName = 'wildcard_index_banned_for_shard_key';
- const mongos = st.s0;
+const st = new ShardingTest({mongos: 1, shards: 2});
+const kDbName = 'wildcard_index_banned_for_shard_key';
+const mongos = st.s0;
- function assertCannotShardCollectionOnWildcardIndex(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+function assertCannotShardCollectionOnWildcardIndex(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- assert.commandFailedWithCode(
- mongos.adminCommand({shardCollection: `${kDbName}.foo`, key: keyDoc}),
- ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(
+ mongos.adminCommand({shardCollection: `${kDbName}.foo`, key: keyDoc}),
+ ErrorCodes.InvalidOptions);
- assert.eq(mongos.getDB('config').collections.count({_id: `${kDbName}.foo`}), 0);
- assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
- }
+ assert.eq(mongos.getDB('config').collections.count({_id: `${kDbName}.foo`}), 0);
+ assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
+}
- // Can't shard on a path supported by a general wildcard index.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"$**": 1}));
- assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
- assertCannotShardCollectionOnWildcardIndex({a: 1});
+// Can't shard on a path supported by a general wildcard index.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"$**": 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
+assertCannotShardCollectionOnWildcardIndex({a: 1});
- // Can't shard on a path supported by a targeted wildcard index.
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"a.$**": 1}));
- assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
- assertCannotShardCollectionOnWildcardIndex({a: 1});
+// Can't shard on a path supported by a targeted wildcard index.
+assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({"a.$**": 1}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
+assertCannotShardCollectionOnWildcardIndex({a: 1});
- // Can't shard on a path supported by wildcard index with projection option.
- assert.commandWorked(
- mongos.getDB(kDbName).foo.createIndex({"$**": 1}, {wildcardProjection: {a: 1}}));
- assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
- assertCannotShardCollectionOnWildcardIndex({a: 1});
+// Can't shard on a path supported by wildcard index with projection option.
+assert.commandWorked(
+ mongos.getDB(kDbName).foo.createIndex({"$**": 1}, {wildcardProjection: {a: 1}}));
+assert.commandWorked(mongos.getDB(kDbName).foo.insert({a: 1}));
+assertCannotShardCollectionOnWildcardIndex({a: 1});
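+// In each case above the wildcard index is the only index covering the proposed shard-key
+// path, and the server refuses to use a wildcard index to support a shard key, so
+// shardCollection fails with InvalidOptions.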
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index b88971e8c3d..0c808102bf3 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -2,164 +2,160 @@
* Tests the auto split will be triggered when using write commands.
*/
(function() {
- 'use strict';
- load('jstests/sharding/autosplit_include.js');
+'use strict';
+load('jstests/sharding/autosplit_include.js');
- var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
+var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
- var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
+var configDB = st.s.getDB('config');
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
- var doc1k = (new Array(1024)).join('x');
- var testDB = st.s.getDB('test');
+var doc1k = (new Array(1024)).join('x');
+var testDB = st.s.getDB('test');
- jsTest.log('Test single batch insert should auto-split');
+jsTest.log('Test single batch insert should auto-split');
- assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
- // This should result in a little over 3MB inserted into the chunk, so with
- // a max chunk size of 1MB we'd expect the autosplitter to split this into
- // at least 3 chunks
- for (var x = 0; x < 3100; x++) {
- assert.writeOK(testDB.runCommand({
- insert: 'insert',
- documents: [{x: x, v: doc1k}],
- ordered: false,
- writeConcern: {w: 1}
- }));
- }
+// This should result in a little over 3MB inserted into the chunk, so with
+// a max chunk size of 1MB we'd expect the autosplitter to split this into
+// at least 3 chunks
+for (var x = 0; x < 3100; x++) {
+ assert.writeOK(testDB.runCommand(
+ {insert: 'insert', documents: [{x: x, v: doc1k}], ordered: false, writeConcern: {w: 1}}));
+}
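+// Arithmetic check: (new Array(1024)).join('x') yields 1023 characters, so each document is
+// roughly 1KB and 3100 inserts add up to a little over 3MB against the 1MB max chunk size.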
- waitForOngoingChunkSplits(st);
+waitForOngoingChunkSplits(st);
- // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
- // more than 2.
- assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
- testDB.dropDatabase();
+// The inserted batch is several times the chunk size, so expect the data to split
+// into more than 2 chunks.
+assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
+testDB.dropDatabase();
- jsTest.log('Test single batch update should auto-split');
+jsTest.log('Test single batch update should auto-split');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
- assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var x = 0; x < 2100; x++) {
- assert.writeOK(testDB.runCommand({
- update: 'update',
- updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
- ordered: false,
- writeConcern: {w: 1}
- }));
- }
+for (var x = 0; x < 2100; x++) {
+ assert.writeOK(testDB.runCommand({
+ update: 'update',
+ updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+}
- waitForOngoingChunkSplits(st);
+waitForOngoingChunkSplits(st);
- assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
- testDB.dropDatabase();
+assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
+testDB.dropDatabase();
- jsTest.log('Test single delete should not auto-split');
+jsTest.log('Test single delete should not auto-split');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- for (var x = 0; x < 1100; x++) {
- assert.writeOK(testDB.runCommand({
- delete: 'delete',
- deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
- ordered: false,
- writeConcern: {w: 1}
- }));
- }
+for (var x = 0; x < 1100; x++) {
+ assert.writeOK(testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+}
- // If we are autosplitting (which we shouldn't be), we want to wait until
- // it's finished, otherwise we could falsely think no autosplitting was
- // done when really it was just in progress.
- waitForOngoingChunkSplits(st);
+// If we are autosplitting (which we shouldn't be), we want to wait until
+// it's finished, otherwise we could falsely think no autosplitting was
+// done when really it was just in progress.
+waitForOngoingChunkSplits(st);
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- testDB.dropDatabase();
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+testDB.dropDatabase();
- jsTest.log('Test batched insert should auto-split');
+jsTest.log('Test batched insert should auto-split');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
- assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());
- // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
- // we are going to be conservative.
- for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+// we are going to be conservative.
+for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
- for (var y = 0; y < 400; y++) {
- docs.push({x: (x + y), v: doc1k});
- }
-
- assert.writeOK(testDB.runCommand(
- {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
+ for (var y = 0; y < 400; y++) {
+ docs.push({x: (x + y), v: doc1k});
}
- waitForOngoingChunkSplits(st);
+ assert.writeOK(testDB.runCommand(
+ {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
+}
- assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
- testDB.dropDatabase();
+waitForOngoingChunkSplits(st);
- jsTest.log('Test batched update should auto-split');
+assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
+testDB.dropDatabase();
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
+jsTest.log('Test batched update should auto-split');
- assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
- for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
- }
+for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
- assert.writeOK(testDB.runCommand(
- {update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
}
- waitForOngoingChunkSplits(st);
+ assert.writeOK(
+ testDB.runCommand({update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
+}
- assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
- testDB.dropDatabase();
+waitForOngoingChunkSplits(st);
- jsTest.log('Test batched delete should not auto-split');
+assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
+testDB.dropDatabase();
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
+jsTest.log('Test batched delete should not auto-split');
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
- for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({q: {x: id, v: doc1k}, top: 0});
- }
+for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
- assert.writeOK(testDB.runCommand({
- delete: 'delete',
- deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
- ordered: false,
- writeConcern: {w: 1}
- }));
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id, v: doc1k}, top: 0});
}
- // If we are autosplitting (which we shouldn't be), we want to wait until
- // it's finished, otherwise we could falsely think no autosplitting was
- // done when really it was just in progress.
- waitForOngoingChunkSplits(st);
+ assert.writeOK(testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+}
+
+// If we are autosplitting (which we shouldn't be), we want to wait until
+// it's finished, otherwise we could falsely think no autosplitting was
+// done when really it was just in progress.
+waitForOngoingChunkSplits(st);
- assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
+assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- st.stop();
+st.stop();
})();
diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js
index 395d328a138..5d6595e5758 100644
--- a/jstests/sharding/write_commands_sharding_state.js
+++ b/jstests/sharding/write_commands_sharding_state.js
@@ -3,84 +3,82 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
+var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
- var dbTestName = 'WriteCommandsTestDB';
- var collName = dbTestName + '.TestColl';
+var dbTestName = 'WriteCommandsTestDB';
+var collName = dbTestName + '.TestColl';
- assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
- st.ensurePrimaryShard(dbTestName, st.shard0.shardName);
+assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
+st.ensurePrimaryShard(dbTestName, st.shard0.shardName);
- assert.commandWorked(
- st.s0.adminCommand({shardCollection: collName, key: {Key: 1}, unique: true}));
+assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}, unique: true}));
- // Split at keys 10 and 20
- assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 10}}));
- assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 20}}));
+// Split at keys 10 and 20
+assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 10}}));
+assert.commandWorked(st.s0.adminCommand({split: collName, middle: {Key: 20}}));
- printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- // Move 10 and 20 to st.shard0.shardName1
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: collName, find: {Key: 19}, to: st.shard1.shardName, _waitForDelete: true}));
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: collName, find: {Key: 21}, to: st.shard1.shardName, _waitForDelete: true}));
+// Move the chunks starting at keys 10 and 20 to st.shard1.shardName
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: collName, find: {Key: 19}, to: st.shard1.shardName, _waitForDelete: true}));
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: collName, find: {Key: 21}, to: st.shard1.shardName, _waitForDelete: true}));
- printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- // Insert one document in each chunk, which we will use to change
- assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
- assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
- assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
+// Insert one document in each chunk; these are the documents we will later modify.
+assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
+assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
+assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
- // Make sure the documents are correctly placed
- printjson(st.shard0.getDB(dbTestName).TestColl.find().toArray());
- printjson(st.shard1.getDB(dbTestName).TestColl.find().toArray());
+// Make sure the documents are correctly placed
+printjson(st.shard0.getDB(dbTestName).TestColl.find().toArray());
+printjson(st.shard1.getDB(dbTestName).TestColl.find().toArray());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.count());
- assert.eq(2, st.shard1.getDB(dbTestName).TestColl.count());
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.count());
+assert.eq(2, st.shard1.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 11}).count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 11}).count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
- // Move chunk [0, 19] to st.shard0.shardName and make sure the documents are correctly placed
- assert.commandWorked(st.s0.adminCommand(
- {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));
+// Move chunk [0, 19] to st.shard0.shardName and make sure the documents are correctly placed
+assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));
- printjson(st.config.getSiblingDB('config').chunks.find().toArray());
- printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
- printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
- // Now restart all mongod instances, so they don't know yet that they are sharded
- st.restartShardRS(0);
- st.restartShardRS(1);
+// Now restart all mongod instances, so they don't know yet that they are sharded
+st.restartShardRS(0);
+st.restartShardRS(1);
- // Now that both mongod shards are restarted, they don't know yet that they are part of a
- // sharded
- // cluster until they get a setShardVerion command. Mongos instance s1 has stale metadata and
- // doesn't know that chunk with key 19 has moved to st.shard0.shardName so it will send it to
- // st.shard1.shardName at
- // first.
- //
- // Shard0001 would only send back a stale config exception if it receives a setShardVersion
- // command. The bug that this test validates is that setShardVersion is indeed being sent (for
- // more
- // information, see SERVER-19395).
- st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
+// Now that both mongod shards are restarted, they don't know yet that they are part of a
+// sharded cluster until they get a setShardVersion command. Mongos instance s1 has stale
+// metadata and doesn't know that the chunk with key 19 has moved to st.shard0.shardName, so
+// it will send the update to st.shard1.shardName at first.
+//
+// Shard0001 would only send back a stale config exception if it receives a setShardVersion
+// command. The bug that this test validates is that setShardVersion is indeed being sent (for
+// more information, see SERVER-19395).
+st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
- printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
- printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.shard0.getDB(dbTestName).TestColl.find({}).toArray());
+printjson(st.shard1.getDB(dbTestName).TestColl.find({}).toArray());
- assert.eq(2, st.shard0.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.count());
+assert.eq(2, st.shard0.getDB(dbTestName).TestColl.count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.count());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
- assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 11}).count());
- assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-
- st.stop();
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+assert.eq(1, st.shard0.getDB(dbTestName).TestColl.find({Key: 11}).count());
+assert.eq(1, st.shard1.getDB(dbTestName).TestColl.find({Key: 21}).count());
+st.stop();
})();
diff --git a/jstests/sharding/write_transactions_during_migration.js b/jstests/sharding/write_transactions_during_migration.js
index d8d86bd4516..9b043eb0f1a 100644
--- a/jstests/sharding/write_transactions_during_migration.js
+++ b/jstests/sharding/write_transactions_during_migration.js
@@ -13,159 +13,158 @@ load('./jstests/libs/chunk_manipulation_util.js');
* 4. Retry writes and confirm that writes are not duplicated.
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
- var st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
- st.adminCommand({enableSharding: 'test'});
- st.ensurePrimaryShard('test', st.shard0.shardName);
- st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
-
- pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- var joinMoveChunk =
- moveChunkParallel(staticMongod, st.s.host, {x: 0}, null, 'test.user', st.shard1.shardName);
-
- waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
-
- const insertCmd = {
- insert: 'user',
- documents: [
- // For findAndModify not touching chunk being migrated.
- {x: -30},
- // For changing doc to become owned by chunk being migrated.
- {x: -20},
- {x: -20},
- // For basic insert.
- {x: 10},
- // For changing doc to become owned by another chunk not being migrated.
- {x: 20},
- {x: 20},
- // For basic findAndModify.
- {x: 30}
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(34),
- };
-
- var testDB = st.getDB('test');
- const insertResult = assert.commandWorked(testDB.runCommand(insertCmd));
-
- const findAndModCmd = {
- findAndModify: 'user',
- query: {x: 30},
- update: {$inc: {y: 1}},
- new: true,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const findAndModifyResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
-
- const changeDocToChunkNotMigrated = {
- findAndModify: 'user',
- query: {x: 20},
- update: {$set: {x: -120}, $inc: {y: 1}},
- new: false,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const changeDocToNotMigratedResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
-
- const changeDocToChunkMigrated = {
- findAndModify: 'user',
- query: {x: -20},
- update: {$set: {x: 120}, $inc: {y: 1}},
- new: false,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const changeDocToMigratedResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
-
- const findAndModifyNotMigrated = {
- findAndModify: 'user',
- query: {x: -30},
- update: {$inc: {y: 1}},
- new: false,
- upsert: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(37),
- };
-
- const findAndModifyNotMigratedResult =
- assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
-
- unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
- joinMoveChunk();
-
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Retry phase
-
- var insertRetryResult = assert.commandWorked(testDB.runCommand(insertCmd));
-
- assert.eq(insertResult.ok, insertRetryResult.ok);
- assert.eq(insertResult.n, insertRetryResult.n);
- assert.eq(insertResult.writeErrors, insertRetryResult.writeErrors);
- assert.eq(insertResult.writeConcernErrors, insertRetryResult.writeConcernErrors);
-
- assert.eq(1, testDB.user.find({x: 10}).itcount());
- assert.eq(1, testDB.user.find({x: 30}).itcount());
-
- var findAndModifyRetryResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
-
- assert.eq(findAndModifyResult.ok, findAndModifyRetryResult.ok);
- assert.eq(findAndModifyResult.value, findAndModifyRetryResult.value);
- assert.eq(findAndModifyResult.lastErrorObject, findAndModifyRetryResult.lastErrorObject);
-
- assert.eq(1, testDB.user.findOne({x: 30}).y);
-
- let changeDocToNotMigratedRetryResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
-
- assert.eq(changeDocToNotMigratedResult.ok, changeDocToNotMigratedRetryResult.ok);
- assert.eq(changeDocToNotMigratedResult.value, changeDocToNotMigratedRetryResult.value);
- assert.eq(changeDocToNotMigratedResult.lastErrorObject,
- changeDocToNotMigratedRetryResult.lastErrorObject);
-
- assert.eq(1, testDB.user.find({x: -120}).itcount());
-
- let changeDocToMigratedRetryResult =
- assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
-
- assert.eq(changeDocToMigratedResult.ok, changeDocToMigratedRetryResult.ok);
- assert.eq(changeDocToMigratedResult.value, changeDocToMigratedRetryResult.value);
- assert.eq(changeDocToMigratedResult.lastErrorObject,
- changeDocToMigratedRetryResult.lastErrorObject);
-
- assert.eq(1, testDB.user.find({x: 120}).itcount());
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+var st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
+st.adminCommand({enableSharding: 'test'});
+st.ensurePrimaryShard('test', st.shard0.shardName);
+st.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}}));
+
+pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+var joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s.host, {x: 0}, null, 'test.user', st.shard1.shardName);
+
+waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+
+const insertCmd = {
+ insert: 'user',
+ documents: [
+ // For findAndModify not touching chunk being migrated.
+ {x: -30},
+ // For changing doc to become owned by chunk being migrated.
+ {x: -20},
+ {x: -20},
+ // For basic insert.
+ {x: 10},
+ // For changing doc to become owned by another chunk not being migrated.
+ {x: 20},
+ {x: 20},
+ // For basic findAndModify.
+ {x: 30}
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(34),
+};
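+// The lsid/txnNumber pair above marks this as a retryable write: re-sending the same command
+// with the same session id and transaction number makes the server return the recorded
+// result instead of executing the inserts again.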
+
+var testDB = st.getDB('test');
+const insertResult = assert.commandWorked(testDB.runCommand(insertCmd));
+
+const findAndModCmd = {
+ findAndModify: 'user',
+ query: {x: 30},
+ update: {$inc: {y: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const findAndModifyResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
+
+const changeDocToChunkNotMigrated = {
+ findAndModify: 'user',
+ query: {x: 20},
+ update: {$set: {x: -120}, $inc: {y: 1}},
+ new: false,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const changeDocToNotMigratedResult =
+ assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
+
+const changeDocToChunkMigrated = {
+ findAndModify: 'user',
+ query: {x: -20},
+ update: {$set: {x: 120}, $inc: {y: 1}},
+ new: false,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const changeDocToMigratedResult = assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
+
+const findAndModifyNotMigrated = {
+ findAndModify: 'user',
+ query: {x: -30},
+ update: {$inc: {y: 1}},
+ new: false,
+ upsert: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(37),
+};
+
+const findAndModifyNotMigratedResult =
+ assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
+
+unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState);
+joinMoveChunk();
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// Retry phase
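+//
+// Every command above is re-issued verbatim with its original lsid and txnNumber. The
+// responses must match the first execution and no documents may be duplicated, even though
+// the chunk at {x: 0} has since migrated to the other shard.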
+
+var insertRetryResult = assert.commandWorked(testDB.runCommand(insertCmd));
+
+assert.eq(insertResult.ok, insertRetryResult.ok);
+assert.eq(insertResult.n, insertRetryResult.n);
+assert.eq(insertResult.writeErrors, insertRetryResult.writeErrors);
+assert.eq(insertResult.writeConcernErrors, insertRetryResult.writeConcernErrors);
+
+assert.eq(1, testDB.user.find({x: 10}).itcount());
+assert.eq(1, testDB.user.find({x: 30}).itcount());
+
+var findAndModifyRetryResult = assert.commandWorked(testDB.runCommand(findAndModCmd));
+
+assert.eq(findAndModifyResult.ok, findAndModifyRetryResult.ok);
+assert.eq(findAndModifyResult.value, findAndModifyRetryResult.value);
+assert.eq(findAndModifyResult.lastErrorObject, findAndModifyRetryResult.lastErrorObject);
+
+assert.eq(1, testDB.user.findOne({x: 30}).y);
+
+let changeDocToNotMigratedRetryResult =
+ assert.commandWorked(testDB.runCommand(changeDocToChunkNotMigrated));
+
+assert.eq(changeDocToNotMigratedResult.ok, changeDocToNotMigratedRetryResult.ok);
+assert.eq(changeDocToNotMigratedResult.value, changeDocToNotMigratedRetryResult.value);
+assert.eq(changeDocToNotMigratedResult.lastErrorObject,
+ changeDocToNotMigratedRetryResult.lastErrorObject);
+
+assert.eq(1, testDB.user.find({x: -120}).itcount());
+
+let changeDocToMigratedRetryResult =
+ assert.commandWorked(testDB.runCommand(changeDocToChunkMigrated));
+
+assert.eq(changeDocToMigratedResult.ok, changeDocToMigratedRetryResult.ok);
+assert.eq(changeDocToMigratedResult.value, changeDocToMigratedRetryResult.value);
+assert.eq(changeDocToMigratedResult.lastErrorObject,
+ changeDocToMigratedRetryResult.lastErrorObject);
+
+assert.eq(1, testDB.user.find({x: 120}).itcount());
- let findAndModifyNotMigratedRetryResult =
- assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
+let findAndModifyNotMigratedRetryResult =
+ assert.commandWorked(testDB.runCommand(findAndModifyNotMigrated));
- assert.eq(findAndModifyNotMigratedResult.ok, findAndModifyNotMigratedRetryResult.ok);
- assert.eq(findAndModifyNotMigratedResult.value, findAndModifyNotMigratedRetryResult.value);
- assert.eq(findAndModifyNotMigratedResult.lastErrorObject,
- findAndModifyNotMigratedRetryResult.lastErrorObject);
+assert.eq(findAndModifyNotMigratedResult.ok, findAndModifyNotMigratedRetryResult.ok);
+assert.eq(findAndModifyNotMigratedResult.value, findAndModifyNotMigratedRetryResult.value);
+assert.eq(findAndModifyNotMigratedResult.lastErrorObject,
+ findAndModifyNotMigratedRetryResult.lastErrorObject);
- assert.eq(1, testDB.user.findOne({x: -30}).y);
+assert.eq(1, testDB.user.findOne({x: -30}).y);
- st.stop();
+st.stop();
- MongoRunner.stopMongod(staticMongod);
+MongoRunner.stopMongod(staticMongod);
})();
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index baf96845f62..dcd3b93fb71 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -4,227 +4,225 @@
* @tags: [resource_intensive]
*/
(function() {
- 'use strict';
-
- let s = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: {
- rs: true,
- numReplicas: 2,
- chunkSize: 1,
- rsOptions: {oplogSize: 50},
- enableAutoSplit: true,
- }
- });
-
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {"_id": 1}}));
-
- let testDb = s.getDB("test");
-
- jsTest.log("Inserting a lot of documents into test.foo");
+'use strict';
- // Make each document data to be 5K so that the total size is ~250MB
- const str = "#".repeat(5 * 1024);
-
- var idInc = 0;
- var valInc = 0;
-
- var bulk = testDb.foo.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- for (var i = 0; i < 512; i++) {
- bulk.insert({i: idInc++, val: valInc++, y: str});
- }
+let s = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 2,
+ chunkSize: 1,
+ rsOptions: {oplogSize: 50},
+ enableAutoSplit: true,
}
- assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+});
- jsTest.log("Documents inserted, doing double-checks of insert...");
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard0.shardName);
+assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {"_id": 1}}));
- // Collect some useful stats to figure out what happened
- if (testDb.foo.find().itcount() != 51200) {
- s.printShardingStatus(true);
+let testDb = s.getDB("test");
- print("Shard 0: " + s.shard0.getCollection(testDb.foo + "").find().itcount());
- print("Shard 1: " + s.shard1.getCollection(testDb.foo + "").find().itcount());
+jsTest.log("Inserting a lot of documents into test.foo");
- for (var i = 0; i < 51200; i++) {
- if (!testDb.foo.findOne({i: i}, {i: 1})) {
- print("Could not find: " + i);
- }
+// Make each document's payload ~5KB so that the total size is ~250MB
+const str = "#".repeat(5 * 1024);
- if (i % 100 == 0)
- print("Checked " + i);
- }
+var idInc = 0;
+var valInc = 0;
- assert(false, 'Incorect number of chunks found!');
+var bulk = testDb.foo.initializeUnorderedBulkOp();
+for (var j = 0; j < 100; j++) {
+ for (var i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
+}
+assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
- s.printChunks(testDb.foo.getFullName());
- s.printChangeLog();
+jsTest.log("Documents inserted, doing double-checks of insert...");
- function map() {
- emit('count', 1);
- }
- function reduce(key, values) {
- return Array.sum(values);
- }
+// Collect some useful stats to figure out what happened
+if (testDb.foo.find().itcount() != 51200) {
+ s.printShardingStatus(true);
- // Let chunks move around while map reduce is running
- s.startBalancer();
+ print("Shard 0: " + s.shard0.getCollection(testDb.foo + "").find().itcount());
+ print("Shard 1: " + s.shard1.getCollection(testDb.foo + "").find().itcount());
- jsTest.log("Test basic mapreduce...");
+ for (var i = 0; i < 51200; i++) {
+ if (!testDb.foo.findOne({i: i}, {i: 1})) {
+ print("Could not find: " + i);
+ }
- // Test basic mapReduce
- for (var iter = 0; iter < 5; iter++) {
- print("Test #" + iter);
- testDb.foo.mapReduce(map, reduce, "big_out");
+ if (i % 100 == 0)
+ print("Checked " + i);
}
- print("Testing output to different db...");
+ assert(false, 'Incorrect number of documents found!');
+}
- // Test output to a different DB - do it multiple times so that the merging shard changes
- for (var iter = 0; iter < 5; iter++) {
- print("Test #" + iter);
+s.printChunks(testDb.foo.getFullName());
+s.printChangeLog();
- assert.eq(51200, testDb.foo.find().itcount(), "Not all data was found!");
+function map() {
+ emit('count', 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
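+// mapReduce may invoke reduce repeatedly on partial lists of values, so the function must be
+// associative, commutative, and idempotent; summing per-key counts satisfies all three.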
- let outCollStr = "mr_replace_col_" + iter;
- let outDbStr = "mr_db_" + iter;
+// Let chunks move around while map reduce is running
+s.startBalancer();
- print("Testing mr replace into DB " + iter);
+jsTest.log("Test basic mapreduce...");
- var res = testDb.foo.mapReduce(map, reduce, {out: {replace: outCollStr, db: outDbStr}});
- printjson(res);
+// Test basic mapReduce
+for (var iter = 0; iter < 5; iter++) {
+ print("Test #" + iter);
+ testDb.foo.mapReduce(map, reduce, "big_out");
+}
- var outDb = s.getDB(outDbStr);
- var outColl = outDb[outCollStr];
+print("Testing output to different db...");
- var obj = outColl.convertToSingleObject("value");
- assert.eq(51200, obj.count, "Received wrong result " + obj.count);
+// Test output to a different DB - do it multiple times so that the merging shard changes
+for (var iter = 0; iter < 5; iter++) {
+ print("Test #" + iter);
- print("Checking result field");
- assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
- assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
- }
+ assert.eq(51200, testDb.foo.find().itcount(), "Not all data was found!");
- jsTest.log("Verifying nonatomic M/R throws...");
+ let outCollStr = "mr_replace_col_" + iter;
+ let outDbStr = "mr_db_" + iter;
- // Check nonAtomic output
- assert.throws(function() {
- testDb.foo.mapReduce(map, reduce, {out: {replace: "big_out", nonAtomic: true}});
- });
+ print("Testing mr replace into DB " + iter);
- jsTest.log("Adding documents");
+ var res = testDb.foo.mapReduce(map, reduce, {out: {replace: outCollStr, db: outDbStr}});
+ printjson(res);
- // Add docs with dup "i"
- valInc = 0;
- for (var j = 0; j < 100; j++) {
- print("Inserted document: " + (j * 100));
- var bulk = testDb.foo.initializeUnorderedBulkOp();
- for (i = 0; i < 512; i++) {
- bulk.insert({i: idInc++, val: valInc++, y: str});
- }
- assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
- }
+ var outDb = s.getDB(outDbStr);
+ var outColl = outDb[outCollStr];
- jsTest.log("No errors...");
+ var obj = outColl.convertToSingleObject("value");
+ assert.eq(51200, obj.count, "Received wrong result " + obj.count);
- function map2() {
- emit(this.val, 1);
- }
- function reduce2(key, values) {
- return Array.sum(values);
- }
-
- // Test merge
- let outColMerge = 'big_out_merge';
-
- // M/R quarter of the docs
- {
- jsTestLog("Test A");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$lt: 25600}}, out: {merge: outColMerge}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(25600, out.counts.output, "Received wrong result");
- }
+ print("Checking result field");
+ assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
+ assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
+}
- // M/R further docs
- {
- jsTestLog("Test B");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {merge: outColMerge}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- }
+jsTest.log("Verifying nonatomic M/R throws...");
- // M/R do 2nd half of docs
- {
- jsTestLog("Test C");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$gte: 51200}}, out: {merge: outColMerge, nonAtomic: true}});
- printjson(out);
- assert.eq(51200, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- assert.eq(1, testDb[outColMerge].findOne().value, "Received wrong result");
- }
+// Check nonAtomic output
+assert.throws(function() {
+ testDb.foo.mapReduce(map, reduce, {out: {replace: "big_out", nonAtomic: true}});
+});
- // Test reduce
- let outColReduce = "big_out_reduce";
-
- // M/R quarter of the docs
- {
- jsTestLog("Test D");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$lt: 25600}}, out: {reduce: outColReduce}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(25600, out.counts.output, "Received wrong result");
- }
+jsTest.log("Adding documents");
- // M/R further docs
- {
- jsTestLog("Test E");
- var out = testDb.foo.mapReduce(
- map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {reduce: outColReduce}});
- printjson(out);
- assert.eq(25600, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- }
-
- // M/R do 2nd half of docs
- {
- jsTestLog("Test F");
- var out = testDb.foo.mapReduce(
- map2,
- reduce2,
- {query: {i: {$gte: 51200}}, out: {reduce: outColReduce, nonAtomic: true}});
- printjson(out);
- assert.eq(51200, out.counts.emit, "Received wrong result");
- assert.eq(51200, out.counts.output, "Received wrong result");
- assert.eq(2, testDb[outColReduce].findOne().value, "Received wrong result");
- }
-
- // Verify that data is also on secondary
- {
- jsTestLog("Test G");
- var primary = s.rs0._master;
- var secondaries = s.rs0._slaves;
-
- // Stop the balancer to prevent new writes from happening and make sure that replication can
- // keep up even on slow machines
- s.stopBalancer();
- s.rs0.awaitReplication();
- assert.eq(51200, primary.getDB("test")[outColReduce].find().itcount(), "Wrong count");
-
- for (var i = 0; i < secondaries.length; ++i) {
- assert.eq(
- 51200, secondaries[i].getDB("test")[outColReduce].find().itcount(), "Wrong count");
- }
+// Add docs with dup "i"
+valInc = 0;
+for (var j = 0; j < 100; j++) {
+ print("Inserted document: " + (j * 100));
+ var bulk = testDb.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
-
- s.stop();
+ assert.writeOK(bulk.execute({w: 2, wtimeout: 10 * 60 * 1000}));
+}
+
+jsTest.log("No errors...");
+
+function map2() {
+ emit(this.val, 1);
+}
+function reduce2(key, values) {
+ return Array.sum(values);
+}
+
+// Test merge
+let outColMerge = 'big_out_merge';
+
+// M/R quarter of the docs
+{
+ jsTestLog("Test A");
+ var out =
+ testDb.foo.mapReduce(map2, reduce2, {query: {i: {$lt: 25600}}, out: {merge: outColMerge}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
+}
+
+// M/R further docs
+{
+ jsTestLog("Test B");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {merge: outColMerge}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+}
+
+// M/R do 2nd half of docs
+{
+ jsTestLog("Test C");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {merge: outColMerge, nonAtomic: true}});
+ printjson(out);
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(1, testDb[outColMerge].findOne().value, "Received wrong result");
+}
+
+// Test reduce
+let outColReduce = "big_out_reduce";
+
+// M/R quarter of the docs
+{
+ jsTestLog("Test D");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$lt: 25600}}, out: {reduce: outColReduce}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
+}
+
+// M/R further docs
+{
+ jsTestLog("Test E");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {reduce: outColReduce}});
+ printjson(out);
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+}
+
+// M/R do 2nd half of docs
+{
+ jsTestLog("Test F");
+ var out = testDb.foo.mapReduce(
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {reduce: outColReduce, nonAtomic: true}});
+ printjson(out);
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(2, testDb[outColReduce].findOne().value, "Received wrong result");
+}
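+// Tests C and F cover the same documents (i >= 51200, whose 'val' keys duplicate the first
+// batch), but {merge: ...} overwrites each existing output value with the new result (still
+// 1), while {reduce: ...} re-reduces the new result with the stored one, yielding 1 + 1 = 2.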
+
+// Verify that data is also on secondary
+{
+ jsTestLog("Test G");
+ var primary = s.rs0._master;
+ var secondaries = s.rs0._slaves;
+
+ // Stop the balancer to prevent new writes from happening and make sure that replication can
+ // keep up even on slow machines
+ s.stopBalancer();
+ s.rs0.awaitReplication();
+ assert.eq(51200, primary.getDB("test")[outColReduce].find().itcount(), "Wrong count");
+
+ for (var i = 0; i < secondaries.length; ++i) {
+ assert.eq(
+ 51200, secondaries[i].getDB("test")[outColReduce].find().itcount(), "Wrong count");
+ }
+}
+
+s.stop();
})();
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 1b29f50c459..7d08bf34d36 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -3,181 +3,180 @@
* against a major version of zero or incompatible epochs.
*/
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 2, mongos: 4});
+var st = new ShardingTest({shards: 2, mongos: 4});
- var testDB_s0 = st.s.getDB('test');
- assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+var testDB_s0 = st.s.getDB('test');
+assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+st.ensurePrimaryShard('test', st.shard1.shardName);
+assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- var checkShardMajorVersion = function(conn, expectedVersion) {
- var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
- assert.eq(expectedVersion, shardVersionInfo.global.getTime());
- };
+var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+};
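+// Shard versions are written below as major|minor|epoch (e.g. 2|0|a); getTime() extracts the
+// major component of the version returned by getShardVersion. A major version of 0 means the
+// shard has no cached routing metadata for the collection.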
- ///////////////////////////////////////////////////////
- // Test shard with empty chunk
+///////////////////////////////////////////////////////
+// Test shard with empty chunk
- // shard0: 0|0|a
- // shard1: 1|0|a, [-inf, inf)
- // mongos0: 1|0|a
+// shard0: 0|0|a
+// shard1: 1|0|a, [-inf, inf)
+// mongos0: 1|0|a
- var testDB_s1 = st.s1.getDB('test');
- assert.writeOK(testDB_s1.user.insert({x: 1}));
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+var testDB_s1 = st.s1.getDB('test');
+assert.writeOK(testDB_s1.user.insert({x: 1}));
+assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
- st.configRS.awaitLastOpCommitted();
-
- // Official config:
- // shard0: 2|0|a, [-inf, inf)
- // shard1: 0|0|a
- //
- // Shard metadata:
- // shard0: 0|0|a
- // shard1: 0|0|a
- // mongos0: 1|0|a
+st.configRS.awaitLastOpCommitted();
+
+// Official config:
+// shard0: 2|0|a, [-inf, inf)
+// shard1: 0|0|a
+//
+// Shard metadata:
+// shard0: 0|0|a
+// shard1: 0|0|a
+// mongos0: 1|0|a
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- // mongos0 still thinks that { x: 1 } belong to st.shard1.shardName, but should be able to
- // refresh it's metadata correctly.
- assert.neq(null, testDB_s0.user.findOne({x: 1}));
+// mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s0.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- // Set mongos2 & mongos3 to version 2|0|a
- var testDB_s2 = st.s2.getDB('test');
- assert.neq(null, testDB_s2.user.findOne({x: 1}));
+// Set mongos2 & mongos3 to version 2|0|a
+var testDB_s2 = st.s2.getDB('test');
+assert.neq(null, testDB_s2.user.findOne({x: 1}));
- var testDB_s3 = st.s3.getDB('test');
- assert.neq(null, testDB_s3.user.findOne({x: 1}));
+var testDB_s3 = st.s3.getDB('test');
+assert.neq(null, testDB_s3.user.findOne({x: 1}));
- ///////////////////////////////////////////////////////
- // Test unsharded collection
- // mongos versions: s0, s2, s3: 2|0|a
+///////////////////////////////////////////////////////
+// Test unsharded collection
+// mongos versions: s0, s2, s3: 2|0|a
- testDB_s1.user.drop();
- assert.writeOK(testDB_s1.user.insert({x: 10}));
+testDB_s1.user.drop();
+assert.writeOK(testDB_s1.user.insert({x: 10}));
- // shard0: 0|0|0
- // shard1: 0|0|0
- // mongos0: 2|0|a
-
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- // mongos0 still thinks { x: 10 } belong to st.shard0.shardName, but since coll is dropped,
- // query should be routed to primary shard.
- assert.neq(null, testDB_s0.user.findOne({x: 10}));
+// shard0: 0|0|0
+// shard1: 0|0|0
+// mongos0: 2|0|a
+
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+// mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since the collection is
+// dropped, the query should be routed to the primary shard.
+assert.neq(null, testDB_s0.user.findOne({x: 10}));
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- ///////////////////////////////////////////////////////
- // Test 2 shards with 1 chunk
- // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+///////////////////////////////////////////////////////
+// Test 2 shards with 1 chunk
+// mongos versions: s0: 0|0|0, s2, s3: 2|0|a
- testDB_s1.user.drop();
- testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+testDB_s1.user.drop();
+testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
- // shard0: 0|0|b,
- // shard1: 1|1|b, [-inf, 0), [0, inf)
+// shard0: 0|0|b,
+// shard1: 1|1|b, [-inf, 0), [0, inf)
- testDB_s1.user.insert({x: 1});
- testDB_s1.user.insert({x: -11});
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+testDB_s1.user.insert({x: 1});
+testDB_s1.user.insert({x: -11});
+assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
- st.configRS.awaitLastOpCommitted();
+st.configRS.awaitLastOpCommitted();
- // Official config:
- // shard0: 2|0|b, [-inf, 0)
- // shard1: 2|1|b, [0, inf)
- //
- // Shard metadata:
- // shard0: 0|0|b
- // shard1: 2|1|b
- //
- // mongos2: 2|0|a
+// Official config:
+// shard0: 2|0|b, [-inf, 0)
+// shard1: 2|1|b, [0, inf)
+//
+// Shard metadata:
+// shard0: 0|0|b
+// shard1: 2|1|b
+//
+// mongos2: 2|0|a
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 2);
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 2);
- // mongos2 still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to
- // refresh it's metadata correctly.
- assert.neq(null, testDB_s2.user.findOne({x: 1}));
+// mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+// refresh its metadata correctly.
+assert.neq(null, testDB_s2.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 2);
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 2);
- // Set shard metadata to 2|0|b
- assert.neq(null, testDB_s2.user.findOne({x: -11}));
+// Set shard metadata to 2|0|b
+assert.neq(null, testDB_s2.user.findOne({x: -11}));
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 2);
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 2);
- // Official config:
- // shard0: 2|0|b, [-inf, 0)
- // shard1: 2|1|b, [0, inf)
- //
- // Shard metadata:
- // shard0: 2|0|b
- // shard1: 2|1|b
- //
- // mongos3: 2|0|a
+// Official config:
+// shard0: 2|0|b, [-inf, 0)
+// shard1: 2|1|b, [0, inf)
+//
+// Shard metadata:
+// shard0: 2|0|b
+// shard1: 2|1|b
+//
+// mongos3: 2|0|a
+
+// The 4th mongos still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able
+// to refresh its metadata correctly.
+assert.neq(null, testDB_s3.user.findOne({x: 1}));
- // 4th mongos still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to
- // refresh it's metadata correctly.
- assert.neq(null, testDB_s3.user.findOne({x: 1}));
+///////////////////////////////////////////////////////
+// Test a mongos that thinks the collection is unsharded when it is actually sharded
+// mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
- ///////////////////////////////////////////////////////
- // Test mongos thinks unsharded when it's actually sharded
- // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+// Set mongos0 to version 0|0|0
+testDB_s0.user.drop();
- // Set mongos0 to version 0|0|0
- testDB_s0.user.drop();
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+assert.eq(null, testDB_s0.user.findOne({x: 1}));
- assert.eq(null, testDB_s0.user.findOne({x: 1}));
+// We also need to set mongos1 to version 0|0|0; otherwise it will complain that the collection
+// is already sharded.
+assert.eq(null, testDB_s1.user.findOne({x: 1}));
+assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+testDB_s1.user.insert({x: 1});
- // Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
- // already sharded.
- assert.eq(null, testDB_s1.user.findOne({x: 1}));
- assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- testDB_s1.user.insert({x: 1});
+assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+st.configRS.awaitLastOpCommitted();
- st.configRS.awaitLastOpCommitted();
+// Official config:
+// shard0: 2|0|c, [-inf, inf)
+// shard1: 0|0|c
+//
+// Shard metadata:
+// shard0: 0|0|c
+// shard1: 0|0|c
+//
+// mongos0: 0|0|0
- // Official config:
- // shard0: 2|0|c, [-inf, inf)
- // shard1: 0|0|c
- //
- // Shard metadata:
- // shard0: 0|0|c
- // shard1: 0|0|c
- //
- // mongos0: 0|0|0
+checkShardMajorVersion(st.rs0.getPrimary(), 0);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+// 1st mongos thinks that the collection is unsharded and will attempt to query the primary shard.
+assert.neq(null, testDB_s0.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- // 1st mongos thinks that collection is unshareded and will attempt to query primary shard.
- assert.neq(null, testDB_s0.user.findOne({x: 1}));
-
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- st.stop();
+checkShardMajorVersion(st.rs0.getPrimary(), 2);
+checkShardMajorVersion(st.rs1.getPrimary(), 0);
+st.stop();
})();
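The checkShardMajorVersion helper asserted throughout the hunk above is defined earlier in this
test, outside the hunk. A minimal sketch of how such a probe can be written, assuming a direct
connection to a shard primary and the same 'test.user' namespace:

    var checkShardMajorVersion = function(conn, expectedVersion) {
        // getShardVersion reports the shard's cached chunk version for the namespace;
        // 'global' is a Timestamp whose seconds field holds the major version.
        var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
        assert.eq(expectedVersion, shardVersionInfo.global.getTime());
    };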
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
index 57e4ba134cb..083b333b281 100644
--- a/jstests/slow1/conc_update.js
+++ b/jstests/slow1/conc_update.js
@@ -1,61 +1,61 @@
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod was unable to start up");
- db = conn.getDB("concurrency");
- db.dropDatabase();
+const conn = MongoRunner.runMongod({nojournal: ""});
+assert.neq(null, conn, "mongod was unable to start up");
+db = conn.getDB("concurrency");
+db.dropDatabase();
- const NRECORDS = 3 * 1024 * 1024;
+const NRECORDS = 3 * 1024 * 1024;
- print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
- var bulk = db.conc.initializeUnorderedBulkOp();
- for (var i = 0; i < NRECORDS; i++) {
- bulk.insert({x: i});
- }
- assert.writeOK(bulk.execute());
+print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
+var bulk = db.conc.initializeUnorderedBulkOp();
+for (var i = 0; i < NRECORDS; i++) {
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute());
- print("making an index (this will take a while)");
- db.conc.ensureIndex({x: 1});
+print("making an index (this will take a while)");
+db.conc.ensureIndex({x: 1});
- var c1 = db.conc.count({x: {$lt: NRECORDS}});
+var c1 = db.conc.count({x: {$lt: NRECORDS}});
- const updater = startParallelShell(
- "db = db.getSisterDB('concurrency');\
+const updater = startParallelShell(
+ "db = db.getSisterDB('concurrency');\
db.concflag.insert({ inprog: true });\
sleep(20);\
assert.writeOK(db.conc.update({}, \
{ $inc: { x: " +
- NRECORDS +
- "}}, false, true)); \
+ NRECORDS +
+ "}}, false, true)); \
assert.writeOK(db.concflag.update({}, { inprog: false }));");
- assert.soon(function() {
- var x = db.concflag.findOne();
- return x && x.inprog;
- }, "wait for fork", 30000, 1);
+assert.soon(function() {
+ var x = db.concflag.findOne();
+ return x && x.inprog;
+}, "wait for fork", 30000, 1);
- let querycount = 0;
- let decrements = 0;
- let misses = 0;
+let querycount = 0;
+let decrements = 0;
+let misses = 0;
- assert.soon(function() {
- const c2 = db.conc.count({x: {$lt: NRECORDS}});
- print(c2);
- querycount++;
- if (c2 < c1)
- decrements++;
- else
- misses++;
- c1 = c2;
- return !db.concflag.findOne().inprog;
- }, "update never finished", 2 * 60 * 60 * 1000, 10);
+assert.soon(function() {
+ const c2 = db.conc.count({x: {$lt: NRECORDS}});
+ print(c2);
+ querycount++;
+ if (c2 < c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ return !db.concflag.findOne().inprog;
+}, "update never finished", 2 * 60 * 60 * 1000, 10);
- print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
- assert.eq(NRECORDS, db.conc.count(), "AT END 1");
+assert.eq(NRECORDS, db.conc.count(), "AT END 1");
- updater(); // wait()
+updater(); // wait()
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
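The test above coordinates its two shells through a flag document rather than any built-in
synchronization primitive. A stripped-down sketch of that handshake, assuming a mongod reachable
through the shell's default `db` (the 'flag' collection name is illustrative):

    const worker = startParallelShell(`
        db = db.getSisterDB('concurrency');
        db.flag.insert({inprog: true});                       // signal: started
        sleep(100);                                           // stand-in for the real workload
        assert.writeOK(db.flag.update({}, {inprog: false}));  // signal: done
    `);
    // Poll the flag until the worker reports completion, then join it.
    assert.soon(function() {
        const f = db.getSisterDB('concurrency').flag.findOne();
        return f && !f.inprog;
    }, 'worker never finished', 30000, 100);
    worker();  // startParallelShell returns a join function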
diff --git a/jstests/slow1/initial_sync_many_dbs.js b/jstests/slow1/initial_sync_many_dbs.js
index 0d9d1273679..03eea70525e 100644
--- a/jstests/slow1/initial_sync_many_dbs.js
+++ b/jstests/slow1/initial_sync_many_dbs.js
@@ -3,58 +3,58 @@
*/
(function() {
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- var name = 'initial_sync_many_dbs';
- var num_dbs = 32;
- var max_colls = 32;
- var num_docs = 2;
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
- replSet.startSet();
- replSet.initiate();
+var name = 'initial_sync_many_dbs';
+var num_dbs = 32;
+var max_colls = 32;
+var num_docs = 2;
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
+replSet.startSet();
+replSet.initiate();
- var primary = replSet.getPrimary();
- jsTestLog('Seeding primary with ' + num_dbs + ' databases with up to ' + max_colls +
- ' collections each. Each collection will contain ' + num_docs + ' documents');
- for (var i = 0; i < num_dbs; i++) {
- var dbname = name + '_db' + i;
- for (var j = 0; j < (i % max_colls + 1); j++) {
- var collname = name + '_coll' + j;
- var coll = primary.getDB(dbname)[collname];
- for (var k = 0; k < num_docs; k++) {
- assert.writeOK(coll.insert({_id: k}));
- }
+var primary = replSet.getPrimary();
+jsTestLog('Seeding primary with ' + num_dbs + ' databases with up to ' + max_colls +
+ ' collections each. Each collection will contain ' + num_docs + ' documents');
+for (var i = 0; i < num_dbs; i++) {
+ var dbname = name + '_db' + i;
+ for (var j = 0; j < (i % max_colls + 1); j++) {
+ var collname = name + '_coll' + j;
+ var coll = primary.getDB(dbname)[collname];
+ for (var k = 0; k < num_docs; k++) {
+ assert.writeOK(coll.insert({_id: k}));
}
}
+}
- // Add a secondary that will initial sync from the primary.
- jsTestLog('Adding node to replica set to trigger initial sync process');
- replSet.add();
- replSet.reInitiate();
+// Add a secondary that will initial sync from the primary.
+jsTestLog('Adding node to replica set to trigger initial sync process');
+replSet.add();
+replSet.reInitiate();
- replSet.awaitSecondaryNodes(30 * 60 * 1000);
- var secondary = replSet.getSecondary();
- jsTestLog('New node has transitioned to secondary. Checking collection sizes');
- for (var i = 0; i < num_dbs; i++) {
- var dbname = name + '_db' + i;
- for (var j = 0; j < (i % max_colls + 1); j++) {
- var collname = name + '_coll' + j;
- var coll = secondary.getDB(dbname)[collname];
- assert.eq(num_docs,
- coll.find().itcount(),
- 'collection size inconsistent with primary after initial sync: ' +
- coll.getFullName());
- }
+replSet.awaitSecondaryNodes(30 * 60 * 1000);
+var secondary = replSet.getSecondary();
+jsTestLog('New node has transitioned to secondary. Checking collection sizes');
+for (var i = 0; i < num_dbs; i++) {
+ var dbname = name + '_db' + i;
+ for (var j = 0; j < (i % max_colls + 1); j++) {
+ var collname = name + '_coll' + j;
+ var coll = secondary.getDB(dbname)[collname];
+ assert.eq(
+ num_docs,
+ coll.find().itcount(),
+ 'collection size inconsistent with primary after initial sync: ' + coll.getFullName());
}
+}
- replSet.stopSet();
+replSet.stopSet();
})();
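The step worth noting above is that the second member is added only after the primary is fully
seeded, so every document must arrive via initial sync. The core of that pattern, assuming an
already-initiated single-node ReplSetTest `replSet`:

    replSet.add();                                // new member starts with no data
    replSet.reInitiate();                         // reconfig the set to include it
    replSet.awaitSecondaryNodes(30 * 60 * 1000);  // generous timeout for the sync
    var secondary = replSet.getSecondary();       // now safe to read and compare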
diff --git a/jstests/slow1/mr_during_migrate.js b/jstests/slow1/mr_during_migrate.js
index c5d7b772efb..06d79c46e92 100644
--- a/jstests/slow1/mr_during_migrate.js
+++ b/jstests/slow1/mr_during_migrate.js
@@ -2,111 +2,111 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
+var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = st.s.getCollection(jsTest.name() + ".coll");
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = st.s.getCollection(jsTest.name() + ".coll");
- var numDocs = 1024 * 1024;
- var dataSize = 1024; // bytes, must be power of 2
+var numDocs = 1024 * 1024;
+var dataSize = 1024; // bytes, must be power of 2
- var data = "x";
- while (data.length < dataSize)
- data += data;
+var data = "x";
+while (data.length < dataSize)
+ data += data;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, data: data});
- }
- assert.writeOK(bulk.execute());
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, data: data});
+}
+assert.writeOK(bulk.execute());
- // Make sure everything got inserted
- assert.eq(numDocs, coll.find().itcount());
+// Make sure everything got inserted
+assert.eq(numDocs, coll.find().itcount());
- jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
+jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
- // Shard collection
- st.shardColl(coll, {_id: 1}, false);
+// Shard collection
+st.shardColl(coll, {_id: 1}, false);
- st.printShardingStatus();
+st.printShardingStatus();
- jsTest.log("Sharded collection now initialized, starting migrations...");
+jsTest.log("Sharded collection now initialized, starting migrations...");
- var checkMigrate = function() {
- print("Result of migrate : ");
- printjson(this);
- };
+var checkMigrate = function() {
+ print("Result of migrate : ");
+ printjson(this);
+};
- // Creates a number of migrations of random chunks to diff shard servers
- var ops = [];
- for (var i = 0; i < st._connections.length; i++) {
- ops.push({
- op: "command",
- ns: "admin",
- command: {
- moveChunk: "" + coll,
- find: {_id: {"#RAND_INT": [0, numDocs]}},
- to: st._connections[i].shardName,
- _waitForDelete: true
- },
- showResult: true
- });
- }
+// Creates a number of migrations of random chunks to different shard servers
+var ops = [];
+for (var i = 0; i < st._connections.length; i++) {
+ ops.push({
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: {"#RAND_INT": [0, numDocs]}},
+ to: st._connections[i].shardName,
+ _waitForDelete: true
+ },
+ showResult: true
+ });
+}
- // TODO: Also migrate output collection
+// TODO: Also migrate output collection
- jsTest.log("Starting migrations now...");
+jsTest.log("Starting migrations now...");
- var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
+var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
- //#######################
- // Tests during migration
+//#######################
+// Tests during migration
- var numTests = 5;
+var numTests = 5;
- for (var t = 0; t < numTests; t++) {
- jsTest.log("Test #" + t);
+for (var t = 0; t < numTests; t++) {
+ jsTest.log("Test #" + t);
- var mongos = st.s1; // use other mongos so we get stale shard versions
- var coll = mongos.getCollection(coll + "");
- var outputColl = mongos.getCollection(coll + "_output");
+ var mongos = st.s1; // use other mongos so we get stale shard versions
+ var coll = mongos.getCollection(coll + "");
+ var outputColl = mongos.getCollection(coll + "_output");
- var numTypes = 32;
- var map = function() {
- emit(this._id % 32 /* must be hardcoded */, {c: 1});
- };
+ var numTypes = 32;
+ var map = function() {
+ emit(this._id % 32 /* must be hardcoded */, {c: 1});
+ };
- var reduce = function(k, vals) {
- var total = 0;
- for (var i = 0; i < vals.length; i++)
- total += vals[i].c;
- return {c: total};
- };
+ var reduce = function(k, vals) {
+ var total = 0;
+ for (var i = 0; i < vals.length; i++)
+ total += vals[i].c;
+ return {c: total};
+ };
- printjson(coll.find({_id: 0}).itcount());
+ printjson(coll.find({_id: 0}).itcount());
- jsTest.log("Starting new mapReduce run #" + t);
+ jsTest.log("Starting new mapReduce run #" + t);
- // assert.eq( coll.find().itcount(), numDocs )
+ // assert.eq( coll.find().itcount(), numDocs )
- coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+ coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
- printjson(coll.mapReduce(
- map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
+ printjson(coll.mapReduce(
+ map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
- jsTest.log("MapReduce run #" + t + " finished.");
+ jsTest.log("MapReduce run #" + t + " finished.");
- assert.eq(outputColl.find().itcount(), numTypes);
+ assert.eq(outputColl.find().itcount(), numTypes);
- outputColl.find().forEach(function(x) {
- assert.eq(x.value.c, numDocs / numTypes);
- });
- }
+ outputColl.find().forEach(function(x) {
+ assert.eq(x.value.c, numDocs / numTypes);
+ });
+}
- printjson(benchFinish(bid));
+printjson(benchFinish(bid));
- st.stop();
+st.stop();
})();
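benchStart/benchFinish drive the moveChunk commands on a background thread while the mapReduce
runs in the foreground. A minimal sketch of that shape, assuming a ShardingTest `st` with a
sharded 'test.coll' of 1000 documents (the namespace and bounds are illustrative):

    var bid = benchStart({
        ops: [{
            op: 'command',
            ns: 'admin',
            command: {
                moveChunk: 'test.coll',
                find: {_id: {'#RAND_INT': [0, 1000]}},  // pick a random chunk each time
                to: st.shard1.shardName,
                _waitForDelete: true
            }
        }],
        host: st.s.host,
        parallel: 1,
        handleErrors: false
    });
    // ... run foreground queries and commands under concurrent migrations ...
    printjson(benchFinish(bid));  // stop the workload and report its stats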
diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js
index f312b4830d6..fb1a46bcddb 100644
--- a/jstests/slow1/replsets_priority1.js
+++ b/jstests/slow1/replsets_priority1.js
@@ -3,215 +3,214 @@
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+"use strict";
- load("jstests/replsets/rslib.js");
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- var rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}});
- var nodes = rs.startSet();
- rs.initiate();
+load("jstests/replsets/rslib.js");
- var master = rs.getPrimary();
+var rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}});
+var nodes = rs.startSet();
+rs.initiate();
- var everyoneOkSoon = function() {
- var status;
- assert.soon(function() {
- var ok = true;
- status = master.adminCommand({replSetGetStatus: 1});
+var master = rs.getPrimary();
- if (!status.members) {
- return false;
- }
+var everyoneOkSoon = function() {
+ var status;
+ assert.soon(function() {
+ var ok = true;
+ status = master.adminCommand({replSetGetStatus: 1});
- for (var i in status.members) {
- if (status.members[i].health == 0) {
- continue;
- }
- ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ if (!status.members) {
+ return false;
+ }
+
+ for (var i in status.members) {
+ if (status.members[i].health == 0) {
+ continue;
}
- return ok;
- }, tojson(status));
- };
+ ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ }
+ return ok;
+ }, tojson(status));
+};
- var checkPrimaryIs = function(node) {
+var checkPrimaryIs = function(node) {
+ print("nreplsets_priority1.js checkPrimaryIs(" + node.host + ")");
- print("nreplsets_priority1.js checkPrimaryIs(" + node.host + ")");
+ var status;
- var status;
+ assert.soon(function() {
+ var ok = true;
- assert.soon(function() {
- var ok = true;
+ try {
+ status = master.adminCommand({replSetGetStatus: 1});
+ } catch (e) {
+ print(e);
+ print("nreplsets_priority1.js checkPrimaryIs reconnecting");
+ reconnect(master);
+ status = master.adminCommand({replSetGetStatus: 1});
+ }
- try {
- status = master.adminCommand({replSetGetStatus: 1});
- } catch (e) {
- print(e);
- print("nreplsets_priority1.js checkPrimaryIs reconnecting");
- reconnect(master);
- status = master.adminCommand({replSetGetStatus: 1});
- }
+ var str = "goal: " + node.host + "==1 states: ";
+ if (!status || !status.members) {
+ return false;
+ }
+ status.members.forEach(function(m) {
+ str += m.name + ": " + m.state + " ";
- var str = "goal: " + node.host + "==1 states: ";
- if (!status || !status.members) {
- return false;
+ if (m.name == node.host) {
+ ok &= m.state == 1;
+ } else {
+ ok &= m.state != 1 || (m.state == 1 && m.health == 0);
}
- status.members.forEach(function(m) {
- str += m.name + ": " + m.state + " ";
-
- if (m.name == node.host) {
- ok &= m.state == 1;
- } else {
- ok &= m.state != 1 || (m.state == 1 && m.health == 0);
- }
- });
- print();
- print(str);
- print();
-
- occasionally(function() {
- print("\nstatus:");
- printjson(status);
- print();
- }, 15);
+ });
+ print();
+ print(str);
+ print();
- return ok;
- }, node.host + '==1', 240000, 1000);
+ occasionally(function() {
+ print("\nstatus:");
+ printjson(status);
+ print();
+ }, 15);
- everyoneOkSoon();
- };
+ return ok;
+ }, node.host + '==1', 240000, 1000);
everyoneOkSoon();
+};
- print("\n\nreplsets_priority1.js initial sync");
+everyoneOkSoon();
- // intial sync
- master.getDB("foo").bar.insert({x: 1});
- rs.awaitReplication();
+print("\n\nreplsets_priority1.js initial sync");
- print("\n\nreplsets_priority1.js starting loop");
+// initial sync
+master.getDB("foo").bar.insert({x: 1});
+rs.awaitReplication();
- var n = 5;
- for (var i = 0; i < n; i++) {
- print("Round " + i + ": FIGHT!");
+print("\n\nreplsets_priority1.js starting loop");
- var max = null;
- var second = null;
- reconnect(master);
- var config = master.getDB("local").system.replset.findOne();
+var n = 5;
+for (var i = 0; i < n; i++) {
+ print("Round " + i + ": FIGHT!");
- var version = config.version;
- config.version++;
+ var max = null;
+ var second = null;
+ reconnect(master);
+ var config = master.getDB("local").system.replset.findOne();
- for (var j = 0; j < config.members.length; j++) {
- var priority = Math.random() * 100;
- print("random priority : " + priority);
- config.members[j].priority = priority;
+ var version = config.version;
+ config.version++;
- if (!max || priority > max.priority) {
- max = config.members[j];
- }
- }
+ for (var j = 0; j < config.members.length; j++) {
+ var priority = Math.random() * 100;
+ print("random priority : " + priority);
+ config.members[j].priority = priority;
- for (var j = 0; j < config.members.length; j++) {
- if (config.members[j] == max) {
- continue;
- }
- if (!second || config.members[j].priority > second.priority) {
- second = config.members[j];
- }
+ if (!max || priority > max.priority) {
+ max = config.members[j];
}
+ }
- print("\n\nreplsets_priority1.js max is " + max.host + " with priority " + max.priority +
- ", reconfiguring...");
+ for (var j = 0; j < config.members.length; j++) {
+ if (config.members[j] == max) {
+ continue;
+ }
+ if (!second || config.members[j].priority > second.priority) {
+ second = config.members[j];
+ }
+ }
- var count = 0;
- while (config.version != version && count < 100) {
- reconnect(master);
+ print("\n\nreplsets_priority1.js max is " + max.host + " with priority " + max.priority +
+ ", reconfiguring...");
- occasionally(function() {
- print("version is " + version + ", trying to update to " + config.version);
- });
+ var count = 0;
+ while (config.version != version && count < 100) {
+ reconnect(master);
- try {
- master.adminCommand({replSetReconfig: config});
- master = rs.getPrimary();
- reconnect(master);
+ occasionally(function() {
+ print("version is " + version + ", trying to update to " + config.version);
+ });
- version = master.getDB("local").system.replset.findOne().version;
- } catch (e) {
- print("nreplsets_priority1.js Caught exception: " + e);
- }
+ try {
+ master.adminCommand({replSetReconfig: config});
+ master = rs.getPrimary();
+ reconnect(master);
- count++;
+ version = master.getDB("local").system.replset.findOne().version;
+ } catch (e) {
+ print("nreplsets_priority1.js Caught exception: " + e);
}
- print("\nreplsets_priority1.js wait for 2 slaves");
+ count++;
+ }
- assert.soon(function() {
- rs.getPrimary();
- return rs._slaves.length == 2;
- }, "2 slaves");
+ print("\nreplsets_priority1.js wait for 2 slaves");
- print("\nreplsets_priority1.js wait for new config version " + config.version);
+ assert.soon(function() {
+ rs.getPrimary();
+ return rs._slaves.length == 2;
+ }, "2 slaves");
- assert.soon(function() {
- var versions = [0, 0];
- rs._slaves[0].setSlaveOk();
- versions[0] = rs._slaves[0].getDB("local").system.replset.findOne().version;
- rs._slaves[1].setSlaveOk();
- versions[1] = rs._slaves[1].getDB("local").system.replset.findOne().version;
- return versions[0] == config.version && versions[1] == config.version;
- });
+ print("\nreplsets_priority1.js wait for new config version " + config.version);
+
+ assert.soon(function() {
+ var versions = [0, 0];
+ rs._slaves[0].setSlaveOk();
+ versions[0] = rs._slaves[0].getDB("local").system.replset.findOne().version;
+ rs._slaves[1].setSlaveOk();
+ versions[1] = rs._slaves[1].getDB("local").system.replset.findOne().version;
+ return versions[0] == config.version && versions[1] == config.version;
+ });
- print("replsets_priority1.js awaitReplication");
+ print("replsets_priority1.js awaitReplication");
- // the reconfiguration needs to be replicated! the hb sends it out
- // separately from the repl
- rs.awaitReplication();
+ // the reconfiguration needs to be replicated! the hb sends it out
+ // separately from the repl
+ rs.awaitReplication();
- print("reconfigured. Checking statuses.");
+ print("reconfigured. Checking statuses.");
- checkPrimaryIs(max);
+ checkPrimaryIs(max);
- // Wait for election oplog entry to be replicated, to avoid rollbacks later on.
- rs.awaitReplication();
+ // Wait for election oplog entry to be replicated, to avoid rollbacks later on.
+ rs.awaitReplication();
- print("rs.stop");
+ print("rs.stop");
- rs.stop(max._id);
+ rs.stop(max._id);
- master = rs.getPrimary();
+ master = rs.getPrimary();
- print("\nkilled max primary. Checking statuses.");
+ print("\nkilled max primary. Checking statuses.");
- print("second is " + second.host + " with priority " + second.priority);
- checkPrimaryIs(second);
+ print("second is " + second.host + " with priority " + second.priority);
+ checkPrimaryIs(second);
- // Wait for election oplog entry to be replicated, to avoid rollbacks later on.
- let liveSlaves = rs.nodes.filter(function(node) {
- return node.host !== max.host && node.host !== second.host;
- });
- rs.awaitReplication(null, null, liveSlaves);
+ // Wait for election oplog entry to be replicated, to avoid rollbacks later on.
+ let liveSlaves = rs.nodes.filter(function(node) {
+ return node.host !== max.host && node.host !== second.host;
+ });
+ rs.awaitReplication(null, null, liveSlaves);
- print("restart max " + max._id);
+ print("restart max " + max._id);
- rs.restart(max._id);
- master = rs.getPrimary();
+ rs.restart(max._id);
+ master = rs.getPrimary();
- print("max restarted. Checking statuses.");
- checkPrimaryIs(max);
+ print("max restarted. Checking statuses.");
+ checkPrimaryIs(max);
- // Wait for election oplog entry to be replicated, to avoid rollbacks later on.
- rs.awaitReplication();
- }
+ // Wait for election oplog entry to be replicated, to avoid rollbacks later on.
+ rs.awaitReplication();
+}
- rs.stopSet();
+rs.stopSet();
})();
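The loop above randomizes every member's priority, reconfigures, and then expects the
highest-priority member to win the election. The essential reconfig-and-verify step, in a sketch
assuming an initiated three-node ReplSetTest `rst` whose members otherwise share the default
priority:

    var config = rst.getReplSetConfigFromNode();
    config.version++;
    config.members[0].priority = 10;  // make member 0 the clear winner
    assert.commandWorked(rst.getPrimary().adminCommand({replSetReconfig: config}));
    rst.awaitNodesAgreeOnPrimary();
    assert.eq(rst.nodes[0].host, rst.getPrimary().host);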
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
index ee58da80400..f6570d08a69 100644
--- a/jstests/slow1/sharding_multiple_collections.js
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -1,70 +1,70 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
+var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
- assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', s.shard1.shardName);
+assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+s.ensurePrimaryShard('test', s.shard1.shardName);
- var S = "";
- while (S.length < 500) {
- S += "123123312312";
- }
+var S = "";
+while (S.length < 500) {
+ S += "123123312312";
+}
- var N = 100000;
+var N = 100000;
- var db = s.getDB("test");
- var bulk = db.foo.initializeUnorderedBulkOp();
- var bulk2 = db.bar.initializeUnorderedBulkOp();
- for (i = 0; i < N; i++) {
- bulk.insert({_id: i, s: S});
- bulk2.insert({_id: i, s: S, s2: S});
- }
- assert.writeOK(bulk.execute());
- assert.writeOK(bulk2.execute());
+var db = s.getDB("test");
+var bulk = db.foo.initializeUnorderedBulkOp();
+var bulk2 = db.bar.initializeUnorderedBulkOp();
+for (var i = 0; i < N; i++) {
+ bulk.insert({_id: i, s: S});
+ bulk2.insert({_id: i, s: S, s2: S});
+}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
- s.printShardingStatus();
+s.printShardingStatus();
- function mytest(coll, i, loopNumber) {
- try {
- var x = coll.find({_id: i}).explain();
- } catch (e) {
- // Ignore stale shard version exceptions, since there are many migrations happening, and
- // mongos may not be able to complete the find within the stale version retries limit.
- if (e.message.contains("stale config")) {
- return;
- }
- throw e;
- }
- if (x)
+function mytest(coll, i, loopNumber) {
+ try {
+ var x = coll.find({_id: i}).explain();
+ } catch (e) {
+ // Ignore stale shard version exceptions, since there are many migrations happening, and
+ // mongos may not be able to complete the find within the stale version retries limit.
+ if (e.message.contains("stale config")) {
return;
- throw Error("can't find " + i + " in " + coll.getName() + " on loopNumber: " + loopNumber +
- " explain: " + tojson(x));
+ }
+ throw e;
}
+ if (x)
+ return;
+ throw Error("can't find " + i + " in " + coll.getName() + " on loopNumber: " + loopNumber +
+ " explain: " + tojson(x));
+}
- for (var loopNumber = 0;; loopNumber++) {
- for (var i = 0; i < N; i++) {
- mytest(db.foo, i, loopNumber);
- mytest(db.bar, i, loopNumber);
- if (i % 1000 == 0)
- print(i);
- }
+for (var loopNumber = 0;; loopNumber++) {
+ for (var i = 0; i < N; i++) {
+ mytest(db.foo, i, loopNumber);
+ mytest(db.bar, i, loopNumber);
+ if (i % 1000 == 0)
+ print(i);
+ }
- s.printShardingStatus();
+ s.printShardingStatus();
- if (loopNumber == 0) {
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
- assert.commandWorked(s.s0.adminCommand({shardcollection: "test.bar", key: {_id: 1}}));
- }
+ if (loopNumber == 0) {
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.bar", key: {_id: 1}}));
+ }
- assert(loopNumber < 1000, "taking too long");
+ assert(loopNumber < 1000, "taking too long");
- if (s.chunkDiff("foo") < 12 && s.chunkDiff("bar") < 12) {
- break;
- }
+ if (s.chunkDiff("foo") < 12 && s.chunkDiff("bar") < 12) {
+ break;
}
+}
- s.stop();
+s.stop();
})();
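The exit condition above relies on ShardingTest.chunkDiff, which reports the spread between the
most- and least-loaded shard for a collection. Wrapping it in assert.soon expresses the same wait
with a timeout instead of the manual loopNumber guard; a sketch:

    assert.soon(function() {
        return s.chunkDiff('foo') < 12 && s.chunkDiff('bar') < 12;
    }, 'balancer never evened out the chunks', 10 * 60 * 1000, 10 * 1000);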
diff --git a/jstests/ssl/canonicalize_command_line_opts.js b/jstests/ssl/canonicalize_command_line_opts.js
index c2c2c96bcf1..4354ddac4c8 100644
--- a/jstests/ssl/canonicalize_command_line_opts.js
+++ b/jstests/ssl/canonicalize_command_line_opts.js
@@ -1,39 +1,39 @@
// Ensure that all 'ssl' options are canonicalized to their modern 'tls' versions.
(function() {
- 'use strict';
+'use strict';
- function runTest(mongod) {
- assert(mongod);
- const admin = mongod.getDB('admin');
+function runTest(mongod) {
+ assert(mongod);
+ const admin = mongod.getDB('admin');
- const opts = assert.commandWorked(admin.runCommand({getCmdLineOpts: 1}));
- print(tojson(opts));
- assert.eq(typeof(opts), 'object');
- assert.eq(typeof(opts.parsed), 'object');
- assert.eq(typeof(opts.parsed.net), 'object');
+ const opts = assert.commandWorked(admin.runCommand({getCmdLineOpts: 1}));
+ print(tojson(opts));
+ assert.eq(typeof (opts), 'object');
+ assert.eq(typeof (opts.parsed), 'object');
+ assert.eq(typeof (opts.parsed.net), 'object');
- const net = opts.parsed.net;
- assert.eq(typeof(net.ssl), 'undefined');
- assert.eq(typeof(net.tls), 'object');
+ const net = opts.parsed.net;
+ assert.eq(typeof (net.ssl), 'undefined');
+ assert.eq(typeof (net.tls), 'object');
- const tls = net.tls;
- assert.eq(tls.mode, 'requireTLS');
- assert.eq(tls.CAFile, 'jstests/libs/ca.pem');
- assert.eq(tls.certificateKeyFile, 'jstests/libs/server.pem');
- assert.eq(tls.allowConnectionsWithoutCertificates, true);
- assert.eq(tls.allowInvalidHostnames, true);
- }
+ const tls = net.tls;
+ assert.eq(tls.mode, 'requireTLS');
+ assert.eq(tls.CAFile, 'jstests/libs/ca.pem');
+ assert.eq(tls.certificateKeyFile, 'jstests/libs/server.pem');
+ assert.eq(tls.allowConnectionsWithoutCertificates, true);
+ assert.eq(tls.allowInvalidHostnames, true);
+}
- const options = {
- sslMode: 'requireSSL',
- sslCAFile: 'jstests/libs/ca.pem',
- sslPEMKeyFile: 'jstests/libs/server.pem',
- sslAllowConnectionsWithoutCertificates: '',
- sslAllowInvalidHostnames: '',
- };
+const options = {
+ sslMode: 'requireSSL',
+ sslCAFile: 'jstests/libs/ca.pem',
+ sslPEMKeyFile: 'jstests/libs/server.pem',
+ sslAllowConnectionsWithoutCertificates: '',
+ sslAllowInvalidHostnames: '',
+};
- const mongod = MongoRunner.runMongod(options);
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+const mongod = MongoRunner.runMongod(options);
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
})();
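The assertions above encode the canonicalization contract: none of the legacy net.ssl options may
appear in getCmdLineOpts output, only their net.tls equivalents. The same check in its smallest
form, given the admin handle from the test:

    const opts = assert.commandWorked(admin.runCommand({getCmdLineOpts: 1}));
    assert.eq(undefined, opts.parsed.net.ssl);          // legacy spelling is gone
    assert.eq('requireTLS', opts.parsed.net.tls.mode);  // canonical form survives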
diff --git a/jstests/ssl/config-canonicalize-normal-ports.js b/jstests/ssl/config-canonicalize-normal-ports.js
index a9f2878b020..c04eed0bd32 100644
--- a/jstests/ssl/config-canonicalize-normal-ports.js
+++ b/jstests/ssl/config-canonicalize-normal-ports.js
@@ -1,13 +1,13 @@
// Make sure the pseudo-option --tlsOnNormalPorts is correctly canonicalized.
(function() {
- 'use strict';
+'use strict';
- const mongod = MongoRunner.runMongod({
- tlsOnNormalPorts: '',
- tlsCertificateKeyFile: 'jstests/libs/server.pem',
- });
- assert(mongod);
- assert.commandWorked(mongod.getDB('admin').runCommand({isMaster: 1}));
- MongoRunner.stopMongod(mongod);
+const mongod = MongoRunner.runMongod({
+ tlsOnNormalPorts: '',
+ tlsCertificateKeyFile: 'jstests/libs/server.pem',
+});
+assert(mongod);
+assert.commandWorked(mongod.getDB('admin').runCommand({isMaster: 1}));
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/ssl/libs/ssl_x509_role_auth.js b/jstests/ssl/libs/ssl_x509_role_auth.js
index 1536ac02069..4b81d8f9bc3 100644
--- a/jstests/ssl/libs/ssl_x509_role_auth.js
+++ b/jstests/ssl/libs/ssl_x509_role_auth.js
@@ -1,20 +1,18 @@
// Helper script used to validate login as x509 auth with a certificate with roles works.
(function() {
- "use strict";
+"use strict";
- // Auth as user in certificate
- let ret = db.getSiblingDB("$external").auth({
- mechanism: "MONGODB-X509",
- user:
- "CN=Kernel Client Peer Role,OU=Kernel Users,O=MongoDB,L=New York City,ST=New York,C=US"
- });
- assert.eq(ret, 1, "Auth failed");
+// Auth as user in certificate
+let ret = db.getSiblingDB("$external").auth({
+ mechanism: "MONGODB-X509",
+ user: "CN=Kernel Client Peer Role,OU=Kernel Users,O=MongoDB,L=New York City,ST=New York,C=US"
+});
+assert.eq(ret, 1, "Auth failed");
- // Validate active roles
- let connStatus = db.runCommand('connectionStatus');
- assert.commandWorked(connStatus);
+// Validate active roles
+let connStatus = db.runCommand('connectionStatus');
+assert.commandWorked(connStatus);
- let expectedRoles =
- [{"role": "backup", "db": "admin"}, {"role": "readAnyDatabase", "db": "admin"}];
- assert.sameMembers(connStatus.authInfo.authenticatedUserRoles, expectedRoles);
+let expectedRoles = [{"role": "backup", "db": "admin"}, {"role": "readAnyDatabase", "db": "admin"}];
+assert.sameMembers(connStatus.authInfo.authenticatedUserRoles, expectedRoles);
}());
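connectionStatus is what exposes the roles granted through the certificate. Assuming an already
x509-authenticated connection, the verification boils down to:

    const status = assert.commandWorked(db.runCommand({connectionStatus: 1}));
    // authInfo lists the authenticated users and their effective roles.
    assert.sameMembers(
        status.authInfo.authenticatedUserRoles,
        [{role: 'backup', db: 'admin'}, {role: 'readAnyDatabase', db: 'admin'}]);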
diff --git a/jstests/ssl/libs/ssl_x509_role_auth_email.js b/jstests/ssl/libs/ssl_x509_role_auth_email.js
index 51a5e75f2f5..6f70f8e9821 100644
--- a/jstests/ssl/libs/ssl_x509_role_auth_email.js
+++ b/jstests/ssl/libs/ssl_x509_role_auth_email.js
@@ -1,12 +1,12 @@
// Helper script used to validate login as x509 auth with a certificate with roles works.
(function() {
- "use strict";
+"use strict";
- // Auth as user in certificate with an email address
- const ret = db.getSiblingDB("$external").auth({
- mechanism: "MONGODB-X509",
- user:
- "emailAddress=example@mongodb.com,CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US"
- });
- assert.eq(ret, 1, "Auth failed");
+// Auth as user in certificate with an email address
+const ret = db.getSiblingDB("$external").auth({
+ mechanism: "MONGODB-X509",
+ user:
+ "emailAddress=example@mongodb.com,CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US"
+});
+assert.eq(ret, 1, "Auth failed");
}());
diff --git a/jstests/ssl/libs/ssl_x509_role_auth_escape.js b/jstests/ssl/libs/ssl_x509_role_auth_escape.js
index a9a0595667c..5fed90c2694 100644
--- a/jstests/ssl/libs/ssl_x509_role_auth_escape.js
+++ b/jstests/ssl/libs/ssl_x509_role_auth_escape.js
@@ -1,13 +1,13 @@
// Helper script used to validate login as x509 auth with a certificate with roles works.
(function() {
- "use strict";
+"use strict";
- // Auth as user in certificate with a subject name with lots of RFC 2253 escaping
- // Ex: CN=Test,OU=Escape,O=\;\ ,L=\ \>,ST=\"\\\<,C=\,\+
- // It validates leading space, and the 7 magic characters
- const ret = db.getSiblingDB("$external").auth({
- mechanism: "MONGODB-X509",
- user: "CN=Test,OU=Escape,O=\\;\\ ,L=\\ \\>,ST=\\\"\\\\\\<,C=\\,\\+"
- });
- assert.eq(ret, 1, "Auth failed");
+// Auth as user in certificate with a subject name with lots of RFC 2253 escaping
+// Ex: CN=Test,OU=Escape,O=\;\ ,L=\ \>,ST=\"\\\<,C=\,\+
+// It validates a leading space and the seven RFC 2253 special characters
+const ret = db.getSiblingDB("$external").auth({
+ mechanism: "MONGODB-X509",
+ user: "CN=Test,OU=Escape,O=\\;\\ ,L=\\ \\>,ST=\\\"\\\\\\<,C=\\,\\+"
+});
+assert.eq(ret, 1, "Auth failed");
}());
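The backslashes in the user string are doubled because two escape layers apply: JavaScript
consumes one, and the RFC 2253 parser on the server consumes the other. A sketch of the first
layer, using a shortened DN for illustration:

    // After JavaScript parsing, each '\\' collapses to a single backslash, so the
    // server receives the RFC 2253 form: CN=Test,O=\,\+
    const dn = "CN=Test,O=\\,\\+";
    assert.eq(14, dn.length);  // 'CN=Test,O=' (10 chars) + '\,' (2) + '\+' (2)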
diff --git a/jstests/ssl/libs/ssl_x509_role_auth_utf8.js b/jstests/ssl/libs/ssl_x509_role_auth_utf8.js
index f94db33d27f..4a483e9d7ec 100644
--- a/jstests/ssl/libs/ssl_x509_role_auth_utf8.js
+++ b/jstests/ssl/libs/ssl_x509_role_auth_utf8.js
@@ -1,12 +1,12 @@
// Helper script used to validate login as x509 auth with a certificate with roles works.
(function() {
- "use strict";
+"use strict";
- // Authenticate against a certificate with a RDN in the subject name of type UTF8STRING
- const retutf8 = db.getSiblingDB("$external").auth({
- mechanism: "MONGODB-X509",
- user:
- "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel Users,CN=\\D0\\9A\\D0\\B0\\D0\\BB\\D0\\BE\\D1\\8F\\D0\\BD"
- });
- assert.eq(retutf8, 1, "Auth failed");
+// Authenticate against a certificate with an RDN of type UTF8STRING in the subject name
+const retutf8 = db.getSiblingDB("$external").auth({
+ mechanism: "MONGODB-X509",
+ user:
+ "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel Users,CN=\\D0\\9A\\D0\\B0\\D0\\BB\\D0\\BE\\D1\\8F\\D0\\BD"
+});
+assert.eq(retutf8, 1, "Auth failed");
}());
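Each \XX pair in that CN is an RFC 2253 hex escape for one UTF-8 byte; together they decode to a
Cyrillic name. A sketch of decoding the first character:

    const bytes = [0xD0, 0x9A];  // the escaped pair \D0\9A
    // Standard two-byte UTF-8 decode: 110xxxxx 10xxxxxx.
    const ch = String.fromCharCode(((bytes[0] & 0x1F) << 6) | (bytes[1] & 0x3F));
    assert.eq('К', ch);  // U+041A, CYRILLIC CAPITAL LETTER KA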
diff --git a/jstests/ssl/mixed_mode_sharded_transition.js b/jstests/ssl/mixed_mode_sharded_transition.js
index 0955f0ef7dd..8f9136b3e39 100644
--- a/jstests/ssl/mixed_mode_sharded_transition.js
+++ b/jstests/ssl/mixed_mode_sharded_transition.js
@@ -9,25 +9,25 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
+'use strict';
- var transitionToX509AllowSSL =
- Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
- var transitionToX509PreferSSL =
- Object.merge(preferSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
- var x509RequireSSL = Object.merge(requireSSL, {clusterAuthMode: 'x509'});
+var transitionToX509AllowSSL =
+ Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
+var transitionToX509PreferSSL =
+ Object.merge(preferSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
+var x509RequireSSL = Object.merge(requireSSL, {clusterAuthMode: 'x509'});
- function testCombos(opt1, opt2, shouldSucceed) {
- mixedShardTest(opt1, opt2, shouldSucceed);
- mixedShardTest(opt2, opt1, shouldSucceed);
- }
+function testCombos(opt1, opt2, shouldSucceed) {
+ mixedShardTest(opt1, opt2, shouldSucceed);
+ mixedShardTest(opt2, opt1, shouldSucceed);
+}
- print('=== Testing transitionToAuth/allowSSL - transitionToAuth/preferSSL cluster ===');
- testCombos(transitionToX509AllowSSL, transitionToX509PreferSSL, true);
+print('=== Testing transitionToAuth/allowSSL - transitionToAuth/preferSSL cluster ===');
+testCombos(transitionToX509AllowSSL, transitionToX509PreferSSL, true);
- print('=== Testing transitionToAuth/preferSSL - transitionToAuth/preferSSL cluster ===');
- mixedShardTest(transitionToX509PreferSSL, transitionToX509PreferSSL, true);
+print('=== Testing transitionToAuth/preferSSL - transitionToAuth/preferSSL cluster ===');
+mixedShardTest(transitionToX509PreferSSL, transitionToX509PreferSSL, true);
- print('=== Testing transitionToAuth/preferSSL - x509/requireSSL cluster ===');
- testCombos(transitionToX509PreferSSL, x509RequireSSL, true);
+print('=== Testing transitionToAuth/preferSSL - x509/requireSSL cluster ===');
+testCombos(transitionToX509PreferSSL, x509RequireSSL, true);
}());
diff --git a/jstests/ssl/mongo_uri_secondaries.js b/jstests/ssl/mongo_uri_secondaries.js
index 9512a3c23c3..eec34153b30 100644
--- a/jstests/ssl/mongo_uri_secondaries.js
+++ b/jstests/ssl/mongo_uri_secondaries.js
@@ -5,58 +5,52 @@
// To install trusted-ca.pem for local testing on OSX, invoke the following at a console:
// security add-trusted-cert -d jstests/libs/trusted-ca.pem
(function() {
- 'use strict';
+'use strict';
- const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
- if (HOST_TYPE == "windows") {
- // OpenSSL backed imports Root CA and intermediate CA
- runProgram(
- "certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
+const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
+if (HOST_TYPE == "windows") {
+    // The OpenSSL-backed build imports the Root CA and intermediate CA
+ runProgram("certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
- // SChannel backed follows Windows rules and only trusts the Root store in Local Machine and
- // Current User.
- runProgram("certutil.exe", "-addstore", "-f", "Root", "jstests\\libs\\trusted-ca.pem");
- }
+    // The SChannel-backed build follows Windows rules and only trusts the Root store in Local
+    // Machine and Current User.
+ runProgram("certutil.exe", "-addstore", "-f", "Root", "jstests\\libs\\trusted-ca.pem");
+}
- const x509Options = {
- sslMode: 'requireSSL',
- sslPEMKeyFile: 'jstests/libs/trusted-server.pem',
- sslCAFile: 'jstests/libs/trusted-ca.pem',
- sslAllowInvalidCertificates: '',
- sslWeakCertificateValidation: '',
- };
+const x509Options = {
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: 'jstests/libs/trusted-server.pem',
+ sslCAFile: 'jstests/libs/trusted-ca.pem',
+ sslAllowInvalidCertificates: '',
+ sslWeakCertificateValidation: '',
+};
- const rst = new ReplSetTest({
- nodes: 2,
- name: "sslSet",
- useHostName: false,
- nodeOptions: x509Options,
- waitForKeys: false
- });
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest(
+ {nodes: 2, name: "sslSet", useHostName: false, nodeOptions: x509Options, waitForKeys: false});
+rst.startSet();
+rst.initiate();
- const subShellCommand = function(hosts) {
- var Ms = [];
- for (var i = 0; i < 10; i++) {
- Ms.push(new Mongo("mongodb://" + hosts[0] + "," + hosts[1] +
- "/?ssl=true&replicaSet=sslSet"));
- }
+const subShellCommand = function(hosts) {
+ var Ms = [];
+ for (var i = 0; i < 10; i++) {
+ Ms.push(
+ new Mongo("mongodb://" + hosts[0] + "," + hosts[1] + "/?ssl=true&replicaSet=sslSet"));
+ }
- for (var i = 0; i < 10; i++) {
- var db = Ms[i].getDB("test");
- db.setSlaveOk(true);
- db.col.find().readPref("secondary").toArray();
- }
- };
+ for (var i = 0; i < 10; i++) {
+ var db = Ms[i].getDB("test");
+ db.setSlaveOk(true);
+ db.col.find().readPref("secondary").toArray();
+ }
+};
- const subShellCommandFormatter = function(replSet) {
- var hosts = [];
- replSet.nodes.forEach((node) => {
- hosts.push("localhost:" + node.port);
- });
+const subShellCommandFormatter = function(replSet) {
+ var hosts = [];
+ replSet.nodes.forEach((node) => {
+ hosts.push("localhost:" + node.port);
+ });
- let command = `
+ let command = `
(function () {
'use strict';
let command = ${subShellCommand.toString()};
@@ -64,20 +58,20 @@
command(hosts);
}());`;
- return command;
- };
+ return command;
+};
- const subShellArgs = [
- "env",
- "SSL_CERT_FILE=jstests/libs/trusted-ca.pem",
- './mongo',
- '--nodb',
- '--eval',
- subShellCommandFormatter(rst)
- ];
+const subShellArgs = [
+ "env",
+ "SSL_CERT_FILE=jstests/libs/trusted-ca.pem",
+ './mongo',
+ '--nodb',
+ '--eval',
+ subShellCommandFormatter(rst)
+];
- const retVal = _runMongoProgram(...subShellArgs);
- assert.eq(retVal, 0, 'mongo shell did not succeed with exit code 0');
+const retVal = _runMongoProgram(...subShellArgs);
+assert.eq(retVal, 0, 'mongo shell did not succeed with exit code 0');
- rst.stopSet();
+rst.stopSet();
}());
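The subshell machinery above serializes a function into source text and runs it in a fresh shell
whose trust store is overridden through the environment. The pattern in miniature, with an
illustrative host list:

    const body = function(hosts) {
        print('connecting to ' + hosts.join());
    };
    const script = `(${body.toString()})(['localhost:27017']);`;
    const rc = _runMongoProgram('env', 'SSL_CERT_FILE=jstests/libs/trusted-ca.pem',
                                './mongo', '--nodb', '--eval', script);
    assert.eq(0, rc, 'subshell did not exit cleanly');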
diff --git a/jstests/ssl/repl_ssl_noca.js b/jstests/ssl/repl_ssl_noca.js
index 256f56f5ffe..5dea404fb12 100644
--- a/jstests/ssl/repl_ssl_noca.js
+++ b/jstests/ssl/repl_ssl_noca.js
@@ -1,57 +1,56 @@
(function() {
- 'use strict';
- if (_isWindows()) {
- // OpenSSL backed imports Root CA and intermediate CA
- runProgram(
- "certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
-
- // SChannel backed follows Windows rules and only trusts the Root store in Local Machine and
- // Current User.
- runProgram("certutil.exe", "-addstore", "-f", "Root", "jstests\\libs\\trusted-ca.pem");
+'use strict';
+if (_isWindows()) {
+    // The OpenSSL-backed build imports the Root CA and intermediate CA
+ runProgram("certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
+
+    // The SChannel-backed build follows Windows rules and only trusts the Root store in Local
+    // Machine and Current User.
+ runProgram("certutil.exe", "-addstore", "-f", "Root", "jstests\\libs\\trusted-ca.pem");
+}
+
+var replTest = new ReplSetTest({
+ name: "ssltest",
+ nodes: 1,
+ nodeOptions: {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/trusted-server.pem",
+ },
+ host: "localhost",
+ useHostName: false,
+});
+
+replTest.startSet({
+ env: {
+ SSL_CERT_FILE: 'jstests/libs/trusted-ca.pem',
+ },
+});
+replTest.initiate();
+
+var nodeList = replTest.nodeList().join();
+
+var checkShellOkay = function(url) {
+ // Should not be able to authenticate with x509.
+ // Authenticate call will return 1 on success, 0 on error.
+ var argv = ['./mongo', url, '--eval', ('db.runCommand({replSetGetStatus: 1})')];
+ if (!_isWindows()) {
+ // On Linux we override the default path to the system CA store to point to our
+ // "trusted" CA. On Windows, this CA will have been added to the user's trusted CA list
+ argv.unshift("env", "SSL_CERT_FILE=jstests/libs/trusted-ca.pem");
}
+ return runMongoProgram(...argv);
+};
- var replTest = new ReplSetTest({
- name: "ssltest",
- nodes: 1,
- nodeOptions: {
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/trusted-server.pem",
- },
- host: "localhost",
- useHostName: false,
- });
-
- replTest.startSet({
- env: {
- SSL_CERT_FILE: 'jstests/libs/trusted-ca.pem',
- },
- });
- replTest.initiate();
-
- var nodeList = replTest.nodeList().join();
-
- var checkShellOkay = function(url) {
- // Should not be able to authenticate with x509.
- // Authenticate call will return 1 on success, 0 on error.
- var argv = ['./mongo', url, '--eval', ('db.runCommand({replSetGetStatus: 1})')];
- if (!_isWindows()) {
- // On Linux we override the default path to the system CA store to point to our
- // "trusted" CA. On Windows, this CA will have been added to the user's trusted CA list
- argv.unshift("env", "SSL_CERT_FILE=jstests/libs/trusted-ca.pem");
- }
- return runMongoProgram(...argv);
- };
-
- var noMentionSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}`;
- jsTestLog(`Replica set url (doesn't mention SSL): ${noMentionSSLURL}`);
- assert.neq(checkShellOkay(noMentionSSLURL), 0, "shell correctly failed to connect without SSL");
-
- var useSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}&ssl=true`;
- jsTestLog(`Replica set url (uses SSL): ${useSSLURL}`);
- assert.eq(checkShellOkay(useSSLURL), 0, "successfully connected with SSL");
-
- var disableSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}&ssl=false`;
- jsTestLog(`Replica set url (doesnt use SSL): ${disableSSLURL}`);
- assert.neq(checkShellOkay(disableSSLURL), 0, "shell correctly failed to connect without SSL");
- replTest.stopSet();
+var noMentionSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}`;
+jsTestLog(`Replica set url (doesn't mention SSL): ${noMentionSSLURL}`);
+assert.neq(checkShellOkay(noMentionSSLURL), 0, "shell correctly failed to connect without SSL");
+
+var useSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}&ssl=true`;
+jsTestLog(`Replica set url (uses SSL): ${useSSLURL}`);
+assert.eq(checkShellOkay(useSSLURL), 0, "successfully connected with SSL");
+
+var disableSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}&ssl=false`;
+jsTestLog(`Replica set url (doesn't use SSL): ${disableSSLURL}`);
+assert.neq(checkShellOkay(disableSSLURL), 0, "shell correctly failed to connect without SSL");
+replTest.stopSet();
})();
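All three URL checks above lean on the same convention: runMongoProgram returns the spawned
shell's exit code, so 0 means the --eval snippet ran to completion and nonzero means the
connection (or the snippet) failed. In miniature:

    const rc = runMongoProgram('./mongo', '--nodb', '--eval', 'quit(0)');
    assert.eq(0, rc, 'shell should exit cleanly');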
diff --git a/jstests/ssl/repl_ssl_split_horizon.js b/jstests/ssl/repl_ssl_split_horizon.js
index d70980ebe88..b598e8b692d 100644
--- a/jstests/ssl/repl_ssl_split_horizon.js
+++ b/jstests/ssl/repl_ssl_split_horizon.js
@@ -1,190 +1,191 @@
(function() {
- 'use strict';
- // Create a temporary host file that creates two aliases for localhost that are in the
- // splithorizon certificate.
- // The aliases are 'splithorizon1' and 'splithorizon2'
- const hostsFile = MongoRunner.dataPath + 'split-horizon-hosts';
- writeFile(hostsFile, "splithorizon1 localhost\nsplithorizon2 localhost\n");
-
- // Check if HOSTALIASES works on this system (Will not work on Windows or OSX and may not work
- // on Linux)
- try {
- var rc =
- runMongoProgram("env", "HOSTALIASES=" + hostsFile, "getent", "hosts", "splithorizon1");
- } catch (e) {
+'use strict';
+// Create a temporary host file that creates two aliases for localhost that are in the
+// splithorizon certificate.
+// The aliases are 'splithorizon1' and 'splithorizon2'
+const hostsFile = MongoRunner.dataPath + 'split-horizon-hosts';
+writeFile(hostsFile, "splithorizon1 localhost\nsplithorizon2 localhost\n");
+
+// Check whether HOSTALIASES works on this system (it will not work on Windows or OSX, and may
+// not work on Linux).
+try {
+ var rc = runMongoProgram("env", "HOSTALIASES=" + hostsFile, "getent", "hosts", "splithorizon1");
+} catch (e) {
+ jsTestLog(
+        `Failed the check for HOSTALIASES support using env; we are probably on a non-GNU platform. Skipping this test.`);
+ removeFile(hostsFile);
+ return;
+}
+
+if (rc != 0) {
+ removeFile(hostsFile);
+
+    // Check the glibc version to figure out whether HOSTALIASES will work as expected
+ clearRawMongoProgramOutput();
+ var rc = runProgram("getconf", "GNU_LIBC_VERSION");
+ if (rc != 0) {
jsTestLog(
- `Failed the check for HOSTALIASES support using env, we are probably on a non-GNU platform. Skipping this test.`);
- removeFile(hostsFile);
+            `Failed the check for GLIBC version; we are probably on a non-GNU platform. Skipping this test.`);
return;
}
- if (rc != 0) {
- removeFile(hostsFile);
-
- // Check glibc version to figure out of HOSTALIASES will work as expected
- clearRawMongoProgramOutput();
- var rc = runProgram("getconf", "GNU_LIBC_VERSION");
- if (rc != 0) {
- jsTestLog(
- `Failed the check for GLIBC version, we are probably on a non-GNU platform. Skipping this test.`);
- return;
- }
-
- // Output is of the format: 'glibc x.yz'
- var output = rawMongoProgramOutput();
- var fields = output.split(" ");
- var glibc_version = parseFloat(fields[2]);
-
- // Fail this test if we are on GLIBC >= 2.2 and HOSTALIASES still doesn't work
- if (glibc_version < 2.2) {
- jsTestLog(`HOSTALIASES does not seem to work as expected on this system. GLIBC
+ // Output is of the format: 'glibc x.yz'
+ var output = rawMongoProgramOutput();
+ var fields = output.split(" ");
+ var glibc_version = parseFloat(fields[2]);
+
+ // Fail this test if we are on GLIBC >= 2.2 and HOSTALIASES still doesn't work
+ if (glibc_version < 2.2) {
+ jsTestLog(
+ `HOSTALIASES does not seem to work as expected on this system. GLIBC
version is ${glibc_version}, skipping this test.`);
- return;
- } else {
- assert(false, `HOSTALIASES does not seem to work as expected on this system. GLIBC
+ return;
+ } else {
+ assert(false,
+ `HOSTALIASES does not seem to work as expected on this system. GLIBC
version is ${glibc_version}`);
- }
}
-
- var replTest = new ReplSetTest({
- name: "splitHorizontest",
- nodes: 2,
- nodeOptions: {
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/splithorizon-server.pem",
- },
- host: "localhost",
- useHostName: false,
- });
-
- replTest.startSet({
- env: {
- SSL_CERT_FILE: 'jstests/libs/splithorizon-ca.pem',
- },
- });
-
- // Create some variables needed for our horizons, we're replacing localhost with the horizon
- // name, leaving the port the same (so we can connect)
- var node0 = replTest.nodeList()[0];
- var node1 = replTest.nodeList()[1];
- var node0localHostname = node0;
- var node1localHostname = node1;
- var node0horizonHostname = node0.replace("localhost", "splithorizon1");
- var node1horizonHostname = node1.replace("localhost", "splithorizon1");
- var node0horizonMissingHostname = node0.replace("localhost", "splithorizon2");
- var node1horizonMissingHostname = node1.replace("localhost", "splithorizon2");
-
- var config = replTest.getReplSetConfig();
- config.members[0].horizons = {};
- config.members[0].horizons.horizon_name = node0horizonHostname;
- config.members[1].horizons = {};
- config.members[1].horizons.horizon_name = node1horizonHostname;
-
- replTest.initiate(config);
-
- var checkExpectedHorizon = function(url, memberIndex, expectedHostname) {
- // Run isMaster in the shell and check that we get the expected hostname back
- var argv = [
- 'env',
- "HOSTALIASES=" + hostsFile,
- "SSL_CERT_FILE=jstests/libs/splithorizon-ca.pem",
- './mongo',
- url,
- '--eval',
- ("assert(db.runCommand({isMaster: 1})['hosts'][" + memberIndex + "] == '" +
- expectedHostname + "')")
- ];
- return runMongoProgram(...argv);
- };
-
- // Using localhost should use the default horizon
- var defaultURL = `mongodb://${node0localHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
- jsTestLog(`URL without horizon: ${defaultURL}`);
- assert.eq(checkExpectedHorizon(defaultURL, 0, node0localHostname),
- 0,
- "localhost does not return horizon");
- assert.eq(checkExpectedHorizon(defaultURL, 1, node1localHostname),
- 0,
- "localhost does not return horizon");
-
- // Using 'splithorizon1' should use that horizon
- var horizonURL = `mongodb://${node0horizonHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
- jsTestLog(`URL with horizon: ${horizonURL}`);
- assert.eq(checkExpectedHorizon(horizonURL, 0, node0horizonHostname),
- 0,
- "does not return horizon as expected");
- assert.eq(checkExpectedHorizon(horizonURL, 1, node1horizonHostname),
- 0,
- "does not return horizon as expected");
-
- // Using 'splithorizon2' does not have a horizon so it should return default
- var horizonMissingURL =
- `mongodb://${node0horizonMissingHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
- jsTestLog(`URL with horizon: ${horizonMissingURL}`);
- assert.eq(checkExpectedHorizon(horizonMissingURL, 0, node0localHostname),
- 0,
- "does not return localhost as expected");
- assert.eq(checkExpectedHorizon(horizonMissingURL, 1, node1localHostname),
- 0,
- "does not return localhost as expected");
-
- // Check so we can replSetReconfig to add another horizon
- config.version += 1;
- config.members[0].horizons.other_horizon_name = node0horizonMissingHostname;
- config.members[1].horizons.other_horizon_name = node1horizonMissingHostname;
-
- assert.adminCommandWorkedAllowingNetworkError(replTest.getPrimary(), {replSetReconfig: config});
-
- // Using 'splithorizon2' should now return the new horizon
- var horizonMissingURL =
- `mongodb://${node0horizonMissingHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
- jsTestLog(`URL with horizon: ${horizonMissingURL}`);
- assert.eq(checkExpectedHorizon(horizonMissingURL, 0, node0horizonMissingHostname),
- 0,
- "does not return horizon as expected");
- assert.eq(checkExpectedHorizon(horizonMissingURL, 1, node1horizonMissingHostname),
- 0,
- "does not return horizon as expected");
-
- // Change horizon to return a different port to connect to, so the feature can be used in a
- // port-forwarding environment
- var node0horizonHostnameDifferentPort = "splithorizon1:80";
- var node1horizonHostnameDifferentPort = "splithorizon1:81";
- config.version += 1;
- config.members[0].horizons.horizon_name = node0horizonHostnameDifferentPort;
- config.members[1].horizons.horizon_name = node1horizonHostnameDifferentPort;
-
- assert.adminCommandWorkedAllowingNetworkError(replTest.getPrimary(), {replSetReconfig: config});
-
-    // Build the connection URL without replicaSet: setting it would trigger the
-    // ReplicaSetMonitor, which would fail because the advertised port is not reachable
- var horizonDifferentPortURL = `mongodb://${node0horizonHostname}/admin?ssl=true`;
- jsTestLog(`URL with horizon using different port: ${horizonDifferentPortURL}`);
- assert.eq(checkExpectedHorizon(horizonDifferentPortURL, 0, node0horizonHostnameDifferentPort),
- 0,
- "does not return horizon as expected");
- assert.eq(checkExpectedHorizon(horizonDifferentPortURL, 1, node1horizonHostnameDifferentPort),
- 0,
- "does not return horizon as expected");
-
-    // Providing a config where a horizon is not defined on all members is expected to fail
- config.version += 1;
- config.members[0].horizons.horizon_mismatch = node0.replace("localhost", "splithorizon3");
- assert.commandFailed(replTest.getPrimary().adminCommand({replSetReconfig: config}));
-
- // Providing a config where horizon hostnames are duplicated in members is expected to fail
- config.version += 1;
- config.members[1].horizons.horizon_mismatch = config.members[0].horizons.horizon_mismatch;
- assert.commandFailed(replTest.getPrimary().adminCommand({replSetReconfig: config}));
-
- // Two horizons with duplicated hostnames are not allowed
- config.version += 1;
- delete config.members[0].horizons.horizon_mismatch;
- delete config.members[1].horizons.horizon_mismatch;
- config.members[0].horizons.horizon_dup_hostname = config.members[0].horizons.horizon_name;
- config.members[1].horizons.horizon_dup_hostname = config.members[1].horizons.horizon_name;
- assert.commandFailed(replTest.getPrimary().adminCommand({replSetReconfig: config}));
-
- replTest.stopSet();
- removeFile(hostsFile);
+}
+
+var replTest = new ReplSetTest({
+ name: "splitHorizontest",
+ nodes: 2,
+ nodeOptions: {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/splithorizon-server.pem",
+ },
+ host: "localhost",
+ useHostName: false,
+});
+
+replTest.startSet({
+ env: {
+ SSL_CERT_FILE: 'jstests/libs/splithorizon-ca.pem',
+ },
+});
+
+// Create some variables needed for our horizons: we replace localhost with the horizon
+// name, leaving the port the same (so we can still connect)
+var node0 = replTest.nodeList()[0];
+var node1 = replTest.nodeList()[1];
+var node0localHostname = node0;
+var node1localHostname = node1;
+var node0horizonHostname = node0.replace("localhost", "splithorizon1");
+var node1horizonHostname = node1.replace("localhost", "splithorizon1");
+var node0horizonMissingHostname = node0.replace("localhost", "splithorizon2");
+var node1horizonMissingHostname = node1.replace("localhost", "splithorizon2");
+
+var config = replTest.getReplSetConfig();
+config.members[0].horizons = {};
+config.members[0].horizons.horizon_name = node0horizonHostname;
+config.members[1].horizons = {};
+config.members[1].horizons.horizon_name = node1horizonHostname;
+
+replTest.initiate(config);
+
+var checkExpectedHorizon = function(url, memberIndex, expectedHostname) {
+ // Run isMaster in the shell and check that we get the expected hostname back
+ var argv = [
+ 'env',
+ "HOSTALIASES=" + hostsFile,
+ "SSL_CERT_FILE=jstests/libs/splithorizon-ca.pem",
+ './mongo',
+ url,
+ '--eval',
+ ("assert(db.runCommand({isMaster: 1})['hosts'][" + memberIndex + "] == '" +
+ expectedHostname + "')")
+ ];
+ return runMongoProgram(...argv);
+};
+
+// Using localhost should use the default horizon
+var defaultURL = `mongodb://${node0localHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
+jsTestLog(`URL without horizon: ${defaultURL}`);
+assert.eq(checkExpectedHorizon(defaultURL, 0, node0localHostname),
+ 0,
+ "localhost does not return horizon");
+assert.eq(checkExpectedHorizon(defaultURL, 1, node1localHostname),
+ 0,
+ "localhost does not return horizon");
+
+// Using 'splithorizon1' should use that horizon
+var horizonURL = `mongodb://${node0horizonHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
+jsTestLog(`URL with horizon: ${horizonURL}`);
+assert.eq(checkExpectedHorizon(horizonURL, 0, node0horizonHostname),
+ 0,
+ "does not return horizon as expected");
+assert.eq(checkExpectedHorizon(horizonURL, 1, node1horizonHostname),
+ 0,
+ "does not return horizon as expected");
+
+// Using 'splithorizon2', which has no configured horizon, should return the default hostnames
+var horizonMissingURL =
+ `mongodb://${node0horizonMissingHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
+jsTestLog(`URL with horizon: ${horizonMissingURL}`);
+assert.eq(checkExpectedHorizon(horizonMissingURL, 0, node0localHostname),
+ 0,
+ "does not return localhost as expected");
+assert.eq(checkExpectedHorizon(horizonMissingURL, 1, node1localHostname),
+ 0,
+ "does not return localhost as expected");
+
+// Check that we can use replSetReconfig to add another horizon
+config.version += 1;
+config.members[0].horizons.other_horizon_name = node0horizonMissingHostname;
+config.members[1].horizons.other_horizon_name = node1horizonMissingHostname;
+
+assert.adminCommandWorkedAllowingNetworkError(replTest.getPrimary(), {replSetReconfig: config});
+
+// Using 'splithorizon2' should now return the new horizon
+var horizonMissingURL =
+ `mongodb://${node0horizonMissingHostname}/admin?replicaSet=${replTest.name}&ssl=true`;
+jsTestLog(`URL with horizon: ${horizonMissingURL}`);
+assert.eq(checkExpectedHorizon(horizonMissingURL, 0, node0horizonMissingHostname),
+ 0,
+ "does not return horizon as expected");
+assert.eq(checkExpectedHorizon(horizonMissingURL, 1, node1horizonMissingHostname),
+ 0,
+ "does not return horizon as expected");
+
+// Change the horizon to return a different port to connect to, so the feature can be used in a
+// port-forwarding environment
+var node0horizonHostnameDifferentPort = "splithorizon1:80";
+var node1horizonHostnameDifferentPort = "splithorizon1:81";
+config.version += 1;
+config.members[0].horizons.horizon_name = node0horizonHostnameDifferentPort;
+config.members[1].horizons.horizon_name = node1horizonHostnameDifferentPort;
+
+assert.adminCommandWorkedAllowingNetworkError(replTest.getPrimary(), {replSetReconfig: config});
+
+// Build the connection URL without replicaSet: setting it would trigger the
+// ReplicaSetMonitor, which would fail because the advertised port is not reachable
+var horizonDifferentPortURL = `mongodb://${node0horizonHostname}/admin?ssl=true`;
+jsTestLog(`URL with horizon using different port: ${horizonDifferentPortURL}`);
+assert.eq(checkExpectedHorizon(horizonDifferentPortURL, 0, node0horizonHostnameDifferentPort),
+ 0,
+ "does not return horizon as expected");
+assert.eq(checkExpectedHorizon(horizonDifferentPortURL, 1, node1horizonHostnameDifferentPort),
+ 0,
+ "does not return horizon as expected");
+
+// Providing a config where a horizon is not defined on all members is expected to fail
+config.version += 1;
+config.members[0].horizons.horizon_mismatch = node0.replace("localhost", "splithorizon3");
+assert.commandFailed(replTest.getPrimary().adminCommand({replSetReconfig: config}));
+
+// Providing a config where horizon hostnames are duplicated in members is expected to fail
+config.version += 1;
+config.members[1].horizons.horizon_mismatch = config.members[0].horizons.horizon_mismatch;
+assert.commandFailed(replTest.getPrimary().adminCommand({replSetReconfig: config}));
+
+// Two horizons with duplicated hostnames are not allowed
+config.version += 1;
+delete config.members[0].horizons.horizon_mismatch;
+delete config.members[1].horizons.horizon_mismatch;
+config.members[0].horizons.horizon_dup_hostname = config.members[0].horizons.horizon_name;
+config.members[1].horizons.horizon_dup_hostname = config.members[1].horizons.horizon_name;
+assert.commandFailed(replTest.getPrimary().adminCommand({replSetReconfig: config}));
+
+replTest.stopSet();
+removeFile(hostsFile);
})();
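
For reference, the horizon machinery exercised above reduces to a per-member map from
horizon name to the hostname:port that isMaster advertises to clients arriving on that
horizon. A minimal sketch of a valid two-member config, with hypothetical ports:

var sketchConfig = {
    _id: "splitHorizontest",
    version: 1,
    members: [
        // Every member must define the same horizon names, and no two horizons on
        // one member may share a hostname (the failing reconfig cases above).
        {_id: 0, host: "localhost:20020", horizons: {horizon_name: "splithorizon1:20020"}},
        {_id: 1, host: "localhost:20021", horizons: {horizon_name: "splithorizon1:20021"}},
    ],
};
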
diff --git a/jstests/ssl/sharding_with_x509.js b/jstests/ssl/sharding_with_x509.js
index e1dc5ca822d..326d0167585 100644
--- a/jstests/ssl/sharding_with_x509.js
+++ b/jstests/ssl/sharding_with_x509.js
@@ -1,80 +1,80 @@
// Tests basic sharding with x509 cluster auth. The purpose is to verify the connectivity between
// mongos and the shards.
(function() {
- 'use strict';
+'use strict';
- var x509_options = {
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslClusterFile: "jstests/libs/cluster_cert.pem",
- sslAllowInvalidHostnames: "",
- clusterAuthMode: "x509"
- };
+var x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslClusterFile: "jstests/libs/cluster_cert.pem",
+ sslAllowInvalidHostnames: "",
+ clusterAuthMode: "x509"
+};
- // Start ShardingTest with enableBalancer because ShardingTest attempts to turn off the balancer
- // otherwise, which it will not be authorized to do. Once SERVER-14017 is fixed the
- // "enableBalancer" line could be removed.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: {
- enableBalancer: true,
- configOptions: x509_options,
- mongosOptions: x509_options,
- rsOptions: x509_options,
- shardOptions: x509_options,
- shardAsReplicaSet: false
- }
- });
+// Start ShardingTest with enableBalancer because ShardingTest attempts to turn off the balancer
+// otherwise, which it will not be authorized to do. Once SERVER-14017 is fixed the
+// "enableBalancer" line could be removed.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ enableBalancer: true,
+ configOptions: x509_options,
+ mongosOptions: x509_options,
+ rsOptions: x509_options,
+ shardOptions: x509_options,
+ shardAsReplicaSet: false
+ }
+});
- st.s.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
- st.s.getDB('admin').auth('admin', 'pwd');
+st.s.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
+st.s.getDB('admin').auth('admin', 'pwd');
- var coll = st.s.getCollection("test.foo");
+var coll = st.s.getCollection("test.foo");
- st.shardColl(coll, {insert: 1}, false);
+st.shardColl(coll, {insert: 1}, false);
- print("starting insertion phase");
+print("starting insertion phase");
- // Insert a bunch of data
- var toInsert = 2000;
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < toInsert; i++) {
- bulk.insert({my: "test", data: "to", insert: i});
- }
- assert.writeOK(bulk.execute());
+// Insert a bunch of data
+var toInsert = 2000;
+var bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < toInsert; i++) {
+ bulk.insert({my: "test", data: "to", insert: i});
+}
+assert.writeOK(bulk.execute());
- print("starting updating phase");
+print("starting updating phase");
- // Update a bunch of data
- var toUpdate = toInsert;
- bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < toUpdate; i++) {
- var id = coll.findOne({insert: i})._id;
- bulk.find({insert: i, _id: id}).update({$inc: {counter: 1}});
- }
- assert.writeOK(bulk.execute());
+// Update a bunch of data
+var toUpdate = toInsert;
+bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < toUpdate; i++) {
+ var id = coll.findOne({insert: i})._id;
+ bulk.find({insert: i, _id: id}).update({$inc: {counter: 1}});
+}
+assert.writeOK(bulk.execute());
- print("starting deletion");
+print("starting deletion");
- // Remove a bunch of data
- var toDelete = toInsert / 2;
- bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < toDelete; i++) {
- bulk.find({insert: i}).removeOne();
- }
- assert.writeOK(bulk.execute());
+// Remove a bunch of data
+var toDelete = toInsert / 2;
+bulk = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < toDelete; i++) {
+ bulk.find({insert: i}).removeOne();
+}
+assert.writeOK(bulk.execute());
- // Make sure the right amount of data is there
-    assert.eq(coll.find({my: 'test'}).itcount(), toInsert / 2);
+// Make sure the right amount of data is there
+assert.eq(coll.find({my: 'test'}).itcount(), toInsert / 2);
- // Authenticate csrs so ReplSetTest.stopSet() can do db hash check.
- if (st.configRS) {
- st.configRS.nodes.forEach((node) => {
- node.getDB('admin').auth('admin', 'pwd');
- });
- }
- st.stop();
+// Authenticate csrs so ReplSetTest.stopSet() can do db hash check.
+if (st.configRS) {
+ st.configRS.nodes.forEach((node) => {
+ node.getDB('admin').auth('admin', 'pwd');
+ });
+}
+st.stop();
})();
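
Each phase above (insert, update, delete) uses the same unordered bulk pattern against
mongos; a condensed, illustrative sketch with a reduced document count:

var sketchColl = st.s.getCollection("test.foo");
var sketchBulk = sketchColl.initializeUnorderedBulkOp();
for (var n = 0; n < 100; n++) {
    sketchBulk.insert({insert: n});  // writes are queued client-side
}
assert.writeOK(sketchBulk.execute());  // then shipped to mongos as batches
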
diff --git a/jstests/ssl/shell_option_parsing.js b/jstests/ssl/shell_option_parsing.js
index e51367f836f..9b1b6f914e1 100644
--- a/jstests/ssl/shell_option_parsing.js
+++ b/jstests/ssl/shell_option_parsing.js
@@ -1,216 +1,215 @@
// Test mongo shell connect strings.
(function() {
- 'use strict';
-
- const SERVER_CERT = "jstests/libs/server.pem";
- const CAFILE = "jstests/libs/ca.pem";
-
- var opts = {
- sslMode: "allowSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- sslAllowConnectionsWithoutCertificates: "",
- sslCAFile: CAFILE,
- setParameter: "authenticationMechanisms=MONGODB-X509,SCRAM-SHA-1"
- };
-
- var rst = new ReplSetTest({name: 'sslSet', nodes: 3, nodeOptions: opts});
-
- rst.startSet();
- rst.initiate();
-
- const mongod = rst.getPrimary();
- const host = mongod.host;
- const port = mongod.port;
-
- const username = "user";
- const usernameNotTest = "userNotTest";
- const usernameX509 = "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client";
-
- const password = username;
- const passwordNotTest = usernameNotTest;
-
- mongod.getDB("test").createUser({user: username, pwd: username, roles: []});
- mongod.getDB("notTest").createUser({user: usernameNotTest, pwd: usernameNotTest, roles: []});
- mongod.getDB("$external").createUser({user: usernameX509, roles: []});
-
- var i = 0;
- function testConnect(expectPasswordPrompt, expectSuccess, ...args) {
- const command = [
- 'mongo',
- '--setShellParameter',
- 'newLineAfterPasswordPromptForTest=true',
- '--eval',
- ';',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslCAFile',
- CAFILE,
- ...args
- ];
- print("=========================================> The command (" + (i++) +
- ") I am going to run is: " + command.join(' '));
-
- clearRawMongoProgramOutput();
- var clientPID = _startMongoProgram({args: command});
-
- assert.soon(function() {
- const output = rawMongoProgramOutput();
- if (expectPasswordPrompt) {
- if (output.includes("Enter password:")) {
- stopMongoProgramByPid(clientPID);
- return true;
- }
- return false;
- }
-
- let childStatus = checkProgram(clientPID);
- if (!childStatus.alive) {
- if (expectSuccess) {
- assert.eq(childStatus.exitCode, 0);
- } else {
- assert.eq(childStatus.exitCode, 1);
- }
+'use strict';
+
+const SERVER_CERT = "jstests/libs/server.pem";
+const CAFILE = "jstests/libs/ca.pem";
+
+var opts = {
+ sslMode: "allowSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ sslAllowConnectionsWithoutCertificates: "",
+ sslCAFile: CAFILE,
+ setParameter: "authenticationMechanisms=MONGODB-X509,SCRAM-SHA-1"
+};
+
+var rst = new ReplSetTest({name: 'sslSet', nodes: 3, nodeOptions: opts});
+
+rst.startSet();
+rst.initiate();
+
+const mongod = rst.getPrimary();
+const host = mongod.host;
+const port = mongod.port;
+
+const username = "user";
+const usernameNotTest = "userNotTest";
+const usernameX509 = "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client";
+
+const password = username;
+const passwordNotTest = usernameNotTest;
+
+mongod.getDB("test").createUser({user: username, pwd: username, roles: []});
+mongod.getDB("notTest").createUser({user: usernameNotTest, pwd: usernameNotTest, roles: []});
+mongod.getDB("$external").createUser({user: usernameX509, roles: []});
+
+var i = 0;
+function testConnect(expectPasswordPrompt, expectSuccess, ...args) {
+ const command = [
+ 'mongo',
+ '--setShellParameter',
+ 'newLineAfterPasswordPromptForTest=true',
+ '--eval',
+ ';',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslCAFile',
+ CAFILE,
+ ...args
+ ];
+ print("=========================================> The command (" + (i++) +
+ ") I am going to run is: " + command.join(' '));
+
+ clearRawMongoProgramOutput();
+ var clientPID = _startMongoProgram({args: command});
+
+ assert.soon(function() {
+ const output = rawMongoProgramOutput();
+ if (expectPasswordPrompt) {
+ if (output.includes("Enter password:")) {
+ stopMongoProgramByPid(clientPID);
return true;
}
-
return false;
- });
- }
+ }
+
+ let childStatus = checkProgram(clientPID);
+ if (!childStatus.alive) {
+ if (expectSuccess) {
+ assert.eq(childStatus.exitCode, 0);
+ } else {
+ assert.eq(childStatus.exitCode, 1);
+ }
+ return true;
+ }
- const testSuccessfulConnect = function(expectPasswordPrompt, ...args) {
- testConnect(expectPasswordPrompt, true, ...args);
- };
+ return false;
+ });
+}
- const testFailedConnect = function(expectPasswordPrompt, ...args) {
- testConnect(expectPasswordPrompt, false, ...args);
- };
+const testSuccessfulConnect = function(expectPasswordPrompt, ...args) {
+ testConnect(expectPasswordPrompt, true, ...args);
+};
- testSuccessfulConnect(true, `mongodb://${username}@${host}/test`);
- testSuccessfulConnect(true, `mongodb://${username}@${host}/test`, '--password');
+const testFailedConnect = function(expectPasswordPrompt, ...args) {
+ testConnect(expectPasswordPrompt, false, ...args);
+};
- testSuccessfulConnect(true, `mongodb://${username}@${host}/test`, '--username', username);
- testSuccessfulConnect(
- true, `mongodb://${username}@${host}/test`, '--password', '--username', username);
-
- testSuccessfulConnect(true,
- `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
- '--password',
- '--username',
- usernameNotTest);
-
- testSuccessfulConnect(true, `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`);
-
- testSuccessfulConnect(true,
- `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
- '--password',
- '--username',
- usernameNotTest,
- '--authenticationDatabase',
- 'notTest');
-
- testSuccessfulConnect(true,
- `mongodb://${usernameNotTest}@${host}/test`,
- '--password',
- '--username',
- usernameNotTest,
- '--authenticationDatabase',
- 'notTest');
+testSuccessfulConnect(true, `mongodb://${username}@${host}/test`);
+testSuccessfulConnect(true, `mongodb://${username}@${host}/test`, '--password');
- testSuccessfulConnect(
- true, `mongodb://${host}/test?authSource=notTest`, '--username', usernameNotTest);
+testSuccessfulConnect(true, `mongodb://${username}@${host}/test`, '--username', username);
+testSuccessfulConnect(
+ true, `mongodb://${username}@${host}/test`, '--password', '--username', username);
- testSuccessfulConnect(true, `mongodb://${host}/test`, '--username', username);
- testSuccessfulConnect(true, `mongodb://${host}/test`, '--password', '--username', username);
+testSuccessfulConnect(true,
+ `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
+ '--password',
+ '--username',
+ usernameNotTest);
- testSuccessfulConnect(
- false, `mongodb://${host}/test`, '--password', password, '--username', username);
+testSuccessfulConnect(true, `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`);
- testSuccessfulConnect(false, `mongodb://${username}:${password}@${host}/test`);
- testSuccessfulConnect(false, `mongodb://${username}:${password}@${host}/test`, '--password');
- testSuccessfulConnect(
- false, `mongodb://${username}:${password}@${host}/test`, '--password', password);
- testSuccessfulConnect(false, `mongodb://${username}@${host}/test`, '--password', password);
+testSuccessfulConnect(true,
+ `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
+ '--password',
+ '--username',
+ usernameNotTest,
+ '--authenticationDatabase',
+ 'notTest');
- testSuccessfulConnect(false,
- `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
- '--username',
- usernameNotTest,
- '--password',
- passwordNotTest,
- '--authenticationDatabase',
- 'notTest');
+testSuccessfulConnect(true,
+ `mongodb://${usernameNotTest}@${host}/test`,
+ '--password',
+ '--username',
+ usernameNotTest,
+ '--authenticationDatabase',
+ 'notTest');
- testSuccessfulConnect(false,
- `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
- '--username',
- usernameNotTest,
- '--password',
- passwordNotTest);
+testSuccessfulConnect(
+ true, `mongodb://${host}/test?authSource=notTest`, '--username', usernameNotTest);
- testSuccessfulConnect(false,
- `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
- '--password',
- passwordNotTest);
+testSuccessfulConnect(true, `mongodb://${host}/test`, '--username', username);
+testSuccessfulConnect(true, `mongodb://${host}/test`, '--password', '--username', username);
- testSuccessfulConnect(false,
- `mongodb://${host}/test?authSource=notTest`,
- '--username',
- usernameNotTest,
- '--password',
- passwordNotTest);
-
- // TODO: Enable this set of tests in the future -- needs proper encoding for X509 username in
- // URI
- if (false) {
- testSuccessfulConnect(
- false,
- `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`);
- testSuccessfulConnect(
- false,
- `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
- '--username',
- usernameX509);
- testSuccessfulConnect(false,
- `mongodb://${usernameX509}@${host}/test?authSource=$external`,
- '--authenticationMechanism',
- 'MONGODB-X509');
-
- testSuccessfulConnect(
- false,
- `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
- '--authenticationMechanism',
- 'MONGODB-X509');
- testSuccessfulConnect(
- false,
- `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
- '--authenticationMechanism',
- 'MONGODB-X509',
- '--username',
- usernameX509);
- testSuccessfulConnect(false,
- `mongodb://${usernameX509}@${host}/test?authSource=$external`,
- '--authenticationMechanism',
- 'MONGODB-X509');
- }
- /* */
-
- testFailedConnect(false,
- `mongodb://${host}/test?authMechanism=MONGODB-X509&authSource=$external`);
- testFailedConnect(false,
- `mongodb://${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
+testSuccessfulConnect(
+ false, `mongodb://${host}/test`, '--password', password, '--username', username);
+
+testSuccessfulConnect(false, `mongodb://${username}:${password}@${host}/test`);
+testSuccessfulConnect(false, `mongodb://${username}:${password}@${host}/test`, '--password');
+testSuccessfulConnect(
+ false, `mongodb://${username}:${password}@${host}/test`, '--password', password);
+testSuccessfulConnect(false, `mongodb://${username}@${host}/test`, '--password', password);
+
+testSuccessfulConnect(false,
+ `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
'--username',
- usernameX509);
-
- testFailedConnect(false,
- `mongodb://${host}/test?authSource=$external`,
- '--authenticationMechanism',
- 'MONGODB-X509');
- testFailedConnect(false,
- `mongodb://${host}/test?authSource=$external`,
+ usernameNotTest,
+ '--password',
+ passwordNotTest,
+ '--authenticationDatabase',
+ 'notTest');
+
+testSuccessfulConnect(false,
+ `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
'--username',
- usernameX509,
- '--authenticationMechanism',
- 'MONGODB-X509');
- rst.stopSet();
+ usernameNotTest,
+ '--password',
+ passwordNotTest);
+
+testSuccessfulConnect(false,
+ `mongodb://${usernameNotTest}@${host}/test?authSource=notTest`,
+ '--password',
+ passwordNotTest);
+
+testSuccessfulConnect(false,
+ `mongodb://${host}/test?authSource=notTest`,
+ '--username',
+ usernameNotTest,
+ '--password',
+ passwordNotTest);
+
+// TODO: Enable this set of tests in the future -- needs proper encoding for X509 username in
+// URI
+if (false) {
+ testSuccessfulConnect(
+ false,
+ `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`);
+ testSuccessfulConnect(
+ false,
+ `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
+ '--username',
+ usernameX509);
+ testSuccessfulConnect(false,
+ `mongodb://${usernameX509}@${host}/test?authSource=$external`,
+ '--authenticationMechanism',
+ 'MONGODB-X509');
+
+ testSuccessfulConnect(
+ false,
+ `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
+ '--authenticationMechanism',
+ 'MONGODB-X509');
+ testSuccessfulConnect(
+ false,
+ `mongodb://${usernameX509}@${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
+ '--authenticationMechanism',
+ 'MONGODB-X509',
+ '--username',
+ usernameX509);
+ testSuccessfulConnect(false,
+ `mongodb://${usernameX509}@${host}/test?authSource=$external`,
+ '--authenticationMechanism',
+ 'MONGODB-X509');
+}
+/* */
+
+testFailedConnect(false, `mongodb://${host}/test?authMechanism=MONGODB-X509&authSource=$external`);
+testFailedConnect(false,
+ `mongodb://${host}/test?authMechanism=MONGODB-X509&authSource=$external`,
+ '--username',
+ usernameX509);
+
+testFailedConnect(false,
+ `mongodb://${host}/test?authSource=$external`,
+ '--authenticationMechanism',
+ 'MONGODB-X509');
+testFailedConnect(false,
+ `mongodb://${host}/test?authSource=$external`,
+ '--username',
+ usernameX509,
+ '--authenticationMechanism',
+ 'MONGODB-X509');
+rst.stopSet();
})();
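
The matrix of calls above encodes one rule: the shell prompts for a password exactly when
a username is supplied (in the URI or via --username) without a usable password value.
Two illustrative calls against a hypothetical host:

// Username but no password value: the "Enter password:" prompt is expected.
testSuccessfulConnect(true, 'mongodb://user@example.test:27017/test');
// Password inline in the URI: no prompt is expected.
testSuccessfulConnect(false, 'mongodb://user:pwd@example.test:27017/test');
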
diff --git a/jstests/ssl/shell_x509_system_user.js b/jstests/ssl/shell_x509_system_user.js
index 713c0453990..f404b6b7b3b 100644
--- a/jstests/ssl/shell_x509_system_user.js
+++ b/jstests/ssl/shell_x509_system_user.js
@@ -2,75 +2,75 @@
// our auth performance tests (through the dbhash hook).
(function() {
- 'use strict';
+'use strict';
- // The mongo shell cannot authenticate as the internal __system user in tests that use x509 for
- // cluster authentication. Choosing the default value for wcMajorityJournalDefault in
- // ReplSetTest cannot be done automatically without the shell performing such authentication, so
- // in this test we must make the choice explicitly, based on the global test options.
- let wcMajorityJournalDefault;
- if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "ephemeralForTest" ||
- jsTestOptions().storageEngine == "inMemory") {
- wcMajorityJournalDefault = false;
- } else {
- wcMajorityJournalDefault = true;
- }
+// The mongo shell cannot authenticate as the internal __system user in tests that use x509 for
+// cluster authentication. Choosing the default value for wcMajorityJournalDefault in
+// ReplSetTest cannot be done automatically without the shell performing such authentication, so
+// in this test we must make the choice explicitly, based on the global test options.
+let wcMajorityJournalDefault;
+if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "ephemeralForTest" ||
+ jsTestOptions().storageEngine == "inMemory") {
+ wcMajorityJournalDefault = false;
+} else {
+ wcMajorityJournalDefault = true;
+}
- const x509Options = {
- clusterAuthMode: 'x509',
- sslMode: 'requireSSL',
- sslPEMKeyFile: 'jstests/libs/server.pem',
- sslCAFile: 'jstests/libs/ca.pem',
- sslAllowInvalidCertificates: '',
- };
+const x509Options = {
+ clusterAuthMode: 'x509',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: 'jstests/libs/server.pem',
+ sslCAFile: 'jstests/libs/ca.pem',
+ sslAllowInvalidCertificates: '',
+};
- const rst = new ReplSetTest({nodes: 1, nodeOptions: x509Options, waitForKeys: false});
+const rst = new ReplSetTest({nodes: 1, nodeOptions: x509Options, waitForKeys: false});
- rst.startSet();
+rst.startSet();
-    // ReplSetTest.initiate() requires all nodes to be authorized to run replSetGetStatus.
- // TODO(SERVER-14017): Remove this in favor of using initiate() everywhere.
- rst.initiateWithAnyNodeAsPrimary(Object.extend(
- rst.getReplSetConfig(), {writeConcernMajorityJournalDefault: wcMajorityJournalDefault}));
+// ReplSetTest.initiate() requires all nodes to be authorized to run replSetGetStatus.
+// TODO(SERVER-14017): Remove this in favor of using initiate() everywhere.
+rst.initiateWithAnyNodeAsPrimary(Object.extend(
+ rst.getReplSetConfig(), {writeConcernMajorityJournalDefault: wcMajorityJournalDefault}));
- const primaryConnString = rst.getPrimary().host;
+const primaryConnString = rst.getPrimary().host;
- const subShellCommands = function() {
- TestData = {
- authUser: 'C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel,CN=server',
- authenticationDatabase: '$external',
- keyFile: 'dummyKeyFile',
- clusterAuthMode: 'x509',
+const subShellCommands = function() {
+ TestData = {
+ authUser: 'C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel,CN=server',
+ authenticationDatabase: '$external',
+ keyFile: 'dummyKeyFile',
+ clusterAuthMode: 'x509',
- };
- // Explicitly check asCluster can succeed.
- authutil.asCluster(db.getMongo(), 'dummyKeyFile', function() {
- // No need to do anything here. We just need to check we don't error out in the
- // previous auth step.
- });
+ };
+ // Explicitly check asCluster can succeed.
+ authutil.asCluster(db.getMongo(), 'dummyKeyFile', function() {
+ // No need to do anything here. We just need to check we don't error out in the
+ // previous auth step.
+ });
- // Indirectly check that ReplSetTest can successfully call asCluster.
- const rst = new ReplSetTest(db.getMongo().host);
+ // Indirectly check that ReplSetTest can successfully call asCluster.
+ const rst = new ReplSetTest(db.getMongo().host);
- // Directly check that the use case for our auth perf tests can succeed.
- load("jstests/hooks/run_check_repl_dbhash.js");
- };
+ // Directly check that the use case for our auth perf tests can succeed.
+ load("jstests/hooks/run_check_repl_dbhash.js");
+};
- const subShellArgs = [
- 'mongo',
- '--ssl',
- '--sslCAFile=jstests/libs/ca.pem',
- '--sslPEMKeyFile=jstests/libs/server.pem',
- '--sslAllowInvalidHostnames',
- '--authenticationDatabase=$external',
- '--authenticationMechanism=MONGODB-X509',
- primaryConnString,
- '--eval',
- `(${subShellCommands.toString()})();`
- ];
+const subShellArgs = [
+ 'mongo',
+ '--ssl',
+ '--sslCAFile=jstests/libs/ca.pem',
+ '--sslPEMKeyFile=jstests/libs/server.pem',
+ '--sslAllowInvalidHostnames',
+ '--authenticationDatabase=$external',
+ '--authenticationMechanism=MONGODB-X509',
+ primaryConnString,
+ '--eval',
+ `(${subShellCommands.toString()})();`
+];
- const retVal = _runMongoProgram(...subShellArgs);
- assert.eq(retVal, 0, 'mongo shell did not succeed with exit code 0');
+const retVal = _runMongoProgram(...subShellArgs);
+assert.eq(retVal, 0, 'mongo shell did not succeed with exit code 0');
- rst.stopSet();
+rst.stopSet();
})();
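
The heart of this test is authutil.asCluster, which authenticates the connection as the
cluster identity described by TestData before running its callback; with clusterAuthMode
x509 the key file argument is effectively a placeholder. A minimal sketch of the pattern:

TestData = {
    authUser: 'C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel,CN=server',
    authenticationDatabase: '$external',
    keyFile: 'dummyKeyFile',  // unused for x509, but the helper requires a value
    clusterAuthMode: 'x509',
};
authutil.asCluster(db.getMongo(), 'dummyKeyFile', function() {
    // Privileged work runs here, e.g. the dbhash check hook.
});
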
diff --git a/jstests/ssl/ssl_ECDHE_suites.js b/jstests/ssl/ssl_ECDHE_suites.js
index 551c138d99a..b132948b93b 100644
--- a/jstests/ssl/ssl_ECDHE_suites.js
+++ b/jstests/ssl/ssl_ECDHE_suites.js
@@ -3,99 +3,99 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
-
- // Need to use toolchain python, which is unsupported on Windows
- if (_isWindows()) {
- return;
- }
-
-    // Amazon Linux does not currently support ECDHE
- const EXCLUDED_BUILDS = ['amazon', 'amzn64'];
-
- const SERVER_CERT = "jstests/libs/server.pem";
- const OUTFILE = 'jstests/ssl/ciphers.json';
-
- const suites = [
- 'sslv2',
- 'sslv3',
- 'tls1',
- 'tls1_1',
- 'tls1_2',
- ];
-
- const x509_options = {
- tlsMode: 'requireTLS',
- tlsCAFile: CA_CERT,
- tlsCertificateKeyFile: SERVER_CERT,
- ipv6: "",
- bind_ip_all: ""
- };
-
- const mongod = MongoRunner.runMongod(x509_options);
-
- // Use new toolchain python, if it exists
- let python_binary = '/opt/mongodbtoolchain/v3/bin/python3';
- if (runProgram('/bin/sh', '-c', 'ls ' + python_binary) !== 0) {
-        python_binary = '/opt/mongodbtoolchain/v2/bin/python3';
- }
-
- // Run the tls cipher suite enumerator
- const python = '/usr/bin/env ' + python_binary;
- const enumerator = " jstests/ssl/tls_enumerator.py ";
- const python_command = python + enumerator + '--port=' + mongod.port + ' --cafile=' + CA_CERT +
- ' --cert=' + CLIENT_CERT + ' --outfile=' + OUTFILE;
- assert.eq(runProgram('/bin/sh', '-c', python_command), 0);
-
- // Parse its output
- let cipherDict = {};
- try {
- cipherDict = JSON.parse(cat(OUTFILE));
- } catch (e) {
- jsTestLog("Failed to parse ciphers.json");
- throw e;
- } finally {
- const delete_command = 'rm ' + OUTFILE;
- assert.eq(runProgram('/bin/sh', '-c', delete_command), 0);
- }
-
- // Checking that SSLv2, SSLv3 and TLS 1.0 are not accepted
- suites.slice(0, suites.indexOf('tls1'))
- .forEach(tlsVersion => assert(cipherDict[tlsVersion].length === 0));
-
- let hasECDHE = false;
- let hasDHE = false;
-
- // Printing TLS 1.1 and 1.2 suites that are accepted
- suites.slice(suites.indexOf('tls1_1')).forEach(tlsVersion => {
- print('*************************\n' + tlsVersion + ": ");
- cipherDict[tlsVersion].forEach(cipher => {
- print(cipher);
-
- if (cipher.startsWith('ECDHE')) {
- hasECDHE = true;
- }
+"use strict";
+
+// Need to use toolchain python, which is unsupported on Windows
+if (_isWindows()) {
+ return;
+}
+
+// Amazon Linux does not currently support ECDHE
+const EXCLUDED_BUILDS = ['amazon', 'amzn64'];
+
+const SERVER_CERT = "jstests/libs/server.pem";
+const OUTFILE = 'jstests/ssl/ciphers.json';
+
+const suites = [
+ 'sslv2',
+ 'sslv3',
+ 'tls1',
+ 'tls1_1',
+ 'tls1_2',
+];
+
+const x509_options = {
+ tlsMode: 'requireTLS',
+ tlsCAFile: CA_CERT,
+ tlsCertificateKeyFile: SERVER_CERT,
+ ipv6: "",
+ bind_ip_all: ""
+};
+
+const mongod = MongoRunner.runMongod(x509_options);
+
+// Use new toolchain python, if it exists
+let python_binary = '/opt/mongodbtoolchain/v3/bin/python3';
+if (runProgram('/bin/sh', '-c', 'ls ' + python_binary) !== 0) {
+    python_binary = '/opt/mongodbtoolchain/v2/bin/python3';  // fall back to the older toolchain (assumed)
+}
+
+// Run the tls cipher suite enumerator
+const python = '/usr/bin/env ' + python_binary;
+const enumerator = " jstests/ssl/tls_enumerator.py ";
+const python_command = python + enumerator + '--port=' + mongod.port + ' --cafile=' + CA_CERT +
+ ' --cert=' + CLIENT_CERT + ' --outfile=' + OUTFILE;
+assert.eq(runProgram('/bin/sh', '-c', python_command), 0);
+
+// Parse its output
+let cipherDict = {};
+try {
+ cipherDict = JSON.parse(cat(OUTFILE));
+} catch (e) {
+ jsTestLog("Failed to parse ciphers.json");
+ throw e;
+} finally {
+ const delete_command = 'rm ' + OUTFILE;
+ assert.eq(runProgram('/bin/sh', '-c', delete_command), 0);
+}
+
+// Checking that SSLv2, SSLv3 and TLS 1.0 are not accepted
+suites.slice(0, suites.indexOf('tls1'))
+ .forEach(tlsVersion => assert(cipherDict[tlsVersion].length === 0));
+
+let hasECDHE = false;
+let hasDHE = false;
+
+// Printing TLS 1.1 and 1.2 suites that are accepted
+suites.slice(suites.indexOf('tls1_1')).forEach(tlsVersion => {
+ print('*************************\n' + tlsVersion + ": ");
+ cipherDict[tlsVersion].forEach(cipher => {
+ print(cipher);
+
+ if (cipher.startsWith('ECDHE')) {
+ hasECDHE = true;
+ }
- if (cipher.startsWith('DHE')) {
- hasDHE = true;
- }
- });
+ if (cipher.startsWith('DHE')) {
+ hasDHE = true;
+ }
});
+});
- // All platforms except Amazon Linux 1 should support ECDHE and DHE
- if (!EXCLUDED_BUILDS.includes(buildInfo().buildEnvironment.distmod)) {
- assert(hasECDHE, 'Supports at least one ECDHE cipher suite');
+// All platforms except Amazon Linux 1 should support ECDHE and DHE
+if (!EXCLUDED_BUILDS.includes(buildInfo().buildEnvironment.distmod)) {
+ assert(hasECDHE, 'Supports at least one ECDHE cipher suite');
- // Secure Transport disallows DHE, so we don't require it on those platforms
- if (determineSSLProvider() !== 'apple') {
- assert(hasDHE, 'Supports at least one DHE cipher suite');
- }
- }
-
- // If ECDHE is enabled, DHE should be too (for Java 7 compat)
+ // Secure Transport disallows DHE, so we don't require it on those platforms
if (determineSSLProvider() !== 'apple') {
- assert(hasDHE === hasECDHE, 'Supports both ECDHE and DHE or neither');
+ assert(hasDHE, 'Supports at least one DHE cipher suite');
}
+}
+
+// If ECDHE is enabled, DHE should be too (for Java 7 compat)
+if (determineSSLProvider() !== 'apple') {
+ assert(hasDHE === hasECDHE, 'Supports both ECDHE and DHE or neither');
+}
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
}());
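
The assertions above treat ciphers.json as a map from protocol name to the list of cipher
suites the server accepted on that protocol. A hypothetical example of the shape consumed:

const exampleCipherDict = {
    "sslv2": [],  // the pre-TLS 1.1 lists must come back empty
    "sslv3": [],
    "tls1": [],
    "tls1_1": ["ECDHE-RSA-AES256-SHA", "DHE-RSA-AES256-SHA"],
    "tls1_2": ["ECDHE-RSA-AES256-GCM-SHA384"],
};
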
diff --git a/jstests/ssl/ssl_alert_reporting.js b/jstests/ssl/ssl_alert_reporting.js
index d26d2ceca0e..a00fa4023e9 100644
--- a/jstests/ssl/ssl_alert_reporting.js
+++ b/jstests/ssl/ssl_alert_reporting.js
@@ -3,60 +3,60 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
-
- const clientOptions = [
- "--ssl",
- "--sslPEMKeyFile",
- "jstests/libs/client.pem",
- "--sslCAFile",
- "jstests/libs/ca.pem",
- "--eval",
- ";"
- ];
-
- function runTest(serverDisabledProtos, clientDisabledProtos) {
- const implementation = determineSSLProvider();
- let expectedRegex;
- if (implementation === "openssl") {
- expectedRegex =
- /Error: couldn't connect to server .*:[0-9]*, connection attempt failed: SocketException: tlsv1 alert protocol version/;
- } else if (implementation === "windows") {
- expectedRegex =
- /Error: couldn't connect to server .*:[0-9]*, connection attempt failed: SocketException: The function requested is not supported/;
- } else if (implementation === "apple") {
- expectedRegex =
- /Error: couldn't connect to server .*:[0-9]*, connection attempt failed: SocketException: Secure.Transport: bad protocol version/;
- } else {
- throw Error("Unrecognized TLS implementation!");
- }
-
- var md = MongoRunner.runMongod({
- sslMode: "requireSSL",
- sslCAFile: "jstests/libs/ca.pem",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslDisabledProtocols: serverDisabledProtos,
- });
-
- let shell;
- let mongoOutput;
-
- assert.soon(function() {
- clearRawMongoProgramOutput();
- shell = runMongoProgram("mongo",
- "--port",
- md.port,
- ...clientOptions,
- "--sslDisabledProtocols",
- clientDisabledProtos);
- mongoOutput = rawMongoProgramOutput();
- return mongoOutput.match(expectedRegex);
- }, "Mongo shell output was as follows:\n" + mongoOutput + "\n************");
-
- MongoRunner.stopMongod(md);
+'use strict';
+
+const clientOptions = [
+ "--ssl",
+ "--sslPEMKeyFile",
+ "jstests/libs/client.pem",
+ "--sslCAFile",
+ "jstests/libs/ca.pem",
+ "--eval",
+ ";"
+];
+
+function runTest(serverDisabledProtos, clientDisabledProtos) {
+ const implementation = determineSSLProvider();
+ let expectedRegex;
+ if (implementation === "openssl") {
+ expectedRegex =
+ /Error: couldn't connect to server .*:[0-9]*, connection attempt failed: SocketException: tlsv1 alert protocol version/;
+ } else if (implementation === "windows") {
+ expectedRegex =
+ /Error: couldn't connect to server .*:[0-9]*, connection attempt failed: SocketException: The function requested is not supported/;
+ } else if (implementation === "apple") {
+ expectedRegex =
+ /Error: couldn't connect to server .*:[0-9]*, connection attempt failed: SocketException: Secure.Transport: bad protocol version/;
+ } else {
+ throw Error("Unrecognized TLS implementation!");
}
-    // Client receives and reports a protocol version alert if it advertises a protocol older than
- // the server's oldest supported protocol
- runTest("TLS1_0", "TLS1_1,TLS1_2");
+ var md = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslDisabledProtocols: serverDisabledProtos,
+ });
+
+ let shell;
+ let mongoOutput;
+
+ assert.soon(function() {
+ clearRawMongoProgramOutput();
+ shell = runMongoProgram("mongo",
+ "--port",
+ md.port,
+ ...clientOptions,
+ "--sslDisabledProtocols",
+ clientDisabledProtos);
+ mongoOutput = rawMongoProgramOutput();
+ return mongoOutput.match(expectedRegex);
+ }, "Mongo shell output was as follows:\n" + mongoOutput + "\n************");
+
+ MongoRunner.stopMongod(md);
+}
+
+// Client receives and reports a protocol version alert if it advertises a protocol older than
+// the server's oldest supported protocol
+runTest("TLS1_0", "TLS1_1,TLS1_2");
}());
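
The single invocation sets up a deliberate protocol mismatch: the client's only offered
version is older than anything the server still accepts, so the server answers the
handshake with a protocol-version alert.

// Server: sslDisabledProtocols=TLS1_0          -> accepts TLS 1.1 and newer
// Client: --sslDisabledProtocols TLS1_1,TLS1_2 -> offers only TLS 1.0
runTest("TLS1_0", "TLS1_1,TLS1_2");
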
diff --git a/jstests/ssl/ssl_cert_password.js b/jstests/ssl/ssl_cert_password.js
index 47f4acf683f..e52c0d9f926 100644
--- a/jstests/ssl/ssl_cert_password.js
+++ b/jstests/ssl/ssl_cert_password.js
@@ -132,12 +132,12 @@ requireSSLProvider('openssl', function() {
exit_code = MongoRunner.runMongoTool("mongofiles",
{
- db: mongofiles_ssl_dbname,
- port: md.port,
- ssl: "",
- sslCAFile: "jstests/libs/ca.pem",
- sslPEMKeyFile: "jstests/libs/password_protected.pem",
- sslPEMKeyPassword: "qwerty",
+ db: mongofiles_ssl_dbname,
+ port: md.port,
+ ssl: "",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslPEMKeyFile: "jstests/libs/password_protected.pem",
+ sslPEMKeyPassword: "qwerty",
},
"put",
source_filename);
@@ -155,13 +155,13 @@ requireSSLProvider('openssl', function() {
exit_code = MongoRunner.runMongoTool("mongofiles",
{
- db: mongofiles_ssl_dbname,
- local: external_scratch_dir + filename,
- port: md.port,
- ssl: "",
- sslCAFile: "jstests/libs/ca.pem",
- sslPEMKeyFile: "jstests/libs/password_protected.pem",
- sslPEMKeyPassword: "qwerty",
+ db: mongofiles_ssl_dbname,
+ local: external_scratch_dir + filename,
+ port: md.port,
+ ssl: "",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslPEMKeyFile: "jstests/libs/password_protected.pem",
+ sslPEMKeyPassword: "qwerty",
},
"get",
source_filename);
diff --git a/jstests/ssl/ssl_client_certificate_warning_suppression.js b/jstests/ssl/ssl_client_certificate_warning_suppression.js
index f2bbf93e110..4531b4f1bc4 100644
--- a/jstests/ssl/ssl_client_certificate_warning_suppression.js
+++ b/jstests/ssl/ssl_client_certificate_warning_suppression.js
@@ -10,56 +10,56 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
+'use strict';
- function test(suppress) {
- const opts = {
- sslMode: 'requireSSL',
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- waitForConnect: false,
- sslAllowConnectionsWithoutCertificates: "",
- setParameter: {suppressNoTLSPeerCertificateWarning: suppress}
- };
- clearRawMongoProgramOutput();
- const mongod = MongoRunner.runMongod(opts);
+function test(suppress) {
+ const opts = {
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ waitForConnect: false,
+ sslAllowConnectionsWithoutCertificates: "",
+ setParameter: {suppressNoTLSPeerCertificateWarning: suppress}
+ };
+ clearRawMongoProgramOutput();
+ const mongod = MongoRunner.runMongod(opts);
- assert.soon(function() {
- return runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslCAFile',
- CA_CERT,
- '--port',
- mongod.port,
- '--eval',
- 'quit()') === 0;
- }, "mongo did not initialize properly");
+ assert.soon(function() {
+ return runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ mongod.port,
+ '--eval',
+ 'quit()') === 0;
+ }, "mongo did not initialize properly");
- // Keep checking the log file until client metadata is logged since the SSL warning is
- // logged before it.
- assert.soon(
- () => {
- const log = rawMongoProgramOutput();
- return log.search('client metadata') !== -1;
- },
- "logfile should contain 'client metadata'.\n" +
- "Log File Contents\n==============================\n" + rawMongoProgramOutput() +
- "\n==============================\n");
+ // Keep checking the log file until client metadata is logged since the SSL warning is
+ // logged before it.
+ assert.soon(
+ () => {
+ const log = rawMongoProgramOutput();
+ return log.search('client metadata') !== -1;
+ },
+ "logfile should contain 'client metadata'.\n" +
+ "Log File Contents\n==============================\n" + rawMongoProgramOutput() +
+ "\n==============================\n");
- // Now check for the message
- const log = rawMongoProgramOutput();
- assert.eq(suppress, log.search('no SSL certificate provided by peer') === -1);
+ // Now check for the message
+ const log = rawMongoProgramOutput();
+ assert.eq(suppress, log.search('no SSL certificate provided by peer') === -1);
- try {
- MongoRunner.stopMongod(mongod);
- } catch (e) {
- // Depending on timing, exitCode might be 0, 1, or -9.
- // All that matters is that it dies, resmoke will tell us if that failed.
- // So just let it go, the exit code never bothered us anyway.
- }
+ try {
+ MongoRunner.stopMongod(mongod);
+ } catch (e) {
+ // Depending on timing, exitCode might be 0, 1, or -9.
+ // All that matters is that it dies, resmoke will tell us if that failed.
+ // So just let it go, the exit code never bothered us anyway.
}
+}
- test(true);
- test(false);
+test(true);
+test(false);
})();
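
The contract under test reduces to a single equivalence: the warning line appears in the
log if and only if suppression is off. Restated as a sketch:

function warningSuppressed(log) {
    // True when the 'no SSL certificate provided by peer' line never appeared.
    return log.search('no SSL certificate provided by peer') === -1;
}
// test(true) expects warningSuppressed(rawMongoProgramOutput()) to be true;
// test(false) expects the warning to be present.
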
diff --git a/jstests/ssl/ssl_cluster_ca.js b/jstests/ssl/ssl_cluster_ca.js
index 4a38ae708bf..5f76e3b9531 100644
--- a/jstests/ssl/ssl_cluster_ca.js
+++ b/jstests/ssl/ssl_cluster_ca.js
@@ -2,81 +2,81 @@
// and client->server communication using different CAs.
(function() {
- "use strict";
+"use strict";
- function testRS(opts, succeed) {
- const origSkipCheck = TestData.skipCheckDBHashes;
- const rsOpts = {
- // Use localhost so that SAN matches.
- useHostName: false,
- nodes: {node0: opts, node1: opts},
- };
- const rs = new ReplSetTest(rsOpts);
- rs.startSet();
- if (succeed) {
+function testRS(opts, succeed) {
+ const origSkipCheck = TestData.skipCheckDBHashes;
+ const rsOpts = {
+ // Use localhost so that SAN matches.
+ useHostName: false,
+ nodes: {node0: opts, node1: opts},
+ };
+ const rs = new ReplSetTest(rsOpts);
+ rs.startSet();
+ if (succeed) {
+ rs.initiate();
+ assert.commandWorked(rs.getPrimary().getDB('admin').runCommand({isMaster: 1}));
+ } else {
+ assert.throws(function() {
rs.initiate();
- assert.commandWorked(rs.getPrimary().getDB('admin').runCommand({isMaster: 1}));
- } else {
- assert.throws(function() {
- rs.initiate();
- });
- TestData.skipCheckDBHashes = true;
- }
- rs.stopSet();
- TestData.skipCheckDBHashes = origSkipCheck;
+ });
+ TestData.skipCheckDBHashes = true;
}
+ rs.stopSet();
+ TestData.skipCheckDBHashes = origSkipCheck;
+}
- // The name "trusted" in these certificates is misleading.
- // They're just a separate trust chain from the ones without the name.
- // ca.pem signed client.pem and server.pem
- // trusted-ca.pem signed trusted-client.pem and trusted-server.pem
- const valid_options = {
- tlsMode: 'requireTLS',
- // Servers present trusted-server.pem to clients and each other for inbound connections.
- // Peers validate trusted-server.pem using trusted-ca.pem when making those connections.
- tlsCertificateKeyFile: 'jstests/libs/trusted-server.pem',
- tlsCAFile: 'jstests/libs/trusted-ca.pem',
- // Servers making outbound connections to other servers present server.pem to their peers
- // which their peers validate using ca.pem.
- tlsClusterFile: 'jstests/libs/server.pem',
- tlsClusterCAFile: 'jstests/libs/ca.pem',
- // SERVER-36895: IP based hostname validation with SubjectAlternateName
- tlsAllowInvalidHostnames: '',
- };
+// The name "trusted" in these certificates is misleading.
+// They're just a separate trust chain from the ones without the name.
+// ca.pem signed client.pem and server.pem
+// trusted-ca.pem signed trusted-client.pem and trusted-server.pem
+const valid_options = {
+ tlsMode: 'requireTLS',
+ // Servers present trusted-server.pem to clients and each other for inbound connections.
+ // Peers validate trusted-server.pem using trusted-ca.pem when making those connections.
+ tlsCertificateKeyFile: 'jstests/libs/trusted-server.pem',
+ tlsCAFile: 'jstests/libs/trusted-ca.pem',
+ // Servers making outbound connections to other servers present server.pem to their peers
+ // which their peers validate using ca.pem.
+ tlsClusterFile: 'jstests/libs/server.pem',
+ tlsClusterCAFile: 'jstests/libs/ca.pem',
+ // SERVER-36895: IP based hostname validation with SubjectAlternateName
+ tlsAllowInvalidHostnames: '',
+};
- testRS(valid_options, true);
+testRS(valid_options, true);
- const wrong_cluster_file =
- Object.assign({}, valid_options, {tlsClusterFile: valid_options.tlsCertificateKeyFile});
- testRS(wrong_cluster_file, false);
+const wrong_cluster_file =
+ Object.assign({}, valid_options, {tlsClusterFile: valid_options.tlsCertificateKeyFile});
+testRS(wrong_cluster_file, false);
- const wrong_key_file =
- Object.assign({}, valid_options, {tlsCertificateKeyFile: valid_options.tlsClusterFile});
- testRS(wrong_key_file, false);
+const wrong_key_file =
+ Object.assign({}, valid_options, {tlsCertificateKeyFile: valid_options.tlsClusterFile});
+testRS(wrong_key_file, false);
- const mongod = MongoRunner.runMongod(valid_options);
- assert(mongod, "Failed starting standalone mongod with alternate CA");
+const mongod = MongoRunner.runMongod(valid_options);
+assert(mongod, "Failed starting standalone mongod with alternate CA");
- function testConnect(cert, succeed) {
- const mongo = runMongoProgram("mongo",
- "--host",
- "localhost",
- "--port",
- mongod.port,
- "--tls",
- "--tlsCAFile",
- valid_options.tlsCAFile,
- "--tlsCertificateKeyFile",
- cert,
- "--eval",
- ";");
+function testConnect(cert, succeed) {
+ const mongo = runMongoProgram("mongo",
+ "--host",
+ "localhost",
+ "--port",
+ mongod.port,
+ "--tls",
+ "--tlsCAFile",
+ valid_options.tlsCAFile,
+ "--tlsCertificateKeyFile",
+ cert,
+ "--eval",
+ ";");
- // runMongoProgram returns 0 on success
- assert.eq(mongo === 0, succeed);
- }
+ // runMongoProgram returns 0 on success
+ assert.eq(mongo === 0, succeed);
+}
- testConnect('jstests/libs/client.pem', true);
- testConnect('jstests/libs/trusted-client.pem', false);
+testConnect('jstests/libs/client.pem', true);
+testConnect('jstests/libs/trusted-client.pem', false);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
}());
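
Why testConnect accepts client.pem but rejects trusted-client.pem: as the option
semantics suggest, with tlsClusterCAFile set the server validates certificates presented
by incoming connections against it (ca.pem here), while tlsCAFile governs validation of
the certificates shown to this node when it dials out. In sketch form:

// Inbound peer certificates -> validated against tlsClusterCAFile (ca.pem)
// Outbound validation       -> uses tlsCAFile (trusted-ca.pem)
testConnect('jstests/libs/client.pem', true);           // chains to ca.pem
testConnect('jstests/libs/trusted-client.pem', false);  // chains to trusted-ca.pem
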
diff --git a/jstests/ssl/ssl_cluster_file.js b/jstests/ssl/ssl_cluster_file.js
index a7296a3b731..3b21f0896ee 100644
--- a/jstests/ssl/ssl_cluster_file.js
+++ b/jstests/ssl/ssl_cluster_file.js
@@ -1,35 +1,35 @@
(function() {
- "use strict";
+"use strict";
- var CA_CERT = "jstests/libs/ca.pem";
- var SERVER_CERT = "jstests/libs/server.pem";
- var CLIENT_CERT = "jstests/libs/client.pem";
- var BAD_SAN_CERT = "jstests/libs/badSAN.pem";
+var CA_CERT = "jstests/libs/ca.pem";
+var SERVER_CERT = "jstests/libs/server.pem";
+var CLIENT_CERT = "jstests/libs/client.pem";
+var BAD_SAN_CERT = "jstests/libs/badSAN.pem";
- var mongod = MongoRunner.runMongod({
- sslMode: "requireSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT,
- sslClusterFile: BAD_SAN_CERT
- });
+var mongod = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslClusterFile: BAD_SAN_CERT
+});
- var mongo = runMongoProgram("mongo",
- "--host",
- "localhost",
- "--port",
- mongod.port,
- "--ssl",
- "--sslCAFile",
- CA_CERT,
- "--sslPEMKeyFile",
- CLIENT_CERT,
- "--eval",
- ";");
+var mongo = runMongoProgram("mongo",
+ "--host",
+ "localhost",
+ "--port",
+ mongod.port,
+ "--ssl",
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--eval",
+ ";");
- // runMongoProgram returns 0 on success
- assert.eq(
- 0,
- mongo,
- "Connection attempt failed when an irrelevant sslClusterFile was provided to the server!");
- MongoRunner.stopMongod(mongod);
+// runMongoProgram returns 0 on success
+assert.eq(
+ 0,
+ mongo,
+ "Connection attempt failed when an irrelevant sslClusterFile was provided to the server!");
+MongoRunner.stopMongod(mongod);
}());
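
The point of this test is that sslClusterFile only changes the certificate a server
presents when it dials other servers; a standalone never makes such connections, so the
badSAN certificate must not disturb an ordinary client handshake. A summary sketch:

// What each server-side option controls here:
//   sslPEMKeyFile  (server.pem) -> presented to incoming clients
//   sslClusterFile (badSAN.pem) -> presented on outbound server-to-server dials
// A plain shell connection therefore validates server.pem against ca.pem and succeeds.
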
diff --git a/jstests/ssl/ssl_cn_with_san.js b/jstests/ssl/ssl_cn_with_san.js
index 41e039c0a48..c033b935382 100644
--- a/jstests/ssl/ssl_cn_with_san.js
+++ b/jstests/ssl/ssl_cn_with_san.js
@@ -2,45 +2,45 @@
// does not permit connection, but provides a useful error.
(function() {
- 'use strict';
- load('jstests/ssl/libs/ssl_helpers.js');
+'use strict';
+load('jstests/ssl/libs/ssl_helpers.js');
-    // localhost-cn-with-san.pem is signed by ca.pem
- const CA = 'jstests/libs/ca.pem';
- const SERVER = 'jstests/ssl/libs/localhost-cn-with-san.pem';
+// localhost-cn-with-san.pem is signed by ca.pem
+const CA = 'jstests/libs/ca.pem';
+const SERVER = 'jstests/ssl/libs/localhost-cn-with-san.pem';
- const mongod = MongoRunner.runMongod({
- sslMode: 'requireSSL',
- sslPEMKeyFile: SERVER,
- sslCAFile: CA,
- });
- assert(mongod);
+const mongod = MongoRunner.runMongod({
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: SERVER,
+ sslCAFile: CA,
+});
+assert(mongod);
- // Try with `tlsAllowInvalidHostnames` to look for the warning.
+// Try with `tlsAllowInvalidHostnames` to look for the warning.
+clearRawMongoProgramOutput();
+const mongo = runMongoProgram('mongo',
+ '--tls',
+ '--tlsCAFile',
+ CA,
+ 'localhost:' + mongod.port,
+ '--eval',
+ ';',
+ '--tlsAllowInvalidHostnames');
+assert.neq(mongo, 0, "Shell connected when it should have failed");
+assert(rawMongoProgramOutput().includes(' would have matched, but was overridden by SAN'),
+ 'Expected detail warning not seen');
+
+// On OpenSSL only, start without `tlsAllowInvalidHostnames`
+// Windows/Mac will bail out too early to show this message.
+if (determineSSLProvider() === 'openssl') {
clearRawMongoProgramOutput();
- const mongo = runMongoProgram('mongo',
- '--tls',
- '--tlsCAFile',
- CA,
- 'localhost:' + mongod.port,
- '--eval',
- ';',
- '--tlsAllowInvalidHostnames');
+ const mongo = runMongoProgram(
+ 'mongo', '--tls', '--tlsCAFile', CA, 'localhost:' + mongod.port, '--eval', ';');
assert.neq(mongo, 0, "Shell connected when it should have failed");
- assert(rawMongoProgramOutput().includes(' would have matched, but was overridden by SAN'),
+ assert(rawMongoProgramOutput().includes(
+ 'CN: localhost would have matched, but was overridden by SAN'),
'Expected detail warning not seen');
+}
- // On OpenSSL only, start without `tlsAllowInvalidHostnames`
-    // Windows/Mac will bail out too early to show this message.
- if (determineSSLProvider() === 'openssl') {
- clearRawMongoProgramOutput();
- const mongo = runMongoProgram(
- 'mongo', '--tls', '--tlsCAFile', CA, 'localhost:' + mongod.port, '--eval', ';');
- assert.neq(mongo, 0, "Shell connected when it should have failed");
- assert(rawMongoProgramOutput().includes(
- 'CN: localhost would have matched, but was overridden by SAN'),
- 'Expected detail warning not seen');
- }
-
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
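
The failure mode here is standard hostname verification: once a certificate carries any
subjectAltName entries, the CN is ignored for host matching (per RFC 6125), so a CN of
localhost cannot stand in for a SAN list that omits it. The greppable detail message:

assert(rawMongoProgramOutput().includes(
           'CN: localhost would have matched, but was overridden by SAN'),
       'Expected detail warning not seen');
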
diff --git a/jstests/ssl/ssl_count_protocols.js b/jstests/ssl/ssl_count_protocols.js
index a9e3202c30f..ccf31941951 100644
--- a/jstests/ssl/ssl_count_protocols.js
+++ b/jstests/ssl/ssl_count_protocols.js
@@ -1,103 +1,100 @@
// Ensure the server counts the TLS versions used by incoming connections
(function() {
- 'use strict';
-
- load("jstests/ssl/libs/ssl_helpers.js");
-
- var SERVER_CERT = "jstests/libs/server.pem";
- var CLIENT_CERT = "jstests/libs/client.pem";
- var CA_CERT = "jstests/libs/ca.pem";
-
- const protocols = ["TLS1_0", "TLS1_1", "TLS1_2", "TLS1_3"];
-
- // First, figure out what protocol our local TLS stack wants to speak.
- // We're going to observe a connection of this type from the testrunner.
- const expectedDefaultProtocol = detectDefaultTLSProtocol();
- print("Expected default protocol: " + expectedDefaultProtocol);
-
- function runTestWithoutSubset(client) {
- print("Running test: " + client);
- let disabledProtocols = protocols.slice();
- let expectedCounts = [0, 0, 0, 0, 0];
- expectedCounts[protocols.indexOf(expectedDefaultProtocol)] = 1;
- var index = disabledProtocols.indexOf(client);
- disabledProtocols.splice(index, 1);
- expectedCounts[index] += 1;
- print(tojson(expectedCounts));
-
- const conn = MongoRunner.runMongod({
- sslMode: 'allowSSL',
- sslPEMKeyFile: SERVER_CERT,
- sslDisabledProtocols: 'none',
- useLogFiles: true,
- tlsLogVersions: "TLS1_0,TLS1_1,TLS1_2,TLS1_3",
- });
-
- print(disabledProtocols);
- const version_number = client.replace(/TLS/, "").replace(/_/, ".");
-
- const exitStatus =
- runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslPEMKeyFile',
- CLIENT_CERT,
- '--sslCAFile',
- CA_CERT,
- '--port',
- conn.port,
- '--sslDisabledProtocols',
- disabledProtocols.join(","),
- '--eval',
-                            // The JavaScript string "1.0" is implicitly converted to the Number(1).
-                            // Work around this with parseFloat.
- 'one = Number.parseFloat(1).toPrecision(2); a = {};' +
- 'a[one] = NumberLong(' + expectedCounts[0] + ');' +
- 'a["1.1"] = NumberLong(' + expectedCounts[1] + ');' +
- 'a["1.2"] = NumberLong(' + expectedCounts[2] + ');' +
- 'a["1.3"] = NumberLong(' + expectedCounts[3] + ');' +
- 'a["unknown"] = NumberLong(' + expectedCounts[4] + ');' +
- 'assert.eq(db.serverStatus().transportSecurity, a);');
-
- if (expectedDefaultProtocol === "TLS1_2" && client === "TLS1_3") {
- // If the runtime environment does not support TLS 1.3, a client cannot connect to a
- // server if TLS 1.3 is its only usable protocol version.
- assert.neq(
- 0,
- exitStatus,
- "A client which does not support TLS 1.3 should not be able to connect with it");
- MongoRunner.stopMongod(conn);
- return;
- }
-
- assert.eq(0, exitStatus, "");
-
- print(`Checking ${conn.fullOptions.logFile} for TLS version message`);
- const log = cat(conn.fullOptions.logFile);
-
- // Find the last line in the log file and verify it has the right version
- let re = /Accepted connection with TLS Version (1\.\d) from connection 127.0.0.1:\d+/g;
- let result = re.exec(log);
- let lastResult = null;
- while (result !== null) {
- lastResult = result;
- result = re.exec(log);
- }
-
- assert(lastResult !== null,
- "'Accepted connection with TLS Version' log line missing in log file!\n" +
- "Log file contents: " + conn.fullOptions.logFile +
- "\n************************************************************\n" + log +
- "\n************************************************************");
-
- assert.eq(lastResult['1'], version_number);
-
+'use strict';
+
+load("jstests/ssl/libs/ssl_helpers.js");
+
+var SERVER_CERT = "jstests/libs/server.pem";
+var CLIENT_CERT = "jstests/libs/client.pem";
+var CA_CERT = "jstests/libs/ca.pem";
+
+const protocols = ["TLS1_0", "TLS1_1", "TLS1_2", "TLS1_3"];
+
+// First, figure out what protocol our local TLS stack wants to speak.
+// We're going to observe a connection of this type from the testrunner.
+const expectedDefaultProtocol = detectDefaultTLSProtocol();
+print("Expected default protocol: " + expectedDefaultProtocol);
+
+function runTestWithoutSubset(client) {
+ print("Running test: " + client);
+ let disabledProtocols = protocols.slice();
+ let expectedCounts = [0, 0, 0, 0, 0];
+ expectedCounts[protocols.indexOf(expectedDefaultProtocol)] = 1;
+ var index = disabledProtocols.indexOf(client);
+ disabledProtocols.splice(index, 1);
+ expectedCounts[index] += 1;
+ print(tojson(expectedCounts));
+
+ const conn = MongoRunner.runMongod({
+ sslMode: 'allowSSL',
+ sslPEMKeyFile: SERVER_CERT,
+ sslDisabledProtocols: 'none',
+ useLogFiles: true,
+ tlsLogVersions: "TLS1_0,TLS1_1,TLS1_2,TLS1_3",
+ });
+
+ print(disabledProtocols);
+ const version_number = client.replace(/TLS/, "").replace(/_/, ".");
+
+ const exitStatus = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslPEMKeyFile',
+ CLIENT_CERT,
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ conn.port,
+ '--sslDisabledProtocols',
+ disabledProtocols.join(","),
+ '--eval',
+                                       // The JavaScript string "1.0" is implicitly converted to the
+                                       // number 1. Work around this with parseFloat.
+ 'one = Number.parseFloat(1).toPrecision(2); a = {};' +
+ 'a[one] = NumberLong(' + expectedCounts[0] + ');' +
+ 'a["1.1"] = NumberLong(' + expectedCounts[1] + ');' +
+ 'a["1.2"] = NumberLong(' + expectedCounts[2] + ');' +
+ 'a["1.3"] = NumberLong(' + expectedCounts[3] + ');' +
+ 'a["unknown"] = NumberLong(' + expectedCounts[4] + ');' +
+ 'assert.eq(db.serverStatus().transportSecurity, a);');
+
+ if (expectedDefaultProtocol === "TLS1_2" && client === "TLS1_3") {
+ // If the runtime environment does not support TLS 1.3, a client cannot connect to a
+ // server if TLS 1.3 is its only usable protocol version.
+ assert.neq(0,
+ exitStatus,
+ "A client which does not support TLS 1.3 should not be able to connect with it");
MongoRunner.stopMongod(conn);
+ return;
}
- runTestWithoutSubset("TLS1_0");
- runTestWithoutSubset("TLS1_1");
- runTestWithoutSubset("TLS1_2");
- runTestWithoutSubset("TLS1_3");
+ assert.eq(0, exitStatus, "");
+
+ print(`Checking ${conn.fullOptions.logFile} for TLS version message`);
+ const log = cat(conn.fullOptions.logFile);
+
+ // Find the last line in the log file and verify it has the right version
+ let re = /Accepted connection with TLS Version (1\.\d) from connection 127.0.0.1:\d+/g;
+ let result = re.exec(log);
+ let lastResult = null;
+ while (result !== null) {
+ lastResult = result;
+ result = re.exec(log);
+ }
+
+ assert(lastResult !== null,
+ "'Accepted connection with TLS Version' log line missing in log file!\n" +
+ "Log file contents: " + conn.fullOptions.logFile +
+ "\n************************************************************\n" + log +
+ "\n************************************************************");
+
+ assert.eq(lastResult['1'], version_number);
+
+ MongoRunner.stopMongod(conn);
+}
+runTestWithoutSubset("TLS1_0");
+runTestWithoutSubset("TLS1_1");
+runTestWithoutSubset("TLS1_2");
+runTestWithoutSubset("TLS1_3");
})();
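The parseFloat workaround in the eval string above is easy to misread. The underlying issue is that a numeric object key is stringified, so the key 1.0 collapses to "1" and no longer matches the "1.0" counter name in serverStatus().transportSecurity; toPrecision(2) preserves the ".0". A standalone sketch, runnable in the shell:

    let a = {};
    a[1.0] = 'x';
    print(tojson(Object.keys(a)));  // [ "1" ] -- the ".0" is lost

    // toPrecision(2) formats the number as the string "1.0",
    // which survives unchanged as an object key.
    const one = Number.parseFloat(1).toPrecision(2);
    a = {};
    a[one] = 'x';
    print(tojson(Object.keys(a)));  // [ "1.0" ]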
diff --git a/jstests/ssl/ssl_fragment.js b/jstests/ssl/ssl_fragment.js
index bdb16559a0b..cad36a47226 100644
--- a/jstests/ssl/ssl_fragment.js
+++ b/jstests/ssl/ssl_fragment.js
@@ -2,63 +2,62 @@
* Test that a large request and response works correctly.
*/
(function() {
- 'use strict';
-
- function runTest(conn) {
-        // SSL packets have a max size of ~16 KB, so to test packet fragmentation support, create a
-        // string larger than 16 KB.
- const chunk = 'E$%G^56w4v5v54Vv$V@#t2#%t56u7B$ub%6 NU@ Y3qv4Yq%yq4C%yx$%zh'; // random data
- let s = '';
- while (s.length < (8 * 1024 * 1024)) {
- s += chunk;
- }
-
- const ssl_frag = conn.getCollection('test.ssl_frag');
- assert.writeOK(ssl_frag.insert({_id: "large_str", foo: s}));
-
- const read = ssl_frag.find({_id: "large_str"}).toArray()[0].foo;
- assert.eq(s, read, "Did not receive value written");
+'use strict';
+
+function runTest(conn) {
+    // SSL packets have a max size of ~16 KB, so to test packet fragmentation support, create a
+    // string larger than 16 KB.
+ const chunk = 'E$%G^56w4v5v54Vv$V@#t2#%t56u7B$ub%6 NU@ Y3qv4Yq%yq4C%yx$%zh'; // random data
+ let s = '';
+ while (s.length < (8 * 1024 * 1024)) {
+ s += chunk;
}
- let options = {
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- networkMessageCompressors: 'disabled',
- };
-
- let mongosOptions = {
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- networkMessageCompressors: 'disabled',
- };
-
- if (_isWindows()) {
-        // Force the ASIO stack to do small reads, which will exercise the SChannel buffering code
-        // and significantly slow down the test.
- options = Object.extend(options,
- {setParameter: {"failpoint.smallTLSReads": "{'mode':'alwaysOn'}"}});
- mongosOptions = Object.extend(
- mongosOptions, {setParameter: {"failpoint.smallTLSReads": "{'mode':'alwaysOn'}"}});
+ const ssl_frag = conn.getCollection('test.ssl_frag');
+ assert.writeOK(ssl_frag.insert({_id: "large_str", foo: s}));
+
+ const read = ssl_frag.find({_id: "large_str"}).toArray()[0].foo;
+ assert.eq(s, read, "Did not receive value written");
+}
+
+let options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ networkMessageCompressors: 'disabled',
+};
+
+let mongosOptions = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ networkMessageCompressors: 'disabled',
+};
+
+if (_isWindows()) {
+    // Force the ASIO stack to do small reads, which will exercise the SChannel buffering code
+    // and significantly slow down the test.
+ options =
+ Object.extend(options, {setParameter: {"failpoint.smallTLSReads": "{'mode':'alwaysOn'}"}});
+ mongosOptions = Object.extend(
+ mongosOptions, {setParameter: {"failpoint.smallTLSReads": "{'mode':'alwaysOn'}"}});
+}
+
+const mongod = MongoRunner.runMongod(options);
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
+
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ shards: 3,
+ mongos: 1,
+ config: 1,
+ other: {
+ configOptions: options,
+ mongosOptions: mongosOptions,
+ shardOptions: options,
+ shardAsReplicaSet: false,
}
+});
- const mongod = MongoRunner.runMongod(options);
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- shards: 3,
- mongos: 1,
- config: 1,
- other: {
- configOptions: options,
- mongosOptions: mongosOptions,
- shardOptions: options,
- shardAsReplicaSet: false,
- }
- });
-
- runTest(st.s0);
- st.stop();
-
+runTest(st.s0);
+st.stop();
})();
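For scale, a back-of-the-envelope count of the TLS records the 8 MB payload forces (a sketch; 16 KB is the TLS plaintext record limit, and real traffic adds BSON and wire-protocol framing on top):

    const MAX_TLS_RECORD = 16 * 1024;      // 2^14-byte plaintext limit per record
    const payloadBytes = 8 * 1024 * 1024;  // size of the string the test inserts
    print(Math.ceil(payloadBytes / MAX_TLS_RECORD));  // 512 records minimum each way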
diff --git a/jstests/ssl/ssl_get_more.js b/jstests/ssl/ssl_get_more.js
index 5b75a43b377..45a98b7ff75 100644
--- a/jstests/ssl/ssl_get_more.js
+++ b/jstests/ssl/ssl_get_more.js
@@ -1,60 +1,61 @@
(function() {
- "use strict";
-
- var x509_options = {
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslClusterFile: "jstests/libs/cluster_cert.pem",
- sslAllowInvalidHostnames: "",
- clusterAuthMode: "x509"
- };
-
- const st = new ShardingTest({
- shards: 1,
- other: {
- enableBalancer: true,
- configOptions: x509_options,
- mongosOptions: x509_options,
- rsOptions: x509_options,
- shardOptions: x509_options,
- shardAsReplicaSet: false
- }
- });
-
- st.s.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
- st.s.getDB('admin').auth('admin', 'pwd');
-
- const sessionOptions = {causalConsistency: false};
- const session = st.s.startSession(sessionOptions);
- const db = session.getDatabase("test");
- const coll = db.foo;
-
- coll.createIndex({x: 1});
- coll.createIndex({y: 1});
-
- for (let i = 0; i < 10; i++) {
- const res = assert.commandWorked(
- db.runCommand({listIndexes: coll.getName(), cursor: {batchSize: 0}}));
- const cursor = new DBCommandCursor(db, res);
- assert.eq(3, cursor.itcount());
+"use strict";
+
+var x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslClusterFile: "jstests/libs/cluster_cert.pem",
+ sslAllowInvalidHostnames: "",
+ clusterAuthMode: "x509"
+};
+
+const st = new ShardingTest({
+ shards: 1,
+ other: {
+ enableBalancer: true,
+ configOptions: x509_options,
+ mongosOptions: x509_options,
+ rsOptions: x509_options,
+ shardOptions: x509_options,
+ shardAsReplicaSet: false
}
-
- assert.commandWorked(db.createCollection("bar"));
- assert.commandWorked(db.createCollection("baz"));
-
- for (let i = 0; i < 10; i++) {
- const res =
- assert.commandWorked(db.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
- const cursor = new DBCommandCursor(db, res);
- assert.eq(3, cursor.itcount());
- }
-
-    // Authenticate to the CSRS so ReplSetTest.stopSet() can run its dbhash check.
- if (st.configRS) {
- st.configRS.nodes.forEach((node) => {
- node.getDB('admin').auth('admin', 'pwd');
- });
- }
- st.stop();
+});
+
+st.s.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
+st.s.getDB('admin').auth('admin', 'pwd');
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = st.s.startSession(sessionOptions);
+const db = session.getDatabase("test");
+const coll = db.foo;
+
+coll.createIndex({x: 1});
+coll.createIndex({y: 1});
+
+for (let i = 0; i < 10; i++) {
+ const res =
+ assert.commandWorked(db.runCommand({listIndexes: coll.getName(), cursor: {batchSize: 0}}));
+ const cursor = new DBCommandCursor(db, res);
+ assert.eq(3, cursor.itcount());
+}
+
+assert.commandWorked(db.createCollection("bar"));
+assert.commandWorked(db.createCollection("baz"));
+
+for (let i = 0; i < 10; i++) {
+ const res = assert.commandWorked(db.runCommand({listCollections: 1, cursor: {batchSize: 0}}));
+ const cursor = new DBCommandCursor(db, res);
+ assert.eq(3, cursor.itcount());
+}
+
+// Authenticate to the CSRS so ReplSetTest.stopSet() can run its dbhash check.
+if (st.configRS) {
+ st.configRS.nodes.forEach((node) => {
+ node.getDB('admin').auth('admin', 'pwd');
+ });
+}
+st.stop();
}());
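The batchSize: 0 above is what turns both loops into getMore tests: the first reply carries an empty batch, so every document has to be fetched by follow-up getMore commands over the TLS connection. The idiom in isolation, with a hypothetical collection name:

    // The initial reply contains only a cursor id and no documents;
    // itcount() then drives getMore until the cursor is exhausted.
    const res = assert.commandWorked(db.runCommand({find: 'some_coll', batchSize: 0}));
    const cursor = new DBCommandCursor(db, res);
    print(cursor.itcount());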
diff --git a/jstests/ssl/ssl_intermediate_ca.js b/jstests/ssl/ssl_intermediate_ca.js
index 838f43bcb30..887127d6a73 100644
--- a/jstests/ssl/ssl_intermediate_ca.js
+++ b/jstests/ssl/ssl_intermediate_ca.js
@@ -2,36 +2,36 @@
// in the certificate key file will be sent to the remote.
(function() {
- 'use strict';
+'use strict';
- load('jstests/ssl/libs/ssl_helpers.js');
+load('jstests/ssl/libs/ssl_helpers.js');
- if (determineSSLProvider() === 'windows') {
- // FIXME: SERVER-39574
- print("Skipping test with windows SChannel pending SERVER-39574");
- return;
- }
+if (determineSSLProvider() === 'windows') {
+ // FIXME: SERVER-39574
+ print("Skipping test with windows SChannel pending SERVER-39574");
+ return;
+}
- // server-intermediate-ca was signed by ca.pem, not trusted-ca.pem
- const VALID_CA = 'jstests/libs/ca.pem';
- const INVALID_CA = 'jstests/libs/trusted-ca.pem';
+// server-intermediate-ca was signed by ca.pem, not trusted-ca.pem
+const VALID_CA = 'jstests/libs/ca.pem';
+const INVALID_CA = 'jstests/libs/trusted-ca.pem';
- function runTest(inbound, outbound) {
- const mongod = MongoRunner.runMongod({
- sslMode: 'requireSSL',
- sslAllowConnectionsWithoutCertificates: '',
- sslPEMKeyFile: 'jstests/libs/server-intermediate-ca.pem',
- sslCAFile: outbound,
- sslClusterCAFile: inbound,
- });
- assert(mongod);
- assert.eq(mongod.getDB('admin').system.users.find({}).toArray(), []);
- MongoRunner.stopMongod(mongod);
- }
+function runTest(inbound, outbound) {
+ const mongod = MongoRunner.runMongod({
+ sslMode: 'requireSSL',
+ sslAllowConnectionsWithoutCertificates: '',
+ sslPEMKeyFile: 'jstests/libs/server-intermediate-ca.pem',
+ sslCAFile: outbound,
+ sslClusterCAFile: inbound,
+ });
+ assert(mongod);
+ assert.eq(mongod.getDB('admin').system.users.find({}).toArray(), []);
+ MongoRunner.stopMongod(mongod);
+}
- // Normal mode, we have a valid CA being presented for outbound and inbound.
- runTest(VALID_CA, VALID_CA);
+// Normal mode, we have a valid CA being presented for outbound and inbound.
+runTest(VALID_CA, VALID_CA);
- // Alternate CA mode, only the inbound CA is valid.
- runTest(VALID_CA, INVALID_CA);
+// Alternate CA mode, only the inbound CA is valid.
+runTest(VALID_CA, INVALID_CA);
})();
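The runTest(inbound, outbound) signature maps onto two distinct trust settings: sslClusterCAFile validates certificates presented by incoming peers, while sslCAFile validates the certificates of servers this node dials out to. Sketched as a single option set (paths taken from the test):

    const opts = {
        sslMode: 'requireSSL',
        sslPEMKeyFile: 'jstests/libs/server-intermediate-ca.pem',
        sslClusterCAFile: 'jstests/libs/ca.pem',   // verifies inbound peers
        sslCAFile: 'jstests/libs/trusted-ca.pem',  // verifies outbound targets
    };
    // Since the test makes no outbound TLS connections, only the inbound
    // CA needs to be valid, which is why runTest(VALID_CA, INVALID_CA) passes.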
diff --git a/jstests/ssl/ssl_private_key.js b/jstests/ssl/ssl_private_key.js
index 5317d6c86fa..9b395668108 100644
--- a/jstests/ssl/ssl_private_key.js
+++ b/jstests/ssl/ssl_private_key.js
@@ -2,35 +2,39 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
-
- const SERVER_CERT = "jstests/libs/server.pem";
- const CA_CERT = "jstests/libs/ca.pem";
- const CLIENT_CERT = "jstests/libs/client_privatekey.pem";
-
- function authAndTest(port) {
- const mongo = runMongoProgram("mongo",
- "--host",
- "localhost",
- "--port",
- port,
- "--ssl",
- "--sslCAFile",
- CA_CERT,
- "--sslPEMKeyFile",
- CLIENT_CERT,
- "--eval",
- "1");
-
- // runMongoProgram returns 0 on success
- assert.eq(0, mongo, "Connection attempt failed");
- }
-
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
-
- let mongo = MongoRunner.runMongod(Object.merge(x509_options, {auth: ""}));
-
- authAndTest(mongo.port);
-
- MongoRunner.stopMongod(mongo);
+"use strict";
+
+const SERVER_CERT = "jstests/libs/server.pem";
+const CA_CERT = "jstests/libs/ca.pem";
+const CLIENT_CERT = "jstests/libs/client_privatekey.pem";
+
+function authAndTest(port) {
+ const mongo = runMongoProgram("mongo",
+ "--host",
+ "localhost",
+ "--port",
+ port,
+ "--ssl",
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--eval",
+ "1");
+
+ // runMongoProgram returns 0 on success
+ assert.eq(0, mongo, "Connection attempt failed");
+}
+
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+
+let mongo = MongoRunner.runMongod(Object.merge(x509_options, {auth: ""}));
+
+authAndTest(mongo.port);
+
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/ssl/ssl_restricted_protocols.js b/jstests/ssl/ssl_restricted_protocols.js
index f09c90c4535..1ea7c0dd209 100644
--- a/jstests/ssl/ssl_restricted_protocols.js
+++ b/jstests/ssl/ssl_restricted_protocols.js
@@ -2,39 +2,35 @@
// protocols.
(function() {
- 'use strict';
-
- var SERVER_CERT = "jstests/libs/server.pem";
- var CLIENT_CERT = "jstests/libs/client.pem";
- var CA_CERT = "jstests/libs/ca.pem";
-
- function runTestWithoutSubset(subset) {
- const disabledProtocols = subset.join(",");
- const conn = MongoRunner.runMongod({
- sslMode: 'allowSSL',
- sslPEMKeyFile: SERVER_CERT,
- sslDisabledProtocols: disabledProtocols
- });
-
- const exitStatus = runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslPEMKeyFile',
- CLIENT_CERT,
- '--sslCAFile',
- CA_CERT,
- '--port',
- conn.port,
- '--eval',
- 'quit()');
-
- assert.eq(0, exitStatus, "");
-
- MongoRunner.stopMongod(conn);
- }
-
- runTestWithoutSubset(["TLS1_0"]);
- runTestWithoutSubset(["TLS1_2"]);
- runTestWithoutSubset(["TLS1_0", "TLS1_1"]);
-
+'use strict';
+
+var SERVER_CERT = "jstests/libs/server.pem";
+var CLIENT_CERT = "jstests/libs/client.pem";
+var CA_CERT = "jstests/libs/ca.pem";
+
+function runTestWithoutSubset(subset) {
+ const disabledProtocols = subset.join(",");
+ const conn = MongoRunner.runMongod(
+ {sslMode: 'allowSSL', sslPEMKeyFile: SERVER_CERT, sslDisabledProtocols: disabledProtocols});
+
+ const exitStatus = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslPEMKeyFile',
+ CLIENT_CERT,
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ conn.port,
+ '--eval',
+ 'quit()');
+
+ assert.eq(0, exitStatus, "");
+
+ MongoRunner.stopMongod(conn);
+}
+
+runTestWithoutSubset(["TLS1_0"]);
+runTestWithoutSubset(["TLS1_2"]);
+runTestWithoutSubset(["TLS1_0", "TLS1_1"]);
})();
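One way to reason about these cases: disabling a subset leaves the shell and server to negotiate the newest protocol both still allow. A simplified model of that choice (real negotiation also depends on what the local TLS library supports, which is why TLS 1.3 gets special-cased elsewhere):

    const ALL = ['TLS1_0', 'TLS1_1', 'TLS1_2', 'TLS1_3'];
    function newestEnabled(disabled) {
        const left = ALL.filter((p) => !disabled.includes(p));
        return left[left.length - 1];
    }
    // "TLS1_3" in this model; a TLS-1.2-only stack would land on TLS 1.2.
    print(newestEnabled(['TLS1_0', 'TLS1_1']));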
diff --git a/jstests/ssl/ssl_uri.js b/jstests/ssl/ssl_uri.js
index 9d43217a46b..830bba98984 100644
--- a/jstests/ssl/ssl_uri.js
+++ b/jstests/ssl/ssl_uri.js
@@ -1,65 +1,65 @@
// Test that the ssl=true/false option is honored in shell URIs.
(function() {
- "use strict";
+"use strict";
- var shouldSucceed = function(uri) {
- var conn = new Mongo(uri);
- var res = conn.getDB('admin').runCommand({"ismaster": 1});
- assert(res.ok);
- };
+var shouldSucceed = function(uri) {
+ var conn = new Mongo(uri);
+ var res = conn.getDB('admin').runCommand({"ismaster": 1});
+ assert(res.ok);
+};
- var shouldFail = function(uri) {
- assert.throws(function(uri) {
- var conn = new Mongo(uri);
- }, [uri], "network error while attempting to run command");
- };
+var shouldFail = function(uri) {
+ assert.throws(function(uri) {
+ var conn = new Mongo(uri);
+ }, [uri], "network error while attempting to run command");
+};
- // Start up a mongod with ssl required.
- var sslMongo = MongoRunner.runMongod({
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- });
+// Start up a mongod with ssl required.
+var sslMongo = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+});
- var sslURI = "mongodb://localhost:" + sslMongo.port + "/admin";
+var sslURI = "mongodb://localhost:" + sslMongo.port + "/admin";
- // When talking to a server with SSL, connecting with ssl=false fails.
- shouldSucceed(sslURI);
- shouldSucceed(sslURI + "?ssl=true");
- shouldFail(sslURI + "?ssl=false");
+// When talking to a server with SSL, connecting with ssl=false fails.
+shouldSucceed(sslURI);
+shouldSucceed(sslURI + "?ssl=true");
+shouldFail(sslURI + "?ssl=false");
- var connectWithURI = function(uri) {
- return runMongoProgram('./mongo',
- '--ssl',
- '--sslAllowInvalidCertificates',
- '--sslCAFile',
- 'jstests/libs/ca.pem',
- '--sslPEMKeyFile',
- 'jstests/libs/client.pem',
- uri,
- '--eval',
- 'db.runCommand({ismaster: 1})');
- };
+var connectWithURI = function(uri) {
+ return runMongoProgram('./mongo',
+ '--ssl',
+ '--sslAllowInvalidCertificates',
+ '--sslCAFile',
+ 'jstests/libs/ca.pem',
+ '--sslPEMKeyFile',
+ 'jstests/libs/client.pem',
+ uri,
+ '--eval',
+ 'db.runCommand({ismaster: 1})');
+};
- var shouldConnect = function(uri) {
- assert.eq(connectWithURI(uri), 0, "should have been able to connect with " + uri);
- };
+var shouldConnect = function(uri) {
+ assert.eq(connectWithURI(uri), 0, "should have been able to connect with " + uri);
+};
- var shouldNotConnect = function(uri) {
- assert.eq(connectWithURI(uri), 1, "should not have been able to connect with " + uri);
- };
+var shouldNotConnect = function(uri) {
+ assert.eq(connectWithURI(uri), 1, "should not have been able to connect with " + uri);
+};
- // When talking to a server with SSL, connecting with ssl=false on the command line fails.
- shouldConnect(sslURI);
- shouldNotConnect(sslURI + "?ssl=false");
- shouldConnect(sslURI + "?ssl=true");
+// When talking to a server with SSL, connecting with ssl=false on the command line fails.
+shouldConnect(sslURI);
+shouldNotConnect(sslURI + "?ssl=false");
+shouldConnect(sslURI + "?ssl=true");
- // Connecting with ssl=true without --ssl will not work
- var res =
- runMongoProgram('./mongo', sslURI + "?ssl=true", '--eval', 'db.runCommand({ismaster: 1})');
- assert.eq(res, 1, "should not have been able to connect without --ssl");
+// Connecting with ssl=true without --ssl will not work
+var res =
+ runMongoProgram('./mongo', sslURI + "?ssl=true", '--eval', 'db.runCommand({ismaster: 1})');
+assert.eq(res, 1, "should not have been able to connect without --ssl");
- // Clean up
- MongoRunner.stopMongod(sslMongo);
+// Clean up
+MongoRunner.stopMongod(sslMongo);
}());
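The assertions above reduce to a small truth table for a server that requires TLS; writing it out makes the asymmetric case visible: ssl=true in the URI cannot compensate for a shell launched without --ssl, while ssl=false actively overrides the flag.

    // suffix appended to the URI, whether the shell got --ssl, and the outcome.
    const cases = [
        {suffix: '',           shellSsl: true,  connects: true},
        {suffix: '?ssl=true',  shellSsl: true,  connects: true},
        {suffix: '?ssl=false', shellSsl: true,  connects: false},
        {suffix: '?ssl=true',  shellSsl: false, connects: false},
    ];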
diff --git a/jstests/ssl/ssl_with_system_ca.js b/jstests/ssl/ssl_with_system_ca.js
index 570f55c1424..10c4c3ae2b3 100644
--- a/jstests/ssl/ssl_with_system_ca.js
+++ b/jstests/ssl/ssl_with_system_ca.js
@@ -5,50 +5,50 @@
// To install trusted-ca.pem for local testing on OSX, invoke the following at a console:
// security add-trusted-cert -d jstests/libs/trusted-ca.pem
(function() {
- 'use strict';
+'use strict';
- const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
- if (HOST_TYPE == "windows") {
-        // The OpenSSL-backed build imports the root CA and intermediate CA.
- runProgram(
- "certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
+const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
+if (HOST_TYPE == "windows") {
+    // The OpenSSL-backed build imports the root CA and intermediate CA.
+ runProgram("certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
-        // The SChannel-backed build follows Windows rules and only trusts the Root store in
-        // Local Machine and Current User.
- runProgram("certutil.exe", "-addstore", "-f", "Root", "jstests\\libs\\trusted-ca.pem");
- }
+    // The SChannel-backed build follows Windows rules and only trusts the Root store in
+    // Local Machine and Current User.
+ runProgram("certutil.exe", "-addstore", "-f", "Root", "jstests\\libs\\trusted-ca.pem");
+}
-    function testWithCerts(prefix) {
-        jsTest.log(`Testing with SSL certs ${prefix}client.pem connecting to ${prefix}server.pem`);
+function testWithCerts(prefix) {
+    jsTest.log(
+        `Testing with SSL certs ${prefix}client.pem connecting to ${prefix}server.pem`);
- // allowSSL to get a non-SSL control connection.
- const conn = MongoRunner.runMongod(
- {sslMode: 'allowSSL', sslPEMKeyFile: 'jstests/libs/' + prefix + 'server.pem'});
-
- let argv = [
- './mongo',
- '--ssl',
- '--port',
- conn.port,
- '--sslPEMKeyFile',
- 'jstests/libs/' + prefix + 'client.pem',
- '--eval',
- ';'
- ];
-
- if (HOST_TYPE == "linux") {
-        // On Linux, we override the default path to the system CA store to point to our
-        // "trusted" CA. On Windows, this CA will have been added to the user's trusted CA list.
- argv.unshift("env", "SSL_CERT_FILE=jstests/libs/trusted-ca.pem");
- }
-
- const exitCode = runMongoProgram.apply(null, argv);
- MongoRunner.stopMongod(conn);
- return exitCode;
+ // allowSSL to get a non-SSL control connection.
+ const conn = MongoRunner.runMongod(
+ {sslMode: 'allowSSL', sslPEMKeyFile: 'jstests/libs/' + prefix + 'server.pem'});
+
+ let argv = [
+ './mongo',
+ '--ssl',
+ '--port',
+ conn.port,
+ '--sslPEMKeyFile',
+ 'jstests/libs/' + prefix + 'client.pem',
+ '--eval',
+ ';'
+ ];
+
+ if (HOST_TYPE == "linux") {
+    // On Linux, we override the default path to the system CA store to point to our
+    // "trusted" CA. On Windows, this CA will have been added to the user's trusted CA list.
+ argv.unshift("env", "SSL_CERT_FILE=jstests/libs/trusted-ca.pem");
}
- assert.neq(0, testWithCerts(''), 'Certs signed with untrusted CA');
- assert.eq(0, testWithCerts('trusted-'), 'Certs signed with trusted CA');
+ const exitCode = runMongoProgram.apply(null, argv);
+ MongoRunner.stopMongod(conn);
+ return exitCode;
+}
+
+assert.neq(0, testWithCerts(''), 'Certs signed with untrusted CA');
+assert.eq(0, testWithCerts('trusted-'), 'Certs signed with trusted CA');
})();
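The argv.unshift trick relies on OpenSSL honoring the SSL_CERT_FILE environment variable whenever no explicit --sslCAFile is passed; prefixing the argument vector with env launches the shell under that override. The pattern on its own, with a hypothetical port and path:

    let argv = ['./mongo', '--ssl', '--port', 27017, '--eval', ';'];
    // Only OpenSSL-backed builds consult SSL_CERT_FILE; SChannel and
    // Secure Transport use the operating-system trust stores instead.
    argv.unshift('env', 'SSL_CERT_FILE=/path/to/trusted-ca.pem');
    const exitCode = runMongoProgram.apply(null, argv);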
diff --git a/jstests/ssl/ssl_withhold_client_cert.js b/jstests/ssl/ssl_withhold_client_cert.js
index 919409bbd60..c83dff3a717 100644
--- a/jstests/ssl/ssl_withhold_client_cert.js
+++ b/jstests/ssl/ssl_withhold_client_cert.js
@@ -1,53 +1,53 @@
// Test setParameter tlsWithholdClientCertificate
(function() {
- "use strict";
-
- function testRS(opts, expectWarning) {
- const rsOpts = {
- nodes: {node0: opts, node1: opts},
- };
- const rs = new ReplSetTest(rsOpts);
- rs.startSet();
- rs.initiate();
- rs.awaitReplication();
-
- const test = rs.getPrimary().getDB('test');
- test.foo.insert({bar: "baz"});
- rs.awaitReplication();
-
- function checkWarning(member) {
- const observed =
- /no SSL certificate provided by peer/.test(cat(member.fullOptions.logFile));
- assert.eq(observed, expectWarning);
- }
- checkWarning(rs.getPrimary());
- checkWarning(rs.getSecondary());
- rs.stopSet();
- }
+"use strict";
- const base_options = {
- tlsMode: 'requireTLS',
- tlsCertificateKeyFile: 'jstests/libs/server.pem',
- tlsCAFile: 'jstests/libs/ca.pem',
- tlsAllowInvalidHostnames: '',
- useLogFiles: true,
+function testRS(opts, expectWarning) {
+ const rsOpts = {
+ nodes: {node0: opts, node1: opts},
};
- testRS(base_options, false);
-
- const test_options = Object.extend({
- tlsAllowConnectionsWithoutCertificates: '',
- setParameter: 'tlsWithholdClientCertificate=true',
- },
- base_options);
-
- testRS(test_options, true);
-
- const depr_options = Object.extend({
- sslAllowConnectionsWithoutCertificates: '',
- setParameter: 'sslWithholdClientCertificate=true',
- },
- base_options);
-
- testRS(depr_options, true);
+ const rs = new ReplSetTest(rsOpts);
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+
+ const test = rs.getPrimary().getDB('test');
+ test.foo.insert({bar: "baz"});
+ rs.awaitReplication();
+
+ function checkWarning(member) {
+ const observed =
+ /no SSL certificate provided by peer/.test(cat(member.fullOptions.logFile));
+ assert.eq(observed, expectWarning);
+ }
+ checkWarning(rs.getPrimary());
+ checkWarning(rs.getSecondary());
+ rs.stopSet();
+}
+
+const base_options = {
+ tlsMode: 'requireTLS',
+ tlsCertificateKeyFile: 'jstests/libs/server.pem',
+ tlsCAFile: 'jstests/libs/ca.pem',
+ tlsAllowInvalidHostnames: '',
+ useLogFiles: true,
+};
+testRS(base_options, false);
+
+const test_options = Object.extend({
+ tlsAllowConnectionsWithoutCertificates: '',
+ setParameter: 'tlsWithholdClientCertificate=true',
+},
+ base_options);
+
+testRS(test_options, true);
+
+const depr_options = Object.extend({
+ sslAllowConnectionsWithoutCertificates: '',
+ setParameter: 'sslWithholdClientCertificate=true',
+},
+ base_options);
+
+testRS(depr_options, true);
}());
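checkWarning is the stock jstest idiom for asserting on server logs: read the whole file with cat() and probe it with a RegExp, comparing the boolean against an expectation so the same helper covers both the must-appear and must-not-appear cases. Reduced to its core, with a hypothetical log path:

    function checkLog(logFile, expectWarning) {
        const observed = /no SSL certificate provided by peer/.test(cat(logFile));
        assert.eq(observed, expectWarning);
    }
    checkLog('/path/to/mongod.log', true);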
diff --git a/jstests/ssl/ssl_x509_SAN.js b/jstests/ssl/ssl_x509_SAN.js
index f362d039a36..a20d4cf996f 100644
--- a/jstests/ssl/ssl_x509_SAN.js
+++ b/jstests/ssl/ssl_x509_SAN.js
@@ -1,76 +1,75 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
+"use strict";
- const SERVER1_CERT = "jstests/libs/server_SAN.pem";
- const SERVER2_CERT = "jstests/libs/server_SAN2.pem";
- const CA_CERT = "jstests/libs/ca.pem";
- const CLIENT_CERT = "jstests/libs/client.pem";
+const SERVER1_CERT = "jstests/libs/server_SAN.pem";
+const SERVER2_CERT = "jstests/libs/server_SAN2.pem";
+const CA_CERT = "jstests/libs/ca.pem";
+const CLIENT_CERT = "jstests/libs/client.pem";
-    // Some test machines lack IPv6, so test for it by starting a mongod that needs to bind to an
-    // IPv6 address.
- var hasIpv6 = true;
- const mongodHasIpv6 = MongoRunner.runMongod({
- sslMode: "requireSSL",
- sslPEMKeyFile: SERVER1_CERT,
- sslCAFile: CA_CERT,
- ipv6: "",
- bind_ip: "::1,127.0.0.1"
- });
- if (mongodHasIpv6 == null) {
- jsTest.log("Unable to run all tests because ipv6 is not on machine, see BF-10990");
- hasIpv6 = false;
- } else {
- MongoRunner.stopMongod(mongodHasIpv6);
- }
-
- function authAndTest(cert_option) {
- function test_host(host, port) {
- let args = [
- "mongo",
- "--host",
- host,
- "--port",
- port,
- "--ssl",
- "--sslCAFile",
- CA_CERT,
- "--sslPEMKeyFile",
- CLIENT_CERT,
- "--eval",
- ";"
- ];
-
- if (hasIpv6) {
- args.push("--ipv6");
- }
+// Some test machines lack IPv6, so test for it by starting a mongod that needs to bind to an
+// IPv6 address.
+var hasIpv6 = true;
+const mongodHasIpv6 = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER1_CERT,
+ sslCAFile: CA_CERT,
+ ipv6: "",
+ bind_ip: "::1,127.0.0.1"
+});
+if (mongodHasIpv6 == null) {
+ jsTest.log("Unable to run all tests because ipv6 is not on machine, see BF-10990");
+ hasIpv6 = false;
+} else {
+ MongoRunner.stopMongod(mongodHasIpv6);
+}
- const mongo = runMongoProgram.apply(null, args);
+function authAndTest(cert_option) {
+ function test_host(host, port) {
+ let args = [
+ "mongo",
+ "--host",
+ host,
+ "--port",
+ port,
+ "--ssl",
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--eval",
+ ";"
+ ];
- assert.eq(0, mongo, "Connection succeeded");
+ if (hasIpv6) {
+ args.push("--ipv6");
}
- const x509_options = {sslMode: "requireSSL", sslCAFile: CA_CERT, bind_ip_all: ""};
+ const mongo = runMongoProgram.apply(null, args);
- if (hasIpv6) {
- Object.extend(x509_options, {ipv6: ""});
- }
+ assert.eq(0, mongo, "Connection succeeded");
+ }
- let mongod = MongoRunner.runMongod(Object.merge(x509_options, cert_option));
+ const x509_options = {sslMode: "requireSSL", sslCAFile: CA_CERT, bind_ip_all: ""};
- test_host("localhost", mongod.port);
- test_host("127.0.0.1", mongod.port);
- if (hasIpv6) {
- test_host("::1", mongod.port);
- }
+ if (hasIpv6) {
+ Object.extend(x509_options, {ipv6: ""});
+ }
+
+ let mongod = MongoRunner.runMongod(Object.merge(x509_options, cert_option));
- MongoRunner.stopMongod(mongod);
+ test_host("localhost", mongod.port);
+ test_host("127.0.0.1", mongod.port);
+ if (hasIpv6) {
+ test_host("::1", mongod.port);
}
- print("1. Test parsing different values in SAN DNS and IP fields. ");
- authAndTest({sslPEMKeyFile: SERVER1_CERT});
- print("2. Test parsing IP Addresses in SAN DNS fields. ");
- authAndTest({sslPEMKeyFile: SERVER2_CERT});
+ MongoRunner.stopMongod(mongod);
+}
+print("1. Test parsing different values in SAN DNS and IP fields. ");
+authAndTest({sslPEMKeyFile: SERVER1_CERT});
+print("2. Test parsing IP Addresses in SAN DNS fields. ");
+authAndTest({sslPEMKeyFile: SERVER2_CERT});
}());
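The hasIpv6 dance above is a capability probe by side effect: MongoRunner.runMongod returns null when the process fails to come up, and failure to bind ::1 is read as the machine lacking IPv6. The skeleton of the probe, with the TLS options elided:

    let hasIpv6 = true;
    const probe = MongoRunner.runMongod({ipv6: '', bind_ip: '::1,127.0.0.1'});
    if (probe == null) {
        hasIpv6 = false;  // later IPv6-only assertions get skipped
    } else {
        MongoRunner.stopMongod(probe);  // capability confirmed; clean up
    }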
diff --git a/jstests/ssl/ssl_x509_roles.js b/jstests/ssl/ssl_x509_roles.js
index 5a872263baa..eb5bba46ab6 100644
--- a/jstests/ssl/ssl_x509_roles.js
+++ b/jstests/ssl/ssl_x509_roles.js
@@ -3,149 +3,150 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- "use strict";
-
- const SERVER_CERT = "jstests/libs/server.pem";
- const CA_CERT = "jstests/libs/ca.pem";
- const CLIENT_CERT = "jstests/libs/client_roles.pem";
- const CLIENT_ESCAPE_CERT = "jstests/libs/client_escape.pem";
- const CLIENT_UTF8_CERT = "jstests/libs/client_utf8.pem";
- const CLIENT_EMAIL_CERT = "jstests/libs/client_email.pem";
- const CLIENT_TITLE_CERT = "jstests/libs/client_title.pem";
- const CLIENT_CERT_NO_ROLES = "jstests/libs/client.pem";
-
- const CLIENT_USER =
- "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel Users,CN=Kernel Client Peer Role";
-
- const CLIENT_USER_NO_ROLES =
- "CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US";
- const smokeScript =
- 'assert(db.getSiblingDB(\'$external\').auth({ mechanism: \'MONGODB-X509\' }));';
-
- function authAndTest(port, expectSuccess) {
- // First we run the shell with the "smoke" user that has no embedded roles to verify
- // that X509 auth works overall.
- const smoke = runMongoProgram("mongo",
- "--host",
- "localhost",
- "--port",
- port,
- "--ssl",
- "--sslCAFile",
- CA_CERT,
- "--sslPEMKeyFile",
- CLIENT_CERT_NO_ROLES,
- "--eval",
- smokeScript);
- assert.eq(smoke, 0, "Could not auth with smoke user");
-
- const runTest = function(cert, script) {
- const res = runMongoProgram("mongo",
- "--host",
- "localhost",
- "--port",
- port,
- "--ssl",
- "--sslCAFile",
- CA_CERT,
- "--sslPEMKeyFile",
- cert,
- script);
-
- let expectExitCode = 0;
- if (!expectSuccess) {
- if (_isWindows()) {
- expectExitCode = -3;
- } else {
- expectExitCode = 253;
- }
+"use strict";
+
+const SERVER_CERT = "jstests/libs/server.pem";
+const CA_CERT = "jstests/libs/ca.pem";
+const CLIENT_CERT = "jstests/libs/client_roles.pem";
+const CLIENT_ESCAPE_CERT = "jstests/libs/client_escape.pem";
+const CLIENT_UTF8_CERT = "jstests/libs/client_utf8.pem";
+const CLIENT_EMAIL_CERT = "jstests/libs/client_email.pem";
+const CLIENT_TITLE_CERT = "jstests/libs/client_title.pem";
+const CLIENT_CERT_NO_ROLES = "jstests/libs/client.pem";
+
+const CLIENT_USER =
+ "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel Users,CN=Kernel Client Peer Role";
+
+const CLIENT_USER_NO_ROLES = "CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US";
+const smokeScript = 'assert(db.getSiblingDB(\'$external\').auth({ mechanism: \'MONGODB-X509\' }));';
+
+function authAndTest(port, expectSuccess) {
+ // First we run the shell with the "smoke" user that has no embedded roles to verify
+ // that X509 auth works overall.
+ const smoke = runMongoProgram("mongo",
+ "--host",
+ "localhost",
+ "--port",
+ port,
+ "--ssl",
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT_NO_ROLES,
+ "--eval",
+ smokeScript);
+ assert.eq(smoke, 0, "Could not auth with smoke user");
+
+ const runTest = function(cert, script) {
+ const res = runMongoProgram("mongo",
+ "--host",
+ "localhost",
+ "--port",
+ port,
+ "--ssl",
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ cert,
+ script);
+
+ let expectExitCode = 0;
+ if (!expectSuccess) {
+ if (_isWindows()) {
+ expectExitCode = -3;
+ } else {
+ expectExitCode = 253;
}
+ }
- assert.eq(expectExitCode, res, "Connection attempt failed");
- };
-
- // Then we assert success or failure with each of the X509 certs with embedded roles.
- runTest(CLIENT_CERT, "jstests/ssl/libs/ssl_x509_role_auth.js");
- runTest(CLIENT_ESCAPE_CERT, "jstests/ssl/libs/ssl_x509_role_auth_escape.js");
- runTest(CLIENT_UTF8_CERT, "jstests/ssl/libs/ssl_x509_role_auth_utf8.js");
- runTest(CLIENT_EMAIL_CERT, "jstests/ssl/libs/ssl_x509_role_auth_email.js");
- }
-
- const prepConn = function(conn) {
- const admin = conn.getDB('admin');
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- assert(admin.auth('admin', 'admin'));
-
- const external = conn.getDB('$external');
- external.createUser(
- {user: CLIENT_USER_NO_ROLES, roles: [{'role': 'readWrite', 'db': 'test'}]});
+ assert.eq(expectExitCode, res, "Connection attempt failed");
};
- const x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
-
- print("1. Testing x.509 auth to mongod");
- {
- let mongo = MongoRunner.runMongod(Object.merge(x509_options, {auth: ""}));
- prepConn(mongo);
-
- authAndTest(mongo.port, true);
-
- MongoRunner.stopMongod(mongo);
- }
-
- jsTestLog("2. Testing disabling x.509 auth with roles");
- {
- const mongo = MongoRunner.runMongod(Object.merge(
- x509_options, {auth: "", setParameter: "allowRolesFromX509Certificates=false"}));
-
- prepConn(mongo);
-
- authAndTest(mongo.port, false);
-
- MongoRunner.stopMongod(mongo);
- }
-
- print("3. Testing x.509 auth to mongos");
- {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- let st = new ShardingTest({
- shards: 1,
- mongos: 1,
- other: {
- keyFile: 'jstests/libs/key1',
- configOptions: x509_options,
- mongosOptions: x509_options,
- shardOptions: x509_options,
- useHostname: false,
- shardAsReplicaSet: false
- }
- });
-
- prepConn(st.s0);
- authAndTest(st.s0.port, true);
- st.stop();
- }
-
- print("4. Testing x.509 auth to mongos with x509 roles disabled");
- {
- const localOptions =
- Object.merge(x509_options, {setParameter: "allowRolesFromX509Certificates=false"});
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- let st = new ShardingTest({
- shards: 1,
- mongos: 1,
- other: {
- keyFile: 'jstests/libs/key1',
- configOptions: localOptions,
- mongosOptions: localOptions,
- shardOptions: localOptions,
- useHostname: false,
- shardAsReplicaSet: false
- }
- });
-
- prepConn(st.s0);
- authAndTest(st.s0.port, false);
- st.stop();
- }
+ // Then we assert success or failure with each of the X509 certs with embedded roles.
+ runTest(CLIENT_CERT, "jstests/ssl/libs/ssl_x509_role_auth.js");
+ runTest(CLIENT_ESCAPE_CERT, "jstests/ssl/libs/ssl_x509_role_auth_escape.js");
+ runTest(CLIENT_UTF8_CERT, "jstests/ssl/libs/ssl_x509_role_auth_utf8.js");
+ runTest(CLIENT_EMAIL_CERT, "jstests/ssl/libs/ssl_x509_role_auth_email.js");
+}
+
+const prepConn = function(conn) {
+ const admin = conn.getDB('admin');
+ admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+ assert(admin.auth('admin', 'admin'));
+
+ const external = conn.getDB('$external');
+ external.createUser({user: CLIENT_USER_NO_ROLES, roles: [{'role': 'readWrite', 'db': 'test'}]});
+};
+
+const x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+
+print("1. Testing x.509 auth to mongod");
+{
+ let mongo = MongoRunner.runMongod(Object.merge(x509_options, {auth: ""}));
+ prepConn(mongo);
+
+ authAndTest(mongo.port, true);
+
+ MongoRunner.stopMongod(mongo);
+}
+
+jsTestLog("2. Testing disabling x.509 auth with roles");
+{
+ const mongo = MongoRunner.runMongod(Object.merge(
+ x509_options, {auth: "", setParameter: "allowRolesFromX509Certificates=false"}));
+
+ prepConn(mongo);
+
+ authAndTest(mongo.port, false);
+
+ MongoRunner.stopMongod(mongo);
+}
+
+print("3. Testing x.509 auth to mongos");
+{
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ let st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ other: {
+ keyFile: 'jstests/libs/key1',
+ configOptions: x509_options,
+ mongosOptions: x509_options,
+ shardOptions: x509_options,
+ useHostname: false,
+ shardAsReplicaSet: false
+ }
+ });
+
+ prepConn(st.s0);
+ authAndTest(st.s0.port, true);
+ st.stop();
+}
+
+print("4. Testing x.509 auth to mongos with x509 roles disabled");
+{
+ const localOptions =
+ Object.merge(x509_options, {setParameter: "allowRolesFromX509Certificates=false"});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ let st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ other: {
+ keyFile: 'jstests/libs/key1',
+ configOptions: localOptions,
+ mongosOptions: localOptions,
+ shardOptions: localOptions,
+ useHostname: false,
+ shardAsReplicaSet: false
+ }
+ });
+
+ prepConn(st.s0);
+ authAndTest(st.s0.port, false);
+ st.stop();
+}
}());
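The expected-exit-code branch encodes how a shell whose --eval script throws terminates on each platform: an abort-style negative status on Windows versus 253 on POSIX systems. Isolated as a helper (codes copied from the test, not independently verified here):

    function expectedExitCode(expectSuccess) {
        if (expectSuccess) {
            return 0;  // script ran to completion
        }
        return _isWindows() ? -3 : 253;  // uncaught JS exception under --eval
    }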
diff --git a/jstests/ssl/upgrade_noauth_to_x509_ssl.js b/jstests/ssl/upgrade_noauth_to_x509_ssl.js
index ba1f53bdb8e..f533047d1b7 100644
--- a/jstests/ssl/upgrade_noauth_to_x509_ssl.js
+++ b/jstests/ssl/upgrade_noauth_to_x509_ssl.js
@@ -13,46 +13,46 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
- var dbName = 'upgradeToX509';
+'use strict';
+var dbName = 'upgradeToX509';
- var transitionToX509AllowSSL =
- Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
+var transitionToX509AllowSSL =
+ Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
- // Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
- var x509RequireSSL =
- Object.merge(requireSSL, {transitionToAuth: undefined, clusterAuthMode: 'x509'});
+// Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
+var x509RequireSSL =
+ Object.merge(requireSSL, {transitionToAuth: undefined, clusterAuthMode: 'x509'});
- var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: transitionToX509AllowSSL});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: transitionToX509AllowSSL});
+rst.startSet();
+rst.initiate();
- var rstConn1 = rst.getPrimary();
- var testDB = rstConn1.getDB(dbName);
+var rstConn1 = rst.getPrimary();
+var testDB = rstConn1.getDB(dbName);
- // Create a user to login when auth is enabled later
- assert.commandWorked(rstConn1.adminCommand(
- {createUser: 'root', pwd: 'root', roles: ['root'], writeConcern: {w: 3}}));
+// Create a user to login when auth is enabled later
+assert.commandWorked(rstConn1.adminCommand(
+ {createUser: 'root', pwd: 'root', roles: ['root'], writeConcern: {w: 3}}));
- assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(1, testDB.a.count(), 'Error interacting with replSet');
+assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.eq(1, testDB.a.count(), 'Error interacting with replSet');
- print('=== UPGRADE transition to x509/allowSSL -> transition to x509/preferSSL ===');
- rst.nodes.forEach(function(node) {
- assert.commandWorked(node.adminCommand({setParameter: 1, sslMode: "preferSSL"}));
- });
- rst.awaitSecondaryNodes();
- testDB = rst.getPrimary().getDB(dbName);
- assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(2, testDB.a.count(), 'Error interacting with replSet');
+print('=== UPGRADE transition to x509/allowSSL -> transition to x509/preferSSL ===');
+rst.nodes.forEach(function(node) {
+ assert.commandWorked(node.adminCommand({setParameter: 1, sslMode: "preferSSL"}));
+});
+rst.awaitSecondaryNodes();
+testDB = rst.getPrimary().getDB(dbName);
+assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.eq(2, testDB.a.count(), 'Error interacting with replSet');
- print('=== UPGRADE transition to x509/preferSSL -> x509/requireSSL ===');
- rst.upgradeSet(x509RequireSSL, 'root', 'root');
+print('=== UPGRADE transition to x509/preferSSL -> x509/requireSSL ===');
+rst.upgradeSet(x509RequireSSL, 'root', 'root');
- // upgradeSet leaves its connections logged in as root
- testDB = rst.getPrimary().getDB(dbName);
- assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(3, testDB.a.count(), 'Error interacting with replSet');
+// upgradeSet leaves its connections logged in as root
+testDB = rst.getPrimary().getDB(dbName);
+assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.eq(3, testDB.a.count(), 'Error interacting with replSet');
- rst.stopSet();
+rst.stopSet();
}());
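The test walks the standard three-stage TLS rollout, where every stage keeps mixed-mode traffic working while nodes flip one at a time. As a checklist:

    const stages = [
        {sslMode: 'allowSSL',   behavior: 'accept non-TLS and TLS; do not initiate TLS'},
        {sslMode: 'preferSSL',  behavior: 'accept both; initiate TLS between members'},
        {sslMode: 'requireSSL', behavior: 'TLS only, inbound and outbound'},
    ];
    // allowSSL -> preferSSL is applied live via setParameter, as above;
    // the final jump to requireSSL goes through rst.upgradeSet() restarts.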
diff --git a/jstests/ssl/x509_all_the_oids.js b/jstests/ssl/x509_all_the_oids.js
index f99d71cd182..9cbc94d66c2 100644
--- a/jstests/ssl/x509_all_the_oids.js
+++ b/jstests/ssl/x509_all_the_oids.js
@@ -1,47 +1,46 @@
// Test X509 auth with all known RDN OIDs.
(function() {
- 'use strict';
+'use strict';
- const SERVER_CERT = 'jstests/libs/server.pem';
- const CA_CERT = 'jstests/libs/ca.pem';
+const SERVER_CERT = 'jstests/libs/server.pem';
+const CA_CERT = 'jstests/libs/ca.pem';
- function runTest(conn) {
- const script =
- 'assert(db.getSiblingDB(\'$external\').auth({mechanism: \'MONGODB-X509\'}));';
- clearRawMongoProgramOutput();
- const exitCode = runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslPEMKeyFile',
- 'jstests/libs/client-all-the-oids.pem',
- '--sslCAFile',
- CA_CERT,
- '--port',
- conn.port,
- '--eval',
- script);
+function runTest(conn) {
+ const script = 'assert(db.getSiblingDB(\'$external\').auth({mechanism: \'MONGODB-X509\'}));';
+ clearRawMongoProgramOutput();
+ const exitCode = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslPEMKeyFile',
+ 'jstests/libs/client-all-the-oids.pem',
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ conn.port,
+ '--eval',
+ script);
- // We expect failure, since we can't create a user with this massive username in WT.
- // But at least make sure the error message is sensible.
- assert.neq(exitCode, 0);
- const output = rawMongoProgramOutput();
+ // We expect failure, since we can't create a user with this massive username in WT.
+ // But at least make sure the error message is sensible.
+ assert.neq(exitCode, 0);
+ const output = rawMongoProgramOutput();
- const NAME =
- 'role=Datum-72,pseudonym=Datum-65,dmdName=Datum-54,deltaRevocationList=Datum-53,supportedAlgorithms=Datum-52,houseIdentifier=Datum-51,uniqueMember=Datum-50,distinguishedName=Datum-49,protocolInformation=Datum-48,enhancedSearchGuide=Datum-47,dnQualifier=Datum-46,x500UniqueIdentifier=Datum-45,generationQualifier=Datum-44,initials=Datum-43,GN=Datum-42,name=Datum-41,crossCertificatePair=Datum-40,certificateRevocationList=Datum-39,authorityRevocationList=Datum-38,cACertificate=Datum-37,userCertificate=Datum-36,userPassword=Datum-35,seeAlso=Datum-34,roleOccupant=Datum-33,owner=Datum-32,member=Datum-31,supportedApplicationContext=Datum-30,presentationAddress=Datum-29,preferredDeliveryMethod=Datum-28,destinationIndicator=Datum-27,registeredAddress=Datum-26,internationaliSDNNumber=Datum-25,x121Address=Datum-24,facsimileTelephoneNumber=Datum-23,teletexTerminalIdentifier=Datum-22,telexNumber=Datum-21,telephoneNumber=Datum-20,physicalDeliveryOfficeName=Datum-19,postOfficeBox=Datum-18,postalCode=Datum-17,postalAddress=Datum-16,businessCategory=Datum-15,searchGuide=Datum-14,description=Datum-13,title=Datum-12,OU=Datum-11,O=Datum-10,street=Datum-9,ST=NY,L=Datum-7,C=US,serialNumber=Datum-5,SN=Datum-4,CN=Datum-3';
+ const NAME =
+ 'role=Datum-72,pseudonym=Datum-65,dmdName=Datum-54,deltaRevocationList=Datum-53,supportedAlgorithms=Datum-52,houseIdentifier=Datum-51,uniqueMember=Datum-50,distinguishedName=Datum-49,protocolInformation=Datum-48,enhancedSearchGuide=Datum-47,dnQualifier=Datum-46,x500UniqueIdentifier=Datum-45,generationQualifier=Datum-44,initials=Datum-43,GN=Datum-42,name=Datum-41,crossCertificatePair=Datum-40,certificateRevocationList=Datum-39,authorityRevocationList=Datum-38,cACertificate=Datum-37,userCertificate=Datum-36,userPassword=Datum-35,seeAlso=Datum-34,roleOccupant=Datum-33,owner=Datum-32,member=Datum-31,supportedApplicationContext=Datum-30,presentationAddress=Datum-29,preferredDeliveryMethod=Datum-28,destinationIndicator=Datum-27,registeredAddress=Datum-26,internationaliSDNNumber=Datum-25,x121Address=Datum-24,facsimileTelephoneNumber=Datum-23,teletexTerminalIdentifier=Datum-22,telexNumber=Datum-21,telephoneNumber=Datum-20,physicalDeliveryOfficeName=Datum-19,postOfficeBox=Datum-18,postalCode=Datum-17,postalAddress=Datum-16,businessCategory=Datum-15,searchGuide=Datum-14,description=Datum-13,title=Datum-12,OU=Datum-11,O=Datum-10,street=Datum-9,ST=NY,L=Datum-7,C=US,serialNumber=Datum-5,SN=Datum-4,CN=Datum-3';
- assert(output.includes('Error: Could not find user "' + NAME + '" for db "$external"'),
- "Shell is missing unknown user message");
- }
+ assert(output.includes('Error: Could not find user "' + NAME + '" for db "$external"'),
+ "Shell is missing unknown user message");
+}
- // Standalone.
- const mongod = MongoRunner.runMongod({
- auth: '',
- sslMode: 'requireSSL',
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT,
- sslAllowInvalidCertificates: '',
- });
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+// Standalone.
+const mongod = MongoRunner.runMongod({
+ auth: '',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslAllowInvalidCertificates: '',
+});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/ssl/x509_custom.js b/jstests/ssl/x509_custom.js
index d875a5121c2..43bf23c989b 100644
--- a/jstests/ssl/x509_custom.js
+++ b/jstests/ssl/x509_custom.js
@@ -1,56 +1,56 @@
// Test X509 auth with custom OIDs.
(function() {
- 'use strict';
-
- const SERVER_CERT = 'jstests/libs/server.pem';
- const CA_CERT = 'jstests/libs/ca.pem';
-
- function testClient(conn, name) {
- let auth = {mechanism: 'MONGODB-X509'};
- if (name !== null) {
- auth.name = name;
- }
- const script = 'assert(db.getSiblingDB(\'$external\').auth(' + tojson(auth) + '));';
- clearRawMongoProgramOutput();
- const exitCode = runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslPEMKeyFile',
- 'jstests/libs/client-custom-oids.pem',
- '--sslCAFile',
- CA_CERT,
- '--port',
- conn.port,
- '--eval',
- script);
-
- assert.eq(exitCode, 0);
- }
-
- function runTest(conn) {
- const NAME =
- 'C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client,1.2.3.56=RandoValue,1.2.3.45=Value\\,Rando';
+'use strict';
- const admin = conn.getDB('admin');
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- admin.auth('admin', 'admin');
+const SERVER_CERT = 'jstests/libs/server.pem';
+const CA_CERT = 'jstests/libs/ca.pem';
- const external = conn.getDB('$external');
- external.createUser({user: NAME, roles: [{'role': 'readWrite', 'db': 'test'}]});
-
- testClient(conn, NAME);
- testClient(conn, null);
+function testClient(conn, name) {
+ let auth = {mechanism: 'MONGODB-X509'};
+ if (name !== null) {
+ auth.name = name;
}
-
- // Standalone.
- const mongod = MongoRunner.runMongod({
- auth: '',
- sslMode: 'requireSSL',
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT,
- sslAllowInvalidCertificates: '',
- });
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+ const script = 'assert(db.getSiblingDB(\'$external\').auth(' + tojson(auth) + '));';
+ clearRawMongoProgramOutput();
+ const exitCode = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslPEMKeyFile',
+ 'jstests/libs/client-custom-oids.pem',
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ conn.port,
+ '--eval',
+ script);
+
+ assert.eq(exitCode, 0);
+}
+
+function runTest(conn) {
+ const NAME =
+ 'C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client,1.2.3.56=RandoValue,1.2.3.45=Value\\,Rando';
+
+ const admin = conn.getDB('admin');
+ admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+ admin.auth('admin', 'admin');
+
+ const external = conn.getDB('$external');
+ external.createUser({user: NAME, roles: [{'role': 'readWrite', 'db': 'test'}]});
+
+ testClient(conn, NAME);
+ testClient(conn, null);
+}
+
+// Standalone.
+const mongod = MongoRunner.runMongod({
+ auth: '',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslAllowInvalidCertificates: '',
+});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
})();
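The last RDN in NAME exercises value escaping: the comma inside Value,Rando must be escaped per RFC 4514, and the JS string literal needs a doubled backslash to emit that single escape character. Counting the layers:

    const rdnValue = 'Value\\,Rando';  // JS source: doubled backslash, then comma
    print(rdnValue);                   // Value\,Rando -- what the DN parser sees
    print(rdnValue.length);            // 12: the backslash and comma are two chars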
diff --git a/jstests/ssl/x509_invalid.js b/jstests/ssl/x509_invalid.js
index ca41850d025..abb9a1ada3f 100644
--- a/jstests/ssl/x509_invalid.js
+++ b/jstests/ssl/x509_invalid.js
@@ -1,61 +1,60 @@
// Test X509 auth when --sslAllowInvalidCertificates is enabled
(function() {
- 'use strict';
-
- const CLIENT_NAME = "CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US";
- const CLIENT_CERT = 'jstests/libs/client.pem';
- const SERVER_CERT = 'jstests/libs/server.pem';
- const CA_CERT = 'jstests/libs/ca.pem';
- const SELF_SIGNED_CERT = 'jstests/libs/client-self-signed.pem';
-
- function testClient(conn, cert, name, shouldSucceed) {
- let auth = {mechanism: 'MONGODB-X509'};
- if (name !== null) {
- auth.user = name;
- }
- const script = 'assert(db.getSiblingDB(\'$external\').auth(' + tojson(auth) + '));';
- clearRawMongoProgramOutput();
- const exitCode = runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslPEMKeyFile',
- cert,
- '--sslCAFile',
- CA_CERT,
- '--port',
- conn.port,
- '--eval',
- script);
-
- assert.eq(shouldSucceed, exitCode === 0, "exitCode = " + tojson(exitCode));
- assert.eq(
- !shouldSucceed,
- rawMongoProgramOutput().includes('No verified subject name available from client'));
+'use strict';
+
+const CLIENT_NAME = "CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US";
+const CLIENT_CERT = 'jstests/libs/client.pem';
+const SERVER_CERT = 'jstests/libs/server.pem';
+const CA_CERT = 'jstests/libs/ca.pem';
+const SELF_SIGNED_CERT = 'jstests/libs/client-self-signed.pem';
+
+function testClient(conn, cert, name, shouldSucceed) {
+ let auth = {mechanism: 'MONGODB-X509'};
+ if (name !== null) {
+ auth.user = name;
}
-
- function runTest(conn) {
- const admin = conn.getDB('admin');
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- admin.auth('admin', 'admin');
-
- const external = conn.getDB('$external');
- external.createUser({user: CLIENT_NAME, roles: [{'role': 'readWrite', 'db': 'test'}]});
-
- testClient(conn, CLIENT_CERT, CLIENT_NAME, true);
- testClient(conn, SELF_SIGNED_CERT, CLIENT_NAME, false);
- testClient(conn, CLIENT_CERT, null, true);
- testClient(conn, SELF_SIGNED_CERT, null, false);
- }
-
- // Standalone.
- const mongod = MongoRunner.runMongod({
- auth: '',
- sslMode: 'requireSSL',
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT,
- sslAllowInvalidCertificates: '',
- });
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+ const script = 'assert(db.getSiblingDB(\'$external\').auth(' + tojson(auth) + '));';
+ clearRawMongoProgramOutput();
+ const exitCode = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslPEMKeyFile',
+ cert,
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ conn.port,
+ '--eval',
+ script);
+
+ assert.eq(shouldSucceed, exitCode === 0, "exitCode = " + tojson(exitCode));
+ assert.eq(!shouldSucceed,
+ rawMongoProgramOutput().includes('No verified subject name available from client'));
+}
+
+function runTest(conn) {
+ const admin = conn.getDB('admin');
+ admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+ admin.auth('admin', 'admin');
+
+ const external = conn.getDB('$external');
+ external.createUser({user: CLIENT_NAME, roles: [{'role': 'readWrite', 'db': 'test'}]});
+
+ testClient(conn, CLIENT_CERT, CLIENT_NAME, true);
+ testClient(conn, SELF_SIGNED_CERT, CLIENT_NAME, false);
+ testClient(conn, CLIENT_CERT, null, true);
+ testClient(conn, SELF_SIGNED_CERT, null, false);
+}
+
+// Standalone.
+const mongod = MongoRunner.runMongod({
+ auth: '',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslAllowInvalidCertificates: '',
+});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/ssl/x509_multivalue.js b/jstests/ssl/x509_multivalue.js
index fc7ef3b0c76..f5fb8375869 100644
--- a/jstests/ssl/x509_multivalue.js
+++ b/jstests/ssl/x509_multivalue.js
@@ -1,55 +1,55 @@
// Test X509 auth with custom OIDs.
(function() {
- 'use strict';
-
- const SERVER_CERT = 'jstests/libs/server.pem';
- const CA_CERT = 'jstests/libs/ca.pem';
-
- function testClient(conn, name) {
- let auth = {mechanism: 'MONGODB-X509'};
- if (name !== null) {
- auth.name = name;
- }
- const script = 'assert(db.getSiblingDB(\'$external\').auth(' + tojson(auth) + '));';
- clearRawMongoProgramOutput();
- const exitCode = runMongoProgram('mongo',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslPEMKeyFile',
- 'jstests/libs/client-multivalue-rdn.pem',
- '--sslCAFile',
- CA_CERT,
- '--port',
- conn.port,
- '--eval',
- script);
-
- assert.eq(exitCode, 0);
- }
-
- function runTest(conn) {
- const NAME = 'L=New York City+ST=New York+C=US,OU=KernelUser+O=MongoDB+CN=client';
+'use strict';
- const admin = conn.getDB('admin');
- admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
- admin.auth('admin', 'admin');
+const SERVER_CERT = 'jstests/libs/server.pem';
+const CA_CERT = 'jstests/libs/ca.pem';
- const external = conn.getDB('$external');
- external.createUser({user: NAME, roles: [{'role': 'readWrite', 'db': 'test'}]});
-
- testClient(conn, NAME);
- testClient(conn, null);
+function testClient(conn, name) {
+ let auth = {mechanism: 'MONGODB-X509'};
+ if (name !== null) {
+ auth.name = name;
}
-
- // Standalone.
- const mongod = MongoRunner.runMongod({
- auth: '',
- sslMode: 'requireSSL',
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT,
- sslAllowInvalidCertificates: '',
- });
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+ const script = 'assert(db.getSiblingDB(\'$external\').auth(' + tojson(auth) + '));';
+ clearRawMongoProgramOutput();
+ const exitCode = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslPEMKeyFile',
+ 'jstests/libs/client-multivalue-rdn.pem',
+ '--sslCAFile',
+ CA_CERT,
+ '--port',
+ conn.port,
+ '--eval',
+ script);
+
+ assert.eq(exitCode, 0);
+}
+
+function runTest(conn) {
+ const NAME = 'L=New York City+ST=New York+C=US,OU=KernelUser+O=MongoDB+CN=client';
+
+ const admin = conn.getDB('admin');
+ admin.createUser({user: "admin", pwd: "admin", roles: ["root"]});
+ admin.auth('admin', 'admin');
+
+ const external = conn.getDB('$external');
+ external.createUser({user: NAME, roles: [{'role': 'readWrite', 'db': 'test'}]});
+
+ testClient(conn, NAME);
+ testClient(conn, null);
+}
+
+// Standalone.
+const mongod = MongoRunner.runMongod({
+ auth: '',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslAllowInvalidCertificates: '',
+});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
})();
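The twist in this NAME is the separator: '+' joins attributes inside one multi-valued RDN while ',' still separates RDNs, so the DN is two RDNs of three attribute=value pairs each. A split mirroring how an RFC 4514 parser groups it (safe here only because the string contains no escapes):

    const NAME = 'L=New York City+ST=New York+C=US,OU=KernelUser+O=MongoDB+CN=client';
    NAME.split(',').forEach(function(rdn) {
        print(tojson(rdn.split('+')));  // three attribute=value pairs per RDN
    });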
diff --git a/jstests/ssl/x509_startup_warning.js b/jstests/ssl/x509_startup_warning.js
index a950ceefa39..6f9a88a4225 100644
--- a/jstests/ssl/x509_startup_warning.js
+++ b/jstests/ssl/x509_startup_warning.js
@@ -1,63 +1,60 @@
// Test for startup warning when X509 auth and sslAllowInvalidCertificates are enabled
(function() {
- 'use strict';
-
- function runTest(checkMongos, opts, expectWarningCertificates, expectWarningHostnames) {
- clearRawMongoProgramOutput();
- let mongo;
-
- if (checkMongos) {
- mongo = MongoRunner.runMongos(Object.assign({
- configdb: "fakeRS/localhost:27017",
- waitForConnect: false,
- },
- opts));
- } else {
- mongo = MongoRunner.runMongod(Object.assign({
- auth: '',
- sslMode: 'preferSSL',
- sslPEMKeyFile: 'jstests/libs/server.pem',
- sslCAFile: 'jstests/libs/ca.pem',
- waitForConnect: false,
- },
- opts));
- }
-
- assert.soon(function() {
- const output = rawMongoProgramOutput();
- return (expectWarningCertificates ==
- output.includes('WARNING: While invalid X509 certificates may be used') &&
- expectWarningHostnames ==
- output.includes(
- 'WARNING: This server will not perform X.509 hostname validation'));
- });
-
- stopMongoProgramByPid(mongo.pid);
+'use strict';
+
+function runTest(checkMongos, opts, expectWarningCertificates, expectWarningHostnames) {
+ clearRawMongoProgramOutput();
+ let mongo;
+
+ if (checkMongos) {
+ mongo = MongoRunner.runMongos(Object.assign({
+ configdb: "fakeRS/localhost:27017",
+ waitForConnect: false,
+ },
+ opts));
+ } else {
+ mongo = MongoRunner.runMongod(Object.assign({
+ auth: '',
+ sslMode: 'preferSSL',
+ sslPEMKeyFile: 'jstests/libs/server.pem',
+ sslCAFile: 'jstests/libs/ca.pem',
+ waitForConnect: false,
+ },
+ opts));
}
- function runTests(checkMongos) {
- // Don't expect a warning for certificates and hostnames when we're not using both options
- // together.
- runTest(checkMongos, {}, false, false);
+ assert.soon(function() {
+ const output = rawMongoProgramOutput();
+ return (
+ expectWarningCertificates ==
+ output.includes('WARNING: While invalid X509 certificates may be used') &&
+ expectWarningHostnames ==
+ output.includes('WARNING: This server will not perform X.509 hostname validation'));
+ });
- // Do expect a warning for certificates when we're combining options.
- runTest(checkMongos, {sslAllowInvalidCertificates: ''}, true, false);
+ stopMongoProgramByPid(mongo.pid);
+}
- // Do expect a warning for hostnames.
- runTest(checkMongos, {sslAllowInvalidHostnames: ''}, false, true);
+function runTests(checkMongos) {
+ // Don't expect a warning for certificates and hostnames when we're not using both options
+ // together.
+ runTest(checkMongos, {}, false, false);
- // Do expect a warning for certificates and hostnames.
- runTest(checkMongos,
- {sslAllowInvalidCertificates: '', sslAllowInvalidHostnames: ''},
- true,
- true);
- }
+ // Do expect a warning for certificates when we're combining options.
+ runTest(checkMongos, {sslAllowInvalidCertificates: ''}, true, false);
+
+ // Do expect a warning for hostnames.
+ runTest(checkMongos, {sslAllowInvalidHostnames: ''}, false, true);
- // Run tests on mongos
- runTests(true);
+ // Do expect a warning for certificates and hostnames.
+ runTest(
+ checkMongos, {sslAllowInvalidCertificates: '', sslAllowInvalidHostnames: ''}, true, true);
+}
- // Run tests on mongod
- runTests(false);
+// Run tests on mongos
+runTests(true);
+// Run tests on mongod
+runTests(false);
})();
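
The mechanism this test relies on, polling a child process's captured output until an expected warning does or does not appear, reduces to a small helper. A sketch assuming the jstests globals (clearRawMongoProgramOutput, rawMongoProgramOutput, stopMongoProgramByPid); the name `awaitWarnings` is hypothetical:

// Sketch: start a process without waiting for connect, then poll its output.
// assert.soon retries the predicate until it holds or the test times out.
function awaitWarnings(startProc, wantCertWarning, wantHostnameWarning) {
    clearRawMongoProgramOutput();
    const proc = startProc();  // e.g. () => MongoRunner.runMongod({waitForConnect: false, ...})
    assert.soon(function() {
        const output = rawMongoProgramOutput();
        return (wantCertWarning ===
                    output.includes('WARNING: While invalid X509 certificates may be used') &&
                wantHostnameWarning ===
                    output.includes('WARNING: This server will not perform X.509 hostname validation'));
    });
    stopMongoProgramByPid(proc.pid);
}
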
diff --git a/jstests/sslSpecial/SERVER-26369.js b/jstests/sslSpecial/SERVER-26369.js
index 737159a9183..6118bacd1db 100644
--- a/jstests/sslSpecial/SERVER-26369.js
+++ b/jstests/sslSpecial/SERVER-26369.js
@@ -3,23 +3,23 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- load("jstests/ssl/libs/ssl_helpers.js");
+load("jstests/ssl/libs/ssl_helpers.js");
- const st = new ShardingTest({shards: {rs0: {nodes: 1}}});
- let opts = {
- sslMode: "allowSSL",
- sslPEMKeyFile: "jstests/libs/client.pem",
- sslCAFile: "jstests/libs/ca.pem",
- shardsvr: ''
- };
- requireSSLProvider('openssl', function() {
- // Only the OpenSSL provider supports encrypted PKCS#8
- opts.sslPEMKeyFile = "jstests/libs/password_protected.pem";
- opts.sslPEMKeyPassword = "qwerty";
- });
+const st = new ShardingTest({shards: {rs0: {nodes: 1}}});
+let opts = {
+ sslMode: "allowSSL",
+ sslPEMKeyFile: "jstests/libs/client.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ shardsvr: ''
+};
+requireSSLProvider('openssl', function() {
+ // Only the OpenSSL provider supports encrypted PKCS#8
+ opts.sslPEMKeyFile = "jstests/libs/password_protected.pem";
+ opts.sslPEMKeyPassword = "qwerty";
+});
- st.rs0.restart(0, opts);
- st.stop();
+st.rs0.restart(0, opts);
+st.stop();
})();
diff --git a/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js b/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js
index 85f775c3b6b..dde05b3a891 100644
--- a/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js
+++ b/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js
@@ -9,18 +9,18 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
+'use strict';
- // Disable auth explicitly
- var noAuthOptions = {noauth: ''};
- var transitionToX509AllowSSL =
- Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
- var x509RequireSSL = Object.merge(requireSSL, {clusterAuthMode: 'x509'});
+// Disable auth explicitly
+var noAuthOptions = {noauth: ''};
+var transitionToX509AllowSSL =
+ Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
+var x509RequireSSL = Object.merge(requireSSL, {clusterAuthMode: 'x509'});
- print('=== Testing no-auth/transitionToAuth cluster ===');
- mixedShardTest(noAuthOptions, transitionToX509AllowSSL, true);
- mixedShardTest(transitionToX509AllowSSL, noAuthOptions, true);
+print('=== Testing no-auth/transitionToAuth cluster ===');
+mixedShardTest(noAuthOptions, transitionToX509AllowSSL, true);
+mixedShardTest(transitionToX509AllowSSL, noAuthOptions, true);
- print('=== Testing transitionToAuth/transitionToAuth cluster ===');
- mixedShardTest(transitionToX509AllowSSL, transitionToX509AllowSSL, true);
+print('=== Testing transitionToAuth/transitionToAuth cluster ===');
+mixedShardTest(transitionToX509AllowSSL, transitionToX509AllowSSL, true);
}());
diff --git a/jstests/sslSpecial/tls1_0.js b/jstests/sslSpecial/tls1_0.js
index 4947f64e941..699b977725d 100644
--- a/jstests/sslSpecial/tls1_0.js
+++ b/jstests/sslSpecial/tls1_0.js
@@ -1,107 +1,108 @@
// Make sure MongoD starts with TLS 1.0 disabled (except w/ old OpenSSL).
(function() {
- 'use strict';
+'use strict';
- load("jstests/ssl/libs/ssl_helpers.js");
+load("jstests/ssl/libs/ssl_helpers.js");
- // There will be cases where a connect is impossible,
- // let the test runner clean those up.
- TestData.failIfUnterminatedProcesses = false;
+// There will be cases where a connect is impossible,
+// let the test runner clean those up.
+TestData.failIfUnterminatedProcesses = false;
- const supportsTLS1_1 = (function() {
- const openssl = getBuildInfo().openssl || {};
- if (openssl.compiled === undefined) {
- // Native TLS build.
- return true;
- }
- // OpenSSL 0.x.x => TLS 1.0 only.
- if (/OpenSSL 0\./.test(openssl.compiled)) {
- return false;
- }
- // OpenSSL 1.0.0-1.0.0k => TLS 1.0 only.
- if (/OpenSSL 1\.0\.0[ a-k]/.test(openssl.compiled)) {
- return false;
- }
-
- // OpenSSL 1.0.0l and later include TLS 1.1 and 1.2
+const supportsTLS1_1 = (function() {
+ const openssl = getBuildInfo().openssl || {};
+ if (openssl.compiled === undefined) {
+ // Native TLS build.
return true;
- })();
+ }
+ // OpenSSL 0.x.x => TLS 1.0 only.
+ if (/OpenSSL 0\./.test(openssl.compiled)) {
+ return false;
+ }
+ // OpenSSL 1.0.0-1.0.0k => TLS 1.0 only.
+ if (/OpenSSL 1\.0\.0[ a-k]/.test(openssl.compiled)) {
+ return false;
+ }
- const defaultEnableTLS1_0 = (function() {
- // If the build doesn't support TLS 1.1, then TLS 1.0 is left enabled.
- return !supportsTLS1_1;
- })();
+ // OpenSSL 1.0.0l and later include TLS 1.1 and 1.2
+ return true;
+})();
- const supportsTLS1_3 = detectDefaultTLSProtocol() !== "TLS1_2";
+const defaultEnableTLS1_0 = (function() {
+ // If the build doesn't support TLS 1.1, then TLS 1.0 is left enabled.
+ return !supportsTLS1_1;
+})();
- function test(serverDP, clientDP, shouldSucceed) {
- const expectLogMessage = !defaultEnableTLS1_0 && (serverDP === null);
- let serverOpts = {
- sslMode: 'allowSSL',
- sslPEMKeyFile: 'jstests/libs/server.pem',
- sslCAFile: 'jstests/libs/ca.pem',
- waitForConnect: true
- };
- if (serverDP !== null) {
- serverOpts.sslDisabledProtocols = serverDP;
- }
- clearRawMongoProgramOutput();
- const mongod = MongoRunner.runMongod(serverOpts);
- if (!mongod) {
- assert(!shouldSucceed);
- return;
- }
+const supportsTLS1_3 = detectDefaultTLSProtocol() !== "TLS1_2";
- let clientOpts = [];
- if (clientDP !== null) {
- clientOpts = ['--sslDisabledProtocols', clientDP];
- }
- const didSucceed = (0 == runMongoProgram('mongo',
- '--ssl',
- '--port',
- mongod.port,
- '--sslPEMKeyFile',
- 'jstests/libs/client.pem',
- '--sslCAFile',
- 'jstests/libs/ca.pem',
- ...clientOpts,
- '--eval',
- ';'));
+function test(serverDP, clientDP, shouldSucceed) {
+ const expectLogMessage = !defaultEnableTLS1_0 && (serverDP === null);
+ let serverOpts = {
+ sslMode: 'allowSSL',
+ sslPEMKeyFile: 'jstests/libs/server.pem',
+ sslCAFile: 'jstests/libs/ca.pem',
+ waitForConnect: true
+ };
+ if (serverDP !== null) {
+ serverOpts.sslDisabledProtocols = serverDP;
+ }
+ clearRawMongoProgramOutput();
+ const mongod = MongoRunner.runMongod(serverOpts);
+ if (!mongod) {
+ assert(!shouldSucceed);
+ return;
+ }
- MongoRunner.stopMongod(mongod);
+ let clientOpts = [];
+ if (clientDP !== null) {
+ clientOpts = ['--sslDisabledProtocols', clientDP];
+ }
+ const didSucceed = (0 ==
+ runMongoProgram('mongo',
+ '--ssl',
+ '--port',
+ mongod.port,
+ '--sslPEMKeyFile',
+ 'jstests/libs/client.pem',
+ '--sslCAFile',
+ 'jstests/libs/ca.pem',
+ ...clientOpts,
+ '--eval',
+ ';'));
- // Exit code based success/failure.
- assert.eq(
- didSucceed, shouldSucceed, "Running with " + tojson(serverDP) + "/" + tojson(clientDP));
+ MongoRunner.stopMongod(mongod);
- assert.eq(expectLogMessage,
- rawMongoProgramOutput().search('Automatically disabling TLS 1.0') >= 0,
- "TLS 1.0 was/wasn't automatically disabled");
- }
+ // Exit code based success/failure.
+ assert.eq(
+ didSucceed, shouldSucceed, "Running with " + tojson(serverDP) + "/" + tojson(clientDP));
+
+ assert.eq(expectLogMessage,
+ rawMongoProgramOutput().search('Automatically disabling TLS 1.0') >= 0,
+ "TLS 1.0 was/wasn't automatically disabled");
+}
- // Tests with default client behavior (TLS 1.0 disabled if 1.1 available).
- test(null, null, true);
- test('none', null, true);
- test('TLS1_0', null, supportsTLS1_1);
- test('TLS1_1,TLS1_2', null, !supportsTLS1_1 || supportsTLS1_3);
- test('TLS1_1,TLS1_2,TLS1_3', null, !supportsTLS1_1);
- test('TLS1_0,TLS1_1', null, supportsTLS1_1);
- test('TLS1_0,TLS1_1,TLS1_2', null, supportsTLS1_3);
- test('TLS1_0,TLS1_1,TLS1_2,TLS1_3', null, false);
+// Tests with default client behavior (TLS 1.0 disabled if 1.1 available).
+test(null, null, true);
+test('none', null, true);
+test('TLS1_0', null, supportsTLS1_1);
+test('TLS1_1,TLS1_2', null, !supportsTLS1_1 || supportsTLS1_3);
+test('TLS1_1,TLS1_2,TLS1_3', null, !supportsTLS1_1);
+test('TLS1_0,TLS1_1', null, supportsTLS1_1);
+test('TLS1_0,TLS1_1,TLS1_2', null, supportsTLS1_3);
+test('TLS1_0,TLS1_1,TLS1_2,TLS1_3', null, false);
- // Tests with TLS 1.0 always enabled on client.
- test(null, 'none', true);
- test('none', 'none', true);
- test('TLS1_0', 'none', supportsTLS1_1);
- test('TLS1_1,TLS1_2', 'none', true);
- test('TLS1_0,TLS1_1', 'none', supportsTLS1_1);
+// Tests with TLS 1.0 always enabled on client.
+test(null, 'none', true);
+test('none', 'none', true);
+test('TLS1_0', 'none', supportsTLS1_1);
+test('TLS1_1,TLS1_2', 'none', true);
+test('TLS1_0,TLS1_1', 'none', supportsTLS1_1);
- // Tests with TLS 1.0 explicitly disabled on client.
- test(null, 'TLS1_0', supportsTLS1_1);
- test('none', 'TLS1_0', supportsTLS1_1);
- test('TLS1_0', 'TLS1_0', supportsTLS1_1);
- test('TLS1_1,TLS1_2', 'TLS1_0', supportsTLS1_3);
- test('TLS1_1,TLS1_2,TLS1_3', 'TLS1_0', false);
- test('TLS1_0,TLS1_1', 'TLS1_0', supportsTLS1_1);
+// Tests with TLS 1.0 explicitly disabled on client.
+test(null, 'TLS1_0', supportsTLS1_1);
+test('none', 'TLS1_0', supportsTLS1_1);
+test('TLS1_0', 'TLS1_0', supportsTLS1_1);
+test('TLS1_1,TLS1_2', 'TLS1_0', supportsTLS1_3);
+test('TLS1_1,TLS1_2,TLS1_3', 'TLS1_0', false);
+test('TLS1_0,TLS1_1', 'TLS1_0', supportsTLS1_1);
})();
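
The version sniffing above boils down to a single predicate: TLS 1.1 and later are available unless the binary links an OpenSSL older than 1.0.0l. That predicate in isolation, using the same regexes as the test, with two illustrative inputs:

// Sketch: decide TLS 1.1 support from buildInfo's openssl.compiled string.
function buildSupportsTLS1_1(compiled) {
    if (compiled === undefined) {
        return true;  // native (non-OpenSSL) TLS builds support 1.1+
    }
    if (/OpenSSL 0\./.test(compiled)) {
        return false;  // OpenSSL 0.x: TLS 1.0 only
    }
    if (/OpenSSL 1\.0\.0[ a-k]/.test(compiled)) {
        return false;  // OpenSSL 1.0.0 through 1.0.0k: TLS 1.0 only
    }
    return true;  // OpenSSL 1.0.0l and later ship TLS 1.1 and 1.2
}
assert(!buildSupportsTLS1_1('OpenSSL 1.0.0k 5 Feb 2013'));
assert(buildSupportsTLS1_1('OpenSSL 1.0.2g  1 Mar 2016'));
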
diff --git a/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js b/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
index 29c0ee0238b..79330a27c27 100644
--- a/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
+++ b/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
@@ -12,31 +12,31 @@
load('jstests/ssl/libs/ssl_helpers.js');
(function() {
- 'use strict';
- var dbName = 'upgradeToX509';
+'use strict';
+var dbName = 'upgradeToX509';
- // Disable auth explicitly
- var noAuth = {noauth: ''};
+// Disable auth explicitly
+var noAuth = {noauth: ''};
- // Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
- var transitionToX509AllowSSL =
- Object.merge(allowSSL, {noauth: undefined, transitionToAuth: '', clusterAuthMode: 'x509'});
+// Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
+var transitionToX509AllowSSL =
+ Object.merge(allowSSL, {noauth: undefined, transitionToAuth: '', clusterAuthMode: 'x509'});
- var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: noAuth});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: noAuth});
+rst.startSet();
+rst.initiate();
- var testDB = rst.getPrimary().getDB(dbName);
- assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(1, testDB.a.find().itcount(), 'Error interacting with replSet');
+var testDB = rst.getPrimary().getDB(dbName);
+assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.eq(1, testDB.a.find().itcount(), 'Error interacting with replSet');
- print('=== UPGRADE no-auth/no-ssl -> transition to X509/allowSSL ===');
- rst.upgradeSet(transitionToX509AllowSSL);
+print('=== UPGRADE no-auth/no-ssl -> transition to X509/allowSSL ===');
+rst.upgradeSet(transitionToX509AllowSSL);
- // Connect to the new primary
- testDB = rst.getPrimary().getDB(dbName);
- assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
- assert.eq(2, testDB.a.find().itcount(), 'Error interacting with replSet');
+// Connect to the new primary
+testDB = rst.getPrimary().getDB(dbName);
+assert.writeOK(testDB.a.insert({a: 1, str: 'TESTTESTTEST'}));
+assert.eq(2, testDB.a.find().itcount(), 'Error interacting with replSet');
- rst.stopSet();
+rst.stopSet();
}());
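
Merging `{noauth: undefined}` is what clears the flag before upgradeSet; without it the old value would carry over into the restarted nodes. A sketch under the same helpers (allowSSL comes from ssl_helpers.js):

// Sketch: an undefined value removes the inherited flag, while the other
// keys are added or overridden on top of the allowSSL base options.
var transitionOpts =
    Object.merge(allowSSL, {noauth: undefined, transitionToAuth: '', clusterAuthMode: 'x509'});
assert.eq(transitionOpts.noauth, undefined);        // flag cleared
assert.eq(transitionOpts.clusterAuthMode, 'x509');  // flag added
// rst.upgradeSet(transitionOpts) then restarts each member with these options.
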
diff --git a/jstests/sslSpecial/x509_cluster_auth_rollover.js b/jstests/sslSpecial/x509_cluster_auth_rollover.js
index d163274a78c..ab106be3679 100644
--- a/jstests/sslSpecial/x509_cluster_auth_rollover.js
+++ b/jstests/sslSpecial/x509_cluster_auth_rollover.js
@@ -7,106 +7,105 @@
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: 3,
- waitForKeys: false,
- nodeOptions: {
- sslMode: "preferSSL",
- clusterAuthMode: "x509",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslAllowInvalidHostnames: ""
- }
- });
- rst.startSet();
+const rst = new ReplSetTest({
+ nodes: 3,
+ waitForKeys: false,
+ nodeOptions: {
+ sslMode: "preferSSL",
+ clusterAuthMode: "x509",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslAllowInvalidHostnames: ""
+ }
+});
+rst.startSet();
- rst.initiateWithAnyNodeAsPrimary(
- Object.extend(rst.getReplSetConfig(), {writeConcernMajorityJournalDefault: true}));
+rst.initiateWithAnyNodeAsPrimary(
+ Object.extend(rst.getReplSetConfig(), {writeConcernMajorityJournalDefault: true}));
- // Create a user to login as when auth is enabled later
- rst.getPrimary().getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']},
- {w: 3});
- rst.nodes.forEach((node) => {
- assert(node.getDB("admin").auth("root", "root"));
- });
+// Create a user to login as when auth is enabled later
+rst.getPrimary().getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']}, {w: 3});
+rst.nodes.forEach((node) => {
+ assert(node.getDB("admin").auth("root", "root"));
+});
- // All the certificates' DNs share this base
- const dnBase = "C=US, ST=New York, L=New York,";
- // This is the DN of the rollover certificate.
- const rolloverDN = dnBase + " O=MongoDB\\, Inc. (Rollover), OU=Kernel (Rollover), CN=server";
- // This is the DN of the original certificate
- const originalDN = dnBase + " O=MongoDB, OU=Kernel, CN=server";
+// All the certificates' DNs share this base
+const dnBase = "C=US, ST=New York, L=New York,";
+// This is the DN of the rollover certificate.
+const rolloverDN = dnBase + " O=MongoDB\\, Inc. (Rollover), OU=Kernel (Rollover), CN=server";
+// This is the DN of the original certificate
+const originalDN = dnBase + " O=MongoDB, OU=Kernel, CN=server";
- // This will rollover the cluster to a new config in a rolling fashion. It will return when
- // there is a primary and we are able to write to it.
- const rolloverConfig = function(newConfig) {
- const restart = function(node) {
- const nodeId = rst.getNodeId(node);
- rst.stop(nodeId);
- const configId = "n" + nodeId;
- rst.nodeOptions[configId] = Object.merge(rst.nodeOptions[configId], newConfig, true);
- const newNode = rst.start(nodeId, {}, true, true);
- assert(newNode.getDB("admin").auth("root", "root"));
- };
+// This will rollover the cluster to a new config in a rolling fashion. It will return when
+// there is a primary and we are able to write to it.
+const rolloverConfig = function(newConfig) {
+ const restart = function(node) {
+ const nodeId = rst.getNodeId(node);
+ rst.stop(nodeId);
+ const configId = "n" + nodeId;
+ rst.nodeOptions[configId] = Object.merge(rst.nodeOptions[configId], newConfig, true);
+ const newNode = rst.start(nodeId, {}, true, true);
+ assert(newNode.getDB("admin").auth("root", "root"));
+ };
- rst.getSecondaries().forEach(function(secondary) {
- restart(secondary);
- });
+ rst.getSecondaries().forEach(function(secondary) {
+ restart(secondary);
+ });
- restart(rst.getPrimary());
+ restart(rst.getPrimary());
- assert.soon(() => {
- let primary = rst.getPrimary();
- assert.commandWorked(primary.getDB("admin").runCommand({isMaster: 1}));
- assert.writeOK(primary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
+ assert.soon(() => {
+ let primary = rst.getPrimary();
+ assert.commandWorked(primary.getDB("admin").runCommand({isMaster: 1}));
+ assert.writeOK(primary.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'}));
- // Start a shell that connects to the server with the current CA/cert configuration
- // and ensure that it's able to connect and authenticate with x509.
- const shellArgs = [
- 'mongo',
- primary.name,
- '--eval',
- ';',
- '--ssl',
- '--sslAllowInvalidHostnames',
- '--sslCAFile',
- newConfig['sslCAFile'],
- '--sslPEMKeyFile',
- newConfig['sslPEMKeyFile'],
- '--authenticationDatabase=$external',
- '--authenticationMechanism=MONGODB-X509'
- ];
- assert.eq(_runMongoProgram.apply(null, shellArgs), 0);
+ // Start a shell that connects to the server with the current CA/cert configuration
+ // and ensure that it's able to connect and authenticate with x509.
+ const shellArgs = [
+ 'mongo',
+ primary.name,
+ '--eval',
+ ';',
+ '--ssl',
+ '--sslAllowInvalidHostnames',
+ '--sslCAFile',
+ newConfig['sslCAFile'],
+ '--sslPEMKeyFile',
+ newConfig['sslPEMKeyFile'],
+ '--authenticationDatabase=$external',
+ '--authenticationMechanism=MONGODB-X509'
+ ];
+ assert.eq(_runMongoProgram.apply(null, shellArgs), 0);
- return true;
- });
- };
-
- jsTestLog("Rolling over CA certificate to combined old and new CA's");
- rolloverConfig({
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/rollover_ca_merged.pem",
- setParameter: {
- tlsX509ClusterAuthDNOverride: rolloverDN,
- }
+ return true;
});
+};
- jsTestLog("Rolling over to new certificate with new cluster DN and new CA");
- rolloverConfig({
- sslPEMKeyFile: "jstests/libs/rollover_server.pem",
- sslCAFile: "jstests/libs/rollover_ca_merged.pem",
- setParameter: {
- tlsX509ClusterAuthDNOverride: originalDN,
- }
- });
+jsTestLog("Rolling over CA certificate to combined old and new CA's");
+rolloverConfig({
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/rollover_ca_merged.pem",
+ setParameter: {
+ tlsX509ClusterAuthDNOverride: rolloverDN,
+ }
+});
- jsTestLog("Rolling over to new CA only");
- rolloverConfig({
- sslPEMKeyFile: "jstests/libs/rollover_server.pem",
- sslCAFile: "jstests/libs/rollover_ca.pem",
- });
+jsTestLog("Rolling over to new certificate with new cluster DN and new CA");
+rolloverConfig({
+ sslPEMKeyFile: "jstests/libs/rollover_server.pem",
+ sslCAFile: "jstests/libs/rollover_ca_merged.pem",
+ setParameter: {
+ tlsX509ClusterAuthDNOverride: originalDN,
+ }
+});
+
+jsTestLog("Rolling over to new CA only");
+rolloverConfig({
+ sslPEMKeyFile: "jstests/libs/rollover_server.pem",
+ sslCAFile: "jstests/libs/rollover_ca.pem",
+});
- rst.stopSet();
+rst.stopSet();
})();
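
The rollover is a three-phase choreography, each phase a rolling restart that must keep a writable primary throughout. Sketched with the test's own option names (tlsX509ClusterAuthDNOverride lets members whose certificates still carry the other DN keep authenticating mid-transition):

// Sketch of the three phases driven by rolloverConfig() above.
// 1. Trust both CAs; accept the new DN while still presenting the old cert.
rolloverConfig({
    sslPEMKeyFile: 'jstests/libs/server.pem',
    sslCAFile: 'jstests/libs/rollover_ca_merged.pem',
    setParameter: {tlsX509ClusterAuthDNOverride: rolloverDN},
});
// 2. Present the new cert, still trusting both CAs; accept the old DN.
rolloverConfig({
    sslPEMKeyFile: 'jstests/libs/rollover_server.pem',
    sslCAFile: 'jstests/libs/rollover_ca_merged.pem',
    setParameter: {tlsX509ClusterAuthDNOverride: originalDN},
});
// 3. Drop the old CA; every member now uses only the new cert and CA.
rolloverConfig({
    sslPEMKeyFile: 'jstests/libs/rollover_server.pem',
    sslCAFile: 'jstests/libs/rollover_ca.pem',
});
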
diff --git a/jstests/tool/csv1.js b/jstests/tool/csv1.js
index 7a5690062f8..7c0321757ca 100644
--- a/jstests/tool/csv1.js
+++ b/jstests/tool/csv1.js
@@ -7,7 +7,8 @@ c = t.startDB("foo");
base = {
a: 1,
b: "foo,bar\"baz,qux",
- c: 5, 'd d': -6,
+ c: 5,
+ 'd d': -6,
e: '-',
f: "."
};
diff --git a/jstests/tool/dumprestore10.js b/jstests/tool/dumprestore10.js
index 632fac68482..ff63773850f 100644
--- a/jstests/tool/dumprestore10.js
+++ b/jstests/tool/dumprestore10.js
@@ -1,82 +1,81 @@
// simple test to ensure write concern functions as expected
(function() {
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest({name: name, nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({x: i, y: "abc"});
}
+}
- var name = "dumprestore10";
+{
+ step("wait");
+ replTest.awaitReplication();
+}
- function step(msg) {
- msg = msg || "";
- this.x = (this.x || 0) + 1;
- print('\n' + name + ".js step " + this.x + ' ' + msg);
- }
-
- step();
-
- var replTest = new ReplSetTest({name: name, nodes: 2});
- var nodes = replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
- var total = 1000;
-
- {
- step("store data");
- var foo = master.getDB("foo");
- for (i = 0; i < total; i++) {
- foo.bar.insert({x: i, y: "abc"});
- }
- }
+step("mongodump from replset");
- {
- step("wait");
- replTest.awaitReplication();
- }
-
- step("mongodump from replset");
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
- var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+var exitCode = MongoRunner.runMongoTool("mongodump", {
+ host: "127.0.0.1:" + master.port,
+ out: data,
+});
+assert.eq(0, exitCode, "mongodump failed to dump data from the replica set");
- var exitCode = MongoRunner.runMongoTool("mongodump", {
- host: "127.0.0.1:" + master.port,
- out: data,
- });
- assert.eq(0, exitCode, "mongodump failed to dump data from the replica set");
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
- {
- step("remove data after dumping");
- master.getDB("foo").getCollection("bar").drop();
- }
-
- {
- step("wait");
- replTest.awaitReplication();
- }
+{
+ step("wait");
+ replTest.awaitReplication();
+}
- step("try mongorestore with write concern");
+step("try mongorestore with write concern");
- exitCode = MongoRunner.runMongoTool("mongorestore", {
- writeConcern: "2",
- host: "127.0.0.1:" + master.port,
- dir: data,
- });
- assert.eq(0,
- exitCode,
- "mongorestore failed to restore the data to a replica set while using w=2 writes");
+exitCode = MongoRunner.runMongoTool("mongorestore", {
+ writeConcern: "2",
+ host: "127.0.0.1:" + master.port,
+ dir: data,
+});
+assert.eq(
+ 0, exitCode, "mongorestore failed to restore the data to a replica set while using w=2 writes");
- var x = 0;
+var x = 0;
- // no waiting for replication
- x = master.getDB("foo").getCollection("bar").count();
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
- assert.eq(x, total, "mongorestore should have successfully restored the collection");
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
- step("stopSet");
- replTest.stopSet();
+step("stopSet");
+replTest.stopSet();
- step("SUCCESS");
+step("SUCCESS");
}());
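
The write-concern handoff to the tool is the point of this test. In miniature, assuming MongoRunner.runMongoTool (which forwards each option as a `--key value` flag) and the `master` primary handle from the test:

// Sketch: restore into a 2-node replica set, requiring every write to be
// acknowledged by both members (w=2) before the tool proceeds.
var dumpDir = MongoRunner.dataDir + '/dumprestore10-dump1/';
var rc = MongoRunner.runMongoTool('mongorestore', {
    writeConcern: '2',  // forwarded as --writeConcern 2
    host: '127.0.0.1:' + master.port,
    dir: dumpDir,
});
assert.eq(0, rc, 'w=2 restore should succeed while both members are healthy');
// Every insert was acknowledged by both nodes, so the count is correct
// immediately, with no awaitReplication() needed.
assert.eq(master.getDB('foo').getCollection('bar').count(), 1000);
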
diff --git a/jstests/tool/dumprestore3.js b/jstests/tool/dumprestore3.js
index 18817bb20ea..13efe2696c0 100644
--- a/jstests/tool/dumprestore3.js
+++ b/jstests/tool/dumprestore3.js
@@ -2,65 +2,65 @@
// secondary node should fail.
(function() {
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- var name = "dumprestore3";
+var name = "dumprestore3";
- var replTest = new ReplSetTest({name: name, nodes: 2});
- var nodes = replTest.startSet();
- replTest.initiate();
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
+var replTest = new ReplSetTest({name: name, nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
- jsTestLog("populate primary");
- var foo = primary.getDB("foo");
- for (i = 0; i < 20; i++) {
- foo.bar.insert({x: i, y: "abc"});
- }
+jsTestLog("populate primary");
+var foo = primary.getDB("foo");
+for (i = 0; i < 20; i++) {
+ foo.bar.insert({x: i, y: "abc"});
+}
- jsTestLog("wait for secondary");
- replTest.awaitReplication();
+jsTestLog("wait for secondary");
+replTest.awaitReplication();
- jsTestLog("mongodump from primary");
- var data = MongoRunner.dataDir + "/dumprestore3-other1/";
- resetDbpath(data);
- var ret = MongoRunner.runMongoTool("mongodump", {
- host: primary.host,
- out: data,
- });
- assert.eq(ret, 0, "mongodump should exit w/ 0 on primary");
+jsTestLog("mongodump from primary");
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+var ret = MongoRunner.runMongoTool("mongodump", {
+ host: primary.host,
+ out: data,
+});
+assert.eq(ret, 0, "mongodump should exit w/ 0 on primary");
- jsTestLog("try mongorestore to secondary");
- ret = MongoRunner.runMongoTool("mongorestore", {
- host: secondary.host,
- dir: data,
- });
- assert.neq(ret, 0, "mongorestore should exit w/ 1 on secondary");
+jsTestLog("try mongorestore to secondary");
+ret = MongoRunner.runMongoTool("mongorestore", {
+ host: secondary.host,
+ dir: data,
+});
+assert.neq(ret, 0, "mongorestore should exit w/ 1 on secondary");
- jsTestLog("mongoexport from primary");
- dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
- ret = MongoRunner.runMongoTool("mongoexport", {
- host: primary.host,
- out: dataFile,
- db: "foo",
- collection: "bar",
- });
- assert.eq(ret, 0, "mongoexport should exit w/ 0 on primary");
+jsTestLog("mongoexport from primary");
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+ret = MongoRunner.runMongoTool("mongoexport", {
+ host: primary.host,
+ out: dataFile,
+ db: "foo",
+ collection: "bar",
+});
+assert.eq(ret, 0, "mongoexport should exit w/ 0 on primary");
- jsTestLog("mongoimport from secondary");
- ret = MongoRunner.runMongoTool("mongoimport", {
- host: secondary.host,
- file: dataFile,
- });
- assert.neq(ret, 0, "mongoimport should exit w/ 1 on secondary");
+jsTestLog("mongoimport from secondary");
+ret = MongoRunner.runMongoTool("mongoimport", {
+ host: secondary.host,
+ file: dataFile,
+});
+assert.neq(ret, 0, "mongoimport should exit w/ 1 on secondary");
- jsTestLog("stopSet");
- replTest.stopSet();
- jsTestLog("SUCCESS");
+jsTestLog("stopSet");
+replTest.stopSet();
+jsTestLog("SUCCESS");
}());
diff --git a/jstests/tool/dumprestore7.js b/jstests/tool/dumprestore7.js
index 8078a7ab595..782529f0383 100644
--- a/jstests/tool/dumprestore7.js
+++ b/jstests/tool/dumprestore7.js
@@ -1,97 +1,97 @@
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- var name = "dumprestore7";
-
- var step = (function() {
- var n = 0;
- return function(msg) {
- msg = msg || "";
- print('\n' + name + ".js step " + (++n) + ' ' + msg);
- };
- })();
-
- step("starting the replset test");
-
- var replTest = new ReplSetTest({name: name, nodes: 1});
- var nodes = replTest.startSet();
- replTest.initiate();
-
- step("inserting first chunk of data");
- var foo = replTest.getPrimary().getDB("foo");
- for (var i = 0; i < 20; i++) {
- foo.bar.insert({x: i, y: "abc"});
- }
-
- step("waiting for replication");
- replTest.awaitReplication();
- assert.eq(foo.bar.count(), 20, "should have inserted 20 documents");
-
- // The time of the last oplog entry.
- var time = replTest.getPrimary()
- .getDB("local")
- .getCollection("oplog.rs")
- .find()
- .limit(1)
- .sort({$natural: -1})
- .next()
- .ts;
- step("got time of last oplog entry: " + time);
-
- step("inserting second chunk of data");
- for (var i = 30; i < 50; i++) {
- foo.bar.insert({x: i, y: "abc"});
- }
- assert.eq(foo.bar.count(), 40, "should have inserted 40 total documents");
-
- step("try mongodump with $timestamp");
-
- var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
- var query = {ts: {$gt: time}};
- var queryJSON = '{"ts":{"$gt":{"$timestamp":{"t":' + time.t + ',"i":' + time.i + '}}}}';
- print("mongodump query: " + queryJSON);
- if (_isWindows()) {
- queryJSON = '"' + queryJSON.replace(/"/g, '\\"') + '"';
- }
- var testQueryCount =
- replTest.getPrimary().getDB("local").getCollection("oplog.rs").find(query).itcount();
- assert.eq(testQueryCount, 20, "the query should match 20 documents");
-
- var exitCode = MongoRunner.runMongoTool("mongodump", {
- host: "127.0.0.1:" + replTest.ports[0],
- db: "local",
- collection: "oplog.rs",
- query: queryJSON,
- out: data,
- });
- assert.eq(0, exitCode, "monogdump failed to dump the oplog");
-
- step("try mongorestore from $timestamp");
-
- var restoreMongod = MongoRunner.runMongod({});
- exitCode = MongoRunner.runMongoTool("mongorestore", {
- host: "127.0.0.1:" + restoreMongod.port,
- dir: data,
- writeConcern: 1,
- });
- assert.eq(0, exitCode, "mongorestore failed to restore the oplog");
-
- var count = restoreMongod.getDB("local").getCollection("oplog.rs").count();
- if (count != 20) {
- print("mongorestore restored too many documents");
- restoreMongod.getDB("local").getCollection("oplog.rs").find().pretty().shellPrint();
- assert.eq(count, 20, "mongorestore should only have inserted the latter 20 entries");
- }
-
- MongoRunner.stopMongod(restoreMongod);
- step("stopping replset test");
- replTest.stopSet();
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+var name = "dumprestore7";
+
+var step = (function() {
+ var n = 0;
+ return function(msg) {
+ msg = msg || "";
+ print('\n' + name + ".js step " + (++n) + ' ' + msg);
+ };
+})();
+
+step("starting the replset test");
+
+var replTest = new ReplSetTest({name: name, nodes: 1});
+var nodes = replTest.startSet();
+replTest.initiate();
+
+step("inserting first chunk of data");
+var foo = replTest.getPrimary().getDB("foo");
+for (var i = 0; i < 20; i++) {
+ foo.bar.insert({x: i, y: "abc"});
+}
+
+step("waiting for replication");
+replTest.awaitReplication();
+assert.eq(foo.bar.count(), 20, "should have inserted 20 documents");
+
+// The time of the last oplog entry.
+var time = replTest.getPrimary()
+ .getDB("local")
+ .getCollection("oplog.rs")
+ .find()
+ .limit(1)
+ .sort({$natural: -1})
+ .next()
+ .ts;
+step("got time of last oplog entry: " + time);
+
+step("inserting second chunk of data");
+for (var i = 30; i < 50; i++) {
+ foo.bar.insert({x: i, y: "abc"});
+}
+assert.eq(foo.bar.count(), 40, "should have inserted 40 total documents");
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = {ts: {$gt: time}};
+var queryJSON = '{"ts":{"$gt":{"$timestamp":{"t":' + time.t + ',"i":' + time.i + '}}}}';
+print("mongodump query: " + queryJSON);
+if (_isWindows()) {
+ queryJSON = '"' + queryJSON.replace(/"/g, '\\"') + '"';
+}
+var testQueryCount =
+ replTest.getPrimary().getDB("local").getCollection("oplog.rs").find(query).itcount();
+assert.eq(testQueryCount, 20, "the query should match 20 documents");
+
+var exitCode = MongoRunner.runMongoTool("mongodump", {
+ host: "127.0.0.1:" + replTest.ports[0],
+ db: "local",
+ collection: "oplog.rs",
+ query: queryJSON,
+ out: data,
+});
+assert.eq(0, exitCode, "monogdump failed to dump the oplog");
+
+step("try mongorestore from $timestamp");
+
+var restoreMongod = MongoRunner.runMongod({});
+exitCode = MongoRunner.runMongoTool("mongorestore", {
+ host: "127.0.0.1:" + restoreMongod.port,
+ dir: data,
+ writeConcern: 1,
+});
+assert.eq(0, exitCode, "mongorestore failed to restore the oplog");
+
+var count = restoreMongod.getDB("local").getCollection("oplog.rs").count();
+if (count != 20) {
+ print("mongorestore restored too many documents");
+ restoreMongod.getDB("local").getCollection("oplog.rs").find().pretty().shellPrint();
+ assert.eq(count, 20, "mongorestore should only have inserted the latter 20 entries");
+}
+
+MongoRunner.stopMongod(restoreMongod);
+step("stopping replset test");
+replTest.stopSet();
})();
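
The fiddly part here is serializing a BSON Timestamp into the extended-JSON $timestamp form that mongodump's --query flag accepts, plus the extra quoting Windows needs. In isolation (`time` is a Timestamp pulled from the oplog as above):

// Sketch: build an extended-JSON oplog query for entries newer than `time`.
var queryJSON = '{"ts":{"$gt":{"$timestamp":{"t":' + time.t + ',"i":' + time.i + '}}}}';
if (_isWindows()) {
    // cmd.exe strips bare double quotes, so wrap the whole argument and
    // escape the quotes inside it.
    queryJSON = '"' + queryJSON.replace(/"/g, '\\"') + '"';
}
// For Timestamp(1500000000, 5) this yields:
// {"ts":{"$gt":{"$timestamp":{"t":1500000000,"i":5}}}}
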
diff --git a/jstests/tool/dumprestore9.js b/jstests/tool/dumprestore9.js
index 2fb63f70889..9235ac45bd5 100644
--- a/jstests/tool/dumprestore9.js
+++ b/jstests/tool/dumprestore9.js
@@ -1,7 +1,6 @@
// Test disabled until SERVER-3853 is finished
if (0) {
(function() {
-
var name = "dumprestore9";
function step(msg) {
msg = msg || "";
@@ -104,6 +103,5 @@ if (0) {
step("Stop cluster");
s.stop();
step("SUCCESS");
-
})();
}
diff --git a/jstests/tool/dumprestore_auth2.js b/jstests/tool/dumprestore_auth2.js
index 39dcaa19b6e..7e3c8c10238 100644
--- a/jstests/tool/dumprestore_auth2.js
+++ b/jstests/tool/dumprestore_auth2.js
@@ -3,7 +3,6 @@
// Tests that the default auth roles of backup and restore work properly.
var dumpRestoreAuth2 = function(backup_role, restore_role) {
-
t = new ToolTest("dumprestore_auth2", {auth: ""});
coll = t.startDB("foo");
@@ -18,10 +17,9 @@ var dumpRestoreAuth2 = function(backup_role, restore_role) {
admindb.createRole({
role: "customRole",
- privileges: [{
- resource: {db: "jstests_tool_dumprestore_auth2", collection: "foo"},
- actions: ["find"]
- }],
+ privileges: [
+ {resource: {db: "jstests_tool_dumprestore_auth2", collection: "foo"}, actions: ["find"]}
+ ],
roles: []
});
admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
@@ -118,7 +116,6 @@ var dumpRestoreAuth2 = function(backup_role, restore_role) {
admindb.logout();
t.stop();
-
};
// Tests that the default auth roles of backup and restore work properly.
diff --git a/jstests/tool/dumprestore_auth3.js b/jstests/tool/dumprestore_auth3.js
index 8fb70d2a91b..b90eebb30bf 100644
--- a/jstests/tool/dumprestore_auth3.js
+++ b/jstests/tool/dumprestore_auth3.js
@@ -10,7 +10,6 @@ function runTool(toolName, mongod, options) {
}
var dumpRestoreAuth3 = function(backup_role, restore_role) {
-
var mongod = MongoRunner.runMongod();
var admindb = mongod.getDB("admin");
var db = mongod.getDB("foo");
diff --git a/jstests/tool/dumprestore_excludecollections.js b/jstests/tool/dumprestore_excludecollections.js
index cfda283b46a..4bf8dd50c60 100644
--- a/jstests/tool/dumprestore_excludecollections.js
+++ b/jstests/tool/dumprestore_excludecollections.js
@@ -35,10 +35,10 @@ assert.neq(ret, 0, "mongodump started successfully with --excludeCollection and
resetDbpath(dumpDir);
ret = MongoRunner.runMongoTool(
"mongodump", {out: dumpDir, excludeCollectionsWithPrefix: "test", host: mongodSource.host});
-assert.neq(
- ret,
- 0,
- "mongodump started successfully with --excludeCollectionsWithPrefix but " + "no --db option");
+assert.neq(ret,
+ 0,
+ "mongodump started successfully with --excludeCollectionsWithPrefix but " +
+ "no --db option");
resetDbpath(dumpDir);
ret = MongoRunner.runMongoTool("mongodump", {
@@ -48,10 +48,10 @@ ret = MongoRunner.runMongoTool("mongodump", {
excludeCollectionsWithPrefix: "test",
host: mongodSource.host
});
-assert.neq(
- ret,
- 0,
- "mongodump started successfully with --excludeCollectionsWithPrefix and " + "--collection");
+assert.neq(ret,
+ 0,
+ "mongodump started successfully with --excludeCollectionsWithPrefix and " +
+ "--collection");
jsTest.log("Testing proper behavior of collection exclusion");
resetDbpath(dumpDir);
diff --git a/jstests/tool/dumpsecondary.js b/jstests/tool/dumpsecondary.js
index 32f075e10fc..059a3eeb4db 100644
--- a/jstests/tool/dumpsecondary.js
+++ b/jstests/tool/dumpsecondary.js
@@ -1,61 +1,61 @@
(function() {
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
-
- var nodes = replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- db = master.getDB("foo");
- db.foo.save({a: 1000});
- replTest.awaitReplication();
- replTest.awaitSecondaryNodes();
-
- assert.eq(1, db.foo.count(), "setup");
-
- var slaves = replTest._slaves;
- assert(slaves.length == 1, "Expected 1 slave but length was " + slaves.length);
- slave = slaves[0];
-
- var commonOptions = {};
- if (jsTest.options().keyFile) {
- commonOptions.username = jsTest.options().authUser;
- commonOptions.password = jsTest.options().authPassword;
- }
-
- var exitCode = MongoRunner.runMongoTool(
- "mongodump",
- Object.extend({
- host: slave.host,
- out: MongoRunner.dataDir + "/jstests_tool_dumpsecondary_external/",
- },
- commonOptions));
- assert.eq(0, exitCode, "mongodump failed to dump data from the secondary");
-
- db.foo.drop();
- assert.eq(0, db.foo.count(), "after drop");
-
- exitCode = MongoRunner.runMongoTool(
- "mongorestore",
- Object.extend({
- host: master.host,
- dir: MongoRunner.dataDir + "/jstests_tool_dumpsecondary_external/",
- },
- commonOptions));
- assert.eq(0, exitCode, "mongorestore failed to restore data to the primary");
-
- assert.soon("db.foo.findOne()", "no data after sleep");
- assert.eq(1, db.foo.count(), "after restore");
- assert.eq(1000, db.foo.findOne().a, "after restore 2");
-
- resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external');
-
- replTest.stopSet(15);
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+db = master.getDB("foo");
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq(1, db.foo.count(), "setup");
+
+var slaves = replTest._slaves;
+assert(slaves.length == 1, "Expected 1 slave but length was " + slaves.length);
+slave = slaves[0];
+
+var commonOptions = {};
+if (jsTest.options().keyFile) {
+ commonOptions.username = jsTest.options().authUser;
+ commonOptions.password = jsTest.options().authPassword;
+}
+
+var exitCode =
+ MongoRunner.runMongoTool("mongodump",
+ Object.extend({
+ host: slave.host,
+ out: MongoRunner.dataDir + "/jstests_tool_dumpsecondary_external/",
+ },
+ commonOptions));
+assert.eq(0, exitCode, "mongodump failed to dump data from the secondary");
+
+db.foo.drop();
+assert.eq(0, db.foo.count(), "after drop");
+
+exitCode =
+ MongoRunner.runMongoTool("mongorestore",
+ Object.extend({
+ host: master.host,
+ dir: MongoRunner.dataDir + "/jstests_tool_dumpsecondary_external/",
+ },
+ commonOptions));
+assert.eq(0, exitCode, "mongorestore failed to restore data to the primary");
+
+assert.soon("db.foo.findOne()", "no data after sleep");
+assert.eq(1, db.foo.count(), "after restore");
+assert.eq(1000, db.foo.findOne().a, "after restore 2");
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external');
+
+replTest.stopSet(15);
}());
diff --git a/jstests/tool/gridfs.js b/jstests/tool/gridfs.js
index d4d84aeb4c4..d559b580ab9 100644
--- a/jstests/tool/gridfs.js
+++ b/jstests/tool/gridfs.js
@@ -21,8 +21,8 @@ function testGridFS(name) {
// upload file (currently calls filemd5 internally)
var exitCode = MongoRunner.runMongoTool("mongofiles",
{
- port: mongos.port,
- db: name,
+ port: mongos.port,
+ db: name,
},
"put",
filename);
diff --git a/jstests/tool/shell_mkdir.js b/jstests/tool/shell_mkdir.js
index b2ac1eae135..ea3b86384ed 100644
--- a/jstests/tool/shell_mkdir.js
+++ b/jstests/tool/shell_mkdir.js
@@ -1,37 +1,35 @@
// Test the shell's mkdir utility.
(function() {
- "use strict";
-
- var dir = MongoRunner.dataPath + "ShellMkdirTestDirectory";
- removeFile(dir);
-
- // Make a new directory
- var res = mkdir(dir);
- printjson(res);
- assert(res);
- assert(res["exists"]);
- assert(res["created"]);
-
- // Make the same directory again
- res = mkdir(dir);
- printjson(res);
- assert(res);
- assert(res["exists"]);
- assert(!res["created"]);
-
- // Check that we throw, rather than crash, on ""
- // (see https://svn.boost.org/trac/boost/ticket/12495)
- assert.throws(function() {
- mkdir("");
- }, [], "");
-
- removeFile(dir);
-
- // check that other internal path functions do not crash on ""
- assert(pathExists("") === false, "expected pathExists to return false on empty path");
- assert(copyDbpath("", "") === undefined,
- "expected copyDbpath to return undefined on empty path");
- assert(resetDbpath("") === undefined, "expected resetDbpath to return undefined on empty path");
-
+"use strict";
+
+var dir = MongoRunner.dataPath + "ShellMkdirTestDirectory";
+removeFile(dir);
+
+// Make a new directory
+var res = mkdir(dir);
+printjson(res);
+assert(res);
+assert(res["exists"]);
+assert(res["created"]);
+
+// Make the same directory again
+res = mkdir(dir);
+printjson(res);
+assert(res);
+assert(res["exists"]);
+assert(!res["created"]);
+
+// Check that we throw, rather than crash, on ""
+// (see https://svn.boost.org/trac/boost/ticket/12495)
+assert.throws(function() {
+ mkdir("");
+}, [], "");
+
+removeFile(dir);
+
+// check that other internal path functions do not crash on ""
+assert(pathExists("") === false, "expected pathExists to return false on empty path");
+assert(copyDbpath("", "") === undefined, "expected copyDbpath to return undefined on empty path");
+assert(resetDbpath("") === undefined, "expected resetDbpath to return undefined on empty path");
}());
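
The test leans on mkdir's return value, which distinguishes "created just now" from "already existed". A compact sketch of that contract:

// Sketch: the shell's mkdir returns {exists: bool, created: bool}.
var testDir = MongoRunner.dataPath + 'ShellMkdirTestDirectory';
var first = mkdir(testDir);
assert(first.exists && first.created);     // directory made by this call
var second = mkdir(testDir);
assert(second.exists && !second.created);  // already present, nothing created
removeFile(testDir);
// mkdir("") throws rather than crashing (see the boost ticket cited above).
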
diff --git a/jstests/tool/tool_replset.js b/jstests/tool/tool_replset.js
index 162e0e778c6..be65c11118c 100644
--- a/jstests/tool/tool_replset.js
+++ b/jstests/tool/tool_replset.js
@@ -8,95 +8,93 @@
* 6. Export a collection.
* 7. Drop the collection.
* 8. Import the collection.
-*/
+ */
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- var replTest =
- new ReplSetTest({name: 'tool_replset', nodes: 2, oplogSize: 5, nodeOptions: {"vvvvv": ""}});
- var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[0].priority = 3;
- config.members[1].priority = 0;
- replTest.initiate(config);
- var master = replTest.getPrimary();
- assert.eq(nodes[0], master, "incorrect master elected");
- for (var i = 0; i < 100; i++) {
- assert.writeOK(master.getDB("foo").bar.insert({a: i}));
- }
- replTest.awaitReplication();
-
- var replSetConnString =
- "tool_replset/127.0.0.1:" + replTest.ports[0] + ",127.0.0.1:" + replTest.ports[1];
-
- // Test with mongodump/mongorestore
- var data = MongoRunner.dataDir + "/tool_replset-dump1/";
- print("using mongodump to dump the db to " + data);
- var exitCode = MongoRunner.runMongoTool("mongodump", {
- host: replSetConnString,
- out: data,
- });
- assert.eq(0, exitCode, "mongodump failed to dump from the replica set");
-
- print("db successfully dumped to " + data +
- ". dropping collection before testing the restore process");
- assert(master.getDB("foo").bar.drop());
- replTest.awaitReplication();
-
- print("using mongorestore to restore the db from " + data);
- exitCode = MongoRunner.runMongoTool("mongorestore", {
- host: replSetConnString,
- dir: data,
- });
- assert.eq(0, exitCode, "mongorestore failed to restore data to the replica set");
-
- print("db successfully restored, checking count");
- var x = master.getDB("foo").getCollection("bar").count();
- assert.eq(x, 100, "mongorestore should have successfully restored the collection");
-
- replTest.awaitReplication();
-
- // Test with mongoexport/mongoimport
- print("export the collection");
- var extFile = MongoRunner.dataDir + "/tool_replset/export";
- exitCode = MongoRunner.runMongoTool("mongoexport", {
- host: replSetConnString,
- out: extFile,
- db: "foo",
- collection: "bar",
- });
- assert.eq(
- 0, exitCode, "mongoexport failed to export collection 'foo.bar' from the replica set");
-
- print("collection successfully exported, dropping now");
- master.getDB("foo").getCollection("bar").drop();
- replTest.awaitReplication();
-
- print("import the collection");
- exitCode = MongoRunner.runMongoTool("mongoimport", {
- host: replSetConnString,
- file: extFile,
- db: "foo",
- collection: "bar",
- });
- assert.eq(
- 0, exitCode, "mongoimport failed to import collection 'foo.bar' into the replica set");
-
- var x = master.getDB("foo").getCollection("bar").count();
- assert.eq(x, 100, "mongoimport should have successfully imported the collection");
-
- print("all tests successful, stopping replica set");
-
- replTest.stopSet();
-
- print("replica set stopped, test complete");
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+var replTest =
+ new ReplSetTest({name: 'tool_replset', nodes: 2, oplogSize: 5, nodeOptions: {"vvvvv": ""}});
+var nodes = replTest.startSet();
+var config = replTest.getReplSetConfig();
+config.members[0].priority = 3;
+config.members[1].priority = 0;
+replTest.initiate(config);
+var master = replTest.getPrimary();
+assert.eq(nodes[0], master, "incorrect master elected");
+for (var i = 0; i < 100; i++) {
+ assert.writeOK(master.getDB("foo").bar.insert({a: i}));
+}
+replTest.awaitReplication();
+
+var replSetConnString =
+ "tool_replset/127.0.0.1:" + replTest.ports[0] + ",127.0.0.1:" + replTest.ports[1];
+
+// Test with mongodump/mongorestore
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+print("using mongodump to dump the db to " + data);
+var exitCode = MongoRunner.runMongoTool("mongodump", {
+ host: replSetConnString,
+ out: data,
+});
+assert.eq(0, exitCode, "mongodump failed to dump from the replica set");
+
+print("db successfully dumped to " + data +
+ ". dropping collection before testing the restore process");
+assert(master.getDB("foo").bar.drop());
+replTest.awaitReplication();
+
+print("using mongorestore to restore the db from " + data);
+exitCode = MongoRunner.runMongoTool("mongorestore", {
+ host: replSetConnString,
+ dir: data,
+});
+assert.eq(0, exitCode, "mongorestore failed to restore data to the replica set");
+
+print("db successfully restored, checking count");
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+exitCode = MongoRunner.runMongoTool("mongoexport", {
+ host: replSetConnString,
+ out: extFile,
+ db: "foo",
+ collection: "bar",
+});
+assert.eq(0, exitCode, "mongoexport failed to export collection 'foo.bar' from the replica set");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+exitCode = MongoRunner.runMongoTool("mongoimport", {
+ host: replSetConnString,
+ file: extFile,
+ db: "foo",
+ collection: "bar",
+});
+assert.eq(0, exitCode, "mongoimport failed to import collection 'foo.bar' into the replica set");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+print("all tests successful, stopping replica set");
+
+replTest.stopSet();
+
+print("replica set stopped, test complete");
}());
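
The --host string these tools share is the standard replica-set seed-list form, setName/host1,host2, which lets each tool discover the current primary on its own. In miniature:

// Sketch: a seed-list connection string scoped to the replica set name.
var connString =
    'tool_replset/127.0.0.1:' + replTest.ports[0] + ',127.0.0.1:' + replTest.ports[1];
// mongodump/mongorestore/mongoexport/mongoimport all accept it as --host,
// so they keep working even if a different member has become primary.
var rc = MongoRunner.runMongoTool(
    'mongodump', {host: connString, out: MongoRunner.dataDir + '/tool_replset-dump1/'});
assert.eq(0, rc);
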
diff --git a/jstests/watchdog/lib/charybdefs_lib.js b/jstests/watchdog/lib/charybdefs_lib.js
index f80246426d8..bd0d7440e0d 100644
--- a/jstests/watchdog/lib/charybdefs_lib.js
+++ b/jstests/watchdog/lib/charybdefs_lib.js
@@ -94,7 +94,6 @@ function CharybdefsControl(test_name) {
// Wait for watchdog to stop
print("Waiting for MongoDB to hang.");
sleep(fs_delay_sec * 1000);
-
};
/**
@@ -105,7 +104,6 @@ function CharybdefsControl(test_name) {
* @param {number} delay_us - optional delay in microseconds to wait
*/
this.addFault = function(method, file_name, delay_us) {
-
this._runControl("set_fault",
"--methods=" + method,
"--errno=5",
diff --git a/jstests/watchdog/wd_auditpath_hang.js b/jstests/watchdog/wd_auditpath_hang.js
index bd961d55a47..bca58b1f501 100644
--- a/jstests/watchdog/wd_auditpath_hang.js
+++ b/jstests/watchdog/wd_auditpath_hang.js
@@ -3,19 +3,18 @@
load("jstests/watchdog/lib/wd_test_common.js");
(function() {
- 'use strict';
+'use strict';
- if (assert.commandWorked(db.runCommand({buildInfo: 1})).modules.includes("enterprise")) {
- let control = new CharybdefsControl("auditpath_hang");
+if (assert.commandWorked(db.runCommand({buildInfo: 1})).modules.includes("enterprise")) {
+ let control = new CharybdefsControl("auditpath_hang");
- const auditPath = control.getMountPath();
+ const auditPath = control.getMountPath();
- testFuseAndMongoD(control, {
-
- auditDestination: 'file',
- auditFormat: 'JSON',
- auditPath: auditPath + "/auditLog.json"
- });
- }
+ testFuseAndMongoD(control, {
+ auditDestination: 'file',
+ auditFormat: 'JSON',
+ auditPath: auditPath + "/auditLog.json"
+ });
+}
})();
diff --git a/jstests/watchdog/wd_dbpath_hang.js b/jstests/watchdog/wd_dbpath_hang.js
index 39147fe2229..f0c2060294a 100644
--- a/jstests/watchdog/wd_dbpath_hang.js
+++ b/jstests/watchdog/wd_dbpath_hang.js
@@ -3,12 +3,11 @@
load("jstests/watchdog/lib/wd_test_common.js");
(function() {
- 'use strict';
+'use strict';
- let control = new CharybdefsControl("dbpath_hang");
+let control = new CharybdefsControl("dbpath_hang");
- const dbPath = control.getMountPath() + "/db";
-
- testFuseAndMongoD(control, {dbpath: dbPath});
+const dbPath = control.getMountPath() + "/db";
+testFuseAndMongoD(control, {dbpath: dbPath});
})();
diff --git a/jstests/watchdog/wd_journal_hang.js b/jstests/watchdog/wd_journal_hang.js
index c07b4298170..e33931cf5dd 100644
--- a/jstests/watchdog/wd_journal_hang.js
+++ b/jstests/watchdog/wd_journal_hang.js
@@ -4,30 +4,30 @@
load("jstests/watchdog/lib/wd_test_common.js");
(function() {
- 'use strict';
+'use strict';
- function trimTrailingSlash(dir) {
- if (dir.endsWith('/')) {
- return dir.substring(0, dir.length - 1);
- }
-
- return dir;
+function trimTrailingSlash(dir) {
+ if (dir.endsWith('/')) {
+ return dir.substring(0, dir.length - 1);
}
- let control = new CharybdefsControl("journalpath_hang");
+ return dir;
+}
+
+let control = new CharybdefsControl("journalpath_hang");
- const journalFusePath = control.getMountPath();
+const journalFusePath = control.getMountPath();
- const dbPath = MongoRunner.toRealDir("$dataDir/mongod-journal");
+const dbPath = MongoRunner.toRealDir("$dataDir/mongod-journal");
- const journalLinkPath = dbPath + "/journal";
+const journalLinkPath = dbPath + "/journal";
- resetDbpath(dbPath);
+resetDbpath(dbPath);
- // Create a symlink from the non-fuse journal directory to the fuse mount.
- const ret = run("ln", "-s", trimTrailingSlash(journalFusePath), journalLinkPath);
- assert.eq(ret, 0);
+// Create a symlink from the non-fuse journal directory to the fuse mount.
+const ret = run("ln", "-s", trimTrailingSlash(journalFusePath), journalLinkPath);
+assert.eq(ret, 0);
- // Set noCleanData so that the dbPath is not cleaned because we want to use the journal symlink.
- testFuseAndMongoD(control, {dbpath: dbPath, noCleanData: true});
+// Set noCleanData so that the dbPath is not cleaned because we want to use the journal symlink.
+testFuseAndMongoD(control, {dbpath: dbPath, noCleanData: true});
})();
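
The symlink is what confines the FUSE filesystem, and its injected stalls, to journal writes only. A sketch assuming the surrounding helpers (control, testFuseAndMongoD); `fuseMountPath` stands in for control.getMountPath() with any trailing slash trimmed:

// Sketch: point <dbpath>/journal at a FUSE mount so fault injection hits
// journal I/O while the rest of dbpath stays on the real filesystem.
const journalDbPath = MongoRunner.toRealDir('$dataDir/mongod-journal');
resetDbpath(journalDbPath);
assert.eq(run('ln', '-s', fuseMountPath, journalDbPath + '/journal'), 0);
// noCleanData keeps the harness from wiping dbpath (and the symlink) on start.
testFuseAndMongoD(control, {dbpath: journalDbPath, noCleanData: true});
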
diff --git a/jstests/watchdog/wd_logpath_hang.js b/jstests/watchdog/wd_logpath_hang.js
index 9a3ec13c845..598cb286f3d 100644
--- a/jstests/watchdog/wd_logpath_hang.js
+++ b/jstests/watchdog/wd_logpath_hang.js
@@ -3,12 +3,11 @@
load("jstests/watchdog/lib/wd_test_common.js");
(function() {
- 'use strict';
+'use strict';
- let control = new CharybdefsControl("logpath_hang");
+let control = new CharybdefsControl("logpath_hang");
- const logpath = control.getMountPath();
-
- testFuseAndMongoD(control, {logpath: logpath + "/foo.log"});
+const logpath = control.getMountPath();
+testFuseAndMongoD(control, {logpath: logpath + "/foo.log"});
})();
diff --git a/jstests/watchdog/wd_setparam.js b/jstests/watchdog/wd_setparam.js
index 0857e11b1ff..cc74b96ef0c 100644
--- a/jstests/watchdog/wd_setparam.js
+++ b/jstests/watchdog/wd_setparam.js
@@ -1,60 +1,59 @@
// Storage Node Watchdog test cases
// - Validate set parameter functions correctly.
(function() {
- 'use strict';
- const admin = db.getSiblingDB("admin");
+'use strict';
+const admin = db.getSiblingDB("admin");
- // Check the defaults are correct
- //
- function getparam(adminDb, field) {
- let q = {getParameter: 1};
- q[field] = 1;
+// Check the defaults are correct
+//
+function getparam(adminDb, field) {
+ let q = {getParameter: 1};
+ q[field] = 1;
- const ret = adminDb.runCommand(q);
- return ret[field];
- }
+ const ret = adminDb.runCommand(q);
+ return ret[field];
+}
- // Verify the defaults are as we documented them
- assert.eq(getparam(admin, "watchdogPeriodSeconds"), -1);
+// Verify the defaults are as we documented them
+assert.eq(getparam(admin, "watchdogPeriodSeconds"), -1);
- function setparam(adminDb, obj) {
- const ret = adminDb.runCommand(Object.extend({setParameter: 1}, obj));
- return ret;
- }
+function setparam(adminDb, obj) {
+ const ret = adminDb.runCommand(Object.extend({setParameter: 1}, obj));
+ return ret;
+}
- // Negative tests
- // Negative: set it too low.
- assert.commandFailed(setparam(admin, {"watchdogPeriodSeconds": 1}));
- // Negative: set it the min value but fail since it was not enabled.
- assert.commandFailed(setparam(admin, {"watchdogPeriodSeconds": 60}));
- // Negative: set it the min value + 1 but fail since it was not enabled.
- assert.commandFailed(setparam(admin, {"watchdogPeriodSeconds": 61}));
+// Negative tests
+// Negative: set it too low.
+assert.commandFailed(setparam(admin, {"watchdogPeriodSeconds": 1}));
+// Negative: set it the min value but fail since it was not enabled.
+assert.commandFailed(setparam(admin, {"watchdogPeriodSeconds": 60}));
+// Negative: set it the min value + 1 but fail since it was not enabled.
+assert.commandFailed(setparam(admin, {"watchdogPeriodSeconds": 61}));
- // Now test MongoD with it enabled at startup
- //
- const conn = MongoRunner.runMongod({setParameter: "watchdogPeriodSeconds=60"});
- assert.neq(null, conn, 'mongod was unable to start up');
+// Now test MongoD with it enabled at startup
+//
+const conn = MongoRunner.runMongod({setParameter: "watchdogPeriodSeconds=60"});
+assert.neq(null, conn, 'mongod was unable to start up');
- const admin2 = conn.getDB("admin");
+const admin2 = conn.getDB("admin");
- // Validate defaults
- assert.eq(getparam(admin2, "watchdogPeriodSeconds"), 60);
+// Validate defaults
+assert.eq(getparam(admin2, "watchdogPeriodSeconds"), 60);
- // Negative: set it too low.
- assert.commandFailed(setparam(admin2, {"watchdogPeriodSeconds": 1}));
- // Positive: set it the min value
- assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": 60}));
- // Positive: set it the min value + 1
- assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": 61}));
+// Negative: set it too low.
+assert.commandFailed(setparam(admin2, {"watchdogPeriodSeconds": 1}));
+// Positive: set it the min value
+assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": 60}));
+// Positive: set it the min value + 1
+assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": 61}));
- // Positive: disable it
- assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": -1}));
+// Positive: disable it
+assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": -1}));
- assert.eq(getparam(admin2, "watchdogPeriodSeconds"), -1);
+assert.eq(getparam(admin2, "watchdogPeriodSeconds"), -1);
- // Positive: enable it again
- assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": 60}));
-
- MongoRunner.stopMongod(conn);
+// Positive: enable it again
+assert.commandWorked(setparam(admin2, {"watchdogPeriodSeconds": 60}));
+MongoRunner.stopMongod(conn);
})();
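
[Editor's note] The three watchdog test diffs above all show the same new shell-test convention: the body of the `(function() { ... })();` wrapper is no longer indented. A rough C++ analogy, offered as an assumption about intent rather than the JS formatter's documented rule, is clang-format's default `NamespaceIndentation: None`, where a wrapping scope adds no indent level:

    namespace mongo {
    int wrapped = 0;  // no extra indent inside the wrapping scope
    }  // namespace mongo
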
diff --git a/src/mongo/base/clonable_ptr.h b/src/mongo/base/clonable_ptr.h
index 135fd272fa0..1693c1b829b 100644
--- a/src/mongo/base/clonable_ptr.h
+++ b/src/mongo/base/clonable_ptr.h
@@ -264,8 +264,9 @@ public:
* NOTE: This constructor is disabled for types with a stateless `CloneFactory` type.
*/
template <typename CloneFactory_ = CloneFactory>
- inline clonable_ptr(typename std::enable_if<!std::is_empty<CloneFactory_>::value,
- std::nullptr_t>::type) = delete;
+ inline clonable_ptr(
+ typename std::enable_if<!std::is_empty<CloneFactory_>::value, std::nullptr_t>::type) =
+ delete;
/*!
* Constructs a pointer to nothing, with a default `CloneFactory`.
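
[Editor's note] A minimal sketch of the wrapping rule applied to `clonable_ptr` above: when a SFINAE parameter pushes a declaration past the column limit, clang-format 7 breaks immediately after the opening parenthesis and gives the trailing `= delete` its own continuation line. `Example` below is a hypothetical type, not part of the tree:

    #include <cstddef>
    #include <type_traits>

    template <typename Factory>
    struct Example {
        // Disabled for empty (stateless) factories, mirroring the header above.
        template <typename F = Factory>
        Example(
            typename std::enable_if<!std::is_empty<F>::value, std::nullptr_t>::type) =
            delete;
    };
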
diff --git a/src/mongo/base/concept/assignable.h b/src/mongo/base/concept/assignable.h
index 15fcc555b01..0c3e2d68e59 100644
--- a/src/mongo/base/concept/assignable.h
+++ b/src/mongo/base/concept/assignable.h
@@ -33,9 +33,9 @@
namespace mongo {
namespace concept {
-/*!
- * The Assignable concept models a type which can be copy assigned and copy constructed.
- */
-struct Assignable : CopyConstructible, CopyAssignable {};
+ /*!
+ * The Assignable concept models a type which can be copy assigned and copy constructed.
+ */
+ struct Assignable : CopyConstructible, CopyAssignable {};
} // namespace concept
} // namespace mongo
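
[Editor's note] The re-indentation here, and in the sibling `concept` headers below, runs opposite to the usual no-namespace-indent style. A plausible cause, stated as an assumption: clang-format 7 gained C++2a support and lexes `concept` as a keyword, so `namespace concept {` is no longer recognized as a namespace and its body is indented like an ordinary block:

    // Pre-C++2a dialect; with C++20 'concept' is a reserved keyword.
    namespace mongo {
    namespace concept {         // not seen as a namespace by the formatter
        struct Assignable {};   // so the body gains one indent level
    }  // namespace concept
    }  // namespace mongo
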
diff --git a/src/mongo/base/concept/clonable.h b/src/mongo/base/concept/clonable.h
index 63cdceec353..d658b0e5442 100644
--- a/src/mongo/base/concept/clonable.h
+++ b/src/mongo/base/concept/clonable.h
@@ -33,16 +33,16 @@
namespace mongo {
namespace concept {
-/*!
- * Objects conforming to the Clonable concept can be dynamically copied, using `this->clone()`.
- * The Clonable concept does not specify the return type of the `clone()` function.
- */
-struct Clonable {
- /*! Clonable objects must be safe to destroy, by pointer. */
- virtual ~Clonable() noexcept = 0;
+ /*!
+ * Objects conforming to the Clonable concept can be dynamically copied, using `this->clone()`.
+ * The Clonable concept does not specify the return type of the `clone()` function.
+ */
+ struct Clonable {
+ /*! Clonable objects must be safe to destroy, by pointer. */
+ virtual ~Clonable() noexcept = 0;
- /*! Clonable objects can be cloned without knowing the actual dynamic type. */
- Constructible<UniquePtr<Clonable>> clone() const;
-};
+ /*! Clonable objects can be cloned without knowing the actual dynamic type. */
+ Constructible<UniquePtr<Clonable>> clone() const;
+ };
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/clone_factory.h b/src/mongo/base/concept/clone_factory.h
index 5db13c17de9..d263311b79d 100644
--- a/src/mongo/base/concept/clone_factory.h
+++ b/src/mongo/base/concept/clone_factory.h
@@ -34,16 +34,16 @@
namespace mongo {
namespace concept {
-/*!
- * Objects conforming to the `CloneFactory` concept are function-like constructs which return
- * objects that are dynamically allocated copies of their inputs.
- * These copies can be made without knowing the actual dynamic type. The `CloneFactory` type itself
- * must be `Assignable`, in that it can be used with automatically generated copy constructors and
- * copy assignment operators.
- */
-template <typename T>
-struct CloneFactory : Assignable {
- Constructible<UniquePtr<T>> operator()(const T*) const;
-};
+ /*!
+ * Objects conforming to the `CloneFactory` concept are function-like constructs which return
+ * objects that are dynamically allocated copies of their inputs.
+ * These copies can be made without knowing the actual dynamic type. The `CloneFactory` type
+ * itself must be `Assignable`, in that it can be used with automatically generated copy
+ * constructors and copy assignment operators.
+ */
+ template <typename T>
+ struct CloneFactory : Assignable {
+ Constructible<UniquePtr<T>> operator()(const T*) const;
+ };
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/constructible.h b/src/mongo/base/concept/constructible.h
index b0f6d81adc5..f725c952d36 100644
--- a/src/mongo/base/concept/constructible.h
+++ b/src/mongo/base/concept/constructible.h
@@ -35,32 +35,31 @@
namespace mongo {
namespace concept {
-/**
- * The Constructable trait indicates whether `T` is constructible from `Constructible`.
- *
- * RETURNS: true if `T{ std::declval< Constructible >() }` is a valid expression and false
- * otherwise.
- */
-template <typename T, typename Constructible, typename = void>
-struct is_constructible : std::false_type {};
+ /**
+ * The Constructable trait indicates whether `T` is constructible from `Constructible`.
+ *
+ * RETURNS: true if `T{ std::declval< Constructible >() }` is a valid expression and false
+ * otherwise.
+ */
+ template <typename T, typename Constructible, typename = void>
+ struct is_constructible : std::false_type {};
-template <typename T, typename Constructible>
-struct is_constructible<T,
- Constructible,
- stdx::void_t<decltype(T{std::declval<Constructible<T>>()})>>
- : std::true_type {};
+ template <typename T, typename Constructible>
+ struct is_constructible<T,
+ Constructible,
+ stdx::void_t<decltype(T{std::declval<Constructible<T>>()})>>
+ : std::true_type {};
-/**
- * The Constructable concept models a type which can be passed to a single-argument constructor of
- * `T`.
- * This is not possible to describe in the type `Constructible`.
- *
- * The expression: `T{ std::declval< Constructible< T > >() }` should be valid.
- *
- * This concept is more broadly applicable than `ConvertibleTo`. `ConvertibleTo` uses implicit
- * conversion, whereas `Constructible` uses direct construction.
- */
-template <typename T>
-struct Constructible {};
+ /**
+ * The Constructable concept models a type which can be passed to a single-argument constructor
+ * of `T`. This is not possible to describe in the type `Constructible`.
+ *
+ * The expression: `T{ std::declval< Constructible< T > >() }` should be valid.
+ *
+ * This concept is more broadly applicable than `ConvertibleTo`. `ConvertibleTo` uses implicit
+ * conversion, whereas `Constructible` uses direct construction.
+ */
+ template <typename T>
+ struct Constructible {};
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/convertible_to.h b/src/mongo/base/concept/convertible_to.h
index 7cf7e86a73f..9f9187126d5 100644
--- a/src/mongo/base/concept/convertible_to.h
+++ b/src/mongo/base/concept/convertible_to.h
@@ -30,13 +30,13 @@
namespace mongo {
namespace concept {
-/**
- * The ConvertibleTo concept models a type which can be converted implicitly into a `T`.
- * The code: `T x; x= ConvertibleTo< T >{};` should be valid.
- */
-template <typename T>
-struct ConvertibleTo {
- operator T();
-}
+ /**
+ * The ConvertibleTo concept models a type which can be converted implicitly into a `T`.
+ * The code: `T x; x= ConvertibleTo< T >{};` should be valid.
+ */
+ template <typename T>
+ struct ConvertibleTo {
+ operator T();
+ }
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/copy_assignable.h b/src/mongo/base/concept/copy_assignable.h
index e89d4699e87..580325564e0 100644
--- a/src/mongo/base/concept/copy_assignable.h
+++ b/src/mongo/base/concept/copy_assignable.h
@@ -30,17 +30,17 @@
namespace mongo {
namespace concept {
-/**
- * The CopyAssignable concept models a type which can be copy assigned.
- *
- * The expression: `copyAssignable= copyAssignable` should be valid.
- */
-struct CopyAssignable {
/**
- * The copy assignment operator is required by `CopyAssignable`.
- * NOTE: Copy Assignment is only required on lvalue targets of `CopyAssignable`.
+ * The CopyAssignable concept models a type which can be copy assigned.
+ *
+ * The expression: `copyAssignable= copyAssignable` should be valid.
*/
- CopyAssignable& operator=(const CopyAssignable&) &;
-};
+ struct CopyAssignable {
+ /**
+ * The copy assignment operator is required by `CopyAssignable`.
+ * NOTE: Copy Assignment is only required on lvalue targets of `CopyAssignable`.
+ */
+ CopyAssignable& operator=(const CopyAssignable&) &;
+ };
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/copy_constructible.h b/src/mongo/base/concept/copy_constructible.h
index 68d8cab494a..689f8e44b71 100644
--- a/src/mongo/base/concept/copy_constructible.h
+++ b/src/mongo/base/concept/copy_constructible.h
@@ -30,13 +30,13 @@
namespace mongo {
namespace concept {
-/**
- * The CopyConstructable concept models a type which can be copy constructed.
- *
- * The expression: `CopyConstructible{ copyConstructible }` should be valid.
- */
-struct CopyConstructible {
- CopyConstructible(const CopyConstructible&);
-};
+ /**
+ * The CopyConstructable concept models a type which can be copy constructed.
+ *
+ * The expression: `CopyConstructible{ copyConstructible }` should be valid.
+ */
+ struct CopyConstructible {
+ CopyConstructible(const CopyConstructible&);
+ };
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/concept/unique_ptr.h b/src/mongo/base/concept/unique_ptr.h
index e014a6d8a14..b7518963c54 100644
--- a/src/mongo/base/concept/unique_ptr.h
+++ b/src/mongo/base/concept/unique_ptr.h
@@ -32,38 +32,38 @@
namespace mongo {
namespace concept {
-/**
- * The `UniquePtr` Concept models a movable owning pointer of an object.
- * `std::unique_ptr< T >` is a model of `mongo::concept::UniquePtr< T >`.
- */
-template <typename T>
-struct UniquePtr {
- /** The `UniquePtr< T >` must retire its pointer to `T` on destruction. */
- ~UniquePtr() noexcept;
+ /**
+ * The `UniquePtr` Concept models a movable owning pointer of an object.
+ * `std::unique_ptr< T >` is a model of `mongo::concept::UniquePtr< T >`.
+ */
+ template <typename T>
+ struct UniquePtr {
+ /** The `UniquePtr< T >` must retire its pointer to `T` on destruction. */
+ ~UniquePtr() noexcept;
- UniquePtr(UniquePtr&& p);
- UniquePtr& operator=(UniquePtr&& p);
+ UniquePtr(UniquePtr&& p);
+ UniquePtr& operator=(UniquePtr&& p);
- UniquePtr();
- UniquePtr(T* p);
+ UniquePtr();
+ UniquePtr(T* p);
- ConvertibleTo<T*> operator->() const;
- T& operator*() const;
+ ConvertibleTo<T*> operator->() const;
+ T& operator*() const;
- explicit operator bool() const;
+ explicit operator bool() const;
- ConvertibleTo<T*> get() const;
+ ConvertibleTo<T*> get() const;
- void reset() noexcept;
- void reset(ConvertibleTo<T*>);
-};
+ void reset() noexcept;
+ void reset(ConvertibleTo<T*>);
+ };
-/*! A `UniquePtr` object must be equality comparable. */
-template <typename T>
-bool operator==(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
+ /*! A `UniquePtr` object must be equality comparable. */
+ template <typename T>
+ bool operator==(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
-/*! A `UniquePtr` object must be inequality comparable. */
-template <typename T>
-bool operator!=(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
+ /*! A `UniquePtr` object must be inequality comparable. */
+ template <typename T>
+ bool operator!=(const UniquePtr<T>& lhs, const UniquePtr<T>& rhs);
} // namespace concept
} // namespace mongo
diff --git a/src/mongo/base/data_type_validated_test.cpp b/src/mongo/base/data_type_validated_test.cpp
index e6e63f4592a..392ef98989b 100644
--- a/src/mongo/base/data_type_validated_test.cpp
+++ b/src/mongo/base/data_type_validated_test.cpp
@@ -61,8 +61,8 @@ struct Validator<char> {
namespace {
using namespace mongo;
-using std::end;
using std::begin;
+using std::end;
TEST(DataTypeValidated, SuccessfulValidation) {
char buf[1];
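
[Editor's note] A minimal sketch of the change above, assuming it comes from clang-format's `SortUsingDeclarations` option: adjacent using-declarations are reordered alphabetically, which is why `begin` now precedes `end`:

    #include <iterator>

    using std::begin;  // sorted alphabetically
    using std::end;
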
diff --git a/src/mongo/base/encoded_value_storage_test.cpp b/src/mongo/base/encoded_value_storage_test.cpp
index e9a70a819e3..2a6ed09b5e2 100644
--- a/src/mongo/base/encoded_value_storage_test.cpp
+++ b/src/mongo/base/encoded_value_storage_test.cpp
@@ -117,7 +117,7 @@ public:
Value(ZeroInitTag_t zit) : EncodedValueStorage<Layout, ConstView, View>(zit) {}
};
-}
+} // namespace EncodedValueStorageTest
TEST(EncodedValueStorage, EncodedValueStorage) {
EncodedValueStorageTest::Value raw;
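
[Editor's note] The bare `}` gaining a `// namespace EncodedValueStorageTest` trailer matches clang-format's `FixNamespaceComments` behavior (an assumption about this run's configuration): the closing brace of a namespace receives a canonical end comment. The same cleanup recurs throughout this commit, e.g. in `bsonelement.h`, `bsonmisc.h`, and `ordering.h` below:

    namespace EncodedValueStorageTest {
    struct Value {};
    }  // namespace EncodedValueStorageTest
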
diff --git a/src/mongo/base/global_initializer_registerer.h b/src/mongo/base/global_initializer_registerer.h
index 14345a3f98a..08b0ba625bc 100644
--- a/src/mongo/base/global_initializer_registerer.h
+++ b/src/mongo/base/global_initializer_registerer.h
@@ -51,42 +51,42 @@ extern const std::string& defaultInitializerName();
class GlobalInitializerRegisterer {
public:
/**
- * Constructor parameters:
- *
- * - std::string name
- *
- * - InitializerFunction initFn
- * Must be nonnull.
- * Example expression:
- *
- * [](InitializerContext* context) {
- * // initialization code
- * return Status::OK();
- * }
- *
- * - DeinitializerFunction deinitFn
- * A deinitialization that will execute in reverse order from initialization and
- * support re-initialization. If not specified, defaults to the `nullptr` function.
- * Example expression:
- *
- * [](DeinitializerContext* context) {
- * // deinitialization code
- * return Status::OK();
- * }
- *
- * - std::vector<std::string> prerequisites
- * If not specified, defaults to {"default"}.
- *
- * - std::vector<std::string> dependents
- * If not specified, defaults to {} (no dependents).
- *
- *
- * At run time, the full set of prerequisites for `name` will be computed as the union of the
- * `prerequisites` (which can be defaulted) and all other mongo initializers that list `name` in
- * their `dependents`.
- *
- * A non-null `deinitFn` will tag the initializer as supporting re-initialization.
- */
+ * Constructor parameters:
+ *
+ * - std::string name
+ *
+ * - InitializerFunction initFn
+ * Must be nonnull.
+ * Example expression:
+ *
+ * [](InitializerContext* context) {
+ * // initialization code
+ * return Status::OK();
+ * }
+ *
+ * - DeinitializerFunction deinitFn
+ * A deinitialization that will execute in reverse order from initialization and
+ * support re-initialization. If not specified, defaults to the `nullptr` function.
+ * Example expression:
+ *
+ * [](DeinitializerContext* context) {
+ * // deinitialization code
+ * return Status::OK();
+ * }
+ *
+ * - std::vector<std::string> prerequisites
+ * If not specified, defaults to {"default"}.
+ *
+ * - std::vector<std::string> dependents
+ * If not specified, defaults to {} (no dependents).
+ *
+ *
+ * At run time, the full set of prerequisites for `name` will be computed as the union of the
+ * `prerequisites` (which can be defaulted) and all other mongo initializers that list `name` in
+ * their `dependents`.
+ *
+ * A non-null `deinitFn` will tag the initializer as supporting re-initialization.
+ */
GlobalInitializerRegisterer(std::string name,
InitializerFunction initFn,
DeinitializerFunction deinitFn = nullptr,
diff --git a/src/mongo/base/initializer.h b/src/mongo/base/initializer.h
index eff1500387c..c7297abacbf 100644
--- a/src/mongo/base/initializer.h
+++ b/src/mongo/base/initializer.h
@@ -97,14 +97,14 @@ Status runGlobalInitializers(int argc, const char* const* argv, const char* cons
void runGlobalInitializersOrDie(int argc, const char* const* argv, const char* const* envp);
/**
-* Run the global deinitializers. They will execute in reverse order from initialization.
-*
-* It's a programming error for this to fail, but if it does it will return a status other
-* than Status::OK.
-*
-* This means that the few initializers that might want to terminate the program by failing
-* should probably arrange to terminate the process themselves.
-*/
+ * Run the global deinitializers. They will execute in reverse order from initialization.
+ *
+ * It's a programming error for this to fail, but if it does it will return a status other
+ * than Status::OK.
+ *
+ * This means that the few initializers that might want to terminate the program by failing
+ * should probably arrange to terminate the process themselves.
+ */
Status runGlobalDeinitializers();
} // namespace mongo
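
[Editor's note] Both comment hunks above are pure re-alignment: continuation lines of a block comment now start one column in, so each `*` sits under the second character of the opener. A one-line sketch:

    /**
     * Continuation lines are indented by one space so the stars line up.
     */
    void example();
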
diff --git a/src/mongo/base/initializer_function.h b/src/mongo/base/initializer_function.h
index 7d3575cfd90..50503d9c619 100644
--- a/src/mongo/base/initializer_function.h
+++ b/src/mongo/base/initializer_function.h
@@ -46,11 +46,11 @@ class DeinitializerContext;
typedef stdx::function<Status(InitializerContext*)> InitializerFunction;
/**
-* A DeinitializerFunction implements the behavior of a deinitializer operation.
-*
-* On successful execution, a DeinitializerFunction returns Status::OK(). It may
-* inspect and mutate the supplied DeinitializerContext.
-*/
+ * A DeinitializerFunction implements the behavior of a deinitializer operation.
+ *
+ * On successful execution, a DeinitializerFunction returns Status::OK(). It may
+ * inspect and mutate the supplied DeinitializerContext.
+ */
typedef stdx::function<Status(DeinitializerContext*)> DeinitializerFunction;
diff --git a/src/mongo/bson/bson_obj_test.cpp b/src/mongo/bson/bson_obj_test.cpp
index e966af9d559..1b7715b5946 100644
--- a/src/mongo/bson/bson_obj_test.cpp
+++ b/src/mongo/bson/bson_obj_test.cpp
@@ -637,10 +637,7 @@ TEST(BSONObj, getFields) {
TEST(BSONObj, getFieldsWithDuplicates) {
auto e = BSON("a" << 2 << "b"
<< "3"
- << "a"
- << 9
- << "b"
- << 10);
+ << "a" << 9 << "b" << 10);
std::array<StringData, 2> fieldNames{"a", "b"};
std::array<BSONElement, 2> fields;
e.getFields(fieldNames, &fields);
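
[Editor's note] The `BSON(...)` hunks in this and the following test files show the new `<<`-chain packing: plain operands such as `9` and `10` are bin-packed onto one line, while a string literal that directly follows another string operand keeps its own line. A fragment mirroring the test above (requires `mongo/bson/bsonobjbuilder.h` for the `BSON` macro):

    auto e = BSON("a" << 2 << "b"
                      << "3"                      // string after string: breaks
                      << "a" << 9 << "b" << 10);  // mixed operands: packed
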
diff --git a/src/mongo/bson/bson_validate_test.cpp b/src/mongo/bson/bson_validate_test.cpp
index 2c3a9b0d07b..5eabab9aff7 100644
--- a/src/mongo/bson/bson_validate_test.cpp
+++ b/src/mongo/bson/bson_validate_test.cpp
@@ -41,8 +41,8 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::endl;
+using std::unique_ptr;
void appendInvalidStringElement(const char* fieldName, BufBuilder* bb) {
// like a BSONObj string, but without a NUL terminator.
@@ -150,23 +150,16 @@ TEST(BSONValidate, Fuzz) {
log() << "BSONValidate Fuzz random seed: " << seed << endl;
PseudoRandom randomSource(seed);
- BSONObj original = BSON("one" << 3 << "two" << 5 << "three" << BSONObj() << "four"
- << BSON("five" << BSON("six" << 11))
- << "seven"
- << BSON_ARRAY("a"
- << "bb"
- << "ccc"
- << 5)
- << "eight"
- << BSONDBRef("rrr", OID("01234567890123456789aaaa"))
- << "_id"
- << OID("deadbeefdeadbeefdeadbeef")
- << "nine"
- << BSONBinData("\x69\xb7", 2, BinDataGeneral)
- << "ten"
- << Date_t::fromMillisSinceEpoch(44)
- << "eleven"
- << BSONRegEx("foooooo", "i"));
+ BSONObj original =
+ BSON("one" << 3 << "two" << 5 << "three" << BSONObj() << "four"
+ << BSON("five" << BSON("six" << 11)) << "seven"
+ << BSON_ARRAY("a"
+ << "bb"
+ << "ccc" << 5)
+ << "eight" << BSONDBRef("rrr", OID("01234567890123456789aaaa")) << "_id"
+ << OID("deadbeefdeadbeefdeadbeef") << "nine"
+ << BSONBinData("\x69\xb7", 2, BinDataGeneral) << "ten"
+ << Date_t::fromMillisSinceEpoch(44) << "eleven" << BSONRegEx("foooooo", "i"));
int32_t fuzzFrequencies[] = {2, 10, 20, 100, 1000};
for (size_t i = 0; i < sizeof(fuzzFrequencies) / sizeof(int32_t); ++i) {
@@ -245,8 +238,9 @@ TEST(BSONValidateFast, Simple3) {
}
TEST(BSONValidateFast, NestedObject) {
- BSONObj x = BSON("a" << 1 << "b" << BSON("c" << 2 << "d" << BSONArrayBuilder().obj() << "e"
- << BSON_ARRAY("1" << 2 << 3)));
+ BSONObj x = BSON("a" << 1 << "b"
+ << BSON("c" << 2 << "d" << BSONArrayBuilder().obj() << "e"
+ << BSON_ARRAY("1" << 2 << 3)));
ASSERT_OK(validateBSON(x.objdata(), x.objsize(), BSONVersion::kLatest));
ASSERT_NOT_OK(validateBSON(x.objdata(), x.objsize() / 2, BSONVersion::kLatest));
}
@@ -323,13 +317,10 @@ TEST(BSONValidateFast, StringHasSomething) {
bb.appendStr("x", /*withNUL*/ true);
bb.appendNum(0);
const BSONObj x = ob.done();
- ASSERT_EQUALS(5 // overhead
- +
- 1 // type
- +
- 2 // name
- +
- 4 // size
+ ASSERT_EQUALS(5 // overhead
+ + 1 // type
+ + 2 // name
+ + 4 // size
,
x.objsize());
ASSERT_NOT_OK(validateBSON(x.objdata(), x.objsize(), BSONVersion::kLatest));
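
[Editor's note] The arithmetic hunk above is an operator-placement fix: the old output left each `+` stranded alone on its own line; the new style leads every continuation line with the operator so the per-term comments stay attached. The same shape in isolation:

    const int totalSize = 5  // overhead
        + 1                  // type
        + 2                  // name
        + 4;                 // size
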
diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index 018b1f2e6e6..f596f816e8a 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -594,8 +594,8 @@ StatusWith<long long> BSONElement::parseIntegerElementToLong() const {
// NaN doubles are rejected.
if (std::isnan(eDouble)) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Expected an integer, but found NaN in: "
- << toString(true, true));
+ str::stream()
+ << "Expected an integer, but found NaN in: " << toString(true, true));
}
// No integral doubles that are too large to be represented as a 64 bit signed integer.
@@ -604,8 +604,8 @@ StatusWith<long long> BSONElement::parseIntegerElementToLong() const {
if (eDouble >= kLongLongMaxPlusOneAsDouble ||
eDouble < std::numeric_limits<long long>::min()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Cannot represent as a 64-bit integer: "
- << toString(true, true));
+ str::stream()
+ << "Cannot represent as a 64-bit integer: " << toString(true, true));
}
// This checks if elem is an integral double.
@@ -620,8 +620,8 @@ StatusWith<long long> BSONElement::parseIntegerElementToLong() const {
number = numberDecimal().toLongExact(&signalingFlags);
if (signalingFlags != Decimal128::kNoFlag) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Cannot represent as a 64-bit integer: "
- << toString(true, true));
+ str::stream()
+ << "Cannot represent as a 64-bit integer: " << toString(true, true));
}
} else {
number = numberLong();
@@ -692,7 +692,7 @@ BSONElement BSONElement::operator[](StringData field) const {
}
namespace {
-NOINLINE_DECL void msgAssertedBadType[[noreturn]](int8_t type) {
+NOINLINE_DECL void msgAssertedBadType [[noreturn]] (int8_t type) {
msgasserted(10320, str::stream() << "BSONElement: bad type " << (int)type);
}
} // namespace
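
[Editor's note] The `msgAssertedBadType` hunk shows clang-format 7's handling of a post-declarator attribute: `[[noreturn]]` is now set off with spaces from both the function name and the parameter list. Both spellings below are valid C++11; the second matches the reformatted line:

    [[noreturn]] void fail(int type);    // attribute before the declaration
    void fail2 [[noreturn]] (int type);  // attribute after the name, as above
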
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
index c5a08e8a9cc..151872be545 100644
--- a/src/mongo/bson/bsonelement.h
+++ b/src/mongo/bson/bsonelement.h
@@ -117,8 +117,7 @@ public:
double Number() const {
uassert(13118,
str::stream() << "expected " << fieldName()
- << " to have a numeric type, but it is a "
- << type(),
+ << " to have a numeric type, but it is a " << type(),
isNumber());
return number();
}
@@ -951,4 +950,4 @@ inline BSONElement::BSONElement() {
fieldNameSize_ = 0;
totalSize = 1;
}
-}
+} // namespace mongo
diff --git a/src/mongo/bson/bsonelement_test.cpp b/src/mongo/bson/bsonelement_test.cpp
index 5c036ebeb23..f98ccf93894 100644
--- a/src/mongo/bson/bsonelement_test.cpp
+++ b/src/mongo/bson/bsonelement_test.cpp
@@ -128,15 +128,13 @@ TEST(BSONElement, ExtractLargeSubObject) {
}
TEST(BSONElement, SafeNumberLongPositiveBound) {
- BSONObj obj = BSON("kLongLongMaxPlusOneAsDouble"
- << BSONElement::kLongLongMaxPlusOneAsDouble
- << "towardsZero"
- << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble, 0.0)
- << "towardsInfinity"
- << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble,
- std::numeric_limits<double>::max())
- << "positiveInfinity"
- << std::numeric_limits<double>::infinity());
+ BSONObj obj =
+ BSON("kLongLongMaxPlusOneAsDouble"
+ << BSONElement::kLongLongMaxPlusOneAsDouble << "towardsZero"
+ << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble, 0.0) << "towardsInfinity"
+ << std::nextafter(BSONElement::kLongLongMaxPlusOneAsDouble,
+ std::numeric_limits<double>::max())
+ << "positiveInfinity" << std::numeric_limits<double>::infinity());
// kLongLongMaxPlusOneAsDouble is the least double value that will overflow a 64-bit signed
// two's-complement integer. Historically, converting this value with safeNumberLong() would
@@ -182,13 +180,10 @@ TEST(BSONElement, SafeNumberLongNegativeBound) {
static_cast<double>(std::numeric_limits<long long>::lowest());
BSONObj obj =
BSON("lowestLongLongAsDouble" // This comment forces clang-format to break here.
- << lowestLongLongAsDouble
- << "towardsZero"
- << std::nextafter(lowestLongLongAsDouble, 0.0)
- << "towardsNegativeInfinity"
+ << lowestLongLongAsDouble << "towardsZero"
+ << std::nextafter(lowestLongLongAsDouble, 0.0) << "towardsNegativeInfinity"
<< std::nextafter(lowestLongLongAsDouble, std::numeric_limits<double>::lowest())
- << "negativeInfinity"
- << -std::numeric_limits<double>::infinity());
+ << "negativeInfinity" << -std::numeric_limits<double>::infinity());
ASSERT_EQ(obj["lowestLongLongAsDouble"].safeNumberLongForHash(),
std::numeric_limits<long long>::lowest());
diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h
index 6baec67d7a6..27437a32f7b 100644
--- a/src/mongo/bson/bsonmisc.h
+++ b/src/mongo/bson/bsonmisc.h
@@ -281,4 +281,4 @@ private:
// considers order
bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs);
-}
+} // namespace mongo
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index 57bbd59969b..7b5439cd40a 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -387,8 +387,8 @@ Status BSONObj::storageValidEmbedded() const {
if (name.startsWith("$")) {
if (first &&
// $ref is a collection name and must be a String
- (name == "$ref") &&
- e.type() == String && (i.next().fieldNameStringData() == "$id")) {
+ (name == "$ref") && e.type() == String &&
+ (i.next().fieldNameStringData() == "$id")) {
first = false;
// keep inspecting fields for optional "$db"
e = i.next();
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index 2faa7c9cc09..86f858258ad 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -131,7 +131,7 @@ public:
/** Construct a BSONObj from data in the proper format.
* Use this constructor when something else owns bsonData's buffer
- */
+ */
template <typename Traits = DefaultSizeTrait>
explicit BSONObj(const char* bsonData, Traits t = Traits{}) {
init<Traits>(bsonData);
@@ -142,8 +142,8 @@ public:
_ownedBuffer(std::move(ownedBuffer)) {}
/** Move construct a BSONObj */
- BSONObj(BSONObj&& other) noexcept : _objdata(std::move(other._objdata)),
- _ownedBuffer(std::move(other._ownedBuffer)) {
+ BSONObj(BSONObj&& other) noexcept
+ : _objdata(std::move(other._objdata)), _ownedBuffer(std::move(other._ownedBuffer)) {
other._objdata = BSONObj()._objdata; // To return to an empty state.
dassert(!other.isOwned());
}
@@ -364,7 +364,7 @@ public:
* this.extractFieldsUnDotted({a : 1 , c : 1}) -> {"" : 4 , "" : 6 }
* this.extractFieldsUnDotted({b : "blah"}) -> {"" : 5}
*
- */
+ */
BSONObj extractFieldsUnDotted(const BSONObj& pattern) const;
BSONObj filterFieldsUndotted(const BSONObj& filter, bool inFilter) const;
@@ -693,7 +693,7 @@ private:
class BSONObjIterator {
public:
/** Create an iterator for a BSON object.
- */
+ */
explicit BSONObjIterator(const BSONObj& jso) {
int sz = jso.objsize();
if (MONGO_unlikely(sz == 0)) {
@@ -786,7 +786,7 @@ protected:
private:
const int _nfields;
- const std::unique_ptr<const char* []> _fields;
+ const std::unique_ptr<const char*[]> _fields;
int _cur;
};
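
[Editor's note] The `_fields` hunk reflects the new declarator spacing for arrays of pointers: no space between `*` and `[]`. Standalone:

    #include <memory>

    std::unique_ptr<const char*[]> fields;  // was: std::unique_ptr<const char* []>
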
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index 75fe33d1d26..676c9e06a55 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -659,7 +659,7 @@ public:
* destructive
* The returned BSONObj will free the buffer when it is finished.
* @return owned BSONObj
- */
+ */
template <typename BSONTraits = BSONObj::DefaultSizeTrait>
BSONObj obj() {
massert(10335, "builder does not own memory", owned());
diff --git a/src/mongo/bson/bsonobjbuilder_test.cpp b/src/mongo/bson/bsonobjbuilder_test.cpp
index a2522d04b58..ceaacde2cc5 100644
--- a/src/mongo/bson/bsonobjbuilder_test.cpp
+++ b/src/mongo/bson/bsonobjbuilder_test.cpp
@@ -301,8 +301,7 @@ TEST(BSONObjBuilderTest, ResumeBuildingWithNesting) {
ASSERT_BSONOBJ_EQ(obj,
BSON("ll" << BSON("f" << BSON("cc"
<< "dd"))
- << "a"
- << BSON("c" << 3)));
+ << "a" << BSON("c" << 3)));
}
TEST(BSONObjBuilderTest, ResetToEmptyResultsInEmptyObj) {
@@ -498,12 +497,10 @@ TEST(BSONObjBuilderTest, SizeChecks) {
BSONObjBuilder builder;
ASSERT_THROWS(
[&]() {
-
for (StringData character : {"a", "b", "c"}) {
builder.append(character, obj);
}
BSONObj finalObj = builder.obj<BSONObj::LargeSizeTrait>();
-
}(),
DBException);
}
diff --git a/src/mongo/bson/bsontypes.h b/src/mongo/bson/bsontypes.h
index b77a536742b..ff9217e995e 100644
--- a/src/mongo/bson/bsontypes.h
+++ b/src/mongo/bson/bsontypes.h
@@ -223,4 +223,4 @@ inline int canonicalizeBSONType(BSONType type) {
return -1;
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/bson/json.cpp b/src/mongo/bson/json.cpp
index 8f6587a6eff..bb5d6850ccd 100644
--- a/src/mongo/bson/json.cpp
+++ b/src/mongo/bson/json.cpp
@@ -45,9 +45,9 @@
namespace mongo {
-using std::unique_ptr;
using std::ostringstream;
using std::string;
+using std::unique_ptr;
#if 0
#define MONGO_JSON_DEBUG(message) \
@@ -468,7 +468,7 @@ Status JParse::dateObject(StringData fieldName, BSONObjBuilder& builder) {
}
if (errno == ERANGE) {
/* Need to handle this because jsonString outputs the value of Date_t as unsigned.
- * See SERVER-8330 and SERVER-8573 */
+ * See SERVER-8330 and SERVER-8573 */
errno = 0;
// SERVER-11920: We should use parseNumberFromString here, but that function
// requires that we know ahead of time where the number ends, which is not currently
@@ -763,7 +763,7 @@ Status JParse::date(StringData fieldName, BSONObjBuilder& builder) {
}
if (errno == ERANGE) {
/* Need to handle this because jsonString outputs the value of Date_t as unsigned.
- * See SERVER-8330 and SERVER-8573 */
+ * See SERVER-8330 and SERVER-8573 */
errno = 0;
// SERVER-11920: We should use parseNumberFromString here, but that function requires
// that we know ahead of time where the number ends, which is not currently the case.
diff --git a/src/mongo/bson/oid_test.cpp b/src/mongo/bson/oid_test.cpp
index 6faefa88dfa..5f8350e565e 100644
--- a/src/mongo/bson/oid_test.cpp
+++ b/src/mongo/bson/oid_test.cpp
@@ -163,4 +163,4 @@ TEST(Basic, FromTerm) {
ASSERT_EQUALS("7fffffff", oidHead);
ASSERT_EQUALS(term, std::stoi(oidTail));
}
-}
+} // namespace
diff --git a/src/mongo/bson/ordering.h b/src/mongo/bson/ordering.h
index fe8bf48533f..793e14820f5 100644
--- a/src/mongo/bson/ordering.h
+++ b/src/mongo/bson/ordering.h
@@ -88,4 +88,4 @@ public:
return Ordering(b);
}
};
-}
+} // namespace mongo
diff --git a/src/mongo/bson/timestamp.cpp b/src/mongo/bson/timestamp.cpp
index 3f967766206..14cc982ff49 100644
--- a/src/mongo/bson/timestamp.cpp
+++ b/src/mongo/bson/timestamp.cpp
@@ -74,4 +74,4 @@ BSONObj Timestamp::toBSON() const {
bldr.append("", *this);
return bldr.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/bson/ugly_bson_integration_test.cpp b/src/mongo/bson/ugly_bson_integration_test.cpp
index 77a96a14400..5b9d7a2c28c 100644
--- a/src/mongo/bson/ugly_bson_integration_test.cpp
+++ b/src/mongo/bson/ugly_bson_integration_test.cpp
@@ -57,10 +57,7 @@ TEST_F(UglyBSONFixture, DuplicateFields) {
assertCommandFailsOnServer("admin",
BSON("insert"
<< "test"
- << "documents"
- << BSONArray()
- << "documents"
- << BSONArray()),
+ << "documents" << BSONArray() << "documents" << BSONArray()),
ErrorCodes::duplicateCodeForTest(40413));
}
diff --git a/src/mongo/bson/util/bson_check.h b/src/mongo/bson/util/bson_check.h
index 1ca748d88a2..b969ab7a2a6 100644
--- a/src/mongo/bson/util/bson_check.h
+++ b/src/mongo/bson/util/bson_check.h
@@ -56,8 +56,8 @@ Status bsonCheckOnlyHasFieldsImpl(StringData objectName,
if (!allowed(name)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Unexpected field " << e.fieldName() << " in "
- << objectName);
+ str::stream()
+ << "Unexpected field " << e.fieldName() << " in " << objectName);
}
bool& seenBefore = seenFields[name];
@@ -65,8 +65,8 @@ Status bsonCheckOnlyHasFieldsImpl(StringData objectName,
seenBefore = true;
} else {
return Status(ErrorCodes::Error(51000),
- str::stream() << "Field " << name << " appears multiple times in "
- << objectName);
+ str::stream()
+ << "Field " << name << " appears multiple times in " << objectName);
}
}
return Status::OK();
@@ -105,10 +105,7 @@ Status bsonCheckOnlyHasFieldsForCommand(StringData objectName,
inline void checkBSONType(BSONType expectedType, const BSONElement& elem) {
uassert(elem.type() == BSONType::EOO ? ErrorCodes::NoSuchKey : ErrorCodes::TypeMismatch,
str::stream() << "Wrong type for '" << elem.fieldNameStringData() << "'. Expected a "
- << typeName(expectedType)
- << ", got a "
- << typeName(elem.type())
- << '.',
+ << typeName(expectedType) << ", got a " << typeName(elem.type()) << '.',
elem.type() == expectedType);
}
diff --git a/src/mongo/bson/util/bson_check_test.cpp b/src/mongo/bson/util/bson_check_test.cpp
index f220ee1e0ca..93716c84a6b 100644
--- a/src/mongo/bson/util/bson_check_test.cpp
+++ b/src/mongo/bson/util/bson_check_test.cpp
@@ -52,26 +52,19 @@ TEST(BsonCheck, CheckHasOnlyLegalFields) {
ASSERT_OK(bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "thirdField"
- << 1
- << "anotherField"
- << 2),
+ << "thirdField" << 1 << "anotherField" << 2),
legals));
ASSERT_OK(bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "thirdField"
- << 1),
+ << "thirdField" << 1),
legals));
ASSERT_EQUALS(ErrorCodes::BadValue,
bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "illegal"
- << 4
- << "thirdField"
- << 1),
+ << "illegal" << 4 << "thirdField" << 1),
legals));
}
diff --git a/src/mongo/bson/util/bson_extract.cpp b/src/mongo/bson/util/bson_extract.cpp
index 481c26df5fe..445d81d705f 100644
--- a/src/mongo/bson/util/bson_extract.cpp
+++ b/src/mongo/bson/util/bson_extract.cpp
@@ -65,10 +65,9 @@ Status bsonExtractTypedFieldImpl(const BSONObj& object,
return status;
if (type != outElement->type()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << fieldName << "\" had the wrong type. Expected "
- << typeName(type)
- << ", found "
- << typeName(outElement->type()));
+ str::stream()
+ << "\"" << fieldName << "\" had the wrong type. Expected "
+ << typeName(type) << ", found " << typeName(outElement->type()));
}
return status;
}
@@ -83,9 +82,9 @@ Status bsonExtractIntegerFieldImpl(const BSONObj& object,
return status;
if (!element.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected field \"" << fieldName
- << "\" to have numeric type, but found "
- << typeName(element.type()));
+ str::stream()
+ << "Expected field \"" << fieldName
+ << "\" to have numeric type, but found " << typeName(element.type()));
}
long long result = element.safeNumberLong();
if (result != element.numberDouble()) {
@@ -109,9 +108,9 @@ Status bsonExtractDoubleFieldImpl(const BSONObj& object,
return status;
if (!element.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected field \"" << fieldName
- << "\" to have numeric type, but found "
- << typeName(element.type()));
+ str::stream()
+ << "Expected field \"" << fieldName
+ << "\" to have numeric type, but found " << typeName(element.type()));
}
*out = element.numberDouble();
return status;
@@ -155,8 +154,7 @@ Status bsonExtractBooleanFieldWithDefault(const BSONObj& object,
if (!element.isNumber() && !element.isBoolean()) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected boolean or number type for field \"" << fieldName
- << "\", found "
- << typeName(element.type()));
+ << "\", found " << typeName(element.type()));
}
*out = element.trueValue();
return status;
@@ -261,8 +259,7 @@ Status bsonExtractIntegerFieldWithDefaultIf(const BSONObj& object,
if (!pred(*out)) {
return Status(ErrorCodes::BadValue,
str::stream() << "Invalid value in field \"" << fieldName << "\": " << *out
- << ": "
- << predDescription);
+ << ": " << predDescription);
}
return status;
}
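
[Editor's note] The recurring pattern in this file (and in `fetcher.cpp` below) is the new break point for long stream expressions: rather than splitting the chain midway, clang-format 7 moves the whole `str::stream()` call to its own line and packs the `<<` operands beneath it. A stand-alone approximation, with `std::ostringstream` substituted for `mongo::str::stream` so the sketch compiles on its own; the layout is what matters:

    #include <sstream>
    #include <string>

    std::string typeMismatch(const std::string& fieldName, const std::string& type) {
        std::ostringstream ss;
        ss << "Expected field \"" << fieldName
           << "\" to have numeric type, but found " << type;
        return ss.str();
    }
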
diff --git a/src/mongo/bson/util/bson_extract_test.cpp b/src/mongo/bson/util/bson_extract_test.cpp
index c8d90a59d3c..bb6c72a070e 100644
--- a/src/mongo/bson/util/bson_extract_test.cpp
+++ b/src/mongo/bson/util/bson_extract_test.cpp
@@ -91,12 +91,10 @@ TEST(ExtractBSON, ExtractStringFieldWithDefault) {
TEST(ExtractBSON, ExtractBooleanFieldWithDefault) {
BSONObj obj1 = BSON("a" << 1 << "b"
<< "hello"
- << "c"
- << true);
+ << "c" << true);
BSONObj obj2 = BSON("a" << 0 << "b"
<< "hello"
- << "c"
- << false);
+ << "c" << false);
bool b;
b = false;
ASSERT_OK(bsonExtractBooleanFieldWithDefault(obj1, "a", false, &b));
diff --git a/src/mongo/bson/util/builder_test.cpp b/src/mongo/bson/util/builder_test.cpp
index 2dc2662e29d..805c8ab29a3 100644
--- a/src/mongo/bson/util/builder_test.cpp
+++ b/src/mongo/bson/util/builder_test.cpp
@@ -124,4 +124,4 @@ TEST(Builder, AppendUnsignedLongLong) {
TEST(Builder, AppendShort) {
testStringBuilderIntegral<short>();
}
-}
+} // namespace mongo
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index e5eab6e439e..2576ef86a42 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -119,8 +119,7 @@ StatusWith<OpMsgRequest> createX509AuthCmd(const BSONObj& params, StringData cli
return OpMsgRequest::fromDBAndBody(db.getValue(),
BSON("authenticate" << 1 << "mechanism"
<< "MONGODB-X509"
- << "user"
- << username));
+ << "user" << username));
}
// Use the MONGODB-X509 protocol to authenticate as "username." The certificate details
@@ -241,14 +240,11 @@ BSONObj getInternalAuthParams(size_t idx, const std::string& mechanism) {
internalSecurity.user->getName().getUser().toString(), password);
}
- return BSON(saslCommandMechanismFieldName << mechanism << saslCommandUserDBFieldName
- << internalSecurity.user->getName().getDB()
- << saslCommandUserFieldName
- << internalSecurity.user->getName().getUser()
- << saslCommandPasswordFieldName
- << password
- << saslCommandDigestPasswordFieldName
- << false);
+ return BSON(saslCommandMechanismFieldName
+ << mechanism << saslCommandUserDBFieldName
+ << internalSecurity.user->getName().getDB() << saslCommandUserFieldName
+ << internalSecurity.user->getName().getUser() << saslCommandPasswordFieldName
+ << password << saslCommandDigestPasswordFieldName << false);
}
Future<std::string> negotiateSaslMechanism(RunCommandHook runCommand,
@@ -313,14 +309,10 @@ BSONObj buildAuthParams(StringData dbname,
StringData username,
StringData passwordText,
bool digestPassword) {
- return BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" << saslCommandUserDBFieldName
- << dbname
- << saslCommandUserFieldName
- << username
- << saslCommandPasswordFieldName
- << passwordText
- << saslCommandDigestPasswordFieldName
- << digestPassword);
+ return BSON(saslCommandMechanismFieldName
+ << "SCRAM-SHA-1" << saslCommandUserDBFieldName << dbname << saslCommandUserFieldName
+ << username << saslCommandPasswordFieldName << passwordText
+ << saslCommandDigestPasswordFieldName << digestPassword);
}
StringData getSaslCommandUserDBFieldName() {
diff --git a/src/mongo/client/authenticate_test.cpp b/src/mongo/client/authenticate_test.cpp
index c7d477b36c0..c72f6ddba04 100644
--- a/src/mongo/client/authenticate_test.cpp
+++ b/src/mongo/client/authenticate_test.cpp
@@ -127,11 +127,7 @@ public:
<< "MONGODB-CR"
<< "db"
<< "admin"
- << "user"
- << _username
- << "pwd"
- << _password
- << "digest"
+ << "user" << _username << "pwd" << _password << "digest"
<< "true");
}
@@ -141,8 +137,7 @@ public:
pushRequest("$external",
BSON("authenticate" << 1 << "mechanism"
<< "MONGODB-X509"
- << "user"
- << _username));
+ << "user" << _username));
// 2. Client receives 'ok'
pushResponse(BSON("ok" << 1));
@@ -152,8 +147,7 @@ public:
<< "MONGODB-X509"
<< "db"
<< "$external"
- << "user"
- << _username);
+ << "user" << _username);
}
diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp
index 888d702ab6e..a5d2832c016 100644
--- a/src/mongo/client/connection_string_connect.cpp
+++ b/src/mongo/client/connection_string_connect.cpp
@@ -111,4 +111,4 @@ std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationNa
MONGO_UNREACHABLE;
}
-} // namepspace mongo
+} // namespace mongo
diff --git a/src/mongo/client/constants.h b/src/mongo/client/constants.h
index c96076c84ef..5f282e0da7f 100644
--- a/src/mongo/client/constants.h
+++ b/src/mongo/client/constants.h
@@ -49,4 +49,4 @@ enum ResultFlagType {
*/
ResultFlag_AwaitCapable = 8
};
-}
+} // namespace mongo
diff --git a/src/mongo/client/cyrus_sasl_client_session.cpp b/src/mongo/client/cyrus_sasl_client_session.cpp
index 36d7d588142..8b07afcc339 100644
--- a/src/mongo/client/cyrus_sasl_client_session.cpp
+++ b/src/mongo/client/cyrus_sasl_client_session.cpp
@@ -147,8 +147,7 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CyrusSaslClientContext,
if (result != SASL_OK) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Could not initialize sasl client components ("
- << sasl_errstring(result, NULL, NULL)
- << ")");
+ << sasl_errstring(result, NULL, NULL) << ")");
}
SaslClientSession::create = createCyrusSaslClientSession;
@@ -311,4 +310,4 @@ Status CyrusSaslClientSession::step(StringData inputData, std::string* outputDat
return Status(ErrorCodes::ProtocolError, sasl_errdetail(_saslConnection));
}
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index befee2a0350..951b3fd58f5 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -73,11 +73,11 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::list;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
using executor::RemoteCommandRequest;
@@ -221,23 +221,16 @@ std::pair<rpc::UniqueReply, DBClientBase*> DBClientBase::runCommandWithTarget(
// more helpful error message. Note that call() can itself throw a socket exception.
uassert(ErrorCodes::HostUnreachable,
str::stream() << "network error while attempting to run "
- << "command '"
- << request.getCommandName()
- << "' "
- << "on host '"
- << host
- << "' ",
+ << "command '" << request.getCommandName() << "' "
+ << "on host '" << host << "' ",
call(requestMsg, replyMsg, false, &host));
auto commandReply = parseCommandReplyMessage(host, replyMsg);
uassert(ErrorCodes::RPCProtocolNegotiationFailed,
str::stream() << "Mismatched RPC protocols - request was '"
- << networkOpToString(requestMsg.operation())
- << "' '"
- << " but reply was '"
- << networkOpToString(replyMsg.operation())
- << "' ",
+ << networkOpToString(requestMsg.operation()) << "' '"
+ << " but reply was '" << networkOpToString(replyMsg.operation()) << "' ",
rpc::protocolForMessage(requestMsg) == commandReply->getProtocol());
return {std::move(commandReply), this};
@@ -314,8 +307,7 @@ bool DBClientBase::runPseudoCommand(StringData db,
if (status == ErrorCodes::CommandResultSchemaViolation) {
msgasserted(28624,
str::stream() << "Received bad " << realCommandName
- << " response from server: "
- << info);
+ << " response from server: " << info);
} else if (status == ErrorCodes::CommandNotFound) {
NamespaceString pseudoCommandNss(db, pseudoCommandCol);
// if this throws we just let it escape as that's how runCommand works.
@@ -614,10 +606,7 @@ void DBClientBase::findN(vector<BSONObj>& out,
uassert(10276,
str::stream() << "DBClientBase::findN: transport error: " << getServerAddress()
- << " ns: "
- << ns
- << " query: "
- << query.toString(),
+ << " ns: " << ns << " query: " << query.toString(),
c.get());
if (c->hasResultFlag(ResultFlag_ShardConfigStale)) {
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index ea8914e3fed..8034571fb2e 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -123,7 +123,7 @@ public:
/** query N objects from the database into an array. makes sense mostly when you want a small
* number of results. if a huge number, use query() and iterate the cursor.
- */
+ */
void findN(std::vector<BSONObj>& out,
const std::string& ns,
Query query,
@@ -293,9 +293,9 @@ public:
int options = 0);
/**
- * Authenticates to another cluster member using appropriate authentication data.
- * @return true if the authentication was successful
- */
+ * Authenticates to another cluster member using appropriate authentication data.
+ * @return true if the authentication was successful
+ */
virtual Status authenticateInternalUser();
/**
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index e0fc4c4eae4..cbe3aadbd4f 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -80,10 +80,10 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::map;
using std::string;
+using std::unique_ptr;
MONGO_FAIL_POINT_DEFINE(dbClientConnectionDisableChecksum);
@@ -109,8 +109,8 @@ private:
};
/**
-* Initializes the wire version of conn, and returns the isMaster reply.
-*/
+ * Initializes the wire version of conn, and returns the isMaster reply.
+ */
executor::RemoteCommandResponse initWireVersion(DBClientConnection* conn,
StringData applicationName,
const MongoURI& uri,
@@ -327,8 +327,7 @@ Status DBClientConnection::connectSocketOnly(const HostAndPort& serverAddress) {
if (!sws.isOK()) {
return Status(ErrorCodes::HostUnreachable,
str::stream() << "couldn't connect to server " << _serverAddress.toString()
- << ", connection attempt failed: "
- << sws.getStatus());
+ << ", connection attempt failed: " << sws.getStatus());
}
{
@@ -622,9 +621,7 @@ bool DBClientConnection::call(Message& toSend,
if (assertOk)
uasserted(10278,
str::stream() << "dbclient error communicating with server "
- << getServerAddress()
- << ": "
- << redact(errStatus));
+ << getServerAddress() << ": " << redact(errStatus));
return false;
};
@@ -672,7 +669,7 @@ void DBClientConnection::checkResponse(const std::vector<BSONObj>& batch,
string* host) {
/* check for errors. the only one we really care about at
* this stage is "not master"
- */
+ */
*retry = false;
*host = _serverAddress.toString();
@@ -701,8 +698,7 @@ void DBClientConnection::handleNotMasterResponse(const BSONObj& replyBody,
monitor->failedHost(_serverAddress,
{ErrorCodes::NotMaster,
str::stream() << "got not master from: " << _serverAddress
- << " of repl set: "
- << _parentReplSetName});
+ << " of repl set: " << _parentReplSetName});
}
_markFailed(kSetFlag);
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 737c9c67d77..bb5650cac59 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -59,9 +59,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -255,7 +255,7 @@ void DBClientCursor::requestMore() {
invariant(_scopedHost.size());
DBClientBase::withConnection_do_not_use(_scopedHost, [&](DBClientBase* conn) {
- ON_BLOCK_EXIT([&, origClient = _client ] { _client = origClient; });
+ ON_BLOCK_EXIT([&, origClient = _client] { _client = origClient; });
_client = conn;
doRequestMore();
});
diff --git a/src/mongo/client/dbclient_cursor_test.cpp b/src/mongo/client/dbclient_cursor_test.cpp
index 292ce2c8bb5..234c68ce4af 100644
--- a/src/mongo/client/dbclient_cursor_test.cpp
+++ b/src/mongo/client/dbclient_cursor_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/client/dbclient_cursor.h"
#include "mongo/client/dbclient_connection.h"
+#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index d2c61154fc2..40cd2f220f7 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -50,12 +50,12 @@
namespace mongo {
-using std::shared_ptr;
-using std::unique_ptr;
using std::endl;
using std::map;
using std::set;
+using std::shared_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -315,9 +315,9 @@ DBClientConnection* DBClientReplicaSet::checkMaster() {
}
if (newConn == NULL || !errmsg.empty()) {
- const std::string message = str::stream() << "can't connect to new replica set master ["
- << _masterHost.toString() << "]"
- << (errmsg.empty() ? "" : ", err: ") << errmsg;
+ const std::string message = str::stream()
+ << "can't connect to new replica set master [" << _masterHost.toString() << "]"
+ << (errmsg.empty() ? "" : ", err: ") << errmsg;
monitor->failedHost(_masterHost, {ErrorCodes::Error(40659), message});
uasserted(ErrorCodes::FailedToSatisfyReadPreference, message);
}
@@ -538,9 +538,9 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != NULL ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
string lastNodeErrMsg;
@@ -590,9 +590,9 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != NULL ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
string lastNodeErrMsg;
@@ -720,7 +720,7 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
return _master.get();
}
- auto dtor = [host = _lastSlaveOkHost.toString()](DBClientConnection * ptr) {
+ auto dtor = [host = _lastSlaveOkHost.toString()](DBClientConnection* ptr) {
globalConnPool.release(host, ptr);
};
@@ -769,9 +769,9 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != NULL ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
string lastNodeErrMsg;
@@ -883,8 +883,9 @@ void DBClientReplicaSet::checkResponse(const std::vector<BSONObj>& batch,
// query could potentially go to a secondary, so see if this is an error (or empty) and
// retry if we're not past our retry limit.
- if (networkError || (hasErrField(dataObj) && !dataObj["code"].eoo() &&
- dataObj["code"].Int() == ErrorCodes::NotMasterOrSecondary)) {
+ if (networkError ||
+ (hasErrField(dataObj) && !dataObj["code"].eoo() &&
+ dataObj["code"].Int() == ErrorCodes::NotMasterOrSecondary)) {
if (_lazyState._lastClient == _lastSlaveOkConn.get()) {
isntSecondary();
} else if (_lazyState._lastClient == _master.get()) {
@@ -906,8 +907,9 @@ void DBClientReplicaSet::checkResponse(const std::vector<BSONObj>& batch,
} else if (_lazyState._lastOp == dbQuery) {
// if query could not potentially go to a secondary, just mark the master as bad
- if (networkError || (hasErrField(dataObj) && !dataObj["code"].eoo() &&
- dataObj["code"].Int() == ErrorCodes::NotMasterNoSlaveOk)) {
+ if (networkError ||
+ (hasErrField(dataObj) && !dataObj["code"].eoo() &&
+ dataObj["code"].Int() == ErrorCodes::NotMasterNoSlaveOk)) {
if (_lazyState._lastClient == _master.get()) {
isntMaster();
}
@@ -958,8 +960,7 @@ std::pair<rpc::UniqueReply, DBClientBase*> DBClientReplicaSet::runCommandWithTar
uasserted(ErrorCodes::HostNotFound,
str::stream() << "Could not satisfy $readPreference of '" << readPref.toString()
- << "' while attempting to run command "
- << request.getCommandName());
+ << "' while attempting to run command " << request.getCommandName());
}
std::pair<rpc::UniqueReply, std::shared_ptr<DBClientBase>> DBClientReplicaSet::runCommandWithTarget(
@@ -1000,9 +1001,9 @@ bool DBClientReplicaSet::call(Message& toSend,
<< _getMonitor()->getName() << ", read pref is " << readPref->toString()
<< " (primary : "
<< (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
- << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
- ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]")
+ << ", lastTagged : "
+ << (_lastSlaveOkConn.get() != NULL ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
<< ")" << endl;
for (size_t retry = 0; retry < MAX_RETRY; retry++) {
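
[Editor's note] Two small lambda fixes recur in this file: an init-capture no longer carries a padding space before `]`, and `*` binds to the parameter type (`DBClientConnection* ptr`, not `DBClientConnection * ptr`). In isolation, with hypothetical stand-in names:

    #include <string>

    void release(const std::string&, int*);  // hypothetical stand-in

    auto makeDtor(const std::string& lastHost) {
        return [host = lastHost](int* ptr) {  // no space before ']'; '*' hugs the type
            release(host, ptr);
        };
    }
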
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 506a3cf6f16..9890c6f712b 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -57,8 +57,8 @@ typedef std::shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr;
class DBClientReplicaSet : public DBClientBase {
public:
using DBClientBase::query;
- using DBClientBase::update;
using DBClientBase::remove;
+ using DBClientBase::update;
/** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet
* connections. */
@@ -244,7 +244,7 @@ public:
protected:
/** Authorize. Authorizes all nodes as needed
- */
+ */
void _auth(const BSONObj& params) override;
private:
@@ -353,4 +353,4 @@ protected:
} _lazyState;
};
-}
+} // namespace mongo
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 8e707c0ce02..8843227ded5 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -75,13 +75,12 @@ Status parseCursorResponse(const BSONObj& obj,
if (cursorElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName
- << "' field: "
- << obj);
+ << "' field: " << obj);
}
if (!cursorElement.isABSONObj()) {
- return Status(
- ErrorCodes::FailedToParse,
- str::stream() << "'" << kCursorFieldName << "' field must be an object: " << obj);
+ return Status(ErrorCodes::FailedToParse,
+ str::stream()
+ << "'" << kCursorFieldName << "' field must be an object: " << obj);
}
BSONObj cursorObj = cursorElement.Obj();
@@ -89,17 +88,13 @@ Status parseCursorResponse(const BSONObj& obj,
if (cursorIdElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName << "."
- << kCursorIdFieldName
- << "' field: "
- << obj);
+ << kCursorIdFieldName << "' field: " << obj);
}
if (cursorIdElement.type() != mongo::NumberLong) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << kCursorIdFieldName
<< "' field must be a 'long' but was a '"
- << typeName(cursorIdElement.type())
- << "': "
- << obj);
+ << typeName(cursorIdElement.type()) << "': " << obj);
}
batchData->cursorId = cursorIdElement.numberLong();
@@ -107,25 +102,19 @@ Status parseCursorResponse(const BSONObj& obj,
if (namespaceElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain "
- << "'"
- << kCursorFieldName
- << "."
- << kNamespaceFieldName
- << "' field: "
- << obj);
+ << "'" << kCursorFieldName << "." << kNamespaceFieldName
+ << "' field: " << obj);
}
if (namespaceElement.type() != mongo::String) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' field must be a string: "
- << obj);
+ << "' field must be a string: " << obj);
}
const NamespaceString tempNss(namespaceElement.valueStringData());
if (!tempNss.isValid()) {
return Status(ErrorCodes::BadValue,
str::stream() << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' contains an invalid namespace: "
- << obj);
+ << "' contains an invalid namespace: " << obj);
}
batchData->nss = tempNss;
@@ -133,27 +122,20 @@ Status parseCursorResponse(const BSONObj& obj,
if (batchElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName << "."
- << batchFieldName
- << "' field: "
- << obj);
+ << batchFieldName << "' field: " << obj);
}
if (!batchElement.isABSONObj()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << batchFieldName
- << "' field must be an array: "
- << obj);
+ << "' field must be an array: " << obj);
}
BSONObj batchObj = batchElement.Obj();
for (auto itemElement : batchObj) {
if (!itemElement.isABSONObj()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "found non-object " << itemElement << " in "
- << "'"
- << kCursorFieldName
- << "."
- << batchFieldName
- << "' field: "
- << obj);
+ << "'" << kCursorFieldName << "." << batchFieldName
+ << "' field: " << obj);
}
batchData->documents.push_back(itemElement.Obj());
}
diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp
index 0f7768d458f..de38843b248 100644
--- a/src/mongo/client/fetcher_test.cpp
+++ b/src/mongo/client/fetcher_test.cpp
@@ -405,8 +405,7 @@ TEST_F(FetcherTest, FindCommandFailed2) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "bad hint"
- << "code"
- << int(ErrorCodes::BadValue)),
+ << "code" << int(ErrorCodes::BadValue)),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -432,10 +431,8 @@ TEST_F(FetcherTest, CursorIdFieldMissing) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -446,10 +443,8 @@ TEST_F(FetcherTest, CursorIdNotLongNumber) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123.1 << "ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -469,11 +464,11 @@ TEST_F(FetcherTest, NamespaceFieldMissing) {
TEST_F(FetcherTest, NamespaceNotAString) {
ASSERT_OK(fetcher->schedule());
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 123LL << "ns" << 123 << "firstBatch" << BSONArray()) << "ok"
- << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor"
+ << BSON("id" << 123LL << "ns" << 123 << "firstBatch" << BSONArray())
+ << "ok" << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_STRING_CONTAINS(status.reason(), "'cursor.ns' field must be a string");
}
@@ -482,10 +477,8 @@ TEST_F(FetcherTest, NamespaceEmpty) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns"
<< ""
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -496,10 +489,8 @@ TEST_F(FetcherTest, NamespaceMissingCollectionName) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns"
<< "db."
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -510,8 +501,7 @@ TEST_F(FetcherTest, FirstBatchFieldMissing) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll")
- << "ok"
- << 1),
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -522,10 +512,8 @@ TEST_F(FetcherTest, FirstBatchNotAnArray) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << 123)
- << "ok"
- << 1),
+ << "firstBatch" << 123)
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -536,10 +524,8 @@ TEST_F(FetcherTest, FirstBatchArrayContainsNonObject) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(8))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(8))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -551,10 +537,8 @@ TEST_F(FetcherTest, FirstBatchEmptyArray) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -568,10 +552,8 @@ TEST_F(FetcherTest, FetchOneDocument) {
const BSONObj doc = BSON("_id" << 1);
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -596,10 +578,8 @@ TEST_F(FetcherTest, SetNextActionToContinueWhenNextBatchIsNotAvailable) {
};
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -629,10 +609,8 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
Milliseconds(100),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -650,10 +628,8 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
Milliseconds(200),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -671,10 +647,8 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc3))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc3))
+ << "ok" << 1),
Milliseconds(300),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
@@ -698,10 +672,8 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -715,10 +687,8 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
const BSONObj doc2 = BSON("_id" << 2);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -761,10 +731,8 @@ TEST_F(FetcherTest, CancelDuringCallbackPutsFetcherInShutdown) {
const BSONObj doc = BSON("_id" << 1);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kInactive);
@@ -782,10 +750,8 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -800,10 +766,8 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -839,10 +803,8 @@ TEST_F(FetcherTest, EmptyGetMoreRequestAfterFirstBatchMakesFetcherInactiveAndKil
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kInactive);
@@ -896,10 +858,8 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -916,10 +876,8 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kInactive);
@@ -993,10 +951,8 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
@@ -1016,10 +972,8 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(doc2))
- << "ok"
- << 1),
+ << "nextBatch" << BSON_ARRAY(doc2))
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
@@ -1059,10 +1013,8 @@ TEST_F(FetcherTest, FetcherAppliesRetryPolicyToFirstCommandButNotToGetMoreReques
processNetworkResponse(rs, ReadyQueueState::kHasReadyRequests, FetcherState::kActive);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSON_ARRAY(doc))
- << "ok"
- << 1),
+ << "firstBatch" << BSON_ARRAY(doc))
+ << "ok" << 1),
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
ASSERT_OK(status);
@@ -1110,10 +1062,8 @@ TEST_F(FetcherTest, FetcherResetsInternalFinishCallbackFunctionPointerAfterLastC
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch"
- << BSONArray())
- << "ok"
- << 1),
+ << "firstBatch" << BSONArray())
+ << "ok" << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
diff --git a/src/mongo/client/mongo_uri.cpp b/src/mongo/client/mongo_uri.cpp
index e11f9b0a06f..433c33c8e61 100644
--- a/src/mongo/client/mongo_uri.cpp
+++ b/src/mongo/client/mongo_uri.cpp
@@ -169,8 +169,7 @@ MongoURI::OptionsMap parseOptions(StringData options, StringData url) {
if (opt.empty()) {
uasserted(ErrorCodes::FailedToParse,
str::stream()
- << "Missing a key/value pair in the options for mongodb:// URL: "
- << url);
+ << "Missing a key/value pair in the options for mongodb:// URL: " << url);
}
const auto kvPair = partitionForward(opt, '=');
@@ -190,8 +189,7 @@ MongoURI::OptionsMap parseOptions(StringData options, StringData url) {
if (valRaw.empty()) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "Missing value for key '" << keyRaw
- << "' in the options for mongodb:// URL: "
- << url);
+ << "' in the options for mongodb:// URL: " << url);
}
const auto val = uassertStatusOKWithContext(
uriDecode(valRaw),
@@ -259,8 +257,7 @@ URIParts::URIParts(StringData uri) {
if (schemeEnd == std::string::npos) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "URI must begin with " << kURIPrefix << " or " << kURISRVPrefix
- << ": "
- << uri);
+ << ": " << uri);
}
const auto uriWithoutPrefix = uri.substr(schemeEnd + 3);
scheme = uri.substr(0, schemeEnd);
@@ -380,10 +377,10 @@ MongoURI MongoURI::parseImpl(const std::string& url) {
}
if ((host.find('/') != std::string::npos) && !StringData(host).endsWith(".sock")) {
- uasserted(
- ErrorCodes::FailedToParse,
- str::stream() << "'" << host << "' in '" << url
- << "' appears to be a unix socket, but does not end in '.sock'");
+ uasserted(ErrorCodes::FailedToParse,
+ str::stream()
+ << "'" << host << "' in '" << url
+ << "' appears to be a unix socket, but does not end in '.sock'");
}
servers.push_back(uassertStatusOK(HostAndPort::parse(host)));
diff --git a/src/mongo/client/mongo_uri_test.cpp b/src/mongo/client/mongo_uri_test.cpp
index 729d04d8d83..6aa3e4fc5b3 100644
--- a/src/mongo/client/mongo_uri_test.cpp
+++ b/src/mongo/client/mongo_uri_test.cpp
@@ -828,7 +828,8 @@ TEST(MongoURI, srvRecordTest) {
{"localhost.sub.test.build.10gen.cc", 27017},
},
{
- {"ssl", "true"}, {"replicaSet", "repl0"},
+ {"ssl", "true"},
+ {"replicaSet", "repl0"},
},
success},
@@ -842,7 +843,8 @@ TEST(MongoURI, srvRecordTest) {
{"localhost.sub.test.build.10gen.cc", 27017},
},
{
- {"ssl", "true"}, {"replicaSet", "repl0"},
+ {"ssl", "true"},
+ {"replicaSet", "repl0"},
},
success},
@@ -988,19 +990,19 @@ TEST(MongoURI, srvRecordTest) {
for (const auto& test : tests) {
auto rs = MongoURI::parse(test.uri);
if (test.expectation == failure) {
- ASSERT_FALSE(rs.getStatus().isOK()) << "Failing URI: " << test.uri
- << " data on line: " << test.lineNumber;
+ ASSERT_FALSE(rs.getStatus().isOK())
+ << "Failing URI: " << test.uri << " data on line: " << test.lineNumber;
continue;
}
ASSERT_OK(rs.getStatus()) << "Failed on URI: " << test.uri
<< " data on line: " << test.lineNumber;
auto rv = rs.getValue();
- ASSERT_EQ(rv.getUser(), test.user) << "Failed on URI: " << test.uri
- << " data on line: " << test.lineNumber;
- ASSERT_EQ(rv.getPassword(), test.password) << "Failed on URI: " << test.uri
- << " data on line : " << test.lineNumber;
- ASSERT_EQ(rv.getDatabase(), test.database) << "Failed on URI: " << test.uri
- << " data on line : " << test.lineNumber;
+ ASSERT_EQ(rv.getUser(), test.user)
+ << "Failed on URI: " << test.uri << " data on line: " << test.lineNumber;
+ ASSERT_EQ(rv.getPassword(), test.password)
+ << "Failed on URI: " << test.uri << " data on line : " << test.lineNumber;
+ ASSERT_EQ(rv.getDatabase(), test.database)
+ << "Failed on URI: " << test.uri << " data on line : " << test.lineNumber;
compareOptions(test.lineNumber, test.uri, rv.getOptions(), test.options);
std::vector<HostAndPort> hosts(begin(rv.getServers()), end(rv.getServers()));
@@ -1009,9 +1011,9 @@ TEST(MongoURI, srvRecordTest) {
std::sort(begin(expectedHosts), end(expectedHosts));
for (std::size_t i = 0; i < std::min(hosts.size(), expectedHosts.size()); ++i) {
- ASSERT_EQ(hosts[i], expectedHosts[i]) << "Failed on URI: " << test.uri
- << " at host number" << i
- << " data on line: " << test.lineNumber;
+ ASSERT_EQ(hosts[i], expectedHosts[i])
+ << "Failed on URI: " << test.uri << " at host number" << i
+ << " data on line: " << test.lineNumber;
}
ASSERT_TRUE(hosts.size() == expectedHosts.size())
<< "Failed on URI: " << test.uri << " Found " << hosts.size() << " hosts, expected "
diff --git a/src/mongo/client/native_sasl_client_session.cpp b/src/mongo/client/native_sasl_client_session.cpp
index b7f51fa0f68..3fa871bda5d 100644
--- a/src/mongo/client/native_sasl_client_session.cpp
+++ b/src/mongo/client/native_sasl_client_session.cpp
@@ -98,4 +98,4 @@ Status NativeSaslClientSession::step(StringData inputData, std::string* outputDa
}
return status.getStatus();
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/client/query_spec.h b/src/mongo/client/query_spec.h
index c115f50cd70..534a841842f 100644
--- a/src/mongo/client/query_spec.h
+++ b/src/mongo/client/query_spec.h
@@ -116,11 +116,7 @@ public:
std::string toString() const {
return str::stream() << "QSpec "
<< BSON("ns" << _ns << "n2skip" << _ntoskip << "n2return" << _ntoreturn
- << "options"
- << _options
- << "query"
- << _query
- << "fields"
+ << "options" << _options << "query" << _query << "fields"
<< _fields);
}
};
diff --git a/src/mongo/client/read_preference.cpp b/src/mongo/client/read_preference.cpp
index 4391db7c388..b71825ad395 100644
--- a/src/mongo/client/read_preference.cpp
+++ b/src/mongo/client/read_preference.cpp
@@ -86,16 +86,9 @@ StatusWith<ReadPreference> parseReadPreferenceMode(StringData prefStr) {
}
return Status(ErrorCodes::FailedToParse,
str::stream() << "Could not parse $readPreference mode '" << prefStr
- << "'. Only the modes '"
- << kPrimaryOnly
- << "', '"
- << kPrimaryPreferred
- << "', '"
- << kSecondaryOnly
- << "', '"
- << kSecondaryPreferred
- << "', and '"
- << kNearest
+ << "'. Only the modes '" << kPrimaryOnly << "', '"
+ << kPrimaryPreferred << "', '" << kSecondaryOnly << "', '"
+ << kSecondaryPreferred << "', and '" << kNearest
<< "' are supported.");
}
@@ -206,8 +199,8 @@ StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromInnerBSON(const BSO
if (maxStalenessSecondsValue && maxStalenessSecondsValue < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << kMaxStalenessSecondsFieldName
- << " must be a non-negative integer");
+ str::stream()
+ << kMaxStalenessSecondsFieldName << " must be a non-negative integer");
}
if (maxStalenessSecondsValue && maxStalenessSecondsValue >= Seconds::max().count()) {
@@ -218,9 +211,9 @@ StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromInnerBSON(const BSO
if (maxStalenessSecondsValue && maxStalenessSecondsValue < kMinimalMaxStalenessValue.count()) {
return Status(ErrorCodes::MaxStalenessOutOfRange,
- str::stream() << kMaxStalenessSecondsFieldName
- << " value can not be less than "
- << kMinimalMaxStalenessValue.count());
+ str::stream()
+ << kMaxStalenessSecondsFieldName << " value can not be less than "
+ << kMinimalMaxStalenessValue.count());
}
if ((mode == ReadPreference::PrimaryOnly) && maxStalenessSecondsValue) {
@@ -236,9 +229,7 @@ StatusWith<ReadPreferenceSetting> ReadPreferenceSetting::fromInnerBSON(const BSO
if (elem.type() != mongo::Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "$readPreference has incorrect type: expected "
- << mongo::Object
- << " but got "
- << elem.type());
+ << mongo::Object << " but got " << elem.type());
}
return fromInnerBSON(elem.Obj());
}
diff --git a/src/mongo/client/read_preference_test.cpp b/src/mongo/client/read_preference_test.cpp
index f3a0dc78941..92bc2516be0 100644
--- a/src/mongo/client/read_preference_test.cpp
+++ b/src/mongo/client/read_preference_test.cpp
@@ -55,8 +55,7 @@ TEST(ReadPreferenceSetting, ParseValid) {
// that the tags are parsed as the empty TagSet.
checkParse(BSON("mode"
<< "primary"
- << "tags"
- << BSON_ARRAY(BSONObj())),
+ << "tags" << BSON_ARRAY(BSONObj())),
ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly()));
checkParse(BSON("mode"
@@ -69,14 +68,12 @@ TEST(ReadPreferenceSetting, ParseValid) {
<< "ny")))));
checkParse(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << kMinMaxStaleness.count()),
+ << "maxStalenessSeconds" << kMinMaxStaleness.count()),
ReadPreferenceSetting(ReadPreference::SecondaryOnly, kMinMaxStaleness));
checkParse(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << 0),
+ << "maxStalenessSeconds" << 0),
ReadPreferenceSetting(ReadPreference::SecondaryOnly, Seconds(0)));
checkParse(BSON("mode"
@@ -84,8 +81,7 @@ TEST(ReadPreferenceSetting, ParseValid) {
<< "tags"
<< BSON_ARRAY(BSON("dc"
<< "ny"))
- << "maxStalenessSeconds"
- << kMinMaxStaleness.count()),
+ << "maxStalenessSeconds" << kMinMaxStaleness.count()),
ReadPreferenceSetting(ReadPreference::SecondaryOnly,
TagSet(BSON_ARRAY(BSON("dc"
<< "ny"))),
@@ -149,8 +145,7 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// maxStalenessSeconds is negative
checkParseFails(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << -1));
+ << "maxStalenessSeconds" << -1));
// maxStalenessSeconds is NaN
checkParseFails(BSON("mode"
@@ -161,8 +156,7 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// maxStalenessSeconds and primary
checkParseFails(BSON("mode"
<< "primary"
- << "maxStalenessSeconds"
- << kMinMaxStaleness.count()));
+ << "maxStalenessSeconds" << kMinMaxStaleness.count()));
// maxStalenessSeconds is less than min
checkParseFailsWithError(BSON("mode"
@@ -174,13 +168,11 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// maxStalenessSeconds is greater than max
checkParseFails(BSON("mode"
<< "secondary"
- << "maxStalenessSeconds"
- << Seconds::max().count()));
+ << "maxStalenessSeconds" << Seconds::max().count()));
checkParseContainerFailsWithError(BSON("$query" << BSON("pang"
<< "pong")
- << "$readPreference"
- << 2),
+ << "$readPreference" << 2),
ErrorCodes::TypeMismatch);
}
diff --git a/src/mongo/client/remote_command_retry_scheduler_test.cpp b/src/mongo/client/remote_command_retry_scheduler_test.cpp
index 4ef9699d225..211802e589b 100644
--- a/src/mongo/client/remote_command_retry_scheduler_test.cpp
+++ b/src/mongo/client/remote_command_retry_scheduler_test.cpp
@@ -402,8 +402,7 @@ TEST_F(RemoteCommandRetrySchedulerTest, SchedulerIgnoresEmbeddedErrorInSuccessfu
// wire protocol.
ResponseStatus response(BSON("ok" << 0 << "code" << int(ErrorCodes::FailedToParse) << "errmsg"
<< "injected error"
- << "z"
- << 456),
+ << "z" << 456),
Milliseconds(100));
processNetworkResponse(response);
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 753b358badf..dfbf8e59a3f 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -57,9 +57,9 @@
namespace mongo {
-using std::shared_ptr;
using std::numeric_limits;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
@@ -549,7 +549,7 @@ void Refresher::scheduleIsMaster(const HostAndPort& host, WithLock withLock) {
_set->executor
->scheduleRemoteCommand(
std::move(request),
- [ copy = *this, host, timer = Timer() ](
+ [copy = *this, host, timer = Timer()](
const executor::TaskExecutor::RemoteCommandCallbackArgs& result) mutable {
stdx::lock_guard<stdx::mutex> lk(copy._set->mutex);
// Ignore the reply and return if we are no longer the current scan. This might
@@ -690,8 +690,7 @@ void Refresher::receivedIsMaster(const HostAndPort& from,
failedHost(from,
{ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Target replica set name " << reply.setName
- << " does not match the monitored set name "
- << _set->name});
+ << " does not match the monitored set name " << _set->name});
return;
}
@@ -769,12 +768,11 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
// Reject if config version is older. This is for backwards compatibility with nodes in pv0
// since they don't have the same ordering with pv1 electionId.
if (reply.configVersion < _set->configVersion) {
- return {ErrorCodes::NotMaster,
- str::stream() << "Node " << from
- << " believes it is primary, but its config version "
- << reply.configVersion
- << " is older than the most recent config version "
- << _set->configVersion};
+ return {
+ ErrorCodes::NotMaster,
+ str::stream() << "Node " << from << " believes it is primary, but its config version "
+ << reply.configVersion << " is older than the most recent config version "
+ << _set->configVersion};
}
if (reply.electionId.isSet()) {
@@ -783,12 +781,11 @@ Status Refresher::receivedIsMasterFromMaster(const HostAndPort& from, const IsMa
// because configVersion needs to be incremented whenever the protocol version is changed.
if (reply.configVersion == _set->configVersion && _set->maxElectionId.isSet() &&
_set->maxElectionId.compare(reply.electionId) > 0) {
- return {ErrorCodes::NotMaster,
- str::stream() << "Node " << from
- << " believes it is primary, but its election id "
- << reply.electionId
- << " is older than the most recent election id "
- << _set->maxElectionId};
+ return {
+ ErrorCodes::NotMaster,
+ str::stream() << "Node " << from << " believes it is primary, but its election id "
+ << reply.electionId << " is older than the most recent election id "
+ << _set->maxElectionId};
}
_set->maxElectionId = reply.electionId;
@@ -1293,9 +1290,7 @@ void SetState::notify(bool finishedScan) {
Status SetState::makeUnsatisfedReadPrefError(const ReadPreferenceSetting& criteria) const {
return Status(ErrorCodes::FailedToSatisfyReadPreference,
str::stream() << "Could not find host matching read preference "
- << criteria.toString()
- << " for set "
- << name);
+ << criteria.toString() << " for set " << name);
}
void SetState::init() {
@@ -1390,4 +1385,4 @@ void ScanState::retryAllTriedHosts(PseudoRandom& rand) {
std::shuffle(hostsToScan.begin(), hostsToScan.end(), rand.urbg());
triedHosts = waitingFor;
}
-}
+} // namespace mongo
diff --git a/src/mongo/client/replica_set_monitor.h b/src/mongo/client/replica_set_monitor.h
index 92aa3a15add..c78fde4b353 100644
--- a/src/mongo/client/replica_set_monitor.h
+++ b/src/mongo/client/replica_set_monitor.h
@@ -31,7 +31,6 @@
#include <atomic>
#include <memory>
-#include <memory>
#include <set>
#include <string>
diff --git a/src/mongo/client/replica_set_monitor_internal_test.cpp b/src/mongo/client/replica_set_monitor_internal_test.cpp
index b796dff516c..1a90deed113 100644
--- a/src/mongo/client/replica_set_monitor_internal_test.cpp
+++ b/src/mongo/client/replica_set_monitor_internal_test.cpp
@@ -367,22 +367,10 @@ TEST_F(IsMasterReplyTest, IsMasterReplyRSNotInitiated) {
BSONObj ismaster = BSON(
"ismaster" << false << "secondary" << false << "info"
<< "can't get local.system.replset config from self or any seed (EMPTYCONFIG)"
- << "isreplicaset"
- << true
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "isreplicaset" << true << "maxBsonObjectSize" << 16777216
+ << "maxMessageSizeBytes" << 48000000 << "maxWriteBatchSize" << 1000
+ << "localTime" << mongo::jsTime() << "maxWireVersion" << 2 << "minWireVersion"
+ << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort(), -1, ismaster);
@@ -401,34 +389,15 @@ TEST_F(IsMasterReplyTest, IsMasterReplyRSNotInitiated) {
TEST_F(IsMasterReplyTest, IsMasterReplyRSPrimary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 1
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
- << BSON_ARRAY("mongo.example:3000")
- << "primary"
+ << "setVersion" << 1 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << true << "secondary" << false << "hosts"
+ << BSON_ARRAY("mongo.example:3000") << "primary"
<< "mongo.example:3000"
<< "me"
<< "mongo.example:3000"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3000"), -1, ismaster);
@@ -448,38 +417,16 @@ TEST_F(IsMasterReplyTest, IsMasterReplyRSPrimary) {
TEST_F(IsMasterReplyTest, IsMasterReplyPassiveSecondary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 2
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << BSON_ARRAY("mongo.example:3000")
- << "passives"
- << BSON_ARRAY("mongo.example:3001")
- << "primary"
+ << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << BSON_ARRAY("mongo.example:3000") << "passives"
+ << BSON_ARRAY("mongo.example:3001") << "primary"
<< "mongo.example:3000"
- << "passive"
- << true
- << "me"
+ << "passive" << true << "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -501,38 +448,15 @@ TEST_F(IsMasterReplyTest, IsMasterReplyPassiveSecondary) {
TEST_F(IsMasterReplyTest, IsMasterReplyHiddenSecondary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 2
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << BSON_ARRAY("mongo.example:3000")
- << "primary"
+ << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << BSON_ARRAY("mongo.example:3000") << "primary"
<< "mongo.example:3000"
- << "passive"
- << true
- << "hidden"
- << true
- << "me"
+ << "passive" << true << "hidden" << true << "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "ok"
- << 1);
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -552,40 +476,22 @@ TEST_F(IsMasterReplyTest, IsMasterReplyHiddenSecondary) {
TEST_F(IsMasterReplyTest, IsMasterSecondaryWithTags) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion"
- << 2
- << "electionId"
- << OID("7fffffff0000000000000001")
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("mongo.example:3000"
<< "mongo.example:3001")
<< "primary"
<< "mongo.example:3000"
<< "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize"
- << 16777216
- << "maxMessageSizeBytes"
- << 48000000
- << "maxWriteBatchSize"
- << 1000
- << "localTime"
- << mongo::jsTime()
- << "maxWireVersion"
- << 2
- << "minWireVersion"
- << 0
- << "tags"
+ << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
+ << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
+ << "maxWireVersion" << 2 << "minWireVersion" << 0 << "tags"
<< BSON("dc"
<< "nyc"
<< "use"
<< "production")
- << "ok"
- << 1);
+ << "ok" << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index 452609a39bd..2e5e9567fb6 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -51,15 +51,15 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
using executor::NetworkInterface;
using executor::NetworkInterfaceThreadPool;
-using executor::TaskExecutorPool;
using executor::TaskExecutor;
+using executor::TaskExecutorPool;
using executor::ThreadPoolTaskExecutor;
ReplicaSetMonitorManager::ReplicaSetMonitorManager() {}
@@ -100,7 +100,7 @@ namespace {
void uassertNotMixingSSL(transport::ConnectSSLMode a, transport::ConnectSSLMode b) {
uassert(51042, "Mixing ssl modes with a single replica set is disallowed", a == b);
}
-}
+} // namespace
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
const ConnectionString& connStr) {
diff --git a/src/mongo/client/replica_set_monitor_scan_test.cpp b/src/mongo/client/replica_set_monitor_scan_test.cpp
index 02af7f2b7e6..a5d123b509e 100644
--- a/src/mongo/client/replica_set_monitor_scan_test.cpp
+++ b/src/mongo/client/replica_set_monitor_scan_test.cpp
@@ -60,16 +60,12 @@ TEST_F(CoreScanTest, CheckAllSeedsSerial) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
NextStep ns = refresher.getNextStep();
@@ -116,16 +112,12 @@ TEST_F(CoreScanTest, CheckAllSeedsParallel) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
// Now all hosts have returned data
@@ -163,16 +155,11 @@ TEST_F(CoreScanTest, NoMasterInitAllUp) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
NextStep ns = refresher.getNextStep();
@@ -209,17 +196,12 @@ TEST_F(CoreScanTest, MasterNotInSeeds_NoPrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c"
<< "d")
- << "ok"
- << true));
+ << "ok" << true));
}
// Only look at "d" after exhausting all other hosts
@@ -230,17 +212,12 @@ TEST_F(CoreScanTest, MasterNotInSeeds_NoPrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c"
<< "d")
- << "ok"
- << true));
+ << "ok" << true));
ns = refresher.getNextStep();
@@ -290,10 +267,7 @@ TEST_F(CoreScanTest, MasterNotInSeeds_PrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "primary"
<< "d"
<< "hosts"
@@ -301,8 +275,7 @@ TEST_F(CoreScanTest, MasterNotInSeeds_PrimaryInIsMaster) {
<< "b"
<< "c"
<< "d")
- << "ok"
- << true));
+ << "ok" << true));
}
NextStep ns = refresher.getNextStep();
@@ -347,14 +320,8 @@ TEST_F(CoreScanTest, SlavesUsableEvenIfNoMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << BSON_ARRAY("a")
- << "ok"
- << true));
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << BSON_ARRAY("a") << "ok" << true));
// Check intended conditions for entry to getNextStep().
ASSERT(state->currentScan->hostsToScan.empty());
@@ -399,16 +366,11 @@ TEST_F(CoreScanTest, MultipleMasterLastNodeWins) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
// Ensure the set primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -452,14 +414,9 @@ TEST_F(CoreScanTest, MasterIsSourceOfTruth) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << (primary ? primaryHosts : secondaryHosts)
- << "ok"
- << true));
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << (primary ? primaryHosts : secondaryHosts)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -506,14 +463,8 @@ TEST_F(CoreScanTest, MultipleMastersDisagree) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
- << hostsForSeed[i % 2]
- << "ok"
- << true));
+ << "ismaster" << true << "secondary" << false << "hosts"
+ << hostsForSeed[i % 2] << "ok" << true));
// Ensure the primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -542,14 +493,8 @@ TEST_F(CoreScanTest, MultipleMastersDisagree) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << hostsForSeed[0]
- << "ok"
- << true));
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << hostsForSeed[0] << "ok" << true));
// scan should be complete
ns = refresher.getNextStep();
@@ -596,16 +541,12 @@ TEST_F(CoreScanTest, GetMatchingDuringScan) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
bool hasPrimary = !(state->getMatchingHost(primaryOnly).empty());
bool hasSecondary = !(state->getMatchingHost(secondaryOnly).empty());
@@ -642,16 +583,12 @@ TEST_F(CoreScanTest, OutOfBandFailedHost) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
if (i >= 1) {
HostAndPort a("a");
@@ -699,18 +636,13 @@ TEST_F(CoreScanTest, NewPrimaryWithMaxElectionId) {
BSON(
"setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
<< "electionId"
<< OID::fromTerm(i) // electionId must increase every cycle.
- << "ok"
- << true));
+ << "ok" << true));
// Ensure the set primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -757,18 +689,13 @@ TEST_F(CoreScanTest, IgnoreElectionIdFromSecondaries) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
+ << "ismaster" << primary << "secondary" << !primary
<< "electionId"
- << (primary ? primaryElectionId : OID::gen())
- << "hosts"
+ << (primary ? primaryElectionId : OID::gen()) << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
}
// check that the SetState's maxElectionId == primary's electionId
@@ -802,20 +729,13 @@ TEST_F(CoreScanTest, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 1
- << "electionId"
- << secondElectionId
+ << "ismaster" << true << "secondary" << false
+ << "setVersion" << 1 << "electionId" << secondElectionId
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
auto node = state->findNode(ns.host);
ASSERT(node);
@@ -835,18 +755,12 @@ TEST_F(CoreScanTest, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "electionId"
- << firstElectionId
- << "hosts"
+ << "ismaster" << true << "secondary" << false
+ << "electionId" << firstElectionId << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
auto node = state->findNode(ns.host);
ASSERT(node);
@@ -868,16 +782,11 @@ TEST_F(CoreScanTest, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
+ << "ismaster" << false << "secondary" << true << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
auto node = state->findNode(ns.host);
ASSERT(node);
@@ -920,20 +829,13 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasNewerConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 1
- << "electionId"
- << OID("7fffffff0000000000000001")
+ << "ismaster" << true << "secondary" << false << "setVersion"
+ << 1 << "electionId" << OID("7fffffff0000000000000001")
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
// check that the SetState's maxElectionId == primary's electionId
ASSERT_EQUALS(state->maxElectionId, OID("7fffffff0000000000000001"));
@@ -946,20 +848,12 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasNewerConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 2
- << "electionId"
- << primaryElectionId
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "setVersion"
+ << 2 << "electionId" << primaryElectionId << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -981,20 +875,12 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasOlderConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "electionId"
- << primaryElectionId
- << "setVersion"
- << 2
- << "hosts"
+ << "ismaster" << true << "secondary" << false << "electionId"
+ << primaryElectionId << "setVersion" << 2 << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -1004,20 +890,13 @@ TEST_F(CoreScanTest, TwoPrimaries2ndHasOlderConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << true
- << "secondary"
- << false
- << "setVersion"
- << 1
- << "electionId"
- << OID("7fffffff0000000000000001")
+ << "ismaster" << true << "secondary" << false << "setVersion"
+ << 1 << "electionId" << OID("7fffffff0000000000000001")
<< "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c")
- << "ok"
- << true));
+ << "ok" << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -1050,19 +929,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSMatch) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (nonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1100,19 +972,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNoMatch) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1151,20 +1016,13 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNoPrimaryMatch) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << hosts << "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1205,20 +1063,13 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSAllFailed) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << false
- << "secondary"
- << true
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << false << "secondary" << true << "hosts"
+ << hosts << "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1258,19 +1109,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSAllButPrimaryFailed) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1310,19 +1154,12 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSOneSecondaryFailed) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate" << (primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1362,20 +1199,13 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNonStaleSecondaryMatched) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "lastWrite"
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTime)
- << "ok"
- << true));
+ << "opTime" << opTime)
+ << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1410,14 +1240,8 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSNoLastWrite) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "ok"
- << true));
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1451,14 +1275,8 @@ TEST_F(MaxStalenessMSTest, MaxStalenessMSZeroNoLastWrite) {
-1,
BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
- << "ok"
- << true));
+ << "ismaster" << primary << "secondary" << !primary
+ << "hosts" << hosts << "ok" << true));
ns = refresher.getNextStep();
}
@@ -1497,17 +1315,11 @@ TEST_F(MinOpTimeTest, MinOpTimeMatched) {
bool isNonStale = ns.host.host() == "b";
BSONObj bson = BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
+ << "ismaster" << primary << "secondary" << !primary << "hosts" << hosts
<< "lastWrite"
<< BSON("opTime" << (isNonStale ? opTimeNonStale.toBSON()
: opTimeStale.toBSON()))
- << "ok"
- << true);
+ << "ok" << true);
refresher.receivedIsMaster(ns.host, -1, bson);
ns = refresher.getNextStep();
}
@@ -1542,17 +1354,11 @@ TEST_F(MinOpTimeTest, MinOpTimeNotMatched) {
bool isNonStale = ns.host.host() == "a";
BSONObj bson = BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
+ << "ismaster" << primary << "secondary" << !primary << "hosts" << hosts
<< "lastWrite"
<< BSON("opTime" << (isNonStale ? opTimeNonStale.toBSON()
: opTimeStale.toBSON()))
- << "ok"
- << true);
+ << "ok" << true);
refresher.receivedIsMaster(ns.host, -1, bson);
ns = refresher.getNextStep();
}
@@ -1589,20 +1395,13 @@ TEST_F(MinOpTimeTest, MinOpTimeIgnored) {
bool isNonStale = ns.host.host() == "c";
BSONObj bson = BSON("setName"
<< "name"
- << "ismaster"
- << primary
- << "secondary"
- << !primary
- << "hosts"
- << hosts
+ << "ismaster" << primary << "secondary" << !primary << "hosts" << hosts
<< "lastWrite"
<< BSON("lastWriteDate"
<< (isNonStale || primary ? lastWriteDateNonStale
: lastWriteDateStale)
- << "opTime"
- << opTimeStale.toBSON())
- << "ok"
- << true);
+ << "opTime" << opTimeStale.toBSON())
+ << "ok" << true);
refresher.receivedIsMaster(ns.host, -1, bson);
ns = refresher.getNextStep();
}
@@ -1675,7 +1474,7 @@ public:
std::set<HostAndPort> members;
BSONArrayBuilder arrayBuilder;
- for (const auto & [ host, nodeState ] : replicaSet) {
+ for (const auto& [host, nodeState] : replicaSet) {
if (nodeState == NodeState::kStandalone) {
continue;
}
@@ -1691,15 +1490,11 @@ public:
auto bsonHosts = arrayBuilder.arr();
auto markIsMaster = [&](auto host, bool isMaster) {
- refresher.receivedIsMaster(
- host,
- -1,
- BSON("setName" << kSetName << "ismaster" << isMaster << "secondary" << !isMaster
- << "hosts"
- << bsonHosts
- << "ok"
- << true));
-
+ refresher.receivedIsMaster(host,
+ -1,
+ BSON("setName" << kSetName << "ismaster" << isMaster
+ << "secondary" << !isMaster << "hosts"
+ << bsonHosts << "ok" << true));
};
auto markFailed = [&](auto host) {
@@ -1770,13 +1565,16 @@ TEST_F(ChangeNotifierTest, NotifyNominal) {
// 'a' claims to be primary. Signal: Confirmed
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1784,13 +1582,16 @@ TEST_F(ChangeNotifierTest, NotifyNominal) {
// Getting another scan with the same details. Signal: null
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, currentId);
@@ -1813,13 +1614,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// 'a' claims to be primary. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1827,13 +1631,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// 'b' claims to be primary. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1841,13 +1648,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// All hosts tell us that they are not primary. Signal: null
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, currentId);
@@ -1855,13 +1665,16 @@ TEST_F(ChangeNotifierTest, NotifyElections) {
// 'a' claims to be primary again. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kPrimary,
+ HostAndPort("a"),
+ NodeState::kPrimary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1884,13 +1697,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Update the set with a full scan showing no primary. Signal: PossibleSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, ++currentId);
@@ -1898,13 +1714,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'a' as removed. Signal: null
updateSet({
{
- HostAndPort("a"), NodeState::kStandalone,
+ HostAndPort("a"),
+ NodeState::kStandalone,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().eventId, currentId);
@@ -1912,16 +1731,20 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Discover 'd' as secondary. Signal: PossibleSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kSecondary,
+ HostAndPort("b"),
+ NodeState::kSecondary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
{
- HostAndPort("d"), NodeState::kSecondary,
+ HostAndPort("d"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastPossibleSetId, ++currentId);
@@ -1929,16 +1752,20 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'b' as primary, no 'd'. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
{
- HostAndPort("d"), NodeState::kStandalone,
+ HostAndPort("d"),
+ NodeState::kStandalone,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1946,13 +1773,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'a' as removed. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("a"), NodeState::kStandalone,
+ HostAndPort("a"),
+ NodeState::kStandalone,
},
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
@@ -1960,13 +1790,16 @@ TEST_F(ChangeNotifierTest, NotifyReconfig) {
// Mark 'a' as secondary again. Signal: ConfirmedSet
updateSet({
{
- HostAndPort("b"), NodeState::kPrimary,
+ HostAndPort("b"),
+ NodeState::kPrimary,
},
{
- HostAndPort("c"), NodeState::kSecondary,
+ HostAndPort("c"),
+ NodeState::kSecondary,
},
{
- HostAndPort("a"), NodeState::kSecondary,
+ HostAndPort("a"),
+ NodeState::kSecondary,
},
});
ASSERT_EQ(listener().lastConfirmedSetId, ++currentId);
diff --git a/src/mongo/client/sasl_client_authenticate.h b/src/mongo/client/sasl_client_authenticate.h
index 307e837c38b..4b342a41a99 100644
--- a/src/mongo/client/sasl_client_authenticate.h
+++ b/src/mongo/client/sasl_client_authenticate.h
@@ -83,4 +83,4 @@ extern Future<void> (*saslClientAuthenticate)(auth::RunCommandHook runCommand,
* into "*payload". In all other cases, returns
*/
Status saslExtractPayload(const BSONObj& cmdObj, std::string* payload, BSONType* type);
-}
+} // namespace mongo
diff --git a/src/mongo/client/sasl_client_authenticate_impl.cpp b/src/mongo/client/sasl_client_authenticate_impl.cpp
index b025000fb5b..4d19e5597d2 100644
--- a/src/mongo/client/sasl_client_authenticate_impl.cpp
+++ b/src/mongo/client/sasl_client_authenticate_impl.cpp
@@ -56,9 +56,9 @@
namespace mongo {
-using std::endl;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
+using std::endl;
namespace {
diff --git a/src/mongo/client/sasl_scram_client_conversation.cpp b/src/mongo/client/sasl_scram_client_conversation.cpp
index c5a4b788f32..59d50bdb5e4 100644
--- a/src/mongo/client/sasl_scram_client_conversation.cpp
+++ b/src/mongo/client/sasl_scram_client_conversation.cpp
@@ -43,8 +43,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
StatusWith<bool> SaslSCRAMClientConversation::step(StringData inputData, std::string* outputData) {
_step++;
@@ -58,8 +58,8 @@ StatusWith<bool> SaslSCRAMClientConversation::step(StringData inputData, std::st
return _thirdStep(inputData, outputData);
default:
return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- str::stream() << "Invalid SCRAM authentication step: "
- << _step);
+ str::stream()
+ << "Invalid SCRAM authentication step: " << _step);
}
}
@@ -126,8 +126,7 @@ StatusWith<bool> SaslSCRAMClientConversation::_secondStep(StringData inputData,
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for first SCRAM server message, got "
- << input.size()
- << " expected at least 3");
+ << input.size() << " expected at least 3");
}
if (!str::startsWith(input[0], "r=") || input[0].size() < 3) {
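The str::stream() hunks above show the dominant pattern of this commit: clang-format 7 packs chained operator<< operands onto as few lines as possible, and when the whole chain cannot fit it prefers to break immediately after str::stream(). A sketch of the same chaining using std::ostringstream, since str::stream() is MongoDB-internal and a standard stream keeps the example self-contained:

#include <cstddef>
#include <sstream>
#include <string>

std::string mismatchMessage(std::size_t expected, std::size_t found) {
    std::ostringstream stream;
    // clang-format 7 packs as many << operands per line as fit, which is
    // why '<< input.size() << " expected at least 3"' now shares a line.
    stream << "Incorrect number of arguments for first SCRAM server message, got "
           << expected << " expected at least " << found;
    return stream.str();
}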
diff --git a/src/mongo/client/sasl_sspi.cpp b/src/mongo/client/sasl_sspi.cpp
index fa011a458a0..4cbb94136c0 100644
--- a/src/mongo/client/sasl_sspi.cpp
+++ b/src/mongo/client/sasl_sspi.cpp
@@ -439,8 +439,7 @@ sasl_client_plug_t sspiClientPlugin[] = {
{sspiPluginName, /* mechanism name */
112, /* TODO: (taken from gssapi) best mech additional security layer strength factor */
SASL_SEC_NOPLAINTEXT /* eam: copied from gssapi */
- |
- SASL_SEC_NOACTIVE | SASL_SEC_NOANONYMOUS | SASL_SEC_MUTUAL_AUTH |
+ | SASL_SEC_NOACTIVE | SASL_SEC_NOANONYMOUS | SASL_SEC_MUTUAL_AUTH |
SASL_SEC_PASS_CREDENTIALS, /* security_flags */
SASL_FEAT_NEEDSERVERFQDN | SASL_FEAT_WANT_CLIENT_FIRST | SASL_FEAT_ALLOWS_PROXY,
NULL, /* required prompt ids, NULL = user/pass only */
@@ -482,8 +481,7 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslSspiClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
str::stream() << "could not add SASL Client SSPI plugin " << sspiPluginName
- << ": "
- << sasl_errstring(ret, NULL, NULL));
+ << ": " << sasl_errstring(ret, NULL, NULL));
}
return Status::OK();
@@ -496,8 +494,7 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslPlainClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Could not add SASL Client PLAIN plugin " << sspiPluginName
- << ": "
- << sasl_errstring(ret, NULL, NULL));
+ << ": " << sasl_errstring(ret, NULL, NULL));
}
return Status::OK();
diff --git a/src/mongo/crypto/aead_encryption.cpp b/src/mongo/crypto/aead_encryption.cpp
index 030758850c1..b5e0ae4ce1c 100644
--- a/src/mongo/crypto/aead_encryption.cpp
+++ b/src/mongo/crypto/aead_encryption.cpp
@@ -101,9 +101,7 @@ Status _aesEncrypt(const SymmetricKey& key,
if (len != aesCBCCipherOutputLength(inLen)) {
return {ErrorCodes::BadValue,
str::stream() << "Encrypt error, expected cipher text of length "
- << aesCBCCipherOutputLength(inLen)
- << " but found "
- << len};
+ << aesCBCCipherOutputLength(inLen) << " but found " << len};
}
return Status::OK();
@@ -117,12 +115,11 @@ Status _aesDecrypt(const SymmetricKey& key,
std::size_t outLen,
std::size_t* resultLen) try {
// Check the plaintext buffer can fit the product of decryption
- auto[lowerBound, upperBound] = aesCBCExpectedPlaintextLen(in.length());
+ auto [lowerBound, upperBound] = aesCBCExpectedPlaintextLen(in.length());
if (upperBound > outLen) {
return {ErrorCodes::BadValue,
str::stream() << "Cleartext buffer of size " << outLen
- << " too small for output which can be as large as "
- << upperBound
+ << " too small for output which can be as large as " << upperBound
<< "]"};
}
@@ -145,13 +142,8 @@ Status _aesDecrypt(const SymmetricKey& key,
if (*resultLen < lowerBound || *resultLen > upperBound) {
return {ErrorCodes::BadValue,
str::stream() << "Decrypt error, expected clear text length in interval"
- << "["
- << lowerBound
- << ","
- << upperBound
- << "]"
- << "but found "
- << *resultLen};
+ << "[" << lowerBound << "," << upperBound << "]"
+ << "but found " << *resultLen};
}
/* Check that padding was removed.
@@ -211,8 +203,7 @@ Status aeadEncrypt(const SymmetricKey& key,
return Status(ErrorCodes::BadValue,
str::stream()
<< "AssociatedData for encryption is too large. Cannot be larger than "
- << kMaxAssociatedDataLength
- << " bytes.");
+ << kMaxAssociatedDataLength << " bytes.");
}
// According to the rfc on AES encryption, the associatedDataLength is defined as the
@@ -292,8 +283,7 @@ Status aeadEncryptWithIV(ConstDataRange key,
return Status(ErrorCodes::BadValue,
str::stream()
<< "AssociatedData for encryption is too large. Cannot be larger than "
- << kMaxAssociatedDataLength
- << " bytes.");
+ << kMaxAssociatedDataLength << " bytes.");
}
const uint8_t* macKey = reinterpret_cast<const uint8_t*>(key.data());
@@ -357,8 +347,7 @@ Status aeadDecrypt(const SymmetricKey& key,
return Status(ErrorCodes::BadValue,
str::stream()
<< "AssociatedData for encryption is too large. Cannot be larger than "
- << kMaxAssociatedDataLength
- << " bytes.");
+ << kMaxAssociatedDataLength << " bytes.");
}
const uint8_t* macKey = key.getKey();
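One hunk above is more than whitespace taste: `auto[lowerBound, upperBound]` becomes `auto [lowerBound, upperBound]`, because earlier clang-format releases did not recognize C++17 structured bindings and glued the binding list to auto. A minimal sketch (plaintextBounds is a hypothetical stand-in for aesCBCExpectedPlaintextLen):

#include <cstddef>
#include <utility>

// Hypothetical stand-in: bounds on the plaintext length that a
// ciphertext of cipherLen bytes could decrypt to.
std::pair<std::size_t, std::size_t> plaintextBounds(std::size_t cipherLen) {
    return {cipherLen > 16 ? cipherLen - 16 : 0, cipherLen};
}

void example() {
    auto [lowerBound, upperBound] = plaintextBounds(64);  // space after auto
    (void)lowerBound;
    (void)upperBound;
}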
diff --git a/src/mongo/crypto/mechanism_scram.h b/src/mongo/crypto/mechanism_scram.h
index ab3c39273fb..fcb16331830 100644
--- a/src/mongo/crypto/mechanism_scram.h
+++ b/src/mongo/crypto/mechanism_scram.h
@@ -291,11 +291,10 @@ public:
Presecrets<HashBlock>(password, salt, iterationCount));
const auto encodedSalt =
base64::encode(reinterpret_cast<const char*>(salt.data()), salt.size());
- return BSON(kIterationCountFieldName << iterationCount << kSaltFieldName << encodedSalt
- << kStoredKeyFieldName
- << secrets.storedKey().toString()
- << kServerKeyFieldName
- << secrets.serverKey().toString());
+ return BSON(kIterationCountFieldName
+ << iterationCount << kSaltFieldName << encodedSalt << kStoredKeyFieldName
+ << secrets.storedKey().toString() << kServerKeyFieldName
+ << secrets.serverKey().toString());
}
const HashBlock& clientKey() const {
diff --git a/src/mongo/crypto/sha_block.h b/src/mongo/crypto/sha_block.h
index 2d2c3684e07..78308bb568b 100644
--- a/src/mongo/crypto/sha_block.h
+++ b/src/mongo/crypto/sha_block.h
@@ -67,9 +67,9 @@ public:
*/
static StatusWith<SHABlock> fromBuffer(const uint8_t* input, size_t inputLen) {
if (inputLen != kHashLength) {
- return {
- ErrorCodes::InvalidLength,
- str::stream() << "Unsupported " << Traits::name << " hash length: " << inputLen};
+ return {ErrorCodes::InvalidLength,
+ str::stream() << "Unsupported " << Traits::name
+ << " hash length: " << inputLen};
}
HashType newHash;
@@ -157,8 +157,8 @@ public:
if (binData.length != kHashLength) {
return {ErrorCodes::UnsupportedFormat,
- str::stream() << "Unsupported " << Traits::name << " hash length: "
- << binData.length};
+ str::stream() << "Unsupported " << Traits::name
+ << " hash length: " << binData.length};
}
HashType newHash;
diff --git a/src/mongo/crypto/symmetric_crypto_apple.cpp b/src/mongo/crypto/symmetric_crypto_apple.cpp
index 9ca5c9c0b1e..216e33b8fa8 100644
--- a/src/mongo/crypto/symmetric_crypto_apple.cpp
+++ b/src/mongo/crypto/symmetric_crypto_apple.cpp
@@ -66,9 +66,7 @@ public:
// Therefore we expect a 128 bit block length.
uassert(ErrorCodes::BadValue,
str::stream() << "Invalid ivlen for selected algorithm, expected "
- << kCCBlockSizeAES128
- << ", got "
- << ivLen,
+ << kCCBlockSizeAES128 << ", got " << ivLen,
ivLen == kCCBlockSizeAES128);
CCCryptorRef context = nullptr;
diff --git a/src/mongo/crypto/symmetric_crypto_openssl.cpp b/src/mongo/crypto/symmetric_crypto_openssl.cpp
index 6329331a511..4e661b98bbd 100644
--- a/src/mongo/crypto/symmetric_crypto_openssl.cpp
+++ b/src/mongo/crypto/symmetric_crypto_openssl.cpp
@@ -63,8 +63,8 @@ void initCipherContext(
}
}
uassert(ErrorCodes::BadValue,
- str::stream() << "Unrecognized AES key size/cipher mode. Size: " << keySize << " Mode: "
- << getStringFromCipherMode(mode),
+ str::stream() << "Unrecognized AES key size/cipher mode. Size: " << keySize
+ << " Mode: " << getStringFromCipherMode(mode),
cipher);
const bool initOk = (1 == init(ctx, cipher, nullptr, key.getKey(), iv));
@@ -188,8 +188,9 @@ public:
// validateEncryptionOption asserts that platforms without GCM will never start in GCM mode
if (_mode == aesMode::gcm) {
#ifdef EVP_CTRL_GCM_GET_TAG
- if (1 != EVP_CIPHER_CTX_ctrl(
- _ctx.get(), EVP_CTRL_GCM_SET_TAG, tagLen, const_cast<uint8_t*>(tag))) {
+ if (1 !=
+ EVP_CIPHER_CTX_ctrl(
+ _ctx.get(), EVP_CTRL_GCM_SET_TAG, tagLen, const_cast<uint8_t*>(tag))) {
return Status(ErrorCodes::UnknownError,
str::stream()
<< "Unable to set GCM tag: "
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 3342212c020..76c52b67947 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -432,8 +432,7 @@ Status AuthorizationManagerImpl::_initializeUserFromPrivilegeDocument(User* user
return Status(ErrorCodes::BadValue,
str::stream() << "User name from privilege document \"" << userName
<< "\" doesn't match name of provided User \""
- << user->getName().getUser()
- << "\"");
+ << user->getName().getUser() << "\"");
}
user->setID(parser.extractUserIDFromUserDocument(privDoc));
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index f51437f697a..9edef97e315 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -126,9 +126,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "v2read"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -141,9 +139,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "v2cluster"
<< "db"
<< "admin"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "clusterAdmin"
<< "db"
@@ -248,19 +244,17 @@ public:
private:
Status _getUserDocument(OperationContext* opCtx, const UserName& userName, BSONObj* userDoc) {
- Status status = findOne(opCtx,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB()),
- userDoc);
+ Status status =
+ findOne(opCtx,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
status = Status(ErrorCodes::UserNotFound,
str::stream() << "Could not find user \"" << userName.getUser()
- << "\" for db \""
- << userName.getDB()
- << "\"");
+ << "\" for db \"" << userName.getDB() << "\"");
}
return status;
}
@@ -296,9 +290,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
<< "myUser"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "myRole"
<< "db"
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index fd186c06d7d..c308e3f5304 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -493,8 +493,7 @@ Status AuthorizationSessionImpl::checkAuthorizedToGrantPrivilege(const Privilege
ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to grant privileges on the "
- << resource.databaseToMatch()
- << "database");
+ << resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::grantRole)) {
@@ -514,8 +513,7 @@ Status AuthorizationSessionImpl::checkAuthorizedToRevokePrivilege(const Privileg
ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to revoke privileges on the "
- << resource.databaseToMatch()
- << "database");
+ << resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::revokeRole)) {
@@ -1001,9 +999,7 @@ bool AuthorizationSessionImpl::isImpersonating() const {
auto AuthorizationSessionImpl::checkCursorSessionPrivilege(
OperationContext* const opCtx, const boost::optional<LogicalSessionId> cursorSessionId)
-> Status {
- auto nobodyIsLoggedIn = [authSession = this] {
- return !authSession->isAuthenticated();
- };
+ auto nobodyIsLoggedIn = [authSession = this] { return !authSession->isAuthenticated(); };
auto authHasImpersonatePrivilege = [authSession = this] {
return authSession->isAuthorizedForPrivilege(
@@ -1037,13 +1033,12 @@ auto AuthorizationSessionImpl::checkCursorSessionPrivilege(
// Operation Context (which implies a background job
!authHasImpersonatePrivilege() // Or if the user has an impersonation privilege, in which
// case, the user gets to sidestep certain checks.
- ) {
+ ) {
return Status{ErrorCodes::Unauthorized,
- str::stream() << "Cursor session id ("
- << sessionIdToStringOrNone(cursorSessionId)
- << ") is not the same as the operation context's session id ("
- << sessionIdToStringOrNone(opCtx->getLogicalSessionId())
- << ")"};
+ str::stream()
+ << "Cursor session id (" << sessionIdToStringOrNone(cursorSessionId)
+ << ") is not the same as the operation context's session id ("
+ << sessionIdToStringOrNone(opCtx->getLogicalSessionId()) << ")"};
}
return Status::OK();
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index b0ec73151e4..b7e589d0f9b 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -178,9 +178,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -206,9 +204,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "admin"
<< "db"
<< "admin"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -252,9 +248,7 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -284,9 +278,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rw"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -301,9 +293,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradmin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdmin"
<< "db"
@@ -315,9 +305,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rwany"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -333,9 +321,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradminany"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdminAnyDatabase"
<< "db"
@@ -412,9 +398,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -444,9 +428,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -489,9 +471,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -522,9 +502,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -558,9 +536,7 @@ TEST_F(AuthorizationSessionTest, AcquireUserObtainsAndValidatesAuthenticationRes
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -568,8 +544,7 @@ TEST_F(AuthorizationSessionTest, AcquireUserObtainsAndValidatesAuthenticationRes
<< "authenticationRestrictions"
<< BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("192.168.0.0/24"
<< "192.168.2.10")
- << "serverAddress"
- << BSON_ARRAY("192.168.0.2"))
+ << "serverAddress" << BSON_ARRAY("192.168.0.2"))
<< BSON("clientSource" << BSON_ARRAY("2001:DB8::1") << "serverAddress"
<< BSON_ARRAY("2001:DB8::2"))
<< BSON("clientSource" << BSON_ARRAY("127.0.0.1"
@@ -911,11 +886,9 @@ TEST_F(AuthorizationSessionTest, CanAggregateOutWithInsertAndRemoveOnTargetNames
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, false));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
- BSONObj cmdObjNoBypassDocumentValidation = BSON(
- "aggregate" << testFooNss.coll() << "pipeline" << pipeline << "bypassDocumentValidation"
- << false
- << "cursor"
- << BSONObj());
+ BSONObj cmdObjNoBypassDocumentValidation =
+ BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline
+ << "bypassDocumentValidation" << false << "cursor" << BSONObj());
privileges = uassertStatusOK(authzSession->getPrivilegesForAggregate(
testFooNss, cmdObjNoBypassDocumentValidation, false));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -928,10 +901,8 @@ TEST_F(AuthorizationSessionTest,
Privilege(testBarCollResource, {ActionType::insert, ActionType::remove})});
BSONArray pipeline = BSON_ARRAY(BSON("$out" << testBarNss.coll()));
- BSONObj cmdObj =
- BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj()
- << "bypassDocumentValidation"
- << true);
+ BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor"
+ << BSONObj() << "bypassDocumentValidation" << true);
PrivilegeVector privileges =
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, false));
ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -946,10 +917,8 @@ TEST_F(AuthorizationSessionTest,
{ActionType::insert, ActionType::remove, ActionType::bypassDocumentValidation})});
BSONArray pipeline = BSON_ARRAY(BSON("$out" << testBarNss.coll()));
- BSONObj cmdObj =
- BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj()
- << "bypassDocumentValidation"
- << true);
+ BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor"
+ << BSONObj() << "bypassDocumentValidation" << true);
PrivilegeVector privileges =
uassertStatusOK(authzSession->getPrivilegesForAggregate(testFooNss, cmdObj, true));
ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges));
@@ -1144,9 +1113,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithEmptyUser
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1163,9 +1130,7 @@ TEST_F(AuthorizationSessionTest,
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1180,9 +1145,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedWithIntersecting
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1190,9 +1153,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedWithIntersecting
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1210,9 +1171,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithNoninters
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1220,9 +1179,7 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsNotCoauthorizedWithNoninters
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
@@ -1241,9 +1198,7 @@ TEST_F(AuthorizationSessionTest,
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(managerState->insertPrivilegeDocument(_opCtx.get(),
@@ -1251,9 +1206,7 @@ TEST_F(AuthorizationSessionTest,
<< "admin"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSONArray()),
BSONObj()));
ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), UserName("spencer", "test")));
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index e3184bef814..c5bf63894f5 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -89,11 +89,8 @@ Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationCo
str::stream()
<< "Could not determine schema version of authorization data. "
"Bad (non-numeric) type "
- << typeName(versionElement.type())
- << " ("
- << versionElement.type()
- << ") for "
- << AuthorizationManager::schemaVersionFieldName
+ << typeName(versionElement.type()) << " (" << versionElement.type()
+ << ") for " << AuthorizationManager::schemaVersionFieldName
<< " field in version document");
}
} else if (status == ErrorCodes::NoMatchingDocument) {
@@ -132,8 +129,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
"",
std::string(str::stream() << "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: "
- << errmsg)));
+ << ". Reason: " << errmsg)));
}
}
}
@@ -179,11 +175,8 @@ Status AuthzManagerExternalStateLocal::getUserDescription(OperationContext* opCt
userRoles << BSON("role" << role.getRole() << "db" << role.getDB());
}
*result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
- << userName.getDB()
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << userRoles.arr());
+ << userName.getDB() << "credentials" << BSON("external" << true)
+ << "roles" << userRoles.arr());
}
BSONElement directRolesElement;
@@ -285,17 +278,14 @@ Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* opCtx,
Status status = findOne(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()),
userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
- status =
- Status(ErrorCodes::UserNotFound,
- str::stream() << "Could not find user \"" << userName.getUser() << "\" for db \""
- << userName.getDB()
- << "\"");
+ status = Status(ErrorCodes::UserNotFound,
+ str::stream() << "Could not find user \"" << userName.getUser()
+ << "\" for db \"" << userName.getDB() << "\"");
}
return status;
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index acaf8389712..6e365f1a7b4 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -83,8 +83,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
"",
std::string(str::stream() << "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: "
- << errmsg)));
+ << ". Reason: " << errmsg)));
}
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index fdb23453592..8969faa3a60 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -128,12 +128,8 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()))
- << "showPrivileges"
- << true
- << "showCredentials"
- << true
- << "showAuthenticationRestrictions"
- << true);
+ << "showPrivileges" << true << "showCredentials" << true
+ << "showAuthenticationRestrictions" << true);
BSONObjBuilder builder;
const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
opCtx, "admin", usersInfoCmd, &builder);
@@ -150,10 +146,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
if (foundUsers.size() > 1) {
return Status(ErrorCodes::UserDataInconsistent,
- str::stream() << "Found multiple users on the \"" << userName.getDB()
- << "\" database with name \""
- << userName.getUser()
- << "\"");
+ str::stream()
+ << "Found multiple users on the \"" << userName.getDB()
+ << "\" database with name \"" << userName.getUser() << "\"");
}
*result = foundUsers[0].Obj().getOwned();
return Status::OK();
@@ -163,10 +158,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
BSONArrayBuilder userRolesBuilder;
auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
for (const RoleName& role : sslPeerInfo.roles) {
- userRolesBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << role.getDB()));
+ userRolesBuilder.append(BSON(
+ AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
}
BSONArray providedRoles = userRolesBuilder.arr();
@@ -195,16 +189,12 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opC
"Recieved malformed response to request for X509 roles from config server");
}
- *result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
- << userName.getDB()
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << BSONArray(cmdResult["roles"].Obj())
- << "inheritedRoles"
- << BSONArray(cmdResult["inheritedRoles"].Obj())
- << "inheritedPrivileges"
- << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
+ *result =
+ BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
+ << userName.getDB() << "credentials" << BSON("external" << true) << "roles"
+ << BSONArray(cmdResult["roles"].Obj()) << "inheritedRoles"
+ << BSONArray(cmdResult["inheritedRoles"].Obj()) << "inheritedPrivileges"
+ << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
return Status::OK();
}
}
@@ -216,11 +206,11 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(
AuthenticationRestrictionsFormat showRestrictions,
BSONObj* result) {
BSONObjBuilder rolesInfoCmd;
- rolesInfoCmd.append("rolesInfo",
- BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << roleName.getDB())));
+ rolesInfoCmd.append(
+ "rolesInfo",
+ BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB())));
addShowToBuilder(&rolesInfoCmd, showPrivileges, showRestrictions);
BSONObjBuilder builder;
@@ -239,9 +229,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(
if (foundRoles.size() > 1) {
return Status(ErrorCodes::RoleDataInconsistent,
str::stream() << "Found multiple roles on the \"" << roleName.getDB()
- << "\" database with name \""
- << roleName.getRole()
- << "\"");
+ << "\" database with name \"" << roleName.getRole() << "\"");
}
*result = foundRoles[0].Obj().getOwned();
return Status::OK();
@@ -256,8 +244,7 @@ Status AuthzManagerExternalStateMongos::getRolesDescription(
for (const RoleName& roleName : roles) {
rolesInfoCmdArray << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< roleName.getDB());
}
diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp
index 288760ffb0d..969360a6f51 100644
--- a/src/mongo/db/auth/privilege_parser_test.cpp
+++ b/src/mongo/db/auth/privilege_parser_test.cpp
@@ -56,24 +56,21 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have db without collection
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have collection without db
parsedPrivilege.parseBSON(BSON("resource" << BSON("collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
@@ -82,8 +79,7 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -92,8 +88,7 @@ TEST(PrivilegeParserTest, IsValidTest) {
<< "test"
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -116,8 +111,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -143,8 +137,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< "test"
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -171,8 +164,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< "test"
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -198,8 +190,7 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
<< ""
<< "collection"
<< "foo")
- << "actions"
- << BSON_ARRAY("find")),
+ << "actions" << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp
index 8093864dfe3..b05a29fff4d 100644
--- a/src/mongo/db/auth/role_graph.cpp
+++ b/src/mongo/db/auth/role_graph.cpp
@@ -167,8 +167,8 @@ Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role)
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant roles to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant roles to built-in role: " << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -193,8 +193,8 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove roles from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove roles from built-in role: " << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -207,8 +207,9 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
_roleToMembers[role].erase(itToRm);
} else {
return Status(ErrorCodes::RolesNotRelated,
- str::stream() << recipient.getFullName() << " is not a member"
- " of "
+ str::stream() << recipient.getFullName()
+ << " is not a member"
+ " of "
<< role.getFullName());
}
@@ -227,8 +228,8 @@ Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
}
if (isBuiltinRole(victim)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove roles from built-in role: "
- << victim.getFullName());
+ str::stream()
+ << "Cannot remove roles from built-in role: " << victim.getFullName());
}
RoleNameVector& subordinatesOfVictim = _roleToSubordinates[victim];
@@ -253,8 +254,8 @@ Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& priv
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName());
}
_addPrivilegeToRoleNoChecks(role, privilegeToAdd);
@@ -277,8 +278,8 @@ Status RoleGraph::addPrivilegesToRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName());
}
for (PrivilegeVector::const_iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
@@ -296,8 +297,8 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove privileges from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
}
PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
@@ -325,8 +326,9 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
}
return Status(ErrorCodes::PrivilegeNotFound,
- str::stream() << "Role: " << role.getFullName() << " does not "
- "contain any privileges on "
+ str::stream() << "Role: " << role.getFullName()
+ << " does not "
+ "contain any privileges on "
<< privilegeToRemove.getResourcePattern().toString());
}
@@ -350,8 +352,8 @@ Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot remove privileges from built-in role: "
- << role.getFullName());
+ str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
}
_directPrivilegesForRole[role].clear();
return Status::OK();
@@ -434,8 +436,8 @@ Status RoleGraph::_recomputePrivilegeDataHelper(const RoleName& startingRole,
if (!roleExists(currentRole)) {
return Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role: " << currentRole.getFullName()
- << " does not exist");
+ str::stream()
+ << "Role: " << currentRole.getFullName() << " does not exist");
}
// Check for cycles
diff --git a/src/mongo/db/auth/role_graph_test.cpp b/src/mongo/db/auth/role_graph_test.cpp
index a2ed3dece7f..765d3d3c61c 100644
--- a/src/mongo/db/auth/role_graph_test.cpp
+++ b/src/mongo/db/auth/role_graph_test.cpp
@@ -196,7 +196,7 @@ TEST(RoleGraphTest, AddRemoveRoles) {
* |
* v
* D
- */
+ */
it = graph.getDirectSubordinates(roleA); // should be roleB and roleC, order doesn't matter
@@ -825,26 +825,22 @@ TEST(RoleGraphTest, AddRoleFromDocument) {
<< "dbA"
<< "collection"
<< "collA")
- << "actions"
- << BSON_ARRAY("insert"))),
+ << "actions" << BSON_ARRAY("insert"))),
BSON_ARRAY(BSON("resource" << BSON("db"
<< "dbB"
<< "collection"
<< "collB")
- << "actions"
- << BSON_ARRAY("insert"))
+ << "actions" << BSON_ARRAY("insert"))
<< BSON("resource" << BSON("db"
<< "dbC"
<< "collection"
<< "collC")
- << "actions"
- << BSON_ARRAY("compact"))),
+ << "actions" << BSON_ARRAY("compact"))),
BSON_ARRAY(BSON("resource" << BSON("db"
<< ""
<< "collection"
<< "")
- << "actions"
- << BSON_ARRAY("find"))),
+ << "actions" << BSON_ARRAY("find"))),
};
const BSONArray restrictions[] = {
@@ -922,33 +918,28 @@ TEST(RoleGraphTest, AddRoleFromDocumentWithRestricitonMerge) {
BSON_ARRAY(BSON("serverAddress" << BSON_ARRAY("127.0.0.1/8")));
RoleGraph graph;
- ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
- << "dbA.roleA"
- << "role"
- << "roleA"
- << "db"
- << "dbA"
- << "privileges"
- << BSONArray()
- << "roles"
- << BSONArray()
- << "authenticationRestrictions"
- << roleARestrictions)));
- ASSERT_OK(graph.addRoleFromDocument(BSON("_id"
- << "dbB.roleB"
- << "role"
- << "roleB"
- << "db"
- << "dbB"
- << "privileges"
- << BSONArray()
- << "roles"
- << BSON_ARRAY(BSON("role"
- << "roleA"
- << "db"
- << "dbA"))
- << "authenticationRestrictions"
- << roleBRestrictions)));
+ ASSERT_OK(
+ graph.addRoleFromDocument(BSON("_id"
+ << "dbA.roleA"
+ << "role"
+ << "roleA"
+ << "db"
+ << "dbA"
+ << "privileges" << BSONArray() << "roles" << BSONArray()
+ << "authenticationRestrictions" << roleARestrictions)));
+ ASSERT_OK(
+ graph.addRoleFromDocument(BSON("_id"
+ << "dbB.roleB"
+ << "role"
+ << "roleB"
+ << "db"
+ << "dbB"
+ << "privileges" << BSONArray() << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA"))
+ << "authenticationRestrictions" << roleBRestrictions)));
ASSERT_OK(graph.recomputePrivilegeData());
const auto A = graph.getDirectAuthenticationRestrictions(RoleName("roleA", "dbA"));
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 02c89f36bd7..33ee260fa93 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -92,9 +92,7 @@ Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& role
return Status(ErrorCodes::FailedToParse,
str::stream() << "Role document _id fields must be encoded as the string "
"dbname.rolename. Found "
- << idField
- << " for "
- << roleName.getFullName());
+ << idField << " for " << roleName.getFullName());
}
return Status::OK();
}
@@ -312,16 +310,13 @@ Status handleOplogCommand(RoleGraph* roleGraph, const BSONObj& cmdObj) {
if (cmdName == "createIndexes" &&
cmdObj.firstElement().str() == rolesCollectionNamespace.coll()) {
UnorderedFieldsBSONObjComparator instance;
- if (instance.evaluate(cmdObj == (BSON("createIndexes"
- << "system.roles"
- << "v"
- << 2
- << "name"
- << "role_1_db_1"
- << "key"
- << BSON("role" << 1 << "db" << 1)
- << "unique"
- << true)))) {
+ if (instance.evaluate(
+ cmdObj ==
+ (BSON("createIndexes"
+ << "system.roles"
+ << "v" << 2 << "name"
+ << "role_1_db_1"
+ << "key" << BSON("role" << 1 << "db" << 1) << "unique" << true)))) {
return Status::OK();
}
}
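For context on the reflowed condition above: UnorderedFieldsBSONObjComparator::evaluate() compares two BSON objects while ignoring field order. A short usage sketch with the same names; this is not self-contained, and assumes the mongo headers already included by role_graph_update.cpp:

UnorderedFieldsBSONObjComparator cmp;
// Field order differs, but the unordered comparator treats these as equal.
bool same = cmp.evaluate(BSON("role" << 1 << "db" << 1) ==
                         BSON("db" << 1 << "role" << 1));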
diff --git a/src/mongo/db/auth/sasl_authentication_session_test.cpp b/src/mongo/db/auth/sasl_authentication_session_test.cpp
index 97750182061..e849832d6ff 100644
--- a/src/mongo/db/auth/sasl_authentication_session_test.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session_test.cpp
@@ -131,19 +131,17 @@ SaslConversation::SaslConversation(std::string mech)
<< scram::Secrets<SHA256Block>::generateCredentials(
"frim", saslGlobalParams.scramSHA256IterationCount.load()));
- ASSERT_OK(authManagerExternalState->insert(opCtx.get(),
- NamespaceString("admin.system.users"),
- BSON("_id"
- << "test.andy"
- << "user"
- << "andy"
- << "db"
- << "test"
- << "credentials"
- << creds
- << "roles"
- << BSONArray()),
- BSONObj()));
+ ASSERT_OK(
+ authManagerExternalState->insert(opCtx.get(),
+ NamespaceString("admin.system.users"),
+ BSON("_id"
+ << "test.andy"
+ << "user"
+ << "andy"
+ << "db"
+ << "test"
+ << "credentials" << creds << "roles" << BSONArray()),
+ BSONObj()));
}
void SaslConversation::assertConversationFailure() {
diff --git a/src/mongo/db/auth/sasl_mechanism_registry.cpp b/src/mongo/db/auth/sasl_mechanism_registry.cpp
index 2de9fb02fee..bfe479143d3 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry.cpp
@@ -79,8 +79,7 @@ StatusWith<std::unique_ptr<ServerMechanismBase>> SASLServerMechanismRegistry::ge
return Status(ErrorCodes::BadValue,
str::stream() << "Unsupported mechanism '" << mechanismName
- << "' on authentication database '"
- << authenticationDatabase
+ << "' on authentication database '" << authenticationDatabase
<< "'");
}
@@ -147,9 +146,7 @@ bool SASLServerMechanismRegistry::_mechanismSupportedByConfig(StringData mechNam
namespace {
ServiceContext::ConstructorActionRegisterer SASLServerMechanismRegistryInitializer{
- "CreateSASLServerMechanismRegistry",
- {"EndStartupOptionStorage"},
- [](ServiceContext* service) {
+ "CreateSASLServerMechanismRegistry", {"EndStartupOptionStorage"}, [](ServiceContext* service) {
SASLServerMechanismRegistry::set(service,
std::make_unique<SASLServerMechanismRegistry>(
saslGlobalParams.authenticationMechanisms));
diff --git a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
index 6ca988bc9ae..b16df4ec3f8 100644
--- a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
+++ b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp
@@ -27,11 +27,11 @@
* it in the license file.
*/
-#include "mongo/db/auth/sasl_mechanism_registry.h"
#include "mongo/crypto/mechanism_scram.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_impl.h"
#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/sasl_mechanism_registry.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/unittest/unittest.h"
@@ -201,8 +201,7 @@ public:
<< "credentials"
<< BSON("SCRAM-SHA-256"
<< scram::Secrets<SHA256Block>::generateCredentials("sajack‍", 15000))
- << "roles"
- << BSONArray()),
+ << "roles" << BSONArray()),
BSONObj()));
@@ -214,10 +213,8 @@ public:
<< "sajack"
<< "db"
<< "$external"
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << BSONArray()),
+ << "credentials" << BSON("external" << true)
+ << "roles" << BSONArray()),
BSONObj()));
internalSecurity.user = std::make_shared<User>(UserName("__system", "local"));
diff --git a/src/mongo/db/auth/sasl_options_init.cpp b/src/mongo/db/auth/sasl_options_init.cpp
index b83a94fa1c0..51ba683342b 100644
--- a/src/mongo/db/auth/sasl_options_init.cpp
+++ b/src/mongo/db/auth/sasl_options_init.cpp
@@ -95,4 +95,4 @@ MONGO_INITIALIZER_GENERAL(StoreSASLOptions, ("CoreOptions_Store"), ("EndStartupO
(InitializerContext* const context) {
return storeSASLOptions(moe::startupOptionsParsed);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.cpp b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
index 5ef2cf6ac9e..0a88084dea3 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
@@ -60,8 +60,9 @@ StatusWith<bool> trySCRAM(const User::CredentialData& credentials, StringData pw
reinterpret_cast<const std::uint8_t*>(decodedSalt.c_str()) +
decodedSalt.size()),
scram.iterationCount));
- if (scram.storedKey != base64::encode(reinterpret_cast<const char*>(secrets.storedKey().data()),
- secrets.storedKey().size())) {
+ if (scram.storedKey !=
+ base64::encode(reinterpret_cast<const char*>(secrets.storedKey().data()),
+ secrets.storedKey().size())) {
return Status(ErrorCodes::AuthenticationFailed,
str::stream() << "Incorrect user name or password");
}
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.h b/src/mongo/db/auth/sasl_plain_server_conversation.h
index 26acd1e0aac..d3c6af215ce 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.h
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.h
@@ -49,8 +49,9 @@ public:
static constexpr bool isInternal = true;
bool canMakeMechanismForUser(const User* user) const final {
auto credentials = user->getCredentials();
- return !credentials.isExternal && (credentials.scram<SHA1Block>().isValid() ||
- credentials.scram<SHA256Block>().isValid());
+ return !credentials.isExternal &&
+ (credentials.scram<SHA1Block>().isValid() ||
+ credentials.scram<SHA256Block>().isValid());
}
};
diff --git a/src/mongo/db/auth/sasl_scram_server_conversation.cpp b/src/mongo/db/auth/sasl_scram_server_conversation.cpp
index 04a8e53798a..fc223097b4f 100644
--- a/src/mongo/db/auth/sasl_scram_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_scram_server_conversation.cpp
@@ -99,8 +99,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for first SCRAM client message, got "
- << got
- << " expected at least 3");
+ << got << " expected at least 3");
};
/**
@@ -168,8 +167,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
if (!authzId.empty() && ServerMechanismBase::_principalName != authzId) {
return Status(ErrorCodes::BadValue,
str::stream() << "SCRAM user name " << ServerMechanismBase::_principalName
- << " does not match authzid "
- << authzId);
+ << " does not match authzid " << authzId);
}
if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
@@ -267,7 +265,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_fir
* e=message
*
* NOTE: we are ignoring the channel binding part of the message
-**/
+ **/
template <typename Policy>
StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_secondStep(
OperationContext* opCtx, StringData inputData) {
@@ -275,8 +273,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_sec
return Status(ErrorCodes::BadValue,
str::stream()
<< "Incorrect number of arguments for second SCRAM client message, got "
- << got
- << " expected at least 3");
+ << got << " expected at least 3");
};
/**
@@ -322,9 +319,7 @@ StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_sec
return Status(ErrorCodes::BadValue,
str::stream()
<< "Unmatched SCRAM nonce received from client in second step, expected "
- << _nonce
- << " but received "
- << nonce);
+ << _nonce << " but received " << nonce);
}
// Do server side computations, compare storedKeys and generate client-final-message
diff --git a/src/mongo/db/auth/sasl_scram_test.cpp b/src/mongo/db/auth/sasl_scram_test.cpp
index b16d58b9288..2c9f5fc3acc 100644
--- a/src/mongo/db/auth/sasl_scram_test.cpp
+++ b/src/mongo/db/auth/sasl_scram_test.cpp
@@ -62,16 +62,10 @@ BSONObj generateSCRAMUserDocument(StringData username, StringData password) {
const auto sha256Cred =
scram::Secrets<SHA256Block>::generateCredentials(password.toString(), 15000);
return BSON("_id" << (str::stream() << database << "." << username).operator StringData()
- << AuthorizationManager::USER_NAME_FIELD_NAME
- << username
- << AuthorizationManager::USER_DB_FIELD_NAME
- << database
- << "credentials"
- << BSON("SCRAM-SHA-1" << sha1Cred << "SCRAM-SHA-256" << sha256Cred)
- << "roles"
- << BSONArray()
- << "privileges"
- << BSONArray());
+ << AuthorizationManager::USER_NAME_FIELD_NAME << username
+ << AuthorizationManager::USER_DB_FIELD_NAME << database << "credentials"
+ << BSON("SCRAM-SHA-1" << sha1Cred << "SCRAM-SHA-256" << sha256Cred) << "roles"
+ << BSONArray() << "privileges" << BSONArray());
}
std::string corruptEncodedPayload(const std::string& message,
@@ -302,7 +296,6 @@ TEST_F(SCRAMFixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
std::string::iterator nonceBegin = serverMessage.begin() + serverMessage.find("r=");
std::string::iterator nonceEnd = std::find(nonceBegin, serverMessage.end(), ',');
serverMessage = serverMessage.replace(nonceBegin, nonceEnd, "r=");
-
});
ASSERT_EQ(
SCRAMStepsResult(SaslTestState(SaslTestState::kClient, 2),
@@ -348,7 +341,6 @@ TEST_F(SCRAMFixture, testClientStep2GivesBadProof) {
std::string::iterator proofEnd = std::find(proofBegin, clientMessage.end(), ',');
clientMessage = clientMessage.replace(
proofBegin, proofEnd, corruptEncodedPayload(clientMessage, proofBegin, proofEnd));
-
});
ASSERT_EQ(SCRAMStepsResult(SaslTestState(SaslTestState::kServer, 2),
@@ -378,7 +370,6 @@ TEST_F(SCRAMFixture, testServerStep2GivesBadVerifier) {
encodedVerifier = corruptEncodedPayload(serverMessage, verifierBegin, verifierEnd);
serverMessage = serverMessage.replace(verifierBegin, verifierEnd, encodedVerifier);
-
});
auto result = runSteps(mutator);
diff --git a/src/mongo/db/auth/security_file.cpp b/src/mongo/db/auth/security_file.cpp
index 04efa479fbc..0dc4bfafe23 100644
--- a/src/mongo/db/auth/security_file.cpp
+++ b/src/mongo/db/auth/security_file.cpp
@@ -74,8 +74,8 @@ StatusWith<std::vector<std::string>> readSecurityFile(const std::string& filenam
// check obvious file errors
if (stat(filename.c_str(), &stats) == -1) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Error reading file " << filename << ": "
- << strerror(errno));
+ str::stream()
+ << "Error reading file " << filename << ": " << strerror(errno));
}
#if !defined(_WIN32)
diff --git a/src/mongo/db/auth/user.cpp b/src/mongo/db/auth/user.cpp
index ce869ea28f5..96d1251c316 100644
--- a/src/mongo/db/auth/user.cpp
+++ b/src/mongo/db/auth/user.cpp
@@ -160,7 +160,7 @@ void User::addPrivileges(const PrivilegeVector& privileges) {
}
}
-void User::setRestrictions(RestrictionDocuments restrictions)& {
+void User::setRestrictions(RestrictionDocuments restrictions) & {
_restrictions = std::move(restrictions);
}
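The User::setRestrictions() hunk is clang-format 7 handling ref-qualified member functions: the lvalue ref-qualifier is now spaced as ") &" instead of ")&". A minimal sketch of such a member function (Profile is hypothetical):

#include <string>
#include <utility>

class Profile {
public:
    // The trailing '&' makes this setter callable only on lvalues;
    // clang-format 7 spaces it as ") &", as in User::setRestrictions above.
    void setName(std::string name) & {
        _name = std::move(name);
    }

private:
    std::string _name;
};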
diff --git a/src/mongo/db/auth/user_document_parser.cpp b/src/mongo/db/auth/user_document_parser.cpp
index 1c5da7795be..8eb6dc7a94b 100644
--- a/src/mongo/db/auth/user_document_parser.cpp
+++ b/src/mongo/db/auth/user_document_parser.cpp
@@ -152,8 +152,8 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
StringData userDBStr = userDBElement.valueStringData();
if (!NamespaceString::validDBName(userDBStr, NamespaceString::DollarInDbNameBehavior::Allow) &&
userDBStr != "$external") {
- return _badValue(str::stream() << "'" << userDBStr
- << "' is not a valid value for the db field.");
+ return _badValue(str::stream()
+ << "'" << userDBStr << "' is not a valid value for the db field.");
}
// Validate the "credentials" element
@@ -184,8 +184,8 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
str::stream() << fieldName << " does not exist");
}
if (scramElement.type() != Object) {
- return _badValue(str::stream() << fieldName
- << " credential must be an object, if present");
+ return _badValue(str::stream()
+ << fieldName << " credential must be an object, if present");
}
return Status::OK();
};
diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp
index af798f525a6..44721c6570d 100644
--- a/src/mongo/db/auth/user_document_parser_test.cpp
+++ b/src/mongo/db/auth/user_document_parser_test.cpp
@@ -83,23 +83,18 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "pwd"
<< "a"
- << "roles"
- << BSON_ARRAY("read"))));
+ << "roles" << BSON_ARRAY("read"))));
// Need name field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need source field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need credentials field
@@ -107,16 +102,14 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "roles"
- << emptyArray)));
+ << "roles" << emptyArray)));
// Need roles field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials)));
+ << "credentials" << credentials)));
// authenticationRestrictions must be an array if it exists
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
@@ -131,11 +124,8 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
- << emptyArray
- << "authenticationRestrictions"
+ << "credentials" << credentials << "roles"
+ << emptyArray << "authenticationRestrictions"
<< emptyArray)));
// Empty roles arrays are OK
@@ -143,9 +133,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< emptyArray)));
// Need credentials of {external: true} if user's db is $external
@@ -153,19 +141,15 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "$external"
- << "credentials"
- << BSON("external" << true)
- << "roles"
- << emptyArray)));
+ << "credentials" << BSON("external" << true)
+ << "roles" << emptyArray)));
// Roles must be objects
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY("read"))));
// Role needs name
@@ -173,9 +157,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("db"
<< "dbA")))));
@@ -184,9 +166,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA")))));
@@ -196,9 +176,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -209,9 +187,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "roles"
+ << "credentials" << credentials << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -227,9 +203,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "authenticationRestrictions"
+ << "credentials" << credentials << "authenticationRestrictions"
<< BSON_ARRAY(BSON("clientSource" << BSON_ARRAY("127.0.0.1/8") << "serverAddress"
<< BSON_ARRAY("127.0.0.1/8")))
<< "roles"
@@ -243,9 +217,7 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials"
- << credentials
- << "extraData"
+ << "credentials" << credentials << "extraData"
<< BSON("foo"
<< "bar")
<< "roles"
@@ -318,13 +290,13 @@ TEST_F(V2UserDocumentParsing, V2CredentialExtraction) {
ASSERT(!user->getCredentials().isExternal);
// Make sure extracting valid combined credentials works
- ASSERT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "db"
- << "test"
- << "credentials"
- << credentials)));
+ ASSERT_OK(
+ v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << credentials)));
ASSERT(user->getCredentials().scram_sha1.isValid());
ASSERT(user->getCredentials().scram_sha256.isValid());
ASSERT(!user->getCredentials().isExternal);
@@ -350,18 +322,18 @@ TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
user.get()));
// V1-style roles arrays no longer work
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles"
- << BSON_ARRAY("read")),
- user.get()));
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY("read")),
+ user.get()));
// Roles must have "db" field
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles"
- << BSON_ARRAY(BSONObj())),
- user.get()));
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSONObj())),
+ user.get()));
ASSERT_NOT_OK(
v2parser.initializeUserRolesFromUserDocument(BSON("user"
@@ -428,16 +400,14 @@ TEST_F(V2UserDocumentParsing, V2AuthenticationRestrictionsExtraction) {
ASSERT_OK(v2parser.initializeAuthenticationRestrictionsFromUserDocument(
BSON("user"
<< "spencer"
- << "authenticationRestrictions"
- << emptyArray),
+ << "authenticationRestrictions" << emptyArray),
user.get()));
// authenticationRestrictions must have at least one of "clientSource"/"serverAdddress" fields
ASSERT_NOT_OK(v2parser.initializeAuthenticationRestrictionsFromUserDocument(
BSON("user"
<< "spencer"
- << "authenticationRestrictions"
- << BSON_ARRAY(emptyObj)),
+ << "authenticationRestrictions" << BSON_ARRAY(emptyObj)),
user.get()));
// authenticationRestrictions must not have unexpected elements
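The rewrapped BSON chains in the hunks above all follow one clang-format 7 rule: a break is kept between two consecutive string-literal operands of <<, preserving the key/"value" stacking, while operands that are not bare literals get packed onto a shared line. A minimal standalone sketch of that rule, using std::ostringstream and invented integer stand-ins instead of the BSON macros:

    #include <sstream>

    int main() {
        std::ostringstream b;
        int credentials = 1, emptyArray = 0;  // invented stand-ins, not mongo types
        // Literal-after-literal pairs stay stacked; named values are packed,
        // mirroring the << "credentials" << credentials << "roles" collapses above.
        b << "user"
          << "spencer"
          << "db"
          << "test"
          << "credentials" << credentials << "roles" << emptyArray;
        return b.str().empty() ? 1 : 0;
    }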
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 29f4bc53574..0d380888ac9 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -64,8 +64,9 @@ Status _checkNoExtraFields(const BSONObj& cmdObj,
StringData fieldName = (*iter).fieldNameStringData();
if (!isGenericArgument(fieldName) && !validFieldNames.count(fieldName.toString())) {
return Status(ErrorCodes::BadValue,
- str::stream() << "\"" << fieldName << "\" is not "
- "a valid argument to "
+ str::stream() << "\"" << fieldName
+ << "\" is not "
+ "a valid argument to "
<< cmdName);
}
}
@@ -175,8 +176,9 @@ Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
if (!parsedRoleNames->size()) {
return Status(ErrorCodes::BadValue,
- str::stream() << cmdName << " command requires a non-empty "
- "\"roles\" array");
+ str::stream() << cmdName
+ << " command requires a non-empty "
+ "\"roles\" array");
}
return Status::OK();
}
@@ -634,8 +636,9 @@ Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
}
if (!parsedPrivileges->size()) {
return Status(ErrorCodes::BadValue,
- str::stream() << cmdName << " command requires a non-empty "
- "\"privileges\" array");
+ str::stream() << cmdName
+ << " command requires a non-empty "
+ "\"privileges\" array");
}
return Status::OK();
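All three hunks above show the same reflow: when a str::stream() chain no longer fits, clang-format 7 breaks after the non-literal operand and lets the adjacent string literals sit on consecutive lines, where the compiler concatenates them into a single string. A standalone sketch of the shape (function and names invented, std::ostringstream standing in for str::stream):

    #include <sstream>
    #include <string>

    std::string badArgMessage(const std::string& fieldName, const std::string& cmdName) {
        std::ostringstream ss;
        ss << "\"" << fieldName
           << "\" is not "
              "a valid argument to "  // adjacent literals: one string at compile time
           << cmdName;
        return ss.str();
    }

    int main() {
        return badArgMessage("x", "createUser").empty() ? 1 : 0;
    }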
diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp
index 29d973fe3e7..f648c3e13ed 100644
--- a/src/mongo/db/baton.cpp
+++ b/src/mongo/db/baton.cpp
@@ -80,7 +80,7 @@ public:
}
}
- _baton->schedule([ this, anchor = shared_from_this() ](Status status) {
+ _baton->schedule([this, anchor = shared_from_this()](Status status) {
_runJobs(stdx::unique_lock(_mutex), status);
});
}
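The baton.cpp hunk is a formatter fix rather than a style choice: the previously used clang-format release padded any capture list containing an init-capture, producing [ this, anchor = shared_from_this() ], while 7.0.1 formats it like every other capture list. A runnable sketch with invented names:

    #include <memory>

    int main() {
        auto anchor = std::make_shared<int>(7);
        // old output:    [ anchor, copy = anchor ](int s) { ... }
        // 7.0.1 output:  [anchor, copy = anchor](int s) { ... }
        auto schedule = [anchor, copy = anchor](int s) { return s + *copy; };
        return schedule(-7);  // 0 on success
    }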
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d9481ded941..708f93ce0db 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -64,8 +64,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while truncating collection: "
- << collectionName);
+ str::stream()
+ << "Not primary while truncating collection: " << collectionName);
}
Database* db = autoDb.getDb();
@@ -91,8 +91,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
repl::ReplicationCoordinator::modeNone) &&
collectionName.isOplog()) {
return Status(ErrorCodes::OplogOperationUnsupported,
- str::stream() << "Cannot truncate a live oplog while replicating: "
- << collectionName);
+ str::stream()
+ << "Cannot truncate a live oplog while replicating: " << collectionName);
}
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
@@ -140,8 +140,7 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
uassert(ErrorCodes::NamespaceExists,
str::stream() << "cloneCollectionAsCapped failed - destination collection " << toNss
- << " already exists. source collection: "
- << fromNss,
+ << " already exists. source collection: " << fromNss,
!db->getCollection(opCtx, toNss));
// create new collection
@@ -269,8 +268,7 @@ void convertToCapped(OperationContext* opCtx,
uassertStatusOKWithContext(tmpNameResult,
str::stream()
<< "Cannot generate temporary collection namespace to convert "
- << collectionName
- << " to a capped collection");
+ << collectionName << " to a capped collection");
const auto& longTmpName = tmpNameResult.getValue();
const auto shortTmpName = longTmpName.coll().toString();
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 95c1381f91e..da12b2bb6b2 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -125,8 +125,7 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
fassert(40689,
{ErrorCodes::InternalError,
str::stream() << "failed to get index spec for index " << indexName
- << " in collection "
- << collNss.toString()});
+ << " in collection " << collNss.toString()});
}
auto indexesToRebuild = indexSpecs.getValue();
invariant(
@@ -171,8 +170,8 @@ void openCatalog(OperationContext* opCtx, const MinVisibleTimestampMap& minVisib
// Note that the collection name already includes the database component.
auto collection = db->getCollection(opCtx, collNss);
invariant(collection,
- str::stream() << "failed to get valid collection pointer for namespace "
- << collNss);
+ str::stream()
+ << "failed to get valid collection pointer for namespace " << collNss);
auto uuid = collection->uuid();
invariant(uuid);
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 569dbb1bd0a..f63c3f9deb8 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -134,8 +134,8 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
cmr.idx = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!cmr.idx) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "cannot find index " << indexName << " for ns "
- << nss);
+ str::stream()
+ << "cannot find index " << indexName << " for ns " << nss);
}
} else {
std::vector<const IndexDescriptor*> indexes;
@@ -145,17 +145,14 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
str::stream() << "index keyPattern " << keyPattern << " matches "
- << indexes.size()
- << " indexes,"
+ << indexes.size() << " indexes,"
<< " must use index name. "
- << "Conflicting indexes:"
- << indexes[0]->infoObj()
- << ", "
- << indexes[1]->infoObj());
+ << "Conflicting indexes:" << indexes[0]->infoObj()
+ << ", " << indexes[1]->infoObj());
} else if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "cannot find index " << keyPattern << " for ns "
- << nss);
+ str::stream()
+ << "cannot find index " << keyPattern << " for ns " << nss);
}
cmr.idx = indexes[0];
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index 03f94006a7b..d42a94133e5 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -251,9 +251,8 @@ private:
mongo::stdx::unordered_map<CollectionUUID, NamespaceString, CollectionUUID::Hash>>
_shadowCatalog;
- using CollectionCatalogMap = mongo::stdx::unordered_map<CollectionUUID,
- std::unique_ptr<Collection>,
- CollectionUUID::Hash>;
+ using CollectionCatalogMap = mongo::stdx::
+ unordered_map<CollectionUUID, std::unique_ptr<Collection>, CollectionUUID::Hash>;
using OrderedCollectionMap = std::map<std::pair<std::string, CollectionUUID>, Collection*>;
using NamespaceCollectionMap = mongo::stdx::unordered_map<NamespaceString, Collection*>;
CollectionCatalogMap _catalog;
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 32bf6ab8047..c033dcfcada 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -121,7 +121,7 @@ public:
void checkCollections(std::string dbName) {
unsigned long counter = 0;
- for (auto[orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
+ for (auto [orderedIt, catalogIt] = std::tuple{collsIterator(dbName), catalog.begin(dbName)};
catalogIt != catalog.end() && orderedIt != collsIteratorEnd(dbName);
++catalogIt, ++orderedIt) {
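The one-character change above is C++17 structured-binding support: the old formatter treated auto[orderedIt, catalogIt] like a subscript expression and dropped the space, while clang-format 7 recognizes the binding and writes auto [a, b]. A minimal sketch:

    #include <tuple>

    int main() {
        // old output:    auto[x, y] = std::tuple{1, 2};
        // 7.0.1 output:  auto [x, y] = std::tuple{1, 2};
        auto [x, y] = std::tuple{1, 2};
        return x + y - 3;  // 0 on success
    }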
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index c3e886d27eb..867cc6fdf3b 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -93,9 +93,7 @@ StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
- << ": "
- << keyStatus.reason()
- << " For more info see"
+ << ": " << keyStatus.reason() << " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
diff --git a/src/mongo/db/catalog/collection_compact.h b/src/mongo/db/catalog/collection_compact.h
index b17b0ec3886..c3fab5e37ba 100644
--- a/src/mongo/db/catalog/collection_compact.h
+++ b/src/mongo/db/catalog/collection_compact.h
@@ -36,9 +36,9 @@
namespace mongo {
/**
- * Compacts collection.
- * See record_store.h for CompactStats and CompactOptions definitions.
- */
+ * Compacts collection.
+ * See record_store.h for CompactStats and CompactOptions definitions.
+ */
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
Collection* collection,
const CompactOptions* options);
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index a2aa0a917a4..1711a0a9242 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -330,10 +330,8 @@ StatusWithMatchExpression CollectionImpl::parseValidator(
if (ns().isOnInternalDb()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators are not allowed on collection " << ns().ns()
- << (_uuid ? " with UUID " + _uuid->toString() : "")
- << " in the "
- << ns().db()
- << " internal database"};
+ << (_uuid ? " with UUID " + _uuid->toString() : "") << " in the "
+ << ns().db() << " internal database"};
}
boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, _collator.get()));
@@ -423,8 +421,9 @@ Status CollectionImpl::insertDocuments(OperationContext* opCtx,
const auto firstIdElem = data["first_id"];
// If the failpoint specifies no collection or matches the existing one, hang.
if ((!collElem || _ns.ns() == collElem.str()) &&
- (!firstIdElem || (begin != end && firstIdElem.type() == mongo::String &&
- begin->doc["_id"].str() == firstIdElem.str()))) {
+ (!firstIdElem ||
+ (begin != end && firstIdElem.type() == mongo::String &&
+ begin->doc["_id"].str() == firstIdElem.str()))) {
string whenFirst =
firstIdElem ? (string(" when first _id is ") + firstIdElem.str()) : "";
while (MONGO_FAIL_POINT(hangAfterCollectionInserts)) {
@@ -680,9 +679,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
if (_recordStore->isCapped() && oldSize != newDoc.objsize())
uasserted(ErrorCodes::CannotGrowDocumentInCappedNamespace,
str::stream() << "Cannot change the size of a document in a capped collection: "
- << oldSize
- << " != "
- << newDoc.objsize());
+ << oldSize << " != " << newDoc.objsize());
args->preImageDoc = oldDoc.value().getOwned();
@@ -857,11 +854,9 @@ Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDo
DurableCatalog::get(opCtx)->updateValidator(
opCtx, ns(), validatorDoc, getValidationLevel(), getValidationAction());
- opCtx->recoveryUnit()->onRollback([
- this,
- oldValidator = std::move(_validator),
- oldValidatorDoc = std::move(_validatorDoc)
- ]() mutable {
+ opCtx->recoveryUnit()->onRollback([this,
+ oldValidator = std::move(_validator),
+ oldValidatorDoc = std::move(_validatorDoc)]() mutable {
this->_validator = std::move(oldValidator);
this->_validatorDoc = std::move(oldValidatorDoc);
});
@@ -937,13 +932,11 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
StringData newAction) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
- opCtx->recoveryUnit()->onRollback([
- this,
- oldValidator = std::move(_validator),
- oldValidatorDoc = std::move(_validatorDoc),
- oldValidationLevel = _validationLevel,
- oldValidationAction = _validationAction
- ]() mutable {
+ opCtx->recoveryUnit()->onRollback([this,
+ oldValidator = std::move(_validator),
+ oldValidatorDoc = std::move(_validatorDoc),
+ oldValidationLevel = _validationLevel,
+ oldValidationAction = _validationAction]() mutable {
this->_validator = std::move(oldValidator);
this->_validatorDoc = std::move(oldValidatorDoc);
this->_validationLevel = oldValidationLevel;
@@ -1276,10 +1269,8 @@ void addErrorIfUnequal(T stored, T cached, StringData name, ValidateResults* res
if (stored != cached) {
results->valid = false;
results->errors.push_back(str::stream() << "stored value for " << name
- << " does not match cached value: "
- << stored
- << " != "
- << cached);
+ << " does not match cached value: " << stored
+ << " != " << cached);
}
}
@@ -1383,8 +1374,8 @@ Status CollectionImpl::validate(OperationContext* opCtx,
opCtx, _indexCatalog.get(), &indexNsResultsMap, &keysPerIndex, level, results, output);
if (!results->valid) {
- log(LogComponent::kIndex) << "validating collection " << ns() << " failed"
- << uuidString;
+ log(LogComponent::kIndex)
+ << "validating collection " << ns() << " failed" << uuidString;
} else {
log(LogComponent::kIndex) << "validated collection " << ns() << uuidString;
}
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index a156754bf14..1732dfef374 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -256,9 +256,9 @@ Status CollectionOptions::parse(const BSONObj& options, ParseKind kind) {
idIndex = std::move(tempIdIndex);
} else if (!createdOn24OrEarlier && !mongo::isGenericArgument(fieldName)) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << "The field '" << fieldName
- << "' is not a valid collection option. Options: "
- << options);
+ str::stream()
+ << "The field '" << fieldName
+ << "' is not a valid collection option. Options: " << options);
}
}
@@ -414,4 +414,4 @@ bool CollectionOptions::matchesStorageOptions(const CollectionOptions& other,
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index 2558df0f5ea..6d63c00441d 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -143,4 +143,4 @@ struct CollectionOptions {
// The aggregation pipeline that defines this view.
BSONObj pipeline;
};
-}
+} // namespace mongo
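The recurring -} to +} // namespace mongo hunks come from clang-format's FixNamespaceComments option: the closing brace of a namespace gets a trailing comment naming it, and anonymous namespaces get the bare } // namespace form seen later in health_log.cpp and index_key_validate.cpp. A sketch of the output shape (namespace and names invented):

    namespace example {
    namespace {
    constexpr int kAnswer = 42;
    } // namespace
    int answer() {
        return kAnswer;
    }
    } // namespace example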
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 1d15a592b1d..36154c5b9b0 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -236,11 +236,10 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
<< " - existing collection with conflicting UUID " << uuid
<< " is in a drop-pending state: " << *currentName;
return Result(Status(ErrorCodes::NamespaceExists,
- str::stream() << "existing collection "
- << currentName->toString()
- << " with conflicting UUID "
- << uuid.toString()
- << " is in a drop-pending state."));
+ str::stream()
+ << "existing collection " << currentName->toString()
+ << " with conflicting UUID " << uuid.toString()
+ << " is in a drop-pending state."));
}
// In the case of oplog replay, a future command may have created or renamed a
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 7f7c570d6c0..943350b0c89 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -123,9 +123,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
auto duplicates = _getNamesWithConflictingCasing_inlock(dbname);
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "db already exists with different case already have: ["
- << *duplicates.cbegin()
- << "] trying to create ["
- << dbname.toString()
+ << *duplicates.cbegin() << "] trying to create [" << dbname.toString()
<< "]",
duplicates.empty());
@@ -241,8 +239,8 @@ void DatabaseHolderImpl::closeAll(OperationContext* opCtx) {
// It is the caller's responsibility to ensure that no index builds are active in the
// database.
invariant(!coll->getIndexCatalog()->haveAnyIndexesInProgress(),
- str::stream() << "An index is building on collection '" << coll->ns()
- << "'.");
+ str::stream()
+ << "An index is building on collection '" << coll->ns() << "'.");
}
dbs.insert(i->first);
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 3371914f7c0..d3e1bebdff2 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -366,8 +366,7 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx,
auto numIndexesInProgress = collection->getIndexCatalog()->numIndexesInProgress(opCtx);
massert(ErrorCodes::BackgroundOperationInProgressForNamespace,
str::stream() << "cannot drop collection " << nss << " (" << uuidString << ") when "
- << numIndexesInProgress
- << " index builds in progress.",
+ << numIndexesInProgress << " index builds in progress.",
numIndexesInProgress == 0);
audit::logDropCollection(&cc(), nss.toString());
@@ -566,9 +565,7 @@ void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx,
// This check only applies for actual collections, not indexes or other types of ns.
uassert(17381,
str::stream() << "fully qualified namespace " << nss << " is too long "
- << "(max is "
- << NamespaceString::MaxNsCollectionLen
- << " bytes)",
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
!nss.isNormal() || nss.size() <= NamespaceString::MaxNsCollectionLen);
uassert(17316, "cannot create a blank collection", nss.coll() > 0);
@@ -623,8 +620,8 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
bool generatedUUID = false;
if (!optionsWithUUID.uuid) {
if (!canAcceptWrites) {
- std::string msg = str::stream() << "Attempted to create a new collection " << nss
- << " without a UUID";
+ std::string msg = str::stream()
+ << "Attempted to create a new collection " << nss << " without a UUID";
severe() << msg;
uasserted(ErrorCodes::InvalidOptions, msg);
} else {
@@ -726,8 +723,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
"model for collection name "
<< collectionNameModel
<< " must contain at least one percent sign within first "
- << maxModelLength
- << " characters.");
+ << maxModelLength << " characters.");
}
if (!_uniqueCollectionNamespacePseudoRandom) {
@@ -766,9 +762,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
return Status(
ErrorCodes::NamespaceExists,
str::stream() << "Cannot generate collection name for temporary collection with model "
- << collectionNameModel
- << " after "
- << numGenerationAttempts
+ << collectionNameModel << " after " << numGenerationAttempts
<< " attempts due to namespace conflicts with existing collections.");
}
@@ -897,8 +891,7 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
} else {
invariant(createCollection(opCtx, nss, collectionOptions, createDefaultIndexes, idIndex),
str::stream() << "Collection creation failed after validating options: " << nss
- << ". Options: "
- << collectionOptions.toBSON());
+ << ". Options: " << collectionOptions.toBSON());
}
return Status::OK();
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 81d2dcf777d..9a919eeabac 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -158,13 +158,13 @@ TEST_F(DatabaseTest, CreateCollectionThrowsExceptionWhenDatabaseIsInADropPending
// tests.
ON_BLOCK_EXIT([&wuow] { wuow.commit(); });
- ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss),
- AssertionException,
- ErrorCodes::DatabaseDropPending,
- (StringBuilder() << "Cannot create collection " << _nss
- << " - database is in the process of being dropped.")
- .stringData());
+ ASSERT_THROWS_CODE_AND_WHAT(db->createCollection(_opCtx.get(), _nss),
+ AssertionException,
+ ErrorCodes::DatabaseDropPending,
+ (StringBuilder()
+ << "Cannot create collection " << _nss
+ << " - database is in the process of being dropped.")
+ .stringData());
});
}
@@ -297,11 +297,10 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
auto indexCatalog = collection->getIndexCatalog();
ASSERT_EQUALS(indexCatalog->numIndexesInProgress(opCtx), 0);
- auto indexInfoObj = BSON(
- "v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name"
- << "a_1"
- << "ns"
- << nss.ns());
+ auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key"
+ << BSON("a" << 1) << "name"
+ << "a_1"
+ << "ns" << nss.ns());
auto indexBuildBlock =
indexCatalog->createIndexBuildBlock(opCtx, indexInfoObj, IndexBuildMethod::kHybrid);
@@ -418,8 +417,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
auto nss1 = unittest::assertGet(db->makeUniqueCollectionNamespace(_opCtx.get(), model));
if (!re.FullMatch(nss1.ns())) {
FAIL((StringBuilder() << "First generated namespace \"" << nss1.ns()
- << "\" does not match reqular expression \""
- << re.pattern()
+ << "\" does not match reqular expression \"" << re.pattern()
<< "\"")
.str());
}
@@ -436,8 +434,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom
auto nss2 = unittest::assertGet(db->makeUniqueCollectionNamespace(_opCtx.get(), model));
if (!re.FullMatch(nss2.ns())) {
FAIL((StringBuilder() << "Second generated namespace \"" << nss2.ns()
- << "\" does not match reqular expression \""
- << re.pattern()
+ << "\" does not match reqular expression \"" << re.pattern()
<< "\"")
.str());
}
@@ -530,28 +527,28 @@ TEST_F(DatabaseTest, AutoGetCollectionForReadCommandSucceedsWithDeadlineMin) {
}
TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex) {
- writeConflictRetry(
- _opCtx.get(),
- "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex",
- _nss.ns(),
- [this] {
- AutoGetOrCreateDb autoDb(_opCtx.get(), _nss.db(), MODE_X);
- auto db = autoDb.getDb();
- ASSERT_TRUE(db);
-
- WriteUnitOfWork wuow(_opCtx.get());
-
- CollectionOptions options;
- options.setNoIdIndex();
-
- ASSERT_THROWS_CODE_AND_WHAT(
- db->createCollection(_opCtx.get(), _nss, options),
- AssertionException,
- 50001,
- (StringBuilder() << "autoIndexId:false is not allowed for collection " << _nss
- << " because it can be replicated")
- .stringData());
- });
+ writeConflictRetry(_opCtx.get(),
+ "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex",
+ _nss.ns(),
+ [this] {
+ AutoGetOrCreateDb autoDb(_opCtx.get(), _nss.db(), MODE_X);
+ auto db = autoDb.getDb();
+ ASSERT_TRUE(db);
+
+ WriteUnitOfWork wuow(_opCtx.get());
+
+ CollectionOptions options;
+ options.setNoIdIndex();
+
+ ASSERT_THROWS_CODE_AND_WHAT(
+ db->createCollection(_opCtx.get(), _nss, options),
+ AssertionException,
+ 50001,
+ (StringBuilder()
+ << "autoIndexId:false is not allowed for collection " << _nss
+ << " because it can be replicated")
+ .stringData());
+ });
}
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index 27a7969c6d6..e27dfb11b66 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -84,4 +84,4 @@ public:
private:
boost::optional<DisableDocumentValidation> _documentValidationDisabler;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 2602c6e59d5..036e511fb7c 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -275,12 +275,11 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
}
if (!result.status.isOK()) {
- return result.status.withContext(
- str::stream() << "dropDatabase " << dbName << " failed waiting for "
- << numCollectionsToDrop
- << " collection drop(s) (most recent drop optime: "
- << awaitOpTime.toString()
- << ") to replicate.");
+ return result.status.withContext(str::stream()
+ << "dropDatabase " << dbName << " failed waiting for "
+ << numCollectionsToDrop
+ << " collection drop(s) (most recent drop optime: "
+ << awaitOpTime.toString() << ") to replicate.");
}
log() << "dropDatabase " << dbName << " - successfully dropped " << numCollectionsToDrop
@@ -301,8 +300,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "Could not drop database " << dbName
<< " because it does not exist after dropping "
- << numCollectionsToDrop
- << " collection(s).");
+ << numCollectionsToDrop << " collection(s).");
}
bool userInitiatedWritesAndNotPrimary =
@@ -310,12 +308,11 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
- str::stream() << "Could not drop database " << dbName
- << " because we transitioned from PRIMARY to "
- << replCoord->getMemberState().toString()
- << " while waiting for "
- << numCollectionsToDrop
- << " pending collection drop(s).");
+ str::stream()
+ << "Could not drop database " << dbName
+ << " because we transitioned from PRIMARY to "
+ << replCoord->getMemberState().toString() << " while waiting for "
+ << numCollectionsToDrop << " pending collection drop(s).");
}
// _finishDropDatabase creates its own scope guard to ensure drop-pending is unset.
diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp
index dcc43045646..16fc13ecb44 100644
--- a/src/mongo/db/catalog/drop_database_test.cpp
+++ b/src/mongo/db/catalog/drop_database_test.cpp
@@ -432,10 +432,10 @@ TEST_F(DropDatabaseTest,
auto status = dropDatabase(_opCtx.get(), _nss.db().toString());
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
- ASSERT_EQUALS(
- status.reason(),
- std::string(str::stream() << "Could not drop database " << _nss.db()
- << " because it does not exist after dropping 1 collection(s)."));
+ ASSERT_EQUALS(status.reason(),
+ std::string(str::stream()
+ << "Could not drop database " << _nss.db()
+ << " because it does not exist after dropping 1 collection(s)."));
ASSERT_FALSE(AutoGetDb(_opCtx.get(), _nss.db(), MODE_X).getDb());
}
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index ef6074eaaff..2e3f2383c14 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -105,7 +105,6 @@ Status wrappedRun(OperationContext* opCtx,
collection->uuid(),
desc->indexName(),
desc->infoObj());
-
});
anObjBuilder->append("msg", "non-_id indexes dropped for collection");
@@ -121,16 +120,14 @@ Status wrappedRun(OperationContext* opCtx,
opCtx, indexElem.embeddedObject(), false, &indexes);
if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "can't find index with key: "
- << indexElem.embeddedObject());
+ str::stream()
+ << "can't find index with key: " << indexElem.embeddedObject());
} else if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
- str::stream() << indexes.size() << " indexes found for key: "
- << indexElem.embeddedObject()
+ str::stream() << indexes.size()
+ << " indexes found for key: " << indexElem.embeddedObject()
<< ", identify by name instead."
- << " Conflicting indexes: "
- << indexes[0]->infoObj()
- << ", "
+ << " Conflicting indexes: " << indexes[0]->infoObj() << ", "
<< indexes[1]->infoObj());
}
@@ -166,23 +163,19 @@ Status wrappedRun(OperationContext* opCtx,
for (auto indexNameElem : indexElem.Array()) {
if (indexNameElem.type() != String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "dropIndexes " << collection->ns() << " ("
- << collection->uuid()
- << ") failed to drop multiple indexes "
- << indexElem.toString(false)
- << ": index name must be a string");
+ str::stream()
+ << "dropIndexes " << collection->ns() << " ("
+ << collection->uuid() << ") failed to drop multiple indexes "
+ << indexElem.toString(false) << ": index name must be a string");
}
auto indexToDelete = indexNameElem.String();
auto status = dropIndexByName(opCtx, collection, indexCatalog, indexToDelete);
if (!status.isOK()) {
- return status.withContext(str::stream() << "dropIndexes " << collection->ns()
- << " ("
- << collection->uuid()
- << ") failed to drop multiple indexes "
- << indexElem.toString(false)
- << ": "
- << indexToDelete);
+ return status.withContext(
+ str::stream() << "dropIndexes " << collection->ns() << " ("
+ << collection->uuid() << ") failed to drop multiple indexes "
+ << indexElem.toString(false) << ": " << indexToDelete);
}
}
diff --git a/src/mongo/db/catalog/health_log.cpp b/src/mongo/db/catalog/health_log.cpp
index 0bd4171c262..2703dee4aa1 100644
--- a/src/mongo/db/catalog/health_log.cpp
+++ b/src/mongo/db/catalog/health_log.cpp
@@ -48,7 +48,7 @@ CollectionOptions getOptions(void) {
options.cappedSize = kDefaultHealthlogSize;
return options;
}
-}
+} // namespace
HealthLog::HealthLog() : _writer(nss, getOptions(), kMaxBufferSize) {}
@@ -78,4 +78,4 @@ bool HealthLog::log(const HealthLogEntry& entry) {
}
const NamespaceString HealthLog::nss("local", "system.healthlog");
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/health_log.h b/src/mongo/db/catalog/health_log.h
index 2b312f741fa..ba2bcbf440a 100644
--- a/src/mongo/db/catalog/health_log.h
+++ b/src/mongo/db/catalog/health_log.h
@@ -91,4 +91,4 @@ public:
private:
DeferredWriter _writer;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index adee836bfa5..c803d4aa600 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -110,7 +110,7 @@ Status IndexCatalogImpl::IndexBuildBlock::init(OperationContext* opCtx, Collecti
if (isBackgroundIndex) {
opCtx->recoveryUnit()->onCommit(
- [ entry = _entry, coll = collection ](boost::optional<Timestamp> commitTime) {
+ [entry = _entry, coll = collection](boost::optional<Timestamp> commitTime) {
// This will prevent the unfinished index from being visible on index iterators.
if (commitTime) {
entry->setMinimumVisibleSnapshot(commitTime.get());
@@ -169,7 +169,7 @@ void IndexCatalogImpl::IndexBuildBlock::success(OperationContext* opCtx, Collect
collection->indexBuildSuccess(opCtx, _entry);
opCtx->recoveryUnit()->onCommit(
- [ opCtx, entry = _entry, coll = collection ](boost::optional<Timestamp> commitTime) {
+ [opCtx, entry = _entry, coll = collection](boost::optional<Timestamp> commitTime) {
// Note: this runs after the WUOW commits but before we release our X lock on the
// collection. This means that any snapshot created after this must include the full
// index, and no one can try to read this index before we set the visibility.
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 197cf85bb70..ab263abaa42 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -86,8 +86,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx,
const auto& nss = collection->ns();
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X),
str::stream() << "Unable to set up index build " << buildUUID << ": collection "
- << nss.ns()
- << " is not locked in exclusive mode.");
+ << nss.ns() << " is not locked in exclusive mode.");
auto builder = _getBuilder(buildUUID);
diff --git a/src/mongo/db/catalog/index_builds_manager_test.cpp b/src/mongo/db/catalog/index_builds_manager_test.cpp
index 3ecb5dca2a1..df5e50d244c 100644
--- a/src/mongo/db/catalog/index_builds_manager_test.cpp
+++ b/src/mongo/db/catalog/index_builds_manager_test.cpp
@@ -76,8 +76,7 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
std::vector<BSONObj> indexSpecs;
for (auto keyName : keys) {
indexSpecs.push_back(BSON("ns" << nss.toString() << "v" << 2 << "key" << BSON(keyName << 1)
- << "name"
- << (keyName + "_1")));
+ << "name" << (keyName + "_1")));
}
return indexSpecs;
}
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index f10e51daa03..aa711498a71 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -317,8 +317,10 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
fassert(31164, status);
indexMetadataHasChanged = DurableCatalog::get(opCtx)->setIndexIsMultikey(
opCtx, _ns, _descriptor->indexName(), paths);
- opCtx->recoveryUnit()->onCommit([onMultikeyCommitFn, indexMetadataHasChanged](
- boost::optional<Timestamp>) { onMultikeyCommitFn(indexMetadataHasChanged); });
+ opCtx->recoveryUnit()->onCommit(
+ [onMultikeyCommitFn, indexMetadataHasChanged](boost::optional<Timestamp>) {
+ onMultikeyCommitFn(indexMetadataHasChanged);
+ });
wuow.commit();
});
} else {
@@ -326,8 +328,10 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
opCtx, _ns, _descriptor->indexName(), paths);
}
- opCtx->recoveryUnit()->onCommit([onMultikeyCommitFn, indexMetadataHasChanged](
- boost::optional<Timestamp>) { onMultikeyCommitFn(indexMetadataHasChanged); });
+ opCtx->recoveryUnit()->onCommit(
+ [onMultikeyCommitFn, indexMetadataHasChanged](boost::optional<Timestamp>) {
+ onMultikeyCommitFn(indexMetadataHasChanged);
+ });
// Within a multi-document transaction, reads should be able to see the effect of previous
// writes done within that transaction. If a previous write in a transaction has set the index
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 670622a17be..4e37bffe820 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -173,17 +173,16 @@ IndexCatalogEntry* IndexCatalogImpl::_setupInMemoryStructures(
}
if (!initFromDisk) {
- opCtx->recoveryUnit()->onRollback(
- [ this, opCtx, isReadyIndex, descriptor = descriptorPtr ] {
- // Need to preserve indexName as descriptor no longer exists after remove().
- const std::string indexName = descriptor->indexName();
- if (isReadyIndex) {
- _readyIndexes.remove(descriptor);
- } else {
- _buildingIndexes.remove(descriptor);
- }
- _collection->infoCache()->droppedIndex(opCtx, indexName);
- });
+ opCtx->recoveryUnit()->onRollback([this, opCtx, isReadyIndex, descriptor = descriptorPtr] {
+ // Need to preserve indexName as descriptor no longer exists after remove().
+ const std::string indexName = descriptor->indexName();
+ if (isReadyIndex) {
+ _readyIndexes.remove(descriptor);
+ } else {
+ _buildingIndexes.remove(descriptor);
+ }
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
+ });
}
return save;
@@ -207,8 +206,7 @@ Status IndexCatalogImpl::checkUnfinished() const {
return Status(ErrorCodes::InternalError,
str::stream() << "IndexCatalog has left over indexes that must be cleared"
- << " ns: "
- << _collection->ns());
+ << " ns: " << _collection->ns());
}
std::unique_ptr<IndexCatalog::IndexIterator> IndexCatalogImpl::getIndexIterator(
@@ -244,8 +242,7 @@ string IndexCatalogImpl::_getAccessMethodName(const BSONObj& keyPattern) const {
// supports an index plugin unsupported by this version.
uassert(17197,
str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index "
- << keyPattern,
+ << "in index " << keyPattern,
IndexNames::isKnownName(pluginName));
return pluginName;
@@ -432,10 +429,8 @@ StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont
invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns(), MODE_X));
invariant(_collection->numRecords(opCtx) == 0,
str::stream() << "Collection must be empty. Collection: " << _collection->ns()
- << " UUID: "
- << _collection->uuid()
- << " Count: "
- << _collection->numRecords(opCtx));
+ << " UUID: " << _collection->uuid()
+ << " Count: " << _collection->numRecords(opCtx));
_checkMagic();
Status status = checkUnfinished();
@@ -545,8 +540,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (!IndexDescriptor::isIndexVersionSupported(indexVersion)) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "this version of mongod cannot build new indexes "
- << "of version number "
- << static_cast<int>(indexVersion));
+ << "of version number " << static_cast<int>(indexVersion));
}
if (nss.isOplog())
@@ -563,9 +557,7 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "the \"ns\" field of the index spec '"
<< specNamespace.valueStringData()
- << "' does not match the collection name '"
- << nss
- << "'");
+ << "' does not match the collection name '" << nss << "'");
}
// logical name of the index
@@ -595,16 +587,15 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (indexNamespace.size() > NamespaceString::MaxNsLen)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "namespace name generated from index name \""
- << indexNamespace
- << "\" is too long (127 byte max)");
+ << indexNamespace << "\" is too long (127 byte max)");
}
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = index_key_validate::validateKeyPattern(key, indexVersion);
if (!keyStatus.isOK()) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "bad index key pattern " << key << ": "
- << keyStatus.reason());
+ str::stream()
+ << "bad index key pattern " << key << ": " << keyStatus.reason());
}
const string pluginName = IndexNames::findPluginName(key);
@@ -633,18 +624,16 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (static_cast<IndexVersion>(vElt.numberInt()) < IndexVersion::kV2) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Index version " << vElt.fieldNameStringData() << "="
- << vElt.numberInt()
- << " does not support the '"
- << collationElement.fieldNameStringData()
- << "' option"};
+ << vElt.numberInt() << " does not support the '"
+ << collationElement.fieldNameStringData() << "' option"};
}
if ((pluginName != IndexNames::BTREE) && (pluginName != IndexNames::GEO_2DSPHERE) &&
(pluginName != IndexNames::HASHED) && (pluginName != IndexNames::WILDCARD)) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "Index type '" << pluginName
- << "' does not support collation: "
- << collator->getSpec().toBSON());
+ str::stream()
+ << "Index type '" << pluginName
+ << "' does not support collation: " << collator->getSpec().toBSON());
}
}
@@ -665,8 +654,8 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
if (spec.getField("expireAfterSeconds")) {
return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "Index type '" << pluginName
- << "' cannot be a TTL index");
+ str::stream()
+ << "Index type '" << pluginName << "' cannot be a TTL index");
}
}
@@ -776,21 +765,18 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
<< "An index with the same key pattern, but a different "
<< "collation already exists with the same name. Try again with "
<< "a unique name. "
- << "Existing index: "
- << desc->infoObj()
- << " Requested index: "
- << spec);
+ << "Existing index: " << desc->infoObj()
+ << " Requested index: " << spec);
}
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() != key) ||
SimpleBSONObjComparator::kInstance.evaluate(
desc->infoObj().getObjectField("collation") != collation)) {
return Status(ErrorCodes::IndexKeySpecsConflict,
- str::stream() << "Index must have unique name."
- << "The existing index: "
- << desc->infoObj()
- << " has the same name as the requested index: "
- << spec);
+ str::stream()
+ << "Index must have unique name."
+ << "The existing index: " << desc->infoObj()
+ << " has the same name as the requested index: " << spec);
}
IndexDescriptor temp(_collection, _getAccessMethodName(key), spec);
@@ -816,9 +802,9 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
IndexDescriptor temp(_collection, _getAccessMethodName(key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index: " << spec
- << " already exists with different options: "
- << desc->infoObj());
+ str::stream()
+ << "Index: " << spec
+ << " already exists with different options: " << desc->infoObj());
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index with name: " << name
@@ -843,8 +829,7 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx,
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
<< "found existing text index \""
- << textIndexes[0]->indexName()
- << "\"");
+ << textIndexes[0]->indexName() << "\"");
}
}
return Status::OK();
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index 87a9bc74f74..3a2f297cf1a 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -459,8 +459,7 @@ BSONObj IndexConsistency::_generateInfo(const int& indexNumber,
if (idKey) {
return BSON("indexName" << indexName << "recordId" << recordId.repr() << "idKey" << *idKey
- << "indexKey"
- << rehydratedKey);
+ << "indexKey" << rehydratedKey);
} else {
return BSON("indexName" << indexName << "recordId" << recordId.repr() << "indexKey"
<< rehydratedKey);
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index c9cd4223504..56d521edbc4 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -108,7 +108,7 @@ static const std::set<StringData> allowedIdIndexFieldNames = {
IndexDescriptor::kNamespaceFieldName,
// Index creation under legacy writeMode can result in an index spec with an _id field.
"_id"};
-}
+} // namespace
Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion indexVersion) {
const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
@@ -134,8 +134,7 @@ Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion inde
if (keyElement.type() == BSONType::Object || keyElement.type() == BSONType::Array) {
return {code,
str::stream() << "Values in index key pattern cannot be of type "
- << typeName(keyElement.type())
- << " for index version v:"
+ << typeName(keyElement.type()) << " for index version v:"
<< static_cast<int>(indexVersion)};
}
@@ -276,9 +275,9 @@ StatusWith<BSONObj> validateIndexSpec(
if (IndexDescriptor::kKeyPatternFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kKeyPatternFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kKeyPatternFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
std::vector<StringData> keys;
@@ -313,18 +312,18 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kIndexNameFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kIndexNameFieldName
- << "' must be a string, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kIndexNameFieldName
+ << "' must be a string, but got " << typeName(indexSpecElem.type())};
}
hasIndexNameField = true;
} else if (IndexDescriptor::kNamespaceFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kNamespaceFieldName
- << "' must be a string, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kNamespaceFieldName
+ << "' must be a string, but got " << typeName(indexSpecElem.type())};
}
StringData ns = indexSpecElem.valueStringData();
@@ -336,22 +335,19 @@ StatusWith<BSONObj> validateIndexSpec(
if (ns != expectedNamespace.ns()) {
return {ErrorCodes::BadValue,
- str::stream() << "The value of the field '"
- << IndexDescriptor::kNamespaceFieldName
- << "' ("
- << ns
- << ") doesn't match the namespace '"
- << expectedNamespace
- << "'"};
+ str::stream()
+ << "The value of the field '" << IndexDescriptor::kNamespaceFieldName
+ << "' (" << ns << ") doesn't match the namespace '" << expectedNamespace
+ << "'"};
}
hasNamespaceField = true;
} else if (IndexDescriptor::kIndexVersionFieldName == indexSpecElemFieldName) {
if (!indexSpecElem.isNumber()) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kIndexVersionFieldName
- << "' must be a number, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kIndexVersionFieldName
+ << "' must be a number, but got " << typeName(indexSpecElem.type())};
}
auto requestedIndexVersionAsInt = representAs<int>(indexSpecElem.number());
@@ -375,9 +371,9 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kCollationFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << IndexDescriptor::kCollationFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kCollationFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
if (indexSpecElem.Obj().isEmpty()) {
@@ -390,10 +386,9 @@ StatusWith<BSONObj> validateIndexSpec(
} else if (IndexDescriptor::kPartialFilterExprFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '"
- << IndexDescriptor::kPartialFilterExprFieldName
- << "' must be an object, but got "
- << typeName(indexSpecElem.type())};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPartialFilterExprFieldName
+ << "' must be an object, but got " << typeName(indexSpecElem.type())};
}
// Just use the simple collator, even though the index may have a separate collation
@@ -419,10 +414,9 @@ StatusWith<BSONObj> validateIndexSpec(
const auto key = indexSpec.getObjectField(IndexDescriptor::kKeyPatternFieldName);
if (IndexNames::findPluginName(key) != IndexNames::WILDCARD) {
return {ErrorCodes::BadValue,
- str::stream() << "The field '" << IndexDescriptor::kPathProjectionFieldName
- << "' is only allowed in an '"
- << IndexNames::WILDCARD
- << "' index"};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPathProjectionFieldName
+ << "' is only allowed in an '" << IndexNames::WILDCARD << "' index"};
}
if (indexSpecElem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
@@ -432,10 +426,10 @@ StatusWith<BSONObj> validateIndexSpec(
}
if (!key.hasField("$**")) {
return {ErrorCodes::FailedToParse,
- str::stream() << "The field '" << IndexDescriptor::kPathProjectionFieldName
- << "' is only allowed when '"
- << IndexDescriptor::kKeyPatternFieldName
- << "' is {\"$**\": ±1}"};
+ str::stream()
+ << "The field '" << IndexDescriptor::kPathProjectionFieldName
+ << "' is only allowed when '" << IndexDescriptor::kKeyPatternFieldName
+ << "' is {\"$**\": ±1}"};
}
if (indexSpecElem.embeddedObject().isEmpty()) {
@@ -478,10 +472,8 @@ StatusWith<BSONObj> validateIndexSpec(
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid index specification " << indexSpec
<< "; cannot create an index with the '"
- << IndexDescriptor::kCollationFieldName
- << "' option and "
- << IndexDescriptor::kIndexVersionFieldName
- << "="
+ << IndexDescriptor::kCollationFieldName << "' option and "
+ << IndexDescriptor::kIndexVersionFieldName << "="
<< static_cast<int>(*resolvedIndexVersion)};
}
diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp
index bbb55b5281a..d61cbb8e0d7 100644
--- a/src/mongo/db/catalog/index_key_validate_test.cpp
+++ b/src/mongo/db/catalog/index_key_validate_test.cpp
@@ -127,8 +127,7 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueFailsForV2Indexes) {
ASSERT_EQ(ErrorCodes::CannotCreateIndex,
validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b"
- << true),
+ << "b" << true),
IndexVersion::kV2));
}
@@ -137,8 +136,7 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueSucceedsForV1Indexes) {
ASSERT_OK(validateKeyPattern(BSON("x" << false), IndexVersion::kV1));
ASSERT_OK(validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b"
- << true),
+ << "b" << true),
IndexVersion::kV1));
}
diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp
index 560f4820579..6b472d09073 100644
--- a/src/mongo/db/catalog/index_spec_validate_test.cpp
+++ b/src/mongo/db/catalog/index_spec_validate_test.cpp
@@ -50,8 +50,8 @@
namespace mongo {
namespace {
-using index_key_validate::validateIndexSpec;
using index_key_validate::validateIdIndexSpec;
+using index_key_validate::validateIndexSpec;
using index_key_validate::validateIndexSpecCollation;
using unittest::EnsureFCV;
@@ -140,16 +140,14 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceIsNotAString) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << 1),
+ << "ns" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::TypeMismatch,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << BSONObj()),
+ << "ns" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -181,8 +179,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceDoesNotMatch) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.coll()),
+ << "ns" << kTestNamespace.coll()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -191,8 +188,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -200,10 +196,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
// Verify that the index specification we returned is still considered valid.
@@ -215,10 +208,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1),
+ << "ns" << kTestNamespace.ns() << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -228,8 +218,7 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre
<< "indexName"
<< "ns"
<< "test.index_spec_validate"
- << "v"
- << 1)),
+ << "v" << 1)),
sorted(result.getValue()));
}
@@ -246,8 +235,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotANumber) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << BSONObj()),
+ << "v" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -257,32 +245,28 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotRepresentableAsInt) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2.2),
+ << "v" << 2.2),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::nan("1")),
+ << "v" << std::nan("1")),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::numeric_limits<double>::infinity()),
+ << "v" << std::numeric_limits<double>::infinity()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::BadValue,
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << std::numeric_limits<long long>::max()),
+ << "v" << std::numeric_limits<long long>::max()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -292,8 +276,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsV0) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 0),
+ << "v" << 0),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -303,9 +286,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 3
- << "collation"
+ << "v" << 3 << "collation"
<< BSON("locale"
<< "en")),
kTestNamespace,
@@ -315,8 +296,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << -3LL),
+ << "v" << -3LL),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -325,8 +305,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -334,17 +313,13 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2LL),
+ << "v" << 2LL),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -352,10 +327,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2LL)),
+ << "ns" << kTestNamespace.ns() << "v" << 2LL)),
sorted(result.getValue()));
}
@@ -363,8 +335,7 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()),
+ << "ns" << kTestNamespace.ns()),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -372,10 +343,7 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)),
+ << "ns" << kTestNamespace.ns() << "v" << 2)),
sorted(result.getValue()));
// Verify that the index specification we returned is still considered valid.
@@ -387,8 +355,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -396,10 +363,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 1)),
+ << "ns" << kTestNamespace.ns() << "v" << 1)),
sorted(result.getValue()));
}
@@ -408,8 +372,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << 1),
+ << "collation" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
ASSERT_EQ(ErrorCodes::TypeMismatch,
@@ -424,8 +387,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << BSONArray()),
+ << "collation" << BSONArray()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -435,8 +397,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsEmpty) {
validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "collation"
- << BSONObj()),
+ << "collation" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -449,8 +410,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsPresentAndVersionIsLessTh
<< "collation"
<< BSON("locale"
<< "simple")
- << "v"
- << 1),
+ << "v" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility));
}
@@ -459,9 +419,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("locale"
<< "simple")),
kTestNamespace,
@@ -471,11 +429,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("locale"
<< "simple"))),
sorted(result.getValue()));
@@ -483,9 +437,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("unknownCollationOption" << true)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
@@ -494,11 +446,7 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("unknownCollationOption" << true))),
sorted(result.getValue()));
}
@@ -507,9 +455,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "collation"
+ << "v" << 2 << "collation"
<< BSON("locale"
<< "en")),
kTestNamespace,
@@ -519,11 +465,7 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
<< BSON("locale"
<< "en"))),
sorted(result.getValue()));
@@ -533,10 +475,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV2) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 2
- << "unknownField"
- << 1),
+ << "v" << 2 << "unknownField" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption, result);
@@ -546,10 +485,7 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV1) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "v"
- << 1
- << "unknownField"
- << 1),
+ << "v" << 1 << "unknownField" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption, result);
@@ -559,95 +495,59 @@ TEST(IdIndexSpecValidateTest, ReturnsAnErrorIfKeyPatternIsIncorrectForIdIndex) {
ASSERT_EQ(ErrorCodes::BadValue,
validateIdIndexSpec(BSON("key" << BSON("_id" << -1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
ASSERT_EQ(ErrorCodes::BadValue,
validateIdIndexSpec(BSON("key" << BSON("a" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
}
TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfKeyPatternCorrectForIdIndex) {
ASSERT_OK(validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "anyname"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)));
+ << "ns" << kTestNamespace.ns() << "v" << 2)));
}
TEST(IdIndexSpecValidateTest, ReturnsAnErrorIfFieldNotAllowedForIdIndex) {
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "background"
- << false)));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "background" << false)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "unique"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "unique"
<< true)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "partialFilterExpression"
- << BSON("a" << 5))));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "partialFilterExpression" << BSON("a" << 5))));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "sparse"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "sparse"
<< false)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "expireAfterSeconds"
- << 3600)));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "expireAfterSeconds" << 3600)));
ASSERT_EQ(ErrorCodes::InvalidIndexSpecificationOption,
validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "storageEngine"
- << BSONObj())));
+ << "ns" << kTestNamespace.ns() << "v" << 2
+ << "storageEngine" << BSONObj())));
}
TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfAllFieldsAllowedForIdIndex) {
- ASSERT_OK(validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
- << "_id_"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "simple"))));
+ ASSERT_OK(
+ validateIdIndexSpec(BSON("key" << BSON("_id" << 1) << "name"
+ << "_id_"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "simple"))));
}
TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
@@ -659,10 +559,7 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
+ << "ns" << kTestNamespace.ns() << "v" << 2
<< "collation"
<< BSON("locale"
<< "mock_reverse_string")),
@@ -670,34 +567,21 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
ASSERT_OK(result.getStatus());
// We don't care about the order of the fields in the resulting index specification.
- ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
- << "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "mock_reverse_string"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "mock_version"))),
- sorted(result.getValue()));
+ ASSERT_BSONOBJ_EQ(
+ sorted(BSON("key" << BSON("field" << 1) << "name"
+ << "indexName"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "mock_reverse_string"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "mock_version"))),
+ sorted(result.getValue()));
}
TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
@@ -709,10 +593,7 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
+ << "ns" << kTestNamespace.ns() << "v" << 2
<< "collation"
<< BSON("locale"
<< "simple")),
@@ -722,10 +603,7 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
// We don't care about the order of the fields in the resulting index specification.
ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2)),
+ << "ns" << kTestNamespace.ns() << "v" << 2)),
sorted(result.getValue()));
}
@@ -738,50 +616,33 @@ TEST(IndexSpecCollationValidateTest, FillsInCollationFieldWithCollectionDefaultI
auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2),
+ << "ns" << kTestNamespace.ns() << "v" << 2),
&defaultCollator);
ASSERT_OK(result.getStatus());
// We don't care about the order of the fields in the resulting index specification.
- ASSERT_BSONOBJ_EQ(sorted(BSON("key" << BSON("field" << 1) << "name"
- << "indexName"
- << "ns"
- << kTestNamespace.ns()
- << "v"
- << 2
- << "collation"
- << BSON("locale"
- << "mock_reverse_string"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "mock_version"))),
- sorted(result.getValue()));
+ ASSERT_BSONOBJ_EQ(
+ sorted(BSON("key" << BSON("field" << 1) << "name"
+ << "indexName"
+ << "ns" << kTestNamespace.ns() << "v" << 2 << "collation"
+ << BSON("locale"
+ << "mock_reverse_string"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "mock_version"))),
+ sorted(result.getValue()));
}
TEST(IndexSpecPartialFilterTest, FailsIfPartialFilterIsNotAnObject) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "partialFilterExpression"
- << 1),
+ << "partialFilterExpression" << 1),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus(), ErrorCodes::TypeMismatch);
@@ -802,8 +663,7 @@ TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
- << "partialFilterExpression"
- << BSON("a" << 1)),
+ << "partialFilterExpression" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
@@ -811,25 +671,25 @@ TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) {
TEST(IndexSpecWildcard, SucceedsWithInclusion) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 1 << "b" << 1)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 1 << "b" << 1)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
}
TEST(IndexSpecWildcard, SucceedsWithExclusion) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 0 << "b" << 0)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 0 << "b" << 0)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_OK(result.getStatus());
}
@@ -895,13 +755,13 @@ TEST(IndexSpecWildcard, FailsWithImproperFeatureCompatabilityVersion) {
TEST(IndexSpecWildcard, FailsWithMixedProjection) {
EnsureFCV guard(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
- auto result = validateIndexSpec(kDefaultOpCtx,
- BSON("key" << BSON("$**" << 1) << "name"
- << "indexName"
- << "wildcardProjection"
- << BSON("a" << 1 << "b" << 0)),
- kTestNamespace,
- serverGlobalParams.featureCompatibility);
+ auto result =
+ validateIndexSpec(kDefaultOpCtx,
+ BSON("key" << BSON("$**" << 1) << "name"
+ << "indexName"
+ << "wildcardProjection" << BSON("a" << 1 << "b" << 0)),
+ kTestNamespace,
+ serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), 40178);
}
@@ -923,8 +783,7 @@ TEST(IndexSpecWildcard, FailsWhenProjectionPluginNotWildcard) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("a" << 1)),
+ << "wildcardProjection" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
@@ -935,8 +794,7 @@ TEST(IndexSpecWildcard, FailsWhenProjectionIsNotAnObject) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << 4),
+ << "wildcardProjection" << 4),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::TypeMismatch);
@@ -947,8 +805,7 @@ TEST(IndexSpecWildcard, FailsWithEmptyProjection) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSONObj()),
+ << "wildcardProjection" << BSONObj()),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
@@ -959,8 +816,7 @@ TEST(IndexSpecWildcard, FailsWhenInclusionWithSubpath) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a.$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("a" << 1)),
+ << "wildcardProjection" << BSON("a" << 1)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
@@ -971,8 +827,7 @@ TEST(IndexSpecWildcard, FailsWhenExclusionWithSubpath) {
auto result = validateIndexSpec(kDefaultOpCtx,
BSON("key" << BSON("a.$**" << 1) << "name"
<< "indexName"
- << "wildcardProjection"
- << BSON("b" << 0)),
+ << "wildcardProjection" << BSON("b" << 0)),
kTestNamespace,
serverGlobalParams.featureCompatibility);
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
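Every hunk in the index-spec validation tests above is the same mechanical reflow: clang-format-7.0.1 packs consecutive << operands onto one line where the column limit allows, instead of breaking after every value, while a bare string literal directly following another string literal still gets its own line. A minimal standalone sketch of the two shapes, using std::ostringstream so it compiles without the server's BSON headers (the BSON macro itself is untouched by this commit):

    #include <iostream>
    #include <sstream>

    int main() {
        // Pre-7.0.1 shape: break after every operand once the chain wraps.
        std::ostringstream before;
        before << "key" << 1 << "name"
               << "indexName"
               << "v"
               << 2;

        // 7.0.1 shape: short trailing operands such as "v" << 2 are packed;
        // only a literal directly after another literal keeps its own line.
        std::ostringstream after;
        after << "key" << 1 << "name"
              << "indexName"
              << "v" << 2;

        // Both spellings stream exactly the same bytes.
        std::cout << (before.str() == after.str()) << '\n';  // prints 1
        return 0;
    }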
diff --git a/src/mongo/db/catalog/index_timestamp_helper.h b/src/mongo/db/catalog/index_timestamp_helper.h
index 581b1bd4740..9ae4457e409 100644
--- a/src/mongo/db/catalog/index_timestamp_helper.h
+++ b/src/mongo/db/catalog/index_timestamp_helper.h
@@ -55,6 +55,6 @@ void setGhostCommitTimestampForWrite(OperationContext* opCtx, const NamespaceStr
* also throw WriteConflictException.
*/
bool setGhostCommitTimestampForCatalogWrite(OperationContext* opCtx, const NamespaceString& nss);
-};
+}; // namespace IndexTimestampHelper
-} // mongo
+} // namespace mongo
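The two-line header fix above is clang-format's FixNamespaceComments pass: the closing brace of a long namespace gets a trailing comment naming it, and the stale "// mongo" spelling is normalized to "// namespace mongo". The stray semicolon after the inner brace is preserved; it parses as an empty declaration, and the commit is formatting-only. A compilable sketch of the resulting shape (the declaration is a stand-in, not the real header contents):

    namespace mongo {
    namespace IndexTimestampHelper {

    bool setGhostCommitTimestampForCatalogWrite();  // stand-in declaration

    }; // namespace IndexTimestampHelper
    } // namespace mongo

    int main() {
        return 0;
    }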
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 287f34bb57e..920f6773f83 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -135,8 +135,8 @@ void MultiIndexBlock::cleanUpAfterBuild(OperationContext* opCtx, Collection* col
replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
opCtx->getServiceContext()->getOpObserver()->onOpMessage(
opCtx,
- BSON("msg" << std::string(str::stream() << "Failing index builds. Coll: "
- << nss)));
+ BSON("msg" << std::string(str::stream()
+ << "Failing index builds. Coll: " << nss)));
} else {
// Simply get a timestamp to write with here; we can't write to the oplog.
repl::UnreplicatedWritesBlock uwb(opCtx);
@@ -195,7 +195,7 @@ MultiIndexBlock::OnInitFn MultiIndexBlock::kNoopOnInitFn =
MultiIndexBlock::OnInitFn MultiIndexBlock::makeTimestampedIndexOnInitFn(OperationContext* opCtx,
const Collection* coll) {
- return [ opCtx, ns = coll->ns() ](std::vector<BSONObj> & specs)->Status {
+ return [opCtx, ns = coll->ns()](std::vector<BSONObj>& specs) -> Status {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (opCtx->recoveryUnit()->getCommitTimestamp().isNull() &&
replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
@@ -226,13 +226,11 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
if (State::kAborted == _getState()) {
return {ErrorCodes::IndexBuildAborted,
str::stream() << "Index build aborted: " << _abortReason
- << ". Cannot initialize index builder: "
- << collection->ns()
+ << ". Cannot initialize index builder: " << collection->ns()
<< (collection->uuid()
? (" (" + collection->uuid()->toString() + "): ")
: ": ")
- << indexSpecs.size()
- << " provided. First index spec: "
+ << indexSpecs.size() << " provided. First index spec: "
<< (indexSpecs.empty() ? BSONObj() : indexSpecs[0])};
}
@@ -741,8 +739,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
return {
ErrorCodes::IndexBuildAborted,
str::stream() << "Index build aborted: " << _abortReason
- << ". Cannot commit index builder: "
- << collection->ns()
+ << ". Cannot commit index builder: " << collection->ns()
<< (_collectionUUID ? (" (" + _collectionUUID->toString() + ")") : "")};
}
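The makeTimestampedIndexOnInitFn hunk shows clang-format-7's updated lambda handling: no padding inside the capture list ("[opCtx, ns = coll->ns()]" rather than "[ opCtx, ns = coll->ns() ]") and a spaced trailing return type ("-> Status" rather than "->Status"). A self-contained sketch with a stand-in Status type:

    #include <string>
    #include <vector>

    struct Status {
        bool ok;
    };

    int main() {
        std::string ns = "test.coll";
        // 7.0.1 output: tight capture list, spaces around the trailing "->".
        auto onInit = [ns](std::vector<std::string>& specs) -> Status {
            specs.push_back(ns);
            return Status{true};
        };
        std::vector<std::string> specs;
        return onInit(specs).ok ? 0 : 1;
    }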
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
index 064b0f4f359..c1cfd965afd 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.cpp
@@ -60,7 +60,7 @@ KeyString makeWildCardMultikeyMetadataKeyString(const BSONObj& indexKey) {
const RecordId multikeyMetadataRecordId(RecordId::ReservedId::kWildcardMultikeyMetadataId);
return {KeyString::kLatestVersion, indexKey, multikeyMetadataOrd, multikeyMetadataRecordId};
}
-}
+} // namespace
Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
const RecordData& record,
@@ -117,9 +117,9 @@ Status RecordStoreValidateAdaptor::validate(const RecordId& recordId,
{documentKeySet.begin(), documentKeySet.end()},
{multikeyMetadataKeys.begin(), multikeyMetadataKeys.end()},
multikeyPaths)) {
- std::string msg = str::stream() << "Index " << descriptor->indexName()
- << " is not multi-key, but a multikey path "
- << " is present in document " << recordId;
+ std::string msg = str::stream()
+ << "Index " << descriptor->indexName() << " is not multi-key, but a multikey path "
+            << "is present in document " << recordId;
curRecordResults.errors.push_back(msg);
curRecordResults.valid = false;
}
@@ -203,9 +203,9 @@ void RecordStoreValidateAdaptor::traverseIndex(const IndexAccessMethod* iam,
}
if (results && _indexConsistency->getMultikeyMetadataPathCount(indexNumber) > 0) {
- results->errors.push_back(
- str::stream() << "Index '" << descriptor->indexName()
- << "' has one or more missing multikey metadata index keys");
+ results->errors.push_back(str::stream()
+ << "Index '" << descriptor->indexName()
+ << "' has one or more missing multikey metadata index keys");
results->valid = false;
}
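Both hunks in this file show the other reflow that dominates the commit: when a str::stream() chain cannot fit after "=" or "(", clang-format-7 breaks once before the whole stream expression and then packs the operands, instead of aligning every operand under the first "<<". Sketched below with a minimal stand-in for mongo's str::stream (an assumption for self-containment; the real class lives in the server's string utilities):

    #include <sstream>
    #include <string>

    // Minimal stand-in for mongo::str::stream(): streams operands and
    // converts implicitly to std::string.
    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& value) {
            _ss << value;
            return *this;
        }
        operator std::string() const {
            return _ss.str();
        }

    private:
        std::ostringstream _ss;
    };

    int main() {
        std::string indexName = "a_1";
        int recordId = 42;
        // 7.0.1 shape: one break before the chain, packed operands after it.
        std::string msg = stream()
            << "Index " << indexName << " is not multi-key, but a multikey path "
            << "is present in document " << recordId;
        return msg.empty() ? 1 : 0;
    }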
diff --git a/src/mongo/db/catalog/private/record_store_validate_adaptor.h b/src/mongo/db/catalog/private/record_store_validate_adaptor.h
index 4885b98d66f..00ab221e89a 100644
--- a/src/mongo/db/catalog/private/record_store_validate_adaptor.h
+++ b/src/mongo/db/catalog/private/record_store_validate_adaptor.h
@@ -101,4 +101,4 @@ private:
IndexCatalog* _indexCatalog; // Not owned.
ValidateResultsMap* _indexNsResultsMap; // Not owned.
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index f756dab37e9..8e087d7e213 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -203,15 +203,8 @@ Status renameTargetCollectionToTmp(OperationContext* opCtx,
if (!tmpNameResult.isOK()) {
return tmpNameResult.getStatus().withContext(
str::stream() << "Cannot generate a temporary collection name for the target "
- << targetNs
- << " ("
- << targetUUID
- << ") so that the source"
- << sourceNs
- << " ("
- << sourceUUID
- << ") could be renamed to "
- << targetNs);
+                << targetNs << " (" << targetUUID << ") so that the source " << sourceNs
+ << " (" << sourceUUID << ") could be renamed to " << targetNs);
}
const auto& tmpName = tmpNameResult.getValue();
const bool stayTemp = true;
@@ -339,9 +332,10 @@ Status renameCollectionWithinDB(OperationContext* opCtx,
boost::optional<Lock::CollectionLock> targetLock;
// To prevent deadlock, always lock system.views collection in the end because concurrent
// view-related operations always lock system.views in the end.
- if (!source.isSystemDotViews() && (target.isSystemDotViews() ||
- ResourceId(RESOURCE_COLLECTION, source.ns()) <
- ResourceId(RESOURCE_COLLECTION, target.ns()))) {
+ if (!source.isSystemDotViews() &&
+ (target.isSystemDotViews() ||
+ ResourceId(RESOURCE_COLLECTION, source.ns()) <
+ ResourceId(RESOURCE_COLLECTION, target.ns()))) {
// To prevent deadlock, always lock source and target in ascending resourceId order.
sourceLock.emplace(opCtx, source, MODE_X);
targetLock.emplace(opCtx, target, MODE_X);
@@ -546,8 +540,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
if (!tmpNameResult.isOK()) {
return tmpNameResult.getStatus().withContext(
str::stream() << "Cannot generate temporary collection name to rename " << source
- << " to "
- << target);
+ << " to " << target);
}
const auto& tmpName = tmpNameResult.getValue();
@@ -639,7 +632,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
*(tmpColl->uuid()),
indexToCopy,
false // fromMigrate
- );
+ );
auto indexResult =
tmpIndexCatalog->createIndexOnEmptyCollection(opCtx, indexToCopy);
if (!indexResult.isOK()) {
@@ -700,7 +693,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
cursor->save();
// When this exits via success or WCE, we need to restore the cursor.
- ON_BLOCK_EXIT([ opCtx, ns = tmpName.ns(), &cursor ]() {
+ ON_BLOCK_EXIT([opCtx, ns = tmpName.ns(), &cursor]() {
writeConflictRetry(
opCtx, "retryRestoreCursor", ns, [&cursor] { cursor->restore(); });
});
@@ -867,9 +860,7 @@ Status renameCollectionForRollback(OperationContext* opCtx,
invariant(source->db() == target.db(),
str::stream() << "renameCollectionForRollback: source and target namespaces must "
"have the same database. source: "
- << *source
- << ". target: "
- << target);
+ << *source << ". target: " << target);
log() << "renameCollectionForRollback: rename " << *source << " (" << uuid << ") to " << target
<< ".";
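The renameCollectionWithinDB hunk is worth a second look: clang-format-7 breaks a multi-line condition at the outermost "&&" first and indents the parenthesized "||" group as a unit, which is why the system.views check gained a line. A sketch with hypothetical stand-ins for the namespace test and the ResourceId ordering:

    #include <string>

    namespace {
    bool isSystemDotViews(const std::string& ns) {
        return ns == "test.system.views";
    }
    int resourceId(const std::string& ns) {
        return static_cast<int>(ns.size());  // stand-in for ResourceId order
    }
    } // namespace

    int main() {
        std::string source = "test.a";
        std::string target = "test.b";
        // 7.0.1 shape: outer && broken first, || group indented as a unit.
        if (!isSystemDotViews(source) &&
            (isSystemDotViews(target) ||
             resourceId(source) < resourceId(target))) {
            return 0;
        }
        return 1;
    }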
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index ed6dfe5de27..ace8cd0957b 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -329,8 +329,8 @@ void _createCollection(OperationContext* opCtx,
<< " does not exist.";
WriteUnitOfWork wuow(opCtx);
- ASSERT_TRUE(db->createCollection(opCtx, nss, options)) << "Failed to create collection "
- << nss << " due to unknown error.";
+ ASSERT_TRUE(db->createCollection(opCtx, nss, options))
+ << "Failed to create collection " << nss << " due to unknown error.";
wuow.commit();
});
@@ -414,11 +414,8 @@ void _createIndexOnEmptyCollection(OperationContext* opCtx,
ASSERT_TRUE(collection) << "Cannot create index on empty collection " << nss
<< " because collection " << nss << " does not exist.";
- auto indexInfoObj = BSON(
- "v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name"
- << indexName
- << "ns"
- << nss.ns());
+ auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key"
+ << BSON("a" << 1) << "name" << indexName << "ns" << nss.ns());
auto indexCatalog = collection->getIndexCatalog();
WriteUnitOfWork wuow(opCtx);
@@ -733,8 +730,8 @@ TEST_F(RenameCollectionTest, RenameCollectionMakesTargetCollectionDropPendingIfD
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss))
<< "source collection " << _sourceNss << " still exists after successful rename";
- ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) << "target collection " << _targetNss
- << " missing after successful rename";
+ ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss))
+ << "target collection " << _targetNss << " missing after successful rename";
ASSERT_TRUE(_opObserver->onRenameCollectionCalled);
ASSERT(_opObserver->onRenameCollectionDropTarget);
@@ -758,8 +755,8 @@ TEST_F(RenameCollectionTest,
ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options));
ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss))
<< "source collection " << _sourceNss << " still exists after successful rename";
- ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) << "target collection " << _targetNss
- << " missing after successful rename";
+ ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss))
+ << "target collection " << _targetNss << " missing after successful rename";
ASSERT_TRUE(_opObserver->onRenameCollectionCalled);
ASSERT_FALSE(_opObserver->onRenameCollectionDropTarget);
@@ -845,9 +842,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo
_createCollectionWithUUID(_opCtx.get(), _targetNss);
auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss);
auto uuidDoc = BSON("ui" << UUID::gen());
- auto cmd =
- BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns() << "dropTarget"
- << dropTargetUUID);
+ auto cmd = BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns()
+ << "dropTarget" << dropTargetUUID);
ASSERT_OK(renameCollectionForApplyOps(
_opCtx.get(), missingSourceNss.db().toString(), uuidDoc["ui"], cmd, {}));
ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss));
@@ -885,9 +881,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo
auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss);
auto uuidDoc = BSON("ui" << _createCollectionWithUUID(_opCtx.get(), dropPendingNss));
- auto cmd =
- BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns() << "dropTarget"
- << dropTargetUUID);
+ auto cmd = BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns()
+ << "dropTarget" << dropTargetUUID);
repl::UnreplicatedWritesBlock uwb(_opCtx.get());
repl::OpTime renameOpTime = {Timestamp(Seconds(200), 1U), 1LL};
@@ -930,8 +925,8 @@ void _testRenameCollectionStayTemp(OperationContext* opCtx,
RenameCollectionOptions options;
options.stayTemp = stayTemp;
ASSERT_OK(renameCollection(opCtx, sourceNss, targetNss, options));
- ASSERT_FALSE(_collectionExists(opCtx, sourceNss)) << "source collection " << sourceNss
- << " still exists after successful rename";
+ ASSERT_FALSE(_collectionExists(opCtx, sourceNss))
+ << "source collection " << sourceNss << " still exists after successful rename";
if (!isSourceCollectionTemporary) {
ASSERT_FALSE(_isTempCollection(opCtx, targetNss))
@@ -1018,8 +1013,8 @@ void _testRenameCollectionAcrossDatabaseOplogEntries(
_insertDocument(opCtx, sourceNss, BSON("_id" << 0));
oplogEntries->clear();
if (forApplyOps) {
- auto cmd = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "dropTarget" << true);
+ auto cmd = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns()
+ << "dropTarget" << true);
ASSERT_OK(renameCollectionForApplyOps(opCtx, sourceNss.db().toString(), {}, cmd, {}));
} else {
RenameCollectionOptions options;
diff --git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h
index cf3dd0f3625..c449932f653 100644
--- a/src/mongo/db/catalog/util/partitioned.h
+++ b/src/mongo/db/catalog/util/partitioned.h
@@ -237,7 +237,7 @@ public:
KeyPartitioner()(partitioned_detail::getKey(value), nPartitions);
this->_partitionedContainer->_partitions[partitionId].insert(std::move(value));
}
- void insert(value_type)&& = delete;
+ void insert(value_type) && = delete;
/**
* Erases one entry from the partitioned structure, returns the number of entries removed.
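The partitioned.h change is a pure token-spacing fix: clang-format-7 writes a deleted rvalue-ref-qualified overload as "void insert(value_type) && = delete;", with a space before the ref-qualifier. The pattern itself forbids inserting through a temporary accessor object; a compilable sketch with a hypothetical guard class:

    #include <vector>

    class OnePartition {
    public:
        explicit OnePartition(std::vector<int>& data) : _data(data) {}

        // Usable only on an lvalue accessor; 7.0.1 spells the deleted rvalue
        // overload with a space before "&&".
        void insert(int value) & {
            _data.push_back(value);
        }
        void insert(int) && = delete;

    private:
        std::vector<int>& _data;
    };

    int main() {
        std::vector<int> data;
        OnePartition guard(data);
        guard.insert(7);                 // fine: lvalue accessor
        // OnePartition(data).insert(8); // would not compile: deleted on rvalues
        return data.size() == 1 ? 0 : 1;
    }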
diff --git a/src/mongo/db/catalog/util/partitioned_test.cpp b/src/mongo/db/catalog/util/partitioned_test.cpp
index 06de76bfc26..1cd235c95d6 100644
--- a/src/mongo/db/catalog/util/partitioned_test.cpp
+++ b/src/mongo/db/catalog/util/partitioned_test.cpp
@@ -237,7 +237,6 @@ TEST(PartitionedConcurrency, ShouldProtectConcurrentAccesses) {
AtomicWord<unsigned> ready{0};
for (size_t threadId = 1; threadId <= numThreads; ++threadId) {
auto workerThreadBody = [&, threadId, opsPerThread]() {
-
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
while (ready.load() < numThreads) {
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 3b2b4ed417a..40b6a883ef7 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -123,8 +123,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
str::stream()
<< "Unable to read from a snapshot due to pending collection catalog "
"changes; please retry the operation. Snapshot timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum is "
+ << mySnapshot->toString() << ". Collection minimum is "
<< minSnapshot->toString(),
!minSnapshot || *mySnapshot >= *minSnapshot);
}
@@ -158,8 +157,7 @@ NamespaceString AutoGetCollection::resolveNamespaceStringOrUUID(OperationContext
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "UUID " << nsOrUUID.toString() << " specified in " << nsOrUUID.dbname()
- << " resolved to a collection in a different database: "
- << *resolvedNss,
+ << " resolved to a collection in a different database: " << *resolvedNss,
resolvedNss->db() == nsOrUUID.dbname());
return *resolvedNss;
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index b177bc24e07..358de1437de 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -55,9 +55,7 @@ thread_local ServiceContext::UniqueClient currentClient;
void invariantNoCurrentClient() {
invariant(!haveClient(),
str::stream() << "Already have client on this thread: " //
- << '"'
- << Client::getCurrent()->desc()
- << '"');
+ << '"' << Client::getCurrent()->desc() << '"');
}
} // namespace
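One idiom in the invariantNoCurrentClient hunk survives the reflow: the empty trailing "//" after the first string literal is a common trick to pin a line break, so clang-format keeps the quote-wrapping operands packed on their own line regardless of what its usual rules would choose. A sketch:

    #include <iostream>
    #include <string>

    int main() {
        std::string desc = "conn42";
        // The empty trailing comment below forces the break to stay after
        // the literal, whatever the formatter's packing rules would do.
        std::cout << "Already have client on this thread: "  //
                  << '"' << desc << '"' << '\n';
        return 0;
    }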
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 0ebcff34f09..3e93171254b 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -299,7 +299,7 @@ void _appendCursorStats(BSONObjBuilder& b) {
b.appendNumber("totalNoTimeout", cursorStatsOpenNoTimeout.get());
b.appendNumber("timedOut", cursorStatsTimedOut.get());
}
-}
+} // namespace
void startClientCursorMonitor() {
clientCursorMonitor.go();
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index bf6387956ed..f79fa7067c3 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -132,8 +132,7 @@ struct Cloner::Fun {
uassert(
ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to "
- << to_collection.ns(),
+ << " to " << to_collection.ns(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
@@ -163,13 +162,12 @@ struct Cloner::Fun {
db->userCreateNS(
opCtx, to_collection, collectionOptions, createDefaultIndexes, indexSpec),
str::stream() << "collection creation failed during clone ["
- << to_collection.ns()
- << "]");
+ << to_collection.ns() << "]");
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
invariant(collection,
- str::stream() << "Missing collection during clone [" << to_collection.ns()
- << "]");
+ str::stream()
+ << "Missing collection during clone [" << to_collection.ns() << "]");
});
}
@@ -209,8 +207,8 @@ struct Cloner::Fun {
collection = db->getCollection(opCtx, to_collection);
uassert(28594,
- str::stream() << "Collection " << to_collection.ns()
- << " dropped while cloning",
+ str::stream()
+ << "Collection " << to_collection.ns() << " dropped while cloning",
collection != NULL);
}
@@ -292,7 +290,7 @@ struct Cloner::Fun {
};
/* copy the specified collection
-*/
+ */
void Cloner::copy(OperationContext* opCtx,
const string& toDBName,
const NamespaceString& from_collection,
@@ -326,10 +324,7 @@ void Cloner::copy(OperationContext* opCtx,
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " with filter "
- << query.toString(),
+ << " to " << to_collection.ns() << " with filter " << query.toString(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
}
@@ -350,9 +345,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while copying indexes from " << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " (Cloner)",
+ << " to " << to_collection.ns() << " (Cloner)",
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
@@ -381,11 +374,9 @@ void Cloner::copyIndexes(OperationContext* opCtx,
createDefaultIndexes,
fixIndexSpec(to_collection.db().toString(),
getIdIndexSpec(from_indexes))),
- str::stream() << "Collection creation failed while copying indexes from "
- << from_collection.ns()
- << " to "
- << to_collection.ns()
- << " (Cloner)");
+ str::stream()
+ << "Collection creation failed while copying indexes from "
+ << from_collection.ns() << " to " << to_collection.ns() << " (Cloner)");
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
invariant(collection,
@@ -602,8 +593,7 @@ Status Cloner::createCollectionsForDb(
// we're trying to create already exists.
return Status(ErrorCodes::NamespaceExists,
str::stream() << "unsharded collection with same namespace "
- << nss.ns()
- << " already exists.");
+ << nss.ns() << " already exists.");
}
// If the collection is sharded and a collection with the same name already
@@ -625,12 +615,9 @@ Status Cloner::createCollectionsForDb(
return Status(
ErrorCodes::InvalidOptions,
str::stream()
- << "sharded collection with same namespace "
- << nss.ns()
+ << "sharded collection with same namespace " << nss.ns()
<< " already exists, but options don't match. Existing options are "
- << existingOpts
- << " and new options are "
- << options);
+ << existingOpts << " and new options are " << options);
}
// If the collection does not already exist and is sharded, we create a new
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index b0eeddefe5b..152d71f2a7c 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -52,10 +52,10 @@
namespace mongo {
-using std::unique_ptr;
+using std::endl;
using std::string;
using std::stringstream;
-using std::endl;
+using std::unique_ptr;
/**
* The cloneCollection command is deprecated.
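This hunk and the matching one in count_cmd.cpp below are clang-format's SortUsingDeclarations pass: a contiguous block of using declarations is reordered lexicographically, which is why std::unique_ptr now follows std::string and std::stringstream. Sketch:

    #include <iostream>
    #include <memory>
    #include <sstream>
    #include <string>

    // 7.0.1 (SortUsingDeclarations) orders a contiguous block like so:
    using std::endl;
    using std::string;
    using std::stringstream;
    using std::unique_ptr;

    int main() {
        stringstream ss;
        ss << string("sorted") << endl;
        unique_ptr<string> out(new string(ss.str()));
        std::cout << *out;
        return 0;
    }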
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index e2fab366906..42b1ff38dfe 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -118,8 +118,7 @@ public:
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
uasserted(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from << " to "
- << to
- << " (as capped)");
+ << to << " (as capped)");
}
Database* const db = autoDb.getDb();
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index d78af120927..c97db6c58c5 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -149,4 +149,4 @@ public:
}
};
static CompactCmd compactCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index cf470ebc6ae..04ca3a12f83 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -130,4 +130,4 @@ public:
return true;
}
} cmdConnectionStatus;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 82a6408b6e8..0cf4eda8067 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -51,9 +51,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Failpoint which causes to hang "count" cmd after acquiring the DB lock.
MONGO_FAIL_POINT_DEFINE(hangBeforeCollectionCount);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 5bc8dc8d00a..d14f5505a1d 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -109,9 +109,9 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
if (kIndexesFieldName == cmdElemFieldName) {
if (cmdElem.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << kIndexesFieldName
- << "' must be an array, but got "
- << typeName(cmdElem.type())};
+ str::stream()
+ << "The field '" << kIndexesFieldName << "' must be an array, but got "
+ << typeName(cmdElem.type())};
}
for (auto&& indexesElem : cmdElem.Obj()) {
@@ -163,16 +163,15 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
continue;
} else {
return {ErrorCodes::BadValue,
- str::stream() << "Invalid field specified for " << kCommandName << " command: "
- << cmdElemFieldName};
+ str::stream() << "Invalid field specified for " << kCommandName
+ << " command: " << cmdElemFieldName};
}
}
if (!hasIndexesField) {
return {ErrorCodes::FailedToParse,
str::stream() << "The '" << kIndexesFieldName
- << "' field is a required argument of the "
- << kCommandName
+ << "' field is a required argument of the " << kCommandName
<< " command"};
}
@@ -202,15 +201,13 @@ Status validateTTLOptions(OperationContext* opCtx, const BSONObj& cmdObj) {
str::stream() << "TTL index '" << kExpireAfterSeconds
<< "' option must be numeric, but received a type of '"
<< typeName(expireAfterSecondsElt.type())
- << "'. Index spec: "
- << indexObj};
+ << "'. Index spec: " << indexObj};
}
if (expireAfterSecondsElt.safeNumberLong() < 0) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "TTL index '" << kExpireAfterSeconds
- << "' option cannot be less than 0. Index spec: "
- << indexObj};
+ << "' option cannot be less than 0. Index spec: " << indexObj};
}
const std::string tooLargeErr = str::stream()
@@ -292,8 +289,7 @@ void checkUniqueIndexConstraints(OperationContext* opCtx,
const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
uassert(ErrorCodes::CannotCreateIndex,
str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON(),
+ << " with shard key pattern " << shardKeyPattern.toBSON(),
shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
@@ -392,8 +388,7 @@ Collection* getOrCreateCollection(OperationContext* opCtx,
auto collection = db->createCollection(opCtx, ns, options);
invariant(collection,
str::stream() << "Failed to create collection " << ns.ns()
- << " during index creation: "
- << redact(cmdObj));
+ << " during index creation: " << redact(cmdObj));
wunit.commit();
return collection;
});
@@ -701,9 +696,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
// All other errors should be forwarded to the caller with index build information included.
log() << "Index build failed: " << buildUUID << ": " << ex.toStatus();
ex.addContext(str::stream() << "Index build failed: " << buildUUID << ": Collection " << ns
- << " ( "
- << *collectionUUID
- << " )");
+ << " ( " << *collectionUUID << " )");
throw;
}
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 3a4cb290a8f..963ce885d85 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -345,7 +345,7 @@ private:
return false;
}
- auto[prev, next] = getPrevAndNextUUIDs(opCtx, collection);
+ auto [prev, next] = getPrevAndNextUUIDs(opCtx, collection);
// Find and report collection metadata.
auto indices = collectionIndexInfo(opCtx, collection);
@@ -558,4 +558,4 @@ public:
MONGO_REGISTER_TEST_COMMAND(DbCheckCmd);
} // namespace
-}
+} // namespace mongo
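The one-character dbcheck.cpp change ("auto[prev, next]" becoming "auto [prev, next]") reflects clang-format-7's handling of C++17 structured bindings, which the previous formatting left glued to the auto keyword. A sketch with a stand-in for getPrevAndNextUUIDs:

    #include <utility>

    std::pair<int, int> getPrevAndNext() {
        return {1, 2};  // stand-in for getPrevAndNextUUIDs(opCtx, collection)
    }

    int main() {
        // 7.0.1 separates "auto" from the binding list with a space.
        auto [prev, next] = getPrevAndNext();
        return prev + next == 3 ? 0 : 1;
    }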
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 2f960430e2a..c4a6f06ac49 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -143,8 +143,8 @@ public:
repl::ReplicationCoordinator::modeNone) &&
(dbname == NamespaceString::kLocalDb)) {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "Cannot drop '" << dbname
- << "' database while replication is active");
+ str::stream()
+ << "Cannot drop '" << dbname << "' database while replication is active");
}
BSONElement e = cmdObj.firstElement();
int p = (int)e.number();
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 5bede2d66f6..f94edb2992f 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -108,7 +108,7 @@ namespace {
/**
* Sets the profiling level, logging/profiling threshold, and logging/profiling sample rate for the
* given database.
-*/
+ */
class CmdProfile : public ProfileCmdBase {
public:
CmdProfile() = default;
@@ -200,8 +200,7 @@ public:
uassert(50847,
str::stream() << "The element that calls binDataClean() must be type of "
"BinData, but type of "
- << typeName(stateElem.type())
- << " found.",
+ << typeName(stateElem.type()) << " found.",
(stateElem.type() == BSONType::BinData));
int len;
@@ -288,8 +287,7 @@ public:
uassert(50849,
str::stream() << "The element that calls binDataClean() must be type "
"of BinData, but type of "
- << owned["data"].type()
- << " found.",
+ << owned["data"].type() << " found.",
owned["data"].type() == BSONType::BinData);
exec->saveState();
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index c150adf60bb..853ba96c51c 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -157,8 +157,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the last applied opTime. Requested clusterTime: "
<< targetClusterTime.toString()
- << "; last applied opTime: "
- << lastAppliedOpTime.toString(),
+ << "; last applied opTime: " << lastAppliedOpTime.toString(),
lastAppliedOpTime.getTimestamp() >= targetClusterTime);
// We aren't holding the global lock in intent mode, so it is possible for the global
@@ -173,8 +172,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the all_durable timestamp. Requested clusterTime: "
<< targetClusterTime.toString()
- << "; all_durable timestamp: "
- << allDurableTime.toString(),
+ << "; all_durable timestamp: " << allDurableTime.toString(),
allDurableTime >= targetClusterTime);
// The $_internalReadAtClusterTime option causes any storage-layer cursors created
@@ -334,8 +332,7 @@ private:
str::stream() << "Unable to read from a snapshot due to pending collection"
" catalog changes; please retry the operation. Snapshot"
" timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum timestamp is "
+ << mySnapshot->toString() << ". Collection minimum timestamp is "
<< minSnapshot->toString(),
!minSnapshot || *mySnapshot >= *minSnapshot);
} else {
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
index 6c5723c6de3..fbc542f952a 100644
--- a/src/mongo/db/commands/do_txn_cmd.cpp
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -71,9 +71,7 @@ OplogApplicationValidity validateDoTxnCommand(const BSONObj& doTxnObj) {
} catch (...) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "cannot apply a malformed operation in doTxn: "
- << redact(opObj)
- << ": "
- << exceptionToStatus().toString());
+ << redact(opObj) << ": " << exceptionToStatus().toString());
}
};
diff --git a/src/mongo/db/commands/driverHelpers.cpp b/src/mongo/db/commands/driverHelpers.cpp
index 58f73648b4e..3a3ca1b8704 100644
--- a/src/mongo/db/commands/driverHelpers.cpp
+++ b/src/mongo/db/commands/driverHelpers.cpp
@@ -87,4 +87,4 @@ public:
return true;
}
} driverObjectIdTest;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index e8eee785040..34447f9b4cf 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -244,4 +244,4 @@ public:
return true;
}
} cmdReIndex;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 6a8c49f3e20..a29d01cf985 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -153,8 +153,7 @@ std::unique_ptr<CommandInvocation> CmdExplain::parse(OperationContext* opCtx,
if (auto innerDb = explainedObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbname
- << " but got "
- << innerDb.checkAndGetStringData(),
+ << " but got " << innerDb.checkAndGetStringData(),
innerDb.checkAndGetStringData() == dbname);
}
auto explainedCommand = CommandHelpers::findCommand(explainedObj.firstElementFieldName());
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index 52ffb278a22..a50cc4ff06d 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -103,4 +103,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(FaultInjectCmd);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index cab903e15d3..ec1d65deb3b 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -215,12 +215,9 @@ void FeatureCompatibilityVersion::updateMinWireVersion() {
void FeatureCompatibilityVersion::_validateVersion(StringData version) {
uassert(40284,
str::stream() << "featureCompatibilityVersion must be '"
- << FeatureCompatibilityVersionParser::kVersion42
- << "' or '"
- << FeatureCompatibilityVersionParser::kVersion40
- << "'. See "
- << feature_compatibility_version_documentation::kCompatibilityLink
- << ".",
+ << FeatureCompatibilityVersionParser::kVersion42 << "' or '"
+ << FeatureCompatibilityVersionParser::kVersion40 << "'. See "
+ << feature_compatibility_version_documentation::kCompatibilityLink << ".",
version == FeatureCompatibilityVersionParser::kVersion42 ||
version == FeatureCompatibilityVersionParser::kVersion40);
}
diff --git a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
index 6d68b8f417b..919a2aae34c 100644
--- a/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_command_parser.cpp
@@ -56,10 +56,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
return {ErrorCodes::TypeMismatch,
str::stream() << "Command argument must be of type "
"String, but was of type "
- << typeName(versionElem.type())
- << " in: "
- << cmdObj
- << ". See "
+ << typeName(versionElem.type()) << " in: " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< "."};
}
@@ -73,9 +70,7 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized field found " << cmdElem.fieldNameStringData()
- << " in "
- << cmdObj
- << ". See "
+ << " in " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -86,14 +81,9 @@ StatusWith<std::string> FeatureCompatibilityVersionCommandParser::extractVersion
version != FeatureCompatibilityVersionParser::kVersion40) {
return {ErrorCodes::BadValue,
str::stream() << "Invalid command argument. Expected '"
- << FeatureCompatibilityVersionParser::kVersion42
- << "' or '"
- << FeatureCompatibilityVersionParser::kVersion40
- << "', found "
- << version
- << " in: "
- << cmdObj
- << ". See "
+ << FeatureCompatibilityVersionParser::kVersion42 << "' or '"
+ << FeatureCompatibilityVersionParser::kVersion40 << "', found "
+ << version << " in: " << cmdObj << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< "."};
}
diff --git a/src/mongo/db/commands/feature_compatibility_version_documentation.h b/src/mongo/db/commands/feature_compatibility_version_documentation.h
index 7b51814b2ac..0be6c0b1f39 100644
--- a/src/mongo/db/commands/feature_compatibility_version_documentation.h
+++ b/src/mongo/db/commands/feature_compatibility_version_documentation.h
@@ -34,5 +34,5 @@ namespace feature_compatibility_version_documentation {
constexpr StringData kCompatibilityLink =
"http://dochub.mongodb.org/core/4.0-feature-compatibility"_sd;
constexpr StringData kUpgradeLink = "http://dochub.mongodb.org/core/4.0-upgrade-fcv"_sd;
-}
-}
+} // namespace feature_compatibility_version_documentation
+} // namespace mongo
diff --git a/src/mongo/db/commands/feature_compatibility_version_parser.cpp b/src/mongo/db/commands/feature_compatibility_version_parser.cpp
index 4a86d174468..0aa872b9041 100644
--- a/src/mongo/db/commands/feature_compatibility_version_parser.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version_parser.cpp
@@ -61,37 +61,26 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
continue;
} else if (fieldName == kVersionField || fieldName == kTargetVersionField) {
if (elem.type() != BSONType::String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << fieldName << " must be of type String, but was of type "
- << typeName(elem.type())
- << ". Contents of "
- << kParameterName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << fieldName << " must be of type String, but was of type "
+ << typeName(elem.type()) << ". Contents of " << kParameterName
<< " document in "
<< NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ << ": " << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
if (elem.String() != kVersion42 && elem.String() != kVersion40) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid value for " << fieldName << ", found "
- << elem.String()
- << ", expected '"
- << kVersion42
- << "' or '"
- << kVersion40
- << "'. Contents of "
- << kParameterName
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid value for " << fieldName << ", found "
+ << elem.String() << ", expected '" << kVersion42 << "' or '"
+ << kVersion40 << "'. Contents of " << kParameterName
<< " document in "
<< NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ << ": " << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -102,15 +91,12 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
targetVersionString = elem.String();
}
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Unrecognized field '" << fieldName << "'. Contents of "
- << kParameterName
- << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Unrecognized field '" << fieldName << "'. Contents of "
+ << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
@@ -126,28 +112,23 @@ FeatureCompatibilityVersionParser::parse(const BSONObj& featureCompatibilityVers
}
} else if (versionString == kVersion42) {
if (targetVersionString == kVersion42 || targetVersionString == kVersion40) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid state for " << kParameterName << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid state for " << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
} else {
version = ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42;
}
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Missing required field '" << kVersionField << "''. Contents of "
- << kParameterName
- << " document in "
- << NamespaceString::kServerConfigurationNamespace.toString()
- << ": "
- << featureCompatibilityVersionDoc
- << ". See "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+                              << "Missing required field '" << kVersionField << "'. Contents of "
+ << kParameterName << " document in "
+ << NamespaceString::kServerConfigurationNamespace.toString() << ": "
+ << featureCompatibilityVersionDoc << ". See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< ".");
}
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 533268011ed..e9970426ba7 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -217,8 +217,8 @@ public:
} catch (DBException& error) {
if (error.code() == ErrorCodes::InvalidPipelineOperator) {
uasserted(ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Unsupported in view pipeline: "
- << error.what());
+ str::stream()
+ << "Unsupported in view pipeline: " << error.what());
}
throw;
}
@@ -330,8 +330,7 @@ public:
str::stream() << "$_internalReadAtClusterTime value must not be greater"
" than the last applied opTime. Requested clusterTime: "
<< targetClusterTime->toString()
- << "; last applied opTime: "
- << lastAppliedOpTime.toString(),
+ << "; last applied opTime: " << lastAppliedOpTime.toString(),
lastAppliedOpTime.getTimestamp() >= targetClusterTime);
// We aren't holding the global lock in intent mode, so it is possible for the
@@ -347,8 +346,7 @@ public:
" than the all_durable timestamp. Requested"
" clusterTime: "
<< targetClusterTime->toString()
- << "; all_durable timestamp: "
- << allDurableTime.toString(),
+ << "; all_durable timestamp: " << allDurableTime.toString(),
allDurableTime >= targetClusterTime);
// The $_internalReadAtClusterTime option causes any storage-layer cursors created
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 37b0bae2cf8..5d9bda46bae 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -64,7 +64,7 @@ namespace {
// Ensures that only one command is operating on fsyncLock state at a time. As a 'ResourceMutex',
// lock time will be reported for a given user operation.
Lock::ResourceMutex commandMutex("fsyncCommandMutex");
-}
+} // namespace
/**
* Maintains a global read lock while mongod is fsyncLocked.
@@ -437,4 +437,4 @@ MONGO_INITIALIZER(fsyncLockedForWriting)(InitializerContext* context) {
setLockedForWritingImpl([]() { return fsyncCmd.fsyncLocked(); });
return Status::OK();
}
-}
+} // namespace mongo
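
The bare `}` to `} // namespace mongo` edits here, repeated across many files below, come from clang-format's FixNamespaceComments option, which annotates the closing brace of a multi-line namespace with a comment naming it (or a plain `// namespace` for an anonymous one). A toy example of the output shape, on a hypothetical namespace:

    namespace example {
    namespace {

    int lockedCount() {
        return 0;  // placeholder body
    }

    } // namespace
    } // namespace example
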
diff --git a/src/mongo/db/commands/fsync_locked.h b/src/mongo/db/commands/fsync_locked.h
index a84c03209de..bf530eb643e 100644
--- a/src/mongo/db/commands/fsync_locked.h
+++ b/src/mongo/db/commands/fsync_locked.h
@@ -33,14 +33,14 @@
namespace mongo {
/**
-* Returns true if mongod is currently fsyncLocked.
-*/
+ * Returns true if mongod is currently fsyncLocked.
+ */
bool lockedForWriting();
/**
-* Sets the implementation for lockedForWriting(). Should be done once during startup in a
-* MONGO_INITIALIZER.
-*/
+ * Sets the implementation for lockedForWriting(). Should be done once during startup in a
+ * MONGO_INITIALIZER.
+ */
void setLockedForWritingImpl(stdx::function<bool()> impl);
} // namespace mongo
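
This header hunk changes nothing but comment indentation: the old style left continuation `*` lines flush with the opening `/**`, and clang-format 7 re-aligns each continuation `*` one column in, directly under the first `*` of the opener. The target shape, shown on a hypothetical declaration:

    /**
     * Returns true if the example subsystem is locked. Each continuation
     * asterisk sits one column in from the opening slash, which is the
     * alignment these hunks converge on.
     */
    bool exampleLockedForWriting();
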
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index 39f1b7cd225..c39c95b4cfd 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -240,9 +240,7 @@ public:
if (val.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
- << val.toString(false)
- << " of type "
- << typeName(val.type()));
+ << val.toString(false) << " of type " << typeName(val.type()));
}
string p = val.String();
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index b4977da6cf3..8e3530d7169 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -87,17 +87,14 @@ void validateLSID(OperationContext* opCtx, const GetMoreRequest& request, Client
uassert(50737,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getSessionId()
+ << ", which was created in session " << *cursor->getSessionId()
<< ", without an lsid",
opCtx->getLogicalSessionId() || !cursor->getSessionId());
uassert(50738,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getSessionId()
- << ", in session "
- << *opCtx->getLogicalSessionId(),
+ << ", which was created in session " << *cursor->getSessionId()
+ << ", in session " << *opCtx->getLogicalSessionId(),
!opCtx->getLogicalSessionId() || !cursor->getSessionId() ||
(opCtx->getLogicalSessionId() == cursor->getSessionId()));
}
@@ -117,17 +114,14 @@ void validateTxnNumber(OperationContext* opCtx,
uassert(50740,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
+ << ", which was created in transaction " << *cursor->getTxnNumber()
<< ", without a txnNumber",
opCtx->getTxnNumber() || !cursor->getTxnNumber());
uassert(50741,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
- << ", in transaction "
- << *opCtx->getTxnNumber(),
+ << ", which was created in transaction " << *cursor->getTxnNumber()
+ << ", in transaction " << *opCtx->getTxnNumber(),
!opCtx->getTxnNumber() || !cursor->getTxnNumber() ||
(*opCtx->getTxnNumber() == *cursor->getTxnNumber()));
}
@@ -438,8 +432,8 @@ public:
// Ensure that the client still has the privileges to run the originating command.
if (!authzSession->isAuthorizedForPrivileges(cursorPin->getOriginatingPrivileges())) {
uasserted(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for getMore with cursor id "
- << _request.cursorid);
+ str::stream()
+ << "not authorized for getMore with cursor id " << _request.cursorid);
}
if (_request.nss != cursorPin->nss()) {
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 2d69dcb6e9f..04b3c7f87ed 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -101,4 +101,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(CmdHashElt);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index f7be9462caa..de26b32142b 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -110,8 +110,8 @@ namespace mongo {
using std::string;
using std::stringstream;
-using std::vector;
using std::unique_ptr;
+using std::vector;
IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
: BasicCommand(name), helpText(helpText) {}
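
The swapped `using` lines here, and the matching swaps later in plan_cache_commands.cpp, dbdirectclient.cpp, dbhelpers.cpp, and_sorted.cpp, stagedebug_cmd.cpp, text_or.cpp, and field_ref_set.cpp, are clang-format's SortUsingDeclarations pass, which orders a consecutive block of using-declarations lexicographically; `unique_ptr` therefore now precedes `vector`. A sketch of a conforming block:

    #include <memory>
    #include <string>
    #include <vector>

    // After SortUsingDeclarations: strict lexicographic order within the block.
    using std::string;
    using std::unique_ptr;
    using std::vector;
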
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 52f4c49f2f2..2c34bb715c7 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -188,4 +188,4 @@ public:
return true;
}
} cmdListDatabases;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index ae25d883d19..6f119e2baf9 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -127,8 +127,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to list indexes on collection: "
- << nss.ns());
+ str::stream()
+ << "Not authorized to list indexes on collection: " << nss.ns());
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/lock_info.cpp b/src/mongo/db/commands/lock_info.cpp
index d3250df05b1..bf52720e65e 100644
--- a/src/mongo/db/commands/lock_info.cpp
+++ b/src/mongo/db/commands/lock_info.cpp
@@ -105,4 +105,4 @@ public:
return true;
}
} cmdLockInfo;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 5290d324a2d..0e6d4baa02b 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -658,9 +658,7 @@ void State::appendResults(BSONObjBuilder& final) {
BSONObj idKey = BSON("_id" << 1);
if (!_db.runCommand("admin",
BSON("splitVector" << _config.outputOptions.finalNamespace.ns()
- << "keyPattern"
- << idKey
- << "maxChunkSizeBytes"
+ << "keyPattern" << idKey << "maxChunkSizeBytes"
<< _config.splitInfo),
res)) {
uasserted(15921, str::stream() << "splitVector failed: " << res);
@@ -748,8 +746,7 @@ long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
if (!_db.runCommand("admin",
BSON("renameCollection" << _config.tempNamespace.ns() << "to"
<< _config.outputOptions.finalNamespace.ns()
- << "stayTemp"
- << _config.shardedFirstPass),
+ << "stayTemp" << _config.shardedFirstPass),
info)) {
uasserted(10076, str::stream() << "rename failed: " << info);
}
@@ -833,9 +830,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
uassert(
ErrorCodes::PrimarySteppedDown,
str::stream() << "no longer primary while inserting mapReduce result into collection: "
- << nss
- << ": "
- << redact(o),
+ << nss << ": " << redact(o),
repl::ReplicationCoordinator::get(_opCtx)->canAcceptWritesFor(_opCtx, nss));
assertCollectionNotNull(nss, autoColl);
@@ -882,10 +877,8 @@ void State::_insertToInc(BSONObj& o) {
if (o.objsize() > BSONObjMaxUserSize) {
uasserted(ErrorCodes::BadValue,
str::stream() << "object to insert too large for incremental collection"
- << ". size in bytes: "
- << o.objsize()
- << ", max size: "
- << BSONObjMaxUserSize);
+ << ". size in bytes: " << o.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
}
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
@@ -934,8 +927,9 @@ State::~State() {
_useIncremental ? _config.incLong : NamespaceString());
} catch (...) {
error() << "Unable to drop temporary collection created by mapReduce: "
- << _config.tempNamespace << ". This collection will be removed automatically "
- "the next time the server starts up. "
+ << _config.tempNamespace
+ << ". This collection will be removed automatically "
+ "the next time the server starts up. "
<< exceptionToStatus();
}
}
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index d9ced2102af..aab8f011d09 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -158,7 +158,7 @@ private:
* result in "__returnValue"
* @param key OUT
* @param endSizeEstimate OUT
- */
+ */
void _reduce(const BSONList& values, BSONObj& key, int& endSizeEstimate);
JSFunction _func;
@@ -281,13 +281,13 @@ public:
void emit(const BSONObj& a);
/**
- * Checks the size of the transient in-memory results accumulated so far and potentially
- * runs reduce in order to compact them. If the data is still too large, it will be
- * spilled to the output collection.
- *
- * NOTE: Make sure that no DB locks are held, when calling this function, because it may
- * try to acquire write DB lock for the write to the output collection.
- */
+ * Checks the size of the transient in-memory results accumulated so far and potentially
+ * runs reduce in order to compact them. If the data is still too large, it will be
+ * spilled to the output collection.
+ *
+ * NOTE: Make sure that no DB locks are held, when calling this function, because it may
+ * try to acquire write DB lock for the write to the output collection.
+ */
void reduceAndSpillInMemoryStateIfNeeded();
/**
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index b3c973adebd..630379438ba 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -157,5 +157,5 @@ bool mrSupportsWriteConcern(const BSONObj& cmd) {
return true;
}
}
-}
-}
+} // namespace mr
+} // namespace mongo
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 615cd5f3c9f..4a6d428dc66 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -74,11 +74,7 @@ void _compareOutputOptionField(const std::string& dbname,
if (actual == expected)
return;
FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName
- << ": Expected: "
- << expected
- << ". Actual: "
- << actual);
+ << fieldName << ": Expected: " << expected << ". Actual: " << actual);
}
/**
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index b68630938dc..6e1366815a3 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -306,8 +306,8 @@ public:
// Make sure we are allowed to change this parameter
if (!foundParameter->second->allowedToChangeAtRuntime()) {
- errmsg = str::stream() << "not allowed to change [" << parameterName
- << "] at runtime";
+ errmsg = str::stream()
+ << "not allowed to change [" << parameterName << "] at runtime";
return false;
}
@@ -365,9 +365,8 @@ public:
log() << "successfully set parameter " << parameterName << " to "
<< redact(parameter.toString(false))
- << (oldValue ? std::string(str::stream() << " (was "
- << redact(oldValue.toString(false))
- << ")")
+ << (oldValue ? std::string(str::stream()
+ << " (was " << redact(oldValue.toString(false)) << ")")
: "");
numSet++;
@@ -422,8 +421,8 @@ void LogComponentVerbosityServerParameter::append(OperationContext*,
Status LogComponentVerbosityServerParameter::set(const BSONElement& newValueElement) {
if (!newValueElement.isABSONObj()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "log component verbosity is not a BSON object: "
- << newValueElement);
+ str::stream()
+ << "log component verbosity is not a BSON object: " << newValueElement);
}
return setLogComponentVerbosity(newValueElement.Obj());
}
@@ -456,9 +455,7 @@ Status AutomationServiceDescriptorServerParameter::setFromString(const std::stri
if (str.size() > kMaxSize)
return {ErrorCodes::Overflow,
str::stream() << "Value for parameter automationServiceDescriptor"
- << " must be no more than "
- << kMaxSize
- << " bytes"};
+ << " must be no more than " << kMaxSize << " bytes"};
{
const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index e112b19b9fc..68c04442192 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -104,8 +104,8 @@ namespace mongo {
using std::string;
using std::stringstream;
-using std::vector;
using std::unique_ptr;
+using std::vector;
PlanCacheCommand::PlanCacheCommand(const string& name,
const string& helpText,
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 1632207d392..b118407ef65 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -341,12 +341,12 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON(
- "query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort() << "projection"
- << cqA->getQueryRequest().getProj());
- BSONObj shapeB = BSON(
- "query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort() << "projection"
- << cqB->getQueryRequest().getProj());
+ BSONObj shapeA =
+ BSON("query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort()
+ << "projection" << cqA->getQueryRequest().getProj());
+ BSONObj shapeB =
+ BSON("query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort()
+ << "projection" << cqB->getQueryRequest().getProj());
ASSERT_TRUE(
std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeA](const BSONObj& obj) {
auto filteredObj = obj.removeField("queryHash");
@@ -413,14 +413,11 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
BSONObj shape = BSON("query" << cq->getQueryObj() << "sort" << cq->getQueryRequest().getSort()
- << "projection"
- << cq->getQueryRequest().getProj());
- BSONObj shapeWithCollation = BSON("query" << cqCollation->getQueryObj() << "sort"
- << cqCollation->getQueryRequest().getSort()
- << "projection"
- << cqCollation->getQueryRequest().getProj()
- << "collation"
- << cqCollation->getCollator()->getSpec().toBSON());
+ << "projection" << cq->getQueryRequest().getProj());
+ BSONObj shapeWithCollation = BSON(
+ "query" << cqCollation->getQueryObj() << "sort" << cqCollation->getQueryRequest().getSort()
+ << "projection" << cqCollation->getQueryRequest().getProj() << "collation"
+ << cqCollation->getCollator()->getSpec().toBSON());
ASSERT_TRUE(
std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shape](const BSONObj& obj) {
auto filteredObj = obj.removeField("queryHash");
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 9618543a0a9..3fd5a94da1c 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -115,4 +115,4 @@ public:
return true;
}
} repairCursorCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 4dccbfef572..c62ebad77c4 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -299,8 +299,8 @@ StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNames
auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
if (!resolvedView.isOK()) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
- << resolvedView.getStatus().toString()};
+ str::stream() << "Failed to resolve view '" << involvedNs.ns()
+ << "': " << resolvedView.getStatus().toString()};
}
resolvedNamespaces[involvedNs.coll()] = {resolvedView.getValue().getNamespace(),
diff --git a/src/mongo/db/commands/server_status_internal.cpp b/src/mongo/db/commands/server_status_internal.cpp
index 738b22e8945..d5776746dd1 100644
--- a/src/mongo/db/commands/server_status_internal.cpp
+++ b/src/mongo/db/commands/server_status_internal.cpp
@@ -83,4 +83,4 @@ void MetricTree::appendTo(BSONObjBuilder& b) const {
bb.done();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_internal.h b/src/mongo/db/commands/server_status_internal.h
index cbd67fa0056..f9bde775db3 100644
--- a/src/mongo/db/commands/server_status_internal.h
+++ b/src/mongo/db/commands/server_status_internal.h
@@ -52,4 +52,4 @@ private:
std::map<std::string, MetricTree*> _subtrees;
std::map<std::string, ServerStatusMetric*> _metrics;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp
index d56a2970ec6..264844c02ad 100644
--- a/src/mongo/db/commands/server_status_metric.cpp
+++ b/src/mongo/db/commands/server_status_metric.cpp
@@ -49,4 +49,4 @@ string ServerStatusMetric::_parseLeafName(const string& name) {
return name.substr(idx + 1);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/server_status_metric.h b/src/mongo/db/commands/server_status_metric.h
index 01c695ff046..f64327908e7 100644
--- a/src/mongo/db/commands/server_status_metric.h
+++ b/src/mongo/db/commands/server_status_metric.h
@@ -88,4 +88,4 @@ public:
private:
const T* _t;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp
index 73f10c97422..6315265de72 100644
--- a/src/mongo/db/commands/sleep_command.cpp
+++ b/src/mongo/db/commands/sleep_command.cpp
@@ -153,4 +153,4 @@ public:
};
MONGO_REGISTER_TEST_COMMAND(CmdSleep);
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 3485f623c7d..01b3d7b8c74 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -128,4 +128,4 @@ public:
}
};
MONGO_REGISTER_TEST_COMMAND(CmdSetCommittedSnapshot);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 1410bb5a3bc..d981b24ad1d 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -209,4 +209,4 @@ public:
};
MONGO_REGISTER_TEST_COMMAND(EmptyCapped);
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 623cffb0367..a155d443692 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -101,8 +101,7 @@ BSONArray roleSetToBSONArray(const stdx::unordered_set<RoleName>& roles) {
++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -113,8 +112,7 @@ BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -174,14 +172,14 @@ Status checkOkayToGrantRolesToRole(OperationContext* opCtx,
const RoleName& roleToAdd = *it;
if (roleToAdd == role) {
return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Cannot grant role " << role.getFullName()
- << " to itself.");
+ str::stream()
+ << "Cannot grant role " << role.getFullName() << " to itself.");
}
if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(
- ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB()
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream()
+ << "Roles on the \'" << role.getDB()
<< "\' database cannot be granted roles from other databases");
}
@@ -431,14 +429,13 @@ Status insertRoleDocument(OperationContext* opCtx, const BSONObj& roleObj) {
* Updates the given role object with the given update modifier.
*/
Status updateRoleDocument(OperationContext* opCtx, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(opCtx,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole()
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << role.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(
+ opCtx,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -516,13 +513,12 @@ Status updatePrivilegeDocument(OperationContext* opCtx,
Status updatePrivilegeDocument(OperationContext* opCtx,
const UserName& user,
const BSONObj& updateObj) {
- const auto status = updatePrivilegeDocument(opCtx,
- user,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << user.getUser()
- << AuthorizationManager::USER_DB_FIELD_NAME
- << user.getDB()),
- updateObj);
+ const auto status = updatePrivilegeDocument(
+ opCtx,
+ user,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
+ updateObj);
return status;
}
@@ -621,8 +617,7 @@ StatusWith<AuthzLockGuard> requireWritableAuthSchema28SCRAM(OperationContext* op
str::stream()
<< "User and role management commands require auth data to have "
<< "at least schema version "
- << AuthorizationManager::schemaVersion28SCRAM
- << " but found "
+ << AuthorizationManager::schemaVersion28SCRAM << " but found "
<< foundSchemaVersion);
}
status = writeAuthSchemaVersionIfNeeded(opCtx, authzManager, foundSchemaVersion);
@@ -658,8 +653,7 @@ StatusWith<AuthzLockGuard> requireReadableAuthSchema26Upgrade(OperationContext*
return Status(ErrorCodes::AuthSchemaIncompatible,
str::stream() << "The usersInfo and rolesInfo commands require auth data to "
<< "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade
- << " but found "
+ << AuthorizationManager::schemaVersion26Upgrade << " but found "
<< foundSchemaVersion);
}
@@ -2022,9 +2016,9 @@ public:
&nMatched);
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
- .withContext(str::stream() << "Failed to remove role "
- << roleName.getFullName()
- << " from all users"));
+ .withContext(str::stream()
+ << "Failed to remove role " << roleName.getFullName()
+ << " from all users"));
}
// Remove this role from all other roles
@@ -2045,9 +2039,9 @@ public:
if (!status.isOK()) {
uassertStatusOK(
useDefaultCode(status, ErrorCodes::RoleModificationFailed)
- .withContext(
- str::stream() << "Removed role " << roleName.getFullName()
- << " from all users but failed to remove from all roles"));
+ .withContext(str::stream()
+ << "Removed role " << roleName.getFullName()
+ << " from all users but failed to remove from all roles"));
}
audit::logDropRole(Client::getCurrent(), roleName);
@@ -2139,13 +2133,12 @@ public:
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::UserModificationFailed)
.withContext(str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all users"));
+ << dbname << "\" db from all users"));
}
// Remove these roles from all other roles
- std::string sourceFieldName = str::stream() << "roles."
- << AuthorizationManager::ROLE_DB_FIELD_NAME;
+ std::string sourceFieldName = str::stream()
+ << "roles." << AuthorizationManager::ROLE_DB_FIELD_NAME;
status = updateAuthzDocuments(
opCtx,
AuthorizationManager::rolesCollectionNamespace,
@@ -2158,8 +2151,7 @@ public:
if (!status.isOK()) {
uassertStatusOK(useDefaultCode(status, ErrorCodes::RoleModificationFailed)
.withContext(str::stream() << "Failed to remove roles from \""
- << dbname
- << "\" db from all roles"));
+ << dbname << "\" db from all roles"));
}
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
@@ -2580,9 +2572,7 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1
- << AuthorizationManager::USER_DB_FIELD_NAME
- << 1);
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
Status status =
queryAuthzDocument(opCtx,
@@ -2653,9 +2643,7 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << 1);
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
Status status =
queryAuthzDocument(opCtx,
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 7abc55ab60a..08e4e5345c1 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -58,8 +58,8 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to grant role: "
- << roles[i].getFullName());
+ str::stream()
+ << "Not authorized to grant role: " << roles[i].getFullName());
}
}
@@ -83,8 +83,8 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to revoke role: "
- << roles[i].getFullName());
+ str::stream()
+ << "Not authorized to revoke role: " << roles[i].getFullName());
}
}
return Status::OK();
@@ -129,8 +129,8 @@ Status checkAuthForCreateUserCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: "
- << args.userName.getDB());
+ str::stream()
+ << "Not authorized to create users on db: " << args.userName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -231,8 +231,8 @@ Status checkAuthForCreateRoleCommand(Client* client,
if (!authzSession->isAuthorizedToCreateRole(args)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create roles on db: "
- << args.roleName.getDB());
+ str::stream()
+ << "Not authorized to create roles on db: " << args.roleName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -365,8 +365,8 @@ Status checkAuthForDropAllUsersFromDatabaseCommand(Client* client, const std::st
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
ActionType::dropUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to drop users from the " << dbname << " database");
}
return Status::OK();
}
@@ -415,8 +415,8 @@ Status checkAuthForUsersInfoCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to view users from the " << dbname << " database");
}
} else if (args.target == auth::UsersInfoArgs::Target::kGlobal) {
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
@@ -462,8 +462,8 @@ Status checkAuthForDropAllRolesFromDatabaseCommand(Client* client, const std::st
if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
ActionType::dropRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to drop roles from the " << dbname << " database");
}
return Status::OK();
}
@@ -482,8 +482,8 @@ Status checkAuthForRolesInfoCommand(Client* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view roles from the " << dbname
- << " database");
+ str::stream()
+ << "Not authorized to view roles from the " << dbname << " database");
}
} else {
for (size_t i = 0; i < args.roleNames.size(); ++i) {
@@ -496,8 +496,7 @@ Status checkAuthForRolesInfoCommand(Client* client,
ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB()
- << " database");
+ << args.roleNames[i].getDB() << " database");
}
}
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 2daeaf3372e..71c4ba0852f 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -210,4 +210,4 @@ public:
}
} validateCmd;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 1c63ac0756b..c427ff42598 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -111,7 +111,7 @@ void serializeReply(OperationContext* opCtx,
BSONSizeTracker upsertInfoSizeTracker;
BSONSizeTracker errorsSizeTracker;
- auto errorMessage = [&, errorSize = size_t(0) ](StringData rawMessage) mutable {
+ auto errorMessage = [&, errorSize = size_t(0)](StringData rawMessage) mutable {
// Start truncating error messages once both of these limits are exceeded.
constexpr size_t kErrorSizeTruncationMin = 1024 * 1024;
constexpr size_t kErrorCountTruncationMin = 2;
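
The one-character hunk above removes the stray space the old formatter emitted between a lambda init-capture and its closing `]`. A self-contained sketch of the corrected capture style, with a hypothetical running-size body standing in for the real truncation logic:

    #include <cstddef>
    #include <iostream>
    #include <string>

    int main() {
        // New style: no space between 'std::size_t(0)' and ']'.
        auto errorMessage = [&, errorSize = std::size_t(0)](const std::string& raw) mutable {
            errorSize += raw.size();  // placeholder for the real truncation bookkeeping
            return errorSize;
        };
        std::cout << errorMessage("example") << '\n';
    }
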
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index c375ffdd73f..95c6771badf 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -52,8 +52,8 @@ public:
void makeKClientsWithLockers(int k) {
clients.reserve(k);
for (int i = 0; i < k; ++i) {
- auto client = getGlobalServiceContext()->makeClient(
- str::stream() << "test client for thread " << i);
+ auto client = getGlobalServiceContext()->makeClient(str::stream()
+ << "test client for thread " << i);
auto opCtx = client->makeOperationContext();
opCtx->swapLockState(std::make_unique<LockerImpl>());
clients.emplace_back(std::move(client), std::move(opCtx));
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 26fab1084cf..bf5e0f224b3 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -2094,7 +2094,7 @@ public:
bool activeTransaction = true;
};
-}
+} // namespace
TEST_F(DConcurrencyTestFixture, TestGlobalLockAbandonsSnapshotWhenNotInWriteUnitOfWork) {
auto clients = makeKClientsWithLockers(1);
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index e3c7fc77809..5dcbfa07f28 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -102,7 +102,10 @@ uint32_t modeMask(LockMode mode) {
* Maps the LockRequest status to a human-readable string.
*/
static const char* LockRequestStatusNames[] = {
- "new", "granted", "waiting", "converting",
+ "new",
+ "granted",
+ "waiting",
+ "converting",
};
// Ensure we do not add new status types without updating the names array
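
The reflow of `LockRequestStatusNames`, like `kDollarQueryModifiers` in curop.cpp further down, shows a useful clang-format rule: a braced initializer list whose last element carries a trailing comma is laid out one element per line, while a list without one may be bin-packed. A sketch:

    // The trailing comma after "converting" forces the one-per-line layout.
    static const char* const kStatusNames[] = {
        "new",
        "granted",
        "waiting",
        "converting",
    };

    // Without a trailing comma, clang-format is free to bin-pack the elements.
    static const char* const kPackedNames[] = {"new", "granted", "waiting", "converting"};
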
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index ab113b48aad..50b2116d953 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -60,32 +60,32 @@ public:
~LockManager();
/**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Locking the same resource twice increments the reference count of the lock so each call
- * to lock must be matched with a call to unlock with the same resource.
- *
- * @param resId Id of the resource to be locked.
- * @param request LockRequest structure on which the state of the request will be tracked.
- * This value cannot be NULL and the notify value must be set. If the
- * return value is not LOCK_WAITING, this pointer can be freed and will
- * not be used any more.
- *
- * If the return value is LOCK_WAITING, the notification method will be called
- * at some point into the future, when the lock becomes granted. If unlock is
- * called before the lock becomes granted, the notification will not be
- * invoked.
- *
- * If the return value is LOCK_WAITING, the notification object *must*
- * live at least until the notify method has been invoked or unlock has
- * been called for the resource it was assigned to. Failure to do so will
- * cause the lock manager to call into an invalid memory location.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- *
- * @return See comments for LockResult.
- */
+ * Acquires lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Locking the same resource twice increments the reference count of the lock so each call
+ * to lock must be matched with a call to unlock with the same resource.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param request LockRequest structure on which the state of the request will be tracked.
+ * This value cannot be NULL and the notify value must be set. If the
+ * return value is not LOCK_WAITING, this pointer can be freed and will
+ * not be used any more.
+ *
+ * If the return value is LOCK_WAITING, the notification method will be called
+ * at some point into the future, when the lock becomes granted. If unlock is
+ * called before the lock becomes granted, the notification will not be
+ * invoked.
+ *
+ * If the return value is LOCK_WAITING, the notification object *must*
+ * live at least until the notify method has been invoked or unlock has
+ * been called for the resource it was assigned to. Failure to do so will
+ * cause the lock manager to call into an invalid memory location.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+ *
+ * @return See comments for LockResult.
+ */
LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 40e14bff3ac..b08e3cc958a 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -328,8 +328,7 @@ void LockerImpl::reacquireTicket(OperationContext* opCtx) {
} else {
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
- << "' within a max lock request timeout of '"
- << *_maxLockTimeout
+ << "' within a max lock request timeout of '" << *_maxLockTimeout
<< "' milliseconds.",
_acquireTicket(opCtx, _modeForTicket, Date_t::now() + *_maxLockTimeout));
}
@@ -369,8 +368,7 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire ticket with mode '" << _modeForTicket
<< "' within a max lock request timeout of '"
- << Date_t::now() - beforeAcquire
- << "' milliseconds.",
+ << Date_t::now() - beforeAcquire << "' milliseconds.",
_acquireTicket(opCtx, mode, deadline));
}
_modeForTicket = mode;
@@ -965,8 +963,7 @@ void LockerImpl::lockComplete(OperationContext* opCtx,
uassert(ErrorCodes::LockTimeout,
str::stream() << "Unable to acquire lock '" << resId.toString() << "' within "
- << timeout
- << "' milliseconds.",
+ << timeout << "' milliseconds.",
waitTime > Milliseconds(0));
}
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 76f51cf9a13..00297be4d37 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -939,11 +939,12 @@ namespace {
bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo,
const ResourceId& resourceId,
const LockMode& mode) {
- return (1U == std::count_if(lockerInfo.locks.begin(),
- lockerInfo.locks.end(),
- [&resourceId, &mode](const Locker::OneLock& lock) {
- return lock.resourceId == resourceId && lock.mode == mode;
- }));
+ return (1U ==
+ std::count_if(lockerInfo.locks.begin(),
+ lockerInfo.locks.end(),
+ [&resourceId, &mode](const Locker::OneLock& lock) {
+ return lock.resourceId == resourceId && lock.mode == mode;
+ }));
}
} // namespace
diff --git a/src/mongo/db/concurrency/write_conflict_exception.cpp b/src/mongo/db/concurrency/write_conflict_exception.cpp
index c36b382b584..9eb18f8d349 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.cpp
+++ b/src/mongo/db/concurrency/write_conflict_exception.cpp
@@ -48,10 +48,10 @@ WriteConflictException::WriteConflictException()
}
void WriteConflictException::logAndBackoff(int attempt, StringData operation, StringData ns) {
- mongo::logAndBackoff(
- ::mongo::logger::LogComponent::kWrite,
- logger::LogSeverity::Debug(1),
- static_cast<size_t>(attempt),
- str::stream() << "Caught WriteConflictException doing " << operation << " on " << ns);
-}
+ mongo::logAndBackoff(::mongo::logger::LogComponent::kWrite,
+ logger::LogSeverity::Debug(1),
+ static_cast<size_t>(attempt),
+ str::stream() << "Caught WriteConflictException doing " << operation
+ << " on " << ns);
}
+} // namespace mongo
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 893b67cdde8..551bed75d54 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -65,7 +65,14 @@ namespace {
// OP_QUERY find. The $orderby field is omitted because "orderby" (no dollar sign) is also allowed,
// and this requires special handling.
const std::vector<const char*> kDollarQueryModifiers = {
- "$hint", "$comment", "$max", "$min", "$returnKey", "$showDiskLoc", "$snapshot", "$maxTimeMS",
+ "$hint",
+ "$comment",
+ "$max",
+ "$min",
+ "$returnKey",
+ "$showDiskLoc",
+ "$snapshot",
+ "$maxTimeMS",
};
} // namespace
diff --git a/src/mongo/db/curop_failpoint_helpers.cpp b/src/mongo/db/curop_failpoint_helpers.cpp
index b5f9b9e9a36..6afbfb05be5 100644
--- a/src/mongo/db/curop_failpoint_helpers.cpp
+++ b/src/mongo/db/curop_failpoint_helpers.cpp
@@ -85,4 +85,4 @@ void CurOpFailpointHelpers::waitWhileFailPointEnabled(FailPoint* failPoint,
updateCurOpMsg(opCtx, origCurOpMsg);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/curop_failpoint_helpers.h b/src/mongo/db/curop_failpoint_helpers.h
index e642f601811..a1143805951 100644
--- a/src/mongo/db/curop_failpoint_helpers.h
+++ b/src/mongo/db/curop_failpoint_helpers.h
@@ -64,4 +64,4 @@ public:
bool checkForInterrupt = false,
boost::optional<NamespaceString> nss = boost::none);
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 55c15c26593..e0b32da95f0 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -1007,8 +1007,8 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
if (auto svcExec = serviceContext->getServiceExecutor()) {
Status status = svcExec->shutdown(Seconds(10));
if (!status.isOK()) {
- log(LogComponent::kNetwork) << "Service executor failed to shutdown within timelimit: "
- << status.reason();
+ log(LogComponent::kNetwork)
+ << "Service executor failed to shutdown within timelimit: " << status.reason();
}
}
#endif
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index fae5952f834..079a1f32f3f 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -122,8 +122,7 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
str::stream()
<< "Unable to read from a snapshot due to pending collection catalog "
"changes; please retry the operation. Snapshot timestamp is "
- << mySnapshot->toString()
- << ". Collection minimum is "
+ << mySnapshot->toString() << ". Collection minimum is "
<< minSnapshot->toString());
}
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 0b0671b4ecf..d5ce4367612 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
namespace {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 5a4abebd032..db2b2f8e0bd 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -59,9 +59,9 @@
namespace mongo {
-using std::unique_ptr;
using std::set;
using std::string;
+using std::unique_ptr;
/* fetch a single object from collection ns that matches query
set your db SavedContext first
diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp
index f893b53eee9..114dc00d80c 100644
--- a/src/mongo/db/dbmessage.cpp
+++ b/src/mongo/db/dbmessage.cpp
@@ -153,7 +153,7 @@ Message makeMessage(NetworkOp op, Func&& bodyBuilder) {
out.header().setLen(size);
return out;
}
-}
+} // namespace
Message makeInsertMessage(StringData ns, const BSONObj* objs, size_t count, int flags) {
return makeMessage(dbInsert, [&](BufBuilder& b) {
@@ -238,4 +238,4 @@ DbResponse replyToQuery(int queryResultFlags,
reply.bufBuilderForResults().appendBuf(data, size);
return DbResponse{reply.toQueryReply(queryResultFlags, nReturned, startingFrom, cursorId)};
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 69fdfd7375a..78815dbdd42 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -96,7 +96,7 @@ class OperationContext;
namespace QueryResult {
#pragma pack(1)
/* see http://dochub.mongodb.org/core/mongowireprotocol
-*/
+ */
struct Layout {
MsgData::Layout msgdata;
int64_t cursorId;
@@ -298,7 +298,7 @@ enum QueryOptions {
QueryOption_CursorTailable = 1 << 1,
/** allow query of replica slave. normally these return an error except for namespace "local".
- */
+ */
QueryOption_SlaveOk = 1 << 2,
// findingStart mode is used to find the first operation of interest when
@@ -319,7 +319,7 @@ enum QueryOptions {
/** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while
* rather than returning no data. After a timeout period, we do return as normal.
- */
+ */
QueryOption_AwaitData = 1 << 5,
/** Stream the data down full blast in multiple "more" packages, on the assumption that the
diff --git a/src/mongo/db/dbmessage_test.cpp b/src/mongo/db/dbmessage_test.cpp
index b804e277407..73354253738 100644
--- a/src/mongo/db/dbmessage_test.cpp
+++ b/src/mongo/db/dbmessage_test.cpp
@@ -140,4 +140,4 @@ TEST(DBMessage1, GoodInsert2) {
}
-} // mongo namespace
+} // namespace mongo
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index cdb095ba08d..7904915ebe7 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -37,8 +37,8 @@
namespace mongo {
-using std::unique_ptr;
using std::numeric_limits;
+using std::unique_ptr;
using std::vector;
using stdx::make_unique;
diff --git a/src/mongo/db/exec/change_stream_proxy.cpp b/src/mongo/db/exec/change_stream_proxy.cpp
index 7750beeaf86..0de28de0b55 100644
--- a/src/mongo/db/exec/change_stream_proxy.cpp
+++ b/src/mongo/db/exec/change_stream_proxy.cpp
@@ -94,8 +94,7 @@ BSONObj ChangeStreamProxyStage::_validateAndConvertToBSON(const Document& event)
"event makes it impossible to resume the stream from that point. Only "
"transformations that retain the unmodified _id field are allowed. "
"Expected: "
- << BSON("_id" << resumeToken)
- << " but found: "
+ << BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
return eventBSON;
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index fb77bc01965..0b3f9b1c23c 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -118,8 +118,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
Status status(ErrorCodes::CappedPositionLost,
str::stream() << "CollectionScan died due to failure to restore "
<< "tailable cursor position. "
- << "Last seen record id: "
- << _lastSeenId);
+ << "Last seen record id: " << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::FAILURE;
}
@@ -222,8 +221,7 @@ void CollectionScan::doRestoreStateRequiresCollection() {
uassert(ErrorCodes::CappedPositionLost,
str::stream()
<< "CollectionScan died due to position in capped collection being deleted. "
- << "Last seen record id: "
- << _lastSeenId,
+ << "Last seen record id: " << _lastSeenId,
couldRestore);
}
}
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index ce19790ef19..66fabdc964c 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -61,7 +61,7 @@ BSONObj replaceBSONFieldNames(const BSONObj& replace, const BSONObj& fieldNames)
return bob.obj();
}
-}
+} // namespace
using std::unique_ptr;
using std::vector;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 72ffe359813..32c9294cdce 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -96,7 +96,7 @@ struct StoredGeometry {
BSONElement element;
GeometryContainer geometry;
};
-}
+} // namespace
/**
* Find and parse all geometry elements on the appropriate field path from the document.
@@ -556,7 +556,7 @@ private:
// Owns matcher
const unique_ptr<MatchExpression> _matcher;
};
-}
+} // namespace
static double min2DBoundsIncrement(const GeoNearExpression& query,
const IndexDescriptor* twoDIndex) {
@@ -591,9 +591,9 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+GeoNear2DStage::nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return StatusWith<CoveredInterval*>(NULL);
@@ -830,7 +830,7 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) {
// Takes ownership of caps
return new S2RegionIntersection(&regions);
}
-}
+} // namespace
// Estimate the density of data by search the nearest cells level by level around center.
class GeoNear2DSphereStage::DensityEstimator {
@@ -1011,9 +1011,9 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
- WorkingSet* workingSet,
- const Collection* collection) {
+GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
+ WorkingSet* workingSet,
+ const Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
return StatusWith<CoveredInterval*>(NULL);
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 257423679a7..d441954f8f6 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -124,4 +124,4 @@ TEST_F(QueuedDataStageTest, validateStats) {
unique_ptr<PlanStageStats> allStats(mock->getStats());
ASSERT_TRUE(stats->isEOF);
}
-}
+} // namespace
diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h
index ab601569cd4..973165969be 100644
--- a/src/mongo/db/exec/record_store_fast_count.h
+++ b/src/mongo/db/exec/record_store_fast_count.h
@@ -75,4 +75,4 @@ private:
CountStats _specificStats;
};
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 3d77b61870a..060722dbe14 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -61,8 +61,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// a rename has happened during yield.
uassert(ErrorCodes::QueryPlanKilled,
str::stream() << "collection renamed from '" << _nss << "' to '" << *newNss
- << "'. UUID "
- << _collectionUUID,
+ << "'. UUID " << _collectionUUID,
*newNss == _nss);
// At this point we know that the collection name has not changed, and therefore we have
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index c313f3b592d..687abb12964 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -65,8 +65,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
using stdx::make_unique;
@@ -283,11 +283,9 @@ public:
str::stream() << "Can't find index: " << keyPatternObj,
!indexes.empty());
uassert(ErrorCodes::AmbiguousIndexKeyPattern,
- str::stream() << indexes.size() << " matching indexes for key pattern: "
- << keyPatternObj
- << ". Conflicting indexes: "
- << indexes[0]->infoObj()
- << ", "
+ str::stream() << indexes.size()
+ << " matching indexes for key pattern: " << keyPatternObj
+ << ". Conflicting indexes: " << indexes[0]->infoObj() << ", "
<< indexes[1]->infoObj(),
indexes.size() == 1);
desc = indexes[0];
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 52fc60a53a1..eef2993d7e2 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -45,9 +45,9 @@
namespace mongo {
+using std::string;
using std::unique_ptr;
using std::vector;
-using std::string;
using stdx::make_unique;
using fts::FTSSpec;
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index d77676122a8..a7b307bf49c 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -116,8 +116,7 @@ void assertRequiredPathsPresent(const mb::Document& document, const FieldRefSet&
uassert(ErrorCodes::NoSuchKey,
str::stream() << "After applying the update, the new document was missing the "
"required field '"
- << (*path).dottedField()
- << "'",
+ << (*path).dottedField() << "'",
elem.ok());
uassert(
ErrorCodes::NotSingleValueField,
diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h
index 2f59e755c7a..1d3934443e6 100644
--- a/src/mongo/db/exec/write_stage_common.h
+++ b/src/mongo/db/exec/write_stage_common.h
@@ -54,5 +54,5 @@ bool ensureStillMatches(const Collection* collection,
WorkingSet* ws,
WorkingSetID id,
const CanonicalQuery* cq);
-}
-}
+} // namespace write_stage_common
+} // namespace mongo
diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
index 7adf5c74dcd..ad98dcfdc35 100644
--- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
+++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
@@ -51,9 +51,7 @@ std::unique_ptr<DBClientBase> connect(StringData appName) {
void setWaitWithPinnedCursorDuringGetMoreBatchFailpoint(DBClientBase* conn, bool enable) {
auto cmdObj = BSON("configureFailPoint"
<< "waitWithPinnedCursorDuringGetMoreBatch"
- << "mode"
- << (enable ? "alwaysOn" : "off")
- << "data"
+ << "mode" << (enable ? "alwaysOn" : "off") << "data"
<< BSON("shouldNotdropLock" << true));
auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj));
ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply()));
@@ -63,8 +61,7 @@ void setWaitBeforeUnpinningOrDeletingCursorAfterGetMoreBatchFailpoint(DBClientBa
bool enable) {
auto cmdObj = BSON("configureFailPoint"
<< "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch"
- << "mode"
- << (enable ? "alwaysOn" : "off"));
+ << "mode" << (enable ? "alwaysOn" : "off"));
auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj));
ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply()));
}
@@ -158,12 +155,9 @@ TEST(CurrentOpExhaustCursorTest, CanSeeEachExhaustCursorPseudoGetMoreInCurrentOp
// Generate a currentOp filter based on the cursorId and the cumulative nDocsReturned.
const auto curOpMatch = BSON("command.collection"
<< "exhaust_cursor_currentop"
- << "command.getMore"
- << queryCursor->getCursorId()
- << "msg"
+ << "command.getMore" << queryCursor->getCursorId() << "msg"
<< "waitWithPinnedCursorDuringGetMoreBatch"
- << "cursor.nDocsReturned"
- << i);
+ << "cursor.nDocsReturned" << i);
// Confirm that the exhaust getMore appears in the $currentOp output.
ASSERT(confirmCurrentOpContents(conn.get(), curOpMatch, parallelWaitTimeoutMS));
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index 9627b914c6b..494aad69d63 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -79,9 +79,7 @@ protected:
valLong = 1LL;
doc = BSON(aBool(valBool) << anArray(valArray) << anObj(valObj) << aDate(valDate)
- << aString(valString)
- << anOID(valOID)
- << aLong(valLong));
+ << aString(valString) << anOID(valOID) << aLong(valLong));
}
void tearDown() {}
@@ -213,9 +211,10 @@ TEST(ComplexExtraction, GetStringVector) {
BSONField<vector<string>> vectorField("testVector");
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a"
- << "b"
- << "c");
+ bob << vectorField()
+ << BSON_ARRAY("a"
+ << "b"
+ << "c");
BSONObj obj = bob.obj();
vector<string> parsedVector;
@@ -266,9 +265,10 @@ TEST(ComplexExtraction, RoundTripVector) {
BSONObj obj;
{
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a"
- << "b"
- << "c");
+ bob << vectorField()
+ << BSON_ARRAY("a"
+ << "b"
+ << "c");
obj = bob.obj();
}
@@ -295,12 +295,13 @@ TEST(ComplexExtraction, GetStringMap) {
BSONField<map<string, string>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << "b"
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
BSONObj obj = bob.obj();
map<string, string> parsedMap;
@@ -317,14 +318,15 @@ TEST(ComplexExtraction, GetObjectMap) {
BSONField<map<string, BSONObj>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a" << BSON("a"
- << "a")
- << "b"
- << BSON("b"
- << "b")
- << "c"
- << BSON("c"
- << "c"));
+ bob << mapField()
+ << BSON("a" << BSON("a"
+ << "a")
+ << "b"
+ << BSON("b"
+ << "b")
+ << "c"
+ << BSON("c"
+ << "c"));
BSONObj obj = bob.obj();
map<string, BSONObj> parsedMap;
@@ -347,12 +349,11 @@ TEST(ComplexExtraction, GetBadMap) {
BSONField<map<string, string>> mapField("testMap");
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << 123
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b" << 123 << "c"
+ << "c");
BSONObj obj = bob.obj();
map<string, string> parsedMap;
@@ -369,12 +370,13 @@ TEST(ComplexExtraction, RoundTripMap) {
BSONObj obj;
{
BSONObjBuilder bob;
- bob << mapField() << BSON("a"
- << "a"
- << "b"
- << "b"
- << "c"
- << "c");
+ bob << mapField()
+ << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
obj = bob.obj();
}
@@ -430,9 +432,7 @@ TEST(ComplexExtraction, GetBadNestedMap) {
BSONObj nestedMapObj = BSON("a"
<< "a"
- << "b"
- << 123
- << "c"
+ << "b" << 123 << "c"
<< "c");
BSONObjBuilder bob;
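
The GetStringMap and GetBadMap hunks above pair naturally: the same builder pattern yields a parseable map when every value is a string, and a parse failure when one is not. A hedged sketch of both cases (fixture plumbing omitted):

    BSONField<std::map<std::string, std::string>> mapField("testMap");

    BSONObjBuilder bob;
    bob << mapField()
        << BSON("a"
                << "a"
                << "b" << 123 << "c"  // 123 is not a string, so extraction fails
                << "c");
    BSONObj obj = bob.obj();

    std::map<std::string, std::string> parsedMap;
    // Extracting mapField from obj via FieldParser now reports an error;
    // replace 123 with "b" and it parses (the GetStringMap case).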
diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp
index cbfcee236d3..c55a722b64b 100644
--- a/src/mongo/db/field_ref_set.cpp
+++ b/src/mongo/db/field_ref_set.cpp
@@ -36,8 +36,8 @@
namespace mongo {
-using std::vector;
using std::string;
+using std::vector;
namespace {
@@ -52,7 +52,7 @@ StringData safeFirstPart(const FieldRef* fieldRef) {
return fieldRef->getPart(0);
}
}
-}
+} // namespace
bool FieldRefSet::FieldRefPtrLessThan::operator()(const FieldRef* l, const FieldRef* r) const {
return *l < *r;
diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h
index 92e1edab444..9307ab7570c 100644
--- a/src/mongo/db/free_mon/free_mon_controller.h
+++ b/src/mongo/db/free_mon/free_mon_controller.h
@@ -157,33 +157,33 @@ private:
private:
/**
- * Private enum to track state.
- *
- * +-----------------------------------------------------------+
- * | v
- * +-------------+ +----------+ +----------------+ +-------+
- * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
- * +-------------+ +----------+ +----------------+ +-------+
- */
+ * Private enum to track state.
+ *
+ * +-----------------------------------------------------------+
+ * | v
+ * +-------------+ +----------+ +----------------+ +-------+
+ * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
+ * +-------------+ +----------+ +----------------+ +-------+
+ */
enum class State {
/**
- * Initial state. Either start() or stop() can be called next.
- */
+ * Initial state. Either start() or stop() can be called next.
+ */
kNotStarted,
/**
- * start() has been called. stop() should be called next.
- */
+ * start() has been called. stop() should be called next.
+ */
kStarted,
/**
- * stop() has been called, and the background thread is in progress of shutting down
- */
+     * stop() has been called, and the background thread is in the process of shutting down
+ */
kStopRequested,
/**
- * Controller has been stopped.
- */
+ * Controller has been stopped.
+ */
kDone,
};
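
The hunks above only re-indent the documentation; the state machine itself is unchanged. For orientation, a minimal sketch of how such an enum is typically enforced (an assumed pattern, not code from this header):

    enum class State { kNotStarted, kStarted, kStopRequested, kDone };
    State _state = State::kNotStarted;

    void start() {
        invariant(_state == State::kNotStarted);  // the only legal edge out of kNotStarted
        _state = State::kStarted;
    }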
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 6892cd034b4..99b2e4c36de 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -43,7 +43,6 @@
#include "mongo/base/deinitializer_context.h"
#include "mongo/bson/bson_validate.h"
#include "mongo/bson/bsonmisc.h"
-#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/client.h"
#include "mongo/db/free_mon/free_mon_op_observer.h"
@@ -119,8 +118,8 @@ public:
private:
/**
- * Private enum to ensure caller uses class correctly.
- */
+ * Private enum to ensure caller uses class correctly.
+ */
enum class State {
kNotStarted,
kStarted,
@@ -248,10 +247,9 @@ public:
if (_options.doSync) {
pf.promise.setFrom(doRegister(req));
} else {
- auto swSchedule =
- _threadPool->scheduleWork([ sharedPromise = std::move(pf.promise), req, this ](
+ auto swSchedule = _threadPool->scheduleWork(
+ [sharedPromise = std::move(pf.promise), req, this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
-
sharedPromise.setWith([&] { return doRegister(req); });
});
@@ -295,10 +293,9 @@ public:
if (_options.doSync) {
pf.promise.setFrom(doMetrics(req));
} else {
- auto swSchedule =
- _threadPool->scheduleWork([ sharedPromise = std::move(pf.promise), req, this ](
+ auto swSchedule = _threadPool->scheduleWork(
+ [sharedPromise = std::move(pf.promise), req, this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
-
sharedPromise.setWith([&] { return doMetrics(req); });
});
@@ -543,8 +540,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// max reporting interval
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -555,8 +551,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 30 * 60 * 60 * 24LL))));
+ << "reportingInterval" << 30 * 60 * 60 * 24LL))));
// Positive: version 2
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -567,8 +562,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Positive: empty registration id string
ASSERT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -579,8 +573,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: bad protocol version
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -591,8 +584,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: halt uploading
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -603,8 +595,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
    // Negative: large registration id
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -614,20 +605,16 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large URL
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false << "id"
<< "mock123"
- << "informationalURL"
- << std::string(5000, 'b')
- << "message"
+ << "informationalURL" << std::string(5000, 'b') << "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
// Negative: large message
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -636,10 +623,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "mock123"
<< "informationalURL"
<< "http://www.example.com/123"
- << "message"
- << std::string(5000, 'c')
- << "reportingInterval"
- << 1LL))));
+ << "message" << std::string(5000, 'c') << "reportingInterval" << 1LL))));
// Negative: too small a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -650,8 +634,7 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 0LL))));
+ << "reportingInterval" << 0LL))));
// Negative: too large a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateRegistrationResponse(FreeMonRegistrationResponse::parse(
@@ -662,39 +645,36 @@ TEST(FreeMonProcessorTest, TestRegistrationResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << (60LL * 60 * 24 * 30 + 1LL)))));
+ << "reportingInterval" << (60LL * 60 * 24 * 30 + 1LL)))));
}
// Positive: Ensure the response is validated correctly
TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
- ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
+ ASSERT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ BSON("version" << 1LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
// Positive: Support version 2
- ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
+ ASSERT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
- BSON("version" << 2LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ BSON("version" << 2LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
// Positive: Add resendRegistration
ASSERT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
@@ -707,10 +687,7 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL
- << "resendRegistration"
- << true))));
+ << "reportingInterval" << 1LL << "resendRegistration" << true))));
// Positive: max reporting interval
@@ -724,89 +701,74 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 60 * 60 * 24 * 30LL))));
+ << "reportingInterval" << 60 * 60 * 24 * 30LL))));
// Negative: bad protocol version
+ ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
+ BSON("version" << 42LL << "haltMetricsUploading" << false
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
+
+ // Negative: halt uploading
+ ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
+ FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
+ BSON("version" << 1LL << "haltMetricsUploading" << true
+ << "permanentlyDelete" << false << "id"
+ << "mock123"
+ << "informationalURL"
+ << "http://www.example.com/123"
+ << "message"
+ << "msg456"
+ << "reportingInterval" << 1LL))));
+
+    // Negative: large registration id
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
- BSON("version" << 42LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
+ BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
+ << "id" << std::string(5000, 'a') << "informationalURL"
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
- // Negative: halt uploading
+ // Negative: large URL
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << true << "permanentlyDelete" << false
- << "id"
+ BSON("version" << 1LL << "haltMetricsUploading" << false
+
+ << "permanentlyDelete" << false << "id"
<< "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
+ << "informationalURL" << std::string(5000, 'b') << "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "reportingInterval" << 1LL))));
- // Negative: large registartation id
+ // Negative: large message
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
<< "id"
- << std::string(5000, 'a')
+ << "mock123"
<< "informationalURL"
<< "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 1LL))));
+ << "message" << std::string(5000, 'c') << "reportingInterval" << 1LL))));
- // Negative: large URL
+ // Negative: too small a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(
FreeMonMetricsResponse::parse(IDLParserErrorContext("foo"),
BSON("version" << 1LL << "haltMetricsUploading" << false
-
- << "permanentlyDelete"
- << false
- << "id"
+ << "permanentlyDelete" << false << "id"
<< "mock123"
<< "informationalURL"
- << std::string(5000, 'b')
+ << "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << 1LL))));
-
- // Negative: large message
- ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << std::string(5000, 'c')
- << "reportingInterval"
- << 1LL))));
-
- // Negative: too small a reporting interval
- ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
- IDLParserErrorContext("foo"),
- BSON("version" << 1LL << "haltMetricsUploading" << false << "permanentlyDelete" << false
- << "id"
- << "mock123"
- << "informationalURL"
- << "http://www.example.com/123"
- << "message"
- << "msg456"
- << "reportingInterval"
- << 0LL))));
+ << "reportingInterval" << 0LL))));
// Negative: too large a reporting interval
ASSERT_NOT_OK(FreeMonProcessor::validateMetricsResponse(FreeMonMetricsResponse::parse(
@@ -818,8 +780,7 @@ TEST(FreeMonProcessorTest, TestMetricsResponseValidation) {
<< "http://www.example.com/123"
<< "message"
<< "msg456"
- << "reportingInterval"
- << (60LL * 60 * 24 * 30 + 1LL)))));
+ << "reportingInterval" << (60LL * 60 * 24 * 30 + 1LL)))));
}
/**
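
The scheduleWork hunks above reflect a second clang-format 7 change: the spaces previously emitted inside a lambda introducer containing an init-capture (`[ sharedPromise = std::move(pf.promise), ... ]`) are gone. A standalone illustration in plain C++:

    #include <future>
    #include <utility>

    auto makeTask(std::promise<int> p) {
        // clang-format 6 rendered the introducer as [ p = std::move(p) ]
        return [p = std::move(p)]() mutable { p.set_value(42); };
    }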
diff --git a/src/mongo/db/free_mon/free_mon_message.h b/src/mongo/db/free_mon/free_mon_message.h
index 55b3091c34a..71a34dd84b4 100644
--- a/src/mongo/db/free_mon/free_mon_message.h
+++ b/src/mongo/db/free_mon/free_mon_message.h
@@ -67,8 +67,8 @@ enum class FreeMonMessageType {
AsyncRegisterFail,
/**
- * Unregister server from server command.
- */
+ * Unregister server from server command.
+ */
UnregisterCommand,
/**
@@ -117,24 +117,24 @@ enum class FreeMonMessageType {
*/
enum class RegistrationType {
/**
- * Do not register on start because it was not configured via commandline/config file.
- */
+ * Do not register on start because it was not configured via commandline/config file.
+ */
DoNotRegister,
/**
- * Register immediately on start since we are a standalone.
- */
+ * Register immediately on start since we are a standalone.
+ */
RegisterOnStart,
/**
- * Register after transition to becoming primary because we are in a replica set,
- * and Free Monitoring has been explicitly enabled.
- */
+ * Register after transition to becoming primary because we are in a replica set,
+ * and Free Monitoring has been explicitly enabled.
+ */
RegisterAfterOnTransitionToPrimary,
/**
- * As above, but only if we have been runtime enabled.
- */
+ * As above, but only if we have been runtime enabled.
+ */
RegisterAfterOnTransitionToPrimaryIfEnabled,
};
@@ -334,7 +334,7 @@ private:
/**
* For the messages that the caller needs to wait on, this provides a mechanism to wait on messages
* to be processed.
-*/
+ */
template <FreeMonMessageType typeT>
struct FreeMonWaitablePayloadForMessage {
using payload_type = void;
diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp
index b6c150f5e64..a65dd1ecdcc 100644
--- a/src/mongo/db/free_mon/free_mon_mongod.cpp
+++ b/src/mongo/db/free_mon/free_mon_mongod.cpp
@@ -105,7 +105,6 @@ public:
reqObj.objdata(), reqObj.objdata() + reqObj.objsize());
return post("/register", data).then([](DataBuilder&& blob) {
-
if (!blob.size()) {
uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
@@ -128,7 +127,6 @@ public:
reqObj.objdata(), reqObj.objdata() + reqObj.objsize());
return post("/metrics", data).then([](DataBuilder&& blob) {
-
if (!blob.size()) {
uasserted(ErrorCodes::FreeMonHttpTemporaryFailure, "Empty response received");
}
@@ -152,7 +150,7 @@ private:
std::string url(FreeMonEndpointURL + path.toString());
auto status = _executor->scheduleWork(
- [ promise = std::move(pf.promise), url = std::move(url), data = std::move(data), this ](
+ [promise = std::move(pf.promise), url = std::move(url), data = std::move(data), this](
const executor::TaskExecutor::CallbackArgs& cbArgs) mutable {
ConstDataRange cdr(data->data(), data->size());
try {
@@ -202,28 +200,11 @@ public:
// Try to filter server status to make it cheaper to collect. Harmless if we gather
// extra
BSON("serverStatus" << 1 << "storageEngine" << true << "extra_info" << false
- << "opLatencies"
- << false
- << "opcountersRepl"
- << false
- << "opcounters"
- << false
- << "transactions"
- << false
- << "connections"
- << false
- << "network"
- << false
- << "tcMalloc"
- << false
- << "network"
- << false
- << "wiredTiger"
- << false
- << "sharding"
- << false
- << "metrics"
- << false)) {}
+ << "opLatencies" << false << "opcountersRepl" << false
+ << "opcounters" << false << "transactions" << false
+ << "connections" << false << "network" << false << "tcMalloc"
+ << false << "network" << false << "wiredTiger" << false
+ << "sharding" << false << "metrics" << false)) {}
std::string name() const final {
return "storageEngine";
diff --git a/src/mongo/db/free_mon/free_mon_op_observer.cpp b/src/mongo/db/free_mon/free_mon_op_observer.cpp
index 09bfb3ff62c..29e380c8baa 100644
--- a/src/mongo/db/free_mon/free_mon_op_observer.cpp
+++ b/src/mongo/db/free_mon/free_mon_op_observer.cpp
@@ -42,8 +42,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
const auto getFreeMonDeleteState = OperationContext::declareDecoration<bool>();
diff --git a/src/mongo/db/free_mon/free_mon_options.h b/src/mongo/db/free_mon/free_mon_options.h
index 60203dc2b94..19f707e8b65 100644
--- a/src/mongo/db/free_mon/free_mon_options.h
+++ b/src/mongo/db/free_mon/free_mon_options.h
@@ -35,8 +35,8 @@
namespace mongo {
/**
-* Free Moniting Command line choices
-*/
+ * Free Monitoring Command line choices
+ */
enum class EnableCloudStateEnum : std::int32_t {
kOn,
kOff,
diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp
index 7013d72e244..8cb57bda42f 100644
--- a/src/mongo/db/free_mon/free_mon_processor.cpp
+++ b/src/mongo/db/free_mon/free_mon_processor.cpp
@@ -465,36 +465,29 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream()
<< "Unexpected registration response protocol version, expected ("
- << kMinProtocolVersion
- << ", "
- << kMaxProtocolVersion
- << "), received '"
- << resp.getVersion()
- << "'");
+ << kMinProtocolVersion << ", " << kMaxProtocolVersion << "), received '"
+ << resp.getVersion() << "'");
}
if (resp.getId().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Id is '" << resp.getId().size()
<< "' bytes in length, maximum allowed length is '"
- << kRegistrationIdMaxLength
- << "'");
+ << kRegistrationIdMaxLength << "'");
}
if (resp.getInformationalURL().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "InformationURL is '" << resp.getInformationalURL().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalURLMaxLength
- << "'");
+ << kInformationalURLMaxLength << "'");
}
if (resp.getMessage().size() >= kInformationalMessageMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Message is '" << resp.getMessage().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalMessageMaxLength
- << "'");
+ << kInformationalMessageMaxLength << "'");
}
if (resp.getUserReminder().is_initialized() &&
@@ -502,19 +495,15 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kUserReminderMaxLength
- << "'");
+ << kUserReminderMaxLength << "'");
}
if (resp.getReportingInterval() < kReportingIntervalSecondsMin ||
resp.getReportingInterval() > kReportingIntervalSecondsMax) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Reporting Interval '" << resp.getReportingInterval()
- << "' must be in the range ["
- << kReportingIntervalSecondsMin
- << ","
- << kReportingIntervalSecondsMax
- << "]");
+ << "' must be in the range [" << kReportingIntervalSecondsMin
+ << "," << kReportingIntervalSecondsMax << "]");
}
// Did cloud ask us to stop uploading?
@@ -540,30 +529,24 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
if (!(resp.getVersion() >= kMinProtocolVersion && resp.getVersion() <= kMaxProtocolVersion)) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Unexpected metrics response protocol version, expected ("
- << kMinProtocolVersion
- << ", "
- << kMaxProtocolVersion
- << "), received '"
- << resp.getVersion()
- << "'");
+ << kMinProtocolVersion << ", " << kMaxProtocolVersion
+ << "), received '" << resp.getVersion() << "'");
}
if (resp.getId().is_initialized() && resp.getId().get().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Id is '" << resp.getId().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kRegistrationIdMaxLength
- << "'");
+ << kRegistrationIdMaxLength << "'");
}
if (resp.getInformationalURL().is_initialized() &&
resp.getInformationalURL().get().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "InformationURL is '"
- << resp.getInformationalURL().get().size()
- << "' bytes in length, maximum allowed length is '"
- << kInformationalURLMaxLength
- << "'");
+ str::stream()
+ << "InformationURL is '" << resp.getInformationalURL().get().size()
+ << "' bytes in length, maximum allowed length is '"
+ << kInformationalURLMaxLength << "'");
}
if (resp.getMessage().is_initialized() &&
@@ -571,8 +554,7 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Message is '" << resp.getMessage().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kInformationalMessageMaxLength
- << "'");
+ << kInformationalMessageMaxLength << "'");
}
if (resp.getUserReminder().is_initialized() &&
@@ -580,19 +562,15 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
<< "' bytes in length, maximum allowed length is '"
- << kUserReminderMaxLength
- << "'");
+ << kUserReminderMaxLength << "'");
}
if (resp.getReportingInterval() < kReportingIntervalSecondsMin ||
resp.getReportingInterval() > kReportingIntervalSecondsMax) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream() << "Reporting Interval '" << resp.getReportingInterval()
- << "' must be in the range ["
- << kReportingIntervalSecondsMin
- << ","
- << kReportingIntervalSecondsMax
- << "]");
+ << "' must be in the range [" << kReportingIntervalSecondsMin
+ << "," << kReportingIntervalSecondsMax << "]");
}
// Did cloud ask us to stop uploading?
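
Every Status hunk in this file follows the same recipe, worth seeing once without diff noise: str::stream() accumulates the message fragments and converts to a string inside the Status constructor. A minimal sketch (hypothetical helper name; constants as used above):

    Status makeIntervalError(long long interval) {
        return Status(ErrorCodes::FreeMonHttpPermanentFailure,
                      str::stream() << "Reporting Interval '" << interval
                                    << "' must be in the range [" << kReportingIntervalSecondsMin
                                    << "," << kReportingIntervalSecondsMax << "]");
    }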
diff --git a/src/mongo/db/free_mon/free_mon_queue_test.cpp b/src/mongo/db/free_mon/free_mon_queue_test.cpp
index ea38c7bad5c..ad6104c5126 100644
--- a/src/mongo/db/free_mon/free_mon_queue_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue_test.cpp
@@ -146,13 +146,11 @@ TEST_F(FreeMonQueueTest, TestQueueStop) {
auto swSchedule =
_mockThreadPool->scheduleWork([&](const executor::TaskExecutor::CallbackArgs& cbArgs) {
-
barrier.countDownAndWait();
// Try to dequeue from a stopped task queue
auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource());
ASSERT_FALSE(item.is_initialized());
-
});
ASSERT_OK(swSchedule.getStatus());
diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp
index 143a6c4b391..509504037b7 100644
--- a/src/mongo/db/ftdc/compressor_test.cpp
+++ b/src/mongo/db/ftdc/compressor_test.cpp
@@ -70,18 +70,12 @@ TEST_F(FTDCCompressorTest, TestBasic) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42),
+ << "key1" << 33 << "key2" << 42),
Date_t());
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t());
ASSERT_HAS_SPACE(st);
@@ -190,112 +184,64 @@ TEST_F(FTDCCompressorTest, TestSchemaChanges) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
ASSERT_HAS_SPACE(st);
// Rename field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key5" << 45 << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Change type
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
+ << "key1" << 34 << "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47 << "key7" << 34 << "key9"
+ << 45 << "key13" << 47));
ASSERT_SCHEMA_CHANGED(st);
// Remove Field
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key7" << 34 << "key9" << 45 << "key13" << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 34
- << "key9"
- << 45
- << "key13"
- << 47));
+ << "key7" << 34 << "key9" << 45 << "key13" << 47));
ASSERT_HAS_SPACE(st);
// Start new batch
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 5));
+ << "key7" << 5));
ASSERT_SCHEMA_CHANGED(st);
// Change field to object
@@ -309,22 +255,19 @@ TEST_F(FTDCCompressorTest, TestSchemaChanges) {
// Change field from object to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 7));
+ << "key7" << 7));
ASSERT_SCHEMA_CHANGED(st);
// Change field from number to array
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << BSON_ARRAY(13 << 17)));
+ << "key7" << BSON_ARRAY(13 << 17)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from array to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7"
- << 19));
+ << "key7" << 19));
ASSERT_SCHEMA_CHANGED(st);
@@ -351,13 +294,11 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
auto st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 42));
+ << "int1" << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 45));
+ << "int1" << 45));
ASSERT_HAS_SPACE(st);
// Add string field
@@ -365,8 +306,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "joe"
<< "str2"
<< "smith"
- << "int1"
- << 47));
+ << "int1" << 47));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
@@ -374,41 +314,34 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "joe"
<< "str2"
<< "smith"
- << "int2"
- << 48));
+ << "int2" << 48));
ASSERT_SCHEMA_CHANGED(st);
// Remove string field
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 49));
+ << "int2" << 49));
ASSERT_HAS_SPACE(st);
// Add string field as last element
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 50
- << "str3"
+ << "int2" << 50 << "str3"
<< "bar"));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 51
- << "str3"
+ << "int1" << 51 << "str3"
<< "bar"));
ASSERT_SCHEMA_CHANGED(st);
// Remove string field as last element
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 52));
+ << "int1" << 52));
ASSERT_HAS_SPACE(st);
@@ -419,8 +352,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "smith"
<< "str3"
<< "foo"
- << "int1"
- << 53));
+ << "int1" << 53));
ASSERT_HAS_SPACE(st);
// Reset schema by renaming a int field
@@ -430,15 +362,13 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
<< "smith"
<< "str3"
<< "foo"
- << "int2"
- << 54));
+ << "int2" << 54));
ASSERT_SCHEMA_CHANGED(st);
// Remove 2 string fields
st = c.addSample(BSON("str1"
<< "joe"
- << "int2"
- << 55));
+ << "int2" << 55));
ASSERT_HAS_SPACE(st);
// Change string to number
@@ -448,8 +378,7 @@ TEST_F(FTDCCompressorTest, TestStringSchemaChanges) {
// Change number to string
st = c.addSample(BSON("str1"
<< "joe"
- << "int1"
- << 67));
+ << "int1" << 67));
ASSERT_SCHEMA_CHANGED(st);
}
@@ -459,24 +388,15 @@ TEST_F(FTDCCompressorTest, TestNumbersCompat) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42LL));
+ << "key1" << 33 << "key2" << 42LL));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34LL
- << "key2"
- << 45.0f));
+ << "key1" << 34LL << "key2" << 45.0f));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<char>(32)
- << "key2"
- << 45.0F));
+ << "key1" << static_cast<char>(32) << "key2" << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -500,50 +420,35 @@ TEST_F(FTDCCompressorTest, Types) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42LL));
+ << "key1" << 33 << "key2" << 42LL));
ASSERT_HAS_SPACE(st);
const char bytes[] = {0x1, 0x2, 0x3};
- BSONObj o = BSON("created" << DATENOW // date_t
- << "null"
- << BSONNULL // { a : null }
- << "undefined"
- << BSONUndefined // { a : undefined }
+ BSONObj o = BSON("created" << DATENOW // date_t
+ << "null" << BSONNULL // { a : null }
+ << "undefined" << BSONUndefined // { a : undefined }
<< "obj"
<< BSON( // nested object
"a"
<< "abc"
- << "b"
- << 123LL)
+ << "b" << 123LL)
<< "foo"
<< BSON_ARRAY("bar"
<< "baz"
- << "qux") // array of strings
- << "foo2"
- << BSON_ARRAY(5 << 6 << 7) // array of ints
- << "bindata"
- << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
- << "oid"
- << OID("010203040506070809101112") // oid
- << "bool"
- << true // bool
- << "regex"
- << BSONRegEx("mongodb") // regex
- << "ref"
- << BSONDBRef("c", OID("010203040506070809101112")) // ref
- << "code"
- << BSONCode("func f() { return 1; }") // code
+ << "qux") // array of strings
+ << "foo2" << BSON_ARRAY(5 << 6 << 7) // array of ints
+ << "bindata" << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
+ << "oid" << OID("010203040506070809101112") // oid
+ << "bool" << true // bool
+ << "regex" << BSONRegEx("mongodb") // regex
+ << "ref" << BSONDBRef("c", OID("010203040506070809101112")) // ref
+ << "code" << BSONCode("func f() { return 1; }") // code
<< "codewscope"
<< BSONCodeWScope("func f() { return 1; }",
BSON("c" << true)) // codew
- << "minkey"
- << MINKEY // minkey
- << "maxkey"
- << MAXKEY // maxkey
- );
+ << "minkey" << MINKEY // minkey
+ << "maxkey" << MAXKEY // maxkey
+ );
st = c.addSample(o);
ASSERT_SCHEMA_CHANGED(st);
@@ -553,17 +458,11 @@ TEST_F(FTDCCompressorTest, Types) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34LL
- << "key2"
- << 45.0f));
+ << "key1" << 34LL << "key2" << 45.0f));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<char>(32)
- << "key2"
- << 45.0F));
+ << "key1" << static_cast<char>(32) << "key2" << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -575,37 +474,25 @@ TEST_F(FTDCCompressorTest, TestFull) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
ASSERT_HAS_SPACE(st);
for (size_t i = 0; i != FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j)
- << "key2"
- << 45));
+ << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
ASSERT_HAS_SPACE(st);
}
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_FULL(st);
// Add Value
st = c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
ASSERT_HAS_SPACE(st);
}
}
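
Taken together, the compressor tests above pin down FTDC's contract: a sample whose key names and types match the previous one extends the current chunk, while any added, renamed, re-typed, or removed field starts a new one. A condensed restatement (c and the assertion macros as set up by the fixture):

    auto st = c.addSample(BSON("name"
                               << "joe"
                               << "key1" << 33),
                          Date_t());
    ASSERT_HAS_SPACE(st);  // first sample of a chunk

    st = c.addSample(BSON("name"
                          << "joe"
                          << "key2" << 33),  // key1 renamed to key2
                     Date_t());
    ASSERT_SCHEMA_CHANGED(st);  // schema differs, so a new chunk starts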
diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h
index 26d76b28ad7..5d1f2f5487a 100644
--- a/src/mongo/db/ftdc/controller.h
+++ b/src/mongo/db/ftdc/controller.h
@@ -150,14 +150,14 @@ private:
private:
/**
- * Private enum to track state.
- *
- * +-----------------------------------------------------------+
- * | v
- * +-------------+ +----------+ +----------------+ +-------+
- * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
- * +-------------+ +----------+ +----------------+ +-------+
- */
+ * Private enum to track state.
+ *
+ * +-----------------------------------------------------------+
+ * | v
+ * +-------------+ +----------+ +----------------+ +-------+
+ * | kNotStarted | --> | kStarted | --> | kStopRequested | --> | kDone |
+ * +-------------+ +----------+ +----------------+ +-------+
+ */
enum class State {
/**
* Initial state. Either start() or stop() can be called next.
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index 8afc65b96a3..4f67923730c 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -119,8 +119,8 @@ public:
private:
/**
- * Private enum to ensure caller uses class correctly.
- */
+ * Private enum to ensure caller uses class correctly.
+ */
enum class State {
kNotStarted,
kStarted,
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index e79d4c9febc..8962e9ae229 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -76,8 +76,8 @@ StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
boost::filesystem::create_directories(dir, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\"" << dir.generic_string() << "\" could not be created: "
- << ec.message()};
+ str::stream() << "\"" << dir.generic_string()
+ << "\" could not be created: " << ec.message()};
}
}
@@ -233,9 +233,9 @@ Status FTDCFileManager::trimDirectory(std::vector<boost::filesystem::path>& file
boost::filesystem::remove(*it, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\"" << (*it).generic_string()
- << "\" could not be removed during trimming: "
- << ec.message()};
+ str::stream()
+ << "\"" << (*it).generic_string()
+ << "\" could not be removed during trimming: " << ec.message()};
}
}
}
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 1bb8ea78702..d9bd8aada5c 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -72,45 +72,34 @@ TEST_F(FTDCFileManagerTest, TestFull) {
// Test a large numbers of zeros, and incremental numbers in a full buffer
for (int j = 0; j < 10; j++) {
- ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1"
- << 3230792343LL
- << "key2"
- << 235135),
- Date_t()));
+ ASSERT_OK(
+ mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1" << 3230792343LL << "key2" << 235135),
+ Date_t()));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
- ASSERT_OK(
- mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
- << static_cast<long long int>(i *
- (645 << j))),
- Date_t()));
+ ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(
+ client,
+ BSON("name"
+ << "joe"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << static_cast<long long int>(i * (645 << j))),
+ Date_t()));
}
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -175,9 +164,7 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 3230792343LL
- << "key2"
+ << "key1" << 3230792343LL << "key2"
<< 235135),
Date_t()));
@@ -187,9 +174,7 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
client,
BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -197,20 +182,14 @@ TEST_F(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -245,9 +224,7 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 3230792343LL
- << "key2"
+ << "key1" << 3230792343LL << "key2"
<< 235135),
Date_t()));
@@ -257,9 +234,7 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
client,
BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j * 37)
- << "key2"
+ << "key1" << static_cast<long long int>(i * j * 37) << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -267,20 +242,14 @@ TEST_F(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45),
+ << "key1" << 34 << "key2" << 45),
Date_t()));
}
@@ -311,23 +280,14 @@ TEST_F(FTDCFileManagerTest, TestNormalCrashInterim) {
BSONObj mdoc1 = BSON("name"
<< "some_metadata"
- << "key1"
- << 34
- << "something"
- << 98);
+ << "key1" << 34 << "something" << 98);
BSONObj sdoc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj sdoc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
boost::filesystem::path fileOut;
diff --git a/src/mongo/db/ftdc/file_reader.cpp b/src/mongo/db/ftdc/file_reader.cpp
index 23d468aac0f..b71257e4278 100644
--- a/src/mongo/db/ftdc/file_reader.cpp
+++ b/src/mongo/db/ftdc/file_reader.cpp
@@ -195,8 +195,7 @@ StatusWith<BSONObj> FTDCFileReader::readDocument() {
if (readSize != _stream.gcount()) {
return {ErrorCodes::FileStreamFailed,
str::stream() << "Failed to read " << readSize << " bytes from file \""
- << _file.generic_string()
- << "\""};
+ << _file.generic_string() << "\""};
}
ConstDataRange cdr(_buffer.data(), _buffer.data() + bsonLength);
diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp
index be4ea127b3b..24a6bf4cb17 100644
--- a/src/mongo/db/ftdc/file_writer.cpp
+++ b/src/mongo/db/ftdc/file_writer.cpp
@@ -210,8 +210,7 @@ Status FTDCFileWriter::flush(const boost::optional<ConstDataRange>& range, Date_
if (ec) {
return {ErrorCodes::NonExistentPath,
str::stream() << "\"" << _interimFile.generic_string()
- << "\" could not be removed during flush: "
- << ec.message()};
+ << "\" could not be removed during flush: " << ec.message()};
}
return Status::OK();
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 5da93d7026d..545741aa639 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -60,16 +60,10 @@ TEST_F(FTDCFileTest, TestFileBasicMetadata) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -111,16 +105,10 @@ TEST_F(FTDCFileTest, TestFileBasicCompress) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45);
+ << "key1" << 34 << "key2" << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3"
- << 34
- << "key5"
- << 45);
+ << "key3" << 34 << "key5" << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -216,69 +204,41 @@ TEST_F(FTDCFileTest, TestSchemaChanges) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key2" << 45 << "key3" << 47));
// Rename field
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
- << 45
- << "key3"
- << 47));
+ << "key1" << 34 << "key5" << 45 << "key3" << 47));
// Change type
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key5"
+ << "key1" << 34 << "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
// RemoveField
c.addSample(BSON("name"
<< "joe"
<< "key5"
<< "45"
- << "key3"
- << 47));
+ << "key3" << 47));
}
// Test a full buffer
@@ -289,34 +249,22 @@ TEST_F(FTDCFileTest, TestFull) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 33
- << "key2"
- << 42));
+ << "key1" << 33 << "key2" << 42));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << static_cast<long long int>(i * j)
- << "key2"
- << 45));
+ << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
}
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1"
- << 34
- << "key2"
- << 45));
+ << "key1" << 34 << "key2" << 45));
}
}
diff --git a/src/mongo/db/ftdc/ftdc_system_stats.h b/src/mongo/db/ftdc/ftdc_system_stats.h
index b5886fea819..bdc2e87984c 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats.h
+++ b/src/mongo/db/ftdc/ftdc_system_stats.h
@@ -33,7 +33,6 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/ftdc/controller.h"
-#include "mongo/db/ftdc/controller.h"
namespace mongo {
diff --git a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
index e68dcff300c..638380b0bc7 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
+++ b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
@@ -68,7 +68,10 @@ static const std::vector<StringData> kMemKeys{
};
static const std::vector<StringData> kNetstatKeys{
- "Tcp:"_sd, "Ip:"_sd, "TcpExt:"_sd, "IpExt:"_sd,
+ "Tcp:"_sd,
+ "Ip:"_sd,
+ "TcpExt:"_sd,
+ "IpExt:"_sd,
};
/**
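
The kNetstatKeys hunk demonstrates a clang-format rule that explains several one-per-line lists in this commit: a trailing comma in a braced initializer forces each element onto its own line, while omitting it lets the elements pack. Sketch:

    static const std::vector<StringData> packed{"Tcp:"_sd, "Ip:"_sd};  // no trailing comma

    static const std::vector<StringData> perLine{
        "Tcp:"_sd,
        "Ip:"_sd,  // trailing comma: one element per line
    };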
diff --git a/src/mongo/db/ftdc/util.cpp b/src/mongo/db/ftdc/util.cpp
index f745068fdea..9500bf62ecb 100644
--- a/src/mongo/db/ftdc/util.cpp
+++ b/src/mongo/db/ftdc/util.cpp
@@ -444,9 +444,7 @@ StatusWith<FTDCType> getBSONDocumentType(const BSONObj& obj) {
static_cast<FTDCType>(value) != FTDCType::kMetadata) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << std::string(kFTDCTypeField)
- << "' is not an expected value, found '"
- << value
- << "'"};
+ << "' is not an expected value, found '" << value << "'"};
}
return {static_cast<FTDCType>(value)};
diff --git a/src/mongo/db/ftdc/util.h b/src/mongo/db/ftdc/util.h
index 87defea80ea..4d47c610559 100644
--- a/src/mongo/db/ftdc/util.h
+++ b/src/mongo/db/ftdc/util.h
@@ -45,23 +45,23 @@ namespace mongo {
namespace FTDCBSONUtil {
/**
-* Type of FTDC document.
-*
-* NOTE: Persisted to disk via BSON Objects.
-*/
+ * Type of FTDC document.
+ *
+ * NOTE: Persisted to disk via BSON Objects.
+ */
enum class FTDCType : std::int32_t {
/**
- * A metadata document is composed of a header + an array of bson documents
- *
- * See createBSONMetadataChunkDocument
- */
+ * A metadata document is composed of a header + an array of bson documents
+ *
+ * See createBSONMetadataChunkDocument
+ */
kMetadata = 0,
/**
- * A metrics chunk is composed of a header + a compressed metric chunk.
- *
- * See createBSONMetricChunkDocument
- */
+ * A metrics chunk is composed of a header + a compressed metric chunk.
+ *
+ * See createBSONMetricChunkDocument
+ */
kMetricChunk = 1,
};
diff --git a/src/mongo/db/ftdc/varint.h b/src/mongo/db/ftdc/varint.h
index 08a064de2b4..66a4b30cab7 100644
--- a/src/mongo/db/ftdc/varint.h
+++ b/src/mongo/db/ftdc/varint.h
@@ -46,8 +46,8 @@ namespace mongo {
*/
struct FTDCVarInt {
/**
- * Maximum number of bytes an integer can compress to
- */
+ * Maximum number of bytes an integer can compress to
+ */
static const std::size_t kMaxSizeBytes64 = 10;
FTDCVarInt() = default;
diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp
index ebca711dd2b..c9666f0834a 100644
--- a/src/mongo/db/fts/fts_element_iterator.cpp
+++ b/src/mongo/db/fts/fts_element_iterator.cpp
@@ -64,7 +64,7 @@ inline bool _matchPrefix(const string& dottedName, const string& weight) {
}
return str::startsWith(weight, dottedName + '.');
}
-}
+} // namespace
bool FTSElementIterator::more() {
//_currentValue = advance();
@@ -113,9 +113,10 @@ FTSIteratorValue FTSElementIterator::advance() {
// 1. parent path empty (top level): use the current field name
// 2. parent path non-empty and obj is an array: use the parent path
// 3. parent path non-empty and obj is a sub-doc: append field name to parent path
- string dottedName = (_frame._parentPath.empty() ? fieldName : _frame._isArray
- ? _frame._parentPath
- : _frame._parentPath + '.' + fieldName);
+ string dottedName =
+ (_frame._parentPath.empty()
+ ? fieldName
+ : _frame._isArray ? _frame._parentPath : _frame._parentPath + '.' + fieldName);
// Find lower bound of dottedName in _weights. lower_bound leaves us at the first
// weight that could possibly match or be a prefix of dottedName. And if this
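
The reflowed conditional above picks one of the three dotted-name cases listed in the comment; written as branches, the same logic reads (an equivalent restatement for clarity, not a proposed change):

    std::string dottedName;
    if (_frame._parentPath.empty()) {
        dottedName = fieldName;                             // 1. top level
    } else if (_frame._isArray) {
        dottedName = _frame._parentPath;                    // 2. array element
    } else {
        dottedName = _frame._parentPath + '.' + fieldName;  // 3. sub-document
    }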
diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp
index 2bcf35ff398..98652b875d4 100644
--- a/src/mongo/db/fts/fts_index_format.cpp
+++ b/src/mongo/db/fts/fts_index_format.cpp
@@ -117,8 +117,8 @@ BSONElement extractNonFTSKeyElement(const BSONObj& obj, StringData path) {
dps::extractAllElementsAlongPath(
obj, path, indexedElements, expandArrayOnTrailingField, &arrayComponents);
uassert(ErrorCodes::CannotBuildIndexKeys,
- str::stream() << "Field '" << path << "' of text index contains an array in document: "
- << obj,
+ str::stream() << "Field '" << path
+ << "' of text index contains an array in document: " << obj,
arrayComponents.empty());
// Since there aren't any arrays, there cannot be more than one extracted element on 'path'.
@@ -166,9 +166,7 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
uassert(16732,
str::stream() << "too many unique keys for a single document to"
- << " have a text index, max is "
- << term_freqs.size()
- << obj["_id"],
+ << " have a text index, max is " << term_freqs.size() << obj["_id"],
term_freqs.size() <= 400000);
}
@@ -205,9 +203,7 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
uassert(16733,
str::stream() << "trying to index text where term list is too big, max is "
- << MaxKeyBSONSizeMB
- << "mb "
- << obj["_id"],
+ << MaxKeyBSONSizeMB << "mb " << obj["_id"],
keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
}
}
@@ -267,5 +263,5 @@ void FTSIndexFormat::_appendIndexKey(BSONObjBuilder& b,
b.append("", weight);
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
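
The remaining fts hunks are one mechanical fix, clang-format's FixNamespaceComments option: every brace that closes a namespace gains a comment naming it. The resulting shape:

    namespace mongo {
    namespace fts {
    // ...
    }  // namespace fts
    }  // namespace mongo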
diff --git a/src/mongo/db/fts/fts_index_format.h b/src/mongo/db/fts/fts_index_format.h
index cff73d5caad..dd83e8603a8 100644
--- a/src/mongo/db/fts/fts_index_format.h
+++ b/src/mongo/db/fts/fts_index_format.h
@@ -70,5 +70,5 @@ private:
const std::string& term,
TextIndexVersion textIndexVersion);
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp
index b847d16dd9d..c9d6779e639 100644
--- a/src/mongo/db/fts/fts_index_format_test.cpp
+++ b/src/mongo/db/fts/fts_index_format_test.cpp
@@ -68,14 +68,12 @@ TEST(FTSIndexFormat, Simple1) {
TEST(FTSIndexFormat, ExtraBack1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text"
- << "x"
- << 1)))));
+ << "x" << 1)))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x"
- << 5),
+ << "x" << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -94,8 +92,7 @@ TEST(FTSIndexFormat, ExtraFront1) {
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x"
- << 5),
+ << "x" << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -158,8 +155,7 @@ void assertEqualsIndexKeys(std::set<std::string>& expectedKeys, const BSONObjSet
TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 1))));
+ << "textIndexVersion" << 1))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -188,8 +184,7 @@ TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 2))));
+ << "textIndexVersion" << 2))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -222,8 +217,7 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
TEST(FTSIndexFormat, LongWordTextIndexVersion3) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text")
- << "textIndexVersion"
- << 3))));
+ << "textIndexVersion" << 3))));
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
string longPrefix(1024U, 'a');
// "aaa...aaacat"
diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp
index faa54e79333..33f5ce4d565 100644
--- a/src/mongo/db/fts/fts_language.cpp
+++ b/src/mongo/db/fts/fts_language.cpp
@@ -82,7 +82,7 @@ LanguageMap languageMapV2;
// Case-sensitive by lookup key.
typedef std::map<StringData, const FTSLanguage*> LanguageMapLegacy;
LanguageMapLegacy languageMapV1;
-}
+} // namespace
MONGO_INITIALIZER_GROUP(FTSAllLanguagesRegistered, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS);
@@ -277,10 +277,10 @@ StatusWithFTSLanguage FTSLanguage::make(StringData langName, TextIndexVersion te
if (it == languageMap->end()) {
// TEXT_INDEX_VERSION_2 and above reject unrecognized language strings.
- Status status = Status(ErrorCodes::BadValue,
- str::stream() << "unsupported language: \"" << langName
- << "\" for text index version "
- << textIndexVersion);
+ Status status =
+ Status(ErrorCodes::BadValue,
+ str::stream() << "unsupported language: \"" << langName
+ << "\" for text index version " << textIndexVersion);
return StatusWithFTSLanguage(status);
}
@@ -312,5 +312,5 @@ std::unique_ptr<FTSTokenizer> UnicodeFTSLanguage::createTokenizer() const {
const FTSPhraseMatcher& UnicodeFTSLanguage::getPhraseMatcher() const {
return _unicodePhraseMatcher;
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h
index 47a6ab2213d..8bdcd1aa5ce 100644
--- a/src/mongo/db/fts/fts_language.h
+++ b/src/mongo/db/fts/fts_language.h
@@ -168,5 +168,5 @@ private:
extern BasicFTSLanguage languagePorterV1;
extern BasicFTSLanguage languageEnglishV2;
extern BasicFTSLanguage languageFrenchV2;
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp
index e229bbdf0bc..29166d88319 100644
--- a/src/mongo/db/fts/fts_language_test.cpp
+++ b/src/mongo/db/fts/fts_language_test.cpp
@@ -175,5 +175,5 @@ TEST(FTSLanguageV1, Empty) {
ASSERT(swl.getStatus().isOK());
ASSERT_EQUALS(swl.getValue()->str(), "none");
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp
index e14a14d4464..be9daa5801d 100644
--- a/src/mongo/db/fts/fts_matcher.cpp
+++ b/src/mongo/db/fts/fts_matcher.cpp
@@ -176,5 +176,5 @@ FTSTokenizer::Options FTSMatcher::_getTokenizerOptions() const {
return tokenizerOptions;
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher.h b/src/mongo/db/fts/fts_matcher.h
index 5dbcc981109..660194a9585 100644
--- a/src/mongo/db/fts/fts_matcher.h
+++ b/src/mongo/db/fts/fts_matcher.h
@@ -112,5 +112,5 @@ private:
const FTSQueryImpl _query;
const FTSSpec _spec;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_matcher_test.cpp b/src/mongo/db/fts/fts_matcher_test.cpp
index 31f05cf2268..46c292ce55a 100644
--- a/src/mongo/db/fts/fts_matcher_test.cpp
+++ b/src/mongo/db/fts/fts_matcher_test.cpp
@@ -278,5 +278,5 @@ TEST(FTSMatcher, NegativePhrasesMatchWithCase) {
ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"n R\""));
ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John\" -\"Running\""));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl.cpp b/src/mongo/db/fts/fts_query_impl.cpp
index fffc4362fbb..8c3f2e6882c 100644
--- a/src/mongo/db/fts/fts_query_impl.cpp
+++ b/src/mongo/db/fts/fts_query_impl.cpp
@@ -203,5 +203,5 @@ BSONObj FTSQueryImpl::toBSON() const {
bob.append("negatedPhrases", getNegatedPhr());
return bob.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl.h b/src/mongo/db/fts/fts_query_impl.h
index d399ee73763..97cdb8388df 100644
--- a/src/mongo/db/fts/fts_query_impl.h
+++ b/src/mongo/db/fts/fts_query_impl.h
@@ -84,5 +84,5 @@ private:
std::vector<std::string> _negatedPhrases;
std::set<std::string> _termsForBounds;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp
index d458004b0a5..b3b4cad71f1 100644
--- a/src/mongo/db/fts/fts_query_impl_test.cpp
+++ b/src/mongo/db/fts/fts_query_impl_test.cpp
@@ -478,5 +478,5 @@ TEST(FTSQueryImpl, CloneParsedQuery) {
ASSERT(castedClone->getNegatedPhr() == q.getNegatedPhr());
ASSERT(castedClone->getTermsForBounds() == q.getTermsForBounds());
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_parser.cpp b/src/mongo/db/fts/fts_query_parser.cpp
index a346e03451b..c6038be4575 100644
--- a/src/mongo/db/fts/fts_query_parser.cpp
+++ b/src/mongo/db/fts/fts_query_parser.cpp
@@ -102,5 +102,5 @@ QueryToken::Type FTSQueryParser::getType(char c) const {
return QueryToken::TEXT;
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_query_parser.h b/src/mongo/db/fts/fts_query_parser.h
index f4bab3e7e1c..4f11799337c 100644
--- a/src/mongo/db/fts/fts_query_parser.h
+++ b/src/mongo/db/fts/fts_query_parser.h
@@ -84,5 +84,5 @@ private:
bool _previousWhiteSpace;
const StringData _raw;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 20560ccdad5..c358ba4b679 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -59,9 +59,9 @@ const std::string moduleDefaultLanguage("english");
bool validateOverride(const string& override) {
// The override field can't be empty, can't be prefixed with a dollar sign, and
// can't contain a dot.
- return !override.empty()&& override[0] != '$' && override.find('.') == std::string::npos;
-}
+ return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
}
+} // namespace
FTSSpec::FTSSpec(const BSONObj& indexInfo) {
// indexInfo is a text index spec. Text index specs pass through fixSpec() before being
@@ -90,12 +90,8 @@ FTSSpec::FTSSpec(const BSONObj& indexInfo) {
msgasserted(17364,
str::stream() << "attempt to use unsupported textIndexVersion "
<< textIndexVersionElt.numberInt()
- << "; versions supported: "
- << TEXT_INDEX_VERSION_3
- << ", "
- << TEXT_INDEX_VERSION_2
- << ", "
- << TEXT_INDEX_VERSION_1);
+ << "; versions supported: " << TEXT_INDEX_VERSION_3 << ", "
+ << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1);
}
// Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
@@ -272,7 +268,7 @@ Status verifyFieldNameNotReserved(StringData s) {
return Status::OK();
}
-}
+} // namespace
StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1) {
@@ -406,9 +402,7 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT
- << ") but found: "
- << i->second};
+ << MAX_WORD_WEIGHT << ") but found: " << i->second};
}
// Verify weight refers to a valid field.
@@ -513,5 +507,5 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
return b.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
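Note: the other big source of churn is how long `<<` chains are wrapped. Where the previous formatter broke before every insertion operator, clang-format-7.0.1 bin-packs the operands up to the configured column limit, so multi-line str::stream() messages collapse considerably. A fragment adapted from the fts_spec.cpp hunk above (surrounding function elided):

    // Old layout: one operand per line.
    msgasserted(17364,
                str::stream() << "attempt to use unsupported textIndexVersion "
                              << textIndexVersionElt.numberInt()
                              << "; versions supported: "
                              << TEXT_INDEX_VERSION_3
                              << ", "
                              << TEXT_INDEX_VERSION_2
                              << ", "
                              << TEXT_INDEX_VERSION_1);

    // New layout: operands share lines up to the column limit.
    msgasserted(17364,
                str::stream() << "attempt to use unsupported textIndexVersion "
                              << textIndexVersionElt.numberInt()
                              << "; versions supported: " << TEXT_INDEX_VERSION_3 << ", "
                              << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1);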
diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp
index 53169f5e213..1d58c1da750 100644
--- a/src/mongo/db/fts/fts_spec_legacy.cpp
+++ b/src/mongo/db/fts/fts_spec_legacy.cpp
@@ -48,7 +48,7 @@ void _addFTSStuff(BSONObjBuilder* b) {
b->append("_fts", INDEX_NAME);
b->append("_ftsx", 1);
}
-}
+} // namespace
const FTSLanguage& FTSSpec::_getLanguageToUseV1(const BSONObj& userDoc) const {
BSONElement e = userDoc[_languageOverrideField];
@@ -240,9 +240,7 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
if (kv.second <= 0 || kv.second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT
- << ") but found: "
- << kv.second};
+ << MAX_WORD_WEIGHT << ") but found: " << kv.second};
}
b.append(kv.first, kv.second);
}
@@ -303,5 +301,5 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
return b.obj();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index f715b6f05ec..047968f2541 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -184,8 +184,7 @@ TEST(FTSSpec, ScoreSingleField1) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -204,8 +203,7 @@ TEST(FTSSpec, ScoreMultipleField1) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -247,8 +245,7 @@ TEST(FTSSpec, ScoreRepeatWord) {
<< "text"
<< "text"
<< "text")
- << "weights"
- << BSON("title" << 10));
+ << "weights" << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -273,8 +270,7 @@ TEST(FTSSpec, Extra1) {
TEST(FTSSpec, Extra2) {
BSONObj user = BSON("key" << BSON("data"
<< "text"
- << "x"
- << 1));
+ << "x" << 1));
BSONObj fixed = assertGet(FTSSpec::fixSpec(user));
FTSSpec spec(fixed);
ASSERT_EQUALS(0U, spec.numExtraBefore());
@@ -292,8 +288,7 @@ TEST(FTSSpec, Extra3) {
ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
fixed["key"].Obj());
ASSERT_BSONOBJ_EQ(BSON("data" << 1), fixed["weights"].Obj());
@@ -520,8 +515,7 @@ TEST(FTSSpec, NestedLanguages_Wildcard) {
TEST(FTSSpec, NestedLanguages_WildcardOverride) {
BSONObj indexSpec = BSON("key" << BSON("$**"
<< "text")
- << "weights"
- << BSON("d.e.f" << 20));
+ << "weights" << BSON("d.e.f" << 20));
FTSSpec spec(assertGet(FTSSpec::fixSpec(indexSpec)));
TermFrequencyMap tfm;
@@ -598,5 +592,5 @@ TEST(FTSSpec, TextIndexLegacyLanguageRecognition) {
ASSERT_EQUALS(tfm.size(), 0U); // "the" recognized as stopword
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
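Note: fts_spec_test.cpp shows the same repacking inside the BSON() builder macro, with one visible quirk: operands that are plain string literals keep their own lines, while other values are packed onto the preceding line. This matches clang-format's special treatment of streamed string literals, which it lays out like iostream output. Fragment adapted from the first hunk above:

    // Before:
    BSONObj user = BSON("key" << BSON("data"
                                      << "text")
                              << "weights"
                              << BSON("title" << 10));

    // After: the non-literal operand is packed; the string literal is not.
    BSONObj user = BSON("key" << BSON("data"
                                      << "text")
                              << "weights" << BSON("title" << 10));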
diff --git a/src/mongo/db/fts/fts_util.cpp b/src/mongo/db/fts/fts_util.cpp
index 5ef93b16559..f9de9ae33d7 100644
--- a/src/mongo/db/fts/fts_util.cpp
+++ b/src/mongo/db/fts/fts_util.cpp
@@ -35,5 +35,5 @@ namespace fts {
const std::string INDEX_NAME = "text";
const std::string WILDCARD = "$**";
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_util.h b/src/mongo/db/fts/fts_util.h
index 71eebcbf5f3..90eaa9095f6 100644
--- a/src/mongo/db/fts/fts_util.h
+++ b/src/mongo/db/fts/fts_util.h
@@ -46,5 +46,5 @@ enum TextIndexVersion {
TEXT_INDEX_VERSION_2 = 2, // Index format with ASCII support and murmur hashing.
TEXT_INDEX_VERSION_3 = 3, // Current index format with basic Unicode support.
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer.cpp b/src/mongo/db/fts/stemmer.cpp
index db5e97227da..8d54d1af104 100644
--- a/src/mongo/db/fts/stemmer.cpp
+++ b/src/mongo/db/fts/stemmer.cpp
@@ -63,5 +63,5 @@ StringData Stemmer::stem(StringData word) const {
return StringData((const char*)(sb_sym), sb_stemmer_length(_stemmer));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer.h b/src/mongo/db/fts/stemmer.h
index a5a15174a94..e3608071010 100644
--- a/src/mongo/db/fts/stemmer.h
+++ b/src/mongo/db/fts/stemmer.h
@@ -63,5 +63,5 @@ public:
private:
struct sb_stemmer* _stemmer;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stemmer_test.cpp b/src/mongo/db/fts/stemmer_test.cpp
index 42c67d7f97b..be09fe34b8c 100644
--- a/src/mongo/db/fts/stemmer_test.cpp
+++ b/src/mongo/db/fts/stemmer_test.cpp
@@ -47,5 +47,5 @@ TEST(English, Caps) {
ASSERT_EQUALS("unit", s.stem("united"));
ASSERT_EQUALS("Unite", s.stem("United"));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words.cpp b/src/mongo/db/fts/stop_words.cpp
index 48db6836736..39be67707bc 100644
--- a/src/mongo/db/fts/stop_words.cpp
+++ b/src/mongo/db/fts/stop_words.cpp
@@ -44,7 +44,7 @@ void loadStopWordMap(StringMap<std::set<std::string>>* m);
namespace {
StringMap<std::shared_ptr<StopWords>> StopWordsMap;
StopWords empty;
-}
+} // namespace
StopWords::StopWords() {}
@@ -70,5 +70,5 @@ MONGO_INITIALIZER(StopWords)(InitializerContext* context) {
}
return Status::OK();
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words.h b/src/mongo/db/fts/stop_words.h
index 22835300226..6c1c1cc07e1 100644
--- a/src/mongo/db/fts/stop_words.h
+++ b/src/mongo/db/fts/stop_words.h
@@ -61,5 +61,5 @@ public:
private:
StringMap<bool> _words; // Used as a set. The values have no meaning.
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/stop_words_test.cpp b/src/mongo/db/fts/stop_words_test.cpp
index 96b1e941d3b..f0fb8ec37b8 100644
--- a/src/mongo/db/fts/stop_words_test.cpp
+++ b/src/mongo/db/fts/stop_words_test.cpp
@@ -41,5 +41,5 @@ TEST(English, Basic1) {
ASSERT(englishStopWords->isStopWord("the"));
ASSERT(!englishStopWords->isStopWord("computer"));
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer.cpp b/src/mongo/db/fts/tokenizer.cpp
index 3de9eb00689..1463dc212bf 100644
--- a/src/mongo/db/fts/tokenizer.cpp
+++ b/src/mongo/db/fts/tokenizer.cpp
@@ -132,5 +132,5 @@ Token::Type Tokenizer::_type(char c) const {
return Token::TEXT;
}
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer.h b/src/mongo/db/fts/tokenizer.h
index 1a0e79d9425..426449724e8 100644
--- a/src/mongo/db/fts/tokenizer.h
+++ b/src/mongo/db/fts/tokenizer.h
@@ -70,5 +70,5 @@ private:
const StringData _raw;
bool _english;
};
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/tokenizer_test.cpp b/src/mongo/db/fts/tokenizer_test.cpp
index 9f09736587a..db61f3abc7d 100644
--- a/src/mongo/db/fts/tokenizer_test.cpp
+++ b/src/mongo/db/fts/tokenizer_test.cpp
@@ -117,5 +117,5 @@ TEST(Tokenizer, Quote1French) {
ASSERT_EQUALS("s", b.data.toString());
ASSERT_EQUALS("car", c.data.toString());
}
-}
-}
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/unicode/string.cpp b/src/mongo/db/fts/unicode/string.cpp
index 201c3539d61..8b97a671d92 100644
--- a/src/mongo/db/fts/unicode/string.cpp
+++ b/src/mongo/db/fts/unicode/string.cpp
@@ -61,7 +61,7 @@ inline void appendUtf8Codepoint(char32_t codepoint, OutputIterator* outputIt) {
*(*outputIt)++ = (((codepoint >> (6 * 0)) & 0x3f) | 0x80);
}
}
-}
+} // namespace
using linenoise_utf8::copyString32to8;
using linenoise_utf8::copyString8to32;
diff --git a/src/mongo/db/fts/unicode/string_test.cpp b/src/mongo/db/fts/unicode/string_test.cpp
index 2d3a386d1ec..a2943877b28 100644
--- a/src/mongo/db/fts/unicode/string_test.cpp
+++ b/src/mongo/db/fts/unicode/string_test.cpp
@@ -66,7 +66,7 @@ auto kCaseSensitive = String::kCaseSensitive;
auto kTurkish = CaseFoldMode::kTurkish;
auto kNormal = CaseFoldMode::kNormal;
-}
+} // namespace
// Macro to preserve line numbers and arguments in error messages.
diff --git a/src/mongo/db/geo/big_polygon.cpp b/src/mongo/db/geo/big_polygon.cpp
index f0f77ab51ed..f21c96d3faf 100644
--- a/src/mongo/db/geo/big_polygon.cpp
+++ b/src/mongo/db/geo/big_polygon.cpp
@@ -228,4 +228,4 @@ bool BigSimplePolygon::Decode(Decoder* const decoder) {
bool BigSimplePolygon::DecodeWithinScope(Decoder* const decoder) {
MONGO_UNREACHABLE;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/geo/big_polygon.h b/src/mongo/db/geo/big_polygon.h
index bc0e4ce75f1..6df8d3e4fd9 100644
--- a/src/mongo/db/geo/big_polygon.h
+++ b/src/mongo/db/geo/big_polygon.h
@@ -115,4 +115,4 @@ private:
mutable std::unique_ptr<S2Polyline> _borderLine;
mutable std::unique_ptr<S2Polygon> _borderPoly;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp
index b29b7c3eb4a..2a42706906d 100644
--- a/src/mongo/db/geo/big_polygon_test.cpp
+++ b/src/mongo/db/geo/big_polygon_test.cpp
@@ -36,8 +36,8 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
// Helper to build a vector of S2Point
@@ -81,8 +81,7 @@ typedef PointBuilder points;
TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// A 10x10 square centered at [0,0]
S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
@@ -95,8 +94,7 @@ TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,20]
BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
- << LatLng(-10.0, 10.0)
- << LatLng(-10.0, 30.0)));
+ << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
@@ -108,18 +106,15 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// A 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(holePoly));
@@ -127,8 +122,7 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(holePoly));
ASSERT_TRUE(bigPoly24.Intersects(holePoly));
@@ -139,12 +133,10 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -152,24 +144,21 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
// A 16X16 square centered at [0,0] containing the shell
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(shellPoly));
ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
// Try a big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(shellPoly));
ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
// Try a big polygon smaller than the shell.
BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
- << LatLng(-4.0, -4.0)
- << LatLng(-4.0, 4.0)));
+ << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly8.Contains(shellPoly));
ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
@@ -178,8 +167,7 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
TEST(BigSimplePolygon, BasicComplement) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 square centered at [0,0]
@@ -192,8 +180,7 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 10x10 square centered at [0,20], contained by bigPoly20Comp
S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
- << LatLng(15.0, 15.0)
- << LatLng(15.0, 25.0)));
+ << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
ASSERT(bigPoly20Comp.Contains(poly10Contained));
@@ -202,8 +189,7 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
// which is not allowed by S2.
S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
@@ -212,8 +198,7 @@ TEST(BigSimplePolygon, BasicComplement) {
TEST(BigSimplePolygon, BasicIntersects) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20.Invert();
// A 10x10 square centered at [10,10] (partial overlap)
@@ -228,19 +213,16 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// 1. BigPolygon doesn't touch holePoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
@@ -249,8 +231,7 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 2. BigPolygon intersects holePoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
@@ -259,8 +240,7 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 3. BigPolygon contains holePoly
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
@@ -268,9 +248,9 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 4. BigPolygon contains the right half of holePoly
// Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
+ BigSimplePolygon bigPoly40CompOffset(loop(points()
+ << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0) << LatLng(-20.0, 40.0)));
bigPoly40CompOffset.Invert();
ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40CompOffset.Contains(holePoly));
@@ -282,12 +262,10 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -296,8 +274,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 1. BigPolygon doesn't touch shellPoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
@@ -306,8 +283,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 2. BigPolygon intersects shellPoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
@@ -316,8 +292,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 3. BigPolygon contains shellPoly's outer ring
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
@@ -325,9 +300,9 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 4. BigPolygon contains the right half of shellPoly
// Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
+ BigSimplePolygon bigPoly40CompOffset(loop(points()
+ << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0) << LatLng(-20.0, 40.0)));
bigPoly40CompOffset.Invert();
ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40CompOffset.Contains(shellPoly));
@@ -335,8 +310,7 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 5. BigPolygon contain shellPoly (CW)
BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0) << LatLng(6.0, 8.0)
- << LatLng(-6.0, 8.0)
- << LatLng(-6.0, 6.0)));
+ << LatLng(-6.0, 8.0) << LatLng(-6.0, 6.0)));
ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
@@ -345,13 +319,11 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
TEST(BigSimplePolygon, BasicWinding) {
// A 20x20 square centered at [0,0] (CCW)
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// Everything *not* in a 20x20 square centered at [0,0] (CW)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
@@ -360,13 +332,11 @@ TEST(BigSimplePolygon, BasicWinding) {
TEST(BigSimplePolygon, LineRelations) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT(bigPoly20.Contains(line10));
@@ -386,14 +356,12 @@ TEST(BigSimplePolygon, LineRelations) {
TEST(BigSimplePolygon, LineRelationsComplement) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -406,8 +374,7 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
// A 10x10 line circling [0,0]
S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0)
- << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
ASSERT_TRUE(bigPoly20Comp.Contains(line30));
ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
}
@@ -415,13 +382,11 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
TEST(BigSimplePolygon, LineRelationsWinding) {
// Everything *not* in a 20x20 square centered at [0,0] (CW winding)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0)
- << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -431,13 +396,11 @@ TEST(BigSimplePolygon, LineRelationsWinding) {
TEST(BigSimplePolygon, PolarContains) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0]
S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0)
- << LatLng(85.0, -90.0)));
+ << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
@@ -448,8 +411,7 @@ TEST(BigSimplePolygon, PolarContains) {
TEST(BigSimplePolygon, PolarContainsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
// north pole
@@ -468,8 +430,7 @@ TEST(BigSimplePolygon, PolarContainsWithHoles) {
TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
// 5-degree square with 1-degree-wide concentric hole, centered on [80.0, 0.0]
vector<S2Loop*> loops;
@@ -512,8 +473,7 @@ void checkConsistency(const BigSimplePolygon& bigPoly,
TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Vertex point and collinear point
@@ -522,12 +482,10 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Polygon shares one edge
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0)
- << LatLng(80.0, -10.0)));
+ << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0)
- << LatLng(50.0, -10.0)));
+ << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
// Line
S2Polyline line(
@@ -538,12 +496,9 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, point);
@@ -571,18 +526,15 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Polygon
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 10.0)
- << LatLng(80.0, 10.0)));
+ << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0)
- << LatLng(50.0, 10.0)));
+ << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
// Line
S2Polyline line(
pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(0.0, 10.0)));
@@ -592,12 +544,9 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, poly);
@@ -616,4 +565,4 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
checkConsistency(bigPoly, expandedBigPoly, line);
checkConsistency(bigPoly, expandedBigPoly, collinearLine);
}
-}
+} // namespace
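Note: in big_polygon_test.cpp the same packing applies to the S2 point-builder chains. LatLng operands that previously sat one per continuation line are now paired up, still aligned under the first `<<`. Fragment from the first hunk of this file:

    // A 20x20 square centered at [0,0]
    BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
                                             << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));

In the two bigPoly40CompOffset hunks the chain would not fit even when packed, so clang-format instead breaks immediately after points() and indents the whole chain one step.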
diff --git a/src/mongo/db/geo/geometry_container.cpp b/src/mongo/db/geo/geometry_container.cpp
index 5b4ade3d062..97ae2533fc8 100644
--- a/src/mongo/db/geo/geometry_container.cpp
+++ b/src/mongo/db/geo/geometry_container.cpp
@@ -46,8 +46,9 @@ bool GeometryContainer::isPoint() const {
bool GeometryContainer::supportsContains() const {
return NULL != _polygon || NULL != _box || NULL != _cap || NULL != _multiPolygon ||
- (NULL != _geometryCollection && (_geometryCollection->polygons.vector().size() > 0 ||
- _geometryCollection->multiPolygons.vector().size() > 0));
+ (NULL != _geometryCollection &&
+ (_geometryCollection->polygons.vector().size() > 0 ||
+ _geometryCollection->multiPolygons.vector().size() > 0));
}
bool GeometryContainer::hasS2Region() const {
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index db9e68a0c25..3640d538df9 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -231,8 +231,7 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
"Secondary loops not contained by first exterior loop - "
"secondary loops must be holes: "
<< coordinateElt.toString(false)
- << " first loop: "
- << elem.Obj().firstElement().toString(false));
+ << " first loop: " << elem.Obj().firstElement().toString(false));
}
}
diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp
index 921ba70e6d6..01eba23667a 100644
--- a/src/mongo/db/geo/geoparser_test.cpp
+++ b/src/mongo/db/geo/geoparser_test.cpp
@@ -434,4 +434,4 @@ TEST(GeoParser, parseGeometryCollection) {
ASSERT_TRUE(gc.supportsContains());
}
}
-}
+} // namespace
diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp
index f74a403f77b..c5b1a043677 100644
--- a/src/mongo/db/geo/hash.cpp
+++ b/src/mongo/db/geo/hash.cpp
@@ -667,19 +667,13 @@ Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
if (params->bits < 1 || params->bits > 32) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "bits for hash must be > 0 and <= 32, "
- << "but "
- << params->bits
- << " bits were specified");
+ << "but " << params->bits << " bits were specified");
}
if (params->min >= params->max) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "region for hash must be valid and have positive area, "
- << "but ["
- << params->min
- << ", "
- << params->max
- << "] "
+ << "but [" << params->min << ", " << params->max << "] "
<< "was specified");
}
@@ -774,8 +768,7 @@ GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
GeoHash GeoHashConverter::hash(double x, double y) const {
uassert(16433,
str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
- << " ]"
- << causedBy(BSON_ARRAY(x << y).toString()),
+ << " ]" << causedBy(BSON_ARRAY(x << y).toString()),
x <= _params.max && x >= _params.min && y <= _params.max && y >= _params.min);
return GeoHash(convertToHashScale(x), convertToHashScale(y), _params.bits);
diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp
index 1681803083f..288a0895d02 100644
--- a/src/mongo/db/geo/hash_test.cpp
+++ b/src/mongo/db/geo/hash_test.cpp
@@ -549,4 +549,4 @@ TEST(GeoHash, ClearUnusedBitsIsNoopIfNoBitsAreUnused) {
GeoHash other = geoHash.parent(32);
ASSERT_EQUALS(geoHash, other);
}
-}
+} // namespace
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index 4b170dbc1d3..2cdb65673f8 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -332,7 +332,7 @@ void getDifferenceInternal(GeoHash cellId,
}
}
}
-}
+} // namespace
void R2CellUnion::getDifference(const R2CellUnion& cellUnion) {
std::vector<GeoHash> diffCellIds;
diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h
index ca400eaa829..be466668110 100644
--- a/src/mongo/db/geo/shapes.h
+++ b/src/mongo/db/geo/shapes.h
@@ -64,8 +64,9 @@ inline double rad2deg(const double rad) {
inline double computeXScanDistance(double y, double maxDistDegrees) {
// TODO: this overestimates for large maxDistDegrees far from the equator
- return maxDistDegrees / std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
- cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
+ return maxDistDegrees /
+ std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
+ cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
}
bool isValidLngLat(double lng, double lat);
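Note: the shapes.h and geometry_container.cpp hunks illustrate the new wrapping rule for other binary operators. When an expression exceeds the column limit, clang-format-7.0.1 prefers to break after the operator and push the entire right-hand operand onto a continuation line indented one step, rather than aligning it under the left operand. Fragment from the shapes.h hunk:

    return maxDistDegrees /
        std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
                 cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));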
diff --git a/src/mongo/db/hasher.h b/src/mongo/db/hasher.h
index 20519e6a58f..a4e86a1b5aa 100644
--- a/src/mongo/db/hasher.h
+++ b/src/mongo/db/hasher.h
@@ -71,4 +71,4 @@ public:
private:
BSONElementHasher();
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp
index 63ec64417af..fd02d21e12c 100644
--- a/src/mongo/db/hasher_test.cpp
+++ b/src/mongo/db/hasher_test.cpp
@@ -272,8 +272,7 @@ TEST(BSONElementHasher, HashString) {
TEST(BSONElementHasher, HashObject) {
BSONObj o = BSON("check" << BSON("a"
<< "abc"
- << "b"
- << 123LL));
+ << "b" << 123LL));
ASSERT_EQUALS(hashIt(o), 4771603801758380216LL);
o = BSON("check" << BSONObj());
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index a86843f80c6..95dbe8fd680 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -98,9 +98,7 @@ BSONElement BtreeKeyGenerator::_extractNextElement(const BSONObj& obj,
16746,
str::stream() << "Ambiguous field name found in array (do not use numeric field names in "
"embedded elements in an array), field: '"
- << arrField.fieldName()
- << "' for array: "
- << positionalInfo.arrayObj,
+ << arrField.fieldName() << "' for array: " << positionalInfo.arrayObj,
!haveObjField || !positionalInfo.hasPositionallyIndexedElt());
*arrayNestedArray = false;
diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp
index 3301cc3c861..da569fdb203 100644
--- a/src/mongo/db/index/btree_key_generator_test.cpp
+++ b/src/mongo/db/index/btree_key_generator_test.cpp
@@ -43,9 +43,9 @@
#include "mongo/util/log.h"
using namespace mongo;
-using std::unique_ptr;
using std::cout;
using std::endl;
+using std::unique_ptr;
using std::vector;
namespace {
diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp
index e47ef01a5e6..4dc0ebbb8d9 100644
--- a/src/mongo/db/index/expression_params.cpp
+++ b/src/mongo/db/index/expression_params.cpp
@@ -193,14 +193,8 @@ void ExpressionParams::initialize2dsphereParams(const BSONObj& infoObj,
massert(17395,
stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
- << out->indexVersion
- << " }, only support versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "]",
+ << out->indexVersion << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "," << S2_INDEX_VERSION_3 << "]",
out->indexVersion == S2_INDEX_VERSION_3 || out->indexVersion == S2_INDEX_VERSION_2 ||
out->indexVersion == S2_INDEX_VERSION_1);
}
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index b118867a899..09b1b8e1c7b 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -92,8 +92,8 @@ const int TempKeyMaxSize = 1024;
// TODO SERVER-36385: Completely remove the key size check in 4.4
Status checkKeySize(const BSONObj& key) {
if (key.objsize() >= TempKeyMaxSize) {
- std::string msg = str::stream() << "Index key too large to index, failing " << key.objsize()
- << ' ' << redact(key);
+ std::string msg = str::stream()
+ << "Index key too large to index, failing " << key.objsize() << ' ' << redact(key);
return Status(ErrorCodes::KeyTooLong, msg);
}
return Status::OK();
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index f308c4251cb..fc6f6067484 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -397,8 +397,8 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// other writes making up this operation are given. When index builds can cope with
// replication rollbacks, side table writes associated with a CUD operation should
// remain/rollback along with the corresponding oplog entry.
- toInsert.emplace_back(BSON(
- "op" << (op == Op::kInsert ? "i" : "d") << "key" << key << "recordId" << loc.repr()));
+ toInsert.emplace_back(BSON("op" << (op == Op::kInsert ? "i" : "d") << "key" << key
+ << "recordId" << loc.repr()));
}
if (op == Op::kInsert) {
@@ -408,9 +408,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
for (const auto& key : multikeyMetadataKeys) {
toInsert.emplace_back(BSON("op"
<< "i"
- << "key"
- << key
- << "recordId"
+ << "key" << key << "recordId"
<< static_cast<int64_t>(
RecordId::ReservedId::kWildcardMultikeyMetadataId)));
}
@@ -421,7 +419,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// operations outside this table and in the same transaction are rolled back, this counter also
// needs to be rolled back.
opCtx->recoveryUnit()->onRollback(
- [ this, size = toInsert.size() ] { _sideWritesCounter.fetchAndSubtract(size); });
+ [this, size = toInsert.size()] { _sideWritesCounter.fetchAndSubtract(size); });
std::vector<Record> records;
for (auto& doc : toInsert) {
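Note: index_build_interceptor.cpp also picks up a lambda-formatting fix. The previous formatter padded a capture list containing an init-capture as `[ this, size = toInsert.size() ]`; 7.0.1 drops the interior spaces. Fragment from the hunk above:

    opCtx->recoveryUnit()->onRollback(
        [this, size = toInsert.size()] { _sideWritesCounter.fetchAndSubtract(size); });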
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index 18f98cc72cf..f8afcd4f56a 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -121,9 +121,9 @@ public:
bool areAllConstraintsChecked(OperationContext* opCtx) const;
/**
- * When an index builder wants to commit, use this to retrieve any recorded multikey paths
- * that were tracked during the build.
- */
+ * When an index builder wants to commit, use this to retrieve any recorded multikey paths
+ * that were tracked during the build.
+ */
boost::optional<MultikeyPaths> getMultikeyPaths() const;
const std::string& getSideWritesTableIdent() const;
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index e400c3c0df2..f8d70834170 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -64,7 +64,7 @@ void populateOptionsMap(std::map<StringData, BSONElement>& theMap, const BSONObj
fieldName == IndexDescriptor::kSparseFieldName || // checked specially
fieldName == IndexDescriptor::kUniqueFieldName || // check specially
fieldName == IndexDescriptor::kNamespaceFieldName // removed in 4.4
- ) {
+ ) {
continue;
}
theMap[fieldName] = e;
@@ -155,8 +155,7 @@ Status IndexDescriptor::isIndexVersionAllowedForCreation(
}
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid index specification " << indexSpec
- << "; cannot create an index with v="
- << static_cast<int>(indexVersion)};
+ << "; cannot create an index with v=" << static_cast<int>(indexVersion)};
}
IndexVersion IndexDescriptor::getDefaultIndexVersion() {
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index 731e0a14123..615838142c8 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -96,30 +96,18 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
if (!indexVersionElt.isNumber()) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid type for geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
if (indexVersionElt.type() == BSONType::NumberDouble &&
!std::isnormal(indexVersionElt.numberDouble())) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid value for geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
const auto indexVersion = indexVersionElt.numberLong();
@@ -127,15 +115,9 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
indexVersion != S2_INDEX_VERSION_3) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : "
- << indexVersionElt
- << " }, only versions: ["
- << S2_INDEX_VERSION_1
- << ","
- << S2_INDEX_VERSION_2
- << ","
- << S2_INDEX_VERSION_3
- << "] are supported"};
+ << " : " << indexVersionElt << " }, only versions: ["
+ << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
+ << S2_INDEX_VERSION_3 << "] are supported"};
}
return specObj;
diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp
index b57a2b58c43..93fc8ac545d 100644
--- a/src/mongo/db/index/s2_key_generator_test.cpp
+++ b/src/mongo/db/index/s2_key_generator_test.cpp
@@ -99,8 +99,7 @@ void assertMultikeyPathsEqual(const MultikeyPaths& expectedMultikeyPaths,
const MultikeyPaths& actualMultikeyPaths) {
if (expectedMultikeyPaths != actualMultikeyPaths) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
}
@@ -109,13 +108,11 @@ long long getCellID(int x, int y, bool multiPoint = false) {
if (multiPoint) {
obj = BSON("a" << BSON("type"
<< "MultiPoint"
- << "coordinates"
- << BSON_ARRAY(BSON_ARRAY(x << y))));
+ << "coordinates" << BSON_ARRAY(BSON_ARRAY(x << y))));
} else {
obj = BSON("a" << BSON("type"
<< "Point"
- << "coordinates"
- << BSON_ARRAY(x << y)));
+ << "coordinates" << BSON_ARRAY(x << y)));
}
BSONObj keyPattern = fromjson("{a: '2dsphere'}");
BSONObj infoObj = fromjson("{key: {a: '2dsphere'}, '2dsphereIndexVersion': 3}");
@@ -244,8 +241,7 @@ TEST(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldBeforeGeoField) {
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
expectedKeys.insert(BSON(""
<< "gnirts"
- << ""
- << getCellID(0, 0)));
+ << "" << getCellID(0, 0)));
assertKeysetsEqual(expectedKeys, actualKeys);
assertMultikeyPathsEqual(MultikeyPaths{std::set<size_t>{}, std::set<size_t>{}},
@@ -267,9 +263,7 @@ TEST(S2KeyGeneratorTest, CollationAppliedToAllNonGeoStringFields) {
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
expectedKeys.insert(BSON(""
<< "gnirts"
- << ""
- << getCellID(0, 0)
- << ""
+ << "" << getCellID(0, 0) << ""
<< "2gnirts"));
assertKeysetsEqual(expectedKeys, actualKeys);
@@ -389,8 +383,9 @@ TEST(S2KeyGeneratorTest, CollationAppliedToStringsInNestedObjects) {
ExpressionKeysPrivate::getS2Keys(obj, keyPattern, params, &actualKeys, &actualMultikeyPaths);
BSONObjSet expectedKeys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
- expectedKeys.insert(BSON("" << getCellID(0, 0) << "" << BSON("c"
- << "gnirts")));
+ expectedKeys.insert(BSON("" << getCellID(0, 0) << ""
+ << BSON("c"
+ << "gnirts")));
assertKeysetsEqual(expectedKeys, actualKeys);
assertMultikeyPathsEqual(MultikeyPaths{std::set<size_t>{}, std::set<size_t>{}},
diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp
index 1ec25d713a8..485a6a3d0a5 100644
--- a/src/mongo/db/index/sort_key_generator_test.cpp
+++ b/src/mongo/db/index/sort_key_generator_test.cpp
@@ -147,8 +147,7 @@ DEATH_TEST(SortKeyGeneratorTest,
MONGO_COMPILER_VARIABLE_UNUSED auto ignored =
stdx::make_unique<SortKeyGenerator>(BSON("a" << BSON("$meta"
<< "textScore"
- << "extra"
- << 1)),
+ << "extra" << 1)),
nullptr);
}
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index ba2fc769a25..11eeeea971c 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -114,4 +114,4 @@ private:
std::string _name; // name of this builder, not related to the index
static AtomicWord<unsigned> _indexBuildCount;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index bb4d2ae44c4..fb136350b84 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -85,8 +85,7 @@ void checkShardKeyRestrictions(OperationContext* opCtx,
const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
uassert(ErrorCodes::CannotCreateIndex,
str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON(),
+ << " with shard key pattern " << shardKeyPattern.toBSON(),
shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
@@ -163,9 +162,9 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::startIndexRe
for (auto& spec : specs) {
std::string name = spec.getStringField(IndexDescriptor::kIndexNameFieldName);
if (name.empty()) {
- return Status(
- ErrorCodes::CannotCreateIndex,
- str::stream() << "Cannot create an index for a spec '" << spec
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream()
+ << "Cannot create an index for a spec '" << spec
<< "' without a non-empty string value for the 'name' field");
}
indexNames.push_back(name);
@@ -378,8 +377,7 @@ void IndexBuildsCoordinator::assertNoIndexBuildInProgress() const {
stdx::unique_lock<stdx::mutex> lk(_mutex);
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
str::stream() << "cannot perform operation: there are currently "
- << _allIndexBuilds.size()
- << " index builds running.",
+ << _allIndexBuilds.size() << " index builds running.",
_allIndexBuilds.size() == 0);
}
@@ -494,12 +492,11 @@ Status IndexBuildsCoordinator::_registerIndexBuild(
auto registeredIndexBuilds =
collIndexBuildsIt->second->getIndexBuildState(lk, name);
return Status(ErrorCodes::IndexBuildAlreadyInProgress,
- str::stream() << "There's already an index with name '" << name
- << "' being built on the collection: "
- << " ( "
- << replIndexBuildState->collectionUUID
- << " ). Index build: "
- << registeredIndexBuilds->buildUUID);
+ str::stream()
+ << "There's already an index with name '" << name
+ << "' being built on the collection: "
+ << " ( " << replIndexBuildState->collectionUUID
+ << " ). Index build: " << registeredIndexBuilds->buildUUID);
}
}
}
@@ -844,8 +841,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner(OperationContext* opCtx,
}
fassert(51101,
status.withContext(str::stream() << "Index build: " << replState->buildUUID
- << "; Database: "
- << replState->dbName));
+ << "; Database: " << replState->dbName));
}
uassertStatusOK(status);
@@ -925,21 +921,13 @@ void IndexBuildsCoordinator::_buildIndex(OperationContext* opCtx,
invariant(db,
str::stream() << "Database not found after relocking. Index build: "
- << replState->buildUUID
- << ": "
- << nss
- << " ("
- << replState->collectionUUID
- << ")");
+ << replState->buildUUID << ": " << nss << " ("
+ << replState->collectionUUID << ")");
invariant(db->getCollection(opCtx, nss),
str::stream() << "Collection not found after relocking. Index build: "
- << replState->buildUUID
- << ": "
- << nss
- << " ("
- << replState->collectionUUID
- << ")");
+ << replState->buildUUID << ": " << nss << " ("
+ << replState->collectionUUID << ")");
// Perform the third and final drain after releasing a shared lock and reacquiring an
// exclusive lock on the database.
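Note: index_builds_coordinator.cpp shows the remaining wrapping pattern. When even a packed chain cannot fit after `str::stream()`, clang-format-7.0.1 moves `str::stream()` onto its own line and indents the whole `<<` chain beneath it, instead of reflowing the enclosing call. Fragment from the startIndexRebuild hunk above:

    return Status(ErrorCodes::CannotCreateIndex,
                  str::stream()
                      << "Cannot create an index for a spec '" << spec
                      << "' without a non-empty string value for the 'name' field");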
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index b0bea23afa9..470f6fe3d27 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -245,8 +245,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
return Status(ErrorCodes::IndexNotFound,
str::stream()
<< "Cannot set a new commit quorum on an index build in collection '"
- << nss
- << "' without providing any indexes.");
+ << nss << "' without providing any indexes.");
}
AutoGetCollectionForRead autoColl(opCtx, nss);
@@ -280,10 +279,9 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
buildState->indexNames.begin(), buildState->indexNames.end(), indexNames.begin());
if (buildState->indexNames.size() != indexNames.size() || !equal) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "Provided indexes are not all being "
- << "built by the same index builder in collection '"
- << nss
- << "'.");
+ str::stream()
+ << "Provided indexes are not all being "
+ << "built by the same index builder in collection '" << nss << "'.");
}
// See if the new commit quorum is satisfiable.
diff --git a/src/mongo/db/index_builds_coordinator_mongod_test.cpp b/src/mongo/db/index_builds_coordinator_mongod_test.cpp
index 5dd6938f730..75e076c39ec 100644
--- a/src/mongo/db/index_builds_coordinator_mongod_test.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod_test.cpp
@@ -96,8 +96,7 @@ std::vector<BSONObj> makeSpecs(const NamespaceString& nss, std::vector<std::stri
std::vector<BSONObj> indexSpecs;
for (auto keyName : keys) {
indexSpecs.push_back(BSON("ns" << nss.toString() << "v" << 2 << "key" << BSON(keyName << 1)
- << "name"
- << (keyName + "_1")));
+ << "name" << (keyName + "_1")));
}
return indexSpecs;
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 480016eed12..05c90bb7a65 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -213,8 +213,8 @@ MONGO_INITIALIZER_GENERAL(
("default"))
(InitializerContext*) {
using logger::LogManager;
- using logger::MessageEventEphemeral;
using logger::MessageEventDetailsEncoder;
+ using logger::MessageEventEphemeral;
using logger::MessageEventWithContextEncoder;
using logger::MessageLogDomain;
using logger::RotatableFileAppender;
@@ -254,8 +254,8 @@ MONGO_INITIALIZER_GENERAL(
exists = boost::filesystem::exists(absoluteLogpath);
} catch (boost::filesystem::filesystem_error& e) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "Failed probe for \"" << absoluteLogpath << "\": "
- << e.code().message());
+ str::stream() << "Failed probe for \"" << absoluteLogpath
+ << "\": " << e.code().message());
}
if (exists) {
@@ -276,9 +276,7 @@ MONGO_INITIALIZER_GENERAL(
return Status(ErrorCodes::FileRenameFailed,
str::stream()
<< "Could not rename preexisting log file \""
- << absoluteLogpath
- << "\" to \""
- << renameTarget
+ << absoluteLogpath << "\" to \"" << renameTarget
<< "\"; run with --logappend or manually remove file: "
<< ec.message());
}
diff --git a/src/mongo/db/initialize_server_security_state.cpp b/src/mongo/db/initialize_server_security_state.cpp
index b5d660869c4..cb9c29b63bd 100644
--- a/src/mongo/db/initialize_server_security_state.cpp
+++ b/src/mongo/db/initialize_server_security_state.cpp
@@ -64,9 +64,7 @@ bool initializeServerSecurityGlobalState(ServiceContext* service) {
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
auth::setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509"
- << saslCommandUserDBFieldName
- << "$external"
+ << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName.toString()));
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 425d100f183..ab0b749c51e 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -49,9 +49,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
namespace {
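Note: several files — big_polygon_test.cpp, btree_key_generator_test.cpp, initialize_server_global_state.cpp, and introspect.cpp above — change only in the order of their using-declarations. This is consistent with clang-format's SortUsingDeclarations option, which alphabetizes each contiguous block of using declarations, so std::unique_ptr sinks below std::endl and std::string:

    using std::endl;
    using std::string;
    using std::unique_ptr;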
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index 67fd8b08460..43a46da79de 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -96,8 +96,7 @@ BSONObj KeyPattern::extendRangeBound(const BSONObj& bound, bool makeUpperInclusi
BSONElement patElt = pat.next();
massert(16634,
str::stream() << "field names of bound " << bound
- << " do not match those of keyPattern "
- << _pattern,
+ << " do not match those of keyPattern " << _pattern,
srcElt.fieldNameStringData() == patElt.fieldNameStringData());
newBound.append(srcElt);
}
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 45fef6a9e6f..fbb7c4e7af6 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -142,4 +142,4 @@ TEST(KeyPattern, GlobalMinMax) {
ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
}
-}
+} // namespace
diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp
index 20e3273af35..c97697aea41 100644
--- a/src/mongo/db/keys_collection_cache.cpp
+++ b/src/mongo/db/keys_collection_cache.cpp
@@ -106,10 +106,8 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long key
return {ErrorCodes::KeyNotFound,
str::stream() << "Cache Reader No keys found for " << _purpose
- << " that is valid for time: "
- << forThisTime.toString()
- << " with id: "
- << keyId};
+ << " that is valid for time: " << forThisTime.toString()
+ << " with id: " << keyId};
}
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime& forThisTime) {
diff --git a/src/mongo/db/keys_collection_client.h b/src/mongo/db/keys_collection_client.h
index 54ac6fedc44..debff147f53 100644
--- a/src/mongo/db/keys_collection_client.h
+++ b/src/mongo/db/keys_collection_client.h
@@ -56,8 +56,8 @@ public:
bool useMajority) = 0;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document to the storage
+ */
virtual Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) = 0;
/**
diff --git a/src/mongo/db/keys_collection_client_direct.h b/src/mongo/db/keys_collection_client_direct.h
index 9ad5dbb7490..6e96d8e94ed 100644
--- a/src/mongo/db/keys_collection_client_direct.h
+++ b/src/mongo/db/keys_collection_client_direct.h
@@ -55,8 +55,8 @@ public:
bool useMajority) override;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document to the storage
+ */
Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) override;
/**
diff --git a/src/mongo/db/keys_collection_client_sharded.h b/src/mongo/db/keys_collection_client_sharded.h
index eabd0f2051d..111948e0139 100644
--- a/src/mongo/db/keys_collection_client_sharded.h
+++ b/src/mongo/db/keys_collection_client_sharded.h
@@ -49,8 +49,8 @@ public:
bool useMajority) override;
/**
- * Directly inserts a key document to the storage
- */
+ * Directly inserts a key document to the storage
+ */
Status insertNewKey(OperationContext* opCtx, const BSONObj& doc) override;
bool supportsMajorityReads() const final {
diff --git a/src/mongo/db/log_process_details.cpp b/src/mongo/db/log_process_details.cpp
index 9435fc24485..8f7bd8cf5ba 100644
--- a/src/mongo/db/log_process_details.cpp
+++ b/src/mongo/db/log_process_details.cpp
@@ -82,4 +82,4 @@ void logProcessDetailsForLogRotate(ServiceContext* serviceContext) {
logProcessDetails();
}
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/logical_clock.cpp b/src/mongo/db/logical_clock.cpp
index fbd87f49421..415566094d2 100644
--- a/src/mongo/db/logical_clock.cpp
+++ b/src/mongo/db/logical_clock.cpp
@@ -50,7 +50,7 @@ bool lessThanOrEqualToMaxPossibleTime(LogicalTime time, uint64_t nTicks) {
return time.asTimestamp().getSecs() <= LogicalClock::kMaxSignedInt &&
time.asTimestamp().getInc() <= (LogicalClock::kMaxSignedInt - nTicks);
}
-}
+} // namespace
LogicalTime LogicalClock::getClusterTimeForReplicaSet(OperationContext* opCtx) {
if (getGlobalReplSettings().usingReplSets()) {
@@ -166,8 +166,7 @@ Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
return Status(ErrorCodes::ClusterTimeFailsRateLimiter,
str::stream() << "New cluster time, " << newTimeSecs
<< ", is too far from this node's wall clock time, "
- << wallClockSecs
- << ".");
+ << wallClockSecs << ".");
}
uassert(40484,
diff --git a/src/mongo/db/logical_session_cache_test.cpp b/src/mongo/db/logical_session_cache_test.cpp
index d0f41415e8c..b604d776903 100644
--- a/src/mongo/db/logical_session_cache_test.cpp
+++ b/src/mongo/db/logical_session_cache_test.cpp
@@ -349,8 +349,9 @@ TEST_F(LogicalSessionCacheTest, RefreshMatrixSessionState) {
failText << " session case failed: ";
ASSERT(sessions()->has(ids[i]) == testCases[i].inCollection)
- << failText.str() << (testCases[i].inCollection ? "session wasn't in collection"
- : "session was in collection");
+ << failText.str()
+ << (testCases[i].inCollection ? "session wasn't in collection"
+ : "session was in collection");
ASSERT((service()->matchKilled(ids[i]) != nullptr) == testCases[i].killed)
<< failText.str()
<< (testCases[i].killed ? "session wasn't killed" : "session was killed");
diff --git a/src/mongo/db/logical_session_id_test.cpp b/src/mongo/db/logical_session_id_test.cpp
index 160e718201d..4e679b3639e 100644
--- a/src/mongo/db/logical_session_id_test.cpp
+++ b/src/mongo/db/logical_session_id_test.cpp
@@ -284,14 +284,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdAndTransact
LogicalSessionFromClient lsid;
lsid.setId(UUID::gen());
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- true,
- true);
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ true,
+ true);
ASSERT(_opCtx->getLogicalSessionId());
ASSERT_EQ(lsid.getId(), _opCtx->getLogicalSessionId()->getId());
@@ -306,14 +306,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IsReplSetMemberOrMon
lsid.setId(UUID::gen());
ASSERT_THROWS_CODE(
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- false,
- true),
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ false,
+ true),
AssertionException,
ErrorCodes::IllegalOperation);
}
@@ -324,14 +324,14 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SupportsDocLockingFa
lsid.setId(UUID::gen());
ASSERT_THROWS_CODE(
- initializeOperationSessionInfo(
- _opCtx.get(),
- BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField"
- << "TestField"),
- true,
- true,
- true,
- false),
+ initializeOperationSessionInfo(_opCtx.get(),
+ BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber"
+ << 100LL << "OtherField"
+ << "TestField"),
+ true,
+ true,
+ true,
+ false),
AssertionException,
ErrorCodes::IllegalOperation);
}
diff --git a/src/mongo/db/logical_time_test.cpp b/src/mongo/db/logical_time_test.cpp
index a03497b416f..19c3d5832b5 100644
--- a/src/mongo/db/logical_time_test.cpp
+++ b/src/mongo/db/logical_time_test.cpp
@@ -28,8 +28,8 @@
*/
-#include "mongo/db/logical_time.h"
#include "mongo/bson/timestamp.h"
+#include "mongo/db/logical_time.h"
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
#include "mongo/platform/basic.h"
@@ -119,10 +119,10 @@ TEST(LogicalTime, appendAsOperationTime) {
}
TEST(LogicalTime, fromOperationTime) {
- const auto actualTime = LogicalTime::fromOperationTime(BSON("someOtherCommandParameter"
- << "Value"
- << "operationTime"
- << Timestamp(1)));
+ const auto actualTime =
+ LogicalTime::fromOperationTime(BSON("someOtherCommandParameter"
+ << "Value"
+ << "operationTime" << Timestamp(1)));
ASSERT_EQ(LogicalTime(Timestamp(1)), actualTime);
}
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 364ebdd68d7..649eb1a6e77 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -95,4 +95,4 @@ void MatchExpression::addDependencies(DepsTracker* deps) const {
_doAddDependencies(deps);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index e3e3d652b4a..de5878c115c 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -365,4 +365,4 @@ private:
MatchType _matchType;
std::unique_ptr<TagData> _tagData;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index af7205ef822..51a06e2b81c 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -238,4 +238,4 @@ bool SizeMatchExpression::equivalent(const MatchExpression* other) const {
// ------------------
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index 471549f105e..959d536632f 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -117,8 +117,8 @@ private:
class ElemMatchValueMatchExpression : public ArrayMatchingMatchExpression {
public:
/**
- * This constructor takes ownership of 'sub.'
- */
+ * This constructor takes ownership of 'sub.'
+ */
ElemMatchValueMatchExpression(StringData path, MatchExpression* sub);
explicit ElemMatchValueMatchExpression(StringData path);
virtual ~ElemMatchValueMatchExpression();
@@ -207,4 +207,4 @@ private:
int _size; // >= 0 real, < 0, nothing will match
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 99081c3e910..f52f331989e 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -132,8 +132,8 @@ Status GeoExpression::parseFrom(const BSONObj& obj) {
if (GeoExpression::INTERSECT == predicate) {
if (!geoContainer->supportsProject(SPHERE)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "$geoIntersect not supported with provided geometry: "
- << obj);
+ str::stream()
+ << "$geoIntersect not supported with provided geometry: " << obj);
}
geoContainer->projectInto(SPHERE);
}
@@ -218,8 +218,7 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "geo near accepts just one argument when querying for a GeoJSON "
- << "point. Extra field found: "
- << objIt.next());
+ << "point. Extra field found: " << objIt.next());
}
// Parse "new" near:
@@ -247,9 +246,7 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "invalid point in geo near query $geometry argument: "
- << embeddedObj
- << " "
- << status.reason());
+ << embeddedObj << " " << status.reason());
}
uassert(16681,
"$near requires geojson point, given " + embeddedObj.toString(),
@@ -326,16 +323,16 @@ Status GeoNearExpression::parseFrom(const BSONObj& obj) {
//
/**
-* Takes ownership of the passed-in GeoExpression.
-*/
+ * Takes ownership of the passed-in GeoExpression.
+ */
GeoMatchExpression::GeoMatchExpression(StringData path,
const GeoExpression* query,
const BSONObj& rawObj)
: LeafMatchExpression(GEO, path), _rawObj(rawObj), _query(query), _canSkipValidation(false) {}
/**
-* Takes shared ownership of the passed-in GeoExpression.
-*/
+ * Takes shared ownership of the passed-in GeoExpression.
+ */
GeoMatchExpression::GeoMatchExpression(StringData path,
std::shared_ptr<const GeoExpression> query,
const BSONObj& rawObj)
@@ -467,4 +464,4 @@ std::unique_ptr<MatchExpression> GeoNearMatchExpression::shallowClone() const {
}
return std::move(next);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp
index 56ddaa674ea..6bf40daf87a 100644
--- a/src/mongo/db/matcher/expression_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_geo_test.cpp
@@ -181,4 +181,4 @@ TEST(ExpressionGeoTest, GeoNearNotEquivalent) {
gne2(makeGeoNearMatchExpression(query2));
ASSERT(!gne1->equivalent(gne2.get()));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 334dd0201e7..67b67dad77d 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -806,4 +806,4 @@ bool BitTestMatchExpression::equivalent(const MatchExpression* other) const {
return path() == realOther->path() && myBitPositions == otherBitPositions;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 931ef828d03..84d04c63a5c 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -40,7 +40,7 @@
namespace pcrecpp {
class RE;
-} // namespace pcrecpp;
+} // namespace pcrecpp
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index bb06d26b7f7..cefa46d58b7 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -1861,4 +1861,4 @@ TEST(BitTestMatchExpression, DoesNotMatchBinaryWithBitMask) {
ASSERT(banyc.matchesSingleElement(match1["a"]));
ASSERT(banyc.matchesSingleElement(match2["a"]));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp
index ede09820502..98a64e2d4bd 100644
--- a/src/mongo/db/matcher/expression_parser.cpp
+++ b/src/mongo/db/matcher/expression_parser.cpp
@@ -255,8 +255,8 @@ StatusWithMatchExpression parse(const BSONObj& obj,
if (!parseExpressionMatchFunction) {
return {Status(ErrorCodes::BadValue,
- str::stream() << "unknown top level operator: "
- << e.fieldNameStringData())};
+ str::stream()
+ << "unknown top level operator: " << e.fieldNameStringData())};
}
auto parsedExpression = parseExpressionMatchFunction(
@@ -569,8 +569,7 @@ StatusWith<std::vector<uint32_t>> parseBitPositionsArray(const BSONObj& theArray
return Status(
ErrorCodes::BadValue,
str::stream()
- << "bit positions cannot be represented as a 32-bit signed integer: "
- << e);
+ << "bit positions cannot be represented as a 32-bit signed integer: " << e);
}
// This checks if e is integral.
@@ -589,8 +588,7 @@ StatusWith<std::vector<uint32_t>> parseBitPositionsArray(const BSONObj& theArray
return Status(
ErrorCodes::BadValue,
str::stream()
- << "bit positions cannot be represented as a 32-bit signed integer: "
- << e);
+ << "bit positions cannot be represented as a 32-bit signed integer: " << e);
}
}
@@ -635,9 +633,9 @@ StatusWithMatchExpression parseBitTest(StringData name, BSONElement e) {
auto eBinary = e.binData(eBinaryLen);
bitTestMatchExpression = stdx::make_unique<T>(name, eBinary, eBinaryLen);
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << name << " takes an Array, a number, or a BinData but received: " << e);
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << name << " takes an Array, a number, or a BinData but received: " << e);
}
return {std::move(bitTestMatchExpression)};
@@ -692,8 +690,7 @@ StatusWithMatchExpression parseInternalSchemaRootDocEq(
if (elem.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
str::stream() << InternalSchemaRootDocEqMatchExpression::kName
- << " must be an object, found type "
- << elem.type())};
+ << " must be an object, found type " << elem.type())};
}
auto rootDocEq =
stdx::make_unique<InternalSchemaRootDocEqMatchExpression>(elem.embeddedObject());
@@ -750,8 +747,7 @@ StatusWith<StringData> parseNamePlaceholder(const BSONObj& containingObject,
} else if (namePlaceholderElem.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
str::stream() << expressionName << " requires '" << namePlaceholderFieldName
- << "' to be a string, not "
- << namePlaceholderElem.type()};
+ << "' to be a string, not " << namePlaceholderElem.type()};
}
return {namePlaceholderElem.valueStringData()};
}
@@ -803,12 +799,9 @@ StatusWith<std::unique_ptr<ExpressionWithPlaceholder>> parseExprWithPlaceholder(
if (placeholder && (*placeholder != expectedPlaceholder)) {
return {ErrorCodes::FailedToParse,
str::stream() << expressionName << " expected a name placeholder of "
- << expectedPlaceholder
- << ", but '"
+ << expectedPlaceholder << ", but '"
<< exprWithPlaceholderElem.fieldNameStringData()
- << "' has a mismatching placeholder '"
- << *placeholder
- << "'"};
+ << "' has a mismatching placeholder '" << *placeholder << "'"};
}
return result;
}
@@ -1248,8 +1241,7 @@ StatusWithMatchExpression parseInternalSchemaFixedArityArgument(
if (static_cast<size_t>(inputObj.nFields()) != arity) {
return {ErrorCodes::FailedToParse,
str::stream() << elem.fieldNameStringData() << " requires exactly " << arity
- << " MatchExpressions, but got "
- << inputObj.nFields()};
+ << " MatchExpressions, but got " << inputObj.nFields()};
}
// Fill out 'expressions' with all of the parsed subexpressions contained in the array,
@@ -1320,17 +1312,16 @@ StatusWithMatchExpression parseInternalSchemaBinDataSubType(StringData name, BSO
auto valueAsInt = e.parseIntegerElementToInt();
if (!valueAsInt.isOK()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Invalid numerical BinData subtype value for "
- << InternalSchemaBinDataSubTypeExpression::kName
- << ": "
- << e.number());
+ str::stream()
+ << "Invalid numerical BinData subtype value for "
+ << InternalSchemaBinDataSubTypeExpression::kName << ": " << e.number());
}
if (!isValidBinDataType(valueAsInt.getValue())) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << InternalSchemaBinDataSubTypeExpression::kName
- << " value must represent BinData subtype: "
- << valueAsInt.getValue());
+ str::stream()
+ << InternalSchemaBinDataSubTypeExpression::kName
+ << " value must represent BinData subtype: " << valueAsInt.getValue());
}
return {stdx::make_unique<InternalSchemaBinDataSubTypeExpression>(
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index 534b20f3a1e..8ead6ff5d2b 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -198,16 +198,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef1) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$eq" << match)));
@@ -224,16 +220,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef2) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
@@ -251,17 +243,11 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -273,14 +259,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Query with DBRef fields out of order.
@@ -288,22 +270,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef4) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db");
BSONObj matchOutOfOrder = BSON("$db"
<< "db"
- << "$id"
- << oid
- << "$ref"
+ << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
@@ -322,19 +298,13 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchOutOfOrder = BSON("foo" << 12345 << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -346,14 +316,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $id missing.
@@ -361,20 +327,13 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchMissingID = BSON("$ref"
<< "coll"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj notMatch = BSON("$ref"
<< "collx"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingID));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -386,14 +345,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $ref missing.
@@ -401,18 +356,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345);
+ << "$id" << oid << "foo" << 12345);
BSONObj matchMissingRef = BSON("$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "foo"
- << 12345);
+ << "$id" << oidx << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingRef));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -424,14 +373,10 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
}
// Incomplete DBRef - $db only.
@@ -439,24 +384,17 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj matchDBOnly = BSON("$db"
<< "db"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx"
- << "foo"
- << 12345);
+ << "foo" << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchDBOnly));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -468,16 +406,12 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "$db"
- << "db"
- << "foo"
- << 12345
- << "bar"
- << 678)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"
+ << "foo" << 12345 << "bar" << 678)))));
}
TEST(MatchExpressionParserArrayTest, All1) {
@@ -843,4 +777,4 @@ TEST(MatchExpressionParserArrayTest, AllStringCollation) {
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
ASSERT_TRUE(eqMatch->getCollator() == &collator);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index acf3ec3742b..ad76f13ff92 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -435,9 +435,7 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
@@ -446,15 +444,11 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -470,39 +464,28 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$db"
<< "db"
<< "$ref"
<< "coll"
- << "$id"
- << oid))));
+ << "$id" << oid))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
}
@@ -511,15 +494,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidy = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
@@ -528,15 +507,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -544,15 +519,11 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
<< "coll"
@@ -560,9 +531,7 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oidy << "$ref"
<< "colly"
@@ -570,87 +539,59 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "dbx")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "$db"
+ << "$id" << oid << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id"
- << oidy
- << "$db"
+ << "$id" << oidy << "$db"
<< "db")))));
}
@@ -658,10 +599,7 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id"
- << oid
- << "foo"
- << 12345))));
+ << "$id" << oid << "foo" << 12345))));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
@@ -669,28 +607,19 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id"
- << oidx
- << "$db"
+ << "$id" << oidx << "$db"
<< "db"))));
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345)))));
- ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
- << "collx"
- << "$id"
- << oidx
- << "foo"
- << 12345)
- << BSON("$ref"
- << "coll"
- << "$id"
- << oid
- << "foo"
- << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "foo" << 12345)
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
}
TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
@@ -704,8 +633,7 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// second field is not $id
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$foo"
- << 1))));
+ << "$foo" << 1))));
result = MatchExpressionParser::parse(query, expCtx);
ASSERT_NOT_OK(result.getStatus());
@@ -719,8 +647,7 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// missing $id and $ref field
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$db"
<< "test"
- << "foo"
- << 3))));
+ << "foo" << 3))));
result = MatchExpressionParser::parse(query, expCtx);
ASSERT_NOT_OK(result.getStatus());
}
diff --git a/src/mongo/db/matcher/expression_parser_test.cpp b/src/mongo/db/matcher/expression_parser_test.cpp
index 17e77fa2522..e60bd62ccc0 100644
--- a/src/mongo/db/matcher/expression_parser_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_test.cpp
@@ -258,8 +258,7 @@ TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsNotInline) {
TEST(MatchExpressionParserTest, RegexDoesNotParseSuccessfullyWithMultipleOptions) {
auto query = BSON("a" << BSON("$options"
<< "s"
- << "$regex"
- << BSONRegEx("/myRegex/", "i")));
+ << "$regex" << BSONRegEx("/myRegex/", "i")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_NOT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
@@ -267,8 +266,7 @@ TEST(MatchExpressionParserTest, RegexDoesNotParseSuccessfullyWithMultipleOptions
TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirst) {
auto query = BSON("a" << BSON("$options"
<< "s"
- << "$regex"
- << BSONRegEx("/myRegex/", "")));
+ << "$regex" << BSONRegEx("/myRegex/", "")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
@@ -276,8 +274,7 @@ TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirst) {
TEST(MatchExpressionParserTest, RegexParsesSuccessfullyWithOptionsFirstEmptyOptions) {
auto query = BSON("a" << BSON("$options"
<< ""
- << "$regex"
- << BSONRegEx("/myRegex/", "")));
+ << "$regex" << BSONRegEx("/myRegex/", "")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
ASSERT_OK(MatchExpressionParser::parse(query, expCtx).getStatus());
}
diff --git a/src/mongo/db/matcher/expression_parser_tree_test.cpp b/src/mongo/db/matcher/expression_parser_tree_test.cpp
index 0cc3a23f06a..9aa066b7cca 100644
--- a/src/mongo/db/matcher/expression_parser_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_tree_test.cpp
@@ -116,4 +116,4 @@ TEST(MatchExpressionParserLeafTest, NotRegex1) {
ASSERT(result.getValue()->matchesBSON(BSON("x"
<< "AC")));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 8fdcd65befc..3bba2e76a02 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -62,16 +62,14 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx,
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns()
- << "')",
+ << nss.ns() << "')",
db);
Collection* collection = db->getCollection(opCtx, nss);
uassert(ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns()
- << "')",
+ << nss.ns() << "')",
collection);
std::vector<const IndexDescriptor*> idxMatches;
diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp
index ea3fa147de2..08f2ade599f 100644
--- a/src/mongo/db/matcher/expression_text_base.cpp
+++ b/src/mongo/db/matcher/expression_text_base.cpp
@@ -60,10 +60,8 @@ void TextMatchExpressionBase::serialize(BSONObjBuilder* out) const {
const fts::FTSQuery& ftsQuery = getFTSQuery();
out->append("$text",
BSON("$search" << ftsQuery.getQuery() << "$language" << ftsQuery.getLanguage()
- << "$caseSensitive"
- << ftsQuery.getCaseSensitive()
- << "$diacriticSensitive"
- << ftsQuery.getDiacriticSensitive()));
+ << "$caseSensitive" << ftsQuery.getCaseSensitive()
+ << "$diacriticSensitive" << ftsQuery.getDiacriticSensitive()));
}
bool TextMatchExpressionBase::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 2cbdb1886f0..99d1886ab3c 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -419,4 +419,4 @@ MatchExpression::ExpressionOptimizerFunc NotMatchExpression::getOptimizer() cons
return expression;
};
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index b89efb19461..8f9d66108a9 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -248,4 +248,4 @@ private:
std::unique_ptr<MatchExpression> _exp;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index a0770c2a4df..cb79bb139ac 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -321,4 +321,4 @@ TEST(NorOp, Equivalent) {
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_type_test.cpp b/src/mongo/db/matcher/expression_type_test.cpp
index 403a60ee9f4..89c3795b6a2 100644
--- a/src/mongo/db/matcher/expression_type_test.cpp
+++ b/src/mongo/db/matcher/expression_type_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/matcher/expression_type.h"
#include "mongo/bson/json.h"
+#include "mongo/db/matcher/expression_type.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -314,4 +314,4 @@ TEST(InternalSchemaBinDataEncryptedTypeTest, DoesNotTraverseLeafArrays) {
}
} // namespace
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index 997b673c6cc..c5a19e2f881 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -45,9 +45,9 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using stdx::make_unique;
WhereMatchExpression::WhereMatchExpression(OperationContext* opCtx,
@@ -110,4 +110,4 @@ unique_ptr<MatchExpression> WhereMatchExpression::shallowClone() const {
}
return std::move(e);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_where_noop.cpp b/src/mongo/db/matcher/expression_where_noop.cpp
index bd1469036e4..5668e0ad661 100644
--- a/src/mongo/db/matcher/expression_where_noop.cpp
+++ b/src/mongo/db/matcher/expression_where_noop.cpp
@@ -53,4 +53,4 @@ std::unique_ptr<MatchExpression> WhereNoOpMatchExpression::shallowClone() const
}
return std::move(e);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_with_placeholder.cpp b/src/mongo/db/matcher/expression_with_placeholder.cpp
index b0e1f1d118d..d0b2e65eb9e 100644
--- a/src/mongo/db/matcher/expression_with_placeholder.cpp
+++ b/src/mongo/db/matcher/expression_with_placeholder.cpp
@@ -65,11 +65,9 @@ StatusWith<boost::optional<StringData>> parseTopLevelFieldName(MatchExpression*
if (statusWithId.getValue() && placeholder != statusWithId.getValue()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Expected a single top-level field name, found '"
- << *placeholder
- << "' and '"
- << *statusWithId.getValue()
- << "'");
+ str::stream()
+ << "Expected a single top-level field name, found '"
+ << *placeholder << "' and '" << *statusWithId.getValue() << "'");
}
}
return placeholder;
@@ -105,8 +103,7 @@ StatusWith<std::unique_ptr<ExpressionWithPlaceholder>> ExpressionWithPlaceholder
return Status(ErrorCodes::BadValue,
str::stream() << "The top-level field name must be an alphanumeric "
"string beginning with a lowercase letter, found '"
- << *placeholder
- << "'");
+ << *placeholder << "'");
}
}
diff --git a/src/mongo/db/matcher/match_details.cpp b/src/mongo/db/matcher/match_details.cpp
index 734ba6165e2..be9c657c3d9 100644
--- a/src/mongo/db/matcher/match_details.cpp
+++ b/src/mongo/db/matcher/match_details.cpp
@@ -68,4 +68,4 @@ string MatchDetails::toString() const {
ss << "elemMatchKey: " << (_elemMatchKey ? _elemMatchKey->c_str() : "NONE") << " ";
return ss.str();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/match_details.h b/src/mongo/db/matcher/match_details.h
index 9b364b34130..aadb5552b9f 100644
--- a/src/mongo/db/matcher/match_details.h
+++ b/src/mongo/db/matcher/match_details.h
@@ -77,4 +77,4 @@ private:
bool _elemMatchKeyRequested;
std::unique_ptr<std::string> _elemMatchKey;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp
index be404399189..5c5bfa55fd3 100644
--- a/src/mongo/db/matcher/matchable.cpp
+++ b/src/mongo/db/matcher/matchable.cpp
@@ -38,4 +38,4 @@ BSONMatchableDocument::BSONMatchableDocument(const BSONObj& obj) : _obj(obj) {
}
BSONMatchableDocument::~BSONMatchableDocument() {}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/matchable.h b/src/mongo/db/matcher/matchable.h
index b0e7a601b89..062a3f28826 100644
--- a/src/mongo/db/matcher/matchable.h
+++ b/src/mongo/db/matcher/matchable.h
@@ -48,7 +48,7 @@ public:
 * The newly returned ElementIterator is allowed to keep a pointer to path.
* So the caller of this function should make sure path is in scope until
* the ElementIterator is deallocated
- */
+ */
virtual ElementIterator* allocateIterator(const ElementPath* path) const = 0;
virtual void releaseIterator(ElementIterator* iterator) const = 0;
@@ -148,4 +148,4 @@ private:
mutable BSONElementIterator _iterator;
mutable bool _iteratorUsed;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index efd2c69aa06..1f6c8565d78 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -359,4 +359,4 @@ ElementIterator::Context BSONElementIterator::next() {
_next.reset();
return x;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path.h b/src/mongo/db/matcher/path.h
index 1c0500e104f..88d759462f5 100644
--- a/src/mongo/db/matcher/path.h
+++ b/src/mongo/db/matcher/path.h
@@ -260,4 +260,4 @@ private:
std::unique_ptr<ElementIterator> _subCursor;
std::unique_ptr<ElementPath> _subCursorPath;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/path_accepting_keyword_test.cpp b/src/mongo/db/matcher/path_accepting_keyword_test.cpp
index 2a69e76afcf..52b42e5d959 100644
--- a/src/mongo/db/matcher/path_accepting_keyword_test.cpp
+++ b/src/mongo/db/matcher/path_accepting_keyword_test.cpp
@@ -49,33 +49,42 @@ TEST(PathAcceptingKeyword, CanParseKnownMatchTypes) {
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$in" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::NOT_EQUAL ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$ne" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::SIZE == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$size" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::SIZE ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$size" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::ALL ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$all" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::NOT_IN ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$nin" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::EXISTS == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$exists" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::EXISTS ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$exists" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::MOD ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$mod" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::TYPE == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$type" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::REGEX == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$regex" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::OPTIONS == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$options" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::TYPE ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$type" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::REGEX ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$regex" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::OPTIONS ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$options" << 1).firstElement()));
ASSERT_TRUE(
PathAcceptingKeyword::ELEM_MATCH ==
MatchExpressionParser::parsePathAcceptingKeyword(BSON("$elemMatch" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::GEO_NEAR == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$near" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::GEO_NEAR == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$geoNear" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::WITHIN == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$within" << 1).firstElement()));
- ASSERT_TRUE(PathAcceptingKeyword::WITHIN == MatchExpressionParser::parsePathAcceptingKeyword(
- BSON("$geoWithin" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::GEO_NEAR ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$near" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::GEO_NEAR ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$geoNear" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::WITHIN ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$within" << 1).firstElement()));
+ ASSERT_TRUE(
+ PathAcceptingKeyword::WITHIN ==
+ MatchExpressionParser::parsePathAcceptingKeyword(BSON("$geoWithin" << 1).firstElement()));
ASSERT_TRUE(PathAcceptingKeyword::GEO_INTERSECTS ==
MatchExpressionParser::parsePathAcceptingKeyword(
BSON("$geoIntersects" << 1).firstElement()));
diff --git a/src/mongo/db/matcher/path_test.cpp b/src/mongo/db/matcher/path_test.cpp
index af7856d366a..dd0d7314ca9 100644
--- a/src/mongo/db/matcher/path_test.cpp
+++ b/src/mongo/db/matcher/path_test.cpp
@@ -566,4 +566,4 @@ TEST(SingleElementElementIterator, Simple1) {
ASSERT(!i.more());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
index 69b524f18f4..268bb97376e 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h
@@ -43,9 +43,7 @@ public:
}
Validator getComparator() const final {
- return [strLen = strLen()](int lenWithoutNullTerm) {
- return lenWithoutNullTerm <= strLen;
- };
+ return [strLen = strLen()](int lenWithoutNullTerm) { return lenWithoutNullTerm <= strLen; };
}
std::unique_ptr<MatchExpression> shallowClone() const final {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
index f3128007500..b0a7953f42d 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h
@@ -43,9 +43,7 @@ public:
}
Validator getComparator() const final {
- return [strLen = strLen()](int lenWithoutNullTerm) {
- return lenWithoutNullTerm >= strLen;
- };
+ return [strLen = strLen()](int lenWithoutNullTerm) { return lenWithoutNullTerm >= strLen; };
}
std::unique_ptr<MatchExpression> shallowClone() const final {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
index 13bc5c47f1c..8eb9332aed7 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
@@ -80,8 +80,7 @@ TEST(InternalSchemaObjectMatchExpression, AcceptsObjectsThatMatch) {
<< "string"))));
ASSERT_TRUE(objMatch.matchesBSON(BSON("a" << BSON("b"
<< "string"
- << "c"
- << 1))));
+ << "c" << 1))));
ASSERT_FALSE(
objMatch.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 1) << BSON("b"
<< "string")))));
diff --git a/src/mongo/db/matcher/schema/json_pointer_test.cpp b/src/mongo/db/matcher/schema/json_pointer_test.cpp
index aed92b17784..f91d8888719 100644
--- a/src/mongo/db/matcher/schema/json_pointer_test.cpp
+++ b/src/mongo/db/matcher/schema/json_pointer_test.cpp
@@ -49,9 +49,8 @@ void assertPointerEvaluatesTo(std::string pointerStr,
}
TEST(JSONPointerTest, ParseInterestingCharacterFields) {
- BSONObj obj = BSON(
- "" << 1 << "c%d" << 2 << "e^f" << 3 << "g|h" << 4 << "i\\\\j" << 5 << "k\"l" << 6 << " "
- << 7);
+ BSONObj obj = BSON("" << 1 << "c%d" << 2 << "e^f" << 3 << "g|h" << 4 << "i\\\\j" << 5 << "k\"l"
+ << 6 << " " << 7);
assertPointerEvaluatesTo("/", obj, "", 1);
assertPointerEvaluatesTo("/c%d", obj, "c%d", 2);
assertPointerEvaluatesTo("/e^f", obj, "e^f", 3);
@@ -129,9 +128,8 @@ TEST(JSONPointerTest, ArrayTraversalTest) {
<< "value2")
<< BSON("builder3"
<< "value3"));
- auto topLevel =
- BSON("transit" << BSON("arrBottom" << arrBottom) << "arrTop" << arrTop << "toBSONArray"
- << bsonArray);
+ auto topLevel = BSON("transit" << BSON("arrBottom" << arrBottom) << "arrTop" << arrTop
+ << "toBSONArray" << bsonArray);
assertPointerEvaluatesTo("/transit/arrBottom/0", topLevel, "0", 0);
assertPointerEvaluatesTo("/toBSONArray/0/builder0", topLevel, "builder0", "value0");
assertPointerEvaluatesTo("/toBSONArray/3/builder3", topLevel, "builder3", "value3");
diff --git a/src/mongo/db/matcher/schema/json_schema_parser.cpp b/src/mongo/db/matcher/schema/json_schema_parser.cpp
index 6f0d6e6c947..72ed89bf3f6 100644
--- a/src/mongo/db/matcher/schema/json_schema_parser.cpp
+++ b/src/mongo/db/matcher/schema/json_schema_parser.cpp
@@ -71,7 +71,12 @@ namespace {
// Explicitly unsupported JSON Schema keywords.
const std::set<StringData> unsupportedKeywords{
- "$ref"_sd, "$schema"_sd, "default"_sd, "definitions"_sd, "format"_sd, "id"_sd,
+ "$ref"_sd,
+ "$schema"_sd,
+ "default"_sd,
+ "definitions"_sd,
+ "format"_sd,
+ "id"_sd,
};
constexpr StringData kNamePlaceholder = "i"_sd;
@@ -173,9 +178,9 @@ StatusWithMatchExpression parseMaximum(StringData path,
bool isExclusiveMaximum) {
if (!maximum.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMaximumKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMaximumKeyword
+ << "' must be a number")};
}
if (path.empty()) {
@@ -201,9 +206,9 @@ StatusWithMatchExpression parseMinimum(StringData path,
bool isExclusiveMinimum) {
if (!minimum.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMinimumKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMinimumKeyword
+ << "' must be a number")};
}
if (path.empty()) {
@@ -249,9 +254,9 @@ StatusWithMatchExpression parsePattern(StringData path,
InternalSchemaTypeExpression* typeExpr) {
if (pattern.type() != BSONType::String) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPatternKeyword
- << "' must be a string")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaPatternKeyword
+ << "' must be a string")};
}
if (path.empty()) {
@@ -271,16 +276,16 @@ StatusWithMatchExpression parseMultipleOf(StringData path,
InternalSchemaTypeExpression* typeExpr) {
if (!multipleOf.isNumber()) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMultipleOfKeyword
- << "' must be a number")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMultipleOfKeyword
+ << "' must be a number")};
}
if (multipleOf.numberDecimal().isNegative() || multipleOf.numberDecimal().isZero()) {
return {Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaMultipleOfKeyword
- << "' must have a positive value")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMultipleOfKeyword
+ << "' must have a positive value")};
}
if (path.empty()) {
return {stdx::make_unique<AlwaysTrueMatchExpression>()};
@@ -405,7 +410,7 @@ StatusWith<StringDataSet> parseRequired(BSONElement requiredElt) {
<< propertyName.type()};
}
- const auto[it, didInsert] = properties.insert(propertyName.valueStringData());
+ const auto [it, didInsert] = properties.insert(propertyName.valueStringData());
if (!didInsert) {
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '"
@@ -458,9 +463,9 @@ StatusWithMatchExpression parseProperties(const boost::intrusive_ptr<ExpressionC
bool ignoreUnknownKeywords) {
if (propertiesElt.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPropertiesKeyword
- << "' must be an object")};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaPropertiesKeyword
+ << "' must be an object")};
}
auto propertiesObj = propertiesElt.embeddedObject();
@@ -469,8 +474,7 @@ StatusWithMatchExpression parseProperties(const boost::intrusive_ptr<ExpressionC
if (property.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Nested schema for $jsonSchema property '"
- << property.fieldNameStringData()
- << "' must be an object"};
+ << property.fieldNameStringData() << "' must be an object"};
}
auto nestedSchemaMatch = _parse(expCtx,
@@ -532,11 +536,11 @@ StatusWith<std::vector<PatternSchema>> parsePatternProperties(
for (auto&& patternSchema : patternPropertiesElt.embeddedObject()) {
if (patternSchema.type() != BSONType::Object) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaPatternPropertiesKeyword
- << "' has property '"
- << patternSchema.fieldNameStringData()
- << "' which is not an object")};
+ str::stream()
+ << "$jsonSchema keyword '"
+ << JSONSchemaParser::kSchemaPatternPropertiesKeyword
+ << "' has property '" << patternSchema.fieldNameStringData()
+ << "' which is not an object")};
}
// Parse the nested schema using a placeholder as the path, since we intend on using the
@@ -840,11 +844,11 @@ StatusWith<boost::optional<long long>> parseItems(
for (auto subschema : itemsElt.embeddedObject()) {
if (subschema.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaItemsKeyword
- << "' requires that each element of the array is an "
- "object, but found a "
- << subschema.type()};
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaItemsKeyword
+ << "' requires that each element of the array is an "
+ "object, but found a "
+ << subschema.type()};
}
// We want to make an ExpressionWithPlaceholder for $_internalSchemaMatchArrayIndex,
@@ -895,8 +899,7 @@ StatusWith<boost::optional<long long>> parseItems(
} else {
return {ErrorCodes::TypeMismatch,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaItemsKeyword
- << "' must be an array or an object, not "
- << itemsElt.type()};
+ << "' must be an array or an object, not " << itemsElt.type()};
}
return startIndexForAdditionalItems;
@@ -1267,8 +1270,7 @@ Status translateScalarKeywords(StringMap<BSONElement>& keywordMap,
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMaximumKeyword
<< "' must be a present if "
- << JSONSchemaParser::kSchemaExclusiveMaximumKeyword
- << " is present"};
+ << JSONSchemaParser::kSchemaExclusiveMaximumKeyword << " is present"};
}
if (auto minimumElt = keywordMap[JSONSchemaParser::kSchemaMinimumKeyword]) {
@@ -1294,8 +1296,7 @@ Status translateScalarKeywords(StringMap<BSONElement>& keywordMap,
return {ErrorCodes::FailedToParse,
str::stream() << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaMinimumKeyword
<< "' must be a present if "
- << JSONSchemaParser::kSchemaExclusiveMinimumKeyword
- << " is present"};
+ << JSONSchemaParser::kSchemaExclusiveMinimumKeyword << " is present"};
}
return Status::OK();
@@ -1316,19 +1317,17 @@ Status translateEncryptionKeywords(StringMap<BSONElement>& keywordMap,
expCtx->maxFeatureCompatibilityVersion <
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42) {
return Status(ErrorCodes::QueryFeatureNotAllowed,
- str::stream() << "The featureCompatiblityVersion must be 4.2 to use "
- "encryption keywords in $jsonSchema. See "
- << feature_compatibility_version_documentation::kUpgradeLink
- << ".");
+ str::stream()
+ << "The featureCompatiblityVersion must be 4.2 to use "
+ "encryption keywords in $jsonSchema. See "
+ << feature_compatibility_version_documentation::kUpgradeLink << ".");
}
if (encryptElt && encryptMetadataElt) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Cannot specify both $jsonSchema keywords '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' and '"
- << JSONSchemaParser::kSchemaEncryptMetadataKeyword
- << "'");
+ << JSONSchemaParser::kSchemaEncryptKeyword << "' and '"
+ << JSONSchemaParser::kSchemaEncryptMetadataKeyword << "'");
}
if (encryptMetadataElt) {
@@ -1398,9 +1397,9 @@ Status validateMetadataKeywords(StringMap<BSONElement>& keywordMap) {
if (auto titleElem = keywordMap[JSONSchemaParser::kSchemaTitleKeyword]) {
if (titleElem.type() != BSONType::String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaTitleKeyword
- << "' must be of type string");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaTitleKeyword
+ << "' must be of type string");
}
}
return Status::OK();
@@ -1455,16 +1454,16 @@ StatusWithMatchExpression _parse(const boost::intrusive_ptr<ExpressionContext>&
<< "' is not currently supported");
} else if (!ignoreUnknownKeywords) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Unknown $jsonSchema keyword: "
- << elt.fieldNameStringData());
+ str::stream()
+ << "Unknown $jsonSchema keyword: " << elt.fieldNameStringData());
}
continue;
}
if (it->second) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Duplicate $jsonSchema keyword: "
- << elt.fieldNameStringData());
+ str::stream()
+ << "Duplicate $jsonSchema keyword: " << elt.fieldNameStringData());
}
keywordMap[elt.fieldNameStringData()] = elt;
@@ -1481,28 +1480,24 @@ StatusWithMatchExpression _parse(const boost::intrusive_ptr<ExpressionContext>&
if (typeElem && bsonTypeElem) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Cannot specify both $jsonSchema keywords '"
- << JSONSchemaParser::kSchemaTypeKeyword
- << "' and '"
- << JSONSchemaParser::kSchemaBsonTypeKeyword
- << "'");
+ << JSONSchemaParser::kSchemaTypeKeyword << "' and '"
+ << JSONSchemaParser::kSchemaBsonTypeKeyword << "'");
} else if (typeElem && encryptElem) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' cannot be used in conjunction with '"
- << JSONSchemaParser::kSchemaTypeKeyword
- << "', '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' implies type 'bsonType::BinData'");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' cannot be used in conjunction with '"
+ << JSONSchemaParser::kSchemaTypeKeyword << "', '"
+ << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' implies type 'bsonType::BinData'");
} else if (bsonTypeElem && encryptElem) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' cannot be used in conjunction with '"
- << JSONSchemaParser::kSchemaBsonTypeKeyword
- << "', '"
- << JSONSchemaParser::kSchemaEncryptKeyword
- << "' implies type 'bsonType::BinData'");
+ str::stream()
+ << "$jsonSchema keyword '" << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' cannot be used in conjunction with '"
+ << JSONSchemaParser::kSchemaBsonTypeKeyword << "', '"
+ << JSONSchemaParser::kSchemaEncryptKeyword
+ << "' implies type 'bsonType::BinData'");
}
std::unique_ptr<InternalSchemaTypeExpression> typeExpr;
@@ -1593,25 +1588,25 @@ StatusWith<MatcherTypeSet> JSONSchemaParser::parseTypeSet(BSONElement typeElt,
for (auto&& typeArrayEntry : typeElt.embeddedObject()) {
if (typeArrayEntry.type() != BSONType::String) {
return {Status(ErrorCodes::TypeMismatch,
- str::stream() << "$jsonSchema keyword '"
- << typeElt.fieldNameStringData()
- << "' array elements must be strings")};
+ str::stream()
+ << "$jsonSchema keyword '" << typeElt.fieldNameStringData()
+ << "' array elements must be strings")};
}
if (typeArrayEntry.valueStringData() == JSONSchemaParser::kSchemaTypeInteger) {
return {ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema type '"
- << JSONSchemaParser::kSchemaTypeInteger
- << "' is not currently supported."};
+ str::stream()
+ << "$jsonSchema type '" << JSONSchemaParser::kSchemaTypeInteger
+ << "' is not currently supported."};
}
auto insertionResult = aliases.insert(typeArrayEntry.valueStringData());
if (!insertionResult.second) {
- return {Status(ErrorCodes::FailedToParse,
- str::stream() << "$jsonSchema keyword '"
- << typeElt.fieldNameStringData()
- << "' has duplicate value: "
- << typeArrayEntry.valueStringData())};
+ return {
+ Status(ErrorCodes::FailedToParse,
+ str::stream()
+ << "$jsonSchema keyword '" << typeElt.fieldNameStringData()
+ << "' has duplicate value: " << typeArrayEntry.valueStringData())};
}
}
}
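The parser hunks above show the two changes clang-format-7.0.1 applies throughout this file: structured bindings gain a space after `auto` (`const auto [it, didInsert]`), and long `str::stream()` chains now break after the stream expression itself so the `<<` operands pack onto shared lines instead of one operand per line. A minimal C++ sketch of the wrapping rule, with a hypothetical keyword constant:

    // Before: each << operand aligned under the first.
    // After (clang-format-7.0.1): the stream drops to its own line and the
    // operands pack up to the column limit.
    return {ErrorCodes::TypeMismatch,
            str::stream()
                << "$jsonSchema keyword '" << kSomeKeyword  // hypothetical name
                << "' must be an object"};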
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 756263a6ff5..11a75108e9c 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -417,8 +417,7 @@ Status storeMongodOptions(const moe::Environment& params) {
storageGlobalParams.syncdelay > StorageGlobalParams::kMaxSyncdelaySecs) {
return Status(ErrorCodes::BadValue,
str::stream() << "syncdelay out of allowed range (0-"
- << StorageGlobalParams::kMaxSyncdelaySecs
- << "s)");
+ << StorageGlobalParams::kMaxSyncdelaySecs << "s)");
}
}
@@ -457,9 +456,9 @@ Status storeMongodOptions(const moe::Environment& params) {
if (journalCommitIntervalMs < 1 ||
journalCommitIntervalMs > StorageGlobalParams::kMaxJournalCommitIntervalMs) {
return Status(ErrorCodes::BadValue,
- str::stream() << "--journalCommitInterval out of allowed range (1-"
- << StorageGlobalParams::kMaxJournalCommitIntervalMs
- << "ms)");
+ str::stream()
+ << "--journalCommitInterval out of allowed range (1-"
+ << StorageGlobalParams::kMaxJournalCommitIntervalMs << "ms)");
}
}
diff --git a/src/mongo/db/mongod_options.h b/src/mongo/db/mongod_options.h
index 62f86b51611..4ed3efa0afa 100644
--- a/src/mongo/db/mongod_options.h
+++ b/src/mongo/db/mongod_options.h
@@ -84,4 +84,4 @@ Status storeMongodOptions(const moe::Environment& params);
* Help test user for storage.dbPath config option.
*/
std::string storageDBPathDescription();
-}
+} // namespace mongo
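The `-}` / `+} // namespace mongo` pairs here and in the headers below come from clang-format's namespace-comment fixup (the `FixNamespaceComments` style option, assuming the repository's .clang-format enables it): every brace closing a namespace gains a trailing comment naming that namespace. A small sketch with a hypothetical constant:

    namespace mongo {
    namespace {
    constexpr int kExample = 42;  // hypothetical placeholder
    }  // namespace
    }  // namespace mongo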
diff --git a/src/mongo/db/multi_key_path_tracker.cpp b/src/mongo/db/multi_key_path_tracker.cpp
index d1c2c1ca293..d78271932e5 100644
--- a/src/mongo/db/multi_key_path_tracker.cpp
+++ b/src/mongo/db/multi_key_path_tracker.cpp
@@ -61,8 +61,8 @@ std::string MultikeyPathTracker::dumpMultikeyPaths(const MultikeyPaths& multikey
void MultikeyPathTracker::mergeMultikeyPaths(MultikeyPaths* toMergeInto,
const MultikeyPaths& newPaths) {
invariant(toMergeInto->size() == newPaths.size(),
- str::stream() << "toMergeInto: " << dumpMultikeyPaths(*toMergeInto) << "; newPaths: "
- << dumpMultikeyPaths(newPaths));
+ str::stream() << "toMergeInto: " << dumpMultikeyPaths(*toMergeInto)
+ << "; newPaths: " << dumpMultikeyPaths(newPaths));
for (auto idx = std::size_t(0); idx < toMergeInto->size(); ++idx) {
toMergeInto->at(idx).insert(newPaths[idx].begin(), newPaths[idx].end());
}
diff --git a/src/mongo/db/multi_key_path_tracker_test.cpp b/src/mongo/db/multi_key_path_tracker_test.cpp
index 580b69519f3..9203ff5ff4a 100644
--- a/src/mongo/db/multi_key_path_tracker_test.cpp
+++ b/src/mongo/db/multi_key_path_tracker_test.cpp
@@ -47,8 +47,7 @@ void assertMultikeyPathsAreEqual(const MultikeyPaths& actual, const MultikeyPath
if (!match) {
FAIL(str::stream() << "Expected: " << MultikeyPathTracker::dumpMultikeyPaths(expected)
<< ", "
- << "Actual: "
- << MultikeyPathTracker::dumpMultikeyPaths(actual));
+ << "Actual: " << MultikeyPathTracker::dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 5fbb645c09c..5e87c48e59c 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -194,8 +194,8 @@ StatusWith<repl::OpTime> NamespaceString::getDropPendingNamespaceOpTime() const
long long term;
status = mongo::parseNumberFromString(opTimeStr.substr(termSeparatorIndex + 1), &term);
if (!status.isOK()) {
- return status.withContext(str::stream() << "Invalid term in drop-pending namespace: "
- << _ns);
+ return status.withContext(str::stream()
+ << "Invalid term in drop-pending namespace: " << _ns);
}
return repl::OpTime(Timestamp(Seconds(seconds), increment), term);
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index d55722595ac..6dd6e52f66e 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -534,11 +534,8 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
if (!collElem || args.nss.ns() == collElem.String()) {
uasserted(40654,
str::stream() << "failCollectionUpdates failpoint enabled, namespace: "
- << args.nss.ns()
- << ", update: "
- << args.updateArgs.update
- << " on document with "
- << args.updateArgs.criteria);
+ << args.nss.ns() << ", update: " << args.updateArgs.update
+ << " on document with " << args.updateArgs.criteria);
}
}
@@ -1228,7 +1225,6 @@ void logCommitOrAbortForPreparedTransaction(OperationContext* opCtx,
writeConflictRetry(
opCtx, "onPreparedTransactionCommitOrAbort", NamespaceString::kRsOplogNamespace.ns(), [&] {
-
// Writes to the oplog only require a Global intent lock. Guaranteed by
// OplogSlotReserver.
invariant(opCtx->lockState()->isWriteLocked());
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index 147c1960e50..92f55a4ba40 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -120,12 +120,10 @@ TEST_F(OpObserverTest, StartIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -162,12 +160,10 @@ TEST_F(OpObserverTest, CommitIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -204,12 +200,10 @@ TEST_F(OpObserverTest, AbortIndexBuildExpectedOplogEntry) {
BSONObj specX = BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "v"
- << 2);
+ << "v" << 2);
BSONObj specA = BSON("key" << BSON("a" << 1) << "name"
<< "a_1"
- << "v"
- << 2);
+ << "v" << 2);
std::vector<BSONObj> specs = {specX, specA};
// Write to the oplog.
@@ -289,8 +283,7 @@ TEST_F(OpObserverTest, CollModWithCollectionOptionsAndTTLInfo) {
BSON("collectionOptions_old"
<< BSON("validationLevel" << oldCollOpts.validationLevel << "validationAction"
<< oldCollOpts.validationAction)
- << "expireAfterSeconds_old"
- << durationCount<Seconds>(ttlInfo.oldExpireAfterSeconds));
+ << "expireAfterSeconds_old" << durationCount<Seconds>(ttlInfo.oldExpireAfterSeconds));
ASSERT_BSONOBJ_EQ(o2Expected, o2);
}
@@ -392,10 +385,9 @@ TEST_F(OpObserverTest, OnRenameCollectionReturnsRenameOpTime) {
// Ensure that renameCollection fields were properly added to oplog entry.
ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"])));
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" << stayTemp
- << "dropTarget"
- << dropTargetUuid);
+ auto oExpected =
+ BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp"
+ << stayTemp << "dropTarget" << dropTargetUuid);
ASSERT_BSONOBJ_EQ(oExpected, o);
// Ensure that the rename optime returned is the same as the last optime in the ReplClientInfo.
@@ -424,8 +416,8 @@ TEST_F(OpObserverTest, OnRenameCollectionOmitsDropTargetFieldIfDropTargetUuidIsN
// Ensure that renameCollection fields were properly added to oplog entry.
ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"])));
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON(
- "renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" << stayTemp);
+ auto oExpected = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns()
+ << "stayTemp" << stayTemp);
ASSERT_BSONOBJ_EQ(oExpected, o);
}
@@ -734,45 +726,28 @@ TEST_F(OpObserverTransactionTest, TransactionalPrepareTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0 << "data"
- << "x"))
- << BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1 << "data"
- << "y"))
- << BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 0))
- << BSON("op"
- << "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "prepare"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0 << "data"
+ << "x"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 1 << "data"
+ << "y"))
+ << BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 0))
+ << BSON("op"
+ << "d"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0)))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
ASSERT_EQ(oplogEntry.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -837,16 +812,11 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedCommitTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc))
- << "prepare"
- << true);
+ auto oExpected = BSON(
+ "applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
}
@@ -905,16 +875,11 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedAbortTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc))
- << "prepare"
- << true);
+ auto oExpected = BSON(
+ "applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(oplogEntry.shouldPrepare());
}
@@ -1159,42 +1124,27 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTest) {
checkCommonFields(oplogEntryObj);
OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj));
auto o = oplogEntry.getObject();
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0 << "data"
- << "x"))
- << BSON("op"
- << "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1 << "data"
- << "y"))
- << BSON("op"
- << "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2 << "data"
- << "z"))
- << BSON("op"
- << "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3 << "data"
- << "w"))));
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 0 << "data"
+ << "x"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("_id" << 1 << "data"
+ << "y"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("_id" << 2 << "data"
+ << "z"))
+ << BSON("op"
+ << "i"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("_id" << 3 << "data"
+ << "w"))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT(!oplogEntry.shouldPrepare());
ASSERT_FALSE(oplogEntryObj.hasField("prepare"));
@@ -1236,28 +1186,19 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTest) {
auto oplogEntry = getSingleOplogEntry(opCtx());
checkCommonFields(oplogEntry);
auto o = oplogEntry.getObjectField("o");
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0))
- << BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1))));
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0))
+ << BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
ASSERT_FALSE(oplogEntry.getBoolField("prepare"));
@@ -1292,20 +1233,12 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTest) {
auto o = oplogEntry.getObjectField("o");
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1))));
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
ASSERT_FALSE(oplogEntry.getBoolField("prepare"));
@@ -1350,12 +1283,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionSingleStatementTest) {
// The implicit commit oplog entry.
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << BSON("_id" << 0))));
+ << "ns" << nss.toString() << "ui" << uuid
+ << "o" << BSON("_id" << 0))));
ASSERT_BSONOBJ_EQ(oExpected, oplogEntry.getObject());
}
@@ -1394,52 +1323,32 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) {
}
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 1)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2)))
- << "partialTxn"
- << true);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 2)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "count"
- << 4);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 3)))
+ << "count" << 4);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject());
}
@@ -1490,36 +1399,26 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
}
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
- oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1)))
- << "count"
- << 2);
+ oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1)))
+ << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
}
@@ -1563,28 +1462,18 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeleteTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
// This should be the implicit commit oplog entry, indicated by the absence of the 'partialTxn'
// field.
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1)))
- << "count"
- << 2);
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 1)))
+ << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
}
@@ -1634,52 +1523,30 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertPrepareTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 1)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2)))
- << "partialTxn"
- << true);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 2)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "prepare"
- << true
- << "count"
- << 4);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 3)))
+ << "prepare" << true << "count" << 4);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -1742,36 +1609,24 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdatePrepareTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
}
- auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("$set" << BSON("data"
- << "x"))
- << "o2"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ auto oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss1.toString() << "ui" << uuid1 << "o"
+ << BSON("$set" << BSON("data"
+ << "x"))
+ << "o2" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
- oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "u"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("$set" << BSON("data"
- << "y"))
- << "o2"
- << BSON("_id" << 1)))
- << "prepare"
- << true
- << "count"
- << 2);
+ oExpected =
+ BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "u"
+ << "ns" << nss2.toString() << "ui" << uuid2 << "o"
+ << BSON("$set" << BSON("data"
+ << "y"))
+ << "o2" << BSON("_id" << 1)))
+ << "prepare" << true << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -1831,28 +1686,16 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeletePrepareTest) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0)))
- << "partialTxn"
- << true);
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0)))
+ << "partialTxn" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "d"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 1)))
- << "prepare"
- << true
- << "count"
- << 2);
+ << "ns" << nss2.toString() << "ui" << uuid2
+ << "o" << BSON("_id" << 1)))
+ << "prepare" << true << "count" << 2);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject());
ASSERT_EQ(prepareOpTime.getTimestamp(), opCtx()->recoveryUnit()->getPrepareTimestamp());
@@ -2060,36 +1903,20 @@ TEST_F(OpObserverMultiEntryTransactionTest, UnpreparedTransactionPackingTest) {
}
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1))
+ << "ns" << nss1.toString() << "ui"
+ << uuid1 << "o" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2))
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3))));
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 3))));
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
}
@@ -2133,38 +1960,21 @@ TEST_F(OpObserverMultiEntryTransactionTest, PreparedTransactionPackingTest) {
expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()};
auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 0))
+ << "ns" << nss1.toString() << "ui" << uuid1
+ << "o" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "ns"
- << nss1.toString()
- << "ui"
- << uuid1
- << "o"
- << BSON("_id" << 1))
+ << "ns" << nss1.toString() << "ui"
+ << uuid1 << "o" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 2))
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "ns"
- << nss2.toString()
- << "ui"
- << uuid2
- << "o"
- << BSON("_id" << 3)))
- << "prepare"
- << true);
+ << "ns" << nss2.toString() << "ui"
+ << uuid2 << "o" << BSON("_id" << 3)))
+ << "prepare" << true);
ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject());
}
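The op_observer test hunks above all follow one pattern: inside `BSON(...)` chains, clang-format-7.0.1 packs non-literal operands such as `"ns" << nss1.toString() << "ui" << uuid1` onto shared lines, while a bare string literal after `<<` still forces a line break, which is why `"op"` and `"i"` remain split. A compressed sketch of the post-format shape, with `nss` and `uuid` as hypothetical stand-ins for the test fixtures:

    auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op"
                                                        << "i"
                                                        << "ns" << nss.toString() << "ui" << uuid
                                                        << "o" << BSON("_id" << 0))));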
diff --git a/src/mongo/db/op_observer_util.h b/src/mongo/db/op_observer_util.h
index e3a7d195e7a..7e60c66cca8 100644
--- a/src/mongo/db/op_observer_util.h
+++ b/src/mongo/db/op_observer_util.h
@@ -42,4 +42,4 @@ BSONObj makeCreateCollCmdObj(const NamespaceString& collectionName,
BSONObj makeCollModCmdObj(const BSONObj& collModCmd,
const CollectionOptions& oldCollOptions,
boost::optional<TTLCollModInfo> ttlInfo);
-}
+} // namespace mongo
diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp
index 9c2b6d74774..27832209b69 100644
--- a/src/mongo/db/operation_time_tracker.cpp
+++ b/src/mongo/db/operation_time_tracker.cpp
@@ -42,7 +42,7 @@ struct OperationTimeTrackerHolder {
const OperationContext::Decoration<OperationTimeTrackerHolder> OperationTimeTrackerHolder::get =
OperationContext::declareDecoration<OperationTimeTrackerHolder>();
-}
+} // namespace
std::shared_ptr<OperationTimeTracker> OperationTimeTracker::get(OperationContext* opCtx) {
auto timeTrackerHolder = OperationTimeTrackerHolder::get(opCtx);
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 99ebccf0378..b26f583d460 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -50,4 +50,4 @@ long long deleteObjects(OperationContext* opCtx,
bool justOne,
bool god = false,
bool fromMigrate = false);
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index d891c998a7c..dfc841588dc 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -58,9 +58,9 @@ Status validateDepth(const BSONObj& obj) {
// We're exactly at the limit, so descending to the next level would exceed
// the maximum depth.
return {ErrorCodes::Overflow,
- str::stream() << "cannot insert document because it exceeds "
- << BSONDepth::getMaxDepthForUserStorage()
- << " levels of nesting"};
+ str::stream()
+ << "cannot insert document because it exceeds "
+ << BSONDepth::getMaxDepthForUserStorage() << " levels of nesting"};
}
frames.emplace_back(elem.embeddedObject());
}
@@ -78,10 +78,8 @@ StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* service, const BSONObj&
if (doc.objsize() > BSONObjMaxUserSize)
return StatusWith<BSONObj>(ErrorCodes::BadValue,
str::stream() << "object to insert too large"
- << ". size in bytes: "
- << doc.objsize()
- << ", max size: "
- << BSONObjMaxUserSize);
+ << ". size in bytes: " << doc.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
auto depthStatus = validateDepth(doc);
if (!depthStatus.isOK()) {
@@ -206,11 +204,9 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
if (db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen)
return Status(ErrorCodes::InvalidNamespace,
- str::stream() << "fully qualified namespace " << db << '.' << coll
- << " is too long "
- << "(max is "
- << NamespaceString::MaxNsCollectionLen
- << " bytes)");
+ str::stream()
+ << "fully qualified namespace " << db << '.' << coll << " is too long "
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)");
// check special areas
@@ -274,4 +270,4 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/insert.h b/src/mongo/db/ops/insert.h
index ebbf9738460..8bdcbadc281 100644
--- a/src/mongo/db/ops/insert.h
+++ b/src/mongo/db/ops/insert.h
@@ -58,4 +58,4 @@ Status userAllowedWriteNS(const NamespaceString& ns);
* operations. If not, returns an error Status.
*/
Status userAllowedCreateNS(StringData db, StringData coll);
-}
+} // namespace mongo
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index a506bb88c0c..a600f37a543 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -82,8 +82,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while creating collection "
- << nsString
- << " during upsert"));
+ << nsString << " during upsert"));
}
WriteUnitOfWork wuow(opCtx);
collection = db->createCollection(opCtx, nsString, CollectionOptions());
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index fc39b35d0c9..685672de251 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -365,8 +365,9 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
"hangDuringBatchInsert",
[&wholeOp]() {
log() << "batch insert - hangDuringBatchInsert fail point enabled for namespace "
- << wholeOp.getNamespace() << ". Blocking "
- "until fail point is disabled.";
+ << wholeOp.getNamespace()
+ << ". Blocking "
+ "until fail point is disabled.";
},
true, // Check for interrupt periodically.
wholeOp.getNamespace());
@@ -504,7 +505,6 @@ WriteResult performInserts(OperationContext* opCtx,
durationCount<Microseconds>(curOp.elapsedTimeExcludingPauses()),
curOp.isCommand(),
curOp.getReadWriteType());
-
});
{
@@ -861,7 +861,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
"until fail point is disabled.";
},
true // Check for interrupt periodically.
- );
+ );
if (MONGO_FAIL_POINT(failAllRemoves)) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
diff --git a/src/mongo/db/ops/write_ops_parsers.cpp b/src/mongo/db/ops/write_ops_parsers.cpp
index e63dbb500d9..935139adfda 100644
--- a/src/mongo/db/ops/write_ops_parsers.cpp
+++ b/src/mongo/db/ops/write_ops_parsers.cpp
@@ -39,11 +39,11 @@
namespace mongo {
+using write_ops::Delete;
+using write_ops::DeleteOpEntry;
using write_ops::Insert;
using write_ops::Update;
-using write_ops::Delete;
using write_ops::UpdateOpEntry;
-using write_ops::DeleteOpEntry;
namespace {
@@ -51,10 +51,7 @@ template <class T>
void checkOpCountForCommand(const T& op, size_t numOps) {
uassert(ErrorCodes::InvalidLength,
str::stream() << "Write batch sizes must be between 1 and "
- << write_ops::kMaxWriteBatchSize
- << ". Got "
- << numOps
- << " operations.",
+ << write_ops::kMaxWriteBatchSize << ". Got " << numOps << " operations.",
numOps != 0 && numOps <= write_ops::kMaxWriteBatchSize);
const auto& stmtIds = op.getWriteCommandBase().getStmtIds();
diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp
index e9499ecde08..b5074350ef4 100644
--- a/src/mongo/db/ops/write_ops_parsers_test.cpp
+++ b/src/mongo/db/ops/write_ops_parsers_test.cpp
@@ -44,9 +44,7 @@ TEST(CommandWriteOpsParsers, CommonFields_BypassDocumentValidation) {
for (BSONElement bypassDocumentValidation : BSON_ARRAY(true << false << 1 << 0 << 1.0 << 0.0)) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "bypassDocumentValidation"
+ << "documents" << BSON_ARRAY(BSONObj()) << "bypassDocumentValidation"
<< bypassDocumentValidation);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
@@ -61,10 +59,7 @@ TEST(CommandWriteOpsParsers, CommonFields_Ordered) {
for (bool ordered : {true, false}) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "ordered"
- << ordered);
+ << "documents" << BSON_ARRAY(BSONObj()) << "ordered" << ordered);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
auto op = InsertOp::parse(request);
@@ -77,14 +72,8 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
// These flags are ignored, so there is nothing to check other than that this doesn't throw.
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "maxTimeMS"
- << 1000
- << "shardVersion"
- << BSONObj()
- << "writeConcern"
- << BSONObj());
+ << "documents" << BSON_ARRAY(BSONObj()) << "maxTimeMS" << 1000 << "shardVersion"
+ << BSONObj() << "writeConcern" << BSONObj());
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
InsertOp::parse(request);
@@ -94,10 +83,7 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel_Body) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "GARBAGE"
- << BSON_ARRAY(BSONObj()));
+ << "documents" << BSON_ARRAY(BSONObj()) << "GARBAGE" << BSON_ARRAY(BSONObj()));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(InsertOp::parse(request), AssertionException);
@@ -105,12 +91,10 @@ TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel_Body) {
}
TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonField) {
- auto cmd = BSON("insert"
- << "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "documents"
- << BSON_ARRAY(BSONObj()));
+ auto cmd =
+ BSON("insert"
+ << "bar"
+ << "documents" << BSON_ARRAY(BSONObj()) << "documents" << BSON_ARRAY(BSONObj()));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(InsertOp::parse(request), AssertionException);
@@ -121,9 +105,7 @@ TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonFieldBetweenBodyAndSequence)
OpMsgRequest request;
request.body = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "$db"
+ << "documents" << BSON_ARRAY(BSONObj()) << "$db"
<< "foo");
request.sequences = {{"documents",
{
@@ -134,12 +116,10 @@ TEST(CommandWriteOpsParsers, ErrorOnDuplicateCommonFieldBetweenBodyAndSequence)
}
TEST(CommandWriteOpsParsers, ErrorOnWrongSizeStmtIdsArray) {
- auto cmd = BSON("insert"
- << "bar"
- << "documents"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "stmtIds"
- << BSON_ARRAY(12));
+ auto cmd =
+ BSON("insert"
+ << "bar"
+ << "documents" << BSON_ARRAY(BSONObj() << BSONObj()) << "stmtIds" << BSON_ARRAY(12));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(InsertOp::parse(request), AssertionException, ErrorCodes::InvalidLength);
@@ -149,12 +129,8 @@ TEST(CommandWriteOpsParsers, ErrorOnWrongSizeStmtIdsArray) {
TEST(CommandWriteOpsParsers, ErrorOnStmtIdSpecifiedTwoWays) {
auto cmd = BSON("insert"
<< "bar"
- << "documents"
- << BSON_ARRAY(BSONObj())
- << "stmtIds"
- << BSON_ARRAY(12)
- << "stmtId"
- << 13);
+ << "documents" << BSON_ARRAY(BSONObj()) << "stmtIds" << BSON_ARRAY(12)
+ << "stmtId" << 13);
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(
@@ -174,10 +150,10 @@ TEST(CommandWriteOpsParsers, GarbageFieldsInUpdateDoc) {
}
TEST(CommandWriteOpsParsers, GarbageFieldsInDeleteDoc) {
- auto cmd = BSON("delete"
- << "bar"
- << "deletes"
- << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1)));
+ auto cmd =
+ BSON("delete"
+ << "bar"
+ << "deletes" << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1)));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS(DeleteOp::parse(request), AssertionException);
@@ -324,12 +300,7 @@ TEST(CommandWriteOpsParsers, Update) {
for (bool multi : {false, true}) {
auto rawUpdate =
BSON("q" << query << "u" << update << "arrayFilters" << BSON_ARRAY(arrayFilter)
- << "multi"
- << multi
- << "upsert"
- << upsert
- << "collation"
- << collation);
+ << "multi" << multi << "upsert" << upsert << "collation" << collation);
auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate));
for (bool seq : {false, true}) {
auto request = toOpMsg(ns.db(), cmd, seq);
@@ -365,10 +336,8 @@ TEST(CommandWriteOpsParsers, UpdateWithPipeline) {
<< "en_US");
for (bool upsert : {false, true}) {
for (bool multi : {false, true}) {
- auto rawUpdate = BSON(
- "q" << query["q"] << "u" << update["u"] << "multi" << multi << "upsert" << upsert
- << "collation"
- << collation);
+ auto rawUpdate = BSON("q" << query["q"] << "u" << update["u"] << "multi" << multi
+ << "upsert" << upsert << "collation" << collation);
auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate));
for (bool seq : {false, true}) {
auto request = toOpMsg(ns.db(), cmd, seq);
@@ -423,8 +392,7 @@ TEST(CommandWriteOpsParsers, RemoveErrorsWithBadLimit) {
for (BSONElement limit : BSON_ARRAY(-1 << 2 << 0.5)) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes"
- << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << limit)));
+ << "deletes" << BSON_ARRAY(BSON("q" << BSONObj() << "limit" << limit)));
for (bool seq : {false, true}) {
auto request = toOpMsg("foo", cmd, seq);
ASSERT_THROWS_CODE(
diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp
index 32a160d433c..866385c73fe 100644
--- a/src/mongo/db/ops/write_ops_retryability.cpp
+++ b/src/mongo/db/ops/write_ops_retryability.cpp
@@ -56,11 +56,8 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
40606,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
request.isRemove());
uassert(40607,
str::stream() << "No pre-image available for findAndModify retry request:"
@@ -71,22 +68,16 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
40608,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
request.isUpsert());
} else {
uassert(
40609,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " is not compatible with previous write in the transaction of type: "
- << OpType_serializer(oplogEntry.getOpType())
- << ", oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << OpType_serializer(oplogEntry.getOpType()) << ", oplogTs: "
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
opType == repl::OpTypeEnum::kUpdate);
if (request.shouldReturnNew()) {
@@ -94,18 +85,14 @@ void validateFindAndModifyRetryability(const FindAndModifyRequest& request,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " wants the document after update returned, but only before "
"update document is stored, oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
oplogWithCorrectLinks.getPostImageOpTime());
} else {
uassert(40612,
str::stream() << "findAndModify retry request: " << redact(request.toBSON({}))
<< " wants the document before update returned, but only after "
"update document is stored, oplogTs: "
- << ts.toString()
- << ", oplog: "
- << redact(oplogEntry.toBSON()),
+ << ts.toString() << ", oplog: " << redact(oplogEntry.toBSON()),
oplogWithCorrectLinks.getPreImageOpTime());
}
}
@@ -129,8 +116,7 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
uassert(40613,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << opTime.toString()
- << " cannot be found",
+ << opTime.toString() << " cannot be found",
!oplogDoc.isEmpty());
auto oplogEntry = uassertStatusOK(repl::OplogEntry::parse(oplogDoc));
@@ -172,8 +158,7 @@ repl::OplogEntry getInnerNestedOplogEntry(const repl::OplogEntry& entry) {
uassert(40635,
str::stream() << "expected nested oplog entry with ts: "
<< entry.getTimestamp().toString()
- << " to have o2 field: "
- << redact(entry.toBSON()),
+ << " to have o2 field: " << redact(entry.toBSON()),
entry.getObject2());
return uassertStatusOK(repl::OplogEntry::parse(*entry.getObject2()));
}
@@ -200,10 +185,8 @@ SingleWriteResult parseOplogEntryForUpdate(const repl::OplogEntry& entry) {
str::stream() << "update retry request is not compatible with previous write in "
"the transaction of type: "
<< OpType_serializer(entry.getOpType())
- << ", oplogTs: "
- << entry.getTimestamp().toString()
- << ", oplog: "
- << redact(entry.toBSON()));
+ << ", oplogTs: " << entry.getTimestamp().toString()
+ << ", oplog: " << redact(entry.toBSON()));
}
return res;
diff --git a/src/mongo/db/ops/write_ops_retryability_test.cpp b/src/mongo/db/ops/write_ops_retryability_test.cpp
index 05c4828dae1..550744fa95c 100644
--- a/src/mongo/db/ops/write_ops_retryability_test.cpp
+++ b/src/mongo/db/ops/write_ops_retryability_test.cpp
@@ -78,15 +78,12 @@ repl::OplogEntry makeOplogEntry(repl::OpTime opTime,
}
TEST_F(WriteOpsRetryability, ParseOplogEntryForUpdate) {
- const auto entry =
- assertGet(repl::OplogEntry::parse(BSON("ts" << Timestamp(50, 10) << "t" << 1LL << "op"
- << "u"
- << "ns"
- << "a.b"
- << "o"
- << BSON("_id" << 1 << "x" << 5)
- << "o2"
- << BSON("_id" << 1))));
+ const auto entry = assertGet(repl::OplogEntry::parse(
+ BSON("ts" << Timestamp(50, 10) << "t" << 1LL << "op"
+ << "u"
+ << "ns"
+ << "a.b"
+ << "o" << BSON("_id" << 1 << "x" << 5) << "o2" << BSON("_id" << 1))));
auto res = parseOplogEntryForUpdate(entry);
@@ -120,8 +117,7 @@ TEST_F(WriteOpsRetryability, ParseOplogEntryForUpsert) {
<< "i"
<< "ns"
<< "a.b"
- << "o"
- << BSON("_id" << 1 << "x" << 5))));
+ << "o" << BSON("_id" << 1 << "x" << 5))));
auto res = parseOplogEntryForUpdate(entry);
@@ -187,8 +183,7 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnNew) {
kNs, // namespace
BSON("_id"
<< "ID value"
- << "x"
- << 1)); // o
+ << "x" << 1)); // o
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
@@ -197,8 +192,7 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnNew) {
<< "value"
<< BSON("_id"
<< "ID value"
- << "x"
- << 1)),
+ << "x" << 1)),
result);
}
@@ -212,15 +206,13 @@ TEST_F(FindAndModifyRetryability, BasicUpsertReturnOld) {
kNs, // namespace
BSON("_id"
<< "ID value"
- << "x"
- << 1)); // o
+ << "x" << 1)); // o
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
<< BSON("n" << 1 << "updatedExisting" << false << "upserted"
<< "ID value")
- << "value"
- << BSONNULL),
+ << "value" << BSONNULL),
result);
}
@@ -242,8 +234,7 @@ TEST_F(FindAndModifyRetryability, NestedUpsert) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, insertOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject"
<< BSON("n" << 1 << "updatedExisting" << false << "upserted" << 1)
- << "value"
- << BSON("_id" << 1)),
+ << "value" << BSON("_id" << 1)),
result);
}
@@ -353,8 +344,7 @@ TEST_F(FindAndModifyRetryability, UpdateWithPreImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("_id" << 1 << "z" << 1)),
+ << "value" << BSON("_id" << 1 << "z" << 1)),
result);
}
@@ -386,8 +376,7 @@ TEST_F(FindAndModifyRetryability, NestedUpdateWithPreImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("_id" << 1 << "z" << 1)),
+ << "value" << BSON("_id" << 1 << "z" << 1)),
result);
}
@@ -413,8 +402,7 @@ TEST_F(FindAndModifyRetryability, UpdateWithPostImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("a" << 1 << "b" << 1)),
+ << "value" << BSON("a" << 1 << "b" << 1)),
result);
}
@@ -446,8 +434,7 @@ TEST_F(FindAndModifyRetryability, NestedUpdateWithPostImage) {
auto result = constructFindAndModifyRetryResult(opCtx(), request, updateOplog);
ASSERT_BSONOBJ_EQ(BSON("lastErrorObject" << BSON("n" << 1 << "updatedExisting" << true)
- << "value"
- << BSON("a" << 1 << "b" << 1)),
+ << "value" << BSON("a" << 1 << "b" << 1)),
result);
}
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
index 0f55d053fb3..1e4dbc1c303 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
@@ -108,15 +108,15 @@ void PeriodicThreadToAbortExpiredTransactions::_init(ServiceContext* serviceCont
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
- TransactionParticipant::observeTransactionLifetimeLimitSeconds.addObserver([anchor = _anchor](
- const Argument& secs) {
- try {
- anchor->setPeriod(getPeriod(secs));
- } catch (const DBException& ex) {
- log() << "Failed to update period of thread which aborts expired transactions "
- << ex.toStatus();
- }
- });
+ TransactionParticipant::observeTransactionLifetimeLimitSeconds.addObserver(
+ [anchor = _anchor](const Argument& secs) {
+ try {
+ anchor->setPeriod(getPeriod(secs));
+ } catch (const DBException& ex) {
+ log() << "Failed to update period of thread which aborts expired transactions "
+ << ex.toStatus();
+ }
+ });
}
} // namespace mongo
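This hunk and the next show clang-format-7.0.1's treatment of multi-line lambda arguments: rather than hanging the capture list at the call's opening parenthesis, it breaks immediately after the `(` so the entire lambda, capture list included, indents as one block. A sketch with `observable` and `anchor` as hypothetical stand-ins:

    observable.addObserver(
        [anchor = _anchor](const Argument& secs) {
            // the body indents uniformly under the lambda introducer
            anchor->setPeriod(getPeriod(secs));
        });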
diff --git a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
index a550f9a3624..252277130e0 100644
--- a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
+++ b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.cpp
@@ -92,16 +92,16 @@ void PeriodicThreadToDecreaseSnapshotHistoryIfNotNeeded::_init(ServiceContext* s
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
- SnapshotWindowParams::observeCheckCachePressurePeriodSeconds.addObserver([anchor = _anchor](
- const auto& secs) {
- try {
- anchor->setPeriod(Seconds(secs));
- } catch (const DBException& ex) {
- log() << "Failed to update the period of the thread which decreases data history "
- "target window size if there have been no new SnapshotTooOld errors."
- << ex.toStatus();
- }
- });
+ SnapshotWindowParams::observeCheckCachePressurePeriodSeconds.addObserver(
+ [anchor = _anchor](const auto& secs) {
+ try {
+ anchor->setPeriod(Seconds(secs));
+ } catch (const DBException& ex) {
+ log() << "Failed to update the period of the thread which decreases data history "
+ "target window size if there have been no new SnapshotTooOld errors."
+ << ex.toStatus();
+ }
+ });
}
} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
index 801c356020f..80693dbf739 100644
--- a/src/mongo/db/pipeline/accumulator.h
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -351,4 +351,4 @@ public:
private:
MutableDocument _output;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
index 38946678389..43550e9e361 100644
--- a/src/mongo/db/pipeline/accumulator_avg.cpp
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -134,4 +134,4 @@ void AccumulatorAvg::reset() {
_decimalTotal = {};
_count = 0;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
index 3e452f1d0e6..6fcc334af83 100644
--- a/src/mongo/db/pipeline/accumulator_first.cpp
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -74,4 +74,4 @@ intrusive_ptr<Accumulator> AccumulatorFirst::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorFirst(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
index 3c667d16d53..4774abca5e9 100644
--- a/src/mongo/db/pipeline/accumulator_last.cpp
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -68,4 +68,4 @@ intrusive_ptr<Accumulator> AccumulatorLast::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorLast(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_merge_objects.cpp b/src/mongo/db/pipeline/accumulator_merge_objects.cpp
index 4f8ef357f35..8878ff97676 100644
--- a/src/mongo/db/pipeline/accumulator_merge_objects.cpp
+++ b/src/mongo/db/pipeline/accumulator_merge_objects.cpp
@@ -71,8 +71,7 @@ void AccumulatorMergeObjects::processInternal(const Value& input, bool merging)
uassert(40400,
str::stream() << "$mergeObjects requires object inputs, but input " << input.toString()
- << " is of type "
- << typeName(input.getType()),
+ << " is of type " << typeName(input.getType()),
(input.getType() == BSONType::Object));
FieldIterator iter = input.getDocument().fieldIterator();
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
index d81403eac85..496d9d94220 100644
--- a/src/mongo/db/pipeline/accumulator_min_max.cpp
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -89,4 +89,4 @@ intrusive_ptr<Accumulator> AccumulatorMax::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorMax(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
index becb6828635..5c1f640cef8 100644
--- a/src/mongo/db/pipeline/accumulator_push.cpp
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -86,4 +86,4 @@ intrusive_ptr<Accumulator> AccumulatorPush::create(
const boost::intrusive_ptr<ExpressionContext>& expCtx) {
return new AccumulatorPush(expCtx);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator_std_dev.cpp b/src/mongo/db/pipeline/accumulator_std_dev.cpp
index a10da2a41c0..a2bce628539 100644
--- a/src/mongo/db/pipeline/accumulator_std_dev.cpp
+++ b/src/mongo/db/pipeline/accumulator_std_dev.cpp
@@ -118,4 +118,4 @@ void AccumulatorStdDev::reset() {
_mean = 0;
_m2 = 0;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index 6cf7b38c573..5d550eb7c8b 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -131,8 +131,7 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
if (elem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << repl::ReadConcernArgs::kReadConcernFieldName
- << " must be an object, not a "
- << typeName(elem.type())};
+ << " must be an object, not a " << typeName(elem.type())};
}
request.setReadConcern(elem.embeddedObject().getOwned());
} else if (kHintName == fieldName) {
@@ -214,8 +213,8 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
} else if (WriteConcernOptions::kWriteConcernField == fieldName) {
if (elem.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << fieldName << " must be an object, not a "
- << typeName(elem.type())};
+ str::stream()
+ << fieldName << " must be an object, not a " << typeName(elem.type())};
}
WriteConcernOptions writeConcern;
@@ -250,23 +249,20 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
if (!hasCursorElem && !hasExplainElem) {
return {ErrorCodes::FailedToParse,
str::stream()
- << "The '"
- << kCursorName
+ << "The '" << kCursorName
<< "' option is required, except for aggregate with the explain argument"};
}
if (request.getExplain() && cmdObj[WriteConcernOptions::kWriteConcernField]) {
return {ErrorCodes::FailedToParse,
str::stream() << "Aggregation explain does not support the'"
- << WriteConcernOptions::kWriteConcernField
- << "' option"};
+ << WriteConcernOptions::kWriteConcernField << "' option"};
}
if (hasNeedsMergeElem && !hasFromMongosElem) {
return {ErrorCodes::FailedToParse,
str::stream() << "Cannot specify '" << kNeedsMergeName << "' without '"
- << kFromMongosName
- << "'"};
+ << kFromMongosName << "'"};
}
return request;
diff --git a/src/mongo/db/pipeline/dependencies.cpp b/src/mongo/db/pipeline/dependencies.cpp
index 6bfdc19bdce..1586a68f96b 100644
--- a/src/mongo/db/pipeline/dependencies.cpp
+++ b/src/mongo/db/pipeline/dependencies.cpp
@@ -282,4 +282,4 @@ Document documentHelper(const BSONObj& bson, const Document& neededFields, int n
Document ParsedDeps::extractFields(const BSONObj& input) const {
return documentHelper(input, _fields, _nFields);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/dependencies.h b/src/mongo/db/pipeline/dependencies.h
index b7e31a6237b..3487584a4a0 100644
--- a/src/mongo/db/pipeline/dependencies.h
+++ b/src/mongo/db/pipeline/dependencies.h
@@ -205,4 +205,4 @@ private:
Document _fields;
int _nFields; // Cache the number of top-level fields needed.
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/dependencies_test.cpp b/src/mongo/db/pipeline/dependencies_test.cpp
index 2fdf25c799b..6d2741a78e4 100644
--- a/src/mongo/db/pipeline/dependencies_test.cpp
+++ b/src/mongo/db/pipeline/dependencies_test.cpp
@@ -147,8 +147,7 @@ TEST(DependenciesToProjectionTest, ShouldAttemptToExcludeOtherFieldsIfOnlyTextSc
deps.setNeedsMetadata(DepsTracker::MetadataType::TEXT_SCORE, true);
ASSERT_BSONOBJ_EQ(deps.toProjection(),
BSON(Document::metaFieldTextScore << metaTextScore << "_id" << 0
- << "$noFieldsNeeded"
- << 1));
+ << "$noFieldsNeeded" << 1));
}
TEST(DependenciesToProjectionTest,
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index 1b8d8ecb5cb..17f3110ba9d 100644
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -288,8 +288,7 @@ BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Document& d
void Document::toBson(BSONObjBuilder* builder, size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
@@ -587,4 +586,4 @@ Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeS
return doc.freeze();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index f465ecce0e2..0ad560e888e 100644
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -770,4 +770,4 @@ inline MutableValue MutableValue::getField(Position pos) {
inline MutableValue MutableValue::getField(StringData key) {
return MutableDocument(*this).getField(key);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index 1d2b1b58951..c922219d000 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -481,4 +481,4 @@ private:
// Defined in document.cpp
static const DocumentStorage kEmptyDoc;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_add_fields.cpp b/src/mongo/db/pipeline/document_source_add_fields.cpp
index 319ef9776c6..2c05a15766a 100644
--- a/src/mongo/db/pipeline/document_source_add_fields.cpp
+++ b/src/mongo/db/pipeline/document_source_add_fields.cpp
@@ -74,4 +74,4 @@ intrusive_ptr<DocumentSource> DocumentSourceAddFields::createFromBson(
return DocumentSourceAddFields::create(elem.Obj(), expCtx, specifiedName);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_bucket.cpp b/src/mongo/db/pipeline/document_source_bucket.cpp
index e7efd9b202e..3245d21b742 100644
--- a/src/mongo/db/pipeline/document_source_bucket.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket.cpp
@@ -37,8 +37,8 @@
namespace mongo {
using boost::intrusive_ptr;
-using std::vector;
using std::list;
+using std::vector;
REGISTER_MULTI_STAGE_ALIAS(bucket,
LiteParsedDocumentSourceDefault::parse,
@@ -58,8 +58,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
uassert(40201,
str::stream() << "Argument to $bucket stage must be an object, but found type: "
- << typeName(elem.type())
- << ".",
+ << typeName(elem.type()) << ".",
elem.type() == BSONType::Object);
const BSONObj bucketObj = elem.embeddedObject();
@@ -86,15 +85,13 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40202,
str::stream() << "The $bucket 'groupBy' field must be defined as a $-prefixed "
"path or an expression, but found: "
- << groupByField.toString(false, false)
- << ".",
+ << groupByField.toString(false, false) << ".",
groupByIsExpressionInObject || groupByIsPrefixedPath);
} else if ("boundaries" == argName) {
uassert(
40200,
str::stream() << "The $bucket 'boundaries' field must be an array, but found type: "
- << typeName(argument.type())
- << ".",
+ << typeName(argument.type()) << ".",
argument.type() == BSONType::Array);
for (auto&& boundaryElem : argument.embeddedObject()) {
@@ -102,8 +99,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40191,
str::stream() << "The $bucket 'boundaries' field must be an array of "
"constant values, but found value: "
- << boundaryElem.toString(false, false)
- << ".",
+ << boundaryElem.toString(false, false) << ".",
exprConst);
boundaryValues.push_back(exprConst->getValue());
}
@@ -111,8 +107,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40192,
str::stream()
<< "The $bucket 'boundaries' field must have at least 2 values, but found "
- << boundaryValues.size()
- << " value(s).",
+ << boundaryValues.size() << " value(s).",
boundaryValues.size() >= 2);
// Make sure that the boundaries are unique, sorted in ascending order, and have the
@@ -126,22 +121,14 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40193,
str::stream() << "All values in the the 'boundaries' option to $bucket "
"must have the same type. Found conflicting types "
- << typeName(lower.getType())
- << " and "
- << typeName(upper.getType())
- << ".",
+ << typeName(lower.getType()) << " and "
+ << typeName(upper.getType()) << ".",
lowerCanonicalType == upperCanonicalType);
uassert(40194,
str::stream()
<< "The 'boundaries' option to $bucket must be sorted, but elements "
- << i - 1
- << " and "
- << i
- << " are not in ascending order ("
- << lower.toString()
- << " is not less than "
- << upper.toString()
- << ").",
+ << i - 1 << " and " << i << " are not in ascending order ("
+ << lower.toString() << " is not less than " << upper.toString() << ").",
pExpCtx->getValueComparator().evaluate(lower < upper));
}
} else if ("default" == argName) {
@@ -151,8 +138,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(40195,
str::stream()
<< "The $bucket 'default' field must be a constant expression, but found: "
- << argument.toString(false, false)
- << ".",
+ << argument.toString(false, false) << ".",
exprConst);
defaultValue = exprConst->getValue();
@@ -162,8 +148,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
uassert(
40196,
str::stream() << "The $bucket 'output' field must be an object, but found type: "
- << typeName(argument.type())
- << ".",
+ << typeName(argument.type()) << ".",
argument.type() == BSONType::Object);
for (auto&& outputElem : argument.embeddedObject()) {
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
index da86580ef02..ffed55cd488 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp
@@ -51,10 +51,10 @@
namespace mongo {
namespace {
+using boost::intrusive_ptr;
using std::deque;
-using std::vector;
using std::string;
-using boost::intrusive_ptr;
+using std::vector;
class BucketAutoTests : public AggregationContextFixture {
public:
diff --git a/src/mongo/db/pipeline/document_source_change_stream.cpp b/src/mongo/db/pipeline/document_source_change_stream.cpp
index 5e5861f2971..9050b9990dd 100644
--- a/src/mongo/db/pipeline/document_source_change_stream.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream.cpp
@@ -147,9 +147,7 @@ void DocumentSourceChangeStream::checkValueType(const Value v,
BSONType expectedType) {
uassert(40532,
str::stream() << "Entry field \"" << filedName << "\" should be "
- << typeName(expectedType)
- << ", found: "
- << typeName(v.getType()),
+ << typeName(expectedType) << ", found: " << typeName(v.getType()),
(v.getType() == expectedType));
}
@@ -402,11 +400,12 @@ list<intrusive_ptr<DocumentSource>> buildPipeline(const intrusive_ptr<Expression
// There might not be a starting point if we're on mongos, otherwise we should either have a
// 'resumeAfter' starting point, or should start from the latest majority committed operation.
auto replCoord = repl::ReplicationCoordinator::get(expCtx->opCtx);
- uassert(40573,
- "The $changeStream stage is only supported on replica sets",
- expCtx->inMongos || (replCoord &&
- replCoord->getReplicationMode() ==
- repl::ReplicationCoordinator::Mode::modeReplSet));
+ uassert(
+ 40573,
+ "The $changeStream stage is only supported on replica sets",
+ expCtx->inMongos ||
+ (replCoord &&
+ replCoord->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet));
if (!startFrom && !expCtx->inMongos) {
startFrom = replCoord->getMyLastAppliedOpTime().getTimestamp();
}
@@ -464,8 +463,7 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceChangeStream::createFromBson(
str::stream() << "unrecognized value for the 'fullDocument' option to the "
"$changeStream stage. Expected \"default\" or "
"\"updateLookup\", got \""
- << fullDocOption
- << "\"",
+ << fullDocOption << "\"",
fullDocOption == "updateLookup"_sd || fullDocOption == "default"_sd);
const bool shouldLookupPostImage = (fullDocOption == "updateLookup"_sd);
diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
index 32af6d5ef2e..501045ceadf 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
@@ -62,8 +62,8 @@ namespace mongo {
namespace {
using boost::intrusive_ptr;
-using repl::OpTypeEnum;
using repl::OplogEntry;
+using repl::OpTypeEnum;
using std::list;
using std::string;
using std::vector;
@@ -423,8 +423,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndResumeAfter
BSON(DSChangeStream::kStageName
<< BSON("resumeAfter"
<< makeResumeToken(kDefaultTs, testUuid(), BSON("x" << 2 << "_id" << 1))
- << "startAtOperationTime"
- << kDefaultTs))
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -467,8 +466,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndStartAfterO
BSON(DSChangeStream::kStageName
<< BSON("startAfter"
<< makeResumeToken(kDefaultTs, testUuid(), BSON("x" << 2 << "_id" << 1))
- << "startAtOperationTime"
- << kDefaultTs))
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -629,7 +627,8 @@ TEST_F(ChangeStreamStageTest, TransformUpdateFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -655,7 +654,8 @@ TEST_F(ChangeStreamStageTest, TransformUpdateFieldsLegacyNoId) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"x", 1}, {"y", 1}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -679,7 +679,8 @@ TEST_F(ChangeStreamStageTest, TransformRemoveFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{{"_id", 1}, {"x", 2}}}},
{
- "updateDescription", D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
+ "updateDescription",
+ D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
}};
checkTransformation(removeField, expectedRemoveField);
}
@@ -1374,7 +1375,8 @@ TEST_F(ChangeStreamStageTest, ClusterTimeMatchesOplogEntry) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}},
{
- "updateDescription", D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
+ "updateDescription",
+ D{{"updatedFields", D{{"y", 1}}}, {"removedFields", vector<V>()}},
},
};
checkTransformation(updateField, expectedUpdateField);
@@ -1659,9 +1661,9 @@ TEST_F(ChangeStreamStageTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
ResumeTokenData::FromInvalidate::kFromInvalidate);
ASSERT_THROWS_CODE(DSChangeStream::createFromBson(
- BSON(DSChangeStream::kStageName << BSON(
- "resumeAfter" << resumeTokenInvalidate << "startAtOperationTime"
- << kDefaultTs))
+ BSON(DSChangeStream::kStageName
+ << BSON("resumeAfter" << resumeTokenInvalidate
+ << "startAtOperationTime" << kDefaultTs))
.firstElement(),
expCtx),
AssertionException,
@@ -1877,7 +1879,8 @@ TEST_F(ChangeStreamStageDBTest, TransformRemoveFields) {
{DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}},
{DSChangeStream::kDocumentKeyField, D{{{"_id", 1}, {"x", 2}}}},
{
- "updateDescription", D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
+ "updateDescription",
+ D{{"updatedFields", D{}}, {"removedFields", vector<V>{V("y"_sd)}}},
}};
checkTransformation(removeField, expectedRemoveField);
}
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp
index df6063dad52..9afc1c730e3 100644
--- a/src/mongo/db/pipeline/document_source_coll_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp
@@ -62,28 +62,23 @@ intrusive_ptr<DocumentSource> DocumentSourceCollStats::createFromBson(
if ("latencyStats" == fieldName) {
uassert(40167,
str::stream() << "latencyStats argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
if (!elem["histograms"].eoo()) {
uassert(40305,
str::stream() << "histograms option to latencyStats must be bool, got "
- << elem
- << "of type "
- << typeName(elem.type()),
+ << elem << "of type " << typeName(elem.type()),
elem["histograms"].isBoolean());
}
} else if ("storageStats" == fieldName) {
uassert(40279,
str::stream() << "storageStats argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
} else if ("count" == fieldName) {
uassert(40480,
str::stream() << "count argument must be an object, but got " << elem
- << " of type "
- << typeName(elem.type()),
+ << " of type " << typeName(elem.type()),
elem.type() == BSONType::Object);
} else {
uasserted(40168, str::stream() << "unrecognized option to $collStats: " << fieldName);
@@ -144,8 +139,8 @@ DocumentSource::GetNextResult DocumentSourceCollStats::getNext() {
pExpCtx->opCtx, pExpCtx->ns, &builder);
if (!status.isOK()) {
uasserted(40481,
- str::stream() << "Unable to retrieve count in $collStats stage: "
- << status.reason());
+ str::stream()
+ << "Unable to retrieve count in $collStats stage: " << status.reason());
}
}
diff --git a/src/mongo/db/pipeline/document_source_current_op.cpp b/src/mongo/db/pipeline/document_source_current_op.cpp
index 0010bd25ce3..cbdf5eae988 100644
--- a/src/mongo/db/pipeline/document_source_current_op.cpp
+++ b/src/mongo/db/pipeline/document_source_current_op.cpp
@@ -153,9 +153,7 @@ DocumentSource::GetNextResult DocumentSourceCurrentOp::getNext() {
if (fieldName == kOpIdFieldName) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "expected numeric opid for $currentOp response from '"
- << _shardName
- << "' but got: "
- << typeName(elt.type()),
+ << _shardName << "' but got: " << typeName(elt.type()),
elt.isNumber());
std::string shardOpID = (str::stream() << _shardName << ":" << elt.numberInt());
@@ -247,8 +245,8 @@ intrusive_ptr<DocumentSource> DocumentSourceCurrentOp::createFromBson(
(elem.boolean() ? CursorMode::kIncludeCursors : CursorMode::kExcludeCursors);
} else {
uasserted(ErrorCodes::FailedToParse,
- str::stream() << "Unrecognized option '" << fieldName
- << "' in $currentOp stage.");
+ str::stream()
+ << "Unrecognized option '" << fieldName << "' in $currentOp stage.");
}
}
diff --git a/src/mongo/db/pipeline/document_source_current_op.h b/src/mongo/db/pipeline/document_source_current_op.h
index 44055dcb5ad..0aa281f56c6 100644
--- a/src/mongo/db/pipeline/document_source_current_op.h
+++ b/src/mongo/db/pipeline/document_source_current_op.h
@@ -82,8 +82,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 1d971d188a8..491fa942be3 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -335,4 +335,4 @@ intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
new DocumentSourceCursor(collection, std::move(exec), pExpCtx, trackOplogTimestamp));
return source;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index f37bbe62cdb..93cb6771f35 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -124,9 +124,7 @@ Exchange::Exchange(ExchangeSpec spec, std::unique_ptr<Pipeline, PipelineDeleter>
uassert(50951,
str::stream() << "Specified exchange buffer size (" << _maxBufferSize
- << ") exceeds the maximum allowable amount ("
- << kMaxBufferSize
- << ").",
+ << ") exceeds the maximum allowable amount (" << kMaxBufferSize << ").",
_maxBufferSize <= kMaxBufferSize);
for (int idx = 0; idx < _spec.getConsumers(); ++idx) {
@@ -205,8 +203,7 @@ std::vector<size_t> Exchange::extractConsumerIds(
uassert(50950,
str::stream() << "Specified number of exchange consumers (" << nConsumers
- << ") exceeds the maximum allowable amount ("
- << kMaxNumberConsumers
+ << ") exceeds the maximum allowable amount (" << kMaxNumberConsumers
<< ").",
nConsumers <= kMaxNumberConsumers);
@@ -411,8 +408,9 @@ size_t Exchange::getTargetConsumer(const Document& input) {
}
if (elem.type() == BSONType::String && elem.str() == "hashed") {
- kb << "" << BSONElementHasher::hash64(BSON("" << value).firstElement(),
- BSONElementHasher::DEFAULT_HASH_SEED);
+ kb << ""
+ << BSONElementHasher::hash64(BSON("" << value).firstElement(),
+ BSONElementHasher::DEFAULT_HASH_SEED);
} else {
kb << "" << value;
}
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index cd66171a246..ef4f626e7b6 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -556,7 +556,6 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
ThreadInfo* threadInfo = &threads[id];
auto handle = _executor->scheduleWork(
[threadInfo, &processedDocs](const executor::TaskExecutor::CallbackArgs& cb) {
-
DocumentSourceExchange* exchange = threadInfo->documentSourceExchange.get();
const auto getNext = [exchange, threadInfo]() {
// Will acquire 'artificalGlobalMutex'. Within getNext() it will be released and
@@ -652,8 +651,7 @@ TEST_F(DocumentSourceExchangeTest, RangeRandomHashExchangeNConsumer) {
TEST_F(DocumentSourceExchangeTest, RejectNoConsumers) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 0);
+ << "consumers" << 0);
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -663,10 +661,7 @@ TEST_F(DocumentSourceExchangeTest, RejectNoConsumers) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKey) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 2));
+ << "consumers" << 1 << "key" << BSON("a" << 2));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -676,9 +671,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKey) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyHashExpected) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
+ << "consumers" << 1 << "key"
<< BSON("a"
<< "nothash"));
ASSERT_THROWS_CODE(
@@ -690,10 +683,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyHashExpected) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyWrongType) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << true));
+ << "consumers" << 1 << "key" << BSON("a" << true));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -703,10 +693,7 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyWrongType) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyEmpty) {
BSONObj spec = BSON("policy"
<< "broadcast"
- << "consumers"
- << 1
- << "key"
- << BSON("" << 1));
+ << "consumers" << 1 << "key" << BSON("" << 1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
AssertionException,
@@ -716,13 +703,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidKeyEmpty) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundaries) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MAXKEY) << BSON("a" << MINKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MAXKEY) << BSON("a" << MINKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -733,13 +715,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundaries) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMin) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << 0) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << 0) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -750,13 +727,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMin) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMax) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << 0))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << 0)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -767,13 +739,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesMissingMax) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesAndConsumerIds) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 2
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 2 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0 << 1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -784,13 +751,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidBoundariesAndConsumerIds) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidPolicyBoundaries) {
BSONObj spec = BSON("policy"
<< "roundrobin"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -801,13 +763,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidPolicyBoundaries) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidConsumerIds) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "key"
- << BSON("a" << 1)
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "key" << BSON("a" << 1) << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(1));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
@@ -818,11 +775,8 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidConsumerIds) {
TEST_F(DocumentSourceExchangeTest, RejectInvalidMissingKeys) {
BSONObj spec = BSON("policy"
<< "keyRange"
- << "consumers"
- << 1
- << "boundaries"
- << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY))
- << "consumerIds"
+ << "consumers" << 1 << "boundaries"
+ << BSON_ARRAY(BSON("a" << MINKEY) << BSON("a" << MAXKEY)) << "consumerIds"
<< BSON_ARRAY(0));
ASSERT_THROWS_CODE(
Exchange(parseSpec(spec), unittest::assertGet(Pipeline::create({}, getExpCtx()))),
diff --git a/src/mongo/db/pipeline/document_source_facet.cpp b/src/mongo/db/pipeline/document_source_facet.cpp
index 6974dfc44f2..e429878d7c5 100644
--- a/src/mongo/db/pipeline/document_source_facet.cpp
+++ b/src/mongo/db/pipeline/document_source_facet.cpp
@@ -94,11 +94,8 @@ vector<pair<string, vector<BSONObj>>> extractRawPipelines(const BSONElement& ele
for (auto&& subPipeElem : facetElem.Obj()) {
uassert(40171,
str::stream() << "elements of arrays in $facet spec must be non-empty objects, "
- << facetName
- << " argument contained an element of type "
- << typeName(subPipeElem.type())
- << ": "
- << subPipeElem,
+ << facetName << " argument contained an element of type "
+ << typeName(subPipeElem.type()) << ": " << subPipeElem,
subPipeElem.type() == BSONType::Object);
rawPipeline.push_back(subPipeElem.embeddedObject());
}
@@ -351,8 +348,7 @@ intrusive_ptr<DocumentSource> DocumentSourceFacet::createFromBson(
}
uassert(ErrorCodes::IllegalOperation,
str::stream() << "$facet pipeline '" << *needsMongoS
- << "' must run on mongoS, but '"
- << *needsShard
+ << "' must run on mongoS, but '" << *needsShard
<< "' requires a shard",
!(needsShard && needsMongoS));
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index 9f1091fc0e3..a989f8f389d 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -210,8 +210,7 @@ void DocumentSourceGraphLookUp::doBreadthFirstSearch() {
while (auto next = pipeline->getNext()) {
uassert(40271,
str::stream()
- << "Documents in the '"
- << _from.ns()
+ << "Documents in the '" << _from.ns()
<< "' namespace must contain an _id for de-duplication in $graphLookup",
!(*next)["_id"].missing());
@@ -391,10 +390,8 @@ void DocumentSourceGraphLookUp::serializeToArray(
std::vector<Value>& array, boost::optional<ExplainOptions::Verbosity> explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.fullPath() << "connectToField"
- << _connectToField.fullPath()
- << "connectFromField"
- << _connectFromField.fullPath()
- << "startWith"
+ << _connectToField.fullPath() << "connectFromField"
+ << _connectFromField.fullPath() << "startWith"
<< _startWith->serialize(false)));
// depthField is optional; serialize it if it was specified.
@@ -413,10 +410,10 @@ void DocumentSourceGraphLookUp::serializeToArray(
// If we are explaining, include an absorbed $unwind inside the $graphLookup specification.
if (_unwind && explain) {
const boost::optional<FieldPath> indexPath = (*_unwind)->indexPath();
- spec["unwinding"] = Value(DOC("preserveNullAndEmptyArrays"
- << (*_unwind)->preserveNullAndEmptyArrays()
- << "includeArrayIndex"
- << (indexPath ? Value((*indexPath).fullPath()) : Value())));
+ spec["unwinding"] =
+ Value(DOC("preserveNullAndEmptyArrays"
+ << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << (indexPath ? Value((*indexPath).fullPath()) : Value())));
}
array.push_back(Value(DOC(getSourceName() << spec.freeze())));
@@ -549,8 +546,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
argName == "depthField" || argName == "connectToField") {
// All remaining arguments to $graphLookup are expected to be strings.
uassert(40103,
- str::stream() << "expected string as argument for " << argName << ", found: "
- << argument.toString(false, false),
+ str::stream() << "expected string as argument for " << argName
+ << ", found: " << argument.toString(false, false),
argument.type() == String);
}
@@ -566,8 +563,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
depthField = boost::optional<FieldPath>(FieldPath(argument.String()));
} else {
uasserted(40104,
- str::stream() << "Unknown argument to $graphLookup: "
- << argument.fieldName());
+ str::stream()
+ << "Unknown argument to $graphLookup: " << argument.fieldName());
}
}
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
index 0e402da49a1..27b364ca2cd 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp
@@ -247,10 +247,8 @@ TEST_F(DocumentSourceGraphLookUpTest,
ASSERT(next.isEOF());
} else {
FAIL(str::stream() << "Expected either [ " << to0from1.toString() << " ] or [ "
- << to0from2.toString()
- << " ] but found [ "
- << next.getDocument().toString()
- << " ]");
+ << to0from2.toString() << " ] but found [ "
+ << next.getDocument().toString() << " ]");
}
}
diff --git a/src/mongo/db/pipeline/document_source_group_test.cpp b/src/mongo/db/pipeline/document_source_group_test.cpp
index 8ea0cbc912a..ae1083715b9 100644
--- a/src/mongo/db/pipeline/document_source_group_test.cpp
+++ b/src/mongo/db/pipeline/document_source_group_test.cpp
@@ -215,10 +215,10 @@ TEST_F(DocumentSourceGroupTest, ShouldReportMultipleFieldGroupKeysAsARename) {
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>&>> expressions;
auto doc = std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>>>{{"x", x},
{"y", y}};
- for (auto & [ unused, expression ] : doc)
+ for (auto& [unused, expression] : doc)
children.push_back(std::move(expression));
std::vector<boost::intrusive_ptr<Expression>>::size_type index = 0;
- for (auto & [ fieldName, unused ] : doc) {
+ for (auto& [fieldName, unused] : doc) {
expressions.emplace_back(fieldName, children[index]);
++index;
}
@@ -523,8 +523,9 @@ class AggregateObjectExpression : public ExpressionBase {
return BSON("a" << 6);
}
BSONObj spec() {
- return BSON("_id" << 0 << "z" << BSON("$first" << BSON("x"
- << "$a")));
+ return BSON("_id" << 0 << "z"
+ << BSON("$first" << BSON("x"
+ << "$a")));
}
BSONObj expected() {
return BSON("_id" << 0 << "z" << BSON("x" << 6));
@@ -537,8 +538,9 @@ class AggregateOperatorExpression : public ExpressionBase {
return BSON("a" << 6);
}
BSONObj spec() {
- return BSON("_id" << 0 << "z" << BSON("$first"
- << "$a"));
+ return BSON("_id" << 0 << "z"
+ << BSON("$first"
+ << "$a"));
}
BSONObj expected() {
return BSON("_id" << 0 << "z" << 6);
@@ -635,8 +637,9 @@ class SingleDocument : public CheckResultsBase {
return {DOC("a" << 1)};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "a" << BSON("$sum"
- << "$a"));
+ return BSON("_id" << 0 << "a"
+ << BSON("$sum"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:1}]";
@@ -649,8 +652,9 @@ class TwoValuesSingleKey : public CheckResultsBase {
return {DOC("a" << 1), DOC("a" << 2)};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "a" << BSON("$push"
- << "$a"));
+ return BSON("_id" << 0 << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1,2]}]";
@@ -708,8 +712,7 @@ class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
<< "list"
<< BSON("$push"
<< "$a")
- << "sum"
- << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
+ << "sum" << BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
}
virtual string expectedResultSetString() {
return "[{_id:0,list:[1,3],sum:2},{_id:1,list:[2,4],sum:3}]";
@@ -770,8 +773,9 @@ class UndefinedAccumulatorValue : public CheckResultsBase {
return {Document()};
}
virtual BSONObj groupSpec() {
- return BSON("_id" << 0 << "first" << BSON("$first"
- << "$missing"));
+ return BSON("_id" << 0 << "first"
+ << BSON("$first"
+ << "$missing"));
}
virtual string expectedResultSetString() {
return "[{_id:0, first:null}]";
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
index 23343699114..c24671624f6 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -85,4 +85,4 @@ Value DocumentSourceIndexStats::serialize(
boost::optional<ExplainOptions::Verbosity> explain) const {
return Value(DOC(getSourceName() << Document()));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
index b4dd8a61adf..13a0c173424 100644
--- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp
@@ -65,4 +65,4 @@ Value DocumentSourceInternalInhibitOptimization::serialize(
return Value(Document{{getSourceName(), Value{Document{}}}});
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
index 86b919fb848..75f3e637a7d 100644
--- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
+++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h
@@ -73,4 +73,4 @@ private:
Value serialize(boost::optional<ExplainOptions::Verbosity> explain = boost::none) const final;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
index 3b7eb1f86a8..0eb5a85f0d0 100644
--- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp
@@ -69,14 +69,12 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceInternalSplitPipeline::create
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "unrecognized field while parsing mergeType: '"
- << elt.fieldNameStringData()
- << "'");
+ << elt.fieldNameStringData() << "'");
}
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "unrecognized field while parsing $_internalSplitPipeline: '"
- << elt.fieldNameStringData()
- << "'");
+ << elt.fieldNameStringData() << "'");
}
}
@@ -120,4 +118,4 @@ Value DocumentSourceInternalSplitPipeline::serialize(
mergeTypeString.empty() ? Value() : Value(mergeTypeString)}}}}});
}
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
index 9d58b7e3fd5..d2d4b14e685 100644
--- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
+++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h
@@ -85,4 +85,4 @@ private:
HostTypeRequirement _mergeType = HostTypeRequirement::kNone;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
index 3c189798db9..9195d0aa0aa 100644
--- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
+++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp
@@ -51,8 +51,7 @@ DocumentSource::GetNextResult DocumentSourceListCachedAndActiveUsers::getNext()
const auto info = std::move(_users.back());
_users.pop_back();
return Document(BSON("username" << info.userName.getUser() << "db" << info.userName.getDB()
- << "active"
- << info.active));
+ << "active" << info.active));
}
return GetNextResult::makeEOF();
diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
index 44d2e57ee8e..d984f755dda 100644
--- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
+++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h
@@ -73,8 +73,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
};
diff --git a/src/mongo/db/pipeline/document_source_list_local_sessions.h b/src/mongo/db/pipeline/document_source_list_local_sessions.h
index 81cc484d618..0954aaa3e36 100644
--- a/src/mongo/db/pipeline/document_source_list_local_sessions.h
+++ b/src/mongo/db/pipeline/document_source_list_local_sessions.h
@@ -84,8 +84,7 @@ public:
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Aggregation stage " << kStageName << " cannot run with a "
<< "readConcern other than 'local', or in a multi-document "
- << "transaction. Current readConcern: "
- << readConcern.toString(),
+ << "transaction. Current readConcern: " << readConcern.toString(),
readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
}
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index b1c3d9ad950..ef11b582394 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -260,8 +260,7 @@ DocumentSource::GetNextResult DocumentSourceLookUp::getNext() {
objsize += result->getApproximateSize();
uassert(4568,
str::stream() << "Total size of documents in " << _fromNs.coll()
- << " matching pipeline's $lookup stage exceeds "
- << maxBytes
+ << " matching pipeline's $lookup stage exceeds " << maxBytes
<< " bytes",
objsize <= maxBytes);
@@ -686,8 +685,7 @@ void DocumentSourceLookUp::serializeToArray(
const boost::optional<FieldPath> indexPath = _unwindSrc->indexPath();
output[getSourceName()]["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << _unwindSrc->preserveNullAndEmptyArrays()
- << "includeArrayIndex"
+ << _unwindSrc->preserveNullAndEmptyArrays() << "includeArrayIndex"
<< (indexPath ? Value(indexPath->fullPath()) : Value())));
}
@@ -809,8 +807,7 @@ intrusive_ptr<DocumentSource> DocumentSourceLookUp::createFromBson(
if (argName == "let") {
uassert(ErrorCodes::FailedToParse,
str::stream() << "$lookup argument '" << argument
- << "' must be an object, is type "
- << argument.type(),
+ << "' must be an object, is type " << argument.type(),
argument.type() == BSONType::Object);
letVariables = argument.Obj();
hasLet = true;
@@ -819,9 +816,7 @@ intrusive_ptr<DocumentSource> DocumentSourceLookUp::createFromBson(
uassert(ErrorCodes::FailedToParse,
str::stream() << "$lookup argument '" << argName << "' must be a string, found "
- << argument
- << ": "
- << argument.type(),
+ << argument << ": " << argument.type(),
argument.type() == BSONType::String);
if (argName == "from") {
diff --git a/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp b/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
index 327fdf6f703..cb24b7b9ae8 100644
--- a/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_change_post_image.cpp
@@ -43,14 +43,9 @@ Value assertFieldHasType(const Document& fullDoc, StringData fieldName, BSONType
auto val = fullDoc[fieldName];
uassert(40578,
str::stream() << "failed to look up post image after change: expected \"" << fieldName
- << "\" field to have type "
- << typeName(expectedType)
- << ", instead found type "
- << typeName(val.getType())
- << ": "
- << val.toString()
- << ", full object: "
- << fullDoc.toString(),
+ << "\" field to have type " << typeName(expectedType)
+ << ", instead found type " << typeName(val.getType()) << ": "
+ << val.toString() << ", full object: " << fullDoc.toString(),
val.getType() == expectedType);
return val;
}
@@ -88,8 +83,7 @@ NamespaceString DocumentSourceLookupChangePostImage::assertValidNamespace(
// lookup into any namespace.
uassert(40579,
str::stream() << "unexpected namespace during post image lookup: " << nss.ns()
- << ", expected "
- << pExpCtx->ns.ns(),
+ << ", expected " << pExpCtx->ns.ns(),
nss == pExpCtx->ns ||
(pExpCtx->isClusterAggregation() || pExpCtx->isDBAggregation(nss.db())));
@@ -112,8 +106,7 @@ Value DocumentSourceLookupChangePostImage::lookupPostImage(const Document& updat
const auto readConcern = pExpCtx->inMongos
? boost::optional<BSONObj>(BSON("level"
<< "majority"
- << "afterClusterTime"
- << resumeToken.getData().clusterTime))
+ << "afterClusterTime" << resumeToken.getData().clusterTime))
: boost::none;
diff --git a/src/mongo/db/pipeline/document_source_lookup_test.cpp b/src/mongo/db/pipeline/document_source_lookup_test.cpp
index bc406def88e..e5e354caedc 100644
--- a/src/mongo/db/pipeline/document_source_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_test.cpp
@@ -95,9 +95,7 @@ TEST_F(DocumentSourceLookUpTest, PreservesParentPipelineLetVariables) {
auto docSource = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -117,9 +115,7 @@ TEST_F(DocumentSourceLookUpTest, AcceptsPipelineSyntax) {
auto docSource = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -229,17 +225,17 @@ TEST_F(DocumentSourceLookUpTest, RejectLookupWhenDepthLimitIsExceeded) {
expCtx->subPipelineDepth = DocumentSourceLookUp::kMaxSubPipelineDepth;
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- ErrorCodes::MaxSubPipelineDepthExceeded);
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ ErrorCodes::MaxSubPipelineDepthExceeded);
}
TEST_F(ReplDocumentSourceLookUpTest, RejectsPipelineWithChangeStreamStage) {
@@ -286,8 +282,7 @@ TEST_F(DocumentSourceLookUpTest, RejectsLocalFieldForeignFieldWhenPipelineIsSpec
auto lookupStage = DocumentSourceLookUp::createFromBson(
BSON("$lookup" << BSON("from"
<< "coll"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
<< "localField"
<< "a"
<< "foreignField"
@@ -298,8 +293,7 @@ TEST_F(DocumentSourceLookUpTest, RejectsLocalFieldForeignFieldWhenPipelineIsSpec
expCtx);
FAIL(str::stream()
- << "Expected creation of the "
- << lookupStage->getSourceName()
+ << "Expected creation of the " << lookupStage->getSourceName()
<< " stage to uassert on mix of localField/foreignField and pipeline options");
} catch (const AssertionException& ex) {
ASSERT_EQ(ErrorCodes::FailedToParse, ex.code());
@@ -335,50 +329,50 @@ TEST_F(DocumentSourceLookUpTest, RejectsInvalidLetVariableName) {
expCtx->setResolvedNamespaces(StringMap<ExpressionContext::ResolvedNamespace>{
{fromNs.coll().toString(), {fromNs, std::vector<BSONObj>()}}});
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("" // Empty variable name.
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16866);
-
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("^invalidFirstChar"
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16867);
-
- ASSERT_THROWS_CODE(DocumentSourceLookUp::createFromBson(
- BSON("$lookup" << BSON("from"
- << "coll"
- << "let"
- << BSON("contains.invalidChar"
- << "$a")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
- << "as"))
- .firstElement(),
- expCtx),
- AssertionException,
- 16868);
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("" // Empty variable name.
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16866);
+
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("^invalidFirstChar"
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16867);
+
+ ASSERT_THROWS_CODE(
+ DocumentSourceLookUp::createFromBson(
+ BSON("$lookup" << BSON("from"
+ << "coll"
+ << "let"
+ << BSON("contains.invalidChar"
+ << "$a")
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
+ << "as"
+ << "as"))
+ .firstElement(),
+ expCtx),
+ AssertionException,
+ 16868);
}
TEST_F(DocumentSourceLookUpTest, ShouldBeAbleToReParseSerializedStage) {
@@ -393,9 +387,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldBeAbleToReParseSerializedStage) {
<< "let"
<< BSON("local_x"
<< "$x")
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
- << "as"
+ << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1))) << "as"
<< "as"))
.firstElement(),
expCtx);
@@ -729,8 +721,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldCacheNonCorrelatedSubPipelinePrefix) {
auto expectedPipe = fromjson(
str::stream() << "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, "
- << sequentialCacheStageObj()
- << ", {$addFields: {varField: {$const: 5} }}]");
+ << sequentialCacheStageObj() << ", {$addFields: {varField: {$const: 5} }}]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
@@ -914,8 +905,7 @@ TEST_F(DocumentSourceLookUpTest,
str::stream() << "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, "
"{$lookup: {from: 'coll', as: 'subas', let: {var1: '$y'}, "
"pipeline: [{$match: {$expr: { $eq: ['$z', '$$var1']}}}]}}, "
- << sequentialCacheStageObj()
- << ", {$addFields: {varField: {$const: 5} }}]");
+ << sequentialCacheStageObj() << ", {$addFields: {varField: {$const: 5} }}]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
@@ -947,8 +937,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldCacheEntirePipelineIfNonCorrelated) {
<< "[{mock: {}}, {$match: {x:{$eq: 1}}}, {$sort: {sortKey: {x: 1}}}, {$lookup: {from: "
"'coll', as: 'subas', let: {}, pipeline: [{$match: {y: 5}}]}}, {$addFields: "
"{constField: {$const: 5}}}, "
- << sequentialCacheStageObj()
- << "]");
+ << sequentialCacheStageObj() << "]");
ASSERT_VALUE_EQ(Value(subPipeline->writeExplainOps(kExplain)), Value(BSONArray(expectedPipe)));
}
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 1900a644627..c2c7ce66f29 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -48,8 +48,8 @@ namespace mongo {
using boost::intrusive_ptr;
using std::pair;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
REGISTER_DOCUMENT_SOURCE(match,
diff --git a/src/mongo/db/pipeline/document_source_merge.cpp b/src/mongo/db/pipeline/document_source_merge.cpp
index 2550e5a490f..23129c98f85 100644
--- a/src/mongo/db/pipeline/document_source_merge.cpp
+++ b/src/mongo/db/pipeline/document_source_merge.cpp
@@ -82,7 +82,7 @@ constexpr auto kPipelineDiscardMode = MergeMode{WhenMatched::kPipeline, WhenNotM
*/
MergeStrategy makeUpdateStrategy(bool upsert, BatchTransform transform) {
return [upsert, transform](
- const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
+ const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
if (transform) {
transform(batch);
}
@@ -103,7 +103,7 @@ MergeStrategy makeUpdateStrategy(bool upsert, BatchTransform transform) {
*/
MergeStrategy makeStrictUpdateStrategy(bool upsert, BatchTransform transform) {
return [upsert, transform](
- const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
+ const auto& expCtx, const auto& ns, const auto& wc, auto epoch, auto&& batch) {
if (transform) {
transform(batch);
}
@@ -411,7 +411,7 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::createFromBson(
mergeSpec.getWhenMatched() ? mergeSpec.getWhenMatched()->mode : kDefaultWhenMatched;
auto whenNotMatched = mergeSpec.getWhenNotMatched().value_or(kDefaultWhenNotMatched);
auto pipeline = mergeSpec.getWhenMatched() ? mergeSpec.getWhenMatched()->pipeline : boost::none;
- auto[mergeOnFields, targetCollectionVersion] =
+ auto [mergeOnFields, targetCollectionVersion] =
expCtx->mongoProcessInterface->ensureFieldsUniqueOrResolveDocumentKey(
expCtx, mergeSpec.getOn(), mergeSpec.getTargetCollectionVersion(), targetNss);
@@ -434,7 +434,7 @@ Value DocumentSourceMerge::serialize(boost::optional<ExplainOptions::Verbosity>
}
BSONObjBuilder bob;
- for (auto && [ name, expr ] : *_letVariables) {
+ for (auto&& [name, expr] : *_letVariables) {
bob << name << expr->serialize(static_cast<bool>(explain));
}
return bob.obj();
diff --git a/src/mongo/db/pipeline/document_source_merge.h b/src/mongo/db/pipeline/document_source_merge.h
index 927c0376245..f7889528930 100644
--- a/src/mongo/db/pipeline/document_source_merge.h
+++ b/src/mongo/db/pipeline/document_source_merge.h
@@ -180,7 +180,7 @@ private:
}
BSONObjBuilder bob;
- for (auto && [ name, expr ] : *_letVariables) {
+ for (auto&& [name, expr] : *_letVariables) {
bob << name << expr->evaluate(doc, &pExpCtx->variables);
}
return bob.obj();
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
index e6b26b06c70..3fda91dc77d 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
@@ -132,8 +132,8 @@ TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectEmptyArray) {
TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectLegacySerializationFormats) {
// Formats like this were used in old versions of the server but are no longer supported.
- auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON(
- "ns" << kTestNss.ns() << "id" << 0LL << "host" << kTestHost.toString())));
+ auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON("ns" << kTestNss.ns() << "id" << 0LL
+ << "host" << kTestHost.toString())));
ASSERT_THROWS_CODE(DocumentSourceMergeCursors::createFromBson(spec.firstElement(), getExpCtx()),
AssertionException,
17026);
diff --git a/src/mongo/db/pipeline/document_source_merge_test.cpp b/src/mongo/db/pipeline/document_source_merge_test.cpp
index 50e75e9d264..dbebf226ced 100644
--- a/src/mongo/db/pipeline/document_source_merge_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_test.cpp
@@ -140,8 +140,7 @@ TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfWhenMatchedIsStringOrArray) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSONArray()));
+ << "whenMatched" << BSONArray()));
ASSERT(createMergeStage(spec));
}
@@ -238,14 +237,12 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfIntoIsNotAValidUserCollection) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotString) {
auto spec = BSON("$merge" << BSON("into" << BSON("coll"
<< "target_collection"
- << "db"
- << true)));
+ << "db" << true)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("coll"
<< "target_collection"
- << "db"
- << BSONArray())));
+ << "db" << BSONArray())));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("coll"
@@ -259,14 +256,12 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotString) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfCollIsNotString) {
auto spec = BSON("$merge" << BSON("into" << BSON("db"
<< "target_db"
- << "coll"
- << true)));
+ << "coll" << true)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("db"
<< "target_db"
- << "coll"
- << BSONArray())));
+ << "coll" << BSONArray())));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into" << BSON("db"
@@ -294,40 +289,34 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfDbIsNotAValidDatabaseName) {
TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenMatchedModeIsNotStringOrArray) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << true));
+ << "whenMatched" << true));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << 100));
+ << "whenMatched" << 100));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON("" << kDefaultWhenMatchedMode)));
+ << "whenMatched" << BSON("" << kDefaultWhenMatchedMode)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51191);
}
TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenNotMatchedModeIsNotString) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << true));
+ << "whenNotMatched" << true));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << BSONArray()));
+ << "whenNotMatched" << BSONArray()));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenNotMatched"
- << BSON("" << kDefaultWhenNotMatchedMode)));
+ << "whenNotMatched" << BSON("" << kDefaultWhenNotMatchedMode)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
}
@@ -371,26 +360,22 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfWhenNotMatchedModeIsUnsupportedStr
TEST_F(DocumentSourceMergeTest, FailsToParseIfOnFieldIsNotStringOrArrayOfStrings) {
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << 1));
+ << "on" << 1));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51186);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSONArray()));
+ << "on" << BSONArray()));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51187);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSON_ARRAY(1 << 2 << BSON("a" << 3))));
+ << "on" << BSON_ARRAY(1 << 2 << BSON("a" << 3))));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51134);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "on"
- << BSON("_id" << 1)));
+ << "on" << BSON("_id" << 1)));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, 51186);
}
@@ -646,24 +631,21 @@ TEST_F(DocumentSourceMergeTest, CorrectlyHandlesWhenMatchedAndWhenNotMatchedMode
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "insert"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "fail"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
<< "whenNotMatched"
<< "discard"));
ASSERT(createMergeStage(spec));
@@ -688,41 +670,33 @@ TEST_F(DocumentSourceMergeTest, CorrectlyHandlesWhenMatchedAndWhenNotMatchedMode
TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
auto let = BSON("foo"
<< "bar");
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "insert"));
ASSERT(createMergeStage(spec));
- spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "fail"));
+ spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "fail"));
ASSERT(createMergeStage(spec));
- spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << let
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "discard"));
+ spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << let << "whenMatched"
+ << BSON_ARRAY(BSON("$project" << BSON("x" << 1))) << "whenNotMatched"
+ << "discard"));
ASSERT(createMergeStage(spec));
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "insert"));
@@ -730,9 +704,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "fail"));
@@ -740,9 +712,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "replace"
<< "whenNotMatched"
<< "discard"));
@@ -750,9 +720,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "insert"));
@@ -760,9 +728,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "fail"));
@@ -770,9 +736,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "merge"
<< "whenNotMatched"
<< "discard"));
@@ -780,9 +744,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "keepExisting"
<< "whenNotMatched"
<< "insert"));
@@ -790,9 +752,7 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << let
- << "whenMatched"
+ << "let" << let << "whenMatched"
<< "fail"
<< "whenNotMatched"
<< "insert"));
@@ -800,12 +760,12 @@ TEST_F(DocumentSourceMergeTest, LetVariablesCanOnlyBeUsedWithPipelineMode) {
}
TEST_F(DocumentSourceMergeTest, SerializeDefaultLetVariable) {
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "whenMatched"
- << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "whenMatched" << BSON_ARRAY(BSON("$project" << BSON("x" << 1)))
+ << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
auto serialized = mergeStage->serialize().getDocument();
ASSERT_VALUE_EQ(serialized["$merge"]["let"],
@@ -826,11 +786,10 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) {
<< BSON("v1" << 10 << "v2"
<< "foo"
<< "v3"
- << BSON("x" << 1 << "y" << BSON("z"
- << "bar")))
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << BSON("x" << 1 << "y"
+ << BSON("z"
+ << "bar")))
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
@@ -840,8 +799,9 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) {
Value(BSON("$const"
<< "foo")));
ASSERT_VALUE_EQ(serialized["$merge"]["let"]["v3"],
- Value(BSON("x" << BSON("$const" << 1) << "y" << BSON("z" << BSON("$const"
- << "bar")))));
+ Value(BSON("x" << BSON("$const" << 1) << "y"
+ << BSON("z" << BSON("$const"
+ << "bar")))));
ASSERT_VALUE_EQ(serialized["$merge"]["whenMatched"], Value(pipeline));
}
@@ -853,9 +813,7 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) {
<< "target_collection"
<< "let"
<< BSON("v1" << BSON_ARRAY(1 << "2" << BSON("x" << 1 << "y" << 2)))
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
@@ -877,14 +835,11 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) {
TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) {
auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x"
<< "1")));
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << BSONNULL
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << BSONNULL << "whenMatched" << pipeline << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
auto serialized = mergeStage->serialize().getDocument();
@@ -897,14 +852,11 @@ TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) {
TEST_F(DocumentSourceMergeTest, SerializeEmptyLetVariables) {
auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x"
<< "1")));
- auto spec = BSON("$merge" << BSON("into"
- << "target_collection"
- << "let"
- << BSONObj()
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
- << "insert"));
+ auto spec =
+ BSON("$merge" << BSON("into"
+ << "target_collection"
+ << "let" << BSONObj() << "whenMatched" << pipeline << "whenNotMatched"
+ << "insert"));
auto mergeStage = createMergeStage(spec);
ASSERT(mergeStage);
auto serialized = mergeStage->serialize().getDocument();
@@ -917,11 +869,7 @@ TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) {
<< "1")));
auto spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << 1
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "let" << 1 << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
@@ -929,18 +877,13 @@ TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) {
<< "target_collection"
<< "let"
<< "foo"
- << "whenMatched"
- << pipeline
- << "whenNotMatched"
+ << "whenMatched" << pipeline << "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
spec = BSON("$merge" << BSON("into"
<< "target_collection"
- << "let"
- << BSON_ARRAY(1 << "2")
- << "whenMatched"
- << pipeline
+ << "let" << BSON_ARRAY(1 << "2") << "whenMatched" << pipeline
<< "whenNotMatched"
<< "insert"));
ASSERT_THROWS_CODE(createMergeStage(spec), AssertionException, ErrorCodes::TypeMismatch);
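Note on the hunks above: the dominant change in these test files is that clang-format 7 packs consecutive `<<` operands of the BSON builder macros onto shared lines ("key" << value) instead of breaking after every operand. A minimal, self-contained sketch of that operand packing, using a toy Builder type rather than MongoDB's BSONObjBuilder:

#include <iostream>

// Toy stand-in for a streaming builder; only the line-breaking shape is
// meant to match the hunks above, not the real BSON API.
struct Builder {
    Builder& operator<<(const char* v) {
        std::cout << v << ' ';
        return *this;
    }
    Builder& operator<<(bool v) {
        std::cout << std::boolalpha << v << ' ';
        return *this;
    }
};

int main() {
    Builder b;
    // clang-format 6 and earlier broke after each operand:
    //     b << "whenMatched"
    //       << true;
    // clang-format 7 keeps short key/value pairs on one line:
    b << "whenMatched" << true;
    return 0;
}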
diff --git a/src/mongo/db/pipeline/document_source_mock.cpp b/src/mongo/db/pipeline/document_source_mock.cpp
index f4efb3e731e..86e9ebda0ee 100644
--- a/src/mongo/db/pipeline/document_source_mock.cpp
+++ b/src/mongo/db/pipeline/document_source_mock.cpp
@@ -76,4 +76,4 @@ intrusive_ptr<DocumentSourceMock> DocumentSourceMock::createForTest(
}
return new DocumentSourceMock(std::move(results));
}
-}
+} // namespace mongo
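The `}` to `} // namespace mongo` rewrites that recur through this commit come from clang-format's FixNamespaceComments option (enabled at least since clang-format 5; evidently on in the .clang-format used here): a bare closing brace of a namespace gains a trailing comment naming it. A minimal illustration:

// Before formatting, both closing braces below were bare "}" lines.
namespace mongo {
namespace {
int answer() {
    return 42;
}
} // namespace
} // namespace mongo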
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index ab340885632..f8478395328 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -106,8 +106,8 @@ void DocumentSourceOut::initialize() {
DBClientBase* conn = pExpCtx->mongoProcessInterface->directClient();
const auto& outputNs = getOutputNs();
- _tempNs = NamespaceString(str::stream() << outputNs.db() << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1));
+ _tempNs = NamespaceString(str::stream()
+ << outputNs.db() << ".tmp.agg_out." << aggOutCounter.addAndFetch(1));
// Save the original collection options and index specs so we can check they didn't change
// during computation.
@@ -123,8 +123,8 @@ void DocumentSourceOut::initialize() {
// We will write all results into a temporary collection, then rename the temporary
// collection to be the target collection once we are done.
- _tempNs = NamespaceString(str::stream() << outputNs.db() << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1));
+ _tempNs = NamespaceString(str::stream()
+ << outputNs.db() << ".tmp.agg_out." << aggOutCounter.addAndFetch(1));
// Create temp collection, copying options from the existing output collection if any.
{
diff --git a/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp b/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
index d95043e66fe..dfa460c3f9f 100644
--- a/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_plan_cache_stats.cpp
@@ -41,14 +41,14 @@ REGISTER_DOCUMENT_SOURCE(planCacheStats,
boost::intrusive_ptr<DocumentSource> DocumentSourcePlanCacheStats::createFromBson(
BSONElement spec, const boost::intrusive_ptr<ExpressionContext>& pExpCtx) {
- uassert(
- ErrorCodes::FailedToParse,
- str::stream() << kStageName << " value must be an object. Found: " << typeName(spec.type()),
- spec.type() == BSONType::Object);
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << kStageName
+ << " value must be an object. Found: " << typeName(spec.type()),
+ spec.type() == BSONType::Object);
uassert(ErrorCodes::FailedToParse,
- str::stream() << kStageName << " parameters object must be empty. Found: "
- << typeName(spec.type()),
+ str::stream() << kStageName
+ << " parameters object must be empty. Found: " << typeName(spec.type()),
spec.embeddedObject().isEmpty());
uassert(50932,
diff --git a/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp b/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
index 3eec42538f2..6980b400972 100644
--- a/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
+++ b/src/mongo/db/pipeline/document_source_plan_cache_stats_test.cpp
@@ -159,8 +159,7 @@ TEST_F(DocumentSourcePlanCacheStatsTest, ReturnsOnlyMatchingStatsAfterAbsorbingM
<< "baz"),
BSON("foo"
<< "bar"
- << "match"
- << true)};
+ << "match" << true)};
getExpCtx()->mongoProcessInterface =
std::make_shared<PlanCacheStatsMongoProcessInterface>(stats);
diff --git a/src/mongo/db/pipeline/document_source_queue.cpp b/src/mongo/db/pipeline/document_source_queue.cpp
index 80559de1a71..47a77709363 100644
--- a/src/mongo/db/pipeline/document_source_queue.cpp
+++ b/src/mongo/db/pipeline/document_source_queue.cpp
@@ -55,4 +55,4 @@ DocumentSource::GetNextResult DocumentSourceQueue::getNext() {
_queue.pop_front();
return next;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index 3ff60410a95..7afc1eea75a 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -161,8 +161,7 @@ boost::optional<Document> DocumentSourceRedact::redactObject(const Document& roo
uasserted(17053,
str::stream() << "$redact's expression should not return anything "
<< "aside from the variables $$KEEP, $$DESCEND, and "
- << "$$PRUNE, but returned "
- << expressionResult.toString());
+ << "$$PRUNE, but returned " << expressionResult.toString());
}
}
@@ -196,4 +195,4 @@ intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
return source;
}
-}
+} // namespace mongo
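The uasserted() hunk above shows the same operand packing applied to str::stream() error messages: string literals and interpolated values now share lines. A hedged, compilable stand-in using std::ostringstream in place of mongo::str::stream and a plain exception in place of uasserted():

#include <sstream>
#include <stdexcept>
#include <string>

// Shape-only sketch of the reflowed message construction above.
void checkRedactResult(bool ok, const std::string& result) {
    if (!ok) {
        std::ostringstream msg;
        msg << "$redact's expression should not return anything "
            << "aside from the variables $$KEEP, $$DESCEND, and "
            << "$$PRUNE, but returned " << result;
        throw std::runtime_error(msg.str());
    }
}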
diff --git a/src/mongo/db/pipeline/document_source_replace_root.cpp b/src/mongo/db/pipeline/document_source_replace_root.cpp
index deefe509bb7..0d144bdc368 100644
--- a/src/mongo/db/pipeline/document_source_replace_root.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root.cpp
@@ -50,11 +50,8 @@ Document ReplaceRootTransformation::applyTransformation(const Document& input) {
uassert(40228,
str::stream()
<< "'newRoot' expression must evaluate to an object, but resulting value was: "
- << newRoot.toString()
- << ". Type of resulting value: '"
- << typeName(newRoot.getType())
- << "'. Input document: "
- << input.toString(),
+ << newRoot.toString() << ". Type of resulting value: '"
+ << typeName(newRoot.getType()) << "'. Input document: " << input.toString(),
newRoot.getType() == BSONType::Object);
// Turn the value into a document.
@@ -84,8 +81,7 @@ intrusive_ptr<DocumentSource> DocumentSourceReplaceRoot::createFromBson(
<< stageName);
uassert(40229,
str::stream() << "expected an object as specification for " << kStageName
- << " stage, got "
- << typeName(elem.type()),
+ << " stage, got " << typeName(elem.type()),
elem.type() == Object);
auto spec =
diff --git a/src/mongo/db/pipeline/document_source_replace_root_test.cpp b/src/mongo/db/pipeline/document_source_replace_root_test.cpp
index 71c356e98f2..cb71448fa7b 100644
--- a/src/mongo/db/pipeline/document_source_replace_root_test.cpp
+++ b/src/mongo/db/pipeline/document_source_replace_root_test.cpp
@@ -336,14 +336,12 @@ TEST_F(ReplaceRootSpec, CreationRequiresObjectSpecification) {
TEST_F(ReplaceRootSpec, OnlyValidOptionInObjectSpecIsNewRoot) {
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("newRoot"
<< "$a"
- << "root"
- << 2))),
+ << "root" << 2))),
AssertionException,
40415);
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("newRoot"
<< "$a"
- << "path"
- << 2))),
+ << "path" << 2))),
AssertionException,
40415);
ASSERT_THROWS_CODE(createReplaceRoot(createSpec(BSON("path"
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index ad84c24e9aa..0f82e7466de 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -116,9 +116,7 @@ DocumentSource::GetNextResult DocumentSourceSampleFromRandomCursor::getNextNonDu
<< _idField
<< " field in order to de-duplicate results, but encountered a "
"document without a "
- << _idField
- << " field: "
- << nextInput.getDocument().toString(),
+ << _idField << " field: " << nextInput.getDocument().toString(),
!idField.missing());
if (_seenDocs.insert(std::move(idField)).second) {
@@ -163,4 +161,4 @@ intrusive_ptr<DocumentSourceSampleFromRandomCursor> DocumentSourceSampleFromRand
new DocumentSourceSampleFromRandomCursor(expCtx, size, idField, nDocsInCollection));
return source;
}
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp b/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
index 61814540336..15627744247 100644
--- a/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
+++ b/src/mongo/db/pipeline/document_source_sequential_document_cache.cpp
@@ -144,12 +144,12 @@ Value DocumentSourceSequentialDocumentCache::serialize(
{kStageName,
Document{{"maxSizeBytes"_sd, Value(static_cast<long long>(_cache->maxSizeBytes()))},
{"status"_sd,
- _cache->isBuilding() ? "kBuilding"_sd : _cache->isServing()
- ? "kServing"_sd
- : "kAbandoned"_sd}}}});
+ _cache->isBuilding()
+ ? "kBuilding"_sd
+ : _cache->isServing() ? "kServing"_sd : "kAbandoned"_sd}}}});
}
return Value();
}
-} // namesace mongo
+} // namespace mongo
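The serialize() hunk above reflows a nested conditional operator: clang-format 7 breaks before the outer `?` and `:` and keeps the inner conditional whole where it fits, instead of splitting mid-operand. A small runnable sketch of that shape (status strings borrowed from the hunk; the cache object itself is replaced by two bools):

#include <iostream>

int main() {
    bool building = false, serving = true;
    const char* status = building
        ? "kBuilding"
        : serving ? "kServing" : "kAbandoned";
    std::cout << status << '\n';
    return 0;
}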
diff --git a/src/mongo/db/pipeline/document_source_sequential_document_cache.h b/src/mongo/db/pipeline/document_source_sequential_document_cache.h
index 19119a1a0f3..0031ca8694b 100644
--- a/src/mongo/db/pipeline/document_source_sequential_document_cache.h
+++ b/src/mongo/db/pipeline/document_source_sequential_document_cache.h
@@ -99,4 +99,4 @@ private:
bool _hasOptimizedPos = false;
};
-} // namesace mongo
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
index 2eead90aa3f..143a796cdf6 100644
--- a/src/mongo/db/pipeline/document_source_skip.cpp
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -116,4 +116,4 @@ intrusive_ptr<DocumentSource> DocumentSourceSkip::createFromBson(
return DocumentSourceSkip::create(pExpCtx, nToSkip);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 576541c207b..870394a277c 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -286,4 +286,4 @@ intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
string pathString(Expression::removeFieldPrefix(prefixedPathString));
return DocumentSourceUnwind::create(pExpCtx, pathString, preserveNullAndEmptyArrays, indexPath);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_unwind_test.cpp b/src/mongo/db/pipeline/document_source_unwind_test.cpp
index ec8bb3af24c..2c024fb6f5f 100644
--- a/src/mongo/db/pipeline/document_source_unwind_test.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind_test.cpp
@@ -164,8 +164,7 @@ private:
void createUnwind(bool preserveNullAndEmptyArrays, bool includeArrayIndex) {
auto specObj =
DOC("$unwind" << DOC("path" << unwindFieldPath() << "preserveNullAndEmptyArrays"
- << preserveNullAndEmptyArrays
- << "includeArrayIndex"
+ << preserveNullAndEmptyArrays << "includeArrayIndex"
<< (includeArrayIndex ? Value(indexPath()) : Value())));
_unwind = static_cast<DocumentSourceUnwind*>(
DocumentSourceUnwind::createFromBson(specObj.toBson().firstElement(), ctx()).get());
@@ -475,8 +474,9 @@ class SeveralMoreDocuments : public CheckResultsBase {
deque<DocumentSource::GetNextResult> inputData() override {
return {DOC("_id" << 0 << "a" << BSONNULL),
DOC("_id" << 1),
- DOC("_id" << 2 << "a" << DOC_ARRAY("a"_sd
- << "b"_sd)),
+ DOC("_id" << 2 << "a"
+ << DOC_ARRAY("a"_sd
+ << "b"_sd)),
DOC("_id" << 3),
DOC("_id" << 4 << "a" << DOC_ARRAY(1 << 2 << 3)),
DOC("_id" << 5 << "a" << DOC_ARRAY(4 << 5 << 6)),
@@ -764,8 +764,7 @@ TEST_F(UnwindStageTest, ShouldRejectNonDollarPrefixedPath) {
TEST_F(UnwindStageTest, ShouldRejectNonBoolPreserveNullAndEmptyArrays) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << 2))),
+ << "preserveNullAndEmptyArrays" << 2))),
AssertionException,
28809);
}
@@ -773,8 +772,7 @@ TEST_F(UnwindStageTest, ShouldRejectNonBoolPreserveNullAndEmptyArrays) {
TEST_F(UnwindStageTest, ShouldRejectNonStringIncludeArrayIndex) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "includeArrayIndex"
- << 2))),
+ << "includeArrayIndex" << 2))),
AssertionException,
28810);
}
@@ -806,16 +804,13 @@ TEST_F(UnwindStageTest, ShoudlRejectDollarPrefixedIncludeArrayIndex) {
TEST_F(UnwindStageTest, ShouldRejectUnrecognizedOption) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true
- << "foo"
- << 3))),
+ << "preserveNullAndEmptyArrays" << true
+ << "foo" << 3))),
AssertionException,
28811);
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "foo"
- << 3))),
+ << "foo" << 3))),
AssertionException,
28811);
}
diff --git a/src/mongo/db/pipeline/document_source_writer.h b/src/mongo/db/pipeline/document_source_writer.h
index fd10532d469..ada2fc72a53 100644
--- a/src/mongo/db/pipeline/document_source_writer.h
+++ b/src/mongo/db/pipeline/document_source_writer.h
@@ -193,7 +193,7 @@ DocumentSource::GetNextResult DocumentSourceWriter<B>::getNext() {
waitWhileFailPointEnabled();
auto doc = nextInput.releaseDocument();
- auto[obj, objSize] = makeBatchObject(std::move(doc));
+ auto [obj, objSize] = makeBatchObject(std::move(doc));
bufferedBytes += objSize;
if (!batch.empty() &&
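The one-character change above (`auto[obj, objSize]` to `auto [obj, objSize]`) reflects clang-format 7's corrected handling of C++17 structured bindings; older releases also produced `auto && [ a, b ]`, fixed the same way in the expression.cpp hunks below. A self-contained sketch with a hypothetical (object, size) pair:

#include <iostream>
#include <utility>
#include <vector>

std::pair<int, int> makeBatchObject() {
    return {42, 128};  // hypothetical (object, size) result
}

int main() {
    // clang-format <7 emitted "auto[obj, objSize] = ..." with no space.
    auto [obj, objSize] = makeBatchObject();
    std::cout << obj << " uses " << objSize << " bytes\n";

    // The same respacing applies in range-for bindings, as in the
    // "for (auto&& [switchCase, switchThen] : _branches)" hunk below.
    std::vector<std::pair<int, int>> branches{{1, 2}, {3, 4}};
    for (auto&& [switchCase, switchThen] : branches) {
        std::cout << switchCase << " -> " << switchThen << '\n';
    }
    return 0;
}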
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index b965b86f244..e8e8ce3f0e3 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -112,7 +112,7 @@ struct ParserRegistration {
};
StringMap<ParserRegistration> parserMap;
-}
+} // namespace
void Expression::registerExpression(
string key,
@@ -145,17 +145,16 @@ intrusive_ptr<Expression> Expression::parseExpression(
// Make sure we are allowed to use this expression under the current feature compatibility
// version.
auto& entry = it->second;
- uassert(
- ErrorCodes::QueryFeatureNotAllowed,
- // TODO SERVER-31968 we would like to include the current version and the required minimum
- // version in this error message, but using FeatureCompatibilityVersion::toString() would
- // introduce a dependency cycle.
- str::stream() << opName
- << " is not allowed in the current feature compatibility version. See "
- << feature_compatibility_version_documentation::kCompatibilityLink
- << " for more information.",
- !expCtx->maxFeatureCompatibilityVersion || !entry.requiredMinVersion ||
- (*entry.requiredMinVersion <= *expCtx->maxFeatureCompatibilityVersion));
+ uassert(ErrorCodes::QueryFeatureNotAllowed,
+ // TODO SERVER-31968 we would like to include the current version and the required
+ // minimum version in this error message, but using
+ // FeatureCompatibilityVersion::toString() would introduce a dependency cycle.
+ str::stream() << opName
+ << " is not allowed in the current feature compatibility version. See "
+ << feature_compatibility_version_documentation::kCompatibilityLink
+ << " for more information.",
+ !expCtx->maxFeatureCompatibilityVersion || !entry.requiredMinVersion ||
+ (*entry.requiredMinVersion <= *expCtx->maxFeatureCompatibilityVersion));
return entry.parser(expCtx, obj.firstElement(), vps);
}
@@ -522,13 +521,11 @@ Value ExpressionArrayElemAt::evaluate(const Document& root, Variables* variables
array.isArray());
uassert(28690,
str::stream() << getOpName() << "'s second argument must be a numeric value,"
- << " but is "
- << typeName(indexArg.getType()),
+ << " but is " << typeName(indexArg.getType()),
indexArg.numeric());
uassert(28691,
str::stream() << getOpName() << "'s second argument must be representable as"
- << " a 32-bit integer: "
- << indexArg.coerceToDouble(),
+ << " a 32-bit integer: " << indexArg.coerceToDouble(),
indexArg.integral());
long long i = indexArg.coerceToLong();
@@ -808,7 +805,7 @@ static const CmpLookup cmpLookup[7] = {
// CMP is special. Only name is used.
/* CMP */ {{false, false, false}, ExpressionCompare::CMP, "$cmp"},
};
-}
+} // namespace
Value ExpressionCompare::evaluate(const Document& root, Variables* variables) const {
Value pLeft(_children[0]->evaluate(root, variables));
@@ -1063,8 +1060,8 @@ intrusive_ptr<Expression> ExpressionDateFromParts::parse(
timeZoneElem = arg;
} else {
uasserted(40518,
- str::stream() << "Unrecognized argument to $dateFromParts: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateFromParts: " << arg.fieldName());
}
}
@@ -1222,8 +1219,7 @@ bool ExpressionDateFromParts::evaluateNumberWithDefault(const Document& root,
uassert(40515,
str::stream() << "'" << fieldName << "' must evaluate to an integer, found "
- << typeName(fieldValue.getType())
- << " with value "
+ << typeName(fieldValue.getType()) << " with value "
<< fieldValue.toString(),
fieldValue.integral64Bit());
@@ -1241,17 +1237,12 @@ bool ExpressionDateFromParts::evaluateNumberWithDefaultAndBounds(const Document&
bool result =
evaluateNumberWithDefault(root, field, fieldName, defaultValue, returnValue, variables);
- uassert(31034,
- str::stream() << "'" << fieldName << "'"
- << " must evaluate to a value in the range ["
- << kMinValueForDatePart
- << ", "
- << kMaxValueForDatePart
- << "]; value "
- << *returnValue
- << " is not in range",
- !result ||
- (*returnValue >= kMinValueForDatePart && *returnValue <= kMaxValueForDatePart));
+ uassert(
+ 31034,
+ str::stream() << "'" << fieldName << "'"
+ << " must evaluate to a value in the range [" << kMinValueForDatePart << ", "
+ << kMaxValueForDatePart << "]; value " << *returnValue << " is not in range",
+ !result || (*returnValue >= kMinValueForDatePart && *returnValue <= kMaxValueForDatePart));
return result;
}
@@ -1289,9 +1280,7 @@ Value ExpressionDateFromParts::evaluate(const Document& root, Variables* variabl
uassert(40523,
str::stream() << "'year' must evaluate to an integer in the range " << 0 << " to "
- << 9999
- << ", found "
- << year,
+ << 9999 << ", found " << year,
year >= 0 && year <= 9999);
return Value(
@@ -1313,10 +1302,7 @@ Value ExpressionDateFromParts::evaluate(const Document& root, Variables* variabl
uassert(31095,
str::stream() << "'isoWeekYear' must evaluate to an integer in the range " << 0
- << " to "
- << 9999
- << ", found "
- << isoWeekYear,
+ << " to " << 9999 << ", found " << isoWeekYear,
isoWeekYear >= 0 && isoWeekYear <= 9999);
return Value(timeZone->createFromIso8601DateParts(
@@ -1393,8 +1379,8 @@ intrusive_ptr<Expression> ExpressionDateFromString::parse(
onErrorElem = arg;
} else {
uasserted(40541,
- str::stream() << "Unrecognized argument to $dateFromString: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateFromString: " << arg.fieldName());
}
}
@@ -1476,8 +1462,7 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
if (!formatValue.nullish()) {
uassert(40684,
str::stream() << "$dateFromString requires that 'format' be a string, found: "
- << typeName(formatValue.getType())
- << " with value "
+ << typeName(formatValue.getType()) << " with value "
<< formatValue.toString(),
formatValue.getType() == BSONType::String);
@@ -1498,8 +1483,7 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
try {
uassert(ErrorCodes::ConversionFailure,
str::stream() << "$dateFromString requires that 'dateString' be a string, found: "
- << typeName(dateString.getType())
- << " with value "
+ << typeName(dateString.getType()) << " with value "
<< dateString.toString(),
dateString.getType() == BSONType::String);
@@ -1575,8 +1559,8 @@ intrusive_ptr<Expression> ExpressionDateToParts::parse(
isoDateElem = arg;
} else {
uasserted(40520,
- str::stream() << "Unrecognized argument to $dateToParts: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateToParts: " << arg.fieldName());
}
}
@@ -1723,8 +1707,8 @@ intrusive_ptr<Expression> ExpressionDateToString::parse(
onNullElem = arg;
} else {
uasserted(18534,
- str::stream() << "Unrecognized argument to $dateToString: "
- << arg.fieldName());
+ str::stream()
+ << "Unrecognized argument to $dateToString: " << arg.fieldName());
}
}
@@ -1794,8 +1778,7 @@ Value ExpressionDateToString::evaluate(const Document& root, Variables* variable
if (!formatValue.nullish()) {
uassert(18533,
str::stream() << "$dateToString requires that 'format' be a string, found: "
- << typeName(formatValue.getType())
- << " with value "
+ << typeName(formatValue.getType()) << " with value "
<< formatValue.toString(),
formatValue.getType() == BSONType::String);
@@ -1869,9 +1852,7 @@ Value ExpressionDivide::evaluate(const Document& root, Variables* variables) con
} else {
uasserted(16609,
str::stream() << "$divide only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
}
@@ -2028,8 +2009,7 @@ intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::parse(
if (varId == Variables::kNowId || varId == Variables::kClusterTimeId) {
uassert(ErrorCodes::QueryFeatureNotAllowed,
str::stream()
- << "'$$"
- << varName
+ << "'$$" << varName
<< "' is not allowed in the current feature compatibility version. See "
<< feature_compatibility_version_documentation::kCompatibilityLink
<< " for more information.",
@@ -2248,9 +2228,8 @@ intrusive_ptr<Expression> ExpressionFilter::optimize() {
}
Value ExpressionFilter::serialize(bool explain) const {
- return Value(
- DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName << "cond"
- << _filter->serialize(explain))));
+ return Value(DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName
+ << "cond" << _filter->serialize(explain))));
}
Value ExpressionFilter::evaluate(const Document& root, Variables* variables) const {
@@ -2667,9 +2646,7 @@ Value ExpressionMod::evaluate(const Document& root, Variables* variables) const
} else {
uasserted(16611,
str::stream() << "$mod only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
}
@@ -2789,15 +2766,12 @@ void uassertIfNotIntegralAndNonNegative(Value val,
StringData argumentName) {
uassert(40096,
str::stream() << expressionName << "requires an integral " << argumentName
- << ", found a value of type: "
- << typeName(val.getType())
- << ", with value: "
- << val.toString(),
+ << ", found a value of type: " << typeName(val.getType())
+ << ", with value: " << val.toString(),
val.integral());
uassert(40097,
str::stream() << expressionName << " requires a nonnegative " << argumentName
- << ", found: "
- << val.toString(),
+ << ", found: " << val.toString(),
val.coerceToInt() >= 0);
}
@@ -2907,8 +2881,7 @@ intrusive_ptr<Expression> ExpressionIndexOfArray::optimize() {
}
uassert(50809,
str::stream() << "First operand of $indexOfArray must be an array. First "
- << "argument is of type: "
- << typeName(valueArray.getType()),
+ << "argument is of type: " << typeName(valueArray.getType()),
valueArray.isArray());
auto arr = valueArray.getArray();
@@ -3464,7 +3437,7 @@ bool representableAsLong(long long base, long long exp) {
return base >= kBaseLimits[exp].min && base <= kBaseLimits[exp].max;
};
-}
+} // namespace
/* ----------------------- ExpressionPow ---------------------------- */
@@ -3778,7 +3751,7 @@ ValueSet arrayToSet(const Value& val, const ValueComparator& valueComparator) {
valueSet.insert(array.begin(), array.end());
return valueSet;
}
-}
+} // namespace
/* ----------------------- ExpressionSetDifference ---------------------------- */
@@ -3792,13 +3765,11 @@ Value ExpressionSetDifference::evaluate(const Document& root, Variables* variabl
uassert(17048,
str::stream() << "both operands of $setDifference must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
uassert(17049,
str::stream() << "both operands of $setDifference must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
ValueSet rhsSet = arrayToSet(rhs, getExpressionContext()->getValueComparator());
@@ -3837,8 +3808,7 @@ Value ExpressionSetEquals::evaluate(const Document& root, Variables* variables)
const Value nextEntry = _children[i]->evaluate(root, variables);
uassert(17044,
str::stream() << "All operands of $setEquals must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
+ << "argument is of type: " << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3876,8 +3846,7 @@ Value ExpressionSetIntersection::evaluate(const Document& root, Variables* varia
}
uassert(17047,
str::stream() << "All operands of $setIntersection must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
+ << "argument is of type: " << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3924,7 +3893,7 @@ Value setIsSubsetHelper(const vector<Value>& lhs, const ValueSet& rhs) {
}
return Value(true);
}
-}
+} // namespace
Value ExpressionSetIsSubset::evaluate(const Document& root, Variables* variables) const {
const Value lhs = _children[0]->evaluate(root, variables);
@@ -3932,13 +3901,11 @@ Value ExpressionSetIsSubset::evaluate(const Document& root, Variables* variables
uassert(17046,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
uassert(17042,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
return setIsSubsetHelper(lhs.getArray(),
@@ -3966,8 +3933,7 @@ public:
uassert(17310,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: "
- << typeName(lhs.getType()),
+ << "argument is of type: " << typeName(lhs.getType()),
lhs.isArray());
return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
@@ -3989,8 +3955,7 @@ intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
const Value rhs = ec->getValue();
uassert(17311,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: "
- << typeName(rhs.getType()),
+ << "argument is of type: " << typeName(rhs.getType()),
rhs.isArray());
intrusive_ptr<Expression> optimizedWithConstant(
@@ -4019,8 +3984,7 @@ Value ExpressionSetUnion::evaluate(const Document& root, Variables* variables) c
}
uassert(17043,
str::stream() << "All operands of $setUnion must be arrays. One argument"
- << " is of type: "
- << typeName(newEntries.getType()),
+ << " is of type: " << typeName(newEntries.getType()),
newEntries.isArray());
unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
@@ -4060,18 +4024,15 @@ Value ExpressionSlice::evaluate(const Document& root, Variables* variables) cons
uassert(28724,
str::stream() << "First argument to $slice must be an array, but is"
- << " of type: "
- << typeName(arrayVal.getType()),
+ << " of type: " << typeName(arrayVal.getType()),
arrayVal.isArray());
uassert(28725,
str::stream() << "Second argument to $slice must be a numeric value,"
- << " but is of type: "
- << typeName(arg2.getType()),
+ << " but is of type: " << typeName(arg2.getType()),
arg2.numeric());
uassert(28726,
str::stream() << "Second argument to $slice can't be represented as"
- << " a 32-bit integer: "
- << arg2.coerceToDouble(),
+ << " a 32-bit integer: " << arg2.coerceToDouble(),
arg2.integral());
const auto& array = arrayVal.getArray();
@@ -4111,13 +4072,11 @@ Value ExpressionSlice::evaluate(const Document& root, Variables* variables) cons
uassert(28727,
str::stream() << "Third argument to $slice must be numeric, but "
- << "is of type: "
- << typeName(countVal.getType()),
+ << "is of type: " << typeName(countVal.getType()),
countVal.numeric());
uassert(28728,
str::stream() << "Third argument to $slice can't be represented"
- << " as a 32-bit integer: "
- << countVal.coerceToDouble(),
+ << " as a 32-bit integer: " << countVal.coerceToDouble(),
countVal.integral());
uassert(28729,
str::stream() << "Third argument to $slice must be positive: "
@@ -4266,23 +4225,20 @@ Value ExpressionSubstrBytes::evaluate(const Document& root, Variables* variables
uassert(16034,
str::stream() << getOpName()
<< ": starting index must be a numeric type (is BSON type "
- << typeName(pLower.getType())
- << ")",
+ << typeName(pLower.getType()) << ")",
(pLower.getType() == NumberInt || pLower.getType() == NumberLong ||
pLower.getType() == NumberDouble));
uassert(16035,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(pLength.getType())
- << ")",
+ << typeName(pLength.getType()) << ")",
(pLength.getType() == NumberInt || pLength.getType() == NumberLong ||
pLength.getType() == NumberDouble));
const long long signedLower = pLower.coerceToLong();
uassert(50752,
- str::stream() << getOpName() << ": starting index must be non-negative (got: "
- << signedLower
- << ")",
+ str::stream() << getOpName()
+ << ": starting index must be non-negative (got: " << signedLower << ")",
signedLower >= 0);
const string::size_type lower = static_cast<string::size_type>(signedLower);
@@ -4330,8 +4286,7 @@ Value ExpressionSubstrCP::evaluate(const Document& root, Variables* variables) c
std::string str = inputVal.coerceToString();
uassert(34450,
str::stream() << getOpName() << ": starting index must be a numeric type (is BSON type "
- << typeName(lowerVal.getType())
- << ")",
+ << typeName(lowerVal.getType()) << ")",
lowerVal.numeric());
uassert(34451,
str::stream() << getOpName()
@@ -4340,8 +4295,7 @@ Value ExpressionSubstrCP::evaluate(const Document& root, Variables* variables) c
lowerVal.integral());
uassert(34452,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(lengthVal.getType())
- << ")",
+ << typeName(lengthVal.getType()) << ")",
lengthVal.numeric());
uassert(34453,
str::stream() << getOpName()
@@ -4476,8 +4430,8 @@ Value ExpressionSubtract::evaluate(const Document& root, Variables* variables) c
return Value(lhs.getDate() - Milliseconds(rhs.coerceToLong()));
} else {
uasserted(16613,
- str::stream() << "cant $subtract a " << typeName(rhs.getType())
- << " from a Date");
+ str::stream()
+ << "cant $subtract a " << typeName(rhs.getType()) << " from a Date");
}
} else {
uasserted(16556,
@@ -4603,7 +4557,7 @@ boost::intrusive_ptr<Expression> ExpressionSwitch::optimize() {
_default = _default->optimize();
}
- for (auto && [ switchCase, switchThen ] : _branches) {
+ for (auto&& [switchCase, switchThen] : _branches) {
switchCase = switchCase->optimize();
switchThen = switchThen->optimize();
}
@@ -4760,8 +4714,7 @@ std::vector<StringData> extractCodePointsFromChars(StringData utf8String,
}
uassert(50697,
str::stream()
- << "Failed to parse \"chars\" argument to "
- << expressionName
+ << "Failed to parse \"chars\" argument to " << expressionName
<< ": Detected invalid UTF-8. Missing expected continuation byte at end of string.",
i <= utf8String.size());
return codePoints;
@@ -4775,10 +4728,8 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const
}
uassert(50699,
str::stream() << _name << " requires its input to be a string, got "
- << unvalidatedInput.toString()
- << " (of type "
- << typeName(unvalidatedInput.getType())
- << ") instead.",
+ << unvalidatedInput.toString() << " (of type "
+ << typeName(unvalidatedInput.getType()) << ") instead.",
unvalidatedInput.getType() == BSONType::String);
const StringData input(unvalidatedInput.getStringData());
@@ -4791,10 +4742,8 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const
}
uassert(50700,
str::stream() << _name << " requires 'chars' to be a string, got "
- << unvalidatedUserChars.toString()
- << " (of type "
- << typeName(unvalidatedUserChars.getType())
- << ") instead.",
+ << unvalidatedUserChars.toString() << " (of type "
+ << typeName(unvalidatedUserChars.getType()) << ") instead.",
unvalidatedUserChars.getType() == BSONType::String);
return Value(
@@ -4893,11 +4842,8 @@ void assertFlagsValid(uint32_t flags,
long long precisionValue) {
uassert(51080,
str::stream() << "invalid conversion from Decimal128 result in " << opName
- << " resulting from arguments: ["
- << numericValue
- << ", "
- << precisionValue
- << "]",
+ << " resulting from arguments: [" << numericValue << ", "
+ << precisionValue << "]",
!Decimal128::hasFlag(flags, Decimal128::kInvalid));
}
@@ -4930,8 +4876,7 @@ static Value evaluateRoundOrTrunc(const Document& root,
precisionArg.integral());
uassert(51083,
str::stream() << "cannot apply " << opName << " with precision value "
- << precisionValue
- << " value must be in [-20, 100]",
+ << precisionValue << " value must be in [-20, 100]",
minPrecision <= precisionValue && precisionValue <= maxPrecision);
}
@@ -5200,8 +5145,7 @@ Value ExpressionZip::serialize(bool explain) const {
}
return Value(DOC("$zip" << DOC("inputs" << Value(serializedInput) << "defaults"
- << Value(serializedDefaults)
- << "useLongestLength"
+ << Value(serializedDefaults) << "useLongestLength"
<< serializedUseLongestLength)));
}
@@ -5236,9 +5180,10 @@ public:
//
table[BSONType::NumberDouble][BSONType::NumberDouble] = &performIdentityConversion;
table[BSONType::NumberDouble][BSONType::String] = &performFormatDouble;
- table[BSONType::NumberDouble]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberDouble][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberDouble][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberDouble][BSONType::NumberInt] = &performCastDoubleToInt;
table[BSONType::NumberDouble][BSONType::NumberLong] = &performCastDoubleToLong;
@@ -5254,11 +5199,11 @@ public:
table[BSONType::String][BSONType::String] = &performIdentityConversion;
table[BSONType::String][BSONType::jstOID] = &parseStringToOID;
table[BSONType::String][BSONType::Bool] = &performConvertToTrue;
- table[BSONType::String][BSONType::Date] = [](
- const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
- return Value(expCtx->timeZoneDatabase->fromString(inputValue.getStringData(),
- mongo::TimeZoneDatabase::utcZone()));
- };
+ table[BSONType::String][BSONType::Date] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(expCtx->timeZoneDatabase->fromString(
+ inputValue.getStringData(), mongo::TimeZoneDatabase::utcZone()));
+ };
table[BSONType::String][BSONType::NumberInt] = &parseStringToNumber<int, 10>;
table[BSONType::String][BSONType::NumberLong] = &parseStringToNumber<long long, 10>;
table[BSONType::String][BSONType::NumberDecimal] = &parseStringToNumber<Decimal128, 0>;
@@ -5315,9 +5260,10 @@ public:
inputValue.getDate());
return Value(dateString);
};
- table[BSONType::Date]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::Date][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::Date][BSONType::Date] = &performIdentityConversion;
table[BSONType::Date][BSONType::NumberLong] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5340,9 +5286,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(static_cast<std::string>(str::stream() << inputValue.getInt()));
};
- table[BSONType::NumberInt]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberInt][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberInt][BSONType::NumberInt] = &performIdentityConversion;
table[BSONType::NumberInt][BSONType::NumberLong] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5364,9 +5311,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(static_cast<std::string>(str::stream() << inputValue.getLong()));
};
- table[BSONType::NumberLong]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberLong][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberLong][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberLong][BSONType::NumberInt] = &performCastLongToInt;
table[BSONType::NumberLong][BSONType::NumberLong] = &performIdentityConversion;
@@ -5383,9 +5331,10 @@ public:
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
return Value(inputValue.getDecimal().toString());
};
- table[BSONType::NumberDecimal]
- [BSONType::Bool] = [](const boost::intrusive_ptr<ExpressionContext>& expCtx,
- Value inputValue) { return Value(inputValue.coerceToBool()); };
+ table[BSONType::NumberDecimal][BSONType::Bool] =
+ [](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
+ return Value(inputValue.coerceToBool());
+ };
table[BSONType::NumberDecimal][BSONType::Date] = &performCastNumberToDate;
table[BSONType::NumberDecimal][BSONType::NumberInt] =
[](const boost::intrusive_ptr<ExpressionContext>& expCtx, Value inputValue) {
@@ -5432,8 +5381,7 @@ public:
uassert(ErrorCodes::ConversionFailure,
str::stream() << "Unsupported conversion from " << typeName(inputType) << " to "
- << typeName(targetType)
- << " in $convert with no onError value",
+ << typeName(targetType) << " in $convert with no onError value",
foundFunction);
return foundFunction;
}
@@ -5607,8 +5555,7 @@ private:
Status parseStatus = parseNumberFromStringWithBase(stringValue, base, &result);
uassert(ErrorCodes::ConversionFailure,
str::stream() << "Failed to parse number '" << stringValue
- << "' in $convert with no onError value: "
- << parseStatus.reason(),
+ << "' in $convert with no onError value: " << parseStatus.reason(),
parseStatus.isOK());
return Value(result);
@@ -5623,8 +5570,7 @@ private:
// and returned.
uasserted(ErrorCodes::ConversionFailure,
str::stream() << "Failed to parse objectId '" << inputValue.getString()
- << "' in $convert with no onError value: "
- << ex.reason());
+ << "' in $convert with no onError value: " << ex.reason());
}
}
@@ -5643,7 +5589,6 @@ Expression::Parser makeConversionAlias(const StringData shortcutName, BSONType t
return [=](const intrusive_ptr<ExpressionContext>& expCtx,
BSONElement elem,
const VariablesParseState& vps) -> intrusive_ptr<Expression> {
-
// Use parseArguments to allow for a singleton array, or the unwrapped version.
auto operands = ExpressionNary::parseArguments(expCtx, elem, vps);
@@ -5718,8 +5663,8 @@ intrusive_ptr<Expression> ExpressionConvert::parse(
onNull = parseOperand(expCtx, elem, vps);
} else {
uasserted(ErrorCodes::FailedToParse,
- str::stream() << "$convert found an unknown argument: "
- << elem.fieldNameStringData());
+ str::stream()
+ << "$convert found an unknown argument: " << elem.fieldNameStringData());
}
}
@@ -5845,8 +5790,8 @@ auto CommonRegexParse(const boost::intrusive_ptr<ExpressionContext>& expCtx,
const VariablesParseState& vpsIn,
StringData opName) {
uassert(51103,
- str::stream() << opName << " expects an object of named arguments but found: "
- << expr.type(),
+ str::stream() << opName
+ << " expects an object of named arguments but found: " << expr.type(),
expr.type() == BSONType::Object);
struct {
@@ -5918,8 +5863,7 @@ int ExpressionRegex::execute(RegexExecutionState* regexState) const {
// capacity is not sufficient to hold all the results. The latter scenario should never occur.
uassert(51156,
str::stream() << "Error occurred while executing the regular expression in " << _opName
- << ". Result code: "
- << execResult,
+ << ". Result code: " << execResult,
execResult == -1 || execResult == (regexState->numCaptures + 1));
return execResult;
}
@@ -6109,7 +6053,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexFind::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexFind"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexFind(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
@@ -6133,7 +6077,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexFindAll::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexFindAll"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexFindAll(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
@@ -6197,7 +6141,7 @@ boost::intrusive_ptr<Expression> ExpressionRegexMatch::parse(
BSONElement expr,
const VariablesParseState& vpsIn) {
auto opName = "$regexMatch"_sd;
- auto[input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
+ auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName);
return new ExpressionRegexMatch(
expCtx, std::move(input), std::move(regex), std::move(options), opName);
}
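The $convert conversion-table hunks above show clang-format 7's lambda handling: it breaks after the `=` and gives the lambda body its own indented block, rather than splitting the array subscript across lines as before. A toy dispatch table (not the real $convert table) written in the post-format shape:

#include <functional>
#include <iostream>

int main() {
    // Hypothetical 2x2 conversion table; real code indexes by BSONType.
    std::function<bool(double)> table[2][2];
    table[0][1] =
        [](double inputValue) {
            return inputValue != 0.0;  // stands in for coerceToBool()
        };
    std::cout << std::boolalpha << table[0][1](3.14) << '\n';
    return 0;
}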
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
index 0de82b2947b..02f7858745b 100644
--- a/src/mongo/db/pipeline/expression.h
+++ b/src/mongo/db/pipeline/expression.h
@@ -369,10 +369,7 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(28667,
str::stream() << "Expression " << this->getOpName() << " takes at least " << MinArgs
- << " arguments, and at most "
- << MaxArgs
- << ", but "
- << args.size()
+ << " arguments, and at most " << MaxArgs << ", but " << args.size()
<< " were passed in.",
MinArgs <= args.size() && args.size() <= MaxArgs);
}
@@ -388,9 +385,7 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(16020,
str::stream() << "Expression " << this->getOpName() << " takes exactly " << NArgs
- << " arguments. "
- << args.size()
- << " were passed in.",
+ << " arguments. " << args.size() << " were passed in.",
args.size() == NArgs);
}
};
@@ -613,9 +608,7 @@ public:
uassert(40533,
str::stream() << _opName
<< " requires a string for the timezone argument, but was given a "
- << typeName(timeZoneId.getType())
- << " ("
- << timeZoneId.toString()
+ << typeName(timeZoneId.getType()) << " (" << timeZoneId.toString()
<< ")",
timeZoneId.getType() == BSONType::String);
@@ -676,13 +669,12 @@ public:
} else {
uasserted(40535,
str::stream() << "unrecognized option to " << opName << ": \""
- << argName
- << "\"");
+ << argName << "\"");
}
}
uassert(40539,
- str::stream() << "missing 'date' argument to " << opName << ", provided: "
- << operatorElem,
+ str::stream() << "missing 'date' argument to " << opName
+ << ", provided: " << operatorElem,
date);
return new SubClass(expCtx, std::move(date), std::move(timeZone));
}
@@ -2718,4 +2710,4 @@ public:
using ExpressionRegex::ExpressionRegex;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/expression_convert_test.cpp b/src/mongo/db/pipeline/expression_convert_test.cpp
index a0a5c2d4a64..25e18ed4b2f 100644
--- a/src/mongo/db/pipeline/expression_convert_test.cpp
+++ b/src/mongo/db/pipeline/expression_convert_test.cpp
@@ -80,8 +80,7 @@ TEST_F(ExpressionConvertTest, ParseAndSerializeWithOnError) {
<< "$path1"
<< "to"
<< "int"
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(
@@ -100,8 +99,7 @@ TEST_F(ExpressionConvertTest, ParseAndSerializeWithOnNull) {
<< "$path1"
<< "to"
<< "int"
- << "onNull"
- << 0));
+ << "onNull" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(
@@ -118,8 +116,7 @@ TEST_F(ExpressionConvertTest, ConvertWithoutInputFailsToParse) {
auto spec = BSON("$convert" << BSON("to"
<< "int"
- << "onError"
- << 0));
+ << "onError" << 0));
ASSERT_THROWS_WITH_CHECK(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
[](const AssertionException& exception) {
@@ -134,8 +131,7 @@ TEST_F(ExpressionConvertTest, ConvertWithoutToFailsToParse) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "onError"
- << 0));
+ << "onError" << 0));
ASSERT_THROWS_WITH_CHECK(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
[](const AssertionException& exception) {
@@ -152,8 +148,7 @@ TEST_F(ExpressionConvertTest, InvalidTypeNameFails) {
<< "$path1"
<< "to"
<< "dinosaur"
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -170,10 +165,7 @@ TEST_F(ExpressionConvertTest, NonIntegralTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << 3.6
- << "onError"
- << 0));
+ << "to" << 3.6 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -195,8 +187,7 @@ TEST_F(ExpressionConvertTest, NonStringNonNumericalTypeFails) {
<< "to"
<< BSON("dinosaur"
<< "Tyrannosaurus rex")
- << "onError"
- << 0));
+ << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -215,10 +206,7 @@ TEST_F(ExpressionConvertTest, InvalidNumericTargetTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << 100
- << "onError"
- << 0));
+ << "to" << 100 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -238,10 +226,7 @@ TEST_F(ExpressionConvertTest, NegativeNumericTargetTypeFails) {
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << -2
- << "onError"
- << 0));
+ << "to" << -2 << "onError" << 0));
auto convertExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
@@ -297,8 +282,7 @@ TEST_F(ExpressionConvertTest, UnsupportedConversionShouldThrowUnlessOnErrorProvi
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << Value(targetTypeName)));
+ << "to" << Value(targetTypeName)));
Document input{{"path1", inputValue}};
@@ -320,9 +304,7 @@ TEST_F(ExpressionConvertTest, UnsupportedConversionShouldThrowUnlessOnErrorProvi
auto spec = BSON("$convert" << BSON("input"
<< "$path1"
- << "to"
- << Value(targetTypeName)
- << "onError"
+ << "to" << Value(targetTypeName) << "onError"
<< "X"));
Document input{{"path1", inputValue}};
diff --git a/src/mongo/db/pipeline/expression_date_test.cpp b/src/mongo/db/pipeline/expression_date_test.cpp
index 67e798d17af..49099b47b36 100644
--- a/src/mongo/db/pipeline/expression_date_test.cpp
+++ b/src/mongo/db/pipeline/expression_date_test.cpp
@@ -46,14 +46,10 @@ TEST_F(ExpressionDateFromPartsTest, SerializesToObjectSyntax) {
// Test that it serializes to the full format if given an object specification.
BSONObj spec =
- BSON("$dateFromParts" << BSON(
- "year" << 2017 << "month" << 6 << "day" << 27 << "hour" << 14 << "minute" << 37
- << "second"
- << 15
- << "millisecond"
- << 414
- << "timezone"
- << "America/Los_Angeles"));
+ BSON("$dateFromParts" << BSON("year" << 2017 << "month" << 6 << "day" << 27 << "hour" << 14
+ << "minute" << 37 << "second" << 15 << "millisecond"
+ << 414 << "timezone"
+ << "America/Los_Angeles"));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
auto expectedSerialization =
Value(Document{{"$dateFromParts",
@@ -84,16 +80,15 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
    // Test that it becomes a constant if year, hour and minute are all provided, and are all
    // expressions which evaluate to constants.
spec = BSON("$dateFromParts" << BSON("year" << BSON("$add" << BSON_ARRAY(1900 << 107)) << "hour"
- << BSON("$add" << BSON_ARRAY(13 << 1))
- << "minute"
+ << BSON("$add" << BSON_ARRAY(13 << 1)) << "minute"
<< BSON("$add" << BSON_ARRAY(40 << 3))));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
    // Test that it becomes a constant if both year and milliseconds are provided, and year is an
    // expression which evaluates to a constant, with milliseconds a constant.
- spec = BSON("$dateFromParts" << BSON(
- "year" << BSON("$add" << BSON_ARRAY(1900 << 107)) << "millisecond" << 514));
+ spec = BSON("$dateFromParts" << BSON("year" << BSON("$add" << BSON_ARRAY(1900 << 107))
+ << "millisecond" << 514));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -105,11 +100,10 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
    // Test that it becomes a constant if isoWeekYear, isoWeek and isoDayOfWeek are all provided,
    // and are all expressions which evaluate to constants.
- spec = BSON("$dateFromParts" << BSON("isoWeekYear" << BSON("$add" << BSON_ARRAY(1017 << 1000))
- << "isoWeek"
- << BSON("$add" << BSON_ARRAY(20 << 6))
- << "isoDayOfWeek"
- << BSON("$add" << BSON_ARRAY(3 << 2))));
+ spec = BSON("$dateFromParts" << BSON("isoWeekYear"
+ << BSON("$add" << BSON_ARRAY(1017 << 1000)) << "isoWeek"
+ << BSON("$add" << BSON_ARRAY(20 << 6)) << "isoDayOfWeek"
+ << BSON("$add" << BSON_ARRAY(3 << 2))));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -117,8 +111,7 @@ TEST_F(ExpressionDateFromPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// year is not a constant.
spec = BSON("$dateFromParts" << BSON("year"
<< "$year"
- << "month"
- << 6));
+ << "month" << 6));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -180,8 +173,7 @@ TEST_F(ExpressionDateToPartsTest, SerializesToObjectSyntax) {
// Test that it serializes to the full format if given an object specification.
BSONObj spec = BSON("$dateToParts" << BSON("date" << Date_t{} << "timezone"
<< "Europe/London"
- << "iso8601"
- << false));
+ << "iso8601" << false));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
auto expectedSerialization =
Value(Document{{"$dateToParts",
@@ -224,8 +216,7 @@ TEST_F(ExpressionDateToPartsTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it becomes a constant if both date and iso8601 are provided, and are both
// expressions which evaluate to constants.
spec = BSON("$dateToParts" << BSON("date" << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
- << "iso8601"
- << BSON("$not" << false)));
+ << "iso8601" << BSON("$not" << false)));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -305,8 +296,7 @@ TEST_F(DateExpressionTest, ParsingRejectsUnrecognizedFieldsInObjectSpecification
for (auto&& expName : dateExpressions) {
BSONObj spec = BSON(expName << BSON("date" << Date_t{} << "timezone"
<< "Europe/London"
- << "extra"
- << 4));
+ << "extra" << 4));
ASSERT_THROWS_CODE(Expression::parseExpression(expCtx, spec, expCtx->variablesParseState),
AssertionException,
40535);
@@ -561,8 +551,7 @@ TEST_F(DateExpressionTest, DoesResultInNullIfGivenNullishInput) {
    // Test that the expression results in null if the date and timezone are both nullish.
spec = BSON(expName << BSON("date"
<< "$missing"
- << "timezone"
- << BSONUndefined));
+ << "timezone" << BSONUndefined));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate(contextDoc, &expCtx->variables));
@@ -619,8 +608,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// missing.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}));
+ << "date" << Date_t{}));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -628,9 +616,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// constants.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "Europe/Amsterdam"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -639,8 +625,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// expressions which evaluate to constants.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m%d"
- << "date"
- << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
+ << "date" << BSON("$add" << BSON_ARRAY(Date_t{} << 1000))
<< "timezone"
<< BSON("$concat" << BSON_ARRAY("Europe"
<< "/"
@@ -652,9 +637,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// 'onNull'.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "Europe/Amsterdam"
<< "onNull"
<< "null default"));
@@ -676,9 +659,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// timezone is not a constant.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "timezone"
+ << "date" << Date_t{} << "timezone"
<< "$tz"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -686,9 +667,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it does *not* become a constant if 'onNull' does not evaluate to a constant.
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << Date_t{}
- << "onNull"
+ << "date" << Date_t{} << "onNull"
<< "$onNull"));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
@@ -696,8 +675,7 @@ TEST_F(ExpressionDateToStringTest, OptimizesToConstantIfAllInputsAreConstant) {
// Test that it does *not* become a constant if 'format' does not evaluate to a constant.
spec = BSON("$dateToString" << BSON("format"
<< "$format"
- << "date"
- << Date_t{}));
+ << "date" << Date_t{}));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_FALSE(dynamic_cast<ExpressionConstant*>(dateExp->optimize().get()));
}
@@ -707,19 +685,14 @@ TEST_F(ExpressionDateToStringTest, ReturnsOnNullValueWhenInputIsNullish) {
auto spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << BSONNULL
- << "onNull"
+ << "date" << BSONNULL << "onNull"
<< "null default"));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("null default"_sd), dateExp->evaluate({}, &expCtx->variables));
spec = BSON("$dateToString" << BSON("format"
<< "%Y-%m-%d"
- << "date"
- << BSONNULL
- << "onNull"
- << BSONNULL));
+ << "date" << BSONNULL << "onNull" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1074,15 +1047,13 @@ TEST_F(ExpressionDateFromStringTest, RejectsNonStringFormat) {
auto spec = BSON("$dateFromString" << BSON("dateString"
<< "2017-07-13T10:02:57"
- << "format"
- << 2));
+ << "format" << 2));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
spec = BSON("$dateFromString" << BSON("dateString"
<< "July 4, 2017"
- << "format"
- << true));
+ << "format" << true));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
}
@@ -1126,8 +1097,7 @@ TEST_F(ExpressionDateFromStringTest, EvaluatesToNullIfFormatIsNullish) {
auto spec = BSON("$dateFromString" << BSON("dateString"
<< "1/1/2017"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1140,8 +1110,7 @@ TEST_F(ExpressionDateFromStringTest, EvaluatesToNullIfFormatIsNullish) {
spec = BSON("$dateFromString" << BSON("dateString"
<< "1/1/2017"
- << "format"
- << BSONUndefined));
+ << "format" << BSONUndefined));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1265,8 +1234,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidFormatTakesPrecedenceOverOnNull) {
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "format"
- << 5));
+ << "format" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
@@ -1285,8 +1253,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidFormatTakesPrecedenceOverOnError) {
<< "Invalid dateString"
<< "onError"
<< "Not used default"
- << "format"
- << 5));
+ << "format" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40684);
@@ -1303,8 +1270,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidTimezoneTakesPrecedenceOverOnNull) {
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "timezone"
- << 5));
+ << "timezone" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40517);
@@ -1323,8 +1289,7 @@ TEST_F(ExpressionDateFromStringTest, InvalidTimezoneTakesPrecedenceOverOnError)
<< "Invalid dateString"
<< "onError"
<< "On error default"
- << "timezone"
- << 5));
+ << "timezone" << 5));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_THROWS_CODE(dateExp->evaluate({}, &expCtx->variables), AssertionException, 40517);
@@ -1341,15 +1306,13 @@ TEST_F(ExpressionDateFromStringTest, OnNullTakesPrecedenceOverOtherNullishParame
auto spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "timezone"
- << BSONNULL));
+ << "timezone" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("Null default"_sd), dateExp->evaluate({}, &expCtx->variables));
spec = BSON("$dateFromString" << BSON("dateString" << BSONNULL << "onNull"
<< "Null default"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value("Null default"_sd), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1361,8 +1324,7 @@ TEST_F(ExpressionDateFromStringTest, OnNullOnlyUsedIfInputStringIsNullish) {
<< "2018-02-14"
<< "onNull"
<< "Null default"
- << "timezone"
- << BSONNULL));
+ << "timezone" << BSONNULL));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
@@ -1370,8 +1332,7 @@ TEST_F(ExpressionDateFromStringTest, OnNullOnlyUsedIfInputStringIsNullish) {
<< "2018-02-14"
<< "onNull"
<< "Null default"
- << "format"
- << BSONNULL));
+ << "format" << BSONNULL));
dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_VALUE_EQ(Value(BSONNULL), dateExp->evaluate({}, &expCtx->variables));
}
@@ -1406,10 +1367,10 @@ TEST_F(ExpressionDateFromStringTest, ReturnsOnErrorForFormatMismatch) {
TEST_F(ExpressionDateFromStringTest, OnNullEvaluatedLazily) {
auto expCtx = getExpCtx();
- auto spec = BSON("$dateFromString" << BSON("dateString"
- << "$date"
- << "onNull"
- << BSON("$divide" << BSON_ARRAY(1 << 0))));
+ auto spec =
+ BSON("$dateFromString" << BSON("dateString"
+ << "$date"
+ << "onNull" << BSON("$divide" << BSON_ARRAY(1 << 0))));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_EQ(
"2018-02-14T00:00:00.000Z",
@@ -1420,10 +1381,10 @@ TEST_F(ExpressionDateFromStringTest, OnNullEvaluatedLazily) {
TEST_F(ExpressionDateFromStringTest, OnErrorEvaluatedLazily) {
auto expCtx = getExpCtx();
- auto spec = BSON("$dateFromString" << BSON("dateString"
- << "$date"
- << "onError"
- << BSON("$divide" << BSON_ARRAY(1 << 0))));
+ auto spec =
+ BSON("$dateFromString" << BSON("dateString"
+ << "$date"
+ << "onError" << BSON("$divide" << BSON_ARRAY(1 << 0))));
auto dateExp = Expression::parseExpression(expCtx, spec, expCtx->variablesParseState);
ASSERT_EQ(
"2018-02-14T00:00:00.000Z",
diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp
index 581e6963e1e..a95cf3ff303 100644
--- a/src/mongo/db/pipeline/expression_test.cpp
+++ b/src/mongo/db/pipeline/expression_test.cpp
@@ -47,13 +47,13 @@ namespace ExpressionTests {
using boost::intrusive_ptr;
using std::initializer_list;
+using std::list;
using std::numeric_limits;
using std::pair;
using std::set;
using std::sort;
using std::string;
using std::vector;
-using std::list;
/**
* Creates an expression given by 'expressionName' and evaluates it using
@@ -590,8 +590,8 @@ TEST_F(ExpressionNaryTest, FlattenInnerOperandsOptimizationOnAssociativeOnlyMidd
intrusive_ptr<Expression> optimized = _associativeOnly->optimize();
ASSERT(_associativeOnly == optimized);
- BSONArray expectedContent = BSON_ARRAY(
- 200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1" << BSON_ARRAY(101 << 99) << "$path2");
+ BSONArray expectedContent = BSON_ARRAY(200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1"
+ << BSON_ARRAY(101 << 99) << "$path2");
assertContents(_associativeOnly, expectedContent);
}
@@ -737,12 +737,10 @@ TEST(ExpressionArrayToObjectTest, KVFormatSimple) {
assertExpectedResults("$arrayToObject",
{{{Value(BSON_ARRAY(BSON("k"
<< "key1"
- << "v"
- << 2)
+ << "v" << 2)
<< BSON("k"
<< "key2"
- << "v"
- << 3)))},
+ << "v" << 3)))},
{Value(BSON("key1" << 2 << "key2" << 3))}}});
}
@@ -750,12 +748,10 @@ TEST(ExpressionArrayToObjectTest, KVFormatWithDuplicates) {
assertExpectedResults("$arrayToObject",
{{{Value(BSON_ARRAY(BSON("k"
<< "hi"
- << "v"
- << 2)
+ << "v" << 2)
<< BSON("k"
<< "hi"
- << "v"
- << 3)))},
+ << "v" << 3)))},
{Value(BSON("hi" << 3))}}});
}
@@ -1888,8 +1884,7 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b"
- << 1));
+ << "$b" << 1));
}
BSONObj expectedOptimized() {
return BSON("$and" << BSON_ARRAY("$a"
@@ -1901,8 +1896,7 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b"
- << 0));
+ << "$b" << 0));
}
BSONObj expectedOptimized() {
return BSON("$const" << false);
@@ -3261,8 +3255,7 @@ TEST(ExpressionObjectParse, ShouldAcceptLiteralsAsValues) {
auto object = ExpressionObject::parse(expCtx,
BSON("a" << 5 << "b"
<< "string"
- << "c"
- << BSONNULL),
+ << "c" << BSONNULL),
vps);
auto expectedResult =
Value(Document{{"a", literal(5)}, {"b", literal("string"_sd)}, {"c", literal(BSONNULL)}});
@@ -3386,10 +3379,10 @@ auto expressionObjectCreateHelper(
expressionsWithChildrenInPlace) {
std::vector<boost::intrusive_ptr<Expression>> children;
std::vector<std::pair<std::string, boost::intrusive_ptr<Expression>&>> expressions;
- for (auto & [ unused, expression ] : expressionsWithChildrenInPlace)
+ for (auto& [unused, expression] : expressionsWithChildrenInPlace)
children.push_back(std::move(expression));
std::vector<boost::intrusive_ptr<Expression>>::size_type index = 0;
- for (auto & [ fieldName, unused ] : expressionsWithChildrenInPlace) {
+ for (auto& [fieldName, unused] : expressionsWithChildrenInPlace) {
expressions.emplace_back(fieldName, children[index]);
++index;
}
@@ -3840,8 +3833,7 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b"
- << 1));
+ << "$b" << 1));
}
BSONObj expectedOptimized() {
return BSON("$const" << true);
@@ -3852,8 +3844,7 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b"
- << 0));
+ << "$b" << 0));
}
BSONObj expectedOptimized() {
return BSON("$or" << BSON_ARRAY("$a"
@@ -4174,12 +4165,9 @@ class Same : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4187,12 +4175,9 @@ class Redundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4201,11 +4186,8 @@ class DoubleRedundant : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(DOC_ARRAY(1 << 1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << DOC_ARRAY(1 << 2) << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4213,12 +4195,9 @@ class Super : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << DOC_ARRAY(2)));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
}
};
@@ -4226,12 +4205,9 @@ class SuperWithRedundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << DOC_ARRAY(2)));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << DOC_ARRAY(2)));
}
};
@@ -4239,12 +4215,9 @@ class Sub : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(1)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4252,12 +4225,9 @@ class SameBackwards : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(2 << 1)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection"
- << DOC_ARRAY(1 << 2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setDifference"
- << vector<Value>()));
+ << "$setIntersection" << DOC_ARRAY(1 << 2)
+ << "$setUnion" << DOC_ARRAY(1 << 2)
+ << "$setDifference" << vector<Value>()));
}
};
@@ -4265,12 +4235,9 @@ class NoOverlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << vector<Value>()
- << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference"
- << DOC_ARRAY(1 << 2)));
+ << "$setIntersection" << vector<Value>()
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1 << 2)));
}
};
@@ -4278,12 +4245,9 @@ class Overlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 2 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection"
- << DOC_ARRAY(2)
- << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference"
- << DOC_ARRAY(1)));
+ << "$setIntersection" << DOC_ARRAY(2)
+ << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference" << DOC_ARRAY(1)));
}
};
@@ -4291,8 +4255,7 @@ class LastNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << Value(BSONNULL)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4303,8 +4266,7 @@ class FirstNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(Value(BSONNULL) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference"
- << BSONNULL)
+ << "$setDifference" << BSONNULL)
<< "error"
<< DOC_ARRAY("$setEquals"_sd
<< "$setIsSubset"_sd));
@@ -4351,12 +4313,8 @@ class LeftArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(vector<Value>() << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setIsSubset"
- << true
- << "$setEquals"
- << false
- << "$setDifference"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << true
+ << "$setEquals" << false << "$setDifference"
<< vector<Value>()));
}
};
@@ -4365,45 +4323,39 @@ class RightArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2)
- << "$setIsSubset"
- << false
- << "$setEquals"
- << false
- << "$setDifference"
+ << DOC_ARRAY(1 << 2) << "$setIsSubset" << false
+ << "$setEquals" << false << "$setDifference"
<< DOC_ARRAY(1 << 2)));
}
};
class ManyArgs : public ExpectedResultBase {
Document getSpec() {
- return DOC(
- "input" << DOC_ARRAY(DOC_ARRAY(8 << 3) << DOC_ARRAY("asdf"_sd
- << "foo"_sd)
- << DOC_ARRAY(80.3 << 34)
- << vector<Value>()
- << DOC_ARRAY(80.3 << "foo"_sd << 11 << "yay"_sd))
- << "expected"
- << DOC("$setIntersection" << vector<Value>() << "$setEquals" << false
- << "$setUnion"
- << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"_sd
- << "foo"_sd
- << "yay"_sd))
- << "error"
- << DOC_ARRAY("$setIsSubset"_sd
- << "$setDifference"_sd));
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(8 << 3)
+ << DOC_ARRAY("asdf"_sd
+ << "foo"_sd)
+ << DOC_ARRAY(80.3 << 34) << vector<Value>()
+ << DOC_ARRAY(80.3 << "foo"_sd << 11 << "yay"_sd))
+ << "expected"
+ << DOC("$setIntersection"
+ << vector<Value>() << "$setEquals" << false << "$setUnion"
+ << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"_sd
+ << "foo"_sd
+ << "yay"_sd))
+ << "error"
+ << DOC_ARRAY("$setIsSubset"_sd
+ << "$setDifference"_sd));
}
};
class ManyArgsEqual : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4) << DOC_ARRAY(1 << 2 << 2 << 4)
- << DOC_ARRAY(4 << 1 << 2)
- << DOC_ARRAY(2 << 1 << 1 << 4))
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4)
+ << DOC_ARRAY(1 << 2 << 2 << 4) << DOC_ARRAY(4 << 1 << 2)
+ << DOC_ARRAY(2 << 1 << 1 << 4))
<< "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4) << "$setEquals"
- << true
- << "$setUnion"
+ << true << "$setUnion"
<< DOC_ARRAY(1 << 2 << 4))
<< "error"
<< DOC_ARRAY("$setIsSubset"_sd
@@ -4690,7 +4642,7 @@ TEST(ExpressionSubstrTest, ThrowsWithNegativeStart) {
ASSERT_THROWS([&] { expr->evaluate({}, &expCtx->variables); }(), AssertionException);
}
-} // namespace Substr
+} // namespace SubstrBytes
namespace SubstrCP {
@@ -4805,8 +4757,7 @@ TEST(ExpressionTrimParsingTest, ThrowsIfSpecContainsUnrecognizedField) {
ASSERT_THROWS(Expression::parseExpression(expCtx,
BSON("$ltrim" << BSON("chars"
<< "xyz"
- << "other"
- << 1)),
+ << "other" << 1)),
expCtx->variablesParseState),
AssertionException);
ASSERT_THROWS(Expression::parseExpression(expCtx,
@@ -4814,8 +4765,7 @@ TEST(ExpressionTrimParsingTest, ThrowsIfSpecContainsUnrecognizedField) {
<< "$x"
<< "chars"
<< "xyz"
- << "other"
- << 1)),
+ << "other" << 1)),
expCtx->variablesParseState),
AssertionException);
}
@@ -5333,8 +5283,7 @@ TEST(ExpressionTrimTest, DoesOptimizeToConstantWithCustomChars) {
expCtx,
BSON("$trim" << BSON("input"
<< " abc "
- << "chars"
- << BSON("$substrCP" << BSON_ARRAY(" " << 1 << 1)))),
+ << "chars" << BSON("$substrCP" << BSON_ARRAY(" " << 1 << 1)))),
expCtx->variablesParseState);
optimized = trim->optimize();
constant = dynamic_cast<ExpressionConstant*>(optimized.get());
@@ -5879,8 +5828,9 @@ class FalseViaInt : public ExpectedResultBase {
class Null : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(BSONNULL) << "error" << DOC_ARRAY("$allElementsTrue"_sd
- << "$anyElementTrue"_sd));
+ return DOC("input" << DOC_ARRAY(BSONNULL) << "error"
+ << DOC_ARRAY("$allElementsTrue"_sd
+ << "$anyElementTrue"_sd));
}
};
@@ -6465,5 +6415,5 @@ TEST(NowAndClusterTime, BasicTest) {
ASSERT_VALUE_EQ(result, Value{true});
}
}
-}
+} // namespace NowAndClusterTime
} // namespace ExpressionTests
diff --git a/src/mongo/db/pipeline/expression_trigonometric.h b/src/mongo/db/pipeline/expression_trigonometric.h
index 41f10ca2e29..cc8ca852f8b 100644
--- a/src/mongo/db/pipeline/expression_trigonometric.h
+++ b/src/mongo/db/pipeline/expression_trigonometric.h
@@ -135,12 +135,8 @@ public:
void assertBounds(T input) const {
uassert(50989,
str::stream() << "cannot apply " << getOpName() << " to " << toString(input)
- << ", value must in "
- << BoundType::leftBracket()
- << getLowerBound()
- << ","
- << getUpperBound()
- << BoundType::rightBracket(),
+                              << ", value must be in " << BoundType::leftBracket() << getLowerBound()
+                              << "," << getUpperBound() << BoundType::rightBracket(),
checkBounds(input));
}
diff --git a/src/mongo/db/pipeline/expression_trigonometric_test.cpp b/src/mongo/db/pipeline/expression_trigonometric_test.cpp
index 49ea60e1f9b..b9356e60bae 100644
--- a/src/mongo/db/pipeline/expression_trigonometric_test.cpp
+++ b/src/mongo/db/pipeline/expression_trigonometric_test.cpp
@@ -1403,4 +1403,4 @@ TEST(ExpressionDegreesToRadiansTest, DecimalArg) {
TEST(ExpressionDegreesToRadiansTest, NullArg) {
assertEvaluates("$degreesToRadians", Value(BSONNULL), Value(BSONNULL));
}
-} // namespace expression_trigonometric_test
+} // namespace expression_tests
diff --git a/src/mongo/db/pipeline/field_path.cpp b/src/mongo/db/pipeline/field_path.cpp
index bb26fc478ca..4c9e23a86df 100644
--- a/src/mongo/db/pipeline/field_path.cpp
+++ b/src/mongo/db/pipeline/field_path.cpp
@@ -81,4 +81,4 @@ void FieldPath::uassertValidFieldName(StringData fieldName) {
uassert(
16412, "FieldPath field names may not contain '.'.", fieldName.find('.') == string::npos);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/field_path.h b/src/mongo/db/pipeline/field_path.h
index 347b236fb6b..bbc775be9db 100644
--- a/src/mongo/db/pipeline/field_path.h
+++ b/src/mongo/db/pipeline/field_path.h
@@ -136,4 +136,4 @@ inline bool operator<(const FieldPath& lhs, const FieldPath& rhs) {
inline bool operator==(const FieldPath& lhs, const FieldPath& rhs) {
return lhs.fullPath() == rhs.fullPath();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
index 6db3d45ea78..56164da1b21 100644
--- a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
+++ b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp
@@ -106,13 +106,9 @@ void testRoundingUpInSeries(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding up the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
}
@@ -140,15 +136,12 @@ void testRoundingUpInSeriesDecimal(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding up the value " << input.coerceToDecimal().toString()
+ << " at multiplier level " << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
+ << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -175,15 +168,11 @@ void testRoundingUpBetweenSeries(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << middle
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
- << roundedValue.coerceToDouble());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding up the value " << middle << " at multiplier level "
+ << multiplier << ". Expected " << expectedValue.coerceToDouble()
+ << ", but got " << roundedValue.coerceToDouble());
}
}
multiplier *= 10.0;
@@ -212,14 +201,10 @@ void testRoundingUpBetweenSeriesDecimal(intrusive_ptr<GranularityRounder> rounde
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << middle.toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ << " failed rounding up the value " << middle.toString()
+ << " at multiplier level " << multiplier.toString()
+ << ". Expected " << expectedValue.coerceToDecimal().toString()
+ << ", but got " << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -244,13 +229,9 @@ void testRoundingDownInSeries(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding down the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
}
@@ -277,15 +258,12 @@ void testRoundingDownInSeriesDecimal(intrusive_ptr<GranularityRounder> rounder)
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding down the value " << input.coerceToDecimal().toString()
+ << " at multiplier level " << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
+ << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -312,15 +290,11 @@ void testRoundingDownBetweenSeries(intrusive_ptr<GranularityRounder> rounder) {
try {
testEquals(roundedValue, expectedValue);
} catch (...) {
- FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << middle
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
- << roundedValue.coerceToDouble());
+ FAIL(str::stream()
+ << "The GranularityRounder for " << rounder->getName()
+ << " failed rounding down the value " << middle << " at multiplier level "
+ << multiplier << ". Expected " << expectedValue.coerceToDouble()
+ << ", but got " << roundedValue.coerceToDouble());
}
}
multiplier *= 10.0;
@@ -349,14 +323,10 @@ void testRoundingDownBetweenSeriesDecimal(intrusive_ptr<GranularityRounder> roun
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << middle.toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
- << roundedValue.coerceToDecimal().toString());
+ << " failed rounding down the value " << middle.toString()
+ << " at multiplier level " << multiplier.toString()
+ << ". Expected " << expectedValue.coerceToDecimal().toString()
+ << ", but got " << roundedValue.coerceToDecimal().toString());
}
}
multiplier = multiplier.multiply(Decimal128(10));
@@ -383,13 +353,9 @@ void testSeriesWrappingAround(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding up the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding up the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
@@ -400,13 +366,9 @@ void testSeriesWrappingAround(intrusive_ptr<GranularityRounder> rounder) {
testEquals(roundedValue, expectedValue);
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
- << " failed rounding down the value "
- << input.coerceToDouble()
- << " at multiplier level "
- << multiplier
- << ". Expected "
- << expectedValue.coerceToDouble()
- << ", but got "
+ << " failed rounding down the value " << input.coerceToDouble()
+ << " at multiplier level " << multiplier << ". Expected "
+ << expectedValue.coerceToDouble() << ", but got "
<< roundedValue.coerceToDouble());
}
multiplier *= 10.0;
@@ -430,12 +392,9 @@ void testSeriesWrappingAroundDecimal(intrusive_ptr<GranularityRounder> rounder)
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
<< " failed rounding up the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
+ << input.coerceToDecimal().toString() << " at multiplier level "
+ << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
<< roundedValue.coerceToDecimal().toString());
}
@@ -449,12 +408,9 @@ void testSeriesWrappingAroundDecimal(intrusive_ptr<GranularityRounder> rounder)
} catch (...) {
FAIL(str::stream() << "The GranularityRounder for " << rounder->getName()
<< " failed rounding down the value "
- << input.coerceToDecimal().toString()
- << " at multiplier level "
- << multiplier.toString()
- << ". Expected "
- << expectedValue.coerceToDecimal().toString()
- << ", but got "
+ << input.coerceToDecimal().toString() << " at multiplier level "
+ << multiplier.toString() << ". Expected "
+ << expectedValue.coerceToDecimal().toString() << ", but got "
<< roundedValue.coerceToDecimal().toString());
}
multiplier.multiply(Decimal128(10));
diff --git a/src/mongo/db/pipeline/lite_parsed_document_source.cpp b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
index 87aebb72238..28b5b133a65 100644
--- a/src/mongo/db/pipeline/lite_parsed_document_source.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
@@ -61,4 +61,4 @@ std::unique_ptr<LiteParsedDocumentSource> LiteParsedDocumentSource::parse(
return it->second(request, specElem);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
index b1802c91970..81a10467a58 100644
--- a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp
@@ -54,8 +54,7 @@ void LiteParsedPipeline::assertSupportsReadConcern(
uassert(ErrorCodes::InvalidOptions,
str::stream() << "Explain for the aggregate command cannot run with a readConcern "
<< "other than 'local', or in a multi-document transaction. Current "
- << "readConcern: "
- << readConcern.toString(),
+ << "readConcern: " << readConcern.toString(),
!explain || readConcern.getLevel() == repl::ReadConcernLevel::kLocalReadConcern);
for (auto&& spec : _stageSpecs) {
diff --git a/src/mongo/db/pipeline/lookup_set_cache.h b/src/mongo/db/pipeline/lookup_set_cache.h
index 0a68a65d086..69f897f5583 100644
--- a/src/mongo/db/pipeline/lookup_set_cache.h
+++ b/src/mongo/db/pipeline/lookup_set_cache.h
@@ -47,10 +47,10 @@
namespace mongo {
using boost::multi_index_container;
-using boost::multi_index::sequenced;
using boost::multi_index::hashed_unique;
-using boost::multi_index::member;
using boost::multi_index::indexed_by;
+using boost::multi_index::member;
+using boost::multi_index::sequenced;
/**
* A least-recently-used cache from key to a vector of values. It does not implement any default
diff --git a/src/mongo/db/pipeline/mongos_process_interface.cpp b/src/mongo/db/pipeline/mongos_process_interface.cpp
index 331b02bcbcf..6cc2a11d8eb 100644
--- a/src/mongo/db/pipeline/mongos_process_interface.cpp
+++ b/src/mongo/db/pipeline/mongos_process_interface.cpp
@@ -227,15 +227,12 @@ boost::optional<Document> MongoSInterface::lookupSingleDocument(
uassert(ErrorCodes::InternalError,
str::stream() << "Shard cursor was unexpectedly open after lookup: "
<< shardResult.front().getHostAndPort()
- << ", id: "
- << cursor.getCursorId(),
+ << ", id: " << cursor.getCursorId(),
cursor.getCursorId() == 0);
uassert(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document matching " << filter.toString() << " ["
- << batch.begin()->toString()
- << ", "
- << std::next(batch.begin())->toString()
- << "]",
+ << batch.begin()->toString() << ", "
+ << std::next(batch.begin())->toString() << "]",
batch.size() <= 1u);
return (!batch.empty() ? Document(batch.front()) : boost::optional<Document>{});
diff --git a/src/mongo/db/pipeline/parsed_aggregation_projection.cpp b/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
index 3f283079ac4..42a76ef94e5 100644
--- a/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
+++ b/src/mongo/db/pipeline/parsed_aggregation_projection.cpp
@@ -86,11 +86,7 @@ void ProjectionSpecValidator::ensurePathDoesNotConflictOrThrow(const std::string
uassert(40176,
str::stream() << "specification contains two conflicting paths. "
"Cannot specify both '"
- << path
- << "' and '"
- << *conflictingPath
- << "': "
- << _rawObj.toString(),
+ << path << "' and '" << *conflictingPath << "': " << _rawObj.toString(),
!conflictingPath);
}
@@ -129,10 +125,8 @@ void ProjectionSpecValidator::parseNestedObject(const BSONObj& thisLevelSpec,
uasserted(40181,
str::stream() << "an expression specification must contain exactly "
"one field, the name of the expression. Found "
- << thisLevelSpec.nFields()
- << " fields in "
- << thisLevelSpec.toString()
- << ", while parsing object "
+ << thisLevelSpec.nFields() << " fields in "
+ << thisLevelSpec.toString() << ", while parsing object "
<< _rawObj.toString());
}
ensurePathDoesNotConflictOrThrow(prefix.fullPath());
@@ -141,8 +135,7 @@ void ProjectionSpecValidator::parseNestedObject(const BSONObj& thisLevelSpec,
if (fieldName.find('.') != std::string::npos) {
uasserted(40183,
str::stream() << "cannot use dotted field name '" << fieldName
- << "' in a sub object: "
- << _rawObj.toString());
+ << "' in a sub object: " << _rawObj.toString());
}
parseElement(elem, FieldPath::getFullyQualifiedPath(prefix.fullPath(), fieldName));
}
@@ -245,23 +238,25 @@ private:
} else if ((elem.isBoolean() || elem.isNumber()) && !elem.trueValue()) {
// If this is an excluded field other than '_id', ensure that the projection type has
// not already been set to kInclusionProjection.
- uassert(40178,
- str::stream() << "Bad projection specification, cannot exclude fields "
- "other than '_id' in an inclusion projection: "
- << _rawObj.toString(),
- !_parsedType || (*_parsedType ==
- TransformerInterface::TransformerType::kExclusionProjection));
+ uassert(
+ 40178,
+ str::stream() << "Bad projection specification, cannot exclude fields "
+ "other than '_id' in an inclusion projection: "
+ << _rawObj.toString(),
+ !_parsedType ||
+ (*_parsedType == TransformerInterface::TransformerType::kExclusionProjection));
_parsedType = TransformerInterface::TransformerType::kExclusionProjection;
} else {
// A boolean true, a truthy numeric value, or any expression can only be used with an
// inclusion projection. Note that literal values like "string" or null are also treated
// as expressions.
- uassert(40179,
- str::stream() << "Bad projection specification, cannot include fields or "
- "add computed fields during an exclusion projection: "
- << _rawObj.toString(),
- !_parsedType || (*_parsedType ==
- TransformerInterface::TransformerType::kInclusionProjection));
+ uassert(
+ 40179,
+ str::stream() << "Bad projection specification, cannot include fields or "
+ "add computed fields during an exclusion projection: "
+ << _rawObj.toString(),
+ !_parsedType ||
+ (*_parsedType == TransformerInterface::TransformerType::kInclusionProjection));
_parsedType = TransformerInterface::TransformerType::kInclusionProjection;
}
}
diff --git a/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp b/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
index 15efa442726..27ce39b9c86 100644
--- a/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_aggregation_projection_test.cpp
@@ -149,15 +149,13 @@ TEST(ParsedAggregationProjectionErrors, ShouldRejectPathConflictsWithNonAlphaNum
// Then assert that we throw when we introduce a prefixed field.
ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(
- BSON("a.b-c" << true << "a.b" << true << "a.b?c" << true << "a.b c" << true << "a.b.d"
- << true)),
- AssertionException);
- ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(BSON(
- "a.b.d" << false << "a.b c" << false << "a.b?c" << false << "a.b" << false << "a.b-c"
- << false)),
+ makeProjectionWithDefaultPolicies(BSON("a.b-c" << true << "a.b" << true << "a.b?c" << true
+ << "a.b c" << true << "a.b.d" << true)),
AssertionException);
+ ASSERT_THROWS(makeProjectionWithDefaultPolicies(BSON("a.b.d" << false << "a.b c" << false
+ << "a.b?c" << false << "a.b"
+ << false << "a.b-c" << false)),
+ AssertionException);
// Adding the same field twice.
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
@@ -168,34 +166,24 @@ TEST(ParsedAggregationProjectionErrors, ShouldRejectPathConflictsWithNonAlphaNum
AssertionException);
// Mix of include/exclude and adding a shared prefix.
- ASSERT_THROWS(
- makeProjectionWithDefaultPolicies(
- BSON("a.b-c" << true << "a.b" << wrapInLiteral(1) << "a.b?c" << true << "a.b c" << true
- << "a.b.d"
- << true)),
- AssertionException);
+ ASSERT_THROWS(makeProjectionWithDefaultPolicies(
+ BSON("a.b-c" << true << "a.b" << wrapInLiteral(1) << "a.b?c" << true
+ << "a.b c" << true << "a.b.d" << true)),
+ AssertionException);
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b.d" << false << "a.b c" << false << "a.b?c" << false << "a.b"
- << wrapInLiteral(0)
- << "a.b-c"
- << false)),
+ << wrapInLiteral(0) << "a.b-c" << false)),
AssertionException);
// Adding a shared prefix twice.
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b-c" << wrapInLiteral(1) << "a.b" << wrapInLiteral(1) << "a.b?c"
- << wrapInLiteral(1)
- << "a.b c"
- << wrapInLiteral(1)
- << "a.b.d"
+ << wrapInLiteral(1) << "a.b c" << wrapInLiteral(1) << "a.b.d"
<< wrapInLiteral(0))),
AssertionException);
ASSERT_THROWS(makeProjectionWithDefaultPolicies(
BSON("a.b.d" << wrapInLiteral(1) << "a.b c" << wrapInLiteral(1) << "a.b?c"
- << wrapInLiteral(1)
- << "a.b"
- << wrapInLiteral(0)
- << "a.b-c"
+ << wrapInLiteral(1) << "a.b" << wrapInLiteral(0) << "a.b-c"
<< wrapInLiteral(1))),
AssertionException);
}
diff --git a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
index ca85af5fae1..df71508b41e 100644
--- a/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
+++ b/src/mongo/db/pipeline/parsed_inclusion_projection_test.cpp
@@ -240,10 +240,8 @@ TEST(InclusionProjectionExecutionTest, ShouldOptimizeNestedExpressions) {
TEST(InclusionProjectionExecutionTest, ShouldReportThatAllExceptIncludedFieldsAreModified) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
- inclusion.parse(BSON(
- "a" << wrapInLiteral("computedVal") << "b.c" << wrapInLiteral("computedVal") << "d" << true
- << "e.f"
- << true));
+ inclusion.parse(BSON("a" << wrapInLiteral("computedVal") << "b.c"
+ << wrapInLiteral("computedVal") << "d" << true << "e.f" << true));
auto modifiedPaths = inclusion.getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kAllExcept);
@@ -261,11 +259,7 @@ TEST(InclusionProjectionExecutionTest,
ShouldReportThatAllExceptIncludedFieldsAreModifiedWithIdExclusion) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
inclusion.parse(BSON("_id" << false << "a" << wrapInLiteral("computedVal") << "b.c"
- << wrapInLiteral("computedVal")
- << "d"
- << true
- << "e.f"
- << true));
+ << wrapInLiteral("computedVal") << "d" << true << "e.f" << true));
auto modifiedPaths = inclusion.getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kAllExcept);
@@ -573,11 +567,10 @@ TEST(InclusionProjectionExecutionTest, ShouldAllowMixedNestedAndDottedFields) {
auto inclusion = makeInclusionProjectionWithDefaultPolicies();
// Include all of "a.b", "a.c", "a.d", and "a.e".
// Add new computed fields "a.W", "a.X", "a.Y", and "a.Z".
- inclusion.parse(BSON(
- "a.b" << true << "a.c" << true << "a.W" << wrapInLiteral("W") << "a.X" << wrapInLiteral("X")
- << "a"
- << BSON("d" << true << "e" << true << "Y" << wrapInLiteral("Y") << "Z"
- << wrapInLiteral("Z"))));
+ inclusion.parse(BSON("a.b" << true << "a.c" << true << "a.W" << wrapInLiteral("W") << "a.X"
+ << wrapInLiteral("X") << "a"
+ << BSON("d" << true << "e" << true << "Y" << wrapInLiteral("Y")
+ << "Z" << wrapInLiteral("Z"))));
auto result = inclusion.applyProjection(Document{
{"a",
Document{{"b", "b"_sd}, {"c", "c"_sd}, {"d", "d"_sd}, {"e", "e"_sd}, {"f", "f"_sd}}}});
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 439d4d9ab88..1ce178126dd 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -169,9 +169,9 @@ void Pipeline::validateTopLevelPipeline() const {
if (nss.isCollectionlessAggregateNS() &&
!firstStageConstraints.isIndependentOfAnyCollection) {
uasserted(ErrorCodes::InvalidNamespace,
- str::stream() << "{aggregate: 1} is not valid for '"
- << _sources.front()->getSourceName()
- << "'; a collection is required.");
+ str::stream()
+ << "{aggregate: 1} is not valid for '"
+ << _sources.front()->getSourceName() << "'; a collection is required.");
}
if (!nss.isCollectionlessAggregateNS() &&
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 178426d0c94..96047148104 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -516,9 +516,9 @@ PipelineD::buildInnerQueryExecutorGeneric(Collection* collection,
(pipeline->peekFront() && pipeline->peekFront()->constraints().isChangeStreamStage());
auto attachExecutorCallback = [deps, queryObj, sortObj, projForQuery, trackOplogTS](
- Collection* collection,
- std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
- Pipeline* pipeline) {
+ Collection* collection,
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
+ Pipeline* pipeline) {
auto cursor = DocumentSourceCursor::create(
collection, std::move(exec), pipeline->getContext(), trackOplogTS);
addCursorSource(
@@ -575,15 +575,14 @@ PipelineD::buildInnerQueryExecutorGeoNear(Collection* collection,
str::stream() << "Unexpectedly got the following sort from the query system: "
<< sortFromQuerySystem.jsonString());
- auto attachExecutorCallback =
- [
- deps,
- distanceField = geoNearStage->getDistanceField(),
- locationField = geoNearStage->getLocationField(),
- distanceMultiplier = geoNearStage->getDistanceMultiplier().value_or(1.0)
- ](Collection * collection,
- std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
- Pipeline * pipeline) {
+ auto attachExecutorCallback = [deps,
+ distanceField = geoNearStage->getDistanceField(),
+ locationField = geoNearStage->getLocationField(),
+ distanceMultiplier =
+ geoNearStage->getDistanceMultiplier().value_or(1.0)](
+ Collection* collection,
+ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec,
+ Pipeline* pipeline) {
auto cursor = DocumentSourceGeoNearCursor::create(collection,
std::move(exec),
pipeline->getContext(),
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree.h b/src/mongo/db/pipeline/pipeline_metadata_tree.h
index fe8c1f02770..1a22c452590 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree.h
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree.h
@@ -117,8 +117,7 @@ inline auto findStageContents(const NamespaceString& ns,
auto it = initialStageContents.find(ns);
uassert(51213,
str::stream() << "Metadata to initialize an aggregation pipeline associated with "
- << ns.coll()
- << " is missing.",
+ << ns.coll() << " is missing.",
it != initialStageContents.end());
return it->second;
}
@@ -154,7 +153,7 @@ inline auto makeAdditionalChildren(
std::vector<T> offTheEndContents;
if (auto lookupSource = dynamic_cast<const DocumentSourceLookUp*>(&source);
lookupSource && lookupSource->wasConstructedWithPipelineSyntax()) {
- auto[child, offTheEndReshaper] =
+ auto [child, offTheEndReshaper] =
makeTreeWithOffTheEndStage(std::move(initialStageContents),
lookupSource->getResolvedIntrospectionPipeline(),
propagator);
@@ -166,7 +165,7 @@ inline auto makeAdditionalChildren(
facetSource->getFacetPipelines().end(),
std::back_inserter(children),
[&](const auto& fPipe) {
- auto[child, offTheEndReshaper] = makeTreeWithOffTheEndStage(
+ auto [child, offTheEndReshaper] = makeTreeWithOffTheEndStage(
std::move(initialStageContents), *fPipe.pipeline, propagator);
offTheEndContents.push_back(offTheEndReshaper(child.get().contents));
return std::move(*child);
@@ -192,13 +191,15 @@ inline auto makeStage(
auto contents = (previous) ? reshapeContents(previous.get().contents)
: findStageContents(source.getContext()->ns, initialStageContents);
- auto[additionalChildren, offTheEndContents] =
+ auto [additionalChildren, offTheEndContents] =
makeAdditionalChildren(std::move(initialStageContents), source, propagator, contents);
auto principalChild = previous ? std::make_unique<Stage<T>>(std::move(previous.get()))
: std::unique_ptr<Stage<T>>();
- std::function<T(const T&)> reshaper([&, offTheEndContents{std::move(offTheEndContents)} ](
- const T& reshapable) { return propagator(reshapable, offTheEndContents, source); });
+ std::function<T(const T&)> reshaper(
+ [&, offTheEndContents{std::move(offTheEndContents)}](const T& reshapable) {
+ return propagator(reshapable, offTheEndContents, source);
+ });
return std::pair(
boost::optional<Stage<T>>(
Stage(std::move(contents), std::move(principalChild), std::move(additionalChildren))),
@@ -278,7 +279,7 @@ inline std::pair<boost::optional<Stage<T>>, T> makeTree(
findStageContents(pipeline.getContext()->ns, initialStageContents));
}
- auto && [ finalStage, reshaper ] =
+ auto&& [finalStage, reshaper] =
detail::makeTreeWithOffTheEndStage(std::move(initialStageContents), pipeline, propagator);
return std::pair(std::move(*finalStage), reshaper(finalStage.get().contents));
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
index 25a161c2048..5a15074b361 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
@@ -129,7 +129,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
auto pipePtr = jsonToPipeline("[{$project: {name: 1}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() == Stage(TestThing{23}, {}, {}));
+ }()
+ .first.get() == Stage(TestThing{23}, {}, {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -137,7 +138,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
"{$match: {status: \"completed\"}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
+ }()
+ .first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -149,7 +151,8 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
"{$match: {status: \"completed\"}}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{28},
makeUniqueStage(
TestThing{27},
@@ -247,7 +250,8 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
{NamespaceString("test.instruments"), {"2"}}},
*pipePtr,
buildRepresentativeString);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{"1mpxul[2m]ulu"},
makeUniqueStage(
TestThing{"1mpxul[2m]ul"},
@@ -283,7 +287,8 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
"{$limit: 12}]");
return makeTree<TestThing>(
{{NamespaceString("test.collection"), {""}}}, *pipePtr, buildRepresentativeString);
- }().first.get() ==
+ }()
+ .first.get() ==
Stage(TestThing{"f[tugs, tmgs, tb]"},
makeUniqueStage(
TestThing{""},
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index d7a51028503..225303b9999 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -187,7 +187,7 @@ Update MongoInterfaceStandalone::buildUpdateOp(
for (auto&& obj : batch) {
updateEntries.push_back([&] {
UpdateOpEntry entry;
- auto && [ q, u, c ] = obj;
+ auto&& [q, u, c] = obj;
entry.setQ(std::move(q));
entry.setU(std::move(u));
entry.setC(std::move(c));
@@ -306,8 +306,7 @@ void MongoInterfaceStandalone::renameIfOptionsAndIndexesHaveNotChanged(
str::stream() << "collection options of target collection " << targetNs.ns()
<< " changed during processing. Original options: "
<< originalCollectionOptions
- << ", new options: "
- << getCollectionOptions(targetNs),
+ << ", new options: " << getCollectionOptions(targetNs),
SimpleBSONObjComparator::kInstance.evaluate(originalCollectionOptions ==
getCollectionOptions(targetNs)));
@@ -432,12 +431,8 @@ boost::optional<Document> MongoInterfaceStandalone::lookupSingleDocument(
if (auto next = pipeline->getNext()) {
uasserted(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document with document key "
- << documentKey.toString()
- << " ["
- << lookedUpDocument->toString()
- << ", "
- << next->toString()
- << "]");
+ << documentKey.toString() << " [" << lookedUpDocument->toString()
+ << ", " << next->toString() << "]");
}
// Set the speculative read timestamp appropriately after we do a document lookup locally. We
@@ -581,14 +576,12 @@ void MongoInterfaceStandalone::_reportCurrentOpsForIdleSessions(OperationContext
? makeSessionFilterForAuthenticatedUsers(opCtx)
: KillAllSessionsByPatternSet{{}});
- sessionCatalog->scanSessions(
- {std::move(sessionFilter)},
- [&](const ObservableSession& session) {
- auto op = TransactionParticipant::get(session).reportStashedState(opCtx);
- if (!op.isEmpty()) {
- ops->emplace_back(op);
- }
- });
+ sessionCatalog->scanSessions({std::move(sessionFilter)}, [&](const ObservableSession& session) {
+ auto op = TransactionParticipant::get(session).reportStashedState(opCtx);
+ if (!op.isEmpty()) {
+ ops->emplace_back(op);
+ }
+ });
}
std::unique_ptr<CollatorInterface> MongoInterfaceStandalone::_getCollectionDefaultCollator(
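(Illustrative aside, not part of the patch.) Most hunks in process_interface_standalone.cpp are clang-format 7 repacking long `str::stream()` message chains: instead of breaking before every `<<` operand, it now fills each line up to the column limit. A rough stand-alone analogue using `std::ostringstream`, with a made-up message and names:

    #include <sstream>
    #include <string>

    std::string tooManyDocsMessage(const std::string& key,
                                   const std::string& first,
                                   const std::string& second) {
        std::ostringstream stream;
        // Operands packed per line, mirroring the post-format shape above.
        stream << "found more than one document with document key " << key << " [" << first
               << ", " << second << "]";
        return stream.str();
    }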
diff --git a/src/mongo/db/pipeline/process_interface_standalone_test.cpp b/src/mongo/db/pipeline/process_interface_standalone_test.cpp
index fa246fc2e9d..e522111e395 100644
--- a/src/mongo/db/pipeline/process_interface_standalone_test.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone_test.cpp
@@ -93,7 +93,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
// Test that 'targetCollectionVersion' is accepted if from mongos.
expCtx->fromMongos = true;
- auto[joinKey, chunkVersion] = processInterface->ensureFieldsUniqueOrResolveDocumentKey(
+ auto [joinKey, chunkVersion] = processInterface->ensureFieldsUniqueOrResolveDocumentKey(
expCtx, {{"_id"}}, targetCollectionVersion, expCtx->ns);
ASSERT_EQ(joinKey.size(), 1UL);
ASSERT_EQ(joinKey.count(FieldPath("_id")), 1UL);
diff --git a/src/mongo/db/pipeline/resume_token.cpp b/src/mongo/db/pipeline/resume_token.cpp
index 02a3fdbccf3..0a9cdfe32c4 100644
--- a/src/mongo/db/pipeline/resume_token.cpp
+++ b/src/mongo/db/pipeline/resume_token.cpp
@@ -90,8 +90,9 @@ ResumeToken::ResumeToken(const Document& resumeDoc) {
_typeBits = resumeDoc[kTypeBitsFieldName];
uassert(40648,
str::stream() << "Bad resume token: _typeBits of wrong type " << resumeDoc.toString(),
- _typeBits.missing() || (_typeBits.getType() == BSONType::BinData &&
- _typeBits.getBinData().type == BinDataGeneral));
+ _typeBits.missing() ||
+ (_typeBits.getType() == BSONType::BinData &&
+ _typeBits.getBinData().type == BinDataGeneral));
}
// We encode the resume token as a KeyString with the sequence:
diff --git a/src/mongo/db/pipeline/resume_token_test.cpp b/src/mongo/db/pipeline/resume_token_test.cpp
index 72894880953..d684e30cc26 100644
--- a/src/mongo/db/pipeline/resume_token_test.cpp
+++ b/src/mongo/db/pipeline/resume_token_test.cpp
@@ -360,5 +360,5 @@ TEST(ResumeToken, StringEncodingSortsCorrectly) {
{ts10_4, 0, 0, lower_uuid, Value(Document{{"_id", 0}})});
}
-} // namspace
-} // namspace mongo
+} // namespace
+} // namespace mongo
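(Illustrative aside, not part of the patch.) Beyond whitespace, this hunk corrects two misspelled closing-namespace comments (`namspace` -> `namespace`), the repair clang-format's FixNamespaceComments option performs: it adds missing end-of-namespace comments and fixes invalid existing ones. The enforced shape, sketched:

    namespace mongo {
    namespace {

    // ... test helpers ...

    } // namespace
    } // namespace mongo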
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 9c84317c8d9..1c32c251ff5 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -112,8 +112,7 @@ BSONObj genericTransformForShards(MutableDocument&& cmdForShards,
invariant(cmdForShards.peek()[OperationSessionInfo::kTxnNumberFieldName].missing(),
str::stream() << "Command for shards unexpectedly had the "
<< OperationSessionInfo::kTxnNumberFieldName
- << " field set: "
- << cmdForShards.peek().toString());
+ << " field set: " << cmdForShards.peek().toString());
cmdForShards[OperationSessionInfo::kTxnNumberFieldName] =
Value(static_cast<long long>(*opCtx->getTxnNumber()));
}
@@ -336,9 +335,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
shardQuery);
invariant(cursors.size() % shardIds.size() == 0,
str::stream() << "Number of cursors (" << cursors.size()
- << ") is not a multiple of producers ("
- << shardIds.size()
- << ")");
+ << ") is not a multiple of producers (" << shardIds.size() << ")");
}
// Convert remote cursors into a vector of "owned" cursors.
@@ -350,9 +347,9 @@ DispatchShardPipelineResults dispatchShardPipeline(
// Record the number of shards involved in the aggregation. If we are required to merge on
// the primary shard, but the primary shard was not in the set of targeted shards, then we
// must increment the number of involved shards.
- CurOp::get(opCtx)->debug().nShards =
- shardIds.size() + (needsPrimaryShardMerge && executionNsRoutingInfo &&
- !shardIds.count(executionNsRoutingInfo->db().primaryId()));
+ CurOp::get(opCtx)->debug().nShards = shardIds.size() +
+ (needsPrimaryShardMerge && executionNsRoutingInfo &&
+ !shardIds.count(executionNsRoutingInfo->db().primaryId()));
return DispatchShardPipelineResults{needsPrimaryShardMerge,
std::move(ownedCursors),
diff --git a/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp b/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
index a5b877a9e49..506acd514e8 100644
--- a/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
+++ b/src/mongo/db/pipeline/stub_mongo_process_interface_lookup_single_document.cpp
@@ -93,12 +93,8 @@ boost::optional<Document> StubMongoProcessInterfaceLookupSingleDocument::lookupS
if (auto next = pipeline->getNext()) {
uasserted(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "found more than one document matching "
- << documentKey.toString()
- << " ["
- << lookedUpDocument->toString()
- << ", "
- << next->toString()
- << "]");
+ << documentKey.toString() << " [" << lookedUpDocument->toString()
+ << ", " << next->toString() << "]");
}
return lookedUpDocument;
}
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index 52a1c5fd71d..b804adaf797 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -389,8 +389,7 @@ void Value::addToBsonObj(BSONObjBuilder* builder,
size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
if (getType() == BSONType::Object) {
@@ -411,8 +410,7 @@ void Value::addToBsonObj(BSONObjBuilder* builder,
void Value::addToBsonArray(BSONArrayBuilder* builder, size_t recursionLevel) const {
uassert(ErrorCodes::Overflow,
str::stream() << "cannot convert document to BSON because it exceeds the limit of "
- << BSONDepth::getMaxAllowableDepth()
- << " levels of nesting",
+ << BSONDepth::getMaxAllowableDepth() << " levels of nesting",
recursionLevel <= BSONDepth::getMaxAllowableDepth());
// If this Value is empty, do nothing to avoid incrementing the builder's counter.
@@ -704,7 +702,7 @@ int Value::compare(const Value& rL,
case Date: // signed
return cmp(rL._storage.dateValue, rR._storage.dateValue);
- // Numbers should compare by equivalence even if different types
+ // Numbers should compare by equivalence even if different types
case NumberDecimal: {
switch (rType) {
@@ -1078,9 +1076,9 @@ size_t Value::getApproximateSize() const {
case Symbol:
case BinData:
case String:
- return sizeof(Value) + (_storage.shortStr
- ? 0 // string stored inline, so no extra mem usage
- : sizeof(RCString) + _storage.getString().size());
+ return sizeof(Value) +
+ (_storage.shortStr ? 0 // string stored inline, so no extra mem usage
+ : sizeof(RCString) + _storage.getString().size());
case Object:
return sizeof(Value) + getDocument().getApproximateSize();
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index ef0ac8b6afd..296d6d08480 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -146,7 +146,7 @@ public:
* Used when preforming arithmetic operations with int where the
* result may be too large and need to be stored as long. The Value
* will be an int if value fits, otherwise it will be a long.
- */
+ */
static Value createIntOrLong(long long value);
/** A "missing" value indicates the lack of a Value.
@@ -396,7 +396,7 @@ public:
return Value(values);
}
};
-}
+} // namespace mongo
/* ======================= INLINED IMPLEMENTATIONS ========================== */
diff --git a/src/mongo/db/pipeline/variables.cpp b/src/mongo/db/pipeline/variables.cpp
index cf6b81e9605..8a37fecc10f 100644
--- a/src/mongo/db/pipeline/variables.cpp
+++ b/src/mongo/db/pipeline/variables.cpp
@@ -68,9 +68,7 @@ void Variables::uassertValidNameForUserWrite(StringData varName) {
uassert(16868,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '"
- << varName[i]
- << "'",
+ << "for a variable name: '" << varName[i] << "'",
charIsValid);
}
}
@@ -95,9 +93,7 @@ void Variables::uassertValidNameForUserRead(StringData varName) {
uassert(16871,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '"
- << varName[i]
- << "'",
+ << "for a variable name: '" << varName[i] << "'",
charIsValid);
}
}
@@ -258,4 +254,4 @@ std::set<Variables::Id> VariablesParseState::getDefinedVariableIDs() const {
return ids;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/variables.h b/src/mongo/db/pipeline/variables.h
index 9627659b25b..b4826f1f460 100644
--- a/src/mongo/db/pipeline/variables.h
+++ b/src/mongo/db/pipeline/variables.h
@@ -156,7 +156,7 @@ private:
void setValue(Id id, const Value& value, bool isConstant);
static auto getBuiltinVariableName(Variables::Id variable) {
- for (auto & [ name, id ] : kBuiltinVarNameToId) {
+ for (auto& [name, id] : kBuiltinVarNameToId) {
if (variable == id) {
return name;
}
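(Illustrative aside, not part of the patch.) The same structured-binding rule applies inside range-for loops: `for (auto & [ name, id ] : ...)` becomes `for (auto& [name, id] : ...)`. A self-contained sketch with a hypothetical stand-in for kBuiltinVarNameToId:

    #include <array>
    #include <utility>

    constexpr std::array<std::pair<const char*, int>, 2> kNameToId{{{"NOW", 0}, {"ROOT", 1}}};

    const char* nameForId(int wanted) {
        for (auto& [name, id] : kNameToId) {  // post-format spacing
            if (id == wanted)
                return name;
        }
        return nullptr;
    }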
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 91982bff80a..6698e56766c 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -427,10 +427,10 @@ void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) {
}
/**
-* Encodes sort order into cache key.
-* Sort order is normalized because it provided by
-* QueryRequest.
-*/
+ * Encodes sort order into cache key.
+ * Sort order is normalized because it provided by
+ * QueryRequest.
+ */
void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) {
if (sortObj.isEmpty()) {
return;
@@ -463,12 +463,12 @@ void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) {
}
/**
-* Encodes parsed projection into cache key.
-* Does a simple toString() on each projected field
-* in the BSON object.
-* Orders the encoded elements in the projection by field name.
-* This handles all the special projection types ($meta, $elemMatch, etc.)
-*/
+ * Encodes parsed projection into cache key.
+ * Does a simple toString() on each projected field
+ * in the BSON object.
+ * Orders the encoded elements in the projection by field name.
+ * This handles all the special projection types ($meta, $elemMatch, etc.)
+ */
void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) {
// Sorts the BSON elements by field name using a map.
std::map<StringData, BSONElement> elements;
diff --git a/src/mongo/db/query/canonical_query_encoder.h b/src/mongo/db/query/canonical_query_encoder.h
index d0019ba08c9..73c0eff5fa7 100644
--- a/src/mongo/db/query/canonical_query_encoder.h
+++ b/src/mongo/db/query/canonical_query_encoder.h
@@ -45,5 +45,5 @@ CanonicalQuery::QueryShapeString encode(const CanonicalQuery& cq);
* Returns a hash of the given key (produced from either a QueryShapeString or a PlanCacheKey).
*/
uint32_t computeHash(StringData key);
-}
-}
+} // namespace canonical_query_encoder
+} // namespace mongo
diff --git a/src/mongo/db/query/collation/collation_index_key.cpp b/src/mongo/db/query/collation/collation_index_key.cpp
index 3af408e8abd..a973f419f06 100644
--- a/src/mongo/db/query/collation/collation_index_key.cpp
+++ b/src/mongo/db/query/collation/collation_index_key.cpp
@@ -114,9 +114,7 @@ void translateElement(StringData fieldName,
uasserted(ErrorCodes::CannotBuildIndexKeys,
str::stream()
<< "Cannot index type Symbol with a collation. Failed to index element: "
- << element
- << ". Index collation: "
- << collator->getSpec().toBSON());
+ << element << ". Index collation: " << collator->getSpec().toBSON());
}
default:
out->appendAs(element, fieldName);
@@ -144,7 +142,7 @@ void translate(BSONObj obj, const CollatorInterface* collator, BufBuilder* out)
element.fieldNameStringData(), element, collator, &ctx.getBuilder(), &ctxStack);
}
}
-}
+} // namespace
void CollationIndexKey::collationAwareIndexKeyAppend(BSONElement elt,
const CollatorInterface* collator,
diff --git a/src/mongo/db/query/collation/collation_index_key_test.cpp b/src/mongo/db/query/collation/collation_index_key_test.cpp
index 7696561060a..20a788d7df4 100644
--- a/src/mongo/db/query/collation/collation_index_key_test.cpp
+++ b/src/mongo/db/query/collation/collation_index_key_test.cpp
@@ -171,8 +171,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendThrowsIfSymbolInsideObject) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
BSONObj dataObj = BSON("" << BSON("a"
<< "foo"
- << "b"
- << BSONSymbol("mySymbol")));
+ << "b" << BSONSymbol("mySymbol")));
BSONObjBuilder out;
ASSERT_THROWS_CODE(
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out),
diff --git a/src/mongo/db/query/collation/collation_spec_test.cpp b/src/mongo/db/query/collation/collation_spec_test.cpp
index 8036e463a54..c255476292e 100644
--- a/src/mongo/db/query/collation/collation_spec_test.cpp
+++ b/src/mongo/db/query/collation/collation_spec_test.cpp
@@ -185,23 +185,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesDefaults) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -215,23 +205,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstUpper) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "upper"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -245,23 +225,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstLower) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "lower"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -275,23 +245,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesPrimaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 1
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 1 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -305,23 +265,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesSecondaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 2
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 2 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -335,23 +285,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesQuaternaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 4
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 4 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -365,23 +305,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesIdenticalStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 5
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 5 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -395,23 +325,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesAlternateShifted) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "shifted"
<< "maxVariable"
<< "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
@@ -425,23 +345,13 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesMaxVariableSpace) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel"
- << false
- << "caseFirst"
+ << "caseLevel" << false << "caseFirst"
<< "off"
- << "strength"
- << 3
- << "numericOrdering"
- << false
- << "alternate"
+ << "strength" << 3 << "numericOrdering" << false << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "space"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
+ << "normalization" << false << "backwards" << false << "version"
<< "myVersion");
ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
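(Illustrative aside, not part of the patch.) The BSON() macro hunks in this test file show how clang-format 7 treats `<<` chains containing string literals: non-string operands are packed onto shared lines, while a string literal that directly follows another string literal keeps its own line, preserving the key/value shape. The same layout reproduced with a plain stream and made-up output text:

    #include <iostream>

    void demo() {
        std::cout << "locale"
                  << "myLocale"
                  << "caseLevel" << false << "caseFirst"
                  << "off"
                  << "strength" << 3 << "numericOrdering" << false;
    }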
diff --git a/src/mongo/db/query/collation/collator_factory_icu.cpp b/src/mongo/db/query/collation/collator_factory_icu.cpp
index c8b8de7a5ab..507ef83cd91 100644
--- a/src/mongo/db/query/collation/collator_factory_icu.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu.cpp
@@ -185,13 +185,9 @@ StatusWith<CollationSpec::CaseFirstType> stringToCaseFirstType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kCaseFirstField << "' must be '"
- << CollationSpec::kCaseFirstUpper
- << "', '"
- << CollationSpec::kCaseFirstLower
- << "', or '"
- << CollationSpec::kCaseFirstOff
- << "'. Got: "
- << caseFirst};
+ << CollationSpec::kCaseFirstUpper << "', '"
+ << CollationSpec::kCaseFirstLower << "', or '"
+ << CollationSpec::kCaseFirstOff << "'. Got: " << caseFirst};
}
}
@@ -210,8 +206,7 @@ StatusWith<CollationSpec::StrengthType> integerToStrengthType(long long strength
}
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kStrengthField
- << "' must be an integer 1 through 5. Got: "
- << strength};
+ << "' must be an integer 1 through 5. Got: " << strength};
}
StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string& alternate) {
@@ -222,11 +217,8 @@ StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kAlternateField << "' must be '"
- << CollationSpec::kAlternateNonIgnorable
- << "' or '"
- << CollationSpec::kAlternateShifted
- << "'. Got: "
- << alternate};
+ << CollationSpec::kAlternateNonIgnorable << "' or '"
+ << CollationSpec::kAlternateShifted << "'. Got: " << alternate};
}
}
@@ -238,11 +230,8 @@ StatusWith<CollationSpec::MaxVariableType> stringToMaxVariableType(const std::st
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kMaxVariableField << "' must be '"
- << CollationSpec::kMaxVariablePunct
- << "' or '"
- << CollationSpec::kMaxVariableSpace
- << "'. Got: "
- << maxVariable};
+ << CollationSpec::kMaxVariablePunct << "' or '"
+ << CollationSpec::kMaxVariableSpace << "'. Got: " << maxVariable};
}
}
@@ -272,10 +261,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseLevelField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.caseLevel = attributeToBool(caseLevelAttribute);
} else if (!parseStatus.isOK()) {
@@ -289,10 +276,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseLevelField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -307,10 +292,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseFirstField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.caseFirst = getCaseFirstFromAttribute(caseFirstAttribute);
} else if (!parseStatus.isOK()) {
@@ -332,10 +315,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseFirstField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -350,10 +331,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kStrengthField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.strength = getStrengthFromAttribute(strengthAttribute);
} else if (!parseStatus.isOK()) {
@@ -374,10 +353,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kStrengthField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -393,10 +370,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNumericOrderingField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.numericOrdering = attributeToBool(numericOrderingAttribute);
} else if (!parseStatus.isOK()) {
@@ -411,10 +386,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNumericOrderingField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -430,10 +403,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kAlternateField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.alternate = getAlternateFromAttribute(alternateAttribute);
} else if (!parseStatus.isOK()) {
@@ -455,10 +426,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kAlternateField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -485,10 +454,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kMaxVariableField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -504,10 +471,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNormalizationField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.normalization = attributeToBool(normalizationAttribute);
} else if (!parseStatus.isOK()) {
@@ -522,10 +487,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNormalizationField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -541,10 +504,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kBackwardsField
- << "' attribute from icu::Collator: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute from icu::Collator: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
parsedSpec.backwards = attributeToBool(backwardsAttribute);
} else if (!parseStatus.isOK()) {
@@ -559,10 +520,8 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kBackwardsField
- << "' attribute: "
- << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << "' attribute: " << icuError.errorName()
+ << ". Collation spec: " << spec};
}
}
@@ -584,9 +543,7 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
return {ErrorCodes::IncompatibleCollationVersion,
str::stream() << "Requested collation version " << specVersionStr
<< " but the only available collator version was "
- << parsedSpec.version
- << ". Requested collation spec: "
- << spec};
+ << parsedSpec.version << ". Requested collation spec: " << spec};
}
++parsedFields;
@@ -612,8 +569,7 @@ StatusWith<std::string> parseLocaleID(const BSONObj& spec) {
if (localeID.find('\0') != std::string::npos) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot contain null byte. Collation spec: "
- << spec};
+ << "' cannot contain null byte. Collation spec: " << spec};
}
return localeID;
}
@@ -629,15 +585,13 @@ Status validateLocaleID(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get locale from icu::Collator: " << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << ". Collation spec: " << spec};
}
if (originalID.empty()) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot be the empty string in: "
- << spec};
+ << "' cannot be the empty string in: " << spec};
}
// Check that each component of the locale ID is recognized by ICU. If ICU 1) cannot parse the
@@ -668,11 +622,9 @@ Status validateCollationSpec(const CollationSpec& spec) {
if (spec.backwards && spec.strength == CollationSpec::StrengthType::kPrimary) {
return {ErrorCodes::BadValue,
str::stream() << "'" << CollationSpec::kBackwardsField << "' is invalid with '"
- << CollationSpec::kStrengthField
- << "' of "
+ << CollationSpec::kStrengthField << "' of "
<< static_cast<int>(CollationSpec::StrengthType::kPrimary)
- << " in: "
- << spec.toBSON()};
+ << " in: " << spec.toBSON()};
}
// The caseFirst option only affects tertiary level or caseLevel comparisons. It will have no
@@ -682,13 +634,10 @@ Status validateCollationSpec(const CollationSpec& spec) {
spec.strength == CollationSpec::StrengthType::kSecondary)) {
return {ErrorCodes::BadValue,
str::stream() << "'" << CollationSpec::kCaseFirstField << "' is invalid unless '"
- << CollationSpec::kCaseLevelField
- << "' is on or '"
- << CollationSpec::kStrengthField
- << "' is greater than "
+ << CollationSpec::kCaseLevelField << "' is on or '"
+ << CollationSpec::kStrengthField << "' is greater than "
<< static_cast<int>(CollationSpec::StrengthType::kSecondary)
- << " in: "
- << spec.toBSON()};
+ << " in: " << spec.toBSON()};
}
return Status::OK();
@@ -711,8 +660,7 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
return {ErrorCodes::FailedToParse,
str::stream() << "If " << CollationSpec::kLocaleField << "="
<< CollationSpec::kSimpleBinaryComparison
- << ", no other fields should be present in: "
- << spec};
+ << ", no other fields should be present in: " << spec};
}
return {nullptr};
}
@@ -721,8 +669,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
auto userLocale = icu::Locale::createFromName(parsedLocaleID.getValue().c_str());
if (userLocale.isBogus()) {
return {ErrorCodes::BadValue,
- str::stream() << "Field '" << CollationSpec::kLocaleField << "' is not valid in: "
- << spec};
+ str::stream() << "Field '" << CollationSpec::kLocaleField
+ << "' is not valid in: " << spec};
}
// Construct an icu::Collator.
@@ -733,8 +681,7 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to create collator: " << icuError.errorName()
- << ". Collation spec: "
- << spec};
+ << ". Collation spec: " << spec};
}
Status localeValidationStatus = validateLocaleID(spec, parsedLocaleID.getValue(), *icuCollator);
diff --git a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
index df6233398ba..9c540817ec7 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
@@ -39,9 +39,7 @@ namespace mongo {
namespace {
ServiceContext::ConstructorActionRegisterer registerIcuCollator{
- "CreateCollatorFactory",
- {"LoadICUData"},
- [](ServiceContext* service) {
+ "CreateCollatorFactory", {"LoadICUData"}, [](ServiceContext* service) {
CollatorFactoryInterface::set(service, stdx::make_unique<CollatorFactoryICU>());
}};
} // namespace
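(Illustrative aside, not part of the patch.) The registerIcuCollator hunk shows clang-format 7's layout for a call whose last argument is a lambda: the leading arguments are joined onto one line and the lambda body hangs. A hypothetical registrar type reproducing the shape:

    #include <functional>
    #include <string>
    #include <vector>

    struct Registerer {
        Registerer(std::string, std::vector<std::string>, std::function<void()>) {}
    };

    // Leading arguments on one line; trailing lambda hangs, as in the hunk above.
    Registerer registerDemo{
        "CreateDemoFactory", {"LoadDemoData"}, []() {
            // initialization work would go here
        }};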
diff --git a/src/mongo/db/query/collation/collator_factory_icu_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
index 052e03decee..275ae55ff84 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_test.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
@@ -60,8 +60,7 @@ TEST(CollatorFactoryICUTest, SimpleLocaleWithOtherFieldsFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "simple"
- << "caseLevel"
- << true));
+ << "caseLevel" << true));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -443,8 +442,7 @@ TEST(CollatorFactoryICUTest, CaseLevelFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel"
- << false));
+ << "caseLevel" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().caseLevel);
}
@@ -453,8 +451,7 @@ TEST(CollatorFactoryICUTest, CaseLevelTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel"
- << true));
+ << "caseLevel" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().caseLevel);
}
@@ -496,8 +493,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -507,8 +503,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kSecondary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -518,8 +513,7 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kTertiary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -529,8 +523,7 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 4));
+ << "strength" << 4));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kQuaternary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -540,8 +533,7 @@ TEST(CollatorFactoryICUTest, IdenticalStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 5));
+ << "strength" << 5));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kIdentical),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -551,8 +543,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << false));
+ << "numericOrdering" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().numericOrdering);
}
@@ -561,8 +552,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << true));
+ << "numericOrdering" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().numericOrdering);
}
@@ -615,8 +605,7 @@ TEST(CollatorFactoryICUTest, NormalizationFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization"
- << false));
+ << "normalization" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().normalization);
}
@@ -625,8 +614,7 @@ TEST(CollatorFactoryICUTest, NormalizationTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization"
- << true));
+ << "normalization" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().normalization);
}
@@ -635,8 +623,7 @@ TEST(CollatorFactoryICUTest, BackwardsFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << false));
+ << "backwards" << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().backwards);
}
@@ -645,8 +632,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true));
+ << "backwards" << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().backwards);
}
@@ -655,8 +641,7 @@ TEST(CollatorFactoryICUTest, LongStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1LL));
+ << "strength" << 1LL));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -666,8 +651,7 @@ TEST(CollatorFactoryICUTest, DoubleStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1.0));
+ << "strength" << 1.0));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -687,8 +671,7 @@ TEST(CollatorFactoryICUTest, NonStringCaseFirstFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseFirst"
- << 1));
+ << "caseFirst" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -717,8 +700,7 @@ TEST(CollatorFactoryICUTest, TooLargeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2147483648LL));
+ << "strength" << 2147483648LL));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -727,8 +709,7 @@ TEST(CollatorFactoryICUTest, FractionalStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 0.5));
+ << "strength" << 0.5));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::BadValue);
}
@@ -737,8 +718,7 @@ TEST(CollatorFactoryICUTest, NegativeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << -1));
+ << "strength" << -1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -747,8 +727,7 @@ TEST(CollatorFactoryICUTest, InvalidIntegerStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 6));
+ << "strength" << 6));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -767,8 +746,7 @@ TEST(CollatorFactoryICUTest, NonStringAlternateFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "alternate"
- << 1));
+ << "alternate" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -787,8 +765,7 @@ TEST(CollatorFactoryICUTest, NonStringMaxVariableFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "maxVariable"
- << 1));
+ << "maxVariable" << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -845,8 +822,7 @@ TEST(CollatorFactoryICUTest, NonStringVersionFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "version"
- << 3));
+ << "version" << 3));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -878,8 +854,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCollatorIgnoresCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -891,8 +866,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthCollatorsIgnoresCaseButNotAccents)
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -904,8 +878,7 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthCollatorConsidersCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -917,10 +890,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "caseLevel"
- << true));
+ << "strength" << 1 << "caseLevel" << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -930,14 +900,11 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator = factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength"
- << 1
- << "caseLevel"
- << true
- << "caseFirst"
- << "upper"));
+ auto collator =
+ factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength" << 1 << "caseLevel" << true << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -947,14 +914,11 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
TEST(CollatorFactoryICUTest, TertiaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator = factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength"
- << 3
- << "caseLevel"
- << true
- << "caseFirst"
- << "upper"));
+ auto collator =
+ factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength" << 3 << "caseLevel" << true << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("A", "a"), 0);
}
@@ -971,8 +935,7 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering"
- << true));
+ << "numericOrdering" << true));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("2", "10"), 0);
}
@@ -981,9 +944,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "alternate"
+ << "strength" << 1 << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(collator.getValue()->compare("a b", "ab"), 0);
@@ -994,9 +955,7 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 4
- << "alternate"
+ << "strength" << 4 << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("a b", "ab"), 0);
@@ -1007,9 +966,7 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShiftedMaxVariableSpace) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 1
- << "alternate"
+ << "strength" << 1 << "alternate"
<< "shifted"
<< "maxVariable"
<< "space"));
@@ -1022,8 +979,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsFalse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -1034,10 +990,7 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength"
- << 2
- << "backwards"
- << true));
+ << "strength" << 2 << "backwards" << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -1068,10 +1021,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueWithStrengthOneFails) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true
- << "strength"
- << 1));
+ << "backwards" << true << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1079,10 +1029,7 @@ TEST(CollatorFactoryICUTest, BackwardsTrueWithStrengthTwoSucceeds) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards"
- << true
- << "strength"
- << 2));
+ << "backwards" << true << "strength" << 2));
ASSERT_OK(collator.getStatus());
}
@@ -1092,8 +1039,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthThreeSucceeds) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
}
@@ -1103,8 +1049,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthThreeSucceeds) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 3));
+ << "strength" << 3));
ASSERT_OK(collator.getStatus());
}
@@ -1114,10 +1059,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithCaseLevelSucceeds) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "caseLevel"
- << true
- << "strength"
- << 1));
+ << "caseLevel" << true << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1127,10 +1069,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithCaseLevelSucceeds) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "caseLevel"
- << true
- << "strength"
- << 1));
+ << "caseLevel" << true << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1140,8 +1079,7 @@ TEST(CollatorFactoryICUTest, CaseFirstOffWithStrengthOneSucceeds) {
<< "en_US"
<< "caseFirst"
<< "off"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_OK(collator.getStatus());
}
@@ -1151,8 +1089,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthOneFails) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1162,8 +1099,7 @@ TEST(CollatorFactoryICUTest, CaseFirstLowerWithStrengthTwoFails) {
<< "en_US"
<< "caseFirst"
<< "lower"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1173,8 +1109,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthOneFails) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 1));
+ << "strength" << 1));
ASSERT_NOT_OK(collator.getStatus());
}
@@ -1184,8 +1119,7 @@ TEST(CollatorFactoryICUTest, CaseFirstUpperWithStrengthTwoFails) {
<< "en_US"
<< "caseFirst"
<< "upper"
- << "strength"
- << 2));
+ << "strength" << 2));
ASSERT_NOT_OK(collator.getStatus());
}
diff --git a/src/mongo/db/query/collation/collator_interface_mock_test.cpp b/src/mongo/db/query/collation/collator_interface_mock_test.cpp
index d792d95c2a1..340e9690ef6 100644
--- a/src/mongo/db/query/collation/collator_interface_mock_test.cpp
+++ b/src/mongo/db/query/collation/collator_interface_mock_test.cpp
@@ -242,10 +242,12 @@ TEST(CollatorInterfaceMockSelfTest, BSONObjsEqualUnderCollatorHashEquallyNested)
SimpleBSONObjComparator bsonCmpConsiderCase;
BSONObjComparator bsonCmpIgnoreCase(
BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, &toLowerCollator);
- BSONObj obj1 = BSON("a" << 1 << "b" << BSON("c"
- << "foo"));
- BSONObj obj2 = BSON("a" << 1 << "b" << BSON("c"
- << "FOO"));
+ BSONObj obj1 = BSON("a" << 1 << "b"
+ << BSON("c"
+ << "foo"));
+ BSONObj obj2 = BSON("a" << 1 << "b"
+ << BSON("c"
+ << "FOO"));
ASSERT_NE(bsonCmpConsiderCase.hash(obj1), bsonCmpConsiderCase.hash(obj2));
ASSERT_EQ(bsonCmpIgnoreCase.hash(obj1), bsonCmpIgnoreCase.hash(obj2));
}
diff --git a/src/mongo/db/query/count_command_test.cpp b/src/mongo/db/query/count_command_test.cpp
index c660bc6adec..b7ea431f678 100644
--- a/src/mongo/db/query/count_command_test.cpp
+++ b/src/mongo/db/query/count_command_test.cpp
@@ -50,8 +50,7 @@ TEST(CountCommandTest, ParserDealsWithMissingFieldsCorrectly) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$lte" << 10)));
+ << "query" << BSON("a" << BSON("$lte" << 10)));
auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
@@ -70,15 +69,8 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "limit"
- << 100
- << "skip"
- << 1000
- << "hint"
- << BSON("b" << 5)
- << "collation"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit" << 100 << "skip"
+ << 1000 << "hint" << BSON("b" << 5) << "collation"
<< BSON("locale"
<< "en_US")
<< "readConcern"
@@ -89,8 +81,7 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
<< "secondary")
<< "comment"
<< "aComment"
- << "maxTimeMS"
- << 10000);
+ << "maxTimeMS" << 10000);
const auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
@@ -110,8 +101,7 @@ TEST(CountCommandTest, ParsingNegativeLimitGivesPositiveLimit) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "limit"
- << -100);
+ << "limit" << -100);
const auto countCmd = CountCommand::parse(ctxt, commandObj);
ASSERT_EQ(countCmd.getLimit().get(), 100);
@@ -122,9 +112,7 @@ TEST(CountCommandTest, LimitCannotBeMinLong) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "limit"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
<< std::numeric_limits<long long>::min());
ASSERT_THROWS_CODE(
@@ -132,31 +120,28 @@ TEST(CountCommandTest, LimitCannotBeMinLong) {
}
TEST(CountCommandTest, FailParseBadSkipValue) {
- ASSERT_THROWS_CODE(CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "skip"
- << -1000)),
- AssertionException,
- ErrorCodes::FailedToParse);
+ ASSERT_THROWS_CODE(
+ CountCommand::parse(ctxt,
+ BSON("count"
+ << "TestColl"
+ << "$db"
+ << "TestDB"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "skip" << -1000)),
+ AssertionException,
+ ErrorCodes::FailedToParse);
}
TEST(CountCommandTest, FailParseBadCollationType) {
- ASSERT_THROWS_CODE(CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "query"
- << BSON("a" << BSON("$gte" << 11))
- << "collation"
- << "en_US")),
- AssertionException,
- ErrorCodes::TypeMismatch);
+ ASSERT_THROWS_CODE(
+ CountCommand::parse(ctxt,
+ BSON("count"
+ << "TestColl"
+ << "$db"
+ << "TestDB"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "collation"
+ << "en_US")),
+ AssertionException,
+ ErrorCodes::TypeMismatch);
}
TEST(CountCommandTest, FailParseUnknownField) {
@@ -176,8 +161,7 @@ TEST(CountCommandTest, ConvertToAggregationWithHint) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "hint"
- << BSON("x" << 1));
+ << "hint" << BSON("x" << 1));
auto countCmd = CountCommand::parse(ctxt, commandObj);
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
@@ -198,12 +182,7 @@ TEST(CountCommandTest, ConvertToAggregationWithQueryAndFilterAndLimit) {
<< "TestColl"
<< "$db"
<< "TestDB"
- << "limit"
- << 200
- << "skip"
- << 300
- << "query"
- << BSON("x" << 7));
+ << "limit" << 200 << "skip" << 300 << "query" << BSON("x" << 7));
auto countCmd = CountCommand::parse(ctxt, commandObj);
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
@@ -227,9 +206,7 @@ TEST(CountCommandTest, ConvertToAggregationWithMaxTimeMS) {
auto countCmd = CountCommand::parse(ctxt,
BSON("count"
<< "TestColl"
- << "maxTimeMS"
- << 100
- << "$db"
+ << "maxTimeMS" << 100 << "$db"
<< "TestDB"));
auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp
index f9fcf3c7af9..f62c57fe40f 100644
--- a/src/mongo/db/query/cursor_response.cpp
+++ b/src/mongo/db/query/cursor_response.cpp
@@ -175,24 +175,24 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
BSONElement cursorElt = cmdResponse[kCursorField];
if (cursorElt.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kCursorField << "' must be a nested object in: "
- << cmdResponse};
+ str::stream() << "Field '" << kCursorField
+ << "' must be a nested object in: " << cmdResponse};
}
BSONObj cursorObj = cursorElt.Obj();
BSONElement idElt = cursorObj[kIdField];
if (idElt.type() != BSONType::NumberLong) {
- return {
- ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kIdField << "' must be of type long in: " << cmdResponse};
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kIdField
+ << "' must be of type long in: " << cmdResponse};
}
cursorId = idElt.Long();
BSONElement nsElt = cursorObj[kNsField];
if (nsElt.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kNsField << "' must be of type string in: "
- << cmdResponse};
+ str::stream() << "Field '" << kNsField
+ << "' must be of type string in: " << cmdResponse};
}
fullns = nsElt.String();
@@ -204,9 +204,7 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
if (batchElt.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Must have array field '" << kBatchFieldInitial << "' or '"
- << kBatchField
- << "' in: "
- << cmdResponse};
+ << kBatchField << "' in: " << cmdResponse};
}
batchObj = batchElt.Obj();
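Every reflowed span in cursor_response.cpp is an instance of one error-reporting idiom: returning a Status whose message is built with mongo's `str::stream()`, an ostringstream-like helper that converts implicitly to std::string. The idiom as it reads after reformatting, taken from the first hunk above:

    if (cursorElt.type() != BSONType::Object) {
        return {ErrorCodes::TypeMismatch,
                str::stream() << "Field '" << kCursorField
                              << "' must be a nested object in: " << cmdResponse};
    }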
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 6a3a2229813..952edb125f4 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -41,13 +41,11 @@ namespace mongo {
namespace {
TEST(CursorResponseTest, parseFromBSONFirstBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -60,13 +58,11 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) {
}
TEST(CursorResponseTest, parseFromBSONNextBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -79,13 +75,11 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) {
}
TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(0) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(0) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -97,13 +91,11 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
}
TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSONArrayBuilder().arr())
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSONArrayBuilder().arr())
+ << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -113,15 +105,11 @@ TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
}
TEST(CursorResponseTest, parseFromBSONLatestOplogEntry) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSONArrayBuilder().arr())
- << "$_internalLatestOplogTimestamp"
- << Timestamp(1, 2)
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSONArrayBuilder().arr())
+ << "$_internalLatestOplogTimestamp" << Timestamp(1, 2) << "ok" << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -146,8 +134,7 @@ TEST(CursorResponseTest, parseFromBSONNsFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("id" << CursorId(123) << "firstBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -155,8 +142,7 @@ TEST(CursorResponseTest, parseFromBSONNsFieldWrongType) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -164,10 +150,8 @@ TEST(CursorResponseTest, parseFromBSONIdFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -177,10 +161,8 @@ TEST(CursorResponseTest, parseFromBSONIdFieldWrongType) {
<< "123"
<< "ns"
<< "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1));
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -188,19 +170,16 @@ TEST(CursorResponseTest, parseFromBSONBatchFieldMissing) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
<< "db.coll")
- << "ok"
- << 1));
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONFirstBatchFieldWrongType) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch"
- << BSON("_id" << 1))
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch" << BSON("_id" << 1))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -208,32 +187,25 @@ TEST(CursorResponseTest, parseFromBSONNextBatchFieldWrongType) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
<< "db.coll"
- << "nextBatch"
- << BSON("_id" << 1))
- << "ok"
- << 1));
+ << "nextBatch" << BSON("_id" << 1))
+ << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONLatestOplogEntryWrongType) {
- StatusWith<CursorResponse> result =
- CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1)))
- << "$_internalLatestOplogTimestamp"
- << 1
- << "ok"
- << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1)))
+ << "$_internalLatestOplogTimestamp" << 1 << "ok" << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONOkFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
ASSERT_NOT_OK(result.getStatus());
}
@@ -250,13 +222,11 @@ TEST(CursorResponseTest, toBSONInitialResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::InitialResponse);
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -264,13 +234,11 @@ TEST(CursorResponseTest, toBSONSubsequentResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::SubsequentResponse);
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -282,13 +250,11 @@ TEST(CursorResponseTest, addToBSONInitialResponse) {
response.addToBSON(CursorResponse::ResponseType::InitialResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -300,13 +266,11 @@ TEST(CursorResponseTest, addToBSONSubsequentResponse) {
response.addToBSON(CursorResponse::ResponseType::SubsequentResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse =
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok"
- << 1.0);
+ BSONObj expectedResponse = BSON(
+ "cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
@@ -321,10 +285,7 @@ TEST(CursorResponseTest, serializeLatestOplogEntry) {
<< "db.coll"
<< "nextBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "$_internalLatestOplogTimestamp"
- << Timestamp(1, 2)
- << "ok"
- << 1));
+ << "$_internalLatestOplogTimestamp" << Timestamp(1, 2) << "ok" << 1));
auto reparsed = CursorResponse::parseFromBSON(serialized);
ASSERT_OK(reparsed.getStatus());
CursorResponse reparsedResponse = std::move(reparsed.getValue());
@@ -350,10 +311,8 @@ TEST(CursorResponseTest, serializePostBatchResumeToken) {
<< "db.coll"
<< "nextBatch"
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))
- << "postBatchResumeToken"
- << postBatchResumeToken)
- << "ok"
- << 1));
+ << "postBatchResumeToken" << postBatchResumeToken)
+ << "ok" << 1));
auto reparsed = CursorResponse::parseFromBSON(serialized);
ASSERT_OK(reparsed.getStatus());
CursorResponse reparsedResponse = std::move(reparsed.getValue());
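The serialization tests above share a round-trip pattern: construct a CursorResponse, serialize it with toBSON(), reparse with parseFromBSON(), and compare the pieces. A condensed sketch assembled from the hunks above (the namespace and cursor id are the tests' own sample values):

    std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
    CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
    BSONObj serialized = response.toBSON(CursorResponse::ResponseType::SubsequentResponse);
    auto reparsed = CursorResponse::parseFromBSON(serialized);
    ASSERT_OK(reparsed.getStatus());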
diff --git a/src/mongo/db/query/datetime/date_time_support.cpp b/src/mongo/db/query/datetime/date_time_support.cpp
index 01397b1c605..8229dd2d13f 100644
--- a/src/mongo/db/query/datetime/date_time_support.cpp
+++ b/src/mongo/db/query/datetime/date_time_support.cpp
@@ -180,9 +180,7 @@ void TimeZoneDatabase::loadTimeZoneInfo(
40475,
{ErrorCodes::FailedToParse,
str::stream() << "failed to parse time zone file for time zone identifier \""
- << entry.id
- << "\": "
- << timelib_get_error_message(errorCode)});
+ << entry.id << "\": " << timelib_get_error_message(errorCode)});
}
invariant(errorCode == TIMELIB_ERROR_NO_ERROR);
@@ -276,8 +274,7 @@ Date_t TimeZoneDatabase::fromString(StringData dateString,
uasserted(ErrorCodes::ConversionFailure,
str::stream()
<< "an incomplete date/time string has been found, with elements missing: \""
- << dateString
- << "\"");
+ << dateString << "\"");
}
if (!tz.isUtcZone()) {
@@ -295,8 +292,7 @@ Date_t TimeZoneDatabase::fromString(StringData dateString,
ErrorCodes::ConversionFailure,
str::stream()
<< "you cannot pass in a date/time string with time zone information ('"
- << parsedTime.get()->tz_abbr
- << "') together with a timezone argument");
+ << parsedTime.get()->tz_abbr << "') together with a timezone argument");
break;
default: // should technically not be possible to reach
uasserted(ErrorCodes::ConversionFailure,
diff --git a/src/mongo/db/query/datetime/date_time_support.h b/src/mongo/db/query/datetime/date_time_support.h
index 94ac4c4d08e..f5efdcb8fc3 100644
--- a/src/mongo/db/query/datetime/date_time_support.h
+++ b/src/mongo/db/query/datetime/date_time_support.h
@@ -295,8 +295,7 @@ private:
uassert(18537,
str::stream() << "Could not convert date to string: date component was outside "
- << "the supported range of 0-9999: "
- << number,
+ << "the supported range of 0-9999: " << number,
(number >= 0) && (number <= 9999));
int digits = 1;
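The date/time hunks apply the same reflow to the `uassert` form of the idiom: numeric error code first, streamed message second, condition last, with the continuation `<<` now aligned under the first stream operand. The reformatted shape, from the date_time_support.h hunk above:

    uassert(18537,
            str::stream() << "Could not convert date to string: date component was outside "
                          << "the supported range of 0-9999: " << number,
            (number >= 0) && (number <= 9999));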
diff --git a/src/mongo/db/query/datetime/init_timezone_data.cpp b/src/mongo/db/query/datetime/init_timezone_data.cpp
index dea7322dd90..f2de36a65af 100644
--- a/src/mongo/db/query/datetime/init_timezone_data.cpp
+++ b/src/mongo/db/query/datetime/init_timezone_data.cpp
@@ -49,8 +49,7 @@ ServiceContext::ConstructorActionRegisterer loadTimeZoneDB{
if (!timeZoneDatabase) {
uasserted(ErrorCodes::FailedToParse,
str::stream() << "failed to load time zone database from path \""
- << serverGlobalParams.timeZoneInfoPath
- << "\"");
+ << serverGlobalParams.timeZoneInfoPath << "\"");
}
TimeZoneDatabase::set(service,
stdx::make_unique<TimeZoneDatabase>(std::move(timeZoneDatabase)));
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 8317fc50cfc..e6ad7cc0c5c 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -246,4 +246,4 @@ private:
static void generateServerInfo(BSONObjBuilder* out);
};
-} // namespace
+} // namespace mongo
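This hunk is not a reflow but clang-format's namespace-comment fixup: with the FixNamespaceComments option enabled (as it appears to be in this project's .clang-format), the closing brace of a long namespace gets a canonical `// namespace <name>` trailer, and a stale or anonymous trailer on a named namespace is rewritten. Illustrative shape, with a placeholder declaration:

    // Before: "}" or "} // namespace"; after: the canonical trailer below.
    namespace mongo {
    class Explain;
    } // namespace mongo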
diff --git a/src/mongo/db/query/explain_options.cpp b/src/mongo/db/query/explain_options.cpp
index b9c771de18e..581252ffdfc 100644
--- a/src/mongo/db/query/explain_options.cpp
+++ b/src/mongo/db/query/explain_options.cpp
@@ -72,13 +72,10 @@ StatusWith<ExplainOptions::Verbosity> ExplainOptions::parseCmdBSON(const BSONObj
verbosity = Verbosity::kExecStats;
} else if (verbStr != kAllPlansExecutionVerbosityStr) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "verbosity string must be one of {'"
- << kQueryPlannerVerbosityStr
- << "', '"
- << kExecStatsVerbosityStr
- << "', '"
- << kAllPlansExecutionVerbosityStr
- << "'}");
+ str::stream()
+ << "verbosity string must be one of {'" << kQueryPlannerVerbosityStr
+ << "', '" << kExecStatsVerbosityStr << "', '"
+ << kAllPlansExecutionVerbosityStr << "'}");
}
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 7485d1a7260..9fc2b14cd22 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -337,8 +337,7 @@ Message getMore(OperationContext* opCtx,
// cursor.
uassert(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace " << ns << ", but cursor " << cursorid
- << " belongs to namespace "
- << cursorPin->nss().ns(),
+ << " belongs to namespace " << cursorPin->nss().ns(),
nss == cursorPin->nss());
// A user can only call getMore on their own cursor. If there were multiple users authenticated
diff --git a/src/mongo/db/query/find_and_modify_request.cpp b/src/mongo/db/query/find_and_modify_request.cpp
index 20f62d2a407..9bf40a1f456 100644
--- a/src/mongo/db/query/find_and_modify_request.cpp
+++ b/src/mongo/db/query/find_and_modify_request.cpp
@@ -171,18 +171,18 @@ StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceSt
auto queryElement = cmdObj[kQueryField];
if (queryElement.type() != Object) {
return {ErrorCodes::Error(31160),
- str::stream() << "'" << kQueryField
- << "' parameter must be an object, found "
- << queryElement.type()};
+ str::stream()
+ << "'" << kQueryField << "' parameter must be an object, found "
+ << queryElement.type()};
}
query = queryElement.embeddedObject();
} else if (field == kSortField) {
auto sortElement = cmdObj[kSortField];
if (sortElement.type() != Object) {
return {ErrorCodes::Error(31174),
- str::stream() << "'" << kSortField
- << "' parameter must be an object, found "
- << sortElement.type()};
+ str::stream()
+ << "'" << kSortField << "' parameter must be an object, found "
+ << sortElement.type()};
}
sort = sortElement.embeddedObject();
} else if (field == kRemoveField) {
@@ -195,9 +195,9 @@ StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceSt
auto projectionElement = cmdObj[kFieldProjectionField];
if (projectionElement.type() != Object) {
return {ErrorCodes::Error(31175),
- str::stream() << "'" << kFieldProjectionField
- << "' parameter must be an object, found "
- << projectionElement.type()};
+ str::stream()
+ << "'" << kFieldProjectionField
+ << "' parameter must be an object, found " << projectionElement.type()};
}
fields = projectionElement.embeddedObject();
} else if (field == kUpsertField) {
diff --git a/src/mongo/db/query/find_and_modify_request.h b/src/mongo/db/query/find_and_modify_request.h
index a8b350e691f..a5212570755 100644
--- a/src/mongo/db/query/find_and_modify_request.h
+++ b/src/mongo/db/query/find_and_modify_request.h
@@ -117,13 +117,13 @@ public:
//
/**
- * Sets the filter to find a document.
- */
+ * Sets the filter to find a document.
+ */
void setQuery(BSONObj query);
/**
- * Sets the update object that specifies how a document gets updated.
- */
+ * Sets the update object that specifies how a document gets updated.
+ */
void setUpdateObj(BSONObj updateObj);
/**
@@ -134,8 +134,8 @@ public:
void setShouldReturnNew(bool shouldReturnNew);
/**
- * Sets a flag whether the statement performs an upsert.
- */
+ * Sets a flag whether the statement performs an upsert.
+ */
void setUpsert(bool upsert);
//
@@ -210,4 +210,4 @@ private:
// Holds value when performing an update request and none when a remove request.
boost::optional<write_ops::UpdateModification> _update;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 54ade9343eb..6b3273792c0 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -765,8 +765,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
bool permitYield,
size_t plannerOptions) {
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
- auto yieldPolicy = (permitYield && (readConcernArgs.getLevel() !=
- repl::ReadConcernLevel::kSnapshotReadConcern))
+ auto yieldPolicy =
+ (permitYield &&
+ (readConcernArgs.getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern))
? PlanExecutor::YIELD_AUTO
: PlanExecutor::INTERRUPT_ONLY;
return _getExecutorFind(
@@ -1512,10 +1513,11 @@ QueryPlannerParams fillOutPlannerParamsForDistinct(OperationContext* opCtx,
const IndexCatalogEntry* ice = ii->next();
const IndexDescriptor* desc = ice->descriptor();
if (desc->keyPattern().hasField(parsedDistinct.getKey())) {
- if (!mayUnwindArrays && isAnyComponentOfPathMultikey(desc->keyPattern(),
- desc->isMultikey(opCtx),
- desc->getMultikeyPaths(opCtx),
- parsedDistinct.getKey())) {
+ if (!mayUnwindArrays &&
+ isAnyComponentOfPathMultikey(desc->keyPattern(),
+ desc->isMultikey(opCtx),
+ desc->getMultikeyPaths(opCtx),
+ parsedDistinct.getKey())) {
// If the caller requested "strict" distinct that does not "pre-unwind" arrays,
// then an index which is multikey on the distinct field may not be used. This is
// because when indexing an array each element gets inserted individually. Any plan
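The get_executor.cpp hunks show the new wrap-point preference: break a long condition at its lowest-precedence operator (`&&`, or a comparison) and keep each function call intact, rather than wrapping inside an argument list. The reformatted conditional from the first hunk above:

    auto yieldPolicy =
        (permitYield &&
         (readConcernArgs.getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern))
        ? PlanExecutor::YIELD_AUTO
        : PlanExecutor::INTERRUPT_ONLY;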
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index 16cdf77016a..4e6350630ee 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -189,14 +189,13 @@ TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
}
TEST(GetExecutorTest, GetAllowedIndicesMatchesByName) {
- testAllowedIndices(
- {buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1"),
- buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1:en")},
- // BSONObjSet default constructor is explicit, so we cannot copy-list-initialize until
- // C++14.
- SimpleBSONObjComparator::kInstance.makeBSONObjSet(),
- {"a_1"},
- {"a_1"});
+ testAllowedIndices({buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1"),
+ buildSimpleIndexEntry(fromjson("{a: 1}"), "a_1:en")},
+ // BSONObjSet default constructor is explicit, so we cannot
+ // copy-list-initialize until C++14.
+ SimpleBSONObjComparator::kInstance.makeBSONObjSet(),
+ {"a_1"},
+ {"a_1"});
}
TEST(GetExecutorTest, GetAllowedIndicesMatchesMultipleIndexesByKey) {
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index e577671f2fd..e78f6e4e37c 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -84,8 +84,7 @@ Status GetMoreRequest::isValid() const {
if (batchSize && *batchSize <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Batch size for getMore must be positive, "
- << "but received: "
- << *batchSize);
+ << "but received: " << *batchSize);
}
return Status::OK();
@@ -116,8 +115,8 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (fieldName == kCollectionField) {
if (el.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'collection' must be of type string in: "
- << cmdObj};
+ str::stream()
+ << "Field 'collection' must be of type string in: " << cmdObj};
}
BSONElement collElt = cmdObj["collection"];
@@ -155,9 +154,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (!isGenericArgument(fieldName)) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '"
- << fieldName
- << "'."};
+ << "Unrecognized field '" << fieldName << "'."};
}
}
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index f9fe0627cbe..78b235153f8 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -61,8 +61,7 @@ TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
BSON("getMore"
<< "not a number"
- << "collection"
- << 123));
+ << "collection" << 123));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
}
@@ -117,8 +116,7 @@ TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "unknown_field"
- << 1));
+ << "unknown_field" << 1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
}
@@ -128,8 +126,7 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << -1));
+ << "batchSize" << -1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -139,8 +136,7 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << 0));
+ << "batchSize" << 0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -161,8 +157,7 @@ TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize"
- << 200));
+ << "batchSize" << 200));
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
ASSERT(result.getValue().batchSize);
@@ -186,8 +181,7 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMS) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS"
- << 100));
+ << "maxTimeMS" << 100));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT(result.getValue().awaitDataTimeout);
@@ -200,8 +194,7 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMSOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS"
- << 0));
+ << "maxTimeMS" << 0));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
@@ -216,8 +209,7 @@ TEST(GetMoreRequestTest, toBSONHasBatchSize) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize"
- << 99);
+ << "batchSize" << 99);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -240,10 +232,7 @@ TEST(GetMoreRequestTest, toBSONHasTerm) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize"
- << 99
- << "term"
- << 1);
+ << "batchSize" << 99 << "term" << 1);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -255,14 +244,11 @@ TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
1,
repl::OpTime(Timestamp(0, 10), 2));
BSONObj requestObj = request.toBSON();
- BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
- << "testcoll"
- << "batchSize"
- << 99
- << "term"
- << 1
- << "lastKnownCommittedOpTime"
- << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
+ BSONObj expectedRequest =
+ BSON("getMore" << CursorId(123) << "collection"
+ << "testcoll"
+ << "batchSize" << 99 << "term" << 1 << "lastKnownCommittedOpTime"
+ << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
@@ -276,8 +262,7 @@ TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS"
- << 789);
+ << "maxTimeMS" << 789);
ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
diff --git a/src/mongo/db/query/killcursors_request.cpp b/src/mongo/db/query/killcursors_request.cpp
index df44d73043d..5f21b82d489 100644
--- a/src/mongo/db/query/killcursors_request.cpp
+++ b/src/mongo/db/query/killcursors_request.cpp
@@ -67,8 +67,8 @@ StatusWith<KillCursorsRequest> KillCursorsRequest::parseFromBSON(const std::stri
if (cmdObj[kCursorsField].type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << kCursorsField << "' must be of type array in: "
- << cmdObj};
+ str::stream() << "Field '" << kCursorsField
+ << "' must be of type array in: " << cmdObj};
}
std::vector<CursorId> cursorIds;
diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp
index fef544d0b42..d1cdb1f4650 100644
--- a/src/mongo/db/query/killcursors_request_test.cpp
+++ b/src/mongo/db/query/killcursors_request_test.cpp
@@ -95,8 +95,7 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldNotArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors"
- << CursorId(123)));
+ << "cursors" << CursorId(123)));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -106,21 +105,18 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldEmptyArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors"
- << BSONArrayBuilder().arr()));
+ << "cursors" << BSONArrayBuilder().arr()));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
}
TEST(KillCursorsRequestTest, parseFromBSONCursorFieldContainsEltOfWrongType) {
- StatusWith<KillCursorsRequest> result =
- KillCursorsRequest::parseFromBSON("db",
- BSON("killCursors"
- << "coll"
- << "cursors"
- << BSON_ARRAY(CursorId(123) << "foo"
- << CursorId(456))));
+ StatusWith<KillCursorsRequest> result = KillCursorsRequest::parseFromBSON(
+ "db",
+ BSON("killCursors"
+ << "coll"
+ << "cursors" << BSON_ARRAY(CursorId(123) << "foo" << CursorId(456))));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -132,8 +128,7 @@ TEST(KillCursorsRequestTest, toBSON) {
BSONObj requestObj = request.toBSON();
BSONObj expectedObj = BSON("killCursors"
<< "coll"
- << "cursors"
- << BSON_ARRAY(CursorId(123) << CursorId(456)));
+ << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)));
ASSERT_BSONOBJ_EQ(requestObj, expectedObj);
}
diff --git a/src/mongo/db/query/killcursors_response.cpp b/src/mongo/db/query/killcursors_response.cpp
index 798b2bf8cb0..8b482772b59 100644
--- a/src/mongo/db/query/killcursors_response.cpp
+++ b/src/mongo/db/query/killcursors_response.cpp
@@ -51,8 +51,8 @@ Status fillOutCursorArray(const BSONObj& cmdResponse,
if (elt.type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << fieldName << "' must be of type array in: "
- << cmdResponse};
+ str::stream() << "Field '" << fieldName
+ << "' must be of type array in: " << cmdResponse};
}
for (BSONElement cursorElt : elt.Obj()) {
diff --git a/src/mongo/db/query/killcursors_response_test.cpp b/src/mongo/db/query/killcursors_response_test.cpp
index c0c5da3f278..8f091635bb4 100644
--- a/src/mongo/db/query/killcursors_response_test.cpp
+++ b/src/mongo/db/query/killcursors_response_test.cpp
@@ -41,13 +41,9 @@ namespace {
TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1.0));
+ << "cursorsUnknown" << BSONArray() << "ok" << 1.0));
ASSERT_OK(result.getStatus());
KillCursorsResponse response = result.getValue();
ASSERT_EQ(response.cursorsKilled.size(), 1U);
@@ -65,11 +61,8 @@ TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
TEST(KillCursorsResponseTest, parseFromBSONSuccessOmitCursorsAlive) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsUnknown"
- << BSON_ARRAY(CursorId(789))
- << "ok"
- << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsUnknown"
+ << BSON_ARRAY(CursorId(789)) << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -84,13 +77,11 @@ TEST(KillCursorsResponseTest, parseFromBSONCommandNotOk) {
}
TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
- StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
- BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << "foobar"
- << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "ok"
- << 1.0));
+ StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(BSON(
+ "cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
+ << "foobar"
+ << "cursorsAlive" << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
+ << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -98,11 +89,8 @@ TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
TEST(KillCursorsResponseTest, parseFromBSONArrayContainsInvalidElement) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9))
- << "ok"
- << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9)) << "ok" << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -116,13 +104,9 @@ TEST(KillCursorsResponseTest, toBSON) {
BSONObj responseObj = response.toBSON();
BSONObj expectedResponse =
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6))
- << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1.0);
+ << "cursorsUnknown" << BSONArray() << "ok" << 1.0);
ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 2d5a74af4c0..b5420ecaf3d 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -292,11 +292,10 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
if (auto readConcernElt = cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) {
if (readConcernElt.type() != BSONType::Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
- << "\" had the wrong type. Expected "
- << typeName(BSONType::Object)
- << ", found "
- << typeName(readConcernElt.type()));
+ str::stream()
+ << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
+ << "\" had the wrong type. Expected " << typeName(BSONType::Object)
+ << ", found " << typeName(readConcernElt.type()));
}
qr->setReadConcern(readConcernElt.embeddedObject());
}
@@ -304,11 +303,10 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
if (auto queryOptionsElt = cmdObj[QueryRequest::kUnwrappedReadPrefField]) {
if (queryOptionsElt.type() != BSONType::Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "\"" << QueryRequest::kUnwrappedReadPrefField
- << "\" had the wrong type. Expected "
- << typeName(BSONType::Object)
- << ", found "
- << typeName(queryOptionsElt.type()));
+ str::stream()
+ << "\"" << QueryRequest::kUnwrappedReadPrefField
+ << "\" had the wrong type. Expected " << typeName(BSONType::Object)
+ << ", found " << typeName(queryOptionsElt.type()));
}
qr->setUnwrappedReadPref(queryOptionsElt.embeddedObject());
}
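When the streamed message cannot start on the same line as `str::stream()`, clang-format 7 now moves the whole chain to the next line and indents every `<<` beneath it, instead of splitting after the first operand. The shape, from the parsed_distinct.cpp hunk above:

    return Status(ErrorCodes::TypeMismatch,
                  str::stream()
                      << "\"" << repl::ReadConcernArgs::kReadConcernFieldName
                      << "\" had the wrong type. Expected " << typeName(BSONType::Object)
                      << ", found " << typeName(readConcernElt.type()));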
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index bf48d19439e..dd6e501ed24 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -73,10 +73,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -113,23 +113,21 @@ TEST(ParsedDistinctTest, ConvertToAggregationDottedPathNoQuery) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$unwind" << BSON("path"
<< "$x.y"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$unwind" << BSON("path"
<< "$x.y.z"
- << "preserveNullAndEmptyArrays"
- << true)),
+ << "preserveNullAndEmptyArrays" << true)),
BSON("$match" << BSON("x" << BSON("$_internalSchemaType"
<< "object")
<< "x.y"
<< BSON("$_internalSchemaType"
<< "object"))),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x.y.z")))};
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x.y.z")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -159,9 +157,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
<< "secondary")
<< "comment"
<< "aComment"
- << "maxTimeMS"
- << 100
- << "$db"
+ << "maxTimeMS" << 100 << "$db"
<< "testdb"),
ExtensionsCallbackNoop(),
!isExplain);
@@ -190,10 +186,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -232,10 +228,10 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
BSON("$match" << BSON("z" << 7)),
BSON("$unwind" << BSON("path"
<< "$y"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$y")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$y")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
@@ -269,10 +265,10 @@ TEST(ParsedDistinctTest, ExplainNotIncludedWhenConvertingToAggregationCommand) {
std::vector<BSONObj> expectedPipeline{
BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays"
- << true)),
- BSON("$group" << BSON("_id" << BSONNULL << "distinct" << BSON("$addToSet"
- << "$x")))};
+ << "preserveNullAndEmptyArrays" << true)),
+ BSON("$group" << BSON("_id" << BSONNULL << "distinct"
+ << BSON("$addToSet"
+ << "$x")))};
ASSERT(std::equal(expectedPipeline.begin(),
expectedPipeline.end(),
ar.getValue().getPipeline().begin(),
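parsed_distinct_test.cpp repeatedly compares a hand-built expected pipeline against the converted aggregation via std::equal, and the hunks cut off after the third argument. A sketch of the full call, assuming (as in similar MongoDB tests) that the equality predicate comes from SimpleBSONObjComparator:

    ASSERT(std::equal(expectedPipeline.begin(),
                      expectedPipeline.end(),
                      ar.getValue().getPipeline().begin(),
                      SimpleBSONObjComparator::kInstance.makeEqualTo()));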
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index aaa3bd36f3d..359ad5c23d8 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -34,8 +34,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
/**
* Parses the projection 'spec' and checks its validity with respect to the query 'query'.
@@ -297,9 +297,9 @@ Status ParsedProjection::make(OperationContext* opCtx,
// $meta sortKey should not be checked as a part of _requiredFields, since it can
// potentially produce a covered projection as long as the sort key is covered.
if (BSONType::Object == elt.type()) {
- dassert(
- SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() == BSON("$meta"
- << "sortKey")));
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() ==
+ BSON("$meta"
+ << "sortKey")));
continue;
}
if (elt.trueValue()) {
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index 84669166c8d..dc00e1c86ee 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -38,8 +38,8 @@
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
using namespace mongo;
@@ -62,8 +62,7 @@ unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const
Status status = ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.get(), &out);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query
- << "): "
- << status.toString());
+ << "): " << status.toString());
}
ASSERT(out);
return unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 7687ed1dca6..553b8002232 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -91,7 +91,7 @@ bool nodeIsConservativelySupportedBySparseIndex(const MatchExpression* me) {
const bool inElemMatch = false;
return QueryPlannerIXSelect::nodeIsSupportedBySparseIndex(me, inElemMatch);
}
-}
+} // namespace
void PlanCacheIndexabilityState::processSparseIndex(const std::string& indexName,
const BSONObj& keyPattern) {
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index d4d91dfe7f9..48116f58416 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -47,8 +47,8 @@ std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj,
expCtx->setCollator(collator);
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
- << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 5614137b90a..8507ab4707f 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -1337,8 +1337,7 @@ TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
TEST_F(CachePlanSelectionTest, AndWithinPolygonWithinCenterSphere) {
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1),
+ << "b" << 1),
"a_2dsphere_b_2dsphere");
BSONObj query = fromjson(
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index 7c6d498007c..910b306502d 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -41,10 +41,10 @@
namespace {
using namespace mongo;
-using std::unique_ptr;
using std::endl;
using std::set;
using std::string;
+using std::unique_ptr;
using std::vector;
std::string getPathPrefix(std::string path) {
@@ -668,9 +668,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
// multikey information.
invariant(INDEX_2DSPHERE == thisIndex.type);
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
+ if (predsOverLeadingField.end() !=
+ std::find(
+ predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred)) {
// The mandatory predicate is on the leading field of 'thisIndex'. We assign it to
// 'thisIndex' and skip assigning any other predicates on the leading field to
// 'thisIndex' because no additional predicate on the leading field will generate a
@@ -722,9 +722,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
}
} else if (thisIndex.multikey) {
// Special handling for multikey mandatory indices.
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
+ if (predsOverLeadingField.end() !=
+ std::find(
+ predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred)) {
// The mandatory predicate is over the first field of the index. Assign
// it now.
indexAssign.preds.push_back(mandatoryPred);
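plan_enumerator.cpp gets the same treatment around `std::find`: the break lands before the `!=` continuation and the call itself stays whole. A self-contained, runnable illustration of the formatted shape (toy types, not MongoDB code):

    #include <algorithm>
    #include <vector>

    bool containsMandatory(const std::vector<int>& predsOverLeadingField, int mandatoryPred) {
        // clang-format 7 breaks at the comparison and keeps std::find intact
        // on the continuation lines.
        return predsOverLeadingField.end() !=
               std::find(
                   predsOverLeadingField.begin(), predsOverLeadingField.end(), mandatoryPred);
    }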
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index 1e8e84da6a7..3c4b601d1aa 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -46,9 +46,9 @@
namespace mongo {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace dps = ::mongo::dotted_path_support;
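Several files here (parsed_projection.cpp, plan_enumerator.cpp, planner_analysis.cpp, and query_planner.cpp below) also reorder their `using` declarations; this matches clang-format's SortUsingDeclarations option, which sorts the declarations lexicographically, so `unique_ptr` now follows `string`:

    using std::endl;
    using std::set;
    using std::string;
    using std::unique_ptr;
    using std::vector;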
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index c35bb3cbdfb..07e1532dc60 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -682,13 +682,14 @@ void QueryPlannerIXSelect::_rateIndices(MatchExpression* node,
const IndexEntry& index = indices[i];
std::size_t keyPatternIndex = 0;
for (auto&& keyPatternElt : index.keyPattern) {
- if (keyPatternElt.fieldNameStringData() == fullPath && _compatible(keyPatternElt,
- index,
- keyPatternIndex,
- node,
- fullPath,
- collator,
- elemMatchCtx)) {
+ if (keyPatternElt.fieldNameStringData() == fullPath &&
+ _compatible(keyPatternElt,
+ index,
+ keyPatternIndex,
+ node,
+ fullPath,
+ collator,
+ elemMatchCtx)) {
if (keyPatternIndex == 0) {
rt->first.push_back(i);
} else {
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index e80eddd187b..e1018a87944 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -51,8 +51,8 @@ namespace {
constexpr CollatorInterface* kSimpleCollator = nullptr;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
/**
@@ -1131,8 +1131,7 @@ TEST(QueryPlannerIXSelectTest, InternalExprEqCanUseHashedIndex) {
TEST(QueryPlannerIXSelectTest, InternalExprEqCannotUseTextIndexPrefix) {
auto entry = buildSimpleIndexEntry(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
std::vector<IndexEntry> indices;
indices.push_back(entry);
std::set<size_t> expectedIndices;
@@ -1143,10 +1142,7 @@ TEST(QueryPlannerIXSelectTest, InternalExprEqCannotUseTextIndexPrefix) {
TEST(QueryPlannerIXSelectTest, InternalExprEqCanUseTextIndexSuffix) {
auto entry = buildSimpleIndexEntry(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "a"
- << 1));
+ << "_ftsx" << 1 << "a" << 1));
std::vector<IndexEntry> indices;
indices.push_back(entry);
std::set<size_t> expectedIndices = {0};
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 655a6816194..54c6a0b9fb0 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -58,8 +58,8 @@
namespace mongo {
-using std::unique_ptr;
using std::numeric_limits;
+using std::unique_ptr;
namespace dps = ::mongo::dotted_path_support;
@@ -520,8 +520,8 @@ StatusWith<std::unique_ptr<QuerySolution>> QueryPlanner::planFromCache(
auto soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot));
if (!soln) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to analyze plan from cache. Query: "
- << query.toStringShort());
+ str::stream()
+ << "Failed to analyze plan from cache. Query: " << query.toStringShort());
}
LOG(5) << "Planner: solution constructed from the cache:\n" << redact(soln->toString());
@@ -610,11 +610,10 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
}
if (fullIndexList.size() > 1) {
return Status(ErrorCodes::IndexNotFound,
- str::stream() << "Hint matched multiple indexes, "
- << "must hint by index name. Matched: "
- << fullIndexList[0].toString()
- << " and "
- << fullIndexList[1].toString());
+ str::stream()
+ << "Hint matched multiple indexes, "
+ << "must hint by index name. Matched: " << fullIndexList[0].toString()
+ << " and " << fullIndexList[1].toString());
}
hintedIndexEntry.emplace(fullIndexList.front());
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index c70ec258481..b23c40a64fe 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -89,8 +89,7 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
TEST_F(QueryPlannerTest, Basic2DCompound) {
addIndex(BSON("loc"
<< "2d"
- << "a"
- << 1));
+ << "a" << 1));
runQuery(
fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
@@ -247,8 +246,7 @@ TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
TEST_F(QueryPlannerTest, 2DNonNearContainedOr) {
addIndex(BSON("a"
<< "2d"
- << "x"
- << 1));
+ << "x" << 1));
addIndex(BSON("y" << 1));
runQuery(
fromjson("{$and: [{x: 1}, {$or: [{a: {$within: {$polygon: [[0, 0], [0, 1], [1, 0], [0, "
@@ -649,10 +647,7 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
// true means multikey
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"
- << "c"
- << 1
- << "d"
- << 1),
+ << "c" << 1 << "d" << 1),
true);
runQuery(
fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
@@ -671,8 +666,7 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
// true means multikey
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
true);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
@@ -1163,10 +1157,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{1U}, {1U}, {1U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1196,10 +1187,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1230,10 +1218,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b"
- << 1
- << "a.c"
- << 1),
+ << "a.b" << 1 << "a.c" << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {b: 2, c: 3}}}"));
@@ -1265,10 +1250,7 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U, 1U}, {0U, 1U}, {0U, 1U}};
addIndex(BSON("a.b.geo"
<< "2dsphere"
- << "a.b.c"
- << 1
- << "a.b.d"
- << 1),
+ << "a.b.c" << 1 << "a.b.d" << 1),
multikeyPaths);
runQuery(fromjson("{'a.b.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {'b.c': 2, 'b.d': 3}}}"));
@@ -1432,8 +1414,7 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDNearCompound) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo"
<< "2dsphere"
- << "nongeo"
- << 1)};
+ << "nongeo" << 1)};
BSONObj predicate = fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}");
testMultiple2dsphereIndexVersions(versions, keyPatterns, predicate, 1U);
}
@@ -1444,16 +1425,10 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowOr) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo1"
<< "2dsphere"
- << "a"
- << 1
- << "b"
- << 1),
+ << "a" << 1 << "b" << 1),
BSON("geo2"
<< "2dsphere"
- << "a"
- << 1
- << "b"
- << 1)};
+ << "a" << 1 << "b" << 1)};
BSONObj predicate = fromjson(
"{a: 4, b: 5, $or: ["
@@ -1475,8 +1450,7 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowElemMatch) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("a.b"
<< "2dsphere"
- << "a.c"
- << 1)};
+ << "a.c" << 1)};
BSONObj predicate = fromjson(
"{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
@@ -1600,8 +1574,7 @@ TEST_F(QueryPlannerTest, 2dInexactFetchPredicateOverTrailingFieldHandledCorrectl
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}"));
assertNumSolutions(1U);
@@ -1616,8 +1589,7 @@ TEST_F(QueryPlannerTest, 2dInexactFetchPredicateOverTrailingFieldHandledCorrectl
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}"));
@@ -1632,8 +1604,7 @@ TEST_F(QueryPlannerTest, 2dNearInexactFetchPredicateOverTrailingFieldHandledCorr
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$exists: true}}"));
assertNumSolutions(1U);
@@ -1647,8 +1618,7 @@ TEST_F(QueryPlannerTest, 2dNearInexactFetchPredicateOverTrailingFieldMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$exists: true}}"));
@@ -1661,8 +1631,7 @@ TEST_F(QueryPlannerTest, 2dNearWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$_internalExprEq: 1}}"));
assertNumSolutions(1U);
@@ -1673,8 +1642,7 @@ TEST_F(QueryPlannerTest, 2dNearWithInternalExprEqOverTrailingFieldMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$_internalExprEq: 1}}"));
@@ -1687,8 +1655,7 @@ TEST_F(QueryPlannerTest, 2dGeoWithinWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(
fromjson("{a: {$within: {$polygon: [[0,0], [2,0], [4,0]]}}, b: {$_internalExprEq: 2}}"));
@@ -1745,8 +1712,7 @@ TEST_F(QueryPlannerTest, 2dsphereNonNearWithInternalExprEqOverTrailingField) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1));
+ << "b" << 1));
runQuery(
fromjson("{b: {$_internalExprEq: 0}, a: {$geoWithin: {$centerSphere: [[0, 0], 10]}}}"));
@@ -1767,8 +1733,7 @@ TEST_F(QueryPlannerTest, 2dsphereNonNearWithInternalExprEqOverTrailingFieldMulti
const bool multikey = true;
addIndex(BSON("a"
<< "2dsphere"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(
@@ -1791,8 +1756,7 @@ TEST_F(QueryPlannerTest, 2dWithinPredicateOverTrailingFieldElemMatchMultikey) {
const bool multikey = true;
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1),
+ << "b" << 1),
multikey);
runQuery(fromjson("{a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}"));
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 8d944b29eb9..4a182e5a27e 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -434,7 +434,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullSparseIndex) {
addIndex(BSON("x" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{x: {$ne: null}}"));
@@ -449,7 +449,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullSparseMultiKeyIndex) {
addIndex(BSON("x" << 1),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{x: {$ne: null}}"));
@@ -462,7 +462,7 @@ TEST_F(QueryPlannerTest, NotEqualsNullInElemMatchValueSparseMultiKeyIndex) {
addIndex(BSON("x" << 1),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{'x': {$elemMatch: {$ne: null}}}"));
@@ -1674,8 +1674,7 @@ TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
addIndex(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
ASSERT_EQUALS(getNumSolutions(), 1U);
@@ -2744,7 +2743,7 @@ TEST_F(QueryPlannerTest, NegationCannotUseSparseIndex) {
addIndex(fromjson("{a: 1}"),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{a: {$ne: 5}}"));
assertHasOnlyCollscan();
@@ -2758,7 +2757,7 @@ TEST_F(QueryPlannerTest, NegationInElemMatchDoesNotUseSparseIndex) {
addIndex(fromjson("{a: 1}"),
true, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{a: {$elemMatch: {$ne: 5}}}"));
assertHasOnlyCollscan();
@@ -2770,7 +2769,7 @@ TEST_F(QueryPlannerTest, SparseIndexCannotSupportEqualsNull) {
addIndex(BSON("i" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{i: {$eq: null}}"));
assertHasOnlyCollscan();
@@ -2784,7 +2783,7 @@ TEST_F(QueryPlannerTest, SparseIndexCanSupportGTEOrLTENull) {
addIndex(BSON("i" << 1),
false, // multikey
true // sparse
- );
+ );
runQuery(fromjson("{i: {$gte: null}}"));
assertNumSolutions(1U);
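The `- );` / `+ );` hunks in query_planner_test.cpp look like no-ops because this rendering strips leading whitespace; the actual change is only the indentation column of a closing parenthesis that follows arguments carrying trailing comments. The shape in question (the exact old and new columns are not recoverable from this dump):

    addIndex(BSON("x" << 1),
             false, // multikey
             true   // sparse
    );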
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 59306ff1feb..14251a98af0 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -548,8 +548,8 @@ std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(
expCtx->setCollator(collator);
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
- << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index d0b148349ca..ed4b1e45247 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -52,8 +52,7 @@ using namespace mongo;
TEST_F(QueryPlannerTest, SimpleText) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah'}}"));
assertNumSolutions(1);
@@ -65,8 +64,7 @@ TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a:1}"));
// No table scans allowed so there is no solution.
@@ -79,8 +77,7 @@ TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -99,8 +96,7 @@ TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
@@ -113,8 +109,7 @@ TEST_F(QueryPlannerTest, PrefixOnTextIndexIsOutsidePred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("b" << 1));
runInvalidQuery(fromjson("{$and: [{a: 5}, {$or: [{$text: {$search: 'blah'}}, {b: 6}]}]}"));
}
@@ -124,8 +119,7 @@ TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
// Both points.
runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
@@ -150,10 +144,7 @@ TEST_F(QueryPlannerTest, SuffixOptional) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -168,10 +159,7 @@ TEST_F(QueryPlannerTest, RemoveFromSubtree) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -187,8 +175,7 @@ TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
true);
// Both points.
@@ -201,10 +188,7 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
// 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
// index to satisfy it w/o the text query.
@@ -215,10 +199,7 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -228,8 +209,7 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
assertNumSolutions(1U);
@@ -242,8 +222,7 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPr
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -257,8 +236,7 @@ TEST_F(QueryPlannerTest, TextInsideOrBasic) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -274,8 +252,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
"{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
@@ -294,8 +271,7 @@ TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
"{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
@@ -316,8 +292,7 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
addIndex(BSON("b" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{a: 1, $or: [{a:2}, {b:2}, "
"{a: 1, $text: {$search: 'foo'}}]}"));
@@ -336,8 +311,7 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
"{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
@@ -360,8 +334,7 @@ TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(0);
@@ -374,8 +347,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
"{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
@@ -390,8 +362,7 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(
fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
"{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
@@ -405,8 +376,7 @@ TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
TEST_F(QueryPlannerTest, OrTextExact) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
@@ -421,8 +391,7 @@ TEST_F(QueryPlannerTest, OrTextExact) {
TEST_F(QueryPlannerTest, OrTextInexactCovered) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
@@ -437,8 +406,7 @@ TEST_F(QueryPlannerTest, OrTextInexactCovered) {
TEST_F(QueryPlannerTest, TextCaseSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
assertNumSolutions(1);
@@ -448,8 +416,7 @@ TEST_F(QueryPlannerTest, TextCaseSensitive) {
TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{$text: {$search: 'blah', $diacriticSensitive: true}}"));
assertNumSolutions(1);
@@ -459,8 +426,7 @@ TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
TEST_F(QueryPlannerTest, SortKeyMetaProjectionWithTextScoreMetaSort) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuerySortProj(fromjson("{$text: {$search: 'foo'}}"),
fromjson("{a: {$meta: 'textScore'}}"),
@@ -477,8 +443,7 @@ TEST_F(QueryPlannerTest, PredicatesOverLeadingFieldsWithSharedPathPrefixHandledC
const bool multikey = true;
addIndex(BSON("a.x" << 1 << "a.y" << 1 << "b.x" << 1 << "b.y" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
multikey);
runQuery(fromjson("{'a.x': 1, 'a.y': 2, 'b.x': 3, 'b.y': 4, $text: {$search: 'foo'}}"));
@@ -491,8 +456,7 @@ TEST_F(QueryPlannerTest, PredicatesOverLeadingFieldsWithSharedPathPrefixHandledC
TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectly) {
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runQuery(fromjson("{a: [1, 2, 3], $text: {$search: 'foo'}}"));
@@ -504,8 +468,7 @@ TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectlyWithMult
const bool multikey = true;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1),
+ << "_ftsx" << 1),
multikey);
runQuery(fromjson("{a: [1, 2, 3], $text: {$search: 'foo'}}"));
@@ -517,10 +480,7 @@ TEST_F(QueryPlannerTest, EqualityToArrayOverLeadingFieldHandledCorrectlyWithMult
TEST_F(QueryPlannerTest, InexactFetchPredicateOverTrailingFieldHandledCorrectly) {
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1));
+ << "_ftsx" << 1 << "b" << 1));
runQuery(fromjson("{a: 3, $text: {$search: 'foo'}, b: {$exists: true}}"));
@@ -533,10 +493,7 @@ TEST_F(QueryPlannerTest, InexactFetchPredicateOverTrailingFieldHandledCorrectlyM
const bool multikey = true;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1
- << "b"
- << 1),
+ << "_ftsx" << 1 << "b" << 1),
multikey);
runQuery(fromjson("{a: 3, $text: {$search: 'foo'}, b: {$exists: true}}"));
@@ -550,8 +507,7 @@ TEST_F(QueryPlannerTest, ExprEqCannotUsePrefixOfTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
runInvalidQuery(fromjson("{a: {$_internalExprEq: 3}, $text: {$search: 'blah'}}"));
}
@@ -560,10 +516,7 @@ TEST_F(QueryPlannerTest, ExprEqCanUseSuffixOfTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx"
- << 1
- << "a"
- << 1));
+ << "_ftsx" << 1 << "a" << 1));
runQuery(fromjson("{a: {$_internalExprEq: 3}, $text: {$search: 'blah'}}"));
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index d0fd0def30e..eba458736af 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -901,8 +901,7 @@ TEST_F(QueryPlannerWildcardTest, WildcardIndexDoesNotSupplyCandidatePlanForTextS
addWildcardIndex(BSON("$**" << 1));
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx"
- << 1));
+ << "_ftsx" << 1));
// Confirm that the wildcard index generates candidate plans for queries which do not include a
// $text predicate.
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index c43317b584b..e0082a90d80 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -399,9 +399,7 @@ StatusWith<unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(unique_p
} else if (!isGenericArgument(fieldName)) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '"
- << fieldName
- << "'.");
+ << "Unrecognized field '" << fieldName << "'.");
}
}
@@ -645,26 +643,26 @@ Status QueryRequest::validate() const {
if (_limit && *_limit < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Limit value must be non-negative, but received: "
- << *_limit);
+ str::stream()
+ << "Limit value must be non-negative, but received: " << *_limit);
}
if (_batchSize && *_batchSize < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "BatchSize value must be non-negative, but received: "
- << *_batchSize);
+ str::stream()
+ << "BatchSize value must be non-negative, but received: " << *_batchSize);
}
if (_ntoreturn && *_ntoreturn < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "NToReturn value must be non-negative, but received: "
- << *_ntoreturn);
+ str::stream()
+ << "NToReturn value must be non-negative, but received: " << *_ntoreturn);
}
if (_maxTimeMS < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "MaxTimeMS value must be non-negative, but received: "
- << _maxTimeMS);
+ str::stream()
+ << "MaxTimeMS value must be non-negative, but received: " << _maxTimeMS);
}
if (_tailableMode != TailableModeEnum::kNormal) {
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index ed4d369602b..f7cc73a1419 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1485,5 +1485,5 @@ TEST_F(QueryRequestTest, ParseFromUUID) {
ASSERT_EQ(nss, qr.nss());
}
-} // namespace mongo
} // namespace
+} // namespace mongo
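
The two-line swap above restores the conventional nesting order: the anonymous namespace closes before the enclosing mongo namespace, and each closing brace carries a matching end-of-namespace comment. A minimal sketch of the corrected shape:

namespace mongo {
namespace {
[[maybe_unused]] int sketch = 0;  // placeholder member
}  // namespace
}  // namespace mongo

int main() {
    return 0;
}
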
diff --git a/src/mongo/db/query/query_settings_test.cpp b/src/mongo/db/query/query_settings_test.cpp
index 41cb1cc0c3d..6a6d0dce66f 100644
--- a/src/mongo/db/query/query_settings_test.cpp
+++ b/src/mongo/db/query/query_settings_test.cpp
@@ -42,9 +42,9 @@
using mongo::AllowedIndicesFilter;
using mongo::BSONObj;
+using mongo::fromjson;
using mongo::IndexEntry;
using mongo::SimpleBSONObjComparator;
-using mongo::fromjson;
namespace {
TEST(QuerySettingsTest, AllowedIndicesFilterAllowsIndexesByName) {
@@ -113,4 +113,4 @@ TEST(QuerySettingsTest, AllowedIndicesFilterAllowsIndexesByKeyPattern) {
ASSERT_TRUE(filter.allows(a_idx));
ASSERT_FALSE(filter.allows(ab_idx));
}
-}
+} // namespace
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 23c83c6fb8a..20fe9824450 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -154,7 +154,7 @@ void addEqualityFieldSorts(const BSONObj& sortPattern,
sortsOut->insert(prefixBob.obj());
}
}
-}
+} // namespace
string QuerySolutionNode::toString() const {
str::stream ss;
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index 7ac47cb2aad..ec3821b7bef 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -727,8 +727,7 @@ auto createMatchExprAndParsedProjection(const BSONObj& query, const BSONObj& pro
ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.getValue().get(), &out);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query
- << "): "
- << status.toString());
+ << "): " << status.toString());
}
ASSERT(out);
return std::make_pair(std::move(queryMatchExpr.getValue()),
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 012efb8a262..02056023010 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -94,10 +94,9 @@ PlanStage* buildStages(OperationContext* opCtx,
auto descriptor = collection->getIndexCatalog()->findIndexByName(
opCtx, ixn->index.identifier.catalogName);
invariant(descriptor,
- str::stream() << "Namespace: " << collection->ns() << ", CanonicalQuery: "
- << cq.toStringShort()
- << ", IndexEntry: "
- << ixn->index.toString());
+ str::stream() << "Namespace: " << collection->ns()
+ << ", CanonicalQuery: " << cq.toStringShort()
+ << ", IndexEntry: " << ixn->index.toString());
// We use the node's internal name, keyPattern and multikey details here. For $**
// indexes, these may differ from the information recorded in the index's descriptor.
diff --git a/src/mongo/db/read_concern.h b/src/mongo/db/read_concern.h
index 7bd7594e143..c9ac7f08e1c 100644
--- a/src/mongo/db/read_concern.h
+++ b/src/mongo/db/read_concern.h
@@ -42,7 +42,7 @@ enum class PrepareConflictBehavior;
namespace repl {
class ReadConcernArgs;
class SpeculativeMajorityReadInfo;
-}
+} // namespace repl
/**
* Given the specified read concern arguments, performs checks that the read concern can actually be
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 7844f28ebea..ea270fef283 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -29,7 +29,6 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
-#include "mongo/db/read_concern.h"
#include "mongo/base/status.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
@@ -37,6 +36,7 @@
#include "mongo/db/logical_clock.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/read_concern.h"
#include "mongo/db/read_concern_mongod_gen.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"
@@ -168,10 +168,9 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx, LogicalTime clusterTime) {
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- BSON("appendOplogNote" << 1 << "maxClusterTime" << clusterTime.asTimestamp()
- << "data"
- << BSON("noop write for afterClusterTime read concern"
- << 1)),
+ BSON("appendOplogNote"
+ << 1 << "maxClusterTime" << clusterTime.asTimestamp() << "data"
+ << BSON("noop write for afterClusterTime read concern" << 1)),
Shard::RetryPolicy::kIdempotent);
status = swRes.getStatus();
std::get<1>(myWriteRequest)->set(status);
@@ -295,8 +294,7 @@ MONGO_REGISTER_SHIM(waitForReadConcern)
<< " value must not be greater than the current clusterTime. "
"Requested clusterTime: "
<< targetClusterTime->toString()
- << "; current clusterTime: "
- << currentTime.toString()};
+ << "; current clusterTime: " << currentTime.toString()};
}
auto status = makeNoopWriteIfNeeded(opCtx, *targetClusterTime);
diff --git a/src/mongo/db/read_concern_test.cpp b/src/mongo/db/read_concern_test.cpp
index df078b59aca..50ff8761aeb 100644
--- a/src/mongo/db/read_concern_test.cpp
+++ b/src/mongo/db/read_concern_test.cpp
@@ -48,9 +48,7 @@ using ReadConcernTest = ReplCoordTest;
TEST_F(ReadConcernTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 3ac2f9c6a06..092857b8a81 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -98,10 +98,7 @@ StatusWith<IndexNameObjs> getIndexNameObjs(OperationContext* opCtx,
return Status(
ErrorCodes::CannotCreateIndex,
str::stream()
- << "Cannot rebuild index "
- << spec
- << ": "
- << keyStatus.reason()
+ << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
<< " For more info see http://dochub.mongodb.org/core/index-validation");
}
}
@@ -126,7 +123,7 @@ Status rebuildIndexesOnCollection(OperationContext* opCtx,
return swRebuild.getStatus();
}
- auto[numRecords, dataSize] = swRebuild.getValue();
+ auto [numRecords, dataSize] = swRebuild.getValue();
auto rs = collection->getRecordStore();
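
The one-line hunk above is clang-format 7 recognizing C++17 structured bindings: older releases did not parse the syntax and glued the bracket to "auto". A runnable sketch (the pair is a hypothetical stand-in for the {numRecords, dataSize} result in the real code):

#include <iostream>
#include <utility>

int main() {
    std::pair<long long, long long> swRebuildValue{42, 4096};  // hypothetical counts
    // clang-format 3.8 emitted "auto[numRecords, dataSize]"; version 7
    // inserts the space after "auto":
    auto [numRecords, dataSize] = swRebuildValue;
    std::cout << numRecords << " records, " << dataSize << " bytes\n";
    return 0;
}
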
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index b3ffa50a846..dcd9b7cf3ba 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -243,9 +243,9 @@ bool hasReplSetConfigDoc(OperationContext* opCtx) {
}
/**
-* Check that the oplog is capped, and abort the process if it is not.
-* Caller must lock DB before calling this function.
-*/
+ * Check that the oplog is capped, and abort the process if it is not.
+ * Caller must lock DB before calling this function.
+ */
void checkForCappedOplog(OperationContext* opCtx, Database* db) {
const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
invariant(opCtx->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
@@ -283,15 +283,13 @@ void rebuildIndexes(OperationContext* opCtx, StorageEngine* storageEngine) {
fassert(40590,
{ErrorCodes::InternalError,
str::stream() << "failed to get index spec for index " << indexName
- << " in collection "
- << collNss.toString()});
+ << " in collection " << collNss.toString()});
}
auto& indexesToRebuild = swIndexSpecs.getValue();
invariant(indexesToRebuild.first.size() == 1 && indexesToRebuild.second.size() == 1,
str::stream() << "Num Index Names: " << indexesToRebuild.first.size()
- << " Num Index Objects: "
- << indexesToRebuild.second.size());
+ << " Num Index Objects: " << indexesToRebuild.second.size());
auto& ino = nsToIndexNameObjMap[collNss.ns()];
ino.first.emplace_back(std::move(indexesToRebuild.first.back()));
ino.second.emplace_back(std::move(indexesToRebuild.second.back()));
@@ -515,8 +513,7 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
<< swVersion.getStatus()
<< "). If the current featureCompatibilityVersion is below "
"4.0, see the documentation on upgrading at "
- << feature_compatibility_version_documentation::kUpgradeLink
- << ".",
+ << feature_compatibility_version_documentation::kUpgradeLink << ".",
swVersion.isOK());
fcvDocumentExists = true;
@@ -535,8 +532,9 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
<< startupWarningsLog;
log() << "** To fix this, use the setFeatureCompatibilityVersion "
<< "command to resume upgrade to 4.2." << startupWarningsLog;
- } else if (version == ServerGlobalParams::FeatureCompatibility::Version::
- kDowngradingTo40) {
+ } else if (version ==
+ ServerGlobalParams::FeatureCompatibility::Version::
+ kDowngradingTo40) {
log() << "** WARNING: A featureCompatibilityVersion downgrade did not "
<< "complete. " << startupWarningsLog;
log() << "** The current featureCompatibilityVersion is "
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 181f2f5ef69..1b99507fc5c 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -189,16 +189,15 @@ Status AbstractAsyncComponent::_scheduleWorkAtAndSaveHandle_inlock(
const std::string& name) {
invariant(handle);
if (_isShuttingDown_inlock()) {
- return Status(
- ErrorCodes::CallbackCanceled,
- str::stream() << "failed to schedule work " << name << " at " << when.toString() << ": "
- << _componentName
- << " is shutting down");
+ return Status(ErrorCodes::CallbackCanceled,
+ str::stream()
+ << "failed to schedule work " << name << " at " << when.toString() << ": "
+ << _componentName << " is shutting down");
}
auto result = _executor->scheduleWorkAt(when, std::move(work));
if (!result.isOK()) {
- return result.getStatus().withContext(
- str::stream() << "failed to schedule work " << name << " at " << when.toString());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name
+ << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h
index 8d5e784b591..64d88ad41e8 100644
--- a/src/mongo/db/repl/abstract_async_component.h
+++ b/src/mongo/db/repl/abstract_async_component.h
@@ -247,8 +247,7 @@ Status AbstractAsyncComponent::_startupComponent_inlock(std::unique_ptr<T>& comp
component.reset();
return Status(ErrorCodes::CallbackCanceled,
str::stream() << "failed to start up " << componentToStartUp << ": "
- << _componentName
- << " is shutting down");
+ << _componentName << " is shutting down");
}
auto status = component->startup();
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
index f3d44242ffb..882cf5f4fa8 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
+++ b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.cpp
@@ -135,4 +135,4 @@ executor::RemoteCommandRequest AbstractOplogFetcherTest::processNetworkResponse(
}
} // namespace repl
-} // namespace mango
+} // namespace mongo
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
index 2164f93cac6..7349689bb32 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
+++ b/src/mongo/db/repl/abstract_oplog_fetcher_test_fixture.h
@@ -97,4 +97,4 @@ protected:
Date_t lastFetchedWall;
};
} // namespace repl
-} // namespace mango
+} // namespace mongo
diff --git a/src/mongo/db/repl/applier_helpers.cpp b/src/mongo/db/repl/applier_helpers.cpp
index ef92ed6c52d..1672585a071 100644
--- a/src/mongo/db/repl/applier_helpers.cpp
+++ b/src/mongo/db/repl/applier_helpers.cpp
@@ -196,8 +196,7 @@ StatusWith<InsertGroup::ConstIterator> InsertGroup::groupAndApplyInserts(ConstIt
// application of an individual op.
auto status = exceptionToStatus().withContext(
str::stream() << "Error applying inserts in bulk: " << redact(groupedInsertObj)
- << ". Trying first insert as a lone insert: "
- << redact(entry.raw));
+ << ". Trying first insert as a lone insert: " << redact(entry.raw));
// It's not an error during initial sync to encounter DuplicateKey errors.
if (Mode::kInitialSync == _mode && ErrorCodes::DuplicateKey == status) {
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 9b3d0b50632..a562679e63d 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -159,9 +159,7 @@ Status _applyOps(OperationContext* opCtx,
ErrorCodes::AtomicityFailure,
str::stream()
<< "cannot apply insert or update operation on a non-existent namespace "
- << nss.ns()
- << " in atomic applyOps mode: "
- << redact(opObj));
+ << nss.ns() << " in atomic applyOps mode: " << redact(opObj));
}
// Reject malformed operations in an atomic applyOps.
@@ -171,8 +169,7 @@ Status _applyOps(OperationContext* opCtx,
uasserted(ErrorCodes::AtomicityFailure,
str::stream()
<< "cannot apply a malformed operation in atomic applyOps mode: "
- << redact(opObj)
- << "; will retry without atomicity: "
+ << redact(opObj) << "; will retry without atomicity: "
<< exceptionToStatus().toString());
}
@@ -231,9 +228,7 @@ Status _applyOps(OperationContext* opCtx,
str::stream()
<< "cannot apply insert or update operation on a "
"non-existent namespace "
- << nss.ns()
- << ": "
- << mongo::redact(opObj));
+ << nss.ns() << ": " << mongo::redact(opObj));
}
OldClientContext ctx(opCtx, nss.ns());
diff --git a/src/mongo/db/repl/apply_ops.h b/src/mongo/db/repl/apply_ops.h
index c5cca31569f..8aac61a39b9 100644
--- a/src/mongo/db/repl/apply_ops.h
+++ b/src/mongo/db/repl/apply_ops.h
@@ -116,7 +116,7 @@ Status applyOps(OperationContext* opCtx,
/**
* Applies a non-transactional 'applyOps' oplog entry. That is, an 'applyOps' entry that was not
* generated by a transaction.
-*/
+ */
Status applyApplyOpsOplogEntry(OperationContext* opCtx,
const OplogEntry& entry,
repl::OplogApplication::Mode oplogApplicationMode);
diff --git a/src/mongo/db/repl/apply_ops_test.cpp b/src/mongo/db/repl/apply_ops_test.cpp
index 13575d81fb0..2f38aa54a5f 100644
--- a/src/mongo/db/repl/apply_ops_test.cpp
+++ b/src/mongo/db/repl/apply_ops_test.cpp
@@ -141,17 +141,13 @@ TEST_F(ApplyOpsTest, CommandInNestedApplyOpsReturnsSuccess) {
auto mode = OplogApplication::Mode::kApplyOpsCmd;
BSONObjBuilder resultBuilder;
NamespaceString nss("test", "foo");
- auto innerCmdObj = BSON("op"
- << "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll()));
+ auto innerCmdObj =
+ BSON("op"
+ << "c"
+ << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll()));
auto innerApplyOpsObj = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("applyOps" << BSON_ARRAY(innerCmdObj)));
auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj));
@@ -169,18 +165,13 @@ TEST_F(ApplyOpsTest, InsertInNestedApplyOpsReturnsSuccess) {
NamespaceString nss("test", "foo");
auto innerCmdObj = BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
+ << "ns" << nss.ns() << "o"
<< BSON("_id"
<< "a")
- << "ui"
- << options.uuid.get());
+ << "ui" << options.uuid.get());
auto innerApplyOpsObj = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("applyOps" << BSON_ARRAY(innerCmdObj)));
auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj));
@@ -206,18 +197,10 @@ BSONObj makeApplyOpsWithInsertOperation(const NamespaceString& nss,
const BSONObj& documentToInsert) {
auto insertOp = uuid ? BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert
- << "ui"
- << *uuid)
+ << "ns" << nss.ns() << "o" << documentToInsert << "ui" << *uuid)
: BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert);
+ << "ns" << nss.ns() << "o" << documentToInsert);
return BSON("applyOps" << BSON_ARRAY(insertOp));
}
@@ -395,53 +378,35 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
auto ui1 = UUID::gen();
auto op1 = BSON("op"
<< "i"
- << "ns"
- << ns1.ns()
- << "ui"
- << ui1
- << "o"
- << BSON("_id" << 1));
+ << "ns" << ns1.ns() << "ui" << ui1 << "o" << BSON("_id" << 1));
NamespaceString ns2("test.b");
auto ui2 = UUID::gen();
auto op2 = BSON("op"
<< "i"
- << "ns"
- << ns2.ns()
- << "ui"
- << ui2
- << "o"
- << BSON("_id" << 2));
+ << "ns" << ns2.ns() << "ui" << ui2 << "o" << BSON("_id" << 2));
NamespaceString ns3("test.c");
auto ui3 = UUID::gen();
auto op3 = BSON("op"
<< "u"
- << "ns"
- << ns3.ns()
- << "ui"
- << ui3
- << "b"
- << true
- << "o"
- << BSON("x" << 1)
- << "o2"
- << BSON("_id" << 3));
+ << "ns" << ns3.ns() << "ui" << ui3 << "b" << true << "o" << BSON("x" << 1)
+ << "o2" << BSON("_id" << 3));
auto oplogEntry =
makeOplogEntry(OpTypeEnum::kCommand, BSON("applyOps" << BSON_ARRAY(op1 << op2 << op3)));
auto operations = ApplyOps::extractOperations(oplogEntry);
- ASSERT_EQUALS(3U, operations.size()) << "Unexpected number of operations extracted: "
- << oplogEntry.toBSON();
+ ASSERT_EQUALS(3U, operations.size())
+ << "Unexpected number of operations extracted: " << oplogEntry.toBSON();
// Check extracted CRUD operations.
auto it = operations.cbegin();
{
ASSERT(operations.cend() != it);
const auto& operation1 = *(it++);
- ASSERT(OpTypeEnum::kInsert == operation1.getOpType()) << "Unexpected op type: "
- << operation1.toBSON();
+ ASSERT(OpTypeEnum::kInsert == operation1.getOpType())
+ << "Unexpected op type: " << operation1.toBSON();
ASSERT_EQUALS(ui1, *operation1.getUuid());
ASSERT_EQUALS(ns1, operation1.getNss());
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), operation1.getOperationToApply());
@@ -453,8 +418,8 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
{
ASSERT(operations.cend() != it);
const auto& operation2 = *(it++);
- ASSERT(OpTypeEnum::kInsert == operation2.getOpType()) << "Unexpected op type: "
- << operation2.toBSON();
+ ASSERT(OpTypeEnum::kInsert == operation2.getOpType())
+ << "Unexpected op type: " << operation2.toBSON();
ASSERT_EQUALS(ui2, *operation2.getUuid());
ASSERT_EQUALS(ns2, operation2.getNss());
ASSERT_BSONOBJ_EQ(BSON("_id" << 2), operation2.getOperationToApply());
@@ -466,8 +431,8 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps)
{
ASSERT(operations.cend() != it);
const auto& operation3 = *(it++);
- ASSERT(OpTypeEnum::kUpdate == operation3.getOpType()) << "Unexpected op type: "
- << operation3.toBSON();
+ ASSERT(OpTypeEnum::kUpdate == operation3.getOpType())
+ << "Unexpected op type: " << operation3.toBSON();
ASSERT_EQUALS(ui3, *operation3.getUuid());
ASSERT_EQUALS(ns3, operation3.getNss());
ASSERT_BSONOBJ_EQ(BSON("x" << 1), operation3.getOperationToApply());
@@ -495,9 +460,7 @@ TEST_F(ApplyOpsTest, ApplyOpsFailsToDropAdmin) {
auto dropDatabaseOp = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
+ << "ns" << nss.getCommandNS().ns() << "o"
<< BSON("dropDatabase" << 1));
auto dropDatabaseCmdObj = BSON("applyOps" << BSON_ARRAY(dropDatabaseOp));
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 359f6a2c4a2..6d7918a7f5c 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -47,8 +47,7 @@ const HostAndPort BaseClonerTest::target("localhost", -1);
const NamespaceString BaseClonerTest::nss("db.coll");
const BSONObj BaseClonerTest::idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns());
+ << "ns" << nss.ns());
// static
BSONObj BaseClonerTest::createCountResponse(int documentCount) {
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index 7e64afae171..194bf202b8f 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -215,17 +215,17 @@ private:
ReplicationProcess* _replicationProcess;
/**
- * All member variables are labeled with one of the following codes indicating the
- * synchronization rules for accessing them:
- *
- * (PR) Completely private to BackgroundSync. Can be read or written to from within the main
- * BackgroundSync thread without synchronization. Shouldn't be accessed outside of this
- * thread.
- *
- * (S) Self-synchronizing; access in any way from any context.
- *
- * (M) Reads and writes guarded by _mutex
- *
+ * All member variables are labeled with one of the following codes indicating the
+ * synchronization rules for accessing them:
+ *
+ * (PR) Completely private to BackgroundSync. Can be read or written to from within the main
+ * BackgroundSync thread without synchronization. Shouldn't be accessed outside of this
+ * thread.
+ *
+ * (S) Self-synchronizing; access in any way from any context.
+ *
+ * (M) Reads and writes guarded by _mutex
+ *
*/
// Protects member data of BackgroundSync.
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index 7f6f6af9672..8f2dfc40664 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/repl/scatter_gather_algorithm.h"
#include "mongo/db/repl/scatter_gather_runner.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/server_options.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/util/log.h"
#include "mongo/util/str.h"
@@ -199,8 +198,8 @@ void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& reque
Status hbStatus = hbResp.initialize(resBSON, 0, /*requireWallTime*/ false);
if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
- std::string message = str::stream() << "Our set name did not match that of "
- << request.target.toString();
+ std::string message = str::stream()
+ << "Our set name did not match that of " << request.target.toString();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
return;
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 89e352b3ae6..a80a9160896 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -47,18 +47,18 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
-#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
- << s_.reason(); \
+#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
-#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
- << s_.reason(); \
+#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
namespace mongo {
@@ -140,30 +140,24 @@ ReplSetConfig assertMakeRSConfig(const BSONObj& configBson) {
}
TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
startQuorumCheck(config, 0);
ASSERT_OK(waitForQuorumCheck());
}
TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
getExecutor().shutdown();
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
startQuorumCheck(config, 0);
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, waitForQuorumCheck());
}
@@ -172,23 +166,20 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
// In this test, "we" are host "h3:1". All other nodes time out on
// their heartbeat request, and so the quorum check for initiate
// will fail because some members were unavailable.
- ReplSetConfig config = assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1")
- << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1")
- << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ ReplSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
startQuorumCheck(config, 2);
getNet()->enterNetwork();
const Date_t startDate = getNet()->now();
@@ -254,11 +245,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -282,8 +269,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), makeHeartbeatResponse(rsConfig, Milliseconds(8)));
}
@@ -301,19 +288,12 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "h3:1")
<< BSON("_id" << 4 << "host"
@@ -335,8 +315,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), {ErrorCodes::NoSuchKey, "No response"});
@@ -368,11 +348,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -396,8 +372,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h4", 1)) {
getNet()->scheduleResponse(
noi,
@@ -433,11 +409,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -448,8 +420,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
<< "h4:1")
<< BSON("_id" << 5 << "host"
<< "h5:1"))
- << "settings"
- << BSON("replicaSetId" << replicaSetId)));
+ << "settings" << BSON("replicaSetId" << replicaSetId)));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -466,8 +437,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == incompatibleHost) {
OpTime opTime{Timestamp{10, 10}, 10};
Date_t wallTime = Date_t();
@@ -498,10 +469,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
ASSERT_REASON_CONTAINS(status,
str::stream() << "Our replica set ID of " << replicaSetId
- << " did not match that of "
- << incompatibleHost.toString()
- << ", which is "
- << unexpectedId);
+ << " did not match that of " << incompatibleHost.toString()
+ << ", which is " << unexpectedId);
ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
@@ -517,11 +486,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -545,8 +510,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
long long configVersion = 1;
getNet()->scheduleResponse(
@@ -581,11 +546,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -609,8 +570,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
long long configVersion = 1;
getNet()->scheduleResponse(
@@ -641,11 +602,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -665,8 +622,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
long long configVersion = 5;
getNet()->scheduleResponse(
@@ -695,11 +652,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -719,8 +672,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(
noi,
@@ -753,11 +706,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -766,16 +715,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0))));
+ << "votes" << 0 << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -789,8 +732,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -819,11 +762,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -832,12 +771,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "priority"
- << 0))));
+ << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -851,8 +788,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -877,11 +814,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -890,16 +823,10 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
<< "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0))));
+ << "votes" << 0 << "priority" << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -913,8 +840,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
getNet()->scheduleResponse(noi,
startDate + Milliseconds(10),
@@ -937,11 +864,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckProcessesCallbackCanceledResponse) {
const ReplSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -961,8 +884,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckProcessesCallbackCanceledResponse) {
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) << "Already saw "
- << request.target.toString();
+ ASSERT(seenHosts.insert(request.target).second)
+ << "Already saw " << request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
getNet()->scheduleResponse(
noi,
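
The macro hunks near the top of this file re-pad the line-continuation backslashes to a single column after the assertion bodies were re-wrapped. A self-contained sketch of the same do { ... } while (false) macro shape (hypothetical macro; the real ones stream into the unit-test ASSERT helpers):

#include <cassert>
#include <string>

#define ASSERT_CONTAINS(HAYSTACK, NEEDLE)             \
    do {                                              \
        const std::string h_ = (HAYSTACK);            \
        assert(h_.find(NEEDLE) != std::string::npos); \
    } while (false)

int main() {
    ASSERT_CONTAINS(std::string("set name mismatch"), "mismatch");
    return 0;
}
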
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index a19dc5c6c8d..68f760d284a 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -75,42 +75,41 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() {
}
Status CollectionBulkLoaderImpl::init(const std::vector<BSONObj>& secondaryIndexSpecs) {
- return _runTaskReleaseResourcesOnFailure(
- [ coll = _autoColl->getCollection(), &secondaryIndexSpecs, this ]()->Status {
- // All writes in CollectionBulkLoaderImpl should be unreplicated.
- // The opCtx is accessed indirectly through _secondaryIndexesBlock.
- UnreplicatedWritesBlock uwb(_opCtx.get());
- // This enforces the buildIndexes setting in the replica set configuration.
- auto indexCatalog = coll->getIndexCatalog();
- auto specs =
- indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
- if (specs.size()) {
- _secondaryIndexesBlock->ignoreUniqueConstraint();
- auto status =
- _secondaryIndexesBlock
- ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
- .getStatus();
- if (!status.isOK()) {
- return status;
- }
- } else {
- _secondaryIndexesBlock.reset();
+ return _runTaskReleaseResourcesOnFailure([coll = _autoColl->getCollection(),
+ &secondaryIndexSpecs,
+ this]() -> Status {
+ // All writes in CollectionBulkLoaderImpl should be unreplicated.
+ // The opCtx is accessed indirectly through _secondaryIndexesBlock.
+ UnreplicatedWritesBlock uwb(_opCtx.get());
+ // This enforces the buildIndexes setting in the replica set configuration.
+ auto indexCatalog = coll->getIndexCatalog();
+ auto specs = indexCatalog->removeExistingIndexesNoChecks(_opCtx.get(), secondaryIndexSpecs);
+ if (specs.size()) {
+ _secondaryIndexesBlock->ignoreUniqueConstraint();
+ auto status =
+ _secondaryIndexesBlock
+ ->init(_opCtx.get(), _collection, specs, MultiIndexBlock::kNoopOnInitFn)
+ .getStatus();
+ if (!status.isOK()) {
+ return status;
}
- if (!_idIndexSpec.isEmpty()) {
- auto status =
- _idIndexBlock
- ->init(
- _opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
- .getStatus();
- if (!status.isOK()) {
- return status;
- }
- } else {
- _idIndexBlock.reset();
+ } else {
+ _secondaryIndexesBlock.reset();
+ }
+ if (!_idIndexSpec.isEmpty()) {
+ auto status =
+ _idIndexBlock
+ ->init(_opCtx.get(), _collection, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn)
+ .getStatus();
+ if (!status.isOK()) {
+ return status;
}
+ } else {
+ _idIndexBlock.reset();
+ }
- return Status::OK();
- });
+ return Status::OK();
+ });
}
Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::const_iterator begin,
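
The large hunk above is mostly a re-indent, but it also shows clang-format 7's lambda rules: no padding inside the capture list and a spaced trailing return type, so "[ coll, this ]()->Status" becomes "[coll, this]() -> Status". A runnable sketch with a hypothetical stand-in for _runTaskReleaseResourcesOnFailure (int plays the role of Status):

#include <functional>
#include <iostream>

// Hypothetical stand-in for the member that runs a task and releases
// resources on failure.
static int runTask(const std::function<int()>& task) {
    return task();
}

int main() {
    int coll = 7;
    int rc = runTask([coll]() -> int {  // was: [ coll ]()->int
        std::cout << "loading collection " << coll << '\n';
        return 0;
    });
    return rc;
}
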
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index e86e8ce022c..cf3d0b63bde 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -132,8 +132,8 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
_sourceNss.db().toString(),
makeCommandWithUUIDorCollectionName("listIndexes", _options.uuid, sourceNss),
[this](const Fetcher::QueryResponseStatus& fetchResult,
- Fetcher::NextAction * nextAction,
- BSONObjBuilder * getMoreBob) {
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
_listIndexesCallback(fetchResult, nextAction, getMoreBob);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -332,9 +332,7 @@ void CollectionCloner::_countCallback(
_finishCallback(countStatus.withContext(
str::stream() << "There was an error parsing document count from count "
"command result on collection "
- << _sourceNss.ns()
- << " from "
- << _source.toString()));
+ << _sourceNss.ns() << " from " << _source.toString()));
return;
}
}
@@ -343,8 +341,7 @@ void CollectionCloner::_countCallback(
_finishCallback({ErrorCodes::BadValue,
str::stream() << "Count call on collection " << _sourceNss.ns() << " from "
<< _source.toString()
- << " returned negative document count: "
- << count});
+ << " returned negative document count: " << count});
return;
}
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 20c4eb00ae3..09e61df9080 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -261,15 +261,15 @@ void CollectionClonerTest::setUp() {
const BSONObj idIndexSpec,
const std::vector<BSONObj>& nonIdIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
- Status result = localLoader->init(nonIdIndexSpecs);
- if (!result.isOK())
- return result;
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
+ Status result = localLoader->init(nonIdIndexSpecs);
+ if (!result.isOK())
+ return result;
- _loader = localLoader.get();
+ _loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
_server = std::make_unique<MockRemoteDBServer>(target.toString());
_server->assignCollectionUuid(nss.ns(), *options.uuid);
_client = new FailableMockDBClientConnection(_server.get(), getNet());
@@ -283,12 +283,10 @@ void CollectionClonerTest::setUp() {
std::vector<BSONObj> CollectionClonerTest::makeSecondaryIndexSpecs(const NamespaceString& nss) {
return {BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
<< "a_1"
- << "ns"
- << nss.ns()),
+ << "ns" << nss.ns()),
BSON("v" << 1 << "key" << BSON("b" << 1) << "name"
<< "b_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
}
void CollectionClonerTest::tearDown() {
@@ -443,8 +441,7 @@ TEST_F(CollectionClonerTest, CollectionClonerPassesThroughCommandStatusErrorFrom
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "count error"
- << "code"
- << int(ErrorCodes::OperationFailed)));
+ << "code" << int(ErrorCodes::OperationFailed)));
}
collectionCloner->join();
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus());
@@ -566,15 +563,15 @@ TEST_F(CollectionClonerNoAutoIndexTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
const BSONObj idIndexSpec,
const std::vector<BSONObj>& theIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
- auto loader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
- collNss = theNss;
- collOptions = theOptions;
- collIndexSpecs = theIndexSpecs;
- const auto status = loader->init(theIndexSpecs);
- if (!status.isOK())
- return status;
- return std::move(loader);
- };
+ auto loader = std::make_unique<CollectionBulkLoaderMock>(collectionStats);
+ collNss = theNss;
+ collOptions = theOptions;
+ collIndexSpecs = theIndexSpecs;
+ const auto status = loader->init(theIndexSpecs);
+ if (!status.isOK())
+ return status;
+ return std::move(loader);
+ };
const BSONObj doc = BSON("_id" << 1);
_server->insert(nss.ns(), doc);
@@ -633,13 +630,14 @@ TEST_F(CollectionClonerTest, ListIndexesReturnedNamespaceNotFound) {
bool collectionCreated = false;
bool writesAreReplicatedOnOpCtx = false;
NamespaceString collNss;
- storageInterface->createCollFn = [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
- OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
- writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
- collectionCreated = true;
- collNss = nss;
- return Status::OK();
- };
+ storageInterface->createCollFn =
+ [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
+ OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
+ writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
+ collectionCreated = true;
+ collNss = nss;
+ return Status::OK();
+ };
// Using a non-zero cursor to ensure that
// the cloner stops the fetcher from retrieving more results.
{
@@ -688,9 +686,9 @@ TEST_F(CollectionClonerTest,
// status.
auto exec = &getExecutor();
collectionCloner->setScheduleDbWorkFn_forTest([exec](
- executor::TaskExecutor::CallbackFn workFn) {
+ executor::TaskExecutor::CallbackFn workFn) {
auto wrappedTask = [workFn = std::move(workFn)](
- const executor::TaskExecutor::CallbackArgs& cbd) {
+ const executor::TaskExecutor::CallbackArgs& cbd) {
workFn(executor::TaskExecutor::CallbackArgs(
cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.opCtx));
};
@@ -698,8 +696,9 @@ TEST_F(CollectionClonerTest,
});
bool collectionCreated = false;
- storageInterface->createCollFn = [&collectionCreated](
- OperationContext*, const NamespaceString& nss, const CollectionOptions&) {
+ storageInterface->createCollFn = [&collectionCreated](OperationContext*,
+ const NamespaceString& nss,
+ const CollectionOptions&) {
collectionCreated = true;
return Status::OK();
};
@@ -1402,8 +1401,7 @@ TEST_F(CollectionClonerRenamedBeforeStartTest, BeginCollectionWithUUID) {
BSONObj expectedIdIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << alternateNss.ns());
+ << "ns" << alternateNss.ns());
ASSERT_BSONOBJ_EQ(collIdIndexSpec, expectedIdIndexSpec);
auto expectedNonIdIndexSpecs = makeSecondaryIndexSpecs(alternateNss);
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 267428f3831..1d1f3dda338 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -117,8 +117,8 @@ DatabaseCloner::DatabaseCloner(executor::TaskExecutor* executor,
_dbname,
createListCollectionsCommandObject(_listCollectionsFilter),
[=](const StatusWith<Fetcher::QueryResponse>& result,
- Fetcher::NextAction * nextAction,
- BSONObjBuilder * getMoreBob) {
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
_listCollectionsCallback(result, nextAction, getMoreBob);
},
ReadPreferenceSetting::secondaryPreferredMetadata(),
@@ -263,9 +263,8 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
BSONObjBuilder* getMoreBob) {
if (!result.isOK()) {
_finishCallback(result.getStatus().withContext(
- str::stream() << "Error issuing listCollections on db '" << _dbname << "' (host:"
- << _source.toString()
- << ")"));
+ str::stream() << "Error issuing listCollections on db '" << _dbname
+ << "' (host:" << _source.toString() << ")"));
return;
}
@@ -311,12 +310,11 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
for (auto&& info : _collectionInfos) {
BSONElement nameElement = info.getField(kNameFieldName);
if (nameElement.eoo()) {
- _finishCallback_inlock(
- lk,
- {ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '" << kNameFieldName << "' "
- << "field : "
- << info});
+ _finishCallback_inlock(lk,
+ {ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kNameFieldName << "' "
+ << "field : " << info});
return;
}
if (nameElement.type() != mongo::String) {
@@ -332,29 +330,24 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
{ErrorCodes::Error(51005),
str::stream()
<< "collection info contains duplicate collection name "
- << "'"
- << collectionName
- << "': "
- << info});
+ << "'" << collectionName << "': " << info});
return;
}
BSONElement optionsElement = info.getField(kOptionsFieldName);
if (optionsElement.eoo()) {
- _finishCallback_inlock(
- lk,
- {ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '" << kOptionsFieldName << "' "
- << "field : "
- << info});
+ _finishCallback_inlock(lk,
+ {ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kOptionsFieldName << "' "
+ << "field : " << info});
return;
}
if (!optionsElement.isABSONObj()) {
_finishCallback_inlock(lk,
Status(ErrorCodes::TypeMismatch,
str::stream() << "'" << kOptionsFieldName
- << "' field must be an object: "
- << info));
+ << "' field must be an object: " << info));
return;
}
const BSONObj optionsObj = optionsElement.Obj();
@@ -426,8 +419,8 @@ void DatabaseCloner::_collectionClonerCallback(const Status& status, const Names
// Record failure, but do not return just yet, in case we want to do some logging.
if (!status.isOK()) {
- collStatus = status.withContext(
- str::stream() << "Error cloning collection '" << nss.toString() << "'");
+ collStatus = status.withContext(str::stream()
+ << "Error cloning collection '" << nss.toString() << "'");
}
// Forward collection cloner result to caller.
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 66948a482ef..25fc845b088 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -126,16 +126,16 @@ void DatabaseClonerTest::setUp() {
const BSONObj& idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- const auto collInfo = &_collections[nss];
+ const auto collInfo = &_collections[nss];
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
- return std::move(localLoader);
- };
+ return std::move(localLoader);
+ };
}
void DatabaseClonerTest::tearDown() {
@@ -335,8 +335,7 @@ TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "unknown operator"
- << "code"
- << ErrorCodes::BadValue));
+ << "code" << ErrorCodes::BadValue));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -391,16 +390,13 @@ TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON()),
+ << "options" << _options2.toBSON()),
BSON("name"
<< "c"
- << "options"
- << _options3.toBSON())};
+ << "options" << _options3.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(createListCollectionsResponse(
@@ -425,12 +421,10 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
@@ -512,8 +506,7 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< ""
- << "options"
- << _options1.toBSON()))));
+ << "options" << _options1.toBSON()))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -534,12 +527,10 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())
+ << "options" << _options1.toBSON())
<< BSON("name"
<< "a"
- << "options"
- << _options2.toBSON()))));
+ << "options" << _options2.toBSON()))));
}
ASSERT_EQUALS(51005, getStatus().code());
@@ -575,11 +566,11 @@ TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << 123))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << 123))));
}
ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
@@ -596,12 +587,11 @@ TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << BSON("storageEngine" << 1)))));
+ processNetworkResponse(createListCollectionsResponse(
+ 0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSON("storageEngine" << 1)))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -617,11 +607,11 @@ TEST_F(DatabaseClonerTest, InvalidMissingUUID) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << BSONObj()))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSONObj()))));
}
ASSERT_EQUALS(50953, getStatus().code());
@@ -670,11 +660,11 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
- processNetworkResponse(createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options"
- << BSONObj()))));
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options" << BSONObj()))));
}
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
@@ -700,8 +690,7 @@ TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()))));
+ << "options" << _options1.toBSON()))));
}
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
@@ -732,12 +721,10 @@ TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())
+ << "options" << _options1.toBSON())
<< BSON("name"
<< "b"
- << "options"
- << _options2.toBSON()))));
+ << "options" << _options2.toBSON()))));
processNetworkResponse(createCountResponse(0));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
@@ -763,8 +750,7 @@ TEST_F(DatabaseClonerTest, ShutdownCancelsCollectionCloning) {
0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options"
- << _options1.toBSON())))));
+ << "options" << _options1.toBSON())))));
net->runReadyNetworkOperations();
// CollectionCloner sends collection count request on startup.
@@ -797,12 +783,10 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(
@@ -818,8 +802,7 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
processNetworkResponse(createCountResponse(0));
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "fake message"
- << "code"
- << ErrorCodes::CursorNotFound));
+ << "code" << ErrorCodes::CursorNotFound));
processNetworkResponse(createCountResponse(0));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
@@ -854,12 +837,10 @@ TEST_F(DatabaseClonerTest, CreateCollections) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options"
- << _options1.toBSON()),
+ << "options" << _options1.toBSON()),
BSON("name"
<< "b"
- << "options"
- << _options2.toBSON())};
+ << "options" << _options2.toBSON())};
{
executor::NetworkInterfaceMock::InNetworkGuard guard(getNet());
processNetworkResponse(
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index c13154b179a..a631fff5dbc 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -177,19 +177,19 @@ protected:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- // Get collection info from map.
- const auto collInfo = &_collections[nss];
- if (collInfo->stats->initCalled) {
- log() << "reusing collection during test which may cause problems, ns:" << nss;
- }
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
-
- return std::move(localLoader);
- };
+ // Get collection info from map.
+ const auto collInfo = &_collections[nss];
+ if (collInfo->stats->initCalled) {
+ log() << "reusing collection during test which may cause problems, ns:" << nss;
+ }
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
+
+ return std::move(localLoader);
+ };
_dbWorkThreadPool.startup();
_target = HostAndPort{"local:1234"};
@@ -924,13 +924,13 @@ TEST_F(DBsClonerTest, SingleDatabaseCopiesCompletely) {
{"listDatabases", fromjson("{ok:1, databases:[{name:'a'}]}")},
// listCollections for "a"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << options.toBSON()))))},
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
@@ -957,13 +957,13 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
{"listDatabases", fromjson("{ok:1, databases:[{name:'a'}, {name:'b'}]}")},
// listCollections for "a"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "a.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "a"
- << "options"
- << options1.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "a.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << options1.toBSON()))))},
// count:a
{"count", BSON("n" << 1 << "ok" << 1)},
// listIndexes:a
@@ -974,13 +974,13 @@ TEST_F(DBsClonerTest, TwoDatabasesCopiesCompletely) {
<< ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}")},
// listCollections for "b"
{"listCollections",
- BSON("ok" << 1 << "cursor" << BSON("id" << 0ll << "ns"
- << "b.$cmd.listCollections"
- << "firstBatch"
- << BSON_ARRAY(BSON("name"
- << "b"
- << "options"
- << options2.toBSON()))))},
+ BSON("ok" << 1 << "cursor"
+ << BSON("id" << 0ll << "ns"
+ << "b.$cmd.listCollections"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("name"
+ << "b"
+ << "options" << options2.toBSON()))))},
// count:b
{"count", BSON("n" << 2 << "ok" << 1)},
// listIndexes:b
diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp
index 19c9507e981..a69b2c07e71 100644
--- a/src/mongo/db/repl/dbcheck.cpp
+++ b/src/mongo/db/repl/dbcheck.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/dbcheck.h"
-#include "mongo/db/repl/dbcheck.h"
#include "mongo/db/repl/dbcheck_gen.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/optime.h"
@@ -132,7 +131,7 @@ std::unique_ptr<HealthLogEntry> dbCheckHealthLogEntry(const NamespaceString& nss
entry->setData(data);
return entry;
}
-}
+} // namespace
/**
* Get an error message if the check fails.
@@ -161,14 +160,9 @@ std::unique_ptr<HealthLogEntry> dbCheckBatchEntry(const NamespaceString& nss,
const repl::OpTime& optime) {
auto hashes = expectedFound(expectedHash, foundHash);
- auto data =
- BSON("success" << true << "count" << count << "bytes" << bytes << "md5" << hashes.second
- << "minKey"
- << minKey.elem()
- << "maxKey"
- << maxKey.elem()
- << "optime"
- << optime);
+ auto data = BSON("success" << true << "count" << count << "bytes" << bytes << "md5"
+ << hashes.second << "minKey" << minKey.elem() << "maxKey"
+ << maxKey.elem() << "optime" << optime);
auto severity = hashes.first ? SeverityEnum::Info : SeverityEnum::Error;
std::string msg =
@@ -284,19 +278,9 @@ std::unique_ptr<HealthLogEntry> dbCheckCollectionEntry(const NamespaceString& ns
std::string msg =
"dbCheck collection " + (match ? std::string("consistent") : std::string("inconsistent"));
auto data = BSON("success" << true << "uuid" << uuid.toString() << "found" << true << "name"
- << names.second
- << "prev"
- << prevs.second
- << "next"
- << nexts.second
- << "indexes"
- << indices.second
- << "options"
- << options.second
- << "md5"
- << md5s.second
- << "optime"
- << optime);
+ << names.second << "prev" << prevs.second << "next" << nexts.second
+ << "indexes" << indices.second << "options" << options.second
+ << "md5" << md5s.second << "optime" << optime);
return dbCheckHealthLogEntry(nss, severity, msg, OplogEntriesEnum::Collection, data);
}
@@ -520,7 +504,7 @@ Status dbCheckDatabaseOnSecondary(OperationContext* opCtx,
return Status::OK();
}
-}
+} // namespace
namespace repl {
diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h
index dde6de369b8..457087a9365 100644
--- a/src/mongo/db/repl/dbcheck.h
+++ b/src/mongo/db/repl/dbcheck.h
@@ -228,5 +228,5 @@ Status dbCheckOplogCommand(OperationContext* opCtx,
const repl::OplogEntry& entry,
OplogApplication::Mode mode,
boost::optional<Timestamp> stableTimestampForRecovery);
-}
-}
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/dbcheck_idl.h b/src/mongo/db/repl/dbcheck_idl.h
index c49bff7a5b1..9e2d9c880e6 100644
--- a/src/mongo/db/repl/dbcheck_idl.h
+++ b/src/mongo/db/repl/dbcheck_idl.h
@@ -91,4 +91,4 @@ private:
explicit BSONKey(const BSONElement& elem);
BSONObj _obj;
};
-}
+} // namespace mongo
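
The dbcheck hunks also show clang-format annotating bare namespace-closing braces with trailing comments. The convention, in miniature:

    namespace mongo {
    namespace repl {
    class OplogEntry;  // forward declaration
    }  // namespace repl
    }  // namespace mongo
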
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index 7f0f50787c9..d9d6f7bf2ce 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -126,9 +126,7 @@ Status _doTxn(OperationContext* opCtx,
uasserted(ErrorCodes::NamespaceNotFound,
str::stream() << "cannot apply insert, delete, or update operation on a "
"non-existent namespace "
- << nss->ns()
- << ": "
- << mongo::redact(opObj));
+ << nss->ns() << ": " << mongo::redact(opObj));
}
if (opObj.hasField("ui")) {
@@ -155,9 +153,7 @@ Status _doTxn(OperationContext* opCtx,
if (!collection) {
uasserted(ErrorCodes::NamespaceNotFound,
str::stream() << "cannot apply operation on a non-existent namespace "
- << nss->ns()
- << " with doTxn: "
- << redact(opObj));
+ << nss->ns() << " with doTxn: " << redact(opObj));
}
// Setting alwaysUpsert to true makes sense only during oplog replay, and doTxn commands
diff --git a/src/mongo/db/repl/do_txn_test.cpp b/src/mongo/db/repl/do_txn_test.cpp
index 3b92a944ea0..561579a069c 100644
--- a/src/mongo/db/repl/do_txn_test.cpp
+++ b/src/mongo/db/repl/do_txn_test.cpp
@@ -218,18 +218,10 @@ BSONObj makeInsertOperation(const NamespaceString& nss,
const BSONObj& documentToInsert) {
return uuid ? BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert
- << "ui"
- << *uuid)
+ << "ns" << nss.ns() << "o" << documentToInsert << "ui" << *uuid)
: BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "o"
- << documentToInsert);
+ << "ns" << nss.ns() << "o" << documentToInsert);
}
/**
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
index 281b864d0d1..1e87ffa17e6 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
@@ -253,7 +253,7 @@ TEST_F(DropPendingCollectionReaperTest,
decltype(dpns) droppedNss;
bool writesAreReplicatedDuringDrop = true;
storageInterfaceMock.dropCollFn = [&droppedNss, &writesAreReplicatedDuringDrop](
- OperationContext* opCtx, const NamespaceString& nss) {
+ OperationContext* opCtx, const NamespaceString& nss) {
droppedNss = nss;
writesAreReplicatedDuringDrop = opCtx->writesAreReplicated();
return Status::OK();
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index a6c643b0ff0..0146f92a19d 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -661,12 +661,7 @@ template OplogEntry IdempotencyTest::update<const char*>(char const* _id, const
BSONObj makeInsertApplyOpsEntry(const NamespaceString& nss, const UUID& uuid, const BSONObj& doc) {
return BSON("op"
<< "i"
- << "ns"
- << nss.toString()
- << "ui"
- << uuid
- << "o"
- << doc);
+ << "ns" << nss.toString() << "ui" << uuid << "o" << doc);
}
} // namespace repl
} // namespace mongo
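
The BSON() macro reflows above all follow one rule: consecutive "key" << value pairs are packed onto shared lines, but the chain still breaks before a bare string-literal value. A small sketch in the new style; the header paths are assumed from the MongoDB source tree and are not shown in this diff.

    #include <string>

    #include "mongo/bson/bsonmisc.h"  // BSON macro (assumed location)
    #include "mongo/bson/bsonobj.h"

    namespace mongo {
    // Packed style after clang-format 7: non-literal values share lines with
    // their keys; a string-literal value still starts its own line.
    BSONObj makeInsertOp(const std::string& ns) {
        return BSON("op"
                    << "i"
                    << "ns" << ns << "o" << BSON("_id" << 1));
    }
    }  // namespace mongo
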
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 09127b6c6ba..9376eccb3dd 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -483,8 +483,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
auto status = _checkForShutdownAndConvertStatus_inlock(
callbackArgs,
str::stream() << "error while starting initial sync attempt " << (initialSyncAttempt + 1)
- << " of "
- << initialSyncMaxAttempts);
+ << " of " << initialSyncMaxAttempts);
if (!status.isOK()) {
_finishInitialSyncAttempt(status);
return;
@@ -748,11 +747,8 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
Status(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "Expected to receive one document for the oldest active "
"transaction entry, but received: "
- << docs.size()
- << ". First: "
- << redact(docs.front())
- << ". Last: "
- << redact(docs.back())));
+ << docs.size() << ". First: " << redact(docs.front())
+ << ". Last: " << redact(docs.back())));
return;
}
@@ -859,11 +855,8 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
Status(ErrorCodes::TooManyMatchingDocuments,
str::stream() << "Expected to receive one feature compatibility version "
"document, but received: "
- << docs.size()
- << ". First: "
- << redact(docs.front())
- << ". Last: "
- << redact(docs.back())));
+ << docs.size() << ". First: " << redact(docs.front())
+ << ". Last: " << redact(docs.back())));
return;
}
const auto hasDoc = docs.begin() != docs.end();
@@ -1528,8 +1521,8 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
}
Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn callback) {
- BSONObj query = BSON(
- "find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1) << "limit" << 1);
+ BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
+ << "limit" << 1);
_lastOplogEntryFetcher =
stdx::make_unique<Fetcher>(_exec,
@@ -1680,13 +1673,12 @@ Status InitialSyncer::_scheduleWorkAtAndSaveHandle_inlock(
if (_isShuttingDown_inlock()) {
return Status(ErrorCodes::CallbackCanceled,
str::stream() << "failed to schedule work " << name << " at "
- << when.toString()
- << ": initial syncer is shutting down");
+ << when.toString() << ": initial syncer is shutting down");
}
auto result = _exec->scheduleWorkAt(when, std::move(work));
if (!result.isOK()) {
- return result.getStatus().withContext(
- str::stream() << "failed to schedule work " << name << " at " << when.toString());
+ return result.getStatus().withContext(str::stream() << "failed to schedule work " << name
+ << " at " << when.toString());
}
*handle = result.getValue();
return Status::OK();
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 9bc286759ff..f9b94d7193d 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -299,19 +299,19 @@ protected:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)
-> StatusWith<std::unique_ptr<CollectionBulkLoaderMock>> {
- // Get collection info from map.
- const auto collInfo = &_collections[nss];
- if (collInfo->stats->initCalled) {
- log() << "reusing collection during test which may cause problems, ns:" << nss;
- }
- auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
- auto status = localLoader->init(secondaryIndexSpecs);
- if (!status.isOK())
- return status;
- collInfo->loader = localLoader.get();
-
- return std::move(localLoader);
- };
+ // Get collection info from map.
+ const auto collInfo = &_collections[nss];
+ if (collInfo->stats->initCalled) {
+ log() << "reusing collection during test which may cause problems, ns:" << nss;
+ }
+ auto localLoader = std::make_unique<CollectionBulkLoaderMock>(collInfo->stats);
+ auto status = localLoader->init(secondaryIndexSpecs);
+ if (!status.isOK())
+ return status;
+ collInfo->loader = localLoader.get();
+
+ return std::move(localLoader);
+ };
_storageInterface->upgradeNonReplicatedUniqueIndexesFn = [this](OperationContext* opCtx) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
if (_storageInterfaceWorkDone.upgradeNonReplicatedUniqueIndexesShouldFail) {
@@ -372,17 +372,13 @@ protected:
dataReplicatorExternalState->lastCommittedOpTime = _myLastOpTime;
{
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "myset"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << 10000))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "myset"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("electionTimeoutMillis" << 10000))));
dataReplicatorExternalState->replSetConfigResult = config;
}
_externalState = dataReplicatorExternalState.get();
@@ -1170,14 +1166,14 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughGetBeginFetchingOpTimeSchedu
// We reject the 'find' command for the begin fetching optime and save the request for
// inspection at the end of this test case.
executor::RemoteCommandRequest request;
- _executorProxy->shouldFailScheduleRemoteCommandRequest = [&request](
- const executor::RemoteCommandRequestOnAny& requestToSend) {
- request = {requestToSend, 0};
- auto elem = requestToSend.cmdObj.firstElement();
- return (
- ("find" == elem.fieldNameStringData()) &&
- (NamespaceString::kSessionTransactionsTableNamespace.coll() == elem.valueStringData()));
- };
+ _executorProxy->shouldFailScheduleRemoteCommandRequest =
+ [&request](const executor::RemoteCommandRequestOnAny& requestToSend) {
+ request = {requestToSend, 0};
+ auto elem = requestToSend.cmdObj.firstElement();
+ return (("find" == elem.fieldNameStringData()) &&
+ (NamespaceString::kSessionTransactionsTableNamespace.coll() ==
+ elem.valueStringData()));
+ };
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
@@ -1260,12 +1256,13 @@ TEST_F(InitialSyncerTest, InitialSyncerPassesThroughLastOplogEntryFetcherSchedul
// We reject the 'find' command on the oplog and save the request for inspection at the end of
// this test case.
executor::RemoteCommandRequest request;
- _executorProxy->shouldFailScheduleRemoteCommandRequest = [&request](
- const executor::RemoteCommandRequestOnAny& requestToSend) {
- request = {requestToSend, 0};
- auto elem = requestToSend.cmdObj.firstElement();
- return (("find" == elem.fieldNameStringData()) && ("oplog.rs" == elem.valueStringData()));
- };
+ _executorProxy->shouldFailScheduleRemoteCommandRequest =
+ [&request](const executor::RemoteCommandRequestOnAny& requestToSend) {
+ request = {requestToSend, 0};
+ auto elem = requestToSend.cmdObj.firstElement();
+ return (("find" == elem.fieldNameStringData()) &&
+ ("oplog.rs" == elem.valueStringData()));
+ };
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
@@ -1680,8 +1677,7 @@ TEST_F(InitialSyncerTest,
TEST_F(InitialSyncerTest,
InitialSyncerReturnsIncompatibleServerVersionWhenFCVFetcherReturnsUpgradeTargetVersion) {
auto docs = {BSON("_id" << FeatureCompatibilityVersionParser::kParameterName << "version"
- << FeatureCompatibilityVersionParser::kVersion40
- << "targetVersion"
+ << FeatureCompatibilityVersionParser::kVersion40 << "targetVersion"
<< FeatureCompatibilityVersionParser::kVersion42)};
runInitialSyncWithBadFCVResponse(docs, ErrorCodes::IncompatibleServerVersion);
}
@@ -1689,8 +1685,7 @@ TEST_F(InitialSyncerTest,
TEST_F(InitialSyncerTest,
InitialSyncerReturnsIncompatibleServerVersionWhenFCVFetcherReturnsDowngradeTargetVersion) {
auto docs = {BSON("_id" << FeatureCompatibilityVersionParser::kParameterName << "version"
- << FeatureCompatibilityVersionParser::kVersion40
- << "targetVersion"
+ << FeatureCompatibilityVersionParser::kVersion40 << "targetVersion"
<< FeatureCompatibilityVersionParser::kVersion40)};
runInitialSyncWithBadFCVResponse(docs, ErrorCodes::IncompatibleServerVersion);
}
@@ -2241,8 +2236,7 @@ TEST_F(InitialSyncerTest,
<< "dbinfo")
<< BSON("name"
<< "b"))
- << "ok"
- << 1)));
+ << "ok" << 1)));
net->runReadyNetworkOperations();
// Oplog tailing query.
@@ -2609,8 +2603,7 @@ TEST_F(
// Second last oplog entry fetcher.
processSuccessfulLastOplogEntryFetcherResponse({BSON("ts"
<< "not a timestamp"
- << "t"
- << 1)});
+ << "t" << 1)});
// _lastOplogEntryFetcherCallbackAfterCloningData() will shut down the OplogFetcher after
// setting the completion status.
@@ -3226,8 +3219,7 @@ TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())})));
+ << "ns" << nss.ns())})));
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -3960,8 +3952,7 @@ TEST_F(InitialSyncerTest,
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())}));
+ << "ns" << nss.ns())}));
assertRemoteCommandNameEquals("listIndexes", request);
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -4344,8 +4335,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
NamespaceString(nss.getCommandNS()),
{BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns"
- << nss.ns())}));
+ << "ns" << nss.ns())}));
assertRemoteCommandNameEquals("listIndexes", request);
ASSERT_EQUALS(*_options1.uuid, UUID::parse(request.cmdObj.firstElement()));
ASSERT_EQUALS(nss.db(), request.dbname);
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index e160054208b..06e0d1c1896 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -220,8 +220,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Found \"" << kIsReplicaSetFieldName
<< "\" field which should indicate that no valid config "
"is loaded, but we didn't also have an \""
- << kInfoFieldName
- << "\" field as we expected");
+ << kInfoFieldName << "\" field as we expected");
}
}
@@ -248,8 +247,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kHostsFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(hostElement.type()));
}
_hosts.push_back(HostAndPort(hostElement.String()));
@@ -269,8 +267,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kPassivesFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(passiveElement.type()));
}
_passives.push_back(HostAndPort(passiveElement.String()));
@@ -290,8 +287,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kArbitersFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(arbiterElement.type()));
}
_arbiters.push_back(HostAndPort(arbiterElement.String()));
@@ -364,8 +360,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kTagsFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(String)
- << " but found type "
+ << typeName(String) << " but found type "
<< typeName(tagsElement.type()));
}
_tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
@@ -397,8 +392,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object)
- << " but found type "
+ << typeName(Object) << " but found type "
<< typeName(lastWriteOpTimeElement.type()));
}
auto lastWriteOpTime = OpTime::parseFromOplogEntry(lastWriteOpTimeElement.Obj());
@@ -418,8 +412,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date)
- << " but found type "
+ << typeName(Date) << " but found type "
<< typeName(lastWriteDateElement.type()));
}
if (_lastWrite) {
@@ -439,8 +432,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object)
- << " but found type "
+ << typeName(Object) << " but found type "
<< typeName(lastMajorityWriteOpTimeElement.type()));
}
auto lastMajorityWriteOpTime =
@@ -461,8 +453,7 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date)
- << " but found type "
+ << typeName(Date) << " but found type "
<< typeName(lastMajorityWriteDateElement.type()));
}
if (_lastMajorityWrite) {
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index a78298933fd..3d62a2b60b4 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -273,8 +273,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
for (int tries = 0; tries < 3; ++tries) {
err = GetAdaptersAddresses(family,
GAA_FLAG_SKIP_ANYCAST | // only want unicast addrs
- GAA_FLAG_SKIP_MULTICAST |
- GAA_FLAG_SKIP_DNS_SERVER,
+ GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER,
NULL,
adapters,
&adaptersLen);
diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp
index 02fb978173e..5ab43763c8c 100644
--- a/src/mongo/db/repl/member_config.cpp
+++ b/src/mongo/db/repl/member_config.cpp
@@ -178,9 +178,9 @@ MemberConfig::MemberConfig(const BSONObj& mcfg, ReplSetTagConfig* tagConfig) {
for (auto&& tag : tagsElement.Obj()) {
if (tag.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
- str::stream() << "tags." << tag.fieldName()
- << " field has non-string value of type "
- << typeName(tag.type()));
+ str::stream()
+ << "tags." << tag.fieldName()
+ << " field has non-string value of type " << typeName(tag.type()));
}
_tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(), tag.valueStringData()));
}
@@ -240,9 +240,9 @@ Status MemberConfig::validate() const {
}
if (_slaveDelay < Seconds(0) || _slaveDelay > kMaxSlaveDelay) {
return Status(ErrorCodes::BadValue,
- str::stream() << kSlaveDelayFieldName << " field value of "
- << durationCount<Seconds>(_slaveDelay)
- << " seconds is out of range");
+ str::stream()
+ << kSlaveDelayFieldName << " field value of "
+ << durationCount<Seconds>(_slaveDelay) << " seconds is out of range");
}
// Check for additional electable requirements, when priority is non zero
if (_priority != 0) {
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index 6176d230463..cf84b37ccdc 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -60,8 +60,7 @@ TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
ReplSetTagConfig tagConfig;
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "localhost"
- << "frim"
- << 1),
+ << "frim" << 1),
&tagConfig),
ExceptionFor<ErrorCodes::BadValue>);
}
@@ -133,8 +132,7 @@ TEST(MemberConfig, ParseArbiterOnly) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly"
- << 1.0),
+ << "arbiterOnly" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isArbiter());
ASSERT_EQUALS(0.0, mc.getPriority());
@@ -142,8 +140,7 @@ TEST(MemberConfig, ParseArbiterOnly) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly"
- << false),
+ << "arbiterOnly" << false),
&tagConfig);
ASSERT_TRUE(!mc.isArbiter());
ASSERT_EQUALS(1.0, mc.getPriority());
@@ -155,16 +152,14 @@ TEST(MemberConfig, ParseHidden) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "hidden"
- << 1.0),
+ << "hidden" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isHidden());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "hidden"
- << false),
+ << "hidden" << false),
&tagConfig);
ASSERT_TRUE(!mc.isHidden());
}
@@ -181,16 +176,14 @@ TEST(MemberConfig, ParseBuildIndexes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes"
- << 1.0),
+ << "buildIndexes" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.shouldBuildIndexes());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes"
- << false),
+ << "buildIndexes" << false),
&tagConfig);
ASSERT_TRUE(!mc.shouldBuildIndexes());
}
@@ -201,18 +194,14 @@ TEST(MemberConfig, ParseVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.0),
+ << "votes" << 1.0),
&tagConfig);
ASSERT_TRUE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0),
+ << "votes" << 0 << "priority" << 0),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
@@ -220,38 +209,33 @@ TEST(MemberConfig, ParseVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.5),
+ << "votes" << 1.5),
&tagConfig);
ASSERT_TRUE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0.5),
+ << "votes" << 0.5),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -0.5),
+ << "votes" << -0.5),
&tagConfig);
ASSERT_FALSE(mc.isVoter());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 2),
+ << "votes" << 2),
&tagConfig);
}
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << Date_t::fromMillisSinceEpoch(2)),
+ << "votes" << Date_t::fromMillisSinceEpoch(2)),
&tagConfig),
ExceptionFor<ErrorCodes::TypeMismatch>);
}
@@ -261,31 +245,27 @@ TEST(MemberConfig, ParsePriority) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1),
+ << "priority" << 1),
&tagConfig);
ASSERT_EQUALS(1.0, mc.getPriority());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0),
+ << "priority" << 0),
&tagConfig);
ASSERT_EQUALS(0.0, mc.getPriority());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 100.8),
+ << "priority" << 100.8),
&tagConfig);
ASSERT_EQUALS(100.8, mc.getPriority());
}
ASSERT_THROWS(MemberConfig(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << Date_t::fromMillisSinceEpoch(2)),
+ << "priority" << Date_t::fromMillisSinceEpoch(2)),
&tagConfig),
ExceptionFor<ErrorCodes::TypeMismatch>);
}
@@ -294,8 +274,7 @@ TEST(MemberConfig, ParseSlaveDelay) {
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "slaveDelay"
- << 100),
+ << "slaveDelay" << 100),
&tagConfig);
ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
}
@@ -365,14 +344,13 @@ TEST(MemberConfig, DuplicateHorizonNames) {
ASSERT_NOT_EQUALS(s.reason().find("Duplicate horizon name found"), std::string::npos);
}
try {
- MemberConfig(BSON("_id" << 0 << "host"
- << "h"
- << "horizons"
- << BSON("someUniqueHorizonName"
- << "a.host:43"
- << SplitHorizon::kDefaultHorizon
- << "b.host:256")),
- &tagConfig);
+ MemberConfig(
+ BSON("_id" << 0 << "host"
+ << "h"
+ << "horizons"
+ << BSON("someUniqueHorizonName"
+ << "a.host:43" << SplitHorizon::kDefaultHorizon << "b.host:256")),
+ &tagConfig);
ASSERT_TRUE(false); // Should not succeed.
} catch (const ExceptionFor<ErrorCodes::BadValue>& ex) {
const Status& s = ex.toStatus();
@@ -489,8 +467,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.0),
+ << "votes" << 1.0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
@@ -498,10 +475,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0),
+ << "votes" << 0 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -510,8 +484,7 @@ TEST(MemberConfig, ValidateVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1.5),
+ << "votes" << 1.5),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
@@ -519,10 +492,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0.5
- << "priority"
- << 0),
+ << "votes" << 0.5 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -530,10 +500,7 @@ TEST(MemberConfig, ValidateVotes) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -0.5
- << "priority"
- << 0),
+ << "votes" << -0.5 << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -542,16 +509,14 @@ TEST(MemberConfig, ValidateVotes) {
// Invalid values
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 2),
+ << "votes" << 2),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << -1),
+ << "votes" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -562,32 +527,28 @@ TEST(MemberConfig, ValidatePriorityRanges) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0),
+ << "priority" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1000),
+ << "priority" << 1000),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << -1),
+ << "priority" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1001),
+ << "priority" << 1001),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -598,40 +559,28 @@ TEST(MemberConfig, ValidateSlaveDelays) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 0),
+ << "priority" << 0 << "slaveDelay" << 0),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 3600 * 10),
+ << "priority" << 0 << "slaveDelay" << 3600 * 10),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << -1),
+ << "priority" << 0 << "slaveDelay" << -1),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 0
- << "slaveDelay"
- << 3600 * 24 * 400),
+ << "priority" << 0 << "slaveDelay" << 3600 * 24 * 400),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -641,10 +590,7 @@ TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
ReplSetTagConfig tagConfig;
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "slaveDelay"
- << 60),
+ << "priority" << 1 << "slaveDelay" << 60),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -654,20 +600,14 @@ TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "hidden"
- << true),
+ << "priority" << 1 << "hidden" << true),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "hidden"
- << false),
+ << "priority" << 1 << "hidden" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
@@ -678,10 +618,7 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "buildIndexes"
- << false),
+ << "priority" << 1 << "buildIndexes" << false),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
@@ -689,10 +626,7 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "priority"
- << 1
- << "buildIndexes"
- << true),
+ << "priority" << 1 << "buildIndexes" << true),
&tagConfig);
ASSERT_OK(mc.validate());
}
@@ -703,42 +637,28 @@ TEST(MemberConfig, ValidateArbiterVotesRelationship) {
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1
- << "arbiterOnly"
- << true),
+ << "votes" << 1 << "arbiterOnly" << true),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "priority"
- << 0
- << "arbiterOnly"
- << false),
+ << "votes" << 0 << "priority" << 0 << "arbiterOnly" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 1
- << "arbiterOnly"
- << false),
+ << "votes" << 1 << "arbiterOnly" << false),
&tagConfig);
ASSERT_OK(mc.validate());
}
{
MemberConfig mc(BSON("_id" << 0 << "host"
<< "h"
- << "votes"
- << 0
- << "arbiterOnly"
- << true),
+ << "votes" << 0 << "arbiterOnly" << true),
&tagConfig);
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
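
One behavioral detail recorded in the test comments above: for backwards compatibility, fractional vote counts are truncated toward zero, so 1.5 counts as a voter while 0.5 and -0.5 do not. The rule in isolation, as a sketch of what the tests exercise:

    // Truncation rule described by the MemberConfig tests above:
    // 1.5 -> 1 (voter), 0.5 -> 0, -0.5 -> 0 (non-voters).
    long long truncateVotes(double votes) {
        return static_cast<long long>(votes);  // C++ cast truncates toward zero
    }
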
diff --git a/src/mongo/db/repl/member_data.cpp b/src/mongo/db/repl/member_data.cpp
index 40a081ba6a2..247167bc150 100644
--- a/src/mongo/db/repl/member_data.cpp
+++ b/src/mongo/db/repl/member_data.cpp
@@ -141,8 +141,9 @@ void MemberData::setLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_
// TODO(russotto): We think this should never happen, rollback or no rollback. Make this an
// invariant and see what happens.
log() << "Durable progress (" << opTime.opTime << ") is ahead of the applied progress ("
- << _lastAppliedOpTime << ". This is likely due to a "
- "rollback."
+ << _lastAppliedOpTime
+ << ". This is likely due to a "
+ "rollback."
<< " memberid: " << _memberId << _hostAndPort.toString()
<< " previous durable progress: " << _lastDurableOpTime;
} else {
diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.h b/src/mongo/db/repl/mock_repl_coord_server_fixture.h
index 9bac2e16d74..7f52f4a3f21 100644
--- a/src/mongo/db/repl/mock_repl_coord_server_fixture.h
+++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.h
@@ -39,7 +39,7 @@ class OperationContext;
namespace repl {
class OplogEntry;
class StorageInterfaceMock;
-}
+} // namespace repl
/**
* This is a basic fixture that is backed by an ephemeral storage engine and a mock replication
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index d9fc1390de3..35719c01913 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -409,7 +409,7 @@ OplogDocWriter _logOpWriter(OperationContext* opCtx,
return OplogDocWriter(OplogDocWriter(b.obj(), obj));
}
-} // end anon namespace
+} // namespace
/* we write to local.oplog.rs:
{ ts : ..., h: ..., v: ..., op: ..., etc }
@@ -429,8 +429,8 @@ OplogDocWriter _logOpWriter(OperationContext* opCtx,
* writers - an array with size nDocs of DocWriter objects.
* timestamps - an array with size nDocs of respective Timestamp objects for each DocWriter.
* oplogCollection - collection to be written to.
- * finalOpTime - the OpTime of the last DocWriter object.
- * wallTime - the wall clock time of the corresponding oplog entry.
+ * finalOpTime - the OpTime of the last DocWriter object.
+ * wallTime - the wall clock time of the corresponding oplog entry.
*/
void _logOpsInner(OperationContext* opCtx,
const NamespaceString& nss,
@@ -459,8 +459,7 @@ void _logOpsInner(OperationContext* opCtx,
// are logging within one WriteUnitOfWork.
invariant(finalOpTime.getTimestamp() <= *commitTime,
str::stream() << "Final OpTime: " << finalOpTime.toString()
- << ". Commit Time: "
- << commitTime->toString());
+ << ". Commit Time: " << commitTime->toString());
}
// Optionally hang before advancing lastApplied.
@@ -495,12 +494,8 @@ OpTime logOp(OperationContext* opCtx,
// All collections should have UUIDs now, so all insert, update, and delete oplog entries should
// also have uuids. Some no-op (n) and command (c) entries may still elide the uuid field.
invariant(uuid || 'n' == *opstr || 'c' == *opstr,
- str::stream() << "Expected uuid for logOp with opstr: " << opstr << ", nss: "
- << nss.ns()
- << ", obj: "
- << obj
- << ", os: "
- << o2);
+ str::stream() << "Expected uuid for logOp with opstr: " << opstr
+ << ", nss: " << nss.ns() << ", obj: " << obj << ", os: " << o2);
auto replCoord = ReplicationCoordinator::get(opCtx);
// For commands, the test below is on the command ns and therefore does not check for
@@ -629,7 +624,7 @@ std::vector<OpTime> logInsertOps(OperationContext* opCtx,
sleepmillis(numMillis);
}
- std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const*[count]);
+ std::unique_ptr<DocWriter const*[]> basePtrs(new DocWriter const*[count]);
for (size_t i = 0; i < count; i++) {
basePtrs[i] = &writers[i];
}
@@ -656,7 +651,7 @@ long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& repl
LOG(3) << "32bit system; choosing " << sz << " bytes oplog";
return sz;
}
-// First choose a minimum size.
+ // First choose a minimum size.
#if defined(__APPLE__)
// typically these are desktops (dev machines), so keep it smallish
@@ -786,8 +781,7 @@ std::pair<OptionalCollectionUUID, NamespaceString> parseCollModUUIDAndNss(Operat
const auto nsByUUID = catalog.lookupNSSByUUID(uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
- << "): "
- << redact(cmd.toString()),
+ << "): " << redact(cmd.toString()),
nsByUUID);
return std::pair<OptionalCollectionUUID, NamespaceString>(uuid, *nsByUUID);
}
@@ -1342,8 +1336,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
collection = catalog.lookupCollectionByUUID(uuid);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection (" << uuid
- << "): "
- << redact(op.toString()),
+ << "): " << redact(op.toString()),
collection);
requestNss = collection->ns();
dassert(opCtx->lockState()->isCollectionLockedForMode(
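
Among the oplog.cpp reflows is a declarator fix: `DocWriter const* []` loses its space and becomes `DocWriter const*[]`, the unique_ptr-to-array-of-pointers form. A self-contained sketch of that construct, modeled on the logInsertOps hunk:

    #include <cstddef>
    #include <memory>

    struct DocWriter {};

    // unique_ptr owning an array of pointers-to-const DocWriter; clang-format 7
    // writes the array declarator without a space before [].
    void collectWriters(const DocWriter* writers, std::size_t count) {
        std::unique_ptr<DocWriter const*[]> basePtrs(new DocWriter const*[count]);
        for (std::size_t i = 0; i < count; ++i) {
            basePtrs[i] = &writers[i];
        }
    }
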
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index 39c5ab03036..cfea973d17d 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -66,17 +66,16 @@ std::tuple<BSONObj, Timestamp, std::size_t> OplogBufferCollection::addIdToDocume
const BSONObj& orig, const Timestamp& lastTimestamp, std::size_t sentinelCount) {
if (orig.isEmpty()) {
return std::make_tuple(
- BSON(kIdFieldName << BSON(
- kTimestampFieldName << lastTimestamp << kSentinelFieldName
- << static_cast<long long>(sentinelCount + 1))),
+ BSON(kIdFieldName << BSON(kTimestampFieldName
+ << lastTimestamp << kSentinelFieldName
+ << static_cast<long long>(sentinelCount + 1))),
lastTimestamp,
sentinelCount + 1);
}
const auto ts = orig[kTimestampFieldName].timestamp();
invariant(!ts.isNull());
auto doc = BSON(kIdFieldName << BSON(kTimestampFieldName << ts << kSentinelFieldName << 0)
- << kOplogEntryFieldName
- << orig);
+ << kOplogEntryFieldName << orig);
return std::make_tuple(doc, ts, 0);
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
index 0d544104fc6..64ee03f9d0f 100644
--- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
@@ -111,12 +111,9 @@ NamespaceString makeNamespace(const T& t, const char* suffix = "") {
BSONObj makeOplogEntry(int t) {
return BSON("ts" << Timestamp(t, t) << "ns"
<< "a.a"
- << "v"
- << 2
- << "op"
+ << "v" << 2 << "op"
<< "i"
- << "o"
- << BSON("_id" << t << "a" << t));
+ << "o" << BSON("_id" << t << "a" << t));
}
TEST_F(OplogBufferCollectionTest, DefaultNamespace) {
@@ -623,7 +620,9 @@ TEST_F(OplogBufferCollectionTest, PopAndPeekReturnDocumentsInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
@@ -666,7 +665,9 @@ TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNewestOplogEntry) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
@@ -702,7 +703,9 @@ TEST_F(OplogBufferCollectionTest,
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(3), makeOplogEntry(4), makeOplogEntry(5),
+ makeOplogEntry(3),
+ makeOplogEntry(4),
+ makeOplogEntry(5),
};
ASSERT_BSONOBJ_EQ(*oplogBuffer.lastObjectPushed(_opCtx.get()), secondDoc);
@@ -929,7 +932,12 @@ void _testPushSentinelsProperly(
OplogBufferCollection oplogBuffer(storageInterface, nss);
oplogBuffer.startup(opCtx);
const std::vector<BSONObj> oplog = {
- BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
+ BSONObj(),
+ makeOplogEntry(1),
+ BSONObj(),
+ BSONObj(),
+ makeOplogEntry(2),
+ BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
pushDocsFn(opCtx, &oplogBuffer, oplog);
@@ -1016,7 +1024,8 @@ DEATH_TEST_F(
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(2), makeOplogEntry(1),
+ makeOplogEntry(2),
+ makeOplogEntry(1),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
@@ -1028,7 +1037,10 @@ TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- makeOplogEntry(1), makeOplogEntry(2), BSONObj(), makeOplogEntry(3),
+ makeOplogEntry(1),
+ makeOplogEntry(2),
+ BSONObj(),
+ makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]);
@@ -1149,7 +1161,12 @@ TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
- BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
+ BSONObj(),
+ makeOplogEntry(1),
+ BSONObj(),
+ BSONObj(),
+ makeOplogEntry(2),
+ BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.cbegin(), oplog.cend());
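Every braced initializer expanded in this file ends with a trailing comma, and clang-format honors that comma by keeping one element per line, which is why the previously one-line oplog vectors above now stack vertically. A standalone illustration of the two layouts:

    #include <string>
    #include <vector>

    std::string entry(int n) {
        return "entry-" + std::to_string(n);
    }

    int main() {
        // Trailing comma present: clang-format keeps one element per line.
        const std::vector<std::string> oneColumn = {
            entry(1),
            entry(2),
            entry(3),
        };

        // No trailing comma: elements may be bin-packed onto one line.
        const std::vector<std::string> packed = {entry(1), entry(2), entry(3)};

        return oneColumn.size() == 3 && packed.size() == 3 ? 0 : 1;
    }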
diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp
index 6d549dd1965..38a71114cf1 100644
--- a/src/mongo/db/repl/oplog_entry.cpp
+++ b/src/mongo/db/repl/oplog_entry.cpp
@@ -81,8 +81,7 @@ OplogEntry::CommandType parseCommandType(const BSONObj& objectField) {
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "Unknown oplog entry command type: " << commandString
- << " Object field: "
- << redact(objectField));
+ << " Object field: " << redact(objectField));
}
MONGO_UNREACHABLE;
}
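parseCommandType ends with uasserted (which throws) followed by MONGO_UNREACHABLE to document that control never falls out of the if/else chain. The same structure in standard C++, with a [[noreturn]] helper standing in for uasserted (helper names are invented for this sketch):

    #include <stdexcept>
    #include <string>

    enum class CommandType { kCreate, kDrop };

    [[noreturn]] void fail(const std::string& what) {
        throw std::invalid_argument(what);
    }

    CommandType parseCommandType(const std::string& commandString) {
        if (commandString == "create") {
            return CommandType::kCreate;
        } else if (commandString == "drop") {
            return CommandType::kDrop;
        } else {
            fail("Unknown oplog entry command type: " + commandString);
        }
        // Not reachable: every branch above returns or throws. A marker such
        // as MONGO_UNREACHABLE (or __builtin_unreachable) asserts that
        // invariant while silencing missing-return warnings.
        throw std::logic_error("unreachable");
    }

    int main() {
        return parseCommandType("create") == CommandType::kCreate ? 0 : 1;
    }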
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 9c4df1a1bec..3162319ab2b 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -161,11 +161,10 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
// sync source is now behind us, choose a new sync source to prevent going into rollback.
if (remoteLastOpApplied && (*remoteLastOpApplied < lastFetched)) {
return Status(ErrorCodes::InvalidSyncSource,
- str::stream() << "Sync source's last applied OpTime "
- << remoteLastOpApplied->toString()
- << " is older than our last fetched OpTime "
- << lastFetched.toString()
- << ". Choosing new sync source.");
+ str::stream()
+ << "Sync source's last applied OpTime " << remoteLastOpApplied->toString()
+ << " is older than our last fetched OpTime " << lastFetched.toString()
+ << ". Choosing new sync source.");
}
// If 'requireFresherSyncSource' is true, we must check that the sync source's
@@ -181,8 +180,7 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
return Status(ErrorCodes::InvalidSyncSource,
str::stream()
<< "Sync source must be ahead of me. My last fetched oplog optime: "
- << lastFetched.toString()
- << ", latest oplog optime of sync source: "
+ << lastFetched.toString() << ", latest oplog optime of sync source: "
<< remoteLastOpApplied->toString());
}
@@ -202,9 +200,7 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents,
return Status(ErrorCodes::InvalidBSON,
str::stream() << "our last optime fetched: " << lastFetched.toString()
<< ". failed to parse optime from first oplog on source: "
- << o.toString()
- << ": "
- << opTimeResult.getStatus().toString());
+ << o.toString() << ": " << opTimeResult.getStatus().toString());
}
auto opTime = opTimeResult.getValue();
if (opTime != lastFetched) {
@@ -289,15 +285,9 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (lastTS >= docTS) {
return Status(ErrorCodes::OplogOutOfOrder,
str::stream() << "Out of order entries in oplog. lastTS: "
- << lastTS.toString()
- << " outOfOrderTS:"
- << docTS.toString()
- << " in batch with "
- << info.networkDocumentCount
- << "docs; first-batch:"
- << first
- << ", doc:"
- << doc);
+ << lastTS.toString() << " outOfOrderTS:" << docTS.toString()
+ << " in batch with " << info.networkDocumentCount
+ << "docs; first-batch:" << first << ", doc:" << doc);
}
lastTS = docTS;
}
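validateDocuments rejects a batch whenever a document's timestamp is not strictly greater than its predecessor's (the lastTS >= docTS test above). A standalone version of that monotonicity check, reduced to integer timestamps:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // True when every timestamp is strictly greater than the one before it,
    // mirroring the lastTS >= docTS rejection in validateDocuments.
    bool timestampsStrictlyIncreasing(const std::vector<int>& ts) {
        return std::adjacent_find(ts.begin(), ts.end(), [](int lastTS, int docTS) {
                   return lastTS >= docTS;  // first out-of-order pair, if any
               }) == ts.end();
    }

    int main() {
        std::cout << timestampsStrictlyIncreasing({1, 2, 3}) << '\n';  // 1
        std::cout << timestampsStrictlyIncreasing({1, 3, 2}) << '\n';  // 0
        return 0;
    }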
diff --git a/src/mongo/db/repl/oplog_interface_mock.cpp b/src/mongo/db/repl/oplog_interface_mock.cpp
index 6352fa7566a..95930bf6d62 100644
--- a/src/mongo/db/repl/oplog_interface_mock.cpp
+++ b/src/mongo/db/repl/oplog_interface_mock.cpp
@@ -90,8 +90,7 @@ public:
str::stream()
<< "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << _nextOpTime.toBSON()
- << " cannot be found");
+ << _nextOpTime.toBSON() << " cannot be found");
}
// We shouldn't get any other error.
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index b95f8d2d4fc..00f76f96c4d 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -124,9 +124,9 @@ TEST_F(OplogTest, LogOpReturnsOpTimeOnSuccessfulInsertIntoOplogCollection) {
<< "OpTime returned from logOp() did not match that in the oplog entry written to the "
"oplog: "
<< oplogEntry.toBSON();
- ASSERT(OpTypeEnum::kNoop == oplogEntry.getOpType()) << "Expected 'n' op type but found '"
- << OpType_serializer(oplogEntry.getOpType())
- << "' instead: " << oplogEntry.toBSON();
+ ASSERT(OpTypeEnum::kNoop == oplogEntry.getOpType())
+ << "Expected 'n' op type but found '" << OpType_serializer(oplogEntry.getOpType())
+ << "' instead: " << oplogEntry.toBSON();
ASSERT_BSONOBJ_EQ(msgObj, oplogEntry.getObject());
// Ensure that the msg optime returned is the same as the last optime in the ReplClientInfo.
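The ASSERT(...) << ... form above streams extra diagnostics into the assertion, and the reformat now breaks after the condition so the streamed message hangs together. A toy macro with the same streaming shape, to show how the idiom works (a sketch, not MongoDB's unittest framework):

    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    // Minimal stand-in for a streaming assertion: when the condition fails,
    // everything streamed after the macro is collected, printed, and the
    // process aborts once the full message has been built.
    struct Failer {
        std::ostringstream msg;
        ~Failer() {
            std::cerr << "assertion failed: " << msg.str() << '\n';
            std::abort();
        }
        template <typename T>
        Failer& operator<<(const T& v) {
            msg << v;
            return *this;
        }
    };

    // If 'cond' holds, the else branch (and the Failer) never materializes.
    #define TOY_ASSERT(cond) \
        if (cond)            \
            ;                \
        else                 \
            Failer()

    int main() {
        int opType = 0;
        TOY_ASSERT(opType == 0) << "Expected 'n' op type but found '" << opType << "' instead";
        return 0;
    }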
diff --git a/src/mongo/db/repl/optime_extract_test.cpp b/src/mongo/db/repl/optime_extract_test.cpp
index d1e2b0d7e49..9192738a31c 100644
--- a/src/mongo/db/repl/optime_extract_test.cpp
+++ b/src/mongo/db/repl/optime_extract_test.cpp
@@ -51,8 +51,7 @@ TEST(ExtractBSON, ExtractOpTimeField) {
// Missing timestamp field.
obj = BSON("a" << BSON("ts"
<< "notATimestamp"
- << "t"
- << 2));
+ << "t" << 2));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, bsonExtractOpTimeField(obj, "a", &opTime));
// Wrong typed timestamp field.
obj = BSON("a" << BSON("t" << 2));
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 89545a0fb4a..5ec5ae968c1 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -202,23 +202,20 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
} else {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized option in " << kReadConcernFieldName
- << ": "
- << fieldName);
+ << ": " << fieldName);
}
}
if (_afterClusterTime && _opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
- << " and "
- << kAfterOpTimeFieldName);
+ << " and " << kAfterOpTimeFieldName);
}
if (_afterClusterTime && _atClusterTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
- << " and "
- << kAtClusterTimeFieldName);
+ << " and " << kAtClusterTimeFieldName);
}
// Note: 'available' should not be used with after cluster time, as cluster time can wait for
@@ -228,30 +225,24 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
getLevel() != ReadConcernLevel::kLocalReadConcern &&
getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAfterClusterTimeFieldName << " field can be set only if "
- << kLevelFieldName
- << " is equal to "
- << kMajorityReadConcernStr
- << ", "
- << kLocalReadConcernStr
- << ", or "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAfterClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName << " is equal to " << kMajorityReadConcernStr << ", "
+ << kLocalReadConcernStr << ", or " << kSnapshotReadConcernStr);
}
if (_opTime && getLevel() == ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAfterOpTimeFieldName << " field cannot be set if "
- << kLevelFieldName
- << " is equal to "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAfterOpTimeFieldName << " field cannot be set if " << kLevelFieldName
+ << " is equal to " << kSnapshotReadConcernStr);
}
if (_atClusterTime && getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
return Status(ErrorCodes::InvalidOptions,
- str::stream() << kAtClusterTimeFieldName << " field can be set only if "
- << kLevelFieldName
- << " is equal to "
- << kSnapshotReadConcernStr);
+ str::stream()
+ << kAtClusterTimeFieldName << " field can be set only if "
+ << kLevelFieldName << " is equal to " << kSnapshotReadConcernStr);
}
if (_afterClusterTime && _afterClusterTime == LogicalTime::kUninitialized) {
@@ -294,8 +285,7 @@ Status ReadConcernArgs::upconvertReadConcernLevelToSnapshot() {
if (_opTime) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Cannot upconvert the readConcern level to 'snapshot' when '"
- << kAfterOpTimeFieldName
- << "' is provided");
+ << kAfterOpTimeFieldName << "' is provided");
}
_originalLevel = _level;
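The hunks above restate ReadConcernArgs::initialize's compatibility rules: afterClusterTime excludes both afterOpTime and atClusterTime, afterClusterTime needs level majority, local, or snapshot, afterOpTime is forbidden with snapshot, and atClusterTime requires snapshot. A condensed standalone validator over the same rules (field types simplified to booleans; not the ReadConcernArgs API):

    #include <iostream>
    #include <optional>
    #include <string>

    enum class Level { kLocal, kMajority, kLinearizable, kAvailable, kSnapshot };

    // Each check mirrors one of the InvalidOptions branches above.
    std::optional<std::string> validate(bool afterClusterTime,
                                        bool afterOpTime,
                                        bool atClusterTime,
                                        Level level) {
        if (afterClusterTime && afterOpTime)
            return "Can not specify both afterClusterTime and afterOpTime";
        if (afterClusterTime && atClusterTime)
            return "Can not specify both afterClusterTime and atClusterTime";
        if (afterClusterTime && level != Level::kMajority && level != Level::kLocal &&
            level != Level::kSnapshot)
            return "afterClusterTime requires level majority, local, or snapshot";
        if (afterOpTime && level == Level::kSnapshot)
            return "afterOpTime cannot be set if level is snapshot";
        if (atClusterTime && level != Level::kSnapshot)
            return "atClusterTime can be set only if level is snapshot";
        return std::nullopt;  // no conflicting options
    }

    int main() {
        auto err = validate(true, false, false, Level::kLinearizable);
        std::cout << (err ? *err : "ok") << '\n';
        return 0;
    }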
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index ed6ec48875c..d6907a31f26 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -39,13 +39,12 @@ namespace {
TEST(ReadAfterParse, OpTimeOnly) {
ReadConcernArgs readConcern;
- ASSERT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName << 2)))));
ASSERT_TRUE(readConcern.getArgsOpTime());
ASSERT_TRUE(!readConcern.getArgsAfterClusterTime());
@@ -59,8 +58,7 @@ TEST(ReadAfterParse, AfterClusterTimeOnly) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterClusterTimeFieldName
<< afterClusterTime.asTimestamp()))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
@@ -73,13 +71,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelLocal) {
ReadConcernArgs readConcern;
// Must have level=majority
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "local"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -91,13 +88,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
ReadConcernArgs readConcern;
// Must have level=majority
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -108,13 +104,12 @@ TEST(ReadAfterParse, AfterClusterTimeAndLevelMajority) {
TEST(ReadAfterParse, AfterClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
auto argsAfterClusterTime = readConcern.getArgsAfterClusterTime();
ASSERT_TRUE(argsAfterClusterTime);
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -127,8 +122,7 @@ TEST(ReadAfterParse, AtClusterTimeOnly) {
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
<< atClusterTime.asTimestamp()))));
}
@@ -136,13 +130,12 @@ TEST(ReadAfterParse, AtClusterTimeOnly) {
TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
auto argsAtClusterTime = readConcern.getArgsAtClusterTime();
ASSERT_TRUE(argsAtClusterTime);
ASSERT_FALSE(readConcern.getArgsOpTime());
@@ -153,40 +146,37 @@ TEST(ReadAfterParse, AtClusterTimeAndLevelSnapshot) {
TEST(ReadAfterParse, AtClusterTimeAndLevelMajority) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelLocal) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "local"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "local"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelAvailable) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "available"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "available"))));
}
TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
@@ -194,8 +184,7 @@ TEST(ReadAfterParse, AtClusterTimeAndLevelLinearizable) {
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
<< atClusterTime.asTimestamp()
<< ReadConcernArgs::kLevelFieldName
@@ -206,8 +195,7 @@ TEST(ReadAfterParse, LevelMajorityOnly) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -219,8 +207,7 @@ TEST(ReadAfterParse, LevelSnapshotOnly) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
ASSERT_TRUE(!readConcern.getArgsOpTime());
@@ -234,15 +221,12 @@ TEST(ReadAfterParse, ReadCommittedFullSpecification) {
auto afterClusterTime = LogicalTime(Timestamp(100, 200));
ASSERT_NOT_OK(readConcern.initialize(BSON(
"find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
<< 2)
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "majority"))));
+ << ReadConcernArgs::kAfterClusterTimeFieldName << afterClusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "majority"))));
}
TEST(ReadAfterParse, Empty) {
@@ -257,58 +241,51 @@ TEST(ReadAfterParse, Empty) {
TEST(ReadAfterParse, BadRootType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << "x")));
+ ASSERT_NOT_OK(
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName << "x")));
}
TEST(ReadAfterParse, BadAtClusterTimeType) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << 2
- << ReadConcernArgs::kLevelFieldName
+ << 2 << ReadConcernArgs::kLevelFieldName
<< "snapshot"))));
}
TEST(ReadAfterParse, BadAtClusterTimeValue) {
ReadConcernArgs readConcern;
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << LogicalTime::kUninitialized.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << LogicalTime::kUninitialized.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterParse, BadOpTimeType) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
}
TEST(ReadAfterParse, OpTimeNotNeededForValidReadConcern) {
ReadConcernArgs readConcern;
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSONObj())));
}
TEST(ReadAfterParse, NoOpTimeTS) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << 2)))));
}
@@ -316,40 +293,36 @@ TEST(ReadAfterParse, NoOpTimeTS) {
TEST(ReadAfterParse, NoOpTimeTerm) {
ReadConcernArgs readConcern;
ASSERT_NOT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTSType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(
- BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << BSON("x" << 1) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << BSON("x" << 1)
+ << OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTermType) {
ReadConcernArgs readConcern;
- ASSERT_NOT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(1, 0) << OpTime::kTermFieldName
- << "y")))));
+ ASSERT_NOT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(1, 0)
+ << OpTime::kTermFieldName << "y")))));
}
TEST(ReadAfterParse, BadLevelType) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << 7))));
}
@@ -357,8 +330,7 @@ TEST(ReadAfterParse, BadLevelValue) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::FailedToParse,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "seven is not a real level"))));
}
@@ -367,39 +339,35 @@ TEST(ReadAfterParse, BadOption) {
ReadConcernArgs readConcern;
ASSERT_EQ(ErrorCodes::InvalidOptions,
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON("asdf" << 1))));
}
TEST(ReadAfterParse, AtClusterTimeAndAfterClusterTime) {
ReadConcernArgs readConcern;
auto clusterTime = LogicalTime(Timestamp(20, 30));
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAtClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << clusterTime.asTimestamp()
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAtClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kAfterClusterTimeFieldName
+ << clusterTime.asTimestamp()
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterParse, AfterOpTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30)
- << OpTime::kTermFieldName
- << 2)
- << ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)
+ << ReadConcernArgs::kLevelFieldName << "snapshot"))));
}
TEST(ReadAfterSerialize, Empty) {
@@ -430,10 +398,10 @@ TEST(ReadAfterSerialize, AfterOpTimeOnly) {
ReadConcernArgs readConcern(OpTime(Timestamp(20, 30), 2), boost::none);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(BSON(
- ReadConcernArgs::kReadConcernFieldName << BSON(
- ReadConcernArgs::kAfterOpTimeFieldName << BSON(
- OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName << BSON(
+ OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -455,11 +423,10 @@ TEST(ReadAfterSerialize, iAfterCLusterTimeAndLevel) {
ReadConcernArgs readConcern(afterClusterTime, ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "majority"
- << ReadConcernArgs::kAfterClusterTimeFieldName
- << afterClusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "majority" << ReadConcernArgs::kAfterClusterTimeFieldName
+ << afterClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -470,13 +437,11 @@ TEST(ReadAfterSerialize, AfterOpTimeAndLevel) {
ReadConcernLevel::kMajorityReadConcern);
readConcern.appendInfo(&builder);
- BSONObj expectedObj(BSON(
- ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName
- << "majority"
- << ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2))));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName << BSON(
+ ReadConcernArgs::kLevelFieldName
+ << "majority" << ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -486,8 +451,7 @@ TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "snapshot"
<< ReadConcernArgs::kAtClusterTimeFieldName
@@ -495,11 +459,10 @@ TEST(ReadAfterSerialize, AtClusterTimeAndLevelSnapshot) {
readConcern.appendInfo(&builder);
- BSONObj expectedObj(
- BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kLevelFieldName << "snapshot"
- << ReadConcernArgs::kAtClusterTimeFieldName
- << atClusterTime.asTimestamp())));
+ BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "snapshot" << ReadConcernArgs::kAtClusterTimeFieldName
+ << atClusterTime.asTimestamp())));
ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
@@ -516,8 +479,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, EmptyLevel) {
TEST(UpconvertReadConcernLevelToSnapshot, LevelLocal) {
ReadConcernArgs readConcern;
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "local"))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
@@ -530,8 +492,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelMajority) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT(ReadConcernLevel::kMajorityReadConcern == readConcern.getLevel());
@@ -544,8 +505,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelSnapshot) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "snapshot"))));
ASSERT(ReadConcernLevel::kSnapshotReadConcern == readConcern.getLevel());
@@ -558,8 +518,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelSnapshotWithAtClusterTime) {
ReadConcernArgs readConcern;
auto atClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "snapshot"
<< ReadConcernArgs::kAtClusterTimeFieldName
@@ -577,8 +536,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, AfterClusterTime) {
ReadConcernArgs readConcern;
auto afterClusterTime = LogicalTime(Timestamp(20, 30));
ASSERT_OK(readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterClusterTimeFieldName
<< afterClusterTime.asTimestamp()))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
@@ -594,8 +552,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelAvailable) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "available"))));
ASSERT(ReadConcernLevel::kAvailableReadConcern == readConcern.getLevel());
@@ -608,8 +565,7 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelLinearizable) {
ReadConcernArgs readConcern;
ASSERT_OK(
readConcern.initialize(BSON("find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
+ << "test" << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "linearizable"))));
ASSERT(ReadConcernLevel::kLinearizableReadConcern == readConcern.getLevel());
@@ -620,13 +576,12 @@ TEST(UpconvertReadConcernLevelToSnapshot, LevelLinearizable) {
TEST(UpconvertReadConcernLevelToSnapshot, AfterOpTime) {
ReadConcernArgs readConcern;
- ASSERT_OK(readConcern.initialize(BSON(
- "find"
- << "test"
- << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_OK(readConcern.initialize(BSON("find"
+ << "test" << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName
+ << Timestamp(20, 30)
+ << OpTime::kTermFieldName << 2)))));
ASSERT(ReadConcernLevel::kLocalReadConcern == readConcern.getLevel());
ASSERT_TRUE(readConcern.getArgsOpTime());
diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp
index 7614bd7ff7b..db794713203 100644
--- a/src/mongo/db/repl/repl_set_config.cpp
+++ b/src/mongo/db/repl/repl_set_config.cpp
@@ -138,17 +138,16 @@ Status ReplSetConfig::_initialize(const BSONObj& cfg, bool forInitiate, OID defa
if (memberElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of " << kMembersFieldName << "."
- << memberElement.fieldName()
- << " to be Object, but found "
+ << memberElement.fieldName() << " to be Object, but found "
<< typeName(memberElement.type()));
}
const auto& memberBSON = memberElement.Obj();
try {
_members.emplace_back(memberBSON, &_tagConfig);
} catch (const DBException& ex) {
- return Status(
- ErrorCodes::InvalidReplicaSetConfig,
- str::stream() << ex.toStatus().toString() << " for member:" << memberBSON);
+ return Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream()
+ << ex.toStatus().toString() << " for member:" << memberBSON);
}
}
@@ -348,43 +347,35 @@ Status ReplSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
if (_customWriteConcernModes.find(modeElement.fieldNameStringData()) !=
_customWriteConcernModes.end()) {
return Status(ErrorCodes::Error(51001),
- str::stream() << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
- << " contains multiple fields named "
- << modeElement.fieldName());
+ str::stream()
+ << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
+ << " contains multiple fields named " << modeElement.fieldName());
}
if (modeElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << " to be an Object, not "
- << typeName(modeElement.type()));
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << " to be an Object, not " << typeName(modeElement.type()));
}
ReplSetTagPattern pattern = _tagConfig.makePattern();
for (auto&& constraintElement : modeElement.Obj()) {
if (!constraintElement.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << '.'
- << constraintElement.fieldName()
- << " to be a number, not "
- << typeName(constraintElement.type()));
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName() << " to be a number, not "
+ << typeName(constraintElement.type()));
}
const int minCount = constraintElement.numberInt();
if (minCount <= 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Value of " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName
- << '.'
- << modeElement.fieldName()
- << '.'
- << constraintElement.fieldName()
- << " must be positive, but found "
- << minCount);
+ str::stream()
+ << "Value of " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName()
+ << " must be positive, but found " << minCount);
}
status = _tagConfig.addTagCountConstraintToPattern(
&pattern, constraintElement.fieldNameStringData(), minCount);
@@ -420,8 +411,7 @@ Status ReplSetConfig::validate() const {
if (_replSetName.empty()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Replica set configuration must have non-empty "
- << kIdFieldName
- << " field");
+ << kIdFieldName << " field");
}
if (_heartbeatInterval < Milliseconds(0)) {
return Status(ErrorCodes::BadValue,
@@ -506,41 +496,22 @@ Status ReplSetConfig::validate() const {
const MemberConfig& memberJ = _members[j];
if (memberI.getId() == memberJ.getId()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Found two member configurations with same "
- << MemberConfig::kIdFieldName
- << " field, "
- << kMembersFieldName
- << "."
- << i
- << "."
- << MemberConfig::kIdFieldName
- << " == "
- << kMembersFieldName
- << "."
- << j
- << "."
- << MemberConfig::kIdFieldName
- << " == "
- << memberI.getId());
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kIdFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kIdFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kIdFieldName << " == " << memberI.getId());
}
if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Found two member configurations with same "
- << MemberConfig::kHostFieldName
- << " field, "
- << kMembersFieldName
- << "."
- << i
- << "."
- << MemberConfig::kHostFieldName
- << " == "
- << kMembersFieldName
- << "."
- << j
- << "."
- << MemberConfig::kHostFieldName
- << " == "
- << memberI.getHostAndPort().toString());
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kHostFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kHostFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kHostFieldName
+ << " == " << memberI.getHostAndPort().toString());
}
}
}
@@ -593,9 +564,7 @@ Status ReplSetConfig::validate() const {
str::stream()
<< "Either all host names in a replica set configuration must be localhost "
"references, or none must be; found "
- << localhostCount
- << " out of "
- << _members.size());
+ << localhostCount << " out of " << _members.size());
}
if (voterCount > kMaxVotingMembers || voterCount == 0) {
@@ -636,9 +605,9 @@ Status ReplSetConfig::validate() const {
}
if (_protocolVersion != 1) {
return Status(ErrorCodes::BadValue,
- str::stream() << kProtocolVersionFieldName
- << " of 1 is the only supported value. Found: "
- << _protocolVersion);
+ str::stream()
+ << kProtocolVersionFieldName
+ << " of 1 is the only supported value. Found: " << _protocolVersion);
}
if (_configServer) {
@@ -708,8 +677,7 @@ Status ReplSetConfig::checkIfWriteConcernCanBeSatisfied(
// write concern mode.
return Status(ErrorCodes::UnsatisfiableWriteConcern,
str::stream() << "Not enough nodes match write concern mode \""
- << writeConcern.wMode
- << "\"");
+ << writeConcern.wMode << "\"");
} else {
int nodesRemaining = writeConcern.wNumNodes;
for (size_t j = 0; j < _members.size(); ++j) {
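The validate() hunks above implement a pairwise scan: any two members sharing an _id or a host:port make the configuration invalid, and the error names both offending indices. The same check in a standalone form (Member reduced to two plain fields; message text abbreviated from the originals):

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct Member {
        int id;
        std::string host;
    };

    // Pairwise scan mirroring ReplSetConfig::validate: every member must
    // have a unique _id and a unique host:port.
    std::optional<std::string> findDuplicate(const std::vector<Member>& members) {
        for (size_t i = 0; i < members.size(); ++i) {
            for (size_t j = i + 1; j < members.size(); ++j) {
                if (members[i].id == members[j].id)
                    return "Found two member configurations with same _id field: members." +
                        std::to_string(i) + " and members." + std::to_string(j);
                if (members[i].host == members[j].host)
                    return "Found two member configurations with same host field: members." +
                        std::to_string(i) + " and members." + std::to_string(j);
            }
        }
        return std::nullopt;
    }

    int main() {
        auto err = findDuplicate({{1, "h1:27017"}, {2, "h2:27017"}, {2, "h3:27017"}});
        std::cout << (err ? *err : "ok") << '\n';
        return 0;
    }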
diff --git a/src/mongo/db/repl/repl_set_config_checks.cpp b/src/mongo/db/repl/repl_set_config_checks.cpp
index 14cc8e99e61..5c0eeecdb97 100644
--- a/src/mongo/db/repl/repl_set_config_checks.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks.cpp
@@ -63,10 +63,8 @@ StatusWith<int> findSelfInConfig(ReplicationCoordinatorExternalState* externalSt
if (meConfigs.empty()) {
return StatusWith<int>(ErrorCodes::NodeNotFound,
str::stream() << "No host described in new configuration "
- << newConfig.getConfigVersion()
- << " for replica set "
- << newConfig.getReplSetName()
- << " maps to this node");
+ << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName() << " maps to this node");
}
if (meConfigs.size() > 1) {
str::stream message;
@@ -95,11 +93,9 @@ Status checkElectable(const ReplSetConfig& newConfig, int configIndex) {
if (!myConfig.isElectable()) {
return Status(ErrorCodes::NodeNotElectable,
str::stream() << "This node, " << myConfig.getHostAndPort().toString()
- << ", with _id "
- << myConfig.getId()
+ << ", with _id " << myConfig.getId()
<< " is not electable under the new configuration version "
- << newConfig.getConfigVersion()
- << " for replica set "
+ << newConfig.getConfigVersion() << " for replica set "
<< newConfig.getReplSetName());
}
return Status::OK();
@@ -133,8 +129,7 @@ Status validateArbiterPriorities(const ReplSetConfig& config) {
if (iter->isArbiter() && iter->getPriority() != 0) {
return Status(ErrorCodes::InvalidReplicaSetConfig,
str::stream() << "Member " << iter->getHostAndPort().toString()
- << " is an arbiter but has priority "
- << iter->getPriority()
+ << " is an arbiter but has priority " << iter->getPriority()
<< ". Arbiter priority must be 0.");
}
}
@@ -164,10 +159,8 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream()
<< "New replica set configuration version must be greater than old, but "
- << newConfig.getConfigVersion()
- << " is not greater than "
- << oldConfig.getConfigVersion()
- << " for replica set "
+ << newConfig.getConfigVersion() << " is not greater than "
+ << oldConfig.getConfigVersion() << " for replica set "
<< newConfig.getReplSetName());
}
@@ -175,8 +168,7 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set name; "
"old was "
- << oldConfig.getReplSetName()
- << ", and new is "
+ << oldConfig.getReplSetName() << ", and new is "
<< newConfig.getReplSetName());
}
@@ -184,8 +176,7 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set ID; "
"old was "
- << oldConfig.getReplicaSetId()
- << ", and new is "
+ << oldConfig.getReplicaSetId() << ", and new is "
<< newConfig.getReplicaSetId());
}
@@ -216,18 +207,14 @@ Status validateOldAndNewConfigsCompatible(const ReplSetConfig& oldConfig,
}
if (hostsEqual && !idsEqual) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() << "New and old configurations both have members with "
- << MemberConfig::kHostFieldName
- << " of "
- << mOld->getHostAndPort().toString()
- << " but in the new configuration the "
- << MemberConfig::kIdFieldName
- << " field is "
- << mNew->getId()
- << " and in the old configuration it is "
- << mOld->getId()
- << " for replica set "
- << newConfig.getReplSetName());
+ str::stream()
+ << "New and old configurations both have members with "
+ << MemberConfig::kHostFieldName << " of "
+ << mOld->getHostAndPort().toString()
+ << " but in the new configuration the "
+ << MemberConfig::kIdFieldName << " field is " << mNew->getId()
+ << " and in the old configuration it is " << mOld->getId()
+ << " for replica set " << newConfig.getReplSetName());
}
// At this point, the _id and host fields are equal, so we're looking at the old and
// new configurations for the same member node.
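validateOldAndNewConfigsCompatible, reformatted above, enforces three identity rules before the member-by-member comparison: the new version must strictly exceed the old one, and the set name and replica set ID must be unchanged. A standalone reduction of those checks (Config flattened to plain fields; not the ReplSetConfig type):

    #include <iostream>
    #include <optional>
    #include <string>

    struct Config {
        long long version;
        std::string setName;
        std::string setId;
    };

    // The ordering/identity rules from validateOldAndNewConfigsCompatible.
    std::optional<std::string> checkCompatible(const Config& oldCfg, const Config& newCfg) {
        if (newCfg.version <= oldCfg.version)
            return "New replica set configuration version must be greater than old";
        if (newCfg.setName != oldCfg.setName)
            return "New and old configurations differ in replica set name";
        if (newCfg.setId != oldCfg.setId)
            return "New and old configurations differ in replica set ID";
        return std::nullopt;
    }

    int main() {
        Config oldCfg{1, "rs0", "abc"};
        Config newCfg{3, "rs0", "abc"};
        auto err = checkCompatible(oldCfg, newCfg);
        std::cout << (err ? *err : "compatible") << '\n';
        return 0;
    }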
diff --git a/src/mongo/db/repl/repl_set_config_checks_test.cpp b/src/mongo/db/repl/repl_set_config_checks_test.cpp
index b8579f1f6e2..c887e11f69b 100644
--- a/src/mongo/db/repl/repl_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks_test.cpp
@@ -49,34 +49,28 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_VersionMustBe1) {
rses.addSelf(HostAndPort("h1"));
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")))));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
validateConfigForInitiate(&rses, config, getGlobalServiceContext()).getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2")
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock notPresentExternalState;
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -99,21 +93,17 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
TEST_F(ServiceContextTest, ValidateConfigForInitiate_SelfMustBeElectable) {
ReplSetConfig config;
- ASSERT_OK(config.initializeForInitiate(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ config.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -128,11 +118,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_WriteConcernMustBeSatisfiab
ASSERT_OK(
config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))
<< "settings"
@@ -152,55 +138,37 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_ArbiterPriorityMustBeZeroOr
ReplSetConfig twoConfig;
ASSERT_OK(zeroConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "arbiterOnly"
- << true)
+ << "priority" << 0
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(oneConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 1
- << "arbiterOnly"
- << true)
+ << "priority" << 1
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(twoConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 2
- << "arbiterOnly"
- << true)
+ << "priority" << 2
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -228,11 +196,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigVersionNumberMustB
// Two configurations, identical except for version.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -242,11 +206,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigVersionNumberMustB
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -296,11 +256,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetNa
// Two configurations, compatible except for set name.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -310,11 +266,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetNa
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs1"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -345,35 +297,25 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotChangeSetId
// Two configurations, compatible except for set ID.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 3 << "host"
<< "h3"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()))));
+ << "settings" << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 3 << "host"
<< "h3"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()))));
+ << "settings" << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -402,57 +344,40 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotFlipBuildIn
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
+ << "buildIndexes" << false
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes"
- << true
- << "priority"
- << 0)
+ << "buildIndexes" << true
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes" << false
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -484,51 +409,37 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigMustNotFlipArbiter
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << false)
+ << "arbiterOnly" << false)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "arbiterOnly"
- << false)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly" << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -562,11 +473,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -582,10 +489,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
ASSERT_OK(
legalNewConfigWithNewHostAndId.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
@@ -607,11 +511,8 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
//
ASSERT_OK(illegalNewConfigReusingHost.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion"
+ << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -638,10 +539,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_HostAndIdRemappingRestricte
//
ASSERT_OK(illegalNewConfigReusingId.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
@@ -662,11 +560,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_MustFindSelf) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -677,11 +571,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_MustFindSelf) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -738,69 +628,46 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_ArbiterPriorityValueMustBeZ
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(zeroConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "arbiterOnly"
- << true)
+ << "priority" << 0
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(oneConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 1
- << "arbiterOnly"
- << true)
+ << "priority" << 1
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(twoConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 2
- << "arbiterOnly"
- << true)
+ << "priority" << 2
+ << "arbiterOnly" << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
@@ -831,11 +698,7 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_SelfMustEndElectable) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -846,17 +709,12 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_SelfMustEndElectable) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -880,10 +738,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_NewConfigInvalid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
+ << "version" << 2 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
@@ -905,22 +760,14 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigInvalid) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -948,22 +795,14 @@ TEST_F(ServiceContextTest, ValidateConfigForReconfig_NewConfigWriteConcernNotSat
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"))
<< "settings"
@@ -991,11 +830,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigInvalid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1015,15 +850,10 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigValid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 1 << "host"
<< "h3")))));
@@ -1041,11 +871,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigWriteConcernNotSati
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"))
<< "settings"
@@ -1065,11 +891,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigInvalid)
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1089,11 +911,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigValid) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1112,11 +930,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigWriteConc
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1137,11 +951,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsValidConfig) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1151,11 +961,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsValidConfig) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -1176,11 +982,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsSelfPresent) {
ReplSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -1190,11 +992,7 @@ TEST_F(ServiceContextTest, ValidateForReconfig_ForceStillNeedsSelfPresent) {
ReplSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h3")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/repl_set_config_test.cpp b/src/mongo/db/repl/repl_set_config_test.cpp
index c795d711aa3..88d36b1b174 100644
--- a/src/mongo/db/repl/repl_set_config_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_test.cpp
@@ -63,11 +63,7 @@ TEST(ReplSetConfig, ParseMinimalConfigAndCheckDefaults) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -92,32 +88,24 @@ TEST(ReplSetConfig, ParseMinimalConfigAndCheckDefaults) {
TEST(ReplSetConfig, ParseLargeConfigAndCheckAccessors) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1234
- << "members"
- << BSON_ARRAY(BSON("_id" << 234 << "host"
- << "localhost:12345"
- << "tags"
- << BSON("NYC"
- << "NY")))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("getLastErrorDefaults"
- << BSON("w"
- << "majority")
- << "getLastErrorModes"
- << BSON("eastCoast" << BSON("NYC" << 1))
- << "chainingAllowed"
- << false
- << "heartbeatIntervalMillis"
- << 5000
- << "heartbeatTimeoutSecs"
- << 120
- << "electionTimeoutMillis"
- << 10))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1234 << "members"
+ << BSON_ARRAY(BSON("_id" << 234 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "protocolVersion" << 1 << "settings"
+ << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("eastCoast" << BSON("NYC" << 1)) << "chainingAllowed"
+ << false << "heartbeatIntervalMillis" << 5000
+ << "heartbeatTimeoutSecs" << 120 << "electionTimeoutMillis"
+ << 10))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("rs0", config.getReplSetName());
ASSERT_EQUALS(1234, config.getConfigVersion());
@@ -139,27 +127,20 @@ TEST(ReplSetConfig, ParseLargeConfigAndCheckAccessors) {
TEST(ReplSetConfig, GetConnectionStringFiltersHiddenNodes) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:11111")
- << BSON("_id" << 1 << "host"
- << "localhost:22222"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 2 << "host"
- << "localhost:33333"
- << "hidden"
- << true
- << "priority"
- << 0)
- << BSON("_id" << 3 << "host"
- << "localhost:44444")))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:11111")
+ << BSON("_id" << 1 << "host"
+ << "localhost:22222"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 2 << "host"
+ << "localhost:33333"
+ << "hidden" << true << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "localhost:44444")))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(ConnectionString::forReplicaSet(
"rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}})
@@ -169,31 +150,22 @@ TEST(ReplSetConfig, GetConnectionStringFiltersHiddenNodes) {
TEST(ReplSetConfig, MajorityCalculationThreeVotersNoArbiters) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1")
- << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 5 << "host"
- << "h5:1"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes" << 0 << "priority" << 0)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
@@ -201,37 +173,25 @@ TEST(ReplSetConfig, MajorityCalculationThreeVotersNoArbiters) {
TEST(ReplSetConfig, MajorityCalculationNearlyHalfArbiters) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id"
- << 0)
- << BSON("host"
- << "node2:12345"
- << "_id"
- << 1)
- << BSON("host"
- << "node3:12345"
- << "_id"
- << 2)
- << BSON("host"
- << "node4:12345"
- << "_id"
- << 3
- << "arbiterOnly"
- << true)
- << BSON("host"
- << "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "arbiterOnly" << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
@@ -240,68 +200,45 @@ TEST(ReplSetConfig, MajorityCalculationEvenNumberOfMembers) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)))));
+ << "_id" << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
TEST(ReplSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id"
- << 0)
- << BSON("host"
- << "node2:12345"
- << "_id"
- << 1
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("host"
- << "node3:12345"
- << "_id"
- << 2
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("host"
- << "node4:12345"
- << "_id"
- << 3)
- << BSON("host"
- << "node5:12345"
- << "_id"
- << 4)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(
+ BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1 << "votes" << 0 << "priority" << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2 << "votes" << 0 << "priority" << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
}
@@ -317,18 +254,14 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingIdField) {
// Replica set name must be present.
ASSERT_EQUALS(
ErrorCodes::NoSuchKey,
- config.initialize(
- BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ config.initialize(BSON("version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
// Empty repl set name parses, but does not validate.
ASSERT_OK(config.initialize(BSON("_id"
<< ""
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
@@ -341,9 +274,7 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
ASSERT_EQUALS(ErrorCodes::NoSuchKey,
config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
@@ -351,29 +282,19 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
<< "rs0"
<< "version"
<< "1"
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1.0
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1.0 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 0.0
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 0.0 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
@@ -381,9 +302,7 @@ TEST(ReplSetConfig, ParseFailsWithBadOrMissingVersionField) {
<< "rs0"
<< "version"
<< static_cast<long long>(std::numeric_limits<int>::max()) + 1
- << "protocolVersion"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
@@ -394,21 +313,13 @@ TEST(ReplSetConfig, ParseFailsWithBadMembers) {
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< "localhost:23456"))));
ASSERT_NOT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "localhost:12345")))));
}
@@ -417,11 +328,7 @@ TEST(ReplSetConfig, ParseFailsWithLocalNonLocalHostMix) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost")
<< BSON("_id" << 1 << "host"
@@ -433,15 +340,11 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
ReplSetConfig config;
const BSONObj configBsonNoElectableNodes = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -450,57 +353,41 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
ASSERT_OK(config.initialize(configBsonNoElectableNodes));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesOneArbiter = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly"
- << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "priority"
- << 0)));
+ const BSONObj configBsonNoElectableNodesOneArbiter =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority" << 0)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesTwoArbiters = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly"
- << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "arbiterOnly"
- << 1)));
+ const BSONObj configBsonNoElectableNodesTwoArbiters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "arbiterOnly" << 1)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneElectableNode = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -511,46 +398,30 @@ TEST(ReplSetConfig, ParseFailsWithNoElectableNodes) {
TEST(ReplSetConfig, ParseFailsWithTooFewVoters) {
ReplSetConfig config;
- const BSONObj configBsonNoVoters = BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "votes"
- << 0
- << "priority"
- << 0)));
+ const BSONObj configBsonNoVoters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes" << 0 << "priority" << 0)));
ASSERT_OK(config.initialize(configBsonNoVoters));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneVoter = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "votes"
- << 0
- << "priority"
+ << "votes" << 0 << "priority"
<< 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
- << "votes"
- << 1)));
+ << "votes" << 1)));
ASSERT_OK(config.initialize(configBsonOneVoter));
ASSERT_OK(config.validate());
}
@@ -567,11 +438,7 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateHost) {
ReplSetConfig config;
const BSONObj configBson = BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1")
<< BSON("_id" << 1 << "host"
@@ -621,14 +488,11 @@ TEST(ReplSetConfig, ParseFailsWithTooManyNodes) {
TEST(ReplSetConfig, ParseFailsWithUnexpectedField) {
ReplSetConfig config;
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "unexpectedfield"
- << "value"));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "unexpectedfield"
+ << "value"));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -636,11 +500,7 @@ TEST(ReplSetConfig, ParseFailsWithNonArrayMembersField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< "value"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -649,11 +509,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericHeartbeatIntervalMillisField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -671,11 +527,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericElectionTimeoutMillisField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -688,11 +540,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -705,11 +553,7 @@ TEST(ReplSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -722,11 +566,7 @@ TEST(ReplSetConfig, ParseFailsWithNonBoolConfigServerField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "configsvr"
@@ -738,11 +578,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectSettingsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -754,11 +590,7 @@ TEST(ReplSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -771,11 +603,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -788,11 +616,7 @@ TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -805,11 +629,7 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -824,20 +644,16 @@ TEST(ReplSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
TEST(ReplSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
ReplSetConfig config;
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags"
- << BSON("tag"
- << "yes")))
- << "settings"
- << BSON("getLastErrorModes" << BSON("one" << 1))));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings" << BSON("getLastErrorModes" << BSON("one" << 1))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -846,11 +662,7 @@ TEST(ReplSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -867,11 +679,7 @@ TEST(ReplSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -887,11 +695,7 @@ TEST(ReplSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -906,13 +710,8 @@ TEST(ReplSetConfig, ParseFailsWithRepairField) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "repaired"
- << true
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "repaired" << true << "version" << 1
+ << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))));
ASSERT_EQUALS(ErrorCodes::RepairedReplicaSetNode, status);
@@ -922,11 +721,7 @@ TEST(ReplSetConfig, ValidateFailsWithBadProtocolVersion) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 3
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 3 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -941,11 +736,7 @@ TEST(ReplSetConfig, ValidateFailsWithProtocolVersion0) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 0
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 0 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -960,11 +751,7 @@ TEST(ReplSetConfig, ValidateFailsWithDuplicateMemberId) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 0 << "host"
@@ -979,15 +766,10 @@ TEST(ReplSetConfig, ValidateFailsWithInvalidMember) {
ReplSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "hidden"
- << true))));
+ << "hidden" << true))));
ASSERT_OK(status);
status = config.validate();
@@ -998,29 +780,19 @@ TEST(ReplSetConfig, ChainingAllowedField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("chainingAllowed" << true))));
+ << "settings" << BSON("chainingAllowed" << true))));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.isChainingAllowed());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("chainingAllowed" << false))));
+ << "settings" << BSON("chainingAllowed" << false))));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.isChainingAllowed());
}
@@ -1029,13 +801,8 @@ TEST(ReplSetConfig, ConfigServerField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_TRUE(config.isConfigServer());
@@ -1043,13 +810,8 @@ TEST(ReplSetConfig, ConfigServerField) {
ReplSetConfig config2;
ASSERT_OK(config2.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "configsvr"
- << false
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "configsvr"
+ << false << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
@@ -1072,25 +834,18 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config.isConfigServer());
ReplSetConfig config2;
- ASSERT_OK(config2.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(
+ config2.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
@@ -1099,25 +854,18 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) {
ReplSetConfig config3;
ASSERT_OK(config3.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config3.isConfigServer());
ReplSetConfig config4;
- ASSERT_OK(config4.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(
+ config4.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config4.isConfigServer());
}
@@ -1125,29 +873,19 @@ TEST(ReplSetConfig, HeartbeatIntervalField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000))));
+ << "settings" << BSON("heartbeatIntervalMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(5), config.getHeartbeatInterval());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << -5000))));
+ << "settings" << BSON("heartbeatIntervalMillis" << -5000))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
@@ -1155,29 +893,19 @@ TEST(ReplSetConfig, ElectionTimeoutField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << 20))));
+ << "settings" << BSON("electionTimeoutMillis" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(20), config.getElectionTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("electionTimeoutMillis" << -20)));
+ << "settings" << BSON("electionTimeoutMillis" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "election timeout must be greater than 0");
}
@@ -1186,29 +914,19 @@ TEST(ReplSetConfig, HeartbeatTimeoutField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 20))));
+ << "settings" << BSON("heartbeatTimeoutSecs" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << -20)));
+ << "settings" << BSON("heartbeatTimeoutSecs" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "heartbeat timeout must be greater than 0");
}
@@ -1217,11 +935,7 @@ TEST(ReplSetConfig, GleDefaultField) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -1232,11 +946,7 @@ TEST(ReplSetConfig, GleDefaultField) {
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
<< "settings"
@@ -1244,27 +954,19 @@ TEST(ReplSetConfig, GleDefaultField) {
<< "frim")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
ASSERT_OK(
config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "tags"
@@ -1352,19 +1054,15 @@ bool operator==(const ReplSetConfig& a, const ReplSetConfig& b) {
TEST(ReplSetConfig, toBSONRoundTripAbility) {
ReplSetConfig configA;
ReplSetConfig configB;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
- << OID::gen()))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << 5000 << "heartbeatTimeoutSecs" << 20
+ << "replicaSetId" << OID::gen()))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_TRUE(configA == configB);
}
@@ -1372,132 +1070,83 @@ TEST(ReplSetConfig, toBSONRoundTripAbility) {
TEST(ReplSetConfig, toBSONRoundTripAbilityWithHorizon) {
ReplSetConfig configA;
ReplSetConfig configB;
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "horizons"
+ << BSON("horizon"
+ << "example.com:42")))
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << 5000 << "heartbeatTimeoutSecs" << 20
+ << "replicaSetId" << OID::gen()))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
+ ReplSetConfig configA;
+ ReplSetConfig configB;
ASSERT_OK(configA.initialize(BSON(
"_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "asdf"
+ << "version" << 9 << "writeConcernMajorityJournalDefault" << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "horizons"
- << BSON("horizon"
- << "example.com:42")))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
- << OID::gen()))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
+ << "arbiterOnly" << true << "votes" << 1)
+ << BSON("_id" << 3 << "host"
+ << "localhost:3828"
+ << "arbiterOnly" << false << "hidden" << true << "buildIndexes"
+ << false << "priority" << 0 << "slaveDelay" << 17 << "votes"
+ << 0 << "tags"
+ << BSON("coast"
+ << "east"
+ << "ssd"
+ << "true"))
+ << BSON("_id" << 2 << "host"
+ << "foo.com:3828"
+ << "votes" << 0 << "priority" << 0 << "tags"
+ << BSON("coast"
+ << "west"
+ << "hdd"
+ << "true")))
+ << "protocolVersion" << 1 << "settings"
+
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis" << 4 << "chainingAllowd"
+ << true << "getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
+ << "coasts" << BSON("coast" << 2))))));
+ BSONObj configObjA = configA.toBSON();
+ ASSERT_OK(configB.initialize(configObjA));
ASSERT_TRUE(configA == configB);
}
-TEST(ReplSetConfig, toBSONRoundTripAbilityLarge) {
+TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
ReplSetConfig configA;
ReplSetConfig configB;
ASSERT_OK(configA.initialize(
BSON("_id"
- << "asdf"
- << "version"
- << 9
- << "writeConcernMajorityJournalDefault"
- << true
- << "members"
+ << ""
+ << "version" << -3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "arbiterOnly"
- << true
- << "votes"
- << 1)
- << BSON("_id" << 3 << "host"
+ << "arbiterOnly" << true << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 0 << "host"
<< "localhost:3828"
- << "arbiterOnly"
- << false
- << "hidden"
- << true
- << "buildIndexes"
- << false
- << "priority"
- << 0
- << "slaveDelay"
- << 17
- << "votes"
- << 0
- << "tags"
- << BSON("coast"
- << "east"
- << "ssd"
- << "true"))
+ << "arbiterOnly" << false << "buildIndexes" << false
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
- << "foo.com:3828"
- << "votes"
- << 0
- << "priority"
- << 0
- << "tags"
- << BSON("coast"
- << "west"
- << "hdd"
- << "true")))
- << "protocolVersion"
- << 1
+ << "localhost:3828"
+ << "votes" << 0 << "priority" << 0))
<< "settings"
-
- << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
- << "electionTimeoutMillis"
- << 4
- << "chainingAllowd"
- << true
- << "getLastErrorDefaults"
- << BSON("w"
- << "majority")
- << "getLastErrorModes"
- << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
- << "coasts"
- << BSON("coast" << 2))))));
- BSONObj configObjA = configA.toBSON();
- ASSERT_OK(configB.initialize(configObjA));
- ASSERT_TRUE(configA == configB);
-}
-
-TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
- ReplSetConfig configA;
- ReplSetConfig configB;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << ""
- << "version"
- << -3
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly"
- << true
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 0 << "host"
- << "localhost:3828"
- << "arbiterOnly"
- << false
- << "buildIndexes"
- << false
- << "priority"
- << 2)
- << BSON("_id" << 2 << "host"
- << "localhost:3828"
- << "votes"
- << 0
- << "priority"
- << 0))
- << "settings"
- << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs"
- << 20
- << "electionTimeoutMillis"
- << 2))));
+ << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis" << 2))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_NOT_OK(configA.validate());
ASSERT_NOT_OK(configB.validate());
@@ -1506,59 +1155,52 @@ TEST(ReplSetConfig, toBSONRoundTripAbilityInvalid) {
TEST(ReplSetConfig, CheckIfWriteConcernCanBeSatisfied) {
ReplSetConfig configA;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly"
- << true))
- << "settings"
- << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues"
- << BSON("dc" << 3)
- << "invalidNotEnoughNodes"
- << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON(
+ "valid" << BSON("dc" << 2 << "rack" << 3) << "invalidNotEnoughValues"
+ << BSON("dc" << 3) << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
WriteConcernOptions validNumberWC;
validNumberWC.wNumNodes = 5;
@@ -1619,19 +1261,13 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveArbiters) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly"
- << true)))));
+ << "arbiterOnly" << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "Arbiters are not allowed");
@@ -1641,21 +1277,14 @@ TEST(ReplSetConfig, CheckConfigServerMustBuildIndexes) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 0
- << "buildIndexes"
- << false)))));
+ << "priority" << 0
+ << "buildIndexes" << false)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "must build indexes");
@@ -1665,20 +1294,13 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveSlaveDelay) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 0
- << "slaveDelay"
+ << "priority" << 0 << "slaveDelay"
<< 3)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
@@ -1691,19 +1313,13 @@ TEST(ReplSetConfig, CheckConfigServerMustHaveTrueForWriteConcernMajorityJournalD
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr"
+ << true << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), " must be true in replica set configurations being ");
@@ -1713,33 +1329,23 @@ TEST(ReplSetConfig, GetPriorityTakeoverDelay) {
ReplSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority"
- << 4)
+ << "priority" << 4)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority"
- << 5))
- << "settings"
- << BSON("electionTimeoutMillis" << 1000))));
+ << "priority" << 5))
+ << "settings" << BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configA.validate());
ASSERT_EQUALS(Milliseconds(5000), configA.getPriorityTakeoverDelay(0));
ASSERT_EQUALS(Milliseconds(4000), configA.getPriorityTakeoverDelay(1));
@@ -1750,33 +1356,23 @@ TEST(ReplSetConfig, GetPriorityTakeoverDelay) {
ReplSetConfig configB;
ASSERT_OK(configB.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority"
- << 3))
- << "settings"
- << BSON("electionTimeoutMillis" << 1000))));
+ << "priority" << 3))
+ << "settings" << BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configB.validate());
ASSERT_EQUALS(Milliseconds(5000), configB.getPriorityTakeoverDelay(0));
ASSERT_EQUALS(Milliseconds(3000), configB.getPriorityTakeoverDelay(1));
@@ -1789,29 +1385,20 @@ TEST(ReplSetConfig, GetCatchUpTakeoverDelay) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings"
- << BSON("catchUpTakeoverDelayMillis" << 5000))));
+ << "settings" << BSON("catchUpTakeoverDelayMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(5000), config.getCatchUpTakeoverDelay());
- Status status = config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"))
- << "settings"
- << BSON("catchUpTakeoverDelayMillis" << -5000)));
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("catchUpTakeoverDelayMillis" << -5000)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(
status.reason(),
@@ -1822,23 +1409,16 @@ TEST(ReplSetConfig, GetCatchUpTakeoverDelayDefault) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)
+ << "priority" << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority"
- << 3)))));
+ << "priority" << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(30000), config.getCatchUpTakeoverDelay());
}
@@ -1849,11 +1429,7 @@ TEST(ReplSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajorityJou
// PV1, should default to true.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1863,15 +1439,10 @@ TEST(ReplSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajorityJou
// Should be able to set it false in PV1.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1881,11 +1452,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
<< "horizons"
@@ -1914,8 +1481,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
<< "delta"
<< "c.host3:44")))
- << "writeConcernMajorityJournalDefault"
- << false)));
+ << "writeConcernMajorityJournalDefault" << false)));
Status status = config.validate();
ASSERT_NOT_OK(status);
@@ -1929,11 +1495,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
// in the member-config code path.
status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "same1"
<< "horizons"
@@ -1978,8 +1540,7 @@ TEST(ReplSetConfig, HorizonConsistency) {
<< "d.host3:44"
<< "delta"
<< "d.host4:44")))
- << "writeConcernMajorityJournalDefault"
- << false));
+ << "writeConcernMajorityJournalDefault" << false));
ASSERT_OK(status) << " failing status was: " << status.reason();
status = config.validate();
@@ -2003,15 +1564,11 @@ TEST(ReplSetConfig, ReplSetId) {
auto status =
ReplSetConfig().initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
+ << "priority" << 1))
<< "settings"
<< BSON("replicaSetId" << OID::gen())));
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
@@ -2025,15 +1582,11 @@ TEST(ReplSetConfig, ReplSetId) {
ASSERT_OK(
configInitiate.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
+ << "version" << 1 << "protocolVersion" << 1
<< "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1)))));
+ << "priority" << 1)))));
ASSERT_OK(configInitiate.validate());
ASSERT_TRUE(configInitiate.hasReplicaSetId());
OID replicaSetId = configInitiate.getReplicaSetId();
@@ -2042,17 +1595,11 @@ TEST(ReplSetConfig, ReplSetId) {
ReplSetConfig configLocal;
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << replicaSetId))));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << replicaSetId))));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
ASSERT_EQUALS(replicaSetId, configLocal.getReplicaSetId());
@@ -2061,15 +1608,10 @@ TEST(ReplSetConfig, ReplSetId) {
OID defaultReplicaSetId = OID::gen();
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))),
+ << "priority" << 1))),
defaultReplicaSetId));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
@@ -2078,34 +1620,22 @@ TEST(ReplSetConfig, ReplSetId) {
// 'replicaSetId' field cannot be null.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << OID())));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << OID())));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "replicaSetId field value cannot be null");
// 'replicaSetId' field must be an OID.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority"
- << 1))
- << "settings"
- << BSON("replicaSetId" << 12345)));
+ << "priority" << 1))
+ << "settings" << BSON("replicaSetId" << 12345)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_STRING_CONTAINS(status.reason(),
"\"replicaSetId\" had the wrong type. Expected objectId, found int");
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
index 3b79768db8d..a7cc785995e 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
@@ -78,10 +78,9 @@ Status ReplSetHeartbeatArgsV1::initialize(const BSONObj& argsObj) {
if (status.isOK()) {
if (tempHeartbeatVersion != 1) {
return Status(ErrorCodes::Error(40666),
- str::stream() << "Found invalid value for field "
- << kHeartbeatVersionFieldName
- << ": "
- << tempHeartbeatVersion);
+ str::stream()
+ << "Found invalid value for field " << kHeartbeatVersionFieldName
+ << ": " << tempHeartbeatVersion);
}
_heartbeatVersion = tempHeartbeatVersion;
_hasHeartbeatVersion = true;
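This file shows the companion rule for str::stream() error messages: rather than breaking before every <<, clang-format-7 breaks once after str::stream() and then packs the chained fragments. A small sketch under the same assumptions (the helper name is invented; the error code 40666 and message mirror the hunk above, and the include path assumes this tree's "mongo/util/str.h"):

#include "mongo/base/status.h"
#include "mongo/util/str.h"

namespace mongo {
// Illustrative only: an error Status built in the post-format style, with the
// whole message chain hanging under str::stream().
Status makeInvalidVersionStatus(long long heartbeatVersion) {
    return Status(ErrorCodes::Error(40666),
                  str::stream()
                      << "Found invalid value for field heartbeatVersion: "
                      << heartbeatVersion);
}
}  // namespace mongo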
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index 4b16c88e389..5c43a35c71b 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -193,18 +193,18 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc,
if (memberStateElement.eoo()) {
_stateSet = false;
} else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"" << kMemberStateFieldName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Expected \"" << kMemberStateFieldName
<< "\" field in response to replSetHeartbeat "
"command to have type NumberInt or NumberLong, but found type "
<< typeName(memberStateElement.type()));
} else {
long long stateInt = memberStateElement.numberLong();
if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Value for \"" << kMemberStateFieldName
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Value for \"" << kMemberStateFieldName
<< "\" in response to replSetHeartbeat is "
"out of range; legal values are non-negative and no more than "
<< MemberState::RS_MAX);
@@ -217,8 +217,7 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc,
if (configVersionElement.eoo()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "Response to replSetHeartbeat missing required \""
- << kConfigVersionFieldName
- << "\" field");
+ << kConfigVersionFieldName << "\" field");
}
if (configVersionElement.type() != NumberInt) {
return Status(ErrorCodes::TypeMismatch,
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index f3f0f1ce8bb..352456c929d 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -166,19 +166,16 @@ TEST(ReplSetHeartbeatResponse, InitializeNoDurableWallTime) {
TEST(ReplSetHeartbeatResponse, InitializeWrongAppliedOpTimeType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << "hello");
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS("\"opTime\" had the wrong type. Expected object, found string", result.reason());
initializerObj = BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
<< OpTime().getTimestamp());
result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -190,9 +187,7 @@ TEST(ReplSetHeartbeatResponse, InitializeNoAppliedWallTime) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON());
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON());
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, result);
ASSERT_EQUALS("Missing expected field \"wallTime\"", result.reason());
@@ -202,12 +197,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state"
<< "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -221,13 +212,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooLow) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
- << -1);
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state" << -1);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS(
@@ -240,13 +226,8 @@ TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooHigh) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "state"
- << 11);
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "state" << 11);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS(
@@ -259,12 +240,8 @@ TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj = BSON(
"ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
+ << Date_t() + Seconds(100) << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100) << "v"
<< "hello");
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
@@ -276,17 +253,12 @@ TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "set"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "set" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -297,17 +269,12 @@ TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "syncingTo"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "syncingTo" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -318,17 +285,12 @@ TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "config"
- << 4);
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "config" << 4);
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -339,17 +301,12 @@ TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON(
- "ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON() << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 2 // needs a version to get this far in initialize()
- << "config"
- << BSON("illegalFieldName" << 2));
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime" << Date_t() + Seconds(100)
+ << "v" << 2 // needs a version to get this far in initialize()
+ << "config" << BSON("illegalFieldName" << 2));
Status result = hbResponse.initialize(initializerObj, 0, /*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
@@ -371,12 +328,9 @@ TEST(ReplSetHeartbeatResponse, InvalidResponseOpTimeMissesConfigVersion) {
ReplSetHeartbeatResponse hbResp;
Status result = hbResp.initialize(BSON("ok" << 1.0 << "durableOpTime"
<< OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)),
+ << "durableWallTime" << Date_t() + Seconds(100)
+ << "opTime" << OpTime(Timestamp(100, 0), 0).toBSON()
+ << "wallTime" << Date_t() + Seconds(100)),
0,
/*requireWallTime*/ true);
ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.code());
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index f2127b70518..a57b7e35ceb 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -100,8 +100,7 @@ void ReplicationConsistencyMarkersImpl::initializeMinValidDocument(OperationCont
// will always be greater than the provided ones.
TimestampedBSONObj upsert;
upsert.obj = BSON("$max" << BSON(MinValidDocument::kMinValidTimestampFieldName
- << Timestamp()
- << MinValidDocument::kMinValidTermFieldName
+ << Timestamp() << MinValidDocument::kMinValidTermFieldName
<< OpTime::kUninitializedTerm));
// The initialization write should go into the first checkpoint taken, so we provide no
@@ -153,10 +152,8 @@ void ReplicationConsistencyMarkersImpl::clearInitialSyncFlag(OperationContext* o
update.obj = BSON("$unset" << kInitialSyncFlag << "$set"
<< BSON(MinValidDocument::kMinValidTimestampFieldName
<< time.getTimestamp()
- << MinValidDocument::kMinValidTermFieldName
- << time.getTerm()
- << MinValidDocument::kAppliedThroughFieldName
- << time));
+ << MinValidDocument::kMinValidTermFieldName << time.getTerm()
+ << MinValidDocument::kAppliedThroughFieldName << time));
// We clear the initial sync flag at the 'lastAppliedOpTime'. This is unnecessary, since there
// should not be any stable checkpoints being taken that this write could inadvertently enter.
@@ -194,10 +191,10 @@ void ReplicationConsistencyMarkersImpl::setMinValid(OperationContext* opCtx,
LOG(3) << "setting minvalid to exactly: " << minValid.toString() << "(" << minValid.toBSON()
<< ")";
TimestampedBSONObj update;
- update.obj = BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
- << minValid.getTimestamp()
- << MinValidDocument::kMinValidTermFieldName
- << minValid.getTerm()));
+ update.obj =
+ BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
+ << minValid.getTimestamp() << MinValidDocument::kMinValidTermFieldName
+ << minValid.getTerm()));
// This method is only used with storage engines that do not support recover to stable
// timestamp. As a result, their timestamps do not matter.
@@ -346,8 +343,8 @@ Status ReplicationConsistencyMarkersImpl::createInternalCollections(OperationCon
auto status = _storageInterface->createCollection(opCtx, nss, CollectionOptions());
if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) {
return {ErrorCodes::CannotCreateCollection,
- str::stream() << "Failed to create collection. Ns: " << nss.ns() << " Error: "
- << status.toString()};
+ str::stream() << "Failed to create collection. Ns: " << nss.ns()
+ << " Error: " << status.toString()};
}
}
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index b9eee2a78a9..8b767924eac 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -774,12 +774,12 @@ public:
virtual std::vector<MemberData> getMemberData() const = 0;
/*
- * Handles an incoming replSetRequestVotes command.
- *
- * Populates the given 'response' object with the result of the request. If there is a failure
- * processing the vote request, returns an error status. If an error is returned, the value of
- * the populated 'response' object is invalid.
- */
+ * Handles an incoming replSetRequestVotes command.
+ *
+ * Populates the given 'response' object with the result of the request. If there is a failure
+ * processing the vote request, returns an error status. If an error is returned, the value of
+ * the populated 'response' object is invalid.
+ */
virtual Status processReplSetRequestVotes(OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) = 0;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 0df40b457c2..1052ccec515 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -171,13 +171,13 @@ auto makeTaskExecutor(ServiceContext* service, const std::string& poolName) {
* down.
*/
void scheduleWork(executor::TaskExecutor* executor, executor::TaskExecutor::CallbackFn work) {
- auto cbh = executor->scheduleWork([work = std::move(work)](
- const executor::TaskExecutor::CallbackArgs& args) {
- if (args.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- work(args);
- });
+ auto cbh = executor->scheduleWork(
+ [work = std::move(work)](const executor::TaskExecutor::CallbackArgs& args) {
+ if (args.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ work(args);
+ });
if (cbh == ErrorCodes::ShutdownInProgress) {
return;
}
@@ -552,9 +552,7 @@ Status ReplicationCoordinatorExternalStateImpl::createLocalLastVoteCollection(
if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) {
return {ErrorCodes::CannotCreateCollection,
str::stream() << "Failed to create local last vote collection. Ns: "
- << lastVoteCollectionName
- << " Error: "
- << status.toString()};
+ << lastVoteCollectionName << " Error: " << status.toString()};
}
// Make sure there's always a last vote document.
@@ -682,9 +680,7 @@ StatusWith<OpTimeAndWallTime> ReplicationCoordinatorExternalStateImpl::loadLastO
return StatusWith<OpTimeAndWallTime>(
ErrorCodes::NoSuchKey,
str::stream() << "Most recent entry in " << NamespaceString::kRsOplogNamespace.ns()
- << " missing \""
- << tsFieldName
- << "\" field");
+ << " missing \"" << tsFieldName << "\" field");
}
if (tsElement.type() != bsonTimestamp) {
return StatusWith<OpTimeAndWallTime>(
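The scheduleWork() hunk above illustrates a third pattern: clang-format-7 stops hanging a lambda's capture list off the enclosing call and instead drops the entire lambda onto its own indented argument line. A standalone sketch in plain C++ (no mongo types; Executor here is a toy stand-in for executor::TaskExecutor):

#include <functional>
#include <utility>

// Toy executor: runs the callback immediately with a status code of 0.
struct Executor {
    void scheduleWork(std::function<void(int)> fn) { fn(0); }
};

void schedule(Executor* executor, std::function<void(int)> work) {
    executor->scheduleWork(
        [work = std::move(work)](int status) {  // lambda starts on its own line
            if (status != 0) {
                return;  // mirrors the CallbackCanceled early-out above
            }
            work(status);
        });
}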
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index af36200521b..41abb8bc4e6 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -134,11 +134,11 @@ private:
void _shardingOnTransitionToPrimaryHook(OperationContext* opCtx);
/**
- * Drops all temporary collections on all databases except "local".
- *
- * The implementation may assume that the caller has acquired the global exclusive lock
- * for "opCtx".
- */
+ * Drops all temporary collections on all databases except "local".
+ *
+ * The implementation may assume that the caller has acquired the global exclusive lock
+ * for "opCtx".
+ */
void _dropAllTempCollections(OperationContext* opCtx);
ServiceContext* _service;
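In this header and in replication_coordinator.h above, the removed and added comment lines carry identical text; only the leading whitespace of the * continuation lines changes, because clang-format-7 re-aligns every continuation line of a block comment with its opener. A sketch of the aligned form, using the declaration from the surrounding header:

    /**
     * Drops all temporary collections on all databases except "local".
     *
     * The implementation may assume that the caller has acquired the global
     * exclusive lock for "opCtx".
     */
    void _dropAllTempCollections(OperationContext* opCtx);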
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index db61adde61d..2509aa1d29e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -927,10 +927,9 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
auto pred = [this, expectedState]() { return _memberState == expectedState; };
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
- str::stream() << "Timed out waiting for state to become "
- << expectedState.toString()
- << ". Current state is "
- << _memberState.toString());
+ str::stream()
+ << "Timed out waiting for state to become " << expectedState.toString()
+ << ". Current state is " << _memberState.toString());
}
return Status::OK();
}
@@ -1642,8 +1641,9 @@ bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
"'committed' optime "
<< opTime
<< ". There are still drop pending collections (earliest drop optime: "
- << *dropOpTime << ") that have to be removed from storage before we can "
- "satisfy the write concern "
+ << *dropOpTime
+ << ") that have to be removed from storage before we can "
+ "satisfy the write concern "
<< writeConcern.toBSON();
return false;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 8bb0ca7488c..73fa6dbae41 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -546,7 +546,7 @@ private:
* Loops continuously to kill all conflicting operations. And, aborts all stashed (inactive)
* transactions.
* Terminates once killSignaled is set true.
- */
+ */
void _killOpThreadFn();
/*
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index b0d3d8c3556..0402d4a2aed 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -59,11 +59,7 @@ using ApplierState = ReplicationCoordinator::ApplierState;
TEST_F(ReplCoordTest, RandomizedElectionOffsetWithinProperBounds) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -90,18 +86,14 @@ TEST_F(ReplCoordTest, RandomizedElectionOffsetWithinProperBounds) {
TEST_F(ReplCoordTest, RandomizedElectionOffsetAvoidsDivideByZero) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 1));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -112,24 +104,17 @@ TEST_F(ReplCoordTest, RandomizedElectionOffsetAvoidsDivideByZero) {
}
TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(BSON("_id"
- << "mySet"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes"
- << 0
- << "hidden"
- << true
- << "priority"
- << 0))
- << "protocolVersion"
- << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes" << 0 << "hidden" << true << "priority" << 0))
+ << "protocolVersion" << 1),
+ HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -184,15 +169,12 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
TEST_F(ReplCoordTest, StartElectionDoesNotStartAnElectionWhenNodeIsRecovering) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_RECOVERING));
@@ -212,13 +194,10 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(10, 1), 0), Date_t() + Seconds(10));
@@ -246,17 +225,14 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop opCtx;
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -279,9 +255,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -296,8 +270,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
<< "node6:12345")
<< BSON("_id" << 7 << "host"
<< "node7:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop opCtx;
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -321,17 +294,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -363,9 +333,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << 0 << "voteGranted" << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << 0 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
voteRequests++;
} else {
net->blackHole(noi);
@@ -382,17 +352,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -426,9 +393,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted"
- << false
- << "reason"
+ << "voteGranted" << false << "reason"
<< "quit living in the past")));
voteRequests++;
} else {
@@ -449,9 +414,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
OperationContextNoop opCtx;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -462,8 +425,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
<< "node4:12345")
<< BSON("_id" << 5 << "host"
<< "node5:12345"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -481,15 +443,12 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1))
+ << "protocolVersion" << 1))
.transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
@@ -576,17 +535,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -610,9 +566,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << 1 << "voteGranted" << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
}
net->runReadyNetworkOperations();
}
@@ -627,17 +583,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
TEST_F(ReplCoordTest, TransitionToRollbackFailsWhenElectionInProgress) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -666,17 +619,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -702,9 +652,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted"
- << false
- << "reason"
+ << "voteGranted" << false << "reason"
<< "quit living in the past")));
}
net->runReadyNetworkOperations();
@@ -721,17 +669,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -762,17 +707,14 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -799,10 +741,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON(
- "ok" << 1 << "term" << request.cmdObj["term"].Long() << "voteGranted" << true
- << "reason"
- << "")));
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true << "reason"
+ << "")));
}
net->runReadyNetworkOperations();
}
@@ -965,18 +906,14 @@ private:
TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDelaySet) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("catchUpTimeoutMillis" << 0 << "catchUpTakeoverDelay"
<< 10000));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -1007,17 +944,14 @@ TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDe
TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1054,21 +988,16 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary)
TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1106,19 +1035,15 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
startCapturingLogMessages();
@@ -1162,17 +1087,14 @@ TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) {
TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1219,21 +1141,16 @@ TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
// In order for node 1 to first schedule a catchup takeover, then a priority takeover
// once the first gets canceled, it must have a higher priority than the current primary
// (node 2). But, it must not have the highest priority in the replica set. Otherwise,
@@ -1285,17 +1202,14 @@ TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1359,17 +1273,14 @@ TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1422,17 +1333,14 @@ TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
TEST_F(TakeoverTest, SuccessfulCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1489,9 +1397,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -1502,8 +1408,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
<< "node4:12345")
<< BSON("_id" << 5 << "host"
<< "node5:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1565,12 +1470,11 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
net->blackHole(noi);
} else {
bool voteGranted = request.target != primaryHostAndPort;
- net->scheduleResponse(
- noi,
- until,
- makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted" << voteGranted
- << "reason"
- << "")));
+ net->scheduleResponse(noi,
+ until,
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << voteGranted << "reason"
+ << "")));
voteRequests++;
}
net->runReadyNetworkOperations();
@@ -1598,17 +1502,14 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1659,21 +1560,16 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1);
+ << "priority" << 3))
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1743,19 +1639,15 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1790,19 +1682,15 @@ TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrent
TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
@@ -1845,19 +1733,15 @@ TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -1924,19 +1808,15 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
HostAndPort primaryHostAndPort("node2", 12345);
@@ -2004,19 +1884,14 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
// Start up and become electable.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 100)),
+ << "settings" << BSON("heartbeatIntervalMillis" << 100)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -2044,11 +1919,7 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 4 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2069,19 +1940,14 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
// Start up and become electable.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("heartbeatIntervalMillis" << 100)),
+ << "settings" << BSON("heartbeatIntervalMillis" << 100)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -2094,11 +1960,7 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 4 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2160,14 +2022,13 @@ protected:
net->getNextReadyRequest(), net->now(), makeHeartbeatResponse(opTime));
} else if (request.cmdObj.firstElement().fieldNameStringData() ==
"replSetRequestVotes") {
- net->scheduleResponse(net->getNextReadyRequest(),
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ net->getNextReadyRequest(),
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
} else {
// Stop the loop and let the caller handle unexpected requests.
net->exitNetwork();
@@ -2181,18 +2042,14 @@ protected:
ReplSetConfig setUp3NodeReplSetAndRunForElection(OpTime opTime, long long timeout = 5000) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
<< "node3:12345"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 1 << "catchUpTimeoutMillis"
<< timeout));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 62834fe3d0c..5af7e96e979 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -174,11 +174,11 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (replMetadata.isOK() && _rsConfig.isInitialized() && _rsConfig.hasReplicaSetId() &&
replMetadata.getValue().getReplicaSetId().isSet() &&
_rsConfig.getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
- responseStatus = Status(ErrorCodes::InvalidReplicaSetConfig,
- str::stream() << "replica set IDs do not match, ours: "
- << _rsConfig.getReplicaSetId()
- << "; remote node's: "
- << replMetadata.getValue().getReplicaSetId());
+ responseStatus =
+ Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream()
+ << "replica set IDs do not match, ours: " << _rsConfig.getReplicaSetId()
+ << "; remote node's: " << replMetadata.getValue().getReplicaSetId());
// Ignore metadata.
replMetadata = responseStatus;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 53eedf88523..ec9fb647668 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -88,17 +88,14 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1")
<< BSON("_id" << 3 << "host"
<< "h3:1"))
- << "protocolVersion"
- << 1));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -158,21 +155,18 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test,
ArbiterJoinsExistingReplSetWhenReceivingAConfigContainingTheArbiterViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion"
- << 1));
+ ReplSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -236,17 +230,14 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1")
<< BSON("_id" << 3 << "host"
<< "h3:1"))
- << "protocolVersion"
- << 1));
+ << "protocolVersion" << 1));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
@@ -321,9 +312,7 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -336,12 +325,12 @@ TEST_F(ReplCoordHBV1Test,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code"
- << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(
+ noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code" << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
@@ -362,15 +351,11 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
HostAndPort host2("node2:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host" << host2.toString()))
- << "settings"
- << BSON("replicaSetId" << OID::gen())
- << "protocolVersion"
+ << "settings" << BSON("replicaSetId" << OID::gen()) << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -442,10 +427,9 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
ASSERT_EQ(MemberState(MemberState::RS_DOWN).toString(),
MemberState(member["state"].numberInt()).toString());
ASSERT_EQ(member["lastHeartbeatMessage"].String(),
- std::string(str::stream() << "replica set IDs do not match, ours: "
- << rsConfig.getReplicaSetId()
- << "; remote node's: "
- << unexpectedId));
+ std::string(str::stream()
+ << "replica set IDs do not match, ours: " << rsConfig.getReplicaSetId()
+ << "; remote node's: " << unexpectedId));
}
} // namespace
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 3fad34dfe2a..738ff86ef87 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -73,9 +73,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -99,9 +97,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive uninitializable config
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -117,21 +113,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "invalidlyNamedField"
- << 3
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "invalidlyNamedField"
+ << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "arbiterOnly"
- << true)));
+ << "arbiterOnly" << true)));
const auto opCtx = makeOperationContext();
// ErrorCodes::BadValue should be propagated from ReplSetConfig::initialize()
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
@@ -143,9 +132,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive config with incorrect replset name
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -161,11 +148,7 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "notMySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -181,15 +164,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
// start up, become primary, receive config with incorrect replset name
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen())),
+ << "settings" << BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -201,17 +181,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen()));
+ << "settings" << BSON("replicaSetId" << OID::gen()));
const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -224,9 +199,7 @@ TEST_F(ReplCoordTest,
// start up, become primary, validate fails
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -242,11 +215,7 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << -3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << -3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -266,9 +235,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord,
replCoord->processReplSetInitiate(opCtx,
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -285,17 +252,12 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord,
// Replica set id will be copied from existing configuration.
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "priority"
- << 3)));
+ << "priority" << 3)));
*status = replCoord->processReplSetReconfig(opCtx, args, &garbage);
}
@@ -305,9 +267,7 @@ TEST_F(ReplCoordTest,
// containing a higher config version
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -348,9 +308,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
// start up, become primary, saving the config fails
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -377,9 +335,7 @@ TEST_F(ReplCoordTest,
// start up, become primary, reconfig, then before that reconfig concludes, reconfig again
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -404,11 +360,7 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -443,11 +395,7 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -464,15 +412,12 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
// start up, become primary, reconfig successfully
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "settings"
- << BSON("replicaSetId" << OID::gen())),
+ << "settings" << BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -512,9 +457,7 @@ TEST_F(
// from reconfig
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -538,11 +481,7 @@ TEST_F(
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -581,9 +520,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
// start up, become primary, reconfig, while reconfigging receive reconfig via heartbeat
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -614,9 +551,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
config
.initialize(BSON("_id"
<< "mySet"
- << "version"
- << 4
- << "members"
+ << "version" << 4 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -652,9 +587,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -670,11 +603,7 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index a7ff18688e8..ee40d510288 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -120,15 +120,12 @@ void killOperation(OperationContext* opCtx) {
TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"))
- << "protocolVersion"
- << 1);
+ << "protocolVersion" << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
auto replCoord = getReplCoord();
@@ -162,9 +159,7 @@ TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -175,13 +170,10 @@ TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig)
TEST_F(ReplCoordTest, NodeEntersArbiterStateWhenStartingUpWithValidLocalConfigWhereItIsAnArbiter) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"))),
HostAndPort("node1", 12345));
@@ -193,9 +185,7 @@ TEST_F(ReplCoordTest, NodeEntersRemovedStateWhenStartingUpWithALocalConfigWhichL
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -212,9 +202,7 @@ TEST_F(ReplCoordTest,
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "notMySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -255,9 +243,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -271,9 +257,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result2));
@@ -296,9 +280,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
<< "arbiterOnly"
@@ -327,9 +309,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -347,9 +327,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node4"))),
&result));
@@ -363,9 +341,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 1 << "host"
@@ -460,9 +436,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "wrongSet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -491,8 +465,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithoutAn_
BSONObjBuilder result1;
auto status = getReplCoord()->processReplSetInitiate(
opCtx.get(),
- BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node1:12345"))),
+ BSON("version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
&result1);
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
ASSERT_STRING_CONTAINS(status.reason(), "Missing expected field \"_id\"");
@@ -511,9 +486,7 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1);
@@ -534,9 +507,7 @@ TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -557,9 +528,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi
getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -611,13 +580,10 @@ TEST_F(
TEST_F(ReplCoordTest, NodeReturnsOkWhenCheckReplEnabledForCommandAfterReceivingAConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
// check status OK and result is empty
@@ -647,21 +613,16 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstASta
TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASecondaryNode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -682,21 +643,16 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWithWTermOne) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
OpTimeWithTermOne time(100, 1);
@@ -724,25 +680,19 @@ TEST_F(ReplCoordTest,
NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWriteDurable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3))),
+ << "_id" << 3))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -804,25 +754,19 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWrite) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3))),
+ << "_id" << 3))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -881,9 +825,7 @@ TEST_F(ReplCoordTest,
NodeReturnsUnknownReplWriteConcernWhenAwaitReplicationReceivesAnInvalidWriteConcernMode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0")
<< BSON("_id" << 1 << "host"
@@ -920,9 +862,7 @@ TEST_F(
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0"
<< "tags"
@@ -1102,21 +1042,16 @@ private:
TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfied) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1166,21 +1101,16 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfie
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedWhenAWriteConcernTimesOutBeforeBeingSatisified) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1217,21 +1147,16 @@ TEST_F(ReplCoordTest,
NodeReturnsShutDownInProgressWhenANodeShutsDownPriorToSatisfyingAWriteConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1267,21 +1192,16 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
// if the node steps down while it is waiting.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -1315,9 +1235,7 @@ TEST_F(ReplCoordTest,
// Tests that a thread blocked in awaitReplication can be killed by a killOp operation
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1")
<< BSON("_id" << 1 << "host"
@@ -1435,9 +1353,7 @@ private:
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1462,9 +1378,7 @@ TEST_F(ReplCoordTest, UpdatePositionArgsAdvancesWallTimes) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1498,33 +1412,27 @@ TEST_F(ReplCoordTest, UpdatePositionArgsAdvancesWallTimes) {
ASSERT_OK(updatePositionArgsInitialize(
updatePositionArgs,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << memberOneAppliedWallTime
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << memberOneDurableWallTime)
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << memberTwoAppliedWallTime
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << opTime2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << memberTwoDurableWallTime)))));
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(
+ UpdatePositionArgs::kConfigVersionFieldName
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName << memberOneAppliedWallTime
+ << UpdatePositionArgs::kDurableOpTimeFieldName << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName << memberOneDurableWallTime)
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName
+ << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << memberTwoAppliedWallTime
+ << UpdatePositionArgs::kDurableOpTimeFieldName
+ << opTime2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << memberTwoDurableWallTime)))));
ASSERT_OK(repl->processReplSetUpdatePosition(updatePositionArgs, &configVersion));
@@ -1546,17 +1454,14 @@ TEST_F(ReplCoordTest, ElectionIdTracksTermInPV1) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1610,17 +1515,14 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1656,17 +1558,14 @@ TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreTha
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1712,17 +1611,14 @@ TEST_F(ReplCoordTest, DrainCompletionMidStepDown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
HostAndPort("test1", 1234));
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
@@ -1784,12 +1680,9 @@ TEST_F(StepDownTest, StepDownCanCompleteBasedOnReplSetUpdatePositionAlone) {
ASSERT_OK(updatePositionArgsInitialize(
updatePositionArgs,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1799,9 +1692,7 @@ TEST_F(StepDownTest, StepDownCanCompleteBasedOnReplSetUpdatePositionAlone) {
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime1.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1886,17 +1777,12 @@ private:
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 2 << "host"
<< "test3:1234"))),
HostAndPort("test1", 1234));
@@ -1936,12 +1822,9 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_OK(updatePositionArgsInitialize(
catchupFirstSecondary,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1951,9 +1834,7 @@ TEST_F(StepDownTestWithUnelectableNode,
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime1.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1975,12 +1856,9 @@ TEST_F(StepDownTestWithUnelectableNode,
ASSERT_OK(updatePositionArgsInitialize(
catchupOtherSecondary,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 1
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -1990,9 +1868,7 @@ TEST_F(StepDownTestWithUnelectableNode,
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(opTime2.asOpTime().getSecs()))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << configVersion
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
+ << configVersion << UpdatePositionArgs::kMemberIdFieldName << 2
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< opTime2.asOpTime().toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
@@ -2121,9 +1997,7 @@ private:
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2220,14 +2094,10 @@ TEST_F(ReplCoordTest, SingleNodeReplSetStepDownTimeoutAndElectionTimeoutExpiresA
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 1000)),
HostAndPort("test1", 1234));
auto opCtx = makeOperationContext();
@@ -2256,14 +2126,10 @@ TEST_F(ReplCoordTest, SingleNodeReplSetUnfreeze) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 10000)),
HostAndPort("test1", 1234));
auto opCtx = makeOperationContext();
@@ -2305,9 +2171,7 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2337,9 +2201,7 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2693,13 +2555,10 @@ TEST_F(ReplCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
}
@@ -2707,9 +2566,7 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2786,11 +2643,7 @@ TEST_F(ReplCoordTest,
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2813,11 +2666,7 @@ TEST_F(ReplCoordTest,
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2849,11 +2698,7 @@ TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintena
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2883,11 +2728,7 @@ TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackS
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2929,11 +2770,7 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -2968,11 +2805,7 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -3040,9 +2873,7 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -3085,9 +2916,7 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -3123,19 +2952,14 @@ TEST_F(ReplCoordTest, NodeReturnsNoNodesWhenGetOtherNodesInReplSetIsRunBeforeHav
TEST_F(ReplCoordTest, NodeReturnsListOfNodesOtherThanItselfInResponseToGetOtherNodesInReplSet) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h1")
<< BSON("_id" << 1 << "host"
<< "h2")
<< BSON("_id" << 2 << "host"
<< "h3"
- << "priority"
- << 0
- << "hidden"
- << true))),
+ << "priority" << 0 << "hidden" << true))),
HostAndPort("h1"));
std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
@@ -3173,9 +2997,7 @@ TEST_F(ReplCoordTest, IsMaster) {
BSON(
"_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
<< BSON("_id" << 1 << "host" << h2.toString())
<< BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
@@ -3238,9 +3060,7 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3270,9 +3090,7 @@ TEST_F(ReplCoordTest, IsMasterInShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3308,21 +3126,16 @@ TEST_F(ReplCoordTest, LogAMessageWhenShutDownBeforeReplicationStartUpFinished) {
TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3349,18 +3162,13 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 0
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 0
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3372,21 +3180,16 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3407,18 +3210,13 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 3
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 3 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3435,21 +3233,16 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3470,18 +3263,13 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 9
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.toBSON()
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 9
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kDurableWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.toBSON()
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.toBSON()
<< UpdatePositionArgs::kAppliedWallTimeFieldName
<< Date_t() + Seconds(time2.getSecs()))))));
@@ -3497,21 +3285,16 @@ TEST_F(ReplCoordTest,
ProcessUpdateWhenUpdatePositionContainsOnlyConfigVersionAndMemberIdsWithoutRIDs) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -3535,32 +3318,26 @@ TEST_F(ReplCoordTest,
ASSERT_OK(updatePositionArgsInitialize(
args,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << time2.asOpTime().toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(time2.asOpTime().getSecs()))))));
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName
+ << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName
+ << time2.asOpTime().toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(time2.asOpTime().getSecs()))))));
auto opCtx = makeOperationContext();
@@ -3581,15 +3358,10 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 1 << "host"
<< "node2:12345")
<< BSON("_id" << 2 << "host"
@@ -3600,21 +3372,16 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -3681,11 +3448,7 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version"
- << 3
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 3 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -3698,21 +3461,16 @@ TEST_F(
NodeReturnsUnsatisfiableWriteConcernWhenReconfiggingToAClusterThatCannotSatisfyTheWriteConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 2), Date_t() + Seconds(100));
@@ -3759,29 +3517,22 @@ TEST_F(ReplCoordTest,
NodeReturnsOKFromAwaitReplicationWhenReconfiggingToASetWhereMajorityIsSmallerAndSatisfied) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)
+ << "_id" << 3)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4))),
+ << "_id" << 4))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -3842,35 +3593,22 @@ TEST_F(ReplCoordTest,
// satisfied by voting data-bearing members.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "_id" << 3 << "votes" << 0 << "priority" << 0)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true))),
+ << "_id" << 4 << "arbiterOnly" << true))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime time(Timestamp(100, 1), 1);
@@ -3910,35 +3648,22 @@ TEST_F(ReplCoordTest,
// Test that the commit level advances properly.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "_id" << 3 << "votes" << 0 << "priority" << 0)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4
- << "arbiterOnly"
- << true))),
+ << "_id" << 4 << "arbiterOnly" << true))),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime zero(Timestamp(0, 0), 0);
@@ -4170,11 +3895,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorage) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4238,11 +3959,7 @@ TEST_F(StableOpTimeTest, SetMyLastAppliedSetsStableOpTimeForStorageDisableMajori
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4275,11 +3992,7 @@ TEST_F(StableOpTimeTest, AdvanceCommitPointSetsStableOpTimeForStorage) {
init("mySet/test1:1234,test2:1234,test3:1234");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "members"
+ << "protocolVersion" << 1 << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -4337,15 +4050,11 @@ TEST_F(StableOpTimeTest, ClearOpTimeCandidatesPastCommonPointAfterRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))
- << "protocolVersion"
- << 1),
+ << "_id" << 0))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
auto repl = getReplCoord();
@@ -4441,13 +4150,10 @@ TEST_F(StableOpTimeTest, OpTimeCandidatesAreNotAddedWhenStateIsNotConsistent) {
TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDuringShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100));
@@ -4466,13 +4172,10 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(10, 1), Date_t() + Seconds(100));
@@ -4490,13 +4193,10 @@ TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupte
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4507,13 +4207,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimePriorToOurLast) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
replCoordSetMyLastAppliedOpTime(OpTimeWithTermOne(100, 1), Date_t() + Seconds(100));
@@ -4529,13 +4226,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimeEqualToOurLast) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -4576,13 +4270,10 @@ TEST_F(ReplCoordTest, NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunAgainstAS
TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4602,13 +4293,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
const auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4625,13 +4313,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4647,13 +4332,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4669,13 +4351,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4697,13 +4376,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
runSingleNodeElection(opCtx.get());
@@ -4727,13 +4403,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
TEST_F(ReplCoordTest, WaitUntilOpTimeforReadRejectsUnsupportedMajorityReadConcern) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
auto opCtx = makeOperationContext();
@@ -4759,21 +4432,16 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
// Ensure that we do not process ReplSetMetadata when ConfigVersions do not match.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -4781,35 +4449,20 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2)
- << "configVersion"
- << 1
- << "primaryIndex"
- << 2
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 1
+ << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(0, getReplCoord()->getTerm());
// higher configVersion
- StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2)
- << "configVersion"
- << 100
- << "primaryIndex"
- << 2
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(
+ BSON(rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastCommittedWall"
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 100
+ << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(0, getReplCoord()->getTerm());
}
@@ -4819,23 +4472,17 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
// but not if the OpTime is older than the current LastCommittedOpTime.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -4865,23 +4512,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// Ensure that currentPrimaryIndex is never altered by ReplSetMetadata.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -4892,17 +4533,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 2
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 2 << "term" << 3 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4912,17 +4545,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata2 = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 1
- << "term"
- << 2
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 1 << "term" << 2 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4932,17 +4557,9 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
StatusWith<rpc::ReplSetMetadata> metadata3 = replReadFromMetadata(BSON(
rpc::kReplSetMetadataFieldName << BSON(
"lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3)
- << "configVersion"
- << 2
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ << Date_t() + Seconds(100) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
+ << "primaryIndex" << 1 << "term" << 3 << "syncSourceIndex" << 1)));
getReplCoord()->processReplSetMetadata(metadata3.getValue());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
ASSERT_EQUALS(-1, getTopoCoord().getCurrentPrimaryIndex());
@@ -4954,19 +4571,14 @@ TEST_F(ReplCoordTest,
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -4978,19 +4590,12 @@ TEST_F(ReplCoordTest,
// Higher term - should update term but not last committed optime.
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3)
- << "configVersion"
- << config.getConfigVersion()
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "lastCommittedWall" << Date_t() + Seconds(100)
+ << "lastOpVisible" << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion" << config.getConfigVersion() << "primaryIndex"
+ << 1 << "term" << 3 << "syncSourceIndex" << 1)));
BSONObjBuilder responseBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&responseBuilder));
@@ -5021,19 +4626,14 @@ TEST_F(ReplCoordTest, LastCommittedOpTimeOnlyUpdatedFromHeartbeatWhenLastApplied
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5103,19 +4703,14 @@ TEST_F(ReplCoordTest, LastCommittedOpTimeOnlyUpdatedFromHeartbeatInFCV42) {
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5182,19 +4777,14 @@ TEST_F(ReplCoordTest, LastCommittedOpTimeOnlyUpdatedFromHeartbeatInFCV42) {
TEST_F(ReplCoordTest, AdvanceCommitPointFromSyncSourceCanSetCommitPointToLastAppliedIgnoringTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(), getReplCoord()->getLastCommittedOpTime());
@@ -5212,23 +4802,17 @@ TEST_F(ReplCoordTest, AdvanceCommitPointFromSyncSourceCanSetCommitPointToLastApp
TEST_F(ReplCoordTest, PrepareOplogQueryMetadata) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))
- << "protocolVersion"
- << 1),
+ << "_id" << 2))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5276,21 +4860,14 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0
- << "arbiterOnly"
- << true)
+ << "_id" << 0 << "arbiterOnly" << true)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))
- << "protocolVersion"
- << 1),
+ << "_id" << 1))
+ << "protocolVersion" << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
@@ -5303,19 +4880,12 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
// Higher term - should update term and lastCommittedOpTime since arbiters learn of the
// commit point via heartbeats.
StatusWith<rpc::ReplSetMetadata> metadata = replReadFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 1) << "t" << 3) << "lastCommittedWall"
- << Date_t() + Seconds(100)
- << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 1) << "t" << 3)
- << "configVersion"
- << config.getConfigVersion()
- << "primaryIndex"
- << 1
- << "term"
- << 3
- << "syncSourceIndex"
- << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 1) << "t" << 3)
+ << "lastCommittedWall" << Date_t() + Seconds(100)
+ << "lastOpVisible" << BSON("ts" << Timestamp(10, 1) << "t" << 3)
+ << "configVersion" << config.getConfigVersion() << "primaryIndex"
+ << 1 << "term" << 3 << "syncSourceIndex" << 1)));
BSONObjBuilder responseBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&responseBuilder));
@@ -5346,19 +4916,13 @@ TEST_F(ReplCoordTest,
ScheduleElectionToBeRunInElectionTimeoutFromNowWhenCancelAndRescheduleElectionTimeoutIsRun) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5397,19 +4961,13 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5428,23 +4986,13 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileUnelectable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0
- << "priority"
- << 0
- << "hidden"
- << true)
+ << "_id" << 0 << "priority" << 0 << "hidden" << true)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -5459,19 +5007,13 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileRemoved) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5497,15 +5039,10 @@ TEST_F(ReplCoordTest,
config
.initialize(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 3
- << "members"
+ << "protocolVersion" << 1 << "version" << 3 << "members"
<< BSON_ARRAY(BSON("host"
<< "node2:12345"
- << "_id"
- << 1))))
+ << "_id" << 1))))
.transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
@@ -5529,19 +5066,13 @@ TEST_F(ReplCoordTest,
RescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimaryInSameTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5586,19 +5117,13 @@ TEST_F(ReplCoordTest,
DontRescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimaryInDiffertTerm) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5640,19 +5165,13 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseWithoutState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 2
- << "members"
+ << "protocolVersion" << 1 << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -5692,9 +5211,7 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5728,9 +5245,7 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5760,9 +5275,7 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -5785,13 +5298,10 @@ TEST_F(ReplCoordTest,
NodeChangesMyLastOpTimeWhenAndOnlyWhensetMyLastDurableOpTimeReceivesANewerOpTime4DurableSE) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5815,13 +5325,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() > myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5841,13 +5348,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() > myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5867,13 +5371,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() < myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5893,13 +5394,10 @@ DEATH_TEST_F(ReplCoordTest,
"opTime.getTimestamp() < myLastAppliedOpTime.getTimestamp()") {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0))),
+ << "_id" << 0))),
HostAndPort("node1", 12345));
@@ -5918,18 +5416,14 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -5992,18 +5486,14 @@ TEST_F(ReplCoordTest, UpdatePositionCmdHasMetadata) {
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
<< "test2:1234")
<< BSON("_id" << 2 << "host"
<< "test3:1234"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -6033,32 +5523,23 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2)
+ << "_id" << 2)
<< BSON("host"
<< "node4:12345"
- << "_id"
- << 3)
+ << "_id" << 3)
<< BSON("host"
<< "node5:12345"
- << "_id"
- << 4))
- << "protocolVersion"
- << 1
- << "settings"
+ << "_id" << 4))
+ << "protocolVersion" << 1 << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("node1", 12345));
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6070,57 +5551,42 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
UpdatePositionArgs args;
ASSERT_OK(updatePositionArgsInitialize(
args,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 3
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 4
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))))));
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
// Become PRIMARY.
@@ -6130,33 +5596,26 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
UpdatePositionArgs args1;
ASSERT_OK(updatePositionArgsInitialize(
args1,
- BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))
- << BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 2
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())))),
+ BSON(
+ UpdatePositionArgs::kCommandFieldName
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))
+ << BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())))),
/*requireWallTime*/ true));
const Date_t startDate = getNet()->now();
getNet()->enterNetwork();
@@ -6198,20 +5657,16 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
ASSERT_OK(updatePositionArgsInitialize(
args2,
BSON(UpdatePositionArgs::kCommandFieldName
- << 1
- << UpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2
- << UpdatePositionArgs::kMemberIdFieldName
- << 1
- << UpdatePositionArgs::kDurableOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kDurableWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs())
- << UpdatePositionArgs::kAppliedOpTimeFieldName
- << startingOpTime.toBSON()
- << UpdatePositionArgs::kAppliedWallTimeFieldName
- << Date_t() + Seconds(startingOpTime.getSecs()))))));
+ << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(
+ BSON(UpdatePositionArgs::kConfigVersionFieldName
+ << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << UpdatePositionArgs::kDurableOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kDurableWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs())
+ << UpdatePositionArgs::kAppliedOpTimeFieldName << startingOpTime.toBSON()
+ << UpdatePositionArgs::kAppliedWallTimeFieldName
+ << Date_t() + Seconds(startingOpTime.getSecs()))))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, 0));
hbArgs.setSetName("mySet");
@@ -6241,9 +5696,7 @@ TEST_F(ReplCoordTest, WaitForMemberState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -6278,9 +5731,7 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -6319,13 +5770,10 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6342,13 +5790,10 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << true),
+ << "writeConcernMajorityJournalDefault" << true),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6363,13 +5808,10 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfSync
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6391,13 +5833,10 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault"
- << false),
+ << "writeConcernMajorityJournalDefault" << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -6414,21 +5853,16 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6441,15 +5875,9 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "mySet"
- << "term"
- << 7LL
- << "candidateIndex"
- << 2LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
+ << "term" << 7LL << "candidateIndex" << 2LL
+ << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp"
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
@@ -6468,21 +5896,16 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1)
+ << "_id" << 1)
<< BSON("host"
<< "node3:12345"
- << "_id"
- << 2))),
+ << "_id" << 2))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6495,15 +5918,9 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "mySet"
- << "term"
- << 7LL
- << "candidateIndex"
- << 2LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << true
- << "lastCommittedOp"
+ << "term" << 7LL << "candidateIndex" << 2LL
+ << "configVersion" << 2LL << "dryRun"
+ << true << "lastCommittedOp"
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
@@ -6524,17 +5941,13 @@ TEST_F(ReplCoordTest, NodeFailsVoteRequestIfItFailsToStoreLastVote) {
// Set up a 2-node replica set config.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6549,18 +5962,12 @@ TEST_F(ReplCoordTest, NodeFailsVoteRequestIfItFailsToStoreLastVote) {
auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
- ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
- << "mySet"
- << "term"
- << initTerm + 1 // term of new candidate.
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
- << time.asOpTime().toBSON())));
+ ASSERT_OK(args.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "mySet"
+ << "term" << initTerm + 1 // term of new candidate.
+ << "candidateIndex" << 1LL << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp" << time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
// Simulate a failure to write the 'last vote' document. The specific error code isn't
@@ -6585,17 +5992,13 @@ TEST_F(ReplCoordTest, NodeNodesNotGrantVoteIfInTerminalShutdown) {
// Set up a 2-node replica set config.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id"
- << 0)
+ << "_id" << 0)
<< BSON("host"
<< "node2:12345"
- << "_id"
- << 1))),
+ << "_id" << 1))),
HostAndPort("node1", 12345));
auto time = OpTimeWithTermOne(100, 1);
ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -6610,18 +6013,12 @@ TEST_F(ReplCoordTest, NodeNodesNotGrantVoteIfInTerminalShutdown) {
auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
- ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
- << "mySet"
- << "term"
- << initTerm + 1 // term of new candidate.
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 2LL
- << "dryRun"
- << false
- << "lastCommittedOp"
- << time.asOpTime().toBSON())));
+ ASSERT_OK(args.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "mySet"
+ << "term" << initTerm + 1 // term of new candidate.
+ << "candidateIndex" << 1LL << "configVersion" << 2LL << "dryRun"
+ << false << "lastCommittedOp" << time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
getReplCoord()->enterTerminalShutdown();
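
The hunks above are dominated by one mechanical change: clang-format 7 bin-packs chained `<<` operands up to the column limit instead of breaking after every operand, while still giving each string literal its own line. A minimal before/after sketch, assuming the BSON and BSON_ARRAY macros from mongo/bson/bsonmisc.h (the variable names are illustrative only):

    // Pre-7.0.1 layout: every operand of the chained "<<" broke onto
    // its own line, so simple key/value pairs sprawled vertically.
    auto configOld = BSON("_id"
                          << "mySet"
                          << "version"
                          << 2
                          << "members"
                          << BSON_ARRAY(BSON("host"
                                             << "node1:12345"
                                             << "_id"
                                             << 0)));

    // 7.0.1 layout: non-literal operands pack onto one line; string
    // literals such as "mySet" still force a break, which is why they
    // keep their own lines throughout this diff.
    auto configNew = BSON("_id"
                          << "mySet"
                          << "version" << 2 << "members"
                          << BSON_ARRAY(BSON("host"
                                             << "node1:12345"
                                             << "_id" << 0)));
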
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 82317b139a2..068a769e735 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -276,14 +276,13 @@ void ReplCoordTest::simulateSuccessfulDryRun(
if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
ASSERT_TRUE(request.cmdObj.getBoolField("dryRun"));
onDryRunRequest(request);
- net->scheduleResponse(noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
voteRequests++;
} else if (consumeHeartbeatV1(noi)) {
// The heartbeat has been consumed.
@@ -345,14 +344,13 @@ void ReplCoordTest::simulateSuccessfulV1ElectionWithoutExitingDrainMode(Date_t e
hbResp.setConfigVersion(rsConfig.getConfigVersion());
net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON()));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term"
- << request.cmdObj["term"].Long()
- << "voteGranted"
- << true)));
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index e3f26e73513..40c1499ef67 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -66,10 +66,10 @@ namespace mongo {
MONGO_FAIL_POINT_DEFINE(waitInIsMaster);
-using std::unique_ptr;
using std::list;
using std::string;
using std::stringstream;
+using std::unique_ptr;
namespace repl {
namespace {
@@ -328,8 +328,7 @@ public:
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "Unrecognized field of 'internalClient': '"
- << fieldName
- << "'");
+ << fieldName << "'");
}
}
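
The replication_info.cpp hunk above is a reordering rather than a reflow: clang-format 7 applies SortUsingDeclarations (on by default in the base styles this version ships; whether mongo's .clang-format sets it explicitly is not shown here), so blocks of using declarations come out alphabetized. Hence std::unique_ptr moving below std::stringstream:

    // Before: hand-ordered.
    using std::unique_ptr;
    using std::list;
    using std::string;
    using std::stringstream;

    // After: alphabetical, as clang-format 7 emits it.
    using std::list;
    using std::string;
    using std::stringstream;
    using std::unique_ptr;
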
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index fba6d41fd6d..f4dd3e104c5 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -413,8 +413,7 @@ void ReplicationRecoveryImpl::_applyToEndOfOplog(OperationContext* opCtx,
invariant(applyThroughOpTime.getTimestamp() == topOfOplog,
str::stream() << "Did not apply to top of oplog. Applied through: "
<< applyThroughOpTime.toString()
- << ". Top of oplog: "
- << topOfOplog.toString());
+ << ". Top of oplog: " << topOfOplog.toString());
oplogBuffer.shutdown(opCtx);
// We may crash before setting appliedThrough. If we have a stable checkpoint, we will recover
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index 0d12cd58fa8..c97746080e5 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -1051,9 +1051,7 @@ TEST_F(ReplicationRecoveryTest, CommitTransactionOplogEntryCorrectlyUpdatesConfi
const auto txnOperations = BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << testNs.toString()
- << "o"
+ << "ns" << testNs.toString() << "o"
<< BSON("_id" << 1)));
const auto prepareDate = Date_t::now();
const auto prepareOp =
@@ -1128,9 +1126,7 @@ TEST_F(ReplicationRecoveryTest,
const auto txnOperations = BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << testNs.toString()
- << "o"
+ << "ns" << testNs.toString() << "o"
<< BSON("_id" << 1)));
const auto prepareDate = Date_t::now();
const auto prepareOp =
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 6213eb4fe26..f056fee9332 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -379,8 +379,7 @@ TEST_F(ReporterTestNoTriggerAtSetUp,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion"
- << 100));
+ << "configVersion" << 100));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -399,8 +398,7 @@ TEST_F(ReporterTest, InvalidReplicaSetResponseWithSameConfigVersionOnSyncTargetS
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "invalid config"
- << "configVersion"
- << posUpdater->getConfigVersion()));
+ << "configVersion" << posUpdater->getConfigVersion()));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -416,8 +414,7 @@ TEST_F(ReporterTest,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion"
- << posUpdater->getConfigVersion() + 1));
+ << "configVersion" << posUpdater->getConfigVersion() + 1));
ASSERT_TRUE(reporter->isActive());
}
diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp
index 09047074164..1e5b102a595 100644
--- a/src/mongo/db/repl/roll_back_local_operations.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations.cpp
@@ -123,14 +123,11 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollBackLocalOperations
auto result = _localOplogIterator->next();
if (!result.isOK()) {
return Status(ErrorCodes::NoMatchingDocument,
- str::stream() << "reached beginning of local oplog: {"
- << "scanned: "
- << _scanned
- << ", theirTime: "
- << getTimestamp(operation).toString()
- << ", ourTime: "
- << getTimestamp(_localOplogValue).toString()
- << "}");
+ str::stream()
+ << "reached beginning of local oplog: {"
+ << "scanned: " << _scanned
+ << ", theirTime: " << getTimestamp(operation).toString()
+ << ", ourTime: " << getTimestamp(_localOplogValue).toString() << "}");
}
opAfterCurrentEntry = _localOplogValue.first;
_localOplogValue = result.getValue();
@@ -200,11 +197,8 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperat
}
return Status(ErrorCodes::NoMatchingDocument,
str::stream() << "reached beginning of remote oplog: {"
- << "them: "
- << remoteOplog.toString()
- << ", theirTime: "
- << theirTime.toString()
- << "}");
+ << "them: " << remoteOplog.toString()
+ << ", theirTime: " << theirTime.toString() << "}");
}
} // namespace repl
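
The str::stream() hunks in roll_back_local_operations.cpp show the same packing rule applied to error messages: rather than hanging every fragment under the first operand, clang-format 7 breaks once after str::stream() and then packs label/value pairs per line. A formatting sketch (not standalone; scanned and theirTime are hypothetical locals standing in for the real members):

    // Before: one fragment per line, aligned under the stream call.
    return Status(ErrorCodes::NoMatchingDocument,
                  str::stream() << "reached beginning of local oplog: {"
                                << "scanned: "
                                << scanned
                                << ", theirTime: "
                                << theirTime.toString()
                                << "}");

    // After: the whole stream expression drops to its own line and the
    // fragments pack up to the column limit.
    return Status(ErrorCodes::NoMatchingDocument,
                  str::stream()
                      << "reached beginning of local oplog: {"
                      << "scanned: " << scanned
                      << ", theirTime: " << theirTime.toString() << "}");
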
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 1f8a933b67c..67fff417d0a 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -50,26 +50,18 @@ BSONObj makeOp(long long seconds, long long term = 1LL) {
auto uuid = unittest::assertGet(UUID::parse("b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"));
return BSON("ts" << Timestamp(seconds, seconds) << "t" << term << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "roll_back_local_operations.test"
- << "ui"
- << uuid);
+ << "ui" << uuid);
}
BSONObj makeOpWithWallClockTime(long count, long wallClockMillis, long long term = 1LL) {
auto uuid = unittest::assertGet(UUID::parse("b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"));
return BSON("ts" << Timestamp(count, count) << "t" << term << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "roll_back_local_operations.test"
- << "ui"
- << uuid
- << "wall"
- << Date_t::fromMillisSinceEpoch(wallClockMillis));
+ << "ui" << uuid << "wall" << Date_t::fromMillisSinceEpoch(wallClockMillis));
};
int recordId = 0;
@@ -150,7 +142,8 @@ TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
auto commonOperation = makeOpAndRecordId(1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2), commonOperation,
+ makeOpAndRecordId(2),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto rollbackOperation = [&](const BSONObj& operation) {
@@ -175,7 +168,10 @@ TEST(RollBackLocalOperationsTest, EndOfLocalOplog) {
TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
auto commonOperation = makeOpAndRecordId(1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(5), makeOpAndRecordId(4), makeOpAndRecordId(2), commonOperation,
+ makeOpAndRecordId(5),
+ makeOpAndRecordId(4),
+ makeOpAndRecordId(2),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto i = localOperations.cbegin();
@@ -209,7 +205,8 @@ TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
TEST(RollBackLocalOperationsTest, SameTimestampDifferentTermsRollbackNoSuchKey) {
auto commonOperation = makeOpAndRecordId(1, 1);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2, 3), commonOperation,
+ makeOpAndRecordId(2, 3),
+ commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
auto rollbackOperation = [&](const BSONObj& operation) {
@@ -242,7 +239,9 @@ TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
auto commonOperation = makeOpWithWallClockTimeAndRecordId(1, 1 * 5000);
auto firstOpAfterCommonPoint = makeOpWithWallClockTimeAndRecordId(2, 2 * 60 * 60 * 24 * 1000);
OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(3), firstOpAfterCommonPoint, commonOperation,
+ makeOpAndRecordId(3),
+ firstOpAfterCommonPoint,
+ commonOperation,
});
auto i = localOperations.cbegin();
auto result = syncRollBackLocalOperations(OplogInterfaceMock(localOperations),
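
The OplogInterfaceMock::Operations initializers above expand vertically for a different reason: clang-format honors a trailing comma inside a braced list as a request for one element per line, and by version 7 it applies that break consistently (the exact version where this firmed up is an assumption). The behavior reproduces with a plain standard-library type:

    #include <string>
    #include <vector>

    // No trailing comma: the list may be bin-packed onto one line.
    std::vector<std::string> packed{"a", "b", "c"};

    // Trailing comma: clang-format keeps one element per line, matching
    // the makeOpAndRecordId(...) lists in the hunks above.
    std::vector<std::string> onePerLine{
        "a",
        "b",
        "c",
    };
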
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index b6aca140721..73c484ec452 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -100,8 +100,9 @@ boost::optional<long long> _parseDroppedCollectionCount(const OplogEntry& oplogE
auto obj2 = oplogEntry.getObject2();
if (!obj2) {
- warning() << "Unable to get collection count from " << desc << " without the o2 "
- "field. oplog op: "
+ warning() << "Unable to get collection count from " << desc
+ << " without the o2 "
+ "field. oplog op: "
<< redact(oplogEntry.toBSON());
return boost::none;
}
@@ -324,10 +325,10 @@ Status RollbackImpl::_transitionToRollback(OperationContext* opCtx) {
auto status =
_replicationCoordinator->setFollowerModeStrict(opCtx, MemberState::RS_ROLLBACK);
if (!status.isOK()) {
- status.addContext(str::stream() << "Cannot transition from "
- << _replicationCoordinator->getMemberState().toString()
- << " to "
- << MemberState(MemberState::RS_ROLLBACK).toString());
+ status.addContext(str::stream()
+ << "Cannot transition from "
+ << _replicationCoordinator->getMemberState().toString() << " to "
+ << MemberState(MemberState::RS_ROLLBACK).toString());
log() << status;
return status;
}
@@ -416,9 +417,9 @@ StatusWith<std::set<NamespaceString>> RollbackImpl::_namespacesForOp(const Oplog
// These commands do not need to be supported by rollback. 'convertToCapped' should
// always be converted to lower level DDL operations, and 'emptycapped' is a
// testing-only command.
- std::string message = str::stream() << "Encountered unsupported command type '"
- << firstElem.fieldName()
- << "' during rollback.";
+ std::string message = str::stream()
+ << "Encountered unsupported command type '" << firstElem.fieldName()
+ << "' during rollback.";
return Status(ErrorCodes::UnrecoverableRollbackError, message);
}
case OplogEntry::CommandType::kCreate:
@@ -594,8 +595,7 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) {
auto collToScan = autoCollToScan.getCollection();
invariant(coll == collToScan,
str::stream() << "Catalog returned invalid collection: " << nss.ns() << " ("
- << uuid.toString()
- << ")");
+ << uuid.toString() << ")");
auto exec = collToScan->makePlanExecutor(
opCtx, PlanExecutor::INTERRUPT_ONLY, Collection::ScanDirection::kForward);
long long countFromScan = 0;
@@ -816,8 +816,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
const auto uuid = oplogEntry.getUuid().get();
invariant(_countDiffs.find(uuid) == _countDiffs.end(),
str::stream() << "Unexpected existing count diff for " << uuid.toString()
- << " op: "
- << redact(oplogEntry.toBSON()));
+ << " op: " << redact(oplogEntry.toBSON()));
if (auto countResult = _parseDroppedCollectionCount(oplogEntry)) {
PendingDropInfo info;
info.count = *countResult;
@@ -843,10 +842,9 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
<< "Oplog entry to roll back is unexpectedly missing dropTarget UUID: "
<< redact(oplogEntry.toBSON()));
invariant(_countDiffs.find(dropTargetUUID) == _countDiffs.end(),
- str::stream() << "Unexpected existing count diff for "
- << dropTargetUUID.toString()
- << " op: "
- << redact(oplogEntry.toBSON()));
+ str::stream()
+ << "Unexpected existing count diff for " << dropTargetUUID.toString()
+ << " op: " << redact(oplogEntry.toBSON()));
if (auto countResult = _parseDroppedCollectionCount(oplogEntry)) {
PendingDropInfo info;
info.count = *countResult;
@@ -1012,9 +1010,7 @@ Status RollbackImpl::_checkAgainstTimeLimit(
if (diff > timeLimit) {
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream() << "not willing to roll back more than " << timeLimit
- << " seconds of data. Have: "
- << diff
- << " seconds.");
+ << " seconds of data. Have: " << diff << " seconds.");
}
} else {
@@ -1044,8 +1040,7 @@ Timestamp RollbackImpl::_findTruncateTimestamp(
invariant(commonPointTime.getStatus());
invariant(commonPointTime.getValue() == commonPointOpTime,
str::stream() << "Common point: " << commonPointOpTime.toString()
- << ", record found: "
- << commonPointTime.getValue().toString());
+ << ", record found: " << commonPointTime.getValue().toString());
// Get the next document, which will be the first document to truncate.
auto truncatePointRecord = oplogCursor->next();
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index 5343879c633..660231c4dbc 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -284,7 +284,7 @@ public:
virtual const std::vector<BSONObj>& docsDeletedForNamespace_forTest(UUID uuid) const& {
MONGO_UNREACHABLE;
}
- void docsDeletedForNamespace_forTest(UUID)&& = delete;
+ void docsDeletedForNamespace_forTest(UUID) && = delete;
protected:
/**
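
The single-token change in rollback_impl.h is a quirk of how clang-format 7 parses a ref-qualifier on a deleted member: the `&&` after the parameter list is now spaced on both sides, as if it were a binary operator. Both spellings compile identically; a self-contained sketch of the same overload pair:

    #include <vector>

    class Example {
    public:
        // Callable only on lvalues.
        const std::vector<int>& data() const& {
            return _data;
        }
        // Rvalue overload deleted; note the "&& = delete" spacing that
        // clang-format 7 emits for this position.
        std::vector<int> data() && = delete;

    private:
        std::vector<int> _data;
    };
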
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index 927219c46db..98ac60952ca 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -69,37 +69,21 @@ std::string kGenericUUIDStr = "b4c66a44-c1ca-4d86-8d25-12e82fa2de5b";
BSONObj makeInsertOplogEntry(long long time, BSONObj obj, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "i"
- << "o"
- << obj
- << "ns"
- << ns
- << "ui"
- << uuid);
+ << "o" << obj << "ns" << ns << "ui" << uuid);
}
BSONObj makeUpdateOplogEntry(
long long time, BSONObj query, BSONObj update, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "u"
- << "ns"
- << ns
- << "ui"
- << uuid
- << "o2"
- << query
- << "o"
+ << "ns" << ns << "ui" << uuid << "o2" << query << "o"
<< BSON("$set" << update));
}
BSONObj makeDeleteOplogEntry(long long time, BSONObj id, StringData ns, UUID uuid) {
return BSON("ts" << Timestamp(time, time) << "t" << time << "op"
<< "d"
- << "ns"
- << ns
- << "ui"
- << uuid
- << "o"
- << id);
+ << "ns" << ns << "ui" << uuid << "o" << id);
}
class RollbackImplForTest final : public RollbackImpl {
@@ -380,12 +364,7 @@ BSONObj makeOp(OpTime time) {
auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr));
return BSON("ts" << time.getTimestamp() << "t" << time.getTerm() << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
- << nss.ns()
- << "ui"
- << kGenericUUID);
+ << "o" << BSONObj() << "ns" << nss.ns() << "ui" << kGenericUUID);
}
BSONObj makeOp(int count) {
@@ -400,13 +379,9 @@ auto makeOpWithWallClockTime(long count, long wallClockMillis) {
auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr));
return BSON("ts" << Timestamp(count, count) << "t" << (long long)count << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "top"
- << "ui"
- << kGenericUUID
- << "wall"
+ << "ui" << kGenericUUID << "wall"
<< Date_t::fromMillisSinceEpoch(wallClockMillis));
};
@@ -955,14 +930,10 @@ TEST_F(RollbackImplTest, RollbackDoesNotWriteRollbackFilesIfNoInsertsOrUpdatesAf
const auto uuid = UUID::gen();
const auto nss = NamespaceString("db.coll");
const auto coll = _initializeCollection(_opCtx.get(), uuid, nss);
- const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
- << "c"
- << "o"
- << BSON("create" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ const auto oplogEntry =
+ BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
+ << "c"
+ << "o" << BSON("create" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
ASSERT_OK(_rollback->runRollback(_opCtx.get()));
@@ -1183,12 +1154,7 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenInsertsAndDropOfCollectio
const auto oplogEntry =
BSON("ts" << dropOpTime.getTimestamp() << "t" << dropOpTime.getTerm() << "op"
<< "c"
- << "o"
- << BSON("drop" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ << "o" << BSON("drop" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
ASSERT_OK(_rollback->runRollback(_opCtx.get()));
@@ -1213,14 +1179,10 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenCreateCollAndInsertsAreRo
const auto nss = NamespaceString("db.people");
const auto uuid = UUID::gen();
const auto coll = _initializeCollection(_opCtx.get(), uuid, nss);
- const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
- << "c"
- << "o"
- << BSON("create" << nss.coll())
- << "ns"
- << nss.ns()
- << "ui"
- << uuid);
+ const auto oplogEntry =
+ BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op"
+ << "c"
+ << "o" << BSON("create" << nss.coll()) << "ns" << nss.ns() << "ui" << uuid);
ASSERT_OK(_insertOplogEntry(oplogEntry));
// Insert documents into the collection.
@@ -1584,14 +1546,14 @@ public:
void assertRollbackInfoContainsObjectForUUID(UUID uuid, BSONObj bson) {
const auto& uuidToIdMap = _rbInfo.rollbackDeletedIdsMap;
auto search = uuidToIdMap.find(uuid);
- ASSERT(search != uuidToIdMap.end()) << "map is unexpectedly missing an entry for uuid "
- << uuid.toString() << " containing object "
- << bson.jsonString();
+ ASSERT(search != uuidToIdMap.end())
+ << "map is unexpectedly missing an entry for uuid " << uuid.toString()
+ << " containing object " << bson.jsonString();
const auto& idObjSet = search->second;
const auto iter = idObjSet.find(bson);
- ASSERT(iter != idObjSet.end()) << "_id object set is unexpectedly missing object "
- << bson.jsonString() << " in namespace with uuid "
- << uuid.toString();
+ ASSERT(iter != idObjSet.end())
+ << "_id object set is unexpectedly missing object " << bson.jsonString()
+ << " in namespace with uuid " << uuid.toString();
}
@@ -1675,12 +1637,12 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropColl
TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfCreateIndexOplogEntry) {
auto nss = NamespaceString("test", "coll");
- auto indexObj = BSON("createIndexes" << nss.coll() << "ns" << nss.toString() << "v"
- << static_cast<int>(IndexDescriptor::IndexVersion::kV2)
- << "key"
- << "x"
- << "name"
- << "x_1");
+ auto indexObj =
+ BSON("createIndexes" << nss.coll() << "ns" << nss.toString() << "v"
+ << static_cast<int>(IndexDescriptor::IndexVersion::kV2) << "key"
+ << "x"
+ << "name"
+ << "x_1");
auto cmdOp =
makeCommandOp(Timestamp(2, 2), UUID::gen(), nss.getCommandNS().toString(), indexObj, 2);
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 2170ee1663d..9af1717b4ab 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -105,9 +105,7 @@ StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfoByUUID(const std::strin
return StatusWith<BSONObj>(ErrorCodes::NoSuchKey,
str::stream()
<< "No collection info found for collection with uuid: "
- << uuid.toString()
- << " in db: "
- << db);
+ << uuid.toString() << " in db: " << db);
}
invariant(info.size() == 1U);
return info.front();
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 46baaac89be..b2afd664b94 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -295,12 +295,9 @@ void RollbackResyncsCollectionOptionsTest::resyncCollectionOptionsTest(
auto commonOpUuid = unittest::assertGet(UUID::parse("f005ba11-cafe-bead-f00d-123456789abc"));
auto commonOpBson = BSON("ts" << Timestamp(1, 1) << "t" << 1LL << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "rollback_test.test"
- << "ui"
- << commonOpUuid);
+ << "ui" << commonOpUuid);
auto commonOperation = std::make_pair(commonOpBson, RecordId(1));
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 11ff4960681..2ef523e34b0 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -83,13 +83,13 @@
namespace mongo {
-using std::shared_ptr;
-using std::unique_ptr;
using std::list;
using std::map;
+using std::pair;
using std::set;
+using std::shared_ptr;
using std::string;
-using std::pair;
+using std::unique_ptr;
namespace repl {
@@ -199,10 +199,10 @@ Status FixUpInfo::recordDropTargetInfo(const BSONElement& dropTarget,
OpTime opTime) {
StatusWith<UUID> dropTargetUUIDStatus = UUID::parse(dropTarget);
if (!dropTargetUUIDStatus.isOK()) {
- std::string message = str::stream() << "Unable to roll back renameCollection. Cannot parse "
- "dropTarget UUID. Returned status: "
- << redact(dropTargetUUIDStatus.getStatus())
- << ", oplog entry: " << redact(obj);
+ std::string message = str::stream()
+ << "Unable to roll back renameCollection. Cannot parse "
+ "dropTarget UUID. Returned status: "
+ << redact(dropTargetUUIDStatus.getStatus()) << ", oplog entry: " << redact(obj);
error() << message;
return dropTargetUUIDStatus.getStatus();
}
@@ -227,8 +227,8 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
// Checks that the oplog entry is smaller than 512 MB. We do not roll back if the
// oplog entry is larger than 512 MB.
if (ourObj.objsize() > 512 * 1024 * 1024)
- throw RSFatalException(str::stream() << "Rollback too large, oplog size: "
- << ourObj.objsize());
+ throw RSFatalException(str::stream()
+ << "Rollback too large, oplog size: " << ourObj.objsize());
// If required fields are not present in the BSONObj for an applyOps entry, create these fields
// and populate them with dummy values before parsing ourObj as an oplog entry.
@@ -1235,8 +1235,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// is rolled back upstream and we restart, we expect to still have the
// collection.
- log() << nss->ns() << " not found on remote host, so we do not roll back collmod "
- "operation. Instead, we will drop the collection soon.";
+ log() << nss->ns()
+ << " not found on remote host, so we do not roll back collmod "
+ "operation. Instead, we will drop the collection soon.";
continue;
}
@@ -1246,10 +1247,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// Updates the collection flags.
if (auto optionsField = info["options"]) {
if (optionsField.type() != Object) {
- throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": expected 'options' to be an "
- << "Object, got "
- << typeName(optionsField.type()));
+ throw RSFatalException(str::stream()
+ << "Failed to parse options " << info
+ << ": expected 'options' to be an "
+ << "Object, got " << typeName(optionsField.type()));
}
// Removes the option.uuid field. We do not allow the options.uuid field
@@ -1261,8 +1262,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto status = options.parse(optionsFieldObj, CollectionOptions::parseForCommand);
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": "
- << status.toString());
+ << ": " << status.toString());
}
// TODO(SERVER-27992): Set options.uuid.
@@ -1281,13 +1281,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto validatorStatus = collection->updateValidator(
opCtx, options.validator, options.validationLevel, options.validationAction);
if (!validatorStatus.isOK()) {
- throw RSFatalException(
- str::stream() << "Failed to update validator for " << nss->toString() << " ("
- << uuid
- << ") with "
- << redact(info)
- << ". Got: "
- << validatorStatus.toString());
+ throw RSFatalException(str::stream()
+ << "Failed to update validator for " << nss->toString()
+ << " (" << uuid << ") with " << redact(info)
+ << ". Got: " << validatorStatus.toString());
}
wuow.commit();
@@ -1377,8 +1374,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
<< " to archive file: " << redact(status);
throw RSFatalException(str::stream()
<< "Rollback cannot write document in namespace "
- << nss->ns()
- << " to archive file.");
+ << nss->ns() << " to archive file.");
}
} else {
error() << "Rollback cannot find object: " << pattern << " in namespace "
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index bfacdd849cb..991d0851afc 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -81,21 +81,16 @@ OplogInterfaceMock::Operation makeDropIndexOplogEntry(Collection* collection,
BSONObj key,
std::string indexName,
int time) {
- auto indexSpec =
- BSON("ns" << collection->ns().ns() << "key" << key << "name" << indexName << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("ns" << collection->ns().ns() << "key" << key << "name" << indexName
+ << "v" << static_cast<int>(kIndexVersion));
return std::make_pair(
BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
- << "ui"
- << collection->uuid().get()
- << "ns"
+ << "ui" << collection->uuid().get() << "ns"
<< "test.$cmd"
- << "o"
- << BSON("dropIndexes" << collection->ns().coll() << "index" << indexName)
- << "o2"
- << indexSpec),
+ << "o" << BSON("dropIndexes" << collection->ns().coll() << "index" << indexName)
+ << "o2" << indexSpec),
RecordId(time));
}
@@ -103,22 +98,15 @@ OplogInterfaceMock::Operation makeCreateIndexOplogEntry(Collection* collection,
BSONObj key,
std::string indexName,
int time) {
- auto indexSpec =
- BSON("createIndexes" << collection->ns().coll() << "ns" << collection->ns().ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << key
- << "name"
- << indexName);
+ auto indexSpec = BSON(
+ "createIndexes" << collection->ns().coll() << "ns" << collection->ns().ns() << "v"
+ << static_cast<int>(kIndexVersion) << "key" << key << "name" << indexName);
return std::make_pair(BSON("ts" << Timestamp(Seconds(time), 0) << "op"
<< "c"
<< "ns"
<< "test.$cmd"
- << "ui"
- << collection->uuid().get()
- << "o"
- << indexSpec),
+ << "ui" << collection->uuid().get() << "o" << indexSpec),
RecordId(time));
}
@@ -140,11 +128,7 @@ OplogInterfaceMock::Operation makeRenameCollectionOplogEntry(const NamespaceStri
}
return std::make_pair(BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm() << "op"
<< "c"
- << "ui"
- << collectionUUID
- << "ns"
- << renameFrom.ns()
- << "o"
+ << "ui" << collectionUUID << "ns" << renameFrom.ns() << "o"
<< obj),
RecordId(opTime.getTimestamp().getSecs()));
}
@@ -153,12 +137,9 @@ BSONObj makeOp(long long seconds) {
auto uuid = unittest::assertGet(UUID::parse("f005ba11-cafe-bead-f00d-123456789abc"));
return BSON("ts" << Timestamp(seconds, seconds) << "t" << seconds << "op"
<< "n"
- << "o"
- << BSONObj()
- << "ns"
+ << "o" << BSONObj() << "ns"
<< "rs_rollback.test"
- << "ui"
- << uuid);
+ << "ui" << uuid);
}
int recordId = 0;
@@ -294,12 +275,9 @@ int _testRollbackDelete(OperationContext* opCtx,
auto commonOperation = makeOpAndRecordId(1);
auto deleteOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "d"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0)),
+ << "o" << BSON("_id" << 0)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -423,12 +401,9 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
auto commonOperation = makeOpAndRecordId(1);
auto insertDocumentOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
- << "o"
- << BSON("a" << 1)),
+ << "o" << BSON("a" << 1)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -467,8 +442,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
NamespaceString nss("test", "coll");
auto collection = _createCollection(_opCtx.get(), nss.toString(), options);
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("a" << 1)
- << "name"
+ << BSON("a" << 1) << "name"
<< "a_1");
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
@@ -492,13 +466,11 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
_coordinator,
_replicationProcess.get()));
stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Dropped index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ ASSERT_EQUALS(
+ 1,
+ countLogLinesContaining(str::stream()
+ << "Dropped index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
{
Lock::DBLock dbLock(_opCtx.get(), nss.db(), MODE_S);
auto indexCatalog = collection->getIndexCatalog();
@@ -514,9 +486,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
auto collection = _createCollection(_opCtx.get(), "test.t", options);
auto indexSpec = BSON("ns"
<< "test.t"
- << "key"
- << BSON("a" << 1)
- << "name"
+ << "key" << BSON("a" << 1) << "name"
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
@@ -665,9 +635,7 @@ TEST_F(RSRollbackTest, RollingBackCreateIndexAndRenameWithLongName) {
auto longName = std::string(115, 'a');
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("b" << 1)
- << "name"
- << longName);
+ << BSON("b" << 1) << "name" << longName);
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
ASSERT_EQUALS(2, numIndexes);
@@ -720,8 +688,7 @@ TEST_F(RSRollbackTest, RollingBackDropAndCreateOfSameIndexNameWithDifferentSpecs
auto collection = _createCollection(_opCtx.get(), nss.toString(), options);
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON("b" << 1)
- << "name"
+ << BSON("b" << 1) << "name"
<< "a_1");
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), collection, nss, indexSpec);
@@ -752,19 +719,15 @@ TEST_F(RSRollbackTest, RollingBackDropAndCreateOfSameIndexNameWithDifferentSpecs
ASSERT(indexCatalog);
ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_opCtx.get()));
ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Dropped index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ countLogLinesContaining(
+ str::stream()
+ << "Dropped index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
ASSERT_EQUALS(1,
- countLogLinesContaining(str::stream()
- << "Created index in rollback for collection: "
- << nss.toString()
- << ", UUID: "
- << options.uuid->toString()
- << ", index: a_1"));
+ countLogLinesContaining(
+ str::stream()
+ << "Created index in rollback for collection: " << nss.toString()
+ << ", UUID: " << options.uuid->toString() << ", index: a_1"));
std::vector<const IndexDescriptor*> indexes;
indexCatalog->findIndexesByKeyPattern(_opCtx.get(), BSON("a" << 1), false, &indexes);
ASSERT(indexes.size() == 1);
@@ -786,20 +749,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
<< "t"
<< "ns"
<< "test.t"
- << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("a" << 1));
-
- auto createIndexOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
- << "c"
- << "ns"
- << "test.$cmd"
- << "ui"
- << collection->uuid().get()
- << "o"
- << command),
- RecordId(2));
+ << "v" << static_cast<int>(kIndexVersion) << "key" << BSON("a" << 1));
+
+ auto createIndexOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
+ << "c"
+ << "ns"
+ << "test.$cmd"
+ << "ui" << collection->uuid().get() << "o" << command),
+ RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
@@ -829,9 +787,7 @@ std::string idxName(std::string id) {
// Create an index spec object given the namespace and the index 'id'.
BSONObj idxSpec(NamespaceString nss, std::string id) {
return BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON(idxKey(id) << 1)
- << "name"
- << idxName(id));
+ << BSON(idxKey(id) << 1) << "name" << idxName(id));
}
// Returns the number of indexes that exist on the given collection.
@@ -954,9 +910,7 @@ TEST_F(RSRollbackTest, RollbackCreateDropRecreateIndexOnCollection) {
// Create the necessary indexes. Index 0 is created, dropped, and created again in the
// sequence of ops, so we create that index.
auto indexSpec = BSON("ns" << nss.toString() << "v" << static_cast<int>(kIndexVersion) << "key"
- << BSON(idxKey("0") << 1)
- << "name"
- << idxName("0"));
+ << BSON(idxKey("0") << 1) << "name" << idxName("0"));
int numIndexes = _createIndexOnEmptyCollection(_opCtx.get(), coll, nss, indexSpec);
ASSERT_EQUALS(2, numIndexes);
@@ -991,9 +945,7 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
auto commonOperation = makeOpAndRecordId(1);
auto unknownCommandOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
<< "o"
<< BSON("convertToCapped"
@@ -1027,9 +979,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
auto dropCollectionOperation =
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
- << "ui"
- << coll->uuid().get()
- << "ns"
+ << "ui" << coll->uuid().get() << "ns"
<< "test.t"
<< "o"
<< BSON("drop"
@@ -1351,9 +1301,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionThenRenameCollectionToDroppedCollec
auto dropCollectionOperation =
std::make_pair(BSON("ts" << dropTime.getTimestamp() << "t" << dropTime.getTerm() << "op"
<< "c"
- << "ui"
- << droppedCollectionUUID
- << "ns"
+ << "ui" << droppedCollectionUUID << "ns"
<< "test.x"
<< "o"
<< BSON("drop"
@@ -1423,16 +1371,15 @@ TEST_F(RSRollbackTest, RollbackRenameCollectionThenCreateNewCollectionWithOldNam
false,
OpTime(Timestamp(2, 0), 5));
- auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(3), 0) << "op"
- << "c"
- << "ui"
- << createdCollectionUUID
- << "ns"
- << "test.x"
- << "o"
- << BSON("create"
- << "x")),
- RecordId(3));
+ auto createCollectionOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(3), 0) << "op"
+ << "c"
+ << "ui" << createdCollectionUUID << "ns"
+ << "test.x"
+ << "o"
+ << BSON("create"
+ << "x")),
+ RecordId(3));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
@@ -1473,9 +1420,7 @@ TEST_F(RSRollbackTest, RollbackCollModCommandFailsIfRBIDChangesWhileSyncingColle
auto commonOperation = makeOpAndRecordId(1);
auto collModOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << coll->uuid().get()
- << "ns"
+ << "ui" << coll->uuid().get() << "ns"
<< "test.t"
<< "o"
<< BSON("collMod"
@@ -1519,8 +1464,7 @@ TEST_F(RSRollbackTest, RollbackDropDatabaseCommand) {
<< "c"
<< "ns"
<< "test.$cmd"
- << "o"
- << BSON("dropDatabase" << 1)),
+ << "o" << BSON("dropDatabase" << 1)),
RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -1588,93 +1532,47 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
UUID uuid = coll->uuid().get();
const auto commonOperation = makeOpAndRecordId(1);
const auto applyOpsOperation =
- std::make_pair(makeApplyOpsOplogEntry(Timestamp(Seconds(2), 0),
- {BSON("op"
- << "u"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o2"
- << BSON("_id" << 1)
- << "o"
- << BSON("_id" << 1 << "v" << 2)),
- BSON("op"
- << "u"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(2, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o2"
- << BSON("_id" << 2)
- << "o"
- << BSON("_id" << 2 << "v" << 4)),
- BSON("op"
- << "d"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(3, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 3)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(4, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- // applyOps internal oplog entries are not required
- // to have a timestamp.
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "ts"
- << Timestamp(4, 1)
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4)),
- BSON("op"
- << "i"
- << "ui"
- << uuid
- << "t"
- << 1LL
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 4))}),
+ std::make_pair(makeApplyOpsOplogEntry(
+ Timestamp(Seconds(2), 0),
+ {BSON("op"
+ << "u"
+ << "ui" << uuid << "ts" << Timestamp(1, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o2" << BSON("_id" << 1) << "o"
+ << BSON("_id" << 1 << "v" << 2)),
+ BSON("op"
+ << "u"
+ << "ui" << uuid << "ts" << Timestamp(2, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o2" << BSON("_id" << 2) << "o"
+ << BSON("_id" << 2 << "v" << 4)),
+ BSON("op"
+ << "d"
+ << "ui" << uuid << "ts" << Timestamp(3, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 3)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "ts" << Timestamp(4, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ // applyOps internal oplog entries are not required
+ // to have a timestamp.
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "ts" << Timestamp(4, 1) << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4)),
+ BSON("op"
+ << "i"
+ << "ui" << uuid << "t" << 1LL << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 4))}),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
@@ -1742,9 +1640,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
auto commonOperation = makeOpAndRecordId(1);
auto createCollectionOperation = std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "op"
<< "c"
- << "ui"
- << coll->uuid().get()
- << "ns"
+ << "ui" << coll->uuid().get() << "ns"
<< "test.t"
<< "o"
<< BSON("create"
@@ -1972,31 +1868,19 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
const auto validOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< ""
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
RSFatalException);
@@ -2005,31 +1889,19 @@ TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
const auto validOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSONObj());
+ << "o" << BSONObj());
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
RSFatalException);
@@ -2038,16 +1910,10 @@ TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
DEATH_TEST_F(RSRollbackTest, LocalUpdateEntryWithoutO2IsFatal, "Fatal Assertion") {
const auto invalidOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false)
@@ -2057,34 +1923,20 @@ DEATH_TEST_F(RSRollbackTest, LocalUpdateEntryWithoutO2IsFatal, "Fatal Assertion"
TEST(RSRollbackTest, LocalUpdateEntryWithEmptyO2IsFatal) {
const auto validOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "o2"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "o2"
<< BSON("_id" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
const auto invalidOplogEntry = BSON("op"
<< "u"
- << "ui"
- << UUID::gen()
- << "ts"
- << Timestamp(1, 1)
- << "t"
- << 1LL
- << "ns"
+ << "ui" << UUID::gen() << "ts" << Timestamp(1, 1) << "t"
+ << 1LL << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "o2"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "o2"
<< BSONObj());
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, invalidOplogEntry, false),
@@ -2094,12 +1946,9 @@ TEST(RSRollbackTest, LocalUpdateEntryWithEmptyO2IsFatal) {
DEATH_TEST_F(RSRollbackTest, LocalEntryWithTxnNumberWithoutSessionIdIsFatal, "invariant") {
auto validOplogEntry = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1));
+ << "o" << BSON("_id" << 1 << "a" << 1));
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, validOplogEntry, false));
@@ -2120,18 +1969,10 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberWithoutTxnTableUUIDIsFatal) {
auto lsid = makeLogicalSessionIdForTest();
auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON());
FixUpInfo fui;
ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(
@@ -2145,12 +1986,9 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberAddsTransactionTableDocToBeRefetch
// With no txnNumber present, no extra documents need to be refetched.
auto entryWithoutTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << UUID::gen()
- << "ns"
+ << "ui" << UUID::gen() << "ns"
<< "test.t2"
- << "o"
- << BSON("_id" << 2 << "a" << 2));
+ << "o" << BSON("_id" << 2 << "a" << 2));
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(
nullptr /* opCtx */, OplogInterfaceMock(), fui, entryWithoutTxnNumber, false));
@@ -2163,18 +2001,10 @@ TEST_F(RSRollbackTest, LocalEntryWithTxnNumberAddsTransactionTableDocToBeRefetch
auto lsid = makeLogicalSessionIdForTest();
auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL << "op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2204,20 +2034,11 @@ TEST_F(RSRollbackTest, LocalEntryWithPartialTxnAddsTransactionTableDocToBeRefetc
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2240,15 +2061,8 @@ TEST_F(RSRollbackTest, LocalAbortTxnRefetchesTransactionTableEntry) {
<< "c"
<< "ns"
<< "admin.$cmd"
- << "o"
- << BSON("abortTransaction" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("abortTransaction" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 0) << "t" << 1LL));
UUID transactionTableUUID = UUID::gen();
@@ -2276,15 +2090,8 @@ TEST_F(RSRollbackTest, LocalEntryWithAbortedPartialTxnRefetchesOnlyTransactionTa
<< "c"
<< "ns"
<< "admin.$cmd"
- << "o"
- << BSON("abortTransaction" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("abortTransaction" << 1) << "txnNumber" << 1LL
+ << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 1) << "t" << 1LL));
auto entryWithTxnNumber =
@@ -2295,20 +2102,11 @@ TEST_F(RSRollbackTest, LocalEntryWithAbortedPartialTxnRefetchesOnlyTransactionTa
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON());
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
@@ -2335,21 +2133,11 @@ TEST_F(RSRollbackTest, LocalEntryWithCommittedTxnRefetchesDocsAndTransactionTabl
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 2 << "a" << 2)))
- << "count"
- << 2)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 2 << "a" << 2)))
+ << "count" << 2)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(1), 1) << "t" << 1LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(2));
@@ -2361,21 +2149,11 @@ TEST_F(RSRollbackTest, LocalEntryWithCommittedTxnRefetchesDocsAndTransactionTabl
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(0, 0) << "t" << -1LL));
auto partialTxnOperation = std::make_pair(partialTxnEntry, RecordId(1));
@@ -2428,21 +2206,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0 << "a" << 0)))
- << "count"
- << 3)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 3
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 0 << "a" << 0)))
+ << "count" << 3)
+ << "txnNumber" << 1LL << "stmtId" << 3 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 11) << "t" << 10LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(12));
@@ -2454,21 +2222,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 9) << "t" << 10LL));
auto operationAfterCommonPoint = std::make_pair(entryAfterCommonPoint, RecordId(11));
@@ -2480,21 +2238,11 @@ TEST_F(RSRollbackTest, RollbackFetchesTransactionOperationBeforeCommonPoint) {
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 2 << "a" << 2)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 2 << "a" << 2)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 1 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(0, 0) << "t" << -1LL));
auto operationBeforeCommonPoint = std::make_pair(entryBeforeCommonPoint, RecordId(9));
@@ -2572,19 +2320,11 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 0 << "a" << 0)))
- << "count"
- << 3)
- << "stmtId"
- << 3
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 0 << "a" << 0)))
+ << "count" << 3)
+ << "stmtId" << 3 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 11) << "t" << 10LL));
auto commitTxnOperation = std::make_pair(commitTxnEntry, RecordId(12));
@@ -2596,21 +2336,11 @@ TEST_F(RSRollbackTest, RollbackIncompleteTransactionReturnsUnrecoverableRollback
<< "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ui"
- << uuid
- << "ns"
+ << "ui" << uuid << "ns"
<< "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)))
- << "partialTxn"
- << true)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 2
- << "lsid"
- << lsid.toBSON()
- << "prevOpTime"
+ << "o" << BSON("_id" << 1 << "a" << 1)))
+ << "partialTxn" << true)
+ << "txnNumber" << 1LL << "stmtId" << 2 << "lsid" << lsid.toBSON() << "prevOpTime"
<< BSON("ts" << Timestamp(Seconds(10), 9) << "t" << 10LL));
auto operationAfterCommonPoint = std::make_pair(entryAfterCommonPoint, RecordId(11));
@@ -2653,20 +2383,13 @@ TEST_F(RSRollbackTest, RollbackFailsIfTransactionDocumentRefetchReturnsDifferent
// transaction number and session id.
FixUpInfo fui;
- auto entryWithTxnNumber = BSON("ts" << Timestamp(Seconds(2), 1) << "t" << 1LL << "op"
- << "i"
- << "ui"
- << UUID::gen()
- << "ns"
- << "test.t"
- << "o"
- << BSON("_id" << 1 << "a" << 1)
- << "txnNumber"
- << 1LL
- << "stmtId"
- << 1
- << "lsid"
- << makeLogicalSessionIdForTest().toBSON());
+ auto entryWithTxnNumber =
+ BSON("ts" << Timestamp(Seconds(2), 1) << "t" << 1LL << "op"
+ << "i"
+ << "ui" << UUID::gen() << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 1 << "a" << 1) << "txnNumber" << 1LL << "stmtId" << 1
+ << "lsid" << makeLogicalSessionIdForTest().toBSON());
UUID transactionTableUUID = UUID::gen();
fui.transactionTableUUID = transactionTableUUID;
diff --git a/src/mongo/db/repl/split_horizon_test.cpp b/src/mongo/db/repl/split_horizon_test.cpp
index 0a3a655ccaf..95b2df2ad36 100644
--- a/src/mongo/db/repl/split_horizon_test.cpp
+++ b/src/mongo/db/repl/split_horizon_test.cpp
@@ -300,8 +300,7 @@ TEST(SplitHorizonTesting, BSONConstruction) {
// Two horizons with duplicate host and ports.
{BSON("horizonWithDuplicateHost1" << matchingHostAndPort << "horizonWithDuplicateHost2"
- << matchingHostAndPort
- << "uniqueHorizon"
+ << matchingHostAndPort << "uniqueHorizon"
<< nonmatchingHost),
defaultHostAndPort,
{},
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 39d5c73b6e4..19e7c8840fa 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -501,20 +501,16 @@ Status StorageInterfaceImpl::renameCollection(OperationContext* opCtx,
if (fromNS.db() != toNS.db()) {
return Status(ErrorCodes::InvalidNamespace,
str::stream() << "Cannot rename collection between databases. From NS: "
- << fromNS.ns()
- << "; to NS: "
- << toNS.ns());
+ << fromNS.ns() << "; to NS: " << toNS.ns());
}
return writeConflictRetry(opCtx, "StorageInterfaceImpl::renameCollection", fromNS.ns(), [&] {
AutoGetDb autoDB(opCtx, fromNS.db(), MODE_X);
if (!autoDB.getDb()) {
return Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "Cannot rename collection from " << fromNS.ns() << " to "
- << toNS.ns()
- << ". Database "
- << fromNS.db()
- << " not found.");
+ str::stream()
+ << "Cannot rename collection from " << fromNS.ns() << " to "
+ << toNS.ns() << ". Database " << fromNS.db() << " not found.");
}
WriteUnitOfWork wunit(opCtx);
const auto status = autoDB.getDb()->renameCollection(opCtx, fromNS, toNS, stayTemp);
@@ -557,8 +553,7 @@ Status StorageInterfaceImpl::setIndexIsMultikey(OperationContext* opCtx,
if (!idx) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "Could not find index " << indexName << " in "
- << nss.ns()
- << " to set to multikey.");
+ << nss.ns() << " to set to multikey.");
}
collection->getIndexCatalog()->setMultikeyPaths(opCtx, idx, paths);
wunit.commit();
@@ -646,16 +641,13 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
if (!indexDescriptor) {
return Result(ErrorCodes::IndexNotFound,
str::stream() << "Index not found, ns:" << nsOrUUID.toString()
- << ", index: "
- << *indexName);
+ << ", index: " << *indexName);
}
if (indexDescriptor->isPartial()) {
return Result(ErrorCodes::IndexOptionsConflict,
str::stream()
<< "Partial index is not allowed for this operation, ns:"
- << nsOrUUID.toString()
- << ", index: "
- << *indexName);
+ << nsOrUUID.toString() << ", index: " << *indexName);
}
KeyPattern keyPattern(indexDescriptor->keyPattern());
@@ -855,11 +847,11 @@ Status _updateWithQuery(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult = getCollection(
- autoColl,
- nss,
- str::stream() << "Unable to update documents in " << nss.ns() << " using query "
- << request.getQuery());
+ auto collectionResult =
+ getCollection(autoColl,
+ nss,
+ str::stream() << "Unable to update documents in " << nss.ns()
+ << " using query " << request.getQuery());
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
@@ -988,11 +980,11 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
}
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
- auto collectionResult = getCollection(
- autoColl,
- nss,
- str::stream() << "Unable to delete documents in " << nss.ns() << " using filter "
- << filter);
+ auto collectionResult =
+ getCollection(autoColl,
+ nss,
+ str::stream() << "Unable to delete documents in " << nss.ns()
+ << " using filter " << filter);
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 19d57312b19..8ade17d6efa 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -69,11 +69,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
BSONObj makeIdIndexSpec(const NamespaceString& nss) {
return BSON("ns" << nss.toString() << "name"
<< "_id_"
- << "key"
- << BSON("_id" << 1)
- << "unique"
- << true
- << "v"
+ << "key" << BSON("_id" << 1) << "unique" << true << "v"
<< static_cast<int>(kIndexVersion));
}
@@ -299,8 +295,7 @@ void _assertRollbackIDDocument(OperationContext* opCtx, int id) {
opCtx,
NamespaceString(StorageInterfaceImpl::kDefaultRollbackIdNamespace),
{BSON("_id" << StorageInterfaceImpl::kRollbackIdDocumentId
- << StorageInterfaceImpl::kRollbackIdFieldName
- << id)});
+ << StorageInterfaceImpl::kRollbackIdFieldName << id)});
}
TEST_F(StorageInterfaceImplTest, RollbackIdInitializesIncrementsAndReadsProperly) {
@@ -380,8 +375,7 @@ TEST_F(StorageInterfaceImplTest, GetRollbackIDReturnsBadStatusIfRollbackIDIsNotI
std::vector<TimestampedBSONObj> badDoc = {
TimestampedBSONObj{BSON("_id" << StorageInterfaceImpl::kRollbackIdDocumentId
- << StorageInterfaceImpl::kRollbackIdFieldName
- << "bad id"),
+ << StorageInterfaceImpl::kRollbackIdFieldName << "bad id"),
Timestamp::min()}};
ASSERT_OK(storage.insertDocuments(opCtx, nss, transformInserts(badDoc)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, storage.getRollbackID(opCtx).getStatus());
@@ -625,8 +619,7 @@ TEST_F(StorageInterfaceImplTest, DestroyingUncommittedCollectionBulkLoaderDropsI
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' by letting it go out of scope.
};
@@ -650,8 +643,7 @@ TEST_F(StorageInterfaceImplTest,
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns())};
+ << "ns" << nss.ns())};
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' in a new thread that does not have a Client.
stdx::thread([&loader]() { loader.reset(); }).join();
@@ -914,9 +906,7 @@ TEST_F(StorageInterfaceImplTest, FindDocumentsReturnsIndexOptionsConflictIfIndex
auto nss = makeNamespace(_agent);
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << nss.ns()
- << "partialFilterExpression"
+ << "ns" << nss.ns() << "partialFilterExpression"
<< BSON("y" << 1))};
auto loader = unittest::assertGet(storage.createCollectionForBulkLoading(
nss, generateOptionsWithUuid(), makeIdIndexSpec(nss), indexes));
@@ -975,8 +965,8 @@ void _assertDocumentsEqual(const StatusWith<std::vector<BSONObj>>& statusWithDoc
const std::vector<BSONObj>& expectedDocs) {
const auto actualDocs = unittest::assertGet(statusWithDocs);
auto iter = actualDocs.cbegin();
- std::string msg = str::stream() << "expected: " << _toString(expectedDocs)
- << "; actual: " << _toString(actualDocs);
+ std::string msg = str::stream()
+ << "expected: " << _toString(expectedDocs) << "; actual: " << _toString(actualDocs);
for (const auto& doc : expectedDocs) {
ASSERT_TRUE(iter != actualDocs.cend()) << msg;
ASSERT_BSONOBJ_EQ(doc, *(iter++));
@@ -2264,9 +2254,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatab
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
ASSERT_EQUALS(std::string(str::stream()
<< "Database [nosuchdb] not found. Unable to delete documents in "
- << nss.ns()
- << " using filter "
- << filter),
+ << nss.ns() << " using filter " << filter),
status.reason());
}
@@ -2362,9 +2350,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenColle
ASSERT_EQUALS(std::string(
str::stream()
<< "Collection [mydb.wrongColl] not found. Unable to delete documents in "
- << wrongColl.ns()
- << " using filter "
- << filter),
+ << wrongColl.ns() << " using filter " << filter),
status.reason());
}
@@ -2484,8 +2470,7 @@ TEST_F(StorageInterfaceImplTest,
CollectionOptions options = generateOptionsWithUuid();
options.collation = BSON("locale"
<< "en_US"
- << "strength"
- << 2);
+ << "strength" << 2);
ASSERT_OK(storage.createCollection(opCtx, nss, options));
auto doc1 = BSON("_id" << 1 << "x"
@@ -2660,9 +2645,8 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeySucceeds) {
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
auto indexName = "a_b_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a.b" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a.b" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_EQUALS(_createIndexOnEmptyCollection(opCtx, nss, indexSpec), 2);
MultikeyPaths paths = {{1}};
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index 119f682fba9..cc031904cb8 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -352,8 +352,8 @@ public:
[](const NamespaceString& nss,
const CollectionOptions& options,
const BSONObj idIndexSpec,
- const std::vector<BSONObj>&
- secondaryIndexSpecs) -> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
+ const std::vector<BSONObj>& secondaryIndexSpecs)
+ -> StatusWith<std::unique_ptr<CollectionBulkLoader>> {
return Status{ErrorCodes::IllegalOperation, "CreateCollectionForBulkFn not implemented."};
};
InsertDocumentFn insertDocumentFn = [](OperationContext* opCtx,
@@ -404,8 +404,9 @@ public:
IsAdminDbValidFn isAdminDbValidFn = [](OperationContext*) {
return Status{ErrorCodes::IllegalOperation, "IsAdminDbValidFn not implemented."};
};
- GetCollectionUUIDFn getCollectionUUIDFn = [](
- OperationContext* opCtx, const NamespaceString& nss) -> StatusWith<OptionalCollectionUUID> {
+ GetCollectionUUIDFn getCollectionUUIDFn =
+ [](OperationContext* opCtx,
+ const NamespaceString& nss) -> StatusWith<OptionalCollectionUUID> {
return Status{ErrorCodes::IllegalOperation, "GetCollectionUUIDFn not implemented."};
};
UpgradeNonReplicatedUniqueIndexesFn upgradeNonReplicatedUniqueIndexesFn =
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index adfcc7b2f31..af82a940d35 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -74,8 +74,7 @@ SyncSourceResolver::SyncSourceResolver(executor::TaskExecutor* taskExecutor,
str::stream() << "required optime (if provided) must be more recent than last "
"fetched optime. requiredOpTime: "
<< requiredOpTime.toString()
- << ", lastOpTimeFetched: "
- << lastOpTimeFetched.toString(),
+ << ", lastOpTimeFetched: " << lastOpTimeFetched.toString(),
requiredOpTime.isNull() || requiredOpTime > lastOpTimeFetched);
uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
}
@@ -171,9 +170,8 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeFirstOplogEntryFetcher(
kLocalOplogNss.db().toString(),
BSON("find" << kLocalOplogNss.coll() << "limit" << 1 << "sort" << BSON("$natural" << 1)
<< "projection"
- << BSON(OplogEntryBase::kTimestampFieldName << 1
- << OplogEntryBase::kTermFieldName
- << 1)),
+ << BSON(OplogEntryBase::kTimestampFieldName
+ << 1 << OplogEntryBase::kTermFieldName << 1)),
[=](const StatusWith<Fetcher::QueryResponse>& response,
Fetcher::NextAction*,
BSONObjBuilder*) {
@@ -413,12 +411,11 @@ Status SyncSourceResolver::_compareRequiredOpTimeWithQueryResponse(
const auto opTime = oplogEntry.getOpTime();
if (_requiredOpTime != opTime) {
return Status(ErrorCodes::BadValue,
- str::stream() << "remote oplog contain entry with matching timestamp "
- << opTime.getTimestamp().toString()
- << " but optime "
- << opTime.toString()
- << " does not "
- "match our required optime");
+ str::stream()
+ << "remote oplog contain entry with matching timestamp "
+ << opTime.getTimestamp().toString() << " but optime " << opTime.toString()
+ << " does not "
+ "match our required optime");
}
if (_requiredOpTime.getTerm() != opTime.getTerm()) {
return Status(ErrorCodes::BadValue,
@@ -439,8 +436,7 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
str::stream() << "sync source resolver shut down while looking for "
"required optime "
<< _requiredOpTime.toString()
- << " in candidate's oplog: "
- << candidate))
+ << " in candidate's oplog: " << candidate))
.transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/sync_source_selector.h b/src/mongo/db/repl/sync_source_selector.h
index 0a620d691a2..c21a5e82a14 100644
--- a/src/mongo/db/repl/sync_source_selector.h
+++ b/src/mongo/db/repl/sync_source_selector.h
@@ -41,7 +41,7 @@ class Timestamp;
namespace rpc {
class ReplSetMetadata;
class OplogQueryMetadata;
-}
+} // namespace rpc
namespace repl {
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index a21c1162829..4e7f6553e11 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -786,8 +786,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
str::stream() << "Attempted to apply an oplog entry ("
<< firstOpTimeInBatch.toString()
<< ") which is not greater than our last applied OpTime ("
- << lastAppliedOpTimeAtStartOfBatch.toString()
- << ")."));
+ << lastAppliedOpTimeAtStartOfBatch.toString() << ")."));
}
// Don't allow the fsync+lock thread to see intermediate states of batch application.
@@ -817,8 +816,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
const auto lastAppliedOpTimeAtEndOfBatch = replCoord->getMyLastAppliedOpTime();
invariant(lastAppliedOpTimeAtStartOfBatch == lastAppliedOpTimeAtEndOfBatch,
str::stream() << "the last known applied OpTime has changed from "
- << lastAppliedOpTimeAtStartOfBatch.toString()
- << " to "
+ << lastAppliedOpTimeAtStartOfBatch.toString() << " to "
<< lastAppliedOpTimeAtEndOfBatch.toString()
<< " in the middle of batch application");
@@ -1299,23 +1297,23 @@ void SyncTail::_applyOps(std::vector<MultiApplier::OperationPtrs>& writerVectors
if (writerVectors[i].empty())
continue;
- _writerPool->schedule([
- this,
- &writer = writerVectors.at(i),
- &status = statusVector->at(i),
- &workerMultikeyPathInfo = workerMultikeyPathInfo->at(i)
- ](auto scheduleStatus) {
- invariant(scheduleStatus);
+ _writerPool->schedule(
+ [this,
+ &writer = writerVectors.at(i),
+ &status = statusVector->at(i),
+ &workerMultikeyPathInfo = workerMultikeyPathInfo->at(i)](auto scheduleStatus) {
+ invariant(scheduleStatus);
- auto opCtx = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- // This code path is only executed on secondaries and initial syncing nodes, so it is
- // safe to exclude any writes from Flow Control.
- opCtx->setShouldParticipateInFlowControl(false);
+ // This code path is only executed on secondaries and initial syncing nodes, so it
+ // is safe to exclude any writes from Flow Control.
+ opCtx->setShouldParticipateInFlowControl(false);
- status = opCtx->runWithoutInterruptionExceptAtGlobalShutdown(
- [&] { return _applyFunc(opCtx.get(), &writer, this, &workerMultikeyPathInfo); });
- });
+ status = opCtx->runWithoutInterruptionExceptAtGlobalShutdown([&] {
+ return _applyFunc(opCtx.get(), &writer, this, &workerMultikeyPathInfo);
+ });
+ });
}
}
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 33558e09a39..b5aeb361244 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -355,14 +355,8 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
NamespaceString nss("test.t");
auto op = BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())
- << "ts"
- << Timestamp(1, 1)
- << "ui"
- << UUID::gen());
+ << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll()) << "ts"
+ << Timestamp(1, 1) << "ui" << UUID::gen());
bool applyCmdCalled = false;
_opObserver->onCreateCollectionFn = [&](OperationContext* opCtx,
Collection*,
@@ -387,13 +381,10 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
const BSONObj op = BSON("op"
<< "c"
- << "ns"
- << 12345
- << "o"
+ << "ns" << 12345 << "o"
<< BSON("create"
<< "t")
- << "ts"
- << Timestamp(1, 1));
+ << "ts" << Timestamp(1, 1));
// This test relies on the namespace type check of IDL.
ASSERT_THROWS(
SyncTail::syncApply(_opCtx.get(), op, OplogApplication::Mode::kInitialSync, boost::none),
@@ -493,14 +484,9 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 1)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(0),
@@ -510,14 +496,9 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(1),
@@ -527,11 +508,7 @@ protected:
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 3)))),
_lsid,
_txnNum,
@@ -683,14 +660,10 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyUnpreparedTransactionTwoBatches) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << (i == 1 ? _nss2.ns() : _nss1.ns())
- << "ui"
- << (i == 1 ? *_uuid2 : *_uuid1)
- << "o"
+ << "ns" << (i == 1 ? _nss2.ns() : _nss1.ns()) << "ui"
+ << (i == 1 ? *_uuid2 : *_uuid1) << "o"
<< insertDocs.back()))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
_txnNum,
StmtId(i),
@@ -757,14 +730,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 1)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum1,
StmtId(0),
@@ -774,14 +742,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum1,
@@ -792,14 +755,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 3)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum2,
StmtId(0),
@@ -809,14 +767,9 @@ TEST_F(MultiOplogEntrySyncTailTest, MultiApplyTwoTransactionsOneBatch) {
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 4)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
_lsid,
txnNum2,
StmtId(1),
@@ -877,14 +830,9 @@ protected:
_nss1,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss2.ns()
- << "ui"
- << *_uuid2
- << "o"
+ << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o"
<< BSON("_id" << 3)))
- << "prepare"
- << true),
+ << "prepare" << true),
_lsid,
_txnNum,
StmtId(2),
@@ -894,14 +842,9 @@ protected:
_nss1,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << _nss1.ns()
- << "ui"
- << *_uuid1
- << "o"
+ << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o"
<< BSON("_id" << 0)))
- << "prepare"
- << true),
+ << "prepare" << true),
_lsid,
_txnNum,
StmtId(0),
@@ -2190,28 +2133,18 @@ TEST_F(IdempotencyTest, CreateCollectionWithCollation) {
auto insertOp2 = insert(fromjson("{ _id: 'Foo', x: 1 }"));
auto updateOp = update("foo", BSON("$set" << BSON("x" << 2)));
auto dropColl = makeCommandOplogEntry(nextOpTime(), nss, BSON("drop" << nss.coll()));
- auto options = BSON("collation" << BSON("locale"
- << "en"
- << "caseLevel"
- << false
- << "caseFirst"
- << "off"
- << "strength"
- << 1
- << "numericOrdering"
- << false
- << "alternate"
- << "non-ignorable"
- << "maxVariable"
- << "punct"
- << "normalization"
- << false
- << "backwards"
- << false
- << "version"
- << "57.1")
- << "uuid"
- << uuid);
+ auto options = BSON("collation"
+ << BSON("locale"
+ << "en"
+ << "caseLevel" << false << "caseFirst"
+ << "off"
+ << "strength" << 1 << "numericOrdering" << false << "alternate"
+ << "non-ignorable"
+ << "maxVariable"
+ << "punct"
+ << "normalization" << false << "backwards" << false << "version"
+ << "57.1")
+ << "uuid" << uuid);
auto createColl = makeCreateCollectionOplogEntry(nextOpTime(), nss, options);
// We don't drop and re-create the collection since we don't have ways
@@ -2235,12 +2168,8 @@ TEST_F(IdempotencyTest, CreateCollectionWithIdIndex) {
auto options1 = BSON("idIndex" << BSON("key" << fromjson("{_id: 1}") << "name"
<< "_id_"
- << "v"
- << 2
- << "ns"
- << nss.ns())
- << "uuid"
- << uuid);
+ << "v" << 2 << "ns" << nss.ns())
+ << "uuid" << uuid);
auto createColl1 = makeCreateCollectionOplogEntry(nextOpTime(), nss, options1);
ASSERT_OK(runOpInitialSync(createColl1));
@@ -2274,9 +2203,8 @@ TEST_F(IdempotencyTest, CreateCollectionWithView) {
ASSERT_OK(
runOpInitialSync(makeCreateCollectionOplogEntry(nextOpTime(), viewNss, options.toBSON())));
- auto viewDoc =
- BSON("_id" << NamespaceString(nss.db(), "view").ns() << "viewOn" << nss.coll() << "pipeline"
- << fromjson("[ { '$project' : { 'x' : 1 } } ]"));
+ auto viewDoc = BSON("_id" << NamespaceString(nss.db(), "view").ns() << "viewOn" << nss.coll()
+ << "pipeline" << fromjson("[ { '$project' : { 'x' : 1 } } ]"));
auto insertViewOp = makeInsertDocumentOplogEntry(nextOpTime(), viewNss, viewDoc);
auto dropColl = makeCommandOplogEntry(nextOpTime(), nss, BSON("drop" << nss.coll()));
@@ -2698,14 +2626,9 @@ TEST_F(SyncTailTxnTableTest, RetryableWriteThenMultiStatementTxnWriteOnSameSessi
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss().ns()
- << "ui"
- << *uuid
- << "o"
+ << "ns" << nss().ns() << "ui" << *uuid << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
sessionId,
*sessionInfo.getTxnNumber(),
StmtId(0),
@@ -2754,14 +2677,9 @@ TEST_F(SyncTailTxnTableTest, MultiStatementTxnWriteThenRetryableWriteOnSameSessi
cmdNss,
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss().ns()
- << "ui"
- << *uuid
- << "o"
+ << "ns" << nss().ns() << "ui" << *uuid << "o"
<< BSON("_id" << 2)))
- << "partialTxn"
- << true),
+ << "partialTxn" << true),
sessionId,
*sessionInfo.getTxnNumber(),
StmtId(0),
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index a79cdfa7faa..4c53b558aa1 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -182,7 +182,6 @@ void TaskRunner::_runTasks() {
"this task has been canceled by a previously invoked task"));
}
tasks.clear();
-
};
cancelTasks();
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index 43b0a7a8605..81056086087 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -249,8 +249,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
_syncSource = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
_forceSyncSourceIndex = -1;
log() << "choosing sync source candidate by request: " << _syncSource;
- std::string msg(str::stream() << "syncing from: " << _syncSource.toString()
- << " by request");
+ std::string msg(str::stream()
+ << "syncing from: " << _syncSource.toString() << " by request");
setMyHeartbeatMessage(now, msg);
return _syncSource;
}
@@ -572,8 +572,7 @@ Status TopologyCoordinator::prepareHeartbeatResponseV1(Date_t now,
<< "; remote node's: " << rshb;
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb
- << " reported by remote node");
+ << rshb << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -782,8 +781,9 @@ HeartbeatResponseAction TopologyCoordinator::processHeartbeatResponse(
}
const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
if (memberIndex == -1) {
- LOG(1) << "Could not find " << target << " in current config so ignoring --"
- " current config: "
+ LOG(1) << "Could not find " << target
+ << " in current config so ignoring --"
+ " current config: "
<< _rsConfig.toBSON();
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
@@ -1131,8 +1131,9 @@ HeartbeatResponseAction TopologyCoordinator::_updatePrimaryFromHBDataV1(
bool scheduleCatchupTakeover = false;
bool schedulePriorityTakeover = false;
- if (!catchupTakeoverDisabled && (_memberData.at(primaryIndex).getLastAppliedOpTime() <
- _memberData.at(_selfIndex).getLastAppliedOpTime())) {
+ if (!catchupTakeoverDisabled &&
+ (_memberData.at(primaryIndex).getLastAppliedOpTime() <
+ _memberData.at(_selfIndex).getLastAppliedOpTime())) {
LOG_FOR_ELECTION(2) << "I can take over the primary due to fresher data."
<< " Current primary index: " << primaryIndex << " in term "
<< _memberData.at(primaryIndex).getTerm() << "."
@@ -2711,38 +2712,30 @@ void TopologyCoordinator::processReplSetRequestVotes(const ReplSetRequestVotesAr
if (args.getTerm() < _term) {
response->setVoteGranted(false);
response->setReason(str::stream() << "candidate's term (" << args.getTerm()
- << ") is lower than mine ("
- << _term
- << ")");
+ << ") is lower than mine (" << _term << ")");
} else if (args.getConfigVersion() != _rsConfig.getConfigVersion()) {
response->setVoteGranted(false);
- response->setReason(str::stream() << "candidate's config version ("
- << args.getConfigVersion()
- << ") differs from mine ("
- << _rsConfig.getConfigVersion()
- << ")");
+ response->setReason(str::stream()
+ << "candidate's config version (" << args.getConfigVersion()
+ << ") differs from mine (" << _rsConfig.getConfigVersion() << ")");
} else if (args.getSetName() != _rsConfig.getReplSetName()) {
response->setVoteGranted(false);
- response->setReason(str::stream() << "candidate's set name (" << args.getSetName()
- << ") differs from mine ("
- << _rsConfig.getReplSetName()
- << ")");
+ response->setReason(str::stream()
+ << "candidate's set name (" << args.getSetName()
+ << ") differs from mine (" << _rsConfig.getReplSetName() << ")");
} else if (args.getLastDurableOpTime() < getMyLastAppliedOpTime()) {
response->setVoteGranted(false);
response
->setReason(str::stream()
<< "candidate's data is staler than mine. candidate's last applied OpTime: "
<< args.getLastDurableOpTime().toString()
- << ", my last applied OpTime: "
- << getMyLastAppliedOpTime().toString());
+ << ", my last applied OpTime: " << getMyLastAppliedOpTime().toString());
} else if (!args.isADryRun() && _lastVote.getTerm() == args.getTerm()) {
response->setVoteGranted(false);
response->setReason(str::stream()
<< "already voted for another candidate ("
<< _rsConfig.getMemberAt(_lastVote.getCandidateIndex()).getHostAndPort()
- << ") this term ("
- << _lastVote.getTerm()
- << ")");
+ << ") this term (" << _lastVote.getTerm() << ")");
} else {
int betterPrimary = _findHealthyPrimaryOfEqualOrGreaterPriority(args.getCandidateIndex());
if (_selfConfig().isArbiter() && betterPrimary >= 0) {
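Note: besides repacking the vote-denial reason strings, the topology_coordinator.cpp hunks show how clang-format-7.0.1 rebreaks multi-line conditions: it breaks after `&&` and keeps each parenthesized comparison together, as in the `_updatePrimaryFromHBDataV1` hunk above. A small runnable sketch under assumed names (not the real member-data accessors), wrapped only to mirror the diff's layout:

    #include <cstdio>

    bool canCatchupTakeover(bool catchupTakeoverDisabled,
                            long primaryApplied,
                            long selfApplied) {
        if (!catchupTakeoverDisabled &&
            (primaryApplied <
             selfApplied)) {
            return true;
        }
        return false;
    }

    int main() {
        std::printf("%d\n", canCatchupTakeover(false, 10, 20));  // prints 1
        return 0;
    }
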
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index b07ef7deb3f..6a6a6af4652 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -1076,7 +1076,7 @@ public:
/**
* Gets the number of retries left for this heartbeat attempt. Invalid to call if the current
* state is 'UNINITIALIZED'.
- */
+ */
int retriesLeft() const {
return kMaxHeartbeatRetries - _numFailuresSinceLastStart;
}
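Note: the first hunk of the test file below reorders using-declarations into lexicographic order, which matches clang-format's SortUsingDeclarations behavior (presumably enabled in the project's .clang-format; an assumption, not confirmed by this diff). A runnable sketch with standard-library names:

    #include <memory>
    #include <vector>

    // Before: using std::vector; using std::unique_ptr;   (unsorted)
    // After the formatter sorts lexicographically:
    using std::unique_ptr;
    using std::vector;

    int main() {
        vector<int> v{1, 2, 3};
        unique_ptr<int> p = std::make_unique<int>(v.front());
        return (*p == 1 && v.size() == 3) ? 0 : 1;
    }
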
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 73b2fd6bcf6..633c0220372 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -53,9 +53,9 @@
#define ASSERT_NO_ACTION(EXPRESSION) \
ASSERT_EQUALS(mongo::repl::HeartbeatResponseAction::NoAction, (EXPRESSION))
-using std::unique_ptr;
-using mongo::rpc::ReplSetMetadata;
using mongo::rpc::OplogQueryMetadata;
+using mongo::rpc::ReplSetMetadata;
+using std::unique_ptr;
namespace mongo {
namespace repl {
@@ -326,9 +326,7 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -399,44 +397,31 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
}
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
- updateConfig(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself")
- << BSON("_id" << 10 << "host"
- << "h1")
- << BSON("_id" << 20 << "host"
- << "h2"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
- << BSON("_id" << 30 << "host"
- << "h3"
- << "hidden"
- << true
- << "priority"
- << 0
- << "votes"
- << 0)
- << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly"
- << true)
- << BSON("_id" << 50 << "host"
- << "h5"
- << "slaveDelay"
- << 1
- << "priority"
- << 0)
- << BSON("_id" << 60 << "host"
- << "h6")
- << BSON("_id" << 70 << "host"
- << "hprimary"))),
- 0);
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself")
+ << BSON("_id" << 10 << "host"
+ << "h1")
+ << BSON("_id" << 20 << "host"
+ << "h2"
+ << "buildIndexes" << false << "priority" << 0)
+ << BSON("_id" << 30 << "host"
+ << "h3"
+ << "hidden" << true << "priority" << 0 << "votes" << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 50 << "host"
+ << "h5"
+ << "slaveDelay" << 1 << "priority" << 0)
+ << BSON("_id" << 60 << "host"
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime lastOpTimeWeApplied = OpTime(Timestamp(100, 0), 0);
@@ -573,9 +558,7 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, NodeWontChooseSyncSourceFromOlderTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
@@ -625,10 +608,7 @@ TEST_F(TopoCoordTest, NodeWontChooseSyncSourceFromOlderTerm) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -752,9 +732,7 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseSameSyncSourceEvenWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -809,9 +787,7 @@ TEST_F(TopoCoordTest, ChooseSameSyncSourceEvenWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -862,9 +838,7 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -917,10 +891,7 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -975,9 +946,7 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -1050,9 +1019,7 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
@@ -1086,13 +1053,10 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -1108,21 +1072,15 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1150,21 +1108,15 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1187,21 +1139,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1225,21 +1171,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1264,21 +1204,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1303,21 +1237,15 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1346,21 +1274,15 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1395,21 +1317,15 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1445,21 +1361,15 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1494,21 +1404,15 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1562,21 +1466,15 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority"
- << 0
- << "buildIndexes"
- << false)
+ << "priority" << 0 << "buildIndexes" << false)
<< BSON("_id" << 3 << "host"
<< "h3")
<< BSON("_id" << 4 << "host"
@@ -1920,13 +1818,10 @@ TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter)
TEST_F(TopoCoordTest, PrepareStepDownAttemptFailsIfNotLeader) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
getTopoCoord().changeMemberState_forTest(MemberState::RS_SECONDARY);
Status expectedStatus(ErrorCodes::NotMaster, "This node is not a primary. ");
@@ -1940,17 +1835,14 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
<< "h3"))
- << "settings"
- << BSON("protocolVersion" << 1)),
+ << "settings" << BSON("protocolVersion" << 1)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
}
@@ -1974,8 +1866,8 @@ TEST_F(PrepareHeartbeatResponseV1Test,
prepareHeartbeatResponseV1(args, &response, &result);
stopCapturingLogMessages();
ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
- ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
- << result.reason() << '"';
+ ASSERT(result.reason().find("repl set names do not match"))
+ << "Actual string was \"" << result.reason() << '"';
ASSERT_EQUALS(1,
countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
"node's: rs1"));
@@ -1988,15 +1880,12 @@ TEST_F(PrepareHeartbeatResponseV1Test,
// reconfig self out of set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 3
- << "members"
+ << "version" << 3 << "members"
<< BSON_ARRAY(BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
<< "h3"))
- << "settings"
- << BSON("protocolVersion" << 1)),
+ << "settings" << BSON("protocolVersion" << 1)),
-1);
ReplSetHeartbeatArgsV1 args;
args.setSetName("rs0");
@@ -2192,9 +2081,7 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2212,9 +2099,7 @@ TEST_F(TopoCoordTest, DoNotBecomeCandidateWhenBecomingSecondaryInSingleNodeSetIf
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2242,15 +2127,10 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0))))
+ << "priority" << 0))))
.transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2264,9 +2144,7 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2280,15 +2158,10 @@ TEST_F(TopoCoordTest,
ReplSetConfig cfg;
ASSERT_OK(cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0)))));
+ << "priority" << 0)))));
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2302,9 +2175,7 @@ TEST_F(TopoCoordTest,
getTopoCoord().adjustMaintenanceCountBy(1);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"))),
0);
@@ -2317,13 +2188,10 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority"
- << 0))))
+ << "priority" << 0))))
.transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
@@ -2342,9 +2210,7 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2357,9 +2223,7 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// reconfig to add to set
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2377,9 +2241,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2393,9 +2255,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2411,9 +2271,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2430,9 +2288,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2448,11 +2304,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2469,13 +2321,10 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
// now lose primary due to loss of electability
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -2490,9 +2339,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"))),
0);
@@ -2511,9 +2358,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Add hosts
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2528,18 +2373,13 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Change priorities and tags
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 10)
+ << "priority" << 10)
<< BSON("_id" << 1 << "host"
<< "host2:27017"
- << "priority"
- << 5
- << "tags"
+ << "priority" << 5 << "tags"
<< BSON("dc"
<< "NA"
<< "rack"
@@ -2553,9 +2393,7 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -2569,9 +2407,7 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
// reconfig and stay secondary
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2586,13 +2422,10 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 20 << "host"
<< "h2")
<< BSON("_id" << 30 << "host"
@@ -2611,9 +2444,7 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2626,13 +2457,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2645,13 +2471,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response2;
@@ -2666,9 +2487,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2682,14 +2501,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2704,14 +2517,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2726,14 +2533,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args3
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2748,14 +2549,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
args4
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2770,9 +2565,7 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2786,14 +2579,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2808,14 +2595,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
args2
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << false << "term" << 1LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -2830,9 +2611,7 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2846,13 +2625,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2865,9 +2639,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2881,13 +2653,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 0LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 0LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2900,9 +2667,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2920,13 +2685,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2940,9 +2700,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -2957,13 +2715,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 3LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 3LL << "candidateIndex" << 1LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse response;
@@ -2973,8 +2726,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ASSERT_EQUALS(
str::stream() << "candidate's data is staler than mine. candidate's last applied OpTime: "
<< OpTime().toString()
- << ", my last applied OpTime: "
- << OpTime(Timestamp(20, 0), 0).toString(),
+ << ", my last applied OpTime: " << OpTime(Timestamp(20, 0), 0).toString(),
response.getReason());
ASSERT_FALSE(response.getVoteGranted());
}
@@ -2982,9 +2734,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3001,13 +2751,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3021,14 +2766,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun"
- << true
- << "term"
- << 2LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 2LL
+ << "candidateIndex" << 0LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3043,9 +2782,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3062,13 +2799,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3082,14 +2814,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 2LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 0LL
+ << "dryRun" << true << "term" << 2LL
+ << "candidateIndex" << 1LL << "configVersion" << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3104,9 +2830,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3123,13 +2847,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3142,14 +2861,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 0LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 0LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3164,9 +2877,7 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3183,13 +2894,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3203,14 +2909,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 1LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3225,9 +2925,7 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 1
- << "members"
+ << "version" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
@@ -3244,13 +2942,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
argsForRealVote
.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
+ << "term" << 1LL << "candidateIndex" << 0LL
+ << "configVersion" << 1LL << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
@@ -3264,14 +2957,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun"
- << true
- << "term"
- << 3LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
+ << "dryRun" << true << "term" << 3LL
+ << "candidateIndex" << 1LL << "configVersion" << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
.transitional_ignore();
@@ -3282,8 +2969,7 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ASSERT_EQUALS(
str::stream() << "candidate's data is staler than mine. candidate's last applied OpTime: "
<< OpTime().toString()
- << ", my last applied OpTime: "
- << OpTime(Timestamp(20, 0), 0).toString(),
+ << ", my last applied OpTime: " << OpTime(Timestamp(20, 0), 0).toString(),
response.getReason());
ASSERT_EQUALS(1, response.getTerm());
ASSERT_FALSE(response.getVoteGranted());
@@ -3299,12 +2985,7 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSuppor
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -3326,12 +3007,7 @@ TEST_F(TopoCoordTest, NodeBecomesSecondaryAsNormalWhenReadCommittedSupportedAndC
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion"
- << 1
- << "version"
- << 1
- << "configsvr"
- << true
+ << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
@@ -3352,18 +3028,14 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
}
@@ -3381,23 +3053,15 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 7
- << "members"
+ << "version" << 7 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes"
- << false
- << "priority"
- << 0)
+ << "buildIndexes" << false << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes"
- << false
- << "priority"
- << 0))),
+ << "buildIndexes" << false << "priority" << 0))),
0);
topoCoordSetMyLastAppliedOpTime(lastOpTimeApplied, Date_t(), false);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(
@@ -3733,15 +3397,12 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3783,28 +3444,19 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ReplSetHeartbeatResponse hb;
hb.initialize(BSON("ok" << 1 << "durableOpTime" << OpTime(Timestamp(100, 0), 0).toBSON()
- << "durableWallTime"
- << Date_t() + Seconds(100)
- << "opTime"
- << OpTime(Timestamp(100, 0), 0).toBSON()
- << "wallTime"
- << Date_t() + Seconds(100)
- << "v"
- << 1
- << "state"
+ << "durableWallTime" << Date_t() + Seconds(100) << "opTime"
+ << OpTime(Timestamp(100, 0), 0).toBSON() << "wallTime"
+ << Date_t() + Seconds(100) << "v" << 1 << "state"
<< MemberState::RS_PRIMARY),
0,
/*requireWallTime*/ true)
@@ -3863,20 +3515,15 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleAPriorityTakeoverWhenElectableAndReceiveHeartbeatFromLowerPriorityPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3897,21 +3544,16 @@ TEST_F(HeartbeatResponseTestV1,
TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 3)
+ << "priority" << 3)
<< BSON("_id" << 2 << "host"
<< "host2:27017"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3952,18 +3594,14 @@ TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover)
TEST_F(TopoCoordTest, FreshestNodeDoesCatchupTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4009,18 +3647,14 @@ TEST_F(TopoCoordTest, FreshestNodeDoesCatchupTakeover) {
TEST_F(TopoCoordTest, StaleNodeDoesntDoCatchupTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4068,18 +3702,14 @@ TEST_F(TopoCoordTest, StaleNodeDoesntDoCatchupTakeover) {
TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverHeartbeatSaysPrimaryCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4124,18 +3754,14 @@ TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverHeartbeatSaysPrimaryCaughtUp) {
TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverIfTermNumbersSayPrimaryCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -4185,19 +3811,14 @@ TEST_F(TopoCoordTest, NodeDoesntDoCatchupTakeoverIfTermNumbersSayPrimaryCaughtUp
TEST_F(TopoCoordTest, StepDownAttemptFailsWhenNotPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4214,19 +3835,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsWhenNotPrimary) {
TEST_F(TopoCoordTest, StepDownAttemptFailsWhenAlreadySteppingDown) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4244,19 +3860,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsWhenAlreadySteppingDown) {
TEST_F(TopoCoordTest, StepDownAttemptFailsForDifferentTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4274,19 +3885,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsForDifferentTerm) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastStepDownUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4306,19 +3912,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastStepDownUntil) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4341,19 +3942,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfPastWaitUntil) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4374,19 +3970,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUp) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUpForceIsTrueButNotPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4407,19 +3998,14 @@ TEST_F(TopoCoordTest, StepDownAttemptFailsIfNoSecondariesCaughtUpForceIsTrueButN
TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfNoSecondariesCaughtUpForceIsTrueAndPastWaitUntil) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4440,19 +4026,14 @@ TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfNoSecondariesCaughtUpForceIsTrueA
TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfSecondariesCaughtUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4473,23 +4054,15 @@ TEST_F(TopoCoordTest, StepDownAttemptSucceedsIfSecondariesCaughtUp) {
TEST_F(TopoCoordTest, StepDownAttemptFailsIfSecondaryCaughtUpButNotElectable) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "hidden"
- << true)
+ << "priority" << 0 << "hidden" << true)
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
const auto term = getTopoCoord().getTerm();
Date_t curTime = now();
@@ -4519,15 +4092,12 @@ TEST_F(TopoCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
{
BSONObjBuilder statusBuilder;
@@ -4576,15 +4146,12 @@ TEST_F(TopoCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
@@ -4649,10 +4216,7 @@ TEST_F(TopoCoordTest, replSetGetStatusForThreeMemberedReplicaSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "settings"
- << BSON("chainingAllowed" << false)
+ << "version" << 5 << "settings" << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 30 << "host"
<< "hself:27017")
@@ -4660,8 +4224,7 @@ TEST_F(TopoCoordTest, replSetGetStatusForThreeMemberedReplicaSet) {
<< "hprimary:27017")
<< BSON("_id" << 10 << "host"
<< "h1:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
ASSERT(getTopoCoord().getSyncSourceAddress().empty());
@@ -4752,13 +4315,10 @@ TEST_F(TopoCoordTest, StatusResponseAlwaysIncludesStringStatusFieldsForNonMember
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
-1); // This node is no longer part of this replica set.
BSONObjBuilder statusBuilder;
@@ -4788,9 +4348,7 @@ TEST_F(TopoCoordTest, StatusResponseAlwaysIncludesStringStatusFieldsForNonMember
TEST_F(TopoCoordTest, NoElectionHandoffCandidateInSingleNodeReplicaSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"))),
0);
@@ -4805,9 +4363,7 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateInSingleNodeReplicaSet) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -4828,15 +4384,12 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedNode) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneUnelectableNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0))),
+ << "priority" << 0))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4853,17 +4406,14 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneUnelectableNode) {
TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedAndOneUnelectableNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0))),
+ << "priority" << 0))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4883,9 +4433,7 @@ TEST_F(TopoCoordTest, NoElectionHandoffCandidateWithOneLaggedAndOneUnelectableNo
TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfOneSecondary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -4906,15 +4454,12 @@ TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfOneSecondary)
TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfThreeSecondaries) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 2 << "host"
<< "host2:27017")
<< BSON("_id" << 3 << "host"
@@ -4943,17 +4488,14 @@ TEST_F(TopoCoordTest, ExactlyOneNodeEligibleForElectionHandoffOutOfThreeSecondar
TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffResolveByPriority) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 5))),
+ << "priority" << 5))),
0);
const auto term = getTopoCoord().getTerm();
@@ -4975,9 +4517,7 @@ TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffResolveByPriority) {
TEST_F(TopoCoordTest, TwoNodesEligibleForElectionHandoffEqualPriorityResolveByMemberId) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
@@ -5006,23 +4546,17 @@ TEST_F(TopoCoordTest, ArbiterNotIncludedInW3WriteInPSSAReplSet) {
// In a PSSA set, a w:3 write should only be acknowledged if both secondaries can satisfy it.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "host3:27017"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5051,31 +4585,21 @@ TEST_F(TopoCoordTest, ArbitersNotIncludedInW2WriteInPSSAAReplSet) {
// can satisfy it.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "members"
+ << "version" << 2 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017")
<< BSON("_id" << 1 << "host"
<< "host1:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 2 << "host"
<< "host2:27017"
- << "priority"
- << 0
- << "votes"
- << 0)
+ << "priority" << 0 << "votes" << 0)
<< BSON("_id" << 3 << "host"
<< "host3:27017"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 4 << "host"
<< "host4:27017"
- << "arbiterOnly"
- << true))),
+ << "arbiterOnly" << true))),
0);
const auto term = getTopoCoord().getTerm();
@@ -5100,59 +4624,52 @@ TEST_F(TopoCoordTest, ArbitersNotIncludedInW2WriteInPSSAAReplSet) {
TEST_F(TopoCoordTest, CheckIfCommitQuorumCanBeSatisfied) {
ReplSetConfig configA;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags"
- << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags"
- << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly"
- << true))
- << "settings"
- << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues"
- << BSON("dc" << 3)
- << "invalidNotEnoughNodes"
- << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON(
+ "valid" << BSON("dc" << 2 << "rack" << 3) << "invalidNotEnoughValues"
+ << BSON("dc" << 3) << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
getTopoCoord().updateConfig(configA, -1, Date_t());
std::vector<MemberConfig> memberConfig;
@@ -5323,18 +4840,14 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleACatchupTakeoverWhenElectableAndReceiveHeartbeatFromPrimaryInCatchup) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -5357,22 +4870,16 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleACatchupTakeoverWhenBothCatchupAndPriorityTakeoverPossible) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0:27017"
- << "priority"
- << 2)
+ << "priority" << 2)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 6 << "host"
<< "host7:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -5395,43 +4902,26 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes"
- << 0
- << "priority"
- << 0)
+ << "votes" << 0 << "priority" << 0)
<< BSON("_id" << 6 << "host"
<< "host7:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5500,19 +4990,15 @@ TEST_F(HeartbeatResponseTestV1,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "arbiterOnly"
- << true)
+ << "arbiterOnly" << true)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -5613,19 +5099,15 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority"
- << 0)
+ << "priority" << 0)
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1),
+ << "protocolVersion" << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -5700,21 +5182,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5739,21 +5215,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -5775,21 +5245,15 @@ TEST_F(HeartbeatResponseTestV1,
// multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -5812,21 +5276,15 @@ TEST_F(HeartbeatResponseTestV1,
// in all multiprimary states in PV1.
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "priority"
- << 3))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)),
+ << "priority" << 3))
+ << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5983,21 +5441,15 @@ TEST_F(HeartbeatResponseTestV1, ShouldNotChangeSyncSourceWhenFresherMemberDoesNo
updateConfig(BSON("_id"
<< "rs0"
- << "version"
- << 6
- << "members"
+ << "version" << 6 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes"
- << false
- << "priority"
- << 0))
- << "protocolVersion"
- << 1),
+ << "buildIndexes" << false << "priority" << 0))
+ << "protocolVersion" << 1),
0);
topoCoordSetMyLastAppliedOpTime(lastOpTimeApplied, Date_t(), false);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(
@@ -6311,18 +5763,14 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
originalConfig
.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 5
- << "members"
+ << "version" << 5 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
+ << "protocolVersion" << 1 << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)))
.transitional_ignore();
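
Every topology_coordinator hunk above is the same mechanical reflow: clang-format-7 now packs `<< "key" << value` pairs onto one line up to the column limit, while an operand that is itself a string literal (set names, host:port strings) still lands on its own continuation line. A minimal sketch of the resulting shape, assuming the usual test fixture around it:

    // Illustrative only -- the post-clang-format-7 layout of a replica set
    // config built with the BSON streaming macros. Numeric and bool operands
    // share a line with their key; string operands break onto their own line.
    updateConfig(BSON("_id"
                      << "rs0"
                      << "version" << 2 << "protocolVersion" << 1 << "members"
                      << BSON_ARRAY(BSON("_id" << 0 << "host"
                                               << "host0:27017"))),
                 0);
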
diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp
index ca0bc29844e..7d2dc73cf8c 100644
--- a/src/mongo/db/repl/vote_requester_test.cpp
+++ b/src/mongo/db/repl/vote_requester_test.cpp
@@ -59,31 +59,23 @@ class VoteRequesterTest : public mongo::unittest::Test {
public:
virtual void setUp() {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0 << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -216,31 +208,23 @@ class VoteRequesterDryRunTest : public VoteRequesterTest {
public:
virtual void setUp() {
ReplSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes"
- << 0
- << "priority"
- << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes"
- << 0
- << "priority"
- << 0)))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0 << "priority" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0 << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -261,11 +245,7 @@ public:
ReplSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version"
- << 2
- << "protocolVersion"
- << 1
- << "members"
+ << "version" << 2 << "protocolVersion" << 1 << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h
index cd50f2c0289..363eba6eb94 100644
--- a/src/mongo/db/repl_index_build_state.h
+++ b/src/mongo/db/repl_index_build_state.h
@@ -146,9 +146,7 @@ private:
invariant(!name.empty(),
str::stream()
<< "Bad spec passed into ReplIndexBuildState constructor, missing '"
- << IndexDescriptor::kIndexNameFieldName
- << "' field: "
- << spec);
+ << IndexDescriptor::kIndexNameFieldName << "' field: " << spec);
indexNames.push_back(name);
}
return indexNames;
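
The same reflow applies to `str::stream()` diagnostics throughout the files that follow: message fragments and values are packed per line instead of one `<<` operand per line. A hypothetical example in that style (the names `expectedCount`, `actualCount`, and `nss` are placeholders, not from the diff):

    // Hypothetical sketch of the packed error-message style.
    return {ErrorCodes::InvalidOptions,
            str::stream() << "expected " << expectedCount << " chunks for " << nss.ns()
                          << " but found " << actualCount};
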
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index f8e26472a8d..503f3f24d1f 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -148,9 +148,7 @@ Status ActiveMigrationsRegistry::ActiveMoveChunkState::constructErrorStatus() co
str::stream() << "Unable to start new migration because this shard is currently "
"donating chunk "
<< ChunkRange(args.getMinKey(), args.getMaxKey()).toString()
- << " for namespace "
- << args.getNss().ns()
- << " to "
+ << " for namespace " << args.getNss().ns() << " to "
<< args.getToShardId()};
}
@@ -158,10 +156,7 @@ Status ActiveMigrationsRegistry::ActiveReceiveChunkState::constructErrorStatus()
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to start new migration because this shard is currently "
"receiving chunk "
- << range.toString()
- << " for namespace "
- << nss.ns()
- << " from "
+ << range.toString() << " for namespace " << nss.ns() << " from "
<< fromShardId};
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index a02da4c899b..f71f7a63d80 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -90,9 +90,7 @@ Status ActiveMovePrimariesRegistry::ActiveMovePrimaryState::constructErrorStatus
str::stream()
<< "Unable to start new movePrimary operation because this shard is currently "
"moving its primary for namespace "
- << requestArgs.get_movePrimary().ns()
- << " to "
- << requestArgs.getTo()};
+ << requestArgs.get_movePrimary().ns() << " to " << requestArgs.getTo()};
}
ScopedMovePrimary::ScopedMovePrimary(ActiveMovePrimariesRegistry* registry,
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 8ddd051478e..38b19a6c94f 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -159,4 +159,4 @@ private:
// This is the future, which will be signaled at the end of a movePrimary command.
std::shared_ptr<Notification<Status>> _completionNotification;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/s/active_move_primaries_registry_test.cpp b/src/mongo/db/s/active_move_primaries_registry_test.cpp
index 52b7d7daf6d..65fd968f377 100644
--- a/src/mongo/db/s/active_move_primaries_registry_test.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry_test.cpp
@@ -27,9 +27,9 @@
* it in the license file.
*/
-#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/client.h"
+#include "mongo/db/s/active_move_primaries_registry.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/request_types/move_primary_gen.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index eb6c42923a6..6a01fdd90ee 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -134,11 +134,9 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
return {ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Unable to shard collection "
<< request.get_shardsvrShardCollection().get().ns()
- << " with arguments: "
- << request.toBSON()
+ << " with arguments: " << request.toBSON()
<< " because this shard is currently running shard collection on this "
- << "collection with arguments: "
- << activeRequest.toBSON()};
+ << "collection with arguments: " << activeRequest.toBSON()};
}
ScopedShardCollection::ScopedShardCollection(std::string nss,
diff --git a/src/mongo/db/s/add_shard_util.cpp b/src/mongo/db/s/add_shard_util.cpp
index 466d1a3fe6d..0dae94c0102 100644
--- a/src/mongo/db/s/add_shard_util.cpp
+++ b/src/mongo/db/s/add_shard_util.cpp
@@ -77,5 +77,5 @@ BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd) {
return request.toBSON();
}
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
diff --git a/src/mongo/db/s/add_shard_util.h b/src/mongo/db/s/add_shard_util.h
index b7ab9fd0b36..020831833ba 100644
--- a/src/mongo/db/s/add_shard_util.h
+++ b/src/mongo/db/s/add_shard_util.h
@@ -60,5 +60,5 @@ AddShard createAddShardCmd(OperationContext* opCtx, const ShardId& shardName);
*/
BSONObj createShardIdentityUpsertForAddShard(const AddShard& addShardCmd);
-} // namespace mongo
} // namespace add_shard_util
+} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 3162ca40daf..62677b3eafa 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -441,12 +441,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
if (chunkAtZoneMin.getMin().woCompare(tagRange.min)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
@@ -462,12 +460,10 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
chunkAtZoneMax.getMax().woCompare(tagRange.max)) {
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries "
- << tagRange.toString()
+ << "Tag boundaries " << tagRange.toString()
<< " fall in the middle of an existing chunk "
<< ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString()
- << ". Balancing for collection "
- << nss.ns()
+ << ". Balancing for collection " << nss.ns()
<< " will be postponed until the chunk is split appropriately."};
}
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index a12893339d1..97a18c7ae08 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -126,8 +126,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << intersectingRange.toString()};
+ << " is overlapping with existing: " << intersectingRange.toString()};
}
// Check for containment
@@ -137,8 +136,7 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
invariant(SimpleBSONObjComparator::kInstance.evaluate(range.max < nextRange.max));
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
- << " is overlapping with existing: "
- << nextRange.toString()};
+ << " is overlapping with existing: " << nextRange.toString()};
}
}
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index b131bbafde7..0a988cf1b13 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -517,7 +517,7 @@ void MigrationManager::_schedule(WithLock lock,
StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
executor->scheduleRemoteCommand(
remoteRequest,
- [ this, service = opCtx->getServiceContext(), itMigration ](
+ [this, service = opCtx->getServiceContext(), itMigration](
const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
@@ -614,8 +614,7 @@ Status MigrationManager::_processRemoteCommandResponse(
scopedMigrationRequest->keepDocumentOnDestruct();
return {ErrorCodes::BalancerInterrupted,
stream() << "Migration interrupted because the balancer is stopping."
- << " Command status: "
- << remoteCommandResponse.status.toString()};
+ << " Command status: " << remoteCommandResponse.status.toString()};
}
if (!remoteCommandResponse.isOK()) {
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index ec2bdc8b12d..44ba02a9e2b 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -137,17 +137,17 @@ protected:
// Random static initialization order can result in X constructor running before Y constructor
// if X and Y are defined in different source files. Defining variables here to enforce order.
const BSONObj kShard0 =
- BSON(ShardType::name(kShardId0.toString()) << ShardType::host(kShardHost0.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId0.toString())
+ << ShardType::host(kShardHost0.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard1 =
- BSON(ShardType::name(kShardId1.toString()) << ShardType::host(kShardHost1.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId1.toString())
+ << ShardType::host(kShardHost1.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard2 =
- BSON(ShardType::name(kShardId2.toString()) << ShardType::host(kShardHost2.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId2.toString())
+ << ShardType::host(kShardHost2.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const BSONObj kShard3 =
- BSON(ShardType::name(kShardId3.toString()) << ShardType::host(kShardHost3.toString())
- << ShardType::maxSizeMB(kMaxSizeMB));
+ BSON(ShardType::name(kShardId3.toString())
+ << ShardType::host(kShardHost3.toString()) << ShardType::maxSizeMB(kMaxSizeMB));
const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index a0ef6dadf16..40441637ba4 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -118,8 +118,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithMigrationQueryResult.isOK()) {
return statusWithMigrationQueryResult.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to query config.migrations.");
}
if (statusWithMigrationQueryResult.getValue().docs.empty()) {
@@ -134,11 +133,9 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
if (!statusWithActiveMigration.isOK()) {
return statusWithActiveMigration.getStatus().withContext(
str::stream() << "Failed to verify whether conflicting migration is in "
- << "progress for migration '"
- << redact(migrateInfo.toString())
+ << "progress for migration '" << redact(migrateInfo.toString())
<< "' while trying to parse active migration document '"
- << redact(activeMigrationBSON.toString())
- << "'.");
+ << redact(activeMigrationBSON.toString()) << "'.");
}
MigrateInfo activeMigrateInfo = statusWithActiveMigration.getValue().toMigrateInfo();
@@ -172,8 +169,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
str::stream() << "Failed to insert the config.migrations document after max "
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
- << "' in collection '"
- << migrateInfo.nss.ns()
+ << "' in collection '" << migrateInfo.nss.ns()
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 7bc56741ae8..12325cfe830 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -165,8 +165,8 @@ public:
BSONObjIterator i(currKey);
for (int k = 0; k < keyPatternLength; k++) {
if (!i.more()) {
- errmsg = str::stream() << "index key " << currKey << " too short for pattern "
- << keyPattern;
+ errmsg = str::stream()
+ << "index key " << currKey << " too short for pattern " << keyPattern;
return false;
}
BSONElement currKeyElt = i.next();
@@ -192,8 +192,9 @@ public:
const string msg = str::stream()
<< "There are documents which have missing or incomplete shard key fields ("
- << redact(currKey) << "). Please ensure that all documents in the collection "
- "include all fields from the shard key.";
+ << redact(currKey)
+ << "). Please ensure that all documents in the collection "
+ "include all fields from the shard key.";
log() << "checkShardingIndex for '" << nss.toString() << "' failed: " << msg;
errmsg = msg;
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index cfe972510a7..049ab0ae261 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -176,8 +176,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
uassert(40618,
str::stream() << "failed to initialize cursor during auto split due to "
- << "connection problem with "
- << client.getServerAddress(),
+ << "connection problem with " << client.getServerAddress(),
cursor.get() != nullptr);
if (cursor->more()) {
@@ -273,8 +272,8 @@ void ChunkSplitter::trySplitting(std::shared_ptr<ChunkSplitStateDriver> chunkSpl
return;
}
_threadPool.schedule(
- [ this, csd = std::move(chunkSplitStateDriver), nss, min, max, dataWritten ](
- auto status) noexcept {
+ [ this, csd = std::move(chunkSplitStateDriver), nss, min, max,
+ dataWritten ](auto status) noexcept {
invariant(status);
_runAutosplit(csd, nss, min, max, dataWritten);
@@ -384,7 +383,8 @@ void ChunkSplitter::_runAutosplit(std::shared_ptr<ChunkSplitStateDriver> chunkSp
log() << "autosplitted " << nss << " chunk: " << redact(chunk.toString()) << " into "
<< (splitPoints.size() + 1) << " parts (maxChunkSizeBytes " << maxChunkSizeBytes
<< ")"
- << (topChunkMinKey.isEmpty() ? "" : " (top chunk migration suggested" +
+ << (topChunkMinKey.isEmpty() ? ""
+ : " (top chunk migration suggested" +
(std::string)(shouldBalance ? ")" : ", but no migrations allowed)"));
// Because the ShardServerOpObserver uses the metadata from the CSS for tracking incoming
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 28eab0d23bb..303c8a7a602 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -89,9 +89,9 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
BSONObj keyPattern = metadata->getKeyPattern();
if (!startingFromKey.isEmpty()) {
if (!metadata->isValidKey(startingFromKey)) {
- *errMsg = str::stream() << "could not cleanup orphaned data, start key "
- << startingFromKey << " does not match shard key pattern "
- << keyPattern;
+ *errMsg = str::stream()
+ << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
log() << *errMsg;
return CleanupResult_Error;
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 112499944af..9d9f48bab24 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -131,8 +131,7 @@ Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
return {ErrorCodes::StaleShardVersion,
str::stream() << "Unable to find chunk with the exact bounds "
<< ChunkRange(chunk.getMin(), chunk.getMax()).toString()
- << " at collection version "
- << getCollVersion().toString()};
+ << " at collection version " << getCollVersion().toString()};
}
return Status::OK();
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index d125e651adc..34ff588020f 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -132,8 +132,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(100, 0)));
+ << "atClusterTime" << Timestamp(100, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -163,8 +162,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(50, 0)));
+ << "atClusterTime" << Timestamp(50, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
@@ -202,8 +200,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
{
BSONObj readConcern = BSON("readConcern" << BSON("level"
<< "snapshot"
- << "atClusterTime"
- << Timestamp(10, 0)));
+ << "atClusterTime" << Timestamp(10, 0)));
auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 33f81707841..afcaf6be496 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -111,8 +111,7 @@ TEST_F(NoChunkFixture, IsValidKey) {
ASSERT(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("a"
<< "abcde"
- << "b"
- << 1)));
+ << "b" << 1)));
ASSERT(!makeCollectionMetadata()->isValidKey(BSON("c"
<< "abcde")));
}
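
For reference, the IsValidKey hunk above checks validity against the fixture's shard key pattern, which these assertions imply is {a: 1} (the fixture itself is outside the hunk):

    // Assuming a shard key pattern of {a: 1}:
    isValidKey(BSON("a" << 3));                    // true: exactly the key fields
    isValidKey(BSON("a" << "abcde" << "b" << 1));  // false: extra field "b"
    isValidKey(BSON("c" << "abcde"));              // false: wrong field entirely
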
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 1b63c1ce74c..d5affc26cc0 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -178,14 +178,8 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
NamespaceString::kServerConfigurationNamespace.ns(),
BSON("_id"
<< "startRangeDeletion"
- << "ns"
- << nss.ns()
- << "epoch"
- << epoch
- << "min"
- << range->getMin()
- << "max"
- << range->getMax()));
+ << "ns" << nss.ns() << "epoch" << epoch << "min"
+ << range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
@@ -354,8 +348,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
auto catalog = collection->getIndexCatalog();
const IndexDescriptor* idx = catalog->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (!idx) {
- std::string msg = str::stream() << "Unable to find shard key index for "
- << keyPattern.toString() << " in " << nss.ns();
+ std::string msg = str::stream()
+ << "Unable to find shard key index for " << keyPattern.toString() << " in " << nss.ns();
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
@@ -375,8 +369,8 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
const IndexDescriptor* descriptor =
collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!descriptor) {
- std::string msg = str::stream() << "shard key index with name " << indexName << " on '"
- << nss.ns() << "' was dropped";
+ std::string msg = str::stream()
+ << "shard key index with name " << indexName << " on '" << nss.ns() << "' was dropped";
LOG(0) << msg;
return {ErrorCodes::InternalError, msg};
}
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index 6fae0ee5d18..0ebc79ac8a6 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -59,14 +59,14 @@ class CollectionRangeDeleter {
public:
/**
- * This is an object n that asynchronously changes state when a scheduled range deletion
- * completes or fails. Call n.ready() to discover if the event has already occurred. Call
- * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
- * interrupted, waitStatus throws.
- *
- * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
- * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
- */
+ * This is an object n that asynchronously changes state when a scheduled range deletion
+ * completes or fails. Call n.ready() to discover if the event has already occurred. Call
+ * n.waitStatus(opCtx) to sleep waiting for the event, and get its result. If the wait is
+ * interrupted, waitStatus throws.
+ *
+ * It is an error to destroy a returned CleanupNotification object n unless either n.ready()
+ * is true or n.abandon() has been called. After n.abandon(), n is in a moved-from state.
+ */
class DeleteNotification {
public:
DeleteNotification();
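
The re-indented comment above fully specifies the DeleteNotification contract; a sketch of a caller honoring it (only ready(), waitStatus(), and abandon() come from the comment -- the scheduling call and variable names are assumed):

    // Hypothetical caller; scheduleDeletion() stands in for whatever hands
    // back the DeleteNotification.
    auto n = scheduleDeletion(range);
    if (!n.ready()) {
        Status s = n.waitStatus(opCtx);  // sleeps until done; throws if interrupted
        if (!s.isOK()) {
            // the scheduled range deletion failed
        }
    }
    // A notification must not be destroyed unless ready() is true or
    // abandon() has been called (which leaves it moved-from).
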
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 684ae740a00..1fc98f41876 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -162,8 +162,7 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
Status result = stillScheduled->waitStatus(opCtx);
if (!result.isOK()) {
return result.withContext(str::stream() << "Failed to delete orphaned " << nss.ns()
- << " range "
- << orphanRange.toString());
+ << " range " << orphanRange.toString());
}
}
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index d085f9440f3..1ee6cfbeed8 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -80,12 +80,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 3
- << "_id"
+ << "key" << 3 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Check that an order for deletion from an unsharded collection extracts just the "_id" field
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -103,12 +100,9 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
// The order of fields in `doc` deliberately does not match the shard key
auto doc = BSON("key3"
<< "abc"
- << "key"
- << 100
- << "_id"
+ << "key" << 100 << "_id"
<< "hello"
- << "key2"
- << true);
+ << "key2" << true);
// Verify the shard key is extracted, in correct order, followed by the "_id" field.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
@@ -130,15 +124,13 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
<< "abc"
<< "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
BSON("key" << 100 << "_id"
<< "hello"
- << "key2"
- << true));
+ << "key2" << true));
ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc));
}
@@ -151,8 +143,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
auto doc = BSON("key2" << true << "_id"
<< "hello"
- << "key"
- << 100);
+ << "key" << 100);
// Verify the shard key is extracted with "_id" in the right place, not hashed.
ASSERT_BSONOBJ_EQ(OpObserverImpl::getDocumentKey(operationContext(), kTestNss, doc),
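
The DeleteStateTest hunks pin down what getDocumentKey extracts. Assuming the fixture's shard key is {key: 1} (implied by the expectations, not shown in the hunks), the sharded case without _id in the shard key works out to:

    // Input document; field order deliberately differs from the shard key:
    //   { key3: "abc", key: 100, _id: "hello", key2: true }
    // Extracted document key: shard-key fields in key order, then _id:
    //   { key: 100, _id: "hello" }
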
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index b1c3717f3ff..e9ca1356b62 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -50,8 +50,8 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
namespace {
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index f0272e1a92c..5e37c8cf0eb 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -160,10 +160,9 @@ public:
if (!toShardStatus.isOK()) {
log() << "Could not move database '" << dbname << "' to shard '" << to
<< causedBy(toShardStatus.getStatus());
- uassertStatusOKWithContext(
- toShardStatus.getStatus(),
- str::stream() << "Could not move database '" << dbname << "' to shard '" << to
- << "'");
+ uassertStatusOKWithContext(toShardStatus.getStatus(),
+ str::stream() << "Could not move database '" << dbname
+ << "' to shard '" << to << "'");
}
return toShardStatus.getValue();
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 2f39f852bc8..5186128ef8c 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -112,8 +112,8 @@ public:
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
- std::string msg(str::stream() << "Could not drop shard '" << target
- << "' because it does not exist");
+ std::string msg(str::stream()
+ << "Could not drop shard '" << target << "' because it does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 3cdb4d2e5d3..b63165e4517 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -158,8 +158,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation));
uassert(ErrorCodes::BadValue,
str::stream() << "The collation for shardCollection must be {locale: 'simple'}, "
- << "but found: "
- << collation,
+ << "but found: " << collation,
!collator);
simpleCollationSpecified = true;
}
@@ -173,8 +172,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
int numChunks = request->getNumInitialChunks();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "numInitialChunks cannot be more than either: "
- << maxNumInitialChunksForShards
- << ", 8192 * number of shards; or "
+ << maxNumInitialChunksForShards << ", 8192 * number of shards; or "
<< maxNumInitialChunksTotal,
numChunks >= 0 && numChunks <= maxNumInitialChunksForShards &&
numChunks <= maxNumInitialChunksTotal);
@@ -303,9 +301,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -323,8 +319,7 @@ void validateShardKeyAgainstExistingIndexes(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
@@ -438,9 +433,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
auto chunkManager = routingInfo.cm();
// Move and commit each "big chunk" to a different shard.
- auto nextShardId = [&, indx = 0 ]() mutable {
- return shardIds[indx++ % shardIds.size()];
- };
+ auto nextShardId = [&, indx = 0]() mutable { return shardIds[indx++ % shardIds.size()]; };
for (auto chunk : chunkManager->chunks()) {
const auto shardId = nextShardId();
@@ -553,10 +546,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected the primary shard host " << primaryShard->getConnString()
- << " for database "
- << nss.db()
- << " to return an entry for "
- << nss.ns()
+ << " for database " << nss.db() << " to return an entry for " << nss.ns()
<< " in its listCollections response, but it did not",
!res.isEmpty());
@@ -568,15 +558,12 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx,
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return 'info' field as part of "
"listCollections for "
- << nss.ns()
- << ", but got "
- << res,
+ << nss.ns() << ", but got " << res,
!collectionInfo.isEmpty());
uassert(ErrorCodes::InternalError,
str::stream() << "expected primary shard to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -806,8 +793,7 @@ public:
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 52d363c6e8c..ca164a0264f 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -222,7 +222,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& keyPattern = shardKeyPattern.getKeyPattern();
- auto nextShardIdForHole = [&, indx = 0 ]() mutable {
+ auto nextShardIdForHole = [&, indx = 0]() mutable {
return shardIdsForGaps[indx++ % shardIdsForGaps.size()];
};
@@ -249,10 +249,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const auto& shardIdsForChunk = it->second;
uassert(50973,
str::stream()
- << "Cannot shard collection "
- << nss.ns()
- << " due to zone "
- << tag.getTag()
+ << "Cannot shard collection " << nss.ns() << " due to zone " << tag.getTag()
<< " which is not assigned to a shard. Please assign this zone to a shard.",
!shardIdsForChunk.empty());
@@ -395,7 +392,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunksU
shardSelectedSplitPoints,
shardIds,
1 // numContiguousChunksPerShard
- );
+ );
}
boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShardedWithSameOptions(
@@ -424,8 +421,7 @@ boost::optional<CollectionType> InitialSplitPolicy::checkIfCollectionAlreadyShar
// match the options the collection was originally sharded with.
uassert(ErrorCodes::AlreadyInitialized,
str::stream() << "sharding already enabled for collection " << nss.ns()
- << " with options "
- << existingOptions.toString(),
+ << " with options " << existingOptions.toString(),
requestedOptions.hasSameOptions(existingOptions));
return existingOptions;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index fc610ed35a3..424db73a9d0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -198,8 +198,7 @@ Status ShardingCatalogManager::_initConfigVersion(OperationContext* opCtx) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION
- << "; currently at v"
+ << CURRENT_CONFIG_VERSION << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index ce9460cf3e6..61d1439379e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -129,8 +129,9 @@ protected:
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, nss.db());
ASSERT_BSONOBJ_EQ(request.cmdObj,
- BSON("drop" << nss.coll() << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("drop" << nss.coll() << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 1);
@@ -146,8 +147,7 @@ protected:
ASSERT_BSONOBJ_EQ(request.cmdObj,
BSON("setFeatureCompatibilityVersion"
<< "4.2"
- << "writeConcern"
- << writeConcern));
+ << "writeConcern" << writeConcern));
return response;
});
@@ -315,18 +315,16 @@ protected:
* describing the addShard request for 'addedShard'.
*/
void assertChangeWasLogged(const ShardType& addedShard) {
- auto response = assertGet(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{
- ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString("config.changelog"),
- BSON("what"
- << "addShard"
- << "details.name"
- << addedShard.getName()),
- BSONObj(),
- 1));
+ auto response = assertGet(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString("config.changelog"),
+ BSON("what"
+ << "addShard"
+ << "details.name" << addedShard.getName()),
+ BSONObj(),
+ 1));
ASSERT_EQ(1U, response.docs.size());
auto logEntryBSON = response.docs.front();
auto logEntry = assertGet(ChangeLogType::fromBSON(logEntryBSON));
@@ -347,35 +345,24 @@ protected:
TEST_F(AddShardTest, CreateShardIdentityUpsertForAddShard) {
std::string shardName = "shardName";
- BSONObj expectedBSON = BSON("update"
- << "system.version"
- << "bypassDocumentValidation"
- << false
- << "ordered"
- << true
- << "updates"
- << BSON_ARRAY(BSON(
- "q"
- << BSON("_id"
- << "shardIdentity")
- << "u"
- << BSON("shardName" << shardName << "clusterId" << _clusterId
- << "configsvrConnectionString"
- << replicationCoordinator()
- ->getConfig()
- .getConnectionString()
- .toString())
- << "multi"
- << false
- << "upsert"
- << true))
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 60000)
- << "allowImplicitCollectionCreation"
- << true);
+ BSONObj expectedBSON = BSON(
+ "update"
+ << "system.version"
+ << "bypassDocumentValidation" << false << "ordered" << true << "updates"
+ << BSON_ARRAY(BSON(
+ "q" << BSON("_id"
+ << "shardIdentity")
+ << "u"
+ << BSON(
+ "shardName"
+ << shardName << "clusterId" << _clusterId << "configsvrConnectionString"
+ << replicationCoordinator()->getConfig().getConnectionString().toString())
+ << "multi" << false << "upsert" << true))
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout" << 60000)
+ << "allowImplicitCollectionCreation" << true);
auto addShardCmd = add_shard_util::createAddShardCmd(operationContext(), shardName);
auto actualBSON = add_shard_util::createShardIdentityUpsertForAddShard(addShardCmd);
ASSERT_BSONOBJ_EQ(expectedBSON, actualBSON);
@@ -427,8 +414,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -508,8 +494,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
@@ -648,8 +633,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -706,8 +690,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "myOtherSet"
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -735,12 +718,10 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
"as a shard since it is a config server");
});
- BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
- << "config"
- << "configsvr"
- << true
- << "maxWireVersion"
- << WireVersion::LATEST_WIRE_VERSION);
+ BSONObj commandResponse =
+ BSON("ok" << 1 << "ismaster" << true << "setName"
+ << "config"
+ << "configsvr" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
future.timed_get(kLongFutureTimeout);
@@ -772,9 +753,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -808,9 +787,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -855,9 +832,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -900,9 +875,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -966,9 +939,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts"
- << hosts.arr()
- << "maxWireVersion"
+ << "hosts" << hosts.arr() << "maxWireVersion"
<< WireVersion::LATEST_WIRE_VERSION);
expectIsMaster(shardTarget, commandResponse);
@@ -1049,8 +1020,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk"
- << 1000),
+ << "sizeOnDisk" << 1000),
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 2cf8b41864a..86aa76b89dc 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -129,8 +129,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
BSON("query" << BSON(ChunkType::ns(chunk.getNS().ns())
<< ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()))
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
b.append("res",
BSON(ChunkType::epoch(collVersion.epoch())
<< ChunkType::shard(chunk.getShard().toString())));
@@ -146,8 +145,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
const ShardId& shard) {
BSONObj chunkQuery =
BSON(ChunkType::ns() << nss.ns() << ChunkType::min() << min << ChunkType::max() << max
- << ChunkType::shard()
- << shard);
+ << ChunkType::shard() << shard);
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
@@ -166,8 +164,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
if (findResponseWith.getValue().docs.empty()) {
return {ErrorCodes::Error(40165),
str::stream()
- << "Could not find the chunk ("
- << chunkQuery.toString()
+ << "Could not find the chunk (" << chunkQuery.toString()
<< ") on the shard. Cannot execute the migration commit with invalid chunks."};
}
@@ -345,18 +342,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
return {
ErrorCodes::InvalidOptions,
str::stream() << "Split keys must be specified in strictly increasing order. Key "
- << endKey
- << " was specified after "
- << startKey
- << "."};
+ << endKey << " was specified after " << startKey << "."};
}
// Verify that splitPoints are not repeated
if (endKey.woCompare(startKey) == 0) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Split on lower bound of chunk "
- << ChunkRange(startKey, endKey).toString()
- << "is not allowed"};
+ << ChunkRange(startKey, endKey).toString() << "is not allowed"};
}
// verify that splits don't create too-big shard keys
@@ -419,10 +412,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
b.append("ns", ChunkType::ConfigNS.ns());
b.append("q",
BSON("query" << BSON(ChunkType::ns(nss.ns()) << ChunkType::min() << range.getMin()
- << ChunkType::max()
- << range.getMax())
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ << ChunkType::max() << range.getMax())
+ << "orderby" << BSON(ChunkType::lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
bb.append(ChunkType::epoch(), requestEpoch);
@@ -544,10 +535,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ErrorCodes::InvalidOptions,
str::stream()
<< "Chunk boundaries must be specified in strictly increasing order. Boundary "
- << chunkBoundaries[i]
- << " was specified after "
- << itChunk.getMin()
- << "."};
+ << chunkBoundaries[i] << " was specified after " << itChunk.getMin() << "."};
}
itChunk.setMax(chunkBoundaries[i]);
@@ -660,11 +648,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
+ << "', but the shard's is " << collectionEpoch.toString()
<< "'. Aborting migration commit for chunk ("
- << migratedChunk.getRange().toString()
- << ")."};
+ << migratedChunk.getRange().toString() << ")."};
}
// Check that migratedChunk is where it should be, on fromShard.
@@ -827,9 +813,7 @@ StatusWith<ChunkVersion> ShardingCatalogManager::_findCollectionVersion(
<< "' has been dropped and recreated since the migration began."
" The config server's collection version epoch is now '"
<< currentCollectionVersion.epoch().toString()
- << "', but the shard's is "
- << collectionEpoch.toString()
- << "'."};
+ << "', but the shard's is " << collectionEpoch.toString() << "'."};
}
return currentCollectionVersion;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 66825b32c47..a442232d4c3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -75,9 +75,9 @@
namespace mongo {
using CollectionUUID = UUID;
+using std::set;
using std::string;
using std::vector;
-using std::set;
namespace {
@@ -114,8 +114,8 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
// TODO: SERVER-33048 check idIndex field
uassert(ErrorCodes::NamespaceExists,
- str::stream() << "ns: " << ns.ns() << " already exists with different options: "
- << actualOptions.toBSON(),
+ str::stream() << "ns: " << ns.ns()
+ << " already exists with different options: " << actualOptions.toBSON(),
options.matchesStorageOptions(
actualOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())));
@@ -171,8 +171,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -433,7 +432,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
optimizationType,
treatAsEmpty,
1 // numContiguousChunksPerShard
- );
+ );
} else {
initialChunks = InitialSplitPolicy::createFirstChunksUnoptimized(
opCtx, nss, fieldsAndOrder, dbPrimaryShardId);
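Two smaller changes appear in the file above: the block of using-declarations is sorted alphabetically (this matches clang-format's SortUsingDeclarations option), and a closing parenthesis that sits on its own line because of a trailing comment on the last argument is only re-indented. A compilable sketch under those assumptions; sum and its arguments are illustrative only.

#include <set>
#include <string>
#include <vector>

using std::set;  // SortUsingDeclarations orders this block alphabetically
using std::string;
using std::vector;

int sum(int a, int b, int c) {
    return a + b + c;
}

int example() {
    // A trailing comment on the final argument keeps the ')' on its own line;
    // the patch above changes only its indentation.
    return sum(1,
               2,
               3  // numContiguousChunksPerShard (illustrative)
    );
}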
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index f66e29f6f74..8a2a0b0490e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -91,10 +91,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: "
- << actualDbName
- << " want to add: "
- << dbName,
+ << " have: " << actualDbName << " want to add: " << dbName,
actualDbName == dbName);
// We did a local read of the database entry above and found that the database already
@@ -251,8 +248,7 @@ Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
// are holding the dist lock during the movePrimary operation.
uassert(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "Tried to update primary shard for database '" << dbname
- << " with version "
- << currentDatabaseVersion.getLastMod(),
+ << " with version " << currentDatabaseVersion.getLastMod(),
updateStatus.getValue());
// Ensure the next attempt to retrieve the database or any of its collections will do a full
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
index 7452b250f14..52681ef3bdd 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
@@ -141,15 +141,13 @@ TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
setupShards(vector<ShardType>{shard});
// Set up database with bad type for primary field.
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- DatabaseType::ConfigNS,
- BSON("_id"
- << "db6"
- << "primary"
- << 12
- << "partitioned"
- << false),
- ShardingCatalogClient::kMajorityWriteConcern));
+ ASSERT_OK(
+ catalogClient()->insertConfigDocument(operationContext(),
+ DatabaseType::ConfigNS,
+ BSON("_id"
+ << "db6"
+ << "primary" << 12 << "partitioned" << false),
+ ShardingCatalogClient::kMajorityWriteConcern));
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())->enableSharding(operationContext(), "db6"),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 8e6e2e29423..066405d32b8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -181,19 +181,17 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
Status commandStatus = getStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(commandStatus.code())) {
- commandStatus = {ErrorCodes::OperationFailed,
- str::stream() << "failed to run command " << cmdObj
- << " when attempting to add shard "
- << targeter->connectionString().toString()
- << causedBy(commandStatus)};
+ commandStatus = {
+ ErrorCodes::OperationFailed,
+ str::stream() << "failed to run command " << cmdObj << " when attempting to add shard "
+ << targeter->connectionString().toString() << causedBy(commandStatus)};
}
Status writeConcernStatus = getWriteConcernStatusFromCommandResult(result);
if (!Shard::shouldErrorBePropagated(writeConcernStatus.code())) {
writeConcernStatus = {ErrorCodes::OperationFailed,
str::stream() << "failed to satisfy writeConcern for command "
- << cmdObj
- << " when attempting to add shard "
+ << cmdObj << " when attempting to add shard "
<< targeter->connectionString().toString()
<< causedBy(writeConcernStatus)};
}
@@ -257,8 +255,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
} else {
return {ErrorCodes::IllegalOperation,
str::stream() << "A shard already exists containing the replica set '"
- << existingShardConnStr.getSetName()
- << "'"};
+ << existingShardConnStr.getSetName() << "'"};
}
}
@@ -277,10 +274,8 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManager::_checkIfShardExis
return {ErrorCodes::IllegalOperation,
str::stream() << "'" << addingHost.toString() << "' "
<< "is already a member of the existing shard '"
- << existingShard.getHost()
- << "' ("
- << existingShard.getName()
- << ")."};
+ << existingShard.getHost() << "' ("
+ << existingShard.getName() << ")."};
}
}
}
@@ -340,8 +335,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (serverGlobalParams.featureCompatibility.getVersion() >
ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40) {
@@ -362,8 +356,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!status.isOK()) {
return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
- << connectionString.toString()
- << " as a shard");
+ << connectionString.toString() << " as a shard");
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -387,8 +380,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? "
- << resIsMaster};
+ << "is the replica set still initializing? " << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -396,8 +388,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name "
- << foundSetName};
+ << ") does not match the actual set name " << foundSetName};
}
// Is it a config server?
@@ -437,11 +428,8 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host
- << " does not belong to replica set "
- << foundSetName
- << "; found "
- << resIsMaster.toString()};
+ << host << " does not belong to replica set " << foundSetName
+ << "; found " << resIsMaster.toString()};
}
}
}
@@ -611,13 +599,9 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'"
- << shardConnectionString.toString()
- << "'"
- << " because a local database '"
- << dbName
- << "' exists in another "
- << dbDoc.getPrimary());
+ << "'" << shardConnectionString.toString() << "'"
+ << " because a local database '" << dbName
+ << "' exists in another " << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 34b03b338d3..f0c13ec3fef 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -154,17 +154,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
if (!range.getMin().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
if (!range.getMax().isFieldNamePrefixOf(shardKeyBSON)) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key "
- << shardKeyBSON
- << " of ns: "
- << nss.ns()};
+ << shardKeyBSON << " of ns: " << nss.ns()};
}
return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false),
diff --git a/src/mongo/db/s/config_server_op_observer_test.cpp b/src/mongo/db/s/config_server_op_observer_test.cpp
index fc5ff24708d..eca0a3a19b5 100644
--- a/src/mongo/db/s/config_server_op_observer_test.cpp
+++ b/src/mongo/db/s/config_server_op_observer_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/s/cluster_identity_loader.h"
#include "mongo/s/config_server_test_fixture.h"
#include "mongo/unittest/death_test.h"
diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp
index 13429421b43..77728821151 100644
--- a/src/mongo/db/s/flush_database_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp
@@ -119,8 +119,7 @@ public:
uasserted(ErrorCodes::NamespaceNotFound,
str::stream()
<< "Can't issue _flushDatabaseCacheUpdates on the database "
- << _dbName()
- << " because it does not exist on this shard.");
+ << _dbName() << " because it does not exist on this shard.");
}
// If the primary is in the critical section, secondaries must wait for the commit
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 0a808e8daac..75ea7635773 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -79,16 +79,13 @@ void mergeChunks(OperationContext* opCtx,
const BSONObj& minKey,
const BSONObj& maxKey,
const OID& epoch) {
- const std::string whyMessage = str::stream() << "merging chunks in " << nss.ns() << " from "
- << minKey << " to " << maxKey;
+ const std::string whyMessage = str::stream()
+ << "merging chunks in " << nss.ns() << " from " << minKey << " to " << maxKey;
auto scopedDistLock = uassertStatusOKWithContext(
Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
str::stream() << "could not acquire collection lock for " << nss.ns()
- << " to merge chunks in ["
- << redact(minKey)
- << ", "
- << redact(maxKey)
+ << " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
<< ")");
auto const shardingState = ShardingState::get(opCtx);
@@ -109,20 +106,14 @@ void mergeChunks(OperationContext* opCtx,
const auto shardVersion = metadata->getShardVersion();
uassert(ErrorCodes::StaleEpoch,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " has changed since merge was sent (sent epoch: "
- << epoch.toString()
- << ", current epoch: "
- << shardVersion.epoch()
- << ")",
+ << " has changed since merge was sent (sent epoch: " << epoch.toString()
+ << ", current epoch: " << shardVersion.epoch() << ")",
shardVersion.epoch() == epoch);
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, the range "
- << redact(ChunkRange(minKey, maxKey).toString())
- << " is not valid"
- << " for collection "
- << nss.ns()
- << " with key pattern "
+ << redact(ChunkRange(minKey, maxKey).toString()) << " is not valid"
+ << " for collection " << nss.ns() << " with key pattern "
<< metadata->getKeyPattern().toString(),
metadata->isValidKey(minKey) && metadata->isValidKey(maxKey));
@@ -145,11 +136,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " and ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " and ending at "
+ << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
!chunksToMerge.empty());
@@ -164,9 +152,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at "
- << redact(minKey)
- << " does not belong to shard "
+ << " range starting at " << redact(minKey) << " does not belong to shard "
<< shardingState->shardId(),
minKeyInRange);
@@ -177,9 +163,7 @@ void mergeChunks(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
str::stream() << "could not merge chunks, collection " << nss.ns()
- << " range ending at "
- << redact(maxKey)
- << " does not belong to shard "
+ << " range ending at " << redact(maxKey) << " does not belong to shard "
<< shardingState->shardId(),
maxKeyInRange);
@@ -205,11 +189,8 @@ void mergeChunks(OperationContext* opCtx,
uassert(
ErrorCodes::IllegalOperation,
str::stream()
- << "could not merge chunks, collection "
- << nss.ns()
- << " has a hole in the range "
- << ChunkRange(minKey, maxKey).toString()
- << " at "
+ << "could not merge chunks, collection " << nss.ns() << " has a hole in the range "
+ << ChunkRange(minKey, maxKey).toString() << " at "
<< ChunkRange(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin()).toString(),
chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) == 0);
}
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index b111875db39..4926fe86508 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -143,7 +143,7 @@ void scheduleCleanup(executor::TaskExecutor* executor,
Date_t when) {
LOG(1) << "Scheduling cleanup on " << nss.ns() << " at " << when;
auto swCallbackHandle = executor->scheduleWorkAt(
- when, [ executor, nss = std::move(nss), epoch = std::move(epoch) ](auto& args) {
+ when, [executor, nss = std::move(nss), epoch = std::move(epoch)](auto& args) {
auto& status = args.status;
if (ErrorCodes::isCancelationError(status.code())) {
return;
@@ -229,11 +229,11 @@ MetadataManager::~MetadataManager() {
}
void MetadataManager::_clearAllCleanups(WithLock lock) {
- _clearAllCleanups(
- lock,
- {ErrorCodes::InterruptedDueToReplStateChange,
- str::stream() << "Range deletions in " << _nss.ns()
- << " abandoned because collection was dropped or became unsharded"});
+ _clearAllCleanups(lock,
+ {ErrorCodes::InterruptedDueToReplStateChange,
+ str::stream()
+ << "Range deletions in " << _nss.ns()
+ << " abandoned because collection was dropped or became unsharded"});
}
void MetadataManager::_clearAllCleanups(WithLock, Status status) {
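The lambda hunk above shows the other recurring change: under this clang-format upgrade, capture lists with init-captures lose the padding inside the brackets ("[ executor, nss = std::move(nss) ]" becomes "[executor, nss = std::move(nss)]"). A minimal C++14 sketch; example and its parameter are illustrative only.

#include <cstddef>
#include <string>
#include <utility>

std::size_t example(std::string nss) {
    // Before: auto fn = [ nss = std::move(nss) ](std::size_t x) { ... };
    // After:  auto fn = [nss = std::move(nss)](std::size_t x) { ... };
    auto fn = [nss = std::move(nss)](std::size_t x) { return nss.size() + x; };
    return fn(1);
}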
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index cc632bcbbc2..a0ca0696f16 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -328,8 +328,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
return {ErrorCodes::OperationIncomplete,
str::stream() << "Unable to enter critical section because the recipient "
"shard thinks all data is cloned while there are still "
- << cloneLocsRemaining
- << " documents remaining"};
+ << cloneLocsRemaining << " documents remaining"};
}
return Status::OK();
@@ -746,8 +745,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for "
- << _args.getNss().ns()};
+ << " in storeCurrentLocs for " << _args.getNss().ns()};
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
@@ -819,19 +817,10 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
return {
ErrorCodes::ChunkTooBig,
str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is "
- << maxRecsWhenFull
- << ", the maximum chunk size is "
- << _args.getMaxChunkSizeBytes()
- << ", average document size is "
- << avgRecSize
- << ". Found "
- << recCount
- << " documents in chunk "
- << " ns: "
- << _args.getNss().ns()
- << " "
- << _args.getMinKey()
- << " -> "
+ << maxRecsWhenFull << ", the maximum chunk size is "
+ << _args.getMaxChunkSizeBytes() << ", average document size is "
+ << avgRecSize << ". Found " << recCount << " documents in chunk "
+ << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> "
<< _args.getMaxKey()};
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index aa21bce528a..1e5fe3ec7e1 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -86,8 +86,8 @@ public:
invariant(_chunkCloner);
} else {
uasserted(ErrorCodes::IllegalOperation,
- str::stream() << "No active migrations were found for collection "
- << nss->ns());
+ str::stream()
+ << "No active migrations were found for collection " << nss->ns());
}
}
@@ -317,9 +317,7 @@ public:
auto rollbackId = repl::ReplicationProcess::get(opCtx)->getRollbackID();
uassert(50881,
str::stream() << "rollback detected, rollbackId was "
- << rollbackIdAtMigrationInit
- << " but is now "
- << rollbackId,
+ << rollbackIdAtMigrationInit << " but is now " << rollbackId,
rollbackId == rollbackIdAtMigrationInit);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 68bb3aba86e..a88be055ad6 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -436,8 +436,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "received abort request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -462,8 +461,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
str::stream() << "Migration startCommit attempted when not in STEADY state."
- << " Sender's session is "
- << sessionId.toString()
+ << " Sender's session is " << sessionId.toString()
<< (_sessionId ? (". Current session is " + _sessionId->toString())
: ". No active session on this shard.")};
}
@@ -477,8 +475,7 @@ Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessio
if (!_sessionId->matches(sessionId)) {
return {ErrorCodes::CommandFailed,
str::stream() << "startCommit received commit request from a stale session "
- << sessionId.toString()
- << ". Current session is "
+ << sessionId.toString() << ". Current session is "
<< _sessionId->toString()};
}
@@ -550,9 +547,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
auto infos = infosRes.docs;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "expected listCollections against the primary shard for "
- << nss.toString()
- << " to return 1 entry, but got "
- << infos.size()
+ << nss.toString() << " to return 1 entry, but got " << infos.size()
<< " entries",
infos.size() == 1);
@@ -574,8 +569,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream() << "The donor shard did not return a UUID for collection " << nss.ns()
- << " as part of its listCollections response: "
- << entry
+ << " as part of its listCollections response: " << entry
<< ", but this node expects to see a UUID.",
!info["uuid"].eoo());
@@ -602,8 +596,7 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
uassert(ErrorCodes::InvalidUUID,
str::stream()
- << "Cannot create collection "
- << nss.ns()
+ << "Cannot create collection " << nss.ns()
<< " because we already have an identically named collection with UUID "
<< (collection->uuid() ? collection->uuid()->toString() : "(none)")
<< ", which differs from the donor's UUID "
@@ -622,10 +615,10 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions(OperationCont
if (!indexSpecs.empty()) {
// Only allow indexes to be copied if the collection does not have any documents.
uassert(ErrorCodes::CannotCreateCollection,
- str::stream() << "aborting, shard is missing " << indexSpecs.size()
- << " indexes and "
- << "collection is not empty. Non-trivial "
- << "index creation should be scheduled manually",
+ str::stream()
+ << "aborting, shard is missing " << indexSpecs.size() << " indexes and "
+ << "collection is not empty. Non-trivial "
+ << "index creation should be scheduled manually",
collection->numRecords(opCtx) == 0);
}
return indexSpecs;
@@ -1153,10 +1146,9 @@ CollectionShardingRuntime::CleanupNotification MigrationDestinationManager::_not
if (!optMetadata || !(*optMetadata)->isSharded() ||
(*optMetadata)->getCollVersion().epoch() != _epoch) {
return Status{ErrorCodes::StaleShardVersion,
- str::stream() << "Not marking chunk " << redact(range.toString())
- << " as pending because the epoch of "
- << _nss.ns()
- << " changed"};
+ str::stream()
+ << "Not marking chunk " << redact(range.toString())
+ << " as pending because the epoch of " << _nss.ns() << " changed"};
}
// Start clearing any leftovers that would be in the new chunk
diff --git a/src/mongo/db/s/migration_session_id.cpp b/src/mongo/db/s/migration_session_id.cpp
index d2cfeab3254..7049a0870cf 100644
--- a/src/mongo/db/s/migration_session_id.cpp
+++ b/src/mongo/db/s/migration_session_id.cpp
@@ -53,8 +53,8 @@ MigrationSessionId MigrationSessionId::generate(StringData donor, StringData rec
invariant(!donor.empty());
invariant(!recipient.empty());
- return MigrationSessionId(str::stream() << donor << "_" << recipient << "_"
- << OID::gen().toString());
+ return MigrationSessionId(str::stream()
+ << donor << "_" << recipient << "_" << OID::gen().toString());
}
StatusWith<MigrationSessionId> MigrationSessionId::extractFromBSON(const BSONObj& obj) {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index ab4d76a8952..4533ec35968 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -191,10 +191,8 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
uassert(ErrorCodes::StaleEpoch,
str::stream() << "cannot move chunk " << _args.toString()
<< " because collection may have been dropped. "
- << "current epoch: "
- << collectionVersion.epoch()
- << ", cmd epoch: "
- << _args.getVersionEpoch(),
+ << "current epoch: " << collectionVersion.epoch()
+ << ", cmd epoch: " << _args.getVersionEpoch(),
_args.getVersionEpoch() == collectionVersion.epoch());
ChunkType chunkToMove;
@@ -229,9 +227,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
"moveChunk.start",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if (logStatus != Status::OK()) {
return logStatus;
@@ -455,9 +451,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.validating",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
if ((ErrorCodes::isInterruption(status.code()) ||
@@ -490,12 +484,11 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
}
fassert(40137,
- status.withContext(
- str::stream() << "Failed to commit migration for chunk " << _args.toString()
- << " due to "
- << redact(migrationCommitStatus)
- << ". Updating the optime with a write before refreshing the "
- << "metadata also failed"));
+ status.withContext(str::stream()
+ << "Failed to commit migration for chunk " << _args.toString()
+ << " due to " << redact(migrationCommitStatus)
+ << ". Updating the optime with a write before refreshing the "
+ << "metadata also failed"));
}
// Do a best effort attempt to incrementally refresh the metadata before leaving the critical
@@ -527,8 +520,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
return migrationCommitStatus.withContext(
str::stream() << "Orphaned range not cleaned up. Failed to refresh metadata after"
" migration commit due to '"
- << refreshStatus.toString()
- << "' after commit failed");
+ << refreshStatus.toString() << "' after commit failed");
}
const auto refreshedMetadata = _getCurrentMetadataAndCheckEpoch(opCtx);
@@ -572,10 +564,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"moveChunk.commit",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()
- << "counts"
+ << _args.getFromShardId() << "to" << _args.getToShardId() << "counts"
<< _recipientCloneCounts),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -635,9 +624,7 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
"moveChunk.error",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
+ << _args.getFromShardId() << "to" << _args.getToShardId()),
ShardingCatalogClient::kMajorityWriteConcern);
try {
@@ -664,8 +651,7 @@ ScopedCollectionMetadata MigrationSourceManager::_getCurrentMetadataAndCheckEpoc
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "The collection was dropped or recreated since the migration began. "
- << "Expected collection epoch: "
- << _collectionEpoch.toString()
+ << "Expected collection epoch: " << _collectionEpoch.toString()
<< ", but found: "
<< (metadata->isSharded() ? metadata->getCollVersion().epoch().toString()
: "unsharded collection."),
@@ -687,9 +673,7 @@ void MigrationSourceManager::_notifyChangeStreamsOnRecipientFirstChunk(
// The message expected by change streams
const auto o2Message = BSON("type"
<< "migrateChunkToNewShard"
- << "from"
- << _args.getFromShardId()
- << "to"
+ << "from" << _args.getFromShardId() << "to"
<< _args.getToShardId());
auto const serviceContext = opCtx->getClient()->getServiceContext();
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index ac20cb2f350..a66109e73ba 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -45,7 +45,7 @@ const char kDestinationShard[] = "destination";
const char kIsDonorShard[] = "isDonorShard";
const char kChunk[] = "chunk";
const char kCollection[] = "collection";
-}
+} // namespace
BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const ShardId& fromShard,
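The two hunks above correspond to clang-format's FixNamespaceComments behavior: a bare closing brace of a namespace gains a "// namespace" trailer, and a stale trailer ("shardutil" on the migrationutil namespace) is corrected. A trivial sketch; placeholder and kChunk are illustrative only.

namespace migrationutil {
int placeholder();  // illustrative declaration only
}  // namespace migrationutil (trailer corrected by FixNamespaceComments)

namespace {
const char kChunk[] = "chunk";
}  // namespace (trailer appended by FixNamespaceComments)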
diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h
index dc2469d8602..67b59761477 100644
--- a/src/mongo/db/s/migration_util.h
+++ b/src/mongo/db/s/migration_util.h
@@ -56,6 +56,6 @@ BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max);
-} // namespace shardutil
+} // namespace migrationutil
} // namespace mongo
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index dd62c984292..8fafb8c0253 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -149,8 +149,8 @@ public:
} catch (const std::exception& e) {
scopedMigration.signalComplete(
{ErrorCodes::InternalError,
- str::stream() << "Severe error occurred while running moveChunk command: "
- << e.what()});
+ str::stream()
+ << "Severe error occurred while running moveChunk command: " << e.what()});
throw;
}
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index 63a1ebb7bd6..be0a16193eb 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -282,8 +282,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
fassert(50762,
validateStatus.withContext(
str::stream() << "Failed to commit movePrimary for database " << getNss().ns()
- << " due to "
- << redact(commitStatus)
+ << " due to " << redact(commitStatus)
<< ". Updating the optime with a write before clearing the "
<< "version also failed"));
diff --git a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
index de61f5fbfd2..baea9099032 100644
--- a/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
+++ b/src/mongo/db/s/scoped_operation_completion_sharding_actions.h
@@ -37,7 +37,7 @@ namespace mongo {
* This class has a destructor that handles rerouting exceptions that might have occurred
* during an operation. For this reason, there should be only one instance of this object
* on the chain of one OperationContext.
-*/
+ */
class OperationContext;
class ScopedOperationCompletionShardingActions : public PolymorphicScoped {
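The one-character hunk above re-aligns the closing delimiter of a Javadoc-style block comment under its leading asterisks; the pre-format version had it flush against the left margin. A sketch of the corrected shape:

/**
 * The closing delimiter below is aligned under the leading asterisks;
 * before formatting it sat at column zero.
 */
class Example;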
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index fd6a3086dd7..0ff9dcaa737 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -92,10 +92,8 @@ repl::OplogLink extractPrePostImageTs(const ProcessOplogResult& lastResult,
if (!lastResult.isPrePostImage) {
uassert(40628,
str::stream() << "expected oplog with ts: " << entry.getTimestamp().toString()
- << " to not have "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
- << repl::OplogEntryBase::kPostImageOpTimeFieldName,
+ << " to not have " << repl::OplogEntryBase::kPreImageOpTimeFieldName
+ << " or " << repl::OplogEntryBase::kPostImageOpTimeFieldName,
!entry.getPreImageOpTime() && !entry.getPostImageOpTime());
return oplogLink;
@@ -109,15 +107,11 @@ repl::OplogLink extractPrePostImageTs(const ProcessOplogResult& lastResult,
uassert(40629,
str::stream() << "expected oplog with ts: " << entry.getTimestamp().toString() << ": "
- << redact(entry.toBSON())
- << " to have session: "
- << lastResult.sessionId,
+ << redact(entry.toBSON()) << " to have session: " << lastResult.sessionId,
lastResult.sessionId == sessionId);
uassert(40630,
str::stream() << "expected oplog with ts: " << entry.getTimestamp().toString() << ": "
- << redact(entry.toBSON())
- << " to have txnNumber: "
- << lastResult.txnNum,
+ << redact(entry.toBSON()) << " to have txnNumber: " << lastResult.txnNum,
lastResult.txnNum == txnNum);
if (entry.getPreImageOpTime()) {
@@ -127,11 +121,8 @@ repl::OplogLink extractPrePostImageTs(const ProcessOplogResult& lastResult,
} else {
uasserted(40631,
str::stream() << "expected oplog with opTime: " << entry.getOpTime().toString()
- << ": "
- << redact(entry.toBSON())
- << " to have either "
- << repl::OplogEntryBase::kPreImageOpTimeFieldName
- << " or "
+ << ": " << redact(entry.toBSON()) << " to have either "
+ << repl::OplogEntryBase::kPreImageOpTimeFieldName << " or "
<< repl::OplogEntryBase::kPostImageOpTimeFieldName);
}
@@ -152,20 +143,17 @@ repl::OplogEntry parseOplog(const BSONObj& oplogBSON) {
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have sessionId: "
- << redact(oplogBSON),
+ << " does not have sessionId: " << redact(oplogBSON),
sessionInfo.getSessionId());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have txnNumber: "
- << redact(oplogBSON),
+ << " does not have txnNumber: " << redact(oplogBSON),
sessionInfo.getTxnNumber());
uassert(ErrorCodes::UnsupportedFormat,
str::stream() << "oplog with opTime " << oplogEntry.getTimestamp().toString()
- << " does not have stmtId: "
- << redact(oplogBSON),
+ << " does not have stmtId: " << redact(oplogBSON),
oplogEntry.getStatementId());
return oplogEntry;
@@ -234,9 +222,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
uassert(40632,
str::stream() << "Can't handle 2 pre/post image oplog in a row. Prevoius oplog "
<< lastResult.oplogTime.getTimestamp().toString()
- << ", oplog ts: "
- << oplogEntry.getTimestamp().toString()
- << ": "
+ << ", oplog ts: " << oplogEntry.getTimestamp().toString() << ": "
<< oplogBSON,
!lastResult.isPrePostImage);
}
@@ -310,9 +296,7 @@ ProcessOplogResult processSessionOplog(const BSONObj& oplogBSON,
const auto& oplogOpTime = result.oplogTime;
uassert(40633,
str::stream() << "Failed to create new oplog entry for oplog with opTime: "
- << oplogEntry.getOpTime().toString()
- << ": "
- << redact(oplogBSON),
+ << oplogEntry.getOpTime().toString() << ": " << redact(oplogBSON),
!oplogOpTime.isNull());
// Do not call onWriteOpCompletedOnPrimary if we inserted a pre/post image, because the
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 11efb9ad1d3..86f8a8a6cf6 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -259,8 +259,9 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
// Skip the rest of the chain for this session since the ns is unrelated with the
// current one being migrated. It is ok to not check the rest of the chain because
// retryable writes doesn't allow touching different namespaces.
- if (!nextStmtId || (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
- nextOplog->getNss() != _ns)) {
+ if (!nextStmtId ||
+ (nextStmtId && *nextStmtId != kIncompleteHistoryStmtId &&
+ nextOplog->getNss() != _ns)) {
_currentOplogIterator.reset();
return false;
}
@@ -419,8 +420,7 @@ boost::optional<repl::OplogEntry> SessionCatalogMigrationSource::SessionOplogIte
uassert(40656,
str::stream() << "rollback detected, rollbackId was " << _initialRollbackId
- << " but is now "
- << rollbackId,
+ << " but is now " << rollbackId,
rollbackId == _initialRollbackId);
// If the rollbackId hasn't changed, and this record corresponds to a retryable write,
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index dd03e31b206..10564146ca4 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -164,8 +164,7 @@ public:
const auto storedShardName = shardingState->shardId().toString();
uassert(ErrorCodes::BadValue,
str::stream() << "received shardName " << shardName
- << " which differs from stored shardName "
- << storedShardName,
+ << " which differs from stored shardName " << storedShardName,
storedShardName == shardName);
// Validate config connection string parameter.
@@ -184,8 +183,7 @@ public:
Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString();
uassert(ErrorCodes::IllegalOperation,
str::stream() << "Given config server set name: " << givenConnStr.getSetName()
- << " differs from known set name: "
- << storedConnStr.getSetName(),
+ << " differs from known set name: " << storedConnStr.getSetName(),
givenConnStr.getSetName() == storedConnStr.getSetName());
// Validate namespace parameter.
@@ -366,11 +364,11 @@ public:
if (!status.isOK()) {
// The reload itself was interrupted or confused here
- errmsg = str::stream() << "could not refresh metadata for " << nss.ns()
- << " with requested shard version "
- << requestedVersion.toString()
- << ", stored shard version is " << currVersion.toString()
- << causedBy(redact(status));
+ errmsg = str::stream()
+ << "could not refresh metadata for " << nss.ns()
+ << " with requested shard version " << requestedVersion.toString()
+ << ", stored shard version is " << currVersion.toString()
+ << causedBy(redact(status));
warning() << errmsg;
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 3776b6e89e9..0c25b399c43 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -101,8 +101,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
maxCollVersion.incMajor();
BSONObj shardChunk =
BSON(ChunkType::minShardID(mins[i])
- << ChunkType::max(maxs[i])
- << ChunkType::shard(kShardId.toString())
+ << ChunkType::max(maxs[i]) << ChunkType::shard(kShardId.toString())
<< ChunkType::lastmod(Date_t::fromMillisSinceEpoch(maxCollVersion.toLong())));
chunks.push_back(
@@ -142,8 +141,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
try {
DBDirectClient client(operationContext());
for (auto& chunk : chunks) {
- Query query(BSON(ChunkType::minShardID() << chunk.getMin() << ChunkType::max()
- << chunk.getMax()));
+ Query query(BSON(ChunkType::minShardID()
+ << chunk.getMin() << ChunkType::max() << chunk.getMax()));
query.readPref(ReadPreference::Nearest, BSONArray());
std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index e1a9cb39cd9..142b7d3e69f 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -158,9 +158,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
}
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read persisted collections entry for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithCollection.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithCollection.getStatus().toString()
<< "'.",
statusWithCollection.isOK());
@@ -173,9 +171,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
statusWithCollection.getValue().getEpoch());
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to read highest version persisted chunk for collection '"
- << nss.ns()
- << "' due to '"
- << statusWithChunk.getStatus().toString()
+ << nss.ns() << "' due to '" << statusWithChunk.getStatus().toString()
<< "'.",
statusWithChunk.isOK());
@@ -263,8 +259,8 @@ StatusWith<CollectionAndChangedChunks> getIncompletePersistedMetadataSinceVersio
return CollectionAndChangedChunks();
}
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to load local metadata due to '" << status.toString()
- << "'.");
+ str::stream()
+ << "Failed to load local metadata due to '" << status.toString() << "'.");
}
}
@@ -437,8 +433,8 @@ void ShardServerCatalogCacheLoader::getDatabase(
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
- _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary, term ](
- auto status) noexcept {
+ _threadPool.schedule([ this, name = dbName.toString(), callbackFn, isPrimary,
+ term ](auto status) noexcept {
invariant(status);
auto context = _contexts.makeOperationContext(*Client::getCurrent());
@@ -611,19 +607,18 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}();
auto remoteRefreshFn = [this, nss, catalogCacheSinceVersion, maxLoaderVersion, termScheduled](
- OperationContext* opCtx,
- StatusWith<CollectionAndChangedChunks>
- swCollectionAndChangedChunks) -> StatusWith<CollectionAndChangedChunks> {
-
+ OperationContext* opCtx,
+ StatusWith<CollectionAndChangedChunks> swCollectionAndChangedChunks)
+ -> StatusWith<CollectionAndChangedChunks> {
if (swCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleCollAndChunksTask(
opCtx,
nss,
collAndChunkTask{swCollectionAndChangedChunks, maxLoaderVersion, termScheduled});
- LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for collection " << nss
- << " from version " << maxLoaderVersion
- << " and no metadata was found.";
+ LOG_CATALOG_REFRESH(1)
+ << "Cache loader remotely refreshed for collection " << nss << " from version "
+ << maxLoaderVersion << " and no metadata was found.";
return swCollectionAndChangedChunks;
}
@@ -634,12 +629,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
auto& collAndChunks = swCollectionAndChangedChunks.getValue();
if (collAndChunks.changedChunks.back().getVersion().epoch() != collAndChunks.epoch) {
- return Status{
- ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Invalid chunks found when reloading '" << nss.toString()
+ return Status{ErrorCodes::ConflictingOperationInProgress,
+ str::stream()
+ << "Invalid chunks found when reloading '" << nss.toString()
<< "' Previous collection epoch was '"
- << collAndChunks.epoch.toString()
- << "', but found a new epoch '"
+ << collAndChunks.epoch.toString() << "', but found a new epoch '"
<< collAndChunks.changedChunks.back().getVersion().epoch().toString()
<< "'. Collection was dropped and recreated."};
}
@@ -716,8 +710,8 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
StringData dbName,
long long termScheduled,
stdx::function<void(OperationContext*, StatusWith<DatabaseType>)> callbackFn) {
- auto remoteRefreshFn = [ this, name = dbName.toString(), termScheduled ](
- OperationContext * opCtx, StatusWith<DatabaseType> swDatabaseType) {
+ auto remoteRefreshFn = [this, name = dbName.toString(), termScheduled](
+ OperationContext* opCtx, StatusWith<DatabaseType> swDatabaseType) {
if (swDatabaseType == ErrorCodes::NamespaceNotFound) {
_ensureMajorityPrimaryAndScheduleDbTask(
opCtx, name, DBTask{swDatabaseType, termScheduled});
@@ -777,11 +771,12 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
: ("enqueued metadata from " +
enqueued.changedChunks.front().getVersion().toString() + " to " +
enqueued.changedChunks.back().getVersion().toString()))
- << " and " << (persisted.changedChunks.empty()
- ? "no persisted metadata"
- : ("persisted metadata from " +
- persisted.changedChunks.front().getVersion().toString() + " to " +
- persisted.changedChunks.back().getVersion().toString()))
+ << " and "
+ << (persisted.changedChunks.empty()
+ ? "no persisted metadata"
+ : ("persisted metadata from " +
+ persisted.changedChunks.front().getVersion().toString() + " to " +
+ persisted.changedChunks.back().getVersion().toString()))
<< ", GTE cache version " << catalogCacheSinceVersion;
if (!tasksAreEnqueued) {
@@ -892,7 +887,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
return;
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
invariant(status);
_runDbTasks(name);
@@ -979,7 +974,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
}
- _threadPool.schedule([ this, name = dbName.toString() ](auto status) {
+ _threadPool.schedule([this, name = dbName.toString()](auto status) {
if (ErrorCodes::isCancelationError(status.code())) {
LOG(0) << "Cache loader failed to schedule a persisted metadata update"
<< " task for namespace '" << name << "' due to '" << redact(status)
@@ -1026,12 +1021,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
uassertStatusOKWithContext(
persistCollectionAndChangedChunks(opCtx, nss, task.collectionAndChangedChunks.get()),
str::stream() << "Failed to update the persisted chunk metadata for collection '"
- << nss.ns()
- << "' from '"
- << task.minQueryVersion.toString()
- << "' to '"
- << task.maxQueryVersion.toString()
- << "'. Will be retried.");
+ << nss.ns() << "' from '" << task.minQueryVersion.toString() << "' to '"
+ << task.maxQueryVersion.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted chunk metadata for collection '"
<< nss << "' from '" << task.minQueryVersion
@@ -1057,15 +1048,13 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext*
// The database was dropped. The persisted metadata for the collection must be cleared.
uassertStatusOKWithContext(deleteDatabasesEntry(opCtx, dbName),
str::stream() << "Failed to clear persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
return;
}
uassertStatusOKWithContext(persistDbVersion(opCtx, *task.dbType),
str::stream() << "Failed to update the persisted metadata for db '"
- << dbName.toString()
- << "'. Will be retried.");
+ << dbName.toString() << "'. Will be retried.");
LOG_CATALOG_REFRESH(1) << "Successfully updated persisted metadata for db "
<< dbName.toString();
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 43653b40a2a..980a4be7865 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -59,8 +59,9 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
+ return !isReplSet ||
+ (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
}
/**
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index e95c954e10d..0abb64e96cc 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -98,36 +98,36 @@ public:
// Update the shard identity config string
void onConfirmedSet(const State& state) final {
- Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor()->schedule([
- serviceContext = _serviceContext,
- connStr = state.connStr
- ](Status status) {
- if (ErrorCodes::isCancelationError(status.code())) {
- LOG(2) << "Unable to schedule confirmed set update due to " << status;
- return;
- }
- uassertStatusOK(status);
-
- LOG(0) << "Updating config server with confirmed set " << connStr;
- Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
-
- if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
- return;
- }
-
- auto configsvrConnStr =
- Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
-
- // Only proceed if the notification is for the configsvr
- if (configsvrConnStr.getSetName() != connStr.getSetName()) {
- return;
- }
-
- ThreadClient tc("updateShardIdentityConfigString", serviceContext);
- auto opCtx = tc->makeOperationContext();
-
- ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
- });
+ Grid::get(_serviceContext)
+ ->getExecutorPool()
+ ->getFixedExecutor()
+ ->schedule([serviceContext = _serviceContext, connStr = state.connStr](Status status) {
+ if (ErrorCodes::isCancelationError(status.code())) {
+ LOG(2) << "Unable to schedule confirmed set update due to " << status;
+ return;
+ }
+ uassertStatusOK(status);
+
+ LOG(0) << "Updating config server with confirmed set " << connStr;
+ Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
+
+ if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
+ return;
+ }
+
+ auto configsvrConnStr =
+ Grid::get(serviceContext)->shardRegistry()->getConfigServerConnectionString();
+
+ // Only proceed if the notification is for the configsvr
+ if (configsvrConnStr.getSetName() != connStr.getSetName()) {
+ return;
+ }
+
+ ThreadClient tc("updateShardIdentityConfigString", serviceContext);
+ auto opCtx = tc->makeOperationContext();
+
+ ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), connStr);
+ });
}
void onPossibleSet(const State& state) final {
Grid::get(_serviceContext)->shardRegistry()->updateReplSetHosts(state.connStr);
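The large hunk above shows how clang-format 7 lays out a lambda passed through a chain of member calls: the accessor chain breaks one call per line and the lambda body indents as a continuation of the final call. A self-contained sketch under that assumption; Executor, Pool, GridLike, and schedule are illustrative stand-ins, not the real MongoDB types.

#include <functional>
#include <iostream>
#include <string>

struct Executor {
    void schedule(std::function<void(int)> fn) {
        fn(0);  // invoke immediately for the sake of the sketch
    }
};
struct Pool {
    Executor exec;
    Executor* getFixedExecutor() {
        return &exec;
    }
};
struct GridLike {
    Pool pool;
    Pool* getExecutorPool() {
        return &pool;
    }
};

void example(GridLike* grid) {
    // One call per line, lambda indented as a continuation of schedule(...).
    grid->getExecutorPool()
        ->getFixedExecutor()
        ->schedule([connStr = std::string("rs0/a:1")](int status) {
            std::cout << connStr << " " << status << "\n";
        });
}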
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index d124b98cc21..bb8bc7abc58 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -183,18 +183,19 @@ TEST_F(ShardingInitializationMongoDTest, InitWhilePreviouslyInErrorStateWillStay
shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
+ shardingInitialization()->setGlobalInitMethodForTest([](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) {
uasserted(ErrorCodes::ShutdownInProgress, "Not an actual shutdown");
});
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity);
// ShardingState is now in error state, attempting to call it again will still result in error.
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
ASSERT_THROWS_CODE(
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity),
@@ -223,10 +224,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingShardIdentit
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -256,10 +257,10 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingReplSetNameS
shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
- shardingInitialization()->setGlobalInitMethodForTest([](
- OperationContext* opCtx, const ShardIdentity& shardIdentity, StringData distLockProcessId) {
- FAIL("Should not be invoked!");
- });
+ shardingInitialization()->setGlobalInitMethodForTest(
+ [](OperationContext* opCtx,
+ const ShardIdentity& shardIdentity,
+ StringData distLockProcessId) { FAIL("Should not be invoked!"); });
shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity2);
@@ -291,13 +292,9 @@ TEST_F(ShardingInitializationMongoDTest,
storageGlobalParams.readOnly = true;
serverGlobalParams.overrideShardIdentity =
BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
- << OID::gen()
- << ShardIdentity::kConfigsvrConnectionStringFieldName
- << "invalid");
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName << kShardName
+ << ShardIdentity::kClusterIdFieldName << OID::gen()
+ << ShardIdentity::kConfigsvrConnectionStringFieldName << "invalid");
ASSERT_THROWS_CODE(
shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()),
@@ -436,10 +433,8 @@ TEST_F(ShardingInitializationMongoDTest,
ScopedSetStandaloneMode standalone(getServiceContext());
BSONObj invalidShardIdentity = BSON("_id"
- << "shardIdentity"
- << ShardIdentity::kShardNameFieldName
- << kShardName
- << ShardIdentity::kClusterIdFieldName
+ << "shardIdentity" << ShardIdentity::kShardNameFieldName
+ << kShardName << ShardIdentity::kClusterIdFieldName
<< OID::gen()
<< ShardIdentity::kConfigsvrConnectionStringFieldName
<< "invalid");
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
index 3529a42cfbd..c3d07903ceb 100644
--- a/src/mongo/db/s/sharding_logging.cpp
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -121,10 +121,10 @@ Status ShardingLogging::_log(OperationContext* opCtx,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
Date_t now = Grid::get(opCtx)->getNetwork()->now();
- const std::string serverName = str::stream() << Grid::get(opCtx)->getNetwork()->getHostName()
- << ":" << serverGlobalParams.port;
- const std::string changeId = str::stream() << serverName << "-" << now.toString() << "-"
- << OID::gen();
+ const std::string serverName = str::stream()
+ << Grid::get(opCtx)->getNetwork()->getHostName() << ":" << serverGlobalParams.port;
+ const std::string changeId = str::stream()
+ << serverName << "-" << now.toString() << "-" << OID::gen();
ChangeLogType changeLog;
changeLog.setChangeId(changeId);
@@ -162,9 +162,9 @@ Status ShardingLogging::_createCappedConfigCollection(OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern) {
- BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj createCmd =
+ BSON("create" << collName << "capped" << true << "size" << cappedSize
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto result =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index e59ed3568f7..e229badedbc 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -124,8 +124,7 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
+ << nss.ns() << " from config.chunks",
numChunks == 0);
}
@@ -229,9 +228,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
bool isUnique = idx["unique"].trueValue();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on "
- << currentKey
- << " and proposed shard key "
- << proposedKey
+ << currentKey << " and proposed shard key " << proposedKey
<< ". Uniqueness can't be maintained unless shard key is a prefix",
!isUnique || shardKeyPattern.isUniqueIndexCompatible(currentKey));
}
@@ -249,8 +246,7 @@ void createCollectionOrValidateExisting(OperationContext* opCtx,
// per field per collection.
uassert(ErrorCodes::InvalidOptions,
str::stream() << "can't shard collection " << nss.ns()
- << " with hashed shard key "
- << proposedKey
+ << " with hashed shard key " << proposedKey
<< " because the hashed index uses a non-default seed of "
<< idx["seed"].numberInt(),
!shardKeyPattern.isHashedPattern() || idx["seed"].eoo() ||
@@ -336,9 +332,7 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
BSONElement tagMaxKeyElement = tagMaxFields.next();
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the min and max of the existing zone " << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
- << " have non-matching keys",
+ << " -->> " << tag.getMaxKey() << " have non-matching keys",
tagMinKeyElement.fieldNameStringData() ==
tagMaxKeyElement.fieldNameStringData());
@@ -350,20 +344,15 @@ void validateShardKeyAgainstExistingZones(OperationContext* opCtx,
uassert(ErrorCodes::InvalidOptions,
str::stream() << "the proposed shard key " << proposedKey.toString()
<< " does not match with the shard key of the existing zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey(),
+ << tag.getMinKey() << " -->> " << tag.getMaxKey(),
match);
if (ShardKeyPattern::isHashedPatternEl(proposedKeyElement) &&
(tagMinKeyElement.type() != NumberLong || tagMaxKeyElement.type() != NumberLong)) {
uasserted(ErrorCodes::InvalidOptions,
str::stream() << "cannot do hash sharding with the proposed key "
- << proposedKey.toString()
- << " because there exists a zone "
- << tag.getMinKey()
- << " -->> "
- << tag.getMaxKey()
+ << proposedKey.toString() << " because there exists a zone "
+ << tag.getMinKey() << " -->> " << tag.getMaxKey()
<< " whose boundaries are not "
"of type NumberLong");
}
@@ -418,8 +407,7 @@ boost::optional<UUID> getUUIDFromPrimaryShard(OperationContext* opCtx, const Nam
uassert(ErrorCodes::InternalError,
str::stream() << "expected to return a UUID for collection " << nss.ns()
- << " as part of 'info' field but got "
- << res,
+ << " as part of 'info' field but got " << res,
collectionInfo.hasField("uuid"));
return uassertStatusOK(UUID::parse(collectionInfo["uuid"]));
@@ -503,8 +491,7 @@ ShardCollectionTargetState calculateTargetState(OperationContext* opCtx,
if (fromMapReduce) {
uassert(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Map reduce with sharded output to a new collection found "
- << nss.ns()
- << " to be non-empty which is not supported.",
+ << nss.ns() << " to be non-empty which is not supported.",
isEmpty);
}
@@ -704,17 +691,21 @@ UUID shardCollection(OperationContext* opCtx,
InitialSplitPolicy::ShardCollectionConfig initialChunks;
boost::optional<ShardCollectionTargetState> targetState;
- auto writeChunkDocumentsAndRefreshShards = [&](
- const ShardCollectionTargetState& targetState,
- const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
- // Insert chunk documents to config.chunks on the config server.
- writeFirstChunksToConfig(opCtx, initialChunks);
-
- updateShardingCatalogEntryForCollection(
- opCtx, nss, targetState, initialChunks, *request.getCollation(), request.getUnique());
-
- refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
- };
+ auto writeChunkDocumentsAndRefreshShards =
+ [&](const ShardCollectionTargetState& targetState,
+ const InitialSplitPolicy::ShardCollectionConfig& initialChunks) {
+ // Insert chunk documents to config.chunks on the config server.
+ writeFirstChunksToConfig(opCtx, initialChunks);
+
+ updateShardingCatalogEntryForCollection(opCtx,
+ nss,
+ targetState,
+ initialChunks,
+ *request.getCollation(),
+ request.getUnique());
+
+ refreshAllShards(opCtx, nss, dbPrimaryShardId, initialChunks.chunks);
+ };
{
// From this point onward the collection can only be read, not written to, so it is safe to
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index c76edbf8c6d..3a6a2d3a577 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -137,15 +137,14 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
//
// TODO(SERVER-25086): Remove distLock acquisition from split chunk
//
- const std::string whyMessage(
- str::stream() << "splitting chunk " << chunkRange.toString() << " in " << nss.toString());
+ const std::string whyMessage(str::stream() << "splitting chunk " << chunkRange.toString()
+ << " in " << nss.toString());
auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
opCtx, nss.ns(), whyMessage, DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus().withContext(
str::stream() << "could not acquire collection lock for " << nss.toString()
- << " to split chunk "
- << chunkRange.toString());
+ << " to split chunk " << chunkRange.toString());
}
// If the shard key is hashed, then we must make sure that the split points are of type
@@ -157,12 +156,11 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
BSONElement splitKeyElement = it.next();
if (splitKeyElement.type() != NumberLong) {
return {ErrorCodes::CannotSplit,
- str::stream() << "splitChunk cannot split chunk "
- << chunkRange.toString()
- << ", split point "
- << splitKeyElement.toString()
- << " must be of type "
- "NumberLong for hashed shard key patterns"};
+ str::stream()
+ << "splitChunk cannot split chunk " << chunkRange.toString()
+ << ", split point " << splitKeyElement.toString()
+ << " must be of type "
+ "NumberLong for hashed shard key patterns"};
}
}
}
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index 95815c552d9..06953ff88ea 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -291,13 +291,13 @@ TransactionCoordinator::TransactionCoordinator(ServiceContext* serviceContext,
return txn::deleteCoordinatorDoc(*_scheduler, _lsid, _txnNumber);
})
- .onCompletion([ this, deadlineFuture = std::move(deadlineFuture) ](Status s) mutable {
+ .onCompletion([this, deadlineFuture = std::move(deadlineFuture)](Status s) mutable {
// Interrupt this coordinator's scheduler hierarchy and join the deadline task's future
// in order to guarantee that there are no more threads running within the coordinator.
_scheduler->shutdown(
{ErrorCodes::TransactionCoordinatorDeadlineTaskCanceled, "Coordinator completed"});
- return std::move(deadlineFuture).onCompletion([ this, s = std::move(s) ](Status) {
+ return std::move(deadlineFuture).onCompletion([this, s = std::move(s)](Status) {
// Notify all the listeners which are interested in the coordinator's lifecycle.
// After this call, the coordinator object could potentially get destroyed by its
// lifetime controller, so there shouldn't be any accesses to `this` after this
@@ -373,8 +373,7 @@ void TransactionCoordinator::_done(Status status) {
if (status == ErrorCodes::TransactionCoordinatorSteppingDown)
status = Status(ErrorCodes::InterruptedDueToReplStateChange,
str::stream() << "Coordinator " << _lsid.getId() << ':' << _txnNumber
- << " stopped due to: "
- << status.reason());
+ << " stopped due to: " << status.reason());
LOG(3) << "Two-phase commit for " << _lsid.getId() << ':' << _txnNumber << " completed with "
<< redact(status);
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index b45b4449838..6fa5d45226e 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -61,8 +61,8 @@ void TransactionCoordinatorCatalog::onStepDown() {
stdx::unique_lock<stdx::mutex> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
- for (auto && [ sessionId, coordinatorsForSession ] : _coordinatorsBySession) {
- for (auto && [ txnNumber, coordinator ] : coordinatorsForSession) {
+ for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
+ for (auto&& [txnNumber, coordinator] : coordinatorsForSession) {
coordinatorsToCancel.emplace_back(coordinator);
}
}
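
The two-line change above reflects clang-format 7 gaining real support for C++17 structured bindings: older releases treated `[ sessionId, coordinatorsForSession ]` like a lambda introducer and padded the brackets, and split `auto &&` away from the binding. The formatted idiom, in a compilable sketch (the map contents are illustrative only):

#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> coordinatorsBySession{{"sessionA", 2}, {"sessionB", 1}};
    // clang-format 7 keeps the binding list tight: auto&& [a, b], no inner padding.
    for (auto&& [sessionId, coordinatorCount] : coordinatorsBySession) {
        std::cout << sessionId << " -> " << coordinatorCount << '\n';
    }
    return 0;
}
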
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index 0af8b465353..58766aa1d28 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -83,8 +83,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
// rather than going through the host targeting below. This ensures that the state changes
// for the participant and coordinator occur sequentially on a single branch of replica set
// history. See SERVER-38142 for details.
- return scheduleWork([ this, shardId, commandObj = commandObj.getOwned() ](OperationContext *
- opCtx) {
+ return scheduleWork([this, shardId, commandObj = commandObj.getOwned()](
+ OperationContext* opCtx) {
// Note: This internal authorization is tied to the lifetime of the client, which will
// be destroyed by 'scheduleWork' immediately after this lambda ends
AuthorizationSession::get(opCtx->getClient())
@@ -114,8 +114,8 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
}
return _targetHostAsync(shardId, readPref)
- .then([ this, shardId, commandObj = commandObj.getOwned(), readPref ](
- HostAndShard hostAndShard) mutable {
+ .then([this, shardId, commandObj = commandObj.getOwned(), readPref](
+ HostAndShard hostAndShard) mutable {
executor::RemoteCommandRequest request(hostAndShard.hostTargeted,
NamespaceString::kAdminDb.toString(),
commandObj,
@@ -166,7 +166,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusWith<ResponseStatus> s) {
+ [this, it = std::move(it)](StatusWith<ResponseStatus> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index 1c654d8707f..7aef1fc8e78 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -115,7 +115,7 @@ public:
ul.unlock();
return std::move(pf.future).tapAll(
- [ this, it = std::move(it) ](StatusOrStatusWith<ReturnType> s) {
+ [this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
@@ -284,7 +284,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
combiner(std::move(combiner)) {}
/*****************************************************
* The first few fields have fixed values. *
- ******************************************************/
+ ******************************************************/
// Protects all state in the SharedBlock.
stdx::mutex mutex;
@@ -299,7 +299,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
/*****************************************************
* The below have initial values based on user input.*
- ******************************************************/
+ ******************************************************/
// The number of input futures that have not yet been resolved and processed.
size_t numOutstandingResponses;
// The variable where the intermediate results and final result is stored.
@@ -374,26 +374,25 @@ Future<FutureContinuationResult<LoopBodyFn>> doWhile(AsyncWorkScheduler& schedul
LoopBodyFn&& f) {
using ReturnType = typename decltype(f())::value_type;
auto future = f();
- return std::move(future).onCompletion([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
- f = std::forward<LoopBodyFn>(f)
- ](StatusOrStatusWith<ReturnType> s) mutable {
- if (!shouldRetryFn(s))
- return Future<ReturnType>(std::move(s));
-
- // Retry after a delay.
- const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
- return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {}).then([
- &scheduler,
- backoff = std::move(backoff),
- shouldRetryFn = std::move(shouldRetryFn),
- f = std::move(f)
- ]() mutable {
- return doWhile(scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ return std::move(future).onCompletion(
+ [&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::forward<ShouldRetryFn>(shouldRetryFn),
+ f = std::forward<LoopBodyFn>(f)](StatusOrStatusWith<ReturnType> s) mutable {
+ if (!shouldRetryFn(s))
+ return Future<ReturnType>(std::move(s));
+
+ // Retry after a delay.
+ const auto delayMillis = (backoff ? backoff->nextSleep() : Milliseconds(0));
+ return scheduler.scheduleWorkIn(delayMillis, [](OperationContext* opCtx) {})
+ .then([&scheduler,
+ backoff = std::move(backoff),
+ shouldRetryFn = std::move(shouldRetryFn),
+ f = std::move(f)]() mutable {
+ return doWhile(
+ scheduler, std::move(backoff), std::move(shouldRetryFn), std::move(f));
+ });
});
- });
}
} // namespace txn
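
Most of the churn in the futures utilities comes from the overhauled lambda handling in clang-format 7: capture lists lose their inner padding (`[ this, it = std::move(it) ]` becomes `[this, it = std::move(it)]`), pointer parameters bind to the type (`OperationContext* opCtx`, not `OperationContext * opCtx`), and a lambda passed to a chained call such as `.onCompletion(...)` is indented under the call rather than hanging from the statement start. A self-contained example of the new shape; the names are illustrative, not from the tree:

#include <iostream>
#include <memory>
#include <utility>

int main() {
    auto payload = std::make_unique<int>(42);
    // clang-format 7 shape: tight init-capture list, '*' bound to the type.
    auto consume = [p = std::move(payload)](const int* fallback) {
        return p ? *p : *fallback;
    };
    const int fallback = 0;
    std::cout << consume(&fallback) << '\n';
    return 0;
}
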
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
index bed0927a650..e3df22d9b7c 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp
@@ -359,7 +359,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkSucceeds) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
return future.get(opCtx);
});
@@ -377,7 +377,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkThrowsException) {
unittest::Barrier barrier(2);
auto pf = makePromiseFuture<int>();
auto future =
- async.scheduleWork([&barrier, future = std::move(pf.future) ](OperationContext * opCtx) {
+ async.scheduleWork([&barrier, future = std::move(pf.future)](OperationContext* opCtx) {
barrier.countDownAndWait();
future.get(opCtx);
uasserted(ErrorCodes::InternalError, "Test error");
@@ -396,7 +396,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledBlockingWorkInSucceeds) {
auto pf = makePromiseFuture<int>();
auto future = async.scheduleWorkIn(
Milliseconds{10},
- [future = std::move(pf.future)](OperationContext * opCtx) { return future.get(opCtx); });
+ [future = std::move(pf.future)](OperationContext* opCtx) { return future.get(opCtx); });
pf.promise.emplaceValue(5);
ASSERT(!future.isReady());
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index dac4caee608..6be674d1ad7 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -147,7 +147,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
_catalogAndScheduler->scheduler
.scheduleWorkIn(
recoveryDelayForTesting,
- [catalogAndScheduler = _catalogAndScheduler](OperationContext * opCtx) {
+ [catalogAndScheduler = _catalogAndScheduler](OperationContext* opCtx) {
auto& replClientInfo = repl::ReplClientInfo::forClient(opCtx->getClient());
replClientInfo.setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/s/transaction_coordinator_structures_test.cpp b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
index f29b442559b..df1d3cc2ade 100644
--- a/src/mongo/db/s/transaction_coordinator_structures_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_structures_test.cpp
@@ -44,8 +44,7 @@ TEST(CoordinatorCommitDecisionTest, SerializeCommitHasTimestampAndNoAbortStatus)
ASSERT_BSONOBJ_EQ(BSON("decision"
<< "commit"
- << "commitTimestamp"
- << Timestamp(100, 200)),
+ << "commitTimestamp" << Timestamp(100, 200)),
obj);
}
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index cbed3eb021a..ad4554b406b 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -192,8 +192,7 @@ auto makeDummyPrepareCommand(const LogicalSessionId& lsid, const TxnNumber& txnN
prepareCmd.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareCmd.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
return prepareObj;
@@ -546,17 +545,23 @@ protected:
TxnNumber txnNumber,
const std::vector<ShardId>& participants,
const boost::optional<Timestamp>& commitTimestamp) {
- txn::persistDecision(*_aws, lsid, txnNumber, participants, [&] {
- txn::CoordinatorCommitDecision decision;
- if (commitTimestamp) {
- decision.setDecision(txn::CommitDecision::kCommit);
- decision.setCommitTimestamp(commitTimestamp);
- } else {
- decision.setDecision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort status"));
- }
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ lsid,
+ txnNumber,
+ participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision;
+ if (commitTimestamp) {
+ decision.setDecision(txn::CommitDecision::kCommit);
+ decision.setCommitTimestamp(commitTimestamp);
+ } else {
+ decision.setDecision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction,
+ "Test abort status"));
+ }
+ return decision;
+ }())
+ .get();
auto allCoordinatorDocs = txn::readAllCoordinatorDocs(opCtx);
ASSERT_EQUALS(allCoordinatorDocs.size(), size_t(1));
@@ -733,11 +738,17 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest,
// Delete the document for the first transaction and check that only the second transaction's
// document still exists.
- txn::persistDecision(*_aws, _lsid, txnNumber1, _participants, [&] {
- txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
- decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
- return decision;
- }()).get();
+ txn::persistDecision(*_aws,
+ _lsid,
+ txnNumber1,
+ _participants,
+ [&] {
+ txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort);
+ decision.setAbortStatus(
+ Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
+ return decision;
+ }())
+ .get();
txn::deleteCoordinatorDoc(*_aws, _lsid, txnNumber1).get();
allCoordinatorDocs = txn::readAllCoordinatorDocs(operationContext());
@@ -1466,8 +1477,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForParticipantListWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
coordinator.runCommit(kTwoShardIdList);
waitUntilCoordinatorDocIsPresent();
@@ -1511,8 +1521,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangBeforeWaitingForDecisionWriteConcern",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second prepare request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertPrepareSentAndRespondWithSuccess();
@@ -1562,8 +1571,7 @@ TEST_F(TransactionCoordinatorMetricsTest, SimpleTwoPhaseCommitRealCoordinator) {
setGlobalFailPoint("hangAfterDeletingCoordinatorDoc",
BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("useUninterruptibleSleep" << 1)));
+ << "data" << BSON("useUninterruptibleSleep" << 1)));
// Respond to the second commit request in a separate thread, because the coordinator will
// hijack that thread to run its continuation.
assertCommitSentAndRespondWithSuccess();
@@ -2122,11 +2130,10 @@ TEST_F(TransactionCoordinatorMetricsTest, SlowLogLineIncludesTransactionParamete
runSimpleTwoPhaseCommitWithCommitDecisionAndCaptureLogLines();
BSONObjBuilder lsidBob;
_lsid.serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << _txnNumber));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(str::stream()
+ << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << _txnNumber));
}
TEST_F(TransactionCoordinatorMetricsTest,
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index f49da0ac61f..dbffc60de1d 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -126,8 +126,7 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
BSONObj sameParticipantList =
BSON("$and" << buildParticipantListMatchesConditions(participantList));
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$or"
+ << sessionInfo.toBSON() << "$or"
<< BSON_ARRAY(noParticipantList << sameParticipantList)));
// Update with participant list.
@@ -154,13 +153,9 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51025,
str::stream() << "While attempting to write participant list "
- << buildParticipantListString(participantList)
- << " for "
- << lsid.getId()
- << ':'
- << txnNumber
- << ", found document with a different participant list: "
- << doc);
+ << buildParticipantListString(participantList) << " for "
+ << lsid.getId() << ':' << txnNumber
+ << ", found document with a different participant list: " << doc);
}
// Throw any other error.
@@ -223,8 +218,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
prepareTransaction.setDbName(NamespaceString::kAdminDb);
auto prepareObj = prepareTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<PrepareResponse>> responses;
@@ -245,7 +239,7 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
// Initial value
PrepareVoteConsensus{int(participants.size())},
// Aggregates an incoming response (next) with the existing aggregate value (result)
- [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus & result,
+ [&prepareScheduler = *prepareScheduler](PrepareVoteConsensus& result,
const PrepareResponse& next) {
result.registerVote(next);
@@ -300,10 +294,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
BSON(TransactionCoordinatorDocument::kDecisionFieldName << decision.toBSON());
entry.setQ(BSON(TransactionCoordinatorDocument::kIdFieldName
- << sessionInfo.toBSON()
- << "$and"
- << buildParticipantListMatchesConditions(participantList)
- << "$or"
+ << sessionInfo.toBSON() << "$and"
+ << buildParticipantListMatchesConditions(participantList) << "$or"
<< BSON_ARRAY(noDecision << sameDecision)));
entry.setU([&] {
@@ -333,11 +325,8 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51026,
str::stream() << "While attempting to write decision "
- << (isCommit ? "'commit'" : "'abort'")
- << " for"
- << lsid.getId()
- << ':'
- << txnNumber
+ << (isCommit ? "'commit'" : "'abort'") << " for" << lsid.getId()
+ << ':' << txnNumber
<< ", either failed to find document for this lsid:txnNumber or "
"document existed with a different participant list, decision "
"or commitTimestamp: "
@@ -379,8 +368,7 @@ Future<void> sendCommit(ServiceContext* service,
commitTransaction.setCommitTimestamp(commitTimestamp);
auto commitObj = commitTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -398,8 +386,7 @@ Future<void> sendAbort(ServiceContext* service,
abortTransaction.setDbName(NamespaceString::kAdminDb);
auto abortObj = abortTransaction.toBSON(
BSON("lsid" << lsid.toBSON() << "txnNumber" << txnNumber << "autocommit" << false
- << WriteConcernOptions::kWriteConcernField
- << WriteConcernOptions::Majority));
+ << WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority));
std::vector<Future<void>> responses;
for (const auto& participant : participants) {
@@ -529,12 +516,12 @@ Future<PrepareResponse> sendPrepareToShard(ServiceContext* service,
swPrepareResponse != ErrorCodes::TransactionCoordinatorSteppingDown &&
swPrepareResponse != ErrorCodes::TransactionCoordinatorReachedAbortDecision;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? " local " : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
@@ -621,12 +608,12 @@ Future<void> sendDecisionToShard(ServiceContext* service,
// coordinator-specific code.
return !s.isOK() && s != ErrorCodes::TransactionCoordinatorSteppingDown;
},
- [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned() ] {
+ [&scheduler, shardId, isLocalShard, commandObj = commandObj.getOwned()] {
LOG(3) << "Coordinator going to send command " << commandObj << " to "
<< (isLocalShard ? "local" : "") << " shard " << shardId;
return scheduler.scheduleRemoteCommand(shardId, kPrimaryReadPreference, commandObj)
- .then([ shardId, commandObj = commandObj.getOwned() ](ResponseStatus response) {
+ .then([shardId, commandObj = commandObj.getOwned()](ResponseStatus response) {
auto status = getStatusFromCommandResult(response.data);
auto wcStatus = getWriteConcernStatusFromCommandResult(response.data);
diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
index 3cb6b8c1cbe..b48811ec994 100644
--- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
+++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
@@ -132,12 +132,11 @@ public:
replClient.setLastOp(opCtx, prepareOpTime);
}
- invariant(opCtx->recoveryUnit()->getPrepareTimestamp() ==
- prepareOpTime.getTimestamp(),
- str::stream() << "recovery unit prepareTimestamp: "
- << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
- << " participant prepareOpTime: "
- << prepareOpTime.toString());
+ invariant(
+ opCtx->recoveryUnit()->getPrepareTimestamp() == prepareOpTime.getTimestamp(),
+ str::stream() << "recovery unit prepareTimestamp: "
+ << opCtx->recoveryUnit()->getPrepareTimestamp().toString()
+ << " participant prepareOpTime: " << prepareOpTime.toString());
if (MONGO_FAIL_POINT(
participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic)) {
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 56c2ca059de..b4999f5c6eb 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -46,9 +46,7 @@ TEST(ShardIdentityType, RoundTrip) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId
- << "configsvrConnectionString"
+ << "clusterId" << clusterId << "configsvrConnectionString"
<< "test/a:123");
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
@@ -67,8 +65,7 @@ TEST(ShardIdentityType, ParseMissingId) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -79,8 +76,7 @@ TEST(ShardIdentityType, ParseMissingConfigsvrConnString) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -91,8 +87,7 @@ TEST(ShardIdentityType, ParseMissingShardName) {
<< "shardIdentity"
<< "configsvrConnectionString"
<< "test/a:123"
- << "clusterId"
- << OID::gen());
+ << "clusterId" << OID::gen());
auto result = ShardIdentityType::fromShardIdentityDocument(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -118,8 +113,7 @@ TEST(ShardIdentityType, InvalidConnectionString) {
<< "test/,,,"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::FailedToParse,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -133,8 +127,7 @@ TEST(ShardIdentityType, NonReplSetConnectionString) {
<< "local:123"
<< "shardName"
<< "s1"
- << "clusterId"
- << clusterId);
+ << "clusterId" << clusterId);
ASSERT_EQ(ErrorCodes::UnsupportedFormat,
ShardIdentityType::fromShardIdentityDocument(doc).getStatus());
@@ -147,5 +140,5 @@ TEST(ShardIdentityType, CreateUpdateObject) {
ASSERT_BSONOBJ_EQ(expectedObj, updateObj);
}
+} // namespace
} // namespace mongo
-} // unnamed namespace
diff --git a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
index 1ff67ff3257..d1ceaaeeba6 100644
--- a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
+++ b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp
@@ -90,5 +90,5 @@ MONGO_INITIALIZER(RegisterWaitForOngoingChunkSplitsCommand)(InitializerContext*
}
return Status::OK();
}
-}
-}
+} // namespace
+} // namespace mongo
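
The recurring `-}` / `+} // namespace mongo` hunks come from clang-format's FixNamespaceComments option (available since clang-format 5), which appends — or corrects — the comment on a namespace-closing brace; it also rewrites variants such as `// unnamed namespace` and `// biggie namespace` into the canonical form seen above. The target shape, on a placeholder body:

namespace mongo {
namespace {

const int kIllustrativeValue = 42;  // placeholder so the namespaces are non-trivial

}  // namespace
}  // namespace mongo

int main() {
    return 0;
}
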
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 14237da05e2..641f7f8fa9c 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -130,23 +130,23 @@ struct ServerGlobalParams {
enum ClusterAuthModes {
ClusterAuthMode_undefined,
/**
- * Authenticate using keyfile, accept only keyfiles
- */
+ * Authenticate using keyfile, accept only keyfiles
+ */
ClusterAuthMode_keyFile,
/**
- * Authenticate using keyfile, accept both keyfiles and X.509
- */
+ * Authenticate using keyfile, accept both keyfiles and X.509
+ */
ClusterAuthMode_sendKeyFile,
/**
- * Authenticate using X.509, accept both keyfiles and X.509
- */
+ * Authenticate using X.509, accept both keyfiles and X.509
+ */
ClusterAuthMode_sendX509,
/**
- * Authenticate using X.509, accept only X.509
- */
+ * Authenticate using X.509, accept only X.509
+ */
ClusterAuthMode_x509
};
@@ -271,4 +271,4 @@ struct TraitNamedDomain {
return ret;
}
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/server_options_helpers.h b/src/mongo/db/server_options_helpers.h
index da7098f23c4..a79dde98b39 100644
--- a/src/mongo/db/server_options_helpers.h
+++ b/src/mongo/db/server_options_helpers.h
@@ -43,18 +43,18 @@ class Environment;
namespace moe = mongo::optionenvironment;
/**
-* Handle custom validation of base options that can not currently be done by using
-* Constraints in the Environment. See the "validate" function in the Environment class for
-* more details.
-*/
+ * Handle custom validation of base options that can not currently be done by using
+ * Constraints in the Environment. See the "validate" function in the Environment class for
+ * more details.
+ */
Status validateBaseOptions(const moe::Environment& params);
/**
-* Canonicalize base options for the given environment.
-*
-* For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
-* merged into "net.wireObjectCheck".
-*/
+ * Canonicalize base options for the given environment.
+ *
+ * For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
+ * merged into "net.wireObjectCheck".
+ */
Status canonicalizeBaseOptions(moe::Environment* params);
/**
@@ -67,11 +67,11 @@ Status canonicalizeBaseOptions(moe::Environment* params);
Status setupBaseOptions(const std::vector<std::string>& args);
/**
-* Store the given parsed params in global server state.
-*
-* For example, sets the serverGlobalParams.quiet variable based on the systemLog.quiet config
-* parameter.
-*/
+ * Store the given parsed params in global server state.
+ *
+ * For example, sets the serverGlobalParams.quiet variable based on the systemLog.quiet config
+ * parameter.
+ */
Status storeBaseOptions(const moe::Environment& params);
} // namespace mongo
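
The header-comment churn above is comment reindenting: the clang-format 7 run aligns the interior `*` column of a block comment one column in, under the opening `/**`, where the old output left the stars wherever the author put them — flush-left in server_options_helpers.h, over-indented inside the enum in server_options.h. The canonical result, on a hypothetical declaration:

namespace mongo {

/**
 * Canonicalize something for the given environment.
 *
 * Interior '*'s sit one column in, aligned under the opening token.
 */
void exampleCanonicalize();  // hypothetical declaration, for illustration only

}  // namespace mongo

int main() {
    return 0;
}
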
diff --git a/src/mongo/db/service_context_test_fixture.h b/src/mongo/db/service_context_test_fixture.h
index edbd5021816..e7508898c0f 100644
--- a/src/mongo/db/service_context_test_fixture.h
+++ b/src/mongo/db/service_context_test_fixture.h
@@ -39,9 +39,9 @@ namespace mongo {
class ScopedGlobalServiceContextForTest {
public:
/**
- * Returns a service context, which is only valid for this instance of the test.
- * Must not be called before setUp or after tearDown.
- */
+ * Returns a service context, which is only valid for this instance of the test.
+ * Must not be called before setUp or after tearDown.
+ */
ServiceContext* getServiceContext();
protected:
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 849a6744128..349d25326f4 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -123,9 +123,10 @@ void generateLegacyQueryErrorResponse(const AssertionException& exception,
curop->debug().errInfo = exception.toStatus();
log(LogComponent::kQuery) << "assertion " << exception.toString() << " ns:" << queryMessage.ns
- << " query:" << (queryMessage.query.valid(BSONVersion::kLatest)
- ? redact(queryMessage.query)
- : "query object is corrupt");
+ << " query:"
+ << (queryMessage.query.valid(BSONVersion::kLatest)
+ ? redact(queryMessage.query)
+ : "query object is corrupt");
if (queryMessage.ntoskip || queryMessage.ntoreturn) {
log(LogComponent::kQuery) << " ntoskip:" << queryMessage.ntoskip
<< " ntoreturn:" << queryMessage.ntoreturn;
@@ -971,8 +972,8 @@ DbResponse receivedCommands(OperationContext* opCtx,
// However, the complete command object will still be echoed to the client.
if (!(c = CommandHelpers::findCommand(request.getCommandName()))) {
globalCommandRegistry()->incrementUnknownCommands();
- std::string msg = str::stream() << "no such command: '" << request.getCommandName()
- << "'";
+ std::string msg = str::stream()
+ << "no such command: '" << request.getCommandName() << "'";
LOG(2) << msg;
uasserted(ErrorCodes::CommandNotFound, str::stream() << msg);
}
@@ -1008,12 +1009,10 @@ DbResponse receivedCommands(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterUnackWrites.increment();
uasserted(ErrorCodes::NotMaster,
- str::stream() << "Not-master error while processing '"
- << request.getCommandName()
- << "' operation on '"
- << request.getDatabase()
- << "' database via "
- << "fire-and-forget command execution.");
+ str::stream()
+ << "Not-master error while processing '" << request.getCommandName()
+ << "' operation on '" << request.getDatabase() << "' database via "
+ << "fire-and-forget command execution.");
}
return {}; // Don't reply.
}
@@ -1302,10 +1301,8 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (!opCtx->getClient()->isInDirectClient()) {
uassert(18663,
str::stream() << "legacy writeOps not longer supported for "
- << "versioned connections, ns: "
- << nsString.ns()
- << ", op: "
- << networkOpToString(op),
+ << "versioned connections, ns: " << nsString.ns()
+ << ", op: " << networkOpToString(op),
!ShardedConnectionInfo::get(&c, false));
}
@@ -1333,12 +1330,10 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx,
if (LastError::get(opCtx->getClient()).hadNotMasterError()) {
notMasterLegacyUnackWrites.increment();
uasserted(ErrorCodes::NotMaster,
- str::stream() << "Not-master error while processing '"
- << networkOpToString(op)
- << "' operation on '"
- << nsString
- << "' namespace via legacy "
- << "fire-and-forget command execution.");
+ str::stream()
+ << "Not-master error while processing '" << networkOpToString(op)
+ << "' operation on '" << nsString << "' namespace via legacy "
+ << "fire-and-forget command execution.");
}
}
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index e52a99383f1..2f6145f0287 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/ops/write_ops.h"
@@ -92,8 +91,8 @@ void killSessionTokens(OperationContext* opCtx,
return;
getThreadPool(opCtx)->schedule(
- [ service = opCtx->getServiceContext(),
- sessionKillTokens = std::move(sessionKillTokens) ](auto status) mutable {
+ [service = opCtx->getServiceContext(),
+ sessionKillTokens = std::move(sessionKillTokens)](auto status) mutable {
invariant(status);
ThreadClient tc("Kill-Sessions", service);
@@ -185,11 +184,10 @@ void createTransactionTable(OperationContext* opCtx) {
return;
}
- uassertStatusOKWithContext(status,
- str::stream()
- << "Failed to create the "
- << NamespaceString::kSessionTransactionsTableNamespace.ns()
- << " collection");
+ uassertStatusOKWithContext(
+ status,
+ str::stream() << "Failed to create the "
+ << NamespaceString::kSessionTransactionsTableNamespace.ns() << " collection");
}
void abortInProgressTransactions(OperationContext* opCtx) {
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index da7712f89d4..a5512625dbf 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -123,12 +123,14 @@ TEST_F(SessionCatalogTest, ScanSession) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
catalog()->scanSession(lsids[0], [&lsids](const ObservableSession& session) {
@@ -154,12 +156,14 @@ TEST_F(SessionCatalogTest, ScanSessionMarkForReapWhenSessionIsIdle) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
catalog()->scanSession(lsids[0],
@@ -196,12 +200,14 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ScanSessions) {
makeLogicalSessionIdForTest(),
makeLogicalSessionIdForTest()};
for (const auto& lsid : lsids) {
- stdx::async(stdx::launch::async, [this, lsid] {
- ThreadClient tc(getServiceContext());
- auto opCtx = makeOperationContext();
- opCtx->setLogicalSessionId(lsid);
- OperationContextSession ocs(opCtx.get());
- }).get();
+ stdx::async(stdx::launch::async,
+ [this, lsid] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = makeOperationContext();
+ opCtx->setLogicalSessionId(lsid);
+ OperationContextSession ocs(opCtx.get());
+ })
+ .get();
}
// Scan over all Sessions.
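
The stdx::async reshuffling above is another consequence of the lambda rules: once a member call such as `.get()` trails the closing parenthesis, clang-format 7 breaks before the `.` and formats the lambda as an ordinary wrapped argument, aligned with the first argument, instead of letting the body hang at statement indent. The equivalent shape with the standard library, `std::async` standing in for `stdx::async`:

#include <future>
#include <iostream>

int main() {
    int result = std::async(std::launch::async,
                            [] {
                                return 6 * 7;  // stand-in for the per-session setup work
                            })
                     .get();
    std::cout << result << '\n';
    return result == 42 ? 0 : 1;
}
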
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
index 3338979d8e2..bdfac76abff 100644
--- a/src/mongo/db/sessions_collection_config_server.h
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -46,18 +46,18 @@ class OperationContext;
class SessionsCollectionConfigServer : public SessionsCollectionSharded {
public:
/**
- * Ensures that the sessions collection has been set up for this cluster,
- * sharded, and with the proper indexes.
- *
- * This method may safely be called multiple times.
- *
- * If there are no shards in this cluster, this method will do nothing.
- */
+ * Ensures that the sessions collection has been set up for this cluster,
+ * sharded, and with the proper indexes.
+ *
+ * This method may safely be called multiple times.
+ *
+ * If there are no shards in this cluster, this method will do nothing.
+ */
Status setupSessionsCollection(OperationContext* opCtx) override;
/**
- * Checks if the sessions collection exists.
- */
+ * Checks if the sessions collection exists.
+ */
Status checkSessionsCollectionExists(OperationContext* opCtx) override;
private:
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index 4efa4a79840..37b8d98aa02 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -172,24 +172,21 @@ public:
void openSource() {
_file.open(_fileName.c_str(), std::ios::in | std::ios::binary);
uassert(16814,
- str::stream() << "error opening file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
_file.good());
_file.seekg(_fileStartOffset);
uassert(50979,
str::stream() << "error seeking starting offset of '" << _fileStartOffset
- << "' in file \""
- << _fileName
- << "\": "
- << myErrnoWithDescription(),
+ << "' in file \"" << _fileName << "\": " << myErrnoWithDescription(),
_file.good());
}
void closeSource() {
_file.close();
uassert(50969,
- str::stream() << "error closing file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error closing file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
!_file.fail());
}
@@ -290,8 +287,8 @@ private:
const std::streampos offset = _file.tellg();
uassert(51049,
- str::stream() << "error reading file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
offset >= 0);
if (offset >= _fileEndOffset) {
@@ -302,8 +299,8 @@ private:
_file.read(reinterpret_cast<char*>(out), size);
uassert(16817,
- str::stream() << "error reading file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
_file.good());
verify(_file.gcount() == static_cast<std::streamsize>(size));
}
@@ -556,8 +553,7 @@ private:
// need to be revisited.
uasserted(16819,
str::stream()
- << "Sort exceeded memory limit of "
- << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -844,8 +840,7 @@ private:
// need to be revisited.
uasserted(16820,
str::stream()
- << "Sort exceeded memory limit of "
- << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -921,8 +916,8 @@ SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts,
// limits.
_file.open(_fileName.c_str(), std::ios::binary | std::ios::app | std::ios::out);
uassert(16818,
- str::stream() << "error opening file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription(),
_file.good());
// The file descriptor is positioned at the end of a file when opened in append mode, but
// _file.tellp() is not initialized on all systems to reflect this. Therefore, we must also pass
@@ -985,8 +980,8 @@ void SortedFileWriter<Key, Value>::spill() {
_file.write(outBuffer, std::abs(size));
} catch (const std::exception&) {
msgasserted(16821,
- str::stream() << "error writing to file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription());
+ str::stream() << "error writing to file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription());
}
_buffer.reset();
@@ -998,8 +993,7 @@ SortIteratorInterface<Key, Value>* SortedFileWriter<Key, Value>::done() {
std::streampos currentFileOffset = _file.tellp();
uassert(50980,
str::stream() << "error fetching current file descriptor offset in file \"" << _fileName
- << "\": "
- << sorter::myErrnoWithDescription(),
+ << "\": " << sorter::myErrnoWithDescription(),
currentFileOffset >= 0);
// In case nothing was written to disk, use _fileStartOffset because tellp() may not be
@@ -1047,4 +1041,4 @@ Sorter<Key, Value>* Sorter<Key, Value>::make(const SortOptions& opts,
return new sorter::TopKSorter<Key, Value, Comparator>(opts, comp, settings);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h
index f504d466ac3..fccb3eef115 100644
--- a/src/mongo/db/sorter/sorter.h
+++ b/src/mongo/db/sorter/sorter.h
@@ -266,7 +266,7 @@ private:
std::streampos _fileStartOffset;
std::streampos _fileEndOffset;
};
-}
+} // namespace mongo
/**
* #include "mongo/db/sorter/sorter.cpp" and call this in a single translation
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index 31a8b6c04b8..099df94ceac 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -100,9 +100,9 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
#endif
/*
- * We did not add the message to startupWarningsLog as the user can not
- * specify a sslCAFile parameter from the shell
- */
+ * We did not add the message to startupWarningsLog as the user can not
+ * specify a sslCAFile parameter from the shell
+ */
if (sslGlobalParams.sslMode.load() != SSLParams::SSLMode_disabled &&
#ifdef MONGO_CONFIG_SSL_CERTIFICATE_SELECTORS
sslGlobalParams.sslCertificateSelector.empty() &&
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index 470fc90388e..8cffdb2088a 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -111,9 +111,9 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
if (opMode.empty()) {
- return StatusWith<std::string>(
- ErrorCodes::BadValue,
- str::stream() << "invalid mode in " << filename << ": '" << line << "'");
+ return StatusWith<std::string>(ErrorCodes::BadValue,
+ str::stream() << "invalid mode in " << filename << ": '"
+ << line << "'");
}
// Check against acceptable values of opMode.
@@ -122,16 +122,12 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
ErrorCodes::BadValue,
str::stream()
<< "** WARNING: unrecognized transparent Huge Pages mode of operation in "
- << filename
- << ": '"
- << opMode
- << "''");
+ << filename << ": '" << opMode << "''");
}
} catch (const boost::filesystem::filesystem_error& err) {
return StatusWith<std::string>(ErrorCodes::UnknownError,
str::stream() << "Failed to probe \"" << err.path1().string()
- << "\": "
- << err.code().message());
+ << "\": " << err.code().message());
}
return StatusWith<std::string>(opMode);
diff --git a/src/mongo/db/stats/counters.cpp b/src/mongo/db/stats/counters.cpp
index 4c5fa73bcd4..5e667340d1a 100644
--- a/src/mongo/db/stats/counters.cpp
+++ b/src/mongo/db/stats/counters.cpp
@@ -159,4 +159,4 @@ void NetworkCounter::append(BSONObjBuilder& b) {
OpCounters globalOpCounters;
OpCounters replOpCounters;
NetworkCounter networkCounter;
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
index d74402c8571..d7b8a0b88ec 100644
--- a/src/mongo/db/stats/counters.h
+++ b/src/mongo/db/stats/counters.h
@@ -139,4 +139,4 @@ private:
};
extern NetworkCounter networkCounter;
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/fine_clock.h b/src/mongo/db/stats/fine_clock.h
index d01c2e74d4a..fe793ef16bc 100644
--- a/src/mongo/db/stats/fine_clock.h
+++ b/src/mongo/db/stats/fine_clock.h
@@ -69,6 +69,6 @@ public:
return diff;
}
};
-}
+} // namespace mongo
#endif // DB_STATS_FINE_CLOCK_HEADER
diff --git a/src/mongo/db/stats/timer_stats.cpp b/src/mongo/db/stats/timer_stats.cpp
index bb52e0226d7..35b1027fff1 100644
--- a/src/mongo/db/stats/timer_stats.cpp
+++ b/src/mongo/db/stats/timer_stats.cpp
@@ -69,4 +69,4 @@ BSONObj TimerStats::getReport() const {
b.appendNumber("totalMillis", t);
return b.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/stats/timer_stats.h b/src/mongo/db/stats/timer_stats.h
index d09533bd537..029a238577c 100644
--- a/src/mongo/db/stats/timer_stats.h
+++ b/src/mongo/db/stats/timer_stats.h
@@ -88,4 +88,4 @@ private:
bool _recorded;
Timer _t;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
index 8f69ee8d617..8cd4ae9d893 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp
@@ -55,8 +55,7 @@ Ordering allAscending = Ordering::make(BSONObj());
auto const version = KeyString::Version::V1;
BSONObj const sample = BSON(""
<< "s"
- << ""
- << (int64_t)0);
+ << "" << (int64_t)0);
std::string createKey(StringData ident, int64_t recordId) {
KeyString ks(version, BSON("" << ident << "" << recordId), allAscending);
@@ -608,7 +607,7 @@ RecordStore::SizeAdjuster::~SizeAdjuster() {
int64_t deltaDataSize = _workingCopy->dataSize() - _origDataSize;
_rs->_numRecords.fetchAndAdd(deltaNumRecords);
_rs->_dataSize.fetchAndAdd(deltaDataSize);
- RecoveryUnit::get(_opCtx)->onRollback([ rs = _rs, deltaNumRecords, deltaDataSize ]() {
+ RecoveryUnit::get(_opCtx)->onRollback([rs = _rs, deltaNumRecords, deltaDataSize]() {
invariant(rs->_numRecords.load() >= deltaNumRecords);
rs->_numRecords.fetchAndSubtract(deltaNumRecords);
rs->_dataSize.fetchAndSubtract(deltaDataSize);
diff --git a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
index 5ff7ca7cb75..95147d485a3 100644
--- a/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
+++ b/src/mongo/db/storage/biggie/biggie_sorted_impl_test.cpp
@@ -57,12 +57,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
BSON(IndexDescriptor::kPartialFilterExprFieldName.toString() << BSON(""
diff --git a/src/mongo/db/storage/biggie/store.h b/src/mongo/db/storage/biggie/store.h
index 6c0c883f108..a09b5b49b63 100644
--- a/src/mongo/db/storage/biggie/store.h
+++ b/src/mongo/db/storage/biggie/store.h
@@ -153,10 +153,10 @@ public:
: _root(root), _current(current) {}
/**
- * This function traverses the tree to find the next left-most node with data. Modifies
- * '_current' to point to this node. It uses a pre-order traversal ('visit' the current
- * node itself then 'visit' the child subtrees from left to right).
- */
+ * This function traverses the tree to find the next left-most node with data. Modifies
+ * '_current' to point to this node. It uses a pre-order traversal ('visit' the current
+ * node itself then 'visit' the child subtrees from left to right).
+ */
void _findNext() {
// If 'current' is a nullptr there is no next node to go to.
if (_current == nullptr)
diff --git a/src/mongo/db/storage/biggie/store_test.cpp b/src/mongo/db/storage/biggie/store_test.cpp
index cc4c8a5d7ca..e75a81bc7c7 100644
--- a/src/mongo/db/storage/biggie/store_test.cpp
+++ b/src/mongo/db/storage/biggie/store_test.cpp
@@ -2492,5 +2492,5 @@ TEST_F(RadixStoreTest, LowerBoundEndpoint) {
ASSERT_TRUE(it == thisStore.end());
}
-} // biggie namespace
-} // mongo namespace
+} // namespace biggie
+} // namespace mongo
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 62ff6b13d93..2d7bc0b42c6 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -254,4 +254,4 @@ void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) {
prefix = KVPrefix::fromBSONElement(obj["prefix"]);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 15405b1942d..4f71435937c 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -110,4 +110,4 @@ public:
KVPrefix prefix = KVPrefix::kNotPrefixed;
};
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index 44b11310544..cced6f61f5d 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -63,4 +63,4 @@ public:
*/
virtual void notifyCappedWaitersIfNeeded() = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h
index cbf4373476f..ddf0406bf10 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.h
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h
@@ -157,4 +157,4 @@ private:
int _cachePressureForTest;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index 499a4215fb0..69e263b7e9d 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -804,7 +804,7 @@ StatusWith<std::unique_ptr<RecordStore>> DurableCatalogImpl::createCollection(
}
CollectionUUID uuid = options.uuid.get();
- opCtx->recoveryUnit()->onRollback([ opCtx, catalog = this, nss, ident, uuid ]() {
+ opCtx->recoveryUnit()->onRollback([opCtx, catalog = this, nss, ident, uuid]() {
// Intentionally ignoring failure
catalog->_engine->getEngine()->dropIdent(opCtx, ident).ignore();
});
@@ -871,7 +871,7 @@ Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, const Namespa
// This will notify the storageEngine to drop the collection only on WUOW::commit().
opCtx->recoveryUnit()->onCommit(
- [ opCtx, catalog = this, nss, uuid, ident ](boost::optional<Timestamp> commitTimestamp) {
+ [opCtx, catalog = this, nss, uuid, ident](boost::optional<Timestamp> commitTimestamp) {
StorageEngineInterface* engine = catalog->_engine;
auto storageEngine = engine->getStorageEngine();
if (storageEngine->supportsPendingDrops() && commitTimestamp) {
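
The two lambda hunks above show a behavior change rather than a new rule: earlier clang-format releases appear to have padded a capture list containing an init-capture (here 'catalog = this') with spaces inside the brackets, and 7.0.1 drops that padding. The reformatted call, verbatim from the first hunk:

opCtx->recoveryUnit()->onRollback([opCtx, catalog = this, nss, ident, uuid]() {
    // Intentionally ignoring failure.
    catalog->_engine->getEngine()->dropIdent(opCtx, ident).ignore();
});
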
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
index 54ee0141b7f..6e921284506 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
@@ -126,4 +126,4 @@ std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext*
}
return all;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index 9a6e30d9e5b..04480585d29 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -134,4 +134,4 @@ private:
// Notified when we write as everything is considered "journalled" since repl depends on it.
JournalListener* _journalListener = &NoOpJournalListener::instance;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index 3ce3af87357..cea71436ecd 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -387,10 +387,8 @@ StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(con
return StatusWith<RecordId>(ErrorCodes::BadValue,
str::stream() << "attempted out-of-order oplog insert of "
- << status.getValue()
- << " (oplog last insert was "
- << _data->records.rbegin()->first
- << " )");
+ << status.getValue() << " (oplog last insert was "
+ << _data->records.rbegin()->first << " )");
}
return status;
}
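
Most of the churn in this commit follows the pattern just above: long 'str::stream() << ...' chains that the previous configuration broke after every '<<' are now bin-packed, fitting as many operands per line as the column limit allows (100 columns in MongoDB's .clang-format) and aligning continuation lines under the first '<<'. The reformatted statement from this hunk:

return StatusWith<RecordId>(ErrorCodes::BadValue,
                            str::stream() << "attempted out-of-order oplog insert of "
                                          << status.getValue() << " (oplog last insert was "
                                          << _data->records.rbegin()->first << " )");
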
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
index 2305f72b52d..fbddf0ebd94 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.cpp
@@ -71,4 +71,4 @@ void EphemeralForTestRecoveryUnit::abortUnitOfWork() {
Status EphemeralForTestRecoveryUnit::obtainMajorityCommittedSnapshot() {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/journal_listener.h b/src/mongo/db/storage/journal_listener.h
index 275b8ad05d7..88597adb2bc 100644
--- a/src/mongo/db/storage/journal_listener.h
+++ b/src/mongo/db/storage/journal_listener.h
@@ -70,4 +70,4 @@ public:
// As this has no state, it is de facto const and can be safely shared freely.
static NoOpJournalListener instance;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 6a279990fb6..8c37cc2ef52 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -1302,9 +1302,9 @@ void toBsonValue(uint8_t ctype,
break;
}
- //
- // Numerics
- //
+ //
+ // Numerics
+ //
case CType::kNumericNaN: {
auto type = typeBits->readNumeric();
@@ -1417,7 +1417,7 @@ void toBsonValue(uint8_t ctype,
case CType::kNumericNegativeSmallMagnitude:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositiveSmallMagnitude: {
const uint8_t originalType = typeBits->readNumeric();
@@ -1548,7 +1548,7 @@ void toBsonValue(uint8_t ctype,
case CType::kNumericNegative1ByteInt:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositive1ByteInt:
case CType::kNumericPositive2ByteInt:
@@ -1788,9 +1788,9 @@ void filterKeyFromKeyString(uint8_t ctype,
break;
}
- //
- // Numerics
- //
+ //
+ // Numerics
+ //
case CType::kNumericNaN: {
break;
@@ -1829,7 +1829,7 @@ void filterKeyFromKeyString(uint8_t ctype,
case CType::kNumericNegativeSmallMagnitude:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositiveSmallMagnitude: {
uint64_t encoded = readType<uint64_t>(reader, inverted);
@@ -1891,7 +1891,7 @@ void filterKeyFromKeyString(uint8_t ctype,
case CType::kNumericNegative1ByteInt:
inverted = !inverted;
isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
+ // fallthrough (format is the same as positive, but inverted)
case CType::kNumericPositive1ByteInt:
case CType::kNumericPositive2ByteInt:
diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp
index f5f8b50c47c..86055a38fd1 100644
--- a/src/mongo/db/storage/key_string_test.cpp
+++ b/src/mongo/db/storage/key_string_test.cpp
@@ -524,7 +524,6 @@ TEST_F(KeyStringTest, LotsOfNumbers3) {
for (double k = 0; k < 8; k++) {
futures.push_back(stdx::async(stdx::launch::async, [k, this] {
-
for (double i = -1100; i < 1100; i++) {
for (double j = 0; j < 52; j++) {
const auto V1 = KeyString::Version::V1;
@@ -746,10 +745,8 @@ const std::vector<BSONObj>& getInterestingElements(KeyString::Version version) {
// Something with exceptional typeBits for Decimal
elements.push_back(
BSON("" << BSON_ARRAY("" << BSONSymbol("") << Decimal128::kNegativeInfinity
- << Decimal128::kPositiveInfinity
- << Decimal128::kPositiveNaN
- << Decimal128("0.0000000")
- << Decimal128("-0E1000"))));
+ << Decimal128::kPositiveInfinity << Decimal128::kPositiveNaN
+ << Decimal128("0.0000000") << Decimal128("-0E1000"))));
}
//
diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp
index 6daff729d9d..2d1706237fc 100644
--- a/src/mongo/db/storage/kv/durable_catalog_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp
@@ -129,8 +129,7 @@ public:
bool match = (expected == actual);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expected) << ", "
- << "Actual: "
- << dumpMultikeyPaths(actual));
+ << "Actual: " << dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 75a065fee40..12054cb6ec0 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -439,4 +439,4 @@ protected:
*/
const int64_t kDefaultCappedSizeBytes = 4096;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index fc53d677e1d..48310040e62 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -168,9 +168,7 @@ TEST(KVEngineTestHarness, SimpleSorted1) {
IndexDescriptor desc(collection.get(),
"",
BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns"
- << ns.ns()
- << "key"
- << BSON("a" << 1)));
+ << ns.ns() << "key" << BSON("a" << 1)));
std::unique_ptr<SortedDataInterface> sorted;
{
MyOperationContext opCtx(engine);
@@ -706,10 +704,7 @@ DEATH_TEST_F(DurableCatalogImplTest, TerminateOnNonNumericIndexVersion, "Fatal A
"",
BSON("v"
<< "1"
- << "ns"
- << ns.ns()
- << "key"
- << BSON("a" << 1)));
+ << "ns" << ns.ns() << "key" << BSON("a" << 1)));
std::unique_ptr<SortedDataInterface> sorted;
{
MyOperationContext opCtx(engine);
diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp
index 078446493bc..6b88dc22c3b 100644
--- a/src/mongo/db/storage/kv/kv_prefix.cpp
+++ b/src/mongo/db/storage/kv/kv_prefix.cpp
@@ -70,4 +70,4 @@ std::string KVPrefix::toString() const {
stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
return KVPrefix(_nextValue++);
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h
index ee35720cbe5..6a785dc19db 100644
--- a/src/mongo/db/storage/kv/kv_prefix.h
+++ b/src/mongo/db/storage/kv/kv_prefix.h
@@ -100,4 +100,4 @@ private:
inline std::ostream& operator<<(std::ostream& s, const KVPrefix& prefix) {
return (s << prefix.toString());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/kv/temporary_kv_record_store.h b/src/mongo/db/storage/kv/temporary_kv_record_store.h
index a992ca69cd4..f4b7c6033bd 100644
--- a/src/mongo/db/storage/kv/temporary_kv_record_store.h
+++ b/src/mongo/db/storage/kv/temporary_kv_record_store.h
@@ -53,8 +53,7 @@ public:
// Move constructor.
TemporaryKVRecordStore(TemporaryKVRecordStore&& other) noexcept
- : TemporaryRecordStore(std::move(other._rs)),
- _kvEngine(other._kvEngine) {}
+ : TemporaryRecordStore(std::move(other._rs)), _kvEngine(other._kvEngine) {}
~TemporaryKVRecordStore();
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h
index 605117e6983..08586e0ece8 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.h
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.h
@@ -102,8 +102,8 @@ public:
private:
/**
- * Gets the front element from _sessions and then pops it off the queue.
- */
+ * Gets the front element from _sessions and then pops it off the queue.
+ */
sqlite3* _popSession_inlock();
// This is used to lock the _sessions vector.
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 1ba6dfda971..ab8752a2d93 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -623,4 +623,4 @@ public:
const RecordData& recordData,
size_t* dataSize) = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 6a72d25e954..9c1578c260b 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -38,8 +38,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
TEST(RecordStoreTestHarness, Simple1) {
const auto harnessHelper(newRecordStoreHarnessHelper());
@@ -115,7 +115,7 @@ public:
return false;
}
};
-}
+} // namespace
TEST(RecordStoreTestHarness, Simple1InsertDocWroter) {
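
The one-line reorderings of using-declarations in this and the following test files come from clang-format's SortUsingDeclarations option, which orders them lexicographically; 'unique_ptr' sorts after 'set', 'string', and 'stringstream', so it moves to the bottom of each group:

using std::set;
using std::string;
using std::stringstream;
using std::unique_ptr;
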
diff --git a/src/mongo/db/storage/record_store_test_randomiter.cpp b/src/mongo/db/storage/record_store_test_randomiter.cpp
index c9c9757d827..dda51057e6d 100644
--- a/src/mongo/db/storage/record_store_test_randomiter.cpp
+++ b/src/mongo/db/storage/record_store_test_randomiter.cpp
@@ -38,10 +38,10 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Create a random iterator for empty record store.
TEST(RecordStoreTestHarness, GetRandomIteratorEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index 38a5f356aad..c50ebba023c 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -42,9 +42,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Insert multiple records and iterate through them in the forward direction.
// When curr() or getNext() is called on an iterator positioned at EOF,
diff --git a/src/mongo/db/storage/record_store_test_recordstore.cpp b/src/mongo/db/storage/record_store_test_recordstore.cpp
index c5a95f250c2..00ed5598017 100644
--- a/src/mongo/db/storage/record_store_test_recordstore.cpp
+++ b/src/mongo/db/storage/record_store_test_recordstore.cpp
@@ -38,8 +38,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
// Verify that the name of the record store is not NULL and nonempty.
TEST(RecordStoreTestHarness, RecordStoreName) {
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index 74aa0237cbc..cad095d0286 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -40,10 +40,10 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Create an iterator for repairing an empty record store.
TEST(RecordStoreTestHarness, GetIteratorForRepairEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp
index 743559a1079..5b54853cab8 100644
--- a/src/mongo/db/storage/record_store_test_storagesize.cpp
+++ b/src/mongo/db/storage/record_store_test_storagesize.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that a nonempty collection maybe takes up some space on disk.
TEST(RecordStoreTestHarness, StorageSizeNonEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp
index 43b52b39ba2..3f3ccc34ee1 100644
--- a/src/mongo/db/storage/record_store_test_touch.cpp
+++ b/src/mongo/db/storage/record_store_test_touch.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that calling touch() on an empty collection returns an OK status.
TEST(RecordStoreTestHarness, TouchEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp
index d05e3e9a117..a37c9a6681c 100644
--- a/src/mongo/db/storage/record_store_test_truncate.cpp
+++ b/src/mongo/db/storage/record_store_test_truncate.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Verify that calling truncate() on an already empty collection returns an OK status.
TEST(RecordStoreTestHarness, TruncateEmpty) {
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index d6f16586cde..b07d215cfa0 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -38,9 +38,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
// Insert a record and try to update it.
TEST(RecordStoreTestHarness, UpdateRecord) {
diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
index 298685c7285..9753e7d76b6 100644
--- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
+++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
@@ -40,8 +40,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
// Insert a record and try to perform an in-place update on it.
TEST(RecordStoreTestHarness, UpdateWithDamages) {
diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp
index d49e1cc5dd5..e24f33ecadd 100644
--- a/src/mongo/db/storage/remove_saver.cpp
+++ b/src/mongo/db/storage/remove_saver.cpp
@@ -45,8 +45,8 @@
using std::ios_base;
using std::ofstream;
-using std::stringstream;
using std::string;
+using std::stringstream;
namespace mongo {
diff --git a/src/mongo/db/storage/snapshot.h b/src/mongo/db/storage/snapshot.h
index 57045aae502..d169e4dada0 100644
--- a/src/mongo/db/storage/snapshot.h
+++ b/src/mongo/db/storage/snapshot.h
@@ -93,4 +93,4 @@ private:
SnapshotId _id;
T _value;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
index 895fc5560d9..20aba3337b3 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
@@ -40,12 +40,15 @@ namespace {
void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ {key5, loc1},
+ });
// Dup key on end point. Illegal for unique indexes.
if (!unique)
@@ -80,12 +83,15 @@ TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Exclusive) {
void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ {key5, loc1},
+ });
// Dup key on end point. Illegal for unique indexes.
if (!unique)
@@ -220,12 +226,14 @@ TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Exclusive) {
void testSetEndPosition_Restore_Forward(bool unique) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(key3, false); // Should never see key3 or key4.
@@ -241,7 +249,8 @@ void testSetEndPosition_Restore_Forward(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1},
+ {key3, loc1},
});
cursor->restore();
@@ -257,12 +266,14 @@ TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Standard) {
void testSetEndPosition_Restore_Reverse(bool unique) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(
- unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ {key4, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(key2, false); // Should never see key1 or key2.
@@ -278,7 +289,8 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1},
+ {key3, loc1},
});
cursor->restore();
@@ -302,7 +314,8 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -337,7 +350,8 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -370,12 +384,13 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(BSONObj(), inclusive);
@@ -401,12 +416,13 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(BSONObj(), inclusive);
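
The initializer-list hunks in this file show how clang-format 7 appears to treat braced lists that end in a trailing comma, as these do: instead of packing several '{key, loc}' pairs per line, it gives every element its own line. A generic sketch, with placeholder values:

#include <utility>
#include <vector>

// Fits on one line and has no trailing comma: left alone.
std::vector<std::pair<int, int>> shortList = {{1, 1}, {2, 1}};

// Trailing comma in the source: one element per line.
std::vector<std::pair<int, int>> longList = {
    {1, 1},
    {2, 1},
    {3, 1},
};
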
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index 88a43ed0005..30d207d5031 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -300,7 +300,8 @@ void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -335,7 +336,8 @@ void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool uniq
auto sorted = harnessHelper->newSortedDataInterface(unique,
/*partial=*/false,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -414,12 +416,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_S
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Forward) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique*/ false,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
@@ -497,12 +500,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_For
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Reverse) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique*/ false,
- /*partial=*/false,
- {
- {key0, loc1}, {key1, loc1}, {key2, loc2},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false,
+ /*partial=*/false,
+ {
+ {key0, loc1},
+ {key1, loc1},
+ {key2, loc2},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -580,12 +584,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Rev
TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(/*unique=*/false,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(/*unique=*/false,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
index fa608652d8f..4a0584e0559 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
@@ -40,12 +40,13 @@ namespace {
void testSeekExact_Hit(bool unique, bool forward) {
const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted =
- harnessHelper->newSortedDataInterface(unique,
- /*partial=*/false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ /*partial=*/false,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -111,7 +112,10 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
/*unique=*/false,
/*partial=*/false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ {key2, loc2},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -131,7 +135,10 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
/*unique=*/false,
/*partial=*/false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ {key2, loc2},
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index d8f6f764e27..a1a68e421dc 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -151,8 +151,8 @@ public:
};
/**
- * The destructor should only be called if we are tearing down but not exiting the process.
- */
+ * The destructor should only be called if we are tearing down but not exiting the process.
+ */
virtual ~StorageEngine() {}
/**
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index f485102aa23..72b53bd7fb9 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -92,8 +92,8 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
if (status.code() == ErrorCodes::DataModifiedByRepair) {
warning() << "Catalog data modified by repair: " << status.reason();
- repairObserver->onModification(str::stream() << "DurableCatalog repaired: "
- << status.reason());
+ repairObserver->onModification(str::stream()
+ << "DurableCatalog repaired: " << status.reason());
} else {
fassertNoTrace(50926, status);
}
@@ -209,8 +209,8 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx) {
if (_options.forRepair) {
StorageRepairObserver::get(getGlobalServiceContext())
- ->onModification(str::stream() << "Collection " << nss << " dropped: "
- << status.reason());
+ ->onModification(str::stream() << "Collection " << nss
+ << " dropped: " << status.reason());
}
wuow.commit();
continue;
@@ -298,8 +298,8 @@ Status StorageEngineImpl::_recoverOrphanedCollection(OperationContext* opCtx,
}
if (dataModified) {
StorageRepairObserver::get(getGlobalServiceContext())
- ->onModification(str::stream() << "Collection " << collectionName << " recovered: "
- << status.reason());
+ ->onModification(str::stream() << "Collection " << collectionName
+ << " recovered: " << status.reason());
}
wuow.commit();
return Status::OK();
@@ -397,8 +397,7 @@ StorageEngineImpl::reconcileCatalogAndIdents(OperationContext* opCtx) {
if (engineIdents.find(identForColl) == engineIdents.end()) {
return {ErrorCodes::UnrecoverableRollbackError,
str::stream() << "Expected collection does not exist. Collection: " << coll
- << " Ident: "
- << identForColl};
+ << " Ident: " << identForColl};
}
}
}
@@ -494,8 +493,8 @@ StorageEngineImpl::reconcileCatalogAndIdents(OperationContext* opCtx) {
for (auto&& indexName : indexesToDrop) {
invariant(metaData.eraseIndex(indexName),
- str::stream() << "Index is missing. Collection: " << coll << " Index: "
- << indexName);
+ str::stream()
+ << "Index is missing. Collection: " << coll << " Index: " << indexName);
}
if (indexesToDrop.size() > 0) {
WriteUnitOfWork wuow(opCtx);
@@ -683,8 +682,8 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx, const Names
}
if (dataModified) {
- repairObserver->onModification(str::stream() << "Collection " << nss << ": "
- << status.reason());
+ repairObserver->onModification(str::stream()
+ << "Collection " << nss << ": " << status.reason());
}
// After repairing, re-initialize the collection with a valid RecordStore.
@@ -819,8 +818,8 @@ void StorageEngineImpl::_dumpCatalog(OperationContext* opCtx) {
while (rec) {
// This should only be called by a parent that's done an appropriate `shouldLog` check. Do
// not duplicate the log level policy.
- LOG_FOR_RECOVERY(kCatalogLogLevel) << "\tId: " << rec->id
- << " Value: " << rec->data.toBson();
+ LOG_FOR_RECOVERY(kCatalogLogLevel)
+ << "\tId: " << rec->id << " Value: " << rec->data.toBson();
rec = cursor->next();
}
opCtx->recoveryUnit()->abandonSnapshot();
diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp
index fd7701b8c8d..7418219ec8c 100644
--- a/src/mongo/db/storage/storage_engine_init.cpp
+++ b/src/mongo/db/storage/storage_engine_init.cpp
@@ -106,14 +106,12 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
getFactoryForStorageEngine(service, storageGlobalParams.engine);
if (factory) {
uassert(28662,
- str::stream() << "Cannot start server. Detected data files in " << dbpath
- << " created by"
- << " the '"
- << *existingStorageEngine
- << "' storage engine, but the"
- << " specified storage engine was '"
- << factory->getCanonicalName()
- << "'.",
+ str::stream()
+ << "Cannot start server. Detected data files in " << dbpath
+ << " created by"
+ << " the '" << *existingStorageEngine << "' storage engine, but the"
+ << " specified storage engine was '" << factory->getCanonicalName()
+ << "'.",
factory->getCanonicalName() == *existingStorageEngine);
}
} else {
@@ -156,8 +154,7 @@ void initializeStorageEngine(ServiceContext* service, const StorageEngineInitFla
uassert(34368,
str::stream()
<< "Server was started in read-only mode, but the configured storage engine, "
- << storageGlobalParams.engine
- << ", does not support read-only operation",
+ << storageGlobalParams.engine << ", does not support read-only operation",
factory->supportsReadOnly());
}
@@ -223,9 +220,7 @@ void createLockFile(ServiceContext* service) {
} catch (const std::exception& ex) {
uassert(28596,
str::stream() << "Unable to determine status of lock file in the data directory "
- << storageGlobalParams.dbpath
- << ": "
- << ex.what(),
+ << storageGlobalParams.dbpath << ": " << ex.what(),
false);
}
const bool wasUnclean = lockFile->createdByUncleanShutdown();
diff --git a/src/mongo/db/storage/storage_engine_interface.h b/src/mongo/db/storage/storage_engine_interface.h
index db7201c1492..77703c8aa6f 100644
--- a/src/mongo/db/storage/storage_engine_interface.h
+++ b/src/mongo/db/storage/storage_engine_interface.h
@@ -46,4 +46,4 @@ public:
StringData ident) = 0;
virtual DurableCatalog* getCatalog() = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
index b39b0503547..c0398eddec3 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
@@ -67,8 +67,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(40387,
- str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
- << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -85,8 +85,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(40388,
- str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
false);
}
}
@@ -136,8 +136,7 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": "
- << ex.what());
+ << ": " << ex.what());
}
// Use file permissions 644
@@ -153,13 +152,11 @@ Status StorageEngineLockFile::open() {
}
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to create/open the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Ensure the user executing mongod is the owner of the lock "
"file and has the appropriate permissions. Also make sure "
"that another mongod instance is not already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
int ret = ::flock(lockFile, LOCK_EX | LOCK_NB);
if (ret != 0) {
@@ -167,11 +164,9 @@ Status StorageEngineLockFile::open() {
::close(lockFile);
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to lock the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Another mongod instance is already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
_lockFileHandle->_fd = lockFile;
return Status::OK();
@@ -197,9 +192,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string to file (ftruncate failed): "
- << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << _filespec << ' ' << errnoWithDescription(errorcode));
}
int bytesWritten = ::write(_lockFileHandle->_fd, str.rawData(), str.size());
@@ -207,8 +200,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << ' ' << errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
@@ -220,9 +212,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << str
- << " to file (fsync failed): "
- << _filespec
- << ' '
+ << " to file (fsync failed): " << _filespec << ' '
<< errnoWithDescription(errorcode));
}
diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
index 153040ef874..df4967e2d41 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
@@ -46,8 +46,8 @@
namespace {
-using std::string;
using mongo::unittest::TempDir;
+using std::string;
using namespace mongo;
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index 2be6f11bb03..4055318d1d8 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -108,8 +108,7 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": "
- << ex.what());
+ << ": " << ex.what());
}
HANDLE lockFileHandle = CreateFileW(toNativeString(_filespec.c_str()).c_str(),
@@ -130,13 +129,11 @@ Status StorageEngineLockFile::open() {
}
return Status(ErrorCodes::DBPathInUse,
str::stream() << "Unable to create/open the lock file: " << _filespec << " ("
- << errnoWithDescription(errorcode)
- << ")."
+ << errnoWithDescription(errorcode) << ")."
<< " Ensure the user executing mongod is the owner of the lock "
"file and has the appropriate permissions. Also make sure "
"that another mongod instance is not already running on the "
- << _dbpath
- << " directory");
+ << _dbpath << " directory");
}
_lockFileHandle->_handle = lockFileHandle;
return Status::OK();
@@ -171,8 +168,7 @@ Status StorageEngineLockFile::writeString(StringData str) {
int errorcode = GetLastError();
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
- << ' '
- << errnoWithDescription(errorcode));
+ << ' ' << errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write string " << str << " to file: " << _filespec
diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp
index 62fecc4c102..ecf401f3ee9 100644
--- a/src/mongo/db/storage/storage_engine_metadata.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata.cpp
@@ -142,13 +142,13 @@ Status StorageEngineMetadata::read() {
boost::uintmax_t fileSize = boost::filesystem::file_size(metadataPath);
if (fileSize == 0) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Metadata file " << metadataPath.string()
- << " cannot be empty.");
+ str::stream()
+ << "Metadata file " << metadataPath.string() << " cannot be empty.");
}
if (fileSize == static_cast<boost::uintmax_t>(-1)) {
return Status(ErrorCodes::InvalidPath,
- str::stream() << "Unable to determine size of metadata file "
- << metadataPath.string());
+ str::stream()
+ << "Unable to determine size of metadata file " << metadataPath.string());
}
std::vector<char> buffer(fileSize);
@@ -156,23 +156,21 @@ Status StorageEngineMetadata::read() {
std::ifstream ifs(metadataPath.c_str(), std::ios_base::in | std::ios_base::binary);
if (!ifs) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "Failed to read metadata from "
- << metadataPath.string());
+ str::stream()
+ << "Failed to read metadata from " << metadataPath.string());
}
// Read BSON from file
ifs.read(&buffer[0], buffer.size());
if (!ifs) {
return Status(ErrorCodes::FileStreamFailed,
- str::stream() << "Unable to read BSON data from "
- << metadataPath.string());
+ str::stream()
+ << "Unable to read BSON data from " << metadataPath.string());
}
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unexpected error reading BSON data from "
- << metadataPath.string()
- << ": "
- << ex.what());
+ << metadataPath.string() << ": " << ex.what());
}
ConstDataRange cdr(&buffer[0], buffer.size());
@@ -232,8 +230,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650,
- str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
- << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -250,8 +248,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(13651,
- str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
false);
}
}
@@ -270,9 +268,9 @@ Status StorageEngineMetadata::write() const {
{
std::ofstream ofs(metadataTempPath.c_str(), std::ios_base::out | std::ios_base::binary);
if (!ofs) {
- return Status(
- ErrorCodes::FileNotOpen,
- str::stream() << "Failed to write metadata to " << metadataTempPath.string() << ": "
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream()
+ << "Failed to write metadata to " << metadataTempPath.string() << ": "
<< errnoWithDescription());
}
@@ -281,10 +279,9 @@ Status StorageEngineMetadata::write() const {
ofs.write(obj.objdata(), obj.objsize());
if (!ofs) {
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Failed to write BSON data to "
- << metadataTempPath.string()
- << ": "
- << errnoWithDescription());
+ str::stream()
+ << "Failed to write BSON data to " << metadataTempPath.string()
+ << ": " << errnoWithDescription());
}
}
@@ -304,11 +301,8 @@ Status StorageEngineMetadata::write() const {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileRenameFailed,
str::stream() << "Unexpected error while renaming temporary metadata file "
- << metadataTempPath.string()
- << " to "
- << metadataPath.string()
- << ": "
- << ex.what());
+ << metadataTempPath.string() << " to " << metadataPath.string()
+ << ": " << ex.what());
}
return Status::OK();
@@ -324,21 +318,16 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(
ErrorCodes::InvalidOptions,
str::stream()
<< "Requested option conflicts with the current storage engine option for "
- << fieldName
- << "; you requested "
- << (expectedValue ? "true" : "false")
+ << fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is implicitly set to "
- << (*defaultValue ? "true" : "false")
- << " and cannot be changed");
+ << (*defaultValue ? "true" : "false") << " and cannot be changed");
}
return Status::OK();
}
if (!element.isBoolean()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Expected boolean field " << fieldName << " but got "
- << typeName(element.type())
- << " instead: "
- << element);
+ << typeName(element.type()) << " instead: " << element);
}
if (element.boolean() == expectedValue) {
return Status::OK();
@@ -346,12 +335,9 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(
return Status(
ErrorCodes::InvalidOptions,
str::stream() << "Requested option conflicts with current storage engine option for "
- << fieldName
- << "; you requested "
- << (expectedValue ? "true" : "false")
+ << fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is already set to "
- << (element.boolean() ? "true" : "false")
- << " and cannot be changed");
+ << (element.boolean() ? "true" : "false") << " and cannot be changed");
}
} // namespace mongo
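
One more recurring shape in these hunks: when the first operand would overflow the line, clang-format 7 breaks immediately after 'str::stream()' and carries the whole '<<' chain to the next line, rather than splitting the message across a ragged wrap. From the hunk above:

return Status(ErrorCodes::InvalidPath,
              str::stream()
                  << "Metadata file " << metadataPath.string() << " cannot be empty.");
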
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 0e1e59dc5c2..57e386644d6 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -44,8 +44,8 @@
namespace {
-using std::string;
using mongo::unittest::TempDir;
+using std::string;
using namespace mongo;
diff --git a/src/mongo/db/storage/storage_file_util.cpp b/src/mongo/db/storage/storage_file_util.cpp
index c267b292ee1..dd47a85642d 100644
--- a/src/mongo/db/storage/storage_file_util.cpp
+++ b/src/mongo/db/storage/storage_file_util.cpp
@@ -72,8 +72,8 @@ Status fsyncParentDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY);
if (fd < 0) {
return {ErrorCodes::FileOpenFailed,
- str::stream() << "Failed to open directory " << dir.string() << " for flushing: "
- << errnoWithDescription()};
+ str::stream() << "Failed to open directory " << dir.string()
+ << " for flushing: " << errnoWithDescription()};
}
if (fsync(fd) != 0) {
int e = errno;
@@ -82,8 +82,8 @@ Status fsyncParentDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
return {ErrorCodes::OperationFailed,
- str::stream() << "Failed to fsync directory '" << dir.string() << "': "
- << errnoWithDescription(e)};
+ str::stream() << "Failed to fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e)};
}
}
close(fd);
@@ -102,9 +102,7 @@ Status fsyncRename(const boost::filesystem::path& source, const boost::filesyste
if (ec) {
return {ErrorCodes::FileRenameFailed,
str::stream() << "Error renaming data file from " << source.string() << " to "
- << dest.string()
- << ": "
- << ec.message()};
+ << dest.string() << ": " << ec.message()};
}
auto status = fsyncFile(dest);
if (!status.isOK()) {
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index fb1d025289d..1da860e1e0d 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -63,17 +63,12 @@ public:
<< (oldestRequiredTimestampForCrashRecovery
? *oldestRequiredTimestampForCrashRecovery
: Timestamp())
- << "supportsPendingDrops"
- << engine->supportsPendingDrops()
+ << "supportsPendingDrops" << engine->supportsPendingDrops()
<< "dropPendingIdents"
<< static_cast<long long>(engine->getDropPendingIdents().size())
- << "supportsSnapshotReadConcern"
- << engine->supportsReadConcernSnapshot()
- << "readOnly"
- << storageGlobalParams.readOnly
- << "persistent"
- << !engine->isEphemeral()
- << "backupCursorOpen"
+ << "supportsSnapshotReadConcern" << engine->supportsReadConcernSnapshot()
+ << "readOnly" << storageGlobalParams.readOnly << "persistent"
+ << !engine->isEphemeral() << "backupCursorOpen"
<< backupCursorHooks->isBackupCursorOpen());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
index abadc810e6a..2c321725173 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h
@@ -77,4 +77,4 @@ protected:
WT_CURSOR* _cursor = nullptr; // Owned
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 502cc540aab..ac0a40b1958 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -145,8 +145,7 @@ StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& option
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\''
- << " is not a supported option.");
+ << '\'' << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
@@ -274,9 +273,7 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
Status indexVersionStatus(
ErrorCodes::UnsupportedFormat,
str::stream() << versionStatus.reason() << " Index: {name: " << desc->indexName()
- << ", ns: "
- << desc->parentNS()
- << "} - version too new for this mongod."
+ << ", ns: " << desc->parentNS() << "} - version too new for this mongod."
<< " See http://dochub.mongodb.org/core/4.2-downgrade-index for detailed"
<< " instructions on how to handle this error.");
fassertFailedWithStatusNoTrace(28579, indexVersionStatus);
@@ -346,10 +343,10 @@ void WiredTigerIndex::fullValidate(OperationContext* opCtx,
warning() << msg;
fullResults->warnings.push_back(msg);
} else if (err) {
- std::string msg = str::stream() << "verify() returned " << wiredtiger_strerror(err)
- << ". "
- << "This indicates structural damage. "
- << "Not examining individual index entries.";
+ std::string msg = str::stream()
+ << "verify() returned " << wiredtiger_strerror(err) << ". "
+ << "This indicates structural damage. "
+ << "Not examining individual index entries.";
error() << msg;
fullResults->errors.push_back(msg);
fullResults->valid = false;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index e4a6d84c447..854c8799b67 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -291,4 +291,4 @@ public:
bool dupsAllowed) override;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
index b84d3e812b1..f53623761d1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
@@ -85,15 +85,9 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode)
- << " but got "
- << status.toString()
- << " instead. metadataOptions: "
- << metadataOptions
- << "; directoryPerDB: "
- << directoryPerDB
- << "; directoryForIndexes: "
- << directoryForIndexes);
+ << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
+ << " instead. metadataOptions: " << metadataOptions << "; directoryPerDB: "
+ << directoryPerDB << "; directoryForIndexes: " << directoryForIndexes);
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 2b463a302d9..47d2e14e9dd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1756,8 +1756,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::recoverToStableTimestamp(OperationCont
str::stream()
<< "No stable timestamp available to recover to. Initial data timestamp: "
<< initialDataTS.toString()
- << ", Stable timestamp: "
- << stableTS.toString());
+ << ", Stable timestamp: " << stableTS.toString());
}
LOG_FOR_ROLLBACK(2) << "WiredTiger::RecoverToStableTimestamp syncing size storer to disk.";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index a3fee8cde25..b80f0698a59 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -473,4 +473,4 @@ private:
// timestamp. Provided by replication layer because WT does not persist timestamps.
AtomicWord<std::uint64_t> _initialDataTimestamp;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
index 6e4cbf157ab..90292778505 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
@@ -50,4 +50,4 @@ MONGO_STARTUP_OPTIONS_STORE(WiredTigerOptions)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 5cda75b3c2f..dcbbbf34a65 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -56,9 +56,8 @@ Status applyMaxCacheOverflowSizeGBParameter(WiredTigerMaxCacheOverflowSizeGBPara
int ret = param._data.second->reconfigure(
fmt::format("cache_overflow=(file_max={}M)", valueMB).c_str());
if (ret != 0) {
- string result =
- (str::stream() << "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result = (str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
@@ -91,9 +90,8 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
invariant(_data.second);
int ret = _data.second->reconfigure(str.c_str());
if (ret != 0) {
- string result =
- (str::stream() << "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result = (str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
index 98d91659b4e..e5c44e02365 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp
@@ -77,12 +77,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
index 76c8e5121a2..ac09e8d7574 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
@@ -65,9 +65,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
class PrefixedWiredTigerHarnessHelper final : public RecordStoreHarnessHelper {
public:
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 627a21dec18..f05ce0457f8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -71,8 +71,8 @@
namespace mongo {
using namespace fmt::literals;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
namespace {
@@ -480,8 +480,7 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\''
- << " is not a supported option.");
+ << '\'' << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
@@ -644,10 +643,11 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
_engineName(params.engineName),
_isCapped(params.isCapped),
_isEphemeral(params.isEphemeral),
- _isLogged(!isTemp() && WiredTigerUtil::useTableLogging(
- NamespaceString(ns()),
- getGlobalReplSettings().usingReplSets() ||
- repl::ReplSettings::shouldRecoverFromOplogAsStandalone())),
+ _isLogged(!isTemp() &&
+ WiredTigerUtil::useTableLogging(
+ NamespaceString(ns()),
+ getGlobalReplSettings().usingReplSets() ||
+ repl::ReplSettings::shouldRecoverFromOplogAsStandalone())),
_isOplog(NamespaceString::oplog(params.ns)),
_cappedMaxSize(params.cappedMaxSize),
_cappedMaxSizeSlack(std::min(params.cappedMaxSize / 10, int64_t(16 * 1024 * 1024))),
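
In the constructor hunk above, the wrap point moves outward: rather than breaking inside the 'WiredTigerUtil::useTableLogging(...)' argument list, clang-format 7 breaks after '&&' and keeps each operand of the boolean expression intact, consistent with its penalty-based line breaking, where a break around a low-precedence operator is cheaper than one inside call arguments. Shape only, with hypothetical names:

bool logged = !isTemporary() &&
              shouldLogTable(namespaceString,
                             usingReplSets || recoverAsStandalone);
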
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 93e0f221432..b9c323678b3 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -529,4 +529,4 @@ MONGO_FAIL_POINT_DECLARE(WTWriteConflictExceptionForReads);
// will not be considered durable until deactivated. It is unspecified whether writes that commit
// before activation will become visible while active.
MONGO_FAIL_POINT_DECLARE(WTPausePrimaryOplogDurabilityLoop);
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 4c7dcf641a6..ebc51a57ef0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -57,9 +57,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) {
BSONObj spec = fromjson("{}");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 79126850f5c..f049f5f57a0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -420,8 +420,7 @@ void WiredTigerRecoveryUnit::_txnClose(bool commit) {
str::stream() << "Cannot have both a _lastTimestampSet and a "
"_commitTimestamp. _lastTimestampSet: "
<< _lastTimestampSet->toString()
- << ". _commitTimestamp: "
- << _commitTimestamp.toString());
+ << ". _commitTimestamp: " << _commitTimestamp.toString());
// We reset the _lastTimestampSet between transactions. Since it is legal for one
// transaction on a RecoveryUnit to call setTimestamp() and another to call
@@ -658,8 +657,7 @@ Status WiredTigerRecoveryUnit::setTimestamp(Timestamp timestamp) {
invariant(_prepareTimestamp.isNull());
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp set to " << _commitTimestamp.toString()
- << " and trying to set WUOW timestamp to "
- << timestamp.toString());
+ << " and trying to set WUOW timestamp to " << timestamp.toString());
invariant(_readAtTimestamp.isNull() || timestamp >= _readAtTimestamp,
str::stream() << "future commit timestamp " << timestamp.toString()
<< " cannot be older than read timestamp "
@@ -686,12 +684,10 @@ void WiredTigerRecoveryUnit::setCommitTimestamp(Timestamp timestamp) {
invariant(!_inUnitOfWork() || !_prepareTimestamp.isNull(), toString(_state));
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp set to " << _commitTimestamp.toString()
- << " and trying to set it to "
- << timestamp.toString());
+ << " and trying to set it to " << timestamp.toString());
invariant(!_lastTimestampSet,
str::stream() << "Last timestamp set is " << _lastTimestampSet->toString()
- << " and trying to set commit timestamp to "
- << timestamp.toString());
+ << " and trying to set commit timestamp to " << timestamp.toString());
invariant(!_isTimestamped);
_commitTimestamp = timestamp;
@@ -705,9 +701,7 @@ void WiredTigerRecoveryUnit::setDurableTimestamp(Timestamp timestamp) {
invariant(
_durableTimestamp.isNull(),
str::stream() << "Trying to reset durable timestamp when it was already set. wasSetTo: "
- << _durableTimestamp.toString()
- << " setTo: "
- << timestamp.toString());
+ << _durableTimestamp.toString() << " setTo: " << timestamp.toString());
_durableTimestamp = timestamp;
}
@@ -731,16 +725,13 @@ void WiredTigerRecoveryUnit::setPrepareTimestamp(Timestamp timestamp) {
invariant(_inUnitOfWork(), toString(_state));
invariant(_prepareTimestamp.isNull(),
str::stream() << "Trying to set prepare timestamp to " << timestamp.toString()
- << ". It's already set to "
- << _prepareTimestamp.toString());
+ << ". It's already set to " << _prepareTimestamp.toString());
invariant(_commitTimestamp.isNull(),
str::stream() << "Commit timestamp is " << _commitTimestamp.toString()
- << " and trying to set prepare timestamp to "
- << timestamp.toString());
+ << " and trying to set prepare timestamp to " << timestamp.toString());
invariant(!_lastTimestampSet,
str::stream() << "Last timestamp set is " << _lastTimestampSet->toString()
- << " and trying to set prepare timestamp to "
- << timestamp.toString());
+ << " and trying to set prepare timestamp to " << timestamp.toString());
_prepareTimestamp = timestamp;
}
@@ -780,8 +771,7 @@ void WiredTigerRecoveryUnit::setRoundUpPreparedTimestamps(bool value) {
// This cannot be called after WiredTigerRecoveryUnit::_txnOpen.
invariant(!_isActive(),
str::stream() << "Can't change round up prepared timestamps flag "
- << "when current state is "
- << toString(_state));
+ << "when current state is " << toString(_state));
_roundUpPreparedTimestamps =
(value) ? RoundUpPreparedTimestamps::kRound : RoundUpPreparedTimestamps::kNoRound;
}
@@ -794,8 +784,7 @@ void WiredTigerRecoveryUnit::setTimestampReadSource(ReadSource readSource,
invariant(!_isActive() || _timestampReadSource == readSource,
str::stream() << "Current state: " << toString(_state)
<< ". Invalid internal state while setting timestamp read source: "
- << static_cast<int>(readSource)
- << ", provided timestamp: "
+ << static_cast<int>(readSource) << ", provided timestamp: "
<< (provided ? provided->toString() : "none"));
invariant(!provided == (readSource != ReadSource::kProvided));
invariant(!(provided && provided->isNull()));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
index df5e5935b8f..db32a031cb5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
@@ -60,7 +60,7 @@ public:
false, // .ephemeral
false, // .repair
false // .readOnly
- ) {
+ ) {
repl::ReplicationCoordinator::set(
getGlobalServiceContext(),
std::unique_ptr<repl::ReplicationCoordinator>(new repl::ReplicationCoordinatorMock(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index ace8580f465..afb2da1fbed 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -359,4 +359,4 @@ typedef std::unique_ptr<WiredTigerSession,
UniqueWiredTigerSession;
extern const std::string kWTRepairMsg;
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index b9096b29279..5db2a4e72bc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -103,4 +103,4 @@ private:
mutable stdx::mutex _bufferMutex; // Guards _buffer
Buffer _buffer;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index c5f2fc17651..75c9777a502 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -98,4 +98,4 @@ private:
mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot.
boost::optional<Timestamp> _localSnapshot;
};
-}
+} // namespace mongo
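The .h hunks above (record_store.h, session_cache.h, size_storer.h, snapshot_manager.h) change nothing but the brace closing namespace mongo: clang-format's FixNamespaceComments pass appends a namespace-end comment where one is missing and corrects one that names the wrong scope. A minimal sketch of the output it enforces:

    // Given a bare or mislabeled closing brace, the pass rewrites it to:
    namespace mongo {
    class WiredTigerSizeStorer;
    }  // namespace mongo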
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
index 77ce7b9f222..7349d5786f1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp
@@ -77,12 +77,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "v"
- << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
- << "ns"
- << ns
- << "unique"
- << unique);
+ << "v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns << "unique" << unique);
if (partial) {
auto partialBSON =
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index 4e09d0fdd9b..754171418b6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -64,9 +64,9 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
class WiredTigerHarnessHelper final : public RecordStoreHarnessHelper {
public:
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 829dbd1a99f..9c2b1155483 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -187,9 +187,7 @@ Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
if (keysSeen.count(key)) {
return Status(ErrorCodes::Error(50998),
str::stream() << "app_metadata must not contain duplicate keys. "
- << "Found multiple instances of key '"
- << key
- << "'.");
+ << "Found multiple instances of key '" << key << "'.");
}
keysSeen.insert(key);
@@ -265,9 +263,7 @@ StatusWith<int64_t> WiredTigerUtil::checkApplicationMetadataFormatVersion(Operat
if (version < minimumVersion || version > maximumVersion) {
return Status(ErrorCodes::UnsupportedFormat,
str::stream() << "Application metadata for " << uri
- << " has unsupported format version: "
- << version
- << ".");
+ << " has unsupported format version: " << version << ".");
}
LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
@@ -320,8 +316,7 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
if (ret != 0) {
return StatusWith<uint64_t>(ErrorCodes::CursorNotFound,
str::stream() << "unable to open cursor at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ << ". reason: " << wiredtiger_strerror(ret));
}
invariant(cursor);
ON_BLOCK_EXIT([&] { cursor->close(cursor); });
@@ -329,21 +324,19 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
cursor->set_key(cursor, statisticsKey);
ret = cursor->search(cursor);
if (ret != 0) {
- return StatusWith<uint64_t>(
- ErrorCodes::NoSuchKey,
- str::stream() << "unable to find key " << statisticsKey << " at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(ErrorCodes::NoSuchKey,
+ str::stream()
+ << "unable to find key " << statisticsKey << " at URI "
+ << uri << ". reason: " << wiredtiger_strerror(ret));
}
uint64_t value;
ret = cursor->get_value(cursor, NULL, NULL, &value);
if (ret != 0) {
- return StatusWith<uint64_t>(
- ErrorCodes::BadValue,
- str::stream() << "unable to get value for key " << statisticsKey << " at URI " << uri
- << ". reason: "
- << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(ErrorCodes::BadValue,
+ str::stream() << "unable to get value for key " << statisticsKey
+ << " at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
return StatusWith<uint64_t>(value);
@@ -461,7 +454,7 @@ WT_EVENT_HANDLER defaultEventHandlers() {
handlers.handle_progress = mdb_handle_progress;
return handlers;
}
-}
+} // namespace
WiredTigerEventHandler::WiredTigerEventHandler() {
WT_EVENT_HANDLER* handler = static_cast<WT_EVENT_HANDLER*>(this);
@@ -577,8 +570,7 @@ Status WiredTigerUtil::setTableLogging(WT_SESSION* session, const std::string& u
// Sanity check against a table having multiple logging specifications.
invariant(false,
str::stream() << "Table has contradictory logging settings. Uri: " << uri
- << " Conf: "
- << existingMetadata);
+ << " Conf: " << existingMetadata);
}
if (existingMetadata.find(setting) != std::string::npos) {
@@ -617,8 +609,8 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c);
if (ret != 0) {
return Status(ErrorCodes::CursorNotFound,
- str::stream() << "unable to open cursor at URI " << uri << ". reason: "
- << wiredtiger_strerror(ret));
+ str::stream() << "unable to open cursor at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
bob->append("uri", uri);
invariant(c);
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index d6f6ebca0d2..28c27b67460 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -73,20 +73,16 @@ const NamespaceString sessionCollectionNamespace("config.system.sessions");
MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
- v3SystemUsersKeyPattern = BSON(
- AuthorizationManager::USER_NAME_FIELD_NAME << 1 << AuthorizationManager::USER_DB_FIELD_NAME
- << 1);
- v3SystemRolesKeyPattern = BSON(
- AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME
- << 1);
+ v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+ v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
v3SystemUsersIndexName =
std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::USER_DB_FIELD_NAME
- << "_1");
+ << AuthorizationManager::USER_DB_FIELD_NAME << "_1");
v3SystemRolesIndexName =
std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::ROLE_DB_FIELD_NAME
- << "_1");
+ << AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
v3SystemUsersIndexSpec.addKeys(v3SystemUsersKeyPattern);
v3SystemUsersIndexSpec.unique();
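The system_index.cpp hunk applies the same operator<< repacking to the BSON() builder macro, which clang-format treats like any other stream expression. A simplified sketch, with literal field names standing in for the AuthorizationManager constants used above:

    // before: the chain broke after almost every operand
    // after:  operands pack onto one line when they fit
    BSONObj keyPattern = BSON("user" << 1 << "db" << 1);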
diff --git a/src/mongo/db/traffic_reader.cpp b/src/mongo/db/traffic_reader.cpp
index 18fa2baf7dd..b6de5022d0b 100644
--- a/src/mongo/db/traffic_reader.cpp
+++ b/src/mongo/db/traffic_reader.cpp
@@ -93,8 +93,8 @@ bool readBytes(size_t toRead, char* buf, int fd) {
auto pair = errnoAndDescription();
uassert(ErrorCodes::FileStreamFailed,
- str::stream() << "failed to read bytes: errno(" << pair.first << ") : "
- << pair.second,
+ str::stream() << "failed to read bytes: errno(" << pair.first
+ << ") : " << pair.second,
pair.first == EINTR);
continue;
diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp
index 17f4756cce9..4252cc1cfb5 100644
--- a/src/mongo/db/traffic_recorder.cpp
+++ b/src/mongo/db/traffic_recorder.cpp
@@ -100,7 +100,7 @@ public:
}
void run() {
- _thread = stdx::thread([ consumer = std::move(_pcqPipe.consumer), this ] {
+ _thread = stdx::thread([consumer = std::move(_pcqPipe.consumer), this] {
try {
DataBuilder db;
std::fstream out(_path,
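The traffic_recorder.cpp hunk is a pure spacing fix: earlier clang-format releases padded the lambda introducer whenever the capture list contained an init-capture, writing [ consumer = ..., this ]; 7.x spaces it like any ordinary capture list. A self-contained sketch:

    int base = 1;
    // 6.x: [ value = base + 1, &base ](int x) { ... }
    // 7.x: [value = base + 1, &base](int x) { ... }
    auto add = [value = base + 1, &base](int x) { return value + base + x; };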
diff --git a/src/mongo/db/traffic_recorder_validators.cpp b/src/mongo/db/traffic_recorder_validators.cpp
index 918784563d2..c9c48501e8d 100644
--- a/src/mongo/db/traffic_recorder_validators.cpp
+++ b/src/mongo/db/traffic_recorder_validators.cpp
@@ -38,8 +38,8 @@ namespace mongo {
Status validateTrafficRecordDestination(const std::string& path) {
if (!path.empty() && !boost::filesystem::is_directory(path)) {
return Status(ErrorCodes::FileNotOpen,
- str::stream() << "traffic recording directory \"" << path
- << "\" is not a directory.");
+ str::stream()
+ << "traffic recording directory \"" << path << "\" is not a directory.");
}
return Status::OK();
diff --git a/src/mongo/db/transaction_history_iterator.cpp b/src/mongo/db/transaction_history_iterator.cpp
index cbef1bafc4d..e9e57c1d0b8 100644
--- a/src/mongo/db/transaction_history_iterator.cpp
+++ b/src/mongo/db/transaction_history_iterator.cpp
@@ -96,8 +96,7 @@ BSONObj findOneOplogEntry(OperationContext* opCtx,
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "oplog no longer contains the complete write history of this "
"transaction, log with opTime "
- << opTime.toBSON()
- << " cannot be found",
+ << opTime.toBSON() << " cannot be found",
getNextResult != PlanExecutor::IS_EOF);
if (getNextResult != PlanExecutor::ADVANCED) {
uassertStatusOKWithContext(WorkingSetCommon::getMemberObjectStatus(oplogBSON),
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 7fbbce22dd0..903d3a01b13 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -419,8 +419,7 @@ void TransactionParticipant::Participant::_continueMultiDocumentTransaction(Oper
TxnNumber txnNumber) {
uassert(ErrorCodes::NoSuchTransaction,
str::stream()
- << "Given transaction number "
- << txnNumber
+ << "Given transaction number " << txnNumber
<< " does not match any in-progress transactions. The active transaction number is "
<< o().activeTxnNumber,
txnNumber == o().activeTxnNumber && !o().txnState.isInRetryableWriteMode());
@@ -442,8 +441,7 @@ void TransactionParticipant::Participant::_continueMultiDocumentTransaction(Oper
uasserted(
ErrorCodes::NoSuchTransaction,
str::stream()
- << "Transaction "
- << txnNumber
+ << "Transaction " << txnNumber
<< " has been aborted because an earlier command in this transaction failed.");
}
return;
@@ -503,9 +501,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
uassert(ErrorCodes::TransactionTooOld,
str::stream() << "Cannot start transaction " << txnNumber << " on session "
- << _sessionId()
- << " because a newer transaction "
- << o().activeTxnNumber
+ << _sessionId() << " because a newer transaction " << o().activeTxnNumber
<< " has already started.",
txnNumber >= o().activeTxnNumber);
@@ -552,8 +548,7 @@ void TransactionParticipant::Participant::beginOrContinue(OperationContext* opCt
TransactionState::kNone | TransactionState::kAbortedWithoutPrepare;
uassert(50911,
str::stream() << "Cannot start a transaction at given transaction number "
- << txnNumber
- << " a transaction with the same number is in state "
+ << txnNumber << " a transaction with the same number is in state "
<< o().txnState,
o().txnState.isInSet(restartableStates));
}
@@ -1087,8 +1082,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
uassert(ErrorCodes::OperationNotSupportedInTransaction,
str::stream() << "prepareTransaction failed because one of the transaction "
"operations was done against a temporary collection '"
- << collection->ns()
- << "'.",
+ << collection->ns() << "'.",
!collection->isTemporary(opCtx));
}
@@ -1394,8 +1388,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
str::stream() << "Commit oplog entry must be greater than or equal to commit "
"timestamp due to causal consistency. commit timestamp: "
<< commitTimestamp.toBSON()
- << ", commit oplog entry optime: "
- << commitOplogSlot.toBSON());
+ << ", commit oplog entry optime: " << commitOplogSlot.toBSON());
} else {
// We always expect a non-null commitOplogEntryOpTime to be passed in on secondaries
// in order to set the finishOpTime.
@@ -1852,8 +1845,7 @@ void TransactionParticipant::TransactionState::transitionTo(StateFlag newState,
if (shouldValidate == TransitionValidation::kValidateTransition) {
invariant(TransactionState::_isLegalTransition(_state, newState),
str::stream() << "Current state: " << toString(_state)
- << ", Illegal attempted next state: "
- << toString(newState));
+ << ", Illegal attempted next state: " << toString(newState));
}
// If we are transitioning out of prepare, signal waiters by fulfilling the completion promise.
@@ -2191,9 +2183,7 @@ boost::optional<repl::OpTime> TransactionParticipant::Participant::_checkStateme
if (it == p().activeTxnCommittedStatements.end()) {
uassert(ErrorCodes::IncompleteTransactionHistory,
str::stream() << "Incomplete history detected for transaction "
- << o().activeTxnNumber
- << " on session "
- << _sessionId(),
+ << o().activeTxnNumber << " on session " << _sessionId(),
!p().hasIncompleteHistory);
return boost::none;
@@ -2217,45 +2207,45 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
OperationContext* opCtx,
std::vector<StmtId> stmtIdsWritten,
const repl::OpTime& lastStmtIdWriteOpTime) {
- opCtx->recoveryUnit()->onCommit(
- [ opCtx, stmtIdsWritten = std::move(stmtIdsWritten), lastStmtIdWriteOpTime ](
- boost::optional<Timestamp>) {
- TransactionParticipant::Participant participant(opCtx);
- invariant(participant.p().isValid);
-
- RetryableWritesStats::get(opCtx->getServiceContext())
- ->incrementTransactionsCollectionWriteCount();
-
- stdx::lock_guard<Client> lg(*opCtx->getClient());
-
- // The cache of the last written record must always be advanced after a write so that
- // subsequent writes have the correct point to start from.
- participant.o(lg).lastWriteOpTime = lastStmtIdWriteOpTime;
-
- for (const auto stmtId : stmtIdsWritten) {
- if (stmtId == kIncompleteHistoryStmtId) {
- participant.p().hasIncompleteHistory = true;
- continue;
- }
-
- const auto insertRes = participant.p().activeTxnCommittedStatements.emplace(
- stmtId, lastStmtIdWriteOpTime);
- if (!insertRes.second) {
- const auto& existingOpTime = insertRes.first->second;
- fassertOnRepeatedExecution(participant._sessionId(),
- participant.o().activeTxnNumber,
- stmtId,
- existingOpTime,
- lastStmtIdWriteOpTime);
- }
+ opCtx->recoveryUnit()->onCommit([opCtx,
+ stmtIdsWritten = std::move(stmtIdsWritten),
+ lastStmtIdWriteOpTime](boost::optional<Timestamp>) {
+ TransactionParticipant::Participant participant(opCtx);
+ invariant(participant.p().isValid);
+
+ RetryableWritesStats::get(opCtx->getServiceContext())
+ ->incrementTransactionsCollectionWriteCount();
+
+ stdx::lock_guard<Client> lg(*opCtx->getClient());
+
+ // The cache of the last written record must always be advanced after a write so that
+ // subsequent writes have the correct point to start from.
+ participant.o(lg).lastWriteOpTime = lastStmtIdWriteOpTime;
+
+ for (const auto stmtId : stmtIdsWritten) {
+ if (stmtId == kIncompleteHistoryStmtId) {
+ participant.p().hasIncompleteHistory = true;
+ continue;
}
- // If this is the first time executing a retryable write, we should indicate that to
- // the transaction participant.
- if (participant.o(lg).txnState.isNone()) {
- participant.o(lg).txnState.transitionTo(TransactionState::kExecutedRetryableWrite);
+ const auto insertRes =
+ participant.p().activeTxnCommittedStatements.emplace(stmtId, lastStmtIdWriteOpTime);
+ if (!insertRes.second) {
+ const auto& existingOpTime = insertRes.first->second;
+ fassertOnRepeatedExecution(participant._sessionId(),
+ participant.o().activeTxnNumber,
+ stmtId,
+ existingOpTime,
+ lastStmtIdWriteOpTime);
}
- });
+ }
+
+ // If this is the first time executing a retryable write, we should indicate that to
+ // the transaction participant.
+ if (participant.o(lg).txnState.isNone()) {
+ participant.o(lg).txnState.transitionTo(TransactionState::kExecutedRetryableWrite);
+ }
+ });
MONGO_FAIL_POINT_BLOCK(onPrimaryTransactionalWrite, customArgs) {
const auto& data = customArgs.getData();
@@ -2269,9 +2259,9 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
if (!failBeforeCommitExceptionElem.eoo()) {
const auto failureCode = ErrorCodes::Error(int(failBeforeCommitExceptionElem.Number()));
uasserted(failureCode,
- str::stream() << "Failing write for " << _sessionId() << ":"
- << o().activeTxnNumber
- << " due to failpoint. The write must not be reflected.");
+ str::stream()
+ << "Failing write for " << _sessionId() << ":" << o().activeTxnNumber
+ << " due to failpoint. The write must not be reflected.");
}
}
}
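The _registerUpdateCacheOnCommit hunk above shows the companion rule for multi-line lambdas passed as call arguments: 7.x stacks the captures one per line, aligned at the opening bracket, and indents the body from the enclosing statement rather than from the capture column, which is why the whole body shifts left. A reduced sketch of the resulting shape (identifiers are illustrative, not the real ones):

    opCtx->recoveryUnit()->onCommit([opCtx,
                                     ids = std::move(ids),
                                     opTime](boost::optional<Timestamp>) {
        // body indented from the statement, not from the capture list
    });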
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index f4764ad0e8c..047310244f5 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -376,11 +376,11 @@ TEST_F(TxnParticipantTest, StashAndUnstashResources) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -1154,20 +1154,19 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionWhilePreparedTransactionInPr
auto guard = makeGuard([&]() { OperationContextSession::checkOut(opCtx()); });
// Try to start a new transaction while there is already a prepared transaction on the
// session. This should fail with a PreparedTransactionInProgress error.
- runFunctionFromDifferentOpCtx([
- lsid = *opCtx()->getLogicalSessionId(),
- txnNumberToStart = *opCtx()->getTxnNumber() + 1
- ](OperationContext * newOpCtx) {
- newOpCtx->setLogicalSessionId(lsid);
- newOpCtx->setTxnNumber(txnNumberToStart);
-
- MongoDOperationContextSession ocs(newOpCtx);
- auto txnParticipant = TransactionParticipant::get(newOpCtx);
- ASSERT_THROWS_CODE(
- txnParticipant.beginOrContinue(newOpCtx, txnNumberToStart, false, true),
- AssertionException,
- ErrorCodes::PreparedTransactionInProgress);
- });
+ runFunctionFromDifferentOpCtx(
+ [lsid = *opCtx()->getLogicalSessionId(),
+ txnNumberToStart = *opCtx()->getTxnNumber() + 1](OperationContext* newOpCtx) {
+ newOpCtx->setLogicalSessionId(lsid);
+ newOpCtx->setTxnNumber(txnNumberToStart);
+
+ MongoDOperationContextSession ocs(newOpCtx);
+ auto txnParticipant = TransactionParticipant::get(newOpCtx);
+ ASSERT_THROWS_CODE(
+ txnParticipant.beginOrContinue(newOpCtx, txnNumberToStart, false, true),
+ AssertionException,
+ ErrorCodes::PreparedTransactionInProgress);
+ });
}
ASSERT_FALSE(txnParticipant.transactionIsAborted());
@@ -1278,11 +1277,11 @@ TEST_F(TxnParticipantTest, StashInNestedSessionIsANoop) {
// Set the readConcern on the OperationContext.
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash, which sets up a WriteUnitOfWork.
@@ -2672,11 +2671,11 @@ TEST_F(TransactionsMetricsTest, ReportStashedResources) {
std::move(clientMetadata.getValue()));
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -2759,11 +2758,11 @@ TEST_F(TransactionsMetricsTest, ReportUnstashedResources) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Perform initial unstash which sets up a WriteUnitOfWork.
@@ -3108,11 +3107,11 @@ TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogAfterCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
@@ -3148,11 +3147,11 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
@@ -3190,11 +3189,11 @@ TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogAfterAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3231,11 +3230,11 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
// Prepare the transaction and extend the duration in the prepared state.
@@ -3269,11 +3268,11 @@ DEATH_TEST_F(TransactionsMetricsTest, TestTransactionInfoForLogWithNoLockerInfoS
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3293,11 +3292,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3331,11 +3330,11 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowCommit) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3368,11 +3367,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3411,11 +3410,11 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3457,11 +3456,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterExceptionInPrepare) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3504,11 +3503,11 @@ TEST_F(TransactionsMetricsTest, LogTransactionInfoAfterSlowStashedAbort) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
@@ -3589,11 +3588,11 @@ TEST_F(TxnParticipantTest, RollbackResetsInMemoryStateOfPreparedTransaction) {
auto sessionCheckout = checkOutSession();
repl::ReadConcernArgs readConcernArgs;
- ASSERT_OK(readConcernArgs.initialize(BSON("find"
- << "test"
- << repl::ReadConcernArgs::kReadConcernFieldName
- << BSON(repl::ReadConcernArgs::kLevelFieldName
- << "snapshot"))));
+ ASSERT_OK(
+ readConcernArgs.initialize(BSON("find"
+ << "test" << repl::ReadConcernArgs::kReadConcernFieldName
+ << BSON(repl::ReadConcernArgs::kLevelFieldName
+ << "snapshot"))));
repl::ReadConcernArgs::get(opCtx()) = readConcernArgs;
auto txnParticipant = TransactionParticipant::get(opCtx());
diff --git a/src/mongo/db/update/addtoset_node.cpp b/src/mongo/db/update/addtoset_node.cpp
index 4805ae5c825..b12c4ceeb9b 100644
--- a/src/mongo/db/update/addtoset_node.cpp
+++ b/src/mongo/db/update/addtoset_node.cpp
@@ -108,8 +108,7 @@ ModifierNode::ModifyResult AddToSetNode::updateExistingElement(
mutablebson::Element* element, std::shared_ptr<FieldRef> elementPath) const {
uassert(ErrorCodes::BadValue,
str::stream() << "Cannot apply $addToSet to non-array field. Field named '"
- << element->getFieldName()
- << "' has non-array type "
+ << element->getFieldName() << "' has non-array type "
<< typeName(element->getType()),
element->getType() == BSONType::Array);
diff --git a/src/mongo/db/update/addtoset_node_test.cpp b/src/mongo/db/update/addtoset_node_test.cpp
index 9c3bfc283a5..0aaf434fcdb 100644
--- a/src/mongo/db/update/addtoset_node_test.cpp
+++ b/src/mongo/db/update/addtoset_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using AddToSetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(AddToSetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$addToSet: {}}");
diff --git a/src/mongo/db/update/arithmetic_node.cpp b/src/mongo/db/update/arithmetic_node.cpp
index 304b0261e90..58c0d4a27ab 100644
--- a/src/mongo/db/update/arithmetic_node.cpp
+++ b/src/mongo/db/update/arithmetic_node.cpp
@@ -55,9 +55,7 @@ Status ArithmeticNode::init(BSONElement modExpr,
if (!modExpr.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot " << getNameForOp(_op)
- << " with non-numeric argument: {"
- << modExpr
- << "}");
+ << " with non-numeric argument: {" << modExpr << "}");
}
_val = modExpr;
@@ -72,10 +70,8 @@ ModifierNode::ModifyResult ArithmeticNode::updateExistingElement(
str::stream() << "Cannot apply " << operatorName()
<< " to a value of non-numeric type. {"
<< (idElem.ok() ? idElem.toString() : "no id")
- << "} has the field '"
- << element->getFieldName()
- << "' of non-numeric type "
- << typeName(element->getType()));
+ << "} has the field '" << element->getFieldName()
+ << "' of non-numeric type " << typeName(element->getType()));
}
SafeNum originalValue = element->getValueSafeNum();
@@ -97,10 +93,8 @@ ModifierNode::ModifyResult ArithmeticNode::updateExistingElement(
auto idElem = mutablebson::findFirstChildNamed(element->getDocument().root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "Failed to apply " << operatorName()
- << " operations to current value ("
- << originalValue.debugString()
- << ") for document {"
- << (idElem.ok() ? idElem.toString() : "no id")
+ << " operations to current value (" << originalValue.debugString()
+ << ") for document {" << (idElem.ok() ? idElem.toString() : "no id")
<< "}");
} else {
invariant(element->setValueSafeNum(valueToSet));
diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp
index d18cc4f1314..2783a32d547 100644
--- a/src/mongo/db/update/arithmetic_node_test.cpp
+++ b/src/mongo/db/update/arithmetic_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using ArithmeticNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(ArithmeticNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$inc: {}}");
diff --git a/src/mongo/db/update/bit_node.cpp b/src/mongo/db/update/bit_node.cpp
index 67a334970fc..19f7a560846 100644
--- a/src/mongo/db/update/bit_node.cpp
+++ b/src/mongo/db/update/bit_node.cpp
@@ -60,9 +60,7 @@ Status BitNode::init(BSONElement modExpr, const boost::intrusive_ptr<ExpressionC
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier only supports 'and', 'or', and 'xor', not '"
- << payloadFieldName
- << "' which is an unknown operator: {"
- << curOp
+ << payloadFieldName << "' which is an unknown operator: {" << curOp
<< "}");
}
@@ -70,9 +68,7 @@ Status BitNode::init(BSONElement modExpr, const boost::intrusive_ptr<ExpressionC
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier field must be an Integer(32/64 bit); a '"
- << typeName(curOp.type())
- << "' is not supported here: {"
- << curOp
+ << typeName(curOp.type()) << "' is not supported here: {" << curOp
<< "}");
}
@@ -97,11 +93,8 @@ ModifierNode::ModifyResult BitNode::updateExistingElement(
mutablebson::findFirstChildNamed(element->getDocument().root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot apply $bit to a value of non-integral type."
- << idElem.toString()
- << " has the field "
- << element->getFieldName()
- << " of non-integer type "
- << typeName(element->getType()));
+ << idElem.toString() << " has the field " << element->getFieldName()
+ << " of non-integer type " << typeName(element->getType()));
}
SafeNum value = applyOpList(element->getValueSafeNum());
diff --git a/src/mongo/db/update/bit_node.h b/src/mongo/db/update/bit_node.h
index 96840fdec1e..91b7181a116 100644
--- a/src/mongo/db/update/bit_node.h
+++ b/src/mongo/db/update/bit_node.h
@@ -71,7 +71,7 @@ private:
BSONObjBuilder bob;
{
BSONObjBuilder subBuilder(bob.subobjStart(""));
- for (const auto[bitOperator, operand] : _opList) {
+ for (const auto [bitOperator, operand] : _opList) {
operand.toBSON(
[](SafeNum (SafeNum::*bitOperator)(const SafeNum&) const) {
if (bitOperator == &SafeNum::bitAnd)
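The bit_node.h hunk records a lexing fix in 7.x: a C++17 structured binding after const auto was previously run together as auto[bitOperator, operand], as if it were a subscript; the new release separates the binding with a space. A short sketch:

    std::vector<std::pair<int, int>> ops = {{1, 2}};
    for (const auto [op, val] : ops) {
        // 7.x emits 'auto [op, val]', not 'auto[op, val]'
    }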
diff --git a/src/mongo/db/update/bit_node_test.cpp b/src/mongo/db/update/bit_node_test.cpp
index 78734dd63dd..488ad971e5d 100644
--- a/src/mongo/db/update/bit_node_test.cpp
+++ b/src/mongo/db/update/bit_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using BitNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(BitNodeTest, InitWithDoubleFails) {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
@@ -292,4 +292,4 @@ TEST_F(BitNodeTest, ApplyRepeatedBitOps) {
}
} // namespace
-} // namepace mongo
+} // namespace mongo
diff --git a/src/mongo/db/update/compare_node_test.cpp b/src/mongo/db/update/compare_node_test.cpp
index b500701cf2d..05c5d9ee68a 100644
--- a/src/mongo/db/update/compare_node_test.cpp
+++ b/src/mongo/db/update/compare_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using CompareNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(CompareNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$max: {}}");
diff --git a/src/mongo/db/update/current_date_node_test.cpp b/src/mongo/db/update/current_date_node_test.cpp
index 7bd11c9140b..e16a2cdbe46 100644
--- a/src/mongo/db/update/current_date_node_test.cpp
+++ b/src/mongo/db/update/current_date_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using CurrentDateNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(CurrentDateNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$currentDate: {}}");
@@ -286,4 +286,4 @@ TEST_F(CurrentDateNodeTest, ApplyNoIndexDataOrLogBuilder) {
}
} // namespace
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/update/field_checker_test.cpp b/src/mongo/db/update/field_checker_test.cpp
index 99deff9fb07..d95b2bc681f 100644
--- a/src/mongo/db/update/field_checker_test.cpp
+++ b/src/mongo/db/update/field_checker_test.cpp
@@ -38,9 +38,9 @@ namespace {
using mongo::ErrorCodes;
using mongo::FieldRef;
-using mongo::fieldchecker::isUpdatable;
-using mongo::fieldchecker::isPositional;
using mongo::Status;
+using mongo::fieldchecker::isPositional;
+using mongo::fieldchecker::isUpdatable;
TEST(IsUpdatable, Basics) {
FieldRef fieldRef("x");
diff --git a/src/mongo/db/update/log_builder.cpp b/src/mongo/db/update/log_builder.cpp
index e78cd295b1f..5fbd6514791 100644
--- a/src/mongo/db/update/log_builder.cpp
+++ b/src/mongo/db/update/log_builder.cpp
@@ -89,11 +89,9 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const mutablebson:
mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '" << name
- << "' element from existing element '"
- << val.getFieldName()
- << "' of type "
- << typeName(val.getType()));
+ str::stream()
+ << "Could not create new '" << name << "' element from existing element '"
+ << val.getFieldName() << "' of type " << typeName(val.getType()));
return addToSets(elemToSet);
}
@@ -102,11 +100,9 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const BSONElement&
mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '" << name
- << "' element from existing element '"
- << val.fieldName()
- << "' of type "
- << typeName(val.type()));
+ str::stream()
+ << "Could not create new '" << name << "' element from existing element '"
+ << val.fieldName() << "' of type " << typeName(val.type()));
return addToSets(elemToSet);
}
diff --git a/src/mongo/db/update/modifier_node.cpp b/src/mongo/db/update/modifier_node.cpp
index 674a2d8e361..dd0341255a3 100644
--- a/src/mongo/db/update/modifier_node.cpp
+++ b/src/mongo/db/update/modifier_node.cpp
@@ -66,10 +66,8 @@ void checkImmutablePathsNotModifiedFromOriginal(mutablebson::Element element,
if (prefixSize == (*immutablePath)->numParts()) {
uasserted(ErrorCodes::ImmutableField,
str::stream() << "Updating the path '" << pathTaken->dottedField() << "' to "
- << element.toString()
- << " would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'");
+ << element.toString() << " would modify the immutable field '"
+ << (*immutablePath)->dottedField() << "'");
}
// If 'pathTaken' is a strict prefix of 'immutablePath', then we may have modified
@@ -106,8 +104,7 @@ void checkImmutablePathsNotModifiedFromOriginal(mutablebson::Element element,
uassert(ErrorCodes::ImmutableField,
str::stream() << "After applying the update, the immutable field '"
<< (*immutablePath)->dottedField()
- << "' was found to have been altered to "
- << newElem.toString(),
+ << "' was found to have been altered to " << newElem.toString(),
newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
}
}
@@ -137,8 +134,7 @@ void checkImmutablePathsNotModified(mutablebson::Element element,
uassert(ErrorCodes::ImmutableField,
str::stream() << "Performing an update on the path '" << pathTaken->dottedField()
<< "' would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'",
+ << (*immutablePath)->dottedField() << "'",
pathTaken->commonPrefixSize(**immutablePath) <
std::min(pathTaken->numParts(), (*immutablePath)->numParts()));
}
@@ -265,12 +261,10 @@ UpdateExecutor::ApplyResult ModifierNode::applyToNonexistentElement(
// because we just created this element.)
uassert(ErrorCodes::ImmutableField,
str::stream() << "Updating the path '"
- << updateNodeApplyParams.pathTaken->dottedField()
- << "' to "
+ << updateNodeApplyParams.pathTaken->dottedField() << "' to "
<< applyParams.element.toString()
<< " would modify the immutable field '"
- << (*immutablePath)->dottedField()
- << "'",
+ << (*immutablePath)->dottedField() << "'",
updateNodeApplyParams.pathTaken->commonPrefixSize(**immutablePath) !=
(*immutablePath)->numParts());
}
diff --git a/src/mongo/db/update/object_replace_executor.cpp b/src/mongo/db/update/object_replace_executor.cpp
index 8a65cd1b0ca..31ea35df114 100644
--- a/src/mongo/db/update/object_replace_executor.cpp
+++ b/src/mongo/db/update/object_replace_executor.cpp
@@ -136,8 +136,7 @@ UpdateExecutor::ApplyResult ObjectReplaceExecutor::applyReplacementUpdate(
uassert(ErrorCodes::ImmutableField,
str::stream() << "After applying the update, the (immutable) field '"
<< (*path)->dottedField()
- << "' was found to have been altered to "
- << newElem.toString(),
+ << "' was found to have been altered to " << newElem.toString(),
newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
}
}
diff --git a/src/mongo/db/update/object_replace_executor_test.cpp b/src/mongo/db/update/object_replace_executor_test.cpp
index cef054fd289..6b0d93f6e46 100644
--- a/src/mongo/db/update/object_replace_executor_test.cpp
+++ b/src/mongo/db/update/object_replace_executor_test.cpp
@@ -42,8 +42,8 @@ namespace mongo {
namespace {
using ObjectReplaceExecutorTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST_F(ObjectReplaceExecutorTest, Noop) {
auto obj = fromjson("{a: 1, b: 2}");
diff --git a/src/mongo/db/update/path_support.cpp b/src/mongo/db/update/path_support.cpp
index 6b9fc80a284..934e17e0006 100644
--- a/src/mongo/db/update/path_support.cpp
+++ b/src/mongo/db/update/path_support.cpp
@@ -53,8 +53,8 @@ Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) {
if (toPad > kMaxPaddingAllowed) {
return Status(ErrorCodes::CannotBackfillArray,
- str::stream() << "can't backfill more than " << kMaxPaddingAllowed
- << " elements");
+ str::stream()
+ << "can't backfill more than " << kMaxPaddingAllowed << " elements");
}
for (size_t i = 0; i < toPad; i++) {
@@ -128,10 +128,8 @@ Status findLongestPrefix(const FieldRef& prefix,
*elemFound = prev;
return Status(ErrorCodes::PathNotViable,
str::stream() << "cannot use the part (" << prefix.getPart(i - 1) << " of "
- << prefix.dottedField()
- << ") to traverse the element ({"
- << curr.toString()
- << "})");
+ << prefix.dottedField() << ") to traverse the element ({"
+ << curr.toString() << "})");
} else if (curr.ok()) {
*idxFound = i - 1;
*elemFound = curr;
@@ -153,9 +151,7 @@ StatusWith<mutablebson::Element> createPathAt(const FieldRef& prefix,
if (elemFound.getType() != BSONType::Object && elemFound.getType() != BSONType::Array) {
return Status(ErrorCodes::PathNotViable,
str::stream() << "Cannot create field '" << prefix.getPart(idxFound)
- << "' in element {"
- << elemFound.toString()
- << "}");
+ << "' in element {" << elemFound.toString() << "}");
}
// Sanity check that 'idxFound' is an actual part.
@@ -175,9 +171,7 @@ StatusWith<mutablebson::Element> createPathAt(const FieldRef& prefix,
if (!newIdx) {
return Status(ErrorCodes::PathNotViable,
str::stream() << "Cannot create field '" << prefix.getPart(idxFound)
- << "' in element {"
- << elemFound.toString()
- << "}");
+ << "' in element {" << elemFound.toString() << "}");
}
status = maybePadTo(&elemFound, *newIdx);
diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp
index 78f721e558d..bed1be8cadb 100644
--- a/src/mongo/db/update/path_support_test.cpp
+++ b/src/mongo/db/update/path_support_test.cpp
@@ -58,10 +58,10 @@ namespace {
using namespace mongo;
using namespace pathsupport;
-using str::stream;
using mutablebson::Element;
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
+using str::stream;
class EmptyDoc : public mongo::unittest::Test {
public:
@@ -607,9 +607,7 @@ static void assertContains(const EqualityMatches& equalities, const BSONObj& wra
&SimpleStringDataComparator::kInstance);
if (eltCmp.evaluate(it->second->getData() != value)) {
FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
- << it->second->getData()
- << ", not value "
- << value);
+ << it->second->getData() << ", not value " << value);
}
}
@@ -899,19 +897,14 @@ static void assertParent(const EqualityMatches& equalities,
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
if (foundParentPath != parentPath) {
FAIL(stream() << "Equality match parent at path \"" << foundParentPath
- << "\" does not match \""
- << parentPath
- << "\"");
+ << "\" does not match \"" << parentPath << "\"");
}
BSONElementComparator eltCmp(BSONElementComparator::FieldNamesMode::kIgnore,
&SimpleStringDataComparator::kInstance);
if (eltCmp.evaluate(parentEl != value)) {
FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \"" << parentPath
- << "\" contains value "
- << parentEl
- << ", not value "
- << value);
+ << "\" contains value " << parentEl << ", not value " << value);
}
}
@@ -931,8 +924,7 @@ static void assertNoParent(const EqualityMatches& equalities, StringData pathStr
if (!parentEl.eoo()) {
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
- << foundParentPath
- << "\"");
+ << foundParentPath << "\"");
}
}
diff --git a/src/mongo/db/update/pipeline_executor_test.cpp b/src/mongo/db/update/pipeline_executor_test.cpp
index 2a10c292532..1c5c4297485 100644
--- a/src/mongo/db/update/pipeline_executor_test.cpp
+++ b/src/mongo/db/update/pipeline_executor_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PipelineExecutorTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST_F(PipelineExecutorTest, Noop) {
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
diff --git a/src/mongo/db/update/pop_node.cpp b/src/mongo/db/update/pop_node.cpp
index 35ff39204af..3d4355793f1 100644
--- a/src/mongo/db/update/pop_node.cpp
+++ b/src/mongo/db/update/pop_node.cpp
@@ -54,8 +54,7 @@ ModifierNode::ModifyResult PopNode::updateExistingElement(
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Path '" << elementPath->dottedField()
<< "' contains an element of non-array type '"
- << typeName(element->getType())
- << "'",
+ << typeName(element->getType()) << "'",
element->getType() == BSONType::Array);
if (!element->hasChildren()) {
diff --git a/src/mongo/db/update/pull_node_test.cpp b/src/mongo/db/update/pull_node_test.cpp
index b9092a98927..39f41ba06f1 100644
--- a/src/mongo/db/update/pull_node_test.cpp
+++ b/src/mongo/db/update/pull_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PullNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(PullNodeTest, InitWithBadMatchExpressionFails) {
auto update = fromjson("{$pull: {a: {b: {$foo: 1}}}}");
diff --git a/src/mongo/db/update/pullall_node.cpp b/src/mongo/db/update/pullall_node.cpp
index c082823657c..88f0e8bc6eb 100644
--- a/src/mongo/db/update/pullall_node.cpp
+++ b/src/mongo/db/update/pullall_node.cpp
@@ -48,7 +48,7 @@ public:
bool match(const mutablebson::ConstElement& element) final {
return std::any_of(_elementsToMatch.begin(),
_elementsToMatch.end(),
- [&element, collator{_collator} ](const auto& elementToMatch) {
+ [&element, collator{_collator}](const auto& elementToMatch) {
return element.compareWithBSONElement(
elementToMatch, collator, false) == 0;
});
diff --git a/src/mongo/db/update/pullall_node_test.cpp b/src/mongo/db/update/pullall_node_test.cpp
index 60b09e7b77d..dd77b411dcf 100644
--- a/src/mongo/db/update/pullall_node_test.cpp
+++ b/src/mongo/db/update/pullall_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PullAllNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(PullAllNodeTest, InitWithIntFails) {
auto update = fromjson("{$pullAll: {a: 1}}");
diff --git a/src/mongo/db/update/push_node.cpp b/src/mongo/db/update/push_node.cpp
index 6702af4fec3..a4a79fb6e5a 100644
--- a/src/mongo/db/update/push_node.cpp
+++ b/src/mongo/db/update/push_node.cpp
@@ -292,10 +292,8 @@ ModifierNode::ModifyResult PushNode::performPush(mutablebson::Element* element,
uasserted(ErrorCodes::BadValue,
str::stream() << "The field '" << elementPath->dottedField() << "'"
<< " must be an array but is of type "
- << typeName(element->getType())
- << " in document {"
- << (idElem.ok() ? idElem.toString() : "no id")
- << "}");
+ << typeName(element->getType()) << " in document {"
+ << (idElem.ok() ? idElem.toString() : "no id") << "}");
}
auto result = insertElementsWithPosition(element, _position, _valuesToPush);
diff --git a/src/mongo/db/update/push_node_test.cpp b/src/mongo/db/update/push_node_test.cpp
index d0ef73e22e5..985ee81ca2c 100644
--- a/src/mongo/db/update/push_node_test.cpp
+++ b/src/mongo/db/update/push_node_test.cpp
@@ -44,8 +44,8 @@ namespace mongo {
namespace {
using PushNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(PushNodeTest, EachClauseWithNonArrayObjectFails) {
auto update = fromjson("{$push: {x: {$each: {'0': 1}}}}");
@@ -670,12 +670,9 @@ void checkDocumentAndResult(BSONObj updateModifier,
FAIL(str::stream() << "apply() failure for " << updateModifier << ". Expected "
<< expectedDocument
<< " (noop = false, indexesAffected = false) but got "
- << actualDocument.toString()
- << " (noop = "
- << (applyResult.noop ? "true" : "false")
- << ", indexesAffected = "
- << (applyResult.indexesAffected ? "true" : "false")
- << ").");
+ << actualDocument.toString() << " (noop = "
+ << (applyResult.noop ? "true" : "false") << ", indexesAffected = "
+ << (applyResult.indexesAffected ? "true" : "false") << ").");
}
}
@@ -828,9 +825,7 @@ TEST_F(PushNodeTest, ApplyToPopulatedArrayWithSortAndSliceValues) {
auto update =
BSON("$push" << BSON("a" << BSON("$each" << BSON_ARRAY(BSON("a" << 2 << "b" << 1)
<< BSON("a" << 1 << "b" << 1))
- << "$slice"
- << data.sliceValue
- << "$sort"
+ << "$slice" << data.sliceValue << "$sort"
<< data.sortOrder)));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
PushNode node;
diff --git a/src/mongo/db/update/rename_node.cpp b/src/mongo/db/update/rename_node.cpp
index 4177a5f446d..fe9e12650ef 100644
--- a/src/mongo/db/update/rename_node.cpp
+++ b/src/mongo/db/update/rename_node.cpp
@@ -133,8 +133,8 @@ Status RenameNode::init(BSONElement modExpr,
// Though we could treat this as a no-op, it is illegal in the current implementation.
if (fromFieldRef == toFieldRef) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The source and target field for $rename must differ: "
- << modExpr);
+ str::stream()
+ << "The source and target field for $rename must differ: " << modExpr);
}
if (fromFieldRef.isPrefixOf(toFieldRef) || toFieldRef.isPrefixOf(fromFieldRef)) {
@@ -203,12 +203,10 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams,
auto idElem = mutablebson::findFirstChildNamed(document.root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "The source field cannot be an array element, '"
- << fromFieldRef->dottedField()
- << "' in doc with "
+ << fromFieldRef->dottedField() << "' in doc with "
<< (idElem.ok() ? idElem.toString() : "no id")
<< " has an array field called '"
- << currentElement.getFieldName()
- << "'");
+ << currentElement.getFieldName() << "'");
}
}
@@ -225,12 +223,10 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams,
auto idElem = mutablebson::findFirstChildNamed(document.root(), "_id");
uasserted(ErrorCodes::BadValue,
str::stream() << "The destination field cannot be an array element, '"
- << toFieldRef.dottedField()
- << "' in doc with "
+ << toFieldRef.dottedField() << "' in doc with "
<< (idElem.ok() ? idElem.toString() : "no id")
<< " has an array field called '"
- << currentElement.getFieldName()
- << "'");
+ << currentElement.getFieldName() << "'");
}
}
diff --git a/src/mongo/db/update/rename_node_test.cpp b/src/mongo/db/update/rename_node_test.cpp
index 93ddfd61714..6eec4d8f498 100644
--- a/src/mongo/db/update/rename_node_test.cpp
+++ b/src/mongo/db/update/rename_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using RenameNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
TEST(RenameNodeTest, PositionalNotAllowedInFromField) {
auto update = fromjson("{$rename: {'a.$': 'b'}}");
@@ -476,8 +476,7 @@ TEST_F(RenameNodeTest, ApplyCanRemoveRequiredPartOfDBRefIfValidateForStorageIsFa
ASSERT_TRUE(result.indexesAffected);
auto updated = BSON("a" << BSON("$ref"
<< "c")
- << "b"
- << 0);
+ << "b" << 0);
ASSERT_EQUALS(updated, doc);
ASSERT_FALSE(doc.isInPlaceModeEnabled());
ASSERT_EQUALS(fromjson("{$set: {'b': 0}, $unset: {'a.$id': true}}"), getLogDoc());
diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp
index f7280e83110..8f160c4fe13 100644
--- a/src/mongo/db/update/set_node_test.cpp
+++ b/src/mongo/db/update/set_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using SetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(SetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$set: {}}");
diff --git a/src/mongo/db/update/storage_validation.cpp b/src/mongo/db/update/storage_validation.cpp
index ce5147f42e2..009343776f0 100644
--- a/src/mongo/db/update/storage_validation.cpp
+++ b/src/mongo/db/update/storage_validation.cpp
@@ -104,8 +104,7 @@ void validateDollarPrefixElement(mutablebson::ConstElement elem) {
// Not an okay, $ prefixed field name.
uasserted(ErrorCodes::DollarPrefixedFieldName,
str::stream() << "The dollar ($) prefixed field '" << elem.getFieldName()
- << "' in '"
- << mutablebson::getFullName(elem)
+ << "' in '" << mutablebson::getFullName(elem)
<< "' is not valid for storage.");
}
}
diff --git a/src/mongo/db/update/unset_node_test.cpp b/src/mongo/db/update/unset_node_test.cpp
index 346c5e4551c..09788ef573b 100644
--- a/src/mongo/db/update/unset_node_test.cpp
+++ b/src/mongo/db/update/unset_node_test.cpp
@@ -43,8 +43,8 @@ namespace mongo {
namespace {
using UnsetNodeTest = UpdateNodeTest;
-using mongo::mutablebson::Element;
using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Element;
DEATH_TEST(UnsetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok()") {
auto update = fromjson("{$unset: {}}");
diff --git a/src/mongo/db/update/update_array_node.h b/src/mongo/db/update/update_array_node.h
index 0c0ec5550d8..67af11a6cd3 100644
--- a/src/mongo/db/update/update_array_node.h
+++ b/src/mongo/db/update/update_array_node.h
@@ -86,7 +86,7 @@ public:
FieldRef* currentPath,
std::map<std::string, std::vector<std::pair<std::string, BSONObj>>>*
operatorOrientedUpdates) const final {
- for (const auto & [ pathSuffix, child ] : _children) {
+ for (const auto& [pathSuffix, child] : _children) {
FieldRef::FieldRefTempAppend tempAppend(*currentPath,
toArrayFilterIdentifier(pathSuffix));
child->produceSerializationMap(currentPath, operatorOrientedUpdates);
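
clang-format-7.0.1 also parses C++17 structured bindings properly, so the binding list is no longer spaced as though it were an array subscript, and the ampersand attaches to auto in line with the codebase's reference-alignment style. The effect in isolation (a hypothetical loop, assuming a map-like _children and an illustrative use() helper):

    // Before: the binding list was spaced like an attribute or subscript.
    for (const auto & [ pathSuffix, child ] : _children) {
        use(pathSuffix, child);
    }

    // After: the brackets hug the names and '&' binds to auto.
    for (const auto& [pathSuffix, child] : _children) {
        use(pathSuffix, child);
    }
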
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index 3f2024dba1c..0349deb4e8f 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -76,26 +76,21 @@ modifiertable::ModifierType validateMod(BSONElement mod) {
uassert(
ErrorCodes::FailedToParse,
str::stream()
- << "Unknown modifier: "
- << mod.fieldName()
+ << "Unknown modifier: " << mod.fieldName()
<< ". Expected a valid update modifier or pipeline-style update specified as an array",
modType != modifiertable::MOD_UNKNOWN);
uassert(ErrorCodes::FailedToParse,
str::stream() << "Modifiers operate on fields but we found type "
- << typeName(mod.type())
- << " instead. For example: {$mod: {<field>: ...}}"
- << " not {"
- << mod
- << "}",
+ << typeName(mod.type()) << " instead. For example: {$mod: {<field>: ...}}"
+ << " not {" << mod << "}",
mod.type() == BSONType::Object);
uassert(ErrorCodes::FailedToParse,
str::stream() << "'" << mod.fieldName()
<< "' is empty. You must specify a field like so: "
"{"
- << mod.fieldName()
- << ": {<field>: ...}}",
+ << mod.fieldName() << ": {<field>: ...}}",
!mod.embeddedObject().isEmpty());
return modType;
@@ -134,8 +129,7 @@ bool parseUpdateExpression(
for (const auto& arrayFilter : arrayFilters) {
uassert(ErrorCodes::FailedToParse,
str::stream() << "The array filter for identifier '" << arrayFilter.first
- << "' was not used in the update "
- << updateExpr,
+ << "' was not used in the update " << updateExpr,
foundIdentifiers.find(arrayFilter.first.toString()) != foundIdentifiers.end());
}
diff --git a/src/mongo/db/update/update_leaf_node.cpp b/src/mongo/db/update/update_leaf_node.cpp
index 5d1f8931b53..b09919772a2 100644
--- a/src/mongo/db/update/update_leaf_node.cpp
+++ b/src/mongo/db/update/update_leaf_node.cpp
@@ -52,13 +52,9 @@ void UpdateLeafNode::checkViability(mutablebson::Element element,
} else {
uasserted(ErrorCodes::PathNotViable,
str::stream() << "Cannot use the part (" << pathToCreate.getPart(0) << ") of ("
- << pathTaken.dottedField()
- << "."
- << pathToCreate.dottedField()
- << ") to traverse the element ({"
- << element.toString()
- << "})");
+ << pathTaken.dottedField() << "." << pathToCreate.dottedField()
+ << ") to traverse the element ({" << element.toString() << "})");
}
}
-} // namespace
+} // namespace mongo
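
The comment rewrite above comes from clang-format's FixNamespaceComments option, which annotates the closing brace of a namespace with the namespace it actually closes: bare closing braces gain a comment (many lone "}" lines below become "} // namespace mongo"), and existing comments with a wrong or abbreviated name, such as the "} // namespace" here that mislabeled namespace mongo, are corrected. A minimal sketch, illustrative only:

    // Input:
    namespace mongo {
    void checkViability();
    }  // namespace

    // After clang-format-7.0.1, the comment names the enclosing namespace:
    namespace mongo {
    void checkViability();
    }  // namespace mongo
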
diff --git a/src/mongo/db/update/update_object_node.cpp b/src/mongo/db/update/update_object_node.cpp
index 3ca3a85f797..4686d2895f7 100644
--- a/src/mongo/db/update/update_object_node.cpp
+++ b/src/mongo/db/update/update_object_node.cpp
@@ -61,8 +61,7 @@ StatusWith<std::string> parseArrayFilterIdentifier(
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot have array filter identifier (i.e. '$[<id>]') "
"element in the first position in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
auto identifier = field.substr(2, field.size() - 3);
@@ -70,9 +69,7 @@ StatusWith<std::string> parseArrayFilterIdentifier(
if (!identifier.empty() && arrayFilters.find(identifier) == arrayFilters.end()) {
return Status(ErrorCodes::BadValue,
str::stream() << "No array filter found for identifier '" << identifier
- << "' in path '"
- << fieldRef.dottedField()
- << "'");
+ << "' in path '" << fieldRef.dottedField() << "'");
}
if (!identifier.empty()) {
@@ -189,7 +186,7 @@ void applyChild(const UpdateNode& child,
BSONObj makeBSONForOperator(const std::vector<std::pair<std::string, BSONObj>>& updatesForOp) {
BSONObjBuilder bob;
- for (const auto & [ path, value ] : updatesForOp)
+ for (const auto& [path, value] : updatesForOp)
bob << path << value.firstElement();
return bob.obj();
}
@@ -227,8 +224,8 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
// be a string value.
if (BSONType::String != modExpr.type()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The 'to' field for $rename must be a string: "
- << modExpr);
+ str::stream()
+ << "The 'to' field for $rename must be a string: " << modExpr);
}
fieldRef.parse(modExpr.valueStringData());
@@ -249,8 +246,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
if (positional && positionalCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
if (positional && positionalIndex == 0) {
@@ -258,8 +254,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
ErrorCodes::BadValue,
str::stream()
<< "Cannot have positional (i.e. '$') element in the first position in path '"
- << fieldRef.dottedField()
- << "'");
+ << fieldRef.dottedField() << "'");
}
// Construct and initialize the leaf node.
@@ -297,8 +292,7 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
return Status(ErrorCodes::ConflictingUpdateOperators,
str::stream() << "Updating the path '" << fieldRef.dottedField()
<< "' would create a conflict at '"
- << fieldRef.dottedSubstring(0, i + 1)
- << "'");
+ << fieldRef.dottedSubstring(0, i + 1) << "'");
}
} else {
std::unique_ptr<UpdateInternalNode> ownedChild;
@@ -334,10 +328,9 @@ StatusWith<bool> UpdateObjectNode::parseAndMerge(
if (current->getChild(childName)) {
return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Updating the path '" << fieldRef.dottedField()
- << "' would create a conflict at '"
- << fieldRef.dottedField()
- << "'");
+ str::stream()
+ << "Updating the path '" << fieldRef.dottedField()
+ << "' would create a conflict at '" << fieldRef.dottedField() << "'");
}
current->setChild(std::move(childName), std::move(leaf));
@@ -388,12 +381,12 @@ BSONObj UpdateObjectNode::serialize() const {
BSONObjBuilder bob;
- for (const auto & [ pathPrefix, child ] : _children) {
+ for (const auto& [pathPrefix, child] : _children) {
auto path = FieldRef(pathPrefix);
child->produceSerializationMap(&path, &operatorOrientedUpdates);
}
- for (const auto & [ op, updates ] : operatorOrientedUpdates)
+ for (const auto& [op, updates] : operatorOrientedUpdates)
bob << op << makeBSONForOperator(updates);
return bob.obj();
diff --git a/src/mongo/db/update/update_object_node.h b/src/mongo/db/update/update_object_node.h
index 5cbae91f1a5..cbb462da152 100644
--- a/src/mongo/db/update/update_object_node.h
+++ b/src/mongo/db/update/update_object_node.h
@@ -111,7 +111,7 @@ public:
FieldRef* currentPath,
std::map<std::string, std::vector<std::pair<std::string, BSONObj>>>*
operatorOrientedUpdates) const final {
- for (const auto & [ pathSuffix, child ] : _children) {
+ for (const auto& [pathSuffix, child] : _children) {
FieldRef::FieldRefTempAppend tempAppend(*currentPath, pathSuffix);
child->produceSerializationMap(currentPath, operatorOrientedUpdates);
}
diff --git a/src/mongo/db/update/update_serialization_test.cpp b/src/mongo/db/update/update_serialization_test.cpp
index 046efec9825..89ae2ac03c4 100644
--- a/src/mongo/db/update/update_serialization_test.cpp
+++ b/src/mongo/db/update/update_serialization_test.cpp
@@ -248,4 +248,4 @@ TEST(UpdateSerialization, CompoundStatementsSerialize) {
}
} // namespace
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp
index 539fcc27b67..8aad16e5552 100644
--- a/src/mongo/db/update_index_data.cpp
+++ b/src/mongo/db/update_index_data.cpp
@@ -112,4 +112,4 @@ FieldRef UpdateIndexData::getCanonicalIndexField(const FieldRef& path) {
return buf;
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data.h b/src/mongo/db/update_index_data.h
index aee2c968742..9477eab10e1 100644
--- a/src/mongo/db/update_index_data.h
+++ b/src/mongo/db/update_index_data.h
@@ -83,4 +83,4 @@ private:
bool _allPathsIndexed;
};
-}
+} // namespace mongo
diff --git a/src/mongo/db/update_index_data_test.cpp b/src/mongo/db/update_index_data_test.cpp
index c55f0235d09..ae230e70f30 100644
--- a/src/mongo/db/update_index_data_test.cpp
+++ b/src/mongo/db/update_index_data_test.cpp
@@ -129,4 +129,4 @@ TEST(UpdateIndexDataTest, CanonicalIndexFieldForNestedNumericFieldNames) {
ASSERT_EQ(UpdateIndexData::getCanonicalIndexField(FieldRef("a.0.b.1.2")), FieldRef("a.b"_sd));
ASSERT_EQ(UpdateIndexData::getCanonicalIndexField(FieldRef("a.01.02.b.c")), FieldRef("a"_sd));
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 193f7a6f432..3d969308c9a 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -170,9 +170,7 @@ BSONObj DurableViewCatalogImpl::_validateViewDefinition(OperationContext* opCtx,
uassert(ErrorCodes::InvalidViewDefinition,
str::stream() << "found invalid view definition " << viewDefinition["_id"]
- << " while reading '"
- << _db->getSystemViewsName()
- << "'",
+ << " while reading '" << _db->getSystemViewsName() << "'",
valid);
return viewDefinition;
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index b15ccab582d..a4b5111419a 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -57,9 +57,8 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithEmptyPipelineOnNoOpViewYieldsEmpty
AggregationRequest requestOnView{viewNss, emptyPipeline};
auto result = resolvedView.asExpandedViewAggregation(requestOnView);
- BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
- << kDefaultCursorOptionDocument);
+ BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray()
+ << "cursor" << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
@@ -72,8 +71,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithNonemptyPipelineAppendsToViewPipel
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
- << "cursor"
- << kDefaultCursorOptionDocument);
+ << "cursor" << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.serializeToCommandObj().toBson(), expected);
}
@@ -216,9 +214,8 @@ TEST(ResolvedViewTest, FromBSONFailsOnInvalidPipelineType) {
}
TEST(ResolvedViewTest, FromBSONFailsOnInvalidCollationType) {
- BSONObj badCmdResponse =
- BSON("resolvedView" << BSON(
- "ns" << backingNss.ns() << "pipeline" << BSONArray() << "collation" << 1));
+ BSONObj badCmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline"
+ << BSONArray() << "collation" << 1));
ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40639);
}
@@ -234,10 +231,10 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesEmptyBSONArrayIntoEmptyVector)
}
TEST(ResolvedViewTest, FromBSONSuccessfullyParsesCollation) {
- BSONObj cmdResponse = BSON(
- "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray() << "collation"
- << BSON("locale"
- << "fil")));
+ BSONObj cmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline"
+ << BSONArray() << "collation"
+ << BSON("locale"
+ << "fil")));
const ResolvedView result = ResolvedView::fromBSON(cmdResponse);
ASSERT_EQ(result.getNamespace(), backingNss);
ASSERT(std::equal(emptyPipeline.begin(),
@@ -257,8 +254,7 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesPopulatedBSONArrayIntoVector) {
BSONArray pipeline = BSON_ARRAY(matchStage << sortStage << limitStage);
BSONObj cmdResponse = BSON("resolvedView" << BSON("ns"
<< "testdb.testcoll"
- << "pipeline"
- << pipeline));
+ << "pipeline" << pipeline));
const ResolvedView result = ResolvedView::fromBSON(cmdResponse);
ASSERT_EQ(result.getNamespace(), backingNss);
@@ -274,8 +270,7 @@ TEST(ResolvedViewTest, IsResolvedViewErrorResponseDetectsKickbackErrorCodeSucces
BSONObj errorResponse =
BSON("ok" << 0 << "code" << ErrorCodes::CommandOnShardedViewNotSupportedOnMongod << "errmsg"
<< "This view is sharded and cannot be run on mongod"
- << "resolvedView"
- << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray()));
+ << "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray()));
auto status = getStatusFromCommandResult(errorResponse);
ASSERT_EQ(status, ErrorCodes::CommandOnShardedViewNotSupportedOnMongod);
ASSERT(status.extraInfo<ResolvedView>());
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 237a9495cf2..6019a012b1a 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -115,8 +115,7 @@ Status ViewCatalog::_reload(WithLock,
return Status(ErrorCodes::InvalidViewDefinition,
str::stream() << "View 'pipeline' entries must be objects, but "
<< viewName.toString()
- << " has a pipeline element of type "
- << stage.type());
+ << " has a pipeline element of type " << stage.type());
}
}
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 78412ada08d..94d3d011c22 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -257,8 +257,7 @@ TEST_F(ViewCatalogFixture, CanCreateViewWithLookupUsingPipelineSyntax) {
<< "fcoll"
<< "as"
<< "as"
- << "pipeline"
- << BSONArray()))),
+ << "pipeline" << BSONArray()))),
emptyCollation));
}
diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp
index 7ecc1544e31..def5d50154d 100644
--- a/src/mongo/db/views/view_graph.cpp
+++ b/src/mongo/db/views/view_graph.cpp
@@ -110,8 +110,7 @@ Status ViewGraph::insertAndValidate(const ViewDefinition& view,
return {ErrorCodes::ViewPipelineMaxSizeExceeded,
str::stream() << "Operation would result in a resolved view pipeline that exceeds "
"the maximum size of "
- << kMaxViewPipelineSizeBytes
- << " bytes"};
+ << kMaxViewPipelineSizeBytes << " bytes"};
}
guard.dismiss();
@@ -217,8 +216,7 @@ Status ViewGraph::_validateParents(uint64_t currentId, int currentDepth, StatsMa
if (size > kMaxViewPipelineSizeBytes) {
return {ErrorCodes::ViewPipelineMaxSizeExceeded,
str::stream() << "View pipeline is too large and exceeds the maximum size of "
- << ViewGraph::kMaxViewPipelineSizeBytes
- << " bytes"};
+ << ViewGraph::kMaxViewPipelineSizeBytes << " bytes"};
}
return Status::OK();
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index a2cc613282d..ca87ea7d50c 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -51,9 +51,9 @@
namespace mongo {
-using std::string;
using repl::OpTime;
using repl::OpTimeAndWallTime;
+using std::string;
static TimerStats gleWtimeStats;
static ServerStatusMetricField<TimerStats> displayGleLatency("getLastError.wtime", &gleWtimeStats);
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index c6a53840094..c3160ef54bd 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -42,14 +42,14 @@
namespace BasicTests {
-using std::unique_ptr;
-using std::shared_ptr;
using std::cout;
using std::dec;
using std::endl;
using std::hex;
+using std::shared_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
class RarelyTest {
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 1c71b2b8e84..bdb97bed3df 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -40,8 +40,8 @@
namespace ClientTests {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
class Base {
@@ -407,4 +407,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace ClientTests
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index a5c7ce5fbf5..a97da36e73c 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -159,7 +159,7 @@ struct Type2 : Base {
ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result["md5"].valuestr());
}
};
-}
+} // namespace FileMD5
namespace SymbolArgument {
// SERVER-16260
@@ -293,12 +293,10 @@ public:
cmd.append("indexes",
BSON_ARRAY(BSON("key" << BSON("loc"
<< "geoHaystack"
- << "z"
- << 1.0)
+ << "z" << 1.0)
<< "name"
<< "loc_geoHaystack_z_1"
- << "bucketSize"
- << static_cast<double>(0.7))));
+ << "bucketSize" << static_cast<double>(0.7))));
BSONObj result;
ASSERT(db.runCommand(nsDb(), cmd.obj(), result));
@@ -377,4 +375,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace CommandTests
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 36a59a07d90..9f820418793 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -58,12 +58,9 @@ public:
_collection = _database->createCollection(&_opCtx, nss());
IndexCatalog* indexCatalog = _collection->getIndexCatalog();
- auto indexSpec =
- BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns" << ns()
- << "key"
- << BSON("a" << 1)
- << "name"
- << "a_1");
+ auto indexSpec = BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns() << "key" << BSON("a" << 1) << "name"
+ << "a_1");
uassertStatusOK(indexCatalog->createIndexOnEmptyCollection(&_opCtx, indexSpec));
wunit.commit();
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 1a3f889eb8e..427faa513de 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -45,8 +45,8 @@ namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
+using std::unique_ptr;
/**
* Unit tests related to DBHelpers
diff --git a/src/mongo/dbtests/deferred_writer.cpp b/src/mongo/dbtests/deferred_writer.cpp
index 576cd28f6b5..4da43c43449 100644
--- a/src/mongo/dbtests/deferred_writer.cpp
+++ b/src/mongo/dbtests/deferred_writer.cpp
@@ -33,7 +33,6 @@
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/client.h"
-#include "mongo/db/client.h"
#include "mongo/db/concurrency/deferred_writer.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -57,7 +56,7 @@ struct BSONObjCompare {
return SimpleBSONObjComparator::kInstance.compare(lhs, rhs) < 0;
}
};
-}
+} // namespace
static const NamespaceString kTestNamespace("unittests", "deferred_writer_tests");
@@ -384,4 +383,4 @@ public:
add<DeferredWriterTestAsync>();
}
} deferredWriterTests;
-}
+} // namespace deferred_writer_tests
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index a87f38463cf..6ca684003d9 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -77,10 +77,7 @@ public:
BSONObj info;
BSONObj cmd = BSON("captrunc"
<< "b"
- << "n"
- << 1
- << "inc"
- << true);
+ << "n" << 1 << "inc" << true);
// cout << cmd.toString() << endl;
bool ok = client.runCommand("a", cmd, info);
// cout << info.toString() << endl;
diff --git a/src/mongo/dbtests/framework.h b/src/mongo/dbtests/framework.h
index a7a0f57090d..8ed12ba9faf 100644
--- a/src/mongo/dbtests/framework.h
+++ b/src/mongo/dbtests/framework.h
@@ -37,5 +37,5 @@
namespace mongo {
namespace dbtests {
int runDbTests(int argc, char** argv);
-} // dbtests
+} // namespace dbtests
} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp
index ea4f54b65d8..e24c9dd9898 100644
--- a/src/mongo/dbtests/framework_options.cpp
+++ b/src/mongo/dbtests/framework_options.cpp
@@ -138,4 +138,4 @@ Status storeTestFrameworkOptions(const moe::Environment& params,
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.h b/src/mongo/dbtests/framework_options.h
index 602bef0b35c..b79b4eca905 100644
--- a/src/mongo/dbtests/framework_options.h
+++ b/src/mongo/dbtests/framework_options.h
@@ -68,4 +68,4 @@ bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
Status storeTestFrameworkOptions(const moe::Environment& params,
const std::vector<std::string>& args);
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options_init.cpp b/src/mongo/dbtests/framework_options_init.cpp
index 5fb68b995c0..9ecf7993499 100644
--- a/src/mongo/dbtests/framework_options_init.cpp
+++ b/src/mongo/dbtests/framework_options_init.cpp
@@ -67,4 +67,4 @@ MONGO_INITIALIZER_GENERAL(CoreOptions_Store, MONGO_NO_PREREQUISITES, MONGO_NO_DE
(InitializerContext* context) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 512a3e4b087..9ac94209601 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -147,9 +147,7 @@ public:
&opCtx,
_nss.ns(),
BSON("name" << indexName << "ns" << _nss.ns() << "key" << BSON("x" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "expireAfterSeconds"
- << 5)));
+ << static_cast<int>(kIndexVersion) << "expireAfterSeconds" << 5)));
const IndexDescriptor* desc = _catalog->findIndexByName(&opCtx, indexName);
ASSERT(desc);
@@ -194,4 +192,4 @@ public:
};
SuiteInstance<IndexCatalogTests> indexCatalogTests;
-}
+} // namespace IndexCatalogTests
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 39a32505a57..b4e93a32e83 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -134,16 +134,9 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "unique"
- << true
- << "background"
- << background);
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "unique" << true
+ << "background" << background);
ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });
@@ -188,16 +181,9 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "unique"
- << true
- << "background"
- << background);
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "unique" << true
+ << "background" << background);
ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });
@@ -241,8 +227,7 @@ public:
getGlobalServiceContext()->setKillAllOperations();
BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "ns" << _ns << "name"
<< "a_1"
- << "v"
- << static_cast<int>(kIndexVersion));
+ << "v" << static_cast<int>(kIndexVersion));
// The call is interrupted because mayInterrupt == true.
ASSERT_TRUE(buildIndexInterrupted(indexInfo));
// only want to interrupt the index build
@@ -285,8 +270,7 @@ public:
getGlobalServiceContext()->setKillAllOperations();
BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "ns" << _ns << "name"
<< "_id_"
- << "v"
- << static_cast<int>(kIndexVersion));
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_TRUE(buildIndexInterrupted(indexInfo));
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
@@ -328,11 +312,7 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -345,13 +325,8 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "unique"
- << true
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "unique" << true << "key"
+ << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -362,11 +337,7 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -379,11 +350,7 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("y" << 1 << "x" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("y" << 1 << "x" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -397,19 +364,11 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -423,19 +382,11 @@ public:
createIndex("unittests",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "expireAfterSeconds"
- << 3600
- << "sparse"
- << true
- << "unique"
- << 1
- << "key"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -447,19 +398,11 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns"
- << _ns
- << "expireAfterSeconds"
- << 3600
- << "sparse"
- << true
- << "unique"
- << 1
- << "key"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -473,44 +416,27 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "unique"
- << false
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
+ << "ns" << _ns << "unique" << false << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
class SameSpecDifferentSparse : public ComplexIndex {
public:
void run() {
- ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
- << "super2"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << false
- << "background"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
- << BSON("superIdx"
- << "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ ASSERT_EQUALS(
+ ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "unique" << 1 << "sparse" << false << "background"
+ << true << "expireAfterSeconds" << 3600 << "key"
+ << BSON("superIdx"
+ << "2d")
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -521,19 +447,11 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 2400
- << "key"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 2400 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -580,14 +498,8 @@ protected:
BSONObj _createSpec(T storageEngineValue) {
return BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "storageEngine"
- << storageEngineValue);
+ << "ns" << _ns << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "storageEngine" << storageEngineValue);
}
};
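
The index-spec builders above show the operand-packing rule at its most dramatic: specs that previously spent one line per key or value collapse to a few lines. Note that throughout the commit a quoted string value still lands on its own line beneath its quoted key (as with "name" and "a" here); only runs mixing in non-string operands get packed. Side by side, reproduced from the first hunk in this file:

    // Before:
    const BSONObj spec = BSON("name"
                              << "a"
                              << "ns"
                              << coll->ns().ns()
                              << "key"
                              << BSON("a" << 1)
                              << "v"
                              << static_cast<int>(kIndexVersion)
                              << "unique"
                              << true
                              << "background"
                              << background);

    // After:
    const BSONObj spec = BSON("name"
                              << "a"
                              << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
                              << static_cast<int>(kIndexVersion) << "unique" << true
                              << "background" << background);
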
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index 65b4b142ac1..cc7731bc122 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -1163,18 +1163,13 @@ class LabelShares : public LabelBase {
BSONObj expected() {
return BSON("z"
<< "q"
- << "a"
- << (BSON("$gt" << 1))
- << "x"
+ << "a" << (BSON("$gt" << 1)) << "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << "x"
+ << "a" << GT << 1 << "x"
<< "p");
}
};
@@ -1202,11 +1197,7 @@ class LabelDoubleShares : public LabelBase {
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << LTE
- << "x"
+ << "a" << GT << 1 << LTE << "x"
<< "x"
<< "p");
}
@@ -1231,27 +1222,15 @@ class LabelMulti : public LabelBase {
<< "b"
<< BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne"
- << 22.3)
+ << "$ne" << 22.3)
<< "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << LTE
- << "x"
- << "b"
- << NE
- << 1
- << NE
- << "f"
- << NE
- << 22.3
- << "x"
+ << "a" << GT << 1 << LTE << "x"
+ << "b" << NE << 1 << NE << "f" << NE << 22.3 << "x"
<< "p");
}
};
@@ -1261,8 +1240,7 @@ class LabelishOr : public LabelBase {
<< "x"))
<< BSON("b" << BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne"
- << 22.3))
+ << "$ne" << 22.3))
<< BSON("x"
<< "p")));
}
@@ -1627,8 +1605,9 @@ struct BSONArrayBuilderTest {
objb << objb.numStr(i++) << string("World");
arrb << string("World");
- objb << objb.numStr(i++) << BSON("a" << 1 << "b"
- << "foo");
+ objb << objb.numStr(i++)
+ << BSON("a" << 1 << "b"
+ << "foo");
arrb << BSON("a" << 1 << "b"
<< "foo");
@@ -1682,14 +1661,13 @@ struct BSONArrayBuilderTest {
struct ArrayMacroTest {
void run() {
- BSONArray arr = BSON_ARRAY("hello" << 1 << BSON("foo" << BSON_ARRAY("bar"
- << "baz"
- << "qux")));
+ BSONArray arr = BSON_ARRAY("hello" << 1
+ << BSON("foo" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux")));
BSONObj obj = BSON("0"
<< "hello"
- << "1"
- << 1
- << "2"
+ << "1" << 1 << "2"
<< BSON("foo" << BSON_ARRAY("bar"
<< "baz"
<< "qux")));
@@ -1796,38 +1774,26 @@ public:
// DBRef stuff -- json parser can't handle this yet
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1)));
+ << "$id" << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
+ << "$id" << 1 << "$db"
<< "a")));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "stuff"
- << 1)));
+ << "$id" << 1 << "stuff" << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
+ << "$id" << 1 << "$db"
<< "a"
- << "stuff"
- << 1)));
+ << "stuff" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1 << "$db"
<< "a")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
- << 1)));
+ << "$id" << 1 << "$db" << 1)));
bad(BSON("a" << BSON("$ref"
<< "coll")));
bad(BSON("a" << BSON("$ref"
@@ -1839,10 +1805,7 @@ public:
<< "coll")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$hater"
- << 1)));
+ << "$id" << 1 << "$hater" << 1)));
}
};
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
index 69953b19469..6efd8bae42d 100644
--- a/src/mongo/dbtests/jsontests.cpp
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -2479,8 +2479,7 @@ public:
virtual BSONObj bson() const {
return BSON("int" << 123 << "long" << 9223372036854775807ll // 2**63 - 1
- << "double"
- << 3.14);
+ << "double" << 3.14);
}
virtual string json() const {
return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
@@ -2503,8 +2502,7 @@ public:
virtual BSONObj bson() const {
return BSON("int" << 123 << "long" << 9223372036854775807ll // 2**63 - 1
- << "double"
- << 3.14);
+ << "double" << 3.14);
}
virtual string json() const {
return "{ 'int': NumberInt(123), "
@@ -2604,8 +2602,7 @@ public:
virtual BSONObj bson() const {
return BSON("int" << -123 << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
- << "double"
- << -3.14);
+ << "double" << -3.14);
}
virtual string json() const {
return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 7f06eea59f1..6fe21eaf469 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -411,8 +411,7 @@ public:
<< "eliot"
<< "z"
<< "sara"
- << "zz"
- << BSONObj());
+ << "zz" << BSONObj());
s->setObject("blah", o, true);
BSONObj out;
@@ -1238,7 +1237,22 @@ class NovelNaN {
public:
void run() {
uint8_t bits[] = {
- 16, 0, 0, 0, 0x01, 'a', '\0', 0x61, 0x79, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ 16,
+ 0,
+ 0,
+ 0,
+ 0x01,
+ 'a',
+ '\0',
+ 0x61,
+ 0x79,
+ 0xfe,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0,
};
unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
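
The byte-array explosion above looks anomalous in a commit that mostly tightens code, but it matches a documented clang-format rule: a braced initializer list that ends with a trailing comma is laid out one element per line, while a list without one packs to the column limit. That clang-format-7.0.1 is the release that began honoring the trailing comma in this codebase is an assumption; the rule itself is current documented behavior. A sketch:

    // No trailing comma: elements pack onto one line.
    uint8_t packed[] = {16, 0, 0, 0, 0x01, 'a', '\0', 0x61};

    // Trailing comma after the last element: one element per line.
    uint8_t vertical[] = {
        16,
        0,
    };
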
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index adcb5ca3fb1..5f7255a9417 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -182,4 +182,4 @@ void MockDBClientConnection::checkConnection() {
_remoteServerInstanceID = _remoteServer->getInstanceID();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index afed04c393d..24aca01cf10 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -132,4 +132,4 @@ private:
uint64_t _sockCreationTime;
bool _autoReconnect;
};
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index 5b6a884ba8d..f8d271fe61c 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -239,4 +239,4 @@ void MockRemoteDBServer::checkIfUp(InstanceID id) const {
throwSocketError(mongo::SocketErrorKind::CLOSED, _hostAndPort);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp
index f6158af9abb..d286d39450e 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.cpp
+++ b/src/mongo/dbtests/mock/mock_replica_set.cpp
@@ -350,4 +350,4 @@ void MockReplicaSet::mockReplSetGetStatusCmd() {
node->setCommandReply("replSetGetStatus", fullStatBuilder.done());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_replica_set.h b/src/mongo/dbtests/mock/mock_replica_set.h
index a2f442d8beb..01929b0e203 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.h
+++ b/src/mongo/dbtests/mock/mock_replica_set.h
@@ -150,4 +150,4 @@ private:
std::string _primaryHost;
};
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 555f982002b..236b80d45d4 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -414,16 +414,10 @@ TEST(MockDBClientConnTest, CyclingCmd) {
vector<BSONObj> isMasterSequence;
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster"
- << true
- << "ok"
- << 1));
+ << "isMaster" << true << "ok" << 1));
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster"
- << false
- << "ok"
- << 1));
+ << "isMaster" << false << "ok" << 1));
server.setCommandReply("isMaster", isMasterSequence);
}
@@ -630,4 +624,4 @@ TEST(MockDBClientConnTest, Delay) {
ASSERT_EQUALS(1U, server.getQueryCount());
ASSERT_EQUALS(1U, server.getCmdCount());
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp
index 4ff02ce5bbe..bb43a53a3ce 100644
--- a/src/mongo/dbtests/mock_replica_set_test.cpp
+++ b/src/mongo/dbtests/mock_replica_set_test.cpp
@@ -424,4 +424,4 @@ TEST(MockReplicaSetTest, KillMultipleNode) {
const string priHostName(replSet.getPrimary());
ASSERT(replSet.getNode(priHostName)->isRunning());
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index d12ce069a54..8f0759f69b1 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -96,8 +96,7 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
}
@@ -143,11 +142,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -177,11 +172,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -197,11 +188,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -239,11 +226,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -292,11 +275,7 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -337,11 +316,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPatternAB
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPatternAB << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -349,11 +324,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_c_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPatternAC
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPatternAC << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
{
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 5918628d4fc..f6a81de3a69 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -354,8 +354,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas
ASSERT_TRUE(_client.runCommand("admin",
BSON("renameCollection" << nss.ns() << "to"
<< "unittests.new_collection_name"
- << "dropTarget"
- << true),
+ << "dropTarget" << true),
info));
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
@@ -381,8 +380,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase)
ASSERT_TRUE(_client.runCommand("admin",
BSON("renameCollection" << nss.ns() << "to"
<< "unittests.new_collection_name"
- << "dropTarget"
- << true),
+ << "dropTarget" << true),
info));
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index af13d03fb61..38180e1e643 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -632,4 +632,4 @@ public:
SuiteInstance<All> planRankingAll;
-} // namespace PlanRankingTest
+} // namespace PlanRankingTests
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 6b617565423..37ac3d379cb 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -64,7 +64,7 @@ std::unique_ptr<CanonicalQuery> canonicalQueryFromFilterObj(OperationContext* op
uassertStatusOK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
-}
+} // namespace
class QueryStageCachedPlan : public unittest::Test {
public:
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 0312bb411da..f1da4aec8ab 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -408,4 +408,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace QueryStageCollectionScan
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 80770a69e70..5aa7a9d7618 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -73,10 +73,7 @@ public:
->createIndexOnEmptyCollection(&_opCtx,
BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << ns()
- << "v"
- << 1))
+ << "ns" << ns() << "v" << 1))
.status_with_transitional_ignore();
for (int i = 0; i < kDocuments; i++) {
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index d5112419175..c070d2f1b1c 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -60,8 +60,7 @@ public:
ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection(
&_opCtx,
BSON("ns" << ns() << "key" << BSON("x" << 1) << "name"
- << DBClientBase::genIndexName(BSON("x" << 1))
- << "v"
+ << DBClientBase::genIndexName(BSON("x" << 1)) << "v"
<< static_cast<int>(kIndexVersion))));
wunit.commit();
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 05678ce2c64..7b3d0bbea12 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -883,4 +883,4 @@ public:
SuiteInstance<All> queryStageMergeSortTest;
-} // namespace
+} // namespace QueryStageMergeSortTests
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index e4296c2c8de..92ad952cce1 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -236,4 +236,4 @@ TEST_F(QueryStageNearTest, EmptyResults) {
ASSERT_EQUALS(results.size(), 3u);
assertAscendingAndValid(results);
}
-}
+} // namespace
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 1c0c9c4de5f..3543e1740fa 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -596,4 +596,4 @@ public:
SuiteInstance<All> queryStageSortTest;
-} // namespace
+} // namespace QueryStageSortTests
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index e04a5c6b3db..d050082bd8b 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -120,8 +120,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) {
dbtests::WriteContextForTests ctx(opCtx(), nss.ns());
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
addIndex(BSON("a"
<< "2d"));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index ae30d3ef2d2..62253f22b7e 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -241,4 +241,4 @@ public:
SuiteInstance<All> queryStageTestsAll;
-} // namespace
+} // namespace QueryStageTests
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index c981c80a888..848f47ff68f 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -56,9 +56,9 @@
namespace {
namespace QueryTests {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
class Base {
@@ -233,8 +233,7 @@ public:
bool ok = cl.runCommand("unittests",
BSON("godinsert"
<< "querytests"
- << "obj"
- << BSONObj()),
+ << "obj" << BSONObj()),
info);
ASSERT(ok);
@@ -646,12 +645,7 @@ public:
_client.runCommand("unittests",
BSON("create"
<< "querytests.TailableQueryOnId"
- << "capped"
- << true
- << "size"
- << 8192
- << "autoIndexId"
- << true),
+ << "capped" << true << "size" << 8192 << "autoIndexId" << true),
info);
insertA(ns, 0);
insertA(ns, 1);
@@ -1504,11 +1498,7 @@ public:
ASSERT(_client.runCommand("local",
BSON("create"
<< "querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
+ << "capped" << true << "size" << 4096 << "autoIndexId"
<< false),
info));
@@ -1571,11 +1561,7 @@ public:
ASSERT(_client.runCommand("local",
BSON("create"
<< "querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
+ << "capped" << true << "size" << 4096 << "autoIndexId"
<< false),
info));
@@ -1643,11 +1629,7 @@ public:
ASSERT(_client.runCommand("local",
BSON("create"
<< "querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
+ << "capped" << true << "size" << 4096 << "autoIndexId"
<< false),
info));
@@ -1741,10 +1723,7 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.exhaust"
- << "capped"
- << true
- << "size"
- << 8192),
+ << "capped" << true << "size" << 8192),
info));
_client.insert(ns(), BSON("ts" << Timestamp(1000, 0)));
Message message;
diff --git a/src/mongo/dbtests/replica_set_monitor_test.cpp b/src/mongo/dbtests/replica_set_monitor_test.cpp
index 4477d727839..81472049c19 100644
--- a/src/mongo/dbtests/replica_set_monitor_test.cpp
+++ b/src/mongo/dbtests/replica_set_monitor_test.cpp
@@ -45,10 +45,10 @@ namespace mongo {
namespace {
using std::map;
-using std::vector;
using std::set;
using std::string;
using std::unique_ptr;
+using std::vector;
using unittest::assertGet;
MONGO_INITIALIZER(DisableReplicaSetMonitorRefreshRetries)(InitializerContext*) {
@@ -216,22 +216,24 @@ protected:
const string host(_replSet->getPrimary());
const mongo::repl::MemberConfig* member =
oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON(
- "_id" << member->getId().getData() << "host" << host << "tags" << BSON("dc"
- << "ny"
- << "num"
- << "1")));
+ membersBuilder.append(BSON("_id" << member->getId().getData() << "host" << host
+ << "tags"
+ << BSON("dc"
+ << "ny"
+ << "num"
+ << "1")));
}
{
const string host(_replSet->getSecondaries().front());
const mongo::repl::MemberConfig* member =
oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON(
- "_id" << member->getId().getData() << "host" << host << "tags" << BSON("dc"
- << "ny"
- << "num"
- << "2")));
+ membersBuilder.append(BSON("_id" << member->getId().getData() << "host" << host
+ << "tags"
+ << BSON("dc"
+ << "ny"
+ << "num"
+ << "2")));
}
membersBuilder.done();
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 825d075b63f..0c57660a473 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -55,10 +55,10 @@ using namespace mongo::repl;
namespace ReplTests {
-using std::unique_ptr;
using std::endl;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
/**
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 71824e31bb1..1f1a8399fc3 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -42,9 +42,9 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
-using std::unique_ptr;
using std::list;
using std::string;
+using std::unique_ptr;
namespace RollbackTests {
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 4561a93860b..4c7495f2ca9 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -124,7 +124,7 @@ public:
private:
OperationContext* _opCtx;
};
-}
+} // namespace
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
@@ -258,12 +258,12 @@ public:
BSONObj indexInfoObj;
{
- auto swIndexInfoObj = indexer.init(
- _opCtx,
- coll,
- {BSON("v" << 2 << "name" << indexName << "ns" << coll->ns().ns() << "key"
- << indexKey)},
- MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, coll));
+ auto swIndexInfoObj =
+ indexer.init(_opCtx,
+ coll,
+ {BSON("v" << 2 << "name" << indexName << "ns" << coll->ns().ns()
+ << "key" << indexKey)},
+ MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, coll));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
}
@@ -388,11 +388,11 @@ public:
const BSONObj& expectedDoc) {
OneOffRead oor(_opCtx, ts);
if (expectedDoc.isEmpty()) {
- ASSERT_EQ(0, itCount(coll)) << "Should not find any documents in " << coll->ns()
- << " at ts: " << ts;
+ ASSERT_EQ(0, itCount(coll))
+ << "Should not find any documents in " << coll->ns() << " at ts: " << ts;
} else {
- ASSERT_EQ(1, itCount(coll)) << "Should find one document in " << coll->ns()
- << " at ts: " << ts;
+ ASSERT_EQ(1, itCount(coll))
+ << "Should find one document in " << coll->ns() << " at ts: " << ts;
auto doc = findOne(coll);
ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, expectedDoc))
<< "Doc: " << doc.toString() << " Expected: " << expectedDoc.toString();
@@ -669,8 +669,7 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
}
@@ -705,23 +704,17 @@ public:
nss.db().toString(),
BSON("applyOps" << BSON_ARRAY(
BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
- << "v"
- << 2
- << "op"
+ << "v" << 2 << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui"
+ << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << idx))
<< BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
<< "op"
<< "c"
<< "ns"
<< "test.$cmd"
- << "o"
- << BSON("applyOps" << BSONArrayBuilder().obj())))),
+ << "o" << BSON("applyOps" << BSONArrayBuilder().obj())))),
repl::OplogApplication::Mode::kApplyOpsCmd,
&result));
}
@@ -823,20 +816,14 @@ public:
// Delete all documents one at a time.
const LogicalTime startDeleteTime = _clock->reserveTicks(docsToInsert);
for (std::int32_t num = 0; num < docsToInsert; ++num) {
- ASSERT_OK(
- doNonAtomicApplyOps(
- nss.db().toString(),
- {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL << "v"
- << 2
- << "op"
- << "d"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
- << BSON("_id" << num))})
- .getStatus());
+ ASSERT_OK(doNonAtomicApplyOps(nss.db().toString(),
+ {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp()
+ << "t" << 0LL << "v" << 2 << "op"
+ << "d"
+ << "ns" << nss.ns() << "ui"
+ << autoColl.getCollection()->uuid().get()
+ << "o" << BSON("_id" << num))})
+ .getStatus());
}
for (std::int32_t num = 0; num <= docsToInsert; ++num) {
@@ -892,17 +879,10 @@ public:
doNonAtomicApplyOps(
nss.db().toString(),
{BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL << "v"
- << 2
- << "op"
+ << 2 << "op"
<< "u"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o2"
- << BSON("_id" << 0)
- << "o"
- << updates[idx].first)})
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get()
+ << "o2" << BSON("_id" << 0) << "o" << updates[idx].first)})
.getStatus());
}
@@ -940,19 +920,11 @@ public:
nss.db().toString(),
{BSON("ts" << insertTime.asTimestamp() << "t" << 1LL << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << 0 << "field" << 0)),
BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << 0))}));
ASSERT_EQ(2, result.getIntField("applied"));
@@ -994,19 +966,13 @@ public:
auto swResult = doAtomicApplyOps(nss.db().toString(),
{BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui"
+ << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << 0)),
BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui"
+ << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << 1))});
ASSERT_OK(swResult);
@@ -1053,19 +1019,13 @@ public:
auto swResult = doAtomicApplyOps(nss.db().toString(),
{BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui"
+ << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << 0 << "field" << 0)),
BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
+ << "ns" << nss.ns() << "ui"
+ << autoColl.getCollection()->uuid().get() << "o"
<< BSON("_id" << 0))});
ASSERT_OK(swResult);
@@ -1104,17 +1064,14 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(nss.db().toString(),
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ nss.db().toString(),
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "o"
+ << BSON("create" << nss.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
@@ -1145,25 +1102,18 @@ public:
const Timestamp dummyTs = dummyLt.asTimestamp();
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss1.getCommandNS().ns()
- << "o"
- << BSON("create" << nss1.coll())),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss1.getCommandNS().ns() << "o"
+ << BSON("create" << nss1.coll())),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss2.getCommandNS().ns() << "o"
+ << BSON("create" << nss2.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss1).getCollection()); }
@@ -1211,34 +1161,21 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss2).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult =
- doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss1.ns()
- << "ui"
- << autoColl.getCollection()->uuid().get()
- << "o"
- << doc1),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << uuid2
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- BSON("ts" << insert2Ts << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss2.ns()
- << "ui"
- << uuid2
- << "o"
- << doc2),
- });
+ auto swResult = doNonAtomicApplyOps(
+ dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "i"
+ << "ns" << nss1.ns() << "ui" << autoColl.getCollection()->uuid().get()
+ << "o" << doc1),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << uuid2 << "ns" << nss2.getCommandNS().ns() << "o"
+ << BSON("create" << nss2.coll())),
+ BSON("ts" << insert2Ts << "t" << 1LL << "op"
+ << "i"
+ << "ns" << nss2.ns() << "ui" << uuid2 << "o" << doc2),
+ });
ASSERT_OK(swResult);
}
@@ -1283,17 +1220,14 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(nss.db().toString(),
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ nss.db().toString(),
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "o"
+ << BSON("create" << nss.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
@@ -1331,9 +1265,8 @@ public:
uuid = autoColl.getCollection()->uuid().get();
}
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
_coordinatorMock->alwaysAllowWrites(false);
@@ -1349,30 +1282,15 @@ public:
auto op0 = repl::OplogEntry(BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc0));
auto op1 = repl::OplogEntry(BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc1));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc1));
auto op2 = repl::OplogEntry(BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc2));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc2));
std::vector<repl::OplogEntry> ops = {op0, op1, op2};
DoNothingOplogApplierObserver observer;
@@ -1417,9 +1335,8 @@ public:
uuid = autoColl.getCollection()->uuid().get();
}
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
_coordinatorMock->alwaysAllowWrites(false);
@@ -1437,45 +1354,23 @@ public:
auto op0 = repl::OplogEntry(BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc0));
auto op1 = repl::OplogEntry(BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc1));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc1));
auto op2 = repl::OplogEntry(BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc2));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc2));
auto indexSpec2 = BSON("createIndexes" << nss.coll() << "ns" << nss.ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("b" << 1)
- << "name"
+ << static_cast<int>(kIndexVersion) << "key"
+ << BSON("b" << 1) << "name"
<< "b_1");
auto createIndexOp = repl::OplogEntry(
BSON("ts" << indexBuildTime.asTimestamp() << "t" << 1LL << "v" << 2 << "op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "ui"
- << uuid
- << "o"
- << indexSpec2));
+ << "ns" << nss.getCommandNS().ns() << "ui" << uuid << "o" << indexSpec2));
// We add in an index creation op to test that we restart tracking multikey path info
// after bulk index builds.
@@ -1536,9 +1431,8 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
const LogicalTime pastTime = _clock->reserveTicks(1);
@@ -1566,9 +1460,8 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X, LockMode::MODE_IX);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
const LogicalTime pastTime = _clock->reserveTicks(1);
@@ -1599,9 +1492,8 @@ public:
reset(nss);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
auto doc = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
{
@@ -1991,10 +1883,7 @@ public:
autoColl.getCollection(),
{BSON("v" << 2 << "unique" << true << "name"
<< "a_1"
- << "ns"
- << nss.ns()
- << "key"
- << BSON("a" << 1))},
+ << "ns" << nss.ns() << "key" << BSON("a" << 1))},
MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, autoColl.getCollection()));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
@@ -2101,10 +1990,7 @@ public:
autoColl.getCollection(),
{BSON("v" << 2 << "unique" << true << "name"
<< "a_1"
- << "ns"
- << nss.ns()
- << "key"
- << BSON("a" << 1))},
+ << "ns" << nss.ns() << "key" << BSON("a" << 1))},
MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, autoColl.getCollection()));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
@@ -2257,8 +2143,7 @@ public:
const Timestamp indexAComplete = queryOplog(BSON("op"
<< "c"
- << "o.createIndexes"
- << nss.coll()
+ << "o.createIndexes" << nss.coll()
<< "o.name"
<< "a_1"))["ts"]
.timestamp();
@@ -2348,9 +2233,9 @@ public:
BSON("renameCollection" << nss.ns() << "to" << renamedNss.ns() << "dropTarget" << true),
renameResult);
- const auto createIndexesDocument = queryOplog(BSON("ns" << renamedNss.db() + ".$cmd"
- << "o.createIndexes"
- << BSON("$exists" << true)));
+ const auto createIndexesDocument =
+ queryOplog(BSON("ns" << renamedNss.db() + ".$cmd"
+ << "o.createIndexes" << BSON("$exists" << true)));
// Find index creation timestamps.
const auto createIndexesString =
@@ -2363,15 +2248,13 @@ public:
const Timestamp indexCreateInitTs = queryOplog(BSON("op"
<< "c"
- << "o.create"
- << tmpName.coll()))["ts"]
+ << "o.create" << tmpName.coll()))["ts"]
.timestamp();
const Timestamp indexAComplete = createIndexesDocument["ts"].timestamp();
const Timestamp indexBComplete = queryOplog(BSON("op"
<< "c"
- << "o.createIndexes"
- << tmpName.coll()
+ << "o.createIndexes" << tmpName.coll()
<< "o.name"
<< "b_1"))["ts"]
.timestamp();
@@ -2550,14 +2433,10 @@ public:
// Make a simple insert operation.
BSONObj doc0 = BSON("_id" << 0 << "a" << 0);
- auto insertOp = repl::OplogEntry(BSON("ts" << futureTs << "t" << 1LL << "v" << 2 << "op"
- << "i"
- << "ns"
- << ns.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ auto insertOp =
+ repl::OplogEntry(BSON("ts" << futureTs << "t" << 1LL << "v" << 2 << "op"
+ << "i"
+ << "ns" << ns.ns() << "ui" << uuid << "o" << doc0));
// Apply the operation.
auto storageInterface = repl::StorageInterface::get(_opCtx);
@@ -2635,20 +2514,14 @@ public:
}
auto indexSpec = BSON("createIndexes" << nss.coll() << "ns" << nss.ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("field" << 1)
- << "name"
+ << static_cast<int>(kIndexVersion) << "key"
+ << BSON("field" << 1) << "name"
<< "field_1");
auto createIndexOp = BSON("ts" << startBuildTs << "t" << 1LL << "v" << 2 << "op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "ui"
- << collUUID
- << "o"
- << indexSpec);
+ << "ns" << nss.getCommandNS().ns() << "ui" << collUUID
+ << "o" << indexSpec);
ASSERT_OK(doAtomicApplyOps(nss.db().toString(), {createIndexOp}));
@@ -2683,21 +2556,17 @@ public:
ASSERT_OK(createCollection(_opCtx,
viewNss.db().toString(),
BSON("create" << viewNss.coll() << "pipeline" << BSONArray()
- << "viewOn"
- << backingCollNss.coll())));
+ << "viewOn" << backingCollNss.coll())));
const Timestamp systemViewsCreateTs = queryOplog(BSON("op"
<< "c"
- << "ns"
- << (viewNss.db() + ".$cmd")
+ << "ns" << (viewNss.db() + ".$cmd")
<< "o.create"
<< "system.views"))["ts"]
.timestamp();
const Timestamp viewCreateTs = queryOplog(BSON("op"
<< "i"
- << "ns"
- << systemViewsNss.ns()
- << "o._id"
+ << "ns" << systemViewsNss.ns() << "o._id"
<< viewNss.ns()))["ts"]
.timestamp();
@@ -2714,11 +2583,11 @@ public:
AutoGetCollection autoColl(_opCtx, systemViewsNss, LockMode::MODE_IS);
assertDocumentAtTimestamp(autoColl.getCollection(), systemViewsCreateTs, BSONObj());
- assertDocumentAtTimestamp(
- autoColl.getCollection(),
- viewCreateTs,
- BSON("_id" << viewNss.ns() << "viewOn" << backingCollNss.coll() << "pipeline"
- << BSONArray()));
+ assertDocumentAtTimestamp(autoColl.getCollection(),
+ viewCreateTs,
+ BSON("_id" << viewNss.ns() << "viewOn"
+ << backingCollNss.coll() << "pipeline"
+ << BSONArray()));
}
}
};
@@ -2745,9 +2614,7 @@ public:
BSONObj result = queryOplog(BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o.create"
+ << "ns" << nss.getCommandNS().ns() << "o.create"
<< nss.coll()));
repl::OplogEntry op(result);
// The logOp() call for createCollection should have timestamp 'futureTs', which will also
@@ -2763,9 +2630,7 @@ public:
result = queryOplog(BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o.createIndexes"
+ << "ns" << nss.getCommandNS().ns() << "o.createIndexes"
<< nss.coll()));
repl::OplogEntry indexOp(result);
ASSERT_EQ(indexOp.getObject()["name"].str(), "user_1_db_1");
@@ -2969,18 +2834,13 @@ public:
assertFilteredDocumentAtTimestamp(coll, query2, nullTs, doc2);
// Implicit commit oplog entry should exist at commitEntryTs.
- const auto commitFilter =
- BSON("ts" << commitEntryTs << "o"
- << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid().get()
- << "o"
- << doc2))
- << "count"
- << 2));
+ const auto commitFilter = BSON(
+ "ts" << commitEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid().get() << "o" << doc2))
+ << "count" << 2));
assertOplogDocumentExistsAtTimestamp(commitFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, firstOplogEntryTs, false);
@@ -2996,18 +2856,13 @@ public:
assertOldestActiveTxnTimestampEquals(boost::none, nullTs);
// first oplog entry should exist at firstOplogEntryTs and after it.
- const auto firstOplogEntryFilter =
- BSON("ts" << firstOplogEntryTs << "o"
- << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid().get()
- << "o"
- << doc))
- << "partialTxn"
- << true));
+ const auto firstOplogEntryFilter = BSON(
+ "ts" << firstOplogEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid().get() << "o" << doc))
+ << "partialTxn" << true));
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true);
@@ -3175,18 +3030,13 @@ public:
assertOplogDocumentExistsAtTimestamp(commitFilter, nullTs, true);
// The first oplog entry should exist at firstOplogEntryTs and onwards.
- const auto firstOplogEntryFilter =
- BSON("ts" << firstOplogEntryTs << "o"
- << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid().get()
- << "o"
- << doc))
- << "partialTxn"
- << true));
+ const auto firstOplogEntryFilter = BSON(
+ "ts" << firstOplogEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid().get() << "o" << doc))
+ << "partialTxn" << true));
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true);
@@ -3194,20 +3044,13 @@ public:
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, commitEntryTs, true);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, nullTs, true);
// The prepare oplog entry should exist at prepareEntryTs and onwards.
- const auto prepareOplogEntryFilter =
- BSON("ts" << prepareEntryTs << "o"
- << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid().get()
- << "o"
- << doc2))
- << "prepare"
- << true
- << "count"
- << 2));
+ const auto prepareOplogEntryFilter = BSON(
+ "ts" << prepareEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid().get() << "o" << doc2))
+ << "prepare" << true << "count" << 2));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, firstOplogEntryTs, false);
@@ -3320,17 +3163,13 @@ public:
}
// The prepare oplog entry should exist at firstOplogEntryTs and onwards.
- const auto prepareOplogEntryFilter =
- BSON("ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << ui
- << "o"
- << doc))
- << "prepare"
- << true));
+ const auto prepareOplogEntryFilter = BSON(
+ "ts" << prepareEntryTs << "o"
+ << BSON("applyOps"
+ << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << ui << "o" << doc))
+ << "prepare" << true));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, prepareEntryTs, true);
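Every hunk in the timestamp tests above is the same mechanical change: clang-format 7 packs BSON builder "<<" chains up to the column limit instead of breaking after every key, while two consecutive string-literal operands (such as "op" followed by "i") still land on separate lines. A minimal stand-in sketch of that rule, not taken from the patch itself, using std::ostringstream in place of the BSON() macro since only the line-breaking is at issue:

    #include <iostream>
    #include <sstream>

    int main() {
        std::ostringstream op;
        long long term = 1LL;
        // 3.8-era output broke after every operand once the chain overflowed:
        //     op << "ts" << 12345 << "t" << term
        //        << "v"
        //        << 2
        //        << "op"
        //        << "i";
        // 7.0.1 packs operands to the column limit, but apparently keeps the
        // two adjacent string literals "op" and "i" on separate lines.
        op << "ts" << 12345 << "t" << term << "v" << 2 << "op"
           << "i";
        std::cout << op.str() << std::endl;
        return 0;
    }
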
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 5cef4ec9af2..b419c09e036 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -48,10 +48,10 @@
namespace ThreadedTests {
-using std::unique_ptr;
using std::cout;
using std::endl;
using std::string;
+using std::unique_ptr;
template <int nthreads_param = 10>
class ThreadedTest {
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 106960fb6c2..62bed088466 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -47,10 +47,10 @@
namespace UpdateTests {
-using std::unique_ptr;
using std::numeric_limits;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
namespace dps = ::mongo::dotted_path_support;
@@ -1665,8 +1665,8 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,x:[{a:1},{a:3}]}"));
// { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2 } } }
- BSONObj pushObj = BSON(
- "$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$slice" << -2.0);
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$slice" << -2.0);
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
BSONObj result = _client.findOne(ns(), Query());
@@ -1680,9 +1680,8 @@ public:
BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:3}]}");
_client.insert(ns(), expected);
// { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } }
- BSONObj pushObj =
- BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$sort"
- << BSON("a" << 1));
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$sort" << BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
ASSERT_BSONOBJ_EQ(result, expected);
@@ -1763,9 +1762,7 @@ public:
ns(), BSON("_id" << 0 << "a" << 1 << "x" << BSONObj() << "x" << BSONObj() << "z" << 5));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
- << BSONObj()
- << "z"
- << 5),
+ << BSONObj() << "z" << 5),
_client.findOne(ns(), BSONObj()));
}
};
@@ -1779,9 +1776,7 @@ public:
_client.update(
ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
- << BSONObj()
- << "x"
- << BSONObj()),
+ << BSONObj() << "x" << BSONObj()),
_client.findOne(ns(), BSONObj()));
}
};
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 9c04d360214..b0605d2af15 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -228,18 +228,14 @@ public:
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -304,18 +300,14 @@ public:
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -466,14 +458,10 @@ public:
coll->ns().ns(),
BSON("name"
<< "multikey_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a.b" << 1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a.b" << 1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -540,20 +528,14 @@ public:
}
// Create a sparse index.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "sparse_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "sparse"
- << true));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "sparse_index"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "sparse" << true));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -613,20 +595,15 @@ public:
}
// Create a partial index.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$gt" << 1))));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$gt" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -679,38 +656,30 @@ public:
}
// Create a partial geo index that indexes the document. This should return an error.
- ASSERT_NOT_OK(dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("x"
- << "2dsphere")
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 2)))));
+ ASSERT_NOT_OK(
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("x"
+ << "2dsphere")
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$eq" << 2)))));
// Create a partial geo index that does not index the document.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("x"
- << "2dsphere")
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 1))));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("x"
+ << "2dsphere")
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$eq" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
releaseDb();
@@ -765,28 +734,20 @@ public:
coll->ns().ns(),
BSON("name"
<< "compound_index_1"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1 << "b" << -1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a" << 1 << "b" << -1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "compound_index_2"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << -1 << "b" << 1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a" << -1 << "b" << 1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -845,9 +806,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -929,9 +888,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -978,9 +935,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert non-multikey documents.
@@ -1089,9 +1044,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents with indexed and not-indexed paths.
@@ -1182,9 +1135,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
@@ -1267,9 +1218,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
@@ -1371,9 +1320,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp
index a405cbc2185..6a58f70004a 100644
--- a/src/mongo/embedded/embedded.cpp
+++ b/src/mongo/embedded/embedded.cpp
@@ -110,9 +110,7 @@ void setUpCatalog(ServiceContext* serviceContext) {
// Create a minimalistic replication coordinator to provide a limited interface for users. Not
// functional to provide any replication logic.
ServiceContext::ConstructorActionRegisterer replicationManagerInitializer(
- "CreateReplicationManager",
- {"SSLManager", "default"},
- [](ServiceContext* serviceContext) {
+ "CreateReplicationManager", {"SSLManager", "default"}, [](ServiceContext* serviceContext) {
repl::StorageInterface::set(serviceContext, std::make_unique<repl::StorageInterfaceImpl>());
auto logicalClock = stdx::make_unique<LogicalClock>(serviceContext);
diff --git a/src/mongo/embedded/embedded_ismaster.cpp b/src/mongo/embedded/embedded_ismaster.cpp
index 20b9b4d8c83..e42c4292dac 100644
--- a/src/mongo/embedded/embedded_ismaster.cpp
+++ b/src/mongo/embedded/embedded_ismaster.cpp
@@ -110,5 +110,5 @@ public:
}
} CmdIsMaster;
-} // namespace repl
+} // namespace
} // namespace mongo
diff --git a/src/mongo/embedded/embedded_options_helpers.cpp b/src/mongo/embedded/embedded_options_helpers.cpp
index 7924db33f18..6815584a9d0 100644
--- a/src/mongo/embedded/embedded_options_helpers.cpp
+++ b/src/mongo/embedded/embedded_options_helpers.cpp
@@ -60,4 +60,4 @@ Status parseCommandLineOptions(int argc,
}
} // namespace embedded_integration_helpers
-} // namepsace mongo
+} // namespace mongo
diff --git a/src/mongo/embedded/stitch_support/stitch_support_test.cpp b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
index e73ef6627e8..e5e552260b5 100644
--- a/src/mongo/embedded/stitch_support/stitch_support_test.cpp
+++ b/src/mongo/embedded/stitch_support/stitch_support_test.cpp
@@ -402,7 +402,7 @@ TEST_F(StitchSupportTest, CheckMatchWorksWithCollation) {
}
TEST_F(StitchSupportTest, CheckProjectionWorksWithDefaults) {
- auto[results, needsMatch] =
+ auto [results, needsMatch] =
checkProjection("{a: 1}", {"{_id: 1, a: 100, b: 200}", "{_id: 1, a: 200, b: 300}"});
ASSERT_FALSE(needsMatch);
ASSERT_EQ("{ \"_id\" : 1, \"a\" : 100 }", results[0]);
@@ -443,7 +443,7 @@ TEST_F(StitchSupportTest, CheckProjectionCollatesRespectfully) {
lib, toBSONForAPI("{locale: 'en', strength: 2}").first, nullptr);
ON_BLOCK_EXIT([collator] { stitch_support_v1_collator_destroy(collator); });
- auto[results, needsMatch] =
+ auto [results, needsMatch] =
checkProjection("{a: {$elemMatch: {$eq: 'MiXedcAse'}}}",
{"{_id: 1, a: ['lowercase', 'mixEdCaSe', 'UPPERCASE']}"},
nullptr,
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index 2381f2bceec..c2705ae9e1d 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -71,7 +71,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
invariant(ret.second, "Element already existed in map/set");
}
-} // anonymous
+} // namespace
namespace executor {
@@ -161,7 +161,8 @@ public:
const auto& data = getOrInvariant(_poolData, id);
return {
- getPool()->_options.maxConnecting, data.target,
+ getPool()->_options.maxConnecting,
+ data.target,
};
}
@@ -216,11 +217,11 @@ public:
template <typename Callback>
auto guardCallback(Callback&& cb) {
return
- [ this, cb = std::forward<Callback>(cb), anchor = shared_from_this() ](auto&&... args) {
- stdx::lock_guard lk(_parent->_mutex);
- cb(std::forward<decltype(args)>(args)...);
- updateState();
- };
+ [this, cb = std::forward<Callback>(cb), anchor = shared_from_this()](auto&&... args) {
+ stdx::lock_guard lk(_parent->_mutex);
+ cb(std::forward<decltype(args)>(args)...);
+ updateState();
+ };
}
SpecificPool(std::shared_ptr<ConnectionPool> parent,
@@ -516,7 +517,7 @@ void ConnectionPool::get_forTest(const HostAndPort& hostAndPort,
Milliseconds timeout,
GetConnectionCallback cb) {
// We kick ourselves onto the executor queue to prevent us from deadlocking with our own thread
- auto getConnectionFunc = [ this, hostAndPort, timeout, cb = std::move(cb) ](Status &&) mutable {
+ auto getConnectionFunc = [this, hostAndPort, timeout, cb = std::move(cb)](Status&&) mutable {
get(hostAndPort, transport::kGlobalSSLMode, timeout)
.thenRunOn(_factory->getExecutor())
.getAsync(std::move(cb));
@@ -647,7 +648,7 @@ Future<ConnectionPool::ConnectionHandle> ConnectionPool::SpecificPool::getConnec
}
auto ConnectionPool::SpecificPool::makeHandle(ConnectionInterface* connection) -> ConnectionHandle {
- auto deleter = [ this, anchor = shared_from_this() ](ConnectionInterface * connection) {
+ auto deleter = [this, anchor = shared_from_this()](ConnectionInterface* connection) {
stdx::lock_guard lk(_parent->_mutex);
returnConnection(connection);
_lastActiveTime = _parent->_factory->now();
@@ -1116,7 +1117,7 @@ void ConnectionPool::SpecificPool::updateState() {
}
ExecutorFuture(ExecutorPtr(_parent->_factory->getExecutor())) //
- .getAsync([ this, anchor = shared_from_this() ](Status && status) mutable {
+ .getAsync([this, anchor = shared_from_this()](Status&& status) mutable {
invariant(status);
stdx::lock_guard lk(_parent->_mutex);
diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp
index c47641441ef..c0dca54ff75 100644
--- a/src/mongo/executor/connection_pool_test.cpp
+++ b/src/mongo/executor/connection_pool_test.cpp
@@ -77,14 +77,14 @@ protected:
template <typename... Args>
auto getFromPool(Args&&... args) {
return ExecutorFuture(_executor)
- .then([ pool = _pool, args... ]() { return pool->get(args...); })
+ .then([pool = _pool, args...]() { return pool->get(args...); })
.semi();
}
void doneWith(ConnectionPool::ConnectionHandle& conn) {
dynamic_cast<ConnectionImpl*>(conn.get())->indicateSuccess();
- ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto){});
+ ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto) {});
}
using StatusWithConn = StatusWith<ConnectionPool::ConnectionHandle>;
diff --git a/src/mongo/executor/connection_pool_test_fixture.cpp b/src/mongo/executor/connection_pool_test_fixture.cpp
index 6598a029101..0179301255a 100644
--- a/src/mongo/executor/connection_pool_test_fixture.cpp
+++ b/src/mongo/executor/connection_pool_test_fixture.cpp
@@ -73,9 +73,7 @@ void TimerImpl::fireIfNecessary() {
for (auto&& x : timers) {
if (_timers.count(x) && (x->_expiration <= x->now())) {
- auto execCB = [cb = std::move(x->_cb)](auto&&) mutable {
- std::move(cb)();
- };
+ auto execCB = [cb = std::move(x->_cb)](auto&&) mutable { std::move(cb)(); };
auto global = x->_global;
_timers.erase(x);
global->_executor->schedule(std::move(execCB));
@@ -121,7 +119,7 @@ void ConnectionImpl::processSetup() {
_setupQueue.pop_front();
_pushSetupQueue.pop_front();
- connPtr->_global->_executor->schedule([ connPtr, callback = std::move(callback) ](auto&&) {
+ connPtr->_global->_executor->schedule([connPtr, callback = std::move(callback)](auto&&) {
auto cb = std::move(connPtr->_setupCallback);
connPtr->indicateUsed();
cb(connPtr, callback());
@@ -151,7 +149,7 @@ void ConnectionImpl::processRefresh() {
_refreshQueue.pop_front();
_pushRefreshQueue.pop_front();
- connPtr->_global->_executor->schedule([ connPtr, callback = std::move(callback) ](auto&&) {
+ connPtr->_global->_executor->schedule([connPtr, callback = std::move(callback)](auto&&) {
auto cb = std::move(connPtr->_refreshCallback);
connPtr->indicateUsed();
cb(connPtr, callback());
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index a7ab984b600..e2f7711cca7 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -139,7 +139,7 @@ AsyncDBClient* TLConnection::client() {
void TLConnection::setTimeout(Milliseconds timeout, TimeoutCallback cb) {
auto anchor = shared_from_this();
- _timer->setTimeout(timeout, [ cb = std::move(cb), anchor = std::move(anchor) ] { cb(); });
+ _timer->setTimeout(timeout, [cb = std::move(cb), anchor = std::move(anchor)] { cb(); });
}
void TLConnection::cancelTimeout() {
@@ -213,14 +213,14 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb) {
auto pf = makePromiseFuture<void>();
auto handler = std::make_shared<TimeoutHandler>(std::move(pf.promise));
std::move(pf.future).thenRunOn(_reactor).getAsync(
- [ this, cb = std::move(cb), anchor ](Status status) { cb(this, std::move(status)); });
+ [this, cb = std::move(cb), anchor](Status status) { cb(this, std::move(status)); });
setTimeout(timeout, [this, handler, timeout] {
if (handler->done.swap(true)) {
return;
}
- std::string reason = str::stream() << "Timed out connecting to " << _peer << " after "
- << timeout;
+ std::string reason = str::stream()
+ << "Timed out connecting to " << _peer << " after " << timeout;
handler->promise.setError(
Status(ErrorCodes::NetworkInterfaceExceededTimeLimit, std::move(reason)));
@@ -286,7 +286,7 @@ void TLConnection::refresh(Milliseconds timeout, RefreshCallback cb) {
auto pf = makePromiseFuture<void>();
auto handler = std::make_shared<TimeoutHandler>(std::move(pf.promise));
std::move(pf.future).thenRunOn(_reactor).getAsync(
- [ this, cb = std::move(cb), anchor ](Status status) { cb(this, status); });
+ [this, cb = std::move(cb), anchor](Status status) { cb(this, status); });
setTimeout(timeout, [this, handler] {
if (handler->done.swap(true)) {
@@ -361,4 +361,4 @@ Date_t TLTypeFactory::now() {
} // namespace connection_pool_tl
} // namespace executor
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/executor/connection_pool_tl.h b/src/mongo/executor/connection_pool_tl.h
index 7a138589055..7297713b92b 100644
--- a/src/mongo/executor/connection_pool_tl.h
+++ b/src/mongo/executor/connection_pool_tl.h
@@ -182,6 +182,6 @@ private:
AsyncDBClient::Handle _client;
};
-} // namespace connection_pool_asio
+} // namespace connection_pool_tl
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/executor/egress_tag_closer.h b/src/mongo/executor/egress_tag_closer.h
index 62448950c0e..4affb5e5c5b 100644
--- a/src/mongo/executor/egress_tag_closer.h
+++ b/src/mongo/executor/egress_tag_closer.h
@@ -49,9 +49,10 @@ public:
virtual void dropConnections(const HostAndPort& hostAndPort) = 0;
- virtual void mutateTags(const HostAndPort& hostAndPort,
- const stdx::function<transport::Session::TagMask(
- transport::Session::TagMask)>& mutateFunc) = 0;
+ virtual void mutateTags(
+ const HostAndPort& hostAndPort,
+ const stdx::function<transport::Session::TagMask(transport::Session::TagMask)>&
+ mutateFunc) = 0;
protected:
EgressTagCloser() {}
diff --git a/src/mongo/executor/network_interface_integration_test.cpp b/src/mongo/executor/network_interface_integration_test.cpp
index 57371e05545..f897d814409 100644
--- a/src/mongo/executor/network_interface_integration_test.cpp
+++ b/src/mongo/executor/network_interface_integration_test.cpp
@@ -87,8 +87,7 @@ class HangingHook : public executor::NetworkConnectionHook {
"admin",
BSON("sleep" << 1 << "lock"
<< "none"
- << "secs"
- << 100000000),
+ << "secs" << 100000000),
BSONObj(),
nullptr))};
}
@@ -274,8 +273,7 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeout) {
auto request = makeTestCommand(Milliseconds{1000});
request.cmdObj = BSON("sleep" << 1 << "lock"
<< "none"
- << "secs"
- << 1000000000);
+ << "secs" << 1000000000);
auto deferred = runCommand(cb, request);
waitForIsMaster();
@@ -322,14 +320,15 @@ TEST_F(NetworkInterfaceTest, SetAlarm) {
Date_t expiration = net().now() + Milliseconds(100);
auto makeTimerFuture = [&] {
auto pf = makePromiseFuture<Date_t>();
- return std::make_pair([ this, promise = std::move(pf.promise) ](Status status) mutable {
- if (status.isOK()) {
- promise.emplaceValue(net().now());
- } else {
- promise.setError(status);
- }
- },
- std::move(pf.future));
+ return std::make_pair(
+ [this, promise = std::move(pf.promise)](Status status) mutable {
+ if (status.isOK()) {
+ promise.emplaceValue(net().now());
+ } else {
+ promise.setError(status);
+ }
+ },
+ std::move(pf.future));
};
auto futurePair = makeTimerFuture();
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index 7f156744803..5d185619b4c 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -487,12 +487,14 @@ void NetworkInterfaceMock::_enqueueOperation_inlock(
ResponseStatus rs(
ErrorCodes::NetworkInterfaceExceededTimeLimit, "Network timeout", Milliseconds(0));
std::vector<NetworkOperationList*> queuesToCheck{&_unscheduled, &_blackHoled, &_scheduled};
- _alarms.emplace(cbh, _now_inlock() + timeout, [
- this,
- cbh = std::move(cbh),
- queuesToCheck = std::move(queuesToCheck),
- rs = std::move(rs)
- ](Status) { _interruptWithResponse_inlock(cbh, queuesToCheck, rs); });
+ _alarms.emplace(cbh,
+ _now_inlock() + timeout,
+ [this,
+ cbh = std::move(cbh),
+ queuesToCheck = std::move(queuesToCheck),
+ rs = std::move(rs)](Status) {
+ _interruptWithResponse_inlock(cbh, queuesToCheck, rs);
+ });
}
}
@@ -535,25 +537,25 @@ void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort
auto cbh = op.getCallbackHandle();
// The completion handler for the postconnect command schedules the original command.
auto postconnectCompletionHandler =
- [ this, op = std::move(op) ](TaskExecutor::ResponseOnAnyStatus rs) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (!rs.isOK()) {
- op.setResponse(_now_inlock(), rs);
- op.finishResponse();
- return;
- }
-
- auto handleStatus = _hook->handleReply(op.getRequest().target, std::move(rs));
-
- if (!handleStatus.isOK()) {
- op.setResponse(_now_inlock(), handleStatus);
- op.finishResponse();
- return;
- }
-
- _connections.emplace(op.getRequest().target);
- _enqueueOperation_inlock(std::move(op));
- };
+ [this, op = std::move(op)](TaskExecutor::ResponseOnAnyStatus rs) mutable {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (!rs.isOK()) {
+ op.setResponse(_now_inlock(), rs);
+ op.finishResponse();
+ return;
+ }
+
+ auto handleStatus = _hook->handleReply(op.getRequest().target, std::move(rs));
+
+ if (!handleStatus.isOK()) {
+ op.setResponse(_now_inlock(), handleStatus);
+ op.finishResponse();
+ return;
+ }
+
+ _connections.emplace(op.getRequest().target);
+ _enqueueOperation_inlock(std::move(op));
+ };
auto postconnectOp = NetworkOperation(cbh,
std::move(*hookPostconnectCommand),
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index c7528397951..cbd3484bf1c 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -239,21 +239,21 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
auto executor = baton ? ExecutorPtr(baton) : ExecutorPtr(_reactor);
std::move(cmdPF.future)
.thenRunOn(executor)
- .onError([requestId = cmdState->requestOnAny.id](auto error)
- ->StatusWith<RemoteCommandOnAnyResponse> {
- LOG(2) << "Failed to get connection from pool for request " << requestId
- << ": " << redact(error);
-
- // The TransportLayer has, for historical reasons returned SocketException
- // for network errors, but sharding assumes HostUnreachable on network
- // errors.
- if (error == ErrorCodes::SocketException) {
- error = Status(ErrorCodes::HostUnreachable, error.reason());
- }
- return error;
- })
- .getAsync([ this, cmdState, onFinish = std::move(onFinish) ](
- StatusWith<RemoteCommandOnAnyResponse> response) {
+ .onError([requestId = cmdState->requestOnAny.id](
+ auto error) -> StatusWith<RemoteCommandOnAnyResponse> {
+ LOG(2) << "Failed to get connection from pool for request " << requestId << ": "
+ << redact(error);
+
+ // The TransportLayer has, for historical reasons returned SocketException
+ // for network errors, but sharding assumes HostUnreachable on network
+ // errors.
+ if (error == ErrorCodes::SocketException) {
+ error = Status(ErrorCodes::HostUnreachable, error.reason());
+ }
+ return error;
+ })
+ .getAsync([this, cmdState, onFinish = std::move(onFinish)](
+ StatusWith<RemoteCommandOnAnyResponse> response) {
auto duration = now() - cmdState->start;
if (!response.isOK()) {
onFinish(RemoteCommandOnAnyResponse(boost::none, response.getStatus(), duration));
@@ -270,7 +270,7 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
return Status::OK();
}
- auto[connPromise, connFuture] = makePromiseFuture<ConnectionPool::ConnectionHandle>();
+ auto [connPromise, connFuture] = makePromiseFuture<ConnectionPool::ConnectionHandle>();
std::move(connFuture).thenRunOn(executor).getAsync([this, cmdState, baton](auto swConn) {
auto status = swConn.getStatus();
@@ -360,8 +360,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
uasserted(ErrorCodes::NetworkInterfaceExceededTimeLimit,
str::stream() << "Remote command timed out while waiting to get a "
"connection from the pool, took "
- << connDuration
- << ", timeout was set to "
+ << connDuration << ", timeout was set to "
<< state->requestOnAny.timeout);
}
@@ -509,14 +508,14 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
// If a user has already scheduled an alarm with a handle, make sure they intentionally
// override it by canceling and setting a new one.
auto alarmPair = std::make_pair(cbHandle, std::shared_ptr<AlarmState>(alarmState));
- auto && [ _, wasInserted ] = _inProgressAlarms.insert(std::move(alarmPair));
+ auto&& [_, wasInserted] = _inProgressAlarms.insert(std::move(alarmPair));
invariant(wasInserted);
}
- alarmState->timer->waitUntil(alarmState->when, nullptr).getAsync([
- this,
- state = std::move(alarmState)
- ](Status status) mutable { _answerAlarm(status, state); });
+ alarmState->timer->waitUntil(alarmState->when, nullptr)
+ .getAsync([this, state = std::move(alarmState)](Status status) mutable {
+ _answerAlarm(status, state);
+ });
return Status::OK();
}
@@ -546,7 +545,7 @@ void NetworkInterfaceTL::_cancelAllAlarms() {
return std::exchange(_inProgressAlarms, {});
}();
- for (auto && [ cbHandle, state ] : alarms) {
+ for (auto&& [cbHandle, state] : alarms) {
state->timer->cancel();
state->promise.setError(Status(ErrorCodes::CallbackCanceled, "Alarm cancelled"));
}
@@ -566,10 +565,10 @@ void NetworkInterfaceTL::_answerAlarm(Status status, std::shared_ptr<AlarmState>
if (status.isOK() && currentTime < state->when) {
LOG(2) << "Alarm returned early. Expected at: " << state->when
<< ", fired at: " << currentTime;
- state->timer->waitUntil(state->when, nullptr).getAsync([
- this,
- state = std::move(state)
- ](Status status) mutable { _answerAlarm(status, state); });
+ state->timer->waitUntil(state->when, nullptr)
+ .getAsync([this, state = std::move(state)](Status status) mutable {
+ _answerAlarm(status, state);
+ });
return;
}
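The .onError() hunk above also shows how clang-format 7 handles a lambda with an explicit trailing return type: rather than hugging the arrow to the parameter list and breaking before it, it breaks inside the parentheses and writes the arrow spaced, as ") -> Type {". In minimal, compilable form (the long capture list that forces the break in the real code is omitted):

    #include <iostream>
    #include <string>

    int main() {
        // clang-format 7 renders the arrow as " -> ", never "->" glued on.
        auto describe = [](auto error) -> std::string { return std::to_string(error); };
        std::cout << describe(404) << std::endl;
        return 0;
    }
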
diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp
index 0f718242163..6f2b4823139 100644
--- a/src/mongo/executor/scoped_task_executor.cpp
+++ b/src/mongo/executor/scoped_task_executor.cpp
@@ -68,7 +68,7 @@ public:
return _cbHandles;
}();
- for (auto & [ id, handle ] : handles) {
+ for (auto& [id, handle] : handles) {
// If we don't have a handle yet, it means there's a scheduling thread that's
// dropped the lock but hasn't yet stashed it (or failed to schedule it on the
// underlying executor).
@@ -223,7 +223,7 @@ private:
// State 2 - Indeterminate state. We don't know yet if the task will get scheduled.
auto swCbHandle = std::forward<ScheduleCall>(schedule)(
- [ id, work = std::forward<Work>(work), self = shared_from_this() ](const auto& cargs) {
+ [id, work = std::forward<Work>(work), self = shared_from_this()](const auto& cargs) {
using ArgsT = std::decay_t<decltype(cargs)>;
stdx::unique_lock<stdx::mutex> lk(self->_mutex);
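The structured-binding changes scattered through these files — auto[results, needsMatch], auto && [ _, wasInserted ], for (auto & [ id, handle ] : handles) — come from clang-format 7 treating the C++17 binding list like a declarator: the reference qualifier attaches to auto and a single space precedes the bracket. A standalone sketch of the new shape, not from the patch:

    #include <map>
    #include <string>
    #include <utility>

    int main() {
        std::map<std::string, int> counts;
        // Old: auto && [ _, wasInserted ] = ...;  new shape below.
        auto&& [it, wasInserted] = counts.insert(std::make_pair(std::string("k"), 1));
        (void)it;
        // Old: for (auto & [ id, handle ] : handles);  new shape below.
        for (auto& [key, value] : counts) {
            value += wasInserted ? 1 : 0;
            (void)key;
        }
        return 0;
    }
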
diff --git a/src/mongo/executor/task_executor_cursor_integration_test.cpp b/src/mongo/executor/task_executor_cursor_integration_test.cpp
index e65068990dc..55b75650083 100644
--- a/src/mongo/executor/task_executor_cursor_integration_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_integration_test.cpp
@@ -90,8 +90,7 @@ TEST_F(TaskExecutorCursorFixture, Basic) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 10),
+ << "batchSize" << 10),
opCtx.get());
TaskExecutorCursor tec(executor(), rcr, [] {
diff --git a/src/mongo/executor/task_executor_cursor_test.cpp b/src/mongo/executor/task_executor_cursor_test.cpp
index 7fc7af43c65..57719c44a2c 100644
--- a/src/mongo/executor/task_executor_cursor_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_test.cpp
@@ -95,14 +95,10 @@ public:
NetworkInterfaceMock::InNetworkGuard ing(getNet());
ASSERT(getNet()->hasReadyRequests());
- auto rcr = getNet()->scheduleSuccessfulResponse(BSON(
- "cursorsKilled" << BSON_ARRAY((long long)(cursorId)) << "cursorsNotFound" << BSONArray()
- << "cursorsAlive"
- << BSONArray()
- << "cursorsUnknown"
- << BSONArray()
- << "ok"
- << 1));
+ auto rcr = getNet()->scheduleSuccessfulResponse(
+ BSON("cursorsKilled" << BSON_ARRAY((long long)(cursorId)) << "cursorsNotFound"
+ << BSONArray() << "cursorsAlive" << BSONArray() << "cursorsUnknown"
+ << BSONArray() << "ok" << 1));
getNet()->runReadyNetworkOperations();
return rcr.cmdObj.getOwned();
@@ -124,8 +120,7 @@ public:
TEST_F(TaskExecutorCursorFixture, SingleBatchWorks) {
auto findCmd = BSON("find"
<< "test"
- << "batchSize"
- << 2);
+ << "batchSize" << 2);
RemoteCommandRequest rcr(HostAndPort("localhost"), "test", findCmd, opCtx.get());
@@ -150,8 +145,7 @@ TEST_F(TaskExecutorCursorFixture, FailureInFind) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
TaskExecutorCursor tec(&getExecutor(), rcr);
@@ -175,8 +169,7 @@ TEST_F(TaskExecutorCursorFixture, EarlyReturnKillsCursor) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
{
@@ -189,8 +182,7 @@ TEST_F(TaskExecutorCursorFixture, EarlyReturnKillsCursor) {
ASSERT_BSONOBJ_EQ(BSON("killCursors"
<< "test"
- << "cursors"
- << BSON_ARRAY(1)),
+ << "cursors" << BSON_ARRAY(1)),
scheduleSuccessfulKillCursorResponse(1));
}
@@ -202,8 +194,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
"test",
BSON("find"
<< "test"
- << "batchSize"
- << 2),
+ << "batchSize" << 2),
opCtx.get());
TaskExecutorCursor tec(&getExecutor(), rcr, [] {
@@ -230,8 +221,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
// We can pick up after that interruption though
ASSERT_BSONOBJ_EQ(BSON("getMore" << (long long)(1) << "collection"
<< "test"
- << "batchSize"
- << 3),
+ << "batchSize" << 3),
scheduleSuccessfulCursorResponse("nextBatch", 3, 5, 1));
ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 3);
@@ -257,8 +247,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
auto findCmd = BSON("find"
<< "test"
- << "batchSize"
- << 1);
+ << "batchSize" << 1);
RemoteCommandRequest rcr(HostAndPort("localhost"), "test", findCmd, opCtx.get());
@@ -272,10 +261,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the first batch
ASSERT_BSONOBJ_EQ(BSON("find"
<< "test"
- << "batchSize"
- << 1
- << "lsid"
- << lsid.toBSON()),
+ << "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("firstBatch", 1, 1, 1));
ASSERT_EQUALS(tec->getNext(opCtx.get()).get()["x"].Int(), 1);
@@ -283,10 +269,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the getmore
ASSERT_BSONOBJ_EQ(BSON("getMore" << (long long)(1) << "collection"
<< "test"
- << "batchSize"
- << 1
- << "lsid"
- << lsid.toBSON()),
+ << "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("nextBatch", 2, 2, 1));
tec.reset();
@@ -294,10 +277,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
// lsid in the killcursor
ASSERT_BSONOBJ_EQ(BSON("killCursors"
<< "test"
- << "cursors"
- << BSON_ARRAY(1)
- << "lsid"
- << lsid.toBSON()),
+ << "cursors" << BSON_ARRAY(1) << "lsid" << lsid.toBSON()),
scheduleSuccessfulKillCursorResponse(1));
ASSERT_FALSE(hasReadyRequests());
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index f98e71e5736..6c3edfd5bef 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -150,10 +150,9 @@ auto makeSetStatusOnRemoteCommandCompletionClosure(const RemoteCommandRequest* e
return str::stream() << "Request(" << request.target.toString() << ", "
<< request.dbname << ", " << request.cmdObj << ')';
};
- *outStatus =
- Status(ErrorCodes::BadValue,
- str::stream() << "Actual request: " << desc(cbData.request) << "; expected: "
- << desc(*expectedRequest));
+ *outStatus = Status(ErrorCodes::BadValue,
+ str::stream() << "Actual request: " << desc(cbData.request)
+ << "; expected: " << desc(*expectedRequest));
return;
}
*outStatus = cbData.response.status;
diff --git a/src/mongo/executor/task_executor_test_common.h b/src/mongo/executor/task_executor_test_common.h
index ba8c9ceb967..54e2fb6ae5f 100644
--- a/src/mongo/executor/task_executor_test_common.h
+++ b/src/mongo/executor/task_executor_test_common.h
@@ -50,9 +50,10 @@ class TaskExecutor;
* presumably after the release of MSVC2015, the signature can be changed to take the unique_ptr
* by value.
*/
-void addTestsForExecutor(const std::string& suiteName,
- stdx::function<std::unique_ptr<TaskExecutor>(
- std::unique_ptr<NetworkInterfaceMock>)> makeExecutor);
+void addTestsForExecutor(
+ const std::string& suiteName,
+ stdx::function<std::unique_ptr<TaskExecutor>(std::unique_ptr<NetworkInterfaceMock>)>
+ makeExecutor);
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 7ef0669aea0..808b2a7350c 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -360,7 +360,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
lk.unlock();
auto status = _net->setAlarm(
- cbHandle.getValue(), when, [ this, cbHandle = cbHandle.getValue() ](Status status) {
+ cbHandle.getValue(), when, [this, cbHandle = cbHandle.getValue()](Status status) {
if (status == ErrorCodes::CallbackCanceled) {
return;
}
diff --git a/src/mongo/idl/config_option_test.cpp b/src/mongo/idl/config_option_test.cpp
index 9e94434da85..e3e8b8782df 100644
--- a/src/mongo/idl/config_option_test.cpp
+++ b/src/mongo/idl/config_option_test.cpp
@@ -501,8 +501,7 @@ TEST(RedactionBSON, Strings) {
<< "also not a password"
<< "test.config.opt16depr2"
<< "this password should also be censored"
- << "lastarg"
- << false);
+ << "lastarg" << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -514,8 +513,7 @@ TEST(RedactionBSON, Strings) {
<< "also not a password"
<< "test.config.opt16depr2"
<< "<password>"
- << "lastarg"
- << false);
+ << "lastarg" << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_BSONOBJ_EQ(res, obj);
@@ -535,8 +533,7 @@ TEST(RedactionBSON, Arrays) {
<< "test.config.opt16depr2"
<< BSON_ARRAY("first censored password"
<< "next censored password")
- << "lastarg"
- << false);
+ << "lastarg" << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -551,8 +548,7 @@ TEST(RedactionBSON, Arrays) {
<< "test.config.opt16depr2"
<< BSON_ARRAY("<password>"
<< "<password>")
- << "lastarg"
- << false);
+ << "lastarg" << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_BSONOBJ_EQ(res, obj);
@@ -571,8 +567,7 @@ TEST(RedactionBSON, SubObjects) {
<< "next censored password")
<< "opt16depr"
<< "should be censored too"))
- << "lastarg"
- << false);
+ << "lastarg" << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -586,8 +581,7 @@ TEST(RedactionBSON, SubObjects) {
<< "<password>")
<< "opt16depr"
<< "<password>"))
- << "lastarg"
- << false);
+ << "lastarg" << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_BSONOBJ_EQ(res, obj);
@@ -620,7 +614,9 @@ TEST(ConfigOptionNoInit, Opt1) {
ASSERT_OK(addIDLTestConfigs(&options));
const std::vector<std::string> argv({
- "mongod", "--testConfigNoInitOpt1", "Hello",
+ "mongod",
+ "--testConfigNoInitOpt1",
+ "Hello",
});
moe::Environment parsed;
ASSERT_OK(moe::OptionsParser().run(options, argv, {}, &parsed));
diff --git a/src/mongo/idl/idl_parser.cpp b/src/mongo/idl/idl_parser.cpp
index c28a17c16c0..0adb3fa62c2 100644
--- a/src/mongo/idl/idl_parser.cpp
+++ b/src/mongo/idl/idl_parser.cpp
@@ -76,9 +76,7 @@ bool IDLParserErrorContext::checkAndAssertTypeSlowPath(const BSONElement& elemen
std::string path = getElementPath(element);
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "BSON field '" << path << "' is the wrong type '"
- << typeName(elementType)
- << "', expected type '"
- << typeName(type)
+ << typeName(elementType) << "', expected type '" << typeName(type)
<< "'");
}
@@ -93,10 +91,8 @@ bool IDLParserErrorContext::checkAndAssertBinDataTypeSlowPath(const BSONElement&
std::string path = getElementPath(element);
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "BSON field '" << path << "' is the wrong bindData type '"
- << typeName(element.binDataType())
- << "', expected type '"
- << typeName(type)
- << "'");
+ << typeName(element.binDataType()) << "', expected type '"
+ << typeName(type) << "'");
}
return true;
@@ -117,9 +113,7 @@ bool IDLParserErrorContext::checkAndAssertTypes(const BSONElement& element,
std::string type_str = toCommaDelimitedList(types);
uasserted(ErrorCodes::TypeMismatch,
str::stream() << "BSON field '" << path << "' is the wrong type '"
- << typeName(element.type())
- << "', expected types '["
- << type_str
+ << typeName(element.type()) << "', expected types '[" << type_str
<< "']");
}
@@ -204,10 +198,8 @@ void IDLParserErrorContext::throwBadArrayFieldNumberSequence(std::uint32_t actua
std::string path = getElementPath(StringData());
uasserted(40423,
str::stream() << "BSON array field '" << path << "' has a non-sequential value '"
- << actualValue
- << "' for an array field name, expected value '"
- << expectedValue
- << "'.");
+ << actualValue << "' for an array field name, expected value '"
+ << expectedValue << "'.");
}
void IDLParserErrorContext::throwBadEnumValue(int enumValue) const {
diff --git a/src/mongo/idl/idl_parser.h b/src/mongo/idl/idl_parser.h
index 32a3f83b1af..70fdcd97f76 100644
--- a/src/mongo/idl/idl_parser.h
+++ b/src/mongo/idl/idl_parser.h
@@ -188,8 +188,8 @@ private:
bool checkAndAssertTypeSlowPath(const BSONElement& element, BSONType type) const;
/**
- * See comment on checkAndAssertBinDataType.
- */
+ * See comment on checkAndAssertBinDataType.
+ */
bool checkAndAssertBinDataTypeSlowPath(const BSONElement& element, BinDataType type) const;
private:
@@ -222,10 +222,7 @@ template <typename T>
void throwComparisonError(StringData fieldName, StringData op, T actualValue, T expectedValue) {
uasserted(51024,
str::stream() << "BSON field '" << fieldName << "' value must be " << op << " "
- << expectedValue
- << ", actual value '"
- << actualValue
- << "'");
+ << expectedValue << ", actual value '" << actualValue << "'");
}
diff --git a/src/mongo/idl/idl_test.cpp b/src/mongo/idl/idl_test.cpp
index f08ec698351..48bfe499823 100644
--- a/src/mongo/idl/idl_test.cpp
+++ b/src/mongo/idl/idl_test.cpp
@@ -100,8 +100,8 @@ void assertOpMsgEquals(const OpMsgRequest& left, const OpMsgRequest& right) {
}
/**
-* Validate two OpMsgRequests are the same including their DocumentSequences.
-*/
+ * Validate two OpMsgRequests are the same including their DocumentSequences.
+ */
void assertOpMsgEqualsExact(const OpMsgRequest& left, const OpMsgRequest& right) {
ASSERT_BSONOBJ_EQ(left.body, right.body);
@@ -766,11 +766,8 @@ TEST(IDLFieldTests, TestOptionalFields) {
template <typename TestT>
void TestWeakType(TestT test_value) {
IDLParserErrorContext ctxt("root");
- auto testDoc =
- BSON("field1" << test_value << "field2" << test_value << "field3" << test_value << "field4"
- << test_value
- << "field5"
- << test_value);
+ auto testDoc = BSON("field1" << test_value << "field2" << test_value << "field3" << test_value
+ << "field4" << test_value << "field5" << test_value);
auto testStruct = Optional_field::parse(ctxt, testDoc);
ASSERT_FALSE(testStruct.getField1().is_initialized());
@@ -860,11 +857,8 @@ TEST(IDLArrayTests, TestSimpleArrays) {
auto testDoc = BSON("field1" << BSON_ARRAY("Foo"
<< "Bar"
<< "???")
- << "field2"
- << BSON_ARRAY(1 << 2 << 3)
- << "field3"
- << BSON_ARRAY(1.2 << 3.4 << 5.6)
- << "field4"
+ << "field2" << BSON_ARRAY(1 << 2 << 3) << "field3"
+ << BSON_ARRAY(1.2 << 3.4 << 5.6) << "field4"
<< BSON_ARRAY(BSONBinData(array1, 3, BinDataGeneral)
<< BSONBinData(array2, 3, BinDataGeneral))
<< "field5"
@@ -927,12 +921,10 @@ TEST(IDLArrayTests, TestSimpleOptionalArrays) {
auto testDoc = BSON("field1" << BSON_ARRAY("Foo"
<< "Bar"
<< "???")
- << "field2"
- << BSON_ARRAY(1 << 2 << 3)
- << "field3"
+ << "field2" << BSON_ARRAY(1 << 2 << 3) << "field3"
<< BSON_ARRAY(1.2 << 3.4 << 5.6)
- );
+ );
auto testStruct = Optional_array_fields::parse(ctxt, testDoc);
assert_same_types<decltype(testStruct.getField1()),
@@ -1062,35 +1054,27 @@ TEST(IDLArrayTests, TestArraysOfComplexTypes) {
IDLParserErrorContext ctxt("root");
// Positive: Test document
- auto testDoc = BSON("field1" << BSON_ARRAY(1 << 2 << 3) << "field2" << BSON_ARRAY("a.b"
- << "c.d")
- << "field3"
- << BSON_ARRAY(1 << "2")
- << "field4"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "field5"
- << BSON_ARRAY(BSONObj() << BSONObj() << BSONObj())
- << "field6"
+ auto testDoc = BSON("field1" << BSON_ARRAY(1 << 2 << 3) << "field2"
+ << BSON_ARRAY("a.b"
+ << "c.d")
+ << "field3" << BSON_ARRAY(1 << "2") << "field4"
+ << BSON_ARRAY(BSONObj() << BSONObj()) << "field5"
+ << BSON_ARRAY(BSONObj() << BSONObj() << BSONObj()) << "field6"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "field1o"
- << BSON_ARRAY(1 << 2 << 3)
- << "field2o"
+ << "field1o" << BSON_ARRAY(1 << 2 << 3) << "field2o"
<< BSON_ARRAY("a.b"
<< "c.d")
- << "field3o"
- << BSON_ARRAY(1 << "2")
- << "field4o"
- << BSON_ARRAY(BSONObj() << BSONObj())
- << "field6o"
+ << "field3o" << BSON_ARRAY(1 << "2") << "field4o"
+ << BSON_ARRAY(BSONObj() << BSONObj()) << "field6o"
<< BSON_ARRAY(BSON("value"
<< "goodbye")
<< BSON("value"
<< "world"))
- );
+ );
auto testStruct = Complex_array_fields::parse(ctxt, testDoc);
assert_same_types<decltype(testStruct.getField1()), const std::vector<std::int64_t>&>();
@@ -1406,8 +1390,7 @@ TEST(IDLChainedType, TestChainedType) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5);
+ << "field2" << 5);
auto testStruct = Chained_struct_only::parse(ctxt, testDoc);
@@ -1450,10 +1433,7 @@ TEST(IDLChainedType, TestExtraFields) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5
- << "field3"
- << 123456);
+ << "field2" << 5 << "field3" << 123456);
auto testStruct = Chained_struct_only::parse(ctxt, testDoc);
ASSERT_EQUALS(testStruct.getChainedType().getField1(), "abc");
@@ -1467,10 +1447,7 @@ TEST(IDLChainedType, TestDuplicateFields) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5
- << "field2"
- << 123456);
+ << "field2" << 5 << "field2" << 123456);
ASSERT_THROWS(Chained_struct_only::parse(ctxt, testDoc), AssertionException);
}
@@ -1480,8 +1457,9 @@ TEST(IDLChainedType, TestDuplicateFields) {
TEST(IDLChainedType, TestChainedStruct) {
IDLParserErrorContext ctxt("root");
- auto testDoc = BSON("anyField" << 123.456 << "objectField" << BSON("random"
- << "pair")
+ auto testDoc = BSON("anyField" << 123.456 << "objectField"
+ << BSON("random"
+ << "pair")
<< "field3"
<< "abc");
@@ -1511,13 +1489,10 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
{
auto testDoc = BSON("field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair")
- << "extraField"
- << 787);
+ << "extraField" << 787);
ASSERT_THROWS(Chained_struct_mixed::parse(ctxt, testDoc), AssertionException);
}
@@ -1526,13 +1501,10 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
{
auto testDoc = BSON("field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair")
- << "anyField"
- << 787);
+ << "anyField" << 787);
ASSERT_THROWS(Chained_struct_mixed::parse(ctxt, testDoc), AssertionException);
}
@@ -1542,9 +1514,7 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
<< "thing")
<< "field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair"));
ASSERT_THROWS(Chained_struct_mixed::parse(ctxt, testDoc), AssertionException);
@@ -1554,9 +1524,7 @@ TEST(IDLChainedType, TestChainedStructWithExtraFields) {
{
auto testDoc = BSON("field3"
<< "abc"
- << "anyField"
- << 123.456
- << "objectField"
+ << "anyField" << 123.456 << "objectField"
<< BSON("random"
<< "pair")
<< "field3"
@@ -1572,12 +1540,9 @@ TEST(IDLChainedType, TestChainedMixedStruct) {
auto testDoc = BSON("field1"
<< "abc"
- << "field2"
- << 5
- << "stringField"
+ << "field2" << 5 << "stringField"
<< "def"
- << "field3"
- << 456);
+ << "field3" << 456);
auto testStruct = Chained_struct_type_mixed::parse(ctxt, testDoc);
@@ -1718,9 +1683,7 @@ TEST(IDLCommand, TestConcatentateWithDb) {
IDLParserErrorContext ctxt("root");
auto testDoc = BSON(BasicConcatenateWithDbCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "$db"
<< "db");
@@ -1742,11 +1705,10 @@ TEST(IDLCommand, TestConcatentateWithDb) {
// Positive: Test we can serialize from nothing the same document except for $db
{
- auto testDocWithoutDb = BSON(BasicConcatenateWithDbCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
- << "five");
+ auto testDocWithoutDb =
+ BSON(BasicConcatenateWithDbCommand::kCommandName << "coll1"
+ << "field1" << 3 << "field2"
+ << "five");
BSONObjBuilder builder;
BasicConcatenateWithDbCommand one_new(NamespaceString("db.coll1"));
@@ -1791,11 +1753,10 @@ TEST(IDLCommand, TestConcatentateWithDbNegative) {
// Negative - duplicate namespace field
{
- auto testDoc = BSON("BasicConcatenateWithDbCommand" << 1 << "field1" << 3
- << "BasicConcatenateWithDbCommand"
- << 1
- << "field2"
- << "five");
+ auto testDoc =
+ BSON("BasicConcatenateWithDbCommand" << 1 << "field1" << 3
+ << "BasicConcatenateWithDbCommand" << 1 << "field2"
+ << "five");
ASSERT_THROWS(BasicConcatenateWithDbCommand::parse(ctxt, makeOMR(testDoc)),
AssertionException);
}
@@ -1838,13 +1799,12 @@ TEST(IDLCommand, TestConcatentateWithDbNegative) {
TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS) {
IDLParserErrorContext ctxt("root");
- auto testDoc = BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
- << "five"
- << "$db"
- << "db");
+ auto testDoc =
+ BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
+ << "field1" << 3 << "field2"
+ << "five"
+ << "$db"
+ << "db");
auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(testStruct.getField1(), 3);
@@ -1863,11 +1823,10 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS) {
// Positive: Test we can serialize from nothing the same document except for $db
{
- auto testDocWithoutDb = BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
- << "field1"
- << 3
- << "field2"
- << "five");
+ auto testDocWithoutDb =
+ BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << "coll1"
+ << "field1" << 3 << "field2"
+ << "five");
BSONObjBuilder builder;
BasicConcatenateWithDbOrUUIDCommand one_new(NamespaceString("db.coll1"));
@@ -1921,9 +1880,9 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID) {
// Positive: Test we can serialize from nothing the same document except for $db
{
- auto testDocWithoutDb = BSON(
- BasicConcatenateWithDbOrUUIDCommand::kCommandName << uuid << "field1" << 3 << "field2"
- << "five");
+ auto testDocWithoutDb = BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName
+ << uuid << "field1" << 3 << "field2"
+ << "five");
BSONObjBuilder builder;
BasicConcatenateWithDbOrUUIDCommand one_new(NamespaceStringOrUUID("db", uuid));
@@ -1954,11 +1913,9 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUIDNegative) {
// Negative - duplicate namespace field
{
auto testDoc =
- BSON("BasicConcatenateWithDbOrUUIDCommand" << 1 << "field1" << 3
- << "BasicConcatenateWithDbOrUUIDCommand"
- << 1
- << "field2"
- << "five");
+ BSON("BasicConcatenateWithDbOrUUIDCommand"
+ << 1 << "field1" << 3 << "BasicConcatenateWithDbOrUUIDCommand" << 1 << "field2"
+ << "five");
ASSERT_THROWS(BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc)),
AssertionException);
}
@@ -2040,9 +1997,9 @@ TEST(IDLCommand, TestIgnoredNegative) {
// Negative - duplicate namespace field
{
- auto testDoc = BSON(
- "BasicIgnoredCommand" << 1 << "field1" << 3 << "BasicIgnoredCommand" << 1 << "field2"
- << "five");
+ auto testDoc = BSON("BasicIgnoredCommand" << 1 << "field1" << 3 << "BasicIgnoredCommand"
+ << 1 << "field2"
+ << "five");
ASSERT_THROWS(BasicIgnoredCommand::parse(ctxt, makeOMR(testDoc)), AssertionException);
}
@@ -2067,9 +2024,7 @@ TEST(IDLDocSequence, TestBasic) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "$db"
<< "db"
@@ -2078,8 +2033,7 @@ TEST(IDLDocSequence, TestBasic) {
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request;
request.body = testTempDoc;
@@ -2145,15 +2099,12 @@ TEST(IDLDocSequence, TestMissingDB) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request;
request.body = testTempDoc;
@@ -2167,9 +2118,7 @@ void TestDocSequence(StringData name) {
IDLParserErrorContext ctxt("root");
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five");
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
@@ -2208,9 +2157,7 @@ void TestBadDocSequences(StringData name, bool extraFieldAllowed) {
IDLParserErrorContext ctxt("root");
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five");
// Negative: Duplicate fields in doc sequence
@@ -2279,17 +2226,14 @@ void TestDuplicateDocSequences(StringData name) {
// Negative: Duplicate fields in doc sequence and body
{
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs",
@@ -2304,17 +2248,14 @@ void TestDuplicateDocSequences(StringData name) {
// Negative: Duplicate fields in doc sequence and body
{
auto testTempDoc = BSON(name << "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"objects", {BSON("foo" << 1)}});
@@ -2337,17 +2278,14 @@ TEST(IDLDocSequence, TestEmptySequence) {
{
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs", {}});
@@ -2359,12 +2297,9 @@ TEST(IDLDocSequence, TestEmptySequence) {
{
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs", {}});
@@ -2396,19 +2331,14 @@ TEST(IDLDocSequence, TestWellKnownFieldsAreIgnored) {
for (auto knownField : knownFields) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
- << "five"
- << knownField
- << "extra"
+ << "field1" << 3 << "field2"
+ << "five" << knownField << "extra"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
@@ -2448,21 +2378,16 @@ TEST(IDLDocSequence, TestWellKnownFieldsPassthrough) {
for (auto knownField : knownFields) {
auto testTempDoc = BSON("DocSequenceCommand"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
<< "$db"
- << "db"
- << knownField
- << "extra"
+ << "db" << knownField << "extra"
<< "structs"
<< BSON_ARRAY(BSON("value"
<< "hello")
<< BSON("value"
<< "world"))
- << "objects"
- << BSON_ARRAY(BSON("foo" << 1)));
+ << "objects" << BSON_ARRAY(BSON("foo" << 1)));
OpMsgRequest request;
request.body = testTempDoc;
@@ -2482,9 +2407,7 @@ TEST(IDLDocSequence, TestNonStrict) {
{
auto testTempDoc = BSON("DocSequenceCommandNonStrict"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five");
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
@@ -2504,12 +2427,9 @@ TEST(IDLDocSequence, TestNonStrict) {
{
auto testTempDoc = BSON("DocSequenceCommandNonStrict"
<< "coll1"
- << "field1"
- << 3
- << "field2"
+ << "field1" << 3 << "field2"
<< "five"
- << "extra"
- << 1);
+ << "extra" << 1);
OpMsgRequest request = OpMsgRequest::fromDBAndBody("db", testTempDoc);
request.sequences.push_back({"structs",
@@ -2531,19 +2451,14 @@ TEST(IDLCommand, TestKnownFieldDuplicate) {
auto testPassthrough = BSON("$db"
<< "foo"
- << "maxTimeMS"
- << 6
- << "$client"
+ << "maxTimeMS" << 6 << "$client"
<< "foo");
auto testDoc = BSON("KnownFieldCommand"
<< "coll1"
<< "$db"
<< "db"
- << "field1"
- << 28
- << "maxTimeMS"
- << 42);
+ << "field1" << 28 << "maxTimeMS" << 42);
auto testStruct = KnownFieldCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(28, testStruct.getField1());
@@ -2553,11 +2468,7 @@ TEST(IDLCommand, TestKnownFieldDuplicate) {
auto expectedOpMsgDoc = BSON("KnownFieldCommand"
<< "coll1"
- << "field1"
- << 28
- << "maxTimeMS"
- << 42
- << "$db"
+ << "field1" << 28 << "maxTimeMS" << 42 << "$db"
<< "db"
<< "$client"
@@ -2569,11 +2480,7 @@ TEST(IDLCommand, TestKnownFieldDuplicate) {
auto expectedBSONDoc = BSON("KnownFieldCommand"
<< "coll1"
- << "field1"
- << 28
- << "maxTimeMS"
- << 42
- << "$db"
+ << "field1" << 28 << "maxTimeMS" << 42 << "$db"
<< "foo"
<< "$client"
@@ -2664,14 +2571,9 @@ TEST(IDLValidatedField, Int_basic_ranges) {
std::int32_t byte_range,
std::int32_t int_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int" << nonneg
- << "non_positive_int"
- << nonpos
- << "byte_range_int"
- << byte_range
- << "range_int"
- << int_range);
+ auto doc = BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int"
+ << nonneg << "non_positive_int" << nonpos << "byte_range_int"
+ << byte_range << "range_int" << int_range);
auto obj = Int_basic_ranges::parse(ctxt, doc);
ASSERT_EQUALS(obj.getPositive_int(), pos);
ASSERT_EQUALS(obj.getNegative_int(), neg);
@@ -2689,14 +2591,9 @@ TEST(IDLValidatedField, Int_basic_ranges) {
std::int32_t byte_range,
std::int32_t int_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int" << nonneg
- << "non_positive_int"
- << nonpos
- << "byte_range_int"
- << byte_range
- << "range_int"
- << int_range);
+ auto doc = BSON("positive_int" << pos << "negative_int" << neg << "non_negative_int"
+ << nonneg << "non_positive_int" << nonpos << "byte_range_int"
+ << byte_range << "range_int" << int_range);
ASSERT_THROWS(Int_basic_ranges::parse(ctxt, doc), AssertionException);
};
@@ -2744,13 +2641,9 @@ TEST(IDLValidatedField, Double_basic_ranges) {
const auto tryPass =
[](double pos, double neg, double nonneg, double nonpos, double double_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_double" << pos << "negative_double" << neg << "non_negative_double"
- << nonneg
- << "non_positive_double"
- << nonpos
- << "range_double"
- << double_range);
+ auto doc = BSON("positive_double"
+ << pos << "negative_double" << neg << "non_negative_double" << nonneg
+ << "non_positive_double" << nonpos << "range_double" << double_range);
auto obj = Double_basic_ranges::parse(ctxt, doc);
ASSERT_EQUALS(obj.getPositive_double(), pos);
ASSERT_EQUALS(obj.getNegative_double(), neg);
@@ -2763,13 +2656,9 @@ TEST(IDLValidatedField, Double_basic_ranges) {
const auto tryFail =
[](double pos, double neg, double nonneg, double nonpos, double double_range) {
IDLParserErrorContext ctxt("root");
- auto doc =
- BSON("positive_double" << pos << "negative_double" << neg << "non_negative_double"
- << nonneg
- << "non_positive_double"
- << nonpos
- << "range_double"
- << double_range);
+ auto doc = BSON("positive_double"
+ << pos << "negative_double" << neg << "non_negative_double" << nonneg
+ << "non_positive_double" << nonpos << "range_double" << double_range);
ASSERT_THROWS(Double_basic_ranges::parse(ctxt, doc), AssertionException);
};
@@ -2807,8 +2696,7 @@ TEST(IDLValidatedField, Callback_validators) {
[](std::int32_t int_even, double double_nearly_int, StringData string_starts_with_x) {
IDLParserErrorContext ctxt("root");
auto doc = BSON("int_even" << int_even << "double_nearly_int" << double_nearly_int
- << "string_starts_with_x"
- << string_starts_with_x);
+ << "string_starts_with_x" << string_starts_with_x);
auto obj = Callback_validators::parse(ctxt, doc);
ASSERT_EQUALS(obj.getInt_even(), int_even);
ASSERT_EQUALS(obj.getDouble_nearly_int(), double_nearly_int);
@@ -2820,8 +2708,7 @@ TEST(IDLValidatedField, Callback_validators) {
[](std::int32_t int_even, double double_nearly_int, StringData string_starts_with_x) {
IDLParserErrorContext ctxt("root");
auto doc = BSON("int_even" << int_even << "double_nearly_int" << double_nearly_int
- << "string_starts_with_x"
- << string_starts_with_x);
+ << "string_starts_with_x" << string_starts_with_x);
ASSERT_THROWS(Callback_validators::parse(ctxt, doc), AssertionException);
};
@@ -2844,9 +2731,7 @@ TEST(IDLTypeCommand, TestString) {
IDLParserErrorContext ctxt("root");
auto testDoc = BSON(CommandTypeStringCommand::kCommandName << "foo"
- << "field1"
- << 3
- << "$db"
+ << "field1" << 3 << "$db"
<< "db");
auto testStruct = CommandTypeStringCommand::parse(ctxt, makeOMR(testDoc));
@@ -2866,8 +2751,7 @@ TEST(IDLTypeCommand, TestString) {
// Positive: Test we can serialize from nothing the same document except for $db
{
auto testDocWithoutDb = BSON(CommandTypeStringCommand::kCommandName << "foo"
- << "field1"
- << 3);
+ << "field1" << 3);
BSONObjBuilder builder;
CommandTypeStringCommand one_new("foo");
@@ -3007,9 +2891,7 @@ TEST(IDLTypeCommand, TestUnderscoreCommand) {
IDLParserErrorContext ctxt("root");
auto testDoc = BSON(WellNamedCommand::kCommandName << "foo"
- << "field1"
- << 3
- << "$db"
+ << "field1" << 3 << "$db"
<< "db");
auto testStruct = WellNamedCommand::parse(ctxt, makeOMR(testDoc));
@@ -3029,8 +2911,7 @@ TEST(IDLTypeCommand, TestUnderscoreCommand) {
// Positive: Test we can serialize from nothing the same document except for $db
{
auto testDocWithoutDb = BSON(WellNamedCommand::kCommandName << "foo"
- << "field1"
- << 3);
+ << "field1" << 3);
BSONObjBuilder builder;
WellNamedCommand one_new("foo");
diff --git a/src/mongo/idl/server_parameter_specialized_test.cpp b/src/mongo/idl/server_parameter_specialized_test.cpp
index 07ce7e4dc8c..5587e29fad9 100644
--- a/src/mongo/idl/server_parameter_specialized_test.cpp
+++ b/src/mongo/idl/server_parameter_specialized_test.cpp
@@ -242,24 +242,20 @@ TEST(SpecializedServerParameter, multiValue) {
ASSERT_APPENDED_OBJECT(edsp,
BSON("value"
<< "start value"
- << "flag"
- << true));
+ << "flag" << true));
ASSERT_OK(edsp->setFromString("second value"));
ASSERT_APPENDED_OBJECT(edsp,
BSON("value"
<< "second value"
- << "flag"
- << false));
+ << "flag" << false));
ASSERT_OK(edsp->set(BSON("" << BSON("value"
<< "third value"
- << "flag"
- << true))
+ << "flag" << true))
.firstElement()));
ASSERT_APPENDED_OBJECT(edsp,
BSON("value"
<< "third value"
- << "flag"
- << true));
+ << "flag" << true));
}
// specializedWithCtorAndValue
diff --git a/src/mongo/idl/server_parameter_with_storage.h b/src/mongo/idl/server_parameter_with_storage.h
index 0da90e950f7..afe21cd9dee 100644
--- a/src/mongo/idl/server_parameter_with_storage.h
+++ b/src/mongo/idl/server_parameter_with_storage.h
@@ -293,15 +293,12 @@ public:
*/
template <class predicate>
void addBound(const element_type& bound) {
- addValidator([ bound, spname = name() ](const element_type& value) {
+ addValidator([bound, spname = name()](const element_type& value) {
if (!predicate::evaluate(value, bound)) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Invalid value for parameter " << spname << ": "
- << value
- << " is not "
- << predicate::description
- << " "
- << bound);
+ str::stream()
+ << "Invalid value for parameter " << spname << ": " << value
+ << " is not " << predicate::description << " " << bound);
}
return Status::OK();
});
diff --git a/src/mongo/logger/encoder.h b/src/mongo/logger/encoder.h
index f51642d62e7..29226dc2f5c 100644
--- a/src/mongo/logger/encoder.h
+++ b/src/mongo/logger/encoder.h
@@ -48,4 +48,4 @@ public:
};
} // namespace logger
-} // nnamspace mongo
+} // namespace mongo
diff --git a/src/mongo/logger/log_component.cpp b/src/mongo/logger/log_component.cpp
index a214fd757e2..ec389788d43 100644
--- a/src/mongo/logger/log_component.cpp
+++ b/src/mongo/logger/log_component.cpp
@@ -245,5 +245,5 @@ std::ostream& operator<<(std::ostream& os, LogComponent component) {
return os << component.getNameForLog();
}
-} // logger
-} // mongo
+} // namespace logger
+} // namespace mongo
diff --git a/src/mongo/logger/log_component_settings.cpp b/src/mongo/logger/log_component_settings.cpp
index c43e5d7d79b..f1da736be2c 100644
--- a/src/mongo/logger/log_component_settings.cpp
+++ b/src/mongo/logger/log_component_settings.cpp
@@ -117,5 +117,5 @@ bool LogComponentSettings::shouldLog(LogComponent component, LogSeverity severit
return severity >= LogSeverity::cast(_minimumLoggedSeverity[component].loadRelaxed());
}
-} // logger
-} // mongo
+} // namespace logger
+} // namespace mongo
diff --git a/src/mongo/logger/log_manager.cpp b/src/mongo/logger/log_manager.cpp
index 6d349e29459..c167853353f 100644
--- a/src/mongo/logger/log_manager.cpp
+++ b/src/mongo/logger/log_manager.cpp
@@ -72,5 +72,5 @@ bool LogManager::isDefaultConsoleAppenderAttached() const {
return static_cast<bool>(_defaultAppender);
}
-} // logger
-} // mongo
+} // namespace logger
+} // namespace mongo
diff --git a/src/mongo/logger/log_severity.cpp b/src/mongo/logger/log_severity.cpp
index 90ba9967e88..349be573983 100644
--- a/src/mongo/logger/log_severity.cpp
+++ b/src/mongo/logger/log_severity.cpp
@@ -46,7 +46,11 @@ constexpr auto infoSeverityString = "info"_sd;
constexpr auto debugSeverityString = "debug"_sd;
constexpr StringData kDebugLevelStrings[LogSeverity::kMaxDebugLevel] = {
- "D1"_sd, "D2"_sd, "D3"_sd, "D4"_sd, "D5"_sd,
+ "D1"_sd,
+ "D2"_sd,
+ "D3"_sd,
+ "D4"_sd,
+ "D5"_sd,
};
} // namespace
diff --git a/src/mongo/logger/log_test.cpp b/src/mongo/logger/log_test.cpp
index 836e82bb8ea..9fb534a135f 100644
--- a/src/mongo/logger/log_test.cpp
+++ b/src/mongo/logger/log_test.cpp
@@ -382,8 +382,7 @@ void testEncodedLogLine(const MessageEventEphemeral& event, const std::string& e
std::string s = os.str();
if (s.find(expectedSubstring) == std::string::npos) {
FAIL(str::stream() << "encoded log line does not contain substring \"" << expectedSubstring
- << "\". log line: "
- << s);
+ << "\". log line: " << s);
}
}
diff --git a/src/mongo/logger/parse_log_component_settings.cpp b/src/mongo/logger/parse_log_component_settings.cpp
index 7a8ee40f7cc..1982587e130 100644
--- a/src/mongo/logger/parse_log_component_settings.cpp
+++ b/src/mongo/logger/parse_log_component_settings.cpp
@@ -81,10 +81,10 @@ StatusWith<std::vector<LogComponentSetting>> parseLogComponentSettings(const BSO
if (elem.fieldNameStringData() == "verbosity") {
if (!elem.isNumber()) {
return StatusWith<Result>(ErrorCodes::BadValue,
- str::stream() << "Expected "
- << parentComponent.getDottedName()
- << ".verbosity to be a number, but found "
- << typeName(elem.type()));
+ str::stream()
+ << "Expected " << parentComponent.getDottedName()
+ << ".verbosity to be a number, but found "
+ << typeName(elem.type()));
}
levelsToSet.push_back((LogComponentSetting(parentComponent, elem.numberInt())));
continue;
@@ -93,22 +93,20 @@ StatusWith<std::vector<LogComponentSetting>> parseLogComponentSettings(const BSO
const LogComponent curr = _getComponentForShortName(shortName);
if (curr == LogComponent::kNumLogComponents || curr.parent() != parentComponent) {
- return StatusWith<Result>(
- ErrorCodes::BadValue,
- str::stream() << "Invalid component name " << parentComponent.getDottedName() << "."
- << shortName);
+ return StatusWith<Result>(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid component name "
+ << parentComponent.getDottedName() << "." << shortName);
}
if (elem.isNumber()) {
levelsToSet.push_back(LogComponentSetting(curr, elem.numberInt()));
continue;
}
if (elem.type() != Object) {
- return StatusWith<Result>(ErrorCodes::BadValue,
- str::stream() << "Invalid type " << typeName(elem.type())
- << "for component "
- << parentComponent.getDottedName()
- << "."
- << shortName);
+ return StatusWith<Result>(
+ ErrorCodes::BadValue,
+ str::stream() << "Invalid type " << typeName(elem.type()) << "for component "
+ << parentComponent.getDottedName() << "." << shortName);
}
iterators.push_back(iter);
parentComponent = curr;
diff --git a/src/mongo/logger/parse_log_component_settings_test.cpp b/src/mongo/logger/parse_log_component_settings_test.cpp
index 5d91f7b0f78..2271a16dbdd 100644
--- a/src/mongo/logger/parse_log_component_settings_test.cpp
+++ b/src/mongo/logger/parse_log_component_settings_test.cpp
@@ -140,10 +140,7 @@ TEST(Multi, FailBadComponent) {
BSONObj input =
BSON("verbosity" << 6 << "accessControl" << BSON("verbosity" << 5) << "storage"
<< BSON("verbosity" << 4 << "journal" << BSON("verbosity" << 6))
- << "No Such Component"
- << BSON("verbosity" << 2)
- << "extrafield"
- << 123);
+ << "No Such Component" << BSON("verbosity" << 2) << "extrafield" << 123);
StatusWith<Settings> result = parseLogComponentSettings(input);
@@ -175,4 +172,4 @@ TEST(DeeplyNested, FailLast) {
ASSERT_EQUALS(result.getStatus().reason(),
"Invalid component name storage.journal.No Such Component");
}
-}
+} // namespace
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index a1f32e26726..ac2d12fdd44 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -221,4 +221,4 @@ MONGO_INITIALIZER(RamLogCatalog)(InitializerContext*) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/logger/ramlog.h b/src/mongo/logger/ramlog.h
index 15ad0d8526e..306dc36bff4 100644
--- a/src/mongo/logger/ramlog.h
+++ b/src/mongo/logger/ramlog.h
@@ -183,4 +183,4 @@ public:
private:
RamLog* _ramlog;
};
-}
+} // namespace mongo
diff --git a/src/mongo/logger/rotatable_file_writer.cpp b/src/mongo/logger/rotatable_file_writer.cpp
index fa22a7f1e4a..7a41f76144d 100644
--- a/src/mongo/logger/rotatable_file_writer.cpp
+++ b/src/mongo/logger/rotatable_file_writer.cpp
@@ -70,7 +70,7 @@ std::wstring utf8ToWide(StringData utf8Str) {
utf8Str.size(), // Count
tempBuffer.get(), // UTF-16 output buffer
utf8Str.size() // Buffer size in wide characters
- );
+ );
// TODO(schwerin): fassert finalSize > 0?
return std::wstring(tempBuffer.get(), finalSize);
}
@@ -142,7 +142,7 @@ bool Win32FileStreambuf::open(StringData fileName, bool append) {
OPEN_ALWAYS, // dwCreationDisposition
FILE_ATTRIBUTE_NORMAL, // dwFlagsAndAttributes
NULL // hTemplateFile
- );
+ );
if (INVALID_HANDLE_VALUE == _fileHandle)
@@ -212,9 +212,9 @@ Status RotatableFileWriter::Use::rotate(bool renameOnRotate, const std::string&
try {
if (boost::filesystem::exists(renameTarget)) {
return Status(ErrorCodes::FileRenameFailed,
- str::stream() << "Renaming file " << _writer->_fileName << " to "
- << renameTarget
- << " failed; destination already exists");
+ str::stream()
+ << "Renaming file " << _writer->_fileName << " to "
+ << renameTarget << " failed; destination already exists");
}
} catch (const std::exception& e) {
return Status(ErrorCodes::FileRenameFailed,
@@ -229,11 +229,9 @@ Status RotatableFileWriter::Use::rotate(bool renameOnRotate, const std::string&
boost::filesystem::rename(_writer->_fileName, renameTarget, ec);
if (ec) {
return Status(ErrorCodes::FileRenameFailed,
- str::stream() << "Failed to rename \"" << _writer->_fileName
- << "\" to \""
- << renameTarget
- << "\": "
- << ec.message());
+ str::stream()
+ << "Failed to rename \"" << _writer->_fileName << "\" to \""
+ << renameTarget << "\": " << ec.message());
// TODO(schwerin): Make errnoWithDescription() available in the logger library, and
// use it here.
}
diff --git a/src/mongo/logger/rotatable_file_writer_test.cpp b/src/mongo/logger/rotatable_file_writer_test.cpp
index 2254e96f0be..ee97a5bede2 100644
--- a/src/mongo/logger/rotatable_file_writer_test.cpp
+++ b/src/mongo/logger/rotatable_file_writer_test.cpp
@@ -143,4 +143,4 @@ TEST_F(RotatableFileWriterTest, RotationTest) {
}
}
-} // namespace mongo
+} // namespace
diff --git a/src/mongo/platform/atomic_proxy.h b/src/mongo/platform/atomic_proxy.h
index 70c367421ec..b65cb5fb232 100644
--- a/src/mongo/platform/atomic_proxy.h
+++ b/src/mongo/platform/atomic_proxy.h
@@ -40,9 +40,9 @@
namespace mongo {
/**
-* Provides a simple version of an atomic version of T
-* that uses std::atomic<BaseWordT> as a backing type;
-*/
+ * Provides a simple version of an atomic version of T
+ * that uses std::atomic<BaseWordT> as a backing type;
+ */
template <typename T, typename BaseWordT>
class AtomicProxy {
MONGO_STATIC_ASSERT_MSG(sizeof(T) == sizeof(BaseWordT),
@@ -87,4 +87,4 @@ private:
};
using AtomicDouble = AtomicProxy<double, std::uint64_t>;
-}
+} // namespace mongo
diff --git a/src/mongo/platform/bits.h b/src/mongo/platform/bits.h
index 721e7be9c92..b12bda75b3e 100644
--- a/src/mongo/platform/bits.h
+++ b/src/mongo/platform/bits.h
@@ -93,4 +93,4 @@ int countTrailingZeros64(unsigned long long num) {
#else
#error "No bit-ops definitions for your platform"
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/platform/bits_test.cpp b/src/mongo/platform/bits_test.cpp
index bb8f014e051..f6234660682 100644
--- a/src/mongo/platform/bits_test.cpp
+++ b/src/mongo/platform/bits_test.cpp
@@ -54,4 +54,4 @@ TEST(BitsTest_CountZeros, EachBit) {
ASSERT_EQUALS(countTrailingZeros64(x), i);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/platform/decimal128_test.cpp b/src/mongo/platform/decimal128_test.cpp
index dfce92f4fb8..90749ddd0c4 100644
--- a/src/mongo/platform/decimal128_test.cpp
+++ b/src/mongo/platform/decimal128_test.cpp
@@ -1396,9 +1396,9 @@ TEST(Decimal128Test, TestDecimal128GetLargestNegativeExponentZero) {
}
/**
-* Test data was generated using 64 bit versions of these functions, so we must test
-* approximate results.
-*/
+ * Test data was generated using 64 bit versions of these functions, so we must test
+ * approximate results.
+ */
void assertDecimal128ApproxEqual(Decimal128 x, Decimal128 y) {
ASSERT_TRUE(x.subtract(y).toAbs().isLess(Decimal128("0.00000005")));
diff --git a/src/mongo/platform/random_test.cpp b/src/mongo/platform/random_test.cpp
index f2d5353887e..ee82a89490f 100644
--- a/src/mongo/platform/random_test.cpp
+++ b/src/mongo/platform/random_test.cpp
@@ -219,4 +219,4 @@ TEST(RandomTest, Secure1) {
ASSERT_NOT_EQUALS(a->nextInt64(), b->nextInt64());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/platform/shared_library_posix.cpp b/src/mongo/platform/shared_library_posix.cpp
index 16728da3d12..c896b9d230b 100644
--- a/src/mongo/platform/shared_library_posix.cpp
+++ b/src/mongo/platform/shared_library_posix.cpp
@@ -79,8 +79,7 @@ StatusWith<void*> SharedLibrary::getSymbol(StringData name) {
if (error_msg != nullptr) {
return StatusWith<void*>(ErrorCodes::InternalError,
str::stream() << "dlsym failed for symbol " << name
- << " with error message: "
- << error_msg);
+ << " with error message: " << error_msg);
}
return StatusWith<void*>(symbol);
diff --git a/src/mongo/platform/strcasestr.h b/src/mongo/platform/strcasestr.h
index 1530520f1a2..6f9b42cb5a0 100644
--- a/src/mongo/platform/strcasestr.h
+++ b/src/mongo/platform/strcasestr.h
@@ -36,7 +36,7 @@ namespace pal {
const char* strcasestr(const char* haystack, const char* needle);
}
using mongo::pal::strcasestr;
-}
+} // namespace mongo
#else
diff --git a/src/mongo/rpc/get_status_from_command_result.cpp b/src/mongo/rpc/get_status_from_command_result.cpp
index b63ae786832..2607ff15e3c 100644
--- a/src/mongo/rpc/get_status_from_command_result.cpp
+++ b/src/mongo/rpc/get_status_from_command_result.cpp
@@ -98,14 +98,14 @@ Status getWriteConcernStatusFromCommandResult(const BSONObj& obj) {
std::string wcErrorParseMsg;
if (!wcError.parseBSON(wcErrObj, &wcErrorParseMsg)) {
return Status(ErrorCodes::UnsupportedFormat,
- str::stream() << "Failed to parse write concern section due to "
- << wcErrorParseMsg);
+ str::stream()
+ << "Failed to parse write concern section due to " << wcErrorParseMsg);
}
std::string wcErrorInvalidMsg;
if (!wcError.isValid(&wcErrorInvalidMsg)) {
return Status(ErrorCodes::UnsupportedFormat,
- str::stream() << "Failed to parse write concern section due to "
- << wcErrorInvalidMsg);
+ str::stream()
+ << "Failed to parse write concern section due to " << wcErrorInvalidMsg);
}
return wcError.toStatus();
}
diff --git a/src/mongo/rpc/legacy_reply.cpp b/src/mongo/rpc/legacy_reply.cpp
index 75c69c16d9f..affdadbd38c 100644
--- a/src/mongo/rpc/legacy_reply.cpp
+++ b/src/mongo/rpc/legacy_reply.cpp
@@ -54,20 +54,17 @@ LegacyReply::LegacyReply(const Message* message) {
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad cursorId field,"
- << " expected a value of 0 but got "
- << qr.getCursorId(),
+ << " expected a value of 0 but got " << qr.getCursorId(),
qr.getCursorId() == 0);
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad nReturned field,"
- << " expected a value of 1 but got "
- << qr.getNReturned(),
+ << " expected a value of 1 but got " << qr.getNReturned(),
qr.getNReturned() == 1);
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad startingFrom field,"
- << " expected a value of 0 but got "
- << qr.getStartingFrom(),
+ << " expected a value of 0 but got " << qr.getStartingFrom(),
qr.getStartingFrom() == 0);
auto status = Validator<BSONObj>::validateLoad(qr.data(), qr.dataLen());
diff --git a/src/mongo/rpc/legacy_request.cpp b/src/mongo/rpc/legacy_request.cpp
index 426eba475fc..2c05714d4f4 100644
--- a/src/mongo/rpc/legacy_request.cpp
+++ b/src/mongo/rpc/legacy_request.cpp
@@ -48,9 +48,7 @@ OpMsgRequest opMsgRequestFromLegacyRequest(const Message& message) {
if (qm.queryOptions & QueryOption_Exhaust) {
uasserted(18527,
str::stream() << "The 'exhaust' OP_QUERY flag is invalid for commands: "
- << ns.ns()
- << " "
- << qm.query.toString());
+ << ns.ns() << " " << qm.query.toString());
}
uassert(40473,
diff --git a/src/mongo/rpc/metadata.cpp b/src/mongo/rpc/metadata.cpp
index c217db2d9f4..e3ed093a693 100644
--- a/src/mongo/rpc/metadata.cpp
+++ b/src/mongo/rpc/metadata.cpp
@@ -148,7 +148,7 @@ bool isArrayOfObjects(BSONElement array) {
return true;
}
-}
+} // namespace
OpMsgRequest upconvertRequest(StringData db, BSONObj cmdObj, int queryFlags) {
cmdObj = cmdObj.getOwned(); // Usually this is a no-op since it is already owned.
diff --git a/src/mongo/rpc/metadata/client_metadata.cpp b/src/mongo/rpc/metadata/client_metadata.cpp
index d2199c13016..9b51a4bc750 100644
--- a/src/mongo/rpc/metadata/client_metadata.cpp
+++ b/src/mongo/rpc/metadata/client_metadata.cpp
@@ -99,8 +99,7 @@ Status ClientMetadata::parseClientMetadataDocument(const BSONObj& doc) {
if (static_cast<uint32_t>(doc.objsize()) > maxLength) {
return Status(ErrorCodes::ClientMetadataDocumentTooLarge,
str::stream() << "The client metadata document must be less then or equal to "
- << maxLength
- << "bytes");
+ << maxLength << "bytes");
}
// Get a copy so that we can take a stable reference to the app name inside
@@ -135,9 +134,10 @@ Status ClientMetadata::parseClientMetadataDocument(const BSONObj& doc) {
} else if (name == kDriver) {
if (!e.isABSONObj()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kDriver << "' field is required to be a "
- "BSON document in the client "
- "metadata document");
+ str::stream() << "The '" << kDriver
+ << "' field is required to be a "
+ "BSON document in the client "
+ "metadata document");
}
Status s = validateDriverDocument(e.Obj());
@@ -196,10 +196,10 @@ StatusWith<StringData> ClientMetadata::parseApplicationDocument(const BSONObj& d
if (name == kName) {
if (e.type() != String) {
- return {
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kApplication << "." << kName
- << "' field must be a string in the client metadata document"};
+ return {ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kApplication << "." << kName
+ << "' field must be a string in the client metadata document"};
}
StringData value = e.checkAndGetStringData();
@@ -230,18 +230,18 @@ Status ClientMetadata::validateDriverDocument(const BSONObj& doc) {
if (name == kName) {
if (e.type() != String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kDriver << "." << kName
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kDriver << "." << kName
<< "' field must be a string in the client metadata document");
}
foundName = true;
} else if (name == kVersion) {
if (e.type() != String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kDriver << "." << kVersion
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kDriver << "." << kVersion
<< "' field must be a string in the client metadata document");
}
@@ -274,9 +274,9 @@ Status ClientMetadata::validateOperatingSystemDocument(const BSONObj& doc) {
if (name == kType) {
if (e.type() != String) {
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "The '" << kOperatingSystem << "." << kType
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "The '" << kOperatingSystem << "." << kType
<< "' field must be a string in the client metadata document");
}
@@ -287,8 +287,7 @@ Status ClientMetadata::validateOperatingSystemDocument(const BSONObj& doc) {
if (foundType == false) {
return Status(ErrorCodes::ClientMetadataMissingField,
str::stream() << "Missing required field '" << kOperatingSystem << "."
- << kType
- << "' in the client metadata document");
+ << kType << "' in the client metadata document");
}
return Status::OK();
diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp
index e70355d37ca..be9b666d222 100644
--- a/src/mongo/rpc/metadata/client_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_test.cpp
@@ -86,13 +86,11 @@ TEST(ClientMetadatTest, TestLoopbackTest) {
ASSERT_EQUALS("g", swParseStatus.getValue().get().getApplicationName());
BSONObj outDoc =
- BSON(kMetadataDoc << BSON(
- kApplication << BSON(kName << "g") << kDriver
- << BSON(kName << "a" << kVersion << "b")
- << kOperatingSystem
- << BSON(kType << "c" << kName << "d" << kArchitecture << "e"
- << kVersion
- << "f")));
+ BSON(kMetadataDoc << BSON(kApplication
+ << BSON(kName << "g") << kDriver
+ << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
+ << BSON(kType << "c" << kName << "d" << kArchitecture << "e"
+ << kVersion << "f")));
ASSERT_BSONOBJ_EQ(obj, outDoc);
}
@@ -105,11 +103,11 @@ TEST(ClientMetadatTest, TestLoopbackTest) {
auto swParseStatus = ClientMetadata::parse(obj[kMetadataDoc]);
ASSERT_OK(swParseStatus.getStatus());
- BSONObj outDoc = BSON(
- kMetadataDoc << BSON(
- kDriver << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
- << BSON(kType << "c" << kName << "d" << kArchitecture << "e" << kVersion
- << "f")));
+ BSONObj outDoc =
+ BSON(kMetadataDoc << BSON(kDriver
+ << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
+ << BSON(kType << "c" << kName << "d" << kArchitecture << "e"
+ << kVersion << "f")));
ASSERT_BSONOBJ_EQ(obj, outDoc);
}
@@ -150,8 +148,7 @@ TEST(ClientMetadatTest, TestRequiredOnlyFields) {
// With AppName
ASSERT_DOC_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "v1") << kOperatingSystem
<< BSON(kType << kUnknown));
}
@@ -160,24 +157,20 @@ TEST(ClientMetadatTest, TestRequiredOnlyFields) {
TEST(ClientMetadatTest, TestWithAppNameSpelledWrong) {
ASSERT_DOC_OK(kApplication << BSON("extra"
<< "1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
+ << kOperatingSystem << BSON(kType << kUnknown));
}
// Positive: test with empty application document
TEST(ClientMetadatTest, TestWithEmptyApplication) {
ASSERT_DOC_OK(kApplication << BSONObj() << kDriver << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kOperatingSystem << BSON(kType << kUnknown));
}
// Negative: test with appplication wrong type
TEST(ClientMetadatTest, TestNegativeWithAppNameWrongType) {
ASSERT_DOC_NOT_OK(kApplication << "1" << kDriver << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kOperatingSystem << BSON(kType << kUnknown));
}
// Positive: test with extra fields
@@ -185,10 +178,8 @@ TEST(ClientMetadatTest, TestExtraFields) {
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
+ << kOperatingSystem << BSON(kType << kUnknown));
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
@@ -196,24 +187,19 @@ TEST(ClientMetadatTest, TestExtraFields) {
<< BSON(kName << "n1" << kVersion << "v1"
<< "extra"
<< "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown));
+ << kOperatingSystem << BSON(kType << kUnknown));
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
<< kOperatingSystem
<< BSON(kType << kUnknown << "extra"
<< "v1"));
ASSERT_DOC_OK(kApplication << BSON(kName << "1"
<< "extra"
<< "v1")
- << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
- << BSON(kType << kUnknown)
- << "extra"
+ << kDriver << BSON(kName << "n1" << kVersion << "v1")
+ << kOperatingSystem << BSON(kType << kUnknown) << "extra"
<< "v1");
}
@@ -236,20 +222,16 @@ TEST(ClientMetadatTest, TestNegativeMissingRequiredOneField) {
// Negative: document with wrong types for required fields
TEST(ClientMetadatTest, TestNegativeWrongTypes) {
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << 1) << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "v1") << kOperatingSystem
<< BSON(kType << kUnknown));
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << 1 << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << 1 << kVersion << "v1") << kOperatingSystem
<< BSON(kType << kUnknown));
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << 1)
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << 1) << kOperatingSystem
<< BSON(kType << kUnknown));
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "v1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "v1") << kOperatingSystem
<< BSON(kType << 1));
}
@@ -262,20 +244,14 @@ TEST(ClientMetadatTest, TestNegativeLargeDocument) {
{
std::string str(350, 'x');
ASSERT_DOC_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
- << BSON(kType << kUnknown)
- << "extra"
- << str);
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
+ << BSON(kType << kUnknown) << "extra" << str);
}
{
std::string str(512, 'x');
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << "1") << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
- << BSON(kType << kUnknown)
- << "extra"
- << str);
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
+ << BSON(kType << kUnknown) << "extra" << str);
}
}
@@ -284,8 +260,7 @@ TEST(ClientMetadatTest, TestNegativeLargeAppName) {
{
std::string str(128, 'x');
ASSERT_DOC_OK(kApplication << BSON(kName << str) << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
<< BSON(kType << kUnknown));
BSONObjBuilder builder;
@@ -294,8 +269,7 @@ TEST(ClientMetadatTest, TestNegativeLargeAppName) {
{
std::string str(129, 'x');
ASSERT_DOC_NOT_OK(kApplication << BSON(kName << str) << kDriver
- << BSON(kName << "n1" << kVersion << "1")
- << kOperatingSystem
+ << BSON(kName << "n1" << kVersion << "1") << kOperatingSystem
<< BSON(kType << kUnknown));
BSONObjBuilder builder;
@@ -327,8 +301,7 @@ TEST(ClientMetadatTest, TestMongoSAppend) {
<< kOperatingSystem
<< BSON(kType << "c" << kName << "d" << kArchitecture << "e" << kVersion
<< "f")
- << kMongos
- << BSON(kHost << "h" << kClient << "i" << kVersion << "j"));
+ << kMongos << BSON(kHost << "h" << kClient << "i" << kVersion << "j"));
ASSERT_BSONOBJ_EQ(doc, outDoc);
}
diff --git a/src/mongo/rpc/metadata/config_server_metadata.cpp b/src/mongo/rpc/metadata/config_server_metadata.cpp
index 0fb6859b28b..3dffe940087 100644
--- a/src/mongo/rpc/metadata/config_server_metadata.cpp
+++ b/src/mongo/rpc/metadata/config_server_metadata.cpp
@@ -64,9 +64,7 @@ StatusWith<ConfigServerMetadata> ConfigServerMetadata::readFromMetadata(
} else if (metadataElem.type() != mongo::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "ConfigServerMetadata element has incorrect type: expected"
- << mongo::Object
- << " but got "
- << metadataElem.type()};
+ << mongo::Object << " but got " << metadataElem.type()};
}
BSONObj configMetadataObj = metadataElem.Obj();
diff --git a/src/mongo/rpc/metadata/logical_time_metadata_test.cpp b/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
index ea9a0fbbdab..a3553a1db6c 100644
--- a/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
@@ -191,6 +191,6 @@ TEST(LogicalTimeMetadataTest, UpconvertPass) {
converted.body);
}
+} // namespace
} // namespace rpc
} // namespace mongo
-}
diff --git a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
index c79dbeee5d9..9f07a7775ad 100644
--- a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
@@ -52,19 +52,12 @@ TEST(ReplResponseMetadataTest, OplogQueryMetadataRoundtrip) {
BSONObjBuilder builder;
metadata.writeToMetadata(&builder).transitional_ignore();
- BSONObj expectedObj(BSON(kOplogQueryMetadataFieldName << BSON(
- "lastOpCommitted"
- << BSON("ts" << opTime1.getTimestamp() << "t" << opTime1.getTerm())
- << "lastCommittedWall"
- << committedWall
- << "lastOpApplied"
- << BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
- << "rbid"
- << 6
- << "primaryIndex"
- << 12
- << "syncSourceIndex"
- << -1)));
+ BSONObj expectedObj(BSON(
+ kOplogQueryMetadataFieldName << BSON(
+ "lastOpCommitted" << BSON("ts" << opTime1.getTimestamp() << "t" << opTime1.getTerm())
+ << "lastCommittedWall" << committedWall << "lastOpApplied"
+ << BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
+ << "rbid" << 6 << "primaryIndex" << 12 << "syncSourceIndex" << -1)));
BSONObj serializedObj = builder.obj();
ASSERT_BSONOBJ_EQ(expectedObj, serializedObj);
diff --git a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
index a2802b35416..5b3e746d8e7 100644
--- a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
@@ -63,18 +63,10 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
BSON(kReplSetMetadataFieldName
<< BSON("term" << 3 << "lastOpCommitted"
<< BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm())
- << "lastCommittedWall"
- << committedWallTime
- << "lastOpVisible"
+ << "lastCommittedWall" << committedWallTime << "lastOpVisible"
<< BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
- << "configVersion"
- << 6
- << "replicaSetId"
- << metadata.getReplicaSetId()
- << "primaryIndex"
- << 12
- << "syncSourceIndex"
- << -1)));
+ << "configVersion" << 6 << "replicaSetId" << metadata.getReplicaSetId()
+ << "primaryIndex" << 12 << "syncSourceIndex" << -1)));
BSONObj serializedObj = builder.obj();
ASSERT_BSONOBJ_EQ(expectedObj, serializedObj);
diff --git a/src/mongo/rpc/metadata/sharding_metadata_test.cpp b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
index 92d1e5cb24b..dec0fb1c3d1 100644
--- a/src/mongo/rpc/metadata/sharding_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
@@ -54,8 +54,7 @@ TEST(ShardingMetadata, ReadFromMetadata) {
auto sm = checkParse(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId"
- << kElectionId)));
+ << "electionId" << kElectionId)));
ASSERT_EQ(sm.getLastElectionId(), kElectionId);
ASSERT_EQ(sm.getLastOpTime(), kLastOpTime);
}
@@ -89,8 +88,7 @@ TEST(ShardingMetadata, ReadFromInvalidMetadata) {
checkParseFails(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId"
- << 3)),
+ << "electionId" << 3)),
ErrorCodes::TypeMismatch);
}
{
@@ -104,9 +102,7 @@ TEST(ShardingMetadata, ReadFromInvalidMetadata) {
checkParseFails(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId"
- << kElectionId
- << "extra"
+ << "electionId" << kElectionId << "extra"
<< "this should not be here")),
ErrorCodes::InvalidOptions);
}
diff --git a/src/mongo/rpc/metadata/tracking_metadata.cpp b/src/mongo/rpc/metadata/tracking_metadata.cpp
index b284ceb8692..ba2fedb5d4d 100644
--- a/src/mongo/rpc/metadata/tracking_metadata.cpp
+++ b/src/mongo/rpc/metadata/tracking_metadata.cpp
@@ -99,9 +99,7 @@ StatusWith<TrackingMetadata> TrackingMetadata::readFromMetadata(const BSONElemen
} else if (metadataElem.type() != mongo::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "TrackingMetadata element has incorrect type: expected"
- << mongo::Object
- << " but got "
- << metadataElem.type()};
+ << mongo::Object << " but got " << metadataElem.type()};
}
BSONObj metadataObj = metadataElem.Obj();
diff --git a/src/mongo/rpc/metadata/tracking_metadata_test.cpp b/src/mongo/rpc/metadata/tracking_metadata_test.cpp
index c2c2897a6f8..2244483dfb5 100644
--- a/src/mongo/rpc/metadata/tracking_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/tracking_metadata_test.cpp
@@ -52,9 +52,9 @@ const auto kParentOperId = "541b1a00e8a23afa832b2016";
TEST(TrackingMetadata, ReadFromMetadata) {
{
- auto metadata = checkParse(BSON(
- "tracking_info" << BSON("operId" << kOperId << "operName" << kOperName << "parentOperId"
- << kParentOperId)));
+ auto metadata =
+ checkParse(BSON("tracking_info" << BSON("operId" << kOperId << "operName" << kOperName
+ << "parentOperId" << kParentOperId)));
ASSERT_EQ(*metadata.getOperId(), kOperId);
ASSERT_EQ(*metadata.getParentOperId(), kParentOperId);
ASSERT_EQ(*metadata.getOperName(), kOperName);
@@ -78,8 +78,7 @@ TEST(TrackingMetadata, ReadFromInvalidMetadata) {
}
{
checkParseFails(BSON("tracking_info" << BSON("operId" << kOperId << "operName" << kOperName
- << "parentOperId"
- << 111)),
+ << "parentOperId" << 111)),
ErrorCodes::TypeMismatch);
}
}
diff --git a/src/mongo/rpc/metadata_test.cpp b/src/mongo/rpc/metadata_test.cpp
index f94802ac9f5..c0bac93aedd 100644
--- a/src/mongo/rpc/metadata_test.cpp
+++ b/src/mongo/rpc/metadata_test.cpp
@@ -72,8 +72,9 @@ TEST(Metadata, UpconvertValidMetadata) {
<< BSON("mode"
<< "secondary")),
mongo::QueryOption_SlaveOk,
- BSON("ping" << 1 << "$readPreference" << BSON("mode"
- << "secondary")));
+ BSON("ping" << 1 << "$readPreference"
+ << BSON("mode"
+ << "secondary")));
// Wrapped in 'query', with readPref.
checkUpconvert(BSON("query" << BSON("pong" << 1 << "foo"
@@ -121,16 +122,14 @@ TEST(Metadata, UpconvertInvalidMetadata) {
ASSERT_THROWS_CODE(upconvertRequest("db",
BSON("query" << BSON("foo"
<< "bar")
- << "$maxTimeMS"
- << 200),
+ << "$maxTimeMS" << 200),
0),
AssertionException,
ErrorCodes::InvalidOptions);
ASSERT_THROWS_CODE(upconvertRequest("db",
BSON("$query" << BSON("foo"
<< "bar")
- << "$maxTimeMS"
- << 200),
+ << "$maxTimeMS" << 200),
0),
AssertionException,
ErrorCodes::InvalidOptions);
diff --git a/src/mongo/rpc/object_check_test.cpp b/src/mongo/rpc/object_check_test.cpp
index 4006db63bc1..52010604f53 100644
--- a/src/mongo/rpc/object_check_test.cpp
+++ b/src/mongo/rpc/object_check_test.cpp
@@ -88,4 +88,4 @@ TEST(DataTypeValidated, BSONValidationEnabled) {
ASSERT_OK(cdrc.readAndAdvanceNoThrow(&v));
}
}
-}
+} // namespace
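
Editorial note: the one-line change above is clang-format's namespace-comment fixup: a bare closing brace of a namespace gains a trailing `// namespace` comment, matching the label (or lack of one) on the opening brace. A minimal sketch of the behavior, assuming only a standard C++ toolchain:

    namespace outer {
    namespace {
    int answer() {
        return 42;
    }
    } // namespace
    } // namespace outer
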
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index 561264b5db7..7aa41d66cb1 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -181,7 +181,8 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotMasterError) {
documents: [
{a: 1}
]
- })")).serialize();
+ })"))
+ .serialize();
// Round-trip command fails with NotMaster error. Note that this failure is in command
// dispatch which ignores w:0.
diff --git a/src/mongo/rpc/op_msg_test.cpp b/src/mongo/rpc/op_msg_test.cpp
index bf280768638..8da383bc90e 100644
--- a/src/mongo/rpc/op_msg_test.cpp
+++ b/src/mongo/rpc/op_msg_test.cpp
@@ -183,11 +183,13 @@ const uint32_t kNoFlags = 0;
const uint32_t kHaveChecksum = 1;
TEST_F(OpMsgParser, SucceedsWithJustBody) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
- }.parse();
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 0u);
@@ -205,18 +207,20 @@ TEST_F(OpMsgParser, SucceedsWithChecksum) {
}
TEST_F(OpMsgParser, SucceedsWithBodyThenSequence) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- fromjson("{a: 2}"),
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ fromjson("{a: 2}"),
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 1u);
@@ -227,17 +231,19 @@ TEST_F(OpMsgParser, SucceedsWithBodyThenSequence) {
}
TEST_F(OpMsgParser, SucceedsWithSequenceThenBody) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
- kBodySection,
- fromjson("{ping: 1}"),
- }.parse();
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 1u);
@@ -247,22 +253,24 @@ TEST_F(OpMsgParser, SucceedsWithSequenceThenBody) {
}
TEST_F(OpMsgParser, SucceedsWithSequenceThenBodyThenSequence) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kDocSequenceSection,
- Sized{
- "empty", //
- },
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kDocSequenceSection,
+ Sized{
+ "empty", //
+ },
- kBodySection,
- fromjson("{ping: 1}"),
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 2u);
@@ -274,22 +282,24 @@ TEST_F(OpMsgParser, SucceedsWithSequenceThenBodyThenSequence) {
}
TEST_F(OpMsgParser, SucceedsWithSequenceThenSequenceThenBody) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kDocSequenceSection,
- Sized{
- "empty", //
- },
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kDocSequenceSection,
+ Sized{
+ "empty", //
+ },
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
- kBodySection,
- fromjson("{ping: 1}"),
- }.parse();
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 2u);
@@ -301,22 +311,24 @@ TEST_F(OpMsgParser, SucceedsWithSequenceThenSequenceThenBody) {
}
TEST_F(OpMsgParser, SucceedsWithBodyThenSequenceThenSequence) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
- kDocSequenceSection,
- Sized{
- "empty", //
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "empty", //
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{ping: 1}"));
ASSERT_EQ(msg.sequences.size(), 2u);
@@ -402,17 +414,19 @@ TEST_F(OpMsgParser, FailsIfDuplicateSequenceWithBodyNested) {
}
TEST_F(OpMsgParser, SucceedsIfSequenceAndBodyHaveCommonPrefix) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{cursor: {ns: 'foo.bar', id: 1}}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{cursor: {ns: 'foo.bar', id: 1}}"),
- kDocSequenceSection,
- Sized{
- "cursor.firstBatch", //
- fromjson("{_id: 1}"),
- },
- }.parse();
+ kDocSequenceSection,
+ Sized{
+ "cursor.firstBatch", //
+ fromjson("{_id: 1}"),
+ },
+ }
+ .parse();
ASSERT_BSONOBJ_EQ(msg.body, fromjson("{cursor: {ns: 'foo.bar', id: 1}}"));
ASSERT_EQ(msg.sequences.size(), 1u);
@@ -432,11 +446,13 @@ TEST_F(OpMsgParser, FailsIfUnknownSectionKind) {
}
TEST_F(OpMsgParser, FailsIfBodyTooBig) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
- }.addToSize(-1); // Shrink message so body extends past end.
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
+ }
+ .addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::InvalidBSON);
}
@@ -447,24 +463,27 @@ TEST_F(OpMsgParser, FailsIfBodyTooBigIntoChecksum) {
kHaveChecksum, //
kBodySection,
fromjson("{ping: 1}"),
- }.appendChecksum()
+ }
+ .appendChecksum()
.addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::InvalidBSON);
}
TEST_F(OpMsgParser, FailsIfDocumentSequenceTooBig) {
- auto msg = OpMsgBytes{
- kNoFlags, //
- kBodySection,
- fromjson("{ping: 1}"),
+ auto msg =
+ OpMsgBytes{
+ kNoFlags, //
+ kBodySection,
+ fromjson("{ping: 1}"),
- kDocSequenceSection,
- Sized{
- "docs", //
- fromjson("{a: 1}"),
- },
- }.addToSize(-1); // Shrink message so body extends past end.
+ kDocSequenceSection,
+ Sized{
+ "docs", //
+ fromjson("{a: 1}"),
+ },
+ }
+ .addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::Overflow);
}
@@ -481,7 +500,8 @@ TEST_F(OpMsgParser, FailsIfDocumentSequenceTooBigIntoChecksum) {
"docs", //
fromjson("{a: 1}"),
},
- }.appendChecksum()
+ }
+ .appendChecksum()
.addToSize(-1); // Shrink message so body extends past end.
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::Overflow);
@@ -497,7 +517,8 @@ TEST_F(OpMsgParser, FailsIfDocumentInSequenceTooBig) {
Sized{
"docs", //
fromjson("{a: 1}"),
- }.addToSize(-1), // Shrink sequence so document extends past end.
+ }
+ .addToSize(-1), // Shrink sequence so document extends past end.
};
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::InvalidBSON);
@@ -512,7 +533,8 @@ TEST_F(OpMsgParser, FailsIfNameOfDocumentSequenceTooBig) {
kDocSequenceSection,
Sized{
"foo",
- }.addToSize(-1), // Shrink sequence so document extends past end.
+ }
+ .addToSize(-1), // Shrink sequence so document extends past end.
};
ASSERT_THROWS_CODE(msg.parse(), AssertionException, ErrorCodes::Overflow);
@@ -611,7 +633,8 @@ TEST_F(OpMsgParser, SucceedsWithUnknownOptionalFlags) {
flags, //
kBodySection,
fromjson("{ping: 1}"),
- }.parse();
+ }
+ .parse();
}
}
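
Editorial note: every hunk in this file reflows the same construct, a member call chained onto a large braced temporary. clang-format 7 no longer keeps `}.parse()` on the closing line of the initializer; it breaks before the `.` and indents the call, which is why each test body grew by a line or two without any behavioral change. A self-contained sketch of the wrap point, with the hypothetical type Bytes standing in for OpMsgBytes:

    // Bytes is illustrative only; it mirrors the shape of OpMsgBytes
    // (braced init, then chained member calls) without MongoDB types.
    struct Bytes {
        int flags;
        Bytes addToSize(int delta) const {
            return Bytes{flags + delta};
        }
        int parse() const {
            return flags;
        }
    };

    int demo() {
        auto parsed =
            Bytes{
                0, // flags
            }
                .addToSize(1) // break before '.', call indented
                .parse();
        return parsed;
    }
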
diff --git a/src/mongo/rpc/protocol.cpp b/src/mongo/rpc/protocol.cpp
index a578d342b00..c021e79140a 100644
--- a/src/mongo/rpc/protocol.cpp
+++ b/src/mongo/rpc/protocol.cpp
@@ -140,10 +140,7 @@ StatusWith<ProtocolSetAndWireVersionInfo> parseProtocolSetFromIsMasterReply(
maxWireVersion >= std::numeric_limits<int>::max()) {
return Status(ErrorCodes::IncompatibleServerVersion,
str::stream() << "Server min and max wire version have invalid values ("
- << minWireVersion
- << ","
- << maxWireVersion
- << ")");
+ << minWireVersion << "," << maxWireVersion << ")");
}
WireVersionInfo version{static_cast<int>(minWireVersion), static_cast<int>(maxWireVersion)};
@@ -176,11 +173,9 @@ Status validateWireVersion(const WireVersionInfo client, const WireVersionInfo s
// Server may return bad data.
if (server.minWireVersion > server.maxWireVersion) {
return Status(ErrorCodes::IncompatibleServerVersion,
- str::stream() << "Server min and max wire version are incorrect ("
- << server.minWireVersion
- << ","
- << server.maxWireVersion
- << ")");
+ str::stream()
+ << "Server min and max wire version are incorrect ("
+ << server.minWireVersion << "," << server.maxWireVersion << ")");
}
// Determine if the [min, max] tuples overlap.
diff --git a/src/mongo/rpc/protocol.h b/src/mongo/rpc/protocol.h
index 33d19486fcf..f81fcaa542b 100644
--- a/src/mongo/rpc/protocol.h
+++ b/src/mongo/rpc/protocol.h
@@ -133,8 +133,8 @@ StatusWith<ProtocolSetAndWireVersionInfo> parseProtocolSetFromIsMasterReply(
const BSONObj& isMasterReply);
/**
- * Computes supported protocols from wire versions.
- */
+ * Computes supported protocols from wire versions.
+ */
ProtocolSet computeProtocolSet(const WireVersionInfo version);
} // namespace rpc
diff --git a/src/mongo/rpc/protocol_test.cpp b/src/mongo/rpc/protocol_test.cpp
index 8acb3d1d01d..61ca6e894f7 100644
--- a/src/mongo/rpc/protocol_test.cpp
+++ b/src/mongo/rpc/protocol_test.cpp
@@ -39,8 +39,8 @@ namespace {
using mongo::WireVersion;
using namespace mongo::rpc;
-using mongo::unittest::assertGet;
using mongo::BSONObj;
+using mongo::unittest::assertGet;
// Checks if negotiation of the first two protocol sets results in the 'proto'
const auto assert_negotiated = [](ProtocolSet fst, ProtocolSet snd, Protocol proto) {
@@ -105,8 +105,7 @@ TEST(Protocol, parseProtocolSetFromIsMasterReply) {
auto mongos32 =
BSON("maxWireVersion" << static_cast<int>(WireVersion::COMMANDS_ACCEPT_WRITE_CONCERN)
<< "minWireVersion"
- << static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE)
- << "msg"
+ << static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE) << "msg"
<< "isdbgrid");
ASSERT_EQ(assertGet(parseProtocolSetFromIsMasterReply(mongos32)).protocolSet,
@@ -114,8 +113,8 @@ TEST(Protocol, parseProtocolSetFromIsMasterReply) {
}
{
// MongoDB 3.0 (mongod)
- auto mongod30 = BSON(
- "maxWireVersion" << static_cast<int>(WireVersion::RELEASE_2_7_7) << "minWireVersion"
+ auto mongod30 = BSON("maxWireVersion"
+ << static_cast<int>(WireVersion::RELEASE_2_7_7) << "minWireVersion"
<< static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE));
ASSERT_EQ(assertGet(parseProtocolSetFromIsMasterReply(mongod30)).protocolSet,
supports::kOpQueryOnly);
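
Editorial note: besides the `<<` repacking, this file shows clang-format's using-declaration sorting. Adjacent using-declarations are reordered alphabetically, which places `mongo::BSONObj` before `mongo::unittest::assertGet` (and, in a later file, `repl::OpTime` before `rpc::ReplSetMetadata`). A standard-library sketch of the same ordering:

    #include <string>
    #include <vector>

    using std::string; // 's' sorts before 'v', so this line comes first
    using std::vector;
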
diff --git a/src/mongo/rpc/write_concern_error_detail.cpp b/src/mongo/rpc/write_concern_error_detail.cpp
index aa134a9f1a0..477c7011430 100644
--- a/src/mongo/rpc/write_concern_error_detail.cpp
+++ b/src/mongo/rpc/write_concern_error_detail.cpp
@@ -137,8 +137,8 @@ Status WriteConcernErrorDetail::toStatus() const {
return _status;
}
- return _status.withReason(
- str::stream() << _status.reason() << "; Error details: " << _errInfo.toString());
+ return _status.withReason(str::stream()
+ << _status.reason() << "; Error details: " << _errInfo.toString());
}
void WriteConcernErrorDetail::setErrInfo(const BSONObj& errInfo) {
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 609b8db39fb..7ecfe36313e 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -186,7 +186,7 @@ auto AsyncRequestsSender::RemoteData::scheduleRemoteCommand(std::vector<HostAndP
// We have to make a promise future pair because the TaskExecutor doesn't currently support a
// future returning variant of scheduleRemoteCommand
- auto[p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
+ auto [p, f] = makePromiseFuture<RemoteCommandOnAnyCallbackArgs>();
// Failures to schedule skip the retry loop
uassertStatusOK(_ars->_subExecutor->scheduleRemoteCommandOnAny(
@@ -242,8 +242,9 @@ auto AsyncRequestsSender::RemoteData::handleResponse(RemoteCommandOnAnyCallbackA
_retryCount < kMaxNumFailedHostRetryAttempts) {
LOG(1) << "Command to remote " << _shardId
- << (failedTargets.empty() ? " " : (failedTargets.size() > 1 ? " for hosts "
- : " at host "))
+ << (failedTargets.empty()
+ ? " "
+ : (failedTargets.size() > 1 ? " for hosts " : " at host "))
<< "{}"_format(fmt::join(failedTargets, ", "))
<< "failed with retriable error and will be retried "
<< causedBy(redact(status));
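
Editorial note: the first async_requests_sender.cpp hunk looks semantic but is still whitespace-only. clang-format 7 recognizes C++17 structured bindings, so `auto[p, f]` becomes `auto [p, f]`. A minimal sketch of the spacing, assuming only the standard library (makePromiseFuture is MongoDB-specific, so std::pair stands in here):

    #include <utility>

    std::pair<int, int> makePair() {
        return {1, 2};
    }

    int sum() {
        auto [first, second] = makePair(); // clang-format 7 writes "auto [";
                                           // older releases wrote "auto["
        return first + second;
    }
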
diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp
index b456aa29039..2081f9ec8b3 100644
--- a/src/mongo/s/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer_configuration_test.cpp
@@ -310,8 +310,7 @@ TEST(BalancerSettingsType, InvalidBalancingWindowTimeFormat) {
ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
<< "23:00"
- << "stop"
- << 6LL)))
+ << "stop" << 6LL)))
.getStatus());
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 7a4b6e1564a..d8574212532 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -93,8 +93,7 @@ StatusWith<BSONObj> extractFindAndModifyNewObj(StatusWith<Shard::CommandResponse
return {ErrorCodes::UnsupportedFormat,
str::stream() << "expected an object from the findAndModify response '"
<< kFindAndModifyResponseResultDocField
- << "'field, got: "
- << newDocElem};
+ << "'field, got: " << newDocElem};
}
return newDocElem.Obj().getOwned();
@@ -220,14 +219,10 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* opCtx,
Date_t time,
StringData why,
const WriteConcernOptions& writeConcern) {
- BSONObj newLockDetails(BSON(
- LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
- << who
- << LocksType::process()
- << processId
- << LocksType::when(time)
- << LocksType::why()
- << why));
+ BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
+ << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
+ << LocksType::process() << processId << LocksType::when(time)
+ << LocksType::why() << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS,
@@ -281,14 +276,10 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* opCtx,
BSON(LocksType::name() << lockID << LocksType::state(LocksType::UNLOCKED)));
orQueryBuilder.append(BSON(LocksType::name() << lockID << LocksType::lockID(currentHolderTS)));
- BSONObj newLockDetails(BSON(
- LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
- << who
- << LocksType::process()
- << processId
- << LocksType::when(time)
- << LocksType::why()
- << why));
+ BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
+ << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
+ << LocksType::process() << processId << LocksType::when(time)
+ << LocksType::why() << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS, BSON("$or" << orQueryBuilder.arr()), BSON("$set" << newLockDetails));
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index f2eca5abcf7..5dae286da5a 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -53,14 +53,8 @@ void noGrabLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "grabLock not expected to be called. "
- << "lockID: "
- << lockID
- << ", who: "
- << who
- << ", processId: "
- << processId
- << ", why: "
- << why);
+ << "lockID: " << lockID << ", who: " << who << ", processId: " << processId
+ << ", why: " << why);
}
void noOvertakeLockFuncSet(StringData lockID,
@@ -71,22 +65,13 @@ void noOvertakeLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "overtakeLock not expected to be called. "
- << "lockID: "
- << lockID
- << ", currentHolderTS: "
- << currentHolderTS
- << ", who: "
- << who
- << ", processId: "
- << processId
- << ", why: "
- << why);
+ << "lockID: " << lockID << ", currentHolderTS: " << currentHolderTS
+ << ", who: " << who << ", processId: " << processId << ", why: " << why);
}
void noUnLockFuncSet(const OID& lockSessionID) {
FAIL(str::stream() << "unlock not expected to be called. "
- << "lockSessionID: "
- << lockSessionID);
+ << "lockSessionID: " << lockSessionID);
}
void noPingFuncSet(StringData processID, Date_t ping) {
@@ -95,26 +80,22 @@ void noPingFuncSet(StringData processID, Date_t ping) {
void noStopPingFuncSet(StringData processID) {
FAIL(str::stream() << "stopPing not expected to be called. "
- << "processID: "
- << processID);
+ << "processID: " << processID);
}
void noGetLockByTSSet(const OID& lockSessionID) {
FAIL(str::stream() << "getLockByTS not expected to be called. "
- << "lockSessionID: "
- << lockSessionID);
+ << "lockSessionID: " << lockSessionID);
}
void noGetLockByNameSet(StringData name) {
FAIL(str::stream() << "getLockByName not expected to be called. "
- << "lockName: "
- << name);
+ << "lockName: " << name);
}
void noGetPingSet(StringData processId) {
FAIL(str::stream() << "getPing not expected to be called. "
- << "lockName: "
- << processId);
+ << "lockName: " << processId);
}
void noGetServerInfoSet() {
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index d8b9a5a42bc..d407a9c523f 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -219,4 +219,4 @@ private:
GetServerInfoFunc _getServerInfoChecker;
StatusWith<DistLockCatalog::ServerInfo> _getServerInfoReturnValue;
};
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index ceb0611669b..6a17de30fad 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -45,12 +45,8 @@ namespace {
void NoLockFuncSet(StringData name, StringData whyMessage, Milliseconds waitFor) {
FAIL(str::stream() << "Lock not expected to be called. "
- << "Name: "
- << name
- << ", whyMessage: "
- << whyMessage
- << ", waitFor: "
- << waitFor);
+ << "Name: " << name << ", whyMessage: " << whyMessage
+ << ", waitFor: " << waitFor);
}
} // namespace
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.cpp b/src/mongo/s/catalog/dist_lock_ping_info.cpp
index c0643c1fa12..2549e55bb19 100644
--- a/src/mongo/s/catalog/dist_lock_ping_info.cpp
+++ b/src/mongo/s/catalog/dist_lock_ping_info.cpp
@@ -42,4 +42,4 @@ DistLockPingInfo::DistLockPingInfo(
configLocalTime(remoteArg),
lockSessionId(std::move(tsArg)),
electionId(std::move(electionIdArg)) {}
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_ping_info.h b/src/mongo/s/catalog/dist_lock_ping_info.h
index e3db046db20..6e236fb5133 100644
--- a/src/mongo/s/catalog/dist_lock_ping_info.h
+++ b/src/mongo/s/catalog/dist_lock_ping_info.h
@@ -64,4 +64,4 @@ struct DistLockPingInfo {
// Note: unused by legacy dist lock.
OID electionId;
};
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/mongo_version_range.cpp b/src/mongo/s/catalog/mongo_version_range.cpp
index c92fcb0b749..a0c5f505817 100644
--- a/src/mongo/s/catalog/mongo_version_range.cpp
+++ b/src/mongo/s/catalog/mongo_version_range.cpp
@@ -148,4 +148,4 @@ bool isInMongoVersionRanges(StringData version, const vector<MongoVersionRange>&
return false;
}
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/mongo_version_range.h b/src/mongo/s/catalog/mongo_version_range.h
index 5e8d79807a7..f995864a689 100644
--- a/src/mongo/s/catalog/mongo_version_range.h
+++ b/src/mongo/s/catalog/mongo_version_range.h
@@ -60,4 +60,4 @@ struct MongoVersionRange {
};
bool isInMongoVersionRanges(StringData version, const std::vector<MongoVersionRange>& ranges);
-}
+} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index b64f17d968c..a26142eb958 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -259,14 +259,14 @@ StatusWith<repl::OpTimeWith<std::vector<DatabaseType>>> ShardingCatalogClientImp
for (const BSONObj& doc : findStatus.getValue().value) {
auto dbRes = DatabaseType::fromBSON(doc);
if (!dbRes.isOK()) {
- return dbRes.getStatus().withContext(stream() << "Failed to parse database document "
- << doc);
+ return dbRes.getStatus().withContext(stream()
+ << "Failed to parse database document " << doc);
}
Status validateStatus = dbRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream() << "Failed to validate database document "
- << doc);
+ return validateStatus.withContext(stream()
+ << "Failed to validate database document " << doc);
}
databases.push_back(dbRes.getValue());
@@ -376,9 +376,7 @@ StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollection
if (!collectionResult.isOK()) {
return {ErrorCodes::FailedToParse,
str::stream() << "error while parsing " << CollectionType::ConfigNS.ns()
- << " document: "
- << obj
- << " : "
+ << " document: " << obj << " : "
<< collectionResult.getStatus().toString()};
}
@@ -590,14 +588,14 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
for (const BSONObj& doc : findStatus.getValue().value) {
auto shardRes = ShardType::fromBSON(doc);
if (!shardRes.isOK()) {
- return shardRes.getStatus().withContext(stream() << "Failed to parse shard document "
- << doc);
+ return shardRes.getStatus().withContext(stream()
+ << "Failed to parse shard document " << doc);
}
Status validateStatus = shardRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream() << "Failed to validate shard document "
- << doc);
+ return validateStatus.withContext(stream()
+ << "Failed to validate shard document " << doc);
}
shards.push_back(shardRes.getValue());
@@ -713,9 +711,9 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
(readConcern == repl::ReadConcernLevel::kMajorityReadConcern &&
writeConcern.wMode == WriteConcernOptions::kMajority));
- BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
+ BSONObj cmd =
+ BSON("applyOps" << updateOps << "preCondition" << preCondition
+ << WriteConcernOptions::kWriteConcernField << writeConcern.toBSON());
auto response =
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
@@ -772,11 +770,11 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
const auto& newestChunk = chunkWithStatus.getValue();
if (newestChunk.empty()) {
- errMsg = str::stream() << "chunk operation commit failed: version "
- << lastChunkVersion.toString()
- << " doesn't exist in namespace: " << nss.ns()
- << ". Unable to save chunk ops. Command: " << cmd
- << ". Result: " << response.getValue().response;
+ errMsg = str::stream()
+ << "chunk operation commit failed: version " << lastChunkVersion.toString()
+ << " doesn't exist in namespace: " << nss.ns()
+ << ". Unable to save chunk ops. Command: " << cmd
+ << ". Result: " << response.getValue().response;
return status.withContext(errMsg);
};
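
Editorial note: where a streamed message cannot fit even after packing, clang-format 7 prefers to break immediately after the stream expression and carry the whole `<<` chain on aligned continuation lines, as in the errMsg assignment above. A standard-library sketch of that wrap point (the function name and message are illustrative):

    #include <sstream>
    #include <string>

    std::string commitError(const std::string& version, const std::string& ns) {
        std::ostringstream errMsg;
        errMsg << "chunk operation commit failed: version " << version
               << " doesn't exist in namespace: " << ns;
        return errMsg.str();
    }
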
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 10bd2e3afe8..f0a3ec47636 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -67,8 +67,8 @@ using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using executor::TaskExecutor;
-using rpc::ReplSetMetadata;
using repl::OpTime;
+using rpc::ReplSetMetadata;
using std::vector;
using unittest::assertGet;
@@ -101,7 +101,6 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
onFindWithMetadataCommand(
[this, &expectedColl, newOpTime](const RemoteCommandRequest& request) {
-
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
@@ -597,10 +596,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandSuccess) {
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 0)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -620,14 +617,14 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandInvalidWriteConce
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObjBuilder responseBuilder;
- bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 2)),
- &responseBuilder);
+ bool ok =
+ catalogClient()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 2)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -646,22 +643,23 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
},
Status::OK());
- auto future = launchAsync([this] {
- BSONObjBuilder responseBuilder;
- bool ok = catalogClient()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w" << 1 << "wtimeout"
- << 30)),
- &responseBuilder);
- ASSERT_FALSE(ok);
-
- Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
- ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
- });
+ auto future =
+ launchAsync([this] {
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogClient()->runUserManagementWriteCommand(
+ operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 1 << "wtimeout" << 30)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
+ });
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
@@ -670,10 +668,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 30)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 30)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -761,10 +757,8 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
<< "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
+ << "wtimeout" << 0)
+ << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
@@ -799,7 +793,6 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, newOpTime] {
-
OpTime opTime;
const auto& collections =
assertGet(catalogClient()->getCollections(operationContext(), nullptr, &opTime));
@@ -1200,8 +1193,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessful) {
ASSERT_EQUALS("config", request.dbname);
ASSERT_BSONOBJ_EQ(BSON("w"
<< "majority"
- << "wtimeout"
- << 60000),
+ << "wtimeout" << 60000),
request.cmdObj["writeConcern"].Obj());
ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index b2a2b522299..3142901d06a 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -46,12 +46,10 @@ TEST(ChangeLogType, Empty) {
TEST(ChangeLogType, Valid) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -77,8 +75,7 @@ TEST(ChangeLogType, MissingChangeId) {
<< ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -91,8 +88,7 @@ TEST(ChangeLogType, MissingServer) {
<< ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -102,11 +98,9 @@ TEST(ChangeLogType, MissingServer) {
TEST(ChangeLogType, MissingClientAddr) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -116,11 +110,9 @@ TEST(ChangeLogType, MissingClientAddr) {
TEST(ChangeLogType, MissingTime) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
@@ -130,8 +122,7 @@ TEST(ChangeLogType, MissingTime) {
TEST(ChangeLogType, MissingWhat) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::ns("test.test")
@@ -143,14 +134,13 @@ TEST(ChangeLogType, MissingWhat) {
}
TEST(ChangeLogType, MissingNS) {
- BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj =
+ BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_OK(changeLogResult.getStatus());
@@ -170,12 +160,10 @@ TEST(ChangeLogType, MissingNS) {
TEST(ChangeLogType, MissingDetails) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::shard("shardname")
+ << ChangeLogType::server("host.local") << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test"));
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test"));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
@@ -186,8 +174,7 @@ TEST(ChangeLogType, MissingShard) {
<< ChangeLogType::server("host.local")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test")
+ << ChangeLogType::what("split") << ChangeLogType::ns("test.test")
<< ChangeLogType::details(BSON("dummy"
<< "info")));
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 1e2d5dff754..cf97e57845f 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -68,8 +68,8 @@ const char kMaxKey[] = "max";
Status extractObject(const BSONObj& obj, const std::string& fieldName, BSONElement* bsonElement) {
Status elementStatus = bsonExtractTypedField(obj, fieldName, Object, bsonElement);
if (!elementStatus.isOK()) {
- return elementStatus.withContext(str::stream() << "The field '" << fieldName
- << "' cannot be parsed");
+ return elementStatus.withContext(str::stream()
+ << "The field '" << fieldName << "' cannot be parsed");
}
if (bsonElement->Obj().isEmpty()) {
@@ -108,8 +108,8 @@ StatusWith<ChunkRange> ChunkRange::fromBSON(const BSONObj& obj) {
if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
- str::stream() << "min: " << minKey.Obj() << " should be less than max: "
- << maxKey.Obj()};
+ str::stream() << "min: " << minKey.Obj()
+ << " should be less than max: " << maxKey.Obj()};
}
return ChunkRange(minKey.Obj().getOwned(), maxKey.Obj().getOwned());
@@ -135,8 +135,7 @@ const Status ChunkRange::extractKeyPattern(KeyPattern* shardKeyPatternOut) const
(!min.more() && max.more())) {
return {ErrorCodes::ShardKeyNotFound,
str::stream() << "the shard key of min " << _minKey << " doesn't match with "
- << "the shard key of max "
- << _maxKey};
+ << "the shard key of max " << _maxKey};
}
b.append(x.fieldName(), 1);
}
@@ -311,8 +310,8 @@ StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID&
if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
- str::stream() << "min: " << minKey.Obj() << " should be less than max: "
- << maxKey.Obj()};
+ str::stream() << "min: " << minKey.Obj()
+ << " should be less than max: " << maxKey.Obj()};
}
chunk._min = minKey.Obj().getOwned();
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 5bc960179e1..49ae676b153 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -50,41 +50,32 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(objModNS);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol") << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch());
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch());
chunkRes = ChunkType::fromConfigBSON(objModShard);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModVersion =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModVersion);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -100,8 +91,8 @@ TEST(ChunkType, MissingShardRequiredFields) {
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::minShardID.name());
- BSONObj objModMax = BSON(
- ChunkType::minShardID(kMin) << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
+ BSONObj objModMax = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::shard(kShard.toString()) << "lastmod" << lastmod);
chunkRes = ChunkType::fromShardBSON(objModMax, epoch);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::max.name());
@@ -112,8 +103,8 @@ TEST(ChunkType, MissingShardRequiredFields) {
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
ASSERT_STRING_CONTAINS(chunkRes.getStatus().reason(), ChunkType::shard.name());
- BSONObj objModLastmod = BSON(
- ChunkType::minShardID(kMin) << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
+ BSONObj objModLastmod = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()));
chunkRes = ChunkType::fromShardBSON(objModLastmod, epoch);
ASSERT_EQUALS(chunkRes.getStatus(), ErrorCodes::NoSuchKey);
}
@@ -123,10 +114,9 @@ TEST(ChunkType, ToFromShardBSON) {
ChunkVersion chunkVersion(1, 2, epoch);
auto lastmod = Timestamp(chunkVersion.toLong());
- BSONObj obj = BSON(ChunkType::minShardID(kMin) << ChunkType::max(kMax)
- << ChunkType::shard(kShard.toString())
- << "lastmod"
- << lastmod);
+ BSONObj obj = BSON(ChunkType::minShardID(kMin)
+ << ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
+ << lastmod);
ChunkType shardChunk = assertGet(ChunkType::fromShardBSON(obj, epoch));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -140,14 +130,10 @@ TEST(ChunkType, ToFromShardBSON) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -155,14 +141,11 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("b" << 20))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSONObj obj =
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -170,28 +153,22 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, MinToMaxNotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 20))
- << ChunkType::max(BSON("a" << 10))
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch()
- << ChunkType::shard("shard0001"));
+ BSONObj obj =
+ BSON(ChunkType::name("test.mycol-a_MinKey")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
+ << ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
}
TEST(ChunkType, ToFromConfigBSON) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj = BSON(ChunkType::name("test.mycol-a_10") << ChunkType::ns("test.mycol")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::shard("shard0001")
- << "lastmod"
- << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch"
- << chunkVersion.epoch());
+ BSONObj obj =
+ BSON(ChunkType::name("test.mycol-a_10")
+ << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001") << "lastmod"
+ << Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch());
StatusWith<ChunkType> chunkRes = ChunkType::fromConfigBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
@@ -208,18 +185,14 @@ TEST(ChunkType, ToFromConfigBSON) {
}
TEST(ChunkType, Pre22Format) {
- ChunkType chunk = assertGet(ChunkType::fromConfigBSON(BSON("_id"
- << "test.mycol-a_MinKey"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(1)
- << "ns"
- << "test.mycol"
- << "min"
- << BSON("a" << 10)
- << "max"
- << BSON("a" << 20)
- << "shard"
- << "shard0001")));
+ ChunkType chunk = assertGet(
+ ChunkType::fromConfigBSON(BSON("_id"
+ << "test.mycol-a_MinKey"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "ns"
+ << "test.mycol"
+ << "min" << BSON("a" << 10) << "max" << BSON("a" << 20)
+ << "shard"
+ << "shard0001")));
ASSERT_OK(chunk.validate());
ASSERT_EQUALS(chunk.getNS().ns(), "test.mycol");
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index 68c1e73b096..9130562aaac 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -48,14 +48,13 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -76,18 +75,14 @@ TEST(CollectionType, Basic) {
TEST(CollectionType, AllFieldsPresent) {
const OID oid = OID::gen();
const auto uuid = UUID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)
- << CollectionType::uuid()
- << uuid
- << "isAssignedShardKey"
- << false));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true) << CollectionType::uuid() << uuid << "isAssignedShardKey"
+ << false));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -109,24 +104,20 @@ TEST(CollectionType, AllFieldsPresent) {
TEST(CollectionType, EmptyDefaultCollationFailsToParse) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSONObj())
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::defaultCollation(BSONObj())
+ << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
TEST(CollectionType, MissingDefaultCollationParses) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -136,14 +127,13 @@ TEST(CollectionType, MissingDefaultCollationParses) {
TEST(CollectionType, DefaultCollationSerializesCorrectly) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::defaultCollation(BSON("locale"
- << "fr_CA"))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::defaultCollation(BSON("locale"
+ << "fr_CA"))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -156,12 +146,10 @@ TEST(CollectionType, DefaultCollationSerializesCorrectly) {
TEST(CollectionType, MissingDefaultCollationIsNotSerialized) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -194,16 +182,11 @@ TEST(CollectionType, EpochCorrectness) {
}
TEST(CollectionType, Pre22Format) {
- CollectionType coll = assertGet(CollectionType::fromBSON(BSON("_id"
- << "db.coll"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(1)
- << "dropped"
- << false
- << "key"
- << BSON("a" << 1)
- << "unique"
- << false)));
+ CollectionType coll = assertGet(
+ CollectionType::fromBSON(BSON("_id"
+ << "db.coll"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "dropped"
+ << false << "key" << BSON("a" << 1) << "unique" << false)));
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT(!coll.getEpoch().isSet());
@@ -216,12 +199,10 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidCollectionNamespace) {
const OID oid = OID::gen();
- StatusWith<CollectionType> result =
- CollectionType::fromBSON(BSON(CollectionType::fullNs("foo\\bar.coll")
- << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ StatusWith<CollectionType> result = CollectionType::fromBSON(BSON(
+ CollectionType::fullNs("foo\\bar.coll")
+ << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_TRUE(result.isOK());
CollectionType collType = result.getValue();
ASSERT_FALSE(collType.validate().isOK());
@@ -230,10 +211,10 @@ TEST(CollectionType, InvalidCollectionNamespace) {
TEST(CollectionType, BadType) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
- BSON(CollectionType::fullNs() << 1 << CollectionType::epoch(oid)
- << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1))
- << CollectionType::unique(true)));
+ BSON(CollectionType::fullNs()
+ << 1 << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp
index df3a9486f64..6bc2d7eaac2 100644
--- a/src/mongo/s/catalog/type_config_version_test.cpp
+++ b/src/mongo/s/catalog/type_config_version_test.cpp
@@ -254,10 +254,10 @@ TEST(Excludes, BadRangeArray) {
<< "1.2.3"); // empty bound
BSONArray includeArr = bab.arr();
- auto versionInfoResult = VersionType::fromBSON(BSON(
- VersionType::minCompatibleVersion(3) << VersionType::currentVersion(4)
- << VersionType::clusterId(OID::gen())
- << VersionType::excludingMongoVersions(includeArr)));
+ auto versionInfoResult = VersionType::fromBSON(
+ BSON(VersionType::minCompatibleVersion(3)
+ << VersionType::currentVersion(4) << VersionType::clusterId(OID::gen())
+ << VersionType::excludingMongoVersions(includeArr)));
ASSERT_EQ(ErrorCodes::FailedToParse, versionInfoResult.getStatus());
}
diff --git a/src/mongo/s/catalog/type_database.cpp b/src/mongo/s/catalog/type_database.cpp
index 5dbeb34ab7d..2caf60f308f 100644
--- a/src/mongo/s/catalog/type_database.cpp
+++ b/src/mongo/s/catalog/type_database.cpp
@@ -83,10 +83,10 @@ StatusWith<DatabaseType> DatabaseType::fromBSON(const BSONObj& source) {
BSONObj versionField = source.getObjectField("version");
if (versionField.isEmpty()) {
return Status{ErrorCodes::InternalError,
- str::stream() << "DatabaseVersion doesn't exist in database entry "
- << source
- << " despite the config server being in binary version 4.2 "
- "or later."};
+ str::stream()
+ << "DatabaseVersion doesn't exist in database entry " << source
+ << " despite the config server being in binary version 4.2 "
+ "or later."};
}
dbtVersion = DatabaseVersion::parse(IDLParserErrorContext("DatabaseType"), versionField);
}
diff --git a/src/mongo/s/catalog/type_database_test.cpp b/src/mongo/s/catalog/type_database_test.cpp
index e4e4b046232..8a9eb73dcda 100644
--- a/src/mongo/s/catalog/type_database_test.cpp
+++ b/src/mongo/s/catalog/type_database_test.cpp
@@ -49,8 +49,7 @@ TEST(DatabaseType, Basic) {
UUID uuid = UUID::gen();
StatusWith<DatabaseType> status = DatabaseType::fromBSON(
BSON(DatabaseType::name("mydb")
- << DatabaseType::primary("shard")
- << DatabaseType::sharded(true)
+ << DatabaseType::primary("shard") << DatabaseType::sharded(true)
<< DatabaseType::version(BSON("uuid" << uuid << "lastMod" << 0))));
ASSERT_TRUE(status.isOK());
diff --git a/src/mongo/s/catalog/type_locks_test.cpp b/src/mongo/s/catalog/type_locks_test.cpp
index b00ffe06c0e..b249bb648a9 100644
--- a/src/mongo/s/catalog/type_locks_test.cpp
+++ b/src/mongo/s/catalog/type_locks_test.cpp
@@ -46,12 +46,12 @@ TEST(Validity, Empty) {
TEST(Validity, UnlockedWithOptional) {
OID testLockID = OID::gen();
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::UNLOCKED)
- << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::UNLOCKED) << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -78,12 +78,12 @@ TEST(Validity, UnlockedWithoutOptional) {
TEST(Validity, LockedValid) {
OID testLockID = OID::gen();
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -98,11 +98,11 @@ TEST(Validity, LockedValid) {
}
TEST(Validity, LockedMissingProcess) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -124,11 +124,10 @@ TEST(Validity, LockedMissingLockID) {
}
TEST(Validity, LockedMissingWho) {
- BSONObj obj =
- BSON(LocksType::name("dummy") << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj = BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen()) << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -137,11 +136,11 @@ TEST(Validity, LockedMissingWho) {
}
TEST(Validity, LockedMissingWhy) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -150,12 +149,12 @@ TEST(Validity, LockedMissingWhy) {
}
TEST(Validity, ContestedValid) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -164,11 +163,11 @@ TEST(Validity, ContestedValid) {
}
TEST(Validity, ContestedMissingProcess) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
- << LocksType::why("twiddling thumbs"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249")
+ << LocksType::why("twiddling thumbs"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -190,11 +189,10 @@ TEST(Validity, ContestedMissingLockID) {
}
TEST(Validity, ContestedMissingWho) {
- BSONObj obj =
- BSON(LocksType::name("dummy") << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -203,11 +201,11 @@ TEST(Validity, ContestedMissingWho) {
}
TEST(Validity, ContestedMissingWhy) {
- BSONObj obj = BSON(LocksType::name("dummy")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
+ BSONObj obj =
+ BSON(LocksType::name("dummy")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Dummy:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
diff --git a/src/mongo/s/catalog/type_mongos_test.cpp b/src/mongo/s/catalog/type_mongos_test.cpp
index a253ed68d00..7007305f412 100644
--- a/src/mongo/s/catalog/type_mongos_test.cpp
+++ b/src/mongo/s/catalog/type_mongos_test.cpp
@@ -41,10 +41,8 @@ using namespace mongo;
TEST(Validity, MissingName) {
BSONObj obj = BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -53,10 +51,8 @@ TEST(Validity, MissingName) {
TEST(Validity, MissingPing) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -64,36 +60,33 @@ TEST(Validity, MissingPing) {
}
TEST(Validity, MissingUp) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingWaiting) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingMongoVersion) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::configVersion(0)
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false) << MongosType::configVersion(0)
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -107,12 +100,11 @@ TEST(Validity, MissingMongoVersion) {
}
TEST(Validity, MissingConfigVersion) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
+ BSONObj obj =
+ BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false) << MongosType::mongoVersion("x.x.x")
+ << MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -128,10 +120,8 @@ TEST(Validity, MissingConfigVersion) {
TEST(Validity, MissingAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0));
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -144,10 +134,8 @@ TEST(Validity, MissingAdvisoryHostFQDNs) {
TEST(Validity, EmptyAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSONArrayBuilder().arr()));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -162,10 +150,8 @@ TEST(Validity, EmptyAdvisoryHostFQDNs) {
TEST(Validity, BadTypeAdvisoryHostFQDNs) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo" << 0 << "baz")));
auto mongosTypeResult = MongosType::fromBSON(obj);
@@ -175,10 +161,8 @@ TEST(Validity, BadTypeAdvisoryHostFQDNs) {
TEST(Validity, Valid) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100)
- << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x")
- << MongosType::configVersion(0)
+ << MongosType::uptime(100) << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0)
<< MongosType::advisoryHostFQDNs(BSON_ARRAY("foo"
<< "bar"
<< "baz")));
diff --git a/src/mongo/s/catalog/type_shard_database.cpp b/src/mongo/s/catalog/type_shard_database.cpp
index 268460023e2..059516dc3ac 100644
--- a/src/mongo/s/catalog/type_shard_database.cpp
+++ b/src/mongo/s/catalog/type_shard_database.cpp
@@ -65,10 +65,10 @@ StatusWith<ShardDatabaseType> ShardDatabaseType::fromBSON(const BSONObj& source)
BSONObj versionField = source.getObjectField("version");
if (versionField.isEmpty()) {
return Status{ErrorCodes::InternalError,
- str::stream() << "DatabaseVersion doesn't exist in database entry "
- << source
- << " despite the shard being in binary version 4.2 or "
- "later."};
+ str::stream()
+ << "DatabaseVersion doesn't exist in database entry " << source
+ << " despite the shard being in binary version 4.2 or "
+ "later."};
}
dbVersion = DatabaseVersion::parse(IDLParserErrorContext("DatabaseType"), versionField);
}
diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp
index b39725e9c0f..d2c9ab0326e 100644
--- a/src/mongo/s/catalog/type_shard_test.cpp
+++ b/src/mongo/s/catalog/type_shard_test.cpp
@@ -62,9 +62,9 @@ TEST(ShardType, OnlyMandatory) {
}
TEST(ShardType, AllOptionalsPresent) {
- BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::draining(true)
- << ShardType::maxSizeMB(100));
+ BSONObj obj = BSON(ShardType::name("shard0000")
+ << ShardType::host("localhost:27017") << ShardType::draining(true)
+ << ShardType::maxSizeMB(100));
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
@@ -72,9 +72,8 @@ TEST(ShardType, AllOptionalsPresent) {
}
TEST(ShardType, MaxSizeAsFloat) {
- BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::maxSizeMB()
- << 100.0);
+ BSONObj obj = BSON(ShardType::name("shard0000")
+ << ShardType::host("localhost:27017") << ShardType::maxSizeMB() << 100.0);
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index f466fc234ae..1cd8ed6d276 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -58,8 +58,8 @@ TEST(TagsType, Valid) {
}
TEST(TagsType, MissingNsField) {
- BSONObj obj = BSON(TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::tag("tag")
+ << TagsType::min(BSON("a" << 10)) << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
ASSERT_FALSE(status.isOK());
@@ -67,8 +67,8 @@ TEST(TagsType, MissingNsField) {
}
TEST(TagsType, MissingTagField) {
- BSONObj obj = BSON(TagsType::ns("test.mycol") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::ns("test.mycol")
+ << TagsType::min(BSON("a" << 10)) << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
ASSERT_FALSE(status.isOK());
@@ -94,9 +94,9 @@ TEST(TagsType, MissingMaxKey) {
}
TEST(TagsType, KeysWithDifferentNumberOfColumns) {
- BSONObj obj = BSON(TagsType::ns("test.mycol") << TagsType::tag("tag")
- << TagsType::min(BSON("a" << 10 << "b" << 10))
- << TagsType::max(BSON("a" << 20)));
+ BSONObj obj = BSON(TagsType::ns("test.mycol")
+ << TagsType::tag("tag") << TagsType::min(BSON("a" << 10 << "b" << 10))
+ << TagsType::max(BSON("a" << 20)));
StatusWith<TagsType> status = TagsType::fromBSON(obj);
const TagsType& tag = status.getValue();
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index e4a151f9ccc..c2206848332 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -373,8 +373,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
const auto itDb = _collectionsByDb.find(nss.db());
uassert(StaleConfigInfo(nss, targetCollectionVersion, boost::none),
str::stream() << "could not act as router for " << nss.ns()
- << ", no entry for database "
- << nss.db(),
+ << ", no entry for database " << nss.db(),
itDb != _collectionsByDb.end());
auto itColl = itDb->second.find(nss.ns());
@@ -392,8 +391,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
auto foundVersion = itColl->second->routingInfo->getVersion();
uassert(StaleConfigInfo(nss, targetCollectionVersion, foundVersion),
str::stream() << "could not act as router for " << nss.ns() << ", wanted "
- << targetCollectionVersion.toString()
- << ", but found "
+ << targetCollectionVersion.toString() << ", but found "
<< foundVersion.toString(),
foundVersion.epoch() == targetCollectionVersion.epoch());
}
@@ -467,8 +465,8 @@ void CatalogCache::report(BSONObjBuilder* builder) const {
void CatalogCache::_scheduleDatabaseRefresh(WithLock lk,
const std::string& dbName,
std::shared_ptr<DatabaseInfoEntry> dbEntry) {
- const auto onRefreshCompleted =
- [ this, t = Timer(), dbName, dbEntry ](const StatusWith<DatabaseType>& swDbt) {
+ const auto onRefreshCompleted = [this, t = Timer(), dbName, dbEntry](
+ const StatusWith<DatabaseType>& swDbt) {
// TODO (SERVER-34164): Track and increment stats for database refreshes.
if (!swDbt.isOK()) {
LOG_CATALOG_REFRESH(0) << "Refresh for database " << dbName << " took " << t.millis()
@@ -556,8 +554,9 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
}
// Invoked when one iteration of getChunksSince has completed, whether with success or error
- const auto onRefreshCompleted = [ this, t = Timer(), nss, isIncremental, existingRoutingInfo ](
- const Status& status, RoutingTableHistory* routingInfoAfterRefresh) {
+ const auto onRefreshCompleted = [this, t = Timer(), nss, isIncremental, existingRoutingInfo](
+ const Status& status,
+ RoutingTableHistory* routingInfoAfterRefresh) {
if (isIncremental) {
_stats.numActiveIncrementalRefreshes.subtractAndFetch(1);
} else {
@@ -570,9 +569,10 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
LOG_CATALOG_REFRESH(0) << "Refresh for collection " << nss << " took " << t.millis()
<< " ms and failed" << causedBy(redact(status));
} else if (routingInfoAfterRefresh) {
- const int logLevel = (!existingRoutingInfo || (existingRoutingInfo &&
- routingInfoAfterRefresh->getVersion() !=
- existingRoutingInfo->getVersion()))
+ const int logLevel =
+ (!existingRoutingInfo ||
+ (existingRoutingInfo &&
+ routingInfoAfterRefresh->getVersion() != existingRoutingInfo->getVersion()))
? 0
: 1;
LOG_CATALOG_REFRESH(logLevel)
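
The two catalog_cache.cpp lambda hunks show another recurring 7.0.1 change: the capture list loses its inner padding (no more `[ this, t = Timer(), ... ]`) and stays on the introducer line, while the parameter list wraps instead. A self-contained sketch of the same shape, with invented names standing in for the refresh callback:

    #include <chrono>
    #include <iostream>
    #include <string>

    int main() {
        const std::string dbName = "mydb";
        // Capture list hugs the brackets and stays with the introducer;
        // under clang-format-7.0.1 it is the parameter list that wraps.
        const auto onRefreshCompleted = [dbName, start = std::chrono::steady_clock::now()](
                                            const std::string& status) {
            const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                                std::chrono::steady_clock::now() - start)
                                .count();
            std::cout << "Refresh for database " << dbName << " took " << ms
                      << " ms, status: " << status << "\n";
        };
        onRefreshCompleted("OK");
        return 0;
    }
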
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 6524460ad41..647742c3408 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -88,8 +88,7 @@ void ChunkInfo::throwIfMovedSince(const Timestamp& ts) const {
uasserted(ErrorCodes::MigrationConflict,
str::stream() << "Chunk has moved since timestamp: " << ts.toString()
- << ", most recently at timestamp: "
- << latestValidAfter.toString());
+ << ", most recently at timestamp: " << latestValidAfter.toString());
}
bool ChunkInfo::containsKey(const BSONObj& shardKey) const {
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 5e59e59a079..c1012cfb0f5 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -454,15 +454,13 @@ ShardVersionMap RoutingTableHistory::_constructShardVersionMap() const {
str::stream()
<< "Gap exists in the routing table between chunks "
<< _chunkMap.at(_extractKeyString(*lastMax))->getRange().toString()
- << " and "
- << rangeLast->second->getRange().toString());
+ << " and " << rangeLast->second->getRange().toString());
else
uasserted(ErrorCodes::ConflictingOperationInProgress,
str::stream()
<< "Overlap exists in the routing table between chunks "
<< _chunkMap.at(_extractKeyString(*lastMax))->getRange().toString()
- << " and "
- << rangeLast->second->getRange().toString());
+ << " and " << rangeLast->second->getRange().toString());
}
if (!firstMin)
diff --git a/src/mongo/s/chunk_manager_index_bounds_test.cpp b/src/mongo/s/chunk_manager_index_bounds_test.cpp
index bf5b9b4b827..f24cfa72f57 100644
--- a/src/mongo/s/chunk_manager_index_bounds_test.cpp
+++ b/src/mongo/s/chunk_manager_index_bounds_test.cpp
@@ -320,8 +320,7 @@ TEST_F(CMCollapseTreeTest, Regex) {
OrderedIntervalList expected;
expected.intervals.push_back(Interval(BSON(""
<< ""
- << ""
- << BSONObj()),
+ << "" << BSONObj()),
true,
false));
BSONObjBuilder builder;
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 7903b2c48e5..85df5bf1720 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -49,9 +49,9 @@
namespace mongo {
-using std::shared_ptr;
using std::map;
using std::set;
+using std::shared_ptr;
using std::string;
using std::vector;
@@ -565,10 +565,11 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// shard or if we keep better track of chunks, we can actually add the skip
// value into the cursor and/or make some assumptions about the return value
// size ( (batch size + skip amount) / num_servers ).
- _qSpec.ntoreturn() == 0 ? 0 : (_qSpec.ntoreturn() > 0
- ? _qSpec.ntoreturn() + _qSpec.ntoskip()
- : _qSpec.ntoreturn() -
- _qSpec.ntoskip()))); // batchSize
+ _qSpec.ntoreturn() == 0
+ ? 0
+ : (_qSpec.ntoreturn() > 0
+ ? _qSpec.ntoreturn() + _qSpec.ntoskip()
+ : _qSpec.ntoreturn() - _qSpec.ntoskip()))); // batchSize
} else {
// Single shard query
@@ -596,9 +597,9 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
// Without full initialization, throw an exception
uassert(15987,
- str::stream() << "could not fully initialize cursor on shard " << shardId
- << ", current connection state is "
- << mdata.toBSON().toString(),
+ str::stream()
+ << "could not fully initialize cursor on shard " << shardId
+ << ", current connection state is " << mdata.toBSON().toString(),
success);
mdata.retryNext = false;
@@ -991,8 +992,7 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) {
// Version is zero b/c this is deprecated codepath
staleConfigExs.push_back(str::stream() << "stale config detected for " << _ns
- << " in ParallelCursor::_init "
- << errLoc);
+ << " in ParallelCursor::_init " << errLoc);
break;
}
@@ -1054,8 +1054,8 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) {
_cursors[i].reset(NULL, NULL);
if (!retry) {
- socketExs.push_back(str::stream() << "error querying server: "
- << servers[i]);
+ socketExs.push_back(str::stream()
+ << "error querying server: " << servers[i]);
conns[i]->done();
} else {
retryQueries.insert(i);
@@ -1275,12 +1275,7 @@ void ParallelConnectionMetadata::cleanup(bool full) {
BSONObj ParallelConnectionMetadata::toBSON() const {
return BSON("state" << (pcState ? pcState->toBSON() : BSONObj()) << "retryNext" << retryNext
- << "init"
- << initialized
- << "finish"
- << finished
- << "errored"
- << errored);
+ << "init" << initialized << "finish" << finished << "errored" << errored);
}
std::string ParallelConnectionState::toString() const {
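
The batchSize hunk above is the conditional-operator half of the new style: nested `?:` chains now break before `?` and `:` so the arms line up under one another. A compilable sketch under that convention (parameter names merely mirror the ntoreturn/ntoskip fields in the hunk):

    #include <cstdint>

    // Break before '?' and ':' so each arm of the nested conditional aligns.
    std::int64_t batchSize(std::int64_t ntoreturn, std::int64_t ntoskip) {
        return ntoreturn == 0
            ? 0
            : (ntoreturn > 0 ? ntoreturn + ntoskip : ntoreturn - ntoskip);
    }
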
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 69db3fdbc87..44a2c48c43b 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -205,9 +205,9 @@ public:
const BSONObj& cmdObj) = 0;
/**
- * Runs a cursor command, exhausts the cursor, and pulls all data into memory. Performs retries
- * if the command fails in accordance with the kIdempotent RetryPolicy.
- */
+ * Runs a cursor command, exhausts the cursor, and pulls all data into memory. Performs retries
+ * if the command fails in accordance with the kIdempotent RetryPolicy.
+ */
StatusWith<QueryResponse> runExhaustiveCursorCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
@@ -225,13 +225,13 @@ public:
RetryPolicy retryPolicy);
/**
- * Warning: This method exhausts the cursor and pulls all data into memory.
- * Do not use other than for very small (i.e., admin or metadata) collections.
- * Performs retries if the query fails in accordance with the kIdempotent RetryPolicy.
- *
- * ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
- * ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
- */
+ * Warning: This method exhausts the cursor and pulls all data into memory.
+ * Do not use other than for very small (i.e., admin or metadata) collections.
+ * Performs retries if the query fails in accordance with the kIdempotent RetryPolicy.
+ *
+ * ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
+ * ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
+ */
StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index a24e4fd1bbb..dfa120f49c3 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -65,17 +65,17 @@
namespace mongo {
-using std::shared_ptr;
using std::set;
+using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;
using executor::NetworkInterface;
using executor::NetworkInterfaceThreadPool;
+using executor::TaskExecutor;
using executor::TaskExecutorPool;
using executor::ThreadPoolTaskExecutor;
-using executor::TaskExecutor;
using CallbackArgs = TaskExecutor::CallbackArgs;
using CallbackHandle = TaskExecutor::CallbackHandle;
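
The using-declaration hunks in parallel.cpp and shard_registry.cpp come from sorting rather than reflowing: with SortUsingDeclarations (presumably enabled in the tree's .clang-format), each block is ordered lexicographically, which is why `shared_ptr` now follows `set` and `TaskExecutor` precedes `TaskExecutorPool`. A small sketch of the sorted form:

    #include <map>
    #include <memory>
    #include <set>
    #include <string>
    #include <vector>

    // Lexicographic order: map < set < shared_ptr < string < vector.
    using std::map;
    using std::set;
    using std::shared_ptr;
    using std::string;
    using std::vector;

    int main() {
        vector<string> hosts{"localhost:27017", "localhost:27018"};
        set<string> unique(hosts.begin(), hosts.end());
        return unique.size() == 2 ? 0 : 1;
    }
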
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 362160babec..8602c3d31d2 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -261,7 +261,6 @@ StatusWith<Shard::QueryResponse> ShardRemote::_runExhaustiveCursorCommand(
auto fetcherCallback = [&status, &response](const Fetcher::QueryResponseStatus& dataStatus,
Fetcher::NextAction* nextAction,
BSONObjBuilder* getMoreBob) {
-
// Throw out any accumulated results on error
if (!dataStatus.isOK()) {
status = dataStatus.getStatus();
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index e58ec0a8809..3b19fd8ab0f 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -136,10 +136,10 @@ private:
mutable stdx::mutex _lastCommittedOpTimeMutex;
/**
- * Logical time representing the latest opTime timestamp known to be in this shard's majority
- * committed snapshot. Only the latest time is kept because lagged secondaries may return earlier
- * times.
- */
+ * Logical time representing the latest opTime timestamp known to be in this shard's majority
+ * committed snapshot. Only the latest time is kept because lagged secondaries may return
+ * earlier times.
+ */
LogicalTime _lastCommittedOpTime;
/**
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index c16190f3949..cbdad3a1257 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -99,9 +99,7 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
uassert(28785,
str::stream() << "Unrecognized configsvr mode number: " << configServerModeNumber
<< ". Range of known configsvr mode numbers is: ["
- << minKnownConfigServerMode
- << ", "
- << maxKnownConfigServerMode
+ << minKnownConfigServerMode << ", " << maxKnownConfigServerMode
<< "]",
configServerModeNumber >= minKnownConfigServerMode &&
configServerModeNumber <= maxKnownConfigServerMode);
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 75c235d6cc6..b9dff2f77cd 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-using std::shared_ptr;
using std::map;
+using std::shared_ptr;
using std::string;
namespace {
@@ -302,33 +302,24 @@ bool checkShardVersion(OperationContext* opCtx,
const ChunkVersion refVersion(refManager->getVersion(shard->getId()));
const ChunkVersion currentVersion(manager->getVersion(shard->getId()));
- string msg(str::stream() << "manager (" << currentVersion.toString() << " : "
- << manager->getSequenceNumber()
- << ") "
- << "not compatible with reference manager ("
- << refVersion.toString()
- << " : "
- << refManager->getSequenceNumber()
- << ") "
- << "on shard "
- << shard->getId()
- << " ("
- << shard->getConnString().toString()
- << ")");
+ string msg(str::stream()
+ << "manager (" << currentVersion.toString() << " : "
+ << manager->getSequenceNumber() << ") "
+ << "not compatible with reference manager (" << refVersion.toString()
+ << " : " << refManager->getSequenceNumber() << ") "
+ << "on shard " << shard->getId() << " (" << shard->getConnString().toString()
+ << ")");
uasserted(StaleConfigInfo(nss, refVersion, currentVersion), msg);
}
} else if (refManager) {
- string msg(str::stream() << "not sharded (" << (!manager ? string("<none>") : str::stream()
- << manager->getSequenceNumber())
+ string msg(str::stream() << "not sharded ("
+ << (!manager ? string("<none>")
+ : str::stream() << manager->getSequenceNumber())
<< ") but has reference manager ("
- << refManager->getSequenceNumber()
- << ") "
- << "on conn "
- << conn->getServerAddress()
- << " ("
- << conn_in->getServerAddress()
- << ")");
+ << refManager->getSequenceNumber() << ") "
+ << "on conn " << conn->getServerAddress() << " ("
+ << conn_in->getServerAddress() << ")");
uasserted(
StaleConfigInfo(nss, refManager->getVersion(shard->getId()), ChunkVersion::UNSHARDED()),
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 3cf00aba6dd..616433389c5 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -218,15 +218,13 @@ std::vector<AsyncRequestsSender::Response> gatherResponses(
if (ErrorCodes::isStaleShardVersionError(status.code())) {
uassertStatusOK(status.withContext(str::stream()
<< "got stale shardVersion response from shard "
- << response.shardId
- << " at host "
+ << response.shardId << " at host "
<< response.shardHostAndPort->toString()));
}
if (ErrorCodes::StaleDbVersion == status) {
uassertStatusOK(status.withContext(
str::stream() << "got stale databaseVersion response from shard "
- << response.shardId
- << " at host "
+ << response.shardId << " at host "
<< response.shardHostAndPort->toString()));
}
@@ -527,8 +525,8 @@ void createShardDatabase(OperationContext* opCtx, StringData dbName) {
if (createDbResponse.commandStatus != ErrorCodes::NamespaceExists) {
uassertStatusOKWithContext(createDbResponse.commandStatus,
- str::stream() << "Database " << dbName
- << " could not be created");
+ str::stream()
+ << "Database " << dbName << " could not be created");
}
dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 872adb0028a..67e486f9b3a 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -133,7 +133,11 @@ public:
}
const std::initializer_list<StringData> passthroughFields = {
- "$queryOptions", "collation", "hint", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
+ "$queryOptions",
+ "collation",
+ "hint",
+ "readConcern",
+ QueryRequest::cmdOptionMaxTimeMS,
};
for (auto name : passthroughFields) {
if (auto field = cmdObj[name]) {
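
The passthrough-field hunk above is driven by the trailing comma: clang-format treats a trailing comma in a braced initializer list as a request for one element per line. A sketch of the same effect (the function is invented, and the QueryRequest::cmdOptionMaxTimeMS constant is replaced by a string literal to keep it self-contained):

    #include <initializer_list>
    #include <string>

    int countPassthroughFields() {
        // The trailing comma after the last element makes clang-format
        // keep one element per line.
        const std::initializer_list<std::string> passthroughFields = {
            "$queryOptions",
            "collation",
            "hint",
            "readConcern",
            "maxTimeMS",
        };
        return static_cast<int>(passthroughFields.size());
    }
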
diff --git a/src/mongo/s/commands/cluster_data_size_cmd.cpp b/src/mongo/s/commands/cluster_data_size_cmd.cpp
index 6666ccda065..c8d410e1634 100644
--- a/src/mongo/s/commands/cluster_data_size_cmd.cpp
+++ b/src/mongo/s/commands/cluster_data_size_cmd.cpp
@@ -86,9 +86,10 @@ public:
uassert(ErrorCodes::BadValue,
"keyPattern must be empty or must be an object that equals the shard key",
- !keyPattern || (keyPattern.type() == Object &&
- SimpleBSONObjComparator::kInstance.evaluate(
- cm->getShardKeyPattern().toBSON() == keyPattern.Obj())));
+ !keyPattern ||
+ (keyPattern.type() == Object &&
+ SimpleBSONObjComparator::kInstance.evaluate(
+ cm->getShardKeyPattern().toBSON() == keyPattern.Obj())));
uassert(ErrorCodes::BadValue,
str::stream() << "min value " << min << " does not have shard key",
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index d814f6080d9..a433d211287 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -165,17 +165,16 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
for (size_t i = 0; i < shardResults.size(); i++) {
auto status = getStatusFromCommandResult(shardResults[i].result);
if (!status.isOK()) {
- return status.withContext(str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed");
+ return status.withContext(str::stream()
+ << "Explain command on shard "
+ << shardResults[i].target.toString() << " failed");
}
if (Object != shardResults[i].result["queryPlanner"].type()) {
return Status(ErrorCodes::OperationFailed,
- str::stream() << "Explain command on shard "
- << shardResults[i].target.toString()
- << " failed, caused by: "
- << shardResults[i].result);
+ str::stream()
+ << "Explain command on shard " << shardResults[i].target.toString()
+ << " failed, caused by: " << shardResults[i].result);
}
if (shardResults[i].result.hasField("executionStats")) {
@@ -197,9 +196,9 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
// Either all shards should have all plans execution stats, or none should.
if (0 != numShardsAllPlansStats && shardResults.size() != numShardsAllPlansStats) {
return Status(ErrorCodes::InternalError,
- str::stream() << "Only " << numShardsAllPlansStats << "/"
- << shardResults.size()
- << " had allPlansExecution explain information.");
+ str::stream()
+ << "Only " << numShardsAllPlansStats << "/" << shardResults.size()
+ << " had allPlansExecution explain information.");
}
return Status::OK();
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index a4549e39e51..c188fe19b65 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -148,8 +148,7 @@ BSONObj makeExplainedObj(const BSONObj& outerObj, StringData dbName) {
if (auto innerDb = innerObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbName
- << " but got "
- << innerDb.checkAndGetStringData(),
+ << " but got " << innerDb.checkAndGetStringData(),
innerDb.checkAndGetStringData() == dbName);
}
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index dd726a64cc6..e133875439b 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -49,8 +49,8 @@
namespace mongo {
namespace {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
const char kTermField[] = "term";
diff --git a/src/mongo/s/commands/cluster_find_test.cpp b/src/mongo/s/commands/cluster_find_test.cpp
index 7ebb923448f..8d0dc6792d4 100644
--- a/src/mongo/s/commands/cluster_find_test.cpp
+++ b/src/mongo/s/commands/cluster_find_test.cpp
@@ -41,8 +41,7 @@ protected:
<< "coll");
const BSONObj kFindCmdTargeted = BSON("find"
<< "coll"
- << "filter"
- << BSON("_id" << 0));
+ << "filter" << BSON("_id" << 0));
// The index of the shard expected to receive the response is used to prevent different shards
// from returning documents with the same shard key. This is expected to be 0 for queries
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index 91cdc8f1e91..2f72968826c 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -86,9 +86,7 @@ private:
uassert(28625,
str::stream() << "The op argument to killOp must be of the format shardid:opid"
- << " but found \""
- << opToKill
- << '"',
+ << " but found \"" << opToKill << '"',
(opToKill.size() >= 3) && // must have at least N:N
(opSepPos != std::string::npos) && // must have ':' as separator
(opSepPos != 0) && // can't be :NN
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 8abfe34d7fb..53432a49499 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -345,9 +345,7 @@ public:
opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
} catch (DBException& e) {
e.addContext(str::stream() << "could not run map command on all shards for ns "
- << nss.ns()
- << " and query "
- << q);
+ << nss.ns() << " and query " << q);
throw;
}
@@ -378,8 +376,8 @@ public:
if (!ok) {
// At this point we will return
- errmsg = str::stream() << "MR parallel processing failed: "
- << singleResult.toString();
+ errmsg = str::stream()
+ << "MR parallel processing failed: " << singleResult.toString();
continue;
}
@@ -498,11 +496,11 @@ public:
// the output collection exists and is unsharded, fail because we should not go
// from unsharded to sharded.
BSONObj listCollsCmdResponse;
- ok = conn->runCommand(
- outDB,
- BSON("listCollections" << 1 << "filter"
+ ok = conn->runCommand(outDB,
+ BSON("listCollections"
+ << 1 << "filter"
<< BSON("name" << outputCollNss.coll())),
- listCollsCmdResponse);
+ listCollsCmdResponse);
BSONObj cursorObj = listCollsCmdResponse.getObjectField("cursor");
BSONObj collections = cursorObj["firstBatch"].Obj();
@@ -575,9 +573,7 @@ public:
ok = true;
} catch (DBException& e) {
e.addContext(str::stream() << "could not run final reduce on all shards for "
- << nss.ns()
- << ", output "
- << outputCollNss.ns());
+ << nss.ns() << ", output " << outputCollNss.ns());
throw;
}
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index ef20c6dde10..c686efce22e 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -138,10 +138,10 @@ public:
if (!cm->getShardKeyPattern().isShardKey(minKey) ||
!cm->getShardKeyPattern().isShardKey(maxKey)) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << minKey << "," << maxKey << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << minKey << "," << maxKey << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 9b87c67733a..81400604b41 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -116,10 +116,9 @@ public:
const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
if (!toStatus.isOK()) {
- std::string msg(str::stream() << "Could not move chunk in '" << nss.ns()
- << "' to shard '"
- << toString
- << "' because that shard does not exist");
+ std::string msg(str::stream()
+ << "Could not move chunk in '" << nss.ns() << "' to shard '" << toString
+ << "' because that shard does not exist");
log() << msg;
uasserted(ErrorCodes::ShardNotFound, msg);
}
@@ -158,10 +157,10 @@ public:
// bounds
if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
!cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index a3eb246a2b1..47f42767a5a 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -205,10 +205,10 @@ public:
// bounds
if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
!cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern " << cm->getShardKeyPattern().toBSON();
return false;
}
@@ -225,9 +225,9 @@ public:
} else {
// middle
if (!cm->getShardKeyPattern().isShardKey(middle)) {
- errmsg = str::stream() << "new split key " << middle
- << " is not valid for shard key pattern "
- << cm->getShardKeyPattern().toBSON();
+ errmsg = str::stream()
+ << "new split key " << middle << " is not valid for shard key pattern "
+ << cm->getShardKeyPattern().toBSON();
return false;
}
@@ -239,9 +239,9 @@ public:
chunk.emplace(cm->findIntersectingChunkWithSimpleCollation(middle));
if (chunk->getMin().woCompare(middle) == 0 || chunk->getMax().woCompare(middle) == 0) {
- errmsg = str::stream() << "new split key " << middle
- << " is a boundary key of existing chunk "
- << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
+ errmsg = str::stream()
+ << "new split key " << middle << " is a boundary key of existing chunk "
+ << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
return false;
}
}
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 3a47c10867a..9537bc920e9 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -503,8 +503,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to list indexes on collection: "
- << ns.coll());
+ str::stream()
+ << "Not authorized to list indexes on collection: " << ns.coll());
}
bool run(OperationContext* opCtx,
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index cbe2d514ec8..cba00c83e2c 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -188,9 +188,7 @@ void addContextForTransactionAbortingError(StringData txnIdAsString,
DBException& ex,
StringData reason) {
ex.addContext(str::stream() << "Transaction " << txnIdAsString << " was aborted on statement "
- << latestStmtId
- << " due to: "
- << reason);
+ << latestStmtId << " due to: " << reason);
}
void execCommandClient(OperationContext* opCtx,
@@ -644,9 +642,7 @@ DbResponse Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss
if (q.queryOptions & QueryOption_Exhaust) {
uasserted(18526,
str::stream() << "The 'exhaust' query option is invalid for mongos queries: "
- << nss.ns()
- << " "
- << q.query.toString());
+ << nss.ns() << " " << q.query.toString());
}
// Determine the default read preference mode based on the value of the slaveOk flag.
@@ -860,9 +856,7 @@ void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
const int numCursors = dbm->pullInt();
massert(34425,
str::stream() << "Invalid killCursors message. numCursors: " << numCursors
- << ", message size: "
- << dbm->msg().dataSize()
- << ".",
+ << ", message size: " << dbm->msg().dataSize() << ".",
dbm->msg().dataSize() == 8 + (8 * numCursors));
uassert(28794,
str::stream() << "numCursors must be between 1 and 29999. numCursors: " << numCursors
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index bc9d9abfd23..97e2ccef518 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -130,8 +130,9 @@ boost::optional<repl::OpTime> Grid::advanceConfigOpTime(OperationContext* opCtx,
if (opCtx && opCtx->getClient()) {
clientAddr = opCtx->getClient()->clientAddress(true);
}
- log() << "Received " << what << " " << clientAddr << " indicating config server optime "
- "term has increased, previous optime "
+ log() << "Received " << what << " " << clientAddr
+ << " indicating config server optime "
+ "term has increased, previous optime "
<< prevOpTime << ", now " << opTime;
}
return prevOpTime;
diff --git a/src/mongo/s/mongos_options.h b/src/mongo/s/mongos_options.h
index b7adce6e829..97c3bc53e34 100644
--- a/src/mongo/s/mongos_options.h
+++ b/src/mongo/s/mongos_options.h
@@ -78,4 +78,4 @@ Status validateMongosOptions(const moe::Environment& params);
Status canonicalizeMongosOptions(moe::Environment* params);
Status storeMongosOptions(const moe::Environment& params);
-}
+} // namespace mongo
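
That one-line hunk is clang-format's FixNamespaceComments pass: a bare closing brace at namespace scope gains a `// namespace <name>` trailer, and stale trailers are corrected the same way (see the cluster_client_cursor_params.h and cluster_cursor_manager.h hunks below). A minimal compilable sketch of the resulting shape:

    namespace mongo {

    int answer() {
        return 42;
    }

    } // namespace mongo
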
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 0f76cdd3f67..2d88c7b4f53 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -654,15 +654,13 @@ bool AsyncResultsMerger::_addBatchToBuffer(WithLock lk,
remote.status =
Status(ErrorCodes::InternalError,
str::stream() << "Missing field '" << AsyncResultsMerger::kSortKeyField
- << "' in document: "
- << obj);
+ << "' in document: " << obj);
return false;
} else if (!_params.getCompareWholeSortKey() && key.type() != BSONType::Object) {
remote.status =
Status(ErrorCodes::InternalError,
str::stream() << "Field '" << AsyncResultsMerger::kSortKeyField
- << "' was not of type Object in document: "
- << obj);
+ << "' was not of type Object in document: " << obj);
return false;
}
}
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 30d3a182c42..f3bd002c89f 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -42,7 +42,6 @@
#include "mongo/s/query/results_merger_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/death_test.h"
-#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -1323,8 +1322,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
// The next getMore request should include the maxTimeMS.
expectedCmdObj = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS"
- << 789);
+ << "maxTimeMS" << 789);
ASSERT_BSONOBJ_EQ(getNthPendingRequest(0).cmdObj, expectedCmdObj);
// Clean up.
@@ -1345,11 +1343,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
// Create one cursor whose initial response has a postBatchResumeToken.
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1380,11 +1377,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
std::vector<RemoteCursor> cursors;
BSONObj pbrtFirstCursor;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1410,11 +1406,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNotReadyIfRemoteHasLowerPostB
std::vector<RemoteCursor> cursors;
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1451,11 +1446,10 @@ DEATH_TEST_F(AsyncResultsMergerTest,
UUID uuid = UUID::gen();
std::vector<RemoteCursor> cursors;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1483,11 +1477,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorIgnoresOplogTimestamp) {
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
// Set the first cursor to have both a PBRT and a matching oplog timestamp.
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
cursors.push_back(makeRemoteCursor(
kTestShardIds[0],
kTestShardHosts[0],
@@ -1541,11 +1534,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedAfterExisting)
std::vector<CursorResponse> responses;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 6));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch1 = {firstCursorResponse};
auto firstDoc = batch1.front();
responses.emplace_back(
@@ -1572,11 +1564,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedAfterExisting)
responses.clear();
auto secondDocSortKey = makeResumeToken(Timestamp(1, 5), uuid, BSON("_id" << 2));
auto pbrtSecondCursor = makePostBatchResumeToken(Timestamp(1, 6));
- auto secondCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 5)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 2}}, $sortKey: {'': '"
- << secondDocSortKey.firstElement().String()
- << "'}}");
+ auto secondCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 5)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 2}}, $sortKey: {'': '"
+ << secondDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch2 = {secondCursorResponse};
auto secondDoc = batch2.front();
responses.emplace_back(
@@ -1623,11 +1614,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedBeforeExisting
std::vector<CursorResponse> responses;
auto firstDocSortKey = makeResumeToken(Timestamp(1, 4), uuid, BSON("_id" << 1));
auto pbrtFirstCursor = makePostBatchResumeToken(Timestamp(1, 5));
- auto firstCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 1}}, $sortKey: {'': '"
- << firstDocSortKey.firstElement().String()
- << "'}}");
+ auto firstCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 4)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 1}}, $sortKey: {'': '"
+ << firstDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch1 = {firstCursorResponse};
responses.emplace_back(
kTestNss, CursorId(123), batch1, boost::none, boost::none, pbrtFirstCursor);
@@ -1653,11 +1643,10 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorNewShardOrderedBeforeExisting
responses.clear();
auto secondDocSortKey = makeResumeToken(Timestamp(1, 3), uuid, BSON("_id" << 2));
auto pbrtSecondCursor = makePostBatchResumeToken(Timestamp(1, 5));
- auto secondCursorResponse = fromjson(
- str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 3)}, uuid: '" << uuid.toString()
- << "', documentKey: {_id: 2}}, $sortKey: {'': '"
- << secondDocSortKey.firstElement().String()
- << "'}}");
+ auto secondCursorResponse =
+ fromjson(str::stream() << "{_id: {clusterTime: {ts: Timestamp(1, 3)}, uuid: '"
+ << uuid.toString() << "', documentKey: {_id: 2}}, $sortKey: {'': '"
+ << secondDocSortKey.firstElement().String() << "'}}");
std::vector<BSONObj> batch2 = {secondCursorResponse};
// The last observed time should still be later than the first shard, so we can get the data
// from it.
diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp
index 2c269fd2d6b..5d07b0e2c75 100644
--- a/src/mongo/s/query/blocking_results_merger_test.cpp
+++ b/src/mongo/s/query/blocking_results_merger_test.cpp
@@ -169,7 +169,6 @@ TEST_F(ResultsMergerTestFixture, ShouldBeAbleToBlockUntilNextResultIsReadyWithDe
operationContext(), RouterExecStage::ExecContext::kGetMoreNoResultsYet));
ASSERT_FALSE(next.isEOF());
ASSERT_BSONOBJ_EQ(*next.getResult(), BSON("x" << 1));
-
});
// Schedule the response to the getMore which will return the next result and mark the cursor as
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index 6b3e1902537..d523439eb20 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -284,8 +284,7 @@ Status appendExplainResults(sharded_agg_helpers::DispatchShardPipelineResults&&
auto queryPlannerElement = data["queryPlanner"];
uassert(51157,
str::stream() << "Malformed explain response received from shard " << shardId
- << ": "
- << data.toString(),
+ << ": " << data.toString(),
queryPlannerElement);
explain << "queryPlanner" << queryPlannerElement;
if (auto executionStatsElement = data["executionStats"]) {
@@ -739,9 +738,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
!request.getRuntimeConstants());
uassert(51089,
str::stream() << "Internal parameter(s) [" << AggregationRequest::kNeedsMergeName
- << ", "
- << AggregationRequest::kFromMongosName
- << ", "
+ << ", " << AggregationRequest::kFromMongosName << ", "
<< AggregationRequest::kMergeByPBRTName
<< "] cannot be set to 'true' when sent to mongos",
!request.needsMerge() && !request.isFromMongos() && !request.mergeByPBRT());
diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp
index da55bf71dad..5b25ea371e1 100644
--- a/src/mongo/s/query/cluster_aggregation_planner.cpp
+++ b/src/mongo/s/query/cluster_aggregation_planner.cpp
@@ -317,8 +317,7 @@ BSONObj buildNewKeyPattern(const ShardKeyPattern& shardKey, StringMap<std::strin
auto it = renames.find(elem.fieldNameStringData());
invariant(it != renames.end(),
str::stream() << "Could not find new name of shard key field \""
- << elem.fieldName()
- << "\": rename map was "
+ << elem.fieldName() << "\": rename map was "
<< mapToString(renames));
newPattern.appendAs(elem, it->second);
}
diff --git a/src/mongo/s/query/cluster_client_cursor_params.h b/src/mongo/s/query/cluster_client_cursor_params.h
index 7106afcdfa1..9fff8d392df 100644
--- a/src/mongo/s/query/cluster_client_cursor_params.h
+++ b/src/mongo/s/query/cluster_client_cursor_params.h
@@ -157,4 +157,4 @@ struct ClusterClientCursorParams {
boost::optional<bool> isAutoCommit;
};
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index b25c26946cf..f5b3290a59a 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -57,9 +57,8 @@ Status cursorNotFoundStatus(const NamespaceString& nss, CursorId cursorId) {
Status cursorInUseStatus(const NamespaceString& nss, CursorId cursorId) {
return {ErrorCodes::CursorInUse,
- str::stream() << "Cursor already in use (namespace: '" << nss.ns() << "', id: "
- << cursorId
- << ")."};
+ str::stream() << "Cursor already in use (namespace: '" << nss.ns()
+ << "', id: " << cursorId << ")."};
}
//
@@ -349,9 +348,9 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
// Check if the user is coauthorized to access this cursor.
auto authCheckStatus = authChecker(entry->getAuthenticatedUsers());
if (!authCheckStatus.isOK()) {
- return authCheckStatus.withContext(
- str::stream() << "cursor id " << cursorId
- << " was not created by the authenticated user");
+ return authCheckStatus.withContext(str::stream()
+ << "cursor id " << cursorId
+ << " was not created by the authenticated user");
}
if (checkSessionAuth == kCheckSession) {
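
The cluster_cursor_manager.cpp hunk above only re-wraps the argument to withContext. For readers outside the tree: Status::withContext, as used here, returns a status whose message carries the extra context plus the original reason, so wrapping the str::stream argument differently cannot change the result. A hypothetical standalone mini-version of the idea (the "::" joining and the exact shape of mongo's Status are assumptions, not the real API):

    #include <iostream>
    #include <string>

    struct Status {
        int code;
        std::string reason;
        // Assumed shape of mongo's Status::withContext: keep the code,
        // prefix the reason with the supplied context.
        Status withContext(const std::string& ctx) const {
            return {code, ctx + " :: " + reason};
        }
    };

    int main() {
        Status authCheck{13, "not authorized"};
        auto out = authCheck.withContext(
            "cursor id 123 was not created by the authenticated user");
        std::cout << out.reason << '\n';  // context :: original reason
        return 0;
    }
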
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 3d0afe5db94..88d89a7704b 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -718,4 +718,4 @@ private:
size_t _cursorsTimedOut = 0;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 07c10167123..cbc0d09d368 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -99,9 +99,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
ErrorCodes::Overflow,
str::stream()
<< "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
- << *qr.getLimit()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getLimit() << ", skip: " << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
}
@@ -118,9 +116,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *qr.getNToReturn()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getNToReturn() << ", skip: " << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
} else {
@@ -131,9 +127,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *qr.getNToReturn()
- << ", skip: "
- << qr.getSkip().value_or(0));
+ << *qr.getNToReturn() << ", skip: " << qr.getSkip().value_or(0));
}
newNToReturn = newNToReturnValue;
}
@@ -410,8 +404,7 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
uasserted(ErrorCodes::BadValue,
str::stream() << "Projection contains illegal field '"
<< AsyncResultsMerger::kSortKeyField
- << "': "
- << query.getQueryRequest().getProj());
+ << "': " << query.getQueryRequest().getProj());
}
auto const catalogCache = Grid::get(opCtx)->catalogCache();
@@ -434,8 +427,8 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx,
if (retries >= kMaxRetries) {
// Check if there are no retries remaining, so the last received error can be
// propagated to the caller.
- ex.addContext(str::stream() << "Failed to run query after " << kMaxRetries
- << " retries");
+ ex.addContext(str::stream()
+ << "Failed to run query after " << kMaxRetries << " retries");
throw;
} else if (!ErrorCodes::isStaleShardVersionError(ex.code()) &&
ex.code() != ErrorCodes::ShardNotFound) {
@@ -485,8 +478,7 @@ void validateLSID(OperationContext* opCtx,
if (!opCtx->getLogicalSessionId() && cursor->getLsid()) {
uasserted(50800,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getLsid()
+ << ", which was created in session " << *cursor->getLsid()
<< ", without an lsid");
}
@@ -494,10 +486,8 @@ void validateLSID(OperationContext* opCtx,
(*opCtx->getLogicalSessionId() != *cursor->getLsid())) {
uasserted(50801,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in session "
- << *cursor->getLsid()
- << ", in session "
- << *opCtx->getLogicalSessionId());
+ << ", which was created in session " << *cursor->getLsid()
+ << ", in session " << *opCtx->getLogicalSessionId());
}
}
@@ -518,8 +508,7 @@ void validateTxnNumber(OperationContext* opCtx,
if (!opCtx->getTxnNumber() && cursor->getTxnNumber()) {
uasserted(50803,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
+ << ", which was created in transaction " << *cursor->getTxnNumber()
<< ", without a txnNumber");
}
@@ -527,10 +516,8 @@ void validateTxnNumber(OperationContext* opCtx,
(*opCtx->getTxnNumber() != *cursor->getTxnNumber())) {
uasserted(50804,
str::stream() << "Cannot run getMore on cursor " << request.cursorid
- << ", which was created in transaction "
- << *cursor->getTxnNumber()
- << ", in transaction "
- << *opCtx->getTxnNumber());
+ << ", which was created in transaction " << *cursor->getTxnNumber()
+ << ", in transaction " << *opCtx->getTxnNumber());
}
}
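
Nearly every hunk in cluster_find.cpp follows the same pattern: clang-format-7.0.1 packs a chain of stream insertions onto fewer lines. Since operator<< chaining is a single expression however it is wrapped, the patch is behavior-neutral. A minimal runnable sketch, using std::ostringstream as a stand-in for mongo's str::stream (an assumption about its semantics):

    #include <iostream>
    #include <sstream>

    int main() {
        long long limit = 10, skip = 5;
        std::ostringstream a, b;
        // Old layout: one operand per line.
        a << "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
          << limit
          << ", skip: "
          << skip;
        // New layout: operands packed onto one line.
        b << "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
          << limit << ", skip: " << skip;
        // Both spellings are the same expression tree, so the strings match.
        std::cout << (a.str() == b.str() ? "identical" : "different") << '\n';
        return 0;
    }
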
diff --git a/src/mongo/s/query/router_stage_pipeline.cpp b/src/mongo/s/query/router_stage_pipeline.cpp
index ce1c56c103b..aaaad0c3e96 100644
--- a/src/mongo/s/query/router_stage_pipeline.cpp
+++ b/src/mongo/s/query/router_stage_pipeline.cpp
@@ -106,8 +106,7 @@ BSONObj RouterStagePipeline::_validateAndConvertToBSON(const Document& event) {
"event makes it impossible to resume the stream from that point. Only "
"transformations that retain the unmodified _id field are allowed. "
"Expected: "
- << BSON("_id" << resumeToken)
- << " but found: "
+ << BSON("_id" << resumeToken) << " but found: "
<< (eventBSON["_id"] ? BSON("_id" << eventBSON["_id"]) : BSONObj()),
idField.binaryEqual(resumeToken));
diff --git a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
index c66f4d6e3d0..b101d1ca37b 100644
--- a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
+++ b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp
@@ -49,8 +49,9 @@ OperationContext* opCtx = nullptr;
TEST(RouterStageRemoveMetadataFieldsTest, RemovesMetaDataFields) {
auto mockStage = stdx::make_unique<RouterStageMock>(opCtx);
mockStage->queueResult(BSON("a" << 4 << "$sortKey" << 1 << "b" << 3));
- mockStage->queueResult(BSON("$sortKey" << BSON("" << 3) << "c" << BSON("d"
- << "foo")));
+ mockStage->queueResult(BSON("$sortKey" << BSON("" << 3) << "c"
+ << BSON("d"
+ << "foo")));
mockStage->queueResult(BSON("a" << 3));
mockStage->queueResult(BSON("a" << 3 << "$randVal" << 4 << "$sortKey" << 2));
mockStage->queueResult(
diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h
index 38b13b4ea7a..43157322b0b 100644
--- a/src/mongo/s/query/store_possible_cursor.h
+++ b/src/mongo/s/query/store_possible_cursor.h
@@ -72,7 +72,7 @@ class TaskExecutor;
* @ cursorManager the ClusterCursorManager on which to register the resulting ClusterClientCursor
* @ privileges the PrivilegeVector of privileges needed for the original command, to be used for
* auth checking by GetMore
-*/
+ */
StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const ShardId& shardId,
const HostAndPort& server,
diff --git a/src/mongo/s/request_types/add_shard_request_test.cpp b/src/mongo/s/request_types/add_shard_request_test.cpp
index 87ae164f2a7..8b28a1921b5 100644
--- a/src/mongo/s/request_types/add_shard_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_request_test.cpp
@@ -66,9 +66,8 @@ TEST(AddShardRequest, ParseInternalFieldsInvalidConnectionString) {
TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
- << kShardName);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -81,9 +80,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
- << kShardName);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
@@ -99,9 +97,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
TEST(AddShardRequest, ParseInternalFieldsMissingName) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -114,9 +111,8 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -131,11 +127,9 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
{
- BSONObj obj =
- BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
- << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -149,11 +143,9 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
}
{
- BSONObj obj =
- BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
- << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj obj = BSON(AddShardRequest::configsvrAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -170,10 +162,9 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
// Test converting a valid AddShardRequest to the internal config version of the command.
TEST(AddShardRequest, ToCommandForConfig) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB
- << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -186,8 +177,8 @@ TEST(AddShardRequest, ToCommandForConfig) {
}
TEST(AddShardRequest, ToCommandForConfigMissingName) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -200,8 +191,8 @@ TEST(AddShardRequest, ToCommandForConfigMissingName) {
}
TEST(AddShardRequest, ToCommandForConfigMissingMaxSize) {
- BSONObj mongosCmdObj = BSON(
- AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName);
+ BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
+ << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
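
The add_shard_request_test.cpp reflows above change only where the BSON() builder macro breaks its lines. As an illustrative, non-standalone sketch of why that is safe (it assumes the in-tree BSON()/BSONObjBuilder macros, where field names and values simply alternate through operator<<):

    // Hypothetical check; both layouts feed the builder the same
    // key/value sequence, so the resulting documents are bit-identical.
    BSONObj a = BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
                                                     << kShardName);
    BSONObj b = BSON(AddShardRequest::mongosAddShard
                     << kConnString << AddShardRequest::shardName << kShardName);
    invariant(a.binaryEqual(b));
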
diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
index 277302c3c0c..7a9b2b8141e 100644
--- a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
@@ -92,8 +92,7 @@ TEST(AddShardToZoneRequest, WrongShardNameTypeErrors) {
TEST(AddShardToZoneRequest, WrongZoneNameTypeErrors) {
auto request = AddShardToZoneRequest::parseFromMongosCommand(BSON("addShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -141,25 +140,23 @@ TEST(CfgAddShardToZoneRequest, MissingShardNameErrors) {
}
TEST(CfgAddShardToZoneRequest, WrongShardNameTypeErrors) {
- auto request = AddShardToZoneRequest::parseFromConfigCommand(
- BSON("_configsvrAddShardToZone" << 1234 << "zone"
- << "z"));
+ auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("_configsvrAddShardToZone"
+ << 1234 << "zone"
+ << "z"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgAddShardToZoneRequest, WrongZoneNameTypeErrors) {
auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("_configsvrAddShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgAddShardToZoneRequest, CannotUseConfigToParseMongosCommand) {
auto request = AddShardToZoneRequest::parseFromConfigCommand(BSON("addShardToZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index f3f0a14b320..df15b79669d 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -45,18 +45,13 @@ using unittest::assertGet;
TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
const ChunkVersion version(1, 0, OID::gen());
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
- BSON("_configsvrMoveChunk" << 1 << "ns"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << -100LL)
- << "max"
- << BSON("a" << 100LL)
- << "shard"
- << "TestShard0000"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(version.toLong())
- << "lastmodEpoch"
- << version.epoch())));
+ BSON("_configsvrMoveChunk"
+ << 1 << "ns"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << -100LL) << "max" << BSON("a" << 100LL) << "shard"
+ << "TestShard0000"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(version.toLong()) << "lastmodEpoch"
+ << version.epoch())));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
@@ -72,21 +67,14 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
const ChunkVersion version(1, 0, OID::gen());
auto request = assertGet(BalanceChunkRequest::parseFromConfigCommand(
- BSON("_configsvrMoveChunk" << 1 << "ns"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << -100LL)
- << "max"
- << BSON("a" << 100LL)
- << "shard"
- << "TestShard0000"
- << "lastmod"
- << Date_t::fromMillisSinceEpoch(version.toLong())
- << "lastmodEpoch"
- << version.epoch()
- << "secondaryThrottle"
- << BSON("_secondaryThrottle" << true << "writeConcern"
- << BSON("w" << 2)))));
+ BSON("_configsvrMoveChunk"
+ << 1 << "ns"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << -100LL) << "max" << BSON("a" << 100LL) << "shard"
+ << "TestShard0000"
+ << "lastmod" << Date_t::fromMillisSinceEpoch(version.toLong()) << "lastmodEpoch"
+ << version.epoch() << "secondaryThrottle"
+ << BSON("_secondaryThrottle" << true << "writeConcern" << BSON("w" << 2)))));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
diff --git a/src/mongo/s/request_types/merge_chunk_request_test.cpp b/src/mongo/s/request_types/merge_chunk_request_test.cpp
index 7a300c5e813..94a7bf68511 100644
--- a/src/mongo/s/request_types/merge_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/merge_chunk_request_test.cpp
@@ -42,11 +42,8 @@ TEST(MergeChunkRequest, BasicValidConfigCommand) {
auto request = assertGet(MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
@@ -60,14 +57,10 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
BSONObj serializedRequest =
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"
- << "validAfter"
- << Timestamp{100});
+ << "validAfter" << Timestamp{100});
BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
<< "majority"));
@@ -84,11 +77,10 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
}
TEST(MergeChunkRequest, MissingNameSpaceErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
@@ -96,20 +88,18 @@ TEST(MergeChunkRequest, MissingCollEpochErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
+ << "chunkBoundaries" << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
<< "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(MergeChunkRequest, MissingChunkBoundariesErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
@@ -117,21 +107,17 @@ TEST(MergeChunkRequest, MissingShardNameErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
<< BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(MergeChunkRequest, WrongNamespaceTypeErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("_configsvrCommitChunkMerge" << 1234 << "collEpoch" << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5)
- << BSON("a" << 10))
- << "shard"
- << "shard0000"));
+ BSON("_configsvrCommitChunkMerge"
+ << 1234 << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -139,37 +125,27 @@ TEST(MergeChunkRequest, WrongCollEpochTypeErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << 1234
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << 1234 << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(MergeChunkRequest, WrongChunkBoundariesTypeErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << 1234
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries" << 1234 << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(MergeChunkRequest, WrongShardNameTypeErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(
- BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
- << 1234));
+ auto request = MergeChunkRequest::parseFromConfigCommand(BSON(
+ "_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -177,24 +153,19 @@ TEST(MergeChunkRequest, InvalidNamespaceErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< ""
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 5) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidNamespace, request.getStatus());
}
TEST(MergeChunkRequest, EmptyChunkBoundariesErrors) {
- auto request = MergeChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkMerge"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSONArray()
- << "shard"
- << "shard0000"));
+ auto request = MergeChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkMerge"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries" << BSONArray()
+ << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
@@ -202,11 +173,8 @@ TEST(MergeChunkRequest, TooFewChunkBoundariesErrors) {
auto request = MergeChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkMerge"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "chunkBoundaries"
- << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 10))
- << "shard"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "chunkBoundaries"
+ << BSON_ARRAY(BSON("a" << 1) << BSON("a" << 10)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
index 49332950329..b295e3f0b3d 100644
--- a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
+++ b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp
@@ -178,8 +178,9 @@ TEST(MigrationSecondaryThrottleOptions, ParseFailsDisabledInCommandBSONWriteConc
TEST(MigrationSecondaryThrottleOptions, ParseFailsNotSpecifiedInCommandBSONWriteConcernSpecified) {
auto status = MigrationSecondaryThrottleOptions::createFromCommand(
- BSON("someOtherField" << 1 << "writeConcern" << BSON("w"
- << "majority")));
+ BSON("someOtherField" << 1 << "writeConcern"
+ << BSON("w"
+ << "majority")));
ASSERT_EQ(ErrorCodes::UnsupportedFormat, status.getStatus().code());
}
diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
index d8b6c94c61e..67981bd7f67 100644
--- a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
+++ b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
@@ -85,17 +85,16 @@ TEST(RemoveShardFromZoneRequest, MissingShardNameErrors) {
}
TEST(RemoveShardFromZoneRequest, WrongShardNameTypeErrors) {
- auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(
- BSON("removeShardFromZone" << 1234 << "zone"
- << "z"));
+ auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(BSON("removeShardFromZone"
+ << 1234 << "zone"
+ << "z"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(RemoveShardFromZoneRequest, WrongZoneNameTypeErrors) {
auto request = RemoveShardFromZoneRequest::parseFromMongosCommand(BSON("removeShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
@@ -155,16 +154,14 @@ TEST(CfgRemoveShardFromZoneRequest, WrongZoneNameTypeErrors) {
auto request =
RemoveShardFromZoneRequest::parseFromConfigCommand(BSON("_configsvrRemoveShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(CfgRemoveShardFromZoneRequest, CannotUseConfigToParseMongosCommand) {
auto request = RemoveShardFromZoneRequest::parseFromConfigCommand(BSON("removeShardFromZone"
<< "a"
- << "zone"
- << 1234));
+ << "zone" << 1234));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
diff --git a/src/mongo/s/request_types/set_shard_version_request_test.cpp b/src/mongo/s/request_types/set_shard_version_request_test.cpp
index fb1052cc48d..59003730f98 100644
--- a/src/mongo/s/request_types/set_shard_version_request_test.cpp
+++ b/src/mongo/s/request_types/set_shard_version_request_test.cpp
@@ -47,15 +47,12 @@ const ConnectionString shardCS = ConnectionString::forReplicaSet(
"ShardRS", {HostAndPort{"shardHost1:12345"}, HostAndPort{"shardHost2:12345"}});
TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
- SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString())));
+ SetShardVersionRequest request = assertGet(
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(!request.isAuthoritative());
@@ -66,16 +63,12 @@ TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString())));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << ""
+ << "init" << true << "authoritative" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -86,18 +79,12 @@ TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
TEST(SetShardVersionRequest, ParseInitNoConnectionVersioning) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "noConnectionVersioning"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << ""
+ << "init" << true << "authoritative" << true << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "noConnectionVersioning" << true)));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -110,16 +97,13 @@ TEST(SetShardVersionRequest, ParseFull) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch())));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch())));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -137,18 +121,14 @@ TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "authoritative"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
+ << "authoritative" << true)));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -166,18 +146,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "db.coll"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(
+ BSON("setShardVersion"
+ << "db.coll"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
+ << "noConnectionVersioning" << true)));
ASSERT(!request.isInit());
ASSERT(!request.shouldForceRefresh());
@@ -194,16 +170,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
TEST(SetShardVersionRequest, ParseFullNoNS) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ auto ssvStatus =
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch" << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -211,16 +185,14 @@ TEST(SetShardVersionRequest, ParseFullNoNS) {
TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "dbOnly"
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ auto ssvStatus =
+ SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "dbOnly"
+ << "shard"
+ << "TestShard"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch" << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -239,20 +211,10 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< ""
- << "init"
- << true
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << true << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "maxTimeMS"
- << 30000));
+ << "shardHost" << shardCS.toString() << "maxTimeMS" << 30000));
}
TEST(SetShardVersionRequest, ToSSVCommandFull) {
@@ -273,21 +235,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -309,21 +261,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -351,21 +293,11 @@ TEST(SetShardVersionRequest, ToSSVCommandFullForceRefresh) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << true
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << true << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
<< chunkVersion.epoch()));
}
@@ -387,24 +319,12 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << false << "authoritative" << true
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
+ << chunkVersion.epoch() << "noConnectionVersioning" << true));
}
TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh) {
@@ -431,24 +351,12 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh)
ASSERT_BSONOBJ_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init"
- << false
- << "forceRefresh"
- << true
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
+ << "init" << false << "forceRefresh" << true << "authoritative" << false
+ << "configdb" << configCS.toString() << "shard"
<< "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ << "shardHost" << shardCS.toString() << "version"
+ << Timestamp(chunkVersion.toLong()) << "versionEpoch"
+ << chunkVersion.epoch() << "noConnectionVersioning" << true));
}
diff --git a/src/mongo/s/request_types/split_chunk_request_test.cpp b/src/mongo/s/request_types/split_chunk_request_test.cpp
index d73f6c96591..1727c3aa792 100644
--- a/src/mongo/s/request_types/split_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_test.cpp
@@ -41,19 +41,12 @@ namespace {
using unittest::assertGet;
TEST(SplitChunkRequest, BasicValidConfigCommand) {
- auto request =
- assertGet(SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000")));
+ auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
ASSERT(ChunkRange(BSON("a" << 1), BSON("a" << 10)) == request.getChunkRange());
@@ -65,14 +58,8 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkSplit"
<< "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5) << BSON("a" << 7))
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5) << BSON("a" << 7))
<< "shard"
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
@@ -84,18 +71,12 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
}
TEST(SplitChunkRequest, ConfigCommandtoBSON) {
- BSONObj serializedRequest = BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000");
+ BSONObj serializedRequest =
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000");
BSONObj writeConcernObj = BSON("writeConcern" << BSON("w"
<< "majority"));
@@ -112,197 +93,129 @@ TEST(SplitChunkRequest, ConfigCommandtoBSON) {
}
TEST(SplitChunkRequest, MissingNamespaceErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(
- BSON("collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(BSON(
+ "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingCollEpochErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "min" << BSON("a" << 1) << "max" << BSON("a" << 10) << "splitPoints"
+ << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingChunkToSplitErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingSplitPointErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, MissingShardNameErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5))));
ASSERT_EQ(ErrorCodes::NoSuchKey, request.getStatus());
}
TEST(SplitChunkRequest, WrongNamespaceTypeErrors) {
auto request = SplitChunkRequest::parseFromConfigCommand(
BSON("_configsvrCommitChunkSplit" << 1234 << "collEpoch" << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
+ << "min" << BSON("a" << 1) << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
<< "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongCollEpochTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << 1234
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << 1234 << "min" << BSON("a" << 1) << "max" << BSON("a" << 10)
+ << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongChunkToSplitTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << 1234
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << 1234 << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongSplitPointTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << 1234
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << 1234 << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, WrongShardNameTypeErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << 1234));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard" << 1234));
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
TEST(SplitChunkRequest, InvalidNamespaceErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << ""
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << ""
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidNamespace, request.getStatus());
}
TEST(SplitChunkRequest, EmptyChunkToSplitErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSONObj()
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSON_ARRAY(BSON("a" << 5))
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSONObj() << "max"
+ << BSON("a" << 10) << "splitPoints" << BSON_ARRAY(BSON("a" << 5)) << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::BadValue, request.getStatus());
}
TEST(SplitChunkRequest, EmptySplitPointsErrors) {
- auto request = SplitChunkRequest::parseFromConfigCommand(BSON("_configsvrCommitChunkSplit"
- << "TestDB.TestColl"
- << "collEpoch"
- << OID("7fffffff0000000000000001")
- << "min"
- << BSON("a" << 1)
- << "max"
- << BSON("a" << 10)
- << "splitPoints"
- << BSONArray()
- << "shard"
- << "shard0000"));
+ auto request = SplitChunkRequest::parseFromConfigCommand(
+ BSON("_configsvrCommitChunkSplit"
+ << "TestDB.TestColl"
+ << "collEpoch" << OID("7fffffff0000000000000001") << "min" << BSON("a" << 1) << "max"
+ << BSON("a" << 10) << "splitPoints" << BSONArray() << "shard"
+ << "shard0000"));
ASSERT_EQ(ErrorCodes::InvalidOptions, request.getStatus());
}
-}
+} // namespace
} // namespace mongo
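
A handful of hunks, like the two closing lines of this file, are comment fixes rather than reflows: clang-format normalizes the brace that closes a namespace to carry a "// namespace <name>" comment (plausibly via the FixNamespaceComments option, assuming the project's .clang-format enables it). Sketch of the convention:

    namespace mongo {
    namespace {  // anonymous namespace for test-local helpers

    int helper() { return 42; }

    }  // namespace       (clang-format appends or rewrites these comments)
    }  // namespace mongo
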
diff --git a/src/mongo/s/request_types/split_chunk_request_type.cpp b/src/mongo/s/request_types/split_chunk_request_type.cpp
index 8993efac965..6773e413197 100644
--- a/src/mongo/s/request_types/split_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_type.cpp
@@ -161,8 +161,8 @@ const string& SplitChunkRequest::getShardName() const {
Status SplitChunkRequest::_validate() {
if (!getNamespace().isValid()) {
return Status(ErrorCodes::InvalidNamespace,
- str::stream() << "invalid namespace '" << _nss.ns()
- << "' specified for request");
+ str::stream()
+ << "invalid namespace '" << _nss.ns() << "' specified for request");
}
if (getSplitPoints().empty()) {
diff --git a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
index 350489aa242..cfbce859483 100644
--- a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
+++ b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp
@@ -107,10 +107,7 @@ StatusWith<UpdateZoneKeyRangeRequest> UpdateZoneKeyRangeRequest::_parseFromComma
} else {
return {ErrorCodes::TypeMismatch,
str::stream() << "\"" << kZoneName << "\" had the wrong type. Expected "
- << typeName(String)
- << " or "
- << typeName(jstNULL)
- << ", found "
+ << typeName(String) << " or " << typeName(jstNULL) << ", found "
<< typeName(zoneElem.type())};
}
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 782620dc454..36e7290866d 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -424,7 +424,7 @@ public:
void onConfirmedSet(const State& state) final {
auto connStr = state.connStr;
- auto fun = [ serviceContext = _serviceContext, connStr ](auto args) {
+ auto fun = [serviceContext = _serviceContext, connStr](auto args) {
if (ErrorCodes::isCancelationError(args.status.code())) {
return;
}
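
The server.cpp hunk above is another formatting-only fix worth calling out: the earlier formatter emitted padded capture lists when a lambda contained an init-capture ("[ serviceContext = _serviceContext, connStr ]"), while 7.0.1 writes the brackets tight. Either spelling captures the same entities. A standalone sketch with hypothetical stand-in values:

    #include <iostream>
    #include <string>

    int main() {
        std::string connStr = "rs0/host1:27017";
        int serviceContext = 7;  // stand-in for the real ServiceContext pointer
        // Init-capture plus by-value capture, in the clang-format-7 style.
        auto fun = [ctx = serviceContext, connStr](int status) {
            std::cout << "ctx=" << ctx << " conn=" << connStr
                      << " status=" << status << '\n';
        };
        fun(0);
        return 0;
    }
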
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 9be98ba15b6..5d59c25653c 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -89,8 +89,7 @@ std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPa
// Numeric and ascending (1.0), or "hashed" and single field
uassert(ErrorCodes::BadValue,
str::stream()
- << "Shard key "
- << keyPattern.toString()
+ << "Shard key " << keyPattern.toString()
<< " can contain either a single 'hashed' field"
<< " or multiple numerical fields set to a value of 1. Failed to parse field "
<< patternEl.fieldNameStringData(),
@@ -163,10 +162,7 @@ Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
return {ErrorCodes::ShardKeyTooBig,
str::stream() << "shard keys must be less than " << kMaxShardKeySizeBytes
- << " bytes, but key "
- << shardKey
- << " is "
- << shardKey.objsize()
+ << " bytes, but key " << shardKey << " is " << shardKey.objsize()
<< " bytes"};
}
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index b8c68a38c9a..ecde034896b 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -140,8 +140,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
BSON("a" << regex));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id"
- << 1);
+ << "$id" << 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$dollarPrefixKey:true}}")),
fromjson("{a:{$dollarPrefixKey:true}}"));
@@ -169,8 +168,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
ASSERT_BSONOBJ_EQ(docKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a"
- << 10)),
+ << "a" << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$dollarPrefixKey:true}}")),
fromjson("{a:10, b:{$dollarPrefixKey:true}}"));
@@ -199,8 +197,7 @@ TEST(ShardKeyPattern, ExtractDocShardKeyNested) {
fromjson("{'a.b':10, c:30}"));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id"
- << 1);
+ << "$id" << 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << ref) << "c" << 30)),
BSON("a.b" << ref << "c" << 30));
@@ -308,8 +305,7 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyCompound) {
ASSERT_BSONOBJ_EQ(queryKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a"
- << 10)),
+ << "a" << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 59faf424f90..1f430a0d7be 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -156,18 +156,16 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
// is already performed at chunk split commit time, but we are performing it here for parity
// with old auto-split code, which might rely on it.
if (SimpleBSONObjComparator::kInstance.evaluate(chunkRange.getMin() == splitPoints.front())) {
- const std::string msg(str::stream() << "not splitting chunk " << chunkRange.toString()
- << ", split point "
- << splitPoints.front()
- << " is exactly on chunk bounds");
+ const std::string msg(str::stream()
+ << "not splitting chunk " << chunkRange.toString() << ", split point "
+ << splitPoints.front() << " is exactly on chunk bounds");
return {ErrorCodes::CannotSplit, msg};
}
if (SimpleBSONObjComparator::kInstance.evaluate(chunkRange.getMax() == splitPoints.back())) {
- const std::string msg(str::stream() << "not splitting chunk " << chunkRange.toString()
- << ", split point "
- << splitPoints.back()
- << " is exactly on chunk bounds");
+ const std::string msg(str::stream()
+ << "not splitting chunk " << chunkRange.toString() << ", split point "
+ << splitPoints.back() << " is exactly on chunk bounds");
return {ErrorCodes::CannotSplit, msg};
}
diff --git a/src/mongo/s/sharding_egress_metadata_hook.cpp b/src/mongo/s/sharding_egress_metadata_hook.cpp
index 468fe77bae1..10e837a2430 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.cpp
+++ b/src/mongo/s/sharding_egress_metadata_hook.cpp
@@ -120,8 +120,8 @@ Status ShardingEgressMetadataHook::_advanceConfigOpTimeFromShard(OperationContex
if (opTime.is_initialized()) {
grid->advanceConfigOpTime(opCtx,
opTime.get(),
- str::stream() << "reply from shard " << shardId
- << " node");
+ str::stream()
+ << "reply from shard " << shardId << " node");
}
}
return Status::OK();
diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h
index e3e7acce876..1c84f4ceda4 100644
--- a/src/mongo/s/sharding_initialization.h
+++ b/src/mongo/s/sharding_initialization.h
@@ -86,7 +86,7 @@ Status initializeGlobalShardingState(OperationContext* opCtx,
/**
* Loads cluster ID and waits for the reload of the Shard Registry.
-*/
+ */
Status waitForShardRegistryReload(OperationContext* opCtx);
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 92a27af4c92..93c87da38d9 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -115,9 +115,8 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- ASSERT_OK(replSetConfig.initialize(
- BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
- << serversBob.arr())));
+ ASSERT_OK(replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version"
+ << 3 << "members" << serversBob.arr())));
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index 0261a54f8ba..c533f14a125 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -336,10 +336,8 @@ void ShardingTestFixture::expectConfigCollectionCreate(const HostAndPort& config
BSON("create" << collName << "capped" << true << "size" << cappedSize << "writeConcern"
<< BSON("w"
<< "majority"
- << "wtimeout"
- << 60000)
- << "maxTimeMS"
- << 30000);
+ << "wtimeout" << 60000)
+ << "maxTimeMS" << 30000);
ASSERT_BSONOBJ_EQ(expectedCreateCmd, request.cmdObj);
return response;
diff --git a/src/mongo/s/sharding_task_executor.cpp b/src/mongo/s/sharding_task_executor.cpp
index 8a3e3c39b60..c8db2851af7 100644
--- a/src/mongo/s/sharding_task_executor.cpp
+++ b/src/mongo/s/sharding_task_executor.cpp
@@ -160,9 +160,12 @@ StatusWith<TaskExecutor::CallbackHandle> ShardingTaskExecutor::scheduleRemoteCom
auto clusterGLE = ClusterLastErrorInfo::get(request.opCtx->getClient());
- auto shardingCb =
- [ timeTracker, clusterGLE, cb, grid = Grid::get(request.opCtx), hosts = request.target ](
- const TaskExecutor::RemoteCommandOnAnyCallbackArgs& args) {
+ auto shardingCb = [timeTracker,
+ clusterGLE,
+ cb,
+ grid = Grid::get(request.opCtx),
+ hosts = request.target](
+ const TaskExecutor::RemoteCommandOnAnyCallbackArgs& args) {
ON_BLOCK_EXIT([&cb, &args]() { cb(args); });
if (!args.response.isOK()) {
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp
index ffcdd6cd82b..871293699ea 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.cpp
+++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp
@@ -53,7 +53,7 @@ void emplaceOrInvariant(Map&& map, Args&&... args) noexcept {
invariant(ret.second, "Element already existed in map/set");
}
-} // anonymous
+} // namespace
Status ShardingTaskExecutorPoolController::validateHostTimeout(const int& hostTimeoutMS) {
auto toRefreshTimeoutMS = gParameters.toRefreshTimeoutMS.load();
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index c2bdc3d7f68..8bfb6cbbb39 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/logical_session_id.h"
-#include "mongo/db/logical_session_id.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -520,17 +519,13 @@ void TransactionRouter::Router::_assertAbortStatusIsOkOrNoSuchTransaction(
auto shardResponse = uassertStatusOKWithContext(
std::move(response.swResponse),
str::stream() << "Failed to send abort to shard " << response.shardId
- << " between retries of statement "
- << p().latestStmtId);
+ << " between retries of statement " << p().latestStmtId);
auto status = getStatusFromCommandResult(shardResponse.data);
uassert(ErrorCodes::NoSuchTransaction,
str::stream() << txnIdToString() << "Transaction aborted between retries of statement "
- << p().latestStmtId
- << " due to error: "
- << status
- << " from shard: "
- << response.shardId,
+ << p().latestStmtId << " due to error: " << status
+ << " from shard: " << response.shardId,
status.isOK() || status.code() == ErrorCodes::NoSuchTransaction);
// abortTransaction is sent with no write concern, so there's no need to check for a write
@@ -658,8 +653,9 @@ void TransactionRouter::Router::onSnapshotError(OperationContext* opCtx,
const Status& errorStatus) {
invariant(canContinueOnSnapshotError());
- LOG(3) << txnIdToString() << " Clearing pending participants and resetting global snapshot "
- "timestamp after snapshot error: "
+ LOG(3) << txnIdToString()
+ << " Clearing pending participants and resetting global snapshot "
+ "timestamp after snapshot error: "
<< errorStatus << ", previous timestamp: " << o().atClusterTime->getTime();
// The transaction must be restarted on all participants because a new read timestamp will be
@@ -711,17 +707,14 @@ void TransactionRouter::Router::beginOrContinueTxn(OperationContext* opCtx,
// This transaction is older than the transaction currently in progress, so throw an error.
uasserted(ErrorCodes::TransactionTooOld,
str::stream() << "txnNumber " << txnNumber << " is less than last txnNumber "
- << o().txnNumber
- << " seen in session "
- << _sessionId());
+ << o().txnNumber << " seen in session " << _sessionId());
} else if (txnNumber == o().txnNumber) {
// This is the same transaction as the one in progress.
switch (action) {
case TransactionActions::kStart: {
uasserted(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "txnNumber " << o().txnNumber << " for session "
- << _sessionId()
- << " already started");
+ << _sessionId() << " already started");
}
case TransactionActions::kContinue: {
uassert(ErrorCodes::InvalidOptions,
@@ -767,11 +760,9 @@ void TransactionRouter::Router::beginOrContinueTxn(OperationContext* opCtx,
}
case TransactionActions::kContinue: {
uasserted(ErrorCodes::NoSuchTransaction,
- str::stream() << "cannot continue txnId " << o().txnNumber
- << " for session "
- << _sessionId()
- << " with txnId "
- << txnNumber);
+ str::stream()
+ << "cannot continue txnId " << o().txnNumber << " for session "
+ << _sessionId() << " with txnId " << txnNumber);
}
case TransactionActions::kCommit: {
_resetRouterState(opCtx, txnNumber);
@@ -896,11 +887,10 @@ BSONObj TransactionRouter::Router::_commitTransaction(
switch (participant.second.readOnly) {
case Participant::ReadOnly::kUnset:
uasserted(ErrorCodes::NoSuchTransaction,
- str::stream() << txnIdToString() << " Failed to commit transaction "
- << "because a previous statement on the transaction "
- << "participant "
- << participant.first
- << " was unsuccessful.");
+ str::stream()
+ << txnIdToString() << " Failed to commit transaction "
+ << "because a previous statement on the transaction "
+ << "participant " << participant.first << " was unsuccessful.");
case Participant::ReadOnly::kReadOnly:
readOnlyShards.push_back(participant.first);
break;
@@ -1019,8 +1009,9 @@ void TransactionRouter::Router::implicitlyAbortTransaction(OperationContext* opC
const Status& errorStatus) {
if (o().commitType == CommitType::kTwoPhaseCommit ||
o().commitType == CommitType::kRecoverWithToken) {
- LOG(3) << txnIdToString() << " Router not sending implicit abortTransaction because commit "
- "may have been handed off to the coordinator";
+ LOG(3) << txnIdToString()
+ << " Router not sending implicit abortTransaction because commit "
+ "may have been handed off to the coordinator";
return;
}
diff --git a/src/mongo/s/transaction_router.h b/src/mongo/s/transaction_router.h
index 4d442f3a225..82e7498523a 100644
--- a/src/mongo/s/transaction_router.h
+++ b/src/mongo/s/transaction_router.h
@@ -215,78 +215,78 @@ public:
}
/**
- * Starts a fresh transaction in this session or continue an existing one. Also cleans up the
- * previous transaction state.
- */
+     * Starts a fresh transaction in this session or continues an existing one. Also cleans up
+ * the previous transaction state.
+ */
void beginOrContinueTxn(OperationContext* opCtx,
TxnNumber txnNumber,
TransactionActions action);
/**
- * Attaches the required transaction related fields for a request to be sent to the given
- * shard.
- *
- * Calling this method has the following side effects:
- * 1. Potentially selecting a coordinator.
- * 2. Adding the shard to the list of participants.
- * 3. Also append fields for first statements (ex. startTransaction, readConcern)
- * if the shard was newly added to the list of participants.
- */
+     * Attaches the required transaction-related fields for a request to be sent to the given
+ * shard.
+ *
+ * Calling this method has the following side effects:
+ * 1. Potentially selecting a coordinator.
+ * 2. Adding the shard to the list of participants.
+     *  3. Appending fields for first statements (e.g. startTransaction, readConcern)
+ * if the shard was newly added to the list of participants.
+ */
BSONObj attachTxnFieldsIfNeeded(OperationContext* opCtx,
const ShardId& shardId,
const BSONObj& cmdObj);
/**
- * Processes the transaction metadata in the response from the participant if the response
- * indicates the operation succeeded.
- */
+ * Processes the transaction metadata in the response from the participant if the response
+ * indicates the operation succeeded.
+ */
void processParticipantResponse(OperationContext* opCtx,
const ShardId& shardId,
const BSONObj& responseObj);
/**
- * Returns true if the current transaction can retry on a stale version error from a
- * contacted shard. This is always true except for an error received by a write that is not
- * the first overall statement in the sharded transaction. This is because the entire
- * command will be retried, and shards that were not stale and are targeted again may
- * incorrectly execute the command a second time.
- *
- * Note: Even if this method returns true, the retry attempt may still fail, e.g. if one of
- * the shards that returned a stale version error was involved in a previously completed a
- * statement for this transaction.
- *
- * TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to
- * allow retrying writes beyond the first overall statement.
- */
+ * Returns true if the current transaction can retry on a stale version error from a
+ * contacted shard. This is always true except for an error received by a write that is not
+ * the first overall statement in the sharded transaction. This is because the entire
+ * command will be retried, and shards that were not stale and are targeted again may
+ * incorrectly execute the command a second time.
+ *
+ * Note: Even if this method returns true, the retry attempt may still fail, e.g. if one of
+     * the shards that returned a stale version error was involved in a previously completed
+ * statement for this transaction.
+ *
+ * TODO SERVER-37207: Change batch writes to retry only the failed writes in a batch, to
+ * allow retrying writes beyond the first overall statement.
+ */
bool canContinueOnStaleShardOrDbError(StringData cmdName) const;
/**
- * Updates the transaction state to allow for a retry of the current command on a stale
- * version error. This includes sending abortTransaction to all cleared participants. Will
- * throw if the transaction cannot be continued.
- */
+ * Updates the transaction state to allow for a retry of the current command on a stale
+ * version error. This includes sending abortTransaction to all cleared participants. Will
+ * throw if the transaction cannot be continued.
+ */
void onStaleShardOrDbError(OperationContext* opCtx,
StringData cmdName,
const Status& errorStatus);
/**
- * Returns true if the current transaction can retry on a snapshot error. This is only true
- * on the first command recevied for a transaction.
- */
+ * Returns true if the current transaction can retry on a snapshot error. This is only true
+     * on the first command received for a transaction.
+ */
bool canContinueOnSnapshotError() const;
/**
- * Resets the transaction state to allow for a retry attempt. This includes clearing all
- * participants, clearing the coordinator, resetting the global read timestamp, and sending
- * abortTransaction to all cleared participants. Will throw if the transaction cannot be
- * continued.
- */
+ * Resets the transaction state to allow for a retry attempt. This includes clearing all
+ * participants, clearing the coordinator, resetting the global read timestamp, and sending
+ * abortTransaction to all cleared participants. Will throw if the transaction cannot be
+ * continued.
+ */
void onSnapshotError(OperationContext* opCtx, const Status& errorStatus);
/**
- * Updates the transaction tracking state to allow for a retry attempt on a view resolution
- * error. This includes sending abortTransaction to all cleared participants.
- */
+ * Updates the transaction tracking state to allow for a retry attempt on a view resolution
+ * error. This includes sending abortTransaction to all cleared participants.
+ */
void onViewResolutionError(OperationContext* opCtx, const NamespaceString& nss);
/**
@@ -301,206 +301,207 @@ public:
LogicalTime getSelectedAtClusterTime() const;
/**
- * Sets the atClusterTime for the current transaction to the latest time in the router's
- * logical clock. Does nothing if the transaction does not have snapshot read concern or an
- * atClusterTime has already been selected and cannot be changed.
- */
+ * Sets the atClusterTime for the current transaction to the latest time in the router's
+ * logical clock. Does nothing if the transaction does not have snapshot read concern or an
+ * atClusterTime has already been selected and cannot be changed.
+ */
void setDefaultAtClusterTime(OperationContext* opCtx);
/**
- * If a coordinator has been selected for the current transaction, returns its id.
- */
+ * If a coordinator has been selected for the current transaction, returns its id.
+ */
const boost::optional<ShardId>& getCoordinatorId() const;
/**
- * If a recovery shard has been selected for the current transaction, returns its id.
- */
+ * If a recovery shard has been selected for the current transaction, returns its id.
+ */
const boost::optional<ShardId>& getRecoveryShardId() const;
/**
- * Commits the transaction.
- *
- * For transactions that only did reads or only wrote to one shard, sends commit directly to
- * the participants and returns the first error response or the last (success) response.
- *
- * For transactions that performed writes to multiple shards, hands off the participant list
- * to the coordinator to do two-phase commit, and returns the coordinator's response.
- */
+ * Commits the transaction.
+ *
+ * For transactions that only did reads or only wrote to one shard, sends commit directly to
+ * the participants and returns the first error response or the last (success) response.
+ *
+ * For transactions that performed writes to multiple shards, hands off the participant list
+ * to the coordinator to do two-phase commit, and returns the coordinator's response.
+ */
BSONObj commitTransaction(OperationContext* opCtx,
const boost::optional<TxnRecoveryToken>& recoveryToken);
/**
- * Sends abort to all participants.
- *
- * Returns the first error response or the last (success) response.
- */
+ * Sends abort to all participants.
+ *
+ * Returns the first error response or the last (success) response.
+ */
BSONObj abortTransaction(OperationContext* opCtx);
/**
- * Sends abort to all shards in the current participant list. Will retry on retryable errors,
- * but ignores the responses from each shard.
- */
+ * Sends abort to all shards in the current participant list. Will retry on retryable
+ * errors, but ignores the responses from each shard.
+ */
void implicitlyAbortTransaction(OperationContext* opCtx, const Status& errorStatus);
/**
- * If a coordinator has been selected for this transaction already, constructs a recovery
- * token, which can be used to resume commit or abort of the transaction from a different
- * router.
- */
+ * If a coordinator has been selected for this transaction already, constructs a recovery
+ * token, which can be used to resume commit or abort of the transaction from a different
+ * router.
+ */
void appendRecoveryToken(BSONObjBuilder* builder) const;
/**
- * Returns a string with the active transaction's transaction number and logical session id
- * (i.e. the transaction id).
- */
+ * Returns a string with the active transaction's transaction number and logical session id
+ * (i.e. the transaction id).
+ */
std::string txnIdToString() const;
/**
- * Returns the participant for this transaction or nullptr if the specified shard is not
- * participant of this transaction.
- */
+ * Returns the participant for this transaction or nullptr if the specified shard is not
+     * a participant of this transaction.
+ */
const Participant* getParticipant(const ShardId& shard);
/**
- * Returns the statement id of the latest received command for this transaction.
- */
+ * Returns the statement id of the latest received command for this transaction.
+ */
StmtId getLatestStmtId() const {
return p().latestStmtId;
}
/**
- * Returns a copy of the timing stats of the transaction router's active transaction.
- */
+ * Returns a copy of the timing stats of the transaction router's active transaction.
+ */
const TimingStats& getTimingStats() const {
return o().timingStats;
}
private:
/**
- * Resets the router's state. Used when the router sees a new transaction for the first time.
- * This is required because we don't create a new router object for each transaction, but
- * instead reuse the same object across different transactions.
- */
+ * Resets the router's state. Used when the router sees a new transaction for the first
+ * time. This is required because we don't create a new router object for each transaction,
+ * but instead reuse the same object across different transactions.
+ */
void _resetRouterState(OperationContext* opCtx, const TxnNumber& txnNumber);
/**
- * Internal method for committing a transaction. Should only throw on failure to send commit.
- */
+ * Internal method for committing a transaction. Should only throw on failure to send
+ * commit.
+ */
BSONObj _commitTransaction(OperationContext* opCtx,
const boost::optional<TxnRecoveryToken>& recoveryToken);
/**
- * Retrieves the transaction's outcome from the shard specified in the recovery token.
- */
+ * Retrieves the transaction's outcome from the shard specified in the recovery token.
+ */
BSONObj _commitWithRecoveryToken(OperationContext* opCtx,
const TxnRecoveryToken& recoveryToken);
/**
- * Hands off coordinating a two-phase commit across all participants to the coordinator
- * shard.
- */
+ * Hands off coordinating a two-phase commit across all participants to the coordinator
+ * shard.
+ */
BSONObj _handOffCommitToCoordinator(OperationContext* opCtx);
/**
- * Sets the given logical time as the atClusterTime for the transaction to be the greater of
- * the given time and the user's afterClusterTime, if one was provided.
- */
+     * Sets the transaction's atClusterTime to the greater of the given logical time and the
+     * user's afterClusterTime, if one was provided.
+ */
void _setAtClusterTime(OperationContext* opCtx,
const boost::optional<LogicalTime>& afterClusterTime,
LogicalTime candidateTime);
/**
- * Throws NoSuchTransaction if the response from abortTransaction failed with a code other
- * than NoSuchTransaction. Does not check for write concern errors.
- */
+ * Throws NoSuchTransaction if the response from abortTransaction failed with a code other
+ * than NoSuchTransaction. Does not check for write concern errors.
+ */
void _assertAbortStatusIsOkOrNoSuchTransaction(
const AsyncRequestsSender::Response& response) const;
/**
- * If the transaction's read concern level is snapshot, asserts the participant's
- * atClusterTime matches the transaction's.
- */
+ * If the transaction's read concern level is snapshot, asserts the participant's
+ * atClusterTime matches the transaction's.
+ */
void _verifyParticipantAtClusterTime(const Participant& participant);
/**
- * Removes all participants created during the current statement from the participant list
- * and sends abortTransaction to each. Waits for all responses before returning.
- */
+ * Removes all participants created during the current statement from the participant list
+ * and sends abortTransaction to each. Waits for all responses before returning.
+ */
void _clearPendingParticipants(OperationContext* opCtx);
/**
- * Creates a new participant for the shard.
- */
+ * Creates a new participant for the shard.
+ */
TransactionRouter::Participant& _createParticipant(OperationContext* opCtx,
const ShardId& shard);
/**
- * Sets the new readOnly value for the current participant on the shard.
- */
+ * Sets the new readOnly value for the current participant on the shard.
+ */
void _setReadOnlyForParticipant(OperationContext* opCtx,
const ShardId& shard,
const Participant::ReadOnly readOnly);
/**
- * Updates relevant metrics when a new transaction is begun.
- */
+ * Updates relevant metrics when a new transaction is begun.
+ */
void _onNewTransaction(OperationContext* opCtx);
/**
- * Updates relevant metrics when a router receives commit for a higher txnNumber than it has
- * seen so far.
- */
+ * Updates relevant metrics when a router receives commit for a higher txnNumber than it has
+ * seen so far.
+ */
void _onBeginRecoveringDecision(OperationContext* opCtx);
/**
- * Updates relevant metrics when the router receives an explicit abort from the client.
- */
+ * Updates relevant metrics when the router receives an explicit abort from the client.
+ */
void _onExplicitAbort(OperationContext* opCtx);
/**
- * Updates relevant metrics when the router begins an implicit abort after an error.
- */
+ * Updates relevant metrics when the router begins an implicit abort after an error.
+ */
void _onImplicitAbort(OperationContext* opCtx, const Status& errorStatus);
/**
- * Updates relevant metrics when a transaction is about to begin commit.
- */
+ * Updates relevant metrics when a transaction is about to begin commit.
+ */
void _onStartCommit(WithLock wl, OperationContext* opCtx);
/**
- * Updates relevant metrics when a transaction receives a successful response for commit.
- */
+ * Updates relevant metrics when a transaction receives a successful response for commit.
+ */
void _onSuccessfulCommit(OperationContext* opCtx);
/**
- * Updates relevant metrics when commit receives a response with a non-retryable command
- * error per the retryable writes specification.
- */
+ * Updates relevant metrics when commit receives a response with a non-retryable command
+ * error per the retryable writes specification.
+ */
void _onNonRetryableCommitError(OperationContext* opCtx, Status commitStatus);
/**
- * The first time this method is called it marks the transaction as over in the router's
- * diagnostics and will log transaction information if its duration is over the global slowMS
- * threshold or the transaction log componenet verbosity >= 1. Only meant to be called when
- * the router definitively knows the transaction's outcome, e.g. it should not be invoked
- * after a network error on commit.
- */
+     * The first time this method is called, it marks the transaction as over in the router's
+     * diagnostics and will log transaction information if its duration is over the global
+     * slowMS threshold or the transaction log component verbosity >= 1. Only meant to be
+ * called when the router definitively knows the transaction's outcome, e.g. it should not
+ * be invoked after a network error on commit.
+ */
void _endTransactionTrackingIfNecessary(OperationContext* opCtx,
TerminationCause terminationCause);
/**
- * Returns all participants created during the current statement.
- */
+ * Returns all participants created during the current statement.
+ */
std::vector<ShardId> _getPendingParticipants() const;
/**
- * Prints slow transaction information to the log.
- */
+ * Prints slow transaction information to the log.
+ */
void _logSlowTransaction(OperationContext* opCtx, TerminationCause terminationCause) const;
/**
- * Returns a string to be logged for slow transactions.
- */
+ * Returns a string to be logged for slow transactions.
+ */
std::string _transactionInfoForLog(OperationContext* opCtx,
TerminationCause terminationCause) const;
diff --git a/src/mongo/s/transaction_router_test.cpp b/src/mongo/s/transaction_router_test.cpp
index 1451e8f3816..d551c2e6399 100644
--- a/src/mongo/s/transaction_router_test.cpp
+++ b/src/mongo/s/transaction_router_test.cpp
@@ -232,16 +232,9 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -258,11 +251,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -281,16 +270,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, BasicStartTxnWithAtClusterTime)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -307,11 +289,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, BasicStartTxnWithAtClusterTime)
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -341,16 +319,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -367,11 +338,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "coordinator" << true << "autocommit" << false << "txnNumber"
<< txnNum),
newCmd);
}
@@ -381,13 +348,8 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "autocommit"
- << false
- << "txnNumber"
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "autocommit" << false << "txnNumber"
<< txnNum);
{
@@ -405,10 +367,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, NewParticipantMustAttachTxnAndRe
<< "test"));
ASSERT_BSONOBJ_EQ(BSON("update"
<< "test"
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
}
@@ -431,16 +390,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, StartingNewTxnShouldClearState)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
@@ -454,16 +406,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, StartingNewTxnShouldClearState)
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum2);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum2);
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -707,26 +652,18 @@ TEST_F(TransactionRouterTestWithDefaultSession, DoesNotAttachTxnNumIfAlreadyTher
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "txnNumber"
- << txnNum
- << "readConcern"
+ << "txnNumber" << txnNum << "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false);
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
BSON("insert"
<< "test"
- << "txnNumber"
- << txnNum));
+ << "txnNumber" << txnNum));
ASSERT_BSONOBJ_EQ(expectedNewObj, newCmd);
}
@@ -744,8 +681,7 @@ DEATH_TEST_F(TransactionRouterTestWithDefaultSession,
shard1,
BSON("insert"
<< "test"
- << "txnNumber"
- << TxnNumber(10)));
+ << "txnNumber" << TxnNumber(10)));
}
TEST_F(TransactionRouterTestWithDefaultSession, AttachTxnValidatesReadConcernIfAlreadyOnCmd) {
@@ -769,16 +705,9 @@ TEST_F(TransactionRouterTestWithDefaultSession, AttachTxnValidatesReadConcernIfA
<< "readConcern"
<< BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum),
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp())
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum),
newCmd);
}
}
@@ -810,14 +739,8 @@ TEST_F(TransactionRouterTestWithDefaultSession, PassesThroughNoReadConcernToPart
BSONObj expectedNewObj = BSON("insert"
<< "test"
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -843,14 +766,8 @@ TEST_F(TransactionRouterTestWithDefaultSession,
<< "test"
<< "readConcern"
<< BSON("afterClusterTime" << kAfterClusterTime.asTimestamp())
- << "startTransaction"
- << true
- << "coordinator"
- << true
- << "autocommit"
- << false
- << "txnNumber"
- << txnNum);
+ << "startTransaction" << true << "coordinator" << true
+ << "autocommit" << false << "txnNumber" << txnNum);
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -1489,8 +1406,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, SnapshotErrorsResetAtClusterTime
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1516,8 +1432,7 @@ TEST_F(TransactionRouterTestWithDefaultSession, SnapshotErrorsResetAtClusterTime
expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << laterTime.asTimestamp());
+ << "atClusterTime" << laterTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1539,8 +1454,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1560,8 +1474,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << laterTimeSameStmt.asTimestamp());
+ << "atClusterTime" << laterTimeSameStmt.asTimestamp());
{
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
@@ -1835,8 +1748,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -2348,8 +2260,7 @@ TEST_F(TransactionRouterTestWithDefaultSession,
BSONObj expectedReadConcern = BSON("level"
<< "snapshot"
- << "atClusterTime"
- << kInMemoryLogicalTime.asTimestamp());
+ << "atClusterTime" << kInMemoryLogicalTime.asTimestamp());
auto newCmd = txnRouter.attachTxnFieldsIfNeeded(operationContext(),
shard1,
@@ -3194,12 +3105,10 @@ TEST_F(TransactionRouterMetricsTest, SlowLoggingPrintsTransactionParameters) {
BSONObjBuilder lsidBob;
getSessionId().serialize(&lsidBob);
- ASSERT_EQUALS(
- 1,
- countLogLinesContaining(str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
- << ", txnNumber: "
- << kTxnNumber
- << ", autocommit: false"));
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ str::stream() << "parameters:{ lsid: " << lsidBob.done().toString()
+ << ", txnNumber: " << kTxnNumber << ", autocommit: false"));
}
TEST_F(TransactionRouterMetricsTest, SlowLoggingPrintsDurationAtEnd) {
diff --git a/src/mongo/s/write_ops/batch_downconvert.cpp b/src/mongo/s/write_ops/batch_downconvert.cpp
index f313a01b8dd..323af2928c1 100644
--- a/src/mongo/s/write_ops/batch_downconvert.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert.cpp
@@ -78,14 +78,11 @@ Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors) {
}
errors->wcError->setStatus({ErrorCodes::WriteConcernFailed, msg});
errors->wcError->setErrInfo(BSON("wtimeout" << true));
- } else if (code == 10990 /* no longer primary */
- ||
- code == 16805 /* replicatedToNum no longer primary */
- ||
- code == 14830 /* gle wmode changed / invalid */
+ } else if (code == 10990 /* no longer primary */
+ || code == 16805 /* replicatedToNum no longer primary */
+ || code == 14830 /* gle wmode changed / invalid */
// 2.6 Error codes
- ||
- code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
+ || code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
code == ErrorCodes::WriteConcernFailed || code == ErrorCodes::PrimarySteppedDown) {
// Write concern errors that get returned as regular errors (result may not be ok: 1.0)
errors->wcError.reset(new WriteConcernErrorDetail());
diff --git a/src/mongo/s/write_ops/batch_downconvert_test.cpp b/src/mongo/s/write_ops/batch_downconvert_test.cpp
index ca9a3cd34d9..a45e7ac1aaa 100644
--- a/src/mongo/s/write_ops/batch_downconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert_test.cpp
@@ -40,8 +40,8 @@
namespace {
using namespace mongo;
-using std::vector;
using std::deque;
+using std::vector;
//
// Tests for parsing GLE responses into write errors and write concern errors for write
@@ -205,14 +205,9 @@ TEST(LegacyGLESuppress, StripCode) {
TEST(LegacyGLESuppress, TimeoutDupError24) {
const BSONObj gleResponse = BSON("ok" << 0.0 << "err"
<< "message"
- << "code"
- << 12345
- << "err"
+ << "code" << 12345 << "err"
<< "timeout"
- << "code"
- << 56789
- << "wtimeout"
- << true);
+ << "code" << 56789 << "wtimeout" << true);
BSONObj stripped = stripNonWCInfo(gleResponse);
ASSERT_EQUALS(stripped.nFields(), 4);
@@ -221,4 +216,4 @@ TEST(LegacyGLESuppress, TimeoutDupError24) {
ASSERT_EQUALS(stripped["code"].numberInt(), 56789);
ASSERT(stripped["wtimeout"].trueValue());
}
-}
+} // namespace
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 4412cd325ef..b06b0c1c63b 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -428,14 +428,9 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
batchOp.abortBatch(errorFromStatus(
{ErrorCodes::NoProgressMade,
str::stream() << "no progress was made executing batch write op in "
- << clientRequest.getNS().ns()
- << " after "
- << kMaxRoundsWithoutProgress
- << " rounds ("
- << numCompletedOps
- << " ops completed in "
- << rounds
- << " rounds total)"}));
+ << clientRequest.getNS().ns() << " after "
+ << kMaxRoundsWithoutProgress << " rounds (" << numCompletedOps
+ << " ops completed in " << rounds << " rounds total)"}));
break;
}
}
@@ -469,4 +464,4 @@ const HostOpTimeMap& BatchWriteExecStats::getWriteOpTimes() const {
return _writeOpTimes;
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index c517f92a76f..b046db54ef1 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -42,9 +42,9 @@
namespace mongo {
-using std::unique_ptr;
using std::set;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -171,9 +171,9 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
static const auto boolSize = 1;
// Add the size of the 'collation' field, if present.
- estSize +=
- !item.getUpdate().getCollation() ? 0 : (UpdateOpEntry::kCollationFieldName.size() +
- item.getUpdate().getCollation()->objsize());
+ estSize += !item.getUpdate().getCollation() ? 0
+ : (UpdateOpEntry::kCollationFieldName.size() +
+ item.getUpdate().getCollation()->objsize());
// Add the size of the 'arrayFilters' field, if present.
estSize += !item.getUpdate().getArrayFilters() ? 0 : ([&item]() {
@@ -209,9 +209,9 @@ int getWriteSizeBytes(const WriteOp& writeOp) {
static const auto intSize = 4;
// Add the size of the 'collation' field, if present.
- estSize +=
- !item.getDelete().getCollation() ? 0 : (DeleteOpEntry::kCollationFieldName.size() +
- item.getDelete().getCollation()->objsize());
+ estSize += !item.getDelete().getCollation() ? 0
+ : (DeleteOpEntry::kCollationFieldName.size() +
+ item.getDelete().getCollation()->objsize());
// Add the size of the 'limit' field.
estSize += DeleteOpEntry::kMultiFieldName.size() + intSize;
@@ -592,7 +592,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
int index = 0;
WriteErrorDetail* lastError = NULL;
- for (vector<TargetedWrite *>::const_iterator it = targetedBatch.getWrites().begin();
+ for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
it != targetedBatch.getWrites().end();
++it, ++index) {
const TargetedWrite* write = *it;
@@ -766,9 +766,9 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
// Generate the multi-error message below
if (_wcErrors.size() == 1) {
auto status = _wcErrors.front().error.toStatus();
- error->setStatus(
- status.withReason(str::stream() << status.reason() << " at "
- << _wcErrors.front().endpoint.shardName));
+ error->setStatus(status.withReason(str::stream()
+ << status.reason() << " at "
+ << _wcErrors.front().endpoint.shardName));
} else {
StringBuilder msg;
msg << "multiple errors reported : ";
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 079960cf320..133d077dc1a 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -43,14 +43,9 @@ TEST(BatchedCommandRequest, BasicInsert) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents"
- << insertArray
- << "writeConcern"
- << BSON("w" << 1)
- << "ordered"
- << true
- << "allowImplicitCollectionCreation"
- << false);
+ << "documents" << insertArray << "writeConcern"
+ << BSON("w" << 1) << "ordered" << true
+ << "allowImplicitCollectionCreation" << false);
for (auto docSeq : {false, true}) {
const auto opMsgRequest(toOpMsg("TestDB", origInsertRequestObj, docSeq));
@@ -69,13 +64,8 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents"
- << insertArray
- << "writeConcern"
- << BSON("w" << 1)
- << "ordered"
- << true
- << "shardVersion"
+ << "documents" << insertArray << "writeConcern"
+ << BSON("w" << 1) << "ordered" << true << "shardVersion"
<< BSON_ARRAY(Timestamp(1, 2) << epoch));
for (auto docSeq : {false, true}) {
diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp
index 9ec01a62e61..cd40da6ae1b 100644
--- a/src/mongo/s/write_ops/batched_command_response.cpp
+++ b/src/mongo/s/write_ops/batched_command_response.cpp
@@ -40,8 +40,8 @@
namespace mongo {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using str::stream;
@@ -112,8 +112,8 @@ BSONObj BatchedCommandResponse::toBSON() const {
builder.appendOID(electionId(), const_cast<OID*>(&_electionId));
if (_writeErrorDetails.get()) {
- auto errorMessage =
- [ errorCount = size_t(0), errorSize = size_t(0) ](StringData rawMessage) mutable {
+ auto errorMessage = [errorCount = size_t(0),
+ errorSize = size_t(0)](StringData rawMessage) mutable {
// Start truncating error messages once both of these limits are exceeded.
constexpr size_t kErrorSizeTruncationMin = 1024 * 1024;
constexpr size_t kErrorCountTruncationMin = 2;
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 09e2b7d0eed..726760554eb 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -58,17 +58,13 @@ TEST(BatchedCommandResponse, Basic) {
BSONObj writeConcernError(
BSON("code" << 8 << "codeName" << ErrorCodes::errorString(ErrorCodes::Error(8)) << "errmsg"
<< "norepl"
- << "errInfo"
- << BSON("a" << 1)));
+ << "errInfo" << BSON("a" << 1)));
BSONObj origResponseObj =
- BSON(BatchedCommandResponse::n(0) << "opTime" << mongo::Timestamp(1ULL)
- << BatchedCommandResponse::writeErrors()
- << writeErrorsArray
- << BatchedCommandResponse::writeConcernError()
- << writeConcernError
- << "ok"
- << 1.0);
+ BSON(BatchedCommandResponse::n(0)
+ << "opTime" << mongo::Timestamp(1ULL) << BatchedCommandResponse::writeErrors()
+ << writeErrorsArray << BatchedCommandResponse::writeConcernError() << writeConcernError
+ << "ok" << 1.0);
string errMsg;
BatchedCommandResponse response;
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter.cpp b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
index d723f59d70c..39bb70a734b 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter.cpp
+++ b/src/mongo/s/write_ops/chunk_manager_targeter.cpp
@@ -322,9 +322,9 @@ bool isMetadataDifferent(const std::shared_ptr<ChunkManager>& managerA,
}
/**
-* Whether or not the manager/primary pair was changed or refreshed from a previous version
-* of the metadata.
-*/
+ * Whether or not the manager/primary pair was changed or refreshed from a previous version
+ * of the metadata.
+ */
bool wasMetadataRefreshed(const std::shared_ptr<ChunkManager>& managerA,
const std::shared_ptr<Shard>& primaryA,
const std::shared_ptr<ChunkManager>& managerB,
@@ -456,8 +456,9 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
}
// Utility function to target an update by shard key, and to handle any potential error results.
- const auto targetByShardKey = [&collation, this](
- StatusWith<BSONObj> shardKey, StringData msg) -> StatusWith<std::vector<ShardEndpoint>> {
+ const auto targetByShardKey = [&collation,
+ this](StatusWith<BSONObj> shardKey,
+ StringData msg) -> StatusWith<std::vector<ShardEndpoint>> {
if (!shardKey.isOK()) {
return shardKey.getStatus().withContext(msg);
}
@@ -505,10 +506,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetUpdate(
"collation) or must target a single shard (and have the simple "
"collation), but this update targeted "
<< shardEndPoints.getValue().size()
- << " shards. Update request: "
- << updateDoc.toBSON()
- << ", shard key pattern: "
- << shardKeyPattern.toString()};
+ << " shards. Update request: " << updateDoc.toBSON()
+ << ", shard key pattern: " << shardKeyPattern.toString()};
}
// If the request is {multi:false}, then this is a single op-style update which we are
@@ -567,8 +566,8 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!cq.isOK()) {
- return cq.getStatus().withContext(str::stream() << "Could not parse delete query "
- << deleteDoc.getQ());
+ return cq.getStatus().withContext(str::stream()
+ << "Could not parse delete query " << deleteDoc.getQ());
}
// Single deletes must target a single shard or be exact-ID.
@@ -580,8 +579,7 @@ StatusWith<std::vector<ShardEndpoint>> ChunkManagerTargeter::targetDelete(
"match on _id (and have the collection default collation) or "
"contain the shard key (and have the simple collation). Delete "
"request: "
- << deleteDoc.toBSON()
- << ", shard key pattern: "
+ << deleteDoc.toBSON() << ", shard key pattern: "
<< _routingInfo->cm()->getShardKeyPattern().toString());
}
diff --git a/src/mongo/scripting/bson_template_evaluator.h b/src/mongo/scripting/bson_template_evaluator.h
index 6d2f0565790..7a21f07680d 100644
--- a/src/mongo/scripting/bson_template_evaluator.h
+++ b/src/mongo/scripting/bson_template_evaluator.h
@@ -269,4 +269,4 @@ private:
PseudoRandom rng;
};
-} // end namespace
+} // namespace mongo
diff --git a/src/mongo/scripting/bson_template_evaluator_test.cpp b/src/mongo/scripting/bson_template_evaluator_test.cpp
index cebc0281576..6309fb79f35 100644
--- a/src/mongo/scripting/bson_template_evaluator_test.cpp
+++ b/src/mongo/scripting/bson_template_evaluator_test.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/scripting/bson_template_evaluator.h"
#include "mongo/db/jsobj.h"
+#include "mongo/scripting/bson_template_evaluator.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -95,8 +95,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id"
- << 1),
+ << "id" << 1),
builder8));
BSONObj obj8 = builder8.obj();
ASSERT_EQUALS(obj8.nFields(), 3);
@@ -123,8 +122,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2"
- << randObj),
+ << "randField2" << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -141,8 +139,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -192,8 +189,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id"
- << 1),
+ << "id" << 1),
builder8));
BSONObj obj8 = builder8.obj();
ASSERT_EQUALS(obj8.nFields(), 3);
@@ -220,8 +216,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2"
- << randObj),
+ << "randField2" << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -240,8 +235,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -445,8 +439,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << 1 << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder6));
BSONObj obj6 = builder6.obj();
ASSERT_EQUALS(obj6.nFields(), 3);
@@ -460,8 +453,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id"
- << 1),
+ << "id" << 1),
builder7));
BSONObj obj7 = builder7.obj();
ASSERT_EQUALS(obj7.nFields(), 3);
@@ -486,8 +478,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2"
- << randObj),
+ << "randField2" << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -503,8 +494,7 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField"
- << randObj),
+ << "randField" << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -559,9 +549,7 @@ TEST(BSONTemplateEvaluatorTest, CONCAT) {
ASSERT_EQUALS(obj4.nFields(), 3);
expectedObj = BSON("concatField1"
<< "hello world"
- << "middleKey"
- << 1
- << "concatField2"
+ << "middleKey" << 1 << "concatField2"
<< "hello world");
ASSERT_BSONOBJ_EQ(obj4, expectedObj);
@@ -683,8 +671,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
BSONObj bazObj = BSON("baz" << innerObj);
outerObj = BSON("foo"
<< "hi"
- << "bar"
- << bazObj);
+ << "bar" << bazObj);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << outerObj), builder3));
BSONObj obj3 = builder3.obj();
@@ -705,10 +692,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
<< "bye");
outerObj = BSON("foo"
<< "hi"
- << "bar"
- << barObj4
- << "baz"
- << bazObj4);
+ << "bar" << barObj4 << "baz" << bazObj4);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << outerObj), builder4));
BSONObj obj4 = builder4.obj();
@@ -732,8 +716,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
<< "let"
<< "target"
<< "x"
- << "value"
- << innerObj);
+ << "value" << innerObj);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusBadOperator, t.evaluate(outerObj, builder5));
// Test success for elements in an array that need evaluation
@@ -744,8 +727,7 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
BSONObj elem3 = BSON("baz" << 42);
outerObj = BSON("foo"
<< "hi"
- << "bar"
- << BSON_ARRAY(elem1 << elem2 << elem3 << 7));
+ << "bar" << BSON_ARRAY(elem1 << elem2 << elem3 << 7));
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess, t.evaluate(outerObj, builder6));
BSONObj obj6 = builder6.obj();
BSONElement obj6_bar = obj6["bar"];
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 28cb06c949e..4fb071471eb 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -240,9 +240,9 @@ void Scope::loadStored(OperationContext* opCtx, bool ignoreNotConnected) {
if (MONGO_FAIL_POINT(mr_killop_test_fp)) {
/* This thread sleep makes the interrupts in the test come in at a time
- * where the js misses the interrupt and throw an exception instead of
- * being interrupted
- */
+         * where the js misses the interrupt and throws an exception instead of
+ * being interrupted
+ */
stdx::this_thread::sleep_for(stdx::chrono::seconds(1));
}
@@ -309,7 +309,7 @@ extern const JSFile utils_sh;
extern const JSFile utils_auth;
extern const JSFile bulk_api;
extern const JSFile error_codes;
-}
+} // namespace JSFiles
void Scope::execCoreFiles() {
execSetup(JSFiles::utils);
diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h
index f5a85d44f3e..18eb7554e6e 100644
--- a/src/mongo/scripting/engine.h
+++ b/src/mongo/scripting/engine.h
@@ -279,4 +279,4 @@ const char* jsSkipWhiteSpace(const char* raw);
ScriptEngine* getGlobalScriptEngine();
void setGlobalScriptEngine(ScriptEngine* impl);
-}
+} // namespace mongo
diff --git a/src/mongo/scripting/engine_none.cpp b/src/mongo/scripting/engine_none.cpp
index d6297be697a..0262fbf24fe 100644
--- a/src/mongo/scripting/engine_none.cpp
+++ b/src/mongo/scripting/engine_none.cpp
@@ -37,4 +37,4 @@ void ScriptEngine::setup() {
std::string ScriptEngine::getInterpreterVersionString() {
return "";
}
-}
+} // namespace mongo
diff --git a/src/mongo/scripting/mozjs/bson.cpp b/src/mongo/scripting/mozjs/bson.cpp
index abef2b769a0..7972cdbaca0 100644
--- a/src/mongo/scripting/mozjs/bson.cpp
+++ b/src/mongo/scripting/mozjs/bson.cpp
@@ -47,7 +47,9 @@ namespace mozjs {
const char* const BSONInfo::className = "BSON";
const JSFunctionSpec BSONInfo::freeFunctions[3] = {
- MONGO_ATTACH_JS_FUNCTION(bsonWoCompare), MONGO_ATTACH_JS_FUNCTION(bsonBinaryEqual), JS_FS_END,
+ MONGO_ATTACH_JS_FUNCTION(bsonWoCompare),
+ MONGO_ATTACH_JS_FUNCTION(bsonBinaryEqual),
+ JS_FS_END,
};
diff --git a/src/mongo/scripting/mozjs/code.cpp b/src/mongo/scripting/mozjs/code.cpp
index 957a6e5feb9..66dfa086559 100644
--- a/src/mongo/scripting/mozjs/code.cpp
+++ b/src/mongo/scripting/mozjs/code.cpp
@@ -43,7 +43,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec CodeInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, CodeInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, CodeInfo),
+ JS_FS_END,
};
const char* const CodeInfo::className = "Code";
@@ -51,9 +52,9 @@ const char* const CodeInfo::className = "Code";
void CodeInfo::Functions::toString::call(JSContext* cx, JS::CallArgs args) {
ObjectWrapper o(cx, args.thisv());
- std::string str = str::stream() << "Code({\"code\":\"" << o.getString(InternedString::code)
- << "\","
- << "\"scope\":" << o.getObject(InternedString::scope) << "\"})";
+ std::string str = str::stream()
+ << "Code({\"code\":\"" << o.getString(InternedString::code) << "\","
+ << "\"scope\":" << o.getObject(InternedString::scope) << "\"})";
ValueReader(cx, args.rval()).fromStringData(str);
}
diff --git a/src/mongo/scripting/mozjs/cursor_handle.cpp b/src/mongo/scripting/mozjs/cursor_handle.cpp
index 28c7a483936..ee781700357 100644
--- a/src/mongo/scripting/mozjs/cursor_handle.cpp
+++ b/src/mongo/scripting/mozjs/cursor_handle.cpp
@@ -41,7 +41,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec CursorHandleInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(zeroCursorId, CursorHandleInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(zeroCursorId, CursorHandleInfo),
+ JS_FS_END,
};
const char* const CursorHandleInfo::className = "CursorHandle";
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 4bf7815e924..1d16da68f19 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -61,7 +61,7 @@ namespace mongo {
namespace JSFiles {
extern const JSFile types;
extern const JSFile assert;
-} // namespace
+} // namespace JSFiles
namespace mozjs {
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index 6d23852be71..943d0b04716 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -657,12 +657,11 @@ void MongoBase::Functions::copyDatabaseWithSCRAM::call(JSContext* cx, JS::CallAr
BSONObj saslFirstCommandPrefix =
BSON("copydbsaslstart" << 1 << "fromhost" << fromHost << "fromdb" << fromDb
- << saslCommandMechanismFieldName
- << "SCRAM-SHA-1");
+ << saslCommandMechanismFieldName << "SCRAM-SHA-1");
- BSONObj saslFollowupCommandPrefix = BSON(
- "copydb" << 1 << "fromhost" << fromHost << "fromdb" << fromDb << "todb" << toDb << "slaveOk"
- << slaveOk);
+ BSONObj saslFollowupCommandPrefix =
+ BSON("copydb" << 1 << "fromhost" << fromHost << "fromdb" << fromDb << "todb" << toDb
+ << "slaveOk" << slaveOk);
BSONObj saslCommandPrefix = saslFirstCommandPrefix;
BSONObj inputObj = BSON(saslCommandPayloadFieldName << "");
diff --git a/src/mongo/scripting/mozjs/mongohelpers.js b/src/mongo/scripting/mozjs/mongohelpers.js
index a59c7787918..c8edfb23ca6 100644
--- a/src/mongo/scripting/mozjs/mongohelpers.js
+++ b/src/mongo/scripting/mozjs/mongohelpers.js
@@ -35,7 +35,6 @@
exportToMongoHelpers = {
// This function accepts an expression or function body and returns a function definition
'functionExpressionParser': function functionExpressionParser(fnSrc) {
-
// Ensure that a provided expression or function body is not terminated with a ';'.
// This ensures we interpret the input as a single expression, rather than a sequence
// of expressions, and can wrap it in parentheses.
@@ -52,7 +51,7 @@ exportToMongoHelpers = {
} else if (e == 'SyntaxError: return not in function') {
return 'function() { ' + fnSrc + ' }';
} else {
- throw(e);
+ throw (e);
}
}
// Input source is a series of expressions. we should prepend the last one with return
diff --git a/src/mongo/scripting/mozjs/nativefunction.cpp b/src/mongo/scripting/mozjs/nativefunction.cpp
index a23b7174311..135d2600e44 100644
--- a/src/mongo/scripting/mozjs/nativefunction.cpp
+++ b/src/mongo/scripting/mozjs/nativefunction.cpp
@@ -47,7 +47,8 @@ const char* const NativeFunctionInfo::inheritFrom = "Function";
const char* const NativeFunctionInfo::className = "NativeFunction";
const JSFunctionSpec NativeFunctionInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, NativeFunctionInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, NativeFunctionInfo),
+ JS_FS_END,
};
namespace {
diff --git a/src/mongo/scripting/mozjs/object.cpp b/src/mongo/scripting/mozjs/object.cpp
index 3f9c84df90d..ec1de920391 100644
--- a/src/mongo/scripting/mozjs/object.cpp
+++ b/src/mongo/scripting/mozjs/object.cpp
@@ -40,7 +40,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec ObjectInfo::methods[2] = {
- MONGO_ATTACH_JS_FUNCTION(bsonsize), JS_FS_END,
+ MONGO_ATTACH_JS_FUNCTION(bsonsize),
+ JS_FS_END,
};
const char* const ObjectInfo::className = "Object";
diff --git a/src/mongo/scripting/mozjs/objectwrapper.cpp b/src/mongo/scripting/mozjs/objectwrapper.cpp
index 3c57e262029..d934c28ed37 100644
--- a/src/mongo/scripting/mozjs/objectwrapper.cpp
+++ b/src/mongo/scripting/mozjs/objectwrapper.cpp
@@ -615,11 +615,8 @@ BSONObj ObjectWrapper::toBSON() {
const int sizeWithEOO = b.len() + 1 /*EOO*/ - 4 /*BSONObj::Holder ref count*/;
uassert(17260,
str::stream() << "Converting from JavaScript to BSON failed: "
- << "Object size "
- << sizeWithEOO
- << " exceeds limit of "
- << BSONObjMaxInternalSize
- << " bytes.",
+ << "Object size " << sizeWithEOO << " exceeds limit of "
+ << BSONObjMaxInternalSize << " bytes.",
sizeWithEOO <= BSONObjMaxInternalSize);
return b.obj();
diff --git a/src/mongo/scripting/mozjs/regexp.cpp b/src/mongo/scripting/mozjs/regexp.cpp
index b2e4d0b85a7..75d7a7ac915 100644
--- a/src/mongo/scripting/mozjs/regexp.cpp
+++ b/src/mongo/scripting/mozjs/regexp.cpp
@@ -37,7 +37,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec RegExpInfo::methods[2] = {
- MONGO_ATTACH_JS_FUNCTION(toJSON), JS_FS_END,
+ MONGO_ATTACH_JS_FUNCTION(toJSON),
+ JS_FS_END,
};
const char* const RegExpInfo::className = "RegExp";
diff --git a/src/mongo/scripting/mozjs/session.cpp b/src/mongo/scripting/mozjs/session.cpp
index d1617892a4c..7e785888a52 100644
--- a/src/mongo/scripting/mozjs/session.cpp
+++ b/src/mongo/scripting/mozjs/session.cpp
@@ -122,9 +122,7 @@ void endSession(SessionHolder* holder) {
if (holder->txnState == SessionHolder::TransactionState::kActive) {
holder->txnState = SessionHolder::TransactionState::kAborted;
BSONObj abortObj = BSON("abortTransaction" << 1 << "lsid" << holder->lsid << "txnNumber"
- << holder->txnNumber
- << "autocommit"
- << false);
+ << holder->txnNumber << "autocommit" << false);
MONGO_COMPILER_VARIABLE_UNUSED auto ignored =
holder->client->runCommand("admin", abortObj, out);
diff --git a/src/mongo/scripting/mozjs/timestamp.cpp b/src/mongo/scripting/mozjs/timestamp.cpp
index 88f9331bef3..e114535afee 100644
--- a/src/mongo/scripting/mozjs/timestamp.cpp
+++ b/src/mongo/scripting/mozjs/timestamp.cpp
@@ -46,7 +46,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec TimestampInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toJSON, TimestampInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toJSON, TimestampInfo),
+ JS_FS_END,
};
const char* const TimestampInfo::className = "Timestamp";
@@ -62,9 +63,7 @@ double getTimestampArg(JSContext* cx, JS::CallArgs args, int idx, std::string na
if (val < 0 || val > maxArgVal) {
uasserted(ErrorCodes::BadValue,
str::stream() << name << " must be non-negative and not greater than "
- << maxArgVal
- << ", got "
- << val);
+ << maxArgVal << ", got " << val);
}
return val;
}
diff --git a/src/mongo/scripting/mozjs/uri.cpp b/src/mongo/scripting/mozjs/uri.cpp
index f381eae9298..4b46ce71570 100644
--- a/src/mongo/scripting/mozjs/uri.cpp
+++ b/src/mongo/scripting/mozjs/uri.cpp
@@ -47,7 +47,8 @@ namespace mongo {
namespace mozjs {
const JSFunctionSpec URIInfo::methods[2] = {
- MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, URIInfo), JS_FS_END,
+ MONGO_ATTACH_JS_CONSTRAINED_METHOD(toString, URIInfo),
+ JS_FS_END,
};
const char* const URIInfo::className = "MongoURI";
diff --git a/src/mongo/scripting/mozjs/valuewriter.cpp b/src/mongo/scripting/mozjs/valuewriter.cpp
index ef3b0b4d428..f40ef984576 100644
--- a/src/mongo/scripting/mozjs/valuewriter.cpp
+++ b/src/mongo/scripting/mozjs/valuewriter.cpp
@@ -316,8 +316,7 @@ void ValueWriter::_writeObject(BSONObjBuilder* b,
if (scope->getProto<CodeInfo>().getJSClass() == jsclass) {
if (o.hasOwnField(InternedString::scope) // CodeWScope
- &&
- o.type(InternedString::scope) == mongo::Object) {
+ && o.type(InternedString::scope) == mongo::Object) {
if (o.type(InternedString::code) != mongo::String) {
uasserted(ErrorCodes::BadValue, "code must be a string");
}
diff --git a/src/mongo/scripting/mozjs/wrapconstrainedmethod.h b/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
index 12a94458896..e5110b1bd1c 100644
--- a/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
+++ b/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
@@ -94,24 +94,21 @@ bool wrapConstrainedMethod(JSContext* cx, unsigned argc, JS::Value* vp) {
if (!args.thisv().isObject()) {
uasserted(ErrorCodes::BadValue,
- str::stream() << "Cannot call \"" << T::name()
- << "\" on non-object of type \""
- << ValueWriter(cx, args.thisv()).typeAsString()
- << "\"");
+ str::stream()
+ << "Cannot call \"" << T::name() << "\" on non-object of type \""
+ << ValueWriter(cx, args.thisv()).typeAsString() << "\"");
}
if (!instanceOf<Args..., void>(getScope(cx), &isProto, args.thisv())) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name() << "\" on object of type \""
- << ObjectWrapper(cx, args.thisv()).getClassName()
- << "\"");
+ << ObjectWrapper(cx, args.thisv()).getClassName() << "\"");
}
if (noProto && isProto) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name() << "\" on prototype of \""
- << ObjectWrapper(cx, args.thisv()).getClassName()
- << "\"");
+ << ObjectWrapper(cx, args.thisv()).getClassName() << "\"");
}
T::call(cx, args);
diff --git a/src/mongo/scripting/mozjs/wraptype.h b/src/mongo/scripting/mozjs/wraptype.h
index e2ca4b358be..e3e4acde7bd 100644
--- a/src/mongo/scripting/mozjs/wraptype.h
+++ b/src/mongo/scripting/mozjs/wraptype.h
@@ -67,20 +67,22 @@
#define MONGO_ATTACH_JS_FUNCTION(name) MONGO_ATTACH_JS_FUNCTION_WITH_FLAGS(name, 0)
-#define MONGO_ATTACH_JS_CONSTRAINED_METHOD(name, ...) \
- { \
- #name, {smUtils::wrapConstrainedMethod < Functions::name, false, __VA_ARGS__ >, nullptr }, \
- 0, \
- 0, \
- nullptr \
- }
-
-#define MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(name, ...) \
- { \
- #name, {smUtils::wrapConstrainedMethod < Functions::name, true, __VA_ARGS__ >, nullptr }, \
- 0, \
- 0, \
- nullptr \
+#define MONGO_ATTACH_JS_CONSTRAINED_METHOD(name, ...) \
+ { \
+#name, \
+ {smUtils::wrapConstrainedMethod < Functions::name, false, __VA_ARGS__>, nullptr }, \
+ 0, \
+ 0, \
+ nullptr \
+ }
+
+#define MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(name, ...) \
+ { \
+#name, \
+ {smUtils::wrapConstrainedMethod < Functions::name, true, __VA_ARGS__>, nullptr }, \
+ 0, \
+ 0, \
+ nullptr \
}
namespace mongo {
diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js
index ce2ec257763..20c8f2d481b 100644
--- a/src/mongo/shell/assert.js
+++ b/src/mongo/shell/assert.js
@@ -1,13 +1,13 @@
doassert = function(msg, obj) {
// eval if msg is a function
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg();
- if (typeof(msg) == "object")
+ if (typeof (msg) == "object")
msg = tojson(msg);
if (jsTest.options().traceExceptions) {
- if (typeof(msg) == "string" && msg.indexOf("assert") == 0)
+ if (typeof (msg) == "string" && msg.indexOf("assert") == 0)
print(msg);
else
print("assert: " + msg);
@@ -27,7 +27,6 @@ doassert = function(msg, obj) {
// Sort doc/obj fields and return new sorted obj
sortDoc = function(doc) {
-
// Helper to sort the elements of the array
var sortElementsOfArray = function(arr) {
var newArr = [];
@@ -318,7 +317,7 @@ assert = (function() {
var msgPrefix = "assert.soon failed: " + func;
if (msg) {
- if (typeof(msg) != "function") {
+ if (typeof (msg) != "function") {
msgPrefix = "assert.soon failed, msg";
}
}
@@ -328,7 +327,7 @@ assert = (function() {
interval = interval || 200;
var last;
while (1) {
- if (typeof(func) == "string") {
+ if (typeof (func) == "string") {
if (eval(func))
return;
} else {
@@ -418,7 +417,7 @@ assert = (function() {
var start = new Date();
timeout = timeout || 30000;
- if (typeof(f) == "string") {
+ if (typeof (f) == "string") {
res = eval(f);
} else {
res = f();
@@ -929,7 +928,6 @@ assert = (function() {
};
assert.gleOK = function(res, msg) {
-
var errMsg = null;
if (!res) {
@@ -950,7 +948,7 @@ assert = (function() {
assert.gleSuccess = function(dbOrGLEDoc, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (gle.err) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(msg, "getLastError not null: " + tojson(gle)), gle);
}
@@ -960,7 +958,7 @@ assert = (function() {
assert.gleError = function(dbOrGLEDoc, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (!gle.err) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(msg, "getLastError is null: " + tojson(gle)));
}
@@ -969,7 +967,7 @@ assert = (function() {
assert.gleErrorCode = function(dbOrGLEDoc, code, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (!gle.err || gle.code != code) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(
msg,
@@ -980,7 +978,7 @@ assert = (function() {
assert.gleErrorRegex = function(dbOrGLEDoc, regex, msg) {
var gle = dbOrGLEDoc instanceof DB ? dbOrGLEDoc.getLastErrorObj() : dbOrGLEDoc;
if (!gle.err || !regex.test(gle.err)) {
- if (typeof(msg) == "function")
+ if (typeof (msg) == "function")
msg = msg(gle);
doassert(_buildAssertionMessage(
msg,
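A hedged illustration of the two assert.soon call styles touched in the hunks above; the collection name is hypothetical:

    // A function predicate is re-invoked until it returns truthy or the
    // timeout (ms) elapses; the interval (ms) paces the retries.
    assert.soon(function() {
        return db.foo.count() > 0;
    }, "expected a document to appear", 30 * 1000, 200);
    // A string predicate is eval()'d on each retry instead of being called.
    assert.soon("db.foo.count() > 0");
    // As in the gle* helpers above, msg may itself be a function that builds
    // the failure message lazily.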
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 78769755580..e682a08f2bd 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -924,8 +924,7 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
{
opState.stats->trappedErrors.push_back(
BSON("error" << ex.what() << "op" << kOpTypeNames.find(op.op)->second
- << "count"
- << count));
+ << "count" << count));
}
if (_config->breakOnTrap)
return;
@@ -1040,8 +1039,8 @@ void BenchRunOp::executeOnce(DBClientBase* conn,
boost::none); // lastKnownCommittedOpTime
BSONObj getMoreCommandResult;
uassert(ErrorCodes::CommandFailed,
- str::stream() << "getMore command failed; reply was: "
- << getMoreCommandResult,
+ str::stream()
+ << "getMore command failed; reply was: " << getMoreCommandResult,
runCommandWithSession(conn,
this->ns,
getMoreRequest.toBSON(),
@@ -1390,11 +1389,11 @@ void BenchRunner::start() {
if (_config->username != "") {
std::string errmsg;
if (!conn->auth("admin", _config->username, _config->password, errmsg)) {
- uasserted(
- 16704,
- str::stream() << "User " << _config->username
- << " could not authenticate to admin db; admin db access is "
- "required to use benchRun with auth enabled");
+ uasserted(16704,
+ str::stream()
+ << "User " << _config->username
+ << " could not authenticate to admin db; admin db access is "
+ "required to use benchRun with auth enabled");
}
}
@@ -1429,11 +1428,11 @@ void BenchRunner::stop() {
std::string errmsg;
// this can only fail if admin access was revoked since start of run
if (!conn->auth("admin", _config->username, _config->password, errmsg)) {
- uasserted(
- 16705,
- str::stream() << "User " << _config->username
- << " could not authenticate to admin db; admin db access is "
- "still required to use benchRun with auth enabled");
+ uasserted(16705,
+ str::stream()
+ << "User " << _config->username
+ << " could not authenticate to admin db; admin db access is "
+ "still required to use benchRun with auth enabled");
}
}
}
diff --git a/src/mongo/shell/bench.h b/src/mongo/shell/bench.h
index 527beecae49..f73d2149abe 100644
--- a/src/mongo/shell/bench.h
+++ b/src/mongo/shell/bench.h
@@ -45,7 +45,7 @@
namespace pcrecpp {
class RE;
-} // namespace pcrecpp;
+} // namespace pcrecpp
namespace mongo {
@@ -431,9 +431,9 @@ public:
bool shouldWorkerFinish() const;
/**
- * Predicate that workers call to see if they should start collecting stats (as a result
- * of a call to tellWorkersToCollectStats()).
- */
+ * Predicate that workers call to see if they should start collecting stats (as a result
+ * of a call to tellWorkersToCollectStats()).
+ */
bool shouldWorkerCollectStats() const;
/**
diff --git a/src/mongo/shell/bulk_api.js b/src/mongo/shell/bulk_api.js
index eac31f7a871..eac2c063374 100644
--- a/src/mongo/shell/bulk_api.js
+++ b/src/mongo/shell/bulk_api.js
@@ -2,7 +2,6 @@
// Scope for the function
//
var _bulk_api_module = (function() {
-
// Batch types
var NONE = 0;
var INSERT = 1;
@@ -37,7 +36,6 @@ var _bulk_api_module = (function() {
* Accepts { w : x, j : x, wtimeout : x, fsync: x } or w, wtimeout, j
*/
var WriteConcern = function(wValue, wTimeout, jValue) {
-
if (!(this instanceof WriteConcern)) {
var writeConcern = Object.create(WriteConcern.prototype);
WriteConcern.apply(writeConcern, arguments);
@@ -97,7 +95,6 @@ var _bulk_api_module = (function() {
this.shellPrint = function() {
return this.toString();
};
-
};
/**
@@ -107,7 +104,6 @@ var _bulk_api_module = (function() {
* are used to filter the WriteResult to only include relevant result fields.
*/
var WriteResult = function(bulkResult, singleBatchType, writeConcern) {
-
if (!(this instanceof WriteResult))
return new WriteResult(bulkResult, singleBatchType, writeConcern);
@@ -217,7 +213,6 @@ var _bulk_api_module = (function() {
* Wraps the result for the commands
*/
var BulkWriteResult = function(bulkResult, singleBatchType, writeConcern) {
-
if (!(this instanceof BulkWriteResult) && !(this instanceof BulkWriteError))
return new BulkWriteResult(bulkResult, singleBatchType, writeConcern);
@@ -354,7 +349,6 @@ var _bulk_api_module = (function() {
* Represents a bulk write error, identical to a BulkWriteResult but thrown
*/
var BulkWriteError = function(bulkResult, singleBatchType, writeConcern, message) {
-
if (!(this instanceof BulkWriteError))
return new BulkWriteError(bulkResult, singleBatchType, writeConcern, message);
@@ -397,7 +391,6 @@ var _bulk_api_module = (function() {
* Wraps a command error
*/
var WriteCommandError = function(commandError) {
-
if (!(this instanceof WriteCommandError))
return new WriteCommandError(commandError);
@@ -607,7 +600,6 @@ var _bulk_api_module = (function() {
// Add to internal list of documents
var addToOperationsList = function(docType, document) {
-
if (Array.isArray(document))
throw Error("operation passed in cannot be an Array");
@@ -638,7 +630,7 @@ var _bulk_api_module = (function() {
* Otherwise, returns the same object passed.
*/
var addIdIfNeeded = function(obj) {
- if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
+ if (typeof (obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
obj = {_id: new ObjectId()};
for (var key in tmp) {
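The helper's contract, sketched from this hunk; the ObjectId value is illustrative:

    // A missing _id is generated and placed first in field order; the input
    // object is copied rather than mutated, and arrays pass through unchanged.
    var doc = addIdIfNeeded({name: "a"});  // -> {_id: ObjectId("..."), name: "a"}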
@@ -812,7 +804,6 @@ var _bulk_api_module = (function() {
//
// Merge write command result into aggregated results object
var mergeBatchResults = function(batch, bulkResult, result) {
-
// If we have an insert Batch type
if (batch.batchType == INSERT) {
bulkResult.nInserted = bulkResult.nInserted + result.n;
@@ -1009,8 +1000,8 @@ var _bulk_api_module = (function() {
} else if (code == 19900 || // No longer primary
code == 16805 || // replicatedToNum no longer primary
code == 14330 || // gle wmode changed; invalid
- code == NOT_MASTER ||
- code == UNKNOWN_REPL_WRITE_CONCERN || code == WRITE_CONCERN_FAILED) {
+ code == NOT_MASTER || code == UNKNOWN_REPL_WRITE_CONCERN ||
+ code == WRITE_CONCERN_FAILED) {
extractedErr.wcError = {code: code, errmsg: errMsg};
} else if (!isOK) {
// This is a GLE failure we don't understand
@@ -1037,7 +1028,6 @@ var _bulk_api_module = (function() {
// Execute the operations, serially
var executeBatchWithLegacyOps = function(batch) {
-
var batchResult = {n: 0, writeErrors: [], upserted: []};
var extractedErr = null;
@@ -1113,10 +1103,11 @@ var _bulk_api_module = (function() {
bsonWoCompare(writeConcern, {w: 0}) != 0;
extractedErr = null;
- if (needToEnforceWC && (batchResult.writeErrors.length == 0 ||
- (!ordered &&
- // not all errored.
- batchResult.writeErrors.length < batch.operations.length))) {
+ if (needToEnforceWC &&
+ (batchResult.writeErrors.length == 0 ||
+ (!ordered &&
+ // not all errored.
+ batchResult.writeErrors.length < batch.operations.length))) {
// if last write errored
if (batchResult.writeErrors.length > 0 &&
batchResult.writeErrors[batchResult.writeErrors.length - 1].index ==
@@ -1237,7 +1228,6 @@ var _bulk_api_module = (function() {
};
return module;
-
})();
// Globals
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index c6c03b35e46..04aeddea965 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -167,7 +167,7 @@ DBCollection.prototype._makeCommand = function(cmd, params) {
};
DBCollection.prototype._dbCommand = function(cmd, params) {
- if (typeof(cmd) === "object")
+ if (typeof (cmd) === "object")
return this._db._dbCommand(cmd, {}, this.getQueryOptions());
return this._db._dbCommand(this._makeCommand(cmd, params), {}, this.getQueryOptions());
@@ -175,7 +175,7 @@ DBCollection.prototype._dbCommand = function(cmd, params) {
// Like _dbCommand, but applies $readPreference
DBCollection.prototype._dbReadCommand = function(cmd, params) {
- if (typeof(cmd) === "object")
+ if (typeof (cmd) === "object")
return this._db._dbReadCommand(cmd, {}, this.getQueryOptions());
return this._db._dbReadCommand(this._makeCommand(cmd, params), {}, this.getQueryOptions());
@@ -210,7 +210,6 @@ DBCollection.prototype._massageObject = function(q) {
}
throw Error("don't know how to massage : " + type);
-
};
DBCollection.prototype.find = function(query, fields, limit, skip, batchSize, options) {
@@ -276,7 +275,7 @@ DBCollection.prototype.insert = function(obj, options) {
var allowDottedFields = false;
if (options === undefined) {
// do nothing
- } else if (typeof(options) == 'object') {
+ } else if (typeof (options) == 'object') {
if (options.ordered === undefined) {
// do nothing, like above
} else {
@@ -299,7 +298,7 @@ DBCollection.prototype.insert = function(obj, options) {
var result = undefined;
var startTime =
- (typeof(_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
+ (typeof (_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
if (this.getMongo().writeMode() != "legacy") {
// Bit 1 of option flag is continueOnError. Bit 0 (stop on error) is the default.
@@ -329,7 +328,7 @@ DBCollection.prototype.insert = function(obj, options) {
}
}
} else {
- if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
+ if (typeof (obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
obj = {_id: new ObjectId()};
for (var key in tmp) {
@@ -361,7 +360,7 @@ DBCollection.prototype._parseRemove = function(t, justOne) {
var wc = undefined;
var collation = undefined;
- if (typeof(justOne) === "object") {
+ if (typeof (justOne) === "object") {
var opts = justOne;
wc = opts.writeConcern;
justOne = opts.justOne;
@@ -390,7 +389,7 @@ DBCollection.prototype.remove = function(t, justOne) {
var result = undefined;
var startTime =
- (typeof(_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
+ (typeof (_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
if (this.getMongo().writeMode() != "legacy") {
var bulk = this.initializeOrderedBulkOp();
@@ -452,7 +451,7 @@ DBCollection.prototype._parseUpdate = function(query, updateSpec, upsert, multi)
let hint = undefined;
// can pass options via object for improved readability
- if (typeof(upsert) === "object") {
+ if (typeof (upsert) === "object") {
if (multi) {
throw Error("Fourth argument must be empty when specifying " +
"upsert and multi with an object.");
@@ -502,7 +501,7 @@ DBCollection.prototype.update = function(query, updateSpec, upsert, multi) {
var result = undefined;
var startTime =
- (typeof(_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
+ (typeof (_verboseShell) === 'undefined' || !_verboseShell) ? 0 : new Date().getTime();
if (this.getMongo().writeMode() != "legacy") {
var bulk = this.initializeOrderedBulkOp();
@@ -567,10 +566,10 @@ DBCollection.prototype.save = function(obj, opts) {
if (obj == null)
throw Error("can't save a null");
- if (typeof(obj) == "number" || typeof(obj) == "string")
+ if (typeof (obj) == "number" || typeof (obj) == "string")
throw Error("can't save a number or string");
- if (typeof(obj._id) == "undefined") {
+ if (typeof (obj._id) == "undefined") {
obj._id = new ObjectId();
return this.insert(obj, opts);
} else {
@@ -598,11 +597,11 @@ DBCollection.prototype._indexSpec = function(keys, options) {
var ret = {ns: this._fullName, key: keys, name: this._genIndexName(keys)};
if (!options) {
- } else if (typeof(options) == "string")
+ } else if (typeof (options) == "string")
ret.name = options;
- else if (typeof(options) == "boolean")
+ else if (typeof (options) == "boolean")
ret.unique = true;
- else if (typeof(options) == "object") {
+ else if (typeof (options) == "object") {
if (Array.isArray(options)) {
if (options.length > 3) {
throw new Error("Index options that are supplied in array form may only specify" +
@@ -610,9 +609,9 @@ DBCollection.prototype._indexSpec = function(keys, options) {
}
var nb = 0;
for (var i = 0; i < options.length; i++) {
- if (typeof(options[i]) == "string")
+ if (typeof (options[i]) == "string")
ret.name = options[i];
- else if (typeof(options[i]) == "boolean") {
+ else if (typeof (options[i]) == "boolean") {
if (options[i]) {
if (nb == 0)
ret.unique = true;
@@ -626,7 +625,7 @@ DBCollection.prototype._indexSpec = function(keys, options) {
Object.extend(ret, options);
}
} else {
- throw Error("can't handle: " + typeof(options));
+ throw Error("can't handle: " + typeof (options));
}
return ret;
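The three option forms parsed above, assuming they are reached through the shell's createIndex helper; collection and index names are hypothetical:

    db.users.createIndex({age: 1}, "age_idx");                     // string  -> index name
    db.users.createIndex({age: 1}, true);                          // boolean -> unique index
    db.users.createIndex({age: 1}, {unique: true, sparse: true});  // object  -> merged into spec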
@@ -780,14 +779,14 @@ DBCollection.prototype._printExtraInfo = function(action, startTime) {
DBCollection.prototype.validate = function(full) {
var cmd = {validate: this.getName()};
- if (typeof(full) == 'object') // support arbitrary options here
+ if (typeof (full) == 'object') // support arbitrary options here
Object.extend(cmd, full);
else
cmd.full = full;
var res = this._db.runCommand(cmd);
- if (typeof(res.valid) == 'undefined') {
+ if (typeof (res.valid) == 'undefined') {
// old-style format just put everything in a string. Now using proper fields
res.valid = false;
@@ -842,7 +841,7 @@ DBCollection.prototype.hashAllDocs = function() {
var res = this._dbCommand(cmd);
var hash = res.collections[this._shortName];
assert(hash);
- assert(typeof(hash) == "string");
+ assert(typeof (hash) == "string");
return hash;
};
@@ -880,14 +879,14 @@ DBCollection.prototype.getCollection = function(subName) {
};
/**
- * scale: The scale at which to deliver results. Unless specified, this command returns all data
- * in bytes.
- * indexDetails: Includes indexDetails field in results. Default: false.
- * indexDetailsKey: If indexDetails is true, filter contents in indexDetails by this index key.
- * indexDetailsname: If indexDetails is true, filter contents in indexDetails by this index name.
- *
- * It is an error to provide both indexDetailsKey and indexDetailsName.
- */
+ * scale: The scale at which to deliver results. Unless specified, this command returns all data
+ * in bytes.
+ * indexDetails: Includes indexDetails field in results. Default: false.
+ * indexDetailsKey: If indexDetails is true, filter contents in indexDetails by this index key.
+ * indexDetailsName: If indexDetails is true, filter contents in indexDetails by this index name.
+ *
+ * It is an error to provide both indexDetailsKey and indexDetailsName.
+ */
DBCollection.prototype.stats = function(args) {
'use strict';
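A sketch of the documented options (collection name hypothetical): scale byte counts down to KiB and restrict indexDetails to one index by key pattern:

    db.users.stats({scale: 1024, indexDetails: true, indexDetailsKey: {age: 1}});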
@@ -1040,8 +1039,8 @@ MapReduceResult.prototype.drop = function() {
};
/**
-* just for debugging really
-*/
+ * just for debugging really
+ */
MapReduceResult.prototype.convertToSingleObject = function() {
var z = {};
var it = this.results != null ? this.results : this._coll.find();
@@ -1060,13 +1059,13 @@ DBCollection.prototype.convertToSingleObject = function(valueField) {
};
/**
-* @param optional object of optional fields;
-*/
+ * @param optional object of optional fields;
+ */
DBCollection.prototype.mapReduce = function(map, reduce, optionsOrOutString) {
var c = {mapreduce: this._shortName, map: map, reduce: reduce};
assert(optionsOrOutString, "need to supply an optionsOrOutString");
- if (typeof(optionsOrOutString) == "string")
+ if (typeof (optionsOrOutString) == "string")
c["out"] = optionsOrOutString;
else
Object.extend(c, optionsOrOutString);
@@ -1086,7 +1085,6 @@ DBCollection.prototype.mapReduce = function(map, reduce, optionsOrOutString) {
throw _getErrorWithCode(raw, "map reduce failed:" + tojson(raw));
}
return new MapReduceResult(this._db, raw);
-
};
DBCollection.prototype.toString = function() {
@@ -1142,7 +1140,6 @@ will actually
*/
DBCollection.prototype.getShardDistribution = function() {
-
var stats = this.stats();
if (!stats.sharded) {
@@ -1175,8 +1172,8 @@ DBCollection.prototype.getShardDistribution = function() {
}
print("\nTotals");
- print(" data : " + sh._dataFormat(stats.size) + " docs : " + stats.count + " chunks : " +
- numChunks);
+ print(" data : " + sh._dataFormat(stats.size) + " docs : " + stats.count +
+ " chunks : " + numChunks);
for (var shard in stats.shards) {
var shardStats = stats.shards[shard];
@@ -1186,16 +1183,14 @@ DBCollection.prototype.getShardDistribution = function() {
(stats.count == 0) ? 0 : (Math.floor(shardStats.count / stats.count * 10000) / 100);
print(" Shard " + shard + " contains " + estDataPercent + "% data, " + estDocPercent +
- "% docs in cluster, " + "avg obj size on shard : " +
- sh._dataFormat(stats.shards[shard].avgObjSize));
+ "% docs in cluster, " +
+ "avg obj size on shard : " + sh._dataFormat(stats.shards[shard].avgObjSize));
}
print("\n");
-
};
DBCollection.prototype.getSplitKeysForChunks = function(chunkSize) {
-
var stats = this.stats();
if (!stats.sharded) {
@@ -1263,7 +1258,6 @@ DBCollection.prototype.getSplitKeysForChunks = function(chunkSize) {
var admin = this.getDB().getSiblingDB("admin");
var coll = this;
var splitFunction = function() {
-
// Turn off the balancer, just to be safe
print("Turning off balancer...");
config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
@@ -1290,11 +1284,11 @@ DBCollection.prototype.getSplitKeysForChunks = function(chunkSize) {
};
print("\nGenerated " + numSplits + " split keys, run output function to perform splits.\n" +
- " ex : \n" + " > var splitter = <collection>.getSplitKeysForChunks()\n" +
+ " ex : \n" +
+ " > var splitter = <collection>.getSplitKeysForChunks()\n" +
" > splitter() // Execute splits on cluster !\n");
return splitFunction;
-
};
DBCollection.prototype.setSlaveOk = function(value) {
@@ -1352,21 +1346,21 @@ DBCollection.prototype.unsetWriteConcern = function() {
//
/**
-* Count number of matching documents in the db to a query.
-*
-* @method
-* @param {object} query The query for the count.
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.limit=null] The limit of documents to count.
-* @param {number} [options.skip=null] The number of documents to skip for the count.
-* @param {string|object} [options.hint=null] An index name hint or specification for the query.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {string} [options.readConcern=null] The level of readConcern passed to the count command
-* @param {object} [options.collation=null] The collation that should be used for string comparisons
-* for this count op.
-* @return {number}
-*
-*/
+ * Count the number of documents in the db matching a query.
+ *
+ * @method
+ * @param {object} query The query for the count.
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.limit=null] The limit of documents to count.
+ * @param {number} [options.skip=null] The number of documents to skip for the count.
+ * @param {string|object} [options.hint=null] An index name hint or specification for the query.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {string} [options.readConcern=null] The level of readConcern passed to the count command
+ * @param {object} [options.collation=null] The collation that should be used for string comparisons
+ * for this count op.
+ * @return {number}
+ *
+ */
DBCollection.prototype.count = function(query, options) {
query = this.find(query);
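A hedged example exercising the documented count options; names and values are illustrative:

    db.orders.count({status: "A"},
                    {limit: 100, skip: 10, hint: "status_1", maxTimeMS: 500});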
@@ -1375,19 +1369,19 @@ DBCollection.prototype.count = function(query, options) {
};
/**
-* Count number of matching documents in the db to a query using aggregation.
-*
-* @method
-* @param {object} query The query for the count.
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.limit=null] The limit of documents to count.
-* @param {number} [options.skip=null] The number of documents to skip for the count.
-* @param {string|object} [options.hint=null] An index name hint or specification for the query.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {object} [options.collation=null] The collation that should be used for string comparisons
-* for this count op.
-* @return {number}
-*/
+ * Count the number of documents in the db matching a query, using aggregation.
+ *
+ * @method
+ * @param {object} query The query for the count.
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.limit=null] The limit of documents to count.
+ * @param {number} [options.skip=null] The number of documents to skip for the count.
+ * @param {string|object} [options.hint=null] An index name hint or specification for the query.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {object} [options.collation=null] The collation that should be used for string comparisons
+ * for this count op.
+ * @return {number}
+ */
DBCollection.prototype.countDocuments = function(query, options) {
"use strict";
let pipeline = [{"$match": query}];
@@ -1424,13 +1418,13 @@ DBCollection.prototype.countDocuments = function(query, options) {
};
/**
-* Estimates the count of documents in a collection using collection metadata.
-*
-* @method
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @return {number}
-*/
+ * Estimates the count of documents in a collection using collection metadata.
+ *
+ * @method
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @return {number}
+ */
DBCollection.prototype.estimatedDocumentCount = function(options) {
"use strict";
let cmd = {count: this.getName()};
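To contrast the two helpers (collection name hypothetical): countDocuments runs the $match pipeline built above and accepts query-level options, while estimatedDocumentCount reads collection metadata and therefore takes no filter:

    db.orders.countDocuments({status: "A"}, {skip: 10, limit: 100});
    db.orders.estimatedDocumentCount({maxTimeMS: 500});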
@@ -1452,17 +1446,17 @@ DBCollection.prototype.estimatedDocumentCount = function(options) {
};
/**
-* The distinct command returns returns a list of distinct values for the given key across a
-*collection.
-*
-* @method
-* @param {string} key Field of the document to find distinct values for.
-* @param {object} query The query for filtering the set of documents to which we apply the distinct
-*filter.
-* @param {object} [options=null] Optional settings.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @return {object}
-*/
+ * The distinct command returns a list of distinct values for the given key across a
+ *collection.
+ *
+ * @method
+ * @param {string} key Field of the document to find distinct values for.
+ * @param {object} query The query for filtering the set of documents to which we apply the distinct
+ *filter.
+ * @param {object} [options=null] Optional settings.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @return {object}
+ */
DBCollection.prototype.distinct = function(keyString, query, options) {
var opts = Object.extend({}, options || {});
var keyStringType = typeof keyString;
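A minimal sketch of the documented distinct signature; field, filter, and option values are illustrative:

    db.orders.distinct("status", {qty: {$gt: 10}}, {maxTimeMS: 500});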
@@ -1576,7 +1570,7 @@ PlanCache.prototype._parseQueryShape = function(query, projection, sort, collati
// Accept query shape object as only argument.
// Query shape must contain 'query', 'projection', and 'sort', and may optionally contain
// 'collation'. 'collation' must be non-empty if present.
- if (typeof(query) == 'object' && projection == undefined && sort == undefined &&
+ if (typeof (query) == 'object' && projection == undefined && sort == undefined &&
collation == undefined) {
var keysSorted = Object.keys(query).sort();
// Expected keys must be sorted for the comparison to work.
diff --git a/src/mongo/shell/crud_api.js b/src/mongo/shell/crud_api.js
index bcd245f4878..dd7d334291a 100644
--- a/src/mongo/shell/crud_api.js
+++ b/src/mongo/shell/crud_api.js
@@ -30,7 +30,7 @@ DBCollection.prototype.addIdIfNeeded = function(obj) {
if (typeof obj !== "object") {
throw new Error('argument passed to addIdIfNeeded is not an object');
}
- if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
+ if (typeof (obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
obj = {_id: new ObjectId()};
@@ -45,32 +45,32 @@ DBCollection.prototype.addIdIfNeeded = function(obj) {
};
/**
-* Perform a bulkWrite operation without a fluent API
-*
-* Legal operation types are
-*
-* { insertOne: { document: { a: 1 } } }
-*
-* { updateOne: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true, collation: {locale:
-* "fr"}, arrayFilters: [{i: 0}] } }
-*
-* { updateMany: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true collation: {locale:
-* "fr"}, arrayFilters: [{i: 0}] } }
-*
-* { deleteOne: { filter: {c:1}, collation: {locale: "fr"} } }
-*
-* { deleteMany: { filter: {c:1}, collation: {locale: "fr"} } }
-*
-* { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true, collation: {locale: "fr"} } }
-*
-* @method
-* @param {object[]} operations Bulk operations to perform.
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Perform a bulkWrite operation without a fluent API
+ *
+ * Legal operation types are
+ *
+ * { insertOne: { document: { a: 1 } } }
+ *
+ * { updateOne: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true, collation: {locale:
+ * "fr"}, arrayFilters: [{i: 0}] } }
+ *
+ * { updateMany: { filter: {a:2}, update: {$set: {"a.$[i]":2}}, upsert:true, collation: {locale:
+ * "fr"}, arrayFilters: [{i: 0}] } }
+ *
+ * { deleteOne: { filter: {c:1}, collation: {locale: "fr"} } }
+ *
+ * { deleteMany: { filter: {c:1}, collation: {locale: "fr"} } }
+ *
+ * { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true, collation: {locale: "fr"} } }
+ *
+ * @method
+ * @param {object[]} operations Bulk operations to perform.
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.bulkWrite = function(operations, options) {
var opts = Object.extend({}, options || {});
opts.ordered = (typeof opts.ordered == 'boolean') ? opts.ordered : true;
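One call combining several of the operation shapes documented above; all values are illustrative:

    db.items.bulkWrite([
        {insertOne: {document: {a: 1}}},
        {updateOne: {filter: {a: 2}, update: {$set: {a: 3}}, upsert: true}},
        {deleteMany: {filter: {c: 1}}},
        {replaceOne: {filter: {c: 3}, replacement: {c: 4}}}
    ], {ordered: true, w: "majority", wtimeout: 5000});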
@@ -221,16 +221,16 @@ DBCollection.prototype.bulkWrite = function(operations, options) {
};
/**
-* Inserts a single document into MongoDB.
-*
-* @method
-* @param {object} doc Document to insert.
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Inserts a single document into MongoDB.
+ *
+ * @method
+ * @param {object} doc Document to insert.
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.insertOne = function(document, options) {
var opts = Object.extend({}, options || {});
@@ -276,17 +276,17 @@ DBCollection.prototype.insertOne = function(document, options) {
};
/**
-* Inserts an array of documents into MongoDB.
-*
-* @method
-* @param {object[]} docs Documents to insert.
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @param {boolean} [options.ordered=true] Execute inserts in ordered or unordered fashion.
-* @return {object}
-*/
+ * Inserts an array of documents into MongoDB.
+ *
+ * @method
+ * @param {object[]} docs Documents to insert.
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @param {boolean} [options.ordered=true] Execute inserts in ordered or unordered fashion.
+ * @return {object}
+ */
DBCollection.prototype.insertMany = function(documents, options) {
var opts = Object.extend({}, options || {});
opts.ordered = (typeof opts.ordered == 'boolean') ? opts.ordered : true;
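Hedged usage of the two insert helpers documented above (collection name hypothetical):

    db.items.insertOne({name: "a"}, {w: "majority", wtimeout: 5000});
    db.items.insertMany([{name: "b"}, {name: "c"}], {ordered: false});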
@@ -327,16 +327,16 @@ DBCollection.prototype.insertMany = function(documents, options) {
};
/**
-* Delete a document on MongoDB
-*
-* @method
-* @param {object} filter The filter used to select the document to remove
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Delete a document on MongoDB
+ *
+ * @method
+ * @param {object} filter The filter used to select the document to remove
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.deleteOne = function(filter, options) {
var opts = Object.extend({}, options || {});
@@ -384,16 +384,16 @@ DBCollection.prototype.deleteOne = function(filter, options) {
};
/**
-* Delete multiple documents on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the documents to remove
-* @param {object} [options=null] Optional settings.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Delete multiple documents on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the documents to remove
+ * @param {object} [options=null] Optional settings.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.deleteMany = function(filter, options) {
var opts = Object.extend({}, options || {});
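And the matching delete pair, with the write-concern options the doc comments describe; values illustrative:

    db.items.deleteOne({name: "a"}, {w: 1});
    db.items.deleteMany({status: "stale"}, {w: "majority", wtimeout: 5000});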
@@ -441,18 +441,18 @@ DBCollection.prototype.deleteMany = function(filter, options) {
};
/**
-* Replace a document on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the document to update
-* @param {object} doc The Document that replaces the matching document
-* @param {object} [options=null] Optional settings.
-* @param {boolean} [options.upsert=false] Update operation is an upsert.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Replace a document on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the document to update
+ * @param {object} doc The Document that replaces the matching document
+ * @param {object} [options=null] Optional settings.
+ * @param {boolean} [options.upsert=false] Update operation is an upsert.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.replaceOne = function(filter, replacement, options) {
var opts = Object.extend({}, options || {});
@@ -521,18 +521,18 @@ DBCollection.prototype.replaceOne = function(filter, replacement, options) {
};
/**
-* Update a single document on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the document to update
-* @param {object} update The update operations to be applied to the document
-* @param {object} [options=null] Optional settings.
-* @param {boolean} [options.upsert=false] Update operation is an upsert.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Update a single document on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the document to update
+ * @param {object} update The update operations to be applied to the document
+ * @param {object} [options=null] Optional settings.
+ * @param {boolean} [options.upsert=false] Update operation is an upsert.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.updateOne = function(filter, update, options) {
var opts = Object.extend({}, options || {});
@@ -607,18 +607,18 @@ DBCollection.prototype.updateOne = function(filter, update, options) {
};
/**
-* Update multiple documents on MongoDB
-*
-* @method
-* @param {object} filter The Filter used to select the document to update
-* @param {object} update The update operations to be applied to the document
-* @param {object} [options=null] Optional settings.
-* @param {boolean} [options.upsert=false] Update operation is an upsert.
-* @param {(number|string)} [options.w=null] The write concern.
-* @param {number} [options.wtimeout=null] The write concern timeout.
-* @param {boolean} [options.j=false] Specify a journal write concern.
-* @return {object}
-*/
+ * Update multiple documents on MongoDB
+ *
+ * @method
+ * @param {object} filter The Filter used to select the document to update
+ * @param {object} update The update operations to be applied to the document
+ * @param {object} [options=null] Optional settings.
+ * @param {boolean} [options.upsert=false] Update operation is an upsert.
+ * @param {(number|string)} [options.w=null] The write concern.
+ * @param {number} [options.wtimeout=null] The write concern timeout.
+ * @param {boolean} [options.j=false] Specify a journal write concern.
+ * @return {object}
+ */
DBCollection.prototype.updateMany = function(filter, update, options) {
var opts = Object.extend({}, options || {});
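The replace/update trio sketched with the documented upsert and write-concern options; names and values are illustrative:

    db.items.replaceOne({c: 3}, {c: 4}, {upsert: true});
    db.items.updateOne({a: 2}, {$set: {a: 3}}, {upsert: true});
    db.items.updateMany({qty: {$lt: 10}}, {$inc: {qty: 5}}, {w: "majority"});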
@@ -693,18 +693,18 @@ DBCollection.prototype.updateMany = function(filter, update, options) {
};
/**
-* Find a document and delete it in one atomic operation,
-* requires a write lock for the duration of the operation.
-*
-* @method
-* @param {object} filter Document selection filter.
-* @param {object} [options=null] Optional settings.
-* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
-* @param {object} [options.sort=null] Determines which document the operation modifies if the query
-*selects multiple documents.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @return {object}
-*/
+ * Find a document and delete it in one atomic operation,
+ * requires a write lock for the duration of the operation.
+ *
+ * @method
+ * @param {object} filter Document selection filter.
+ * @param {object} [options=null] Optional settings.
+ * @param {object} [options.projection=null] Limits the fields to return for all matching documents.
+ * @param {object} [options.sort=null] Determines which document the operation modifies if the query
+ *selects multiple documents.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @return {object}
+ */
DBCollection.prototype.findOneAndDelete = function(filter, options) {
var opts = Object.extend({}, options || {});
// Set up the command
@@ -739,22 +739,22 @@ DBCollection.prototype.findOneAndDelete = function(filter, options) {
};
/**
-* Find a document and replace it in one atomic operation, requires a write lock for the duration of
-*the operation.
-*
-* @method
-* @param {object} filter Document selection filter.
-* @param {object} replacement Document replacing the matching document.
-* @param {object} [options=null] Optional settings.
-* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
-* @param {object} [options.sort=null] Determines which document the operation modifies if the query
-*selects multiple documents.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
-* @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
-*than the original. The default is false.
-* @return {object}
-*/
+ * Find a document and replace it in one atomic operation, requires a write lock for the duration of
+ *the operation.
+ *
+ * @method
+ * @param {object} filter Document selection filter.
+ * @param {object} replacement Document replacing the matching document.
+ * @param {object} [options=null] Optional settings.
+ * @param {object} [options.projection=null] Limits the fields to return for all matching documents.
+ * @param {object} [options.sort=null] Determines which document the operation modifies if the query
+ *selects multiple documents.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
+ * @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
+ *than the original. The default is false.
+ * @return {object}
+ */
DBCollection.prototype.findOneAndReplace = function(filter, replacement, options) {
var opts = Object.extend({}, options || {});
@@ -805,22 +805,22 @@ DBCollection.prototype.findOneAndReplace = function(filter, replacement, options
};
/**
-* Find a document and update it in one atomic operation, requires a write lock for the duration of
-*the operation.
-*
-* @method
-* @param {object} filter Document selection filter.
-* @param {object} update Update operations to be performed on the document
-* @param {object} [options=null] Optional settings.
-* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
-* @param {object} [options.sort=null] Determines which document the operation modifies if the query
-*selects multiple documents.
-* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
-* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
-* @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
-*than the original. The default is false.
-* @return {object}
-*/
+ * Find a document and update it in one atomic operation, requires a write lock for the duration of
+ *the operation.
+ *
+ * @method
+ * @param {object} filter Document selection filter.
+ * @param {object} update Update operations to be performed on the document
+ * @param {object} [options=null] Optional settings.
+ * @param {object} [options.projection=null] Limits the fields to return for all matching documents.
+ * @param {object} [options.sort=null] Determines which document the operation modifies if the query
+ *selects multiple documents.
+ * @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
+ * @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
+ * @param {boolean} [options.returnNewDocument=false] When true, returns the updated document rather
+ *than the original. The default is false.
+ * @return {object}
+ */
DBCollection.prototype.findOneAndUpdate = function(filter, update, options) {
var opts = Object.extend({}, options || {});
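The three findOneAnd* helpers documented above in one hedged sketch (collection name hypothetical); returnNewDocument selects the post-modification document:

    db.items.findOneAndDelete({state: "done"}, {sort: {ts: 1}, maxTimeMS: 500});
    db.items.findOneAndReplace({_id: 1}, {state: "new"}, {returnNewDocument: true});
    db.items.findOneAndUpdate({_id: 1}, {$inc: {n: 1}},
                              {upsert: true, returnNewDocument: true});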
diff --git a/src/mongo/shell/db.js b/src/mongo/shell/db.js
index 234f905a9db..2c34a176fb0 100644
--- a/src/mongo/shell/db.js
+++ b/src/mongo/shell/db.js
@@ -4,1881 +4,1865 @@ var DB;
(function() {
- var _defaultWriteConcern = {w: 'majority', wtimeout: 10 * 60 * 1000};
+var _defaultWriteConcern = {w: 'majority', wtimeout: 10 * 60 * 1000};
- if (DB === undefined) {
- DB = function(mongo, name) {
- this._mongo = mongo;
- this._name = name;
- };
- }
-
- DB.prototype.getMongo = function() {
- assert(this._mongo, "why no mongo!");
- return this._mongo;
- };
-
- DB.prototype.getSiblingDB = function(name) {
- return this.getSession().getDatabase(name);
+if (DB === undefined) {
+ DB = function(mongo, name) {
+ this._mongo = mongo;
+ this._name = name;
};
+}
- DB.prototype.getSisterDB = DB.prototype.getSiblingDB;
+DB.prototype.getMongo = function() {
+ assert(this._mongo, "why no mongo!");
+ return this._mongo;
+};
- DB.prototype.getName = function() {
- return this._name;
- };
+DB.prototype.getSiblingDB = function(name) {
+ return this.getSession().getDatabase(name);
+};
- DB.prototype.stats = function(scale) {
- return this.runCommand({dbstats: 1, scale: scale});
- };
+DB.prototype.getSisterDB = DB.prototype.getSiblingDB;
- DB.prototype.getCollection = function(name) {
- return new DBCollection(this._mongo, this, name, this._name + "." + name);
- };
+DB.prototype.getName = function() {
+ return this._name;
+};
- DB.prototype.commandHelp = function(name) {
- var c = {};
- c[name] = 1;
- c.help = true;
- var res = this.runCommand(c);
- if (!res.ok)
- throw _getErrorWithCode(res, res.errmsg);
- return res.help;
- };
+DB.prototype.stats = function(scale) {
+ return this.runCommand({dbstats: 1, scale: scale});
+};
- // utility to attach readPreference if needed.
- DB.prototype._attachReadPreferenceToCommand = function(cmdObj, readPref) {
- "use strict";
- // if the user has not set a readpref, return the original cmdObj
- if ((readPref === null) || typeof(readPref) !== "object") {
- return cmdObj;
- }
+DB.prototype.getCollection = function(name) {
+ return new DBCollection(this._mongo, this, name, this._name + "." + name);
+};
- // if user specifies $readPreference manually, then don't change it
- if (cmdObj.hasOwnProperty("$readPreference")) {
- return cmdObj;
- }
+DB.prototype.commandHelp = function(name) {
+ var c = {};
+ c[name] = 1;
+ c.help = true;
+ var res = this.runCommand(c);
+ if (!res.ok)
+ throw _getErrorWithCode(res, res.errmsg);
+ return res.help;
+};
+
+// utility to attach readPreference if needed.
+DB.prototype._attachReadPreferenceToCommand = function(cmdObj, readPref) {
+ "use strict";
+ // if the user has not set a readpref, return the original cmdObj
+ if ((readPref === null) || typeof (readPref) !== "object") {
+ return cmdObj;
+ }
- // copy object so we don't mutate the original
- var clonedCmdObj = Object.extend({}, cmdObj);
- // The server selection spec mandates that the key is '$query', but
- // the shell has historically used 'query'. The server accepts both,
- // so we maintain the existing behavior
- var cmdObjWithReadPref = {query: clonedCmdObj, $readPreference: readPref};
- return cmdObjWithReadPref;
- };
+ // if user specifies $readPreference manually, then don't change it
+ if (cmdObj.hasOwnProperty("$readPreference")) {
+ return cmdObj;
+ }
- /**
- * If someone passes i.e. runCommand("foo", {bar: "baz"}), we merge it in to
- * runCommand({foo: 1, bar: "baz"}).
- * If we already have a command object in the first argument, we ensure that the second
- * argument 'extraKeys' is either null or an empty object. This prevents users from accidentally
- * calling runCommand({foo: 1}, {bar: 1}) and expecting the final command invocation to be
- * runCommand({foo: 1, bar: 1}).
- * This helper abstracts that logic.
- */
- DB.prototype._mergeCommandOptions = function(obj, extraKeys) {
- "use strict";
-
- if (typeof(obj) === "object") {
- if (Object.keys(extraKeys || {}).length > 0) {
- throw Error("Unexpected second argument to DB.runCommand(): (type: " +
- typeof(extraKeys) + "): " + tojson(extraKeys));
- }
- return obj;
- } else if (typeof(obj) !== "string") {
- throw Error("First argument to DB.runCommand() must be either an object or a string: " +
- "(type: " + typeof(obj) + "): " + tojson(obj));
- }
+ // copy object so we don't mutate the original
+ var clonedCmdObj = Object.extend({}, cmdObj);
+ // The server selection spec mandates that the key is '$query', but
+ // the shell has historically used 'query'. The server accepts both,
+ // so we maintain the existing behavior
+ var cmdObjWithReadPref = {query: clonedCmdObj, $readPreference: readPref};
+ return cmdObjWithReadPref;
+};
+
+/**
+ * If someone passes e.g. runCommand("foo", {bar: "baz"}), we merge it into
+ * runCommand({foo: 1, bar: "baz"}).
+ * If we already have a command object in the first argument, we ensure that the second
+ * argument 'extraKeys' is either null or an empty object. This prevents users from accidentally
+ * calling runCommand({foo: 1}, {bar: 1}) and expecting the final command invocation to be
+ * runCommand({foo: 1, bar: 1}).
+ * This helper abstracts that logic.
+ */
+DB.prototype._mergeCommandOptions = function(obj, extraKeys) {
+ "use strict";
+
+ if (typeof (obj) === "object") {
+ if (Object.keys(extraKeys || {}).length > 0) {
+ throw Error("Unexpected second argument to DB.runCommand(): (type: " +
+ typeof (extraKeys) + "): " + tojson(extraKeys));
+ }
+ return obj;
+ } else if (typeof (obj) !== "string") {
+ throw Error("First argument to DB.runCommand() must be either an object or a string: " +
+ "(type: " + typeof (obj) + "): " + tojson(obj));
+ }
- var commandName = obj;
- var mergedCmdObj = {};
- mergedCmdObj[commandName] = 1;
-
- if (!extraKeys) {
- return mergedCmdObj;
- } else if (typeof(extraKeys) === "object") {
- // this will traverse the prototype chain of extra, but keeping
- // to maintain legacy behavior
- for (var key in extraKeys) {
- mergedCmdObj[key] = extraKeys[key];
- }
- } else {
- throw Error("Second argument to DB.runCommand(" + commandName +
- ") must be an object: (type: " + typeof(extraKeys) + "): " +
- tojson(extraKeys));
- }
+ var commandName = obj;
+ var mergedCmdObj = {};
+ mergedCmdObj[commandName] = 1;
+ if (!extraKeys) {
return mergedCmdObj;
- };
-
- // Like runCommand but applies readPreference if one has been set
- // on the connection. Also sets slaveOk if a (non-primary) readPref has been set.
- DB.prototype.runReadCommand = function(obj, extra, queryOptions) {
- "use strict";
-
- // Support users who call this function with a string commandName, e.g.
- // db.runReadCommand("commandName", {arg1: "value", arg2: "value"}).
- obj = this._mergeCommandOptions(obj, extra);
- queryOptions = queryOptions !== undefined ? queryOptions : this.getQueryOptions();
-
- {
- const session = this.getSession();
+ } else if (typeof (extraKeys) === "object") {
+ // this will traverse the prototype chain of extra, but keeping
+ // to maintain legacy behavior
+ for (var key in extraKeys) {
+ mergedCmdObj[key] = extraKeys[key];
+ }
+ } else {
+ throw Error("Second argument to DB.runCommand(" + commandName +
+ ") must be an object: (type: " + typeof (extraKeys) +
+ "): " + tojson(extraKeys));
+ }
- const readPreference = session._getSessionAwareClient().getReadPreference(session);
- if (readPreference !== null) {
- obj = this._attachReadPreferenceToCommand(obj, readPreference);
+ return mergedCmdObj;
+};
- if (readPreference.mode !== "primary") {
- // Set slaveOk if readPrefMode has been explicitly set with a readPreference
- // other than primary.
- queryOptions |= 4;
- }
- }
- }
+// Like runCommand but applies readPreference if one has been set
+// on the connection. Also sets slaveOk if a (non-primary) readPref has been set.
+DB.prototype.runReadCommand = function(obj, extra, queryOptions) {
+ "use strict";
- // The 'extra' parameter is not used as we have already created a merged command object.
- return this.runCommand(obj, null, queryOptions);
- };
+ // Support users who call this function with a string commandName, e.g.
+ // db.runReadCommand("commandName", {arg1: "value", arg2: "value"}).
+ obj = this._mergeCommandOptions(obj, extra);
+ queryOptions = queryOptions !== undefined ? queryOptions : this.getQueryOptions();
- // runCommand uses this impl to actually execute the command
- DB.prototype._runCommandImpl = function(name, obj, options) {
+ {
const session = this.getSession();
- return session._getSessionAwareClient().runCommand(session, name, obj, options);
- };
-
- DB.prototype.runCommand = function(obj, extra, queryOptions) {
- "use strict";
- // Support users who call this function with a string commandName, e.g.
- // db.runCommand("commandName", {arg1: "value", arg2: "value"}).
- var mergedObj = this._mergeCommandOptions(obj, extra);
+ const readPreference = session._getSessionAwareClient().getReadPreference(session);
+ if (readPreference !== null) {
+ obj = this._attachReadPreferenceToCommand(obj, readPreference);
- // if options were passed (i.e. because they were overridden on a collection), use them.
- // Otherwise use getQueryOptions.
- var options =
- (typeof(queryOptions) !== "undefined") ? queryOptions : this.getQueryOptions();
-
- try {
- return this._runCommandImpl(this._name, mergedObj, options);
- } catch (ex) {
- // When runCommand flowed through query, a connection error resulted in the message
- // "error doing query: failed". Even though this message is arguably incorrect
- // for a command failing due to a connection failure, we preserve it for backwards
- // compatibility. See SERVER-18334 for details.
- if (ex.message.indexOf("network error") >= 0) {
- throw new Error("error doing query: failed: " + ex.message);
+ if (readPreference.mode !== "primary") {
+ // Set slaveOk if readPrefMode has been explicitly set with a readPreference
+ // other than primary.
+ queryOptions |= 4;
}
- throw ex;
}
- };
-
- DB.prototype.runCommandWithMetadata = function(commandArgs, metadata) {
- const session = this.getSession();
- return session._getSessionAwareClient().runCommandWithMetadata(
- session, this._name, metadata, commandArgs);
- };
+ }
- DB.prototype._dbCommand = DB.prototype.runCommand;
- DB.prototype._dbReadCommand = DB.prototype.runReadCommand;
+ // The 'extra' parameter is not used as we have already created a merged command object.
+ return this.runCommand(obj, null, queryOptions);
+};
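+// Usage sketch (editor's illustration; the collection name 'coll' is hypothetical):
+// with a non-primary read preference set on the connection, the command is wrapped
+// and the slaveOk wire flag (bit 4) is added:
+//   db.getMongo().setReadPref("secondaryPreferred");
+//   db.runReadCommand({count: "coll"});   // sent as {query: {count: "coll"}, $readPreference: ...}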
+
+// runCommand uses this impl to actually execute the command
+DB.prototype._runCommandImpl = function(name, obj, options) {
+ const session = this.getSession();
+ return session._getSessionAwareClient().runCommand(session, name, obj, options);
+};
+
+DB.prototype.runCommand = function(obj, extra, queryOptions) {
+ "use strict";
+
+ // Support users who call this function with a string commandName, e.g.
+ // db.runCommand("commandName", {arg1: "value", arg2: "value"}).
+ var mergedObj = this._mergeCommandOptions(obj, extra);
+
+ // if options were passed (i.e. because they were overridden on a collection), use them.
+ // Otherwise use getQueryOptions.
+ var options = (typeof (queryOptions) !== "undefined") ? queryOptions : this.getQueryOptions();
+
+ try {
+ return this._runCommandImpl(this._name, mergedObj, options);
+ } catch (ex) {
+ // When runCommand flowed through query, a connection error resulted in the message
+ // "error doing query: failed". Even though this message is arguably incorrect
+ // for a command failing due to a connection failure, we preserve it for backwards
+ // compatibility. See SERVER-18334 for details.
+ if (ex.message.indexOf("network error") >= 0) {
+ throw new Error("error doing query: failed: " + ex.message);
+ }
+ throw ex;
+ }
+};
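+// Editor's note (illustrative): network failures are re-thrown with the legacy
+// "error doing query: failed" prefix described above, so older scripts keep matching:
+//   try {
+//       db.runCommand({ping: 1});
+//   } catch (e) {
+//       // e.message begins with "error doing query: failed: " on a dropped connection
+//   }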
- DB.prototype.adminCommand = function(obj, extra) {
- if (this._name == "admin")
- return this.runCommand(obj, extra);
- return this.getSiblingDB("admin").runCommand(obj, extra);
- };
+DB.prototype.runCommandWithMetadata = function(commandArgs, metadata) {
+ const session = this.getSession();
+ return session._getSessionAwareClient().runCommandWithMetadata(
+ session, this._name, metadata, commandArgs);
+};
- DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
+DB.prototype._dbCommand = DB.prototype.runCommand;
+DB.prototype._dbReadCommand = DB.prototype.runReadCommand;
- DB.prototype._runAggregate = function(cmdObj, aggregateOptions) {
- assert(cmdObj.pipeline instanceof Array, "cmdObj must contain a 'pipeline' array");
- assert(cmdObj.aggregate !== undefined, "cmdObj must contain 'aggregate' field");
- assert(aggregateOptions === undefined || aggregateOptions instanceof Object,
- "'aggregateOptions' argument must be an object");
+DB.prototype.adminCommand = function(obj, extra) {
+ if (this._name == "admin")
+ return this.runCommand(obj, extra);
+ return this.getSiblingDB("admin").runCommand(obj, extra);
+};
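+// Usage sketch (editor's illustration): adminCommand always targets the 'admin'
+// database, regardless of the current one:
+//   db.getSiblingDB("test").adminCommand({listDatabases: 1});   // runs on 'admin'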
- // Make a copy of the initial command object, i.e. {aggregate: x, pipeline: [...]}.
- cmdObj = Object.extend({}, cmdObj);
+DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
- // Make a copy of the aggregation options.
- let optcpy = Object.extend({}, (aggregateOptions || {}));
+DB.prototype._runAggregate = function(cmdObj, aggregateOptions) {
+ assert(cmdObj.pipeline instanceof Array, "cmdObj must contain a 'pipeline' array");
+ assert(cmdObj.aggregate !== undefined, "cmdObj must contain 'aggregate' field");
+ assert(aggregateOptions === undefined || aggregateOptions instanceof Object,
+ "'aggregateOptions' argument must be an object");
- if ('batchSize' in optcpy) {
- if (optcpy.cursor == null) {
- optcpy.cursor = {};
- }
+ // Make a copy of the initial command object, i.e. {aggregate: x, pipeline: [...]}.
+ cmdObj = Object.extend({}, cmdObj);
- optcpy.cursor.batchSize = optcpy['batchSize'];
- delete optcpy['batchSize'];
- } else if ('useCursor' in optcpy) {
- if (optcpy.cursor == null) {
- optcpy.cursor = {};
- }
+ // Make a copy of the aggregation options.
+ let optcpy = Object.extend({}, (aggregateOptions || {}));
- delete optcpy['useCursor'];
+ if ('batchSize' in optcpy) {
+ if (optcpy.cursor == null) {
+ optcpy.cursor = {};
}
- const maxAwaitTimeMS = optcpy.maxAwaitTimeMS;
- delete optcpy.maxAwaitTimeMS;
-
- // Reassign the cleaned-up options.
- aggregateOptions = optcpy;
-
- // Add the options to the command object.
- Object.extend(cmdObj, aggregateOptions);
-
- if (!('cursor' in cmdObj)) {
- cmdObj.cursor = {};
+ optcpy.cursor.batchSize = optcpy['batchSize'];
+ delete optcpy['batchSize'];
+ } else if ('useCursor' in optcpy) {
+ if (optcpy.cursor == null) {
+ optcpy.cursor = {};
}
- const pipeline = cmdObj.pipeline;
-
- // Check whether the pipeline has a stage which performs writes like $out. If not, we may
- // run on a Secondary and should attach a readPreference.
- const hasWritingStage = (function() {
- if (pipeline.length == 0) {
- return false;
- }
- const lastStage = pipeline[pipeline.length - 1];
- return lastStage.hasOwnProperty("$out") || lastStage.hasOwnProperty("$merge");
- }());
-
- const doAgg = function(cmdObj) {
- return hasWritingStage ? this.runCommand(cmdObj) : this.runReadCommand(cmdObj);
- }.bind(this);
-
- const res = doAgg(cmdObj);
-
- if (!res.ok && (res.code == 17020 || res.errmsg == "unrecognized field \"cursor") &&
- !("cursor" in aggregateOptions)) {
- // If the command failed because cursors aren't supported and the user didn't explicitly
- // request a cursor, try again without requesting a cursor.
- delete cmdObj.cursor;
+ delete optcpy['useCursor'];
+ }
- res = doAgg(cmdObj);
+ const maxAwaitTimeMS = optcpy.maxAwaitTimeMS;
+ delete optcpy.maxAwaitTimeMS;
- if ('result' in res && !("cursor" in res)) {
- // convert old-style output to cursor-style output
- res.cursor = {ns: '', id: NumberLong(0)};
- res.cursor.firstBatch = res.result;
- delete res.result;
- }
- }
+ // Reassign the cleaned-up options.
+ aggregateOptions = optcpy;
- assert.commandWorked(res, "aggregate failed");
+ // Add the options to the command object.
+ Object.extend(cmdObj, aggregateOptions);
- if ("cursor" in res) {
- let batchSizeValue = undefined;
+ if (!('cursor' in cmdObj)) {
+ cmdObj.cursor = {};
+ }
- if (cmdObj["cursor"]["batchSize"] > 0) {
- batchSizeValue = cmdObj["cursor"]["batchSize"];
- }
+ const pipeline = cmdObj.pipeline;
- return new DBCommandCursor(this, res, batchSizeValue, maxAwaitTimeMS);
+ // Check whether the pipeline has a stage which performs writes like $out. If not, we may
+ // run on a Secondary and should attach a readPreference.
+ const hasWritingStage = (function() {
+ if (pipeline.length == 0) {
+ return false;
}
+ const lastStage = pipeline[pipeline.length - 1];
+ return lastStage.hasOwnProperty("$out") || lastStage.hasOwnProperty("$merge");
+ }());
- return res;
- };
+ const doAgg = function(cmdObj) {
+ return hasWritingStage ? this.runCommand(cmdObj) : this.runReadCommand(cmdObj);
+ }.bind(this);
- DB.prototype.aggregate = function(pipeline, aggregateOptions) {
- assert(pipeline instanceof Array, "pipeline argument must be an array");
- const cmdObj = this._mergeCommandOptions("aggregate", {pipeline: pipeline});
+ let res = doAgg(cmdObj);  // may be reassigned below when retrying without a cursor
- return this._runAggregate(cmdObj, (aggregateOptions || {}));
- };
+ if (!res.ok && (res.code == 17020 || res.errmsg == "unrecognized field \"cursor") &&
+ !("cursor" in aggregateOptions)) {
+ // If the command failed because cursors aren't supported and the user didn't explicitly
+ // request a cursor, try again without requesting a cursor.
+ delete cmdObj.cursor;
- /**
- Create a new collection in the database. Normally, collection creation is automatic. You
- would
- use this function if you wish to specify special options on creation.
-
- If the collection already exists, no action occurs.
-
- <p>Options:</p>
- <ul>
- <li>
- size: desired initial extent size for the collection. Must be <= 1000000000.
- for fixed size (capped) collections, this size is the total/max size of the
- collection.
- </li>
- <li>
- capped: if true, this is a capped collection (where old data rolls out).
- </li>
- <li> max: maximum number of objects if capped (optional).</li>
- <li>
- storageEngine: BSON document containing storage engine specific options. Format:
- {
- storageEngine: {
- storageEngine1: {
- ...
- },
- storageEngine2: {
- ...
- },
- ...
- }
- }
- </li>
- </ul>
+ res = doAgg(cmdObj);
- <p>Example:</p>
- <code>db.createCollection("movies", { size: 10 * 1024 * 1024, capped:true } );</code>
-
- * @param {String} name Name of new collection to create
- * @param {Object} options Object with options for call. Options are listed above.
- * @return {Object} returned has member ok set to true if operation succeeds, false otherwise.
- */
- DB.prototype.createCollection = function(name, opt) {
- var options = opt || {};
-
- var cmd = {create: name};
- Object.extend(cmd, options);
-
- return this._dbCommand(cmd);
- };
-
- /**
- * Command to create a view based on the specified aggregation pipeline.
- * Usage: db.createView(name, viewOn, pipeline: [{ $operator: {...}}, ... ])
- *
- * @param name String - name of the new view to create
- * @param viewOn String - name of the backing view or collection
- * @param pipeline [{ $operator: {...}}, ... ] - the aggregation pipeline that defines the view
- * @param options { } - options on the view, e.g., collations
- */
- DB.prototype.createView = function(name, viewOn, pipeline, opt) {
- var options = opt || {};
-
- var cmd = {create: name};
-
- if (viewOn == undefined) {
- throw Error("Must specify a backing view or collection");
+ if ('result' in res && !("cursor" in res)) {
+ // convert old-style output to cursor-style output
+ res.cursor = {ns: '', id: NumberLong(0)};
+ res.cursor.firstBatch = res.result;
+ delete res.result;
}
+ }
- // Since we allow a single stage pipeline to be specified as an object
- // in aggregation, we need to account for that here for consistency.
- if (pipeline != undefined) {
- if (!Array.isArray(pipeline)) {
- pipeline = [pipeline];
- }
- }
- options.pipeline = pipeline;
- options.viewOn = viewOn;
-
- Object.extend(cmd, options);
+ assert.commandWorked(res, "aggregate failed");
- return this._dbCommand(cmd);
- };
+ if ("cursor" in res) {
+ let batchSizeValue = undefined;
- /**
- * @deprecated use getProfilingStatus
- * Returns the current profiling level of this database
- * @return SOMETHING_FIXME or null on error
- */
- DB.prototype.getProfilingLevel = function() {
- var res = assert.commandWorked(this._dbCommand({profile: -1}));
- return res ? res.was : null;
- };
+ if (cmdObj["cursor"]["batchSize"] > 0) {
+ batchSizeValue = cmdObj["cursor"]["batchSize"];
+ }
- /**
- * @return the current profiling status
- * example { was : 0, slowms : 100 }
- * @return SOMETHING_FIXME or null on error
- */
- DB.prototype.getProfilingStatus = function() {
- var res = this._dbCommand({profile: -1});
- if (!res.ok)
- throw _getErrorWithCode(res, "profile command failed: " + tojson(res));
- delete res.ok;
- return res;
- };
+ return new DBCommandCursor(this, res, batchSizeValue, maxAwaitTimeMS);
+ }
- /**
- * Erase the entire database.
- * @params writeConcern: (document) expresses the write concern of the drop command.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- */
- DB.prototype.dropDatabase = function(writeConcern) {
- return this._dbCommand(
- {dropDatabase: 1, writeConcern: writeConcern ? writeConcern : _defaultWriteConcern});
- };
+ return res;
+};
+
+DB.prototype.aggregate = function(pipeline, aggregateOptions) {
+ assert(pipeline instanceof Array, "pipeline argument must be an array");
+ const cmdObj = this._mergeCommandOptions("aggregate", {pipeline: pipeline});
+
+ return this._runAggregate(cmdObj, (aggregateOptions || {}));
+};
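+// Usage sketch (editor's illustration): a database-level aggregation sends
+// {aggregate: 1, pipeline: [...]} and yields a DBCommandCursor; a bare 'batchSize'
+// option is folded into the cursor document by _runAggregate above:
+//   db.getSiblingDB("admin").aggregate([{$currentOp: {}}], {batchSize: 5});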
+
+/**
+ Create a new collection in the database. Normally, collection creation is automatic.
+ You would use this function if you wish to specify special options on creation.
+
+ If the collection already exists, no action occurs.
+
+ <p>Options:</p>
+ <ul>
+ <li>
+ size: desired initial extent size for the collection. Must be <= 1000000000.
+ for fixed size (capped) collections, this size is the total/max size of the
+ collection.
+ </li>
+ <li>
+ capped: if true, this is a capped collection (where old data rolls out).
+ </li>
+ <li> max: maximum number of objects if capped (optional).</li>
+ <li>
+ storageEngine: BSON document containing storage engine specific options. Format:
+ {
+ storageEngine: {
+ storageEngine1: {
+ ...
+ },
+ storageEngine2: {
+ ...
+ },
+ ...
+ }
+ }
+ </li>
+ </ul>
+
+ <p>Example:</p>
+ <code>db.createCollection("movies", { size: 10 * 1024 * 1024, capped:true } );</code>
+
+ * @param {String} name Name of new collection to create
+ * @param {Object} options Object with options for call. Options are listed above.
+ * @return {Object} returned has member ok set to true if operation succeeds, false otherwise.
+*/
+DB.prototype.createCollection = function(name, opt) {
+ var options = opt || {};
+
+ var cmd = {create: name};
+ Object.extend(cmd, options);
+
+ return this._dbCommand(cmd);
+};
+
+/**
+ * Command to create a view based on the specified aggregation pipeline.
+ * Usage: db.createView(name, viewOn, pipeline: [{ $operator: {...}}, ... ])
+ *
+ * @param name String - name of the new view to create
+ * @param viewOn String - name of the backing view or collection
+ * @param pipeline [{ $operator: {...}}, ... ] - the aggregation pipeline that defines the view
+ * @param options { } - options on the view, e.g., collations
+ */
+DB.prototype.createView = function(name, viewOn, pipeline, opt) {
+ var options = opt || {};
+
+ var cmd = {create: name};
+
+ if (viewOn == undefined) {
+ throw Error("Must specify a backing view or collection");
+ }
- /**
- * Shuts down the database. Must be run while using the admin database.
- * @param opts Options for shutdown. Possible options are:
- * - force: (boolean) if the server should shut down, even if there is no
- * up-to-date slave
- * - timeoutSecs: (number) the server will continue checking over timeoutSecs
- * if any other servers have caught up enough for it to shut down.
- */
- DB.prototype.shutdownServer = function(opts) {
- if ("admin" != this._name) {
- return "shutdown command only works with the admin database; try 'use admin'";
+ // Since we allow a single stage pipeline to be specified as an object
+ // in aggregation, we need to account for that here for consistency.
+ if (pipeline != undefined) {
+ if (!Array.isArray(pipeline)) {
+ pipeline = [pipeline];
}
+ }
+ options.pipeline = pipeline;
+ options.viewOn = viewOn;
+
+ Object.extend(cmd, options);
+
+ return this._dbCommand(cmd);
+};
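+// Illustrative usage (editor's sketch; the names are hypothetical):
+//   db.createView("adults", "people", [{$match: {age: {$gte: 18}}}]);
+//   // a single-stage pipeline may be passed as a bare object; it is wrapped above:
+//   db.createView("adults", "people", {$match: {age: {$gte: 18}}});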
+
+/**
+ * @deprecated use getProfilingStatus
+ * Returns the current profiling level of this database
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.getProfilingLevel = function() {
+ var res = assert.commandWorked(this._dbCommand({profile: -1}));
+ return res ? res.was : null;
+};
+
+/**
+ * @return the current profiling status
+ * example { was : 0, slowms : 100 }
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.getProfilingStatus = function() {
+ var res = this._dbCommand({profile: -1});
+ if (!res.ok)
+ throw _getErrorWithCode(res, "profile command failed: " + tojson(res));
+ delete res.ok;
+ return res;
+};
+
+/**
+ * Erase the entire database.
+ * @params writeConcern: (document) expresses the write concern of the drop command.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ */
+DB.prototype.dropDatabase = function(writeConcern) {
+ return this._dbCommand(
+ {dropDatabase: 1, writeConcern: writeConcern ? writeConcern : _defaultWriteConcern});
+};
+
+/**
+ * Shuts down the database. Must be run while using the admin database.
+ * @param opts Options for shutdown. Possible options are:
+ * - force: (boolean) if the server should shut down, even if there is no
+ * up-to-date slave
+ * - timeoutSecs: (number) the server will continue checking over timeoutSecs
+ * if any other servers have caught up enough for it to shut down.
+ */
+DB.prototype.shutdownServer = function(opts) {
+ if ("admin" != this._name) {
+ return "shutdown command only works with the admin database; try 'use admin'";
+ }
- var cmd = {'shutdown': 1};
- opts = opts || {};
- for (var o in opts) {
- cmd[o] = opts[o];
- }
+ var cmd = {'shutdown': 1};
+ opts = opts || {};
+ for (var o in opts) {
+ cmd[o] = opts[o];
+ }
- try {
- var res = this.runCommand(cmd);
- if (!res.ok) {
- throw _getErrorWithCode(res, 'shutdownServer failed: ' + tojson(res));
- }
- throw Error('shutdownServer failed: server is still up.');
- } catch (e) {
- // we expect the command to not return a response, as the server will shut down
- // immediately.
- if (isNetworkError(e)) {
- print('server should be down...');
- return;
- }
- throw e;
+ try {
+ var res = this.runCommand(cmd);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, 'shutdownServer failed: ' + tojson(res));
+ }
+ throw Error('shutdownServer failed: server is still up.');
+ } catch (e) {
+ // we expect the command to not return a response, as the server will shut down
+ // immediately.
+ if (isNetworkError(e)) {
+ print('server should be down...');
+ return;
}
- };
-
- /**
- Clone database on another server to here. This functionality was removed as of MongoDB 4.2.
- The shell helper is kept to maintain compatibility with previous versions of MongoDB.
- <p>
- Generally, you should dropDatabase() first as otherwise the cloned information will MERGE
- into whatever data is already present in this database. (That is however a valid way to use
- clone if you are trying to do something intentionally, such as union three non-overlapping
- databases into one.)
- <p>
- This is a low level administrative function will is not typically used.
-
- * @param {String} from Where to clone from (dbhostname[:port]). May not be this database
- (self) as you cannot clone to yourself.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- * See also: db.copyDatabase()
- */
- DB.prototype.cloneDatabase = function(from) {
- print(
- "WARNING: db.cloneDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
- assert(isString(from) && from.length);
- return this._dbCommand({clone: from});
- };
+ throw e;
+ }
+};
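+// Usage sketch (editor's illustration), run against the admin database:
+//   db.getSiblingDB("admin").shutdownServer({force: true, timeoutSecs: 5});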
+
+/**
+ Clone database on another server to here. This functionality was removed as of MongoDB 4.2.
+ The shell helper is kept to maintain compatibility with previous versions of MongoDB.
+ <p>
+ Generally, you should dropDatabase() first as otherwise the cloned information will MERGE
+ into whatever data is already present in this database. (That is however a valid way to use
+ clone if you are trying to do something intentionally, such as union three non-overlapping
+ databases into one.)
+ <p>
+ This is a low-level administrative function that is not typically used.
+
+ * @param {String} from Where to clone from (dbhostname[:port]). May not be this database
+ (self) as you cannot clone to yourself.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.copyDatabase()
+ */
+DB.prototype.cloneDatabase = function(from) {
+ print(
+ "WARNING: db.cloneDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
+ assert(isString(from) && from.length);
+ return this._dbCommand({clone: from});
+};
+
+/**
+ Clone collection on another server to here.
+ <p>
+ Generally, you should drop() first as otherwise the cloned information will MERGE
+ into whatever data is already present in this collection. (That is however a valid way to use
+ clone if you are trying to do something intentionally, such as union three non-overlapping
+ collections into one.)
+ <p>
+ This is a low-level administrative function that is not typically used.
+
+ * @param {String} from mongod instance from which to clone (dbhostname:port). May
+ not be this mongod instance, as clone from self is not allowed.
+ * @param {String} collection name of collection to clone.
+ * @param {Object} query query specifying which elements of collection are to be cloned.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.cloneDatabase()
+ */
+DB.prototype.cloneCollection = function(from, collection, query) {
+ print(
+ "WARNING: db.cloneCollection is deprecated. See http://dochub.mongodb.org/core/clonecollection-deprecation");
+ assert(isString(from) && from.length);
+ assert(isString(collection) && collection.length);
+ collection = this._name + "." + collection;
+ query = query || {};
+ return this._dbCommand({cloneCollection: collection, from: from, query: query});
+};
+
+/**
+ Copy database from one server or name to another server or name. This functionality was
+ removed as of MongoDB 4.2. The shell helper is kept to maintain compatibility with previous
+ versions of MongoDB.
+
+ Generally, you should dropDatabase() first as otherwise the copied information will MERGE
+ into whatever data is already present in this database (and you will get duplicate objects
+ in collections potentially.)
+
+ For security reasons this function only works when executed on the "admin" db. However,
+ if you have access to said db, you can copy any database from one place to another.
+
+ This method provides a way to "rename" a database by copying it to a new db name and
+ location. Additionally, it effectively provides a repair facility.
+
+ * @param {String} fromdb database name from which to copy.
+ * @param {String} todb database name to copy to.
+ * @param {String} fromhost hostname of the database (and optionally, ":port") from which to
+ copy the data. default if unspecified is to copy from self.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.clone()
+*/
+DB.prototype.copyDatabase = function(
+ fromdb, todb, fromhost, username, password, mechanism, slaveOk) {
+ print(
+ "WARNING: db.copyDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
+ assert(isString(fromdb) && fromdb.length);
+ assert(isString(todb) && todb.length);
+ fromhost = fromhost || "";
+ if ((typeof username === "boolean") && (typeof password === "undefined") &&
+ (typeof mechanism === "undefined") && (typeof slaveOk === "undefined")) {
+ slaveOk = username;
+ username = undefined;
+ }
+ if (typeof slaveOk !== "boolean") {
+ slaveOk = false;
+ }
- /**
- Clone collection on another server to here.
- <p>
- Generally, you should drop() first as otherwise the cloned information will MERGE
- into whatever data is already present in this collection. (That is however a valid way to use
- clone if you are trying to do something intentionally, such as union three non-overlapping
- collections into one.)
- <p>
- This is a low level administrative function is not typically used.
-
- * @param {String} from mongod instance from which to clnoe (dbhostname:port). May
- not be this mongod instance, as clone from self is not allowed.
- * @param {String} collection name of collection to clone.
- * @param {Object} query query specifying which elements of collection are to be cloned.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- * See also: db.cloneDatabase()
- */
- DB.prototype.cloneCollection = function(from, collection, query) {
- print(
- "WARNING: db.cloneCollection is deprecated. See http://dochub.mongodb.org/core/clonecollection-deprecation");
- assert(isString(from) && from.length);
- assert(isString(collection) && collection.length);
- collection = this._name + "." + collection;
- query = query || {};
- return this._dbCommand({cloneCollection: collection, from: from, query: query});
- };
+ if (!mechanism) {
+ mechanism = this._getDefaultAuthenticationMechanism(username, fromdb);
+ }
+ assert(mechanism == "SCRAM-SHA-1" || mechanism == "SCRAM-SHA-256" || mechanism == "MONGODB-CR");
- /**
- Copy database from one server or name to another server or name. This functionality was
- removed as of MongoDB 4.2. The shell helper is kept to maintain compatibility with previous
- versions of MongoDB.
-
- Generally, you should dropDatabase() first as otherwise the copied information will MERGE
- into whatever data is already present in this database (and you will get duplicate objects
- in collections potentially.)
-
- For security reasons this function only works when executed on the "admin" db. However,
- if you have access to said db, you can copy any database from one place to another.
-
- This method provides a way to "rename" a database by copying it to a new db name and
- location. Additionally, it effectively provides a repair facility.
-
- * @param {String} fromdb database name from which to copy.
- * @param {String} todb database name to copy to.
- * @param {String} fromhost hostname of the database (and optionally, ":port") from which to
- copy the data. default if unspecified is to copy from self.
- * @return Object returned has member ok set to true if operation succeeds, false otherwise.
- * See also: db.clone()
- */
- DB.prototype.copyDatabase = function(
- fromdb, todb, fromhost, username, password, mechanism, slaveOk) {
- print(
- "WARNING: db.copyDatabase will only function with MongoDB 4.0 and below. See http://dochub.mongodb.org/core/4.2-copydb-clone");
- assert(isString(fromdb) && fromdb.length);
- assert(isString(todb) && todb.length);
- fromhost = fromhost || "";
- if ((typeof username === "boolean") && (typeof password === "undefined") &&
- (typeof mechanism === "undefined") && (typeof slaveOk === "undefined")) {
- slaveOk = username;
- username = undefined;
- }
- if (typeof slaveOk !== "boolean") {
- slaveOk = false;
- }
+ // Check for no auth or copying from localhost
+ if (!username || !password || fromhost == "") {
+ return this._adminCommand(
+ {copydb: 1, fromhost: fromhost, fromdb: fromdb, todb: todb, slaveOk: slaveOk});
+ }
- if (!mechanism) {
- mechanism = this._getDefaultAuthenticationMechanism(username, fromdb);
- }
- assert(mechanism == "SCRAM-SHA-1" || mechanism == "SCRAM-SHA-256" ||
- mechanism == "MONGODB-CR");
+ // Use the copyDatabase native helper for SCRAM-SHA-1/256
+ if (mechanism != "MONGODB-CR") {
+ // TODO SERVER-30886: Add session support for Mongo.prototype.copyDatabaseWithSCRAM().
+ return this.getMongo().copyDatabaseWithSCRAM(
+ fromdb, todb, fromhost, username, password, slaveOk);
+ }
- // Check for no auth or copying from localhost
- if (!username || !password || fromhost == "") {
- return this._adminCommand(
- {copydb: 1, fromhost: fromhost, fromdb: fromdb, todb: todb, slaveOk: slaveOk});
+ // Fall back to MONGODB-CR
+ var n = assert.commandWorked(this._adminCommand({copydbgetnonce: 1, fromhost: fromhost}));
+ return this._adminCommand({
+ copydb: 1,
+ fromhost: fromhost,
+ fromdb: fromdb,
+ todb: todb,
+ username: username,
+ nonce: n.nonce,
+ key: this.__pwHash(n.nonce, username, password),
+ slaveOk: slaveOk,
+ });
+};
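+// Illustrative call (editor's sketch; host and database names are hypothetical).
+// Only functional against MongoDB 4.0 and below, per the warning above:
+//   db.copyDatabase("olddb", "newdb", "host.example.com:27017");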
+
+DB.prototype.help = function() {
+ print("DB methods:");
+ print(
+ "\tdb.adminCommand(nameOrDocument) - switches to 'admin' db, and runs command [just calls db.runCommand(...)]");
+ print(
+ "\tdb.aggregate([pipeline], {options}) - performs a collectionless aggregation on this database; returns a cursor");
+ print("\tdb.auth(username, password)");
+ print("\tdb.cloneDatabase(fromhost) - will only function with MongoDB 4.0 and below");
+ print("\tdb.commandHelp(name) returns the help for the command");
+ print(
+ "\tdb.copyDatabase(fromdb, todb, fromhost) - will only function with MongoDB 4.0 and below");
+ print("\tdb.createCollection(name, {size: ..., capped: ..., max: ...})");
+ print("\tdb.createUser(userDocument)");
+ print("\tdb.createView(name, viewOn, [{$operator: {...}}, ...], {viewOptions})");
+ print("\tdb.currentOp() displays currently executing operations in the db");
+ print("\tdb.dropDatabase(writeConcern)");
+ print("\tdb.dropUser(username)");
+ print("\tdb.eval() - deprecated");
+ print("\tdb.fsyncLock() flush data to disk and lock server for backups");
+ print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
+ print("\tdb.getCollection(cname) same as db['cname'] or db.cname");
+ print("\tdb.getCollectionInfos([filter]) - returns a list that contains the names and options" +
+ " of the db's collections");
+ print("\tdb.getCollectionNames()");
+ print("\tdb.getLastError() - just returns the err msg string");
+ print("\tdb.getLastErrorObj() - return full status object");
+ print("\tdb.getLogComponents()");
+ print("\tdb.getMongo() get the server connection object");
+ print("\tdb.getMongo().setSlaveOk() allow queries on a replication slave server");
+ print("\tdb.getName()");
+ print("\tdb.getProfilingLevel() - deprecated");
+ print("\tdb.getProfilingStatus() - returns if profiling is on and slow threshold");
+ print("\tdb.getReplicationInfo()");
+ print("\tdb.getSiblingDB(name) get the db at the same server as this one");
+ print(
+ "\tdb.getWriteConcern() - returns the write concern used for any operations on this db, inherited from server object if set");
+ print("\tdb.hostInfo() get details about the server's host");
+ print("\tdb.isMaster() check replica primary status");
+ print("\tdb.killOp(opid) kills the current operation in the db");
+ print("\tdb.listCommands() lists all the db commands");
+ print("\tdb.loadServerScripts() loads all the scripts in db.system.js");
+ print("\tdb.logout()");
+ print("\tdb.printCollectionStats()");
+ print("\tdb.printReplicationInfo()");
+ print("\tdb.printShardingStatus()");
+ print("\tdb.printSlaveReplicationInfo()");
+ print("\tdb.resetError()");
+ print(
+ "\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into {cmdObj: 1}");
+ print("\tdb.serverStatus()");
+ print("\tdb.setLogLevel(level,<component>)");
+ print("\tdb.setProfilingLevel(level,slowms) 0=off 1=slow 2=all");
+ print("\tdb.setVerboseShell(flag) display extra information in shell output");
+ print(
+ "\tdb.setWriteConcern(<write concern doc>) - sets the write concern for writes to the db");
+ print("\tdb.shutdownServer()");
+ print("\tdb.stats()");
+ print(
+ "\tdb.unsetWriteConcern(<write concern doc>) - unsets the write concern for writes to the db");
+ print("\tdb.version() current version of the server");
+ print("\tdb.watch() - opens a change stream cursor for a database to report on all " +
+ " changes to its non-system collections.");
+ return __magicNoPrint;
+};
+
+DB.prototype.printCollectionStats = function(scale) {
+ if (arguments.length > 1) {
+ print("printCollectionStats() has a single optional argument (scale)");
+ return;
+ }
+ if (typeof scale != 'undefined') {
+ if (typeof scale != 'number') {
+ print("scale has to be a number >= 1");
+ return;
}
-
- // Use the copyDatabase native helper for SCRAM-SHA-1/256
- if (mechanism != "MONGODB-CR") {
- // TODO SERVER-30886: Add session support for Mongo.prototype.copyDatabaseWithSCRAM().
- return this.getMongo().copyDatabaseWithSCRAM(
- fromdb, todb, fromhost, username, password, slaveOk);
+ if (scale < 1) {
+ print("scale has to be >= 1");
+ return;
}
+ }
+ var mydb = this;
+ this.getCollectionNames().forEach(function(z) {
+ print(z);
+ printjson(mydb.getCollection(z).stats(scale));
+ print("---");
+ });
+};
+
+/**
+ * Configures settings for capturing operations inside the system.profile collection and in the
+ * slow query log.
+ *
+ * The 'level' can be 0, 1, or 2:
+ * - 0 means that profiling is off and nothing will be written to system.profile.
+ * - 1 means that profiling is on for operations slower than the currently configured 'slowms'
+ * threshold (more on 'slowms' below).
+ * - 2 means that profiling is on for all operations, regardless of whether or not they are
+ * slower than 'slowms'.
+ *
+ * The 'options' parameter, if a number, is interpreted as the 'slowms' value to send to the
+ * server. 'slowms' determines the threshold, in milliseconds, above which slow operations get
+ * profiled at profiling level 1 or logged at logLevel 0.
+ *
+ * If 'options' is not a number, it is expected to be an object containing additional parameters
+ * to get passed to the server. For example, db.setProfilingLevel(2, {foo: "bar"}) will issue
+ * the command {profile: 2, foo: "bar"} to the server.
+ */
+DB.prototype.setProfilingLevel = function(level, options) {
+ if (level < 0 || level > 2) {
+ var errorText = "input level " + level + " is out of range [0..2]";
+ var errorObject = new Error(errorText);
+ errorObject['dbSetProfilingException'] = errorText;
+ throw errorObject;
+ }
- // Fall back to MONGODB-CR
- var n = assert.commandWorked(this._adminCommand({copydbgetnonce: 1, fromhost: fromhost}));
- return this._adminCommand({
- copydb: 1,
- fromhost: fromhost,
- fromdb: fromdb,
- todb: todb,
- username: username,
- nonce: n.nonce,
- key: this.__pwHash(n.nonce, username, password),
- slaveOk: slaveOk,
- });
- };
-
- DB.prototype.help = function() {
- print("DB methods:");
- print(
- "\tdb.adminCommand(nameOrDocument) - switches to 'admin' db, and runs command [just calls db.runCommand(...)]");
- print(
- "\tdb.aggregate([pipeline], {options}) - performs a collectionless aggregation on this database; returns a cursor");
- print("\tdb.auth(username, password)");
- print("\tdb.cloneDatabase(fromhost) - will only function with MongoDB 4.0 and below");
- print("\tdb.commandHelp(name) returns the help for the command");
- print(
- "\tdb.copyDatabase(fromdb, todb, fromhost) - will only function with MongoDB 4.0 and below");
- print("\tdb.createCollection(name, {size: ..., capped: ..., max: ...})");
- print("\tdb.createUser(userDocument)");
- print("\tdb.createView(name, viewOn, [{$operator: {...}}, ...], {viewOptions})");
- print("\tdb.currentOp() displays currently executing operations in the db");
- print("\tdb.dropDatabase(writeConcern)");
- print("\tdb.dropUser(username)");
- print("\tdb.eval() - deprecated");
- print("\tdb.fsyncLock() flush data to disk and lock server for backups");
- print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
- print("\tdb.getCollection(cname) same as db['cname'] or db.cname");
- print(
- "\tdb.getCollectionInfos([filter]) - returns a list that contains the names and options" +
- " of the db's collections");
- print("\tdb.getCollectionNames()");
- print("\tdb.getLastError() - just returns the err msg string");
- print("\tdb.getLastErrorObj() - return full status object");
- print("\tdb.getLogComponents()");
- print("\tdb.getMongo() get the server connection object");
- print("\tdb.getMongo().setSlaveOk() allow queries on a replication slave server");
- print("\tdb.getName()");
- print("\tdb.getProfilingLevel() - deprecated");
- print("\tdb.getProfilingStatus() - returns if profiling is on and slow threshold");
- print("\tdb.getReplicationInfo()");
- print("\tdb.getSiblingDB(name) get the db at the same server as this one");
- print(
- "\tdb.getWriteConcern() - returns the write concern used for any operations on this db, inherited from server object if set");
- print("\tdb.hostInfo() get details about the server's host");
- print("\tdb.isMaster() check replica primary status");
- print("\tdb.killOp(opid) kills the current operation in the db");
- print("\tdb.listCommands() lists all the db commands");
- print("\tdb.loadServerScripts() loads all the scripts in db.system.js");
- print("\tdb.logout()");
- print("\tdb.printCollectionStats()");
- print("\tdb.printReplicationInfo()");
- print("\tdb.printShardingStatus()");
- print("\tdb.printSlaveReplicationInfo()");
- print("\tdb.resetError()");
- print(
- "\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into {cmdObj: 1}");
- print("\tdb.serverStatus()");
- print("\tdb.setLogLevel(level,<component>)");
- print("\tdb.setProfilingLevel(level,slowms) 0=off 1=slow 2=all");
- print("\tdb.setVerboseShell(flag) display extra information in shell output");
- print(
- "\tdb.setWriteConcern(<write concern doc>) - sets the write concern for writes to the db");
- print("\tdb.shutdownServer()");
- print("\tdb.stats()");
- print(
- "\tdb.unsetWriteConcern(<write concern doc>) - unsets the write concern for writes to the db");
- print("\tdb.version() current version of the server");
- print("\tdb.watch() - opens a change stream cursor for a database to report on all " +
- " changes to its non-system collections.");
- return __magicNoPrint;
- };
+ var cmd = {profile: level};
+ if (isNumber(options)) {
+ cmd.slowms = options;
+ } else {
+ cmd = Object.extend(cmd, options);
+ }
+ return assert.commandWorked(this._dbCommand(cmd));
+};
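+// Usage sketches (editor's illustration), matching the comment above:
+//   db.setProfilingLevel(1, 200);           // profile operations slower than 200ms
+//   db.setProfilingLevel(2, {foo: "bar"});  // issues {profile: 2, foo: "bar"}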
+
+/**
+ * @deprecated
+ * <p> Evaluate a js expression at the database server.</p>
+ *
+ * <p>Useful if you need to touch a lot of data lightly; in such a scenario
+ * the network transfer of the data could be a bottleneck. A good example
+ * is "select count(*)" -- can be done server side via this mechanism.
+ * </p>
+ *
+ * <p>
+ * If the eval fails, an exception is thrown of the form:
+ * </p>
+ * <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg:
+ *str] } }</code>
+ *
+ * <p>Example: </p>
+ * <code>print( "mycount: " + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();}
+ *);</code>
+ *
+ * @param {Function} jsfunction JavaScript function to run on the server. Note this is not a
+ *closure, but rather just "code".
+ * @return result of your function, or null if error
+ *
+ */
+DB.prototype.eval = function(jsfunction) {
+ print("WARNING: db.eval is deprecated");
+
+ var cmd = {$eval: jsfunction};
+ if (arguments.length > 1) {
+ cmd.args = Array.from(arguments).slice(1);
+ }
- DB.prototype.printCollectionStats = function(scale) {
- if (arguments.length > 1) {
- print("printCollectionStats() has a single optional argument (scale)");
- return;
- }
- if (typeof scale != 'undefined') {
- if (typeof scale != 'number') {
- print("scale has to be a number >= 1");
- return;
- }
- if (scale < 1) {
- print("scale has to be >= 1");
- return;
+ var res = this._dbCommand(cmd);
+
+ if (!res.ok)
+ throw _getErrorWithCode(res, tojson(res));
+
+ return res.retval;
+};
+
+DB.prototype.dbEval = DB.prototype.eval;
+
+/**
+ * <p>
+ * An array of grouped items is returned. The array must fit in RAM, thus this function is not
+ * suitable when the return set is extremely large.
+ * </p>
+ * <p>
+ * To order the grouped data, simply sort it client side upon return.
+ * <p>
+ Defaults:
+ cond may be null if you want to run against all rows in the collection.
+ keyf is a function which takes an object and returns the desired key. Set either key or
+ keyf (not both).
+ * </p>
+ */
+DB.prototype.groupeval = function(parmsObj) {
+ var groupFunction = function() {
+ var parms = args[0];
+ var c = db[parms.ns].find(parms.cond || {});
+ var map = new Map();
+ var pks = parms.key ? Object.keySet(parms.key) : null;
+ var pkl = pks ? pks.length : 0;
+ var key = {};
+
+ while (c.hasNext()) {
+ var obj = c.next();
+ if (pks) {
+ for (var i = 0; i < pkl; i++) {
+ var k = pks[i];
+ key[k] = obj[k];
+ }
+ } else {
+ key = parms.$keyf(obj);
}
- }
- var mydb = this;
- this.getCollectionNames().forEach(function(z) {
- print(z);
- printjson(mydb.getCollection(z).stats(scale));
- print("---");
- });
- };
- /**
- * Configures settings for capturing operations inside the system.profile collection and in the
- * slow query log.
- *
- * The 'level' can be 0, 1, or 2:
- * - 0 means that profiling is off and nothing will be written to system.profile.
- * - 1 means that profiling is on for operations slower than the currently configured 'slowms'
- * threshold (more on 'slowms' below).
- * - 2 means that profiling is on for all operations, regardless of whether or not they are
- * slower than 'slowms'.
- *
- * The 'options' parameter, if a number, is interpreted as the 'slowms' value to send to the
- * server. 'slowms' determines the threshold, in milliseconds, above which slow operations get
- * profiled at profiling level 1 or logged at logLevel 0.
- *
- * If 'options' is not a number, it is expected to be an object containing additional parameters
- * to get passed to the server. For example, db.setProfilingLevel(2, {foo: "bar"}) will issue
- * the command {profile: 2, foo: "bar"} to the server.
- */
- DB.prototype.setProfilingLevel = function(level, options) {
- if (level < 0 || level > 2) {
- var errorText = "input level " + level + " is out of range [0..2]";
- var errorObject = new Error(errorText);
- errorObject['dbSetProfilingException'] = errorText;
- throw errorObject;
+ var aggObj = map.get(key);
+ if (aggObj == null) {
+ var newObj = Object.extend({}, key); // clone
+ aggObj = Object.extend(newObj, parms.initial);
+ map.put(key, aggObj);
+ }
+ parms.$reduce(obj, aggObj);
}
- var cmd = {profile: level};
- if (isNumber(options)) {
- cmd.slowms = options;
- } else {
- cmd = Object.extend(cmd, options);
- }
- return assert.commandWorked(this._dbCommand(cmd));
+ return map.values();
};
- /**
- * @deprecated
- * <p> Evaluate a js expression at the database server.</p>
- *
- * <p>Useful if you need to touch a lot of data lightly; in such a scenario
- * the network transfer of the data could be a bottleneck. A good example
- * is "select count(*)" -- can be done server side via this mechanism.
- * </p>
- *
- * <p>
- * If the eval fails, an exception is thrown of the form:
- * </p>
- * <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg:
- *str] } }</code>
- *
- * <p>Example: </p>
- * <code>print( "mycount: " + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();}
- *);</code>
- *
- * @param {Function} jsfunction Javascript function to run on server. Note this it not a
- *closure, but rather just "code".
- * @return result of your function, or null if error
- *
- */
- DB.prototype.eval = function(jsfunction) {
- print("WARNING: db.eval is deprecated");
-
- var cmd = {$eval: jsfunction};
- if (arguments.length > 1) {
- cmd.args = Array.from(arguments).slice(1);
- }
+ return this.eval(groupFunction, this._groupFixParms(parmsObj));
+};
- var res = this._dbCommand(cmd);
+DB.prototype._groupFixParms = function(parmsObj) {
+ var parms = Object.extend({}, parmsObj);
- if (!res.ok)
- throw _getErrorWithCode(res, tojson(res));
+ if (parms.reduce) {
+ parms.$reduce = parms.reduce; // must have $ to pass to db
+ delete parms.reduce;
+ }
- return res.retval;
- };
+ if (parms.keyf) {
+ parms.$keyf = parms.keyf;
+ delete parms.keyf;
+ }
- DB.prototype.dbEval = DB.prototype.eval;
-
- /**
- * <p>
- * An array of grouped items is returned. The array must fit in RAM, thus this function is not
- * suitable when the return set is extremely large.
- * </p>
- * <p>
- * To order the grouped data, simply sort it client side upon return.
- * <p>
- Defaults
- cond may be null if you want to run against all rows in the collection
- keyf is a function which takes an object and returns the desired key. set either key or
- keyf (not both).
- * </p>
- */
- DB.prototype.groupeval = function(parmsObj) {
-
- var groupFunction = function() {
- var parms = args[0];
- var c = db[parms.ns].find(parms.cond || {});
- var map = new Map();
- var pks = parms.key ? Object.keySet(parms.key) : null;
- var pkl = pks ? pks.length : 0;
- var key = {};
-
- while (c.hasNext()) {
- var obj = c.next();
- if (pks) {
- for (var i = 0; i < pkl; i++) {
- var k = pks[i];
- key[k] = obj[k];
- }
- } else {
- key = parms.$keyf(obj);
- }
+ return parms;
+};
+
+DB.prototype.resetError = function() {
+ return this.runCommand({reseterror: 1});
+};
+
+DB.prototype.forceError = function() {
+ return this.runCommand({forceerror: 1});
+};
+
+DB.prototype.getLastError = function(w, wtimeout) {
+ var res = this.getLastErrorObj(w, wtimeout);
+ if (!res.ok)
+ throw _getErrorWithCode(res, "getlasterror failed: " + tojson(res));
+ return res.err;
+};
+DB.prototype.getLastErrorObj = function(w, wtimeout, j) {
+ var cmd = {getlasterror: 1};
+ if (w) {
+ cmd.w = w;
+ if (wtimeout)
+ cmd.wtimeout = wtimeout;
+ if (j != null)
+ cmd.j = j;
+ }
+ var res = this.runCommand(cmd);
+
+ if (!res.ok)
+ throw _getErrorWithCode(res, "getlasterror failed: " + tojson(res));
+ return res;
+};
+DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;
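+// Usage sketch (editor's illustration): wait for majority acknowledgement of the
+// last write on this connection, with a 5 second timeout:
+//   db.getLastErrorObj("majority", 5000);   // sends {getlasterror: 1, w: "majority", wtimeout: 5000}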
+
+DB.prototype._getCollectionInfosCommand = function(
+ filter, nameOnly = false, authorizedCollections = false, options = {}) {
+ filter = filter || {};
+ const cmd = {
+ listCollections: 1,
+ filter: filter,
+ nameOnly: nameOnly,
+ authorizedCollections: authorizedCollections
+ };
+
+ const res = this.runCommand(Object.merge(cmd, options));
+ if (!res.ok) {
+ throw _getErrorWithCode(res, "listCollections failed: " + tojson(res));
+ }
- var aggObj = map.get(key);
- if (aggObj == null) {
- var newObj = Object.extend({}, key); // clone
- aggObj = Object.extend(newObj, parms.initial);
- map.put(key, aggObj);
- }
- parms.$reduce(obj, aggObj);
- }
+ return new DBCommandCursor(this, res).toArray().sort(compareOn("name"));
+};
- return map.values();
- };
+DB.prototype._getCollectionInfosFromPrivileges = function() {
+ let ret = this.runCommand({connectionStatus: 1, showPrivileges: 1});
+ if (!ret.ok) {
+ throw _getErrorWithCode(ret, "Failed to acquire collection information from privileges");
+ }
- return this.eval(groupFunction, this._groupFixParms(parmsObj));
- };
+ // Parse apart collection information.
+ let result = [];
- DB.prototype._groupFixParms = function(parmsObj) {
- var parms = Object.extend({}, parmsObj);
+ let privileges = ret.authInfo.authenticatedUserPrivileges;
+ if (privileges === undefined) {
+ return result;
+ }
- if (parms.reduce) {
- parms.$reduce = parms.reduce; // must have $ to pass to db
- delete parms.reduce;
+ privileges.forEach(privilege => {
+ let resource = privilege.resource;
+ if (resource === undefined) {
+ return;
}
-
- if (parms.keyf) {
- parms.$keyf = parms.keyf;
- delete parms.keyf;
+ let db = resource.db;
+ if (db === undefined || db !== this.getName()) {
+ return;
}
-
- return parms;
- };
-
- DB.prototype.resetError = function() {
- return this.runCommand({reseterror: 1});
- };
-
- DB.prototype.forceError = function() {
- return this.runCommand({forceerror: 1});
- };
-
- DB.prototype.getLastError = function(w, wtimeout) {
- var res = this.getLastErrorObj(w, wtimeout);
- if (!res.ok)
- throw _getErrorWithCode(ret, "getlasterror failed: " + tojson(res));
- return res.err;
- };
- DB.prototype.getLastErrorObj = function(w, wtimeout, j) {
- var cmd = {getlasterror: 1};
- if (w) {
- cmd.w = w;
- if (wtimeout)
- cmd.wtimeout = wtimeout;
- if (j != null)
- cmd.j = j;
+ let collection = resource.collection;
+ if (collection === undefined || typeof collection !== "string" || collection === "") {
+ return;
}
- var res = this.runCommand(cmd);
- if (!res.ok)
- throw _getErrorWithCode(res, "getlasterror failed: " + tojson(res));
- return res;
- };
- DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;
-
- DB.prototype._getCollectionInfosCommand = function(
- filter, nameOnly = false, authorizedCollections = false, options = {}) {
- filter = filter || {};
- const cmd = {
- listCollections: 1,
- filter: filter,
- nameOnly: nameOnly,
- authorizedCollections: authorizedCollections
- };
-
- const res = this.runCommand(Object.merge(cmd, options));
- if (!res.ok) {
- throw _getErrorWithCode(res, "listCollections failed: " + tojson(res));
+ result.push({name: collection});
+ });
+
+ return result.sort(compareOn("name"));
+};
+
+/**
+ * Returns a list that contains the names and options of this database's collections, sorted
+ * by collection name. An optional filter can be specified to match only collections with
+ * certain metadata.
+ */
+DB.prototype.getCollectionInfos = function(
+ filter, nameOnly = false, authorizedCollections = false) {
+ try {
+ return this._getCollectionInfosCommand(filter, nameOnly, authorizedCollections);
+ } catch (ex) {
+ if (ex.code !== ErrorCodes.Unauthorized) {
+ // We cannot recover from this error, propagate it.
+ throw ex;
}
- return new DBCommandCursor(this, res).toArray().sort(compareOn("name"));
- };
+ // We may be able to compute a set of *some* collections which exist and we have access
+ // to from our privileges. For this to work, the previous operation must have failed due
+ // to authorization, we must be attempting to recover the names of our own collections,
+ // and no filter can have been provided.
- DB.prototype._getCollectionInfosFromPrivileges = function() {
- let ret = this.runCommand({connectionStatus: 1, showPrivileges: 1});
- if (!ret.ok) {
- throw _getErrorWithCode(res,
- "Failed to acquire collection information from privileges");
+ if (nameOnly && authorizedCollections &&
+ Object.getOwnPropertyNames(filter || {}).length === 0 &&
+ ex.code === ErrorCodes.Unauthorized) {
+ print(
+ "Warning: unable to run listCollections, attempting to approximate collection names by parsing connectionStatus");
+ return this._getCollectionInfosFromPrivileges();
}
- // Parse apart collection information.
- let result = [];
+ throw ex;
+ }
+};
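+// Usage sketch (editor's illustration): the filter matches listCollections
+// metadata, e.g. only capped collections:
+//   db.getCollectionInfos({"options.capped": true});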
+
+DB.prototype._getCollectionNamesInternal = function(options) {
+ return this._getCollectionInfosCommand({}, true, true, options).map(function(infoObj) {
+ return infoObj.name;
+ });
+};
+
+/**
+ * Returns this database's list of collection names in sorted order.
+ */
+DB.prototype.getCollectionNames = function() {
+ return this._getCollectionNamesInternal({});
+};
+
+DB.prototype.tojson = function() {
+ return this._name;
+};
+
+DB.prototype.toString = function() {
+ return this._name;
+};
+
+DB.prototype.isMaster = function() {
+ return this.runCommand("isMaster");
+};
+
+var commandUnsupported = function(res) {
+ return (!res.ok &&
+ (res.errmsg.startsWith("no such cmd") || res.errmsg.startsWith("no such command") ||
+ res.code === 59 /* CommandNotFound */));
+};
+
+DB.prototype.currentOp = function(arg) {
+ var q = {};
+ if (arg) {
+ if (typeof (arg) == "object")
+ Object.extend(q, arg);
+ else if (arg)
+ q["$all"] = true;
+ }
- let privileges = ret.authInfo.authenticatedUserPrivileges;
- if (privileges === undefined) {
- return result;
+ var commandObj = {"currentOp": 1};
+ Object.extend(commandObj, q);
+ var res = this.adminCommand(commandObj);
+ if (commandUnsupported(res)) {
+ // always send legacy currentOp with default (null) read preference (SERVER-17951)
+ const session = this.getSession();
+ const readPreference = session.getOptions().getReadPreference();
+ try {
+ session.getOptions().setReadPreference(null);
+ res = this.getSiblingDB("admin").$cmd.sys.inprog.findOne(q);
+ } finally {
+ session.getOptions().setReadPreference(readPreference);
}
-
- privileges.forEach(privilege => {
- let resource = privilege.resource;
- if (resource === undefined) {
- return;
- }
- let db = resource.db;
- if (db === undefined || db !== this.getName()) {
- return;
- }
- let collection = resource.collection;
- if (collection === undefined || typeof collection !== "string" || collection === "") {
- return;
- }
-
- result.push({name: collection});
- });
-
- return result.sort(compareOn("name"));
- };
-
- /**
- * Returns a list that contains the names and options of this database's collections, sorted
- * by collection name. An optional filter can be specified to match only collections with
- * certain metadata.
- */
- DB.prototype.getCollectionInfos = function(
- filter, nameOnly = false, authorizedCollections = false) {
+ }
+ return res;
+};
+DB.prototype.currentOP = DB.prototype.currentOp;
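+// Usage sketches (editor's illustration), per the argument handling above:
+//   db.currentOp({active: true});   // object arguments are merged into the command
+//   db.currentOp(true);             // a truthy non-object sets {$all: true}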
+
+DB.prototype.killOp = function(op) {
+ if (!op)
+ throw Error("no opNum to kill specified");
+ var res = this.adminCommand({'killOp': 1, 'op': op});
+ if (commandUnsupported(res)) {
+ // fall back for old servers
+ const session = this.getSession();
+ const readPreference = session.getOptions().getReadPreference();
try {
- return this._getCollectionInfosCommand(filter, nameOnly, authorizedCollections);
- } catch (ex) {
- if (ex.code !== ErrorCodes.Unauthorized) {
- // We cannot recover from this error, propagate it.
- throw ex;
- }
-
- // We may be able to compute a set of *some* collections which exist and we have access
- // to from our privileges. For this to work, the previous operation must have failed due
- // to authorization, we must be attempting to recover the names of our own collections,
- // and no filter can have been provided.
-
- if (nameOnly && authorizedCollections &&
- Object.getOwnPropertyNames(filter).length === 0 &&
- ex.code === ErrorCodes.Unauthorized) {
- print(
- "Warning: unable to run listCollections, attempting to approximate collection names by parsing connectionStatus");
- return this._getCollectionInfosFromPrivileges();
- }
-
- throw ex;
+ session.getOptions().setReadPreference(null);
+ res = this.getSiblingDB("admin").$cmd.sys.killop.findOne({'op': op});
+ } finally {
+ session.getOptions().setReadPreference(readPreference);
}
- };
-
- DB.prototype._getCollectionNamesInternal = function(options) {
- return this._getCollectionInfosCommand({}, true, true, options).map(function(infoObj) {
- return infoObj.name;
- });
- };
-
- /**
- * Returns this database's list of collection names in sorted order.
- */
- DB.prototype.getCollectionNames = function() {
- return this._getCollectionNamesInternal({});
- };
+ }
+ return res;
+};
+DB.prototype.killOP = DB.prototype.killOp;
+
+DB.tsToSeconds = function(x) {
+ if (x.t && x.i)
+ return x.t;
+ return x / 4294967296; // low 32 bits are ordinal #s within a second
+};
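+// Editor's sketch: accepts either a BSON Timestamp (returning its 't' seconds field
+// when the increment is nonzero) or a raw 64-bit value whose high 32 bits are seconds:
+//   DB.tsToSeconds(new Timestamp(1564000000, 3));   // -> 1564000000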
+
+/**
+ Get a replication log information summary.
+ <p>
+ This command is for the database/cloud administrator and not applicable to most databases.
+ It is only used with the local database. One might invoke from the JS shell:
+ <pre>
+ use local
+ db.getReplicationInfo();
+ </pre>
+ * @return Object timeSpan: time span of the oplog from start to end; if a slave is more out
+ * of date than that, it can't recover without a complete resync
+*/
+DB.prototype.getReplicationInfo = function() {
+ var localdb = this.getSiblingDB("local");
+
+ var result = {};
+ var oplog;
+ var localCollections = localdb.getCollectionNames();
+ if (localCollections.indexOf('oplog.rs') >= 0) {
+ oplog = 'oplog.rs';
+ } else {
+ result.errmsg = "replication not detected";
+ return result;
+ }
- DB.prototype.tojson = function() {
- return this._name;
- };
+ var ol = localdb.getCollection(oplog);
+ var ol_stats = ol.stats();
+ if (ol_stats && ol_stats.maxSize) {
+ result.logSizeMB = ol_stats.maxSize / (1024 * 1024);
+ } else {
+ result.errmsg = "Could not get stats for local." + oplog + " collection. " +
+ "collstats returned: " + tojson(ol_stats);
+ return result;
+ }
- DB.prototype.toString = function() {
- return this._name;
- };
+ result.usedMB = ol_stats.size / (1024 * 1024);
+ result.usedMB = Math.ceil(result.usedMB * 100) / 100;
- DB.prototype.isMaster = function() {
- return this.runCommand("isMaster");
- };
+ var firstc = ol.find().sort({$natural: 1}).limit(1);
+ var lastc = ol.find().sort({$natural: -1}).limit(1);
+ if (!firstc.hasNext() || !lastc.hasNext()) {
+ result.errmsg =
+ "objects not found in local.oplog.$main -- is this a new and empty db instance?";
+ result.oplogMainRowCount = ol.count();
+ return result;
+ }
- var commandUnsupported = function(res) {
- return (!res.ok &&
- (res.errmsg.startsWith("no such cmd") || res.errmsg.startsWith("no such command") ||
- res.code === 59 /* CommandNotFound */));
- };
+ var first = firstc.next();
+ var last = lastc.next();
+ var tfirst = first.ts;
+ var tlast = last.ts;
+
+ if (tfirst && tlast) {
+ tfirst = DB.tsToSeconds(tfirst);
+ tlast = DB.tsToSeconds(tlast);
+ result.timeDiff = tlast - tfirst;
+ result.timeDiffHours = Math.round(result.timeDiff / 36) / 100;
+ result.tFirst = (new Date(tfirst * 1000)).toString();
+ result.tLast = (new Date(tlast * 1000)).toString();
+ result.now = Date();
+ } else {
+ result.errmsg = "ts element not found in oplog objects";
+ }
- DB.prototype.currentOp = function(arg) {
- var q = {};
- if (arg) {
- if (typeof(arg) == "object")
- Object.extend(q, arg);
- else if (arg)
- q["$all"] = true;
- }
+ return result;
+};
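Run against a replica-set member, the helper returns a summary built from local.oplog.rs; the shape, with illustrative values, looks roughly like:

    var info = db.getReplicationInfo();
    // {
    //     logSizeMB: 1024,     // configured maximum oplog size
    //     usedMB: 93.17,       // space currently used, rounded up to 2 decimals
    //     timeDiff: 86400,     // seconds spanned from first to last oplog entry
    //     timeDiffHours: 24,
    //     tFirst: "...", tLast: "...", now: "..."
    // }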
- var commandObj = {"currentOp": 1};
- Object.extend(commandObj, q);
- var res = this.adminCommand(commandObj);
- if (commandUnsupported(res)) {
- // always send legacy currentOp with default (null) read preference (SERVER-17951)
- const session = this.getSession();
- const readPreference = session.getOptions().getReadPreference();
- try {
- session.getOptions().setReadPreference(null);
- res = this.getSiblingDB("admin").$cmd.sys.inprog.findOne(q);
- } finally {
- session.getOptions().setReadPreference(readPreference);
- }
- }
- return res;
- };
- DB.prototype.currentOP = DB.prototype.currentOp;
-
- DB.prototype.killOp = function(op) {
- if (!op)
- throw Error("no opNum to kill specified");
- var res = this.adminCommand({'killOp': 1, 'op': op});
- if (commandUnsupported(res)) {
- // fall back for old servers
- const session = this.getSession();
- const readPreference = session.getOptions().getReadPreference();
- try {
- session.getOptions().setReadPreference(null);
- res = this.getSiblingDB("admin").$cmd.sys.killop.findOne({'op': op});
- } finally {
- session.getOptions().setReadPreference(readPreference);
- }
+DB.prototype.printReplicationInfo = function() {
+ var result = this.getReplicationInfo();
+ if (result.errmsg) {
+ var isMaster = this.isMaster();
+ if (isMaster.arbiterOnly) {
+ print("cannot provide replication status from an arbiter.");
+ return;
+ } else if (!isMaster.ismaster) {
+ print("this is a slave, printing slave replication info.");
+ this.printSlaveReplicationInfo();
+ return;
}
- return res;
- };
- DB.prototype.killOP = DB.prototype.killOp;
-
- DB.tsToSeconds = function(x) {
- if (x.t && x.i)
- return x.t;
- return x / 4294967296; // low 32 bits are ordinal #s within a second
- };
-
- /**
- Get a replication log information summary.
- <p>
- This command is for the database/cloud administer and not applicable to most databases.
- It is only used with the local database. One might invoke from the JS shell:
- <pre>
- use local
- db.getReplicationInfo();
- </pre>
- * @return Object timeSpan: time span of the oplog from start to end if slave is more out
- * of date than that, it can't recover without a complete resync
- */
- DB.prototype.getReplicationInfo = function() {
- var localdb = this.getSiblingDB("local");
-
- var result = {};
- var oplog;
- var localCollections = localdb.getCollectionNames();
- if (localCollections.indexOf('oplog.rs') >= 0) {
- oplog = 'oplog.rs';
+ print(tojson(result));
+ return;
+ }
+ print("configured oplog size: " + result.logSizeMB + "MB");
+ print("log length start to end: " + result.timeDiff + "secs (" + result.timeDiffHours + "hrs)");
+ print("oplog first event time: " + result.tFirst);
+ print("oplog last event time: " + result.tLast);
+ print("now: " + result.now);
+};
+
+DB.prototype.printSlaveReplicationInfo = function() {
+ var startOptimeDate = null;
+ var primary = null;
+
+ function getReplLag(st) {
+ assert(startOptimeDate, "how could this be null (getReplLag startOptimeDate)");
+ print("\tsyncedTo: " + st.toString());
+ var ago = (startOptimeDate - st) / 1000;
+ var hrs = Math.round(ago / 36) / 100;
+ var suffix = "";
+ if (primary) {
+ suffix = "primary ";
} else {
- result.errmsg = "replication not detected";
- return result;
+ suffix = "freshest member (no primary available at the moment)";
}
+ print("\t" + Math.round(ago) + " secs (" + hrs + " hrs) behind the " + suffix);
+ }
- var ol = localdb.getCollection(oplog);
- var ol_stats = ol.stats();
- if (ol_stats && ol_stats.maxSize) {
- result.logSizeMB = ol_stats.maxSize / (1024 * 1024);
- } else {
- result.errmsg = "Could not get stats for local." + oplog + " collection. " +
- "collstats returned: " + tojson(ol_stats);
- return result;
+ function getMaster(members) {
+ for (var i in members) {
+ var row = members[i];
+ if (row.state === 1) {
+ return row;
+ }
}
- result.usedMB = ol_stats.size / (1024 * 1024);
- result.usedMB = Math.ceil(result.usedMB * 100) / 100;
-
- var firstc = ol.find().sort({$natural: 1}).limit(1);
- var lastc = ol.find().sort({$natural: -1}).limit(1);
- if (!firstc.hasNext() || !lastc.hasNext()) {
- result.errmsg =
- "objects not found in local.oplog.$main -- is this a new and empty db instance?";
- result.oplogMainRowCount = ol.count();
- return result;
- }
+ return null;
+ }
- var first = firstc.next();
- var last = lastc.next();
- var tfirst = first.ts;
- var tlast = last.ts;
-
- if (tfirst && tlast) {
- tfirst = DB.tsToSeconds(tfirst);
- tlast = DB.tsToSeconds(tlast);
- result.timeDiff = tlast - tfirst;
- result.timeDiffHours = Math.round(result.timeDiff / 36) / 100;
- result.tFirst = (new Date(tfirst * 1000)).toString();
- result.tLast = (new Date(tlast * 1000)).toString();
- result.now = Date();
+ function g(x) {
+ assert(x, "how could this be null (printSlaveReplicationInfo gx)");
+ print("source: " + x.host);
+ if (x.syncedTo) {
+ var st = new Date(DB.tsToSeconds(x.syncedTo) * 1000);
+ getReplLag(st);
} else {
- result.errmsg = "ts element not found in oplog objects";
+ print("\tdoing initial sync");
}
+ }
- return result;
- };
-
- DB.prototype.printReplicationInfo = function() {
- var result = this.getReplicationInfo();
- if (result.errmsg) {
- var isMaster = this.isMaster();
- if (isMaster.arbiterOnly) {
- print("cannot provide replication status from an arbiter.");
- return;
- } else if (!isMaster.ismaster) {
- print("this is a slave, printing slave replication info.");
- this.printSlaveReplicationInfo();
- return;
- }
- print(tojson(result));
+ function r(x) {
+ assert(x, "how could this be null (printSlaveReplicationInfo rx)");
+ if (x.state == 1 || x.state == 7) { // ignore primaries (1) and arbiters (7)
return;
}
- print("configured oplog size: " + result.logSizeMB + "MB");
- print("log length start to end: " + result.timeDiff + "secs (" + result.timeDiffHours +
- "hrs)");
- print("oplog first event time: " + result.tFirst);
- print("oplog last event time: " + result.tLast);
- print("now: " + result.now);
- };
- DB.prototype.printSlaveReplicationInfo = function() {
- var startOptimeDate = null;
- var primary = null;
-
- function getReplLag(st) {
- assert(startOptimeDate, "how could this be null (getReplLag startOptimeDate)");
- print("\tsyncedTo: " + st.toString());
- var ago = (startOptimeDate - st) / 1000;
- var hrs = Math.round(ago / 36) / 100;
- var suffix = "";
- if (primary) {
- suffix = "primary ";
- } else {
- suffix = "freshest member (no primary available at the moment)";
- }
- print("\t" + Math.round(ago) + " secs (" + hrs + " hrs) behind the " + suffix);
+ print("source: " + x.name);
+ if (x.optime) {
+ getReplLag(x.optimeDate);
+ } else {
+ print("\tno replication info, yet. State: " + x.stateStr);
}
+ }
- function getMaster(members) {
- for (i in members) {
- var row = members[i];
- if (row.state === 1) {
- return row;
- }
- }
+ var L = this.getSiblingDB("local");
- return null;
+ if (L.system.replset.count() != 0) {
+ var status = this.adminCommand({'replSetGetStatus': 1});
+ primary = getMaster(status.members);
+ if (primary) {
+ startOptimeDate = primary.optimeDate;
}
-
- function g(x) {
- assert(x, "how could this be null (printSlaveReplicationInfo gx)");
- print("source: " + x.host);
- if (x.syncedTo) {
- var st = new Date(DB.tsToSeconds(x.syncedTo) * 1000);
- getReplLag(st);
- } else {
- print("\tdoing initial sync");
+ // no primary, find the most recent op among all members
+ else {
+ startOptimeDate = new Date(0, 0);
+ for (var i in status.members) {
+ if (status.members[i].optimeDate > startOptimeDate) {
+ startOptimeDate = status.members[i].optimeDate;
+ }
}
}
- function r(x) {
- assert(x, "how could this be null (printSlaveReplicationInfo rx)");
- if (x.state == 1 || x.state == 7) { // ignore primaries (1) and arbiters (7)
- return;
- }
-
- print("source: " + x.name);
- if (x.optime) {
- getReplLag(x.optimeDate);
- } else {
- print("\tno replication info, yet. State: " + x.stateStr);
- }
+ for (var i in status.members) {
+ r(status.members[i]);
}
-
- var L = this.getSiblingDB("local");
-
- if (L.system.replset.count() != 0) {
- var status = this.adminCommand({'replSetGetStatus': 1});
- primary = getMaster(status.members);
- if (primary) {
- startOptimeDate = primary.optimeDate;
- }
- // no primary, find the most recent op among all members
- else {
- startOptimeDate = new Date(0, 0);
- for (i in status.members) {
- if (status.members[i].optimeDate > startOptimeDate) {
- startOptimeDate = status.members[i].optimeDate;
- }
- }
- }
-
- for (i in status.members) {
- r(status.members[i]);
+ }
+};
+
+DB.prototype.serverBuildInfo = function() {
+ return this._adminCommand("buildinfo");
+};
+
+// Used to trim entries from the metrics.commands that have never been executed
+getActiveCommands = function(tree) {
+ var result = {};
+ for (var i in tree) {
+ if (!tree.hasOwnProperty(i))
+ continue;
+ if (tree[i].hasOwnProperty("total")) {
+ if (tree[i].total > 0) {
+ result[i] = tree[i];
}
+ continue;
}
- };
-
- DB.prototype.serverBuildInfo = function() {
- return this._adminCommand("buildinfo");
- };
-
- // Used to trim entries from the metrics.commands that have never been executed
- getActiveCommands = function(tree) {
- var result = {};
- for (var i in tree) {
- if (!tree.hasOwnProperty(i))
- continue;
- if (tree[i].hasOwnProperty("total")) {
- if (tree[i].total > 0) {
- result[i] = tree[i];
- }
- continue;
- }
- if (i == "<UNKNOWN>") {
- if (tree[i] > 0) {
- result[i] = tree[i];
- }
- continue;
- }
- // Handles nested commands
- var subStatus = getActiveCommands(tree[i]);
- if (Object.keys(subStatus).length > 0) {
+ if (i == "<UNKNOWN>") {
+ if (tree[i] > 0) {
result[i] = tree[i];
}
+ continue;
}
- return result;
- };
-
- DB.prototype.serverStatus = function(options) {
- var cmd = {serverStatus: 1};
- if (options) {
- Object.extend(cmd, options);
- }
- var res = this._adminCommand(cmd);
- // Only prune if we have a metrics tree with commands.
- if (res.metrics && res.metrics.commands) {
- res.metrics.commands = getActiveCommands(res.metrics.commands);
+ // Handles nested commands
+ var subStatus = getActiveCommands(tree[i]);
+ if (Object.keys(subStatus).length > 0) {
+ result[i] = tree[i];
}
- return res;
- };
+ }
+ return result;
+};
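A minimal sketch of the pruning on a hypothetical metrics.commands subtree: zero-count entries vanish, the special <UNKNOWN> counter is kept only when nonzero, and nested command groups are recursed into.

    var tree = {
        find: {failed: 0, total: 12},
        ping: {failed: 0, total: 0},               // dropped: never executed
        "<UNKNOWN>": 0,                            // dropped: zero count
        update: {pipeline: {failed: 0, total: 3}}  // nested group, kept via recursion
    };
    printjson(getActiveCommands(tree));
    // { "find" : { "failed" : 0, "total" : 12 },
    //   "update" : { "pipeline" : { "failed" : 0, "total" : 3 } } }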
- DB.prototype.hostInfo = function() {
- return this._adminCommand("hostInfo");
- };
+DB.prototype.serverStatus = function(options) {
+ var cmd = {serverStatus: 1};
+ if (options) {
+ Object.extend(cmd, options);
+ }
+ var res = this._adminCommand(cmd);
+ // Only prune if we have a metrics tree with commands.
+ if (res.metrics && res.metrics.commands) {
+ res.metrics.commands = getActiveCommands(res.metrics.commands);
+ }
+ return res;
+};
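Extra option fields are merged straight into the command object, so individual sections can be suppressed or requested; for example:

    var status = db.serverStatus({repl: 0});  // omit the repl section
    printjson(status.metrics.commands);       // pruned to commands that have actually run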
- DB.prototype.serverCmdLineOpts = function() {
- return this._adminCommand("getCmdLineOpts");
- };
+DB.prototype.hostInfo = function() {
+ return this._adminCommand("hostInfo");
+};
- DB.prototype.version = function() {
- return this.serverBuildInfo().version;
- };
+DB.prototype.serverCmdLineOpts = function() {
+ return this._adminCommand("getCmdLineOpts");
+};
- DB.prototype.serverBits = function() {
- return this.serverBuildInfo().bits;
- };
+DB.prototype.version = function() {
+ return this.serverBuildInfo().version;
+};
- DB.prototype.listCommands = function() {
- var x = this.runCommand("listCommands");
- for (var name in x.commands) {
- var c = x.commands[name];
+DB.prototype.serverBits = function() {
+ return this.serverBuildInfo().bits;
+};
- var s = name + ": ";
+DB.prototype.listCommands = function() {
+ var x = this.runCommand("listCommands");
+ for (var name in x.commands) {
+ var c = x.commands[name];
- if (c.adminOnly)
- s += " adminOnly ";
- if (c.slaveOk)
- s += " slaveOk ";
+ var s = name + ": ";
- s += "\n ";
- s += c.help.replace(/\n/g, '\n ');
- s += "\n";
+ if (c.adminOnly)
+ s += " adminOnly ";
+ if (c.slaveOk)
+ s += " slaveOk ";
- print(s);
- }
- };
+ s += "\n ";
+ s += c.help.replace(/\n/g, '\n ');
+ s += "\n";
- DB.prototype.printShardingStatus = function(verbose) {
- printShardingStatus(this.getSiblingDB("config"), verbose);
- };
+ print(s);
+ }
+};
- DB.prototype.fsyncLock = function() {
- return this.adminCommand({fsync: 1, lock: true});
- };
+DB.prototype.printShardingStatus = function(verbose) {
+ printShardingStatus(this.getSiblingDB("config"), verbose);
+};
- DB.prototype.fsyncUnlock = function() {
- var res = this.adminCommand({fsyncUnlock: 1});
- if (commandUnsupported(res)) {
- const session = this.getSession();
- const readPreference = session.getOptions().getReadPreference();
- try {
- session.getOptions().setReadPreference(null);
- res = this.getSiblingDB("admin").$cmd.sys.unlock.findOne();
- } finally {
- session.getOptions().setReadPreference(readPreference);
- }
- }
- return res;
- };
+DB.prototype.fsyncLock = function() {
+ return this.adminCommand({fsync: 1, lock: true});
+};
- DB.autocomplete = function(obj) {
- // Time out if a transaction or other op holds locks we need. Caller suppresses exceptions.
- var colls = obj._getCollectionNamesInternal({maxTimeMS: 1000});
- var ret = [];
- for (var i = 0; i < colls.length; i++) {
- if (colls[i].match(/^[a-zA-Z0-9_.\$]+$/))
- ret.push(colls[i]);
+DB.prototype.fsyncUnlock = function() {
+ var res = this.adminCommand({fsyncUnlock: 1});
+ if (commandUnsupported(res)) {
+ const session = this.getSession();
+ const readPreference = session.getOptions().getReadPreference();
+ try {
+ session.getOptions().setReadPreference(null);
+ res = this.getSiblingDB("admin").$cmd.sys.unlock.findOne();
+ } finally {
+ session.getOptions().setReadPreference(readPreference);
}
- return ret;
- };
-
- DB.prototype.setSlaveOk = function(value) {
- if (value == undefined)
- value = true;
- this._slaveOk = value;
- };
-
- DB.prototype.getSlaveOk = function() {
- if (this._slaveOk != undefined)
- return this._slaveOk;
- return this._mongo.getSlaveOk();
- };
-
- DB.prototype.getQueryOptions = function() {
- var options = 0;
- if (this.getSlaveOk())
- options |= 4;
- return options;
- };
-
- /* Loads any scripts contained in system.js into the client shell.
- */
- DB.prototype.loadServerScripts = function() {
- var global = Function('return this')();
- this.system.js.find().forEach(function(u) {
- if (u.value.constructor === Code) {
- global[u._id] = eval("(" + u.value.code + ")");
- } else {
- global[u._id] = u.value;
- }
- });
- };
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////
- //////////////////////////// Security shell helpers below
- /////////////////////////////////////////////
- ////////////////////////////////////////////////////////////////////////////////////////////////////
-
- function getUserObjString(userObj) {
- var pwd = userObj.pwd;
- delete userObj.pwd;
- var toreturn = tojson(userObj);
- userObj.pwd = pwd;
- return toreturn;
}
-
- DB.prototype._modifyCommandToDigestPasswordIfNecessary = function(cmdObj, username) {
- if (!cmdObj["pwd"]) {
- return;
- }
- if (cmdObj.hasOwnProperty("digestPassword")) {
- throw Error(
- "Cannot specify 'digestPassword' through the user management shell helpers, " +
- "use 'passwordDigestor' instead");
- }
- var passwordDigestor = cmdObj["passwordDigestor"] ? cmdObj["passwordDigestor"] : "server";
- if (passwordDigestor == "server") {
- cmdObj["digestPassword"] = true;
- } else if (passwordDigestor == "client") {
- cmdObj["pwd"] = _hashPassword(username, cmdObj["pwd"]);
- cmdObj["digestPassword"] = false;
+ return res;
+};
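The two fsync helpers bracket an external copy of the dbpath; the intended call sequence, sketched:

    assert.commandWorked(db.fsyncLock());    // flush pending writes and block new ones
    // ... take a filesystem-level snapshot or copy of the dbpath here ...
    assert.commandWorked(db.fsyncUnlock());  // release the lock and resume writes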
+
+DB.autocomplete = function(obj) {
+ // Time out if a transaction or other op holds locks we need. Caller suppresses exceptions.
+ var colls = obj._getCollectionNamesInternal({maxTimeMS: 1000});
+ var ret = [];
+ for (var i = 0; i < colls.length; i++) {
+ if (colls[i].match(/^[a-zA-Z0-9_.\$]+$/))
+ ret.push(colls[i]);
+ }
+ return ret;
+};
+
+DB.prototype.setSlaveOk = function(value) {
+ if (value == undefined)
+ value = true;
+ this._slaveOk = value;
+};
+
+DB.prototype.getSlaveOk = function() {
+ if (this._slaveOk != undefined)
+ return this._slaveOk;
+ return this._mongo.getSlaveOk();
+};
+
+DB.prototype.getQueryOptions = function() {
+ var options = 0;
+ if (this.getSlaveOk())
+ options |= 4;
+ return options;
+};
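getQueryOptions maps the slaveOk flag onto bit 2 (value 4) of the legacy OP_QUERY flags word; a sketch:

    db.setSlaveOk();       // no argument defaults to true
    db.getQueryOptions();  // 4: the slaveOk wire-protocol flag
    db.setSlaveOk(false);
    db.getQueryOptions();  // 0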
+
+/* Loads any scripts contained in system.js into the client shell.
+ */
+DB.prototype.loadServerScripts = function() {
+ var global = Function('return this')();
+ this.system.js.find().forEach(function(u) {
+ if (u.value.constructor === Code) {
+ global[u._id] = eval("(" + u.value.code + ")");
} else {
- throw Error("'passwordDigestor' must be either 'server' or 'client', got: '" +
- passwordDigestor + "'");
- }
- delete cmdObj["passwordDigestor"];
- };
+ global[u._id] = u.value;
+ }
+ });
+};
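Functions stored in system.js round-trip as BSON Code values, so the workflow is save-then-load; a sketch with a hypothetical stored function named echo:

    db.system.js.save({_id: "echo", value: function(x) {
        return x;
    }});
    db.loadServerScripts();
    echo(42);  // 42 -- the stored function is now a shell global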
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////// Security shell helpers below
+/////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+function getUserObjString(userObj) {
+ var pwd = userObj.pwd;
+ delete userObj.pwd;
+ var toreturn = tojson(userObj);
+ userObj.pwd = pwd;
+ return toreturn;
+}
+
+DB.prototype._modifyCommandToDigestPasswordIfNecessary = function(cmdObj, username) {
+ if (!cmdObj["pwd"]) {
+ return;
+ }
+ if (cmdObj.hasOwnProperty("digestPassword")) {
+ throw Error("Cannot specify 'digestPassword' through the user management shell helpers, " +
+ "use 'passwordDigestor' instead");
+ }
+ var passwordDigestor = cmdObj["passwordDigestor"] ? cmdObj["passwordDigestor"] : "server";
+ if (passwordDigestor == "server") {
+ cmdObj["digestPassword"] = true;
+ } else if (passwordDigestor == "client") {
+ cmdObj["pwd"] = _hashPassword(username, cmdObj["pwd"]);
+ cmdObj["digestPassword"] = false;
+ } else {
+ throw Error("'passwordDigestor' must be either 'server' or 'client', got: '" +
+ passwordDigestor + "'");
+ }
+ delete cmdObj["passwordDigestor"];
+};
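A sketch of the "client" digestion mode on a hypothetical createUser command object (the default "server" mode instead leaves pwd as plaintext and sets digestPassword to true):

    var cmd = {createUser: "alice", pwd: "secret", passwordDigestor: "client"};
    db._modifyCommandToDigestPasswordIfNecessary(cmd, "alice");
    // cmd.pwd is now hex_md5("alice:mongo:secret"), cmd.digestPassword is false,
    // and the passwordDigestor field has been deleted.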
- DB.prototype.createUser = function(userObj, writeConcern) {
- var name = userObj["user"];
- if (name === undefined) {
- throw Error("no 'user' field provided to 'createUser' function");
- }
+DB.prototype.createUser = function(userObj, writeConcern) {
+ var name = userObj["user"];
+ if (name === undefined) {
+ throw Error("no 'user' field provided to 'createUser' function");
+ }
- if (userObj["createUser"] !== undefined) {
- throw Error("calling 'createUser' function with 'createUser' field is disallowed");
- }
+ if (userObj["createUser"] !== undefined) {
+ throw Error("calling 'createUser' function with 'createUser' field is disallowed");
+ }
- var cmdObj = {createUser: name};
- cmdObj = Object.extend(cmdObj, userObj);
- delete cmdObj["user"];
+ var cmdObj = {createUser: name};
+ cmdObj = Object.extend(cmdObj, userObj);
+ delete cmdObj["user"];
- this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
+ this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
- cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
+ cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
- var res = this.runCommand(cmdObj);
+ var res = this.runCommand(cmdObj);
- if (res.ok) {
- print("Successfully added user: " + getUserObjString(userObj));
- return;
- }
+ if (res.ok) {
+ print("Successfully added user: " + getUserObjString(userObj));
+ return;
+ }
- if (res.errmsg == "no such cmd: createUser") {
- throw Error("'createUser' command not found. This is most likely because you are " +
- "talking to an old (pre v2.6) MongoDB server");
- }
+ if (res.errmsg == "no such cmd: createUser") {
+ throw Error("'createUser' command not found. This is most likely because you are " +
+ "talking to an old (pre v2.6) MongoDB server");
+ }
- if (res.errmsg == "timeout") {
- throw Error("timed out while waiting for user authentication to replicate - " +
- "database will not be fully secured until replication finishes");
- }
+ if (res.errmsg == "timeout") {
+ throw Error("timed out while waiting for user authentication to replicate - " +
+ "database will not be fully secured until replication finishes");
+ }
- throw _getErrorWithCode(res, "couldn't add user: " + res.errmsg);
- };
+ throw _getErrorWithCode(res, "couldn't add user: " + res.errmsg);
+};
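Typical usage, sketched with hypothetical credentials and roles:

    db.createUser({
        user: "alice",
        pwd: "secret",
        roles: [{role: "readWrite", db: "test"}]
    });
    // prints: Successfully added user: { "user" : "alice", "roles" : [ ... ] }
    // (getUserObjString redacts the pwd field from the printed object)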
- function _hashPassword(username, password) {
- if (typeof password != 'string') {
- throw Error("User passwords must be of type string. Was given password with type: " +
- typeof(password));
- }
- return hex_md5(username + ":mongo:" + password);
+function _hashPassword(username, password) {
+ if (typeof password != 'string') {
+ throw Error("User passwords must be of type string. Was given password with type: " +
+ typeof (password));
+ }
+ return hex_md5(username + ":mongo:" + password);
+}
+
+/**
+ * Used for updating users in systems with V1 style user information
+ * (i.e. MongoDB v2.4 and prior)
+ */
+DB.prototype._updateUserV1 = function(name, updateObject, writeConcern) {
+ var setObj = {};
+ if (updateObject.pwd) {
+ setObj["pwd"] = _hashPassword(name, updateObject.pwd);
+ }
+ if (updateObject.extraData) {
+ setObj["extraData"] = updateObject.extraData;
+ }
+ if (updateObject.roles) {
+ setObj["roles"] = updateObject.roles;
}
- /**
- * Used for updating users in systems with V1 style user information
- * (ie MongoDB v2.4 and prior)
- */
- DB.prototype._updateUserV1 = function(name, updateObject, writeConcern) {
- var setObj = {};
- if (updateObject.pwd) {
- setObj["pwd"] = _hashPassword(name, updateObject.pwd);
- }
- if (updateObject.extraData) {
- setObj["extraData"] = updateObject.extraData;
- }
- if (updateObject.roles) {
- setObj["roles"] = updateObject.roles;
- }
+ this.system.users.update({user: name, userSource: null}, {$set: setObj});
+ var errObj = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
+ if (errObj.err) {
+ throw _getErrorWithCode(errObj, "Updating user failed: " + errObj.err);
+ }
+};
- this.system.users.update({user: name, userSource: null}, {$set: setObj});
- var errObj = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
- if (errObj.err) {
- throw _getErrorWithCode(errObj, "Updating user failed: " + errObj.err);
- }
- };
+DB.prototype.updateUser = function(name, updateObject, writeConcern) {
+ var cmdObj = {updateUser: name};
+ cmdObj = Object.extend(cmdObj, updateObject);
+ cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
+ this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
- DB.prototype.updateUser = function(name, updateObject, writeConcern) {
- var cmdObj = {updateUser: name};
- cmdObj = Object.extend(cmdObj, updateObject);
- cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
- this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
+ var res = this.runCommand(cmdObj);
+ if (res.ok) {
+ return;
+ }
- var res = this.runCommand(cmdObj);
- if (res.ok) {
- return;
- }
+ if (res.errmsg == "no such cmd: updateUser") {
+ this._updateUserV1(name, updateObject, cmdObj['writeConcern']);
+ return;
+ }
- if (res.errmsg == "no such cmd: updateUser") {
- this._updateUserV1(name, updateObject, cmdObj['writeConcern']);
- return;
- }
+ throw _getErrorWithCode(res, "Updating user failed: " + res.errmsg);
+};
- throw _getErrorWithCode(res, "Updating user failed: " + res.errmsg);
- };
+DB.prototype.changeUserPassword = function(username, password, writeConcern) {
+ this.updateUser(username, {pwd: password}, writeConcern);
+};
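changeUserPassword is a thin convenience wrapper; the two calls below are equivalent (credentials hypothetical):

    db.changeUserPassword("alice", "newSecret");
    db.updateUser("alice", {pwd: "newSecret"});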
- DB.prototype.changeUserPassword = function(username, password, writeConcern) {
- this.updateUser(username, {pwd: password}, writeConcern);
- };
+DB.prototype.logout = function() {
+ // Logging out doesn't require a session since it manipulates connection state.
+ return this.getMongo().logout(this.getName());
+};
- DB.prototype.logout = function() {
- // Logging out doesn't require a session since it manipulates connection state.
- return this.getMongo().logout(this.getName());
- };
+// For backwards compatibility
+DB.prototype.removeUser = function(username, writeConcern) {
+ print("WARNING: db.removeUser has been deprecated, please use db.dropUser instead");
+ return this.dropUser(username, writeConcern);
+};
- // For backwards compatibility
- DB.prototype.removeUser = function(username, writeConcern) {
- print("WARNING: db.removeUser has been deprecated, please use db.dropUser instead");
- return this.dropUser(username, writeConcern);
+DB.prototype.dropUser = function(username, writeConcern) {
+ var cmdObj = {
+ dropUser: username,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
- DB.prototype.dropUser = function(username, writeConcern) {
- var cmdObj = {
- dropUser: username,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
-
- if (res.ok) {
- return true;
- }
+ if (res.ok) {
+ return true;
+ }
- if (res.code == 11) { // Code 11 = UserNotFound
- return false;
- }
+ if (res.code == 11) { // Code 11 = UserNotFound
+ return false;
+ }
- if (res.errmsg == "no such cmd: dropUsers") {
- return this._removeUserV1(username, cmdObj['writeConcern']);
- }
+ if (res.errmsg == "no such cmd: dropUsers") {
+ return this._removeUserV1(username, cmdObj['writeConcern']);
+ }
- throw _getErrorWithCode(res, res.errmsg);
- };
+ throw _getErrorWithCode(res, res.errmsg);
+};
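dropUser has three outcomes, sketched: true on success, false when the user does not exist (UserNotFound, code 11), and a thrown error for anything else.

    db.dropUser("alice");   // true: "alice" existed and was removed
    db.dropUser("nobody");  // false: UserNotFound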
- /**
- * Used for removing users in systems with V1 style user information
- * (ie MongoDB v2.4 and prior)
- */
- DB.prototype._removeUserV1 = function(username, writeConcern) {
- this.getCollection("system.users").remove({user: username});
+/**
+ * Used for removing users in systems with V1 style user information
+ * (ie MongoDB v2.4 and prior)
+ */
+DB.prototype._removeUserV1 = function(username, writeConcern) {
+ this.getCollection("system.users").remove({user: username});
- var le = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
+ var le = this.getLastErrorObj(writeConcern['w'], writeConcern['wtimeout']);
- if (le.err) {
- throw _getErrorWithCode(le, "Couldn't remove user: " + le.err);
- }
+ if (le.err) {
+ throw _getErrorWithCode(le, "Couldn't remove user: " + le.err);
+ }
- if (le.n == 1) {
- return true;
- } else {
- return false;
- }
- };
+ if (le.n == 1) {
+ return true;
+ } else {
+ return false;
+ }
+};
- DB.prototype.dropAllUsers = function(writeConcern) {
- var res = this.runCommand({
- dropAllUsersFromDatabase: 1,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- });
+DB.prototype.dropAllUsers = function(writeConcern) {
+ var res = this.runCommand({
+ dropAllUsersFromDatabase: 1,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
+ });
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- return res.n;
- };
+ return res.n;
+};
- DB.prototype.__pwHash = function(nonce, username, pass) {
- return hex_md5(nonce + username + _hashPassword(username, pass));
- };
+DB.prototype.__pwHash = function(nonce, username, pass) {
+ return hex_md5(nonce + username + _hashPassword(username, pass));
+};
- DB.prototype._defaultAuthenticationMechanism = null;
+DB.prototype._defaultAuthenticationMechanism = null;
- DB.prototype._getDefaultAuthenticationMechanism = function(username, database) {
- if (username !== undefined) {
- const userid = database + "." + username;
- const result = this.runCommand({isMaster: 1, saslSupportedMechs: userid});
- if (result.ok && (result.saslSupportedMechs !== undefined)) {
- const mechs = result.saslSupportedMechs;
- if (!Array.isArray(mechs)) {
- throw Error("Server replied with invalid saslSupportedMechs response");
- }
+DB.prototype._getDefaultAuthenticationMechanism = function(username, database) {
+ if (username !== undefined) {
+ const userid = database + "." + username;
+ const result = this.runCommand({isMaster: 1, saslSupportedMechs: userid});
+ if (result.ok && (result.saslSupportedMechs !== undefined)) {
+ const mechs = result.saslSupportedMechs;
+ if (!Array.isArray(mechs)) {
+ throw Error("Server replied with invalid saslSupportedMechs response");
+ }
- if ((this._defaultAuthenticationMechanism != null) &&
- mechs.includes(this._defaultAuthenticationMechanism)) {
- return this._defaultAuthenticationMechanism;
- }
+ if ((this._defaultAuthenticationMechanism != null) &&
+ mechs.includes(this._defaultAuthenticationMechanism)) {
+ return this._defaultAuthenticationMechanism;
+ }
- // Never include PLAIN in auto-negotiation.
- const priority = ["GSSAPI", "SCRAM-SHA-256", "SCRAM-SHA-1"];
- for (var i = 0; i < priority.length; ++i) {
- if (mechs.includes(priority[i])) {
- return priority[i];
- }
+ // Never include PLAIN in auto-negotiation.
+ const priority = ["GSSAPI", "SCRAM-SHA-256", "SCRAM-SHA-1"];
+ for (var i = 0; i < priority.length; ++i) {
+ if (mechs.includes(priority[i])) {
+ return priority[i];
}
}
- // If isMaster doesn't support saslSupportedMechs,
- // or if we couldn't agree on a mechanism,
- // then fallthrough to configured default or SCRAM-SHA-1.
- }
-
- // Use the default auth mechanism if set on the command line.
- if (this._defaultAuthenticationMechanism != null)
- return this._defaultAuthenticationMechanism;
-
- return "SCRAM-SHA-1";
- };
-
- DB.prototype._defaultGssapiServiceName = null;
-
- DB.prototype._authOrThrow = function() {
- var params;
- if (arguments.length == 2) {
- params = {user: arguments[0], pwd: arguments[1]};
- } else if (arguments.length == 1) {
- if (typeof(arguments[0]) != "object")
- throw Error("Single-argument form of auth expects a parameter object");
- params = Object.extend({}, arguments[0]);
- } else {
- throw Error(
- "auth expects either (username, password) or ({ user: username, pwd: password })");
- }
-
- if (params.mechanism === undefined) {
- params.mechanism = this._getDefaultAuthenticationMechanism(params.user, this.getName());
- }
-
- if (params.db !== undefined) {
- throw Error("Do not override db field on db.auth(). Use getMongo().auth(), instead.");
}
+ // If isMaster doesn't support saslSupportedMechs,
+ // or if we couldn't agree on a mechanism,
+ // then fallthrough to configured default or SCRAM-SHA-1.
+ }
- if (params.mechanism == "GSSAPI" && params.serviceName == null &&
- this._defaultGssapiServiceName != null) {
- params.serviceName = this._defaultGssapiServiceName;
- }
-
- // Logging in doesn't require a session since it manipulates connection state.
- params.db = this.getName();
- var good = this.getMongo().auth(params);
- if (good) {
- // auth enabled, and should try to use isMaster and replSetGetStatus to build prompt
- this.getMongo().authStatus = {
- authRequired: true,
- isMaster: true,
- replSetGetStatus: true
- };
- }
-
- return good;
- };
-
- DB.prototype.auth = function() {
- var ex;
- try {
- this._authOrThrow.apply(this, arguments);
- } catch (ex) {
- print(ex);
- return 0;
- }
- return 1;
- };
-
- DB.prototype.grantRolesToUser = function(username, roles, writeConcern) {
- var cmdObj = {
- grantRolesToUser: username,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ // Use the default auth mechanism if set on the command line.
+ if (this._defaultAuthenticationMechanism != null)
+ return this._defaultAuthenticationMechanism;
+
+ return "SCRAM-SHA-1";
+};
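The negotiation rides on isMaster: the shell asks which SASL mechanisms the server advertises for this user, then walks a fixed priority list. A sketch of the underlying probe (user and database hypothetical):

    var res = db.runCommand({isMaster: 1, saslSupportedMechs: "admin.alice"});
    // e.g. res.saslSupportedMechs == ["SCRAM-SHA-1", "SCRAM-SHA-256"]
    // -> "SCRAM-SHA-256" is chosen; PLAIN is never selected automatically.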
+
+DB.prototype._defaultGssapiServiceName = null;
+
+DB.prototype._authOrThrow = function() {
+ var params;
+ if (arguments.length == 2) {
+ params = {user: arguments[0], pwd: arguments[1]};
+ } else if (arguments.length == 1) {
+ if (typeof (arguments[0]) != "object")
+ throw Error("Single-argument form of auth expects a parameter object");
+ params = Object.extend({}, arguments[0]);
+ } else {
+ throw Error(
+ "auth expects either (username, password) or ({ user: username, pwd: password })");
+ }
- DB.prototype.revokeRolesFromUser = function(username, roles, writeConcern) {
- var cmdObj = {
- revokeRolesFromUser: username,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ if (params.mechanism === undefined) {
+ params.mechanism = this._getDefaultAuthenticationMechanism(params.user, this.getName());
+ }
- DB.prototype.getUser = function(username, args) {
- if (typeof username != "string") {
- throw Error("User name for getUser shell helper must be a string");
- }
- var cmdObj = {usersInfo: username};
- Object.extend(cmdObj, args);
+ if (params.db !== undefined) {
+ throw Error("Do not override db field on db.auth(). Use getMongo().auth(), instead.");
+ }
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ if (params.mechanism == "GSSAPI" && params.serviceName == null &&
+ this._defaultGssapiServiceName != null) {
+ params.serviceName = this._defaultGssapiServiceName;
+ }
- if (res.users.length == 0) {
- return null;
- }
- return res.users[0];
- };
+ // Logging in doesn't require a session since it manipulates connection state.
+ params.db = this.getName();
+ var good = this.getMongo().auth(params);
+ if (good) {
+ // auth enabled, and should try to use isMaster and replSetGetStatus to build prompt
+ this.getMongo().authStatus = {authRequired: true, isMaster: true, replSetGetStatus: true};
+ }
- DB.prototype.getUsers = function(args) {
- var cmdObj = {usersInfo: 1};
- Object.extend(cmdObj, args);
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- var authSchemaIncompatibleCode = 69;
- if (res.code == authSchemaIncompatibleCode ||
- (res.code == null && res.errmsg == "no such cmd: usersInfo")) {
- // Working with 2.4 schema user data
- return this.system.users.find({}).toArray();
- }
+ return good;
+};
- throw _getErrorWithCode(res, res.errmsg);
- }
+DB.prototype.auth = function() {
+ try {
+ this._authOrThrow.apply(this, arguments);
+ } catch (ex) {
+ print(ex);
+ return 0;
+ }
+ return 1;
+};
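db.auth swallows the exception from _authOrThrow and reports success numerically; a sketch:

    if (db.auth("alice", "secret") === 1) {  // 1 on success, 0 on failure
        print("authenticated");
    }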
- return res.users;
+DB.prototype.grantRolesToUser = function(username, roles, writeConcern) {
+ var cmdObj = {
+ grantRolesToUser: username,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
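The role-management wrappers below all share this shape (command name, payload, writeConcern); one usage sketch stands for the family, with hypothetical names:

    db.grantRolesToUser("alice", [{role: "read", db: "reporting"}, "readWrite"]);
    db.revokeRolesFromUser("alice", ["readWrite"]);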
- DB.prototype.createRole = function(roleObj, writeConcern) {
- var name = roleObj["role"];
- var cmdObj = {createRole: name};
- cmdObj = Object.extend(cmdObj, roleObj);
- delete cmdObj["role"];
- cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
-
- var res = this.runCommand(cmdObj);
-
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- printjson(roleObj);
+DB.prototype.revokeRolesFromUser = function(username, roles, writeConcern) {
+ var cmdObj = {
+ revokeRolesFromUser: username,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.updateRole = function(name, updateObject, writeConcern) {
- var cmdObj = {updateRole: name};
- cmdObj = Object.extend(cmdObj, updateObject);
- cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+DB.prototype.getUser = function(username, args) {
+ if (typeof username != "string") {
+ throw Error("User name for getUser shell helper must be a string");
+ }
+ var cmdObj = {usersInfo: username};
+ Object.extend(cmdObj, args);
- DB.prototype.dropRole = function(name, writeConcern) {
- var cmdObj = {
- dropRole: name,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- if (res.ok) {
- return true;
- }
+ if (res.users.length == 0) {
+ return null;
+ }
+ return res.users[0];
+};
- if (res.code == 31) { // Code 31 = RoleNotFound
- return false;
+DB.prototype.getUsers = function(args) {
+ var cmdObj = {usersInfo: 1};
+ Object.extend(cmdObj, args);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ var authSchemaIncompatibleCode = 69;
+ if (res.code == authSchemaIncompatibleCode ||
+ (res.code == null && res.errmsg == "no such cmd: usersInfo")) {
+ // Working with 2.4 schema user data
+ return this.system.users.find({}).toArray();
}
throw _getErrorWithCode(res, res.errmsg);
- };
-
- DB.prototype.dropAllRoles = function(writeConcern) {
- var res = this.runCommand({
- dropAllRolesFromDatabase: 1,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- });
-
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ }
- return res.n;
- };
+ return res.users;
+};
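Extra args are merged into the usersInfo command, so detail flags pass straight through; for example:

    db.getUsers({showCredentials: true});  // usersInfo: 1 plus the credential detail flag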
- DB.prototype.grantRolesToRole = function(rolename, roles, writeConcern) {
- var cmdObj = {
- grantRolesToRole: rolename,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+DB.prototype.createRole = function(roleObj, writeConcern) {
+ var name = roleObj["role"];
+ var cmdObj = {createRole: name};
+ cmdObj = Object.extend(cmdObj, roleObj);
+ delete cmdObj["role"];
+ cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
- DB.prototype.revokeRolesFromRole = function(rolename, roles, writeConcern) {
- var cmdObj = {
- revokeRolesFromRole: rolename,
- roles: roles,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ var res = this.runCommand(cmdObj);
- DB.prototype.grantPrivilegesToRole = function(rolename, privileges, writeConcern) {
- var cmdObj = {
- grantPrivilegesToRole: rolename,
- privileges: privileges,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+ printjson(roleObj);
+};
+
+DB.prototype.updateRole = function(name, updateObject, writeConcern) {
+ var cmdObj = {updateRole: name};
+ cmdObj = Object.extend(cmdObj, updateObject);
+ cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.revokePrivilegesFromRole = function(rolename, privileges, writeConcern) {
- var cmdObj = {
- revokePrivilegesFromRole: rolename,
- privileges: privileges,
- writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
- };
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
- };
+DB.prototype.dropRole = function(name, writeConcern) {
+ var cmdObj = {dropRole: name, writeConcern: writeConcern ? writeConcern : _defaultWriteConcern};
+ var res = this.runCommand(cmdObj);
- DB.prototype.getRole = function(rolename, args) {
- if (typeof rolename != "string") {
- throw Error("Role name for getRole shell helper must be a string");
- }
- var cmdObj = {rolesInfo: rolename};
- Object.extend(cmdObj, args);
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ if (res.ok) {
+ return true;
+ }
- if (res.roles.length == 0) {
- return null;
- }
- return res.roles[0];
- };
+ if (res.code == 31) { // Code 31 = RoleNotFound
+ return false;
+ }
- DB.prototype.getRoles = function(args) {
- var cmdObj = {rolesInfo: 1};
- Object.extend(cmdObj, args);
- var res = this.runCommand(cmdObj);
- if (!res.ok) {
- throw _getErrorWithCode(res, res.errmsg);
- }
+ throw _getErrorWithCode(res, res.errmsg);
+};
- return res.roles;
- };
+DB.prototype.dropAllRoles = function(writeConcern) {
+ var res = this.runCommand({
+ dropAllRolesFromDatabase: 1,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
+ });
- DB.prototype.setWriteConcern = function(wc) {
- if (wc instanceof WriteConcern) {
- this._writeConcern = wc;
- } else {
- this._writeConcern = new WriteConcern(wc);
- }
- };
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- DB.prototype.getWriteConcern = function() {
- if (this._writeConcern)
- return this._writeConcern;
+ return res.n;
+};
- {
- const session = this.getSession();
- return session._getSessionAwareClient().getWriteConcern(session);
- }
+DB.prototype.grantRolesToRole = function(rolename, roles, writeConcern) {
+ var cmdObj = {
+ grantRolesToRole: rolename,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.unsetWriteConcern = function() {
- delete this._writeConcern;
+DB.prototype.revokeRolesFromRole = function(rolename, roles, writeConcern) {
+ var cmdObj = {
+ revokeRolesFromRole: rolename,
+ roles: roles,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.getLogComponents = function() {
- return this.getMongo().getLogComponents(this.getSession());
+DB.prototype.grantPrivilegesToRole = function(rolename, privileges, writeConcern) {
+ var cmdObj = {
+ grantPrivilegesToRole: rolename,
+ privileges: privileges,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.setLogLevel = function(logLevel, component) {
- return this.getMongo().setLogLevel(logLevel, component, this.getSession());
+DB.prototype.revokePrivilegesFromRole = function(rolename, privileges, writeConcern) {
+ var cmdObj = {
+ revokePrivilegesFromRole: rolename,
+ privileges: privileges,
+ writeConcern: writeConcern ? writeConcern : _defaultWriteConcern
};
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
+};
- DB.prototype.watch = function(pipeline, options) {
- pipeline = pipeline || [];
- assert(pipeline instanceof Array, "'pipeline' argument must be an array");
-
- let changeStreamStage;
- [changeStreamStage, aggOptions] = this.getMongo()._extractChangeStreamOptions(options);
- pipeline.unshift(changeStreamStage);
- return this._runAggregate({aggregate: 1, pipeline: pipeline}, aggOptions);
- };
+DB.prototype.getRole = function(rolename, args) {
+ if (typeof rolename != "string") {
+ throw Error("Role name for getRole shell helper must be a string");
+ }
+ var cmdObj = {rolesInfo: rolename};
+ Object.extend(cmdObj, args);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- DB.prototype.getFreeMonitoringStatus = function() {
- 'use strict';
- return assert.commandWorked(this.adminCommand({getFreeMonitoringStatus: 1}));
- };
+ if (res.roles.length == 0) {
+ return null;
+ }
+ return res.roles[0];
+};
+
+DB.prototype.getRoles = function(args) {
+ var cmdObj = {rolesInfo: 1};
+ Object.extend(cmdObj, args);
+ var res = this.runCommand(cmdObj);
+ if (!res.ok) {
+ throw _getErrorWithCode(res, res.errmsg);
+ }
- DB.prototype.enableFreeMonitoring = function() {
- 'use strict';
- const isMaster = this.isMaster();
- if (isMaster.ismaster == false) {
- print("ERROR: db.enableFreeMonitoring() may only be run on a primary");
- return;
- }
+ return res.roles;
+};
- assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'enable'}));
+DB.prototype.setWriteConcern = function(wc) {
+ if (wc instanceof WriteConcern) {
+ this._writeConcern = wc;
+ } else {
+ this._writeConcern = new WriteConcern(wc);
+ }
+};
- const cmd = this.adminCommand({getFreeMonitoringStatus: 1});
- if (!cmd.ok && (cmd.code == ErrorCode.Unauthorized)) {
- // Edge case: It's technically possible that a user can change free-mon state,
- // but is not allowed to inspect it.
- print("Successfully initiated free monitoring, but unable to determine status " +
- "as you lack the 'checkFreeMonitoringStatus' privilege.");
- return;
- }
- assert.commandWorked(cmd);
+DB.prototype.getWriteConcern = function() {
+ if (this._writeConcern)
+ return this._writeConcern;
- if (cmd.state !== 'enabled') {
- const url = this.adminCommand({'getParameter': 1, 'cloudFreeMonitoringEndpointURL': 1})
- .cloudFreeMonitoringEndpointURL;
+ {
+ const session = this.getSession();
+ return session._getSessionAwareClient().getWriteConcern(session);
+ }
+};
+
+DB.prototype.unsetWriteConcern = function() {
+ delete this._writeConcern;
+};
+
+DB.prototype.getLogComponents = function() {
+ return this.getMongo().getLogComponents(this.getSession());
+};
+
+DB.prototype.setLogLevel = function(logLevel, component) {
+ return this.getMongo().setLogLevel(logLevel, component, this.getSession());
+};
+
+DB.prototype.watch = function(pipeline, options) {
+ pipeline = pipeline || [];
+ assert(pipeline instanceof Array, "'pipeline' argument must be an array");
+
+ const [changeStreamStage, aggOptions] = this.getMongo()._extractChangeStreamOptions(options);
+ pipeline.unshift(changeStreamStage);
+ return this._runAggregate({aggregate: 1, pipeline: pipeline}, aggOptions);
+};
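db.watch opens a database-level change stream by unshifting the $changeStream stage onto the caller's pipeline; a usage sketch:

    var cs = db.watch([{$match: {operationType: "insert"}}]);
    while (cs.hasNext()) {
        printjson(cs.next());  // one change event per matching write
    }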
+
+DB.prototype.getFreeMonitoringStatus = function() {
+ 'use strict';
+ return assert.commandWorked(this.adminCommand({getFreeMonitoringStatus: 1}));
+};
+
+DB.prototype.enableFreeMonitoring = function() {
+ 'use strict';
+ const isMaster = this.isMaster();
+ if (isMaster.ismaster == false) {
+ print("ERROR: db.enableFreeMonitoring() may only be run on a primary");
+ return;
+ }
- print("Unable to get immediate response from the Cloud Monitoring service. We will" +
- "continue to retry in the background. Please check your firewall " +
- "settings to ensure that mongod can communicate with \"" + url + "\"");
- return;
- }
+ assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'enable'}));
- print(tojson(cmd));
- };
+ const cmd = this.adminCommand({getFreeMonitoringStatus: 1});
+ if (!cmd.ok && (cmd.code == ErrorCodes.Unauthorized)) {
+ // Edge case: It's technically possible that a user can change free-mon state,
+ // but is not allowed to inspect it.
+ print("Successfully initiated free monitoring, but unable to determine status " +
+ "as you lack the 'checkFreeMonitoringStatus' privilege.");
+ return;
+ }
+ assert.commandWorked(cmd);
- DB.prototype.disableFreeMonitoring = function() {
- 'use strict';
- assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'disable'}));
- };
+ if (cmd.state !== 'enabled') {
+ const url = this.adminCommand({'getParameter': 1, 'cloudFreeMonitoringEndpointURL': 1})
+ .cloudFreeMonitoringEndpointURL;
- // Writing `this.hasOwnProperty` would cause DB.prototype.getCollection() to be called since the
- // DB's getProperty() handler in C++ takes precedence when a property isn't defined on the DB
- // instance directly. The "hasOwnProperty" property is defined on Object.prototype, so we must
- // resort to using the function explicitly ourselves.
- (function(hasOwnProperty) {
- DB.prototype.getSession = function() {
- if (!hasOwnProperty.call(this, "_session")) {
- this._session = this.getMongo()._getDefaultSession();
- }
- return this._session;
- };
- })(Object.prototype.hasOwnProperty);
+ print("Unable to get immediate response from the Cloud Monitoring service. We will" +
+ "continue to retry in the background. Please check your firewall " +
+ "settings to ensure that mongod can communicate with \"" + url + "\"");
+ return;
+ }
+ print(tojson(cmd));
+};
+
+DB.prototype.disableFreeMonitoring = function() {
+ 'use strict';
+ assert.commandWorked(this.adminCommand({setFreeMonitoring: 1, action: 'disable'}));
+};
+
+// Writing `this.hasOwnProperty` would cause DB.prototype.getCollection() to be called since the
+// DB's getProperty() handler in C++ takes precedence when a property isn't defined on the DB
+// instance directly. The "hasOwnProperty" property is defined on Object.prototype, so we must
+// resort to using the function explicitly ourselves.
+(function(hasOwnProperty) {
+DB.prototype.getSession = function() {
+ if (!hasOwnProperty.call(this, "_session")) {
+ this._session = this.getMongo()._getDefaultSession();
+ }
+ return this._session;
+};
+})(Object.prototype.hasOwnProperty);
}());
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index bc6e211ae6f..ce086bb03ee 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -176,7 +176,7 @@ enum ShellExitCode : int {
};
Scope* shellMainScope;
-}
+} // namespace mongo
void generateCompletions(const std::string& prefix, std::vector<std::string>& all) {
if (prefix.find('"') != std::string::npos)
@@ -477,10 +477,9 @@ size_t skipOverString(const std::string& code, size_t start, char quote) {
// that the escaping backslash is not itself escaped. Comparisons of start and pos
// are to keep us from reading beyond the beginning of the quoted string.
//
- if (start == pos || code[pos - 1] != '\\' || // previous char was backslash
- start == pos - 1 ||
- code[pos - 2] == '\\' // char before backslash was not another
- ) {
+ if (start == pos || code[pos - 1] != '\\' || // quote is not preceded by a backslash
+ start == pos - 1 || code[pos - 2] == '\\' // or the preceding backslash is itself escaped
+ ) {
break; // The quote we found was not preceded by an unescaped backslash; it is real
}
++pos; // The quote we found was escaped with backslash, so it doesn't count
@@ -941,8 +940,8 @@ int _main(int argc, char* argv[], char** envp) {
#else
wchar_t programDataPath[MAX_PATH];
if (S_OK == SHGetFolderPathW(NULL, CSIDL_COMMON_APPDATA, NULL, 0, programDataPath)) {
- rcGlobalLocation = str::stream() << toUtf8String(programDataPath)
- << "\\MongoDB\\mongorc.js";
+ rcGlobalLocation = str::stream()
+ << toUtf8String(programDataPath) << "\\MongoDB\\mongorc.js";
}
#endif
if (!rcGlobalLocation.empty() && ::mongo::shell_utils::fileExists(rcGlobalLocation)) {
@@ -1022,9 +1021,9 @@ int _main(int argc, char* argv[], char** envp) {
rcLocation = str::stream() << getenv("HOME") << "/.mongorc.js";
#else
if (getenv("HOMEDRIVE") != NULL && getenv("HOMEPATH") != NULL)
- rcLocation = str::stream() << toUtf8String(_wgetenv(L"HOMEDRIVE"))
- << toUtf8String(_wgetenv(L"HOMEPATH"))
- << "\\.mongorc.js";
+ rcLocation = str::stream()
+ << toUtf8String(_wgetenv(L"HOMEDRIVE")) << toUtf8String(_wgetenv(L"HOMEPATH"))
+ << "\\.mongorc.js";
#endif
if (!rcLocation.empty() && ::mongo::shell_utils::fileExists(rcLocation)) {
hasMongoRC = true;
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index bec5bb8ae5e..be82f6b97bb 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -138,7 +138,7 @@ BSONObj EncryptedDBClientBase::encryptDecryptCommand(const BSONObj& object,
uassert(31096,
"Object too deep to be encrypted. Exceeded stack depth.",
frameStack.size() < BSONDepth::kDefaultMaxAllowableDepth);
- auto & [ iterator, builder ] = frameStack.top();
+ auto& [iterator, builder] = frameStack.top();
if (iterator.more()) {
BSONElement elem = iterator.next();
if (elem.type() == BSONType::Object) {
@@ -609,7 +609,7 @@ std::shared_ptr<SymmetricKey> EncryptedDBClientBase::getDataKey(const UUID& uuid
auto ts_new = Date_t::now();
if (_datakeyCache.hasKey(uuid)) {
- auto[key, ts] = _datakeyCache.find(uuid)->second;
+ auto [key, ts] = _datakeyCache.find(uuid)->second;
if (ts_new - ts < kCacheInvalidationTime) {
return key;
} else {
diff --git a/src/mongo/shell/encrypted_shell_options.h b/src/mongo/shell/encrypted_shell_options.h
index f839c637d9a..b4b30aba2fe 100644
--- a/src/mongo/shell/encrypted_shell_options.h
+++ b/src/mongo/shell/encrypted_shell_options.h
@@ -42,4 +42,4 @@ struct EncryptedShellGlobalParams {
};
extern EncryptedShellGlobalParams encryptedShellGlobalParams;
-}
+} // namespace mongo
diff --git a/src/mongo/shell/explain_query.js b/src/mongo/shell/explain_query.js
index 78e57c86e69..89a922e225a 100644
--- a/src/mongo/shell/explain_query.js
+++ b/src/mongo/shell/explain_query.js
@@ -4,7 +4,6 @@
//
var DBExplainQuery = (function() {
-
//
// Private methods.
//
@@ -15,7 +14,7 @@ var DBExplainQuery = (function() {
* is implemented here for backwards compatibility.
*/
function removeVerboseFields(obj) {
- if (typeof(obj) !== "object") {
+ if (typeof (obj) !== "object") {
return;
}
@@ -23,7 +22,7 @@ var DBExplainQuery = (function() {
delete obj.oldPlan;
delete obj.stats;
- if (typeof(obj.length) === "number") {
+ if (typeof (obj.length) === "number") {
for (var i = 0; i < obj.length; i++) {
removeVerboseFields(obj[i]);
}
diff --git a/src/mongo/shell/explainable.js b/src/mongo/shell/explainable.js
index 637d19d2bf7..4f32af22221 100644
--- a/src/mongo/shell/explainable.js
+++ b/src/mongo/shell/explainable.js
@@ -4,7 +4,6 @@
//
var Explainable = (function() {
-
var parseVerbosity = function(verbosity) {
// Truthy non-strings are interpreted as "allPlansExecution" verbosity.
if (verbosity && (typeof verbosity !== "string")) {
@@ -19,8 +18,10 @@ var Explainable = (function() {
// If we're here, then the verbosity is a string. We reject invalid strings.
if (verbosity !== "queryPlanner" && verbosity !== "executionStats" &&
verbosity !== "allPlansExecution") {
- throw Error("explain verbosity must be one of {" + "'queryPlanner'," +
- "'executionStats'," + "'allPlansExecution'}");
+ throw Error("explain verbosity must be one of {" +
+ "'queryPlanner'," +
+ "'executionStats'," +
+ "'allPlansExecution'}");
}
return verbosity;
diff --git a/src/mongo/shell/kms_aws.cpp b/src/mongo/shell/kms_aws.cpp
index a0c3ecffe06..b923a59355c 100644
--- a/src/mongo/shell/kms_aws.cpp
+++ b/src/mongo/shell/kms_aws.cpp
@@ -449,7 +449,7 @@ public:
}
};
-} // namspace
+} // namespace
MONGO_INITIALIZER(KMSRegister)(::mongo::InitializerContext* context) {
kms_message_init();
diff --git a/src/mongo/shell/kms_local.cpp b/src/mongo/shell/kms_local.cpp
index 628ea9ed9c2..32d5f760383 100644
--- a/src/mongo/shell/kms_local.cpp
+++ b/src/mongo/shell/kms_local.cpp
@@ -143,7 +143,7 @@ public:
}
};
-} // namspace
+} // namespace
MONGO_INITIALIZER(LocalKMSRegister)(::mongo::InitializerContext* context) {
KMSServiceController::registerFactory(KMSProviderEnum::local,
diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp
index bc3373def85..58bd4ea666e 100644
--- a/src/mongo/shell/linenoise.cpp
+++ b/src/mongo/shell/linenoise.cpp
@@ -126,16 +126,16 @@ using std::vector;
using std::unique_ptr;
-using linenoise_utf8::UChar8;
-using linenoise_utf8::UChar32;
-using linenoise_utf8::copyString8to32;
using linenoise_utf8::copyString32;
using linenoise_utf8::copyString32to8;
+using linenoise_utf8::copyString8to32;
using linenoise_utf8::strlen32;
using linenoise_utf8::strncmp32;
-using linenoise_utf8::write32;
-using linenoise_utf8::Utf8String;
+using linenoise_utf8::UChar32;
+using linenoise_utf8::UChar8;
using linenoise_utf8::Utf32String;
+using linenoise_utf8::Utf8String;
+using linenoise_utf8::write32;
struct linenoiseCompletions {
vector<Utf32String> completionStrings;
@@ -1234,7 +1234,7 @@ static UChar32 setMetaRoutine(UChar32 c) {
return doDispatch(c, initialDispatch);
}
-} // namespace EscapeSequenceProcessing // move these out of global namespace
+} // namespace EscapeSequenceProcessing
#endif // #ifndef _WIN32
diff --git a/src/mongo/shell/linenoise_utf8.h b/src/mongo/shell/linenoise_utf8.h
index d5d4c6db7d9..dca7a8b0ef4 100644
--- a/src/mongo/shell/linenoise_utf8.h
+++ b/src/mongo/shell/linenoise_utf8.h
@@ -141,10 +141,7 @@ struct UtfStringMixin {
UtfStringMixin() : _len(0), _cap(0), _chars(0) {}
UtfStringMixin(const UtfStringMixin& other) // copies like std::string
- : _len(other._len),
- _cap(other._len + 1),
- _chars(other._chars),
- _str(new char_t[_cap]) {
+ : _len(other._len), _cap(other._len + 1), _chars(other._chars), _str(new char_t[_cap]) {
memcpy(_str.get(), other._str.get(), _cap * sizeof(char_t));
}
diff --git a/src/mongo/shell/mk_wcwidth.cpp b/src/mongo/shell/mk_wcwidth.cpp
index cb4674344f5..1a09cc2e874 100644
--- a/src/mongo/shell/mk_wcwidth.cpp
+++ b/src/mongo/shell/mk_wcwidth.cpp
@@ -177,15 +177,15 @@ int mk_wcwidth(int ucs) {
return 1 +
(ucs >= 0x1100 &&
(ucs <= 0x115f || /* Hangul Jamo init. consonants */
- ucs == 0x2329 ||
- ucs == 0x232a || (ucs >= 0x2e80 && ucs <= 0xa4cf && ucs != 0x303f) || /* CJK ... Yi */
- (ucs >= 0xac00 && ucs <= 0xd7a3) || /* Hangul Syllables */
- (ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */
- (ucs >= 0xfe10 && ucs <= 0xfe19) || /* Vertical forms */
- (ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */
- (ucs >= 0xff00 && ucs <= 0xff60) || /* Fullwidth Forms */
- (ucs >= 0xffe0 && ucs <= 0xffe6) ||
- (ucs >= 0x20000 && ucs <= 0x2fffd) || (ucs >= 0x30000 && ucs <= 0x3fffd)));
+ ucs == 0x2329 || ucs == 0x232a ||
+ (ucs >= 0x2e80 && ucs <= 0xa4cf && ucs != 0x303f) || /* CJK ... Yi */
+ (ucs >= 0xac00 && ucs <= 0xd7a3) || /* Hangul Syllables */
+ (ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */
+ (ucs >= 0xfe10 && ucs <= 0xfe19) || /* Vertical forms */
+ (ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */
+ (ucs >= 0xff00 && ucs <= 0xff60) || /* Fullwidth Forms */
+ (ucs >= 0xffe0 && ucs <= 0xffe6) || (ucs >= 0x20000 && ucs <= 0x2fffd) ||
+ (ucs >= 0x30000 && ucs <= 0x3fffd)));
}
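
The reflowed expression above returns 2 for East Asian wide and fullwidth code points and 1 for everything else (zero-width and control characters are handled earlier in the function). A JavaScript re-expression of a few of the ranges, for illustration only:

    // Sketch of the double-width predicate; covers only a subset of the ranges above.
    function isDoubleWidth(ucs) {
        return ucs >= 0x1100 &&
            (ucs <= 0x115f ||                                      // Hangul Jamo init. consonants
             (ucs >= 0x2e80 && ucs <= 0xa4cf && ucs != 0x303f) ||  // CJK ... Yi
             (ucs >= 0xac00 && ucs <= 0xd7a3) ||                   // Hangul Syllables
             (ucs >= 0xff00 && ucs <= 0xff60));                    // Fullwidth Forms
    }
    isDoubleWidth(0x41);    // false: 'A' occupies one terminal column
    isDoubleWidth(0xac00);  // true: U+AC00 occupies two terminal columns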
diff --git a/src/mongo/shell/mongo.js b/src/mongo/shell/mongo.js
index 39dbd402f7d..481aff7c6ad 100644
--- a/src/mongo/shell/mongo.js
+++ b/src/mongo/shell/mongo.js
@@ -45,7 +45,7 @@ Mongo.prototype.getDB = function(name) {
// There is a weird issue where typeof(db._name) !== "string" when the db name
// is created from objects returned from native C++ methods.
// This hack ensures that the db._name is always a string.
- if (typeof(name) === "object") {
+ if (typeof (name) === "object") {
name = name.toString();
}
return new DB(this, name);
@@ -84,7 +84,6 @@ Mongo.prototype.getDBs = function(driverSession = this._getDefaultSession(),
filter = undefined,
nameOnly = undefined,
authorizedDatabases = undefined) {
-
return function(driverSession, filter, nameOnly, authorizedDatabases) {
'use strict';
@@ -227,7 +226,7 @@ Mongo.prototype.tojson = Mongo.prototype.toString;
* Note that this object only keeps a shallow copy of this array.
*/
Mongo.prototype.setReadPref = function(mode, tagSet) {
- if ((this._readPrefMode === "primary") && (typeof(tagSet) !== "undefined") &&
+ if ((this._readPrefMode === "primary") && (typeof (tagSet) !== "undefined") &&
(Object.keys(tagSet).length > 0)) {
// we allow empty arrays/objects or no tagSet for compatibility reasons
throw Error("Can not supply tagSet with readPref mode primary");
@@ -252,7 +251,7 @@ Mongo.prototype.getReadPrefTagSet = function() {
// Returns a readPreference object of the type expected by mongos.
Mongo.prototype.getReadPref = function() {
var obj = {}, mode, tagSet;
- if (typeof(mode = this.getReadPrefMode()) === "string") {
+ if (typeof (mode = this.getReadPrefMode()) === "string") {
obj.mode = mode;
} else {
return null;
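
Tying the two hunks above together: setReadPref() rejects a non-empty tagSet when the mode is "primary", and getReadPref() packages the stored mode (plus tags, further down) into the object mongos expects. A usage sketch, assuming a shell session:

    var conn = db.getMongo();
    conn.setReadPref("secondary", [{dc: "east"}]);
    conn.getReadPref();  // roughly {mode: "secondary", tags: [{dc: "east"}]}
    // conn.setReadPref("primary", [{dc: "east"}]);  // throws: tagSet not allowed with primary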
@@ -381,7 +380,8 @@ connect = function(url, user, pass) {
return db;
};
-/** deprecated, use writeMode below
+/**
+ * Deprecated: use writeMode below.
+ */
Mongo.prototype.useWriteCommands = function() {
@@ -410,7 +410,6 @@ Mongo.prototype.hasExplainCommand = function() {
*/
Mongo.prototype.writeMode = function() {
-
if ('_writeMode' in this) {
return this._writeMode;
}
@@ -539,8 +538,8 @@ Mongo.prototype.startSession = function startSession(options = {}) {
// Only log this message if we are running a test
if (typeof TestData === "object" && TestData.testName) {
jsTest.log("New session started with sessionID: " +
- tojsononeline(newDriverSession.getSessionId()) + " and options: " +
- tojsononeline(options));
+ tojsononeline(newDriverSession.getSessionId()) +
+ " and options: " + tojsononeline(options));
}
return newDriverSession;
@@ -560,7 +559,7 @@ Mongo.prototype._getDefaultSession = function getDefaultSession() {
this._setDummyDefaultSession();
} else {
print("ERROR: Implicit session failed: " + e.message);
- throw(e);
+ throw (e);
}
}
} else {
diff --git a/src/mongo/shell/query.js b/src/mongo/shell/query.js
index 4304903ca36..c067e9d3a4b 100644
--- a/src/mongo/shell/query.js
+++ b/src/mongo/shell/query.js
@@ -2,7 +2,6 @@
if (typeof DBQuery == "undefined") {
DBQuery = function(mongo, db, collection, ns, query, fields, limit, skip, batchSize, options) {
-
this._mongo = mongo; // 0
this._db = db; // 1
this._collection = collection; // 2
@@ -394,8 +393,8 @@ DBQuery.prototype.countReturn = function() {
};
/**
-* iterative count - only for testing
-*/
+ * iterative count - only for testing
+ */
DBQuery.prototype.itcount = function() {
var num = 0;
@@ -546,7 +545,6 @@ DBQuery.prototype.shellPrint = function() {
} catch (e) {
print(e);
}
-
};
DBQuery.prototype.toString = function() {
@@ -558,12 +556,12 @@ DBQuery.prototype.toString = function() {
//
/**
-* Get partial results from a mongos if some shards are down (instead of throwing an error).
-*
-* @method
-* @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
-* @return {DBQuery}
-*/
+ * Get partial results from a mongos if some shards are down (instead of throwing an error).
+ *
+ * @method
+ * @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
+ * @return {DBQuery}
+ */
DBQuery.prototype.allowPartialResults = function() {
this._checkModify();
this.addOption(DBQuery.Option.partial);
@@ -571,13 +569,13 @@ DBQuery.prototype.allowPartialResults = function() {
};
/**
-* The server normally times out idle cursors after an inactivity period (10 minutes)
-* to prevent excess memory use. Set this option to prevent that.
-*
-* @method
-* @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
-* @return {DBQuery}
-*/
+ * The server normally times out idle cursors after an inactivity period (10 minutes)
+ * to prevent excess memory use. Set this option to prevent that.
+ *
+ * @method
+ * @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
+ * @return {DBQuery}
+ */
DBQuery.prototype.noCursorTimeout = function() {
this._checkModify();
this.addOption(DBQuery.Option.noTimeout);
@@ -585,12 +583,12 @@ DBQuery.prototype.noCursorTimeout = function() {
};
/**
-* Internal replication use only - driver should not set
-*
-* @method
-* @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
-* @return {DBQuery}
-*/
+ * Internal replication use only - driver should not set
+ *
+ * @method
+ * @see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-query
+ * @return {DBQuery}
+ */
DBQuery.prototype.oplogReplay = function() {
this._checkModify();
this.addOption(DBQuery.Option.oplogReplay);
@@ -598,13 +596,13 @@ DBQuery.prototype.oplogReplay = function() {
};
/**
-* Limits the fields to return for all matching documents.
-*
-* @method
-* @see http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/
-* @param {object} document Document specifying the projection of the resulting documents.
-* @return {DBQuery}
-*/
+ * Limits the fields to return for all matching documents.
+ *
+ * @method
+ * @see http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/
+ * @param {object} document Document specifying the projection of the resulting documents.
+ * @return {DBQuery}
+ */
DBQuery.prototype.projection = function(document) {
this._checkModify();
this._fields = document;
@@ -612,14 +610,14 @@ DBQuery.prototype.projection = function(document) {
};
/**
-* Specify cursor as a tailable cursor, allowing to specify if it will use awaitData
-*
-* @method
-* @see http://docs.mongodb.org/manual/tutorial/create-tailable-cursor/
-* @param {boolean} [awaitData=true] cursor blocks for a few seconds to wait for data if no documents
-*found.
-* @return {DBQuery}
-*/
+ * Specify cursor as a tailable cursor, allowing to specify if it will use awaitData
+ *
+ * @method
+ * @see http://docs.mongodb.org/manual/tutorial/create-tailable-cursor/
+ * @param {boolean} [awaitData=true] cursor blocks for a few seconds to wait for data if no
+ * documents are found.
+ * @return {DBQuery}
+ */
DBQuery.prototype.tailable = function(awaitData) {
this._checkModify();
this.addOption(DBQuery.Option.tailable);
@@ -633,13 +631,13 @@ DBQuery.prototype.tailable = function(awaitData) {
};
/**
-* Specify a document containing modifiers for the query.
-*
-* @method
-* @see http://docs.mongodb.org/manual/reference/operator/query-modifier/
-* @param {object} document A document containing modifers to apply to the cursor.
-* @return {DBQuery}
-*/
+ * Specify a document containing modifiers for the query.
+ *
+ * @method
+ * @see http://docs.mongodb.org/manual/reference/operator/query-modifier/
+ * @param {object} document A document containing modifiers to apply to the cursor.
+ * @return {DBQuery}
+ */
DBQuery.prototype.modifiers = function(document) {
this._checkModify();
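
The doc comments rewrapped above all describe chainable cursor helpers: each one calls _checkModify(), flips a wire-protocol option bit (or, for projection(), stores the field spec), and returns the cursor. A representative chain, assuming a shell session:

    var cursor = db.coll.find({status: "open"})
                     .projection({_id: 1})      // limit the returned fields
                     .noCursorTimeout()         // survive the 10-minute idle timeout
                     .allowPartialResults();    // tolerate unreachable shards via mongos
    cursor.itcount();                           // drain the cursor iteratively and count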
@@ -804,16 +802,16 @@ DBCommandCursor.prototype._runGetMoreCommand = function() {
assert.commandWorked(cmdRes, () => "getMore command failed: " + tojson(cmdRes));
if (this._ns !== cmdRes.cursor.ns) {
- throw Error("unexpected collection in getMore response: " + this._ns + " != " +
- cmdRes.cursor.ns);
+ throw Error("unexpected collection in getMore response: " + this._ns +
+ " != " + cmdRes.cursor.ns);
}
if (!cmdRes.cursor.id.compare(NumberLong("0"))) {
this._cursorHandle.zeroCursorId();
this._cursorid = NumberLong("0");
} else if (this._cursorid.compare(cmdRes.cursor.id)) {
- throw Error("unexpected cursor id: " + this._cursorid.toString() + " != " +
- cmdRes.cursor.id.toString());
+ throw Error("unexpected cursor id: " + this._cursorid.toString() +
+ " != " + cmdRes.cursor.id.toString());
}
// If the command result represents a change stream cursor, update our postBatchResumeToken.
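
The two rewrapped throws above validate a getMore reply against the state the DBCommandCursor recorded when it was created. The reply being checked has roughly this shape (illustrative values, not captured output):

    var cmdRes = {
        cursor: {
            id: NumberLong("8012271619536333351"),  // must match this._cursorid, or 0 when exhausted
            ns: "test.coll",                        // must match this._ns
            nextBatch: []
        },
        ok: 1
    };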
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 1b177f5d773..0ea928edf30 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -282,13 +282,14 @@ var ReplSetTest = function(opts) {
if (status.members[i].name == node.host || status.members[i].name == node.name) {
for (var j = 0; j < states.length; j++) {
if (printStatus) {
- print("Status -- " + " current state: " + status.members[i][ind] +
+ print("Status -- " +
+ " current state: " + status.members[i][ind] +
", target state : " + states[j]);
}
- if (typeof(states[j]) != "number") {
- throw new Error("State was not an number -- type:" + typeof(states[j]) +
- ", value:" + states[j]);
+ if (typeof (states[j]) != "number") {
+ throw new Error("State was not an number -- type:" +
+ typeof (states[j]) + ", value:" + states[j]);
}
if (status.members[i][ind] == states[j]) {
foundState = states[j];
@@ -299,7 +300,6 @@ var ReplSetTest = function(opts) {
}
return false;
-
}, "waiting for state indicator " + ind + " for " + timeout + "ms", timeout);
// If we were waiting for the node to step down, wait until we can connect to it again,
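
The states being waited on must be numeric constants; the rewrapped throw above fires on anything else. A usage sketch (rst is a hypothetical ReplSetTest instance):

    rst.waitForState(rst.nodes[0], ReplSetTest.State.PRIMARY);  // numeric state constant
    // rst.waitForState(rst.nodes[0], "PRIMARY");  // would hit "State was not a number" above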
@@ -894,7 +894,6 @@ var ReplSetTest = function(opts) {
* and returns the 'config' object unchanged. Does not affect 'config' when running CSRS.
*/
this._updateConfigIfNotDurable = function(config) {
-
// Get a replica set node (check for use of bridge).
var replNode = _useBridge ? _unbridgedNodes[0] : this.nodes[0];
@@ -936,9 +935,9 @@ var ReplSetTest = function(opts) {
const result = assert.commandWorkedOrFailedWithCode(
master.runCommand(cmd),
[
- ErrorCodes.NodeNotFound,
- ErrorCodes.NewReplicaSetConfigurationIncompatible,
- ErrorCodes.InterruptedDueToReplStateChange
+ ErrorCodes.NodeNotFound,
+ ErrorCodes.NewReplicaSetConfigurationIncompatible,
+ ErrorCodes.InterruptedDueToReplStateChange
],
errorMsg);
return result.ok;
@@ -1032,7 +1031,7 @@ var ReplSetTest = function(opts) {
} else {
Object.keys(self.nodeOptions).forEach(function(key, index) {
let val = self.nodeOptions[key];
- if (typeof(val) === "object" &&
+ if (typeof (val) === "object" &&
(val.hasOwnProperty("shardsvr") ||
val.hasOwnProperty("binVersion") &&
// Should not wait for keys if version is less than 3.6
@@ -1043,7 +1042,7 @@ var ReplSetTest = function(opts) {
});
if (self.startOptions != undefined) {
let val = self.startOptions;
- if (typeof(val) === "object" &&
+ if (typeof (val) === "object" &&
(val.hasOwnProperty("shardsvr") ||
val.hasOwnProperty("binVersion") &&
// Should not wait for keys if version is less than 3.6
@@ -1574,7 +1573,8 @@ var ReplSetTest = function(opts) {
this.getHashesUsingSessions = function(sessions, dbName, {
filterCapped: filterCapped = true,
- filterMapReduce: filterMapReduce = true, readAtClusterTime,
+ filterMapReduce: filterMapReduce = true,
+ readAtClusterTime,
} = {}) {
return sessions.map(session => {
const commandObj = {dbHash: 1};
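
getHashesUsingSessions(), whose option list was reflowed above, runs dbHash through each supplied session; capped and mapReduce collections are filtered out by default, and readAtClusterTime, when given, pins every node to the same snapshot. A hedged sketch (rst and clusterTime are hypothetical):

    var sessions = [rst.getPrimary(), rst.getSecondary()].map(conn => conn.startSession());
    var hashes = rst.getHashesUsingSessions(sessions, "test", {readAtClusterTime: clusterTime});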
@@ -1906,7 +1906,8 @@ var ReplSetTest = function(opts) {
primarySession, secondarySession, dbName, collName);
for (let {
- primary: primaryDoc, secondary: secondaryDoc,
+ primary: primaryDoc,
+ secondary: secondaryDoc,
} of diff.docsWithDifferentContents) {
print(`Mismatching documents between the primary ${primary.host}` +
` and the secondary ${secondary.host}:`);
@@ -2004,7 +2005,6 @@ var ReplSetTest = function(opts) {
dumpCollectionDiff(primary, secondary, dbName, collName);
success = false;
}
-
});
// Check that collection information is consistent on the primary and
@@ -2373,7 +2373,7 @@ var ReplSetTest = function(opts) {
// Turn off periodic noop writes for replica sets by default.
options.setParameter = options.setParameter || {};
- if (typeof(options.setParameter) === "string") {
+ if (typeof (options.setParameter) === "string") {
var eqIdx = options.setParameter.indexOf("=");
if (eqIdx != -1) {
var param = options.setParameter.substring(0, eqIdx);
diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js
index f460b5c0d5e..c72e444c1a7 100644
--- a/src/mongo/shell/servers.js
+++ b/src/mongo/shell/servers.js
@@ -2,1112 +2,1192 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
myPort;
(function() {
- "use strict";
+"use strict";
- var shellVersion = version;
+var shellVersion = version;
- // Record the exit codes of mongod and mongos processes that crashed during startup keyed by
- // port. This map is cleared when MongoRunner._startWithArgs and MongoRunner.stopMongod/s are
- // called.
- var serverExitCodeMap = {};
+// Record the exit codes of mongod and mongos processes that crashed during startup keyed by
+// port. This map is cleared when MongoRunner._startWithArgs and MongoRunner.stopMongod/s are
+// called.
+var serverExitCodeMap = {};
- var _parsePath = function() {
- var dbpath = "";
- for (var i = 0; i < arguments.length; ++i)
- if (arguments[i] == "--dbpath")
- dbpath = arguments[i + 1];
+var _parsePath = function() {
+ var dbpath = "";
+ for (var i = 0; i < arguments.length; ++i)
+ if (arguments[i] == "--dbpath")
+ dbpath = arguments[i + 1];
- if (dbpath == "")
- throw Error("No dbpath specified");
+ if (dbpath == "")
+ throw Error("No dbpath specified");
- return dbpath;
- };
+ return dbpath;
+};
- var _parsePort = function() {
- var port = "";
- for (var i = 0; i < arguments.length; ++i)
- if (arguments[i] == "--port")
- port = arguments[i + 1];
+var _parsePort = function() {
+ var port = "";
+ for (var i = 0; i < arguments.length; ++i)
+ if (arguments[i] == "--port")
+ port = arguments[i + 1];
- if (port == "")
- throw Error("No port specified");
- return port;
- };
+ if (port == "")
+ throw Error("No port specified");
+ return port;
+};
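
Both helpers above scan an argv-style argument list (via the implicit arguments object) for the value that follows a flag, throwing if the flag is absent. For illustration, inside servers.js:

    _parsePath("mongod", "--dbpath", "/data/db/test", "--port", "20001");  // "/data/db/test"
    _parsePort("mongod", "--dbpath", "/data/db/test", "--port", "20001");  // "20001"
    // _parsePath("mongod", "--port", "20001");  // throws Error("No dbpath specified")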
- var createMongoArgs = function(binaryName, args) {
- if (!Array.isArray(args)) {
- throw new Error("The second argument to createMongoArgs must be an array");
- }
+var createMongoArgs = function(binaryName, args) {
+ if (!Array.isArray(args)) {
+ throw new Error("The second argument to createMongoArgs must be an array");
+ }
- var fullArgs = [binaryName];
-
- if (args.length == 1 && isObject(args[0])) {
- var o = args[0];
- for (var k in o) {
- if (o.hasOwnProperty(k)) {
- if (k == "v" && isNumber(o[k])) {
- var n = o[k];
- if (n > 0) {
- if (n > 10)
- n = 10;
- var temp = "-";
- while (n-- > 0)
- temp += "v";
- fullArgs.push(temp);
- }
- } else {
- fullArgs.push("--" + k);
- if (o[k] != "")
- fullArgs.push("" + o[k]);
+ var fullArgs = [binaryName];
+
+ if (args.length == 1 && isObject(args[0])) {
+ var o = args[0];
+ for (var k in o) {
+ if (o.hasOwnProperty(k)) {
+ if (k == "v" && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10)
+ n = 10;
+ var temp = "-";
+ while (n-- > 0)
+ temp += "v";
+ fullArgs.push(temp);
}
+ } else {
+ fullArgs.push("--" + k);
+ if (o[k] != "")
+ fullArgs.push("" + o[k]);
}
}
- } else {
- for (var i = 0; i < args.length; i++)
- fullArgs.push(args[i]);
}
+ } else {
+ for (var i = 0; i < args.length; i++)
+ fullArgs.push(args[i]);
+ }
- return fullArgs;
- };
-
- MongoRunner = function() {};
-
- MongoRunner.dataDir = "/data/db";
- MongoRunner.dataPath = "/data/db/";
-
- MongoRunner.mongodPath = "mongod";
- MongoRunner.mongosPath = "mongos";
- MongoRunner.mongoShellPath = "mongo";
-
- MongoRunner.VersionSub = function(pattern, version) {
- this.pattern = pattern;
- this.version = version;
- };
-
- /**
- * Returns an array of version elements from a version string.
- *
- * "3.3.4-fade3783" -> ["3", "3", "4-fade3783" ]
- * "3.2" -> [ "3", "2" ]
- * 3 -> exception: versions must have at least two components.
- */
- var convertVersionStringToArray = function(versionString) {
- assert("" !== versionString, "Version strings must not be empty");
- var versionArray = versionString.split('.');
-
- assert.gt(versionArray.length,
- 1,
- "MongoDB versions must have at least two components to compare, but \"" +
- versionString + "\" has " + versionArray.length);
- return versionArray;
- };
-
- /**
- * Returns the major version string from a version string.
- *
- * 3.3.4-fade3783 -> 3.3
- * 3.2 -> 3.2
- * 3 -> exception: versions must have at least two components.
- */
- var extractMajorVersionFromVersionString = function(versionString) {
- return convertVersionStringToArray(versionString).slice(0, 2).join('.');
- };
-
- // These patterns allow substituting the binary versions used for each version string to support
- // the
- // dev/stable MongoDB release cycle.
- //
- // If you add a new version substitution to this list, you should add it to the lists of
- // versions being checked in 'verify_versions_test.js' to verify it is susbstituted correctly.
- MongoRunner.binVersionSubs = [
- new MongoRunner.VersionSub("latest", shellVersion()),
- new MongoRunner.VersionSub(extractMajorVersionFromVersionString(shellVersion()),
- shellVersion()),
- // To-be-updated when we branch for the next release.
- new MongoRunner.VersionSub("last-stable", "4.0")
- ];
-
- MongoRunner.getBinVersionFor = function(version) {
- if (version instanceof MongoRunner.versionIterator.iterator) {
- version = version.current();
- }
-
- if (version == null)
- version = "";
- version = version.trim();
- if (version === "")
- version = "latest";
-
- // See if this version is affected by version substitutions
- for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
- var sub = MongoRunner.binVersionSubs[i];
- if (sub.pattern == version) {
- return sub.version;
- }
- }
-
- return version;
- };
-
- /**
- * Returns true if two version strings could represent the same version. This is true
- * if, after passing the versions through getBinVersionFor, the versions have the
- * same value for each version component up through the length of the shorter version.
- *
- * That is, 3.2.4 compares equal to 3.2, but 3.2.4 does not compare equal to 3.2.3.
- */
- MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+ return fullArgs;
+};
+
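
createMongoArgs(), de-indented above, flattens a single options object into an argv array, expanding a numeric v into a repeated -v flag (capped at ten). Expected behavior, for illustration:

    createMongoArgs("mongod", [{port: 20000, v: 3}]);
    // => ["mongod", "--port", "20000", "-vvv"]  (key order follows the object)
    createMongoArgs("mongod", ["--port", "20000"]);  // arrays pass through after the binary name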
+MongoRunner = function() {};
+
+MongoRunner.dataDir = "/data/db";
+MongoRunner.dataPath = "/data/db/";
+
+MongoRunner.mongodPath = "mongod";
+MongoRunner.mongosPath = "mongos";
+MongoRunner.mongoShellPath = "mongo";
+
+MongoRunner.VersionSub = function(pattern, version) {
+ this.pattern = pattern;
+ this.version = version;
+};
+
+/**
+ * Returns an array of version elements from a version string.
+ *
+ * "3.3.4-fade3783" -> ["3", "3", "4-fade3783" ]
+ * "3.2" -> [ "3", "2" ]
+ * 3 -> exception: versions must have at least two components.
+ */
+var convertVersionStringToArray = function(versionString) {
+ assert("" !== versionString, "Version strings must not be empty");
+ var versionArray = versionString.split('.');
+
+ assert.gt(versionArray.length,
+ 1,
+ "MongoDB versions must have at least two components to compare, but \"" +
+ versionString + "\" has " + versionArray.length);
+ return versionArray;
+};
+
+/**
+ * Returns the major version string from a version string.
+ *
+ * 3.3.4-fade3783 -> 3.3
+ * 3.2 -> 3.2
+ * 3 -> exception: versions must have at least two components.
+ */
+var extractMajorVersionFromVersionString = function(versionString) {
+ return convertVersionStringToArray(versionString).slice(0, 2).join('.');
+};
+
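
Restating the two helpers' documented behavior above as concrete calls:

    convertVersionStringToArray("3.3.4-fade3783");           // ["3", "3", "4-fade3783"]
    extractMajorVersionFromVersionString("3.3.4-fade3783");  // "3.3"
    // convertVersionStringToArray("3");  // fails the assert: at least two components required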
+// These patterns allow substituting the binary versions used for each version string to
+// support the dev/stable MongoDB release cycle.
+//
+// If you add a new version substitution to this list, you should add it to the lists of
+// versions being checked in 'verify_versions_test.js' to verify it is substituted correctly.
+MongoRunner.binVersionSubs = [
+ new MongoRunner.VersionSub("latest", shellVersion()),
+ new MongoRunner.VersionSub(extractMajorVersionFromVersionString(shellVersion()),
+ shellVersion()),
+ // To-be-updated when we branch for the next release.
+ new MongoRunner.VersionSub("last-stable", "4.0")
+];
+
+MongoRunner.getBinVersionFor = function(version) {
+ if (version instanceof MongoRunner.versionIterator.iterator) {
+ version = version.current();
+ }
- // Check for invalid version strings first.
- convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
- convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
+ if (version == null)
+ version = "";
+ version = version.trim();
+ if (version === "")
+ version = "latest";
- try {
- return (0 === MongoRunner.compareBinVersions(versionA, versionB));
- } catch (err) {
- // compareBinVersions() throws an error if two versions differ only by the git hash.
- return false;
+ // See if this version is affected by version substitutions
+ for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
+ var sub = MongoRunner.binVersionSubs[i];
+ if (sub.pattern == version) {
+ return sub.version;
}
- };
-
- /**
- * Compares two version strings and returns:
- * 1, if the first is more recent
- * 0, if they are equal
- * -1, if the first is older
- *
- * Note that this function only compares up to the length of the shorter version.
- * Because of this, minor versions will compare equal to the major versions they stem
- * from, but major-major and minor-minor version pairs will undergo strict comparison.
- */
- MongoRunner.compareBinVersions = function(versionA, versionB) {
-
- let stringA = versionA;
- let stringB = versionB;
-
- versionA = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
- versionB = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
-
- // Treat the githash as a separate element, if it's present.
- versionA.push(...versionA.pop().split("-"));
- versionB.push(...versionB.pop().split("-"));
+ }
- var elementsToCompare = Math.min(versionA.length, versionB.length);
+ return version;
+};
+
+/**
+ * Returns true if two version strings could represent the same version. This is true
+ * if, after passing the versions through getBinVersionFor, the versions have the
+ * same value for each version component up through the length of the shorter version.
+ *
+ * That is, 3.2.4 compares equal to 3.2, but 3.2.4 does not compare equal to 3.2.3.
+ */
+MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+ // Check for invalid version strings first.
+ convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
+ convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
+
+ try {
+ return (0 === MongoRunner.compareBinVersions(versionA, versionB));
+ } catch (err) {
+ // compareBinVersions() throws an error if two versions differ only by the git hash.
+ return false;
+ }
+};
- for (var i = 0; i < elementsToCompare; ++i) {
- var elementA = versionA[i];
- var elementB = versionB[i];
+/**
+ * Compares two version strings and returns:
+ * 1, if the first is more recent
+ * 0, if they are equal
+ * -1, if the first is older
+ *
+ * Note that this function only compares up to the length of the shorter version.
+ * Because of this, minor versions will compare equal to the major versions they stem
+ * from, but major-major and minor-minor version pairs will undergo strict comparison.
+ */
+MongoRunner.compareBinVersions = function(versionA, versionB) {
+ let stringA = versionA;
+ let stringB = versionB;
- if (elementA === elementB) {
- continue;
- }
+ versionA = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionA));
+ versionB = convertVersionStringToArray(MongoRunner.getBinVersionFor(versionB));
- var numA = parseInt(elementA);
- var numB = parseInt(elementB);
+ // Treat the githash as a separate element, if it's present.
+ versionA.push(...versionA.pop().split("-"));
+ versionB.push(...versionB.pop().split("-"));
- assert(!isNaN(numA) && !isNaN(numB), "Cannot compare non-equal non-numeric versions.");
+ var elementsToCompare = Math.min(versionA.length, versionB.length);
- if (numA > numB) {
- return 1;
- } else if (numA < numB) {
- return -1;
- }
+ for (var i = 0; i < elementsToCompare; ++i) {
+ var elementA = versionA[i];
+ var elementB = versionB[i];
- assert(false, `Unreachable case. Provided versions: {${stringA}, ${stringB}}`);
+ if (elementA === elementB) {
+ continue;
}
- return 0;
- };
-
- MongoRunner.logicalOptions = {
- runId: true,
- env: true,
- pathOpts: true,
- remember: true,
- noRemember: true,
- appendOptions: true,
- restart: true,
- noCleanData: true,
- cleanData: true,
- startClean: true,
- forceLock: true,
- useLogFiles: true,
- logFile: true,
- useHostName: true,
- useHostname: true,
- noReplSet: true,
- forgetPort: true,
- arbiter: true,
- noJournal: true,
- binVersion: true,
- waitForConnect: true,
- bridgeOptions: true,
- skipValidation: true,
- };
+ var numA = parseInt(elementA);
+ var numB = parseInt(elementB);
- MongoRunner.toRealPath = function(path, pathOpts) {
+ assert(!isNaN(numA) && !isNaN(numB), "Cannot compare non-equal non-numeric versions.");
- // Replace all $pathOptions with actual values
- pathOpts = pathOpts || {};
- path = path.replace(/\$dataPath/g, MongoRunner.dataPath);
- path = path.replace(/\$dataDir/g, MongoRunner.dataDir);
- for (var key in pathOpts) {
- path = path.replace(RegExp("\\$" + RegExp.escape(key), "g"), pathOpts[key]);
+ if (numA > numB) {
+ return 1;
+ } else if (numA < numB) {
+ return -1;
}
- // Relative path
- // Detect Unix and Windows absolute paths
- // as well as Windows drive letters
- // Also captures Windows UNC paths
+ assert(false, `Unreachable case. Provided versions: {${stringA}, ${stringB}}`);
+ }
- if (!path.match(/^(\/|\\|[A-Za-z]:)/)) {
- if (path != "" && !path.endsWith("/"))
- path += "/";
+ return 0;
+};
+
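
Worked examples of the comparison semantics documented above; only as many components as the shorter version has are compared:

    MongoRunner.compareBinVersions("3.2.4", "3.2");     // 0: compared only up to "3.2"
    MongoRunner.compareBinVersions("3.2.4", "3.2.3");   // 1: strict minor-minor comparison
    MongoRunner.areBinVersionsTheSame("3.2.4", "3.2");  // true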
+MongoRunner.logicalOptions = {
+ runId: true,
+ env: true,
+ pathOpts: true,
+ remember: true,
+ noRemember: true,
+ appendOptions: true,
+ restart: true,
+ noCleanData: true,
+ cleanData: true,
+ startClean: true,
+ forceLock: true,
+ useLogFiles: true,
+ logFile: true,
+ useHostName: true,
+ useHostname: true,
+ noReplSet: true,
+ forgetPort: true,
+ arbiter: true,
+ noJournal: true,
+ binVersion: true,
+ waitForConnect: true,
+ bridgeOptions: true,
+ skipValidation: true,
+};
+
+MongoRunner.toRealPath = function(path, pathOpts) {
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {};
+ path = path.replace(/\$dataPath/g, MongoRunner.dataPath);
+ path = path.replace(/\$dataDir/g, MongoRunner.dataDir);
+ for (var key in pathOpts) {
+ path = path.replace(RegExp("\\$" + RegExp.escape(key), "g"), pathOpts[key]);
+ }
- path = MongoRunner.dataPath + path;
- }
+ // Relative path
+ // Detect Unix and Windows absolute paths
+ // as well as Windows drive letters
+ // Also captures Windows UNC paths
- return path;
+ if (!path.match(/^(\/|\\|[A-Za-z]:)/)) {
+ if (path != "" && !path.endsWith("/"))
+ path += "/";
- };
+ path = MongoRunner.dataPath + path;
+ }
- MongoRunner.toRealDir = function(path, pathOpts) {
+ return path;
+};
- path = MongoRunner.toRealPath(path, pathOpts);
+MongoRunner.toRealDir = function(path, pathOpts) {
+ path = MongoRunner.toRealPath(path, pathOpts);
- if (path.endsWith("/"))
- path = path.substring(0, path.length - 1);
+ if (path.endsWith("/"))
+ path = path.substring(0, path.length - 1);
- return path;
- };
+ return path;
+};
- MongoRunner.toRealFile = MongoRunner.toRealDir;
+MongoRunner.toRealFile = MongoRunner.toRealDir;
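
toRealPath() above substitutes $dataPath, $dataDir, and any $-prefixed pathOpts key, then roots relative results under MongoRunner.dataPath; toRealDir()/toRealFile() additionally strip a trailing slash. For illustration:

    MongoRunner.toRealPath("$dataDir/mongod-$port", {port: "20000"});  // "/data/db/mongod-20000"
    MongoRunner.toRealDir("mydir/", {});                               // "/data/db/mydir"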
- /**
- * Returns an iterator object which yields successive versions on calls to advance(), starting
- * from a random initial position, from an array of versions.
- *
- * If passed a single version string or an already-existing version iterator, just returns the
- * object itself, since it will yield correctly on calls to advance().
- *
- * @param {Array.<String>}|{String}|{versionIterator}
- */
- MongoRunner.versionIterator = function(arr, isRandom) {
+/**
+ * Returns an iterator object which yields successive versions on calls to advance(), starting
+ * from a random initial position, from an array of versions.
+ *
+ * If passed a single version string or an already-existing version iterator, just returns the
+ * object itself, since it will yield correctly on calls to advance().
+ *
+ * @param {Array.<String>}|{String}|{versionIterator}
+ */
+MongoRunner.versionIterator = function(arr, isRandom) {
+ // If this isn't an array of versions, or is already an iterator, just use it
+ if (typeof arr == "string")
+ return arr;
+ if (arr.isVersionIterator)
+ return arr;
- // If this isn't an array of versions, or is already an iterator, just use it
- if (typeof arr == "string")
- return arr;
- if (arr.isVersionIterator)
- return arr;
+ if (isRandom == undefined)
+ isRandom = false;
- if (isRandom == undefined)
- isRandom = false;
+ // Starting pos
+ var i = isRandom ? parseInt(Random.rand() * arr.length) : 0;
- // Starting pos
- var i = isRandom ? parseInt(Random.rand() * arr.length) : 0;
+ return new MongoRunner.versionIterator.iterator(i, arr);
+};
- return new MongoRunner.versionIterator.iterator(i, arr);
+MongoRunner.versionIterator.iterator = function(i, arr) {
+ if (!Array.isArray(arr)) {
+ throw new Error("Expected an array for the second argument, but got: " + tojson(arr));
+ }
+
+ this.current = function current() {
+ return arr[i];
};
- MongoRunner.versionIterator.iterator = function(i, arr) {
- if (!Array.isArray(arr)) {
- throw new Error("Expected an array for the second argument, but got: " + tojson(arr));
- }
+ // We define the toString() method as an alias for current() so that concatenating a version
+ // iterator with a string returns the next version in the list without introducing any
+ // side-effects.
+ this.toString = this.current;
- this.current = function current() {
- return arr[i];
- };
+ this.advance = function advance() {
+ i = (i + 1) % arr.length;
+ };
- // We define the toString() method as an alias for current() so that concatenating a version
- // iterator with a string returns the next version in the list without introducing any
- // side-effects.
- this.toString = this.current;
+ this.isVersionIterator = true;
+};
+
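
A usage sketch for the iterator defined above; because toString() aliases current(), concatenating the iterator into a string yields the current version without advancing it:

    var iter = MongoRunner.versionIterator(["last-stable", "latest"]);
    iter.current();                             // "last-stable" (non-random start is index 0)
    print("starting with binVersion " + iter);  // concatenation calls toString() === current()
    iter.advance();
    iter.current();                             // "latest"
    MongoRunner.versionIterator("4.0");         // plain strings pass through unchanged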
+/**
+ * Converts the args object by pairing all keys with their value and appending
+ * dash-dash (--) to the keys. The only exceptions to this rule are keys that
+ * are defined in MongoRunner.logicalOptions, which will be ignored.
+ *
+ * @param {string} binaryName
+ * @param {Object} args
+ *
+ * @return {Array.<String>} an array of parameter strings that can be passed
+ * to the binary.
+ */
+MongoRunner.arrOptions = function(binaryName, args) {
+ var fullArgs = [""];
+
+ // isObject returns true even if "args" is an array, so the else branch of this statement is
+ // dead code. See SERVER-14220.
+ if (isObject(args) || (args.length == 1 && isObject(args[0]))) {
+ var o = isObject(args) ? args : args[0];
+
+ // If we've specified a particular binary version, use that
+ if (o.binVersion && o.binVersion != "" && o.binVersion != shellVersion()) {
+ binaryName += "-" + o.binVersion;
+ }
+
+ // Manage legacy options
+ var isValidOptionForBinary = function(option, value) {
+ if (!o.binVersion)
+ return true;
- this.advance = function advance() {
- i = (i + 1) % arr.length;
+ return true;
};
- this.isVersionIterator = true;
+ var addOptionsToFullArgs = function(k, v) {
+ if (v === undefined || v === null)
+ return;
- };
+ fullArgs.push("--" + k);
- /**
- * Converts the args object by pairing all keys with their value and appending
- * dash-dash (--) to the keys. The only exception to this rule are keys that
- * are defined in MongoRunner.logicalOptions, of which they will be ignored.
- *
- * @param {string} binaryName
- * @param {Object} args
- *
- * @return {Array.<String>} an array of parameter strings that can be passed
- * to the binary.
- */
- MongoRunner.arrOptions = function(binaryName, args) {
-
- var fullArgs = [""];
-
- // isObject returns true even if "args" is an array, so the else branch of this statement is
- // dead code. See SERVER-14220.
- if (isObject(args) || (args.length == 1 && isObject(args[0]))) {
- var o = isObject(args) ? args : args[0];
-
- // If we've specified a particular binary version, use that
- if (o.binVersion && o.binVersion != "" && o.binVersion != shellVersion()) {
- binaryName += "-" + o.binVersion;
+ if (v != "") {
+ fullArgs.push("" + v);
}
+ };
- // Manage legacy options
- var isValidOptionForBinary = function(option, value) {
-
- if (!o.binVersion)
- return true;
-
- return true;
- };
-
- var addOptionsToFullArgs = function(k, v) {
- if (v === undefined || v === null)
- return;
-
- fullArgs.push("--" + k);
-
- if (v != "") {
- fullArgs.push("" + v);
- }
- };
-
- for (var k in o) {
- // Make sure our logical option should be added to the array of options
- if (!o.hasOwnProperty(k) || k in MongoRunner.logicalOptions ||
- !isValidOptionForBinary(k, o[k]))
- continue;
+ for (var k in o) {
+ // Make sure our logical option should be added to the array of options
+ if (!o.hasOwnProperty(k) || k in MongoRunner.logicalOptions ||
+ !isValidOptionForBinary(k, o[k]))
+ continue;
- if ((k == "v" || k == "verbose") && isNumber(o[k])) {
- var n = o[k];
- if (n > 0) {
- if (n > 10)
- n = 10;
- var temp = "-";
- while (n-- > 0)
- temp += "v";
- fullArgs.push(temp);
- }
- } else if (k === "setParameter" && isObject(o[k])) {
- // If the value associated with the setParameter option is an object, we want
- // to add all key-value pairs in that object as separate --setParameters.
- Object.keys(o[k]).forEach(function(paramKey) {
- addOptionsToFullArgs(k, "" + paramKey + "=" + o[k][paramKey]);
- });
- } else {
- addOptionsToFullArgs(k, o[k]);
+ if ((k == "v" || k == "verbose") && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10)
+ n = 10;
+ var temp = "-";
+ while (n-- > 0)
+ temp += "v";
+ fullArgs.push(temp);
}
+ } else if (k === "setParameter" && isObject(o[k])) {
+ // If the value associated with the setParameter option is an object, we want
+ // to add all key-value pairs in that object as separate --setParameters.
+ Object.keys(o[k]).forEach(function(paramKey) {
+ addOptionsToFullArgs(k, "" + paramKey + "=" + o[k][paramKey]);
+ });
+ } else {
+ addOptionsToFullArgs(k, o[k]);
}
- } else {
- for (var i = 0; i < args.length; i++)
- fullArgs.push(args[i]);
}
+ } else {
+ for (var i = 0; i < args.length; i++)
+ fullArgs.push(args[i]);
+ }
- fullArgs[0] = binaryName;
- return fullArgs;
- };
-
- MongoRunner.arrToOpts = function(arr) {
+ fullArgs[0] = binaryName;
+ return fullArgs;
+};
- var opts = {};
- for (var i = 1; i < arr.length; i++) {
- if (arr[i].startsWith("-")) {
- var opt = arr[i].replace(/^-/, "").replace(/^-/, "");
+MongoRunner.arrToOpts = function(arr) {
+ var opts = {};
+ for (var i = 1; i < arr.length; i++) {
+ if (arr[i].startsWith("-")) {
+ var opt = arr[i].replace(/^-/, "").replace(/^-/, "");
- if (arr.length > i + 1 && !arr[i + 1].startsWith("-")) {
- opts[opt] = arr[i + 1];
- i++;
- } else {
- opts[opt] = "";
- }
+ if (arr.length > i + 1 && !arr[i + 1].startsWith("-")) {
+ opts[opt] = arr[i + 1];
+ i++;
+ } else {
+ opts[opt] = "";
+ }
- if (opt.replace(/v/g, "") == "") {
- opts["verbose"] = opt.length;
- }
+ if (opt.replace(/v/g, "") == "") {
+ opts["verbose"] = opt.length;
}
}
+ }
- return opts;
- };
+ return opts;
+};
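
arrOptions() and arrToOpts(), above, are rough inverses: one flattens an options object into argv form (skipping MongoRunner.logicalOptions keys and expanding setParameter objects), the other parses argv back into an object. For illustration:

    var argv = MongoRunner.arrOptions("mongod", {port: 20000, setParameter: {enableTestCommands: 1}});
    // => ["mongod", "--port", "20000", "--setParameter", "enableTestCommands=1"]
    MongoRunner.arrToOpts(argv);
    // => {port: "20000", setParameter: "enableTestCommands=1"}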
- MongoRunner.savedOptions = {};
+MongoRunner.savedOptions = {};
- MongoRunner.mongoOptions = function(opts) {
- // Don't remember waitForConnect
- var waitForConnect = opts.waitForConnect;
- delete opts.waitForConnect;
+MongoRunner.mongoOptions = function(opts) {
+ // Don't remember waitForConnect
+ var waitForConnect = opts.waitForConnect;
+ delete opts.waitForConnect;
- // If we're a mongo object
- if (opts.getDB) {
- opts = {restart: opts.runId};
- }
+ // If we're a mongo object
+ if (opts.getDB) {
+ opts = {restart: opts.runId};
+ }
- // Initialize and create a copy of the opts
- opts = Object.merge(opts || {}, {});
+ // Initialize and create a copy of the opts
+ opts = Object.merge(opts || {}, {});
- if (!opts.restart)
- opts.restart = false;
+ if (!opts.restart)
+ opts.restart = false;
- // RunId can come from a number of places
- // If restart is passed as an old connection
- if (opts.restart && opts.restart.getDB) {
- opts.runId = opts.restart.runId;
- opts.restart = true;
- }
- // If it's the runId itself
- else if (isObject(opts.restart)) {
- opts.runId = opts.restart;
- opts.restart = true;
- }
+ // RunId can come from a number of places
+ // If restart is passed as an old connection
+ if (opts.restart && opts.restart.getDB) {
+ opts.runId = opts.restart.runId;
+ opts.restart = true;
+ }
+ // If it's the runId itself
+ else if (isObject(opts.restart)) {
+ opts.runId = opts.restart;
+ opts.restart = true;
+ }
- if (isObject(opts.remember)) {
- opts.runId = opts.remember;
- opts.remember = true;
- } else if (opts.remember == undefined) {
- // Remember by default if we're restarting
- opts.remember = opts.restart;
- }
+ if (isObject(opts.remember)) {
+ opts.runId = opts.remember;
+ opts.remember = true;
+ } else if (opts.remember == undefined) {
+ // Remember by default if we're restarting
+ opts.remember = opts.restart;
+ }
- // If we passed in restart : <conn> or runId : <conn>
- if (isObject(opts.runId) && opts.runId.runId)
- opts.runId = opts.runId.runId;
+ // If we passed in restart : <conn> or runId : <conn>
+ if (isObject(opts.runId) && opts.runId.runId)
+ opts.runId = opts.runId.runId;
- if (opts.restart && opts.remember) {
- opts = Object.merge(MongoRunner.savedOptions[opts.runId], opts);
- }
+ if (opts.restart && opts.remember) {
+ opts = Object.merge(MongoRunner.savedOptions[opts.runId], opts);
+ }
- // Create a new runId
- opts.runId = opts.runId || ObjectId();
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId();
- if (opts.forgetPort) {
- delete opts.port;
- }
+ if (opts.forgetPort) {
+ delete opts.port;
+ }
- // Normalize and get the binary version to use
- if (opts.hasOwnProperty('binVersion')) {
- if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
- // Advance the version iterator so that subsequent calls to
- // MongoRunner.mongoOptions() use the next version in the list.
- const iterator = opts.binVersion;
- opts.binVersion = iterator.current();
- iterator.advance();
- }
- opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+ // Normalize and get the binary version to use
+ if (opts.hasOwnProperty('binVersion')) {
+ if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
+ // Advance the version iterator so that subsequent calls to
+ // MongoRunner.mongoOptions() use the next version in the list.
+ const iterator = opts.binVersion;
+ opts.binVersion = iterator.current();
+ iterator.advance();
}
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+ }
- // Default for waitForConnect is true
- opts.waitForConnect =
- (waitForConnect == undefined || waitForConnect == null) ? true : waitForConnect;
+ // Default for waitForConnect is true
+ opts.waitForConnect =
+ (waitForConnect == undefined || waitForConnect == null) ? true : waitForConnect;
- opts.port = opts.port || allocatePort();
+ opts.port = opts.port || allocatePort();
- opts.pathOpts =
- Object.merge(opts.pathOpts || {}, {port: "" + opts.port, runId: "" + opts.runId});
+ opts.pathOpts =
+ Object.merge(opts.pathOpts || {}, {port: "" + opts.port, runId: "" + opts.runId});
- var shouldRemember =
- (!opts.restart && !opts.noRemember) || (opts.restart && opts.appendOptions);
- if (shouldRemember) {
- MongoRunner.savedOptions[opts.runId] = Object.merge(opts, {});
- }
+ var shouldRemember =
+ (!opts.restart && !opts.noRemember) || (opts.restart && opts.appendOptions);
+ if (shouldRemember) {
+ MongoRunner.savedOptions[opts.runId] = Object.merge(opts, {});
+ }
- if (jsTestOptions().networkMessageCompressors) {
- opts.networkMessageCompressors = jsTestOptions().networkMessageCompressors;
- }
+ if (jsTestOptions().networkMessageCompressors) {
+ opts.networkMessageCompressors = jsTestOptions().networkMessageCompressors;
+ }
- if (!opts.hasOwnProperty('bind_ip')) {
- opts.bind_ip = "0.0.0.0";
- }
+ if (!opts.hasOwnProperty('bind_ip')) {
+ opts.bind_ip = "0.0.0.0";
+ }
- return opts;
- };
+ return opts;
+};
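
The restart/remember plumbing above lets a test restart a node with the options it was originally started with. A hedged sketch, where conn is a hypothetical connection returned by an earlier MongoRunner.runMongod() call:

    // Passing the old connection sets restart: true and reuses its runId, so the
    // options saved in MongoRunner.savedOptions (including the port) are merged back in.
    var opts = MongoRunner.mongoOptions({restart: conn});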
- // Returns an array of integers representing the version provided.
- // Ex: "3.3.12" => [3, 3, 12]
- var _convertVersionToIntegerArray = function(version) {
- var versionParts =
- convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
- if (versionParts.length === 2) {
- versionParts.push(Infinity);
- }
- return versionParts;
- };
+// Returns an array of integers representing the version provided.
+// Ex: "3.3.12" => [3, 3, 12]
+var _convertVersionToIntegerArray = function(version) {
+ var versionParts =
+ convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
+ if (versionParts.length === 2) {
+ versionParts.push(Infinity);
+ }
+ return versionParts;
+};
- // Returns if version2 is equal to, or came after, version 1.
- var _isMongodVersionEqualOrAfter = function(version1, version2) {
- if (version2 === "latest") {
- return true;
- }
+// Returns whether version2 is equal to, or came after, version1.
+var _isMongodVersionEqualOrAfter = function(version1, version2) {
+ if (version2 === "latest") {
+ return true;
+ }
- var versionParts1 = _convertVersionToIntegerArray(version1);
- var versionParts2 = _convertVersionToIntegerArray(version2);
- if (versionParts2[0] > versionParts1[0] ||
- (versionParts2[0] === versionParts1[0] && versionParts2[1] > versionParts1[1]) ||
- (versionParts2[0] === versionParts1[0] && versionParts2[1] === versionParts1[1] &&
- versionParts2[2] >= versionParts1[2])) {
- return true;
- }
+ var versionParts1 = _convertVersionToIntegerArray(version1);
+ var versionParts2 = _convertVersionToIntegerArray(version2);
+ if (versionParts2[0] > versionParts1[0] ||
+ (versionParts2[0] === versionParts1[0] && versionParts2[1] > versionParts1[1]) ||
+ (versionParts2[0] === versionParts1[0] && versionParts2[1] === versionParts1[1] &&
+ versionParts2[2] >= versionParts1[2])) {
+ return true;
+ }
- return false;
- };
+ return false;
+};
+
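
Worked examples for the two helpers above; two-component versions get an Infinity patch level, so a bare minor release sorts at or after any of its patch releases:

    _convertVersionToIntegerArray("3.3.12");           // [3, 3, 12]
    _convertVersionToIntegerArray("3.4");              // [3, 4, Infinity]
    _isMongodVersionEqualOrAfter("3.3.12", "3.4");     // true: 3.4 came after 3.3.12
    _isMongodVersionEqualOrAfter("3.3.12", "3.2.1");   // false
    _isMongodVersionEqualOrAfter("3.3.12", "latest");  // true, unconditionally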
+// Removes a setParameter parameter from mongods running a version that won't recognize them.
+var _removeSetParameterIfBeforeVersion = function(opts, parameterName, requiredVersion) {
+ var versionCompatible = (opts.binVersion === "" || opts.binVersion === undefined ||
+ _isMongodVersionEqualOrAfter(requiredVersion, opts.binVersion));
+ if (!versionCompatible && opts.setParameter && opts.setParameter[parameterName] != undefined) {
+ print("Removing '" + parameterName + "' setParameter with value " +
+ opts.setParameter[parameterName] +
+ " because it isn't compatibile with mongod running version " + opts.binVersion);
+ delete opts.setParameter[parameterName];
+ }
+};
+
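
The helper above is exercised a few lines below by MongoRunner.mongodOptions(). For illustration:

    var opts = {binVersion: "3.2", setParameter: {writePeriodicNoops: true}};
    _removeSetParameterIfBeforeVersion(opts, "writePeriodicNoops", "3.3.12");
    // 3.2 predates 3.3.12, so the notice is printed and the parameter is deleted;
    // with binVersion "4.0" (or unset) opts.setParameter would be left alone.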
+/**
+ * @option {object} opts
+ *
+ * {
+ * dbpath {string}
+ * useLogFiles {boolean}: use with logFile option.
+ * logFile {string}: path to the log file. If not specified and useLogFiles
+ * is true, automatically creates a log file inside dbpath.
+ * noJournal {boolean}
+ * keyFile
+ * replSet
+ * oplogSize
+ * }
+ */
+MongoRunner.mongodOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
+
+ opts.dbpath = MongoRunner.toRealDir(opts.dbpath || "$dataDir/mongod-$port", opts.pathOpts);
+
+ opts.pathOpts = Object.merge(opts.pathOpts, {dbpath: opts.dbpath});
+
+ _removeSetParameterIfBeforeVersion(opts, "writePeriodicNoops", "3.3.12");
+ _removeSetParameterIfBeforeVersion(opts, "numInitialSyncAttempts", "3.3.12");
+ _removeSetParameterIfBeforeVersion(opts, "numInitialSyncConnectAttempts", "3.3.12");
+ _removeSetParameterIfBeforeVersion(opts, "migrationLockAcquisitionMaxWaitMS", "4.1.7");
+
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = opts.dbpath + "/mongod.log";
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
- // Removes a setParameter parameter from mongods running a version that won't recognize them.
- var _removeSetParameterIfBeforeVersion = function(opts, parameterName, requiredVersion) {
- var versionCompatible = (opts.binVersion === "" || opts.binVersion === undefined ||
- _isMongodVersionEqualOrAfter(requiredVersion, opts.binVersion));
- if (!versionCompatible && opts.setParameter &&
- opts.setParameter[parameterName] != undefined) {
- print("Removing '" + parameterName + "' setParameter with value " +
- opts.setParameter[parameterName] +
- " because it isn't compatibile with mongod running version " + opts.binVersion);
- delete opts.setParameter[parameterName];
- }
- };
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
- /**
- * @option {object} opts
- *
- * {
- * dbpath {string}
- * useLogFiles {boolean}: use with logFile option.
- * logFile {string}: path to the log file. If not specified and useLogFiles
- * is true, automatically creates a log file inside dbpath.
- * noJournal {boolean}
- * keyFile
- * replSet
- * oplogSize
- * }
- */
- MongoRunner.mongodOptions = function(opts) {
-
- opts = MongoRunner.mongoOptions(opts);
-
- opts.dbpath = MongoRunner.toRealDir(opts.dbpath || "$dataDir/mongod-$port", opts.pathOpts);
-
- opts.pathOpts = Object.merge(opts.pathOpts, {dbpath: opts.dbpath});
-
- _removeSetParameterIfBeforeVersion(opts, "writePeriodicNoops", "3.3.12");
- _removeSetParameterIfBeforeVersion(opts, "numInitialSyncAttempts", "3.3.12");
- _removeSetParameterIfBeforeVersion(opts, "numInitialSyncConnectAttempts", "3.3.12");
- _removeSetParameterIfBeforeVersion(opts, "migrationLockAcquisitionMaxWaitMS", "4.1.7");
-
- if (!opts.logFile && opts.useLogFiles) {
- opts.logFile = opts.dbpath + "/mongod.log";
- } else if (opts.logFile) {
- opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
- }
+ if ((jsTestOptions().noJournal || opts.noJournal) && !('journal' in opts) &&
+ !('configsvr' in opts)) {
+ opts.nojournal = "";
+ }
- if (opts.logFile !== undefined) {
- opts.logpath = opts.logFile;
- }
+ if (jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile;
+ }
- if ((jsTestOptions().noJournal || opts.noJournal) && !('journal' in opts) &&
- !('configsvr' in opts)) {
- opts.nojournal = "";
+ if (opts.hasOwnProperty("enableEncryption")) {
+ // opts.enableEncryption, if set, must be an empty string
+ if (opts.enableEncryption !== "") {
+ throw new Error("The enableEncryption option must be an empty string if it is " +
+ "specified");
}
-
- if (jsTestOptions().keyFile && !opts.keyFile) {
- opts.keyFile = jsTestOptions().keyFile;
+ } else if (jsTestOptions().enableEncryption !== undefined) {
+ if (jsTestOptions().enableEncryption !== "") {
+ throw new Error("The enableEncryption option must be an empty string if it is " +
+ "specified");
}
+ opts.enableEncryption = "";
+ }
- if (opts.hasOwnProperty("enableEncryption")) {
- // opts.enableEncryption, if set, must be an empty string
- if (opts.enableEncryption !== "") {
- throw new Error("The enableEncryption option must be an empty string if it is " +
- "specified");
- }
- } else if (jsTestOptions().enableEncryption !== undefined) {
- if (jsTestOptions().enableEncryption !== "") {
- throw new Error("The enableEncryption option must be an empty string if it is " +
- "specified");
- }
- opts.enableEncryption = "";
+ if (opts.hasOwnProperty("encryptionKeyFile")) {
+ // opts.encryptionKeyFile, if set, must be a string
+ if (typeof opts.encryptionKeyFile !== "string") {
+ throw new Error("The encryptionKeyFile option must be a string if it is specified");
}
-
- if (opts.hasOwnProperty("encryptionKeyFile")) {
- // opts.encryptionKeyFile, if set, must be a string
- if (typeof opts.encryptionKeyFile !== "string") {
- throw new Error("The encryptionKeyFile option must be a string if it is specified");
- }
- } else if (jsTestOptions().encryptionKeyFile !== undefined) {
- if (typeof(jsTestOptions().encryptionKeyFile) !== "string") {
- throw new Error("The encryptionKeyFile option must be a string if it is specified");
- }
- opts.encryptionKeyFile = jsTestOptions().encryptionKeyFile;
+ } else if (jsTestOptions().encryptionKeyFile !== undefined) {
+ if (typeof (jsTestOptions().encryptionKeyFile) !== "string") {
+ throw new Error("The encryptionKeyFile option must be a string if it is specified");
}
+ opts.encryptionKeyFile = jsTestOptions().encryptionKeyFile;
+ }
- if (opts.hasOwnProperty("auditDestination")) {
- // opts.auditDestination, if set, must be a string
- if (typeof opts.auditDestination !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- } else if (jsTestOptions().auditDestination !== undefined) {
- if (typeof(jsTestOptions().auditDestination) !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- opts.auditDestination = jsTestOptions().auditDestination;
+ if (opts.hasOwnProperty("auditDestination")) {
+ // opts.auditDestination, if set, must be a string
+ if (typeof opts.auditDestination !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
-
- if (opts.noReplSet)
- opts.replSet = null;
- if (opts.arbiter)
- opts.oplogSize = 1;
-
- return opts;
- };
-
- MongoRunner.mongosOptions = function(opts) {
- opts = MongoRunner.mongoOptions(opts);
-
- // Normalize configdb option to be host string if currently a host
- if (opts.configdb && opts.configdb.getDB) {
- opts.configdb = opts.configdb.host;
+ } else if (jsTestOptions().auditDestination !== undefined) {
+ if (typeof (jsTestOptions().auditDestination) !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
+ opts.auditDestination = jsTestOptions().auditDestination;
+ }
- opts.pathOpts =
- Object.merge(opts.pathOpts, {configdb: opts.configdb.replace(/:|\/|,/g, "-")});
+ if (opts.noReplSet)
+ opts.replSet = null;
+ if (opts.arbiter)
+ opts.oplogSize = 1;
- if (!opts.logFile && opts.useLogFiles) {
- opts.logFile =
- MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log", opts.pathOpts);
- } else if (opts.logFile) {
- opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
- }
+ return opts;
+};
- if (opts.logFile !== undefined) {
- opts.logpath = opts.logFile;
- }
+MongoRunner.mongosOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
- var testOptions = jsTestOptions();
- if (testOptions.keyFile && !opts.keyFile) {
- opts.keyFile = testOptions.keyFile;
- }
+ // Normalize configdb option to be host string if currently a host
+ if (opts.configdb && opts.configdb.getDB) {
+ opts.configdb = opts.configdb.host;
+ }
- if (opts.hasOwnProperty("auditDestination")) {
- // opts.auditDestination, if set, must be a string
- if (typeof opts.auditDestination !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- } else if (testOptions.auditDestination !== undefined) {
- if (typeof(testOptions.auditDestination) !== "string") {
- throw new Error("The auditDestination option must be a string if it is specified");
- }
- opts.auditDestination = testOptions.auditDestination;
- }
+ opts.pathOpts = Object.merge(opts.pathOpts, {configdb: opts.configdb.replace(/:|\/|,/g, "-")});
- if (!opts.hasOwnProperty('binVersion') && testOptions.mongosBinVersion) {
- opts.binVersion = MongoRunner.getBinVersionFor(testOptions.mongosBinVersion);
- }
-
- // If the mongos is being restarted with a newer version, make sure we remove any options
- // that no longer exist in the newer version.
- if (opts.restart && MongoRunner.areBinVersionsTheSame('latest', opts.binVersion)) {
- delete opts.noAutoSplit;
- }
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log", opts.pathOpts);
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
- return opts;
- };
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
- /**
- * Starts a mongod instance.
- *
- * @param {Object} opts
- *
- * {
- * useHostName {boolean}: Uses hostname of machine if true.
- * forceLock {boolean}: Deletes the lock file if set to true.
- * dbpath {string}: location of db files.
- * cleanData {boolean}: Removes all files in dbpath if true.
- * startClean {boolean}: same as cleanData.
- * noCleanData {boolean}: Do not clean files (cleanData takes priority).
- * binVersion {string}: version for binary (also see MongoRunner.binVersionSubs).
- *
- * @see MongoRunner.mongodOptions for other options
- * }
- *
- * @return {Mongo} connection object to the started mongod instance.
- *
- * @see MongoRunner.arrOptions
- */
- MongoRunner.runMongod = function(opts) {
-
- opts = opts || {};
- var env = undefined;
- var useHostName = true;
- var runId = null;
- var waitForConnect = true;
- var fullOptions = opts;
-
- if (isObject(opts)) {
- opts = MongoRunner.mongodOptions(opts);
- fullOptions = opts;
-
- if (opts.useHostName != undefined) {
- useHostName = opts.useHostName;
- } else if (opts.useHostname != undefined) {
- useHostName = opts.useHostname;
- } else {
- useHostName = true; // Default to true
- }
- env = opts.env;
- runId = opts.runId;
- waitForConnect = opts.waitForConnect;
-
- if (opts.forceLock)
- removeFile(opts.dbpath + "/mongod.lock");
- if ((opts.cleanData || opts.startClean) || (!opts.restart && !opts.noCleanData)) {
- print("Resetting db path '" + opts.dbpath + "'");
- resetDbpath(opts.dbpath);
- }
+ var testOptions = jsTestOptions();
+ if (testOptions.keyFile && !opts.keyFile) {
+ opts.keyFile = testOptions.keyFile;
+ }
- var mongodProgram = MongoRunner.mongodPath;
- opts = MongoRunner.arrOptions(mongodProgram, opts);
+ if (opts.hasOwnProperty("auditDestination")) {
+ // opts.auditDestination, if set, must be a string
+ if (typeof opts.auditDestination !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
-
- var mongod = MongoRunner._startWithArgs(opts, env, waitForConnect);
- if (!mongod) {
- return null;
+ } else if (testOptions.auditDestination !== undefined) {
+ if (typeof (testOptions.auditDestination) !== "string") {
+ throw new Error("The auditDestination option must be a string if it is specified");
}
+ opts.auditDestination = testOptions.auditDestination;
+ }
- mongod.commandLine = MongoRunner.arrToOpts(opts);
- mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port;
- mongod.host = mongod.name;
- mongod.port = parseInt(mongod.commandLine.port);
- mongod.runId = runId || ObjectId();
- mongod.dbpath = fullOptions.dbpath;
- mongod.savedOptions = MongoRunner.savedOptions[mongod.runId];
- mongod.fullOptions = fullOptions;
+ if (!opts.hasOwnProperty('binVersion') && testOptions.mongosBinVersion) {
+ opts.binVersion = MongoRunner.getBinVersionFor(testOptions.mongosBinVersion);
+ }
- return mongod;
- };
+ // If the mongos is being restarted with a newer version, make sure we remove any options
+ // that no longer exist in the newer version.
+ if (opts.restart && MongoRunner.areBinVersionsTheSame('latest', opts.binVersion)) {
+ delete opts.noAutoSplit;
+ }
- MongoRunner.runMongos = function(opts) {
- opts = opts || {};
-
- var env = undefined;
- var useHostName = false;
- var runId = null;
- var waitForConnect = true;
- var fullOptions = opts;
-
- if (isObject(opts)) {
- opts = MongoRunner.mongosOptions(opts);
- fullOptions = opts;
-
- useHostName = opts.useHostName || opts.useHostname;
- runId = opts.runId;
- waitForConnect = opts.waitForConnect;
- env = opts.env;
- var mongosProgram = MongoRunner.mongosPath;
- opts = MongoRunner.arrOptions(mongosProgram, opts);
+ return opts;
+};
+
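The logFile handling above relies on placeholder expansion; a minimal sketch of the intent (illustrative, and assuming MongoRunner.toRealFile substitutes $-tokens from pathOpts, as its usage here suggests):

    var pathOpts = {dataDir: "/data/db", configdb: "cfg-27019", port: "27017"};
    // Expected expansion under that assumption:
    // "$dataDir/mongos-$configdb-$port.log" -> "/data/db/mongos-cfg-27019-27017.log"
    var logFile = MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log", pathOpts);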
+/**
+ * Starts a mongod instance.
+ *
+ * @param {Object} opts
+ *
+ * {
+ * useHostName {boolean}: Uses hostname of machine if true.
+ * forceLock {boolean}: Deletes the lock file if set to true.
+ * dbpath {string}: location of db files.
+ * cleanData {boolean}: Removes all files in dbpath if true.
+ * startClean {boolean}: same as cleanData.
+ * noCleanData {boolean}: Do not clean files (cleanData takes priority).
+ * binVersion {string}: version for binary (also see MongoRunner.binVersionSubs).
+ *
+ * @see MongoRunner.mongodOptions for other options
+ * }
+ *
+ * @return {Mongo} connection object to the started mongod instance.
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongod = function(opts) {
+ opts = opts || {};
+ var env = undefined;
+ var useHostName = true;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongodOptions(opts);
+ fullOptions = opts;
+
+ if (opts.useHostName != undefined) {
+ useHostName = opts.useHostName;
+ } else if (opts.useHostname != undefined) {
+ useHostName = opts.useHostname;
+ } else {
+ useHostName = true; // Default to true
}
+ env = opts.env;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
- var mongos = MongoRunner._startWithArgs(opts, env, waitForConnect);
- if (!mongos) {
- return null;
+ if (opts.forceLock)
+ removeFile(opts.dbpath + "/mongod.lock");
+ if ((opts.cleanData || opts.startClean) || (!opts.restart && !opts.noCleanData)) {
+ print("Resetting db path '" + opts.dbpath + "'");
+ resetDbpath(opts.dbpath);
}
- mongos.commandLine = MongoRunner.arrToOpts(opts);
- mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
- mongos.host = mongos.name;
- mongos.port = parseInt(mongos.commandLine.port);
- mongos.runId = runId || ObjectId();
- mongos.savedOptions = MongoRunner.savedOptions[mongos.runId];
- mongos.fullOptions = fullOptions;
-
- return mongos;
- };
+ var mongodProgram = MongoRunner.mongodPath;
+ opts = MongoRunner.arrOptions(mongodProgram, opts);
+ }
- MongoRunner.StopError = function(returnCode) {
- this.name = "StopError";
- this.returnCode = returnCode;
- this.message = "MongoDB process stopped with exit code: " + this.returnCode;
- this.stack = this.toString() + "\n" + (new Error()).stack;
- };
+ var mongod = MongoRunner._startWithArgs(opts, env, waitForConnect);
+ if (!mongod) {
+ return null;
+ }
- MongoRunner.StopError.prototype = Object.create(Error.prototype);
- MongoRunner.StopError.prototype.constructor = MongoRunner.StopError;
-
- // Constants for exit codes of MongoDB processes
- MongoRunner.EXIT_ABORT = -6;
- MongoRunner.EXIT_CLEAN = 0;
- MongoRunner.EXIT_BADOPTIONS = 2;
- MongoRunner.EXIT_REPLICATION_ERROR = 3;
- MongoRunner.EXIT_NEED_UPGRADE = 4;
- MongoRunner.EXIT_SHARDING_ERROR = 5;
- // SIGKILL is translated to TerminateProcess() on Windows, which causes the program to
- // terminate with exit code 1.
- MongoRunner.EXIT_SIGKILL = _isWindows() ? 1 : -9;
- MongoRunner.EXIT_KILL = 12;
- MongoRunner.EXIT_ABRUPT = 14;
- MongoRunner.EXIT_NTSERVICE_ERROR = 20;
- MongoRunner.EXIT_JAVA = 21;
- MongoRunner.EXIT_OOM_MALLOC = 42;
- MongoRunner.EXIT_OOM_REALLOC = 43;
- MongoRunner.EXIT_FS = 45;
- MongoRunner.EXIT_CLOCK_SKEW = 47; // OpTime clock skew; deprecated
- MongoRunner.EXIT_NET_ERROR = 48;
- MongoRunner.EXIT_WINDOWS_SERVICE_STOP = 49;
- MongoRunner.EXIT_POSSIBLE_CORRUPTION = 60;
- MongoRunner.EXIT_NEED_DOWNGRADE = 62;
- MongoRunner.EXIT_UNCAUGHT = 100; // top level exception that wasn't caught
- MongoRunner.EXIT_TEST = 101;
-
- MongoRunner.validateCollectionsCallback = function(port) {};
-
- /**
- * Kills a mongod process.
- *
- * @param {Mongo} conn the connection object to the process to kill
- * @param {number} signal The signal number to use for killing
- * @param {Object} opts Additional options. Format:
- * {
- * auth: {
- * user {string}: admin user name
- * pwd {string}: admin password
- * },
- * skipValidation: <bool>,
- * allowedExitCode: <int>
- * }
- *
- * Note: The auth option is required in a authenticated mongod running in Windows since
- * it uses the shutdown command, which requires admin credentials.
- */
- MongoRunner.stopMongod = function(conn, signal, opts) {
- if (!conn.pid) {
- throw new Error("first arg must have a `pid` property; " +
- "it is usually the object returned from MongoRunner.runMongod/s");
- }
+ mongod.commandLine = MongoRunner.arrToOpts(opts);
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port;
+ mongod.host = mongod.name;
+ mongod.port = parseInt(mongod.commandLine.port);
+ mongod.runId = runId || ObjectId();
+ mongod.dbpath = fullOptions.dbpath;
+ mongod.savedOptions = MongoRunner.savedOptions[mongod.runId];
+ mongod.fullOptions = fullOptions;
+
+ return mongod;
+};
+
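For orientation, a minimal usage sketch of MongoRunner.runMongod as documented above (illustrative only, not part of this change):

    const conn = MongoRunner.runMongod({cleanData: true});  // start on a fresh dbpath
    assert.neq(null, conn, "mongod failed to start");
    assert.commandWorked(conn.getDB("test").runCommand({ping: 1}));
    MongoRunner.stopMongod(conn);  // defined further down in this file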
+MongoRunner.runMongos = function(opts) {
+ opts = opts || {};
+
+ var env = undefined;
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongosOptions(opts);
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+ env = opts.env;
+ var mongosProgram = MongoRunner.mongosPath;
+ opts = MongoRunner.arrOptions(mongosProgram, opts);
+ }
- if (!conn.port) {
- throw new Error("first arg must have a `port` property; " +
- "it is usually the object returned from MongoRunner.runMongod/s");
- }
+ var mongos = MongoRunner._startWithArgs(opts, env, waitForConnect);
+ if (!mongos) {
+ return null;
+ }
- signal = parseInt(signal) || 15;
- opts = opts || {};
+ mongos.commandLine = MongoRunner.arrToOpts(opts);
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
+ mongos.host = mongos.name;
+ mongos.port = parseInt(mongos.commandLine.port);
+ mongos.runId = runId || ObjectId();
+ mongos.savedOptions = MongoRunner.savedOptions[mongos.runId];
+ mongos.fullOptions = fullOptions;
+
+ return mongos;
+};
+
+MongoRunner.StopError = function(returnCode) {
+ this.name = "StopError";
+ this.returnCode = returnCode;
+ this.message = "MongoDB process stopped with exit code: " + this.returnCode;
+ this.stack = this.toString() + "\n" + (new Error()).stack;
+};
+
+MongoRunner.StopError.prototype = Object.create(Error.prototype);
+MongoRunner.StopError.prototype.constructor = MongoRunner.StopError;
+
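Since StopError inherits from Error, callers can branch on the recorded exit code; a hedged sketch:

    try {
        MongoRunner.stopMongod(conn);
    } catch (e) {
        if (e instanceof MongoRunner.StopError) {
            print("mongod exited with unexpected code " + e.returnCode);
        }
        throw e;
    }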
+// Constants for exit codes of MongoDB processes
+MongoRunner.EXIT_ABORT = -6;
+MongoRunner.EXIT_CLEAN = 0;
+MongoRunner.EXIT_BADOPTIONS = 2;
+MongoRunner.EXIT_REPLICATION_ERROR = 3;
+MongoRunner.EXIT_NEED_UPGRADE = 4;
+MongoRunner.EXIT_SHARDING_ERROR = 5;
+// SIGKILL is translated to TerminateProcess() on Windows, which causes the program to
+// terminate with exit code 1.
+MongoRunner.EXIT_SIGKILL = _isWindows() ? 1 : -9;
+MongoRunner.EXIT_KILL = 12;
+MongoRunner.EXIT_ABRUPT = 14;
+MongoRunner.EXIT_NTSERVICE_ERROR = 20;
+MongoRunner.EXIT_JAVA = 21;
+MongoRunner.EXIT_OOM_MALLOC = 42;
+MongoRunner.EXIT_OOM_REALLOC = 43;
+MongoRunner.EXIT_FS = 45;
+MongoRunner.EXIT_CLOCK_SKEW = 47; // OpTime clock skew; deprecated
+MongoRunner.EXIT_NET_ERROR = 48;
+MongoRunner.EXIT_WINDOWS_SERVICE_STOP = 49;
+MongoRunner.EXIT_POSSIBLE_CORRUPTION = 60;
+MongoRunner.EXIT_NEED_DOWNGRADE = 62;
+MongoRunner.EXIT_UNCAUGHT = 100; // top level exception that wasn't caught
+MongoRunner.EXIT_TEST = 101;
+
+MongoRunner.validateCollectionsCallback = function(port) {};
+
+/**
+ * Kills a mongod process.
+ *
+ * @param {Mongo} conn the connection object to the process to kill
+ * @param {number} signal The signal number to use for killing
+ * @param {Object} opts Additional options. Format:
+ * {
+ * auth: {
+ * user {string}: admin user name
+ * pwd {string}: admin password
+ * },
+ * skipValidation: <bool>,
+ * allowedExitCode: <int>
+ * }
+ *
+ * Note: The auth option is required for an authenticated mongod running on Windows since
+ * it uses the shutdown command, which requires admin credentials.
+ */
+MongoRunner.stopMongod = function(conn, signal, opts) {
+ if (!conn.pid) {
+ throw new Error("first arg must have a `pid` property; " +
+ "it is usually the object returned from MongoRunner.runMongod/s");
+ }
- var allowedExitCode = MongoRunner.EXIT_CLEAN;
+ if (!conn.port) {
+ throw new Error("first arg must have a `port` property; " +
+ "it is usually the object returned from MongoRunner.runMongod/s");
+ }
- if (opts.allowedExitCode) {
- allowedExitCode = opts.allowedExitCode;
- }
+ signal = parseInt(signal) || 15;
+ opts = opts || {};
- var port = parseInt(conn.port);
+ var allowedExitCode = MongoRunner.EXIT_CLEAN;
- var pid = conn.pid;
- // If the return code is in the serverExitCodeMap, it means the server crashed on startup.
- // We just use the recorded return code instead of stopping the program.
- var returnCode;
- if (serverExitCodeMap.hasOwnProperty(port)) {
- returnCode = serverExitCodeMap[port];
- delete serverExitCodeMap[port];
- } else {
- // Invoke callback to validate collections and indexes before shutting down mongod.
- // We skip calling the callback function when the expected return code of
- // the mongod process is non-zero since it's likely the process has already exited.
+ if (opts.allowedExitCode) {
+ allowedExitCode = opts.allowedExitCode;
+ }
- var skipValidation = false;
- if (opts.skipValidation) {
- skipValidation = true;
- }
+ var port = parseInt(conn.port);
- if (allowedExitCode === MongoRunner.EXIT_CLEAN && !skipValidation) {
- MongoRunner.validateCollectionsCallback(port);
- }
+ var pid = conn.pid;
+ // If the return code is in the serverExitCodeMap, it means the server crashed on startup.
+ // We just use the recorded return code instead of stopping the program.
+ var returnCode;
+ if (serverExitCodeMap.hasOwnProperty(port)) {
+ returnCode = serverExitCodeMap[port];
+ delete serverExitCodeMap[port];
+ } else {
+ // Invoke callback to validate collections and indexes before shutting down mongod.
+ // We skip calling the callback function when the expected return code of
+ // the mongod process is non-zero since it's likely the process has already exited.
- returnCode = _stopMongoProgram(port, signal, opts);
+ var skipValidation = false;
+ if (opts.skipValidation) {
+ skipValidation = true;
}
- if (allowedExitCode !== returnCode) {
- throw new MongoRunner.StopError(returnCode);
- } else if (returnCode !== MongoRunner.EXIT_CLEAN) {
- print("MongoDB process on port " + port + " intentionally exited with error code ",
- returnCode);
+
+ if (allowedExitCode === MongoRunner.EXIT_CLEAN && !skipValidation) {
+ MongoRunner.validateCollectionsCallback(port);
}
- return returnCode;
- };
+ returnCode = _stopMongoProgram(port, signal, opts);
+ }
+ if (allowedExitCode !== returnCode) {
+ throw new MongoRunner.StopError(returnCode);
+ } else if (returnCode !== MongoRunner.EXIT_CLEAN) {
+ print("MongoDB process on port " + port + " intentionally exited with error code ",
+ returnCode);
+ }
- MongoRunner.stopMongos = MongoRunner.stopMongod;
-
- /**
- * Starts an instance of the specified mongo tool
- *
- * @param {String} binaryName - The name of the tool to run.
- * @param {Object} [opts={}] - Options of the form --flag or --key=value to pass to the tool.
- * @param {string} [opts.binVersion] - The version of the tool to run.
- *
- * @param {...string} positionalArgs - Positional arguments to pass to the tool after all
- * options have been specified. For example,
- * MongoRunner.runMongoTool("executable", {key: value}, arg1, arg2) would invoke
- * ./executable --key value arg1 arg2.
- *
- * @see MongoRunner.arrOptions
- */
- MongoRunner.runMongoTool = function(binaryName, opts, ...positionalArgs) {
-
- var opts = opts || {};
-
- // Normalize and get the binary version to use
- if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
- // Advance the version iterator so that subsequent calls to MongoRunner.runMongoTool()
- // use the next version in the list.
- const iterator = opts.binVersion;
- opts.binVersion = iterator.current();
- iterator.advance();
- }
- opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+ return returnCode;
+};
+
+MongoRunner.stopMongos = MongoRunner.stopMongod;
+
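A sketch of the stop options documented above (illustrative; EXIT_SIGKILL is one of the constants defined earlier in this file):

    // Send SIGKILL (9) and accept the matching exit code instead of EXIT_CLEAN;
    // skipValidation bypasses the collection-validation callback.
    MongoRunner.stopMongod(
        conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL, skipValidation: true});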
+/**
+ * Starts an instance of the specified mongo tool
+ *
+ * @param {String} binaryName - The name of the tool to run.
+ * @param {Object} [opts={}] - Options of the form --flag or --key=value to pass to the tool.
+ * @param {string} [opts.binVersion] - The version of the tool to run.
+ *
+ * @param {...string} positionalArgs - Positional arguments to pass to the tool after all
+ * options have been specified. For example,
+ * MongoRunner.runMongoTool("executable", {key: value}, arg1, arg2) would invoke
+ * ./executable --key value arg1 arg2.
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongoTool = function(binaryName, opts, ...positionalArgs) {
+ var opts = opts || {};
+
+ // Normalize and get the binary version to use
+ if (opts.binVersion instanceof MongoRunner.versionIterator.iterator) {
+ // Advance the version iterator so that subsequent calls to MongoRunner.runMongoTool()
+ // use the next version in the list.
+ const iterator = opts.binVersion;
+ opts.binVersion = iterator.current();
+ iterator.advance();
+ }
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
- // Recent versions of the mongo tools support a --dialTimeout flag to set for how
- // long they retry connecting to a mongod or mongos process. We have them retry
- // connecting for up to 30 seconds to handle when the tests are run on a
- // resource-constrained host machine.
- //
- // The bsondump tool doesn't accept the --dialTimeout flag because it doesn't connect to a
- // mongod or mongos process.
- if (!opts.hasOwnProperty('dialTimeout') && binaryName !== 'bsondump' &&
- _toolVersionSupportsDialTimeout(opts.binVersion)) {
- opts['dialTimeout'] = '30';
- }
+ // Recent versions of the mongo tools support a --dialTimeout flag that controls how
+ // long they retry connecting to a mongod or mongos process. We have them retry
+ // connecting for up to 30 seconds to handle tests running on a
+ // resource-constrained host machine.
+ //
+ // The bsondump tool doesn't accept the --dialTimeout flag because it doesn't connect to a
+ // mongod or mongos process.
+ if (!opts.hasOwnProperty('dialTimeout') && binaryName !== 'bsondump' &&
+ _toolVersionSupportsDialTimeout(opts.binVersion)) {
+ opts['dialTimeout'] = '30';
+ }
- // Convert 'opts' into an array of arguments.
- var argsArray = MongoRunner.arrOptions(binaryName, opts);
+ // Convert 'opts' into an array of arguments.
+ var argsArray = MongoRunner.arrOptions(binaryName, opts);
- // Append any positional arguments that were specified.
- argsArray.push(...positionalArgs);
+ // Append any positional arguments that were specified.
+ argsArray.push(...positionalArgs);
- return runMongoProgram.apply(null, argsArray);
+ return runMongoProgram.apply(null, argsArray);
+};
- };
+var _toolVersionSupportsDialTimeout = function(version) {
+ if (version === "latest" || version === "") {
+ return true;
+ }
+ var versionParts =
+ convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
+ if (versionParts.length === 2) {
+ versionParts.push(Infinity);
+ }
- var _toolVersionSupportsDialTimeout = function(version) {
- if (version === "latest" || version === "") {
- return true;
- }
- var versionParts =
- convertVersionStringToArray(version).slice(0, 3).map(part => parseInt(part, 10));
- if (versionParts.length === 2) {
- versionParts.push(Infinity);
- }
+ if (versionParts[0] > 3 || (versionParts[0] === 3 && versionParts[1] > 3)) {
+ // The --dialTimeout command line option is supported by the tools
+ // with a major version newer than 3.3.
+ return true;
+ }
- if (versionParts[0] > 3 || (versionParts[0] === 3 && versionParts[1] > 3)) {
- // The --dialTimeout command line option is supported by the tools
- // with a major version newer than 3.3.
+ for (var supportedVersion of ["3.3.4", "3.2.5", "3.0.12"]) {
+ var supportedVersionParts = convertVersionStringToArray(supportedVersion)
+ .slice(0, 3)
+ .map(part => parseInt(part, 10));
+ if (versionParts[0] === supportedVersionParts[0] &&
+ versionParts[1] === supportedVersionParts[1] &&
+ versionParts[2] >= supportedVersionParts[2]) {
return true;
}
+ }
+ return false;
+};
+
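The backport list above makes support per-branch rather than purely ordinal; a quick sanity sketch (hypothetical assertions derived from the function body):

    assert(_toolVersionSupportsDialTimeout("3.4.0"));   // any branch newer than 3.3
    assert(_toolVersionSupportsDialTimeout("3.2.5"));   // 3.2 branch, from the backport on
    assert(!_toolVersionSupportsDialTimeout("3.2.4"));  // predates the 3.2 backport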
+// Given a test name figures out a directory for that test to use for dump files and makes sure
+// that directory exists and is empty.
+MongoRunner.getAndPrepareDumpDirectory = function(testName) {
+ var dir = MongoRunner.dataPath + testName + "_external/";
+ resetDbpath(dir);
+ return dir;
+};
+
+// Start a mongod instance and return a 'Mongo' object connected to it.
+// This function's arguments are passed as command line arguments to mongod.
+// The specified 'dbpath' is cleared if it exists, created if not.
+// var conn = _startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+var _startMongodEmpty = function() {
+ var args = createMongoArgs("mongod", Array.from(arguments));
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+};
+
+_startMongod = function() {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return _startMongodEmpty.apply(null, arguments);
+};
+
+/**
+ * Returns a new argArray with any test-specific arguments added.
+ */
+function appendSetParameterArgs(argArray) {
+ function argArrayContains(key) {
+ return (argArray
+ .filter((val) => {
+ return typeof val === "string" && val.indexOf(key) === 0;
+ })
+ .length > 0);
+ }
- for (var supportedVersion of["3.3.4", "3.2.5", "3.0.12"]) {
- var supportedVersionParts = convertVersionStringToArray(supportedVersion)
- .slice(0, 3)
- .map(part => parseInt(part, 10));
- if (versionParts[0] === supportedVersionParts[0] &&
- versionParts[1] === supportedVersionParts[1] &&
- versionParts[2] >= supportedVersionParts[2]) {
- return true;
- }
- }
- return false;
- };
-
- // Given a test name figures out a directory for that test to use for dump files and makes sure
- // that directory exists and is empty.
- MongoRunner.getAndPrepareDumpDirectory = function(testName) {
- var dir = MongoRunner.dataPath + testName + "_external/";
- resetDbpath(dir);
- return dir;
- };
-
- // Start a mongod instance and return a 'Mongo' object connected to it.
- // This function's arguments are passed as command line arguments to mongod.
- // The specified 'dbpath' is cleared if it exists, created if not.
- // var conn = _startMongodEmpty("--port", 30000, "--dbpath", "asdf");
- var _startMongodEmpty = function() {
- var args = createMongoArgs("mongod", Array.from(arguments));
-
- var dbpath = _parsePath.apply(null, args);
- resetDbpath(dbpath);
+ function argArrayContainsSetParameterValue(value) {
+ assert(value.endsWith("="), "Expected value argument to be of the form <parameterName>=");
+ return argArray.some(function(el) {
+ return typeof el === "string" && el.startsWith(value);
+ });
+ }
- return startMongoProgram.apply(null, args);
- };
+ // programName includes the version, e.g., mongod-3.2.
+ // baseProgramName is the program name without any version information, e.g., mongod.
+ let programName = argArray[0];
- _startMongod = function() {
- print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
- return _startMongodEmpty.apply(null, arguments);
- };
+ let [baseProgramName, programVersion] = programName.split("-");
+ let programMajorMinorVersion = 0;
+ if (programVersion) {
+ let [major, minor, point] = programVersion.split(".");
+ programMajorMinorVersion = parseInt(major) * 100 + parseInt(minor);
+ }
- /**
- * Returns a new argArray with any test-specific arguments added.
- */
- function appendSetParameterArgs(argArray) {
- function argArrayContains(key) {
- return (argArray
- .filter((val) => {
- return typeof val === "string" && val.indexOf(key) === 0;
- })
- .length > 0);
+ if (baseProgramName === 'mongod' || baseProgramName === 'mongos') {
+ if (jsTest.options().enableTestCommands) {
+ argArray.push(...['--setParameter', "enableTestCommands=1"]);
}
-
- function argArrayContainsSetParameterValue(value) {
- assert(value.endsWith("="),
- "Expected value argument to be of the form <parameterName>=");
- return argArray.some(function(el) {
- return typeof el === "string" && el.startsWith(value);
- });
+ if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
+ if (!argArrayContainsSetParameterValue('authenticationMechanisms=')) {
+ argArray.push(...['--setParameter',
+ "authenticationMechanisms=" + jsTest.options().authMechanism]);
+ }
}
-
- // programName includes the version, e.g., mongod-3.2.
- // baseProgramName is the program name without any version information, e.g., mongod.
- let programName = argArray[0];
-
- let [baseProgramName, programVersion] = programName.split("-");
- let programMajorMinorVersion = 0;
- if (programVersion) {
- let [major, minor, point] = programVersion.split(".");
- programMajorMinorVersion = parseInt(major) * 100 + parseInt(minor);
+ if (jsTest.options().auth) {
+ argArray.push(...['--setParameter', "enableLocalhostAuthBypass=false"]);
}
- if (baseProgramName === 'mongod' || baseProgramName === 'mongos') {
- if (jsTest.options().enableTestCommands) {
- argArray.push(...['--setParameter', "enableTestCommands=1"]);
- }
- if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
- if (!argArrayContainsSetParameterValue('authenticationMechanisms=')) {
- argArray.push(
- ...['--setParameter',
- "authenticationMechanisms=" + jsTest.options().authMechanism]);
+ // New options in 3.5.x
+ if (!programMajorMinorVersion || programMajorMinorVersion >= 305) {
+ if (jsTest.options().serviceExecutor) {
+ if (!argArrayContains("--serviceExecutor")) {
+ argArray.push(...["--serviceExecutor", jsTest.options().serviceExecutor]);
}
}
- if (jsTest.options().auth) {
- argArray.push(...['--setParameter', "enableLocalhostAuthBypass=false"]);
+
+ if (jsTest.options().transportLayer) {
+ if (!argArrayContains("--transportLayer")) {
+ argArray.push(...["--transportLayer", jsTest.options().transportLayer]);
+ }
}
- // New options in 3.5.x
- if (!programMajorMinorVersion || programMajorMinorVersion >= 305) {
- if (jsTest.options().serviceExecutor) {
- if (!argArrayContains("--serviceExecutor")) {
- argArray.push(...["--serviceExecutor", jsTest.options().serviceExecutor]);
+ // Disable background cache refreshing to avoid races in tests
+ argArray.push(...['--setParameter', "disableLogicalSessionCacheRefresh=true"]);
+ }
+
+ // Since options may not be backward compatible, mongos options are not
+ // set on older versions, e.g., mongos-3.0.
+ if (programName.endsWith('mongos')) {
+ // apply setParameters for mongos
+ if (jsTest.options().setParametersMongos) {
+ let params = jsTest.options().setParametersMongos;
+ for (let paramName of Object.keys(params)) {
+ // Only set the 'logComponentVerbosity' parameter if it has not already
+ // been specified in the given argument array. This means that any
+ // 'logComponentVerbosity' settings passed through via TestData will
+ // always be overridden by settings passed directly to MongoRunner from
+ // within the shell.
+ if (paramName === "logComponentVerbosity" &&
+ argArrayContains("logComponentVerbosity")) {
+ continue;
}
+ const paramVal = ((param) => {
+ if (typeof param === "object") {
+ return JSON.stringify(param);
+ }
+
+ return param;
+ })(params[paramName]);
+ const setParamStr = paramName + "=" + paramVal;
+ argArray.push(...['--setParameter', setParamStr]);
+ }
+ }
+ } else if (baseProgramName === 'mongod') {
+ if (jsTestOptions().roleGraphInvalidationIsFatal) {
+ argArray.push(...['--setParameter', "roleGraphInvalidationIsFatal=true"]);
+ }
+
+ // Set storageEngine for mongod. There was no storageEngine parameter before 3.0.
+ if (jsTest.options().storageEngine &&
+ (!programVersion || programMajorMinorVersion >= 300)) {
+ if (!argArrayContains("--storageEngine")) {
+ argArray.push(...['--storageEngine', jsTest.options().storageEngine]);
}
+ }
- if (jsTest.options().transportLayer) {
- if (!argArrayContains("--transportLayer")) {
- argArray.push(...["--transportLayer", jsTest.options().transportLayer]);
+ // New mongod-specific options in 4.0.x
+ if (!programMajorMinorVersion || programMajorMinorVersion >= 400) {
+ if (jsTest.options().transactionLifetimeLimitSeconds !== undefined) {
+ if (!argArrayContainsSetParameterValue("transactionLifetimeLimitSeconds=")) {
+ argArray.push(...["--setParameter",
+ "transactionLifetimeLimitSeconds=" +
+ jsTest.options().transactionLifetimeLimitSeconds]);
}
}
+ }
- // Disable background cache refreshing to avoid races in tests
- argArray.push(...['--setParameter', "disableLogicalSessionCacheRefresh=true"]);
+ // TODO: Make this unconditional in 3.8.
+ if (!programMajorMinorVersion || programMajorMinorVersion > 304) {
+ if (!argArrayContainsSetParameterValue('orphanCleanupDelaySecs=')) {
+ argArray.push(...['--setParameter', 'orphanCleanupDelaySecs=1']);
+ }
}
- // Since options may not be backward compatible, mongos options are not
- // set on older versions, e.g., mongos-3.0.
- if (programName.endsWith('mongos')) {
- // apply setParameters for mongos
- if (jsTest.options().setParametersMongos) {
- let params = jsTest.options().setParametersMongos;
+ // Since options may not be backward compatible, mongod options are not
+ // set on older versions, e.g., mongod-3.0.
+ if (programName.endsWith('mongod')) {
+ if (jsTest.options().storageEngine === "wiredTiger" ||
+ !jsTest.options().storageEngine) {
+ if (jsTest.options().enableMajorityReadConcern !== undefined &&
+ !argArrayContains("--enableMajorityReadConcern")) {
+ argArray.push(...['--enableMajorityReadConcern',
+ jsTest.options().enableMajorityReadConcern.toString()]);
+ }
+ if (jsTest.options().storageEngineCacheSizeGB &&
+ !argArrayContains('--wiredTigerCacheSizeGB')) {
+ argArray.push(...['--wiredTigerCacheSizeGB',
+ jsTest.options().storageEngineCacheSizeGB]);
+ }
+ if (jsTest.options().wiredTigerEngineConfigString &&
+ !argArrayContains('--wiredTigerEngineConfigString')) {
+ argArray.push(...['--wiredTigerEngineConfigString',
+ jsTest.options().wiredTigerEngineConfigString]);
+ }
+ if (jsTest.options().wiredTigerCollectionConfigString &&
+ !argArrayContains('--wiredTigerCollectionConfigString')) {
+ argArray.push(...['--wiredTigerCollectionConfigString',
+ jsTest.options().wiredTigerCollectionConfigString]);
+ }
+ if (jsTest.options().wiredTigerIndexConfigString &&
+ !argArrayContains('--wiredTigerIndexConfigString')) {
+ argArray.push(...['--wiredTigerIndexConfigString',
+ jsTest.options().wiredTigerIndexConfigString]);
+ }
+ } else if (jsTest.options().storageEngine === "rocksdb") {
+ if (jsTest.options().storageEngineCacheSizeGB) {
+ argArray.push(
+ ...['--rocksdbCacheSizeGB', jsTest.options().storageEngineCacheSizeGB]);
+ }
+ } else if (jsTest.options().storageEngine === "inMemory") {
+ if (jsTest.options().storageEngineCacheSizeGB &&
+ !argArrayContains("--inMemorySizeGB")) {
+ argArray.push(
+ ...["--inMemorySizeGB", jsTest.options().storageEngineCacheSizeGB]);
+ }
+ }
+ // apply setParameters for mongod. The 'setParameters' field should be given as
+ // a plain JavaScript object, where each key is a parameter name and the value
+ // is the value to set for that parameter.
+ if (jsTest.options().setParameters) {
+ let params = jsTest.options().setParameters;
for (let paramName of Object.keys(params)) {
// Only set the 'logComponentVerbosity' parameter if it has not already
// been specified in the given argument array. This means that any
@@ -1118,6 +1198,7 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
argArrayContains("logComponentVerbosity")) {
continue;
}
+
const paramVal = ((param) => {
if (typeof param === "object") {
return JSON.stringify(param);
@@ -1129,261 +1210,157 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
argArray.push(...['--setParameter', setParamStr]);
}
}
- } else if (baseProgramName === 'mongod') {
- if (jsTestOptions().roleGraphInvalidationIsFatal) {
- argArray.push(...['--setParameter', "roleGraphInvalidationIsFatal=true"]);
- }
-
- // Set storageEngine for mongod. There was no storageEngine parameter before 3.0.
- if (jsTest.options().storageEngine &&
- (!programVersion || programMajorMinorVersion >= 300)) {
- if (!argArrayContains("--storageEngine")) {
- argArray.push(...['--storageEngine', jsTest.options().storageEngine]);
- }
- }
-
- // New mongod-specific options in 4.0.x
- if (!programMajorMinorVersion || programMajorMinorVersion >= 400) {
- if (jsTest.options().transactionLifetimeLimitSeconds !== undefined) {
- if (!argArrayContainsSetParameterValue(
- "transactionLifetimeLimitSeconds=")) {
- argArray.push(
- ...["--setParameter",
- "transactionLifetimeLimitSeconds=" +
- jsTest.options().transactionLifetimeLimitSeconds]);
- }
- }
- }
-
- // TODO: Make this unconditional in 3.8.
- if (!programMajorMinorVersion || programMajorMinorVersion > 304) {
- if (!argArrayContainsSetParameterValue('orphanCleanupDelaySecs=')) {
- argArray.push(...['--setParameter', 'orphanCleanupDelaySecs=1']);
- }
- }
-
- // Since options may not be backward compatible, mongod options are not
- // set on older versions, e.g., mongod-3.0.
- if (programName.endsWith('mongod')) {
- if (jsTest.options().storageEngine === "wiredTiger" ||
- !jsTest.options().storageEngine) {
- if (jsTest.options().enableMajorityReadConcern !== undefined &&
- !argArrayContains("--enableMajorityReadConcern")) {
- argArray.push(
- ...['--enableMajorityReadConcern',
- jsTest.options().enableMajorityReadConcern.toString()]);
- }
- if (jsTest.options().storageEngineCacheSizeGB &&
- !argArrayContains('--wiredTigerCacheSizeGB')) {
- argArray.push(...['--wiredTigerCacheSizeGB',
- jsTest.options().storageEngineCacheSizeGB]);
- }
- if (jsTest.options().wiredTigerEngineConfigString &&
- !argArrayContains('--wiredTigerEngineConfigString')) {
- argArray.push(...['--wiredTigerEngineConfigString',
- jsTest.options().wiredTigerEngineConfigString]);
- }
- if (jsTest.options().wiredTigerCollectionConfigString &&
- !argArrayContains('--wiredTigerCollectionConfigString')) {
- argArray.push(...['--wiredTigerCollectionConfigString',
- jsTest.options().wiredTigerCollectionConfigString]);
- }
- if (jsTest.options().wiredTigerIndexConfigString &&
- !argArrayContains('--wiredTigerIndexConfigString')) {
- argArray.push(...['--wiredTigerIndexConfigString',
- jsTest.options().wiredTigerIndexConfigString]);
- }
- } else if (jsTest.options().storageEngine === "rocksdb") {
- if (jsTest.options().storageEngineCacheSizeGB) {
- argArray.push(...['--rocksdbCacheSizeGB',
- jsTest.options().storageEngineCacheSizeGB]);
- }
- } else if (jsTest.options().storageEngine === "inMemory") {
- if (jsTest.options().storageEngineCacheSizeGB &&
- !argArrayContains("--inMemorySizeGB")) {
- argArray.push(
- ...["--inMemorySizeGB", jsTest.options().storageEngineCacheSizeGB]);
- }
- }
- // apply setParameters for mongod. The 'setParameters' field should be given as
- // a plain JavaScript object, where each key is a parameter name and the value
- // is the value to set for that parameter.
- if (jsTest.options().setParameters) {
- let params = jsTest.options().setParameters;
- for (let paramName of Object.keys(params)) {
- // Only set the 'logComponentVerbosity' parameter if it has not already
- // been specified in the given argument array. This means that any
- // 'logComponentVerbosity' settings passed through via TestData will
- // always be overridden by settings passed directly to MongoRunner from
- // within the shell.
- if (paramName === "logComponentVerbosity" &&
- argArrayContains("logComponentVerbosity")) {
- continue;
- }
-
- const paramVal = ((param) => {
- if (typeof param === "object") {
- return JSON.stringify(param);
- }
-
- return param;
- })(params[paramName]);
- const setParamStr = paramName + "=" + paramVal;
- argArray.push(...['--setParameter', setParamStr]);
- }
- }
- }
}
}
-
- return argArray;
}
- /**
- * Start a mongo process with a particular argument array.
- * If we aren't waiting for connect, return {pid: <pid>}.
- * If we are waiting for connect:
- * returns connection to process on success;
- * otherwise returns null if we fail to connect.
- */
- MongoRunner._startWithArgs = function(argArray, env, waitForConnect) {
- // TODO: Make there only be one codepath for starting mongo processes
-
- argArray = appendSetParameterArgs(argArray);
- var port = _parsePort.apply(null, argArray);
- var pid = -1;
- if (env === undefined) {
- pid = _startMongoProgram.apply(null, argArray);
- } else {
- pid = _startMongoProgram({args: argArray, env: env});
- }
+ return argArray;
+}
+
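To make the transformation concrete, a hedged sketch (the exact flags appended depend on jsTest.options()):

    // With enableTestCommands set in TestData, the mongod argv gains a setParameter pair:
    //   ["mongod", "--port", "20000"]
    //   -> ["mongod", "--port", "20000", "--setParameter", "enableTestCommands=1"]
    var argv = appendSetParameterArgs(["mongod", "--port", "20000"]);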
+/**
+ * Start a mongo process with a particular argument array.
+ * If we aren't waiting for connect, return {pid: <pid>, port: <port>}.
+ * If we are waiting for connect:
+ * returns connection to process on success;
+ * otherwise returns null if we fail to connect.
+ */
+MongoRunner._startWithArgs = function(argArray, env, waitForConnect) {
+ // TODO: Consolidate the codepaths for starting mongo processes into one
+
+ argArray = appendSetParameterArgs(argArray);
+ var port = _parsePort.apply(null, argArray);
+ var pid = -1;
+ if (env === undefined) {
+ pid = _startMongoProgram.apply(null, argArray);
+ } else {
+ pid = _startMongoProgram({args: argArray, env: env});
+ }
- delete serverExitCodeMap[port];
- if (!waitForConnect) {
- return {
- pid: pid,
- port: port,
- };
- }
+ delete serverExitCodeMap[port];
+ if (!waitForConnect) {
+ return {
+ pid: pid,
+ port: port,
+ };
+ }
- var conn = null;
- assert.soon(function() {
- try {
- conn = new Mongo("127.0.0.1:" + port);
- conn.pid = pid;
+ var conn = null;
+ assert.soon(function() {
+ try {
+ conn = new Mongo("127.0.0.1:" + port);
+ conn.pid = pid;
+ return true;
+ } catch (e) {
+ var res = checkProgram(pid);
+ if (!res.alive) {
+ print("Could not start mongo program at " + port +
+ ", process ended with exit code: " + res.exitCode);
+ serverExitCodeMap[port] = res.exitCode;
return true;
- } catch (e) {
- var res = checkProgram(pid);
- if (!res.alive) {
- print("Could not start mongo program at " + port +
- ", process ended with exit code: " + res.exitCode);
- serverExitCodeMap[port] = res.exitCode;
- return true;
- }
}
- return false;
- }, "unable to connect to mongo program on port " + port, 600 * 1000);
-
- return conn;
- };
-
- /**
- * DEPRECATED
- *
- * Start mongod or mongos and return a Mongo() object connected to there.
- * This function's first argument is "mongod" or "mongos" program name, \
- * and subsequent arguments to this function are passed as
- * command line arguments to the program.
- */
- startMongoProgram = function() {
- var port = _parsePort.apply(null, arguments);
-
- // Enable test commands.
- // TODO: Make this work better with multi-version testing so that we can support
- // enabling this on 2.4 when testing 2.6
- var args = Array.from(arguments);
- args = appendSetParameterArgs(args);
- var pid = _startMongoProgram.apply(null, args);
-
- var m;
- assert.soon(function() {
- try {
- m = new Mongo("127.0.0.1:" + port);
- m.pid = pid;
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+
+ return conn;
+};
+
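The waitForConnect flag changes what callers get back, as documented above; a brief caller sketch (illustrative):

    // Fire-and-forget start: only {pid, port} is returned; connect manually later.
    var res = MongoRunner._startWithArgs(["mongod", "--port", "20000"], undefined, false);
    var conn = new Mongo("127.0.0.1:" + res.port);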
+/**
+ * DEPRECATED
+ *
+ * Start mongod or mongos and return a Mongo() object connected to it.
+ * This function's first argument is the "mongod" or "mongos" program name,
+ * and subsequent arguments to this function are passed as
+ * command line arguments to the program.
+ */
+startMongoProgram = function() {
+ var port = _parsePort.apply(null, arguments);
+
+ // Enable test commands.
+ // TODO: Make this work better with multi-version testing so that we can support
+ // enabling this on 2.4 when testing 2.6
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var pid = _startMongoProgram.apply(null, args);
+
+ var m;
+ assert.soon(function() {
+ try {
+ m = new Mongo("127.0.0.1:" + port);
+ m.pid = pid;
+ return true;
+ } catch (e) {
+ var res = checkProgram(pid);
+ if (!res.alive) {
+ print("Could not start mongo program at " + port +
+ ", process ended with exit code: " + res.exitCode);
+ // Break out
+ m = null;
return true;
- } catch (e) {
- var res = checkProgram(pid);
- if (!res.alive) {
- print("Could not start mongo program at " + port +
- ", process ended with exit code: " + res.exitCode);
- // Break out
- m = null;
- return true;
- }
}
- return false;
- }, "unable to connect to mongo program on port " + port, 600 * 1000);
-
- return m;
- };
-
- runMongoProgram = function() {
- var args = Array.from(arguments);
- args = appendSetParameterArgs(args);
- var progName = args[0];
-
- // The bsondump tool doesn't support these auth related command line flags.
- if (jsTestOptions().auth && progName != 'mongod' && progName != 'bsondump') {
- args = args.slice(1);
- args.unshift(progName,
- '-u',
- jsTestOptions().authUser,
- '-p',
- jsTestOptions().authPassword,
- '--authenticationDatabase=admin');
}
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+
+ return m;
+};
+
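A sketch of the deprecated positional style (illustrative paths; allocatePort comes from servers_misc.js below):

    var m = startMongoProgram(
        "mongod", "--port", allocatePort(), "--dbpath", MongoRunner.dataPath + "demo");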
+runMongoProgram = function() {
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var progName = args[0];
+
+ // The bsondump tool doesn't support these auth related command line flags.
+ if (jsTestOptions().auth && progName != 'mongod' && progName != 'bsondump') {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u',
+ jsTestOptions().authUser,
+ '-p',
+ jsTestOptions().authPassword,
+ '--authenticationDatabase=admin');
+ }
- if (progName == 'mongo' && !_useWriteCommandsDefault()) {
- progName = args[0];
- args = args.slice(1);
- args.unshift(progName, '--useLegacyWriteOps');
- }
-
- return _runMongoProgram.apply(null, args);
- };
-
- // Start a mongo program instance. This function's first argument is the
- // program name, and subsequent arguments to this function are passed as
- // command line arguments to the program. Returns pid of the spawned program.
- startMongoProgramNoConnect = function() {
- var args = Array.from(arguments);
- args = appendSetParameterArgs(args);
- var progName = args[0];
-
- if (jsTestOptions().auth) {
- args = args.slice(1);
- args.unshift(progName,
- '-u',
- jsTestOptions().authUser,
- '-p',
- jsTestOptions().authPassword,
- '--authenticationDatabase=admin');
- }
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ progName = args[0];
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
- if (progName == 'mongo' && !_useWriteCommandsDefault()) {
- args = args.slice(1);
- args.unshift(progName, '--useLegacyWriteOps');
- }
+ return _runMongoProgram.apply(null, args);
+};
+
+// Start a mongo program instance. This function's first argument is the
+// program name, and subsequent arguments to this function are passed as
+// command line arguments to the program. Returns pid of the spawned program.
+startMongoProgramNoConnect = function() {
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var progName = args[0];
+
+ if (jsTestOptions().auth) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u',
+ jsTestOptions().authUser,
+ '-p',
+ jsTestOptions().authPassword,
+ '--authenticationDatabase=admin');
+ }
- return _startMongoProgram.apply(null, args);
- };
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
- myPort = function() {
- var m = db.getMongo();
- if (m.host.match(/:/))
- return m.host.match(/:(.*)/)[1];
- else
- return 27017;
- };
+ return _startMongoProgram.apply(null, args);
+};
+
+myPort = function() {
+ var m = db.getMongo();
+ if (m.host.match(/:/))
+ return m.host.match(/:(.*)/)[1];
+ else
+ return 27017;
+};
}());
diff --git a/src/mongo/shell/servers_misc.js b/src/mongo/shell/servers_misc.js
index 825bca3d689..4edda039549 100644
--- a/src/mongo/shell/servers_misc.js
+++ b/src/mongo/shell/servers_misc.js
@@ -78,28 +78,28 @@ var allocatePort;
var resetAllocatedPorts;
(function() {
- // Defer initializing these variables until the first call, as TestData attributes may be
- // initialized as part of the --eval argument (e.g. by resmoke.py), which will not be evaluated
- // until after this has loaded.
- var maxPort;
- var nextPort;
-
- allocatePort = function() {
- // The default port was chosen in an attempt to have a large number of unassigned ports that
- // are also outside the ephemeral port range.
- nextPort = nextPort || jsTestOptions().minPort || 20000;
- maxPort = maxPort || jsTestOptions().maxPort || Math.pow(2, 16) - 1;
-
- if (nextPort === maxPort) {
- throw new Error("Exceeded maximum port range in allocatePort()");
- }
- return nextPort++;
- };
+// Defer initializing these variables until the first call, as TestData attributes may be
+// initialized as part of the --eval argument (e.g. by resmoke.py), which will not be evaluated
+// until after this has loaded.
+var maxPort;
+var nextPort;
+
+allocatePort = function() {
+ // The default port was chosen in an attempt to have a large number of unassigned ports that
+ // are also outside the ephemeral port range.
+ nextPort = nextPort || jsTestOptions().minPort || 20000;
+ maxPort = maxPort || jsTestOptions().maxPort || Math.pow(2, 16) - 1;
+
+ if (nextPort === maxPort) {
+ throw new Error("Exceeded maximum port range in allocatePort()");
+ }
+ return nextPort++;
+};
- resetAllocatedPorts = function() {
- jsTest.log("Resetting the range of allocated ports");
- maxPort = nextPort = undefined;
- };
+resetAllocatedPorts = function() {
+ jsTest.log("Resetting the range of allocated ports");
+ maxPort = nextPort = undefined;
+};
})();
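Port allocation is a simple bump allocator over a configurable range; a usage sketch (illustrative values):

    var p1 = allocatePort();   // 20000 by default on the first call
    var p2 = allocatePort();   // p1 + 1
    resetAllocatedPorts();     // the next allocatePort() starts the range over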
/**
@@ -142,9 +142,9 @@ function startParallelShell(jsCode, port, noConnect) {
}
// Convert function into call-string
- if (typeof(jsCode) == "function") {
+ if (typeof (jsCode) == "function") {
jsCode = "(" + jsCode.toString() + ")();";
- } else if (typeof(jsCode) == "string") {
+ } else if (typeof (jsCode) == "string") {
}
// do nothing
else {
@@ -153,7 +153,7 @@ function startParallelShell(jsCode, port, noConnect) {
if (noConnect) {
args.push("--nodb");
- } else if (typeof(db) == "object") {
+ } else if (typeof (db) == "object") {
jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
}
diff --git a/src/mongo/shell/session.js b/src/mongo/shell/session.js
index 0313f46fc0e..0f6eb11323a 100644
--- a/src/mongo/shell/session.js
+++ b/src/mongo/shell/session.js
@@ -5,7 +5,11 @@
* https://github.com/mongodb/specifications/blob/master/source/sessions/driver-sessions.rst#abstract
*/
var {
- DriverSession, SessionOptions, _DummyDriverSession, _DelegatingDriverSession, _ServerSession,
+ DriverSession,
+ SessionOptions,
+ _DummyDriverSession,
+ _DelegatingDriverSession,
+ _ServerSession,
} = (function() {
"use strict";
@@ -416,10 +420,10 @@ var {
if (writeError !== undefined) {
if (jsTest.options().logRetryAttempts) {
- jsTest.log("Retrying " + cmdName +
- " due to retryable write error (code=" +
- writeError.code + "), subsequent retries remaining: " +
- numRetries);
+ jsTest.log(
+ "Retrying " + cmdName +
+ " due to retryable write error (code=" + writeError.code +
+ "), subsequent retries remaining: " + numRetries);
}
if (client.isReplicaSetConnection()) {
client._markNodeAsFailed(
@@ -1035,54 +1039,54 @@ var {
const DummyDriverSession =
makeDriverSessionConstructor( // Force clang-format to break this line.
{
- createServerSession: function createServerSession(client) {
- return {
- injectSessionId: function injectSessionId(cmdObj) {
- return cmdObj;
- },
-
- assignTransactionNumber: function assignTransactionNumber(cmdObj) {
- return cmdObj;
- },
-
- canRetryWrites: function canRetryWrites(cmdObj) {
- return false;
- },
-
- assignTxnInfo: function assignTxnInfo(cmdObj) {
- return cmdObj;
- },
-
- isTxnActive: function isTxnActive() {
- return false;
- },
-
- isFirstStatement: function isFirstStatement() {
- return false;
- },
-
- getTxnOptions: function getTxnOptions() {
- return {};
- },
-
- startTransaction: function startTransaction() {
- throw new Error("Must call startSession() on the Mongo connection " +
- "object before starting a transaction.");
- },
-
- commitTransaction: function commitTransaction() {
- throw new Error("Must call startSession() on the Mongo connection " +
- "object before committing a transaction.");
- },
-
- abortTransaction: function abortTransaction() {
- throw new Error("Must call startSession() on the Mongo connection " +
- "object before aborting a transaction.");
- },
- };
- },
-
- endSession: function endSession(serverSession) {},
+ createServerSession: function createServerSession(client) {
+ return {
+ injectSessionId: function injectSessionId(cmdObj) {
+ return cmdObj;
+ },
+
+ assignTransactionNumber: function assignTransactionNumber(cmdObj) {
+ return cmdObj;
+ },
+
+ canRetryWrites: function canRetryWrites(cmdObj) {
+ return false;
+ },
+
+ assignTxnInfo: function assignTxnInfo(cmdObj) {
+ return cmdObj;
+ },
+
+ isTxnActive: function isTxnActive() {
+ return false;
+ },
+
+ isFirstStatement: function isFirstStatement() {
+ return false;
+ },
+
+ getTxnOptions: function getTxnOptions() {
+ return {};
+ },
+
+ startTransaction: function startTransaction() {
+ throw new Error("Must call startSession() on the Mongo connection " +
+ "object before starting a transaction.");
+ },
+
+ commitTransaction: function commitTransaction() {
+ throw new Error("Must call startSession() on the Mongo connection " +
+ "object before committing a transaction.");
+ },
+
+ abortTransaction: function abortTransaction() {
+ throw new Error("Must call startSession() on the Mongo connection " +
+ "object before aborting a transaction.");
+ },
+ };
+ },
+
+ endSession: function endSession(serverSession) {},
},
{causalConsistency: false, retryWrites: false});
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index f814a02cbf4..6008c769202 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -102,7 +102,6 @@
* configRS - If the config servers are a replset, this will contain the config ReplSetTest object
*/
var ShardingTest = function(params) {
-
if (!(this instanceof ShardingTest)) {
return new ShardingTest(params);
}
@@ -292,16 +291,16 @@ var ShardingTest = function(params) {
countDBsFound++;
printjson(db);
});
- throw Error("couldn't find dbname: " + dbname + " in config.databases. Total DBs: " +
- countDBsFound);
+ throw Error("couldn't find dbname: " + dbname +
+ " in config.databases. Total DBs: " + countDBsFound);
};
this.getNonPrimaries = function(dbname) {
var x = this.config.databases.findOne({_id: dbname});
if (!x) {
this.config.databases.find().forEach(printjson);
- throw Error("couldn't find dbname: " + dbname + " total: " +
- this.config.databases.count());
+ throw Error("couldn't find dbname: " + dbname +
+ " total: " + this.config.databases.count());
}
return this.config.shards.find({_id: {$ne: x.primary}}).map(z => z._id);
@@ -334,8 +333,8 @@ var ShardingTest = function(params) {
}
}
- throw Error("can't find server connection for db '" + dbname + "'s primary shard: " +
- tojson(primaryShard));
+ throw Error("can't find server connection for db '" + dbname +
+ "'s primary shard: " + tojson(primaryShard));
};
this.normalize = function(x) {
@@ -859,9 +858,9 @@ var ShardingTest = function(params) {
}
if (arguments.length >= 3) {
- if (typeof(beforeRestartCallback) !== "function") {
+ if (typeof (beforeRestartCallback) !== "function") {
throw new Error("beforeRestartCallback must be a function but was of type " +
- typeof(beforeRestartCallback));
+ typeof (beforeRestartCallback));
}
beforeRestartCallback();
}
@@ -1621,7 +1620,6 @@ var ShardingTest = function(params) {
MongoRunner.getBinVersionFor(otherParams.configOptions.binVersion)))) {
this.configRS.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
}
-
};
// Stub for a hook to check that collection UUIDs are consistent across shards and the config
diff --git a/src/mongo/shell/shell_options.cpp b/src/mongo/shell/shell_options.cpp
index 325584765db..8ee472bc777 100644
--- a/src/mongo/shell/shell_options.cpp
+++ b/src/mongo/shell/shell_options.cpp
@@ -61,7 +61,8 @@ using std::vector;
// SERVER-36807: Limit --setShellParameter to SetParameters we know we want to expose.
const std::set<std::string> kSetShellParameterWhitelist = {
- "disabledSecureAllocatorDomains", "newLineAfterPasswordPromptForTest",
+ "disabledSecureAllocatorDomains",
+ "newLineAfterPasswordPromptForTest",
};
std::string getMongoShellHelp(StringData name, const moe::OptionSection& options) {
@@ -317,14 +318,14 @@ Status storeMongoShellOptions(const moe::Environment& params,
auto* param = paramIt->second;
if (!param->allowedToChangeAtStartup()) {
return {ErrorCodes::BadValue,
- str::stream() << "Cannot use --setShellParameter to set '" << name
- << "' at startup"};
+ str::stream()
+ << "Cannot use --setShellParameter to set '" << name << "' at startup"};
}
auto status = param->setFromString(it.second);
if (!status.isOK()) {
return {ErrorCodes::BadValue,
- str::stream() << "Bad value for parameter '" << name << "': "
- << status.reason()};
+ str::stream()
+ << "Bad value for parameter '" << name << "': " << status.reason()};
}
}
}
diff --git a/src/mongo/shell/shell_options.h b/src/mongo/shell/shell_options.h
index 531c6369790..6a54fcabb86 100644
--- a/src/mongo/shell/shell_options.h
+++ b/src/mongo/shell/shell_options.h
@@ -97,4 +97,4 @@ bool handlePreValidationMongoShellOptions(const moe::Environment& params,
Status storeMongoShellOptions(const moe::Environment& params, const std::vector<std::string>& args);
void redactPasswordOptions(int argc, char** argv);
-}
+} // namespace mongo
diff --git a/src/mongo/shell/shell_options_init.cpp b/src/mongo/shell/shell_options_init.cpp
index 98cf02f5c53..2ea64570bc8 100644
--- a/src/mongo/shell/shell_options_init.cpp
+++ b/src/mongo/shell/shell_options_init.cpp
@@ -59,4 +59,4 @@ MONGO_STARTUP_OPTIONS_STORE(MongoShellOptions)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils.h b/src/mongo/shell/shell_utils.h
index 7bfaf9ab014..0f9c7a7615b 100644
--- a/src/mongo/shell/shell_utils.h
+++ b/src/mongo/shell/shell_utils.h
@@ -94,5 +94,5 @@ extern stdx::mutex& mongoProgramOutputMutex;
// Helper to tell if a file exists cross platform
// TODO: Remove this when we have a cross platform file utility library
bool fileExists(const std::string& file);
-}
-}
+} // namespace shell_utils
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils_extended.cpp b/src/mongo/shell/shell_utils_extended.cpp
index 2db4d7713a3..7a5b532747c 100644
--- a/src/mongo/shell/shell_utils_extended.cpp
+++ b/src/mongo/shell/shell_utils_extended.cpp
@@ -365,8 +365,8 @@ BSONObj getFileMode(const BSONObj& a, void* data) {
auto fileStatus = boost::filesystem::status(path, ec);
if (ec) {
uasserted(50974,
- str::stream() << "Unable to get status for file \"" << pathStr << "\": "
- << ec.message());
+ str::stream() << "Unable to get status for file \"" << pathStr
+ << "\": " << ec.message());
}
return BSON("" << fileStatus.permissions());
@@ -389,5 +389,5 @@ void installShellUtilsExtended(Scope& scope) {
scope.injectNative("umask", changeUmask);
scope.injectNative("getFileMode", getFileMode);
}
-}
-}
+} // namespace shell_utils
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils_extended.h b/src/mongo/shell/shell_utils_extended.h
index 0fb83c4d7a0..543095d1187 100644
--- a/src/mongo/shell/shell_utils_extended.h
+++ b/src/mongo/shell/shell_utils_extended.h
@@ -37,4 +37,4 @@ class Scope;
namespace shell_utils {
void installShellUtilsExtended(Scope& scope);
}
-}
+} // namespace mongo
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 736232614b0..d5d8e010d87 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -278,11 +278,13 @@ ProgramRunner::ProgramRunner(const BSONObj& args, const BSONObj& env, bool isMon
_port = -1;
string prefix("mongod-");
- bool isMongodProgram = isMongo && (string("mongod") == programName ||
- programName.string().compare(0, prefix.size(), prefix) == 0);
+ bool isMongodProgram = isMongo &&
+ (string("mongod") == programName ||
+ programName.string().compare(0, prefix.size(), prefix) == 0);
prefix = "mongos-";
- bool isMongosProgram = isMongo && (string("mongos") == programName ||
- programName.string().compare(0, prefix.size(), prefix) == 0);
+ bool isMongosProgram = isMongo &&
+ (string("mongos") == programName ||
+ programName.string().compare(0, prefix.size(), prefix) == 0);
if (!isMongo) {
_name = "sh";
diff --git a/src/mongo/shell/types.js b/src/mongo/shell/types.js
index 19c1fb272f3..faaa5d00499 100644
--- a/src/mongo/shell/types.js
+++ b/src/mongo/shell/types.js
@@ -1,5 +1,5 @@
// Date and time types
-if (typeof(Timestamp) != "undefined") {
+if (typeof (Timestamp) != "undefined") {
Timestamp.prototype.tojson = function() {
return this.toString();
};
@@ -265,13 +265,13 @@ Array.stdDev = function(arr) {
Object.extend = function(dst, src, deep) {
for (var k in src) {
var v = src[k];
- if (deep && typeof(v) == "object" && v !== null) {
+ if (deep && typeof (v) == "object" && v !== null) {
if (v.constructor === ObjectId) { // convert ObjectId properly
eval("v = " + tojson(v));
} else if ("floatApprox" in v) { // convert NumberLong properly
eval("v = " + tojson(v));
} else {
- v = Object.extend(typeof(v.length) == "number" ? [] : {}, v, true);
+ v = Object.extend(typeof (v.length) == "number" ? [] : {}, v, true);
}
}
dst[k] = v;
@@ -433,7 +433,7 @@ ObjectId.fromDate = function(source) {
if (source instanceof Date) {
sourceDate = source;
} else {
- throw Error("Cannot create ObjectId from " + typeof(source) + ": " + tojson(source));
+ throw Error("Cannot create ObjectId from " + typeof (source) + ": " + tojson(source));
}
// Convert date object to seconds since Unix epoch.
@@ -449,7 +449,7 @@ ObjectId.fromDate = function(source) {
};
// DBPointer
-if (typeof(DBPointer) != "undefined") {
+if (typeof (DBPointer) != "undefined") {
DBPointer.prototype.fetch = function() {
assert(this.ns, "need a ns");
assert(this.id, "need an id");
@@ -476,7 +476,7 @@ if (typeof(DBPointer) != "undefined") {
}
// DBRef
-if (typeof(DBRef) != "undefined") {
+if (typeof (DBRef) != "undefined") {
DBRef.prototype.fetch = function() {
assert(this.$ref, "need a ns");
assert(this.$id, "need an id");
@@ -513,7 +513,7 @@ if (typeof(DBRef) != "undefined") {
}
// BinData
-if (typeof(BinData) != "undefined") {
+if (typeof (BinData) != "undefined") {
BinData.prototype.tojson = function() {
return this.toString();
};
@@ -529,7 +529,7 @@ if (typeof(BinData) != "undefined") {
}
// Map
-if (typeof(Map) == "undefined") {
+if (typeof (Map) == "undefined") {
Map = function() {
this._data = {};
};
@@ -539,7 +539,7 @@ Map.hash = function(val) {
if (!val)
return val;
- switch (typeof(val)) {
+ switch (typeof (val)) {
case 'string':
case 'number':
case 'date':
@@ -553,7 +553,7 @@ Map.hash = function(val) {
return s;
}
- throw Error("can't hash : " + typeof(val));
+ throw Error("can't hash : " + typeof (val));
};
Map.prototype.put = function(key, value) {
@@ -594,7 +594,7 @@ Map.prototype.values = function() {
return all;
};
-if (typeof(gc) == "undefined") {
+if (typeof (gc) == "undefined") {
gc = function() {
print("warning: using noop gc()");
};
@@ -640,7 +640,6 @@ tojson = function(x, indent, nolint, depth) {
default:
throw Error("tojson can't handle type " + (typeof x));
}
-
};
tojson.MAX_DEPTH = 100;
@@ -655,11 +654,11 @@ tojsonObject = function(x, indent, nolint, depth) {
if (!indent)
indent = "";
- if (typeof(x.tojson) == "function" && x.tojson != tojson) {
+ if (typeof (x.tojson) == "function" && x.tojson != tojson) {
return x.tojson(indent, nolint, depth);
}
- if (x.constructor && typeof(x.constructor.tojson) == "function" &&
+ if (x.constructor && typeof (x.constructor.tojson) == "function" &&
x.constructor.tojson != tojson) {
return x.constructor.tojson(x, indent, nolint, depth);
}
@@ -685,7 +684,7 @@ tojsonObject = function(x, indent, nolint, depth) {
indent += tabSpace;
var keys = x;
- if (typeof(x._simpleKeys) == "function")
+ if (typeof (x._simpleKeys) == "function")
keys = x._simpleKeys();
var fieldStrings = [];
for (var k in keys) {
@@ -721,14 +720,14 @@ printjsononeline = function(x) {
};
isString = function(x) {
- return typeof(x) == "string";
+ return typeof (x) == "string";
};
isNumber = function(x) {
- return typeof(x) == "number";
+ return typeof (x) == "number";
};
// This function returns true even if the argument is an array. See SERVER-14220.
isObject = function(x) {
- return typeof(x) == "object";
+ return typeof (x) == "object";
};
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index 11e6cde6902..1201e800eeb 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -213,7 +213,7 @@ print.captureAllOutput = function(fn, args) {
};
var indentStr = function(indent, s) {
- if (typeof(s) === "undefined") {
+ if (typeof (s) === "undefined") {
s = indent;
indent = 0;
}
@@ -350,7 +350,7 @@ jsTestLog = function(msg) {
if (typeof msg === "object") {
msg = tojson(msg);
}
- assert.eq(typeof(msg), "string", "Received: " + msg);
+ assert.eq(typeof (msg), "string", "Received: " + msg);
const msgs = ["----", ...msg.split("\n"), "----"].map(s => `[jsTest] ${s}`);
print(`\n\n${msgs.join("\n")}\n\n`);
};
@@ -596,10 +596,10 @@ if (typeof _shouldUseImplicitSessions === 'undefined') {
}
shellPrintHelper = function(x) {
- if (typeof(x) == "undefined") {
+ if (typeof (x) == "undefined") {
// Make sure that we have a db var before we use it
// TODO: This implicit calling of GLE can cause subtle, hard to track issues - remove?
- if (__callLastError && typeof(db) != "undefined" && db.getMongo &&
+ if (__callLastError && typeof (db) != "undefined" && db.getMongo &&
db.getMongo().writeMode() == "legacy") {
__callLastError = false;
// explicit w:1 so that replset getLastErrorDefaults aren't used here which would be bad
@@ -638,7 +638,6 @@ shellPrintHelper = function(x) {
shellAutocomplete = function(
/*prefix*/) { // outer scope function called on init. Actual function at end
-
var universalMethods =
"constructor prototype toString valueOf toLocaleString hasOwnProperty propertyIsEnumerable"
.split(' ');
@@ -743,7 +742,7 @@ shellAutocomplete = function(
{}; // see http://dreaminginjavascript.wordpress.com/2008/08/22/eliminating-duplicates/
for (var i = 0; i < possibilities.length; i++) {
var p = possibilities[i];
- if (typeof(curObj[p]) == "undefined" && curObj != global)
+ if (typeof (curObj[p]) == "undefined" && curObj != global)
continue; // extraGlobals aren't in the global object
if (p.length == 0 || p.length < lastPrefix.length)
continue;
@@ -829,7 +828,7 @@ shellHelper.set = function(str) {
};
shellHelper.it = function() {
- if (typeof(___it___) == "undefined" || ___it___ == null) {
+ if (typeof (___it___) == "undefined" || ___it___ == null) {
print("no cursor");
return;
}
@@ -862,7 +861,7 @@ shellHelper.show = function(what) {
continue;
var val = x[z];
- var mytype = typeof(val);
+ var mytype = typeof (val);
if (mytype == "string" || mytype == "number")
l += z + ":" + val + " ";
@@ -1120,7 +1119,8 @@ shellHelper.show = function(what) {
}
if (matchesKnownImposterSignature) {
- print("\n" + "Warning: Non-Genuine MongoDB Detected\n\n" +
+ print("\n" +
+ "Warning: Non-Genuine MongoDB Detected\n\n" +
"This server or service appears to be an emulation of MongoDB " +
"rather than an official MongoDB product.\n\n" +
@@ -1137,7 +1137,6 @@ shellHelper.show = function(what) {
}
throw Error("don't know how to show [" + what + "]");
-
};
__promptWrapper__ = function(promptFunction) {
@@ -1173,8 +1172,8 @@ Math.sigFig = function(x, N) {
var Random = (function() {
var initialized = false;
- var errorMsg =
- "The random number generator hasn't been seeded yet; " + "call Random.setRandomSeed()";
+ var errorMsg = "The random number generator hasn't been seeded yet; " +
+ "call Random.setRandomSeed()";
// Set the random generator seed.
function srand(s) {
@@ -1248,7 +1247,6 @@ var Random = (function() {
setRandomSeed: setRandomSeed,
srand: srand,
};
-
})();
/**
@@ -1351,7 +1349,8 @@ _awaitRSHostViaRSMonitor = function(hostAddr, desiredState, rsName, timeout) {
desiredState = {ok: true};
}
- print("Awaiting " + hostAddr + " to be " + tojson(desiredState) + " in " + " rs " + rsName);
+ print("Awaiting " + hostAddr + " to be " + tojson(desiredState) + " in " +
+ " rs " + rsName);
var tests = 0;
assert.soon(
@@ -1387,8 +1386,8 @@ _awaitRSHostViaRSMonitor = function(hostAddr, desiredState, rsName, timeout) {
}
return false;
},
- "timed out waiting for replica set member: " + hostAddr + " to reach state: " +
- tojson(desiredState),
+ "timed out waiting for replica set member: " + hostAddr +
+ " to reach state: " + tojson(desiredState),
timeout);
};
@@ -1700,35 +1699,52 @@ help = shellHelper.help = function(x) {
print("\t returns a connection to the new server");
return;
} else if (x == "") {
- print("\t" + "db.help() help on db methods");
- print("\t" + "db.mycoll.help() help on collection methods");
- print("\t" + "sh.help() sharding helpers");
- print("\t" + "rs.help() replica set helpers");
- print("\t" + "help admin administrative help");
- print("\t" + "help connect connecting to a db help");
- print("\t" + "help keys key shortcuts");
- print("\t" + "help misc misc things to know");
- print("\t" + "help mr mapreduce");
+ print("\t" +
+ "db.help() help on db methods");
+ print("\t" +
+ "db.mycoll.help() help on collection methods");
+ print("\t" +
+ "sh.help() sharding helpers");
+ print("\t" +
+ "rs.help() replica set helpers");
+ print("\t" +
+ "help admin administrative help");
+ print("\t" +
+ "help connect connecting to a db help");
+ print("\t" +
+ "help keys key shortcuts");
+ print("\t" +
+ "help misc misc things to know");
+ print("\t" +
+ "help mr mapreduce");
print();
- print("\t" + "show dbs show database names");
- print("\t" + "show collections show collections in current database");
- print("\t" + "show users show users in current database");
+ print("\t" +
+ "show dbs show database names");
+ print("\t" +
+ "show collections show collections in current database");
+ print("\t" +
+ "show users show users in current database");
print(
"\t" +
"show profile show most recent system.profile entries with time >= 1ms");
- print("\t" + "show logs show the accessible logger names");
+ print("\t" +
+ "show logs show the accessible logger names");
print(
"\t" +
"show log [name] prints out the last segment of log in memory, 'global' is default");
- print("\t" + "use <db_name> set current database");
- print("\t" + "db.foo.find() list objects in collection foo");
- print("\t" + "db.foo.find( { a : 1 } ) list objects in foo where a == 1");
+ print("\t" +
+ "use <db_name> set current database");
+ print("\t" +
+ "db.foo.find() list objects in collection foo");
+ print("\t" +
+ "db.foo.find( { a : 1 } ) list objects in foo where a == 1");
print(
"\t" +
"it result of the last line evaluated; use to further iterate");
print("\t" +
"DBQuery.shellBatchSize = x set default number of items to display on shell");
- print("\t" + "exit quit the mongo shell");
+ print("\t" +
+ "exit quit the mongo shell");
} else
print("unknown help option");
};
diff --git a/src/mongo/shell/utils_auth.js b/src/mongo/shell/utils_auth.js
index 0343a81ceef..9beb08db940 100644
--- a/src/mongo/shell/utils_auth.js
+++ b/src/mongo/shell/utils_auth.js
@@ -1,146 +1,146 @@
var authutil;
(function() {
- assert(!authutil);
- authutil = {};
+assert(!authutil);
+authutil = {};
- /**
- * Logs out all connections "conn" from database "dbname".
- */
- authutil.logout = function(conn, dbname) {
- var i;
- if (null == conn.length) {
- conn = [conn];
- }
- for (i = 0; i < conn.length; ++i) {
- var curDB = new DB(conn[i], dbname);
- curDB.logout();
- }
- };
-
- /**
- * Authenticates all connections in "conns" using "authParams" on database "dbName".
- *
- * Raises an exception if any authentication fails, and tries to leave all connections
- * in "conns" in the logged-out-of-dbName state.
- */
- authutil.assertAuthenticate = function(conns, dbName, authParams) {
- var conn, i, ex, ex2;
- if (conns.length == null)
- conns = [conns];
-
- try {
- for (i = 0; i < conns.length; ++i) {
- conn = conns[i];
- // Bypass the implicit auth call in getDB();
- var db = new DB(conn, dbName);
- try {
- retryOnNetworkError(db._authOrThrow.bind(db, authParams));
- } catch (ex3) {
- doassert("assert failed : " + "Failed to authenticate " + conn + " to " +
- dbName + " using parameters " + tojson(authParams) + " : " + ex3);
- }
- }
- } catch (ex) {
- try {
- authutil.logout(conns, dbName);
- } catch (ex2) {
- }
- throw ex;
- }
- };
+/**
+ * Logs out all connections "conn" from database "dbname".
+ */
+authutil.logout = function(conn, dbname) {
+ var i;
+ if (null == conn.length) {
+ conn = [conn];
+ }
+ for (i = 0; i < conn.length; ++i) {
+ var curDB = new DB(conn[i], dbname);
+ curDB.logout();
+ }
+};
- /**
- * Authenticates all connections in "conns" using "authParams" on database "dbName".
- * Raises an exception if any of the authentications succeed.
- */
- authutil.assertAuthenticateFails = function(conns, dbName, authParams) {
- var conn, i;
- if (conns.length == null)
- conns = [conns];
+/**
+ * Authenticates all connections in "conns" using "authParams" on database "dbName".
+ *
+ * Raises an exception if any authentication fails, and tries to leave all connections
+ * in "conns" in the logged-out-of-dbName state.
+ */
+authutil.assertAuthenticate = function(conns, dbName, authParams) {
+ var conn, i, ex, ex2;
+ if (conns.length == null)
+ conns = [conns];
+ try {
for (i = 0; i < conns.length; ++i) {
conn = conns[i];
// Bypass the implicit auth call in getDB();
var db = new DB(conn, dbName);
- const ex = assert.throws(retryOnNetworkError,
- [db._authOrThrow.bind(db, authParams)],
- "Unexpectedly authenticated " + conn + " to " + dbName +
- " using parameters " + tojson(authParams));
- if (isNetworkError(ex)) {
- throw ex;
+ try {
+ retryOnNetworkError(db._authOrThrow.bind(db, authParams));
+ } catch (ex3) {
+ doassert("assert failed : " +
+ "Failed to authenticate " + conn + " to " + dbName + " using parameters " +
+ tojson(authParams) + " : " + ex3);
}
}
- };
+ } catch (ex) {
+ try {
+ authutil.logout(conns, dbName);
+ } catch (ex2) {
+ }
+ throw ex;
+ }
+};
- /**
- * Executes action() after authenticating the keyfile user on "conn", then logs out the keyfile
- * user.
- */
- authutil.asCluster = function(conn, keyfile, action) {
- var ex;
- const authMode = jsTest.options().clusterAuthMode;
+/**
+ * Authenticates all connections in "conns" using "authParams" on database "dbName".
+ * Raises an exception if any of the authentications succeed.
+ */
+authutil.assertAuthenticateFails = function(conns, dbName, authParams) {
+ var conn, i;
+ if (conns.length == null)
+ conns = [conns];
- // put a connection in an array for uniform processing.
- let connArray = conn;
- if (conn.length == null)
- connArray = [conn];
+ for (i = 0; i < conns.length; ++i) {
+ conn = conns[i];
+ // Bypass the implicit auth call in getDB();
+ var db = new DB(conn, dbName);
+ const ex = assert.throws(retryOnNetworkError,
+ [db._authOrThrow.bind(db, authParams)],
+ "Unexpectedly authenticated " + conn + " to " + dbName +
+ " using parameters " + tojson(authParams));
+ if (isNetworkError(ex)) {
+ throw ex;
+ }
+ }
+};
- let clusterTimes = connArray.map(connElem => {
- const connClusterTime = connElem.getClusterTime();
- const sessionClusterTime = connElem._getDefaultSession().getClusterTime();
- const operationTime = connElem._getDefaultSession().getOperationTime();
+/**
+ * Executes action() after authenticating the keyfile user on "conn", then logs out the keyfile
+ * user.
+ */
+authutil.asCluster = function(conn, keyfile, action) {
+ var ex;
+ const authMode = jsTest.options().clusterAuthMode;
- connElem.resetClusterTime_forTesting();
- connElem._getDefaultSession().resetClusterTime_forTesting();
- connElem._getDefaultSession().resetOperationTime_forTesting();
+ // put a connection in an array for uniform processing.
+ let connArray = conn;
+ if (conn.length == null)
+ connArray = [conn];
- return {connClusterTime, sessionClusterTime, operationTime};
- });
+ let clusterTimes = connArray.map(connElem => {
+ const connClusterTime = connElem.getClusterTime();
+ const sessionClusterTime = connElem._getDefaultSession().getClusterTime();
+ const operationTime = connElem._getDefaultSession().getOperationTime();
- if (authMode === 'keyFile') {
- authutil.assertAuthenticate(conn, 'admin', {
- user: '__system',
- mechanism: 'SCRAM-SHA-1',
- pwd: cat(keyfile).replace(/[\011-\015\040]/g, '')
- });
- } else if (authMode === 'x509') {
- authutil.assertAuthenticate(conn, '$external', {
- mechanism: 'MONGODB-X509',
- });
- } else {
- throw new Error('clusterAuthMode ' + authMode + ' is currently unsupported');
- }
+ connElem.resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetOperationTime_forTesting();
+
+ return {connClusterTime, sessionClusterTime, operationTime};
+ });
+
+ if (authMode === 'keyFile') {
+ authutil.assertAuthenticate(conn, 'admin', {
+ user: '__system',
+ mechanism: 'SCRAM-SHA-1',
+ pwd: cat(keyfile).replace(/[\011-\015\040]/g, '')
+ });
+ } else if (authMode === 'x509') {
+ authutil.assertAuthenticate(conn, '$external', {
+ mechanism: 'MONGODB-X509',
+ });
+ } else {
+ throw new Error('clusterAuthMode ' + authMode + ' is currently unsupported');
+ }
+ try {
+ return action();
+ } finally {
try {
- return action();
- } finally {
- try {
- authutil.logout(conn, 'admin');
- let connArray = conn;
- if (conn.length == null)
- connArray = [conn];
+ authutil.logout(conn, 'admin');
+ let connArray = conn;
+ if (conn.length == null)
+ connArray = [conn];
- for (let i = 0; i < connArray.length; i++) {
- let connElem = connArray[i];
- connElem.resetClusterTime_forTesting();
- connElem._getDefaultSession().resetClusterTime_forTesting();
- connElem._getDefaultSession().resetOperationTime_forTesting();
- if (clusterTimes[i].connClusterTime) {
- connElem.advanceClusterTime(clusterTimes[i].connClusterTime);
- }
- if (clusterTimes[i].sessionClusterTime) {
- connElem._getDefaultSession().advanceClusterTime(
- clusterTimes[i].sessionClusterTime);
- }
- if (clusterTimes[i].operationTime) {
- connElem._getDefaultSession().advanceOperationTime(
- clusterTimes[i].operationTime);
- }
+ for (let i = 0; i < connArray.length; i++) {
+ let connElem = connArray[i];
+ connElem.resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetClusterTime_forTesting();
+ connElem._getDefaultSession().resetOperationTime_forTesting();
+ if (clusterTimes[i].connClusterTime) {
+ connElem.advanceClusterTime(clusterTimes[i].connClusterTime);
+ }
+ if (clusterTimes[i].sessionClusterTime) {
+ connElem._getDefaultSession().advanceClusterTime(
+ clusterTimes[i].sessionClusterTime);
+ }
+ if (clusterTimes[i].operationTime) {
+ connElem._getDefaultSession().advanceOperationTime(
+ clusterTimes[i].operationTime);
}
- } catch (ex) {
}
+ } catch (ex) {
}
- };
-
+ }
+};
}());
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
index 3c62675db7b..2ada654ce55 100644
--- a/src/mongo/shell/utils_sh.js
+++ b/src/mongo/shell/utils_sh.js
@@ -109,13 +109,13 @@ sh.enableSharding = function(dbname) {
sh.shardCollection = function(fullName, key, unique, options) {
sh._checkFullName(fullName);
assert(key, "need a key");
- assert(typeof(key) == "object", "key needs to be an object");
+ assert(typeof (key) == "object", "key needs to be an object");
var cmd = {shardCollection: fullName, key: key};
if (unique)
cmd.unique = true;
if (options) {
- if (typeof(options) !== "object") {
+ if (typeof (options) !== "object") {
throw new Error("options must be an object");
}
Object.extend(cmd, options);
@@ -140,7 +140,7 @@ sh.moveChunk = function(fullName, find, to) {
};
sh.setBalancerState = function(isOn) {
- assert(typeof(isOn) == "boolean", "Must pass boolean to setBalancerState");
+ assert(typeof (isOn) == "boolean", "Must pass boolean to setBalancerState");
if (isOn) {
return sh.startBalancer();
} else {
@@ -243,7 +243,7 @@ sh.waitForPingChange = function(activePings, timeout, interval) {
};
sh.waitForBalancer = function(wait, timeout, interval) {
- if (typeof(wait) === 'undefined') {
+ if (typeof (wait) === 'undefined') {
wait = false;
}
var initialStatus = sh._getBalancerStatus();
@@ -296,7 +296,6 @@ sh.enableBalancing = function(coll) {
* mongos )
*/
sh._lastMigration = function(ns) {
-
var coll = null;
var dbase = null;
var config = null;
@@ -480,12 +479,12 @@ sh.getRecentMigrations = function(configDB) {
var result = configDB.changelog
.aggregate([
{
- $match: {
- time: {$gt: yesterday},
- what: "moveChunk.from",
- 'details.errmsg': {$exists: false},
- 'details.note': 'success'
- }
+ $match: {
+ time: {$gt: yesterday},
+ what: "moveChunk.from",
+ 'details.errmsg': {$exists: false},
+ 'details.note': 'success'
+ }
},
{$group: {_id: {msg: "$details.errmsg"}, count: {$sum: 1}}},
{$project: {_id: {$ifNull: ["$_id.msg", "Success"]}, count: "$count"}}
@@ -497,28 +496,28 @@ sh.getRecentMigrations = function(configDB) {
configDB.changelog
.aggregate([
{
- $match: {
- time: {$gt: yesterday},
- what: "moveChunk.from",
- $or: [
- {'details.errmsg': {$exists: true}},
- {'details.note': {$ne: 'success'}}
- ]
- }
+ $match: {
+ time: {$gt: yesterday},
+ what: "moveChunk.from",
+ $or: [
+ {'details.errmsg': {$exists: true}},
+ {'details.note': {$ne: 'success'}}
+ ]
+ }
},
{
- $group: {
- _id: {msg: "$details.errmsg", from: "$details.from", to: "$details.to"},
- count: {$sum: 1}
- }
+ $group: {
+ _id: {msg: "$details.errmsg", from: "$details.from", to: "$details.to"},
+ count: {$sum: 1}
+ }
},
{
- $project: {
- _id: {$ifNull: ['$_id.msg', 'aborted']},
- from: "$_id.from",
- to: "$_id.to",
- count: "$count"
- }
+ $project: {
+ _id: {$ifNull: ['$_id.msg', 'aborted']},
+ from: "$_id.from",
+ to: "$_id.to",
+ count: "$count"
+ }
}
])
.toArray());
@@ -703,7 +702,7 @@ function printShardingStatus(configDB, verbose) {
var nonBooleanNote = function(name, value) {
// If the given value is not a boolean, return a string of the
// form " (<name>: <value>)", where <value> is converted to JSON.
- var t = typeof(value);
+ var t = typeof (value);
var s = "";
if (t != "boolean" && t != "undefined") {
s = " (" + name + ": " + tojson(value) + ")";
@@ -814,9 +813,8 @@ function printShardingSizes(configDB) {
delete out.ok;
output(4,
- tojson(chunk.min) + " -->> " + tojson(chunk.max) + " on : " +
- chunk.shard + " " + tojson(out));
-
+ tojson(chunk.min) + " -->> " + tojson(chunk.max) +
+ " on : " + chunk.shard + " " + tojson(out));
});
});
}
diff --git a/src/mongo/stdx/condition_variable.h b/src/mongo/stdx/condition_variable.h
index 1a6836b3a5f..2f8a0ca6484 100644
--- a/src/mongo/stdx/condition_variable.h
+++ b/src/mongo/stdx/condition_variable.h
@@ -124,10 +124,10 @@ public:
std::condition_variable::notify_all(); // NOLINT
}
+ using std::condition_variable::native_handle; // NOLINT
using std::condition_variable::wait; // NOLINT
using std::condition_variable::wait_for; // NOLINT
using std::condition_variable::wait_until; // NOLINT
- using std::condition_variable::native_handle; // NOLINT
private:
friend class ::mongo::Waitable;
diff --git a/src/mongo/stdx/mutex.h b/src/mongo/stdx/mutex.h
index b75a5b56988..033a5f9b080 100644
--- a/src/mongo/stdx/mutex.h
+++ b/src/mongo/stdx/mutex.h
@@ -35,8 +35,8 @@ namespace mongo {
namespace stdx {
using ::std::mutex; // NOLINT
-using ::std::timed_mutex; // NOLINT
using ::std::recursive_mutex; // NOLINT
+using ::std::timed_mutex; // NOLINT
using ::std::adopt_lock_t; // NOLINT
using ::std::defer_lock_t; // NOLINT
diff --git a/src/mongo/stdx/thread.h b/src/mongo/stdx/thread.h
index 278b7678e72..2968e9dcae2 100644
--- a/src/mongo/stdx/thread.h
+++ b/src/mongo/stdx/thread.h
@@ -57,8 +57,8 @@ namespace stdx {
*/
class thread : private ::std::thread { // NOLINT
public:
- using ::std::thread::native_handle_type; // NOLINT
using ::std::thread::id; // NOLINT
+ using ::std::thread::native_handle_type; // NOLINT
thread() noexcept : ::std::thread::thread() {} // NOLINT
@@ -91,13 +91,13 @@ public:
::std::thread::operator=(static_cast<::std::thread&&>(std::move(other)))); // NOLINT
};
- using ::std::thread::joinable; // NOLINT
using ::std::thread::get_id; // NOLINT
- using ::std::thread::native_handle; // NOLINT
using ::std::thread::hardware_concurrency; // NOLINT
+ using ::std::thread::joinable; // NOLINT
+ using ::std::thread::native_handle; // NOLINT
- using ::std::thread::join; // NOLINT
using ::std::thread::detach; // NOLINT
+ using ::std::thread::join; // NOLINT
void swap(thread& other) noexcept {
::std::thread::swap(static_cast<::std::thread&>(other)); // NOLINT
diff --git a/src/mongo/stdx/variant.h b/src/mongo/stdx/variant.h
index c2d396a7c27..c6c903d6402 100644
--- a/src/mongo/stdx/variant.h
+++ b/src/mongo/stdx/variant.h
@@ -33,16 +33,16 @@
namespace mongo::stdx {
-using ::mpark::variant;
-using ::mpark::visit;
-using ::mpark::holds_alternative;
using ::mpark::get;
using ::mpark::get_if;
+using ::mpark::holds_alternative;
+using ::mpark::variant;
+using ::mpark::visit;
-using ::mpark::variant_size;
-using ::mpark::variant_size_v;
using ::mpark::variant_alternative;
using ::mpark::variant_alternative_t;
+using ::mpark::variant_size;
+using ::mpark::variant_size_v;
constexpr auto variant_npos = ::mpark::variant_npos;
@@ -53,7 +53,7 @@ using ::mpark::operator>;
using ::mpark::operator<=;
using ::mpark::operator>=;
-using ::mpark::monostate;
using ::mpark::bad_variant_access;
+using ::mpark::monostate;
} // namespace mongo::stdx
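The reshuffled using-declaration blocks in the stdx headers above match clang-format's SortUsingDeclarations option, which keeps each contiguous run of using-declarations in lexicographic order (that the project's .clang-format enables it is an assumption these hunks support). A small sketch against the standard library:

    #include <variant>

    namespace example {
    // With SortUsingDeclarations, clang-format keeps this contiguous block
    // sorted; swapping any two lines would be undone on the next format run.
    using ::std::get;
    using ::std::holds_alternative;
    using ::std::variant;
    using ::std::visit;
    }  // namespace example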
diff --git a/src/mongo/tools/mongobridge_options.h b/src/mongo/tools/mongobridge_options.h
index 590c84e1abd..4fac287f976 100644
--- a/src/mongo/tools/mongobridge_options.h
+++ b/src/mongo/tools/mongobridge_options.h
@@ -68,4 +68,4 @@ bool handlePreValidationMongoBridgeOptions(const moe::Environment& params);
Status storeMongoBridgeOptions(const moe::Environment& params,
const std::vector<std::string>& args);
-}
+} // namespace mongo
diff --git a/src/mongo/tools/mongobridge_options_init.cpp b/src/mongo/tools/mongobridge_options_init.cpp
index a80336cc5da..d1373ccd7a2 100644
--- a/src/mongo/tools/mongobridge_options_init.cpp
+++ b/src/mongo/tools/mongobridge_options_init.cpp
@@ -69,4 +69,4 @@ MONGO_STARTUP_OPTIONS_STORE(MongoBridgeOptions)(InitializerContext* context) {
return Status::OK();
}
-}
+} // namespace mongo
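The recurring "}" to "} // namespace mongo" rewrites come from clang-format's FixNamespaceComments option (available since clang-format 5), which appends an end-of-namespace comment to sufficiently long namespace bodies and corrects stale ones; whether MongoDB's .clang-format sets it explicitly is assumed here. Sketch:

    // FixNamespaceComments appends "// namespace <name>" to the closing
    // brace and rewrites missing or stale variants of the comment.
    namespace mongo {
    namespace tools {
    inline int bridgePort() {
        return 27016;  // placeholder body; illustrative only
    }
    }  // namespace tools
    }  // namespace mongo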
diff --git a/src/mongo/tools/mongoebench_options.cpp b/src/mongo/tools/mongoebench_options.cpp
index b4f25b89cf5..0ba4394d976 100644
--- a/src/mongo/tools/mongoebench_options.cpp
+++ b/src/mongo/tools/mongoebench_options.cpp
@@ -125,8 +125,7 @@ Status storeMongoeBenchOptions(const moe::Environment& params,
if (!parentPath.empty() && !boost::filesystem::exists(parentPath)) {
return {ErrorCodes::NonExistentPath,
str::stream() << "Directory containing output file must already exist, but "
- << parentPath.string()
- << " wasn't found"};
+ << parentPath.string() << " wasn't found"};
}
return Status::OK();
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index a24b8f93649..3536bc16ab4 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -83,8 +83,8 @@ class TransportLayerASIO::BatonASIO : public NetworkingBaton {
EventFDHolder() : fd(::eventfd(0, EFD_CLOEXEC)) {
if (fd < 0) {
auto e = errno;
- std::string reason = str::stream() << "error in creating eventfd: "
- << errnoWithDescription(e);
+ std::string reason = str::stream()
+ << "error in creating eventfd: " << errnoWithDescription(e);
auto code = (e == EMFILE || e == ENFILE) ? ErrorCodes::TooManyFilesOpen
: ErrorCodes::UnknownError;
@@ -165,7 +165,7 @@ public:
}
_safeExecute(std::move(lk),
- [ id, expiration, promise = std::move(pf.promise), this ]() mutable {
+ [id, expiration, promise = std::move(pf.promise), this]() mutable {
auto iter = _timers.emplace(std::piecewise_construct,
std::forward_as_tuple(expiration),
std::forward_as_tuple(id, std::move(promise)));
@@ -381,7 +381,7 @@ private:
}
_safeExecute(std::move(lk),
- [ id, fd, type, promise = std::move(pf.promise), this ]() mutable {
+ [id, fd, type, promise = std::move(pf.promise), this]() mutable {
_sessions[id] = TransportSession{fd, type, std::move(promise)};
});
@@ -440,7 +440,7 @@ private:
template <typename Callback>
void _safeExecute(stdx::unique_lock<stdx::mutex> lk, Callback&& cb) {
if (_inPoll) {
- _scheduled.push_back([ cb = std::forward<Callback>(cb), this ](Status) mutable {
+ _scheduled.push_back([cb = std::forward<Callback>(cb), this](Status) mutable {
stdx::lock_guard<stdx::mutex> lk(_mutex);
cb();
});
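The lambda rewrites above drop the inner spaces that older clang-format releases inserted around capture lists containing init-captures ("[ id, promise = std::move(pf.promise), this ]" becomes "[id, promise = std::move(pf.promise), this]"). A runnable sketch of the tight style:

    #include <memory>
    #include <utility>

    int main() {
        auto counter = std::make_unique<int>(0);
        // clang-format 7 writes init-capture lists without padding spaces,
        // matching ordinary capture lists.
        auto task = [counter = std::move(counter)]() mutable { ++*counter; };
        task();
    }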
diff --git a/src/mongo/transport/max_conns_override_test.cpp b/src/mongo/transport/max_conns_override_test.cpp
index c1b38421efd..40c67fdb408 100644
--- a/src/mongo/transport/max_conns_override_test.cpp
+++ b/src/mongo/transport/max_conns_override_test.cpp
@@ -90,4 +90,4 @@ TEST(MaxConnsOverride, UNIXPaths) {
#endif
} // namespace
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/transport/message_compressor_manager_test.cpp b/src/mongo/transport/message_compressor_manager_test.cpp
index bffde5f9c29..53a057d5445 100644
--- a/src/mongo/transport/message_compressor_manager_test.cpp
+++ b/src/mongo/transport/message_compressor_manager_test.cpp
@@ -197,8 +197,9 @@ TEST(MessageCompressorManager, BadCompressionRequested) {
}
TEST(MessageCompressorManager, BadAndGoodCompressionRequested) {
- auto input = BSON("isMaster" << 1 << "compression" << BSON_ARRAY("fakecompressor"
- << "noop"));
+ auto input = BSON("isMaster" << 1 << "compression"
+ << BSON_ARRAY("fakecompressor"
+ << "noop"));
checkServerNegotiation(input, {"noop"});
}
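The BSON(...) rewrite above illustrates operator-chain wrapping: breaks are placed before "<<" and continuations align under the first operand of the enclosing chain, so adjacent string literals stack vertically. The BSON/BSON_ARRAY macros need the server tree, so the sketch below substitutes iostreams (an assumption; the wrapping rule is the same):

    #include <iostream>

    int main() {
        // Breaks fall before "<<"; continuations align under the first
        // operand of the chain, so the two adjacent literals stack.
        std::cout << "isMaster: " << 1 << " compression: "
                  << "fakecompressor"
                  << " noop\n";
    }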
diff --git a/src/mongo/transport/message_compressor_registry.h b/src/mongo/transport/message_compressor_registry.h
index 160475675ae..4e20c2dad1c 100644
--- a/src/mongo/transport/message_compressor_registry.h
+++ b/src/mongo/transport/message_compressor_registry.h
@@ -45,7 +45,7 @@ namespace mongo {
namespace optionenvironment {
class OptionSection;
class Environment;
-} // namespace option environment
+} // namespace optionenvironment
namespace moe = mongo::optionenvironment;
diff --git a/src/mongo/transport/service_entry_point.h b/src/mongo/transport/service_entry_point.h
index 3faa834ea97..2c3ded7849a 100644
--- a/src/mongo/transport/service_entry_point.h
+++ b/src/mongo/transport/service_entry_point.h
@@ -65,8 +65,8 @@ public:
virtual Status start() = 0;
/**
- * Shuts down the service entry point.
- */
+ * Shuts down the service entry point.
+ */
virtual bool shutdown(Milliseconds timeout) = 0;
/**
@@ -75,8 +75,8 @@ public:
virtual void appendStats(BSONObjBuilder* bob) const = 0;
/**
- * Returns the number of sessions currently open.
- */
+ * Returns the number of sessions currently open.
+ */
virtual size_t numOpenSessions() const = 0;
/**
diff --git a/src/mongo/transport/service_entry_point_impl.cpp b/src/mongo/transport/service_entry_point_impl.cpp
index a01401f936d..6d17d653072 100644
--- a/src/mongo/transport/service_entry_point_impl.cpp
+++ b/src/mongo/transport/service_entry_point_impl.cpp
@@ -167,7 +167,7 @@ void ServiceEntryPointImpl::startSession(transport::SessionHandle session) {
<< connectionCount << word << " now open)";
}
- ssm->setCleanupHook([ this, ssmIt, quiet, session = std::move(session) ] {
+ ssm->setCleanupHook([this, ssmIt, quiet, session = std::move(session)] {
size_t connectionCount;
auto remote = session->remote();
{
@@ -223,8 +223,8 @@ bool ServiceEntryPointImpl::shutdown(Milliseconds timeout) {
auto noWorkersLeft = [this] { return numOpenSessions() == 0; };
while (timeSpent < timeout &&
!_shutdownCondition.wait_for(lk, checkInterval.toSystemDuration(), noWorkersLeft)) {
- log(LogComponent::kNetwork) << "shutdown: still waiting on " << numOpenSessions()
- << " active workers to drain... ";
+ log(LogComponent::kNetwork)
+ << "shutdown: still waiting on " << numOpenSessions() << " active workers to drain... ";
timeSpent += checkInterval;
}
diff --git a/src/mongo/transport/service_executor_adaptive.cpp b/src/mongo/transport/service_executor_adaptive.cpp
index 9b9d3e9c734..b8a39e9fcb7 100644
--- a/src/mongo/transport/service_executor_adaptive.cpp
+++ b/src/mongo/transport/service_executor_adaptive.cpp
@@ -183,34 +183,35 @@ Status ServiceExecutorAdaptive::schedule(ServiceExecutorAdaptive::Task task,
}
auto wrappedTask =
- [ this, task = std::move(task), scheduleTime, pendingCounterPtr, taskName, flags ](
+ [this, task = std::move(task), scheduleTime, pendingCounterPtr, taskName, flags](
auto status) {
- pendingCounterPtr->subtractAndFetch(1);
- auto start = _tickSource->getTicks();
- _totalSpentQueued.addAndFetch(start - scheduleTime);
+ pendingCounterPtr->subtractAndFetch(1);
+ auto start = _tickSource->getTicks();
+ _totalSpentQueued.addAndFetch(start - scheduleTime);
- _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
- ._totalSpentQueued.addAndFetch(start - scheduleTime);
+ _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
+ ._totalSpentQueued.addAndFetch(start - scheduleTime);
- if (_localThreadState->recursionDepth++ == 0) {
- _localThreadState->executing.markRunning();
- _threadsInUse.addAndFetch(1);
- }
- const auto guard = makeGuard([this, taskName] {
- if (--_localThreadState->recursionDepth == 0) {
- _localThreadState->executingCurRun += _localThreadState->executing.markStopped();
- _threadsInUse.subtractAndFetch(1);
+ if (_localThreadState->recursionDepth++ == 0) {
+ _localThreadState->executing.markRunning();
+ _threadsInUse.addAndFetch(1);
}
- _totalExecuted.addAndFetch(1);
+ const auto guard = makeGuard([this, taskName] {
+ if (--_localThreadState->recursionDepth == 0) {
+ _localThreadState->executingCurRun +=
+ _localThreadState->executing.markStopped();
+ _threadsInUse.subtractAndFetch(1);
+ }
+ _totalExecuted.addAndFetch(1);
+ _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
+ ._totalExecuted.addAndFetch(1);
+ });
+
+ TickTimer _localTimer(_tickSource);
+ task();
_localThreadState->threadMetrics[static_cast<size_t>(taskName)]
- ._totalExecuted.addAndFetch(1);
- });
-
- TickTimer _localTimer(_tickSource);
- task();
- _localThreadState->threadMetrics[static_cast<size_t>(taskName)]
- ._totalSpentExecuting.addAndFetch(_localTimer.sinceStartTicks());
- };
+ ._totalSpentExecuting.addAndFetch(_localTimer.sinceStartTicks());
+ };
// Dispatching a task on the io_context will run the task immediately, and may run it
// on the current thread (if the current thread is running the io_context right now).
diff --git a/src/mongo/transport/service_executor_adaptive_test.cpp b/src/mongo/transport/service_executor_adaptive_test.cpp
index 62caed4e8b9..5f8a0192a8e 100644
--- a/src/mongo/transport/service_executor_adaptive_test.cpp
+++ b/src/mongo/transport/service_executor_adaptive_test.cpp
@@ -253,10 +253,10 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
}
/*
-* This tests that the executor will launch more threads when starvation is detected. We launch
-* another task from itself so there will always be a queue of a waiting task if there's just one
-* thread.
-*/
+ * This tests that the executor will launch more threads when starvation is detected. We launch
+ * another task from itself so there will always be a queue of a waiting task if there's just one
+ * thread.
+ */
TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
auto exec = makeAndStartExecutor<TestOptions>();
@@ -269,7 +269,6 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
stdx::function<void()> task;
task = [this, &task, &exec, &scheduleMutex, &scheduleNew] {
-
// This sleep needs to be larger than the sleep below to be able to limit the amount of
// starvation.
stdx::this_thread::sleep_for(config->maxQueueLatency().toSystemDuration() * 5);
@@ -304,9 +303,9 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
}
/*
-* This tests that the executor can execute tasks recursively. If it can't starvation will be
-* detected and new threads started.
-*/
+ * This tests that the executor can execute tasks recursively. If it can't starvation will be
+ * detected and new threads started.
+ */
TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
auto exec = makeAndStartExecutor<RecursionOptions>();
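The two test-comment hunks above are pure comment re-indentation: clang-format 7 aligns every continuation star of a block comment one column in from the opening delimiter. A sketch:

    /*
     * clang-format 7 indents each continuation star of a block comment by
     * one space, as in the TestStarvation and TestRecursion comments above.
     */
    int blockCommentExample = 0;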
diff --git a/src/mongo/transport/service_executor_synchronous.cpp b/src/mongo/transport/service_executor_synchronous.cpp
index 4191899e763..79fc88e0033 100644
--- a/src/mongo/transport/service_executor_synchronous.cpp
+++ b/src/mongo/transport/service_executor_synchronous.cpp
@@ -115,7 +115,7 @@ Status ServiceExecutorSynchronous::schedule(Task task,
// into the thread local job queue.
LOG(3) << "Starting new executor thread in passthrough mode";
- Status status = launchServiceWorkerThread([ this, task = std::move(task) ] {
+ Status status = launchServiceWorkerThread([this, task = std::move(task)] {
_numRunningWorkerThreads.addAndFetch(1);
_localWorkQueue.emplace_back(std::move(task));
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index 2c52deafb69..f3c05a72a2d 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -51,7 +51,7 @@ namespace {
constexpr Milliseconds kWorkerThreadRunTime{1000};
// Run time + generous scheduling time slice
const Milliseconds kShutdownTime = kWorkerThreadRunTime + Milliseconds{50};
-}
+} // namespace
struct TestOptions : public ServiceExecutorAdaptive::Options {
int reservedThreads() const final {
diff --git a/src/mongo/transport/service_state_machine.cpp b/src/mongo/transport/service_state_machine.cpp
index 0d24952f13d..ce81f51b933 100644
--- a/src/mongo/transport/service_state_machine.cpp
+++ b/src/mongo/transport/service_state_machine.cpp
@@ -569,7 +569,7 @@ void ServiceStateMachine::_scheduleNextWithGuard(ThreadGuard guard,
transport::ServiceExecutor::ScheduleFlags flags,
transport::ServiceExecutorTaskName taskName,
Ownership ownershipModel) {
- auto func = [ ssm = shared_from_this(), ownershipModel ] {
+ auto func = [ssm = shared_from_this(), ownershipModel] {
ThreadGuard guard(ssm.get());
if (ownershipModel == Ownership::kStatic)
guard.markStaticOwnership();
diff --git a/src/mongo/transport/service_state_machine_test.cpp b/src/mongo/transport/service_state_machine_test.cpp
index 8e3427717c0..72baf66aad1 100644
--- a/src/mongo/transport/service_state_machine_test.cpp
+++ b/src/mongo/transport/service_state_machine_test.cpp
@@ -247,8 +247,9 @@ public:
if (!_scheduleHook) {
return Status::OK();
} else {
- return _scheduleHook(std::move(task)) ? Status::OK() : Status{ErrorCodes::InternalError,
- "Hook returned error!"};
+ return _scheduleHook(std::move(task))
+ ? Status::OK()
+ : Status{ErrorCodes::InternalError, "Hook returned error!"};
}
}
@@ -485,10 +486,10 @@ TEST_F(ServiceStateMachineFixture, TestGetMoreWithExhaustAndEmptyResponseNamespa
Message getMoreWithExhaust = getMoreRequestWithExhaust(nss, cursorId, initRequestId);
// Construct a 'getMore' response with an empty namespace.
- BSONObj getMoreTerminalResBody = BSON("ok" << 1 << "cursor" << BSON("id" << 42 << "ns"
- << ""
- << "nextBatch"
- << BSONArray()));
+ BSONObj getMoreTerminalResBody = BSON("ok" << 1 << "cursor"
+ << BSON("id" << 42 << "ns"
+ << ""
+ << "nextBatch" << BSONArray()));
Message getMoreTerminalRes = buildOpMsg(getMoreTerminalResBody);
// Let the 'getMore' request be sourced from the network, processed in the database, and
@@ -782,7 +783,7 @@ TEST_F(ServiceStateMachineFixture, TerminateWorksForAllStatesWithScheduleFailure
waitFor = testState;
// This is a dummy thread that just advances the SSM while we track its state/kill it
- stdx::thread runner([ ssm = _ssm, &scheduleFailed ] {
+ stdx::thread runner([ssm = _ssm, &scheduleFailed] {
while (ssm->state() != State::Ended && !scheduleFailed) {
ssm->runNext();
}
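The _scheduleHook rewrite earlier in this file shows the new conditional-operator wrapping: an over-long ternary now breaks before "?" and ":" with both branches indented, instead of splitting one branch's argument list across lines. Sketch (hypothetical names):

    #include <string>

    std::string hookStatus(bool ok) {
        // Over-long conditionals break before "?" and ":", indenting both
        // branches as a unit.
        return ok
            ? std::string("OK")
            : std::string("Hook returned error!");
    }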
diff --git a/src/mongo/transport/session.h b/src/mongo/transport/session.h
index b10fe479b17..b0a47364b70 100644
--- a/src/mongo/transport/session.h
+++ b/src/mongo/transport/session.h
@@ -124,13 +124,13 @@ public:
virtual void cancelAsyncOperations(const BatonHandle& handle = nullptr) = 0;
/**
- * This should only be used to detect when the remote host has disappeared without
- * notice. It does NOT work correctly for ensuring that operations complete or fail
- * by some deadline.
- *
- * This timeout will only affect calls to sourceMessage()/sinkMessage(). Async operations do not
- * currently support timeouts.
- */
+ * This should only be used to detect when the remote host has disappeared without
+ * notice. It does NOT work correctly for ensuring that operations complete or fail
+ * by some deadline.
+ *
+ * This timeout will only affect calls to sourceMessage()/sinkMessage(). Async operations do not
+ * currently support timeouts.
+ */
virtual void setTimeout(boost::optional<Milliseconds> timeout) = 0;
/**
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index 6886894152e..eca9811ffb1 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -249,7 +249,6 @@ protected:
SSLPeerInfo::forSession(shared_from_this()) =
uassertStatusOK(getSSLManager()->parseAndValidatePeerCertificate(
_sslSocket->native_handle(), target.host(), target));
-
});
}
@@ -355,7 +354,7 @@ private:
auto headerBuffer = SharedBuffer::allocate(kHeaderSize);
auto ptr = headerBuffer.get();
return read(asio::buffer(ptr, kHeaderSize), baton)
- .then([ headerBuffer = std::move(headerBuffer), this, baton ]() mutable {
+ .then([headerBuffer = std::move(headerBuffer), this, baton]() mutable {
if (checkForHTTPRequest(asio::buffer(headerBuffer.get(), kHeaderSize))) {
return sendHTTPResponse(baton);
}
@@ -384,7 +383,7 @@ private:
MsgData::View msgView(buffer.get());
return read(asio::buffer(msgView.data(), msgView.dataLen()), baton)
- .then([ this, buffer = std::move(buffer), msgLen ]() mutable {
+ .then([this, buffer = std::move(buffer), msgLen]() mutable {
if (_isIngressSession) {
networkCounter.hitPhysicalIn(msgLen);
}
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 3a17975ea85..cdbdddf0b6b 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -457,8 +457,9 @@ StatusWith<SessionHandle> TransportLayerASIO::connect(HostAndPort peer,
#else
auto globalSSLMode = _sslMode();
if (sslMode == kEnableSSL ||
- (sslMode == kGlobalSSLMode && ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
- (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
+ (sslMode == kGlobalSSLMode &&
+ ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
+ (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
auto sslStatus = session->handshakeSSLForEgress(peer).getNoThrow();
if (!sslStatus.isOK()) {
return sslStatus;
@@ -606,8 +607,9 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
#else
auto globalSSLMode = _sslMode();
if (sslMode == kEnableSSL ||
- (sslMode == kGlobalSSLMode && ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
- (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
+ (sslMode == kGlobalSSLMode &&
+ ((globalSSLMode == SSLParams::SSLMode_preferSSL) ||
+ (globalSSLMode == SSLParams::SSLMode_requireSSL)))) {
return connector->session
->handshakeSSLForEgressWithLock(std::move(lk), connector->peer)
.then([connector] { return Status::OK(); });
diff --git a/src/mongo/transport/transport_layer_asio_integration_test.cpp b/src/mongo/transport/transport_layer_asio_integration_test.cpp
index 718a0bd56a0..b9bfa10df5e 100644
--- a/src/mongo/transport/transport_layer_asio_integration_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_integration_test.cpp
@@ -65,9 +65,10 @@ TEST(TransportLayerASIO, HTTPRequestGetsHTTPError) {
log() << "Sending HTTP request";
std::string httpReq = str::stream() << "GET /\r\n"
"Host: "
- << server << "\r\n"
- "User-Agent: MongoDB Integration test\r\n"
- "Accept: */*";
+ << server
+ << "\r\n"
+ "User-Agent: MongoDB Integration test\r\n"
+ "Accept: */*";
asio::write(socket, asio::buffer(httpReq.data(), httpReq.size()));
log() << "Waiting for response";
diff --git a/src/mongo/transport/transport_layer_asio_test.cpp b/src/mongo/transport/transport_layer_asio_test.cpp
index bc995283097..08dcd99dcae 100644
--- a/src/mongo/transport/transport_layer_asio_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_test.cpp
@@ -236,7 +236,7 @@ public:
void startSession(transport::SessionHandle session) override {
log() << "Accepted connection from " << session->remote();
- startWorkerThread([ this, session = std::move(session) ]() mutable {
+ startWorkerThread([this, session = std::move(session)]() mutable {
log() << "waiting for message";
session->setTimeout(Milliseconds{500});
auto status = session->sourceMessage().getStatus();
@@ -332,7 +332,7 @@ class TimeoutSwitchModesSEP : public TimeoutSEP {
public:
void startSession(transport::SessionHandle session) override {
log() << "Accepted connection from " << session->remote();
- startWorkerThread([ this, session = std::move(session) ]() mutable {
+ startWorkerThread([this, session = std::move(session)]() mutable {
log() << "waiting for message";
auto sourceMessage = [&] { return session->sourceMessage().getStatus(); };
diff --git a/src/mongo/transport/transport_layer_egress_init.cpp b/src/mongo/transport/transport_layer_egress_init.cpp
index 062d0d284d8..8be5bd39735 100644
--- a/src/mongo/transport/transport_layer_egress_init.cpp
+++ b/src/mongo/transport/transport_layer_egress_init.cpp
@@ -44,7 +44,6 @@ namespace {
ServiceContext::ConstructorActionRegisterer registerEgressTransportLayer{
"ConfigureEgressTransportLayer", [](ServiceContext* sc) {
-
invariant(!sc->getTransportLayer());
transport::TransportLayerASIO::Options opts;
opts.mode = transport::TransportLayerASIO::Options::kEgress;
diff --git a/src/mongo/unittest/system_resource_canary_bm.cpp b/src/mongo/unittest/system_resource_canary_bm.cpp
index beaba9aa585..3203d071427 100644
--- a/src/mongo/unittest/system_resource_canary_bm.cpp
+++ b/src/mongo/unittest/system_resource_canary_bm.cpp
@@ -94,7 +94,7 @@ class CacheLatencyTest : public benchmark::Fixture {
// Fixture for CPU Cache and RAM latency test. Adapted from lmbench's lat_mem_rd test.
public:
// Array of pointers used as a linked list.
- std::unique_ptr<char* []> data;
+ std::unique_ptr<char*[]> data;
void SetUp(benchmark::State& state) override {
if (state.thread_index == 0) {
@@ -107,7 +107,7 @@ public:
const int arrLength = state.range(0);
int counter = 0;
- data = std::make_unique<char* []>(arrLength);
+ data = std::make_unique<char*[]>(arrLength);
char** arr = data.get();
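The benchmark fixture hunks reflect a pointer-declarator fix: clang-format 7 writes array-of-pointer template arguments without a space before the brackets ("char*[]", not "char* []"). Runnable sketch:

    #include <memory>

    int main() {
        // "char*[]" is now emitted with no space before "[]", both in the
        // template argument and in the make_unique call.
        std::unique_ptr<char*[]> data = std::make_unique<char*[]>(16);
        data[0] = nullptr;
    }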
diff --git a/src/mongo/unittest/temp_dir.cpp b/src/mongo/unittest/temp_dir.cpp
index a4a91fa8a90..8b370dfd868 100644
--- a/src/mongo/unittest/temp_dir.cpp
+++ b/src/mongo/unittest/temp_dir.cpp
@@ -73,7 +73,7 @@ MONGO_INITIALIZER(SetTempDirDefaultRoot)(InitializerContext* context) {
}
return Status::OK();
}
-}
+} // namespace
TempDir::TempDir(const std::string& namePrefix) {
fassert(17146, namePrefix.find_first_of("/\\") == std::string::npos);
diff --git a/src/mongo/unittest/unittest_helpers.cpp b/src/mongo/unittest/unittest_helpers.cpp
index 10eae6c2f3f..5e6a8627dcf 100644
--- a/src/mongo/unittest/unittest_helpers.cpp
+++ b/src/mongo/unittest/unittest_helpers.cpp
@@ -42,4 +42,4 @@ std::ostream& operator<<(std::ostream& s, const Timestamp& ot) {
s << ot.toString();
return s;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/alarm.h b/src/mongo/util/alarm.h
index aa46c01f924..449284a3b21 100644
--- a/src/mongo/util/alarm.h
+++ b/src/mongo/util/alarm.h
@@ -192,4 +192,4 @@ private:
AlarmMap _alarms;
};
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/util/alarm_test.cpp b/src/mongo/util/alarm_test.cpp
index 1fc29b1b5f9..f450284fc2a 100644
--- a/src/mongo/util/alarm_test.cpp
+++ b/src/mongo/util/alarm_test.cpp
@@ -112,12 +112,12 @@ TEST(AlarmRunner, BasicTest) {
AtomicWord<bool> future2Filled{false};
auto pf = makePromiseFuture<void>();
- std::move(alarm2.future).getAsync([&future2Filled,
- promise = std::move(pf.promise) ](Status status) mutable {
- ASSERT_OK(status);
- future2Filled.store(true);
- promise.emplaceValue();
- });
+ std::move(alarm2.future)
+ .getAsync([&future2Filled, promise = std::move(pf.promise)](Status status) mutable {
+ ASSERT_OK(status);
+ future2Filled.store(true);
+ promise.emplaceValue();
+ });
clockSource->advance(Milliseconds(11));
diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp
index 2167b2849ec..7535476799b 100644
--- a/src/mongo/util/assert_util.cpp
+++ b/src/mongo/util/assert_util.cpp
@@ -241,12 +241,11 @@ Status exceptionToStatus() noexcept {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Caught std::exception of type " << demangleName(typeid(ex))
- << ": "
- << ex.what());
+ << ": " << ex.what());
} catch (const boost::exception& ex) {
- return Status(
- ErrorCodes::UnknownError,
- str::stream() << "Caught boost::exception of type " << demangleName(typeid(ex)) << ": "
+ return Status(ErrorCodes::UnknownError,
+ str::stream()
+ << "Caught boost::exception of type " << demangleName(typeid(ex)) << ": "
<< boost::diagnostic_information(ex));
} catch (...) {
@@ -254,4 +253,4 @@ Status exceptionToStatus() noexcept {
std::terminate();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/assert_util_test.cpp b/src/mongo/util/assert_util_test.cpp
index 25dcb159ba0..05b33ea3e4b 100644
--- a/src/mongo/util/assert_util_test.cpp
+++ b/src/mongo/util/assert_util_test.cpp
@@ -311,8 +311,8 @@ DEATH_TEST(InvariantTerminationTest,
DEATH_TEST(InvariantTerminationTest,
invariantWithStdStringMsg,
"Terminating with std::string invariant message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string invariant message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string invariant message: " << 12345;
invariant(false, msg);
}
@@ -326,8 +326,8 @@ DEATH_TEST(InvariantTerminationTest,
DEATH_TEST(InvariantTerminationTest,
invariantOverloadWithStdStringMsg,
"Terminating with std::string invariant message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string invariant message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string invariant message: " << 12345;
invariant(Status(ErrorCodes::InternalError, "Terminating with invariant"), msg);
}
@@ -341,8 +341,8 @@ DEATH_TEST(InvariantTerminationTest,
DEATH_TEST(InvariantTerminationTest,
invariantStatusWithOverloadWithStdStringMsg,
"Terminating with std::string invariant message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string invariant message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string invariant message: " << 12345;
invariant(StatusWith<std::string>(ErrorCodes::InternalError, "Terminating with invariant"),
msg);
}
@@ -367,8 +367,8 @@ DEATH_TEST(DassertTerminationTest,
DEATH_TEST(DassertTerminationTest,
dassertWithStdStringMsg,
"Terminating with std::string dassert message: 12345") {
- const std::string msg = str::stream() << "Terminating with std::string dassert message: "
- << 12345;
+ const std::string msg = str::stream()
+ << "Terminating with std::string dassert message: " << 12345;
dassert(false, msg);
}
#endif // defined(MONGO_CONFIG_DEBUG_BUILD)
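The DEATH_TEST hunks above share one pattern: when a str::stream() chain overflows, clang-format 7 moves the whole builder expression onto a continuation line after "=" and keeps the "<<" chain intact, rather than breaking mid-chain after a literal. A self-contained stand-in (the tiny builder below is hypothetical, not mongo's str::stream):

    #include <sstream>
    #include <string>

    // Minimal stand-in for mongo::str::stream: accumulates via "<<" and
    // converts implicitly to std::string. Hypothetical helper.
    struct StringBuilder {
        std::ostringstream os;
        template <typename T>
        StringBuilder& operator<<(const T& v) {
            os << v;
            return *this;
        }
        operator std::string() const {
            return os.str();
        }
    };

    int main() {
        // The builder expression moves to its own continuation line after
        // "=", keeping the "<<" chain unbroken.
        const std::string msg = StringBuilder()
            << "Terminating with std::string invariant message: " << 12345;
    }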
diff --git a/src/mongo/util/boost_assert_impl.cpp b/src/mongo/util/boost_assert_impl.cpp
index a541f7993da..f77ed7f2b0e 100644
--- a/src/mongo/util/boost_assert_impl.cpp
+++ b/src/mongo/util/boost_assert_impl.cpp
@@ -40,8 +40,11 @@ struct BoostAssertImpl {
invariantFailed(expr, file, line);
};
- BoostAssertFuncs::global().assertMsgFunc = [](
- char const* expr, char const* msg, char const* function, char const* file, long line) {
+ BoostAssertFuncs::global().assertMsgFunc = [](char const* expr,
+ char const* msg,
+ char const* function,
+ char const* file,
+ long line) {
invariantFailedWithMsg(expr, msg, file, line);
};
}
diff --git a/src/mongo/util/bson_util.h b/src/mongo/util/bson_util.h
index f4f3440318d..ab6c8459889 100644
--- a/src/mongo/util/bson_util.h
+++ b/src/mongo/util/bson_util.h
@@ -45,4 +45,4 @@ void bsonArrToNumVector(BSONElement el, std::vector<T>& results) {
results.push_back((T)el.Number());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/bufreader.h b/src/mongo/util/bufreader.h
index 90e270e7f90..8c30070bada 100644
--- a/src/mongo/util/bufreader.h
+++ b/src/mongo/util/bufreader.h
@@ -136,4 +136,4 @@ private:
const char* _pos;
const char* _end;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/checksum.h b/src/mongo/util/checksum.h
index c1e8aa73628..8d612c03e21 100644
--- a/src/mongo/util/checksum.h
+++ b/src/mongo/util/checksum.h
@@ -67,4 +67,4 @@ struct Checksum {
return words[0] != rhs.words[0] || words[1] != rhs.words[1];
}
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/clock_source_mock_test.cpp b/src/mongo/util/clock_source_mock_test.cpp
index 3861dbae4c5..f8dbdc343df 100644
--- a/src/mongo/util/clock_source_mock_test.cpp
+++ b/src/mongo/util/clock_source_mock_test.cpp
@@ -129,11 +129,10 @@ TEST(ClockSourceMockTest, AlarmScheudlesExpiredAlarmWhenSignaled) {
ClockSourceMock cs;
const auto beginning = cs.now();
int alarmFiredCount = 0;
- ASSERT_OK(cs.setAlarm(beginning + Seconds{1},
- [&] {
- ++alarmFiredCount;
- ASSERT_OK(cs.setAlarm(beginning, [&] { ++alarmFiredCount; }));
- }));
+ ASSERT_OK(cs.setAlarm(beginning + Seconds{1}, [&] {
+ ++alarmFiredCount;
+ ASSERT_OK(cs.setAlarm(beginning, [&] { ++alarmFiredCount; }));
+ }));
ASSERT_EQ(0, alarmFiredCount);
cs.advance(Seconds{1});
ASSERT_EQ(2, alarmFiredCount);
@@ -154,17 +153,15 @@ TEST(ClockSourceMockTest, AlarmScheudlesAlarmWhenSignaled) {
ClockSourceMock cs;
const auto beginning = cs.now();
int alarmFiredCount = 0;
- ASSERT_OK(cs.setAlarm(beginning + Seconds{1},
- [&] {
- ++alarmFiredCount;
- ASSERT_OK(
- cs.setAlarm(beginning + Seconds{2}, [&] { ++alarmFiredCount; }));
- }));
+ ASSERT_OK(cs.setAlarm(beginning + Seconds{1}, [&] {
+ ++alarmFiredCount;
+ ASSERT_OK(cs.setAlarm(beginning + Seconds{2}, [&] { ++alarmFiredCount; }));
+ }));
ASSERT_EQ(0, alarmFiredCount);
cs.advance(Seconds{1});
ASSERT_EQ(1, alarmFiredCount);
cs.advance(Seconds{1});
ASSERT_EQ(2, alarmFiredCount);
}
-}
+} // namespace
} // namespace mongo
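Both alarm-test hunks above show the new trailing-lambda layout: a lambda passed as the last argument is formatted with a plain block indent from the statement, not hung under the open parenthesis. Sketch with a stand-in for setAlarm (hypothetical signature that simply invokes the callback):

    #include <functional>

    // Stand-in for ClockSourceMock::setAlarm; hypothetical signature.
    static void setAlarm(int /*when*/, const std::function<void()>& cb) {
        cb();
    }

    int main() {
        int alarmFiredCount = 0;
        // A trailing lambda argument keeps a plain block indent instead of
        // aligning its body under the open parenthesis.
        setAlarm(1, [&] {
            ++alarmFiredCount;
            setAlarm(2, [&] { ++alarmFiredCount; });
        });
        return alarmFiredCount == 2 ? 0 : 1;
    }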
diff --git a/src/mongo/util/cmdline_utils/censor_cmdline.cpp b/src/mongo/util/cmdline_utils/censor_cmdline.cpp
index 746daf24b2d..526414b21e9 100644
--- a/src/mongo/util/cmdline_utils/censor_cmdline.cpp
+++ b/src/mongo/util/cmdline_utils/censor_cmdline.cpp
@@ -233,4 +233,4 @@ void censorArgvArray(int argc, char** argv) {
}
}
} // namespace cmdline_utils
-}
+} // namespace mongo
diff --git a/src/mongo/util/cmdline_utils/censor_cmdline.h b/src/mongo/util/cmdline_utils/censor_cmdline.h
index df9ef9d1729..806b5d1ef60 100644
--- a/src/mongo/util/cmdline_utils/censor_cmdline.h
+++ b/src/mongo/util/cmdline_utils/censor_cmdline.h
@@ -46,4 +46,4 @@ void censorArgsVector(std::vector<std::string>* args);
void censorBSONObj(BSONObj* params);
} // namespace cmdline_utils
-}
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/idle_thread_block.cpp b/src/mongo/util/concurrency/idle_thread_block.cpp
index 2886a24edd7..64426a47774 100644
--- a/src/mongo/util/concurrency/idle_thread_block.cpp
+++ b/src/mongo/util/concurrency/idle_thread_block.cpp
@@ -36,7 +36,7 @@ namespace mongo {
namespace for_debuggers {
// This needs external linkage to ensure that debuggers can use it.
thread_local const char* idleThreadLocation = nullptr;
-}
+} // namespace for_debuggers
using for_debuggers::idleThreadLocation;
void IdleThreadBlock::beginIdleThreadBlock(const char* location) {
@@ -48,4 +48,4 @@ void IdleThreadBlock::endIdleThreadBlock() {
invariant(idleThreadLocation);
idleThreadLocation = nullptr;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index 8674fe269a4..a7e63fb3b3e 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -44,7 +44,7 @@ namespace mongo {
* timeout). Thus it can be implemented using OS-specific
* facilities in all environments (if desired). On Windows,
* the implementation below is faster than boost mutex.
-*/
+ */
#if defined(_WIN32)
class SimpleMutex {
diff --git a/src/mongo/util/concurrency/thread_name.cpp b/src/mongo/util/concurrency/thread_name.cpp
index 7aa58a3b6f4..03a6ab181c4 100644
--- a/src/mongo/util/concurrency/thread_name.cpp
+++ b/src/mongo/util/concurrency/thread_name.cpp
@@ -145,8 +145,8 @@ void setThreadName(StringData name) {
// limit, it's best to shorten long names.
int error = 0;
if (threadName.size() > 15) {
- std::string shortName = str::stream() << threadName.substr(0, 7) << '.'
- << threadName.substr(threadName.size() - 7);
+ std::string shortName = str::stream()
+ << threadName.substr(0, 7) << '.' << threadName.substr(threadName.size() - 7);
error = pthread_setname_np(pthread_self(), shortName.c_str());
} else {
error = pthread_setname_np(pthread_self(), threadName.rawData());
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 69e4e0f64b3..fd8d23377ea 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -173,8 +173,8 @@ void ThreadPool::_drainPendingTasks() {
// Tasks cannot be run inline because they can create OperationContexts and the join() caller
// may already have one associated with the thread.
stdx::thread cleanThread = stdx::thread([&] {
- const std::string threadName = str::stream() << _options.threadNamePrefix
- << _nextThreadId++;
+ const std::string threadName = str::stream()
+ << _options.threadNamePrefix << _nextThreadId++;
setThreadName(threadName);
_options.onCreateThread(threadName);
stdx::unique_lock<stdx::mutex> lock(_mutex);
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index d836f977b67..e30746807ae 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -137,8 +137,7 @@ Status TicketHolder::resize(int newSize) {
if (newSize > SEM_VALUE_MAX)
return Status(ErrorCodes::BadValue,
str::stream() << "Maximum value for semaphore is " << SEM_VALUE_MAX
- << "; given "
- << newSize);
+ << "; given " << newSize);
while (_outof.load() < newSize) {
release();
@@ -254,4 +253,4 @@ bool TicketHolder::_tryAcquire() {
return true;
}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/value.h b/src/mongo/util/concurrency/value.h
index b2759be742a..a0a03d9d260 100644
--- a/src/mongo/util/concurrency/value.h
+++ b/src/mongo/util/concurrency/value.h
@@ -79,4 +79,4 @@ public:
// multiple operations
bool operator==(const std::string& s) const;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/debugger.cpp b/src/mongo/util/debugger.cpp
index 16a319e87f3..53cbeedbe09 100644
--- a/src/mongo/util/debugger.cpp
+++ b/src/mongo/util/debugger.cpp
@@ -122,4 +122,4 @@ void setupSIGTRAPforGDB() {
#else
void setupSIGTRAPforGDB() {}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/decimal_counter.h b/src/mongo/util/decimal_counter.h
index a85d23f3dd9..b090989792f 100644
--- a/src/mongo/util/decimal_counter.h
+++ b/src/mongo/util/decimal_counter.h
@@ -101,4 +101,4 @@ private:
uint8_t _lastDigitIndex = 0; // Indicates the last digit in _digits.
T _counter = 0;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/dns_name.h b/src/mongo/util/dns_name.h
index 023bee10516..8b913cadd21 100644
--- a/src/mongo/util/dns_name.h
+++ b/src/mongo/util/dns_name.h
@@ -402,7 +402,7 @@ private:
void streamCore(StreamLike& os) const {
std::for_each(rbegin(_nameComponents),
rend(_nameComponents),
- [ first = true, &os ](const auto& component) mutable {
+ [first = true, &os](const auto& component) mutable {
if (!first)
os << '.';
first = false;
@@ -439,7 +439,7 @@ private:
// FQDNs and Relative Names are discriminated by this field.
Qualification fullyQualified;
};
-} // detail_dns_host_name
+} // namespace detail_dns_host_name
// The `operator==` function has to be defined out-of-line, because it uses `make_equality_lens`
// which is an auto-deduced return type function defined later in the class body.
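The capture-list hunk above looks like a parser fix rather than a style choice: the older formatter padded lambda init-captures as '[ first = true, &os ]', while version 7 formats them like any other capture list. A compiling sketch of the same shape (hypothetical lambda):

    #include <iostream>
    #include <string>

    // clang-format 6:  [ first = true ](const std::string& s) mutable { ... }
    // clang-format 7:  [first = true](const std::string& s) mutable { ... }
    auto printDotted = [first = true](const std::string& s) mutable {
        if (!first)
            std::cout << '.';
        first = false;
        std::cout << s;
    };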
diff --git a/src/mongo/util/dns_query_test.cpp b/src/mongo/util/dns_query_test.cpp
index b7dac331a12..8c6330b1557 100644
--- a/src/mongo/util/dns_query_test.cpp
+++ b/src/mongo/util/dns_query_test.cpp
@@ -110,11 +110,13 @@ TEST(MongoDnsQuery, srvRecords) {
} tests[] = {
{"test1.test.build.10gen.cc.",
{
- {"localhost.test.build.10gen.cc.", 27017}, {"localhost.test.build.10gen.cc.", 27018},
+ {"localhost.test.build.10gen.cc.", 27017},
+ {"localhost.test.build.10gen.cc.", 27018},
}},
{"test2.test.build.10gen.cc.",
{
- {"localhost.test.build.10gen.cc.", 27018}, {"localhost.test.build.10gen.cc.", 27019},
+ {"localhost.test.build.10gen.cc.", 27018},
+ {"localhost.test.build.10gen.cc.", 27019},
}},
{"test3.test.build.10gen.cc.",
{
@@ -174,7 +176,8 @@ TEST(MongoDnsQuery, txtRecords) {
}},
{"test6.test.build.10gen.cc",
{
- "authSource=otherDB", "replicaSet=repl0",
+ "authSource=otherDB",
+ "replicaSet=repl0",
}},
};
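These test-data hunks reflow braced initializer lists: where the old formatter bin-packed two entries per line, the new one gives each element its own line once the list no longer fits on one. A hypothetical equivalent of the data above:

    #include <utility>

    // one element per line, trailing comma kept
    const std::pair<const char*, int> kHosts[] = {
        {"localhost.example.com.", 27017},
        {"localhost.example.com.", 27018},
    };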
diff --git a/src/mongo/util/exception_filter_win32.cpp b/src/mongo/util/exception_filter_win32.cpp
index dfb631b1196..3759aa57ae5 100644
--- a/src/mongo/util/exception_filter_win32.cpp
+++ b/src/mongo/util/exception_filter_win32.cpp
@@ -178,7 +178,7 @@ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS* excPointers) {
// We won't reach here
return EXCEPTION_EXECUTE_HANDLER;
}
-}
+} // namespace
LPTOP_LEVEL_EXCEPTION_FILTER filtLast = 0;
@@ -192,6 +192,6 @@ void setWindowsUnhandledExceptionFilter() {
namespace mongo {
void setWindowsUnhandledExceptionFilter() {}
-}
+} // namespace mongo
#endif // _WIN32
diff --git a/src/mongo/util/exit.cpp b/src/mongo/util/exit.cpp
index 9f198d91d7e..452d64837e0 100644
--- a/src/mongo/util/exit.cpp
+++ b/src/mongo/util/exit.cpp
@@ -114,8 +114,9 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
ExitCode originallyRequestedCode = shutdownExitCode.get();
if (code != originallyRequestedCode) {
log() << "While running shutdown tasks with the intent to exit with code "
- << originallyRequestedCode << ", an additional shutdown request arrived with "
- "the intent to exit with a different exit code "
+ << originallyRequestedCode
+ << ", an additional shutdown request arrived with "
+ "the intent to exit with a different exit code "
<< code << "; ignoring the conflicting exit code";
}
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index 81da6d94723..d384493d5e0 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -266,4 +266,4 @@ BSONObj FailPoint::toBSON() const {
return builder.obj();
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index c8f974d475f..d5f9e909f37 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -43,10 +43,10 @@
#include "mongo/util/log.h"
#include "mongo/util/time_support.h"
-using mongo::getGlobalFailPointRegistry;
using mongo::BSONObj;
using mongo::FailPoint;
using mongo::FailPointEnableBlock;
+using mongo::getGlobalFailPointRegistry;
namespace stdx = mongo::stdx;
namespace mongo_test {
@@ -398,8 +398,7 @@ TEST(FailPoint, parseBSONInvalidDataFails) {
TEST(FailPoint, parseBSONValidDataSucceeds) {
auto swTuple = FailPoint::parseBSON(BSON("mode"
<< "alwaysOn"
- << "data"
- << BSON("a" << 1)));
+ << "data" << BSON("a" << 1)));
ASSERT_TRUE(swTuple.isOK());
}
@@ -448,4 +447,4 @@ TEST(FailPoint, FailPointBlockIfBasicTest) {
ASSERT(!"shouldn't get here");
}
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/util/file.cpp b/src/mongo/util/file.cpp
index 6cb07dcba9b..e68421bee18 100644
--- a/src/mongo/util/file.cpp
+++ b/src/mongo/util/file.cpp
@@ -139,12 +139,8 @@ void File::read(fileofs o, char* data, unsigned len) {
_bad = true;
msgasserted(10438,
str::stream() << "In File::read(), ReadFile for '" << _name << "' read "
- << bytesRead
- << " bytes while trying to read "
- << len
- << " bytes starting at offset "
- << o
- << ", truncated file?");
+ << bytesRead << " bytes while trying to read " << len
+ << " bytes starting at offset " << o << ", truncated file?");
}
}
@@ -242,8 +238,7 @@ void File::open(const char* filename, bool readOnly, bool direct) {
_fd = ::open(filename,
(readOnly ? O_RDONLY : (O_CREAT | O_RDWR | O_NOATIME))
#if defined(O_DIRECT)
- |
- (direct ? O_DIRECT : 0)
+ | (direct ? O_DIRECT : 0)
#endif
,
S_IRUSR | S_IWUSR);
@@ -264,12 +259,8 @@ void File::read(fileofs o, char* data, unsigned len) {
_bad = true;
msgasserted(16569,
str::stream() << "In File::read(), ::pread for '" << _name << "' read "
- << bytesRead
- << " bytes while trying to read "
- << len
- << " bytes starting at offset "
- << o
- << ", truncated file?");
+ << bytesRead << " bytes while trying to read " << len
+ << " bytes starting at offset " << o << ", truncated file?");
}
}
@@ -297,4 +288,4 @@ void File::write(fileofs o, const char* data, unsigned len) {
}
#endif // _WIN32
-}
+} // namespace mongo
diff --git a/src/mongo/util/file.h b/src/mongo/util/file.h
index 6676ee21bd4..10511465142 100644
--- a/src/mongo/util/file.h
+++ b/src/mongo/util/file.h
@@ -69,4 +69,4 @@ private:
#endif
std::string _name;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/future.h b/src/mongo/util/future.h
index 0dabd2c98ff..cd1a24066c4 100644
--- a/src/mongo/util/future.h
+++ b/src/mongo/util/future.h
@@ -1111,7 +1111,7 @@ NOINLINE_DECL auto ExecutorFuture<T>::wrapCBHelper(unique_function<Sig>&& func)
exec = _exec // can't move this!
](auto&&... args) mutable noexcept
->Future<UnwrappedType<decltype(func(std::forward<decltype(args)>(args)...))>> {
- auto[promise, future] = makePromiseFuture<
+ auto [promise, future] = makePromiseFuture<
UnwrappedType<decltype(func(std::forward<decltype(args)>(args)...))>>();
exec->schedule([
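The auto[promise, future] to auto [promise, future] hunks are another parser-level change: the older formatter glued the brackets to 'auto' as if indexing an array, while clang-format 7 recognizes the C++17 structured-binding syntax and inserts the space. A minimal sketch:

    #include <utility>

    std::pair<int, int> makePair() {
        return {3, 4};
    }

    // clang-format 6 emitted:  auto[x, y] = makePair();
    // clang-format 7 emits:    auto [x, y] = makePair();
    auto [x, y] = makePair();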
diff --git a/src/mongo/util/future_impl.h b/src/mongo/util/future_impl.h
index 1a37254a52b..37f9b129a4f 100644
--- a/src/mongo/util/future_impl.h
+++ b/src/mongo/util/future_impl.h
@@ -1029,8 +1029,7 @@ public:
// TODO in C++17 with constexpr if this can be done cleaner and more efficiently by not
// throwing.
- return std::move(*this).onError([func =
- std::forward<Func>(func)](Status && status) mutable {
+ return std::move(*this).onError([func = std::forward<Func>(func)](Status&& status) mutable {
if (status != code)
uassertStatusOK(status);
return throwingCall(func, std::move(status));
@@ -1047,8 +1046,7 @@ public:
if (_immediate || (isReady() && _shared->status.isOK()))
return std::move(*this);
- return std::move(*this).onError([func =
- std::forward<Func>(func)](Status && status) mutable {
+ return std::move(*this).onError([func = std::forward<Func>(func)](Status&& status) mutable {
if (!ErrorCodes::isA<category>(status.code()))
uassertStatusOK(status);
return throwingCall(func, std::move(status));
@@ -1070,9 +1068,8 @@ public:
static_assert(std::is_void<decltype(call(func, std::declval<const Status&>()))>::value,
"func passed to tapError must return void");
- return tapImpl(std::forward<Func>(func),
- [](Func && func, const T& val) noexcept {},
- [](Func && func, const Status& status) noexcept { call(func, status); });
+ return tapImpl(std::forward<Func>(func), [](Func && func, const T& val) noexcept {}, [
+ ](Func && func, const Status& status) noexcept { call(func, status); });
}
template <typename Func>
diff --git a/src/mongo/util/future_test_edge_cases.cpp b/src/mongo/util/future_test_edge_cases.cpp
index 53b4a837e9e..b81a049f94f 100644
--- a/src/mongo/util/future_test_edge_cases.cpp
+++ b/src/mongo/util/future_test_edge_cases.cpp
@@ -323,7 +323,7 @@ TEST(Future_EdgeCases, Racing_SharedPromise_getFuture_and_setError) {
TEST(Future_EdgeCases, SharedPromise_CompleteWithUnreadyFuture) {
SharedSemiFuture<void> sf;
- auto[promise, future] = makePromiseFuture<void>();
+ auto [promise, future] = makePromiseFuture<void>();
{
SharedPromise<void> sp;
diff --git a/src/mongo/util/future_test_executor_future.cpp b/src/mongo/util/future_test_executor_future.cpp
index 564d0e69cda..1c6dc09224c 100644
--- a/src/mongo/util/future_test_executor_future.cpp
+++ b/src/mongo/util/future_test_executor_future.cpp
@@ -37,36 +37,34 @@
namespace mongo {
namespace {
TEST(Executor_Future, Success_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] {},
- [](/*Future<void>*/ auto&& fut) {
- auto exec = InlineCountingExecutor::make();
- auto pf = makePromiseFuture<void>();
- ExecutorFuture<void>(exec).thenRunOn(exec).getAsync([outside = std::move(pf.promise)](
- Status status) mutable {
- ASSERT_OK(status);
- outside.emplaceValue();
- });
- ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
- ASSERT_EQ(exec->tasksRun.load(), 1);
- });
+ FUTURE_SUCCESS_TEST([] {},
+ [](/*Future<void>*/ auto&& fut) {
+ auto exec = InlineCountingExecutor::make();
+ auto pf = makePromiseFuture<void>();
+ ExecutorFuture<void>(exec).thenRunOn(exec).getAsync(
+ [outside = std::move(pf.promise)](Status status) mutable {
+ ASSERT_OK(status);
+ outside.emplaceValue();
+ });
+ ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
+ ASSERT_EQ(exec->tasksRun.load(), 1);
+ });
}
TEST(Executor_Future, Reject_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] {},
- [](/*Future<void>*/ auto&& fut) {
- auto exec = RejectingExecutor::make();
- auto pf = makePromiseFuture<void>();
- std::move(fut).thenRunOn(exec).getAsync([promise = std::move(pf.promise)](
- Status status) mutable {
- promise.emplaceValue(); // shouldn't be run anyway.
- FAIL("how did I run!?!?!");
- });
-
- // Promise is destroyed without calling the callback.
- ASSERT_EQ(std::move(pf.future).getNoThrow(), ErrorCodes::BrokenPromise);
- });
+ FUTURE_SUCCESS_TEST([] {},
+ [](/*Future<void>*/ auto&& fut) {
+ auto exec = RejectingExecutor::make();
+ auto pf = makePromiseFuture<void>();
+ std::move(fut).thenRunOn(exec).getAsync(
+ [promise = std::move(pf.promise)](Status status) mutable {
+ promise.emplaceValue(); // shouldn't be run anyway.
+ FAIL("how did I run!?!?!");
+ });
+
+ // Promise is destroyed without calling the callback.
+ ASSERT_EQ(std::move(pf.future).getNoThrow(), ErrorCodes::BrokenPromise);
+ });
}
TEST(Executor_Future, Success_then) {
diff --git a/src/mongo/util/future_test_future_int.cpp b/src/mongo/util/future_test_future_int.cpp
index 60691aca811..96023b210d3 100644
--- a/src/mongo/util/future_test_future_int.cpp
+++ b/src/mongo/util/future_test_future_int.cpp
@@ -77,16 +77,16 @@ TEST(Future, Success_semi_get) {
}
TEST(Future, Success_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] { return 1; },
- [](/*Future<int>*/ auto&& fut) {
- auto pf = makePromiseFuture<int>();
- std::move(fut).getAsync([outside = std::move(pf.promise)](StatusWith<int> sw) mutable {
- ASSERT_OK(sw);
- outside.emplaceValue(sw.getValue());
- });
- ASSERT_EQ(std::move(pf.future).get(), 1);
- });
+ FUTURE_SUCCESS_TEST([] { return 1; },
+ [](/*Future<int>*/ auto&& fut) {
+ auto pf = makePromiseFuture<int>();
+ std::move(fut).getAsync(
+ [outside = std::move(pf.promise)](StatusWith<int> sw) mutable {
+ ASSERT_OK(sw);
+ outside.emplaceValue(sw.getValue());
+ });
+ ASSERT_EQ(std::move(pf.future).get(), 1);
+ });
}
TEST(Future, Fail_getLvalue) {
@@ -144,7 +144,6 @@ TEST(Future, Success_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_EQ(status, 1);
});
-
});
}
@@ -157,7 +156,6 @@ TEST(Future, Fail_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_NOT_OK(status);
});
-
});
}
diff --git a/src/mongo/util/future_test_future_move_only.cpp b/src/mongo/util/future_test_future_move_only.cpp
index 5c03813679b..7fd124b61c5 100644
--- a/src/mongo/util/future_test_future_move_only.cpp
+++ b/src/mongo/util/future_test_future_move_only.cpp
@@ -130,11 +130,11 @@ TEST(Future_MoveOnly, Success_getAsync) {
FUTURE_SUCCESS_TEST([] { return Widget(1); },
[](/*Future<Widget>*/ auto&& fut) {
auto pf = makePromiseFuture<Widget>();
- std::move(fut).getAsync([outside = std::move(pf.promise)](
- StatusWith<Widget> sw) mutable {
- ASSERT_OK(sw);
- outside.emplaceValue(std::move(sw.getValue()));
- });
+ std::move(fut).getAsync(
+ [outside = std::move(pf.promise)](StatusWith<Widget> sw) mutable {
+ ASSERT_OK(sw);
+ outside.emplaceValue(std::move(sw.getValue()));
+ });
ASSERT_EQ(std::move(pf.future).get(), 1);
});
}
diff --git a/src/mongo/util/future_test_future_void.cpp b/src/mongo/util/future_test_future_void.cpp
index 5281d1a15d3..c9e9f5dfa51 100644
--- a/src/mongo/util/future_test_future_void.cpp
+++ b/src/mongo/util/future_test_future_void.cpp
@@ -73,16 +73,16 @@ TEST(Future_Void, Success_semi_get) {
}
TEST(Future_Void, Success_getAsync) {
- FUTURE_SUCCESS_TEST(
- [] {},
- [](/*Future<void>*/ auto&& fut) {
- auto pf = makePromiseFuture<void>();
- std::move(fut).getAsync([outside = std::move(pf.promise)](Status status) mutable {
- ASSERT_OK(status);
- outside.emplaceValue();
- });
- ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
- });
+ FUTURE_SUCCESS_TEST([] {},
+ [](/*Future<void>*/ auto&& fut) {
+ auto pf = makePromiseFuture<void>();
+ std::move(fut).getAsync(
+ [outside = std::move(pf.promise)](Status status) mutable {
+ ASSERT_OK(status);
+ outside.emplaceValue();
+ });
+ ASSERT_EQ(std::move(pf.future).getNoThrow(), Status::OK());
+ });
}
TEST(Future_Void, Fail_getLvalue) {
@@ -141,7 +141,6 @@ TEST(Future_Void, Success_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_OK(status);
});
-
});
}
@@ -154,7 +153,6 @@ TEST(Future_Void, Fail_isReady) {
ASSERT_EQ(stdx::this_thread::get_id(), id);
ASSERT_NOT_OK(status);
});
-
});
}
diff --git a/src/mongo/util/future_test_shared_future.cpp b/src/mongo/util/future_test_shared_future.cpp
index 12914422412..5afb3f6d112 100644
--- a/src/mongo/util/future_test_shared_future.cpp
+++ b/src/mongo/util/future_test_shared_future.cpp
@@ -67,7 +67,8 @@ TEST(SharedFuture, isReady_shared_TSAN_OK) {
auto fut = async([&] {
done = true;
return 1;
- }).share();
+ })
+ .share();
//(void)*const_cast<volatile bool*>(&done); // Data Race! Uncomment to make sure TSAN works.
while (!fut.isReady()) {
}
@@ -183,7 +184,7 @@ TEST(SharedFuture, NoStackOverflow_Destruction) {
// Add 100 children that each use 100K of stack space on destruction.
for (int i = 0; i < 100; i++) {
collector.push_back(
- shared.thenRunOn(exec).then([x = Evil()]{}).semi());
+ shared.thenRunOn(exec).then([x = Evil()] {}).semi());
}
for (auto&& collected : collector) {
diff --git a/src/mongo/util/future_test_utils.h b/src/mongo/util/future_test_utils.h
index 77451f837ff..d4189f28efc 100644
--- a/src/mongo/util/future_test_utils.h
+++ b/src/mongo/util/future_test_utils.h
@@ -131,14 +131,15 @@ template <typename Func, typename Result = std::result_of_t<Func && ()>>
Future<Result> async(Func&& func) {
auto pf = makePromiseFuture<Result>();
- stdx::thread([ promise = std::move(pf.promise), func = std::forward<Func>(func) ]() mutable {
+ stdx::thread([promise = std::move(pf.promise), func = std::forward<Func>(func)]() mutable {
sleepIfShould();
try {
completePromise(&promise, func);
} catch (const DBException& ex) {
promise.setError(ex.toStatus());
}
- }).detach();
+ })
+ .detach();
return std::move(pf.future);
}
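These hunks show the new wrapping for a member call chained onto a lambda-terminated expression: the closing '})' takes its own line and the trailing .detach() (or .share(), above) drops to the next. The same shape with the standard library (hedged sketch; work() is a hypothetical stand-in):

    #include <thread>

    void work() {}  // hypothetical task

    void spawnDetached() {
        std::thread([] {
            work();
        })
            .detach();  // chained call now sits on its own line
    }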
diff --git a/src/mongo/util/hex.cpp b/src/mongo/util/hex.cpp
index 1bbf362ada9..41255966894 100644
--- a/src/mongo/util/hex.cpp
+++ b/src/mongo/util/hex.cpp
@@ -107,4 +107,4 @@ std::string hexdump(const char* data, unsigned len) {
std::string s = ss.str();
return s;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/hex.h b/src/mongo/util/hex.h
index d58364fe54f..b01cb9b9336 100644
--- a/src/mongo/util/hex.h
+++ b/src/mongo/util/hex.h
@@ -129,4 +129,4 @@ std::string unsignedIntToFixedLengthHex(uint32_t val);
/* @return a dump of the buffer as hex byte ascii output */
std::string hexdump(const char* data, unsigned len);
-}
+} // namespace mongo
diff --git a/src/mongo/util/if_constexpr.h b/src/mongo/util/if_constexpr.h
index bf1dbdf8867..28900a243a1 100644
--- a/src/mongo/util/if_constexpr.h
+++ b/src/mongo/util/if_constexpr.h
@@ -31,6 +31,4 @@
// Terrible hack to work around clang-format being out of date.
// TODO sed this away and delete this file when we upgrade clang-format.
-#define IF_CONSTEXPR \
- if \
- constexpr
+#define IF_CONSTEXPR if constexpr
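As the comment in this header says, IF_CONSTEXPR exists only because the previous clang-format mangled the two-token 'if constexpr' across three lines; version 7 at least collapses the macro definition back onto one. Usage reads like ordinary if constexpr (hypothetical example):

    #include <cstddef>

    #define IF_CONSTEXPR if constexpr  // as defined in if_constexpr.h above

    template <typename T>
    std::size_t storageClass() {
        IF_CONSTEXPR(sizeof(T) <= 8) { return 0; }  // small objects
        else { return 1; }                          // everything else
    }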
diff --git a/src/mongo/util/intrusive_counter.cpp b/src/mongo/util/intrusive_counter.cpp
index 59b177a1fc3..e33cbc87da5 100644
--- a/src/mongo/util/intrusive_counter.cpp
+++ b/src/mongo/util/intrusive_counter.cpp
@@ -39,8 +39,7 @@ using boost::intrusive_ptr;
intrusive_ptr<const RCString> RCString::create(StringData s) {
uassert(16493,
str::stream() << "Tried to create string longer than "
- << (BSONObjMaxUserSize / 1024 / 1024)
- << "MB",
+ << (BSONObjMaxUserSize / 1024 / 1024) << "MB",
s.size() < static_cast<size_t>(BSONObjMaxUserSize));
const size_t sizeWithNUL = s.size() + 1;
diff --git a/src/mongo/util/log.h b/src/mongo/util/log.h
index bca7c87a33a..06fc7e2b004 100644
--- a/src/mongo/util/log.h
+++ b/src/mongo/util/log.h
@@ -174,15 +174,15 @@ inline bool shouldLog(logger::LogSeverity severity) {
} // namespace
// MONGO_LOG uses log component from MongoLogDefaultComponent from current or global namespace.
-#define MONGO_LOG(DLEVEL) \
- if (!(::mongo::logger::globalLogDomain()) \
- ->shouldLog(MongoLogDefaultComponent_component, \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
- } else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- MongoLogDefaultComponent_component)
+#define MONGO_LOG(DLEVEL) \
+ if (!(::mongo::logger::globalLogDomain()) \
+ ->shouldLog(MongoLogDefaultComponent_component, \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
+ } else \
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ MongoLogDefaultComponent_component)
#define LOG MONGO_LOG
@@ -190,32 +190,32 @@ inline bool shouldLog(logger::LogSeverity severity) {
if (!(::mongo::logger::globalLogDomain()) \
->shouldLog((COMPONENT1), ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
} else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ (COMPONENT1))
#define MONGO_LOG_COMPONENT2(DLEVEL, COMPONENT1, COMPONENT2) \
if (!(::mongo::logger::globalLogDomain()) \
->shouldLog( \
(COMPONENT1), (COMPONENT2), ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
} else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
-
-#define MONGO_LOG_COMPONENT3(DLEVEL, COMPONENT1, COMPONENT2, COMPONENT3) \
- if (!(::mongo::logger::globalLogDomain()) \
- ->shouldLog((COMPONENT1), \
- (COMPONENT2), \
- (COMPONENT3), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
- } else \
- ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
- ::mongo::getThreadName(), \
- ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
- (COMPONENT1))
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ (COMPONENT1))
+
+#define MONGO_LOG_COMPONENT3(DLEVEL, COMPONENT1, COMPONENT2, COMPONENT3) \
+ if (!(::mongo::logger::globalLogDomain()) \
+ ->shouldLog((COMPONENT1), \
+ (COMPONENT2), \
+ (COMPONENT3), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL))) { \
+ } else \
+ ::mongo::logger::LogstreamBuilder(::mongo::logger::globalLogDomain(), \
+ ::mongo::getThreadName(), \
+ ::mongo::LogstreamBuilder::severityCast(DLEVEL), \
+ (COMPONENT1))
/**
* Rotates the log files. Returns true if all logs rotate successfully.
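The log-macro hunks are pure realignment: clang-format right-aligns every '\' continuation character of a macro to a shared column, so widening any single line reflows the backslashes of the whole definition, which is why these large hunks contain no textual change. In miniature (hypothetical macro; shouldLog and logStreamFor are assumed helpers):

    #define HYPOTHETICAL_LOG(LEVEL)    \
        if (!shouldLog(LEVEL)) {       \
        } else                         \
            logStreamFor(LEVEL)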
diff --git a/src/mongo/util/log_and_backoff.cpp b/src/mongo/util/log_and_backoff.cpp
index e890f86a9ca..3438b4b23b7 100644
--- a/src/mongo/util/log_and_backoff.cpp
+++ b/src/mongo/util/log_and_backoff.cpp
@@ -40,8 +40,8 @@ void logAndBackoff(logger::LogComponent logComponent,
logger::LogSeverity logLevel,
size_t numAttempts,
StringData message) {
- MONGO_LOG_COMPONENT(logLevel, logComponent) << message
- << ". Retrying, attempt: " << numAttempts;
+ MONGO_LOG_COMPONENT(logLevel, logComponent)
+ << message << ". Retrying, attempt: " << numAttempts;
if (numAttempts < 4) {
// no-op
diff --git a/src/mongo/util/lru_cache_test.cpp b/src/mongo/util/lru_cache_test.cpp
index ef24d708828..5a5ae06fae9 100644
--- a/src/mongo/util/lru_cache_test.cpp
+++ b/src/mongo/util/lru_cache_test.cpp
@@ -277,7 +277,6 @@ TEST(LRUCacheTest, SizeOneCache) {
// Test cache eviction when the cache is full and new elements are added.
TEST(LRUCacheTest, EvictionTest) {
runWithDifferentSizes([](int maxSize) {
-
// Test eviction for any permutation of the original cache
for (int i = 0; i < maxSize; i++) {
LRUCache<int, int> cache(maxSize);
@@ -309,7 +308,6 @@ TEST(LRUCacheTest, EvictionTest) {
// from any original position in the cache.
TEST(LRUCacheTest, PromoteTest) {
runWithDifferentSizes([](int maxSize) {
-
// Test promotion for any position in the original cache
// i <= maxSize here, so we test promotion of cache.end(),
// and of a non-existent key.
@@ -354,7 +352,6 @@ TEST(LRUCacheTest, PromoteTest) {
// the existing entry and gets promoted properly
TEST(LRUCacheTest, ReplaceKeyTest) {
runWithDifferentSizes([](int maxSize) {
-
// Test replacement for any position in the original cache
for (int i = 0; i < maxSize; i++) {
LRUCache<int, int> cache(maxSize);
@@ -378,7 +375,6 @@ TEST(LRUCacheTest, ReplaceKeyTest) {
// the existing entry and gets promoted properly
TEST(LRUCacheTest, EraseByKey) {
runWithDifferentSizes([](int maxSize) {
-
// Test replacement for any position in the original cache
// i <= maxSize so we erase a non-existent element
for (int i = 0; i <= maxSize; i++) {
@@ -416,7 +412,6 @@ TEST(LRUCacheTest, EraseByKey) {
// Test removal of elements by iterator from the cache
TEST(LRUCacheTest, EraseByIterator) {
runWithDifferentSizes([](int maxSize) {
-
// Test replacement for any position in the original cache
for (int i = 0; i < maxSize; i++) {
LRUCache<int, int> cache(maxSize);
diff --git a/src/mongo/util/map_util.h b/src/mongo/util/map_util.h
index b576eb573af..5825cfe79b8 100644
--- a/src/mongo/util/map_util.h
+++ b/src/mongo/util/map_util.h
@@ -42,4 +42,4 @@ V mapFindWithDefault(const M& myMap, const K& key, const V& defaultValue = V())
return it->second;
}
-} // end namespace
+} // namespace mongo
diff --git a/src/mongo/util/md5_test.cpp b/src/mongo/util/md5_test.cpp
index 996cf6ae931..79d598eb040 100644
--- a/src/mongo/util/md5_test.cpp
+++ b/src/mongo/util/md5_test.cpp
@@ -35,4 +35,4 @@ namespace mongo {
TEST(MD5, BuiltIn1) {
ASSERT_EQUALS(0, do_md5_test());
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/md5main.cpp b/src/mongo/util/md5main.cpp
index 51291c69686..27047713d55 100644
--- a/src/mongo/util/md5main.cpp
+++ b/src/mongo/util/md5main.cpp
@@ -64,8 +64,8 @@ static const char* const version = "2002-04-13";
/* modified: not static, renamed */
/* Run the self-test. */
/*static*/ int
- // do_test(void)
- do_md5_test(void) {
+// do_test(void)
+do_md5_test(void) {
static const char* const test[7 * 2] = {
"",
"d41d8cd98f00b204e9800998ecf8427e",
diff --git a/src/mongo/util/net/cidr.cpp b/src/mongo/util/net/cidr.cpp
index 8e3f5899c42..20cc7e6069c 100644
--- a/src/mongo/util/net/cidr.cpp
+++ b/src/mongo/util/net/cidr.cpp
@@ -40,8 +40,8 @@
#endif
using std::begin;
-using std::find;
using std::end;
+using std::find;
namespace mongo {
@@ -139,7 +139,7 @@ BSONObjBuilder& BSONObjBuilderValueStream::operator<<<CIDR>(CIDR value) {
return *_builder;
}
-} // namespace
+} // namespace mongo
std::ostream& mongo::operator<<(std::ostream& s, const CIDR& cidr) {
return append(s, cidr._family, cidr._ip, cidr._len);
diff --git a/src/mongo/util/net/hostandport.cpp b/src/mongo/util/net/hostandport.cpp
index eb5e617841f..d083803b4be 100644
--- a/src/mongo/util/net/hostandport.cpp
+++ b/src/mongo/util/net/hostandport.cpp
@@ -80,7 +80,7 @@ int HostAndPort::port() const {
bool HostAndPort::isLocalHost() const {
return (_host == "localhost" || str::startsWith(_host.c_str(), "127.") || _host == "::1" ||
_host == "anonymous unix socket" || _host.c_str()[0] == '/' // unix socket
- );
+ );
}
bool HostAndPort::isDefaultRoute() const {
@@ -150,8 +150,8 @@ Status HostAndPort::initialize(StringData s) {
if (openBracketPos != std::string::npos) {
if (openBracketPos != 0) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "'[' present, but not first character in "
- << s.toString());
+ str::stream()
+ << "'[' present, but not first character in " << s.toString());
}
if (closeBracketPos == std::string::npos) {
return Status(ErrorCodes::FailedToParse,
@@ -165,31 +165,29 @@ Status HostAndPort::initialize(StringData s) {
// If the last colon is inside the brackets, then there must not be a port.
if (s.size() != closeBracketPos + 1) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "missing colon after ']' before the port in "
- << s.toString());
+ str::stream()
+ << "missing colon after ']' before the port in " << s.toString());
}
colonPos = std::string::npos;
} else if (colonPos != closeBracketPos + 1) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Extraneous characters between ']' and pre-port ':'"
- << " in "
- << s.toString());
+ << " in " << s.toString());
}
} else if (closeBracketPos != std::string::npos) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "']' present without '[' in " << s.toString());
} else if (s.find(':') != colonPos) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "More than one ':' detected. If this is an ipv6 address,"
- << " it needs to be surrounded by '[' and ']'; "
- << s.toString());
+ str::stream()
+ << "More than one ':' detected. If this is an ipv6 address,"
+ << " it needs to be surrounded by '[' and ']'; " << s.toString());
}
if (hostPart.empty()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Empty host component parsing HostAndPort from \""
- << str::escape(s.toString())
- << "\"");
+ << str::escape(s.toString()) << "\"");
}
int port;
@@ -203,8 +201,7 @@ Status HostAndPort::initialize(StringData s) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Port number " << port
<< " out of range parsing HostAndPort from \""
- << str::escape(s.toString())
- << "\"");
+ << str::escape(s.toString()) << "\"");
}
} else {
port = -1;
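The hostandport.cpp hunks condense the dominant pattern of the patch: clang-format 6 gave each operand of a wrapped str::stream() message its own line, while clang-format 7 breaks once after str::stream() where needed and then packs operands up to the column limit, breaking before '<<'. The same message built with std::ostringstream as a stand-in (hypothetical sketch):

    #include <sstream>
    #include <string>

    std::string portError(int port, const std::string& input) {
        std::ostringstream ss;  // stand-in for mongo::str::stream()
        ss << "Port number " << port << " out of range parsing HostAndPort from \""
           << input << "\"";
        return ss.str();
    }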
diff --git a/src/mongo/util/net/http_client_none.cpp b/src/mongo/util/net/http_client_none.cpp
index 3e0789c116d..6a3d11e6b1a 100644
--- a/src/mongo/util/net/http_client_none.cpp
+++ b/src/mongo/util/net/http_client_none.cpp
@@ -27,8 +27,8 @@
* it in the license file.
*/
-#include "mongo/util/net/http_client.h"
#include "mongo/base/status.h"
+#include "mongo/util/net/http_client.h"
namespace mongo {
diff --git a/src/mongo/util/net/http_client_winhttp.cpp b/src/mongo/util/net/http_client_winhttp.cpp
index 4ddab8046aa..f774e7387e4 100644
--- a/src/mongo/util/net/http_client_winhttp.cpp
+++ b/src/mongo/util/net/http_client_winhttp.cpp
@@ -60,7 +60,8 @@ namespace mongo {
namespace {
const LPCWSTR kAcceptTypes[] = {
- L"application/octet-stream", nullptr,
+ L"application/octet-stream",
+ nullptr,
};
struct ProcessedUrl {
@@ -253,8 +254,7 @@ private:
const auto msg = errnoWithDescription(err);
uasserted(ErrorCodes::OperationFailed,
str::stream() << "Failed receiving response from server"
- << ": "
- << msg);
+ << ": " << msg);
}
DWORD statusCode = 0;
diff --git a/src/mongo/util/net/private/socket_poll.cpp b/src/mongo/util/net/private/socket_poll.cpp
index 28bfed452de..447fcf5d7d2 100644
--- a/src/mongo/util/net/private/socket_poll.cpp
+++ b/src/mongo/util/net/private/socket_poll.cpp
@@ -70,4 +70,4 @@ int socketPoll(pollfd* fdarray, unsigned long nfds, int timeout) {
}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/net/private/socket_poll.h b/src/mongo/util/net/private/socket_poll.h
index 705633f624b..8b0c116b66e 100644
--- a/src/mongo/util/net/private/socket_poll.h
+++ b/src/mongo/util/net/private/socket_poll.h
@@ -36,4 +36,4 @@
namespace mongo {
bool isPollSupported();
int socketPoll(pollfd* fdarray, unsigned long nfds, int timeout);
-}
+} // namespace mongo
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index 6d7b2d1e8f7..c728b674b0b 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -120,8 +120,8 @@ void setSockTimeouts(int sock, double secs) {
log() << "unable to set SO_RCVTIMEO: " << errnoWithDescription(WSAGetLastError());
status =
setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, reinterpret_cast<char*>(&timeout), sizeof(DWORD));
- DEV if (report && (status == SOCKET_ERROR)) log() << "unable to set SO_SNDTIMEO: "
- << errnoWithDescription(WSAGetLastError());
+ DEV if (report && (status == SOCKET_ERROR)) log()
+ << "unable to set SO_SNDTIMEO: " << errnoWithDescription(WSAGetLastError());
#else
struct timeval tv;
tv.tv_sec = (int)secs;
@@ -547,7 +547,7 @@ void Socket::handleSendError(int ret, const char* context) {
<< ' ' << remoteString();
throwSocketError(SocketErrorKind::SEND_ERROR, remoteString());
}
-}
+} // namespace mongo
void Socket::handleRecvError(int ret, int len) {
if (ret == 0) {
diff --git a/src/mongo/util/net/ssl/context_schannel.hpp b/src/mongo/util/net/ssl/context_schannel.hpp
index baabea394f6..fff06f9b188 100644
--- a/src/mongo/util/net/ssl/context_schannel.hpp
+++ b/src/mongo/util/net/ssl/context_schannel.hpp
@@ -53,28 +53,28 @@ public:
#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
/// Move-construct a context from another.
/**
- * This constructor moves an SSL context from one object to another.
- *
- * @param other The other context object from which the move will occur.
- *
- * @note Following the move, the following operations only are valid for the
- * moved-from object:
- * @li Destruction.
- * @li As a target for move-assignment.
- */
+ * This constructor moves an SSL context from one object to another.
+ *
+ * @param other The other context object from which the move will occur.
+ *
+ * @note Following the move, the following operations only are valid for the
+ * moved-from object:
+ * @li Destruction.
+ * @li As a target for move-assignment.
+ */
ASIO_DECL context(context&& other);
/// Move-assign a context from another.
/**
- * This assignment operator moves an SSL context from one object to another.
- *
- * @param other The other context object from which the move will occur.
- *
- * @note Following the move, the following operations only are valid for the
- * moved-from object:
- * @li Destruction.
- * @li As a target for move-assignment.
- */
+ * This assignment operator moves an SSL context from one object to another.
+ *
+ * @param other The other context object from which the move will occur.
+ *
+ * @note Following the move, the following operations only are valid for the
+ * moved-from object:
+ * @li Destruction.
+ * @li As a target for move-assignment.
+ */
ASIO_DECL context& operator=(context&& other);
#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
@@ -83,10 +83,10 @@ public:
/// Get the underlying implementation in the native type.
/**
- * This function may be used to obtain the underlying implementation of the
- * context. This is intended to allow access to context functionality that is
- * not otherwise provided.
- */
+ * This function may be used to obtain the underlying implementation of the
+ * context. This is intended to allow access to context functionality that is
+ * not otherwise provided.
+ */
ASIO_DECL native_handle_type native_handle();
private:
diff --git a/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp b/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp
index a78460e8d97..154f08707aa 100644
--- a/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp
+++ b/src/mongo/util/net/ssl/detail/impl/engine_apple.ipp
@@ -63,16 +63,16 @@ public:
const auto status = static_cast<::OSStatus>(value);
apple::CFUniquePtr<::CFStringRef> errstr(::SecCopyErrorMessageString(status, nullptr));
if (!errstr) {
- return mongo::str::stream() << "Secure.Transport unknown error: "
- << static_cast<int>(status);
+ return mongo::str::stream()
+ << "Secure.Transport unknown error: " << static_cast<int>(status);
}
const auto len = ::CFStringGetMaximumSizeForEncoding(::CFStringGetLength(errstr.get()),
::kCFStringEncodingUTF8);
std::string ret;
ret.resize(len + 1);
if (!::CFStringGetCString(errstr.get(), &ret[0], len, ::kCFStringEncodingUTF8)) {
- return mongo::str::stream() << "Secure.Transport unknown error: "
- << static_cast<int>(status);
+ return mongo::str::stream()
+ << "Secure.Transport unknown error: " << static_cast<int>(status);
}
ret.resize(strlen(ret.c_str()));
diff --git a/src/mongo/util/net/ssl/detail/io.hpp b/src/mongo/util/net/ssl/detail/io.hpp
index 8a702abc9dd..d6e376b00f0 100644
--- a/src/mongo/util/net/ssl/detail/io.hpp
+++ b/src/mongo/util/net/ssl/detail/io.hpp
@@ -247,7 +247,7 @@ public:
// Release any waiting write operations.
core_.pending_write_.expires_at(core_.neg_infin());
- // Fall through to call handler.
+ // Fall through to call handler.
default:
diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
index 56b5911e3f6..3cf75312d3f 100644
--- a/src/mongo/util/net/ssl_manager.cpp
+++ b/src/mongo/util/net/ssl_manager.cpp
@@ -171,9 +171,7 @@ std::string RFC4514Parser::extractAttributeName() {
} else {
uasserted(ErrorCodes::BadValue,
str::stream() << "DN attribute names must begin with either a digit or an alpha"
- << " not \'"
- << ch
- << "\'");
+ << " not \'" << ch << "\'");
}
for (; ch != '=' && !done(); ch = _advance()) {
@@ -218,8 +216,7 @@ std::pair<std::string, RFC4514Parser::ValueTerminator> RFC4514Parser::extractVal
uassert(ErrorCodes::BadValue,
str::stream() << "Escaped hex value contains invalid character \'"
- << hexValStr[1]
- << "\'",
+ << hexValStr[1] << "\'",
isHex(hexValStr[1]));
const char hexVal = uassertStatusOK(fromHex(StringData(hexValStr.data(), 2)));
sb << hexVal;
@@ -247,8 +244,8 @@ std::pair<std::string, RFC4514Parser::ValueTerminator> RFC4514Parser::extractVal
}
} else if (isEscaped(ch)) {
uasserted(ErrorCodes::BadValue,
- str::stream() << "Found unescaped character that should be escaped: \'" << ch
- << "\'");
+ str::stream()
+ << "Found unescaped character that should be escaped: \'" << ch << "\'");
} else {
if (ch != ' ') {
trailingSpaces = 0;
@@ -832,9 +829,9 @@ StatusWith<std::string> readDERString(ConstDataRangeCursor& cdc) {
if (derString.getType() != DERType::UTF8String) {
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "Unexpected DER Tag, Got "
- << static_cast<char>(derString.getType())
- << ", Expected UTF8String");
+ str::stream()
+ << "Unexpected DER Tag, Got " << static_cast<char>(derString.getType())
+ << ", Expected UTF8String");
}
return derString.readUtf8String();
@@ -970,9 +967,9 @@ StatusWith<stdx::unordered_set<RoleName>> parsePeerRoles(ConstDataRange cdrExten
if (swSet.getValue().getType() != DERType::SET) {
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "Unexpected DER Tag, Got "
- << static_cast<char>(swSet.getValue().getType())
- << ", Expected SET");
+ str::stream()
+ << "Unexpected DER Tag, Got "
+ << static_cast<char>(swSet.getValue().getType()) << ", Expected SET");
}
ConstDataRangeCursor cdcSet(swSet.getValue().getSetRange());
diff --git a/src/mongo/util/net/ssl_manager.h b/src/mongo/util/net/ssl_manager.h
index 859f671d24b..ce7b7d9bfc7 100644
--- a/src/mongo/util/net/ssl_manager.h
+++ b/src/mongo/util/net/ssl_manager.h
@@ -69,7 +69,7 @@ Status validateOpensslCipherConfig(const std::string&);
* Validation callback for setParameter 'disableNonTLSConnectionLogging'.
*/
Status validateDisableNonTLSConnectionLogging(const bool&);
-}
+} // namespace mongo
#ifdef MONGO_CONFIG_SSL
namespace mongo {
@@ -220,8 +220,8 @@ public:
virtual const SSLConfiguration& getSSLConfiguration() const = 0;
/**
- * Fetches the error text for an error code, in a thread-safe manner.
- */
+ * Fetches the error text for an error code, in a thread-safe manner.
+ */
static std::string getSSLErrorMessage(int code);
/**
diff --git a/src/mongo/util/net/ssl_manager_apple.cpp b/src/mongo/util/net/ssl_manager_apple.cpp
index 9ba948100e9..94b39327033 100644
--- a/src/mongo/util/net/ssl_manager_apple.cpp
+++ b/src/mongo/util/net/ssl_manager_apple.cpp
@@ -725,8 +725,7 @@ StatusWith<CFUniquePtr<::CFArrayRef>> loadPEM(const std::string& keyfilepath,
return Status(ErrorCodes::InvalidSSLConfiguration,
str::stream() << "Unable to load PEM from '" << keyfilepath << "'"
<< (passphrase.empty() ? "" : " with passphrase")
- << (msg.empty() ? "" : ": ")
- << msg);
+ << (msg.empty() ? "" : ": ") << msg);
};
std::ifstream pemFile(keyfilepath, std::ios::binary);
@@ -746,7 +745,9 @@ StatusWith<CFUniquePtr<::CFArrayRef>> loadPEM(const std::string& keyfilepath,
nullptr, reinterpret_cast<const uint8_t*>(passphrase.c_str()), passphrase.size()));
}
::SecItemImportExportKeyParameters params = {
- SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION, 0, cfpass.get(),
+ SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION,
+ 0,
+ cfpass.get(),
};
CFUniquePtr<CFStringRef> cfkeyfile(
@@ -771,8 +772,8 @@ StatusWith<CFUniquePtr<::CFArrayRef>> loadPEM(const std::string& keyfilepath,
"key. Consider using a certificate selector or PKCS#12 instead");
}
if (status != ::errSecSuccess) {
- return retFail(str::stream() << "Failing importing certificate(s): "
- << stringFromOSStatus(status));
+ return retFail(str::stream()
+ << "Failing importing certificate(s): " << stringFromOSStatus(status));
}
if (mode == kLoadPEMBindIdentities) {
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index 56c01f3460c..f9933800123 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -920,8 +920,9 @@ Status SSLManagerOpenSSL::initSSLContext(SSL_CTX* context,
}
// We use the address of the context as the session id context.
- if (0 == ::SSL_CTX_set_session_id_context(
- context, reinterpret_cast<unsigned char*>(&context), sizeof(context))) {
+ if (0 ==
+ ::SSL_CTX_set_session_id_context(
+ context, reinterpret_cast<unsigned char*>(&context), sizeof(context))) {
return Status(ErrorCodes::InvalidSSLConfiguration,
str::stream() << "Can not store ssl session id context: "
<< getSSLErrorMessage(ERR_get_error()));
@@ -1306,14 +1307,11 @@ Status SSLManagerOpenSSL::_setupSystemCA(SSL_CTX* context) {
// On non-Windows/non-Apple platforms, the OpenSSL libraries should have been configured
// with default locations for CA certificates.
if (SSL_CTX_set_default_verify_paths(context) != 1) {
- return {ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "error loading system CA certificates "
- << "(default certificate file: "
- << X509_get_default_cert_file()
- << ", "
- << "default certificate path: "
- << X509_get_default_cert_dir()
- << ")"};
+ return {
+ ErrorCodes::InvalidSSLConfiguration,
+ str::stream() << "error loading system CA certificates "
+ << "(default certificate file: " << X509_get_default_cert_file() << ", "
+ << "default certificate path: " << X509_get_default_cert_dir() << ")"};
}
#else
@@ -1357,17 +1355,17 @@ bool SSLManagerOpenSSL::_setupCRL(SSL_CTX* context, const std::string& crlFile)
}
/*
-* The interface layer between network and BIO-pair. The BIO-pair buffers
-* the data to/from the TLS layer.
-*/
+ * The interface layer between network and BIO-pair. The BIO-pair buffers
+ * the data to/from the TLS layer.
+ */
void SSLManagerOpenSSL::_flushNetworkBIO(SSLConnectionOpenSSL* conn) {
char buffer[BUFFER_SIZE];
int wantWrite;
/*
- * Write the complete contents of the buffer. Leaving the buffer
- * unflushed could cause a deadlock.
- */
+ * Write the complete contents of the buffer. Leaving the buffer
+ * unflushed could cause a deadlock.
+ */
while ((wantWrite = BIO_ctrl_pending(conn->networkBIO)) > 0) {
if (wantWrite > BUFFER_SIZE) {
wantWrite = BUFFER_SIZE;
diff --git a/src/mongo/util/net/ssl_manager_test.cpp b/src/mongo/util/net/ssl_manager_test.cpp
index acf4d04e438..a7335970125 100644
--- a/src/mongo/util/net/ssl_manager_test.cpp
+++ b/src/mongo/util/net/ssl_manager_test.cpp
@@ -187,7 +187,10 @@ TEST(SSLManager, MongoDBRolesParser) {
// Negative: Runt, only a tag and long length with wrong missing length
{
unsigned char derData[] = {
- 0x31, 0x88, 0xff, 0xff,
+ 0x31,
+ 0x88,
+ 0xff,
+ 0xff,
};
auto swPeer = parsePeerRoles(ConstDataRange(derData));
ASSERT_NOT_OK(swPeer.getStatus());
@@ -196,7 +199,10 @@ TEST(SSLManager, MongoDBRolesParser) {
// Negative: Runt, only a tag and long length
{
unsigned char derData[] = {
- 0x31, 0x82, 0xff, 0xff,
+ 0x31,
+ 0x82,
+ 0xff,
+ 0xff,
};
auto swPeer = parsePeerRoles(ConstDataRange(derData));
ASSERT_NOT_OK(swPeer.getStatus());
@@ -362,9 +368,7 @@ TEST(SSLManager, DNParsingAndNormalization) {
}
TEST(SSLManager, BadDNParsing) {
- std::vector<std::string> tests = {"CN=#12345",
- R"(CN=\B)",
- R"(CN=<", "\)"};
+ std::vector<std::string> tests = {"CN=#12345", R"(CN=\B)", R"(CN=<", "\)"};
for (const auto& test : tests) {
log() << "Testing bad DN: \"" << test << "\"";
auto swDN = parseDN(test);
diff --git a/src/mongo/util/net/ssl_manager_windows.cpp b/src/mongo/util/net/ssl_manager_windows.cpp
index db9816d2683..36e552c6e17 100644
--- a/src/mongo/util/net/ssl_manager_windows.cpp
+++ b/src/mongo/util/net/ssl_manager_windows.cpp
@@ -70,8 +70,8 @@ extern SSLManagerInterface* theSSLManager;
namespace {
/**
-* Free a Certificate Context.
-*/
+ * Free a Certificate Context.
+ */
struct CERTFree {
void operator()(const CERT_CONTEXT* p) noexcept {
if (p) {
@@ -83,8 +83,8 @@ struct CERTFree {
using UniqueCertificate = std::unique_ptr<const CERT_CONTEXT, CERTFree>;
/**
-* Free a CRL Handle
-*/
+ * Free a CRL Handle
+ */
struct CryptCRLFree {
void operator()(const CRL_CONTEXT* p) noexcept {
if (p) {
@@ -97,8 +97,8 @@ using UniqueCRL = std::unique_ptr<const CRL_CONTEXT, CryptCRLFree>;
/**
-* Free a Certificate Chain Context
-*/
+ * Free a Certificate Chain Context
+ */
struct CryptCertChainFree {
void operator()(const CERT_CHAIN_CONTEXT* p) noexcept {
if (p) {
@@ -111,10 +111,10 @@ using UniqueCertChain = std::unique_ptr<const CERT_CHAIN_CONTEXT, CryptCertChain
/**
-* A simple generic class to manage Windows handle like things. Behaves similarly to std::unique_ptr.
-*
-* Only supports move.
-*/
+ * A simple generic class to manage Windows handle like things. Behaves similarly to std::unique_ptr.
+ *
+ * Only supports move.
+ */
template <typename HandleT, class Deleter>
class AutoHandle {
public:
@@ -157,8 +157,8 @@ private:
};
/**
-* Free a HCRYPTPROV Handle
-*/
+ * Free a HCRYPTPROV Handle
+ */
struct CryptProviderFree {
void operator()(HCRYPTPROV const h) noexcept {
if (h) {
@@ -170,8 +170,8 @@ struct CryptProviderFree {
using UniqueCryptProvider = AutoHandle<HCRYPTPROV, CryptProviderFree>;
/**
-* Free a HCRYPTKEY Handle
-*/
+ * Free a HCRYPTKEY Handle
+ */
struct CryptKeyFree {
void operator()(HCRYPTKEY const h) noexcept {
if (h) {
@@ -184,7 +184,7 @@ using UniqueCryptKey = AutoHandle<HCRYPTKEY, CryptKeyFree>;
/**
* Free a CERTSTORE Handle
-*/
+ */
struct CertStoreFree {
void operator()(HCERTSTORE const p) noexcept {
if (p) {
@@ -199,8 +199,8 @@ struct CertStoreFree {
using UniqueCertStore = AutoHandle<HCERTSTORE, CertStoreFree>;
/**
-* Free a HCERTCHAINENGINE Handle
-*/
+ * Free a HCERTCHAINENGINE Handle
+ */
struct CertChainEngineFree {
void operator()(HCERTCHAINENGINE const p) noexcept {
if (p) {
@@ -816,8 +816,8 @@ StatusWith<UniqueCertificateWithPrivateKey> readCertPEMFile(StringData fileName,
// Use the log file if possible
if (!serverGlobalParams.logpath.empty()) {
static AtomicWord<int> counter{0};
- std::string keyContainerName = str::stream() << serverGlobalParams.logpath
- << counter.fetchAndAdd(1);
+ std::string keyContainerName = str::stream()
+ << serverGlobalParams.logpath << counter.fetchAndAdd(1);
wstr = toNativeString(keyContainerName.c_str());
} else {
auto us = UUID::gen().toString();
@@ -846,8 +846,8 @@ StatusWith<UniqueCertificateWithPrivateKey> readCertPEMFile(StringData fileName,
} else {
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CryptAcquireContextW failed "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CryptAcquireContextW failed " << errnoWithDescription(gle));
}
}
} else {
@@ -857,8 +857,8 @@ StatusWith<UniqueCertificateWithPrivateKey> readCertPEMFile(StringData fileName,
if (!ret) {
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CryptAcquireContextW failed "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CryptAcquireContextW failed " << errnoWithDescription(gle));
}
}
UniqueCryptProvider cryptProvider(hProv);
@@ -1013,8 +1013,8 @@ Status readCRLPEMFile(HCERTSTORE certStore, StringData fileName) {
if (!ret) {
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CertAddCRLContextToStore Failed "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CertAddCRLContextToStore Failed " << errnoWithDescription(gle));
}
}
@@ -1061,8 +1061,7 @@ StatusWith<UniqueCertificate> loadCertificateSelectorFromStore(
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
str::stream() << "CertOpenStore failed to open store 'My' from '" << storeName
- << "': "
- << errnoWithDescription(gle));
+ << "': " << errnoWithDescription(gle));
}
UniqueCertStore storeHolder(store);
@@ -1082,11 +1081,8 @@ StatusWith<UniqueCertificate> loadCertificateSelectorFromStore(
ErrorCodes::InvalidSSLConfiguration,
str::stream()
<< "CertFindCertificateInStore failed to find cert with subject name '"
- << selector.subject.c_str()
- << "' in 'My' store in '"
- << storeName
- << "': "
- << errnoWithDescription(gle));
+ << selector.subject.c_str() << "' in 'My' store in '" << storeName
+ << "': " << errnoWithDescription(gle));
}
return UniqueCertificate(cert);
@@ -1106,10 +1102,8 @@ StatusWith<UniqueCertificate> loadCertificateSelectorFromStore(
str::stream()
<< "CertFindCertificateInStore failed to find cert with thumbprint '"
<< toHex(selector.thumbprint.data(), selector.thumbprint.size())
- << "' in 'My' store in '"
- << storeName
- << "': "
- << errnoWithDescription(gle));
+ << "' in 'My' store in '" << storeName
+ << "': " << errnoWithDescription(gle));
}
return UniqueCertificate(cert);
@@ -1636,8 +1630,8 @@ Status validatePeerCertificate(const std::string& remoteHost,
if (!ret) {
DWORD gle = GetLastError();
return Status(ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "CertGetCertificateChain failed: "
- << errnoWithDescription(gle));
+ str::stream()
+ << "CertGetCertificateChain failed: " << errnoWithDescription(gle));
}
UniqueCertChain certChainHolder(chainContext);
@@ -1761,8 +1755,8 @@ StatusWith<TLSVersion> mapTLSVersion(PCtxtHandle ssl) {
if (ss != SEC_E_OK) {
return Status(ErrorCodes::SSLHandshakeFailed,
- str::stream() << "QueryContextAttributes for connection info failed with"
- << ss);
+ str::stream()
+ << "QueryContextAttributes for connection info failed with" << ss);
}
switch (connInfo.dwProtocol) {
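The comment hunks in this file reindent '/** ... */' blocks: continuation lines that began at column 0 gain one leading space so every '*' aligns under the opener. Illustration (hypothetical declaration):

    /**
     * Continuation asterisks now sit one space in, aligned with the
     * opening token; the comment text itself is unchanged.
     */
    struct ExampleFree {};  // hypothetical type being documented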
diff --git a/src/mongo/util/net/ssl_options.cpp b/src/mongo/util/net/ssl_options.cpp
index bc87567cafe..30389860a29 100644
--- a/src/mongo/util/net/ssl_options.cpp
+++ b/src/mongo/util/net/ssl_options.cpp
@@ -145,15 +145,14 @@ Status parseCertificateSelector(SSLParams::CertificateSelector* selector,
if (key != "thumbprint") {
return {ErrorCodes::BadValue,
str::stream() << "Unknown certificate selector property for '" << name << "': '"
- << key
- << "'"};
+ << key << "'"};
}
auto swHex = hexToVector(value.substr(delim + 1));
if (!swHex.isOK()) {
return {ErrorCodes::BadValue,
- str::stream() << "Invalid certificate selector value for '" << name << "': "
- << swHex.getStatus().reason()};
+ str::stream() << "Invalid certificate selector value for '" << name
+ << "': " << swHex.getStatus().reason()};
}
selector->thumbprint = std::move(swHex.getValue());
@@ -174,8 +173,7 @@ StatusWith<SSLParams::SSLModes> SSLParams::sslModeParse(StringData strMode) {
return Status(
ErrorCodes::BadValue,
str::stream()
- << "Invalid sslMode setting '"
- << strMode
+ << "Invalid sslMode setting '" << strMode
<< "', expected one of: 'disabled', 'allowSSL', 'preferSSL', or 'requireSSL'");
}
}
@@ -193,8 +191,7 @@ StatusWith<SSLParams::SSLModes> SSLParams::tlsModeParse(StringData strMode) {
return Status(
ErrorCodes::BadValue,
str::stream()
- << "Invalid tlsMode setting '"
- << strMode
+ << "Invalid tlsMode setting '" << strMode
<< "', expected one of: 'disabled', 'allowTLS', 'preferTLS', or 'requireTLS'");
}
}
diff --git a/src/mongo/util/net/ssl_options.h b/src/mongo/util/net/ssl_options.h
index 27402e8a923..cb820ba91e7 100644
--- a/src/mongo/util/net/ssl_options.h
+++ b/src/mongo/util/net/ssl_options.h
@@ -93,23 +93,23 @@ struct SSLParams {
enum SSLModes : int {
/**
- * Make unencrypted outgoing connections and do not accept incoming SSL-connections.
- */
+ * Make unencrypted outgoing connections and do not accept incoming SSL-connections.
+ */
SSLMode_disabled,
/**
- * Make unencrypted outgoing connections and accept both unencrypted and SSL-connections.
- */
+ * Make unencrypted outgoing connections and accept both unencrypted and SSL-connections.
+ */
SSLMode_allowSSL,
/**
- * Make outgoing SSL-connections and accept both unencrypted and SSL-connections.
- */
+ * Make outgoing SSL-connections and accept both unencrypted and SSL-connections.
+ */
SSLMode_preferSSL,
/**
- * Make outgoing SSL-connections and only accept incoming SSL-connections.
- */
+ * Make outgoing SSL-connections and only accept incoming SSL-connections.
+ */
SSLMode_requireSSL
};
@@ -137,10 +137,10 @@ Status storeSSLDisabledProtocols(
SSLDisabledProtocolsMode mode = SSLDisabledProtocolsMode::kStandardFormat);
/**
-* The global SSL configuration. This should be accessed only after global initialization has
-* completed. If it must be accessed in an initializer, the initializer should have
-* "EndStartupOptionStorage" as a prerequisite.
-*/
+ * The global SSL configuration. This should be accessed only after global initialization has
+ * completed. If it must be accessed in an initializer, the initializer should have
+ * "EndStartupOptionStorage" as a prerequisite.
+ */
const SSLParams& getSSLGlobalParams();
Status parseCertificateSelector(SSLParams::CertificateSelector* selector,
diff --git a/src/mongo/util/net/ssl_parameters.cpp b/src/mongo/util/net/ssl_parameters.cpp
index 0ace15fb3a2..fd1f8d23c58 100644
--- a/src/mongo/util/net/ssl_parameters.cpp
+++ b/src/mongo/util/net/ssl_parameters.cpp
@@ -70,9 +70,9 @@ StatusWith<ServerGlobalParams::ClusterAuthModes> clusterAuthModeParse(StringData
} else if (strMode == "x509") {
return ServerGlobalParams::ClusterAuthMode_x509;
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid clusterAuthMode '" << strMode
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid clusterAuthMode '" << strMode
<< "', expected one of: 'keyFile', 'sendKeyFile', 'sendX509', or 'x509'");
}
}
@@ -97,8 +97,7 @@ StatusWith<SSLParams::SSLModes> checkTLSModeTransition(T modeToString,
return {ErrorCodes::BadValue,
str::stream() << "Illegal state transition for " << parameterName
<< ", attempt to change from "
- << modeToString(static_cast<SSLParams::SSLModes>(oldMode))
- << " to "
+ << modeToString(static_cast<SSLParams::SSLModes>(oldMode)) << " to "
<< strMode};
}
}
diff --git a/src/mongo/util/net/ssl_parameters_auth.cpp b/src/mongo/util/net/ssl_parameters_auth.cpp
index fd821f4e52d..612c2bc70cc 100644
--- a/src/mongo/util/net/ssl_parameters_auth.cpp
+++ b/src/mongo/util/net/ssl_parameters_auth.cpp
@@ -67,9 +67,9 @@ StatusWith<ServerGlobalParams::ClusterAuthModes> clusterAuthModeParse(StringData
} else if (strMode == "x509") {
return ServerGlobalParams::ClusterAuthMode_x509;
} else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Invalid clusterAuthMode '" << strMode
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Invalid clusterAuthMode '" << strMode
<< "', expected one of: 'keyFile', 'sendKeyFile', 'sendX509', or 'x509'");
}
}
@@ -99,18 +99,16 @@ Status ClusterAuthModeServerParameter::setFromString(const std::string& strMode)
"connections"};
}
serverGlobalParams.clusterAuthMode.store(mode);
- auth::setInternalUserAuthParams(
- BSON(saslCommandMechanismFieldName << "MONGODB-X509" << saslCommandUserDBFieldName
- << "$external"));
+ auth::setInternalUserAuthParams(BSON(saslCommandMechanismFieldName
+ << "MONGODB-X509" << saslCommandUserDBFieldName
+ << "$external"));
} else if ((mode == ServerGlobalParams::ClusterAuthMode_x509) &&
(oldMode == ServerGlobalParams::ClusterAuthMode_sendX509)) {
serverGlobalParams.clusterAuthMode.store(mode);
} else {
return {ErrorCodes::BadValue,
str::stream() << "Illegal state transition for clusterAuthMode, change from "
- << clusterAuthModeFormat()
- << " to "
- << strMode};
+ << clusterAuthModeFormat() << " to " << strMode};
}
return Status::OK();
diff --git a/src/mongo/util/net/ssl_stream.cpp b/src/mongo/util/net/ssl_stream.cpp
index 80e3503f7ae..1aeb82d2189 100644
--- a/src/mongo/util/net/ssl_stream.cpp
+++ b/src/mongo/util/net/ssl_stream.cpp
@@ -40,8 +40,8 @@ namespace asio {
namespace ssl {
namespace detail {
MONGO_FAIL_POINT_DEFINE(smallTLSReads);
-} // namespce detail
-} // namespce ssl
-} // namespce asio
+} // namespace detail
+} // namespace ssl
+} // namespace asio
#endif
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
index 11eba4b9d72..dd9910ebb35 100644
--- a/src/mongo/util/ntservice.cpp
+++ b/src/mongo/util/ntservice.cpp
@@ -625,7 +625,7 @@ void startService() {
}
}
-} // namspace ntservice
+} // namespace ntservice
} // namespace mongo
#endif
diff --git a/src/mongo/util/options_parser/constraints.h b/src/mongo/util/options_parser/constraints.h
index 796f7de8721..6be67a8f31c 100644
--- a/src/mongo/util/options_parser/constraints.h
+++ b/src/mongo/util/options_parser/constraints.h
@@ -131,10 +131,9 @@ private:
T typedVal;
if (!val.get(&typedVal).isOK()) {
return {ErrorCodes::InternalError,
- str::stream() << "Error: value for key: " << _key << " was found as type: "
- << val.typeToString()
- << " but is required to be type: "
- << typeid(typedVal).name()};
+ str::stream() << "Error: value for key: " << _key
+ << " was found as type: " << val.typeToString()
+ << " but is required to be type: " << typeid(typedVal).name()};
}
return _valueCallback(typedVal);
diff --git a/src/mongo/util/options_parser/environment_test.cpp b/src/mongo/util/options_parser/environment_test.cpp
index 9f0737e2ad6..6fbd3d70048 100644
--- a/src/mongo/util/options_parser/environment_test.cpp
+++ b/src/mongo/util/options_parser/environment_test.cpp
@@ -92,8 +92,7 @@ TEST(ToBSONTests, DottedValues) {
ASSERT_OK(environment.set(moe::Key("val1.dotted2"), moe::Value(std::string("string"))));
mongo::BSONObj obj = BSON("val1" << BSON("dotted1" << 6 << "dotted2"
<< "string")
- << "val2"
- << true);
+ << "val2" << true);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
@@ -108,12 +107,10 @@ TEST(ToBSONTests, DeepDottedValues) {
ASSERT_OK(environment.set(moe::Key("val2"), moe::Value(6.0)));
mongo::BSONObj obj =
BSON("val1" << BSON("first1" << BSON("second1" << BSON("third1" << 6 << "third2" << true)
- << "second2"
- << BSON("third1" << false))
+ << "second2" << BSON("third1" << false))
<< "first2"
<< "string")
- << "val2"
- << 6.0);
+ << "val2" << 6.0);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
diff --git a/src/mongo/util/options_parser/option_section.cpp b/src/mongo/util/options_parser/option_section.cpp
index e8f3c6f9927..d54922fb29c 100644
--- a/src/mongo/util/options_parser/option_section.cpp
+++ b/src/mongo/util/options_parser/option_section.cpp
@@ -157,8 +157,7 @@ OptionDescription& OptionSection::addOptionChaining(
// Should not be the same as dottedName.
uassert(ErrorCodes::InternalError,
str::stream() << "Attempted to register option with conflict between dottedName and "
- << "deprecatedDottedName: "
- << dottedName,
+ << "deprecatedDottedName: " << dottedName,
!std::count(deprecatedDottedNames.begin(), deprecatedDottedNames.end(), dottedName));
// Verify deprecated single names.
@@ -170,8 +169,7 @@ OptionDescription& OptionSection::addOptionChaining(
// Should not be the same as singleName.
uassert(ErrorCodes::InternalError,
str::stream() << "Attempted to register option with conflict between singleName and "
- << "deprecatedSingleName: "
- << singleName,
+ << "deprecatedSingleName: " << singleName,
!std::count(deprecatedSingleNames.begin(), deprecatedSingleNames.end(), singleName));
// Should not contain any already registered name.
diff --git a/src/mongo/util/options_parser/options_parser.cpp b/src/mongo/util/options_parser/options_parser.cpp
index e888351b52c..aa04f99d94f 100644
--- a/src/mongo/util/options_parser/options_parser.cpp
+++ b/src/mongo/util/options_parser/options_parser.cpp
@@ -416,9 +416,7 @@ public:
uassert(ErrorCodes::BadValue,
str::stream()
- << nodeName
- << " expansion block must contain only '"
- << getExpansionName()
+ << nodeName << " expansion block must contain only '" << getExpansionName()
<< "', and optionally 'type', 'trim', and/or 'digest'/'digest_key' fields",
node.size() == numVisitedFields);
@@ -472,8 +470,7 @@ public:
&computed);
uassert(ErrorCodes::BadValue,
str::stream() << "SHA256HMAC of config expansion " << computed.toString()
- << " does not match expected digest: "
- << _digest->toString(),
+ << " does not match expected digest: " << _digest->toString(),
computed == *_digest);
}
@@ -487,8 +484,7 @@ public:
if (!status.isOK()) {
uasserted(status.code(),
str::stream() << "Failed processing output of " << getExpansionName()
- << " block for config file: "
- << status.reason());
+ << " block for config file: " << status.reason());
}
return newNode;
@@ -719,8 +715,7 @@ Status YAMLNodeToValue(const YAML::Node& YAMLNode,
if (stringMap.count(elemKey) > 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "String Map Option: " << key
- << " has duplicate keys in YAML Config: "
- << elemKey);
+ << " has duplicate keys in YAML Config: " << elemKey);
}
stringMap[std::move(elemKey)] = elemVal.Scalar();
@@ -1028,10 +1023,10 @@ Status addYAMLNodesToEnvironment(const YAML::Node& root,
}
/**
-* For all options that we registered as composable, combine the values from source and dest
-* and set the result in dest. Note that this only works for options that are registered as
-* vectors of strings.
-*/
+ * For all options that we registered as composable, combine the values from source and dest
+ * and set the result in dest. Note that this only works for options that are registered as
+ * vectors of strings.
+ */
Status addCompositions(const OptionSection& options, const Environment& source, Environment* dest) {
std::vector<OptionDescription> options_vector;
Status ret = options.getAllOptions(&options_vector);
@@ -1126,9 +1121,9 @@ Status addCompositions(const OptionSection& options, const Environment& source,
}
/**
-* For all options that have constraints, add those constraints to our environment so that
-* they run when the environment gets validated.
-*/
+ * For all options that have constraints, add those constraints to our environment so that
+ * they run when the environment gets validated.
+ */
Status addConstraints(const OptionSection& options, Environment* dest) {
std::vector<std::shared_ptr<Constraint>> constraints_vector;
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index 42530ab08b6..65b61b08ab5 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -5096,7 +5096,8 @@ TEST(YAMLConfigFile, canonicalize) {
moe::OptionsParser parser;
moe::Environment env;
std::vector<std::string> argv = {
- "binary", "--bind_ip_all",
+ "binary",
+ "--bind_ip_all",
};
std::map<std::string, std::string> env_map;
ASSERT_OK(parser.run(opts, argv, env_map, &env));
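The argv reflow above follows a documented clang-format rule: a braced initializer list ending in a trailing comma is laid out one element per line rather than bin-packed. The same rule produces the larger one-per-line reflows in perfctr_collect_test.cpp, procparser_test.cpp, and string_map_test.cpp below. A standalone sketch:

    #include <string>
    #include <vector>

    int main() {
        // The trailing comma after the last element forces one-per-line layout;
        // without it, clang-format would pack "binary", "--bind_ip_all" together.
        std::vector<std::string> argv = {
            "binary",
            "--bind_ip_all",
        };
        return argv.size() == 2 ? 0 : 1;
    }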
diff --git a/src/mongo/util/perfctr_collect.cpp b/src/mongo/util/perfctr_collect.cpp
index 5ff3ed41e8b..659d15e4359 100644
--- a/src/mongo/util/perfctr_collect.cpp
+++ b/src/mongo/util/perfctr_collect.cpp
@@ -296,9 +296,7 @@ StatusWith<std::vector<PerfCounterCollector::CounterInfo>> PerfCounterCollector:
if (status != PDH_MORE_DATA) {
return {ErrorCodes::WindowsPdhError,
str::stream() << formatFunctionCallError("PdhExpandCounterPathW", status)
- << " for counter '"
- << path
- << "'"};
+ << " for counter '" << path << "'"};
}
auto buf = stdx::make_unique<wchar_t[]>(pathListLength);
diff --git a/src/mongo/util/perfctr_collect_test.cpp b/src/mongo/util/perfctr_collect_test.cpp
index 35380d43421..869f72f9e20 100644
--- a/src/mongo/util/perfctr_collect_test.cpp
+++ b/src/mongo/util/perfctr_collect_test.cpp
@@ -178,22 +178,22 @@ TEST(FTDCPerfCollector, TestBadCollectionInput) {
ASSERT_NOT_OK(collection.addCountersGroup("cpu", {"\\Processor(0)\\% Idle Time"}));
// Duplicate counter
- ASSERT_NOT_OK(collection.addCountersGroup(
- "cpu2",
- {
- "\\Processor(0)\\% Idle Time", "\\Processor(0)\\% Idle Time",
- }));
+ ASSERT_NOT_OK(collection.addCountersGroup("cpu2",
+ {
+ "\\Processor(0)\\% Idle Time",
+ "\\Processor(0)\\% Idle Time",
+ }));
// Duplicate group
ASSERT_NOT_OK(
collection.addCountersGroupedByInstanceName("cpu", {"\\Processor(0)\\% Idle Time"}));
// Duplicate counter
- ASSERT_NOT_OK(collection.addCountersGroupedByInstanceName(
- "cpu2",
- {
- "\\Processor(0)\\% Idle Time", "\\Processor(0)\\% Idle Time",
- }));
+ ASSERT_NOT_OK(collection.addCountersGroupedByInstanceName("cpu2",
+ {
+ "\\Processor(0)\\% Idle Time",
+ "\\Processor(0)\\% Idle Time",
+ }));
}
// Test negative collector input
diff --git a/src/mongo/util/periodic_runner.h b/src/mongo/util/periodic_runner.h
index 93a03498357..e9dcfa67489 100644
--- a/src/mongo/util/periodic_runner.h
+++ b/src/mongo/util/periodic_runner.h
@@ -143,7 +143,7 @@ public:
* Each wrapped PeriodicRunner::ControllableJob function on this object throws
* if the underlying job is gone (e.g. in shutdown).
*/
-class[[nodiscard]] PeriodicJobAnchor {
+class [[nodiscard]] PeriodicJobAnchor {
public:
using Job = PeriodicRunner::ControllableJob;
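`class [[nodiscard]] PeriodicJobAnchor` gains only a space here, but the attribute is worth a note: applied to a class, `[[nodiscard]]` makes every by-value return of that type warn when the caller ignores it, which fits an anchor whose lifetime presumably controls the underlying job. A generic sketch; all names are illustrative, not mongo's:

    // Discarding a by-value Anchor triggers an unused-result warning, so
    // callers cannot accidentally let the anchor die as a temporary.
    class [[nodiscard]] Anchor {
    public:
        explicit Anchor(int id) : _id(id) {}
        int id() const {
            return _id;
        }

    private:
        int _id;
    };

    inline Anchor makeAnchor() {
        return Anchor{42};
    }

    int main() {
        makeAnchor();              // warning: ignoring a [[nodiscard]] value
        auto held = makeAnchor();  // fine: the anchor is kept alive
        return held.id() == 42 ? 0 : 1;
    }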
diff --git a/src/mongo/util/periodic_runner_factory.cpp b/src/mongo/util/periodic_runner_factory.cpp
index 66cddf81be6..34aa8c86458 100644
--- a/src/mongo/util/periodic_runner_factory.cpp
+++ b/src/mongo/util/periodic_runner_factory.cpp
@@ -40,4 +40,4 @@ std::unique_ptr<PeriodicRunner> makePeriodicRunner(ServiceContext* svc) {
return std::make_unique<PeriodicRunnerImpl>(svc, svc->getPreciseClockSource());
}
-} // namespace
+} // namespace mongo
diff --git a/src/mongo/util/periodic_runner_impl.cpp b/src/mongo/util/periodic_runner_impl.cpp
index fc21a7184a4..98a517cf7d9 100644
--- a/src/mongo/util/periodic_runner_impl.cpp
+++ b/src/mongo/util/periodic_runner_impl.cpp
@@ -57,7 +57,7 @@ PeriodicRunnerImpl::PeriodicJobImpl::PeriodicJobImpl(PeriodicJob job,
: _job(std::move(job)), _clockSource(source), _serviceContext(svc) {}
void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
- auto[startPromise, startFuture] = makePromiseFuture<void>();
+ auto [startPromise, startFuture] = makePromiseFuture<void>();
{
stdx::lock_guard lk(_mutex);
@@ -65,7 +65,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
}
- _thread = stdx::thread([ this, startPromise = std::move(startPromise) ]() mutable {
+ _thread = stdx::thread([this, startPromise = std::move(startPromise)]() mutable {
auto guard = makeGuard([this] { _stopPromise.emplaceValue(); });
Client::initThread(_job.name, _serviceContext, nullptr);
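Two clang-format 7 behavior changes meet in this hunk: structured bindings are now written `auto [a, b]` with a space, and generalized lambda captures are no longer padded as `[ this, x = std::move(x) ]`. The promise/future handoff itself is a common startup pattern; here is a portable sketch using only the standard library (mongo's makePromiseFuture is its own type and is not reproduced here):

    #include <future>
    #include <thread>
    #include <utility>

    int main() {
        // Structured binding over a pair of move-only handles.
        auto [startPromise, startFuture] = [] {
            std::promise<void> p;
            auto f = p.get_future();
            return std::pair(std::move(p), std::move(f));
        }();

        // The thread takes ownership of the promise via an init-capture.
        std::thread worker([p = std::move(startPromise)]() mutable {
            p.set_value();  // signal that the thread has started
        });
        startFuture.wait();
        worker.join();
    }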
diff --git a/src/mongo/util/polymorphic_scoped.h b/src/mongo/util/polymorphic_scoped.h
index cace603334b..c6df4086503 100644
--- a/src/mongo/util/polymorphic_scoped.h
+++ b/src/mongo/util/polymorphic_scoped.h
@@ -31,8 +31,8 @@
namespace mongo {
/**
-* Base class to implement interfaces with RAII-style objects
-*/
+ * Base class to implement interfaces with RAII-style objects
+ */
class PolymorphicScoped {
public:
virtual ~PolymorphicScoped() = default;
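PolymorphicScoped's one-line contract, an interface whose implementations are RAII objects, deserves a concrete picture. A minimal sketch of the pattern; the derived class and its behavior are invented for illustration:

    #include <memory>

    class PolymorphicScopedSketch {
    public:
        virtual ~PolymorphicScopedSketch() = default;
    };

    // An implementation does its work in the constructor and undoes it in the
    // destructor; callers hold it through the base to stay implementation-agnostic.
    class RaisedFlag : public PolymorphicScopedSketch {
    public:
        explicit RaisedFlag(bool& flag) : _flag(flag) {
            _flag = true;
        }
        ~RaisedFlag() override {
            _flag = false;
        }

    private:
        bool& _flag;
    };

    int main() {
        bool flag = false;
        {
            std::unique_ptr<PolymorphicScopedSketch> scoped =
                std::make_unique<RaisedFlag>(flag);
            // flag is true while `scoped` lives
        }
        return flag ? 1 : 0;  // flag is false again here
    }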
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index 58a2ad4c686..43cde512599 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -245,4 +245,4 @@ private:
};
bool writePidFile(const std::string& path);
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_linux.cpp b/src/mongo/util/processinfo_linux.cpp
index c9bd249d478..cccb91cce7d 100644
--- a/src/mongo/util/processinfo_linux.cpp
+++ b/src/mongo/util/processinfo_linux.cpp
@@ -132,7 +132,7 @@ public:
&_exit_signal, &_processor,
&_rtprio, &_sched
*/
- );
+ );
if (found == 0) {
std::cout << "system error: reading proc info" << std::endl;
}
@@ -248,8 +248,8 @@ public:
class LinuxSysHelper {
public:
/**
- * Read the first 1023 bytes from a file
- */
+ * Read the first 1023 bytes from a file
+ */
static std::string readLineFromFile(const char* fname) {
FILE* f;
char fstr[1024] = {0};
@@ -264,8 +264,8 @@ public:
}
/**
- * Get some details about the CPU
- */
+ * Get some details about the CPU
+ */
static void getCpuInfo(int& procCount, std::string& freq, std::string& features) {
FILE* f;
char fstr[1024] = {0};
@@ -290,8 +290,8 @@ public:
}
/**
- * Determine linux distro and version
- */
+ * Determine linux distro and version
+ */
static void getLinuxDistro(std::string& name, std::string& version) {
char buf[4096] = {0};
@@ -387,8 +387,8 @@ public:
}
/**
- * Get system memory total
- */
+ * Get system memory total
+ */
static unsigned long long getSystemMemorySize() {
std::string meminfo = readLineFromFile("/proc/meminfo");
size_t lineOff = 0;
@@ -413,11 +413,11 @@ public:
}
/**
- * Get memory limit for the process.
- * If memory is being limited by the applied control group and it's less
- * than the OS system memory (default cgroup limit is ulonglong max) let's
- * return the actual memory we'll have available to the process.
- */
+ * Get memory limit for the process.
+ * If memory is being limited by the applied control group and it's less
+ * than the OS system memory (default cgroup limit is ulonglong max) let's
+ * return the actual memory we'll have available to the process.
+ */
static unsigned long long getMemorySizeLimit() {
unsigned long long systemMemBytes = getSystemMemorySize();
unsigned long long cgroupMemBytes = 0;
@@ -509,8 +509,8 @@ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
}
/**
-* Save a BSON obj representing the host system's details
-*/
+ * Save a BSON obj representing the host system's details
+ */
void ProcessInfo::SystemInfo::collectSystemInfo() {
utsname unameData;
std::string distroName, distroVersion;
@@ -564,8 +564,8 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
}
/**
-* Determine if the process is running with (cc)NUMA
-*/
+ * Determine if the process is running with (cc)NUMA
+ */
bool ProcessInfo::checkNumaEnabled() {
bool hasMultipleNodes = false;
bool hasNumaMaps = false;
@@ -620,4 +620,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_openbsd.cpp b/src/mongo/util/processinfo_openbsd.cpp
index 234d2e9d366..34dade8a885 100644
--- a/src/mongo/util/processinfo_openbsd.cpp
+++ b/src/mongo/util/processinfo_openbsd.cpp
@@ -217,4 +217,4 @@ boost::optional<unsigned long> ProcessInfo::getNumCoresForProcess() {
return nprocs;
return boost::none;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_osx.cpp b/src/mongo/util/processinfo_osx.cpp
index a11a8a5974a..75fd4862520 100644
--- a/src/mongo/util/processinfo_osx.cpp
+++ b/src/mongo/util/processinfo_osx.cpp
@@ -240,4 +240,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_solaris.cpp b/src/mongo/util/processinfo_solaris.cpp
index 91f73e41dd9..13e5e75691c 100644
--- a/src/mongo/util/processinfo_solaris.cpp
+++ b/src/mongo/util/processinfo_solaris.cpp
@@ -242,4 +242,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_test.cpp b/src/mongo/util/processinfo_test.cpp
index 47fb5e91b5c..158c1186268 100644
--- a/src/mongo/util/processinfo_test.cpp
+++ b/src/mongo/util/processinfo_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/processinfo.h"
-using mongo::ProcessInfo;
using boost::optional;
+using mongo::ProcessInfo;
namespace mongo_test {
TEST(ProcessInfo, SysInfoIsInitialized) {
@@ -65,4 +65,4 @@ TEST(ProcessInfo, GetNumAvailableCores) {
TEST(ProcessInfo, GetNumCoresReturnsNonZeroNumberOfProcessors) {
ASSERT_GREATER_THAN(ProcessInfo::getNumCores(), 0u);
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/util/processinfo_unknown.cpp b/src/mongo/util/processinfo_unknown.cpp
index 338c6efd857..05f84b7f22c 100644
--- a/src/mongo/util/processinfo_unknown.cpp
+++ b/src/mongo/util/processinfo_unknown.cpp
@@ -78,4 +78,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
boost::optional<unsigned long> ProcessInfo::getNumCoresForProcess() {
return boost::none;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/processinfo_windows.cpp b/src/mongo/util/processinfo_windows.cpp
index e545778f897..3e6e0b27aae 100644
--- a/src/mongo/util/processinfo_windows.cpp
+++ b/src/mongo/util/processinfo_windows.cpp
@@ -413,4 +413,4 @@ bool ProcessInfo::pagesInMemory(const void* start, size_t numPages, std::vector<
}
return true;
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/procparser.cpp b/src/mongo/util/procparser.cpp
index 8e6b203da12..c574a3fdbe2 100644
--- a/src/mongo/util/procparser.cpp
+++ b/src/mongo/util/procparser.cpp
@@ -93,8 +93,8 @@ StatusWith<std::string> readFileAsString(StringData filename) {
if (fd == -1) {
int err = errno;
return Status(ErrorCodes::FileOpenFailed,
- str::stream() << "Failed to open file " << filename << " with error: "
- << errnoWithDescription(err));
+ str::stream() << "Failed to open file " << filename
+ << " with error: " << errnoWithDescription(err));
}
auto scopedGuard = makeGuard([fd] { close(fd); });
@@ -122,8 +122,8 @@ StatusWith<std::string> readFileAsString(StringData filename) {
}
return Status(ErrorCodes::FileStreamFailed,
- str::stream() << "Failed to read file " << filename << " with error: "
- << errnoWithDescription(err));
+ str::stream() << "Failed to read file " << filename
+ << " with error: " << errnoWithDescription(err));
}
break;
@@ -432,11 +432,10 @@ Status parseProcNetstat(const std::vector<StringData>& keys,
// Split the file by lines.
uint32_t lineNum = 0;
- for (string_split_iterator
- lineIt = string_split_iterator(
- data.begin(),
- data.end(),
- boost::token_finder([](char c) { return c == '\n'; }, boost::token_compress_on));
+ for (string_split_iterator lineIt = string_split_iterator(
+ data.begin(),
+ data.end(),
+ boost::token_finder([](char c) { return c == '\n'; }, boost::token_compress_on));
lineIt != string_split_iterator();
++lineIt, ++lineNum) {
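The parseProcNetstat loop is the most intricate reflow in this file: clang-format 7 keeps the `string_split_iterator` construction as one aligned argument list instead of stacking the iterator name on its own line. Since the boost idiom is easy to misread, here is a self-contained sketch of the same line-splitting technique; the input string is invented:

    #include <boost/algorithm/string/find_iterator.hpp>
    #include <boost/algorithm/string/finder.hpp>
    #include <iostream>
    #include <string>

    int main() {
        using string_split_iterator = boost::split_iterator<std::string::iterator>;
        std::string data = "cpu 100 200\nbtime 300\n\nctxt 400\n";

        // Split on '\n'; token_compress_on merges adjacent delimiters so the
        // blank line does not produce an empty token.
        for (string_split_iterator lineIt(
                 data.begin(),
                 data.end(),
                 boost::token_finder([](char c) { return c == '\n'; },
                                     boost::token_compress_on));
             lineIt != string_split_iterator();
             ++lineIt) {
            if (lineIt->empty())
                continue;  // skip the empty range after the trailing newline
            std::cout << std::string(lineIt->begin(), lineIt->end()) << '\n';
        }
    }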
diff --git a/src/mongo/util/procparser.h b/src/mongo/util/procparser.h
index 9bae32cba10..8fd39d0fd35 100644
--- a/src/mongo/util/procparser.h
+++ b/src/mongo/util/procparser.h
@@ -60,12 +60,12 @@ Status parseProcStat(const std::vector<StringData>& keys,
BSONObjBuilder* builder);
/**
-* Read from file, and write the specified list of keys into builder.
-*
-* See parseProcStat.
-*
-* Returns Status errors on file reading issues.
-*/
+ * Read from file, and write the specified list of keys into builder.
+ *
+ * See parseProcStat.
+ *
+ * Returns Status errors on file reading issues.
+ */
Status parseProcStatFile(StringData filename,
const std::vector<StringData>& keys,
BSONObjBuilder* builder);
diff --git a/src/mongo/util/procparser_test.cpp b/src/mongo/util/procparser_test.cpp
index 0afd85726c1..1fba705f929 100644
--- a/src/mongo/util/procparser_test.cpp
+++ b/src/mongo/util/procparser_test.cpp
@@ -208,7 +208,12 @@ TEST(FTDCProcStat, TestStat) {
// otherwise.
TEST(FTDCProcStat, TestLocalStat) {
std::vector<StringData> keys{
- "btime", "cpu", "ctxt", "processes", "procs_blocked", "procs_running",
+ "btime",
+ "cpu",
+ "ctxt",
+ "processes",
+ "procs_blocked",
+ "procs_running",
};
BSONObjBuilder builder;
@@ -237,7 +242,12 @@ TEST(FTDCProcStat, TestLocalStat) {
TEST(FTDCProcStat, TestLocalNonExistentStat) {
std::vector<StringData> keys{
- "btime", "cpu", "ctxt", "processes", "procs_blocked", "procs_running",
+ "btime",
+ "cpu",
+ "ctxt",
+ "processes",
+ "procs_blocked",
+ "procs_running",
};
BSONObjBuilder builder;
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index c103515d19f..05b39eff7db 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -336,8 +336,7 @@ public:
explicit Waiter(ProducerState& x, size_t wants) : _x(x) {
uassert(ErrorCodes::ProducerConsumerQueueProducerQueueDepthExceeded,
str::stream() << "ProducerConsumerQueue producer queue depth exceeded, "
- << (_x._producerQueueDepth + wants)
- << " > "
+ << (_x._producerQueueDepth + wants) << " > "
<< _x._maxProducerQueueDepth,
_x._maxProducerQueueDepth == std::numeric_limits<size_t>::max() ||
_x._producerQueueDepth + wants <= _x._maxProducerQueueDepth);
@@ -473,8 +472,7 @@ public:
auto cost = _invokeCostFunc(t, lk);
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of item (" << cost
- << ") larger than maximum queue size ("
- << _options.maxQueueDepth
+ << ") larger than maximum queue size (" << _options.maxQueueDepth
<< ")",
cost <= _options.maxQueueDepth);
@@ -506,8 +504,7 @@ public:
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of items in batch (" << cost
- << ") larger than maximum queue size ("
- << _options.maxQueueDepth
+ << ") larger than maximum queue size (" << _options.maxQueueDepth
<< ")",
cost <= _options.maxQueueDepth);
diff --git a/src/mongo/util/producer_consumer_queue_test.cpp b/src/mongo/util/producer_consumer_queue_test.cpp
index 34ff9227a8d..ba39482d0d0 100644
--- a/src/mongo/util/producer_consumer_queue_test.cpp
+++ b/src/mongo/util/producer_consumer_queue_test.cpp
@@ -861,7 +861,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(pipeProducerEndClosesAfterProducersLeave,
ASSERT_EQUALS(consumer.pop(), MoveOnly(2));
auto thread3 =
- helper.runThread("Producer3", [producer = std::move(producer)](OperationContext * opCtx) {
+ helper.runThread("Producer3", [producer = std::move(producer)](OperationContext* opCtx) {
producer.push(MoveOnly(3), opCtx);
});
@@ -882,7 +882,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(pipeConsumerEndClosesAfterConsumersLeave,
helper.runThread("Consumer2", [consumer](OperationContext* opCtx) { consumer.pop(opCtx); });
auto thread3 =
- helper.runThread("Consumer3", [consumer = std::move(consumer)](OperationContext * opCtx) {
+ helper.runThread("Consumer3", [consumer = std::move(consumer)](OperationContext* opCtx) {
consumer.pop(opCtx);
});
diff --git a/src/mongo/util/progress_meter.cpp b/src/mongo/util/progress_meter.cpp
index 9ad977581ec..441884d8d48 100644
--- a/src/mongo/util/progress_meter.cpp
+++ b/src/mongo/util/progress_meter.cpp
@@ -100,4 +100,4 @@ std::string ProgressMeter::toString() const {
return buf.str();
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/progress_meter.h b/src/mongo/util/progress_meter.h
index c666c0e90a2..35b3305f94a 100644
--- a/src/mongo/util/progress_meter.h
+++ b/src/mongo/util/progress_meter.h
@@ -168,4 +168,4 @@ public:
private:
ProgressMeter* _pm;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index 6cc79d5d47f..26d2e19f092 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -278,4 +278,4 @@ private:
stdx::condition_variable _cvNoLongerFull;
stdx::condition_variable _cvNoLongerEmpty;
};
-}
+} // namespace mongo
diff --git a/src/mongo/util/regex_util.cpp b/src/mongo/util/regex_util.cpp
index 1a596f5375c..2338e32f0c3 100644
--- a/src/mongo/util/regex_util.cpp
+++ b/src/mongo/util/regex_util.cpp
@@ -56,13 +56,13 @@ pcrecpp::RE_Options flagsToPcreOptions(StringData optionFlags,
continue;
default:
if (!ignoreInvalidFlags) {
- uasserted(
- 51108,
- str::stream() << opName << " invalid flag in regex options: " << flag);
+ uasserted(51108,
+ str::stream()
+ << opName << " invalid flag in regex options: " << flag);
}
}
}
return opt;
}
-}
-}
+} // namespace regex_util
+} // namespace mongo
diff --git a/src/mongo/util/regex_util.h b/src/mongo/util/regex_util.h
index 9be72ba94d7..f187c8eddfc 100644
--- a/src/mongo/util/regex_util.h
+++ b/src/mongo/util/regex_util.h
@@ -42,5 +42,5 @@ namespace regex_util {
pcrecpp::RE_Options flagsToPcreOptions(StringData optionFlags,
bool ignoreInvalidOptions,
StringData opName = "");
-}
-}
+} // namespace regex_util
+} // namespace mongo
diff --git a/src/mongo/util/safe_num.h b/src/mongo/util/safe_num.h
index 529adec4878..7f16cd036f3 100644
--- a/src/mongo/util/safe_num.h
+++ b/src/mongo/util/safe_num.h
@@ -40,7 +40,7 @@ namespace mongo {
namespace mutablebson {
class Element;
class Document;
-}
+} // namespace mutablebson
/**
* SafeNum holds and does arithmetic on a number in a safe way, handling overflow
diff --git a/src/mongo/util/safe_num_test.cpp b/src/mongo/util/safe_num_test.cpp
index 426c4d2809e..7fb581b786f 100644
--- a/src/mongo/util/safe_num_test.cpp
+++ b/src/mongo/util/safe_num_test.cpp
@@ -40,8 +40,8 @@
namespace {
-using mongo::SafeNum;
using mongo::Decimal128;
+using mongo::SafeNum;
TEST(Basics, Initialization) {
const SafeNum numInt(0);
diff --git a/src/mongo/util/scopeguard.h b/src/mongo/util/scopeguard.h
index 7f4b61761a0..7c304e99fa3 100644
--- a/src/mongo/util/scopeguard.h
+++ b/src/mongo/util/scopeguard.h
@@ -36,7 +36,7 @@
namespace mongo {
template <typename F>
-class[[nodiscard]] ScopeGuard {
+class [[nodiscard]] ScopeGuard {
public:
explicit ScopeGuard(const F& f) : _func(f) {}
explicit ScopeGuard(F && f) : _func(std::move(f)) {}
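This is the same `class [[nodiscard]]` spacing fix as in periodic_runner.h. ScopeGuard pairs with the `makeGuard([...] { ... })` calls visible in earlier hunks (procparser.cpp, periodic_runner_impl.cpp); here is a reduced sketch of the idiom under invented names, not mongo's exact implementation:

    #include <cstdio>
    #include <utility>

    template <typename F>
    class [[nodiscard]] GuardSketch {
    public:
        explicit GuardSketch(F f) : _func(std::move(f)) {}
        ~GuardSketch() {
            _func();  // run the cleanup on every scope exit
        }
        GuardSketch(const GuardSketch&) = delete;
        GuardSketch& operator=(const GuardSketch&) = delete;

    private:
        F _func;
    };

    template <typename F>
    GuardSketch<F> makeGuardSketch(F f) {  // mirrors the makeGuard calls above
        return GuardSketch<F>(std::move(f));
    }

    int main() {
        std::FILE* fd = std::fopen("/dev/null", "r");
        auto guard = makeGuardSketch([&] {
            if (fd)
                std::fclose(fd);
        });
        // ... use fd; the guard closes it even on early return.
    }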
diff --git a/src/mongo/util/shared_buffer.h b/src/mongo/util/shared_buffer.h
index 00000d0a3d5..b12d37f84a5 100644
--- a/src/mongo/util/shared_buffer.h
+++ b/src/mongo/util/shared_buffer.h
@@ -228,4 +228,4 @@ private:
inline void swap(ConstSharedBuffer& one, ConstSharedBuffer& two) {
one.swap(two);
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/signal_handlers.cpp b/src/mongo/util/signal_handlers.cpp
index 707a76f3233..e89998f1bd2 100644
--- a/src/mongo/util/signal_handlers.cpp
+++ b/src/mongo/util/signal_handlers.cpp
@@ -65,7 +65,7 @@ const char* strsignal(int signalNum) {
return "UNKNOWN";
}
}
-}
+} // namespace
#endif
namespace mongo {
diff --git a/src/mongo/util/signal_win32.cpp b/src/mongo/util/signal_win32.cpp
index 3f4163c514a..e4e51e4b19f 100644
--- a/src/mongo/util/signal_win32.cpp
+++ b/src/mongo/util/signal_win32.cpp
@@ -42,4 +42,4 @@ std::string getShutdownSignalName(int processId) {
return str::stream() << strEventNamePrefix << processId;
}
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/signal_win32.h b/src/mongo/util/signal_win32.h
index d05bde04899..1127e549a1f 100644
--- a/src/mongo/util/signal_win32.h
+++ b/src/mongo/util/signal_win32.h
@@ -37,4 +37,4 @@ namespace mongo {
// Generate windows event name for shutdown signal
std::string getShutdownSignalName(int processId);
#endif
-}
+} // namespace mongo
diff --git a/src/mongo/util/stack_introspect.h b/src/mongo/util/stack_introspect.h
index 9087711ad69..9f431160332 100644
--- a/src/mongo/util/stack_introspect.h
+++ b/src/mongo/util/stack_introspect.h
@@ -44,4 +44,4 @@ bool inConstructorChain(bool printOffending = false);
* @return if supported on platform, compile options may still prevent it from working
*/
bool inConstructorChainSupported();
-}
+} // namespace mongo
diff --git a/src/mongo/util/stacktrace_posix.cpp b/src/mongo/util/stacktrace_posix.cpp
index 9eaed27e06c..1d7b3d7689e 100644
--- a/src/mongo/util/stacktrace_posix.cpp
+++ b/src/mongo/util/stacktrace_posix.cpp
@@ -538,12 +538,12 @@ void addOSComponentsToSoMap(BSONObjBuilder* soMap) {
}
}
}
-} // namepace
+} // namespace
} // namespace mongo
#else
namespace mongo {
namespace {
void addOSComponentsToSoMap(BSONObjBuilder* soMap) {}
-} // namepace
+} // namespace
} // namespace mongo
#endif
diff --git a/src/mongo/util/stacktrace_windows.cpp b/src/mongo/util/stacktrace_windows.cpp
index bf98e1f0646..6576300f7ba 100644
--- a/src/mongo/util/stacktrace_windows.cpp
+++ b/src/mongo/util/stacktrace_windows.cpp
@@ -351,4 +351,4 @@ int crtDebugCallback(int, char* originalMessage, int*) {
log() << "*** C runtime error: " << message.substr(0, message.find('\n')) << ", terminating";
fassertFailed(17006);
}
-}
+} // namespace mongo
diff --git a/src/mongo/util/string_map_test.cpp b/src/mongo/util/string_map_test.cpp
index b244313db6b..24bae71587c 100644
--- a/src/mongo/util/string_map_test.cpp
+++ b/src/mongo/util/string_map_test.cpp
@@ -212,11 +212,14 @@ TEST(StringMapTest, Assign) {
TEST(StringMapTest, InitWithInitializerList) {
StringMap<int> smap{
- {"q", 1}, {"coollog", 2}, {"mango", 3}, {"mango", 4},
+ {"q", 1},
+ {"coollog", 2},
+ {"mango", 3},
+ {"mango", 4},
};
ASSERT_EQ(1, smap["q"]);
ASSERT_EQ(2, smap["coollog"]);
ASSERT_EQ(3, smap["mango"]);
}
-}
+} // namespace
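One detail of the reflowed test above is easy to miss: the initializer list contains "mango" twice, and the assertion expects 3. That is standard associative-container behavior, since initializer-list construction inserts element by element and insertion keeps the first occurrence of a key. Demonstrated here with std::unordered_map, as StringMap itself is mongo's type:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    int main() {
        std::unordered_map<std::string, int> smap{
            {"q", 1},
            {"coollog", 2},
            {"mango", 3},
            {"mango", 4},  // duplicate key: insertion keeps the first mapping
        };
        assert(smap["mango"] == 3);
        return 0;
    }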
diff --git a/src/mongo/util/summation_test.cpp b/src/mongo/util/summation_test.cpp
index 1fd8a632640..72b29cc47de 100644
--- a/src/mongo/util/summation_test.cpp
+++ b/src/mongo/util/summation_test.cpp
@@ -41,41 +41,41 @@ namespace mongo {
namespace {
using limits = std::numeric_limits<long long>;
-std::vector<long long> longValues = {
- limits::min(),
- limits::min() + 1,
- limits::min() / 2,
- -(1LL << 53),
- -(1LL << 52),
- -(1LL << 32),
- -0x100,
- -0xff,
- -0xaa,
- -0x55,
- -1,
- 0,
- 1,
- 2,
- 0x55,
- 0x80,
- 0xaa,
- 0x100,
- 512,
- 1024,
- 2048,
- 1LL << 31,
- 1LL << 32,
- 1LL << 52,
- 1LL << 53,
- limits::max() / 2,
+std::vector<long long> longValues = {limits::min(),
+ limits::min() + 1,
+ limits::min() / 2,
+ -(1LL << 53),
+ -(1LL << 52),
+ -(1LL << 32),
+ -0x100,
+ -0xff,
+ -0xaa,
+ -0x55,
+ -1,
+ 0,
+ 1,
+ 2,
+ 0x55,
+ 0x80,
+ 0xaa,
+ 0x100,
+ 512,
+ 1024,
+ 2048,
+ 1LL << 31,
+ 1LL << 32,
+ 1LL << 52,
+ 1LL << 53,
+ limits::max() / 2,
#pragma warning(push)
// C4308: negative integral constant converted to unsigned type
#pragma warning(disable : 4308)
- static_cast<long long>(1ULL << 63) - (1ULL << (63 - 53 - 1)), // Halfway between two doubles
+ static_cast<long long>(1ULL << 63) -
+ (1ULL << (63 - 53 - 1)), // Halfway between two doubles
#pragma warning(pop)
- limits::max() - 1,
- limits::max()};
+ limits::max() - 1,
+ limits::max()};
std::vector<double> doubleValues = {
1.4831356930199802e-05, -3.121724665346865, 3041897608700.073, 1001318343149.7166,
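The test values above cluster around 2^52 and 2^53 because an IEEE double has a 53-bit significand: every integer up to 2^53 is exactly representable, and beyond that the gaps widen, which is what the "halfway between two doubles" constant probes. A two-line check:

    #include <iostream>

    int main() {
        long long below = 1LL << 52;
        long long above = 1LL << 53;
        // Below 2^53 adjacent integers stay distinct as doubles; at 2^53 they collide.
        std::cout << (static_cast<double>(below) != static_cast<double>(below + 1)) << '\n';  // 1
        std::cout << (static_cast<double>(above) != static_cast<double>(above + 1)) << '\n';  // 0
    }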
diff --git a/src/mongo/util/tcmalloc_set_parameter.cpp b/src/mongo/util/tcmalloc_set_parameter.cpp
index 8fa4a5f0c37..f9f43ecd387 100644
--- a/src/mongo/util/tcmalloc_set_parameter.cpp
+++ b/src/mongo/util/tcmalloc_set_parameter.cpp
@@ -75,18 +75,16 @@ StatusWith<size_t> validateTCMallocValue(StringData name, const BSONElement& new
return {ErrorCodes::TypeMismatch,
str::stream() << "Expected server parameter " << name
<< " to have numeric type, but found "
- << newValueElement.toString(false)
- << " of type "
+ << newValueElement.toString(false) << " of type "
<< typeName(newValueElement.type())};
}
long long valueAsLongLong = newValueElement.safeNumberLong();
if (valueAsLongLong < 0 ||
static_cast<unsigned long long>(valueAsLongLong) > std::numeric_limits<size_t>::max()) {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Value " << newValueElement.toString(false) << " is out of range for "
- << name
- << "; expected a value between 0 and "
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Value " << newValueElement.toString(false) << " is out of range for "
+ << name << "; expected a value between 0 and "
<< std::min<unsigned long long>(std::numeric_limits<size_t>::max(),
std::numeric_limits<long long>::max()));
}
diff --git a/src/mongo/util/text.cpp b/src/mongo/util/text.cpp
index df084ac1bae..3585adcf9a6 100644
--- a/src/mongo/util/text.cpp
+++ b/src/mongo/util/text.cpp
@@ -184,7 +184,7 @@ std::wstring toWideString(const char* utf8String) {
-1, // Count, -1 for NUL-terminated
NULL, // No output buffer
0 // Zero means "compute required size"
- );
+ );
if (bufferSize == 0) {
return std::wstring();
}
@@ -196,7 +196,7 @@ std::wstring toWideString(const char* utf8String) {
-1, // Count, -1 for NUL-terminated
tempBuffer.get(), // UTF-16 output buffer
bufferSize // Buffer size in wide characters
- );
+ );
return std::wstring(tempBuffer.get());
}
@@ -214,7 +214,7 @@ bool writeUtf8ToWindowsConsole(const char* utf8String, unsigned int utf8StringSi
utf8StringSize, // Input string length
NULL, // No output buffer
0 // Zero means "compute required size"
- );
+ );
if (bufferSize == 0) {
return true;
}
@@ -225,7 +225,7 @@ bool writeUtf8ToWindowsConsole(const char* utf8String, unsigned int utf8StringSi
utf8StringSize, // Input string length
utf16String.get(), // UTF-16 output buffer
bufferSize // Buffer size in wide characters
- );
+ );
const wchar_t* utf16Pointer = utf16String.get();
size_t numberOfCharactersToWrite = bufferSize;
HANDLE consoleHandle = GetStdHandle(STD_OUTPUT_HANDLE);
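All four `);` re-indents in text.cpp close the same Windows idiom: call the conversion API once with a null buffer to learn the required size, allocate, then call again to convert. The shape is worth seeing outside the WinAPI; the same two-call pattern with std::snprintf:

    #include <cstdio>
    #include <string>

    int main() {
        // First call: null buffer, zero size -> returns the required length.
        int needed = std::snprintf(nullptr, 0, "pid=%d", 12345);
        std::string buf(static_cast<size_t>(needed) + 1, '\0');
        // Second call: the real conversion into the sized buffer.
        std::snprintf(buf.data(), buf.size(), "pid=%d", 12345);
        std::puts(buf.c_str());  // prints: pid=12345
    }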
diff --git a/src/mongo/util/tick_source_test.cpp b/src/mongo/util/tick_source_test.cpp
index 78ea3dac678..aef28a7e97c 100644
--- a/src/mongo/util/tick_source_test.cpp
+++ b/src/mongo/util/tick_source_test.cpp
@@ -52,5 +52,5 @@ TEST(TickSourceTest, TicksToDurationConversion) {
tsMicros.reset(1);
ASSERT_EQ(tsMicros.ticksTo<Microseconds>(tsMicros.getTicks()).count(), 1);
}
-}
+} // namespace
} // namespace mongo
diff --git a/src/mongo/util/unique_function_test.cpp b/src/mongo/util/unique_function_test.cpp
index 930de9f71a2..07694b67cd9 100644
--- a/src/mongo/util/unique_function_test.cpp
+++ b/src/mongo/util/unique_function_test.cpp
@@ -101,7 +101,7 @@ TEST(UniqueFunctionTest, reassign_simple_unique_function_from_lambda) {
TEST(UniqueFunctionTest, accepts_a_functor_that_is_move_only) {
struct Checker {};
- mongo::unique_function<void()> uf = [checkerPtr = std::make_unique<Checker>()]{};
+ mongo::unique_function<void()> uf = [checkerPtr = std::make_unique<Checker>()] {};
mongo::unique_function<void()> uf2 = std::move(uf);
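The whitespace change here (`[...]{}` becomes `[...] {}`) sits in a test whose point is that mongo::unique_function accepts move-only callables, which std::function cannot, since its target must be copy-constructible. A minimal illustration of that constraint using only the standard library:

    #include <functional>
    #include <memory>
    #include <utility>

    int main() {
        // A lambda capturing a unique_ptr is move-only.
        auto moveOnly = [p = std::make_unique<int>(7)] { return *p; };

        // std::function<int()> f = std::move(moveOnly);  // ill-formed:
        // std::function requires a copy-constructible target.

        auto moved = std::move(moveOnly);  // moving the closure itself is fine
        return moved() == 7 ? 0 : 1;
    }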
diff --git a/src/mongo/util/unowned_ptr_test.cpp b/src/mongo/util/unowned_ptr_test.cpp
index ac1bf86067f..c914ea03bc3 100644
--- a/src/mongo/util/unowned_ptr_test.cpp
+++ b/src/mongo/util/unowned_ptr_test.cpp
@@ -155,4 +155,4 @@ TEST(UnownedPtr, Equality) {
ASSERT_NE(unowned_ptr<int>(), unowned_ptr<int>(&i)); // NULL != non-NULL
ASSERT_NE(unowned_ptr<int>(&i), unowned_ptr<int>(&j)); // two distinct non-NULLs
}
-}
+} // namespace mongo
diff --git a/src/mongo/watchdog/watchdog_mongod.h b/src/mongo/watchdog/watchdog_mongod.h
index 186e21e4a47..06892de6543 100644
--- a/src/mongo/watchdog/watchdog_mongod.h
+++ b/src/mongo/watchdog/watchdog_mongod.h
@@ -34,8 +34,8 @@
namespace mongo {
/**
-* Start the watchdog.
-*/
+ * Start the watchdog.
+ */
void startWatchdog();
/**